# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# (01:Introduction)=
# # Introduction
#
# Python packages are a core element of the Python programming language and are how you write reusable and shareable code in Python. If you're reading this book, chances are you already know how to use packages with the help of the `import` statement in Python. For example, importing and using the `numpy` package to round pi to 3 decimal places is as simple as:

# +
import numpy as np

np.round(np.pi, decimals=3)
# -

# At a minimum, a package simply bundles together code (such as functions, classes, variables, or scripts) so that it can be easily reused across different projects (a minimal sketch of this idea is shown at the end of this section). However, packages may also include things like documentation and tests, which become exponentially more important if you wish to share your package with others.
#
# As of January 2021, there are over 280,000 packages available on the Python Package Index (PyPI). Packages are a key reason why Python is such a powerful and widely used programming language. The chances are that someone has already solved a problem that you're working on, and you can benefit from their work by downloading and installing their package (which they have kindly developed and shared) by, for example, using Python's native package manager `pip` and a simple `pip install` at the command line. Put simply, packages are how you make it as easy as possible to share, maintain, and collaborate on Python code with others; whether they be your friends, work colleagues, or the world!
#
# Even if you never intend to share your code with others, making packages will ultimately save you time. Creating Python packages will make it significantly easier for you to access, reuse, and maintain your code within a project and across different projects. At some point, all of us have wanted to reuse code from one project in another; this is something often accomplished through the reprehensible method of copy-and-pasting your existing code into the new project. Besides being obviously inefficient, this practice also makes it difficult to improve and maintain your code and its dependencies across projects. Creating a simple Python package will solve these problems.
#
# Regardless of your motivation, the goal of this book is to show you how to easily develop Python packages. The focus is overwhelmingly practical - we will leverage modern methods and tools to develop and maintain packages efficiently, reproducibly, and with as much automation as possible, so you can focus on writing and sharing code. Along the way, we'll also shed light on some of the lower-level details of Python packaging and the Python programming language.
#
# ```{figure} images/packaging-flowchart.png
# ---
# width: 75%
# name: 01-package-flowchart
# alt: The Python packaging workflow.
# ---
# The Python packaging workflow.
# ```

# ## Why you should create packages

# As discussed above, there are many reasons why you should develop Python packages! Let's summarise the key reasons below:
#
# - To effectively share your code with others.
# - They save you time. Even if you don't intend to share your code, packages help you easily reuse and maintain your code across multiple projects.
# - They force you to organise and document your code, such that it can be more easily understood and used at a later time.
# - They isolate dependencies for your code and improve its reproducibility.
# - They are a good way to practice writing good code.
# - Finally, developing and distributing packages supports the Python ecosystem and other Python users who can benefit from your work.
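# To make the bundling idea above concrete, here is a minimal sketch of what a package boils down to: a directory containing an `__init__.py`, which Python can then import just like `numpy` above. The package name `mini_pkg` and function `greet` are invented for illustration only; they are not from a real project.

# +
# Build a throwaway package on disk and import it (illustrative sketch only).
import importlib
import pathlib
import sys

pkg_dir = pathlib.Path("mini_pkg")
pkg_dir.mkdir(exist_ok=True)
(pkg_dir / "__init__.py").write_text(
    "def greet(name):\n"
    "    return f'Hello, {name}!'\n"
)

sys.path.insert(0, ".")  # make sure the current directory is importable
mini_pkg = importlib.import_module("mini_pkg")
print(mini_pkg.greet("packaging"))  # Hello, packaging!
# -

# A real, installable package wraps this same core idea with metadata (typically a `pyproject.toml` or `setup.py`), documentation, and tests, so that others can `pip install` and rely on it.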
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + id="l5rq92V7ObHs" colab_type="code" colab={} # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # + id="B4VTPpXkOhds" colab_type="code" colab={} import numpy as np np.random.seed(42) xs = np.array([2*np.random.rand(100), 2*np.random.rand(100)]) ys = np.array(4 + 3 * (xs[0]-xs[1]) + np.random.rand(100)) # + id="PlhVE5hJSbsy" colab_type="code" outputId="5e5411e1-f61c-4f68-8f6b-d1c1e4f6efa0" colab={"base_uri": "https://localhost:8080/", "height": 294} for i in np.arange(len(xs)): plt.subplot(2,1,i+1) plt.plot(xs[i], ys, 'b.') plt.xlabel('$X_%d$' % (i+1)) plt.ylabel('$y$') plt.axis([0, 2, 0, 15]) plt.subplots_adjust(hspace=.75) plt.show() # + id="9WIBehqmUXeM" colab_type="code" colab={} # split the data into training and test sets # train set train_xs = xs[:,:80] train_ys = ys[:80] # test set test_xs = xs[:,80:] test_ys = ys[80:] # + [markdown] id="wq5R9tfxOlK6" colab_type="text" # # Linear regression using numpy # # + id="V7S2Rljr4gBI" colab_type="code" colab={} # number of epochs epochs = 10 # learning rate lr = 0.01 # + id="gpUiggnZ35PN" colab_type="code" colab={} # initial value for weight w and bias b w = np.array([np.random.rand(1), np.random.rand(1)]) b = np.zeros(1) # + id="ia_EGxHl4WNT" colab_type="code" colab={} for epoch in np.arange(epochs): for i in np.arange(80): y_pred = w[0]*train_xs[0][i] + w[1]*train_xs[1][i] + b grad_w0 = (y_pred - train_ys[i]) * train_xs[0][i] grad_w1 = (y_pred - train_ys[i]) * train_xs[1][i] grad_b = (y_pred - train_ys[i]) w[0] -= lr * grad_w0 w[1] -= lr * grad_w1 b -= lr * grad_b # + id="ouGCs5h0Vb7j" colab_type="code" outputId="910ed314-7981-493d-cb8a-a738dbfbaa79" colab={"base_uri": "https://localhost:8080/", "height": 34} test_loss = 0 print(len(test_xs)) for i in np.arange(20): test_loss += 0.5 * (w[0]*test_xs[0][i] + w[1]*test_xs[1][i] + b - test_ys[i]) ** 2 test_loss /= 20 # + id="ceyzFn1sVtb8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5ba90bbe-b4a2-42da-a0ae-2346c5888b3f" test_loss # + id="OagpMGGUVw5k" colab_type="code" colab={} pred_ys = w[0]*test_xs[0] + w[1]*test_xs[1] + b # + id="QS4IdviyXBac" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 294} outputId="0112ad88-996b-450f-9594-45f9694105b3" # plt.plot(test_xs, test_ys, "b.") # plt.plot(test_xs, pred_ys, "r.") # predicted values # plt.xlabel("$x_1$", fontsize=18) # plt.ylabel("$y$", rotation=0, fontsize=18) # plt.axis([0, 2, 0, 15]) # plt.show() for i in np.arange(len(test_xs)): plt.subplot(2,1,i+1) plt.plot(test_xs[i], test_ys, 'b.') plt.plot(test_xs[i], pred_ys, 'r.') plt.xlabel('$X_%d$' % (i+1)) plt.ylabel('$y$') plt.axis([0, 2, 0, 15]) plt.subplots_adjust(hspace=.75) plt.show() # + id="bG_ZQa2_X4o9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8c838bdf-7281-4790-9da2-dd0172594930" b # + id="YNsGzVF0hMW9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="1ddd0358-fc2a-4380-9124-b2081bfcf8dc" w # + id="472UYfbXha8U" colab_type="code" colab={} # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # 
jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Tce3stUlHN0L" # ##### Copyright 2019 The TensorFlow Authors. # + cellView="form" id="tuOe1ymfHZPu" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="qFdPvlXBOdUN" # # Random number generation # + [markdown] id="MfBg1C5NB3X0" # # # # # #
# + [markdown] id="BlGY1iiph_C2" # TensorFlow provides a set of pseudo-random number generators (RNG), in the `tf.random` module. This document describes how you can control the random number generators, and how these generators interact with other tensorflow sub-systems. # # Note: The random numbers are not guaranteed to be consistent across TensorFlow versions. See: [Version Compatibility](https://www.tensorflow.org/guide/versions#what_is_not_covered) # # TensorFlow provides two approaches for controlling the random number generation process: # # 1. Through the explicit use of `tf.random.Generator` objects. Each such object maintains a state (in `tf.Variable`) that will be changed after each number generation. # # 2. Through the purely-functional stateless random functions like `tf.random.stateless_uniform`. Calling these functions with the same arguments (which include the seed) and on the same device will always produce the same results. # # Warning: The old RNGs from TF 1.x such as `tf.random.uniform` and `tf.random.normal` are not yet deprecated but strongly discouraged. # + [markdown] id="zIGh9faCOp6x" # ## Setup # + id="ECDrttf0s8Nu" import tensorflow as tf # Creates some virtual devices (cpu:0, cpu:1, etc.) for using distribution strategy physical_devices = tf.config.list_physical_devices("CPU") tf.config.experimental.set_virtual_device_configuration( physical_devices[0], [ tf.config.experimental.VirtualDeviceConfiguration(), tf.config.experimental.VirtualDeviceConfiguration(), tf.config.experimental.VirtualDeviceConfiguration() ]) # + [markdown] id="eqMlrUsVu2Ai" # ## The `tf.random.Generator` class # # The `tf.random.Generator` class is used in cases where you want each RNG call to produce different results. It maintains an internal state (managed by a `tf.Variable` object) which will be updated every time random numbers are generated. Because the state is managed by `tf.Variable`, it enjoys all facilities provided by `tf.Variable` such as easy checkpointing, automatic control-dependency and thread safety. # # You can get a `tf.random.Generator` by manually creating an object of the class or call `tf.random.get_global_generator()` to get the default global generator: # + id="7yU1E3JvxOQD" g1 = tf.random.Generator.from_seed(1) print(g1.normal(shape=[2, 3])) g2 = tf.random.get_global_generator() print(g2.normal(shape=[2, 3])) # + [markdown] id="QmRCeAvTxulW" # There are multiple ways to create a generator object. The easiest is `Generator.from_seed`, as shown above, that creates a generator from a seed. A seed is any non-negative integer. `from_seed` also takes an optional argument `alg` which is the RNG algorithm that will be used by this generator: # + id="kISbOE4Xfjhv" g1 = tf.random.Generator.from_seed(1, alg='philox') print(g1.normal(shape=[2, 3])) # + [markdown] id="_mCRaN7dfd8j" # See the *Algorithms* section below for more information about it. # # Another way to create a generator is with `Generator.from_non_deterministic_state`. A generator created this way will start from a non-deterministic state, depending on e.g. time and OS. # + id="gxPLCLsz00qY" g = tf.random.Generator.from_non_deterministic_state() print(g.normal(shape=[2, 3])) # + [markdown] id="zSAp2BMj1JZ6" # There are yet other ways to create generators, such as from explicit states, which are not covered by this guide. # # When using `tf.random.get_global_generator` to get the global generator, you need to be careful about device placement. 
The global generator is created (from a non-deterministic state) at the first time `tf.random.get_global_generator` is called, and placed on the default device at that call. So, for example, if the first site you call `tf.random.get_global_generator` is within a `tf.device("gpu")` scope, the global generator will be placed on the GPU, and using the global generator later on from the CPU will incur a GPU-to-CPU copy. # # There is also a function `tf.random.set_global_generator` for replacing the global generator with another generator object. This function should be used with caution though, because the old global generator may have been captured by a `tf.function` (as a weak reference), and replacing it will cause it to be garbage collected, breaking the `tf.function`. A better way to reset the global generator is to use one of the "reset" functions such as `Generator.reset_from_seed`, which won't create new generator objects. # + id="324S5bpd9HRg" g = tf.random.Generator.from_seed(1) print(g.normal([])) print(g.normal([])) g.reset_from_seed(1) print(g.normal([])) # + [markdown] id="z9H0wuvp9VwH" # ### Creating independent random-number streams # # In many applications one needs multiple independent random-number streams, independent in the sense that they won't overlap and won't have any statistically detectable correlations. This is achieved by using `Generator.split` to create multiple generators that are guaranteed to be independent of each other (i.e. generating independent streams). # + id="Vg5_KN18OZjo" g = tf.random.Generator.from_seed(1) print(g.normal([])) new_gs = g.split(3) for new_g in new_gs: print(new_g.normal([])) print(g.normal([])) # + [markdown] id="dqOaGVzKOsRJ" # `split` will change the state of the generator on which it is called (`g` in the above example), similar to an RNG method such as `normal`. In addition to being independent of each other, the new generators (`new_gs`) are also guaranteed to be independent of the old one (`g`). # # Spawning new generators is also useful when you want to make sure the generator you use is on the same device as other computations, to avoid the overhead of cross-device copy. For example: # + id="5jSnJBlUQzF3" with tf.device("cpu"): # change "cpu" to the device you want g = tf.random.get_global_generator().split(1)[0] print(g.normal([])) # use of g won't cause cross-device copy, unlike the global generator # + [markdown] id="sCxbccYMRdd4" # Note: In theory, you can use constructors such as `from_seed` instead of `split` here to obtain a new generator, but by doing so you lose the guarantee that the new generator is independent of the global generator. You will also run the risk that you may accidentally create two generators with the same seed or with seeds that lead to overlapping random-number streams. # # You can do splitting recursively, calling `split` on split generators. There are no limits (barring integer overflow) on the depth of recursions. # + [markdown] id="8JUgnQM_O0lg" # ### Interaction with `tf.function` # # `tf.random.Generator` obeys the same rules as `tf.Variable` when used with `tf.function`. This includes three aspects. # + [markdown] id="jnSjhY6WM-J8" # #### Creating generators outside `tf.function` # # `tf.function` can use a generator created outside of it. 
# + id="a5EEy0E2UHMw" g = tf.random.Generator.from_seed(1) @tf.function def foo(): return g.normal([]) print(foo()) # + [markdown] id="L_8kC7kbO5uu" # The user needs to make sure that the generator object is still alive (not garbage-collected) when the function is called. # + [markdown] id="PwIrBv_zUYwI" # #### Creating generators inside `tf.function` # # Creation of generators inside a `tf.function` can only happened during the first run of the function. # + id="3JzpUvqJU4MW" g = None @tf.function def foo(): global g if g is None: g = tf.random.Generator.from_seed(1) return g.normal([]) print(foo()) print(foo()) # + [markdown] id="UaTVnOhHVM9a" # #### Passing generators as arguments to `tf.function` # # When used as an argument to a `tf.function`, different generator objects will cause retracing of the `tf.function`. # + id="DeR9kvt0V-ad" num_traces = 0 @tf.function def foo(g): global num_traces num_traces += 1 return g.normal([]) foo(tf.random.Generator.from_seed(1)) foo(tf.random.Generator.from_seed(2)) print(num_traces) # + [markdown] id="E0RxllJzkGfo" # Note that this retracing behavior is consistent with `tf.Variable`: # + id="oWD2f_qxkSe7" num_traces = 0 @tf.function def foo(v): global num_traces num_traces += 1 return v.read_value() foo(tf.Variable(1)) foo(tf.Variable(2)) print(num_traces) # + [markdown] id="fxcS6IY8WZuh" # ### Interaction with distribution strategies # # There are two ways in which `Generator` interacts with distribution strategies. # + [markdown] id="GyZv9QJkZfkQ" # #### Creating generators outside distribution strategies # # If a generator is created outside strategy scopes, all replicas’ access to the generator will be serialized, and hence the replicas will get different random numbers. # + id="HX_beT9SZWMp" g = tf.random.Generator.from_seed(1) strat = tf.distribute.MirroredStrategy(devices=["cpu:0", "cpu:1"]) with strat.scope(): def f(): print(g.normal([])) results = strat.run(f) # + [markdown] id="ydYQbUqLPAgH" # Note that this usage may have performance issues because the generator's device is different from the replicas. # + [markdown] id="Yal4LbBKbAeN" # #### Creating generators inside distribution strategies # # If a generator is created inside a strategy scope, each replica will get a different and independent stream of random numbers. # + id="5SeUu7IFmTyQ" strat = tf.distribute.MirroredStrategy(devices=["cpu:0", "cpu:1"]) with strat.scope(): g = tf.random.Generator.from_seed(1) print(strat.run(lambda: g.normal([]))) print(strat.run(lambda: g.normal([]))) # + [markdown] id="PFBlrOudfu9u" # Note: Currently `tf.random.Generator` doesn't provide an option to let different replicas get identical (instead of different) streams (which is technically not hard). If you have a use case for this feature, please let the TensorFlow developers know. # # If the generator is seeded (e.g. created by `Generator.from_seed`), the random numbers are determined by the seed, even though different replicas get different and uncorrelated numbers. One can think of a random number generated on a replica as a hash of the replica ID and a "primary" random number that is common to all replicas. Hence, the whole system is still deterministic. 
# # `tf.random.Generator` can also be created inside `Strategy.run`: # + id="nlQXi5Msb1Wu" strat = tf.distribute.MirroredStrategy(devices=["cpu:0", "cpu:1"]) with strat.scope(): def f(): g = tf.random.Generator.from_seed(1) a = g.normal([]) b = g.normal([]) return tf.stack([a, b]) print(strat.run(f)) print(strat.run(f)) # + [markdown] id="4Sv-aiaOmrOr" # We no longer recommend passing `tf.random.Generator` as arguments to `Strategy.run`, because `Strategy.run` generally expects the arguments to be tensors, not generators. # + [markdown] id="8RbM4vabtiWM" # ### Saving generators # # Generally for saving or serializing you can handle a `tf.random.Generator` the same way you would handle a `tf.Variable` or a `tf.Module` (or its subclasses). In TF there are two mechanisms for serialization: [Checkpoint](https://www.tensorflow.org/guide/checkpoint) and [SavedModel](https://www.tensorflow.org/guide/saved_model). # + [markdown] id="PDtySQDotWQc" # #### Checkpoint # # Generators can be freely saved and restored using `tf.train.Checkpoint`. The random-number stream from the restoring point will be the same as that from the saving point. # + id="uB_bDSbzpbne" filename = "./checkpoint" g = tf.random.Generator.from_seed(1) cp = tf.train.Checkpoint(generator=g) print(g.normal([])) # + id="bKKtRWeIkIjX" cp.write(filename) print("RNG stream from saving point:") print(g.normal([])) print(g.normal([])) # + id="-cIHcHwRkQp3" cp.restore(filename) print("RNG stream from restoring point:") print(g.normal([])) print(g.normal([])) # + [markdown] id="A-OeUUQEJ37X" # You can also save and restore within a distribution strategy: # + id="3aI6TQ2lq28w" filename = "./checkpoint" strat = tf.distribute.MirroredStrategy(devices=["cpu:0", "cpu:1"]) with strat.scope(): g = tf.random.Generator.from_seed(1) cp = tf.train.Checkpoint(my_generator=g) print(strat.run(lambda: g.normal([]))) # + id="kTZcdaMwkvJI" with strat.scope(): cp.write(filename) print("RNG stream from saving point:") print(strat.run(lambda: g.normal([]))) print(strat.run(lambda: g.normal([]))) # + id="nizFA5IrkzN1" with strat.scope(): cp.restore(filename) print("RNG stream from restoring point:") print(strat.run(lambda: g.normal([]))) print(strat.run(lambda: g.normal([]))) # + [markdown] id="Z2rsPfp9J6JA" # You should make sure that the replicas don't diverge in their RNG call history (e.g. one replica makes one RNG call while another makes two RNG calls) before saving. Otherwise, their internal RNG states will diverge and `tf.train.Checkpoint` (which only saves the first replica's state) won't properly restore all the replicas. # # You can also restore a saved checkpoint to a different distribution strategy with a different number of replicas. 
Because a `tf.random.Generator` object created in a strategy can only be used in the same strategy, to restore to a different strategy, you have to create a new `tf.random.Generator` in the target strategy and a new `tf.train.Checkpoint` for it, as shown in this example: # + id="zgoFRf59-IvW" filename = "./checkpoint" strat1 = tf.distribute.MirroredStrategy(devices=["cpu:0", "cpu:1"]) with strat1.scope(): g1 = tf.random.Generator.from_seed(1) cp1 = tf.train.Checkpoint(my_generator=g1) print(strat1.run(lambda: g1.normal([]))) # + id="Lu79ETxMlDpO" with strat1.scope(): cp1.write(filename) print("RNG stream from saving point:") print(strat1.run(lambda: g1.normal([]))) print(strat1.run(lambda: g1.normal([]))) # + id="VYoRFUjklKOk" strat2 = tf.distribute.MirroredStrategy(devices=["cpu:0", "cpu:1", "cpu:2"]) with strat2.scope(): g2 = tf.random.Generator.from_seed(1) cp2 = tf.train.Checkpoint(my_generator=g2) cp2.restore(filename) print("RNG stream from restoring point:") print(strat2.run(lambda: g2.normal([]))) print(strat2.run(lambda: g2.normal([]))) # + [markdown] id="kMltUKbANqgl" # Although `g1` and `cp1` are different objects from `g2` and `cp2`, they are linked via the common checkpoint file `filename` and object name `my_generator`. Overlapping replicas between strategies (e.g. `cpu:0` and `cpu:1` above) will have their RNG streams properly restored like in previous examples. This guarantee doesn't cover the case when a generator is saved in a strategy scope and restored outside of any strategy scope or vice versa, because a device outside strategies is treated as different from any replica in a strategy. # + [markdown] id="w9dqrp1LnTaJ" # #### SavedModel # # `tf.random.Generator` can be saved to a SavedModel. The generator can be created within a strategy scope. The saving can also happen within a strategy scope. # + id="0AKO5SnUtyqx" filename = "./saved_model" class MyModule(tf.Module): def __init__(self): super(MyModule, self).__init__() self.g = tf.random.Generator.from_seed(0) @tf.function def __call__(self): return self.g.normal([]) @tf.function def state(self): return self.g.state strat = tf.distribute.MirroredStrategy(devices=["cpu:0", "cpu:1"]) with strat.scope(): m = MyModule() print(strat.run(m)) print("state:", m.state()) # + id="jg2148hulfLB" with strat.scope(): tf.saved_model.save(m, filename) print("RNG stream from saving point:") print(strat.run(m)) print("state:", m.state()) print(strat.run(m)) print("state:", m.state()) # + id="93AgVyzOllG7" imported = tf.saved_model.load(filename) print("RNG stream from loading point:") print("state:", imported.state()) print(imported()) print("state:", imported.state()) print(imported()) print("state:", imported.state()) # + [markdown] id="sbb23j3pZNNq" # Loading a SavedModel containing `tf.random.Generator` into a distribution strategy is not recommended because the replicas will all generate the same random-number stream (which is because replica ID is frozen in SavedModel's graph). # # Loading a distributed `tf.random.Generator` (a generator created within a distribution strategy) into a non-strategy environment, like the above example, also has a caveat. The RNG state will be properly restored, but the random numbers generated will be different from the original generator in its strategy (again because a device outside strategies is treated as different from any replica in a strategy). # + [markdown] id="73an1POpsi6V" # ## Stateless RNGs # # Usage of stateless RNGs is simple. 
Since they are just pure functions, there is no state or side effect involved. # + id="0-aOOA3gasn_" print(tf.random.stateless_normal(shape=[2, 3], seed=[1, 2])) print(tf.random.stateless_normal(shape=[2, 3], seed=[1, 2])) # + [markdown] id="2O_D-RAFNH2Q" # Every stateless RNG requires a `seed` argument, which needs to be an integer Tensor of shape `[2]`. The results of the op are fully determined by this seed. # # The RNG algorithm used by stateless RNGs is device-dependent, meaning the same op running on a different device may produce different outputs. # + [markdown] id="4BvGkPnaOUPF" # ## Algorithms # + [markdown] id="58-8kvR4pRwO" # ### General # # Both the `tf.random.Generator` class and the `stateless` functions support the Philox algorithm (written as `"philox"` or `tf.random.Algorithm.PHILOX`) on all devices. # # Different devices will generate the same integer numbers, if using the same algorithm and starting from the same state. They will also generate "almost the same" float-point numbers, though there may be small numerical discrepancies caused by the different ways the devices carry out the float-point computation (e.g. reduction order). # + [markdown] id="WETA04F1OYPL" # ### XLA devices # # On XLA-driven devices (such as TPU, and also CPU/GPU when XLA is enabled) the ThreeFry algorithm (written as `"threefry"` or `tf.random.Algorithm.THREEFRY`) is also supported. This algorithm is fast on TPU but slow on CPU/GPU compared to Philox. # + [markdown] id="c04JkebCPTPu" # See paper ['Parallel Random Numbers: As Easy as 1, 2, 3'](https://www.thesalmons.org/john/random123/papers/random123sc11.pdf) for more details about these algorithms. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas import importlib import sys import os os.chdir('C:\\Users\\james\\PycharmProjects\\compcaplux') import lux df=pandas.read_csv('C:\\Users\\james\\PycharmProjects\\compcaplux\\lux\\data\\college.csv') df.intent=[lux.Clause(attribute="SATAverage",filter_op=">",value=30)] df df.intent=["AverageCost>10"] df df.intent="AverageCost>10" df # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + [markdown] colab_type="text" id="nCc3XZEyG3XV" # Lambda School Data Science # # *Unit 2, Sprint 3, Module 1* # # --- # # # # Define ML problems # # You will use your portfolio project dataset for all assignments this sprint. # # ## Assignment # # Complete these tasks for your project, and document your decisions. # # - [ ] Choose your target. Which column in your tabular dataset will you predict? # - [ ] Is your problem regression or classification? # - [ ] How is your target distributed? # - Classification: How many classes? Are the classes imbalanced? # - Regression: Is the target right-skewed? If so, you may want to log transform the target. # - [ ] Choose your evaluation metric(s). # - Classification: Is your majority class frequency >= 50% and < 70% ? If so, you can just use accuracy if you want. Outside that range, accuracy could be misleading. What evaluation metric will you choose, in addition to or instead of accuracy? 
# - Regression: Will you use mean absolute error, root mean squared error, R^2, or other regression metrics? # - [ ] Choose which observations you will use to train, validate, and test your model. # - Are some observations outliers? Will you exclude them? # - Will you do a random split or a time-based split? # - [ ] Begin to clean and explore your data. # - [ ] Begin to choose which features, if any, to exclude. Would some features "leak" future information? # # If you haven't found a dataset yet, do that today. [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2) and choose your dataset. # # Some students worry, ***what if my model isn't “good”?*** Then, [produce a detailed tribute to your wrongness. That is science!](https://twitter.com/nathanwpyle/status/1176860147223867393) # + [markdown] id="-f604BpWQfH-" colab_type="text" # My dataset is from https://www.bls.gov/web/cewqtr.supp.toc.htm # + [markdown] id="JCjwHWvnfLKE" colab_type="text" # Choose your target.----------wage. # # Which column in your tabular dataset will you predict?----Average Weekly Wage # # Is your problem regression or classification?-------------regression # # # + id="jKKCmJ4kUrzM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 313} outputId="644aee6d-e8a1-46f5-dc7e-3f9896bd43b5" # !pip install category_encoders==2.* # + id="_czxb4cTPGCF" colab_type="code" outputId="5fd63b55-8f58-4234-d537-b57741929976" colab={"base_uri": "https://localhost:8080/", "height": 505} import pandas as pd df=pd.read_excel('2019_2ndQ_wage_industry.xlsx') print(df.shape) df.head() # + [markdown] id="es0kEedNSieF" colab_type="text" # There are 13 weeks in this quarter. According to BLS describe about this data set,"average weekly wage values are calculated by dividing quarterly total wages by the average of the three monthly."Average weekly wages are affected by the ratio of full-time to part-time workers as well as the number of # individuals in high-paying and low-paying occupations and the incidence of pay periods within a quarter. For instance, the average weekly wage of the workforce could increase significantly when there is a large decline in # the number of employees that had been receiving below-average wages. Wages may include payments to workers not present in the employment counts because they did not work during the pay period including the 12th of the # month. When comparing average weekly wage levels between industries, states, or quarters, these factors should # be taken into consideration. 
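# To make the quoted BLS methodology concrete, here is a tiny worked example (all numbers below are invented): quarterly total wages are divided by the average of the three monthly employment levels, and then by the 13 weeks in the quarter.

# +
# Hypothetical numbers, purely to illustrate the average-weekly-wage calculation described above.
total_quarterly_wages = 13_000_000      # total wages paid in the quarter, in dollars
monthly_employment = [980, 1000, 1020]  # employment level in each of the three months
average_monthly_employment = sum(monthly_employment) / 3
average_weekly_wage = total_quarterly_wages / average_monthly_employment / 13  # 13 weeks per quarter
print(round(average_weekly_wage, 2))  # -> 1000.0
# -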
# # + id="WGV9JcRfuUtT" colab_type="code" outputId="a5ee0e77-35fe-41f5-b368-43ae3b683c96" colab={"base_uri": "https://localhost:8080/", "height": 421} df.isnull().sum() # + id="sdAtvuIZQuYh" colab_type="code" outputId="a0adcf00-2fcd-498e-a9b8-b03e568cc97e" colab={"base_uri": "https://localhost:8080/", "height": 403} df.columns.to_list() # + id="Joia9OWfWyKu" colab_type="code" colab={} # + id="8W7HXsQzQuPS" colab_type="code" colab={} df= df.drop(columns=['Cnty', 'St Name', 'Status Code']) # + id="mpHxiH30RlLb" colab_type="code" colab={} df=df.drop(columns=['Year','Qtr','Total Quarterly Wages']) # + id="Ls2m1onUllWL" colab_type="code" outputId="66568653-fb60-4258-bcc6-34e8587811a0" colab={"base_uri": "https://localhost:8080/", "height": 352} df.describe() # + id="eMO3m5OaxRNj" colab_type="code" colab={} df.columns.to_list() # + id="MLfwJFMvaOTs" colab_type="code" outputId="db10ea91-740e-4978-e62d-c752795ca82b" colab={"base_uri": "https://localhost:8080/", "height": 35} df.select_dtypes(exclude="number").columns.tolist() # + id="8nVL-Y4zP_af" colab_type="code" outputId="56e28831-7e8a-4767-a3b1-38d148061093" colab={"base_uri": "https://localhost:8080/", "height": 237} df['Area\nCode'].value_counts() # + id="-O2hxAi-ZIxB" colab_type="code" colab={} # drop na in the state(St) df=df.dropna(subset=['St']) # + id="veiYy4vNZovy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 329} outputId="d09c7aca-21bc-4fa2-c4c4-de96e71ff0cb" df.isnull().sum() # + id="GFtXXonkiLKI" colab_type="code" outputId="c21b03e6-c5a2-4bb7-de91-f229eb5408d2" colab={"base_uri": "https://localhost:8080/", "height": 109} df['St'].unique() # + id="96CAH9h-iqYO" colab_type="code" outputId="aca4db72-072c-47e8-89b1-a2017db98c0b" colab={"base_uri": "https://localhost:8080/", "height": 127} df['Own'].value_counts() # + id="-PcweWKdjKeU" colab_type="code" outputId="394e0429-08f7-4127-9030-64df8fd0fdb0" colab={"base_uri": "https://localhost:8080/", "height": 35} df['Area Type'].unique() # + id="19q8XaLxjj5L" colab_type="code" outputId="8f9dff4a-762c-4508-eb78-e19b6e5669ff" colab={"base_uri": "https://localhost:8080/", "height": 72} df['Area'].unique() # + id="A4SLaiZ-uWOw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 109} outputId="f88653b8-8ed7-4f14-942b-6b867afad3a4" df['Area'].describe() # + id="bdlye6cSay5P" colab_type="code" outputId="04ed2a42-3963-4240-9730-11c722396310" colab={"base_uri": "https://localhost:8080/", "height": 54} df['Ownership'].unique() # + id="9vDLTuvSutuy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 182} outputId="c01c9587-ef32-4b50-fd63-926736557e02" df['Industry'].unique() # + id="FSnU7hpUi9Hp" colab_type="code" outputId="00718cbf-78b0-4c29-bbde-4dc225d882f7" colab={"base_uri": "https://localhost:8080/", "height": 182} df['NAICS'].describe() # + id="Mp6pTI9puwhj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 182} outputId="120359d4-b2b9-405a-eb74-e10ed98bd838" df['Establishment Count'].describe() # + id="EJhDXmipuwcA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 182} outputId="2f4776fc-1651-4822-e76e-6e2c3dd91e2c" df['April Employment'].describe() # + id="kxuIt81xly71" colab_type="code" colab={} import numpy as np # + id="sgaQdcvXkNng" colab_type="code" outputId="0a4e9456-33c0-4317-c314-f4c3ffd5b321" colab={"base_uri": "https://localhost:8080/", "height": 182} df['Employment Location Quotient Relative to U.S.'].describe() # + id="A5ULOmdjkgMB" colab_type="code" 
colab={"base_uri": "https://localhost:8080/", "height": 182} outputId="8b35cdd1-1814-42ad-8a62-bc092f0123bc" df['Total Wage Location Quotient Relative to U.S.'].describe() # + id="qG1mgh1-0KC1" colab_type="code" outputId="99622c31-122e-4d51-aac7-891c33f80b85" colab={"base_uri": "https://localhost:8080/", "height": 182} df['Average Weekly Wage'].describe() # + [markdown] id="aL4LUFy81BUM" colab_type="text" # How is your target distributed? # # # Regression: Is the target right-skewed? If so, you may want to log transform the target. # # # + id="yrj-25bys6v5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="e261c828-b843-4342-9d85-acdb1715625e" #Meean baseline df['Average Weekly Wage'].mean() # + id="O1rB7qFmaKj4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="18ddb152-9cd4-4f99-f2e9-4d2be3dfad2f" df.shape # + id="snNpYXVAuu_o" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="73858958-a546-4a42-8f80-77841ed1bd89" import seaborn as sns y= df['Average Weekly Wage'] sns.distplot(y); # + [markdown] id="Hp9JcU9zelk5" colab_type="text" # ### Lower bound and Upper Bound are numberic data. From following I will check whether they are outliers. Do I need exclude them? # + [markdown] id="lFcEMXrG3_xf" colab_type="text" # Yes, there are outliers. Some really high and some really low. I will remove the most extreme 1% wage. # + id="WPvO8G9m4bNy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="e18dc70a-3811-4a79-8633-af0ae952f83e" import numpy as np df= df[(df['Average Weekly Wage'] >= np.percentile(df['Average Weekly Wage'],5))] df.shape # + id="cKZwzgOHEtcp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="aff0c017-94be-46d9-9742-b5d02f32f6ef" y=df['Average Weekly Wage'] sns.distplot(y); # + [markdown] id="7vsdON-g6GD2" colab_type="text" # The shape is till same as before(right_skewed). So we need log -Transform the target.. # + id="_8uo0BcJ6i9-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="aebaae04-7cd9-4849-955f-bbf56ac20ee2" y_log = np.log1p(y) import matplotlib.pyplot as plt sns.distplot(y_log) plt.title('Log- transformed target, in log- dollars'); # + id="tbHroLzk6i57" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="3781c27f-3de1-4bd1-e168-5f2ef63b222c" y_untransformed = np.expm1(y_log) sns.distplot(y_untransformed) plt.title('Back to the original units'); # + id="chrbXiDeZFBj" colab_type="code" colab={} y= y_untransformed # + id="vlxJxEnQ6i1d" colab_type="code" colab={} from sklearn.model_selection import train_test_split # + id="RR2icAZBRzho" colab_type="code" colab={} X_train, X_test, y_train, y_test = train_test_split(df, y, test_size=0.2) # + id="z5XXuX-vSLnd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="bb5b6d73-91d8-49be-c0c8-52beb5dbb3ce" print (X_train.shape, y_train.shape) print (X_test.shape, y_test.shape) # + id="ZViRq3DXT1vE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="f42c269a-7b38-4e07-98d9-2c6be5f22d3e" X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2) print (X_train.shape, y_train.shape) print (X_val.shape, y_val.shape) # + id="SKF_zYTCSRfc" colab_type="code" colab={} target = 'Average Weekly Wage' features= df.columns. 
drop([target])

# + id="NAUp68N7whmc" colab_type="code" outputId="f9f4b310-9e62-46a0-81a2-3820e079d318" colab={"base_uri": "https://localhost:8080/", "height": 219}
# !pip install category_encoders==2.*

# + id="uvd1g22obm7K" colab_type="code" colab={}

# + id="2dVVKeVhyzNe" colab_type="code" colab={}
# %matplotlib inline
import category_encoders as ce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestRegressor

# + id="5kAARZAZuhOH" colab_type="code" outputId="dbfaf97c-6c2b-4315-d431-a64aa75d864c" colab={"base_uri": "https://localhost:8080/", "height": 35}
pipeline = make_pipeline(
    ce.OneHotEncoder(use_cat_names=True),
    SimpleImputer(strategy='median'),
    RandomForestRegressor(max_depth=2)
)
pipeline.fit(X_train, y_train)
# For a regressor, pipeline.score returns R^2, not accuracy
print('Validation R^2:', pipeline.score(X_val, y_val))

# + [markdown] id="uQCN_jojWTCA" colab_type="text"
# Validation R^2: 0.85

# + id="V1jk0UQ4r3UW" colab_type="code" colab={}
import graphviz

# + id="tgYW9q3vWvHE" colab_type="code" colab={}
# This score is too good to be true.
# Visualize one tree from the RandomForestRegressor to see what the model "learned"
target = 'Average Weekly Wage'
features = df.columns.drop([target])
X_train = X_train[features]
X_val = X_val[features]

# + id="VXYSuQT_cJjR" colab_type="code" colab={}
from sklearn.tree import export_graphviz

# export_graphviz expects a single decision tree, so take the first estimator of the forest.
# feature_names and class_names are omitted: the one-hot encoding step changes the feature set,
# and class names do not apply to a regression tree.
tree = pipeline.named_steps['randomforestregressor'].estimators_[0]
dot_data = export_graphviz(
    tree,
    out_file=None,
    filled=True,
    impurity=False
)
graphviz.Source(dot_data)

# + id="5uWaufgxP_WU" colab_type="code" colab={}

# + id="nzk0gX94P_Qu" colab_type="code" colab={}

# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### NumPy exercises

# Print the current NumPy version
import numpy as np
print(np.__version__)

# Construct an all-zero matrix and print how much memory it occupies
z = np.zeros((5, 5))
print('%d bytes' % (z.size * z.itemsize))

# Print the help documentation of a function, e.g. numpy.add
np.info(np.add)

# Create an array of the numbers 10-49 and reverse its order
# +
tang_array = np.arange(10, 50, 1)
tang_array = tang_array[::-1]
tang_array
# -

# Find the indices of the non-zero elements of an array
np.nonzero([1, 2, 3, 4, 5, 0, 0, 0, 1234, 0, 1])

# Construct a random 3*3 matrix and print its maximum and minimum values
tang_array = np.random.random((3, 3))
tang_array.max()
tang_array.min()

# Build a 5*5 matrix of ones and add a border of zeros around the outside
tang_array = np.ones((5, 5))
tang_array = np.pad(tang_array, pad_width=1, mode='constant', constant_values=0)
tang_array

# +
# print(help(np.pad))
# -

# Build a matrix of shape (6, 7, 8) and find the index of its 100th element
np.unravel_index(100, (6, 7, 8))

# Normalize a 5*5 matrix
tang_array = np.random.random((5, 5))
tang_max = tang_array.max()
tang_min = tang_array.min()
tang_array = (tang_array - tang_min) / (tang_max - tang_min)
tang_array

# Find the values that two arrays have in common
z1 = np.random.randint(0, 10, 10)
z2 = np.random.randint(0, 10, 10)
print(z1)
print(z2)
print(np.intersect1d(z1, z2))

# Get the dates of today, tomorrow, and yesterday
yesterday = np.datetime64('today', 'D') - np.timedelta64(1, 'D')
today = np.datetime64('today', 'D')
tomorrow = np.datetime64('today', 'D') + np.timedelta64(1, 'D')
yesterday
today
tomorrow

# Get all the days in a month
np.arange('2017-10', '2017-11', dtype='datetime64[D]')

# Get the integer part of a number
z = np.random.uniform(0, 10, 10)
np.floor(z)

# Construct an array that cannot be modified
z = np.zeros(5)
z.flags.writeable = False
z[0] = 1  # raises ValueError: assignment destination is read-only

# Print a truncated view of a large array, then print all of its values
import sys
z = np.zeros((15, 15))
np.set_printoptions(threshold=5)
z
np.set_printoptions(threshold=sys.maxsize)  # threshold=np.nan is rejected by modern NumPy
z

# Find the index of the value in an array that is closest to a given number
z = np.arange(100)
v = np.random.uniform(0, 100)
print(v)
index = (np.abs(z - v)).argmin()
print(z[index])

# Convert between 32-bit float and 32-bit int types
z = np.arange(10, dtype=np.int32)
print(z.dtype)
z = z.astype(np.float32)
print(z.dtype)

# Print the position (index) and value of each array element
z = np.arange(9).reshape(3, 3)
for index, value in np.ndenumerate(z):
    print(index, value)

# Sort an array by one of its columns
z = np.random.randint(0, 10, (3, 3))
print(z)
print(z[z[:, 1].argsort()])

# Count how many times each value appears in an array
z = np.array([1, 1, 1, 2, 2, 3, 3, 4, 5, 8])
np.bincount(z)

# How to sum over the last two dimensions of a four-dimensional array
z = np.random.randint(0, 10, (4, 4, 4, 4))
res = z.sum(axis=(-2, -1))
print(res)

# Swap two rows of a matrix
z = np.arange(25).reshape(5, 5)
z[[0, 1]] = z[[1, 0]]
z

# Find the most frequently occurring value in an array
z = np.random.randint(0, 10, 50)
print(np.bincount(z).argmax())

# Quickly find the top K largest values
z = np.arange(10000)
np.random.shuffle(z)
n = 5
print(z[np.argpartition(-z, n)[:n]])

# Find the rows of an array in which all elements are identical
np.set_printoptions(threshold=sys.maxsize)
z = np.random.randint(0, 5, (10, 3))
z
e = np.all(z[:, 1:] == z[:, :-1], axis=1)
print(e)
a = np.array([1, 2, 3, 4])
b = np.array([1, 2, 3, 5])
np.all(a == b)

# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np

df = pd.read_csv("small_MINST_14.csv")
n = len(df)
X = df.values[:, 0:256]
y = df.values[:, -1].reshape(n, 1)
y[y == -1] = 0
y
# -

df = pd.read_csv("iris.csv", header=None)
df.columns = ['X0', 'X1', 'X2', 'X3', 'y']
n = len(df)
df = df[df['y'].isin(['Iris-virginica', 'Iris-versicolor'])]
X = df.iloc[:, :4].values
y = df.iloc[:, -1].values
y[y == 'Iris-virginica'] = 0
y[y == 'Iris-versicolor'] = 1

df.iloc[:, :8]

df = pd.read_csv("marketing.csv")
n = len(df)
df_x = df.iloc[:, :8]
y = df.values[:, -1].reshape(n, 1)
y[y == 'no'] = 0
y[y == 'yes'] = 1
df_one_hot = pd.get_dummies(df_x)
X = df_one_hot.values

X, y

# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import time
import os
from collections import namedtuple, deque

from unityagents import UnityEnvironment

from agent_utils import plot_training_scores
from ddpg_agent import DDPG_Agent
from ddpg_trainer import train_ddpg

import matplotlib.pyplot as plt
# %matplotlib inline

SEED = 0
PATH_TO_ENV = 'Tennis_Windows_x86_64/Tennis.exe'

# +
# create the environment
env = UnityEnvironment(file_name=PATH_TO_ENV)

# gather scenario information
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]

# reset the environment and grab infos
env_info = env.reset(train_mode=True)[brain_name]
num_agents = len(env_info.agents)
action_size = brain.vector_action_space_size
states = env_info.vector_observations
state_size = states.shape[1]
train_mode = True
# -

# ## Create Agent

# parameters used for the provided agent
agent_params = {
    'name': 'Agent OmegaPong',
    'buffer_size': int(1e6),
    'batch_size': 256,
    'layers_actor': [512, 256],
    'lr_actor': 5e-4,
    'layers_critic': [512, 256, 256],
    'lr_critic': 1e-3,
    'learn_every': 5,
    'learn_passes': 5,
    'gamma': 0.99,
    'tau': 5e-3,
    'batch_norm': True,
    'weight_decay': 0.0
}

# create the agent
agent = DDPG_Agent(state_size, action_size, brain_name, seed=SEED, params=agent_params)
print(agent.display_params())

# ## Train Agent

# +
# 
train the agent n_episodes = 3000 max_t = 2000 print_every = 50 goal_score = 0.5 score_window_size = 100 keep_training = True scores = train_ddpg(env, agent, num_agents, n_episodes=n_episodes, max_t=max_t, print_every=print_every, goal_score=goal_score, score_window_size=score_window_size, keep_training=keep_training) # - # plot training results plot_training_scores(scores, goal_score, window=score_window_size, ylabel='Max Score for all Agents', agent_name=agent.name) # ## Demo Trained or Saved Agents # + # demo the agent trained in this notebook by uncommenting the cells below #from demos import demo_agent_cont #demo_scores = demo_agent_cont(env, agent, num_agents, n_episodes=3) # + # load a saved agent and run demo from demos import demo_saved_agent_cont demo_agent_name = 'Agent OmegaPong' demo_saved_agent_cont(env, demo_agent_name, n_episodes=3) # - # close the environment when complete env.close() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## Neural networks for segmentation # ! wget https://www.dropbox.com/s/jy34yowcf85ydba/data.zip?dl=0 -O data.zip # ! unzip -q data.zip # Your next task is to train neural network to segment cells edges. # # Here is an example of input data with corresponding ground truth: # + tags=[] # !pip install torch torchvision # + import scipy as sp import scipy.misc import matplotlib.pyplot as plt import scipy.misc import numpy as np import skimage.io import skimage import os import torch import numpy as np import torchvision from torchvision import transforms import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable from torch.utils.data.dataset import Dataset import random import torchvision.transforms.functional as TF from PIL import Image import itertools # %matplotlib inline # - # Human HT29 colon-cancer cells plt.figure(figsize=(10,8)) plt.subplot(1,2,1) im = skimage.img_as_ubyte(skimage.io.imread('BBBC018_v1_images-fixed/train/00735-actin.DIB.bmp')) plt.imshow(im) plt.subplot(1,2,2) mask = skimage.img_as_ubyte(skimage.io.imread('BBBC018_v1_outlines/train/00735-cells.png')) plt.imshow(mask, 'gray') # This time you aren't provided with any code snippets, just input data and target metric - intersection-over-union (IoU) (see implementation below). # # You should train neural network to predict mask of edge pixels (pixels in gt images with value greater than 0). # # Use everything you've learnt by now: # * any architectures for semantic segmentation (encoder-decoder like or based on dilated convolutions) # * data augmentation (you will need that since train set consists of just 41 images) # * fine-tuning # # You're not allowed to do only one thing: to train you network on test set. # # Your final solution will consist of an ipython notebook with code (for final network training + any experiments with data) and an archive with png images with network predictions for test images (one-channel images, 0 - for non-edge pixels, any non-zero value for edge pixels). # # Forestalling questions about baseline... well, let's say that a good network should be able to segment images with iou >= 0.29. This is not a strict criterion of full points solution, but try to obtain better numbers. # # Practical notes: # * There is a hard data class imbalance in dataset, so the network output will be biased toward "zero" class. 
You can either tune the minimal probability threshold for "edge" class, or add class weights to increase the cost of edge pixels in optimized loss. # * Dataset is small so actively use data augmentation: rotations, flip, random contrast and brightness # * Better spend time on experiments with neural network than on postprocessing tricks (i.e test set augmentation). # * Keep in mind that network architecture defines receptive field of pixel. If the size of network input is smaller than receptive field of output pixel, than probably you can throw some layers without loss of quality. It is ok to modify "of-the-shelf" architectures. # # Good luck! def calc_iou(prediction, ground_truth): n_images = len(prediction) intersection, union = 0, 0 for i in range(n_images): intersection += np.logical_and(prediction[i] > 0, ground_truth[i] > 0).astype(np.float32).sum() union += np.logical_or(prediction[i] > 0, ground_truth[i] > 0).astype(np.float32).sum() return float(intersection) / union # + train_images_path = ['./BBBC018_v1_images-fixed/train/' + fname for fname in os.listdir('./BBBC018_v1_images-fixed/train')] train_masks_path = ['./BBBC018_v1_outlines/train/' + fname for fname in os.listdir('./BBBC018_v1_images-fixed/train')] val_images_path = ['./BBBC018_v1_images-fixed/val/' + fname for fname in os.listdir('./BBBC018_v1_images-fixed/val')] val_masks_path = ['./BBBC018_v1_outlines/val/' + fname for fname in os.listdir('./BBBC018_v1_images-fixed/val')] class SegmentationDataset(Dataset): def __init__(self, image_paths=None, target_paths=None, train=True): self.image_paths = image_paths self.target_paths = target_paths self.train = train def transform(self, image, mask): # Resize if self.train: # Random vertical flipping if random.random() > 0.35: image = TF.vflip(image) mask = TF.vflip(mask) # mask = YOUR CODE if random.random() > 0.3: image = TF.rotate(image, 30) mask = TF.rotate(mask, 30) # Random rotate using TF.rotate(image, angle) # Transform to tensor image = TF.to_tensor(image) mask = (TF.to_tensor(mask) > 0.01).type(torch.FloatTensor) return image, mask def __getitem__(self, index): image = Image.open(self.image_paths[index]) if self.target_paths: mask = Image.open(self.target_paths[index][:-14] + '-cells.png') else: mask = Image.open(self.image_paths[index]) x, y = self.transform(image, mask) return x, self.image_paths[index] x, y = self.transform(image, mask) return x, y def __len__(self): return len(self.image_paths) # - train_dataset = SegmentationDataset(image_paths=train_images_path, target_paths=train_masks_path, train=False) val_dataset = SegmentationDataset(image_paths=val_images_path, target_paths=val_masks_path, train=False) train_dataset[0] batch_size = 32 train_batch_gen = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True) val_batch_gen = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True) class Net(nn.Module): def __init__(self, filters): super(Net, self).__init__() self.filters = filters encoder_layers = [] in_filters = 3 for i, ifilters in enumerate(filters): encoder_layers.append(nn.ModuleList([ nn.MaxPool2d(2), nn.Conv2d(in_filters, ifilters, kernel_size=3, padding=1), nn.BatchNorm2d(ifilters), nn.ReLU(), nn.Conv2d(ifilters, ifilters, kernel_size=3, padding=1), nn.BatchNorm2d(ifilters), nn.ReLU(), ])) in_filters = ifilters self.encoder_list = encoder_layers [self.add_module('encoder_' + str(i), layer) for i, layer in enumerate(list(itertools.chain(*self.encoder_list)))] decoder_layers = [] for i in range(len(filters) 
- 1): print(i) print('filters ', -i -2, '', filters[-i-2]) decoder_layers.append(([ nn.ConvTranspose2d(filters[-i - 1], filters[-i - 2], kernel_size=2, stride=2), nn.Conv2d(filters[-i - 1], filters[-i - 2], kernel_size=3, padding=1), nn.BatchNorm2d(filters[-i - 2]), nn.ReLU(), nn.Conv2d(filters[-i - 2], filters[-i - 2], kernel_size=3, padding=1), nn.BatchNorm2d(filters[-i - 2]), nn.ReLU() ])) decoder_layers.append( ([nn.ConvTranspose2d(filters[0], filters[0], kernel_size=2, stride=2)])) self.decoder_list = decoder_layers self.encoder_outputs = [] [self.add_module('decoder_' + str(i), layer) for i, layer in enumerate(list(itertools.chain(*self.decoder_list)))] self.head_list = [nn.Conv2d(filters[0], 1, kernel_size=1), nn.Sigmoid()] [self.add_module('my_head' + str(i), layer) for i, layer in enumerate(self.head_list)] def encoder(self, x): output = x for i, block in enumerate(self.encoder_list): for j, layer in enumerate(block): output = layer(output) self.encoder_outputs.append(output) return output def decoder(self, x): output = x for i, block in enumerate(self.decoder_list[:-1]): upsampled_x = block[0](output) encoder_tensor = self.encoder_outputs[-i - 2] output = torch.cat([encoder_tensor, upsampled_x], 1) for j, layer in enumerate(block[1:]): output = layer(output) output = self.decoder_list[-1][0](output) return output def forward(self, x): encoder_output = self.encoder(x) decoder_output = self.decoder(encoder_output) output = self.head_list[0](decoder_output) self.encoder_outputs = [] return self.head_list[1](output) model = Net([10, 20, 40]) # + tags=[] opt = torch.optim.Adam(model.parameters()) train_loss = [] val_accuracy = [] compute_loss = nn.BCELoss() import time num_epochs = 274 for epoch in range(num_epochs): start_time = time.time() model.train(True) for (X_batch, y_batch) in train_batch_gen: X_batch = torch.tensor(X_batch, dtype=torch.float32) y_batch = torch.tensor(y_batch, dtype=torch.float32) logits = model(X_batch) y_batch = y_batch.view(y_batch.size(0), -1) y_predicted = (logits.view(X_batch.size(0), -1)) loss = compute_loss(y_predicted, y_batch) loss.backward() opt.step() opt.zero_grad() train_loss.append(loss.data.numpy()) print (epoch) model.train(False) for X_batch, y_batch in val_batch_gen: logits = model(torch.FloatTensor(X_batch)).data.numpy() y_pred = (logits > 0.3).astype(np.float32) val_accuracy.append(calc_iou(y_pred, y_batch.numpy())) print("Epoch {} of {} took {:.3f}s".format( epoch + 1, num_epochs, time.time() - start_time)) print(" training loss (in-iteration): \t{:.6f}".format( np.mean(train_loss[-len(train_dataset) // batch_size :]))) print(" validation accuracy: \t\t\t{:.2f} %".format( np.mean(val_accuracy[-len(val_dataset) // batch_size :]) * 100)) # - TRESHOLD = 0.3 for X_batch, y_batch in val_batch_gen: logits = model(torch.FloatTensor(X_batch)).data.numpy() y_pred = (logits > TRESHOLD).astype(np.float32) for i in range(len(X_batch)): fig, ax = plt.subplots(1, 3) ax[0].imshow(X_batch[i, ...].T) ax[1].imshow(y_batch[i,...].T[..., 0]) ax[2].imshow(y_pred[i, ...].T[..., 0]) plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## Imports # Suppress TensorFlow warnings. 
# + # Copied from: # https://weepingfish.github.io/2020/07/22/0722-suppress-tensorflow-warnings/ # Filter tensorflow version warnings import os # https://stackoverflow.com/questions/40426502/is-there-a-way-to-suppress-the-messages-tensorflow-prints/40426709 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" # or any {'0', '1', '2'} import warnings # https://stackoverflow.com/questions/15777951/how-to-suppress-pandas-future-warning warnings.simplefilter(action="ignore", category=FutureWarning) warnings.simplefilter(action="ignore", category=Warning) import tensorflow as tf tf.get_logger().setLevel("INFO") tf.autograph.set_verbosity(0) import logging tf.get_logger().setLevel(logging.ERROR) # + from tensorflow.keras import layers from tensorflow import keras import tensorflow_hub as hub from torchvision.datasets import ImageFolder from torchvision import transforms from torch.utils.data import DataLoader from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD # - # ## Constants AUTO = tf.data.AUTOTUNE BATCH_SIZE = 256 IMAGE_SIZE = 224 TF_MODEL_ROOT = "gs://deit-tf" # ## DeiT models # + model_paths = tf.io.gfile.listdir(TF_MODEL_ROOT) deit_paths = [ path for path in model_paths if str(IMAGE_SIZE) in path and "fe" not in path ] print(deit_paths) # - # ## Image loader # To have an apples-to-apples comparison with the original PyTorch models for evaluation, it's important to ensure we use the same transformations. # + # Transformations from: # (1) https://github.com/facebookresearch/deit/blob/colab/notebooks/deit_inference.ipynb # (2) https://github.com/facebookresearch/deit/blob/main/datasets.py size = int((256 / 224) * IMAGE_SIZE) transform_chain = transforms.Compose( [ transforms.Resize(size, interpolation=3), transforms.CenterCrop(IMAGE_SIZE), transforms.ToTensor(), transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD), ] ) # + dataset = ImageFolder("val", transform=transform_chain) dataloader = DataLoader(dataset, batch_size=BATCH_SIZE) batch = next(iter(dataloader)) print(batch[0].shape) # - # ## Run evaluation def get_model(model_url): inputs = tf.keras.Input((IMAGE_SIZE, IMAGE_SIZE, 3)) hub_module = hub.KerasLayer(model_url) outputs, _ = hub_module(inputs) return tf.keras.Model(inputs, outputs) # + # Copied and modified from: # https://github.com/sebastian-sz/resnet-rs-keras/blob/main/imagenet_evaluation/main.py log_file = f"deit_tf_{IMAGE_SIZE}.csv" if not os.path.exists(log_file): with open(log_file, "w") as f: f.write("model_name,top1_acc(%),top5_acc(%)\n") for deit_path in deit_paths: print(f"Evaluating {deit_path}.") model = get_model(f"{TF_MODEL_ROOT}/{deit_path}") top1 = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1, name="top1") top5 = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=5, name="top5") progbar = tf.keras.utils.Progbar(target=len(dataset) // BATCH_SIZE) for idx, (images, y_true) in enumerate(dataloader): images = images.numpy().transpose(0, 2, 3, 1) y_true = y_true.numpy() y_pred = model.predict(images) top1.update_state(y_true=y_true, y_pred=y_pred) top5.update_state(y_true=y_true, y_pred=y_pred) progbar.update( idx, [("top1", top1.result().numpy()), ("top5", top5.result().numpy())] ) print() print(f"TOP1: {top1.result().numpy()}. 
TOP5: {top5.result().numpy()}") top_1 = top1.result().numpy() * 100.0 top_5 = top5.result().numpy() * 100.0 with open(log_file, "a") as f: f.write("%s,%0.3f,%0.3f\n" % (deit_path, top_1, top_5)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Solution 1 # + import random number = random.randint(1,9) guess = 0 count = 0 while guess != number and guess != "exit": guess = input("What's your guess?") if guess == "exit": break guess = int(guess) count += 1 if guess < number: print("Too low!") elif guess > number: print("Too high!") else: print("You got it!") print("And it only took you",count,"tries!") # - # # Solution 2 # + import string import random def pw_gen(size = 8, chars=string.ascii_letters + string.digits + string.punctuation): return ''.join(random.choice(chars) for _ in range(size)) print(pw_gen(int(input('How many characters in your password?')))) # - # # Solution 3 # + from string import ascii_lowercase from words import get_random_word def get_num_attempts(): """Get user-inputted number of incorrect attempts for the game.""" while True: num_attempts = input( 'How many incorrect attempts do you want? [1-25] ') try: num_attempts = int(num_attempts) if 1 <= num_attempts <= 25: return num_attempts else: print('{0} is not between 1 and 25'.format(num_attempts)) except ValueError: print('{0} is not an integer between 1 and 25'.format( num_attempts)) def get_min_word_length(): """Get user-inputted minimum word length for the game.""" while True: min_word_length = input( 'What minimum word length do you want? [4-16] ') try: min_word_length = int(min_word_length) if 4 <= min_word_length <= 16: return min_word_length else: print('{0} is not between 4 and 16'.format(min_word_length)) except ValueError: print('{0} is not an integer between 4 and 16'.format(min_word_length)) def get_display_word(word, idxs): """Get the word suitable for display.""" if len(word) != len(idxs): raise ValueError('Word length and indices length are not the same') displayed_word = ''.join( [letter if idxs[i] else '*' for i, letter in enumerate(word)]) return displayed_word.strip() def get_next_letter(remaining_letters): """Get the user-inputted next letter.""" if len(remaining_letters) == 0: raise ValueError('There are no remaining letters') while True: next_letter = input('Choose the next letter: ').lower() if len(next_letter) != 1: print('{0} is not a single character'.format(next_letter)) elif next_letter not in ascii_lowercase: print('{0} is not a letter'.format(next_letter)) elif next_letter not in remaining_letters: print('{0} has been guessed before'.format(next_letter)) else: remaining_letters.remove(next_letter) return next_letter def play_hangman(): """Play a game of hangman. At the end of the game, returns if the player wants to retry. 
""" # Let player specify difficulty print('Starting a game of Hangman...') attempts_remaining = get_num_attempts() min_word_length = get_min_word_length() # Randomly select a word print('Selecting a word...') word = get_random_word(min_word_length) print() # Initialize game state variables idxs = [letter not in ascii_lowercase for letter in word] remaining_letters = set(ascii_lowercase) wrong_letters = [] word_solved = False # Main game loop while attempts_remaining > 0 and not word_solved: # Print current game state print('Word: {0}'.format(get_display_word(word, idxs))) print('Attempts Remaining: {0}'.format(attempts_remaining)) print('Previous Guesses: {0}'.format(' '.join(wrong_letters))) # Get player's next letter guess next_letter = get_next_letter(remaining_letters) # Check if letter guess is in word if next_letter in word: # Guessed correctly print('{0} is in the word!'.format(next_letter)) # Reveal matching letters for i in range(len(word)): if word[i] == next_letter: idxs[i] = True else: # Guessed incorrectly print('{0} is NOT in the word!'.format(next_letter)) # Decrement num of attempts left and append guess to wrong guesses attempts_remaining -= 1 wrong_letters.append(next_letter) # Check if word is completely solved if False not in idxs: word_solved = True print() # The game is over: reveal the word print('The word is {0}'.format(word)) # Notify player of victory or defeat if word_solved: print('Congratulations! You won!') else: print('Try again next time!') # Ask player if he/she wants to try again try_again = input('Would you like to try again? [y/Y] ') return try_again.lower() == 'y' while play_hangman(): print() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Agent Behaviour Model - Analytic # # ## $\alpha$ in Prediction Market # # $\Delta s$ is some amount of tokens that an agent would use to make an attetstation between $(\Delta s_{min}, \Delta s_{max}]$ # # We know the expected payout at time $t$ prior to attestation $$\mathbb{E}(\Theta)_{t} = \frac{s_f}{S}(\hat\alpha C + R) + \hat\alpha (C+R) \frac{q_1}{Q_1} \frac{S_1}{S} + (1-\hat\alpha) (R) \frac{q_0}{Q_0} \frac{S_0}{S}$$ # # The expected payout at time $t+1$ after a positive attestation is $$\mathbb{E}(\Theta)^+ = \frac{s_f- \Delta s}{S}(\hat\alpha C + R) + \hat\alpha (C+R) \frac{q_1+\Delta q_1}{Q_1+\Delta q_1} \frac{S_1+\Delta s}{S} + (1-\hat\alpha) (R) \frac{q_0}{Q_0} \frac{S_0}{S}$$ # # The expected payout at time $t+1$ after a negative attestation is $$\mathbb{E}(\Theta)^- = \frac{s_f - \Delta s}{S}(\hat\alpha C + R) + \hat\alpha (C+R) \frac{q_1}{Q_1} \frac{S_1}{S} + (1-\hat\alpha) (R) \frac{q_0+\Delta q_0}{Q_0 +\Delta q_0} \frac{S_0+\Delta s}{S} $$ # # - If $\hat\alpha \neq \alpha$ **and** $s_t > 0$ # - $\hat\alpha \neq \alpha$ implies $\mathbb{E}(\Theta)_{t+1} > \mathbb{E}(\Theta)_{t}$. 
Agent makes an attestation # - If $\hat\alpha > \alpha$ # - Agent makes a positive attestation # - If $\hat\alpha < \alpha$ # - Agent makes a negative attestation # - Else if $\hat\alpha = \alpha$ **or** $s_t \leq 0$ # - Agent does not attest # # $\Delta s$ is some amount of tokens that an agent would use to make an attetstation between $(\Delta s_{min}, \Delta s_{max}]$ # # ### Steps an agent takes towards making an optimal decision # **Step 1**: Check if $\hat\alpha = \alpha$ # - If $\hat\alpha = \alpha$ # - Don't attest # - Else if $\hat\alpha > \alpha$ **or** $\hat\alpha < \alpha$ # - Go to Step 2 # # **Step 2**: Check if $s_t > 0$ # - If $s_t \leq 0$ # - Don't attest # - Else if $s_t > 0$ # - Go to Step 3 # # # **Step 3**: Make a decision on attestation # - Choose $\Delta s$ # - $\Delta s \in (\Delta s_{min}, \Delta s_{max}]$ # - Compute $\mathbb{E}(\Theta)_{t}$ # - Compute $\mathbb{E}(\Theta)_{t+1}^+$ # - Compute $\mathbb{E}(\Theta)_{t+1}^-$ # - If $\mathbb{E}(\Theta)_{t+1}^+ > \mathbb{E}(\Theta)_{t}$ # - Attest positive # - Else if $\mathbb{E}(\Theta)_{t+1}^- > \mathbb{E}(\Theta)_{t}$ # - Attest negative # # There's no need to check for the case $\mathbb{E}(\Theta)_{t+1} = \mathbb{E}(\Theta)_{t}$ since $\hat\alpha \neq \alpha$ ensures that the case where $\mathbb{E}(\Theta)_{t+1} = \mathbb{E}(\Theta)_{t}$ is not possible. # # ### Interval on $\Delta s$ # # #### Upper bound $\Delta s_{max}$ # The maximum amount that an agent can attest at time $t$ is equal to the amount of supply tokens they hold at that time $s_t$. # # However, $\Delta s_{max}$ must be less than the $\Delta s$ that would result in $\alpha$ that overshoots $\hat\alpha$ # # In the case of a positive attestation, # $$max(\mathbb{E}(\Theta)_{t+1}) = \mathbb{E}(\Theta)_{t+1} \space\space | \space\space \alpha_{t+1}=\hat\alpha_t$$ # # We have $\mathbb{E}(\Theta)_{t} \space \stackrel{\mathrm{\Delta s_{max}}}{\longrightarrow} \space max(\mathbb{E}(\Theta)_{t+1})$ # # Expressing $\mathbb{E}(\Theta)_{t+1}$ in terms of state variables at $t$, upon a positive attestation # $$\mathbb{E}(\Theta)_{t+1} = \frac{s_f-\Delta s_{max}}{S}(\hat\alpha_{t} C + R) + \hat\alpha_{t} (C+R) \frac{q_1+\Delta q_1}{Q_1+\Delta q_1} \frac{S_1+\Delta s_{max}}{S} + (1-\hat\alpha_{t}) (R) \frac{q_0}{Q_0} \frac{S_0}{S}$$ # # Since $\mathbb{E}(\Theta)_{t+1} = max(\mathbb{E}(\Theta)_{t+1})$, $\hat\alpha_t = \alpha_{t+1}$ # # $$\mathbb{E}(\Theta)_{t+1} = \frac{s_f-\Delta s_{max}}{S}(\alpha_{t+1} C + R) + \alpha_{t+1} (C+R) \frac{q_1+\Delta q_1}{Q_1+\Delta q_1} \frac{S_1+\Delta s_{max}}{S} + (1-\alpha_{t+1}) (R) \frac{q_0}{Q_0} \frac{S_0}{S}$$ # # Equate the two $\mathbb{E}(\Theta)_{t+1}$ and solve for $\Delta s_{max}$. Substitute $\Delta q_1 = Q_1 \sqrt{(1+\frac{\Delta s_{max}}{S}) - 1}$ # # ... # # #### Lower bound $\Delta s_{min}$ # An agent would only make an attestation if their expected payout after the attestation improves, i.e. 
$\mathbb{E}(\Theta)_{t+1} > \mathbb{E}(\Theta)_{t}$ # # So the lower bound on $\Delta s$ is the $\Delta s$ that would result in $\mathbb{E}(\Theta)_{t+1} = \mathbb{E}(\Theta)_{t}$ # # $$\mathbb{E}(\Theta)_{t+1} = \mathbb{E}(\Theta)_{t} \space\space | \space\space \Delta s = \Delta s_{min}$$ # # $\frac{s_f-\Delta s_{min}}{S}(\hat\alpha C + R) + \hat\alpha (C+R) \frac{q_1+\Delta q_1}{Q_1+\Delta q_1} \frac{S_1+\Delta s_{min}}{S} + (1-\hat\alpha) (R) \frac{q_0}{Q_0} \frac{S_0}{S} = \frac{s_f}{S}(\hat\alpha C + R) + \hat\alpha (C+R) \frac{q_1}{Q_1} \frac{S_1}{S} + (1-\hat\alpha) (R) \frac{q_0}{Q_0} \frac{S_0}{S}$ # # $\frac{s_f-\Delta s_{min}}{S}(\hat\alpha C + R) + \hat\alpha (C+R) \frac{q_1+\Delta q_1}{Q_1+\Delta q_1} \frac{S_1+\Delta s_{min}}{S} = \frac{s_f}{S}(\hat\alpha C + R) + \hat\alpha (C+R) \frac{q_1}{Q_1} \frac{S_1}{S}$ # # Substitute $\Delta q_1 = Q_1 \sqrt{(1+\frac{\Delta s_{min}}{S}) - 1}$. Solve for $\Delta s_{min}$ # # $$\Delta s_{min} = 0$$ # # # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ResNet26(model.py) # + from __future__ import print_function from input_data import * import torch.nn as nn from torch.autograd import Variable from utils import * import os import math import argparse #解析命令行参数 import torch import torch.nn.functional as F import torch.optim as optim from torch.utils.data import Dataset from torchvision import transforms # from torch.utils.data.distributed import DistributedSampler import random from random import choice from sklearn import manifold import matplotlib.pyplot as plt import numpy as np import pandas as pd from tensorboardX import SummaryWriter import logging class ResidualBlock(nn.Module): def __init__(self,in_channels,out_channels, stride=1,downsample=None, k=2): super(ResidualBlock,self).__init__() self.k = k # print(in_channels, self.k) self.in_channels = in_channels # print('init_in_channels:', self.in_channels) self.out_channels = out_channels * self.k self.bn1 = nn.BatchNorm2d(in_channels) self.relu = nn.ReLU(inplace=True) self.conv1 = nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1) self.bn2 = nn.BatchNorm2d(self.out_channels) self.conv2 = nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3,padding=1) self.dropout = nn.Dropout(p = 0.2) self.bn3 = nn.BatchNorm2d(self.out_channels) self.conv3 = nn.Conv2d(self.out_channels, self.out_channels*4, kernel_size=1,stride=stride) self.downsample = downsample self.stride = stride def forward(self, x): residual = x x = self.bn1(x) x = self.relu(x) x = self.conv1(x) # print("block conv1 size",x.size()) x = self.bn2(x) x = self.relu(x) x = self.conv2(x) # print("block conv2 size",x.size()) x = self.dropout(x) x = self.bn3(x) x = self.relu(x) x = self.conv3(x) # print("block conv3 size ",x.size()) # print("bolck residual size",residual.size()) # print("downsample ;",self.downsample) if self.downsample is not None: residual = self.downsample(residual) # print("x size",x.size()) # print('residual size',residual.size()) x += residual return x class ResNet(nn.Module): def __init__(self,ResidualBlock,layers,k=2, num_classes=5): self.k = 2 super(ResNet, self).__init__() self.in_channels = 64 self.num_classes = num_classes self.stage1 = nn.Sequential( nn.Conv2d(3, 64, kernel_size=6, stride=1,padding=3), nn.MaxPool2d(kernel_size=2,stride=2),) self.stage2 = self._make_layer(ResidualBlock, 64, layers[0], stride=2) 
self.stage3 = self._make_layer(ResidualBlock, 128, layers[1], stride=2) self.stage4 = self._make_layer(ResidualBlock, 256, layers[2], stride=2) self.stage5 = self._make_layer(ResidualBlock, 512, layers[3], stride=1) self.avgpool = nn.AvgPool2d(4) self.fc = nn.Linear(2048*self.k, num_classes) self.classifier = nn.Softmax(1) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() # def _make_layer(self,block, out_channels, blocks, stride=1): def _make_layer(self, block, out_channels,blocks=1,stride=1): downsample = None # print('downsample: in_channel :',self.in_channels) # print("make layer downsample: ",downsample) # layers = [] # layers.append(block(self.in_channels, out_channels, stride, downsample)) # for i in range(1, blocks): # layers.append(block(self.in_channels, out_channels)) # self.in_channels = out_channels # self.in_channels = out_channels # return nn.Sequential(*layers) layers = [] downsample = None for i in range(0,blocks-1): if self.in_channels != out_channels * 4 * self.k: # print(out_channels*4*self.k) downsample = nn.Conv2d(self.in_channels, out_channels * 4 * self.k, kernel_size=1) # print('else') layers.append(block(self.in_channels, out_channels, stride=1, downsample=downsample)) self.in_channels = out_channels * 4 * self.k downsample = None if stride != 1 : downsample = nn.MaxPool2d(kernel_size=1, stride=stride, ) else: # print('else 2') pass layers.append(block(self.in_channels, out_channels, stride, downsample=downsample)) self.in_channels = out_channels * 4 * self.k return nn.Sequential(*layers) def forward(self, x): x = self.stage1(x) # print("stage1 ",x.size()) x = self.stage2(x) # print("stage2 size ",x.size()) x = self.stage3(x) x = self.stage4(x) x = self.stage5(x) x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.fc(x) self.classifier(x) return x def Resnet26(pretrained=False,num_classes=5): model = ResNet(ResidualBlock, [2, 2, 2, 2], num_classes=5) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model if __name__ == '__main__' : model = Resnet26() # input = torch.randn(10,3,64,64 # print(train_dataset[0][0]) print(type(test_dataset)) input = test_dataset[0] input = Variable(input) output = model(input) print(output.size()) # - # # data cleaning(choose_clean_sanmple.py) # + import pandas as pd import pprint import matplotlib.pyplot as plt import matplotlib.image as mpimg import random def choose_clean_sample(dir): train_solution_df=pd.read_csv(dir) index_id = train_solution_df['GalaxyID'] f_smooth = train_solution_df['Class1.1'] f_completely_round = train_solution_df['Class7.1'] f_in_between = train_solution_df['Class7.2'] f_cigar_shaped = train_solution_df['Class7.3'] f_features_disk = train_solution_df['Class1.2'] f_edge_on_yes = train_solution_df['Class2.1'] f_edge_on_no = train_solution_df['Class2.2'] f_spiral_yes = train_solution_df['Class4.1'] #print(index_id[0]) # print(f_smooth.loc[0]) # pprint.pprint(f_smooth) label = {} # label_id = {} for index in train_solution_df.index: if f_smooth.loc[index] >= 0.469 and f_completely_round.loc[index] >= 0.5: # label.append([index, 0]) label[index] = 0 elif f_smooth.loc[index] >= 0.469 and f_in_between.loc[index] >= 0.5: # label.append([index, 1]) label[index] = 1 elif f_smooth.loc[index] >= 0.469 and f_cigar_shaped.loc[index] >= 0.5: # label.append([index, 2]) label[index] = 2 elif 
f_features_disk.loc[index] >= 0.430 and f_edge_on_yes.loc[index] >=0.602: # label.append([index, 3]) label[index] = 3 elif f_features_disk.loc[index] >= 0.430 and f_edge_on_no.loc[index] >= 0.715 and f_spiral_yes.loc[index] >= 0.619: # label.append([index, 4]) label[index] = 4 else: pass # print(len(label)) picture = [] picture_label = [] for key,value in label.items(): key_id = index_id[key] # label_id[key_id] = value # print(len(label_id)) picture.append('data/galaxy/images_training_rev1/'+str( key_id)+'.jpg ') picture_label.append(value) # draw = mpimg.imread('./'+ picture[0]) #读取图片 # plt.imshow(draw) # 显示图片 # plt.axis('off') # 不显示坐标轴 # plt.show() data = {'root':picture,'label':picture_label,} f_data = pd.DataFrame(data) cols = f_data.columns.tolist() cols = cols[-1:] + cols[:-1] f_data = f_data[cols] #print(f_data.head(3)) #圆形星系 data_round = f_data[f_data['label'] == 0] data_round = data_round.reset_index(drop=True) # print(data_round.shape) ind_list_r = list(range(data_round.shape[0])) ind_sample_r = random.sample(ind_list_r,round(data_round.shape[0]*0.9)) # print("ind_sanmple_r : ",len(ind_sample_r)) #7592 ind_rest_r = [x for x in ind_list_r if x not in ind_sample_r] # print("ind_rest_r : ", len(ind_rest_r)) # 844 train_round = pd.DataFrame() test_round = pd.DataFrame() # print("data_round.index ",len(data_round.index)) #8436 # print("data_round.index ",data_round.index) for index in ind_sample_r: train_round = train_round.append(data_round.loc[[index]], ignore_index=True) # print(" train_round ", train_round .shape) for index in ind_rest_r: test_round = test_round.append(data_round.loc[[index]], ignore_index=True) # drawname = test_round.to_string(header=False,index=False).split()[0] # print('drawname :'+drawname) # draw = mpimg.imread(drawname) #读取图片 # plt.imshow(draw) # 显示图片 # plt.axis('off') # 不显示坐标轴 # plt.show() # print("test round ", test_round.shape) #middle data_middle = f_data[f_data['label'] == 1] data_middle = data_middle.reset_index(drop=True) # print(data_middle.shape) ind_list_m = list(range(data_middle.shape[0])) ind_sample_m = random.sample(ind_list_m,round(data_middle.shape[0]*0.9)) ind_rest_m = [x for x in ind_list_m if x not in ind_sample_m] train_middle = pd.DataFrame() test_middle = pd.DataFrame() for index in ind_sample_m: train_middle = train_middle.append(data_middle.loc[[index]], ignore_index=True) for index in ind_rest_m: test_middle = test_middle.append(data_middle.loc[[index]], ignore_index=True) # print("middle ", test_middle.shape) #cigar data_cigar = f_data[f_data['label'] == 2] data_cigar = data_cigar.reset_index(drop=True) ind_list_c = list(range(data_cigar.shape[0])) ind_sample_c = random.sample(ind_list_c, round(data_cigar.shape[0] * 0.9)) ind_rest_c = [x for x in ind_list_c if x not in ind_sample_c] train_cigar = pd.DataFrame() test_cigar = pd.DataFrame() for index in ind_sample_c: train_cigar = train_cigar.append(data_cigar.loc[[index]], ignore_index=True) for index in ind_rest_c: test_cigar = test_cigar.append(data_cigar.loc[[index]], ignore_index=True) # print("cigar ", test_cigar.shape) #lateral data_lateral = f_data[f_data['label'] == 3] data_lateral = data_lateral.reset_index(drop=True) ind_list_l = list(range(data_lateral.shape[0])) ind_sample_l = random.sample(ind_list_l, round(data_lateral.shape[0]*0.9)) ind_rest_l = [x for x in ind_list_l if x not in ind_sample_l] train_lateral = pd.DataFrame() test_lateral = pd.DataFrame() for index in ind_sample_l: train_lateral = train_lateral.append(data_lateral.loc[[index]], ignore_index=True) for 
index in ind_rest_l: test_lateral = test_lateral.append(data_lateral.loc[[index]], ignore_index=True) # print("lateral ",test_lateral.shape) # spiral data_spiral = f_data[f_data['label'] == 4] data_spiral = data_spiral.reset_index(drop=True) ind_list_s = list(range(data_spiral.shape[0])) ind_sample_s = random.sample(ind_list_s, round(data_spiral.shape[0] * 0.9)) ind_rest_s = [x for x in ind_list_s if x not in ind_sample_s] train_spiral = pd.DataFrame() test_spiral = pd.DataFrame() for index in ind_sample_s: train_spiral = train_spiral.append(data_spiral.loc[[index]], ignore_index=True) for index in ind_rest_s: test_spiral = test_spiral.append(data_spiral.loc[[index]], ignore_index=True) # print("spiral",test_spiral.shape) # print(data_spiral.head(3)) train_clean_data = train_round.append([train_middle,train_cigar,train_lateral,train_spiral]) train_clean_data = train_clean_data.sample(frac=1).reset_index(drop=True) test_clean_data = test_round.append([test_middle, test_cigar, test_lateral, test_spiral]) test_clean_data = test_clean_data.sample(frac=1).reset_index(drop=True) # print('train ',train_clean_data.shape) print('test ', test_clean_data.shape) #返回的f_data 是 dataframe,第一列叫‘root',第二列叫"label" return train_clean_data, test_clean_data if __name__ == '__main__': dir = 'data/galaxy/training_solutions_rev1/training_solutions_rev1.csv' train_clean_sample, test_clean_sample = choose_clean_sample(dir) filename1 = 'train_list.txt' filename2 = 'test_list.txt' train_clean_sample_str = train_clean_sample.to_string(header=False,index=False) test_clean_sample_str = test_clean_sample.to_string(header=False,index=False) with open(filename1,'w') as file: file.write(train_clean_sample_str) with open(filename2, 'w') as file: file.write(test_clean_sample_str)a # - # # Data_loader (input_data.py) from torchvision import transforms from PIL import Image import torch import numpy as np import math import random import cv2 from model import * cv2.ocl.setUseOpenCL(False) from torch.utils.data import Dataset # from MLP import * class ResizeCV2(object): def __init__(self, new_width, new_height): self.new_width = new_width self.new_height = new_height def __call__(self, img): img_np = np.array(img) img_np = cv2.resize(img_np, (self.new_width, self.new_height)) img = Image.fromarray(img_np) return img class McDataset(Dataset): def __init__(self, root_dir, meta_file, transform=None, output_index=False): self.root_dir = root_dir self.transform = transform with open(meta_file) as f: lines = f.readlines() print("building dataset from %s" % meta_file) self.num = len(lines) self.metas = [] for line in lines: path, cls = line.rstrip().split() self.metas.append((path, int(cls))) print("read meta done") self.initialized = False self.output_index = output_index def __len__(self): return self.num def __getitem__(self, idx): filename = self.root_dir + '/' + self.metas[idx][0] cls = self.metas[idx][1] img = Image.open(filename) ## transform origin_img = (np.array(transforms.ToTensor()(img)).transpose((1,2,0)).copy()*255).astype(np.int32) cv2.imwrite('origin.jpg', origin_img) if self.transform is not None: img = self.transform(img) change_img = (np.array(img).transpose((1,2,0)).copy()*255).astype(np.int32) cv2.imwrite('change.jpg', change_img) if self.output_index: return img, cls, idx else: return img, cls class White(object): def __init__(self): pass def __call__(self, img): # print('img:', img) size = img.size() # print(size[0]) img = img.view(size[0], -1) #print(img.size()) eps = torch.ones(size[0],1)*(1/math.sqrt(size[1])) 
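        # eps puts a lower bound on the std used for the whitening below,
        # preventing division by (near) zero on flat, constant-valued images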
# print(eps.size()) # print('img:', img) mean = torch.mean(img, dim=1, keepdim=True) #print('mean:', mean.size()) std_tmp = torch.cat((torch.std(img, dim=1, keepdim=True), eps), dim=0) # print(torch.cat((torch.std(img, dim=0, keepdim=True), eps), dim=0).size()) std = torch.max(std_tmp, dim=0)[0].expand_as(mean) # std = max(torch.std(img, dim=0, keepdim=True), eps) #print('std:', std.size()) img = (img - mean) / std img = img.view(size[0], size[1], size[2]) # print(img.size()) return img if __name__ == '__main__': filename = 'test_list.txt' test_dataset = McDataset( '.', filename, transforms.Compose([ transforms.CenterCrop((170, 240)), ResizeCV2(80, 80), transforms.RandomCrop((64, 64)), transforms.RandomRotation(90 * random.randint(0, 4)), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.5, contrast=0.8, saturation=0, hue=0), transforms.ToTensor(), White(), ])) # print(train_dataset[0][0]) input = test_dataset[0][0] print(input.size()) input = input.reshape(1,3,64,64) input = Variable(input) model = Resnet26() checkpoint = torch.load('checkpoint/_204.pth.tar') net = Resnet26() # net.load_state_dict(checkpoint['state_dict']) own_state = net.state_dict() state_dict = checkpoint['state_dict'] # print(own_state.keys()) for name, param in state_dict.items(): name = 'module.' + name # print(name) if name in own_state: # print('here') if isinstance(param, torch.nn.Parameter): # isinstance函数来判断一个对象是否是一个已知的类型 # backwards compatibility for serialized parameters param = param.data try: own_state[name].copy_(param) except Exception: print('While copying the parameter named {}, ' 'whose dimensions in the model are {} and ' 'whose dimensions in the checkpoint are {}.' .format(name, own_state[name].size(), param.size())) print("But don't worry about it. 
Continue pretraining.") output = model(input) # print(output.size()) # # traning # # 204个epoch左右达最高精确度 # + def train(epoch): model.train() # 把module设成training模式,对Dropout和BatchNorm有影响 if epoch%args.decay_epoch == 0: adjust_learning_rate(optimizer, decay_rate=args.decay_rate) for batch_idx, (data, target) in enumerate(train_loader): if args.cuda: data, target = data.cuda(), target.cuda() data,target = Variable(data),Variable(target) # Variable类对Tensor对象进行封装,会保存该张量对应的梯度,以及对生成该张量的函数grad_fn的一个引用。 # print("data.size : ",data.size()) # (64,1,28,28) # data = data.view(-1, 28,28) # print(target.size()) optimizer.zero_grad() # zero the gradient buffers,必须要置零 output = model(data) #print('data',data.size()) #print('output', output.size()) loss = F.cross_entropy(output, target) loss.backward() optimizer.step() #print(output.data) pred = output.data.max(1)[1] # get the index of the max log-probability #print(pred) #print('predsize',pred.size()) # output是(64,10)的tensor,pred.size是[64] acc = [] recall = [] correct = pred.eq(target.data) #如果预测正确,correct加一 #print(target.data.size()) for i in range(5): #5 class class_tmp = torch.ones(target.data.size()[0])*i class_tmp = class_tmp.long() class_index = target.data.cpu().eq(class_tmp) #print(pred[pred == i].size()) if pred[pred == i].size()[0]: #print(correct[class_index.cuda().byte()==1].sum()) #print(pred[pred == i].size()[0]) acc.append(float(correct[class_index.cuda().byte()==1].sum())/(pred[pred == i].size()[0])) # 准确率 else: acc.append(0) #print(pred[pred == i].sum()) #print(pred[pred == i]) #print(target.data[target.data == i].sum()) if target.data[target.data == i].size()[0]: recall.append(float(correct[class_index.cuda().byte()==1].sum())/(target.data[target.data == i].size()[0])) # 召回率 else: recall.append(0) #print('acc', acc, 'recall', recall) curr_step = epoch*len(train_loader)/args.batch_size + batch_idx tb_logger.add_scalar('acc0_train', acc[0], curr_step) tb_logger.add_scalar('acc1_train', acc[1], curr_step) tb_logger.add_scalar('acc2_train', acc[2], curr_step) tb_logger.add_scalar('acc3_train', acc[3], curr_step) tb_logger.add_scalar('acc4_train', acc[4], curr_step) tb_logger.add_scalar('loss', loss.data, curr_step) #tb_logger.add_scalar('lr', , curr_step) logger.info('Loss:{loss}, curr_step:{curr_step}, accuracy:{acc}, recall:{recall}'.format(loss=loss, curr_step=curr_step, acc=acc, recall=recall)) if batch_idx % args.log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.data[0])) # - # # testing # # 最高精确率为96%. def test(epoch): model.eval() # 把module设置为评估模式,只对Dropout和BatchNorm模块有影响 test_loss = 0 correct_num = 0 acc, recall, pred_num, target_num = [0]*5, [0]*5, [0]*5, [0]*5 for batch_idx, (data, target) in enumerate(test_loader): if args.cuda: data, target = data.cuda(), target.cuda() data, target = Variable(data), Variable(target) # volatile=true 排除子图 让test数据不参与梯度的计算,加速测试; # volatile was removed and now has no effect. Use `with torch.no_grad():` instead. 
# data = data.view(-1, 28,28) output = model(data) #print('output', output) test_loss += F.cross_entropy(output, target).data[0] # Variable.data pred = output.data.max(1)[1] # get the index of the max log-probability #print('pred', pred) #print('predsize',pred.size()) # output是(64,10)的tensor,pred.size是[64] correct_num += pred.eq(target.data).cpu().sum() #如果预测正确,correct加一 correct = pred.eq(target.data) #如果预测正确,correct加一 #print(target.data.size()) for i in range(5): #5 class class_tmp = torch.ones(target.data.size()[0])*i class_tmp = class_tmp.long() class_index = target.data.cpu().eq(class_tmp) #print(pred[pred == i].size()) #print(correct[class_index.cuda().byte()==1].sum()) #print(pred[pred == i].size()[0]) acc[i] += float(correct[class_index.cuda().byte()==1].sum()) pred_num[i] += pred[pred == i].size()[0] #print(pred[pred == i].sum()) #print(pred[pred == i]) #print(target.data[target.data == i].sum()) recall[i] += float(correct[class_index.cuda().byte()==1].sum()) target_num[i] += target.data[target.data == i].size()[0] print(acc, recall, pred_num, target_num) for i in range(len(acc)): if pred_num[i]: acc[i] = acc[i]/pred_num[i] else: acc[i] = 0 recall[i] = recall[i]/target_num[i] test_loss = test_loss test_loss /= len(test_loader) # loss function already averages over batch size logger.info('\nTest set: Average loss: {:.4f}, AccuracyofAll: {}/{} ({:.0f}%), acc:{acc}, recall:{recall}\n'.format( test_loss, correct_num, len(test_loader.dataset), 100. * correct_num / len(test_loader.dataset), acc=acc, recall=recall)) print('\nTest set: Average loss: {:.4f}, AccuracyofAll: {}/{} ({:.4f}%), acc:{acc}, recall:{recall}\n'.format( test_loss, correct_num, len(test_loader.dataset), 100. * correct_num / len(test_loader.dataset), acc=acc, recall=recall)) # # show the classification # + # # %load predict.py from torchvision import transforms from PIL import Image import matplotlib.image as mpimg import torch import numpy as np import math import random import cv2 from model import * cv2.ocl.setUseOpenCL(False) from torch.utils.data import Dataset #import matplotlib.image as mpimg # from MLP import * class ResizeCV2(object): def __init__(self, new_width, new_height): self.new_width = new_width self.new_height = new_height def __call__(self, img): img_np = np.array(img) img_np = cv2.resize(img_np, (self.new_width, self.new_height)) img = Image.fromarray(img_np) return img class McDataset(Dataset): def __init__(self, root_dir, meta_file, transform=None, output_index=False): self.root_dir = root_dir self.transform = transform with open(meta_file) as f: lines = f.readlines() print("building dataset from %s" % meta_file) self.num = len(lines) self.metas = [] for line in lines: path, cls = line.rstrip().split() self.metas.append((path, int(cls))) print("read meta done") self.initialized = False self.output_index = output_index def __len__(self): return self.num def __getitem__(self, idx): filename = self.root_dir + '/' + self.metas[idx][0] cls = self.metas[idx][1] img = Image.open(filename) ## transform origin_img = (np.array(transforms.ToTensor()(img)).transpose((1,2,0)).copy()*255).astype(np.int32) cv2.imwrite('origin.jpg', origin_img) if self.transform is not None: img = self.transform(img) change_img = (np.array(img).transpose((1,2,0)).copy()*255).astype(np.int32) cv2.imwrite('change.jpg', change_img) if self.output_index: return img, cls, idx else: return img, cls, filename class White(object): def __init__(self): pass def __call__(self, img): # print('img:', img) size = img.size() # print(size[0]) 
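        # flatten the (C, H, W) tensor to (C, H*W) so the mean/std below are taken over the pixel dimension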
img = img.view(size[0], -1) #print(img.size()) eps = torch.ones(size[0],1)*(1/math.sqrt(size[1])) # print(eps.size()) # print('img:', img) mean = torch.mean(img, dim=1, keepdim=True) # print('mean:', mean.size()) std_tmp = torch.cat((torch.std(img, dim=1, keepdim=True), eps), dim=0) # print(torch.cat((torch.std(img, dim=0, keepdim=True), eps), dim=0).size()) std = torch.max(std_tmp, dim=0)[0].expand_as(mean) # std = max(torch.std(img, dim=0, keepdim=True), eps) #print('std:', std.size()) img = (img - mean) / std img = img.view(size[0], size[1], size[2]) # print(img.size()) return img if __name__ == '__main__': filename = 'test_list.txt' test_dataset = McDataset( '.', filename, transforms.Compose([ transforms.CenterCrop(220), ResizeCV2(80, 80), transforms.CenterCrop((64, 64)), transforms.ToTensor(), White(), ])) # print(train_dataset[0][0]) num = random.randint(0, 2880) print('select picture randomly:',num) input = test_dataset[num][0] img,cls,root =test_dataset.__getitem__(num) input = input.reshape(1,3,64,64) input = Variable(input) model = Resnet26() cls = int(cls) if cls == 0: clsname = 'round' elif cls == 1: clsname = 'middle' elif cls == 2: clsname = 'cigar' elif cls == 3: clsname = 'lateral' else: clsname = 'spiral' checkpoint = torch.load('checkpoint/_204.pth.tar') state_dict = checkpoint['state_dict'] own_state = model.state_dict() # print(own_state.keys()) for name, param in state_dict.items(): # name = 'module.'+name name = name[7:] # print(name) if name in own_state: # print('here') if isinstance(param, torch.nn.Parameter): # isinstance函数来判断一个对象是否是一个已知的类型 # backwards compatibility for serialized parameters param = param.data try: own_state[name].copy_(param) # print('here') except Exception: print('While copying the parameter named {}, ' 'whose dimensions in the model are {} and ' 'whose dimensions in the checkpoint are {}.' .format(name, own_state[name].size(), param.size())) print("But don't worry about it. Continue pretraining.") output = model(input) pred = output.data.max(1)[1] pred = int(pred) # print(output.size()) if pred == 0: predname = 'round' elif pred == 1: predname = 'middle' elif pred == 2: predname = 'cigar' elif pred == 3: predname = 'lateral' else: predname = 'spiral' # draw = mpimg.imread(root) # plt.imshow(draw) # 显示图片 # plt.axis('off') # 不显示坐标轴 # plt.title('true label: '+clsname+' prediction: '+ predname) # plt.show() # print(output.size()) # - print('galaxy ID : ',root[35:41]) print('true label: ',cls) print('predict',int(pred)) draw = mpimg.imread(root) plt.imshow(draw) plt.axis('off') plt.title(' true label: '+clsname+'\n prediction: '+ predname) plt.show() # --- # jupyter: # jupytext: # formats: ipynb,md # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.7.0-DEV # language: julia # name: julia-1.7 # --- # + [markdown] tags=[] # ## Python # + tags=[] from sympy import * # - a = -oo a b = -float('inf') b type(a) type(b) # ## Julia using SymPy a = -oo using PyCall b = py"""-float('inf')""" typeof(a) typeof(b) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Famous Iris Flower Species Dataset # # **Objective** # # * The Iris Flower Dataset involves predicting the flower species given measurements of iris flowers; # * It is a multiclass classification problem. 
The number of observations for each class is balanced; # * Solve it in 3 steps: Calculate Euclidean Distance; Get Nearest Neighbors (KNN) and Make Predictions. # # **Dataset features** # # * **sepal length** in cm # * **sepal width** in cm # * **petal length** in cm # * **petal width** in cm # * **class**: # -- Iris Setosa # -- Iris Versicolour # -- Iris Virginica # # * Rows: 150 # * Columns: 5 # * File format: csv # # + from math import sqrt # calculate the Euclidean distance between two vectors def euclidean_distance(row1, row2): distance = 0.0 for i in range(len(row1)-1): # for index, value in enumerate(names) distance += (row1[i] - row2[i])**2 return sqrt(distance) # - # The function assumes that the last column in each row is an output value which is ignored from the distance calculation # Test distance function dataset = [[2.7810836,2.550537003,0], [1.465489372,2.362125076,0], [3.396561688,4.400293529,0], [1.38807019,1.850220317,0], [3.06407232,3.005305973,0], [7.627531214,2.759262235,1], [5.332441248,2.088626775,1], [6.922596716,1.77106367,1], [8.675418651,-0.242068655,1], [7.673756466,3.508563011,1]] row0 = dataset[0] for row in dataset: distance = euclidean_distance(row0, row) print(distance) # Locate the most similar neighbors def get_neighbors(train, test_row, num_neighbors): distances = list() for train_row in train: dist = euclidean_distance(test_row, train_row) distances.append((train_row, dist)) distances.sort(key=lambda tup: tup[1]) neighbors = list() for i in range(num_neighbors): neighbors.append(distances[i][0]) return neighbors # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib notebook import control as c import ipywidgets as w import numpy as np from IPython.display import display, HTML import matplotlib.pyplot as plt import matplotlib.animation as animation display(HTML('')) # - # ## Transfer functions # # In the following example, we will take a look at the formulation of transfer functions. While used in various fields, in control theory transfer functions describe the frequency transfer characteristics of a Linear Time-invariant (LTI) system. # # Generally, a transfer function describes the relationship between the Laplace transform of the input $u(t)$ and output $y(t)$ signals: # # $$U(s)=\mathcal{L}\{u(t)\} \quad Y(s)=\mathcal{L}\{y(t)\}$$ # # $$Y(s)=G(s)U(s)$$ # # # # In the case of LTI systems, the transfer function can be described as a fraction of two polynomials: # # $$G(s)=\frac{\sum\limits_{j=0}^{m}b_js^j}{\sum\limits_{i=0}^{n}a_is^i},$$ # # where the denominator $a(s)$ is the characteristic polynomial of the system. #
Normalizing the coefficients so that $a_n=1$, the resulting forms are: # # $$a(s) = s^n + a_{n-1}s^{n-1} + a_{n-2}s^{n-2} + ... + a_1s^1 + a_0$$ # $$b(s) = b_ms^m + b_{m-1}s^{m-1} + b_{m-2}s^{m-2} + ... + b_1s^1 + b_0$$ # # For physical systems, the degree of the numerator is less than the degree of the denominator; this type is called a strictly proper transfer function. In this case, the frequency response converges to zero at infinity. # # #
Assemble a transfer function by selecting the polynomial terms! # + b = {} a = {} b[0] = w.FloatText(value=1.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%')) b[1] = w.FloatText(value=1.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%')) b[2] = w.FloatText(value=0.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%')) b[3] = w.FloatText(value=0.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%')) b[4] = w.FloatText(value=0.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%')) a[0] = w.FloatText(value=10.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%')) a[1] = w.FloatText(value=1.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%')) a[2] = w.FloatText(value=0.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%')) a[3] = w.FloatText(value=0.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%')) a[4] = w.FloatText(value=0.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%')) a[5] = w.FloatText(value=0.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%')) def transfer_function(a0, a1, a2, a3, a4, a5, b0, b1, b2, b3, b4): b1c = b1 b2c = b2 b3c = b3 b4c = b4 global b if a5 == 0: b[4].disabled=True b4c = 0 else: b[4].disabled=False if a5 == 0 and a4==0: b[3].disabled=True b3c = 0 else: b[3].disabled=False if a5 == 0 and a4 == 0 and a3 == 0: b[2].disabled=True b2c = 0 else: b[2].disabled=False if a5 == 0 and a4 == 0 and a3 == 0 and a2 == 0: b[1].disabled=True b1c = 0 else: b[1].disabled=False G = c.tf([b4c, b3c, b2c, b1c, b0], [a5, a4, a3, a2, a1, a0]) # Transfer function print('The resulting transfer function:') print(G) input_data = w.interactive_output(transfer_function, {'a0':a[0], 'a1':a[1], 'a2':a[2], 'a3':a[3], 'a4':a[4], 'a5':a[5], 'b0':b[0], 'b1':b[1], 'b2':b[2], 'b3':b[3], 'b4':b[4]}) display(w.HBox([w.VBox([w.Label('$G(s)=$')], layout=w.Layout(justify_content="center", align_items='flex-start')), w.VBox([w.HBox([b[4], w.Label('$s^4+$'), b[3], w.Label('$s^3+$'), b[2], w.Label('$s^2+$'), b[1], w.Label('$s+$'), b[0]], layout=w.Layout(justify_content='center')), w.HBox([w.HTML(value='
', layout=w.Layout(width='100%'))], layout=w.Layout(justify_content='center')), w.HBox([a[5], w.Label('$s^5+$'), a[4], w.Label('$s^4+$'), a[3], w.Label('$s^3+$'), a[2], w.Label('$s^2+$'), a[1], w.Label('$s+$'), a[0]], layout=w.Layout(justify_content='center')) ], layout=w.Layout(width='70%'))], layout=w.Layout(justify_content='center') ), input_data) # - # The solutions of the numerator and denominator polynomials are called the system's zeros and poles, respectively. They can be used to determine system's stability and evaluate its performance. # # Both zeros and poles are either real numbers or complex conjugate pairs (if the system's differential equations are real-valued). # #
Experiment with the polynomial terms and observe the changes in the pole-zero map! # + fig1, (f1_ax1) = plt.subplots(1, 1) fig1.set_size_inches((5, 5)) fig1.set_tight_layout(True) f1_line1, = f1_ax1.plot([], [], 'rs') f1_line2, = f1_ax1.plot([], [], 'bo') f1_ax1.axhline(y=0, color='k', lw=0.5) f1_ax1.axvline(x=0, color='k', lw=0.5) f1_ax1.grid(which='both', axis='both', color='lightgray') f1_ax1.autoscale(enable=True, axis='both') f1_ax1.set_title('Pole-Zero Map', fontsize=12) f1_ax1.set_xlabel('Re', labelpad=0, fontsize=10) f1_ax1.set_ylabel('Im', labelpad=0, fontsize=10) f1_ax1.tick_params(axis='both', which='both', pad=0, labelsize=8) f1_ax1.legend([f1_line1, f1_line2], ['Zeros', 'Poles']) def pz_map(a0, a1, a2, a3, a4, a5, b0, b1, b2, b3, b4): b1c = b1 b2c = b2 b3c = b3 b4c = b4 if a5 == 0: b4c = 0 if a5 == 0 and a4==0: b3c = 0 if a5 == 0 and a4 == 0 and a3 == 0: b2c = 0 if a5 == 0 and a4 == 0 and a3 == 0 and a2 == 0: b1c = 0 G = c.tf([b4c, b3c, b2c, b1c, b0], [a5, a4, a3, a2, a1, a0]) # Transfer function p = c.pole(G) # Poles z = c.zero(G) # Zeros px = [x.real for x in p] py = [x.imag for x in p] zx = [x.real for x in z] zy = [x.imag for x in z] global f1_line1, f1_line2 try: f1_ax1.lines.remove(f1_line1) f1_ax1.lines.remove(f1_line2) except: pass f1_line1, = f1_ax1.plot(zx, zy, 'rs') f1_line2, = f1_ax1.plot(px, py, 'bo') f1_ax1.relim() f1_ax1.autoscale_view() w.interactive_output(pz_map, {'a0':a[0], 'a1':a[1], 'a2':a[2], 'a3':a[3], 'a4':a[4], 'a5':a[5], 'b0':b[0], 'b1':b[1], 'b2':b[2], 'b3':b[3], 'b4':b[4]}) # - # The time-domain counterpart of the transfer function is the impulse response: $g(t)$. As the Laplace transform of the Dirac delta function is the constant 1, the transfer function of an LTI system can be matched with its impulse response. # # $$\mathcal{L}\{\delta(t)\} = 1$$ # # $$1\cdot G(s) \; \xrightarrow{\mathcal{L}^{-1}} \; \delta(t)*g(t)$$ # # $$Y(s) = G(s)U(s) \; \xrightarrow{\mathcal{L}^{-1}} \; y(t) = \int\limits_{-\infty}^{\infty}g(t-\tau)u(\tau) d\tau,$$ # # where, according to Laplace transformation rules, multiplication in the frequency domain is matched with convolution in the time domain, and convolving a function with the Dirac delta returns the same function. # #
Observe the changes in the impulse response between various configurations! # + fig2, (f2_ax1) = plt.subplots(1, 1) fig2.set_size_inches((9.8, 5)) fig2.set_tight_layout(True) f2_line1, = f2_ax1.plot([], []) f2_ax1.grid(which='both', axis='both', color='lightgray') f2_ax1.autoscale(enable=True, axis='both') f2_ax1.set_title('Impulse Response', fontsize=12) f2_ax1.set_xlabel(r'$t$ [s]', labelpad=0, fontsize=10) f2_ax1.set_ylabel(r'$y(t)$ [/]', labelpad=0, fontsize=10) f2_ax1.tick_params(axis='both', which='both', pad=0, labelsize=8) def inp_resp(a0, a1, a2, a3, a4, a5, b0, b1, b2, b3, b4): b1c = b1 b2c = b2 b3c = b3 b4c = b4 if a5 == 0: b4c = 0 if a5 == 0 and a4==0: b3c = 0 if a5 == 0 and a4 == 0 and a3 == 0: b2c = 0 if a5 == 0 and a4 == 0 and a3 == 0 and a2 == 0: b1c = 0 G = c.tf([b4c, b3c, b2c, b1c, b0], [a5, a4, a3, a2, a1, a0]) # Transfer function tout, yout = c.impulse_response(G) global f2_line1 try: f2_ax1.lines.remove(f2_line1) except: pass f2_line1, = f2_ax1.plot(np.concatenate(([0], tout)), np.concatenate(([0], yout)), '-b') f2_ax1.relim() f2_ax1.autoscale_view() w.interactive_output(inp_resp, {'a0':a[0], 'a1':a[1], 'a2':a[2], 'a3':a[3], 'a4':a[4], 'a5':a[5], 'b0':b[0], 'b1':b[1], 'b2':b[2], 'b3':b[3], 'b4':b[4]}) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #

# # Box Plot
import numpy as np
import pandas as pd
import matplotlib as mlp
import matplotlib.pyplot as plt
from IPython.display import display, HTML
import matplotlib.dates as mdates
import matplotlib.cbook as cbook
import datetime as dt
from matplotlib.dates import DateFormatter, MonthLocator, DayLocator
import seaborn as sns
# %matplotlib inline
from matplotlib.backends.backend_pdf import PdfPages
from textwrap import fill
from matplotlib.ticker import FormatStrFormatter
from operator import add
# could also use holoview: http://holoviews.org/

# Read the 30 model-run result files and concatenate them into one dataframe.
list_csv_file = []
for i in range(270, 300):
    list_csv_file.append("Results_model_run%s.csv" % i)
list_dataframe = []
for filename in list_csv_file:
    list_dataframe.append(pd.read_csv(filename))
merged_dataframes = pd.concat(list_dataframe)

# Aggregate end-of-life masses by pathway (new + used streams) and derive the circular-economy metrics.
merged_dataframes['Cumulative PV capacity'] = (merged_dataframes['Total product']) / 1E9
merged_dataframes['Repaired eol PV modules'] = (merged_dataframes['eol - new repaired weight'] + merged_dataframes['eol - used repaired weight']) / 1E9
merged_dataframes['Sold eol PV modules'] = (merged_dataframes['eol - new sold weight'] + merged_dataframes['eol - used sold weight']) / 1E9
merged_dataframes['Recycled eol PV modules'] = (merged_dataframes['eol - new recycled weight'] + merged_dataframes['eol - used recycled weight']) / 1E9 * 0.8036  # baseline=0.8036 # frelp=0.9685
merged_dataframes['Landfilled eol PV modules'] = (merged_dataframes['eol - new landfilled weight'] + merged_dataframes['eol - used landfilled weight']) / 1E9 + ((1 - 0.8036) * merged_dataframes['Recycled eol PV modules'])  # baseline=0.8036 frelp=0.9685
merged_dataframes['Stored eol PV modules'] = (merged_dataframes['eol - new stored weight'] + merged_dataframes['eol - used stored weight']) / 1E9
merged_dataframes['Recycled'] = merged_dataframes['Recycled eol PV modules']
merged_dataframes['Repaired & reused'] = (merged_dataframes['Sold eol PV modules'] + merged_dataframes['Repaired eol PV modules'])
merged_dataframes['CE pathways'] = merged_dataframes['Recycled'] + merged_dataframes['Repaired & reused']
merged_dataframes['Landfill & Storage'] = (merged_dataframes['Landfilled eol PV modules'] + merged_dataframes['Stored eol PV modules'])
merged_dataframes['Recycled material value (million $)'] = merged_dataframes['Recycled material value'] / 1E6
merged_dataframes['Reuse & repair material value (million $)'] = ((merged_dataframes['End-of-life - repaired'] + merged_dataframes['End-of-life - sold'] * -1 * merged_dataframes['Average selling cost']) / 1E6)
merged_dataframes['Circular material value (million $)'] = (merged_dataframes['Recycled material value (million $)'] + merged_dataframes['Reuse & repair material value (million $)'])
merged_dataframes["volume_recycled"] = (merged_dataframes["eol - new recycled weight"] + merged_dataframes["eol - used recycled weight"])
merged_dataframes['Cumulative modules'] = (merged_dataframes['End-of-life - recycled'] / 235.2)
merged_dataframes['Yearly modules'] = merged_dataframes['Cumulative modules'].sub(merged_dataframes['Cumulative modules'].shift())
# Wp/module = 235.2 (considered constant), Frelp recovery revenue = 0.456647 $/kg, asu = 0.404126, baseline = 0.196257
merged_dataframes['Net income ($/module)'] = (-1 * merged_dataframes['Recycler costs'] / merged_dataframes['Cumulative modules'])
merged_dataframes.loc[~np.isfinite(merged_dataframes['Net income ($/module)']), 'Net income ($/module)'] = 0

# Average across runs by year, then derive yearly and cumulative net income.
merged_dataframes = merged_dataframes.groupby(['Year']).mean()
merged_dataframes['Yearly modules'].iloc[0] = merged_dataframes['Cumulative modules'].iloc[0]
merged_dataframes['Net income ($/module)'].iloc[0] = 0
merged_dataframes['Net income'] = merged_dataframes['Yearly modules'] * merged_dataframes['Net income ($/module)']
merged_dataframes['Cumulative net income'] = merged_dataframes['Net income'].cumsum()
merged_dataframes = merged_dataframes.filter(['Year', 'Repaired eol PV modules', 'Sold eol PV modules', 'Recycled eol PV modules', 'Landfilled eol PV modules', 'Stored eol PV modules', 'Cumulative PV capacity', 'volume_recycled', 'Recycled material volume', 'Net income ($/module)', 'Cumulative modules', 'Yearly modules', 'Net income', 'Cumulative net income', 'Circular material value (million $)', 'Recycled material value (million $)', 'Reuse & repair material value (million $)'], axis=1)
merged_dataframes.to_csv("MergedData.csv")
#print(merged_dataframes['Cumulative net income (without recycling fee)'])

# +
data_in = pd.read_csv("SmallMultiples_3metrics_v3.csv")
data_in2 = pd.read_csv("SmallMultiples_3metrics_v2.csv")
sns.set(style="whitegrid", font_scale=1.6, color_codes=True)
from pylab import rcParams
import matplotlib.ticker as mtick
import textwrap
import matplotlib as mpl
#print(data_in)
from textwrap import wrap

labels = ['Mass of PV materials in EOL pathways (million tonnes)']
labels = ['\n'.join(wrap(l, 30)) for l in labels][0]
data_in[labels] = (data_in["Mass of PV materials in EOL pathways (million tonnes)"])
labels2 = ['Mass fraction of materials in all circular pathways']
labels2 = ['\n'.join(wrap(l, 30)) for l in labels2][0]
data_in2[labels2] = (data_in2["Mass fraction of materials in all circular pathways"] * 100)
#data_in["Recycled"] = data_in["Recycled"] * 100
#data_in["Reused"] = data_in["Reused"] * 100
#data_in["Materials recovered (kg)"] = data_in["Materials recovered (kg)"]
#data_in["Value recovered ($)"] = data_in["Value recovered ($)"]
data_in['Scenario'] = data_in['Scenario'].str.wrap(27)
rcParams['figure.figsize'] = 8, 4

ax = sns.catplot(x="Year", y=labels, col="Scenario", hue='End-of-life pathway', data=data_in, scale=0.7, kind="point", col_wrap=3, inner=None, legend=False)
# Thin out the x tick labels (keep every 10th year).
for tick in ax.axes.flat:
    labels = tick.get_xticklabels()  # get x labels
    for i, l in enumerate(labels):
        #print(i, l)
        if(i % 10 != 0):
            labels[i] = ''  # skip even labels
    tick.set_xticklabels(labels)
    #tick.yaxis.set_major_formatter(mtick.PercentFormatter())
    #ax.set_ylabel('')
    #ax.yaxis.set_major_formatter(mtick.PercentFormatter())
plt.legend(loc='upper left', bbox_to_anchor=(-2.445, 3.46), ncol=1)

# Add a secondary (maroon) axis with the circular mass fraction for each scenario panel.
count = 0
for ax in ax.axes.flat:
    ax.set_ylim(0, 9)
    ax.get_yaxis().set_minor_locator(mpl.ticker.AutoMinorLocator(2))
    ax.get_xaxis().set_minor_locator(mpl.ticker.LinearLocator(10))
    ax.grid(b=True, which='minor', color='lightgrey', linewidth=0.5)
    count += 1
    ax2 = ax.twinx()
    sns.scatterplot(x=range(31), y=data_in2[labels2].iloc[31*(count-1):31*count], data=data_in2, color=['maroon'], marker="D")
    #plt.plot(2050, 50)
    #ax2.axes.get_xaxis().set_visible(False)
    if count != 3 and count != 6 and count != 9:
        ax2.axes.get_yaxis().set_visible(False)
    ax2.yaxis.label.set_color('maroon')
    ax2.tick_params(axis='y', colors='maroon')
    ax2.yaxis.set_major_formatter(mtick.PercentFormatter())
    ax2.set_ylim(0, 100)
    ax2.grid(False)
plt.savefig("figure10.2.png", bbox_inches='tight', figsize=(5.5, 3.5), dpi=500)

# +
data_in = pd.read_csv("SmallMultiples_3metrics.csv")
sns.set(style="whitegrid", font_scale=1.6, color_codes=True)
from pylab import rcParams
import matplotlib.ticker as mtick
import textwrap
#print(data_in)
from textwrap import wrap

labels = ['Material mass fraction from EOL PV modules (%)']
labels = ['\n'.join(wrap(l, 25)) for l in labels][0]
data_in[labels] = (data_in["Material mass fraction from EOL PV modules (%)"] * 100)
#data_in["Recycled"] = data_in["Recycled"] * 100
#data_in["Reused"] = data_in["Reused"] * 100
#data_in["Materials recovered (kg)"] = data_in["Materials recovered (kg)"]
#data_in["Value recovered ($)"] = data_in["Value recovered ($)"]
g = sns.catplot(x="Year", y=labels, kind="violin", col="Scenario", hue='End-of-life pathway', inner=None, data=data_in, col_wrap=3)
#count=0
#for ax in g.axes.flat:
#    count += 1
#    if count == 3 or count == 6 or count == 9:
#        ax.twinx()
#sns.swarmplot(x="Year", y=labels, color="k", col="Scenario", hue='End-of-life pathway',
#              size=3, data=data_in, ax=g.axes)
# -

# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="490FWZrsRT4e"
# # Sentiment Analysis Using Simple RNN, LSTM, GRU, Bidirectional RNN
#
#

# + [markdown] colab_type="text" id="_F-ZEaTpBhOx"
# ## Enabling and Checking for the GPU

# + colab={"base_uri": "https://localhost:8080/", "height": 110} colab_type="code" id="M-eFuJp_BrYa" outputId="b89d527d-f915-4b8e-b4bf-d6dd1762dfab"
import tensorflow as tf
tf.test.is_gpu_available(
    cuda_only=False,
    min_cuda_compute_capability=None
)

# + [markdown] colab_type="text" id="AUoIppa_RiiF"
# ## Installing
# - Graphviz and Hierarchymagic for drawing block diagrams
# - Keras
# - Bokeh for visualizations

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="fsjcKzx8Rpfn" outputId="aae3bdf0-781a-4dfc-aaca-16ee7c5ec339" language="bash"
# apt install graphviz
# pip install keras
# pip install bokeh
# pip install git+https://github.com/tla/ipython-hierarchymagic

# + colab={} colab_type="code" id="tSBL163Gnc9M"
# %load_ext hierarchymagic

# + [markdown] colab_type="text" id="63OYi2oCR4XD"
# ### Checking Bokeh

# + colab={} colab_type="code" id="U8mszQ7qR0gs"
from bokeh.plotting import figure, show
from bokeh.io import output_notebook, push_notebook
# -

output_notebook()

# + [markdown] colab_type="text" id="9qVGCpKztGaS"
# ### Matplotlib for graph plotting.
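# The `plot_graphs` helper defined in the next cell takes a Keras `History` object and a metric
# name (e.g. `plot_graphs(history, 'accuracy')`) and overlays the training curve with its
# `val_`-prefixed validation counterpart; it is reused after each model fit below.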
# + colab={} colab_type="code" id="9x7pzKP9tLRd" import matplotlib.pyplot as plt def plot_graphs(history, metric): plt.plot(history.history[metric]) plt.plot(history.history['val_'+metric], '') plt.xlabel("Epochs") plt.ylabel(metric) plt.legend([metric, 'val_'+metric]) plt.show() # + [markdown] colab_type="text" id="IIc2fG6MDpG8" # ### Importing Numpy, Pandas, Keras # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="tbj23rw5SXfm" outputId="7738140d-970d-4ce2-e384-f22005fdfcfb" import numpy as np import pandas as pd # + [markdown] colab_type="text" id="m_OHiSh5Sl4x" # ## Sentiment Analysis Data # + [markdown] colab_type="text" id="sMJyGX-SpN6U" # ### 1) Movie Review Data # Keras IMDB dataset is used here.This dataset consist of 25,000 movies reviews from IMDB, labeled by sentiment (positive/negative).[Dataset Link](https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification) # + colab={} colab_type="code" id="eUZXDcDYUd3b" from tensorflow.keras.datasets import imdb # + colab={"base_uri": "https://localhost:8080/", "height": 126} colab_type="code" id="s4nASiKQUxnn" outputId="dbd6c4d4-7f5a-43df-9d70-8f065314aea7" (x_train, y_train), (x_test, y_test) = imdb.load_data() print(x_train.shape) print(x_test.shape) print(y_train.shape) print(y_test.shape) # + [markdown] colab_type="text" id="9DxwpnHsGPJV" # ### Dataset Instances # + colab={"base_uri": "https://localhost:8080/", "height": 164} colab_type="code" id="m43gz-YGuxaN" outputId="f36e571d-7fd5-4665-95a7-b1b1db3bd9f4" word_to_id = imdb.get_word_index() INDEX_FROM=3 word_to_id = {k:(v+INDEX_FROM) for k,v in word_to_id.items()} word_to_id[""] = 0 #Padding word_to_id[""] = 1 #Start Index word_to_id[""] = 2 #Unkown Words id_to_word = {value:key for key,value in word_to_id.items()} for i in range(5): print("REVIEW",str(i+1),"\t",' '.join(id_to_word[id] for id in x_train[i] )) # + [markdown] colab_type="text" id="RrAs5ePietE8" # Dictionary of words # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="OZgadwClXORP" outputId="5eb34724-87b7-4e17-fe0a-46266d110327" dictionary=word_to_id len(dictionary) # + [markdown] colab_type="text" id="Or_fbxD0lkH4" # ### 2) Hotel Review Dataset # I will also add another dataset to the existing training dataset. This dataset contains 515,000 customer reviews and scoring of 1493 luxury hotels across Europe. 
Meanwhile, the geographical location of hotels are also provided for further analysis.This dataset is fetched from [booking.com](https://www.booking.com/) # - # !gdown --id 1e1SsVct7B6m3GLKlYKHb4ZGy8Ws4K35r --output 515k-hotel-reviews-data-in-europe.zip # + colab={} colab_type="code" id="bxlrpPUNlIXa" # !unzip -q 515k-hotel-reviews-data-in-europe.zip # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="vEErOOA5lLKS" outputId="662da624-7a06-41a4-e188-378eb28ad265" # !ls # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="x7rzo0jslOe-" outputId="d4ff354d-b51c-44c2-9468-5b49680ef2aa" df = pd.read_csv("Hotel_Reviews.csv", parse_dates=['Review_Date']) df.shape # + colab={"base_uri": "https://localhost:8080/", "height": 695} colab_type="code" id="nyE-RDBhle8S" outputId="0f749e05-f12c-45cd-9a48-72e8de30e1ed" df.head() # + colab={} colab_type="code" id="5iWYO7gjnVUt" df["review"] = df["Negative_Review"] + df["Positive_Review"] df["review_type"] = df["Reviewer_Score"].apply( lambda x: "Negative" if x < 7 else "Positive" ) # + colab={} colab_type="code" id="yHGVM5R_npiw" df = df[["review", "review_type"]] # + colab={"base_uri": "https://localhost:8080/", "height": 72} colab_type="code" id="p8RAtWFfnwMC" outputId="06bb335f-1eb4-4d9d-b056-75409be58375" df.review_type.value_counts() # + colab={"base_uri": "https://localhost:8080/", "height": 351} colab_type="code" id="1-gkHW5in3L3" outputId="944bc18c-52f6-4512-9197-2bc60e1a9f20" import seaborn as sns sns.countplot( x='review_type', data=df, order=df.review_type.value_counts().index ) plt.xlabel("type") plt.title("Review type"); # + colab={"base_uri": "https://localhost:8080/", "height": 424} colab_type="code" id="0xlMsbLRorBj" outputId="7991e869-653e-4c61-9d6d-1b4399644d74" df # + [markdown] colab_type="text" id="pZY-T6fnfkrL" # ## Data Preprocessing # + [markdown] colab_type="text" id="XEjBbAbk-OvI" # Blancing the hotel review dataset # + colab={} colab_type="code" id="9TaGX1M1-NyV" positive_reviews = df[df.review_type == "Positive"] negative_reviews = df[df.review_type == "Negative"] # + colab={} colab_type="code" id="JJyU8T99-xNl" RANDOM_SEED = 42 positive_df = positive_reviews.sample(n=len(negative_reviews), random_state=RANDOM_SEED) negative_df = negative_reviews # + colab={"base_uri": "https://localhost:8080/", "height": 424} colab_type="code" id="bO0jutrn_Nr4" outputId="5a9ac7dd-4292-445e-dc07-410c90ab2776" review_df = positive_df.append(negative_df).reset_index(drop=True) review_df # + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="xDObodq2_o2n" outputId="1d12ca91-0ca1-4ddd-d96c-9e1154c8590f" sns.countplot( x='review_type', data=review_df, order=review_df.review_type.value_counts().index ) plt.xlabel("type") plt.title("Review type (resampled)"); # + [markdown] colab_type="text" id="HcPO_hLNtG1f" # Function to remove punctuations. # + colab={} colab_type="code" id="Xvmmk1a9tTs4" from string import punctuation def rem_punc(s): r=''.join([c for c in s if c not in punctuation]) return r # + [markdown] colab_type="text" id="DAEC9I2kruOW" # Function to encode sentence using the word dictionary. # + colab={} colab_type="code" id="jg1pse-ortCF" def enc_sen(s): e=[1] #start wl=s.lower().strip().split(" ") for w in wl: if w in dictionary: e.append(dictionary[w]) else: e.append(2) #Unknown return e # + [markdown] colab_type="text" id="MMYnVlg42JHL" # Function to encode labels { 1->Positive 0->Negative }. 
# + colab={} colab_type="code" id="2t2jM0Uu2Xac" def enc_labels(l): d={'Positive':1,'Negative':0} a=np.array([d[x] for x in l]) return a # + [markdown] colab_type="text" id="YF2zgJ2huATj" # Review dataframe to list. # + colab={} colab_type="code" id="60NJcXCrqMoc" rev_list=list(review_df['review']) # + colab={} colab_type="code" id="2PD8a7on4Msz" lab_list=list(review_df['review_type']) # + [markdown] colab_type="text" id="EhONxGETuJzq" # Removing Punctuation # + colab={"base_uri": "https://localhost:8080/", "height": 128} colab_type="code" id="6hd8vF5FrPiZ" outputId="61d4b884-e8e1-4eab-e3a2-e0b77ff17247" rev_list=[rem_punc(x) for x in rev_list] print("\n".join(rev_list[:5])) #Top five reviews in review list # + [markdown] colab_type="text" id="XaPj7x_1vxK4" # Encoding Sentences # + colab={"base_uri": "https://localhost:8080/", "height": 164} colab_type="code" id="r3EK9Lcnv1zG" outputId="dcd81eb5-ce3a-4a6a-cd92-6c2a0e7e7277" enc_rev=np.array([enc_sen(m) for m in rev_list]) enc_rev # + [markdown] colab_type="text" id="nT8tY79U3xBO" # Encoding Labels # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="-6np5YwN0aw5" outputId="7c2e7fe1-4089-4096-eeb0-3ec8c0cc49ee" enc_labs=enc_labels(lab_list) enc_labs # + [markdown] colab_type="text" id="3xT5Xmou4mdB" # Train-Test Split # + colab={} colab_type="code" id="f40Px6ir4qbv" from sklearn.model_selection import train_test_split x_train1, x_test1, y_train1, y_test1 = train_test_split( enc_rev, enc_labs, test_size=.1, random_state=RANDOM_SEED ) # + colab={"base_uri": "https://localhost:8080/", "height": 90} colab_type="code" id="GwJvV3uW5rCk" outputId="de35c46f-e1ad-4eb2-caed-383dba07ed2f" print(x_train1.shape) print(x_test1.shape) print(y_train1.shape) print(y_test1.shape) # + colab={"base_uri": "https://localhost:8080/", "height": 90} colab_type="code" id="6fsc0LAn5wyX" outputId="d7f524a4-01ae-4f97-accb-26d76c91edf5" print(x_train.shape) print(x_test.shape) print(y_train.shape) print(y_test.shape) # + [markdown] colab_type="text" id="EnjiuqX1DRfM" # Concatenating the test and train sets of both datasets # + colab={} colab_type="code" id="XtAmZOIv7ZZE" x_train=np.concatenate((x_train,x_train1)) x_test=np.concatenate((x_test,x_test1)) y_train=np.concatenate((y_train,y_train1)) y_test=np.concatenate((y_test,y_test1)) # + colab={"base_uri": "https://localhost:8080/", "height": 90} colab_type="code" id="7j01XBYiDBGI" outputId="acddae67-34a1-442d-cf78-37edadd9a6ac" print(x_train.shape) print(x_test.shape) print(y_train.shape) print(y_test.shape) # + [markdown] colab_type="text" id="ZX1BiA4lqIN9" # The RNN will take sequences of constant length. This length is the `words_limit` which is defined to be 100. Since the reviews differ heavily in terms of lengths, I will trim each review to its first 200 words. If reviews are shorter than 200 words I will pad them with zeros. # + colab={} colab_type="code" id="SrM7FlK_iSsf" words_limit=200 # + colab={} colab_type="code" id="G5rn3G_dmRN_" import random import json from six.moves import range import six # + [markdown] colab_type="text" id="pH8X3ava6n8W" # Function to pad sequences to word_limit # + colab={} colab_type="code" id="1xGzfrLNfj5R" def pad_sequences(sequences, maxlen=None): dtype='int32';padding='pre';truncating='pre';value=0. num_samples = len(sequences) lengths = [] sample_shape = () flag = True # take the sample shape from the first non empty sequence # checking for consistency in the main loop below. 
for x in sequences: try: lengths.append(len(x)) if flag and len(x): sample_shape = np.asarray(x).shape[1:] flag = False except TypeError: raise ValueError('`sequences` must be a list of iterables. ' 'Found non-iterable: ' + str(x)) if maxlen is None: maxlen = np.max(lengths) is_dtype_str = np.issubdtype(dtype, np.str_) or np.issubdtype(dtype, np.unicode_) x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype) for idx, s in enumerate(sequences): if not len(s): continue # empty list/array was found if truncating == 'pre': trunc = s[-maxlen:] elif truncating == 'post': trunc = s[:maxlen] trunc = np.asarray(trunc, dtype=dtype) if padding == 'post': x[idx, :len(trunc)] = trunc elif padding == 'pre': x[idx, -len(trunc):] = trunc return x # + [markdown] colab_type="text" id="ZLwJXcDIf9wF" # Padding the sequences # + colab={"base_uri": "https://localhost:8080/", "height": 53} colab_type="code" id="eDWse0BSfj_E" outputId="1125c8e4-09c2-4f74-fa3a-da442af19f8a" x_train_seq = pad_sequences(x_train, maxlen=words_limit) x_test_seq = pad_sequences(x_test, maxlen=words_limit) print('train shape:', x_train_seq.shape) print('test shape:', x_test_seq.shape) # + colab={"base_uri": "https://localhost:8080/", "height": 144} colab_type="code" id="tlkZTs3kvvT7" outputId="f5047fbe-0b92-450a-f20d-d361e7c751ad" x_train_seq # + [markdown] colab_type="text" id="vnFjkUXIVjU9" # Keras Models and Layers # + colab={} colab_type="code" id="LZKhJIEhO_p9" from tensorflow.keras.models import Sequential, Model from tensorflow.keras.layers import Embedding, SimpleRNN, Dense, Dropout, Activation, Input, LSTM, GRU, Bidirectional # + [markdown] colab_type="text" id="Bhs9axrqxfAf" # ## RNN Model for Sentiment Analysis # # + colab={"base_uri": "https://localhost:8080/", "height": 184} colab_type="code" id="knaAJVhEqE30" outputId="03ac0b3f-6e23-48de-a83c-c7f5a57f1aa0" # %%dot digraph G { rankdir=LR; node1 [label="RNN_Cell"]; node2 [label="RNN_Cell"]; node3 [label="RNN_Cell"]; first_word -> node1; second_word -> node2; nth_word -> node3; node1 -> node2; node2 -> node3; node3 -> Output_Cell; } # - # ### The model # + colab={} colab_type="code" id="g7fpxHE3iY0W" rnn_input = Input(shape=(words_limit,)) embedding = Embedding(len(dictionary), 128, input_length=words_limit)(rnn_input) simple_rnn = SimpleRNN(128)(embedding) dropout = Dropout(0.4)(simple_rnn) dense = Dense(1)(dropout) activation = Activation('sigmoid')(dense) model1 = Model(rnn_input, activation) # + colab={"base_uri": "https://localhost:8080/", "height": 380} colab_type="code" id="yrqP4S8_kq_H" outputId="256b987e-5c3e-4e32-e880-36009603f000" model1.summary() # + [markdown] colab_type="text" id="xZ6xspYRPpG5" # Optimizer: Adam and Loss Model: Binary CrossEntropy # + colab={} colab_type="code" id="utnMKHdjkuDQ" model1.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) # - # Creating checkpoints to save best model # + import os checkpoint_path_rnn = "rnn_model.h5" checkpoint_dir = os.path.dirname(checkpoint_path_rnn) cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path_rnn, save_weights_only=False, save_best_only=True, verbose=1) # + colab={"base_uri": "https://localhost:8080/", "height": 53} colab_type="code" id="m4ZgxJIalJrr" outputId="b6dcd7e8-3da3-45f7-847e-7ec92a96c533" history=model1.fit(x_train_seq, y_train, batch_size=32, epochs=10, validation_data=(x_test_seq, y_test),callbacks=[cp_callback]) # - # ## Performance Measure of the model # + colab={"base_uri": "https://localhost:8080/", "height": 35} 
colab_type="code" id="zHIEepZ1Tw3k" outputId="d9b80d66-6bef-4ec4-ee4d-9a20384ff63c" history.history.keys() # - for j in history.history.keys(): print(history.history[j]) plot_graphs(history,'loss') plot_graphs(history,'accuracy') # + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" id="zIwtTxhCT2FQ" outputId="cd1ba83a-7a87-48cf-b4f2-4615fc0508e5" p = figure(title="Loss History", x_axis_label='Time', y_axis_label='Loss') range(len(history.history['val_loss'])) p.line(range(len(history.history['val_loss'])), history.history['val_loss'], legend="Val. Loss", line_width=2, line_color='orange') p.line(range(len(history.history['loss'])), history.history['loss'], legend="Loss", line_width=2, line_color='blue') # - output_notebook() show(p) p = figure(title="Accuracy History", x_axis_label='Time', y_axis_label='Loss') p.line(range(len(history.history['val_accuracy'])), history.history['val_accuracy'], legend="Val. Acc", line_width=2, line_color='orange') p.line(range(len(history.history['accuracy'])), history.history['accuracy'], legend="Acc", line_width=2, line_color='blue') output_notebook() show(p) # ## Loading the Model Loaded_model1 = tf.keras.models.load_model( checkpoint_path_rnn ) # + [markdown] colab_type="text" id="wZFRvw5RUQp-" # ## Loss Score and Accuracy # + colab={"base_uri": "https://localhost:8080/", "height": 72} colab_type="code" id="f-SiWS3jRY95" outputId="59a1ff3f-9d12-4c8a-cf0a-485fc2d3b7d7" score, acc = Loaded_model1.evaluate(x_test_seq, y_test, batch_size=32, verbose=0) print("Loss Score: ",score) print("Accuracy: ",acc) # - # ## Confusion Matrix y_pred=Loaded_model1.predict(x_test_seq) y_pred=[1 if x>=0.5 else 0 for x in y_pred] con_mat = tf.math.confusion_matrix(labels=y_test, predictions=y_pred).numpy() con_mat_norm = np.around(con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis], decimals=2) con_mat_df = pd.DataFrame(con_mat_norm, index = ['POSITIVE','NEGATIVE'], columns = ['POSITIVE','NEGATIVE']) figure = plt.figure(figsize=(8, 8)) sns.heatmap(con_mat_df, annot=True,cmap=plt.cm.Blues) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') plt.show() # ## LSTM Model for Sentiment Analysis lstm_input = Input(shape=(words_limit,)) embedding = Embedding(len(dictionary), 128, input_length=words_limit)(lstm_input) simple_lstm = LSTM(128)(embedding) dropout = Dropout(0.4)(simple_lstm) dense = Dense(1)(dropout) activation = Activation('sigmoid')(dense) model2 = Model(lstm_input, activation) model2.summary() # + [markdown] colab_type="text" id="xZ6xspYRPpG5" # Optimizer: Adam and Loss Model: Binary CrossEntropy # + colab={} colab_type="code" id="utnMKHdjkuDQ" model2.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) # - # Creating checkpoints to save best model # + import os checkpoint_path_lstm = "lstm_model.h5" checkpoint_dir = os.path.dirname(checkpoint_path_lstm) cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path_lstm, save_weights_only=False, save_best_only=True, verbose=1) # + colab={"base_uri": "https://localhost:8080/", "height": 53} colab_type="code" id="m4ZgxJIalJrr" outputId="b6dcd7e8-3da3-45f7-847e-7ec92a96c533" history=model2.fit(x_train_seq, y_train, batch_size=32, epochs=10, validation_data=(x_test_seq, y_test),callbacks=[cp_callback]) # - # ## Performance Measure of the model # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="zHIEepZ1Tw3k" outputId="d9b80d66-6bef-4ec4-ee4d-9a20384ff63c" history.history.keys() # - for j in 
history.history.keys(): print(history.history[j]) plot_graphs(history,'loss') plot_graphs(history,'accuracy') # + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" id="zIwtTxhCT2FQ" outputId="cd1ba83a-7a87-48cf-b4f2-4615fc0508e5" p = figure(title="Loss History", x_axis_label='Time', y_axis_label='Loss') range(len(history.history['val_loss'])) p.line(range(len(history.history['val_loss'])), history.history['val_loss'], legend="Val. Loss", line_width=2, line_color='orange') p.line(range(len(history.history['loss'])), history.history['loss'], legend="Loss", line_width=2, line_color='blue') # - output_notebook() show(p) p = figure(title="Accuracy History", x_axis_label='Time', y_axis_label='Loss') p.line(range(len(history.history['val_accuracy'])), history.history['val_accuracy'], legend="Val. Acc", line_width=2, line_color='orange') p.line(range(len(history.history['accuracy'])), history.history['accuracy'], legend="Acc", line_width=2, line_color='blue') output_notebook() show(p) # ## Loading the Model Loaded_model2 = tf.keras.models.load_model( checkpoint_path_lstm ) # + [markdown] colab_type="text" id="wZFRvw5RUQp-" # ## Loss Score and Accuracy # + colab={"base_uri": "https://localhost:8080/", "height": 72} colab_type="code" id="f-SiWS3jRY95" outputId="59a1ff3f-9d12-4c8a-cf0a-485fc2d3b7d7" score, acc = Loaded_model2.evaluate(x_test_seq, y_test, batch_size=32, verbose=0) print("Loss Score: ",score) print("Accuracy: ",acc) # - # ## Confusion Matrix y_pred=Loaded_model2.predict(x_test_seq) y_pred=[1 if x>=0.5 else 0 for x in y_pred] con_mat = tf.math.confusion_matrix(labels=y_test, predictions=y_pred).numpy() con_mat_norm = np.around(con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis], decimals=2) con_mat_df = pd.DataFrame(con_mat_norm, index = ['POSITIVE','NEGATIVE'], columns = ['POSITIVE','NEGATIVE']) figure = plt.figure(figsize=(8, 8)) sns.heatmap(con_mat_df, annot=True,cmap=plt.cm.Blues) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') plt.show() # ## GRU Model for Sentiment Analysis gru_input = Input(shape=(words_limit,)) embedding = Embedding(len(dictionary), 128, input_length=words_limit)(gru_input) gru = GRU(128)(embedding) dropout = Dropout(0.4)(gru) dense = Dense(1)(dropout) activation = Activation('sigmoid')(dense) model3 = Model(gru_input, activation) model3.summary() # + [markdown] colab_type="text" id="xZ6xspYRPpG5" # Optimizer: Adam and Loss Model: Binary CrossEntropy # + colab={} colab_type="code" id="utnMKHdjkuDQ" model3.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) # - # Creating checkpoints to save best model # + import os checkpoint_path_gru = "gru_model.h5" checkpoint_dir = os.path.dirname(checkpoint_path_gru) cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path_gru, save_weights_only=False, save_best_only=True, verbose=1) # + colab={"base_uri": "https://localhost:8080/", "height": 53} colab_type="code" id="m4ZgxJIalJrr" outputId="b6dcd7e8-3da3-45f7-847e-7ec92a96c533" history=model3.fit(x_train_seq, y_train, batch_size=32, epochs=10, validation_data=(x_test_seq, y_test),callbacks=[cp_callback]) # - # ## Performance Measure of the model # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="zHIEepZ1Tw3k" outputId="d9b80d66-6bef-4ec4-ee4d-9a20384ff63c" history.history.keys() # - for j in history.history.keys(): print(history.history[j]) plot_graphs(history,'loss') plot_graphs(history,'accuracy') # + colab={"base_uri": 
"https://localhost:8080/", "height": 71} colab_type="code" id="zIwtTxhCT2FQ" outputId="cd1ba83a-7a87-48cf-b4f2-4615fc0508e5" p = figure(title="Loss History", x_axis_label='Time', y_axis_label='Loss') range(len(history.history['val_loss'])) p.line(range(len(history.history['val_loss'])), history.history['val_loss'], legend="Val. Loss", line_width=2, line_color='orange') p.line(range(len(history.history['loss'])), history.history['loss'], legend="Loss", line_width=2, line_color='blue') # - output_notebook() show(p) p = figure(title="Accuracy History", x_axis_label='Time', y_axis_label='Loss') p.line(range(len(history.history['val_accuracy'])), history.history['val_accuracy'], legend="Val. Acc", line_width=2, line_color='orange') p.line(range(len(history.history['accuracy'])), history.history['accuracy'], legend="Acc", line_width=2, line_color='blue') output_notebook() show(p) # ## Loading the Model checkpoint_path_gru = "gru_model.h5" Loaded_model3 = tf.keras.models.load_model( checkpoint_path_gru ) # + [markdown] colab_type="text" id="wZFRvw5RUQp-" # ## Loss Score and Accuracy # + colab={"base_uri": "https://localhost:8080/", "height": 72} colab_type="code" id="f-SiWS3jRY95" outputId="59a1ff3f-9d12-4c8a-cf0a-485fc2d3b7d7" score, acc = Loaded_model3.evaluate(x_test_seq, y_test, batch_size=32, verbose=0) print("Loss Score: ",score) print("Accuracy: ",acc) # - # ## Confusion Matrix y_pred=Loaded_model3.predict(x_test_seq) y_pred=[1 if x>=0.5 else 0 for x in y_pred] con_mat = tf.math.confusion_matrix(labels=y_test, predictions=y_pred).numpy() con_mat_norm = np.around(con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis], decimals=2) con_mat_df = pd.DataFrame(con_mat_norm, index = ['POSITIVE','NEGATIVE'], columns = ['POSITIVE','NEGATIVE']) figure = plt.figure(figsize=(8, 8)) sns.heatmap(con_mat_df, annot=True,cmap=plt.cm.Blues) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') plt.show() # ## Bidirectional RNN Model for Sentiment Analysis bidir_rnn_input = Input(shape=(words_limit,)) embedding = Embedding(len(dictionary), 128, input_length=words_limit)(bidir_rnn_input) bidir_lstm = Bidirectional(LSTM(128))(embedding) dropout = Dropout(0.4)(bidir_lstm) dense = Dense(1)(dropout) activation = Activation('sigmoid')(dense) model4 = Model(bidir_rnn_input, activation) model4.summary() # + [markdown] colab_type="text" id="xZ6xspYRPpG5" # Optimizer: Adam and Loss Model: Binary CrossEntropy # + colab={} colab_type="code" id="utnMKHdjkuDQ" model4.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) # - # Creating checkpoints to save best model # + import os checkpoint_path_bidir_rnn = "bidir_rnn_model.h5" checkpoint_dir = os.path.dirname(checkpoint_path_bidir_rnn) cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path_bidir_rnn, save_weights_only=False, save_best_only=True, verbose=1) # + colab={"base_uri": "https://localhost:8080/", "height": 53} colab_type="code" id="m4ZgxJIalJrr" outputId="b6dcd7e8-3da3-45f7-847e-7ec92a96c533" history=model4.fit(x_train_seq, y_train, batch_size=32, epochs=10, validation_data=(x_test_seq, y_test),callbacks=[cp_callback]) # - # ## Performance Measure of the model # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="zHIEepZ1Tw3k" outputId="d9b80d66-6bef-4ec4-ee4d-9a20384ff63c" history.history.keys() # - for j in history.history.keys(): print(history.history[j]) plot_graphs(history,'loss') plot_graphs(history,'accuracy') # + colab={"base_uri": "https://localhost:8080/", 
"height": 71} colab_type="code" id="zIwtTxhCT2FQ" outputId="cd1ba83a-7a87-48cf-b4f2-4615fc0508e5" p = figure(title="Loss History", x_axis_label='Time', y_axis_label='Loss') range(len(history.history['val_loss'])) p.line(range(len(history.history['val_loss'])), history.history['val_loss'], legend="Val. Loss", line_width=2, line_color='orange') p.line(range(len(history.history['loss'])), history.history['loss'], legend="Loss", line_width=2, line_color='blue') # - output_notebook() show(p) p = figure(title="Accuracy History", x_axis_label='Time', y_axis_label='Loss') p.line(range(len(history.history['val_accuracy'])), history.history['val_accuracy'], legend="Val. Acc", line_width=2, line_color='orange') p.line(range(len(history.history['accuracy'])), history.history['accuracy'], legend="Acc", line_width=2, line_color='blue') output_notebook() show(p) # ## Loading the Model Loaded_model4 = tf.keras.models.load_model( checkpoint_path_bidir_rnn ) # + [markdown] colab_type="text" id="wZFRvw5RUQp-" # ## Loss Score and Accuracy # + colab={"base_uri": "https://localhost:8080/", "height": 72} colab_type="code" id="f-SiWS3jRY95" outputId="59a1ff3f-9d12-4c8a-cf0a-485fc2d3b7d7" score, acc = Loaded_model4.evaluate(x_test_seq, y_test, batch_size=32, verbose=0) print("Loss Score: ",score) print("Accuracy: ",acc) # - # ## Confusion Matrix y_pred=Loaded_model4.predict(x_test_seq) y_pred=[1 if x>=0.5 else 0 for x in y_pred] con_mat = tf.math.confusion_matrix(labels=y_test, predictions=y_pred).numpy() con_mat_norm = np.around(con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis], decimals=2) con_mat_df = pd.DataFrame(con_mat_norm, index = ['POSITIVE','NEGATIVE'], columns = ['POSITIVE','NEGATIVE']) figure = plt.figure(figsize=(8, 8)) sns.heatmap(con_mat_df, annot=True,cmap=plt.cm.Blues) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') plt.show() # ## Prediction of the models # Some reviews to predict. sentences=[ "It is one of the greatest movies of all time. I love the action, drama and horror in the movie. :)", "I wish I had never come to watch this movie. The worst movie of my life. :(" ] # Removing punctuation. sentences2 = [rem_punc(s) for s in sentences] sentences2 # Encoding Sentences sentences3 = np.array([enc_sen(s) for s in sentences2]) sentences3 # Padding the encodings sentence_lst = pad_sequences(sentences3,maxlen=words_limit) sentence_lst # ### Model 1 # Simple RNN Model model_path_rnn='rnn_model.h5' RNN_Model = tf.keras.models.load_model( model_path_rnn ) RNN_Model.predict(sentence_lst[0]) # + [markdown] colab_type="text" id="Sz5DDjQlE1ZG" # # Conclusion # RNN and its kind are a great way to do sentiment analysis with minimum amount of workflow. The validation accuracy is going down and the model accuracy is going up [overfitting]. Since the change is negligible, we can ignore this. # + colab={} colab_type="code" id="VxPkViEi0VFa" # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + [markdown] colab_type="text" id="NMEswXWh9mqw" # # ASSIGNMENT # # ### 1) Replicate the lesson code. I recommend that you [do not copy-paste](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit). # # Get caught up to where we got our example in class and then try and take things further. 
How close to "pixel perfect" can you make the lecture graph? # # Once you have something that you're proud of, share your graph in the cohort channel and move on to the second exercise. # # ### 2) Reproduce another example from [FiveThityEight's shared data repository](https://data.fivethirtyeight.com/). # # **WARNING**: There are a lot of very custom graphs and tables at the above link. I **highly** recommend not trying to reproduce any that look like a table of values or something really different from the graph types that we are already familiar with. Search through the posts until you find a graph type that you are more or less familiar with: histogram, bar chart, stacked bar chart, line chart, [seaborn relplot](https://seaborn.pydata.org/generated/seaborn.relplot.html), etc. Recreating some of the graphics that 538 uses would be a lot easier in Adobe photoshop/illustrator than with matplotlib. # # - If you put in some time to find a graph that looks "easy" to replicate you'll probably find that it's not as easy as you thought. # # - If you start with a graph that looks hard to replicate you'll probably run up against a brick wall and be disappointed with your afternoon. # # # # # # # # # # # # + [markdown] id="EsKWzuqf8A_h" colab_type="text" # ###Make Prototypes # + id="7SY1ZHawyZvz" colab_type="code" outputId="67eac46e-991c-42d2-888e-1b618713bc78" colab={"base_uri": "https://localhost:8080/", "height": 285} # %matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd plt.style.use('fivethirtyeight') fake = pd.Series([38,3,2,1,2,4,6,5,5,33], index=range(1,11)) fake.plot.bar(color='#ed713a', width=0.9); # + id="_Bu6u3jN8zI6" colab_type="code" outputId="1909a371-fb4b-410b-f5f5-28815ff87c96" colab={"base_uri": "https://localhost:8080/", "height": 508} style_list = ['default', 'classic'] + sorted( style for style in plt.style.available if style != 'classic') style_list # + id="_C2uwBX59CaD" colab_type="code" outputId="2990f47d-dee8-4edb-a1c2-3660c89a9d4b" colab={"base_uri": "https://localhost:8080/", "height": 126} fake2 = pd.Series( [1, 1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 2,2,2, 3,3,3, 4,4, 5,5,5, 6,6,6,6, 7,7,7,7,7, 8,8,8,8, 9,9,9,9, 10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10]) fake2.head() # + id="-JB02j-B9dUF" colab_type="code" outputId="eb3951e5-408c-431e-9e41-2a7d325ed519" colab={"base_uri": "https://localhost:8080/", "height": 303} plt.style.use('fivethirtyeight') fake2.value_counts().sort_index().plot.bar(color='#ed713a', width=0.9) # + [markdown] id="fNhRt6E-9qOQ" colab_type="text" # ###Annotate with text # + id="qMy8zubk9sO_" colab_type="code" outputId="4a0476a8-690e-4d67-a837-fc91a074df39" colab={"base_uri": "https://localhost:8080/", "height": 355} from IPython.display import display, Image url = 'https://fivethirtyeight.com/wp-content/uploads/2017/09/mehtahickey-inconvenient-0830-1.png' example = Image(url=url, width=400) display(example) # + id="2U5vDeLu95mD" colab_type="code" outputId="0a17a37e-2f00-4d43-fd0d-8f890648015d" colab={"base_uri": "https://localhost:8080/", "height": 325} fig = plt.figure(facecolor='black') ax = fake2.value_counts().sort_index().plot.bar(color="#ed713a", width=0.9); ax.set(facecolor='black') plt.xlabel('Rating', color='white') plt.ylabel('Percent of total votes', color='white') # + id="UUANk-I7-UkA" colab_type="code" outputId="751572fe-d5f8-42b6-f710-a8795a03b555" colab={"base_uri": "https://localhost:8080/", "height": 35} list(range(0,50,10)) # + id="hcK15YbJ-XJQ" colab_type="code" 
outputId="5b84362a-7ebe-4606-8825-d291303ae9fd" colab={"base_uri": "https://localhost:8080/", "height": 453} fig = plt.figure(facecolor='white', figsize=(5,4)) ax = fake.plot.bar(color='#ed713a', width=0.9) ax.set(facecolor='white') ax.patch.set_alpha(0.1) plt.xlabel('Rating', fontweight='bold') plt.ylabel('Percent of total votes', fontweight='bold') plt.title('`An Inconvenient Sequel: Truth to Power` is divisive', fontsize=12, loc='left', x=-0.1, y=1.1, fontweight= 'bold') plt.text(x=-1.7, y=fake.max() + 4, s='IMDb ratings for the film as of Aug. 29', fontsize=10) plt.xticks(rotation=0, color='#a7a7a7') plt.yticks(range(0,50,10), labels=[f'{i}' if i != 40 else f'{i}%' for i in range(0,50,10)], color='#a7a7a7') # + [markdown] id="yan3cosIALoc" colab_type="text" # ###Reproduce with real data # + id="_7CsJyMeAmgu" colab_type="code" outputId="378dfbbb-03f1-4367-a412-38f4fa7c6fc0" colab={"base_uri": "https://localhost:8080/", "height": 305} df = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/data/master/inconvenient-sequel/ratings.csv') df.head() # + id="5YNghW8MA3xf" colab_type="code" outputId="411057db-b59b-42a9-d9a8-b07ec7208f29" colab={"base_uri": "https://localhost:8080/", "height": 526} df.dtypes # + id="RY4X7DsKA8_N" colab_type="code" outputId="670014f3-2b04-458e-8067-8039fc4f65a0" colab={"base_uri": "https://localhost:8080/", "height": 308} df['timestamp'] = pd.to_datetime(df['timestamp']) df.describe() # + id="DfjUyH-jBGh2" colab_type="code" outputId="cebf73a6-5fb1-4fc6-fe41-f8805079454b" colab={"base_uri": "https://localhost:8080/", "height": 35} df['timestamp'].min() # + id="Alm8wTsrBMWj" colab_type="code" outputId="359b2b0c-e6f3-4516-f11e-912d8dd6dcc0" colab={"base_uri": "https://localhost:8080/", "height": 35} df['timestamp'].max() # + id="jP_u4hKVBOwr" colab_type="code" outputId="0199393a-f6dc-42d8-85ef-ff7226b231e4" colab={"base_uri": "https://localhost:8080/", "height": 335} df = df.set_index('timestamp') df.head() # + id="7o3C2luCBSnE" colab_type="code" outputId="01e67fa6-c77a-40b9-925b-8f8ae4f255c9" colab={"base_uri": "https://localhost:8080/", "height": 1000} df['2017-08-29'] # + id="dZzZDkbIBYCY" colab_type="code" outputId="6fb663fe-cf93-4d1f-afa2-af6a96c2c67a" colab={"base_uri": "https://localhost:8080/", "height": 335} lastday = df['2017-08-29'] lastday_filtered = lastday[lastday['category'] == 'IMDb users'] lastday_filtered.head() # + id="i3RxZ27TBmds" colab_type="code" outputId="d8034cc9-b8aa-4049-c1c6-832218452a9d" colab={"base_uri": "https://localhost:8080/", "height": 315} lastday_filtered['respondents'].plot() # + id="Y8RDbAGuBrwC" colab_type="code" outputId="ac8af7c7-38d4-4f96-f9d9-058863211b0a" colab={"base_uri": "https://localhost:8080/", "height": 54} lastday_filtered['category'].value_counts() # + id="xuJkHy_PBy2B" colab_type="code" outputId="177db95d-78b8-4aa5-82f8-491eefbdf493" colab={"base_uri": "https://localhost:8080/", "height": 199} pct_columns = [f'{i}_pct' for i in range(1,11)] pct_columns # + id="TCAsO0jYBygb" colab_type="code" colab={} final = lastday_filtered.tail(1) # + id="2-1LjPwvB_C0" colab_type="code" outputId="12d593a9-463d-4c13-f3f5-d5c97f195552" colab={"base_uri": "https://localhost:8080/", "height": 145} final # + id="-RSLzOK4CCxb" colab_type="code" outputId="c388734f-b170-4c00-dee0-cd1e5f6185f3" colab={"base_uri": "https://localhost:8080/", "height": 348} final[pct_columns].T # + id="PJNnqGomCG7W" colab_type="code" colab={} plot_data = final[pct_columns].T plot_data.index = range(1,11) # + id="9ylMSh8xCOP_" 
colab_type="code" outputId="cfe4e8ac-155a-4a00-c4bd-67b78631fdf4" colab={"base_uri": "https://localhost:8080/", "height": 348} plot_data # + id="PItaLcFhCSu8" colab_type="code" outputId="25796a09-04ce-492e-bcf3-03774525d8d2" colab={"base_uri": "https://localhost:8080/", "height": 366} plt.style.use('fivethirtyeight') ax = plot_data.plot.bar(color='#ed713a', width=0.9) plt.xlabel('Rating', fontsize=9, fontweight='bold') plt.ylabel('Percent of total votes', fontsize=9, fontweight='bold') plt.title('`An Inconvenient Sequel: Truth to Power` is divisive', fontsize=12, x=-0.1, y=1.1, loc='left', fontweight='bold', fontname='Tahoma') plt.text(x=-1.7, y=plot_data.max() + 4, s='IMDb ratings for the film as of Aug. 29', fontsize=11) plt.xticks(rotation=0, color='#a7a7a7', fontsize=8, clip_on='false') plt.yticks(range(0,50,10), labels=[f'{i}' if i != 40 else f'{i}%' for i in range(0,50,10)], color='#a7a7a7', fontsize=8, clip_on='false') ax.set(facecolor='white') fig = plt.figure(facecolor='white', edgecolor='white') fig.patch.set_facecolor('white') legend = ax.legend() legend.remove() display(fig) # + id="GiC-_BeSFIzI" colab_type="code" outputId="2569b024-b540-437f-bd4b-81bd429b2f6a" colab={"base_uri": "https://localhost:8080/", "height": 355} display(example) # + [markdown] id="Lk0Fn4c1IpPV" colab_type="text" # ## Reproduce Another Graph # + id="5maGmwACJRLz" colab_type="code" outputId="3a206167-33c0-4faa-e3bf-7f2c3c7a688a" colab={"base_uri": "https://localhost:8080/", "height": 400} url = 'https://fivethirtyeight.com/wp-content/uploads/2017/04/roeder-scrabble-1.png' example = Image(url=url, width=400) display(example) # + id="_dtx8OzWJz7F" colab_type="code" colab={} scrabble = pd.read_csv('https://media.githubusercontent.com/media/fivethirtyeight/data/master/scrabble-games/scrabble_games.csv') # + id="J000zVPiJ4p5" colab_type="code" outputId="d96c430f-c543-4dbb-ee1e-d293ffe1b327" colab={"base_uri": "https://localhost:8080/", "height": 305} scrabble.head() # + id="EDow2SiMJ9Kt" colab_type="code" colab={} #first need to remove tie games #scrabble = scrabble[~scrabble['tie']] # + id="W_S0k76zKV7r" colab_type="code" colab={} #scrabble['tie'].sample(20) # + id="3hpYGaN_Kl1u" colab_type="code" colab={} #all I need is the winning score and the losing score for each game columns = ['gameid', 'winnerscore', 'loserscore'] scrabble = scrabble[columns] # + id="oLTS2dURK3fJ" colab_type="code" outputId="b40a9eca-c7ec-4566-d6c8-204a0df96ac7" colab={"base_uri": "https://localhost:8080/", "height": 348} scrabble.sample(10) # + id="e5H5z9dELQaq" colab_type="code" outputId="1ee319d9-ae24-489e-b029-b7ee4972535c" colab={"base_uri": "https://localhost:8080/", "height": 288} scrabble.describe() # + id="w_c5jmwDLb7k" colab_type="code" colab={} scrabble = scrabble.set_index('gameid') # + id="gd_IoFxTN5OV" colab_type="code" outputId="bc606a68-f534-4689-feb0-3c33105a4eb7" colab={"base_uri": "https://localhost:8080/", "height": 378} scrabble.sample(10) # + id="VwSCO-AaN-GK" colab_type="code" outputId="4d022e8f-204f-4d3e-dc3e-69cd2c5cc499" colab={"base_uri": "https://localhost:8080/", "height": 288} scrabble.describe #there are...negative scores? I should remove those, nothing in the original graph goes below zero # + id="Pa7pry7POHF3" colab_type="code" colab={} condition = scrabble['winnerscore'] >= 1 scrabble = scrabble[condition] condition = scrabble['loserscore'] >= 1 scrabble = scrabble[condition] #initial histogram showed that most of the scores were 0/0 (I guess forfeits?) 
and that ruined the graph # + id="ZQmfPpC4Rjb0" colab_type="code" outputId="9d5d1367-6e56-4969-f4dd-26e5babbba14" colab={"base_uri": "https://localhost:8080/", "height": 299} plt.hist(scrabble['loserscore'], color='red', bins=100) plt.hist(scrabble['winnerscore'], color='green', bins=200) plt.title('700,000 games of Scrabble', fontweight='bold', fontsize=14, loc='left') plt.show() # + id="KPL2hL73Zbp_" colab_type="code" outputId="da7cbe30-ff46-480c-a572-3973a9296251" colab={"base_uri": "https://localhost:8080/", "height": 300} scrabble['winnerscore'].plot.kde() # + id="k_hlKPRhWG0m" colab_type="code" outputId="574ddc0f-a8a0-4b59-c779-c9582b38e621" colab={"base_uri": "https://localhost:8080/", "height": 400} display(example) # + id="P8G_v8dTWUep" colab_type="code" colab={} #why are the numbers in my graph not the same? # + id="93go5lM-PCWS" colab_type="code" outputId="501601fa-95a1-4d1f-b67b-f2d103ff66f6" colab={"base_uri": "https://localhost:8080/", "height": 534} import seaborn as sns sns.distplot(sample['loserscore'], bins=40, hist=True, color='green', kde=False).set_title('70,000 games of Scrabble') sns.distplot(sample['winnerscore'], bins=75, hist=True, color='red', kde=False) ax.set_xlabel('Score') ax.set_yticks(range(0,10000, 2500)); # + [markdown] id="xC2OBVBpK2-a" colab_type="text" # # + [markdown] id="0wSrBzmJyWaV" colab_type="text" # # STRETCH OPTIONS # # ### 1) Reproduce one of the following using the matplotlib or seaborn libraries: # # - [thanksgiving-2015](https://fivethirtyeight.com/features/heres-what-your-part-of-america-eats-on-thanksgiving/) # - [candy-power-ranking](https://fivethirtyeight.com/features/the-ultimate-halloween-candy-power-ranking/) # - or another example of your choice! # # ### 2) Make more charts! # # Choose a chart you want to make, from [Visual Vocabulary - Vega Edition](http://ft.com/vocabulary). # # Find the chart in an example gallery of a Python data visualization library: # - [Seaborn](http://seaborn.pydata.org/examples/index.html) # - [Altair](https://altair-viz.github.io/gallery/index.html) # - [Matplotlib](https://matplotlib.org/gallery.html) # - [Pandas](https://pandas.pydata.org/pandas-docs/stable/visualization.html) # # Reproduce the chart. [Optionally, try the "Ben Franklin Method."](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit) If you want, experiment and make changes. # # Take notes. Consider sharing your work with your cohort! # + id="dRJkKftiy5BJ" colab_type="code" colab={} # More Work Here # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Make formatted table S1 for CMIP6 models # # This details the models and runs that we used. # # We do CMIP5 separately as we use Piers's data. 
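# The cell below assumes `branch_points.json` is nested as experiment -> model -> run, with
# CMIP-style run names such as `r10i1p1f1`. Runs are sorted on the integer realization index so
# that, for example, r2 lists before r10 (a plain string sort would not do this). A minimal,
# hypothetical illustration of that ordering:

# +
# Illustrative only: made-up structure mirroring the assumed layout of branch_points.json.
example_branch_points = {"historical": {"ModelA": {"r1i1p1f1": {}, "r10i1p1f1": {}, "r2i1p1f1": {}}}}
example_runs = example_branch_points["historical"]["ModelA"].keys()
# Sort key = integer between 'r' and 'i'; a string sort would give r1, r10, r2.
print(sorted(example_runs, key=lambda run: int(run[1:].split("i")[0])))
# -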
import json

with open('../data_output/branch_points.json', 'r') as f:
    branch_points = json.load(f)

def sort_on_run(runs):
    runtuples = []
    for run in runs:
        index = int(run[1:].split('i')[0])
        runtuples.append((index, run))
    return sorted(runtuples)

for expt in branch_points.keys():
    print(expt)
    print('-----------')
    for model in branch_points[expt].keys():
        runs = branch_points[expt][model].keys()
        runtuples = sort_on_run(runs)
        print(model)
        for i in runtuples:
            print(i[1])
        print()

# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # S-109A Introduction to Data Science
# ## Homework 4 - Regularization
#
#
# **Harvard University**
# **Summer 2018**
# **Instructors**: ,
#

# ### INSTRUCTIONS
#
# - To submit your assignment follow the instructions given in Canvas.
# - Restart the kernel and run the whole notebook again before you submit.
# - If you submit individually and you have worked with someone, please include the name of your [one] partner below.
#
# Names of people you have worked with go here:
#
from IPython.core.display import HTML
def css_styling():
    styles = open("cs109.css", "r").read()
    return HTML(styles)
css_styling()

# import these libraries
# +
import warnings
warnings.filterwarnings('ignore')

import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import RidgeCV
from sklearn.linear_model import LassoCV
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import KFold
#import statsmodels.api as sm from pandas.core import datetools
# %matplotlib inline
# -

# # Continuing Bike Sharing Usage Data
#
# In this homework, we will focus on regularization and cross validation. We will continue to build regression models for the Capital Bikeshare program in Washington D.C. See homework 3 for more information about the Capital Bikeshare data that we'll be using extensively.
#
#
# ## Data Preparation

# **Question 1**
# In HW3 Questions 1-3, you preprocessed the data in preparation for your regression analysis. We ask you to repeat those steps (particularly those in Question 3) so that we can compare the analysis models in this HW with those you developed in HW3. In this HW we'll be using models from sklearn exclusively (as opposed to statsmodels) # # **1.1** [From HW3] Read `data/BSS_train.csv` and `data/BSS_test.csv` into dataframes `BSS_train` and `BSS_test`, respectively. Remove the `dteday` column from both train and test dataset. We do not need it, and its format cannot be used for analysis. Also remove the `casual` and `registered` columns for both training and test datasets as they make `count` trivial. # # **1.2** Since we'll be exploring Regularization and Polynomial Features, it will make sense to standardize our data. Standardize the numerical features. Store the dataframes for the processed training and test predictors into the variables `X_train` and `X_test`. Store the appropriately shaped numpy arrays for the corresponding train and test `count` columns into `y_train` and `y_test`. # # **1.3** Use the `LinearRegression` library from `sklearn` to fit a multiple linear regression model to the training set data in `X_train`. Store the fitted model in the variable `BikeOLSModel`. # # **1.4** What are the training and test set $R^2$ scores? Store the training and test $R^2$ scores of the `BikeOLSModel` in a dictionary `BikeOLS_r2scores` using the string 'training' and 'test' as keys. # # **1.5** We're going to use bootstrapped confidence intervals (use 500 bootstrap iterations) to determine which of the estimated coefficients for the `BikeOLSModel` are statistically significant at a significance level of 5% . We'll do so by creating 3 different functions: # # 1. `make_bootstrap_sample(dataset_X, dataset_y)` returns a bootstrap sample of `dataset_X` and `dataset_y` # 2. `calculate_coefficients(dataset_X, dataset_y, model)` returns in the form of a dictionary regression coefficients calculated by your model on `dataset_X` and `dataset_y`. The keys for regression coefficients dictionary should be the names of the features. The values should be the coefficient values of that feature calculated on your model. An example would be {'hum': 12.3, 'windspeed': -1.2, 'Sunday': 0.6 ... } # 3. `get_significant_predictors(regression_coefficients, significance_level)` takes as input a list of regression coefficient dictionaries (each one the output of `calculate_coefficients` and returns a python list of the feature names of the significant predictors e.g. ['Monday', 'hum', 'holiday', ... ] # # In the above functions `dataset_X` should always be a pandas dataframe with your features, `dataset_y` a numpy column vector with the values of the response variable and collectively they form the dataset upon which the operations take place. `model` is the `sklearn` regression model that will be used to generate the regression coefficients. `regression_coefficients` is a list of dictionaries of numpy arrays with each numpy array containing the regression coefficients (not including the intercept) calculated from one bootstrap sample. `significance_level` represents the significance level as a floating point number. So a 5% significance level should be represented as 0.05. # # # Store the feature names as a list of strings in the variable `BikeOLS_significant_bootstrap` and print them for your answer. 
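# Before the answer cells, here is a minimal sketch of one way these three helpers could fit
# together (an illustration, not the graded solution). It assumes `dataset_X` is a pandas
# dataframe and `dataset_y` a numpy array, as stated above, and uses a simple percentile
# interval at the requested significance level.

# +
# Hedged sketch only; names follow the question text.
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression

def make_bootstrap_sample(dataset_X, dataset_y):
    # Draw row indices with replacement and return the matching rows of X and y.
    idx = np.random.choice(len(dataset_X), size=len(dataset_X), replace=True)
    return dataset_X.iloc[idx], dataset_y[idx]

def calculate_coefficients(dataset_X, dataset_y, model):
    # Fit the model and map each feature name to its estimated coefficient.
    fitted = model.fit(dataset_X, dataset_y)
    return dict(zip(dataset_X.columns, np.ravel(fitted.coef_)))

def get_significant_predictors(regression_coefficients, significance_level):
    # A predictor is significant if its bootstrap percentile interval excludes zero.
    coefs = pd.DataFrame(regression_coefficients)
    lower = coefs.quantile(significance_level / 2)
    upper = coefs.quantile(1 - significance_level / 2)
    return [name for name in coefs.columns if lower[name] > 0 or upper[name] < 0]
# -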
# # ### Answers # #### 1.1 Read `data/BSS_train.csv` and `data/BSS_test.csv` into Pandas DataFrames # + # your code here # - # #### 1.2 Standardizing our data # + # your code here # - # #### 1.3 Use the `LinearRegression` library from `sklearn` to fit a multiple linear regression. # + # your code here # - # #### 1.4 What are the training and test set $R^2$ scores? Store the $R^2$ scores of the `BikeOLSModel` on the training and test sets in a dictionary `BikeOLS_r2scores`. #
# Your answer here
# + # your code here # - # #### 1.5 We're going to use bootstrapped confidence intervals to determine which of the estimated coefficients ... # + # your code here # dataset_x should be a pandas dataframe ## accepts dataset inputs as numpy arrays def make_bootstrap_sample(dataset_X, dataset_y, size = None): # your code here # by default return a bootstrap sample of the same size as the original dataset if not size: size = len(dataset_X) # if the X and y datasets aren't the same size, raise an exception if len(dataset_X) != len(dataset_y): raise Exception("Data size must match between dataset_X and dataset_y") # return as a tuple your bootstrap samples of dataset_X as a pandas dataframe # and your bootstrap samples of dataset y as a numpy column vector return (bootstrap_dataset_X, bootstrap_dataset_y) def calculate_coefficients(dataset_X, dataset_y, model): # your code here # return coefficients in the variable coefficients_dictioanry as a dictionary # with the key being the name of the feature as a string # the value being the value of the coefficients # do not return the intercept as part of this return coefficients_dictionary def get_significant_predictors(regression_coefficients, significance_level): # your code here # regression_coefficients is a list of dictionaries # with the key being the name of the feature as a string # the value being the value of the coefficients # each dictionary in th list should be the output of calculate_coefficients # return the significant coefficients as a list of strings return significant_coefficients # - # ## Penalization Methods # # In HW 3 Question 5 we explored using subset selection to find a significant subset of features. We then fit a regression model just on that subset of features instead of on the full dataset (including all features). As an alternative to selecting a subset of predictors and fitting a regression model on the subset, one can fit a linear regression model on all predictors, but shrink or regularize the coefficient estimates to make sure that the model does not "overfit" the training set. # #
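# As a quick intuition check before Question 2: on synthetic data (not the bike data), a larger
# Ridge penalty shrinks the estimated coefficients toward zero.

# +
# Toy illustration only.
import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X_demo = rng.randn(100, 3)
y_demo = X_demo @ np.array([3.0, -2.0, 0.5]) + rng.randn(100)

for alpha in [0.01, 10, 1000]:
    # Coefficient magnitudes decrease as the penalty grows.
    print(alpha, np.round(Ridge(alpha=alpha).fit(X_demo, y_demo).coef_, 3))
# -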
# **Question 2**
# We're going to use Ridge and Lasso regression regularization techniques to fit linear models to the training set. We'll use cross-validation and shrinkage parameters $\lambda$ from the set $\{.001,.005,1,5,10,50,100,500,1000\}$ to pick the best model for each regularization technique. # # **2.1** Use 5-fold cross-validation to pick the best shrinkage parameter from the set $\{.001,.005,1,5,10,50,100,500,1000\}$ for your Ridge Regression model on the training data. Fit a Ridge Regression model on the training set with the selected shrinkage parameter and store your fitted model in the variable `BikeRRModel`. Store the selected shrinkage parameter in the variable `BikeRR_shrinkage_parameter`. # # **2.2** Use 5-fold cross-validation to pick the best shrinkage parameter from the set $\{.001,.005,1,5,10,50,100,500,1000\}$ for your Lasso Regression model on the training data. Fit a Lasso Regression model on the training set with the selected shrinkage parameter and store your fitted model in the variable `BikeLRModel`. Store the selected shrinkage parameter in the variable `BikeLR_shrinkage_parameter`. # # **2.3** Create three dictionaries `BikeOLSparams`, `BikeLRparams`, and `BikeRRparams`. Store in each the corresponding regression coefficients for each of the regression models indexed by the string feature name. # # **2.4** For the Lasso and Ridge Regression models list the features that are assigned a coefficient value close to 0 (i.e. the absolute value of the coefficient is less than 0.1). How closely do they match the redundant predictors found (if any) in HW 3, Question 5? # # **2.5** To get a visual sense of how the features different regression models (Multiple Linear Regression, Ridge Regression, Lasso Regression) estimate coefficients, order the features by magnitude of the estimated coefficients in the Multiple Linear Regression Model (no shrinkage). Plot a bar graph of the magnitude (absolute value) of the estimated coefficients from Multiple Linear Regression in order from greatest to least. Using a different color (and alpha values) overlay bar graphs of the magnitude of the estimated coefficients (in the same order as the Multiple Linear Regression coefficients) from Ridge and Lasso Regression. # # **2.6** Let's examine a pair of features we believe to be related. Is there a difference in the way Ridge and Lasso regression assign coefficients to the predictors `temp` and `atemp`? If so, explain the reason for the difference. # # **2.7** Discuss the Results: # # 1. How do the estimated coefficients compare to or differ from the coefficients estimated by a plain linear regression (without shrinkage penalty) in Question 1? # 2. Is there a difference between coefficients estimated by the two shrinkage methods? If so, give an explantion for the difference. # 3. Is the significance related to the shrinkage in some way? # # *Hint:* You may use `sklearn`'s `RidgeCV` and `LassoCV` classes to implement Ridge and Lasso regression. These classes automatically perform cross-validation to tune the parameter $\lambda$ from a given range of values. # ### Answers lambdas = [.001, .005, 1, 5, 10, 50, 100, 500, 1000] # #### 2.1 Use 5-fold cross-validation to pick the best shrinkage parameter from the set $\{.001,.005,1,5,10,50,100,500,1000\}$ for your Ridge Regression model. # + # your code here # - # #### 2.2 Use 5-fold cross-validation to pick the best shrinkage parameter from the set $\{.001,.005,1,5,10,50,100,500,1000\}$ for your Lasso Regression model. 
# + # your code here # - # #### 2.3 Create three dictionaries `BikeOLSparams`, `BikeLRparams`, and `BikeRRparams`. Store in each the corresponding regression coefficients. # + # your code here # - # #### 2.4 For the Lasso and Ridge Regression models list the features that are assigned a coefficient value close to 0 ... # your code here #
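# One possible way to answer 2.4, assuming `BikeRRModel` and `BikeLRModel` were fitted in
# 2.1/2.2 on a pandas DataFrame `X_train` whose columns are the feature names (those names come
# from the assignment, not from this sketch):

# +
def near_zero_features(model, feature_names, threshold=0.1):
    """Return the features whose fitted coefficient has absolute value below `threshold`."""
    return [name for name, coef in zip(feature_names, model.coef_) if abs(coef) < threshold]

# Hypothetical usage with the assignment's fitted models:
# print("Ridge near-zero:", near_zero_features(BikeRRModel, X_train.columns))
# print("Lasso near-zero:", near_zero_features(BikeLRModel, X_train.columns))
# -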
# Your answer here #
# #### 2.5 To get a visual sense of how the different regression models (Multiple Linear Regression, Ridge Regression, Lasso Regression) estimate coefficients, order the features by magnitude of the estimated coefficients in the Multiple Linear Regression Model (no shrinkage). # + # your code here # - # #### 2.6 Let's examine a pair of features we believe to be related. Is there a difference in the way Ridge and Lasso regression assign coefficients ... #
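# As a toy illustration of the behaviour 2.6 asks about (synthetic data, not the actual
# `temp`/`atemp` columns): when two predictors are nearly collinear, Ridge tends to spread the
# weight across both, while Lasso tends to keep one and drive the other to zero.

# +
import numpy as np
from sklearn.linear_model import Ridge, Lasso

rng = np.random.RandomState(1)
x1 = rng.normal(size=200)
x2 = x1 + rng.normal(scale=0.01, size=200)  # almost an exact copy of x1
X_corr = np.column_stack([x1, x2])
y_corr = 2.0 * x1 + rng.normal(scale=0.5, size=200)

print("Ridge coefficients:", Ridge(alpha=1.0).fit(X_corr, y_corr).coef_)  # roughly [1, 1]
print("Lasso coefficients:", Lasso(alpha=0.1).fit(X_corr, y_corr).coef_)  # one coefficient near 0
# -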
# Your answer here #
# #### 2.7.1 How do the estimated coefficients compare to or differ from ... #
# Your answer here #
# #### 2.7.2 Is there a difference between coefficients estimated by the two shrinkage methods ... #
# Your answer here #
# #### 2.7.3 Is the significance related to the shrinkage in some way? #
# Your answer here #
#
# ## Question 3: Polynomial Features, Interaction Terms, and Cross Validation
# # We would like to fit a model to include all main effects and polynomial terms for numerical predictors up to the $4^{th}$ order. More precisely use the following terms: # - predictors in `X_train` and `X_test` # - $X^1_j$, $X^2_j$, $X^3_j$, and $X^4_j$ for each numerical predictor $X_j$ # # **3.1** Create an expanded training set including all the desired terms mentioned above. Store that training set (as a pandas dataframe) in the variable `X_train_poly`. Create the corresponding test set and store it as a pandas dataframe in `X_test_poly`. # # **3.2** Discuss the following: # # 1. What are the dimensions of this 'design matrix' of all the predictor variables in 3.1? # 2. What issues may we run into attempting to fit a regression model using all of these predictors? # # **3.3** Let's try fitting a regression model on all the predictors anyway. Use the `LinearRegression` library from `sklearn` to fit a multiple linear regression model to the training set data in `X_train_poly`. Store the fitted model in the variable `BikeOLSPolyModel`. # # **3.4** Discuss the following: # 1. What are the training and test $R^2$ scores? # 2. How does the model performance compare with the OLS model on the original set of features in Question 1? # # **3.5** The training set $R^2$ score we generated for our model with polynomial and interaction terms doesn't have any error bars. Let's use cross-validation to generate sample sets of $R^2$ for our model. Use 5-fold cross-validation to generate $R^2$ scores for the multiple linear regression model with polynomial terms. What are the mean and standard deviation of the $R^2$ scores for your model. # # **3.6** Visualize the $R^2$ scores generated from the 5-fold cross validation as a box and whisker plot. # # **3.7** We've used cross-validation to generate error bars around our $R^2$ scores, but another use of cross-validation is as a way of model selection. Let's construct the following model alternatives: # # 1. Multiple linear regression model generated based upon the feature set in Question 1 (let's call these the base features. # 2. base features plus polynomial features to order 2 # 3. base features plus polynomial features to order 4 # # Use 5-fold cross validation on the training set to select the best model. Make sure to evaluate all the models as much as possible on the same folds. For each model generate a mean and standard deviation for the $R^2$ score. # # **3.8** Visualize the $R^2$ scores generated for each model from 5-fold cross validation in box and whiskers plots. Do the box and whisker plots influence your view of which model was best? # # **3.9** Evaluate each of the model alternatives on the test set. How do the results compare with the results from cross-validation? # ### Answers # #### 3.1 Create an expanded training set including all the desired terms mentioned above. Store that training set (as a numpy array) in the variable `X_train_poly`.... # + # your code here # - X_poly_train, X_poly_test = get_poly_dataset(X_train, X_test, 4) # #### 3.2.1 What are the dimensions of this 'design matrix'...** #
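# Before answering 3.2.1: the cell above calls a helper `get_poly_dataset` that the assignment
# leaves to the student ("your code here"). A minimal sketch of what such a helper could look
# like, assuming `X_train`/`X_test` are pandas DataFrames and that only the numerical columns
# should receive polynomial terms (both assumptions; the real helper may differ). It also makes
# the design-matrix dimensions asked about in 3.2.1 easy to read off.

# +
def get_poly_dataset(X_train, X_test, degree, numerical_cols=None):
    """Append powers 2..degree of each numerical column to copies of X_train and X_test."""
    if numerical_cols is None:
        numerical_cols = X_train.select_dtypes(include="number").columns
    X_train_poly, X_test_poly = X_train.copy(), X_test.copy()
    for col in numerical_cols:
        for d in range(2, degree + 1):
            X_train_poly[f"{col}^{d}"] = X_train[col] ** d
            X_test_poly[f"{col}^{d}"] = X_test[col] ** d
    return X_train_poly, X_test_poly
# -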
# Your answer here #
# #### 3.2.2 What issues may we run into attempting to fit a regression model using all of these predictors? #
# Your answer here #
# #### 3.3 Let's try fitting a regression model on all the predictors anyway. Use the `LinearRegression` library from `sklearn` to fit a multiple linear regression model .... # + # your code here # - # #### 3.4.1 What are the training and test $R^2$ scores? # + # your code here # - #
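# A hedged sketch for 3.3/3.4.1. The helper below is self-contained; the commented call assumes
# the assignment's `X_train_poly`, `X_test_poly`, `y_train` and `y_test` exist from earlier steps.

# +
from sklearn.linear_model import LinearRegression

def fit_and_score_ols(X_tr, y_tr, X_te, y_te):
    """Fit plain OLS and return (fitted model, train R^2, test R^2)."""
    model = LinearRegression().fit(X_tr, y_tr)
    return model, model.score(X_tr, y_tr), model.score(X_te, y_te)

# BikeOLSPolyModel, train_r2, test_r2 = fit_and_score_ols(X_train_poly, y_train, X_test_poly, y_test)
# -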
# Your answer here #
# #### 3.4.2 How does the model performance compare with the OLS model on the original set of features in Question 1? #
# Your answer here #
# #### 3.5 The training set $R^2$ score we generated for our model with polynomial and interaction terms doesn't have any error bars. Let's use cross-validation to generate sample... # + # your code here # - #
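# One way to generate the R^2 samples 3.5 asks for; the commented call assumes the assignment's
# `X_train_poly` and `y_train` exist.

# +
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score

def r2_samples(X, y, n_splits=5):
    """Return (mean, std, per-fold scores) of plain OLS R^2 under k-fold cross-validation."""
    scores = cross_val_score(LinearRegression(), X, y, cv=n_splits, scoring="r2")
    return scores.mean(), scores.std(), scores

# mean_r2, std_r2, fold_scores = r2_samples(X_train_poly, y_train)
# -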
# Your answer here #
# #### 3.6 Visualize the $R^2$ scores generated from the 5-fold cross validation as a box and whisker plot. # + # your code here # - # #### 3.7 We've used cross-validation to generate error bars around our $R^2$ scores, but another use of cross-validation is as a way of model selection. Let's construct the following model alternatives ... # + # your code here # - #
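# A sketch of the model comparison in 3.7 that scores every candidate design matrix on the same
# folds by fixing the fold indices up front. `candidate_designs` is a hypothetical dict mapping a
# model name to its design matrix (base features, base + order-2 terms, base + order-4 terms),
# all sharing the same rows as the target vector `y`.

# +
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold, cross_val_score

def compare_designs(candidate_designs, y, n_splits=5, seed=0):
    """Return {name: (mean R^2, std R^2)}, reusing identical KFold splits for every design."""
    folds = KFold(n_splits=n_splits, shuffle=True, random_state=seed)
    results = {}
    for name, X in candidate_designs.items():
        scores = cross_val_score(LinearRegression(), X, y, cv=folds, scoring="r2")
        results[name] = (scores.mean(), scores.std())
    return results

# Hypothetical usage with design matrices built in earlier answers:
# compare_designs({"base": X_train, "poly2": X_train_poly2, "poly4": X_train_poly4}, y_train)
# -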
# Your answer here #
# #### 3.8 Visualize the $R^2$ scores generated for each model from 5-fold cross validation in box and whiskers plots. Do the box and whisker plots influence your view of which model was best? ... # + # your code here # - # #### 3.9 Evaluate each of the model alternatives on the test set. How do the results compare with the results from cross-validation? # + # your code here # - #
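# A possible sketch for 3.9; the names in the commented call are hypothetical placeholders for
# the fitted models and matching test design matrices produced in earlier answers.

# +
def test_set_r2(models_and_designs, y_test):
    """Return {name: test R^2} for each (fitted model, test design matrix) pair."""
    return {name: model.score(X_te, y_test)
            for name, (model, X_te) in models_and_designs.items()}

# test_set_r2({"base": (base_model, X_test),
#              "poly2": (poly2_model, X_test_poly2),
#              "poly4": (poly4_model, X_test_poly4)}, y_test)
# -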
# Your answer here #
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- # # # CHAPTER 2 Numbers, Strings,and Variables # ## Python的基本元素:數字,字串和變量 # # * [2.1 變數 名稱 物件](#Variables_Names_Objects) # * [2.2 數字](#Numbers) # * [2.3 字串](#Strings) # # # Python內建的型態有 # * 布林值 Boolean (True 、 False) # * 整數 Integer # * 浮點數 Float (可用科學記號表示法 ex. 1.0e3 = 1000.0) # * 字串 String (字元組成的陣列) # ------ # # ## 2.1 變數 名稱 物件 # [回目錄](#HOME) # # Python為物件導向作為設計,所有的數據皆為Objcet,詳細的物件導向觀念可參考wiki中的介紹或是書中的介紹。 # 其中要特別注意所謂的針對某個特定物件型別內建method上的使用, # 例如string型別內建upper函數,可以把文字轉成大寫,則要使用上的方法為__string.upper()__, # 若是一個一般的內建method或是使用者自行建立的function, # 例如__len()__,則使用上的方法為 __len('abc')__, # 所以在學習Python時,需要清楚了解此function是針對某個型別的內建還是一般狀況。 # # 其中要注意的是Python為強型別(Strongly typed),也就是說在執行 '1' + 2,會出現TypeError,並不會出現3或是'12'的結果,所以進行不同型別之間的處理可以使用 int('1') + 2 = 3或是 '1' + str(2) = '12'來作為處理。 # # 跟大多數語言一樣,再給定變數數值是使用 '=' 來賦予值,在Python中變數不用宣告,並且還有一個特性為,變數在記憶體位置中僅僅像是標籤一般,對某個記憶體位置做貼標籤功能,在變數改變內容時,記憶體內的值不會改變,而是變數標籤貼到其他記憶體位置上。因此Python不用宣告變數型別,故可以改變變數內的型別,所以可以使用_type(變數)_做為檢測變數現在的型別。 # # 變數的命名不外乎就是只能大小寫英文字母、 數字與底線(_),且不能以數字開頭,相關保留關鍵字如下,請勿作為變數名稱使用。 # # # ![Alt text](http://i.imgur.com/vS3VxFe.png "保留關鍵字") # ------ # # ## 2.2 數字 # [回目錄](#HOME) # # 基本運算符號如下 # # |符號|解釋|用法| # |--|--|--| # |+|加法|2 + 3 = 5| # |-|減法|2 - 3 = -1| # |\*|乘法|2 * 3 = 6| # |/|浮點數除法|3 / 2 = 1.5| # |//|整數除法 (商數)|3 // 2 = 1| # |%|餘數|3 % 2 = 1| # |\*\*|次方|2 \*\* 3 = 8| # # # 對於正整數的操作,比較要注意的為0不可以放置在數字前端,如 a = 05 會出現SyntaxError。 # 於整數使用'/'除法上也會得到浮點數結果,並不需要特別轉換成浮點數再做除法運算。 # # 其餘運算規則與用法詳細請看書本介紹(ex. a = a + 1可以寫成 a += 1 等等) # 數字型別轉換可以用__int()__,裡面不允許有非數字出現,浮點數會無條件捨去,其中python允許使用__int(98.7) = 98__,但__int('98.7')__則會出現錯誤,這點要多加小心。 # # 最為重要的一點為Python3之後沒有溢位問題,意旨儲存整數大小無上限,取決於自身記憶體限制。 # # 轉換成浮點數可以使用__float()__。 # ------ # # ## 2.3 字串 # [回目錄](#HOME) # # Python3支援Unicode!!!! 表示可以顯示中文等等,檔案編碼方式記得選擇Unicode # 使用單或雙引號皆可以創建字串,若要在字串中包含單雙引號只要 # 用相反的作為外框 # 使用跳脫字元\' \" # 連續使用三次即可(單,雙引號都可以) # # 三個單引號'''還可以用再多行字串的建立,一般常用於多行註解上使用。 # 在使用print()指令時,會自動將跳脫字元轉換成正確的顯示方式(ex. \n轉換成跳行等等) # 並且會在變數之間插入一空白 # # ```Python # print('a','b','c') # 'a' 'b' 'c' # ``` # # 可以使用__str()__將其餘類別轉換成字串型態。 # # 字串相連接時可以使用 + 號或是直接把兩字串擺在前後即可。( print('a'+'b') print('a''b') 都可以得到 'ab'的結果 ) # 使用\*可以快速建立重複字串。 # # ```Python # print('a' * 5) # 'aaaaa' # ``` # + #前述提到字串為字元的陣列,故可以使用[ ]來提取特定位置之字元,(相關的容器介紹在CH3) a = 'bcd' print(a[0]) #'b' print(a[-1]) #'d' #index從0開始,-1為最後一個字元 # - # 更多的提取方法如下 # # |用法|說明| # |--|--| # |[ : ]|提取全部| # |[start : ]|提取 start 至結束| # |[ : end]|提取開頭到 end - 1| # |[start : end]|提取 start 至 end - 1| # |[start : end : step]|提取 start 至 end - 1,間隔為 step (step為負的時則從右邊開始,start與end需反過來擺放)| # a = 'abcde' print(a[::-1]) #'edcba' 可以變成反序排列 print(a[-2:0:-1]) #'dcb' # 其中變數內的字串是不能替換內容(因為容器為類似TUPLES的型態,CH3會說明), # 若要替換內容,則可以使用重組或是 __string.replace()__ name = 'Henny' #name[0] = 'P' #錯誤!!!!!! 
a = name.replace('H', 'P') #'Penny' print(a) print('P' + name[1:]) #'Penny' # __len()__ 可以獲得長度 a = 'abc' len(a) # __string.split()__可以分割字串成list,( )內可以指定符號,預設會切割\n(換行) 、 \t(tab)與空格三種 todos = 'get gloves,get mask,give cat vitamins,call ambulance' print(todos.split(',')) print(todos.split()) # __'符號'.join()__可以合併list成字串 crypto_list = ['Yeti', 'Bigfoot', 'Loch Ness Monster'] ', '.join(crypto_list) # __string.startswith()__ 與 __string.endswith()__ 分別可以檢查開頭與結束字串是否為特定字串,回傳__True__或__False__ poem = 'abcdef' print(poem.startswith('ab')) print(poem.endswith('eef')) # __string.find()__ 與 __string.rfind()__ 可以查詢第一次與最後一次出現搜尋字串的index,__string.count()__可以查詢字串出現次數 poem = 'abcdefbcd' print(poem.find('bc')) print(poem.rfind('bc')) print(poem.count('bc')) # __string.isalnum()__可以查詢字串中是否都是字母或數字,回傳__True__或__False__ poem = 'abc@def' poem.isalnum() # + #其餘還有一些方便的string內建function可以使用 setup = 'a duck goes into a bar...' print(setup.strip('.')) #刪除結尾特定符號 'a duck goes into a bar' print(setup.capitalize()) #字串第一個字元大寫 'A duck goes into a bar...' print(setup.title()) #每個單字開頭大寫 'A Duck Goes Into A Bar...' print(setup.upper()) #全部大寫 'A DUCK GOES INTO A BAR...' print(setup.lower()) #全部小寫 'a duck goes into a bar...' print(setup.swapcase()) #大小寫交換 'a DUCK GOES INTO A BAR...' print(setup.center(30)) #將字串中心移動至30個字元的中間 ' a duck goes into a bar... ' print(setup.ljust(30)) #左對齊 'a duck goes into a bar... ' print(setup.rjust(30)) #右對齊 ' a duck goes into a bar...' print(setup.replace('duck', 'marmoset')) #'a marmoset goes into a bar...' print(setup.replace('a ', 'a famous ', 100)) #只替換前100個'a ' # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Programmatically Access Materials Project Electrolyte Genome Data # and
# Created: November 18, 2015
# Last Update: April 19, 2018 # This notebook documents URL patterns to access Electrolyte Genome data and provides examples of access using the Python `requests` library. # # If you have questions, please contact the Materials Project team. Contact information is available at https://materialsproject.org. # ## URL patterns # There is one way to query for results given search criteria (`results`), and there are a few ways to obtain data for individual molecules, either in full with metadata (`json`) or simply the structure for display (`svg`) or analysis (`xyz`). Below are the four corresponding URL patterns. urlpattern = { "results": "https://materialsproject.org/molecules/results?query={spec}", "mol_json": "https://materialsproject.org/molecules/{mol_id}/json", "mol_svg": "https://materialsproject.org/molecules/{mol_id}/svg", "mol_xyz": "https://materialsproject.org/molecules/{mol_id}/xyz", } # ## Setup # + import json import os import sys if sys.version_info[0] == 2: from urllib import quote_plus else: from urllib.parse import quote_plus import requests # + # Ensure you have an API key, which is located on your dashboard # (https://materialsproject.org/dashboard). MAPI_KEY = "fAkEaP1K4y" # <-- replace with your api key # Please do NOT share a notebook with others with your API key hard-coded in it. # One alternative: Load API key from a set environment variable, e.g. # # MAPI_KEY = os.environ['PMG_MAPI_KEY'] # # Best alternative: Store and load API key using pymatgen, e.g. ### Do once, on command line (without "!" in front) or in notebook # # !pmg config --add PMG_MAPI_KEY "your_api_key_goes_here" ### Then, in notebook/script: # from pymatgen import SETTINGS # MAPI_KEY = SETTINGS.get("PMG_MAPI_KEY") # - # ## Getting a set of molecules # + # Here is a function we'll use to get results. We'll walk though some examples that use it. def get_results(spec, fields=None): """Take a specification document (a `dict`), and return a list of matching molecules. """ # Stringify `spec`, ensure the string uses double quotes, and percent-encode it... str_spec = quote_plus(str(spec).replace("'", '"')) # ...because the spec is the value of a "query" key in the final URL. url = urlpattern["results"].format(spec=str_spec) return (requests.get(url, headers={'X-API-KEY': MAPI_KEY})).json() # + # Find molecules containing oxygen and phosphorous, # and collect the ionization energies (relative to a lithium electrode) of the results. # Separate elements with a "-" spec = {"elements": "O-P"} results = get_results(spec) # Not all molecules have data for all available properties ionization_energies = [molecule["IE"] for molecule in results if "IE" in molecule] # + # Molecules with ionization energies ("IE") will have oxidation potentials relative to metallic electrodes, # available as "oxidation_" keys. "IE" itself is relative to lithium. # There is an analogous relationship between the presence of electron affinity ("EA") values # and corresponding "reduction_" keys for reduction potentials using a reference metal. # `task_id` is the molecule's identifier, which we'll use later in this notebook. # `MW` is molecular weight # `smiles`: https://en.wikipedia.org/wiki/Simplified_molecular-input_line-entry_system for key in results[0]: print(key) # + # A "silly" example specification that demonstrates many keys available to query, and # the expected format of their value specifications. # # The "$"-prefixed keys are MongoDB syntax (https://docs.mongodb.org/manual/reference/operator/query/). 
spec = { "elements": "C-H-O-F", "notelements": ["Al", "Br"], # a list (inconsistent for now with "elements" -- sorry) "charge": {"$in": [0, -1]}, # {0, 1, -1} "pointgroup": "C1", "functional_groups": {"$in": ["-COOH"]}, "base_molecule": {"$in": ["s3"]}, "nelements": 4, "EA": {"$gte": 0.4}, # >= 0.4 "IE": {"$lt": 5}, # < 5 "formula": "H11 C11 O4 F1", # "H11C11O4F" works too } results = get_results(spec) # - # What if we just want "everything"? Let's use an empty spec. results = get_results({}) print("{} molecules in total right now".format(len(results))) # The above request might take some time, but hopefully not much more than a few seconds. Why do we allow this? Well, we don't return all the data for each molecule, and the total size of what we send right now is less than 10 MB. # # As our collection of molecules grows in size, this policy may change. So, please use targeted query specifications to get the results you need, *especially* if you want to periodically check for new molecules that meet some specification. # ## Getting data for individual molecules # You can get all data for a molecule given its ID. def get_molecule(mol_id, fmt='json'): url = urlpattern["mol_" + fmt].format(mol_id=mol_id) response = requests.get(url, headers={'X-API-KEY': MAPI_KEY}) if fmt == 'json': return response.json() else: return response.content # + first_result = results[0] mol_id = first_result['task_id'] print("ID: {}".format(mol_id)) # Get all data by default molecule = get_molecule(mol_id) print("There are {} key/value pairs in molecule {}. Have a look around!".format(len(molecule), mol_id)) # The SVG format provides a two-dimensional "pretty picture" of the molecular structure. svg_of_molecule = get_molecule(mol_id, fmt='svg') with open('molecule.svg','w') as f: f.write(svg_of_molecule) print("scalable vector graphic saved") # The XYZ representation provided is the optimized geometry of the molecule in a charge-neutral state. xyz_of_molecule = get_molecule(mol_id, fmt='xyz') with open('molecule.xyz','w') as f: f.write(xyz_of_molecule) print("XYZ file saved. 
Can load into molecule-viewer software.") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np from sklearn.model_selection import train_test_split r_cols = ['user_id', 'movie_id', 'rating', 'timestemp'] # + ratings_base_2 = pd.read_csv('mvl/10M.dat', sep='::', names=r_cols, encoding='latin-1', engine='python') ratings_matrix_2= ratings_base_2.as_matrix() ratings_matrix_2[:, :2] -= 1 ratings_matrix_2.shape[0] # testSize = 0.3 # Data_train_2, Data_test_2= train_test_split(ratings_matrix_2, test_size = testSize, random_state = 5) # ratings_base_3 = pd.read_csv('mvl/10M.dat', sep='::', names=r_cols, encoding='latin-1', engine='python') # ratings_matrix_3 = ratings_base_3.as_matrix() # ratings_matrix_3[:, :2] -= 1 # testSize = 0.1 # Data_train_3, Data_test_3= train_test_split(ratings_matrix_3, test_size = testSize, random_state = 5) # ratings_base_4 = pd.read_csv('mvl/20M.csv', names=r_cols, encoding='latin-1', engine='python') # ratings_matrix_4 = ratings_base_4.as_matrix() # ratings_matrix_4[:, :2] -= 1 # testSize = 0.5 # Data_train_4, Data_test_4= train_test_split(ratings_matrix_4, test_size=testSize, random_state = 5) # - df = pd.DataFrame(Data_train_3, columns= r_cols) df.to_csv('mvl_can/10M_train_01.dat', sep=':', encoding='latin-1', index=False, header=False, ) dft = pd.DataFrame(Data_test_3, columns= r_cols) dft.to_csv('mvl_can/10M_test_01.dat', sep=':', encoding= 'latin-1', index=False, header=False) # + # n = Data_train_2.shape[0] # Data_train_2 = Data_train_2.tolist() # for i in range(n): # f1.write('%d::%d::%r\r\n' % (Data_train_2[i][0], Data_train_2[i][1], Data_train_2[i][2])) # f1.close() f1 = open('10M_train1.dat', 'a+') f2 = open('10M_test1.dat', 'a+') us = (np.max(ratings_matrix_2[:, 0]) + 1).astype(int) Y = ratings_matrix_2.copy() users = ratings_matrix_2[:, 0] ratings = ratings_matrix_2[:, 2] for u in range(us): ids = np.where((u == users) & (ratings >= 4))[0].astype(int) test_ids = ids[:(ids.shape[0]//2)] dft = pd.DataFrame(ratings_matrix_2[test_ids], columns= r_cols) dft.to_csv(f2, sep=':', encoding='latin-1', index=False, header=False) Y = np.delete(Y, test_ids, 0) df = pd.DataFrame(Y, columns= r_cols) df.to_csv(f1, sep=':', encoding='latin-1', index=False, header=False) f1.close() f2.close() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=true editable=true # # bulbea # > Deep Learning based Python Library for Stock Market Prediction and Modelling # # ![](bulbea.png) # + [markdown] deletable=true editable=true # A canonical way of importing the `bulbea` module is as follows: # + deletable=true editable=true import bulbea as bb # + [markdown] deletable=true editable=true # ### `bulbea.Share` # + [markdown] deletable=true editable=true # In order to analyse a desired share, we use the `Share` object defined under `bulbea` which considers 2 arguments - *the **source code** for the economic data* and *the **ticker symbol** for a said company*. 
# + deletable=true editable=true from coinmarketcap_draw import coinmarketcap_data # + [markdown] deletable=true editable=true # Go ahead and create a `Share` object as follows: # + deletable=true editable=true coin_name = '0x' data = coinmarketcap_data(coin_name) # + deletable=true editable=true share = bb.Share("123",'123',data=data) # + [markdown] deletable=true editable=true # By default, a `Share` object for a said source and symbol provides you historical data since a company's inception, as a `pandas.DataFrame` object. In order to access the same, use the `Share` object's member variable - `data` as follows: # + deletable=true editable=true data = share.data nsamples = 5 data.tail(nsamples) # + [markdown] deletable=true editable=true # In order to analyse a given attribute, you could plot the same as follows: # + deletable=true editable=true figsize = (20, 15) % matplotlib inline # + deletable=true editable=true share.plot(figsize = figsize) # + [markdown] deletable=true editable=true # ### Statistics # + [markdown] deletable=true editable=true # #### Global Mean # In order to plot the **global mean** of the stock, we could do the same as follows: # + deletable=true editable=true share.plot(figsize = (20, 15), global_mean = True) # + [markdown] deletable=true editable=true # #### Moving Averages and Bollinger Bands (R) # + deletable=true editable=true bands = share.bollinger_bands(period = 50, bandwidth = 2) bands.tail(nsamples) # + deletable=true editable=true share.plot(['Close'], figsize = (20, 15), bollinger_bands = True, period = 100, bandwidth = 2) # + [markdown] deletable=true editable=true # ### Training & Testing # + deletable=true editable=true from bulbea.learn.evaluation import split # + deletable=true editable=true Xtrain, Xtest, ytrain, ytest = split(share, 'Close', normalize = True, train=0.01) # + deletable=true editable=true import numpy as np Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], Xtrain.shape[1], 1)) Xtest = np.reshape(Xtest, (Xtest.shape[0], Xtest.shape[1], 1)) # + [markdown] deletable=true editable=true # ### Modelling # + deletable=true editable=true from bulbea.learn.models import RNN # + deletable=true editable=true rnn = RNN([1, 100, 100, 1]) # number of neurons in each layer # + [markdown] deletable=true editable=true # #### TRAINING # + deletable=true editable=true rnn.fit(Xtrain, ytrain) # + [markdown] deletable=true editable=true # #### TESTING # + deletable=true editable=true predicted = rnn.predict(Xtest) # + deletable=true editable=true from sklearn.metrics import mean_squared_error # + deletable=true editable=true sqr_err = mean_squared_error(ytest, predicted) print(sqr_err) # + deletable=true editable=true import matplotlib.pyplot as pplt from bulbea.entity.share import _reverse_cummulative_return _,_,_,ori_ytest = split(share, 'Close', normalize = False, train = 0.01) new_pre = [] for x in range(0,len(ori_ytest)): t = ori_ytest[x] predict = predicted[x] new_pre.append(_reverse_cummulative_return(t,predict)) pplt.plot(ori_ytest) pplt.plot(new_pre) pplt.show() # + deletable=true editable=true data.tail(5) # + deletable=true editable=true _, Xtest, _, ytest = split(share, 'Close', normalize = True, train = 0.0) _,ori_Xtest,_,ori_ytest = split(share, 'Close', normalize = False, train = 0.0) Xtest = np.reshape(Xtest, (Xtest.shape[0], Xtest.shape[1], 1)) # + deletable=true editable=true predicted = rnn.predict(Xtest) # + deletable=true editable=true mean_squared_error(ytest, predicted) # + deletable=true editable=true new_pre = [] for x in 
range(0,len(ori_ytest)): t = ori_ytest[x] predict = predicted[x] new_pre.append(_reverse_cummulative_return(t,predict)) pplt.plot(ori_ytest) pplt.plot(new_pre) pplt.show() # + deletable=true editable=true print(ori_Xtest[-1]) # + deletable=true editable=true from datetime import datetime rnn.model.save("models/{}_{}_{}.h5".format(coin_name,sqr_err,datetime.utcnow().strftime('%Y_%b_%d_%H_%M'))) # + deletable=true editable=true # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Image Denoising with Autoencoders # # ## Task 1: Introduction and Importing Libraries # ___ # Note: If you are starting the notebook from this task, you can run cells from all previous tasks in the kernel by going to the top menu and then selecting Kernel > Restart and Run All # ___ # + import numpy as np from tensorflow.keras.datasets import mnist from matplotlib import pyplot as plt from tensorflow.keras.models import Sequential, Model from tensorflow.keras.layers import Dense, Input from tensorflow.keras.callbacks import EarlyStopping, LambdaCallback from tensorflow.keras.utils import to_categorical # %matplotlib inline # - # ## Task 2: Data Preprocessing # ___ # Note: If you are starting the notebook from this task, you can run cells from all previous tasks in the kernel by going to the top menu and then selecting Kernel > Restart and Run All # ___ (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = x_train.astype('float')/255.0 x_test = x_test.astype('float')/255.0 x_train = np.reshape(x_train, (60000, 784)) x_test = np.reshape(x_test,(10000, 784)) # ## Task 3: Adding Noise # ___ # Note: If you are starting the notebook from this task, you can run cells from all previous tasks in the kernel by going to the top menu and then selecting Kernel > Restart and Run All # ___ # + noise_factor = 0.9 x_train_noisy = x_train + np.random.rand(60000, 784)*noise_factor x_train_noisy = np.clip(x_train_noisy, 0., 1.) x_test_noisy = x_test + np.random.rand(10000, 784)*noise_factor x_test_noisy = np.clip(x_test_noisy, 0., 1.) 
# - def plot(x, p, labels=False): plt.figure(figsize=(20,2)) for i in range(10): plt.subplot(1,10,i+1) plt.imshow(x[i].reshape(28,28), cmap='binary') plt.xticks([]) plt.yticks([]) if labels: plt.xlabel(np.argmax(p[i])) plt.show() plot(x_train, None) plot(x_train_noisy, None) # ## Task 4: Building and Training a Classifier # ___ # Note: If you are starting the notebook from this task, you can run cells from all previous tasks in the kernel by going to the top menu and then selecting Kernel > Restart and Run All # ___ # + classifier = Sequential([ Dense(256, activation='relu', input_shape=(784,)), Dense(256, activation='relu'), Dense(128, activation='relu'), Dense(10, activation='softmax') ]) classifier.compile( optimizer = 'adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'] ) classifier.summary() # - history=classifier.fit( x_train, y_train, batch_size=512, epochs=5 ) loss, accuracy = classifier.evaluate(x_test, y_test) print(accuracy) loss, accuracy = classifier.evaluate(x_test_noisy, y_test) print(accuracy) # ## Task 5: Building the Autoencoder # ___ # Note: If you are starting the notebook from this task, you can run cells from all previous tasks in the kernel by going to the top menu and then selecting Kernel > Restart and Run All # ___ # + Input_image = Input(shape=(784,)) encoded = Dense(64, activation='relu')(Input_image) decoded = Dense(784, activation='sigmoid')(encoded) autoencoder = Model(Input_image, decoded) autoencoder.compile( optimizer = 'adam', loss ='binary_crossentropy', metrics=['accuracy'] ) autoencoder.summary() # - # ## Task 6: Training the Autoencoder # ___ # Note: If you are starting the notebook from this task, you can run cells from all previous tasks in the kernel by going to the top menu and then selecting Kernel > Restart and Run All # ___ # + autoencoder.fit( x_train_noisy, x_train, validation_split=0.2, batch_size=512, epochs=100, verbose=False, callbacks=[ EarlyStopping(monitor='val_loss', patience=5), LambdaCallback(on_epoch_end=lambda e,l: print('{:.3f}'.format(l['val_loss']), end=' _ ')) ] ) print(' _ ') print('Training Done') # - # ## Task 7: Denoised Images # ___ # Note: If you are starting the notebook from this task, you can run cells from all previous tasks in the kernel by going to the top menu and then selecting Kernel > Restart and Run All # ___ predictions = autoencoder.predict(x_test_noisy) plot(x_test_noisy, None) plot(predictions, None) loss, accuracy = classifier.evaluate(predictions, y_test) print(accuracy) # ## Task 8: Composite Model # ___ # Note: If you are starting the notebook from this task, you can run cells from all previous tasks in the kernel by going to the top menu and then selecting Kernel > Restart and Run All # ___ # + input_image = Input(shape=(784,)) x=autoencoder(input_image) y=classifier(x) denoise_and_classify = Model(input_image, y) # - predictions = denoise_and_classify.predict(x_test_noisy) plot(x_test_noisy, predictions, True) plot(x_test, to_categorical(y_test), True) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Experimento #2 # ## Lâmpada Elétrica de Filamento # # ### Nome: # ### Matricula: 15/0013329 # Objetivo: Caracterização elétrica de uma lâmpada de filamento com o levantamento da curva de corrente e # tensão. Compreensão do conceito de resistividade em metais como função da temperatura. 
Observação da # relação entre temperatura e a cor (espectro de emissão) do filamento. # Importando os dados # + dict_data = { 'Vdc[V]': [0, 1, 2, 3, 4, 5, 6, 7], 'Va[V]': [0, 1.001, 2.00, 3.00, 4.00, 5.00, 6.00, 6.99], 'Vb[V]': [0, 0.1406, 0.1951, 0.244, 0.290, 0.329, 0.364, 0.400], 'Cor': ['Apagado','Apagado','Vermelho Tênue','Vermelho Alaranjado','Laranja','Amarelo','Amarelo Claro','Branco'] } r2 = 1.00 # - import pandas as pd import numpy as np import matplotlib.pyplot as plt data_df = pd.DataFrame.from_dict(dict_data) data_df.head() # Para fazer o preenchimento dos restantes dos dados será utilizados as seguintes formulas: # # $$ V_{AB}[V] = V_{A}[V] - V_{B}[V] $$ # # # $$ I[A] = \frac{V_{B}[V]}{R_{2}} $$ # # # $$ R_{1}[\Omega] = \frac{R_{2}(V_{A}[V] - V_{B}[V])}{V_{B}[V]} $$ # # # $$ P_{1}[W] = \frac{V_{AB}[V]^{2}}{R_{1}[\Omega]} $$ # def calculo_f(row, r2): '''Função para fazer o preenchimento dos dados completo da tabela''' try: Vab = row['Va[V]'] - row['Vb[V]'] I = row['Vb[V]']/r2 R1 = (r2*Vab)/row['Vb[V]'] P1 = (Vab**2)/R1 row['Vab[V]'] = Vab row['I[A]'] = I row['R1[OHMS]'] = R1 row['P1[W]'] = P1 except ZeroDivisionError: row['Vab[V]'] = 0 row['I[A]'] = 0 row['R1[OHMS]'] = 0 row['P1[W]'] = 0 return row full_data_df = data_df.apply(lambda row : calculo_f(row, r2), axis = 1) full_data_df # #### 2) Obtenha o gráfico de pontos experimentais para curva I x V AB . Onde I é a corrente que passa pelo filamento e V AB = (V A -V B ) é o potencial sobre a lâmpada. full_data_df.plot(kind='scatter',y='I[A]',x='Vab[V]',color='red', grid=True, title='Curva de I x Vab', legend=True, xticks=full_data_df['Vab[V]'], yticks=full_data_df['I[A]'], figsize=(10,10) ) plt.show() # #### 3) No mesmo gráfico do item-2, ajuste por mínimos quadrados os pontos experimentais e plote a curva de corrente x tensão (I x V AB ) do modelo da lâmpada incandescente. Explique se o modelo é adequado. xc = np.linspace(min(full_data_df['Vab[V]']), max(full_data_df['Vab[V]']), num=100) # + # calculate polynomial z = np.polyfit(x=full_data_df['Vab[V]'].values, y=(full_data_df['I[A]'].values), deg=2) f = np.poly1d(z) # calculate new x's and y's x_new = np.linspace(min(full_data_df['Vab[V]']), max(full_data_df['Vab[V]']), num=100) y_new = f(x_new) # - plt.plot(full_data_df['Vab[V]'], full_data_df['I[A]'],'o', x_new, y_new,) plt.xlabel('Vab[V]') plt.ylabel('I[A]') plt.show() # Calculando o erro médio f(full_data_df['Vab[V]']) # + from sklearn.metrics import mean_squared_error from math import sqrt em = mean_squared_error(full_data_df['I[A]'], f(full_data_df['Vab[V]'])) # - print('O root means foi de: ', em) print('As constates quadraticas do modelo foram: ', z) # ## 4) Questionário: # #### a) Assumindo um espectro de radiação aproximadamente de corpo negro, calcule em que temperatura uma lâmpada incandescente deveria operar em uma máxima eficiência de iluminação para o olho humano. # A temperatura considerada como ideal seno visivel a olho humano é representada na equação # # $$ \lambda[max] = \frac{0.7 + 0.4}{2} = 0.55 [\mu m] $$ # # Como o \lambda[max]*T é considerado uma constate 2897.756 [\mu m.K]. Podemos dividir o cosiente pela constate e encontrar a temperatura: # # $$ T = \frac{2897.756 [\mu m.K]}{0.55 [\mu m]} = 5268.6473 K $$ # # Não sendo totalmente eficiente visto que o valor de fusão dos filamentos de Tugstênio são de aproximadamente 3.695 K # #### b) Pesquisa: Qual a maior eficiência teórica que poderia ser obtida de lâmpadas com filamento incandescente de tungstênio comuns? 
Explique e cite as suas fontes # A maior eficiência encontrara em um filamento de Tungstêncio foi: # # $$ \lambda[max] = \frac{2897.756 [\mu m.K]}{3695} K = 0.78 [\mu m] $$ # ## 5) Questões Complementares: # #### g) Verifique se o modelo físico-matemático I vs. V AB adotado neste experimento foi adequado, calculando a métrica de Erro Quadrático Médio (EQM). # Como o erro quadrado foi bastante pequeno é possivel assumir que o modelo de aproximação de metodos quadrados teve um resultado muito bom # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={"metadata": false} # ### Learning Objectives # The goal of this notebook is to see and practice: # - Creating a data set by merging tables # - Select and engineer features of the data set # - Transform the values of the data # - Visualize the data set # - See and practice data science research tools and practices # # ### 1 Practical Data Science Research # Can we find good online learning strategies? # # Define: # - "strategy", what is a strategy for learning and how would you observe it? # - What can a strategy look like, what are the features that define it? # - What are the strategies we observe in the data given the features we postulate? # # **Note:** The features are important # + pycharm={"is_executing": false, "metadata": false, "name": "#%%\n"} import os import pandas as pd import seaborn as sns import matplotlib.pyplot as plt # + [markdown] pycharm={"metadata": false} # **Note:** what do we do if it exists and there is data # in it what are the risks? # - Risk reproduction or recomputing # - Always delete output folder and make the output and input folders parameters # - Never delete output folder # # + pycharm={"is_executing": false, "metadata": false, "name": "#%%\n"} # Path to the data DATA_FOLDER = '../data' # Set plotting style sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2.5}) # Path to the output we create OUT_PATH = '../output' # If path does not exist make it if not os.path.exists(OUT_PATH): # Create folder os.mkdir(OUT_PATH) # + [markdown] pycharm={"metadata": false} # ### 2 Create a data set # Why do we need to create a data set? # - The data is rarely structured so each data file contains all the information to answer your research questions. # - Information and insights are often gained by linking data # - The data stored often does not describe the features believed to be useful to answer the research question # - Which features to select # - Which new features to create # # + [markdown] pycharm={"metadata": false, "name": "#%% md\n"} # ### 3 Does clicking on resources correlate with final result performance for a module? # That is, is a good strategy for learning to click on course resources. # # By looking at the schema we see which tables are needed and which columns can be used to merge them into one data set. # The information is spread over multiple tables: `studentVle.csv` and `studentInfo.csv` # # What are other features we can use? 
# + pycharm={"is_executing": false, "metadata": false, "name": "#%%\n"} # Load tables print('studentVle:') student_vle_path = os.path.join(DATA_FOLDER, 'studentVle.csv') student_vle = pd.read_csv(student_vle_path) print(student_vle.info()) print('studentInfo') student_info_path = os.path.join(DATA_FOLDER, 'studentInfo.csv') student_info = pd.read_csv(student_info_path) print(student_info.info()) # + [markdown] pycharm={"metadata": false} # Filter on a module code # # + pycharm={"is_executing": false, "metadata": false, "name": "#%%\n"} # Pick one code_module MODULE_CODE = 'AAA' student_vle = student_vle.loc[student_vle['code_module'] == MODULE_CODE] print('student_vle shape: {}'.format(student_vle.shape)) student_info = student_info.loc[student_info['code_module'] == MODULE_CODE] print('student_info shape: {}'.format(student_info.shape)) # + [markdown] pycharm={"metadata": false} # Merge the tables # - Identify the columns to merge the tables on by looking at the schema # # **Note:** Keep the tables slim by only selecting the columns we need for the data set creation # # + pycharm={"is_executing": false, "metadata": false, "name": "#%%\n"} # Merge student info and assessment on the student id, module and presentation code. student_info_and_vle = pd.merge(student_info, student_vle, on=['code_module', 'id_student', 'code_presentation'], how='inner') # Get the columns we need (You can get all as well) student_info_and_vle = student_info_and_vle[ ['id_student', 'final_result', 'code_module', 'code_presentation', 'sum_click', 'id_site'] ] student_info_and_vle.drop_duplicates(inplace=True) print(student_info_and_vle.info()) # + [markdown] pycharm={"metadata": false} # ### 4 Create and Select Features for the data set # Total number of clicks, the id of the resources visited and the final result for each student # + pycharm={"is_executing": false, "metadata": false, "name": "#%%\n"} # Get the sum of all the clicks for each student total_clicks = student_info_and_vle.groupby(by='id_student').sum_click.sum() # Get the number of resources each student visited n_sites = student_info_and_vle.groupby(by='id_student').id_site.count() # Get the final result for the student final_result = student_info_and_vle.groupby(by='id_student').final_result.first() # Create a data frame for the data set df = pd.DataFrame({'total_clicks': total_clicks.values, 'site_count': n_sites.values, 'final_result': final_result.values}) # + [markdown] pycharm={"metadata": false, "name": "#%% md\n"} # ### 5 Save the data set # Everyone has their favorite format, e.g. `csv` or `json` # + pycharm={"is_executing": false, "metadata": false, "name": "#%%\n"} # Create path for the data set as a json file out_file = os.path.join(OUT_PATH, 'final_results.json') # Save the data set as a json file df.to_json(out_file) # How do we save as CSV? # + pycharm={"is_executing": false, "metadata": false, "name": "#%%\n"} # Plot the correlation between total_clicks and number of sites visited sns.jointplot(x='total_clicks', y='site_count', data=df, kind='reg') # Save the figure to path with a "tight" bounding box around the figure plt.savefig(os.path.join(OUT_PATH, 'jointplot_final_results.pdf'), bbox_inches='tight') # + pycharm={"is_executing": false, "metadata": false, "name": "#%%\n"} # Plot the final result as well sns.scatterplot(x='total_clicks', y='site_count', hue='final_result', data=df) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
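# + pycharm={"is_executing": false, "metadata": false, "name": "#%%\n"}
# An earlier cell asks "How do we save as CSV?": a minimal sketch reusing the `df` and
# `OUT_PATH` already defined above in this notebook.
df.to_csv(os.path.join(OUT_PATH, 'final_results.csv'), index=False)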
# + pycharm={"is_executing": false, "metadata": false, "name": "#%%\n"} # Individual plot for each final result g = sns.FacetGrid(df, row='final_result') _ = g.map(sns.scatterplot, 'total_clicks', 'site_count') # + pycharm={"is_executing": false, "metadata": false, "name": "#%%\n"} # Are the features correlated (TODO how to check) sns.pairplot(df) # + [markdown] pycharm={"metadata": false, "name": "#%% md\n"} # # #### What does other features look like? # - What features should be selected? # - How to create a data set that looks at asset use as well? # - Check correlation between features # - Check the variance of the features # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import pandas as pd import numpy as np from _utils.clean import normalize, normalize_frame, exclude_3sd derivs_dir = os.path.join('..','derivatives') scales_dir = os.path.join(derivs_dir,'qualtrics','2.subscaled') output_dir = os.path.join(derivs_dir,'05.subject-level') try:os.mkdir(output_dir) except WindowsError as e: print(e) # # Import data fname=os.path.join(derivs_dir,'sub-all_task-all_VALUES.xlsx') behav_data = pd.read_excel(fname).rename(columns={'subjnum':'ssid'}) fname=os.path.join(scales_dir,'all_subscales.csv') scale_data = pd.read_csv(fname) behav_data.head() scale_data.head() # # Normalize PANAS and BISBAS subscales keys_to_normalize = [ 'PAS','NAS','BIS','BAS_fs','BAS_rr','BAS_dr','DMS_i','DMS_r','DMS_d','DMS_s','DMS_a', ] df = scale_data.copy(deep=True) df.columns=[ 'ssid','zip','sleep','stress','fin_dif','PAS','NAS','BAS_dr','BAS_fs','BAS_rr','BIS', 'DMS_i','DMS_r','DMS_d','DMS_s','DMS_a','fin_lit' ] df['study'] = df['ssid'].astype(str).str[0] z_keys=[] for key in keys_to_normalize: z_key = 'z_'+ key z_keys.append(z_key) df[z_key] = df.apply(normalize,axis=1) df.head() columns = z_keys + ['ssid'] norms = df[columns] norms.head() # ### Natural Logarithm of subscales # We take the natural log of each subscale's RAW score. # # *NOT* their normalized score, because we can't take the log of a negative. ln_keys = ['ln_'+key for key in keys_to_normalize] df[ln_keys] = df[keys_to_normalize].apply(np.log,axis=1) df.head() logs = df[ln_keys + ['ssid']] logs.head() # # 3sd trial exclusions behav_data.columns df=behav_data[['ssid','block','trial','domain','estimation','trueprob-norm','waschoiceoptimal','val-estdiff-valid']].rename( columns={'val-estdiff-valid':'val_estdiff_valid'} ) subj_3sd = df.groupby('ssid').std()['val_estdiff_valid'] * 3 subj_3sd.head() subj_means = df.groupby('ssid').mean()['val_estdiff_valid'] subj_means.head() df['valError_3sd'] = df.apply(exclude_3sd,axis=1) df.head() subj_means = df.groupby('ssid').mean()['valError_3sd'] subj_means = pd.DataFrame(subj_means).reset_index() subj_means.head() # # gender-judgment trial exclusions # + active="" # We don't have the gender judgment data in the source file for this notebook, but this is where I want to exclude trials in which the gender judgment was wrong. Have to go further upstream to include the gender judgment in this source file, then apply the gender judgment exclusion here. 
# - # # subject-level means optimal_choice_freq = df.groupby('ssid').mean().reset_index()[['ssid','waschoiceoptimal']] optimal_choice_freq.head() domain_means = df.groupby(['ssid','domain']).mean().reset_index() domain_means.head() gain_ave_val_error = domain_means[domain_means['domain'] == 'GAIN'][['ssid','valError_3sd']] gain_ave_val_error = gain_ave_val_error.set_index('ssid') loss_ave_val_error = domain_means[domain_means['domain'] == 'LOSS'][['ssid','valError_3sd']] loss_ave_val_error = loss_ave_val_error.set_index('ssid') # ### Framing Normalization # We want to normalize for the way the value estimation question is framed. # # We're going to multiply valError means by `1` for subjects who were estimating the probability that the stock is *good*, and multiply means by `-1` for subjects who were estimating the probability that the stock is *bad*. # # 100s: `* 1` # # 200s: `* -1` # # 300s: `* 1` means_df = df.groupby('ssid').mean()[['valError_3sd']] means_df[85:91] nf_valerror = pd.DataFrame(means_df.apply(normalize_frame,axis=1)) nf_valerror = nf_valerror.rename(columns={0:'nf_valError'}) nf_valerror[85:91] gain_ave_val_error[85:91] nf_valerr_gain = pd.DataFrame(gain_ave_val_error.apply(normalize_frame,axis=1)) nf_valerr_gain = nf_valerr_gain.rename(columns={0:'nf_gainValError'}) nf_valerr_gain[85:91] loss_ave_val_error[85:91] nf_valerr_loss = pd.DataFrame(loss_ave_val_error.apply(normalize_frame,axis=1)) nf_valerr_loss = nf_valerr_loss.rename(columns={0:'nf_lossValError'}) nf_valerr_loss[85:91] # # output output = pd.DataFrame({ 'ssid':list(subj_means['ssid']), 'valError':list(subj_means['valError_3sd']), }) output.head() output = output.merge(optimal_choice_freq).rename(columns={'waschoiceoptimal':'optimal_choice_freq'}) output.head() output = output.merge(gain_ave_val_error.rename(columns={'valError_3sd':'gainValError'}).reset_index()) output.head() output = output.merge(loss_ave_val_error.rename(columns={'valError_3sd':'lossValError'}).reset_index()) output.head() output = output.merge(nf_valerror.reset_index()) output = output.merge(nf_valerr_gain.reset_index()) output = output.merge(nf_valerr_loss.reset_index()) output[85:91] output['valWedge'] = abs(output['gainValError'] - output['lossValError']) output['nf_valWedge'] = abs(output['nf_gainValError'] - output['nf_lossValError']) output.head() output = output.merge(norms) output.head() output = output.merge(logs) output.head() fname=os.path.join(output_dir,'subject-level.csv') #columns = ['ssid','valError','gainValError','lossValError','valWedge'] + zkeys output.to_csv(fname,index=False) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Hall strip # **Background** # # Motion in a rotating frame can be decomposed into a fast 'cyclotron' coordinate motion and a slower motion of the centers (X,Y) of cyclotron motion. It can be shown that X and Y do not commute, even though absolute spatial coordinates commute. As a result, a force generates motion in a perpendicular direction (the Hall effect). As a special case, a saddle potential enacts a squeezing operator on (X, Y), resulting in the exponential growth and decay of orthogonal sizes of a collection of particles. Since a BEC rotating sufficiently fast in an elliptical trap experiences a saddle potential in the rotating frame, this results in the extension of the cloud into a long strip. 
The strip has an unbounded length and a minimum width defined by the zero-point cyclotron motion of the bosons. # # For more, see https://arxiv.org/abs/1911.12347 # # + import sys, os sys.path.append(os.path.abspath('..')) import numpy as np import matplotlib.pyplot as plt from copy import copy, deepcopy from tqdm.notebook import tqdm from scipy.ndimage import rotate as rotate_image from scipy.optimize import curve_fit import time import h5py import pandas as pd from PIL import Image from condensate import Wavefunction, Environment, hbar # %matplotlib inline # - # ## Prepare groundstate # + omega = 2*np.pi*10 epsilon = 0.225 dt = 1e-5 fov = 300e-6 e = Environment(DIM=512, fov=fov, N=0.5e6) e.harmonic_potential(omega=omega, epsilon=epsilon) # - groundstate = Wavefunction(e) groundstate.initialize_Psi(width=100) groundstate.relax(vmax=3e8, dt=dt, steps=10000) groundstate.env.rotating_frame(omegaR=[0.001]*10000) groundstate.evolve(dt=dt, steps=10000, cooling=0.1) groundstate.show_density() groundstate.show_phase() plt.imshow(np.angle(groundstate.Psi)) # ## Spin up def rotation_freq_simple(timestep, whirrTime=30000, whirrMax=omega): tanh = omega * 1.002 * np.tanh(3.46 * timestep / whirrTime) return np.min([whirrMax,tanh]) def rotation_freq(timestep, whirrTime=30000, whirrMax=omega): firstrampTime = whirrTime/10 tanh1 = np.max([0.05*omega, omega * 1.001 * np.tanh(3.3 * (timestep-0.86*firstrampTime) / whirrTime)]) tanh2 = 0.05*omega *(0.97+ 1.001 * np.tanh(3.3 * (-firstrampTime+timestep) / firstrampTime)) if timestep1 out.append(density) for i in tqdm(range(frames), leave=False): psi.evolve(dt=dt, steps=runtime, cooling=0.0) density = psi.density density *= density>1 out.append(density) with h5py.File(datafile, 'a') as f: dsname = f"geosqueeze" dset = f.create_dataset(dsname, data=np.array(out)) dset.attrs['time'] = dt*runtime * np.arange(frames) dset.attrs['dt'] = dt geometric_squeezing(widestrip, steps=90000, frames=300) # ## Process Data # + # Set some processing parameters viewx = 350 viewy = 350 fov = 300e-6 dx = fov/512 mass = 3.8e-26 lb = np.sqrt(hbar / (2*mass *omega)) x = dx * (np.arange(viewx) - viewx//2) y = dx * (np.arange(viewy) - viewy//2) dt = 1e-5 times = np.arange(51)*dt*5e3 def gauss(x,x0,a,s): return a*np.exp(- (x-x0) **2 / (2*s**2)) def satexp(t, tau,a): return (3.31+a*np.exp(-t/tau)) def rotate_crop(array, viewx=200, viewy=350, angle=0): """ Rotate and crop a 2d array """ s = np.shape(array) rotated = rotate_image(array, angle, reshape=False) cropped = rotated[(s[0]-viewy)//2 : (s[0]+viewy)//2 , (s[1]-viewx)//2 : (s[1]+viewx)//2 ] return cropped def find_angle(ncrop): xsize = len(ncrop) xx = np.linspace(-10, 10, xsize) yy = np.linspace(-10, 10, xsize) xx, yy = np.meshgrid(xx, yy) # Calculate the moment of inertia tensor Ixx = np.sum(ncrop*yy*yy) Iyy = np.sum(ncrop*xx*xx) Ixy = np.sum(ncrop*xx*yy) Iyx = Ixy I =np.array( [[Ixx, Ixy], [Iyx, Iyy]]) evals, evecs = np.linalg.eig(I) iangle = (180*np.arctan(evecs[np.argmin(evals)][1]/evecs[np.argmin(evals)][0])/np.pi) return iangle def process_r1d(dset): """ Process a dataset corresponding to a single squeeze time """ clouds = np.array([rotate_crop(a, viewx, viewy, 42) for a in dset[()]]) times = np.array(dset.attrs['time']) xprofile = np.sum(rotate_crop(clouds[0],viewy=150), axis=0) xprofilemax = np.max(xprofile) gaussfit,_ = curve_fit(gauss, x, xprofile, [0, xprofilemax, 6e-6]) newresults = pd.DataFrame([[clouds, times, xprofile, gaussfit]], columns=columns) return newresults # + columns = ['cloud', 'time', 'xprofile', 'yprofile', 
'gaussfit'] gs = pd.DataFrame(columns=columns) with h5py.File('data/geometricsqueezing_withramp_straight.hdf5', 'r') as f: for name in tqdm(f): dset=f[name] alltimes = np.array(dset.attrs['time']) for i in tqdm(range(len(dset)-1), leave=False): cloud = rotate_crop(np.fliplr(dset[i]), viewx, viewy, 43.5) xprofile = np.sum(rotate_crop(cloud, viewx=350, viewy=150), axis=0) xprofilemax = np.max(xprofile) gaussfit,_ = curve_fit(gauss, x, xprofile, [0, xprofilemax, 6e-6]) yprofile = np.sum(rotate_crop(cloud, viewx=150, viewy=350), axis=1) newresults = pd.DataFrame([[cloud, alltimes[i], xprofile, yprofile, gaussfit]], columns=columns) gs = gs.append(newresults) gs = gs.reset_index() # - # ### Hall drift velocity yprofiles = [] for i,r in gs.iterrows(): if i>150: yprofiles.append(r.yprofile) yprofiles = np.array(yprofiles) R = 25e-6 mask = abs(y) < R Rj = np.argmax(np.diff(mask)) plt.plot(1e6*y, yprofiles[0,:]) plt.plot(1e6*y, yprofiles[70,:]) plt.axvspan(-1e6*y[Rj],1e6*y[Rj], color='r', alpha=0.2) # + def linear(x,m,b): return m*x + b Rs = np.linspace(5e-6,100e-6,100) columns = ['R', 'xint', 'yint', 'fit', 'slope'] drift = pd.DataFrame(columns=columns) deltat = (gs.iloc[1].time - gs.iloc[0].time) for R in tqdm(Rs, leave=False): mask = abs(y) < R Rindex = np.argmax(np.diff(mask)) N0 = np.sum(yprofiles[0,mask]) xint = [] yint = [] for i,yp in enumerate(yprofiles): Nt = np.sum(yp[mask]) integral = np.trapz(yprofiles[:i,Rindex] / dx, dx=deltat) xint.append( 2*omega * lb * integral / N0) yint.append(1-(Nt/N0)) f,_ = curve_fit(linear, xint,yint, [0.1,0]) newresults = pd.DataFrame([[R, xint, yint, f, f[0]]], columns=columns) drift = drift.append(newresults) # - Forcestraight = 0.5*epsilon*driftstraight.R/lb Forcequartic = 0.5*epsilon*driftquartic.R/lb testx = np.linspace(0,2.5) plt.plot(testx, linear(testx,1,0),'r-', label='expected') plt.plot(Forcestraight, driftstraight.slope,'g.-', label=r'GP $x^2-y^2$') plt.plot(Forcequartic, driftquartic.slope,'b.-', label=r'GP $x^2-y^2 + r^4$') plt.legend() plt.xlabel(r'$F(2m\omega^2\ell_B)$') plt.ylabel(r'$v_d (\omega\ell_B)$') plt.xlim([0,2.5]) plt.tight_layout() plt.savefig('figures/rotini1d/Fig2.png', dpi=120) gpexport = pd.DataFrame(columns=['F', 'saddle','quartic']) gpexport.F = Forcestraight gpexport.saddle = driftstraight.slope gpexport.quartic = driftquartic.slope gpexport.to_csv('data/gp_drift.csv') # + # driftquartic = drift.copy() # + # driftstraight = drift.copy() # - plt.plot(drift.iloc[50].xint,drift.iloc[50].yint,'b.') testx = np.linspace(0,0.8,100) plt.plot(testx, linear(testx, *drift.iloc[50].fit),'r-') plt.ylabel(r'$1-N(t)/N(0)$') plt.xlabel(r'$\omega\ell_B\int_0^t dt \,n(t)\,/\,\,N(0)$') plt.tight_layout() plt.savefig('figures/rotini1d/Fig2inset.png', dpi=120) # ### squeezing cloud = gs.iloc[202].cloud plt.imshow(cloud) len(gs) gs.iloc[i].gaussfit i=2 plotx = (x-gs.iloc[i].gaussfit[0])/lb plt.plot(plotx, 1e-16 * gs.iloc[i].xprofile) plt.plot(plotx, 1e-16 * gauss(x,*gs.iloc[i].gaussfit)) plt.xlim([-10,10]) plt.xlabel(r'r ($\ell_B$)') plt.ylabel(r'n (a.u.)') plt.savefig('figures/rotini1d/largemu.png', dpi=120) plt.show() # i=50 # plotx = (x-gs.iloc[i].gaussfit[0])/lb # plt.plot(plotx, 1e-16 * gs.iloc[i].xprofile) # plt.plot(plotx, 1e-16 * gauss(x,*gs.iloc[i].gaussfit)) # plt.xlim([-5,5]) # plt.xlabel(r'r ($\ell_B$)') # plt.ylabel(r'n (a.u.)') # plt.show() i=250 plotx = (x-gs.iloc[i].gaussfit[0])/lb plt.plot(plotx, 1e-16 * gs.iloc[i].xprofile) plt.plot(plotx, 1e-16 * gauss(x,*gs.iloc[i].gaussfit)) plt.xlim([-5,5]) plt.xlabel(r'r ($\ell_B$)') 
plt.ylabel(r'n (a.u.)') plt.savefig('figures/rotini1d/smallmu.png', dpi=120) i=250 gpexport = pd.DataFrame(columns=['x', 'n']) gpexport.x, gpexport.n = (x-gs.iloc[i].gaussfit[0])/lb, 1e-16 * gs.iloc[i].xprofile gpexport.to_csv('data/gp_smallmu.csv') gs.head() widths = np.array([abs(r[6][2]) for r in gs.itertuples()]) plt.plot(gs['time'], 1e6*widths) plt.axhline(1e6*lb/np.sqrt(2),c='k') plt.xlabel('time [s]') plt.ylabel(r'$\sigma$ [$\mu m$]') # plt.ylim([2,8.5]) # plt.savefig('figures/rotini1d/widths_nog.png') labdata = pd.read_csv('data/widthData.csv', names=['zt', 's']) labdata.sort_values(by='zt', inplace=True, ignore_index=True) zeta = 1.2*epsilon * omega / 2 plt.figure(figsize=(7,4)) # plt.figure(figsize=(17,14)) widths = np.array([1e-6 * abs(r[6][2]) / lb for r in gs.itertuples()]) plt.plot(labdata.zt[labdata.s<3], labdata.s[labdata.s<3],'k.-' ,alpha=0.4, label='data') plt.plot((zeta * gs['time'] )-0.2, 1e6*widths, 'r-', label='GP') plt.axhline(1/np.sqrt(2),c='k') plt.xlabel(r'$\zeta t$') plt.ylabel(r'$\sigma$ [$\ell_B$]') plt.legend() plt.ylim([0,2.5]) plt.xlim([0,7.5]) # plt.savefig('figures/rotini1d/widths_much_less_wiggles.png', dpi=200) gpexport = pd.DataFrame(columns=['zt', 's']) gpexport.zt = (zeta * gs['time'] )-0.2 gpexport.s = 1e6*widths # + # gpexport.to_csv('data/gp_squeezing_slowramp.csv') # - widths = np.array([abs(r[6][2]) for r in gs.itertuples()]) plt.plot(gs['time'], 1e6*widths) plt.axhline(1e6*lb/np.sqrt(2),c='k') plt.xlabel('time [s]') plt.ylabel(r'$\sigma$ [$\mu m$]') # plt.savefig('figures/rotini1d/widths.png') # + angles = [] for i,r in gs.iterrows(): angles.append(find_angle(r.cloud)) plt.plot(gs['time'], -44.97+np.array(angles)) # plt.axhline(90,c='k') plt.xlabel('time [s]') plt.ylabel(r'$\theta$ [deg]') # plt.savefig('figures/rotini1d/angles.png') # - # # Reproducing lab parameters # + omega = 2*np.pi*88.6 epsilon = 0.2 dt = 1e-6 e = Environment(DIM=512, fov=120e-6, N=8e5) e.harmonic_potential(omega=omega, epsilon=epsilon) groundstate = Wavefunction(e) groundstate.initialize_Psi(width=100) groundstate.relax(vmax=1e9, dt=2*dt, steps=4000) groundstate.evolve(dt=dt, cooling=0.01, steps=4000) whirrMax = omega steps = 100000 whirrtime = 100000 times = np.arange(steps) Omega = [np.min([whirrMax, omega * 1.001 * np.tanh(3.3 * t / whirrtime)]) for t in times] plt.plot(dt*times, Omega) plt.axhline(Omega[-1],ls='--',c='k') plt.show() # + # Run the sim - takes longer than the 10Hz sim above hallstrip = deepcopy(groundstate) hallenv = hallstrip.env hallenv.rotating_frame(omegaR=Omega) hallenv.absorbing_boundaries(strength=1, radius=e.fov/2) hallstrip.evolve(dt=dt, steps=steps, cooling=0) hallstrip.show_density() hallstrip.show_phase() # - finalstrip = deepcopy(hallstrip) # ## Crop and find the angle to the diagonal after the ramp width = 512//2 plotrange=140 ncrop = finalstrip.density[(width-plotrange):(width+plotrange),(width-plotrange):(width+plotrange)] plt.imshow(ncrop) # + xsize = len(ncrop) xx = np.linspace(-10, 10, xsize) yy = np.linspace(-10, 10, xsize) xx, yy = np.meshgrid(xx, yy) # Calculate the moment of inertia tensor Ixx = np.sum(ncrop*yy*yy) Iyy = np.sum(ncrop*xx*xx) Ixy = np.sum(ncrop*xx*yy) Iyx = Ixy I =np.array( [[Ixx, Ixy], [Iyx, Iyy]]) evals, evecs = np.linalg.eig(I) iangle = (180*np.arctan(evecs[np.argmin(evals)][1]/evecs[np.argmin(evals)][0])/np.pi) # - print(f"angle from diagonal equipotential: {(iangle-45):.2f} degrees") # ## Quartic potential # + omega = 2*np.pi*10 epsilon = 0.225 dt = 1e-5 fov = 300e-6 DIM = 512 e = Environment(DIM=512, fov=fov, 
N=1e5) V = np.zeros((DIM,DIM)) for i in range(DIM): for j in range(DIM): x = (i-DIM//2)*fov / DIM y = (j-DIM//2)*fov / DIM rsq = x**2 + y**2 harmonic = 0.5 * e.mass * ( ((1-epsilon) * (omega * x) **2) + ((1+epsilon) * (omega * y) **2)) quartic = 3e6* 0.5 * e.mass * (omega**2) * (rsq**2) V[i,j] = (harmonic + quartic)/hbar e.custom_potential(V) e.show_potential() # - groundstate = Wavefunction(e) groundstate.initialize_Psi(width=100) groundstate.relax(vmax=3e8, dt=dt, steps=4000) V = e.V.copy() for i in range(DIM): for j in range(DIM): x = (i-DIM//2)*fov / DIM y = (j-DIM//2)*fov / DIM rsq = x**2 + y**2 centrif = 0.5 * e.mass * (omega**2) * rsq V[i,j] -= centrif/hbar a = plt.contour(V) plt.colorbar() plt.gca().set_aspect('equal', 'box') plt.show() def rotation_freq(timestep, whirrTime=30000, whirrMax=omega): return np.min([whirrMax, omega * 1.001 * np.tanh(3.3 * timestep / whirrTime)]) # + steps = 100000 times = np.arange(steps) Omega = [ rotation_freq(t) for t in times] plt.plot(omega*dt*times, np.array(Omega)/omega) plt.axhline(Omega[-1]/omega,ls='--',c='k') plt.xlabel(r'$\omega t$') plt.ylabel(r'$\Omega/\omega$') # plt.savefig('figures/rampup.png') plt.show() # - def geometric_squeezing_withramp(stripPsi, steps=60000, frames=300, datafile='data/geometricsqueezing_withramp.hdf5'): steps = frames*(steps//frames) runtime = steps//frames dt = 1e-5 out = [] psi = copy(stripPsi) for i in tqdm(range(frames), leave=False): times = np.arange(i*runtime, (i+1)*runtime) psi.env.rotating_frame(omegaR=[ rotation_freq(t) for t in times ]) psi.evolve(dt=dt, steps=runtime, cooling=0.0) density = psi.density density *= density>1 out.append(density) with h5py.File(datafile, 'a') as f: dsname = f"geosqueeze" dset = f.create_dataset(dsname, data=np.array(out)) dset.attrs['time'] = dt*runtime * np.arange(frames) dset.attrs['dt'] = dt hallstrip = deepcopy(groundstate) hallstrip.env.absorbing_boundaries(strength=1, radius=e.fov/2) geometric_squeezing_withramp(hallstrip, steps=60000, datafile='data/geometricsqueezing_withramp_quartic.hdf5') # + columns = ['cloud', 'time'] gs = pd.DataFrame(columns=columns) with h5py.File('data/geometricsqueezing_withramp_quartic.hdf5', 'r') as f: for name in tqdm(f): dset=f[name] alltimes = np.array(dset.attrs['time']) for i in tqdm(range(len(dset)-1), leave=False): cloud = np.fliplr(dset[i]) newresults = pd.DataFrame([[cloud, alltimes[i]]], columns=columns) gs = gs.append(newresults) # - plt.imshow(gs.iloc[0].cloud) steps = 60000 times = np.arange(steps) Omega = [ rotation_freq(t) for t in times] frames= 300 def plot_frame(frame, savefig=False): V_frame = hallstrip.env.V.copy() Omega_frame = Omega[frame*(steps//frames)] for i in range(DIM): for j in range(DIM): x = (i-DIM//2)*fov / DIM y = (j-DIM//2)*fov / DIM rsq = x**2 + y**2 centrif = 0.5 * e.mass * (Omega_frame**2) * rsq V_frame[i,j] -= centrif/hbar f, axarr = plt.subplots(ncols=2, figsize=(8,4)) axarr[0].imshow(gs.iloc[frame].cloud, vmax=5e13, extent=1e6*np.array([-fov/2,fov/2,-fov/2,fov/2]), cmap='inferno') axarr[0].contour(V_frame, 25, alpha=0.7, vmin=-1e3, vmax=1e3, extent=1e6*np.array([-fov/2,fov/2,-fov/2,fov/2]), cmap='RdBu_r') axarr[0].set_aspect('equal', 'box') view = 213/2 axarr[0].set(xlim=[-view,view], ylim=[-view,view], xlabel=r'x [$\mu m$]', ylabel=r'y [$\mu m$]') axarr[1].plot(omega*dt*times, np.array(Omega)/omega) plt.axhline(Omega[-1]/omega,ls='--',c='k') xnow = omega*dt*times[frame * steps//frames] ynow = Omega_frame/omega axarr[1].set(xlim=[0,np.max(omega*dt*times)], ylim=[0,1.1], xlabel=r'$\omega t$', 
ylabel=r'$\Omega/\omega$') axarr[1].plot([xnow], [ynow], 'ro') plt.axvline(xnow, c='k', alpha=0.1) plt.axhline(ynow, c='k', alpha=0.1) plt.tight_layout() if savefig: plt.savefig(f'figures/rotini1d/quartic_frames/geosqueeze_{frame}.jpg', dpi=190) plt.close() plot_frame(140, savefig=False) 2*1e-6*(213/2) * DIM / fov img = gs.iloc[140].cloud img = np.uint8(255*img/np.max(img)) size=363 img = rotate_crop(img, viewx=size,viewy=size) im = Image.fromarray(img) im # im.save('figures/rotini1d/GPquarticframe.tiff') plt.imshow(gs.iloc[140].cloud, vmax=5e13, extent=1e6*np.array([-fov/2,fov/2,-fov/2,fov/2]), cmap='inferno') for frame in tqdm(range(frames-1)): plot_frame(frame, savefig=True) V = hallenv.V.copy() for i in range(DIM): for j in range(DIM): x = (i-DIM//2)*fov / DIM y = (j-DIM//2)*fov / DIM rsq = x**2 + y**2 centrif = 0.5 * e.mass * (omega**2) * rsq V[i,j] -= centrif/hbar a = plt.contour(V) plt.gca().set_aspect('equal', 'box') plt.imshow(hallstrip.density) plt.show() hallenv.rotating_frame(omegaR = [omega]*40000) hallstrip.evolve(dt=dt, steps=40000) # # Noninteracting # + omega = 2*np.pi*10 epsilon = 0.225 dt = 1e-5 fov = 300e-6 e = Environment(DIM=512, fov=fov, N=0.00001) e.harmonic_potential(omega=omega, epsilon=epsilon) groundstate = Wavefunction(e) groundstate.initialize_Psi(width=100) groundstate.relax(vmax=3e8, dt=dt, steps=4000) # - hallstrip = deepcopy(groundstate) # + hallenv = hallstrip.env hallenv.rotating_frame(omegaR=[omega]*100) hallenv.absorbing_boundaries(strength=1, radius=e.fov/2) hallstrip.evolve(dt=dt, steps=100, cooling=0) # - widestrip = deepcopy(hallstrip) geometric_squeezing(widestrip, steps=70000, frames=300, datafile='data/geometricsqueezing_nonint.hdf5') # + columns = ['cloud', 'time', 'xprofile', 'gaussfit'] gsnonint = pd.DataFrame(columns=columns) with h5py.File('data/geometricsqueezing_nonint.hdf5', 'r') as f: for name in tqdm(f): dset=f[name] alltimes = np.array(dset.attrs['time']) for i in tqdm(range(len(dset)-1), leave=False): cloud = rotate_crop(np.fliplr(dset[i]), viewx, viewy, 42.2) xprofile = np.sum(rotate_crop(cloud, viewx=350, viewy=150), axis=0) xprofilemax = np.max(xprofile) gaussfit,_ = curve_fit(gauss, x, xprofile, [0, xprofilemax, 6e-6]) newresults = pd.DataFrame([[cloud, alltimes[i], xprofile, gaussfit]], columns=columns) gsnonint = gsnonint.append(newresults) # - widths = np.array([abs(r[4][2]) for r in gs.itertuples()]) plt.plot(gs['time'], 1e6*widths) plt.axhline(1e6*lb/np.sqrt(2),c='k') plt.xlabel('time [s]') plt.ylabel(r'$\sigma$ [$\mu m$]') plt.ylim([2,8.5]) # plt.savefig('figures/rotini1d/widths_nog.png') zeta = 0.222 * omega / 2 plt.figure(figsize=(7,4)) # plt.figure(figsize=(17,14)) widths = np.array([1e-6 * abs(r[4][2]) / lb for r in gs.itertuples()]) widthsnonint = np.array([1e-6 * abs(r[4][2]) / lb for r in gsnonint.itertuples()]) plt.plot(labdata.zt, labdata.s,'k.-' ,alpha=0.05, label='data') plt.plot((zeta * gs['time'] )+1.54, 1e6*widths, 'r-', label='GP') plt.plot((zeta * gsnonint['time'] )+3, 1e6*widthsnonint, 'b-', label='GP g=0') plt.axhline(1/np.sqrt(2),c='k') plt.xlabel(r'$\zeta t$') plt.ylabel(r'$\sigma$ [$\ell_B$]') plt.legend() plt.ylim([0,2]) plt.xlim([0,8.5]) # plt.savefig('figures/rotini1d/widths_vsnonint.png', dpi=200) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:TF-1_1] # language: python # name: conda-env-TF-1_1-py # --- # # VGG Neural Network Architecture # *by * # # # VGG 
Origins
#
# VGG is a convolutional neural network model introduced in 2014 by the **Visual Geometry Group** (VGG) at the University of Oxford. Convolutional networks have been the state of the art in visual recognition. The team's main contribution was a significant improvement over the prior state-of-the-art convolutional networks, achieving, for its time, a substantially deeper network. CNNs have since become much deeper (hundreds of layers), but they are still based on the block architecture developed by the VGG team.
#
# # VGG Paper
#
# The corresponding published paper, [*Very Deep Convolutional Networks for Large-Scale Image Recognition*](https://arxiv.org/pdf/1409.1556.pdf), is a rigorous evaluation of the network's architecture as its depth is increased.
#
# # VGG Result
#
# In summary, the VGG team secured 1st and 2nd place in the ImageNet Challenge for the localisation and classification tasks, respectively. ImageNet is a dataset of over 14 million images belonging to 1000 classes.
#
# The results are summarised below:
#
# # VGG Architecture
#
# The VGG team created an extremely homogeneous architecture that uses only 3x3 convolutional layers stacked on top of each other in increasing depth. Reducing the volume size is handled by 2x2 max pooling. The convolutional layers are followed by two fully-connected layers and end with a softmax classifier.
#
# The architecture comes in two sizes, "VGG16" and "VGG19", where the number refers to the number of weight layers in the network (best performance is obtained by configuration D):
#
# ## VGG16 Architecture
#
# **Main points of the VGG architecture:**
#
# - The use of only 3x3 filters, which is small compared to previous models that used 11x11 and 7x7 filters. However, the combination of two 3x3 conv layers has an effective receptive field of 5x5. This simulates a larger filter while keeping the benefits of smaller filter sizes. One of the benefits is a decrease in the number of parameters. Also, with two conv layers we are able to use two ReLU nonlinearities instead of one.
#
# - Three 3x3 conv layers back to back have an effective receptive field of 7x7.
#
# - As the spatial size of the input volume at each layer decreases (a result of the conv and pool layers), the depth of the volume increases due to the increased number of filters as you go down the network.
#
# - The number of filters doubles after each maxpool layer. This reinforces the idea of shrinking spatial dimensions but growing depth.
#
# - Works well on both image classification and localisation tasks. Localisation is treated as a regression task.
#
# - Uses a ReLU activation after each conv layer and is trained with mini-batch gradient descent.
#
# **Downsides of the VGG architecture:**
#
# - It can be slow to train on large datasets because the number of model parameters is quite large, due to its depth and its large fully-connected layers. This also makes deploying a VGG network difficult.
#
# - Smaller network architectures with comparable performance have since been proposed, such as SqueezeNet.
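#
# As a rough preview of the block structure described above, here is a minimal sketch of a VGG16-style model. It is illustrative only (the next lesson builds the full implementation); it assumes `tensorflow.keras` is available, and the names below follow the standard Keras API rather than any code from this lesson.

# +
from tensorflow.keras import layers, models

def vgg_block(x, filters, n_convs):
    # Stack n_convs 3x3 conv + ReLU layers, then halve the spatial size with 2x2 max pooling
    for _ in range(n_convs):
        x = layers.Conv2D(filters, (3, 3), padding="same", activation="relu")(x)
    return layers.MaxPooling2D((2, 2), strides=(2, 2))(x)

inputs = layers.Input(shape=(224, 224, 3))
x = vgg_block(inputs, 64, 2)    # two 3x3 convs ~ one 5x5 receptive field
x = vgg_block(x, 128, 2)        # the filter count doubles after each max pool
x = vgg_block(x, 256, 3)        # three 3x3 convs ~ one 7x7 receptive field
x = vgg_block(x, 512, 3)
x = vgg_block(x, 512, 3)
x = layers.Flatten()(x)
x = layers.Dense(4096, activation="relu")(x)
x = layers.Dense(4096, activation="relu")(x)
outputs = layers.Dense(1000, activation="softmax")(x)   # softmax classifier over 1000 classes

vgg16_like = models.Model(inputs, outputs)              # 13 conv + 3 dense = 16 weight layers
vgg16_like.summary()
# -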
# ## Next Lesson # ### VGG with TensorFlow-Keras # - You will implement an improved version of the VGG network in TensorFlow-Keras # # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Simulate the model and make Figure SI-1 # ## Imports # First run all of the code in this section to import the necessary packages. # # First we load some magic commands: # %load_ext autoreload # %autoreload 2 # %matplotlib inline # Next load some standard modules. If you do not have one of these modules (such as [progressbar](https://pypi.python.org/pypi/progressbar2) or [joblib](https://pypi.python.org/pypi/joblib)), then run, for example, `!pip install progressbar` to install it using `pip`. import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl import pandas as pd import seaborn as sns import time, datetime import progressbar import pickle import os from itertools import product from joblib import Parallel, delayed from scipy import stats import sys sys.setrecursionlimit(10000) # to be able to pickle the results of simulations and avoid a RecursionError # Set style parameters for matplotlib figures: from matplotlib import rc rc('font', **{'family': 'sans-serif','sans-serif': ['Helvetica']}) rc('text', usetex=True) rc('axes', **{'titlesize': 10, 'labelsize': 8}) rc('legend', **{'fontsize': 9}) # Set the path for saving the figures: figures_path = os.path.join(os.pardir, 'figures') if not os.path.exists(figures_path): os.mkdir(figures_path) # Import the code for simulating the model: import ABM import EconomySimulator # ## Panel (a): long-run fraction functional as a function of the initial condition # ### Compute the data (takes about 1.5 hours to run) # The code in the cell below creates a pandas DataFrame called `long_run_results`. It in `long_run_results` the dictionary returned by the function `EconomySimulator.simulate_economy_long_run`. This dictionary contains some measures of the state of the model economy after 1000 production attempts have been simulated. # # The function `run_long_run_sim` sets the parameters of the economy, and the for loop iterates over the initial condition `F0` (the initial fraction of functional agents), `r` in `[1, 2000]`, `xi` in `[0, 1]`, and a trial index `trial` in `range(1000)` (we run 1000 trials for each initial condition). # # # **Warning**: _This code takes about 1.5 hours to run on a laptop computer_. To avoid having to re-run this, run the cell under the section heading **Load `long_run_results` from the hard drive** below. # + def run_long_run_sim(trial_number, F0, r, xi): n_agents = 200 beta = .4 n_steps = 5 * n_agents L = 1 exog_fail = 0.0001 alpha = 0.15 tolerance_std = 0.0 n_steps_detect_fixed_point = 50 return EconomySimulator.simulate_economy_long_run( n_agents=n_agents, init_fraction_functional=F0, alpha=alpha, beta=beta, r=r, L=L, xi=xi, exog_fail=exog_fail, n_steps=n_steps, trial=trial_number, tolerance_std=tolerance_std, n_steps_detect_fixed_point=n_steps_detect_fixed_point) try: long_run_results except NameError: long_run_results = None start_time = time.time() long_run_results = pd.concat([long_run_results, pd.DataFrame( Parallel(n_jobs=4)( delayed(run_long_run_sim)(trial, F0, r, xi) for trial in range(1000) for F0 in np.arange(.155, .205, .01) for r in [1., 2000.] 
for xi in [0, 1] ) )]) end_time = time.time() print(datetime.timedelta(seconds=(end_time - start_time))) # - # This confirms that we have 1000 simulations for each quadruple `(r, xi, n_agents, init_F)`. long_run_results.groupby(['r', 'xi', 'n_agents', 'init_F']).size() # #### Save the data to the hard drive as a `CSV` file long_run_results.to_csv( os.path.join( 'simulated_data', 'long_run_results_n200_alpha0p15_beta0p4_epsilon0p0001.csv')) # #### Load `long_run_results` from the hard drive # Run the code below to load the results from the CSV file in order to avoid having to re-run the simulations above (which takes about 90 minutes): long_run_results = pd.read_csv( os.path.join( 'simulated_data', 'long_run_results_n200_alpha0p15_beta0p4_epsilon0p0001.csv'), index_col=0) # ## Panel (b): show two representative time-series # #### Simulate the original model and the model with sticky links and preferential attachment # Either # # * run the simulations below (which should take around 50 minutes to run), or # * load the results of those simulations that were pickled (scroll down to the heading **Load the simulations from the `pickle` file `sim_N1000_alpha0p15_beta0p4eps0p0001_initF0p7.pkl`**). # ##### Simulate the original model # Set up the simulation: sim_N1000_alpha0p15_beta0p4_r1_xi0_eps0p0001_initF0p7 = EconomySimulator.AssortativitySimulator( ABM.Economy(1000, .7, alpha=.15, beta=.4, r=1, exog_fail=0.0001, xi=0)) # This takes about 22 minutes to run: sim_N1000_alpha0p15_beta0p4_r1_xi0_eps0p0001_initF0p7.simulate(200000) # Plot some time-series from the simulation: sim_N1000_alpha0p15_beta0p4_r1_xi0_eps0p0001_initF0p7.combined_plot() # ##### Simulate the model with sticky links and preferential attachment # Set up the simulation: sim_N1000_alpha0p15_beta0p4_r2000_xi1_eps0p0001_initF0p7 = EconomySimulator.AssortativitySimulator( ABM.Economy(1000, .7, alpha=.15, beta=.4, r=2000., exog_fail=0.0001, xi=1)) # This takes about 25 minutes to run: sim_N1000_alpha0p15_beta0p4_r2000_xi1_eps0p0001_initF0p7.simulate(200000) # Plot some time-series from the simulation: sim_N1000_alpha0p15_beta0p4_r2000_xi1_eps0p0001_initF0p7.combined_plot() # #### Save and load the two simulations above using `pickle` # ###### Save (pickle) the simulations to file `sim_N1000_alpha0p15_beta0p4eps0p0001_initF0p7.pkl`: # + with open(os.path.join('simulated_data', 'sim_N1000_alpha0p15_beta0p4_eps0p0001_initF0p7_r1_xi0.pkl'), 'wb') as f: pickle.dump(sim_N1000_alpha0p15_beta0p4_r1_xi0_eps0p0001_initF0p7, f) with open(os.path.join('simulated_data', 'sim_N1000_alpha0p15_beta0p4_eps0p0001_initF0p7_r2000_xi1.pkl'), 'wb') as f: pickle.dump(sim_N1000_alpha0p15_beta0p4_r2000_xi1_eps0p0001_initF0p7, f) # - # ###### Load the simulations from the `pickle` file `sim_N1000_alpha0p15_beta0p4eps0p0001_initF0p7.pkl`: # Run the code below to avoid having to run the two simulations above: # + with open(os.path.join('simulated_data', 'sim_N1000_alpha0p15_beta0p4_eps0p0001_initF0p7_r1_xi0.pkl'), 'rb') as f: sim_N1000_alpha0p15_beta0p4_r1_xi0_eps0p0001_initF0p7 = pickle.load(f) with open(os.path.join('simulated_data', 'sim_N1000_alpha0p15_beta0p4_eps0p0001_initF0p7_r2000_xi1.pkl'), 'rb') as f: sim_N1000_alpha0p15_beta0p4_r2000_xi1_eps0p0001_initF0p7 = pickle.load(f) # - # ## Make Figure SI-1 # The cell below makes Figure SI-1 and saves it to the folder `figures` as a PDF. 
# + data = long_run_results data.init_F = np.round(data.init_F, 3) data = data[((data.r == 1) & (data.xi == 0)) | ((data.r > 1) & (data.xi > 0))] grouped_by_r_xi = data.groupby(['r', 'xi']) fig, ax = plt.subplots(ncols=2, figsize=(3.4 * 2 * .95, 3.4 / 5 * 3)) colors = ['#2ca02c', '#e377c2'] handles = [] labels = [] indx = 0 for r_xi, r_df in grouped_by_r_xi: color = colors[indx] indx += 1 labels.append(r_xi) linestyle = {0: '-', 1: '--'}.get(r_xi[1]) data_final_F = ( r_df.groupby('init_F')['final_F'] .agg({ 'mean_final_F': np.mean, 'std_final_F': np.std, 'num_trials': 'size', 'sem_final_F': lambda final_F: np.std(final_F) / len(final_F)**.5, '75_percentile_final_F': lambda final_F: np.percentile(final_F, 75.), '25_percentile_final_F': lambda final_F: np.percentile(final_F, 25.)})) handle, = ax[0].plot(data_final_F.index, data_final_F.mean_final_F, label=str(r_xi), color=color, alpha=1, linewidth=1, linestyle='-') ax[0].errorbar(data_final_F.index, data_final_F.mean_final_F, yerr=2 * data_final_F.sem_final_F, label=str(r_xi), color=color) handles.append(handle) ax[0].set_xlabel(r'$F(0) \equiv$ initial fraction functional') ax[0].set_ylabel(r'mean of $F(1000)$') ax[0].set_ylim(0, 1) xlim = (0.14 - .001, .201) ax[0].set_xlim(*xlim) height_trap_label = .01 label_size = 8 ax[0].annotate( "", xy=(xlim[0], height_trap_label), xytext=(.15, height_trap_label), arrowprops=dict(linewidth=1, headwidth=3, headlength=2, width=0.25)) ax[0].text(xlim[0] * .65 + .15 * .35, height_trap_label + .04, 'trap', color='k', size=label_size) height_bimodal_label = height_trap_label ax[0].annotate( "", xy=(.152, height_bimodal_label), xytext=(.185, height_bimodal_label), arrowprops=dict(linewidth=1, headwidth=3, headlength=2, width=0.25)) ax[0].annotate( "", xytext=(.152, height_bimodal_label), xy=(.185, height_bimodal_label), arrowprops=dict(linewidth=1, headwidth=3, headlength=2, width=0.25)) ax[0].text(.152 * .65 + .185 * .35, height_bimodal_label + .04, 'bimodal', color='k', size=label_size) ax[0].annotate( 'original model' #'\n' #r'$(r, \xi) = (1, 0)$' , size=label_size, xy=(.1725, .56), xytext=(.17, .30), xycoords='data', textcoords='data', arrowprops=dict(arrowstyle="-|>", linewidth=1, connectionstyle="arc3,rad=.2")) ax[0].annotate( 'sticky links' #r' ($r = 2000$)' ' and' '\n' 'prefential attachment' #r' ($\xi = 1$)' , size=label_size, xy=(.1625, .5), xytext=(.145, .74), xycoords='data', textcoords='data', arrowprops=dict(arrowstyle="-|>", linewidth=1, connectionstyle="arc3,rad=.2")) sims = [ sim_N1000_alpha0p15_beta0p4_r1_xi0_eps0p0001_initF0p7, sim_N1000_alpha0p15_beta0p4_r2000_xi1_eps0p0001_initF0p7 ] for indx, sim in enumerate(sims): ax[1].plot(sim.fraction_functional_history, alpha=.8, color=colors[indx], linewidth=1) ax[1].set_ylabel(r'$F(t)$') ax[1].set_xlabel(r'time $t$ (number of production attempts)') ax[1].set_xlim(0, sims[0].economy.n_production_attempts) ax[1].set_ylim(0, 1) ax[1].set_xticks([0, 10**5, 2 * 10**5], ['0', '10^5', '2 10^5']) ax[1].tick_params(axis='both', labelsize=7, colors='.4') ax[0].tick_params(axis='both', labelsize=7, colors='.4') def format_label(value, pos): return { 0: '0', 2.5 * 10**4: '',#r'$2.5\!\!\times\!\!10^4$', 5 * 10**4: r'$5\!\!\times\!\!10^4$', 10**5: r'$10^5$', 1.5 * 10**5: r'$1.5\!\!\times\!\!10^5$', 2*10**5: r'$2\!\!\times\!\!10^5$' }.get(value, '') ax[1].xaxis.set_major_formatter(mpl.ticker.FuncFormatter(format_label)) fig.text(.001, .94, r'\textbf{(a)}', size=label_size) fig.text(#.49, .50, .94, r'\textbf{(b)}', size=label_size) fig.tight_layout(pad=0.15) 
fig.subplots_adjust(wspace=.25) fig.savefig(os.path.join(figures_path, 'figure_SI_1.pdf')) plt.show() # - # ### Check statistical significance of the difference in means in Figure SI-1(a) # In the cell below, we find that the means of $F(1000)$ are statistically significantly different between the two models for $F(0) = 0.155, 0.16, 0.165, ..., 0.2$ according to the two-sided Mann-Whitney $U$ test ($p$-value $< 10^{-5}$): for init_F, df in long_run_results.groupby('init_F'): df_grouped_by_r_xi = df.groupby(['r', 'xi']) print('F(0) = {:>5}'.format(init_F), end='\n\t') original_final_F = df_grouped_by_r_xi.get_group((1, 0))['final_F'] sticky_PA_final_F = df_grouped_by_r_xi.get_group((2000, 1))['final_F'] print('mean F(1000) for original model: {:>5.3f}'.format(original_final_F.mean()), end='\n\t') print('mean F(1000) for sticky/PA model: {:>5.3f}'.format(sticky_PA_final_F.mean()), end='\n\t') mann_whitney_test = stats.mannwhitneyu(sticky_PA_final_F, original_final_F, alternative='two-sided') print('Mann-Whitney U test:') print('\t\tp-value: ', mann_whitney_test.pvalue, end=' ') if mann_whitney_test.pvalue < 10**(-3): print('*' * 3) else: print('') print('\t\tU = ', mann_whitney_test.statistic, end=' ') print('\n') # ### Check the robustness of the difference in variance in the time-series in Figure SI-1(b) # Below we run simulations with the same parameters and starting condition as in Figure SI-1(b) and record the mean and standard deviation of the time-series. # #### Run 200 simulations as in Figure SI-1(b) # Running the cell below takes about 21 hours to complete. Either run this cell or skip it to import the results in the section titled **Import the results of running 200 simulations**. # + parameters = product(range(200), ((1, 0), (2000, 1))) def simulate_long_run_variance(trial_number, r, xi): n_agents = 1000 beta = .4 n_steps = 200 * n_agents L = 1 F0 = 0.7 exog_fail = 0.0001 alpha = 0.15 econ = ABM.Economy( n_agents, F0, alpha=alpha, beta=beta, r=r, exog_fail=exog_fail, xi=xi) frac_functional_history = [] init_best_response = econ.latest_best_response result = { 'init_n_inputs_needed': init_best_response.n_inputs_needed, 'init_n_inputs_attempted': init_best_response.n_inputs_attempted} for i in range(n_steps): econ.update_one_step() frac_functional_history.append(econ.fraction_functional_agents()) final_best_response = econ.latest_best_response result.update({ 'final_n_inputs_needed': final_best_response.n_inputs_needed, 'final_n_inputs_attempted': final_best_response.n_inputs_attempted, 'final_F': econ.fraction_functional_agents(), 'n_agents': n_agents, 'init_F': F0, 'alpha': alpha, 'beta': beta, 'xi': xi, 'r': r, 'L': L, 'n_steps': n_steps, 'mean_F': np.mean(frac_functional_history), 'std_F': np.std(frac_functional_history), 'max_F': np.max(frac_functional_history), 'min_F': np.min(frac_functional_history)}) buffers = { 'init_buffer': (result['init_n_inputs_attempted'] - result['init_n_inputs_needed']), 'final_buffer': (result['final_n_inputs_attempted'] - result['final_n_inputs_needed'])} result.update(buffers) return result try: long_run_variance_simulations except NameError: long_run_variance_simulations = None if __name__ == '__main__': bar = progressbar.ProgressBar() long_run_variance_simulations = pd.concat([long_run_variance_simulations, pd.DataFrame( Parallel(n_jobs=4)( delayed(simulate_long_run_variance)(trial, r, xi) for trial, (r, xi) in bar(list(parameters)) ) )]) # - # ##### Save the results to a `CSV` file: long_run_variance_simulations.to_csv( os.path.join( 
'simulated_data', 'long_run_variance_simulations_n1000_alpha0p15_beta0p4_eps0p0001_initF0p7.csv')) # ##### Import the results of running 200 simulations long_run_variance_simulations = pd.read_csv( os.path.join( 'simulated_data', 'long_run_variance_simulations_n1000_alpha0p15_beta0p4_eps0p0001_initF0p7.csv'), index_col=0) # ### Analyze the results # First we plot histograms of the standard deviation of the time-series $F(t)$ for the two models. This figure is saved as `compare_std_dev_F.pdf` in the `figures` folder. # + colors = {(1, 0): '#2ca02c', (2000, 1): '#e377c2'} fig, ax = plt.subplots(figsize=(3.4, 3.4 / 5 * 3)) grouped_std_F = long_run_variance_simulations.groupby(['r', 'xi'])['std_F'] for r_xi, df in grouped_std_F: ax.hist(df, bins=30, normed=False, color=colors[r_xi]) ax.set_xlabel('standard deviation of $F(t)$', size=12) ax.set_ylabel('count', size=12) ax.annotate( 'original model\n' r'$(r, \xi) = (1, 0)$', xy=(.02, 5), xytext=(.05, 5), xycoords='data', textcoords='data', arrowprops=dict(arrowstyle="-|>", linewidth=1, connectionstyle="arc3,rad=.2")) ax.annotate( 'sticky links \& preferential \nattachment\n' r'$(r, \xi) = (2000, 1)$', xy=(.14, 8), xytext=(.06, 12), xycoords='data', textcoords='data', arrowprops=dict(arrowstyle="-|>", linewidth=1, connectionstyle="arc3,rad=.2")) fig.tight_layout(pad=.15) fig.savefig(os.path.join(figures_path, 'compare_std_dev_F.pdf')) plt.show() # - # Next we group by `(r, xi)` and then compute the mean and standard deviation of the mean of the time-series. compare_std_F = long_run_variance_simulations.groupby(['r', 'xi']).std_F.agg( {'mean_std_F': 'mean', 'std_std_F': 'std', 'count': 'size'}) compare_std_F # The sticky links + preferential attachment model has a variance that is 8.6 times larger: compare_std_F.loc[(2000, 1)].mean_std_F / compare_std_F.loc[(1, 0)].mean_std_F # This 8.6-fold difference amounts to a difference in 14.6 standard deviations: ((compare_std_F.loc[(2000, 1)].mean_std_F - compare_std_F.loc[(1, 0)].mean_std_F) / compare_std_F.loc[(2000, 1)].std_std_F) # In a two-sided t-test (using [scipy's `ttest_ind`](https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.stats.mstats.ttest_ind.html)) that allows for unequal variances in the two populations (because, as found below, the variances are found to be statistically significantly different), we obtain a p-value of `5.3e-251`: # + std_F_sticky_PA = long_run_variance_simulations.groupby(['r', 'xi']).get_group((2000, 1)).std_F std_F_original_model = long_run_variance_simulations.groupby(['r', 'xi']).get_group((1, 0)).std_F print('two-sided t-test: ', stats.ttest_ind(std_F_sticky_PA, std_F_original_model, equal_var = False)) # - # We also find that a two-sided [Mann-Whitney U test](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mannwhitneyu.html) has a very small p-value (`1e-67`): stats.mannwhitneyu(std_F_sticky_PA, std_F_original_model, alternative='two-sided') # ##### Check normality and different variances # Below we find that the standard deviations of the time-series $F(t)$ (plotted as a histogram above) are normal with p-values `0.06` and `2.6e-5`. 
# + print('standard deviation of the time-series F(t) in the sticky links + preferential attachment model (r, xi) = (2000, 1)') print('-' * 114) print(' variance: ', np.var(std_F_sticky_PA)) print(' normality test: ', stats.normaltest(std_F_sticky_PA), end='\n' * 3) print('standard deviation of the time-series F(t) in the original model (r, xi) = (1, 0)') print('-' * 81) print(' variance: ', np.var(std_F_original_model)) print(' normality test: ', stats.normaltest(std_F_original_model)) # - # According to the [Bartlett test](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.bartlett.html#scipy.stats.bartlett), their variances are different (p-value `2.6e-74`), so we reject the null hypothesis that they are drawn from populations with the same variance. # # In case the sticky/preferential attachment model's standard deviation of $F(t)$ is not normally distributed, we also use the [Levene test](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.levene.html) with the parameter `center` set to the `'mean'` and to `'median'` (to check both). # # In all three cases, we get a very small p-value (`1e-74`, `1e-44`, `1e-42`, respectively), so we reject the null hypothesis that the variances are the same, and hence in the two-sided t-test above we set the keyword argument `equal_var` to `False`. print('Bartlett test (null hypothesis: equal variance; used for normal data):\n\t', stats.bartlett(std_F_sticky_PA, std_F_original_model), end='\n\n') print('Levene test with center=mean (null hypothesis: equal variance; used for potentially non-normal data)\n\t', stats.levene(std_F_sticky_PA, std_F_original_model, center='mean'), end='\n\n') print('Levene test with center=mean (null hypothesis: equal variance; used for potentially non-normal data)\n\t', stats.levene(std_F_sticky_PA, std_F_original_model, center='median')) # ## Dependencies import sys sys.version import joblib for pkg in [mpl, pd, sns, np, progressbar, joblib]: print(pkg.__name__, pkg.__version__) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns df = pd.read_csv('./course-data/StudentsPerformance.csv') df.head() sns.catplot(data=df, y='gender',x='math score',kind='box') sns.catplot(data=df, x='gender',y='math score',kind='box',row='lunch') sns.catplot(data=df, x='gender',y='math score',kind='box',row='lunch',col='test preparation course') sns.catplot(data=df, x='writing score',y='math score',kind='violin',row='lunch',col='test preparation course') sns.catplot(data=df, x='writing score',y='math score',row='lunch',col='test preparation course') sns.pairplot(df) sns.PairGrid(df) # + g = sns.PairGrid(df) g = g.map_upper(sns.scatterplot) g = g.map_lower(sns.kdeplot) g = g.map_diag(sns.kdeplot) # + g = sns.PairGrid(df,hue='gender') g = g.map_upper(sns.scatterplot) g = g.map_lower(sns.kdeplot) g = g.map_diag(sns.kdeplot) # + g = sns.PairGrid(df,hue='gender') g = g.map_upper(sns.scatterplot) g = g.map_lower(sns.kdeplot) g = g.map_diag(sns.kdeplot) g = g.add_legend() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (reco_base) # language: python # name: reco_base # --- # ## Wikidata Knowledge Graph Extraction # Many 
recommendation algorithms (DKN, RippleNet, KGCN) use Knowledge Graphs as an external source of information. We found that one of the bottlenecks to benchmark current algorithms like DKN, RippleNet or KGCN is that they used Microsoft Satori. As Satori is not open source, it's not possible to replicate the results found in the papers. The solution is using other open source KGs. # # The goal of this notebook is to provide examples of how to interact with Wikipedia queries and Wikidata to extract a Knowledge Graph that can be used with the mentioned algorithms. # # The steps covered are: # - How to find a Wikidata entity (https://www.wikidata.org/wiki/Wikidata:Glossary/en from a text query # - How to find surrounding entities and descriptions for an entity # - Create a KG for Movielens # + # set the environment path to find Recommenders import sys sys.path.append("../../") print("System version: {}".format(sys.version)) import pandas as pd from reco_utils.dataset.wikidata import (search_wikidata, find_wikidata_id, query_entity_links, read_linked_entities, query_entity_description) import networkx as nx import matplotlib.pyplot as plt from tqdm import tqdm from reco_utils.dataset import movielens from reco_utils.common.notebook_utils import is_jupyter # + tags=["parameters"] # Select MovieLens data size: 100k, 1m, 10m, or 20m MOVIELENS_DATA_SIZE = '100k' MOVIELENS_SAMPLE = True MOVIELENS_SAMPLE_SIZE = 50 # - # ## 1. Create a KG from linked entities in Wikidata names = ["The Godfather", "", "", "", "", "", "My Best Friend's Wedding"] # + # %%time # the following code has been wrapped in a helper function called search_wikidata() # it is provided here to show the details of which calls are being made to wikipedia APIs # capture results as a list of dicts to transform to DataFrame (this is faster than appending to DataFrames) results_list = [] for idx, name in enumerate(names): # first get the wikipedia entity_id for each name entity_id = find_wikidata_id(name) if entity_id == "entityNotFound": continue # next we query wikipedia to get entity links json_links = query_entity_links(entity_id) # the following function extracts entities from the links related_links = read_linked_entities(json_links) # now we can construct an connection in our graph between two entities for related_entity, related_name in related_links: result = dict( name=name, original_entity=entity_id, linked_entities=related_entity, name_linked_entities=related_name, ) results_list.append(result) results_list = pd.DataFrame(results_list) results_list.head() # - # ### Visualize KG using networkx G = nx.from_pandas_edgelist(results_list, 'original_entity', 'linked_entities') target_names = results_list[["linked_entities", "name_linked_entities"]].drop_duplicates().rename(columns={"linked_entities": "labels", "name_linked_entities": "name"}) source_names = results_list[["original_entity", "name"]].drop_duplicates().rename(columns={"original_entity": "labels"}) names = pd.concat([target_names, source_names]) names = names.set_index("labels") names = names.to_dict()["name"] plt.figure(figsize=(12,12)) pos = nx.spring_layout(G) nx.draw(G,pos, node_size=60,font_size=9, width = 0.2) nx.draw_networkx_labels(G, pos, names, font_size=9) plt.show() # ## 2. 
Create a KG from the Movielens Dataset # Obtain pairs of Movie Title - IDs from Movielens df = movielens.load_pandas_df(MOVIELENS_DATA_SIZE, ('UserId', 'ItemId', 'Rating', 'Timestamp'), title_col='Title', genres_col='Genres', year_col='Year' ) movies = df[["Title", "ItemId"]].drop_duplicates().reset_index() movies["Title"][0:5] # For notebook testing if MOVIELENS_SAMPLE == True: movies = movies.head(MOVIELENS_SAMPLE_SIZE) movies.shape names = [t + ' film' for t in movies['Title']] result = search_wikidata(names, extras=movies[['Title', 'ItemId']].to_dict()) result.head() result["Title"].value_counts() # + # result.to_csv("movielens_" + MOVIELENS_DATA_SIZE + '_wikidata.csv', index = False) # - number_movies = len(result["Title"].unique()) # Record results with papermill for tests - ignore this cell if is_jupyter(): # Record results with papermill for unit-tests import papermill as pm pm.record("length_result", number_movies) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt pokemon = pd.read_csv('https://stepik.org/media/attachments/lesson/363874/Pokemon.csv') # Сначала измените названия исходных столбцов: # # пробелы и точки нужно заменить на "_" (напр. Sp. Atk --> sp_atk). # приведите все названия к нижнему регистру # колонку "#" переименовать в "id" # Полученные результаты запишите в исходный датафрейм pokemon. # # Затем сгруппируйте данные по поколению покемонов (generation), и с помощью value_counts() посчитайте, сколько в каком поколении легендарных покемонов (legendary), а также сколько в этих поколениях нелегендарных покемонов. Полученный объект приведите к формату датафрейма (.to_frame()) и сохраните в legends. pokemon = pokemon.rename(columns=lambda x: x.replace('. ', '_').replace(' ', '_').lower()) pokemon = pokemon.rename(columns={'#': 'id'}) pokemon legends = pokemon.groupby('generation') \ .legendary.value_counts() \ .to_frame() # Используйте датафрейм legends, полученный на предыдущем шаге, и измените в нём название столбца, перезаписав его в ту же переменную. # Затем используйте unstack, чтобы поместить уровень индекса legendary в уровень оси столбцов. Иными словами, должно получиться две колонки – False & True. Результат сохраните в legends_unstacked. legends = legends.rename(columns={'legendary': 'legendary_count'}) legends_unstacked = legends.unstack() # Немного усложним задачу. Теперь попробуем узнать, среди каких типов покемонов и какого поколения больше всего легендарных. # # Сгруппируйте датасет pokemon по переменным generation и type_1, посчитайте количество легендарных покемонов внутри групп. Приведите данные в формат датафрейма, а затем используйте unstack(). В качестве ответа выберите вид и поколение покемона, среди которых больше всего легендарных. pokemon_f = pokemon.groupby(['generation', 'type_1']) \ .legendary \ .value_counts() \ .to_frame() legends_1_unstacked = pokemon_f.unstack() legends_1_unstacked.idxmax() # Преобразуйте представленные данные в длинный формат и запишите в переменную avocado_agg_long. 
В качестве индекса используйте type avocado_agg = pd.DataFrame({'type' : ['conventional', 'organic'], 'AvgPrice_2015' : [1.077963, 1.673324], 'AvgPrice_2016' : [1.105595, 1.571684], 'AvgPrice_2017' : [1.294888, 1.735521], 'AvgPrice_2018' : [1.127886, 1.567176], }) avocado_agg avocado_agg_long = pd.wide_to_long(avocado_agg, ['AvgPrice'], i=['type'], j='year', sep= '_') avocado_agg_long superheroes = pd.read_csv('https://stepik.org/media/attachments/lesson/363874/superheroes_power_matrix.csv') superheroes superheroes_long = superheroes.melt(id_vars=['Name'], var_name = 'superpower') superheroes_powers = superheroes_long.query('value == True') \ .groupby('Name') \ .superpower \ .apply(list) \ .to_frame() \ .reset_index() superheroes_powers # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # WAKY plotting # -- Dependencies -- import sys import numpy as np import pandas as pd import kwakpriv # ## Narrow # -- Load the data file -- Examples_dir = "./examples/" File_name = "Narrow.csv" File_dframe = pd.read_csv(Examples_dir+File_name) # + Nevents = np.asarray(File_dframe['Nevents'].values, dtype=int) # The number of events must be integer-type Hypothesis = np.asarray(File_dframe['Hypothesis'].values, dtype=float) assert(len(Nevents)==len(Hypothesis)), "Nevents and Hypothesis arrays must have the same length." # Optional: # Separate the "Signal" region (first 64 bins) from the "overflow" region (last 64 bins). Nbins = 64 data_signal = Nevents[:Nbins] data_overflow = np.sum(Nevents[Nbins:]) hypo_signal = Hypothesis[:Nbins] hypo_overflow = np.sum(Hypothesis[Nbins:]) # Optional (for plotting): # Get the "Generating Function" for the Nsigma Scalograms. GenFunc = np.asarray(File_dframe['Generating Function'].values, dtype=float) genfunc_signal = GenFunc[:Nbins] genfunc_overflow = np.sum(GenFunc[Nbins:]) # + extr_nsets = kwakpriv.nsets(data_signal, hypo_signal, nsets=3*10**2, seed=123, extrapolate=False, fastGaussian=False, outputdir=Output_dir) # Save the attributes of extr_nsets Level = extr_nsets.Level Histogram = extr_nsets.Histogram Nsigma = extr_nsets.Nsigma NsigmaFixedRes = extr_nsets.NsigmaFixedRes # If fastGaussian is False #PlessX = extr_nsets.PlessX #PeqX = extr_nsets.PeqX # If fastGaussian is False AND extrapolate is true #Nsigma_fit = extr_nsets.Nsigma_fit #PlessX_fit = extr_nsets.PlessX_fit #PeqX_fit = extr_nsets.PeqX_fit #NsigmaFixedRes_fit = extr_nsets.NsigmaFixedRes_fit # - r_percent = 10 kwakpriv.nsigScalogram(data=data_signal, hypothesis=hypo_signal, nsigma=Nsigma, #generating_function=genfunc_signal, nsigma_percent=r_percent*0.01, nsigma_colorcode=True, title="Narrow Bump", titlesize=14, xlabel=None, textsize=11, figsize=(6,6), labelpos=-.11, ypad=.57, ticksize=11, outputfile=None) # ## Wide Bump # + # -- Load the data file -- Examples_dir = "./examples/" File_name = "Wide.csv" File_dframe = pd.read_csv(Examples_dir+File_name) Nevents = np.asarray(File_dframe['Nevents'].values, dtype=int) # The number of events must be integer-type Hypothesis = np.asarray(File_dframe['Hypothesis'].values, dtype=float) assert(len(Nevents)==len(Hypothesis)), "Nevents and Hypothesis arrays must have the same length." # Optional: # Separate the "Signal" region (first 64 bins) from the "overflow" region (last 64 bins). 
Nbins = 64 data_signal = Nevents[:Nbins] data_overflow = np.sum(Nevents[Nbins:]) hypo_signal = Hypothesis[:Nbins] hypo_overflow = np.sum(Hypothesis[Nbins:]) # Optional (for plotting): # Get the "Generating Function" for the Nsigma Scalograms. GenFunc = np.asarray(File_dframe['Generating Function'].values, dtype=float) genfunc_signal = GenFunc[:Nbins] genfunc_overflow = np.sum(GenFunc[Nbins:]) extr_nsets = kwakpriv.nsets(data_signal, hypo_signal, nsets=3*10**2, seed=123, extrapolate=False, fastGaussian=False, outputdir=Output_dir) # Save the attributes of extr_nsets Level = extr_nsets.Level Histogram = extr_nsets.Histogram Nsigma = extr_nsets.Nsigma NsigmaFixedRes = extr_nsets.NsigmaFixedRes # If fastGaussian is False #PlessX = extr_nsets.PlessX #PeqX = extr_nsets.PeqX # If fastGaussian is False AND extrapolate is true #Nsigma_fit = extr_nsets.Nsigma_fit #PlessX_fit = extr_nsets.PlessX_fit #PeqX_fit = extr_nsets.PeqX_fit #NsigmaFixedRes_fit = extr_nsets.NsigmaFixedRes_fit r_percent = 10 kwakpriv.nsigScalogram(data=data_signal, hypothesis=hypo_signal, nsigma=Nsigma, #generating_function=genfunc_signal, nsigma_percent=r_percent*0.01, nsigma_colorcode=True, title="Narrow Bump", titlesize=14, xlabel=None, textsize=11, figsize=(6,6), labelpos=-.11, ypad=.57, ticksize=11, outputfile=None) # - # ## BumpDip # + # -- Load the data file -- Examples_dir = "./examples/" File_name = "BumpDip.csv" File_dframe = pd.read_csv(Examples_dir+File_name) Nevents = np.asarray(File_dframe['Nevents'].values, dtype=int) # The number of events must be integer-type Hypothesis = np.asarray(File_dframe['Hypothesis'].values, dtype=float) assert(len(Nevents)==len(Hypothesis)), "Nevents and Hypothesis arrays must have the same length." # Optional: # Separate the "Signal" region (first 64 bins) from the "overflow" region (last 64 bins). Nbins = 64 data_signal = Nevents[:Nbins] data_overflow = np.sum(Nevents[Nbins:]) hypo_signal = Hypothesis[:Nbins] hypo_overflow = np.sum(Hypothesis[Nbins:]) # Optional (for plotting): # Get the "Generating Function" for the Nsigma Scalograms. 
GenFunc = np.asarray(File_dframe['Generating Function'].values, dtype=float) genfunc_signal = GenFunc[:Nbins] genfunc_overflow = np.sum(GenFunc[Nbins:]) extr_nsets = kwakpriv.nsets(data_signal, hypo_signal, nsets=3*10**2, seed=123, extrapolate=False, fastGaussian=False, outputdir=Output_dir) # Save the attributes of extr_nsets Level = extr_nsets.Level Histogram = extr_nsets.Histogram Nsigma = extr_nsets.Nsigma NsigmaFixedRes = extr_nsets.NsigmaFixedRes # If fastGaussian is False #PlessX = extr_nsets.PlessX #PeqX = extr_nsets.PeqX # If fastGaussian is False AND extrapolate is true #Nsigma_fit = extr_nsets.Nsigma_fit #PlessX_fit = extr_nsets.PlessX_fit #PeqX_fit = extr_nsets.PeqX_fit #NsigmaFixedRes_fit = extr_nsets.NsigmaFixedRes_fit r_percent = 10 kwakpriv.nsigScalogram(data=data_signal, hypothesis=hypo_signal, nsigma=Nsigma, #generating_function=genfunc_signal, nsigma_percent=r_percent*0.01, nsigma_colorcode=True, title="Narrow Bump", titlesize=14, xlabel=None, textsize=11, figsize=(6,6), labelpos=-.11, ypad=.57, ticksize=11, outputfile=None) # - # ## Oscillations # + # -- Load the data file -- Examples_dir = "./examples/" File_name = "Oscillations.csv" File_dframe = pd.read_csv(Examples_dir+File_name) Nevents = np.asarray(File_dframe['Nevents'].values, dtype=int) # The number of events must be integer-type Hypothesis = np.asarray(File_dframe['Hypothesis'].values, dtype=float) assert(len(Nevents)==len(Hypothesis)), "Nevents and Hypothesis arrays must have the same length." # Optional: # Separate the "Signal" region (first 64 bins) from the "overflow" region (last 64 bins). Nbins = 64 data_signal = Nevents[:Nbins] data_overflow = np.sum(Nevents[Nbins:]) hypo_signal = Hypothesis[:Nbins] hypo_overflow = np.sum(Hypothesis[Nbins:]) # Optional (for plotting): # Get the "Generating Function" for the Nsigma Scalograms. GenFunc = np.asarray(File_dframe['Generating Function'].values, dtype=float) genfunc_signal = GenFunc[:Nbins] genfunc_overflow = np.sum(GenFunc[Nbins:]) extr_nsets = kwakpriv.nsets(data_signal, hypo_signal, nsets=3*10**2, seed=123, extrapolate=False, fastGaussian=False, outputdir=Output_dir) # Save the attributes of extr_nsets Level = extr_nsets.Level Histogram = extr_nsets.Histogram Nsigma = extr_nsets.Nsigma NsigmaFixedRes = extr_nsets.NsigmaFixedRes # If fastGaussian is False #PlessX = extr_nsets.PlessX #PeqX = extr_nsets.PeqX # If fastGaussian is False AND extrapolate is true #Nsigma_fit = extr_nsets.Nsigma_fit #PlessX_fit = extr_nsets.PlessX_fit #PeqX_fit = extr_nsets.PeqX_fit #NsigmaFixedRes_fit = extr_nsets.NsigmaFixedRes_fit r_percent = 10 kwakpriv.nsigScalogram(data=data_signal, hypothesis=hypo_signal, nsigma=Nsigma, #generating_function=genfunc_signal, nsigma_percent=r_percent*0.01, nsigma_colorcode=True, title="Narrow Bump", titlesize=14, xlabel=None, textsize=11, figsize=(6,6), labelpos=-.11, ypad=.57, ticksize=11, outputfile=None) # - # ## KK # + # -- Load the data file -- Examples_dir = "./examples/" File_name = "KK.csv" File_dframe = pd.read_csv(Examples_dir+File_name) Nevents = np.asarray(File_dframe['Nevents'].values, dtype=int) # The number of events must be integer-type Hypothesis = np.asarray(File_dframe['Hypothesis'].values, dtype=float) assert(len(Nevents)==len(Hypothesis)), "Nevents and Hypothesis arrays must have the same length." # Optional: # Separate the "Signal" region (first 64 bins) from the "overflow" region (last 64 bins). 
Nbins = 64 data_signal = Nevents[:Nbins] data_overflow = np.sum(Nevents[Nbins:]) hypo_signal = Hypothesis[:Nbins] hypo_overflow = np.sum(Hypothesis[Nbins:]) # Optional (for plotting): # Get the "Generating Function" for the Nsigma Scalograms. GenFunc = np.asarray(File_dframe['Generating Function'].values, dtype=float) genfunc_signal = GenFunc[:Nbins] genfunc_overflow = np.sum(GenFunc[Nbins:]) extr_nsets = kwakpriv.nsets(data_signal, hypo_signal, nsets=3*10**2, seed=123, extrapolate=False, fastGaussian=False, outputdir=Output_dir) # Save the attributes of extr_nsets Level = extr_nsets.Level Histogram = extr_nsets.Histogram Nsigma = extr_nsets.Nsigma NsigmaFixedRes = extr_nsets.NsigmaFixedRes # If fastGaussian is False #PlessX = extr_nsets.PlessX #PeqX = extr_nsets.PeqX # If fastGaussian is False AND extrapolate is true #Nsigma_fit = extr_nsets.Nsigma_fit #PlessX_fit = extr_nsets.PlessX_fit #PeqX_fit = extr_nsets.PeqX_fit #NsigmaFixedRes_fit = extr_nsets.NsigmaFixedRes_fit r_percent = 10 kwakpriv.nsigScalogram(data=data_signal, hypothesis=hypo_signal, nsigma=Nsigma, #generating_function=genfunc_signal, nsigma_percent=r_percent*0.01, nsigma_colorcode=True, title="Narrow Bump", titlesize=14, xlabel=None, textsize=11, figsize=(9,9), labelpos=-.11, ypad=.57, ticksize=11, outputfile=None) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Stacking # # In this notebook we look at the best parameters found for the following models: # # 1. XGBoost # 2. LightGBM # 3. CatBoost # 4. HistGradientBoosting (scikit-learn) # # We then use stacking to ensemble these 4 models. # # **Note:** I leave the models on their verbose settings so I can monitor their training since it will take a long time to finish # Global variables for testing changes to this notebook quickly RANDOM_SEED = 0 NUM_TREES = 15000 EARLY_STOP = 200 NUM_FOLDS = 3 TEST = False SUBMIT = True # + # General imports import numpy as np import pandas as pd import scipy.stats as stats import pyarrow import time import gc # Evaluation and model selection from sklearn.base import clone from sklearn.metrics import roc_auc_score from sklearn.model_selection import StratifiedKFold, train_test_split from sklearn.linear_model import LogisticRegression, SGDClassifier # Models from catboost import CatBoostClassifier from lightgbm import LGBMClassifier from xgboost import XGBClassifier from sklearn.ensemble import HistGradientBoostingClassifier # Hide warnings (makes optuna output easier to parse) import warnings warnings.filterwarnings('ignore') # - # # Preparing the Data # # We define our cross-validation scheme at the start to ensure that it is the same across all the models we consider # + # %%time # Load Data train = pd.read_feather("../data/train.feather") test = pd.read_feather("../data/test.feather") submission = pd.read_csv('../data/sample_submission.csv') if TEST: train, junk = train_test_split( train, train_size = 0.1, shuffle = True, stratify = train['target'], ) train.reset_index(drop = True, inplace = True) del junk gc.collect() # Relevant features features = [x for x in train.columns if x not in ['id','target']] # + # Stratified k-fold cross-validation train['kfold'] = -1 skf = StratifiedKFold(n_splits = NUM_FOLDS, shuffle = True, random_state = RANDOM_SEED) for fold, (train_idx, valid_idx) in enumerate(skf.split(train, train['target'])): 
train['kfold'].iloc[valid_idx] = fold oof_preds = pd.DataFrame( data = dict(kfold = train['kfold']) ) test_preds = pd.DataFrame( data = dict(id = test['id']) ) # - # # Feature Engineering # # We experiment with feature engineering using row statistics, primarily to add variance to our predictions. def create_row_stats(data): cont_cols, cat_cols = list(), list() for col in features: if data[col].dtype.name.startswith("int"): cat_cols.append(col) else: cont_cols.append(col) new_data = data.copy() new_data['binary_count'] = data[cat_cols].sum(axis=1) new_data['binary_std'] = data[cat_cols].std(axis=1) new_data['min'] = data[cont_cols].min(axis=1) new_data['std'] = data[cont_cols].std(axis=1) new_data['max'] = data[cont_cols].max(axis=1) new_data['median'] = data[cont_cols].median(axis=1) new_data['mean'] = data[cont_cols].mean(axis=1) #new_data['var'] = data[cont_cols].var(axis=1) #new_data['sum'] = data[cont_cols].sum(axis=1) #new_data['sem'] = data[cont_cols].sem(axis=1) new_data['skew'] = data[cont_cols].skew(axis=1) new_data['median_abs_dev'] = stats.median_abs_deviation(data[cont_cols], axis=1) new_data['zscore'] = (np.abs(stats.zscore(data[cont_cols]))).sum(axis=1) return new_data # + # %%time train = create_row_stats(train) test = create_row_stats(test) # New features all_features = [x for x in train.columns if x not in ['id','target','kfold']] assert features != all_features # - # # 1. XGBoost # # We use the best parameters from [this Kaggle notebook](https://www.kaggle.com/rsizem2/tps-10-21-optuna-w-pruning-callbacks-xgboost). Except for using CPU rather than GPU, which in a lot of cases results in more accurate results # Best Parameters xgboost_params = { 'random_state': RANDOM_SEED, 'n_estimators': NUM_TREES, #'tree_method': 'hist', 'max_depth': 5, 'learning_rate': 0.02261104274598307, 'min_child_weight': 74.7573299373233, 'subsample': 0.766, 'colsample_bytree': 0.268, 'colsample_bylevel': 0.591, 'reg_lambda': 75.35694292360638 } def train_xgboost(model_params = {}, fit_params = {}, new_features = False): # Store the predictions oof_preds = np.zeros((train.shape[0],)) test_preds = np.zeros((test.shape[0],)) print('') # Stratified k-fold cross-validation for fold in range(NUM_FOLDS): # Training and Validation Sets if new_features: X_train, y_train = train[train.kfold != fold][features], train[train.kfold != fold]['target'] X_valid, y_valid = train[train.kfold == fold][features], train[train.kfold == fold]['target'] X_test = test[features] else: X_train, y_train = train[train.kfold != fold][all_features], train[train.kfold != fold]['target'] X_valid, y_valid = train[train.kfold == fold][all_features], train[train.kfold == fold]['target'] X_test = test[all_features] # Define Model model = XGBClassifier(**{**xgboost_params, **model_params}) gc.collect() start = time.time() model.fit( X_train, y_train, verbose = False, eval_set = [(X_valid, y_valid)], eval_metric = "auc", early_stopping_rounds = EARLY_STOP, **fit_params ) # validation and test predictions valid_preds = model.predict_proba(X_valid)[:, 1] test_preds += model.predict_proba(X_test)[:, 1] / NUM_FOLDS oof_preds[train.kfold == fold] = valid_preds # fold auc score fold_auc = roc_auc_score(y_valid, valid_preds) end = time.time() print(f'Fold {fold} (AUC): {round(fold_auc, 5)} in {round(end - start, 2)}s.') return test_preds, oof_preds # Train 3 models test_preds['XGBoost'], oof_preds['XGBoost'] = train_xgboost() test_preds['XGB_Hist'], oof_preds['XGB_Hist'] = train_xgboost( model_params = dict(tree_method = 'hist') ) 
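# Note: per the train_xgboost definition above, new_features=True selects the base
# `features` columns, while the default uses `all_features` (which include the
# engineered row statistics).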
test_preds['XGB_Stats'], oof_preds['XGB_Stats'] = train_xgboost(new_features = True) # # 2. LightGBM # Best Parameters lightgbm_params = { 'random_state': RANDOM_SEED, 'n_estimators': NUM_TREES, 'max_depth': 6, 'learning_rate': 0.009099999999999999, 'min_child_samples': 4260, 'subsample': 0.87, 'subsample_freq': 3, 'colsample_bytree': 0.27, 'reg_lambda': 0.0003694272556917343, 'num_leaves': 26, } def train_lightgbm(model_params = {}, fit_params = {}, new_features = False): # Store the holdout predictions oof_preds = np.zeros((train.shape[0],)) test_preds = np.zeros((test.shape[0],)) print('') # Stratified k-fold cross-validation for fold in range(NUM_FOLDS): # Training and Validation Sets if new_features: X_train, y_train = train[train.kfold != fold][features], train[train.kfold != fold]['target'] X_valid, y_valid = train[train.kfold == fold][features], train[train.kfold == fold]['target'] X_test = test[features] else: X_train, y_train = train[train.kfold != fold][all_features], train[train.kfold != fold]['target'] X_valid, y_valid = train[train.kfold == fold][all_features], train[train.kfold == fold]['target'] X_test = test[all_features] # Define Model model = LGBMClassifier(**{**lightgbm_params, **model_params}) gc.collect() start = time.time() model.fit( X_train, y_train, verbose = 0, eval_set = [(X_valid, y_valid)], eval_metric = "auc", early_stopping_rounds = EARLY_STOP, **fit_params ) # validation and test predictions valid_preds = model.predict_proba(X_valid)[:, 1] test_preds += model.predict_proba(X_test)[:, 1] / NUM_FOLDS oof_preds[train.kfold == fold] = valid_preds # fold auc score fold_auc = roc_auc_score(y_valid, valid_preds) end = time.time() print(f'Fold {fold} (AUC): {round(fold_auc, 5)} in {round(end - start, 2)}s.') return test_preds, oof_preds # Train 2 models test_preds['LightGBM'], oof_preds['LightGBM'] = train_lightgbm() test_preds['LGBM_Stats'], oof_preds['LGBM_Stats'] = train_lightgbm(new_features = True) # # 3. 
CatBoost # Best Parameters catboost_params = { 'random_state': RANDOM_SEED, 'n_estimators': NUM_TREES, 'boosting_type': 'Plain', 'bootstrap_type': 'Bernoulli', 'early_stopping_rounds': EARLY_STOP, 'eval_metric': 'AUC', 'max_depth': 7, 'learning_rate': 0.01, 'min_child_samples': 12710, 'random_strength': 33.21156029537479, 'leaf_estimation_iterations': 1, 'subsample': 0.6990000000000001, 'reg_lambda': 60.52806724303393 } def train_catboost(model_params = {}, fit_params = {}, new_features = False): # Store the predictions oof_preds = np.zeros((train.shape[0],)) test_preds = np.zeros((test.shape[0],)) print('') # Stratified k-fold cross-validation for fold in range(NUM_FOLDS): # Training and Validation Sets if new_features: X_train, y_train = train[train.kfold != fold][features], train[train.kfold != fold]['target'] X_valid, y_valid = train[train.kfold == fold][features], train[train.kfold == fold]['target'] X_test = test[features] else: X_train, y_train = train[train.kfold != fold][all_features], train[train.kfold != fold]['target'] X_valid, y_valid = train[train.kfold == fold][all_features], train[train.kfold == fold]['target'] X_test = test[all_features] start = time.time() # Define Model model = CatBoostClassifier(**{**catboost_params, **model_params}) gc.collect() model.fit( X_train, y_train, verbose = False, eval_set = [(X_valid, y_valid)], use_best_model = True, **fit_params ) # validation and test predictions valid_preds = model.predict_proba(X_valid)[:, 1] test_preds += model.predict_proba(X_test)[:, 1] / NUM_FOLDS oof_preds[train.kfold == fold] = valid_preds # fold auc score fold_auc = roc_auc_score(y_valid, valid_preds) end = time.time() print(f'Fold {fold} (AUC): {round(fold_auc, 5)} in {round(end - start, 2)}s.') return test_preds, oof_preds # Train CatBoost test_preds['CatBoost'], oof_preds['CatBoost'] = train_catboost() test_preds['Cat_Stats'], oof_preds['Cat_Stats'] = train_catboost(new_features = True) # # 4. 
Scikit-Learn # Best Parameters histgbc_params = { 'random_state': RANDOM_SEED, 'max_iter': NUM_TREES, 'validation_fraction': 0.33, 'early_stopping': True, 'n_iter_no_change': EARLY_STOP, 'verbose': 0, } def train_histgbm(model_params = {}, fit_params = {}, new_features = False): # Store the predictions oof_preds = np.zeros((train.shape[0],)) test_preds = np.zeros((test.shape[0],)) print('') # Stratified k-fold cross-validation for fold in range(NUM_FOLDS): # Training and Validation Sets if new_features: X_train, y_train = train[train.kfold != fold][features], train[train.kfold != fold]['target'] X_valid, y_valid = train[train.kfold == fold][features], train[train.kfold == fold]['target'] X_test = test[features] else: X_train, y_train = train[train.kfold != fold][all_features], train[train.kfold != fold]['target'] X_valid, y_valid = train[train.kfold == fold][all_features], train[train.kfold == fold]['target'] X_test = test[all_features] # Define Model model = HistGradientBoostingClassifier(**{**histgbc_params, **model_params}) gc.collect() start = time.time() model.fit( X_train, y_train, **fit_params ) # validation and test predictions valid_preds = model.predict_proba(X_valid)[:, 1] test_preds += model.predict_proba(X_test)[:, 1] / NUM_FOLDS oof_preds[train.kfold == fold] = valid_preds # fold auc score fold_auc = roc_auc_score(y_valid, valid_preds) end = time.time() print(f'Fold {fold} (AUC): {round(fold_auc, 5)} in {round(end - start, 2)}s.') return test_preds, oof_preds # Train 2 models with different random seets test_preds['HistGBM'], oof_preds['HistGBM'] = train_histgbm() test_preds['Hist_Stats'], oof_preds['Hist_Stats'] = train_histgbm(new_features = True) # # Predictions oof_preds.head() test_preds.head() # # Generate Submissions # # We create submissions for the CPU generated predictions to see if they are better than the GPU generated models we created with Kaggle notebooks. # Make submission submission['target'] = test_preds['XGBoost'] if SUBMIT: submission.to_csv(f'../output/xgboost_cpu_{NUM_FOLDS}fold_submission.csv', index=False) # Make submission submission['target'] = test_preds['CatBoost'] if SUBMIT: submission.to_csv(f'../output/catboost_cpu_{NUM_FOLDS}fold_submission.csv', index=False) # # Stacking # # We use XGBoost and LightGBM as meta models for stacking: # ## 1. LightGBM Classifier def stack_lightgbm(): preds = np.zeros((test.shape[0],)) scores = np.zeros(NUM_FOLDS) for j in range(NUM_FOLDS): X_train = oof_preds[oof_preds.kfold != j].drop('kfold', axis = 1) X_valid = oof_preds[oof_preds.kfold == j].drop('kfold', axis = 1) y_train = train['target'][train.kfold != j] y_valid = train['target'][train.kfold == j] X_test = test_preds.drop('id', axis = 1) model = LGBMClassifier(random_state = RANDOM_SEED, n_estimators = 200) model.fit( X_train, y_train, verbose = 0, eval_set = [(X_valid, y_valid)], eval_metric = "auc", early_stopping_rounds = 25, ) preds += model.predict_proba(X_test)[:, 1] / NUM_FOLDS preds_valid = model.predict_proba(X_valid)[:, 1] scores[j] = roc_auc_score(y_valid, preds_valid) print("Fold", j ,"(AUC):", scores[j]) print("Avg (AUC):", round(scores.mean(),6)) print("Min (AUC):", round(scores.min(),6)) return preds # LGBMClassifier meta model submission['target'] = stack_lightgbm() if SUBMIT: submission.to_csv(f'../output/stack_lgbm_{NUM_FOLDS}fold_submission.csv', index=False) # ## 2. 
XGBoost Classifier def stack_xgboost(): preds = np.zeros((test.shape[0],)) scores = np.zeros(NUM_FOLDS) for j in range(NUM_FOLDS): X_train = oof_preds[oof_preds.kfold != j].drop('kfold', axis = 1) X_valid = oof_preds[oof_preds.kfold == j].drop('kfold', axis = 1) y_train = train['target'][train.kfold != j] y_valid = train['target'][train.kfold == j] X_test = test_preds.drop('id', axis = 1) model = XGBClassifier(random_state = RANDOM_SEED, n_estimators = 200) model.fit( X_train, y_train, verbose = False, eval_set = [(X_valid, y_valid)], eval_metric = "auc", early_stopping_rounds = 25, ) preds += model.predict_proba(X_test)[:, 1] / NUM_FOLDS preds_valid = model.predict_proba(X_valid)[:, 1] scores[j] = roc_auc_score(y_valid, preds_valid) print("Fold", j ,"(AUC):", scores[j]) print("Avg (AUC):", round(scores.mean(),6)) print("Min (AUC):", round(scores.min(),6)) return preds # XGBClassifier meta model submission['target'] = stack_xgboost() if SUBMIT: submission.to_csv(f'../output/stack_xgb_{NUM_FOLDS}fold_submission.csv', index=False) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # Credentials you get from registering a new application client_id = '78nsl9dmv9su9r' client_secret = '' # OAuth endpoints given in the LinkedIn API documentation authorization_base_url = 'https://www.linkedin.com/uas/oauth2/authorization' token_url = 'https://www.linkedin.com/uas/oauth2/accessToken' from requests_oauthlib import OAuth2Session from requests_oauthlib.compliance_fixes import linkedin_compliance_fix linkedin = OAuth2Session(client_id, redirect_uri='http://127.0.0.1') linkedin = linkedin_compliance_fix(linkedin) # Redirect user to LinkedIn for authorization authorization_url, state = linkedin.authorization_url(authorization_base_url) print 'Please go here and authorize,', authorization_url # Get the authorization verifier code from the callback url redirect_response = raw_input('Paste the full redirect URL here:') # Fetch the access token linkedin.fetch_token(token_url, client_secret=client_secret, authorization_response=redirect_response) # Fetch a protected resource, i.e. user profile r = linkedin.get('https://api.linkedin.com/v1/people/~') print r.content # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Observations and Insights # + # Oberservation # 1: The is a possible correlation between mouse weight and tumor volume. #2: Infubinol had the highest final tumor volume between the four most potential drugs. #3: all the possible outliers were below 20 and above 51 # - # # + # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import scipy.stats as st import numpy as np # Study data files mouse_metadata_path = "data/Mouse_metadata.csv" study_results_path = "data/Study_results.csv" # Read the mouse data and the study results mouse_metadata = pd.read_csv(mouse_metadata_path) study_results = pd.read_csv(study_results_path) # Combine the data into a single dataset mouse_study_df = pd.merge(study_results,mouse_metadata, on = "Mouse ID") # Display the data table for preview mouse_study_df.head() # - # Checking the number of mice. 
full_mouse_count = mouse_study_df["Mouse ID"] # Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint. mouse_ID = mouse_study_df.loc[:,["Mouse ID", "Timepoint"]] mouse_group = mouse_ID.groupby(["Mouse ID"]) mouse_count = mouse_group.count() mouse_count.head() # Optional: Get all the data for the duplicate mouse ID. all_mouse_id = mouse_study_df.groupby(["Mouse ID"]) all_ID_count = all_mouse_id.count() all_ID_count.head() # Create a clean DataFrame by dropping the duplicate mouse by its ID. clean_mouse_data = mouse_study_df.drop_duplicates(subset=["Mouse ID"], keep="last") # Checking the number of mice in the clean DataFrame. clean_mouse_count = clean_mouse_data["Mouse ID"] clean_mouse_data # ## Summary Statistics # + # Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen drug_tumor_df = mouse_study_df.loc[:,["Drug Regimen", "Tumor Volume (mm3)"]] drug_index = drug_tumor_df.set_index("Drug Regimen") drug_mean = drug_index.groupby("Drug Regimen")["Tumor Volume (mm3)"].mean() mean_name = drug_mean.rename("Mean Tumor Volume") drug_median = drug_index.groupby("Drug Regimen")["Tumor Volume (mm3)"].median() median_name = drug_median.rename("Median Tumor Volume") drug_Var = drug_index.groupby("Drug Regimen")["Tumor Volume (mm3)"].var() Var_name = drug_Var.rename("Tumor Volume Variance") drug_std = drug_index.groupby("Drug Regimen")["Tumor Volume (mm3)"].std() STD_name = drug_std.rename("Tumor Volume Std. Dev") drug_sem = drug_index.groupby("Drug Regimen")["Tumor Volume (mm3)"].sem() Error_name = drug_sem.rename("Tumor Volume Std. Err.") # This method is the most straighforward, creating multiple series and putting them all together at the end. drug_stats = pd.concat([mean_name, median_name, Var_name ,STD_name,Error_name], axis = 1) drug_stats # - # Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen drug_stats2 = drug_tumor_df.groupby(['Drug Regimen']).agg({"Tumor Volume (mm3)": ["mean", "median", "var", "std", "sem"]}) # This method produces everything in a single groupby function drug_stats2 # ## Bar and Pie Charts # Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pandas. drug_bar = drug_stats.plot(kind = "bar", title= "Tumor Volume Summary") drug_bar.legend(loc = 'upper center', bbox_to_anchor=(-.4, 1),shadow=True, ncol=1) # Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pyplot. 
groupdrug = drug_index.groupby("Drug Regimen") group_count = groupdrug.count() index_list = group_count.index.tolist() column_list = group_count["Tumor Volume (mm3)"].tolist() plt.bar(index_list, column_list, color= ("blue","orange","green","red","purple", "maroon", "pink", "grey", "yellow", "cyan")) plt.xticks(rotation = "vertical") plt.xlabel("Drug Regimen") plt.ylabel("Total number of mice for each treatment") bar = group_count.plot(kind = "bar", legend = None , color = [('Blue','orange','green','red', 'purple', 'maroon', 'pink', 'grey', 'yellow', 'cyan')]) plt.ylabel("Total number of mice for each treatment") # Generate a pie plot showing the distribution of female versus male mice using pandas sex = mouse_study_df.loc[:,["Sex", "Mouse ID"]] sex_group = sex.groupby("Sex") sex_count = sex_group.count() sex_count = sex_count.rename(columns ={"Mouse ID": "Sex Count"}) colors = ["magenta","blue"] sex_count.plot(kind='pie', y = 'Sex Count', colors= colors ,autopct='%1.1f%%',legend = False) plt.show() # Generate a pie plot showing the distribution of female versus male mice using pyplot sex = mouse_study_df.loc[:,["Sex" , "Mouse ID"]] sex_group = sex.groupby("Sex") counts = sex_group.count() colors = ["magenta","blue"] plt.pie(counts.values, labels=counts.index.values ,colors = colors ,autopct='%1.1f%%') plt.show() # ## Quartiles, Outliers and Boxplots # + # Calculate the final tumor volume of each mouse across four of the treatment regimens: # Capomulin, Ramicane, Infubinol, and Ceftamin cap_df = mouse_study_df.loc[mouse_study_df["Drug Regimen"] == "Capomulin",:] ram_df = mouse_study_df.loc[mouse_study_df["Drug Regimen"] == "Ramicane", :] in_df = mouse_study_df.loc[mouse_study_df["Drug Regimen"] == "Infubinol", :] ceft_df = mouse_study_df.loc[mouse_study_df["Drug Regimen"] == "Ceftamin", :] # Start by getting the last (greatest) timepoint for each mouse # Merge this group df with the original dataframe to get the tumor volume at the last timepoint # + #Capomulin data caplast = cap_df.groupby('Mouse ID').max()['Timepoint'] caplastvol = pd.DataFrame(caplast) caplastmerge = pd.merge(caplastvol, mouse_study_df, on=("Mouse ID","Timepoint"),how="left") tumors = caplastmerge["Tumor Volume (mm3)"] quartiles = tumors.quantile([.25,.5,.75]) lowerq = quartiles[0.25] upperq = quartiles[0.75] iqr = upperq-lowerq lower_bound = lowerq - (1.5*iqr) upper_bound = upperq + (1.5*iqr) print(f"Capomulin potential outliers could be values below {lower_bound} and above {upper_bound} could be outliers.") # + #Ramicane Data ramlast = ram_df.groupby('Mouse ID').max()['Timepoint'] ramlastvol = pd.DataFrame(ramlast) ramlastmerge = pd.merge(ramlastvol, mouse_study_df, on=("Mouse ID","Timepoint"),how="left") tumors1 = ramlastmerge["Tumor Volume (mm3)"] quartiles = tumors.quantile([.25,.5,.75]) lowerq = quartiles[0.25] upperq = quartiles[0.75] iqr = upperq-lowerq lower_bound = lowerq - (1.5*iqr) upper_bound = upperq + (1.5*iqr) print(f"Ramicane potential outliers could be values below {lower_bound} and above {upper_bound} could be outliers.") # + #Infubinol Data INlast = in_df.groupby('Mouse ID').max()['Timepoint'] INlastvol = pd.DataFrame(INlast) INlastmerge = pd.merge(INlastvol, mouse_study_df, on=("Mouse ID","Timepoint"),how="left") tumors2 = INlastmerge["Tumor Volume (mm3)"] quartiles = tumors.quantile([.25,.5,.75]) lowerq = quartiles[0.25] upperq = quartiles[0.75] iqr = upperq-lowerq lower_bound = lowerq - (1.5*iqr) upper_bound = upperq + (1.5*iqr) print(f"Infubinol potential outliers could be values below 
{lower_bound} and above {upper_bound} could be outliers.") # + #Ceftamin Data celast = ceft_df.groupby('Mouse ID').max()['Timepoint'] celastvol = pd.DataFrame(celast) celastmerge = pd.merge(celastvol, mouse_study_df, on=("Mouse ID","Timepoint"),how="left") tumors3 = celastmerge["Tumor Volume (mm3)"] quartiles = tumors.quantile([.25,.5,.75]) lowerq = quartiles[0.25] upperq = quartiles[0.75] iqr = upperq-lowerq lower_bound = lowerq - (1.5*iqr) upper_bound = upperq + (1.5*iqr) print(f"Ceftamin potential outliers could be values below {lower_bound} and above {upper_bound} could be outliers.") # - # Generate a box plot of the final tumor volume of each mouse across four regimens of interest data_to_plot = [tumors, tumors1 ,tumors2, tumors3] fig1, ax = plt.subplots() ax.boxplot(data_to_plot, labels=["Capomulin","Ramicane","Infubinol","Ceftamin",]) ax.set_ylabel('Final Tumor Volume(MM3)') ax.boxplot(data_to_plot, sym='r') plt.show() # ## Line and Scatter Plots # Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin speical_mouse = cap_df.loc[cap_df['Mouse ID']=='j119'] sort_mouse = speical_mouse.sort_values(['Timepoint'],ascending=True) plt.plot(sort_mouse['Timepoint'], sort_mouse['Tumor Volume (mm3)']) plt.title('Capomulin treatment of mouse j199') plt.xlabel("Timepoint(Days)") plt.ylabel("Tumor Volume(mm3)") # Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen new_df = pd.DataFrame(cap_df.groupby(['Mouse ID', 'Weight (g)'])["Tumor Volume (mm3)"].mean()).reset_index() new_df = new_df.rename(columns={"Tumor Volume (mm3)": "Average Volume"}) new_df.plot(kind="scatter", x="Weight (g)", y="Average Volume", grid=True, figsize=(4,4), title="Weight Vs. Average Tumor Volume") plt.clf() plt.cla() plt.close() # ## Correlation and Regression # Calculate the correlation coefficient and linear regression model # for mouse weight and average tumor volume for the Capomulin regimen #new_df = new_df.set_index('Mouse ID') weight = new_df.iloc[:,1] tumor = new_df.iloc[:,2] correlation = st.pearsonr(weight,tumor) print(f"The correlation between both factors is {round(correlation[0],2)}") from scipy.stats import linregress x_values = new_df['Weight (g)'] y_values = new_df['Average Volume'] (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values) regress_values = x_values * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) plt.scatter(x_values,y_values) plt.plot(x_values,regress_values,"r-") plt.annotate(line_eq,(6,10),fontsize=15,color="red") plt.xlabel('Mouse Weight') plt.ylabel('Average Tumor Volume') plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Article Spinning Intro # * Changing certain words of an article so it does not match the original, so a search engine can't mark it as duplicate content # * How is this done: # * take an article and slightly modify it, different terms, same meaning # * "Udemy is a **platform** or **marketplace** for online **learning**" # * "Udemy is a **podium** or **forum** for online **research**" # * Clearly context is very important! # * the idea is that you need to use the surrounding words to influence the replacement of the current word # --- # # Trigram Model and Markov Models # * how can we model the probability of a word given the surrounding words? 
# * Let's start by taking an entire document and labeling all of the words: **w(1), w(2),...,w(n)**
# * we can then model the probability of **w(i)** using the surrounding words:
#     * those that came before w(i): w(1)...w(i-1)
#     * and those that came after w(i): w(i+1)...w(n)
# * Probabilistically this would look like:
# $$P\Big(w(i)\;\Big|\;w(1)...w(i-1), w(i+1)...w(n)\Big)$$
# * Why wouldn't this work?
#     * using this approach we are conditioning on every other word in the document, which means only the original document itself would match the model exactly
#     * we need to do something similar to what we do with Markov models and only consider the closest words
#
# ## Trigram
# * we are going to use something called a trigram to accomplish this!
# * we are going to create triples, where we store combinations of 3 consecutive words
# * A few pieces of vocab worth knowing:
#     * **corpus**: collection of text
#     * **token**: a word or punctuation mark that makes up the corpus
#     * **type**: distinct token
#     * **vocabulary**: set of all types
#     * **unigram**: 1-token sequence
#     * **bigram**: 2-token sequence
#     * **trigram**: 3-token sequence
#     * **n-gram**: n-token sequence
# * in the case of a trigram we are going to use the previous word and the next word to predict the current word:
# $$P\Big(w(i)\;\Big|\;w(i-1), w(i+1)\Big)$$
# * How will we implement this?
#     * we are going to create a dictionary with the previous word and next word as the key, and then randomly sample the middle word **w(i)**!
#     * for example we could have the key ('I', 'sports'), which would map to an array of values, ['hate', 'love', 'enjoy', etc.]
#     * we would randomly sample from that array
# * this is sort of like a Markov model, except that a Markov model is only concerned with P(w(i)|w(i-1))
# * We won't replace every single word in the document, because that wouldn't give us anything useful
#     * so we will make the decision to replace a word with some small probability
# * Both this and latent semantic analysis are what we call unsupervised learning algorithms, because there are no labels and we just want to learn the structure of the data
#     * Note: the spam detector and sentiment analyzer were supervised because we had labels to match against
# ---
# # Markov Chains and Monte Carlo Methods
# * Great tutorial: https://deeplearning4j.org/markovchainmontecarlo
# * Markov Chain Monte Carlo (MCMC) is a mathematical method that draws samples randomly from a black box to approximate the probability distribution of attributes over a range of objects (the height of men, the names of babies, the outcomes of events like coin tosses, the reading levels of school children, the rewards resulting from certain actions) or the futures of states.
# * MCMC methods help gauge the distribution of an outcome or statistic you're trying to predict, by randomly sampling from a complex probabilistic space.
# * As with all statistical techniques, we sample from a distribution when we don't know the function that succinctly describes the relation between two variables (actions and rewards). MCMC helps us approximate a black-box probability distribution.
#
# ## Concrete Example
# Let's say you're a gambler in the saloon of a Gold Rush town and you roll a suspicious die without knowing if it is fair or loaded. You roll the six-sided die a hundred times, count the number of times you roll a four, and divide by a hundred. That fraction estimates the probability of rolling a four. If the count is close to 16.7 (100 * 1/6), i.e. the estimate is close to 1/6, the die is probably fair.
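#
# As a quick, minimal sketch of that example (illustrative only, not part of the original notebook), we can simulate the die with numpy and estimate the probability of rolling a four by counting:

import numpy as np

rng = np.random.default_rng(0)
rolls = rng.integers(1, 7, size=100)       # 100 rolls of a fair die, faces 1..6
print('Estimated P(4) from 100 rolls:', (rolls == 4).mean())        # roughly 1/6 ~ 0.167

# With more rolls the estimate tightens around 1/6 -- that is the Monte Carlo idea.
rolls = rng.integers(1, 7, size=100_000)
print('Estimated P(4) from 100,000 rolls:', (rolls == 4).mean())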
#
# Monte Carlo looks at the results of rolling the die many times and tallies the results to determine the probabilities of different states. It is an inductive method, drawing from experience. The die has a state space of six, one for each side.
#
# ## Systems and States
# At a more abstract level, where words mean almost anything at all, a system is a set of things connected together (you might even call it a graph, where each state is a vertex and each transition is an edge). It's a set of states, where each state is a condition of the system. But what are states?
#
# * Cities on a map are "states". A road trip strings them together in transitions. The map represents the system.
# * Words in a language are states. A sentence is just a series of transitions from word to word.
# * Genes on a chromosome are states. To read them (and create amino acids) is to go through their transitions.
# * Web pages on the Internet are states. Links are the transitions.
# * Bank accounts in a financial system are states. Transactions are the transitions.
# * Emotions are states in a psychological system. Mood swings are the transitions.
# * Social media profiles are states in the network. Follows, likes, messages and friending are the transitions.
# * Rooms in a house are states. People walking through doorways are the transitions.
#
# So states are an abstraction used to describe these discrete, separable things. A group of those states bound together by transitions is a system. And those systems have structure, in that some states are more likely to occur than others (ocean, land), or that some states are more likely to follow others.
#
# We are more likely to read the sequence Paris -> France than Paris -> Texas, although both series exist, just as we are more likely to drive from Los Angeles to Las Vegas than from L.A. to Slab City, although both places are nearby.
#
# A list of all possible states is known as the "state space." The more states you have, the larger the state space gets, and the more complex your combinatorial problem becomes.
#
# ## Markov Chains
# Since states can occur one after another, it may make sense to traverse the state space, moving from one to the next. A Markov chain is a probabilistic way to traverse a system of states. It traces a series of transitions from one state to another. It's a random walk across a graph.
#
# Each current state may have a set of possible future states that differs from any other. For example, you can't drive straight from Atlanta to Seattle - you'll need to hit other states in between. We are all, always, in such corridors of probabilities; from each state, we face an array of possible future states, which in turn offer an array of future states two degrees away from the start, changing with each step as the state tree unfolds. New possibilities open up, others close behind us. Since we generally don't have enough compute to explore every possible state of a game tree for complex games like go, one trick that organizations like DeepMind use is Monte Carlo Tree Search, which narrows the beam of possibilities to only those states that promise the most likely reward.
#
# Traversing a Markov chain, you're no longer sampling with a God's-eye view, like a conquering alien. You are in the middle of things, groping your way toward one of several possible future states, step by probabilistic step, through a Markov chain.
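#
# To make the "random walk across a graph" idea concrete, here is a small sketch (the states and transition probabilities below are invented for illustration; they are not from the original text). We walk the chain for many steps and tally how often each state is visited -- the visit fractions foreshadow the equilibrium distribution discussed next.

import random

# each state maps to (next state, probability) pairs; only the current state matters
transitions = {
    'sunny':  [('sunny', 0.7), ('cloudy', 0.2), ('rainy', 0.1)],
    'cloudy': [('sunny', 0.3), ('cloudy', 0.4), ('rainy', 0.3)],
    'rainy':  [('sunny', 0.2), ('cloudy', 0.4), ('rainy', 0.4)],
}

def step(state):
    # sample the next state using only the current state (the memoryless property)
    states, probs = zip(*transitions[state])
    return random.choices(states, weights=probs, k=1)[0]

random.seed(0)
state = 'sunny'
visits = {s: 0 for s in transitions}
for _ in range(100_000):
    state = step(state)
    visits[state] += 1

total = sum(visits.values())
print({s: round(count / total, 3) for s, count in visits.items()})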
#
# While our journeys across a state space may seem unique, like road trips across America, an infinite number of road trips would slowly give us a picture of the country as a whole, and of the network that links its cities together. This is known as an equilibrium distribution. That is, given infinite random walks through a state space, you can come to know how much total time would be spent in any given state. If this condition holds, you can use Monte Carlo methods to initiate random "draws", or walks through the state space, in order to sample it.
#
# ## Markov Time
# Markov chains have a particular property: oblivion, or forgetting.
#
# That is, they have no long-term memory. They know nothing beyond the present, which means that the only factor determining the transition to a future state is the Markov chain's current state. You could say the "m" in Markov stands for "memoryless": a woman with amnesia pacing through the rooms of a house without knowing why.
#
# Or you might say that Markov chains assume the entirety of the past is encoded in the present, so we don't need to know anything more than where we are to infer where we will be next. Check out a visual demo here: http://setosa.io/ev/markov-chains/
#
# So imagine the current state as the input data, and the distribution of attributes related to those states (perhaps that attribute is reward, or perhaps it is simply the most likely future states) as the output. From each state in the system, by sampling you can determine the probability of what will happen next, doing so recursively at each step of the walk through the system's states.
#
# ## Probability as a Space
# When they call it a state space, they're not joking. You can picture it, just like you can picture land and water, each one of them a probability as much as a physical thing. Unfold a six-sided die and you have a flattened state space in six equal pieces, shapes on a plane. Line up the letters by their frequency for 11 different languages, and you get 11 different state spaces.
#
# Another tutorial: https://jeremykun.com/2015/04/06/markov-chain-monte-carlo-without-all-the-bullshit/
# ---
# # Article Spinner Code
# A great resource for this article spinner is found here: http://norvig.com/ngrams/ch14.pdf
# Let's now write the code for our article spinner. Start with the imports.

import nltk
import random  # needed for probabilities and sampling
import numpy as np
from bs4 import BeautifulSoup

# ### Load our positive reviews.
positive_reviews = BeautifulSoup(open('data/electronics/positive.review').read(), "lxml")
positive_reviews = positive_reviews.findAll('review_text')

# ### Collect all of the Trigrams
# Recall, for each trigram the key is the previous and next word, and the value is the list of possible middle words (an array, which may contain only a single value).

trigrams = {}
for review in positive_reviews:  # loop through every review
    s = review.text.lower()  # lowercase so we don't get two versions of the same word
    tokens = nltk.tokenize.word_tokenize(s)
    for i in range(len(tokens) - 2):
        k = (tokens[i], tokens[i+2])  # the key is a tuple; tuples are immutable, so they can be dict keys
        if k not in trigrams:
            trigrams[k] = []
        trigrams[k].append(tokens[i+1])
# now we have all of the possible middle words

# ### Transform into a probability vector
# Now that we have all of the possible middle words, we need to transform this into a probability vector. We need to convert these trigrams into probabilities.
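# As a miniature illustration of that step (an equivalent sketch using collections.Counter, not the notebook's own code -- the full loop used here follows below): for a single trigram key, the list of observed middle words becomes a probability distribution by dividing each count by the total.

from collections import Counter

middle_words = ['love', 'hate', 'love', 'enjoy', 'love']   # hypothetical values for one key
counts = Counter(middle_words)
n = sum(counts.values())
probabilities = {word: count / n for word, count in counts.items()}
print(probabilities)   # {'love': 0.6, 'hate': 0.2, 'enjoy': 0.2}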
trigrams_probabilities = {} # dictionary to hold trigram probabilities, the loop through trigrams for k, words in trigrams.items(): # k will be the key, and words is a list of words for that key if len(set(words)) > 1: # set gets rid of duplicates, then we need to make sure > 1 word d = {} # another dictionary d, keyed by the middle word n = 0 for w in words: # loop through each word, d count how many times the middle word occur if w not in d: d[w] = 0 d[w] += 1 n += 1 # n is going to track the total number of words for w, c in d.items(): d[w] = float(c)/n # # of times each word occurs, divided by total number of words trigrams_probabilities[k] = d # setting trigram prob for specific key to be that of d # ### Function to Randomly Sample Trigram Probabilities # Now we need to create a function that will randomly sample from these trigram probabilities. def random_sample(d): # function, takes dictionary (key is word, value is probability of that word) r = random.random() # generate random number cumulative = 0 for w, p in d.items(): cumulative += p if r < cumulative: return w # ### Function to test spinner # It needs to randomly choose a review, then try to spin it and print both out so we can compare them. def test_spinner(): review = random.choice(positive_reviews) # grab a random positive review s = review.text.lower() print('Original:', s) tokens = nltk.tokenize.word_tokenize(s) # tokenize the positive review for i in range(len(tokens) - 2): # loop through each token if random.random() < 0.2: # choose with a small probability to replace (20% chance) k = (tokens[i], tokens[i+2]) # get the word before and after our word if k in trigrams_probabilities: w = random_sample(trigrams_probabilities[k]) tokens[i+1] = w print ('Spun:') print(" ".join(tokens).replace(" .", ".").replace(" '", "'").replace(" ,", ",").replace("$ ", "$").replace(" !", "!")) test_spinner() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt import math a=[3,3,10,4,9,10,3,12,6,11,11,10,6,8,5,5,10,4,7,8,6,7,3,8,7,15,7,7,5,9,10,5,10,9,4,12,5,11,5,9,13,3,7,6,6,13,4,5,11,9,6,7,9,10,6,3,4,11,5,9,6,7,7,8,8,6,8,8,9,8,2,11,4,8,5,7,6,11,6,8,6,5,12,8,6,4,10,5,8,6,8,7,10,9,7,8,11,8,14,12] plt.hist(a) np.mean(a) n=7.53 npoints=100 ar= np.array(npoints) for i in range(len(a)): y=100*(1/(np.exp(n))*(n**a[i])/(math.factorial(a[i]))) print(y) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import json import codecs import tensorflow as tf import collections with open('/crimea/geeticka/data/relation_extraction/semeval2010/pre-processed/original/bert/train_original_border_50.json') as file: for line in file.readlines(): data = json.loads(line) print(data) break # because the dumping of the embeddings was done using pytorch, refer to their method of writing to # figure out how to do the reading # https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/examples/extract_features.py # refer to the above to figure out how to read this file # need to import collections def write_bert_tokens(input_filename, output_filename): with open(input_filename, 'r', encoding='utf-8') as input_file, open(output_filename, 'w', encoding='utf-8' as output_file: for 
line in input_file.readlines(): if data['features'][0]['token'] != '[CLS]': raise Exception("The first token has to be CLS!") if data['features'][-1]['token'] != '[SEP]': raise Exception("The last token has to be SEP!") output_json = collections.OrderedDict() data = json.loads(line) output_json['linex_index'] = data['linex_index'] features = data['features'] # basically for all features['token'] that starts with ##, add up the values # for the respective indexes to put the words back together, ignore [CLS] and [SEP] tokens new_feature_map = generate_feature_map_without_word_piece(features) # this new feature map needs to be # called layers because things have now been shuffled. output_json['layers'] = new_feature_map output_filename.write(json.dumps(output_json) + "\n") a = [1] a.extend([2,3,4]) features = [{'token': '3', 'layers': [{'index': -1, 'values': [-1, -3]}, {'index': -2, 'values': [-4, -2]}, {'index': -3, 'values': [-2, -1]}, {'index': -4, 'values': [-1,2]}]}, {'token': 'a', 'layers': [{'index': -1, 'values': [1,2]}, {'index': -2, 'values': [3,4]}, {'index': -3, 'values': [4,5]}, {'index': -4, 'values': [2,1]}]}, {'token': '##b', 'layers': [{'index': -1, 'values': [-1, -3]}, {'index': -2, 'values': [-2, -1]}, {'index': -3, 'values': [-3, -2]}, {'index': -4, 'values': [-1, -1]}]}, {'token': '3', 'layers': [{'index': -1, 'values': [-1, -3]}, {'index': -2, 'values': [-4, -2]}, {'index': -3, 'values': [-2, -1]}, {'index': -4, 'values': [-1,2]}]}, {'token': '##b', 'layers': [{'index': -1, 'values': [-1, -3]}, {'index': -2, 'values': [-2, -1]}, {'index': -3, 'values': [-3, -2]}, {'index': -4, 'values': [-1, -1]}]}, {'token': '##b', 'layers': [{'index': -1, 'values': [-1, -3]}, {'index': -2, 'values': [-2, -1]}, {'index': -3, 'values': [-3, -2]}, {'index': -4, 'values': [-1, -1]}]}, {'token': '3', 'layers': [{'index': -1, 'values': [-1, -3]}, {'index': -2, 'values': [-4, -2]}, {'index': -3, 'values': [-2, -1]}, {'index': -4, 'values': [-1,2]}]}] # indexes = [0,1] # + # def generate_feature_map_without_word_piece(features): # # need to double check and see why this is happening # new_features = [] # i = 0 # while(i < len(features)): # if features[i]['token'] == '[CLS]' or features[i]['token'] == '[SEP]': # i += 1 # continue # captured_indexes = [] # for j in range(i + 1, len(features)): # if not features[j]['token'].startswith('##'): # break # captured_indexes.append(j) # if len(captured_indexes) == 0: # new_features.append(features[i]) # i += 1 # continue # sum_indexes = [i] # sum_indexes.extend(captured_indexes) # new_feature = average_over_token_embedding(sum_indexes, features) # new_features.append(new_feature) # i = captured_indexes[-1] + 1 # # rewrite in the elmo format as well # new_features_map = [] # we are converting from the (token, layers) shape to (layers, token) shape # layer_minus1 = []; layer_minus2 = []; layer_minus3 = []; layer_minus4 = []; # for token in new_features: # layer_minus1.append({'token': token['token'], 'features': token['layers'][0]['values']}) # layer_minus2.append({'token': token['token'], 'features': token['layers'][1]['values']}) # layer_minus3.append({'token': token['token'], 'features': token['layers'][2]['values']}) # layer_minus4.append({'token': token['token'], 'features': token['layers'][3]['values']}) # new_features_map.append({'index': -1, 'values': layer_minus1}) # new_features_map.append({'index': -2, 'values': layer_minus2}) # new_features_map.append({'index': -3, 'values': layer_minus3}) # new_features_map.append({'index': -4, 'values': 
layer_minus4}) # return new_features_map # - generate_feature_map_without_word_piece(features) a = {2: [1,2]} np.mean([a[2], [3,4]], axis=1) # + # def average_over_token_embedding(indexes, features): # new_feature = collections.OrderedDict() # new_token = '' # new_layers = [] # layer_minus_1 = []; layer_minus_2 = []; layer_minus_3 = []; layer_minus_4 = []; # for index in indexes: # layer_minus_1.append(features[index]['layers'][0]['values']) # layer_minus_2.append(features[index]['layers'][1]['values']) # layer_minus_3.append(features[index]['layers'][2]['values']) # layer_minus_4.append(features[index]['layers'][3]['values']) # new_token += features[index]['token'] # new_layers.append({'index': -1, 'values': list(np.mean(layer_minus_1, axis=0, dtype=np.float32))}) # new_layers.append({'index': -2, 'values': list(np.mean(layer_minus_2, axis=0, dtype=np.float32))}) # new_layers.append({'index': -3, 'values': list(np.mean(layer_minus_3, axis=0, dtype=np.float32))}) # new_layers.append({'index': -4, 'values': list(np.mean(layer_minus_4, axis=0, dtype=np.float32))}) # new_feature['token'] = new_token # new_feature['layers'] = new_layers # return new_feature # - average_over_token_embedding(indexes, features) len(embeddings[0]) len(embeddings[0]) # %load_ext autoreload # %autoreload import sys sys.path.append('..') from relation_extraction.data import utils import numpy as np bert_embeddings = utils.get_bert_embeddings('/crimea/geeticka/data/relation_extraction/semeval2010/pre-processed/original/bert/train_original_border_50.json') elmo_embeddings = utils.get_elmo_embeddings('/crimea/geeticka/data/relation_extraction/semeval2010/pre-processed/original/elmo/train_original_border_-1.hdf5') len(elmo_embeddings[0]) for item in elmo_embeddings[0]: print(len(item)) for i_item in item: print(len(i_item)) for ii_item in i_item: print(len(ii_item)) for iii_item in i_item: print(len(iii_item)) break break break break # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # K-Means(Application) # # - (1)传统K-Means # - (2)K-Means++ # - (3)K-Means在压缩图片中的应用 import numpy as np import pandas as pd from sklearn.datasets import load_iris import matplotlib.pyplot as plt # ### 1 Load data # # 这里的数据集我们依然选择scikit-learn上的鸢尾花数据集 def create_data(): iris = load_iris() df = pd.DataFrame(iris.data, columns=iris.feature_names) df['label'] = iris.target df.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label'] data = np.array(df.iloc[:100, [0, 1, -1]]) return data[:,:2], data[:,-1] X, y = create_data() plt.scatter(X[:,0],X[:,1],c=y) # ### 2 Origianl K-Means # # **K-Means Algorithm:** # # 输入:样本集$X={x_1,x_2,...,x_m}$,聚类数为$K$,最大迭代数$Iter$. # # 输出: 划分的聚类数$c_1,c_2,...,c_k$ # # (1)从数据集中随意挑选出$K$个样本作为初始的质心(center)向量$\mu$ # # (2)迭代$iter=1,2,...,Iter$. # # - 对于$i=1,2,...,m;k=1,2,...,K$个样本$x_i$计算和各个center的距离$d_{ik}$ # - $d_{ik}= ||x_{ik}-\mu_{ik}||^{2}$或者$D = \sum_{i=1}^{m}\sum_{k=1}^{K}||x_i-\mu{k}||^{2}$ # # - 对于每一个样本点,获取与其最近的center,将其归为一类. # # - 得到新的$k$类群后,计算新的center,即当前聚类群的均值. # - $\mu=\frac{\sum x_{k}}{N_{c_k}}$ # # - 如果$k$个center向量变化在阈值内,则视为收敛退出迭代 # # (3) 得到$K$个center:$C={c_1,c_2,...,c_k}$ # ### 2.1 K-Means Original Model # # 假设现在数据的维度是m,n:m个样本,n个features. 
# # 参数维度: # - center:有K个质心且每个质心都能与样本计算距离,所以center(K,n) # - labels:有m个样本需要预测,所以labels(m,) def K_means_Original(X,K,Iter): """ Implementation original K-Means Parameters: ---------- X: data set K: Number of cluster centers. Iter: Number of iterative Return: ------ labels: predict labels. center: cluster centers cost: loss value. """ m,n = X.shape D = np.zeros((m,K)) # cache distance d. # initial center about 3 lines. shuffle_X = np.copy(X) np.random.shuffle(shuffle_X) center = shuffle_X[:K] cost = {} # cache loss value. for iter_ in range(Iter): # calculate distance with every points and every center. for i in range(K): d = np.sum((X-center[i])**2,axis=1) D[:,i] += d # Get the point with the nearest center. labels = np.argmin(D,axis=1) # if label = 0: center1,label=1:center2 # start update center. for k in range(K): center_X = X[np.where(labels==k)[0]] # get point in the nearest center. center[k,:] = np.sum(center_X,axis=0) / center_X.shape[0] # compute mean,and update center. # compute loss loss = np.sum(np.sum((center_X-center[k,:])**2,axis=1)) /m # cache loss in k center if k not in cost and Iter % 10 ==0: cost[k] = [loss] else: cost[k].append(loss) return labels,center,cost labels,center,cost = K_means_Original(X,2,100) # 得到了最优的labels我们可以尝试使用预测的labels来绘制数据 plt.scatter(X[:,0],X[:,1],c=labels) # 可以看出分类的结果只是一般的,如果你多运行几次,你会发现效果差异很大. # # 我们继续绘制cost for k,loss in cost.items(): plt.plot(loss,label="classification:"+str(k)) plt.legend() # 这里大家可能就会有疑问了,为什么一条loss是下降的,而另一条是上升的,这是因为: # # 在初次迭代的时候,肯定有一个center下的数据量会多一些,那么loss初始值就会大,另一个数据量会少些,loss初始值就会小. # # 在迭代过程中: # - center的loss初始值大的会因为丢弃掉一些数据点而下降,最终该center周围的数据点趋近稳定. # - center的loss初始值小的会因为得到一些数据点而上升,最终该center周围的数据点趋近稳定. # # # 所以只要将上升的loss添加一个负号就可以了 for k,loss in cost.items(): if k == 0: plt.plot([-i for i in loss],label="classification:"+str(k)) else: plt.plot(loss,label="classification:"+str(k)) plt.legend() # ### 2.2 K-Means-Animation # # 我们定义一个动态图,来动态观测下K-Means的聚合过程 def K_means_Animation(X,center,labels): # %matplotlib inline from IPython import display plt.scatter(X[:,0],X[:,1],c=labels) plt.scatter(center[0,0],center[0,1],c='red',marker='^',s=100) plt.scatter(center[1,0],center[1,1],c='black',marker='o',s=100) plt.pause(0.1) plt.show() display.clear_output(wait=True) def K_means_Original_Animation(X,K,Iter): """ Implementation original K-Means Parameters: ---------- X: data set K: Number of cluster centers. Iter: Number of iterative Return: ------ labels: predict labels. center: cluster centers cost: loss value. """ m,n = X.shape D = np.zeros((m,K)) # cache distance d. # initial center about 3 lines. shuffle_X = np.copy(X) np.random.shuffle(shuffle_X) center = shuffle_X[:K] cost = {} # cache loss value. for iter_ in range(Iter): # calculate distance with every points and every center. for i in range(K): d = np.sum((X-center[i])**2,axis=1) D[:,i] += d # Get the point with the nearest center. labels = np.argmin(D,axis=1) # if label = 0: center1,label=1:center2 # start update center. for k in range(K): center_X = X[np.where(labels==k)[0]] # get point in the nearest center. center[k,:] = np.sum(center_X,axis=0) / center_X.shape[0] # compute mean,and update center. 
# compute loss loss = np.sum(np.sum((center_X-center[k,:])**2,axis=1)) /m # cache loss in k center if k not in cost and Iter % 10 ==0: cost[k] = [loss] else: cost[k].append(loss) # start Animation plot K_means_Animation(X=X,center=center,labels=labels) return labels,center,cost labels,center,cost = K_means_Original_Animation(X,2,100) # ### 3 K-Means++ # # 我们知道传统K-Means由于center的初始值是随机的,所以聚合的效果不是很好,我们现在采用K-Means++得到的初始值进行尝试. # # **K-Means++ algorithm:** # # 输入:数据集,聚类数量$K$ # # 输出: 聚类center$c_1,c_2,...,c_K$ # # (1)从输入的数据点集合中随机选择一个点作为第一个聚类center. # # (2)对于样本的每一个数据点,**计算与其最近的center**,得到距离$d_i$,center集群中的样本组成的距离向量为$D(X)=\{d_1,d_2,...,d_{n}\}$,随机取一个Random能够落在$\sum D$内,然后做循环: Random -= $D(x)$ 直到Random <=0. # # (3) 以概率的方式选择距离较大的点作为下一个聚类center. # # (4)重复(2),(3)直到获取$K$个center作为初始center放入传统的K-Means中去. # # **Ps:** # - 由于第一个质心(center)是随机产生的,所以实际上只迭代K-1次. def kpp(X,K): """ Implementation K-Means++ Parameters: ---------- X: data set. K: Number of cluster centers. Return: ------ cluster_centers:centers array. shape is (K,n) """ m,n = X.shape cluster_centers = np.zeros((K,n)) # cache centers. index = np.random.randint(0,X.shape[0]) # random index cluster_centers[0] = X[index] # get initial random data in X ds_cache = np.zeros((m,K)) # cache distance with every centers and points. # start get centers. for k in range(1,K): for j in range(K): ds_cache[:,j] = np.sum(np.power((X-cluster_centers[j]),2),axis=1) # compute distance with every centers and points. D = np.min(ds_cache,axis=1) # 计算每个数据点与其最近的中心距离 random_ = np.random.rand() * D.sum() # 保证随机数能够落在sum(D(x)) 内 # 寻找最远的样本点的索引放入X从而得到距离center最远的点 for j,di in enumerate(D): random_ -= di if random_<= 0: cluster_centers[k,:] += X[j] break return cluster_centers cluster_centers = kpp(X,2) cluster_centers plt.scatter(X[:,0],X[:,1]) plt.scatter(cluster_centers[0,0],cluster_centers[0,1],c='red',marker='^',s=100) plt.scatter(cluster_centers[1,0],cluster_centers[1,1],c='black',marker='o',s=100) # 可以看出两个初始center之间的距离相对比较远. # # 现在使用K-Means++来对K-Means进行优化 def K_means(X,K,Iter): """ Implementation original K-Means Parameters: ---------- X: data set K: Number of cluster centers. Iter: Number of iterative Return: ------ labels: predict labels. center: cluster centers cost: loss value. """ m,n = X.shape D = np.zeros((m,K)) # cache distance d. # use KPP initial center. center = kpp(X,K) for iter_ in range(Iter): # calculate distance with every points and every center. for i in range(K): d = np.sum((X-center[i])**2,axis=1) D[:,i] += d # Get the point with the nearest center. labels = np.argmin(D,axis=1) # if label = 0: center1,label=1:center2 # start update center. for k in range(K): center_X = X[np.where(labels==k)[0]] # get point in the nearest center. center[k,:] = np.sum(center_X,axis=0) / center_X.shape[0] # compute mean,and update center. # compute loss loss = np.sum(np.sum((center_X-center[k,:])**2,axis=1)) /m return labels,center labels,center = K_means(X,2,300) plt.scatter(X[:,0],X[:,1],c=labels) # ### 4 Scikit-learn # # 同样我们也可以使用sklearn中的[KMeans](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html)来聚合. from sklearn.cluster import KMeans kmeans = KMeans(n_clusters=2, random_state=0,max_iter=300).fit(X) B = kmeans.labels_ plt.scatter(X[:,0],X[:,1],c=B) # 可以看出在相同迭代下,结果是一样的. # ### 5 K-Means之缩略图处理 # # 实际做法就是:将每个像素点当作一个数据,跑一下K-means,得到k个centroids,然后用这些centroids的像素值来代替对应的cluster里的所有点的像素值。对于彩色图片来说,例如RGB三色的图片,每一个像素被当作是一个3维向量空间中的点. # # 就是将原图中的每一个像素以K中颜色填充,从而压缩图片. 
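#
# As a compact sketch of the same pixel-quantization idea using scikit-learn's KMeans (an illustrative alternative, not the notebook's own approach -- the implementation based on the K_means function above follows below; the image path is assumed to be the same 'data_set/lenna.png' file loaded later):

import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

img = plt.imread('data_set/lenna.png')     # (height, width, 3) RGB values in [0, 1]
pixels = img.reshape(-1, 3)                # each pixel becomes one 3-dimensional sample

kmeans = KMeans(n_clusters=16, random_state=0).fit(pixels)
quantized = kmeans.cluster_centers_[kmeans.labels_]   # replace every pixel by its centroid colour
plt.imshow(quantized.reshape(img.shape))
plt.title('K=16 colour quantization (sketch)')
plt.show()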
# # # 下图显示了[K-Means压缩详解](https://www.jianshu.com/p/594e9fc5db9a): # ![](picture/51.png) # 首先加载原图: image = plt.imread('data_set/lenna.png') image.shape # 可以看出图片的$width=220,height=220,channel=3$ # # 显示原图: plt.imshow(image) # 将图片转化为K-Means能够接受的形式: # - shape:width * height,3. # - 这样的数据实际上每一个像素点(每一行)有三个特征分别是R,G,B. new_image = image.reshape(image.shape[0]*image.shape[1],3) labels,center = K_means(X=new_image,K=2,Iter=300) print("labels shape is : ",labels.shape) print("center is : ",center) # - 这里显示的$48400=220*220$就是所有的像素点(样本点). # - 这里显示的ceter就是聚类中心,两个中心,每个中心有RGB三个特征. # 使用cnter的值代替每个像素点的值 K_image = np.zeros((image.shape[0],image.shape[1],3)) # 遍历每个像素点,找到聚类中心对应的像素值 pixel_count = 0 for i in range(image.shape[0]): for j in range(image.shape[1]): cluster_idx = labels[pixel_count] # 获取聚类中心索引位置上的像素值 cluster_value = center[cluster_idx] # 放入center的点的值 K_image[i,j,:] += cluster_value pixel_count +=1 plt.imshow(K_image) plt.imsave('K_image.png',K_image) # 也可以先将labels转化为width,height的形式. K_image2 = np.zeros((image.shape[0],image.shape[1],3)) labels_iamge = labels.reshape(image.shape[0],image.shape[1]) for i in range(image.shape[0]): for j in range(image.shape[1]): K_image2[i,j,:] = center[labels_iamge[i,j],:] plt.imshow(K_image2);plt.xticks(());plt.yticks(());plt.title("K=2") # 现在尝试不同的K对图像的压缩结果. # 测试K=2,64,100 def Image_Compression(image,K=list): plt.figure(figsize=(10, 10)) plt.subplot(2,2,1);plt.imshow(image);plt.xticks(());plt.yticks(());plt.title("Original") width,height,channel = image.shape new_image = image.reshape(width * height,3) for k in range(len(K)): labels,center = K_means(X=new_image,K=K[k],Iter=100) K_image = np.zeros((width,height,channel)) labels_iamge = labels.reshape(width,height) for i in range(width): for j in range(height): K_image[i,j,:] = center[labels_iamge[i,j],:] plt.subplot(2,2,k+2);plt.imshow(K_image);plt.xticks(());plt.yticks(());plt.title("K="+str(K[k])) plt.show() Image_Compression(image,K=[2,64,100]) # 需要注意的是: # # 如果你使用的是jpg,或者jpge的图片,那么在创建K_image矩阵的时候要指定detype=np.uint8 # # 可以看出当K=64的时候,基本和原图没有什么区别,然而文件大小却减小了,可以使用plt.imsave()来保存图片并查看图片的大小. # # 到此K-Means的所有内容已经结束,Good Luck # # # Homework # # 使用K-Means聚类手写数字 # # Good Luck~~ from sklearn.datasets import load_digits from sklearn.preprocessing import scale digits = load_digits() data = scale(digits.data) n_samples, n_features = data.shape n_digits = len(np.unique(digits.target)) labels = digits.target # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import matplotlib.pyplot as plt from scipy import stats from matplotlib.patches import Ellipse from matplotlib.gridspec import GridSpec # %matplotlib inline # - # set the betweeen, within, cross-sectional distribution colors and alpha for plotting dist_c = ['#66c2a5','#fc8d62','#8da0cb'] dist_alpha = [0.4,0.4,0.4] # + # Figure 1 def genTimeCourse(limits,rng,t): y = [rng.uniform(-limits,limits)] for time in np.arange(len(t)-1): delta = rng.normal(y[-1],0.5) # resample untile within limits while delta > limits: delta = rng.normal(y[-1],0.1) while delta < -limits: delta = rng.normal(y[-1],0.1) new_y = delta y.append(new_y) return y t = np.arange(0,50,1) rng = np.random.RandomState(seed=0) limits = 1 NA = genTimeCourse(limits,rng,t) # plt.plot(t,NA); # plt.ylim(-3,3) # get AU to be corrected with NA! 
rng = np.random.RandomState(seed=0) AU = [] for iY in NA: AU.append(rng.normal(iY,0.2)) # create figure fig = plt.figure(figsize=(24,8)) gs = GridSpec(nrows=3, ncols=3) # vertical line time vline_time = 10 setfontsize = 24 ######################################################################################## # First axes ax0_c = 'r' ax0 = fig.add_subplot(gs[0:2, 0]) ax0.plot(t, NA, color = ax0_c) ax0.hlines(y=0, xmin=0, xmax=50, linestyles='--', linewidth=2, color=ax0_c, alpha =0.5) ax0.vlines(x=vline_time, ymin=np.min(NA), ymax=np.max(NA), linestyles='--', linewidth=1, color='black', alpha =0.5) state_time = 43 ax0.arrow(t[state_time], 0, 0, NA[state_time], head_width=1.5, head_length=0.2, linewidth=1, color='black', length_includes_head=True) ax0.arrow(t[state_time], NA[state_time], 0, -NA[state_time], head_width=1.5, head_length=0.2, linewidth=1, color='black', length_includes_head=True) ax0.set_yticklabels([]) ax0.set_xticklabels([]) ax0.set_ylabel('Negative Affect (a.u.)', fontsize=setfontsize) ######################################################################################## # Second axes ax1_c = 'b' ax1 = fig.add_subplot(gs[2, 0]) ax1.plot(t, AU, color = ax1_c) ax1.hlines(y=0, xmin=0, xmax=50, linestyles='--', linewidth=2, color=ax1_c, alpha =0.5) ax1.vlines(x=vline_time, ymin=np.min(AU), ymax=np.max(AU), linestyles='--', linewidth=1, color='black', alpha =0.5) ax1.set_yticklabels([]) ax1.set_xticklabels([]) ax1.set_xlabel('Time (a.u.)', fontsize=setfontsize) ax1.set_ylabel('Alcohol Use (a.u.)', fontsize=setfontsize) ######################################################################################## # # third ax2 = fig.add_subplot(gs[:, 1]) ellipse2 = Ellipse((0, 0), 1, 3, angle=-45, alpha=0.9, facecolor = 'tan') ax2.add_artist(ellipse2) ax2.set_yticklabels([]) ax2.set_xticklabels([]) # ax2.set_aspect('equal') ax2.set_xlim(-2.2, 2.2) ax2.set_ylim(-2.2, 2.2) ax2.set_xlabel('Negative Affect (a.u.)', fontsize=setfontsize) ax2.set_ylabel('Alcohol Use (a.u.)', fontsize=setfontsize) ax2.hlines(y=0, xmin=-2.2, xmax=2.2, linestyles='--', linewidth=2, color=ax1_c, alpha =0.2) ax2.vlines(x=0, ymin=-2.2, ymax=2.2, linestyles='--', linewidth=2, color=ax0_c, alpha =0.2) # add example point! point2 = Ellipse((0.7, 0.9), 0.1, 0.1, angle=0, alpha=0.9, facecolor = 'black') ax2.add_artist(point2) ######################################################################################## # fourth ax3 = fig.add_subplot(gs[:, 2]) ellipse3 = Ellipse((0, 0), 3, 4, angle=-45, alpha=dist_alpha[1], facecolor = dist_c[1]) ax3.add_artist(ellipse3) ax3.set_yticklabels([]) ax3.set_xticklabels([]) # ax3.set_aspect('equal') ax3.set_xlim(-2.2, 2.2) ax3.set_ylim(-2.2, 2.2) ax3.set_xlabel('Negative Affect (a.u.)', fontsize=setfontsize) ax3.set_ylabel('Alcohol Use (a.u.)', fontsize=setfontsize) ax3.hlines(y=0, xmin=-2.2, xmax=2.2, linestyles='--', linewidth=2, color=ax1_c, alpha =0.2) ax3.vlines(x=0, ymin=-2.2, ymax=2.2, linestyles='--', linewidth=2, color=ax0_c, alpha =0.2) # add unattenuated plot ellipse2 = Ellipse((0, 0), 1, 3, angle=-45, alpha=0.9, facecolor = 'tan') ax3.add_artist(ellipse2) point2 = Ellipse((0.7, 0.9), 0.1, 0.1, angle=0, alpha=0.9, facecolor = 'black') ax3.add_artist(point2) # add error circle and actual observed point as x! 
ellipse2 = Ellipse((0.7, 0.9), 1.6, 1.6, angle=0, alpha=0.2, facecolor = 'lime') ax3.add_artist(ellipse2) ax3.scatter(0.2, 1.4, color = 'black', alpha = 1, marker = 'x', s = 150) ######################################################################################## plt.tight_layout(pad=5.0) plt.show() # - # + # Figure 2 t = np.arange(0,50,1) rng = np.random.RandomState(seed=0) limits = 1 N_subj = 3 time_courses = [] for iSubj in np.arange(N_subj*2): iTimeCourse = genTimeCourse(limits,rng,t) time_courses.append(iTimeCourse) # sample traits from correlated distribution! and add the trait offset to each time course! rng = np.random.RandomState(seed=0) bs_corr = 0.95 bs_var = 5 bs_cov = bs_corr*np.sqrt(bs_var*bs_var) bs_cov_mat = np.array([[bs_var,bs_cov], [bs_cov,bs_var]]) latent_data = rng.multivariate_normal((0,0),bs_cov_mat,N_subj) # rescale trait data for plotting! latent_data_zscored = stats.zscore(latent_data, axis=0) latent_data_zscored = latent_data_zscored/1.5 # create figure fig = plt.figure(figsize=(20,10)) gs = GridSpec(nrows=2, ncols=2, # width_ratios=[3, 1], # height_ratios=[3, 1,1], ) subj_colors = ['c','m','y'] setfontsize = 24 ######################################################################################## # First axes ax0 = fig.add_subplot(gs[0, 0]) for iSubj in np.arange(N_subj): ax0.plot(t,np.array(time_courses[iSubj])+latent_data[iSubj,0], alpha = 0.5, color=subj_colors[iSubj] ) ax0.hlines(y=latent_data[iSubj,0], xmin=0, xmax=50, linestyles='--', linewidth=2, alpha = 1, color=subj_colors[iSubj] ) ax0.set_yticklabels([]) ax0.set_xticklabels([]) ax0.set_ylabel('Negative Affect (a.u.)', fontsize=setfontsize) ######################################################################################## # second axes ax1 = fig.add_subplot(gs[1, 0]) for iSubj in np.arange(N_subj): ax1.plot(t,np.array(time_courses[iSubj+N_subj])+latent_data[iSubj,1], alpha = 0.5, color=subj_colors[iSubj] ) ax1.hlines(y=latent_data[iSubj,1], xmin=0, xmax=50, linestyles='--', linewidth=2, alpha = 1, color=subj_colors[iSubj] ) ax1.set_yticklabels([]) ax1.set_xticklabels([]) ax1.set_xlabel('Time (a.u.)', fontsize=setfontsize) ax1.set_ylabel('Alcohol Use (a.u.)', fontsize=setfontsize) ######################################################################################## # third axes ax2 = fig.add_subplot(gs[:, 1]) ellipse2 = Ellipse((0, 0), 2, 3, angle=-45, alpha=dist_alpha[0], facecolor = dist_c[0]) ax2.add_artist(ellipse2) ax2.set_yticklabels([]) ax2.set_xticklabels([]) ax2.set_xlim(-2.2, 2.2) ax2.set_ylim(-2.2, 2.2) ax2.set_xlabel('Negative Affect (a.u.)', fontsize=setfontsize) ax2.set_ylabel('Alcohol Use (a.u.)', fontsize=setfontsize) for iSubj in np.arange(N_subj): ax2.scatter(latent_data_zscored[iSubj,0], latent_data_zscored[iSubj,1], alpha = 0.9, s = 150, color = subj_colors[iSubj] ) plt.tight_layout(pad=4) plt.show() # - # + # Figure 3 N_subj = 4 fig = plt.figure(figsize=(30,20)) gs = GridSpec(nrows=2, ncols=3) setfontsize = 26 point_size = 150 ######################################################################################## # first axes ax0 = fig.add_subplot(gs[0, 0]) ellipse2 = Ellipse((0, 0), 2, 3, angle=-45, alpha=dist_alpha[0], facecolor = dist_c[0]) ax0.add_artist(ellipse2) ax0.set_yticklabels([]) ax0.set_xticklabels([]) # ax2.set_aspect('equal') ax0.set_xlim(-2.2, 2.2) ax0.set_ylim(-2.2, 2.2) ax0.set_xlabel('Negative Affect (a.u.)', fontsize=setfontsize) ax0.set_ylabel('Alcohol Use (a.u.)', fontsize=setfontsize) latent0 = np.array([[(3/2)/np.sqrt(2), (3/2)/np.sqrt(2)], 
[-(3/2)/np.sqrt(2), -(3/2)/np.sqrt(2)], [(2/2)/np.sqrt(2), -(2/2)/np.sqrt(2)], [-(2/2)/np.sqrt(2), (2/2)/np.sqrt(2)]]) for iSubj in np.arange(N_subj): ax0.scatter(latent0[iSubj,0], latent0[iSubj,1], alpha = 0.9, s = point_size, color = 'black' ) ######################################################################################## # second axes ax1 = fig.add_subplot(gs[0, 1]) ellipse2 = Ellipse((0, 0), 2, 3, angle=-45, alpha=dist_alpha[0], facecolor = dist_c[0]) ax1.add_artist(ellipse2) ax1.set_yticklabels([]) ax1.set_xticklabels([]) # ax2.set_aspect('equal') ax1.set_xlim(-2.2, 2.2) ax1.set_ylim(-2.2, 2.2) ax1.set_xlabel('Negative Affect (a.u.)', fontsize=setfontsize) ax1.set_ylabel('Alcohol Use (a.u.)', fontsize=setfontsize) for iSubj in np.arange(N_subj): # within x and y centers w_x = latent0[iSubj,0] w_y = latent0[iSubj,1] # add center point! ax1.scatter(w_x, w_y, alpha = 0.9, # marker = 'x', s = point_size, color = 'black' ) # add within-subject distribution! ellipse_temp1 = Ellipse((w_x, w_y), 1, 2, angle=-45, alpha=dist_alpha[1], facecolor = dist_c[1]) ax1.add_artist(ellipse_temp1) # ignore the measurement error and plot actual measurment! w_measures = np.array([[0.3, -0.1], [-0.4, -0.4], [0.3, 0.6], [-0.1,0.4]]) ax1.scatter(w_x+w_measures[iSubj,0], w_y+ w_measures[iSubj,1], color = 'black', alpha = 1, marker = 'x', s = point_size) ######################################################################################## # third axes ax2 = fig.add_subplot(gs[0, 2]) ellipse2 = Ellipse((0, 0), 2, 3, angle=-45, alpha=dist_alpha[0], facecolor = dist_c[0]) ax2.add_artist(ellipse2) ax2.set_yticklabels([]) ax2.set_xticklabels([]) # ax2.set_aspect('equal') ax2.set_xlim(-2.2, 2.2) ax2.set_ylim(-2.2, 2.2) ax2.set_xlabel('Negative Affect (a.u.)', fontsize=setfontsize) ax2.set_ylabel('Alcohol Use (a.u.)', fontsize=setfontsize) for iSubj in np.arange(N_subj): # within x and y centers w_x = latent0[iSubj,0] w_y = latent0[iSubj,1] # add center point! ax2.scatter(w_x, w_y, alpha = 0.9, # marker = 'x', s = point_size, color = 'black' ) # add within-subject distribution! ellipse_temp1 = Ellipse((w_x, w_y), 1, 2, angle=-45, alpha=dist_alpha[1], facecolor = dist_c[1]) ax2.add_artist(ellipse_temp1) # ignore the measurement error and plot actual measurment! w_measures = np.array([[0.3, -0.1], [-0.4, -0.4], [0.3, 0.6], [-0.1,0.4]]) ax2.scatter(w_x+w_measures[iSubj,0], w_y+ w_measures[iSubj,1], color = 'black', alpha = 1, marker = 'x', s = point_size) # add cross-sectional distribution! ellipse2 = Ellipse((0, 0), 3, 5, angle=-45, alpha=dist_alpha[2], facecolor = dist_c[2]) ax2.add_artist(ellipse2) ######################################################################################## # fourth axes trait_h = 2 trait_w = 4 ax3 = fig.add_subplot(gs[1, 0]) ellipse2 = Ellipse((0, 0), trait_h, trait_w, angle=-45, alpha=dist_alpha[0], facecolor = dist_c[0]) ax3.add_artist(ellipse2) latent0 = np.array([[(trait_w/2)/np.sqrt(2), (trait_w/2)/np.sqrt(2)], [-(trait_w/2)/np.sqrt(2), -(trait_w/2)/np.sqrt(2)], [(trait_h /2)/np.sqrt(2), -(trait_h /2)/np.sqrt(2)], [-(trait_h /2)/np.sqrt(2), (trait_h /2)/np.sqrt(2)]]) ax3.set_yticklabels([]) ax3.set_xticklabels([]) # ax2.set_aspect('equal') ax3.set_xlim(-2.2, 2.2) ax3.set_ylim(-2.2, 2.2) ax3.set_xlabel('Negative Affect (a.u.)', fontsize=setfontsize) ax3.set_ylabel('Alcohol Use (a.u.)', fontsize=setfontsize) for iSubj in np.arange(N_subj): # within x and y centers w_x = latent0[iSubj,0] w_y = latent0[iSubj,1] # add center point! 
ax3.scatter(w_x, w_y, alpha = 0.9, s = point_size, color = 'black' ) # add within-subject distribution! ellipse_temp1 = Ellipse((w_x, w_y), 1, 1, angle=-45, alpha=dist_alpha[1], facecolor = dist_c[1]) ax3.add_artist(ellipse_temp1) # add cross-sectional distribution! ellipse2 = Ellipse((0, 0), 3, 5, angle=-45, alpha=dist_alpha[2], facecolor = dist_c[2]) ax3.add_artist(ellipse2) ######################################################################################## ######################################################################################## ######################################################################################## ######################################################################################## ######################################################################################## # fifth axes trait_h = 2 trait_w = 2 ax4 = fig.add_subplot(gs[1, 1]) ellipse2 = Ellipse((0, 0), trait_h, trait_w, angle=-45, alpha=dist_alpha[0], facecolor = dist_c[0]) ax4.add_artist(ellipse2) latent0 = np.array([[(trait_w/2)/np.sqrt(2), (trait_w/2)/np.sqrt(2)], [-(trait_w/2)/np.sqrt(2), -(trait_w/2)/np.sqrt(2)], [(trait_h /2)/np.sqrt(2), -(trait_h /2)/np.sqrt(2)], [-(trait_h /2)/np.sqrt(2), (trait_h /2)/np.sqrt(2)]]) ax4.set_yticklabels([]) ax4.set_xticklabels([]) # ax2.set_aspect('equal') ax4.set_xlim(-2.2, 2.2) ax4.set_ylim(-2.2, 2.2) ax4.set_xlabel('Negative Affect (a.u.)', fontsize=setfontsize) ax4.set_ylabel('Alcohol Use (a.u.)', fontsize=setfontsize) for iSubj in np.arange(N_subj): # within x and y centers w_x = latent0[iSubj,0] w_y = latent0[iSubj,1] # add center point! ax4.scatter(w_x, w_y, alpha = 0.9, s = point_size, color = 'black' ) # add within-subject distribution! ellipse_temp1 = Ellipse((w_x, w_y), 1, 3, angle=-45, alpha=dist_alpha[1], facecolor = dist_c[1]) ax4.add_artist(ellipse_temp1) # add cross-sectional distribution! 
ellipse2 = Ellipse((0, 0), 3, 5, angle=-45, alpha=dist_alpha[2], facecolor = dist_c[2]) ax4.add_artist(ellipse2) plt.tight_layout(pad=5) plt.show() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + is_male = True if is_male: print("You are a Male") # + is_male = False if is_male: print("You are a Male") # + is_male = True if is_male: print("You are a Male") else: print("you are not a man") # + is_male = True is_tall = True if is_male or is_tall: print("You are a Male or tall or both") else: print("you are nither a man nor tall") # + is_male = False is_tall = True if is_male or is_tall: print("You are a Male or tall or both") else: print("you are nither a man nor tall") # - is_male = False is_tall = False if is_male or is_tall: print("You are a Male or tall or both") else: print("you are nither a man nor tall") # + is_male = False is_tall = True if is_male and is_tall: print("You are a tall Male ") else: print("you are either not male nor tall or both") # + is_male = False is_tall = False if is_male and is_tall: print("You are a tall Male ") else: print("you are either not male nor tall or both") # + is_male = True is_tall = True if is_male and is_tall: print("You are a tall Male ") else: print("you are either not male nor tall or both") # + is_male =True is_tall = True if is_male and is_tall: print("You are a tall Male ") elif is_male and not(is_tall): print("You Are a short man") elif not(is_male) and is_tall: print("You are a tall women or other") else: print("you are either not male nor tall or both") # + is_male = False is_tall = True if is_male and is_tall: print("You are a tall Male ") elif is_male and not(is_tall): print("You Are a short man") elif not(is_male) and is_tall: print("You are a tall women or other") else: print("you are either not male nor tall or both") # + is_male = True is_tall = False if is_male and is_tall: print("You are a tall Male ") elif is_male and not(is_tall): print("You Are a short man") elif not(is_male) and is_tall: print("You are a tall women or other") else: print("you are either not male nor tall or both") # + is_male = False is_tall = False if is_male and is_tall: print("You are a tall Male ") elif is_male and not(is_tall): print("You Are a short man") elif not(is_male) and is_tall: print("You are a tall women or other") else: print("you are either not male nor tall or both") # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Psych 81.09 # language: python # name: psych81.09 # --- # + #Imported relevant and necessary libraries and data cleaning tools import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import hypertools as hyp import plotly_express as px #plotly express is a library for drawing interactive figures from glob import glob as lsdir import os import re import datetime as dt from sklearn import linear_model from sklearn.neural_network import MLPRegressor from sklearn.model_selection import train_test_split # %matplotlib inline # + #Code from Professor Manning to set up and read in the relevant UVLT data data_readers = {'xlsx': pd.read_excel, 'xls': pd.read_excel, 'dta': pd.read_stata} get_extension = lambda x: x.split('.')[-1] def read_data(datadir, readers): files = lsdir(os.path.join('..', 
datadir, '*')) readable_files = [] data = [] for f in files: ext = get_extension(f) if ext in readers.keys(): readable_files.append(f) data.append(data_readers[ext](f)) return readable_files, data fnames, data = read_data('data', data_readers) # - # #### I changed U_Tot_Amt to 'TotalUnrestrictedDonations' to make it easier to work with later on # + #Renaming relevant columns in UVLT individual data to be more easily readable names={'DeceasedDateYN' : 'Is the donor Deceased?', 'U_Tot_Amt': 'TotalUnrestrictedDonations', 'U_Tot_Cnt': 'Total # Unrestricted Donations', 'ConservedOwner' : 'Owns Conserved Land?', 'RTotAmt' : 'Total Restricted Donations', 'RTotCnt': 'Total # Restricted Donations', 'VTotCnt' : 'Total Volunteer Occurances', 'ETotCnt' : 'Total Event Attendances'} data[1].rename(names, inplace=True, axis=1) #copying each set of data into more memorably named versions final_data=data[1].copy() # - # ### Multi-dimensional view of the data # # Here, I used hypertools to plot the data, colored by Town (rows where unrestricted donations were null or 0 were removed). This functions takes multi-dimensional data, plotting it based on the three most important factors. Using this visualization early in the analysis supports the hypothesis that Town is a significant factor on donations. We see this in the way colors group together whereas if Town was insignificant, we would expect to see a random scatter of colors/Town. # + # remove rows where total unrestricted donations is null or zero udonations = final_data[pd.notnull(final_data['TotalUnrestrictedDonations'])] udonations = udonations[udonations.TotalUnrestrictedDonations != 0] # plot 3d view of data, colored by towns plot = hyp.plot(udonations, '.', hue=udonations.Town) # - # #### I added "TotUdonations" which is the total donations for a town # + #creates new DataFrame of average unrestricted donations -- this will be used to store all averages avg_data = final_data.groupby('Town', as_index=False)['TotalUnrestrictedDonations'].mean() #add columns with average town data to the new DataFrame avg_data['Avg # UR Donations'] = final_data.groupby('Town', as_index=True)['Total # Unrestricted Donations'].mean().values avg_data['TotUdonations'] = final_data.groupby('Town', as_index=True)['TotalUnrestrictedDonations'].sum().values avg_data['Avg R Donations'] = final_data.groupby('Town', as_index=True)['Total Restricted Donations'].mean().values avg_data['Avg # R Donations'] = final_data.groupby('Town', as_index=True)['Total # Restricted Donations'].mean().values avg_data['Avg Volunteer Occurances'] = final_data.groupby('Town', as_index=True)['Total Volunteer Occurances'].mean().values avg_data['Avg Event Attendances'] = final_data.groupby('Town', as_index=True)['Total Event Attendances'].mean().values avg_data.rename({'TotalUnrestrictedDonations' : 'Avg UR Donations'}, inplace=True, axis = 1) avg_data.drop(avg_data.index[0], inplace=True) zipcode_data=data[3].copy() town_data=data[4].copy() #merge town demographics to main averages DataFrame avg_data = pd.merge(avg_data, town_data, on="Town") avg_data.drop(['TownID'], axis=1) avg_data.head() # - # ### Creating deciles in the data # # I created deciles in the data based on the total amount of unrestricted donations. By using total as opposed to average, we can later determine which towns provide the most significant funding. Average donations are a useful view as well, but total donations in a town may differ significantly from average donations due to outliers (ie. 
one or two highly significant donors) # + # remove rows where total unrestricted donations is null or zero avg_data = avg_data[pd.notnull(avg_data['TotUdonations'])] avg_data = avg_data[avg_data.TotUdonations != 0] # inspired by http://www.datasciencemadesimple.com/quantile-decile-rank-column-pandas-python-2/ # create quintiles based on unrestricted donations avg_data['decile_ud']=pd.qcut(avg_data['TotUdonations'],10,labels=False) # create stacked bar plot pivot_avg_data = avg_data.pivot(index='decile_ud', columns='Town', values='TotUdonations') pivot_avg_data #inspired by https://pstblog.com/2016/10/04/stacked-charts # - # ### Stacked bar plot based on deciles # # By viewing a stacked bar plot of towns and the deciles in which we belong, it is clearly apparent that the majority of donations stems from the top decile, while very small amounts of donations come from towns in lower deciles. It's important to keep in mind that the comparable size of the town will play a role in the total amount of donations, and it is also possible that marketing and events are focused in some towns more than others. # # That being said, by understanding that the majority of donations come from only a few towns, further questions can explored as to whether marketing is warranted or should be increased in smaller towns. It could be the case that costs of soliciting donations in smaller towns outweighs donations that come from those towns, or it could be the case that there exists opportunity in doubling down efforts to smaller towns. # + # stacked bar plot pivot_avg_data.plot.bar(stacked=True, figsize=(10,10)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt # %matplotlib inline plt.style.use('ggplot') plt.style.available # + import os csv = dict() def register(project_name): csv_name = project_name + '_output.txt' csv_path = os.path.join(os.path.expanduser('~'), 'Dropbox','shared','Python scripts', 'LazyUT_TEST', csv_name) csv[project_name] = pd.read_csv(csv_path) projects = ['Abseil'] for p in projects: register(p) # + class ModeData: def __init__(self, mode): self.mode = mode self.v_num_test = [] self.v_preprocessor_time = [] self.v_lazyut_time = [] self.v_testing_time = [] self.v_cum_testing_time = [] def size(self): return len(self.v_testing_time) def name(self): if self.mode == 'test-affected': return 'LazyUT' if self.mode == 'test-all': return 'RetestAll' return 'unknown' def make_mode_data(csv, mode): mode_data = ModeData(mode) csv_commits = csv.loc[csv['MODE'] == mode].groupby('COMMIT') cum_time = 0 for commit, data in csv_commits: test_time = data.TEST.mean() if mode == 'test-affected': test_time += data.ANALYSIS.mean() cum_time += test_time mode_data.v_testing_time.append(test_time) mode_data.v_cum_testing_time.append(cum_time) mode_data.v_num_test.append(data.FILES.max()) mode_data.v_preprocessor_time.append(data.PREPROCESSOR.mean()) mode_data.v_lazyut_time.append(data.ANALYSIS.mean()) return mode_data def listToLatexCoord(lst): result_str = '' for i, val in enumerate(lst): result_str += '({}, {}) '.format(i + 1, val) return result_str def getVar(var_name, data, project_name, modeName = "", method = listToLatexCoord): return {'var{}{}{}'.format(project_name, modeName, var_name) : '{' + str(method(data)) + '}'} def getTicks(size, project_name): def 
listWithoutBrackets(lst): result_str = '' for val in lst: if result_str: result_str += ', ' result_str += str(val) return result_str return getVar('Ticks', range(1, size + 1), project_name, method=listWithoutBrackets) def makeLatexVariables(v_mode_data, project_name): variables = dict() variables.update(getTicks(v_mode_data[0].size(), project_name)) max_accum = max(v_mode_data[1].v_cum_testing_time) max_lazyut_accum = max(v_mode_data[0].v_cum_testing_time) variables['efficiency'] = round(max_lazyut_accum / max_accum, 2) for mode_data in v_mode_data: variables.update(getVar('Test', mode_data.v_testing_time, project_name, mode_data.name())) variables.update(getVar('TestAccum', [x / max_accum for x in mode_data.v_cum_testing_time], project_name, mode_data.name())) if mode_data.name() == 'LazyUT': variables.update(getVar('Analysis', mode_data.v_lazyut_time, project_name, mode_data.name())) else: variables.update(getVar('Preprocessor', mode_data.v_preprocessor_time, project_name, mode_data.name())) variables.update(getVar('NumTest', mode_data.v_num_test, project_name, mode_data.name())) return variables def printLatex(v_mode_data, project_name): variables = makeLatexVariables(v_mode_data, '') variables.update(dict(lazyut_color = 'Turquoise', retestall_color = 'Salmon', analysis_color = 'Gray', preprocessor_color = 'Fuchsia', project_name=project_name)) variables['xmax'] = v_mode_data[0].size() + 0.5 text_form = ''' \\begin{{tikzpicture}} \\begin{{groupplot}}[ group style={{ group name=my plots, group size=1 by 3, xlabels at=edge bottom, xticklabels at=edge bottom, ylabels at=edge left, yticklabels at=edge left, vertical sep=1.5cm}}, footnotesize, width=\\textwidth, height=5cm, xlabel=Порядковый номер версии, xmin=0.5, xmax={xmax}, xtick/.expanded={varTicks}, tickpos=left, ytick align=outside, xtick align=outside] \\nextgroupplot[title={{Время тестирования}}] \\addplot [very thick, {lazyut_color}] coordinates{varLazyUTTest};\label{{plots:testLazy{project_name}}} \\addplot [very thick, {retestall_color}] coordinates{varRetestAllTest};\label{{plots:testAll{project_name}}} \\addplot [very thick, {analysis_color}] coordinates{varLazyUTAnalysis};\label{{plots:analysis{project_name}}} \\addplot [very thick, {preprocessor_color}] coordinates{varRetestAllPreprocessor};\label{{plots:preprocessor{project_name}}} \\coordinate (top) at (rel axis cs:0,1);% coordinate at top of the first plot \\nextgroupplot[title={{Число тестовых файлов}}] \\addplot [very thick, {lazyut_color}]coordinates{varLazyUTNumTest}; \\addplot [very thick, {retestall_color}]coordinates{varRetestAllNumTest}; \\nextgroupplot[title={{Совокупное время тестирования}}] \\addplot [very thick, {lazyut_color}]coordinates{varLazyUTTestAccum}; \\addplot [very thick, {retestall_color}]coordinates{varRetestAllTestAccum}; \\coordinate (bot) at (rel axis cs:1,0);% coordinate at bottom of the last plot \\coordinate (A) at (axis cs: 0 , {efficiency}); \\coordinate (O1) at (rel axis cs:0,0); \\coordinate (O2) at (rel axis cs:1,0); \\draw [RoyalBlue,dashed] (A -| O1) node [xshift=1.5cm, above] {{ускорение {efficiency} \\%}} -- (A -| O2); \\end{{groupplot}} % legend \\path (top|-current bounding box.north)-- coordinate(legendpos) (bot|-current bounding box.north); \\matrix[ matrix of nodes, anchor=south, draw, inner sep=0.2em, draw ]at([yshift=1ex]legendpos) {{ \\ref{{plots:testLazy{project_name}}}& LazyUT&[5pt] \\ref{{plots:analysis{project_name}}}& Время анализа&[5pt] \\\\ \\ref{{plots:testAll{project_name}}}& Полное тестирование&[5pt] 
\\ref{{plots:preprocessor{project_name}}}& Время препроцессинга \\\\}}; \\end{{tikzpicture}}''' print(text_form.format(**variables)) def make_plots(project): csv_proj = csv[project] def modes_data(csv, modes): for mode_str in modes: yield make_mode_data(csv, mode_str) plt.figure(figsize=(17,8)) affected_mode_data = make_mode_data(csv_proj, 'test-affected') all_mode_data = make_mode_data(csv_proj, 'test-all') v_mode_data = [affected_mode_data, all_mode_data] printLatex(v_mode_data, project) return ax1 = plt.subplot(3, 1, 1) ax1.set_title('Время тестирования') for mode_data in v_mode_data: plt.plot(range(0, mode_data.size()), mode_data.v_testing_time, label=mode_data.name()) plt.setp(ax1.get_xticklabels(), visible=False) plt.plot(all_mode_data.v_preprocessor_time, label='Время препроцессинга') plt.plot(affected_mode_data.v_lazyut_time, label='Время анализа') plt.legend(loc='upper center', bbox_to_anchor=(0.85, 1.55), prop={'size': 16}, ncol=2, fancybox=True, shadow=True) ax2 = plt.subplot(3, 1, 2, sharex = ax1) ax2.set_title('Совокупное время тестирования') for mode_data in v_mode_data: plt.plot(mode_data.v_cum_testing_time, label=mode_data.mode) plt.setp(ax2.get_xticklabels(), visible=False) ax3 = plt.subplot(3, 1, 3, sharex = ax1) ax3.set_title('Число тестовых файлов') for mode_data in v_mode_data: plt.plot(mode_data.v_num_test, label=mode_data.mode) plt.setp(ax3.get_xticklabels(), fontsize=10) plt.show() # number of test files # testing time # cumulative testing time for p in projects: print('---- PROJECT {} ----'.format(p)) make_plots(p) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os os.environ['VARIATION_NORM_EB_PROD'] = 'true' import itertools from variation.mane_transcript import MANETranscript from variation.tokenizers.caches import AminoAcidCache from variation.data_sources import SeqRepoAccess, TranscriptMappings, MANETranscriptMappings, UTA # - transcript_mappings = TranscriptMappings() amino_acid_cache = AminoAcidCache() seqrepo = SeqRepoAccess() mane_transcript_mappings = MANETranscriptMappings() uta = UTA(db_pwd='') mane_transcript = MANETranscript(seqrepo, transcript_mappings, mane_transcript_mappings, uta) # # BRAF V600E # ## p -> MANE p # ### RefSeq mane_transcript.get_mane_transcript('NP_004324.2', 600, None, 'p', 'V', normalize_endpoint=True) # #### Other accessions that should work mane_transcript.get_mane_transcript('NP_001365401.1', 548, None, 'p', 'V') mane_transcript.get_mane_transcript('NP_001365402.1', 548, None, 'p', 'V') mane_transcript.get_mane_transcript('NP_001365400.1', 563, None, 'p', 'V') # ### Ensembl mane_transcript.get_mane_transcript('ENSP00000288602.6', 600, None, 'p', 'V', normalize_endpoint=True) # ## c -> MANE c # ### RefSeq # #### Most recent accession version mane_transcript.get_mane_transcript('NM_004333.6', 1799, None, 'c', 'T', normalize_endpoint=True) # #### Other accessions that should point to the same mane_transcript.get_mane_transcript('NM_001378471.1', 1688, None, 'c', 'T', normalize_endpoint=True) mane_transcript.get_mane_transcript('NM_001378472.1', 1643, None, 'c', 'T') mane_transcript.get_mane_transcript('NM_001378475.1', 1535, None, 'c', 'T') # #### Older accession versions mane_transcript.get_mane_transcript('NM_004333.5', 1799, None, 'c', 'T', normalize_endpoint=True) mane_transcript.get_mane_transcript('NM_004333.4', 1799, None, 'c', 'T', 
normalize_endpoint=True) # ### Ensembl mane_transcript.get_mane_transcript('ENST00000288602.11', 1799, None, 'c', 'T', normalize_endpoint=True) mane_transcript.get_mane_transcript('ENST00000288602.11', 1799, None, 'c', 'T', normalize_endpoint=False) # ## g -> MANE c mane_transcript.get_mane_transcript('NC_000007.14', 140753336, None, 'g', normalize_endpoint=True) mane_transcript.get_mane_transcript('NC_000007.13', 140453136, None, 'g', normalize_endpoint=True) # # EGFR L858R # ## p -> MANE p # ### RefSeq mane_transcript.get_mane_transcript('NP_005219.2', 858, None, 'p', 'L', normalize_endpoint=True) # ### Ensembl mane_transcript.get_mane_transcript('ENSP00000275493.2', 858, None, 'p', 'L', normalize_endpoint=True) # ## c -> MANE c # ### RefSeq # #### Most recent accession version mane_transcript.get_mane_transcript('NM_005228.5', 2573, None, 'c', 'T', normalize_endpoint=True) # #### Older accession versions mane_transcript.get_mane_transcript('NM_005228.4', 2573, None, 'c', 'T', normalize_endpoint=True) # ### Ensembl mane_transcript.get_mane_transcript('ENST00000275493.7', 2573, None, 'c', 'T', normalize_endpoint=True) # ## g -> MANE c mane_transcript.get_mane_transcript('NC_000007.13', 55259515, None, 'g', normalize_endpoint=True) # # More examples # https://civicdb.org/events/genes/4/summary/variants/2/summary#variant # https://reg.genome.network/allele?hgvs=NM_007313.2:c.1001C%3ET mane_transcript.get_mane_transcript('NP_005148.2', 315, 315, 'p', normalize_endpoint=True) # https://civicdb.org/events/genes/4/summary/variants/2/summary#variant # https://reg.genome.network/allele?hgvs=NM_007313.2:c.1001C%3ET mane_transcript.get_mane_transcript('NM_007313.2', 1001, None, 'c', normalize_endpoint=True) # https://civicdb.org/events/genes/19/summary/variants/34/summary#variant # https://reg.genome.network/allele?hgvs=NM_005228.4:c.2369C%3ET mane_transcript.get_mane_transcript('NP_005219.2', 790, None, 'p', normalize_endpoint=True) # https://civicdb.org/events/genes/19/summary/variants/34/summary#variant # https://reg.genome.network/allele?hgvs=NM_005228.4:c.2369C%3ET mane_transcript.get_mane_transcript('NM_005228.5', 2369, None, 'c', normalize_endpoint=True) # https://civicdb.org/events/genes/30/summary/variants/79/summary#variant # https://reg.genome.network/allele?hgvs=NM_004985.4:c.35G%3EA mane_transcript.get_mane_transcript('NP_004976.2', 12, None, 'p', normalize_endpoint=True) # https://civicdb.org/events/genes/30/summary/variants/79/summary#variant # https://reg.genome.network/allele?hgvs=NM_004985.4:c.35G%3EA mane_transcript.get_mane_transcript('NM_004985.5', 35, None, 'c', normalize_endpoint=True) # https://reg.genome.network/allele?hgvs=NM_004448.4:c.2262_2276del mane_transcript.get_mane_transcript('NP_004439.2', 755, 759, 'p', normalize_endpoint=True) # https://reg.genome.network/allele?hgvs=NM_004448.4:c.2262_2276del mane_transcript.get_mane_transcript('NM_004448.4', 2262, 2276, 'c', normalize_endpoint=True) # # Scratch mane_transcript.get_mane_transcript('NP_004976.2', 12, None, 'p') mane_transcript.get_mane_transcript('ENST00000288602.7', 1799, None, 'p') # # Longest compatible transcript mane_transcript.get_longest_compatible_transcript('BRAF', 600, None, 'p') mane_transcript.get_longest_compatible_transcript('BRAF', 1799, None, 'c') # # Scratch def get_seq(ac, cds, start, end): return seqrepo.seq_repo_client.fetch(ac)[cds + start - 1: cds + end] mane = 'NM_004448.4' mane_cds = uta.get_coding_start_site(mane) mane_cds q = 'NM_004448.3' q_cds = uta.get_coding_start_site(q) q_cds # 
Original query q_start, q_end = 2263, 2277 q_seq = get_seq(q, q_cds, q_start, q_end) q_seq # Variation Normalization v_start, v_end = 2263, 2277 v_seq = get_seq(mane, mane_cds, v_start, v_end) v_seq # ClinGen Allele Registry API cgar_api_start, cgar_api_end = 2264, 2278 cgar_api_seq = get_seq(mane, mane_cds, cgar_api_start, cgar_api_end) cgar_api_seq q_seq == v_seq q_seq == cgar_api_seq # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 1.469154, "end_time": "2021-03-09T05:22:23.480073", "exception": false, "start_time": "2021-03-09T05:22:22.010919", "status": "completed"} tags=[] # Import the libraries import pandas as pd import matplotlib as plt from sklearn.cluster import MeanShift # + papermill={"duration": 0.061064, "end_time": "2021-03-09T05:22:23.549743", "exception": false, "start_time": "2021-03-09T05:22:23.488679", "status": "completed"} tags=[] # Load and show the dataset df_candy = pd.read_csv('../input/the-ultimate-halloween-candy-power-ranking/candy-data.csv') df_candy.head() # + papermill={"duration": 0.017456, "end_time": "2021-03-09T05:22:23.576085", "exception": false, "start_time": "2021-03-09T05:22:23.558629", "status": "completed"} tags=[] # We gonna drop the string colum X = df_candy.drop('competitorname', axis=1) # + papermill={"duration": 0.284463, "end_time": "2021-03-09T05:22:23.869369", "exception": false, "start_time": "2021-03-09T05:22:23.584906", "status": "completed"} tags=[] # We gonna fit the model meanshift = MeanShift().fit(X) # + papermill={"duration": 0.018019, "end_time": "2021-03-09T05:22:23.896507", "exception": false, "start_time": "2021-03-09T05:22:23.878488", "status": "completed"} tags=[] # The algoritm return the nums of clusters max(meanshift.labels_) # + papermill={"duration": 0.018113, "end_time": "2021-03-09T05:22:23.924069", "exception": false, "start_time": "2021-03-09T05:22:23.905956", "status": "completed"} tags=[] # We gonna show the location of the centers that you put on our data meanshift.cluster_centers_ # + papermill={"duration": 0.03061, "end_time": "2021-03-09T05:22:23.964628", "exception": false, "start_time": "2021-03-09T05:22:23.934018", "status": "completed"} tags=[] # And like the last one, we gonna add the location of centens in the original dataset df_candy['meanshift'] = meanshift.labels_ df_candy.head() # + papermill={"duration": 0.01904, "end_time": "2021-03-09T05:22:23.994855", "exception": false, "start_time": "2021-03-09T05:22:23.975815", "status": "completed"} tags=[] # Import MiniBatchKMeans from sklearn.cluster import MiniBatchKMeans # + papermill={"duration": 0.092931, "end_time": "2021-03-09T05:22:24.098587", "exception": false, "start_time": "2021-03-09T05:22:24.005656", "status": "completed"} tags=[] # We group in 4, by groups of 8 kmeans = MiniBatchKMeans(n_clusters=4, batch_size=8).fit(X) # + papermill={"duration": 0.020881, "end_time": "2021-03-09T05:22:24.130890", "exception": false, "start_time": "2021-03-09T05:22:24.110009", "status": "completed"} tags=[] # We show the total of centers len(kmeans.cluster_centers_) # + papermill={"duration": 0.026933, "end_time": "2021-03-09T05:22:24.169248", "exception": false, "start_time": "2021-03-09T05:22:24.142315", "status": "completed"} tags=[] # Return an array with every 
category of candies kmeans.predict(X) # + papermill={"duration": 0.046529, "end_time": "2021-03-09T05:22:24.227829", "exception": false, "start_time": "2021-03-09T05:22:24.181300", "status": "completed"} tags=[] # Adding the groups results in the original dataset df_candy['MiniBatchKMeans'] = kmeans.predict(X) df_candy # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import altair as alt from altair import datum import ipywidgets as widgets from ipywidgets import interact, interact_manual from IPython.display import HTML, Markdown as md import datetime as dt from src.data.read_dataset import get_processed_dataset from src.visualization import visualize as viz italy = get_processed_dataset('dpc_nazionale') df = get_processed_dataset("austria_country") austria = df[df.BundeslandID == 10].copy() # + c1 = alt.Chart(italy).mark_line().encode( x=alt.X("data", title='Date'), y=alt.Y("incidenza", title="Incidence") ).properties(height=200, width=600,) c2 = alt.Chart(austria).mark_line(color="#FFAA00").encode( x=alt.X("Date:T"), y=alt.Y("Incidence", title="Incidence") ).properties(height=200, width=600,) alt.layer( c1, c2 ).resolve_scale(color='independent') # + import altair as alt import pandas as pd from altair import datum from pyprojroot import here from src.data.read_dataset import get_processed_dataset from src.visualization import visualize as viz df = get_processed_dataset("austria_gkz") import geopandas as gpd import matplotlib.pyplot as plt austria = gpd.read_file('../../data/raw/bezirke_999_geo.json').to_crs("EPSG:32633") austria['iso'] = austria.iso.astype(int) merged_aut = pd.merge(austria, df, left_on='iso', right_on='GKZ') prov_df = get_processed_dataset('dpc_province') prov = prov_df.groupby('provincia').nth(-1).reset_index() start_date = prov_df.data.max() - pd.Timedelta(days=7) last_week = prov_df[prov_df.data > start_date] last_grouped = last_week.groupby(['provincia', 'codice_provincia', 'codice_regione', 'regione']).mean().reset_index() italy = gpd.read_file('../../data/raw/prov2019.geojson') merged_ita = pd.merge(italy, last_grouped, left_on="COD_PROV", right_on="codice_provincia").rename(columns={"nuovi_positivi_per_1M_pop": "Incidence"}) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python2 # --- # %matplotlib inline import numpy as np import pylab as plt import logging # The set of pixel configurations that will be examined IMAGES = [ [[0,1,0],[1,1,1],[0,1,0]], [[1,0,0],[1,1,1],[1,0,0]], [[1,1,0],[0,1,1],[0,0,1]], [[0,1,1],[1,1,0],[1,0,0]], [[1,1,0],[1,1,1],[0,0,0]], [[1,1,0],[1,1,0],[0,1,0]], ] def theta_image(im): """ Derive THETA_IMAGE based on the SExtractor manual (Sec. 
10.1)""" x,y = np.array(np.where(im),dtype=float)-1 logging.info('x = %s; y = %s'%(x,y)) xbar = np.sum(x)/np.sum(im) ybar = np.sum(y)/np.sum(im) logging.info('xbar = %s; ybar = %s'%(xbar,ybar)) x2bar = np.sum(x**2)/np.sum(im) - xbar**2 y2bar = np.sum(y**2)/np.sum(im) - ybar**2 logging.info('x2bar = %s; y2bar = %s'%(x2bar,y2bar)) xybar = np.sum(x*y)/np.sum(im) - xbar*ybar logging.info('xybar = %s'%xybar); theta = np.degrees(np.arctan2(2*xybar,(x2bar - y2bar))/2) return theta # + # Set logging.INFO to print info logging.getLogger().setLevel(logging.WARN) fig,axes = plt.subplots(len(IMAGES)//2 + len(IMAGES)%2,2,figsize=(10,16)) plt.subplots_adjust(hspace=0.1) for j,im in enumerate(IMAGES): ax = axes[j//2,j%2] plt.sca(ax) im = np.array(im) theta = theta_image(im) ax.pcolormesh(im,edgecolor='gray',cmap='gray',vmax=1.2) ax.set_aspect('equal') ax.set_xticks([0.5,1.5,2.5]); ax.set_xticklabels([-1,0,1]) ax.set_yticks([0.5,1.5,2.5]); ax.set_yticklabels([-1,0,1]) plt.title("theta_image = %.1f deg"%theta,fontsize=14) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline # %config InlineBackend.figure_format = "retina" from __future__ import print_function from matplotlib import rcParams rcParams["savefig.dpi"] = 100 rcParams["figure.dpi"] = 100 rcParams["font.size"] = 20 # - # # Quickstart # # This notebook was made with the following version of emcee: import emcee emcee.__version__ # The easiest way to get started with using emcee is to use it for a project. To get you started, here’s an annotated, fully-functional example that demonstrates a standard usage pattern. # # ## How to sample a multi-dimensional Gaussian # # We’re going to demonstrate how you might draw samples from the multivariate Gaussian density given by: # # $$ # p(\vec{x}) \propto \exp \left [ - \frac{1}{2} (\vec{x} - # \vec{\mu})^\mathrm{T} \, \Sigma ^{-1} \, (\vec{x} - \vec{\mu}) # \right ] # $$ # # where $\vec{\mu}$ is an $N$-dimensional vector position of the mean of the density and $\Sigma$ is the square N-by-N covariance matrix. # # The first thing that we need to do is import the necessary modules: import numpy as np # Then, we’ll code up a Python function that returns the density $p(\vec{x})$ for specific values of $\vec{x}$, $\vec{\mu}$ and $\Sigma^{-1}$. In fact, emcee actually requires the logarithm of $p$. We’ll call it `log_prob`: def log_prob(x, mu, cov): diff = x - mu return -0.5*np.dot(diff, np.linalg.solve(cov,diff)) # It is important that the first argument of the probability function is # the position of a single "walker" (a *N* dimensional # `numpy` array). The following arguments are going to be constant every # time the function is called and the values come from the `args` parameter # of our :class:`EnsembleSampler` that we'll see soon. # # Now, we'll set up the specific values of those "hyperparameters" in 5 # dimensions: # + ndim = 5 np.random.seed(42) means = np.random.rand(ndim) cov = 0.5 - np.random.rand(ndim ** 2).reshape((ndim, ndim)) cov = np.triu(cov) cov += cov.T - np.diag(cov.diagonal()) cov = np.dot(cov,cov) # - # and where `cov` is $\Sigma$. # How about we use 32 walkers? Before we go on, we need to guess a starting point for each # of the 32 walkers. This position will be a 5-dimensional vector so the # initial guess should be a 32-by-5 array. 
# It's not a very good guess but we'll just guess a # random number between 0 and 1 for each component: nwalkers = 32 p0 = np.random.rand(nwalkers, ndim) # Now that we've gotten past all the bookkeeping stuff, we can move on to # the fun stuff. The main interface provided by `emcee` is the # :class:`EnsembleSampler` object so let's get ourselves one of those: sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, args=[means, cov]) # Remember how our function `log_prob` required two extra arguments when it # was called? By setting up our sampler with the `args` argument, we're # saying that the probability function should be called as: log_prob(p0[0], means, cov) # If we didn't provide any # `args` parameter, the calling sequence would be `log_prob(p0[0])` instead. # # It's generally a good idea to run a few "burn-in" steps in your MCMC # chain to let the walkers explore the parameter space a bit and get # settled into the maximum of the density. We'll run a burn-in of 100 # steps (yep, I just made that number up... it's hard to really know # how many steps of burn-in you'll need before you start) starting from # our initial guess ``p0``: state = sampler.run_mcmc(p0, 100) sampler.reset() # You'll notice that I saved the final position of the walkers (after the # 100 steps) to a variable called `pos`. You can check out what will be # contained in the other output variables by looking at the documentation for # the :func:`EnsembleSampler.run_mcmc` function. The call to the # :func:`EnsembleSampler.reset` method clears all of the important bookkeeping # parameters in the sampler so that we get a fresh start. It also clears the # current positions of the walkers so it's a good thing that we saved them # first. # # Now, we can do our production run of 10000 steps: sampler.run_mcmc(state, 10000); # The samples can be accessed using the :func:`EnsembleSampler.get_chain` method. # This will return an array # with the shape `(10000, 32, 5)` giving the parameter values for each walker # at each step in the chain. # Take note of that shape and make sure that you know where each of those numbers come from. 
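#
# As a quick sanity check (a minimal sketch, assuming the emcee 3.x `get_chain` API used in this notebook), we can confirm that shape directly:

# +
chain = sampler.get_chain()
print(chain.shape)  # expected: (10000, 32, 5), i.e. (n_steps, n_walkers, n_dim)
# -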
# You can make histograms of these samples to get an estimate of the density that you were sampling: # + import matplotlib.pyplot as plt samples = sampler.get_chain(flat=True) plt.hist(samples[:, 0], 100, color="k", histtype="step") plt.xlabel(r"$\theta_1$") plt.ylabel(r"$p(\theta_1)$") plt.gca().set_yticks([]); # - # Another good test of whether or not the sampling went well is to check # the mean acceptance fraction of the ensemble using the # :func:`EnsembleSampler.acceptance_fraction` property: print("Mean acceptance fraction: {0:.3f}" .format(np.mean(sampler.acceptance_fraction))) # and the integrated autocorrelation time (see the :ref:`autocorr` tutorial for more details) print("Mean autocorrelation time: {0:.3f} steps" .format(np.mean(sampler.get_autocorr_time()))) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- t = ['azn', 'autists', 'like', 'stonks', 'bynd', 'oil', 'short', 'yolo', 'chinese', 'june', 'citron', 'ocgn', 'jartek', 'ama', 'crm', 'gme', 'crashes', 'need', 'happy', 'jobs', 'million', 'plug', 'bull', 'spy', 'two', 'whats', 'trump', 'make', 'stock', 'monday', 'vs', 'trevor', 'boeing', 'shit', 'baba', 'lets', 'atvi', 'skin', 'never', 'slv', 'dip', 'tendies', 'msft', 'gnc', 'rkt', 'biden', 'bezos', 'fortnite', 'logo', 'split', 'tlry', 'next', 'holding', 'fuck', 'zm', 'currency', 'mt', 'sndl', 'help', 'c', 'turn', 'spce', 'corona', 'china', 'mvis', 'battery', 'yolos', 'going', 'witching', 'pltr', 'today', 'milton', 'tomorrow', 'mnmd', 'squeeze', 'futures', 'loop', 'ev', 'prpl', 'amc', 'options', 'virus', 'yearold', 'airlines', 'balance', 'long', 'know', 'nio', 'coinbase', 'plan', 'war', 'way', 'calls', 'bb', 'market', 'account', 'got', 'kobe', 'amzn', 'goes', 'bulls', 'puts', 'right', 'shares', 'apes', 'tesla', 'pton', 'thanksgiving', 'confirmed', 'amd', 'questrade', 'wsbvotebot', 'work', 'fed', 'aal', 'strong', 'big', 'retards', 'acb', 'tsla', 'life', 'earnings', 'silver', 'kodak', 'corn', 'closed', 'log', 'papa', 'hearing', 'biggest', 'dogecoin', 'coin', 'put', 'bears', 'doge', 'anyone', 'uber', 'new', 'musk', 'get', 'riots', 'mlk', 'suicide', 'mascot', 'pi', 'unemployment', 'purple', 'cramer', 'gang', 'election', 'guys', 'nok', 'weed', 'sos', 'crude', 'portfolio', 'made', 'together', 'p', 'vale', 'sp', 'autist', 'stocks', 'trade', 'good', 'july', 'positive', 'questtrade', 'covid', 'trading', 'looking', 'snow', 'every', 'go', 'thoughts', 'adopted', 'gains', 'elon', 'fsly', 'first', 'january', 'day', 'apple', 'christmas', 'call', 'play', 'wsb', 'coronavirus', 'robinhood', 'started', 'etrade', 'buy', 'month', 'sell', 'one', 'tuesday', 'clov', 'hacked', 'worth', 'dearborn', 'debate', 'nikola', 'nvda', 'already', 'hold', 'deal', 'seeing', 'look', 'rh', 'moon', 'stimulus', 'tiktok', 'future', 'merry', 'snap', 'years', 'sprint', 'money', 'test', 'please', 'april', 'nkla', 'theres', 'warren', 'airbnb', 'jpow', 'rand', 'negative', 'uso', 'zoom', 'diamond', 'plant', 'gold', 'still', 'shitron', 'ape', 'free', 'dfv', 'gay', 'may', 'wkhs', 'dont', 'iran', 'us', 'back', 'week', 'sub', 'detroit', 'ally', 'kim', 'oracle', 'bear', 'gamestop', 'k', 'year', 'time', 'twitter', 'count', 'bloomberg', 'thcb', 'im', 'dd', 'hertz', 'aapl', 'kodk', 'ford', 'think', 'uwmc', 'kangaroo', 'bought'] len(t) # -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # 
format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: SageMath 9.1 # language: sage # name: sagemath # --- # ### 1. Plotting @interact #세 점 (a, 1), (b, 0), (c, 0)을 지나는 다항식의 그래프 def my_lag(a=0, b=1, c=2): f = (x-b)*(x-c)/((a-b)*(a-c)) P = plot(f,(x,min(a, b, c)-1, max(a, b, c)+1), figsize=4) Q = point([(a, 1), (b, 0), (c, 0)], color="red") (P+Q).show() # ### 2. Plotting with Lagrange equation # - P : Lagrange equation, R : Basic polynomial constituting a lagrange polynomial, Q : three points # + #세 점 (0, 3), (1, 4), (2, 1)을 지나는 다항식의 그래프 v = [(0, 3), (1, 4), (2, 1)] f1 = (x-v[1][0])*(x-v[2][0])/((v[0][0]-v[1][0])*(v[0][0]-v[2][0])) f2 = (x-v[0][0])*(x-v[2][0])/((v[1][0]-v[0][0])*(v[1][0]-v[2][0])) f3 = (x-v[0][0])*(x-v[1][0])/((v[2][0]-v[0][0])*(v[1][0]-v[0][0])) f = v[0][1]*f1 + v[1][1]*f2 + v[2][1]*f3 P = plot(f,(x,min(v[0][0], v[1][0], v[2][0])-1, max(v[0][0], v[1][0], v[2][0])+1),\ figsize=4) R = plot((f1, f2, f3),(x,min(v[0][0], v[1][0], v[2][0])-1, max(v[0][0], v[1][0], v[2][0])+1),\ figsize=4, color="green") Q = point(v, color="red") (P+Q+R).show() # - # ### 3. 자연수 p로 나눈 나머지들의 곱셈표 # - 고정된 자연수 p로 나눈 나머지들 사이에서 라그랑지 다항식을 구현하려면 나눗셈이 가능해야 합니다. # 즉, 0이 아닌 나머지는 항상 곱해서 1이 되는 나머지가 있어야 합니다. # - 아래에서 자연수 p로 나눈 나머지들의 곱셈표를 보여줍니다. # p가 소수인 경우와 그렇지 않은 경우(가령 8이나 10)에 첫번째 행과 열을 제외한 부분에서 # 1이 항상 나오는지 그렇지 않은지 확인해보세요. # + p=7 v=[] for i in range(p): v1=[] for j in range(p): v1.append(Mod(i*j, p)) v.append(v1) for i in range(p): print(v[i]) # - # ### 4. Shamir의 (3, 5) - 비밀공유방식 # 1. 먼저 주어진 비밀문장을 256진법을 이용하여 숫자 M으로 변환 # 2. 비밀 M보다 큰 소수 p 생성 # 3. 계수들이 p로 나눈 나머지(GF(p)의 원소)인 다항식들의 공간 R 생성 # 4. M을 상수항으로 하고 나머지 항들은 임의의 값인 2차식 f 생성 # 5. 5개의 비밀조각 (1, f(1)), (2, f(2)), (3, f(3)), (4, f(4)), (5, f(5)) 생성 # + s = 'Hi' M = sum(ord(s[i])*256^i for i in range(len(s))) #비밀문장을 숫자로 변환 p = random_prime(M*10, M*2) #비밀 M보다 큰 소수 print('p=', p) R. = GF(p)[x] # p로 나눈 나머지들을 계수로 갖고 x를 미지수로 하는 다항식들 f = M + Mod(ZZ.random_element(p),p)*x + Mod(ZZ.random_element(p),p)*x^2 # 상수항이 바로 비밀 M이고 나머지 항들은 임의의 수들로 만든 2차식 print('f=', f) print (1, f(1)), (2, f(2)), (3, f(3)), (4, f(4)), (5, f(5)) # 5개의 비밀조각 # + p=78607 v=[(1, 10886), (2, 42972), (5, 35107)] R. = GF(p)[x] s = Mod(10886, p)*(x-2)*(x-5)/Mod((1-2)*(1-5), p) \ + Mod(42972, p)*(x-1)*(x-5)/Mod((2-1)*(2-5), p) \ + Mod(35107, p)*(x-1)*(x-2)/Mod((5-1)*(5-2), p) print(s) f = R.lagrange_polynomial(v) print(f) # - # ### 5. Shamir의 (t, m) - 비밀공유방식(threshold scheme) # + def Bimil2(s, t, m): # t개 이상의 비밀조각이 모이면 비밀을 알아내는 코드 s = str(s) # 비밀은 문자열 M = sum(ord(s[i])*256^i for i in range(len(s))) # 비밀을 숫자로 바굼 p = random_prime(M*10, M*2) R. = GF(p)['x'] # p로 나눈 나머지들을 계수로 갖고 x를 미지수로 하는 다항식들 f = M # 비밀은 상수항 for i in range(1, t): f = f + Mod(ZZ.random_element(p),p)*x^i # 나머지 항들은 임의의 수로 v=[] for i in range(1, m+1): v.append((i, f(i))) return p, f, v Bimil2('Hello', 4, 8) # + def Bomul2(p, v): R. = GF(p)['x'] # p로 나눈 나머지들을 계수로 갖고 x를 미지수로 하는 다항식들 f = R.lagrange_polynomial(v); M = lift(f(0)) v=[] while M != 0: v.append(chr(M % 256)) M = M//256 # this replaces n by floor(n/256). 
return ''.join(v) Bomul2(4389394056821, [(3, 3100430341689), (4, 1061722902032), (5, 3419208830193)]) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: drlnd # language: python # name: drlnd # --- # # Collaboration and Competition # # --- # # In this notebook, you will learn how to use the Unity ML-Agents environment for the third project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893) program. # # ### 1. Start the Environment # # We begin by importing the necessary packages. If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/). from unityagents import UnityEnvironment from collections import namedtuple, deque import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import random import copy import torch.optim as optim # Next, we will start the environment! **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Unity environment that you downloaded. # # - **Mac**: `"path/to/Tennis.app"` # - **Windows** (x86): `"path/to/Tennis_Windows_x86/Tennis.exe"` # - **Windows** (x86_64): `"path/to/Tennis_Windows_x86_64/Tennis.exe"` # - **Linux** (x86): `"path/to/Tennis_Linux/Tennis.x86"` # - **Linux** (x86_64): `"path/to/Tennis_Linux/Tennis.x86_64"` # - **Linux** (x86, headless): `"path/to/Tennis_Linux_NoVis/Tennis.x86"` # - **Linux** (x86_64, headless): `"path/to/Tennis_Linux_NoVis/Tennis.x86_64"` # # For instance, if you are using a Mac, then you downloaded `Tennis.app`. If this file is in the same folder as the notebook, then the line below should appear as follows: # ``` # env = UnityEnvironment(file_name="Tennis.app") # ``` env = UnityEnvironment(file_name="/home/deeprl/deep-reinforcement-learning/p3_collab-compet/Tennis_Linux/Tennis.x86_64", seed=1) env.reset() # Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python. # get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] print(brain) # ### 2. Examine the State and Action Spaces # # In this environment, two agents control rackets to bounce a ball over a net. If an agent hits the ball over the net, it receives a reward of +0.1. If an agent lets a ball hit the ground or hits the ball out of bounds, it receives a reward of -0.01. Thus, the goal of each agent is to keep the ball in play. # # The observation space consists of 8 variables corresponding to the position and velocity of the ball and racket. Two continuous actions are available, corresponding to movement toward (or away from) the net, and jumping. # # Run the code cell below to print some information about the environment. # + # reset the environment env_info = env.reset(train_mode=True)[brain_name] # number of agents num_agents = len(env_info.agents) print('Number of agents:', num_agents) # size of each action action_size = brain.vector_action_space_size print('Size of each action:', action_size) # examine the state space states = env_info.vector_observations state_size = states.shape[1] print('There are {} agents. 
Each observes a state with length: {}'.format(states.shape[0], state_size)) print('The state for the first agent looks like:', states[0]) # - # ### 4. Create Critic and Actor models # + from collections import namedtuple, deque import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import random import copy import torch.optim as optim def hidden_init(layer): # source: The other layers were initialized from uniform distributions # [− 1/sqrt(f) , 1/sqrt(f) ] where f is the fan-in of the layer fan_in = layer.weight.data.size()[0] lim = 1. / np.sqrt(fan_in) return (-lim, lim) class Actor(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, fc1_units=256, fc2_units=128): """Initialize parameters and build model. :param state_size: int. Dimension of each state :param action_size: int. Dimension of each action :param seed: int. Random seed :param fc1_units: int. Number of nodes in first hidden layer :param fc2_units: int. Number of nodes in second hidden layer """ super(Actor, self).__init__() self.seed = torch.manual_seed(seed) # source: The low-dimensional networks had 2 hidden layers self.fc1 = nn.Linear(state_size, fc1_units) self.fc2 = nn.Linear(fc1_units, fc2_units) self.fc3 = nn.Linear(fc2_units, action_size) self.reset_parameters() def reset_parameters(self): self.fc1.weight.data.uniform_(*hidden_init(self.fc1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc1)) # source: The final layer weights and biases of the actor and were # initialized from a uniform distribution [−3 × 10−3, 3 × 10−3] self.fc3.weight.data.uniform_(-3e-3, 3e-3) def forward(self, state): """ Build an actor (policy) network that maps states -> actions. """ # source: used the rectified non-linearity for all hidden layers x = F.relu(self.fc1(state)) x = F.relu(self.fc2(x)) # source The final output layer of the actor was a tanh layer, # to bound the actions return torch.tanh(self.fc3(x)) class Critic(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, nb_agents, seed, fcs1_units=256, fc2_units=128): """Initialize parameters and build model. :param state_size: int. Dimension of each state :param action_size: int. Dimension of each action :param seed: int. Random seed :param fcs1_units: int. Nb of nodes in the first hiddenlayer :param fc2_units: int. Nb of nodes in the second hidden layer """ super(Critic, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear((state_size+action_size)*nb_agents, fcs1_units)#*nb_agents self.fc2 = nn.Linear(fcs1_units, fc2_units) self.fc3 = nn.Linear(fc2_units, 1) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) # source: The final layer weights and biases of the critic were # initialized from a uniform distribution [3 × 10−4, 3 × 10−4] self.fc3.weight.data.uniform_(-3e-3, 3e-3) def forward(self, state, action): """ Build a critic (value) network that maps (state, action) pairs -> Q-values :param state: tuple. :param action: tuple. """ xs = torch.cat((state, action.float()), dim=1) x = F.relu(self.fcs1(xs)) x = F.relu(self.fc2(x)) return self.fc3(x) # - # ### 5. 
Create Noise Generator class OUNoise: """Ornstein-Uhlenbeck process.""" def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2): """Initialize parameters and noise process.""" self.mu = mu * np.ones(size) self.size = size self.theta = theta self.sigma = sigma self.seed = random.seed(seed) self.reset() def reset(self): """Reset the internal state (= noise) to mean (mu).""" self.state = copy.copy(self.mu) def sample(self): """Update internal state and return it as a noise sample.""" x = self.state dx = self.theta * (self.mu - x) # dx += self.sigma * np.random.rand(*self.size) # Uniform disribution dx += self.sigma * np.random.randn(self.size) # normal distribution # dx += self.sigma * np.array([random.random() for i in range(len(x))]) self.state = x + dx return self.state # ### 6. Create Replay Buffer device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") class ReplayBuffer: """Fixed-size buffer to store experience tuples.""" def __init__(self, buffer_size, batch_size): """Initialize a ReplayBuffer object. Params ====== buffer_size (int): maximum size of buffer batch_size (int): size of each training batch """ self.memory = deque(maxlen=buffer_size) # internal memory (deque) self.batch_size = batch_size self.experience = namedtuple("Experience", field_names=["states", "actions", "rewards", "next_states", "dones"]) def add(self, state, action, reward, next_state, done): """Add a new experience to memory.""" e = self.experience(state, action, reward, next_state, done) self.memory.append(e) def sample(self): """Randomly sample a batch of experiences from memory.""" experiences = random.sample(self.memory, k=self.batch_size) states_list = [torch.from_numpy(np.vstack([e.states[index] for e in experiences if e is not None])).float().to(device) for index in range(num_agents)] actions_list = [torch.from_numpy(np.vstack([e.actions[index] for e in experiences if e is not None])).float().to(device) for index in range(num_agents)] next_states_list = [torch.from_numpy(np.vstack([e.next_states[index] for e in experiences if e is not None])).float().to(device) for index in range(num_agents)] rewards = torch.from_numpy(np.vstack([e.rewards for e in experiences if e is not None])).float().to(device) dones = torch.from_numpy(np.vstack([e.dones for e in experiences if e is not None]).astype(np.uint8)).float().to(device) return (states_list, actions_list, rewards, next_states_list, dones) def __len__(self): """Return the current size of internal memory.""" return len(self.memory) # ### 7. Create DDPG single agent # + BUFFER_SIZE = int(1e5) # replay buffer size BATCH_SIZE = 250 # minibatch size GAMMA = 0.99 # discount factor TAU = 1e-3 # for soft update of target parameters LR_ACTOR = 0.0001 # learning rate of the actor LR_CRITIC = 0.001 # learning rate of the critic WEIGHT_DECAY = 0.0001 # L2 weight decay NOISE_DECAY = 0.99 sharedBuffer = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE) class DDPGAgent(object): ''' Implementation of a DDPG agent that interacts with and learns from the environment ''' def __init__(self, state_size, action_size, rand_seed,nb_agents): '''Initialize an MetaAgent object. :param state_size: int. dimension of each state :param action_size: int. dimension of each action :param rand_seed: int. 
random seed ''' self.state_size = state_size self.action_size = action_size # Actor Network (w/ Target Network) self.actor_local = Actor(state_size, action_size, rand_seed).to(device) self.actor_target = Actor(state_size, action_size, rand_seed).to(device) self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR) # Critic Network (w/ Target Network) self.critic_local = Critic(state_size, action_size, nb_agents, rand_seed).to(device) self.critic_target = Critic(state_size, action_size, nb_agents, rand_seed).to(device) # NOTE: the decay corresponds to L2 regularization self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC) # , weight_decay=WEIGHT_DECAY) # Noise process self.noise = OUNoise(action_size, rand_seed) def step(self): if len(sharedBuffer) > BATCH_SIZE: experiences = sharedBuffer.sample() self.learn(experiences, GAMMA) def act(self, states, add_noise=True): '''Returns actions for given states as per current policy. :param states: array_like. current states :param add_noise: Boolean. If should add noise to the action ''' states = torch.from_numpy(states).float().to(device) self.actor_local.eval() with torch.no_grad(): actions = self.actor_local(states).cpu().data.numpy() self.actor_local.train() # source: Select action at = μ(st|θμ) + Nt according to the current # policy and exploration noise if add_noise: actions += self.noise.sample() return np.clip(actions, -1, 1) def reset(self): self.noise.reset() def learn(self, experiences, gamma): ''' Update policy and value params using given batch of experience tuples. Q_targets = r + γ * critic_target(next_state, actor_target(next_state)) where: actor_target(state) -> action critic_target(state, action) -> Q-value :param experiences: Tuple[torch.Tensor]. tuple of (s, a, r, s', done) :param gamma: float. 
discount factor ''' states_list, actions_list, rewards, next_states_list, dones = experiences next_states_tensor = torch.cat(next_states_list, dim=1).to(device) states_tensor = torch.cat(states_list, dim=1).to(device) actions_tensor = torch.cat(actions_list, dim=1).to(device) # ---------------------------- update critic ---------------------------- # # Get predicted next-state actions and Q values from target models next_actions = [self.actor_target(states) for states in states_list] next_actions_tensor = torch.cat(next_actions, dim=1).to(device) Q_targets_next = self.critic_target(next_states_tensor, next_actions_tensor) # Compute Q targets for current states (y_i) Q_targets = rewards + (gamma * Q_targets_next * (1 - dones)) # Compute critic loss Q_expected = self.critic_local(states_tensor, actions_tensor) critic_loss = F.mse_loss(Q_expected, Q_targets) # Minimize the loss self.critic_optimizer.zero_grad() critic_loss.backward() #torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1) self.critic_optimizer.step() # ---------------------------- update actor ---------------------------- # # Compute actor loss # take the current states and predict actions actions_pred = [self.actor_local(states) for states in states_list] actions_pred_tensor = torch.cat(actions_pred, dim=1).to(device) # -1 * (maximize) Q value for the current prediction actor_loss = -self.critic_local(states_tensor, actions_pred_tensor).mean() # Minimize the loss self.actor_optimizer.zero_grad() actor_loss.backward() #torch.nn.utils.clip_grad_norm_(self.actor_local.parameters(), 1) self.actor_optimizer.step() # ----------------------- update target networks ----------------------- # self.soft_update(self.critic_local, self.critic_target, TAU) self.soft_update(self.actor_local, self.actor_target, TAU) def soft_update(self, local_model, target_model, tau): """Soft update model parameters. θ_target = τ*θ_local + (1 - τ)*θ_target Params ====== local_model: PyTorch model (weights will be copied from) target_model: PyTorch model (weights will be copied to) tau (float): interpolation parameter """ for target_param, local_param in zip(target_model.parameters(), local_model.parameters()): target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data) # - # ### 8. Create Multi-agent DDPG class MADDPG(object): ''' Implementation of a MADDPG agent that interacts with and learns from the environment ''' def __init__(self, state_size, action_size, nb_agents, random_seed): '''Initialize an MultiAgent object. :param state_size: int. dimension of each state :param action_size: int. dimension of each action :param nb_agents: int. number of agents to use :param rand_seed: int. 
random seed ''' self.nb_agents = nb_agents self.action_size = action_size self.agents = [DDPGAgent(state_size,action_size,random_seed,nb_agents) for x in range(nb_agents)]# creating agents def step(self, states, actions, rewards, next_states, dones): sharedBuffer.add(states, actions, rewards, next_states, dones) for agent in self.agents: agent.step() def act(self, states, add_noise=True): actions = np.zeros([num_agents, action_size]) for index, agent in enumerate(self.agents): actions[index, :] = agent.act(states[index], add_noise) return actions def reset(self): for agent in self.agents: agent.reset() def __len__(self): return self.nb_agents def __getitem__(self, key): return self.agents[key] def save_weights(self): for index, agent in enumerate(self.agents): torch.save(agent.actor_local.state_dict(), 'agent{}_checkpoint_actor.pth'.format(index+1)) torch.save(agent.critic_local.state_dict(), 'agent{}_checkpoint_critic.pth'.format(index+1)) # ### 9. Train Agent # + agent = MADDPG(state_size=state_size, action_size=action_size,nb_agents = num_agents, random_seed=0) def maddpg(n_episodes=5000, max_t=1000, print_every=100): scores_window = deque(maxlen=print_every) scores = [] scores_avg = [] scores_std = [] for i_episode in range(1, n_episodes+1): env_info = env.reset(train_mode=True)[brain_name] #reset the environment with every episode states = env_info.vector_observations agent.reset() score = np.zeros(len(agent)) for t in range(max_t): actions = agent.act(states) env_info = env.step(actions)[brain_name] #Taking one step next_states = env_info.vector_observations rewards = env_info.rewards dones = env_info.local_done agent.step(states, actions, rewards, next_states, dones) states = next_states score += rewards if np.any(dones): break scores.append(np.max(score)) scores_window.append(np.max(score)) scores_avg.append(np.mean(scores_window)) scores_std.append(np.std(scores_window)) print('\rEpisode {}\tAverage Score: {:.3f} \t Max Score: {:.3f}'.format(i_episode, np.mean(scores_window),np.max(scores_window)), end="") """ for i in range(len(agent)): str_actor = ('checkpoint_actor_maddpg_%i.pth')%(i) str_critic = ('checkpoint_critic_maddpg_%i.pth')%(i) torch.save(agent[i].actor_local.state_dict(), str_actor) torch.save(agent[i].critic_local.state_dict(), str_critic) """ if i_episode % print_every == 0: print('\rEpisode {}\tAverage Score: {:.3f} '.format(i_episode, np.mean(scores_window))) if np.mean(scores_window)>=0.5: agent.save_weights() print('\nEnvironment solved in {:d} Episodes \tAverage Score: {:.3f} '.format(i_episode, np.mean(scores_window))) break return scores scores = maddpg() # - # ### 10. Plot Results # + import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(111) plt.plot(np.arange(1, len(scores)+1), scores) plt.ylabel('Score') plt.xlabel('Episode #') plt.show() # - # ### 11. 
Watch smart agents # + env_info = env.reset(train_mode=False)[brain_name] agent = MADDPG(state_size=state_size, action_size=action_size,nb_agents = num_agents, random_seed=0) for i in range(len(agent)): agent[i].actor_local.load_state_dict(torch.load('agent{}_checkpoint_actor.pth'.format(i+1))) agent[i].critic_local.load_state_dict(torch.load('agent{}_checkpoint_critic.pth'.format(i+1))) states = env_info.vector_observations # get the current state scores = np.zeros(len(agent)) # initialize the score for i in range(1000): actions = agent.act(states) # select an action env_info = env.step(actions)[brain_name] # send the action to the environment next_states = env_info.vector_observations # get the next state rewards = env_info.rewards # get the reward dones = env_info.local_done # see if episode has finished scores += rewards # update the score states = next_states # roll over the state to next time step print("Average Score: {}".format(np.mean(scores))) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #

# ## NOTE
#
# All queries which don't have results (such as Q7 and Q9 in the PT schema) should have 0 instead of "N/A" in the log files

# ## Create rank table for storage formats

#
# 1. Copy the results from the tables with title "RESULTS FOR STORAGE FORMATS COMPARISON" into the "results.xlsx" Excel file
# 2. For the PT schema, delete the values which are 0 (Q7 and Q9)
# 3. Run the code; the ranking results are saved in the "rankings.xlsx" Excel file
# 4. Copy the rankings from "rankings.xlsx" into the corresponding table in the "Rank tables" Excel file.

# +
import scipy.stats as ss
import pandas as pd
import numpy as np

df = pd.read_excel("C:\\Users\\Sadiq\\Desktop\\results.xlsx", header=None)
print("Original data \n", df, "\n")

# rank the storage formats within each query (row)
df_ranks = []
for index, row in df.iterrows():
    df_ranks.append(ss.rankdata(row))
df_ranks = pd.DataFrame(df_ranks)
# print("Ranks \n", df_ranks, "\n")

df_transpose = df_ranks.transpose()
# print("Transposed Ranks \n", df_transpose, "\n")

# count, for each format, how many times it attains each rank
rank_table = []
for index, row in df_transpose.iterrows():
    result_row = np.zeros(5)
    for i in range(len(row)):
        result_row[int(row[i]) - 1] += 1
    rank_table.append(result_row)
rank_table = pd.DataFrame(rank_table)
print("Rank Table \n", rank_table)

rank_table.to_excel("C:\\Users\\Sadiq\\Desktop\\rankings.xlsx")
# -
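# A minimal toy example (hypothetical runtimes, not benchmark results) of the ranking step used above: assuming the results are runtimes where lower is better, `ss.rankdata` ranks the five storage formats within a single query row, giving rank 1 to the fastest format; these per-query ranks are what the loop above accumulates into the rank table.

# +
import scipy.stats as ss

runtimes = [12.4, 3.1, 5.0, 4.2, 9.8]  # one query, five storage formats (made-up numbers)
print(ss.rankdata(runtimes))           # -> [5. 1. 3. 2. 4.]
# -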

# ## Compute ranking scores for Storage formats

#
# 1. Run the code to calculate Rank Scores for the results created in "Create rank table for storage formats".
# 2. According to the formula, t is 5 and b is 11 (b is 9 for the PT schema)
# 3. Take the results and copy them into the corresponding table in the "Rank Scores" Excel file

# +
import pandas as pd

df = pd.read_excel("C:\\Users\\Sadiq\\Desktop\\rankings.xlsx", header=None)
df = df.drop(0)          # drop the header row written by to_excel
df = df.drop(0, axis=1)  # drop the index column written by to_excel
print("Rank Table \n", df.head(), "\n")

b = 9  # b = 11  # total number of queries
t = 5  # number of file formats
# r is a rank number, 1 <= r <= t
df_list = ["avro", "csv", "orc", "parq.", "hive"]

print("Rank Scores \n")
for index, row in df.iterrows():
    s = 0
    for r in range(t):
        s = s + (row[r + 1] * (t - (r + 1)) / (b * (t - 1)))
    print(df_list[index - 1], "\t", s)
# -
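#
# For reference, the score computed by the loop above can be written as
#
# $$
# RS = \sum_{r=1}^{t} \frac{k_r \, (t - r)}{b \, (t - 1)}
# $$
#
# where $k_r$ is the number of queries for which a given format attains rank $r$, $t$ is the number of formats, and $b$ is the number of queries. A format that ranks first on every query therefore gets $RS = 1$, and one that ranks last on every query gets $RS = 0$.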

# ## Create rank table for Partitioning techniques

# # 1. Copy the results from the tables with title "RESULTS FOR PARTITIONING TECHNIQUES COMPARISON" into the "results.xlsx" excel file # 2. Delete values which are 0 (for example Q7 and Q9 in PT schema) # 2. Run the code, and results are saved on "rankings.xlsx" excel file # 3. Copy the rankings from "rankings.xlsx" into the according table in "Rank tables" excel file. # + import scipy.stats as ss import pandas as pd import numpy as np df = pd.read_excel("C:\\Users\\Sadiq\\Desktop\\results.xlsx", header = None) print("Original data \n", df, "\n") df_ranks = [] for index, row in df.iterrows(): df_ranks.append(ss.rankdata(row)) df_ranks = pd.DataFrame(df_ranks) # print("Ranks \n", df_ranks, "\n") df_transpose = df_ranks.transpose() # print("Transposed Ranks \n", df_transpose, "\n") rank_table = [] for index, row in df_transpose.iterrows(): result_row = np.zeros(3) for i in range(len(row)): result_row[int(row[i])-1] +=1 rank_table.append(result_row) rank_table = pd.DataFrame(rank_table) print("Rank Table \n", rank_table) rank_table.to_excel("C:\\Users\\Sadiq\\Desktop\\rankings.xlsx") # - #

# ## Compute ranking scores for Partitioning Techniques

# # 1. Run code to calculate Rank Scores for the results which are created after the "Create rank table for partitioning techniques". # 2. According to the formula, t is 3, b is 11 (b is 9 for PT) # 3. Take the results and copy into the according table in "Rank Scores" excel file # + import pandas as pd df = pd.read_excel("C:\\Users\\Sadiq\\Desktop\\rankings.xlsx", header = None) df = df.drop(0) df = df.drop(0, axis = 1) print("Rank Table \n", df.head(), "\n") # b=9 b=11 #total number benchmark execurions t=3 #number of file formats #r is a rank number 1<=r<=t df_list = ["HP", "SBP", "PBP" ] print("Rank Scores \n") for index, row in df.iterrows(): s = 0 for r in range(t): s = s + ( row[r+1]*(t-(r+1)) / (b*(t-1)) ) print(df_list[index-1], "\t", s) # - #

# ## Create rank table for relational schemas

# # 1. Copy the results from the tables with title "RESULTS FOR Relational Schema COMPARISON" into the "results.xlsx" excel file # 2. Delete values which are 0 (for example Q7 and Q9 in PT schema) # 2. Run the code, and results are saved on "rankings.xlsx" excel file # 3. Copy the rankings from "rankings.xlsx" into the according table in "Rank tables" excel file. # + import scipy.stats as ss import pandas as pd import numpy as np df = pd.read_excel("C:\\Users\\Sadiq\\Desktop\\results.xlsx", header = None) print("Original data \n", df, "\n") df_ranks = [] for index, row in df.iterrows(): df_ranks.append(ss.rankdata(row)) df_ranks = pd.DataFrame(df_ranks) # print("Ranks \n", df_ranks, "\n") df_transpose = df_ranks.transpose() # print("Transposed Ranks \n", df_transpose, "\n") rank_table = [] for index, row in df_transpose.iterrows(): result_row = np.zeros(3) for i in range(len(row)): result_row[int(row[i])-1] +=1 rank_table.append(result_row) rank_table = pd.DataFrame(rank_table) print("Rank Table \n", rank_table, "\n") # print("Rank Table Modified \n", rank_table) rank_table.to_excel("C:\\Users\\Sadiq\\Desktop\\rankings.xlsx") # - #

# ## Compute ranking scores for Relational Schemas

# # 1. Run code to calculate Rank Scores for the results which are created after the "Create rank table for Relational Schemas". # 2. According to the formula, t is 3, b is 11 # 3. Take the results and copy into the according table in "Rank Scores" excel file # + import pandas as pd df = pd.read_excel("C:\\Users\\Sadiq\\Desktop\\rankings.xlsx", header = None) df = df.drop(0) df = df.drop(0, axis = 1) print("Rank Table \n", df.head(), "\n") b=11 #total number of queries t=3 #number of schemas #r is a rank number 1<=r<=t df_list = ["ST", "VT", "PT"] print("Rank Scores \n") for index, row in df.iterrows(): s = 0 for r in range(t): s = s + ( row[r+1]*(t-(r+1)) / (b*(t-1)) ) print(df_list[index-1], "\t", s) # - #

# ## Create rank table for configurations

# + import scipy.stats as ss import pandas as pd import numpy as np df = pd.read_excel("C:\\Users\\Sadiq\\Desktop\\results.xlsx", header = None) print("Original data \n", df, "\n") df_ranks = [] for index, row in df.iterrows(): df_ranks.append(ss.rankdata(row)) df_ranks = pd.DataFrame(df_ranks) print("Ranks \n", df_ranks, "\n") df_transpose = df_ranks.transpose() df_transpose[6][30:] = 31 df_transpose[8][30:] = 31 print("Transposed Ranks \n", df_transpose, "\n") rank_table = [] for index, row in df_transpose.iterrows(): result_row = np.zeros(45) for i in range(len(row)): result_row[int(row[i])-1] +=1 rank_table.append(result_row) rank_table = pd.DataFrame(rank_table) rank_table[30][30:] = rank_table[30][30:] - 2 print("Rank Table \n", rank_table) rank_table.to_excel("C:\\Users\\Sadiq\\Desktop\\rankings.xlsx") # - #

# ## Compute ranking scores for Configurations

# + import pandas as pd df = pd.read_excel("C:\\Users\\Sadiq\\Desktop\\rankings.xlsx", header = None) df = df.drop(0) df = df.drop(0, axis = 1) print("Rank Table \n", df.head(), "\n") b=11 #total number of queries t=45 #number of schemas #r is a rank number 1<=r<=t # df_list = ["ST", "VT", "PT"] print("Rank Scores \n") for index, row in df.iterrows(): s = 0 for r in range(t): s = s + ( row[r+1]*(t-(r+1)) / (b*(t-1)) ) print(index, "\t", s) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #

# # Python: without numpy or sklearn

#

# ## Q1: Given two matrices please print the product of those two matrices

#
#
# Ex 1: A   = [[1 3 4]
#              [2 5 7]
#              [5 9 6]]
#       B   = [[1 0 0]
#              [0 1 0]
#              [0 0 1]]
#       A*B = [[1 3 4]
#              [2 5 7]
#              [5 9 6]]
#
#      
# Ex 2: A   = [[1 2]
#              [3 4]]
#       B   = [[1 2 3 4 5]
#              [5 6 7 8 9]]
#       A*B = [[11 14 17 20 23]
#              [23 30 37 44 51]]
#              
# Ex 3: A   = [[1 2]
#              [3 4]]
#       B   = [[1 4]
#              [5 6]
#              [7 8]
#              [9 6]]
#       A*B =Not possible
# 
# + # write your python code here # you can take the above example as sample input for your program to test # it should work for any general input try not to hard code for only given input examples # you can free to change all these codes/structure # here A and B are list of lists def matrix_mul(A, B): # first, find #col of A and #row of B ncolA = len(A[0]) # it will return no.of element in first row(means no.of column in matrix A) nrowB = len(B)# it will return no.of row of matrix B # let's check for matrix multiplication # #col of A should equal to #row of B if ncolA != nrowB: print('Not Possible') else: # initialize a 2d matrix using list comprehension matrix =[[0 for i in range(len(B[0]))]for j in range(len(A))] # implement the logic of matrix implementation for i in range(len(A)): # iterate #row in matrix A for j in range(len(B[0])): # iterate for #col in matrix B # finding resultant matrix's element for k in range((len(A[0]))): matrix[i][j] += A[i][k] * B[k][j] # return resultant matrix return(matrix) # + # case 1: #colA == #rowB # initialize two matrix A=[ [1,2,3], [4,5,6] ] B=[ [3,4], [4,6], [4,3] ] matrix_mul(A,B) # + # case 2: #colA != #rowB A=[ [1,2,3], [4,5,6] ] B=[ [5,6], [3,4], [4,6], [4,3] ] matrix_mul(A,B) # - A = [[1, 2], [3, 4]] B = [[1, 2, 3, 4, 5], [5, 6, 7, 8, 9]] matrix_mul(A,B) #
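# A more compact, hypothetical variant of the same algorithm (not part of the original
# exercise), using `zip(*B)` to walk over the columns of B; it should agree with
# `matrix_mul` above whenever the dimensions are compatible.

# +
def matrix_mul_zip(A, B):
    if len(A[0]) != len(B):
        return 'Not Possible'
    # zip(*B) yields the columns of B; each cell is a row-by-column dot product
    return [[sum(a * b for a, b in zip(row, col)) for col in zip(*B)] for row in A]

matrix_mul_zip([[1, 2], [3, 4]], [[1, 2, 3, 4, 5], [5, 6, 7, 8, 9]])
# expected: [[11, 14, 17, 20, 23], [23, 30, 37, 44, 51]]
# -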

# ## Q2: Select a number randomly with probability proportional to its magnitude from the given array of n elements

# # Consider an experiment of selecting an element from the list A randomly with probability proportional to its magnitude. # Assume we repeat the same experiment 100 times with replacement; in each experiment you will print a number that is selected randomly from A. # #
# Ex 1: A = [0 5 27 6 13 28 100 45 10 79]
# let f(x) denote the number of times x getting selected in 100 experiments.
# f(100) > f(79) > f(45) > f(28) > f(27) > f(13) > f(10) > f(6) > f(5) > f(0)
# 
# + from random import uniform # write your python code here # you can take the above example as sample input for your program to test # it should work for any general input try not to hard code for only given input examples # you can free to change all these codes/structure def pick_a_number_from_list(A): A=A.copy() Sum = 0 # taken 'Sum' as a variable b'z 'sum' is predefined in python #print(A) A.sort() #after sorting add first number twice A.insert(0,A[0]) #print(A) #find length of A(list) lengthA = len(A) #calculate total sum for i in range(lengthA): Sum += A[i] # divide every element of list by it's total sum weightedA = [] for i in range(lengthA): res = A[i] /Sum weightedA.append(res) #print(weightedA) # let's find cummulative sum of weightedA cumulativeA =[] cumulativeA.append(weightedA[0]) for i in range(1,lengthA): cumulativeA.append(weightedA[i]) cumulativeA[i] = cumulativeA[i]+ cumulativeA[i-1] cumulativeA.sort() #print(cumulativeA) # find a uniform random value between 0.0 and 1.0 because our cumulative list in the range[0.0,1.0] #pick a uniform random variable uniform_value = uniform(0.0,1.0) #print(uniform_value) # let's check the condition for i in range(1,lengthA): if uniform_value=cumulativeA[i-1]: # return selected number from the list return A[i] # + def sampling_based_on_magnitude(): found_random_numbers =[] # list to be used to find the probability of every element for i in range(1,100): number = pick_a_number_from_list(A) found_random_numbers.append(number) print(number) return found_random_numbers # + A = [0, 5, 27, 6, 13, 28, 100, 45, 10, 79] found_random_numbers = list(sampling_based_on_magnitude()) # - # + # let's find probability of every element probability={} for i in range(len(A)): count = 0 for j in range(99): if A[i] == found_random_numbers[j]: count+=1 probability[A[i]]=count/99 # - probability #

# We can see in the above output that 100 has the highest probability.
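# The comparison inside the selection loop of `pick_a_number_from_list` above appears to
# have lost its operators during formatting. Below is a compact sketch of the same idea
# (one uniform draw checked against a running cumulative sum); `pick_proportional` is a
# hypothetical helper, not the original solution.

# +
from random import uniform
from collections import Counter

def pick_proportional(A):
    # draw once in [0, sum(A)) and walk the running total until it passes the draw
    total = float(sum(A))
    draw = uniform(0.0, total)
    running = 0.0
    for value in A:
        running += value
        if draw <= running:
            return value
    return A[-1]

Counter(pick_proportional([0, 5, 27, 6, 13, 28, 100, 45, 10, 79]) for _ in range(100))
# -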

#

# ## Q3: Replace the digits in the string with #

# # Consider a string that contains digits; we need to remove all the characters which are not digits and replace each digit with '#' #
# Ex 1: A = 234                Output: ###
# Ex 2: A = a2b3c4             Output: ###
# Ex 3: A = abc                Output:   (empty string)
# Ex 4: A = #2a$#b%c%561#      Output: ####
# 
# + import re # write your python code here # you can take the above example as sample input for your program to test # it should work for any general input try not to hard code for only given input examples # you can free to change all these codes/structure # String: it will be the input to your program def replace_digits(String): list_of_string = list(String) result_string=[] #initializing digit_count digit_count=0 for i in range(len(list_of_string)): if list_of_string[i].isdigit(): result_string.append('#') # count no.of occurences of digits digit_count+=1 # check if digit_count=0 or not if digit_count == 0: print('empty string') else: # join the elements of list and return it return ''.join(result_string) # modified string which is after replacing the # with digits # - replace_digits('3e4e5r') replace_digits('12345') replace_digits('The NO.1 Chekuri Sir!') replace_digits('Chekuri verma sir') #
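# A compact alternative sketch using the `re` module imported above; `replace_digits_regex`
# is a hypothetical helper, not the original solution. It drops the non-digits first and
# then masks each remaining digit with '#'.

# +
import re

def replace_digits_regex(s):
    digits_only = re.sub(r'\D', '', s)       # drop everything that is not a digit
    return re.sub(r'\d', '#', digits_only)   # mask each digit with '#'

replace_digits_regex('#2a$#b%c%561#')        # expected '####'
# -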

# ## Q4: Students marks dashboard

# # Consider the marks list of class students given in two lists
# Students = ['student1','student2','student3','student4','student5','student6','student7','student8','student9','student10']
# Marks = [45, 78, 12, 14, 48, 43, 45, 98, 35, 80]
# from the above two lists the Student[0] got Marks[0], Student[1] got Marks[1] and so on.

# # Your task is to print the names of the students # # a. Who got the top 5 ranks, in the descending order of marks
# b. Who got least 5 ranks, in the increasing order of marks
# c. Who got marks between the 25th percentile and the 75th percentile, in the increasing order of marks. # #
# Ex 1: 
# Students=['student1','student2','student3','student4','student5','student6','student7','student8','student9','student10'] 
# Marks = [45, 78, 12, 14, 48, 43, 47, 98, 35, 80]
#
# a. 
# student8  98
# student10 80
# student2  78
# student5  48
# student7  47
#
# b.
# student3 12
# student4 14
# student9 35
# student6 43
# student1 45
#
# c.
# student9 35
# student6 43
# student1 45
# student7 47
# student5 48
# 
# + # learned the concept from : https://www.dummies.com/education/math/statistics/how-to-calculate-percentiles-in-statistics/ import math def percentile(A,percent): Data = A.copy() # sort the list Data.sort() # multiply percent by the total number of values n, n is index here of last element index = (len(Data) -1) *percent # check whether index is whole number or not #let's find floor and ceil value of given index index_floor = math.floor(index) index_ceil = math.ceil(index) # if both index_floor and index_cell is equal # it means index is whole number if index_floor == index_ceil: return (Data[index]+Data[index+1])/2 #else round the index to nearest integer else: rounded_index = round(index) return Data[rounded_index] # + ############################################################ # reference: geeksforgeeks for few concepts(not code) # ############################################################ # write your python code here # you can take the above example as sample input for your program to test # it should work for any general input try not to hard code for only given input examples # you can free to change all these codes/structure def display_dash_board(students, marks): #let's first zip the given two lists and change it into dictionary zip_stu_marks = dict(zip(students, marks)) # write code for computing top top 5 students # let's sort zipped list sorted_top_students = sorted(zip_stu_marks.items(), key= lambda x:(x[1],x[0]),reverse=True) # assign top 5 students top_5_students = sorted_top_students[0:5] # write code for computing top least 5 students least_5_students = sorted(zip_stu_marks.items(), key= lambda x:(x[1],x[0]),reverse=False)[0:5] # write code for computing top least 5 students # find total marks total_marks = 0 for i in range(len(marks)): total_marks += marks[i] #find 25%ile percentile_25 = percentile(marks,0.25) #print(percentile_25) #find 75%ile percentile_75 = percentile(marks,0.75) #print(percentile_75) # sorte the students in increasing order of marks sorted_students = sorted(zip_stu_marks.items(), key= lambda x:(x[1],x[0]),reverse=False) # initializing list to store result students_within_25_and_75=[] # iterating over complete sorted list and finding desired student for i in range(len(sorted_students)): if int(sorted_students[i][1]) < int(percentile_75) and int(sorted_students[i][1]) >= int(percentile_25): students_within_25_and_75.append(sorted_students[i]) # finally return three found results return top_5_students, least_5_students, students_within_25_and_75 # - students=['student1','student2','student3','student4','student5','student6','student7','student8','student9','student10'] marks = [45, 78, 12, 14, 48, 43, 47, 98, 35, 80] top_5_students, least_5_students, students_within_25_and_75 = display_dash_board(students, marks) # + print('a') for student,mark in top_5_students: # tuple unpacking is used here print(student,mark) print('\nb') for student,mark in least_5_students: print(student,mark) print('\nc') for student,mark in students_within_25_and_75: print(student,mark) # - #
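# Note that in `percentile` above, `(len(Data) - 1) * percent` is a float even when it is
# whole-valued, so `Data[index]` would need an explicit int cast. A common
# interpolation-based alternative (the rule NumPy's percentile uses by default), shown
# here only as a hypothetical sketch, not as the method used in the dashboard:

# +
import math

def percentile_interp(values, percent):
    # percent in [0, 1]; linear interpolation between the two nearest order statistics
    data = sorted(values)
    k = (len(data) - 1) * percent
    lo, hi = math.floor(k), math.ceil(k)
    if lo == hi:
        return data[int(k)]
    return data[int(lo)] * (hi - k) + data[int(hi)] * (k - lo)

percentile_interp([45, 78, 12, 14, 48, 43, 47, 98, 35, 80], 0.25)   # 25th percentile of the marks
# -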

# ## Q5: Find the closest points

# # Consider you are given n data points in the form of list of tuples like S=[(x1,y1),(x2,y2),(x3,y3),(x4,y4),(x5,y5),..,(xn,yn)] and a point P=(p,q)
# your task is to find the 5 closest points (based on cosine distance) in S from P # #
# Cosine distance between two points (x,y) and (p,q) is defined as $\cos^{-1}\left(\frac{x\cdot p+y\cdot q}{\sqrt{x^2+y^2}\cdot\sqrt{p^2+q^2}}\right)$ #
# Ex:
#
# S= [(1,2),(3,4),(-1,1),(6,-7),(0, 6),(-5,-8),(-1,-1),(6,0),(1,-1)]
# P= (3,-4)
# 
# Output:
# (6,-7)
# (1,-1)
# (6,0)
# (-5,-8)
# (-1,-1)
# 
# + import math # write your python code here # you can take the above example as sample input for your program to test # it should work for any general input try not to hard code for only given input examples # you can free to change all these codes/structure # here S is list of tuples and P is a tuple ot len=2 def closest_points_to_p(S, P): # write your code here closest_points_to_p = [] # initializing resultant list point_dict={} # initializing dictinary to store the points and their distances xq,yq = P #unpack P(tuple) for point in S: x1,y1 = point # unpack point to axis value # let's find distance from P(x1,y1) to point(xq,yq) using cosine distance dist = math.acos( (x1*xq +y1*yq)/ ( (math.sqrt( (x1**2)+(y1**2) ))*(math.sqrt((xq**2)+(yq**2))) ) ) point_dict[point] = dist #let's sort the dictionary by it's key(distance), sorted function returns list point_list = sorted(point_dict.items(),key = lambda x: (x[1]),reverse=False) # print(point_list) for i in range(5): closest_points_to_p.append(point_list[i][0]) # finally returns the list of closest point return closest_points_to_p # its list of tuples # - S= [(1,2),(3,4),(-1,1),(6,-7),(0, 6),(-5,-8),(-1,-1),(6,0),(1,-1)] P= (3,-4) # + points = closest_points_to_p(S, P) #print the returned values for i in points: print(i) # + # intentionally left blank # - #

# ## Q6: Find which line separates oranges and apples

# # Consider you are given two sets of data points in the form of lists of tuples like #
# Red =[(R11,R12),(R21,R22),(R31,R32),(R41,R42),(R51,R52),..,(Rn1,Rn2)]
# Blue=[(B11,B12),(B21,B22),(B31,B32),(B41,B42),(B51,B52),..,(Bm1,Bm2)]
# 
# and set of line equations(in the string format, i.e list of strings) #
# Lines = [a1x+b1y+c1,a2x+b2y+c2,a3x+b3y+c3,a4x+b4y+c4,..,K lines]
# Note: You need to do string parsing here and get the coefficients of x,y and intercept.
# 
# Your task here is to print "YES"/"NO" for each line given. You should print YES if all the red points are on one side of the line and all the blue points are on the other side; otherwise you should print NO. #
# Ex:
# Red= [(1,1),(2,1),(4,2),(2,4), (-1,4)]
# Blue= [(-2,-1),(-1,-2),(-3,-2),(-3,-1),(1,-3)]
# Lines=["1x+1y+0","1x-1y+0","1x+0y-3","0x+1y-0.5"]
# 
# Output:
# YES
# NO
# NO
# YES
# 
def coefficient_finder(expression): """ expression ex: ax+by+c returns three value: a,b,c this function returns coefficient of x and y. also reutrns value of c. >>> a,b,c = coefficient_finder('1050x-500y+10000') >>> a,b,c output: (1050.0, -500.0, 10000.0) """ temp_a=expression.split('x') a = temp_a[0] temp_b = temp_a[1].split('y') b = temp_b[0] c = temp_b[1] return float(a), float(b), float(c) # + def i_am_the_one(red,blue,line): # find the coefficients a,b,c = coefficient_finder(line) # here m is slope # find slope and y-intercept(c) if b!=0: # if b is not equal to 0, then divide it by y's coefficient m = ((-1)*float(a))/b c = ((-1)*float(c))/b else: m = ((-1)*float(a)) c = ((-1)*float(c)) #initializing total_red_points and total_blue_points total_red_points = 0 total_blue_points= 0 # check for all red points that which side of line it belongs to # if point lies below the list subtract 1 from total_red_point # if point lies above the list add +1 to the total_red_point # iterate for every point in red list for x,y in red: # if y>mx+c, it means point lies above the line if y > (m*x + c): # so, add +1 to the total_red_points total_red_points +=1 # if y (m*x + c): total_blue_points +=1 elif y< (m*x + c): total_blue_points -=1 # if length of given red points list is equal to found total_red_points # and len(red list) is equal to found total_blue_points # then given line well separates given points, so return 'YES'. otherwise 'NO' if len(red) == abs(total_red_points) and len(blue)== abs(total_blue_points): # check, if both category of points(red_points and blue_points) has opposite sign or not if (total_red_points<0 and total_blue_points > 0) or (total_red_points>0 and total_blue_points < 0): return 'YES' else: return 'NO' # - Red= [(1,1),(2,1),(4,2),(2,4), (-1,4)] Blue= [(-2,-1),(-1,-2),(-3,-2),(-3,-1),(1,-3)] Lines=["1x+1y+0","1x-1y+0","1x+0y-3","0x+1y-0.5"] for i in Lines: yes_or_no = i_am_the_one(Red, Blue, i) print(yes_or_no) # the returned value #
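# An alternative sketch of the same check that avoids dividing by the y coefficient: the
# sign of a*x + b*y + c tells which side of the line a point falls on, so vertical lines
# (b = 0) need no special case. `separates_by_sign` is a hypothetical helper; points lying
# exactly on the line are counted on the negative side here.

# +
def separates_by_sign(red, blue, line):
    a, b, c = coefficient_finder(line)
    # collect the side (+1/-1) of every point; a clean split needs one side per colour
    red_sides = {1 if a * x + b * y + c > 0 else -1 for x, y in red}
    blue_sides = {1 if a * x + b * y + c > 0 else -1 for x, y in blue}
    if len(red_sides) == 1 and len(blue_sides) == 1 and red_sides != blue_sides:
        return 'YES'
    return 'NO'

for line in Lines:
    print(separates_by_sign(Red, Blue, line))   # expected: YES, NO, NO, YES
# -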

# ## Q7: Filling the missing values in the specified format

# # You will be given a string with digits and '\_' (missing value) symbols; you have to replace the '\_' symbols as explained below # #
# Ex 1: _, _, _, 24 ==> 24/4, 24/4, 24/4, 24/4 i.e. we have distributed the 24 equally to all 4 places 
# Ex 2: 40, _, _, _, 60 ==> (60+40)/5,(60+40)/5,(60+40)/5,(60+40)/5,(60+40)/5 ==> 20, 20, 20, 20, 20 i.e. the sum of (60+40) is distributed equally to all 5 places
# Ex 3: 80, _, _, _, _ ==> 80/5,80/5,80/5,80/5,80/5 ==> 16, 16, 16, 16, 16 i.e. the 80 is distributed equally over itself and the 4 missing values to its right
# Ex 4: _, _, 30, _, _, _, 50, _, _ # ==> we will fill the missing values from left to right # a. first we distribute the 30 over itself and the two missing values to its left (10, 10, 10, _, _, _, 50, _, _) # b. then we distribute the sum (10+50) over the values in between, inclusive (10, 10, 12, 12, 12, 12, 12, _, _) # c. finally we distribute the 12 over itself and the missing values to its right (10, 10, 12, 12, 12, 12, 4, 4, 4) #
# for a given string with comma separated values, which will have both missing values and numbers, e.g. "_, _, x, _, _, _" # you need to fill the missing values # # Q: your program reads a string like "_, _, x, _, _, _" and returns the filled sequence # # Ex: #
# Input1: "_,_,_,24"
# Output1: 6,6,6,6
#
# Input2: "40,_,_,_,60"
# Output2: 20,20,20,20,20
#
# Input3: "80,_,_,_,_"
# Output3: 16,16,16,16,16
#
# Input4: "_,_,30,_,_,_,50,_,_"
# Output4: 10,10,12,12,12,12,4,4,4
# 
# # # + ############################################################################# ### I have used print() function, just for testing purpose ################## ### Author: azmi,student at AAIC,pvt,ltd.########################## ############################################################################# ############################################################################# ############################################################################# # let's design the function for case1,case2, and case3 # case1: _,_,_,40 # case2: 40_,_,_,_ # case3: 10,_,_,_,20 # design function for case1: _,_,_,40 def case1(in_list): #find length of in_list len_in_list = len(in_list) # find the last digit last_digit = in_list[-1] # calculate value value = int(last_digit)/len_in_list # finally return the value return int(value) # design function for case2: 40,_,_,_ def case2(in_list): # find the length of in_list len_in_list = len(in_list) # find the first digit first_digit = in_list[0] # calculate value value = int(first_digit)/int(len_in_list) # return value return int(value) # design function for case3: 10,_,_,_,20 def case3(in_list): #find the length of in_list len_in_list = len(in_list) # find first and last digit first_digit = int(in_list[0]) last_digit = int(in_list[-1]) # calculate value value = (first_digit+last_digit)/len_in_list # return the value return int(value) # + def curve_smoothing(string): #initializing count by 0, it will help to keep track of elements count = 0 #convert the given string into list string_list = string.split(',') # convert string digit to int value for i in range(len(string_list)): if string_list[i] != '_': string_list[i] = int(string_list[i]) # iterate count <= len(string_list) while(count<=len(string_list)): # case1: '_,_,_,40', so check whether list starts with '_' if string_list[0] == '_': # create temporary list to store the desired list temp_list=[] # flag is used to insert the value in the temporary list flag = True # iterate till flag==True while flag==True: #check whether item is '_' or not. if string_list[count]=='_': # If yes, append to the temporary list temp_list.append(string_list[count]) # and increment the counter count+=1 # if item is not '_', check if it is digit. elif str(string_list[count]).isdigit(): #if it is digit then append the item to the temporary list # and don't increment the count temp_list.append(string_list[count]) # set flag as 'False' b'z we stop here inserting element into temporary list # because starting with '_' and end with 'digit' is completely a case(case1) flag=False # come out of the data inserting loop # pass the whole temporary sublist to the case1 function # case1 function will do further operation and will return distribution_item returned_item = case1(temp_list) # Distribute the returned distribution_item to the main list(string_list) for i in range(count+1): string_list[i] = returned_item # Now check whether sublist and main_list is equal in length or not # if both are equal, means we should return the main_list if len(string_list) == len(temp_list): return string_list # returning list case1 # case2 or case 3: If not started with '_' # check if it is digit elif str(string_list[count]).isdigit(): # if it is digit, store start_index # initialize start_index by value of count start_index = count # Q: Why we are storing start_index? 
# A: Because case3('_,_,"23,_,_,_"')(double quoted string is case3) might lies in # between the whole string # initialize temporary list temp_list = [] # append the first digit to the temporary list temp_list.append(string_list[count]) # increment the count value count+=1 # set flag=True for appending the item into the temporary list flag = True # iterate till flag==True. while flag==True: # check whether current element is '_' or not if string_list[count] == '_': # if so, then check whether it is last item or not if count!= len(string_list): # If so, then append the current item to the temporary list temp_list.append(string_list[count]) count+=1 # increment the value of count # check at every insertion of element that current element is last or not if count== len(string_list): # if the current item is last item, # then, it belongs to case2(40,_,_,_) # so, pass the temp_list to the case2 function returned_item = case2(temp_list) # distribute the returned_item to the specified index for i in range(start_index,count): string_list[i] = returned_item # finally, return the string_list(main list) return string_list # returning list case2 # if given string is starting wiht 'digit' and not ending with '_' # it goes to case3(30,_,_,30) # so, check whether current element is digit or not elif str(string_list[count]).isdigit(): # now, check whether found item is last or not if count!= len(string_list): # if current digit is not last element, # append the current element temp_list.append(string_list[count]) flag=False # pass it to case3 function returned_item = case3(temp_list) # update the original list for i in range(start_index,count+1): string_list[i] = returned_item # check if the current digit is last element or not if count == len(string_list)-1: # if so, then return string_list return string_list # - def returnString(smoothed_value): ''' this function return the string format of list. with ',' separation >>> returnString([10,20,30]) >>> '10,20,30' ''' test=[] for i in range(len(smoothed_value)): test.append(str(smoothed_value[i])) test.append(str(',')) test.pop() string = ''.join(test) return string S= "_,_,_,24" smoothed_values= returnString(curve_smoothing(S)) print(smoothed_values) S= "40,_,_,_,60" smoothed_values= returnString(curve_smoothing(S)) print(smoothed_values) S= "80,_,_,_,_" smoothed_values= returnString(curve_smoothing(S)) print(smoothed_values) S='_,_,30,_,_,_,50,_,_' smoothed_values = returnString(curve_smoothing(S)) smoothed_values # + # luckily, it just took me half of the day, not a full day to solve this. # - #
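# The step-by-step state machine above is hard to follow; below is a shorter sketch of the
# same left-to-right rule (`fill_missing` is a hypothetical helper, not the original
# solution). It spreads the first number over the leading gap, each pair of neighbouring
# numbers over the inclusive span between them, and the last filled value over the
# trailing gap.

# +
def fill_missing(string):
    vals = [None if tok.strip() == '_' else float(tok) for tok in string.split(',')]
    known = [i for i, v in enumerate(vals) if v is not None]
    # leading gap: spread the first number over itself and everything before it
    first = known[0]
    share = vals[first] / (first + 1)
    for i in range(first + 1):
        vals[i] = share
    # middle gaps: spread (left value + right value) over the inclusive span between them
    for left, right in zip(known, known[1:]):
        share = (vals[left] + vals[right]) / (right - left + 1)
        for i in range(left, right + 1):
            vals[i] = share
    # trailing gap: spread the last filled value over itself and everything after it
    last = known[-1]
    share = vals[last] / (len(vals) - last)
    for i in range(last, len(vals)):
        vals[i] = share
    return ','.join(str(int(v)) for v in vals)

for s in ["_,_,_,24", "40,_,_,_,60", "80,_,_,_,_", "_,_,30,_,_,_,50,_,_"]:
    print(fill_missing(s))
# -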

# ## Q8: Find the probabilities

# # You will be given a list of lists, each sublist will be of length 2 i.e. [[x,y],[p,q],[l,m]..[r,s]] # consider it like a matrix of n rows and two columns # 1. The first column F will contain only 5 unique values (F1, F2, F3, F4, F5) # 2. The second column S will contain only 3 unique values (S1, S2, S3) #
# your task is to find
# a. Probability of P(F=F1|S==S1), P(F=F1|S==S2), P(F=F1|S==S3)
# b. Probability of P(F=F2|S==S1), P(F=F2|S==S2), P(F=F2|S==S3)
# c. Probability of P(F=F3|S==S1), P(F=F3|S==S2), P(F=F3|S==S3)
# d. Probability of P(F=F4|S==S1), P(F=F4|S==S2), P(F=F4|S==S3)
# e. Probability of P(F=F5|S==S1), P(F=F5|S==S2), P(F=F5|S==S3)
# 
# Ex: # #
# [[F1,S1],[F2,S2],[F3,S3],[F1,S2],[F2,S3],[F3,S2],[F2,S1],[F4,S1],[F4,S3],[F5,S1]]
#
# a. P(F=F1|S==S1)=1/4, P(F=F1|S==S2)=1/3, P(F=F1|S==S3)=0/3
# b. P(F=F2|S==S1)=1/4, P(F=F2|S==S2)=1/3, P(F=F2|S==S3)=1/3
# c. P(F=F3|S==S1)=0/4, P(F=F3|S==S2)=1/3, P(F=F3|S==S3)=1/3
# d. P(F=F4|S==S1)=1/4, P(F=F4|S==S2)=0/3, P(F=F4|S==S3)=1/3
# e. P(F=F5|S==S1)=1/4, P(F=F5|S==S2)=0/3, P(F=F5|S==S3)=0/3
# 
# # # + # create function to find first and second column def first_second_col(A): ''' It returns the list of both first and second column of given 2-dimensional matrix ''' # initialize temp first column first_col = [] second_col= [] for element in A: # iterate for every element in A first_col.append(element[0]) # appending elements to the list second_col.append(element[1]) return first_col,second_col # return both lists # create unique list returning function def unique_element_list(A): ''' It returns the list of unique elements from given list. ''' # initialize temporary list temp_list = [] # iterate over all element of A for element in A: if element not in temp_list: temp_list.append(element) return temp_list # + def conditional_probability(A): # first find the unique list of main given list # unique_list = unique_element_list(A) # then find the first column list and second column list # F = first_column(A) # S = second_column(A) F,S = first_second_col(A) # Now, find the unique first_elements list and unique second elements'list # Q: Why find unique_f and unique_s # A: to find no.of possible probabilities to be computed unique_f = unique_element_list(F) unique_s = unique_element_list(S) for f in unique_f: for s in unique_s: #probability = A.count([f,s])/S.count(s) print('P(F={}|S=={}) = {}/{}'.format(f,s,A.count([f,s]),S.count(s)),end=' ') print('') # - main_list = [ ['F1','S1'],['F2','S2'], ['F3','S3'],['F1','S2'], ['F2','S3'],['F3','S2'], ['F2','S1'],['F4','S1'], ['F4','S3'],['F5','S1'], ] # find conditional probabilities conditional_probability(main_list) #
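# The same counts can also be obtained with `collections.Counter`; shown here only as a
# cross-check of the loop-based version above, not as part of the original solution.

# +
from collections import Counter

pair_counts = Counter(tuple(row) for row in main_list)   # counts of (F, S) pairs
s_counts = Counter(row[1] for row in main_list)          # counts of S values
# P(F=f | S=s) = count(f, s) / count(s)
print('P(F=F1|S==S1) = {}/{}'.format(pair_counts[('F1', 'S1')], s_counts['S1']))
# -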

# ## Q9: Operations on sentences

# # You will be given two sentences S1 and S2; your task is to find #
# a. Number of common words between S1, S2
# b. Words in S1 but not in S2
# c. Words in S2 but not in S1
# 
# # Ex: #
# S1= "the first column F will contain only 5 unique values"
# S2= "the second column S will contain only 3 unique values"
# Output:
# a. 7
# b. ['first','F','5']
# c. ['second','S','3']
# 
# + # write your python code here # you can take the above example as sample input for your program to test # it should work for any general input try not to hard code for only given input strings # you can free to change all these codes/structure def string_features(S1, S2): # given S1 and S2 are strings # so, split it into list of words S1 = S1.split(' ') S2 = S2.split(' ') # let's solve the problem 'a' # a. Number of common words between S1,S2 common_words = 0 common_list = [] # iterate for all words in S1 for i in S1: if i in S2: # if S1's word is also present in S2 common_list.append(i) #then, append to the common_list # you can use either this code # uncomment below code to calculate above solutino # for j in S2: # if j in S1: # common_list.append(j) a = len(common_list) # find the length of common list and store it to a # let's solve problem 'b' # b. Words in S1 but not in S2 b=[] for i in S1: if i not in S2: b.append(i) # let's solve problem 'c' # c. words in S2 but not in S1 c=[] for j in S2: if j not in S1: c.append(j) return a, b, c # - S1= "the first column F will contain only 5 uniques values" S2= "the second column S will contain only 3 uniques values" a,b,c = string_features(S1, S2) print(a,'\n',b,'\n',c) S1= "I am thinking to love you" S2= "I am thinking not to leave you" a,b,c = string_features(S1, S2) print(a,'\n',b,'\n',c) #
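# Because the three questions only depend on word membership, Python sets give the same
# answers more directly; a hypothetical sketch (the order of the returned words is not
# guaranteed when using sets).

# +
def string_features_sets(S1, S2):
    words1, words2 = set(S1.split()), set(S2.split())
    return len(words1 & words2), list(words1 - words2), list(words2 - words1)

string_features_sets("the first column F will contain only 5 uniques values",
                     "the second column S will contain only 3 uniques values")
# -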

# ## Q10: Error Function

# # You will be given a list of lists, each sublist will be of length 2 i.e. [[x,y],[p,q],[l,m]..[r,s]] # consider it like a matrix of n rows and two columns # # a. the first column Y will contain integer values
# b. the second column $Y_{score}$ will be having float values
# Your task is to find the value of $f(Y,Y_{score}) = -\frac{1}{n}\sum_{\text{each }(Y,\,Y_{score})\text{ pair}}\bigl(Y\log_{10}(Y_{score})+(1-Y)\log_{10}(1-Y_{score})\bigr)$, where n is the number of rows in the matrix #
# Ex:
# [[1, 0.4], [0, 0.5], [0, 0.9], [0, 0.3], [0, 0.6], [1, 0.1], [1, 0.9], [1, 0.8]]
# output:
# 0.44982
# 
# $\frac{-1}{8}\cdot((1\cdot log_{10}(0.4)+0\cdot log_{10}(0.6))+(0\cdot log_{10}(0.5)+1\cdot log_{10}(0.5)) + ... + (1\cdot log_{10}(0.8)+0\cdot log_{10}(0.2)) )$ # + # write your python code here # you can take the above example as sample input for your program to test # it should work for any general input try not to hard code for only given input strings import math # you can free to change all these codes/structure def compute_log_loss(A): # let's find length of A, first. lengthA = len(A) log_loss = 0 Sum = 0 for item in A: y = item[0] y_score = item[1] Sum += (y* math.log(y_score,10) + (1-y)* math.log(1-y_score,10)) loss = (-1/lengthA) * Sum return loss # - A = [[1, 0.4], [0, 0.5], [0, 0.9], [0, 0.3], [0, 0.6], [1, 0.1], [1, 0.9], [1, 0.8]] loss = compute_log_loss(A) print(loss) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Time series analysis and visualization # + # Hide all warnings import warnings warnings.simplefilter('ignore') import numpy as np import pandas as pd # %matplotlib inline import matplotlib.pyplot as plt import statsmodels as sm import statsmodels.api from tqdm import tqdm # + from pylab import rcParams # Run-Control (default) parameters rcParams['figure.figsize'] = 16, 8 rcParams['lines.linewidth'] = 4 rcParams['font.size'] = 26 # - #
# ## Time series analysis is for # # * compact **dynamics description** of observable processes # * interpretation of dynamics and **estimation of impulse response** # * **forecasting** and simulation # * solution **optimal control** problems #
# ## The objective of time series analysis # # Construct a model of time series for _current value_ of **endogeneous** variable $y_t$ # # * by the _history_ of itself $$y_{:t} = (y_{t-1}, y_{t-2}, \ldots)$$ # * by _current value_ of **exogeneous** variables $x_t$ and possibly by its _history_ too # $$ # y_t \approx \text{model}\bigl( t,\, y_{:t},\, x_t,\, x_{:t} \bigr) # \,. $$ # # Usually one forecasts a single time step ahead. #
# ## Difference from other Machine Learning tasks # # * Data are sequential # * order of **time** has to be respected strictly due to not break the causality # * Much attention to **extrapolation** — a forecast of future values related to observed sample # * It is important to be sure that data do not leak from future to current and to past observations of train subsample during feature engineering and training the model # Thus features of the model can depend only on # * **endogeneous** variables $y_{t-1}, y_{t-2}, \ldots$, i.e. they are available to the moment $t-1$ _inclusively_ # * **exogeneous** variables $x_t, x_{t-1}, \ldots$, i.e. they are available to the moment $t$ _inclusively_ #
# ## $CO_2$ concentration in atmosphere [dataset](https://www.co2.earth/weekly-co2) dataset = pd.read_csv('./mauna_loa_atmospheric_c02.csv', index_col=None, usecols=['date', 'WMLCO2']) dataset.head() # When you loads a time series within `Pandas` you have to set format of date and time explicitly dataset['date'] = pd.to_datetime(dataset['date'], format='%Y-%m-%d') # Create the index for loaded data: it will be **weekly periodical index**. We will get data with regular frequency. dataset = dataset.set_index('date').to_period('W') dataset.head() # Plot dynamics of the time series # + dataset.plot() plt.grid(which='major', axis='both') # - # Aggregate weekly data to monthly # + dataset = dataset.to_timestamp() dataset = dataset.resample('M').mean() # - dataset.head() # + dataset.plot() plt.grid(which='major', axis='both') # - # Create summary statistics # + print('Series {1}, Observations {0}'.format(*dataset.shape)) dataset.describe().T.head() # + dataset.loc['1960':'1967'].plot() plt.grid(which='major', axis='both') # - # ### Missed values maginfy_slice = slice('1960', '1967') # Missed values can be filled by # # 1) last known observable # * **+** doesn't look through the future # * **-** can't fill the beginning of the series # * **-** doesn't account specificity of the series dataset_ff = dataset.fillna(method='ffill') # + dataset_ff.loc[maginfy_slice].plot() plt.grid(which='major', axis='both') # - # 2) iterpolation of the neighboring values # # * **+** smooth peaks # * **-** doesn't fill the ends of the series # * **-** slightly look through the future # + dataset_linterp = dataset.interpolate(method='linear') dataset_pinterp = dataset.interpolate(method='polynomial', order=2) # + ax = dataset_pinterp.loc[maginfy_slice].plot() dataset_linterp.loc[maginfy_slice].plot(ax=ax, linewidth=4, alpha=0.7) plt.grid(which='major', axis='both') # - # 3) exlude at all # # * **+** doesn't change the values # * **-** break the regularity and related periodicity # * **-** deplete the sampling dataset_drop = dataset.dropna() # + dataset_drop.loc[maginfy_slice].plot() plt.grid(which='major', axis='both') # - # 4) estimate by probabilty model # # * **+** filling based on extracted patterns (learned dependencies) # * **-** it is needed to specify the model and to train it # 5) smooth by splines or by local kernel model # * **+** explicitly accounts close in time observations # * **+** allows to increase the frequency of observations ("_resolution_") # * **+** allows to fill missed boundary values # * **-** look through the future far # * **-** it is needed to define th kernel and the model for extrapolation # Looking into the future can be ignorred if **missed values are minority**. # # But if missed values are majority then it is needed to understand why it is happened in the sampling. 
full_dataset = dataset_pinterp # Prepare train and test samplings in the ratio 3 to 1 # + holdout = full_dataset.loc['1991-01-01':] dataset = full_dataset.loc[:'1990-12-31'] print(len(dataset), len(holdout)) # - # Make sure the parts don't intersect pd.concat([ dataset.tail(), holdout.head() ], axis=1) # Store the bounds of the intervals explicitly # + holdout_slice = slice(*holdout.index[[0, -1]]) print('Train sample from {} to {}'.format(*dataset.index[[0, -1]])) print('Test sample from {} to {}'.format(holdout_slice.start, holdout_slice.stop)) # - # Select the column of target variable target_column = 'WMLCO2' # + fig = plt.figure() ax = fig.add_subplot(111, xlabel='Date', ylabel='value', title=target_column) # 111 means 1 row 1 column 1st axes on the "grid" # plot dynamics of entire time series full_dataset[target_column].plot(ax=ax) # highlight delayed interval for testing ax.axvspan(holdout_slice.start, holdout_slice.stop, color='C1', alpha=0.25, zorder=-99) ax.grid(which='major', axis='both'); # - #
# # A property # # **Stationarity** is a property of a process $\{y_t\}_{t\geq0}$ meaning # > probabilistic interconnections in the set $(y_{t_1},\,\ldots,\,y_{t_m})$ are invariant with respect to shift $s \neq 0$. # That means # * **there are no special moments** in the time when statistical properties of observables are changing # * patterns are stable in time and are determined by **indentation of observables** relative to each other: # * mean, dispersion, and autocorrelation doesn't depend on moment of time # ## A ghost property # # Stochastic processes in real problems are **almost always non-stationary** # * mean depends on time (there is a trend in the dynamics) # * calendar events (holidays or vacations) # * season periodicity # * daily rhythm of power grid load # * season temperature # * yearly peak of monthly inflation in the beginning of year # * unpredictable structural drift # * political decisions # * blackouts # * hysteresis # Thus majority of time series especially economic, climatic, and financial are non-stationary. #
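# Besides the visual diagnostics that follow, stationarity can also be probed numerically.
# As a complementary, hypothetical check (not part of the original notebook), the augmented
# Dickey-Fuller test from `statsmodels` can be applied to the training series:

# +
from statsmodels.tsa.stattools import adfuller

adf_stat, p_value = adfuller(dataset[target_column].dropna())[:2]
print('ADF statistic: {:.3f}, p-value: {:.3f}'.format(adf_stat, p_value))
# a large p-value means the unit-root (non-stationarity) hypothesis is not rejected
# -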
# # Visualization and diagnosis of non-stationarity # Visualization in time series analysis allows to # * get preliminary picture of correlations # * select reasonable strategy of validation a model # * estimate if there is structural drift # * leaps and gaps # * clusters of intensive oscillations or periods of plateau # * diagnose non-stationarity: trend, seasonality, etc. # ### A plot of moving statistics # Moving statistics of a series within window of length $N$ allow to discover changes in time # # * **moving average** of time series level # $$ # m_t = \frac1{N} \sum_{s=t-N+1}^t y_s # $$ # * **moving standard deviation** (scatter) # $$ # s_t = \sqrt{s^2_t} # \,, \quad # s^2_t = \frac1{N-1} \sum_{s=t-N+1}^t (y_s - m_t)^2 # $$ rcParams['figure.figsize'] = 16, 10 def rolling_diagnostics(series, window=500): rolling = series.rolling(window) # Create top and bottom plots fig = plt.figure() ax_top = fig.add_subplot(211, title='Moving average', xlabel='Date', ylabel='value') ax_bottom = fig.add_subplot(212, title='Moving standard deviation', sharex=ax_top, xlabel='Date', ylabel='std.') # Plot the graphs # series itself and moving average rolling.mean().plot(ax=ax_top) series.plot(ax=ax_top, color='black', lw=2, alpha=.25, zorder=-10) ax_top.grid(which='major', axis='both') # moving std. rolling.std().plot(ax=ax_bottom) ax_bottom.grid(which='major', axis='both') fig.tight_layout() return fig rolling_diagnostics(dataset[target_column], window=36); # The graphs show the trend in the dynamics of time series #
# ### Rough estimate of seasonality # It is disarable to make season normalization relatively to trend. # Let's discover seasonality, for example monthly def monthly_seasonality_diagnostics(series, fraction=0.66, period='month'): # Use non-parametric local linear regression to preliminary estimate the trend trend = sm.api.nonparametric.lowess(series, np.r_[:len(series)], frac=fraction, it=5) # Aggregate by months and calculate average and standard deviation by = getattr(series.index, period, 'month') season_groupby = (series - trend[:, 1]).groupby(by) seas_mean, seas_std = season_groupby.mean(), season_groupby.std() # Create subplots fig = plt.figure() ax_top = fig.add_subplot(211, title='Trend', xlabel='Date') ax_bottom = fig.add_subplot(212, title='Seasonality', xlabel=period) # Plot the graphs # The series and the trend pd.Series(trend[:, 1], index=series.index).plot(ax=ax_top) series.plot(ax=ax_top, color="black", lw=2, alpha=.25, zorder=-10) ax_top.grid(which="major", axis="both") # Seasonality and 90% normal confidence interval ax_bottom.plot(1 + np.r_[:len(seas_mean)], seas_mean, lw=2) ax_bottom.fill_between(1 + np.r_[:len(seas_mean)], seas_mean - 1.96 * seas_std, seas_mean + 1.96 * seas_std, zorder=-10, color="C1", alpha=0.15) ax_bottom.grid(which="major", axis="both") fig.tight_layout() return fig monthly_seasonality_diagnostics(dataset[target_column], fraction=0.33, period='month'); # The graph shows the **monthly** seasonality in the dynamics # + ## TODO: check visually if there is weekly seasonality # - #
# ### Total vs. partial autocorrelations # The functions estimate influence of observation of $h$ steps (_lags_) on the current observation, but they does it differently # * **total autocorrelation** $\rho_h$ # * shows cumulative impact $y_{t-h}$ to $y_t$ **via** influence on all intermediate $y_{t-j}$, $j=1,\,...,\,h-1$ # * **partial autocorrelation** $\phi_h$ # * shows **net** (pure) impract $y_{t-h}$ to $y_t$ **excluding** influence on all intermediate $y_{t-j}$, $j=1,\,...,\,h-1$ from statsmodels.tsa.stattools import acf, pacf # + from statsmodels.graphics.tsaplots import plot_acf, plot_pacf def correlation_diagnostics(series, lags=60): fig = plt.figure(figsize=(20, 6)) ax_left, ax_right = fig.subplots( nrows=1, ncols=2, sharey=True, sharex=True, subplot_kw={'xlabel': 'lag', 'ylim': (-1.1, 1.1)}) # Use intrinsic statsmodels functions plot_acf(series, ax_left, lags=lags, zero=False, alpha=0.05, title='Sample autocorrelation', marker=None) plot_pacf(series, ax_right, lags=lags, zero=False, alpha=0.05, title='Sample partial autocorrelation', marker=None) fig.tight_layout() return fig # - # Let's explore sample autocorrelations of the series correlation_diagnostics(dataset[target_column], lags=250); # * On the **left plot** autocorrelation of small lags is near to $1.0$ and decreases pretty slowly # * On the **right plot** observations with lag $1$, $110$, $215$ has statistically non-null net effect # # It is indication of very typical kind of non-stationarity: $y_t = y_{t-1} + \ldots$. # # That means it is observed strong dependance of the past (the history of a process). # --- # # Key steps of model construction for time series # # * Stationarize a time series # * Estimate parameter of the model # * Visualize remains after stationarization # * check if respect the model requirements # * Validation of the model # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Pyspark 2 # language: python # name: pyspark2 # --- # # Quick Recap of Python # # Let us quickly recap of some of the core programming concepts of Python before we get into Spark. # ## Data Engineering Life Cycle # # Let us first understand the Data Engineering Life Cycle. We typically read the data, process it by applying business rules and write the data back to different targets # * Read the data from different sources. # * Files # * Databases # * Mainframes # * APIs # * Processing the data # * Row Level Transformations # * Aggregations # * Sorting # * Ranking # * Joining multiple data sets # * Write data to different targets. # * Files # * Databases # * Mainframes # * APIs # ## Python CLI or Jupyter Notebook # # We can use Python CLI or Jupyter Notebook to explore APIs. # # * We can launch Python CLI using `python` command. # * We can launch the Jupyter Notebook using the `jupyter notebook` command. # * A web service will be started on port number 8888 by default. # * We can go to the browser and connect to the web server using IP address and port number. # * We should be able to explore code in interactive fashion. # * We can issue magic commands such as %%sh to run shell commands, %%md to document using markdown etc. # ### Tasks # # Let us perform these tasks to just recollect how to use Python CLI or Jupyter Notebook. # * Create variables i and j assigning 10 and 20.5 respectively. i = 10 j = 20.5 # * Add the values and assign it to res. res = i + j print(str(res)) # * Get the type of i, j and res. 
type(i) type(j) type(res) # * Get the help on int. help(int) # * Get the help on startswith that is available on str. help(str.startswith) # ## Basic Programming Constructs # # Let us recollect some of the basic programming constructs of Python. # * Comparison Operations (==, !=, <, >, <=, >=, etc) # * All the comparison operators return a True or False (Boolean value) # * Conditionals (if) # * We typically use comparison operators as part of conditionals. # * Loops (for) # * We can iterate through collection using for i in l where l is a standard collection such as list or set. # * Python provides special function called as range which will return a collection of integers between the given range. It excludes the upper bound value. # * In Python, scope is defined by indentation. # ### Tasks # # Let us perform few tasks to quickly recap basic programming constructs of Python. # * Get all the odd numbers between 1 and 15. # list(range(1, 16, 2)) # * Print all those numbers which are divisible by 3 from the above list. for i in list(range(1, 16, 2)): if(i%3 == 0): print(i) # ## Developing Functions # # Let us understand how to develop functions using Python as programming language. # * Function starts with def followed by function name. # * Parameters can be of different types. # * Required # * Keyword # * Variable Number # * Functions # * Functions which take another function as an argument is called higher order functions. # # ### Tasks # # Let us perform few tasks to understand how to develop functions in Python. # # * Sum of integers between lower bound and upper bound using formula. # # # + def sumOfN(n): return int((n * (n + 1)) / 2) sumOfN(10) # + def sumOfIntegers(lb, ub): return sumOfN(ub) - sumOfN(lb -1) sumOfIntegers(5, 10) # - # * Sum of integers between lower bound and upper bound using loops. # + def sumOfIntegers(lb, ub): total = 0 for e in range(lb, ub + 1): total += e return total sumOfIntegers(1, 10) # - # * Sum of squares of integers between lower bound and upper bound using loops. # + def sumOfSquares(lb, ub): total = 0 for e in range(lb, ub + 1): total += e * e return total sumOfSquares(2, 4) # - # * Sum of the even numbers between lower bound and upper bound using loops. # + def sumOfEvens(lb, ub): total = 0 for e in range(lb, ub + 1): total += e if e%2==0 else 0 return total sumOfEvens(2, 4) # - # ## Lambda Functions # # Let us recap details related to lambda functions. # # * We can develop functions with out names. They are called Lambda Functions and also known as Anonymous Functions. # * We typically use them to pass as arguments to higher order functions which takes functions as arguments # # ### Tasks # # Let us perform few tasks related to lambda functions. # # * Create a generic function mySum which is supposed to perform arithmetic using integers within a range. # # * It takes 3 arguments - lb, ub and f. # * Function f should be invoked inside the function on each element within the range. # # def mySum(lb, ub, f): total = 0 for e in range(lb, ub + 1): total += f(e) return total # * Sum of integers between lower bound and upper bound using mySum. mySum(2, 4, lambda i: i) # * Sum of squares of integers between lower bound and upper bound using mySum. mySum(2, 4, lambda i: i * i) # * Sum of the even numbers between lower bound and upper bound using mySum. mySum(2, 4, lambda i: i if i%2 == 0 else 0) # ## Overview of Collections and Tuples # # Let's quickly recap about Collections and Tuples in Python. 
We will primarily talk about collections and tuples that comes as part of Python standard library such as list, set, dict and tuple. # # * Group of elements with length and index - list # * Group of unique elements - set # * Group of key value pairs - dict # * While list, set and dict contain group of homogeneous elements, tuple contains group of heterogeneous elements. # * We can consider list, set and dict as a table in a database and tuple as a row or record in a given table. # * Typically we create list of tuples or set of tuples and dict is nothing but collection of tuples with 2 elements and key is unique. # * We typically use Map Reduce APIs to process the data in collections. There are also some pre-defined functions such as len, sum, min, max etc for aggregating data in collections. # ### Tasks # # Let us perform few tasks to quickly recap details about Collections and Tuples in Python. We will also quickly recap about Map Reduce APIs. # # * Create a collection of orders by reading data from a file. # + language="sh" # ls -ltr /data/retail_db/orders/part-00000 # - orders_path = "/data/retail_db/orders/part-00000" orders = open(orders_path). \ read(). \ splitlines() # * Get all unique order statuses. Make sure data is sorted in alphabetical order. sorted(set(map(lambda o: o.split(",")[3], orders))) # * Get count of all unique dates. len(list(map(lambda o: o.split(",")[1], orders))) # * Sort the data in orders in ascending order by order_customer_id and then order_date. sorted(orders, key=lambda k: (int(k.split(",")[2]), k.split(",")[1])) # * Create a collection of order_items by reading data from a file. order_items_path = "/data/retail_db/order_items/part-00000" order_items = open(order_items_path). \ read(). \ splitlines() # * Get revenue for a given order_item_order_id. # + def get_order_revenue(order_items, order_id): order_items_filtered = filter(lambda oi: int(oi.split(",")[1]) == 2, order_items ) order_items_map = map(lambda oi: float(oi.split(",")[4]), order_items_filtered ) return round(sum(order_items_map), 2) get_order_revenue(order_items, 2) # - # ## Overview of Pandas Data Frames # # While collections are typically the group of objects or tuples or simple strings, we need to parse them to further process the data. This process is tedious at times. # * With Data Frames we can define the structure. # * Data Frame is nothing but group of rows where each row have multiple attributes with names. # * Data Frame is similar to a Database Table or Spreadsheet with Header. # * Pandas provide rich and simple functions to convert data in files into Data Frames and process them # * Data can be read from files into Data Frame using functions such as read_csv. # * We can perform all standard operations on Data Frames. # * Projection or Selection # * Filtering # * Aggregations # * Joins # * Sorting # ### Tasks # # Let us perform few tasks to recap the usage of Pandas Data Frames. # # * Read order items data from the location on your system. In mine it is /data/retail_db/order_items/part-00000. Use the information below to define schema. # * It has 6 fields with the below names in the same order as specified below. # * order_item_id # * order_item_order_id # * order_item_product_id # * order_item_quantity # * order_item_subtotal # * order_item_product_price import pandas as pd order_items_path = "/data/retail_db/order_items/part-00000" order_items = pd. 
\ read_csv(order_items_path, names=["order_item_id", "order_item_order_id", "order_item_product_id", "order_item_quantity", "order_item_subtotal", "order_item_product_price" ] ) # * Project order_item_order_id and order_item_subtotal order_items[["order_item_id", "order_item_subtotal"]] # * Filter for order_item_order_id 2 order_items.query("order_item_order_id == 2") # * Compute revenue for order_item_order_id 2 order_items. \ query("order_item_order_id == 2")["order_item_subtotal"]. \ sum() # * Get number of items and revenue for each order id. Give alias to the order revenue as **revenue**. order_items. \ groupby("order_item_order_id")["order_item_subtotal"]. \ sum() order_items. \ groupby("order_item_order_id")["order_item_subtotal"]. \ agg(['sum', 'count']). \ rename(columns={'sum': 'revenue'}) # ## Limitations of Pandas # # We can use Pandas for data processing. It provides rich APIs to read data from different sources, process the data and then write it to different targets. # # * Pandas works well for light weight data processing. # * Pandas is typically single threaded, which means only one process take care of processing the data. # * As data volume grows, the processing time might grow exponentially and also run into resource contention. # * It is not trivial to use distributed processing using Pandas APIs. We will end up struggling with multi threading rather than business logic. # * There are Distributed Computing Frameworks such as Hadoop Map Reduce, Spark etc to take care of data processing at scale on multi node Hadoop or Spark Clusters. # * Both Hadoop Map Reduce and Spark comes with Distributed Computing Frameworks as well as APIs. # # **Pandas is typically used for light weight Data Processing and Spark is used for Data Processing at Scale.** # ## Development Life Cycle # # Let us understand the development life cycle. We typically use IDEs such as PyCharm to develop Python based applications. # # * Create Project - retail # * Choose the interpreter 3.x # * Make sure plugins such as pandas are installed. # * Create config.py script for externalizing run time parameters such as input path, output path etc. # * Create app folder for the source code. # ### Tasks # # Let us develop a simple application to understand end to end development life cycle. # # * Read the data from order_items # * Get revenue for each order id # * Save the output which contain order id and revenue to a file. # # Click [here](https://github.com/dgadiraju/python-retail/tree/v1.0) for the complete code for the above tasks. # ## Exercises # # Let us perform few exercises to understand how to process the data. We will use LinkedIn data to perform some basic data processing using Python. # # * Get LinkedIn archive. # * Go to https://linkedin.com # * Me on top -> Settings & Privacy # * Then go to "How LinkedIn users your data" -> Getting a copy of your data # * Register and download. You will get a link as part of the email. # * Data contain multiple CSV files. We will limit the analysis to **Contacts.csv** and **Connections.csv**. # * Get the number of **contacts** with out email ids. # * Get the number of **contacts** from each source. # * Get the number of **connections** with each title. # * Get the number of **connections** from each company. # * Get the number of **contacts** for each month in the year 2018. 
# * Use Postgres or MySQL as databases (you can setup in your laptop) and write connections data to the database # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # 2016-10-07: Regularized Logistic Regression # In this lab, we will appply logistic regression to the Endometrium vs. Uterus cancer data. # # Let us start by setting up our environment, loading the data, and setting up our cross-validation. import numpy as np # %pylab inline # Load the data as usual (here the code for Python 2.7) X = np.loadtxt('data/small_Endometrium_Uterus.csv', delimiter=',', skiprows=1, usecols=range(1, 3001)) y = np.loadtxt('data/small_Endometrium_Uterus.csv', delimiter=',', skiprows=1, usecols=[3001], converters={3001: lambda s: 0 if s=='Endometrium' else 1}, dtype='int') # Set up a stratified 10-fold cross-validation from sklearn import cross_validation folds = cross_validation.StratifiedKFold(y, 10, shuffle=True) # Create a function that does cross-validation and scales the features on each training set. from sklearn import preprocessing def cross_validate_with_scaling(design_matrix, labels, classifier, cv_folds): """ Perform a cross-validation and returns the predictions. Use a scaler to scale the features to mean 0, standard deviation 1. Parameters: ----------- design_matrix: (n_samples, n_features) np.array Design matrix for the experiment. labels: (n_samples, ) np.array Vector of labels. classifier: sklearn classifier object Classifier instance; must have the following methods: - fit(X, y) to train the classifier on the data X, y - predict_proba(X) to apply the trained classifier to the data X and return probability estimates cv_folds: sklearn cross-validation object Cross-validation iterator. Return: ------- pred: (n_samples, ) np.array Vectors of predictions (same order as labels). """ pred = np.zeros(labels.shape) # vector of 0 in which to store the predictions for tr, te in cv_folds: # Restrict data to train/test folds Xtr = design_matrix[tr, :] ytr = labels[tr] Xte = design_matrix[te, :] #print Xtr.shape, ytr.shape, Xte.shape # Scale data scaler = preprocessing.StandardScaler() # create scaler Xtr = scaler.fit_transform(Xtr) # fit the scaler to the training data and transform training data Xte = scaler.transform(Xte) # transform test data # Fit classifier classifier.fit(Xtr, ytr) # Predict probabilities (of belonging to +1 class) on test data yte_pred = classifier.predict_proba(Xte) # two-dimensional array # Identify the index, in yte_pred, of the positive class (y=1) # index_of_class_1 = np.nonzero(classifier.classes_ == 1)[0][0] index_of_class_1 = 1 - ytr[0] # 0 if the first sample is positive, 1 otherwise pred[te] = yte_pred[:, index_of_class_1] return pred # ## 1. L1-Regularized Logistic Regression # # Let us start with default parameters. from sklearn import linear_model clf = linear_model.LogisticRegression(penalty='l1') # **Question** Compute the cross-validated predictions of the l1-regularized logistic regression with default parameters on our data. # **Question** Plot the corresponding ROC curve, and compare it to that obtained for non-regularized logistic regression. # ### Setting the C parameter # What does the C parameter correspond to? See the documentation at http://scikit-learn.org/stable/modules/linear_model.html#logistic-regression for help. 
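# A minimal sketch of what `C` controls: it is the inverse of the regularization strength, so smaller values of `C` regularize more strongly and, with the l1 penalty, push more weights to exactly zero. The loop below reuses the `X` and `y` arrays loaded above; on recent scikit-learn versions you may also need to pass `solver='liblinear'` to use the l1 penalty.
# +
import numpy as np
from sklearn import linear_model

for C_value in [1e-3, 1e-1, 1e1, 1e3]:
    clf_c = linear_model.LogisticRegression(penalty='l1', C=C_value)
    clf_c.fit(X, y)
    n_nonzero = np.count_nonzero(clf_c.coef_)
    print("C = {:g}: {} non-zero weights out of {}".format(
        C_value, n_nonzero, clf_c.coef_.size))
# -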
# # Scikit-learn makes it really easy to use a nested cross-validation to choose a good value for C among a grid of several choices. from sklearn import grid_search param_grid = {'C':[1e-3, 1e-2, 1e-1, 1., 1e2, 1e3]} clf = grid_search.GridSearchCV(linear_model.LogisticRegression(penalty='l1'), param_grid) # **Question** What criterion is used to chose the optimal C? See the documentation at http://scikit-learn.org/0.17/modules/generated/sklearn.grid_search.GridSearchCV.html#sklearn.grid_search.GridSearchCV. Try changing this criterion http://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter # **Question** Compute the cross-validated predictions of the l1-regularized logistic regression with optimized C parameter on our data. # GridSearchCV also uses the optimal parameter(s) it detected to fit a model to its entire training data again, generating a "best model" that is accessible via the `best_estimator_` attribute. # # In our case, because we called GridSearchCV from inside a cross-validation loop, `clf.best_estimator_` is the "best model" *on the last training fold*. print clf.best_estimator_ # **Question** Plot the corresponding ROC curve, and compare to that obtained for # * non-regularized logistic regression. # * l1-regularized logistic regression with default C parameter. # ### Regression weights # Remember the goal of l1-regularization is to build sparse models. # This code plots the regression weights of the classifier 'clf' plt.plot(range(len(clf.best_estimator_.coef_[0])), clf.best_estimator_.coef_[0], color='blue', marker='+', linestyle='') plt.xlabel('Genes', fontsize=16) plt.ylabel('Weights', fontsize=16) plt.title('Logistic regression weights', fontsize=16) plt.xlim([0, X.shape[1]]) # **Question** Compare the regression weights obtained with and without l1-regularization, in two side-by-side plots. # + fig = plt.figure(figsize=(10, 5)) ax = fig.add_subplot(121) # use a 1x2 subplot grid; ax will refer to the 1st subplot number_of_weights = #TODO logreg_weights = #TODO ax.plot(range(number_of_weights), logreg_weights, color='blue', marker='+', linestyle='') ax.set_xlabel('Genes', fontsize=16) ax.set_ylabel('Weights', fontsize=16) ax.set_title('Logistic regression weights', fontsize=16) ax.set_xlim([0, X.shape[1]]) ax = fig.add_subplot(122) # use a 1x2 subplot grid; ax will refer to the 2nd subplot l1_logreg_weights = #TODO ax.plot(ange(number_of_weights), l1_logreg_weights, color='blue', marker='+', linestyle='') ax.set_xlabel('Genes', fontsize=16) ax.set_ylabel('Weights', fontsize=16) ax.set_title('Regularized Logistic regression weights', fontsize=16) ax.set_xlim([0, X.shape[1]]) plt.tight_layout() # - # ## 2. L2-regularized logistic regression # # **Question** What is the role of l2 regularization? clf = grid_search.GridSearchCV(linear_model.LogisticRegression(penalty='l2'), param_grid) # **Question** Compute the cross-validated predictions of an l2-regularized logistic regression with optimized C parameters on our data. # **Question** Plot the corresponding ROC curve, and compare to that obtained for # * non-regularized logistic regression # * l1-regularized logistic regression (with optimized C parameter) # **Question** Compare the regression weights obtained with l2-regularization to those obtained # * with l1-regularization. # * with no regularization. # Do your observations match your expectations? # ## 3. 
Kaggle challenge # * Cross-validate an l1-regularized linear regression (lasso) on your data, using the folds you previously set up for non-regularized linear regression. Do you obtain better performance? Can you draw some conclusions regarding the usefulness of the different features for the prediction task? # * Cross-validate an l2-regularized linear regression (ridge regression) on your data, using the folds you previously set up for non-regularized linear regression. Do you obtain better performance? # * Submit predictions to the leaderboard for both those models. Do the results on the leaderboard data match your expectations? # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="dTD_FAV5gLPe" colab_type="code" colab={} # !unzip data.zip # + id="yOdGjgsS5p1v" colab_type="code" colab={} # !mkdir logs # !mkdir models # + id="8Ttdh4AhUfX_" colab_type="code" outputId="9f50fb7c-2c80-403f-ff8b-2c2912a4e474" colab={"base_uri": "https://localhost:8080/", "height": 34} import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import tensorflow as tf import cv2 from keras.models import Sequential from keras.layers import Conv2D, Dense, Dropout, Flatten, BatchNormalization from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ModelCheckpoint from sklearn.model_selection import train_test_split from keras.callbacks import ModelCheckpoint, TensorBoard import keras # + id="GczNLoL9cXxE" colab_type="code" colab={} img_row = 32 img_col = 32 num_channel = 1 epoch = 20 num_classes = 62 # + id="RXa3Q-FMeHKd" colab_type="code" outputId="14fb500c-74b2-4fa3-8c82-38b077389b99" colab={"base_uri": "https://localhost:8080/", "height": 71} datagen = ImageDataGenerator( rescale=1./255, shear_range=0.2, zoom_range=0.2, zca_whitening=True, rotation_range=15, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=False, vertical_flip=False, validation_split=0.2) # + id="rGNUSugMej1z" colab_type="code" outputId="3bdb6923-7cf6-4faf-9d85-1ab560c5d0b9" colab={"base_uri": "https://localhost:8080/", "height": 51} train_generator = datagen.flow_from_directory( 'data/', target_size=(img_row, img_col), batch_size=32, color_mode='grayscale', class_mode='categorical', subset='training') # set as training data validation_generator = datagen.flow_from_directory( 'data/', # same directory as training data target_size=(img_row, img_col), batch_size=32, color_mode='grayscale', class_mode='categorical', subset='validation') # set as validation data # + id="ohvKgktrgIMt" colab_type="code" colab={} tensorboard = TensorBoard(log_dir = 'logs/epochs_{}'.format(epoch)) # + id="VAK4DunzhBLQ" colab_type="code" colab={} augmented_checkpoint = ModelCheckpoint('models/augmented_best_model.hdf5', monitor='val_loss', verbose=1, save_best_only=True, mode='auto') # + id="vidb-9yGhJCV" colab_type="code" outputId="bf8f8bc4-522d-45ad-f31a-ceab159c8ce9" colab={"base_uri": "https://localhost:8080/", "height": 1000} model = Sequential() model.add(Conv2D(32, (1, 1), strides=(1, 1), activation='relu', input_shape=(img_row, img_col, num_channel))) model.add(Conv2D(32, (1, 1), strides=(1, 1), activation='relu')) model.add(Conv2D(32, (1, 1), strides=(1, 1), activation='relu')) model.add(Dropout(0.5)) model.add(BatchNormalization()) model.add(Conv2D(64, (3, 3), strides=(1, 1), activation='relu')) model.add(Conv2D(64, (3, 3), strides=(1, 1), 
activation='relu')) model.add(Conv2D(64, (3, 3), strides=(1, 1), activation='relu')) model.add(Dropout(0.5)) model.add(BatchNormalization()) model.add(Conv2D(64, (3, 3), strides=(1, 1), activation='relu')) model.add(Conv2D(32, (3, 3), strides=(1, 1), activation='relu')) model.add(Conv2D(32, (3, 3), strides=(1, 1), activation='relu')) model.add(Dropout(0.5)) model.add(BatchNormalization()) model.add(Conv2D(16, (3, 3), strides=(1, 1), activation='relu')) model.add(Conv2D(16, (3, 3), strides=(1, 1), activation='relu')) model.add(Flatten()) model.add(Dense(num_classes*16, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(num_classes, activation='softmax')) model.summary() # + id="00u72JF2h7XQ" colab_type="code" outputId="231a50be-bf4a-448a-9af3-f710624e133b" colab={"base_uri": "https://localhost:8080/", "height": 71} model.compile(loss=keras.losses.categorical_crossentropy, # Better loss function for neural networks optimizer=keras.optimizers.Adam(), # Adam optimizer with 1.0e-4 learning rate metrics=['accuracy']) # + id="1RruCKN9h7TT" colab_type="code" colab={} history = model.fit_generator( train_generator, steps_per_epoch = train_generator.samples // 32, validation_data = validation_generator, validation_steps = validation_generator.samples // 32, epochs = epoch, callbacks=[tensorboard, augmented_checkpoint]) # + id="wsV0G31qkp-r" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 573} outputId="1d2985ee-e459-4528-b078-ab1ec538f95c" # summarize history for accuracy plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() # summarize history for loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() # + id="55aY8bPQh7Qi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="e6d12318-6443-425d-9615-29a8e1d1440b" predictions = model.predict_generator(validation_generator, steps=18461/32) # + id="C84XDiiDh7N-" colab_type="code" colab={} y_pred = np.argmax(predictions, axis=1) # + id="_0XwJIOdh-TW" colab_type="code" colab={} from sklearn.metrics import confusion_matrix, classification_report # + id="AFd2nV6qC__D" colab_type="code" colab={} c_mat = confusion_matrix(validation_generator.classes, y_pred) # + id="64YcAwBKF1zJ" colab_type="code" colab={} target_names = validation_generator.class_indices # + id="5SaHE6AGGil1" colab_type="code" colab={} cls_report = classification_report(validation_generator.classes, y_pred, target_names=target_names) print(cls_report) # + id="ct1kERu9G8Mh" colab_type="code" colab={} history = model.fit_generator( train_generator, steps_per_epoch = train_generator.samples // 32, validation_data = validation_generator, validation_steps = validation_generator.samples // 32, epochs = 5, callbacks=[tensorboard, augmented_checkpoint]) # + id="sb60JHQJHIJs" colab_type="code" colab={} # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## ``zip()`` # Assuming you have two one-dimensioned containers with the same length, you can create a **list** wherein each row is a tuple made up of the corresponding items of the containers. 
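# A quick aside before the Python 2 example below: under Python 3, ``zip()`` returns a lazy iterator rather than a list, so it has to be materialized explicitly (and is exhausted after one pass). A minimal sketch with made-up values:
# +
x3 = [0, 0.0001, 0.0003]
y3 = [0.8, 0.85, 0.9]
z3 = zip(x3, y3)   # a zip object on Python 3 (a list on Python 2)
print(list(z3))    # [(0, 0.8), (0.0001, 0.85), (0.0003, 0.9)]
# -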
# + from __future__ import print_function, division import numpy as np x = np.array([0, 0.0001, 0.0003, 0.0007, 0.001, 0.002]) y = np.array([0.8, 0.85, 0.9, 0.95, 0.975, 1]) z = zip(x, y) print('Object z is of', type(z)) print(z) for item in z: print(type(item), item[0], item[1]) # - # The **``zip()``** function will work with more than two containers, as long as all the containers are of the same length. a = np.array([1, 2, 3, 4, 5], dtype=float) b = np.array([10, 11, 13, 14, 15], dtype=float) c = np.array([21, 22, 23, 24, 25], dtype=float) z = zip(a, b, c) print(z) # Containers being zipped can be of different types. As long as each container is one-dimensioned and of the same length as the other containers being zipped, it is possible to zip them together. a = np.array([1, 2, 3, 4, 5], dtype=float) b = [10, 11, 13, 14, 15] c = (21, 22, 23, 24, 25) z = zip(a, b, c) print(type(a), type(b), type(c)) print(z) # ## ``enumerate()`` # Given a one-dimensioned container, you can create a two-dimensioned list with each row of type **``tuple``** with the first column being an index and second column being the given container. e = list(enumerate(x)) print('e is of', type(e)) print(e) for item in e: print(type(item), item[0], item[1]) # **``enumerate()``** is very useful when you want to iterate over elements of a container, as well as use the index of the item. # + x = [10, 20, 30, 40, 50] print(len(x)) print('Using range(len())') for i in range(len(x)): print(i, x[i]) print('Using enumerate()') for i, xx in list(enumerate(x)): print(i, xx) # - z = zip(a, b, c) print(z) e = enumerate(z) # e is an object of type enumerate print(e) l = list(e) # Generate a list from the enumerate object print(l) for i, j in l: print(i, j) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Análise dos dados de reembolso dos senadores - 2018 # Importando as bibliotecas import pandas as pd # + # Carregar o arquivo csv df = pd.read_csv('2018.csv', sep=';', encoding='cp1252', skiprows=1, decimal=',') df.head() # - # Verificar tamanhos e tipos dos dados df.info() # Mostra uma análise quantitativa dos dados df.describe() # qual o total de reembolsos? df['VALOR_REEMBOLSADO'].sum() # Quantos reembolsos foram solicitados por cada senador? df['SENADOR'].value_counts() # Quanto cada senador solicitou de reembolso? df.groupby(['SENADOR'])['VALOR_REEMBOLSADO'].sum().sort_values(ascending=False) # Quais são os 5 maiores valores de reembolso? df.nlargest(5, 'VALOR_REEMBOLSADO').T # Quais são os 5 menos reembolsos? df.nsmallest(5, 'VALOR_REEMBOLSADO').T # Quantidade de reembolsos por tipo de despesa df['TIPO_DESPESA'].value_counts() # Quais são os valores totais de reembolso por tipo de despesa? 
df.groupby('TIPO_DESPESA')['VALOR_REEMBOLSADO'].sum().sort_values(ascending=False) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="SFtzChFWMzKt" colab_type="code" outputId="586491ca-5ebc-4617-9410-6985a9131d37" colab={"base_uri": "https://localhost:8080/", "height": 1000} # !python -m pip install pymongo[srv] --user # !python -m pip install pymongo==3.9.0 --user # !python -m pip install flask-cors --user # !python -m pip install flask_ngrok --user # !python -m pip install turicreate # + id="wB6Tj2WYNEW6" colab_type="code" outputId="be598195-09dc-43b2-9c6b-d6c7da9875ca" colab={"base_uri": "https://localhost:8080/", "height": 607} from flask import Flask,jsonify,request import pandas as pd import numpy as np import time from sklearn.model_selection import train_test_split import sys import turicreate as tc sys.path.append("..") import json from flask_cors import CORS from flask import request import datetime import json as json from pymongo import MongoClient from sklearn.svm import SVR import matplotlib.pyplot as plt from scipy import stats from sklearn.ensemble import RandomForestRegressor from bson import ObjectId import math from flask_ngrok import run_with_ngrok app=Flask(__name__) CORS(app) run_with_ngrok(app) url='mongodb+srv://test:@/test?retryWrites=true&w=majority' db_name='shop_list' def read_json(url,db_name,table_name): client = MongoClient(url) db = client.get_database(db_name) if(table_name=="customers"): return(db.customers) elif(table_name=="transactions"): return(db.transactions) elif(table_name=="itemlist"): return(db.itemlist) elif(table_name=="category"): return(db.category) elif(table_name=="rta"): return(db.rta) elif(table_name=="Recent_purchases"): return(db.Recent_purchases) #functions for recommendation -->> #To get the overall users list def get_user(): users_table=read_json(url,db_name,"customers") res=users_table.find({},{"_id":0}) users=[] for i in res: users.append(str(i["cust_id"])) return users #To get the the data for recommendation def get_data(users): user_data=[]#output 1 item_data=[]#output 2 target_data=[]#output 3 transactions_table=read_json(url,db_name,"transactions") for user in users: #An object to find in the table query={} query["cust_id"]=int(user) res=transactions_table.find(query,{"_id":0,"cust_id":0})#ignoring the _id and cust_id fields for obj in res: for enteries in obj["Transaction"]: user_data.append(str(user)) item_data.append(str(enteries["item_id"])) target_data.append(len(enteries["item_transactions"])) return user_data,item_data,target_data #Functions for prediction algorithms -->> def calc_error(predicted,actual): error=0 for i in range(0,len(actual)): error=error+((actual[i]-predicted[i])*(actual[i]-predicted[i])) error=error/len(actual) return math.sqrt(error) def prefetch(item_id_dict,item_info): for x in item_info: for y in x["Transaction"]: if(item_id_dict.get(y['item_id'])!=None): dates=[] quantity=[] item_trans = y['item_transactions'] for z in item_trans: dates.append(z['date']) quantity.append(z['quantity']) item_id_dict[y['item_id']]["dates"]=dates item_id_dict[y['item_id']]["quantity"]=quantity return item_id_dict def removeOutliers(frequency,threshold): modified_freq=[] modified_quantity=[] for freq,arr in frequency.items(): if(len(arr)==1): modified_freq.append(freq) modified_quantity.append(arr[0]) else: z=stats.zscore(arr) for idx in range(0,len(z)): 
if(np.isnan(z[idx])==True): modified_freq.append(freq) modified_quantity.append(arr[idx]) elif(abs(z[idx]) max(dates_arr): maximum = max(dates_arr)[0] k = 0 max_quant = 0 for i in dates_arr: if (i[0] == maximum): if (quantity[k] > max_quant): max_quant = quantity[k] k += 1 return(round(max_quant)) rbf= svr_rbf.predict(dates_arr) rf=random_forest.predict(dates_arr)#rf=Random Forest rounded_rbf=[] rounded_rf=[] for i in range(0,len(rbf)): rounded_rbf.append(round(rbf[i])) rounded_rf.append(round(rf[i])) error_rbf=calc_error(rounded_rbf,quantity) error_rf=calc_error(rounded_rf,quantity) #print(error_rbf,error_rf) -->> ERROR PRINTING if(error_rbf<=error_rf): return svr_rbf.predict(predict_dates)[0] else: return random_forest.predict(predict_dates)[0] @app.route('/ml/recommend',methods=['GET']) #Main function for recommendation def recommend(): user_id = request.args.get('userid') users=get_user() #users=[25] user_data,item_data,target_data=get_data(users) user_arr=[] user_arr.append(str(user_id)) sf = tc.SFrame({'user_id':user_data,'item_id':item_data,'frequency':target_data}) m = tc.item_similarity_recommender.create(sf,target="frequency",similarity_type='cosine') #recom=m.recommend(users,k=10) UNCOMMENT IF want to test for all users recom=m.recommend(user_arr,k=10) output={} output["item_id"]=[] for items in recom["item_id"]: output["item_id"].append(items) return json.dumps(output) @app.route('/ml/predict',methods=['GET']) def predict(): userid = request.args.get('userid') transaction =read_json(url,db_name,"transactions") recent_purchases = read_json(url,db_name,"Recent_purchases")#Getting the rta table # itemlist = db.itemlist user_dict={} user_dict["cust_id"]=int(userid) item_info = transaction.find(user_dict,{"Transaction.item_transactions.date":1, "Transaction.item_transactions.quantity":1,"Transaction.item_id":1,"_id":0}) itemDetails = recent_purchases.find(user_dict,{'_id':0})#Mongo query output = [] item_id_dict={}#Stores the item and dates and quantity array item_info_dict=[] #stores the avg , last_date and item_id for item in itemDetails: for one_item in item['recents']: item_obj_dict={} item_id_dict[one_item["item_id"]]={} item_obj_dict["item_id"]=one_item["item_id"] item_obj_dict["avg"]=one_item["avg"] item_obj_dict["last_date"]=one_item["last_date"] item_info_dict.append(item_obj_dict) item_id_dict=prefetch(item_id_dict,item_info) for one_item in item_info_dict: avg = one_item['avg'] #Fetch the avg of an item for a particular user datetimeobj = datetime.datetime.now() date = datetimeobj.strftime("%Y") + "-" +datetimeobj.strftime("%m") + "-" + datetimeobj.strftime("%d") last_date_of_purchase=one_item['last_date'] t = (datetime.datetime.strptime(date,"%Y-%m-%d") - datetime.datetime.strptime(last_date_of_purchase,"%Y-%m-%d")) t = t.days avg=math.ceil(avg) if(avg !=0 and ((avg)-2)<=t and t<=(avg+3)): item_pred = {} itemid = one_item['item_id'] item_dict=item_id_dict.get(itemid) if(len(item_dict["dates"])>2 and len(item_dict["quantity"])>2): ans = algo(dates=item_dict["dates"],quantity=item_dict["quantity"],gap=t) dictionary = dict({'item_id' : itemid}) # itemName = itemlist.find( dictionary, {'item_name':1 ,'item_id':1, '_id':0}) item_pred['itemID'] = itemid # for name in itemName['item_name']: item_pred['itemName'] = "Test_items" item_pred['Quantity'] = round(ans) output.append(item_pred) # else: # print("Hello") # customer_dict={} # customer_dict["cust_id"]=user # info_dict={} # info_dict["recent.item_id"]=one_item["item_id"] # 
recent_transactions.update(customer_dict,{'$pull':info_dict}) json_output=json.dumps(output) return json_output if __name__=='__main__': app.run() # + id="t5sDyHPzWZMd" colab_type="code" colab={} # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- import sys sys.path.append('../') from PINN_Survey.problems.burgers.data.load import load_burgers_bounds from PINN_Base.util import bounds_from_data, random_choice, random_choices from PINN_Survey.problems.burgers.v1 import Burgers, Burgers_Sphere_Mesh from PINN_Survey.viz.loss_viz import viz_base, viz_2d_layer_norm, viz_mesh, viz_2d_svd, save_as_heightmap from PINN_Survey.viz.hessian_viz import lanczos_mat_free, random_unit_vector, viz_hessian_eigenvalue_ratio, Normalization import numpy as np import matplotlib.pyplot as plt # + @viz_base class Burgers_viz(Burgers): pass X_true,U_true,X_boundary,U_boundary, _ = load_burgers_bounds() lower_bound,upper_bound = bounds_from_data(X_true) X = np.vstack(X_boundary) U = np.vstack(U_boundary) X_df = random_choice(X_true) # - # We begin as before. This time we pass the add_grad_ops flag to add the ops to be able to compute the hessian # + tags=[] model_train = Burgers(0.01 / np.pi, lower_bound,upper_bound, [2,20,20,20,20,20,20,1], add_grad_ops=True) model_train.train_BFGS(X,U,X_df,True) # - # For small enough networks, we can get the explicit hessian. However, it takes too long to use it fill out the space. # + tags=[] H = model_train.get_hessian(X,U,X_df) # + tags=[] vals,vecs = np.linalg.eig(H) vals = np.sort(vals) print(np.abs(vals[0] / vals[-1])) # - # Instead, we use Lanczos iteration to get the hessian's eigenvalues across the grid. This requires the viz model as well. The ratio obtained from the true Hessian typically differs from the Lanczos by no more than one part in 1E-6. Note that this visualization looks bad due to the low resolution. I didn't have time to wait for the ~6 hours to get a higher rez version. # + tags=[] w0 = model_train.get_all_weights() model_viz = Burgers_viz(0.01 / np.pi, lower_bound,upper_bound, [2,20,20,20,20,20,20,1], add_grad_ops=True) t1s,t2s,vals, _ = viz_hessian_eigenvalue_ratio(model_viz,X,U,X_df,w0, grid_steps=10) plt.contourf(t1s,t2s,np.log(np.abs(vals)) ) plt.colorbar() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''base'': conda)' # name: python3 # --- # # Analysis # # This module implements methods for analyzing single models. The purpose of this study is to identify key metabolites that can be used to classify certain community interactions as "competitive" or "commensal". In later investigations of the community we can detect shard uptakes, indicating resource overlap and, thus, competitive interactions between species. Yet, in this case we should also verify that both species need this metabolite to grow, and that a reduction in the metabolite will also lead to a reduction in growth. 
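# As a minimal, library-independent sketch of the "resource overlap" idea described above: overlap between two species can be summarized as the Jaccard index of their uptake sets. The exchange-reaction identifiers below are made up for illustration; this is not the ncmw implementation, which is used in the code that follows.
# +
def jaccard_index(uptake_a, uptake_b):
    """Fraction of shared uptaken metabolites between two models."""
    a, b = set(uptake_a), set(uptake_b)
    if not a and not b:
        return 0.0
    return len(a & b) / len(a | b)

# hypothetical uptake sets for two community members
uptake_model_a = {"EX_glc__D_e", "EX_o2_e", "EX_nh4_e"}
uptake_model_b = {"EX_glc__D_e", "EX_o2_e", "EX_thr__L_e"}
print(jaccard_index(uptake_model_a, uptake_model_b))  # 0.5 -> substantial overlap
# -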
from ncmw.analysis import compute_uptake_sekretion_table, compute_fvas, sekretion_uptake_fva, jaccard_similarity_matrices from ncmw.visualization import plot_full_fva, plot_medium_fva_range, plot_scaled_medium_growth, plot_scaled_medium_growth, uptake_sekretion_venn_diagrams, jacard_index_similarity_heatmap, plot_growth_sensitivity from ncmw.utils import get_models from ncmw.setup_models import set_default_configs_and_snm3_medium, gapfill_medium models = get_models("models") for i in range(len(models)): models[i] = set_default_configs_and_snm3_medium(models[i]) models[i], extension = gapfill_medium(models[i]) fvas = compute_fvas(models, 1.) for model, df in zip(models, fvas): sol = model.optimize() df["flux"] = sol.fluxes # ## Flux variability analysis (FVA) # # FVA provides a way to determine possible flux values while still achieving the optimal growth rate. On the other hand, FBA yields a point estimate within this range. In any case, these values shouldn't be too large, which indicates a problem with the metabolic model.. fig = plot_full_fva(fvas[0]) # This plots the flux value of any reaction. Thereby we can make sure that none of the values in "unrealistic". # # In any case, to investigate community interaction we are majorly interested only within the FVA results of any exchange reaction. Below we plot these more nicely! fig = plot_medium_fva_range(models[0]) # These can reveal several interesting thinks. For example to achive maximal biomass rate we absolutly require all the EX_o2_e in the medium. Yet we only require some EX_thr__L_e. # ## Growth investigateion # # Different models may react differently to an increase of metabolites. Thus we are maybe interested how the growth scales with abundance of metabolites. To investigate this we multiply the medium with a certain factor and investigate how the growth changes. fig = plot_scaled_medium_growth(models) # Next we analyse the sensitivity of the biomass rate to a descreasing the concentration of a single metabolite. This will help us to identify metabolites, which are either "necessary", "necessary only up to a threshold", "irrelevant" or "only in small concentrations". f = plot_growth_sensitivity(models[0], list(models[0].medium.keys())) # ## Uptake and sekretion reactions # # A major player within a community is what a model must uptake and what it can produce. Produced metabolites can benefit other community member, to many shared uptakes can lead to competition within the communiy (Resource overlap). uptakes = [] sekretions = [] for i, model in enumerate(models): uptake, sekretion = sekretion_uptake_fva(fvas[i]) uptakes.append(uptake) sekretions.append(sekretion) fig = uptake_sekretion_venn_diagrams(models, uptakes, sekretions) fig = jacard_index_similarity_heatmap(*jaccard_similarity_matrices(models)) # All in all we now know much more properties of our models. This can be extremly valuable for the interpreation of results obtained within the community! This also serves as a small overview what the command "ncmw_analysis" can do. 
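# A hedged sketch of the sign convention behind the uptake/secretion split used above: for exchange reactions, a negative flux is usually read as uptake and a positive flux as secretion, so an FVA table can be split on the sign of its minimum/maximum columns. This only illustrates the convention and may differ in detail from ncmw's `sekretion_uptake_fva`.
# +
import pandas as pd

# made-up FVA ranges for three exchange reactions
fva_example = pd.DataFrame(
    {"minimum": [-10.0, 0.0, -2.0], "maximum": [0.0, 5.0, 3.0]},
    index=["EX_o2_e", "EX_ac_e", "EX_glc__D_e"],
)

uptake = fva_example[fva_example["minimum"] < 0].index.tolist()      # can be taken up
secretion = fva_example[fva_example["maximum"] > 0].index.tolist()   # can be secreted
print("uptake:", uptake)        # ['EX_o2_e', 'EX_glc__D_e']
print("secretion:", secretion)  # ['EX_ac_e', 'EX_glc__D_e']
# -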
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import osm2gmns as og net = og.getNetFromOSMFile('map.osm', POIs=True) og.connectPOIWithNet(net) og.generateNodeActivityInfo(net) og.outputNetToCSV(net) import grid2demand as gd # + "Step 1: Read Input Network Data" net = gd.ReadNetworkFiles('') "Step 2: Partition Grid into cells" zone = gd.PartitionGrid(number_of_x_blocks=5, number_of_y_blocks=5, cell_width=None, cell_height=None, latitude=30) # user can customize number of grid cells or cell's width and height "Step 3: Get Production/Attraction Rates of Each Land Use Type with a Specific Trip Purpose" triprate = gd.GetPoiTripRate(trip_rate_folder='',trip_purpose=1) # user can customize poi_trip_rate.csv and trip purpose "Step 4: Define Production/Attraction Value of Each Node According to POI Type" nodedemand = gd.GetNodeDemand() "Step 5: Calculate Zone-to-zone Accessibility Matrix by Centroid-to-centroid Straight Distance" accessibility = gd.ProduceAccessMatrix(latitude=30, accessibility_folder='') # user can customize the latitude of the research area and accessibility.csv "Step 6: Apply Gravity Model to Conduct Trip Distribution" demand = gd.RunGravityModel(trip_purpose=1, a=None, b=None, c=None) # user can customize friction factor coefficients under a specific trip purpose "Step 7: Generate Agent" demand = gd.GenerateAgentBasedDemand() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## ECON 628: Final project # # ### Using Python and R to georeference polling stations in Rio de Janeiro, Brazil. # # ### # ### Contents # # - [Introduction](#Introduction) # - [Data](#Downloading-data) # - [Google Maps Platform](#Google-Maps-Platform) # - [GPS Coordinates](#GPS-Coordinates) # - [Maps in R](#Maps-using-R) # # # # ### Introduction # # In 2008, the state government of Rio de Janeiro (Brazil) launched a policy to reclaim territories (slums) that had been under control of drug gangs for decades. I am working on a research project to investigate if restablishing state control over these territories have effects on electoral oucomes. Electoral data is very rich: information on voters and candidates are available at the polling station level. # # This project requires me to match slums in Rio to nearby polling stations in order to compare the policy's effects on votes in reclaimed slums (treatment group) to slums dominated by a drug gang (control group). # # I have the geographical limits of slums in Rio (georreferenced) and the address for all the polling stations in the 2018 general elections. My goal in this project is to use the addresses to find their GPS location and create a shapefile so that I can calculate distances between the slums and the polling stations. This will be helpful for me because I want to invest some time in learning how to use spatial data. # # I used Python to do the geocoding using Google Maps Platform. The reason I used Python instead of R for this step (although I had never used Python before) is that I was having trouble installing the necessary packages in R, while they are already default in Python. The result is a database including addresses and geographical coordinates for polling stations in the city of Rio. 
Then, I used R to create a shapefile and plot voting stations and slums within the city in a map. # # All necessary files, including this notebook, are saved in [this repository](https://github.com/pedropessoa/VotingLocations). # # ### Downloading data # # Source: Tribunal Regional Eleitoral do Rio de Janeiro (TRE-RJ, Regional Electoral Court) # # I used Python to donwload the original data from the TRE's website. The dataset is an Excel spreadsheet containing addresses for all voting locations for the 2018 elections in the state of Rio de Janeiro. The reason I used Python instead of R for this step is that I was having trouble installing the necessary packages in R, while they are already default in Python. # # I also saved the original data on the repo, but decided to code everything from downloading it to the final output in order to keep it transparent. # # + import urllib.request DatasetURL = 'http://www2.tre-rj.jus.br/site/eleicoes/2018/arquivos/DeParaSecoes_2Turno.xls' DatasetFile = 'PollingStationsRJ.xlsx' urllib.request.urlretrieve(DatasetURL, DatasetFile) # - # I made some changes to the original dataset. # # - Removed other cities in the state of Rio other than the capital, Rio de Janeiro, which is my area of interest. # # - Removed special characters from the address variable using `unicodedata` because this will be necessary to retrieve GPS coordinates using the Google Maps Platform in the next step. # # This piece of code reads through the spreadsheet and stores its contents in a list. I end up with 1253 unique addresses. # # + import xlrd import unicodedata worksheet = xlrd.open_workbook(DatasetFile).sheet_by_index(0) # using dictionary so that code does not depend on order of cols. # as long as I have right var names... : header_cols = dict() for colNum in range(worksheet.ncols): header_cols[worksheet.cell_value(0, colNum)] = colNum # reads lines in spreadsheet and store data as list addresses = dict() for rowNum in range(worksheet.nrows): if worksheet.cell_value(rowNum, header_cols['MUNICIPIO']) != 'RIO DE JANEIRO': continue #keep Rio only data = list() for colName in ['ZONA ATUAL','SECAO ATUAL','LOCAL','ENDERECOLOCAL']: entry = worksheet.cell_value(rowNum, header_cols[colName]) entry = unicodedata.normalize('NFKD', str(entry)).encode('ascii','ignore').decode('utf-8') #rm special chars. data.append(entry) address = worksheet.cell_value(rowNum, header_cols['ENDERECOLOCAL']) address = unicodedata.normalize('NFKD', entry).encode('ascii','ignore').decode('utf-8') addresses[address] = data # - # The output is a dictionary using addresses as key to store the variables, which include the name of the location (usually a school) and its administrative code (which will be necessary to match with other datasets on electoral outcomes available in TSE's website). For e.g.: addresses['AV. PRESIDENTE VARGAS 642, CENTRO'] # ### Geocoding (Latitude/Longitude Lookup) # # I used [Geocoding API](https://developers.google.com/maps/documentation/geocoding/intro#geocoding) requests to convert addresses into geographic coordinates. This Geocoding API request is a http request using the Google Maps Platform to retrieve latitude and longitude for a particular address. The request looks like this: # # `https://maps.googleapis.com/maps/api/geocode/json?parameters` # # Where the two fundamental parameters are the address and your [API key](https://developers.google.com/maps/documentation/geocoding/get-api-key) to use Google's geocoding. 
E.g.: # # `https://maps.googleapis.com/maps/api/geocode/json?address=AV.+PRESIDENTE+VARGAS+642,+CENTRO&key=` # # The output of the API request an array of geocoded address information and geometry information, including latitude and longitude, in Json format. The code below sends an API request for each address in `addresses` and extracts the latitude and longitude from the results. # # Some errors might cause the request to fail: # # - Sometimes the connection breaks. This may happen because you reach the maximum no. of request with your unpaid API key. # # - The request might fail. This happened, for example, when there were special characters. # # - The address is not found. # # I wrote the code so that it doesn't crash if an error occurs (using `try`) and it helps identify which part didn't work. Also, if the request went through but lat/lon were not obtained for any reason, it tries again (up to 3 times). If it fails to get lat/lon for an address, it returns 0. # + import json api='https://maps.googleapis.com/maps/api/geocode/json?' api_key='' for key in addresses: print('Geocoding for : {}'.format(address)) address = key.replace(' ', '+') lat = 0.0 lng = 0.0 for attempt in range(3): try: response = urllib.request.urlopen('{}address={}&key={}'.format(api, address, api_key)) except Exception as err: print('HTTP ERR:') print(err) break # let me know if error bc. request didn't go through try: geo = json.load(response) lat = geo['results'][0]['geometry']['location']['lat'] lng = geo['results'][0]['geometry']['location']['lng'] break # successfully retrieved lat/lon except Exception as err: print('Attempt {} failed'.format(attempt + 1)) print(err) print(geo) print(lat, lng) # let me know if request went through but failed retrieving lat/lon # return zeros lat = 0.0 lng = 0.0 addresses[key].append(str(lat)) addresses[key].append(str(lng)) # - # Print the dataset including GPS location in a `.tsv` file. out_file = open('PollsGeocoded.tsv', 'w+') for key in addresses: print('\t'.join(addresses[key]), file=out_file) out_file.close() # ### Maps using R # # `PollsPlotting.R` plots the voting locations and slums in a map of the city of Rio. # # First, I used the coordinates that I obtained to create a shapefile with voting locations. I specify the coordinates and its corresponding coordinate referencing system (CRS) using `SpatialPointsDataFrama` from the package `sp`. # # Second, since the coordinates data I got using geocoding uses the WGS84 coordinate system and my shapefiles with the geographical limits of Rio and its slums use SAD69, I transformed the CRS from WGS84 to SAD69 using `spTransform` from package `Rgdal`. # # Finally, I load and plot the shapefiles using the `rgeos` package. My map looks like this: # # # # The voting locations are the red crosses, slums are in blue, and slums that were retaken by the police (treatment group) are green. The map is a bit messy, but I am glad it works! I had a hard time discovering how to make the coordinates systems compatible so that all the layers are properly located. # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction: # - Usually, the 40% of emails received by a people are SPAM. Several email systems contain heuristics for detecting this type of emails. Hence, in this course, you should create a model able for predicting spam emails. 
# - You can download a training dataset with 4,136 objects, which contain the text of emails as well as a label for each email; a value of zero (0) corresponds to human and one (1) to spam. # Importing the libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import string import seaborn as sns from nltk.stem import SnowballStemmer from nltk.corpus import stopwords from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import cross_val_score from sklearn.ensemble import RandomForestClassifier dataset=pd.read_csv("spam.csv") dataset.head() dataset.tail() dataset.shape dataset.columns dataset = dataset.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1) dataset = dataset.rename(columns={"v1":"class", "v2":"text"}) dataset.head() dataset['length'] = dataset['text'].apply(len) dataset.head() sns.countplot(x="class",data=dataset) def pre_process(text): text = text.translate(str.maketrans('', '', string.punctuation)) text = [word for word in text.split() if word.lower() not in stopwords.words('english')] words = "" for i in text: stemmer = SnowballStemmer("english") words += (stemmer.stem(i))+" " return words textFeatures = dataset['text'].copy() textFeatures = textFeatures.apply(pre_process) vectorizer = TfidfVectorizer("english") features = vectorizer.fit_transform(textFeatures) x_train, x_test, y_train, y_test = train_test_split(features, dataset['class'], test_size=0.3, random_state=111) svc = SVC(kernel='sigmoid', gamma=1.0) svc.fit(x_train, y_train) prediction = svc.predict(x_test) accuracy_score(y_test,prediction) rfc = RandomForestClassifier(random_state = 42) rfc.fit(x_train, y_train) prediction_on_training_data =rfc.predict(x_train) accuracy_on_training_data = accuracy_score(y_train, prediction_on_training_data) print('Accuracy on training data : ', accuracy_on_training_data *100) knn_cv = KNeighborsClassifier(n_neighbors=3) knn_cv.fit(x_train, y_train) prediction_on_training_data = knn_cv.predict(x_train) accuracy_on_training_data = accuracy_score(y_train, prediction_on_training_data) print('Accuracy on training data : ', accuracy_on_training_data *100) # # Conclusion: # Hence, the accuracy of random forest is more, this model is used to build the model. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #
# # ## Open Machine Learning Course #
Material author: , @kulikovpavel. #
ELI5 - a library for visualizing and debugging ML models
# # + [markdown] _uuid="f43ebff8d97c806cd841e0ba22645d3afd39a365" # Links: # # [Documentation](http://eli5.readthedocs.io/en/latest/) (excellent!) # # [Github](https://github.com/TeamHG-Memex/eli5/blob/master/docs/source/index.rst) # # Authors: ([@kmike](https://opendatascience.slack.com/messages/@U064DRUF4)), ([@kostia](https://opendatascience.slack.com/team/U0P95857C)) # # [Motivation video](https://www.youtube.com/watch?v=pqqcUzj3R90) # # Installation # # ```pip install eli5``` # > # + [markdown] _uuid="2643287e8f661e74bad803c16744aac1c2490775" # Out of the box the library works with linear models, trees and ensembles (scikit-learn, xgboost, LightGBM, lightning, sklearn-crfsuite) and displays feature importances in a readable form; it can also render trees as text or as images. In addition, it offers useful prediction-analysis functionality: you can visually assess why the model produced a particular result for a particular example. # # ![](https://raw.githubusercontent.com/TeamHG-Memex/eli5/master/docs/source/static/word-highlight.png) # # It works with pipelines, including HashingVectorizer, and even with black-box preprocessing, via an implementation of the [LIME](https://arxiv.org/abs/1602.04938) algorithm. # # The library's documentation and examples are so good that here I will simply analyze a couple of datasets; for everything else, see the authors' site. # # + [markdown] _uuid="2dc415580683adda95341c92e549a32af64ba5fc" # ## XGBClassifier and LogisticRegression, categorical # # Young People Survey. Explore the preferences, interests, habits, opinions, and fears of young people # # [Dataset link](https://www.kaggle.com/miroslavsabo/young-people-survey) # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" import eli5 import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g.
pd.read_csv) # + _cell_guid="82f01488-9c67-4400-801b-e5a459b6a3ec" _uuid="e0a0617445dfd0f1920afe2b3c7dc7a78eb73b14" df = pd.read_csv('responses.csv') # + _uuid="6885b2da2bfb78a5c0bc2befe6c6b9124e622b84" df.head() # + [markdown] _uuid="4f3808787e302949866899ec66ac796d4ba5a729" # Возьмем в качестве целевой переменной место, где живет человек, деревня или город # + _cell_guid="289d7028-83ce-4756-87c5-3f1f5496346e" _uuid="5105719a67111030aa10c2a7f730de0842956b26" df['Village - town'].value_counts() # + _uuid="5d6c95640560eb9f32f17592d014829a8a4a2d3f" df['Village - town'].fillna('city', inplace=True) # + _cell_guid="90792a77-1fe5-4a0b-9dd2-d7710794c4db" _uuid="55f2de35757110684fd9a1bb46d493e9db47cf84" X = df.drop(['Village - town'], axis=1) # + _cell_guid="65e19591-5c5d-494c-80b1-d7aba0e93bb5" _uuid="fc3cf4062f6e4059c34c02be593a7b6b001efaee" target = df['Village - town'].map(dict(city=0, village=1)) # + _cell_guid="68f5cd71-3644-437b-8595-ddb3be814970" _uuid="45086a710c91741abdd9e5335ac7ae619fe3cfbd" import warnings # xgboost <= 0.6a2 shows a warning when used with scikit-learn 0.18+ warnings.filterwarnings('ignore', category=DeprecationWarning) from sklearn.base import BaseEstimator, TransformerMixin from sklearn.feature_extraction import DictVectorizer from sklearn.model_selection import cross_val_score from sklearn.pipeline import make_pipeline from sklearn.preprocessing import LabelBinarizer from xgboost import XGBClassifier, XGBRegressor # workaround for xgboost 0.7 def _check_booster_args(xgb, is_regression=None): # type: (Any, bool) -> Tuple[Booster, bool] if isinstance(xgb, eli5.xgboost.Booster): # patch (from "xgb, Booster") booster = xgb else: booster = xgb.get_booster() # patch (from "xgb.booster()" where `booster` is now a string) _is_regression = isinstance(xgb, XGBRegressor) if is_regression is not None and is_regression != _is_regression: raise ValueError( 'Inconsistent is_regression={} passed. ' 'You don\'t have to pass it when using scikit-learn API' .format(is_regression)) is_regression = _is_regression return booster, is_regression eli5.xgboost._check_booster_args = _check_booster_args # + _uuid="bf0d07a7351028defb87e4745d5681a9ac62886e" def prepare_df(data, columns=None): if not columns: columns = data.columns.values arr_categorial = list() for col in columns: lb = LabelBinarizer() transformed = lb.fit_transform(data[col].astype('str')) arr_categorial.append(pd.DataFrame(transformed, columns=col + '__' + lb.classes_.astype('object')).to_sparse()) concated_df = pd.concat([data.drop(columns, axis=1)] + arr_categorial, axis=1).to_sparse() return concated_df categorical_columns = ['Smoking', 'Alcohol', 'Punctuality', 'Lying', 'Internet usage', 'Gender', 'Left - right handed', 'Education', 'Only child', 'House - block of flats'] binarized_x = prepare_df(X, categorical_columns) # + _uuid="bedfcb37680eb83a43a76c0e8ca3aa730c2f1bed" xgb = XGBClassifier() def evaluate(_clf, df, target): scores = cross_val_score(_clf, df, target, scoring='roc_auc', cv=10) print('Accuracy: {:.3f} ± {:.3f}'.format(np.mean(scores), 2 * np.std(scores))) _clf.fit(df, target) # so that parts of the original pipeline are fitted evaluate(xgb, binarized_x, target) # + _uuid="c1920c1a103048129db34e19b64b859b69e8d181" eli5.explain_weights(xgb, top=50) # + [markdown] _uuid="7e22b44c7da6dab68f27252405450d0fd441ac70" # Важность признаков для классификатора. По умолчанию используется прирост информации, "gain”, среднее значение по всем деревьям. Есть другие варианты, можно поменять через свойство importance_type. 
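# A minimal sketch of switching the importance measure, assuming this eli5 version forwards the `importance_type` keyword ('gain', 'weight', 'cover') to its XGBoost handler, as documented for `explain_weights`. It reuses the `xgb` classifier fitted above.
# +
# show raw split counts ("weight") instead of the default "gain"
eli5.show_weights(xgb, importance_type='weight', top=20)
# -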
# # Мы можем взглянуть теперь на конкретный пример # + _uuid="e5c8bccb3240157597f2bf4afd3553a37025325f" eli5.show_prediction(xgb, binarized_x.iloc[300], show_feature_values=True) # + [markdown] _uuid="914540b58a0bc130ba933c4d85a21af810df9b91" # Получили, что данный участник, вероятно, живет в городе, потому что не живет в квартире, тратит деньги на благотворительность и носит брендовые вещи # # Посмотрим на логистическую регрессию # + _uuid="9193fd78e60546d9d4d93766971d60acf3284d44" from sklearn.linear_model import LogisticRegression lr = LogisticRegression() evaluate(lr, binarized_x.fillna('0'), target) # + _uuid="" eli5.show_weights(lr, feature_names=binarized_x.columns.values, top=100) # + _uuid="bbf19bb2535fae6ceae89e926b4d4b65bb37ea11" eli5.show_prediction(lr, binarized_x.iloc[300].fillna('0'), show_feature_values=True) # + [markdown] _uuid="e01720f15d6494f2027a6debbdbe6bdc4bcbfd02" # Сразу заметно, что мы допустили ошибку (не отскалировали величины), и логистическая регрессия напрасно берет вес и рост как сильный значимый фактор, причем вес в плюс, а рост в минус, по сути компенсируя взаимно (факторы скоррелированы). И возраст тоже. Переобучение. # + [markdown] _uuid="9044071b9217988bca25d011fb35675d0dbe4a0e" # ## Анализ текста # # First GOP Debate Twitter Sentiment. Analyze tweets on the first 2016 GOP Presidential Debate # # [Ссылка на датасет](https://www.kaggle.com/crowdflower/first-gop-debate-twitter-sentiment) # + from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer from sklearn.linear_model import LogisticRegressionCV from sklearn.pipeline import make_pipeline df = pd.read_csv("Sentiment.csv.zip") # - vec = CountVectorizer() clf = LogisticRegressionCV() pipe = make_pipeline(vec, clf) pipe.fit(df.text, df.sentiment) eli5.show_weights(clf, vec=vec, top=20) eli5.show_prediction(clf, df.iloc[140].text, vec=vec) vec = TfidfVectorizer(analyzer='char_wb', ngram_range=(3,10), max_features=20000) clf = LogisticRegressionCV() pipe = make_pipeline(vec, clf) pipe.fit(df.text, df.sentiment) eli5.show_weights(clf, vec=vec, top=20) eli5.show_prediction(clf, df.iloc[140].text, vec=vec) # При работе с большими объемами часто применятеся HashingVectorizer, для уменьшения размерности признакового пространства. ELI5 поддерживает работу с такими преобразованиями с помощью инвертирования. # # ``` # from eli5.sklearn import InvertableHashingVectorizer # import numpy as np # # vec = HashingVectorizer(stop_words='english', ngram_range=(1,2)) # ivec = InvertableHashingVectorizer(vec) # sample_size = len(twenty_train.data) // 10 # X_sample = np.random.choice(twenty_train.data, size=sample_size) # ivec.fit(X_sample); # ``` # # http://eli5.readthedocs.io/en/latest/libraries/sklearn.html#reversing-hashing-trick # ## LIME, черный ящик в текстовой обработке # # Идея заключается в том, чтобы чуть-чуть менять входные строки, убирать случайным образом слова-символы, и смотреть как меняются предсказания модели, таким образом запоминать их влияние на на модель # # http://eli5.readthedocs.io/en/latest/tutorials/black-box-text-classifiers.html # # ### # # # # # # +7 903 118 37 41 # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # The BWT # ------- # # Follow along at doi:10.1093/bioinformatics/btp324, _Fast and accurate short read alignment with Burrows–Wheeler transform_ by and . 
# # But note that a couple of their definitions seem to be incorrect. Adjustments will be noted. # # For an alphabet of symbols: # # $\Sigma = [ A, C, G, T ] $ # # $\alpha = $ one symbol of alphabet $\Sigma$ # # ${X}$ = source string $\alpha_{0}\alpha_{1}\ldots\alpha_{n-1}$ # # $\$ =$ end of string # # ${n}$ = number of symbols in ${X}$ # # ${X}[i] = \alpha_i$ # # ${i} = 0,1,\ldots,{n-1}$ # # ${X}[i,j] = \alpha_i\ldots\alpha_j$ (substring) # # ${X_i} = X[i,n-1]$ (suffix) # # $S(i) = i$ th lexicographically smallest suffix (aka index into X where suffix starts) # # $B$ is the BWT string: list of symbols that precede the first symbol in the sorted suffix list # # >$B[i] = \$$ when $S(i) = 0$ # # >$B[i] = X[S(i) - 1]$ # # $W =$ a potential substring of $X$ # # Bounds: # # >$\underline{R}(W) = min\{k:W $ is the prefix of $X_{S(k)}\}$ # # >$\overline{R}(W) = max\{k:W $ is the prefix of $X_{S(k)}\}$ # # For empty string $W = \$$: # # >$\underline{R}(W) = 0$ # # >$\overline{R}(W) = n - 1$ # # (Note that Li and Durbin define $\underline{R}(W) = 1$ for empty string W to eliminate the need to define $O(\alpha, -1)$, but this leads to off-by-one errors later.) # # Set of positions of all occurrences of $W$ in $X$: # # >$\{S(k):\underline{R}(W) <= k <= \overline{R}(W)\}$ # # Is W a substring of X? # # > If $\underline{R}(W) > \overline{R}(W)$ then $W$ is not a substring of $X$. # # > If $\underline{R}(W) = \overline{R}(W)$ then $W$ matches exactly one BWT entry. # # > If $\underline{R}(W) < \overline{R}(W)$ then $W$ matches all BWT entries between (inclusive). # # $SA$ interval $= [ \underline{R}(W), \overline{R}(W) ]$ # # Backward search in $O(|W|)$ time: # # >$C(\alpha) =$ # of symbols in $X[0,n-1)$ (exclusive!) that are lexicographically smaller than $\alpha$ # # >$O(\alpha,i) =$ # of occurrences of $\alpha$ in $B[0,i]$ (inclusive!) # # By Spiral's definition: # # >$O(\alpha, -1) = 0$ # # If $W$ is a substring of $X$: # # >$\underline{R}(\alpha{W}) = C(\alpha) + O(\alpha,\underline{R}(W)-1) + 1$ # # >$\overline{R}(\alpha{W}) = C(\alpha) + O(\alpha, \overline{R}(W))$ # # + # For string X X = "ATTGCTAC$" # Calculate all suffixes suffixes = sorted([X[i:] for i in range(len(X))]) print "# suffix" for i, suffix in enumerate(suffixes): print "{i} {suffix}".format(i=i, suffix=suffix) # + # Calculate S S = [] for suffix in suffixes: S.append(X.find(suffix)) print S # + # C(a) = # of symbols in X[0,n−1) that are lexicographically smaller than a. # Precalculate the C(a) table. This lets us look up C(a) without knowing B. 
Ca = {} # all unique symbols in X except for $ symbols = ''.join(sorted(list(set(X)))[1:]) for symbol in symbols: print symbol + ': ' + str([x for x in X[:-1] if x < symbol]) Ca[symbol] = len([x for x in X[:-1] if x < symbol]) print '\n', Ca # + # B: X[S(i)-1] def B(i): return X[S[i]-1] # n == |X| == |B| == |S| n = len(X) # String representation of B B_str = ''.join([B(i) for i in range(n)]) print B_str print n # + # O(a,i): number of occurrences of a in B up to index i (inclusive) def O(a, i): if i < 0: return 0 # O(a, -1) == 0 count = 0 for base in B_str[:i+1]: if base == a: count += 1 return count # r underbar: first suffix that matches W (silly linear search) def r(w): if not w: return 0 # r('') == 0 for i, suffix in enumerate(suffixes): if w == suffix[:len(w)]: return i return n # R overbar: last suffix that matches W (silly linear search) def R(w): if not w: return n - 1 # R('') = n - 1 for i, suffix in enumerate(suffixes[::-1]): if w == suffix[:len(w)]: return n - i - 1 return 1 # SA value: compute [i,j] for W def SA(w): return [r(w), R(w)] # + # Let's find SA values for some substrings print "# suffix" for i, suffix in enumerate(suffixes): print "{i} {suffix}".format(i=i, suffix=suffix) print "\nB = " + B_str + "\n" for symbol in symbols: print symbol + ':', SA(symbol) queries = [ 'GCT', # i == j, exactly one match 'GC', # i == j, exactly one match 'GA', # i > j, not in X 'T', # i < j, more than one match '', # empty string, full range ] for q in queries: print "SA('" + q + "') = " + str(SA(q)) # + # Calculate bitcounts, saving start entry of each base from copy import deepcopy bitcounts = [{'A':0, 'C':0, 'G':0, 'T':0}] for i, f in enumerate(S): prev = deepcopy(bitcounts[i]) this = {} for base in "ACGT": this[base] = prev[base] if(f): base = X[f - 1] this[base] = this[base] + 1 bitcounts.append(this) # Drop the placeholder bitcounts[0] row bitcounts = bitcounts[1:] print "Bitcounts:\n" for i, b in enumerate(bitcounts): print "{i} {b}".format(i=i, b=b) # - # A little bit of bit math # ------------------------ # # The whole point of this exercise is to quickly find the position(s) in $X$ where $W$ is an exact match (if any). One more simplification lets us calculate $O(\alpha, i)$ with bitcount lookups: # # >$\underline{R}(\alpha{W}) = C(\alpha) + O(\alpha,\underline{R}(W)-1) + 1$ # # >$O(\alpha,\underline{R}(W)-1) = \underline{R}(\alpha{W}) - C(\alpha) - 1$ # # If $\underline{R}(\alpha{W})$ and $C(\alpha)$ are known, then $\underline{R}(W)-1$ becomes a lookup in the bitcount table. # # + # fast_O(a,i): lookup r(W) in the bitcounts table def fast_O(a, i): if i < 0: return 0 return bitcounts[i][a] # The two methods are equivalent for a in symbols: for i in range(n): if O(a, i) != fast_O(a, i): raise RuntimeError("O({0},{1}) {2} != {3}".format(a, i, O(a, i), fast_O(a, i))) print "O(a,i) == fast_O(a,i)" # + # r underbar: lower limit of substring W in BWT # by Spiral's definition, r('$') == 0 r_cache = {'': 0} def fast_r(w): # Precache all substrings. We're gonna need them. 
for aW in [w[i:] for i in range(len(w))][::-1]: if(not aW in r_cache): a = aW[0] W = aW[1:] r_cache[aW] = Ca[a] + fast_O(a, fast_r(W) - 1) + 1 return r_cache[w] # R overbar: upper limit of substring W in BWT # by definition, $('$') == n - 1 R_cache = {'': n - 1} def fast_R(w): for aW in [w[i:] for i in range(len(w))][::-1]: if(not aW in R_cache): a = aW[0] W = aW[1:] R_cache[aW] = Ca[a] + fast_O(a, fast_R(W)) return R_cache[w] # SA value: compute [i,j] for W def fast_SA(w): return [fast_r(w), fast_R(w)] # + print "# suffix" for i, suffix in enumerate(suffixes): print "{i} {suffix}".format(i=i, suffix=suffix) print for symbol in symbols: print symbol + ':', SA(symbol), fast_SA(symbol) print queries = [ 'GCT', # i == j, exactly one match 'GA', # i > j, not in X 'T', # i < j, more than one match '', # empty string, full range ] for q in queries: print "SA('" + q + "') = " + str(SA(q)) + ' ' + str(fast_SA(q)) # + # Century table: getting back to X # Keep a position entry every (mod) positions in the original sequence mod = 3 # Also track the cumulative count of century bits (a la bitcount) centcount = [0] print "Century bits:\n" print "# c o suffix" for i, s in enumerate(suffixes): centbit = 0 if S[i]%mod else 1 centcount.append(centcount[-1] + centbit) print "{i} {m} {o} {s}".format(i=i, m=centbit, o=centcount[i], s=s) century = [] for i, f in enumerate(S): if not S[i]%mod: century.append(f) print "\nCentury table:\n" print "# pos" for i, c in enumerate(century): print "{i} {c}".format(i=i, c=c) # + def w_to_x(W): (i, j) = fast_SA(W) reply = [] for k in range(i, j + 1): e = k # e is the bwt entry under examination w = W # we will be pushing bases to the front of w d = 0 # distance from century entry (# of pushes) # No need to store the full century table: if centcount goes up on the next entry, # then this is a century entry. while centcount[e + 1] - centcount[e] == 0: w = B_str[e] + w e = fast_r(w) d += 1 reply.append(century[centcount[e]] + d) return sorted(reply) def find_me(W): print '{}:'.format(W) for pos in w_to_x(W): if W != X[pos:pos + len(W)]: raise RuntimeError("X[{}] {} != {}".format(pos, X[pos:pos + len(W)], W)) print " X[{}] == {}".format(pos, X[pos:pos + len(W)]) # + print "X = {}\n".format(X) print 'All symbols:' for base in symbols: find_me(base) print queries = [ 'GCT', 'AAA', 'TGCTAC', 'ATT', 'TGCTA', 'CTA', 'CTTAGGAGAAC', 'AC' ] print 'Canned lookups:' for q in queries: find_me(q) print # + def revcomp(seq): flip = { 'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a', 'n': 'n', '$': '' } reply = '' for c in seq: reply += flip[c] # Got a $? Leave a $. 
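# ('$' maps to the empty string in flip, so it is re-added at the front here and
# ends up back at the end of the string after the final [::-1] reversal.)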
if c == '$': reply = '$' + reply return reply[::-1] def find_me_twice(W): print 'Query {}:'.format(W) for pos in w_to_x(W): if W != X[pos:pos + len(W)]: raise RuntimeError("X[{}] {} != {}".format(pos, X[pos:pos + len(W)], W)) print " X[{}] >> {}".format(pos, X[pos:pos + len(W)]) for pos in w_to_x(revcomp(W)): if revcomp(W) != X[pos:pos + len(W)]: raise RuntimeError("X[{}] {} != {}".format(pos, revcomp(W), X[pos:pos + len(W)])) rpos = n - 1 - pos - len(W) # Position in reverse complement(X) print " R[{}] == X[{}] << {}".format(rpos, pos, revcomp(X)[rpos:rpos + len(W)]) print X print revcomp(X) print find_me_twice('T') # + from random import randrange, choice runs = 10 print '{} random substring lookups:'.format(runs) for i in range(runs): r = randrange(0, n - 2) cache = [] q = 'x' while q not in cache: q = X[r:randrange(r + 1, n - 1)] cache.append(q) find_me_twice(q) print '\n{} random 2-mer lookups:'.format(runs) for i in range(runs): cache = [] q = 'x' while q not in cache: q = choice(symbols) + choice(symbols) cache.append(q) find_me_twice(q) print '\n{} random 3-mer lookups:'.format(runs) for i in range(runs): cache = [] q = 'x' while q not in cache: q = choice(symbols) + choice(symbols) + choice(symbols) cache.append(q) find_me_twice(q) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import sys, os base_path = os.getcwd()[0:os.getcwd().rfind('Watermark')] + "Watermark/" sys.path.append(base_path) import matplotlib.pyplot as plt from src.asiaccs_main import asiaccs_whitebox from src.models import get_deep_cnn_for_cifar, get_lenet_model_for_mnist from src.preprocess_data import load_cifar_images, load_mnist_images from src.util import plot_whitebox # - # # Description # In this notebook we are running a surrogate model attack. The attacker and owner data is disjoint. surr_model, all_history = asiaccs_whitebox( load_dataset_func=load_mnist_images, # Which dataset to choose. 
Should return training and testing data dataset_label="MNIST", # Label of the dataset (for caching) load_wm_model_func=get_lenet_model_for_mnist, # Model specification for wm_embedding wm_type="logo", # logo or gaussian owner_data_size=30000, total_owner_data_size=30000, key_length=5000, key_length_test=1000, attacker_data_size=30000, total_attacker_data_size=30000, attacker_data_size_reg=3000, epochs_embed=10, epochs_reg=30, epochs_surr=10, freeze_first_layers=0, early_stopping_wm_reg=0.1, patience_reg=2, lr_surr=0.001, reg_whitebox=0.003, reg_surr=0, batchsize_reg=64, batchsize_surr=64, cache_embed_wm="asiaccs_logo_mnist_30000", cache_reg_model=None, cache_surr_model=None, verbose=True ) plot_whitebox(all_history) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:.conda-3d_env] * # language: python # name: conda-env-.conda-3d_env-py # --- # + # %matplotlib inline import numpy as np import sys import os import matplotlib.pyplot as plt import math import pickle import pandas as pd import scipy.io import time import h5py import csv from mpl_toolkits.mplot3d import Axes3D from matplotlib.colors import ListedColormap, LinearSegmentedColormap from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable from numpy import linalg as LA from scipy.spatial import Delaunay from sklearn.neighbors import NearestNeighbors #sys.path.insert(0, "../") from info3d import * from nn_matchers import * from query_sets import * # + # Global parameters radius_range = np.arange(0.5,1.6,0.5) with open('point_collection/new_contiguous_point_collection.pickle','rb') as f: new_contiguous_point_collection = pickle.load(f) point_collection_indices = np.arange(len(new_contiguous_point_collection)) point_collection_indices # - # # Step 0.1: Getting sample points for one-time partial radius # + samples = 1000 sample_points = [] samples_indeces = [] t0 = time.time() for i in np.arange(samples): random_object = np.random.choice(point_collection_indices) object_name = new_contiguous_point_collection[random_object][0] pointCloud = new_contiguous_point_collection[random_object][1] triangles = new_contiguous_point_collection[random_object][2] triangle_index = np.random.choice(np.arange(len(triangles))) vertex_index = triangles[triangle_index,1] original_vertex = pointCloud[vertex_index] sample_points.append([ random_object, object_name, original_vertex ]) samples_indeces.append(random_object) print("Done generating",len(sample_points),"samples in {:.3f} seconds.".format(time.time()-t0)) with open('sample_points.pickle','wb') as f: pickle.dump(sample_points,f) plt.title("Distribution of the sample spaces") plt.xlabel("Count") plt.ylabel("Sample space") plt.hist(samples_indeces,bins = np.arange(0,8)) # - # # Step 0.2: Creating a synthetic set of successive partial spaces # # Similar to the partial case above, we use the same sample points, i.e. centroids, for successive releases but will only vary the size of the partial space for every release. 
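# As a minimal, self-contained sketch of the kd-tree neighbourhood query that the generation cells below lean on (the array, centre and radius values here are illustrative assumptions, not data from this notebook): a partial space is just the set of points whose distance to a chosen centre vertex is at most the release radius.

# +
import numpy as np
from sklearn.neighbors import NearestNeighbors

# Toy stand-in for one object's point cloud and a chosen centre vertex.
toy_pointcloud = np.random.rand(1000, 3)
toy_centre = toy_pointcloud[0]
toy_radius = 0.2

# Fit a kd-tree, take the nearest neighbours of the centre, then keep only those
# within toy_radius -- the same distances/indices filtering pattern used below.
toy_nbrs = NearestNeighbors(n_neighbors=min(200, len(toy_pointcloud)),
                            algorithm='kd_tree').fit(toy_pointcloud)
distances, indices = toy_nbrs.kneighbors([toy_centre])
toy_partial_space = toy_pointcloud[indices[0, np.where(distances[0] <= toy_radius)[0]]]
print(toy_partial_space.shape)
# -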
# # + samples = 100 releases = 100 nearby_range = 2.0 t1 = time.time() successive_sample_points = [] for i in np.arange(samples):# random_object = np.random.choice(point_collection_indices) #reference_ransac = np.random.randint(5) object_name = new_contiguous_point_collection[random_object][0] pointCloud = new_contiguous_point_collection[random_object][1] triangles = new_contiguous_point_collection[random_object][2] current_vertex = pointCloud[np.random.randint(len(pointCloud))] growing_point_collection_vertices = [[ random_object, object_name, current_vertex ]] nbrs = NearestNeighbors(n_neighbors=min(20000,len(pointCloud)),algorithm='kd_tree').fit(pointCloud[:,:3]) for release in np.arange(releases-1): distances, indices = nbrs.kneighbors([current_vertex[:3]]) cand_indices = indices[0,np.where(distances[0]<(nearby_range))[0]] distribution = np.sort(abs(np.random.normal(nearby_range*0.5,nearby_range*0.3,len(cand_indices)))) current_vertex = pointCloud[ np.random.choice( cand_indices, p = distribution/np.sum(distribution) ) ] growing_point_collection_vertices.append([ random_object, object_name, current_vertex ]) successive_sample_points.append([ [random_object, object_name], growing_point_collection_vertices ]) if i % 33 == 1: print(" Done with successive {} sample_points extraction in {:.3f} seconds".format(i,time.time()-t1)) t1 = time.time() with open('successive_sample_points.pickle','wb') as f: pickle.dump(successive_sample_points,f) # + t1 = time.time() try: with open('successive_sample_points.pickle','rb') as f: successive_point_collection = pickle.load(f) samples = len(successive_point_collection) releases = len(successive_point_collection[0][1]) print(samples,"samples for radius",radius) print(releases,"releases each") except Exception as e1: print(e1) successive_sample_points_per_release = [[]] for k, [obj_, growing_point_collection] in enumerate(successive_point_collection): t2 = time.time() successive_sample_points = [] reference_ransac = np.random.randint(5) for i, obj_meta in enumerate(growing_point_collection): successive_sample_points.append([obj_meta, reference_ransac]) try: successive_sample_points_per_release[i].append(successive_sample_points) except: successive_sample_points_per_release.append([successive_sample_points]) #print(len(successive_sample_points_per_release[i]),len(successive_sample_points_per_release[i][k])) with open('successive_sample_points_per_release.pickle','wb') as f: pickle.dump(successive_sample_points_per_release,f) print(" Done with successive sample_points extraction in {:.3f} seconds".format(time.time()-t1)) # - # # Step 0.3: Create submaps for pointnetvlad using same samples # + spatial_span = 2.0 interval = 0.5 num_points = 4096 cutoff = 0.5 with open('sample_points.pickle','rb') as f: sample_points = pickle.load(f) # - # # Step 0.3.1: Generate the reference dataset using the raw dataset # + baseline_path = 'pointnetvlad_submaps/' if not os.path.exists(baseline_path): os.mkdir(baseline_path) raw_path = os.path.join(baseline_path,"raw_dataset") raw_pc_path = os.path.join(raw_path,"pointcloud_4m_0.25") if not os.path.exists(raw_path): os.mkdir(raw_path) if not os.path.exists(raw_pc_path): os.mkdir(raw_pc_path) t0 = time.time() csvfile = open(raw_path+"/pointcloud_centroids_4m_0.25.csv",'w',newline = '') csv_writer = csv.writer(csvfile, delimiter = ',') csv_writer.writerow(['timestamp', 'northing', 'easting','alting','obj']) for obj_, [object_name, pointCloud, triangles] in enumerate(new_contiguous_point_collection): if object_name == 
"Reception-Data61-L5.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) + 50 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 0 elif object_name == "Driveway.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) - 25 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) - 50 elif object_name == "Apartment.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) + 25 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) - 50 elif object_name == "Workstations-Data61-L4.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) - 50 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 0 elif object_name == "Kitchen-Data61-L4.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) + 0 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 0 elif object_name == "HallWayToKitchen-Data61-L4.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) - 25 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 50 elif object_name == "StairWell-Data61-L4.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) + 25 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 50 new_Y = pointCloud[:,1] new_object_pointcloud = np.stack((new_X,new_Z,new_Y)).T nbrs = NearestNeighbors(n_neighbors=20000, algorithm='kd_tree').fit(new_object_pointcloud) round_new_pointcloud = 0.25*100*np.around((0.01/0.25)*new_object_pointcloud,decimals=2) unq_round_pointcloud = np.unique(round_new_pointcloud[:,:3],axis = 0) raw_centroids = unq_round_pointcloud#+np.random.normal(0,0.25,unq_round_pointcloud.shape) for northing, easting, alting in raw_centroids: # Getting the points around our centroid defined by [northing, easting] distances, indices = nbrs.kneighbors([[northing, easting, alting]]) #if max(distances[0]) < 0.5*spatial_span: continue submap_pointcloud = new_object_pointcloud[indices[0,np.where(distances[0,:]<=spatial_span)[0]]] if len(submap_pointcloud) == 0: continue # Centering and rescaling submap_pointcloud = (submap_pointcloud - [northing, easting, alting])/spatial_span if len(submap_pointcloud) > num_points: submap_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points)] elif len(submap_pointcloud) < num_points and len(submap_pointcloud) >= cutoff*num_points : #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud))] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) elif len(submap_pointcloud) < cutoff*num_points : #continue #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud), True)] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) timestamp = int(10**16*(time.time())) csv_writer.writerow([timestamp,northing,easting,alting,obj_]) with open(raw_pc_path+'/{}.pickle'.format(timestamp),'wb') as f: pickle.dump(submap_pointcloud.T,f) print("Done with submap generation for object ({}) {} in {:.3f} seconds".format(obj_,object_name,time.time()-t0)) csvfile.close() # - # # Step 0.3.2: Generate a reference dataset using a sample RANSAC dataset # + # First, we need to create the reference dataset using the raw dataset and a (randomly chosen) ransac dataset. 
num_points = 4096 baseline_path = 'pointnetvlad_submaps/' if not os.path.exists(baseline_path): os.mkdir(baseline_path) try: trial = np.random.randint(5) with open("../ransac_pc/ransac_point_collection_{}.pickle".format(trial),'rb') as f: ransac_trial_point_collection = pickle.load(f) object_, pointCloud_, tri_ = ransac_trial_point_collection[0] print("Chosen ransac trial",trial) except Exception as ex: print("Error:",ex) raw_path = os.path.join(baseline_path,"ransac_dataset") raw_pc_path = os.path.join(raw_path,"pointcloud_4m_0.25") if not os.path.exists(raw_path): os.mkdir(raw_path) if not os.path.exists(raw_pc_path): os.mkdir(raw_pc_path) t0 = time.time() csvfile = open(raw_path+"/pointcloud_centroids_4m_0.25.csv",'w',newline = '') csv_writer = csv.writer(csvfile, delimiter = ',') csv_writer.writerow(['timestamp', 'northing', 'easting','alting','obj']) for obj_, [object_name, pointCloud, triangles] in enumerate(ransac_trial_point_collection): if object_name == "Reception-Data61-L5.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) + 50 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 0 elif object_name == "Driveway.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) - 25 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) - 50 elif object_name == "Apartment.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) + 25 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) - 50 elif object_name == "Workstations-Data61-L4.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) - 50 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 0 elif object_name == "Kitchen-Data61-L4.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) + 0 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 0 elif object_name == "HallWayToKitchen-Data61-L4.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) - 25 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 50 elif object_name == "StairWell-Data61-L4.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) + 25 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 50 new_Y = pointCloud[:,1] new_object_pointcloud = np.stack((new_X,new_Z,new_Y)).T nbrs = NearestNeighbors(n_neighbors=20000, algorithm='kd_tree').fit(new_object_pointcloud) round_new_pointcloud = 0.25*100*np.around((0.01/0.25)*new_object_pointcloud,decimals=2) unq_round_pointcloud = np.unique(round_new_pointcloud[:,:3],axis = 0) raw_centroids = unq_round_pointcloud#+np.random.normal(0,0.25,unq_round_pointcloud.shape) for northing, easting, alting in raw_centroids: # Getting the points around our centroid defined by [northing, easting] distances, indices = nbrs.kneighbors([[northing, easting, alting]]) #if max(distances[0]) < 0.5*spatial_span: continue submap_pointcloud = new_object_pointcloud[indices[0,np.where(distances[0,:]<=spatial_span)[0]]] if len(submap_pointcloud) == 0: continue # Centering and rescaling submap_pointcloud = (submap_pointcloud - [northing, easting, alting])/spatial_span if len(submap_pointcloud) > num_points: submap_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points)] elif len(submap_pointcloud) < num_points and len(submap_pointcloud) >= cutoff*num_points : #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud))] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) elif len(submap_pointcloud) < 
cutoff*num_points : #continue #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud), True)] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) timestamp = int(10**16*(time.time())) csv_writer.writerow([timestamp,northing,easting,alting,obj_]) with open(raw_pc_path+'/{}.pickle'.format(timestamp),'wb') as f: pickle.dump(submap_pointcloud.T,f) print("Done with submap generation for object ({}) {} in {:.3f} seconds".format(obj_,object_name,time.time()-t0)) csvfile.close() # - # # Step 0.3.3: Generate the test submaps: # - using Raw spaces for validation # - using Ransac spaces for evaluation # - using Ransac spaces for the successive case # + # One-time releases Raw partial spaces baseline_path = 'pointnetvlad_submaps/' if not os.path.exists(baseline_path): os.mkdir(baseline_path) for radius in np.arange(0.25,3.1,0.25): per_radius_partial_length = [] t1 = time.time() partial_path = os.path.join(baseline_path,"raw_partial_radius_"+str(radius)+"_"+str(num_points))+"_unassisted" pointcloud_partial_path = os.path.join(partial_path,"pointcloud_4m") #pointcloud_partial_bin_path = os.path.join(partial_path,"pointcloud_4m_npy") if not os.path.exists(partial_path): os.mkdir(partial_path) if not os.path.exists(pointcloud_partial_path): os.mkdir(pointcloud_partial_path) #if not os.path.exists(pointcloud_partial_bin_path): os.mkdir(pointcloud_partial_bin_path) print(" ",pointcloud_partial_path) #""" csvfile = open(partial_path+"/pointcloud_centroids_4m.csv",'w',newline = '') csv_writer = csv.writer(csvfile, delimiter = ',') csv_writer.writerow(['timestamp', 'northing', 'easting', 'alting','obj']) #""" count = 0 for obj_, object_name, original_vertex in sample_points: new_partial_pointcloud = [] new_vX = [] new_vZ = [] try: object_, ransac_pointCloud, tri_ = new_contiguous_point_collection[int(obj_)] ransac_nbrs = NearestNeighbors(n_neighbors=min(20000,len(ransac_pointCloud)), algorithm='kd_tree').fit(ransac_pointCloud[:,:3]) dist_, ind_ = ransac_nbrs.kneighbors([original_vertex[:3]]) pointCloud = ransac_pointCloud[ind_[0,np.where(dist_[0,:]<=radius)[0]]] except: print("Can't get ransac samples for",trial,obj_meta[0],dist_.shape,ind_.shape) continue #if len(gen_planes) == 0: continue if len(pointCloud) == 0: continue if object_name == "Reception-Data61-L5.obj": new_X = pointCloud[:,0] + 50 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] + 50 new_vZ = original_vertex[2] + 0 elif object_name == "Driveway.obj": new_X = pointCloud[:,0] - 25 new_Z = pointCloud[:,2] - 50 new_vX = original_vertex[0] - 25 new_vZ = original_vertex[2] - 50 elif object_name == "Apartment.obj": new_X = pointCloud[:,0] + 25 new_Z = pointCloud[:,2] - 50 new_vX = original_vertex[0] + 25 new_vZ = original_vertex[2] - 50 elif object_name == "Workstations-Data61-L4.obj": new_X = pointCloud[:,0] - 50 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] - 50 new_vZ = original_vertex[2] + 0 elif object_name == "Kitchen-Data61-L4.obj": new_X = pointCloud[:,0] + 0 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] + 0 new_vZ = original_vertex[2] + 0 elif object_name == "HallWayToKitchen-Data61-L4.obj": new_X = pointCloud[:,0] - 25 new_Z = pointCloud[:,2] + 50 new_vX = original_vertex[0] - 25 new_vZ = original_vertex[2] + 50 elif object_name == "StairWell-Data61-L4.obj": new_X = pointCloud[:,0] + 25 
new_Z = pointCloud[:,2] + 50 new_vX = original_vertex[0] + 25 new_vZ = original_vertex[2] + 50 else: print("Error:",obj_meta) new_Y = pointCloud[:,1] new_partial_pointcloud = np.stack((new_X,new_Z,new_Y)).T max_known_span = max(np.amax(new_partial_pointcloud, axis = 0) - np.amin(new_partial_pointcloud, axis = 0)) nbrs = NearestNeighbors(n_neighbors=min(2*num_points,len(new_partial_pointcloud)), algorithm='kd_tree').fit(new_partial_pointcloud) # Get submap "centroids" by quantizing by 0.25m, i.e. round then unique if max_known_span > 3*spatial_span: round_new_partial_pointcloud = 100*np.around(0.01*new_partial_pointcloud,decimals=2) unq_round_partial_pointcloud = np.unique(round_new_partial_pointcloud[:,:3],axis = 0) # raw_partial_centroids = unq_round_partial_pointcloud c_nbrs = NearestNeighbors(n_neighbors = min(25,len(raw_partial_centroids)), algorithm='kd_tree').fit(raw_partial_centroids) c_dist, c_ind = c_nbrs.kneighbors(raw_partial_centroids) ia1, ia2 = np.where(c_dist < 1.73) dist_bins = np.bincount(ia1) max_dist = max(np.bincount(ia1)) raw_partial_centroids = raw_partial_centroids[[i for i, j in enumerate(dist_bins) if j == max_dist]] raw_partial_centroids = raw_partial_centroids+np.random.normal(0,interval,raw_partial_centroids.shape) else: # Correcting this, because the attacker is supposed to not know the true centroid # and has to estimate it instead. #raw_partial_centroids = [[new_vX, new_vZ, original_vertex[1]]] raw_partial_centroids = [np.mean(new_partial_pointcloud, axis = 0)] for northing, easting, alting in raw_partial_centroids: # Getting the points around our centroid defined by [northing, easting] distances, indices = nbrs.kneighbors([[northing, easting, alting]]) #if max(distances[0]) < 0.5*spatial_span: continue submap_pointcloud = new_partial_pointcloud[indices[0,np.where(distances[0,:]<=spatial_span)[0]]] if len(submap_pointcloud) == 0: continue # Centering and rescaling submap_pointcloud = (submap_pointcloud - [northing, easting, alting])/spatial_span if len(submap_pointcloud) > num_points: submap_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points)] elif len(submap_pointcloud) < num_points and len(submap_pointcloud) >= cutoff*num_points : #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud))] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) elif len(submap_pointcloud) < cutoff*num_points : #continue #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud), True)] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) timestamp = int(10**16*(time.time())) csv_writer.writerow([timestamp,northing,easting,alting,obj_]) with open(pointcloud_partial_path+'/{}.pickle'.format(timestamp),'wb') as f: pickle.dump(submap_pointcloud.T,f) count += 1 print(" Done with submap generation for radius {} ( {} samples) in {:.3f} seconds".format(radius,count,time.time()-t1)) csvfile.close() # + # One-time releases RANSAC partial spaces baseline_path = 'pointnetvlad_submaps/' if not os.path.exists(baseline_path): os.mkdir(baseline_path) for radius in np.arange(0.25,3.1,0.25): t1 = time.time() 
partial_path = os.path.join(baseline_path,"ransac_partial_radius_"+str(radius)+"_"+str(num_points))+"_unassisted" pointcloud_partial_path = os.path.join(partial_path,"pointcloud_4m") #pointcloud_partial_bin_path = os.path.join(partial_path,"pointcloud_4m_npy") if not os.path.exists(partial_path): os.mkdir(partial_path) if not os.path.exists(pointcloud_partial_path): os.mkdir(pointcloud_partial_path) #if not os.path.exists(pointcloud_partial_bin_path): os.mkdir(pointcloud_partial_bin_path) print(" ",pointcloud_partial_path) #""" csvfile = open(partial_path+"/pointcloud_centroids_4m.csv",'w',newline = '') csv_writer = csv.writer(csvfile, delimiter = ',') csv_writer.writerow(['timestamp', 'northing', 'easting', 'alting','obj']) #""" count = 0 for obj_, object_name, original_vertex in sample_points: new_partial_pointcloud = [] new_vX = [] new_vZ = [] try: trial = np.random.randint(5) with open("../ransac_pc/ransac_point_collection_{}.pickle".format(trial),'rb') as f: ransac_trial_point_collection = pickle.load(f) object_, ransac_pointCloud, tri_ = ransac_trial_point_collection[int(obj_)] ransac_nbrs = NearestNeighbors(n_neighbors=min(20000,len(ransac_pointCloud)), algorithm='kd_tree').fit(ransac_pointCloud[:,:3]) dist_, ind_ = ransac_nbrs.kneighbors([original_vertex[:3]]) pointCloud = ransac_pointCloud[ind_[0,np.where(dist_[0,:]<=radius)[0]]] except: print("Can't get ransac samples for",trial,obj_meta[0]) continue #if len(gen_planes) == 0: continue if len(pointCloud) == 0: continue if object_name == "Reception-Data61-L5.obj": new_X = pointCloud[:,0] + 50 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] + 50 new_vZ = original_vertex[2] + 0 elif object_name == "Driveway.obj": new_X = pointCloud[:,0] - 25 new_Z = pointCloud[:,2] - 50 new_vX = original_vertex[0] - 25 new_vZ = original_vertex[2] - 50 elif object_name == "Apartment.obj": new_X = pointCloud[:,0] + 25 new_Z = pointCloud[:,2] - 50 new_vX = original_vertex[0] + 25 new_vZ = original_vertex[2] - 50 elif object_name == "Workstations-Data61-L4.obj": new_X = pointCloud[:,0] - 50 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] - 50 new_vZ = original_vertex[2] + 0 elif object_name == "Kitchen-Data61-L4.obj": new_X = pointCloud[:,0] + 0 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] + 0 new_vZ = original_vertex[2] + 0 elif object_name == "HallWayToKitchen-Data61-L4.obj": new_X = pointCloud[:,0] - 25 new_Z = pointCloud[:,2] + 50 new_vX = original_vertex[0] - 25 new_vZ = original_vertex[2] + 50 elif object_name == "StairWell-Data61-L4.obj": new_X = pointCloud[:,0] + 25 new_Z = pointCloud[:,2] + 50 new_vX = original_vertex[0] + 25 new_vZ = original_vertex[2] + 50 else: print("Error:",obj_meta) new_Y = pointCloud[:,1] new_partial_pointcloud = np.stack((new_X,new_Z,new_Y)).T max_known_span = max(np.amax(new_partial_pointcloud, axis = 0) - np.amin(new_partial_pointcloud, axis = 0)) nbrs = NearestNeighbors(n_neighbors=min(2*num_points,len(new_partial_pointcloud)), algorithm='kd_tree').fit(new_partial_pointcloud) # Get submap "centroids" by quantizing by 0.25m, i.e. 
round then unique if max_known_span > 3*spatial_span: round_new_partial_pointcloud = 100*np.around(0.01*new_partial_pointcloud,decimals=2) unq_round_partial_pointcloud = np.unique(round_new_partial_pointcloud[:,:3],axis = 0) # raw_partial_centroids = unq_round_partial_pointcloud c_nbrs = NearestNeighbors(n_neighbors = min(25,len(raw_partial_centroids)), algorithm='kd_tree').fit(raw_partial_centroids) c_dist, c_ind = c_nbrs.kneighbors(raw_partial_centroids) ia1, ia2 = np.where(c_dist < 1.73) dist_bins = np.bincount(ia1) max_dist = max(np.bincount(ia1)) raw_partial_centroids = raw_partial_centroids[[i for i, j in enumerate(dist_bins) if j == max_dist]] raw_partial_centroids = raw_partial_centroids+np.random.normal(0,interval,raw_partial_centroids.shape) else: # Correcting this, because the attacker is supposed to not know the true centroid # and has to estimate it instead. #raw_partial_centroids = [[new_vX, new_vZ, original_vertex[1]]] raw_partial_centroids = [np.mean(new_partial_pointcloud, axis = 0)] for northing, easting, alting in raw_partial_centroids: # Getting the points around our centroid defined by [northing, easting] distances, indices = nbrs.kneighbors([[northing, easting, alting]]) #if max(distances[0]) < 0.5*spatial_span: continue submap_pointcloud = new_partial_pointcloud[indices[0,np.where(distances[0,:]<=spatial_span)[0]]] if len(submap_pointcloud) == 0: continue # Centering and rescaling submap_pointcloud = (submap_pointcloud - [northing, easting, alting])/spatial_span if len(submap_pointcloud) > num_points: submap_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points)] elif len(submap_pointcloud) < num_points and len(submap_pointcloud) >= cutoff*num_points : #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud))] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) elif len(submap_pointcloud) < cutoff*num_points : #continue #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud), True)] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) timestamp = int(10**16*(time.time())) csv_writer.writerow([timestamp,northing,easting,alting,obj_]) with open(pointcloud_partial_path+'/{}.pickle'.format(timestamp),'wb') as f: pickle.dump(submap_pointcloud.T,f) count += 1 print(" Done with submap generation for radius {} ( {} samples) in {:.3f} seconds".format(radius,count,time.time()-t1)) csvfile.close() # + # Successive release of RANSAC partial spaces baseline_path = 'pointnetvlad_submaps/' skip = 5 t0 = time.time() with open('successive_sample_points_per_release.pickle','rb') as f: successive_sample_points_per_release = pickle.load(f) for radius in np.arange(0.5,2.1,0.5): t1 = time.time() successive_path = os.path.join(baseline_path,"successive_radius_"+str(radius)) if not os.path.exists(successive_path): os.mkdir(successive_path) for i in np.arange(1,100,skip): # releases successive_release_path = os.path.join(successive_path,"release_"+str(i)) pointcloud_successive_path = os.path.join(successive_release_path,"pointcloud_4m") if not os.path.exists(successive_release_path): 
os.mkdir(successive_release_path) if not os.path.exists(pointcloud_successive_path): os.mkdir(pointcloud_successive_path) csvfile = open(successive_release_path+"/pointcloud_centroids_4m.csv",'w',newline = '') csv_writer = csv.writer(csvfile, delimiter = ',') csv_writer.writerow(['timestamp', 'northing', 'easting', 'alting','obj']) count = 0 for successive_sample_points_per_release_per_obj in successive_sample_points_per_release[i]: #print(" ",len(successive_sample_points_per_release_per_obj),"releases") growing_point_cloud = [] new_vX = [] new_vZ = [] ransac_pointCloud = [] for [obj_, object_name, original_vertex], reference_ransac in successive_sample_points_per_release_per_obj[:i]: try: if len(ransac_pointCloud) == 0: # if empty, open. This only happens at beginning with open("../ransac_pc/ransac_point_collection_{}.pickle".format(reference_ransac),'rb') as f: ransac_trial_point_collection = pickle.load(f) object_, ransac_pointCloud, tri_ = ransac_trial_point_collection[int(obj_)] ransac_nbrs = NearestNeighbors(n_neighbors=min(20000,len(ransac_pointCloud)), algorithm='kd_tree').fit(ransac_pointCloud[:,:3]) except: print("Can't get ransac samples for",i,obj_, object_name) continue dist_, ind_ = ransac_nbrs.kneighbors([original_vertex[:3]]) pointCloud = ransac_pointCloud[ind_[0,np.where(dist_[0,:]<=radius)[0]]] if len(pointCloud) == 0: continue #Regular Accumulation if len(growing_point_cloud) == 0: growing_point_cloud = pointCloud else: growing_point_cloud = np.concatenate( (growing_point_cloud,pointCloud), axis=0 ) if len(growing_point_cloud) == 0: continue pointCloud = np.unique(growing_point_cloud,axis=0) if object_name == "Reception-Data61-L5.obj": new_X = pointCloud[:,0] + 50 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] + 50 new_vZ = original_vertex[2] + 0 elif object_name == "Driveway.obj": new_X = pointCloud[:,0] - 25 new_Z = pointCloud[:,2] - 50 new_vX = original_vertex[0] - 25 new_vZ = original_vertex[2] - 50 elif object_name == "Apartment.obj": new_X = pointCloud[:,0] + 25 new_Z = pointCloud[:,2] - 50 new_vX = original_vertex[0] + 25 new_vZ = original_vertex[2] - 50 elif object_name == "Workstations-Data61-L4.obj": new_X = pointCloud[:,0] - 50 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] - 50 new_vZ = original_vertex[2] + 0 elif object_name == "Kitchen-Data61-L4.obj": new_X = pointCloud[:,0] + 0 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] + 0 new_vZ = original_vertex[2] + 0 elif object_name == "HallWayToKitchen-Data61-L4.obj": new_X = pointCloud[:,0] - 25 new_Z = pointCloud[:,2] + 50 new_vX = original_vertex[0] - 25 new_vZ = original_vertex[2] + 50 elif object_name == "StairWell-Data61-L4.obj": new_X = pointCloud[:,0] + 25 new_Z = pointCloud[:,2] + 50 new_vX = original_vertex[0] + 25 new_vZ = original_vertex[2] + 50 else: print("Error:",[obj_, object_name, original_vertex]) new_Y = pointCloud[:,1] new_partial_pointcloud = np.stack((new_X,new_Z,new_Y)).T nbrs = NearestNeighbors(n_neighbors=min(2*num_points,len(new_partial_pointcloud)), algorithm='kd_tree').fit(new_partial_pointcloud) # Get submap "centroids" by quantizing by 0.25m, i.e. 
round then unique #if radius > spatial_span: round_new_partial_pointcloud = 100*np.around(0.01*new_partial_pointcloud,decimals=2) unq_round_partial_pointcloud = np.unique(round_new_partial_pointcloud[:,:3],axis = 0) # partial_centroids = unq_round_partial_pointcloud+np.random.normal(0,interval,unq_round_partial_pointcloud.shape) c_nbrs = NearestNeighbors(n_neighbors = min(25,len(partial_centroids)), algorithm='kd_tree').fit(partial_centroids) c_dist, c_ind = c_nbrs.kneighbors(partial_centroids) ia1, ia2 = np.where(c_dist < 1.73) dist_bins = np.bincount(ia1) max_dist = max(np.bincount(ia1)) partial_centroids = partial_centroids[[i for i, j in enumerate(dist_bins) if j == max_dist]] for northing, easting, alting in partial_centroids: # Getting the points around our centroid defined by [northing, easting] distances, indices = nbrs.kneighbors([[northing, easting, alting]]) #if max(distances[0]) < 0.5*spatial_span: continue submap_pointcloud = new_partial_pointcloud[indices[0,np.where(distances[0,:]<=spatial_span)[0]]] if len(submap_pointcloud) == 0: continue # Centering and rescaling submap_pointcloud = (submap_pointcloud - [northing, easting, alting])/spatial_span #per_radius_partial_length.append([northing, easting, alting, len(submap_pointcloud)]) if len(submap_pointcloud) > num_points: submap_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points)] elif len(submap_pointcloud) < num_points and len(submap_pointcloud) >= cutoff*num_points : #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud))] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) elif len(submap_pointcloud) < cutoff*num_points : #continue #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud), True)] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) timestamp = int(10**16*(time.time())) csv_writer.writerow([timestamp,northing,easting,alting,obj_]) with open(pointcloud_successive_path+'/{}.pickle'.format(timestamp),'wb') as f: pickle.dump(submap_pointcloud.T,f) count += 1 if i % 10 == 1: print(" Done with submap generation for iteration {}, radius {} ({} submaps) in {:.3f} seconds".format(i,radius,count,time.time()-t1)) t1 = time.time() csvfile.close() print(" Done with generalized submap generation for radius {} in {:.3f} seconds".format(radius,time.time()-t0)) t0 = time.time() # - # # Step 0.3.4: Building database and query files for evaluation with pointnetVLAD # - the combined Raw and RANSAC referece database # - for validation with one-time released Raw spaces # - for testing with one-time released RANSAC spaces # - for testing with successive RANSAC spaces # + base_path= "pointnetvlad_submaps/"#"../partial_dataset/" construct_query_and_database_sets( base_path, ['raw_dataset', 'ransac_dataset'], "/pointcloud_4m_0.25/", "pointcloud_centroids_4m_0.25.csv")#, all_folders[index]) # + # For validation with raw queries. 
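# Each per-radius folder written in Step 0.3.3 is processed here into its own query set,
# using its pointcloud_4m submaps and centroid CSV (construct_query_sets comes from
# query_sets, imported at the top of this notebook).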
for radius in np.arange(0.25,3.1,0.25): partial_path = 'raw_partial_radius_'+str(radius)+"_4096_unassisted"# print(partial_path) construct_query_sets(partial_path, "/pointcloud_4m/", "pointcloud_centroids_4m.csv")#, all_folders[index]) # + # For the Ransac queries. for radius in np.arange(0.25,3.1,0.25): partial_path = 'ransac_partial_radius_'+str(radius)+"_4096_unassisted"# print(partial_path) construct_query_sets(partial_path, "/pointcloud_4m/", "pointcloud_centroids_4m.csv")#, all_folders[index]) # + # For the successive queries. successive_dir = os.path.join(base_path,"successive_queries") if not os.path.exists(successive_dir): os.mkdir(successive_dir) for radius in np.arange(0.5,2.1,0.5): successive_path = 'successive_radius_'+str(radius) for release in np.arange(1,100,5): partial_path = 'release_'+str(release) print(partial_path) construct_successive_query_sets(successive_path,partial_path, "/pointcloud_4m/", "pointcloud_centroids_4m.csv")#, all_folders[index]) #print(all_folders) #print("training:",train_folders) # # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Short Corridor with Switched Actions # --- # Consider the small corridor grid world shown inset in the graph below. The reward is -1 per step, as usual. In each of the three nonterminal states there are only two actions, right and left. These actions have their usual consequences in the first and third states (left causes no movement in the first state), but in the second state they are reversed, so that right moves to the left and left moves to the right. The problem is difficult because all the states appear identical under the function approximation. In particular, we define `x(s, right) = [1, 0]` and `x(s, left) = [0, 1]`, for all s. 
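# The policy is parameterised as a soft-max over linear action preferences, which is what the `softmax`/`chooseAction` code below implements (up to an $\epsilon$-floor on the smaller probability):
#
# $$\pi(a \mid s, \theta) = \frac{e^{\theta^\top x(s,a)}}{\sum_b e^{\theta^\top x(s,b)}}$$
#
# With the initial $\theta = (-1.47, 1.47)$ used below, $\pi(\text{right} \mid s) = e^{-1.47}/(e^{-1.47} + e^{1.47}) \approx 0.05$, i.e. the agent starts out strongly biased toward "left".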
# # $$J(\theta) = V_{\pi_\theta}(S)$$ # # # # MC Policy Gradient # --- # # # import numpy as np import matplotlib.pyplot as plt # + code_folding=[] class ShortCorridor: def __init__(self, alpha=0.2, gamma=0.8): self.actions = ["left", "right"] self.x = np.array([[0, 1], [1, 0]]) # left|s, right|s self.theta = np.array([-1.47, 1.47]) self.state = 0 # initial state 0 self.gamma = gamma self.alpha = alpha def softmax(self, vector): return np.exp(vector)/sum(np.exp(vector)) def chooseAction(self): h = np.dot(self.theta, self.x) prob = self.softmax(h) # left, right probability for all state imin = np.argmin(prob) epsilon = 0.05 if prob[imin] < epsilon: prob[:] = 1 - epsilon prob[imin] = epsilon action = np.random.choice(self.actions, p=prob) return action def takeAction(self, action): if self.state == 0: nxtState = 0 if action == "left" else 1 elif self.state == 1: nxtState = 2 if action == "left" else 0 # reversed elif self.state == 2: nxtState = 1 if action == "left" else 3 else: nxtState = 2 if action == "left" else 3 return nxtState def giveReward(self): if self.state == 3: return 0 return -1 def reset(self): self.state = 0 def run(self, rounds=100): actions = [] rewards = [] for i in range(1, rounds+1): reward_sum = 0 while True: action = self.chooseAction() nxtState = self.takeAction(action) reward = self.giveReward() reward_sum += reward actions.append(action) rewards.append(reward) self.state = nxtState # game end if self.state == 3: T = len(rewards) for t in range(T): # calculate G G = 0 for k in range(t+1, T): G += np.power(self.gamma, k-t-1)*rewards[k] j = 1 if actions[t] == "right" else 0 # dev on particular state h = np.dot(self.theta, self.x) prob = self.softmax(h) grad = self.x[:, j] - np.dot(self.x, prob) self.theta += self.alpha*np.power(self.gamma, t)*G*grad # reset self.state = 0 actions = [] rewards = [] if i % 50 == 0: print("round {}: current prob {} reward {}".format(i, prob, reward_sum)) reward_sum = 0 break # - sc = ShortCorridor(alpha=2e-4, gamma=1) sc.run(1000) h = np.dot(sc.theta, sc.x) sc.softmax(h) # left, right probability for all state # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #Bien, comenzemos con esto: # # pip install opencv-python import os import gc import cv2 import random import numpy as np import matplotlib.pyplot as plt from keras.utils import to_categorical from keras.models import Sequential from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten # + DATA_PATH = Path('../input/plant-pathology-2020-fgvc7') IMG_PATH = DATA_PATH / 'images' LABEL_COLS = ['healthy', 'multiple_diseases', 'rust', 'scab'] IMG_SIZE = 512 SEED = 420 N_FOLDS = 5 BS = 16 N_FOLDS = 5 ARCH = densenet121 # - #viene lo del repo de kaggle: https://www.kaggle.com/lextoumbourou/plant-pathology-2020-eda-training-fastai2 # !pip install git+https://github.com/fastai/fastcore > /dev/null # !pip install git+https://github.com/fastai/fastai2 > /dev/null # !pip install iterative-stratification > /dev/null # + # %load_ext autoreload # %autoreload 2 import os import pandas as pd import sys from collections import Counter from pathlib import Path from iterstrat.ml_stratifiers import MultilabelStratifiedKFold from tqdm.notebook import tqdm #progress bar lib from torchvision.models import densenet121 # from sklearn.metrics import roc_auc_score from torch.utils.data.sampler import WeightedRandomSampler from 
fastai2.basics import * from fastai2.callback.all import * from fastai2.vision.all import * #https://pytorch.org/docs/stable/torchvision/models.html #https://www.kaggle.com/pytorch/densenet121 and pretrained models to save time and resources of execution #more on dense nets https://github.com/liuzhuang13/DenseNet and in https://arxiv.org/abs/1608.06993 #https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html #https://link.springer.com/article/10.1023/A:1010920819831 for more on ROC Operating Characteristic curves.. # - #definiendo semilla en vez de que sea 42 jaja def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True # + #metiendo paths de las imágenes.. DATA_PATH = Path('/Users/enrique/Documents/Ironhack/datamex0320/proyecto-final/ml-foliar-tree-disease/plant-pathology-2020-fgvc7') IMG_PATH = DATA_PATH / 'images' LABEL_COLS = ['healthy', 'multiple_diseases', 'rust', 'scab'] IMG_SIZE = 512 SEED = 420 N_FOLDS = 5 BS = 16 N_FOLDS = 5 ARCH = densenet121 # + #inicia semilla en cuatro 20 seed_everything(SEED) # - # Exploratory Data Analysis #dataframe de las imágenes del train.. train_df = pd.read_csv('/Users/enrique/Documents/Ironhack/datamex0320/proyecto-final/ml-foliar-tree-disease/plant-pathology-2020-fgvc7/train.csv') test_df = pd.read_csv('/Users/enrique/Documents/Ironhack/datamex0320/proyecto-final/ml-foliar-tree-disease/plant-pathology-2020-fgvc7/test.csv') train_df.head(15) # + #Observación: como es el train da la proba de uno # - # DataSet size (len(train_df), len(test_df)) #las muestras tienen 1822 entradas en los csv # Label distribution _, axes = plt.subplots(ncols=4, nrows=1, constrained_layout=True, figsize=(10, 3)) for ax, column in zip(axes, LABEL_COLS): train_df[column].value_counts().plot.bar(title=column, ax=ax) plt.show() plt.title('Label dist') train_df[LABEL_COLS].idxmax(axis=1).value_counts().plot.bar() train_df.iloc[:,1:-1].sum(axis=1).value_counts() train_df[['healthy', 'multiple_diseases', 'rust', 'scab']].sum(axis=1).unique() # del value counts y del unique vemos que tal vez se trata de una multiclase pero no clasificación multilabel. #hacemos función que tome tamaños de las imágenes en la carpeta de img_path. con ello tendremos dimensiones para el modelo def get_size(df): hs, ws = [], [] for _, row in tqdm(df.iterrows(), total=len(train_df)): img = Image.open(IMG_PATH/(row.image_id+'.jpg')) h, w = img.size hs.append(h) ws.append(w) return hs, ws train_hs, train_ws = get_size(train_df) test_hs, test_ws = get_size(test_df) # + #se plotea para saber resolución de las imágenes del dataset de Kaggle.. for set_label, set_size in ('train', [train_hs, train_ws]), ('test', [test_hs, test_ws]): print(f'{set_label} height val counts: {Counter(set_size[0])}') print(f'{set_label} width val counts: {Counter(set_size[1])}') _, axes = plt.subplots(ncols=2, nrows=1, constrained_layout=True, figsize=(10, 3)) for ax, column, vals in zip(axes, ['heights', 'widths'], set_size): ax.hist(vals, bins=100) ax.set_title(f'{set_label} {column} hist') plt.show() # + #por la inspección que había hecho... y por las gráficas vemos que las dimensiones son de entre #2048x1365 o 1365x2048 pixeles. # - # Ahora vemos el color. Distribución del color... por RGB #histogram function for colors.. 
more can be consulted in the book: #ADVANCED GUIDE TO PYTHON 3 PROGRAMMING by #gets means for plotting in a Histogram for Red, Blue and Green spectra. def plot_colour_hist(df, title): red_values = []; green_values = []; blue_values = []; all_channels = [] for _, row in tqdm(df.iterrows(), total=len(df)): img = np.array(Image.open(IMG_PATH/(row.image_id+'.jpg'))) red_values.append(np.mean(img[:, :, 0])) green_values.append(np.mean(img[:, :, 1])) blue_values.append(np.mean(img[:, :, 2])) all_channels.append(np.mean(img)) _, axes = plt.subplots(ncols=4, nrows=1, constrained_layout=True, figsize=(16, 3), sharey=True) for ax, column, vals, c in zip( axes, ['red', 'green', 'blue', 'all colours'], [red_values, green_values, blue_values, all_channels], 'rgbk' ): ax.hist(vals, bins=100, color=c) ax.set_title(f'{column} hist') plt.suptitle(title) plt.show() #we try the funtion on the hole train set #the idea is that each bin is plotted by the ammount of RGB COLORS component for each image,interesting to see they have some sort of like normal distribution but not so much if we would look closer plot_colour_hist(train_df, title='Train colour dist') #then for the test dataframe plot_colour_hist(test_df, title='Test colour dist') # Creating folds for # Following the author,iterative stratification is used to create balanced folds. # # + #using strat_folds is for deling with multilabel data.. next would be to use data train_df['fold'] = -1 strat_kfold = MultilabelStratifiedKFold(n_splits=N_FOLDS, random_state=SEED, shuffle=True) for i, (_, test_index) in enumerate(strat_kfold.split(train_df.image_id.values, train_df.iloc[:,1:].values)): train_df.iloc[test_index, -1] = i train_df['fold'] = train_df['fold'].astype('int') # + _, axes = plt.subplots(ncols=5, nrows=1, constrained_layout=True, figsize=(16, 3), sharey=True) for ax, fold in zip(axes, range(5)): train_df.query(f'fold == {fold}')[LABEL_COLS].idxmax(axis=1).value_counts().plot.bar(ax=ax) ax.set_title(f'Fold {fold} label dist') plt.show() # + #by inspection, we notice there are no variation for diferent folds, then we shal proceede # - train_df.to_csv('train_with_strat_folds.csv', index=False) #saving as train_with_strat_folds with the fold stratification. #similar as elbow stratification # #Data # oversamplin inc.. # references: https://en.wikipedia.org/wiki/Oversampling_and_undersampling_in_data_analysis # # Oversampling and undersampling in data analysis are techniques used to adjust the class distribution of a data set (i.e. the ratio between the different classes/categories represented train_df['label'] = train_df[LABEL_COLS].idxmax(axis=1) # + #as the author suggests, there are x2 the number of multiple diseases labels as that appears to be the majorly unrepresented class. # - #function to see how are images displayed and labeled on the sets. 
this helps us get them in batches def get_data(fold): train_df_no_val = train_df.query(f'fold != {fold}') train_df_just_val = train_df.query(f'fold == {fold}') train_df_bal = pd.concat( [train_df_no_val.query('label != "multiple_diseases"'), train_df_just_val] + [train_df_no_val.query('label == "multiple_diseases"')] * 2 ).sample(frac=1.0, random_state=SEED).reset_index(drop=True) datablock = DataBlock( blocks=(ImageBlock, CategoryBlock(vocab=LABEL_COLS)), getters=[ ColReader('image_id', pref=IMG_PATH, suff='.jpg'), ColReader('label') ], splitter=IndexSplitter(train_df_bal.loc[train_df_bal.fold==fold].index), item_tfms=Resize(IMG_SIZE), batch_tfms=aug_transforms(size=IMG_SIZE, max_rotate=30., min_scale=0.75, flip_vert=True, do_flip=True) ) return datablock.dataloaders(source=train_df_bal, bs=BS) #dataloaders on fold 0 ... https://docs.fast.ai/vision.data.html dls = get_data(fold=0) #we display a batch from: https://docs.fast.ai/vision.data.html everytime you execute gives you a different one dls.show_batch() # + #Training of model # + # custom functions def comp_metric(preds, targs, labels=range(len(LABEL_COLS))): # One-hot encode targets targs = np.eye(4)[targs] return np.mean([roc_auc_score(targs[:,i], preds[:,i]) for i in labels]) def healthy_roc_auc(*args): return comp_metric(*args, labels=[0]) def multiple_diseases_roc_auc(*args): return comp_metric(*args, labels=[1]) def rust_roc_auc(*args): return comp_metric(*args, labels=[2]) def scab_roc_auc(*args): return comp_metric(*args, labels=[3]) # - '''traditional convolutional networks with L layers have L connections - one between each layer and its subsequent layer - dense networks have L(L+1)/2 direct connections. For each layer, the feature-maps of all preceding layers are used as inputs, and its own feature-maps are used as inputs into all subsequent layers. ''' def get_learner(fold_num, lr=1e-3): opt_func = partial(Adam, lr=lr, wd=0.01, eps=1e-8) data = get_data(fold_num) learn = cnn_learner( data, ARCH, opt_func=opt_func, loss_func=LabelSmoothingCrossEntropy(), metrics=[ AccumMetric(healthy_roc_auc, flatten=False), AccumMetric(multiple_diseases_roc_auc, flatten=False), AccumMetric(rust_roc_auc, flatten=False), AccumMetric(scab_roc_auc, flatten=False), AccumMetric(comp_metric, flatten=False)] ).to_fp16() return learn get_learner(fold_num=0).lr_find() def print_metrics(val_preds, val_labels): comp_metric_fold = comp_metric(val_preds, val_labels) print(f'Comp metric: {comp_metric_fold}') healthy_roc_auc_metric = healthy_roc_auc(val_preds, val_labels) print(f'Healthy metric: {healthy_roc_auc_metric}') multiple_diseases_roc_auc_metric = multiple_diseases_roc_auc(val_preds, val_labels) print(f'Multi disease: {multiple_diseases_roc_auc_metric}') rust_roc_auc_metric = rust_roc_auc(val_preds, val_labels) print(f'Rust metric: {rust_roc_auc_metric}') scab_roc_auc_metric = scab_roc_auc(val_preds, val_labels) print(f'Scab metric: {scab_roc_auc_metric}') # + all_val_preds = [] all_val_labels = [] all_test_preds = [] for i in range(N_FOLDS): print(f'Fold {i} results') learn = get_learner(fold_num=i) learn.fit_one_cycle(4) learn.unfreeze() learn.fit_one_cycle(8, slice(1e-5, 1e-4)) learn.recorder.plot_loss() learn.save(f'model_fold_{i}') val_preds, val_labels = learn.get_preds() print_metrics(val_preds, val_labels) all_val_preds.append(val_preds) all_val_labels.append(val_labels) test_dl = dls.test_dl(test_df) test_preds, _ = learn.get_preds(dl=test_dl) all_test_preds.append(test_preds) plt.show() # - tras un rato de que corra... 
print_metrics(np.concatenate(all_val_preds), np.concatenate(all_val_labels)) Interpreting interp = ClassificationInterpretation.from_learner(learn) interp.plot_top_losses(9, figsize=(15, 10)) interp.plot_confusion_matrix(normalize=True, figsize=(6, 6)) Test predictions test_df_output = pd.concat([test_df, pd.DataFrame(np.mean(np.stack(all_test_preds), axis=0), columns=LABEL_COLS)], axis=1) test_df_output.head() test_df_output.to_csv('submission.csv', index=False) # !head -n 5 submission.csv # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np from pyproj import Proj, transform from scipy.interpolate import griddata p1 = Proj("+proj=sterea +lat_0=52.15616055555555 +lon_0=5.38763888888889 +k=0.9999079 +x_0=155000 +y_0=463000 +ellps=bessel +towgs84=565.237,50.0087,465.658,-0.406857,0.350733,-1.87035,4.0812 +units=m +no_defs") p2 = Proj(proj='latlong',datum='WGS84') import matplotlib.pyplot as plt plt.style.use('ggplot') % matplotlib notebook import os from cartopy import config import cartopy.crs as ccrs import cartopy.feature as cfeature from netCDF4 import Dataset, num2date # + directory = 'D:/Use_case_Schouwen/' name_baty = 'suppletie 5_1.XYZ' name_baty2 = 'suppletie 5_2.XYZ' nodes = np.loadtxt('D:/Use_case_Schouwen/tidal_an/nodes.csv') # - coords_RD1 = np.loadtxt(directory + name_baty, dtype= float) coords_RD2 = np.loadtxt(directory + name_baty2, skiprows=1, dtype= float) coords_RD = np.concatenate((coords_RD1, coords_RD2)) coords_RD[:,2] = coords_RD[:,2] / 100 # + coords_WGS = [] for C in coords_RD: lon, lat, z = transform(p1, p2, C[0], C[1], C[2]) coords_WGS.append([lon, lat, z]) coords_WGS = np.array(coords_WGS) coords_WGS[:,2] = coords_RD[:,2] # nc = Dataset('D:/DCSMv6-ZUNOv4/E5_2018.dtm') # source https://maps.ngdc.noaa.gov/viewers/bathymetry/ # A = 0 # B = 300 # C = -900 # D = -600 # lon = nc.variables['COLUMNS'][A:B] # lat = nc.variables['LINES'][C:D] # lon, lat = np.meshgrid(lon, lat) # bat = nc.variables['DEPTH_SMOOTH'][C:D,A:B] # lon2 = lon.reshape(lon.size) # lat2 = lat.reshape(lon.size) # bat2 = bat.reshape(lon.size) # extra_pnts = np.zeros((lon2.shape[0],3)) # extra_pnts[:,0] = lon2 # extra_pnts[:,1] = lat2 # extra_pnts[:,2] = bat2 # coords_WGS = np.concatenate((coords_WGS, extra_pnts)) # + class LineBuilder: def __init__(self, line): self.line = line self.xs = [] self.ys = [] self.cid = line.figure.canvas.mpl_connect('button_press_event', self) self.nodes = [] def __call__(self, event): print('click', event) if event.inaxes!=self.line.axes: return self.xs.append(event.xdata) self.ys.append(event.ydata) self.line.set_data(self.xs, self.ys) self.line.figure.canvas.draw() self.nodes.append((event.xdata, event.ydata)) D_emp = 4.0 D_full = 5.0 ukc = 1.0 tide = -1.5 N = 300 start = [3.676014309976414,51.71540093282213] stop = [3.522637481591586,51.76880095558772] x_r = np.arange(3.66,3.72, (-3.66+3.72)/N) y_r = np.arange(51.66,51.71, (-51.66+51.71)/N) y_r, x_r = np.meshgrid(y_r,x_r) WD_r = tide - griddata(coords_WGS[:,:2], coords_WGS[:,2], (x_r, y_r), method= 'linear') # - # % matplotlib notebook # # fig = plt.figure(figsize=(15, 30)) # ax = plt.subplot(projection=ccrs.PlateCarree()) # # cval = [-1000,D_emp +ukc, D_full+ukc, 100] # im = plt.contourf(x_r,y_r,WD_r,cval,transform=ccrs.PlateCarree(), colors = ('sandybrown', 'cornflowerblue', 'darkblue')) # fig.colorbar(im, ax=ax, label = 
'Waterdepth in meters') # # cval2 = [D_emp+ukc, D_full+ukc] # plt.contour(x_r,y_r,WD_r,cval2,transform=ccrs.PlateCarree(), colors = 'black') # # ax.coastlines(resolution='10m', color='black', linewidth=3) # ax.gridlines(color = 'grey', zorder = 3) # # # plt.plot(coords_WGS[:,0], coords_WGS[:,1], 'r.',markersize = 0.2,transform=ccrs.PlateCarree()) # plt.plot(nodes[:,1], nodes[:,0], 'r.',transform=ccrs.PlateCarree()) # plt.plot(start[0], start[1],'mo',transform=ccrs.PlateCarree()) # plt.plot(stop[0], stop[1], 'mo',transform=ccrs.PlateCarree() ) # plt.plot(3.67074012, 51.70969009,'ro', transform=ccrs.PlateCarree()) # ax.set_extent([3.66,3.72,51.66,51.71]) # # # line, = plt.plot([], [], 'b') # points, = plt.plot([],[], 'ro') # linebuilder = LineBuilder(line) # pointbuilder = LineBuilder(points) # # # plt.show() # # # add_nodes = linebuilder.nodes np.savetxt('additional_nodes.csv', add_nodes) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext sql # %config SqlMagic.autocommit=False # %sql mysql+pymysql://root:root@127.0.0.1:3306/mysql # # ## Problem 1: Controls # # Write a Python script that proves that the lines of data in Germplasm.tsv, and LocusGene are in the same sequence, based on the AGI Locus Code (ATxGxxxxxx). (hint: This will help you decide how to load the data into the database) # # + #Problem 1 solution germplasm = open("Germplasm.tsv", "r") locusgene = open("LocusGene.tsv", "r") a = 0 counter = 0 #variable that counts the number of genes import re for line in germplasm.readlines(): germplasmAGIcode = re.search( r'AT\dG\d+\t', line) germplasm.seek(a) if germplasmAGIcode: for line in locusgene.readlines(): locusgeneAGIcode = re.search( r'AT\dG\d+\t', line) locusgene.seek(0) a = a+1 if locusgeneAGIcode: if germplasmAGIcode.group() == locusgeneAGIcode.group(): print(locusgeneAGIcode.group()+"is in both files") counter+=1 germplasm.close() locusgene.close() print ("There are "+str(counter)+" genes") # - # **Problem 1 explanation** # # For the problem number 1, I have used two loops for. The first one, read the file germplasm and looks for an AGIcode; after that, begin the second loop for that looks for AGI code in locusgene file and with an if command try to search the same AGIcode extracted from germplasm file. # When the command if finish, the output prints the AGI code of the gene + "is in both files" to indicate that the gene is in germplasm and locusgene files. # When the if command finishes, the code comeback to the first for loop, but begins the reading in the next line thanks to .seek() command. I used a variable called "a" that rise in 1 in each loop and, on this way, I get that the germplasm file reading begins a line after. # At the end, with a variable called counter, I count the number of genes there are in common. # ## Problem 2: Design and create the database. # * It should have two tables - one for each of the two data files. 
# * The two tables should be linked in a 1:1 relationship # * you may use either sqlMagic or pymysql to build the database # # # + #Problem 2 solution # %load_ext sql # #%config SqlMagic.autocommit=False # %sql mysql+pymysql://root:root@127.0.0.1:3306/mysql # #%sql drop database Germplasm_Locusgene; # %sql create database Germplasm_Locusgene; # %sql use Germplasm_Locusgene; # %sql CREATE TABLE Germplasm(Locus VARCHAR(9) NOT NULL, Germplasm VARCHAR(50) NOT NULL, Phenotype VARCHAR(1500) NOT NULL, Pubmed VARCHAR(50) NOT NULL); # %sql CREATE TABLE LocusGene(Locus VARCHAR(9) NOT NULL, Gene VARCHAR(20) NOT NULL, ProteinLength VARCHAR (20) NOT NULL) # %sql show tables; # - # **Problem 2 explanation** # # Using command in mysql(%sql) create database, I have created a database called Germplasm_Locusgene. After that, I have selected the database using a command called use and finally, I have created two tables inside the database Germplasm_Locusgene. # The first table is called Germplasm and have four columns(Locus, Germplasm, Phenotype and Pubmed), the second table is called LocusGene and have three columns(Locus, Gene and ProteinLength). # As both tables coincide in number and genes, the gene to gene relationship between tables is a 1:1 ratio, and I don't have to insert and auto primary key id. # ## Problem 3: Fill the database # Using pymysql, create a Python script that reads the data from these files, and fills the database. There are a variety of strategies to accomplish this. I will give all strategies equal credit - do whichever one you are most confident with. # # + #Problem 3 solution germplasm = open("Germplasm.tsv", "r") locusgene = open("LocusGene.tsv", "r") locusgenelist = [] germplasmlist = [] for line in locusgene.readlines(): line = line.rstrip() Locus, Gene, ProteinLength = line.split('\t') locusgenelist.append((Locus, Gene, ProteinLength)) #I do the list inserting data with .append() command for line in germplasm.readlines(): line = line.rstrip() Locus, Germplasm, Phenotype, Pubmed = line.split('\t') germplasmlist.append((Locus, Germplasm, Phenotype, Pubmed)) locusgene.close() germplasm.close() locusgenelist.pop(0) germplasmlist.pop(0) import pymysql.cursors connection = pymysql.connect(host='localhost', user='root', password='', db='Germplasm_Locusgene', charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) #connection.autocommit = False try: with connection.cursor() as cursor: sql = """INSERT INTO LocusGene (Locus, Gene, ProteinLength) VALUES (%s,%s,%s)""" val = locusgenelist cursor.executemany(sql,val) #execute sql and insert into %s the data contained in locusgenelist connection.commit() mql = """INSERT INTO Germplasm (Locus, Germplasm, Phenotype, Pubmed) VALUES (%s,%s,%s,%s)""" cal = germplasmlist cursor.executemany(mql, cal) connection.commit() finally: print("") connection.close() # %sql SELECT * FROM Germplasm; # %sql SELECT * FROM LocusGene; # - # %sql SELECT * FROM Germplasm; # **Problem 3 explanation** # # At first, I opened and read the tsv files using the command open(file, 'r'(read). After that I created two empty lists called locusgenelist and germplasmlist to fill them with two for loops. The for loop use the command readline to read the lines in order and fill the lists in order using the command .append. The file is a tsv file and for this reason I have used the command line.split(\t) to shows the code how the elements in a line in the files are separated. 
# Once I filled the lists, I close the files and remove the header line of each list using the command .pop(0) that remove the first element of the list (I have removed the header line). # After that, using Python SQL library I filled the tables created in MySQL in problem 2 with the lists (locusgenelist and germplasmlist). To do it I filled the tables with command cursor.executemany and execute the sql line to insert data and the list to fill the values (%s). # Finally, I have closed the connection and assure that the tables are filled correctly usig the command %sql SELECT * FROM. # As we can see in the tables, the 32 genes are correctly inserted in the tables. # ## Problem 4: Create reports, written to a file # # 1. Create a report that shows the full, joined, content of the two database tables (including a header line) # # 2. Create a joined report that only includes the Genes SKOR and MAA3 # # 3. Create a report that counts the number of entries for each Chromosome (AT1Gxxxxxx to AT5Gxxxxxxx) # # 4. Create a report that shows the average protein length for the genes on each Chromosome (AT1Gxxxxxx to AT5Gxxxxxxx) # # When creating reports 2 and 3, remember the "Don't Repeat Yourself" rule! # # All reports should be written to **the same file**. You may name the file anything you wish. #Problem 4.1 solution # %sql SELECT * FROM LocusGene, Germplasm WHERE \ # LocusGene.Locus = Germplasm.Locus; # **Problem 4.1 explanation** # # To create a report that joined the Germplasm table and the LocusGene table, I have used the command SELECT * FROM LocusGene, Germplasm (to select both tables) WHERE LocusGene.Locus = Germplasm.Locus (to say to the code that the row Locus in LocusGene table is the same row that Locus row in Germplasm table). # And on this way, I get an output that join both tables and ordering data according to the Locus. # I didn't use id primary key because if I had used that, in the output would have had two id and two locus (one per table), but the tables have the same genes I can order tables in a relationship 1:1 using locus. #Problem 4.2 solution # %sql SELECT * FROM LocusGene, Germplasm WHERE LocusGene.Locus = Germplasm.Locus AND (Gene = ('MAA3') OR Gene=('SKOR')); # **Problem 4.2 explanation** # # As I did in the problem 4.1, I used SELECT * FROM command and I put that LocusGene.Locus = Germplasm.Locus but I have shown only MAA3 and SKOR genes. # + #Problem 4.3 solution import pymysql.cursors connection = pymysql.connect(host='localhost', user='root', password='', db='Germplasm_Locusgene', charset='utf8mb4', # note utf8... this is important for unusual characters! cursorclass=pymysql.cursors.DictCursor) #connection.autocommit = False # note that it is possible to delay putting changes into the database! #Report = open("Reports.tsv", 'w') try: with connection.cursor() as cursor: for chromosome in range(1,6): chr_name= "chromosome "+str(chromosome) regex_var = "AT"+str(chromosome)+"G.+" sql= "SELECT COUNT(*) AS %s FROM LocusGene, Germplasm WHERE \ LocusGene.Locus = Germplasm.Locus AND \ LocusGene.Locus REGEXP %s;" val= (chr_name, regex_var) #variable var contains data to insert in sql cursor.execute(sql, val) #execute sql and insert val data into %s. 
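# Note added for clarity (not in the original): two likely reasons the commented-out
# report-writing attempt below did not work are (1) the file handle is opened as
# `Report` (capital R) but written to as `report`, and (2) `cursor.fetchone()` consumes
# one result row per call, so calling it once inside print() and again inside
# report.write() hands the write a *different* row (None here, since each COUNT query
# returns a single row). A minimal sketch, assuming a `report` handle opened with
# open("Reports.tsv", "a"):
#     row = cursor.fetchone()
#     print(row)
#     report.write(str(row) + "\n")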
print(cursor.fetchone()) #report.write(str(cursor.fetchone())) finally: print("") connection.close() #report.close() #checkcontent = open("Reports.tsv", "r") #print(checkcontent.read()) #checkcontent.close() # - # **Problem 4.3 explanation** # # At first, I connected with the SQL library in python and use the database Germplasm_Locusgene. # With a for loop in range (1,6), I selected the chromosomos from 1 to 5. In the loop, I have created two variables; the first one to name the chromosome and the second one to write the regular expression of the AGIcode per each chromosome. # After that and using the command count in SQL I counted the number of genes there are in each chromosome. # + #Problem 4.4 solution import pymysql.cursors # Connect to the database connection = pymysql.connect(host='localhost', user='root', password='', db='Germplasm_Locusgene', charset='utf8mb4', # note utf8... this is important for unusual characters! cursorclass=pymysql.cursors.DictCursor) #connection.autocommit = False # note that it is possible to delay putting changes into the database! try: with connection.cursor() as cursor: # Read a single record for chromosome in range(1,6): chr_name= "chromosome "+str(chromosome) regex_var = "AT"+str(chromosome)+"G.+" sql= "SELECT AVG(LocusGene.ProteinLength) FROM LocusGene, Germplasm WHERE \ LocusGene.Locus = Germplasm.Locus AND \ LocusGene.Locus REGEXP %s;" val=(regex_var) cursor.execute(sql, val) print(cursor.fetchone()) finally: print("") connection.close() # - # **Problem 4.4 explanation** # Like in problem number 4.3 I used the library for SQL in python and the same loop for each chromosome. The difference between this one and the previous one is that I have used the command average (AVG). # # I couldn't have wroten the file using command Report = open("Reports.tsv", 'w') # report.write(str(cursor.fetchone())) # report.close() (This commands are in problem 4.3) # I thought that this way could be a great way to write the file reports but I don't know why the data didn't save. # Instead of this way, I used the wrong way to make the file reports. I ran the code, copied the output and pasting the output into a tsv file. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="hmg6CbLINQQ4" # **CHALLENGE OVERVIEW** # # I try different techniques as suggested in the provided links: # # 1. REDUCE MODEL SIZE # 2. PRUNING # 3. QUANTIZATION # 4. CLUSTERING # 5. DISTILLATION # # **PHASE 1 - BASELINE**
# I run the baseline training (the model exactly as it is delivered) so that I have a reference to optimize and compare against.
#
# **PHASE 2 - REDUCE MODEL**
# I decrease the size of every layer in the model to reduce the number of parameters and make it faster (the most naive approach).
# N.B. I also try distillation to train a lighter model, but since the simple size reduction performs better, I choose not to use distillation.
#
# **PHASE 3 - OPTIMIZATION**
# I experiment with the optimizations listed above on the given model, applying them singly and in combination. I then plot the results to find the best trade-off among 3 criteria:
#
# * Accuracy
# * Time (lower is better)
# * Model weight (lower is better)
#
# The goal is to keep accuracy as high as possible while decreasing time and weight as much as possible, as requested by the challenge.
#
# Using the reduced model and applying quantization and then pruning seems to be the best solution:
# the size reduction hugely decreases the model dimension, and applying pruning as the last operation seems more beneficial than using it before quantization.
#
# I choose the rqp model and save it as an .h5 file.
#
# **PHASE 4 - COREML**
# The third request is to convert the model to a supported format, preferably CoreML.
# I use the coremltools library to convert the model to the CoreML format so it can easily be used in an iOS environment.
#
# **EXTRA**
# The "run_model.py" program is provided to classify a single image with the CoreML model in a macOS environment.
# Command:
# python run_model.py --image *image_name*
# --image default option is 'fashion1.png' # # + id="x3XiYFRNk5Vh" import tensorflow as tf import numpy as np import os import time # + id="pIfh8hd5ljrF" colab={"base_uri": "https://localhost:8080/"} outputId="558e5879-5c06-4322-d083-a5854305e873" #Load the dataset. (x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data() # + id="ZbmJQ_EulozQ" #Add a trailing unitary dimension to make a 3D multidimensional array (tensor). # N x 28 x 28 --> N x 28 x 28 x 1 x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) # + colab={"base_uri": "https://localhost:8080/"} id="WRMMx-SNlr1t" outputId="8023e1eb-7d29-4ef1-ea97-0b0fd5173e56" #Convert the labels from integers to one-hot encoding. y_train = tf.keras.utils.to_categorical(y_train, 10) y_test = tf.keras.utils.to_categorical(y_test, 10) print(x_train.shape) print(y_train.shape) # + id="" LR = 1E-3 EPOCHS = 10 BATCH_SIZE = 64 # + id="kQQFM1KClxjy" # Define baseline (and redux) models and train/test functions def build_model(input_shape): model = tf.keras.models.Sequential() model.add(tf.keras.layers.BatchNormalization(input_shape=input_shape)) model.add(tf.keras.layers.Conv2D(64, (5, 5), padding='same', activation='elu')) model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2))) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.BatchNormalization(input_shape=input_shape)) model.add(tf.keras.layers.Conv2D(128, (5, 5), padding='same', activation='elu')) model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2))) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.BatchNormalization(input_shape=input_shape)) model.add(tf.keras.layers.Conv2D(256, (5, 5), padding='same', activation='elu')) model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2))) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(256)) model.add(tf.keras.layers.Activation('elu')) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(10)) model.add(tf.keras.layers.Activation('softmax')) return model # Reduced model using halved dimensions def build_model_reduced(input_shape): model = tf.keras.models.Sequential() model.add(tf.keras.layers.BatchNormalization(input_shape=input_shape)) model.add(tf.keras.layers.Conv2D(32, (5, 5), padding='same', activation='elu')) model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2))) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.BatchNormalization(input_shape=input_shape)) model.add(tf.keras.layers.Conv2D(64, (5, 5), padding='same', activation='elu')) model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2))) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.BatchNormalization(input_shape=input_shape)) model.add(tf.keras.layers.Conv2D(128, (5, 5), padding='same', activation='elu')) model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2))) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(128)) model.add(tf.keras.layers.Activation('elu')) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(10)) model.add(tf.keras.layers.Activation('softmax')) return model def train(x_train, y_train, x_test, y_test, redux = False): """ Train the model given the dataset and the global parameters (LR, EPOCHS and BATCH_SIZE). The model is automalically saved after the training. 
""" if redux: model = build_model_reduced(x_train.shape[1:]) else: model = build_model(x_train.shape[1:]) model.compile( optimizer=tf.keras.optimizers.Adam(learning_rate=LR), loss='categorical_crossentropy', metrics=['categorical_accuracy'], ) print(model.summary()) start_time = time.time() model.fit( x=x_train.astype(np.float32), y=y_train.astype(np.float32), epochs=EPOCHS, validation_data=(x_test.astype(np.float32), y_test.astype(np.float32)), batch_size=BATCH_SIZE, ) end_time = time.time() print("Train elapsed time: {} seconds".format(end_time - start_time)) if redux: model.save("fashion_mnist_model_redux.tf", overwrite=True) else: model.save("fashion_mnist_model.tf", overwrite=True) def test(x_test, y_test, redux = False): """ Load the saved model and evaluate it against the test set. """ if redux: model = tf.keras.models.load_model("./fashion_mnist_model_redux.tf") else: model = tf.keras.models.load_model("./fashion_mnist_model.tf") print(model.summary()) start_time = time.time() model.evaluate(x_test, y_test) end_time = time.time() print("Test elapsed time: {} seconds".format(end_time - start_time)) # + colab={"base_uri": "https://localhost:8080/"} id="IjJHdck_l2CR" outputId="18e6e689-39b5-42df-f5d5-af5036576a04" # BASELINE train(x_train, y_train, x_test, y_test) test(x_test, y_test) # + colab={"base_uri": "https://localhost:8080/"} id="sDdCz-Ir_AdR" outputId="ec7f1387-abbd-4d6b-be84-37ddf0c98cf8" #model dimension # !du -h fashion_mnist_model.tf # + id="UctEE8m-siux" colab={"base_uri": "https://localhost:8080/"} outputId="2dea8751-4baf-45b7-b714-ffc1863c3079" # !pip install -q tensorflow-model-optimization # + [markdown] id="qz0Y7gqpOqzD" # **OPTIMIZATION FUNCTIONS DEFINITION** # # # # + id="Dzp-KMDfonA2" #1 PRUNING import tensorflow_model_optimization as tfmot def prune_model(model): prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude # Compute end step to finish pruning after 2 epochs. batch_size = 128 epochs = 2 validation_split = 0.1 # 10% of training set will be used for validation set. num_images = x_train.shape[0] * (1 - validation_split) end_step = np.ceil(num_images / batch_size).astype(np.int32) * epochs # Define model for pruning. pruning_params = { 'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0.50, final_sparsity=0.80, begin_step=0, end_step=end_step) } model_for_pruning = prune_low_magnitude(model, **pruning_params) # `prune_low_magnitude` requires a recompile. 
model_for_pruning.compile(optimizer='adam', loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True), metrics=['accuracy']) print(model_for_pruning.summary()) model_for_pruning.save("pruned_model.tf", overwrite=True) # dimension = !du -h pruned_model.tf print(dimension) logdir = './' callbacks = [ tfmot.sparsity.keras.UpdatePruningStep(), tfmot.sparsity.keras.PruningSummaries(log_dir=logdir), ] model_for_pruning.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=validation_split, callbacks=callbacks) start_time = time.time() _, model_for_pruning_accuracy = model_for_pruning.evaluate( x_test, y_test, verbose=0) end_time = time.time() prune_time = end_time - start_time print("Test elapsed time: {} seconds".format(end_time - start_time)) print('Pruned test accuracy:', model_for_pruning_accuracy) return model, [model_for_pruning_accuracy, prune_time, float(dimension[2].split('M')[0])] # + id="r7pXwbfM5VSW" #2 QUANTIZATION def apply_quantization_to_all(layer): if isinstance(layer, (#tf.keras.layers.Dense #tf.keras.layers.Conv2D tf.keras.layers.MaxPooling2D, tf.keras.layers.Dropout )): return tfmot.quantization.keras.quantize_annotate_layer(layer) return layer model = tf.keras.models.load_model("./fashion_mnist_model.tf") annotated_model = tf.keras.models.clone_model( model, clone_function=apply_quantization_to_all, ) def quant_model(model): quantize_model = tfmot.quantization.keras.quantize_model # q_aware stands for for quantization aware. #q_aware_model = quantize_model(annotated_model) q_aware_model = tfmot.quantization.keras.quantize_apply(annotated_model) # `quantize_model` requires a recompile. q_aware_model.compile(optimizer='adam', loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True), metrics=['accuracy']) print(q_aware_model.summary()) q_aware_model.save("quant_model.tf", overwrite=True) # dimension = !du -h quant_model.tf print(dimension) train_images_subset = x_train[0:1000] # out of 60000 train_labels_subset = y_train[0:1000] q_aware_model.fit(train_images_subset, train_labels_subset, batch_size=500, epochs=1, validation_split=0.1) start_time = time.time() _, q_aware_model_accuracy = q_aware_model.evaluate( x_test, y_test, verbose=0) end_time = time.time() quant_time = end_time - start_time print("Test elapsed time: {} seconds".format(end_time - start_time)) print('Pruned test accuracy:', q_aware_model_accuracy) print(q_aware_model_accuracy, quant_time, dimension[2]) return model, [q_aware_model_accuracy, quant_time, float(dimension[2].split('M')[0])] # + id="cZJDwdYb-Tet" #3 Weight Clustering def cluster_model(model): model = tf.keras.models.load_model("./fashion_mnist_model.tf") cluster_weights = tfmot.clustering.keras.cluster_weights CentroidInitialization = tfmot.clustering.keras.CentroidInitialization clustering_params = { 'number_of_clusters': 10, 'cluster_centroids_init': CentroidInitialization.LINEAR } clustered_model = cluster_weights(model, **clustering_params) # Use smaller learning rate for fine-tuning clustered model opt = tf.keras.optimizers.Adam(learning_rate=1e-5) clustered_model.compile( loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True), optimizer=opt, metrics=['accuracy']) print(clustered_model.summary()) # Fine-tune model clustered_model.fit( x_train, y_train, batch_size=500, epochs=1, validation_split=0.1) start_time = time.time() _, clustered_model_accuracy = clustered_model.evaluate( x_test, y_test, verbose=0) end_time = time.time() clust_time = end_time - start_time print("Test elapsed time: {} 
seconds".format(end_time - start_time)) print('Clustered test accuracy:', clustered_model_accuracy) clustered_model.save("cluster_model.tf", overwrite=True) # dimension = !du -h cluster_model.tf print(dimension) return model, [clustered_model_accuracy, clust_time, float(dimension[2].split('M')[0])] # + id="hQ2Jw7jfpZyj" #4 Distillation class Distiller(tf.keras.Model): def __init__(self, student, teacher): super(Distiller, self).__init__() self.teacher = teacher self.student = student def compile( self, optimizer, metrics, student_loss_fn, distillation_loss_fn, alpha=0.1, temperature=10, ): """ Configure the distiller. Args: optimizer: Keras optimizer for the student weights metrics: Keras metrics for evaluation student_loss_fn: Loss function of difference between student predictions and ground-truth distillation_loss_fn: Loss function of difference between soft student predictions and soft teacher predictions alpha: weight to student_loss_fn and 1-alpha to distillation_loss_fn temperature: Temperature for softening probability distributions. Larger temperature gives softer distributions. """ super(Distiller, self).compile(optimizer=optimizer, metrics=metrics) self.student_loss_fn = student_loss_fn self.distillation_loss_fn = distillation_loss_fn self.alpha = alpha self.temperature = temperature def train_step(self, data): # Unpack data x, y = data # Forward pass of teacher teacher_predictions = self.teacher(x, training=False) with tf.GradientTape() as tape: # Forward pass of student student_predictions = self.student(x, training=True) # Compute losses student_loss = self.student_loss_fn(y, student_predictions) distillation_loss = self.distillation_loss_fn( tf.nn.softmax(teacher_predictions / self.temperature, axis=1), tf.nn.softmax(student_predictions / self.temperature, axis=1), ) loss = self.alpha * student_loss + (1 - self.alpha) * distillation_loss # Compute gradients trainable_vars = self.student.trainable_variables gradients = tape.gradient(loss, trainable_vars) # Update weights self.optimizer.apply_gradients(zip(gradients, trainable_vars)) # Update the metrics configured in `compile()`. self.compiled_metrics.update_state(y, student_predictions) # Return a dict of performance results = {m.name: m.result() for m in self.metrics} results.update( {"student_loss": student_loss, "distillation_loss": distillation_loss} ) return results def test_step(self, data): # Unpack the data x, y = data # Compute predictions y_prediction = self.student(x, training=False) # Calculate the loss student_loss = self.student_loss_fn(y, y_prediction) # Update the metrics. 
self.compiled_metrics.update_state(y, y_prediction) # Return a dict of performance results = {m.name: m.result() for m in self.metrics} results.update({"student_loss": student_loss}) return results # Create the student def get_student(input_shape): student = tf.keras.models.Sequential() student.add(tf.keras.layers.BatchNormalization(input_shape=input_shape)) student.add(tf.keras.layers.Conv2D(64, (5, 5), padding='same', activation='elu')) student.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2))) student.add(tf.keras.layers.Dropout(0.25)) student.add(tf.keras.layers.BatchNormalization(input_shape=input_shape)) student.add(tf.keras.layers.Conv2D(128, (5, 5), padding='same', activation='elu')) student.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2))) student.add(tf.keras.layers.Dropout(0.25)) student.add(tf.keras.layers.BatchNormalization(input_shape=input_shape)) student.add(tf.keras.layers.Conv2D(256, (5, 5), padding='same', activation='elu')) student.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2))) student.add(tf.keras.layers.Dropout(0.25)) student.add(tf.keras.layers.Flatten()) student.add(tf.keras.layers.Dense(256)) student.add(tf.keras.layers.Activation('elu')) student.add(tf.keras.layers.Dropout(0.5)) student.add(tf.keras.layers.Dense(10)) student.add(tf.keras.layers.Activation('softmax')) return student def distill_model(model): teacher = model student = get_student(x_train.shape[1:]) # Clone student for later comparison student_scratch = tf.keras.models.clone_model(student) # Initialize and compile distiller distiller = Distiller(student=student, teacher=teacher) distiller.compile( optimizer=tf.keras.optimizers.Adam(), metrics=[tf.keras.metrics.CategoricalAccuracy()], student_loss_fn=tf.keras.losses.CategoricalCrossentropy(from_logits=True), distillation_loss_fn=tf.keras.losses.KLDivergence(), alpha=0.1, temperature=10, ) print(x_train.shape, y_train.shape) # Distill teacher to student distiller.fit(x_train, y_train, epochs=10) # Evaluate student on test dataset distiller.evaluate(x_test, y_test) return distiller # + colab={"base_uri": "https://localhost:8080/"} id="x36BMSGbsc8j" outputId="95f8a4d9-1d1b-4b66-9c5a-0a1b95502f22" #Run Distillation t 10 ep 10 ## n.b. here I notice poor performance and decide to drop it model = tf.keras.models.load_model("./fashion_mnist_model.tf") distilled_model = distill_model(model) # + id="1Voy8CVdyR3s" colab={"base_uri": "https://localhost:8080/"} outputId="13776327-4396-4722-da1b-ede388cd1a14" #Run Distillation ## n.b. 
here I notice poor performance and decide to drop it distilled_model = distill_model(model) # + [markdown] id="sSwnU1CjPYsH" # **PHASE 1 - BASELINE** # + colab={"base_uri": "https://localhost:8080/"} id="XKc86pv1Ngua" outputId="3402d844-2a50-48d9-a0cf-37ed6c764f43" # BASELINE model = tf.keras.models.load_model("./fashion_mnist_model.tf") start_time = time.time() _, model_accuracy = model.evaluate(x_test, y_test) end_time = time.time() baseline_time = end_time - start_time print('Baseline test accuracy:', model_accuracy) print("Test elapsed time: {} seconds".format(end_time - start_time)) # baseline_dim = !du -h fashion_mnist_model.tf baseline_dim = float(baseline_dim[2].split('M')[0]) # + [markdown] id="fbvjqRC4mV1d" # **PHASE 2 - REDUCE MODEL** # + colab={"base_uri": "https://localhost:8080/"} id="Z3xCVCHPNjQq" outputId="7dafed41-b6ba-4ac9-8bdb-7cc9f6bc6af7" # REDUX (model with reduced dimensions) train(x_train, y_train, x_test, y_test, redux = True) #test(x_test, y_test, redux = True) model = tf.keras.models.load_model("./fashion_mnist_model_redux.tf") start_time = time.time() _, redux_accuracy = model.evaluate(x_test, y_test) end_time = time.time() redux_time = end_time - start_time print('Redux test accuracy:', model_accuracy) print("Test elapsed time: {} seconds".format(end_time - start_time)) # redux_dim = !du -h fashion_mnist_model_redux.tf redux_dim = float(redux_dim[2].split('M')[0]) # + [markdown] id="HQjs3BopmPHr" # **PHASE 3 - OPTIMIZATION** # + colab={"base_uri": "https://localhost:8080/"} id="yXEUICcqtIA7" outputId="533722e6-0c40-42f5-de48-7e9f05518a82" #PRUNING model = tf.keras.models.load_model("./fashion_mnist_model.tf") pruned_model, pruned_results = prune_model(model) # prune_dim = !du -h pruned_model.tf prune_dim = float(prune_dim[2].split('M')[0]) # + colab={"base_uri": "https://localhost:8080/"} id="b5aqxGelugV7" outputId="2dbb04ad-d15e-4eda-b9ff-74f521b53447" #QUANTIZATION model = tf.keras.models.load_model("./fashion_mnist_model.tf") quantized_model, quantized_results = quant_model(model) # quant_dim = !du -h quant_model.tf quant_dim = float(quant_dim[2].split('M')[0]) # + colab={"base_uri": "https://localhost:8080/"} id="6MDVlFdQvTtc" outputId="e1ccd136-9c01-4cac-da5a-6a818f52284d" #CLUSTER model = tf.keras.models.load_model("./fashion_mnist_model.tf") clusterd_model, clustered_results = cluster_model(model) # clust_dim = !du -h cluster_model.tf clust_dim = float(clust_dim[2].split('M')[0]) # + [markdown] id="TjbGHw5MPkcP" # **COMBINATION OF OPTIMIZATIONS** # + colab={"base_uri": "https://localhost:8080/"} id="1OJTvWFmOI2t" outputId="74c5986d-7620-4c7e-f773-ae1cc225a767" #OPTIMIZATIONS ON BASELINE #QUANT + PRUNE model = tf.keras.models.load_model("./fashion_mnist_model.tf") qp_model, qp_results = quant_model(model) qp_model, qp_results = prune_model(qp_model) #PRUNE + QUANT model = tf.keras.models.load_model("./fashion_mnist_model.tf") pq_model, pq_results = prune_model(model) pq_model, pq_results = quant_model(pq_model) #QUANT + CLUST model = tf.keras.models.load_model("./fashion_mnist_model.tf") qc_model, qc_results = quant_model(model) qc_model, qc_results = cluster_model(qc_model) #CLUST + QUANT model = tf.keras.models.load_model("./fashion_mnist_model.tf") cq_model, cq_results = cluster_model(model) cq_model, cq_results = quant_model(cq_model) #PRUNE + CLUST model = tf.keras.models.load_model("./fashion_mnist_model.tf") pc_model, pc_results = prune_model(model) pc_model, pc_results = cluster_model(pc_model) # + colab={"base_uri": "https://localhost:8080/"} 
id="IaKVEtyJyq2K" outputId="6c22bacd-f27b-44aa-a541-1059be8603b9" ## OPTIMIZATIONS ON REDUX #QUANT + PRUNE model = tf.keras.models.load_model("./fashion_mnist_model_redux.tf") rqp_model, rqp_results = quant_model(model) rqp_model, rqp_results = prune_model(rqp_model) #PRUNE + QUANT model = tf.keras.models.load_model("./fashion_mnist_model_redux.tf") rpq_model, rpq_results = prune_model(model) rpq_model, rpq_results = quant_model(rpq_model) #QUANT + CLUST model = tf.keras.models.load_model("./fashion_mnist_model_redux.tf") rqc_model, rqc_results = quant_model(model) rqc_model, rqc_results = cluster_model(rqc_model) #CLUST + QUANT model = tf.keras.models.load_model("./fashion_mnist_model_redux.tf") rcq_model, rcq_results = cluster_model(model) rcq_model, rcq_results = quant_model(rcq_model) #PRUNE + CLUST model = tf.keras.models.load_model("./fashion_mnist_model_redux.tf") rpc_model, rpc_results = prune_model(model) rpc_model, rpc_results = cluster_model(rpc_model) # + colab={"base_uri": "https://localhost:8080/"} id="apl8DDna4ag8" outputId="24117a0a-54ee-483d-ca07-3cc6e9f6ee77" # All OPT # q + p + c model = tf.keras.models.load_model("./fashion_mnist_model.tf") qpc_model, qpc_results = quant_model(model) qpc_model, qpc_results = prune_model(qpc_model) qpc_model, qpc_results = cluster_model(qpc_model) # r + q + p + c model = tf.keras.models.load_model("./fashion_mnist_model_redux.tf") rqpc_model, rqpc_results = quant_model(model) rqpc_model, rqpc_results = prune_model(rqpc_model) rqpc_model, rqpc_results = cluster_model(rqpc_model) # + id="6XoRiFkL6jqT" # b -> baseline # r -> redux # p -> pruning # q -> quantization # c -> clustering models = ['b', 'r', 'p', 'q', 'c', 'qp', 'pq', 'qc', 'cq', 'pc', 'rqp', 'rpq', 'rqc', 'rcq', 'rpc', 'qpc', 'rqpc'] scores = [model_accuracy, redux_accuracy, pruned_results[0], quantized_results[0], clustered_results[0], qp_results[0], pq_results[0], qc_results[0], cq_results[0], pc_results[0], rqp_results[0], rpq_results[0], rqc_results[0], rcq_results[0], rpc_results[0], qpc_results[0], rqpc_results[0]] times = [baseline_time, redux_time, pruned_results[1], quantized_results[1], clustered_results[1], qp_results[1], pq_results[1], qc_results[1], cq_results[1], pc_results[1], rqp_results[1], rpq_results[1], rqc_results[1], rcq_results[1], rpc_results[1], qpc_results[1], rqpc_results[1]] dimensions = [baseline_dim, redux_dim, pruned_results[2], quantized_results[2], clustered_results[2], qp_results[2], pq_results[2], qc_results[2], cq_results[2], pc_results[2], rqp_results[2], rpq_results[2], rqc_results[2], rcq_results[2], rpc_results[2], qpc_results[2], rqpc_results[2]] # + colab={"base_uri": "https://localhost:8080/", "height": 808} id="YvcX6cKklAEW" outputId="15f5ffac-ec39-4861-e584-cc353189123b" #Plot all configurations for tradeoff import matplotlib.pyplot as plt plt.figure() plt.title('Score') plt.plot(models, scores) plt.grid() plt.show() plt.figure() plt.title('Time') plt.plot(models, times) plt.grid() plt.show() plt.figure() plt.title('Dimension') plt.plot(models, dimensions) plt.grid() plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="--ecO7rWl8SV" outputId="39966ec1-36a4-46ae-eef0-c6d2af72cf03" # Percentage gain in performance with respect to baseline def get_perc_gain(final_results): base_acc = scores[0] base_time = times[0] base_dim = dimensions[0] perc_acc_gain = -100 * (1 - final_results[0] / base_acc) perc_time_gain = -100 * (1 - final_results[1] / base_time) perc_dim_gain = -100 * (1 - final_results[2] / base_dim) ''' print('Opt 
model accuracy: ' + str(final_results[0])) print('Baseline accuracy: ' + str(base_acc)) print('Opt model time: ' + str(final_results[1])) print('Baseline time: ' + str(base_time)) print('Opt model dimension: ' + str(final_results[2])) print('Baseline dimension: ' + str(base_dim)) print('\n') ''' print('Accuracy gain flat: ' + str(round(final_results[0] - base_acc, 3)) + ' %' ) print('Accuracy gain percentage: ' + str(round(perc_acc_gain, 2)) + ' %' ) print('Time gain flat: ' + str(round(final_results[1] - base_time, 2)) + ' s' ) print('Time gain percentage: ' + str(round(perc_time_gain, 2)) + ' %') print('Dimension gain flat: ' + str(final_results[2] - base_dim) + ' Mb' ) print('Dimension gain percentage: ' + str(round(perc_dim_gain, 2)) + ' %') print('\n') print('rqp model') get_perc_gain(rqp_results) print('rpq model') get_perc_gain(rpq_results) print('q model') get_perc_gain(quantized_results) # + colab={"base_uri": "https://localhost:8080/"} id="OQxaE52Y94JH" outputId="ea640752-57e0-40d4-aa52-afff7c7915de" # number of params gain model = tf.keras.models.load_model("./fashion_mnist_model.tf") param_gain = rqp_model.count_params() - model.count_params() perc_param_gain = str(round(-100 * (1 - rqp_model.count_params() / model.count_params()), 2)) print('Number of parameters flat gain: ' + str(param_gain)) print('Number of parameters perc gain: ' + str(perc_param_gain) + '%') # + id="1AmAkGP1-I8k" # + [markdown] id="wvKrG1FPb09e" # **CHOICE**
# The best solutions are: # - q: we drop it because it does not reduce the number of parameters # - rqp: best performance for Time and Dim but Accuracy is (slightly) lower than rpq # - rpq: less efficient in terms of Time and Dim when compared to rqp but higher Accuracy # # We choose **rqp** model as it has the overall best performance in Time and Dimension while having a relatively small amount of accuracy loss(<0.01%) when compared to rpq. We choose to give more emphasis to the Time parameter as we run the model on a mobile. # # The final (optimized) model performace when compared to baseline (for percentages look at previous cells): # - Accuracy gain flat: -0.001 % # - Time gain flat: -0.51 s # - Dimension gain flat: -12.3 Mb # - Parameter number gain flat: -1,213,184 # + id="FWhfsUACoTTu" # Save the chosen model - rqp rqp_model.save('final_model.tf') final_model = tf.keras.models.load_model("./final_model.tf") final_model.save('final_model.h5') # + colab={"base_uri": "https://localhost:8080/"} id="e7FhEyjniPKs" outputId="29827083-f354-44bc-da5c-70755e4a2b44" final_model.summary() # + id="chrZ2MlqbeGn" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="43d2f2c0-d9bd-41ab-ebea-6c658d15f916" #Plot the tradeoff as percentage gain when compared to baseline ''' base_acc = scores[0] base_time = times[0] base_dim = dimensions[0] for i, v in enumerate(scores): scores[i] = 1 - float(scores[i]/base_acc) times[i] = 1 - float(times[i]/base_time) dimensions[i] = 1 - float(dimensions[i]/base_dim) ''' # + [markdown] id="XKpph2JwflRj" # **PHASE 4 - COREML** # + colab={"base_uri": "https://localhost:8080/"} id="eLlb0pfgqYS4" outputId="28c71317-64c9-4ba2-eeaa-b903f8f3bea7" # !pip install coremltools # + id="z5_WGK2HnXoH" import coremltools as ct # + colab={"base_uri": "https://localhost:8080/"} id="0QImFEeGgua4" outputId="8ac456e7-2077-434e-d3d2-afb4a33f907e" # Convert TF model to CoreML cml_model = ct.convert('final_model.h5') # + id="wPJnejfCeCW4" # Change input and output names spec = cml_model.get_spec() ct.utils.rename_feature(spec, 'batch_normalization_9_input', 'input_image') ct.utils.rename_feature(spec, 'Identity', 'output') cml_model = ct.models.MLModel(spec) # + id="QC3bxW9iiHIU" # Save final model cml_model.save("final_model_cml.mlmodel") # + colab={"base_uri": "https://localhost:8080/"} id="laQuobRqid6N" outputId="e30ab9c7-40ec-4fc4-9566-f7efbe6831dd" cml_model # + colab={"base_uri": "https://localhost:8080/", "height": 67} id="HPwKGTq7_l0A" outputId="d2f696c6-95d0-433c-912a-ef9c8f5384c0" ''' # Define the input type as image, # set pre-processing parameters to normalize the image # to have its values in the interval [-1,1] # as expected by the mobilenet model image_input = ct.ImageType(shape=(1, 28, 28, 1,), bias=[-1,-1,-1], scale=1/127) # set class labels classifier_config = ct.ClassifierConfig(class_labels) # Convert the model using the Unified Conversion API model = ct.convert( final_model, inputs=[image_input], classifier_config=classifier_config, ) ''' # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="ZaXJPI__9Aux" import os import tensorflow as tf from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization, Input, GlobalAveragePooling2D from tensorflow.keras import Sequential from tensorflow.keras.preprocessing.image import ImageDataGenerator from IPython.display 
import clear_output import matplotlib.pyplot as plt # + colab={"base_uri": "https://localhost:8080/"} id="BUpQK2r59gEr" outputId="77bfadc3-9166-4234-caa6-e065ce559200" from google.colab import drive drive.mount('drive') # + id="OZ6fZp0SCDST" # !unzip drive/MyDrive/Datasets/X-Ray_Validation_Dataset.zip clear_output() # + id="_UAe7N3JDmCB" INPUT_SHAPE = 160 BATCH_SIZE = 64 # + colab={"base_uri": "https://localhost:8080/"} id="3m6b6dDBCDGV" outputId="7398e6f4-a804-4764-f179-8106085bb560" generator = ImageDataGenerator(rescale=1./255) train_data = generator.flow_from_directory( "DATASET/train", target_size=(INPUT_SHAPE, INPUT_SHAPE), batch_size=BATCH_SIZE, class_mode='binary', shuffle=True) validation_data = generator.flow_from_directory( "DATASET/val", target_size=(INPUT_SHAPE, INPUT_SHAPE), batch_size=BATCH_SIZE, class_mode='binary', shuffle=True) test_data = generator.flow_from_directory( "DATASET/test", target_size=(INPUT_SHAPE, INPUT_SHAPE), batch_size=BATCH_SIZE, class_mode='binary', shuffle=True) # + colab={"base_uri": "https://localhost:8080/"} id="tbAXb_GfI6nX" outputId="2333a6f2-ac0b-4cfd-b914-82f0a765ee80" train_data.class_indices # + colab={"base_uri": "https://localhost:8080/"} id="-E-Nw34l_ICN" outputId="f4ad5976-cd4f-4137-9e5d-c284d2c31410" model = Sequential([ Input(shape=(INPUT_SHAPE, INPUT_SHAPE, 3)), Conv2D(32, (3,3), activation='relu'), MaxPooling2D(pool_size=(2,2)), Conv2D(64, (3,3), activation='relu'), MaxPooling2D(pool_size=(2,2)), Conv2D(128, (3,3), activation='relu'), MaxPooling2D(pool_size=(2,2)), Conv2D(256, (3,3), activation='relu'), MaxPooling2D(pool_size=(2,2)), Conv2D(512, (3,3), activation='relu'), MaxPooling2D(pool_size=(2,2)), Flatten(), Dense(512, activation='relu'), Dropout(0.4), Dense(256, activation='relu'), Dropout(0.4), Dense(1, activation='sigmoid') ]) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) model.summary() # + colab={"base_uri": "https://localhost:8080/"} id="TmJ_8WcBBB8Q" outputId="b3c51b67-8197-410c-a2be-2160b5dbc3bb" with tf.device("/device:GPU:0"): history = model.fit_generator( train_data, steps_per_epoch = train_data.samples // BATCH_SIZE, validation_data = validation_data, validation_steps = validation_data.samples // BATCH_SIZE, epochs = 2 ) # + colab={"base_uri": "https://localhost:8080/", "height": 613} id="RtWJmp3tIZUX" outputId="8926703e-ecb2-4452-c334-9416a82c03f5" plt.figure(figsize=(20,10)) for i, met in enumerate(['accuracy', 'loss']): plt.subplot(1,2,i+1) plt.plot(history.history[met]) plt.plot(history.history["val_"+met]) plt.title('Model '+met.capitalize()) plt.xlabel('epochs') plt.ylabel(met) plt.legend(['train', 'val']) # + colab={"base_uri": "https://localhost:8080/"} id="bi_52gEcNGFz" outputId="e2045070-1a9a-4f12-e018-30f98b5a5d3f" model.evaluate(test_data) # + id="CEzqDFXfTxvG" model.save("validator.h5") # + id="bnQeVmDdOAU2" # + id="mwbkfTzdV2Ri" import cv2 import numpy as np # + id="9XKPBHi3V4ir" img1 = cv2.resize(cv2.imread("wf.jpeg"),(160,160))/255. img2 = cv2.resize(cv2.imread("bb.jpg"),(160,160))/255. img3 = cv2.resize(cv2.imread("1.jpg"),(160,160))/255. img4 = cv2.resize(cv2.imread("2.png"),(160,160))/255. img5 = cv2.resize(cv2.imread("3.jpg"),(160,160))/255. 
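# A caveat added for clarity (not part of the original notebook): cv2.imread returns images
# in BGR channel order, while the training generator (ImageDataGenerator.flow_from_directory)
# feeds the network RGB images, so converting before predicting keeps the channel order
# consistent. A minimal sketch; the helper name `load_rgb` is made up for illustration.
# +
import cv2

def load_rgb(path, size=160):
    """Read an image, convert BGR to RGB, resize and rescale to [0, 1]."""
    img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
    return cv2.resize(img, (size, size)) / 255.

# e.g. img1 = load_rgb("wf.jpeg"), img2 = load_rgb("bb.jpg"), etc.
# -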
# + colab={"base_uri": "https://localhost:8080/"} id="5FItACZMWToy" outputId="840c3ed9-6f1d-4bf7-bd2c-0c66af8ea5e3" x=np.array([img1,img2,img3,img4,img5]) x.shape # + colab={"base_uri": "https://localhost:8080/"} id="e81sXEWiW0EH" outputId="94fecf12-3a9a-483c-e3eb-eac80a51dba0" pred = model.predict(x) y_classes = ((pred > 0.5)+0).ravel() print(pred) y_classes # + id="lpifNXYXavY_" # !cp validator.h5 drive/MyDrive/Covid-Pneumonia_Detection_Model/xray_validator_v2.h5 # + id="CtmxKrp0XmJr" # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # + import numpy as np import progressbar import random import os import cv2 from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import LabelBinarizer from keras.applications import VGG16 from keras.applications import imagenet_utils from keras.preprocessing.image import img_to_array from keras.datasets import cifar10 import matplotlib.pyplot as plt # %matplotlib inline from helpers import HDF5DatasetWriter from helpers import Utils # - import tensorflow as tf config = tf.ConfigProto() config.gpu_options.allow_growth = True session = tf.Session(config=config) output_path_train = "../input/data/cifar/vgg16_features_train.hdf5" output_path_test = "../input/data/cifar/vgg16_features_test.hdf5" batch_size = 32 buffer_size = 1000 model = VGG16(weights='imagenet', include_top=False) labels_name = np.array(["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]) (x_train, y_train), (x_test, y_test) = cifar10.load_data() def extract_feature(model, x, y, output_path, batch_size = 32, buffer_size = 1000): dataset = HDF5DatasetWriter((x.shape[0], 512 * 7 * 7), (y.shape[0], 10), output_path, dataKey="features", bufSize = buffer_size) dataset.storeClassLabels(labels_name) lb = LabelBinarizer() y = lb.fit_transform(y) widgets = ["Extracting Features: ", progressbar.Percentage(), " ", progressbar.Bar(), " ", progressbar.ETA()] pbar = progressbar.ProgressBar(maxval=x.shape[0], widgets=widgets).start() for i in np.arange(0, x.shape[0], batch_size): batchData = x[i:i + batch_size] batchLabels = y[i:i + batch_size] batchImages = [] for (j, image) in enumerate(batchData): image = cv2.resize(image, (224, 224)) image = img_to_array(image) image = np.expand_dims(image, axis=0) image = imagenet_utils.preprocess_input(image) / 255.0 batchImages.append(image) batchImages = np.vstack(batchImages) features = model.predict(batchImages, batch_size=batch_size) features = features.reshape((features.shape[0], 512 * 7 * 7)) dataset.add(features, batchLabels) pbar.update(i) dataset.close() pbar.finish() extract_feature(model, x_train, y_train, output_path_train, batch_size = 32, buffer_size = 1000) extract_feature(model, x_test, y_test, output_path_test, batch_size = 32, buffer_size = 1000) import h5py db = h5py.File(output_path_train) list(db.keys()) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python # language: python # name: conda-env-python-py # --- # #### Palindromes # # A palindrome is a word (or words) spelled the same backwards and forwards str1a = "bob" str1a[::-1] str1b = "Bob" str1b[::-1] # Technically, both are palindromes but using the _step_ technique and a _-1_ index (which is shorthand for reversal) but aren't 
equivalents str2 = "race car" str2[::-1] #display in reverse def checkIfPalindrome1(string): result = True stringLength = len(string) stringHalfLength = stringLength//2 index = 0 while (index <= stringHalfLength): if (string[index] != string[stringLength - 1 - index]): result = False index += 1 return result print(checkIfPalindrome1(str1a)) # It only returns True for str1a, how come? # # Like math, you can easily use a calculator or computer for division but they show you the long-hand method so you can better understand the process. Below, is an alternative way of doing it. # # This highlights an important point about programming: often, the solutions are divergent and not convergent (that is there is not only one way to arrive at an answer). def checkIfPalindrome2(string): result = False stringReversed = string[::-1] if (string == stringReversed): result = True return result print(checkIfPalindrome2(str1a)) print(checkIfPalindrome2(str1b)) # This didn't work because the first letter was capitalised so let's modify the function to deal with this case. def checkIfPalindrome2a(string): result = False stringLower = string.lower() stringReversed = stringLower[::-1] #print(stringLower, stringReversed) if (stringLower == stringReversed): result = True return result print(checkIfPalindrome2a(str1b)) print(checkIfPalindrome2a(str2)) # This, however doesn't work when there's a whitespace in the middle so we've modified the code. The point is # , there will always be exceptions... # # Debugging tip: Sometimes a simple print statement can help you trace your logic if you are not using an Integrated Development Environment (IDE) and can't see the variable's contents. def checkIfPalindrome2b(string): result = False stringLower = string.lower().strip().replace(" ", "") stringReversed = stringLower[::-1] stringReversed = stringLower[::-1] #print(stringLower, stringReversed) if (stringLower == stringReversed): result = True return result print(checkIfPalindrome2b(str2)) # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: r # --- # --- # output: html_notebook # title: "Exploratory Analysis" # --- source(rprojroot::is_git_root$find_file('lib/R/utils.R')) config <- setup(packages=c('dplyr', 'GEOquery', 'ggplot2', 'jetset')) # ## Choose dataset config$geo_datasets %>% paste(seq_along(.), ., sep=': ') %>% cat(sep=', ') # + gse_id <- config$geo_datasets[6] cat('Selected dataset:', gse_id) # - # # Load data # + eset <- load_data(gse_id) print(eset) # + pdata <- read.delim(sprintf('data/meta/%s.tsv', gse_id), check.names=FALSE, colClasses='character') annot <- gse_id %>% parse_annotation(pdata) %>% as.data.frame() %>% cbind(pdata) %>% annot %>% filter(her2 == 'HER2+') %>% select(treatment, outcome) %>% table() # + platform <- c(GPL96='hgu133a', GPL570='hgu133plus2') probes <- jmap(platform[attr(eset, 'annotation')], symbol=config$genes) samples <- annot %>% filter(her2 == 'HER2+' & treatment != 'none' & !is.na(outcome)) %>% getElement('geo_accession') dat <- gse_id %>% load_data() %>% exprs() %>% extract(probes, samples) %>% t() %>% as.data.frame() %>% setNames(config$genes) %>% cbind(select(annot, geo_accession, treatment, outcome)) print(head(dat)) # - # # Expression levels # + plot_expr <- function(gene, dat) { p_value <- with(dat, { x <- dat[outcome == 'pCR', gene] y <- dat[outcome == 'RD', gene] t.test(x, y)$p.value }) ggplot(dat) + geom_boxplot() + 
geom_jitter(position=position_jitter(width=0.1)) + aes_string(x='outcome', y=gene, fill='outcome') + ggtitle(sprintf('P-value for t-test: %.3f', p_value)) } for (gene in config$genes) { print(plot_expr(gene, dat)) } # - # ## Correlation between gene expression levels # + plot_corr <- function(pair, dat) { ggplot(dat) + geom_point() + aes_string(pair[1], pair[2], color='outcome') + ggtitle(sprintf('Correlation: %.2f', cor(dat[[pair[1]]], dat[[pair[2]]]))) } gene_pairs <- combn(config$genes, 2, simplify=FALSE) for (pair in gene_pairs) { print(plot_corr(pair, dat)) } # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- # The `CascModel` class provides an interface for loading and manipulating pre-generated IGMF cascade models. This class reweights and interpolates IGMF model tables to generate models for an arbitrary choice of IGMF parameters, source geometry, and intrinisc SED. To construct an instance you can call the `create_from_fits` method on a IGMF table FITS file. from ebltable.tau_from_model import OptDepth # + # %matplotlib inline from astropy.table import Table import matplotlib.pyplot as plt import matplotlib.cm from fermipy.spectrum import PLExpCutoff, PowerLaw from haloanalysis.model import make_prim_model, make_casc_model from haloanalysis.model import CascModel from haloanalysis.utils import Axis import numpy as np #cm0 = CascModel.create_from_fits('results_th_jet6.00_z0.13_tmax1e+07.fits.gz') cm0 = CascModel.create_from_fits( '/nfs/farm/g/glast/u/mmeyer/projects/FermiHalo/Output/EBLm6/th_jet6.00/gam-2.00/results_merged_z_th6d_t1e7.fits', ) cm0.set_eblmodel(eblmodel = 'dominguez') axis_eobs = cm0.axes[1] emin = 10**axis_eobs.lo emax = 10**axis_eobs.hi ectr = 10**axis_eobs.centers deltae = emax-emin efct = ectr**2/deltae # - # The `CascModel` class provides the following methods for evaluating the spectral/spatial model of a source: # * `prim_flux` : computes the flux vs. energy for the EBL-absorbed primary component # * `casc_flux` : computes the flux vs. energy for the cascade component # * `casc_r68` : computes the width vs. energy for the cascade component # # These methods accept the following input arguments: # * `inj_spectrum` : spectral function object representing the model for the injection (i.e. unabsorbed) SED # * `p0` : Parameters of the IGMF model (e.g. 
Lcoh and B) # * `p1` : Parameters of the chosen model for the injection spectrum # # + # Injection spectrum inj_spec = PLExpCutoff([1E-13,-1.5,10E6],scale=1E3) # energies in MeV #inj_spec = PowerLaw([1E-13,-1.5],scale=1E3) axis_eobs = Axis('eobs',np.linspace(2,6,33)) # energies in MeV inj_flux = inj_spec.flux(emin,emax) # injected flux #prim_flux = cm0.prim_flux(inj_spec,[0.14,0.0,-16.0]) # primary flux for injection spec and IGMF with Lcoh = 1Mpc and 1e-16 G #casc_flux = cm0.casc_flux(inj_spec,[0.14,0.0,-16.0]) # cascade flux for injection spec and IGMF with Lcoh = 1Mpc and 1e-16 G prim_flux = cm0.prim_flux(inj_spec,[(0.14,0.5),(0.0,0.0),(-16.0,-16.)]) casc_flux = cm0.casc_flux(inj_spec,[(0.14,0.5),(0.0,0.0),(-16.0,-16.)]) print prim_flux.shape print casc_flux.shape plt.figure() plt.loglog(ectr,inj_flux*efct,label='Injection Spectrum',color='k') plt.loglog(ectr,prim_flux[:,1]*efct,label='Primary Spectrum',color='b') plt.loglog(ectr,casc_flux[:,1]*efct,label='Cascade Spectrum',color='g') plt.loglog(ectr,prim_flux[:,0]*efct,label='Primary Spectrum',color='b', ls = '--') plt.loglog(ectr,casc_flux[:,0]*efct,label='Cascade Spectrum',color='g', ls ='--') plt.gca().set_ylim(1E-10) plt.gca().legend(frameon=False,loc='lower left') plt.gca().set_xlabel('Energy [MeV]') # + inj_spec = PLExpCutoff([1E-13,-1.5,10E6],scale=1E3) nstep = 9 igmf_val = np.linspace(-20,-12,nstep) lcoh_val = np.linspace(-4,4,nstep) prim_flux = cm0.prim_flux(inj_spec,[0.0,-16.0]) inj_flux = inj_spec.flux(emin,emax) # Cascade Flux at Constant Lcoh casc_flux = cm0.casc_flux(inj_spec,[0.0,igmf_val]) fig = plt.figure(figsize=(15,5)) fig.add_subplot(121) plt.loglog(ectr,inj_flux*ectr,color='k',linewidth=1.5,label='Inj Spectrum') plt.loglog(ectr,prim_flux*ectr,color='k',linewidth=1.5,linestyle='--',label='Prim Spectrum') for i in range(casc_flux.shape[1]): plt.loglog(ectr,casc_flux[:,i]*efct,color=matplotlib.cm.jet(float(i)/float(nstep)), label='B = %5.2g G'%10**igmf_val[i]) plt.gca().set_ylim(1E-10,1E-4) plt.gca().set_title('lcoh = %5.2g Mpc'%1.0) plt.gca().legend(frameon=False,loc='upper right',prop={'size':10},ncol=2) plt.gca().set_xlabel('Energy [MeV]') plt.gca().set_ylabel('E$^{2}$dFdE [MeV cm$^{-2}$ s$^{-1}$]') # Cascade Flux at Constant Bfield fig.add_subplot(122) casc_flux = cm0.casc_flux(inj_spec,[lcoh_val,-16.0]) plt.loglog(ectr,inj_flux*ectr,color='k',linewidth=1.5,label='Inj Spectrum') plt.loglog(ectr,prim_flux*ectr,color='k',linewidth=1.5,linestyle='--',label='Prim Spectrum') for i in range(casc_flux.shape[1]): plt.loglog(ectr,casc_flux[:,i]*efct,color=matplotlib.cm.jet(float(i)/float(nstep)), label='Lcoh = %5.2g Mpc'%10**lcoh_val[i]) plt.gca().set_ylim(1E-10,1E-4) plt.gca().set_title('B = %5.2g G'%10**-16.0) plt.gca().legend(frameon=False,loc='upper right',prop={'size':10},ncol=2) plt.gca().set_xlabel('Energy [MeV]') plt.gca().set_ylabel('E$^{2}$dFdE [MeV cm$^{-2}$ s$^{-1}$]') # R68 at Constant Lcoh fig = plt.figure(figsize=(15,5)) fig.add_subplot(121) casc_r68 = cm0.casc_r68(inj_spec,[0.0,igmf_val]) for i in range(casc_r68.shape[1]): plt.loglog(ectr,casc_r68[:,i],color=matplotlib.cm.jet(float(i)/9.), label='B = %5.2g G'%10**igmf_val[i]) plt.gca().set_ylim(1E-5,100) plt.gca().set_title('lcoh = %5.2g Mpc'%1.0) plt.gca().legend(frameon=False,loc='best',prop={'size':10},ncol=2) plt.gca().set_xlabel('Energy [MeV]') plt.gca().set_ylabel('R68 [deg]') # R68 at Constant Bfield casc_r68 = cm0.casc_r68(inj_spec,[lcoh_val,-16.0]) fig.add_subplot(122) for i in range(casc_r68.shape[1]): 
plt.loglog(ectr,casc_r68[:,i],color=matplotlib.cm.jet(float(i)/9.), label='Lcoh = %5.2g Mpc'%10**lcoh_val[i]) plt.gca().set_ylim(1E-5,100) plt.gca().set_title('B = %5.2g G'%10**-16.0) plt.gca().legend(frameon=False,loc='upper right',prop={'size':10},ncol=2) plt.gca().set_xlabel('Energy [MeV]') plt.gca().set_ylabel('R68 [deg]') # + import os if not os.path.isfile('results_th_jet6.00_tmax1e+07.fits.gz'): # !curl -OL https://www.dropbox.com/s/v03oh5ipaeuazaw/results_th_jet6.00_tmax1e%2B07.fits.gz?dl=0 cm1 = CascModel.create_from_fits('results_th_jet6.00_tmax1e+07.fits.gz') # + redshift_val = [0.01,0.03,0.05,0.1,0.2,0.3,0.4,0.6] inj_spec = PLExpCutoff([1E-13,-1.,10E6],scale=1E3) prim_flux = cm1.prim_flux(inj_spec,[redshift_val,0.0,-16.0]) inj_flux = inj_spec.flux(emin, emax) casc_flux = cm1.casc_flux(inj_spec,[redshift_val,0.0,-16.0]) casc_r68 = cm1.casc_r68(inj_spec,[redshift_val,0.0,-16.0]) fig = plt.figure(figsize=(15,5)) fig.add_subplot(121) plt.loglog(ectr,inj_flux*ectr,color='k',linewidth=1.5,label='Inj Spec.') for i in range(casc_flux.shape[1]): plt.loglog(ectr,prim_flux[:,i]*ectr,color=matplotlib.cm.jet(float(i)/len(redshift_val)), linewidth=1.5,linestyle='--') plt.loglog(ectr,casc_flux[:,i]*efct,color=matplotlib.cm.jet(float(i)/len(redshift_val)), label='z = %5.2g'%redshift_val[i]) plt.gca().set_ylim(1E-10) plt.gca().legend(frameon=False,loc='lower left',prop={'size':10},ncol=2) plt.gca().set_xlabel('Energy [MeV]') fig.add_subplot(122) plt.loglog(ectr,np.sqrt((2.85*(ectr/100.)**-0.8)**2+0.035**2),color='k',label='SOURCE::PSF3') for i in range(casc_r68.shape[1]): plt.loglog(ectr,casc_r68[:,i],color=matplotlib.cm.jet(float(i)/casc_r68.shape[1]), label='z = %5.2g'%redshift_val[i]) plt.gca().legend(frameon=False,loc='lower left',prop={'size':10},ncol=2) plt.gca().set_xlabel('Energy [MeV]') plt.gca().set_xlim(1E2,1E5) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Build Your Own GPT-2 # # ## 2. Fine Tuning the Language Model # # 在[上一篇]()裡我們用 HuggingFace `transformers` 套件裡預先訓練好的標記化工具(tokenizer)跟語言模型(language model),來自動生成新的文本,接下來要嘗試進一步用自己準備的語料來調整預先訓練的語言模型。 # # ### 參考資料: # - [Natural Language Generation Part 2: GPT2 and Huggingface](https://towardsdatascience.com/natural-language-generation-part-2-gpt-2-and-huggingface-f3acb35bc86a) # # - [(transformers-document) Training and fine-tuning](https://huggingface.co/transformers/training.html) # + # setup imports to use the model from transformers import TFGPT2LMHeadModel from transformers import GPT2Tokenizer, BertTokenizer model = TFGPT2LMHeadModel.from_pretrained('ckiplab/gpt2-base-chinese', from_pt=True) tokenizer = BertTokenizer.from_pretrained("bert-base-chinese") input_ids = tokenizer.encode("今天天氣很好", return_tensors='tf') print(input_ids) # - # 我們可以把預訓練好的模型先存起來,作為後續 fine-tune 的基礎。 # # > model.save_pretrained('../data/mygpt2/') # # # ## 把語料轉換成訓練用的格式 # # 接下來,我們要把訓練用的語料,轉換成 `TFGPT2LMHeadModel()` 使用的格式。 # # ### 1. 製作語料文本的檔案清單 # + # Get all files in the corpus directory def list_corpus_files(corpus_path, prefix='wiki_'): import os # flist = [] for dirPath, dirNames, fileNames in os.walk(corpus_path): for f in fileNames: if f.startswith(prefix): flist.append(os.path.join(dirPath, f)) return(flist) # Test function with the wiki_zh corpus wikifiles = list_corpus_files('D:\data\corpus\wiki_zh') print(len(wikifiles)) print(wikifiles[:5]) # - # ### 2. 
讀取單一語料檔案的工具 # + # Read specified json file def read_json_corpus(corpus_file, to_zhtw=True): import json import opencc converter = opencc.OpenCC('s2tw.json') # To Taiwan Chinese data = [] with open(corpus_file, 'r', encoding='utf8') as f: line = f.readline() while line: if to_zhtw: line = converter.convert(line) data.append(json.loads(line)) line = f.readline() return(data) # Test data = read_json_corpus(wikifiles[1]) print(len(data)) print(data[5]['text'][:100]) # - # ### 3. 將語料轉換成標記化資料 (tokenized data) # # 我們建好可以讀取大量文本的工具函數,在我們參考 [`gpt2-chinese`](https://github.com/Morizeyao/GPT2-Chinese/) 的 [`train.py`](https://github.com/Morizeyao/GPT2-Chinese/blob/master/train.py) 當中,有一個將大量文本轉換成「標記化資料 (tokenized data)」的函數,進行的工作如下: # # 1. 將「分行符號」(`\n`)轉換成 BertTokenizer 的段落符號(`[SEP]`) # 2. 重組以「文件」為單位的語料: # 1. 指定最後要分割成的檔案數量 `num_pieces`,將文件數量平均分配到每個 piece 裡 # 2. 將長度超過 `min_length` 的文件透過指定的 tokenizer (here, we use BertTokenizer) 轉換成向量,並在文件前後分別加上`[MASK]`和`[CLS]`的標記。 # 3. 以 piece 為單位儲存。 # # 基本上這個函數是將文件語料轉換成 token vector 的工具,不見得一定要這麼使用。 # def build_files(lines, tokenized_data_path, num_pieces, full_tokenizer, min_length): import os, tqdm # Process raw strings print('reading lines') lines = [line.replace('\n', ' [SEP] ') for line in lines] # 用[SEP]表示换行, 段落之间使用SEP表示段落结束 all_len = len(lines) # Prepare output if not os.path.exists(tokenized_data_path): print('creating path for tokenized data: '+tokenized_data_path) os.mkdir(tokenized_data_path) for i in tqdm.tqdm(range(num_pieces)): sublines = lines[(all_len // num_pieces * i):(all_len // num_pieces * (i + 1))] if i == num_pieces - 1: sublines.extend(lines[all_len // num_pieces * (i + 1):]) # 把尾部例子添加到最后一个 piece sublines = [full_tokenizer.tokenize(line) for line in sublines if len(line) > min_length] # 只考虑长度超过 min_length 的句子 sublines = [full_tokenizer.convert_tokens_to_ids(line) for line in sublines] full_line = [] for subline in sublines: full_line.append(full_tokenizer.convert_tokens_to_ids('[MASK]')) # 文章开头添加MASK表示文章开始 full_line.extend(subline) full_line.append(full_tokenizer.convert_tokens_to_ids('[CLS]')) # 文章之间添加CLS表示文章结束 with open(tokenized_data_path + 'tokenized_train_{}.txt'.format(i), 'w') as f: for id in full_line: f.write(str(id) + ' ') print('finish') # **製作以「文件」為單位的資料集** # # 我們利用前面讀取語料檔案的工具,來建立以「文件」為單位的資料集。讓我們先讀取10個檔案作測試,我們不需要語料當中的其他欄位,只需要'text'即可。 # + ## Fetch content from all wikifiles import tqdm data = [] for i in tqdm.tqdm(range(len(wikifiles[:10]))): data+=read_json_corpus(wikifiles[i]) print(len(data)) corpus_text = [] for article in data: corpus_text.append(article['text']) print(len(corpus_text)) # - # **分段測試函數功能** # # 讀取的10個檔案中總共有885篇文件,由於上面的函數比較長,我們用分段測試來理解它的功能。 print(corpus_text[10][:100]) lines = [line.replace('\n', ' [SEP] ') for line in corpus_text] # 用[SEP]表示换行, 段落之间使用SEP表示段落结束 print(lines[0][:100]) all_len = len(lines) print(all_len) # 接下來這段是主迴圈,基本上就是把 article-based 單位換成 piece-based 的單位。 num_pieces = 10 min_length = 30 for i in range(num_pieces): idx1 = all_len // num_pieces * i idx2 = all_len // num_pieces * (i + 1) print(idx1, idx2) sublines = lines[(all_len // num_pieces * i):(all_len // num_pieces * (i + 1))] if i == num_pieces - 1: sublines.extend(lines[all_len // num_pieces * (i + 1):]) # 把尾部例子添加到最后一个 piece sublines = [tokenizer.tokenize(line) for line in sublines if len(line) > min_length] # 只考虑长度超过 min_length 的句子 sublines = [tokenizer.convert_tokens_to_ids(line) for line in sublines] print(len(sublines)) # + tokenizer = BertTokenizer.from_pretrained("bert-base-chinese") build_files(corpus_text, 
tokenized_data_path='../data/tokenized_data/', num_pieces=100, full_tokenizer=tokenizer, min_length=32) # - # ### 4. 建立我們的訓練用標記化語料 # + # Get all files in the corpus directory def list_corpus_files(corpus_path, prefix='wiki_'): import os # flist = [] for dirPath, dirNames, fileNames in os.walk(corpus_path): for f in fileNames: if f.startswith(prefix): flist.append(os.path.join(dirPath, f)) return(flist) # Read specified json file def read_json_corpus(corpus_file, to_zhtw=True): import json import opencc converter = opencc.OpenCC('s2tw.json') # To Taiwan Chinese data = [] with open(corpus_file, 'r', encoding='utf8') as f: line = f.readline() while line: if to_zhtw: line = converter.convert(line) data.append(json.loads(line)) line = f.readline() return(data) def build_files(lines, tokenized_data_path, num_pieces, full_tokenizer, min_length): import os, tqdm # Process raw strings print('reading lines') lines = [line.replace('\n', ' [SEP] ') for line in lines] # 用[SEP]表示换行, 段落之间使用SEP表示段落结束 all_len = len(lines) # Prepare output if not os.path.exists(tokenized_data_path): print('creating path for tokenized data: '+tokenized_data_path) os.mkdir(tokenized_data_path) for i in tqdm.tqdm(range(num_pieces)): sublines = lines[(all_len // num_pieces * i):(all_len // num_pieces * (i + 1))] if i == num_pieces - 1: sublines.extend(lines[all_len // num_pieces * (i + 1):]) # 把尾部例子添加到最后一个 piece sublines = [full_tokenizer.tokenize(line) for line in sublines if len(line) > min_length] # 只考虑长度超过 min_length 的句子 sublines = [full_tokenizer.convert_tokens_to_ids(line) for line in sublines] full_line = [] for subline in sublines: full_line.append(full_tokenizer.convert_tokens_to_ids('[MASK]')) # 文章开头添加MASK表示文章开始 full_line.extend(subline) full_line.append(full_tokenizer.convert_tokens_to_ids('[CLS]')) # 文章之间添加CLS表示文章结束 with open(tokenized_data_path + 'tokenized_train_{}.txt'.format(i), 'w') as f: for id in full_line: f.write(str(id) + ' ') print('finish') def build_tokenized_corpus(corpus_path, tokenizer, output_path='../data/tokenized_data/', output_pieces=100, min_length=10): import tqdm # List all corpus files corpusfiles = list_corpus_files(corpus_path) print('Number of files: '+str(len(corpusfiles))) # Read and combine corpus print('Reading files... 
') data = [] for i in tqdm.tqdm(range(len(corpusfiles))): data+=read_json_corpus(corpusfiles[i]) # Convert file-based corpus to article-based corpus_text = [] for article in data: corpus_text.append(article['text']) # build_files(corpus_text, tokenized_data_path='../data/tokenized_data/', num_pieces=output_pieces, full_tokenizer=tokenizer, min_length=min_length) # return(0) # - tokenizer = BertTokenizer.from_pretrained("bert-base-chinese") build_tokenized_corpus(corpus_path='D:\data\corpus\wiki_zh', tokenizer=tokenizer, output_pieces=256) # ## 用 Model 類別直接訓練 # # 我們先讀取預訓練的模式,檢查模式的設定。 # + from datetime import datetime import random import numpy as np import transformers import tensorflow as tf from transformers import TFGPT2LMHeadModel from transformers import GPT2Tokenizer, BertTokenizer model = TFGPT2LMHeadModel.from_pretrained('../data/mygpt2') tokenizer = BertTokenizer.from_pretrained('../data/tokenizer_bert_base_chinese') num_pieces = 100 tokenized_data_path = '../data/tokenized_data/' full_tokenizer=tokenizer output_dir = '../data/gpt2_ft/' model_config = transformers.GPT2Config.from_json_file('../data/mygpt2/config.json') print('config:\n' + model_config.to_json_string('../data/mygpt2/config.json')) # + # Load data import numpy as np with open('../data/tokenized_data/tokenized_train_237.txt', 'r') as f: tokenized_ids = f.readline().strip().split(' ') print(len(tokenized_ids)) data = [int(id) for id in tokenized_ids] #print(data[:20]) dataset = tf.data.Dataset.from_tensor_slices(data).window(model_config.n_ctx, drop_remainder=True) print(dataset.element_spec) train_data = [] for window in dataset: train_data.append(np.array([elem.numpy() for elem in window]).astype('int32')) #print(len(train_data)) #print(train_data[1]) # + #train_data = np.array(train_data) print(np.array(train_data).shape) #print(train_data[1]) print(max(data)) print(min(data)) optimizer = tf.keras.optimizers.Adam(learning_rate=5e-5) model.compile(optimizer=optimizer, loss=model.compute_loss) # can also use any keras loss fn model.summary() model.fit(x=train_data, epochs=3, batch_size=16) # + stride=768 batch_size=8 n_ctx = model_config.n_ctx print('starting training') now = datetime.now() print('time: {}'.format(now)) overall_step = 0 running_loss = 0 for epoch in range(epochs): x = np.linspace(0, num_pieces - 1, num_pieces, dtype=np.int32) random.shuffle(x) piece_num = 0 for i in x: with open(tokenized_data_path + 'tokenized_train_{}.txt'.format(i), 'r') as f: line = f.read().strip() tokens = line.split() tokens = [int(token) for token in tokens] start_point = 0 samples = [] while start_point < len(tokens) - n_ctx: samples.append(tokens[start_point: start_point + n_ctx]) start_point += stride if start_point < len(tokens): samples.append(tokens[len(tokens)-n_ctx:]) random.shuffle(samples) for step in range(len(samples) // batch_size): # drop last # prepare data batch = samples[step * batch_size: (step + 1) * batch_size] batch_inputs = [] for ids in batch: int_ids = [int(x) for x in ids] batch_inputs.append(int_ids) batch_inputs = tf.tensor(batch_inputs).long().to(device) # forward pass outputs = model.forward(input_ids=batch_inputs, labels=batch_inputs) loss, logits = outputs[:2] # get loss if multi_gpu: loss = loss.mean() if gradient_accumulation > 1: loss = loss / gradient_accumulation # loss backward if fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), max_grad_norm) else: loss.backward() 
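# NOTE: the remainder of this loop follows the PyTorch training script (train.py) from the
# GPT2-Chinese repository linked above, so it assumes that `torch`, an `optimizer`, a learning-rate
# `scheduler`, and settings such as `epochs`, `multi_gpu`, `fp16`, `gradient_accumulation`,
# `max_grad_norm`, `log_step` and `tb_writer` have been defined elsewhere; they are not set up in
# this notebook as written. The lines below clip the gradients and then apply the optimizer and
# scheduler once every `gradient_accumulation` mini-batches.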
torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm) # optimizer step if (overall_step + 1) % gradient_accumulation == 0: running_loss += loss.item() optimizer.step() optimizer.zero_grad() scheduler.step() if (overall_step + 1) % log_step == 0: tb_writer.add_scalar('loss', loss.item() * gradient_accumulation, overall_step) print('now time: {}:{}. Step {} of piece {} of epoch {}, loss {}'.format( datetime.now().hour, datetime.now().minute, step + 1, piece_num, epoch + 1, running_loss * gradient_accumulation / (log_step / gradient_accumulation))) running_loss = 0 overall_step += 1 piece_num += 1 print('saving model for epoch {}'.format(epoch + 1)) if not os.path.exists(output_dir + 'model_epoch{}'.format(epoch + 1)): os.mkdir(output_dir + 'model_epoch{}'.format(epoch + 1)) model_to_save = model.module if hasattr(model, 'module') else model model_to_save.save_pretrained(output_dir + 'model_epoch{}'.format(epoch + 1)) print('epoch {} finished'.format(epoch + 1)) print('training finished') then = datetime.now() print('time: {}'.format(then)) print('time for training: {}'.format(then - now)) if not os.path.exists(output_dir + 'final_model'): os.mkdir(output_dir + 'final_model') model_to_save = model.module if hasattr(model, 'module') else model model_to_save.save_pretrained(output_dir + 'final_model') # - # ## 使用 Trainer 類別來調整模型 # # `HuggingFace` 提供了 [`Trainer` class](https://huggingface.co/transformers/main_classes/trainer.html#transformers.Trainer) 作為調整模型參數的工具。在使用 `Trainer` 之前,我們需要先決定模型結構(通常是使用 `model.from_pretrained`),並且據此以 [`TrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.html#trainingarguments) 來設定訓練用的 hyper-parameters,例如 `learning_rate`, `num_train_epochs`, 和 `per_device_train_batch_size` 等等。 # # + from transformers import BertTokenizer, glue_convert_examples_to_features import tensorflow as tf import tensorflow_datasets as tfds tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') data = tfds.load('glue/mrpc') train_dataset = glue_convert_examples_to_features(data['train'], tokenizer, max_length=128, task='mrpc') train_dataset = train_dataset.shuffle(100).batch(32).repeat(2) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Effect of Spots on a Star's Radius and Luminosity # # ## # # Max-Planck-Institut für Physik und Astrophysik # # --- # # ## Brief Summary and Key Results # # To be written later. # # --- # # ## Motivation & Purpose # # Stars with convective envelopes often exhibit observational signatures characteristic of possessing spots on their surface, analagous to sunspots. Spots block flux, but the fate of this blocked flux is not known. Is the flux stored in the convection zone or does it escape through the stellar surface? Following on the author's early work showing that the trapped heat must be re-distributed throughout the convection zone (Spruit [1977, SoPh, 55, 3](http://adsabs.harvard.edu/abs/1977SoPh...55....3S)), this paper addresses the question of whether that re-distributed heat emerges from the stellar surface or whether it is stored within the convection zone over longer time scales. # # ## Uncertain Terms # # ## Methods # # --- # # _This paper can be found in its entirety on [ADS](http://adsabs.harvard.edu/abs/1982A%26A...108..348S). 
Views expressed here are my own and do not necessarily represent those of my institution._ # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import matplotlib.pyplot as plt import numpy as np import tempfile import seaborn as sns sns.set(style="darkgrid") import logging logging.getLogger().setLevel(logging.INFO) from banditpylib import trials_to_dataframe from banditpylib.arms import BernoulliArm from banditpylib.bandits import MultiArmedBandit from banditpylib.protocols import SinglePlayerProtocol from banditpylib.learners.mab_learner import UCB, EpsGreedy, ThompsonSampling # - horizon = 2000 means = np.array([0.3, 0.5, 0.7]) arms = [BernoulliArm(mean) for mean in means] bandit = MultiArmedBandit(arms=arms) learners = [EpsGreedy(arm_num=len(arms), name='Epsilon Greedy'), UCB(arm_num=len(arms), name='UCB'), ThompsonSampling(arm_num=len(arms), name='Thompson Sampling')] # Report intermediate regrets after these horizons intermediate_horizons = list(range(0, horizon+1, 50)) temp_file = tempfile.NamedTemporaryFile() game = SinglePlayerProtocol(bandit, learners) # Start playing the game # Add `debug=True` for debugging purpose game.play(200, temp_file.name, intermediate_horizons=intermediate_horizons, horizon=horizon) trials_df = trials_to_dataframe(temp_file.name) trials_df.head() sns.lineplot(x='total_actions', y='regret', hue='learner', data=trials_df) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="q-gigy72KsQ7" colab_type="text" # 目標: 使用 Seaborn 自帶的dataset, 利用 PANDAS 處理來繪製資料集的可是畫圖表 # # 重點: 不同型態的類別要套用的對應圖像是不一樣的 # + colab_type="code" id="1aQwvVYyLQAa" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="72344432-2c03-4a3a-ec5c-31af9ecfad27" # 導入必要的程式庫 import pandas as pd import seaborn as sns from matplotlib import pyplot as plt # 取得鳶尾花資料集 df = sns.load_dataset('iris') # + id="XYhQE52jLhRW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 225} outputId="d7034745-17d1-42a4-858e-8abd0ce1d038" df.info() # + id="9ydYgLopMvK-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="6c6910ce-4a48-4954-a7f7-86f26ad960ae" # 直接使用PANDAS dataframe, 當作參數 # 箱形圖顯示了數據的總體分布,同時繪製了異常值的數據點。這個物理點讓它們的特定值在樣本之間容易被識別和比較。 # 數據中的任何異常值都繪製為單個點。 sns.boxplot(data = df, orient = "h") plt.show() # 分布的擴散差不多是相等的,並且可以很容易地比較異常值 # + id="6AfkVr8FKaiw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="d3ec5c0a-1bac-4e21-e876-b72c44213b83" # 當一個或兩個正在研究的變數是分類的時,我們使用像條帶線()、swarmplot()等的圖。 # 查看到每個物種petal_length的差異。但是,散點圖的主要問題是散點圖上的點重疊 sns.stripplot(x = "species", y = "petal_length", data = df) plt.show() # + id="81yzxgVkKai4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="de0815a2-e08e-4f68-fdae-4106c7003506" # 上述散點圖的主要問題是散點圖上的點重疊。我們使用"抖動"參數來處理此類方案。 # 抖動會為數據添加一些隨機雜訊。此參數將沿分類軸調整位置。 sns.stripplot(x = "species", y = "petal_length", data = df, jitter=True) plt.show() # + id="Pk8RXLriMe-c" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="e1eee2b8-3d8e-4c19-9250-a4ab96467878" #另一個可以用作「抖動」 的替代選項是函數群圖()。 #此函數將散點圖的每個點都放在分類軸上,從而避免重疊點 sns.swarmplot(x = "species", y = "petal_length", data = 
df) plt.show() # + id="KOmcmA75nAbk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 748} outputId="8d4daaaa-ad8c-4c86-f53b-90dc92e1a5f0" # 可以觀察每個情節的變化。繪圖採用矩陣格式,其中行名表示 x 軸,列名稱表示 y 軸。 # 對角線圖是內核密度圖,其中其他圖是散點圖 # 就是hue在sns.pairplot函數調用中使用關鍵字: sns.set_style("ticks") #STYLE初始化 sns.pairplot(df,hue = 'species',diag_kind = "kde",kind = "scatter",palette = "husl") plt.show() # + id="E3hCnBUgr5jS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 748} outputId="4e7e5392-0d7e-4ec8-be81-24d01458780f" # 可以在上三角形和下三角形使用不同的函數來查看關係的不同方面 g = sns.pairplot(df,hue = 'species',diag_kind = "kde",kind = "scatter",palette = "husl") g.map_upper(plt.scatter) g.map_lower(sns.kdeplot, cmap = "Blues_d") g.map_diag(sns.kdeplot, lw = 3, legend = False); plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from nltk.tokenize import sent_tokenize, word_tokenize ex = "Time to get started with natural language processing. Python will make it easy!" sent_tokens = sent_tokenize(ex) sent_tokens word_tokens = word_tokenize(ex) word_tokens from nltk.tag import pos_tag tags = pos_tag(word_tokens) tags import nltk nltk.help.upenn_tagset('VB') list_of_tags = [] for pair in tags: list_of_tags.append(pair[1]) list_of_tags = list(set(list_of_tags)) list_of_tags for pos in list_of_tags: print(nltk.help.upenn_tagset(pos)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: drlnd # language: python # name: drlnd # --- # # Continuous Control # # --- # # In this notebook, you will learn how to use the Unity ML-Agents environment for the second project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893) program. # # ### 1. Start the Environment # # We begin by importing the necessary packages. If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/). # + from unityagents import UnityEnvironment from collections import deque from ddpg_agent import Agent import numpy as np import torch import matplotlib.pyplot as plt # %matplotlib inline # - # Next, we will start the environment! **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Unity environment that you downloaded. # # - **Mac**: `"path/to/Reacher.app"` # - **Windows** (x86): `"path/to/Reacher_Windows_x86/Reacher.exe"` # - **Windows** (x86_64): `"path/to/Reacher_Windows_x86_64/Reacher.exe"` # - **Linux** (x86): `"path/to/Reacher_Linux/Reacher.x86"` # - **Linux** (x86_64): `"path/to/Reacher_Linux/Reacher.x86_64"` # - **Linux** (x86, headless): `"path/to/Reacher_Linux_NoVis/Reacher.x86"` # - **Linux** (x86_64, headless): `"path/to/Reacher_Linux_NoVis/Reacher.x86_64"` # # For instance, if you are using a Mac, then you downloaded `Reacher.app`. 
If this file is in the same folder as the notebook, then the line below should appear as follows: # ``` # env = UnityEnvironment(file_name="Reacher.app") # ``` env = UnityEnvironment(file_name='Reacher_Linux_NoVis/Reacher.x86_64') # Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python. # get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] # ### 2. Examine the State and Action Spaces # # In this environment, a double-jointed arm can move to target locations. A reward of `+0.1` is provided for each step that the agent's hand is in the goal location. Thus, the goal of your agent is to maintain its position at the target location for as many time steps as possible. # # The observation space consists of `33` variables corresponding to position, rotation, velocity, and angular velocities of the arm. Each action is a vector with four numbers, corresponding to torque applicable to two joints. Every entry in the action vector must be a number between `-1` and `1`. # # Run the code cell below to print some information about the environment. # + # reset the environment env_info = env.reset(train_mode=True)[brain_name] # number of agents num_agents = len(env_info.agents) print('Number of agents:', num_agents) # size of each action action_size = brain.vector_action_space_size print('Size of each action:', action_size) # examine the state space states = env_info.vector_observations state_size = states.shape[1] print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size)) print('The state for the first agent looks like:', states[0]) # - # When finished, you can close the environment. # ### 3. Training the agent! # # Now it's your turn to train the agent to solve the environment! 
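# Before filling in the full training function below, a quick sanity check can confirm that the
# environment loop behaves as expected. The cell below is a minimal sketch (not part of the
# original template) that steps through one episode with random actions; it only relies on the
# `env`, `brain_name`, `num_agents` and `action_size` variables created above.

# +
env_info = env.reset(train_mode=True)[brain_name]        # reset the environment
states = env_info.vector_observations                    # initial states for all agents
scores = np.zeros(num_agents)                            # accumulate rewards per agent
while True:
    actions = np.clip(np.random.randn(num_agents, action_size), -1, 1)  # random actions in [-1, 1]
    env_info = env.step(actions)[brain_name]             # send the actions to the environment
    states = env_info.vector_observations                # next states
    scores += env_info.rewards                           # update the scores
    if np.any(env_info.local_done):                      # stop when any agent finishes the episode
        break
print('Score from random actions: {:.2f}'.format(np.mean(scores)))
# -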
def train_agent(n_episodes=300, max_t=500): avg_score = [] scores_deque = deque(maxlen=100) scores = np.zeros(num_agents) time_steps = 20 update = 10 env_info = env.reset(train_mode=True)[brain_name] states = env_info.vector_observations agent_tuple = {"state_size": state_size, "action_size": action_size, "random_seed": 2,} agents = [Agent(**agent_tuple) for _ in range(num_agents)] action = [agent.act(states[i]) for i, agent in enumerate(agents)] for i_episode in range(1, n_episodes+1): states = env_info.vector_observations for agent in agents: agent.reset() for t in range(max_t): actions = [agent.act(states[i]) for i, agent in enumerate(agents)] env_info = env.step(actions)[brain_name] next_states = env_info.vector_observations rewards = env_info.rewards dones = env_info.local_done step_t = zip(agents, states, actions, rewards, next_states, dones) for agent, state, action, reward, next_step, done in step_t: agent.memory.add(state, action, reward, next_step, done) if ( t % time_steps == 0): agent.step(state, action, reward, next_step, done, update) states = next_states scores += rewards if np.any(dones): break score = np.mean(scores) avg_score.append(score) scores_deque.append(score) avg = np.mean(scores_deque) print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, avg,), end="\n") if avg >= 30: print("\r\rEnviroment solved in @ i_episode={i_episode}, w/ avg_score={avg:.2f}\r".format(i_episode=i_episode, avg=avg)) break torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth') torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth') return avg_score scores = train_agent() def plot_scores(scores): fig = plt.figure() ax = fig.add_subplot(111) plt.plot(np.arange(1, len(scores)+1), scores) plt.ylabel('Score') plt.xlabel('Episode #') plt.show() plot_scores(scores) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd data = pd.read_excel("statewise_sugarcane.xlsx") data data['MADHYAPRADESH']= data['MADHYAPRADESH'].fillna(data['MADHYAPRADESH'].mean()) data from scipy.stats import kruskal stat, p = kruskal(data["ANDHRAPRADESH"],data["GUJARAT"],data["KARNATAKA"],data["MADHYAPRADESH"],data["MAHARASTRA"],data["ORISSA"],data["TAMILNADU"]) print('Statistics=%.3f, p=%.3f' % (stat, p)) alpha = 0.05 if p > alpha: print('Same distributions (fail to reject H0)') else: print('Different distributions (reject H0)') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 # + [markdown] slideshow={"slide_type": "-"} # # Social Network Recommendations # # In this example we're going to build a powerful social network predictive capability with some simple Gremlin queries. The techniques intrdocued here can be used to build predictions in other domains outside of social. # # ### People You May Know # # A common feature of many social network applications is the ability to recommend People-You-May-Know (or People-You-May-Want-To-Know) – sometimes abbreviated PYMK. # # Using Amazon Neptune, we can implement a PYMK capability using a well-understood phenomenon called *triadic closure*. 
Triadic closure is the # tendency for elements at a very local level in a graph to form stable triangles as the data changes over time. This behaviour can be observed in graphs in all kinds of different domains. It's the basis of many homophily-based recommendation systems – systems that exploit the fact that similarity breeds connections. In this example we're going to look at using triadic closure in the context of a social network. # # Let's imagine we have a social network whose members include Bill, Terry and Sarah. Bill is friends with both Terry and Sarah; that is, Terry and Sarah have a mutual friend in Bill. # # Because they have Bill in common, there's a good chance that Sarah and Terry either already know one another or may get to know one another in the near future. Just looking at the graph, we can see they have both the *means* and the *motive* to be friends. Hanging around with Bill provides the means for Sarah and Terry to meet. And because they trust Bill, they have the motive to trust people with whom Bill is friends, increasing the chance that if they do meet, they'll form a connection and close the triangle. # # In the context of a social network, we can use triadic closure to implement PYMK. When a particular user logs into the system, we can look up their vertex in the graph, and then traverse their friend-of-a-friend network, looking for opportunities to close triangles. The more paths that extend from our user, through their immediate friends, to someone to whom they are not currently connected, the greater the likelihood the user may either already know that person, or may benefit from getting to know them. # - # ### Setup # # Before we begin, we'll clear any existing data from our Neptune cluster, using the cell magic `%%gremlin` and a subsequent drop query: # + # %%gremlin g.V().drop() # - # How do we know which Neptune cluster to access? The cell magics exposed by Neptune Notebooks use a configuration located by default under `~/graph_notebook_config.json`. At the time of initialization of the SageMaker instance, this configuration is generated using environment variables derived from the cluster being connected to. # # You can check the contents of the configuration in two ways. You can print the file itself, or you can look for the configuration being used by the notebook which you have opened. # + language="bash" # # cat ~/graph_notebook_config.json # - # %graph_notebook_config # ### Create a Social Network # # Next, we'll create a small social network. Note that the script below comprises a single statement. All the vertices and edges here will be created in the context of a single transaction. # + # %%gremlin g. addV('User').property('name','Bill').property('birthdate', '1988-03-22'). addV('User').property('name','Sarah').property('birthdate', '1992-05-03'). addV('User').property('name','Ben').property('birthdate', '1989-10-21'). addV('User').property('name','Lucy').property('birthdate', '1998-01-17'). addV('User').property('name','Colin').property('birthdate', '2001-08-14'). addV('User').property('name','Emily').property('birthdate', '1998-03-05'). addV('User').property('name','Gordon').property('birthdate', '2002-12-04'). addV('User').property('name','Kate').property('birthdate', '1995-02-12'). addV('User').property('name','Peter').property('birthdate', '2001-02-27'). addV('User').property('name','Terry').property('birthdate', '1989-10-02'). addV('User').property('name','Alistair').property('birthdate', '1992-06-30'). 
addV('User').property('name','Eve').property('birthdate', '2000-05-13'). addV('User').property('name','Gary').property('birthdate', '1998-09-20'). addV('User').property('name','Mary').property('birthdate', '1997-01-27'). addV('User').property('name','Charlie').property('birthdate', '1989-11-02'). addV('User').property('name','Sue').property('birthdate', '1994-03-08'). addV('User').property('name','Arnold').property('birthdate', '2002-07-23'). addV('User').property('name','Chloe').property('birthdate', '1988-11-04'). addV('User').property('name','Henry').property('birthdate', '1996-03-15'). addV('User').property('name','Josie').property('birthdate', '2003-08-21'). V().hasLabel('User').has('name','Sarah').as('a').V().hasLabel('User').has('name','Bill').addE('FRIEND').to('a').property('strength',1). V().hasLabel('User').has('name','Colin').as('a').V().hasLabel('User').has('name','Bill').addE('FRIEND').to('a').property('strength',2). V().hasLabel('User').has('name','Terry').as('a').V().hasLabel('User').has('name','Bill').addE('FRIEND').to('a').property('strength',3). V().hasLabel('User').has('name','Peter').as('a').V().hasLabel('User').has('name','Colin').addE('FRIEND').to('a').property('strength',1). V().hasLabel('User').has('name','Kate').as('a').V().hasLabel('User').has('name','Ben').addE('FRIEND').to('a').property('strength',2). V().hasLabel('User').has('name','Kate').as('a').V().hasLabel('User').has('name','Lucy').addE('FRIEND').to('a').property('strength',3). V().hasLabel('User').has('name','Eve').as('a').V().hasLabel('User').has('name','Lucy').addE('FRIEND').to('a').property('strength',1). V().hasLabel('User').has('name','Alistair').as('a').V().hasLabel('User').has('name','Kate').addE('FRIEND').to('a').property('strength',2). V().hasLabel('User').has('name','Gary').as('a').V().hasLabel('User').has('name','Colin').addE('FRIEND').to('a').property('strength',3). V().hasLabel('User').has('name','Gordon').as('a').V().hasLabel('User').has('name','Emily').addE('FRIEND').to('a').property('strength',1). V().hasLabel('User').has('name','Alistair').as('a').V().hasLabel('User').has('name','Emily').addE('FRIEND').to('a').property('strength',3). V().hasLabel('User').has('name','Terry').as('a').V().hasLabel('User').has('name','Gordon').addE('FRIEND').to('a').property('strength',3). V().hasLabel('User').has('name','Alistair').as('a').V().hasLabel('User').has('name','Terry').addE('FRIEND').to('a').property('strength',1). V().hasLabel('User').has('name','Gary').as('a').V().hasLabel('User').has('name','Terry').addE('FRIEND').to('a').property('strength',2). V().hasLabel('User').has('name','Mary').as('a').V().hasLabel('User').has('name','Terry').addE('FRIEND').to('a').property('strength',3). V().hasLabel('User').has('name','Henry').as('a').V().hasLabel('User').has('name','Alistair').addE('FRIEND').to('a').property('strength',1). V().hasLabel('User').has('name','Sue').as('a').V().hasLabel('User').has('name','Eve').addE('FRIEND').to('a').property('strength',2). V().hasLabel('User').has('name','Sue').as('a').V().hasLabel('User').has('name','Charlie').addE('FRIEND').to('a').property('strength',3). V().hasLabel('User').has('name','Josie').as('a').V().hasLabel('User').has('name','Charlie').addE('FRIEND').to('a').property('strength',1). V().hasLabel('User').has('name','Henry').as('a').V().hasLabel('User').has('name','Charlie').addE('FRIEND').to('a').property('strength',2). V().hasLabel('User').has('name','Henry').as('a').V().hasLabel('User').has('name','Mary').addE('FRIEND').to('a').property('strength',3). 
V().hasLabel('User').has('name','Mary').as('a').V().hasLabel('User').has('name','Gary').addE('FRIEND').to('a').property('strength',1). V().hasLabel('User').has('name','Henry').as('a').V().hasLabel('User').has('name','Gary').addE('FRIEND').to('a').property('strength',2). V().hasLabel('User').has('name','Chloe').as('a').V().hasLabel('User').has('name','Gary').addE('FRIEND').to('a').property('strength',3). V().hasLabel('User').has('name','Henry').as('a').V().hasLabel('User').has('name','Arnold').addE('FRIEND').to('a').property('strength',1). next() # - # This is what the network looks like: # # # ### Create a Recommendation # # Let's now create a PYMK recommendation for a specific user. # # In the query below, we're finding the vertex that represents our user. We're then traversing `FRIEND` relationships (we don't care about relationship direction, so we're using `both()`) to find that user's immediate friends. We're then traversing another hop into the graph, looking for friends of those friends who _are not currently connected to our user_ (i.e., we're looking for the unclosed triangles). # # We then count the paths to these candidate friends, and order the results based on the number of times we can reach a candidate via one of the user's immediate friends. # + # %%gremlin g.V().hasLabel('User').has('name', 'Terry').as('user'). both('FRIEND').aggregate('friends'). both('FRIEND'). where(P.neq('user')).where(P.without('friends')). groupCount().by('name'). order(Scope.local).by(values, Order.decr). next() # - # ### Using Friendship Strength to Improve Recommendations # # What if we wanted to base our recommendations only on resonably strong friendship bonds? # # If you look at the Gremlin we used to create our graph, you'll see that each `FRIEND` edge has a `strength` property. In the following query, the traversal applies a predicate to this `strength` property. Note that we use `bothE()` rather than `both()` to position the traversal on an edge, where we then apply the predicate. We proceed only where `strength` is greater than one. # + # %%gremlin g.V().hasLabel('User').has('name', 'Terry').as_('user') .bothE('FRIEND') .has('strength', P.gt(1)).otherV() .aggregate('friends') .bothE('FRIEND') .has('strength', P.gt(1)).otherV() .where(P.neq('user')).where(P.without('friends')) .groupCount().by('name') .order(Scope.local).by(values, Order.decr) .next() # - # Because we discount weak friendships even when traversing to immediate friends, this query can sometimes end up recommending people that have a weak direct tie to our user. But that makes sense in the context of our social domain: one of our close friends has a strong friendship with one of the people with whom we have a weak connection; therefore, we might predict that over time this weak bond will grow stronger. # ## What's next? # # Curious about the business problems can be solved with graph? Check out these sample application notebooks for some inspiration. 
# # [Introduction to Fraud Graphs](../Introduction-to-Fraud-Graphs.ipynb) # # [Introduction to Knowledge Graphs](../Introduction-to-Knowledge-Graphs.ipynb) # # [Introduction to Identity Graphs](../Introduction-to-Identity-Graphs.ipynb) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # DSFP Object Oriented Programming Notebook # # Incorporating classes, objects, and functions into your code will improve its efficiency, readability, and make it easier to extend to other programs down the road. This notebook attempts to take you through many of the concepts associated with creating a class and have it do something useful and interesting. # # * * * # # By (Caltech / NSF Fellow) # + import matplotlib.pyplot as plt import random import numpy as np # %matplotlib inline # - # ## Problem 1) Create a galaxy class # # We will begin our experience with object-oriented code by creating a class to represent a galaxy. In order to do this, we must define the class, give it some default methods, and then define some attributes to describe each instance of a galaxy. # **Problem 1a** # # What do *you* think should be the attributes and characteristics that should be tracked for a galaxy object? Think critically and make a list of at least six different quantities that would make sense if this were a research project. Please do this prior to starting (or looking at) the example below. Discuss with your code partner. # *Provide your answer to Problem 1a here* # **Problem 1b** # # Define a galaxy class, and give it an initializer class method. Have your initializer accept **arguments** for `total_mass`, `cold_gas_mass`, and `stellar_mass`. Have it also accept **keyword arguments** for `age` with a default set to 0. Maybe also include a `SFR` and `color` in your attributes, but initialize it to 0 and `'red'` respectively. I'll include docstrings for you to help guide you along the way. # # *Hint: Don't forget to use `self` to indicate an object of this class.* class Galaxy(): """ Galaxy class for simply representing a galaxy. """ def __init__( #complete # **Problem 1c** # # Try creating a galaxy instance of your galaxy class! Create a galaxy object, named after your favorite galaxy, with appropriate values for your arguments and keyword arguments (or as best as you can approximate). There isn't much you can do with it right now, but at least you can be assured that your existing code works. Try printing your object to see what python thinks of it. milky_way = Galaxy( #complete # **Problem 1d** # # So Python probably didn't do a very good internal representation of your galaxy. Remember, classes have a few built-in class methods to be defined. One is the `__init__` method, so that we know how to *initialize* the class. Another very useful class method is the `__repr__`, which tells us how to *represent* a class object when we try to print it out. It's very useful for debugging. Please copy your class definition from above, and add the `__repr__` method to return a string somehow identifying a galaxy based on its attributes. Now recreate your favorite galaxy, and print it out again and see if it is a bit more readable than before. class Galaxy(): """ Galaxy class for simply representing a galaxy. 
""" def __init__(#complete # ## Problem 2) Make a more *interesting* galaxy class that can evolve with time # # Now let's build on our basic galaxy class with some relevant class methods specific to galaxies that enables them to change with time. Obviously, galaxies have a *huge* number of traits, and we don't currently understand all the ways in which these variables interrelate. But we can make a toy model here just to get the idea of how they *might* evolve. This is largely the premise of semi-analytic models that people use to study how galaxies might evolve with time. # **Problem 2a** # # Think about what methods *you* would add to allow your galaxy to change with time using the galaxy attributes defined above and/or other galaxy attributes. Come up with at least three and then discuss with your partner before continuing. # *Provide your answer to Problem 2a here* # **Problem 2b** # # We want to enhance our general `Galaxy` class to enable galaxies to actually evolve with time. Rather than just copying all of the code from our `Galaxy` class above, we can **subclass** the `Galaxy` class to inherit all of its attributes, methods, etc! Create a new class named `EvolvingGalaxy` below, which uses the `Galaxy` class as its "parent" or "super" class. Remember, because you already defined `__init__` and `__repr__` methods for this class, they are inherited and you don't need to redefine them, unless you want to. # # Now try creating an instance of `EvolvingGalaxy` and printing it out, just like you did for your favorite galaxy in problem 1c. # + class EvolvingGalaxy( #complete """ Galaxy class for representing a galaxy that can evolve over time. """ #complete # - # **Problem 2c** # # Let's start by making an `evolve` method for our `Galaxy` class in order to evolve the galaxy forward in time. `evolve` should expect an argument for `time` in years. And at least initially, let's just make this a simple method that just changes the `age` of the galaxy by the appropriate amount. We'll add more on to it in a second. # # Because we're going to be following the evolution of this galaxy, we should create a `current_state` method, which returns a tuple of the attributes of the `EvolvingGalaxy` object at that moment. Now as our `EvolvingGalaxy` changes with time, we will be able to track it and save that information somewhere. class EvolvingGalaxy( #complete """ Galaxy class for representing a galaxy that can evolve over time. """ #complete # ##### **Problem 2d** # # Now that we can *evolve* our `EvolvingGalaxy` and check on its `current_state` over time, let's run it forward in time and see the results! # # Make a new function (not a class method) called `integrate_time` that accepts an `EvolvingGalaxy` object, a timestep (some number of years long), and a number of timesteps over which to evolve the galaxy. Step the `EvolvingGalaxy` forward in time using your `for` loop, one timestep at a time, and store its `current_state` at each timestep. You can store this current_state information however you'd like, but I find it easiest to create a NumPy array of size [5, `n_timesteps`], and fill it in using the `for` loop. # # Similarly, make a new function called `plot_galaxy_evolution` that accepts the output from your `integrate_time` function and uses Matplotlib's pyplot module to plot up the evolution of the `EvolvingGalaxy`'s properties versus its age. 
# # Finally, create an instance of EvolvingGalaxy, and plug it into your new `integrate_time` and `plot_galaxy_evolution` functions to integrate for a gigayear, and plot how the `EvolvingGalaxy` quantities change with age. Not much going on--sad! # + def integrate_time( #complete """ Integrate the time forward for a galaxy and record its state at each timestep; return as array """ #complete def plot_galaxy_evolution( #complete """ Plot the evolution of a galaxy from its input data array """ #complete #complete # - # **Problem 2e** # # How do galaxies evolve with time? They do in lots of ways, but let's make a few specific rules. # # Galaxies tend to accrete gas from the intergalactic medium over time. We'll modify the code from above for your `EvolvingGalaxy` class to include a `accrete_gas_from_IGM` method, which adds to the `cold_gas_mass` and `total_mass` of the galaxy with time. Let's give it a random component too. # # Galaxies form stars, converting `cold_gas_mass` to `stellar_mass` according to their star formation rate (`SFR`). But there must be sufficient `cold_gas_mass` to consume, so make sure there is enough for the `SFR` to operate for the alloted `time`, otherwise the `SFR` should drop to 0. # # Finally, we haven't actually calculated a `SFR`, so let's just create a method, `calculate_star_formation_rate`, to choose a random value every time it is called. We'll also scale it in some way by the `total_mass` of the galaxy and use the Milky Way as a guide. The Milky Way forms about 1 Msun/year and it has a `total_mass` of 1e12 Msun. # # *Note: You can use the random.random() function to get a random number between 0 and 1.* # # Include calls to these methods in our `evolve` method, and re-generate a galaxy evolution plot like you did in Problem 2d. Since there is a random component, you can try re-running it a few times and see how the behavior changes. # + class EvolvingGalaxy(Galaxy): """ Galaxy class for representing a galaxy that can evolve over time. """ def current_state( #complete """ Return a tuple of the galaxy's total_mass, cold_gas_mass, stellar_mass, age, and SFR """ #complete def calculate_star_formation_rate( #complete """ Calculate the star formation rate by taking a random number between 0 and 1 normalized by the galaxy total mass / 1e12; Also updates the galaxy's color to blue if SFR > 0.01, otherwise color = red """ #complete def accrete_gas_from_IGM( #complete """ Allow the galaxy to accrete cold gas from the IGM at a variable rate normalized to the galaxy's mass """ #complete def form_stars( #complete """ Form stars according to the current star formation rate and time available If unable cold gas, then shut off star formation """ #complete def evolve( #complete """ Evolve this galaxy forward for a period time """ #complete #complete # - # **Problem 2f** # # Based on your knowledge of how galaxies evolve, do these results make sense? Discuss with your coding partner if so/not, and ways in which you might tweak it to work better. # *Provide your answer to Problem 2f here* # ## Problem 3) Make a galaxy class that can interact with other galaxies (challenging!) # # Most galaxies do not evolve in isolation, so let's make a galaxy class that enables galaxies to actually interact with each other. We can re-use much of the code from our previous examples through inheritance! Yeah! # **Problem 3a** # # Let us create a new subclass of `EvolveGalaxy`, so that it inherits most of the traits of our previous work. 
But we will add some new features including position and velocity values in the x-y plane, so that our galaxy can actually move over time. # # Create a new class `MovingGalaxy` that is a subclass of `EvolveGalaxy`. Make a new `__init__` method with all of the arguments from it's parent class `EvolveGalaxy`, but add the additional arguments of `x_position`, `y_position`, `x_velocity`, `y_velocity`, and `id_num`. # # *Note: all arguments must come before any keyword arguments, so the `age` kwarg has to come at the very end.* # # Also, make a new `__repr__` method for this class that just includes it's `idnum`, `x_position`, and `y_position`. # # To assure your code works, create an instance of a `MovingGalaxy`, and print it out. # + class MovingGalaxy(EvolvingGalaxy): """ Galaxy class that can evolve and move in the x,y plane """ def __init__(self, total_mass, cold_gas_mass, stellar_mass, #complete # Replace self with super to activate the superclass's methods super().__init__(total_mass, cold_gas_mass, stellar_mass) #complete def __repr__(self): #complete #complete # - # **Problem 3b** # # In order for our `MovingGalaxy` to move, we have to give it a few more methods. Copy your class definition from above, and add three more methods. # # Create a `move` method which accepts a `time` argument and updates the `MovingGalaxy`'s `x_position`, and `y_position` by accounting for the motion from its `x_velocity` and `y_velocity`. # # Create a `calculate_momentum` method that returns a tuple of (x_momentum, y_momentum) for a galaxy. # # Finally, create an `evolve` method, which accepts a `time` argument, and just executes your `move` method before calling the superclass's `evolve` method (i.e. `EvolvingGalaxy`'s). class MovingGalaxy(EvolvingGalaxy): """ This galaxy can move over time in the x,y plane """ def __init__(self, total_mass, cold_gas_mass, stellar_mass, x_position, y_position, x_velocity, y_velocity, idnum, age=0): # Replace self with super to activate the superclass's methods super().__init__(total_mass, cold_gas_mass, stellar_mass) self.x_position = x_position self.y_position = y_position self.x_velocity = x_velocity self.y_velocity = y_velocity self.idnum = idnum def __repr__(self): return "Galaxy %i (x = %.0f; y = %.0f)" % (self.idnum, self.x_position, self.y_position) def move( #complete def calculate_momentum( #complete def evolve( #complete # **Problem 3c** # # OK, so we have a `MovingGalaxy` that can move, but we need a Universe in which it can move! # # So let us create a new `Universe` class that can hold one or more galaxies. We will give it x and y limits which define its borders to be 0 and 100, as well as an internal list to hold the galaxies themselves. Make an `evolve` method for the `Universe` class that moves its time forward some timestep. How does this affect the galaxies? Are there boundaries to your `Universe`? What happens when `MovingGalaxy`'s run into each other? Don't worry about gravity for now, but you could make them merge when they are close enough to each other, or just let them fly through each other. After all, it's your Universe. # # It may also be useful to make a method to add a random `MovingGalaxy`, so that you can add several to your `Universe` during initialization. Start out only adding a few, as it might get expensive! # # Make a command that plots the positions of the galaxies with time using `matplotlib.scatter()`, and save the corresponding outputs to a files called `frame%04i.png`. The next steps allow you to make a movie of these frames. 
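# A minimal sketch of the frame-plotting helper described above is given below. It assumes that the
# `Universe` keeps its members in a list attribute called `galaxies` (a name this notebook does not
# fix) and that each galaxy carries the `color` attribute introduced in Problem 1.

# +
def plot_universe_frame_sketch(universe, frame_number):
    """Scatter-plot the current galaxy positions and save the figure as frame%04i.png"""
    x = [galaxy.x_position for galaxy in universe.galaxies]
    y = [galaxy.y_position for galaxy in universe.galaxies]
    colors = [galaxy.color for galaxy in universe.galaxies]
    plt.figure()
    plt.scatter(x, y, c=colors)
    plt.xlim(0, 100)   # borders of 0 and 100, as specified above
    plt.ylim(0, 100)
    plt.savefig('frame%04i.png' % frame_number)
    plt.close()
# -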
class Universe(): """ A Universe for galaxies to move around in and evolve """ def __init__( #complete def __repr__( #complete #complete # **Problem 3d** # # Initialize a `Universe` with several galaxies in it. Run your `Universe` forward in time, and plot the resulting state of the `Universe` over time to files called `frame%04i.png`. But be careful to not use too many galaxies or to evolve the `Universe` forward, as it may be really expensive for your computer. Start small and build from there! #complete # **Problem 3e** # # The command below will output a movie file called `movie.mp4` by combining all of your `frame%04i.png` files and packaging them together at a framerate of 20 FPS. It requires ffmpeg to be installed, but you can do this easily if you have homebrew installed by running `brew install ffmpeg`. There are other means to download `ffmpeg` online. Otherwise, you can flip through the frames manually to see how the `Universe` changes with time. # # If you were able to execute this step OK, execute to the next two fields and watch your movie! Again, since there is a random component to these models, you can try running it a few times to see how the results change. # + language="bash" # ffmpeg -r 20 -f image2 -i frame%04d.png -vcodec libx264 -pix_fmt yuv420p -crf 25 -y movie.mp4 # - # %%HTML # ## Challenge Problem: Add in gravitational accelerations to your Universe # # Using the framework you developed for Problems 1-3, add in gravitational effects to calculate the gravitational acceleration of each galaxy with each other galaxy. This is what is known as an N-body integrator, and it can be surprisingly easy to write one. Do this for your `Universe` above and run it forward in time. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: pymedphys-master # language: python # name: pymedphys-master # --- # + import re import sys import numpy as np import matplotlib.pyplot as plt import pydicom # - # Makes it so any changes in pymedphys is automatically # propagated into the notebook without needing a kernel reset. 
from IPython.lib.deepreload import reload # %load_ext autoreload # %autoreload 2 import pymedphys data_paths = pymedphys.zip_data_paths("monaco_trf_compare.zip") static_angles = [path for path in data_paths if path.parent.name == "StaticAnglesNoCol"] # + def get_file_type(input_paths, file_type): paths = [path for path in input_paths if file_type in path.name] assert len(paths) == 1 return paths[0] tel_path = get_file_type(static_angles, "tel") trf_path = get_file_type(static_angles, "trf") # - delivery_tel = pymedphys.Delivery.from_monaco(tel_path) delivery_trf = pymedphys.Delivery.from_logfile(trf_path) mu_density_angles = set(delivery_tel.gantry) mudensity_tel = delivery_tel.mudensity(gantry_angles=mu_density_angles) mudensity_trf = delivery_trf.mudensity(gantry_angles=mu_density_angles) coords = (grid['jaw'], grid['mlc']) # + gammas = [] percent_deviation = 1 mm_dist_threshold = 0.2 for tel_result, trf_result in zip(mudensity_tel, mudensity_trf): gammas.append( pymedphys.gamma(coords, tel_result, coords, trf_result, percent_deviation, mm_dist_threshold, local_gamma=True)) # - def plot_gamma_hist(gamma, percent, dist): valid_gamma = gamma[~np.isnan(gamma)] plt.hist(valid_gamma, 50, density=True) pass_ratio = np.sum(valid_gamma <= 1) / len(valid_gamma) plt.title("Local Gamma ({0}%/{1}mm) | Percent Pass: {2:.2f} % | Max Gamma: {3:.2f}".format(percent, dist, pass_ratio*100, np.max(valid_gamma))) grid = pymedphys.mudensity.grid() for gantry_angle, tel_result, trf_result, gamma in zip(mu_density_angles, mudensity_tel, mudensity_trf, gammas): print(f"Gantry Angle = {gantry_angle}") plt.figure() pymedphys.mudensity.display(grid, tel_result) plt.title("Monaco Plan") plt.figure() pymedphys.mudensity.display(grid, tel_result) plt.title("Logfile Result") diff = trf_result - tel_result largest_item = np.max(np.abs(diff)) plt.figure() pymedphys.mudensity.display(grid, diff, cmap="seismic", vmin=-largest_item, vmax=largest_item) plt.title("Logfile - Monaco") plt.figure() pymedphys.mudensity.display(grid, gamma, cmap="coolwarm", vmin=0, vmax=2) plt.title(f"Local Gamma | {percent_deviation}%/{mm_dist_threshold}mm") plt.figure() plot_gamma_hist(gamma, percent_deviation, mm_dist_threshold) plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import requests from bs4 import BeautifulSoup url = "https://dl.acm.org/doi/proceedings/10.1145/2814864" req = requests.get(url) soup = BeautifulSoup(req.content, "html.parser") print(soup.prettify()) soup.find_all("a") for link in soup.find_all("a"): print(link) print("-------") soup.title.string soup.title.parent.name for publication in soup.find_all("h5"): print(publication) print("---------------") for publication in soup.find_all("h5", {"class": "issue-item__title"}): print(publication) print("---------------") for publication in soup.find_all("h5", {"class": "issue-item__title"}): print(f"Title: {publication.string}") print(f'DOI: {publication.a["href"].replace("/doi/", "")}') print("---------------") with open("title_and_doi.tsv", "w") as title_fh: title_fh.write("title\tdoi\n") for publication in soup.find_all("h5", {"class": "issue-item__title"}): # print(f"Title: {publication.string}") # print(f'DOI: {publication.a["href"].replace("/doi/", "")}') # print("---------------") title_fh.write( f'{publication.string}\t{publication.a["href"].replace("/doi/", "")}\n') # --- # jupyter: # 
jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # Face completion with a multi-output estimators # # # This example shows the use of multi-output estimator to complete images. # The goal is to predict the lower half of a face given its upper half. # # The first column of images shows true faces. The next columns illustrate # how extremely randomized trees, k nearest neighbors, linear # regression and ridge regression complete the lower half of those faces. # # # # + print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import fetch_olivetti_faces from sklearn.utils.validation import check_random_state from sklearn.ensemble import ExtraTreesRegressor from sklearn.neighbors import KNeighborsRegressor from sklearn.linear_model import LinearRegression from sklearn.linear_model import RidgeCV # Load the faces datasets data = fetch_olivetti_faces() targets = data.target data = data.images.reshape((len(data.images), -1)) train = data[targets < 30] test = data[targets >= 30] # Test on independent people # Test on a subset of people n_faces = 5 rng = check_random_state(4) face_ids = rng.randint(test.shape[0], size=(n_faces, )) test = test[face_ids, :] n_pixels = data.shape[1] # Upper half of the faces X_train = train[:, :(n_pixels + 1) // 2] # Lower half of the faces y_train = train[:, n_pixels // 2:] X_test = test[:, :(n_pixels + 1) // 2] y_test = test[:, n_pixels // 2:] # Fit estimators ESTIMATORS = { "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32, random_state=0), "K-nn": KNeighborsRegressor(), "Linear regression": LinearRegression(), "Ridge": RidgeCV(), } y_test_predict = dict() for name, estimator in ESTIMATORS.items(): estimator.fit(X_train, y_train) y_test_predict[name] = estimator.predict(X_test) # Plot the completed faces image_shape = (64, 64) n_cols = 1 + len(ESTIMATORS) plt.figure(figsize=(2. * n_cols, 2.26 * n_faces)) plt.suptitle("Face completion with multi-output estimators", size=16) for i in range(n_faces): true_face = np.hstack((X_test[i], y_test[i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1, title="true faces") sub.axis("off") sub.imshow(true_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest") for j, est in enumerate(sorted(ESTIMATORS)): completed_face = np.hstack((X_test[i], y_test_predict[est][i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j, title=est) sub.axis("off") sub.imshow(completed_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest") plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- shopping = ["bread", "milk", "eggs"] print(shopping) for item in shopping: print(item) mixed = [365, "days", True] print(mixed) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction to PYNQ for Alveo # # In this notebook we will explore how PYNQ compares with OpenCL when interacting with an Alveo device. 
To this purpose, we will use the [hello world](https://github.com/Xilinx/Vitis_Accel_Examples/tree/63bae10d581df40cf9402ed71ea825476751305d/hello_world) application of the [Vitis Accel Examples' Repository](https://github.com/Xilinx/Vitis_Accel_Examples/tree/63bae10d581df40cf9402ed71ea825476751305d). # # The comparison is mainly visual, and is done by putting side-to-side the code from the original [host.cpp](https://github.com/Xilinx/Vitis_Accel_Examples/blob/63bae10d581df40cf9402ed71ea825476751305d/hello_world/src/host.cpp) from the Vitis_Accel_Examples' hello_world application, and the code from the vector addition [notebook](./1-vector-addition.ipynb), since they both use the same kernel. Code from the OpenCL source file is edited for readability. # ![pynq-opencl](img/pynq-opencl.png "PYNQ vs OpenCL comparison") # ## Code Walkthrough # # ### Device initialization # # The first thing to do in both cases, is to program the device and initialize the software context. # In the OpenCL version, this is achieved with the following code # # ```cpp # auto devices = xcl::get_xil_devices(); # auto fileBuf = xcl::read_binary_file(binaryFile); # cl::Program::Binaries bins{{fileBuf.data(), fileBuf.size()}}; # OCL_CHECK(err, context = cl::Context({device}, NULL, NULL, NULL, &err)); # OCL_CHECK(err, q = cl::CommandQueue(context, {device}, CL_QUEUE_PROFILING_ENABLE, &err)); # OCL_CHECK(err, cl::Program program(context, {device}, bins, NULL, &err)); # OCL_CHECK(err, krnl_vector_add = cl::Kernel(program, "vadd", &err)); # ``` # # In particular, the `get_xil_devices()` function finds the available Xilinx devices and return them as a list. Then, `read_binary_file()` loads the binary file (the `.xclbin`) and returns a pointer to the loaded file, that is then consumed to initialize the `bins` object. A new OpenCL `context` is then created, that will be used for this run. After that, a command queue `q` is created, in order to send commands to the device. # Then, the detected `device` is programmed, and finally the vector addition kernel included in the design is assigned to the `krnl_vector_add` variable. # # With PYNQ, the same set of operations is achieved by intantiating a `pynq.Overlay` object (the device is programmed at this stage), and then assigning the vector addition kernel to the `vadd` variable, accessing directly the overlay. import pynq ol = pynq.Overlay("intro.xclbin") vadd = ol.vadd_1 # If you want to use multiple devices, you can pass the `device` argument when you instantiate a `pynq.Overlay` object. Of course, you have to make sure the overlay you are trying to load is compatible with the target device, or an exception will be raised. # ```python3 # ol = pynq.Overlay("intro.xclbin", device=another_device) # ``` # Devices can be listed accessing `pynq.Device.devices`. # # ### Buffers allocation # # In OpenCL host and FPGA buffers need to be handled separately. Therefore, we first have to create the host buffer, and only after that is done, we can instantiate the FPGA buffer, linking it to the corresponding host buffer. 
# # ```cpp # std::vector> source_in1(DATA_SIZE); # std::vector> source_in2(DATA_SIZE); # std::vector> source_hw_results(DATA_SIZE); # OCL_CHECK(err, l::Buffer buffer_in1(context, # CL_MEM_USE_HOST_PTR | CL_MEM_READ_ONLY, vector_size_bytes, # source_in1.data(), &err)); # OCL_CHECK(err, cl::Buffer buffer_in2(context, # CL_MEM_USE_HOST_PTR | CL_MEM_READ_ONLY, vector_size_bytes, # source_in2.data(), &err)); # OCL_CHECK(err, cl::Buffer buffer_output(context, # CL_MEM_USE_HOST_PTR | CL_MEM_WRITE_ONLY, vector_size_bytes, # source_hw_results.data(), &err)); # ``` # # With PYNQ, buffers allocation is carried by [`pynq.allocate`](https://pynq.readthedocs.io/en/v2.5/pynq_libraries/allocate.html), which provides the same interface as a [`numpy.ndarray`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html). Host and FPGA buffers are transparently managed, and the user is only presented with a single interface for both size = 1024*1024 in1 = pynq.allocate((1024, 1024), 'u4') in2 = pynq.allocate((1024, 1024), 'u4') out = pynq.allocate((1024, 1024), 'u4') # ### Send data from host to FPGA # # The `enqueueMigrateMemObjects()` is used in OpenCL to initiate data transfers. The developer must specify the direction as a function parameter. In this case, we are sending data from the host to the FPGA memory, therefore we need to pass `0` as direction. # # ```cpp # OCL_CHECK(err, err = q.enqueueMigrateMemObjects({buffer_in1, buffer_in2}, # 0 /* 0 means from host*/)); # ``` # # The same behavior is achieved in PYNQ by invoking `.sync_to_device()` on each input buffer in1.sync_to_device() in2.sync_to_device() # ### Run the kernel # # To run the kernel in OpenCL each kernel argument need to be set explicitly using the `setArgs()` function, before starting the execution with `enqueueTask()`. # # ```cpp # int size = DATA_SIZE; # OCL_CHECK(err, err = krnl_vector_add.setArg(0, buffer_in1)); # OCL_CHECK(err, err = krnl_vector_add.setArg(1, buffer_in2)); # OCL_CHECK(err, err = krnl_vector_add.setArg(2, buffer_output)); # OCL_CHECK(err, err = krnl_vector_add.setArg(3, size)); # // send data here # OCL_CHECK(err, err = q.enqueueTask(krnl_vector_add)); # // retrieve data here # q.finish(); # ``` # # In PYNQ, we use the `.call()` function to do everything in a single line. The function will take care of correctly setting the `register_map` of the IP and send the start signal. vadd.call(in1, in2, out, size) # ### Receive data from FPGA to host # # Again, the `enqueueMigrateMemObjects()` is used in OpenCL to initiate data transfers. In this case, we are retrieving data from the FPGA to the host memory, and the host code here uses the `CL_MIGRATE_MEM_OBJECT_HOST` constant. # # ```cpp # OCL_CHECK(err, err = q.enqueueMigrateMemObjects({buffer_output}, # CL_MIGRATE_MEM_OBJECT_HOST)); # ``` # # We achieve the same in PYNQ by calling `.sync_from_device()` on our output buffer out.sync_from_device() # ## Cleaning up # # Let us clean up the allocated resources before ending this notebook. 
del in1 del in2 del out ol.free() # Copyright (C) 2020 Xilinx, Inc # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [py35] # language: python # name: Python [py35] # --- # + import numpy as np import sys if "../" not in sys.path: sys.path.append("../") from sklearn.datasets import make_gaussian_quantiles from adaboost import AdaBoost import matplotlib.pyplot as plt # %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' import seaborn as sns sns.set_context('notebook') sns.set_style('whitegrid') # for auto-reloading external modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython # %load_ext autoreload # %autoreload 2 # - # Construct dataset X1, y1 = make_gaussian_quantiles(cov=2., n_samples=200, n_features=2, n_classes=2, random_state=1) X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5, n_samples=300, n_features=2, n_classes=2, random_state=1) X = np.concatenate((X1, X2)) y = np.concatenate((y1, - y2 + 1)) # + model = AdaBoost(n_estimators=1000) model.fit(X, y) np.mean(model.predict(X) == y) # + # Reference: http://scikit-learn.org/stable/auto_examples/ensemble/plot_adaboost_twoclass.html#sphx-glr-auto-examples-ensemble-plot-adaboost-twoclass-py # You can try with different number of n_estimators and see decision boundaries plot_colors = "br" plot_step = 0.02 class_names = "AB" plt.figure(figsize=(10, 5)) # Plot the decision boundaries x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)) Z = model.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired) plt.axis("tight") # Plot the training points for i, n, c in zip(range(2), class_names, plot_colors): idx = np.where(y == i) plt.scatter(X[idx, 0], X[idx, 1], c=c, cmap=plt.cm.Paired, label="Class %s" % n) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.legend(loc='upper right') plt.xlabel('x') plt.ylabel('y') plt.title('Decision Boundary') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Shells 2D # ## Init symbols for *sympy* from sympy import * from sympy.vector import CoordSys3D N = CoordSys3D('N') x1, x2 = symbols("x_1 x_2") alpha1, alpha2 = symbols("alpha_1 alpha_2") R, L, ga, gv = symbols("R L g_a g_v") init_printing() # ## Cylindrical coordinates # + a1 = pi / 2 + (L / 2 - alpha1)/R x = R * cos(a1) y = R * sin(a1) r = x*N.i + y*N.j # - # #### Curve in 2D coordinates system will be defined with the following vector $\vec{r}=\vec{r(\alpha_1)}$ r # #### Tangent to curve v_temp = r.diff(alpha1) dr_len = v_temp.magnitude() v = v_temp / dr_len v = trigsimp(v) v_temp trigsimp(dr_len) # #### Normal to curve n_temp = v.diff(alpha1) k=trigsimp(n_temp.magnitude()) n = n_temp/k q=1/(R*sqrt(1/R**2)) n = trigsimp(n).subs(q, 1) n v.dot(n) n.dot(v) # #### Curvature sympify(k) # #### Derivative of base vectors # Let's find # $\frac { d\vec{n} } { d\alpha_1}$ # $\frac { d\vec{v} } { d\alpha_1}$ # $\frac { d\vec{n} } { d\alpha_2}$ # $\frac { d\vec{v} } { d\alpha_2}$ n.diff(alpha1) # $ \frac { 
d\vec{n} } { d\alpha_1} = -\frac {1}{R} \vec{v} = -k \vec{v} $ v.diff(alpha1) # $ \frac { d\vec{v} } { d\alpha_1} = \frac {1}{R} \vec{n} = k \vec{n} $ # #### Derivative of vectors # # $ \vec{u} = u_v \vec{v} + u_n\vec{n} $ # # $ \frac { d\vec{u} } { d\alpha_1} = \frac { d(u_v\vec{v}) } { d\alpha_1} + \frac { d(u_n\vec{n}) } { d\alpha_1} = # \frac { du_n } { d\alpha_1} \vec{n} + u_n \frac { d\vec{n} } { d\alpha_1} + \frac { du_v } { d\alpha_1} \vec{v} + u_v \frac { d\vec{v} } { d\alpha_1} = \frac { du_n } { d\alpha_1} \vec{n} - u_n k \vec{v} + \frac { du_v } { d\alpha_1} \vec{v} + u_v k \vec{n}$ # # Then # $ \frac { d\vec{u} } { d\alpha_1} = \left( \frac { du_v } { d\alpha_1} - u_n k \right) \vec{v} + \left( \frac { du_n } { d\alpha_1} + u_v k \right) \vec{n}$ # # $ \frac { d\vec{u} } { d\alpha_2} = \frac { d(u_n\vec{n}) } { d\alpha_2} + \frac { d(u_v\vec{v}) } { d\alpha_2} = # \frac { du_n } { d\alpha_2} \vec{n} + u_n \frac { d\vec{n} } { d\alpha_2} + \frac { du_v } { d\alpha_2} \vec{v} + u_v \frac { d\vec{v} } { d\alpha_2} = \frac { du_n } { d\alpha_2} \vec{n} + \frac { du_v } { d\alpha_2} \vec{v} $ # #### Base Vectors $\vec{R}_1, \vec{R}_2$ R_alpha=r+alpha2*n R_alpha R1=R_alpha.diff(alpha1) R2=R_alpha.diff(alpha2) trigsimp(R1) R2 # #### Let's find Jacobi matrix: # # $ A = \left( # \begin{array}{cc} # \frac{\partial x_1}{\partial \alpha_1} & \frac{\partial x_1}{\partial \alpha_2} \\ # \frac{\partial x_2}{\partial \alpha_1} & \frac{\partial x_2}{\partial \alpha_2} # \end{array} # \right)$ # # $ \left[ # \begin{array}{cc} # \vec{R}_1 & \vec{R}_2 # \end{array} # \right] = \left[ # \begin{array}{cc} # \vec{e}_1 & \vec{e}_2 # \end{array} # \right] \cdot \left( # \begin{array}{cc} # \frac{\partial x_1}{\partial \alpha_1} & \frac{\partial x_1}{\partial \alpha_2} \\ # \frac{\partial x_2}{\partial \alpha_1} & \frac{\partial x_2}{\partial \alpha_2} # \end{array} # \right) = \left[ # \begin{array}{cc} # \vec{e}_1 & \vec{e}_2 # \end{array} # \right] \cdot A$ # # $ \left[ # \begin{array}{cc} # \vec{e}_1 & \vec{e}_2 # \end{array} # \right] = \left[ # \begin{array}{cc} # \vec{R}_1 & \vec{R}_2 # \end{array} # \right] \cdot A^{-1}$ m11=R1.dot(N.i) m12=R2.dot(N.i) m21=R1.dot(N.j) m22=R2.dot(N.j) A=Matrix([[m11, m12], [m21, m22]]) A A_inv = trigsimp(A**-1) sympify(trigsimp(Matrix([R1, R2]).T*A_inv)) trigsimp(A.det()) # #### Metric tensor # + g11=R1.dot(R1) g12=R1.dot(R2) g21=R2.dot(R1) g22=R2.dot(R2) G=Matrix([[g11, g12],[g21, g22]]) G=trigsimp(G) G # - G_inv = G**-1 # #### Derivative of base vectors dR1dalpha1 = trigsimp(R1.diff(alpha1)) dR1dalpha1 # $ \frac { d\vec{R_1} } { d\alpha_1} = \frac {1}{R} \left( 1-\frac{\alpha_2}{R} \right) \vec{R_2} $ dR1dalpha2 = trigsimp(R1.diff(alpha2)) dR1dalpha2 # $ \frac { d\vec{R_1} } { d\alpha_2} = -\frac {1}{R} \frac {1}{1-\frac{\alpha_2}{R}} \vec{R_1} $ dR2dalpha1 = trigsimp(R2.diff(alpha1)) dR2dalpha1 # $ \frac { d\vec{R_2} } { d\alpha_1} = -\frac {1}{R} \frac {1}{1-\frac{\alpha_2}{R}} \vec{R_1} $ dR2dalpha2 = trigsimp(R2.diff(alpha2)) dR2dalpha2 # $ \frac { d\vec{R_2} } { d\alpha_2} = \vec{0} $ # #### Derivative of vectors # # $ \vec{u} = u^1 \vec{R_1} + u^2\vec{R_2} $ # # $ \frac { d\vec{u} } { d\alpha_1} = \frac { d(u^1\vec{R_1}) } { d\alpha_1} + \frac { d(u^2\vec{R_2}) } { d\alpha_1} = # \frac { du^1 } { d\alpha_1} \vec{R_1} + u^1 \frac { d\vec{R_1} } { d\alpha_1} + \frac { du^2 } { d\alpha_1} \vec{R_2} + u^2 \frac { d\vec{R_2} } { d\alpha_1} = \frac { du^1 } { d\alpha_1} \vec{R_1} + u^1 \frac {1}{R} \left( 1-\frac{\alpha_2}{R} \right) \vec{R_2} + \frac { du^2 } 
{ d\alpha_1} \vec{R_2} - u^2 \frac {1}{R} \frac {1}{1-\frac{\alpha_2}{R}} \vec{R_1}$ # # Then # $ \frac { d\vec{u} } { d\alpha_1} = \left( \frac { du^1 } { d\alpha_1} - u^2 \frac {1}{R} \frac {1}{1-\frac{\alpha_2}{R}} \right) \vec{R_1} + \left( \frac { du^2 } { d\alpha_1} + u^1 \frac {1}{R} \left( 1-\frac{\alpha_2}{R} \right) \right) \vec{R_2}$ # # $ \frac { d\vec{u} } { d\alpha_2} = \frac { d(u^1\vec{R_1}) } { d\alpha_2} + \frac { d(u^2\vec{R_2}) } { d\alpha_2} = # \frac { du^1 } { d\alpha_2} \vec{R_1} + u^1 \frac { d\vec{R_1} } { d\alpha_2} + \frac { du^2 } { d\alpha_2} \vec{R_2} + u^2 \frac { d\vec{R_2} } { d\alpha_2} = \frac { du^1 } { d\alpha_2} \vec{R_1} - u^1 \frac {1}{R} \frac {1}{1-\frac{\alpha_2}{R}} \vec{R_1} + \frac { du^2 } { d\alpha_2} \vec{R_2} $ # # Then # $ \frac { d\vec{u} } { d\alpha_2} = \left( \frac { du^1 } { d\alpha_2} - u^1 \frac {1}{R} \frac {1}{1-\frac{\alpha_2}{R}} \right) \vec{R_1} + \frac { du^2 } { d\alpha_2} \vec{R_2}$ # # $\nabla_1 u^1 = \frac { \partial u^1 } { \partial \alpha_1} - u^2 \frac {1}{R} \frac {1}{1-\frac{\alpha_2}{R}}$ # # $\nabla_1 u^2 = \frac { \partial u^2 } { \partial \alpha_1} + u^1 \frac {1}{R} \left( 1-\frac{\alpha_2}{R} \right) $ # # $\nabla_2 u^1 = \frac { \partial u^1 } { \partial \alpha_2} - u^1 \frac {1}{R} \frac {1}{1-\frac{\alpha_2}{R}}$ # # $\nabla_2 u^2 = \frac { \partial u^2 } { \partial \alpha_2}$ # # $ \nabla \vec{u} = \left( # \begin{array}{cc} # \nabla_1 u^1 & \nabla_1 u^2 \\ # \nabla_2 u^1 & \nabla_2 u^2 # \end{array} # \right)$ # + u1=Function('u^1') u2=Function('u^2') u1_nabla1 = u1(alpha1, alpha2).diff(alpha1) - u2(alpha1, alpha2) / R * (S(1)/(1-alpha2/R)) u2_nabla1 = u2(alpha1, alpha2).diff(alpha1) + u1(alpha1, alpha2) / R * ( 1-alpha2/R) u1_nabla2 = u1(alpha1, alpha2).diff(alpha2) - u1(alpha1, alpha2) / R * (S(1)/(1-alpha2/R)) u2_nabla2 = u2(alpha1, alpha2).diff(alpha2) # $\nabla_2 u^2 = \frac { \partial u^2 } { \partial \alpha_2}$ grad_u = Matrix([[u1_nabla1, u2_nabla1],[u1_nabla2, u2_nabla2]]) grad_u # - q=Symbol('q') grad_u_down=grad_u.subs(1-alpha2/R, q)*G.subs((R-alpha2)/R,q) #grad_u_down=grad_u*G expand(simplify(grad_u_down))#.subs((R-alpha2)/R, q) # $ # \left( # \begin{array}{c} # \nabla_1 u_1 \\ \nabla_2 u_1 \\ # \nabla_1 u_2 \\ \nabla_2 u_2 # \end{array} # \right) # = # \left( # \begin{array}{c} # \left( 1-\frac{\alpha_2}{R} \right)^2 \frac { \partial u^1 } { \partial \alpha_1} - u^2 \frac {\left( 1-\frac{\alpha_2}{R} \right)}{R} \\ # \left( 1-\frac{\alpha_2}{R} \right)^2 \frac { \partial u^1 } { \partial \alpha_2} - u^1 \frac {\left( 1-\frac{\alpha_2}{R} \right)}{R} \\ # \frac { \partial u^2 } { \partial \alpha_1} + u^1 \frac {\left( 1-\frac{\alpha_2}{R} \right)}{R} \\ # \frac { \partial u^2 } { \partial \alpha_2} # \end{array} # \right) # $ # # $ # \left( # \begin{array}{c} # \nabla_1 u_1 \\ \nabla_2 u_1 \\ # \nabla_1 u_2 \\ \nabla_2 u_2 # \end{array} # \right) # = # \left( # \begin{array}{cccccc} # 0 & \left( 1-\frac{\alpha_2}{R} \right)^2 & 0 & -\frac {\left( 1-\frac{\alpha_2}{R} \right)}{R} & 0 & 0 \\ # -\frac {\left( 1-\frac{\alpha_2}{R} \right)}{R} & 0 & \left( 1-\frac{\alpha_2}{R} \right)^2 & 0 & 0 & 0 \\ # \frac {\left( 1-\frac{\alpha_2}{R} \right)}{R} & 0 & 0 & 0 & 1 & 0 \\ # 0 & 0 & 0 & 0 & 0 & 1 \\ # \end{array} # \right) # \left( # \begin{array}{c} # u^1 \\ # \frac { \partial u^1 } { \partial \alpha_1} \\ # \frac { \partial u^1 } { \partial \alpha_2} \\ # u^2 \\ # \frac { \partial u^2 } { \partial \alpha_1} \\ # \frac { \partial u^2 } { \partial \alpha_2} \\ # \end{array} # \right) # $ # ### 
Elasticity tensor(stiffness tensor) # + from sympy import MutableDenseNDimArray C_x = MutableDenseNDimArray.zeros(3, 3, 3, 3) for i in range(3): for j in range(3): for k in range(3): for l in range(3): elem_index = 'C^{{{}{}{}{}}}'.format(i+1, j+1, k+1, l+1) el = Symbol(elem_index) C_x[i,j,k,l] = el C_x # + C_x_1 = MutableDenseNDimArray.zeros(3, 3, 3, 3) def getCIndecies(index): if (index == 0): return 0, 0 elif (index == 1): return 1, 1 elif (index == 2): return 2, 2 elif (index == 3): return 0, 1 elif (index == 4): return 0, 2 elif (index == 5): return 1, 2 for i in range(3): for j in range(i, 3): for k in range(3): for l in range(k, 3): elem_index = 'C^{{{}{}{}{}}}'.format(i+1, j+1, k+1, l+1) el = Symbol(elem_index) C_x_1[i,j,k,l] = el C_x_1[i,j,l,k] = el C_x_1[j,i,k,l] = el C_x_1[j,i,l,k] = el if (i >= k or j >= l): C_x_1[k,l,i,j] = el C_x_1[k,l,j,i] = el C_x_1[l,k,i,j] = el C_x_1[l,k,j,i] = el C_x_1 # + # for i in range(3): # for j in range(i, 3): # for k in range(3): # for l in range(k, 3): # elem_index = 'C^{{{}{}{}{}}}'.format(i+1, j+1, k+1, l+1) # el = Symbol(elem_index) # C_x[i,j,k,l] = el # C_x[i,j,l,k] = el # C_x[j,i,k,l] = el # C_x[j,i,l,k] = el # C_x # for i in range(3): # for j in range(3): # for k in range(3): # for l in range(3): # elem_index = 'C^{{{}{}{}{}}}'.format(i+1, j+1, k+1, l+1) # el = Symbol(elem_index) # C_x[i,j,k,l] = el # C_x[k,l,i,j] = el # if (s < 3 and t < 3): # elif (s==t): # i,j = getCIndecies(s) # k,l = getCIndecies(t) # elem_index = 'C^{{{}{}{}{}}}'.format(i+1, j+1, k+1, l+1) # el = Symbol(elem_index) # C_x[i,j,k,l] = el # def getCIndecies(index): # if (index == 0): # return 0, 0 # elif (index == 1): # return 1, 1 # elif (index == 2): # return 2, 2 # elif (index == 3): # return 0, 1 # elif (index == 4): # return 0, 2 # elif (index == 5): # return 1, 2 # def getCalpha(C, A, q, p, s, t): # res = S(0) # for i in range(3): # for j in range(3): # for k in range(3): # for l in range(3): # res += C[i,j,k,l]*A[q,i] # return simplify(trigsimp(res)) # C_alpha = MutableDenseNDimArray.zeros(3, 3, 3, 3) # C_alpha_empty = MutableDenseNDimArray.zeros(3, 3, 3, 3) # m11=R1.dot(N.i) # m12=R2.dot(N.i) # m21=R1.dot(N.j) # m22=R2.dot(N.j) # A=Matrix([[m11, m12, 0], [m21, m22, 0], [0,0,1]]) # A_inv=A**-1 # for s in range(6): # for t in range(s, 6): # if (s < 3 and t < 3): # i,j = getCIndecies(s) # k,l = getCIndecies(t) # elem_index = 'C^{}{}{}{}'.format(i+1, j+1, k+1, l+1) # el = Symbol(elem_index) # C_x[i,j,k,l] = el # C_x[k,l,i,j] = el # elif (s==t): # i,j = getCIndecies(s) # k,l = getCIndecies(t) # elem_index = 'C^{}{}{}{}'.format(i+1, j+1, k+1, l+1) # el = Symbol(elem_index) # C_x[i,j,k,l] = el # for i in range(3): # for j in range(3): # for k in range(3): # for l in range(3): # c = getCalpha(C_x, A_inv, i, j, k, l) # C_alpha[i,j,k,l] = c # if (c != 0): # elem_index = 'C_{{\alpha}}^{}{}{}{}'.format(i+1, j+1, k+1, l+1) # el = Symbol(elem_index) # C_alpha_empty[i,j,k,l] = el # # trigsimp(C_alpha[0,2,0,2]) # C_alpha_empty # - C_x # #### Virtual work # + def contraction3DSameRank(A,B): res = S(0) for i in range(3): for j in range(3): res += A[i,j]*B[j,i] return res def contraction3D(C,e): res = MutableDenseNDimArray.zeros(3, 3) for i in range(3): for j in range(3): res[i,j] = S(0) for k in range(3): for l in range(3): res[i,j] += C[i,j,k,l]*e[k,l] return res # + e11 = Symbol("e_{11}") e12 = Symbol("e_{12}") e22 = Symbol("e_{22}") e13 = Symbol("e_{13}") e23 = Symbol("e_{23}") e33 = Symbol("e_{33}") # s11 = Symbol("s_{11}") # s12 = Symbol("s_{12}") # s22 = Symbol("s_{22}") 
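# Assemble the symmetric strain tensor from its six independent components and
# contract it twice with the elasticity tensor: s^{ij} = C^{ijkl} e_{kl} gives
# the stress, and E = s^{ij} e_{ij} the virtual-work (strain-energy) density.
# Note that `C_alpha` is only built in the commented-out cell above, so that
# cell (or a substitution of `C_x`) is needed before the next lines can run.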
e=Matrix([[e11, e12, e13], [e12, e22, e23], [e13, e23, e33]]) s=contraction3D(C_alpha, e) E=contraction3DSameRank(s, e) # + # e_alpha=G*A_inv*e*A_inv.T*G # #e_alpha=A*e*A.T # s_alpha=A_inv*s*A_inv.T # E_alpha=contraction2D(s_alpha, e_alpha) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Further plan # 1. (done) Fix image size, flips, T, beam_center, ... # * (done) Add masks! # * (done) Change structure. q vectors - to detector geometry. # * Fix units! Compare window size, ... # * (done) Optimize cython code and move to a standalone module. # * Write tests (maybe for matlab comparsion as well) # * Make module for visualization # # Compare to matlab results # ## Parameters definition # + from numpy import pi k = 1 numberOfImages = 2 nameCounters = [1, 12] detectorAnglesDelta = [1, 2] detectorAnglesGamma = [6, 6] correctSurfaceRefraction = 0; criticalAngle = 0.14 wavelength = 0.5636 k0 = 2 * pi / wavelength angleOfIncidence = (k-1)*0.02 sampleTiltAngle = -angleOfIncidence isRotatedCounterclockwise = 1 isFlippedLeftRight = 0 isFlippedUpDown = 0 detectorDistance = 295 horizontalDetectorSize = 487 verticalDetectorSize = 619 detectorSizeY, detectorSizeZ = verticalDetectorSize, horizontalDetectorSize numberOfPixels = detectorSizeZ * detectorSizeY detectorGapsAlongY = [] detectorGapsAlongZ = [[195, 212], [407, 424]] pixelSizeInMm = 0.172 beamCenterYAtDelta0 = 117 beamCenterZAtGamma0 = 242 outputQResolution = 0.002 outputMaxQXy = 3.5 outputMaxQZ = 2.7 averagingWindow = 1.3 * 2*pi/wavelength * pixelSizeInMm/ detectorDistance intensityCapBeforeIntRescaling = 100000 hotPixelThreshold = 100000 # - # ## Coordinates # ### Python implementation # + from typing import NamedTuple import numpy as np class BeamCenter(NamedTuple): z: int y: int class Size(NamedTuple): z: int y: int def init_coordinates(number_of_pixels: int, size: Size, beam_center: BeamCenter, pixel_size: float, detector_distance: float) -> np.ndarray: z_indices = np.arange(size.z - 1, -1, -1) y_indices = np.arange(size.y - 1, -1, -1) yy, zz = np.meshgrid(y_indices, z_indices) z_coordinates = (zz.T.flatten() - beam_center.z) * pixel_size y_coordinates = (yy.T.flatten() - beam_center.y) * pixel_size x_coordinates = np.ones(number_of_pixels) * detector_distance normalization = np.sqrt(x_coordinates ** 2 + y_coordinates ** 2 + z_coordinates ** 2) return np.array([x_coordinates, y_coordinates, z_coordinates]) / normalization # - # ### Test beam_center = BeamCenter(beamCenterZAtGamma0, beamCenterYAtDelta0) size = Size(detectorSizeZ, detectorSizeY) coordinates = init_coordinates(numberOfPixels, size, beam_center, pixelSizeInMm, detectorDistance) coordinates # ## Read image # ### Edf reader # + import os import gzip import numpy as np def read_edf_from_file(file_path: str): data = get_data_from_filepath(file_path) return read_edf_from_data(data) def read_edf_gz(gz_filepath, *, reshape: bool = True): _check_file(gz_filepath, '.edf.gz') with gzip.open(gz_filepath, 'rb') as f: data = f.read() return read_edf_from_data(data, reshape=reshape) def read_edf(edf_filepath, *, reshape: bool = True): _check_file(edf_filepath, '.edf') with open(edf_filepath, 'rb') as f: data = f.read() return read_edf_from_data(data, reshape=reshape) def read_edf_from_data(data, *, reshape: bool = True): header_dict = read_header_from_data(data) header_end_index = header_dict['headerSize'] image_size = 
int(header_dict['Size']) raw_image_data = data[header_end_index:header_end_index + image_size] data_type = _get_numpy_type(header_dict['DataType']) image_shape = (int(header_dict['Dim_2']), int(header_dict['Dim_1'])) data = np.frombuffer(raw_image_data, data_type) if reshape: data = np.rot90(np.reshape(data, image_shape)) return data, header_dict def read_edf_header(edf_filepath): _check_file(edf_filepath, '.edf') with open(edf_filepath, 'rb') as f: data = f.read() return read_header_from_data(data) def read_edf_header_from_gz(gz_filepath): _check_file(gz_filepath, '.edf.gz') with gzip.open(gz_filepath, 'rb') as f: data = f.read() return read_header_from_data(data) def read_header_from_data(data) -> dict: header_end_index = data.find(b'}\n') + 2 header = data[1:header_end_index].decode('utf-8') header_dict = _get_header_dict(header) header_dict.update({'headerSize': header_end_index}) return header_dict def read_header_from_file(filepath): data = get_data_from_filepath(filepath) return read_header_from_data(data) def get_data_from_filepath(filepath: str): _check_file(filepath) if filepath.endswith('.edf'): with open(filepath, 'rb') as f: return f.read() elif filepath.endswith('.edf.gz'): with gzip.open(filepath, 'rb') as f: return f.read() else: raise ValueError('Unknown file type') def _get_header_dict(header): header_dict = {} raw_list = header.replace('\n', '').strip(). \ replace(' ', ''). \ replace('{', ''). \ replace('}', ''). \ split(';') for item in raw_list: item = item.split('=') if len(item) == 2: header_dict.update([item]) return header_dict def _check_file(filepath: str, end_filter: str = None) -> None: if not os.path.isfile(filepath): raise FileNotFoundError(f'File {filepath} doesn\'t exist') if end_filter and not filepath.endswith(end_filter): raise ValueError(f'File {filepath} is not an {end_filter} file') def _get_numpy_type(edf_type): """ Returns NumPy type based on edf type """ edf_type = edf_type.upper() if edf_type == 'SIGNEDBYTE': return np.int8 # "b" elif edf_type == 'UNSIGNEDBYTE': return np.uint8 # "B" elif edf_type == 'SIGNEDSHORT': return np.int16 # "h" elif edf_type == 'UNSIGNEDSHORT': return np.uint16 # "H" elif edf_type == 'SIGNEDINTEGER': return np.int32 # "i" elif edf_type == 'UNSIGNEDINTEGER': return np.uint32 # "I" elif edf_type == 'SIGNEDLONG': return np.int32 # "i" elif edf_type == 'UNSIGNEDLONG': return np.uint32 # "I" elif edf_type == 'SIGNED64': return np.int64 # "l" elif edf_type == 'UNSIGNED64': return np.uint64 # "L" elif edf_type == 'FLOATVALUE': return np.float32 # "f" elif edf_type == 'FLOAT': return np.float32 # "f" elif edf_type == 'DOUBLEVALUE': return np.float64 # "d" else: raise TypeError(f'unknown EdfType {edf_type}') # - # ### Read image from pathlib import Path edf_filepaths = list((Path()/'images_for_tests').glob('*.edf')) len(edf_filepaths) couple_image_list = [(read_edf(str(edf_filepaths[i]))[0], read_edf(str(edf_filepaths[i + 11]))[0]) for i in range(len(edf_filepaths) // 2)] img, _ = read_edf(str(edf_filepaths[0])) img2, _ = read_edf(str(edf_filepaths[11])) # + import matplotlib.pyplot as plt plt.imshow(img, vmax=5000) # - # ## Rotation matrices def init_rotation_matrix(angle_gamma: float, angle_delta: float, sample_tilt_angle: float) -> np.ndarray: gamma_angle = angle_gamma * np.pi / 180 delta_angle = angle_delta * np.pi / 180 sample_tilt_angle = sample_tilt_angle * np.pi / 180 r_matrix_gamma = np.array([[np.cos(gamma_angle), 0, - np.sin(gamma_angle)], [0, 1, 0], [np.sin(gamma_angle), 0, np.cos(gamma_angle)]]) r_matrix_delta = 
np.array([[np.cos(delta_angle), - np.sin(delta_angle), 0], [np.sin(delta_angle), np.cos(delta_angle), 0], [0, 0, 1]]) r_matrix_chi = np.array([[np.cos(sample_tilt_angle), 0, - np.sin(sample_tilt_angle)], [0, 1, 0], [np.sin(sample_tilt_angle), 0, np.cos(sample_tilt_angle)]]) return r_matrix_delta.dot(r_matrix_gamma).dot(r_matrix_chi) matrix = init_rotation_matrix(detectorAnglesGamma[0], detectorAnglesDelta[0], sampleTiltAngle) matrix rotated_vector = matrix.dot(coordinates) # ## Angles # + from typing import Tuple def init_angle_vectors(coordinates: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: vertical_angles = np.arcsin(coordinates[2]) horizontal_angles = np.sign(coordinates[1]) * np.arccos( coordinates[0] / np.sin(np.pi / 2 - vertical_angles) ) return vertical_angles, horizontal_angles # - vertical_angles, horizontal_angles = init_angle_vectors(rotated_vector) vertical_angles[0], vertical_angles[-1] horizontal_angles[0], horizontal_angles[-1] horizontal_angles.shape vertical_angles.shape # ## Q vectors def transform_angles_to_q(vertical_angles: np.ndarray, horizontal_angles: np.ndarray, angle_of_incidence: float, number_of_pixels: int, k0: float) -> Tuple[np.ndarray, np.ndarray]: angle_of_incidence = angle_of_incidence * np.pi / 180 xy_sign = np.sign(horizontal_angles) q_xy = xy_sign * k0 * np.sqrt( ( np.cos(vertical_angles) * np.cos(horizontal_angles) - np.cos(angle_of_incidence) ) ** 2 + ( np.cos( vertical_angles) * np.sin(horizontal_angles) ) ** 2 ) q_z = k0 * (np.sin(vertical_angles) + np.sin(angle_of_incidence)) return q_xy, q_z q_xy, q_z = transform_angles_to_q(vertical_angles, horizontal_angles, angleOfIncidence, numberOfPixels, k0) q_xy q_z img.flatten() # ## Interpolation from scipy.interpolate import LinearNDInterpolator class QMap(NamedTuple): qxy_start: float qxy_end: float qxy_num: int qz_start: float qz_end: float qz_num: int @classmethod def from_step(cls, qxy_end: float, qz_end: float, q_resolution: float): qxy_start = 0 qz_start = 0 qxy_num = int(qxy_end / q_resolution) qz_num = int(qz_end / q_resolution) return cls(qxy_start, qxy_end, qxy_num, qz_start, qz_end, qz_num) @property def qxy(self) -> np.ndarray: return np.linspace(self.qxy_start, self.qxy_end, self.qxy_num) @property def qz(self) -> np.ndarray: return np.linspace(self.qz_start, self.qz_end, self.qz_num) @property def q_vector(self) -> np.ndarray: qxy, qz = np.meshgrid(self.qxy, self.qz) return np.swapaxes(np.stack([qxy.flatten(), qz.flatten()]), 0, 1) def update(self, **kwargs) -> 'QMap': params = self._asdict() params.update(kwargs) return QMap(**params) q_map = QMap.from_step(outputMaxQXy, outputMaxQZ, outputQResolution) q_map.q_vector.shape coordinate_vector = np.swapaxes(np.stack([q_xy, q_z]), 0, 1) coordinate_vector.shape img.flatten().shape interpolator = LinearNDInterpolator(coordinate_vector, img.flatten()) res = interpolator(q_map.q_vector) converted = res.reshape(q_map.qxy_num, q_map.qz_num) import matplotlib.pyplot as plt plt.imshow(img, origin=True, cmap='jet', vmax=5000) plt.colorbar() # # Use package # ## Imports # %reload_ext autoreload # %load_ext autoreload # %autoreload 1 # %aimport qmap_interpolation import qmap_interpolation as qm from qmap_interpolation import * from qmap_interpolation import units as uq # ## Fixures beam_center = BeamCenter(beamCenterZAtGamma0, beamCenterYAtDelta0) size = Size(detectorSizeZ, detectorSizeY) instrument = Instrument(wavelength, size, pixelSizeInMm) mask = np.ones_like(img, dtype=bool) for x1, x2 in detectorGapsAlongZ: mask[:, x1:x2] = False 
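# The mask flags the detector gap columns (module boundaries) as invalid,
# presumably so that those pixels are ignored when the images are converted
# to q-space. A quick sanity check, purely illustrative: the number of masked
# pixels should equal the summed gap widths times the number of image rows.
assert (~mask).sum() == sum(x2 - x1 for x1, x2 in detectorGapsAlongZ) * mask.shape[0]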
detector_geometry = DetectorGeometry(instrument, beam_center, angleOfIncidence, detectorDistance, sampleTiltAngle, detectorAnglesDelta[0], detectorAnglesGamma[0], mask) detector_geometry_2 = DetectorGeometry(instrument, beam_center, angleOfIncidence, detectorDistance, sampleTiltAngle, detectorAnglesDelta[1], detectorAnglesGamma[1], mask) # + # %%time converted_images = [] for im1, im2 in couple_image_list: c_image = ConvertedImage([Image(im1, detector_geometry), Image(im2, detector_geometry_2)]) q_xy, q_z, images = c_image.get_vectors() window_size = c_image.default_window() converted = boxInterpolation(images, q_xy, q_z, q_map.qxy_num, q_map.qz_num, q_map.qxy_start, q_map.qxy_step, 10 * window_size, q_map.qz_start, q_map.qz_step, 10 * window_size) converted_images.append(converted) # - plt.imshow(converted_images[1], origin=True, vmax=5000) # + q_image = Image(img, detector_geometry) q_image_2 = Image(img2, detector_geometry_2) c_image = ConvertedImage([q_image, q_image_2]) q_map = QMap(0,outputMaxQXy, int(outputMaxQXy / outputQResolution), 0, outputMaxQZ, int(outputMaxQZ / outputQResolution)) q_xy, q_z, images = c_image.get_vectors() # - # # Cython # + import cython # %load_ext Cython # - # ### Pure numpy implementation # + from numpy import floor, ceil import numpy as np def py_getind(xmsdel: float, hwdel: float, n: int): imin = int(ceil(xmsdel - hwdel)) if (imin < 0): imin = 0 if (imin > (n - 1)): imin = n imax = int(floor(xmsdel + hwdel)) if (imax < 0): imax = -1 if (imax > (n - 1)): imax = n - 1 return imin, imax def pyBoxInterpolation(inten: np.ndarray, qx: np.ndarray, qy: np.ndarray, ndat: int, nx: int, ny: int, xs: float, xdel: float, xhw: float, ys: float, ydel: float, yhw: float): size = nx * ny ninten = np.zeros(size) ginten = np.zeros(size) xhwdel = xhw / 2. / xdel yhwdel = yhw / 2./ ydel for i in range(ndat): xmsdel = (qx[i] - xs) / xdel ximin, ximax = py_getind(xmsdel, xhwdel, nx) xmsdel = (qy[i] - ys) / ydel yimin, yimax = py_getind(xmsdel, yhwdel, ny) for l in range(ximin, ximax + 1): ninten[l * ny + yimin: l * ny + yimax + 1] += 1 ginten[l * ny + yimin: l * ny + yimax + 1] += inten[i] return np.nan_to_num(ginten / ninten) # - # ### Cython implementation with numpy usage # + magic_args="-a" language="cython" # # from libc.math cimport floor, ceil # # import numpy as np # cimport numpy as np # cimport cython # # # cdef inline (int, int) getind(double xmsdel, double hwdel, int n): # cdef int imin = int(ceil(xmsdel - hwdel)) # if (imin < 0): # imin = 0 # elif (imin > (n - 1)): # imin = n # cdef int imax = int(floor(xmsdel + hwdel)) # if (imax < 0): # imax = -1 # elif (imax > (n - 1)): # imax = n - 1 # return imin, imax # # # @cython.boundscheck(False) # @cython.wraparound(False) # def boxInterpolation( # np.ndarray[np.float_t, ndim=1] inten, # np.ndarray[np.float_t, ndim=1] qx, # np.ndarray[np.float_t, ndim=1] qy, # int nx, int ny, # double xs, double xdel, double xhw, # double ys, double ydel, double yhw): # # cdef int ximin, ximax, yimin, yimax # cdef int i, k, l, ind # cdef int size = nx * ny # cdef double xmsdel # # cdef np.ndarray[np.float_t, ndim=1] ninten = np.zeros(size) # cdef np.ndarray[np.float_t, ndim=1] ginten = np.zeros(size) # # cdef double xhwdel = xhw / 2. 
/ xdel # cdef double yhwdel = yhw / 2./ ydel # # for i in range(inten.size): # xmsdel = (qx[i] - xs) / xdel # ximin, ximax = getind(xmsdel, xhwdel, nx) # xmsdel = (qy[i] - ys) / ydel # yimin, yimax = getind(xmsdel, yhwdel, ny) # # for l in range(ximin, ximax + 1): # for k in range(yimin, yimax + 1): # ind = k * nx + l # ninten[ind] += 1 # ginten[ind] += inten[i] # # for i in range(size): # if ninten[i] != 0: # ginten[i] = ginten[i] / ninten[i] # return ginten.reshape((ny, nx)) # - # ### Cython with multithreading # + magic_args="--force" language="cython" # # from libc.math cimport floor, ceil # # import numpy as np # cimport numpy as np # cimport cython # from cython.parallel import prange # # # cdef inline (int, int) getind(double xmsdel, double hwdel, int n) nogil: # cdef int imin = int(ceil(xmsdel - hwdel)) # if (imin < 0): # imin = 0 # elif (imin > (n - 1)): # imin = n # cdef int imax = int(floor(xmsdel + hwdel)) # if (imax < 0): # imax = -1 # elif (imax > (n - 1)): # imax = n - 1 # return imin, imax # # # @cython.boundscheck(False) # @cython.wraparound(False) # def boxInterpolationP( # np.ndarray[np.float_t, ndim=1] inten, # np.ndarray[np.float_t, ndim=1] qx, # np.ndarray[np.float_t, ndim=1] qy, # int ndat, int nx, int ny, # double xs, double xdel, double xhw, # double ys, double ydel, double yhw): # # cdef int ximin, ximax, yimin, yimax # cdef int i, k, l, ind # cdef int size = nx * ny # cdef double xmsdel # # cdef np.ndarray[np.float_t, ndim=1] ninten = np.zeros(size) # cdef np.ndarray[np.float_t, ndim=1] ginten = np.zeros(size) # # cdef double xhwdel = xhw / 2. / xdel # cdef double yhwdel = yhw / 2./ ydel # # for i in prange(ndat, nogil=True): # xmsdel = (qx[i] - xs) / xdel # ximin, ximax = getind(xmsdel, xhwdel, nx) # xmsdel = (qy[i] - ys) / ydel # yimin, yimax = getind(xmsdel, yhwdel, ny) # # for l in range(ximin, ximax + 1): # for k in range(yimin, yimax + 1): # ind = k * nx + l # ninten[ind] += 1 # ginten[ind] += inten[i] # for i in prange(size, nogil=True): # if ninten[i] != 0: # ginten[i] = ginten[i] / ninten[i] # return ginten # - # ### Interpolation q_xy, q_z, images = c_image.get_vectors() window_size = c_image.default_window() detector_geometry # + # %%time for i in range(10): res = boxInterpolation(images, q_xy, q_z, q_map.qxy_num, q_map.qz_num, q_map.qxy_start, q_map.qxy_step, 10 * window_size, q_map.qz_start, q_map.qz_step, 10 * window_size) # + # %%time for i in range(10): res = boxInterpolationP(images, q_xy, q_z, images.size, q_map.qxy_num, q_map.qz_num, q_map.qxy_start, q_map.qxy_step, 15 * window_size, q_map.qz_start, q_map.qz_step, 15 * window_size) # + import matplotlib.pyplot as plt res = boxInterpolation(images, q_xy, q_z, q_map.qxy_num, q_map.qz_num, q_map.qxy_start, q_map.qxy_step, 10 * window_size, q_map.qz_start, q_map.qz_step, 10 * window_size) plt.imshow(res, vmax=5000, origin=True) plt.colorbar() # - # # Analysis couple_image_list = [(read_edf(str(edf_filepaths[i]))[0], read_edf(str(edf_filepaths[i + 11]))[0]) for i in range(len(edf_filepaths) // 2)] # + # %%time converted_images = [] c_image = ConvertedImage() q_map = QMap(0,outputMaxQXy, int(outputMaxQXy / outputQResolution), 0, outputMaxQZ, int(outputMaxQZ / outputQResolution)) for im1, im2 in couple_image_list: im1 = np.flip(np.flip(im1, axis=0), axis=1) im2 = np.flip(np.flip(im2, axis=0), axis=1) c_image.clear() c_image.append_image(Image(im1, detector_geometry)) c_image.append_image(Image(im2, detector_geometry_2)) converted = c_image.calculate_converted_image(q_map) 
converted_images.append(converted) # + import matplotlib.pyplot as plt plt.imshow(converted_images[9], origin=True, vmax=5000) plt.colorbar() # - import numpy as np a = np.array([1, 2, None, None, 4]).astype(np.float) a nan_idx = np.argwhere(np.isnan(a)) nan_idx np.any(np.isnan(np.delete(a, nan_idx))) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # PRACTICE 1 # # [Dataset](https://drive.google.com/open?id=1axM3gfGxQq4T0wuwP51DVMQAecVFmpxL) : Penggunaan Air Tahunan di Baltimore (Hipel an Mcleod, 1994). #
#
# **Latar Belakang** : Dataset yang digunakan merupakan data penggunaan air tahunan di Baltimore dari tahun 1885 hingga 1963 (79 tahun / 79 observasi). Satuan yang digunakan yaitu dalam liter per kapita per hari. # # **Objektif** : Memprediksi penggunaan air tahunan di Baltimore berdasarkan data rata-rata tahunan yang telah diketahui. # # **** # Berdasarkan instruksi, maka langkah-langkah yang akan dilakukan adalah sebagai berikut: # 1. Load Data # 2. Create Summary # 3. Create Line Plot # 4. Group the Annual Data # 5. Create Models using ARIMA # 6. Calculate RMSE # **** # ## 1. Load Data # Dikarenakan terdapat beberapa tabel yang diisi dengan kata-kata yang seharusnya tidak ada, maka perlu dibersihkan terlebih dahulu agar data dapat dimasukkan ke dalam Python. # Package untuk Load Data import pandas as pd import numpy as np # Load Dataset df = pd.read_csv('water.csv', sep = ";") # Melihat 5 Data Teratas df.head() # Melihat 5 Data Terbawah df.tail() # Kode di bawah ini akan membagi dataset (df) menjadi dua file yang berbeda. File yang pertama yaitu dataset untuk pengembangan model (dataset.csv) sebesar 70% dari data awal dan yang lainnya untuk validasi (validation.csv). # Package Export Data ke CSV from pandas import read_csv series = read_csv('water.csv', sep = ";", header=0) split_point = int(len(df)*0.7) dataset, validation = series[0:split_point], series[split_point:] print('Dataset %d, Validation %d' % (len(dataset), len(validation))) dataset.to_csv('dataset.csv', index=False) validation.to_csv('validation.csv', index=False) # > Spesifik dari file-file yang baru saja diperoleh adalah: # - dataset.csv: Pengamatan dari tahun 1885 hingga 1939 (55 pengamatan). # - validation.csv: Pengamatan dari tahun 1940 hingga 1963 (24 pengamatan). # ## 2. Create Summary # Ringkasan dataset dapat kita temukan menggunakan fungsi *describe*. Ringkasan ini termasuk jumlah, rata-rata, standar deviasi, Q1, Q2, Q3, nilai minimum dan nilai maksimum. dataset = read_csv('dataset.csv') dataset = dataset.set_index(dataset['Year']) dataset.drop(columns=['Year'], inplace=True) dataset.describe() # > Berdasarkan *summary* atau statistika deskriptif yang diperoleh, maka dapat diketahui karakteristik dari dataset atau data untuk pengembangan model. Baik itu karakteristik dengan menggunakan pengukuran pemusatan data dan pengukuran penyebaran data. Dapat diketahui bahwa data penggunaan air tahunan di Baltimore rata-rata mecapai 476 Liter dengan penggunaan air maksimum mencapai 662 Liter dan penggunaan air minimum tahunan yaitu 344L. # # 3. Create Line Plot # *Line Plot* dapat memberikan gambaran awal mengenai data yang akan dianalsis. # Package untuk Mmebuat Plot # %matplotlib inline import matplotlib.pyplot as plt from matplotlib.pylab import rcParams import seaborn as sns # Plot Graph rcParams['figure.figsize'] = (21, 5) plt.xlabel('Year') plt.ylabel('Water') plt.plot(dataset['Water']) plt.show() # Berdasarkan *Line Plot* maka dapat mengindikasikan bahwa: # > # - Terlihat adanya tren peningkatan dalam penggunaan air dari waktu ke waktu walaupun terdapat beberapa tren menurun selama beberapa tahun terakhir. # - Terlihat tidak ada data outlier, meskipun ada beberapa fluktuasi besar. # - Penggunaan air tahunan maksimum terjadi pada tahun 1913 sedangkan peggunaan air minimum terjadi pada tahun 1892 (yang pada awalnya dengan menggunakan statistika deskriptif hanya dapat mengetahui nilai maksimum dan minimumnya, dengan menggunakan line plot dapat mengetahui kapan nilai-nilai minimum dan maksimum tesrebut terjadi). # # 4. 
Group the Annual Data # Jika ingin mengelompokkan data tahunan berdasarkan dekade dan mendapatkan gagasan tentang penyebaran pengamatan untuk setiap dekade dan bagaimana ini dapat berubah dapat digunakan beberapa di bawah ini. # Package Grouping Data from matplotlib import pyplot from pandas import DataFrame from pandas import Grouper rcParams['figure.figsize'] = (21, 5) dataset.hist() plt.show() rcParams['figure.figsize'] = (21, 5) dataset.plot(kind='kde') plt.show() # > Berdasarkan *Histogram* dan *Density Plot* maka dapat diketahui distribusi dari data yang akan digunakan. Berdasarkan visualisasi maka dapat diketahui data berdistribusi normal. Hal ini dikarenakan plot yang dihasilkan memiliki bentuk lonceng. dataset = dataset.reset_index() dataset['Year'].unique() # Berdasarkan penggunaan air di Baltimore selama 55 tahun, maka jika ingin melakukan analisis dengan melihat karakteristik setiap dekadenya dapat menggunakan *Box-Plot* berikut. # + # NEAREST 10 FOR DECADE START dataset['decade_start'] = (dataset['Year'] // 10) * 10 + 5 # ADJUST FOR YEARS ENDING IN ZERO dataset.loc[(dataset['Year'] % 10) == 0, 'decade_start'] = dataset['decade_start'] - 10 # CALCULATE DECADE RANGE dataset['decade_range'] = dataset['decade_start'].astype('str') + ' - ' + \ (dataset['decade_start'] + 9).astype('str') plt.figure(figsize=(15,5)) sns.boxplot(x="decade_range", y="Water", data=dataset, color = 'blue') plt.show() plt.clf() plt.close() # - # > Dikarenakan dataset terdiri dari 55 observasi, maka akan terbentuk 6 kelompok dekade. 1 dekade terdiri dari penggunaan air selama 10 tahun. #
# Berdasarkan *Box-Plot*, dapat diketahui bahwa: # - Nilai median untuk setiap dekade menunjukkan trend meningkat, jika dilihat pada box dekade satu (1885-1894) sampai dengan dekade empat (1915-1924) dan menurun untuk dekade berikutnya. # - Setiap dekadenya menunjukkan beberapa perbedaan yang cukup signifikan. # - Terlihat adanya indikasi outlier pada beberapa data kapasitas pengguna air (dekade 1 dan 6) # - Dekade kedua hingga terakhir tampaknya memiliki konsumsi rata-rata yang lebih rendah, mungkin terkait dengan perang dunia pertama. # # ## 5. ARIMA # Package ARIMA from statsmodels.tsa.stattools import adfuller from matplotlib import pyplot from sklearn.model_selection import train_test_split from statsmodels.tsa.arima_model import ARIMA from sklearn.metrics import mean_squared_error dataset = read_csv('dataset.csv') dataset['Year'] = pd.to_datetime(dataset['Year'], format='%Y') dataset = dataset.set_index(dataset['Year']) dataset.drop(columns=['Year'], inplace=True) dataset = dataset.astype('float64') train_size = int(len(dataset)*0.5) train = dataset[0:train_size] test = dataset[train_size:] print(train.shape) print(test.shape) # Model ARIMA (0,1,0) model = ARIMA(train, order=(0,1,0)) model_fit = model.fit(disp=0) print(model_fit.summary()) # > Pada saat melakukan modelling menggunakan ARIMA (0,1,0) maka diindikasikan bahwa terdapat error *list index out of range # *. Maka perlu melakukan indikasi ACF & PACF terlebih dahaulu (yang pada dasarnya harus dilakukan sebelum membuat model ARIMA) # Package Plot ACF & PACF from statsmodels.graphics.tsaplots import plot_acf from statsmodels.graphics.tsaplots import plot_pacf pyplot.figure() pyplot.subplot(121) plot_acf(train, ax=pyplot.gca(), lags = 15) pyplot.subplot(122) plot_pacf(train, ax=pyplot.gca(), lags = 15) pyplot.show() # > Berdasarkan ACF maka dapat terindikasi pola data tida memiliki pola seasonal. Berdasarkan PACF maka terindikasi adanya unsur AR dikarenakan lag 1 keluar dari batas, maka model terindikasi ARIMA (0,1,1). # ARIMA 0,1,1 model1 = ARIMA(train, order=(0,1,1)) model_fit1 = model1.fit(disp=0) print(model_fit1.summary()) # > Jika dilihat p-value pada model maka dapat diketahui bahwa parameter signifikan. Maka dapat dilakukan prediksi lebih lanjut. # Mencobakan model ARIMA lainnya # ARIMA 1,1,1 model2 = ARIMA(train, order=(1,1,1)) model_fit2 = model2.fit(disp=0) # ARIMA 1,0,1 model3 = ARIMA(train, order=(1,0,1)) model_fit3 = model3.fit(disp=0) # ARIMA 0,0,1 model4 = ARIMA(train, order=(0,0,1)) model_fit4 = model4.fit(disp=0) # ARIMA 1,0,0 model5 = ARIMA(train, order=(1,0,0)) model_fit5 = model5.fit(disp=0) # # 6. Prediksi # Setelah mendapatkan model maka dapat dilihat nilai prediksi pada data testing. Dan dapat di sandingkan dengan data aktualnya sebagai berikut. 
#Nilai prediksi dari model y_pred = model_fit.forecast(len(test))[0] df_test = pd.DataFrame(y_pred, columns=['prediksi'], index=test.index) df_test['actual'] = test.values df_test #Nilai prediksi dari model y_pred1 = model_fit1.forecast(len(test))[0] y_pred2 = model_fit2.forecast(len(test))[0] y_pred3 = model_fit3.forecast(len(test))[0] y_pred4 = model_fit4.forecast(len(test))[0] y_pred5 = model_fit5.forecast(len(test))[0] mse1 = mean_squared_error(df_test['actual'], y_pred1) rmse1 = np.sqrt(mse1) mse2 = mean_squared_error(df_test['actual'], y_pred2) rmse2 = np.sqrt(mse2) mse3 = mean_squared_error(df_test['actual'], y_pred3) rmse3 = np.sqrt(mse3) mse4 = mean_squared_error(df_test['actual'], y_pred4) rmse4 = np.sqrt(mse4) mse5 = mean_squared_error(df_test['actual'], y_pred5) rmse5 = np.sqrt(mse5) print(f'Nilai RMSE Model 1 {rmse1})') print(f'Nilai RMSE Model 2 {rmse2})') print(f'Nilai RMSE Model 3 {rmse3})') print(f'Nilai RMSE Model 4 {rmse4})') print(f'Nilai RMSE Model 5 {rmse5})') # > Jika dilihat RMSE pada lima model yang diperoleh, maka dapat dikatakan error dapat dikatakan cukup tinggi. Namun jika dbandingkan untuk lima model tersebut, maka dapat diketahui model 1 atau model ARIMA (0,1,1) merupakan model yang terbaik. # # 7. Calculate RMSE # Untuk mengetahui kebaikn model, maka dapat menggunakan RMSE pada data validation yang telah ditentukan di awal. validation = pd.read_csv('validation.csv') validation['Year'] = pd.to_datetime(validation['Year'], format='%Y') validation = validation.set_index(validation['Year']) validation.drop(columns=['Year'], inplace=True) validation = validation.astype('float64') validation.head() arima_order = (0,1,1) arima = ARIMA(validation, order=arima_order) arima_fit = arima.fit() #Nilai prediksi dari validation y_pred = arima_fit.forecast(len(validation))[0] df_valid = pd.DataFrame(y_pred, columns=['prediksi'], index=validation.index) df_valid['aktual'] = validation.values df_valid mse = mean_squared_error(df_valid['aktual'], df_valid['prediksi']) rmse = np.sqrt(mse) rmse # > Berdasarkan nilai RMSE = 84, maka dapat diketahui bahwa model ARIMA (0,1,1) belum cukup baik. Maka dapat dikatakan bahwa nilai prediksi tidak mendekati nilai aktualnya. Hal ini diduga karena model yang diperoleh berdasarkan data train yang cukup kecil dan perlu dicobakan model lainnya untuk mendapatkan model yang lebih baik. Pada umumnya, data train yang digunakan harus lebih besar jika dibandingkan dengan data testingnya. Dan pada soal, menyarankan untuk membagi data 50% pada dataset yang sudah terpotong di awal. Sehingga dalam membentuk model, data yang digunakan sangatlah kecil. Tidak hanya itu, jika dilihat RMSE pada data testing menunjukkan RMSE setengah lebih kecil jika dibandingkan dengan RMSE pada data traning. Sehingga saran untuk analsiis berikutnya yaitu dapat menggunakan pemodelan ARIMA yang lebih sesuai agar dapat memprediksi penggunaan air tahunan di Baltimore. Jika dilihat hasil prediksi dan data aktualnya dapat dilihat pada grafik berikut. pyplot.plot(df_valid['aktual']) pyplot.plot(df_valid['prediksi'], color='red') pyplot.show() # > Jika dilihat berdasarkan grafik, maka dapat terlihat plot prediksi (merah) tidak mengikuti data aktualnya. Atau dapat dikatakan sangat jauh dari data aktualnya. Maka perlu dilakukan evaluasi secara mendalam. Untuk referensi dapat mengkases web berikut [akses](https://machinelearningmastery.com/time-series-forecast-study-python-annual-water-usage-baltimore/). 
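# As a hedged illustration (not part of the original analysis): a walk-forward
# validation refits the ARIMA(0,1,1) model on the growing history and forecasts
# one step at a time, which usually gives a fairer out-of-sample RMSE than
# fitting a model on the validation data itself. It reuses `dataset`,
# `validation`, `ARIMA` and `mean_squared_error` from the cells above.
history = list(dataset['Water'].values)
predictions = []
for actual in validation['Water'].values:
    wf_fit = ARIMA(history, order=(0, 1, 1)).fit(disp=0)
    predictions.append(wf_fit.forecast()[0][0])  # one-step-ahead forecast
    history.append(actual)  # grow the history with the observed value
rmse_wf = np.sqrt(mean_squared_error(validation['Water'].values, predictions))
print(f'Walk-forward RMSE: {rmse_wf:.2f}')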
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.12 ('wordle-exploration') # language: python # name: python3 # --- from utils import * from global_vars import * # + stats = { 1: 0, # In 1 try 2: 0, # In 2 tries 3: 0, # In 3 tries 4: 0, # In 4 tries 5: 0, # In 5 tries 6: 0, # In 6 tries -1: 0 } for i, answer in enumerate(ANS_ARR): rnd = 1 avail_words = GUESS_ARR won = False while rnd <= 6 and not won: if rnd ==1: guess, entropy = 'soare', 5.9 # From previous exp, dont need traverse again else: guess, entropy = get_guess(avail_words) state = ''.join(map(str, get_state(guess, answer))) avail_words = set(STATE_MAP[guess][state]) & set(avail_words) if guess == answer: won = True break rnd += 1 if won: stats[rnd] += 1 else: stats[-1] += 1 print(f"{i+1}/{len(ANS_ARR)} words tested", end='\r') print(stats) # - # !pip3 install matplotlib with open('./outputs/stats.json', 'w') as f: json.dump(stats, f) import matplotlib.pyplot as plt import numpy as np # + # Number solved by algorithm stats = json.load(open('./outputs/stats.json')) x = np.array(list(stats.keys()), dtype='int64') y = np.array(list(stats.values()), dtype='int64') plt.bar(x, y) plt.title("Number of guesses needed to solve") plt.plot() # + total = 0 div = 0 for i in range(len(x)): total += x[i] * y[i] div += y[i] total / div # - len(ANS_ARR) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # creates3site # # Python script to ask for bucket name, creates a aws s3 bucket, config for static site, uploads permissions, creates a nikola site, uploads site. 
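# The cells below build these steps up interactively. As a hedged sketch (the
# helper names come from the definitions that follow, and prompting with
# `input()` is an assumption, since the cells actually hard-code the bucket
# name), the intended end-to-end flow is roughly:
#
# ```python
# namebucket = input('Bucket name: ')
# makebucket(namebucket)      # create the S3 bucket
# makewwwbucket(namebucket)   # plus the www. redirect bucket
# wsconf(namebucket)          # apply the website configuration from website.json
# wsindex(namebucket)         # set the index/error documents
# permop(namebucket)          # apply the bucket policy from permission.json
# nikinit(namebucket)         # scaffold the Nikola site
# ```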
import subprocess import json import os import getpass myusr = getpass.getuser() class devsite(): def makebucket(namebucket): subprocess.call('aws s3 mb s3://{}'.format(namebucket), shell=True) def makewwwbucket(namebucket): subprocess.call('aws s3 mb s3://www.{}'.format(namebucket), shell=True) def createapp(namebucket): subprocess.call('aws elasticbeanstalk create-application --application-name {} --description "{}"'.format(namebucket, namebucket), shell=True) def deleteapp(namebucket): subprocess.call('aws elasticbeanstalk delete-application --application-name {}'.format(namebucket)) def infoapp(): #subprocess.call('aws elasticbeanstalk describe-applications') return(subprocess.check_output(["aws", "elasticbeanstalk", "describe-applications"])) def listbucket(): return(subprocess.check_output('aws s3api list-buckets --query "Buckets[].Name" --output "json"', shell=True)) def wsconf(namebucket): subprocess.call('aws s3api put-bucket-website --bucket {} --website-configuration file://website.json'.format(namebucket), shell=True) def nikinit(namebucket): subprocess.call('nikola init -q /home/{}/sites/{}'.format(myusr, namebucket), shell=True) devsite.infoapp() devsite.nikinit('newtest') with open('/home/{}/sites/{}/conf.py'.format(myusr, )) devsite.infoapp() devsite.deleteapp('MyApp') devsite.createapp('testing') myitems = devsite.listbucket() myitems myitems.replace('\n', '') str(myitems) type (myitems) myitems.capitalize() for it in myitems: print(it.from_bytes{}) mydecode = myitems.decode("utf-8") mylis = mydecode.replace('\n', '') type(mylis) myspli = mylis.split(' ') type(myspli) lenspi = len(myspli) for mys in range(0, lenspi, 2): print(myspli[mys]) print(myspli[8]) range(0, 10, 2) list(mydecode) for myit in myitems.decode("utf-8"): print(myit ) for devs in devsite.listbucket(): print(devs) devSite.makebucket('lonewill') def makebucket(namebucket): subprocess.call('aws s3 mb s3://{}'.format(namebucket), shell=True) def makewwwbucket(namebucket): subprocess.call('aws s3 mb s3://www.{}'.format(namebucket), shell=True) def wsconf(namebucket): subprocess.call('aws s3api put-bucket-website --bucket {} --website-configuration file://website.json'.format(namebucket), shell=True) def wsindex(namebucket): subprocess.call('aws s3 website s3://{} --index-document index.html --error-document error.html'.format(namebucket), shell=True) def nikinit(namebucket): subprocess.call('nikola init -q {}'.format(namebucket), shell=True) makebucket('breakdownand.com') makewwwbucket('breakdownand.com') wsconf('breakdownand.com') wsindex('breakdownand.com') nikinit('breakdownand.com') def permop(namebucket): with open('permission.json', 'r') as permj: permrd = (permj.read()) editjs = permrd.replace('editthis', namebucket) with open('permission.json', 'w') as permwrit: permwrit.write(editjs) subprocess.call('aws s3api put-bucket-policy --bucket {} --policy file://permission.json'.format(namebucket), shell=True) with open('permission.json', 'r') as permj: permrd = (permj.read()) editjs = permrd.replace('editthis', namebucket) with open('permission.json', 'w') as permwrit: permwrit.write(editjs) permop('breakdownand.com') permop('breakdown.com') def retusome(namebucket): return namebucket retusome('breakdownand.com') with open('permission.json', 'r+') as permj: permrd = (permj.read()) anperz = permrd.replace('examplebucket', retusome('breakdownand.com')) jslo = json.loads(anperz) editjs = json.dumps(jslo) with open('permission.json', 'w') as permwrit: permwrit.write(editjs) def sstact(namebucket): 
subprocess.call('aws s3api put-bucket-policy --bucket {} --policy file://permission.json'.format(namebucket), shell=True) sstact('breakdownand.com') with open('{}/conf.py'.format(inputnam), 'r+') as configz: confrd = (configz.read()) confiza = confrd.replace("Demo Site", inputnam.replace('.com', '')) with open('{}/conf.py'.format(inputnam), 'w') as confix: confix.write(confiza) os.chdir('breakdownandcry.com/') subprocess.call('nikola build', shell= True) os.chdir('output') subprocess.call('aws s3 sync . s3://{}'.format(inputnam), shell=True) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # ![PyData_logo](./static/pydata-logo-madrid-2016.png) # # # Embrace conda packages # # ## The build system we always needed, but never deserved # # ###### # ###### Madrid, 2016-04-08 # + [markdown] slideshow={"slide_type": "slide"} # ## Outline # # * Introduction # * Motivation: What brought us here? # * Our first conda package # * Some more tricks # * Working with other languages # * conda-forge: a community repository # * Limitations and future work # * Conclusions # + [markdown] slideshow={"slide_type": "slide"} # ## Who is this guy? # # # # * _Almost_ **Aerospace Engineer** # * Quant Developer for BBVA at Indizen (yeah, lots of Python there!) # * Writer and furious tweeter at **Pybonacci** # * Chair ~~and BDFL~~ of **Python España** # * Co-creator and charismatic leader of **AeroPython** (\*not the Lorena Barba course) # * _When time permits (rare) [writes some open source Python code](https://github.com/Juanlu001/)_ # + [markdown] slideshow={"slide_type": "notes"} # You know, I've been giving talks on Python and its scientific ecosystem for about three years now... And I always write this bit there, that "Almost" word in italics before my background. You may reasonably wonder now what the heck I've been doing all these years to always introduce myself as an "almost" Aerospace Engineer, right? Well, I promise that I'm taking the required steps to graduate not later than this Autumn, but anyway this talk reflects one of the severe pains I've been going through while carrying my final project. # + [markdown] slideshow={"slide_type": "slide"} # ## Motivation: What brought us here? # + [markdown] slideshow={"slide_type": "notes"} # Let's begin with some questions: # # * Who writes Python code here, either for a living or for fun? # * Who can write a `setup.py`... without copying a working one from the Internet? # * How many Linux users... can configure a Visual Studio project properly? # * How many of you are using Anaconda... because it was the only way to survive? 
# + [markdown] slideshow={"slide_type": "fragment"} # ### _...or: "The sad state of scientific software"_ # + [markdown] slideshow={"slide_type": "subslide"} # * [The scientific Python community was told to "fix the packaging problem themselves" in 2014](https://speakerdeck.com/teoliphant/building-the-pydata-community), Christoph Gohlke packages were the only practical way to use Python on Windows for years before Python(x,y), Canopy and Anaconda were born # + [markdown] slideshow={"slide_type": "subslide"} # * One of the FAQ items of the Sage project: [_"Wouldn’t it be way better if Sage did not ship as a gigantic bundle?"_](http://doc.sagemath.org/html/en/faq/faq-general.html), [they started a SaaS to end the pain](http://sagemath.blogspot.com.es/2014/10/a-non-technical-overview-of.html) # + [markdown] slideshow={"slide_type": "subslide"} # * PETSc (solution of PDEs): They are forced to maintain their own forks because [upstream projects won't fix bugs, even with patches and reproducible tests](http://scisoftdays.org/pdf/2016_slides/brown.pdf) # + [markdown] slideshow={"slide_type": "subslide"} # * DOLFIN (part of the FEniCS project): Extremely difficult to make it work outside Ubuntu, pure Python alternatives are being developed, [my fenics-recipes project has at least 7 meaningful forks already](http://firedrakeproject.org/) # + [markdown] slideshow={"slide_type": "subslide"} # # + [markdown] slideshow={"slide_type": "slide"} # ## Some inconvenient truths: # + [markdown] slideshow={"slide_type": "fragment"} # # Portability is hard (unless you stick to pure Python) # + [markdown] slideshow={"slide_type": "fragment"} # # Properly distributing software libraries is very hard # + [markdown] slideshow={"slide_type": "subslide"} # ### Result: # # # + [markdown] slideshow={"slide_type": "subslide"} # ## What horror have we created # # > If you’re missing a library or program, and that library or program happens to be written in C, **you either need root to install it from your package manager, or you will descend into a lovecraftian nightmare of attempted local builds from which there is no escape**. You say you need lxml on shared hosting and they don’t have libxml2 installed? Well, fuck you. # > # > — Eevee, ["The sad state of web app deployment"](https://eev.ee/blog/2015/09/17/the-sad-state-of-web-app-deployment/) # + [markdown] slideshow={"slide_type": "subslide"} # ## Are virtual machines and containers the solution? # # > _"It's easy to build a VM if you automate the install process, and providing that install script for even one OS can demystify the install process for others; conversely, **just because you provide a VM doesn't mean that anyone other than you can install your software**"_ # > # > — , ["Virtual machines considered harmful for reproducibility"](http://ivory.idyll.org/blog/vms-considered-harmful.html) # + [markdown] slideshow={"slide_type": "slide"} # ## Our first conda package # # Let's install `conda-build`! # + slideshow={"slide_type": "fragment"} # !conda install -y conda-build -q -n root # + [markdown] slideshow={"slide_type": "subslide"} # conda packages are created from conda recipes. We can create a bare recipe using `conda skeleton` to build it from a PyPI package. 
# + slideshow={"slide_type": "fragment"} # !conda skeleton pypi pytest-benchmark > /dev/null # + slideshow={"slide_type": "fragment"} # !ls pytest-benchmark # + [markdown] slideshow={"slide_type": "fragment"} # These are the minimum files for the recipe: # # * `meta.yaml` contains all the metadata # * `build.sh` and `bld.bat` are the build scripts for Linux/OS X and Windows respectively # + [markdown] slideshow={"slide_type": "subslide"} # ### The `meta.yaml` file # # It contains the metadata in YAML format. # # * `package`, `source` and `build` specify the name, version and source of the package # * `requirements` specify the build (install time) and run (runtime) requirements # * `test` specify imports, commands and scripts to test # * `about` adds some additional data for the package # + slideshow={"slide_type": "subslide"} # !grep -v "#" pytest-benchmark/meta.yaml | head -n24 # + [markdown] slideshow={"slide_type": "subslide"} # ## The `build.sh` and `bld.bat` files # # They specify how to build the package. # + slideshow={"slide_type": "fragment"} # !cat pytest-benchmark/build.sh # + slideshow={"slide_type": "fragment"} # !grep -v "::" pytest-benchmark/bld.bat # + [markdown] slideshow={"slide_type": "subslide"} # ### The build process # # Adapted from http://conda.pydata.org/docs/building/recipe.html#conda-recipe-files-overview # # 1. Downloads the source # 2. Applies patches (if any) # 3. Install build dependencies # 4. Runs the build script # 5. Packages new files # 6. Run tests against newly created package # # Seems legit! # + slideshow={"slide_type": "subslide"} # !conda build pytest-benchmark --python 3.5 > /dev/null # It works! # + slideshow={"slide_type": "fragment"} # !ls ~/.miniconda3/conda-bld/linux-64/pytest-benchmark-3.0.0-py35_0.tar.bz2 # + [markdown] slideshow={"slide_type": "subslide"} # # # (From http://conda.pydata.org/docs/building/pkg-name-conv.html) # + slideshow={"slide_type": "subslide"} # !conda install pytest-benchmark --use-local --yes # + [markdown] slideshow={"slide_type": "subslide"} # ### Build, test, upload, repeat # # * Custom packages can be uploaded to Anaconda Cloud https://anaconda.org/ # * This process can be automated through Anaconda Build http://docs.anaconda.org/build.html # * Later on we can use our custom **channels** to install non-official packages # # # + [markdown] slideshow={"slide_type": "subslide"} # Let's upload the package first using `anaconda-client`: # + slideshow={"slide_type": "fragment"} # !conda install anaconda-client --quiet --yes # + slideshow={"slide_type": "fragment"} # !anaconda upload ~/.miniconda3/conda-bld/linux-64/pytest-benchmark-3.0.0-py35_0.tar.bz2 # + [markdown] slideshow={"slide_type": "subslide"} # And now, let's install it! # + slideshow={"slide_type": "fragment"} # !conda remove pytest-benchmark --yes > /dev/null # + slideshow={"slide_type": "fragment"} # !conda install pytest-benchmark --channel juanlu001 --yes # + [markdown] slideshow={"slide_type": "slide"} # ## Some more tricks # + [markdown] slideshow={"slide_type": "fragment"} # ### Running the tests # # You can run your tests with Python, Perl or shell scripts (`run_test.[py,pl,sh,bat]`) # + slideshow={"slide_type": "fragment"} active="" # # run_test.sh # # cd $SRC_DIR/test # cmake . 
# make # + [markdown] slideshow={"slide_type": "subslide"} # ### Convert pure Python packages to other platforms # # Using `conda convert` for pure Python packages, we can quickly provide packages for other platforms # + slideshow={"slide_type": "fragment"} # !conda convert ~/.miniconda3/conda-bld/linux-64/pytest-benchmark-3.0.0-py35_0.tar.bz2 --platform all | grep Converting # + [markdown] slideshow={"slide_type": "subslide"} # ### Platform-specific metadata # + slideshow={"slide_type": "fragment"} active="" # # from glpk # # build: # features: # - vc9 [win and py27] # - vc10 [win and py34] # - vc14 [win and py35] # # requirements: # build: # - gmp [linux or osx] # + [markdown] slideshow={"slide_type": "subslide"} # ### Templating for `meta.yaml` # # Metadata files support templating using Jinja2! # + slideshow={"slide_type": "fragment"} active="" # # from glpk # # build: # number: {{ environ.get("APPVEYOR_BUILD_NUMBER", 1) }} [win] # # # from poliastro at conda-forge # # {% set version = "0.5.0" %} # # package: # name: poliastro # version: {{ version }} # # source: # fn: v{{ version }}.tar.gz # url: https://github.com/poliastro/poliastro/archive/v{{ version }}.tar.gz # + [markdown] slideshow={"slide_type": "slide"} # ## Working with other languages # + [markdown] slideshow={"slide_type": "fragment"} # ### _or: conda as a cross-platform package manager_ # + [markdown] slideshow={"slide_type": "fragment"} # * conda can be used to build software written in any language # * Just don't include `python` as a build or run dependency! # * It's already being used to distribute pure C and C++ libraries, R packages... # + slideshow={"slide_type": "fragment"} active="" # # build.sh from glpk # # export CFLAGS="-O3" # ./configure --prefix=$PREFIX --with-gmp # # make check install # + [markdown] slideshow={"slide_type": "subslide"} # ### Important caveat: # + [markdown] slideshow={"slide_type": "fragment"} # ## The burden is on _you_ # + [markdown] slideshow={"slide_type": "subslide"} # ### _There be dragons_ # # * _conda-build does not solve cross-compiling_ so you will need to build compiled packages on each platform # * Regarding Linux, there are [a lot of sources of binary incompatibility](https://www.python.org/dev/peps/pep-0513/#key-causes-of-inter-linux-binary-incompatibility) # - Building on a clean operative system is key # - Using an old version of Linux (CentOS 5?) also helps, because many core system libraries have strict backwards compatibility policies # - **Packages that assume everything is on root locations will fail to compile** # - Sometimes careful editing of compiler flags and event patching is necessary # # If the recipe builds on a fresh, headless, old Linux it will work everywhere # + [markdown] slideshow={"slide_type": "slide"} # ## conda-forge: a community repository # # # + [markdown] slideshow={"slide_type": "subslide"} # > [**conda-forge**](https://github.com/conda-forge) is a github organization containing repositories of conda recipes. Thanks to some awesome continuous integration providers (AppVeyor, CircleCI and TravisCI), each repository, also known as a feedstock, automatically builds its own recipe in a clean and repeatable way on Windows, Linux and OSX. 
# + [markdown] slideshow={"slide_type": "fragment"} # Features: # # * Automatic linting of recipes # * Continuous integration of recipes in Linux, OS X and Windows # * Automatic upload of packages # # What I love: # # * Having a blessed community channel (like Arch Linux AUR) # * Ensuring recipes run everywhere # * High quality standards! # + [markdown] slideshow={"slide_type": "slide"} # ## Limitations and future work # # conda (2012?) and conda-build (2013) are very young projects and still have some pain points that ought to be addressed # + [markdown] slideshow={"slide_type": "fragment"} # * Support for [gcc](https://github.com/conda/conda-recipes/pull/279) and [libgfortran](https://github.com/ContinuumIO/anaconda-issues/issues/686) is not yet polished in Anaconda and there are still some portability issues # + [markdown] slideshow={"slide_type": "fragment"} # * [No way to include custom channels on a `meta.yaml`](https://github.com/conda/conda-build/issues/532), the only option is to keep a copy of all dependencies # + [markdown] slideshow={"slide_type": "fragment"} # * [Pinning NumPy versions on `meta.yaml` can be a mess](https://github.com/conda/conda-build/pull/650) # + [markdown] slideshow={"slide_type": "subslide"} # The state of Python packaging is improving upstream too! # # * pip builds and caches wheels locally - the problem of compiling NumPy over and over again was addressed a while ago # * Windows and OS X wheels are easy to build and widely available for many scientific packages # * [PEP 0513](https://www.python.org/dev/peps/pep-0513/) provides a way to **finally** upload Linux wheels to PyPI which are compatible with _many_ Linux distributions # * [PEP 0516](https://www.python.org/dev/peps/pep-0516/) proposes "a simple and standard sdist format **that isn't intertwined with distutils**"!!!1! # + [markdown] slideshow={"slide_type": "fragment"} # Still, there are some remaining irks: # # * [pip does not have a dependency solver](https://github.com/pypa/pip/issues/988) # * conda-build has a more streamlined process to build and test packages in an isolated way # + [markdown] slideshow={"slide_type": "slide"} # ## Conclusion # # # + [markdown] slideshow={"slide_type": "slide"} # # # * This talk: https://github.com/AeroPython/embrace-conda-packages # * My GitHub: https://github.com/Juanlu001/ # * Me on Twitter: @astrojuanlu, @Pybonacci, @PyConES, @AeroPython # # ### Approach me during the conference, interrupt me while I'm on a conversation, ask me questions, let's talk about your ideas and projects! 😊 # # # Thanks for yor attention! 
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from io import StringIO from sklearn.preprocessing import LabelEncoder import numpy as np import seaborn as sns audio = pd.read_csv('audiology2.csv',header=0,sep=',') audio.columns df=audio # + df=df.drop("number_row",axis = 1) var_mod = ['age_gt_60','airBoneGap','boneAbnormal','history_buzzing','history_dizziness', 'history_fluctuating', 'history_fullness', 'history_heredity', 'history_nausea', 'history_noise', 'history_recruitment', 'history_ringing', 'history_roaring', 'history_vomiting', 'late_wave_poor', 'm_at_2k', 'm_cond_lt_1k', 'm_gt_1k', 'm_m_gt_2k', 'm_m_sn', 'm_m_sn_gt_1k', 'm_m_sn_gt_2k', 'm_m_sn_gt_500', 'm_p_sn_gt_2k', 'm_s_gt_500', 'm_s_sn', 'm_s_sn_gt_1k', 'm_s_sn_gt_2k', 'm_s_sn_gt_3k', 'm_s_sn_gt_4k', 'm_sn_2_3k', 'm_sn_gt_1k', 'm_sn_gt_2k', 'm_sn_gt_3k', 'm_sn_gt_4k', 'm_sn_gt_500', 'm_sn_gt_6k', 'm_sn_lt_1k', 'm_sn_lt_2k', 'm_sn_lt_3k', 'middle_wave_poor', 'mod_gt_4k', 'mod_mixed', 'mod_s_mixed', 'mod_s_sn_gt_500', 'mod_sn', 'mod_sn_gt_1k', 'mod_sn_gt_2k', 'mod_sn_gt_3k', 'mod_sn_gt_4k', 'mod_sn_gt_500', 'notch_4k', 'notch_at_4k', 's_sn_gt_1k', 's_sn_gt_2k', 's_sn_gt_4k', 'static_normal','viith_nerve_signs', 'wave_V_delayed', 'waveform_ItoV_prolonged'] le = LabelEncoder() for i in var_mod: df[i] = le.fit_transform(df[i]) df.head(10) # - df.info() df['speech()'].value_counts() le = LabelEncoder() data_cat=df['speech()'] data_cat_encoded= le.fit_transform(data_cat) data_cat_encoded= pd.DataFrame(data_cat_encoded,columns=["speech()"]) df['speech()']=data_cat_encoded df['speech()'].value_counts() df['air()'].value_counts() le = LabelEncoder() data_cat=df['air()'] data_cat_encoded= le.fit_transform(data_cat) data_cat_encoded= pd.DataFrame(data_cat_encoded,columns=['air()']) df['air()']=data_cat_encoded df['air()'].value_counts() df['ar_c()'].value_counts() le = LabelEncoder() data_cat=df['ar_c()'] data_cat_encoded= le.fit_transform(data_cat) data_cat_encoded= pd.DataFrame(data_cat_encoded,columns=['ar_c()']) df['ar_c()']=data_cat_encoded df['ar_c()'].value_counts() le = LabelEncoder() data_cat=df['ar_u()'] data_cat_encoded= le.fit_transform(data_cat) data_cat_encoded= pd.DataFrame(data_cat_encoded,columns=['ar_u()']) df['ar_u()']=data_cat_encoded df['ar_u()'].value_counts() df['bone()'].value_counts() le = LabelEncoder() data_cat=df['bone()'] data_cat_encoded= le.fit_transform(data_cat) data_cat_encoded= pd.DataFrame(data_cat_encoded,columns=['bone()']) df['bone()']=data_cat_encoded df['bone()'].value_counts() df['bser()'].value_counts() le = LabelEncoder() data_cat=df['bser()'] data_cat_encoded= le.fit_transform(data_cat) data_cat_encoded= pd.DataFrame(data_cat_encoded,columns=['bser()']) df['bser()']=data_cat_encoded df['bser()'].value_counts() df['o_ar_c()'].value_counts() le = LabelEncoder() data_cat=df['o_ar_c()'] data_cat_encoded= le.fit_transform(data_cat) data_cat_encoded= pd.DataFrame(data_cat_encoded,columns=['o_ar_c()']) df['o_ar_c()']=data_cat_encoded df['o_ar_c()'].value_counts() df['o_ar_u()'].value_counts() le = LabelEncoder() data_cat=df['o_ar_u()'] data_cat_encoded= le.fit_transform(data_cat) data_cat_encoded= pd.DataFrame(data_cat_encoded,columns=['o_ar_u()']) df['o_ar_u()']=data_cat_encoded df['o_ar_u()'].value_counts() 
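# The encode-and-reassign pattern above is repeated below for each remaining categorical column. Purely as an illustrative alternative (a sketch, not part of the original notebook), a small helper could do the same in a loop:
# +
# Illustrative helper (not used by the cells below): fit one LabelEncoder per
# column and write the encoded values back into the dataframe.
def encode_columns(frame, columns):
    for col in columns:
        frame[col] = LabelEncoder().fit_transform(frame[col])
    return frame

# e.g. encode_columns(df, ['tymp()', 'classification'])
# -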
df['tymp()'].value_counts() le = LabelEncoder() data_cat=df['tymp()'] data_cat_encoded= le.fit_transform(data_cat) data_cat_encoded= pd.DataFrame(data_cat_encoded,columns=['tymp()']) df['tymp()']=data_cat_encoded df['tymp()'].value_counts() df['tymp()'].value_counts() # + le = LabelEncoder() data_cat=df['tymp()'] data_cat_encoded= le.fit_transform(data_cat) data_cat_encoded= pd.DataFrame(data_cat_encoded,columns=['tymp()']) df['tymp()']=data_cat_encoded df['tymp()'].value_counts() # - df.info() # # + le = LabelEncoder() data_cat=df['classification'] data_cat_encoded= le.fit_transform(data_cat) data_cat_encoded= pd.DataFrame(data_cat_encoded,columns=['classification']) df['classification']=data_cat_encoded df['classification'].value_counts() # - df_label=df["classification"].copy() df=df.drop("bser()",axis = 1) # + from sklearn.preprocessing import StandardScaler #feature_scal = StandardScaler() #df = pd.DataFrame(feature_scal.fit_transform(df), columns=df.columns) #df.head() y=df.classification x = df.drop(columns=['classification']) # - df=df.drop("classification",axis = 1) median = df['mod_sn'].median() df['mod_sn'].fillna(median) df.head(50) x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.4,random_state=400) # + from sklearn.tree import DecisionTreeClassifier from sklearn import metrics from sklearn import preprocessing from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report clft=DecisionTreeClassifier() clft=clft.fit(x_train,y_train) y_predt = clft.predict(x_test) from sklearn import tree plt.figure(figsize=(50,80)) temp = tree.plot_tree(clft.fit(x,y),fontsize=24) plt.show() # - from sklearn.naive_bayes import GaussianNB clfb = GaussianNB() clfb.fit(x_train,y_train.ravel()) y_predb = clfb.predict(x_test) print(classification_report(y_test,clfb.predict(x_test))) from sklearn.neighbors import KNeighborsClassifier k=1 clfk= KNeighborsClassifier(n_neighbors=k) clfk.fit(x_train,y_train.ravel()) y_predk=clfk.predict(x_test) print("when k = {} neighbors , knn test acuracy : {}" .format(k,clfk.score(x_test,y_test))) print("when k = {} neighbors , knn test acuracy : {}" .format(k,clfk.score(x_train,y_train))) print(classification_report(y_test,clfk.predict(x_test))) ran = np.arange(1,30) train_list = [] test_list = [] for i,each in enumerate(ran): clfk= KNeighborsClassifier(n_neighbors=each) clfk.fit(x_train,y_train.ravel()) from sklearn.neural_network import MLPClassifier clfm = MLPClassifier(hidden_layer_sizes=(5,),max_iter=1500) clfm.fit(x_train,y_train.ravel()) y_predm = clfm.predict(x_test) print ("acuracy:", metrics.accuracy_score (y_test,y_predm)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + [markdown] id="zPKpAUq9BDOn" # # This tutorial # # Below is Python3 code for working with different types of tokenizers of Chinese language. 
The corresponding files are: # - [article in Digital Orientalist with more information](https://digitalorientalist.com/2021/02/16/defining-word-boundaries-for-modern-and-classical-chinese/) # - the git repository and README file are here: https://github.com/mzorki/tutorials # - alternative download link [from Google Drive](https://drive.google.com/drive/folders/1FQ8NAqBm7fZB0IPAXYOAHzQlXlCL9m7S?usp=sharing) # # **Important** # - for the code in this notebook to work, one needs to have the entire folder on their Google Drive. Unfortunately, after some recent changes it is impossible to directly copy a shared Drive folder. The easiest way is to download the whole folder and add it to one's Drive manually # # Notes # - This file can also be used by those who do not know how to code. Just follow the instructions and run the code in cells by pressing the "play" button in the upper left corner # - If running the file from Google Colab, the table of contents can be opened in the menu on the left side of this window # - Colab will shut down automatically after some time. This means that it is impossible to run code that requires several hours to process (see [How long can notebooks run in Colab?](https://research.google.com/colaboratory/faq.html#:~:text=How%20long%20can%20notebooks%20run,or%20based%20on%20your%20usage.)) # # # # + [markdown] id="wfhBcaaddaqc" # # Step 1. Import all the required libraries. # # + [markdown] id="Zuvyqkw2l2J2" # To be able to run different tokenization tools, we need to import them. # Below is an example import statement. # In each section that deals with a tool, there will be a separate import. Do not forget to run the cell that imports a tool before trying it out, otherwise the code will not work.

# In some cases like jieba, Google already has all the necessary data downloaded, in some – like with HanLP and Udkanbun, this notebook will first download some files. Please be careful with data usage: the files can be quite big and they get redownloaded each time this notebook is run. # # + id="9cbWOX4sdfRI" from tqdm.notebook import tqdm import re # + [markdown] id="OKKloeRehxie" # # Step 2. Mount Google Drive # For Colab to be able to work with Google Drive as a normal directory, we need to give it permissions to do so and tell it the place where this notebook is. # Run the cell, then click the link that appears below, give the permissions and copy the code that will appear on that page into the field below. # + colab={"base_uri": "https://localhost:8080/"} id="Ptoy-2APeBfk" outputId="421af960-f19f-48ad-f118-61ad373c3bba" from google.colab import drive drive.mount('/gdrive', force_remount=True) # + [markdown] id="2ZE4Ut3BejKv" # Change the path to the project to your own. Generally the path starts with "gdrive/My Drive/" + path to the project folder. When in the Google Drive folder, the path to it is shown right under the search field.

# Notes: # - In my case, the first-level Google Drive folder "My Drive" contains a "Shared" folder with my working folder "Chinese_tokenizers" inside it, hence the path used below
# - It is always better to have no spaces in folder names, but name "My Drive" is set automatically, so we cannot change it # # + colab={"base_uri": "https://localhost:8080/"} id="1SVQve9leLix" outputId="7e06245d-3bca-4a26-ec49-efa082412a30" project_dir = "/gdrive/My Drive/Shared/Chinese_tokenizers/" #move to the working directory # %cd {project_dir} # + [markdown] id="48lDwy0UhbC6" # # Step 3. Save the paths to the dictionary and the text to tokenize. # # CDICT (Stardict version) dictionary with full-form characters is used in this notebook. [Link to CDICT and other open-access dictionaries](http://download.huzheng.org/zh_TW/). # # To use a custom dictionary: # - create a .txt file with one word per line # - upload the dictionary to the project folder # - change the file name in the cell below # # To tokenize your own text: # - create a .txt file with the text to tokenize # - no special formatting is required. On the other hand, if there was any, it might be lost after tokenization # - change the file name in the cell below # # # + id="iYTm39aEi7XG" dictionary_path = 'CDICT(Stardict)_wordlist.txt' text_path = 'test_text.txt' tok_text = open(text_path).read() #this line splits the text into smaller chunks. It assumes that there are separations made with new lines. #there are ways to make this more elegantly, but it depends on a specific formatting of a file. sentences = [i for i in tok_text.split('\n') if i!=''] # + [markdown] id="Uk5LWUand0H3" # # Step 4. Test the tokenizers. # # Below are examples of code to work with several tokenizers. Generally, there are following parts: # - a short description of the tool # - link to the GitHub repository # - example code to tokenize one phrase # - code to load user dictionary # - example code to tokenize a .txt file # # Notes: # - after a user dictionary has been loaded, the tokenizer will remember it for all the operations afterwards. To reset, run the cells once more from the beginning of a section # - output files are saved to the folder 'results'. Sometimes Google Drive requires some time to update the folder and add a new file. If you do not see the file with the results, try refreshing the page or waiting for a couple of minutes # + [markdown] id="wf_KtXiHzNrP" # ## Split by characters # # Assumes that every character is a word. Passable for OC and poetry in OC and MC, less so for later prose. # + [markdown] id="lmFwtprwzn2-" # ### Tokenize a sentence # Insert your own text below to test it. # + id="rRJVuZuKzXZb" text = "建章歡賞夕,二八盡妖妍。羅綺昭陽殿,芬芳玳瑁筵 。" # + colab={"base_uri": "https://localhost:8080/"} id="DTNQx0-QzdkJ" outputId="9bf68bcd-a15e-4e1e-addf-8134925cc6fa" tokenized_text = " ".join(list(text)) print (tokenized_text) # + [markdown] id="AWTRfu1Iz52N" # ### Tokenize and save a text # The name of the file to tokenize should be inserted in "Step 3".
# The file is saved in the "results" folder. # + colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["dceb84702afc40cb8fae7cf477c73bc8", "c800ba001e044de4ae2698cd0418a1c8", "93a08536080041fba63b2713ee83f663", "c4a6b858d43b4d4890a88294f046646c", "756ea5953f9a460483d098a890f0765d", "07363d056b9842a18b280d71ec63dea3", "a5c38c5adf114735a67edc729ed3b69b", "10174df062a04d2b9e9d80df3b85ee3b"]} id="zwSyz6lTzrH7" outputId="2e6d3089-43eb-4039-e3a0-ae2d05977e57" fh = open('./results/single_tokenized.txt', 'w') for phrase in tqdm(sentences): seg_list = list(phrase) joined_list = " ".join(seg_list) fh.write(f'{joined_list}\n') fh.close() # + [markdown] id="ymumL1HnfmHo" # ## Jieba # # [jieba GitHub repository](https://github.com/fxsjy/jieba). # # Jieba is one of the most popular tokenizers for Modern Chinese. It has very detailed instructions on their github page. #
# It has many fine-tuning options (including whether it uses a statistical model or adds machine learning), PoS tagging, and the possibility to add a user-defined dictionary. #

# Very good with Modern Chinese. Works fine with full characters, but for MC and OC will recognize many long phrases as words. #
# Github page has more explanations and examples. # + id="fVFLC0yBeVXg" import jieba #remove the "#" below and run the cell if you want jieba to use machine learning. #jieba.enable_paddle() # + [markdown] id="lP8L8-s5IxVx" # ### Tokenize a sentence # Insert your own text below to test it. # + id="ibW9LxbOf3O6" text = "我来到北京清华大学" # + [markdown] id="e0NLkdpZJF0v" # Tokenization in the "Default mode":
# note that when possible this algorithm will choose to keep longer sequences of characters not split. # + colab={"base_uri": "https://localhost:8080/"} id="9X928L6KJE0N" outputId="ac576d06-54bb-4a4f-eea4-06d04b7f89f0" seg_list = jieba.cut(text, cut_all=False) print("Default Mode: " + " ".join(seg_list)) # 精确模式 # + [markdown] id="5NKT277AJ6ip" # Tokenization in "Full mode":
# note how in moments of uncertainty the algorithm returns **all possible** variants. This adds a lot of noise and makes the text unsuitable for corpus analysis. # + colab={"base_uri": "https://localhost:8080/"} id="KqGUWS-XgBcF" outputId="af9b7185-855d-4e6c-ae79-7391a8aa67da" seg_list = jieba.cut(text, cut_all=True) print("Full Mode: " + " ".join(seg_list)) # 全模式 # + [markdown] id="qTXykS-tg2xG" # Load dictionary.
# By default uses CDICT. If you want to use another one, replace dictionary file name in "Step 3". # # + id="kSvedrlPgEOm" jieba.load_userdict(dictionary_path) # + [markdown] id="BFeHZAGxKzh8" # Run the tokenization with the dictionary using the default mode.
# Here I used a dictionary with full forms and will attempt to tokenize a classical poem. # + colab={"base_uri": "https://localhost:8080/"} id="MwRoYkxbKyjI" outputId="5533e472-49d3-428c-e952-ac967efb9286" seg_list = jieba.cut("建章歡賞夕,二八盡妖妍。羅綺昭陽殿,芬芳玳瑁筵 。", cut_all=False) print("Full Mode + dictionary: " + " ".join(seg_list)) # + [markdown] id="uSDi1HRDLf-6" # ### Tokenize and save a text # # The name of the file to tokenize should be inserted in "Step 3".
# The file is saved in the "results" folder. # # # # + colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["deba78aac9d745b4b0c5e74fc4cb6c91", "cc00bbadacb94807a7b7be0394acdb29", "87d3038d8de84c708784f3c12aa77d5e", "fb9eaa4b5b7347f48dafb950bcd29319", "eb5ce4f8cb924cebb2d90335e079d275", "1d69edd9485449a0883a21f3a2b9ec8a", "bdb2fb11233846549cc5614428744936", "c29e2656abc642848b4e96b1bc088f63"]} id="5mC7qnWuKxEm" outputId="e9678fb0-4c69-433d-d932-0e2ae014ae21" fh = open('./results/jieba_tokenized.txt', 'w') for phrase in tqdm(sentences): seg_list = jieba.cut(phrase, cut_all=False) joined_list = " ".join(seg_list) fh.write(f'{joined_list}\n') fh.close() # + [markdown] id="KmXYL_YljdsG" # ## HanLP # # [Original HanLP repository](https://github.com/hankcs/HanLP)
# [Python version of HanLP](https://github.com/hankcs/pyhanlp/wiki/%E6%89%8B%E5%8A%A8%E9%85%8D%E7%BD%AE) # # HanLP is another heavyweight in processing of the Chinese language. It is written in Java and has a Python interface (pyhanlp) added on top of it, so parts of the code are counterintuitive for Python users.

# HanLP relies heavily on machine learning and covers a wide range of tasks, including tokenization, part-of-speech tagging, dependency parsing, etc. It also offers smart tagging of pinyin and conversion from simplified to full characters. The fact that it uses machine learning means that it will distinguish between cases like 后/後 and 云/雲 and try to choose the more appropriate one in each case.
# Below I will only cover basic tokenization with or without a user dictionary.
HanLP works very well for modern Chinese. For wenyan it often leaves long sequences of characters unsplit, but unlike jieba it does not introduce any extra noise.
# # **Important!** # - It is not pre-installed by Google Colab, so we need to first install it and download all the necessary files for it to work # - keep an eye on data usage # - after this file is closed, all the downloaded data will be deleted and will need to be downloaded again with the next use # + colab={"base_uri": "https://localhost:8080/"} id="8unJnor7gXlE" outputId="6bda8e6c-3ed6-4652-f6be-540582460320" # !pip install pyhanlp import pyhanlp from pyhanlp import * # + [markdown] id="1R1CIghXZG6F" # ### Tokenize a sentence # Insert your own text below to test it. # + colab={"base_uri": "https://localhost:8080/"} id="__g5AYRkZ-t7" outputId="6744f72c-452f-4b5c-ae64-5c55aec9dfc4" text = "我来到北京清华大学" seg_list = [str(i) for i in HanLP.segment(text)] #hanlp is written in java, so a conversion to Python format is necessary print(" ".join(seg_list)) # + [markdown] id="i-OotqIvaadq" # Let's remove the PoS segmentation. Do not run if you want to keep it. Will not work well for MC and OC. # + id="lgjtGzoVjbx7" JClass("com.hankcs.hanlp.HanLP$Config").ShowTermNature = False # + colab={"base_uri": "https://localhost:8080/"} id="oNOiHoeUakQL" outputId="37665bf2-6541-42ee-e607-05e56bcb6776" seg_list = [str(item) for item in HanLP.segment(text)] #hanlp is written in java, so a conversion is necessary print(" ".join(seg_list)) # + [markdown] id="PWSwNkOibFSb" # Load a user dictionary.
# Do not run if you don't want to use it. # Run the whole HanLP segment from the beginning if you want to stop using it. # + id="s55lFi5fbH-t" CustomDictionary = JClass("com.hankcs.hanlp.dictionary.CustomDictionary") dictionary = open(dictionary_path).read() words = dictionary.split('\n') for word in words: CustomDictionary.add(word) # + [markdown] id="txMniJcY30EZ" # When it comes to classical Chinese, unlike jieba, HanLP decides to split some character sequences. # + colab={"base_uri": "https://localhost:8080/"} id="W7eCe_G6bHuT" outputId="b6d74c32-eb32-48d6-da76-96175538f1c8" seg_list = [str(item) for item in HanLP.segment("建章歡賞夕,二八盡妖妍。羅綺昭陽殿,芬芳玳瑁筵 。")] print(" ".join(seg_list)) # + [markdown] id="oWB7vJa6ZI4q" # ### Tokenize and save a text # # The name of the file to tokenize should be inserted in "Step 3".
# The file is saved in the "results" folder. # + colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["0046d31d84384daa938203db0b3fe75a", "46ea4719d00e405ead10f6453f8d3111", "e01f4d309d0f40b3a3d0f7b5601f1978", "", "", "a29676e12adc42f09aa59ed97c641793", "a094d794fe7c430bab66eb51717236b2", "cf82bf898e744384bd0362dad4f45f44"]} id="HrucGafgb1WC" outputId="6d200cbd-f6f9-4f2d-a47d-e927f28a0cdf" fh = open('./results/hanlp_tokenized.txt', 'w') for phrase in tqdm(sentences): seg_list = [str(item) for item in HanLP.segment(phrase)] joined_list = " ".join(seg_list) fh.write(f'{joined_list}\n') fh.close() # + [markdown] id="R0za6uHJl_qk" # ## Udkanbun # # [GitHub repository](https://github.com/KoichiYasuoka/UD-Kanbun)
# [UNIVERSAL DEPENDENCIES TREEBANK OF THE FOUR BOOKS # IN CLASSICAL CHINESE](http://kanji.zinbun.kyoto-u.ac.jp/~yasuoka/publications/2019-12-04.pdf)
# [Project page](http://kanji.zinbun.kyoto-u.ac.jp/~yasuoka/kyodokenkyu/2018-12-01.html) # # Udkanbun is a tokenizer, POS-Tagger, and Dependency-Parser for Classical Chinese Texts (漢文/文言文). It was primarily created for dependency parsing. This means that when it comes to compounds, the algorithm will prefer to treat them as separate words and map their syntactic relationship. # # Just like with HanLP, it needs to be installed before we can use it. # # + colab={"base_uri": "https://localhost:8080/"} id="TTQuLrfcmB2j" outputId="00248a62-a404-4d84-dfe2-05d4f7af0fd2" # !pip install udkanbun import udkanbun # + [markdown] id="3boIpIBfP4wV" # Load the tokenizer. # + id="1zcR7f60mCPM" lzh=udkanbun.load() # + [markdown] id="QinjTPxPoHFm" # ### Working with one sentence. Full information and dependency trees. # Insert your own text below to test it. # + id="EZ6vbsHpnJKU" text = "建章歡賞夕,二八盡妖妍。" # + [markdown] id="dF2q2tdA_pfT" # Tokenize the text. # + id="2TZtyQQ3nLtv" seg_phrase = lzh(text) # + [markdown] id="D72uAZ_RP9G-" # View full information. # + colab={"base_uri": "https://localhost:8080/"} id="H5K5QAXonOc-" outputId="333ac28b-eab6-42a7-8f32-c17b676c54e0" print(seg_phrase) # + [markdown] id="T7Rvjd32QEiR" # Show just the visual representation of the dependency tree. # + colab={"base_uri": "https://localhost:8080/"} id="cX81wqFVnPb0" outputId="4f8bf648-198a-4c25-afec-b4a70e301426" print(seg_phrase.to_tree()) # + [markdown] id="bRQ2VGPOQlL1" # We can save the tree to an .svg file.
# The contents of the file might not be visible within Google Drive. To view, download the file to computer, right-click and choose "Open with" => "Google Chrome" # + id="7bnKcpmdQQ7D" f=open("trial.svg","w") f.write(seg_phrase.to_svg()) f.close() # + [markdown] id="F0j_1_oYoX-B" # Only show the tokenized text. # # + colab={"base_uri": "https://localhost:8080/"} id="i2iyUbn6n1iT" outputId="87372f98-b33d-4af3-a495-c389f4a8afb0" print(" ".join([i.form for i in seg_phrase[1:]])) # + [markdown] id="s39NDy6uob2I" # ### Tokenize and save a text # # The name of the file to tokenize should be inserted in "Step 3".
# The file is saved in the "results" folder. # + colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["e222e4773ee94bbf94fd04bd27944b9b", "d2709ffaf2a34622925ff58550de2d12", "8052671ed19249a698d82ffbe75d8eee", "6fbd6a2d08fb4ed0ad0b8cffe77ec5ad", "b01b51ba32514748aee4e2dc591f4f25", "7d98bf93c96741e29ea9a982ff309061", "", "2c8e0dfe4f4e4259bd49906600c43da6"]} id="Kyx7dsBHsJFq" outputId="d9b10eff-30fd-4475-a1a1-53115546bd15" fh = open('./results/udkanbun_tokenized.txt', 'w') for phrase in tqdm(sentences): seg_list = lzh(phrase) seg_phrase = " ".join([i.form for i in seg_list[1:]]) fh.write(f'{seg_phrase}\n') fh.close() # + [markdown] id="vmq93bnWSciK" # ## Dictionary-based tokenization # This is a small, quick and very simple script that crawls through the text and tries to match character sequences to a dictionary.
# It does not use any techniques to deal with situations where several tokenization options are available, and because of that it tends to get "greedy": in a sequence "ABCD", even if the best way to tokenize is "AB" + "CD", it will return "ABC" + "D" whenever possible. To deal with this, the maximum allowed word length is set to 2 by default, but it can be changed manually below.
# Unlike udkanbun, it will keep larger chunks together when possible, but it will not leave whole sentences untokenized. #
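# The `dict_tokenizer` helper imported below is not listed in this notebook. Purely as an illustration of the greedy longest-match behaviour described above (a sketch, not the actual helper code), such a tokenizer could look like this:
# +
def greedy_tokenize(text, vocab, longest_word=2):
    """Illustrative greedy matcher: at each position take the longest chunk
    (up to longest_word characters) found in the vocabulary, else one character."""
    tokens, i = [], 0
    while i < len(text):
        for size in range(longest_word, 0, -1):  # try the longest chunk first
            chunk = text[i:i + size]
            if size == 1 or chunk in vocab:      # single characters always match
                tokens.append(chunk)
                i += size
                break
    return tokens

# toy vocabulary, only to show the greedy behaviour
print(" ".join(greedy_tokenize("建章歡賞夕", {"建章", "歡賞"})))
# -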
# + id="vhv1pekvqlTC" from helper import dict_tokenizer as dt # + [markdown] id="WFLuKM9JW876" # Load dictionary.
# By default uses CDICT. If you want to use another one, replace dictionary file name in "Step 3". # + id="LrK2S18CUjZP" dictionary = dt.open_vocab(dictionary_path) # + [markdown] id="FTf2zLpQT7O6" # ### Tokenize a sentence # By default allows only 1 or 2 character words. Change number in "longest_word" to allow longer ones.
# Insert your own text below to test it. # + colab={"base_uri": "https://localhost:8080/"} id="rt7YcrGaTryl" outputId="f52b6068-8e12-4584-cb76-f69396975eeb" text = '建章歡賞夕,二八盡妖妍。羅綺昭陽殿,芬芳玳瑁筵 。' seg_list = dt.tokenize(text, tree = dictionary, longest_word=2) print(" ".join(seg_list)) # + [markdown] id="iLg22IRHWCGg" # ### Tokenize and save a text # + [markdown] id="G6Q5CKryc5qV" # The name of the file to tokenize should be inserted in "Step 3".
# The file is saved in the "results" folder. # + colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["f23eb7e673a64ceb8b566dface466141", "ffaf40a89c124c59acac77ee344d70b9", "3d877b5cad1b4861b25a007d7dd08539", "3e177410a114449ebe55721623bca5bf", "638752e748c447b2a3f0b950060de74c", "", "b5d7f9088bcd446d8266eea0986fa853", ""]} id="kaJ2isIEU9zg" outputId="32187998-825f-4600-9340-35d79d9510ff" fh = open('./results/dictionary_tokenized.txt', 'w') for phrase in tqdm(sentences): seg_list = dt.tokenize(phrase, tree=dictionary, longest_word=2) seg_phrase = " ".join(seg_list) fh.write(f'{seg_phrase}\n') fh.close() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Graph Convolutional networks for Tweet archetype classification # # Dependencies import tensorflow as tf import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F from sklearn.feature_extraction.text import TfidfVectorizer import nltk from nltk.stem import WordNetLemmatizer, SnowballStemmer import re import numpy as np from collections import OrderedDict from itertools import combinations from tqdm import tqdm import math import networkx as nx import pickle def save_to_pickle(obj, file_name): with open(file_name, "wb") as f: pickle.dump(obj, f) def fcn_stub(stub): return stub # Create a stemmer stemmer = SnowballStemmer("english") # Function for stemming and lemmatization def stem_and_lemmatize(text:str) -> str: """Stems and lemmatizes a given text.""" return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v')) def preprocess_texts(text_list: pd.DataFrame) -> pd.DataFrame: """Processes text to remove all unwanted words and symbols.""" # Lowercase the tweets text_list['processed_tweet'] = text_list['tweet_text'].str.lower() # Regex patterns url_pattern = r"((http://)[^ ]*|(https://)[^ ]*|( www\.)[^ ]*)" user_pattern = '@[^\s]+' alpha_pattern = "[^a-zA-Z]" sequence_pattern = r"(.)\1\1+" seq_replace_pattern = r"\1\1" # Remove URLs from the tweet text text_list['processed_tweet'] = [re.sub(url_pattern, ' ', str(x)) for x in text_list['processed_tweet']] # Remove username from the tweet text text_list['processed_tweet'] = [re.sub(user_pattern, ' ', str(x)) for x in text_list['processed_tweet']] # Remove all non-alphanumeric symbols text_list['processed_tweet'] = [re.sub(alpha_pattern, ' ', str(x)) for x in text_list['processed_tweet']] # Replace all 3 or more consecutive letters with 2 letters text_list['processed_tweet'] = [re.sub(sequence_pattern, seq_replace_pattern, str(x)) for x in text_list['processed_tweet']] full_tweet_list = [] for x in text_list['processed_tweet']: full_tweet = '' for word in x.split(): word = stem_and_lemmatize(word) full_tweet += (word + ' ') full_tweet_list.append(full_tweet) text_list['processed_tweet'] = full_tweet_list return text_list def filter_tokens(tokens): tokens1 = [] for token in tokens: if (token not in [".",",",";","&","'s", ":", "?", "!","(",")",\ "'","'m","'no","***","--","...","[","]", " "]): tokens1.append(token) return tokens1 def word_word_edges(p_ij): word_word = [] cols = list(p_ij.columns) cols = [str(w) for w in cols] for w1, w2 in tqdm(combinations(cols, 2), total=nCr(len(cols), 2)): if (p_ij.loc[w1,w2] > 0): word_word.append((w1,w2,{"weight":p_ij.loc[w1,w2]})) return word_word def nCr(n,r): f = math.factorial return int(f(n)/(f(r)*f(n-r))) def 
to_categorical(y, num_classes): """ 1-hot encodes a tensor """ return np.eye(num_classes, dtype='uint8')[y] # - # Read and process dataset text_df = pd.read_csv('twitter_database.csv') text_df.head() # Preprocess text and drop empty fields text_df = preprocess_texts(text_df) text_df = text_df.groupby('archetype').head(1000) save_to_pickle(text_df, "unprocessed_tweets_df.pickle") print(len(text_df)) # + # Tokenize the words df_ta = pd.DataFrame(columns=["processed_tweet", "archetype"]) for arch in text_df["archetype"].unique(): dummy = pd.DataFrame(columns=["processed_tweet", "archetype"]) dummy["processed_tweet"] = text_df[text_df["archetype"] == arch].groupby("archetype").apply(lambda x: (" ".join(x["processed_tweet"])).lower()) dummy["archetype"] = arch df_ta = pd.concat([df_ta, dummy], ignore_index=True) # Tokenize the dataframe df_ta['processed_tweet'] = df_ta['processed_tweet'].apply(lambda x: nltk.word_tokenize(x)).apply(lambda x: filter_tokens(x)) # Data vectorization vectorizer = TfidfVectorizer(input="content", max_features=None, tokenizer=fcn_stub, preprocessor=fcn_stub) vectorizer.fit(df_ta['processed_tweet']) df_tfidf = vectorizer.transform(df_ta['processed_tweet']) df_tfidf = df_tfidf.toarray() # Get feature names vocab = vectorizer.get_feature_names() vocab = np.array(vocab) df_tfidf = pd.DataFrame(df_tfidf, columns=vocab) df_tfidf.head() # + # Calculate PMI between words names = vocab name_idx = OrderedDict((name, 0) for name in names) word_to_index = OrderedDict((name, index) for index, name in enumerate(names)) # Get the co-occurrences occurrences = np.zeros((len(names), len(names)), dtype=np.int32) windows_count = 0 window = 10 # Sliding window size, for calculation PMI between words for l in tqdm(df_ta['processed_tweet'], total=len(df_ta['processed_tweet'])): for i in range(len(l) - window): windows_count += 1 d = set(l[i:(i+window)]) for w in d: name_idx[w] += 1 for w1, w2 in combinations(d, 2): i1 = word_to_index[w1] i2 = word_to_index[w2] occurrences[i1][i2] = 1 occurrences[i2][i1] = 1 # Convert the occurences to PMI pmi_per_word = pd.DataFrame(occurrences, index=names, columns=names) / windows_count pmi_index = pd.Series(name_idx, index=name_idx.keys()) / windows_count # Free memory del occurrences del name_idx for col in tqdm(pmi_per_word.columns): pmi_per_word[col] = pmi_per_word[col]/pmi_index[col] for row in tqdm(pmi_per_word.index): pmi_per_word.loc[row, :] = pmi_per_word.loc[row, :] / pmi_index[row] pmi_per_word = pmi_per_word + 1E-9 for col in tqdm(pmi_per_word.columns): pmi_per_word[col] = pmi_per_word[col].apply(lambda x: math.log(x)) # + # Build a graph graph = nx.Graph() graph.add_nodes_from(df_tfidf.index) graph.add_nodes_from(vocab) # Build document-word edges document_word = [(doc,w,{"weight":df_tfidf.loc[doc,w]}) for doc in tqdm(df_tfidf.index, total=len(df_tfidf.index))\ for w in df_tfidf.columns] word_word = word_word_edges(pmi_per_word) graph.add_edges_from(document_word) graph.add_edges_from(word_word) # - # Export every needed structure save_to_pickle(graph, "text_graph.pickle") save_to_pickle(df_ta, "tweet_archetype_df.pickle") # Read data with open('text_graph.pickle', "rb") as f: graph = pickle.load(f) print("Graph loaded.") # + # GCN - implementation and training # Create A matrix and hat_A A = nx.to_numpy_matrix(graph, weight="weight") A = A + np.eye(graph.number_of_nodes()) degs = [] for deg in tqdm(graph.degree(weight=None)): if deg == 0: degs.append(0) else: degs.append(deg[1]**(-0.5)) degs = np.diag(degs) X = 
np.eye(graph.number_of_nodes()) hat_A = np.matmul(np.matmul(degs, A), degs) inp = X # Net input # + # Load the tweet pickle with open('unprocessed_tweets_df.pickle', "rb") as f: df_tweet = pickle.load(f) archetype_dict = {'archetype': {'artist': 0, 'caregiver': 1, 'everyman': 2, 'explorer': 3, 'guru': 4, 'hero': 5, 'innocent': 6, 'jester': 7, 'magician': 8, 'rebel': 9, 'ruler': 10, 'seducer':11} } df_tweet = df_tweet.replace(archetype_dict) df_tweet = df_tweet.reset_index() # Split the testing dataset test_indices = [] for arch in tqdm(df_tweet["archetype"].unique()): tmp = df_tweet[df_tweet["archetype"] == arch] if len(tmp) >= 4: test_indices.extend(list(np.random.choice(tmp.index, size=round(0.1*len(tmp)), replace=False))) print(f"Finished processing test indices: {test_indices}") selected = [] for i in tqdm(range(len(df_ta))): if i not in test_indices: selected.append(i) print("Finished selecting.") # + # Save test indices and seleced ones save_to_pickle(test_indices, "test_indices.pickle") save_to_pickle(selected, "selected.pickle") import torch import torch.nn as nn import torch.nn.functional as F # Operations on selected inputs inp_selected = inp[selected] inp_selected = torch.from_numpy(inp_selected).float() inp_selected = torch.tensor(inp_selected, device=torch.device('cuda')) labels_selected = [l for idx, l in enumerate(df_tweet["archetype"]) if idx in selected] inp_not_selected = inp[test_indices] inp_not_selected = torch.from_numpy(inp_not_selected).float() labels_not_selected = [l for idx, l in enumerate(df_tweet["archetype"]) if idx not in selected] inp = torch.from_numpy(inp).float() # + # Model creation device = torch.device('cuda:0') class GCN(nn.Module): def __init__(self, X_size, A_hat, args, bias=True): # X_size = num features super(GCN, self).__init__() self.A_hat = torch.tensor(A_hat, requires_grad=False, device=device).float() self.weight = nn.parameter.Parameter(torch.FloatTensor(X_size, args['hidden_size_1'])) var = 2./(self.weight.size(1)+self.weight.size(0)) self.weight.data.normal_(0,var) self.weight2 = nn.parameter.Parameter(torch.FloatTensor(args['hidden_size_1'], args['hidden_size_2'])) var2 = 2./(self.weight2.size(1)+self.weight2.size(0)) self.weight2.data.normal_(0,var2) if bias: self.bias = nn.parameter.Parameter(torch.FloatTensor(args['hidden_size_1'])) self.bias.data.normal_(0,var) self.bias2 = nn.parameter.Parameter(torch.FloatTensor(args['hidden_size_2'])) self.bias2.data.normal_(0,var2) else: self.register_parameter("bias", None) self.fc1 = nn.Linear(args['hidden_size_2'], args['num_classes']) def forward(self, X): ### 2-layer GCN architecture X = torch.mm(X, self.weight) if self.bias is not None: X = (X + self.bias) X = F.relu(torch.mm(self.A_hat, X)) X = torch.mm(X, self.weight2) if self.bias2 is not None: X = (X + self.bias2) X = F.relu(torch.mm(self.A_hat, X)) return self.fc1(X) def evaluate(output, labels_e): _, labels = output.max(1); labels = labels.numpy() return sum([(e-1) for e in labels_e] == labels)/len(labels) # + # Define additional arguments args = { 'hidden_size_2': 130, 'num_classes': 12, 'hidden_size_1': 330 } net = GCN(X.shape[1], hat_A, args).to(device=device) criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(net.parameters(), lr=0.01) scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[1000,2000,3000,4000,5000,6000], gamma=0.77) # + losses_per_epoch, accuracy_per_epoch = [], [] evaluation_trained = [] best_pred = 0.0 import os for e in range(1000): optimizer.zero_grad() inp_selected = 
inp_selected.to(device) output = net(inp_selected) loss = criterion(output[selected], torch.tensor(labels_selected).long()) losses_per_epoch.append(loss.item()) loss.backward() optimizer.step() if e % 50 == 0: ### Evaluate other untrained nodes and check accuracy of labelling net.eval() with torch.no_grad(): pred_labels = net(inp_selected) trained_accuracy = evaluate(output[selected], labels_selected); #untrained_accuracy = evaluate(pred_labels[test_indices], labels_not_selected) evaluation_trained.append((e, trained_accuracy)) #evaluation_untrained.append((e, untrained_accuracy)) print("[Epoch %d]: Evaluation accuracy of trained nodes: %.7f" % (e, trained_accuracy)) #print("[Epoch %d]: Evaluation accuracy of test nodes: %.7f" % (e, untrained_accuracy)) print("Labels of trained nodes: \n", output[selected].max(1)[1]) net.train() if trained_accuracy > best_pred: best_pred = trained_accuracy torch.save({ 'epoch': e + 1,\ 'state_dict': net.state_dict(),\ 'best_acc': trained_accuracy,\ 'optimizer' : optimizer.state_dict(),\ 'scheduler' : scheduler.state_dict(),\ }, os.path.join("./data/" ,\ "test_model_best_%d.pth.tar" % e)) if (e % 250) == 0: save_to_pickle(losses_per_epoch, "test_losses_per_epoch_%d.pkl" % e) #save_as_pickle("test_accuracy_per_epoch_%d.pkl" % args.model_no, evaluation_untrained) torch.save({ 'epoch': e + 1,\ 'state_dict': net.state_dict(),\ 'best_acc': trained_accuracy,\ 'optimizer' : optimizer.state_dict(),\ 'scheduler' : scheduler.state_dict(),\ }, os.path.join("./data/",\ "test_checkpoint_%d.pth.tar" % e)) scheduler.step() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Activity 2 - Least Squares Method Linear Regression # Construct a least squares linear model for the dataset # + import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression # Loading the data from activity 1 df = pd.read_csv('activity2_measurements.csv') df_first_year = df[:365] rolling = pd.read_csv('activity2_rolling.csv') window = 20 # - # Visualising the measurements df.head() # Visualise the rolling average values rolling.head(n=30) # Create a linear regression model using the default parameters i.e. calculate a y-intercept for the model and do not normalise the data # Now fit the model. Where the input data is the day number for the year (1 to 365) and the output is the average temperature. To make later calculations easier insert a column (DayOfYear) which corresponds with the day of the year for that measurement. # Fit the model with the DayOfYear values as the input as df_first_year.TempAvgF as the output # Print the parameters of the model # What does the trendline provided by the model look like? We can plot this simply using the first, middle and last years in the linear equation. # Plot the values with the trendling # Evaluate the performance of the model # # How well does the model fit the data? Calculate the $r^2$ score to find out. 
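# The cells above are left as prompts. Below is one possible minimal sketch of a solution (illustrative only; it reuses the imports from the loading cell, treats the rows of df_first_year as days 1 to 365, and assumes the TempAvgF column mentioned in the instructions):
# +
# Insert a DayOfYear column: the first 365 rows are treated as days 1..365
df_first_year = df_first_year.copy()
df_first_year['DayOfYear'] = np.arange(1, len(df_first_year) + 1)

# Create and fit a linear regression model with the default parameters
model = LinearRegression()
model.fit(df_first_year[['DayOfYear']], df_first_year.TempAvgF)
print(f"slope = {model.coef_[0]:.4f}, intercept = {model.intercept_:.4f}")

# Trendline from the first, middle and last day numbers
trend_x = pd.DataFrame({'DayOfYear': [1, 182, 365]})
trend_y = model.predict(trend_x)

plt.scatter(df_first_year.DayOfYear, df_first_year.TempAvgF, label='measurements')
plt.plot(trend_x.DayOfYear, trend_y, c='k', label='trendline')
plt.legend()
plt.show()

# Evaluate the fit with the r^2 score
print(f"r^2 = {model.score(df_first_year[['DayOfYear']], df_first_year.TempAvgF):.4f}")
# -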
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="fJAD-U9MZp35" colab_type="code" outputId="faca1b0b-8c23-4a42-d0a4-766765607fd6" colab={"base_uri": "https://localhost:8080/", "height": 374} import pandas as pd import numpy as np from matplotlib import pyplot as plt import seaborn as sns df = pd.read_excel( 'https://github.com/pierretd/investor-classifier/blob/master/Part%202/investor_data.xlsx?raw=true' ) df.sample(5) # + id="m1hQT2-ShGlN" colab_type="code" outputId="e43d6324-e1e4-4e49-8435-924c546dca61" colab={"base_uri": "https://localhost:8080/", "height": 410} df = pd.read_excel('https://github.com/pierretd/investor-classifier/blob/master/Part%202/investor_data.xlsx?raw=true', sheetname='investor_data') df.sample(5) # + id="OoSaMsSrhPXI" colab_type="code" outputId="c058c306-42a0-4e09-8b5a-61ffe2051631" colab={"base_uri": "https://localhost:8080/", "height": 374} df = pd.read_excel('https://github.com/pierretd/investor-classifier/blob/master/Part%202/investor_data.xlsx?raw=true', sheet_name='investor_data') df.sample(5) # + id="7_nsA3fxsta2" colab_type="code" colab={} rvlvr = df.copy(deep=True) # Syndicated Revolver = rlvlr # + id="DdSg0RrMs9__" colab_type="code" outputId="09e8c54b-0fb2-4198-b168-4fa85bb7afa4" colab={"base_uri": "https://localhost:8080/", "height": 35} print('Syndicated Revolver data set has {} features and {} degrees of freedom.'.format(df.shape[1], df.shape[0])) # + id="V3_P2hAltUar" colab_type="code" outputId="8a969a20-386a-4874-a32b-faa160a65545" colab={"base_uri": "https://localhost:8080/", "height": 308} rvlvr.info() # + id="Kg0UjT1AtrBw" colab_type="code" outputId="a2c6af4d-8f5d-40d3-fe89-400078e48937" colab={"base_uri": "https://localhost:8080/", "height": 145} rvlvr.select_dtypes(float).nunique() # + id="xaPDUfVKuIzz" colab_type="code" outputId="e73f960f-c3f5-4237-b341-db6a1bdebc5a" colab={"base_uri": "https://localhost:8080/", "height": 288} df.describe() # + id="SLLei2Iit_XY" colab_type="code" outputId="213de988-7624-428b-a98b-84f81269ca43" colab={"base_uri": "https://localhost:8080/", "height": 126} rvlvr.select_dtypes(object).nunique() # + id="-puGJIBcudUs" colab_type="code" outputId="00de06cc-94be-4c5a-d731-cb99497f5f36" colab={"base_uri": "https://localhost:8080/", "height": 72} rvlvr.lender.unique() # + id="QzvQpzKyu_Xl" colab_type="code" outputId="eafafaa4-0bcf-4cd4-ce4f-51a5084ad838" colab={"base_uri": "https://localhost:8080/", "height": 35} rvlvr.commit.unique() # + id="8tbaLOqTvYLs" colab_type="code" colab={} mapping = {'Commit':1, 'Decline':0} rvlvr['commit'] = rvlvr['commit'].replace(mapping).astype(np.float64) # + id="WJpVelnhu6zc" colab_type="code" outputId="5859aab6-0993-447c-cd5f-124e3b090147" colab={"base_uri": "https://localhost:8080/", "height": 35} rvlvr.commit.unique() # + id="TPP3sgvgvC7T" colab_type="code" outputId="efc735d9-c372-4cd0-8047-deb2f50b5b54" colab={"base_uri": "https://localhost:8080/", "height": 35} rvlvr.prior_tier.unique() # + id="TaO5hPRTvGHi" colab_type="code" outputId="28792c15-1f13-4a4e-8c7a-b5cf7fe5838b" colab={"base_uri": "https://localhost:8080/", "height": 35} df.invite_tier.unique() # + colab_type="code" id="v9-QNwRbwQ0d" colab={} mapping2 = {'Bookrunner':1, 'Participant':0} rvlvr['prior_tier'] = rvlvr['prior_tier'].replace(mapping2).astype(np.float64) rvlvr['invite_tier'] = rvlvr['invite_tier'].replace(mapping2).astype(np.float64) # + colab_type="code" 
outputId="160a8b71-6add-4276-aa7d-98f268a3914b" id="yaDrT6xAwxZj" colab={"base_uri": "https://localhost:8080/", "height": 35} rvlvr.prior_tier.unique() # + colab_type="code" outputId="8d7eed00-f093-4d4e-fb53-19ef70be4ba0" id="nJ0Og6R6wxZl" colab={"base_uri": "https://localhost:8080/", "height": 35} rvlvr.invite_tier.unique() # + id="HxS7tdmcxBqu" colab_type="code" outputId="93a85b91-3fc4-45d8-f23d-346a06fabff2" colab={"base_uri": "https://localhost:8080/", "height": 35} rvlvr.int_rate.unique() # + id="m-y0lGB_TJqQ" colab_type="code" colab={} mapping3 = {'Above':2, 'Below':0, 'Market': 1} rvlvr['int_rate'] = rvlvr['int_rate'].replace(mapping3).astype(np.float64) # + id="77ek0UgH2xjB" colab_type="code" outputId="e7b00c3f-0dea-4ec5-dde4-1bc646a71647" colab={"base_uri": "https://localhost:8080/", "height": 35} rvlvr.int_rate.unique() # + _uuid="a2a2a5ae0cd320e261942a76cc14b778271ba30a" id="B-dDT7m5Ve5l" colab_type="code" outputId="6b4f9e97-c8f7-4f38-aeb3-ed49edae755b" colab={"base_uri": "https://localhost:8080/", "height": 391} rvlvr.head() # + id="EEY91MTv4DEc" colab_type="code" outputId="1bbf7ab9-6ff7-4fb5-9a35-754b9f47f91b" colab={"base_uri": "https://localhost:8080/", "height": 35} rvlvr.shape # + id="DXtVMEi9yQz8" colab_type="code" outputId="0cbae339-6936-4615-ea89-af91fe0bf1a7" colab={"base_uri": "https://localhost:8080/", "height": 368} rvlvr.corr() # + id="yzlqhwtztTi7" colab_type="code" outputId="2dc7e787-2430-475d-f2e8-0e5786bd7d46" colab={"base_uri": "https://localhost:8080/", "height": 820} def correlation_heat(rvlvr): _ , ax = plt.subplots(figsize =(14, 12)) colormap = sns.diverging_palette(220, 10, as_cmap = True) _ = sns.heatmap( df.corr(), cmap = colormap, square=True, cbar_kws={'shrink':.9 }, ax=ax, annot=True, linewidths=0.1,vmax=1.0, linecolor='white', annot_kws={'fontsize':12 } ) plt.title('Pearson Correlation of Features', y=1.05, size=15) correlation_heat(rvlvr[["commit", "deal_size", "invite", "rating", "int_rate", "covenants" ,"total_fees", "prior_tier", "invite_tier" ]]) # + id="YEFouHBUZp3-" colab_type="code" outputId="84eaf2df-6d0f-4fe6-a0ba-50ac43967b49" colab={"base_uri": "https://localhost:8080/", "height": 653} plt.style.use('fivethirtyeight') df.hist(figsize=(10,10)); plt.show(); # + id="8hOsk3VMZp4C" colab_type="code" outputId="d9ade758-9806-4c91-a684-f1de7af4ab97" colab={"base_uri": "https://localhost:8080/", "height": 535} df = df[df.total_fees>0] df.hist(figsize=(8,8)) plt.show(); # + id="FNRAqFajZp4G" colab_type="code" outputId="d6ac64a3-857c-4cb5-845c-2cd85aa456b7" colab={"base_uri": "https://localhost:8080/", "height": 445} sns.countplot(y='commit', data=df) plt.show() # + id="qu63PUSsZp4K" colab_type="code" outputId="9157920e-e946-4b33-e7d5-d1c2f6a59fda" colab={"base_uri": "https://localhost:8080/", "height": 369} df.groupby('invite_tier').commit.value_counts().plot(kind='barh') plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import scipy as sp import pandas as pd import re import csv import json import nltk from rake_nltk import Rake from pandas.io.json import json_normalize from matplotlib import pyplot as plt plt.style.use('ggplot') filename = '/home/claire/Documents/jobs/milieu/milieu-noumea/noumea-analysis/python-noumea-sept5.csv' df = pd.DataFrame.from_csv(filename, header=0, sep=',', index_col=0) pd.options.display.max_columns = 999 df # + dfc = 
df[['text_q1-comment', 'text_q2-comment', 'text_q3-comment', 'text_q4-comment', 'text_q5-comment', 'watson_score_q1-comment', 'watson_score_q2-comment', 'watson_score_q3-comment', 'watson_score_q4-comment', 'watson_score_q5-comment']] pd.options.display.max_rows = 999 result1 = dfc.sort_values(['watson_score_q1-comment'], ascending=[False]) hey1 = result1[['text_q1-comment', 'watson_score_q1-comment']] result2 = dfc.sort_values(['watson_score_q2-comment'], ascending=[False]) hey2 = result2[['text_q2-comment', 'watson_score_q2-comment']] result3 = dfc.sort_values(['watson_score_q3-comment'], ascending=[False]) hey3 = result3[['text_q3-comment', 'watson_score_q3-comment']] result4 = dfc.sort_values(['watson_score_q4-comment'], ascending=[False]) hey4 = result4[['text_q4-comment', 'watson_score_q4-comment']] result5 = dfc.sort_values(['watson_score_q5-comment'], ascending=[False]) hey5 = result5[['text_q5-comment', 'watson_score_q5-comment']] # hey = hey1.join([hey2, hey3, hey4, hey5]) # hey = hey.sort_values(['watson_score_q1-comment', 'watson_score_q2-comment', 'watson_score_q3-comment', 'watson_score_q4-comment', 'watson_score_q5-comment'], ascending=[False, False, False, False, False]) # hey hey5 # - # + import re import pprint from collections import Counter arr = [] for index, row in dfc.iterrows(): n = 1 # print(row[n]) comment = row[n] arr.append(comment) n = n + 1 arr1 = " ".join(str(x) for x in arr) words = re.findall(r'\w+', arr1) cap_words = [word.upper() for word in words] word_counts = Counter(cap_words) pp = pprint.PrettyPrinter(indent=4) pp.pprint(word_counts) # - # + # # - result = q3.dropna().sort_values(['score1'], ascending=[False]) result # + # dfc2 = pd.DataFrame(dfc.mean()*100, columns=['mean']) # dfc.describe() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- txt = "Quick brown fox" mylist = list(range(10)) def add(a,b): print("Adding",a,b,a+b) return a+b def mult(a,b): print("Mult",a,b,a*b) return a*b print("Oh running myMod notebook!") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:ecpaperenv] # language: python # name: conda-env-ecpaperenv-py # --- # + import importlib import xarray as xr import matplotlib.pyplot as plt import matplotlib as mpl import sys import numpy as np from CASutils import mapplot_utils as maps from CASutils import colorbar_utils as cbars from CASutils import plotposition_utils as pos from CASutils import filter_utils as filt from CASutils import calendar_utils as cal from matplotlib.lines import Line2D from scipy import stats importlib.reload(maps) importlib.reload(cbars) importlib.reload(pos) importlib.reload(filt) # - cityname=['Saskatoon','Toronto','Siderovsk'] citylon=[253.330, 280.617, 82.3139] citylat=[52.1579, 43.6532, 66.5973] for icity in np.arange(0,len(citylon),1): if (citylon[icity] > 180.): citylon[icity] = citylon[icity]-360. 
landfrac = xr.open_dataset("/project/cas/islas/cesmle/fx/landfraclens.nc") landfrac = landfrac.LANDFRAC landfrac = np.array(landfrac[0,:,:]) plotdir="/project/cas/islas/python_plots/snowpaper/FIGURES/" filepath="/project/cas/islas/python_savs/snowpaper/DATA_SORT/deseasonalized_tvar/" filepath_3cities="/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/" # + both_1 = xr.open_mfdataset(filepath+"TVAR_Isla_CAM6_CLM5.nc") both_2 = xr.open_mfdataset(filepath+"TVAR_Isla_CAM6_CLM5_002.nc") both_3 = xr.open_mfdataset(filepath+"TVAR_Cecile_CAM6_CLM5.nc") cam6_clm5 = (both_1 + both_2 + both_3)/3. clm4dat = xr.open_mfdataset(filepath+"TVAR_Cecile_CAM6_CLM4.nc") # - snowd_1 = xr.open_dataset(filepath+"TVAR_CAM6_CLM5_snowdensity.nc") snowd_2 = xr.open_dataset(filepath+"TVAR_CAM6_CLM5_snowdensity_002.nc") snowd = (snowd_1+snowd_2)/2. # TREFHT PDFs def deseasonalize(dat): datseas = dat.groupby('time.dayofyear').mean('time') dat4harm = filt.calc_season_nharm(datseas,4,dimtime=1) datanoms = dat.groupby('time.dayofyear') - dat4harm datdjfanoms = cal.group_season_daily(datanoms,'DJF') datmean = datdjfanoms.mean('day') datdjfanoms = datdjfanoms - datmean return datdjfanoms # + #CLM5 dat1 = xr.open_dataset(filepath_3cities+"/CAM/TREFHT_Isla_CAM6_CLM5.nc") dat1 = dat1.trefht dat1deseas = deseasonalize(dat1) dat2 = xr.open_dataset(filepath_3cities+"/CAM/TREFHT_Isla_CAM6_CLM5_002.nc") dat2 = dat2.trefht dat2deseas = deseasonalize(dat2) nyears = dat1deseas.year.size + dat2deseas.year.size clm5_3cities=np.zeros([nyears, dat1deseas.day.size, dat1deseas.city.size]) clm5_3cities[0:dat1deseas.year.size,:,:] = dat1deseas clm5_3cities[dat1deseas.year.size: nyears,:,:] = dat2deseas # - #CLM4 dat = xr.open_dataset(filepath_3cities+"/CAM/TREFHT_Cecile_CAM6_CLM4.nc") dat = dat.trefht clm4_3cities = deseasonalize(dat) clm4_3cities = np.array(clm4_3cities) # + #SNOWD dat1 = xr.open_dataset(filepath_3cities+"/CAM/TREFHT_CAM6_CLM5_snowdensity.nc") dat1 = dat1.trefht dat1deseas = deseasonalize(dat1) dat2 = xr.open_dataset(filepath_3cities+"/CAM/TREFHT_CAM6_CLM5_snowdensity_002.nc") dat2 = dat2.trefht dat2deseas = deseasonalize(dat2) nyears = dat1deseas.year.size + dat2deseas.year.size snowd_3cities = np.zeros([nyears, dat1deseas.day.size, dat1deseas.city.size]) snowd_3cities[0:dat1deseas.year.size,:,:] = dat1deseas snowd_3cities[dat1deseas.year.size:nyears,:,:]=dat2deseas # - bins = np.arange(-35.5,36.5,1) clm5pdf = np.zeros([bins.size,3]) clm4pdf = np.zeros([bins.size,3]) snowdpdf = np.zeros([bins.size,3]) for icity in np.arange(0,dat1.city.size,1): dat = clm5_3cities[:,:,icity].flatten() kernel = stats.gaussian_kde(dat) clm5pdf[:,icity] = kernel(bins)*100. dat = clm4_3cities[:,:,icity].flatten() kernel = stats.gaussian_kde(dat) clm4pdf[:,icity] = kernel(bins)*100. dat = snowd_3cities[:,:,icity].flatten() kernel = stats.gaussian_kde(dat) snowdpdf[:,icity] = kernel(bins)*100. 
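# A quick sanity check on the conversion above (not part of the original analysis): multiplying `kernel(bins)` by 100 gives a probability in percent per bin only because the bins are spaced 1 K apart, so each estimated PDF should sum to roughly 100 over the bins.

# +
# Hypothetical check: with 1 K bin spacing, each column should sum to approximately 100 (%)
for name, pdf in [('CLM5', clm5pdf), ('CLM4', clm4pdf), ('SNWDENS', snowdpdf)]:
    print(name, pdf.sum(axis=0))
# -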
def plotlogpdf(fig,titlestr,x1,x2,y1,y2): ax = fig.add_axes([x1,y1,(x2-x1),(y2-y1)]) ax.set_ylim(np.log10(0.01),np.log10(15)) ax.set_yticks([np.log10(0.01),np.log10(0.03),np.log10(0.1),np.log10(0.3),np.log10(1),np.log10(3),np.log10(10)]) ax.set_yticklabels(['0.01','0.03','0.1','0.3','1','3','10'], fontsize=12) ax.set_ylabel('Probability (%)', fontsize=12) ax.set_xlim([-35,35]) ax.set_xticks([-30,-20,-10,0,10,20,30]) ax.set_xticklabels(['-30','-20','-10','0','10','20','30'], fontsize=12) ax.set_xlabel('Temperature (K)', fontsize=14) ax.set_ylabel('Probability (%)', fontsize=14) ax.set_title(titlestr,fontsize=16) return ax x1, x2, y1, y2 = pos.get3by3coords() # + #SCAM_CLM5 #dat1 = xr.open_dataset(filepath_3cities+"/SCAM/TREFHT_SCAM_CLM5_CLM5F_001.nc") dat1 = xr.open_dataset(filepath_3cities+"/SCAM_CLMINIT_60days/TREFHT_SCAM_CLM5_CLM5F_01.nc") dat1 = dat1.sel(time=slice("1979-01-01", "2014-12-31")) dat1 = dat1.trefht dat1deseas = deseasonalize(dat1) #dat2 = xr.open_dataset(filepath_3cities+"/SCAM/TREFHT_SCAM_CLM5_CLM5F_002.nc") dat2 = xr.open_dataset(filepath_3cities+"/SCAM_CLMINIT_60days/TREFHT_SCAM_CLM5_CLM5F_02.nc") dat2 = dat2.sel(time=slice("1979-01-01", "2014-12-31")) dat2 = dat2.trefht dat2deseas = deseasonalize(dat2) nyears = dat1deseas.year.size + dat2deseas.year.size scam_clm5 = np.zeros([nyears, dat1deseas.day.size, dat1deseas.city.size]) scam_clm5[0:dat1deseas.year.size,:,:] = dat1deseas scam_clm5[dat1deseas.year.size:nyears,:,:]=dat2deseas # + #SCAM_SNOWD #dat1 = xr.open_dataset(filepath_3cities+"/SCAM/TREFHT_SCAM_SNOWD_SNOWDF_001.nc") dat1 = xr.open_dataset(filepath_3cities+"/SCAM_CLMINIT_60days/TREFHT_SCAM_SNOWD_SNOWDF_01.nc") dat1 = dat1.sel(time=slice("1979-01-01", "2014-12-31")) dat1 = dat1.trefht dat1deseas = deseasonalize(dat1) #dat2 = xr.open_dataset(filepath_3cities+"/SCAM/TREFHT_SCAM_SNOWD_SNOWDF_002.nc") dat2 = xr.open_dataset(filepath_3cities+"/SCAM_CLMINIT_60days/TREFHT_SCAM_SNOWD_SNOWDF_02.nc") dat2 = dat2.sel(time=slice("1979-01-01", "2014-12-31")) dat2 = dat2.trefht dat2deseas = deseasonalize(dat2) nyears = dat1deseas.year.size + dat2deseas.year.size scam_snowd = np.zeros([nyears, dat1deseas.day.size, dat1deseas.city.size]) scam_snowd[0:dat1deseas.year.size,:,:] = dat1deseas scam_snowd[dat1deseas.year.size:nyears,:,:]=dat2deseas # + #SCAM_SNOWD_CLM5F #dat1 = xr.open_dataset(filepath_3cities+"/SCAM/TREFHT_SCAM_SNOWD_CLM5F_001.nc") dat1 = xr.open_dataset(filepath_3cities+"/SCAM_CLMINIT_60days/TREFHT_SCAM_SNOWD_CLM5F_01.nc") dat1 = dat1.sel(time=slice("1979-01-01", "2014-12-31")) dat1 = dat1.trefht dat1deseas = deseasonalize(dat1) #dat2 = xr.open_dataset(filepath_3cities+"/SCAM/TREFHT_SCAM_SNOWD_CLM5F_002.nc") dat2 = xr.open_dataset(filepath_3cities+"/SCAM_CLMINIT_60days/TREFHT_SCAM_SNOWD_CLM5F_02.nc") dat2 = dat2.sel(time=slice("1979-01-01", "2014-12-31")) dat2 = dat2.trefht dat2deseas = deseasonalize(dat2) nyears = dat1deseas.year.size + dat2deseas.year.size scam_snowd_clm5F = np.zeros([nyears, dat1deseas.day.size, dat1deseas.city.size]) scam_snowd_clm5F[0:dat1deseas.year.size,:,:] = dat1deseas scam_snowd_clm5F[dat1deseas.year.size:nyears,:,:]=dat2deseas # - bins = np.arange(-35.5,36.5,1) scamclm5pdf = np.zeros([bins.size,3]) scamsnowdpdf = np.zeros([bins.size,3]) scamsnowdclm5fpdf = np.zeros([bins.size,3]) for icity in np.arange(0,dat1.city.size,1): dat = scam_clm5[:,:,icity].flatten() kernel = stats.gaussian_kde(dat) scamclm5pdf[:,icity] = kernel(bins)*100. 
dat = scam_snowd[:,:,icity].flatten() kernel = stats.gaussian_kde(dat) scamsnowdpdf[:,icity] = kernel(bins)*100. dat = scam_snowd_clm5F[:,:,icity].flatten() kernel = stats.gaussian_kde(dat) scamsnowdclm5fpdf[:,icity] = kernel(bins)*100. print(y1[0], y2[0]) print(x2[0]-x1[0]) # + fig = plt.figure(figsize=(16,16)) ax1 = maps.contourmap_bothcontinents_fill_nh_pos(fig, np.array(cam6_clm5.djfvar) - np.array(clm4dat.djfvar), np.array(cam6_clm5.lon), np.array(cam6_clm5.lat), 5, -70, 70, '(a) CLM influence', 0.17, 0.45, 0.8, 0.95) ax2 = maps.contourmap_bothcontinents_fill_nh_pos(fig, np.array(cam6_clm5.djfvar) - np.array(snowd.djfvar), np.array(cam6_clm5.lon), np.array(cam6_clm5.lat), 5, -70, 70, '(b) SNWDENS influence', 0.55, 0.83, 0.8, 0.95) ax = cbars.plotcolorbar(fig, 5, -70, 70, 'Temperature variance (K$^{2}$)', 0.2,0.8,0.76,0.77,ticks=([-60,-40,-20,0,20,40,60]), fsize=12) ax3 = plotlogpdf(fig, '(c) Saskatoon (CAM6)', 0.05,0.3,0.49,0.69) ax3.plot(bins, np.log10(clm5pdf[:,0]), color='darkblue', linewidth=3) ax3.plot(bins, np.log10(clm4pdf[:,0]), color='darkred', linewidth=3) ax3.plot(bins, np.log10(snowdpdf[:,0]), color='forestgreen', linewidth=3) ax4 = plotlogpdf(fig, '(d) Toronto (CAM6)', 0.37,0.62,0.49,0.69) ax4.plot(bins, np.log10(clm5pdf[:,1]), color='darkblue', linewidth=3) ax4.plot(bins, np.log10(clm4pdf[:,1]), color='darkred', linewidth=3) ax4.plot(bins, np.log10(snowdpdf[:,1]), color='forestgreen', linewidth=3) ax5 = plotlogpdf(fig, '(e) Siderovsk (CAM6)', 0.69,0.94,0.49,0.69) ax5.plot(bins, np.log10(clm5pdf[:,2]), color='darkblue', linewidth=3) ax5.plot(bins, np.log10(clm4pdf[:,2]), color='darkred', linewidth=3) ax5.plot(bins, np.log10(snowdpdf[:,2]), color='forestgreen', linewidth=3) legendparts = [Line2D([0],[0],color='darkblue',linewidth=3), Line2D([0],[0],color='darkred',linewidth=3), Line2D([0],[0],color='forestgreen',linewidth=3)] legendlabels=['CAM6_CLM5','CAM6_CLM4','SNWDENS'] fig.legend(legendparts, legendlabels, bbox_to_anchor = (0.68,0.455), ncol=3, frameon=False, fontsize='x-large') ax6 = plotlogpdf(fig, '(f) Saskatoon (SCAM6)', 0.05,0.3,0.2,0.4) ax6.plot(bins, np.log10(scamclm5pdf[:,0]), color='darkblue', linewidth=3) ax6.plot(bins, np.log10(scamsnowdpdf[:,0]), color='forestgreen', linewidth=3) ax6.plot(bins, np.log10(scamsnowdclm5fpdf[:,0]), color='darkorange', linewidth=3) ax7 = plotlogpdf(fig, '(g) Toronto (SCAM6)', 0.37,0.62,0.2,0.4) ax7.plot(bins, np.log10(scamclm5pdf[:,1]), color='darkblue', linewidth=3) ax7.plot(bins, np.log10(scamsnowdpdf[:,1]), color='forestgreen', linewidth=3) ax7.plot(bins, np.log10(scamsnowdclm5fpdf[:,1]), color='darkorange', linewidth=3) ax8 = plotlogpdf(fig, '(h) Siderovsk (SCAM6)', 0.69,0.94,0.2,0.4) ax8.plot(bins, np.log10(scamclm5pdf[:,2]), color='darkblue', linewidth=3) ax8.plot(bins, np.log10(scamsnowdpdf[:,2]), color='forestgreen', linewidth=3) ax8.plot(bins, np.log10(scamsnowdclm5fpdf[:,2]), color='darkorange', linewidth=3) legendparts = [Line2D([0],[0],color='darkblue',linewidth=3), Line2D([0],[0],color='forestgreen',linewidth=3), Line2D([0],[0],color='darkorange',linewidth=3)] legendlabels=['SCAM6_CLM5_CLM5F','SCAM6_SNWDENS_SNWDENSF','SCAM6_SNWDENS_CLM5F'] fig.legend(legendparts, legendlabels, bbox_to_anchor = (0.8,0.165), ncol=3, frameon=False, fontsize='x-large') # - fig.savefig(plotdir+"fig4.pdf", facecolor="white", bbox_inches="tight") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # 
language: python # name: python2 # --- # Illustration of how the performance of an estimator on unseen data (test data) is not the same as its performance on the training data. As the regularization increases, the performance on the training set decreases, while the performance on the test set is optimal within a range of values of the regularization parameter. The example uses an Elastic-Net regression model, and performance is measured using the explained variance, a.k.a. the R^2 score. # #### New to Plotly? # Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/). #
You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online). #
We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! # ### Version import sklearn sklearn.__version__ # ### Imports # + import plotly.plotly as py import plotly.graph_objs as go from plotly import tools import numpy as np from sklearn import linear_model # - # ### Calculations # Generate sample data # + n_samples_train, n_samples_test, n_features = 75, 150, 500 np.random.seed(0) coef = np.random.randn(n_features) coef[50:] = 0.0 # only the first 50 features impact the model X = np.random.randn(n_samples_train + n_samples_test, n_features) y = np.dot(X, coef) # Split train and test data X_train, X_test = X[:n_samples_train], X[n_samples_train:] y_train, y_test = y[:n_samples_train], y[n_samples_train:] # - # Compute train and test errors # + alphas = np.logspace(-5, 1, 60) enet = linear_model.ElasticNet(l1_ratio=0.7) train_errors = list() test_errors = list() for alpha in alphas: enet.set_params(alpha=alpha) enet.fit(X_train, y_train) train_errors.append(enet.score(X_train, y_train)) test_errors.append(enet.score(X_test, y_test)) i_alpha_optim = np.argmax(test_errors) alpha_optim = alphas[i_alpha_optim] print("Optimal regularization parameter : %s" % alpha_optim) # Estimate the coef_ on full data with optimal regularization parameter enet.set_params(alpha=alpha_optim) coef_ = enet.fit(X, y).coef_ # - # ### Plot Results fig = tools.make_subplots(rows=2, cols=1) # + p1 = go.Scatter(x=alphas, y=train_errors, name='Train', mode='lines', line=dict(width=1)) fig.append_trace(p1, 1, 1) p2 = go.Scatter(x=alphas, y=test_errors, name='Test', mode='lines', line=dict(width=1)) fig.append_trace(p2, 1, 1) p3 = go.Scatter(x=2*[alpha_optim],y=[0, np.max(test_errors)], mode='lines', line=dict(width=3, color='black'), name='Optimum on test') fig.append_trace(p3, 1, 1) fig['layout']['yaxis1'].update(title='Performance', showgrid=False) fig['layout']['xaxis1'].update(title='Regularization parameter', showgrid=False, type='log') # Show estimated coef_ vs true coef p4 = go.Scatter(y=coef, name='True coef', mode='lines', line=dict(width=1)) fig.append_trace(p4, 2, 1) p5 = go.Scatter(y=coef_, name='Estimated coef', mode='lines', line=dict(width=1)) fig.append_trace(p5, 2, 1) # - py.iplot(fig) # ### License # Author: # # <> # # License: # # BSD 3 clause # # + from IPython.display import display, HTML display(HTML('')) display(HTML('')) # !
pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'Train error vs Test error.ipynb', 'scikit-learn/plot-underfitting-overfitting/', 'Train Error vs Test Error| plotly', ' ', title = 'Train Error vs Test Error | plotly', name = 'Train Error vs Test Error', has_thumbnail='true', thumbnail='thumbnail/train-test-error.jpg', language='scikit-learn', page_type='example_index', display_as='model_selection', order=3, ipynb= '~Diksha_Gabha/3416') # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + if __name__ == '__main__': n = int(input()) student_marks = {} for _ in range(n): name, *line = input().split() scores = list(map(float, line)) student_marks[name] = scores query_name = input() print("{0:.2f}".format(sum(student_marks[query_name])/len(student_marks[query_name]))) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Mixed lubrication modelling by the semi system approach # Slippy contains the powerful unified Reynold’s solver which can solve a wide variety of lubrication problems from Hertzian contact/ boundary lubrication to mixed lubrication and full EHL contacts. This can be done for non-Newtonian fluids including user defined or built-in models and rough surfaces. Using sub models for wear, friction, flash temperature, tribofilm growth, etc. full simulations can be carried out with any user defined behaviour. # # Clearly this is a powerful and flexible tool. In this notebook, we will use this solver to solve the oil lubricated contact between a ball and a plane. This process has the steps defined below: # - Import slippy # - Solve the Hertzian contact problem as an initial guess for the solution and to set sensible bounds for the solution domain # - Make the surfaces # - Assign materials to the surfaces # - Make the lubricant and assign non-Newtonian sub models to it # - Make a contact model with the surfaces and the lubricant # - Make a Reynold’s solver object # - Make a lubrication model step with the Reynold’s solver # - Add this step to the contact model # - Data check the contact model # - Solve the contact model # - Analyse the result # %matplotlib inline # Importing slippy import slippy slippy.CUDA = False import slippy.surface as S import slippy.contact as C # # Defining constants # The next cell contains all the constants which we will use in the model, these can be edited for different situations: radius = 0.01905 # The radius of the ball load = 800 # The load on the ball in N rolling_speed = 4 # The rolling speed in m/s (The mean speed of the surfaces) youngs_modulus = 200e9 # The youngs modulus of the surfaces p_ratio = 0.3 # The poission's ratio of the surfaces grid_size = 65 # The number of points in the descretisation grid eta_0 = 0.096 # Coefficient in the roelands pressure-viscosity equation roelands_p_0 = 1/5.1e-9# Coefficient in the roelands pressure-viscosity equation roelands_z = 0.68 # Coefficient in the roelands pressure-viscosity equation # # Solving the Hertzian problem # Slippy contains a comprehensive hertz solver that will provide all the parameters we need to initialise the solution. 
This can be found in the contact sub package: # Solving the hertzian contact hertz_result = C.hertz_full([radius, radius], [float('inf'), float('inf')], [youngs_modulus, youngs_modulus], [p_ratio, p_ratio], load) hertz_pressure = hertz_result['max_pressure'] hertz_a = hertz_result['contact_radii'][0] hertz_deflection = hertz_result['total_deflection'] hertz_pressure_function = hertz_result['pressure_f'] # # Making the surface objects # Next, we will make define the geometry of the contacting surfaces. We can use analytically defined surfaces for this. In any contact model the 'master' surface must be discretised to the resolution of the solution grid. We can discretise the round surface to the correct grid size by setting its shape and extent and setting generate to True when the surface is made. # # We can view the surface profile by calling the show() method on the surface. ball = S.RoundSurface((radius,)*3, shape = (grid_size, grid_size), extent=(hertz_a*4,hertz_a*4), generate = True) flat = S.FlatSurface() ball.show() # # Assigning materials to the surfaces # Now we must define how the surfaces deflect under load. This is done by setting the material property of the surfaces. If we wanted to model pure HDL, we could set theses to rigid materials, but here we will model full EHL by using an elastic material. # # Any material object that supplies a displacement_from_surface_loads method can be used here. steel = C.Elastic('steel', {'E' : youngs_modulus, 'v' : p_ratio}) ball.material = steel flat.material = steel # # Making the lubricant # Now we must define how the lubricant flows under pressure and any non-Newtonian behaviour that we want to model. Our lubricant must have sub models for each of the fluid parameters in our chosen Reynold’s solver's requires property. In this case that is the non-dimensional viscosity and the non-dimensional pressure. If any of the needed models are missed it will be caught in the data check stage. # # Here we will add a Roeland’s model for the viscosity and a Dowson Higginson model for the density. These are defined as shown below: # # Roeland's: # $$\frac{\eta(P)}{\eta_0} = exp\left(\left(ln(\eta_0)+9.67\right)\left(-1+\left(1+\frac{P p_h}{p_0}\right)^z\right)\right)$$ # # # In which $P$ is the non dimensional pressure, $\frac{\eta(P)}{\eta_0}$ is the non dimensional viscosity, $p_h$ is the Hertzian pressure and $p_o$, $\eta_0$ are parameters specific to each oil. # # Dowson-Higginson: # $$\frac{\rho(P)}{\rho_0} = \frac{5.9e8+1.34p_hP}{5.9e8+p_hP}$$ # # In which $P$ is the non dimensional pressure, $\frac{\rho(P)}{\rho_0}$ is the non dimensional density, and $p_h$ is the Hertzian pressure and $\rho_0$ is the density at ambient pressure. # # **Sub models added to the lubricant will be run on every iteration of the solver. Models for wear or friction etc should be added to the step or the contact model, these are run on each step after the normal contact has been solved.** # + print(C.UnifiedReynoldsSolver.requires) # looking at the requires property of our chosen solver oil = C.Lubricant('oil') # Making a lubricant object to contain our sub models oil.add_sub_model('nd_viscosity', C.lubricant_models.nd_roelands(eta_0, roelands_p_0, hertz_pressure, roelands_z)) oil.add_sub_model('nd_density', C.lubricant_models.nd_dowson_higginson(hertz_pressure)) # adding dowson higginson # - # # Making a contact model # Now we will make a contact model to coordinate solving our steps, contact models can contain any number of solution steps to be solved. 
They also handle file output for long simulations. For our single step simulation, we do not need file outputs. # # The first argument of the contact model is the name, this will be used for the log file and the output files. The second argument is the master surface, this must be discretised. my_model = C.ContactModel('lubrication_test', ball, flat, oil) # # Making a Reynold’s solver # There are many ways to solve a Reynold’s equation, as such Reynold’s solvers are separate object in slippy. The Reynold’s solver you choose will depend on the situation you are trying to model but these can be used interchangeably in the lubrication steps. # # Our Reynold’s solver is non dimensional and as such must take all the parameters which are used to non-denationalise the problem, the exact parameters needed will change from solver to solver, the documentation for your particular solver should be consulted. reynolds = C.UnifiedReynoldsSolver(time_step = 0, grid_spacing = ball.grid_spacing, hertzian_pressure = hertz_pressure, radius_in_rolling_direction=radius, hertzian_half_width=hertz_a, dimentional_viscosity=eta_0, dimentional_density=872) # # Making a lubrication step and adding it to the model # The unified Reynold’s solver only solves the fluid pressure problem. To solve the full EHL problem we need to use this with a model step. This step coordinates solving each of the semi systems (fluid pressure, material deflection, pressure-viscosity, pressure-density) as well as checking for convergence in the solution. # # To make the solution converge faster we can provide an initial guess of the pressure distribution from the Hertzian solution we found earlier. We can also add any options which we want to pass to the material objects, and iteration controls like the required accuracy, the maximum number of iterations and the relaxation factor used for the pressure result. # # Lastly, we need to add this step to our contact model. # + # Find the hertzian pressure distribution as an initial guess X, Y = ball.get_points_from_extent() X, Y = X + ball._total_shift[0], Y + ball._total_shift[1] hertzian_pressure_dist = hertz_pressure_function(X, Y) # Making the step object step = C.IterSemiSystem('main', reynolds, rolling_speed, 1, no_time=True, normal_load=load, initial_guess=[hertz_deflection, hertzian_pressure_dist], relaxation_factor=0.05, max_it_interference=3000) # Adding the step to the contact model my_model.add_step(step) # - # # Data checking the model # There are many potential problems that can arise during solution of a contact model. data checking the model will ensure that during the solution each requirement of all the sub models, Reynold’s solvers, steps, and outputs will be fulfilled when they are called. A model failing to write your required outputs can waste a lot of time, so it is always best not to skip this step. # # *It will not ensure that your result will converge or that the result will resemble reality in any way. For this you must validate against experimental measurements, tinker with iteration controls or do some maths (hopefully not).* my_model.data_check() # # Solving the model # Now we are ready to solve the model! # # This is easy, by default the solve method will return the final state of the model as a dict, it will also write all requested outputs to a file. 
state = my_model.solve() print(f"Result converged: {state['converged']}") # # Plotting our results # To plot our results we need to import some plotting packages import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import numpy as np # # Full pressure distribution fig = plt.figure(figsize=(8,5)) ax = fig.gca(projection='3d') ax.plot_trisurf(X.flatten()/hertz_a, Y.flatten()/hertz_a, state['nd_pressure'].flatten(), cmap=plt.cm.viridis, linewidth=0.2) ax.set_xlabel('ND length') ax.set_ylabel('ND length') ax.set_zlabel('ND pressure') ax.set_title('Pressure distribution') plt.show() # # Gap state['gap'][state['gap']<0.5e-9] = 0.5e-9 plt.imshow(np.log(state['gap'])) # # Central pressure distribution plt.plot(X[:,0]/hertz_a,state['nd_pressure'][:,32], label='Lubricated') plt.plot(X[:,0]/hertz_a,hertzian_pressure_dist[:,32]/hertz_pressure, label='Hertz') ax = plt.gca() ax.set_xlabel('Nondimensional length') ax.set_ylabel('Nondimensional pressure') ax.set_title('Central pressure distribution') # # Central film thickness plt.plot(X[:,0]/hertz_a, state['gap'][:,32]/hertz_a) ax = plt.gca() ax.set_xlabel('Nondimensional length') ax.set_ylabel('Nondimensional film thickness') ax.set_title('Central film thickness') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- import plotly import numpy as np import pandas as pd plotly.tools.set_credentials_file(username='mlohmus', api_key='') dat = pd.read_csv('clean/macro/master_macro_cleaned_naics.csv',dtype=str, usecols=['CASE_SUBMITTED','EMPLOYER_NAME','EMPLOYER_CITY','EMPLOYER_STATE','WORKSITE_STATE','TOTAL_WORKERS','NAICS_CLASSIFICATION']) dat.CASE_SUBMITTED = pd.to_datetime(dat.CASE_SUBMITTED) dat['YEAR'] = dat.CASE_SUBMITTED.apply(lambda x: x.year) dat.TOTAL_WORKERS=dat.TOTAL_WORKERS.astype(float) df = dat.groupby(['WORKSITE_STATE','YEAR'])[['TOTAL_WORKERS']].sum() df.columns=['TOTAL_WORKERS'] df.reset_index(inplace=True) # + #plotly.offline.init_notebook_mode() #df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2011_us_ag_exports.csv') scl = [[0.0, 'rgb(242,240,247)'],[0.2, 'rgb(218,218,235)'],[0.4, 'rgb(188,189,220)'],\ [0.6, 'rgb(158,154,200)'],[0.8, 'rgb(117,107,177)'],[1.0, 'rgb(84,39,143)']] data = [] for i in range(2001,2018): data.append(dict( type='choropleth', colorscale = scl, autocolorscale = False, locations = df[df.YEAR==i]['WORKSITE_STATE'], z = df[df.YEAR==i]['TOTAL_WORKERS'].astype(float), locationmode = 'USA-states', text =df[df.YEAR==i]['WORKSITE_STATE'], marker = dict( line = dict ( color = 'rgb(255,255,255)', width = 2 ) ), colorbar = dict( title = "# Workers Requested") ) ) layout = dict( title = 'LCA Requests by State', geo = dict( scope='usa', projection=dict( type='albers usa' ), showlakes = True, lakecolor = 'rgb(255, 255, 255)'), ) # let's create the steps for the slider steps = [] for i in range(len(data)): step = dict(method='restyle', args=['visible', [False] * len(data)], label='Year {}'.format(i + 2001)) step['args'][1][i] = True steps.append(step) sliders = [dict(active=0, pad={"t": 1}, steps=steps)] layout['sliders'] = sliders fig = dict(data=data, layout=layout) py.iplot(fig, filename='Sine Wave Slider') # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: 
python # name: python3 # --- # # 3. Train-Predict KNN # Result: # - Kaggle score: # # Reference: # - http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html#sklearn.neighbors.KNeighborsClassifier # - http://scikit-learn.org/stable/modules/model_evaluation.html#model-evaluation # ## Run name # + import time import os import pandas as pd import gc project_name = 'Google_LandMark_Rec' step_name = 'Train-Predict_KNN' time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime()) run_name = project_name + '_' + step_name + '_' + time_str print('run_name: ' + run_name) # - # ## 项目文件夹 # + cwd = os.getcwd() data1_folder = '/data1/kaggle/landmark-recognition-challenge/' input_folder = os.path.join(data1_folder, 'input') output_folder = os.path.join(cwd, 'output') model_folder = os.path.join(cwd, 'model') feature_folder = os.path.join(cwd, 'feature') post_pca_feature_folder = os.path.join(cwd, 'post_pca_feature') log_folder = os.path.join(cwd, 'log') print('input_folder: \t' + input_folder) print('output_folder: \t' + output_folder) print('model_folder: \t' + model_folder) print('feature_folder: \t' + feature_folder) print('post_pca_feature_folder: \t' + post_pca_feature_folder) print('log_folder: \t' + log_folder) org_train_folder = os.path.join(input_folder, 'org_train') org_test_folder = os.path.join(input_folder, 'org_test') train_folder = os.path.join(input_folder, 'data_train') test_folder = os.path.join(input_folder, 'data_test') test_sub_folder = os.path.join(test_folder, 'test') if not os.path.exists(post_pca_feature_folder): os.mkdir(post_pca_feature_folder) print('Create folder: %s' % post_pca_feature_folder) # - train_csv_file = os.path.join(input_folder, 'train.csv') test_csv_file = os.path.join(input_folder, 'test.csv') sample_submission_folder = os.path.join(input_folder, 'sample_submission.csv') # ## 加载feature # + # %%time import h5py import numpy as np from sklearn.utils import shuffle np.random.seed(2018) x_data = [] y_data = {} x_test = [] image_size = 150 time_str = '20180311-151108' cwd = os.getcwd() # feature_cgg16 = os.path.join(cwd, 'feature', 'feature_VGG16_{}.h5'.format(20180219)) # feature_cgg19 = os.path.join(cwd, 'feature', 'feature_VGG19_{}.h5'.format(20180219)) # feature_resnet50 = os.path.join(cwd, 'feature', 'feature_ResNet50_{}.h5'.format(20180220)) # feature_xception = os.path.join(cwd, 'feature', 'feature_Xception_{}.h5'.format(20180221)) feature_inceptionV3 = os.path.join(cwd, 'post_pca_feature', 'post_pca_feature_InceptionV3_%s_%s.h5' % (image_size, time_str)) # feature_inceptionResNetV2 = os.path.join(cwd, 'feature', 'feature_InceptionResNetV2_%s.h5' % time_str) # for filename in [feature_cgg16, feature_cgg19, feature_resnet50, feature_xception, feature_inception, feature_inceptionResNetV2]: for filename in [feature_inceptionV3]: with h5py.File(filename, 'r') as h: x_data.append(np.array(h['train'])) y_data = np.array(h['train_labels']) # x_val.append(np.array(h['val'])) # y_val = np.array(h['val_labels']) x_test.append(np.array(h['test'])) # - print(x_data[0].shape) print(len(y_data)) print(x_test[0].shape) # %%time x_data = np.concatenate(x_data, axis=-1) x_test = np.concatenate(x_test, axis=-1) print(x_data.shape) print(x_test.shape) # + # from sklearn.model_selection import train_test_split # x_train, x_val, y_train, y_val = train_test_split(x_data, y_data, test_size=0.0025, random_state=5) # print(x_train.shape) # print(x_val.shape) # print(y_train.shape) # print(y_val.shape) # - # ## Train set_y_data = list(set(y_data)) 
print(len(set_y_data)) # + import numpy as np from sklearn.metrics import accuracy_score from sklearn.model_selection import train_test_split x_train, x_val, y_train, y_val = train_test_split(x_data, y_data, test_size=0.1, random_state=2017) print(x_train.shape) print(x_val.shape) print(y_train.shape) print(y_val.shape) print(x_test.shape) # - set_y_train = list(set(y_train)) print(len(set_y_train)) # + # %%time from sklearn.neighbors import NearestNeighbors clf = NearestNeighbors( radius=2, algorithm='brute', leaf_size=10, metric='cosine', n_jobs=-1 ) clf.fit(x_train, y_train) # - # %%time from sklearn.metrics import accuracy_score y_train_pred = clf.kneighbors(x_train[:100], 20, True) print(y_train_pred[0].shape) print(y_train_pred[1].shape) y_train_pred1 = [y_train[i] for i in y_train_pred[1][:, 0]] # print(y_train_pred1.shape) print(y_train_pred1[:10]) print(accuracy_score(y_train[:100], y_train_pred1)) # + # %%time from sklearn.metrics import accuracy_score count = 100 # count = len(y_val) y_val_nbs = clf.kneighbors(x_val[:count], 10, True) print(y_val_nbs[0].shape) print(y_val_nbs[1].shape) # + # %%time y_val_pred = [y_train[i] for i in y_val_nbs[1][:, 0]] # for i in range(count): # print((y_val[i], y_val_pred1[i]), end=' ') # if y_val[i] == y_val_pred1[i]: # print('*') # else: # print(' ') val_acc = accuracy_score(y_val[:count], y_val_pred) print(val_acc) # - print(list(set(y_val_nbs[0][1]))) print(list(set(y_val_nbs[1][1]))) # %%time y_test_nbs = clf.kneighbors(x_test, 1, True) print(y_test_nbs[0].shape) print(y_test_nbs[1].shape) # %%time y_test_pred = [y_train[i] for i in y_test_nbs[1][:, 0]] print(len(y_test_pred)) print(y_test_pred[:10]) # ## Predict sample_submission_csv = pd.read_csv(sample_submission_folder) print('sample_submission_csv.shape is {0}.'.format(sample_submission_csv.shape)) display(sample_submission_csv.head(2)) # 这里证明os.listdir()得到的图片名称list不正确 files = os.listdir(os.path.join(input_folder, 'data_test', 'test')) print(files[:10]) # 这里证明ImageDataGenerator()得到的图片名称list才是正确 from keras.preprocessing.image import ImageDataGenerator gen = ImageDataGenerator() image_size = (299, 299) batch_size = 128 test_generator = gen.flow_from_directory(test_folder, image_size, shuffle=False, batch_size=batch_size) print('test_generator') print(len(test_generator.filenames)) print(test_generator.filenames[:10]) # + # test_generator_filenames = np.array(test_generator.filenames) # test_generator_filenames_file = os.path.join(output_folder, 'test_generator_filenames.npy') # np.save(test_generator_filenames_file, test_generator_filenames) # + # %%time test_dict = {} for i, paire in enumerate(zip(test_generator.filenames, y_test_pred)): image_name, label = paire[0], paire[1] image_id = image_name[5:-4] test_dict[image_id] = '%d %.4f' % (label, 1) #确认图片的id是否能与ImageDataGenerator()对应上 for key in list(test_dict.keys())[:10]: print('%s %s' % (key, test_dict[key])) # - # %%time len_sample_submission_csv = len(sample_submission_csv) print('len(len_sample_submission_csv)=%d' % len_sample_submission_csv) count = 0 for i in range(len_sample_submission_csv): image_id = sample_submission_csv.iloc[i, 0] # landmarks = sample_submission_csv.iloc[i, 1] if image_id in test_dict: pred_landmarks = test_dict[image_id] # print('%s %s' % (image_id, pred_landmarks)) sample_submission_csv.iloc[i, 1] = pred_landmarks else: # print(image_id) # sample_submission_csv.iloc[i, 1] = '9633 1.0' # 属于9633的类最多,所以全都设置成这个类,可能会比设置成空得到的结果好 sample_submission_csv.iloc[i, 1] = '' # 设置成空 count += 1 if count % 10000 == 0: 
print(int(count/10000), end=' ') display(sample_submission_csv.head(2)) # + run_name_acc = run_name + '_' + str(int(val_acc*10000)).zfill(4) pred_file = os.path.join(output_folder, 'pred_' + run_name_acc + '.csv') print(pred_file) sample_submission_csv.to_csv(pred_file, index=None) # - print(run_name_acc) print('Done !') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np from statistics import mode from statistics import median # # Pandas Review # This HW covers the simple pandas concepts introduced in class using the NBA Players Stats - 2014-2015 data # To find out more about the data, follow the link: https://www.kaggle.com/drgilermo/nba-players-stats-20142015/version/1 # ## Load `players_stats.csv` df = pd.read_csv('players_stats.csv') df.head() # ## Explore the data in 3 different ways (using 3 different methods) # Hint: you want to use methods that show the size and shape of your dataframe df.shape df.dtypes df.size # ## Introduction to Statistics # #### Calculate the mean, mode and median of `MIN` and `PTS` fields # df.MIN.mean() df.MIN.mode() (median(df.MIN)) df.PTS.mean() df.PTS.mode() (median(df.PTS)) # #### From the description of the data find out what information is contained in the `MIN` and `PTS` fields df.MIN.describe() df.PTS.describe() # ### Compute the average number of minutes all the players spend on the field df.groupby(['Name']).MIN.mean().reset_index() group_avg = df.groupby(['Name']).MIN.mean().reset_index() group_avg.mean() # ### Now compute the average number of minutes each team spends on the field df = df.dropna(subset=['Team', 'MIN']) df.MIN.mean() team_avg = df.groupby(['Team']).MIN.mean().reset_index() team_avg.mean() # ### Why is it different? What is the right way to calculate the Average of averages?
# Please use classwork as a reference # ### Calculate the average number of minutes all teams spend on the field count_and_mean = df.groupby(['Team']).agg({'MIN':['mean', 'count', 'median']}).reset_index() count_and_mean sum(count_and_mean['MIN']['count']*count_and_mean['MIN']['mean'])/len(df) sum(count_and_mean['MIN']['count']) len(df) df[df.Team.isnull()==True] / --- / jupyter: / jupytext: / text_representation: / extension: .q / format_name: light / format_version: '1.5' / jupytext_version: 1.14.4 / --- / + cell_id="00000-caca9876-d08b-4e9e-b0c6-8ac34bf0a238" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=936 execution_start=1616756133490 source_hash="7e11b1f9" tags=[] / + cell_id="00001-b8d0b297-5861-49b8-ac3b-935b21a20090" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=2 execution_start=1618435779429 source_hash="d5c6ecdf" tags=[] import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns / %matplotlib inline / + cell_id="00002-5403e443-90d1-4451-9864-74bccd98ed95" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=9 execution_start=1618435364840 source_hash="93f02f4f" tags=[] adam = np.load("cifar/ADAM-50.npy") nag = np.load("cifar/NAG-50.npy") rmsprop = np.load("cifar/RMSprop-50.npy") sgd = np.load("cifar/SGD-50.npy") normal_opts = [adam, nag, rmsprop, sgd] adam.shape / + cell_id="00003-ac9ef81a-74c8-4dd6-b7a2-a36de19e172e" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=10 execution_start=1618435583265 source_hash="966ce497" tags=[] all_losses = np.load("cifar/all_losses.npy") # we're only interested in the last epoch (the most trained one ) all_losses = all_losses[all_losses.shape[0]-1, :, :] all_losses.shape # (N_tests, N_updates) / + cell_id="00004-bf7292ed-36e0-48e2-b81c-4e59fbc80665" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=122 execution_start=1618435822359 source_hash="446e58e4" tags=[] plt.plot(all_losses.transpose()) plt.show() # 10 lines of 50 updates / + cell_id="00004-607c7b94-68bb-42f3-b3b0-48a99bc1169e" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=200 execution_start=1618436231166 source_hash="a6eeffd8" tags=[] # adam and the others trimmed_opts = [opt[:, :50] for opt in normal_opts] plt.plot(trimmed_opts[0].transpose()) plt.show() / + cell_id="00006-766c4a35-50a6-4885-ad14-0b895a1e0483" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=3 execution_start=1618435979091 source_hash="223def7e" tags=[] def to_long_form(data_arr): records = [] for i in range(fit_data.shape[0]): for j in range(fit_data.shape[1]): for k in range(fit_data.shape[2]): records.append((i, j, condition[k], fit_data[i, j, k])) df = pd.DataFrame.from_records(records,columns=['test', 'Steps', 'Optimizer', 'Loss'] ) return df / + cell_id="00007-755045ef-9e0a-49db-9e24-ff1a0c3c9967" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=18 execution_start=1618436335267 source_hash="57e4483b" tags=[] fit_data = trimmed_opts + [all_losses] opt_names = ['Adam', "NAG", "RMSprop", "SGD", "LSTM"] records = [] for opt, name in zip(fit_data, opt_names): print(name) for test in range(opt.shape[0]): for epoch in range(opt.shape[1]): records.append((name, test, epoch, opt[test, epoch])) df = pd.DataFrame.from_records(records, columns=["Optimizer", "test", "Epoch", "Loss"]) / + cell_id="00009-5fd26c61-a3dc-4710-a62f-0d7dff4e191a" deepnote_cell_type="code" 
deepnote_to_be_reexecuted=false execution_millis=48 execution_start=1618436339383 source_hash="f804c160" tags=[] df / + cell_id="00010-b39a5caf-3490-404a-9c31-18c17aab6542" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=5224 execution_start=1618436448268 source_hash="9f3dd75d" tags=[] axis = sns.lineplot(data=df, x='Epoch', y='Loss', hue='Optimizer', ci=95) plt.title("Trained & tested on CIFAR 10") plt.show() / + [markdown] created_in_deepnote_cell=true deepnote_cell_type="markdown" tags=[] / / Created in deepnote.com / Created in Deepnote # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd from tqdm import tqdm # # Load data camera=pd.read_table('./amazon_reviews_us_Camera_v1_00.tsv', error_bad_lines=False) camera.head() grocery=pd.read_table('./amazon_reviews_us_Grocery_v1_00.tsv', error_bad_lines=False) videogames=pd.read_table('./amazon_reviews_us_Video_Games_v1_00.tsv', error_bad_lines=False) watches=pd.read_table('./amazon_reviews_us_Watches_v1_00.tsv', error_bad_lines=False) # # Remove neutral class camera_binary = camera.loc[camera['star_rating'] != 3] camera_binary.head() grocery_binary = grocery.loc[grocery['star_rating'] != 3] videogames_binary = videogames.loc[videogames['star_rating'] != 3] watches_binary = watches.loc[watches['star_rating'] != 3] watches_binary['star_rating'].value_counts() camera_binary.shape # # Take out test data ~20,000 examples (for camera) from sklearn.model_selection import train_test_split camera_train, camera_test = train_test_split(camera_binary, test_size=0.012) camera_pos = camera_train[camera_train['star_rating'].isin([4, 5])] camera_test.shape camera_pos['star_rating'].value_counts() # # Balance data 50,000 : 50,000 #grocery_train, grocery_test = train_test_split(grocery_binary, test_size=0.2) #watches_train, watches_test = train_test_split(watches_binary, test_size=0.2) camera_pos_small = camera_pos.sample(50000) camera_neg = camera_train[camera_train['star_rating'].isin([1, 2])] camera_neg_small = camera_neg.sample(50000) camera_neg_small.shape camera_neg_small['star_rating'].value_counts() camera_pos_small['star_rating'].value_counts() camera_train_small = camera_pos_small.append(camera_neg_small) camera_train_small.shape camera_train_small['star_rating'].value_counts() # + camera_train_small['sentiment_actual']="default" with tqdm(total=len(list(camera_train_small.iterrows()))) as pbar: for index, row in camera_train_small.iterrows(): if row['star_rating'] > 3.0: camera_train_small.at[index, 'sentiment_actual']="pos" else: camera_train_small.at[index, 'sentiment_actual']="neg" pbar.update(1) # + camera_test['sentiment_actual']="default" with tqdm(total=len(list(camera_test.iterrows()))) as pbar: for index, row in camera_test.iterrows(): if row['star_rating'] > 3.0: camera_test.at[index, 'sentiment_actual']="pos" else: camera_test.at[index, 'sentiment_actual']="neg" pbar.update(1) # - camera_train_small['sentiment_actual'].value_counts() camera_test['sentiment_actual'].value_counts() camera_train_small.head() camera_test.to_csv("camera_test.csv", sep='\t') camera_train_small.to_csv("camera_train.csv", sep='\t') # # Do the same for the other 3 datasets (grocery) grocery_binary.shape grocery_train, grocery_test = train_test_split(grocery_binary, test_size=0.009) grocery_test.shape grocery_pos = 
grocery_train[grocery_train['star_rating'].isin([4, 5])] grocery_neg = grocery_train[grocery_train['star_rating'].isin([1, 2])] grocery_neg.shape grocery_pos_small = grocery_pos.sample(50000) grocery_neg_small = grocery_neg.sample(50000) grocery_train_small = grocery_pos_small.append(grocery_neg_small) grocery_train_small.shape # + grocery_train_small['sentiment_actual']="default" with tqdm(total=len(list(grocery_train_small.iterrows()))) as pbar: for index, row in grocery_train_small.iterrows(): if row['star_rating'] == 4 or row['star_rating']==5: grocery_train_small.at[index, 'sentiment_actual']="pos" else: grocery_train_small.at[index, 'sentiment_actual']="neg" pbar.update(1) # + grocery_test['sentiment_actual']="default" with tqdm(total=len(list(grocery_test.iterrows()))) as pbar: for index, row in grocery_test.iterrows(): if row['star_rating'] == 4 or row['star_rating']==5: grocery_test.at[index, 'sentiment_actual']="pos" else: grocery_test.at[index, 'sentiment_actual']="neg" pbar.update(1) # - grocery_test.to_csv("grocery_test.csv", sep='\t') grocery_train_small.to_csv("grocery_train.csv", sep='\t') # # Watches watches_binary.shape watches_train, watches_test = train_test_split(watches_binary, test_size=0.023) watches_test.shape watches_pos = watches_train[watches_train['star_rating'].isin([4, 5])] watches_neg = watches_train[watches_train['star_rating'].isin([1, 2])] watches_neg.shape watches_pos_small = watches_pos.sample(50000) watches_neg_small = watches_neg.sample(50000) watches_train_small = watches_pos_small.append(watches_neg_small) watches_train_small.shape # + watches_train_small['sentiment_actual']="default" with tqdm(total=len(list(watches_train_small.iterrows()))) as pbar: for index, row in watches_train_small.iterrows(): if row['star_rating'] > 3.0: watches_train_small.at[index, 'sentiment_actual']="pos" else: watches_train_small.at[index, 'sentiment_actual']="neg" pbar.update(1) # + watches_test['sentiment_actual']="default" with tqdm(total=len(list(watches_test.iterrows()))) as pbar: for index, row in watches_test.iterrows(): if row['star_rating'] > 3.0: watches_test.at[index, 'sentiment_actual']="pos" else: watches_test.at[index, 'sentiment_actual']="neg" pbar.update(1) # - watches_test.to_csv("watches_test.csv", sep='\t') watches_train_small.to_csv("watches_train.csv", sep='\t') # # Videogames videogames_binary.shape videogames_train, videogames_test = train_test_split(videogames_binary, test_size=0.012) videogames_test.shape videogames_pos = videogames_train[videogames_train['star_rating'].isin([4, 5])] videogames_neg = videogames_train[videogames_train['star_rating'].isin([1, 2])] videogames_neg.shape videogames_pos_small = videogames_pos.sample(50000) videogames_neg_small = videogames_neg.sample(50000) videogames_train_small = videogames_pos_small.append(videogames_neg_small) videogames_train_small.shape # + videogames_train_small['sentiment_actual']="default" with tqdm(total=len(list(videogames_train_small.iterrows()))) as pbar: for index, row in videogames_train_small.iterrows(): if row['star_rating'] > 3.0: videogames_train_small.at[index, 'sentiment_actual']="pos" else: videogames_train_small.at[index, 'sentiment_actual']="neg" pbar.update(1) # + videogames_test['sentiment_actual']="default" with tqdm(total=len(list(videogames_test.iterrows()))) as pbar: for index, row in videogames_test.iterrows(): if row['star_rating'] > 3.0: videogames_test.at[index, 'sentiment_actual']="pos" else: videogames_test.at[index, 'sentiment_actual']="neg" pbar.update(1) # - 
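# As an aside (not part of the original workflow): the `iterrows()` loops above repeat the same labelling logic for every dataset and are slow at this scale; a vectorised helper along these lines would be equivalent. `label_sentiment` is a hypothetical name, shown only as a sketch.

# +
import numpy as np

def label_sentiment(frame):
    # Vectorised version of the row-by-row labelling used above
    frame = frame.copy()
    frame['sentiment_actual'] = np.where(frame['star_rating'] > 3.0, 'pos', 'neg')
    return frame

# e.g. videogames_train_small = label_sentiment(videogames_train_small)
# -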
videogames_test.to_csv("videogames_test.csv", sep='\t') videogames_train_small.to_csv("videogames_train.csv", sep='\t') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # NER with bertreach # # > "Finetuning bertreach for NER" # # - toc: false # - branch: master # - hidden: true # - badges: true # - categories: [irish, ner, bert, bertreach] # + [markdown] id="UldPY2vaxCOJ" # This is a lightly edited version of [this notebook](https://github.com/huggingface/notebooks/blob/master/examples/token_classification.ipynb). # + [markdown] id="X4cRE8IbIrIV" # If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers and 🤗 Datasets. Uncomment the following cell and run it. # + id="MOsHUjgdIrIW" # %%capture # !pip install datasets transformers seqeval # + [markdown] id="1IU9pa_DPSOk" # If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries. # # To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow. # # First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then execute the following cell and input your username and password: # + [markdown] id="ARRVh3-ixvs9" # (Huggingface notebooks skip this bit, but you need to set credential.helper before anything else works). # + id="9RX-uPZLxurJ" # !git config --global credential.helper store # + colab={"base_uri": "https://localhost:8080/", "height": 283, "referenced_widgets": ["445ac7ed80f7436eaa1f81e8f71cb1e5", "", "f7a501140914451d952b3f1528c907d7", "11eedcf6113342f8a5795f40686cb7cd", "efb271ceea364dde8e54a5397615c267", "ed03af8d393543be9abedf5f6ac070d5", "", "", "", "5e16bebe96ad4b6fb4b141675fe6be4c", "", "9594610636864c05a147ad11dfee7063", "418a605ac1d54f8f852c76504a941934", "d981126fe0a649b5aba5d4a7f2c3bbdf", "abc72496d5eb48a787ddd8f4f1ab21e8", "2cccaab9e726491592efc43d2296698c"]} id="npzw8gOYPSOl" outputId="f9c0bfaa-2a9e-4f0f-9746-bb2002a619b2" from huggingface_hub import notebook_login notebook_login() # + [markdown] id="5-CthmJKPSOm" # Then you need to install Git-LFS. Uncomment the following instructions: # + colab={"base_uri": "https://localhost:8080/"} id="7YAk6M5KPSOn" outputId="154c5842-906c-4f27-e26b-aa7ad595cd4b" # !apt install git-lfs # + [markdown] id="CHYxLRR8PSOo" # Make sure your version of Transformers is at least 4.11.0 since the functionality was introduced in that version: # + colab={"base_uri": "https://localhost:8080/"} id="U7ZVbepsPSOp" outputId="8036e6e0-eb72-4b89-ca57-e3d60c0b7f0f" import transformers print(transformers.__version__) # + [markdown] id="HFASsisvIrIb" # You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/master/examples/token-classification). # + [markdown] id="rEJBSTyZIrIb" # # Fine-tuning a model on a token classification task # + [markdown] id="w6vfS60cPSOt" # In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) model to a token classification task, which is the task of predicting a label for each token. 
# # ![Widget inference representing the NER task](https://github.com/huggingface/notebooks/blob/master/examples/images/token_classification.png?raw=1) # # The most common token classification tasks are: # # - NER (Named-entity recognition) Classify the entities in the text (person, organization, location...). # - POS (Part-of-speech tagging) Grammatically classify the tokens (noun, verb, adjective...) # - Chunk (Chunking) Grammatically classify the tokens and group them into "chunks" that go together # # We will see how to easily load a dataset for these kinds of tasks and use the `Trainer` API to fine-tune a model on it. # + [markdown] id="4RRkXuteIrIh" # This notebook is built to run on any token classification task, with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a version with a token classification head and a fast tokenizer (check on [this table](https://huggingface.co/transformers/index.html#bigtable) if this is the case). It might just need some small adjustments if you decide to use a different dataset than the one used here. Depending on you model and the GPU you are using, you might need to adjust the batch size to avoid out-of-memory errors. Set those three parameters, then the rest of the notebook should run smoothly: # + id="zVvslsfMIrIh" task = "ner" # Should be one of "ner", "pos" or "chunk" model_checkpoint = "jimregan/BERTreach" batch_size = 16 # + [markdown] id="whPRbBNbIrIl" # ## Loading the dataset # + [markdown] id="W7QYTpxXIrIl" # We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download the data and get the metric we need to use for evaluation (to compare our model to the benchmark). This can be easily done with the functions `load_dataset` and `load_metric`. # + id="IreSlFmlIrIm" from datasets import load_dataset, load_metric # + [markdown] id="CKx2zKs5IrIq" # For our example here, we'll use the [CONLL 2003 dataset](https://www.aclweb.org/anthology/W03-0419.pdf). The notebook should work with any token classification dataset provided by the 🤗 Datasets library. If you're using your own dataset defined from a JSON or csv file (see the [Datasets documentation](https://huggingface.co/docs/datasets/loading_datasets.html#from-local-files) on how to load them), it might need some adjustments in the names of the columns used. # + colab={"base_uri": "https://localhost:8080/", "height": 67, "referenced_widgets": ["50c7c01d13da42d6b264ea36684e5f1f", "", "", "d6c350910a3a48d485cfeab446eb60ff", "6c1e4ea54ae844a3a2d32a8b4a71e962", "e41938f14b3946cf9874de60a1cf5fa5", "174e47a8e6da4edca874390cab52707e", "f54f7775ee234038a4071ab8f1841186", "", "61451ee6101842a98d05aca771754332", "d5c6208c774d4440894efb480abc1664"]} id="s_AY1ATSIrIq" outputId="f5442b3e-5c6a-409f-ce71-a5b46b894803" datasets = load_dataset("wikiann", "ga") # + [markdown] id="RzfPtOMoIrIu" # The `datasets` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasetdict), which contains one key for the training, validation and test set. # + colab={"base_uri": "https://localhost:8080/"} id="GWiVUF0jIrIv" outputId="4cda094b-9ed3-4a00-fc41-f5a163b3bff2" datasets # + [markdown] id="X_XUcknPPSO1" # We can see the training, validation and test sets all have a column for the tokens (the input texts split into words) and one column of labels for each kind of task we introduced before. 
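# + [markdown]
# Since the script form of this notebook does not keep cell outputs, a quick,
# hedged inspection cell is added here to print the split sizes and column
# names (for wikiann these should include `tokens` and `ner_tags`):

# +
for split_name in ("train", "validation", "test"):
    print(split_name, datasets[split_name].num_rows, datasets[split_name].column_names)
# -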
# + [markdown] id="u3EtYfeHIrIz" # To access an actual element, you need to select a split first, then give an index: # + colab={"base_uri": "https://localhost:8080/"} id="X6HrpprwIrIz" outputId="24e54194-65b2-433a-dba2-7bf1ac7eb5bb" datasets["train"][0] # + [markdown] id="lppNSJaKPSO3" # The labels are already coded as integer ids to be easily usable by our model, but the correspondence with the actual categories is stored in the `features` of the dataset: # + colab={"base_uri": "https://localhost:8080/"} id="vetcKtTJPSO3" outputId="d06b4662-e8c2-4c6f-ea22-67cbc5c9a7a5" datasets["train"].features[f"ner_tags"] # + [markdown] id="URaPOdjRPSO5" # So for the NER tags, 0 corresponds to 'O', 1 to 'B-PER' etc... On top of the 'O' (which means no special entity), there are four labels for NER here, each prefixed with 'B-' (for beginning) or 'I-' (for intermediate), that indicate if the token is the first one for the current group with the label or not: # - 'PER' for person # - 'ORG' for organization # - 'LOC' for location # - 'MISC' for miscellaneous # + [markdown] id="dCCD_uaQPSO5" # Since the labels are lists of `ClassLabel`, the actual names of the labels are nested in the `feature` attribute of the object above: # + colab={"base_uri": "https://localhost:8080/"} id="Q9jfLo3rPSO6" outputId="a258d350-8279-498b-d94a-da31853b5dfb" label_list = datasets["train"].features[f"{task}_tags"].feature.names label_list # + [markdown] id="WHUmphG3IrI3" # To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset (automatically decoding the labels in passing). # + id="i3j8APAoIrI3" from datasets import ClassLabel, Sequence import random import pandas as pd from IPython.display import display, HTML def show_random_elements(dataset, num_examples=10): assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset." picks = [] for _ in range(num_examples): pick = random.randint(0, len(dataset)-1) while pick in picks: pick = random.randint(0, len(dataset)-1) picks.append(pick) df = pd.DataFrame(dataset[picks]) for column, typ in dataset.features.items(): if isinstance(typ, ClassLabel): df[column] = df[column].transform(lambda i: typ.names[i]) elif isinstance(typ, Sequence) and isinstance(typ.feature, ClassLabel): df[column] = df[column].transform(lambda x: [typ.feature.names[i] for i in x]) display(HTML(df.to_html())) # + colab={"base_uri": "https://localhost:8080/", "height": 432} id="SZy5tRB_IrI7" outputId="35f50cf0-4606-48be-97cc-54643321ccba" show_random_elements(datasets["train"]) # + [markdown] id="n9qywopnIrJH" # ## Preprocessing the data # + [markdown] id="YVx71GdAIrJH" # Before we can feed those texts to our model, we need to preprocess them. This is done by a 🤗 Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that model requires. # # To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure: # # - we get a tokenizer that corresponds to the model architecture we want to use, # - we download the vocabulary used when pretraining this specific checkpoint. # # That vocabulary will be cached, so it's not downloaded again the next time we run the cell. 
# + colab={"base_uri": "https://localhost:8080/"} id="eXNLu_-nIrJI" outputId="5981ce18-02da-4278-c75d-9939729142aa" from transformers import RobertaTokenizerFast tokenizer = RobertaTokenizerFast.from_pretrained(model_checkpoint, add_prefix_space=True) # + [markdown] id="Vl6IidfdIrJK" # The following assertion ensures that our tokenizer is a fast tokenizers (backed by Rust) from the 🤗 Tokenizers library. Those fast tokenizers are available for almost all models, and we will need some of the special features they have for our preprocessing. # + id="pZaRFHN7PSO-" import transformers assert isinstance(tokenizer, transformers.PreTrainedTokenizerFast) # + [markdown] id="cfiX-_BgPSO_" # You can check which type of models have a fast tokenizer available and which don't on the [big table of models](https://huggingface.co/transformers/index.html#bigtable). # + [markdown] id="rowT4iCLIrJK" # You can directly call this tokenizer on one sentence: # + colab={"base_uri": "https://localhost:8080/"} id="a5hBlsrHIrJL" outputId="2bd9771e-e59b-460c-a325-45333c61cee2" tokenizer("Is abairt amháin é seo!") # + [markdown] id="B9K1KIg4PSPA" # Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later), you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested. # # If, as is the case here, your inputs have already been split into words, you should pass the list of words to your tokenzier with the argument `is_split_into_words=True`: # + colab={"base_uri": "https://localhost:8080/"} id="qdxB67GzPSPA" outputId="3bedb26b-6e89-4885-a85e-8f3416478539" tokenizer(["Hello", ",", "this", "is", "one", "sentence", "split", "into", "words", "."], is_split_into_words=True) # + [markdown] id="XjXRHpJLPSPB" # Note that transformers are often pretrained with subword tokenizers, meaning that even if your inputs have been split into words already, each of those words could be split again by the tokenizer. Let's look at an example of that: # + colab={"base_uri": "https://localhost:8080/"} id="mi8IJrMFPSPB" outputId="cad513ef-3f29-4e5b-c134-4e290d067ef1" example = datasets["train"][4] print(example["tokens"]) # + colab={"base_uri": "https://localhost:8080/"} id="NKP2znNrPSPC" outputId="93a5dee3-b047-4231-974a-bf569d1cc0ea" tokenized_input = tokenizer(example["tokens"], is_split_into_words=True) tokens = tokenizer.convert_ids_to_tokens(tokenized_input["input_ids"]) print(tokens) # + [markdown] id="aL91xiUePSPC" # Here the words "Zwingmann" and "sheepmeat" have been split in three subtokens. # # This means that we need to do some processing on our labels as the input ids returned by the tokenizer are longer than the lists of labels our dataset contain, first because some special tokens might be added (we can a `[CLS]` and a `[SEP]` above) and then because of those possible splits of words in multiple tokens: # + colab={"base_uri": "https://localhost:8080/"} id="3intBYzJPSPD" outputId="b5296afe-243a-4dc6-f0f2-14c4c81814fd" len(example[f"{task}_tags"]), len(tokenized_input["input_ids"]) # + [markdown] id="YFEDGF_WPSPD" # Thankfully, the tokenizer returns outputs that have a `word_ids` method which can help us. 
# + colab={"base_uri": "https://localhost:8080/"} id="ZjPu1wxoPSPD" outputId="96a09327-cc87-478c-ee60-356f38207b03" print(tokenized_input.word_ids()) # + [markdown] id="WqHQp3otPSPE" # As we can see, it returns a list with the same number of elements as our processed input ids, mapping special tokens to `None` and all other tokens to their respective word. This way, we can align the labels with the processed input ids. # + colab={"base_uri": "https://localhost:8080/"} id="S2bLHIshPSPE" outputId="b5aa29df-fb7b-4eaf-de0b-3e06b66f4847" word_ids = tokenized_input.word_ids() aligned_labels = [-100 if i is None else example[f"{task}_tags"][i] for i in word_ids] print(len(aligned_labels), len(tokenized_input["input_ids"])) # + [markdown] id="cj3LA4UXPSPE" # Here we set the labels of all special tokens to -100 (the index that is ignored by PyTorch) and the labels of all other tokens to the label of the word they come from. Another strategy is to set the label only on the first token obtained from a given word, and give a label of -100 to the other subtokens from the same word. We propose the two strategies here, just change the value of the following flag: # + id="lJkzdIlIPSPF" label_all_tokens = True # + [markdown] id="2C0hcmp9IrJQ" # We're now ready to write the function that will preprocess our samples. We feed them to the `tokenizer` with the argument `truncation=True` (to truncate texts that are bigger than the maximum size allowed by the model) and `is_split_into_words=True` (as seen above). Then we align the labels with the token ids using the strategy we picked: # + id="vc0BSBLIIrJQ" def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True) labels = [] for i, label in enumerate(examples[f"{task}_tags"]): word_ids = tokenized_inputs.word_ids(batch_index=i) previous_word_idx = None label_ids = [] for word_idx in word_ids: # Special tokens have a word id that is None. We set the label to -100 so they are automatically # ignored in the loss function. if word_idx is None: label_ids.append(-100) # We set the label for the first token of each word. elif word_idx != previous_word_idx: label_ids.append(label[word_idx]) # For the other tokens in a word, we set the label to either the current label or -100, depending on # the label_all_tokens flag. else: label_ids.append(label[word_idx] if label_all_tokens else -100) previous_word_idx = word_idx labels.append(label_ids) tokenized_inputs["labels"] = labels return tokenized_inputs # + [markdown] id="0lm8ozrJIrJR" # This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key: # + colab={"base_uri": "https://localhost:8080/"} id="-b70jh26IrJS" outputId="4ffb9712-e409-43d1-991f-239bbfa46c51" tokenize_and_align_labels(datasets['train'][:5]) # + [markdown] id="zS-6iXTkIrJT" # To apply this function on all the sentences (or pairs of sentences) in our dataset, we just use the `map` method of our `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command. 
# + colab={"base_uri": "https://localhost:8080/", "height": 113, "referenced_widgets": ["ee50223163a0429b83391b426dbf4191", "", "", "", "", "", "", "af435b9755a942caba61094ec781870a", "", "", "", "", "", "265a7a3116f4435abadf5b8d5e8ab594", "8f947072da59454c88e18196dc9976d0", "", "", "", "", "", "", "", "fa5a85c7a80642d79abf1fe8f8d8dca4", "c406925717dd42e480e0f6b3957e390f", "df92ae2e9102414aa589c3a1040aed9a", "", "", "6ea5e881272241569d79091f8478ab95", "", "", "", "", "3bda313521644f9284681fc804a8848f"]} id="DDtsaJeVIrJT" outputId="abf1d75d-e60a-4e55-ced6-67c554a7d165" tokenized_datasets = datasets.map(tokenize_and_align_labels, batched=True) # + [markdown] id="voWiw8C7IrJV" # Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again. # # Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. # + [markdown] id="545PP3o8IrJV" # ## Fine-tuning the model # + [markdown] id="FBiW8UpKIrJW" # Now that our data is ready, we can download the pretrained model and fine-tune it. Since all our tasks are about token classification, we use the `AutoModelForTokenClassification` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us. The only thing we have to specify is the number of labels for our problem (which we can get from the features, as seen before): # + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["9cd45896f45a432b8a213df2d36fee1a", "dd73302517d1485f9981fc477fa66e68", "aae9a23e300748709ebd309ec8d5bee4", "5a4a7bcec3524810b8ffb6f3421e5cf6", "9f474e56030c45daa6b359022b7841ff", "2dd373f51945427fa783df4faba16950", "0bb8e705b37949a0a286f9710f643342", "ab33d3a7034d480d97987d38916a12f5", "3d1450e3dab2478586bbbd6680583472", "82189c839ca9483da689c28a69604747", "90c96a4de80c48e39322f80b4ad8a1e0"]} id="TlqNaB8jIrJW" outputId="c26cf91c-1d56-42fb-f4bc-de6f233aa1a3" from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer, AutoConfig config = AutoConfig.from_pretrained(model_checkpoint, id2label={i: label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}) model = AutoModelForTokenClassification.from_pretrained(model_checkpoint, config=config) # + [markdown] id="CczA5lJlIrJX" # The warning is telling us we are throwing away some weights (the `vocab_transform` and `vocab_layer_norm` layers) and randomly initializing some other (the `pre_classifier` and `classifier` layers). This is absolutely normal in this case, because we are removing the head used to pretrain the model on a masked language modeling objective and replacing it with a new head for which we don't have pretrained weights, so the library warns us we should fine-tune this model before using it for inference, which is exactly what we are going to do. 
# + [markdown] id="_N8urzhyIrJY" # To instantiate a `Trainer`, we will need to define three more things. The most important is the [`TrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.html#transformers.TrainingArguments), which is a class that contains all the attributes to customize the training. It requires one folder name, which will be used to save the checkpoints of the model, and all other arguments are optional: # + colab={"base_uri": "https://localhost:8080/"} id="Bliy8zgjIrJY" outputId="f5c5761b-b0d0-4121-9789-a4925e433457" model_name = model_checkpoint.split("/")[-1] args = TrainingArguments( f"BERTreach-finetuned-{task}", evaluation_strategy = "epoch", learning_rate=2e-5, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, num_train_epochs=5, weight_decay=0.01, push_to_hub=True, ) # + [markdown] id="km3pGVdTIrJc" # Here we set the evaluation to be done at the end of each epoch, tweak the learning rate, use the `batch_size` defined at the top of the notebook and customize the number of epochs for training, as well as the weight decay. # # The last argument to setup everything so we can push the model to the [Hub](https://huggingface.co/models) regularly during training. Remove it if you didn't follow the installation steps at the top of the notebook. If you want to save your model locally in a name that is different than the name of the repository it will be pushed, or if you want to push your model under an organization and not your name space, use the `hub_model_id` argument to set the repo name (it needs to be the full name, including your namespace: for instance `"sgugger/bert-finetuned-ner"` or `"huggingface/bert-finetuned-ner"`). # + [markdown] id="VKETFK5OPSPL" # Then we will need a data collator that will batch our processed examples together while applying padding to make them all the same size (each pad will be padded to the length of its longest example). There is a data collator for this task in the Transformers library, that not only pads the inputs, but also the labels: # + id="SB-5qhhdPSPL" from transformers import DataCollatorForTokenClassification data_collator = DataCollatorForTokenClassification(tokenizer) # + [markdown] id="pFnZ8C9zPSPM" # The last thing to define for our `Trainer` is how to compute the metrics from the predictions. Here we will load the [`seqeval`](https://github.com/chakki-works/seqeval) metric (which is commonly used to evaluate results on the CONLL dataset) via the Datasets library. 
# + id="MehhnsFLPSPM" metric = load_metric("seqeval") # + [markdown] id="TXgIHJGrPSPM" # This metric takes list of labels for the predictions and references: # + colab={"base_uri": "https://localhost:8080/"} id="W1Pl8eDRPSPN" outputId="8a92dd56-fd90-426e-e0d9-d8adf4285e07" labels = [label_list[i] for i in example[f"{task}_tags"]] metric.compute(predictions=[labels], references=[labels]) # + [markdown] id="7sZOdRlRIrJd" # So we will need to do a bit of post-processing on our predictions: # - select the predicted index (with the maximum logit) for each token # - convert it to its string label # - ignore everywhere we set a label of -100 # # The following function does all this post-processing on the result of `Trainer.evaluate` (which is a namedtuple containing predictions and labels) before applying the metric: # + id="UmvbnJ9JIrJd" import numpy as np def compute_metrics(p): predictions, labels = p predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [label_list[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] results = metric.compute(predictions=true_predictions, references=true_labels) return { "precision": results["overall_precision"], "recall": results["overall_recall"], "f1": results["overall_f1"], "accuracy": results["overall_accuracy"], } # + [markdown] id="rXuFTAzDIrJe" # Note that we drop the precision/recall/f1 computed for each category and only focus on the overall precision/recall/f1/accuracy. # # Then we just need to pass all of this along with our datasets to the `Trainer`: # + colab={"base_uri": "https://localhost:8080/"} id="imY1oC3SIrJf" outputId="c73205ff-288a-420b-a0a2-acd27ee201bd" trainer = Trainer( model, args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation"], data_collator=data_collator, tokenizer=tokenizer, compute_metrics=compute_metrics ) # + [markdown] id="CdzABDVcIrJg" # We can now finetune our model by just calling the `train` method: # + colab={"base_uri": "https://localhost:8080/", "height": 842} id="6aEz13C_PSPP" outputId="b4d28ba3-379e-41b7-e237-e4b54a1009bf" trainer.train() # + [markdown] id="CKASz-2vIrJi" # The `evaluate` method allows you to evaluate again on the evaluation dataset or on another dataset: # + colab={"base_uri": "https://localhost:8080/", "height": 283} id="UOUcBkX8IrJi" outputId="75b87c23-3cfc-42dc-9b7d-8e8b6a2cef7a" trainer.evaluate() # + [markdown] id="1358wwuiPSPQ" # To get the precision/recall/f1 computed for each category now that we have finished training, we can apply the same function as before on the result of the `predict` method: # + colab={"base_uri": "https://localhost:8080/", "height": 404} id="_06GH4g0PSPQ" outputId="4a38c3ca-c1d4-45c1-f54e-899e2eb523b4" predictions, labels, _ = trainer.predict(tokenized_datasets["validation"]) predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [label_list[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] results = metric.compute(predictions=true_predictions, references=true_labels) results # + [markdown] id="rc5EPoMYPSPR" # You can now upload the result of the training 
to the Hub, just execute this instruction: # + colab={"base_uri": "https://localhost:8080/", "height": 413, "referenced_widgets": ["8ffb7f6f6b9b4609b5738ff62bc14e64", "f4465aba227f401e97c72b21497e1299", "a1f81973f254436894e767556a3d3a77", "4341e8385e0d48b9b3bd00a83074df14", "", "", "", "", "", "", "fee9e8f7d44a4a8e8dee77b951bf119a", "d273e45038064c25ae7d196f74c06aa3", "", "efc83217251e4fc283d30badf957d35a", "6be0fbacf9824cd0bd309e85ba542337", "", "", "", "", "e269877f6a62482f98338466c27eb35c", "91b671a50c5c4a85925c0ec50db723ca", "de50243e93aa44f494a49e155dd2bf41", "", "fda78f6340dc4b31a1dfa1997c89a668", "cd77ce81e2d646069a38cfeea2005e0b", "", "30490d6ddada4768b113543e3ea4f224", "50cfd93cd58a47dfa9d57ff5f0ca9052", "", "", "6ed1124846a84b2fa8968c44ed6a7c55", "", "13dee99469594071a750bd8512cb55ad", "", "1111a433384743648f5aca0fbf49a910", "", "3b2367739fe34503b38b73b2801e7def", "979d75386902447e842631d6bf032431", "76556862f0a5497ba6692323301cc6f8", "", "0111982f12ca4949b4996f1a8e1ad17e", "", "6c9e6f3130164dd3bdeecf603c46e13e", "023aa82684494291bdf18985e362a1c0", "", "be39f5cdbabb4eb0aed79884e170e04d", "25fe968dbc1146ed81f087e2742d2e49", "a6c9e9a3635d42f08cbf43265835ab3e", "1c7879ac2a8d485db45ae840dbaf9067", "", "", "", "", "341c8604e0074e33b2d43f539fa4fa7f", "79ad0dc00c6a41c7b42315898e246adc"]} id="BLlw2RnTPSPR" outputId="3d5d5f15-3bc1-422a-f691-b2a6420ce0dd" trainer.push_to_hub() # + [markdown] id="14VrOnlDPSPR" # You can now share this model with all your friends, family, favorite pets: they can all load it with the identifier `"your-username/the-name-you-picked"` so for instance: # # ```python # from transformers import AutoModelForTokenClassification # # model = AutoModelForTokenClassification.from_pretrained("sgugger/my-awesome-model") # ``` # + id="FN7gcQ-MPSPS" # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (pytorch_src) # language: python # name: pytorch_src # --- import numpy as np from glob import glob from matplotlib import pyplot as plt files = glob("coords/alldata03.epoch01*.npy") files # + data = {"trans":{"train": {"nd":0, "d":0}, "test": {"nd":0, "d":0}}, "rnn" :{"train": {"nd":0, "d":0}, "test": {"nd":0, "d":0}}} for i in files: if "test" in i: first = 'test' else: first = 'train' if 'ndrmsd' in i: sec = 'nd' else: sec = 'd' with open(i, "rb") as f: data["trans"][first][sec] = np.load(f) data # - with open("/home/jok120/seq2struct/notebooks/structure/predictions_100118/drmsds.npy", "rb") as f: data["rnn"]["test"]["nd"] = np.load(f) l = open("/home/jok120/Desktop/transformer_compare.log").read().splitlines() l = np.array(list(map(float, l[:-1]))) l, len(l) data["trans"]["test"]["d"] data["rnn"]["test"]["d"] = l plt.hist(data["rnn"]["test"]["d"], label="RNN", bins=20) plt.hist(data["trans"]["test"]["d"], alpha=0.7, label="Transformer", bins=20) plt.xlabel("DRMSD") plt.ylabel("# Structures") plt.legend() plt.title("Distribution of DRMSD loss on test set") plt.savefig("transformer_vs_rnn_comparison.png", dpi=300) print(data["rnn"]["test"]["d"].mean(), data["rnn"]["test"]["d"].std()) print(data["trans"]["test"]["d"].mean(), data["trans"]["test"]["d"].std()) def make_2_dist_plot(d1, d2, label1, label2, filename="loss_comparison.png", myset="test"): plt.hist(d1, alpha=0.7, label=label1) plt.hist(d2, alpha=0.7, label=label2) plt.xlabel("Loss") plt.ylabel("Count") plt.legend() plt.title("Distribution of DRMSD on {0} set".format(myset)) plt.savefig(filename, 
dpi=300) for s, d in zip(["d1", "d2"], [d1, d2]): print("{0}: mean {1:.2f}, std {2:.2f}".format(s, d.mean(), d.std())) len(data["trans"]["test"]["d"]) + 2490 r, t_d, t = data["rnn"]["test"]["nd"], data["trans"]["test"]["d"], data["trans"]["test"]["nd"] r.max(), r.mean(), r.std(), t.max(), t.mean(), t.std() t_d.max(), t_d.mean(), t_d.std() # # Comparing MSE/DRMSD vs DRMSD only drmsd_only = np.load("coords/all_trighelix04_test_drmsd.npy") mse_drmsd = np.load("coords/all_trighelix04_newloss_test_drmsd.npy") make_2_dist_plot(drmsd_only, mse_drmsd, "DRMSD only", "MSE + DRMSD", filename="drmsd_vs_combined.png") drmsd_only_train = np.load("coords/all_trighelix04_train_drmsd.npy") mse_drmsd_train = np.load("coords/all_trighelix04_newloss_train_drmsd.npy") make_2_dist_plot(drmsd_only_train, mse_drmsd_train, "DRMSD only", "MSE + DRMSD", filename="drmsd_vs_combined_train.png", myset="train") # # Plotting loss over training iteration # !pwd # + def plot_logfile_losses(trainpath, validpath): with open(trainpath) as trainlog, \ open(validpath) as validlog: trainlog = trainlog.read().splitlines() validlog = validlog.read().splitlines() train_vals = np.array([float(x.split(", ")[1]) for x in trainlog[1:]]) valid_vals = np.array([float(x.split(", ")[1]) for x in validlog[1:]]) plt.plot(train_vals, label="Train") plt.plot(valid_vals, label="Validation") plt.xlabel("Epoch") plt.ylabel("MSE+DRMSD Loss") min_timestep = np.where(valid_vals == valid_vals.min())[0][0] plt.axvline(x=min_timestep, color="black", linestyle="--", label="Minimum validation loss") plt.legend() plt.title("Model Loss, Trained on ${HardHelices}$") plt.savefig("hardhelices_training_loss.png", dpi=300) print("Min at", min_timestep) # - plot_logfile_losses("checkpoints/all_trighelix04_newloss.train.log", "checkpoints/all_trighelix04_newloss.valid.log") np.where(valid_vals == valid_vals.min())[0][0] def plot_trainlogfile_losses(trainpath, validpath): with open(trainpath) as trainlog, \ open(validpath) as validlog: trainlog = trainlog.read().splitlines() train_vals = np.array([float(x.strip()) for x in trainlog[1:]]) train_vals_iternum = np.array(list(range(0,372*100, 100))) validlog = validlog.read().splitlines() valid_vals = np.array([float(x.split(", ")[1]) for x in validlog[1:]]) valid_vals_iternum = np.array(list(range(9300, 372*100+1, 9300))) print(len(train_vals), len(train_vals_iternum)) plt.plot(x=valid_vals_iternum, y=valid_vals, label="Validation") print(len(valid_vals), len(valid_vals_iternum)) # plt.plot(x= train_vals_iternum, y=train_vals, label="Train") plt.xlabel("Iteration") plt.ylabel("DRMSD Loss") min_timestep = np.where(train_vals == train_vals.min())[0][0] plt.axvline(x=min_timestep, color="black", linestyle="--", label="Minimum train loss") plt.legend() with open("checkpoints/alldata03.train.log") as trainlog, \ open("checkpoints/alldata03.valid.log") as validlog: trainlog = trainlog.read().splitlines() train_vals = np.array([float(x.strip()) for x in trainlog[1:]]) train_vals_iternum = np.array(list(range(0,372*100, 100))) validlog = validlog.read().splitlines() valid_vals = np.array([float(x.split(", ")[1]) for x in validlog[1:]]) valid_vals_iternum = np.array(list(range(9300, 372*100+1, 9300))) plt.plot(train_vals_iternum, train_vals, label="Train") plt.plot(valid_vals_iternum, valid_vals, label="Validation") plt.xlabel("Iteration") plt.ylabel("DRMSD Loss") min_timestep = np.where(train_vals == train_vals.min())[0][0] plt.axvline(x=min_timestep, color="black", linestyle="--", label="Minimum train loss") plt.legend() 
plt.title("Model Loss, Trained on ${FullDataset}$") plt.savefig("fulldataset_training_loss.png", dpi=300) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="nPSZauRyx58F" colab_type="text" # # Demo of kNN # # First we load some favourites # + id="DHjLDJ2bjutJ" colab_type="code" colab={} import numpy as np # vectors etc import matplotlib.pyplot as plt # for plotting # + [markdown] id="gpn1o3tTyG-5" colab_type="text" # Downloading or generating data to play with is easy using sklearn. # + id="GkrlVZ2j_ypc" colab_type="code" colab={} # load the data generators and make some 2d classification data from sklearn.datasets import make_classification, make_moons X, y = make_moons(n_samples=100, noise=.1) # X, y = make_classification(n_samples=100, n_features=2, n_informative=2, n_redundant=0, n_repeated=0, n_classes=4, n_clusters_per_class=1) # + [markdown] id="nobNxZXiybfz" colab_type="text" # It looks like this # + id="yfKN-p3Dya88" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 438} outputId="114cbdcd-34d5-498d-b2c6-b015f65a69f0" plt.figure(figsize=(5, 5), dpi=100) plt.scatter(X[:, 0], X[:, 1], c=y, cmap='tab10') plt.show() # + [markdown] id="R6cBHjg-ygfG" colab_type="text" # Making a classifier with sklearn (almost) always entails: # # 1. Import classifier # 2. Instantiate with defaoult or chosen hyperparameters # 3. Run the fit() method with the training data # # Note that there is no training/test split here for now. # + id="H2JdR4dmb5lo" colab_type="code" outputId="201604d6-033d-4803-c394-fe3e5fe6dd09" colab={"base_uri": "https://localhost:8080/", "height": 71} from sklearn.neighbors import KNeighborsClassifier classifier = KNeighborsClassifier(n_neighbors=3) classifier.fit(X, y) # + [markdown] id="Vx6NDf7izJVg" colab_type="text" # This generates a grid of test points. If we feed these to the classfier, we can visualise the different decision regions. # + id="4RPZDmwubboO" colab_type="code" colab={} h = .1 x_min, x_max = X[:, 0].min() - h, X[:, 0].max() + h y_min, y_max = X[:, 1].min() - h, X[:, 1].max() + h xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z = classifier.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) # + [markdown] id="895TXo61zYI5" colab_type="text" # Now for plotting it. # + id="Z359aKipmRGw" colab_type="code" outputId="8fc3ec8d-82dc-46ef-deeb-b553fc91bb7e" colab={"base_uri": "https://localhost:8080/", "height": 438} plt.figure(figsize=(10, 5), dpi=100) plt.subplot(1, 2, 1) plt.contourf(xx, yy, Z, alpha=.3, cmap='tab10')#, levels=[0]) plt.scatter(X[:, 0], X[:, 1], c=y, cmap='tab10') plt.subplot(1, 2, 2) plt.scatter(X[:, 0], X[:, 1], c=y, cmap='tab10') plt.show() # + [markdown] id="6w-ySjg6zbri" colab_type="text" # Running the score() method compares classifiers performancce on data set X with target labels y. Since there was no train/test split here, the output is the accuracy on the training set. 
# + id="YTP9E0P7cWO7" colab_type="code" outputId="88982733-50b0-47c1-f169-cba4ce2fe706" colab={"base_uri": "https://localhost:8080/", "height": 35} print("Training set accuract %.1f%%" % (classifier.score(X, y)*100)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # Filtering out the warnings import warnings warnings.filterwarnings('ignore') # + # Importing the required libraries import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt # - # # IMDb Movie Assignment # # You have the data for the 100 top-rated movies from the past decade along with various pieces of information about the movie, its actors, and the voters who have rated these movies online. In this assignment, you will try to find some interesting insights into these movies and their voters, using Python. # ## Task 1: Reading the data # - ### Subtask 1.1: Read the Movies Data. # # Read the movies data file provided and store it in a dataframe `movies`. # Read the csv file using 'read_csv'. Please write your dataset location here. movies = pd.read_csv('Movie+Assignment+Data.csv') movies.head() # - ### Subtask 1.2: Inspect the Dataframe # # Inspect the dataframe for dimensions, null-values, and summary of different numeric columns. # Check the number of rows and columns in the dataframe movies.shape # Check the column-wise info of the dataframe movies.info() # Check the summary for the numeric columns movies.describe() # ## Task 2: Data Analysis # # Now that we have loaded the dataset and inspected it, we see that most of the data is in place. As of now, no data cleaning is required, so let's start with some data manipulation, analysis, and visualisation to get various insights about the data. # - ### Subtask 2.1: Reduce those Digits! # # These numbers in the `budget` and `gross` are too big, compromising its readability. Let's convert the unit of the `budget` and `gross` columns from `$` to `million $` first. # Divide the 'gross' and 'budget' columns by 1000000 to convert '$' to 'million $' movies['Gross'] = movies['Gross']/(10**6) movies['budget'] = movies['budget']/(10**6) movies.head() # - ### Subtask 2.2: Let's Talk Profit! # # 1. Create a new column called `profit` which contains the difference of the two columns: `gross` and `budget`. # 2. Sort the dataframe using the `profit` column as reference. # 3. Extract the top ten profiting movies in descending order and store them in a new dataframe - `top10`. # 4. Plot a scatter or a joint plot between the columns `budget` and `profit` and write a few words on what you observed. # 5. Extract the movies with a negative profit and store them in a new dataframe - `neg_profit` # Create the new column named 'profit' by subtracting the 'budget' column from the 'gross' column movies['profit'] = movies['Gross'] - movies['budget'] movies.head() # Sort the dataframe with the 'profit' column as reference using the 'sort_values' function. Make sure to set the argument #'ascending' to 'False' movies.sort_values(by='profit',ascending=False, inplace=True) movies.reset_index(drop=True, inplace=True) # Get the top 10 profitable movies by using position based indexing. 
Specify the rows till 10 (0-9) top10 = movies.iloc[0:10] top10 #Plot profit vs budget plt.scatter(x=movies['budget'],y=movies['profit']) plt.title('Profit vs Budget graph',fontdict={'fontsize':20}) plt.ylabel('Profit in Million',fontdict={'fontsize':15}) plt.xlabel('Budget in Million',fontdict={'fontsize':15}) plt.show() # The dataset contains the 100 best performing movies from the year 2010 to 2016. However, the scatter plot tells a different story. You can notice that there are some movies with negative profit. Although good movies do incur losses, but there appear to be quite a few movie with losses. What can be the reason behind this? Lets have a closer look at this by finding the movies with negative profit. # + #Find the movies with negative profit neg_profit=movies[movies['profit']<0].sort_values(by='profit', ascending=True) neg_profit # - # **`Checkpoint 1:`** Can you spot the movie `Tangled` in the dataset? You may be aware of the movie 'Tangled'. Although its one of the highest grossing movies of all time, it has negative profit as per this result. If you cross check the gross values of this movie (link: https://www.imdb.com/title/tt0398286/), you can see that the gross in the dataset accounts only for the domestic gross and not the worldwide gross. This is true for may other movies also in the list. # - ### Subtask 2.3: The General Audience and the Critics # # You might have noticed the column `MetaCritic` in this dataset. This is a very popular website where an average score is determined through the scores given by the top-rated critics. Second, you also have another column `IMDb_rating` which tells you the IMDb rating of a movie. This rating is determined by taking the average of hundred-thousands of ratings from the general audience. # # As a part of this subtask, you are required to find out the highest rated movies which have been liked by critics and audiences alike. # 1. Firstly you will notice that the `MetaCritic` score is on a scale of `100` whereas the `IMDb_rating` is on a scale of 10. First convert the `MetaCritic` column to a scale of 10. # 2. Now, to find out the movies which have been liked by both critics and audiences alike and also have a high rating overall, you need to - # - Create a new column `Avg_rating` which will have the average of the `MetaCritic` and `Rating` columns # - Retain only the movies in which the absolute difference(using abs() function) between the `IMDb_rating` and `Metacritic` columns is less than 0.5. Refer to this link to know how abs() funtion works - https://www.geeksforgeeks.org/abs-in-python/ . # - Sort these values in a descending order of `Avg_rating` and retain only the movies with a rating equal to or greater than `8` and store these movies in a new dataframe `UniversalAcclaim`. 
# # Change the scale of MetaCritic movies['MetaCritic'] = movies['MetaCritic']/10 # Find the average ratings movies['Avg_rating']=movies.loc[:,['MetaCritic','IMDb_rating'] ].mean(axis=1) movies.Avg_rating.describe() # + # Sort in descending order of average rating # movies.sort_values(by='Avg_rating',ascending=False) # movies.head() movies[['Title',"MetaCritic",'IMDb_rating','Avg_rating']].sort_values(by = "Avg_rating", ascending=False) # + # Find the movies with metacritic-Imdb rating < 0.5 and also with an average rating of >= 8 (sorted in descending order) UniversalAcclaim = movies[(abs(movies['MetaCritic']-movies['IMDb_rating'])<0.5) & (movies['Avg_rating']>=8.0)].sort_values(by='Avg_rating', ascending=False) UniversalAcclaim.reset_index(drop=True, inplace=True) UniversalAcclaim # - # **`Checkpoint 2:`** Can you spot a `Star Wars` movie in your final dataset? # - ### Subtask 2.4: Find the Most Popular Trios - I # # You're a producer looking to make a blockbuster movie. There will primarily be three lead roles in your movie and you wish to cast the most popular actors for it. Now, since you don't want to take a risk, you will cast a trio which has already acted in together in a movie before. The metric that you've chosen to check the popularity is the Facebook likes of each of these actors. # # The dataframe has three columns to help you out for the same, viz. `actor_1_facebook_likes`, `actor_2_facebook_likes`, and `actor_3_facebook_likes`. Your objective is to find the trios which has the most number of Facebook likes combined. That is, the sum of `actor_1_facebook_likes`, `actor_2_facebook_likes` and `actor_3_facebook_likes` should be maximum. # Find out the top 5 popular trios, and output their names in a list. # # + # To check if likes column for 3 actors of each movie has any null value or not # - movies ["actor_1_facebook_likes"].isnull().sum() movies ["actor_2_facebook_likes"].isnull().sum() movies ["actor_3_facebook_likes"].isnull().sum() #cleaning actor_x_facebook_likes rows coz they have NaN values movies ["actor_1_facebook_likes"] = movies["actor_1_facebook_likes"].replace(np.NaN,0) movies ["actor_2_facebook_likes"] = movies["actor_2_facebook_likes"].replace(np.NaN,0) movies ["actor_3_facebook_likes"] = movies["actor_3_facebook_likes"].replace(np.NaN,0) # + # Write your code here movies['Facebook_likes'] = movies['actor_1_facebook_likes']+movies['actor_2_facebook_likes']+movies['actor_3_facebook_likes'] movies.sort_values(by='Facebook_likes', ascending = False, inplace=True) movies.iloc[0:5] # + #Top 5 popular trios and the output of their names as a list. top5PopularTrio_List=movies.head(5)[['actor_1_name','actor_2_name','actor_3_name']].values.tolist() top5PopularTrio_List # - # - ### Subtask 2.5: Find the Most Popular Trios - II # # In the previous subtask you found the popular trio based on the total number of facebook likes. Let's add a small condition to it and make sure that all three actors are popular. The condition is **none of the three actors' Facebook likes should be less than half of the other two**. For example, the following is a valid combo: # - actor_1_facebook_likes: 70000 # - actor_2_facebook_likes: 40000 # - actor_3_facebook_likes: 50000 # # But the below one is not: # - actor_1_facebook_likes: 70000 # - actor_2_facebook_likes: 40000 # - actor_3_facebook_likes: 30000 # # since in this case, `actor_3_facebook_likes` is 30000, which is less than half of `actor_1_facebook_likes`. 
# # Having this condition ensures that you aren't getting any unpopular actor in your trio (since the total likes calculated in the previous question doesn't tell anything about the individual popularities of each actor in the trio.). # # You can do a manual inspection of the top 5 popular trios you have found in the previous subtask and check how many of those trios satisfy this condition. Also, which is the most popular trio after applying the condition above? Write your answers in the markdown cell provided below. # **Write your answers below.** # # - **`No. of trios that satisfy the above condition:`** # 3 # # ['', '', ''] # # ['', '', ''] # # ['', '', ''] # # - **`Most popular trio after applying the condition:`** # # ['', '', ''] with 79000 TotalLikes # **`Optional:`** Even though you are finding this out by a natural inspection of the dataframe, can you also achieve this through some *if-else* statements to incorporate this. You can try this out on your own time after you are done with the assignment. # + # Your answer here (optional and not graded) # - # - ### Subtask 2.6: Runtime Analysis # # There is a column named `Runtime` in the dataframe which primarily shows the length of the movie. It might be intersting to see how this variable this distributed. Plot a `histogram` or `distplot` of seaborn to find the `Runtime` range most of the movies fall into. # Runtime histogram/density plot sns.histplot(movies["Runtime"]) plt.title('Runtime Distribution of movies') plt.xlabel('RunTime in seconds') plt.ylabel('Count of Movies') plt.show() # Runtime distplot/density plot sns.distplot(movies["Runtime"]) plt.title('Runtime Distribution of movies') plt.show() # **`Checkpoint 3:`** Most of the movies appear to be sharply 2 hour-long. # - ### Subtask 2.7: R-Rated Movies # # Although R rated movies are restricted movies for the under 18 age group, still there are vote counts from that age group. Among all the R rated movies that have been voted by the under-18 age group, find the top 10 movies that have the highest number of votes i.e.`CVotesU18` from the `movies` dataframe. Store these in a dataframe named `PopularR`. # + # Write your code here PopularR = movies[movies['content_rating']=='R'].sort_values(by='CVotesU18', ascending=False).head(10) PopularR.reset_index(drop=True, inplace=True) PopularR[['Title', 'CVotesU18']] # - # **`Checkpoint 4:`** Are these kids watching `Deadpool` a lot? --- **YES** # # # ## Task 3 : Demographic analysis # # If you take a look at the last columns in the dataframe, most of these are related to demographics of the voters (in the last subtask, i.e., 2.8, you made use one of these columns - CVotesU18). We also have three genre columns indicating the genres of a particular movie. We will extensively use these columns for the third and the final stage of our assignment wherein we will analyse the voters across all demographics and also see how these vary across various genres. So without further ado, let's get started with `demographic analysis`. # - ### Subtask 3.1 Combine the Dataframe by Genres # # There are 3 columns in the dataframe - `genre_1`, `genre_2`, and `genre_3`. As a part of this subtask, you need to aggregate a few values over these 3 columns. # 1. First create a new dataframe `df_by_genre` that contains `genre_1`, `genre_2`, and `genre_3` and all the columns related to **CVotes/Votes** from the `movies` data frame. There are 47 columns to be extracted in total. # 2. 
Now, Add a column called `cnt` to the dataframe `df_by_genre` and initialize it to one. You will realise the use of this column by the end of this subtask. # 3. First group the dataframe `df_by_genre` by `genre_1` and find the sum of all the numeric columns such as `cnt`, columns related to CVotes and Votes columns and store it in a dataframe `df_by_g1`. # 4. Perform the same operation for `genre_2` and `genre_3` and store it dataframes `df_by_g2` and `df_by_g3` respectively. # 5. Now that you have 3 dataframes performed by grouping over `genre_1`, `genre_2`, and `genre_3` separately, it's time to combine them. For this, add the three dataframes and store it in a new dataframe `df_add`, so that the corresponding values of Votes/CVotes get added for each genre.There is a function called `add()` in pandas which lets you do this. You can refer to this link to see how this function works. https://pandas.pydata.org/pandas-docs/version/0.23.4/generated/pandas.DataFrame.add.html # 6. The column `cnt` on aggregation has basically kept the track of the number of occurences of each genre.Subset the genres that have atleast 10 movies into a new dataframe `genre_top10` based on the `cnt` column value. # 7. Now, take the mean of all the numeric columns by dividing them with the column value `cnt` and store it back to the same dataframe. We will be using this dataframe for further analysis in this task unless it is explicitly mentioned to use the dataframe `movies`. # 8. Since the number of votes can't be a fraction, type cast all the CVotes related columns to integers. Also, round off all the Votes related columns upto two digits after the decimal point. # movies.info() # + # Create the dataframe df_by_genre df_by_genre=movies.iloc[:,11:60] df_by_genre = df_by_genre.drop(columns=['MetaCritic', 'Runtime']) df_by_genre.shape # + # Create a column cnt and initialize it to 1 df_by_genre['cnt']=1 # + # Group the movies by individual genres df_by_g1 = df_by_genre.groupby(by="genre_1").sum() df_by_g2 = df_by_genre.groupby(by="genre_2").sum() df_by_g3 = df_by_genre.groupby(by="genre_3").sum() # + # Add the grouped data frames and store it in a new data frame df_add = df_by_g1.add(df_by_g2, fill_value=0) df_add = df_add.add(df_by_g3, fill_value=0) df_add # + # Extract genres with atleast 10 occurences genre_top10 = df_add[df_add['cnt']>=10] genre_top10 # + # Take the mean for every column by dividing with cnt genre_top10.iloc[:, 0:44] = genre_top10.iloc[:, 0:44].divide(genre_top10.cnt, axis = 0) # + # Rounding off the columns of Votes to two decimals genre_top10 = genre_top10.apply(lambda x: round(x,2), axis=0) genre_top10.head() # + # Converting CVotes to int type CVotes=[] for i in genre_top10.columns: if i.startswith("CVotes"): CVotes.append(i) genre_top10[CVotes]=genre_top10[CVotes].astype('int32') genre_top10.info() # - # If you take a look at the final dataframe that you have gotten, you will see that you now have the complete information about all the demographic (Votes- and CVotes-related) columns across the top 10 genres. We can use this dataset to extract exciting insights about the voters! # - ### Subtask 3.2: Genre Counts! # # Now let's derive some insights from this data frame. Make a bar chart plotting different genres vs cnt using seaborn. # + # Countplot for genres genre_top10.cnt.plot.bar() plt.title('Genre vs Count of movies') plt.xlabel('Genre') plt.ylabel('Count') plt.show() # - # **`Checkpoint 5:`** Is the bar for `Drama` the tallest? 
----- **YES** # - ### Subtask 3.3: Gender and Genre # # If you have closely looked at the Votes- and CVotes-related columns, you might have noticed the suffixes `F` and `M` indicating Female and Male. Since we have the vote counts for both males and females, across various age groups, let's now see how the popularity of genres vary between the two genders in the dataframe. # # 1. Make the first heatmap to see how the average number of votes of males is varying across the genres. Use seaborn heatmap for this analysis. The X-axis should contain the four age-groups for males, i.e., `CVotesU18M`,`CVotes1829M`, `CVotes3044M`, and `CVotes45AM`. The Y-axis will have the genres and the annotation in the heatmap tell the average number of votes for that age-male group. # # 2. Make the second heatmap to see how the average number of votes of females is varying across the genres. Use seaborn heatmap for this analysis. The X-axis should contain the four age-groups for females, i.e., `CVotesU18F`,`CVotes1829F`, `CVotes3044F`, and `CVotes45AF`. The Y-axis will have the genres and the annotation in the heatmap tell the average number of votes for that age-female group. # # 3. Make sure that you plot these heatmaps side by side using `subplots` so that you can easily compare the two genders and derive insights. # # 4. Write your any three inferences from this plot. You can make use of the previous bar plot also here for better insights. # Refer to this link- https://seaborn.pydata.org/generated/seaborn.heatmap.html. You might have to plot something similar to the fifth chart in this page (You have to plot two such heatmaps side by side). # # 5. Repeat subtasks 1 to 4, but now instead of taking the CVotes-related columns, you need to do the same process for the Votes-related columns. These heatmaps will show you how the two genders have rated movies across various genres. # # You might need the below link for formatting your heatmap. # https://stackoverflow.com/questions/56942670/matplotlib-seaborn-first-and-last-row-cut-in-half-of-heatmap-plot # # - Note : Use `genre_top10` dataframe for this subtask # + # 1st set of heat maps for CVotes-related columns plt.figure(figsize=(20,8)) plt.subplot(1,2,1) heatmap_m = sns.heatmap(genre_top10.iloc[:,13:23:3],annot=True,cmap="Spectral_r") bottom, top = heatmap_m.get_ylim() heatmap_m.set_ylim(bottom + 0.5, top - 0.5) plt.subplot(1,2,2) heatmap_f = sns.heatmap(genre_top10.iloc[:,14:24:3],annot=True,cmap="Spectral_r") bottom, top = heatmap_f.get_ylim() heatmap_f.set_ylim(bottom + 0.5, top - 0.5) plt.show() # - # **`Inferences:`** A few inferences that can be seen from the heatmap above is that males have voted more than females, and Sci-Fi appears to be most popular among the 18-29 age group irrespective of their gender. What more can you infer from the two heatmaps that you have plotted? Write your three inferences/observations below: # - Inference 1: **No. of votes from 18-29 and 30-44 is more in comparion to other age groups, both for Female and Male.** # # # - Inference 2: **After Sci-Fi, The movie genre most voted by Male is Action and Thriller. It is the same for Female group.** # # # - Inference 3: **Romance is the least voted by men of all age groups. 
Crime is the least voted by females of all age groups.** # + # 2nd set of heat maps for Votes-related columns plt.figure(figsize=(20,8)) plt.subplot(1,2,1) heatmap_m = sns.heatmap(genre_top10.iloc[:,30:40:3],annot=True,cmap="Spectral_r") bottom, top = heatmap_m.get_ylim() heatmap_m.set_ylim(bottom + 0.5, top - 0.5) plt.subplot(1,2,2) heatmap_f = sns.heatmap(genre_top10.iloc[:,31:41:3],annot=True,cmap="Spectral_r") bottom, top = heatmap_f.get_ylim() heatmap_f.set_ylim(bottom + 0.5, top - 0.5) plt.show() # - # **`Inferences:`** Sci-Fi appears to be the highest rated genre in the age group of U18 for both males and females. Also, females in this age group have rated it a bit higher than the males in the same age group. What more can you infer from the two heatmaps that you have plotted? Write your three inferences/observations below: # - Inference 1: **Votes from U18 from both Female and Male have higher ratings in comparsion to ther age groups for most genre. This could be due to two factors - one the young are less critical than older, or the U18 age group in a hurry to rate is callous and rates most movies a similar rating.** # # # - Inference 2: **Crime genre has got 8.3 rating, ie almost same rating as SciFi genre from U18. This could be a concern and movie censor boards should look at appropriate content Rating.** # # # - Inference 3: **Rating by 45A, both M and F is the least amongst all age groups. Probably being critcal as mentioned in point 1 could be the reason** # - ### Subtask 3.4: US vs non-US Cross Analysis # # The dataset contains both the US and non-US movies. Let's analyse how both the US and the non-US voters have responded to the US and the non-US movies. # # 1. Create a column `IFUS` in the dataframe `movies`. The column `IFUS` should contain the value "USA" if the `Country` of the movie is "USA". For all other countries other than the USA, `IFUS` should contain the value `non-USA`. # # # 2. Now make a boxplot that shows how the number of votes from the US people i.e. `CVotesUS` is varying for the US and non-US movies. Make use of the column `IFUS` to make this plot. Similarly, make another subplot that shows how non US voters have voted for the US and non-US movies by plotting `CVotesnUS` for both the US and non-US movies. Write any of your two inferences/observations from these plots. # # # 3. Again do a similar analysis but with the ratings. Make a boxplot that shows how the ratings from the US people i.e. `VotesUS` is varying for the US and non-US movies. Similarly, make another subplot that shows how `VotesnUS` is varying for the US and non-US movies. Write any of your two inferences/observations from these plots. # # Note : Use `movies` dataframe for this subtask. Make use of this documention to format your boxplot - https://seaborn.pydata.org/generated/seaborn.boxplot.html # + # Creating IFUS column movies['IFUS']=movies['Country'].apply(lambda x: "USA" if x=="USA" else "Non USA") # - movies.IFUS.value_counts() # + # Box plot - 1: CVotesUS(y) vs IFUS(x) plt.figure(figsize=(20,8)) plt.subplot(1,2,1) sns.boxplot(x=movies["IFUS"],y=movies["CVotesUS"]) plt.title('Votes from US Citizens') plt.subplot(1,2,2) sns.boxplot(x=movies["IFUS"],y=movies["CVotesnUS"]) plt.title('Votes from Non-US Citizens') plt.show() # - # **`Inferences:`** Write your two inferences/observations below: # - Inference 1: **Non USA movies seem to be well distributed in both plots. 
Non US movies got lesser votes from both US and NON US citizens** # # # - Inference 2: **From both plots, there seem to be some outliers in USA plot, suggesting that some USA movies got exceptionally high votes from USA and non-USA people** # + # Box plot - 2: VotesUS(y) vs IFUS(x) plt.figure(figsize=(20,8)) plt.subplot(1,2,1) sns.boxplot(x=movies["IFUS"],y=movies["VotesUS"]) plt.title('Distribution of Rating from US Citizens') plt.subplot(1,2,2) sns.boxplot(x=movies["IFUS"],y=movies["VotesnUS"]) plt.title('Distribution of Rating from Non-US Citizens') plt.show() # - # **`Inferences:`** Write your two inferences/observations below: # - Inference 1: **Median of rating of USA movies, both by US citizens as well by Non US citizens is higher. There is a probability that is so, because the data set had lesser no of Non US movies, ie just 19 compared to 81 US movies.** # # # - Inference 2: **Overall rating given is higher (for both US movies and non US movies) by US citizens. US citizens have rated Non US movies higher than non US citizens** # - ### Subtask 3.5: Top 1000 Voters Vs Genres # # You might have also observed the column `CVotes1000`. This column represents the top 1000 voters on IMDb and gives the count for the number of these voters who have voted for a particular movie. Let's see how these top 1000 voters have voted across the genres. # # 1. Sort the dataframe genre_top10 based on the value of `CVotes1000`in a descending order. # # 2. Make a seaborn barplot for `genre` vs `CVotes1000`. # # 3. Write your inferences. You can also try to relate it with the heatmaps you did in the previous subtasks. # # # # + # Sorting by CVotes1000 genre_top10.sort_values(by='CVotes1000',ascending=False) # - # Bar plot plt.figure(figsize=(10,5)) plt.title('Genre vs CVotes1000') sns.barplot(x=genre_top10.index, y=genre_top10["CVotes1000"]) plt.xlabel('Genre') plt.ylabel('Votes from top 1000 voters') plt.show() # **`Inferences:`** Write your inferences/observations here. # - Inference 1 : **Sci-Fi movies have got highest votes, though no. of SCI-Fi movies in the data set is lesser than other geners. Sci-Fi movies have also got highest votes from Male and Female voters as we saw in heat map above.** # # # - Inference 2 : **Though the dataset has highest no. of movies from Drama genre, the votes are lesser.** # # # - Inference 3: **Romance genre has the least amount of votes. This is the least popular genre.** # **`Checkpoint 6:`** The genre `Romance` seems to be most unpopular among the top 1000 voters. # # # # With the above subtask, your assignment is over. In your free time, do explore the dataset further on your own and see what kind of other insights you can get across various other columns. 
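# Appendix: a hedged sketch for the optional check from Subtask 2.5, replacing
# the manual inspection with a row-wise test that no actor in a trio has fewer
# than half the Facebook likes of either co-star. It reuses the
# `Facebook_likes` column created in Subtask 2.4; `is_valid_trio` and
# `top5_trios` are illustrative names, not part of the original assignment.

# +
def is_valid_trio(row):
    likes = [row['actor_1_facebook_likes'],
             row['actor_2_facebook_likes'],
             row['actor_3_facebook_likes']]
    # every actor must have at least half the likes of each of the other two
    return all(l >= 0.5 * other for l in likes for other in likes)

top5_trios = movies.sort_values(by='Facebook_likes', ascending=False).head(5)
top5_trios[top5_trios.apply(is_valid_trio, axis=1)][
    ['actor_1_name', 'actor_2_name', 'actor_3_name', 'Facebook_likes']]
# -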
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # confident_prediction_for_time_serial_indoor_temp - double_stage_attention import os import sys import time import optparse import configparser import pyodbc import pandas as pd import numpy as np import datetime #import holidays import re from sklearn.preprocessing import MinMaxScaler import tensorflow as tf # + temp_space = pd.read_excel('./datasets/88688_gap_15min_space_temp_f18_csw.xlsx') temp_weather = pd.read_excel('./datasets/68364_gap_1h_weather_temp.xlsx') df_weather=pd.DataFrame(np.array(temp_weather),columns=['Run_DateTime','Date','UTC_Date','TempA','TempM', 'DewPointA','DewPointM' ,'Humidity' ,'WindSpeedA', 'WindSpeedM' ,'WindGustA' ,'WindGustM' ,'WindDir', 'VisibilityA' ,'VisibilityM' ,'PressureA' ,'PressureM', 'WindChillA' ,'WindChillM' ,'HeatIndexA' ,'HeatIndexM', 'PrecipA' ,'PrecipM' ,'Condition' ,'Fog' ,'Rain', 'Snow' ,'Hail' ,'Thunder','Tornado' ,'ID']) df_weather = df_weather[['Date','TempM','DewPointM','Humidity','WindSpeedM' ,'PressureM']] df_space = pd.DataFrame(np.array(temp_space),columns=['ID','zone','floor','quadrant','eq_no','Date','temp']) # + def time_process(date): time=datetime.datetime.strptime(date,'%Y-%m-%d %H:%M:%S.0000000') return datetime.datetime(time.year,time.month,time.day,time.hour) def weather_data_feature_selected_and_serialize(df_weather): cols=['TempM','DewPointM','Humidity','WindSpeedM' ,'PressureM'] for col in cols: df_weather = df_weather.loc[~(df_weather[col] == 'N/A')] df_weather = df_weather.loc[~(df_weather[col] == -9999)] date_trans=lambda date:datetime.datetime(date.year, date.month, date.day, date.hour) df_weather['time']=df_weather['Date'].apply(date_trans) df_weather = df_weather.dropna() df_weather.drop('Date',axis=1,inplace=True) result = df_weather.groupby('time').apply(np.mean) return result def weather_data_interpolation(df_weather): interpolate_sample = df_weather.resample('15Min').asfreq() df_weather_interpolated = interpolate_sample.interpolate(method='time') return df_weather_interpolated def space_temp_data_feature_selected_and_serialize(df_space): cols=['Date','temp'] df_space = df_space[cols] df_space = df_space.dropna() date_trans=lambda date:datetime.datetime(date.year, date.month, date.day, date.hour, date.minute) df_space['time'] = df_space['Date'].apply(date_trans) df_space.drop('Date', axis=1, inplace=True) result = df_space.groupby('time').apply(np.mean) return result def space_temp_extreme_data_clean_up(df_space, bad_values, verbose): df_space_cleaned = df_space.copy() clean_up_temp = [] for bad_value in bad_values: for i in df_space.loc[df_space['temp'] == bad_value].index: clean_up_temp.append((df_space_cleaned.loc[i])) df_space_cleaned = df_space_cleaned.drop(i) if(verbose == True): show_interpolation_data_range(clean_up_temp) return df_space_cleaned def space_temp_data_interpolation(df_space): interpolate_sample = df_space.resample('15Min').asfreq() df_space_interpolated = interpolate_sample.interpolate(method='time') return df_space_interpolated def weather_data_preprocess(df_weather, bad_values=[0, 162.8, 250]): df_weather_serial = weather_data_feature_selected_and_serialize(df_weather) df_weather_serial_interpolated = weather_data_interpolation(df_weather_serial) return df_weather_serial_interpolated def space_temp_data_preprocess(df_space, bad_values=[0, 162.8, 250], verbose=False): 
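    # Same pattern as weather_data_preprocess, applied to the space-temperature series:
    # serialise the raw readings, drop rows matching the bad_values sentinels,
    # then resample to a 15-minute grid and time-interpolate the gaps.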
df_space_serial = space_temp_data_feature_selected_and_serialize(df_space) df_space_serial_cleaned = space_temp_extreme_data_clean_up(df_space_serial, bad_values, verbose) df_space_serial_interpolated = space_temp_data_interpolation(df_space_serial_cleaned) return df_space_serial_interpolated # In case people want to check the data after preprocessing def write_data_to_excel(dataframe, filename): # Create a Pandas Excel writer using XlsxWriter as the engine. writer = pd.ExcelWriter(filename, engine='xlsxwriter') # Convert the dataframe to an XlsxWriter Excel object. dataframe.to_excel(writer, sheet_name='Sheet1') # Close the Pandas Excel writer and output the Excel file. writer.save() # cleanning data printer def show_interpolation_data_range(clean_up_list): bad_data_counter = {} for i, row in enumerate(clean_up_list): print( 'Temp: %s '%(row['temp']), 'Date:', row.name) if(str(row['temp']) not in bad_data_counter): bad_data_counter[str(row['temp'])] = 1 else: bad_data_counter[str(row['temp'])] += 1 # Print counter for key, value in bad_data_counter.items(): print('Clean up %s bad data(%s)' %(value, key)) # - df_weather_processed = weather_data_preprocess(df_weather) df_space_processed = space_temp_data_preprocess(df_space, bad_values=[0, 162.8, 250], verbose=True) #write_data_to_excel(df_space_processed, 'space_data.xlsx') df = pd.merge(df_weather_processed, df_space_processed, left_index=True, right_index=True) df.describe() scaler = MinMaxScaler((0,1)) def get_batch(df, batch_size=128, T=16, input_dim=6, step=0, train=True): t = step * batch_size X_batch = np.empty(shape=[batch_size, T, input_dim]) y_batch = np.empty(shape=[batch_size, T]) labels = np.empty(shape=[batch_size]) #time_batch = np.empty(shape=[batch_size, T], dtype='datetime64') time_batch = np.array([]) time_stamp = df.index.tolist() for i in range(batch_size): X_batch[i, :] = scaler.fit_transform(df.iloc[t:t+T].values) y_batch[i, :] = df["temp"].iloc[t:t+T].values labels[i] = df["temp"].iloc[t+T] time_batch = np.append(time_batch, time_stamp[t+T]) #time_batch[i, :] = df.iloc[t:t+T].index[-1] t += 1 ## shuffle in train, not in test if train: index = list(range(batch_size)) np.random.shuffle(index) X_batch = X_batch[index] y_batch = y_batch[index] labels = labels[index] time_batch = time_batch[index] return X_batch, y_batch, labels, time_batch x, y, labels, time_train = get_batch(df,batch_size = 256, T = 7*24, step = 50, train=True) print(x.shape) print(y.shape) print(labels.shape) print(time_train.shape) class ts_prediction(object): def __init__(self, input_dim, time_step, n_hidden, d_hidden, batch_size): self.batch_size = batch_size self.n_hidden = n_hidden self.d_hidden = d_hidden #self.o_hidden = 16 self.input_dim = input_dim self.time_step = time_step self.seq_len = tf.placeholder(tf.int32,[None]) self.input_x = tf.placeholder(dtype = tf.float32, shape = [None, None, input_dim]) # b,T,d_in self.input_y = tf.placeholder(dtype = tf.float32,shape = [None,self.time_step]) # b,T self.label = tf.placeholder(dtype = tf.float32) #b,1 self.original_loss = tf.placeholder(dtype = tf.float32, shape = []) ## placeholder for loss without adversarial gradient added self.encode_cell = tf.contrib.rnn.LSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=True) self.decode_cell = tf.contrib.rnn.LSTMCell(self.d_hidden, forget_bias=1.0, state_is_tuple=True) #self.output_cell = tf.contrib.rnn.LSTMCell(self.o_hidden, forget_bias=1.0, state_is_tuple=True) self.loss_1 = tf.constant(0.0) self.loss = tf.constant(0.0) ## =========== build the 
model =========== ## ## ==== encoder ===== ## h_encode, c_state = self.en_RNN(self.input_x)#!!!!!!! c_expand = tf.tile(tf.expand_dims(c_state[1],1),[1,self.time_step,1]) fw_lstm = tf.concat([h_encode,c_expand],axis = 2) # b,T,2n stddev = 1.0/(self.n_hidden*self.time_step) Ue = tf.get_variable(name= 'Ue',dtype = tf.float32, initializer = tf.truncated_normal(mean = 0.0, stddev = stddev,shape = [self.time_step,self.time_step])) ## (b,d,T) * (b,T,T) = (b,d,T) brcast_UX = tf.matmul(tf.transpose(self.input_x,[0,2,1]),tf.tile(tf.expand_dims(Ue,0),[self.batch_size,1,1])) e_list = [] for k in range(self.input_dim): feature_k = brcast_UX[:,k,:] e_k = self.en_attention(fw_lstm,feature_k) # b,T e_list.append(e_k) e_mat = tf.concat(e_list,axis = 2) alpha_mat = tf.nn.softmax(e_mat) #b,T,d_in horizontally encode_input = tf.multiply(self.input_x,alpha_mat) self.h_t, self.c_t = self.en_RNN(encode_input, scopes = 'fw_lstm') ## ==== inferrence nn ==== ## """ node_hidden_layer1 = 500 node_hidden_layer2 = 500 node_hidden_layer3 = 500 hidden_layer1 = {"weights": tf.Variable(tf.random_normal([10, node_hidden_layer1])), "biases": tf.Variable(tf.random_normal([node_hidden_layer1]))} hidden_layer2 = {"weights": tf.Variable(tf.random_normal([node_hidden_layer1, node_hidden_layer2])), "biases": tf.Variable(tf.random_normal([node_hidden_layer2]))} hidden_layer3 = {"weights": tf.Variable(tf.random_normal([node_hidden_layer2, node_hidden_layer3])), "biases": tf.Variable(tf.random_normal([node_hidden_layer3]))} output_layer = {"weights": tf.Variable(tf.random_normal([node_hidden_layer3, 20])), "biases": tf.Variable(tf.random_normal([20]))} # Hidden Layer: weights \dot input_data + biaes layer1 = tf.add(tf.matmul(self.h_t, hidden_layer1["weights"]), hidden_layer1["biases"]) layer1 = tf.nn.relu(layer1) layer2 = tf.add(tf.matmul(layer1, hidden_layer2["weights"]), hidden_layer2["biases"]) layer2 = tf.nn.relu(layer2) layer3 = tf.add(tf.matmul(layer2, hidden_layer3["weights"]), hidden_layer3["biases"]) layer3 = tf.nn.relu(layer3) output = tf.add(tf.matmul(layer3, output_layer["weights"]), output_layer["biases"]) prediction = neural_network_model(X_data) loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss) """ ## ==== decoder ===== ## h_decode, d_state = self.de_RNN(tf.expand_dims(self.input_y,-1)) d_expand = tf.tile(tf.expand_dims(d_state[1],1),[1,self.time_step,1]) dec_lstm = tf.concat([h_decode,d_expand],axis = 2) # b,T,2*d_hidden Ud = tf.get_variable(name = 'Ud', dtype = tf.float32, initializer = tf.truncated_normal(mean = 0.0, stddev = stddev, shape = [self.n_hidden, self.n_hidden])) brcast_UDX = tf.matmul(self.h_t,tf.tile(tf.expand_dims(Ud,0),[self.batch_size,1,1])) # b,T,n_hidden l_list = [] for i in range(self.time_step): feature_i = brcast_UDX[:,i,:] l_i = self.dec_attention(dec_lstm,feature_i) l_list.append(l_i) l_mat = tf.concat(l_list,axis = 2) beta_mat = tf.nn.softmax(l_mat, dim = 1) context_list = [] h_tmp = tf.transpose(self.h_t,[0,2,1]) for t in range(self.time_step): beta_t = tf.reshape(beta_mat[:,t,:],[self.batch_size,1,self.time_step]) self.c_t = tf.reduce_sum(tf.multiply(h_tmp,beta_t),2) # b,T,T -> b,T,1 context_list.append(self.c_t) c_context = tf.stack(context_list,axis = 2) # b,n_hidden,T # b,T,1 b,T,n_hidden -> b,T,n_hidden+1 c_concat = tf.concat([tf.expand_dims(self.input_y,-1),tf.transpose(c_context,[0,2,1])], axis = 2) W_hat = tf.get_variable(name = 'W_hat', dtype = tf.float32, initializer = 
tf.truncated_normal(mean = 0.0, stddev = stddev,shape = [self.n_hidden+1,1])) y_encode = tf.matmul(c_concat,tf.tile(tf.expand_dims(W_hat,0),[self.batch_size,1,1])) h_out, d_out = self.de_RNN(y_encode) last_concat = tf.expand_dims(tf.concat([h_out[:,-1,:],d_out[-1]],axis = 1),1) Wy = tf.get_variable(name = 'Wy', dtype = tf.float32,initializer = tf.truncated_normal(mean = 0.0, stddev = stddev,shape = [self.n_hidden+self.d_hidden,1])) W_y = tf.tile(tf.expand_dims(Wy,0),[self.batch_size,1,1]) self.y_predict = tf.squeeze(tf.matmul(last_concat,W_y)) #self.loss += tf.reduce_mean(tf.square(self.label - self.y_predict)) # reduce_mean: avg of batch loss self.loss_1 += tf.reduce_mean(tf.square(self.label - self.y_predict)) # reduce_mean: avg of batch loss self.adversarial_gradient = tf.gradients(self.loss_1,self.input_x) self.loss = self.loss_1 + self.original_loss self.params = tf.trainable_variables() #learning rate self.alpha=tf.Variable(5e-4) optimizer = tf.train.AdamOptimizer(self.alpha) #self.train_op = optimizer.minimize(self.loss) grad_var = optimizer.compute_gradients(loss = self.loss, var_list = self.params, aggregation_method = 2) self.train_op = optimizer.apply_gradients(grad_var) def en_RNN(self, input_x, scopes = 'fw_lstm'): ''' input_x: b, T, d_in output: h: seqence of output state b,T,n_hidden state: final state b,n_hidden ''' with tf.variable_scope('fw_lstm' or scopes) as scope: try: h,state = tf.nn.dynamic_rnn( cell = self.encode_cell, inputs = input_x, sequence_length = self.seq_len, dtype = tf.float32, scope = 'fw_lstm') except ValueError: scope.reuse_variables() h,state = tf.nn.dynamic_rnn( cell = self.encode_cell, inputs = input_x, sequence_length = self.seq_len, dtype = tf.float32, scope = scopes) return [h,state] def de_RNN(self,input_y, scopes = 'de_lstm'): with tf.variable_scope('dec_lstm') as scope: try: h,state = tf.nn.dynamic_rnn( cell = self.decode_cell, inputs = input_y, sequence_length = self.seq_len, dtype = tf.float32, scope = 'de_lstm') except ValueError: scope.reuse_variables() h,state = tf.nn.dynamic_rnn( cell = self.decode_cell, inputs = input_y, sequence_length = self.seq_len, dtype = tf.float32, scope = scopes) return [h,state] def en_attention(self,fw_lstm,feature_k): ''' fw_lstm: b,T,2n feature_k: row k from brcast_UX, b,T return: b,T ''' with tf.variable_scope('encoder') as scope: try: mean = 0.0 stddev = 1.0/(self.n_hidden*self.time_step) We = tf.get_variable(name = 'We', dtype=tf.float32,shape = [self.time_step, 2*self.n_hidden], initializer=tf.truncated_normal_initializer(mean,stddev)) Ve = tf.get_variable(name = 'Ve',dtype=tf.float32,shape = [self.time_step,1], initializer=tf.truncated_normal_initializer(mean,stddev)) except ValueError: scope.reuse_variables() We = tf.get_variable('We') Ve = tf.get_variable('Ve') # (b,T,2n) (b,2n,T) W_e = tf.transpose(tf.tile(tf.expand_dims(We,0),[self.batch_size,1,1]),[0,2,1]) # b,2n,T mlp = tf.nn.tanh(tf.matmul(fw_lstm,W_e) + tf.reshape(feature_k,[self.batch_size,1,self.time_step])) #b,T,T + b,1,T = b,T,T V_e = tf.tile(tf.expand_dims(Ve,0),[self.batch_size,1,1]) return tf.matmul(mlp,V_e) def dec_attention(self, dec_lstm, feature_t, scopes = None): ''' dec_lstm: b,T,2*d_hidden feature_k: row k from brcast_UX, b,T return: b,T ''' with tf.variable_scope('decoder' or scopes) as scope: try: mean = 0.0 stddev = 1.0/(self.n_hidden*self.time_step) Wd = tf.get_variable(name = 'Wd', dtype=tf.float32, shape = [self.n_hidden, 2*self.d_hidden], initializer=tf.truncated_normal_initializer(mean,stddev)) Vd = tf.get_variable(name = 
'Vd', dtype=tf.float32, shape = [self.n_hidden,1], initializer=tf.truncated_normal_initializer(mean,stddev)) except ValueError: scope.reuse_variables() Wd = tf.get_variable('Wd') Vd = tf.get_variable('Vd') # (b,T,2*d_hidden) (b,2*d_hidden,T) W_d = tf.transpose(tf.tile(tf.expand_dims(Wd,0),[self.batch_size,1,1]),[0,2,1]) # b,2*d_hidden,n_hidden # (b,T,2*d_hidden) * (b,2*d_hidden,n_hidden) -> b,T,n_hidden mlp = tf.nn.tanh(tf.matmul(dec_lstm,W_d) + tf.reshape(feature_t,[self.batch_size,1,self.n_hidden])) #b,T,n_hidden + b,1,n_hidden = b,T,n_hidden V_d = tf.tile(tf.expand_dims(Vd,0),[self.batch_size,1,1]) return tf.matmul(mlp,V_d) #b,T,1 def predict(self,x_test,y_test,sess): train_seq_len = np.ones(self.batch_size) * self.time_step feed = {model.input_x: x_test, model.seq_len: train_seq_len, model.input_y: y_test} y_hat = sess.run(self.y_predict,feed_dict = feed) return y_hat # + w_data=df batch_size = 7 * 24 * 4 * 2 INPUT_DIM = 6 # input feature # time_steps = 7*24 time_steps = 24 n_hidden = 64 # encoder dim d_hidden = 64 # decoder dim total_epoch = 10 train_batch_num = int(len(w_data)*0.8/batch_size) ## 0.8 of traininng data, 20% for testing df_test = w_data[(train_batch_num*batch_size):] df1 = w_data[:(train_batch_num*batch_size)] steps_train = int((len(df1) - time_steps)/batch_size) - 1 steps_test = int((len(df_test) - time_steps)/batch_size) - 1 print ('Training data %i' % len(df1), "testing data %i" % len(df_test)) # + # Check everything before training model print(steps_train) print(steps_test) print(w_data.shape) print(df_test.shape) print(df1.shape) x, y, labels, times = get_batch(df1,batch_size = batch_size, T = time_steps, step = steps_train-1) print(x.shape) print(y.shape) print(labels.shape) print(times.shape) # + current_epoch = 0 tf.reset_default_graph() model = ts_prediction(input_dim=INPUT_DIM, time_step=time_steps, n_hidden=n_hidden, d_hidden=d_hidden, batch_size=batch_size) init = tf.global_variables_initializer() #memory policy config=tf.ConfigProto() config.gpu_options.allow_growth=True sess = tf.Session(config=config) sess.run(init) while current_epoch <= total_epoch: cumulative_loss = 0.0 start_time = time.time() for t in range(steps_train - 1): x, y, labels, _ = get_batch(df1,batch_size=batch_size, T=time_steps, step=t) train_seq_len = np.ones(batch_size) * time_steps feed = {model.input_x: x, model.seq_len: train_seq_len, model.input_y: y, model.label: labels, } loss_1, adversarial_gradients, _ = sess.run([model.loss_1,model.adversarial_gradient, model.alpha],feed_dict = feed) epsilon = np.random.uniform(0,0.5,1) feed = {model.input_x: x + epsilon*adversarial_gradients[0], model.seq_len: train_seq_len, model.input_y: y, model.label: labels, model.original_loss: loss_1, } loss, _, _ = sess.run([model.loss, model.train_op, model.alpha], feed_dict = feed) cumulative_loss += loss end_time = time.time() print("time:%.1f epoch:%i/%i loss:%f" % (end_time - start_time, current_epoch, total_epoch, cumulative_loss)) if cumulative_loss<3: if cumulative_loss>2.5: update=tf.assign(model.alpha,1e-4) #update the learning rate sess.run(update) print('Learning rate is updated to 1e-4') else: update=tf.assign(model.alpha,1e-5) #update the learning rate sess.run(update) print('Learning rate is updated to 1e-5') current_epoch += 1 # + test_loss = 0.0 y_hat_arr = np.empty(shape = [0]) y_labels_arr = np.empty(shape = [0]) time_batch = np.empty(shape = [0], dtype="str") for t in range(steps_test-1): x_test,y_test,labels_test, time_test = get_batch(df_test, batch_size=batch_size, 
T=time_steps, step=t, train=False) y_hat = model.predict(x_test, y_test, sess) y_hat_arr = np.concatenate([y_hat_arr, np.array(y_hat)]) y_labels_arr = np.concatenate([y_labels_arr, np.array(labels_test)]) time_batch = np.concatenate([time_batch, time_test]) test_loss += np.mean(np.square(y_hat - labels_test)) print ("the mean squared error for test data are %f " % (test_loss * 1.0 / steps_test)) # - y_hat_arr print(y_labels_arr.shape) print(y_hat_arr.shape) print(time_batch.shape) """ # Forecast 24 hours plot from matplotlib import pyplot as plt from matplotlib.pyplot import cm %matplotlib inline _FUTURE_HOURS = y_hat_arr.shape[1] compare_data_index = [0, 100, 200, 300, 400, 500, 600, 700] X_SHOW_FREQUENT = 4 Y_SHOW_FREQUENT = 24 * 7 x_stick = list(range(0, y_labels_arr.shape[1] + 1, X_SHOW_FREQUENT)) y_stick = list(range(0, y_labels_arr.shape[0] + 1, Y_SHOW_FREQUENT)) time_batch[[y_stick_value for y_stick_value in y_stick]] x_stick_content = np.array([time_batch[0],'Pred 1 h', 'Pred 2 h', 'Pred 3 h', 'Pred 4 h', 'Pred 5 h', 'Pred 6 h', 'Pred 7 h', 'Pred 8 h', 'Pred 9 h', 'Pred 10 h', 'Pred 11 h', 'Pred 12 h', 'Pred 13 h', 'Pred 14 h', 'Pred 15 h', 'Pred 16 h', 'Pred 17 h', 'Pred 18 h', 'Pred 19 h', 'Pred 20 h', 'Pred 21 h', 'Pred 22 h', 'Pred 23 h', 'Pred 24 h']) for index in compare_data_index: x_stick_content[0] = time_batch[index] fig, ax = plt.subplots(figsize=(20, 10)) plt.axis([0.0, _FUTURE_HOURS, 62.0, 82.0]) plt.gca().set_autoscale_on(False) plt.plot(list(range(24)), y_hat_arr[index, :], 'blue', label="prediction label") plt.plot(list(range(24)), y_labels_arr[index, :], 'red', label="true label") plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.xticks(x_stick, x_stick_content[[x_stick_value for x_stick_value in x_stick]]) plt.show() """ # + from matplotlib import pyplot as plt # %matplotlib inline plot_num = 24 * 7 * 4 * 2 SHOW_FREQUENT = 12 * 4 x_index = range(plot_num) x_stick = list(range(0, plot_num, SHOW_FREQUENT)) for i in range(int(y_labels_arr.shape[0] / plot_num)): #for i in range(3): start_idx = i * plot_num end_idx = (i + 1) * plot_num plt.figure(figsize=(20, 10)) #plt.ylim([72.0, 82.0]) plt.xticks(x_stick, time_batch[[start_idx + x_stick_value for x_stick_value in x_stick]]) plt.xticks(rotation=30) plt.plot(range(plot_num), y_hat_arr[start_idx:end_idx], 'b', label="prediction") plt.plot(range(plot_num), y_labels_arr[start_idx:end_idx], 'r', label="ground true") plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.draw() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.10 (''.local_venv'': venv)' # language: python # name: python3 # --- import sys sys.path.append('../') import raytracing import numpy as np import jax.numpy as jnp import cv2 from matplotlib.pyplot import imshow # %matplotlib inline img_width = 256 img_height = 256 img = np.arange(img_height * img_width, dtype=float).reshape((img_height, img_width)).T img = img[..., np.newaxis] img = np.repeat(img, 3, axis=2) img[..., 1] = img[..., 1].T img[..., 0] /= ((img_width - 1.) * 256) img[..., 1] /= ((img_height - 1.) 
* 256) img[..., 2] = 0.25 img *= 254.999 img = np.flipud(img.astype(int)) imshow(img) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import netCDF4 import numpy as np f = netCDF4.Dataset('orography.nc', 'w') f.createDimension('time', None) f.createDimension('z', 3) f.createDimension('y', 4) f.createDimension('x', 5) lats = f.createVariable('lat', float, ('y',), zlib=True) lons = f.createVariable('lon', float, ('x',), zlib=True) orography = f.createVariable('orog', float, ('y', 'x'), zlib=True, least_significant_digit=1, fill_value=0) lat_out = [60, 65, 70, 75] lon_out = [ 30, 60, 90, 120, 150] # Create field values for orography data_out = np.arange(4*5) # 1d array but with dimension x*y data_out.shape = (4,5) # reshape to 2d array orography[:] = data_out lats[:] = lat_out lons[:] = lon_out f.close() f = netCDF4.Dataset('orography.nc', 'r') lats = f.variables['lat'] lons = f.variables['lon'] orography = f.variables['orog'] print(lats[:]) print(lons[:]) print(orography[:]) f.close() # -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.6.4 # language: julia # name: julia-0.6 # --- using Distributions using ProfileView using StatsFuns using Gadfly using ProgressMeter using DataFrames using Colors using Viridis using DataStructures # + type Individual m::Int64 p::Int64 mu::Float64 mu_sigma::Float64 mu_sigma_b::Float64 e::Array{Float64, 3} y::Array{Float64, 1} b::Array{Float64, 2} z::Array{Float64, 1} Individual(m, p, mu, mu_sigma, mu_sigma_b, e) = new(2*m, p, mu, mu_sigma, mu_sigma_b, e) end type Population n::Int64 base_ind::Individual generation::Int64 fitness::Array{Float64, 1} surface::Distributions.FullNormal pop::Array{Individual, 1} moments::Array{Any, 1} Population(n, base_ind, generation) = new(n, base_ind, generation) end # + function Population(n::Int, base_ind::Individual, pop_array::Array{Individual, 1}) new_pop = Population(n, base_ind, 0) new_pop.generation = 0 new_pop.pop = pop_array new_pop.moments = [] new_pop end function RandomInd!(ind::Individual, base_ind::Individual) ind.y = rand(Normal(0, base_ind.mu_sigma), ind.m) ind.b = rand(Normal(0, base_ind.mu_sigma_b), ind.p, ind.m) for i in 1:ind.m ind.b[:,i] = normalize(ind.b[:,i]) * ind.y[i] end ind.z = sum(ind.b, 2)[:,1] for k in 1:ind.p for i in 1:ind.m for j in 1:(i-1) ind.z[k] = ind.z[k] + ind.e[i, j, k] * ind.b[k, i] * ind.b[k, j] end end end end function RandomInd(base_ind::Individual) new_ind = Individual(base_ind.m/2, base_ind.p, base_ind.mu, base_ind.mu_sigma, base_ind.mu_sigma_b, base_ind.e) RandomInd!(new_ind, base_ind) new_ind end function RandomPop!(pop::Population) pop.pop = Array{Individual}(pop.n) pop.generation = 0 for i = 1:pop.n pop.pop[i] = RandomInd(pop.base_ind) end pop.moments = [] pop.fitness = zeros(Float64, pop.n) end # + import Base.string function string(pop::Population) return "a Population with $(pop.n) individuals, at generation $(pop.generation)" end import Base.print print(io::IO, pop::Population) = print(io, string(pop)) import Base.show show(io::IO, pop::Population) = print(io, "This is ", pop) import Base.getindex function getindex(pop::Population, i::Integer) getindex(pop.pop, i) end function getindex(pop::Population, s::UnitRange) Population(length(s), pop.base_ind, 
getindex(pop.pop, s)) end function append!(pop::Population, ind::Individual) pop.pop = [pop.pop; ind] pop.n = length(pop.pop) end function append!(pop1::Population, pop2::Population) # COMPARE BASE IND! pop1.pop = [pop1.pop; pop2.pop] pop.n = length(pop1.pop) end function join(pop1::Population, pop2::Population) # TODO: COMPARE BASE IND! new_pop = Population(pop1.n + pop2.n, pop1.base_ind) new_pop.pop = [pop1.pop; pop2.pop] new_pop end function copy!(source::Population, sink::Population) sink.n = source.n sink.base_ind = source.base_ind sink.generation = source.generation sink.fitness = source.fitness sink.surface = source.surface sink.pop = copy(source.pop) end import Base.copy function copy(source::Population) new_pop = Population(source.n, source.base_ind, source.generation) copy!(source, new_pop) new_pop end # + function mutation!(ind::Individual, bin_locus) mutation_locus = rand(bin_locus) if(mutation_locus > 0) d_uni_y = DiscreteUniform(1, ind.m) norm_sigma_y = Normal(0, ind.mu_sigma) norm_sigma_b = Normal(0, ind.mu_sigma_b) for k = range(1, mutation_locus) i = rand(d_uni_y) ind.y[i] = ind.y[i] + rand(norm_sigma_y) ind.b[:,i] = normalize(normalize(ind.b[:,i]) + rand(norm_sigma_b, ind.p)) * ind.y[i] end end end function mutation!(pop::Population) bin_locus = Binomial(pop.base_ind.m, pop.base_ind.mu) for k = 1:pop.n mutation!(pop.pop[k], bin_locus) end end function cross!(ind_1::Individual, ind_2::Individual, new_ind::Individual, d_uni, alele_1::Array{Int64, 1}, alele_2::Array{Int64, 1}) rand!(d_uni, alele_1) rand!(d_uni, alele_2) for locus = range(2, 2, convert(Int64, ind_1.m/2)) new_ind.y[ locus - 1] = ind_1.y[ (locus - 1) + alele_1[convert(Int64, locus/2)]] new_ind.y[ locus] = ind_2.y[ (locus - 1) + alele_2[convert(Int64, locus/2)]] for j in range(1, ind_1.p) new_ind.b[j, locus - 1] = ind_1.b[j, (locus - 1) + alele_1[convert(Int64, locus/2)]] new_ind.b[j, locus] = ind_2.b[j, (locus - 1) + alele_2[convert(Int64, locus/2)]] end end new_ind.z = sum(new_ind.b, 2)[:,1] for k in 1:new_ind.p for i in 1:new_ind.m for j in 1:(i-1) new_ind.z[k] = new_ind.z[k] + new_ind.e[i, j, k] * new_ind.b[k, i] * new_ind.b[k, j] end end end end function choose_mates!(pop::Population, mates::Array{Int64, 1}) matings = rand(Multinomial(pop.n, pop.fitness), 1) l = 1 for k = 1:pop.n if(matings[k] > 0) for i = 1:matings[k] mates[l] = k l = l + 1 end end end round.(Int64, shuffle!(mates)) end function next_generation!(pop::Population, holder_pop::Population, sires, dames, d_uni, alele_1, alele_2; selective = true) holder_pop.surface = pop.surface if (selective) fitness!(pop) else fill!(pop.fitness, 1./pop.n) end holder_pop.fitness = pop.fitness mutation!(pop) choose_mates!(pop, sires) choose_mates!(pop, dames) for i in 1:pop.n cross!(pop[sires[i]], pop[dames[i]], holder_pop.pop[i], d_uni, alele_1, alele_2) end holder_pop.generation = pop.generation + 1 copy!(holder_pop, pop) end function moments!(pop::Population) pop.moments = [pop.moments; moments(pop)] end # + function fitness!(pop::Population) logfit = Float64[logpdf(pop.surface, pop[i].z) for i in 1:pop.n] #logfit = Float64[logpdf(pop.surface, pop[i].z) * (1-0.95*sum(abs(pop[i].b))/(size(pop[i].b)[1] * size(pop[i].b)[2])) for i in 1:pop.n] pop.fitness = exp.(logfit - logsumexp(logfit)) end function changeSurface!(pop::Population, theta::Array{Float64, 1}, omega::Array{Float64, 2}) pop.surface = MvNormal(theta, omega) end function twoModuleMatrix(vars::Array{Float64, 1}, cor1, cor2) n = size(vars)[1] n_1 = Int(floor(n/2)) module_1 = [ones(Float64, n_1); 
zeros(Float64, n - n_1)] module_2 = [zeros(Float64, n_1); ones(Float64, n - n_1)] omega = (module_1 * module_1') * cor1 + module_2 * module_2' * cor2 [omega[i, i] = 1 for i = 1:n] omega .* (sqrt.(vars) * sqrt.(vars)') end # - function moments(pop::Population) ys = convert(Array{Float64, 2}, reshape(Float64[ind.y[i] for ind in pop.pop, i in 1:pop.base_ind.m], pop.n, pop.base_ind.m)) zs = convert(Array{Float64, 2}, reshape(Float64[ind.z[i] for ind in pop.pop, i in 1:pop.base_ind.p], pop.n, pop.base_ind.p)) mean_b = zeros(Float64, pop.base_ind.p, pop.base_ind.m) for i in 1:pop.n mean_b = mean_b + pop[i].b end mean_y = squeeze(mean(ys, 1), 1) mean_z = squeeze(mean(zs, 1), 1) mean_b = mean_b / pop.n count_b = countBclass(classifyBj(mean_b)) count_b[:gen] = pop.generation G = cov(zs) corrG = cor(zs) Dict([("mean_y", mean_y), ("mean_b", mean_b), ("mean_z", mean_z), ("zs", zs), ("theta", pop.surface.μ), ("gen", pop.generation), ("G", G), ("corrG", corrG), ("count_b", count_b)]) end # + function lowerTri(mat) p = size(mat)[1] lower = zeros(Float64, Int64((p * p - p)/2)) k = 1 for i = 1:p, j = 1:p if(i < j) lower[k]= mat[i, j] k = k + 1 end end lower end function AVGcorr(mat; modules = Array[collect(1:Int(floor(size(mat)[1]/2))), collect(((Int(floor(size(mat)[1]/2)))+1):size(mat)[1])]) p = size(mat)[1] avg_plus = [mean(lowerTri(mat[mod, mod])) for mod in modules] modules_array = Array{Int64}(p, p, length(modules)) for j in 1:length(modules) mod_vec = [any(i .== modules[j]) ? 1 : 0 for i in 1:p] modules_array[:, :, j] = mod_vec * mod_vec' end avg_plus, mean(mat[find(sum(modules_array, 3) .== 0)]) end function plotSurfacePop(pop::Population; gen = length(pop.moments)) zs = pop.moments[gen]["zs"] theta = pop.moments[gen]["theta"] zs_eig = (eig(pop.surface.Σ.mat)[2][:, end-1:end]' * zs')' zs_df = DataFrame(x = zs_eig[:,1], y = zs_eig[:,2], fit = pop.fitness) sort!(zs_df, cols = [:fit]) s_theta = eig(pop.surface.Σ.mat)[2][:, end-1:end]' * theta s_omega = diagm(eig(pop.surface.Σ.mat)[1])[end-1:end,end-1:end] limits_x = (reduce(min, [s_theta[1] - 2*sqrt(s_omega[1, 1]); zs_eig[:,1]]), reduce(max, [s_theta[1] + 2*sqrt(s_omega[1, 1]); zs_eig[:,1]])) limits_y = (reduce(min, [s_theta[2] - 2*sqrt(s_omega[2, 2]); zs_eig[:,2]]), reduce(max, [s_theta[2] + 2*sqrt(s_omega[2, 2]); zs_eig[:,2]])) plot(layer(z = (x,y) -> pdf(MvNormal(s_theta, s_omega), [x; y]), x = linspace(limits_x[1], limits_x[2], 150), y = linspace(limits_y[1], limits_y[2], 150), Geom.contour), layer(zs_df, x = "x", y = "y", color = "fit", Geom.point)) end function plotCorrelations(pop::Population, start = 1) n = length(pop.moments) df_P = DataFrame(W_1 = [AVGcorr(pop.moments[i]["corrG"])[1][1] for i in start:n], W_2 = [AVGcorr(pop.moments[i]["corrG"])[1][2] for i in start:n], B = [AVGcorr(pop.moments[i]["corrG"])[2] for i in start:n], G = collect(start:n) ) plot(df_P, layer(y="W_1", x="G", Geom.line, Theme(default_color=colorant"red")), layer(y="W_2", x="G", Geom.line, Theme(default_color=colorant"green")), layer(y="B", x="G", Geom.line), Guide.manual_color_key("Legend", ["Within 1", "Within 2", "Between"], ["red", "green", "deepskyblue"])) end function plotAVGRatio(pop::Population, start = 1) n = length(pop.moments) df_P = DataFrame(W = [mean(AVGcorr(pop.moments[i]["corrG"])[1]) / AVGcorr(pop.moments[i]["corrG"])[2] for i in start:n], G = collect(start:n) ) plot(df_P, layer(y="W", x="G", Geom.line)) end function plotTraits(pop::Population, start = 1) n = size(pop.moments)[1] p2 = convert(Int64, pop.base_ind.p/2) df_trait = DataFrame(mod1 = 
[mean(pop.moments[i]["mean_z"][1:p2]) for i in start:n], theta1 = [mean(pop.moments[i]["theta"][1:p2]) for i in start:n], mod2 = [mean(pop.moments[i]["mean_z"][(p2+1):end]) for i in start:n], theta2 = [mean(pop.moments[i]["theta"][(p2+1):end]) for i in start:n], gen = collect(start:n) ) plot(df_trait, layer(y="mod1", x="gen", Geom.line, Theme(default_color=colorant"red")), layer(y="theta1", x="gen", Geom.line, Theme(default_color=colorant"darkred")), layer(y="theta2", x="gen", Geom.line, Theme(default_color=colorant"darkblue")), layer(y="mod2", x="gen", Geom.line), Guide.manual_color_key("Legend", ["Trait Module 1", "Trait Module 2", "Theta Module 1", "Theta module 2"], ["red", "deepskyblue", "darkred", "darkblue"])) end function plotPleitropy(pop::Population, start = 1) df = copy(pop.moments[start]["count_b"]) n = size(pop.moments)[1] for i in (start+1):n df = [df; pop.moments[i]["count_b"]] end plot(df, x = "gen", y = "count", color="class", Geom.line) end # - modular_matrix = Array( [[1 1 0 0] [-1 -1 0 0] [0 0 1 1] [0 0 -1 -1] [1 0.5 0 0] [0.5 1 0 0] [-0.5 -1 0 0] [-1 -0.5 0 0] [0 0 0.5 1] [0 0 1 0.5] [0 0 -0.5 -1] [0 0 -1 -0.5]]) intra_antagonistic_matrix = Array( [[1. 1 1 -1] [1 1 -1 1] [1 0 1 -1] [1 0 -1 1] [1 -1 1 1] [1 -1 1 0] [1 -1 1 -1] [1 -1 0 1] [1 -1 0 0] [1 -1 0 -1] [1 -1 -1 1] [1 -1 -1 0] [1 -1 -1 -1] [0 1 1 -1] [0 1 -1 1] [0 0 1 -1] [0 0 -1 1] [0 -1 -1 1] [-1 1 1 1] [-1 1 1 0] [-1 1 1 -1] [-1 1 0 1] [-1 1 0 0] [-1 1 0 -1] [-1 1 -1 1] [-1 1 -1 0] [-1 1 -1 -1] [-1 0 1 -1] [-1 0 -1 1] [-1 -1 -1 1]]) antagonistic_matrix = Array( [[1. 1 0 -1] [1 1 -1 0] [1 1 -1 -1] [1 0 0 -1] [1 0 -1 0] [1 0 -1 -1] [0 1 0 -1] [0 1 -1 0] [0 1 -1 -1] [0 -1 1 1] [0 -1 1 0] [0 -1 0 1] [-1 0 1 1] [-1 0 1 0] [-1 0 0 1] [-1 -1 1 1] [-1 -1 1 0]]) local_matrix = Array( [[1. 0 0 0] [0 1 0 0] [0 0 1 0] [0 0 0 1] [-1 0 0 0] [0 -1 0 0] [0 0 -1 0] [0 0 0 -1]]) integrated_matrix = Array( [[1. 1 1 1] [1 1 1 0] [1 1 0 1] [1 0 1 1] [-1 -1 -1 -1] [0 1 1 1] [0 -1 -1 -1] [-1 0 -1 -1] [-1 -1 0 -1] [-1 -1 -1 0] [0 1 0 1] [1 0 1 0] [0 1 1 0] [1 0 0 1] [0 -1 0 -1] [-1 0 -1 0] [0 -1 -1 0] [-1 0 0 -1]]) directional_matrices = Dict([("Modular", modular_matrix), ("Antagonistic", antagonistic_matrix), ("Local", local_matrix), ("Integrated", integrated_matrix), ("Intra_antagonistic", intra_antagonistic_matrix)]) # + function vectorCor(x::Array{Float64, 1}, y::Array{Float64, 1}) dot(normalize(x), normalize(y)) end function compareRows(b_j::Array{Float64, 1}, current_matrix::Array{Float64, 2}) max_corr = 0. for i in 1:size(current_matrix, 1) vec = current_matrix[i,:] corr = vectorCor(b_j, vec) max_corr = max_corr > corr ? max_corr : corr end max_corr end function classifyBj(b_j::Array{Float64, 1}) max_corr = 0 key = "Neutral" for i in keys(directional_matrices) corr = compareRows(b_j, directional_matrices[i]) key = max_corr > corr ? key : i max_corr = max_corr > corr ? 
max_corr : corr end key end function classifyBj(b::Array{Float64, 2}) m = size(b)[2] [classifyBj(b[:,j]) for j in 1:m] end function countBclass(b_class) counter_b = counter(b_class) DataFrame( class = ["Modular", "Antagonistic", "Local", "Integrated", "Intra_antagonistic"], count = [counter_b[i] for i in ["Modular", "Antagonistic", "Local", "Integrated", "Intra_antagonistic"]]) end # + function run_pop(ind::Individual, n_e::Int64, selectionRegimes::Array{String,1}, regimeGenerations::Array{Int64, 1}; theta::Array{Float64, 1} = zeros(Float64, ind.p), delta_theta::Array{Float64, 1} = zeros(Float64, ind.p), omega::Array{Float64, 2} = diagm(ones(Float64, ind.p)), thin = 100) pop = Population(n_e, ind, 0) RandomPop!(pop) changeSurface!(pop, theta, omega) fitness!(pop) run_pop(pop, selectionRegimes, regimeGenerations; theta = theta, delta_theta = delta_theta, omega = omega, thin = thin) end function run_pop(pop::Population, selectionRegimes::Array{String,1}, regimeGenerations::Array{Int64, 1}; theta::Array{Float64, 1} = pop.surface.μ, delta_theta::Array{Float64, 1} = zeros(Float64, ind.p), omega::Array{Float64, 2} = pop.surface.Σ, thin = 100) changeSurface!(pop, theta, omega) fitness!(pop) holder_pop = copy(pop) sires = zeros(Int64, pop.n) dames = zeros(Int64, pop.n) d_uni = DiscreteUniform(0, 1) alele_1 = zeros(Int64, Int64(pop.base_ind.m/2)) alele_2 = zeros(Int64, Int64(pop.base_ind.m/2)) omega_var = omega[1, 1] regimes = [i for i in zip(selectionRegimes, regimeGenerations)] for current_regime in regimes @showprogress 2 current_regime[1] for i = 1:current_regime[2] if(i % thin == 0) moments!(pop) end if(startswith(normalize_string(strip(current_regime[1]), casefold = true), "dire")) theta += delta_theta changeSurface!(pop, theta, omega) end next_generation!(pop, holder_pop, sires, dames, d_uni, alele_1, alele_2, selective = normalize_string(strip(current_regime[1]), casefold = true) != "drift") end end pop end # - function all_plots(pop::Population, file) plot_corr = plotCorrelations(pop) plot_traits = plotTraits(pop) plot_pleio = plotPleitropy(pop) draw(PDF("./data/figures/$file", 6inch, 6inch), vstack(plot_corr, plot_traits, plot_pleio)) end # + n_e = 2000 m = 100 p = 4 mu = 0.001 mu_σ = 0.1 mu_σ_b = 0.1 sigma_e = 1 e = rand(Normal(0, sigma_e), 2*m, 2*m, p) for k in 1:p e[:, :, k]= (e[:, :, k] + transpose(e[:, :, k]))./2 end global random_ind = Individual(m, p, mu, mu_σ, mu_σ_b, e) delta_theta_speed = 40/1000 delta_theta = delta_theta_speed * [ones(Float64, Int64(random_ind.p/2)); -1*ones(Float64, Int64(random_ind.p/2))] delta_theta_1 = delta_theta_speed * [ones(Float64, Int64(random_ind.p/2)); zeros(Float64, Int64(random_ind.p/2))] delta_theta_2 = delta_theta_speed * [zeros(Float64, Int64(random_ind.p/2)); -1 * ones(Float64, Int64(random_ind.p/2))] omega_var = 5 omega = twoModuleMatrix(omega_var*ones(Float64, random_ind.p), 0.8, 0.8) # - corridor_pop = run_pop(random_ind, n_e, ["Drift", "Stab", "Direct"], [5000, 5000, 5000]; theta = 10*ones(Float64, Int64(random_ind.p)), delta_theta = delta_theta_1, omega = omega, thin = 100) #corridor_pop = run_pop(random_ind, n_e, ["Drift", "Stab", "Direct"], [5000, 5000, 5000]; # theta = 10*ones(Float64, Int64(random_ind.p)), delta_theta = delta_theta_1, # omega = omega, thin = 100) #divergent_pop = run_pop(random_ind, n_e, ["Drift", "Stab", "Direct"], [5000, 5000, 5000]; # theta = 10*ones(Float64, Int64(random_ind.p)), delta_theta = delta_theta, # omega = omega, thin = 100) # delta_theta = delta_theta_2, omega = omega, thin = 1) #random_pop = 
run_pop(random_pop, ["Direct"], [1000]; # delta_theta = delta_theta_1, omega = omega, thin = 1) #random_pop = run_pop(random_pop, ["Direct"], [1000]; # delta_theta = delta_theta_2, omega = omega, thin = 1) all_plots(corridor_pop, "epistatic_pleioVectors_omega-diag_sel-corridor_n-2000_m-100_mu-1e3-sigmas_y0.1-b0.1-e0.1.pdf") #all_plots(divergent_pop, "epistatic_pleioVectors_omega-diag_sel-divergent_n-2000_m-100_mu-1e3-0.1-0.1.pdf") pop1 = corridor_pop pop2 = divergent_pop D = [pop1.moments[end]["count_b"]; pop1.moments[100]["count_b"]; pop2.moments[end]["count_b"]] pa = plot(D, x = [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3], y=:count, color=:class, Geom.bar(position=:stack)) divergent_pop.moments[end]["corrG"] random_pop = corridor_pop plotCorrelations(random_pop) plot_traits = plotTraits(random_pop, 500) plotPleitropy(random_pop) draw(PDF("./figures/plot_pleioVectors_divergent.pdf", 6inch, 6inch), vstack(plot_corr, plot_traits, plot_pleio)) # + b = random_pop.moments[end]["mean_b"] clss = classifyBj(convert(Array{Int64, 2}, round(Int64, b))) b_j = b[:,1] # - round(Int64, -0.6) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + id="k2oteg-rEwLW" outputId="7d8317df-c05c-4de5-d838-97a660bbc9cd" colab={"base_uri": "https://localhost:8080/", "height": 255} import pandas as pd df=pd.read_csv('/content/train.csv') df.head() # + id="cdqJI_nAFbdd" outputId="214969a5-6a65-468c-8d19-51f69299296a" colab={"base_uri": "https://localhost:8080/", "height": 34} df.shape # + id="3v-q_aaOFpJt" outputId="c7f46685-97b3-4c0c-e174-3929c1083cf6" colab={"base_uri": "https://localhost:8080/", "height": 471} # !pip install sweetviz # + id="6mrEi6ynF_AY" import sweetviz as sv # + [markdown] id="hgGEtXGmZAvN" # SWEETVIZ TOOL FOR AUTOMATED EDA # # + id="0fX5lqgXGKTY" outputId="49ef39aa-70f3-4964-825d-a585c2359530" colab={"base_uri": "https://localhost:8080/", "height": 591, "referenced_widgets": ["857df1645ed14c648723742f0729c453", "e45deca21f064d71baf51aed6f5540cb", "f3889985586848b89e351759a5a699c5", "842a0b8a891c470c95a39e5674358c26", "532811cec48e496b9e6cdd1c6221d4dd", "f1e8def4c9c642e0b9ffad0c6c177297", "", "bb3e1b827c9649afa38096ddb6ff53ac", "", "", "", "c9eef1286c4c4df18d5be38f20c81aa3", "09db3109a17946ed836fa12ff2f3c87e", "a2a65045a84c44f9a095bfc39326442e", "24d0e13365c24a6c8ccb3624f8140ad1", "", "", "e699d21d025d48268226c8fe97845384", "84a843e685dd4ea99918099432cf5d5f", "26ca100e25dd423382578ad6ac855dac", "", "", "", "141bee93ee38417f810f9c24484414da", "5e225a633d4848bc8ec8050719e57f78", "", "", "d73e4d31b0e84dbab447116d510faf3a", "b76371ddfbde4dd7befac659739fc7d5", "", "1893aaabedbd4bd588d047ac089c1acc", "cd583bfe5cb64f9d8a6a5946983fdcd5", "e88cbc6af71a4115aa0ada2243fcca47"]} report=sv.analyze(df) # + id="JsMguLpRGNbh" outputId="fcd96233-4e3c-4e24-8540-bbc6991c5c43" colab={"base_uri": "https://localhost:8080/", "height": 54} report.show_html() # + id="S2uABhyuGZXu" outputId="799a46cf-7c7f-4e30-ed43-e651f3ce8e4f" colab={"base_uri": "https://localhost:8080/", "height": 1000} df.info() # + id="qgfqtCcJJYgf" outputId="28314228-3d1b-4859-f9a6-d20cc807dafc" colab={"base_uri": "https://localhost:8080/", "height": 350} df.describe() # + [markdown] id="jjmlUeh3ZMEd" # CORRELATION TABLE # + id="Hu8yVwrhJbQG" outputId="2c4b6c22-3f90-4054-90aa-7c98956a3c5c" colab={"base_uri": 
"https://localhost:8080/", "height": 444} df.corr() # + id="kMdV9AvLJcdm" x=df.drop(['critical_temp'],axis=1) y=df['critical_temp'] from sklearn.preprocessing import StandardScaler scaler=StandardScaler() from sklearn.model_selection import train_test_split x_train,y_train,x_test,y_test=train_test_split(x,y,test_size=0.2) # + id="xFpy1VOjRNjN" x_train=scaler.fit_transform(x_train) y_train=scaler.fit_transform(y_train) # + id="oDc487e1KMgY" from keras.models import * from keras.layers import * # + id="vEuDV1CeKTG9" outputId="9c86fe05-0d95-4943-d0ba-4d4d21286a99" colab={"base_uri": "https://localhost:8080/", "height": 34} x_train.shape,x_test.shape # + [markdown] id="5kejbu75U9G8" # **NEURAL NETWORK IMPLEMENTATION** # # + id="fOMeaPTIKRDb" outputId="368f3ce2-9750-4e80-88a4-6d1a3ae43a4f" colab={"base_uri": "https://localhost:8080/", "height": 469} import numpy as np import pandas as pd import keras import keras.backend as kb import tensorflow as tf model = keras.Sequential([ keras.layers.Dense(128, activation=tf.nn.relu, input_shape=[81]), keras.layers.Dense(32, activation=tf.nn.relu), keras.layers.Dense(32, activation=tf.nn.relu), keras.layers.Dense(32, activation=tf.nn.relu), keras.layers.Dense(32, activation=tf.nn.relu), keras.layers.Dense(32, activation=tf.nn.relu), keras.layers.Dense(32, activation=tf.nn.relu), keras.layers.Dense(8, activation=tf.nn.relu), keras.layers.Dense(1,activation='linear') ]) model.summary() # + id="-bG5m6HCLq4g" outputId="cc246f3d-41fc-44db-f439-ee70c2fb3c24" colab={"base_uri": "https://localhost:8080/", "height": 1000} #optimizer = tf.keras.optimizers.RMSprop(0.0099) model.compile(loss='mean_squared_error',optimizer='adam') history=model.fit(x_train,x_test,epochs=400,validation_data=(y_train,y_test)) # + id="i5QpNH4RObSG" outputId="9f24eee3-dae4-43ce-f177-1dca751f2c8c" colab={"base_uri": "https://localhost:8080/", "height": 34} history.history.keys() # + id="xdSQPjPuOTk-" outputId="455b3361-c5c3-4dd9-e0da-c476aee6e9e2" colab={"base_uri": "https://localhost:8080/", "height": 396} import matplotlib.pyplot as plt plt.style.use('ggplot') plt.plot(history.history['loss'],'red') plt.plot(history.history['val_loss'],'blue') plt.xlabel('epochs') plt.ylabel('loss') plt.show() # + id="tOykwUbDdDrL" y_pred=model.predict(y_train) # + id="64JpoGqucx1r" outputId="75656fee-512f-4bbe-e841-a130a3234db6" colab={"base_uri": "https://localhost:8080/", "height": 34} from sklearn import metrics metrics.mean_absolute_error(y_test,y_pred) # + [markdown] id="zD13XIzWVISF" # **RANDOM FOREST REGRESSOR IMPLEMENTATION** # + id="zsHntgzndHs8" outputId="944ecaa3-8c8f-4e92-be10-bf8e5d1f96ce" colab={"base_uri": "https://localhost:8080/", "height": 139} from sklearn.ensemble import RandomForestRegressor model=RandomForestRegressor(n_estimators=100) model.fit(x_train,x_test) # + id="mvJSMLleTC_w" outputId="afccb372-a2e2-4eac-c71a-6fc60a2b26d3" colab={"base_uri": "https://localhost:8080/", "height": 34} model.score(y_train,y_test) # + id="mjCeRJ4ERyDQ" y_pred=model.predict(y_train) # + id="z8CAXNygSK3s" outputId="97e3921d-75b8-4959-c3e2-02fd47736b03" colab={"base_uri": "https://localhost:8080/", "height": 34} from sklearn import metrics metrics.mean_squared_error(y_test,y_pred) # + id="PABn0yM_ST5R" outputId="55f4b357-4db0-40e7-89ef-6d338157dabe" colab={"base_uri": "https://localhost:8080/", "height": 515} from yellowbrick.regressor import ResidualsPlot visualizer=ResidualsPlot(model) visualizer.fit(x_train,x_test) visualizer.score(y_train,y_test) visualizer.poof() # + id="HU8O8GswSn-F" 
outputId="0e107490-3ab8-4874-dec5-59073ca3760b" colab={"base_uri": "https://localhost:8080/", "height": 517} from yellowbrick.regressor import ResidualsPlot,PredictionError visualizer=PredictionError(model) visualizer.fit(x_train,x_test) visualizer.score(y_train,y_test) visualizer.poof() # + id="JezTEULHTMTd" outputId="30e8ebae-255b-4a84-c61a-9ec5f384dcee" colab={"base_uri": "https://localhost:8080/", "height": 492} import matplotlib.pyplot as plt import seaborn as sns w=range(len(y_test)) plt.scatter(w,y_test,label='actual_value') plt.scatter(w,y_pred,label='predicted_value') plt.legend() # + id="rgniJW0eUQzP" # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd df = pd.read_csv('~/dropbox/sts-ml/results/mgh-other-procedures-breakdown.csv') # Improve column names df = df.rename(columns={ 'Surgery type':'procedure', 'Freq.':'counts', 'Percent':'percent', 'Cum.':'percent_cumulative'}) # Drop last row df = df[:-1] # Drop percent column df.drop(columns=['percent', 'percent_cumulative'], inplace=True) # Append new row at bottom with total total_counts = df.counts.sum() df = df.append(pd.DataFrame([{'procedure': 'Total', 'counts': total_counts}])) df.reset_index(drop=True, inplace=True) # Add new column of fraction of total df['fraction'] = df.counts / total_counts # Add new column of cumulative fraction of total df['cumulative_fraction'] = df[:-1].percent.cumsum() # Round down df = df.round(2) # Save as CSV df.to_csv('~/dropbox/sts-ml/results/mgh-other-procedures-breakdown.csv', index=False) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # author: # %pylab inline # #%qtconsole # home = %env HOME # cd $home/QCThesis/EAC # # Generate data & data partitions # %run generatePartitions.py -d synthetic -n 100 -D 2 -C 6 -i 3 -m cuda -s syn_cem -np 20 -mc 4 -Mc 25 -dir test/ # # Get test partitions # ls test # + # files=!ls $home/QCThesis/EAC/test folder= home + "/QCThesis/EAC/test/" for i,f in enumerate(files): files[i] = folder+f partition_files = [f for f in files if "_partition_" in f and "syn_cem" in f] # - partition_files # # Cluster pwd import eac reload(eac) # #%debug reload(eac) estimator=eac.EAC(100) estimator.fit(partition_files,files=True,assoc_mode='full', prot_mode='random', nprot=None) np.where(estimator._coassoc==20)[0].size print estimator._coassoc.shape print estimator._coassoc print "Diagonal" print estimator._coassoc.diagonal() print "Symmetric:\t",(estimator._coassoc.T == estimator._coassoc).all() # # Linkage # + from scipy.cluster.hierarchy import linkage from scipy.spatial.distance import squareform def _apply_linkage(assoc_mat,method='single'): """ SciPy linkage wants a distance array of format pdist. SciPy squareform converts between the two formats. 
assoc_mat : pair-wise similarity association matrix method : linkage method to use; can be 'single'(default), 'complete', 'average', 'weighted', 'centroid', 'median', 'ward' """ condensed_assoc = squareform(assoc_mat) # convert pair-wise similarity array (assoc_mat->condensed_assoc) to dissimilarity condensed_diss_assoc = condensed_assoc.max() - condensed_assoc Z = linkage(condensed_diss_assoc,method=method) return Z # - assoc=estimator._coassoc coassoc = assoc / 20 coassoc = coassoc.max() - coassoc coassoc Z=_apply_linkage(coassoc) import scipy.cluster.hierarchy as hie # + for l in Z: print l X = [c for c in Z if c[0] >=100 and c[1] >= 100] print "argmax\t",np.argmax(Z[:,2]) print Z[np.argmax(Z[:,2])] # - plt.figure(figsize=(16,12)) r=hie.dendrogram(Z,leaf_rotation=90,show_contracted=True) len(r['color_list']) dif=Z[1:,2]-Z[0:-1,2] #[maximo,indice]=max(dif); print "max lifetime: ", dif.max() print "index: ", dif.argmax() # # K random prototypes # #%debug reload(eac) estimator=eac.EAC(100) estimator.fit(partition_files,files=True,assoc_mode='prot', prot_mode='random', nprot=5) estimator._coassoc.sum() # # K Centroid-based prototypes # #%%debug -b eac.py:190 reload(eac) estimator=eac.EAC(100,data=data) estimator.fit(partition_files,files=True,assoc_mode='prot', prot_mode='other', nprot=5) b=np.zeros(a.shape[0]) for c in xrange(a.shape[0]): dist = data - a[c] dist = dist **2 dist = dist.sum(axis=1) b[c] = np.argmin(dist) b a=array([[-778.1035957 , 728.38305131], [ 474.98214377, 654.43652209], [ -62.22709694, 637.21319263]]) estimator.k_labels estimator._coassoc.sum() # # K-Nearest Neighbours # This is how it is implemented inside the EAC class. # # ## Load data # + datafile=None for f in files: if 'syn_cem' in f and 'data' in f: datafile=f print datafile # - data = np.genfromtxt(datafile,delimiter=',') # ## Selecting the neighbours from sklearn.neighbors import NearestNeighbors K_neigh=6 # Minkowski distance is a generalization of Euclidean distance and is equivelent to it for p=2 neigh = NearestNeighbors(n_neighbors=K_neigh, radius=1.0, algorithm='auto', leaf_size=30, metric='minkowski', p=2) neigh.fit(data) # + data_neighbours=neigh.kneighbors(X=data,return_distance=False) # the closest neighbour is the point itself - no interest in that data_neighbours = data_neighbours[:,1:] # - data_neighbours # ## EAC # #%debug reload(eac) estimator=eac.EAC(100,data=data) estimator.fit(partition_files,files=True,assoc_mode='prot', prot_mode='knn', nprot=5) estimator._coassoc.sum() # # sparse matrix linkage diss_mat = estimator._coassoc diss_labels = estimator.k_neighbours from scipy.sparse import * smat = csr_matrix((100,100)) mat = np.zeros((100,100)) # %timeit smat[0,0]=50 # %timeit mat[0,0]=50 rows=diss_labels[0] cols=diss_labels[1] rows # %timeit mat[rows[:,np.newaxis],cols]=1 # %timeit smat[rows[:,np.newaxis],cols]=1 smat*2 # ## Testing updating sparse coassoc # + def _update_coassoc_knn(assoc_mat,clusters,k_neighbours): """ Updates an NxK co-association matrix. k_neighbours is an NxK array where the k-th element of the i-th row is the index of a data point that corresponds to the k-th nearest neighbour of the i-th data point. That neighbour is the k-th prototype of the i-th data point. 
""" nclusters = len(clusters) for i in xrange(nclusters): if clusters[i].shape > 1: # all data points in cluster - rows to select n_in_cluster = clusters[i] # update row j of matrix for j in n_in_cluster: # all prototypes in cluster - columns to select k_in_cluster = np.where(np.in1d(k_neighbours[j] ,n_in_cluster)) # this indexing selects the rows and columns specified by n_in_cluster and k_in_cluster assoc_mat[j,k_in_cluster] += 1 # np.newaxis is alias for None pass def _update_coassoc_knn_sparse(assoc_mat,clusters,k_neighbours): """ Updates an NxK co-association matrix. k_neighbours is an NxK array where the k-th element of the i-th row is the index of a data point that corresponds to the k-th nearest neighbour of the i-th data point. That neighbour is the k-th prototype of the i-th data point. """ nclusters = len(clusters) for i in xrange(nclusters): if clusters[i].shape > 1: # all data points in cluster - rows to select n_in_cluster = clusters[i] # update row j of matrix for j in n_in_cluster: # all prototypes in cluster - columns to select # column indices corresponding to the K-prototypes #k_in_cluster = np.where(np.in1d(n_in_cluster,k_neighbours[j]))[0] k_in_cluster = n_in_cluster[np.in1d(n_in_cluster,k_neighbours[j])] # this indexing selects the rows and columns specified by n_in_cluster and k_in_cluster #assoc_mat[j,k_in_cluster] += 1 # np.newaxis is alias for None assoc_mat[j,k_in_cluster] += np.ones_like(k_in_cluster) pass # - def mat_match(sparse,normal,neigh): for row in xrange(sparse.shape[0]): cols_in_sparse = neigh[row][normal[row].astype(bool)] row_in_sparse = sparse[row,cols_in_sparse] row_in_normal = normal[row][normal[row].astype(bool)] if (row_in_sparse != row_in_normal).any(): return False return True def zero_low_tri(sparse): r,c = sparse.shape for i in xrange(r): for j in xrange(c): if j >= c: continue if sparse[i,j] != 0: return False return True def func1(smat): """ check that, where the matrix is not symetric, at least one of the values is 0 """ a=smat.todense() row,col = np.where(a!=a.T) row=np.array(row).flatten() col=np.array(col).flatten() for i in xrange(row.size): if a[row[i],col[i]] != 0 and a[col[i],row[i]] != 0: return False return True def func2(smat): """ true if lower triangle is zero where matrix is not symetric """ a=smat.todense() row,col = np.where(a!=a.T) row=np.array(row).flatten() col=np.array(col).flatten() for i in xrange(row.size): if row[i] > col[i] and a[row[i],col[i]] != 0: return False return True def func3(smat): """ true if upper triangle is zero where matrix is not symetric """ a=smat.todense() row,col = np.where(a!=a.T) row=np.array(row).flatten() col=np.array(col).flatten() for i in xrange(row.size): if row[i] < col[i] and a[row[i],col[i]] != 0: return False return True def func3(smat): """ make dists only on upper triangle """ a=smat.todense() rows,cols = smat.nonzero() for i in xrange(rows.size): if rows[i]>cols[i] and smat[rows[i],cols[i]] != 0: smat[cols[i],rows[i]] = smat[rows[i],cols[i]] smat[rows[i],cols[i]] = 0 pass def func4(smat): """ make dists symmetric """ a=smat.todense() rows,cols = smat.nonzero() for i in xrange(rows.size): if rows[i]condensed_assoc) to dissimilarity condensed_diss_assoc = condensed_assoc.max() - condensed_assoc Z = linkage(condensed_diss_assoc,method=method) return Z # - Y = sparse_coassoc.tocsr() Y[i,j]=Y.max()-Y[i,j] # # Linkage experiment # # I want to evaluate if complete link is equivalent to single link when the dissimilarity measure is inverted. 
import scipy points=np.array([[5,5],[5,6],[1,1],[1,2],[-2,-2]]) dist=scipy.spatial.distance.pdist(points) invDist=dist.max()-dist Z_d=linkage(dist,method="single") Z_id=linkage(invDist,method="complete") p=scipy.cluster.hierarchy.dendrogram(Z_d) p=scipy.cluster.hierarchy.dendrogram(Z_id) # They're **not equivelent** since the distance between clusters is always **minimized** indepedentely of how the **metric** chosen. a=np.ones(5) a[:,np.newaxis] # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # PyGrid Client Worker API # ### Import Libs # + from syft.grid.client.client import connect from syft.grid.client.grid_connection import GridHTTPConnection from syft.core.node.domain.client import DomainClient import syft as sy import torch as th sy.VERBOSE = False # - # ### Connect performing authentication process domain = connect( url="http://172.16.58.3:5000", # Domain Address credentials={"email":"", "password":""}, conn_type= GridHTTPConnection,) # HTTP Connection Protocol # ### Create a new workers/environments domain.workers.instance_type(pandas=True) domain.workers.create(instance_type="t2.large") # ### Get all Environments domain.workers.all(pandas=True) # ### Get Specific Environment worker = domain.workers[2] print("Worker Provider: ", worker.provider) print("Worker Instance Type: ", worker.instance_type) print("Worker Region:", worker.region) # ## Uploading Tensors # #### Send a tensor to the Domain # + import torch as th x = th.tensor([1,2,3,4,5,6]) domain_tensor_pointer = x.send(domain, pointable=True, tags=["#dataset"]) # - # #### Check Domain Store domain.store.pandas # #### Move a Domain Tensor to your temporary environment domain.load(domain_tensor_pointer, worker.address) # #### Send a private tensor to your temporary environment private_tensor_y = th.tensor([6,5,4,3,2,1]) worker_tensor_pointer = private_tensor_y.send(worker, pointable=True, tags=["#diff-dataset"]) # #### Check Environment Store worker.store.pandas # ### Perform computation using private environment result = worker.store["#dataset"] + worker.store["#diff-dataset"] # ### Save/Send results from env to the Domain worker.save(result) domain.store.pandas # ### Perform Request to get access to datasets domain.store[1].request(reason="I would like to get this tensor") # ### Evaluate data access requests domain.requests.pandas domain.requests[0].accept() # ### Retrieve the real data value domain.store[1].get() # ### Delete Environment del domain.workers[1] domain.workers.all(pandas=True) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Dogs vs Cats Redux (Kaggle) # # ---Prepackaging--- from os import getcwd, rename from shutil import copyfile from numpy.random import choice from glob import glob from scipy.misc import imread, imresize, imsave from tqdm import tqdm from vaiutils import path_consts for k,v in path_consts('CatDog-Prepack', 'CatDog'): exec(k+'=v') FRACTION_VALID = 0.2 FRACTION_SAMPLE = 0.01 # + DIR_TRAIN = DIR_DATA + '/train' DIR_TEST = DIR_DATA + '/test' DIR_VALID = DIR_DATA + '/valid' DIR_RESULTS = DIR_DATA + '/results' DIR_SAMPLE_TRAIN = DIR_DATA + '/sample/train' DIR_SAMPLE_TEST = DIR_DATA + '/sample/test' DIR_SAMPLE_VALID = DIR_DATA + '/sample/valid' 
DIR_SAMPLE_RESULTS = DIR_DATA + '/sample/results' # + def resize_img(path): # %cd $path filenames = glob('*.jpg') for filename in tqdm(filenames): imsave(path + '/' + filename, imresize(imread(path + '/' + filename), (224, 224))) resize_img(DIR_SAMPLE_VALID + '/cats') resize_img(DIR_SAMPLE_VALID + '/dogs') resize_img(DIR_SAMPLE_TRAIN + '/cats') resize_img(DIR_SAMPLE_TRAIN + '/dogs') resize_img(DIR_VALID + '/cats') resize_img(DIR_VALID + '/dogs') resize_img(DIR_TRAIN + '/cats') resize_img(DIR_TRAIN + '/dogs') resize_img(DIR_TEST + '/unknown') resize_img(DIR_SAMPLE_TEST + '/unknown') # - # %cd $DIR_DATA for path in ['test/unknown', 'valid', 'results', 'sample/train', 'sample/test/unknown', 'sample/valid', 'sample/results']: # %mkdir -p $path # + def move_img(from_path, to_path, fraction, copy=False): # %cd $from_path filenames = glob('*.jpg') filenames = choice(filenames, int(fraction*len(filenames)), replace=False) for filename in filenames: if copy: copyfile(from_path + '/' + filename, to_path + '/' + filename) else: rename(from_path + '/' + filename, to_path + '/' + filename) move_img(DIR_TRAIN, DIR_VALID, FRACTION_VALID) move_img(DIR_TRAIN, DIR_SAMPLE_TRAIN, FRACTION_SAMPLE, copy=True) move_img(DIR_VALID, DIR_SAMPLE_VALID, FRACTION_SAMPLE, copy=True) move_img(DIR_TEST, DIR_SAMPLE_TEST + '/unknown', FRACTION_SAMPLE, copy=True) # + def separate_cats_dogs(path): # %cd $path # %mkdir cats # %mkdir dogs # %mv cat.*.jpg cats/ # %mv dog.*.jpg dogs/ separate_cats_dogs(DIR_TRAIN) separate_cats_dogs(DIR_VALID) separate_cats_dogs(DIR_SAMPLE_TRAIN) separate_cats_dogs(DIR_SAMPLE_VALID) # - # %cd $DIR_TEST # %mv *.jpg unknown/ # + def join_cats_dogs(path): # %cd $path/cats # %mv *.jpg ../ # %cd $path/dogs # %mv dog.*.jpg ../ join_cats_dogs(DIR_TRAIN) join_cats_dogs(DIR_VALID) join_cats_dogs(DIR_SAMPLE_TRAIN) join_cats_dogs(DIR_SAMPLE_VALID) # - # %cd $DIR_TEST/unknown # %mv *.jpg ../ # %cd $DIR_SAMPLE_TEST # %mv *.jpg ../ # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Data Processing # # **Data processing for labeled datasets involves the following operations:** # 1. Read and preprocess train, validation and test CSV files (involves data alterations, e.g., tokenization). # 2. Compute global vocabulary (set of all unique words in all 3 datasets). # 3. Compute word embeddings for all words in vocabulary. # 4. Compute metadata on training set (e.g., word frequencies). # 5. Save all 3 processed datasets to cache along with data processing info. # # **Data processing for unlabeled datasets involves the following operations:** # 1. Read and preprocess unlabeled CSV file (involves data alterations, e.g., tokenization). # 2. Compute vocabulary (set of all unique words in all unlabeled dataset). # 3. Compute word embeddings for words in vocabulary that are not present in labeled datasets. # # In `deepmatcher` we aim to simplify and abstract away the complexity of data processing as much as possible. In some cases however, you may need to customize some aspects of it. # # This tutorial is structured into four sections, each describing one kind of customization: # 1. CSV format # 2. Data alterations # 3. Word embeddings # 4. Caching # # ## 1. 
CSV format # # As described in the getting started tutorial, each CSV file is assumed to have the following kinds of columns: # # * **"Left" attributes (required):** Remember our goal is to match tuple pairs across two tables (e.g., table A and B). "Left" attributes are columns that correspond to the "left" tuple or the first tuple (in table A) in an tuple pair. These column names are expected to be prefixed with "left_" by default. This can be customized by setting the `left_prefix` parameter (e.g., use "ltable_" as the prefix). # * **"Right" attributes (required):** "Right" attributes are columns that correspond to the "right" tuple or the second tuple (in table B) in an tuple pair. These column names are expected to be prefixed with "right_" by default. This can be customized by setting the `right_prefix` parameter (e.g., use "rtable_" as the prefix). # * **Label column (required for train, validation, test):** Column containing the labels (match or non-match) for each tuple pair. Expected to be named "label" by default. This can be customized by setting the `label_attr` parameter. # * **ID column (required):** Column containing a unique ID for each tuple pair. This is for evaluation convenience. Expected to be named "id" by default. This can be customized by setting the `id_attr` parameter. # # An example of this is shown below: import deepmatcher as dm train, validation, test = dm.data.process( path='sample_data/itunes-amazon', train='train.csv', validation='validation.csv', test='test.csv', ignore_columns=('left_id', 'right_id'), left_prefix='left_', right_prefix='right_', label_attr='label', id_attr='id') # ## 2. Data alterations # # By default, data processing involves performing the following two modifications to all data: # # **Tokenization:** Tokenization involves dividing text into a sequence of tokens, which roughly correspond to "words". E.g., "This ain't funny. It's actually hillarious." will be converted to the following sequence after tokenization: \['This', 'ain', ''t', 'funny', '.', 'It', ''s', 'actually', 'hillarious', '.'\]. The tokenizer can be set by specifying the `tokenizer` parameter. By default, this is set to `"nltk"`, which will use the **[default nltk tokenizer](https://www.nltk.org/api/nltk.tokenize.html#nltk.tokenize.word_tokenize)**. Alternatively, you may set this to `"spacy"` which will use the tokenizer provided by the `spacy` package. You need to first [install and setup](https://spacy.io/usage/) `spacy` to do this. import deepmatcher as dm train, validation, test = dm.data.process( path='sample_data/itunes-amazon', train='train.csv', validation='validation.csv', test='test.csv', ignore_columns=('left_id', 'right_id'), tokenize='nltk') # **Lowercasing:** By default all data is lowercased to improve generalization. This can be disabled by setting the `lowercase` parameter to `False`. train, validation, test = dm.data.process( path='sample_data/itunes-amazon', train='train.csv', validation='validation.csv', test='test.csv', ignore_columns=('left_id', 'right_id'), lowercase=False) # ## 3. Word Embeddings # # **[Word embeddings](http://colah.github.io/posts/2014-07-NLP-RNNs-Representations/#word-embeddings)** are obtained using **[fastText](https://fasttext.cc/docs/en/pretrained-vectors.html)** by default. 
This can be customized by setting the `embeddings` parameter to any of the pre-trained word embedding models described in the **[API docs](http://pages.cs.wisc.edu/~sidharth/deepmatcher/deepmatcher.html#deepmatcher.process)** under the `embeddings` parameter. We list a few common settings for `embeddings` below: # # * `fasttext.en.bin`: Uncased **character level** `fastText` embeddings trained on English Wikipedia. This is the default. # * `fasttext.crawl.vec`: Uncased word level `fastText` embeddings trained on Common Crawl # * `fasttext.wiki.vec`: Uncased word level `fastText` embeddings trained on English Wikipedia and news data # * `glove.42B.300d`: Uncased glove word embeddings trained on Common Crawl # * `glove.6B.300d`: Uncased glove word embeddings trained on English Wikipedia and news data # # An example follows: train, validation, test = dm.data.process( path='sample_data/itunes-amazon', train='train.csv', validation='validation.csv', test='test.csv', ignore_columns=('left_id', 'right_id'), embeddings='glove.42B.300d') # In order to avoid redownloading pre-trained embeddings during each run, the downloads are saved to a shared directory which serves as a cache for word embeddings. By default this is set to `~/.vector_cache`, but you may customize this location by setting the `embeddings_cache_path` parameter as follows: # + # First, remove data cache file. Otherwise `embeddings_cache_path` won't be used - cache # already contains embeddings information. # !rm -f sample_data/itunes-amazon/*.pth # Also reset in-memory vector cache. Otherwise in-memory fastText embeddings will be used # instead of loading them from disk. dm.data.reset_vector_cache() # Then, re-process. train, validation, test = dm.data.process( path='sample_data/itunes-amazon', train='train.csv', validation='validation.csv', test='test.csv', ignore_columns=('left_id', 'right_id'), embeddings_cache_path='~/custom_embeddings_cache_dir') # - # ## 4. Caching # Processing data is time consuming. In order to reduce the time spent in processing, `deepmatcher` automatically caches the result of the data processing step for all labeled datasets. It is designed such that users do not need to be aware of its existence at all - you only need to call `deepmatcher.data.process` as you would normally. In the first such call, `deepmatcher` would do the processing and cache the result. In subsequent calls, unless there are any changes to this call that would necessitate re-processing data, e.g., a modification in any of the data files or a change in the tokenizer used, `deepmatcher` will re-use the cached results. If there changes that makes the cache invalid, it will automatically rebuild the cache. The caching behavior can be customized by setting these parameters in the `deepmatcher.data.process` call: # # * **cache:** The filename of the cache file which will be stored in the same `path` as the data sets. This file will store the processed train, validation and test data, along with all relevant information about data processing (e.g. columns to ignore, tokenizer, lowercasing etc.). This defaults to `cacheddata.pth`. # * **check_cached_data:** Whether to check the contents of the cache file to ensure its compatibility with the specified data processing arguments. This defaults to `True`, and we strongly recommend against disabling the check. 
# * **auto_rebuild_cache:** Whether to automatically rebuild the cache file if the cache is stale, i.e., if the data processing arguments have changed in a way that makes the previously processed data invalid. If this is `False` and `check_cached_data` is enabled, then `deepmatcher` will throw an exception if the cache is stale. This defaults to `True`. # # An example of using these parameters is shown below: train, validation, test = dm.data.process( path='sample_data/itunes-amazon', train='train.csv', validation='validation.csv', test='test.csv', ignore_columns=('left_id', 'right_id'), cache='my_itunes_cache.pth', check_cached_data=True, auto_rebuild_cache=False) # Now when you change a processing parameter with `auto_rebuild_cache` set to False, you will get an error. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.7 ('base') # language: python # name: python3 # --- var1 = 5 print(var1) var2 = "string" print(var2) a = 2 b = 5 print(a) print(a+b) c=a+b print(c) print(a*b) print(2**5) print(a**b) pow(5, 2) print(b%a) print(5//2) a, b = 1, 2 print(a, b) var3 = str(var1) print(type(var3)) print(var3) a ==b a > b a < b help(pow) s = "sting" s *7 v = "macroeconomics" len(v) v[1 : 3], v [0 : 8] v.upper() v.lower() help(round) m = 12.576 round(m, 2) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.4 64-bit # language: python # name: python3 # --- # + ## load modules import sys sys.path.append("..") import pandas as pd import numpy as np import ast import matplotlib.pyplot as plt import seaborn as sns from copy import deepcopy from modeling.functions import get_features, modelling_fc from sklearn.model_selection import cross_val_predict from sklearn.svm import SVR from lightgbm import LGBMRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.preprocessing import MinMaxScaler # + ## read data data = pd.read_csv('../data/GEFCom2014Data/Wind/raw_data_incl_features.csv', \ parse_dates= ['TIMESTAMP'], index_col= 'TIMESTAMP' ) data.interpolate(method = 'linear', inplace= True) data = pd.get_dummies(data, columns = ['WD100CARD','WD10CARD'], drop_first=True) # - # train-test-split and get features data_train = data[:'2013-07-01 00:00:00'] data_test = data['2013-07-01 01:00:00':] feature_dict = get_features(data) # + result_df = pd.DataFrame() result_df = pd.read_csv(f'../results/RandomForestRegressor.csv', index_col='ZONE') result_df['ZONEID'] = range(1,11) result_df # - data_test[data_test.ZONEID == 1].index # + model = RandomForestRegressor() df_pred = pd.DataFrame(index=data_test[data_test.ZONEID == 1].index) for zone in result_df.ZONEID: fc = result_df[result_df.ZONEID == zone]['FC'].values[0] data_train_zone = data_train[data_train.ZONEID == zone] data_test_zone = data_test[data_test.ZONEID == zone] scaler = MinMaxScaler() X_train = data_train_zone[feature_dict[fc]] X_train = scaler.fit_transform(X_train) X_test = data_test_zone[feature_dict[fc]] X_test = scaler.transform(X_test) y_train = data_train_zone.TARGETVAR y_test = data_test_zone.TARGETVAR best_params = result_df[result_df.ZONEID == zone]['BEST_PARAMS'].values[0] model = model.set_params(**ast.literal_eval(best_params)) model.fit(X_train, y_train) y_pred = model.predict(X_test) y_pred = np.array([1 if value >= 1 else 0 if value <= 0 else value for value in 
y_pred]) df_pred[f'Zone {zone}'] = y_pred # - df_pred.to_csv('../results/RandomForest_Predictions.csv') df = pd.read_csv('../results/RandomForest_Predictions.csv', index_col='TIMESTAMP', parse_dates= ['TIMESTAMP']) df #.info() # + models_dict = {} prediction_dict = {} y_test_dict = {} X_test_dict = {} df_results = pd.DataFrame() for model_obj in models_obj: model_name = model_obj.__class__.__name__ print(model_name) df_model = result_dict[model_name] models_dict[model_name] = {} prediction_dict[model_name] = {} X_test_dict[model_name] = {} y_test_dict[model_name] = {} for zone in df_model.ZONEID.unique(): print(zone) fc = df_model[df_model.ZONEID == zone]['FC'].values[0] data_train_zone = data_train[data_train.ZONEID == zone] X_train = data_train_zone[feature_dict[fc]] y_train = data_train_zone.TARGETVAR best_params = df_model[df_model.ZONEID == zone]['BEST_PARAMS'].values[0] model = model_obj.set_params(**ast.literal_eval(best_params)) y_pred = cross_val_predict(model, X_train, y_train, cv=5) y_pred = np.array([1 if value >= 1 else 0 if value <= 0 else value for value in y_pred]) tmp_df = deepcopy(data_train_zone) tmp_df['MODEL'] = model_name tmp_df['PRED'] = y_pred tmp_df['RESIDUAL'] = tmp_df.PRED - tmp_df.TARGETVAR df_results = df_results.append(tmp_df) prediction_dict[model_name][zone] = y_pred models_dict[model_name][zone] = deepcopy(model) df_results # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Concepts # # ## DataArray and Dataset meta data handling # # This section describes details about how coords (and masks) of datasets and data arrays behave when slicing, combining, or inserting. # + import numpy as np import scipp as sc x = sc.Variable(dims=['x'], values=[1,2,3,4]) da = sc.DataArray(data=x, coords={'x':x}, masks={'x':sc.less(x, 2 * sc.units.one)}) ds = sc.Dataset({'a':da}) # - # Consider a data array `da` and a dataset `ds` with an aligned coord and an aligned mask. 
# The following conditions must hold: assert 'x' in da['x', 0:1].coords # range slice preserves coord assert 'x' in da['x', 0:1].masks # range slice preserves coord assert 'x' in da['x', 0].attrs # point slice converts coord to attr assert 'x' not in da['x', 0].coords assert 'x' in da['x', 0].attrs assert 'x' in da['x', 0].masks # point slice preserves masks as aligned assert sc.identical(ds['a']['x', 0:1], ds['x', 0:1]['a']) assert sc.identical(ds['a']['x', 0], ds['x', 0]['a']) # + assert 'x' in ds['a'].coords assert 'x' in ds['x', 0:1].coords assert 'x' not in ds['x', 0].coords # cannot have attr (unaligned coord) in dataset assert 'x' in ds['x', 0:1]['a'].coords assert 'x' in ds['a']['x', 0].attrs assert 'x' in ds['x', 0]['a'].attrs assert 'x' in ds['a'].masks assert 'x' in ds['x', 0:1]['a'].masks assert 'x' in ds['a']['x', 0].masks assert 'x' in ds['x', 0]['a'].masks # - # In operations, coords are compared: try: ok = da['x', 0:1] + da['x', 1:2] except: ok = False assert not ok # Mismatching attrs ("unaligned coords") are dropped: assert sc.identical(da + da['x', 1], da + da['x', 1].data) # Masks are ORed, there is no concept of "unaligned masks": assert not sc.identical(da + da['x', 0], da + da['x', 0].data) # A missing attr is interpreted as mismatch to ensure that: a = da['x', 0] b = da['x', 1] c = da['x', 2] assert sc.identical(a + (b + c), (a + b) + c) # Insertion order does not matter for attrs: a = da.copy() a.attrs['attr'] = 1.0 * sc.units.m b = da.copy() b.attrs['attr'] = 2.0 * sc.units.m ds1 = sc.Dataset() ds2 = sc.Dataset() ds1['a'] = a ds1['b'] = b ds2['b'] = b ds2['a'] = a assert sc.identical(ds1, ds2) # Insert into dataset with mismatching attrs drops attr: ds = sc.Dataset() ds.coords['x'] = x['x', 0] ds['a'] = da['x', 1] # Drops 'x' from 'a' assert sc.identical(ds.coords['x'], ds['a'].coords['x']) # shadowing is NOT supported # Masks of dataset items are independent: ds = sc.Dataset() masked1 = da.copy() masked1.masks['x'] = sc.less(x, 1 * sc.units.one) masked2 = da.copy() masked2.masks['x'] = sc.less(x, 2 * sc.units.one) assert not sc.identical(masked1, masked2) ds['a'] = masked1 ds['b'] = masked2 assert not sc.identical(ds['a'].masks['x'], ds['b'].masks['x']) # If there is no coord it is preserved for all items. # Adding a coord later makes the `meta` property invalid because of ambiguous name shadowing: ds = sc.Dataset() ds['a'] = da['x', 0] ds['b'] = da['x', 1] assert 'x' not in ds.coords assert 'x' in ds['a'].attrs assert 'x' in ds['b'].attrs ds.coords['x'] = x['x', 0] # introduce shadowing try: ds['a'].meta # raises because of shadowing except: ok = True else: ok = False assert ok del ds.coords['x'] edges = sc.Variable(dims=['x'], values=[1,2,3,4,5]) da.coords['x'] = edges assert sc.identical(sc.concatenate(da['x', :2], da['x', 2:], 'x'), da) assert sc.identical(sc.concatenate(da['x', 0], da['x', 1], 'x'), da['x', 0:2]) assert sc.identical(sc.concatenate(da['x', :-1], da['x', -1], 'x'), da) da_yx = sc.concatenate(da['x', :2], da['x', 2:], 'y') # create 2-D coord assert sc.identical(da_yx.coords['x'], sc.concatenate(da.coords['x']['x', :3], da.coords['x']['x', 2:], 'y')) # 2-D coords for a dimension prevent operations between slices that are not along that dimension: # + da_2d = sc.DataArray( data=sc.Variable(['y', 'x'], shape=[2, 2]), coords={ 'x':sc.Variable(['y', 'x'], values=np.array([[1, 2], [3, 4]])), 'y':sc.Variable(['y'], values=[3, 4])}) da_2d['x', 0] + da_2d['x', 1] # Same as with 1-D coord: x-coord differs but not aligned due to slice. 
try: # 'y' sliced, so 'x' coord is aligned and yields different values from slices of 2-D coord. da_2d['y', 0] + da_2d['y', 1] except RuntimeError: ok = False else: ok = True assert not ok # - # `coords` always refers to (aligned) coords in dataset, cannot add or erase via item since a new coord dict is created when getting a dataset item: try: ds['a'].coords['fail'] = 1.0 * sc.units.m except sc.DataArrayError: ok = False else: ok = True assert not ok assert 'fail' not in ds.coords ds.coords['xx'] = 1.0 * sc.units.m assert 'xx' in ds['a'].coords try: del ds['a'].coords['xx'] except sc.DataArrayError: ok = False else: ok = True assert not ok assert 'xx' in ds.coords # The same mechanism applies for coords, masks, and attrs of slices: try: da['x', 0].coords['fail'] = 1.0 * sc.units.m except sc.DataArrayError: ok = False else: ok = True assert not ok assert 'fail' not in da.coords # `meta` contains dataset coordinates as well as item attributes, cannot add or erase, since ambiguous: ds['a'].meta['fail'] = 1.0 * sc.units.m assert 'fail' not in ds['a'].meta ds['a'].attrs['attr'] = 1.0 * sc.units.m assert 'attr' in ds['a'].meta del ds['a'].meta['attr'] assert 'attr' in ds['a'].meta # Attributes are independent for each item, and show up in `meta` of the items: ds['a'].attrs['attr'] = 1.0 * sc.units.m ds['b'].attrs['attr'] = 2.0 * sc.units.m assert 'attr' in ds['a'].meta assert 'attr' in ds['b'].meta assert 'attr' not in ds.meta assert not sc.identical(ds['a'].attrs['attr'], ds['b'].attrs['attr']) del ds['a'].attrs['attr'] del ds['b'].attrs['attr'] # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.7 64-bit (''base'': conda)' # name: python3 # --- import numpy as np import matplotlib.pyplot as plt # ## Simple linear regression # #ref: Numerical Recipes # # Problem: fit a straight-line model to Stokes I vs Stokes Q/U/V # # $S_j(x) = a + bS_I(x)$ # # where $S_I$ is Stokes I, $S_j(x)$ represents Stokes Q, U or V and $x$ represents valid pixels (npx) in the image. # # This linear regression problem can be solved analytically. # # The solution for $a$ and $b$ is: # # $$ # a = \frac{S_{xx}S_y - S_xS_{xy}}{\Delta} \,\,\,\,\,\, b = \frac{SS_{xy}-S_xS_y}{\Delta} \,\,\,\,\,\, \Delta = SS_{xx} - (S_x)^2 # $$ # with # $$ # S_x = \sum_{i=1}^{npx}S_I(i) \,\,\,\,\,\, S_y = \sum_{i=1}^{npx}S_j(i) \,\,\,\,\,\, S_{xx} = \sum_{i=1}^{npx}S_I^2(i) \,\,\,\,\,\, S_{xy} = \sum_{i=1}^{npx}S_I(i) S_j(i) # $$ # The value of $S$ is determined from the noise in the data but it can be safely set to 1. If no information about the noise is available, these equations are modified slightly to avoid round-off errors.
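# As a quick cross-check of the closed-form solution (a minimal sketch, not part of the original notebook; only NumPy, already imported above, is needed), the same straight-line fit can be obtained with `np.polyfit`, which should agree with the $a$ and $b$ computed by the function below up to round-off:

x_check = np.linspace(0.0, 10.0, 50)
y_check = 1.5 + 2.0 * x_check
slope_check, offset_check = np.polyfit(x_check, y_check, 1)  # coefficients come highest-degree first: [slope, intercept]
print("offset = {}, slope = {}".format(offset_check, slope_check))  # expected: roughly 1.5 and 2.0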
def coeficients(x, y): #assumes input is 1D vector (flatten() or whatever) n_points = len(x) #Calculate S_x, S_y, S_xx, S_xy S_x = sum(x) S_y = sum(y) S_xx = sum(x*x) - S_x*S_x / n_points S_xy = sum(y*x) - S_x*S_y / n_points #Calculate off-ser and slope b = S_xy / S_xx a = (S_y - b*S_x)/ n_points return (a,b) # + x = np.arange(0,100) y = 20.3 + x * 4.3 + 20*np.random.rand(len(x)) (off_set, slope) = coeficients(x, y) print("Coefficients:\noff_set = {} \nslope = {}".format(off_set, slope)) plt.scatter(x, y,marker = "o", s = 10) plt.plot(x, off_set + slope*x, color = "red") plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import networkx as nx import numpy as np import pandas as pd import seaborn as sns;sns.set() from scipy import sparse import glob, os import matplotlib.pyplot as plt pattern="57" # + col=7 #area os.chdir("/home/garner1/dataset/dapi") fig = plt.figure(figsize=(10,8)) for file in glob.glob(pattern+"__*.txt"): print(file) data = np.loadtxt(file,delimiter='\t',skiprows=1,usecols=col) data = data/np.mean(data) sns.distplot(data,hist=False,label=file) os.chdir("/home/garner1/dataset/he") for file in glob.glob("MN"+pattern+"__*.txt"): print(file) data = np.loadtxt(file,delimiter='\t',skiprows=1,usecols=col) data = data/np.mean(data) sns.distplot(data,hist=False,label=file) os.chdir("/home/garner1/dataset/") plt.legend(loc='upper right') plt.savefig(pattern+".area.png") # + col=8 #perimeter os.chdir("/home/garner1/dataset/dapi") fig = plt.figure(figsize=(10,8)) for file in glob.glob(pattern+"__*.txt"): print(file) data = np.loadtxt(file,delimiter='\t',skiprows=1,usecols=col) data = data/np.mean(data) sns.distplot(data,hist=False,label=file) os.chdir("/home/garner1/dataset/he") for file in glob.glob("MN"+pattern+"__*.txt"): print(file) data = np.loadtxt(file,delimiter='\t',skiprows=1,usecols=col) data = data/np.mean(data) sns.distplot(data,hist=False,label=file) os.chdir("/home/garner1/dataset/") plt.legend(loc='upper right') plt.savefig(pattern+".perimeter.png") # + col=9 #eccentricity os.chdir("/home/garner1/dataset/dapi") fig = plt.figure(figsize=(10,8)) for file in glob.glob(pattern+"__*.txt"): print(file) data = np.loadtxt(file,delimiter='\t',skiprows=1,usecols=col) data = data/np.mean(data) sns.distplot(data,hist=False,label=file) os.chdir("/home/garner1/dataset/he") for file in glob.glob("MN"+pattern+"__*.txt"): print(file) data = np.loadtxt(file,delimiter='\t',skiprows=1,usecols=col) data = data/np.mean(data) sns.distplot(data,hist=False,label=file) os.chdir("/home/garner1/dataset/") plt.legend(loc='upper right') plt.savefig(pattern+".eccentricity.png") # + col=12 #circularity os.chdir("/home/garner1/dataset/dapi") fig = plt.figure(figsize=(10,8)) for file in glob.glob(pattern+"__*.txt"): print(file) data = np.loadtxt(file,delimiter='\t',skiprows=1,usecols=col) data = data/np.mean(data) sns.distplot(data,hist=False,label=file) os.chdir("/home/garner1/dataset/he") for file in glob.glob("MN"+pattern+"__*.txt"): print(file) data = np.loadtxt(file,delimiter='\t',skiprows=1,usecols=col) data = data/np.mean(data) sns.distplot(data,hist=False,label=file) os.chdir("/home/garner1/dataset/") plt.legend(loc='upper right') plt.savefig(pattern+".circularity.png") # + col=13 #mean intensity os.chdir("/home/garner1/dataset/dapi") fig = plt.figure(figsize=(10,8)) for file in glob.glob(pattern+"__*.txt"): 
print(file) data = np.loadtxt(file,delimiter='\t',skiprows=1,usecols=col) data = data/np.mean(data) sns.distplot(data,hist=False,label=file) os.chdir("/home/garner1/dataset/he") for file in glob.glob("MN"+pattern+"__*.txt"): print(file) data = np.loadtxt(file,delimiter='\t',skiprows=1,usecols=col) data = data/np.mean(data) sns.distplot(data,hist=False,label=file) os.chdir("/home/garner1/dataset/") plt.legend(loc='upper right') plt.savefig(pattern+".meanIntensity.png") # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exceptional Preference Mining (EPM) using Preference Matrix # # Author : # # February 2018 # # Context : I wanted to discover this interesting aspect of Data Science that is EPM. # # Note : This notebook is structured as a draft. It shows, step by step, how I implemented te beam search algorithm using toy data and toy score function, then implemented an EP score function, and finally applied it to the sushi dataset. # # **CONTENT** : # - **Subgroup Discovery** using a **beam search** (see more [here](http://www.cs.cmu.edu/afs/cs/project/jair/pub/volume17/gamberger02a-html/node4.html)) # # - **Subgroup selection** using a score of "Exceptional Preference", based on a **Preference Matrix** (PM) obtained from the ranking of some items. This idea is proposed by *et al* in their article [Exceptional Preference Mining](https://biblio.ugent.be/publication/8519644/file/8519856.pdf) # # - The whole method is applied to the [**Sushi preference dataset**](http://www.kamishima.net/sushi/), set A, in which 10 sushis were ranked by 5000 subjects, about whom we possess some information. # imports import pandas as pd import numpy as np np.random.seed(0) import matplotlib.pyplot as plt from copy import deepcopy import matplotlib.pyplot as plt import matplotlib.cm as cm import matplotlib.ticker as ticker import matplotlib.colors as mc # Playing with toy data, we first developp the beam search algorithm with a naive score for a subgroup : the mean sd of each object's rank, which is the sd of the matrix of ranks. # We will change this to a more meaningful score (see the end of the notebook). Usource = "toydata/Udata.tsv" Uranks = "toydata/Uranks3.tsv" Udata = pd.read_csv(Usource, sep = "\t", dtype=str) Udata.head(5) Rdata = pd.read_csv(Uranks, sep = "\t", header=None, names = ["obj"+str(i) for i in [1,2,3]]) #col 1 == obj 1 !!! Rdata.head(5) # **Important** : # # Data from this point on are considered clean in order to be fed to the Beam Search algorithm : # - no missing value in a row ; # - a column j in Rdata corresponds to the jth object of the set considered ; # - the ith value of the column is the rank of this object attributed by the ith subject of the Udata. # - subjects are described by attributes, which were binned into a finite set of values (2 in our toy data). # Beam Search ALGORITHIM # + # a condition is defined by an attribute which may take a value. 
ex : sexe = 1 class condition: def __init__(self, first_attr, value_taken, relation): self.attr_name = first_attr self.takes_value = value_taken self.relation = relation def __str__(self): return("[" + self.attr_name + " " + self.relation + " " + self.takes_value+"]") # a rule is a set of conditions class rule: def __init__(self, first_attr= None, first_value = None, first_relation = None, start_condition = None, start_rule = None): self.score = 0.0 self.conds = [] if start_condition != None: self.add(start_condition) elif first_attr != None and first_value!=None and first_relation!=None : self.add(condition(first_attr, first_value, first_relation)) elif start_rule != None : self.score = start_rule.score self.conds = deepcopy(start_rule.conds) # add a condition to the rule, ony if attr was not yet present def add(self, new_cond): if new_cond.attr_name in [c.attr_name for c in self.conds]: return(0) self.conds.append(new_cond) return(1) # generate the subset of ranking data from the two dataframes def get_subset(self, Udata, Rdata): attr_names = Rdata.columns all_data = pd.concat([Udata,Rdata],axis=1) for cond in self.conds: if cond.relation == "=": all_data = all_data[all_data[cond.attr_name] == cond.takes_value] elif cond.relation == "<": all_data = all_data[all_data[cond.attr_name] < cond.takes_value] elif cond.relation == ">": all_data = all_data[all_data[cond.attr_name] > cond.takes_value] return(all_data[attr_names].values) # for equal string representation. def reorder_conds(self): #TODO pass def __str__(self): out = [cond.__str__() for cond in self.conds] return(" & ".join(out)) # set in ascending order a list of rules based on their score def reorder(rules): return(sorted(rules, key = lambda x : x.score)) # a naive default score function for the subset discovery, for dev purpose def naive_score_function(ranks): return(np.std(ranks[:,0])) # TESTS # cond1 = condition("attr1", "2", "<") # cond2 = condition("attr2", "1",">") # rule1 = rule(start_condition = cond1) # rule1.add(cond2) # print(rule1) # - def beam_search(Udata, Rdata, beam_width, max_iter = 50,verbose = False, score_function = naive_score_function): # initialisation of the different conditions to combine L = [] for attr in Udata.columns : uniques = np.unique(Udata[attr].values) for index,val in enumerate(uniques) : L.append(condition(attr, val, "=")) # inf and sup cond without redundancy with equality if index >1 and index<(len(uniques)-2): L.append(condition(attr, val, "<")) L.append(condition(attr, val, ">")) # Initialization of the beam lists beam = [] new_beam = [] for c in range(beam_width): beam.append(rule()) new_beam.append(rule()) i = 0 while i < max_iter: for r in beam : # shuffling the rules for better generation. # Note : this is an inplace operation. 
np.random.shuffle(L) for l in L: new_r = rule(start_rule = r) # exit if condition was already there is_actually_new = new_r.add(l) if not is_actually_new: break # exit if rule was already found is_not_yet_in_new_beam = (new_r.__str__() not in [b.__str__() for b in new_beam]) if not is_not_yet_in_new_beam: break # generate subset and compute score Rsubset = new_r.get_subset(Udata, Rdata) new_score = score_function(Rsubset) if verbose: print(new_r.__str__(), "->",round(new_score,2)) # update new beam if new score is better than the worst of new beam if new_score > new_beam[0].score: new_r.score = new_score new_beam[0] = new_r new_beam = reorder(new_beam) if verbose: print("new beam =", [b.__str__() for b in new_beam]) # stop the search if no difference was made in this iteration if [b.__str__() for b in new_beam] == [b.__str__() for b in beam]: print("["+str(i)+"] BEAM =", [b.__str__() for b in beam]) break # update the beam beam = deepcopy(new_beam) if verbose: print("["+str(i)+"] BEAM =", [b.__str__() for b in beam]) print("-----------------------------------------") i+=1 if not verbose : print("["+str(i)+"] BEAM =", [b.__str__() for b in beam]) print("-----------------------------------------") return(beam, [b.__str__() for b in beam]) # TEST # b, b_str = beam_search(Udata, Rdata, 2, verbose= False) # print(b[-1].get_subset(Udata,Rdata)) # Now let us use our sushi data set, and find exceptionnal groups based on certain simple attribute i.e. that can be transformed in a meaningful manner in a categorical attribute (or already are categorical) # + Usource = "sushi3-2016/sushi3.udata" Uranks = "sushi3-2016/sushi3a.5000.10.order" # SUSHI : USER DATA Udata = pd.read_csv(Usource, sep = "\t", header=None, names = ["ID", "gender", "age", "time", "prefID", "regID", "EWID", "curprefID", "curregID", "curEWID", "eqprefs"]) # speed to fill the form 0 : slow, 1 : medium ,2 : fast Udata["velocity"], bins = pd.qcut(Udata["time"].values,3, labels = False, retbins=True) # print("Speed limits for the three levels : "+ str(bins) + ' (secs)') # dropping some data for computational efficiency of the Beam Search algo Udata = Udata.drop(["ID","prefID","regID","curprefID","curregID","curEWID","time"],axis=1) Udata = pd.DataFrame(Udata,dtype=str) # because this is an assumption of the Beam Search algorithm above ! Udata.head(5) # - #SUSHI : RANKING DATA sushis = ["ebi","anago","maguro","ika","uni","ikura","tamago","toro","tekka_maki","kappa_maki"] Rdata = pd.read_csv(Uranks, sep = " ", header=None,skiprows=1, names = ["X0","X10"]+sushis) Rdata = Rdata.drop(["X0", "X10"],axis=1) Rdata.head() # For a use in the notebook "Example worflow", we save those preprocessed data. Udata.to_csv(path_or_buf = "sushi3-2016/forEPM.sushi3.udata", sep= " ", index = False) Rdata.to_csv(path_or_buf = "sushi3-2016/forEPM.sushi3a.5000.10.order", sep= " ", index = False) # Here in the first colums is the id of the prefered sushi, and in the last is the idea of the less appreciated sushi... # # We want to change this so that the first columns is the rank given to the sushi of id 1, etc... # + # our ranks go from 1 to 10 for a normal human interpretation def transform_ranks(x): y = np.zeros_like(x) for rank, ID in enumerate(x): y[ID] = rank+1 return(y) # TEST : # transform_ranks(Rdata.iloc[0].values) Rdata = Rdata.apply(transform_ranks,axis=1) # - # Now let us define our Exceptional Preference score. 
It is based on the article [*Exceptional Preference Mining*](https://biblio.ugent.be/publication/8519644/file/8519856.pdf) by *et al* # + # we compute a preference matrix (PM) that gives the relative pairwise preferences for an item/sushi """ ranks : a numpy array, """ def fill_PM_with_row(PM, row, l, w): """ Fill a preference matrix (PM) Input : - PM : is a (w,w) (w = nb of items ranked) numpy array filled of zeros - row : the rankings of a subject to take into account - l, w : length and w of the dataset at end. l is useless here anc could be removed. """ for i in np.arange(w): for j in np.arange(w): # print(i,j, row, PM.shape) PM[i,j]+=np.sign(row[i]-row[j]) return(PM) def compute_PM(ranks): l,w = ranks.shape # print(l,w) if l==0: return(None) PM = np.zeros((w,w)) np.apply_along_axis(lambda row : fill_PM_with_row(PM, row, l, w),1, ranks) PM/= float(l) return(PM) # N : size of the origina dataset def compute_PM_score(ranks,globalPM, N, method = "Norm"): PM = compute_PM(ranks) #if subgroup is empty, return score=0 if PM is None : return(0.0) if method == "Norm": # Frobenius norm of the difference criteria = np.linalg.norm(PM-globalPM,ord='fro') else: pass # weighted by sqrt(size of the subgroup) return(criteria*np.sqrt(ranks.shape[0]/float(N))) # To visualise the preference matrix in a nice way def display_PM(PM, items_names): fig = plt.figure() # PREFERENCE MATRIX ax = fig.add_subplot(1,1,1) cax = ax.matshow(PM, cmap='seismic') # fig.colorbar(cax) # Axes ax.set_xticklabels([''] + items_names, rotation=90) ax.set_yticklabels([''] + items_names) # .title() # Label pour chaque index ax.xaxis.set_major_locator(ticker.MultipleLocator(1)) ax.yaxis.set_major_locator(ticker.MultipleLocator(1)) plt.show() # BARPLOT OF PREFERENCES fig = plt.figure() ax = fig.add_subplot(1,1,1) w = PM.sum(axis=1) norm01 = mc.Normalize(vmin=min(w),vmax=max(w)) plt.barh(list(range(10)),w, tick_label = items_names,color = cm.seismic(norm01(w))) plt.title("Barplot of preferences") plt.show() def display_best_PM(beam, Udata, Rdata, items_names, n=1, relative = False): assert n<=len(beam) for i in np.arange(n): print("PM #"+str(i)+" is obtained with rule : ") rule = beam[-(i+1)] print(rule.__str__() + " and score :" + str(rule.score)) ranks = rule.get_subset(Udata,Rdata) PM = compute_PM(ranks) if relative: print("PM differences are as follow :") final = (1.0/2)*(compute_PM(Rdata.values)-PM) display_PM(final,items_names) #(division by 2 limits the distance to the interval [1; 1] else: print("PM is :") final = PM display_PM(final,items_names) # TEST # compute_PM(np.random.randint(0,high=10,size=(10,4))) # - # First we compute the PM on the whole dataset # %time #really fast ! PM_5000 = compute_PM(Rdata.values) display_PM(PM_5000, Rdata.columns.tolist()) # Interpretation is easy. For instance, toro is globaly less appreciated than the other sushis, maguro as well to a certain extend. And tamago and kappa makis are loved by everyone... with this relationship : kappa_maki > tamago. # # This is clearly seen when we compute the sum over a row :) # The PM of the subgroup is compared to the PM of the whole dataset. This gives an **exceptionality score**. # # Here, we build a particular function to avoid computing the PM for the whole dataset at each step. 
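# Before wiring the score up to the sushi data, here is a quick sanity check of `compute_PM_score` on toy data (a hypothetical example, not part of the original analysis): a subgroup whose preference matrix equals the reference matrix scores 0, and any deviation is weighted by $\sqrt{n_{subgroup}/N}$.
toy_ranks = np.random.randint(1, 11, size=(20, 10))          # 20 "subjects" ranking 10 items
toy_global_PM = compute_PM(toy_ranks)
print(compute_PM_score(toy_ranks, toy_global_PM, N=20))      # identical PMs, so the score is 0
print(compute_PM_score(toy_ranks[:5], toy_global_PM, N=20))  # a sub-sample usually deviates, giving a positive score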
get_PM_score = lambda ranks : compute_PM_score(ranks, PM_5000, Rdata.shape[1],method = "Norm") # Let us now apply our previous Beam Search algorithm b, b_str = beam_search(Udata, Rdata, 5, max_iter = 50,verbose = True, score_function = get_PM_score) # Here are the most important rules found, as well as the PM associated. display_best_PM(b, Udata, Rdata, sushis, n=4, relative = True) # So we can immediatly see some interestings results here : # # - the youngest (age=0 : below 19 years old) are globally less attracted to uni and anago sushis compared to their elders; # - people below 29 years old (age < 2) follow the same trend but with less intensity ; the bigger size of the subgroup explains why it was nevertheless kept in the results; # - being old is associated with a net disdain for ikura and tamaro compared to the rest of the population, as well as for ebi sushis ; # - compared to the rest of the population (so to females), males (gender=0) have quite different tastes. The subgroups should be studied separately. # ## Conclusion # # **Credit** : this work was inspired by this [repository](https://github.com/alicemontel/M2DS_MusicProject) in which the previous EPM algorithm is naively implemented. # # Our contribution is the implementation of the Beam Search algorithm, as well as an improved computational efficiency in the computation of the PM using numpy matrix operations. # # Furthermore, this notebook explain a reproductible demarch to mine exceptionnal preference pattern in virtually any ranking dataset. # # Over the 24/02/2018 # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.4 64-bit # metadata: # interpreter: # hash: 1ee38ef4a5a9feb55287fd749643f13d043cb0a7addaab2a9c224cbe137c0062 # name: Python 3.8.4 64-bit # --- # ### Get Spotify credentials # + tags=[] # Get required info for token request import os from dotenv import load_dotenv load_dotenv() client_id = os.getenv("SPOTIFY_CLIENT_ID") client_secret = os.getenv("SPOTIFY_CLIENT_SECRET") token_url = "https://accounts.spotify.com/api/token" # Perform OAuth client credentials authorization import requests r = requests.post( "https://accounts.spotify.com/api/token", data={ "grant_type": "client_credentials" }, auth=(client_id, client_secret) ) r.raise_for_status() response = r.json() token = response["access_token"] headers = { "Authorization": f"Bearer {token}" } # - # ### Get Song a Day playlist data # + tags=[] playlists = { 2019: "6xJjyO5AGrZkyGkWYXfUX8", 2020: "3iNn4yKx9PxpSVYfRXkK8i" } years = sorted(playlists.keys()) tracks = { year: [] for year in years } # For each year's playlist, grab tracks for year in years: # Page through track list for the playlist next_url = f"https://api.spotify.com/v1/playlists/{playlists[year]}/tracks" while next_url: # Fetch the current page r = requests.get(next_url, headers=headers) r.raise_for_status() response = r.json() # Reshape each playlist item and add to our "tracks" list for the relevant year for playlist_item in response["items"]: img_sizes = { 640: "large", 300: "medium", 64: "small" } tracks[year].append({ "track_name": playlist_item["track"]["name"], "track_id": playlist_item["track"]["id"], "album_id": playlist_item["track"]["album"]["id"], "album_name": playlist_item["track"]["album"]["name"], **dict([ ("album_image_{size}".format(size=img_sizes[i['width']]), i["url"]) for i in playlist_item["track"]["album"]["images"] ]), "artists": [ { "name": 
artist["name"], "id": artist["id"] } for artist in playlist_item["track"]["artists"] ], "primary_artist_name": playlist_item["track"]["artists"][0]["name"], "primary_artist_id": playlist_item["track"]["artists"][0]["id"], "is_local": playlist_item["is_local"] }) # Take note of the URL of the next page of playlist items next_url = response["next"] # + tags=[] # Compile all tracks all_tracks = [] # Assign a date to each track from datetime import datetime, timedelta for year in years: curr_date = datetime(year, 1, 1) for track in tracks[year]: track["date"] = curr_date all_tracks.append(track) # add to big array curr_date += timedelta(days=1) print(tracks[2019][-1]) # - # ### Add to Pandas # + import pandas as pd pd.options.display.max_columns = None track_data_df = pd.DataFrame(all_tracks) track_data_df # - # ### Get audio features for each track # + tags=[] audio_features_data = [] # deduplicated ID of every track. # This ensures that our audio_features dataframe only contains each track once and we can # perform a many-to-one merge. track_ids = list(set([track["track_id"] for track in all_tracks if not track["is_local"]])) # Fetch data from spotify API for idx in range(0, len(track_ids), 100): ids_batch = track_ids[idx:idx+100] endpoint = "https://api.spotify.com/v1/audio-features" r = requests.get(endpoint, headers=headers, params={ "ids": ",".join(ids_batch) }) r.raise_for_status() keys = [ "id", # Concrete musical properties "mode", "key", "tempo", "time_signature", "duration_ms", # Mood "danceability", "energy", "speechiness", "acousticness", "instrumentalness", "liveness", "valence" ] audio_features_data.extend([ { k: track_data[k] for k in keys } for track_data in r.json()["audio_features"] ]) # Merge this new data into basic track data audio_features_df = pd.DataFrame(audio_features_data).rename(columns={ "id" : "track_id" }) tracks_df = pd.merge(track_data_df, audio_features_df, how="left", on="track_id", validate="many_to_one") tracks_df # - # ### Export data # + # Abbreviate export_df = tracks_df.drop(columns=["album_image_medium", "artists"]) # Shorten image URLs to save space def discard_cdn_prefix(url): prefix = "https://i.scdn.co/image/" return url[len(prefix):] if isinstance(url, str) and url.startswith(prefix) else url export_df["album_image_large"] = export_df["album_image_large"].apply(discard_cdn_prefix) export_df["album_image_small"] = export_df["album_image_small"].apply(discard_cdn_prefix) # Date index makes JSON export nicer export_df.set_index("date", inplace=True) # Export data # Formatted JSON for easy reading export_df.to_json("./tracks.json", orient="index", date_format="iso", indent=2) # CSV is the "clean" distributable data export; CSV takes up a lot less space than even non-indented JSON does export_df.to_csv("./tracks.csv") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Sequence-to-sequence (seq2seq) model # This is based on paper "Attention is all you need". # # Applications include machine translation. # # Seq2seq models follows the basic architecture of autoencoder, which uses multi-head attention mechanism. # # In the following, I will implement seq2seq model based on my own understanding step by step. This can be different from the model described in the seq2seq model paper and tensorflow seq2seq package. 
# + import re import collections import numpy as np import torch import torch.nn as nn use_gpu = True if use_gpu and torch.cuda.is_available(): device = torch.device('cuda') else: device = torch.device('cpu') # - # ## Multi-head attention # + def get_list(x, n, forced=False): """Expand x to a list of n If x is already a list of length n and force is False, then do nothing. Otherwise, return [x]*n """ if forced or not isinstance(x, collections.Iterable): return [x]*n if len(x) != n: return [x]*n else: return x class MultiLayerPerceptron(nn.Module): r"""Multi-layer perceptron Args: in_dim: int, input feature dimension hidden_dims: sequence of int, hidden dimensions; hidden_dims[-1] is the output dimension nonlinearity: default nn.ReLU(); can be changed to other nonlinearities dense: if True, use DenseNet architecture, i.e., concatenate all previous output as current input residual: if True, use ResNet architecture. That's to say: add the weighted average (decided by weighted_avg function) of previous outputs (after activations) to current affine output, and then pass it to nonlinearity last_nonlinearity: default False; if True, add nonlinearity to output layer. forward_input: if True, forward input layer to subsequential layers when either dense or residual is True return_all: If True, return a list of output; otherwise, return the output from the last layer residual_mode: default 'last', only add the previous (nonlinear) output to current affine output; only used when residual is True; If 'weighted', then calculate a default weighted average of all previous outputs and add it to current affine output before passing it to nonlinearity. To do: make the weight learnable Shape: Input: (N, *, in_dim) Output: if return_all is False, then (N, *, out_dim); else return a list of tensors (depend on forward_input, dense, and residual) Attributes: A list of weights and biases from nn.Linear modules; the dimensions depend on in_dims, hidden_dims, dense, residual, forward_input Examples: >>> x = torch.randn(3,4,5) >>> model = MultiLayerPerceptron(5, [5,5,5], dense=True) >>> model(x).shape """ def __init__(self, in_dim, hidden_dims, nonlinearity=nn.ReLU(inplace=True), bias=True, dense=True, residual=False, last_nonlinearity=False, forward_input=False, return_all=False, residual_mode='last'): super(MultiLayerPerceptron, self).__init__() num_layers = len(hidden_dims) self.dense = dense self.residual = residual self.last_nonlinearity = last_nonlinearity self.forward_input = forward_input self.return_all = return_all self.residual_mode = residual_mode # make sure the dimensions are right assert not (dense and residual) if residual: for i in range(1, num_layers): assert hidden_dims[i]==hidden_dims[i-1] if forward_input: assert in_dim==hidden_dims[0] # nonlinearity and bias can be a set layer by layer by providing a list input nonlinearities = get_list(nonlinearity, num_layers if last_nonlinearity else num_layers-1) biases = get_list(bias, num_layers) self.layers = nn.Sequential() for i in range(num_layers): out_dim = hidden_dims[i] self.layers.add_module('linear{}'.format(i), nn.Linear(in_dim, out_dim, bias=biases[i])) if i < num_layers-1 or last_nonlinearity: self.layers.add_module('activation{}'.format(i), nonlinearities[i]) # prepare for input dimension for next layer if dense: if i==0 and not forward_input: in_dim = 0 in_dim += hidden_dims[i] else: in_dim = hidden_dims[i] def weighted_avg(self, y, mode='last', weight=None): if mode == 'last': return y[-1] if model == 'unweighted': return torch.cat(y, 
dim=-1).mean(-1) if mode == 'weighted' and weight is None: w = torch.tensor([i for i in range(1, len(y)+1)], device=device) / (len(y)*(len(y)+1)/2) return (torch.cat(y, dim=-1) * w).sum(-1) def forward(self, x): if self.forward_input: y = [x] else: y = [] out = x for n, m in self.layers._modules.items(): out = m(out) if n.startswith('activation'): y.append(out) if self.dense: out = torch.cat(y, dim=-1) if n.startswith('linear') and self.residual and len(y)>0: out = out + self.weighted_avg(y, mode=self.residual_mode) if self.return_all: if not self.last_nonlinearity: y.append(out) return y else: return out class MultiHeadAttention(nn.Module): r"""Use multi-head self attention mechanism to learn sequence embedding. Args: in_dim: int; input feature dimension out_dim: int; output feature dimension key_dim: int; map input to (key, value) and use keys to calculate weights (attention) value_dim: int; if None, set it to be out_dim num_heads: int mask: if True, each element in a sequence only attend to itself and its left side; useful for decoder knn: int; only attend to the top k elements with the highest unnormalized attention Shape: Input: (N, seq_len, in_dim) for most cases; (N, *, seq, in_dim) is also possible Output: change the last dimension of input to out_dim Attributes: In the end all parameters are from nn.Linear modules. keys and values are two nn.ModuleList instances with num_heads of two-layer perceptron In the final layer we concatenate feature vector from all heads and pass it to a two-layer perceptron and get the final output Examples: >>> x = torch.randn(3,5,7) >>> model = MultiHeadAttention(7,11,13,17,19, mask=False, knn=1) >>> model(x).shape """ def __init__(self, in_dim, out_dim, key_dim, value_dim=None, num_heads=1, mask=False, knn=None): super(MultiHeadAttention, self).__init__() if value_dim is None: value_dim = out_dim self.num_heads = num_heads self.mask = mask self.knn = knn self.keys = nn.ModuleList([MultiLayerPerceptron(in_dim, [key_dim]*2) for i in range(num_heads)]) self.values = nn.ModuleList([MultiLayerPerceptron(in_dim, [value_dim]*2) for i in range(num_heads)]) self.out = MultiLayerPerceptron(value_dim*num_heads, [out_dim]*2) def forward(self, x): y = [] for i in range(self.num_heads): keys = self.keys[i](x) values = self.values[i](x) # inner product as unnormalized attention att = (keys.unsqueeze(-2) * keys.unsqueeze(-3)).sum(-1) if self.mask: # mask the upper triangle to be float('-Inf') tmp = att.new_tensor(range(att.size(-1))).expand_as(att) idx = torch.nonzero(tmp > tmp.transpose(-1,-2)) idx = tuple([idx[:,i] for i in range(idx.size(1))]) att[idx] = float('-Inf') if self.knn and self.knn < att.size(-1): # tricky: put Non-topk values to '-Inf' att.scatter_(-1, att.topk(att.size(-1) - self.knn, -1, largest=False)[-1], float('-Inf')) # Use softmax to normalize attention; # To do: alternative to softmax att = torch.nn.functional.softmax(att, dim=-1) # tricky: y.append((values.unsqueeze(-3) * att.unsqueeze(-1)).sum(-2)) return self.out(torch.cat(y, dim=-1)) class Encoder(nn.Module): """Stacked MultiHeadAttention layers """ def __init__(self, num_layers, in_dim, out_dim, key_dim, value_dim=None, num_heads=1, mask=False, knn=None, residual=True, normalization='layer_norm', return_all=False): super(Encoder, self).__init__() # We can set out_dim layer by layer, similar to key_dim, value_dim, num_heads out_dim = [in_dim] + get_list(out_dim, num_layers) key_dim = get_list(key_dim, num_layers) value_dim = get_list(value_dim, num_layers) num_heads = get_list(num_heads, 
num_layers) self.num_layers = num_layers self.residual = residual self.return_all = return_all self.normalization = normalization if residual: for i in range(num_layers): assert out_dim[i] == out_dim[i+1] self.attentions = nn.ModuleList([MultiHeadAttention( out_dim[i], out_dim[i+1], key_dim[i], value_dim[i], num_heads[i], mask, knn) for i in range(num_layers)]) self.perceptrons = nn.ModuleList([MultiLayerPerceptron(out_dim[i+1], [out_dim[i+1]]*2) for i in range(num_layers)]) def forward(self, x): y = [] out = x for i in range(self.num_layers): if self.residual: out = self.attentions[i](out) + out else: out = self.attentions[i](out) if self.normalization == 'layer_norm': out = nn.functional.layer_norm(out, (out.size(-1),)) # perceptron if self.residual: out = self.perceptrons[i](out) + out else: out = self.perceptrons[i](out) if self.normalization == 'layer_norm': out = nn.functional.layer_norm(out, (out.size(-1),)) y.append(out) if self.return_all: return y return out # + x = torch.randn(3,5,7) model = Encoder(1, 7, 7, 11) model(x).shape # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lab 1 import regex import os import collections # + flatten = lambda l: [item for sublist in l for item in sublist] def filesNames(): path = '../ustawy' absolute_path = os.path.realpath(path) + "\\" return [absolute_path + filename for filename in os.listdir(path)] def getFileTextRaw(filename): with open(filename, 'r', encoding="utf8") as content_file: return content_file.read() def getFileText(filename): return getFileTextRaw(filename).replace('-\n', '').replace('\n', ' ') filesTexts = [getFileText(filename) for filename in filesNames()] # - def getYear(text): main = regex.findall(' \d\d\d\d ', text) if len(main) > 0: return main[0][1:5] else: return None def findExternalPositions(text, year): positions = regex.findall(r'(nr(\d+),poz\.(\d+))', text) return [(x[1], x[2], year) for x in positions] def getExternalWithYear(text, defaultYear): byYear = regex.findall(r'((z(\d{4})r\.)?((nr\d+,poz\.\d+)(,)?)+(;)?)', text) return flatten([findExternalPositions(x[0], x[2] or defaultYear) for x in byYear]) def getExternals(text): year = getYear(text) preText = text.lower().replace(' ', '').replace('i', ',').replace('oraz', ';') externals = regex.findall(r'((dz\.u\.)((z(\d{4})r\.)?((nr\d+,poz\.\d+)(,)?)+(;)?)+)', preText) return flatten([getExternalWithYear(x[0], year) for x in externals]) def pretyForm(text): year = text[0][2] nr = text[0][0] poz = text[0][1] counts = text[1] return 'year: {}, nr: {}, poz: {}, counts: {}'.format(year, nr, poz, counts) def zad1(): filesTexts = [getFileText(filename) for filename in filesNames()] allExternals = flatten([getExternals(text) for text in filesTexts]) counter=collections.Counter(allExternals) result = [(x, counter[x]) for x in counter.keys()] result.sort(key = lambda x: x[1], reverse=True) return [pretyForm(x) for x in result] zad1() def zad2(text): pass # + def zad3Help(text): options_set = list(set(['ustawa', 'ustawy', 'ustawy', 'ustaw', 'ustawie', 'ustawom', 'ustawę', 'ustawy', 'ustawą', 'ustawami', 'ustawie', 'ustawach', 'ustawo', 'ustawy'])) p = regex.compile(r"\b\L\b", options=options_set) return len(regex.findall(p, text.lower())) def zad3(): filesTexts = [getFileText(filename) for filename in filesNames()] return sum([zad3Help(text) for text in filesTexts]) # - zad3Help("Count all 
occurrences of the word ustawa in all inflected forms (ustawa, ustawie, ustawę, etc.), and all spelling forms (ustawa, Ustawa, USTAWA), excluding other words with the same prefix (e.g. ustawić).") # Zadanie 3 zad3() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Building a super-fast Latin dictionary # SymSpell is very fast but you have to provide it with a frequency list generated from a large corpus in the language you need. Compared to the many high-quality frequency lists for modern languages (see the [links on Wolf Garbe’s Github page](https://github.com/wolfgarbe/SymSpell#frequency-dictionaries-in-other-languages)), which are based on corpora with billions of words, the Latin lists are quite small and focussed on Antiquity. # # Since we want to use SymSpell with late medieval texts, we have to generate our own frequency list. # ## Extract words from a Latin corpus # Prerequisite: Download the "latin library" included in CLTK (Classical Language Toolkit) from https://github.com/cltk/lat_text_latin_library (click on the button `Code` and select `Download ZIP`, then unzip the file). # # Save the full path to the unzipped folder: #corpus_path = "E:/Latin Corpus/lat_text_latin_library-master" # Windows path corpus_path = "/home/markus/cltk_data/latin/text/latin_text_latin_library/" # Linux path import glob from collections import Counter import pickle import re # Iterate through all txt files in the corpus and collect # the lines of text (except the first 16 and the last 8 lines # of each file because they contain English metadata): latin_corpus = [] file_counter = 0 for path in glob.iglob(f"{corpus_path}/**/*.txt", recursive=True): # Open every txt file and collect all lines of text # except the first 16 and the last 8 lines of each file. with open(path, "r", encoding="utf-8") as f: file_counter += 1 lines = f.readlines() for line in lines[16:-8]: latin_corpus.append(line) print(f"Processed {file_counter} files and saved them in the latin_corpus variable.") # + # Clean and normalize the lines and save them as a txt file. # (THIS CAN TAKE SOME TIME depending on the size of your corpus!) def normalize(text): normalization_patterns = {'v': 'u', 'V': 'U', 'j': 'i', 'J': 'I', 'ë': 'e', 'æ': 'ae', 'Æ': 'ae', 'œ': 'oe', 'Œ': 'Oe'} for pattern, replacement in normalization_patterns.items(): text = re.sub(pattern, replacement, text) return text cleaned_corpus = [] for line in latin_corpus: # Extract a list of words without punctuation, Greek letters etc. words = re.findall(r'[a-zA-Z]+', line) for word in words: word = normalize(word.lower()) cleaned_corpus.append(word) # - # Count the unique words: COUNTS = Counter(cleaned_corpus) print("Total # of words: ", sum(COUNTS.values())) print("# of unique words:", len(COUNTS)) tops = COUNTS.most_common(50) print("The 50 most frequent words:") for k, v in tops: print(f"- {k}: {v}") # Save the counts in a txt file ( \n \n…) with open("frequency_dictionary_la.txt", "w", encoding="utf-8") as f: for word in COUNTS: f.write(f"{word} {COUNTS[word]}\n") # ## Check the collected words # Unfortunately, the files are quite messy and sometimes suffer from very strange spelling and/or numerous transcription errors so that it fails in correcting some simple mistakes. The dictionary thinks, for example, that "mar" and "tio" were valid Latin words, which is not the case! 
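# As a quick illustration (an optional check, assuming the `COUNTS` counter from the corpus step above is still in memory), we can look at how often these fragments were picked up from the corpus:
for fragment in ["mar", "tio"]:
    print(fragment, COUNTS.get(fragment, 0))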
import os import pickle from symspellpy.symspellpy import SymSpell, Verbosity from hunspell import Hunspell sym_spell = SymSpell(3,7) # + # Load SymSpell dictionary from the frequency file we created above: # (format: \n \n…) # THIS CAN TAKE SOME TIME depending on the size of your corpus! dict_path = "frequency_dictionary_la.txt" #dict_path = "U:/frequency_dictionary_la.txt" sym_spell.load_dictionary(dict_path, 0, 1) # Cf. https://symspellpy.readthedocs.io/en/latest/examples/dictionary.html # - from pprint import pprint for word in ["mar", "tio", "chriftus"]: print(word) pprint([str(s) for s in sym_spell.lookup(word, Verbosity.CLOSEST)]) # To get rid of the typos and erroneous words, we check every word with another dictionary. Hunspell provides a very fast algorithm and there is a very good [Latin dictionary by and ](https://extensions.libreoffice.org/en/extensions/show/latin-spelling-and-hyphenation-dictionaries) that you can use with Hunspell. h = Hunspell('la_LA', hunspell_data_dir='./test_flask_app/my_app/dictionaries') # Check every word in SymSpell with Hunspell. # Collect and count the wrong words. counter = {"correct": 0, "wrong": 0} wrong_words = {} for word, count in sym_spell._words.items(): hunspell = h.spell(word) if hunspell: counter['correct'] += 1 else: counter['wrong'] += 1 wrong_words[word] = count #print(word, count, hunspell) # Let's see how many words were considered correct and wrong: counter # OK, Hunspell does not recognize Roman numbers ... and some other stuff: wrong_words # You can preserve at least some of the words categorized as "wrong" by applying some tricks on words that are not recognized at first glance (e.g. by replacing “ci“ → “ti”, “e“ → “ae”, capitalizing the word, etc.). # + # Delete false positives from the list of wrong words: # spelling variations, capitalization: def do_tricks(word): if len(word) < 4: # with short words, do_tricks produces errors return False tricks = {"ci": "ti", "ti": "ci", # fidutia "e": "ae", # predicare "e": "oe", "oe": "e", # foemina, foelix "ichi": "ihi", # nichil, nichilum "semet": "", # semetipsos etc. "y": "i", # ydoneus, consyderatus, paradysi } for pattern, replacement in tricks.items(): trick_from_left = word.replace(pattern, replacement, 1) trick_from_right = replacement.join(word.rsplit(pattern, 1)) for trick in [trick_from_left, trick_from_right]: if h.spell(trick): # Exit the for-loop as soon as a solution is found: return word return False wrong_words_cleaned = {} correct_words = {} for word, count in wrong_words.items(): if do_tricks(word): correct_words[word] = count elif len(word) > 2 and h.spell(word.capitalize()): correct_words[word] = count else: wrong_words_cleaned[word] = count print(f"Input: {len(wrong_words)} wrong words.") print(f"Output: {len(wrong_words_cleaned)} wrong words.") print(f"False positives: {len(wrong_words)-len(wrong_words_cleaned)} words.") # - # OK, that looks better already... correct_words wrong_words_cleaned h.spell("confideremus") # Let's store our result and write the wrong words to file: with open("wrong_words_cleaned.txt", "w", encoding="utf-8") as f: line = [] for word, count in wrong_words_cleaned.items(): f.write(word+"\n") # ## Apply corrections to the SymSpell dictionary # Now that we have a list of wrong words, we should delete them from SymSpell. Furthermore we can add/delete some hand-picked words I collected during the last two years. # Delete the false entries we found above: # THIS MAY TAKE SOME TIME depending on the size of your word list! 
for word, count in wrong_words_cleaned.items(): sym_spell.delete_dictionary_entry(word) print(f"The cleaned dictionary has {len(sym_spell._words)} entries.") # Add some hand-picked words to the dictionary: counter = 0 with open("manually_added_words.csv", "r", encoding="utf-8") as f: for word in f.readlines(): if word.strip() in sym_spell._words: pass else: counter += 1 sym_spell.create_dictionary_entry(word.strip(), 10) print(f"Added {counter} words.") # Delete some hand-picked words from the dictionary: counter = 0 with open("manually_deleted_words.csv", "r", encoding="utf-8") as f: for word in f.readlines(): if word.strip() in sym_spell._words: sym_spell.delete_dictionary_entry(word.strip()) counter += 1 print(f"Deleted {counter} words.") # + # Save the improved SymSpell dictionary as a pickle stream # Help on pickle: https://docs.python.org/3/library/pickle.html with open("symspell_dictionary_LA.pickle", "wb") as f: sym_spell.save_pickle_stream(f) print(f"The cleaned dictionary has {len(sym_spell._words)} entries.") # - # ## Build bigram list # The SymSpell dictionary uses a bigram list to decide whether two tokens belong together forming one word or not. Example: neither the dictionary nor the list of bigrams contain "sempermaior" (which is a mistake) whereas "semper maior" (correct) is in the bigram list but not the dictionary (which includes both words individually). # # To build a proper list of bigrams, we should to be aware of punctuation. Therefore, we transform the corpus into a list of sentences, i.e. we tokenize the corpus. Then, we take each sentence and collect the bigrams. # + # Let's build a tokenizer and transform our corpus into a list of sentences. import re def tokenize_sentences(list_of_lines, sentence_separators=r"([,;.?!:\(\)\[\]])\s?"): """ Transforms a list of text lines into a list of sentences using the sentence_separators (i.e. a regular expression). """ list_of_lines.append("#") # add dummy entry to simplify the moving window logic below sentences = [" "] # add dummy entry to simplify the moving window logic below first_line = True for line in list_of_lines: line = line.strip() # get rid of spaces, line breaks etc. at the beginning/end of the line. # Delete abbreviations like "a.d.": line = re.sub(r"(\w)(\.)(\w)(\.)?", r"", line) # Identify the tokens using the sentence_separators and split the line: line = re.sub(sentence_separators, r"\1$TOKEN$", line) junks = re.split(r"\$TOKEN\$", line) # Iterate over the junks and decide if and how to join them: for junk in junks: junk = junk.strip() if junk != "" and junk not in "([": if sentences[-1][-1] in "[-=]": # The previous line ends with a hyphen: cut the hyphen off and join: sentences[-1] = sentences[-1][:-1] + junk elif sentences[-1][-1] in ",;.?!:)]": # The previous line ends with punctuation: cut it off and # add the current junk to the list of sentences: sentences[-1] = sentences[-1][:-1] sentences.append(junk) elif sentences[-1][-1] in "([": # The previous line ends with an opening parenthesis/bracket: # Cut the parenthesis/bracket and add the junk as a new sentence: sentences[-1] = sentences[-1][:-2] sentences.append(junk) else: # No hyphen in the previous line: add a space: sentences[-1] = sentences[-1] + " " + junk first_line = False # Fix the first and the last sentence: sentences[0] = sentences[0].strip() sentences = sentences[:-1] return sentences # return the sentences except the dummy test = ["Hello World! This is\n", "Python, i.e. 
the most popu-\n", "lar (and eastiest to learn)\n","programming\n", "language today!\n", "(so far)", "Here we go!", "We are splitting", "sentences."] tokenized_corpus = tokenize_sentences(latin_corpus) # + # Let's extract the bigrams from the tokenized_corpus. # THIS CAN TAKE A WHILE. raw_bigrams = [] for sentence in tokenized_corpus: # Extract a list of words without punctuation, Greek letters etc. # (This is not a perfect solution because it produces some bad bigrams # because if the sentence includes numbers, Greek letters or other unusual # characters, they will be ignored, which can lead to bigrams that are not # actually included in the corpus. We would need to build a more complex # tokenizer to solve this problem.) words = re.findall(r'[a-zA-Z]+', sentence.lower()) if len(words) > 1: for idx, word in enumerate(words[1:]): # Filter out erroneous words by checking them with SymSpell: if words[idx] in sym_spell._words and word in sym_spell._words: raw_bigrams.append(f"{words[idx]} {word}") # Count the unique bigrams: BIGRAM_COUNTS = Counter(raw_bigrams) print("Total # of bigrams: ", sum(BIGRAM_COUNTS.values())) print("# of unique bigrams:", len(BIGRAM_COUNTS)) bigram_tops = BIGRAM_COUNTS.most_common(50) print("The 50 most frequent bigrams:") for k, v in bigram_tops: print(f"- {k}: {v}") # - # Save the bigrams as a txt file ( \n…): # THIS MAY TAKE SOME TIME! The resulting file is quite big (~ 80 MB) with open("frequency_dictionary_bigrams_la.txt", "w", encoding="utf-8") as f: for bigram in BIGRAM_COUNTS: f.write(f"{bigram} {BIGRAM_COUNTS[bigram]}\n") # ## Integrate the bigrams into the SymSpell dictionary # + # Load the list of unique bigrams from the frequency file we created above: # (format: \n…) # THIS MAY TAKE SOME TIME depending on the size of your corpus! bigram_path = "frequency_dictionary_bigrams_la.txt" #bigram_path = "U:/frequency_dictionary_bigrams_la.txt" sym_spell.load_bigram_dictionary(bigram_path, 0, 2) # Cf. https://symspellpy.readthedocs.io/en/latest/examples/dictionary.html # Cf. https://symspellpy.readthedocs.io/en/latest/api/symspellpy.html#symspellpy.symspellpy.SymSpell.load_bigram_dictionary # - # ## Properly saving the word frequencies and bigrams as a pickle file # There is one problem with the `save_pickle_stream` method of SymSpellPy: it does not save the bigrams! (The [documentation](https://symspellpy.readthedocs.io/en/latest/api/symspellpy.html#symspellpy.symspellpy.SymSpell.save_pickle_stream) tells us so as well!) To overcome this problem we have to save and load the pickled dictionary using custom functions: import pickle from symspellpy.symspellpy import SymSpell, Verbosity def my_save_pickle(sym_spell, pickled_counts): """ Custom saver for SymSpellPy dictionaries that are pre-pickled. The built-in pickle function of SymSpellPy compresses the pickle stream, which slows down loading the pickle file. Therefore, this functions save an un-compressed stream. Since the built-in load_pickle function will not work with un-compressed streams, we need to build custom loading function as well. 
""" with open(pickled_counts, "wb") as f: pickle_data = { "deletes": sym_spell._deletes, "words": sym_spell._words, "max_length": sym_spell._max_length, "data_version": sym_spell.data_version, "bigrams": sym_spell._bigrams, } pickle.dump(pickle_data, f) print("Pickled", len(sym_spell._words), "words and", len(sym_spell.bigrams), "bigrams to", pickled_counts) return True my_save_pickle(sym_spell, "symspell_dictionary_LA.pickle") # # Test the new SymSpell dictionary # Let's load the new frequency lists into a new SymSpell dictionary and give it a test! # + # Load SymSpell dictionary from pickled file (takes some seconds!): def load_my_pickle(dict_path): """ Custom loader for SymSpellPy dictionaries that are pre-pickled with my custom safer. The pickle stream is not compressed (reduces loading time) and the pickle also contains bigrams (not only unique words). """ with open(dict_path, "rb") as f: pickle_data = pickle.load(f) return pickle_data new_sym_spell = SymSpell(3, 7) dict_path = "symspell_dictionary_LA.pickle" pickled_data = load_my_pickle(dict_path) new_sym_spell._deletes = pickled_data["deletes"] new_sym_spell._words = pickled_data["words"] new_sym_spell._bigrams = pickled_data['bigrams'] new_sym_spell._max_length = pickled_data['max_length'] new_sym_spell.data_version = pickled_data['data_version'] # - f"The new dictionary contains {len(new_sym_spell._words)} words and {len(new_sym_spell._bigrams)} bigrams" # Let's how the improved dictionary deals with words it had problems with at the beginning: for word in ["cio", "cum", "dig", "iohele"]: try: print(word, new_sym_spell._words[word]) except: print(word, "ERROR") from pprint import pprint for word in ["mar", "tio", "cio", "chriftus"]: print(word) pprint([str(s) for s in sym_spell.lookup(word, Verbosity.CLOSEST)]) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "4cbb17d9-f8da-4896-a9b7-cc314875e30b", "showTitle": false, "title": ""} # basic libraries from ftplib import FTP import os import py7zr from delta.tables import * from pyspark.sql import functions as psf import icecream import pandas as pd from pyspark.sql.types import * # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "d22eff65-0bda-4e25-8f6c-62553dfd8952", "showTitle": false, "title": ""} ### ftp parameters ftpServerUrl = 'ftpupload.net' ftpServerPort=21 ftpPath='/htdocs/nhsgp/' ftpUserName ='topsecret***' ftpPassword ='***' # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "97016874-90b4-4e20-942f-874a3ebea6e7", "showTitle": false, "title": ""} #get ftp file list localPath = '/dbfs/' try: ftp= FTP(ftpServerUrl) ftp.login(user=ftpUserName, passwd=ftpPassword) ftp.cwd(ftpPath) files = [ f for f in ftp.nlst() if f.endswith('.7z') or f.endswith('.csv')or f.endswith('.json') or 'PDPI BNFT.7z' in f or 'BNF Snomed Mapping data' in f] # f.endswith('.7z') #testing: f=='T201606PDPI+BNFT.7z' print('downloading:') for f in files: print(f) localfile = open(localPath+f, "wb") #open(f,'wb') ftp.retrbinary('RETR '+f, localfile.write,1024) localfile.close() except : print("FTP Error: ") ftp.quit() # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f1727b74-df38-4244-a7c7-53d3eb54e04b", "showTitle": false, "title": ""} # extract single 7z files def extract7zFile(filePathAndName, fileRemoved): if(filePathAndName.endswith('.7z')): archive = 
py7zr.SevenZipFile(filePathAndName, mode='r') archive.extractall(path='/dbfs/') archive.close() if (fileRemoved): os.remove(filePathAndName) else: print('Not a 7Z file!') def extract7zMultiVolumn(filePath, fileName, fileRemoved): mulitVolumnFiles = sorted([ f for f in os.listdir(filePath) if fileName in f]) tempFilePath = filePath +'tempAll.7z' with open(tempFilePath, 'ab') as outfile: # append in binary mode for fname in mulitVolumnFiles: with open(filePath+fname, 'rb') as infile: # open in binary mode also # print(filePath+fname) outfile.write(infile.read()) extract7zFile(tempFilePath, fileRemoved) #remove multivolumn files if(fileRemoved): for f in mulitVolumnFiles: os.remove(filePath+f) #convert xlsx to csv due to poor performance of spark-excel def convertXlsxToCsv(filepath, filename): tempInputPath= os.path.join(filepath, filename) tmpXpd = pd.read_excel(tempInputPath, sheet_name='November 20',engine='openpyxl') tmpFilename = str.replace(filename, '.xlsx','') tmpOutxpath = localPath + tmpFilename +'.csv' os.remove(tempInputPath) tmpXpd.to_csv(tmpOutxpath) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "9d76eb23-6b59-4902-a44f-67be359bd1ca", "showTitle": false, "title": ""} filePath= '/dbfs/' fileName = 'T201901PDPI BNFT' extract7zMultiVolumn(filePath,fileName,True) fileName = 'T201902PDPI BNFT' extract7zMultiVolumn(filePath,fileName,True) fileName = 'T201903PDPI BNFT' extract7zMultiVolumn(filePath,fileName,True) #convert xlsx files to csv for f in os.listdir('/dbfs'): if(f.endswith('.xlsx')): convertXlsxToCsv(filePath, f) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "9876478e-2508-4547-aedd-ff0c674904cb", "showTitle": false, "title": ""} #move files from localfolder into dbfs tmp folder for f in os.listdir('/dbfs'): if(f.endswith('.csv') or f.endswith('.json')): dbutils.fs.cp("file:/dbfs/"+f,"dbfs:/tmp/") os.remove('/dbfs/'+f) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "d6975426-20e6-4a8d-a182-1b72276b6806", "showTitle": false, "title": ""} #check path def checkPathExist(checkPath): try: dbutils.fs.ls(checkPath) return True except Exception as e: if 'java.io.FileNotFoundException' in str(e): return False #save to table def saveIntoLandingTable(tableName, inputDf): print("saving table {}...".format(tableName)) spark.sql("use nhsgp") spark.sql("DROP TABLE IF EXISTS {}".format(tableName)) tempPath = "/user/hive/warehouse/{}".format(tableName) if(checkPathExist(tempPath)): dbutils.fs.rm(tempPath, True) inputDf.write.format("delta").save(tempPath) spark.sql("CREATE TABLE {} USING DELTA LOCATION '{}'".format(tableName, tempPath)) #append fact table def appenFactTable(tableName, newDf): print("saving table {}...".format(tableName)) spark.sql("use nhsgp") tempPath = "/user/hive/warehouse/{}/".format(tableName) existingTable = DeltaTable.forPath(spark, tempPath) existingTable.alias("old").merge( newDf.alias("new"), "1 = 2") \ .whenNotMatchedInsert(values = { "SHA": "new.SHA", "PCT": "new.PCT", "PRACTICE": "new.PRACTICE", "BNF_CODE": "new.BNF_CODE", "BNF_NAME": "new.BNF_NAME", "ITEMS": "new.ITEMS", "NIC": "new.NIC", "ACT_COST": "new.ACT_COST", "QUANTITY": "new.QUANTITY", "PERIOD": "new.PERIOD" } ) \ .execute() #reading a nested JSON file def getColumnMappingsFromJson(filepath): jdf = spark.read.json(filepath,encoding='utf-8') jdf = jdf.select( psf.array(psf.expr('bnf_code.*')).alias('bnf_code'), psf.array(psf.expr('bnf_name.*')).alias('bnf_name'), psf.array(psf.expr('practice.*')).alias('practice') ) jdf = 
(jdf.withColumn("Code_Name_Practice", psf.explode(psf.arrays_zip("bnf_code", "bnf_name","practice"))) .select("Code_Name_Practice.bnf_code", "Code_Name_Practice.bnf_name", "Code_Name_Practice.practice")) return jdf # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "04f96442-45c6-476a-83e7-9787e691ddad", "showTitle": false, "title": ""} #list temp files and load into landing storage rawFiles = [ f for f in dbutils.fs.ls('/tmp') if f.name.endswith('.csv') or f.name.endswith('.json') or f.name.endswith('.xlsx')] factDescriptionFileCounter = 0 for f in rawFiles: print("Processing file {}".format(f.name)) if(f.name.endswith('.json')): loadDf = getColumnMappingsFromJson(f.path) saveIntoLandingTable("landing_column_mappings",loadDf) if('CHEM SUBS' in f.name): loadDf = spark.read.csv(f.path, inferSchema=True, header=True, encoding='UTF-8') loadDf= loadDf.withColumnRenamed("CHEM SUB","ChemSub") saveIntoLandingTable("landing_dim_chem",loadDf) if('ADDR BNF' in f.name): loadDf = spark.read.csv(f.path, inferSchema=True, header=True, encoding='UTF-8') saveIntoLandingTable("landing_dim_practices",loadDf) if('BNF Snomed Mapping data' in f.name): localschema = StructType() \ .add("index",IntegerType(),True) \ .add("BNF Code",StringType(),True) \ .add("BNF Name",StringType(),True) \ .add("SNOMED Code",StringType(),True) loadDf = spark.read.csv(f.path, schema=localschema, header=True, encoding='UTF-8') loadDf= loadDf.withColumnRenamed('BNF Code','BNF_Code') loadDf= loadDf.withColumnRenamed('BNF Name','BNF_Name') loadDf= loadDf.withColumnRenamed('SNOMED Code','SNOMED_Code') saveIntoLandingTable("landing_dim_BnfSnomedMapping",loadDf) # if('BNF Snomed Mapping data' in f.name): # loadDf = spark.read.format("com.crealytics.spark.excel") \ # .option("inferSchema", "true") \ # .option("treatEmptyValuesAsNulls", "true") \ # .option("header", "true") \ # .option("sheetName", "November 20") \ # .load(f.path) # loadDf= loadDf.withColumnRenamed('BNF Code','BNF_Code') # loadDf= loadDf.withColumnRenamed('BNF Name','BNF_Name') # loadDf= loadDf.withColumnRenamed('SNOMED Code','SNOMED_Code') # saveIntoLandingTable("landing_dim_BnfSnomedMapping",loadDf) if('PDPI BNFT.csv' in f.name): print(f.name) loadDf = spark.read.csv(f.path, inferSchema=True, header=True, encoding='UTF-8') loadDf= loadDf.withColumnRenamed("BNF CODE","BNF_CODE") loadDf= loadDf.withColumnRenamed("BNF NAME","BNF_NAME") loadDf= loadDf.withColumnRenamed("ACT COST","ACT_COST") if(factDescriptionFileCounter == 0 ): saveIntoLandingTable("landing_fact_predescription",loadDf) print("Loading fact file No. {}".format(factDescriptionFileCounter)) else: appenFactTable("landing_fact_predescription",loadDf) print("Loading fact file No. 
{}".format(factDescriptionFileCounter)) factDescriptionFileCounter=factDescriptionFileCounter+1 # remove imported raw file # dbutils.fs.rm(f.path) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "81c40388-b0ae-4613-8d07-13432922bfcc", "showTitle": false, "title": ""} # # clear temp files and tables spark.sql('use nhsgp') spark.sql("show tables").show(truncate=False ) spark.sql("select count(1) from landing_fact_predescription").show() # spark.sql("select count(1) from landing_dim_practices").show() # spark.sql("select count(1) from landing_dim_chem").show() # spark.sql("select count(1) from landing_column_mappings").show() # spark.sql("select count(1) from landing_dim_chem").show() # spark.sql("select count(1) from landing_dim_bnfsnomedmapping").show() # spark.sql("drop table landing_fact_predescription ") # spark.sql("drop table landing_dim_practices ") # spark.sql("drop table landing_dim_chem ") # spark.sql("drop table landing_column_mappings") # clean all loading files # os.listdir('/dbfs') # dbutils.fs.ls('/tmp') # for f in os.listdir('/dbfs'): # os.remove('/dbfs/'+f) # # !ls -l '/dbfs' # dbutils.fs.rm(f.path) # rawFiles = [ f for f in dbutils.fs.ls('/tmp') if f.name.endswith('.csv') or f.name.endswith('.json') or f.name.endswith('.xlsx') ] # for f in rawFiles: # dbutils.fs.rm(f.path) dbutils.fs.ls('/tmp') # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "43c8fd9d-b7c4-4b52-a13f-a06bff0c11cb", "showTitle": false, "title": ""} # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## Loading libraries and setting up directories # + from __future__ import division import os import urllib, cStringIO import pymongo as pm from collections import Counter import matplotlib import matplotlib.patheffects as PathEffects from matplotlib import pylab, mlab, pyplot # %matplotlib inline from IPython.core.pylabtools import figsize, getfigs plt = pyplot import seaborn as sns sns.set_context('poster') sns.set_style('white') import numpy as np import scipy.stats as stats import pandas as pd import json import re from PIL import Image import base64 import sys from svgpathtools import parse_path from IPython.display import clear_output import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed") # + # directory & file hierarchy proj_dir = os.path.abspath('../..') analysis_dir = os.getcwd() results_dir = os.path.join(proj_dir,'results') plot_dir = os.path.join(results_dir,'plots') csv_dir = os.path.join(results_dir,'csv') features_dir= os.path.join(results_dir,'features') exp_dir = os.path.abspath(os.path.join(proj_dir,'experiments')) sketch_dir = os.path.abspath(os.path.join(proj_dir,'sketches')) ## add helpers to python path if os.path.join(proj_dir,'analysis') not in sys.path: sys.path.append(os.path.join(proj_dir,'analysis')) if not os.path.exists(results_dir): os.makedirs(results_dir) if not os.path.exists(plot_dir): os.makedirs(plot_dir) if not os.path.exists(csv_dir): os.makedirs(csv_dir) if not os.path.exists(features_dir): os.makedirs(features_dir) ## add helpers to python path if os.path.join(proj_dir,'analysis') not in sys.path: sys.path.append(os.path.join(proj_dir,'analysis')) # Assign variables within imported analysis helpers import 
analysis_helpers as h if sys.version_info[0]>=3: from importlib import reload reload(h) # - # ## Setting up connection to mongo and creating main dataframe # + #### set vars auth = pd.read_csv('auth.txt', header = None) # this auth.txt file contains the password for the sketchloop user pswd = auth.values[0][0] key = auth.values[0][0] user = 'sketchloop' host = 'rxdhawkins.me' ## cocolab ip address # have to fix this to be able to analyze from local import pymongo as pm conn = pm.MongoClient('mongodb://sketchloop:' + pswd + '@127.0.0.1') db = conn['semantic_parts'] coll = db['sketchpad_basic'] # which iteration name should we use? iterationName = 'pilot0' # - num_sketches = coll.find({'iterationName':iterationName}).count() print 'We have {} annotations so far.'.format(num_sketches) jefan = ['A1MMCS8S8CTWKU','A1MMCS8S8CTWKV','A1MMCS8S8CTWKS'] hawkrobe = ['A1BOIDKD33QSDK'] kmukherjee = ['A1WU4IHJNQGVAY'] researchers = jefan + hawkrobe + kmukherjee unique_assignments = coll.find({'iterationName':iterationName}).distinct('aID') print 'We have had {} unique sessions'.format( len(unique_assignments)) # + ## get list of unique_assignments unique_assignments = coll.find({'iterationName':iterationName}).distinct('aID') ### initialize a bunch of stuff orig_gameID = [] # the gameID from which this sketch was sourced outcome =[] #original outcome for that trial- true/false orig_trial_num = [] # the trialnum in the original game from which this sketch was sourced -- sketch_id = [] # concatenation of orig_gameID and orig_trial_num -- assignmentID = [] # the session in which this annotation was collected -- annotation_id = [] # the unique ID for each annotation trial (different for each session the same sketch appears in) category = [] # e.g., "chair" target = [] # e.g., "inlay" condition = [] # e.g., "closer" vs. "further" or "repeated" vs. 
"control trial_num = [] workerID = [] #mTurk workerId spline_id =[] #unique spline identifier time_submitted = [] # when the participant clicked "next sketch" time_labeled = [] # unique to each spline labeled time_clicked = [] # when this spline was clicked/selected num_strokes_in_sketch = [] # how many strokes in this sketch num_splines_in_sketch = [] # how many spline elements in this sketch stroke_num = [] # which stroke number this labeled spline came from cumulative_spline_num = [] # spline index in the cumulative spline sequence for the entire sketch within_stroke_spline_num = [] # spline index for the current stroke cumulative_bout_num= [] #which bout of annotation the spline belonged to part_bout_num =[] #which part-specific bout of annotation the spline belonged to label = [] # the label provided by the participant spline_svg_string = [] # the svg spline string that earned this label sketch_svg_string = [] # the entire svg string correponding to this sketch annotation_flag = [] # this is True if all splines were labeled as the same thing annotation_spline_id = [] #unique identifier for specific annotation of a spline png=[] #png string for the annotated sketch stroke_id=[] timestamp=[] ## loop through all the unique assignments that have submitted things for this_assignment, aID in enumerate(unique_assignments): if this_assignment%10==0: print 'Analyzing sketches from assignment {} of {} ...'.format(this_assignment, len(unique_assignments)) clear_output(wait=True) ### get all the sketch recs for this assignment sketch_recs = coll.find({'$and': [{'iterationName':iterationName}, {'aID':aID}]}).sort('time') try: for sketch_ind,sketch in enumerate(sketch_recs): ## get annotations embedded within record sketch_cat = sketch['category'] annotations_string = sketch['annotations'] ## convert to json dictionary _annotations_dict = json.loads(annotations_string) annotations_dict = _annotations_dict[0][sketch_cat] _timestamp = coll.find({'$and': [{'iterationName':iterationName}, {'aID':aID}]}).distinct('time')[sketch_ind] png_string = _annotations_dict[0]['png'] num_splines = len(annotations_dict) for annotation in annotations_dict: assert sketch['numSplines']==num_splines ## get spline-level metadata workerID.append(h.encode(key,sketch['wID'])) label.append(annotation['label']) stroke_num.append(annotation['strokeNum']) spline_svg_string.append(annotation['svgString']) cumulative_spline_num.append(annotation['cumulativeSplineNum']) within_stroke_spline_num.append(annotation['withinStrokeSplineNum']) time_clicked.append(annotation['timeClicked']) time_labeled.append(annotation['timeLabeled']) spline_id.append('{}_{}_{}'.format(sketch['originalGameID'],sketch['originalTrialNum'],annotation['cumulativeSplineNum'])) stroke_id.append('{}_{}_{}'.format(sketch['originalGameID'],sketch['originalTrialNum'],annotation['strokeNum'])) cumulative_bout_num.append(annotation['boutNum']) part_bout_num.append(annotation['partBoutNum']) ## get sketch-level metadata orig_gameID.append(sketch['originalGameID']) outcome.append(sketch['originalOutcome']) orig_trial_num.append(sketch['originalTrialNum']) sketch_id.append('{}_{}'.format(sketch['originalGameID'],sketch['originalTrialNum'])) annotation_id.append('{}_{}_{}'.format(sketch['originalGameID'],sketch['originalTrialNum'],sketch['aID'])) assignmentID.append(sketch['aID']) category.append(sketch['category']) target.append(sketch['target']) png.append(png_string) timestamp.append(_timestamp) condition.append(sketch['condition']) 
time_submitted.append(sketch['time']) trial_num.append(sketch['trialNum']) num_splines_in_sketch.append(sketch['numSplines']) num_strokes_in_sketch.append(sketch['numStrokes']) sketch_svg_string.append(sketch['svg']) annotation_flag.append(sketch['sameAnnotflag']) annotation_spline_id.append('{}_{}_{}_{}'.format(sketch['originalGameID'],sketch['originalTrialNum'],sketch['aID'],annotation['cumulativeSplineNum'])) except AssertionError: print 'There were unequal numbers for sketch["numSplines"] vs. num_splines for sketch {} from {}'.\ format(sketch['trialNum'], sketch['aID']) # + ## make group dataframe D = pd.DataFrame([workerID,orig_gameID,timestamp, orig_trial_num, outcome, sketch_id, category, assignmentID, target, \ annotation_id, condition, trial_num, time_submitted,\ time_labeled, time_clicked, num_strokes_in_sketch, num_splines_in_sketch,\ stroke_num, cumulative_spline_num, within_stroke_spline_num, cumulative_bout_num,\ part_bout_num, label, spline_svg_string, sketch_svg_string, spline_id, stroke_id,\ annotation_spline_id,png]) D = D.transpose() D.columns = ['workerID','orig_gameID', 'timestamp','orig_trial_num','outcome', 'sketch_id', 'category', 'assignmentID', 'target',\ 'annotation_id', 'condition', 'trial_num', 'time_submitted',\ 'time_labeled', 'time_clicked', 'num_strokes_in_sketch', 'num_splines_in_sketch',\ 'stroke_num', 'cumulative_spline_num', 'within_stroke_spline_num', 'cumulative_bout_num', 'part_bout_num', 'label',\ 'spline_svg_string', 'sketch_svg_string', 'spline_id','stroke_id','annotation_spline_id','png'] D=D[D['assignmentID']!=''] print 'Annotations dataframe contains {} rows and {} columns.'.format(D.shape[0],D.shape[1]) # - ##Check to see what dataframe looks like D.head() # + ###Changing the NAs to "None" strings for ind, row in D.iterrows(): if row['label'] is None: row['label'] = "None" # - # + ##Creating a dictionary of dictionaries that maps user input labels to our main labels of interest #Skip this cell to retain the original annotations maplist_dict={} maplist_dict['car'] ={'body':['body','59 decal','Body and hood','Body and windshield','Gas Cap', 'gas tank','Logo','Number','Number Decal','logo','grill',\ 'Grille','Grill','hubcap','seat','grille','ROOF','Roof','roof','number','59 decal','side mirror','Roof Panel',\ 'Undercarriage','numbers','rearview mirror','NUMBER','Top','top','Racing Decal','Side Mirror','convertible top'], 'bumper':['bumper','Fender','fender','fender well','front bumber','Bumper','Bumper and Hood','step'], 'door':['door','DOOR HANDLE','door handle','handle'], 'headlight':['headlight','taillight'], 'hood':['hood','hood release','Hood Ornament','mirror','Mirror','hood ornament'], 'trunk':['trunk','Exhaust'], 'unknown':['Letter R','Letter e','Letter D','letter D','Says the word Drive','unknown','text','Wind','eye','Arrow','Light Beams',\ 'Light beams','driver','Tree','hand','horn','Word',"it's just words, no picture to label",'words','Pavement','Payement'\ 'Color','Door Handle', 'Subject just wrote "red"','subject just wrote "red"','Gas tank',"The Word Red","The Sun"\ 'sun','clouds','tail lights','vent','The word blue', 'The Word Red','spoiler','None','Sun','Color','sun', 'The Sun', 'Payement','subject just wrote "red"'], 'wheel':['rim','Tire','tire','tires','wheel','wheel well','Axle','spokes','Spells the word Red','lug nuts','lug nut','rims'], 'window':['window'], 'windshield':['windshield','Steering wheel','wiper'] } maplist_dict['bird']={'beak':['beak','jaw'], 
'body':['body','chest','back','speckles','Markings','marking','markings','Marking','Coloring','coloring'], 'eye':['eye'], 'feet':['feet','Toes'], 'head':['head','neck','Neck','facial marking'], 'leg':['leg'], 'tail':['tail'], 'wing':['wing','feather','feathers','Feathers','Feather'], 'unknown':['unknown','B','I','R','D','This isnt a bird','not sketch','c','h',\ 'i','r','p','Not a bird: The word "orange"','sky','Word','The word Yellow',\ 'Pointing to chest','sun',"Letters spelling 'chirp'",'letter y','letter e','letter l'\ ,'letter w', 'letter o','Shading','Sound of Bird','Words','this just spells bird','Cloud',\ 'Sun','sunbeams','None','letter','the sound it makes','The sound it makes']} maplist_dict['dog']={'body':['body','chest','Stomach','back','butt','Butt','fur','fur ','both head and body','Back',\ 'Belly','rear','skin fold','ribs'], 'ear':['ear'], 'eye':['eye','Eye Brow'], 'head':['head','Nose','Nose ','nose','Nose','Nostrils','snout','NOSE','Snout','snout area','face','mask'], 'leg':['leg'], 'mouth':['mout','tongue','muzzle','jaw','Tongue','Muzzle','chin'], 'neck':['neck'], 'paw':['paw','foot'], 'tail':['tail'], 'unknown':['unknown','Straight line in the letter "D"','Curved part of the letter "D"','left half of the letter "O"',\ 'Right part of the letter "O"','Letter "G"','cheating','Person just wrote words','Non-Animal',\ 'not a vaild pict of a dog','letter','W','o','f','word','letter b','Shadow','SHadow','Text',\ 'spelling of dog','smiley face','O','F','R','K','Words "Woof Bark"','not a drawing of a dog','Word','color',\ 'shading','writing','text','None','Hair','stomach','Stripes','Fur','word description',\ 'leg and paw','Leg and Paw','words written to describe drawing','name of dog type']} maplist_dict['chair']={'armrest':['armrest','sides','support slats','support slat','armrest support','decorative wood pane',\ 'side spindles','Chair frame','Side support','Design Elements','Leg and armrest','arm rest',\ 'bars','bar','arm rest support','arm support','Arm support'], 'backrest':['backrest','headrest','Spindle','spindles','spindels','back support'], 'seat':['seat', 'Chair support','cushion'], 'leg':['leg','bottom frame','spindle','Support Bar','wheel','leg rail','leg support','support beam',\ 'Wheel','bottom brace','stretcher','supporting wood','Leg support','top of leg','foot',\ 'Reinforcement for legs','Brace','supports','support for legs','Bottom Support','Leg support',\ 'Stretcher','wood beam connecting legs','Wood beam connecting legs','brace','braces','Struts','Leg Support',\ 'metal support','support','Leg Brace'], 'unknown':['unknown','frame','Descriptive label','letters','Not a chair','Frame','Decoration','Structure',\ 'name ','Label','Words - Bulky Garage','Part of O','letter r','Part of letter a',\ 'Part of letter n','Part of letter g','Part of letter e','Part of arrow',\ 'The word "sit"','word','text','not a sketch','base','rail','color of chair','chair was written',\ 'not a sketch, chair was written', 'None','footrest','Color of Chair','Structural Support','Cross beams','Support','Crossbars','Strut']} # - ### Number of unique labels before mapping D.label.nunique() D.annotation_spline_id.nunique() # + ###Get total number of splines for labels of interest before mapping total_splines_um = D[(D['category']=='chair')&(D['label'].isin(['armrest','backrest','seat','leg','unknown']))]\ .groupby('label').agg('count')['annotation_spline_id'].sum()\ +\ D[(D['category']=='bird')&(D['label'].isin(['beak','body','eye','feet','head','leg','tail','wing','unknown']))]\ 
.groupby('label').agg('count')['annotation_spline_id'].sum()\ +\ D[(D['category']=='dog')&(D['label'].isin(['body','ear','eye','head','leg','mouth','neck','paw','tail','unknown']))]\ .groupby('label').agg('count')['annotation_spline_id'].sum()\ +\ D[(D['category']=='car')&(D['label'].isin(['body','bumper','door','headlight','hood','trunk','unknown','wheel','window','windshield']))]\ .groupby('label').agg('count')['annotation_spline_id'].sum()\ # + ##Actually doing the mapping unique_cats = np.unique(D['category']) for this_cat in unique_cats: maplist=maplist_dict[this_cat] reversed_dict = {val: key for key in maplist for val in maplist[key]} D.loc[D['category']==this_cat,'label']=D[D['category']==this_cat]['label'].map(reversed_dict).fillna(D['label']) # D.loc[D['category']==this_cat,'label']=D[D['category']==this_cat]['label'].map(reversed_dict) # + ###Get total number of splines for labels of interest after mapping total_splines_m = D[(D['category']=='chair')&(D['label'].isin(['armrest','backrest','seat','leg','unknown']))]\ .groupby('label').agg('count')['annotation_spline_id'].sum()\ +\ D[(D['category']=='bird')&(D['label'].isin(['beak','body','eye','feet','head','leg','tail','wing','unknown']))]\ .groupby('label').agg('count')['annotation_spline_id'].sum()\ +\ D[(D['category']=='dog')&(D['label'].isin(['body','ear','eye','head','leg','mouth','neck','paw','tail','unknown']))]\ .groupby('label').agg('count')['annotation_spline_id'].sum()\ +\ D[(D['category']=='car')&(D['label'].isin(['body','bumper','door','headlight','hood','trunk','unknown','wheel','window','windshield']))]\ .groupby('label').agg('count')['annotation_spline_id'].sum()\ # - total_splines_m-total_splines_um D.label.unique() total_splines_m # + #Get a count of how many unique sketches have been annotated and how many unique annotations do we have in total? unique_sketches = np.unique(D['sketch_id'].values) print 'We have {} annotations of {} unique sketches.'.format(len(D['annotation_id'].unique()),len(unique_sketches)) # + ###Removing any annotations that don't have all splines annotated for this_sketch in unique_sketches: DS=D[D['sketch_id']==this_sketch] for this_annot in np.unique(DS['annotation_id']): DSS= DS[DS['annotation_id']==this_annot] if DSS[DSS['label']== 'None'].shape[0]>0: D=D[D['annotation_id']!=this_annot] # - ##How many annotations after filtering? len(D['annotation_id'].unique()) ##Removing any sketches that weren't annotated by at least 3 different workers num_annots=3 for this_sketch in unique_sketches: DS = D[D['sketch_id']==this_sketch] if DS.workerID.nunique()num_annots: for i in range(DS['assignmentID'].nunique()-num_annots): D=D.loc[D.timestamp!=DS.timestamp.min()] DS=DS.loc[DS.timestamp!=DS.timestamp.min()] unique_sketches = np.unique(D['sketch_id'].values) ##How many sketches do we have with 3 annotations? 
len(np.unique(D.sketch_id)) # + ## separate out the PNG dataframe, to reduce redundancy/bloat in master dataframe D_png = D.groupby('sketch_id')['png'].unique().reset_index() D_png.to_csv(os.path.join(csv_dir, 'semantic_parts_annotated_pngstring.csv'),index=False) ## save out master semantic parts annotation dataframe D2 = D.drop('png',axis=1) # remove reundandant png column (split off into different dataframe, to reduce bloat) D2.to_csv(os.path.join(csv_dir, 'semantic_parts_annotated_data.csv'),index=False) D2.to_pickle(os.path.join(csv_dir, 'semantic_parts_annotated_data_pckl')) # - D2.columns # ## Creating spline and stroke level dataframes for further analysis # + ###D = pd.read_csv(os.path.join(csv_dir, 'semantic_parts_annotated_data.csv')) # + ## get the list of unique labels applied to sketches unique_labels = np.unique(D.label.values) ## Removing Nones and obviously wrong super long lables unique_labels = [i for i in unique_labels if i is not None] unique_labels = [i for i in unique_labels if len(i)<900] print 'we have {} unique labels'.format(len( unique_labels)) # - ##Create empty dictionary with categories as keys. We will use this to store part occurrence data for our categories label_vect_dict = {unique_cats[0]:None,unique_cats[1]:None,unique_cats[2]:None,unique_cats[3]:None} # + ##Create vectors that contain the number of part instances in each sketch for category in unique_cats: DS= D[D['category']==category] unique_sketches_in_cat = np.unique(DS['sketch_id']) unique_labels_in_cat = np.unique(DS['label']) ## initialize matrix that has the correct dimensions Label_Vec = np.zeros((len(unique_sketches_in_cat),len(unique_labels_in_cat)), dtype=int) unique_labels_in_cat= np.array(unique_labels_in_cat) for s,this_sketch in enumerate(unique_sketches_in_cat): label_vec = np.zeros(len(unique_labels_in_cat),dtype=int) DSS = DS[DS['sketch_id']==this_sketch] annotation_ids = np.unique(DSS['annotation_id'].values) for this_annotation in annotation_ids: DSA = DSS[DSS['annotation_id']==this_annotation] label_list = DSA.label.values for this_label in label_list: label_ind = unique_labels_in_cat==this_label label_vec[label_ind] += 1 Label_Vec[s,:]=label_vec/num_annots label_vect_dict[category]= Label_Vec # + valid_labels=[] valid_labels_dict={} for category in unique_cats: vect = label_vect_dict[category] thresh = 0 #print 'These are the labels that appear at least {} times:'.format(thresh) #print unique_labels[np.sum(Label_Vec,0)>thresh] unique_labels_in_cat = np.unique(D[D['category']==category]['label']) plot_labels= unique_labels_in_cat[np.sum(vect,0)>thresh] valid_labels_dict[category]=plot_labels valid_labels.append(plot_labels) prop_labels=[] for part in plot_labels: DS=D[D['category']==category] prop_labels.append(DS[DS['label']==part]['annotation_id'].nunique()/DS['annotation_id'].nunique()) sns.set_context('talk') plt.figure(figsize=(12,7)) plt.ylim(0,1) h = plt.bar(plot_labels,prop_labels) plt.title('Proportion of {} annotations with labels'.format(category)) plt.ylabel('proportion of annotations') plt.xlabel('Part') ##flattening valid labels valid_labels = [item for sublist in valid_labels for item in sublist] # - #Creating a spline-level df where the modal label is set as the 'true' label for any given spline spline_df= D.groupby('spline_id').agg(lambda x: Counter(x).most_common(1)[0][0]) spline_df.reset_index(level=0, inplace=True) # + ##Creating a stroke-level dataframe that takes the mode value of annotation for its children splines to set as its ##label value from 
collections import Counter from collections import OrderedDict stroke_svgs=OrderedDict() for category in unique_cats: DS=D[D['category']==category] for sketch in np.unique(DS['sketch_id']): DSS=DS[DS['sketch_id']==sketch] for stroke in np.unique(DSS['stroke_num']): DSA=DSS[DSS['stroke_num']==stroke] DSA=DSA.reset_index() stroke_svgs[DSA['stroke_id'][0]] = DSA['sketch_svg_string'][0][stroke] stroke_svg_df= pd.DataFrame.from_dict(stroke_svgs, orient='index') stroke_group_data= D.groupby('stroke_id').agg(lambda x: Counter(x).most_common(1)[0][0]) labels= pd.DataFrame(stroke_group_data[['sketch_id','label','stroke_num','condition','target','category','outcome']]) stroke_df=pd.merge(stroke_svg_df,labels,left_index=True, right_index =True) stroke_df.reset_index(level=0, inplace=True) stroke_df=stroke_df.rename(index=str, columns={"index": "stroke_id", 0: "svg"}) # - # + ##Adding total arclength information to stroke dataframe from svgpathtools import parse_path import svgpathtools def calculate_arclength(svg): try: arclength= parse_path(svg).length() except ZeroDivisionError: print 'zero div error' arclength = 0 return arclength # - stroke_df['arc_length'] = stroke_df['svg'].apply(calculate_arclength) # ## Creating feature vectors and normalizing # + ###This is where we make a num unique labels * 2 X number of sketches vector feature_vec = np.zeros((len(stroke_df.sketch_id.unique()),len(valid_labels)*2), dtype=int) ind=0 start_pos=0 end_pos=0 meta_list=[] cols = ['sketch_id','target','condition','category','outcome'] for cat in unique_cats: DS= stroke_df[stroke_df['category']==cat] unique_labels_in_cat=valid_labels_dict[cat] unique_sketches_in_cat=DS['sketch_id'].unique() start_pos = end_pos end_pos+= len(unique_labels_in_cat) print start_pos, end_pos clear_output(wait=True) Label_Vec = np.zeros((len(unique_sketches_in_cat),len(unique_labels_in_cat)*2), dtype=int) arc_length_vec = np.zeros((len(unique_sketches_in_cat),len(valid_labels_dict[cat])), dtype=int) for s,sketch in enumerate(unique_sketches_in_cat): label_vec = np.zeros(len(unique_labels_in_cat),dtype=int) arc_vec = np.zeros(len(unique_labels_in_cat),dtype=int) DSA=DS[DS['sketch_id']==sketch] meta_list.append(pd.Series([DSA['sketch_id'].unique(),DSA['target'].unique(),DSA['condition'].unique(),DSA['category'].unique(),DSA['outcome'].unique()], index=cols)) label_list = DSA.label.values for label in label_list: if label in unique_labels_in_cat: label_ind = unique_labels_in_cat==label label_vec[label_ind] += 1 for label in unique_labels_in_cat: DSB=DSA[DSA['label']==label] label_ind = unique_labels_in_cat==label arc_vec[label_ind] = DSB['arc_length'].sum() feature_vec[ind,start_pos:end_pos]=label_vec feature_vec[ind,start_pos+len(valid_labels):end_pos+len(valid_labels)]=arc_vec ind+=1 feature_vec_meta = pd.DataFrame(meta_list, columns=cols) # - # + ##Changing column values from np arrays to strings/boolean def arr_to_str(arr): return (arr[0]) feature_vec_meta['sketch_id']=feature_vec_meta['sketch_id'].apply(arr_to_str) feature_vec_meta['target']=feature_vec_meta['target'].apply(arr_to_str) feature_vec_meta['condition']=feature_vec_meta['condition'].apply(arr_to_str) feature_vec_meta['category']=feature_vec_meta['category'].apply(arr_to_str) feature_vec_meta['outcome']=feature_vec_meta['outcome'].apply(arr_to_str) # - feature_df= pd.DataFrame(feature_vec, columns=[s + '_numstrokes' for s in valid_labels]+[s + '_total_arclength' for s in valid_labels]) len(np.unique(valid_labels)) # + ##creating a compressed version of the feature df with 
no duplicates for parts labs_numstrokes=[] labs_total_arclength=[] for lab in np.unique(valid_labels): labs_numstrokes.append(lab +'_numstrokes') labs_total_arclength.append(lab+'_total_arclength') feature_df_labs=labs_numstrokes+labs_total_arclength feature_df_final= pd.DataFrame(columns=feature_df_labs) for this_lab in feature_df_labs: duplicates=[col for col in feature_df if col.startswith(this_lab)] feature_df_final[this_lab]= feature_df[duplicates].sum(axis=1) feature_df = feature_df_final # - ##Check to make sure the df looks okay assert len(feature_df.columns)==len(np.unique(feature_df.columns)) feature_df.head() ## Save out raw feature matrix prior to normalization within feature_df.to_csv(os.path.join(features_dir,'semantic_parts_sketch_features_compressed_rawcounts.csv'), index=False) ## Normalizing within sketch, within measure (numstrokes/arclength) ## Note: assumes that we are using two measures and that they account for exactly half of the columns in feature_df feature_df.iloc[:,0:int(len(feature_df.columns)/2)]=feature_df.iloc[:,0:int(len(feature_df.columns)/2)].div(feature_df.iloc[:,0:int(len(feature_df.columns)/2)].sum(axis=1),axis=0) feature_df.iloc[:,int(len(feature_df.columns)/2):int(len(feature_df.columns))]=feature_df.iloc[:,int(len(feature_df.columns)/2):int(len(feature_df.columns))].div(feature_df.iloc[:,int(len(feature_df.columns)/2):int(len(feature_df.columns))].sum(axis=1),axis=0) ### Execute this if we want to save a non-zscore matrix run=True if run==True: feature_df.to_csv(os.path.join(features_dir,'semantic_parts_sketch_features_compressed_normalized.csv'), index=False) run=False # + #z-scoring within columns columns=list(feature_df.columns) for this_col in columns: feature_df[this_col]=(feature_df[this_col] - feature_df[this_col].mean())/feature_df[this_col].std(ddof=0) # - # ### Saving out files as needed feature_df.to_csv(os.path.join(features_dir,'semantic_parts_sketch_features_compressed_normalized_whitened.csv'), index=False) np.save(os.path.join(features_dir, 'semantic_parts_sketch_features'),feature_vec) feature_vec_meta.to_csv(os.path.join(features_dir,'semantic_parts_sketch_meta.csv'), index=False) def cleanup_df(X): if 'Unnamed: 0' in X.columns: X = X.drop(columns=['Unnamed: 0']) return X a= cleanup_df(pd.read_pickle(os.path.join(csv_dir,'rawpckl'))) a.assignmentID.nunique() a # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={"name": "#%% md\n"} # # Problem 16 # ## Power digit sum # # $2^{15} = 32768$ and the sum of its digits is $3 + 2 + 7 + 6 + 8 = 26$. What is the sum of the digits of the number $2^{1000}$? # # ## Solution # We'll use [Karatsuba algorithm](https://en.wikipedia.org/wiki/Karatsuba_algorithm) for multiplication and [exponentiation by squaring method](https://en.wikipedia.org/wiki/Exponentiation_by_squaring). 
# + pycharm={"name": "#%%\n"} from euler.big_int import BigInt from euler.numbers import digits_sum # + pycharm={"name": "#%%\n"} def compute(n: int, p: int) -> int: return digits_sum(BigInt(n) ** p) # + pycharm={"name": "#%%\n"} compute(2, 15) # + pycharm={"name": "#%%\n"} compute(2, 1_000) # + pycharm={"name": "#%%\n"} # %timeit -n 100 -r 1 -p 6 compute(2, 1_000) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # SpamHam Text Classification # Imports import os from collections import Counter import pandas as pd import numpy as np import random import matplotlib.pyplot as plt import seaborn as sns # ## Making the data sets # I've copied all spam and ham emails in singel directory 'emails', now making the data. def make_data(): direc = 'emails/' files = os.listdir(direc) emails = np.array([direc+email for email in files]) random.shuffle(emails) email_col = np.array([[]]) label = np.array([[]]) c = len(emails) for email in emails: f = open(email, encoding='utf8', errors='ignore') blob = f.read() email_col = np.append(email_col, blob) if 'ham' in email: label = np.append(label, 'ham') if 'spam' in email: label = np.append(label, 'spam') c-=1 print(c) return email_col, label emails, labels = make_data() emails.shape, labels.shape # making them dimension ready emails = emails[:, np.newaxis] labels = labels[:, np.newaxis] emails.shape, labels.shape df = pd.DataFrame({'message': emails.flatten(), 'labels':labels.flatten()}, index=np.arange(5172)) df.head() # preprocess email data # + from bs4 import BeautifulSoup from nltk import word_tokenize, WordNetLemmatizer lemmatizer = WordNetLemmatizer() def purify_text(message): soup = BeautifulSoup(message) text = soup.get_text() text = text.replace("\n", " ").replace("/", "").replace("|", "").replace("http", "").replace(':', "").replace('\t', "").replace("Subject", "").replace('re', "").strip() tokens = word_tokenize(text) temp = [lemmatizer.lemmatize(word.lower()) for word in tokens] return ' '.join(temp) df['message'] = df['message'].apply(purify_text) # - df.head() # Adding sms spam data to this dataset. 
df_sms = pd.read_csv('smsspamcollection/SMSSpamCollection', sep='\t', names=['labels', 'message']) df_sms.head() df_sms['message'] = df_sms['message'].apply(purify_text) df_sms.head() # Merging the two datasets df_final = df.merge(df_sms, how='outer') df_final.head() df_final.tail() df_final = df_final.sample(frac=1).reset_index(drop=True) df_final.tail() df = df_final.copy() # ## Visualizing the data sns.countplot(df['labels']) df['length'] = df['message'].apply(len) sns.distplot(df['length']) df[df['length']>2500]['labels'].value_counts() #.count() for index in df[df['length']>2500].index.tolist(): df.drop(index=index, axis=1, inplace=True) df['length'] = df['message'].apply(len) sns.distplot(df['length']) df.hist('length', by='labels', figsize=(15, 10), bins=60) # ## Applying NLP; BOW, TF-IDF import string from nltk.corpus import stopwords def text_process(message): noPunc = [char for char in message if char not in string.punctuation] noPunc = ''.join(noPunc) return [word for word in noPunc.split() if word not in stopwords.words('english')] df['message'] = df['message'].apply(text_process) df['message'] = df['message'].apply(lambda x: ' '.join(x)) df['message'].head() from sklearn.feature_extraction.text import CountVectorizer bow_transformer = CountVectorizer().fit(df['message']) print(len(bow_transformer.vocabulary_)) message_bow = bow_transformer.transform(df['message']) sparsity = (100.0 * message_bow.nnz/(message_bow.shape[0] * message_bow.shape[1])) print('sparsity {}'.format(sparsity)) from sklearn.feature_extraction.text import TfidfTransformer tfidf_transformer_bow = TfidfTransformer().fit(message_bow) message_tfidf = tfidf_transformer_bow.transform(message_bow) # + from sklearn.naive_bayes import MultinomialNB def buildClassifier(alpha): text_classifier = MultinomialNB(alpha=alpha) return text_classifier # - # ## Making the pipeline, making it production ready! from sklearn.pipeline import Pipeline pipeline = Pipeline([('vec', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB(alpha=0.01))]) # Grid Search for best parameters. from sklearn.model_selection import GridSearchCV parameters = {'clf__alpha': (0.00001, 0.0001, 0.001, 0.01, 0.1, 0.8, 0.9, 1)} grid_search = GridSearchCV(estimator=pipeline, param_grid=parameters, scoring='accuracy', cv=10, verbose=1) grid_search.fit(df['message'], df['labels']) best_parameters = grid_search.best_params_ best_accuracy = grid_search.best_score_ best_parameters best_accuracy pipeline = Pipeline([('vec', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB(alpha=0.1))]) pipeline.fit(df['message'], df['labels']) input_word = ['Download this app for free music'] input_word = text_process(input_word) input_word = [' '.join(input_word)] pipeline.predict(input_word) # # Saving the Pipeline to be integrated in our web-app import pickle filename = 'textClf.pkl' pickle.dump(pipeline, open(filename, 'wb')) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:ucair] # language: python # name: conda-env-ucair-py # --- # ### Cardiac MRI View Prediction # # The following notebook uses a trained neural network to predict the MRI view for each series in a directory of dicom files. # First, import the necessary packages to run the analysis. The required libraries are pydicom, pandas, and tensorflow. 
The original analysis was run using python 3.6.8, pydicom 1.2.2, and tensorflow 2.4.1. # + import os import sys import pydicom # pydicom is using the gdcm package for decompression from multiprocessing import Pool import numpy as np import time import pandas as pd from tqdm import tqdm import tensorflow as tf print('Python: {}'.format(sys.version)) print('Pydicom: {}'.format(pydicom.__version__)) print('TensorFlow: {}'.format(tf.__version__)) # - # Tensorflow can be run with or without GPU support. If GPU support through tensorflow is enabled and available, the following code will display and available GPU. device_name = tf.test.gpu_device_name() if device_name != '/device:GPU:0': raise SystemError('GPU device not found') print('Found GPU at: {}'.format(device_name)) # If no GPU is found, or if you simply want to run the analysis using only CPU, we can configure tensorflow to only use CPU using the following code: # Only use CPU for right now - Set CPU as only available physical device my_devices = tf.config.experimental.list_physical_devices(device_type='CPU') tf.config.experimental.set_visible_devices(devices= my_devices, device_type='CPU') # The analysis can be run over a single directory of dicom images (for one or more patients) or over a directory containing multiple subdirectories. Due to memory constraints, skip ahead to section 2.0 if your directory contains multiple patients. The code in section 2.0 runs the analysis iteratively over each subdirectory. Alternatively, if you only have one patient or a small dataset, you can run the complete analysis by running all of the code in section 1.0. # # The suggested directory structures for each approach are shown below: # #### Approach 1 (Section 1.0) # # Run the analysis over all files in a directory and subdirectories (may run into memory issues if dataset is large). # # ```bash # ├── DATA # └── Patient1 # ├── 1.dcm # ├── 2.dcm # ├── 3.dcm # └── ... # ``` # # #### Approach 2 (Section 2.0) # # Run the analysis over each patient in a directory individually (i.e., analysis for patient 1, then analysis for patient 2.) # # ```bash # ├── DATA # ├── Patient1 # │ ├── 1.dcm # │ ├── 2.dcm # │ ├── 3.dcm # │ └── ... # └── Patient2 # ├── 1.dcm # ├── 2.dcm # ├── 3.dcm # └── ... # ``` # ### Section 1.0 - Small Dataset and/or Single Patient # # Use the following code to run the analysis over all the files in a single directory. If you wish to run the analysis over each patient in a directory (recommended due to memory constraints), skip ahead to section 2.0. # #### Parameters # # Define the desired parameters for the code in the cell below. 
# + # PARAMETERS for the analysis src = "../data/example/CHD10553/" # PATH to the directory containing the desired DICOM files (str) dst = "../data/processed/sorted/" # PATH to the output directory to save dicom files (only valid if save_dicoms = True) (str) modelname = 'ResNet50' # The neural network to load and used (Options: VGG19, ResNet50, or Xception) modelpath = '../models/' # PATH to the saved models (str) use_multiprocessing = False # Use multiprocessing to read header info (True or False) # parameters for postprocessing/saving csv_path = '../reports/EXAMPLE_series_predictions.csv' # PATH to save the generated csv file (only valide if create_csv = True) (str) create_csv = True # Save a .csv file with the series level view predictions (True or False) save_files = True # Save dicom files to new directory (dst) (True or False) save_only_desired = True # Save only dicom files corresponding to desired views (True or False) confidence_value = 0.9 # Only save series if the confidence is > a certain value (set to 0 to save all desired series, regardless of confidence) (float 0-1.0) # - # The following cells are used to define the possible MRI view classes (n=7) and the views that are desired for cardiac modeling. # define possible class predictions classLabels = ['SA', '4CH', '2CH RT', 'RVOT', 'OTHER', '2CH LT', 'LVOT'] classes = sorted(classLabels, key = str) print(classes) # define the series that are needed/desired for cardiac modeling desired_series = ['4CH', 'SA', '2CH RT', '2CH LT', 'LVOT', 'RVOT'] # #### Import and Load Model # # The following code loads the saved neural network that will be used for view prediction: # + # load the appropriate model if modelname == 'ResNet50': MODELPATH = os.path.join(modelpath, 'ResNet50/resnet50.h5py') model = tf.keras.models.load_model(MODELPATH) print(model.summary()) elif modelname == 'VGG19': MODELPATH = os.path.join(modelpath, 'VGG19/vgg19.h5py') model = tf.keras.models.load_model(MODELPATH) print(model.summary()) elif modelname == 'Xception': MODELPATH = os.path.join(modelpath, 'XCEPTION/xception.h5py') model = tf.keras.models.load_model(MODELPATH) print(model.summary()) else: print('Uknown model specified in parameters!') # - # #### Define Necessary Functions # # Before we get started, we define a few useful functions that we will use throughout the analysis. # + def clean_text(string): # clean and standardize text descriptions, which makes searching files easier forbidden_symbols = ["*", ".", ",", "\"", "\\", "/", "|", "[", "]", ":", ";", " "] for symbol in forbidden_symbols: string = string.replace(symbol, "_") # replace everything with an underscore return string.lower() def preprocess(img): # format image into tensor, standardized to 0-255 img = tf.cast(img, tf.float32) img = tf.image.resize(tf.expand_dims(img, 2), (224,224)) img = tf.image.grayscale_to_rgb(img) # standardize img = img / np.max(img) img = img * 255. 
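# Added note: the resize to 224x224 and the grayscale-to-RGB conversion above produce a 3-channel, 224x224 tensor, presumably the input shape the saved networks (VGG19 / ResNet50 / Xception) were trained with; pixel values are rescaled to the 0-255 range before prediction.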
return img def predict_view(img, model=model): # make prediction on a single image pred = model.predict(tf.expand_dims(img, axis=0)) pred = tf.argmax(pred, axis=-1) pred_view = classes[int(pred)] return pred_view def batch_predict(batch, model=model): # make prediction on a batch of images pred = model.predict(batch) pred = tf.argmax(pred, axis=-1) pred_view = [classes[int(x)] for x in pred] return pred_view def get_dicom_header(dicom_loc): # read dicom file and return header information and image ds = pydicom.read_file(dicom_loc, force=True) # get patient, study, and series information patientID = clean_text(ds.get("PatientID", "NA")) studyDescription = clean_text(ds.get("StudyDescription", "NA")) seriesDescription = clean_text(ds.get("SeriesDescription", "NA")) # generate new, standardized file name modality = ds.get("Modality","NA") studyInstanceUID = ds.get("StudyInstanceUID","NA") seriesInstanceUID = ds.get("SeriesInstanceUID","NA") seriesNumber = ds.get('SeriesNumber', 'NA') instanceNumber = str(ds.get("InstanceNumber","0")) # load image data array = ds.pixel_array return patientID, dicom_loc, modality, seriesInstanceUID, seriesNumber, instanceNumber, array, seriesDescription # - # #### Read DICOM Headers # # Before the neural network can be used to predict the MRI view, we load the necessary information from the dicom headers, such as patient IDs, series, study, modality, instance, and the corresponding raw images. # + print('Reading file list...') unsortedList = [] for root, dirs, files in os.walk(src): for file in files: if ".dcm" in file: # exclude non-dicoms, good for messy folders unsortedList.append(os.path.join(root, file)) print('%s files found.' % len(unsortedList)) if use_multiprocessing: with Pool(os.cpu_count()) as p: output = p.map(get_dicom_header, [dicom_loc for dicom_loc in unsortedList]) print('Done!') else: output = [] for dicom_loc in tqdm(unsortedList): output.append(get_dicom_header(dicom_loc)) # - # generated pandas dataframe to store information from headers df = pd.DataFrame(sorted(output), columns = ['Patient ID', 'Filename', 'Modality', 'Series ID', 'Series Number', 'Instance Number', 'Img', 'Series Description']) df.head().transpose() # #### Make Predictions for Each Series # # Now that we have the header info and images, we can make predictions for each series. The following code iterates over each series and makes predictions in batches. # # The code generates a confidence level, which ranges from 0-1.0. This value is calculated for each series by dividing the count of the most frequent prediction by the total number of predictions. For example, if a 30 frame series has 29 correct predictions of '4CH', but one incorrect prediction of 'OTHER', the confidence would be 0.97. 
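# As a quick numeric illustration of the confidence arithmetic described above (a minimal
# sketch; `example_views` below is a hypothetical 30-frame series, not output from the model):
# +
import numpy as np  # already imported above; repeated so the sketch is self-contained

example_views = ['4CH'] * 29 + ['OTHER']
u_ex, count_ex = np.unique(example_views, return_counts=True)
pred_ex = u_ex[np.argmax(count_ex)]                          # most frequent prediction -> '4CH'
conf_ex = np.round(np.max(count_ex) / np.sum(count_ex), 2)   # 29 / 30 -> 0.97
print(pred_ex, conf_ex)
# -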
# + output_series = [] # make predictions and calculate confidence values for series in tqdm(set(df['Series ID'])): new = df[df['Series ID'] == series] dataset = tf.data.Dataset.from_tensor_slices([preprocess(x) for x in new['Img'].values]) dataset = (dataset .batch(16) .prefetch(tf.data.experimental.AUTOTUNE)) # record info for this series patient_id = new['Patient ID'].iloc[0] series_num = new['Series Number'].iloc[0] series_desc = new['Series Description'].iloc[0] frames = len(new) # make predictions over images views = batch_predict(dataset, model) # find unique predictions and confidence for that series u, count = np.unique(views, return_counts=True) count_sort_ind = np.argsort(-count) pred = u[count_sort_ind][0] conf = np.round(np.max(count) / np.sum(count), 2) output_series.append([patient_id.upper(), series, series_num, frames, series_desc, pred, conf]) output_series_df = pd.DataFrame(output_series, columns=['Patient ID', 'Series ID', 'Series Number', 'Frames', 'Series Description', 'Predicted View', 'Confidence']) output_series_df # - # #### Save Predictions # # Now that the predictions have been made, we can save the pandas dataframe containing the predicted view for each series to a .csv file and the dicom files of the desired views for cardiac modeling to a new directory, depending on the parameters that were specified above. # + if create_csv: print('Saving .csv file with series predictions and info') if os.path.exists(csv_path): output_series_df.to_csv(csv_path, mode='a', header=False, index=False) else: output_series_df.to_csv(csv_path, mode='a', index=False) print('Done!') if save_files: print('Saving dicom files to new folder...') for series in tqdm(output_series_df['Series ID']): new = df[df['Series ID'] == series] series_df = output_series_df[output_series_df['Series ID'] == series] predView = series_df['Predicted View'].values[0] if save_only_desired: if predView in desired_series and series_df['Confidence'].values > confidence_value: for i, row in new.iterrows(): fileName = row['Modality'] + '.' + row['Series ID'] + '.' + row['Instance Number'] + '.dcm' patientID = row['Patient ID'].upper() dicom = pydicom.dcmread(row['Filename']) # save files to a 2-tier nested folder structure if not os.path.exists(os.path.join(dst, patientID)): os.makedirs(os.path.join(dst, patientID)) if not os.path.exists(os.path.join(dst, patientID, predView)): os.makedirs(os.path.join(dst, patientID, predView)) dicom.save_as(os.path.join(dst, patientID, predView, fileName)) else: pass else: for i, row in new.iterrows(): fileName = row['Modality'] + '.' + row['Series ID'] + '.' + row['Instance Number'] + '.dcm' patientID = row['Patient ID'].upper() dicom = pydicom.dcmread(row['Filename']) # save files to a 2-tier nested folder structure if not os.path.exists(os.path.join(dst, patientID)): os.makedirs(os.path.join(dst, patientID)) if not os.path.exists(os.path.join(dst, patientID, predView)): os.makedirs(os.path.join(dst, patientID, predView)) dicom.save_as(os.path.join(dst, patientID, predView, fileName)) # - # ### Section 2.0 - Directory with Multiple Patients or Series # # Use the following code to run the analysis over each patient and/or series in a directory individually. 
# + # PARAMETERS for the analysis src = "../data/raw/example/" # PATH to the directory containing the desired DICOM files (str) dst = "../data/processed/sorted/" # PATH to the output directory to save dicom files (only valid if save_dicoms = True) (str) modelname = 'ResNet50' # The neural network to load and used (Options: VGG19, ResNet50, or Xception) modelpath = '../models/' # PATH to the saved models (str) use_multiprocessing = False # Use multiprocessing to read header info (True or False) # parameters for postprocessing/saving csv_path = '../reports/resnet_series_predictions.csv' # PATH to save the generated csv file (only valide if create_csv = True) (str) create_csv = True # Save a .csv file with the series level view predictions (True or False) save_files = True # Save dicom files to new directory (dst) (True or False) save_only_desired = True # Save only dicom files corresponding to desired views (True or False) confidence_value = 0.9 # Only save series if the confidence is > a certain value (set to 0 to save all desired series, regardless of confidence) (float 0-1.0) # + def clean_text(string): # clean and standardize text descriptions, which makes searching files easier forbidden_symbols = ["*", ".", ",", "\"", "\\", "/", "|", "[", "]", ":", ";", " "] for symbol in forbidden_symbols: string = string.replace(symbol, "_") # replace everything with an underscore return string.lower() def preprocess(img): # format image into tensor, standardized to 0-255 img = tf.cast(img, tf.float32) img = tf.image.resize(tf.expand_dims(img, 2), (224,224)) img = tf.image.grayscale_to_rgb(img) # standardize img = img / np.max(img) img = img * 255. return img def predict_view(img, model, classes): # make prediction on a single image pred = model.predict(tf.expand_dims(img, axis=0)) pred = tf.argmax(pred, axis=-1) pred_view = classes[int(pred)] return pred_view def batch_predict(batch, model, classes): # make prediction on a batch of images pred = model.predict(batch) pred = tf.argmax(pred, axis=-1) pred_view = [classes[int(x)] for x in pred] return pred_view def get_dicom_header(dicom_loc): # read dicom file and return header information and image ds = pydicom.read_file(dicom_loc, force=True) # get patient, study, and series information patientID = clean_text(ds.get("PatientID", "NA")) studyDescription = clean_text(ds.get("StudyDescription", "NA")) seriesDescription = clean_text(ds.get("SeriesDescription", "NA")) # generate new, standardized file name modality = ds.get("Modality","NA") studyInstanceUID = ds.get("StudyInstanceUID","NA") seriesInstanceUID = ds.get("SeriesInstanceUID","NA") seriesNumber = ds.get('SeriesNumber', 'NA') instanceNumber = str(ds.get("InstanceNumber","0")) # load image data array = ds.pixel_array return patientID, dicom_loc, modality, seriesInstanceUID, seriesNumber, instanceNumber, array, seriesDescription # - def complete_view_prediction(directory, dst, model, csv_path, create_csv, use_multiprocessing, save_files, save_only_desired, confidence_value): # Runs the complete view prediction over the dicom files in a directory # define possible class predictions classLabels = ['SA', '4CH', '2CH RT', 'RVOT', 'OTHER', '2CH LT', 'LVOT'] classes = sorted(classLabels, key = str) # define the series that are needed/desired for cardiac modeling desired_series = ['4CH', 'SA', '2CH RT', '2CH LT', 'LVOT', 'RVOT'] unsortedList = [] for root, dirs, files in os.walk(directory): for file in files: if ".dcm" in file: # exclude non-dicoms, good for messy folders 
unsortedList.append(os.path.join(root, file)) #print('%s files found.' % len(unsortedList)) if use_multiprocessing: with Pool(os.cpu_count()) as p: output = p.map(get_dicom_header, [dicom_loc for dicom_loc in unsortedList]) else: output = [] for dicom_loc in tqdm(unsortedList): output.append(get_dicom_header(dicom_loc)) # generated pandas dataframe to store information from headers df = pd.DataFrame(sorted(output), columns = ['Patient ID', 'Filename', 'Modality', 'Series ID', 'Series Number', 'Instance Number', 'Img', 'Series Description']) output_series = [] # make predictions and calculate confidence values for series in set(df['Series ID']): new = df[df['Series ID'] == series] dataset = tf.data.Dataset.from_tensor_slices([preprocess(x) for x in new['Img'].values]) dataset = (dataset .batch(16) .prefetch(tf.data.experimental.AUTOTUNE)) # record info for this series patient_id = new['Patient ID'].iloc[0] series_num = new['Series Number'].iloc[0] series_desc = new['Series Description'].iloc[0] frames = len(new) # make predictions over images views = batch_predict(dataset, model, classes) # find unique predictions and confidence for that series u, count = np.unique(views, return_counts=True) count_sort_ind = np.argsort(-count) pred = u[count_sort_ind][0] conf = np.round(np.max(count) / np.sum(count), 2) output_series.append([patient_id.upper(), series, series_num, frames, series_desc, pred, conf]) output_series_df = pd.DataFrame(output_series, columns=['Patient ID', 'Series ID', 'Series Number', 'Frames', 'Series Description', 'Predicted View', 'Confidence']) if create_csv: #print('Saving .csv file with series predictions and info') if os.path.exists(csv_path): output_series_df.to_csv(csv_path, mode='a', header=False, index=False) else: output_series_df.to_csv(csv_path, mode='a', index=False) #print('Done!') if save_files: #print('Saving dicom files to new folder...') for series in output_series_df['Series ID']: new = df[df['Series ID'] == series] series_df = output_series_df[output_series_df['Series ID'] == series] predView = series_df['Predicted View'].values[0] patientID = series_df['Patient ID'].values[0].upper() if save_only_desired: if predView in desired_series and series_df['Confidence'].values > confidence_value: for i, row in new.iterrows(): fileName = row['Modality'] + '.' + row['Series ID'] + '.' + row['Instance Number'] + '.dcm' dicom = pydicom.dcmread(row['Filename']) # save files to a 2-tier nested folder structure if not os.path.exists(os.path.join(dst, patientID)): os.makedirs(os.path.join(dst, patientID)) if not os.path.exists(os.path.join(dst, patientID, predView)): os.makedirs(os.path.join(dst, patientID, predView)) dicom.save_as(os.path.join(dst, patientID, predView, fileName)) else: pass else: for i, row in new.iterrows(): fileName = row['Modality'] + '.' + row['Series ID'] + '.' + row['Instance Number'] + '.dcm' patientID = row['Patient ID'].upper() dicom = pydicom.dcmread(row['Filename']) # save files to a 2-tier nested folder structure if not os.path.exists(os.path.join(dst, patientID)): os.makedirs(os.path.join(dst, patientID)) if not os.path.exists(os.path.join(dst, patientID, predView)): os.makedirs(os.path.join(dst, patientID, predView)) dicom.save_as(os.path.join(dst, patientID, predView, fileName)) # Run the complete view prediction for each series, in each subdirectory using the code below. 
# + subdirectories = next(os.walk(src))[1] print('Discovered {} subdirectories in source folder'.format(len(subdirectories))) # load appropriate model if modelname == 'ResNet50': MODELPATH = os.path.join(modelpath, 'Resnet/082621_resnet.hdf5') model = tf.keras.models.load_model(MODELPATH) #print(model.summary()) elif modelname == 'VGG19': MODELPATH = os.path.join(modelpath, 'VGG19/vgg19.hdf5') model = tf.keras.models.load_model(MODELPATH) #print(model.summary()) elif modelname == 'Xception': MODELPATH = os.path.join(modelpath, 'XCEPTION/xception.hdf5') model = tf.keras.models.load_model(MODELPATH) #print(model.summary()) else: print('Unknown model specified in parameters!') for subdir in tqdm(subdirectories): complete_view_prediction(os.path.join(src, subdir), dst=dst, model=model, csv_path=csv_path, create_csv=create_csv, use_multiprocessing=use_multiprocessing, save_files=save_files, save_only_desired=save_only_desired, confidence_value=confidence_value) # - reset() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.3 64-bit (''base'': conda)' # language: python # name: python37364bitbaseconda7bdb38472b8447b1a3c551411def93ed # --- import pandas as pd from matplotlib import pyplot as plt import numpy as np import datetime from math import * # %matplotlib inline plt.style.use('ggplot') plt.rcParams["figure.figsize"] = [16,9] train_data = pd.read_csv('nyc-taxi-trip-duration/train.csv') test_data = pd.read_csv('nyc-taxi-trip-duration/test.csv') """osmr_1 = pd.read_csv('new-york-city-taxi-with-osrm/fastest_routes_train_part_1.csv', usecols=['id', 'total_distance', 'total_travel_time', 'number_of_steps']) osmr_2 = pd.read_csv('new-york-city-taxi-with-osrm/fastest_routes_train_part_2.csv', usecols=['id', 'total_distance', 'total_travel_time', 'number_of_steps'])""" # We first add new features that seem to be interesting for our modelling problem. We define a direction feature based on the initial (pickup) and final (dropoff) coordinates using a geospatial formula.
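# For reference, the two geospatial quantities computed in the next cell are the haversine
# great-circle distance and the initial bearing between pickup and dropoff, with $R \approx 6371$ km
# and $\Delta\varphi$, $\Delta\lambda$ the latitude/longitude differences in radians:
#
# $$ d = 2R\,\arcsin\!\sqrt{\sin^{2}\tfrac{\Delta\varphi}{2} + \cos\varphi_{1}\,\cos\varphi_{2}\,\sin^{2}\tfrac{\Delta\lambda}{2}} $$
#
# $$ \theta = \operatorname{atan2}\!\big(\sin\Delta\lambda\,\cos\varphi_{2},\; \cos\varphi_{1}\sin\varphi_{2} - \sin\varphi_{1}\cos\varphi_{2}\cos\Delta\lambda\big) $$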
# + def haversine_array(lat1, lng1, lat2, lng2): lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2)) AVG_EARTH_RADIUS = 6371 # in km lat = lat2 - lat1 lng = lng2 - lng1 d = np.sin(lat * 0.5) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(lng * 0.5) ** 2 h = 2 * AVG_EARTH_RADIUS * np.arcsin(np.sqrt(d)) return h def dummy_manhattan_distance(lat1, lng1, lat2, lng2): a = haversine_array(lat1, lng1, lat1, lng2) b = haversine_array(lat1, lng1, lat2, lng1) return a + b def bearing_array(lat1, lng1, lat2, lng2): AVG_EARTH_RADIUS = 6371 # in km lng_delta_rad = np.radians(lng2 - lng1) lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2)) y = np.sin(lng_delta_rad) * np.cos(lat2) x = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(lng_delta_rad) return np.degrees(np.arctan2(y, x)) jfk_coord = pd.DataFrame(np.array([[-73.778889,40.639722]]),columns=['lon','lat']) la_guardia_coord = pd.DataFrame(np.array([[-73.872611,40.77725]]),columns=['lon','lat']) # + train_data.loc[:, 'direction'] = bearing_array(train_data['pickup_latitude'].values, train_data['pickup_longitude'].values, train_data['dropoff_latitude'].values, train_data['dropoff_longitude'].values) train_data.loc[:, 'distance_haversine'] = haversine_array(train_data['pickup_latitude'].values, train_data['pickup_longitude'].values, train_data['dropoff_latitude'].values, train_data['dropoff_longitude'].values) train_data.loc[:, 'distance_dummy_manhattan'] = dummy_manhattan_distance(train_data['pickup_latitude'].values, train_data['pickup_longitude'].values, train_data['dropoff_latitude'].values, train_data['dropoff_longitude'].values) train_data['pickup_datetime'] = pd.to_datetime(train_data['pickup_datetime']) train_data['dropoff_datetime'] = pd.to_datetime(train_data['dropoff_datetime']) train_data['pickup_year'] = train_data['pickup_datetime'].dt.year train_data['pickup_month'] = train_data['pickup_datetime'].dt.month train_data['pickup_day'] = train_data['pickup_datetime'].dt.day train_data['pickup_hour'] = train_data['pickup_datetime'].dt.hour train_data['pickup_minute'] = train_data['pickup_datetime'].dt.minute train_data['pickup_weekday'] = train_data['pickup_datetime'].dt.weekday train_data['jfk_dist_pick'] = haversine_array(train_data['pickup_latitude'].values, train_data['pickup_longitude'].values, jfk_coord['lat'].values, jfk_coord['lon'].values) train_data['jfk_dist_drop'] = haversine_array(train_data['dropoff_latitude'].values, train_data['dropoff_longitude'].values, jfk_coord['lat'].values, jfk_coord['lon'].values) train_data['lg_dist_pick'] = haversine_array(train_data['pickup_latitude'].values, train_data['pickup_longitude'].values, la_guardia_coord['lat'].values, la_guardia_coord['lon'].values) train_data['lg_dist_drop'] = haversine_array(train_data['dropoff_latitude'].values, train_data['dropoff_longitude'].values, la_guardia_coord['lat'].values, la_guardia_coord['lon'].values) train_data['jfk_trip'] = (train_data['jfk_dist_pick'] < 2e3) | (train_data['jfk_dist_drop'] < 2e3) train_data['jfk_trip'].astype(int) train_data['lg_trip'] = (train_data['lg_dist_pick'] < 2e3) | (train_data['lg_dist_drop'] < 2e3) train_data['lg_trip'].astype(int) train_data['work'] = (train_data['pickup_hour'].isin(range(8,18))) & (train_data['pickup_weekday'].isin([0,1,2,3,4])) train_data['work'].astype(int) train_data['date'] = train_data['pickup_datetime'].dt.date train_data['speed'] = train_data['distance_haversine']/(train_data['trip_duration']/3600) # - train_data.head() train_data.shape train_data = 
train_data[train_data['trip_duration'] < (22*3600)] #isclose = isclose(train_data['distance_haversine'].values,0) train_data = train_data[(train_data['distance_haversine']>0) | (train_data['trip_duration']<60)] train_data = train_data[(train_data['jfk_dist_pick'] < 3e5) & (train_data['jfk_dist_drop'] < 3e5)] train_data = train_data[train_data['trip_duration'] > 10] train_data = train_data[train_data['speed'] < 100] weather = pd.read_csv('weather/weather_data_nyc_centralpark_2016.csv') weather.head() # + weather['date'] = weather['date'].apply(lambda x : datetime.datetime.strptime(x, '%d-%m-%Y')) def Tto(x): if x == 'T': return(0.01) else: return(x) weather['rain'] = weather['precipitation'].apply(lambda x : Tto(x)).astype(float) weather['s_fall'] = weather['snow fall'].apply(lambda x : Tto(x)).astype(float) weather['s_depth'] = weather['snow depth'].apply(lambda x : Tto(x)).astype(float) weather['all_precip'] = (weather['rain'].values + weather['s_fall'].values) weather['has_snow'] = ((weather['s_fall'] > 0) | (weather['s_depth'] >0)) weather['has_rain'] = (weather['rain'] > 0) weather.head() # + weather['date'] = pd.to_datetime(weather['date']) train_data['date'] = pd.to_datetime(train_data['date']) train_data = pd.merge(train_data, weather[['date','rain', 's_fall', 'all_precip', 'has_snow', 'has_rain','s_depth', 'minimum temperature','maximum temperature']], on = 'date', how = 'left') # - print(train_data.shape) osmr_1 = pd.read_csv('new-york-city-taxi-with-osrm/fastest_routes_train_part_1.csv', usecols=['id', 'total_distance', 'total_travel_time','number_of_steps', 'step_direction', 'step_maneuvers']) osmr_2 = pd.read_csv('new-york-city-taxi-with-osrm/fastest_routes_train_part_2.csv', usecols=['id', 'total_distance', 'total_travel_time','number_of_steps', 'step_direction', 'step_maneuvers']) osmr_3 = pd.read_csv('new-york-city-taxi-with-osrm/second_fastest_routes_test.csv', usecols=['id', 'total_distance', 'total_travel_time','number_of_steps', 'step_direction', 'step_maneuvers']) fastest_routes = pd.concat([osmr_1,osmr_2,osmr_3]) temp = fastest_routes temp['fastest_speed'] = (temp['total_distance']/(temp['total_travel_time']+1)*3.6) temp['left_turns'] = temp['step_direction'].str.count('left') temp['right_turns'] = temp['step_direction'].str.count('right') temp['turns'] = temp['step_maneuvers'].str.count('turn') temp = temp.drop(columns=['step_direction','step_maneuvers']) print((temp.info)) # + train_data = pd.merge(train_data, temp, on = 'id', how = 'left') # - train_data.dtypes # + def blizzardTest(x): return(not((x.strftime('%Y-%m-%d')< '2016-01-22' ) | ( x.strftime('%Y-%m-%d') > '2016-01-29' ))) train_data['blizzard'] = train_data['date'].apply(lambda x : blizzardTest(x)).astype(int) train_data['passenger_count'] = train_data['passenger_count'].astype(int) train_data['vendor_id'] = train_data['vendor_id'].astype(int) #train_data['store_and_fwd_flag'] = train_data['store_and_fwd_flag'].astype(int) train_data['jfk_trip'] = train_data['jfk_trip'].astype(int) train_data['lg_trip'] = train_data['lg_trip'].astype(int) train_data['work'] = train_data['work'].astype(int) train_data['has_snow'] = train_data['has_snow'].astype(int) train_data['has_rain'] = train_data['has_rain'].astype(int) train_data = train_data.drop(columns=['pickup_year','pickup_minute']) # - foo1 = train_data.drop(columns=['id','pickup_datetime','dropoff_datetime','jfk_dist_pick','jfk_dist_drop', 'lg_dist_pick','lg_dist_drop','date']) train_data.shape import seaborn as sns corr = train_data.corr() ax = sns.heatmap( 
corr, vmin=-1, vmax=1, center=0, cmap=sns.diverging_palette(20, 220, n=200), square=True ) ax.set_xticklabels( ax.get_xticklabels(), rotation=45, horizontalalignment='right' ); # + foo = train_data.drop(columns=['store_and_fwd_flag', 'pickup_hour', 'rain', 's_fall', 'all_precip', 'has_rain', 's_depth', 'minimum temperature', 'maximum temperature', 'pickup_weekday', 'right_turns', 'turns']) corr = foo.corr() ax = sns.heatmap( corr, vmin=-1, vmax=1, center=0, cmap=sns.diverging_palette(20, 220, n=200), square=True ) ax.set_xticklabels( ax.get_xticklabels(), rotation=45, horizontalalignment='right' ); # + data = train_data.drop(columns = ['dropoff_datetime','id','pickup_datetime', 'store_and_fwd_flag','date','speed']) # - data.to_csv('preprocessed_train_data.csv') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ##### ROBDD Apply # #### class Apply # - class Apply inherits from the ROBDD. # # `class Apply_ROBDD(ROBDD)` # # #### function Apply # - This recursive function builds a new table, taking an operation op as input with two different ROBDDS r1,r2 # # `ROBDD<-apply(self,op,r1,r2)` from ROBDD import ROBDD,Apply_ROBDD import time # Quick and Easy print Table Function for ROBDDs def printROBDD(ROBDD): print('============================') print('| u | i | l | h |') print('============================') n = ROBDD.nNodes for idx in range(0,n): node = ROBDD.T[idx] print(' '+str(idx)+' '+str(node[0])+' '+str(node[1])+' '+str(node[2])+' ') print('---------------------------') # #### Test Case 1 - Simple Test Case # # Create a ROBDDs with 3 variables and the following equations:- # # ROBDD1 = `or(equiv(x0,x1),x2)` # # ROBDD2 = `equiv(and(x0,x1),x2)` # # operation = `and` ROBDD1 = ROBDD(3,0) ROBDD2 = ROBDD(3,1) start = time.clock() ROBDD_Applied = Apply_ROBDD(3) ROBDD_Applied.apply('and',ROBDD1,ROBDD2) time.clock() - start printROBDD(ROBDD_Applied) # #### Test Case 2 - Increasing number of variables and testing with different number of variables # # Create ROBDDs with different number of variables # # ROBDD1 = `and(or(equiv(x0,x1),x2),x3)` # # ROBDD2 = `equiv(and(x0,x1),x2)` # # operation = `or` ROBDD1 = ROBDD(4,0) ROBDD2 = ROBDD(3,1) start = time.clock() ROBDD_Applied = Apply_ROBDD(4) ROBDD_Applied.apply('and',ROBDD1,ROBDD2) time.clock() - start printROBDD(ROBDD_Applied) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Can we use the velocity of the binary orbit to move the m dwarf lya out of the airglow? 
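# A rough back-of-envelope check on the question above: the non-relativistic Doppler shift is
# $\Delta\lambda = \lambda_0\, v / c$. The radial-velocity amplitude used below is a placeholder
# assumption for illustration only, not a measured value for this system.
# +
import astropy.units as u
import astropy.constants as const

lya_rest = 1215.67 * u.AA        # Lyman-alpha rest wavelength
v_radial = 150 * u.km / u.s      # ASSUMED velocity amplitude -- substitute the real value
delta_lam = (lya_rest * v_radial / const.c).to(u.AA)
print('Ly-alpha shift for {:.0f}: {:.2f}'.format(v_radial, delta_lam))
# -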
# + import numpy as np import matplotlib.pyplot as plt import astropy.io.fits as fits import os import glob from astropy.table import Table from astropy.io import ascii import astropy.units as u import astropy.constants as const from astropy.convolution import convolve, Box1DKernel import scipy.interpolate as interpolate #matplotlib set up # %matplotlib inline from matplotlib import rcParams rcParams["figure.figsize"] = (14, 5) rcParams["font.size"] = 20 # - path = '/media/david/5tb_storage1/v471_tau/e140m/' x1ds = glob.glob(path+'*x1d.fits') """for x in x1ds: data = fits.getdata(x, 1) for dt in data: plt.plot(dt['WAVELENGTH'], dt['FLUX']) #plt.xlim(1210, 1220) plt.ylim(0, 3e-12)""" # + # import stisblazefix # stisblazefix.fluxfix(x1ds, pdfname='blazefix') # - """x1fs = glob.glob(path+'*x1f.fits') for x in x1fs: data = fits.getdata(x, 1) for dt in data: plt.plot(dt['WAVELENGTH'], dt['FLUX']) #plt.xlim(1210, 1220) plt.ylim(0, 3e-12)""" # + def spectra_adder(f_array, e_array): """ Returns a variance-weighted coadd with standard error of the weighted mean (variance weights, scale corrected). f_array and e_arrays are collections of flux and error arrays, which should have the same lenth and wavelength scale """ weights = 1 / (e_array**2) flux = np.average(f_array, axis =0, weights = weights) var = 1 / np.sum(weights, axis=0) rcs = np.sum((((flux - f_array)**2) * weights), axis=0) / (len(f_array)-1) #reduced chi-squared error = (var * rcs)**0.5 #var1 = return flux, var**0.5 def echelle_coadd(wavelength, flux, err, nclip =5): """ combines echelle orders into one spectrum, stiching them together at the overlap """ #slice dodgy ends off orders (usually 5-10 for stis el40m) wavelength = wavelength[:, nclip:-(nclip+1)] flux = flux[:, nclip:-(nclip+1)] err = err[:, nclip:-(nclip+1)] #new arrays to put the output in w_full = np.array([], dtype=float) f_full = np.array([], dtype=float) e_full = np.array([], dtype=float) shape = np.shape(flux) order = 0 while order < (shape[0]): #first add the part that does not overlap ajacent orders to the final spectrum if order == 0: #first and last orders do not overlap at both ends overmask = (wavelength[order] > wavelength[order + 1][-1]) elif order == shape[0]-1: overmask = (wavelength[order] < wavelength[order - 1][1]) else: overmask = (wavelength[order] > wavelength[order + 1][-1]) & (wavelength[order] < wavelength[order - 1][1]) w_full = np.concatenate((w_full, wavelength[order][overmask])) f_full = np.concatenate((f_full, flux[order][overmask])) e_full = np.concatenate((e_full, err[order][overmask])) if order != shape[0]-1: #interpolate each order onto the one beneath it, with larger wavelength bins. 
Code adapted from stisblazefix f = interpolate.interp1d(wavelength[order + 1], flux[order + 1], fill_value='extrapolate') g = interpolate.interp1d(wavelength[order + 1], err[order + 1], fill_value='extrapolate') overlap = np.where(wavelength[order] <= wavelength[order + 1][-1]) f0 = flux[order][overlap] f1 = f(wavelength[order][overlap]) g0 = err[order][overlap] g1 = g(wavelength[order][overlap]) #combine flux and error at overlap and add to final spectrum w_av = wavelength[order][overlap] f_av, e_av = spectra_adder(np.array([f0,f1]),np.array([g0,g1])) w_full = np.concatenate((w_full, w_av)) f_full = np.concatenate((f_full, f_av)) e_full = np.concatenate((e_full, e_av)) order += 1 #stis orders are saved in reverse order, so combined spectra are sorted by the wavelength array arr1inds = w_full.argsort() sorted_w = w_full[arr1inds] sorted_f = f_full[arr1inds] sorted_e = e_full[arr1inds] return sorted_w, sorted_f, sorted_e # - """for x in x1fs: hdul = fits.open(x) header=hdul[0].header data = hdul[1].data w, f, e = echelle_coadd(data['WAVELENGTH'], data['FLUX'], data['ERROR']) mask = (w > 1160) savepath = '{}stitched_e140m/{}_stitched.ecsv'.format(path, header['ROOTNAME']) savdat = Table([w,f,e], names=['WAVELENGTH', 'FLUX', 'ERROR'], meta=dict(tstart=header['TEXPSTRT'], tend=header['TEXPEND'])) ascii.write(savdat, savepath, format='ecsv', overwrite=True) plt.plot(w[mask], f[mask]) hdul.close() plt.show()""" # + # hdul = fits.open(x) # hdul[0].header # - # Get a Muscles spectrum to compare? lya = fits.getdata('hlsp_muscles_model_lya-reconstruction_v-eps-eri_na_v22_component-spec.fits',1) plt.plot(lya['WAVELENGTH'], lya['FLUX']) de = 3.212 dv = 1/(20.95688223826358e-3) scale = (de/dv)**2 print(scale) specs = glob.glob('{}stitched_e140m/*_stitched.ecsv'.format(path)) print(specs) len(specs) tmids = [] for spec in specs: data=Table.read(spec) tmid = (data.meta['tstart']+data.meta['tend'])/2 tmids.append(tmid) # Get binary period from https://ui.adsabs.harvard.edu/abs/2012ApJ...751...66S/abstract. Yay they have phased them up for me! 
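# (If phases were not already tabulated, they could be folded from the mid-exposure times
# computed above as phase = ((t - T0) / P) mod 1. The period and epoch below are generic
# placeholders for illustration, not values taken from the paper.)
# +
P_days = 0.5        # placeholder orbital period (days) -- substitute the published value
T0_mjd = 50000.0    # placeholder reference epoch (MJD) -- substitute the published value
folded_phases = [((t - T0_mjd) / P_days) % 1.0 for t in tmids]
print(folded_phases)
# -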
# + roots = np.array(['o4mu02010','o4mua2010','o4mua2020','o4mu01010','o4mu01020','o4mua1010','o5dma1010', 'o5dma4010','o5dma2010','o5dma3010','o6jc01010','o6jc01020','o6jc01030','o6jc01040']) phases = np.array([0.93,0.06,0.17,0.69,0.8,0.94,0.23,0.74,0.27,0.76,0.79,0.9,0.05,0.15]) args = np.argsort(phases) roots= roots[args] phases = phases[args] # - smooth=5 for root, phase in zip(roots, phases): data = Table.read('{}stitched_e140m/{}_stitched.ecsv'.format(path, root)) print(phase) w, f, e = np.array(data['WAVELENGTH']), np.array(data['FLUX']), np.array(data['ERROR']) f = convolve(f,Box1DKernel(smooth)) e = convolve(e,Box1DKernel(smooth))/(smooth**0.5) mask = (w > 1190) & (w < 1240) plt.plot(w[mask], f[mask]) plt.ylim(-0.1e-12, 1.49e-12) plt.show() # + from matplotlib.animation import FuncAnimation #smooth=50 fig, ax = plt.subplots(figsize=(7,5)) fig.set_tight_layout(True) #ax[0].plot(t, f_lc) #ax[0].set_xlabel('Time (s)') #ax[0].set_ylabel('Flux (erg s$^{-1}$ cm$^{-2}$)') #ax[0].set_ylim(0.4, 1.2) ax.set_xlim(1205.1, 1224.9) ax.set_ylim(-0.1e-12, 1.49e-12) # ax.set_yscale('log') #line, = ax[0].plot([0,0], [-0.1e-12,1.3e-12], 'C1--', linewidth=2) ax.set_ylabel('Flux (erg s$^{-1}$ cm$^{-2}$ \AA$^{-1}$)') ax.set_xlabel('Wavelength (\AA)') #ax.axvline(1393.775, ls='--', c='C1', alpha=0.5) #ax.axvline(1402.770, ls='--', c='C1', alpha=0.5) # [ax.axvline(line, ls='--', c='C1', alpha=0.5) for line in [1206.499 ,1238.821, 1242.804]] #ax.fill_between([1215.392, 1215.948],0, 1, facecolor = 'k') ax.axvline(1215.67, ls='--', c='C1', alpha=0.5) #ext = hdul[1::][0] #dt = ext.data[0] #w, f = dt['WAVELENGTH'], dt['FLUX'] w, f, e = np.array([], dtype=float), np.array([], dtype=float), np.array([], dtype=float) #w, f, e = np.loadtxt(csv_files[0], unpack=True, delimiter=',') line1, = ax.plot(w,f) an = ax.annotate('', (0.95, 0.95), xycoords='axes fraction' , ha='right', va='top') #std = np.max(modf)/200 #peak sn = 40 for one orbit # ax.fill_between([1215.392, 1215.948],0, 1, facecolor = '0.5', zorder=100)#, alpha=0.5) def update(i): data = Table.read('{}stitched_e140m/{}_stitched.ecsv'.format(path, roots[i])) # phase = phases[i] w, f, e = np.array(data['WAVELENGTH']), np.array(data['FLUX']), np.array(data['ERROR']) f = convolve(f,Box1DKernel(smooth)) e = convolve(e,Box1DKernel(smooth))/(smooth**0.5) mask = (w > 1205) & (w < 1225) w, f = w[mask], f[mask] line1.set_xdata(w) line1.set_ydata(f) an.set_text('Phase = {:.2f}'.format(phases[i])) ## if ti > t0+2: # t0 = ti # obs = # an.set_text('Ob {0}'.format(obs)) # print(ti) return ax, line1, an #ax.legend() anim = FuncAnimation(fig, update, frames=np.arange(len(roots)), interval=200) anim.save('plots/v471_tau_basic.gif', dpi=150, writer='imagemagick') plt.show() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # Claim 6 Count the total number of neuron fragments individually labeled in the volume # + import numpy as np import ndio.remote.neurodata as nd from datetime import datetime startTime = datetime.now() oo = nd() token = '' channel = 'neurons' res = 3 pixel_dim = 0.024*0.024*0.030 #can get from LIMS # + import ndio.ramon as ramon # Don't count all objects, because RAMONNeuron paint is already counted in RAMONSegments # Segments in cylinder segment_ids_cyl = oo.get_ramon_ids(token, channel, ramon_type=ramon.RAMONSegment) # Segments in volume are not RAMONified, so doing the hard way # 
TODO - RAMONIFY token = '' channel = 'annotation' res = 3 image_size = oo.get_token_info(token)['dataset']['imagesize'][str(res)] unique_count = [] for i in range(1, image_size[2]+1, 16): #TODO hardcoded z print str(i).zfill(4), z_start = i z_stop = np.min([image_size[2]+1, i + 16]) im = oo.get_volume(token, channel, 0, image_size[0], 0, image_size[1], z_start, z_stop, resolution=res) unique_count = np.concatenate([np.ravel(unique_count),np.ravel(np.unique(im.cutout))]) segment_ids_all = np.shape(np.unique(unique_count))[0] - 1 #remove 0 label print datetime.now() - startTime # - print 'Segments in cylinder: ' + str(np.shape(segment_ids_cyl)[0]) + ' Total segments: ' + str(segment_ids_all) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from phasenet.zernike import ZernikeWavefront, random_zernike_wavefront import matplotlib.pyplot as plt import numpy as np # ## The wavefront can be created by passing a fixed set of amplitudes for different Zernike modes as a list/1D array/dictionary. # + # list of amplitudes starting from piston amp = np.random.uniform(-1,1,4) f = ZernikeWavefront(amp, order='ansi') display(f.zernikes) print(f.amplitudes_noll) print(f.amplitudes_ansi) plt.imshow(f.polynomial(512)); plt.colorbar(); plt.axis('off'); # + # dictionary of amplitudes f = ZernikeWavefront({3:0.1, 5:0.1}, order='ansi') display(f.zernikes) print(f.amplitudes_noll) print(f.amplitudes_ansi) plt.imshow(f.polynomial(512)); plt.colorbar(); plt.axis('off'); # - # ## A random wavefront can be created by giving an absolute amplitude range as an absolute number or a tuple for different Zernike modes # + # random wavefront from a list of absolute amplitudes f = random_zernike_wavefront([1,1,1,1], order='ansi') display(f.zernikes) print(f.amplitudes_noll) print(f.amplitudes_ansi) plt.imshow(f.polynomial(512)); plt.colorbar(); plt.axis('off'); # + # random wavefront from a list of amplitude ranges given in a tuple f = random_zernike_wavefront([(0,0),(-1,1),(1,2)], order='ansi') display(f.zernikes) print(f.amplitudes_noll) print(f.amplitudes_ansi) plt.imshow(f.polynomial(512)); plt.colorbar(); plt.axis('off'); # + # random wavefront from a dictionary with the range given in a tuple f = random_zernike_wavefront({'defocus':(1,2), (3,-3):5}) display(f.zernikes) print(f.amplitudes_noll) print(f.amplitudes_ansi) plt.imshow(f.polynomial(512)); plt.colorbar(); plt.axis('off'); # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %matplotlib inline import numpy as np import scipy.stats as ss import matplotlib.pyplot as plt co2data = np.genfromtxt('C:/users/tman1_000/Downloads/co2_mm_mlo.txt') co2data = co2data[334:,:] #subset data from 1986 to present plt.figure(figsize=(20,10)) plt.plot(co2data[:,2],co2data[:,3],linewidth='3', color="#0047ab") #plots the decimal year against the average CO2 concentration plt.plot(co2data[len(co2data)-1,2],co2data[len(co2data)-1,3],'o', color='#0047ab') plt.annotate('%s' % co2data[len(co2data)-1,2],xy=(co2data[len(co2data)-1,2],co2data[len(co2data)-1,3]),xytext=((co2data[len(co2data)-1,2]+0.5),co2data[len(co2data)-1,3]), textcoords='data') slope, intercept, r_value, p_value, std_err = ss.linregress(co2data[:,2],co2data[:,3]) 
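# Quick numerical summary of the straight-line fit before plotting it (added sketch; given the
# columns plotted above, the slope is approximately the CO2 growth rate in ppm per year):
print('slope = %.2f ppm/yr, intercept = %.1f, r^2 = %.3f' % (slope, intercept, r_value**2))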
plt.figure(figsize=(20,10)) plt.scatter(co2data[:,2],co2data[:,3],color = '#a09c9c') plt.plot(co2data[:,2], slope*co2data[:,2] + intercept, '-', color='#44cc54',linewidth='3') plt.legend() from sklearn import linear_model clf = linear_model.LinearRegression() X_quad = np.column_stack((co2data[:,2], co2data[:,2]**2)) #design matrix with year and year**2 columns for a quadratic trend fit clf.fit(X_quad, co2data[:,3]) plt.figure(figsize=(20,10)) plt.plot(co2data[:,2],clf.predict(X_quad)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # freud.order.Steinhardt # ## Steinhardt Order Parameters # The `freud.order` module provides the tools to calculate various [order parameters](https://en.wikipedia.org/wiki/Phase_transition#Order_parameters) that can be used to identify phase transitions. # In the context of crystalline systems, some of the best known order parameters are the Steinhardt order parameters $q_l$ and $w_l$. # These order parameters are mathematically defined according to certain rotationally invariant combinations of spherical harmonics calculated between particles and their nearest neighbors, so they provide information about local particle environments. # As a result, considering distributions of these order parameters across a system can help characterize the overall system's ordering. # The primary utility of these order parameters arises from the fact that they often exhibit certain characteristic values for specific crystal structures. # # In this notebook, we will use the order parameters to identify certain basic structures: BCC, FCC, and simple cubic. # FCC, BCC, and simple cubic structures each exhibit characteristic values of $q_l$ for some $l$ value, meaning that in a perfect crystal all the particles in one of these structures will have the same value of $q_l$. # As a result, we can use these characteristic $q_l$ values to determine whether a disordered fluid is beginning to crystallize into one structure or another. # The $l$ values correspond to the $l$ quantum number used in defining the underlying spherical harmonics; for example, the $q_4$ order parameter would provide a measure of 4-fold ordering. # + import freud import matplotlib.pyplot as plt import numpy as np from mpl_toolkits.mplot3d import Axes3D # Try to plot using KDE if available, otherwise revert to histogram try: from sklearn.neighbors.kde import KernelDensity kde = True except: kde = False np.random.seed(1) # - # We first construct ideal crystals and then extract the characteristic value of $q_l$ for each of these structures. # In this case, we know that simple cubic has a coordination number of 6, BCC has 8, and FCC has 12, so we are looking for the values of $q_6$, $q_8$, and $q_{12}$, respectively. # Therefore, we can also enforce that we require 6, 8, and 12 nearest neighbors to be included in the calculation, respectively.
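# For reference, the per-particle quantity computed below is the standard Steinhardt form, with
# $N_b$ the number of neighbors of particle $i$ and $Y_{lm}$ the spherical harmonics:
#
# $$ q_{lm}(i) = \frac{1}{N_b}\sum_{j=1}^{N_b} Y_{lm}\big(\theta_{ij}, \phi_{ij}\big), \qquad
#    q_l(i) = \sqrt{\frac{4\pi}{2l+1}\sum_{m=-l}^{l} \big|q_{lm}(i)\big|^{2}} $$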
# + L = 6 sc = freud.data.UnitCell.sc() sc_system = sc.generate_system(5) ql = freud.order.Steinhardt(L) ql_sc = ql.compute(sc_system, neighbors={"num_neighbors": L}).particle_order mean_sc = np.mean(ql_sc) print( "The Q{} values computed for simple cubic are {:.3f} +/- {:.3e}".format( L, mean_sc, np.std(ql_sc) ) ) L = 8 bcc = freud.data.UnitCell.bcc() bcc_system = bcc.generate_system(5, sigma_noise=0) ql = freud.order.Steinhardt(L) ql.compute(bcc_system, neighbors={"num_neighbors": L}) ql_bcc = ql.particle_order mean_bcc = np.mean(ql_bcc) print( "The Q{} values computed for bcc are {:.3f} +/- {:.3e}".format( L, mean_bcc, np.std(ql_bcc) ) ) L = 12 fcc = freud.data.UnitCell.fcc() fcc_system = fcc.generate_system(5) ql = freud.order.Steinhardt(L) ql_fcc = ql.compute(fcc_system, neighbors={"num_neighbors": L}).particle_order mean_fcc = np.mean(ql_fcc) print( "The Q{} values computed for fcc are {:.3f} +/- {:.3e}".format( L, mean_fcc, np.std(ql_fcc) ) ) # - # Given that the per-particle order parameter values are essentially identical to within machine precision, we can be confident that we have found the characteristic value of $q_l$ for each of these systems. # We can now compare these values to the values of $q_l$ in thermalized systems to determine the extent to which they are exhibiting the ordering expected of one of these perfect crystals. def make_noisy_replicas(unitcell, sigmas): """Given a unit cell, return a noisy system.""" systems = [] for sigma in sigmas: systems.append(unitcell.generate_system(5, sigma_noise=sigma)) return systems sigmas = [0.01, 0.02, 0.03, 0.05] sc_systems = make_noisy_replicas(sc, sigmas) bcc_systems = make_noisy_replicas(bcc, sigmas) fcc_systems = make_noisy_replicas(fcc, sigmas) # + fig, axes = plt.subplots(1, 3, figsize=(16, 5)) # Zip up the data that will be needed for each structure type. zip_obj = zip( [sc_systems, bcc_systems, fcc_systems], [mean_sc, mean_bcc, mean_fcc], [6, 8, 12], ["Simple Cubic", "BCC", "FCC"], ) for i, (systems, ref_val, L, title) in enumerate(zip_obj): ax = axes[i] for j, (system, sigma) in enumerate(zip(systems, sigmas)): ql = freud.order.Steinhardt(L) ql.compute(system, neighbors={"num_neighbors": L}) if not kde: ax.hist(ql.particle_order, label=fr"$\sigma$ = {sigma}", density=True) else: padding = 0.02 N = 50 bins = np.linspace( np.min(ql.particle_order) - padding, np.max(ql.particle_order) + padding, N, ) kde = KernelDensity(bandwidth=0.004) kde.fit(ql.particle_order[:, np.newaxis]) ql = np.exp(kde.score_samples(bins[:, np.newaxis])) ax.plot(bins, ql, label=fr"$\sigma$ = {sigma}") ax.set_title(title, fontsize=20) ax.tick_params(axis="both", which="both", labelsize=14) if j == 0: # Can choose any element, all are identical in the reference case ax.vlines(ref_val, 0, np.max(ax.get_ylim()[1]), label="Reference") fig.legend(*ax.get_legend_handles_labels(), fontsize=18) # Only have one legend fig.subplots_adjust(right=0.78) # - # From this figure, we can see that for each type of structure, increasing the amount of noise makes the distribution of the order parameter values less peaked at the expected reference value. # As a result, we can use this method to identify specific structures. # Choosing the appropriate parameterization for the order parameter (which quantum number $l$ to use, how to choose neighbors, etc.) can be very important. # In addition to the $q_l$ parameters demonstrated here, this class can also compute the third-order invariant $w_l$. 
# The $w_l$ may be better at identifying some structures, so some experimentation and reference to the appropriate literature can be useful (as a starting point, see [Steinhardt, Nelson, and Ronchetti (1983)](https://doi.org/10.1103/PhysRevB.28.784)). # # By setting `average=True` in the constructor, the `Steinhardt` class will perform an additional level of averaging over the second neighbor shells of particles, to accumulate more information on particle environments (see [Lechner and Dellago (2008)](https://doi.org/10.1063/1.2977970)). # To get a sense for the best method for analyzing a specific system, the best course of action is to try out different parameters or to consult the literature to see how these have been used in the past. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Module 5 # # ## Video 22: Filtering by Time # **Python for the Energy Industry** # # ## Datetime Objects # # In the 'Cargo Movements Example' video, we saw the `datetime` object used to specify a particular date and time to look for cargo movements. In this lesson we explore in more detail the `datetime` object, and how it is used for filtering. # # When given 3 arguments, a datetime object represents midnight at the beginning of the day specified by `datetime(YYYY,MM,DD)`: # + from datetime import datetime # 00:00 November 1st, 2020 print(datetime(2020,11,1)) # - # Additional arguments represent hours, minutes, and seconds respectively: # 12:00 November 1st, 2020 print(datetime(2020,11,1,12)) # 12:30 November 1st, 2020 print(datetime(2020,11,1,12,30)) # 12:30:09 November 1st, 2020 print(datetime(2020,11,1,12,30,9)) # It's straightforward to get the current date/time: print(datetime.utcnow()) print(datetime.utcnow() - datetime(2020,11,1)) # ## Times up to Now # # Say you want data over a time period stretching from 1 day, or week, or month ago, up to the current time. The `relativedelta` object can be used for this. # + from dateutil.relativedelta import relativedelta now = datetime.utcnow() one_day_ago = now - relativedelta(days=1) one_week_ago = now - relativedelta(weeks=1) one_month_ago = now - relativedelta(months=1) print(one_day_ago) print(one_week_ago) print(one_month_ago) # - # ## Filtering # # When pulling Cargo Movements data from the Vortexa API, we are generally only interested in some subset of the data. This may be data from a particular time window, originating from or destined for a particular location, carrying a particular product, involving a particular vessel, or some combination of these conditions. This is called 'filtering'. # # Filtering by location, product, or vessel is done using the associated IDs that we can access from the relevant endpoints. Filtering by time is a bit different: as you've seen, datetime objects are used for this. # # As a reminder, documentation for the Cargo Movements endpoint can be [found here.](https://vortechsa.github.io/python-sdk/endpoints/cargo_movements/) # # ## Timestamp Filters # # The meaning of `filter_time_min` and `filter_time_max` depends on the `filter_activity` corresponding to these times. The following activities: # - loading_start # - identified_for_loading_at # - storing_start # - storing_end # - unloading_start # - unloading_end # These are filters that correspond to an exact timestamp at which the event occurred.
Filtering on these will give Cargo Movements where the timestamp of the corresponding activity is between `filter_time_min` and `filter_time_max`. # + import vortexasdk as v cm_query = v.CargoMovements().search( filter_activity="loading_start", filter_time_min=one_day_ago, filter_time_max=now) print(len(cm_query)) # - # This means that there are 257 Cargo Movements that started loading between midnight and midday on November 1st. Obviously, if the same time is given as both the min and max for a timestamp filter, zero results will be returned: # + cm_query = v.CargoMovements().search( filter_activity="loading_end", filter_time_min=now, filter_time_max=now) print(len(cm_query)) # - # *Note: you can of course use specific datetime objects, rather than relative dates, for filtering.* # ## State Filters # Certain activities correspond to states that last for some time, rather than instantaneous timestamps: # - loading_state # - identified_for_loading_state # - transiting_state # - storing_state # - unloaded_state # - any_activity # When filtering on a state, you will get all Cargo Movements which were in that state at any point between `filter_time_min` and `filter_time_max`. This means even if `filter_time_min` and `filter_time_max` are the same time, you will still get back any Cargo Movements that were in that state at that time: # + cm_query = v.CargoMovements().search( filter_activity="loading_state", filter_time_min=now, filter_time_max=now) print(len(cm_query)) # - # Naturally, the number of Cargo Movements returned by a general query like this will become quite large as the filter window is expanded: # + cm_query = v.CargoMovements().search( filter_activity="loading_state", filter_time_min=one_day_ago, filter_time_max=now) print('last day:',len(cm_query)) cm_query = v.CargoMovements().search( filter_activity="loading_state", filter_time_min=one_week_ago, filter_time_max=now) print('last week:',len(cm_query)) cm_query = v.CargoMovements().search( filter_activity="loading_state", filter_time_min=one_month_ago, filter_time_max=now) print('last month:',len(cm_query)) # - # *Note of caution: be careful about directly putting `datetime.utcnow()` as the `filter_time_max` argument, or putting `now = datetime.utcnow()` in the same cell as now is passed in the argument. There is a risk that small differences between the time measured on your computer and the Vortexa servers can mean that `now` is thought to be in the future, giving an error!* # # ### Exercise # # Create a pandas DataFrame that gives the number of cargos that are being loaded at 00:00UTC on each day of the previous month. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # ### *Data Notebook for* # # The BioFragment Database (BFDb): An Open-Data Platform for Computational Chemistry Analysis of Noncovalent Interactions # ### Burns, Faver, Zheng, Marshall, Smith, Vanommeslaeghe, MacKerell, Merz, and Sherrill # ## 0. Configuration # ##### Instructions # **Always execute Section 0**; thereafter, Sections can be executed independently. Foremost, this is a notebook to generate tables and figures for the manuscript and suppmat. Section X is a more general data access guide that may be an orienting first step. 
# ##### Prerequisites (satisfied by installer) # * Python 2.7 # * Jupyter # * matplotlib # * qcdb (adjust import path below if have separate git repository) import os import errno import sys # %matplotlib inline # if _not_ running from installer and qcdb _not_ in :envvar:`PYTHONPATH`, put path to repo here #sys.path.insert(1, '/Users/loriab/linux/qcdb') import qcdb qcdb.__file__ # ##### Optional Prerequisites (NOT satisfied by installer) # * web browser to display Jupyter notebook # * on Windows, Windows Subsystem for Linux installed and Developer Mode on # * PDF viewer (configure below) # * `pdflatex` (configure below) # + # command to open pdf files. only needed for tables and standalone=True. below works for Mac. pdfviewer = 'open /Applications/Preview.app' # command to run LaTeX. only needed for tables and standalone=True. below works for Mac w/TeXShop pdflatex = '/usr/texbin/pdflatex' # - # ##### Scratch Directory Configuration # existing, writable, empty (to start with) project directory for notebook to write to # note that in installer, this dir will already have prebuilt figure contents to save time wdir = '/opt/anaconda1anaconda2anaconda3/bfdb_analysis' # %cd {wdir} # ##### Table Graphics Configuration: Uncomment one of three # _Background_: the tables have a lot of little figures that must be present for LaTeX to compile but which are tedious to generate. # _Recommendation_: These little figures come pre-built with the installer so easiest to just use them. If you want to rebuild, uncomment the first set, execute the block, run section IV that generates all figures any of the tables need, then recomment the first set and uncomment the second set, and execute this block again. Thereafter, you can run any table sections w/o regenerating figures. # + # run from jupyter, pop up tables, fresh build figures # plotpath = 'autogen' # standalone = True # run from jupyter, pop up tables, reuse figures plotpath = '' standalone = True # run from jupyter, prep tables tex for paper/suppmat, reuse figures # plotpath = '/opt/anaconda1anaconda2anaconda3/bfdb_analysis/' # standalone = False # - # ##### Tables Configuration # + # plotting area btwn +/- tblxlimit kcal/mol for thread plots in tables tblxlimit = 4.0 # guide lines at +/- tblxlines kcal/mol values for thread plots in tables tblxlines = [0.0, 0.3, 1.0, 4.0] # maximum color at +/- tblialimit kcal/mol for mini Iowa plots in tables tblialimit = 1.0 # whether to plot incomplete modelchems in tables. safe b/c footnotes created if any datum missing tblfailoninc = False # summary error statistic in tables is MAE, mean absolute error tblerr = ['mae'] # - # ### Loading SSI & BBI databases and QC results from qcdb.dbwrap import oxcom, fnreservoir from qcdb import textables # to define tableplans # `loadfrompickle=True` signals to load data from {qcdb-top-dir}/data/{database}*pickle files. # this is far faster than reloading the data from python command files "/{database}*py bfdb = qcdb.Database(['ssi', 'bbi'], loadfrompickle=True) ssi = qcdb.Database('ssi', loadfrompickle=True) bbi = qcdb.Database('bbi', loadfrompickle=True) print bfdb.available_projects() for pj in bfdb.available_projects(): bfdb.load_qcdata_byproject(pj) ssi.load_qcdata_byproject(pj) bbi.load_qcdata_byproject(pj) bfdb.promote_Subset() # ## I. Iowa plots (Figs. 4 & 5) # isolate in a directory subdir = 'fig_iowa_array' # %cd {wdir} if not os.path.exists(subdir): os.makedirs(subdir) # %cd {subdir} # + # maximum color at +/- xlimit kcal/mol xlimit = 1.0 # files only. 
turn off plotting within notebook b/c colors don't show through layers. view = False # model chemistries to plot dbmc = [ 'GAFF--na', # a 'CGENFF--na', # b 'AM1--na', # c 'PM6DH2--na', # d 'PBEH3C-dfhf_unCP-def2msvp', # e 'M052X-dfhf_unCP-adz', # f 'WB97XV-unCP-atz', # g 'WB97MV-unCP-atz', # h 'PBE0D3MBJ-dfhf_CP-adz', # i 'B3LYPD3MBJ-dfhf_CP-adz', # j 'B2PLYPD3M-dfhf_dfmp_CP-atz', # k 'MP2-dfhf_dfmp_CP-atqz', # l 'SCSMIMP2-dfhf_dfmp_CP-atz', # m 'DWMP2-dfhf_dfmp_CP-adtz', # n 'SAPT2P-SA-adz', # o 'MP2CF12-SA-adz', # p ] # - # Note: Each Iowa can take ~1 min to generate, and the plots won't appear in Jupyter (vide supra). Don't be alarmed, look for the plots in the `wdir`. for mc in dbmc: print mc ssi.plot_iowa(mc, graphicsformat=['pdf'], failoninc=False, view=view, xlimit=xlimit) # %ls # ## II. Metal benchmarks thread plots (Fig. 3) # isolate in a directory subdir = 'fig_metals' # %cd {wdir} if not os.path.exists(subdir): os.makedirs(subdir) # %cd {subdir} db4 = qcdb.DB4(loadfrompickle=True) # S22, NBC10, HBC6, HSG db4.load_qcdata_byproject('pt2') # JCP 141 234111 (2014) (doi: 10.1063/1.4903765) db4.plot_modelchems(['C2011BENCH', 'DWCCSDTF12-CP-adz', 'MP2CF12-CP-adz'], xlimit=2.0, sset='tt-5min') db4.plot_modelchems(['C2011BENCH', 'DWCCSDTF12-CP-adz', 'MP2CF12-CP-adz'], labeled=False, xlimit=2.0, sset='tt-5min') # %ls del db4 # ## III. Ternary plots (Fig. 2) s22 = qcdb.Database('s22', loadfrompickle=True) # isolate in a directory subdir = 'fig_ternary_mpl' # %cd {wdir} if not os.path.exists(subdir): os.makedirs(subdir) # %cd {subdir} # + # graphics formats gf = ['pdf', 'png'] # SSI, SSI500, SSI100, and other subsets ssisss = ['default', 'ssi500', 'ssi100'] # BBI, BBI25, and other subsets bbisss = ['default', 'bbi25', 'shb', 'ua'] # other (full) databases dbs = ['hsg', 'ubq', #'s22', 'nbc10', 'hbc6', 's22by7', 'achc', 'a24', 's66', 'jsch', 'nbc10ext' # not used in paper but available ] # + for ss in ssisss: print 'SSI', ss ssi.plot_ternary(graphicsformat=gf, sset=ss, labeled=False) ssi.plot_ternary(graphicsformat=gf, sset=ss, labeled=True) for ss in bbisss: print 'BBI', ss bbi.plot_ternary(graphicsformat=gf, sset=ss, labeled=False) bbi.plot_ternary(graphicsformat=gf, sset=ss, labeled=True) for db in dbs: print 'Others', db otr = qcdb.Database(db, loadfrompickle=True) otr.plot_ternary(graphicsformat=gf, labeled=False) otr.plot_ternary(graphicsformat=gf, labeled=True) # - # %ls # ## IV. Primary SuppMat Tables (Tables S-5 through S-19) # ##### basis-major w/Iowa & thread: 1 MM, 2 WFN, 12 DFT # isolate in a directory subdir = 'tbl_modelchems' # %cd {wdir} if not os.path.exists(subdir): os.makedirs(subdir) # %cd {subdir} # Note: When `liliowa` or `flat` appear in the table plan, a figure is inserted into the table. As is, the figures for all the tables in this notebook are pre-generated, so building tables will be fast. If you start adapting these tables and venture into the realm where pre-generated figures aren't available, you'll hit problems when LaTeX compiles your table. See the bit about `autogen` in the "0. Configuration" section to have figures build on-the-fly. But be aware that figures can take several seconds apiece, so these cells will execute slowly. def table_bfdb_suppmat(**kwargs): """Specialization of table_generic into table with as many statistics as will fit plus embedded Iowa and thread diagram as suitable for supplementary material. Multiple tables are formed, one for each in *bas* with lines *mtd* within each table. 
""" rowplan = ['bas', 'mtd'] columnplan = [ ['l', r"""Method \& Basis Set""", '', textables.label, {}], ['d', 'BBI', 'SHB', textables.val, {'sset': 'shb', 'dbse': 'BBI'}], ['d', 'BBI', 'UA', textables.val, {'sset': 'ua', 'dbse': 'BBI'}], ['d', 'BBI', '25', textables.val, {'sset': 'bbi25', 'dbse': 'BBI'}], ['d', 'BBI', 'TT', textables.val, {'sset': 'default', 'dbse': 'BBI'}], ['d', 'SSI', r"""$\bm{+/+}$""", textables.val, {'sset': 'pospos', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{-/-}$""", textables.val, {'sset': 'negneg', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{+/-}$""", textables.val, {'sset': 'posneg', 'dbse': 'SSI'}], ['d', 'SSI', 'pl/pl', textables.val, {'sset': 'polarpolar', 'dbse': 'SSI'}], ['d', 'SSI', 'al/al', textables.val, {'sset': 'aliphaliph', 'dbse': 'SSI'}], ['d', 'SSI', 'ar/ar', textables.val, {'sset': 'arylaryl', 'dbse': 'SSI'}], ['d', 'SSI', 'al/ar', textables.val, {'sset': 'alipharyl', 'dbse': 'SSI'}], ['d', 'SSI', '100', textables.val, {'sset': 'ssi100', 'dbse': 'SSI'}], ['d', 'SSI', '500', textables.val, {'sset': 'ssi500', 'dbse': 'SSI'}], ['d', 'SSI', 'TT', textables.val, {'sset': 'default', 'dbse': 'SSI'}], ['c', r"""Iowa\footnotemark[1]""", '', textables.liliowa, {}], ['l', r"""Error Distribution\footnotemark[2]""", '', textables.flat, {'sset': 'ssi'}] ] footnotes = [fnreservoir['liliowa'].format('SSI ', kwargs['ialimit']), fnreservoir['flat'].format('SSI ', kwargs['xlimit'], oxcom(kwargs['xlines']))] landscape = True theme = 'si1bfdbmc' title = r"""Interaction energy (kcal/mol) {{err}} subset statistics computed with {{opt}}{0}.""".format( '' if kwargs['subjoin'] else r""" and {bas}""") return rowplan, columnplan, landscape, footnotes, title, theme # + tblfile = 'si1mm' def make_bfdb_Tables_S1mm(dbobj): """Generate the subset details suppmat Part II tables and their indices for DHDFT. """ dbobj.table_wrapper(mtd=['GAFF', 'CGENFF', 'AM1', 'PM6DH2', 'PBEH3C'], bas=['na', 'def2msvp'], opt=[''], opttarget={'default': ['', 'dfhf_unCP']}, subjoin=True, tableplan=table_bfdb_suppmat, filename=tblfile, title=r"""Interaction energy (kcal/mol) MAE statistics by force-field and semi-empirical methods.""", theme='si1bfdbmc-mm', err=tblerr, failoninc=tblfailoninc, xlimit=tblxlimit, xlines=tblxlines, ialimit=tblialimit, plotpath=plotpath, standalone=standalone) make_bfdb_Tables_S1mm(bfdb) if standalone: # !{pdflatex} {tblfile} && {pdfviewer} {tblfile + '.pdf'} # + tblfile = 'si1wfn1' def make_bfdb_Tables_S1wfn(dbobj): """Generate the subset details suppmat Part II tables and their indices for DHDFT. """ dbobj.table_wrapper(mtd=['SAPT2P', 'MP2', 'SCSMP2', 'SCSNMP2', 'SCSMIMP2', 'DWMP2', 'MP2C', 'MP2F12', 'SCSMP2F12', 'SCSNMP2F12', 'SCSMIMP2F12', 'DWMP2F12', 'MP2CF12', 'CCSDAF12', 'CCSDBF12', 'SCSCCSDAF12', 'SCSCCSDBF12', 'SCMICCSDAF12', 'SCMICCSDBF12', 'CCSDTAF12', 'CCSDTBF12', 'DWCCSDTF12'], bas=['adz'], opt=[''], opttarget={'default': ['CP', 'dfhf_dfmp_CP', 'SA', 'dfhf_dfmp_SA']}, subjoin=False, suppressblanks=False, tableplan=table_bfdb_suppmat, filename=tblfile, title=r"""Interaction energy (kcal/mol) {err} statistics by wavefunction methods with aug-cc-pVDZ, CP-corrected.""", theme='si1bfdbmc-wfn1', err=tblerr, failoninc=tblfailoninc, xlimit=tblxlimit, xlines=tblxlines, ialimit=tblialimit, plotpath=plotpath, standalone=standalone) make_bfdb_Tables_S1wfn(bfdb) if standalone: # !{pdflatex} {tblfile} && {pdfviewer} {tblfile + '.pdf'} # + def make_bfdb_Tables_S1wfn2(dbobj): """Generate the subset details suppmat Part II tables and their indices for DHDFT. 
""" tblfile = 'si1wfn2' dbobj.table_wrapper(mtd=['MP2', 'SCSMP2', 'SCSNMP2', 'SCSMIMP2', 'DWMP2', 'MP2C'], bas=['atz', 'adtz', 'qz', 'aqz', 'atqz', 'atqzadz'], opt=[''], opttarget={'default': ['CP', 'dfhf_dfmp_CP', 'SA', 'dfhf_dfmp_SA']}, subjoin=True, suppressblanks=True, tableplan=table_bfdb_suppmat, filename=tblfile, title=r"""Interaction energy (kcal/mol) {err} statistics by wavefunction methods with larger basis sets, CP-corrected.""", theme='si1bfdbmc-wfn2', err=tblerr, failoninc=tblfailoninc, xlimit=tblxlimit, xlines=tblxlines, ialimit=tblialimit, plotpath=plotpath, standalone=standalone) if standalone: # !{pdflatex} {tblfile} && {pdfviewer} {tblfile + '.pdf'} make_bfdb_Tables_S1wfn2(bfdb) # + def make_bfdb_Tables_S1dft(dbobj): """Generate the subset details suppmat Part II tables and their indices for DHDFT. """ tblfile = 'si1dft' dbobj.table_wrapper(mtd=[ 'PBE', 'PBED2', 'PBED3', 'BP86', 'BP86D2', 'BP86D3', 'BLYP', 'BLYPD2', 'BLYPD3', 'B97', 'B97D2', 'B97D3', 'PBE0', 'PBE0D2', 'PBE0D3', 'B3LYP', 'B3LYPD2', 'B3LYPD3', 'WPBE', 'WPBED3', 'B2PLYP', 'B2PLYPD2', 'B2PLYPD3', ], bas=['adz', 'atz', 'def2qzvp'], opt=['CP', 'unCP'], opttarget={'default': ['', 'dfhf', 'dfmp_dfhf']}, subjoin=False, suppressblanks=True, tableplan=table_bfdb_suppmat, filename=tblfile, title=r"""Interaction energy (kcal/mol) {err} statistics by DFT methods computed with {opt} and {bas}.""", theme='si1bfdbmc-dft', err=tblerr, failoninc=tblfailoninc, xlimit=tblxlimit, xlines=tblxlines, ialimit=tblialimit, plotpath=plotpath, standalone=standalone) if standalone: # !{pdflatex} {tblfile} && {pdfviewer} {tblfile + '.pdf'} make_bfdb_Tables_S1dft(bfdb) # + def make_bfdb_Tables_S1dftd3(dbobj): """Generate the subset details suppmat Part II tables and their indices for DHDFT. """ tblfile = 'si1dftd3' dbobj.table_wrapper(mtd=[ 'PBED3', 'PBED3M', 'PBED3BJ', 'PBED3MBJ', 'BP86D3', 'BP86D3M', 'BP86D3BJ', 'BP86D3MBJ', 'BLYPD3', 'BLYPD3M', 'BLYPD3BJ', 'BLYPD3MBJ', 'B97D3', 'B97D3M', 'B97D3BJ', 'B97D3MBJ', 'PBE0D3', 'PBE0D3M', 'PBE0D3BJ', 'PBE0D3MBJ', 'B3LYPD3', 'B3LYPD3M', 'B3LYPD3BJ', 'B3LYPD3MBJ', 'M052X', 'WB97XD', 'WB97XV', 'WB97MV', 'WPBED3', 'WPBED3M', 'WPBED3BJ', 'WPBED3MBJ', 'B2PLYPD3', 'B2PLYPD3M', 'B2PLYPD3BJ', 'B2PLYPD3MBJ', ], bas=['adz', 'atz', 'def2qzvp'], opt=['CP', 'unCP'], opttarget={'default': ['', 'dfhf', 'dfmp_dfhf']}, subjoin=False, suppressblanks=True, tableplan=table_bfdb_suppmat, filename=tblfile, title=r"""Interaction energy (kcal/mol) {err} statistics by DFT-D3 methods computed with {opt} and {bas}.""", theme='si1bfdbmc-dftd3', err=tblerr, failoninc=tblfailoninc, xlimit=tblxlimit, xlines=tblxlines, ialimit=tblialimit, plotpath=plotpath, standalone=standalone) if standalone: # !{pdflatex} {tblfile} && {pdfviewer} {tblfile + '.pdf'} make_bfdb_Tables_S1dftd3(bfdb) # + def make_bfdb_Tables_S1sapt(dbobj): """Generate the subset details suppmat Part II tables and their indices for DHDFT. """ tblfile = 'si1sapt' dbobj.table_wrapper(mtd=['SAPT0', 'SAPT0S', 'SAPT2', 'SAPT2P'], bas=['jadz', 'adz'], opt=[''], opttarget={'default': ['SA']}, subjoin=True, suppressblanks=True, tableplan=table_bfdb_suppmat, filename=tblfile, title=r"""Interaction energy (kcal/mol) {err} statistics by SAPT methods.""", theme='si1bfdbmc-sapt', err=tblerr, failoninc=tblfailoninc, xlimit=tblxlimit, xlines=tblxlines, ialimit=tblialimit, plotpath=plotpath, standalone=standalone) if standalone: # !{pdflatex} {tblfile} && {pdfviewer} {tblfile + '.pdf'} make_bfdb_Tables_S1sapt(bfdb) # - # ## V. 
Primary Paper Tables (Tables II, III, IV) # ##### basis-major w/Iowa & thread: 1 MM, 1 WFN, 1 DFT # isolate in a directory subdir = 'tbl_modelchems' # %cd {wdir} if not os.path.exists(subdir): os.makedirs(subdir) # %cd {subdir} # extra subsets used in primary paper tables bfdb.add_Subset_union('npblk', ['aliphaliph', 'arylaryl', 'alipharyl']) bfdb.add_Subset_union('ionblk', ['pospos', 'negneg', 'posneg']) bfdb.add_Subset_union('plslice', ['pospolar', 'negpolar', 'polarpolar', 'polaraliph', 'polararyl']) def table_bfdb_paper(**kwargs): """Specialization of table_generic into table with as many statistics as will fit plus embedded slat diagram as suitable for supplementary material. Multiple tables are formed, one for each in *bas* with lines *mtd* within each table. """ if (tblxlines == [0.0, 0.3, 1.0, 4.0]) and plotpath: guidecell = """\includegraphics[width=6.67cm,height=3.5mm]{bfdb_analysis/tbl_modelchems/flat_0p0_0p0_1p0_4p0_lbld.pdf}""" else: guidecell = '' rowplan = ['bas', 'mtd'] columnplan = [ ['l', r"""Method \& Basis Set""", '', textables.label, {}], ['d', 'BBI', r"""TT\footnotemark[3]""", textables.val, {'sset': 'default', 'dbse': 'BBI'}], ['d', 'SSI', r"""ion\footnotemark[4]""", textables.val, {'sset': 'ionblk', 'dbse': 'SSI'}], ['d', 'SSI', r"""pl\footnotemark[5]""", textables.val, {'sset': 'plslice', 'dbse': 'SSI'}], ['d', 'SSI', r"""npl\footnotemark[6]""", textables.val, {'sset': 'npblk', 'dbse': 'SSI'}], ['d', 'SSI', r"""TT\footnotemark[3]""", textables.val, {'sset': 'default', 'dbse': 'SSI'}], ['c', r"""Iowa\footnotemark[1]""", '', textables.liliowa, {}], ['l', r"""Error Distribution\footnotemark[2]""", guidecell, textables.flat, {'sset': 'ssi'}] ] footnotes = [fnreservoir['liliowa'].format('SSI ', kwargs['ialimit']), fnreservoir['flat'].format('SSI ', kwargs['xlimit'], oxcom(kwargs['xlines'])), """Total database""", """Charged block: union of $+/+$, $+/-$, $-/-$.""", """Polar-containing: union of +/pl, $-$/pl, pl/pl, pl/al, pl/ar.""", """Nonpolar block: union of al/al, al/ar, ar/ar."""] landscape = False theme = 'ppbfdbmc' title = r"""Interaction energy (kcal/mol) {{err}} subset statistics computed with {{opt}}{0}.""".format( '' if kwargs['subjoin'] else r""" and {bas}""") return rowplan, columnplan, landscape, footnotes, title, theme # + def make_bfdb_Tables_mm(dbobj): """Generate the subset details suppmat Part II tables and their indices for DHDFT. """ tblfile = 'ppmm' dbobj.table_wrapper(mtd=['GAFF', 'CGENFF', 'AM1', 'PM6DH2', 'PBEH3C'], bas=['na', 'def2msvp'], opt=[''], opttarget={'default': ['', 'dfhf_unCP']}, subjoin=True, tableplan=table_bfdb_paper, filename=tblfile, title=r"""Interaction energy (kcal/mol) {err} statistics by force-field and semi-empirical methods.""", err=tblerr, failoninc=tblfailoninc, xlimit=tblxlimit, xlines=tblxlines, ialimit=tblialimit, plotpath=plotpath, standalone=standalone) if standalone: # !{pdflatex} {tblfile} && {pdfviewer} {tblfile + '.pdf'} make_bfdb_Tables_mm(bfdb) # further adjustments for paper inclusion # * remove "Basis Set" and "no applicable basis" and "def2-mSVP (3 lines) # * remove hline and 5 empty lines (6 lines) # * in Missing footnotes, change "reactions" to "interactions" (4 lines) # * add \cite{gaffmissing} to first three Missing lines (3 lines) # * and \cite{charmmmissing} to last three Missing lines (3 lines) # * change label to "\label{tbl:qcdb-ppbfdbmc--na-mae}}" (1 line) # + def make_bfdb_Tables_wfn(dbobj): """Generate the subset details suppmat Part II tables and their indices for DHDFT. 
""" tblfile = 'ppwfn' dbobj.table_wrapper(mtd=['SAPT2P', 'MP2', 'SCSMP2', 'SCSNMP2', 'SCSMIMP2', 'DWMP2', 'MP2C', 'MP2F12', 'SCSMP2F12', 'SCSNMP2F12', 'SCSMIMP2F12', 'DWMP2F12', 'MP2CF12', 'CCSDAF12', 'CCSDBF12', 'SCSCCSDAF12', 'SCSCCSDBF12', 'SCMICCSDAF12', 'SCMICCSDBF12', 'CCSDTAF12', 'CCSDTBF12', 'DWCCSDTF12'], # 'CCSDTNSAF12' bas=['adz', 'atqzadz', 'atz', 'qz', 'aqz', 'atqz'], # 'adtz' opt=[''], opttarget={'default': ['CP', 'dfhf_dfmp_CP', 'SA', 'dfhf_dfmp_SA']}, subjoin=True, suppressblanks=True, tableplan=table_bfdb_paper, filename=tblfile, title=r"""Interaction energy (kcal/mol) {err} statistics by wavefunction methods, CP-corrected.""", err=tblerr, failoninc=tblfailoninc, xlimit=tblxlimit, xlines=tblxlines, ialimit=tblialimit, plotpath=plotpath, standalone=standalone) if standalone: # !{pdflatex} {tblfile} && {pdfviewer} {tblfile + '.pdf'} make_bfdb_Tables_wfn(bfdb) # further adjustments for paper inclusion # * comment out SCS-CCSD-F12a/b (2 lines) # * comment out all SCS(N)-MP2) for >aDZ (4 lines) # * comment out SCS-MP2 & DW-MP2 for QZ # * comment out SCS-MP2 & SCS(MI)-MP2 for aTQZ (2 lines) # * add the citation to this line and the aTQZ (2 lines): \textbf{[aTQZ; $\delta$:aDZ]}\cite{bracketdelta} \\ # + tblfile = 'ppdftm' def make_bfdb_Tables_dft(dbobj): """Generate the subset details suppmat Part II tables and their indices for DHDFT. """ dbobj.table_wrapper(mtd=['PBED3M', 'BP86D3M', 'BLYPD3MBJ', 'B97D3MBJ', 'PBE0D3M', 'B3LYPD3MBJ', 'M052X', 'WPBED3', 'WB97XD', 'WB97XV', 'WB97MV', 'B2PLYPD3M'], bas=['adz', 'atz', 'def2qzvp'], opt=['CP', 'unCP'], opttarget={'default': ['', 'dfhf', 'dfmp_dfhf']}, subjoin=True, suppressblanks=True, tableplan=table_bfdb_paper, filename=tblfile, title=r"""Interaction energy (kcal/mol) {err} statistics by DFT methods.""", err=tblerr, failoninc=tblfailoninc, xlimit=tblxlimit, xlines=tblxlines, ialimit=tblialimit, plotpath=plotpath, standalone=standalone) make_bfdb_Tables_dft(bfdb) if standalone: # !{pdflatex} {tblfile} && {pdfviewer} {tblfile + '.pdf'} # further adjustments for paper inclusion # * two tbls are formed. comment out qzvp of first tbl thru header of second tbl so only one tbl built (many lines) # * add ', CP' to first basis blocks and ', unCP' to further basis blocks (6 lines) # * arrange blocks adz-uncp, adz-cp, atz-uncp, atz-cp, qzvp-uncp (many lines) # * comment out PBE and B97 in atz-cp and qzvp-uncp (4 lines) # * add footnotetexts 7 & 8 'Missing 2--7 of 230 interactions' & 3380 (2 lines) # * add \cite{gganoconv} to above footnotetexts (2 lines) # * alter footnotemarks so all ion go with 7 and all SSI TT go with 8 (12 lines) # - # ## VI. Secondary SuppMat Tables (Tables S-20 through S-39) # ##### method/functional family-major w/Iowa w/colored DFT: #1 MM, 1 WFN, 18 DFT # isolate in a directory subdir = 'tbl_modelchems' # %cd {wdir} if not os.path.exists(subdir): os.makedirs(subdir) # %cd {subdir} def table_bfdb_allsset(**kwargs): """Specialization of table_generic into table with as many statistics as will fit plus embedded Iowa diagram as suitable for supplementary material. Multiple tables are formed, one for each in *bas* with lines *mtd* within each table. 
""" rowplan = ['bas', 'opt', 'mtd'] columnplan = [ ['l', r"""Method \& Basis Set""", '', textables.label, {}], ['d', 'BBI', 'SHB', textables.val, {'sset': 'shb', 'dbse': 'BBI'}], ['d', 'BBI', 'UA', textables.val, {'sset': 'ua', 'dbse': 'BBI'}], ['d', 'BBI', 'TT', textables.val, {'sset': 'default', 'dbse': 'BBI'}], ['d', 'SSI', r"""$\bm{+/+}$""", textables.val, {'sset': 'pospos', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{+/-}$""", textables.val, {'sset': 'posneg', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{+}$/pl""", textables.val, {'sset': 'pospolar', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{+}$/al""", textables.val, {'sset': 'posaliph', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{+}$/ar""", textables.val, {'sset': 'posaryl', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{-/-}$""", textables.val, {'sset': 'negneg', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{-}$/pl""", textables.val, {'sset': 'negpolar', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{-}$/al""", textables.val, {'sset': 'negaliph', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{-}$/ar""", textables.val, {'sset': 'negaryl', 'dbse': 'SSI'}], ['d', 'SSI', 'pl/pl', textables.val, {'sset': 'polarpolar', 'dbse': 'SSI'}], ['d', 'SSI', 'pl/al', textables.val, {'sset': 'polaraliph', 'dbse': 'SSI'}], ['d', 'SSI', 'pl/ar', textables.val, {'sset': 'polararyl', 'dbse': 'SSI'}], ['d', 'SSI', 'al/al', textables.val, {'sset': 'aliphaliph', 'dbse': 'SSI'}], ['d', 'SSI', 'al/ar', textables.val, {'sset': 'alipharyl', 'dbse': 'SSI'}], ['d', 'SSI', 'ar/ar', textables.val, {'sset': 'arylaryl', 'dbse': 'SSI'}], ['d', 'SSI', 'TT', textables.val, {'sset': 'default', 'dbse': 'SSI'}], ['c', r"""Iowa\footnotemark[1]""", '', textables.liliowa, {}], ] footnotes = [fnreservoir['liliowa'].format('SSI ', kwargs['ialimit'])] landscape = True theme = 'si2bfdbmc' title = r"""Interaction energy (kcal/mol) {{err}} subset statistics computed with {{opt}}{0}.""".format( '' if kwargs['subjoin'] else r""" and {bas}""") return rowplan, columnplan, landscape, footnotes, title, theme def table_bfdb_allssetmmwfn(**kwargs): """Specialization of table_generic into table with as many statistics as will fit plus embedded slat diagram as suitable for supplementary material. Multiple tables are formed, one for each in *bas* with lines *mtd* within each table. """ rowplan, columnplan, landscape, footnotes, title, theme = table_bfdb_allsset(**kwargs) rowplan = ['mtd', 'bas'] return rowplan, columnplan, landscape, footnotes, title, theme # + def make_bfdb_Tables_S2mm(dbobj): """Generate the subset details suppmat Part II tables and their indices for DHDFT. """ tblname = 'si2mm' dbobj.table_wrapper(mtd=['GAFF', 'CGENFF', 'AM1', 'PM6DH2', 'PBEH3C'], bas=['na', 'def2msvp'], opt=[''], opttarget={'default': ['', 'dfhf_unCP']}, subjoin=True, suppressblanks=True, tableplan=table_bfdb_allssetmmwfn, filename=tblname, title=r"""Interaction energy (kcal/mol) {err} subset statistics by force-field and semi-empirical methods.""", theme='si2bfdbmc-mm', err=tblerr, failoninc=tblfailoninc, xlimit=tblxlimit, xlines=tblxlines, ialimit=tblialimit, plotpath=plotpath, standalone=standalone) if standalone: # !{pdflatex} {tblname} && {pdfviewer} {tblname + '.pdf'} make_bfdb_Tables_S2mm(bfdb) # + def make_bfdb_Tables_S2wfn(dbobj): """Generate the subset details suppmat Part II tables and their indices for DHDFT. 
""" tblname = 'si2wfn' dbobj.table_wrapper(mtd=['MP2', 'SCSMP2', 'SCSNMP2', 'SCSMIMP2', 'DWMP2'], bas=['adz', 'atz', 'adtz', 'qz', 'aqz', 'atqz'], opt=[''], opttarget={'default': ['CP', 'dfhf_dfmp_CP', 'SA', 'dfhf_dfmp_SA']}, subjoin=True, suppressblanks=True, tableplan=table_bfdb_allssetmmwfn, filename=tblname, title=r"""Interaction energy (kcal/mol) {err} subset statistics by MP2 methods, CP-corrected.""", theme='si2bfdbmc-wfn', err=tblerr, failoninc=tblfailoninc, xlimit=tblxlimit, xlines=tblxlines, ialimit=tblialimit, plotpath=plotpath, standalone=standalone) if standalone: # !{pdflatex} {tblname} && open /Applications/Preview.app {tblname + '.pdf'} make_bfdb_Tables_S2wfn(bfdb) # + def colorval(best, better=0.15, good=0.25, rotten=0.5): def colorscheme(kw): brick = r"""\cellcolor[RGB]{153,0,0}""" paleorange = r"""\cellcolor[HTML]{FFCC99}""" paleblue = r"""\cellcolor[HTML]{CCE5FF}""" lilac = r"""\cellcolor[RGB]{204,153,255}""" mval = kw['matelem'] if mval.isspace(): return textables.val(kw) else: fval = float(mval) cellcolor = '' if fval > rotten: cellcolor = brick if fval <= good: cellcolor = paleorange if fval <= better: cellcolor = paleblue if fval <= best: cellcolor = lilac return cellcolor + textables.val(kw) return colorscheme def table_bfdb_allssetwcolor(**kwargs): """Specialization of table_generic into table with as many statistics as will fit plus embedded slat diagram as suitable for supplementary material. Multiple tables are formed, one for each in *bas* with lines *mtd* within each table. """ rowplan = ['bas', 'opt', 'mtd'] columnplan = [ ['l', r"""Method \& Basis Set""", '', textables.label, {}], ['d', 'BBI', 'SHB', colorval(0.10), {'sset': 'shb', 'dbse': 'BBI'}], ['d', 'BBI', 'UA', colorval(0.08), {'sset': 'ua', 'dbse': 'BBI'}], ['d', 'BBI', 'TT', colorval(0.10), {'sset': 'default', 'dbse': 'BBI'}], ['d', 'SSI', r"""$\bm{+/+}$""", colorval(0.15), {'sset': 'pospos', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{+/-}$""", colorval(0.24), {'sset': 'posneg', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{+}$/pl""", colorval(0.17), {'sset': 'pospolar', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{+}$/al""", colorval(0.09), {'sset': 'posaliph', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{+}$/ar""", colorval(0.12), {'sset': 'posaryl', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{-/-}$""", colorval(0.25), {'sset': 'negneg', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{-}$/pl""", colorval(0.15), {'sset': 'negpolar', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{-}$/al""", colorval(0.09), {'sset': 'negaliph', 'dbse': 'SSI'}], ['d', 'SSI', r"""$\bm{-}$/ar""", colorval(0.14), {'sset': 'negaryl', 'dbse': 'SSI'}], ['d', 'SSI', 'pl/pl', colorval(0.12), {'sset': 'polarpolar', 'dbse': 'SSI'}], ['d', 'SSI', 'pl/al', colorval(0.09), {'sset': 'polaraliph', 'dbse': 'SSI'}], ['d', 'SSI', 'pl/ar', colorval(0.12), {'sset': 'polararyl', 'dbse': 'SSI'}], ['d', 'SSI', 'al/al', colorval(0.08), {'sset': 'aliphaliph', 'dbse': 'SSI'}], ['d', 'SSI', 'al/ar', colorval(0.08), {'sset': 'alipharyl', 'dbse': 'SSI'}], ['d', 'SSI', 'ar/ar', colorval(0.10), {'sset': 'arylaryl', 'dbse': 'SSI'}], ['d', 'SSI', 'TT', colorval(0.13), {'sset': 'default', 'dbse': 'SSI'}], ['c', r"""Iowa\footnotemark[1]""", '', textables.liliowa, {}], ] footnotes = [fnreservoir['liliowa'].format('SSI ', kwargs['ialimit'])] landscape = True theme = 'si2bfdbmc' title = r"""Interaction energy (kcal/mol) {{err}} subset statistics computed with {{opt}}{0}.""".format( '' if kwargs['subjoin'] else r""" and {bas}""") return rowplan, columnplan, landscape, footnotes, title, theme # + def 
make_bfdb_Tables_S2dft(dbobj): """Generate the subset details suppmat Part II tables and their indices for DHDFT. """ fctlplan = { 'pbe': (['PBE', 'PBED2', 'PBED3'], 'PBE'), 'bp86': (['BP86', 'BP86D2', 'BP86D3'], 'BP86'), 'blyp': (['BLYP', 'BLYPD2', 'BLYPD3'], 'BLYP'), 'b97': (['B97', 'B97D2', 'B97D3'], 'B97'), 'pbe0': (['PBE0', 'PBE0D2', 'PBE0D3'], 'PBE0'), 'b3lyp': (['B3LYP', 'B3LYPD2', 'B3LYPD3'], 'B3LYP'), 'wpbe': (['WPBE', 'WPBED3'], r"""$\omega$PBE"""), 'wb97x': (['M052X', 'WB97XD', 'WB97XV', 'WB97MV'], r"""M05-2X, $\omega$B97X-D, $\omega$B97X-V, and $\omega$B97M-V"""), 'b2plyp': (['B2PLYP', 'B2PLYPD2', 'B2PLYPD3'], 'B2PLYP'), 'pbed3': (['PBED3', 'PBED3M', 'PBED3BJ', 'PBED3MBJ'], 'PBE-D3'), 'bp86d3': (['BP86D3', 'BP86D3M', 'BP86D3BJ', 'BP86D3MBJ'], 'BP86-D3'), 'blypd3': (['BLYPD3', 'BLYPD3M', 'BLYPD3BJ', 'BLYPD3MBJ'], 'BLYP-D3'), 'b97d3': (['B97D3', 'B97D3M', 'B97D3BJ', 'B97D3MBJ'], 'B97-D3'), 'pbe0d3': (['PBE0D3', 'PBE0D3M', 'PBE0D3BJ', 'PBE0D3MBJ'], 'PBE0-D3'), 'b3lypd3': (['B3LYPD3', 'B3LYPD3M', 'B3LYPD3BJ', 'B3LYPD3MBJ'], 'B3LYP-D3'), 'wpbed3': (['WPBED3', 'WPBED3M', 'WPBED3BJ', 'WPBED3MBJ'], r"""$\omega$PBE-D3"""), 'b2plypd3': (['B2PLYPD3', 'B2PLYPD3M', 'B2PLYPD3BJ', 'B2PLYPD3MBJ'], 'B2PLYP-D3'), 'best': (['PBE0D3BJ', 'PBE0D3MBJ', 'B3LYPD3BJ', 'B3LYPD3MBJ', 'B2PLYPD3M', 'WB97XV', 'WB97MV'], 'best'), } for fl in fctlplan: tblname = 'si2' + fl def specializetheme(**kwargs): rowplan, columnplan, landscape, footnotes, title, theme = table_bfdb_allssetwcolor(**kwargs) theme += '-' + fl return rowplan, columnplan, landscape, footnotes, title, theme dbobj.table_wrapper(mtd=fctlplan[fl][0], bas=['adz', 'atz', 'def2qzvp'], opt=['unCP', 'CP'], opttarget={'default': ['', 'dfhf', 'dfmp_dfhf']}, subjoin=True, suppressblanks=True, tableplan=specializetheme, filename=tblname, title=r"""Interaction energy (kcal/mol) {err} subset statistics by """ + fctlplan[fl][1] + """ methods.""", err=tblerr, failoninc=tblfailoninc, xlimit=tblxlimit, xlines=tblxlines, ialimit=tblialimit, plotpath=plotpath, standalone=standalone) if standalone: # !{pdflatex} {tblname} && {pdfviewer} {tblname + '.pdf'} make_bfdb_Tables_S2dft(bfdb) # further adjustments for suppmat inclusion # * in best, comment out adz-uncp # - # ## VII. Heatmaps and SAPT-tinted Iowas (Fig. 1) # isolate in a directory subdir = 'fig_heatmaps_etc' # %cd {wdir} if not os.path.exists(subdir): os.makedirs(subdir) # %cd {subdir} # Note: Any step involving Iowa-type plots may take some time to run. bbi.plot_iowa('SAPT2P-SA-adz', view=False, xlimit=1.0) ssi.plot_iowa('SAPT2P-SA-adz', view=False, xlimit=1.0) def natomhist(dbobj, heavyonly=False, graphicsformat=['pdf']): """ """ import matplotlib.pyplot as plt natoms = [] rhrxn = dbobj.get_hrxn() for dbrxn, orxn in rhrxn.iteritems(): orgts = orxn.rxnm['default'].keys() omolD = qcdb.Molecule(orgts[0].mol) omolD.update_geometry() if heavyonly: nat = 0 for at in omolD.atoms: if at.symbol() != 'H': nat += 1 natoms.append(nat) else: natoms.append(omolD.natom()) fig, ax1 = plt.subplots(figsize=(16, 6)) plt.axvline(0.0, color='#cccc00') xmin, xmax = 0, 50 ax1.set_xlim(xmin, xmax) ax1.hist(natoms, bins=50, range=(xmin, xmax), color='#2d4065', alpha=0.7) for ext in graphicsformat: savefile = 'natom_' + ('heavy_' if heavyonly else 'all_') + dbobj.dbse + '.' + ext.lower() plt.savefig(savefile, transparent=True, format=ext, bbox_inches='tight') plt.show() natomhist(ssi, heavyonly=False) def heat(dbobj, graphicsformat=['pdf']): """For database *dbobj*, creates a heatmap plot in formats *graphicsformat*. 
""" import re import numpy as np import matplotlib import matplotlib.pyplot as plt aa = ['ARG', 'LYS', 'ASP', 'GLU', 'SER', 'THR', 'ASN', 'GLN', 'CYS', 'MET', 'GLY', 'ALA', 'VAL', 'ILE', 'LEU', 'PRO', 'PHE', 'TYR', 'HIE', 'TRP'] bfdbpattern = re.compile("\d\d\d([A-Z][A-Z][A-Z])-\d\d\d([A-Z][A-Z][A-Z])-\d") # handle for frame, overall axis fig, axt = plt.subplots(figsize=(6, 6)) axt.set_xticks(np.arange(len(aa)) + 0.3, minor=False) axt.set_yticks(np.arange(len(aa)) + 0.3, minor=False) axt.invert_yaxis() axt.xaxis.tick_top() axt.set_xticklabels(aa, minor=False, rotation=60, size='small') axt.set_yticklabels(aa, minor=False, size='small') axt.xaxis.set_tick_params(width=0, length=0) axt.yaxis.set_tick_params(width=0, length=0) axt.axvline(x=3.85, linewidth=5, color='k') axt.axvline(x=7.95, linewidth=5, color='k') axt.axvline(x=10.95, linewidth=5, color='k') axt.axhline(y=3.85, linewidth=5, color='k') axt.axhline(y=7.95, linewidth=5, color='k') axt.axhline(y=10.95, linewidth=5, color='k') rxns = [rxn.split('-', 1)[1] for rxn in dbobj.hrxn.keys()] tiles = [] for aa1 in aa: print '\n', aa1, for aa2 in aa: count = 0 for rxn in rxns: bfdbname = bfdbpattern.match(rxn) if (bfdbname.group(1) == aa1 and bfdbname.group(2) == aa2) or \ (bfdbname.group(2) == aa1 and bfdbname.group(1) == aa2): count += 1 print count, tiles.append(count) vmin, vmax = 0, max(tiles) cb = np.reshape(np.array(tiles), (len(aa), len(aa))) print 'max', vmax heatmap = axt.pcolor(cb, vmin=vmin, vmax=vmax, cmap=plt.cm.bone_r) for ext in graphicsformat: savefile = 'heat_' + dbobj.dbse + '.' + ext.lower() plt.savefig(savefile, transparent=True, format=ext, bbox_inches='tight') plt.show() heat(bbi) heat(ssi) # + def composition_tile(db, aa1, aa2): """Takes dictionary *db* of label, error pairs and amino acids *aa1* and *aa2* and returns a square array of all errors for that amino acid pair, buffered by zeros. """ import re import numpy as np bfdbpattern = re.compile("\d\d\d([A-Z][A-Z][A-Z])-\d\d\d([A-Z][A-Z][A-Z])-\d") tiles = [] for key, val in db.items(): bfdbname = bfdbpattern.match(key) if (bfdbname.group(1) == aa1 and bfdbname.group(2) == aa2) or \ (bfdbname.group(2) == aa1 and bfdbname.group(1) == aa2): tiles.append(val) dim = int(np.ceil(np.sqrt(len(tiles)))) pad = dim * dim - len(tiles) tiles += [0] * pad return np.reshape(np.array(tiles), (dim, dim)) def rgbiowa(dbobj, view=True, graphicsformat=['pdf']): """For database *dbobj*, creates a heatmap plot in formats *graphicsformat*. 
""" import re import numpy as np import matplotlib import matplotlib.pyplot as plt dbdat = {} rhrxn = dbobj.get_hrxn() for dbrxn, orxn in rhrxn.iteritems(): rxn = dbrxn.split('-', 1)[1] dbdat[rxn] = orxn.color aa = ['ARG', 'LYS', 'ASP', 'GLU', 'SER', 'THR', 'ASN', 'GLN', 'CYS', 'MET', 'GLY', 'ALA', 'VAL', 'ILE', 'LEU', 'PRO', 'PHE', 'TYR', 'HIE', 'TRP'] colors = [('white')] + [(plt.cm.jet(i)) for i in xrange(1,256)] jetwhitezero = matplotlib.colors.LinearSegmentedColormap.from_list('jetwhitezero', colors, N=256) # handle for frame, overall axis fig, axt = plt.subplots(figsize=(6, 6)) axt.set_xticks(np.arange(len(aa)) + 0.3, minor=False) axt.set_yticks(np.arange(len(aa)) + 0.3, minor=False) axt.invert_yaxis() axt.xaxis.tick_top() axt.set_xticklabels(aa, minor=False, rotation=60, size='small') axt.set_yticklabels(aa, minor=False, size='small') axt.xaxis.set_tick_params(width=0, length=0) axt.yaxis.set_tick_params(width=0, length=0) xmin, xmax = 0, 1 # nill spacing between 20x20 heatmaps plt.subplots_adjust(hspace=0.001, wspace=0.001) index = 1 for aa1 in aa: for aa2 in aa: cb = composition_tile(dbdat, aa1, aa2) ax = matplotlib.axes.Subplot(fig, len(aa), len(aa), index) fig.add_subplot(ax) heatmap = ax.pcolor(cb, vmin=xmin, vmax=xmax, cmap=jetwhitezero) ax.set_xticks([]) ax.set_yticks([]) index += 1 axt.axvline(x=3.85, linewidth=5, color='k') axt.axvline(x=7.75, linewidth=5, color='k') axt.axvline(x=10.65, linewidth=5, color='k') axt.axhline(y=3.85, linewidth=5, color='k') axt.axhline(y=7.75, linewidth=5, color='k') axt.axhline(y=10.65, linewidth=5, color='k') axt.set_zorder(100) for ext in graphicsformat: savefile = 'rgbiowa_' + dbobj.dbse + '.' + ext.lower() plt.savefig(savefile, transparent=True, format=ext, bbox_inches='tight') if view: plt.show() plt.close() # - rgbiowa(bbi, view=False) rgbiowa(ssi, view=False) # ## VIII. Threads, ternaries, & Iowas of statistical subsets (Fig. 6) # isolate in a directory subdir = 'fig_stat_subsets' # %cd {wdir} if not os.path.exists(subdir): os.makedirs(subdir) # %cd {subdir} # Note: Any step involving Iowa-type plots may take some time to run. bbi.plot_iowa('PBE0D3BJ-dfhf_CP-adz', view=False, xlimit=1.0, sset='default') bbi.plot_iowa('PBE0D3BJ-dfhf_CP-adz', view=False, xlimit=1.0, sset='bbi25') ssi.plot_iowa('PBE0D3BJ-dfhf_CP-adz', view=False, xlimit=1.0, sset='default') ssi.plot_iowa('PBE0D3BJ-dfhf_CP-adz', view=False, xlimit=1.0, sset='ssi500') ssi.plot_iowa('PBE0D3BJ-dfhf_CP-adz', view=False, xlimit=1.0, sset='ssi100') bfdb.plot_modelchems(['PBE0D3BJ-dfhf_CP-adz', 'PBE0D3BJ-dfhf_CP-adz', 'PBE0D3BJ-dfhf_CP-adz', 'PBE0D3BJ-dfhf_CP-adz', 'PBE0D3BJ-dfhf_CP-adz'], msset=['ssi', 'ssi500', 'ssi100', 'bbi', 'bbi25'], labeled=False) # ## IX. 
IE Distribution Thread SuppMat Tables (Tables S-3 and S-4) # isolate in a directory subdir = 'tbl_silver_residue' # %cd {wdir} if not os.path.exists(subdir): os.makedirs(subdir) # %cd {subdir} def form_residue_subsets_from_rxnname(dbobj): import re bfdbpattern = re.compile("\d\d\d([A-Z][A-Z][A-Z])-\d\d\d([A-Z][A-Z][A-Z])-\d") aa = ['ARG', 'LYS', 'ASP', 'GLU', 'SER', 'THR', 'ASN', 'GLN', 'CYS', 'GLY', 'MET', 'PRO', 'ALA', 'VAL', 'ILE', 'LEU', 'PHE', 'TYR', 'HIE', 'TRP'] aasset = {} for a in aa: aasset[a.lower()] = {db: [] for db in dbobj.dbdict.keys()} for dbrxn in dbobj.hrxn: db, rxn = dbrxn.split('-', 1) bfdbname = bfdbpattern.match(rxn) aa1 = bfdbname.group(1) aa2 = bfdbname.group(2) aasset[aa1.lower()][db].append(rxn) if aa1 != aa2: aasset[aa2.lower()][db].append(rxn) for a in aasset: dbobj.add_Subset(a, aasset[a]) form_residue_subsets_from_rxnname(bbi) form_residue_subsets_from_rxnname(ssi) # + def table_bfdb_residue(**kwargs): """Specialization of table_generic into table to showcase the count, range, and plot of errors. Actually used for reaction energy itself, rather than error. """ print kwargs print kwargs['xlines'] rowplan = ['sset'] columnplan = [ ['l', r"""Subset""", '', textables.label, {}], ['d', r"""Statistics""", 'count', textables.count, {}], ['d', r"""Statistics""", 'min', textables.val, {'err': 'nexe'}], ['d', r"""Statistics""", 'mean', textables.val, {'err': 'me'}], ['d', r"""Statistics""", 'max', textables.val, {'err': 'pexe'}], ['l', r"""IE Distribution\footnotemark[1]""", '', textables.flat, {}] ] footnotes = [r"""Benchmark interaction energies (\textit{{not}} errors). Guide lines are at {0} kcal/mol bound ($-$) and unbound ($+$).""".format( oxcom(kwargs['xlines']))] landscape = False theme = 'sibfdbss' title = r"""{{dbse}} interaction energy (kcal/mol) subset range statistics with reference {{mtd}}/{{bas}}{0}.""".format( '' if kwargs['subjoin'] else r""" and {bas}""") return rowplan, columnplan, landscape, footnotes, title, theme def make_bfdb_Tables_Sbbiresidue(dbobj): """Generate the subset details suppmat Part II tables and their indices for DHDFT. 
""" tblname = 'sibbiss' dbobj.table_wrapper(mtd=['DWCCSDTF12'], bas=['adz'], sset=['default', 'bbi25', 'shb', 'ua', 'arg', 'lys', 'asp', 'glu', 'ser', 'thr', 'asn', 'gln', 'cys', 'gly', 'met', 'pro', 'ala', 'val', 'ile', 'leu', 'phe', 'tyr', 'hie', 'trp'], tableplan=table_bfdb_residue, benchmark='ZEROS', opt=[''], opttarget={'default': ['CP']}, # all errors computed all the time so err=[] not needed failoninc=False, # safe b/c explanatory footnotes added subjoin=True, xlimit=10.0, xlines=[0.0, 1.0, 5.0], plotpath=plotpath, standalone=standalone, filename=tblname) if standalone: # !{pdflatex} {tblname} && {pdfviewer} {tblname + '.pdf'} def make_bfdb_Tables_Sssiresidue(dbobj): tblname = 'sississ' dbobj.table_wrapper(mtd=['DWCCSDTF12'], bas=['adz'], sset=['default', 'ssi500', 'ssi100', 'arg', 'lys', 'asp', 'glu', 'ser', 'thr', 'asn', 'gln', 'cys', 'met', 'pro', 'ala', 'val', 'ile', 'leu', 'phe', 'tyr', 'hie', 'trp', 'neutral', 'cation', 'anion', 'pospos', 'posneg', 'pospolar', 'posaliph', 'posaryl', 'negneg', 'negpolar', 'negaliph', 'negaryl', 'polarpolar', 'polaraliph', 'polararyl', 'aliphaliph', 'alipharyl', 'arylaryl'], tableplan=table_bfdb_residue, benchmark='ZEROS', opt=[''], opttarget={'default': ['CP']}, # all errors computed all the time so err=[] not needed failoninc=False, # safe b/c explanatory footnotes added subjoin=True, xlimit=130.0, xlines=[0.0, 10.0, 100.0], plotpath=plotpath, standalone=standalone, filename=tblname) if standalone: # !{pdflatex} {tblname} && {pdfviewer} {tblname + '.pdf'} # + # run from jupyter, pop up tables, fresh build figures # plotpath = 'autogen' # standalone = True # run from jupyter, pop up tables, reuse figures plotpath = '' standalone = True # run from jupyter, prep tables tex for paper/suppmat, reuse figures # plotpath = '/opt/anaconda1anaconda2anaconda3/bfdb_analysis/tbl_silver_residue/' # standalone = False # - make_bfdb_Tables_Sbbiresidue(bbi) make_bfdb_Tables_Sssiresidue(ssi) # # X. Misc. # How to see what subsets and what systems are available for a database? print(bbi.sset.keys()) print(bbi.hrxn.keys()) # How to see what model chemistries are available for a system? print(ssi.hrxn['SSI-001MET-031VAL-1']) # How to see a single model chemistry for a system? print(ssi.hrxn['SSI-001MET-031VAL-1'].data['B3LYPD3BJ-dfhf_CP-def2qzvp']) # How to get model chemistry for whole database? s22 = qcdb.Database('s22', loadfrompickle=True) err, indiv = s22.compute_statistics('S22A', sset='hb', returnindiv=True, benchmark='ZEROS') for k, v in indiv['S22'].items(): print k, v[0] # How to get SSI data for DW-CCSD(T)-F12/aug-cc-pV(D+d)Z, SAPT2+/aug-cc-pVDZ, MP2/aug-cc-pV[T,Q]Z, wB97M-V/aug-cc-pVTZ? err, ssiCC = ssi.compute_statistics('DWCCSDTF12-CP-adz', returnindiv=True, benchmark='ZEROS') err, ssiSA = ssi.compute_statistics('SAPT2P-SA-adz', returnindiv=True, benchmark='ZEROS') err, ssiPT = ssi.compute_statistics('MP2-dfhf_dfmp_CP-atqz', returnindiv=True, benchmark='ZEROS') err, ssiDF = ssi.compute_statistics('WB97MV-unCP-atz', returnindiv=True, benchmark='ZEROS') print """{:20} {:12} {:12} {:12} {:12}""".format('rxn', 'DWCCSDTF12', 'SAPT2P', 'MP2', 'WB97MV') for rxn in ssi.hrxn.keys(): rx = rxn[4:] # strip off "SSI-" print """{:20} {:12} {:12} {:12} {:12}""".format(rxn, ssiCC['SSI'][rx][0], ssiSA['SSI'][rx][0], ssiPT['SSI'][rx][0], ssiDF['SSI'][rx][0]) # What are the error statistics for the various subsets of SSI for SAPT2+/aug-cc-pVDZ? for B3LYP-D3 vs BLYP-D3 with def2-QZVP? 
ssi.analyze_modelchems(['SAPT2P-SA-adz']) ssi.analyze_modelchems(['B3LYPD3-dfhf_CP-def2qzvp', 'BLYPD3-dfhf_CP-def2qzvp'], failoninc=False) # What do the thread diagrams look like for anion/anion interactions with B3LYP-D3(bj) at different basis zetas? ssi.plot_modelchems(['B3LYPD3BJ-dfhf_CP-adz', 'B3LYPD3BJ-dfhf_CP-atz', 'B3LYPD3BJ-dfhf_CP-def2qzvp'], sset='negneg') ssi.plot_modelchems(['BLYP-dfhf_CP-adz', 'BLYP-dfhf_CP-def2qzvp'], sset='negneg', xlimit=8) # What are the error statistics for the various SSI subsets for PBEh-3c? ssi.analyze_modelchems(['PBEH3C-dfhf_unCP-def2msvp']) # What is the max error for the MM methods? The full set of errors for M05-2X/adz? a = ssi.compute_statistics('GAFF--na', failoninc=False) print('GAFF', a['SSI']['maxe']) a = ssi.compute_statistics('CGENFF--na', failoninc=False) print('CGENFF', a['SSI']['maxe']) a = ssi.compute_statistics('AM1--na', failoninc=False) print('AM1', a['SSI']['maxe']) a = ssi.compute_statistics('PM6DH2--na', failoninc=False) print('PM6DH2', a['SSI']['maxe']) ssi.compute_statistics('M052X-dfhf_CP-adz', failoninc=False) # What reactions are missing for the various MM methods? the GGA DFT methods? (needs Sec. V run to define subsets) print(bfdb.get_missing_reactions('GAFF--na', sset='ionblk')) print(bfdb.get_missing_reactions('GAFF--na', sset='plslice')) print(bfdb.get_missing_reactions('GAFF--na', sset='npblk')) bfdb.get_missing_reactions('GAFF--na') print(bfdb.get_missing_reactions('CGENFF--na', sset='ionblk')) print(bfdb.get_missing_reactions('CGENFF--na', sset='plslice')) print(bfdb.get_missing_reactions('CGENFF--na', sset='npblk')) bfdb.get_missing_reactions('CGENFF--na') print(bfdb.get_missing_reactions('AM1--na', sset='ionblk')) print(bfdb.get_missing_reactions('AM1--na', sset='plslice')) print(bfdb.get_missing_reactions('AM1--na', sset='npblk')) bfdb.get_missing_reactions('AM1--na') print(bfdb.get_missing_reactions('BP86-dfhf_CP-adz', sset='ionblk')) print(bfdb.get_missing_reactions('B97-dfhf_CP-adz', sset='ionblk')) print(bfdb.get_missing_reactions('BLYP-dfhf_CP-adz', sset='ionblk')) print(bfdb.get_missing_reactions('PBE-dfhf_CP-adz', sset='ionblk')) print(bfdb.get_missing_reactions('B97-dfhf_CP-atz', sset='ionblk')) print(bfdb.get_missing_reactions('PBE-dfhf_CP-atz', sset='ionblk')) print(bfdb.get_missing_reactions('BP86-dfhf_CP-def2qzvp', sset='ionblk')) print(bfdb.get_missing_reactions('B97-dfhf_CP-def2qzvp', sset='ionblk')) print(bfdb.get_missing_reactions('BLYP-dfhf_CP-def2qzvp', sset='ionblk')) print(bfdb.get_missing_reactions('PBE-dfhf_CP-def2qzvp', sset='ionblk')) # How to load up a database and a project? hsg = qcdb.Database('hsg', loadfrompickle=True) hsg.load_qcdata_byproject('bfdbmm') ubq = qcdb.Database('ubq', loadfrompickle=True) ubq.load_qcdata_byproject('bfdbmm') # How to see MP2 basis set effect across zeta levels in benzene dimer dissociation curve? 
hbc = qcdb.Database('hbc6', loadfrompickle=True) hbc.load_qcdata_byproject('pt2') nbc = qcdb.Database('nbc10', loadfrompickle=True) nbc.load_qcdata_byproject('pt2') # + zetas = ['adz', 'atz', 'aqz'] mc = ['MP2-CP-' + z for z in zetas] nbc.plot_axis('Rrat', mc, sset='bzbz_s') nbc.plot_axis('Rrat', mc, sset='bzbz_t') nbc.plot_axis('Rrat', mc, sset='bzbz_pd34') nbc.plot_axis('Rrat', mc, sset='bzme') nbc.plot_axis('Rrat', mc, sset='meme') nbc.plot_axis('Rrat', mc, sset='pypy_s2') nbc.plot_axis('Rrat', mc, sset='pypy_t3') nbc.plot_axis('Rrat', mc, sset='bzh2s') hbc.plot_axis('Rrat', mc, sset='faoofaoo') hbc.plot_axis('Rrat', mc, sset='faonfaon') hbc.plot_axis('Rrat', mc, sset='fannfann') hbc.plot_axis('Rrat', mc, sset='faoofaon') hbc.plot_axis('Rrat', mc, sset='faonfann') hbc.plot_axis('Rrat', mc, sset='faoofann') # - # How to form old HB/MX/DD subsets for SSI? # + def get_sset_from_color(sector, dbinstance): eligible = [] for rxn, orxn in dbinstance.hrxn.iteritems(): if sector == 'dd' and 0.000 < orxn.color and orxn.color < 0.333: eligible.append(rxn) if sector == 'mx' and 0.333 < orxn.color and orxn.color < 0.667: eligible.append(rxn) if sector == 'hb' and 0.667 < orxn.color and orxn.color < 1.000: eligible.append(rxn) return eligible def get_hb_from_color(dbinstance): return get_sset_from_color('hb', dbinstance) def get_mx_from_color(dbinstance): return get_sset_from_color('mx', dbinstance) def get_dd_from_color(dbinstance): return get_sset_from_color('dd', dbinstance) bfdb.add_Subset('hb', {'SSI': get_hb_from_color, 'BBI': get_hb_from_color}) bfdb.add_Subset('mx', {'SSI': get_mx_from_color, 'BBI': get_mx_from_color}) bfdb.add_Subset('dd', {'SSI': get_dd_from_color, 'BBI': get_dd_from_color}) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + [markdown] id="LANdcLfSvzZr" colab_type="text" # # **Simple Linear Regression** # + id="fCrpahRtuRXd" colab_type="code" colab={} import numpy as np import matplotlib.pyplot as plt # + [markdown] id="NGs41zpdmcK0" colab_type="text" # ## Method 1 ("Traditional") # # Calculate bias (or intercept $B_0$) and slope ($B_1$) using: # # $$B_1 = \frac{\sum_{i=1}^{n}(x_i-mean(x))(y_i-mean(y))}{\sum_{i=1}^{n}(x_i-mean(x))^2}$$ # # $$B_0 = mean(y) - B_1 \cdot mean(x)$$ # # to construct simple linear regression model: $$y = B_0 + B_1 \cdot x$$ # + id="67qiy5a2t8og" colab_type="code" outputId="e8d45949-8c0b-436c-a6a6-3bbcdc538a0a" colab={"base_uri": "https://localhost:8080/", "height": 282} x = [1, 2, 4, 3, 5] y = [1, 3, 3, 2, 5] # visualize our data plt.plot(x, y, 'o') # + [markdown] id="Kp8Fz5yivuT-" colab_type="text" # Calculate mean of data # + id="aMUFD4mHudzn" colab_type="code" outputId="fff0a1a9-ec31-4686-9030-a8d25bec5a91" colab={"base_uri": "https://localhost:8080/", "height": 34} mean_x = np.mean(x) mean_y = np.mean(y) print(mean_x, mean_y) # + [markdown] id="K-8Voeq4vwRn" colab_type="text" # Calculate error # + id="h_IpXuGEvaqX" colab_type="code" outputId="6b810aaf-bdd4-447b-87dc-a8f0ec1bc20a" colab={"base_uri": "https://localhost:8080/", "height": 50} err_x = x - mean_x err_y = y - mean_y print(err_x) print(err_y) # + [markdown] id="yTtZuCKcvx9E" colab_type="text" # Multiply error of x and error of y # + id="lyapVNSvwCe9" colab_type="code" outputId="7bbc6634-fdc9-4222-eef6-3f7fc56d1b94" colab={"base_uri": "https://localhost:8080/", 
"height": 34} err_mult = err_x * err_y print(err_mult) # + [markdown] id="qTpn2KJPwz_g" colab_type="text" # Calculate numerator by summing up the errors # + id="-QYzrLNtw4-O" colab_type="code" outputId="cdd579e5-c2fd-41b9-91cb-271073d492d7" colab={"base_uri": "https://localhost:8080/", "height": 34} numerator = np.sum(err_mult) numerator # + [markdown] id="37GOQKFmw9eP" colab_type="text" # Calculate denominator by squaring the x error and summing them up # + id="sI9vKNTKxXxM" colab_type="code" outputId="a5602946-690c-4235-eaf6-9843f4ea1f2a" colab={"base_uri": "https://localhost:8080/", "height": 34} err_x_squared = err_x**2 denominator = np.sum(err_x_squared) print(denominator) # + [markdown] id="UFgKi72wxkIx" colab_type="text" # Calculate the **slope (B1)** ! # + id="RF9L9iv8xqxk" colab_type="code" outputId="e47cc508-233e-40cf-f504-91cbf6bb106e" colab={"base_uri": "https://localhost:8080/", "height": 34} B1 = numerator / denominator print(B1) # + [markdown] id="jRi32EmVx4Vw" colab_type="text" # And we can calculate the **intercept (c)** ! # + id="_aHhF57ix8pn" colab_type="code" outputId="fa8b0e53-d442-4c2d-a009-8c0b706abde8" colab={"base_uri": "https://localhost:8080/", "height": 34} B0 = mean_y - B1 * mean_x print(B0) # + [markdown] id="wJZH9fDeyQQJ" colab_type="text" # We now have the coefficents for our simple linear regression equation. # $$y = B_0 + B_1 x = 0.4 + 0.8 x$$ # # + [markdown] id="56K484HtrF5v" colab_type="text" # ### Test the model to our training data # + id="9r9HxVKhoZCP" colab_type="code" outputId="39147773-0eec-47ab-c608-cbcbc24e56b4" colab={"base_uri": "https://localhost:8080/", "height": 282} x_test = np.array([1, 2, 3, 4, 5]) y_predicted = B0 + B1 * x_test p1 = plt.plot(x, y, 'o') p2 = plt.plot(x_test, y_predicted, 'o-', color='r') plt.legend((p1[0], p2[0]), (['y data', 'predicted y'])) # + [markdown] id="4Kj8wT89pTYV" colab_type="text" # ### Estimating Error (Root Mean Squared Error) # # $$RMSE = \sqrt{\frac{\sum_{i=1}^{n} (p_i - y_i)^2}{n}}$$ # + id="M25QSqEVpzhV" colab_type="code" outputId="dc3e7d1f-130c-4efc-9480-95c2d3f8fb31" colab={"base_uri": "https://localhost:8080/", "height": 34} numerator = np.sum((y_predicted - y)**2) denominator = len(y) rmse = np.sqrt(numerator / denominator) rmse # + [markdown] id="uGpy429krBRb" colab_type="text" # ### Wrap all up # + id="TudrLsXLrNBV" colab_type="code" colab={} def simple_linear_regression_traditional(x, y, x_test): import numpy as np x = np.array(x); y = np.array(y); x_test = np.array(x_test) mean_x = np.mean(x) mean_y = np.mean(y) err_x = x - mean_x err_y = y - mean_y err_mult = err_x * err_y numerator = np.sum(err_mult) err_x_squared = err_x**2 denominator = np.sum(err_x_squared) B1 = numerator / denominator B0 = mean_y - B1 * mean_x y_predicted = B0 + B1 * x_test return(B0, B1, y_predicted) def linreg_error(y, y_predicted): import numpy as np y = np.array(y); y_predicted = np.array(y_predicted) numerator = np.sum((y_predicted - y)**2) denominator = len(y) rmse = np.sqrt(numerator / denominator) return(rmse) # + [markdown] id="KEifuIW8tFnC" colab_type="text" # ## Method 2 ("Advanced") # # Calculate bias (or intercept $B_0$) and slope ($B_1$) using: # # $$B_1 = corr(x, y) \cdot \frac{stdev(y)}{stdev(x)}$$ # # Then, similar to **Method 1**. # $$B_0 = mean(y) - B_1 \cdot mean(x)$$ # # to construct simple linear regression model: $$y = B_0 + B_1 \cdot x$$ # + [markdown] id="1B1gzeoPvO-N" colab_type="text" # Calculate the **pearson's correlation coefficient $corr(x,y)$**. First, calculate mean and standard deviation. 
# + id="LfM3_SzotHtq" colab_type="code" outputId="b87743fb-153b-4725-9728-9513125c2f18" colab={"base_uri": "https://localhost:8080/", "height": 34} import statistics as stat mean_x = np.mean(x) mean_y = np.mean(y) stdev_x = stat.stdev(x) stdev_y = stat.stdev(y) print(stdev_x, stdev_y) # + [markdown] id="MYunKNeOvZQH" colab_type="text" # Calculate **covariance**. Covariance is the relationship that can be summarized between two variables. The sign of the covariance can be interpreted as whether the two variables change in the same direction (positive) or change in different directions (negative). A covariance value of zero indicates that both variables are completely independent. # + id="QzGhvOG6vqh-" colab_type="code" outputId="8ddefd6f-d372-4fde-8c7b-f079dc4cf4b8" colab={"base_uri": "https://localhost:8080/", "height": 34} cov_x_y = (np.sum((x - mean_x) * (y - mean_y))) * (1 / (len(x) - 1)) cov_x_y # + [markdown] id="Q0Vm6VLp0Ci5" colab_type="text" # Calculate **Pearson's Correlation Coefficient**. It summarizes the strength of the linear relationship between two data samples. It is the normalization of the covariance between the two variables. The coefficient returns a value between -1 and 1 that represents the limits of correlation from a full negative correlation to a full positive correlation. A value of 0 means no correlation. The value must be interpreted, where often a value below -0.5 or above 0.5 indicates a notable correlation, and values below those values suggests a less notable correlation. # + id="w1bpMRPTxeXx" colab_type="code" outputId="0c1c94f4-c5e6-486d-8812-bbe85ca111fb" colab={"base_uri": "https://localhost:8080/", "height": 34} corr_x_y = cov_x_y / (stdev_x * stdev_y) corr_x_y # + [markdown] id="MxSfB9UK2nYs" colab_type="text" # Calculate slope $B_1$ # + id="pxCJ_Ozq2uQy" colab_type="code" outputId="f8be335f-2a3d-4d18-9cd7-e93976cb30c5" colab={"base_uri": "https://localhost:8080/", "height": 34} B1 = corr_x_y * (stdev_y / stdev_x) B1 # + [markdown] id="ZCEhNOAg5wtb" colab_type="text" # Next, is similar to **Method 1**. 
# + id="9pWiMeW75gNw" colab_type="code" outputId="bff39b4e-6b6f-4f00-a638-276c15ec9697" colab={"base_uri": "https://localhost:8080/", "height": 282} B0 = mean_y - B1 * mean_x x_test = np.array([1, 2, 3, 4, 5]) y_predicted = B0 + B1 * x_test p1 = plt.plot(x, y, 'o') p2 = plt.plot(x_test, y_predicted, 'o-', color='r') plt.legend((p1[0], p2[0]), (['y data', 'predicted y'])) # + [markdown] id="kwa25OhZ55dm" colab_type="text" # Calculate RMSE # + id="a8wU_bWH57BB" colab_type="code" outputId="db00f1da-9b0d-4dbd-a0da-32dcf75d303e" colab={"base_uri": "https://localhost:8080/", "height": 34} rmse = linreg_error(y, y_predicted) rmse # + [markdown] id="igGssNsl9dj0" colab_type="text" # ### Wrap all up # + id="l7KxX_xy9es7" colab_type="code" colab={} def simple_linear_regression_advanced(x, y, x_test): import numpy as np import statistics as stat x = np.array(x); y = np.array(y); x_test = np.array(x_test) mean_x = np.mean(x) mean_y = np.mean(y) stdev_x = stat.stdev(x) stdev_y = stat.stdev(y) cov_x_y = (np.sum((x - mean_x) * (y - mean_y))) * (1 / (len(x) - 1)) corr_x_y = cov_x_y / (stdev_x * stdev_y) B1 = corr_x_y * (stdev_y / stdev_x) B0 = mean_y - B1 * mean_x y_predicted = B0 + B1 * x_test return(B0, B1, y_predicted) # + [markdown] id="X6-86aCCXCd7" colab_type="text" # ## Implement to Real Dataset # # Simple linear regression to WTI and Brent Daily Oil Price (1980-2020) # + id="09l-UebpXKpQ" colab_type="code" outputId="d210ff14-4686-4051-bbad-44c87a0dbc06" colab={"base_uri": "https://localhost:8080/", "height": 151} # !git clone https://www.github.com/yohanesnuwara/machine-learning # + id="RHhepDmFXVXm" colab_type="code" outputId="ce306ece-b88f-43e0-be0c-b415a16d743d" colab={"base_uri": "https://localhost:8080/", "height": 343} import pandas as pd brent = pd.read_csv('/content/machine-learning/datasets/brent-daily_csv.csv') wti = pd.read_csv('/content/machine-learning/datasets/wti-daily_csv.csv') # Converting to Panda datetime brent['Date'] = pd.to_datetime(brent['Date'], format='%Y-%m-%d') # depends on the data, format check web: https://strftime.org/ wti['Date'] = pd.to_datetime(wti['Date'], format='%Y-%m-%d') # depends on the data, format check web: https://strftime.org/ brent.head(10) # + [markdown] id="HcIqE1bgYA1U" colab_type="text" # Visualize data # + id="kAVh5xXvYs6b" colab_type="code" outputId="13dacac8-960e-46e7-e0a5-c53de6c708e2" colab={"base_uri": "https://localhost:8080/", "height": 421} from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() plt.figure(figsize=(15, 6)) plt.plot(brent.Date, brent.Price, '.', color='blue') plt.plot(wti.Date, wti.Price, '.', color='red') plt.title('Daily Oil Price') plt.xlabel('Year'); plt.ylabel('Price ($/bbl)') # + id="XR4kqCjoa9sC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 421} outputId="146246cb-a00b-4845-bbce-1d630f7c77c3" # convert datetime to ordinal import datetime as dt brent_date = np.array(brent['Date'].map(dt.datetime.toordinal)) brent_price = brent.Price brent_test = brent_date B0_brent, B1_brent, brent_price_predicted = simple_linear_regression_advanced(brent_date, brent_price, brent_test) wti_date = np.array(wti['Date'].map(dt.datetime.toordinal)) wti_price = wti.Price wti_test = wti_date B0_wti, B1_wti, wti_price_predicted = simple_linear_regression_advanced(wti_date, wti_price, wti_test) plt.figure(figsize=(15, 6)) p1 = plt.plot(brent.Date, brent.Price, '.', color='blue') p2 = plt.plot(wti.Date, wti.Price, '.', color='red') p3 = plt.plot(brent_test, brent_price_predicted, 
              color='blue')
p4 = plt.plot(wti_test, wti_price_predicted, color='red')
plt.legend((p1[0], p2[0], p3[0], p4[0]), (['Brent data', 'WTI data', 'Brent predicted', 'WTI predicted']))
plt.title('Daily Oil Price')
plt.xlabel('Year'); plt.ylabel('Price ($/bbl)')

# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import collections
from collections import defaultdict
import sys
import json
import random
from jsmin import jsmin
from io import StringIO
import numpy as np
import copy
import importlib
from functools import partial
import math
import os

# script_n = os.path.basename(__file__).split('.')[0]
script_n = 'connection_synapse_count_201230'

sys.path.insert(0, '/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc')
import my_plot
importlib.reload(my_plot)
from my_plot import MyPlotData
from weight_database import WeightDatabase

weightdb = WeightDatabase()

def weight_fn(syn):
    z_len = syn['z_length'] - 40
    major_axis_length = syn['major_axis_length'] * .9
    diameter = max(z_len, major_axis_length)
    diameter = int(diameter/40+.5)
    diameter *= 40
    r = diameter/2
    area = math.pi*r*r
    return area

weightdb.load_syn_db('/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/gen_db/mf_grc/gen_201229_setup01_syndb_threshold_20_coalesced.gz',
                     weight_fn=weight_fn)

mpd = MyPlotData()
mpd_raw = MyPlotData()
hist = defaultdict(int)
weights_db = weightdb.get_weights()
raw_data = []
for neuron, pc_weights in weights_db.items():
    for pc, weights in pc_weights.items():
        if len(weights) <= 1:
            continue
        connection_weight = 0
        for w in weights:
            connection_weight += 1
        mpd_raw.add_data_point(
            cleft_area=connection_weight,
            type='Observation')
        hist[connection_weight] += 1
        raw_data.append(connection_weight)

# print(hist)
for k in sorted([k for k in hist.keys()]):
    # print(f'{k}: {hist[k]}')
    mpd.add_data_point(
        count=hist[k],
        cleft_area=k)

# mpd = mpd.to_pdf('count', cumulative=False)
mpd_cdf = mpd.to_pdf('count', cumulative=False)
# -

import statistics
print(f'Mean: {statistics.mean(raw_data)}')
print(f'Median: {statistics.median(raw_data)}')
print(f'stdev: {statistics.stdev(raw_data)}')

# +
import scipy
importlib.reload(my_plot); my_plot.my_displot(
    mpd_raw,
    x="cleft_area",
    # y="count",
    # xlim=[None, 60],
    # s=100,
    kind='hist',
    discrete=True,
    stat='probability',
    # fit=scipy.stats.lognorm,
    # log_scale_x=True,
    # binwidth=.0399,
    # kde=True,
    # kde_kws={'bw_adjust': 3.5},
    context='paper',
    height=4,
    y_axis_label='Frequency',
    x_axis_label='Synapses per Connection',
    show=True,
    save_filename=f'{script_n}_hist.svg',
    )

# +
# fit lognormal
from scipy.stats import lognorm
s, loc, scale = lognorm.fit(raw_data)  # lognorm.fit returns (shape, loc, scale)
r = lognorm.rvs(s, loc=loc, scale=scale, size=len(raw_data))

mpd_fit = MyPlotData()
for v in r:
    mpd_fit.add_data_point(
        cleft_area=int(v),
        type='Fit')

importlib.reload(my_plot); my_plot.my_displot(
    mpd_fit,
    x="cleft_area",
    # y="count",
    xlim=[None, 50],
    # xlim=[-1.5, None],
    # s=100,
    kind='hist',
    discrete=True,
    # log_scale_x=True,
    # binwidth=.0399,
    # kde=True,
    # kde_kws={'bw_adjust': 3.5},
    context='paper',
    height=4,
    y_axis_label='Count',
    x_axis_label='Synapses per Connection',
    show=True,
    save_filename=f'{script_n}_hist_fit2.svg',
    )
# -

mpd_all = MyPlotData()
mpd_all.append(mpd_raw)
mpd_all.append(mpd_fit)

importlib.reload(my_plot); my_plot.my_displot(
    mpd_all,
    x="cleft_area",
    # y="count",
    xlim=[None, 50],
    # xlim=[-1.5, None],
    #
s=100, hue='type', kind='hist', discrete=True, # log_scale_x=True, # binwidth=.0399, # kde=True, # kde_kws={'bw_adjust': 3.5}, stat='probability', context='paper', height=4, aspect=1, y_axis_label='Frequency', x_axis_label='Synapses per Connection', show=True, save_filename=f'{script_n}_hist_fit.svg', ) print(f'n={len(mpd_raw.data)} connections') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import tensorflow as tf print(tf.__version__) import numpy as np import unicodedata import re import pandas as pd from sklearn.model_selection import train_test_split gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: try: # Currently, memory growth needs to be the same across GPUs for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs") except RuntimeError as e: # Memory growth must be set before GPUs have been initialized print(e) SAVE = True NUM_EPOCHS = 22 BATCH_SIZE = 64 LSTM_SIZE = 512 EMBEDDING_SIZE = 256 SPLIT = 0.8 FILENAME = "Data/encode_geo_aggregated.csv" frame = pd.read_csv(FILENAME) train_frame, test_frame = train_test_split(frame, train_size = SPLIT) train = [[x.replace("("," ( ").replace(")", " ) ").replace("-",' - ').replace("_"," _ ").replace(";"," ; ").replace(" "," ").replace("=",""),' ' + y.replace(" $",""), y.replace(" $","") + ' '] for (x,y) in zip(train_frame.Input, train_frame.Output)] test = [[x.replace("("," ( ").replace(")", " ) ").replace("-",' - ').replace("_"," _ ").replace(";"," ; ").replace(" "," ").replace("=",""),' ' + y.replace(" $",""), y.replace(" $","") + ' '] for (x,y) in zip(test_frame.Input, test_frame.Output)] def tokenize_dataset(train, test, differentiate_vocabularies = False): tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='', lower=True, split=' ', char_level=False) for index in range(3): tokenizer.fit_on_texts(x[index] for x in train) tokenizer.fit_on_texts(x[index] for x in test) train_in = tokenizer.texts_to_sequences(i[0] for i in train) train_in = tf.keras.preprocessing.sequence.pad_sequences(train_in, padding='post') train_outi = tokenizer.texts_to_sequences(i[1] for i in train) train_outi = tf.keras.preprocessing.sequence.pad_sequences(train_outi, padding='post') train_outo = tokenizer.texts_to_sequences(i[2] for i in train) train_outo = tf.keras.preprocessing.sequence.pad_sequences(train_outo, padding='post') test_in = tokenizer.texts_to_sequences(i[0] for i in test) test_in = tf.keras.preprocessing.sequence.pad_sequences(test_in, padding='post') test_outi = tokenizer.texts_to_sequences(i[1] for i in test) test_outi = tf.keras.preprocessing.sequence.pad_sequences(test_outi, padding='post') test_outo = tokenizer.texts_to_sequences(i[2] for i in test) test_outo = tf.keras.preprocessing.sequence.pad_sequences(test_outo, padding='post') return tokenizer,train_in, train_outi, train_outo, test_in, test_outi, test_outo tokenizer, train_in, train_outi, train_outo, test_in, test_outi, test_outo = tokenize_dataset(train,test) len(tokenizer.word_index) + 1 # + training_set = tf.data.Dataset.from_tensor_slices((train_in,train_outi, train_outo)) training_set = training_set.shuffle(20).batch(BATCH_SIZE) test_set = tf.data.Dataset.from_tensor_slices((test_in, test_outi, test_outo)) test_set = 
test_set.shuffle(20).batch(BATCH_SIZE) # + class LuongAttention(tf.keras.Model): def __init__(self, rnn_size): super(LuongAttention, self).__init__() self.wa = tf.keras.layers.Dense(rnn_size) def call(self, decoder_output, encoder_output): # Dot score: h_t (dot) Wa (dot) h_s # encoder_output shape: (batch_size, max_len, rnn_size) # decoder_output shape: (batch_size, 1, rnn_size) # score will have shape: (batch_size, 1, max_len) score = tf.matmul(decoder_output, self.wa(encoder_output), transpose_b=True) # alignment vector a_t alignment = tf.nn.softmax(score, axis=2) context = tf.matmul(alignment, encoder_output) return context, alignment class Encoder(tf.keras.Model): def __init__(self, vocab_size, embedding_size, lstm_size): super(Encoder, self).__init__() self.lstm_size = lstm_size self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_size) self.lstm = tf.keras.layers.LSTM( lstm_size, return_sequences=True, return_state=True) def call(self, sequence, states): embed = self.embedding(sequence) output, state_h, state_c = self.lstm(embed, initial_state=states) return output, state_h, state_c def init_states(self, batch_size): return (tf.zeros([batch_size, self.lstm_size]), tf.zeros([batch_size, self.lstm_size])) #DECODER FOR ATTENTION class Decoder(tf.keras.Model): def __init__(self, vocab_size, embedding_size, rnn_size): super(Decoder, self).__init__() # Create a LuongAttention object self.attention = LuongAttention(rnn_size) self.rnn_size = rnn_size self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_size) self.lstm = tf.keras.layers.LSTM( rnn_size, return_sequences=True, return_state=True) self.wc = tf.keras.layers.Dense(rnn_size, activation='tanh') self.ws = tf.keras.layers.Dense(vocab_size) def call(self, sequence, state, encoder_output): # Remember that the input to the decoder # is now a batch of one-word sequences, # which means that its shape is (batch_size, 1) embed = self.embedding(sequence) # Therefore, the lstm_out has shape (batch_size, 1, rnn_size) lstm_out, state_h, state_c = self.lstm(embed, initial_state=state) # Use self.attention to compute the context and alignment vectors # context vector's shape: (batch_size, 1, rnn_size) # alignment vector's shape: (batch_size, 1, source_length) context, alignment = self.attention(lstm_out, encoder_output) # Combine the context vector and the LSTM output # Before combined, both have shape of (batch_size, 1, rnn_size), # so let's squeeze the axis 1 first # After combined, it will have shape of (batch_size, 2 * rnn_size) lstm_out = tf.concat([tf.squeeze(context, 1), tf.squeeze(lstm_out, 1)], 1) # lstm_out now has shape (batch_size, rnn_size) lstm_out = self.wc(lstm_out) # Finally, it is converted back to vocabulary space: (batch_size, vocab_size) logits = self.ws(lstm_out) return logits, state_h, state_c, alignment def loss_func(targets, logits): crossentropy = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True) mask = tf.math.logical_not(tf.math.equal(targets, 0)) mask = tf.cast(mask, dtype=tf.int64) loss = crossentropy(targets, logits, sample_weight=mask) return loss # - optimizer = tf.keras.optimizers.Adam() #TRAIN FOR ATTENTION @tf.function def train_step(source_seq, target_seq_in, target_seq_out, en_initial_states): loss = 0 with tf.GradientTape() as tape: en_outputs = encoder(source_seq, en_initial_states) en_states = en_outputs[1:] de_state_h, de_state_c = en_states # We need to create a loop to iterate through the target sequences for i in range(target_seq_out.shape[1]): # Input to the 
decoder must have shape of (batch_size, length) # so we need to expand one dimension decoder_in = tf.expand_dims(target_seq_in[:, i], 1) logit, de_state_h, de_state_c, _ = decoder(decoder_in, (de_state_h, de_state_c), en_outputs[0]) # The loss is now accumulated through the whole batch loss += loss_func(target_seq_out[:, i], logit) variables = encoder.trainable_variables + decoder.trainable_variables gradients = tape.gradient(loss, variables) optimizer.apply_gradients(zip(gradients, variables)) return loss / target_seq_out.shape[1] #TEST FOR ATTENTION @tf.function def test_step(source_seq, target_seq_in, target_seq_out, en_initial_states): loss = 0 with tf.GradientTape() as tape: en_outputs = encoder(source_seq, en_initial_states) en_states = en_outputs[1:] de_state_h, de_state_c = en_states # We need to create a loop to iterate through the target sequences for i in range(target_seq_out.shape[1]): # Input to the decoder must have shape of (batch_size, length) # so we need to expand one dimension decoder_in = tf.expand_dims(target_seq_in[:, i], 1) logit, de_state_h, de_state_c, _ = decoder( decoder_in, (de_state_h, de_state_c), en_outputs[0]) # The loss is now accumulated through the whole batch loss += loss_func(target_seq_out[:, i], logit) return loss / target_seq_out.shape[1] #PREDICT FOR ATTENTION def predict(test_source_text=None): choice = np.random.choice(len(test)) if test_source_text is None: test_source_text = test[choice][0] target_text = test[choice][2].split(" ") print(test_source_text) print("TARGET") print(' '.join(target_text)) test_source_seq = tokenizer.texts_to_sequences([test_source_text]) en_initial_states = encoder.init_states(1) en_outputs = encoder(tf.constant(test_source_seq), en_initial_states) de_input = tf.constant([[tokenizer.word_index['']]]) de_state_h, de_state_c = en_outputs[1:] out_words = [] alignments = [] while True: de_output, de_state_h, de_state_c, alignment = decoder( de_input, (de_state_h, de_state_c), en_outputs[0]) de_input = tf.expand_dims(tf.argmax(de_output, -1), 0) out_words.append(tokenizer.index_word[de_input.numpy()[0][0]]) alignments.append(alignment.numpy()) if out_words[-1] == '' or len(out_words) >= 100: break print("PREDICTED") print(' '.join(out_words)) # + jupyter={"outputs_hidden": true} in_vocab_size = len(tokenizer.word_index) + 1 out_vocab_size = len(tokenizer.word_index) + 1 encoder = Encoder(in_vocab_size, EMBEDDING_SIZE, LSTM_SIZE) decoder = Decoder(out_vocab_size, EMBEDDING_SIZE, LSTM_SIZE) test_losses = [] training_losses = [] for e in range(NUM_EPOCHS): en_initial_states = encoder.init_states(BATCH_SIZE) training_loss = 0.0 training_samples = 0.0 for batch, (source_seq, target_seq_in, target_seq_out) in enumerate(training_set.take(-1)): if(len(source_seq)== BATCH_SIZE): training_loss += train_step(source_seq, target_seq_in, target_seq_out, en_initial_states) training_samples += 1 print("Training complete, now testing...") training_loss = training_loss/training_samples test_loss = 0.0 test_samples = 0.0 for batch, (source_seq, target_seq_in, target_seq_out) in enumerate(test_set.take(-1)): if(len(source_seq) == BATCH_SIZE): test_loss += test_step(source_seq, target_seq_in, target_seq_out, en_initial_states) test_samples += 1 test_loss = test_loss/test_samples training_losses.append(training_loss) test_losses.append(test_loss) print('Epoch {} Training Loss {:.4f}, Test Loss {:.4f}'.format(e + 1, training_loss.numpy(), test_loss.numpy())) try: predict() except Exception: continue # - import matplotlib.pyplot as plt x = 
np.arange(NUM_EPOCHS) plt.plot(x, training_losses) plt.plot(x, test_losses) plt.legend(["Training loss", "Test loss"], loc = "upper left") plt.show() # + results = [] for index in range(len(test)): choice = index test_source_text = test[choice][0] target_text = test[choice][2] test_source_seq = tokenizer.texts_to_sequences([test_source_text]) en_initial_states = encoder.init_states(1) en_outputs = encoder(tf.constant(test_source_seq), en_initial_states) de_input = tf.constant([[tokenizer.word_index['']]]) de_state_h, de_state_c = en_outputs[1:] out_words = [] alignments = [] while True: de_output, de_state_h, de_state_c, alignment = decoder( de_input, (de_state_h, de_state_c), en_outputs[0]) de_input = tf.expand_dims(tf.argmax(de_output, -1), 0) out_words.append(tokenizer.index_word[de_input.numpy()[0][0]]) alignments.append(alignment.numpy()) if out_words[-1] == '' or len(out_words) >= 100: break results.append([test_source_text,target_text, ' '.join(out_words)]) dataframe_results = pd.DataFrame(results, columns = ["Text","Target", "Predicted"]) if(SAVE): dataframe_results.to_csv(f"Results/LSTM_results_{NUM_EPOCHS}epochs.csv") dataframe_results # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from astropy.io import fits from astropy import wcs import wcsaxes import numpy as np import aplpy import pandas import matplotlib.pyplot as plt from astropy.modeling import models, fitting from scipy import optimize # %matplotlib inline from os.path import expanduser # + home = expanduser("~") gc_dir = home + "/Dropbox/GalacticCenter/" filename_skymap = gc_dir + "fits/release_galactic_skymap.fits" HDUlist = fits.open(filename_skymap) HDUlist.info() # get excess data skymap_data, skymap_header = fits.getdata(filename_skymap, header = True) # + # world coordinate system wcs_SgrA = wcs.WCS(filename_skymap) wcs_GC = wcs.WCS() w = wcs.WCS(HDUlist[0].header) #fig = plt.figure(figsize=(8,8)) #ax = fig.add_subplot(1, 1, 1, projection = wcs_SgrA) #wx, wy = w.wcs_pix2world(0,0) # + skymap_data_n = np.nan_to_num(skymap_data) img = plt.imshow(skymap_data_n, origin='lower') ax = img.axes ax.grid(color='white', alpha=1, ls='solid') ax.set_xlabel("Galactic Longitude") ax.set_ylabel("Galactic Latitude") plt.colorbar() # - def TwoD_Gaussian(coord, amp, x0, y0, sigma_x, sigma_y, base): """simple model of two-dimensional gaussian""" x, y = coord g = amp*np.exp(-((x-x0)**2/(2*sigma_x**2)+(y-y0)**2/(2*sigma_y**2))) + base return g.ravel() # TwoD_Gaussian # + nx, ny = skymap_data.shape # Create x and y indices x = np.linspace(0, nx-1, nx) y = np.linspace(0, ny-1, ny) x, y = np.meshgrid(x, y) coords = x, y # - # Run fit optimization # + guess_SgrA_world = (1000, 0, 0, 0.2, 0.2, 100) guess_SgrA_pix = (100, 120, 120, 5, 5, 1) guess_G09_world = (250, 0.08, 0.87, 0.5, 0.5, 50) #guess_G09_pix = (250,) #guess_J1745_world = (300) #guess_J1745_pix = (300) # Fitting for uncorrelated map, by pixel p_opt, p_cov = optimize.curve_fit(TwoD_Gaussian, coords, skymap_data_n.ravel(), p0=guess_SgrA_pix, maxfev=2500) #p_opt, p_cov = optimize.curve_fit(TwoD_Gaussian, coords_world, excess_data_N.ravel(), p0=guess_world_G09,maxfev=2500) print(p_opt) print(p_cov) #print(y[0:3,8:12]) #init_guess = (1000,120,120,1,1,100) #p_opt_G09, p_cov_G09 = optimize.curve_fit(TwoD_Gaussian, coords, residual_skymap.ravel(), p0=init_guess_G09) #p_opt, p_cov = 
scipy.optimize.curve_fit(TwoD_Gaussian, (x,y), dataN.ravel(), p0=init_guess) # + pointSource_excess = TwoD_Gaussian(coords, *p_opt) #print(pointSource_excess.reshape(240, 240)[115:125, 115:125]) residual_skymap = skymap_data_n - pointSource_excess.reshape(nx, ny) fig_residual = plt.figure() plt.axis([50, 190, 80, 150]) plt.imshow(residual_skymap) plt.colorbar() #print(residual_skymap[115:125, 115:125]) fig_PS = plt.figure() plt.imshow(pointSource_excess.reshape(nx, ny)) # + with open(gc_dir+'/spectralPoints/SgrA_4tels_noPoor_dNdE_TeV.txt') as infile: filetext = infile.read() linelist = filetext.split(sep='\n') for index, line in enumerate(linelist): paramlist = line.split() if len(paramlist) < 2: del linelist[index] for line in linelist: print(line) infile.close() lines = list(open(gc_dir+"/spectralPoints/SgrA_4tels_noPoor_dNdE_TeV.txt", "r")) linesSanitized = map(lambda each:each.strip("\n"), lines) print(linesSanitized) Celsius = [39.2, 36.5, 37.3, 37.8] Fahrenheit = map(lambda x: (float(9)/5)*x + 32, Celsius) print(*Fahrenheit) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Caffe to Caffe2 Translator Tutorial # # In this example we will convert a pre-existing Caffe model into a format that will be supported by Caffe2. This tutorial requires the following additional modules that you may not have installed yet: # # - matpotlib # - skimage # - pyyaml (required by caffe/scripts/download_model_binary.py) # # Use conda, pip, or `sudo apt-get install` to get these modules. # # You will also need the original Caffe repo because this tutorial uses tools and resources from Caffe. You configure the Caffe location in the second code block below. This tutorial also assumes that you have existing .caffemodel models and supporting files to convert. If not, see the tutorial called [Getting Models and Datasets](Getting_Models_and_Datasets.ipynb). # %matplotlib inline from caffe.proto import caffe_pb2 from caffe2.proto import caffe2_pb2 from cStringIO import StringIO from google.protobuf import text_format from IPython import display import matplotlib.image as mpimg from matplotlib import pyplot import numpy as np import os from caffe2.python import caffe_translator, visualize, workspace, net_drawer import skimage import skimage.io import skimage.transform import sys print("Required modules imported.") # Below you will set your file path in `CAFFE_ROOT` according to your current installation of Caffe. # You will also set the model that you're converting with `MODEL`. This will be matched with the filename of the model that you may already have downloaded, or else the script will go fetch the pre-trained model (ilsvrc_aux) for you from the BVLC repository using the script included. Note that the required resources here are: # # * Pretrained binary: .caffemodel # * Model file: deploy.prototxt # # At this point, the only two things you should play with below are: # # - CAFFE_ROOT: where you have the Caffe repository # - MODEL: which model you're trying to convert # # If you get any errors then look back at the top for the link to resources or tutorial for getting pre-trained models and datasets. 
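# If the `.caffemodel` binary is missing, one way to fetch it is Caffe's bundled
# `scripts/download_model_binary.py` helper (the script the pyyaml prerequisite
# above refers to). The cell below is only a sketch under assumptions: the
# `~/caffe` checkout location and the `bvlc_googlenet` model folder mirror the
# defaults used later in this tutorial and are not part of the original code.

# +
import os
import subprocess
import sys

caffe_root = os.path.expanduser('~/caffe')  # assumed checkout location
model_folder = os.path.join(caffe_root, 'models', 'bvlc_googlenet')
weights = os.path.join(model_folder, 'bvlc_googlenet.caffemodel')

if not os.path.exists(weights):
    # download_model_binary.py reads the model folder's readme.md frontmatter
    # to locate, download, and checksum the pretrained binary.
    subprocess.check_call([sys.executable,
                           os.path.join(caffe_root, 'scripts', 'download_model_binary.py'),
                           model_folder])
print('Pretrained weights present: %s' % os.path.exists(weights))
# -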
# + # This should point to the root folder of Caffe that you checked # out from https://github.com/BVLC/caffe # You can do it by # git clone https://github.com/BVLC/caffe.git CAFFE_ROOT = os.path.expanduser('~/caffe') # format below is the model's folder, model's dataset inside that folder # you can switch the comments on MODEL to try out different model conversions MODEL = 'bvlc_googlenet', 'bvlc_googlenet.caffemodel', 'this one is fast' #MODEL = 'bvlc_alexnet', 'bvlc_alexnet.caffemodel', 'this one takes a minute' if not os.path.exists(CAFFE_ROOT): print("Houston, you may have a problem.") print("Did you change CAFFE_ROOT to point to your local Caffe repo?") print("Try running: git clone https://github.com/BVLC/caffe.git") CAFFE_MODELS = os.path.join(CAFFE_ROOT, 'models') # this is like: ~/caffe/models/bvlc_alexnet/deploy.prototxt CAFFE_MODEL_FILE = os.path.join(CAFFE_MODELS, MODEL[0], 'deploy.prototxt') # this is like: ~/caffe/models/bvlc_alexnet/bvlc_alexnet.caffemodel CAFFE_PRETRAINED = os.path.join(CAFFE_MODELS, MODEL[0], MODEL[1]) if not os.path.exists(CAFFE_PRETRAINED): print(CAFFE_PRETRAINED + " not found!") else: print("Found the .caffemodel! Now looking for deploy.prototxt...") if not os.path.exists(CAFFE_MODEL_FILE): print("Caffe model file, " + CAFFE_MODEL_FILE + " was not found!") else: print('All needed files found! Load the model in the next step.') # + caffenet = caffe_pb2.NetParameter() caffenet_pretrained = caffe_pb2.NetParameter() text_format.Merge(open(CAFFE_MODEL_FILE).read(), caffenet) caffenet_pretrained.ParseFromString(open(CAFFE_PRETRAINED).read()) print('Model loaded.') # TODO: explain why this is needed DATASET = 'imagenet.bet.pickle' CAFFE_DATA = os.path.join(CAFFE_ROOT, 'data/ilsvrc12') if not os.path.exists(os.path.join(CAFFE_DATA, DATASET)): print("Dataset wasn't found. Downloading...") os.system(os.path.join(CAFFE_DATA, 'get_ilsvrc_aux.sh')) # - # In the next step the model will be translated to Caffe2 format. # Perform translation, using the caffenet and pretrained parameters. print('Translating model.') net, pretrained_params = caffe_translator.TranslateModel( caffenet, caffenet_pretrained, is_test=True) print('Model translated.') # For fun we can print out a graph of the operators. We can also change the code below to have it graph the blobs too. graph = net_drawer.GetPydotGraphMinimal(net.op, net.name, rankdir="LR") print net.name display.Image(graph.create_png(), width=800) # The above command shows only the operators. If you want to see both the # operators and the blobs, use the command below. #graph = net_drawer.GetPydotGraph(net.operators, net.name, rankdir="BT") #display.Image(graph.create_png(), width=500) # We can also list them by name: for tensor in pretrained_params.protos: print tensor.name # For the next step of actually running the network, you have the option of CPU or GPU. It is setup for CPU by default, so if you want to use GPU you need to switch the comments below to enable the appropriate lines. # # `pycaffe2.workspace` implements a very simple Model object that wraps the construction of the model. Specifically, what it will do is: # (1) feed the parameters to the workspace; # (2) Create input blob placeholders; # (3) Actually instantiating the Caffe network. # + # We will first specify the device option: how we want to run the network. 
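# NOTE: as this cell is written, the CPU line below is commented out and the
# two CUDA lines are active, so the translated net will run on GPU 0; to stay
# on the CPU instead, uncomment the CPU line and comment out the CUDA lines.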
#net.device_option.device_type = caffe2_pb2.CPU # If you want to use cuda, use the following commands net.device_option.device_type = caffe2_pb2.CUDA net.device_option.cuda_gpu_id = 0 # Here we will simply use the Model object to host the model. model = workspace.Model(net, pretrained_params, ["data"], ["prob"]) print 'Network created sucessfully.' # - # Here we're going to use the test kitty and show processing. It follows these steps: # # 1. Resize the image to 256\*256, and crop out the center. # # 2. Since Caffe expects CHW order and the current image is HWC, we will need to change the order. # # 3. Caffe uses a BGR order due to legacy OpenCV issues, so we will change RGB to BGR. # # 4. We will subtract the mean image. Note that skimage loads image in the [0, 1] range so we multiply the pixel values first to get them into [0, 255]. # # 5. Finally, since caffe2 expects the input to have a batch term so we can feed in multiple images, we will simply prepend a batch dimension of size 1. Also, we will make sure image is of type np.float32. IMAGE_FILE = 'images/cat.jpg' # TODO: second declaration of the cat, remove the first? img = skimage.img_as_float(skimage.io.imread(IMAGE_FILE)).astype(np.float32) pyplot.imshow(img) pyplot.axis('off') pyplot.title('Original image') # Here are the steps we use to preprocess the image. # (1) Resize the image to 256*256, and crop out the center. input_height, input_width = 224, 224 print 'Input shape is %dx%d' % (input_height, input_width) img = skimage.transform.resize(img, (256, 256)) crop_height = (256 - input_height) / 2 crop_width = (256 - input_width) / 2 img = img[crop_height:crop_height + input_height, crop_width:crop_width + input_width] pyplot.figure() pyplot.imshow(img) pyplot.axis('off') pyplot.title('Resized image') # (2) Since Caffe expects CHW order and the current image is HWC, # we will need to change the order. img = img.swapaxes(1, 2).swapaxes(0, 1) # (3) Caffe uses a BGR order due to legacy OpenCV issues, so we # will change RGB to BGR. img = img[(2, 1, 0), :, :] # (4) We will subtract the mean image. Note that skimage loads # image in the [0, 1] range so we multiply the pixel values # first to get them into [0, 255]. mean_file = os.path.join(CAFFE_ROOT, 'python/caffe/imagenet/ilsvrc_2012_mean.npy') mean = np.load(mean_file).mean(1).mean(1) img = img * 255 - mean[:, np.newaxis, np.newaxis] pyplot.figure() for i in range(3): # For some reason, pyplot subplot follows Matlab's indexing # convention (starting with 1). Well, we'll just follow it... pyplot.subplot(1, 3, i+1) pyplot.imshow(img[i]) pyplot.axis('off') pyplot.title('Input channel %d' % (i+1)) # (5) finally, since caffe2 expect the input to have a batch term # so we can feed in multiple images, we will simply prepend a # batch dimension of size 1. Also, we will make sure image is # of type np.float32. img = img[np.newaxis, :, :, :].astype(np.float32) print 'Final input shape is:', img.shape # Don't worry about the aspect ratio in the resized image. Squishy kitty can still be found. It doesn't matter in this example, and as you will see below we get a very high probablity on the prediction. When you convert your own models you will want to prep your test material to fit your expectations and in a manner that you used to create the original model. 
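# Before running the net, the same preprocessing can be collected into one
# reusable helper. The cell below is a self-contained sketch (numpy + skimage
# only): the 224x224 crop and the per-channel BGR mean constants are stand-in
# assumptions mirroring the steps above, not the exact values taken from
# `ilsvrc_2012_mean.npy` in the original tutorial.

# +
import numpy as np
import skimage
import skimage.io
import skimage.transform


def preprocess_bgr_chw(path, crop=224, mean_bgr=(104.0, 117.0, 123.0)):
    """Load an image and return a 1 x C x H x W float32 BGR batch."""
    im = skimage.img_as_float(skimage.io.imread(path)).astype(np.float32)
    im = skimage.transform.resize(im, (256, 256))        # (1) resize
    off = (256 - crop) // 2
    im = im[off:off + crop, off:off + crop]              # (1) center crop
    im = im.swapaxes(1, 2).swapaxes(0, 1)                # (2) HWC -> CHW
    im = im[(2, 1, 0), :, :]                             # (3) RGB -> BGR
    mean = np.asarray(mean_bgr, dtype=np.float32)[:, np.newaxis, np.newaxis]
    im = im * 255 - mean                                 # (4) rescale to [0, 255], subtract mean
    return im[np.newaxis, :, :, :].astype(np.float32)    # (5) prepend batch dimension

# e.g. batch = preprocess_bgr_chw(IMAGE_FILE) reproduces the shape printed above
# -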
results = model.Run([img])
prob = workspace.FetchBlob('prob').flatten()
pyplot.plot(prob)
pyplot.title('Prediction')
pyplot.axis('off')
print 'Max prediction is:', prob.argmax()

# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Simulation of Widget-Adjustable Parametric Circuit
#
# Suppose you are interested in printing out the state vector
# of a quantum circuit at various points in its evolution, as well
# as at the end of the circuit. Qubiter can do that.
#
# Furthermore, suppose that the circuit is a parametric one, and you want to
# vary its parameters using sliders on a GUI (graphical user interface).
# Qubiter can do that too, via a Jupyter notebook with widgets.
# This notebook is one such notebook.
#
# A Jupyter notebook with widgets gives you the best of both worlds:
# the GUI world and the notebook world.
#
# GUIs excel at reducing user errors, making the code easier to use,
# and lowering how much of the code a user must understand
# in order to use it correctly.
#
# Notebooks excel at providing a robust, flexible, ready-made, familiar way
# of documenting and saving your work for multiple use cases. They are also
# great for explaining your work to others in detail and with precision.

# cd to the Qubiter directory and add it to your path environment variable
import os
import sys
print(os.getcwd())
os.chdir('../../')
print(os.getcwd())
sys.path.insert(0, os.getcwd())

# We will first construct the quantum circuit that we are interested in
# studying.
#
# Note that we insert a PRINT statement at each point at which we want a
# printout of the state vector (except at the end point, which has an
# implicit PRINT statement).
#
# Note also that the angles for some gates are special strings with hash
# characters in them instead of floats. These placeholder variable or
# parameter strings have been explained elsewhere, in another Jupyter
# notebook.

# A PRINT statement in the 'ALL' style will print out a bunch of text info
# about the state vector at the point in the evolution where the PRINT
# statement is located. The 'ALL+' style will print out everything that the
# 'ALL' style prints out, plus a bar graph of the probability associated
# with each component of the state vector.
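# As a toy illustration of the placeholder idea (this is not Qubiter's own
# substitution code), the cell below shows how a hash string such as '#1*.5'
# could be resolved to a concrete angle once the slider values are known. The
# functional form 'fun#1#3' used further below relies on the fun_name_to_fun
# mechanism instead and is not handled by this sketch.

# +
import re


def resolve_placeholder(expr, var_values):
    """Substitute each '#k' token with var_values[k] and evaluate the arithmetic."""
    resolved = re.sub(r'#(\d+)',
                      lambda m: repr(var_values[int(m.group(1))]),
                      expr)
    # only simple arithmetic like '3.14159*.5' is expected here
    return eval(resolved, {'__builtins__': {}}, {})


print(resolve_placeholder('#1*.5', {1: 3.14159}))  # -> 1.570795
# -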
from qubiter.jupyter_notebooks.utilities_nb import run_sim_gui from qubiter.SEO_writer import * # + num_bits = 3 file_prefix = 'widgets_simulation_test' emb = CktEmbedder(num_bits, num_bits) wr = SEO_writer(file_prefix, emb) wr.write_Ry(0, rads='#1*.5') wr.write_Ry(1, rads='#2*.5') wr.write_cnot(0, 1) wr.write_PRINT('ALL+') wr.write_Ry(0, rads='#3*.5') wr.write_Ry(1, rads='#4*.5') wr.write_cnot(0, 1) wr.write_PRINT('ALL+') trols = Controls(num_bits) trols.bit_pos_to_kind = {0:True, 1:False} trols.refresh_lists() wr.write_controlled_one_bit_gate(2, trols, OneBitGates.rot_ax, ['fun#1#3', 1]) wr.close_files() # - # We can ask wr to print the English file just created wr.print_eng_file() # We can ask wr to print the Picture file just created wr.print_pic_file() # All the gui code is prepackaged and # included in a method called `run_sim_gui()` # included in the file `utilities_nb.py` # + all_var_nums = [1, 2, 3, 4] def fun(a, b): return a*b*.5 fun_name_to_fun = {'fun': fun} # re-run this cell if you change the arguments of run_sim_gui() run_sim_gui(file_prefix, num_bits, all_var_nums, fun_name_to_fun=fun_name_to_fun, append_new=True, sty_fin_desc='ALL+') # - # -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.5.1 # language: julia # name: julia-1.5 # --- # Introductory demonstration of IterationControl.jl using Pkg Pkg.activate(@__DIR__) Pkg.instantiate() # Here's a simple iterative mdel that computes Babylonian # approximations to a square root: # + mutable struct SquareRooter x::Float64 # input - number to be square rooted root::Float64 # current approximation of root training_losses::Vector{Float64} # successive approximation differences SquareRooter(x) = new(x, 1.0, Float64[]) end function train!(m::SquareRooter, Δn::Int) m.training_losses = Float64[] for i in 1:Δn next_guess = (m.root + m.x/m.root)/2 push!(m.training_losses, abs(next_guess - m.root)) m.root = next_guess end end loss(m::SquareRooter) = abs(m.root^2 - m.x) training_losses(m::SquareRooter) = m.training_losses # - # And here it in action: model = SquareRooter(9) model.root train!(model, 2) # train for 2 iterations model.root train!(model, 1) # train for 1 more iteration model.root # Then we can replace the integer argument `n` in `train!(model, n)` # with a number of more sophisticated *controls* by "lifting" the method # `train!` to the `IterationControl.train!` method defined in this # package: using IterationControl IterationControl.train!(model::SquareRooter, n) = train!(model, n) # The lifted `train!` has the same functionality as the original one: # + model = SquareRooter(9) IterationControl.train!(model, 2) model.root # - # But now we can also do this: IterationControl.train!(model, Step(2), NumberLimit(3), Info(m->m.root)); # Here each control is repeatedly applied until one of them triggers a # stop. The first control `Step(2)` says "train the model two more # iterations"; the second says "stop after 3 repetitions" (of the # sequence of control applications); and the third, "log the value of # the root to `Info`". # If `model` admits a method returning a loss (for example, the # difference between `x` and the square of `root`), then we can lift # that method to `IterationControl.loss` to enable control using # loss-based stopping criteria, such as a loss threshold. 
In the # demonstation below, we also include a callback: model = SquareRooter(4) train!(model, 1) loss(model) # + IterationControl.loss(model::SquareRooter) = loss(model) losses = Float64[] callback(model) = push!(losses, loss(model)) IterationControl.train!(model, Step(1), Threshold(0.0001), Callback(callback)); # - losses # If training `model` generates user-inspectable "training losses" (one # per iteration) then similarly lifting the appropriate access function # to `IterationControl.training_losses` enables Prechelt's # progress-modified generalization loss stopping criterion, `PQ`. # `PQ` is the only criterion from the # [EarlyStopping.jl](https://github.com/ablaom/EarlyStopping.jl) package # not otherwise enabled when `IterationControl.loss` is overloaded as # above. # *Reference.* [utz # (1998)](https://link.springer.com/chapter/10.1007%2F3-540-49430-8_3): # "Early Stopping - But When?", in *Neural Networks: Tricks of the # Trade*, ed. , Springer. # The interface just described is sufficient for controlling # conventional machine learning models with an iteration parameter, as # this [tree boosting # example](https://github.com/ablaom/IterationControl.jl/tree/master/examples/tree_booster) # shows. # --- # # *This notebook was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).* # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import pandas as pd import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt # Set some Pandas options pd.set_option('display.notebook_repr_html', False) pd.set_option('display.max_columns', 20) pd.set_option('display.max_rows', 25) # - # # Plotting and Visualization # # There are a handful of third-party Python packages that are suitable for creating scientific plots and visualizations. These include packages like: # # * matplotlib # * Chaco # * PyX # * Bokeh # # Here, we will focus excelusively on matplotlib and the high-level plotting availabel within pandas. It is currently the most robust and feature-rich package available. # # ### Visual representation of data # # We require plots, charts and other statistical graphics for the written communication of quantitative ideas. # # They allow us to more easily convey relationships and reveal deviations from patterns. # # Gelman and Unwin 2011: # # > A well-designed graph can display more information than a table of the same size, and more information than numbers embedded in text. Graphical displays allow and encourage direct visual comparisons. # ## Matplotlib # # The easiest way to interact with matplotlib is via `pylab` in iPython. By starting iPython (or iPython notebook) in "pylab mode", both matplotlib and numpy are pre-loaded into the iPython session: # # ipython notebook --pylab # # You can specify a custom graphical backend (*e.g.* qt, gtk, osx), but iPython generally does a good job of auto-selecting. Now matplotlib is ready to go, and you can access the matplotlib API via `plt`. If you do not start iPython in pylab mode, you can do this manually with the following convention: # # import matplotlib.pyplot as plt plt.plot(np.random.normal(size=100), np.random.normal(size=100), 'ro') # The above plot simply shows two sets of random numbers taken from a normal distribution plotted against one another. 
The `'ro'` argument is a shorthand argument telling matplotlib that I wanted the points represented as red circles. # # This plot was expedient. We can exercise a little more control by breaking the plotting into a workflow: with mpl.rc_context(rc={'font.family': 'serif', 'font.weight': 'bold', 'font.size': 8}): fig = plt.figure(figsize=(6,3)) ax1 = fig.add_subplot(121) ax1.set_xlabel('some random numbers') ax1.set_ylabel('more random numbers') ax1.set_title("Random scatterplot") plt.plot(np.random.normal(size=100), np.random.normal(size=100), 'r.') ax2 = fig.add_subplot(122) plt.hist(np.random.normal(size=100), bins=15) ax2.set_xlabel('sample') ax2.set_ylabel('cumulative sum') ax2.set_title("Normal distrubution") plt.tight_layout() plt.savefig("normalvars.png", dpi=150) # matplotlib is a relatively low-level plotting package, relative to others. It makes very few assumptions about what constitutes good layout (by design), but has a lot of flexiblility to allow the user to completely customize the look of the output. # # If you want to make your plots look pretty like mine, steal the *matplotlibrc* file from [](http://www.huyng.com/posts/sane-color-scheme-for-matplotlib/). # # ## Plotting in Pandas # # On the other hand, Pandas includes methods for DataFrame and Series objects that are relatively high-level, and that make reasonable assumptions about how the plot should look. normals = pd.Series(np.random.normal(size=10)) normals.plot() # Notice that by default a line plot is drawn, and a light grid is included. All of this can be changed, however: normals.cumsum().plot(grid=False) # Similarly, for a DataFrame: variables = pd.DataFrame({'normal': np.random.normal(size=100), 'gamma': np.random.gamma(1, size=100), 'poisson': np.random.poisson(size=100)}) variables.cumsum(0).plot() # As an illustration of the high-level nature of Pandas plots, we can split multiple series into subplots with a single argument for `plot`: variables.cumsum(0).plot(subplots=True) # Or, we may want to have some series displayed on the secondary y-axis, which can allow for greater detail and less empty space: variables.cumsum(0).plot(secondary_y='normal') # If we would like a little more control, we can use matplotlib's `subplots` function directly, and manually assign plots to its axes: fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(12, 4)) for i,var in enumerate(['normal','gamma','poisson']): variables[var].cumsum(0).plot(ax=axes[i], title=var) axes[0].set_ylabel('cumulative sum') # ## Bar plots # # Bar plots are useful for displaying and comparing measurable quantities, such as counts or volumes. In Pandas, we just use the `plot` method with a `kind='bar'` argument. # # For this series of examples, let's load up the Titanic dataset: titanic = pd.read_excel("data/titanic.xls", "titanic") titanic.head() titanic.groupby('pclass').survived.sum().plot(kind='bar') titanic.groupby(['sex','pclass']).survived.sum().plot(kind='barh') death_counts = pd.crosstab([titanic.pclass, titanic.sex], titanic.survived.astype(bool)) death_counts.plot(kind='bar', stacked=True, color=['black','gold'], grid=False) # Another way of comparing the groups is to look at the survival *rate*, by adjusting for the number of people in each group. death_counts.div(death_counts.sum(1).astype(float), axis=0).plot(kind='barh', stacked=True, color=['black','gold']) # ## Histograms # # Frequenfly it is useful to look at the *distribution* of data before you analyze it. 
Histograms are a sort of bar graph that displays relative frequencies of data values; hence, the y-axis is always some measure of frequency. This can either be raw counts of values or scaled proportions. # # For example, we might want to see how the fares were distributed aboard the titanic: titanic.fare.hist(grid=False) # The `hist` method puts the continuous fare values into **bins**, trying to make a sensible décision about how many bins to use (or equivalently, how wide the bins are). We can override the default value (10): titanic.fare.hist(bins=30) # There are algorithms for determining an "optimal" number of bins, each of which varies somehow with the number of observations in the data series. # + sturges = lambda n: int(np.log2(n) + 1) square_root = lambda n: int(np.sqrt(n)) from scipy.stats import kurtosis doanes = lambda data: int(1 + np.log(len(data)) + np.log(1 + kurtosis(data) * (len(data) / 6.) ** 0.5)) n = len(titanic) sturges(n), square_root(n), doanes(titanic.fare.dropna()) # - titanic.fare.hist(bins=doanes(titanic.fare.dropna())) # A **density plot** is similar to a histogram in that it describes the distribution of the underlying data, but rather than being a pure empirical representation, it is an *estimate* of the underlying "true" distribution. As a result, it is smoothed into a continuous line plot. We create them in Pandas using the `plot` method with `kind='kde'`, where `kde` stands for **kernel density estimate**. titanic.fare.dropna().plot(kind='kde', xlim=(0,600)) # Often, histograms and density plots are shown together: titanic.fare.hist(bins=doanes(titanic.fare.dropna()), normed=True, color='lightseagreen') titanic.fare.dropna().plot(kind='kde', xlim=(0,600), style='r--') # Here, we had to normalize the histogram (`normed=True`), since the kernel density is normalized by definition (it is a probability distribution). # We will explore kernel density estimates more in the next section. # ## Boxplots # # A different way of visualizing the distribution of data is the boxplot, which is a display of common quantiles; these are typically the quartiles and the lower and upper 5 percent values. titanic.boxplot(column='fare', by='pclass', grid=False) # You can think of the box plot as viewing the distribution from above. The blue crosses are "outlier" points that occur outside the extreme quantiles. # One way to add additional information to a boxplot is to overlay the actual data; this is generally most suitable with small- or moderate-sized data series. bp = titanic.boxplot(column='age', by='pclass', grid=False) for i in [1,2,3]: y = titanic.age[titanic.pclass==i].dropna() # Add some random "jitter" to the x-axis x = np.random.normal(i, 0.04, size=len(y)) plt.plot(x, y, 'r.', alpha=0.2) # When data are dense, a couple of tricks used above help the visualization: # # 1. reducing the alpha level to make the points partially transparent # 2. adding random "jitter" along the x-axis to avoid overstriking # A related but inferior cousin of the box plot is the so-called dynamite plot, which is just a bar chart with half of an error bar. titanic.groupby('pclass')['fare'].mean().plot(kind='bar', yerr=titanic.groupby('pclass')['fare'].std()) # Why is this plot a poor choice? # # - bar charts should be used for measurable quantities (*e.g.* raw data), not estimates. The area of the bar does not represent anything, since these are estimates derived from the data. # - the "data-ink ratio" (*sensu* Edward Tufte) is very high. 
There are only 6 values represented here (3 means and 3 standard deviations). # - the plot hides the underlying data. # # A boxplot is **always** a better choice than a dynamite plot. # + data1 = [150, 155, 175, 200, 245, 255, 395, 300, 305, 320, 375, 400, 420, 430, 440] data2 = [225, 380] fake_data = pd.DataFrame([data1, data2]).transpose() p = fake_data.mean().plot(kind='bar', yerr=fake_data.std(), grid=False) # - fake_data = pd.DataFrame([data1, data2]).transpose() p = fake_data.mean().plot(kind='bar', yerr=fake_data.std(), grid=False) x1, x2 = p.xaxis.get_majorticklocs() plt.plot(np.random.normal(x1, 0.01, size=len(data1)), data1, 'ro') plt.plot([x2]*len(data2), data2, 'ro') # ### Exercise # # Using the Titanic data, create kernel density estimate plots of the age distributions of survivors and victims. # ## Scatterplots # # To look at how Pandas does scatterplots, let's reload the baseball sample dataset. baseball = pd.read_csv("data/baseball.csv") baseball.head() # Scatterplots are useful for data exploration, where we seek to uncover relationships among variables. There are no scatterplot methods for Series or DataFrame objects; we must instead use the matplotlib function `scatter`. plt.scatter(baseball.ab, baseball.h) plt.xlim(0, 700); plt.ylim(0, 200) # We can add additional information to scatterplots by assigning variables to either the size of the symbols or their colors. plt.scatter(baseball.ab, baseball.h, s=baseball.hr*10, alpha=0.5) plt.xlim(0, 700); plt.ylim(0, 200) plt.scatter(baseball.ab, baseball.h, c=baseball.hr, s=40, cmap='hot') plt.xlim(0, 700); plt.ylim(0, 200); # To view scatterplots of a large numbers of variables simultaneously, we can use the `scatter_matrix` function that was recently added to Pandas. It generates a matrix of pair-wise scatterplots, optiorally with histograms or kernel density estimates on the diagonal. _ = pd.scatter_matrix(baseball.loc[:,'r':'sb'], figsize=(12,8), diagonal='kde') # ## Trellis Plots # # One of the enduring strengths of carrying out statistical analyses in the R language is the quality of its graphics. In particular, the addition of ['s ggplot2 package](http://ggplot2.org) allows for flexible yet user-friendly generation of publication-quality plots. Its srength is based on its implementation of a powerful model of graphics, called the [Grammar of Graphics](http://vita.had.co.nz/papers/layered-grammar.pdf) (GofG). The GofG is essentially a theory of scientific graphics that allows the components of a graphic to be completely described. ggplot2 uses this description to build the graphic component-wise, by adding various layers. # # Pandas recently added functions for generating graphics using a GofG approach. Chiefly, this allows for the easy creation of **trellis plots**, which are a faceted graphic that shows relationships between two variables, conditioned on particular values of other variables. This allows for the representation of more than two dimensions of information without having to resort to 3-D graphics, etc. # # Let's use the `titanic` dataset to create a trellis plot that represents 4 variables at a time. This consists of 4 steps: # # 1. Create a `RPlot` object that merely relates two variables in the dataset # 2. Add a grid that will be used to condition the variables by both passenger class and sex # 3. Add the actual plot that will be used to visualize each comparison # 4. 
Draw the visualization # + from pandas.tools.rplot import * titanic = titanic[titanic.age.notnull() & titanic.fare.notnull()] tp = RPlot(titanic, x='age') tp.add(TrellisGrid(['pclass', 'sex'])) tp.add(GeomDensity()) _ = tp.render(plt.gcf()) # - # Using the cervical dystonia dataset, we can simultaneously examine the relationship between age and the primary outcome variable as a function of both the treatment received and the week of the treatment by creating a scatterplot of the data, and fitting a polynomial relationship between `age` and `twstrs`: cdystonia = pd.read_csv("data/cdystonia.csv", index_col=None) cdystonia.head() plt.figure(figsize=(12,12)) bbp = RPlot(cdystonia, x='age', y='twstrs') bbp.add(TrellisGrid(['week', 'treat'])) bbp.add(GeomScatter()) bbp.add(GeomPolyFit(degree=2)) _ = bbp.render(plt.gcf()) # We can use the `RPlot` class to represent more than just trellis graphics. It is also useful for displaying multiple variables on the same panel, using combinations of color, size and shapes to do so. cdystonia['site'] = cdystonia.site.astype(float) plt.figure(figsize=(6,6)) cp = RPlot(cdystonia, x='age', y='twstrs') cp.add(GeomPoint(colour=ScaleGradient('site', colour1=(1.0, 1.0, 0.5), colour2=(1.0, 0.0, 0.0)), size=ScaleSize('week', min_size=10.0, max_size=200.0), shape=ScaleShape('treat'))) _ = cp.render(plt.gcf()) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # RoadMap 11 - Torch NN Layers - Loss Functions # # 1. torch.nn.L1Loss - L1 Loss Function # 2. torch.nn.MSELoss - Mean Square Error Loss Function # 3. torch.nn.CrossEntropyLoss - Cross entropy loss function # 4. torch.nn.NLLLoss - Negative Log Likelihood loss function # 5. torch.nn.PoissonNLLLoss - Poisson Negative Log Likelihood loss function # 6. torch.nn.nn.KLDivLoss - Kullback Leiber Divergence loss function # 7. torch.nn.BCELoss - Binary cross entropy loss function # 8. torch.BCEWithLogitsLoss - Binary cross entropy loss with logits function # 9. torch.nn.MarginRankingLoss - Margin ranking loss function # 10. torch.nn.HingeEmbeddingLoss - Hinge Embedding Loss function # 11. torch.nn.MultiLabelMarginLoss - Multi Label Margin loss function # 12. torch.nn.SmoothL1Loss - Smooth L1 Loss function # 13. torch.nn.MultiLabelSoftMarginLoss - Multi Label Soft Margin Loss function # 14. torch.nn.CosineEmbeddingLoss - Cosine Embedding loss function # 15. torch.nn.MultiMarginLoss - Multi Margin loss function # 16. torch.nn.TripletMarginLoss - Triplet Margin Loss Function import os import sys import torch import numpy as np # + import torch.nn as nn from torchvision import transforms, datasets from PIL import Image import cv2 import matplotlib.pyplot as plt import torchvision # FUNCTIONAL modules - Implementing each module as functions import torch.nn.functional as F # - # ## Extra Blog Layers # # 1. https://medium.com/udacity-pytorch-challengers/a-brief-overview-of-loss-functions-in-pytorch-c0ddb78068f7 # # 2. https://machinelearningmastery.com/loss-and-loss-functions-for-training-deep-learning-neural-networks/ # # 3. 
https://machinelearningmastery.com/how-to-choose-loss-functions-when-training-deep-learning-neural-networks/ # + # Input image_name = "dog.jpg" image_pil = Image.open(image_name) transform = transforms.Compose([transforms.ToTensor()]) image = transform(image_pil).float() image_nchw = torch.unsqueeze(image, 0) print(image_nchw.size()) plt.imshow(image_pil) # - # ### L1 loss Function # # # 1. nn.L1Loss - Creates a criterion that measures the mean absolute value of the element-wise difference between input x and target y # - size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True # - reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True # - reduction (string, optional) – Specifies the reduction to apply to the output: ‘none’ | ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘elementwise_mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: ‘elementwise_mean’ # # # # Function # # $$\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad # l_n = \left| x_n - y_n \right|$$ # + print("Module implementation") loss = nn.L1Loss() input_data = torch.randn(10, requires_grad=True) target_data = torch.randn(10) output_loss = loss(input_data, target_data) output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Input Data = ", input_data) print("Output Loss = ", output_loss) print("\n") print("Functional implementation") output_loss = F.l1_loss(input_data, target_data, reduction='elementwise_mean') output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Output Loss = ", output_loss) print("\n") # - # ### Mean Square Error Loss # # 2. nn.MSELoss - Creates a criterion that measures the mean squared error between n elements in the input x and target y. # - size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True # - reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True # - reduction (string, optional) – Specifies the reduction to apply to the output: ‘none’ | ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘elementwise_mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. 
Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: ‘elementwise_mean’ # # # Function: # $$\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad # l_n = \left( x_n - y_n \right)^2$$ # + print("Module implementation") loss = nn.MSELoss() input_data = torch.randn(10, requires_grad=True) target_data = torch.randn(10) output_loss = loss(input_data, target_data) output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Input Data = ", input_data) print("Output Loss = ", output_loss) print("\n") print("Functional implementation") output_loss = F.mse_loss(input_data, target_data, reduction='elementwise_mean') output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Output Loss = ", output_loss) print("\n") # - # ### Cross Entropy Loss # # 3. nn.CrossEntropyLoss - This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class. It is useful when training a classification problem with C classes. If provided, the optional argument weight should be a 1D Tensor assigning weight to each of the classes. This is particularly useful when you have an unbalanced training set. # - weight (Tensor, optional) – a manual rescaling weight given to each class. If given, has to be a Tensor of size C # - size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True # - ignore_index (int, optional) – Specifies a target value that is ignored and does not contribute to the input gradient. When size_average is True, the loss is averaged over non-ignored targets. # - reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True # - reduction (string, optional) – Specifies the reduction to apply to the output: ‘none’ | ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘elementwise_mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: ‘elementwise_mean’ # # Function: # # $$ \text{loss}(x, class) = weight[class] \left(-x[class] + \log\left(\sum_j \exp(x[j])\right)\right) $$ # # + print("Module Implementation") loss = nn.CrossEntropyLoss() softmax = nn.Softmax() input_data = torch.randn(3, 4, requires_grad=True) input_data = softmax(input_data) target_data = torch.tensor([0, 1, 2]) output_loss = loss(input_data, target_data) output_loss.backward(retain_graph=True) # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. 
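# NOTE: retain_graph=True is needed here because input_data is itself the output
# of the Softmax module, and the functional F.cross_entropy call below
# backpropagates through that same softmax graph a second time; without it the
# first backward() would free the intermediate buffers. Note also that
# nn.CrossEntropyLoss applies log-softmax internally, so it is normally fed raw
# logits rather than softmax outputs as is done in this example.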
print("Input Data = ", input_data) print("\n") print("Target Data = ", target_data) print("\n") print("Output Loss = ", output_loss) print("\n") print("\n") print("Functional implementation") output_loss = F.cross_entropy(input_data, target_data) output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Output Loss = ", output_loss) print("\n") # - # ### Negative log likelihood loss # # 4. nn.NLLLoss - The negative log likelihood loss. It is useful to train a classification problem with C classes. If provided, the optional argument weight should be a 1D Tensor assigning weight to each of the classes. This is particularly useful when you have an unbalanced training set. # # - weight (Tensor, optional) – a manual rescaling weight given to each class. If given, it has to be a Tensor of size C. Otherwise, it is treated as if having all ones. # - size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True # - ignore_index (int, optional) – Specifies a target value that is ignored and does not contribute to the input gradient. When size_average is True, the loss is averaged over non-ignored targets. # - reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True # - reduction (string, optional) – Specifies the reduction to apply to the output: ‘none’ | ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘elementwise_mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: ‘elementwise_mean’ # # Function: # # $$\begin{split}\ell(x, y) = \begin{cases} # \sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n}} l_n, & \text{if}\; # \text{size_average} = \text{True},\\ # \sum_{n=1}^N l_n, & \text{if}\; # \text{size_average} = \text{False}. # \end{cases}\end{split} # $$ # + print("Module Implementation") m = nn.LogSoftmax() loss = nn.NLLLoss() # input is of size N x C = 3 x 5 input_data = torch.randn(3, 10, requires_grad=True) input_data = m(input_data) # each element in target has to have 0 <= value < C target_data = torch.tensor([1, 0, 4]) output_loss = loss(input_data, target_data) output_loss.backward(retain_graph=True) print("Input Data = ", input_data) print("\n") print("Target Data = ", target_data) print("\n") print("Output Loss = ", output_loss) print("\n") print("\n") print("Functional implementation") output_loss = F.nll_loss(input_data, target_data) output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Output Loss = ", output_loss) print("\n") # - # ### Poisson Negative log likelihood loss # # 5. nn.PoissonNLLLoss - Negative log likelihood loss with Poisson distribution of target. # - log_input (bool, optional) – if True the loss is computed as exp(input)−target∗input, if False the loss is input−target∗log(input+eps) # - full (bool, optional) – whether to compute full loss, i. 
e. to add the Stirling approximation term target∗log(target)−target+0.5∗log(2πtarget). # - size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True # - eps (float, optional) – Small value to avoid evaluation of log(0), when log_input == False. Default: 1e-8 # - reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True # - reduction (string, optional) – Specifies the reduction to apply to the output: ‘none’ | ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘elementwise_mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: ‘elementwise_mean’ # # Function: # # $$\begin{align}\begin{aligned}\text{target} \sim \mathrm{Poisson}(\text{input})\\\text{loss}(\text{input}, \text{target}) = \text{input} - \text{target} * \log(\text{input}) # + \log(\text{target!})\end{aligned}\end{align}$$ # # + print("Module Implementation") loss = nn.PoissonNLLLoss() input_data = torch.randn(5, 2, requires_grad=True) target_data = torch.randn(5, 2) output_loss = loss(input_data, target_data) output_loss.backward(retain_graph=True) print("Input Data = ", input_data) print("\n") print("Target Data = ", target_data) print("\n") print("Output Loss = ", output_loss) print("\n") print("\n") print("Functional implementation") output_loss = F.poisson_nll_loss(input_data, target_data) output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Output Loss = ", output_loss) print("\n") # - # ### Kullback Leiber Divergence loss # # 6. nn.KLDivLoss - KL divergence is a useful distance measure for continuous distributions and is often useful when performing direct regression over the space of (discretely sampled) continuous output distributions. # - size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True # - reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True # - reduction (string, optional) – Specifies the reduction to apply to the output: ‘none’ | ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘elementwise_mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. 
Default: ‘elementwise_mean’ # # + print("Module Implementation") loss = nn.KLDivLoss(size_average=False) log_softmax = nn.LogSoftmax(1) softmax = nn.Softmax(1) batch_size = 2 input_data = torch.randn(batch_size, 3, requires_grad=True) log_input_data = log_softmax(input_data) target_data = torch.randn(batch_size, 3, requires_grad=True) softmax_target_data = softmax(target_data) output_loss = loss(log_input_data, softmax_target_data) / batch_size output_loss.backward(retain_graph=True) print("Input Data = ", input_data) print("\n") print("Target Data = ", target_data) print("\n") print("Output Loss = ", output_loss) print("\n") print("\n") print("Functional implementation") output_loss = F.kl_div(log_input_data, softmax_target_data, size_average=False) / batch_size output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Output Loss = ", output_loss) print("\n") # - # ### Binary Cross Entropy Loss # # 7. nn.BCELoss - Creates a criterion that measures the Binary Cross Entropy between the target and the output # - weight (Tensor, optional) – a manual rescaling weight given to the loss of each batch element. If given, has to be a Tensor of size “nbatch”. # - size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True # - reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True # - reduction (string, optional) – Specifies the reduction to apply to the output: ‘none’ | ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘elementwise_mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: ‘elementwise_mean’ # # # # Function: # # $$\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad # l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right]$$ # + print("Module Implementation") loss = nn.BCELoss() sigmoid = nn.Sigmoid() input_data = torch.randn(5, requires_grad=True) sigmoid_input_data = sigmoid(input_data) target_data = torch.empty(5).random_(2) # Binary Target output_loss = loss(sigmoid_input_data, target_data) output_loss.backward(retain_graph=True) # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Input Data = ", input_data) print("\n") print("Target Data = ", target_data) print("\n") print("Output Loss = ", output_loss) print("\n") print("\n") print("Functional implementation") output_loss = F.binary_cross_entropy(sigmoid_input_data, target_data) output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Output Loss = ", output_loss) print("\n") # - # ### Binary Cross Entropy with Logits Loss # # 8. nn.BCEWithLogitsLoss - This loss combines a Sigmoid layer and the BCELoss in one single class. 
This version is more numerically stable than using a plain Sigmoid followed by a BCELoss as, by combining the operations into one layer, we take advantage of the log-sum-exp trick for numerical stability. # - weight (Tensor, optional) – a manual rescaling weight given to the loss of each batch element. If given, has to be a Tensor of size “nbatch”. # - size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True # - reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True # - reduction (string, optional) – Specifies the reduction to apply to the output: ‘none’ | ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘elementwise_mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: ‘elementwise_mean’ # - pos_weight – a weight of positive examples. Must be a vector with length equal to the number of classes. # # # # Function # # $$\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad # l_n = - w_n \left[ t_n \cdot \log \sigma(x_n) # + (1 - t_n) \cdot \log (1 - \sigma(x_n)) \right]$$ # + print("Module implementation") loss = nn.BCEWithLogitsLoss() # expects raw logits; the sigmoid is applied inside the loss input_data = torch.randn(5, requires_grad=True) target_data = torch.empty(5).random_(2) # Binary Target output_loss = loss(input_data, target_data) output_loss.backward(retain_graph=True) # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Input Data = ", input_data) print("\n") print("Target Data = ", target_data) print("\n") print("Output Loss = ", output_loss) print("\n") print("\n") print("Functional implementation") output_loss = F.binary_cross_entropy_with_logits(input_data, target_data) output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Output Loss = ", output_loss) print("\n") # - # ### Margin Ranking Loss # # 9. nn.MarginRankingLoss - Creates a criterion that measures the loss given inputs x1, x2, two 1D mini-batch Tensors, and a label 1D mini-batch tensor y with values (1 or -1). If y == 1 then it is assumed the first input should be ranked higher (have a larger value) than the second input, and vice-versa for y == -1. # - margin (float, optional) – Has a default value of 0. # - size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True # - reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. 
When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True # - reduction (string, optional) – Specifies the reduction to apply to the output: ‘none’ | ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘elementwise_mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: ‘elementwise_mean’ # # # Function: # # $$\text{loss}(x, y) = \max(0, -y * (x1 - x2) + \text{margin})$$ # # + print("Module implementation") loss = nn.MarginRankingLoss() input_data1 = torch.randn(3, requires_grad=True) input_data2 = torch.randn(3, requires_grad=True) target_data = torch.tensor([-1., 1., 1.]) output_loss = loss(input_data1, input_data2, target_data) output_loss.backward(retain_graph=True) # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Input Data1 = ", input_data1) print("\n") print("Input Data2 = ", input_data2) print("\n") print("Target Data = ", target_data) print("\n") print("Output Loss = ", output_loss) print("\n") print("\n") print("Functional implementation") output_loss = F.margin_ranking_loss(input_data1, input_data2, target_data) output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Output Loss = ", output_loss) print("\n") # - # ### Hinge Embedding Loss # # 10. nn.HingeEmbeddingLoss - Measures the loss given an input tensor x and a labels tensor y containing values (1 or -1). This is usually used for measuring whether two inputs are similar or dissimilar, e.g. using the L1 pairwise distance as x, and is typically used for learning nonlinear embeddings or semi-supervised learning. # - margin (float, optional) – Has a default value of 1. # - size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True # - reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True # - reduction (string, optional) – Specifies the reduction to apply to the output: ‘none’ | ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘elementwise_mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: ‘elementwise_mean’ # # # # # Function: # $$\begin{split}l_n = \begin{cases} # x_n, & \text{if}\; y_n = 1,\\ # \max \{0, \Delta - x_n\}, & \text{if}\; y_n = -1, # \end{cases}\end{split}$$ # + print("Module implementation") loss = nn.HingeEmbeddingLoss() input_data = torch.randn(3, requires_grad=True) target_data = torch.tensor([-1., 1., 1.]) output_loss = loss(input_data, target_data) output_loss.backward(retain_graph=True) # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. 
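# Added sketch (not part of the original notebook): in practice nn.HingeEmbeddingLoss is
# usually applied to a distance between two inputs, e.g. the L1 pairwise distance between
# two embeddings, rather than to raw random values. emb1 and emb2 below are hypothetical
# embedding tensors used only for illustration; `loss` and `target_data` come from above.
emb1 = torch.randn(3, 8)
emb2 = torch.randn(3, 8)
distances = F.pairwise_distance(emb1, emb2, p=1)  # one L1 distance per pair of rows
distance_loss = loss(distances, target_data)  # y = 1 pulls the distance down, y = -1 pushes it above the margin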
print("Input Data = ", input_data) print("\n") print("Target Data = ", target_data) print("\n") print("Output Loss = ", output_loss) print("\n") print("\n") print("Functional implementation") output_loss = F.hinge_embedding_loss(input_data, target_data) output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Output Loss = ", output_loss) print("\n") # - # ### Multi Label Margin Loss # # 11. nn.MultiLabelMarginLoss - Creates a criterion that optimizes a multi-class multi-classification hinge loss (margin-based loss) between input x (a 2D mini-batch Tensor) and output y (which is a 2D Tensor of target class indices). For each sample in the mini-batch # - size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True # - reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True # - reduction (string, optional) – Specifies the reduction to apply to the output: ‘none’ | ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘elementwise_mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: ‘elementwise_mean’ # # # # # # Function: # # $$\text{loss}(x, y) = \sum_{ij}\frac{\max(0, 1 - (x[y[j]] - x[i]))}{\text{x.size}(0)}$$ # + print("Module implementation") loss = nn.MultiLabelMarginLoss() softmax = nn.Softmax() input_data = torch.randn(3, requires_grad=True) input_data = softmax(input_data) target_data = torch.tensor([0, 1, 2]) output_loss = loss(input_data, target_data) output_loss.backward(retain_graph=True) # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Input Data = ", input_data) print("\n") print("Target Data = ", target_data) print("\n") print("Output Loss = ", output_loss) print("\n") print("\n") print("Functional implementation") output_loss = F.multilabel_margin_loss(input_data, target_data) output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Output Loss = ", output_loss) print("\n") # - # ### Smooth L1 Loss # # 12. nn.SmoothL1Loss - Creates a criterion that uses a squared term if the absolute element-wise error falls below 1 and an L1 term otherwise. It is less sensitive to outliers than the MSELoss and in some cases prevents exploding gradients (e.g. see “Fast R-CNN” paper by ). # - size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True # - reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. 
When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True # - reduction (string, optional) – Specifies the reduction to apply to the output: ‘none’ | ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘elementwise_mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: ‘elementwise_mean’ # # # # # Function: # # $$\text{loss}(x, y) = \frac{1}{n} \sum_{i} z_{i}$$ # # Where, $$z_{i}\text{ is }$$ # # \begin{split}z_{i} = # \begin{cases} # 0.5 (x_i - y_i)^2, & \text{if } |x_i - y_i| < 1 \\ # |x_i - y_i| - 0.5, & \text{otherwise } # \end{cases}\end{split} # + print("Module implementation") loss = nn.SmoothL1Loss() input_data = torch.randn(3, requires_grad=True) target_data = torch.randn(3, requires_grad=False) output_loss = loss(input_data, target_data) output_loss.backward(retain_graph=True) # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Input Data = ", input_data) print("\n") print("Target Data = ", target_data) print("\n") print("Output Loss = ", output_loss) print("\n") print("\n") print("Functional implementation") output_loss = F.smooth_l1_loss(input_data, target_data) output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Output Loss = ", output_loss) print("\n") # - # ### Multi Label Soft Margin loss # # 13. nn.MultiLabelSoftMarginLoss - Creates a criterion that optimizes a multi-label one-versus-all loss based on max-entropy, between input x and target y of size (N, C) # - weight (Tensor, optional) – a manual rescaling weight given to each class. If given, it has to be a Tensor of size C. Otherwise, it is treated as if having all ones. # - size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True # - reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True # - reduction (string, optional) – Specifies the reduction to apply to the output: ‘none’ | ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘elementwise_mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: ‘elementwise_mean’ # # # # Function: # # $$loss(x, y) = - \sum_i y[i] * \log((1 + \exp(-x[i]))^{-1}) # + (1-y[i]) * \log\left(\frac{\exp(-x[i])}{(1 + \exp(-x[i]))}\right) # $$ # + print("Module implementation") loss = nn.MultiLabelSoftMarginLoss() input_data = torch.randn((1, 3), requires_grad=True) target_data = torch.randn((1, 3), requires_grad=True) output_loss = loss(input_data, target_data) output_loss.backward(retain_graph = True) # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. 
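# Added sketch (not part of the original notebook): nn.MultiLabelSoftMarginLoss expects a
# multi-hot binary target of shape (N, C), so the random-valued target above is only a
# placeholder. binary_target below is a hypothetical valid label tensor for the same 1 x 3 input.
binary_target = torch.empty(1, 3).random_(2)  # each of the 3 labels is independently 0 or 1
binary_output_loss = loss(input_data, binary_target)  # same criterion, now with well-formed labels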
print("Input Data = ", input_data) print("\n") print("Target Data = ", target_data) print("\n") print("Output Loss = ", output_loss) print("\n") print("\n") print("Functional implementation") output_loss = F.multilabel_soft_margin_loss(input_data, target_data) output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Output Loss = ", output_loss) print("\n") # - # ### Cosine Embedding Loss # # 14. nn.CosineEmbeddingLoss - Creates a criterion that measures the loss given input tensors x1, x2 and a Tensor label y with values 1 or -1. This is used for measuring whether two inputs are similar or dissimilar, using the cosine distance, and is typically used for learning nonlinear embeddings or semi-supervised learning. # - margin (float, optional) – Should be a number from -1 to 1, 0 to 0.5 is suggested. If margin is missing, the default value is 0. # - size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True # - reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True # - reduction (string, optional) – Specifies the reduction to apply to the output: ‘none’ | ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘elementwise_mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: ‘elementwise_mean’ # # # Function: # # $$\begin{split}\text{loss}(x, y) = # \begin{cases} # 1 - \cos(x_1, x_2), & \text{if } y == 1 \\ # \max(0, \cos(x_1, x_2) - \text{margin}), & \text{if } y == -1 # \end{cases}\end{split} # $$ # + print("Module Implementation") loss = nn.CosineEmbeddingLoss() input_data1 = torch.randn(3, 3, requires_grad=True) input_data2 = torch.randn(3, 3, requires_grad=True) target_data = torch.tensor([-1., 1., 1.]) output_loss = loss(input_data1, input_data2, target_data) output_loss.backward(retain_graph = True) # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Input Data1 = ", input_data1) print("\n") print("Input Data2 = ", input_data2) print("\n") print("Target Data = ", target_data) print("\n") print("Output Loss = ", output_loss) print("\n") print("\n") print("Functional implementation") output_loss = F.cosine_embedding_loss(input_data1, input_data2, target_data) output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Output Loss = ", output_loss) print("\n") # - # ### Multi margin loss # # 15. nn.MultiMarginLoss - Creates a criterion that optimizes a multi-class classification hinge loss (margin-based loss) between input x (a 2D mini-batch Tensor) and output y (which is a 1D tensor of target class indices, 0≤y≤x.size(1)) # - p (int, optional) – Has a default value of 1. 1 and 2 are the only supported values # - margin (float, optional) – Has a default value of 1. 
# - weight (Tensor, optional) – a manual rescaling weight given to each class. If given, it has to be a Tensor of size C. Otherwise, it is treated as if having all ones. # - size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True # - reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True # - reduction (string, optional) – Specifies the reduction to apply to the output: ‘none’ | ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘elementwise_mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: ‘elementwise_mean’ # # # Function: # # $$\text{loss}(x, y) = \frac{\sum_i \max(0, \text{margin} - x[y] + x[i])^p}{\text{x.size}(0)}$$ # + print("Module Implementation") loss = nn.MultiMarginLoss() # input is of size N x C = 3 x 5; target values must satisfy 0 <= value < C input_data = torch.randn(3, 5, requires_grad=True) target_data = torch.tensor([1, 2, 3]) output_loss = loss(input_data, target_data) output_loss.backward(retain_graph = True) # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Input Data = ", input_data) print("\n") print("Target Data = ", target_data) print("\n") print("Output Loss = ", output_loss) print("\n") print("\n") print("Functional implementation") output_loss = F.multi_margin_loss(input_data, target_data) output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Output Loss = ", output_loss) print("\n") # - # ### Triplet Margin Loss # # 16. nn.TripletMarginLoss - Creates a criterion that measures the triplet loss given input tensors x1, x2, x3 and a margin with a value greater than 0. This is used for measuring a relative similarity between samples. # - margin (float, optional) – Default: 1. # - p (int, optional) – The norm degree for pairwise distance. Default: 2. # - swap (bool, optional) – The distance swap is described in detail in the paper Learning shallow convolutional feature descriptors with triplet losses by , et al. Default: False. # - size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True # - reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True # - reduction (string, optional) – Specifies the reduction to apply to the output: ‘none’ | ‘elementwise_mean’ | ‘sum’. 
‘none’: no reduction will be applied, ‘elementwise_mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: ‘elementwise_mean’ # # # Function: # # $$L(a, p, n) = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\} # \text{where } # d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_p$$ # + print("Module implementation") triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2) input_data1 = torch.randn(100, 128, requires_grad=True) input_data2 = torch.randn(100, 128, requires_grad=True) input_data3 = torch.randn(100, 128, requires_grad=True) output_loss = triplet_loss(input_data1, input_data2, input_data3) output_loss.backward(retain_graph = True) print("Output Loss = ", output_loss) print("\n") print("Functional implementation") output_loss = F.triplet_margin_loss(input_data1, input_data2, input_data3, margin=1.0, p=2) output_loss.backward() # loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. print("Output Loss = ", output_loss) print("\n") # - # ## Author - Tessellate Imaging - https://www.tessellateimaging.com/ # # ## Monk Library - https://github.com/Tessellate-Imaging/monk_v1 # # Monk is an open-source low-code tool for computer vision and deep learning # # ### Monk features # - low-code # - unified wrapper over major deep learning frameworks - keras, pytorch, gluoncv # - syntax invariant wrapper # # # ### Enables # - to create, manage and version control deep learning experiments # - to compare experiments across training metrics # - to quickly find best hyper-parameters # # # ### At present it only supports transfer learning, but we are working each day to incorporate # - GUI based custom model creation # - various object detection and segmentation algorithms # - deployment pipelines to cloud and local platforms # - acceleration libraries such as TensorRT # - preprocessing and post processing libraries # # ## To contribute to Monk AI or Pytorch RoadMap repository raise an issue in the git-repo or DM us on LinkedIn # - Abhishek - https://www.linkedin.com/in/abhishek-kumar-annamraju/ # - Akash - https://www.linkedin.com/in/akashdeepsingh01/ # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # %load_ext autoreload # %autoreload 2 # + import codecs fiqh_file = '/home/jvdzwaan/code/fiqh_corpus/0179MalikIbnAnas.Muwatta.txt' with codecs.open(fiqh_file, encoding='utf-8') as f: text = f.read() # + import re md_header = re.compile(r'#(.+)') result = md_header.finditer(text) print result for m in result: print m.group() # - text = re.sub(r'#META#(.+)\n', '', text) text = re.sub(r'######OpenITI#\n', '', text) # + fiqh_file = '/home/jvdzwaan/data/tmp/adh/test/data/0274AhmadBarqi.Mahasin.txt' with codecs.open(fiqh_file, encoding='utf-8') as f: text = f.read() # + quotes = r'@(.+?)@' pages = r'PageV(\d{2})P(\d+)' milestones = r'Milestone\d+' regex = re.compile(pages) result = regex.finditer(text) for m in result: print m.group() # + clean_file = '/home/jvdzwaan/data/tmp/adh/test/data/0274AhmadBarqi.Mahasin-clean.txt' with codecs.open(clean_file, encoding='utf-8') as f: lines = f.readlines() # + fiqh_file = '/home/jvdzwaan/data/tmp/adh/test/data/0274AhmadBarqi.Mahasin.txt' with codecs.open(fiqh_file, encoding='utf-8') as f: lines_orig = f.readlines() # - 
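# The regular expressions defined above (`quotes`, `pages`, `milestones`) can also be used to
# strip the OpenITI markup before comparing the two versions of the text. The cell below is an
# added sketch, not part of the original notebook; it only uses the `text`, `quotes`, `pages`
# and `milestones` variables defined in the cells above.

# +
stripped = re.sub(quotes, r'\1', text)  # keep the quoted text, drop the surrounding @ markers
stripped = re.sub(pages, '', stripped)  # remove PageVxxPxxx page markers
stripped = re.sub(milestones, '', stripped)  # remove Milestone tags
# -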
print len(lines), len(lines_orig) # + import nltk from nltk import word_tokenize from Bio import pairwise2 import six import re import string def tokenize(text): tokens = word_tokenize(text) # nltk tokenizer replaces " (double quotes) with `` and ''. # We want to keep the double quotes, so replace them again. tokens = ['"' if t == '``' or t == "''" else t for t in tokens] return tokens def get_spaces_pattern(text): # replace regular expressions special characters for p in ('(', ')'): text = text.replace(p, '#') tokens = tokenize(text) m = re.match(r'( *)'.join(tokens), text) return m.groups() def merge_sentences(l1, l2): if l1 == l2: print 'same!' return l1 merged = [] print 'sentence' idx1 = 0 idx2 = 0 print type(l1) print type(l2) #print len(l1), len(l2) # normalize spaces l1 = re.sub(r' +', u' ', l1) l2 = re.sub(r' +', u' ', l2) # remove trailing whitespace l1 = l1.strip() l2 = l2.strip() #print len(l1), len(l2) tokens1 = tokenize(l1) tokens2 = tokenize(l2) print len(tokens1), len(tokens2) print 'l1 is printable: ', all(c in string.printable for c in l1) print 'l2 is printable: ', all(c in string.printable for c in l2) # get spaces patterns try: text = l1 spaces1 = get_spaces_pattern(text) except: print 'Spaces1 error' print l1 print u'#'.join(tokens1) print text try: text = l2 spaces2 = get_spaces_pattern(text) except: print 'Spaces2 error' print len(spaces1), len(spaces2) print '---' alignment = pairwise2.align.localms(tokens1,tokens2,2,-1,-0.5,-0.1, gap_char=["GAP"]) #print alignment #print len(alignment) for t1, t2 in zip(alignment[0][0], alignment[0][1]): print t1, t2 if t1 == t2: #print 'equal' #print_char(1, idx1+1, l1[idx1+1]) merged.append(t1) try: merged.append(spaces1[idx1]) if spaces1[idx1] == u' ': print 'adding space based on l1' except IndexError: # no space to be added pass idx1 += 1 idx2 += 1 elif t1 == 'GAP': merged.append(t2) try: merged.append(spaces2[idx2]) if spaces2[idx2] == ' ': print 'adding space based on l2' except IndexError: pass idx2 += 1 elif t2 == 'GAP': merged.append(t1) try: merged.append(spaces1[idx1]) if spaces1[idx1] == u' ': print 'adding space based on l1' except IndexError: pass idx1 += 1 else: print 'Problem!' merged.append('\n') return ''.join(merged) nltk.download("punkt") merged = [] for l1, l2 in zip(lines[:10], lines_orig[:10]): print l1 print l2 merged_sentence = merge_sentences(l1, l2) merged.append(merged_sentence) with codecs.open('/home/jvdzwaan/data/tmp/adh/test/data/0274AhmadBarqi.Mahasin-merged.txt', 'wb', encoding='utf-8') as f: f.write(''.join(merged)) # - # + s1 = lines[1] s2 = lines_orig[1] s1 = s1.strip() s2 = s2.strip() s1 = s1.replace('(', '#') s1 = s1.replace(')', '#') tokens1 = word_tokenize(s1) print s1 # - print tokens1 r'( *)'.join(tokens1) m = re.match(r'( *)'.join(tokens1), s1) print m print m.groups() tokens = ['``', "''", 'bla'] tokens = ['"' if t == '``' or t == "''" else t for t in tokens] print tokens s1 = 'This is a "test" sentence.' tokens1 = tokenize(s1) print tokens1 print r'( *)'.join(tokens1) m = re.match(r'( *)'.join(tokens1), s1) print m.groups() print '"{}"'.format(m.group(5)) #for match in m: # print m.group() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
# # Perform a regression on the diabetes data # ## Import libraries import pickle import os import argparse import matplotlib.pyplot as plt from sklearn.datasets import load_diabetes from sklearn.linear_model import Ridge from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.externals import joblib import numpy as np import json import subprocess from typing import Tuple, List # ## Load diabetes dataset X, y = load_diabetes(return_X_y=True) columns = ["age", "gender", "bmi", "bp", "s1", "s2", "s3", "s4", "s5", "s6"] # ## Prepare data # + # X = X[:,0:5] # - # ## Split the data into a training and test dataset # + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) data = {"train": {"X": X_train, "y": y_train}, "test": {"X": X_test, "y": y_test}} print ("Data contains", len(data['train']['X']), "training samples and",len(data['test']['X']), "test samples") # - X_train.shape, X_test.shape # ## Set a random alpha (regularization strength) for the Ridge regression alphas = np.arange(0.0, 1.0, 0.05) alpha = alphas[np.random.choice(alphas.shape[0], 1, replace=False)][0] print(alpha) # ## Create a Ridge regression and train on the training dataset reg = Ridge(alpha=alpha) reg.fit(data["train"]["X"], data["train"]["y"]) # ## Save the trained model to disk # + model_name = "sklearn_regression_model.pkl" with open(model_name, "wb") as file: joblib.dump(value=reg, filename=model_name) # - # ## Load the model from disk reg2 = joblib.load(model_name) # ## Perform inference on the trained model and print the MSE (mean squared error) preds = reg2.predict(data["test"]["X"]) print("mse", mean_squared_error(preds, data["test"]["y"])) preds # # Next: # # [Configure Azure ML](./01-aml-configuration.ipynb) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %pylab inline import json import pandas as pd # + schoolframe = pd.read_json('schoolsdupe.json') schoolframe.head() # - schoolframe[schoolframe['county'].str.contains('Carlow')].groupby('destination').sum()['destinationnumber'] schoolframe[schoolframe['private'].str.contains('True')].groupby('destination').sum()['destinationnumber'] schoolframe[schoolframe['private'].str.contains('True')].sum()['destinationnumber'] (100 * schoolframe[schoolframe['private'].str.contains('True')].groupby('destination').sum()['destinationnumber'] ) / schoolframe[schoolframe['private'].str.contains('True')].sum()['destinationnumber'] schoolframe[schoolframe['private'].str.contains('False')].groupby('destination').sum()['destinationnumber'] (100 * schoolframe[schoolframe['private'].str.contains('False')].groupby('destination').sum()['destinationnumber'] ) / schoolframe[schoolframe['private'].str.contains('False')].sum()['destinationnumber'] # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + [markdown] id="0FE_3SbEHHAK" colab_type="text" # __Goal__ # # Here we solve 1d Dirichlet problem with q_learning. The same example with value iteration is given [here](https://github.com/songqsh/foo1/blob/master/src/value_iter_dirichlet_1d_v01.ipynb). 
# # __Conclusion__ # # - Q_learning is kind of Monte carlo and much more instable than value iteration in 1-d problem. # But it may work for high dimensional problem while value iteration may not. # # - Another observation is the quality of the solution highly depends on the initial q_table. In particular, one can feed initial q_value with the exact solution of continuous time problem, if it is available. # + id="HlUEVd6451s0" colab_type="code" colab={} import numpy as np import time start_time = time.time() from tqdm import tqdm import matplotlib.pyplot as plt # + id="KsXKcBQV57_J" colab_type="code" colab={} #PDE to be solved class pde: def __init__(self): #pde config self.name = 'HJB 1d' self.U_LIM = 1 #upper limit for state self.L_LIM = 0 #lower limit for state self.lambda_ = 0. #discount rate self.sigma = 1. #diffusion coefficient self.drift = lambda x,a: a #drift coefficient function self.run_cost = lambda x, a: (a**2+1)/2. C1 = 1; C2 = 1 self.term_cost = lambda x: - np.log(C1*np.exp(x) + C2*np.exp(-x)) #pde exact solution if available self.exact_sol = self.term_cost # + id="ph7MvtJ06BYu" colab_type="code" colab={} def mdp_space_config(self, NUM = 5 #num of meshes in one unit state space ): self.NUM = NUM self.h = 1./self.NUM #mesh size in state space self.s_space = np.arange(self.L_LIM, self.U_LIM+self.h, self.h) #state space self.a_space = np.arange(2*self.L_LIM, 2*self.U_LIM + self.h, self.h) #action space self.del_ind_space = np.array([-1,1]) #space of delta (1-step) index: -1 means left, 1 means right move self.term_ind_space = np.array([0, self.s_space.size-1]) #space of terminal state indice #q-table and state value initialization self.s_val = np.zeros(self.s_space.size) self.q_table = np.zeros([self.s_space.size, self.a_space.size]) ''' #for test purpose, adjust initialization as exact solution self.s_val = self.exact_sol(self.s_space) for i in range(self.s_space.size): self.q_table[i] = self.s_val[i]*np.ones(self.a_space.size) ''' print('>>>>> q_table size is %i' %(self.q_table.size)) #s_val and q-table terminal setup for i in self.term_ind_space: self.s_val[i] = self.term_cost(self.s_space[i]) for j in range(self.a_space.size): self.q_table[i,j] = self.term_cost(self.s_space[i]) pde.mdp_space_config = mdp_space_config # + id="lqn0iaRM6Hvg" colab_type="code" colab={} #transition probability #output is probability (np array) on del_ind_space #central fdm def mdp_trans_prob_central(self,x,a): tp = np.zeros(self.del_ind_space.shape) b_ = self.drift(x,a) tp[0] = (-b_*self.h + self.sigma**2)/(2*self.sigma**2) tp[1] = (b_*self.h + self.sigma**2)/(2*self.sigma**2) #correction on prob if tp[1]<0: tp = tp - tp[1] tp = tp/tp.sum() print('>>>>corrected probability due to negativity') return tp pde.mdp_trans_prob_central = mdp_trans_prob_central # + id="6nsoo9DE6aYv" colab_type="code" colab={} def q_learning(self, n_epoch = 50000, learning_rate = 0.001, start_state = 0.5): start_ind = np.int((start_state - self.L_LIM)/self.h) Lambda_ = 1 + self.lambda_* self.h**2/ self.sigma**2 # reciprocal of discount factor for epoch in tqdm(range(n_epoch)): now_ind_ = start_ind #start while now_ind_ not in self.term_ind_space: act_ind_ = np.argmin(self.q_table[now_ind_]) #choose action index x_ = self.s_space[now_ind_] #current state a_ = self.a_space[act_ind_] #current action run_cost_ = self.run_cost(x_,a_)*self.h**2/self.sigma**2 #compute running cost tp_ = self.mdp_trans_prob_central(x_, a_) #transition probability sampling_ = np.random.binomial(1, tp_[0]) #random sampling del_ind_ = 1 # to the 
right if sampling_ == 0: del_ind_ = -1 #to the left next_ind_ = now_ind_ + del_ind_ #next state index del_q = run_cost_ + self.s_val[next_ind_]/Lambda_ - self.q_table[now_ind_,act_ind_] #delta of q value for update self.q_table[now_ind_, act_ind_] += learning_rate*del_q #update q_value self.s_val[now_ind_] = np.min(self.q_table[now_ind_]) #sync q_table with s_val now_ind_ = next_ind_ return self.s_val[start_ind] pde.q_learning = q_learning # + id="VDWH2xq4MEHh" colab_type="code" outputId="02d894d8-2d07-4296-85e8-f27253561108" colab={"base_uri": "https://localhost:8080/", "height": 221} ans = 0 n = 5 for i in range(n): pde1 = pde() pde1.mdp_space_config(NUM=8) ans += pde1.q_learning() print('\n soln is'+str(ans/n)) # + id="97U7TqOjNP3x" colab_type="code" outputId="acf26057-098b-46a8-a334-476fa9cfff0d" colab={"base_uri": "https://localhost:8080/", "height": 34} end_time = time.time() print('elapsed time is %f seconds' %(end_time - start_time)) # + id="hix7_ygDLRPT" colab_type="code" outputId="d0be8950-1746-4ebb-e353-41e4f1734d8a" colab={"base_uri": "https://localhost:8080/", "height": 34} pde1.exact_sol(0.5) # + id="1jxcjJE8F2Qd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="47dbf966-51ec-494b-dc5a-5b8fb3590f17" pde1.s_val # + id="oxM3yVaTF57c" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="16f336a1-ed8b-4019-ce08-4baa61c0afa5" plt.plot(pde1.s_space, pde1.s_val, label = 'computed') exact_val = pde1.exact_sol(pde1.s_space) plt.plot(pde1.s_space, exact_val, label = 'exact') plt.legend() # + id="gtj_gH0HGCGF" colab_type="code" colab={} # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # #!/usr/bin/env python3 import torch import math from torch import nn, Tensor from torch.utils.tensorboard import SummaryWriter from tqdm import trange from typing import Tuple from matplotlib import pyplot as plt import combinators.trace.utils as trace_utils from combinators.trace.utils import RequiresGrad from combinators.tensor.utils import autodevice, kw_autodevice, copy, show from combinators.densities import MultivariateNormal, Tempered, RingGMM, Normal from combinators.densities.kernels import MultivariateNormalKernel, MultivariateNormalLinearKernel, NormalLinearKernel from combinators.nnets import ResMLPJ from combinators.objectives import nvo_rkl, nvo_avo from combinators import Forward, Reverse, Propose from combinators.stochastic import RandomVariable, ImproperRandomVariable from combinators.metrics import effective_sample_size, log_Z_hat import visualize as V def mk_kernel(from_:int, to_:int, std:float, num_hidden:int, learn_cov=True): embedding_dim = 2 return MultivariateNormalKernel( ext_from=f'g{from_}', ext_to=f'g{to_}', loc=torch.zeros(2, **kw_autodevice()), cov=torch.eye(2, **kw_autodevice())*std**2, learn_cov=learn_cov, net=ResMLPJ( dim_in=2, dim_hidden=num_hidden, dim_out=embedding_dim).to(autodevice())) def mk_mnlinear_kernel(from_:int, to_:int, std:float, dim:int): return MultivariateNormalLinearKernel( ext_from=f'g{from_}', ext_to=f'g{to_}', loc=torch.zeros(dim, **kw_autodevice()), cov=torch.eye(dim, **kw_autodevice())*std**2) def mk_nlinear_kernel(from_:int, to_:int, std:float, dim:int): return NormalLinearKernel(ext_from=f'g{from_}', ext_to=f'g{to_}') def anneal_to_ring(num_targets, n=2): g0, gK = mk_ring(num_targets, n) return 
anneal_between(g0, gK, num_targets) def mk_ring(num_targets, n): assert n > 1 g0 = mk_mvn(0, 0, std=5) gK = RingGMM(loc_scale=10, scale=0.5, count=8 if n == "paper" else n, name=f"g{num_targets - 1}").to(autodevice()) return g0, gK def anneal_between(left, right, total_num_targets): proposal_std = total_num_targets # Make an annealing path betas = torch.arange(0., 1., 1./(total_num_targets - 1))[1:] # g_0 is beta=0 path = [Tempered(f'g{k}', left, right, beta) for k, beta in zip(range(1,total_num_targets-1), betas)] path = [left] + path + [right] assert len(path) == total_num_targets # sanity check that the betas line up return path def anneal_between_mvns(left_loc, right_loc, total_num_targets): g0 = mk_mvn(0, left_loc) gK = mk_mvn(total_num_targets-1, right_loc) return anneal_between(g0, gK, total_num_targets) def anneal_between_ns(left_loc, right_loc, total_num_targets): g0 = mk_n(0, left_loc) gK = mk_n(total_num_targets-1, right_loc) return anneal_between(g0, gK, total_num_targets) def mk_mvn(i, loc, std=1): return MultivariateNormal(name=f'g{i}', loc=torch.ones(2, **kw_autodevice())*loc, cov=torch.eye(2, **kw_autodevice())*std**2) def mk_n(i, loc): return Normal(name=f'g{i}', loc=torch.ones(1, **kw_autodevice())*loc, scale=torch.ones(1, **kw_autodevice())**2) def mk_model(num_targets:int): return dict( targets=anneal_to_ring(num_targets, n=8), forwards=[mk_kernel(from_=i, to_=i+1, std=1., num_hidden=64) for i in range(num_targets-1)], reverses=[mk_kernel(from_=i+1, to_=i, std=1., num_hidden=64) for i in range(num_targets-1)], # targets=anneal_between_mvns(0, num_targets*2, num_targets), # forwards=[mk_kernel(from_=i, to_=i+1, std=1., num_hidden=64) for i in range(num_targets-1)], # reverses=[mk_kernel(from_=i+1, to_=i, std=1., num_hidden=64) for i in range(num_targets-1)], # targets=anneal_between_mvns(0, num_targets*2, num_targets), # forwards=[mk_mnlinear_kernel(from_=i, to_=i+1, std=1., dim=2) for i in range(num_targets-1)], # reverses=[mk_mnlinear_kernel(from_=i+1, to_=i, std=1., dim=2) for i in range(num_targets-1)], # NOTES: Anneal between 2 1d guassians with a linear kernel: 2 steps # annealing does not learn the forward kernel in the first step, but learns both in the second step. 
# targets=anneal_between_ns(0, num_targets*2, num_targets), # forwards=[mk_nlinear_kernel(from_=i, to_=i+1, std=1., dim=1) for i in range(num_targets-1)], # reverses=[mk_nlinear_kernel(from_=i+1, to_=i, std=1., dim=1) for i in range(num_targets-1)], # targets=[mk_mvn(i, i*2) for i in range(num_targets)], # forwards=[mk_kernel(from_=i, to_=i+1, std=1., num_hidden=32) for i in range(num_targets-1)], # reverses=[mk_kernel(from_=i+1, to_=i, std=1., num_hidden=32) for i in range(num_targets-1)], # targets=[mk_mvn(i, i*2) for i in range(num_targets)], # forwards=[mk_mnlinear_kernel(from_=i, to_=i+1, std=1., dim=2) for i in range(num_targets-1)], # reverses=[mk_mnlinear_kernel(from_=i+1, to_=i, std=1., dim=2) for i in range(num_targets-1)], # NOTES: With 1 intermediate density between 2 1d guassians with a linear kernel everything is fine # targets=[mk_n(i, i*2) for i in range(num_targets)], # forwards=[mk_nlinear_kernel(from_=i, to_=i+1, std=1., dim=1) for i in range(num_targets-1)], # reverses=[mk_nlinear_kernel(from_=i+1, to_=i, std=1., dim=1) for i in range(num_targets-1)], ) K = 8 mk_model(K) # + import torch import math from torch import nn, Tensor from torch.utils.tensorboard import SummaryWriter from tqdm import trange from typing import Tuple from matplotlib import pyplot as plt import combinators.trace.utils as trace_utils from combinators.tensor.utils import autodevice, kw_autodevice from combinators.densities import MultivariateNormal, Tempered, RingGMM from combinators.densities.kernels import MultivariateNormalKernel from combinators.nnets import ResMLPJ from combinators.objectives import nvo_rkl from combinators import Forward, Reverse, Propose from combinators.stochastic import RandomVariable, ImproperRandomVariable from combinators.metrics import effective_sample_size, log_Z_hat import visualize as V # - #from main import mk_model, mk_kernel from tqdm.notebook import trange, tqdm # + from combinators import Forward def sample_along(proposal, kernels, sample_shape=(2000,)): samples = [] tr, out = proposal(sample_shape=sample_shape) samples.append(out) for k in forwards: proposal = Forward(k, proposal) tr, out = proposal(sample_shape=sample_shape) samples.append(out) return samples # - # main() arguments seed=1 eval_break = 50 # + # Setup torch.manual_seed(seed) num_samples = 256 sample_shape=(num_samples,) # Models out = mk_model(K) targets, forwards, reverses = [[m.to(autodevice()) for m in out[n]] for n in ['targets', 'forwards', 'reverses']] assert all([len(list(k.parameters())) > 0 for k in [*forwards, *reverses]]) # logging writer = SummaryWriter() loss_ct, loss_sum, loss_avgs, loss_all = 0, 0.0, [], [] # - print(targets) # + print(forwards) # _ = [print(p) for f in forwards for p in f.parameters()] # + print(reverses) # _ = [print(p) for f in reverses for p in f.parameters()] # - from combinators.objectives import mb0, mb1, _estimate_mc, eval_nrep optimizer = torch.optim.Adam([dict(params=x.parameters()) for x in [*forwards, *reverses]], lr=1e-3) lazy_i, i = 0, 0 # + num_iterations=5000 lazy_i = i with trange(num_iterations) as bar: for i in bar: optimizer.zero_grad() i += lazy_i q0 = targets[0] p_prv_tr, out0 = q0(sample_shape=sample_shape) loss = torch.zeros(1, **kw_autodevice()) lw, lvss = torch.zeros(sample_shape, **kw_autodevice()), [] for k, (fwd, rev, q, p) in enumerate(zip(forwards, reverses, targets[:-1], targets[1:])): q.with_observations(trace_utils.copytrace(p_prv_tr, detach=p_prv_tr.keys())) q_ext = Forward(fwd, q, _step=k) p_ext = Reverse(p, rev, _step=k) 
extend = Propose(target=p_ext, proposal=q_ext, _step=k) # breakpoint() state, lv = extend(sample_shape=sample_shape, sample_dims=0) p_prv_tr = state.target.trace p.clear_observations() q.clear_observations() lw += lv # loss += nvo_rkl(lw, lv, state.proposal.trace[f'g{k}'], state.target.trace[f'g{k+1}']) loss += nvo_avo(lv) s # # # # # breakpoint()s # batch_dim=None # sample_dims=0 # rv_proposal=state.proposal.trace[f'g{k}'] # rv_target=state.target.trace[f'g{k+1}'] # # TODO: move back from the proposal and target RVs to joint logprobs? # reducedims = (sample_dims,) # lw = lw.detach() # ldZ = lv.detach().logsumexp(dim=sample_dims) - math.log(lv.shape[sample_dims]) # f = -lv # # rv_proposal = next(iter(proposal_trace.values())) # tr[\gamma_{k-1}] # # rv_target = next(iter(target_trace.values())) # tr[\gamma_{k}] # kwargs = dict( # sample_dims=sample_dims, # reducedims=reducedims, # keepdims=False # ) # baseline = _estimate_mc(f.detach(), lw, **kwargs).detach() # kl_term = _estimate_mc(mb1(rv_proposal.log_prob.squeeze()) * (f - baseline), lw, **kwargs) # grad_log_Z1 = _estimate_mc(rv_proposal.log_prob.squeeze(), lw, **kwargs) # grad_log_Z2 = _estimate_mc(eval_nrep(rv_target).log_prob.squeeze(), lw+lv.detach(), **kwargs) # #s breakpoint() # if k==0: # # loss += kl_term + mb0(baseline * grad_log_Z1 - grad_log_Z2) + baseline + ldZ # loss += nvo_avo(lv) lvss.append(lv) loss.backward() optimizer.step() # scheduler.step() with torch.no_grad(): # REPORTING # --------------------------------------- # # ESS lvs = torch.stack(lvss, dim=0) lws = torch.cumsum(lvs, dim=1) ess = effective_sample_size(lws, sample_dims=-1) for step, x in zip(range(1,len(ess)+1), ess): writer.add_scalar(f'ess/step-{step}', x, i) # logZhat lzh = log_Z_hat(lws, sample_dims=-1) for step, x in zip(range(1,len(lzh)+1), lzh): writer.add_scalar(f'log_Z_hat/step-{step}', x, i) # loss loss_ct += 1 loss_scalar = loss.detach().cpu().mean().item() writer.add_scalar('loss', loss_scalar, i) loss_sum += loss_scalar # progress bar if i % 10 == 0: loss_avg = loss_sum / loss_ct loss_template = 'loss={}{:.4f}'.format('' if loss_avg < 0 else ' ', loss_avg) logZh_template = 'logZhat[-1]={:.4f}'.format(lzh[-1].cpu().item()) ess_template = 'ess[-1]={:.4f}'.format(ess[-1].cpu().item()) loss_ct, loss_sum = 0, 0.0 bar.set_postfix_str("; ".join([loss_template, ess_template, logZh_template])) # show samples if i % (eval_break + 1) == 0: samples = sample_along(targets[0], forwards) fig = V.scatter_along(samples) writer.add_figure('overview', fig, global_step=i, close=True) # for ix, xs in enumerate(samples): # writer.add_figure(f'step-{ix+1}', V.scatter(xs), global_step=i, close=True) # - # + samples = sample_along(targets[0], forwards) plot_type = len(samples[0].squeeze().shape) import torch import matplotlib.pyplot as plt from matplotlib import colors from scipy.interpolate import interpn from matplotlib import cm import matplotlib.gridspec as gridspec def scatter(xs, lws=None, c='C0', ax=None, show=False): xs = xs.squeeze().detach().cpu().numpy() assert len(xs.shape) == 2 inplace = ax is not None cm_endpoints = [(i, (*colors.to_rgb(c), i)) for i in [0.0, 1.0]] lin_alpha = colors.LinearSegmentedColormap.from_list('incr_alpha', cm_endpoints) fig = None plt.scatter(*xs.T, c=None, cmap=lin_alpha) if show: plt.show() return fig if fig is not None else ax def scatter_together(samples): fig = plt.figure(figsize=(5*len(samples), 5)) gspec = gridspec.GridSpec(ncols=len(samples), nrows=1, figure=fig) for i, xs in enumerate(samples): ax = 
fig.add_subplot(gspec[0, i]) scatter(xs) return fig if plot_type == 1: print("; ".join(["{:.4f}".format(ss.mean().cpu().item()) for ss in samples])) elif plot_type == 2: fig = scatter_together(samples) plt.show() # + active="" # print(forwards) # # _ = [print(p) for f in forwards for p in f.parameters()] # + active="" # print(reverses) # # _ = [print(p) for f in reverses for p in f.parameters()] # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.5 64-bit (''venv'': venv)' # name: python3 # --- # + import time #import cv2 import os import random import numpy as np import matplotlib.pyplot as plt from PIL import Image import matplotlib.image as mpimg from collections import OrderedDict import pandas as pd from skimage import io, transform from math import * import xml.etree.ElementTree as ET import pandas as pd from skimage.transform import AffineTransform, warp from skimage.transform import rotate as rotate_transform from skimage.util import random_noise from skimage.filters import gaussian import torch import torchvision import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torchvision.transforms.functional as TF from torchvision import datasets, models, transforms from torch.utils.data import Dataset from torch.utils.data import DataLoader class Transforms(): def __init__(self): pass def rotate(self, image, landmarks, params): angle = params['rotation_range'][0] angle = (random.uniform(0,1))*random.choice([-1,1])*angle transformation_matrix = torch.tensor([ [+cos(radians(angle)), -sin(radians(angle))], [+sin(radians(angle)), +cos(radians(angle))] ]) image = rotate_transform(np.array(image), angle = angle, mode = 'edge') landmarks = landmarks - 0.5 new_landmarks = np.matmul(landmarks, transformation_matrix) new_landmarks = new_landmarks + 0.5 # PIL expects RGB images to be uint with ranges from 0 to 255 so we have to convert it to a type that PIL can excpect ie a uint from 0 to 255 return Image.fromarray((image * 255).astype(np.uint8)), new_landmarks def translation(self, image, landmarks, params): image_shape = np.array(image).shape ty = random.uniform(params['height_shift_range'][0]*image_shape[0], params['height_shift_range'][1]*image_shape[0]) tx = random.uniform(params['width_shift_range'][0]*image_shape[1], params['width_shift_range'][1]*image_shape[1] ) horizontal_shift = tx*random.choice([-1,1]) vertical_shift = ty*random.choice([-1,1]) horizontal_shift_normalised = horizontal_shift/image_shape[1] vertical_shift_normalised = vertical_shift/image_shape[0] transform = AffineTransform(translation=(-horizontal_shift,-vertical_shift)) image = warp(np.array(image),transform,mode='edge') landmarks = landmarks + torch.tensor([horizontal_shift_normalised,vertical_shift_normalised]) # PIL expects RGB images to be uint with ranges from 0 to 255 so we have to convert it to a type that PIL can excpect ie a uint from 0 to 255 return Image.fromarray((image * 255).astype(np.uint8)), landmarks def resize(self, image, landmarks, img_size): image = TF.resize(image, img_size) return image, landmarks def zoom(self, image, landmarks, params): landmarks = landmarks.astype(int) img_shape = np.array(image).shape zoom = random.uniform(params['zoom_range'][0],params['zoom_range'][1]) image = TF.resize(image,(int(img_shape[0]*zoom), int(img_shape[1]*zoom)) ) scale_transform = torch.tensor([[zoom, 0], [0, zoom]]) landmarks = 
np.matmul(landmarks, scale_transform).float() new_img_shape = np.array(image).shape landmarks = landmarks / torch.tensor([new_img_shape[1], new_img_shape[0]]) return image, landmarks def color_jitter(self, image, landmarks): color_jitter = transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.1) image = color_jitter(image) return image, landmarks def __call__(self, image, landmarks, params): # set checked image and landmark to landmark_ and image_ (this is for making sure we use the last checked tranformed instead of wrongly tranformed to do the following # tranform) # ----------------------- image_ = Image.fromarray(image.copy()) landmarks_ = landmarks.copy() # ----------------------- # ZOOM image , landmarks = self.zoom(image_, landmarks_, params) image_shape = np.array(image).shape landmarks_bool = landmarks < 0 # correct this becuase hight and with is different sizes # NOTE fix landmarks_outofbounds = landmarks*image_shape[1] > image_shape[1] while landmarks_bool.any() or landmarks_outofbounds.any(): image, landmarks = self.zoom(image_, landmarks_, params) landmarks_bool = landmarks < 0 landmarks_outofbounds = landmarks*image_shape[1] > image_shape[1] # ---------------------- image_ = image landmarks_ = landmarks # ---------------------- # RESIZE image, landmarks = self.resize(image_, landmarks_, (224, 224)) image_shape = np.array(image).shape landmarks_bool = landmarks < 0 landmarks_outofbounds = landmarks*image_shape[1] > image_shape[1] while landmarks_bool.any() or landmarks_outofbounds.any(): image, landmarks = self.resize(image_, landmarks_, (224, 224)) image_shape = np.array(image).shape landmarks_bool = landmarks < 0 landmarks_outofbounds = landmarks*image_shape[1] > image_shape[1] landmarks_ = landmarks image_ = image # ---------------------- #image_, landmarks_ = self.color_jitter(image_, landmarks_) # ---------------------- # ROTATE image, landmarks = self.rotate(image_, landmarks_, params) image_shape = np.array(image).shape landmarks_bool = landmarks < 0 landmarks_outofbounds = landmarks*image_shape[1] > image_shape[1] while landmarks_bool.any() or landmarks_outofbounds.any(): image, landmarks = self.rotate(image_, landmarks_, params) image_shape = np.array(image).shape landmarks_bool = landmarks < 0 landmarks_outofbounds = landmarks*image_shape[1] > image_shape[1] # ---------------------- landmarks_ = landmarks image_ = image # ---------------------- # TRANSLATION image, landmarks = self.translation(image, landmarks, params) image_shape = np.array(image).shape landmarks_bool = landmarks < 0 landmarks_outofbounds = landmarks*image_shape[1] > image_shape[1] while landmarks_bool.any() or landmarks_outofbounds.any(): image, landmarks = self.translation(image_, landmarks_, params) image_shape = np.array(image).shape landmarks_bool = landmarks < 0 landmarks_outofbounds = landmarks*image_shape[1] > image_shape[1] # ---------------------- landmarks_ = landmarks image_ = image # ---------------------- image = TF.to_tensor(image) # the following tranform normalises each channel to have a mean at 0.5 and std of 0.5 / NOTE: NOT sure if this is theoreticlly better, should check this image = TF.normalize(image, [0.5], [0.5]) return image, landmarks class LandmarksDataset(): def __init__(self, transform=None,zoom = [1.0 - 0.03258157476873315, 1.0 + 0.03258157476873315], rotation = [22], height_shift= [0,0.03003200603616672], width_shift= [0,0.03003200603616672 ]): df = pd.read_csv('C:/Projects/msc_haar/tsetsedata_2019_left_commas/annotations_left.txt',index_col=0, 
header=None) df2 = pd.read_csv('C:/Projects/msc_haar/flipped_left.csv', index_col= 0) self. tranform = transform self.zoom = zoom self.rotation = rotation self.height_shift = height_shift self.width_shift = width_shift self.image_filenames = [] self.landmarks = [] self.transform = transform self.image_dir = 'C:/Projects/msc_haar/tsetsedata_2019_left_commas/images_left/' self.image_dir2 = 'C:/Projects/msc_haar/tsetsedata_2019_right_commas/flipped_left/' self.TransF_ = True # ------------------- Append left wings data to dataset class ------------ for filename in df.index[:]: self.image_filenames.append(os.path.join(self.image_dir, filename)) landmarks = [] for num in range(1, 23, 2): x_coordinate = df.loc[filename,num] - 1 y_coordinate = df.loc[filename, num+1] - 1 landmarks.append([x_coordinate, y_coordinate]) self.landmarks.append(landmarks) assert len(self.image_filenames) == len(self.landmarks) # ------------------ Append flipped right wings data to dataset class----- for filename in df2.index[:]: self.image_filenames.append(os.path.join(self.image_dir2, filename)) landmarks = [] for num in range(1, 23, 2): x_coordinate = df2.loc[filename,'{}'.format(num)] +1 y_coordinate = df2.loc[filename, '{}'.format(num+1)] +1 landmarks.append([x_coordinate, y_coordinate]) self.landmarks.append(landmarks) self.landmarks = np.array(self.landmarks).astype('float32') assert len(self.image_filenames) == len(self.landmarks) # ---------------------- def TransF(self): self.TransF_ = True def NoTransF(self): self.TransF_ = False def set_params(self, zoom = [0.95, 0.105], rotation = [10], height_shift= [0,0.05], width_shift= [0,0.05]): self.zoom = zoom self.rotation = rotation self.height_shift = height_shift self.width_shift = width_shift def __len__(self): return len(self.image_filenames) def __getitem__(self, index): params = {'zoom_range': self.zoom, 'rotation_range':self.rotation, 'height_shift_range': self.height_shift, 'width_shift_range': self.width_shift } image_name = self.image_filenames[index] image_ = plt.imread(image_name) landmarks_ = self.landmarks[index] image = plt.imread(self.image_filenames[index]) landmarks = self.landmarks[index] if self.transform and self.TransF_: image, landmarks = self.transform(image_, landmarks_, params) image_shape = image.shape landmarks_bool = landmarks < 0 landmarks_outofbounds = landmarks*224 > image_shape[1] while landmarks_bool.any() or landmarks_outofbounds.any(): image, landmarks = self.transform(image_, landmarks_, params) landmarks_bool = landmarks < 0 landmarks_outofbounds = landmarks*224 > image_shape[1] else: img_shape = image.copy().shape image = Image.fromarray(image) image = TF.resize(image, (224,224)) landmarks = torch.tensor(landmarks) / torch.tensor([img_shape[1],img_shape[0]]) image = TF.to_tensor(image) # the following tranform normalises each channel to have a mean at 0.5 and std of 0.5 / NOTE: NOT sure if this is theoreticlly better, should check this image = TF.normalize(image, [0.5], [0.5]) landmarks = torch.tensor(landmarks) - 0.5 return image, landmarks, image_name dataset = LandmarksDataset(Transforms()) class UnNormalize(object): def __init__(self, mean, std): self.mean = mean self.std = std def __call__(self, tensor): """ Args: tensor (Tensor): Tensor image of size (C, H, W) to be normalized. Returns: Tensor: Normalized image. """ for t, m, s in zip(tensor, self.mean, self.std): t.mul_(s).add_(m) # The normalize code -> t.sub_(m).div_(s) return tensor ''' You instantiate it with the same arguments used for the normalize. 
and then use it the same way unorm = UnNormalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)) unorm(tensor) ''' class resnet50(nn.Module): def __init__(self,num_classes=22): super().__init__() self.model_name='resnet50' self.model=models.resnet50(pretrained=True) self.model.conv1=nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.model.fc=nn.Linear(self.model.fc.in_features, num_classes) def forward(self, x): x=self.model(x) return x dataset.NoTransF() #DataSet.resize(244) # split the dataset into validation and test sets len_valid_test_set = int(0.2*len(dataset)) # 60% training, 20% validation, 20% testing len_train_set = len(dataset) - len_valid_test_set*2 print("The length of Train set is {}".format(len_train_set)) print("The length of Valid set is {}".format(len_valid_test_set)) print("The length of Valid set is {}".format(len_valid_test_set)) train_dataset , valid_dataset, test_dataset = torch.utils.data.random_split(dataset , [len_train_set, len_valid_test_set, len_valid_test_set], generator=torch.Generator().manual_seed(42)) # shuffle and batch the datasets train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=15, shuffle=True) valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=15, shuffle=True) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=None,batch_sampler=None, shuffle=False) resnet50 = resnet50() resnet50.load_state_dict(torch.load('C:/Projects/msc_haar/manuscript1_exp/regressors/models/model_resnet50_regressor_NoTransF_1.pth')) resnet50.cpu() # + dataset.NoTransF() start_time = time.time() #best_network.cuda() resnet50f.eval() predictions = torch.zeros(484, 11, 2).detach() landmarks = torch.zeros(484, 11, 2).detach() #images, landmarks = next(iter(test_loader)) counter = 0 with torch.no_grad(): for images, landmark, filename in test_loader: images = images.detach().reshape((1,3,224,224)) landmark = (landmark + 0.5 )*torch.tensor([1280, 1024]).detach() prediction = (resnet50f(images) + 0.5) prediction = prediction.view(-1,11,2)*torch.tensor([1280, 1024]) landmarks[counter,:] = landmark predictions[counter,:] = prediction predictions.detach() counter += 1 endtime = time.time()-start_time print(endtime) #print(predictions) difs = abs(predictions - landmarks) difs = difs.detach() dis = np.sqrt(difs[:, :,0]**2 + difs[:, :, 1]**2) av = [] for I in dis: av.append(np.mean(I.numpy())) print(np.mean(av)) print(np.std(av)) plt.figure(figsize=(10,10)) plt.boxplot(dis.T) plt.xlabel('landmarks', fontsize=8) plt.ylabel('pixel distance error', fontsize=8) plt.title('Pixel distance errors', fontsize=10) plt.savefig('errors_resnet50.png') plt.show() # + difs = abs(predictions - landmarks) difs = difs.detach() dis = difs[:, :,0]**2 + difs[:, :, 1]**2 rms = np.sqrt(dis.sum(axis=0)/dis.shape[0]) rms.mean() # + def procrustes(X, Y, scaling=True, reflection='best'): """ A port of MATLAB's `procrustes` function to Numpy. Procrustes analysis determines a linear transformation (translation, reflection, orthogonal rotation and scaling) of the points in Y to best conform them to the points in matrix X, using the sum of squared errors as the goodness of fit criterion. d, Z, [tform] = procrustes(X, Y) Inputs: ------------ X, Y matrices of target and input coordinates. they must have equal numbers of points (rows), but Y may have fewer dimensions (columns) than X. 
scaling if False, the scaling component of the transformation is forced to 1 reflection if 'best' (default), the transformation solution may or may not include a reflection component, depending on which fits the data best. setting reflection to True or False forces a solution with reflection or no reflection respectively. Outputs ------------ d the residual sum of squared errors, normalized according to a measure of the scale of X, ((X - X.mean(0))**2).sum() Z the matrix of transformed Y-values tform a dict specifying the rotation, translation and scaling that maps X --> Y """ n,m = X.shape ny,my = Y.shape muX = X.mean(0) muY = Y.mean(0) X0 = X - muX Y0 = Y - muY ssX = (X0**2.).sum() ssY = (Y0**2.).sum() # centred Frobenius norm normX = np.sqrt(ssX) normY = np.sqrt(ssY) # scale to equal (unit) norm X0 /= normX Y0 /= normY if my < m: Y0 = np.concatenate((Y0, np.zeros(n, m-my)),0) # optimum rotation matrix of Y A = np.dot(X0.T, Y0) U,s,Vt = np.linalg.svd(A,full_matrices=False) V = Vt.T T = np.dot(V, U.T) if reflection is not 'best': # does the current solution use a reflection? have_reflection = np.linalg.det(T) < 0 # if that's not what was specified, force another reflection if reflection != have_reflection: V[:,-1] *= -1 s[-1] *= -1 T = np.dot(V, U.T) traceTA = s.sum() if scaling: # optimum scaling of Y b = traceTA * normX / normY # standarised distance between X and b*Y*T + c d = 1 - traceTA**2 # transformed coords Z = normX*traceTA*np.dot(Y0, T) + muX else: b = 1 d = 1 + ssY/ssX - 2 * traceTA * normY / normX Z = normY*np.dot(Y0, T) + muX # transformation matrix if my < m: T = T[:my,:] c = muX - b*np.dot(muY, T) #transformation values tform = {'rotation':T, 'scale':b, 'translation':c} return d, Z, tform def generalized_procrustes(data): mean = data[0,...] 
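# Generalized Procrustes analysis: every landmark configuration is aligned to
# the current mean shape (translation and rotation only, since scaling=False
# and reflection=False are passed), the mean shape is recomputed, and the loop
# stops once the decrease in the average residual d falls below 1e-4.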
print('Aligning') d = 100 d_old = 100 while d > 0.0001: d_new = 0 for i in range(data.shape[0]): d_, data[i,:], _ = procrustes(mean, data[i,:], scaling=False, reflection=False) d_new += d_ / data.shape[0] d = d_old - d_new d_old = d_new mean = data.mean(axis=0) return mean mean = generalized_procrustes(landmarks) # + from scipy.spatial import procrustes as procrustes_ procrustes_errors = {'error':[], 'procrustes_disparity':[]} for i in range(484): procrustes_errors['error'].append(av[i]) mtx1, mtx2, disparity = procrustes_(mean, landmarks[i]) procrustes_errors['procrustes_disparity'].append(disparity) # - proc = [] #for i in procrustes_errors['procrustes_disparity']: # if i>0.001: # proc.append(i) errors = [] for it, i in enumerate(procrustes_errors['error']): #$print(it) mean_err = np.mean(procrustes_errors['error']) mean_disp = np.mean(procrustes_errors['procrustes_disparity']) std_err = np.std(procrustes_errors['error'])*2 std_disp = np.std(procrustes_errors['procrustes_disparity'])*2 i_p = procrustes_errors['procrustes_disparity'][it] if (mean_err - std_err)rgb_thresh[0]) \ & (img[:,:,1] > rgb_thresh[1]) \ & (img[:,:,2] > rgb_thresh[2]) color_select[above] = 1 return color_select # Define color selection criteria ###### TODO: MODIFY THESE VARIABLES TO MAKE YOUR COLOR SELECTION red_threshold = 160 green_threshold = 160 blue_threshold = 160 ###### rgb_threshold = (red_threshold, green_threshold, blue_threshold) # + warped = perspect_transform(img,source,dest) colorsel = color_thresh(warped, rgb_thresh=(160, 160, 160)) # Plot the result plt.imshow(colorsel, cmap='gray') plt.show() # - ypos, xpos = colorsel.nonzero() plt.plot(xpos, ypos, '.') plt.xlim(0, 320) plt.ylim(0, 160) plt.show() def rover_coords(binary_img): # Extract xpos and ypos pixel positions from binary_img and ypos, xpos = binary_img.nonzero() # Convert xpos and ypos to rover-centric coordinates x_pixel = -(ypos - binary_img.shape[0]).astype(np.float) y_pixel = -(xpos - binary_img.shape[1]/2 ).astype(np.float) return x_pixel, y_pixel xpix, ypix = rover_coords(colorsel) fig = plt.figure(figsize=(5, 7.5)) plt.plot(xpix, ypix, '.') plt.ylim(-160, 160) plt.xlim(0, 160) plt.title('Rover-Centric Map', fontsize=20) plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # Import Dependencies import gradio as gr import pandas as pd import numpy as np from joblib import load # Function to make Inference def predict_disease_from_symptom(symptom_list): symptoms = {'itching': 0, 'skin_rash': 0, 'nodal_skin_eruptions': 0, 'continuous_sneezing': 0, 'shivering': 0, 'chills': 0, 'joint_pain': 0, 'stomach_pain': 0, 'acidity': 0, 'ulcers_on_tongue': 0, 'muscle_wasting': 0, 'vomiting': 0, 'burning_micturition': 0, 'spotting_ urination': 0, 'fatigue': 0, 'weight_gain': 0, 'anxiety': 0, 'cold_hands_and_feets': 0, 'mood_swings': 0, 'weight_loss': 0, 'restlessness': 0, 'lethargy': 0, 'patches_in_throat': 0, 'irregular_sugar_level': 0, 'cough': 0, 'high_fever': 0, 'sunken_eyes': 0, 'breathlessness': 0, 'sweating': 0, 'dehydration': 0, 'indigestion': 0, 'headache': 0, 'yellowish_skin': 0, 'dark_urine': 0, 'nausea': 0, 'loss_of_appetite': 0, 'pain_behind_the_eyes': 0, 'back_pain': 0, 'constipation': 0, 'abdominal_pain': 0, 'diarrhoea': 0, 'mild_fever': 0, 'yellow_urine': 0, 'yellowing_of_eyes': 0, 'acute_liver_failure': 0, 'fluid_overload': 0, 'swelling_of_stomach': 
0, 'swelled_lymph_nodes': 0, 'malaise': 0, 'blurred_and_distorted_vision': 0, 'phlegm': 0, 'throat_irritation': 0, 'redness_of_eyes': 0, 'sinus_pressure': 0, 'runny_nose': 0, 'congestion': 0, 'chest_pain': 0, 'weakness_in_limbs': 0, 'fast_heart_rate': 0, 'pain_during_bowel_movements': 0, 'pain_in_anal_region': 0, 'bloody_stool': 0, 'irritation_in_anus': 0, 'neck_pain': 0, 'dizziness': 0, 'cramps': 0, 'bruising': 0, 'obesity': 0, 'swollen_legs': 0, 'swollen_blood_vessels': 0, 'puffy_face_and_eyes': 0, 'enlarged_thyroid': 0, 'brittle_nails': 0, 'swollen_extremeties': 0, 'excessive_hunger': 0, 'extra_marital_contacts': 0, 'drying_and_tingling_lips': 0, 'slurred_speech': 0, 'knee_pain': 0, 'hip_joint_pain': 0, 'muscle_weakness': 0, 'stiff_neck': 0, 'swelling_joints': 0, 'movement_stiffness': 0, 'spinning_movements': 0, 'loss_of_balance': 0, 'unsteadiness': 0, 'weakness_of_one_body_side': 0, 'loss_of_smell': 0, 'bladder_discomfort': 0, 'foul_smell_of urine': 0, 'continuous_feel_of_urine': 0, 'passage_of_gases': 0, 'internal_itching': 0, 'toxic_look_(typhos)': 0, 'depression': 0, 'irritability': 0, 'muscle_pain': 0, 'altered_sensorium': 0, 'red_spots_over_body': 0, 'belly_pain': 0, 'abnormal_menstruation': 0, 'dischromic _patches': 0, 'watering_from_eyes': 0, 'increased_appetite': 0, 'polyuria': 0, 'family_history': 0, 'mucoid_sputum': 0, 'rusty_sputum': 0, 'lack_of_concentration': 0, 'visual_disturbances': 0, 'receiving_blood_transfusion': 0, 'receiving_unsterile_injections': 0, 'coma': 0, 'stomach_bleeding': 0, 'distention_of_abdomen': 0, 'history_of_alcohol_consumption': 0, 'fluid_overload.1': 0, 'blood_in_sputum': 0, 'prominent_veins_on_calf': 0, 'palpitations': 0, 'painful_walking': 0, 'pus_filled_pimples': 0, 'blackheads': 0, 'scurring': 0, 'skin_peeling': 0, 'silver_like_dusting': 0, 'small_dents_in_nails': 0, 'inflammatory_nails': 0, 'blister': 0, 'red_sore_around_nose': 0, 'yellow_crust_ooze': 0} # Set value to 1 for corresponding symptoms for s in symptom_list: symptoms[s] = 1 # Put all data in a test dataset df_test = pd.DataFrame(columns=list(symptoms.keys())) df_test.loc[0] = np.array(list(symptoms.values())) # Load pre-trained model clf = load(str("./saved_model/random_forest.joblib")) result = clf.predict(df_test) # Cleanup del df_test return f"{result[0]}" # + # Run Inference Server # Click on Public URL to run demo on separate page or for sharing purposes iface = gr.Interface( predict_disease_from_symptom, [ gr.inputs.CheckboxGroup(['itching', 'skin_rash', 'nodal_skin_eruptions', 'continuous_sneezing', 'shivering', 'chills', 'joint_pain', 'stomach_pain', 'acidity', 'ulcers_on_tongue', 'muscle_wasting', 'vomiting', 'burning_micturition', 'spotting_ urination', 'fatigue', 'weight_gain', 'anxiety', 'cold_hands_and_feets', 'mood_swings', 'weight_loss', 'restlessness', 'lethargy', 'patches_in_throat', 'irregular_sugar_level', 'cough', 'high_fever', 'sunken_eyes', 'breathlessness', 'sweating', 'dehydration', 'indigestion', 'headache', 'yellowish_skin', 'dark_urine', 'nausea', 'loss_of_appetite', 'pain_behind_the_eyes', 'back_pain', 'constipation', 'abdominal_pain', 'diarrhoea', 'mild_fever', 'yellow_urine', 'yellowing_of_eyes', 'acute_liver_failure', 'fluid_overload', 'swelling_of_stomach', 'swelled_lymph_nodes', 'malaise', 'blurred_and_distorted_vision', 'phlegm', 'throat_irritation', 'redness_of_eyes', 'sinus_pressure', 'runny_nose', 'congestion', 'chest_pain', 'weakness_in_limbs', 'fast_heart_rate', 'pain_during_bowel_movements', 'pain_in_anal_region', 'bloody_stool', 
'irritation_in_anus', 'neck_pain', 'dizziness', 'cramps', 'bruising', 'obesity', 'swollen_legs', 'swollen_blood_vessels', 'puffy_face_and_eyes', 'enlarged_thyroid', 'brittle_nails', 'swollen_extremeties', 'excessive_hunger', 'extra_marital_contacts', 'drying_and_tingling_lips', 'slurred_speech', 'knee_pain', 'hip_joint_pain', 'muscle_weakness', 'stiff_neck', 'swelling_joints', 'movement_stiffness', 'spinning_movements', 'loss_of_balance', 'unsteadiness', 'weakness_of_one_body_side', 'loss_of_smell', 'bladder_discomfort', 'foul_smell_of urine', 'continuous_feel_of_urine', 'passage_of_gases', 'internal_itching', 'toxic_look_(typhos)', 'depression', 'irritability', 'muscle_pain', 'altered_sensorium', 'red_spots_over_body', 'belly_pain', 'abnormal_menstruation', 'dischromic _patches', 'watering_from_eyes', 'increased_appetite', 'polyuria', 'family_history', 'mucoid_sputum', 'rusty_sputum', 'lack_of_concentration', 'visual_disturbances', 'receiving_blood_transfusion', 'receiving_unsterile_injections', 'coma', 'stomach_bleeding', 'distention_of_abdomen', 'history_of_alcohol_consumption', 'fluid_overload.1', 'blood_in_sputum', 'prominent_veins_on_calf', 'palpitations', 'painful_walking', 'pus_filled_pimples', 'blackheads', 'scurring', 'skin_peeling', 'silver_like_dusting', 'small_dents_in_nails', 'inflammatory_nails', 'blister', 'red_sore_around_nose', 'yellow_crust_ooze']), ], "text", description="Select a symptom from the list and click submit to get predicted Disease as the Output. \ [ NOTE: This app is meant for demo purposes only. Please consult a Doctor if you have any symptoms. ]" ) iface.launch(share=True) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #### Notebook style configuration (optional) from IPython.core.display import display, HTML style = open("./style.css").read() display(HTML("" % style)) # ### Table of contents # # * [Introduction](#introduction) # * [Single plot](#single_plot) # * [Multiple plots](#multiple_plots) # * [Ticks and labels](#ticks) # * [Exercises](#exercises) # # Introduction [Back to TOC] # # In this introduction, we'll see how to make a [figure](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.figure.html?highlight=figure#matplotlib.pyplot.figure) and play with the different settings such as to improve the rendering. We'll also see how to compose a figure made of several [subplots](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplot.html) with a moderatly complex layout. # # # # # # # These images come from the [cheatsheets](https://github.com/matplotlib/cheatsheets). # # # Single plot [Back to TOC] # # We'll start by playing with a very simple exampe (sine and cosine trigonometric functions) using the [plot](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.plot.html) function and see what options are avalaible to us. We'll see there exists a (very) large number of options that allow to obtain quite different outputs. More precisely, you can modify any aspect of a figure and obtain the precise rendering you've in mind. The only difficulty is to know what are these options and how to apply them. # ## Data preparation # # Before starting any plot, we need first to have some data to plot. 
Such data can originate from your own experiment or analysis but for the sake of simplicity, we'll generate our own data using the [numpy](https://numpy.org) library. For the sine and cosine functions, we simply generate 257 values linearly spaced between -π and +π and we compute the sine and cosine of these values. # + import numpy as np X = np.linspace(-np.pi, np.pi, 257, endpoint=True) C, S = np.cos(X), np.sin(X) # - # X is now a numpy array with 257 values ranging from -π to +π (included). C is the cosine (257 values) and S is the sine (257 values). We're ready to plot them. # # **Note** The standard way of importing maplotlib is to write `import matplotlib.pyplot as plt` and then use the `plt` prefix in front of matplotlib related functions. In some rather rare cases, we also use the `matplotlib` prefix, but most of the time we only need `plt`. # ## Implicit defaults # # Let's draw our first figure and observe what the result looks like. # + import matplotlib.pyplot as plt plt.plot(X, C) plt.plot(X, S) plt.show(); # - # **Note** The value `257` is a bit arbitrary and not that important. However, if you use too few values, the plot will appear "broken" (or even wrong in some cases) The reason is that matplotlib uses linear interpolation between points: # + import matplotlib.pyplot as plt plt.plot(X[::32], C[::32]) plt.plot(X[::32], S[::32]) plt.show(); # - # Reciprocally, if you use too much values, the plot will be correct but the computing of values and the rendering will be slower. # ## Explicit defaults # # Let's now try to do the exact same figure as above, but this time we'll specify everything. To do so, we need to read these defaults from the [maplotlibrc](https://matplotlib.org/stable/tutorials/introductory/customizing.html#customizing-with-matplotlibrc-files) configuration file that is accessible through the `plt.rcParams` variable. # + p = plt.rcParams fig = plt.figure(figsize = p['figure.figsize'], dpi = p['figure.dpi'], facecolor = p['figure.facecolor'], edgecolor = p['figure.edgecolor'], frameon = p['figure.frameon']) ax = plt.subplot() ax.plot(X, C, color="C0", linewidth = p['lines.linewidth'], linestyle = p['lines.linestyle']) ax.plot(X, S, color="C1", linewidth = p['lines.linewidth'], linestyle = p['lines.linestyle']) xmin, xmax = X.min(), X.max() xmargin = p['axes.xmargin']*(xmax - xmin) ax.set_xlim(xmin - xmargin, xmax + xmargin) ymin, ymax = min(C.min(), S.min()), max(C.max(), S.max()) ymargin = p['axes.ymargin']*(ymax - ymin) ax.set_ylim(ymin - ymargin, ymax + ymargin) ax.tick_params(axis = "x", which="major", direction = p['xtick.direction'], length = p['xtick.major.size'], width = p['xtick.major.width']) ax.tick_params(axis = "y", which="major", direction = p['ytick.direction'], length = p['ytick.major.size'], width = p['ytick.major.width']) plt.show(); # - print("figure.figsize: ", p['figure.figsize']) print("figure.dpi: ", p['figure.dpi']) print("figure.facecolor:", p['figure.facecolor']) print("figure.edgecolor:", p['figure.edgecolor']) print("figure.frameon: ", p['figure.frameon']) # + import re import pandas as pd from IPython.display import display, HTML def defaults(pattern = "*"): p = plt.rcParams r = re.compile(pattern) keys = list(filter(r.match, p.keys())) data = {key:p[key] for key in keys} frame = pd.DataFrame(data.values(), index=data.keys(), columns=['Default value']) display( frame ) defaults("figure*") # - # ## Custom settings # # As you can see from the script above, pretty much everything can be changed. 
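# If you only want to experiment with a handful of these settings, `plt.rc_context` applies them inside a `with` block and restores the global defaults on exit. A minimal sketch (the two parameters below are illustrative choices, not part of the original tutorial):

# +
with plt.rc_context({"lines.linewidth": 2.5, "axes.grid": True}):
    fig, ax = plt.subplots()
    ax.plot(X, C)
    ax.plot(X, S)
    plt.show()
# -

# This avoids mutating `plt.rcParams` globally, which (as noted below) would affect every subsequent figure in the notebook.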
So let's modify our figure to improve the rendering a bit. Now that we know what are the defaults, we can get back to the implicit version. # ### Figure size # # The plot appears a bit packed so let's first modify the figure size. # To do so, we need to create it explicitely in order to specify a size in inches. Let's try 10 inches wide and 3 inches tall. We'll also use an Axes ( `ax` ) which can be considered as a subfigure and we'll increase the DPI (dots per inch) of the figure to get a better resolution in the notebook. # + # Set default figure size p["figure.figsize"] = (10,3) # Set default figure dpi p["figure.dpi"] = 300 fig,ax = plt.subplots() ax.plot(X, C) ax.plot(X, S) plt.show(); # - # **Important** Since we modified `rcParams`, this will influence all subsequent figures in this notebook, including the top figures. # ### Line styles # # Solid line is the default line style but there exist several other styles such as dashed lines ( `linestyle="--"` ), dotted lines ( `linestyle=":"` ), etc. You can also combine a style with a marker. For example, we can add disc markers at regular intervals. To do that, we specify the `marker` symbol, the marker color and the spacing between markers (else you will have a marker at each data point). # + fig,ax = plt.subplots() ax.plot(X, C, marker="o", markevery=(0, 32), markerfacecolor="white") ax.plot(X, S, marker="o", markevery=(0, 32), markerfacecolor="white") plt.show(); # - # ### Tick positions # # Ticks on the x axis are not ideal positioned because they do not show the interesting values (+/-π,+/-π/2) and there are too many ticks on the y axis. Let's change them. # + fig,ax = plt.subplots() ax.plot(X, C, marker="o", markevery=(0, 32), markerfacecolor="white") ax.plot(X, S, marker="o", markevery=(0, 32), markerfacecolor="white") ax.set_xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi]) ax.set_yticks([-1,0,1]) plt.show(); # - # ### Tick labels # # Ticks are now properly placed but their labels are not very explicit. We could guess that 3.142 is π but it would be better to make it explicit. Let's change labels then. # + fig,ax = plt.subplots() ax.plot(X, C, marker="o", markevery=(0, 32), markerfacecolor="white") ax.plot(X, S, marker="o", markevery=(0, 32), markerfacecolor="white") ax.set_xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi]) ax.set_xticklabels(["-π", "-π/2", "0", "π/2", "π"]) ax.set_yticks([-1,0,1]) ax.set_yticklabels(["-1", "0", "+1"]) plt.show(); # - # ### Spines position # # Spines are the four lines around our figure and delimiting the data area. Byt default, there are four spines at top/bottom and left/right but we can hide some of them and move the others. Since there are four of them (top/bottom/left/right), we'll hide the top and right and we'll move the bottom and left ones to coordinate 0 (in data space coordinates). # + fig,ax = plt.subplots() ax.plot(X, C, marker="o", markevery=(0, 32), markerfacecolor="white") ax.plot(X, S, marker="o", markevery=(0, 32), markerfacecolor="white") ax.set_xticks([-np.pi, -np.pi/2, np.pi/2, np.pi]) ax.set_xticklabels(["-π", "-π/2", "π/2", "π"]) ax.set_yticks([-1,1]) ax.set_yticklabels(["-1", "+1"]) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['bottom'].set_position(('data',0)) ax.spines['left'].set_position(('data',0)) plt.show(); # - # ### Z order # # If you look carefully at the figure above you can see that axis are above the plot. It was already the case previosuly but it was less noticeable. Now with the markers, it is more obvious and pretty annoying. 
To fix the problem, we need to tell matplotlib to render our sine and cosine plots in front of the axis. To do so, we need to specify a zorder that specify the order of rendering. Elements are rendererd in increasing zorder. Knowing that the axis has a zorder of 0, let's use 10. # # + fig,ax = plt.subplots() ax.plot(X, C, marker="o", markevery=(0, 32), markerfacecolor="white", zorder=10) ax.plot(X, S, marker="o", markevery=(0, 32), markerfacecolor="white", zorder=10) ax.set_xticks([-np.pi, -np.pi/2, np.pi/2, np.pi]) ax.set_xticklabels(["-π", "-π/2", "π/2", "π"]) ax.set_yticks([-1, 1]) ax.set_yticklabels(["-1", "+1"]) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['bottom'].set_position(('data',0)) ax.spines['left'].set_position(('data',0)) plt.show(); # - # ### Legend # # Let's add a legend in the upper left corner. This only requires adding the keyword argument `label` (that will be used in the legend box) to the plot commands. # + fig,ax = plt.subplots() ax.plot(X, C, marker="o", markevery=(0, 32), markerfacecolor="white", zorder=10, label="cosine") ax.plot(X, S, marker="o", markevery=(0, 32), markerfacecolor="white", zorder=10, label="sine") ax.set_xticks([-np.pi, -np.pi/2, np.pi/2, np.pi]) ax.set_xticklabels(["-π", "-π/2", "π/2", "π"]) ax.set_yticks([-1, 1]) ax.set_yticklabels(["-1", "+1"]) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['bottom'].set_position(('data',0)) ax.spines['left'].set_position(('data',0)) ax.legend(loc='upper left', frameon=False) plt.show(); # - # ### Font size # # The font size of the tick labels is a bit small. Let's increase it a bit. # + fig,ax = plt.subplots() ax.plot(X, C, marker="o", markevery=(0, 32), markerfacecolor="white", zorder=10, label="cosine") ax.plot(X, S, marker="o", markevery=(0, 32), markerfacecolor="white", zorder=10, label="sine") ax.set_xticks([-np.pi, -np.pi/2, np.pi/2, np.pi]) ax.set_xticklabels(["-π", "-π/2", "π/2", "π"]) ax.set_yticks([-1, 1]) ax.set_yticklabels(["-1", "+1"]) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['bottom'].set_position(('data',0)) ax.spines['left'].set_position(('data',0)) ax.legend(loc='upper left', frameon=False); for label in ax.get_xticklabels() + ax.get_yticklabels(): label.set_fontsize("large"); plt.show(); # - # ### Title # # We're almost done. Let's now add a title on the left of our figure. # + fig,ax = plt.subplots() ax.plot(X, C, marker="o", markevery=(0, 32), markerfacecolor="white", zorder=10, label="cosine") ax.plot(X, S, marker="o", markevery=(0, 32), markerfacecolor="white", zorder=10, label="sine") ax.set_xticks([-np.pi, -np.pi/2, np.pi/2, np.pi]) ax.set_xticklabels(["-π", "-π/2", "π/2", "π"]) ax.set_yticks([-1, 1]) ax.set_yticklabels(["-1", "+1"]) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['bottom'].set_position(('data',0)) ax.spines['left'].set_position(('data',0)) ax.legend(loc='upper left', frameon=False); ax.set_title("Trigonometric functions", x=0.1) for label in ax.get_xticklabels() + ax.get_yticklabels(): label.set_fontsize("large") plt.show(); # - # ## Saving results # # We can now save our figure in a file using the PDF format. This is a vector format and this means the quality of the figure will be flawless independently of the zoom level. 
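# The PDF export shown in the next cell is resolution independent. If you also need a bitmap version (for a web page, for instance), `savefig` works the same way with a `.png` filename; in that case the `dpi` argument sets the resolution and `bbox_inches="tight"` trims the surrounding whitespace. A short sketch under those assumptions (the file names are illustrative):

# +
fig, ax = plt.subplots()
ax.plot(X, C, label="cosine")
ax.plot(X, S, label="sine")
ax.legend(loc="upper left", frameon=False)
fig.savefig("trigonometric.pdf")                                # vector output
fig.savefig("trigonometric.png", dpi=300, bbox_inches="tight")  # bitmap output
plt.show()
# -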
# + fig,ax = plt.subplots() ax.plot(X, C, marker="o", markevery=(0, 32), markerfacecolor="white", zorder=10, label="cosine") ax.plot(X, S, marker="o", markevery=(0, 32), markerfacecolor="white", zorder=10, label="sine") ax.set_xticks([-np.pi, -np.pi/2, np.pi/2, np.pi]) ax.set_xticklabels(["-π", "-π/2", "π/2", "π"]) ax.set_yticks([-1, 1]) ax.set_yticklabels(["-1", "+1"]) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['bottom'].set_position(('data',0)) ax.spines['left'].set_position(('data',0)) ax.legend(loc='upper left', frameon=False); for label in ax.get_xticklabels() + ax.get_yticklabels(): label.set_fontsize("large"); plt.savefig("../images/01-introduction.pdf") plt.show(); # - # # --- # # # # Multiple plots [Back to TOC] # # So far, we've been dealing with a single plot (or `Axes` in matplotlib terminology) on a figure, but of course, Matplotlib offers the possibility to draw several plots on the same figure. The only difficulty is to express the layout of these different plots. But let's start with someting simple first # We want to split our sine and cosine plot in two different plots side by side. To do that we need to create two axes. The most straigthforward way is to use the `subplot` method and to specify the number of rows and columns: # + fig = plt.figure(figsize=(12,7)) nrows, ncols = 2, 3 for index in range(1,nrows*ncols+1): ax = plt.subplot(nrows, ncols, index); ax.set_title("plt.subplot(%d,%d,%d)" % (nrows, ncols, index), weight="bold") plt.show(); # - # The syntax is straigforward as long as you know that Axes indices go from left to right and top to bottom. Back to our example, we need one row and two columns. # + fig = plt.figure(figsize=(12,3)) ax1 = plt.subplot(1,2,1) ax2 = plt.subplot(1,2,2) def plot(ax, X, Y, title=""): ax.plot(X,Y) ax.set_xticks([-np.pi, -np.pi/2, np.pi/2, np.pi]) ax.set_xticklabels(["-π", "-π/2", "π/2", "π"]) ax.set_yticks([-1, 1]) ax.set_yticklabels(["-1", "+1"]) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['bottom'].set_position(('data',0)) ax.spines['left'].set_position(('data',0)) ax.set_title(title, weight="bold") plot(ax1, X, C, "cosine function") plot(ax2, X, S, "sine function") plt.show(); # - # What if we want to have more complex layout with plot of unequal size? In this case, the best is to use the [gridspec](https://matplotlib.org/stable/api/_as_gen/matplotlib.gridspec.GridSpec.html) method. It also involves dividing the figure into rows and columns, but now we can specify the bounds of each plot, i.e. position and extent and their relative size. Let's see some example. # + fig = plt.figure(figsize=(10,6)) from matplotlib.gridspec import GridSpec nrows, ncols = 2, 2 widths = 1, 2 # Experiment changing 2 to 3,4,5, etc heights = 1, 2 # Experiment changing 2 to 3,4,5, etc G = GridSpec(nrows, ncols, width_ratios = widths, height_ratios=heights) aspect= 'auto' # Experiment with aspect=1 ax = plt.subplot(G[0,0], aspect=aspect); ax.plot(X,C,X,S); ax.grid(1) ax = plt.subplot(G[1,0], aspect=aspect); ax.plot(X,C,X,S); ax.grid(1) ax = plt.subplot(G[0,1], aspect=aspect); ax.plot(X,C,X,S); ax.grid(1) ax = plt.subplot(G[1,1], aspect=aspect); ax.plot(X,C,X,S); ax.grid(1) plt.show(); # - # Using gridspec, you can specify any layout, the only difficulty being to be able to express what you want to achieve. # # Ticks and labels [Back to TOC] # # We have already manipulated ticks and tick labels in the previous sections but we only slighlty modified them. 
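# Locators (introduced just below) decide *where* ticks go, while formatters decide *how their labels read*. As a hedged sketch of the formatter side, here is one way to label multiples of π/2 with a `FuncFormatter`; the `pi_label` helper is illustrative and not part of the original notebook (it is also one possible starting point for the typography exercise at the end of this section):

# +
from matplotlib.ticker import MultipleLocator, FuncFormatter

def pi_label(value, pos):
    # Express the tick position as a multiple of pi/2.
    n = int(np.round(2 * value / np.pi))
    labels = {-2: "-π", -1: "-π/2", 0: "0", 1: "π/2", 2: "π"}
    return labels.get(n, "%dπ/2" % n)

fig, ax = plt.subplots()
ax.plot(X, C)
ax.xaxis.set_major_locator(MultipleLocator(np.pi / 2))
ax.xaxis.set_major_formatter(FuncFormatter(pi_label))
plt.show()
# -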
In fact, there exists a failry extented machinery in matplotlib that allows you to put ticks at any position using any formats (for the label). Let's start with a very simple example. # + from matplotlib.ticker import MultipleLocator fig = plt.figure(figsize=(10,5)) ax = plt.subplot() ax.set_xlim(0,10); ax.set_ylim(0,5); ax.xaxis.set_major_locator(MultipleLocator(1.0)) ax.yaxis.set_major_locator(MultipleLocator(1.0)) plt.show(); # - # This output of this first example is not fundamentally different from what we've seen so far. But the way to obtain this result is different because we use a `MultipleLocator` descriptor that instructs matplotlib to put major ticks at every unit ( `1.0` ) on the x and y axis. But we can also do the same for minor ticks. # + from matplotlib.ticker import MultipleLocator fig = plt.figure(figsize=(10,5)) ax = plt.subplot() ax.set_xlim(0,10) ax.set_ylim(0,5) ax.xaxis.set_major_locator(MultipleLocator(1.0)) ax.yaxis.set_major_locator(MultipleLocator(1.0)) ax.xaxis.set_minor_locator(MultipleLocator(0.1)) ax.yaxis.set_minor_locator(MultipleLocator(0.1)) plt.show(); # - # We can also modify the labels under ticls. Let's make thme bold and white on black. # + from matplotlib.ticker import MultipleLocator fig = plt.figure(figsize=(10,5)) ax = plt.subplot() ax.set_xlim(0,10); ax.set_ylim(0,5); ax.xaxis.set_major_locator(MultipleLocator(1.0)); ax.yaxis.set_major_locator(MultipleLocator(1.0)); ax.xaxis.set_minor_locator(MultipleLocator(0.1)); ax.yaxis.set_minor_locator(MultipleLocator(0.1)); for label in ax.yaxis.get_ticklabels() + ax.xaxis.get_ticklabels(): label.set_fontweight("bold"); label.set_fontsize("x-small"); label.set_color("white"); label.set_bbox(dict(facecolor='black', linewidth=1, edgecolor='0.0', pad=2)) plt.show(); # - # # --- # # # # Exercises [Back to TOC] # ## Better typography # # We used π/2 to display pi over two to but it would be better to display $\frac{\pi}{2}$. How would you do modify the tick labels such a to obtain the same output as in the figure below? # # **Hint:** You can edit this notebook cell to see how I wrote $\frac{\pi}{2}$. # # # ## Better style # # Starting from the code template below, you need to write the subplot function to achieve the same result as the figure below. The only difficulty are the arrows at end of x and y axis. To plot them, you can plot them using specific [markers](https://matplotlib.org/stable/api/markers_api.html). # # # + raise Exception("!!! To be completed") import numpy as np import matplotlib.pyplot as plt def subplot(index, title): # To be completed return ax fig = plt.figure(figsize=(13,5), dpi=300) X = np.linspace (-4,4,200) subplot(1, "y = cos(x)").plot(X, np.cos(X), "C1") subplot(2, "y = sin(x)").plot(X, np.sin(X), "C1") subplot(3, "y = tan(x)").plot(X, np.tan(X), "C1") subplot(4, "y = cosh(x)").plot(X, np.cosh(X), "C1") subplot(5, "y = sinh(x)").plot(X, np.sinh(X), "C1") subplot(6, "y = tanh(x)").plot(X, np.tanh(X), "C1") plt.show() # - # ## Playing with settings # # Let's wrap everything we've seen so far in order to produce the figure below. To achieve such results, you'll need to set some defaults settings but you'll also need to tweak individual elements once you've created the figure. # # # ---- # # **Copyright (c) 2021 ** # This work is licensed under a [Creative Commons Attribution 4.0 International License](http://creativecommons.org/licenses/by/4.0/). #
# Code is licensed under a [2-Clauses BSD license](https://opensource.org/licenses/BSD-2-Clause) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # -*- coding: utf-8 -*- import pandas as pd import numpy as np import time import progressbar from scipy import stats filenames = ['../../data/bookcorssing_r05.csv', '../../data/bookcorssing_r10.csv', '../../data/audioscrobbler.csv', '../../data/amazonbook.csv', '../../data/amazoncd.csv', '../../data/amazonelectronic.csv', '../../data/amazonmovie.csv', '../../data/facebooklike_msg.csv', '../../data/algorithm_r05.csv', '../../data/artificialintelligence_r05.csv', '../../data/programminglanguage_r05.csv' ] threshs = [625, 625, 85, 3578, 990, 278, 785, 100, 550, 725, 675] blocks = [0,0,0,0,0,0,0,0,0,0,0] # + code_folding=[] def get_valid_data(filename,block): ''' first ,kick out invalid data -- rating time less than thresh ''' #allData=pd.read_csv('BX-Book-Ratings.csv') #print(allData.columns ) allData=dict() pos = 0 with open(filename,'r') as f: # filter the first line f.readline() lines=f.readlines() # traverse and filter for idx,line in enumerate(lines): if block == 1: dataInString=line.strip().split(';') # list type else: dataInString=line.strip().split(',') # list type for index,data in enumerate(dataInString): # userName ISBN score dataInString[index]=data.replace('\"','') if not allData.get(dataInString[1]) : # create the user's list if he does not exist allData[dataInString[1]]=[] allData[dataInString[1]].append(float(dataInString[2])) # miu.append( float(dataInString[2]) ) return allData '''calculate miu1 and miu2 using allData but predict only with valid users''' # + code_folding=[] def get_valid_user(allData, thresh): # %% cell1 ''' pick out valid user ''' #num=0 miu=[] # to store all data rateFreq=dict() # key is rate and value is its frequency rateFreq[userName][score]=frequency for key in allData.keys(): # userName if len(allData[key])> thresh: # num+=1 # create the dict for each valid user if not rateFreq.get( key ): rateFreq[ key ]= dict() # create the dict entry for each score for score in allData[key]: if not rateFreq[key].get(score): rateFreq[ key ][ score ]=0 rateFreq[ key ][ score ]+=1 miu.append(score) return rateFreq, miu # + code_folding=[] def cal_para_multiple(rateFreq, miu, write_multiple): # %% cell2 '''calculate common para''' miu1=np.mean(miu); theta1=np.std(miu) # four direction for miu2 and theta2 # deltamiu=[0.5,-0.5]; theta=[2,1,-1,-2] cp = [(0,1), (0,-1), (1,-1), (-1,1)] write=open(write_multiple,'w'); N=8*len(rateFreq) p = progressbar.ProgressBar(maxval=N) p.start() progress=0 userWeight=[None]*8 # list of 4 ele for i in range(2): for j in range(4): deltamiu = cp[j][0] theta = cp[j][1] miu2=miu1+deltamiu theta2=theta1+theta userWeight[i*4+j]=dict() # rateFreq[userName][score]=frequency # userScore[score]=frequency '''test''' print("************************************",file=write) print('i={0},j={1}'.format(i,j),file=write ) sumMse_all = 0.0 for user,userScore in rateFreq.items(): # cal MSE and choose the best weight bestWeight=0 minMse=float('inf'); '''对每个user 遍历一遍weight 把每个数据点都相加''' for weight in np.arange(0, 1.1 ,0.1): # arange exclude endPoint while linspace includes defaultly sumMse=0 # userScore[score]=frequency '''对于每个user,他对每个score的mse要累计起来''' '''这里需要check下data''' for score,freq in userScore.items(): sumMse+=(( weight*stats.norm(miu1, 
theta1).cdf(score)+ (1-weight)*stats.norm(miu2,theta2).cdf(score)- freq ) )**2 '''test''' print('user:{0},weight:{1},mse:{2}'.format(user,weight,sumMse),file=write ) if sumMse < minMse: minMse=sumMse bestWeight=weight sumMse_all += minMse '''test''' print('user:{0},minMse:{1}'.format(user,minMse),file=write ) userWeight[i*4+j][user]=bestWeight; # change here progress+=1 p.update(progress) print(sumMse_all/len(rateFreq.keys())) print(sumMse_all/len(rateFreq.keys()),file=write ) print(userWeight,file=write) write.close() p.finish() # + code_folding=[] def cal_para_single(rateFreq, miu, write_single): # %% cell2 '''calculate common para''' miu1=np.mean(miu); theta1=np.std(miu) # four direction for miu2 and theta2 # deltamiu=[0.5,-0.5]; theta=[2,1,-1,-2] cp = [(0,1), (0,-1), (1,-1), (-1,1)] write=open(write_single,'w'); N=8*len(rateFreq) p = progressbar.ProgressBar(maxval=N) p.start() progress=0 userWeight=[None]*8 # list of 4 ele for i in range(2): for j in range(1): userWeight[i*4+j]=dict() # rateFreq[userName][score]=frequency # userScore[score]=frequency '''test''' print("************************************",file=write) print('i={0},j={1}'.format(i,j),file=write ) sumMse_all = 0.0 for user,userScore in rateFreq.items(): # cal MSE and choose the best weight bestWeight=0 minMse=float('inf'); '''对每个user 遍历一遍weight 把每个数据点都相加''' for weight in np.arange(0, 1.1 ,0.1): # arange exclude endPoint while linspace includes defaultly sumMse=0 # userScore[score]=frequency '''对于每个user,他对每个score的mse要累计起来''' '''这里需要check下data''' for score,freq in userScore.items(): sumMse+=((weight*stats.norm(miu1, theta1).cdf(score)- freq ))**2 '''test''' print('user:{0},weight:{1},mse:{2}'.format(user,weight,sumMse),file=write ) if sumMse < minMse: minMse=sumMse bestWeight=weight sumMse_all += minMse '''test''' print('user:{0},minMse:{1}'.format(user,minMse),file=write ) userWeight[i*4+j][user]=bestWeight; # change here progress+=1 p.update(progress) print(sumMse_all/len(rateFreq.keys()),) print(sumMse_all/len(rateFreq.keys()),file=write) print(userWeight,file=write) write.close() p.finish() # - # # Data Regression item # ## I=0 bookcorssing_r05 i = 0 c = filenames[i] c = c.replace('ratings_', '') s_out = c.replace('.csv','')+'_single.txt' m_out = c.replace('.csv','')+'_multiple.txt' print(s_out, m_out) a_dict, a_miu = get_valid_user(get_valid_data(filenames[i], blocks[i]), threshs[i]) cal_para_single(a_dict, a_miu, s_out) cal_para_multiple(a_dict, a_miu, m_out) # ## I=1 bookcorssing_r10 i = 1 c = filenames[i] c = c.replace('ratings_', '') s_out = c.replace('.csv','')+'_single.txt' m_out = c.replace('.csv','')+'_multiple.txt' print(s_out, m_out) a_dict, a_miu = get_valid_user(get_valid_data(filenames[i], blocks[i]), threshs[i]) cal_para_single(a_dict, a_miu, s_out) cal_para_multiple(a_dict, a_miu, m_out) # ## I=2 audioscrobbler i = 3 c = filenames[i] c = c.replace('ratings_', '') s_out = c.replace('.csv','')+'_single.txt' m_out = c.replace('.csv','')+'_multiple.txt' print(s_out, m_out) a_dict, a_miu = get_valid_user(get_valid_data(filenames[i], blocks[i]), threshs[i]) cal_para_single(a_dict, a_miu, s_out) cal_para_multiple(a_dict, a_miu, m_out) # ## I-3 amazonbook i = 3 c = filenames[i] c = c.replace('ratings_', '') s_out = c.replace('.csv','')+'_single.txt' m_out = c.replace('.csv','')+'_multiple.txt' print(s_out, m_out) a_dict, a_miu = get_valid_user(get_valid_data(filenames[i], blocks[i]), threshs[i]) cal_para_single(a_dict, a_miu, s_out) cal_para_multiple(a_dict, a_miu, m_out) # ## I=4 amazoncd i = 4 c = filenames[i] 
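# Each of these per-dataset cells follows the same recipe: read the ratings
# file, keep only users with more than threshs[i] ratings, build a per-user
# score-frequency table, and then fit, for every user, the mixture weight
# w in {0.0, 0.1, ..., 1.0} that minimises
#   sum_over_scores( w*Phi(score; mu1, sigma1) + (1-w)*Phi(score; mu2, sigma2) - freq )**2
# where Phi is the normal CDF, (mu1, sigma1) are the mean and std of all
# retained ratings and (mu2, sigma2) are shifted versions of them;
# cal_para_single does the same with only the single component w*Phi(score; mu1, sigma1).
# Per-user weights and MSEs are written to the *_single.txt / *_multiple.txt
# files whose names are printed at the top of each cell.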
c = c.replace('ratings_', '') s_out = c.replace('.csv','')+'_single.txt' m_out = c.replace('.csv','')+'_multiple.txt' print(s_out, m_out) a_dict, a_miu = get_valid_user(get_valid_data(filenames[i], blocks[i]), threshs[i]) cal_para_single(a_dict, a_miu, s_out) cal_para_multiple(a_dict, a_miu, m_out) # ## I=5 amazonelectronic i = 5 c = filenames[i] c = c.replace('ratings_', '') s_out = c.replace('.csv','')+'_single.txt' m_out = c.replace('.csv','')+'_multiple.txt' print(s_out, m_out) a_dict, a_miu = get_valid_user(get_valid_data(filenames[i], blocks[i]), threshs[i]) cal_para_single(a_dict, a_miu, s_out) cal_para_multiple(a_dict, a_miu, m_out) # ## I=6 amazonmovie i = 6 c = filenames[i] c = c.replace('ratings_', '') s_out = c.replace('.csv','')+'_single.txt' m_out = c.replace('.csv','')+'_multiple.txt' print(s_out, m_out) a_dict, a_miu = get_valid_user(get_valid_data(filenames[i], blocks[i]), threshs[i]) cal_para_single(a_dict, a_miu, s_out) cal_para_multiple(a_dict, a_miu, m_out) # ## I=7 facebooklike_msg i = 7 c = filenames[i] c = c.replace('ratings_', '') s_out = c.replace('.csv','')+'_single.txt' m_out = c.replace('.csv','')+'_multiple.txt' print(s_out, m_out) a_dict, a_miu = get_valid_user(get_valid_data(filenames[i], blocks[i]), threshs[i]) cal_para_single(a_dict, a_miu, s_out) cal_para_multiple(a_dict, a_miu, m_out) # ## I=8 algorithm i = 8 c = filenames[i] c = c.replace('ratings_', '') s_out = c.replace('.csv','')+'_single.txt' m_out = c.replace('.csv','')+'_multiple.txt' print(s_out, m_out) a_dict, a_miu = get_valid_user(get_valid_data(filenames[i], blocks[i]), threshs[i]) cal_para_single(a_dict, a_miu, s_out) cal_para_multiple(a_dict, a_miu, m_out) # ## I=9 ai i = 9 c = filenames[i] c = c.replace('ratings_', '') s_out = c.replace('.csv','')+'_single.txt' m_out = c.replace('.csv','')+'_multiple.txt' print(s_out, m_out) a_dict, a_miu = get_valid_user(get_valid_data(filenames[i], blocks[i]), threshs[i]) cal_para_single(a_dict, a_miu, s_out) cal_para_multiple(a_dict, a_miu, m_out) # ## I=10 pl i = 10 c = filenames[i] c = c.replace('ratings_', '') s_out = c.replace('.csv','')+'_single.txt' m_out = c.replace('.csv','')+'_multiple.txt' print(s_out, m_out) a_dict, a_miu = get_valid_user(get_valid_data(filenames[i], blocks[i]), threshs[i]) cal_para_single(a_dict, a_miu, s_out) cal_para_multiple(a_dict, a_miu, m_out) c = "libimseti_f_m.csv" s_out = c.replace('.csv','')+'_single.txt' m_out = c.replace('.csv','')+'_multiple.txt' filename = "../data/libimseti_f_m.csv" block = 0 thresh = 100 print(s_out, m_out) a_dict, a_miu = get_valid_user(get_valid_data(filename, block), thresh) cal_para_single(a_dict, a_miu, s_out) cal_para_multiple(a_dict, a_miu, m_out) c = "libimseti_m_f.csv" s_out = c.replace('.csv','')+'_single.txt' m_out = c.replace('.csv','')+'_multiple.txt' filename = "../data/libimseti_m_f.csv" block = 0 thresh = 100 print(s_out, m_out) a_dict, a_miu = get_valid_user(get_valid_data(filename, block), thresh) cal_para_single(a_dict, a_miu, s_out) cal_para_multiple(a_dict, a_miu, m_out) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %reload_ext autoreload # %autoreload 2 # %matplotlib inline import os os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"; os.environ["CUDA_VISIBLE_DEVICES"]="0"; # ## Collect MNIST Dataset as Arrays from tensorflow.keras.datasets import mnist import numpy as np (x_train, y_train), (x_test, 
y_test) = mnist.load_data() x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train = np.expand_dims(x_train, axis=3) x_test = np.expand_dims(x_test, axis=3) # ## STEP 1: Preprocess Dataset import ktrain from ktrain import vision as vis data_aug = vis.get_data_aug( rotation_range=15, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1) classes = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine'] (trn, val, preproc) = vis.images_from_array(x_train, y_train, validation_data=None, val_pct=0.1, random_state=42, data_aug=data_aug, class_names=classes) # ## STEP 2: Load Model and Wrap in `Learner` # Using a LeNet-style classifier model = vis.image_classifier('default_cnn', trn, val) learner = ktrain.get_learner(model, train_data=trn, val_data=val, batch_size=128) # ## STEP 3: Find Learning Rate learner.lr_find(show_plot=True, max_epochs=3) # ## STEP 4: Train Model # # We only train for three epochs for demonstration purposes. learner.fit_onecycle(1e-3, 3) learner.validate(class_names=preproc.get_classes()) learner.view_top_losses(n=1) # ## Make Predictions predictor = ktrain.get_predictor(learner.model, preproc) predictor.predict(x_test[0:1])[0] np.argmax(predictor.predict(x_test[0:1], return_proba=True)[0]) predictor.save('/tmp/my_mnist') p = ktrain.load_predictor('/tmp/my_mnist') p.predict(x_test[0:1])[0] predictions = p.predict(x_test) import pandas as pd df = pd.DataFrame(zip(predictions, y_test), columns=['Predicted', 'Actual']) df.head() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Convergencia salarial entre gerentes y subordinados en el mercado laboral argentino: una aproximación cuantitativa. # # + [markdown] slideshow={"slide_type": "slide"} # ### Apéndice # # **Importamos las librerías necesarias** # + slideshow={"slide_type": "subslide"} import pandas as pd import numpy as np import statsmodels as sm import statsmodels.formula.api as smf import matplotlib.pyplot as plt import seaborn as sb # + [markdown] slideshow={"slide_type": "slide"} # **Creamos un DataFrame en Pandas con los datos de la EPH** # + slideshow={"slide_type": "subslide"} # para que lo descargue automáticamente desde mi dropbox #df = pd.read_excel('https://www.dropbox.com/s/ai7ejpw2b01ql3c/usu_individual_t217.xls?dl=1') df = pd.read_excel('C:/Users/franc/Dropbox/Econometria/EPH/usu_individual_t217.xls') # - # #### Mantenemos sólo las variables que podrían ser relevantes para el estudio df = df.filter(['REGION', 'AGLOMERADO', 'PONDERA', 'ITF', 'IPCF', 'PONDIH', 'CH04', 'CH06', 'CH08', 'CH11', 'CH12', 'CH13', 'CH14', 'NIVEL_ED', 'ESTADO', 'PP3E_TOT', 'PP3F_TOT', 'PP04A', 'PP05B2_MES', 'PP05B2_ANO', 'PP08D1', 'P21', 'PONDIIO', 'Tot_p12', 'p47T', 'PONDII', 'PP04D_COD']) # #### Necesitamos crear una variable continua que describa la cantidad de años de educación de los encuestados # # Para ello vamos a crear una variable llamada EDUC, a la que le asignaremos 6 años para primario completo, 12 años para secundario completo, 14 para terciario completo y 16 para universitario completo, etc. 
# # **Vamos a suponer** que la escala de estudios está determinada de la siguiente forma: # # |Nivel|Completo|Años est.| # |-----|--------|---------| # |Preescolar|No|0| # |Preescolar|Sí|0| # |Primario|No|CH14| # |Primario|Sí|6| # |EGB|No|CH14| # |EGB|Si|9| # |Secundario|No|CH14+7| # |Secundario|Si|12| # |Polimodal|No|CH14+9| # |Polimodal|Si|12| # |Terciario|No|CH14+12[1]| # |Terciario|Si|14| # |Universitario|No|CH14+12| # |Universitario|Si|17| # |Posgrado|No|CH14+17| # |Posgrado|Si|20| # # [1]Hay que revisar a los que tienen valores de terciarios demasiado elevados. # # Vamos a descartar a quienes recibieron educación especial y a los que responden NS/NR en CH13 y CH14 # + # Creamos la variable EDUC df['EDUC'] = 0 # Eliminamos los que no corresponden # Educación especial df = df.drop(df[df.CH12==9].index) # Ns./Nr. si finalizó el nivel df = df.drop(df[df.CH13==9].index) # Educación especial df = df.drop(df[df.CH14==98].index) # Ns./Nr. cuál fue el último nivel aprobado df = df.drop(df[df.CH14==99].index) # Reemplazamos los NaN's de CH14 por 0's df['CH14'].fillna(0, inplace=True) # Quitamos a los menores de 10 años df = df.drop(df[df.ESTADO==4].index) # - # Ahora asignamos la cantidad de años correspondientes acorde a la tabla previamente acordada # + # preescolar incompleto y completa df.loc[(df.CH12==1) & (df.CH13==2), 'EDUC'] = 0 df.loc[(df.CH12==1) & (df.CH13==1), 'EDUC'] = 0 # primaria incompleta y completa df.loc[(df.CH12==2) & (df.CH13==2), 'EDUC'] = df['CH14'] df.loc[(df.CH12==2) & (df.CH13==1), 'EDUC'] = 6 # egb incompleto y completo df.loc[(df.CH12==3) & (df.CH13==2), 'EDUC'] = df['CH14'] df.loc[(df.CH12==3) & (df.CH13==1), 'EDUC'] = 9 # secundario incompleto y completo df.loc[(df.CH12==4) & (df.CH13==2), 'EDUC'] = df['CH14'] + 6 df.loc[(df.CH12==4) & (df.CH13==1), 'EDUC'] = 12 # polimodal incompleto y completo df.loc[(df.CH12==5) & (df.CH13==2), 'EDUC'] = df['CH14'] + 9 df.loc[(df.CH12==5) & (df.CH13==1), 'EDUC'] = 12 # terciario incompleto y completo df.loc[(df.CH12==6) & (df.CH13==2), 'EDUC'] = df['CH14'] + 12 df.loc[(df.CH12==6) & (df.CH13==1), 'EDUC'] = 14 # universitario incompleto y completo df.loc[(df.CH12==7) & (df.CH13==2), 'EDUC'] = df['CH14'] + 12 df.loc[(df.CH12==7) & (df.CH13==1), 'EDUC'] = 17 # posgrado incompleto y completo df.loc[(df.CH12==8) & (df.CH13==2), 'EDUC'] = df['CH14'] + 17 df.loc[(df.CH12==8) & (df.CH13==1), 'EDUC'] = 20 # - df # ###### Experiencia # # Consideramos que la experiencia es igual a # # $$EXPER = EDAD - EDUC - 6$$ df['EDAD'] = df['CH06'] df['EXPER'] = df['EDAD']-df['EDUC']-6 # Como podemos ver, tenemos casos donde la experiencia es igual a **-6**. Para evitar problemas con los resultados, vamos a eliminar a todos los que tengan experiencia menor a 0. df = df.drop(df[df.EXPER<0].index) # ### Variable de gerentes # # Construimos la variable en base al Clasificador Nacional de Ocupaciones. Según el CNO, la variable *'PP04_COD'* está compuesta por: # - Primeros 2 dígitos: Carácter ocupacional # - 3° dígito: **Jerarquía ocupacional** # - 4° dígito: Tecnología ocupacional # - 5° dígito: Calificación ocupacional # # Dentro de la Jerarquía Ocupacional nos encontramos con los siguientes valores: # - 0: **Dirección** # - 1: Cuenta propia # - 2: **Jefes** # - 3: Trabajadores asalariados # # Las observaciones que nos interesan a nosotros son las que tienen el valor 0 y el 2. 
Empezamos eliminando las observaciones que no informan jerarquía df = df.dropna(subset=['PP04D_COD']) # 3Esto disminuye nuestras observaciones a 24.039 # A continuación, construimos dos variables nuevas: # - 'CAT_OCUP' que toma los 2 primeros valores del CNO para conocer el carácter ocupacional # - 'JER_OCUP' que toma el tercer dígito a fin de conocer la jerarquía. # # Convertimos el tipo de datos a entero (integer) para poder usarlo como dummy fácilmente. # + df['CAT_OCUP'] = df['PP04D_COD'].astype(str).str[0:2].astype(float).astype(int) df['JER_OCUP'] = df['PP04D_COD'].astype(str).str[2:3].astype(float).astype(int) # Eliminamos los que no tienen datos (JER_OCUP>3) y a los que estan por cuenta propia (JER_OCUP=1) df = df.drop(df[df.JER_OCUP>3].index) df = df.drop(df[df.JER_OCUP==1].index) # - # #### Variables Dummy # Vamos a generar una variable dummy **'CEO'** que sea 1 para directores (CEO's) y que sea 0 para jefes. Por otra parte, vamos a generar una variable **'JEFES'** que tiene valor 1 para Directores y Jefes y 0 para asalariados # + # variable categorica df['JER_OCUP'] = df['JER_OCUP'].astype('category') # Directores (y no directores) df['CEO'] = 0 df.loc[(df.JER_OCUP==0), 'CEO'] = 1 # Jefes y directores (y blue collars) df['WC'] = 0 df.loc[(df.JER_OCUP==0), 'WC'] = 1 df.loc[(df.JER_OCUP==2), 'WC'] = 1 # - # para corroborar la creacion de la dummy '''df.filter(['JER_OCUP', 'CEO', 'WC'])''' # ## Regresión # # Vamos a ejecutar distintas regresiones, jugando con las dummies. Para ello necesitamos primero crear la variable exper2 tal que: # # $$EXPER2 = EXPER^2$$ # # Y generar la variable LWAGE tal que: # # $$LWAGE = ln(P21)$$ # + df['EXPER2'] = df['EXPER']**2 df['OCUPADO'] = 0 df.loc[(df.ESTADO==1), 'OCUPADO'] = 1 df.loc[(df.ESTADO==2), 'OCUPADO'] = 0 # - # Creamos un nuevo dataframe con quienes tienen ingresos unicamente, es decir, p21>0. También eliminamos a los que tienen menos que 0, ya que se trata de errores de input. # # Nota: Si bien en el modelo de mincer esto no se hace de manera _directa_, el resultado es el mismo ya que, los logaritmos de 0 seran NaN's y no podran ser utilizados en la regresion. Para evitar mensajes de error en Python, descartamos a quienes no tienen ingresos. # + #df1 = df.loc[df['P21'] > 0] df = df.drop(df[df.P21<=0].index) df['LWAGE'] = np.log(df['P21']) # - # A continuación vamos a conocer los betas de las variables de control de todos los CEOs, JEFEs y Asalariados, excluidos los que están por cuenta propia, a fin de conocer el _baseline_. # + modelo = smf.ols("df['LWAGE'] ~ df['EDUC']+ df['EXPER'] + df['EXPER2']", data=df ) modelo_reg = modelo.fit() modelo_reg.summary() # - # #### Interpretación del baseline # # Como podemos observar, si incluimos a todos los Directores, Jefes y Asalariados, obtenemos que: # - __Educación__: es altamente significativa $(t=49.5)$, y cada año extra de estudio aumenta el salario en un 8.1% # - __Experiencia__: es altamente significativa $(t=29.66)$, y cada año extra de experiencia aumenta el salario en un 4.4% # - __Experiencia al cuadrado__: el valor de -0.0006 explica que a cada año extra de experiencia tiene un efecto menor que el anterior. Al mismo tiempo, también implica que es casi lineal la relación. # # # ### CEO's vs Jefes _(White collars)_ # # Para continuar, descartaremos del DataFrame a los asalariados comunes y utilizaremos la dummy "CEO" previamente creada para captar el efecto que tiene el rango de director sobre el salario. 
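# Put differently, the specification estimated in the next cells is a standard Mincer wage equation augmented with the CEO dummy:
#
# $$LWAGE = \beta_0 + \beta_1 EDUC + \beta_2 EXPER + \beta_3 EXPER^2 + \beta_4 CEO + u$$
#
# Because the dependent variable is in logs, the coefficient on a dummy such as CEO is only an approximate percentage effect; the exact effect is $100\,(e^{\beta_4}-1)\%$, which is worth keeping in mind when reading the estimates below (for coefficients below roughly 0.1 the approximation is close).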
# + df1 = df.drop(df[df.JER_OCUP==3].index) # esto es lo mismo que df1= df[df.WC==1] modelo2 = smf.ols( "df1['LWAGE'] ~ df1['EDUC'] + df1['EXPER'] + df1['EXPER2'] + df1['CEO']", data=df1 ) modelo2_reg = modelo2.fit() modelo2_reg.summary() # - # Como podemos observar, el modelo estima que por cada año extra de educación que tienen los CEOs y los Gerentes, el salario de los mismos crece un 7,76%. Del mismo modo, por cada año extra de experiencia, ganan un 4,36% extra. En ambos casos, tenemos un valor $t>|2|$, lo que implica que son variables significativas. Al igual que con el _baseline_, obtenemos que la concavidad de la experiencia es casi nula, es decir, es una relación casi lineal. # # Respecto del hecho de ser o no CEO obtenemos un resultado interesante: los CEO's ganan, en promedio, un 8,73% menos que los gerentes. # # Antes de sacar cualquier tipo de conclusión, deberíamos encarar el problema desde distintas aristas. Para ello, lo primero que vamos a hacer es achicar el sample de las observaciones y correr las regresiones de los CEOs y de los Gerentes por separado. # # **Directivos** # + modelo3 = smf.ols( "df1[df1.CEO==1]['LWAGE'] ~ df1[df1.CEO==1]['EDUC'] + df1[df1.CEO==1]['EXPER'] + df1[df1.CEO==1]['EXPER2']", data=df1[df1.CEO==1] ) modelo3_reg = modelo3.fit() modelo3_reg.summary() # - # **Gerentes** # + modelo4 = smf.ols( "df1[df1.CEO==0]['LWAGE'] ~ df1[df1.CEO==0]['EDUC'] + df1[df1.CEO==0]['EXPER'] + df1[df1.CEO==0]['EXPER2']", data=df1[df1.CEO==0] ) modelo4_reg = modelo4.fit() modelo4_reg.summary() # - # **Blue Collars (asalariados)** # + modelo1 = smf.ols( "df[df.JER_OCUP==3]['LWAGE'] ~ df[df.JER_OCUP==3]['EDUC'] + df[df.JER_OCUP==3]['EXPER'] + df[df.JER_OCUP==3]['EXPER2']", data=df[df.JER_OCUP==3] ) modelo1_reg = modelo1.fit() modelo1_reg.summary() # - # **White Collars vs Blue Collars utilizando Dummy** # + # Corremos la regresión con la dummy WC modelo_wcd = smf.ols( "df['LWAGE'] ~ df['EDUC'] + df['EXPER'] + df['EXPER2'] + df['WC']", data=df ) reg_wdc = modelo_wcd.fit() reg_wdc.summary() # - # **Directivos** # + modelo_wcd2 = smf.ols( "df[df.WC==1]['LWAGE'] ~ df[df.WC==1]['EDUC'] + df[df.WC==1]['EXPER'] + df[df.WC==1]['EXPER2']", data=df[df.WC==1] ) reg_wdc2 = modelo_wcd2.fit() reg_wdc2.summary() # - # **Asalariados** # + modelo_wcd3 = smf.ols( "df[df.WC==0]['LWAGE'] ~ df[df.WC==0]['EDUC'] + df[df.WC==0]['EXPER'] + df[df.WC==0]['EXPER2']", data=df[df.WC==0] ) reg_wdc3 = modelo_wcd3.fit() reg_wdc3.summary() # - # **Histograma y Distribución de _LWAGE_ para Directivos** sb.distplot( df1[df1.CEO==1]['LWAGE'], bins=15, kde_kws={'color': 'g', 'label':'Mean: 9.69 \n Std:0.784'}, axlabel='LWAGE CEO=1' ) # **Histograma y Distribución de _LWAGE_ para Gerentes** sb.distplot( df1[df1.CEO==0]['LWAGE'], bins=15, kde_kws={'color': 'R', 'label':'Mean: 9.8 \n Std:0.669'}, axlabel='LWAGE CEO=0' ) df1[df1.CEO==0]['LWAGE'].describe() df1[df1.CEO==0]['LWAGE'].describe() import this # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Speech Emotion Recognition - SVM Classifier # # A project for the French Employment Agency # # Telecom ParisTech 2018-2019 # ## I. Context # The aim of this notebook is to set up all speech emotion recognition preprocessing and audio features extraction. 
# # ### Audio features: # The complete list of the implemented short-term features is presented below: # - **Zero Crossing Rate**: The rate of sign-changes of the signal during the duration of a particular frame. # - **Energy**: The sum of squares of the signal values, normalized by the respective frame length. # - **Entropy of Energy**: The entropy of sub-frames' normalized energies. It can be interpreted as a measure of abrupt changes. # - **Spectral Centroid**: The center of gravity of the spectrum. # - **Sprectral Spread**: The second central moment of the spectrum. # - **Spectral Entropy**: Entropy of the normalized spectral energies for a set of sub-frames. # - **Spectral Flux**: The squared difference between the normalized magnitudes of the spectra of the two successive frames. # - **Spectral Rolloff**: The frequency below which 90% of the magnitude distribution of the spectrum is concentrated. # - **MFCCS**: Mel Frequency Cepstral Coefficients form a cepstral representation where the frequency bands are not linear but distributed according to the mel-scale. # # Global Statistics are then computed on upper features: # - **mean, std, med, kurt, skew, q1, q99, min, max and range** # # ### Data: # **RAVDESS**: The Ryerson Audio-Visual Database of Emotional Speech and Song (RAVDESS) contains 7356 files (total size: 24.8 GB). The database contains 24 professional actors (12 female, 12 male), vocalizing two lexically-matched statements in a neutral North American accent. Speech includes *calm*, *happy*, *sad*, *angry*, *fearful*, *surprise*, and *disgust* expressions, and song contains calm, happy, sad, angry, and fearful emotions. Each expression is produced at two levels of emotional intensity (normal, strong), with an additional neutral expression. (https://zenodo.org/record/1188976#.XA48aC17Q1J) # ## II. General import # + ### General imports ### from glob import glob import os import pickle import itertools import pandas as pd import numpy as np ### Warning import ### import warnings warnings.filterwarnings('ignore') ### Graph imports ### import matplotlib.pyplot as plt ### Sklearn imports ### from sklearn.utils import shuffle from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import StandardScaler from sklearn.model_selection import KFold from sklearn.model_selection import GridSearchCV from sklearn.svm import SVC from sklearn.decomposition import PCA from sklearn.feature_selection import SelectKBest from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score # - # ## II. Import data # Load datas from pickle [features, labels] = pickle.load(open("/Users/aryasoni/Documents/GitHub/AI-Interview/Audio/Dataset/[RAVDESS][HAP-SAD-NEU-ANG-FEA-DIS-SUR][GLOBAL_STATS].p", "rb")) # ## III. Train and test data set # + # Build Train and test dataset X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.2, random_state=123) # Encode Label from categorical to numerical lb = LabelEncoder() lb.fit(y_train) y_train, y_test = lb.transform(y_train), lb.transform(y_test) # - # ## IV. Scale features # Scale train and test dataset scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) # ## V. 
Feature selection # + # k-highest scores analysis on features Kbest = SelectKBest(k="all") selected_features = Kbest.fit(X_train, y_train) # Plot P-values plt.figure(figsize=(20, 10)) plt.plot(selected_features.pvalues_) plt.title("p-values for each features", fontsize=22) plt.xlabel("Features") plt.ylabel("P-value") plt.show() # Display Comment alpha = 0.01 print("Number of p-values > à 1% : {}".format(np.sum(selected_features.pvalues_ > alpha))) # - # Remove non-significant features X_train = X_train[:,np.where(selected_features.pvalues_ < alpha)[0]] X_test = X_test[:,np.where(selected_features.pvalues_ < alpha)[0]] # ## VI. Feature dimension reduction # + # Covariance matrix cov = pd.DataFrame(X_train).cov() # Eigen values of covariance matrix eig = np.linalg.svd(cov)[1] # Plot eigen graph fig = plt.figure(figsize=(20, 10)) plt.title('Decrease of covariance matrix eigen values', fontsize = 22) plt.plot(eig, '-*', label = "eig-value") plt.legend(loc = 'upper right') plt.show() # + # Initialize PCA pca = PCA(n_components=140) # Apply PCA on train and test set X_train = pca.fit_transform(X_train) X_test = pca.transform(X_test) # - # ## VII. Cross-Validation and hyperparameter tuning # + # Set C and Gamma parameters list G_list = [0.001, 0.005, 0.01] C_list = [1, 2, 3, 4, 5, 7, 10, 20, 50] # Set the parameters for cross-validation parameters = [{'kernel': ['rbf'], 'C': C_list, 'gamma': G_list}] # Initialize SVM model model = SVC(decision_function_shape='ovr') # Cross Validation cv = GridSearchCV(model, parameters, cv=3, verbose=0, n_jobs=-1).fit(X_train, y_train) # Print Best parameters print("Best parameters set found on train set:") print(cv.best_params_) # - # ## VIII. Best model prediction # Confusion matrix plot function def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title, fontsize=22) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. 
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.ylabel('True label', fontsize=18) plt.xlabel('Predicted label', fontsize=18) plt.tight_layout() # + # Fit best mode model = SVC(kernel='rbf', C=3, gamma=0.005, decision_function_shape='ovr').fit(X_train, y_train) # Prediction pred = model.predict(X_test) # Score score = model.score(X_test, y_test) # Reverse label encoder pred = (lb.inverse_transform((pred.astype(int).flatten()))) actual = (lb.inverse_transform((y_test.astype(int).flatten()))) # Build dataFrame df_pred = pd.DataFrame({'Actual': actual, 'Prediction': pred}) # Print Score print('Accuracy Score on test dataset: {}%'.format(np.round(100 * score,2))) # Compute confusion matrix confusion = confusion_matrix(actual, pred) # Plot non-normalized confusion matrix plt.figure(figsize=(15, 15)) plot_confusion_matrix(confusion, classes=set(actual),normalize=True, title='Confusion matrix on train set with gender differentiation') # + # Compute prediction without gender differentation PRED = list(map(lambda i:i[2:], pred)) ACTUAL = list(map(lambda i:i[2:], actual)) # Compute related prediction score SCORE = accuracy_score(ACTUAL, PRED) # Print Score print('Accuracy Score on test dataset: {}%'.format(np.round(100 * SCORE,2))) # Compute confusion matrix confusion = confusion_matrix(ACTUAL, PRED) # Plot non-normalized confusion matrix plt.figure(figsize=(15, 15)) plot_confusion_matrix(confusion, classes=set(ACTUAL),normalize=True, title='Confusion matrix on test set without gender differentiation') # - # ## IX. Save model # + # save the model to local pickle.dump(model, open('../Model/MODEL_CLASSIFIER.p', 'wb')) # Save label encoder pickle.dump(lb, open("../Model/MODEL_ENCODER.p", "wb")) # Save PCA pickle.dump(pca, open("../Model/MODEL_PCA.p", "wb")) # Save MEAN and STD of each features MEAN = features.mean(axis=0) STD = features.std(axis=0) pickle.dump([MEAN, STD], open("../Model/MODEL_SCALER.p", "wb")) # Save feature parameters stats = ['mean', 'std', 'kurt', 'skew', 'q1', 'q99'] features_list = ['zcr', 'energy', 'energy_entropy', 'spectral_centroid', 'spectral_spread', 'spectral_entropy', 'spectral_flux', 'sprectral_rolloff'] win_step = 0.01 win_size = 0.025 nb_mfcc = 12 diff = 0 PCA = True DICO = {'stats':stats, 'features_list':features_list, 'win_size':win_size, 'win_step':win_step, 'nb_mfcc':nb_mfcc, 'diff':diff, 'PCA':PCA} pickle.dump(DICO, open("../Model/MODEL_PARAM.p", "wb")) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Getting started: quantum state reconstruction using QuCumber # # Welcome to the tutorial for QuCumber, by PIQuIL. # # The purpose of this tutorial is to walk you through what QuCumber can do. QuCumber is a neural network-based package that can reconstruct a quantum wavefunction, given a data set of measurements. Once trained, QuCumber can generate new data and physical properties. We recommend that you read through our brief, but thorough, theoretical breakdown of Restricted Boltzmann Machines (RBMs) - the stochastic neural network behind QuCumber - before going through with this tutorial. # # If you'd like to skip all of this and start using QuCumber on your machine with your own data, please click [here](../tutorial.rst). 
# # ## Download the Tutorial # # If you'd like to run the tutorial on your machine, please go to our github [repository](https://github.com/MelkoCollective/QuCumber/tree/refactor-rbm). In the examples folder, you should find the following files. # # - rbm_tutorial.py # - cplx.py # - unitary_library.py # - observables_tutorial.py # - tfim1d_N10_train_samples.txt # - 2qubits_train_samples.txt # - tfim1d_N10_psi.txt # - 2qubits_train_bases.txt # - 2qubits_psi.txt # # ## Comments Before Getting Started # # This tutorial is sectioned into two parts that will walk you through the following: # # 1. Training an RBM to reconstruct a positive-real wavefunction (i.e. no phase) and then generating new data. # 2. Training an RBM to reconstruct a complex wavefunction (i.e. with a phase) and then generating new data. # #

# Before you begin, please ensure that you have installed the following:

# # - Python 3 # - numpy, matplotlib, csv (should come with conda) # - PyTorch (https://pytorch.org/) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7.9 64-bit # name: python3 # --- # # Messtechnik HS2021 - Tutorial 7 # ## Aufgabe: Datenverarbeitung für höhere Auflösung # -------------------- # Analysieren Sie den simulierten *free induction decay* ``FID.mat`` (das Format ``.mat`` entspricht eine MATLAB formatierte Datei), welcher mit $e^{-\sigma^2t^2/2}$ und dem Zerfallsparameter $\sigma = $ 1.6 MHz abklingt. Ein solcher Zerfall kann zum Beispiel dann beobachtet werden, wenn die Resonanzfrequenzen Gauss-verteilt sind. Das Signal enthält zusätzlich Rauschen, das mit einem Pseudozufallszahlengenerator (*pseudo-random number generator*) erzeugt wurde. # ----------------- # __(a)__ Versuchen Sie mit Hilfe der *Self-deconvolution*, *Zero-filling* und *Apodization* die spektrale Auflösung zu verbessern und die ursprüngliche unverbreitete Linien zurückzuerhalten. Überlagern Sie den originalen FID mit ihrer Apodisationsvariante, sowohl im Zeit- wie auch im Frequenzraum. # # * Wie viele Linien erkennen Sie im Spektrum? # * Was sind die Amplitudenverhältnisse dieser Linien? # * Geben Sie für jede Window-Funktion, welche Sie zur Apodisation verwenden, den Effekt auf die spektrale Linienbreite an (Verbreiterung/Verschmälerung). # __(i)__ Fourier Transform + Zero-filling # + import numpy as np import matplotlib.pyplot as plt from numpy.fft import fft,fftshift from scipy.io import loadmat # Load the MATLAB-formatted file data = loadmat('FID.mat',squeeze_me=True) t = data['t'] # microseconds fid = data['I1'] # arb. units # Construct frequency axis: for even Npts, (fs/Npts) increment in [-fs/2,fs/2] zerofilling = 3*len(fid) Nfreq = len(fid) + zerofilling # Points in frequency-domain = length of FID + zero-filling of length of FID dt = t[1] - t[0] # FID sampling steps nyq_freq = 1/(dt*2) # MHz freq = np.linspace(-nyq_freq,nyq_freq,Nfreq) # MHz # Get the spectrum, weight first point by 0.5 fidw = fid fidw[0] /= 2 # Get the spectrum spc = fftshift(fft(fidw,Nfreq)) spc /= max(spc) # normalize to maximum # Plot time-domain FID plt.figure(figsize=[9,4]) plt.subplot(1,2,1) plt.plot(t,fid) plt.ylabel('FID [a.u.]') plt.xlabel('Time [µs]') # Plot frequency-domain spectrum plt.subplot(1,2,2) plt.plot(freq,spc.real) plt.ylabel('Normalized spectrum [a.u.]') plt.xlabel('Frequency [MHz]') # Plot only a region of positive frequencies: since the FID has only a real component, # the positive and negative frequencies are indistinguishable plt.xlim([6,11]) plt.tight_layout() plt.show() # - # __(ii)__ Self-deconvolution # # Wenn man die Gauss'sche Envelope des Signals kompensiert mit $e^{\sigma_\text{apo}^2t^2/2}$ bekommt man ein Spektrum mit schmaleren Linien. Das führt aber zu einer Explosion des Rauschen bei spätere Zeiten. 
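# To get a feel for how strongly the noise is amplified, one can evaluate the compensating envelope exp(sigma^2 t^2 / 2) at a few times. The numbers below assume sigma = 1.6 MHz (the value given in the task sheet) and are only a rough illustration:
# +
import numpy as np

sigma_demo = 1.6  # MHz, decay parameter from the task sheet
for t_demo in [1.0, 2.0, 3.0]:  # microseconds
    gain = np.exp(sigma_demo**2 * t_demo**2 / 2)
    print('t = {:.0f} us: envelope (and noise) amplified by a factor of about {:.1e}'.format(t_demo, gain))
# -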
# + sigma = 1.6 # MHz, the decay constant as given in the tasksheet apo_sigma = sigma*1.0 # the rise constant of the apodization window # Envelope function of the FID Gaussian decay envelope_decay = np.exp(apo_sigma**2*t**2/2) # Compensation of the FID Gaussian decay fid_comp = fid*envelope_decay # Get the spectrum spc_comp = fftshift(fft(fid_comp,Nfreq)) spc_comp /= max(spc_comp) # normalize to maximum plt.figure(figsize=[18,5]) plt.subplot(131) plt.plot(t,fid,t,fid_comp) plt.xlabel('Time [µs]') plt.ylabel('FID [a.u.]') plt.legend(['Original FID','Decay-compensated FID'],frameon=False) plt.subplot(132) plt.plot(t[fid>0],np.log(fid[fid>0]),t[fid_comp>0],np.log(fid_comp[fid_comp>0])) plt.xlabel('Time [µs]') plt.ylabel('log(FID) [a.u.]') plt.legend(['Original FID','Decay-compensated FID'],frameon=False) plt.subplot(133) plt.plot(freq,spc.real,freq,spc_comp.real) plt.xlabel('Frequency [MHz]') plt.ylabel('Normalized spectrum [a.u.]') plt.legend(['Original','Processed'],loc='upper right',frameon=False) plt.xlim([6,11]) plt.ylim([-0.25,1]) plt.tight_layout() plt.show() # - # __(iii)__ Truncation # # Bei der Korrektur mit $e^{\sigma_\text{apo}^2t^2/2}$ explodiert gegen Ende des Zeitintervalls das Rauschen. Um das S/N im Frequenzbereich zu minimieren, muss man das Signal im Zeitbereich ab einem bestimmten Zeitpunkt abschneiden. # + # Signal truncation cutoff = 3 # We choose 3 us as the time where the signal has decayed fid_cut = fid_comp[t<=cutoff] # Cut the FID vector at t_cut t_cut = t[t<=cutoff] # Get the spectrum spc_cut = fftshift(fft(fid_cut,Nfreq)) spc_cut /= max(spc_cut) # normalize to maximum plt.figure(figsize=[18,5]) plt.subplot(131) plt.plot(t,fid,t_cut,fid_cut) plt.xlabel('Time [µs]') plt.ylabel('FID [a.u.]') plt.legend(['Original FID','Decay-compensated & Truncated FID'],frameon=False) plt.xlim([0,3]) plt.subplot(132) plt.plot(t[fid>0],np.log(fid[fid>0]),t_cut[fid_cut>0],np.log(fid_cut[fid_cut>0])) plt.xlabel('Time [µs]') plt.ylabel('log(FID) [a.u.]') plt.legend(['Original FID','Decay-compensated & Truncated FID'],frameon=False) plt.subplot(133) plt.plot(freq,spc.real,freq,spc_cut.real) plt.xlabel('Frequency [MHz]') plt.ylabel('Normalized spectrum [a.u.]') plt.legend(['Original','Processed'],loc='upper right',frameon=False) plt.xlim([6,11]) plt.ylim([-0.25,1]) plt.tight_layout() plt.show() # - # __(iv)__ Apodisierung # # Um Abschneide-Effekte zu verhindern, wurde ausserdem ein Hamming-Window verwendet (Gl. (8.29) im Skript). # + # Signal apodization n = np.arange(len(t_cut)) hamming_win = 0.54 + 0.46*np.cos(np.pi*n/max(n)) fid_apo = fid_cut*hamming_win # Get the spectrum spc_apo = fftshift(fft(fid_apo,Nfreq)) spc_apo /= max(spc_apo) # normalize to maximum plt.figure(figsize=[18,5]) plt.subplot(131) plt.plot(t,fid,t_cut,fid_apo,t_cut,hamming_win*max(fid_apo)) plt.xlabel('Time [µs]') plt.ylabel('FID [a.u.]') plt.legend(['Original FID','Processed FID','Scaled hamming window'],frameon=False) plt.xlim([0,3]) plt.subplot(132) plt.plot(t[fid>0],np.log(fid[fid>0]),t_cut[fid_apo>0],np.log(fid_apo[fid_apo>0])) plt.xlabel('Time [µs]') plt.ylabel('log(FID) [a.u.]') plt.legend(['Original FID','Processed FID'],frameon=False) plt.subplot(133) plt.plot(freq,spc.real,freq,spc_apo.real) plt.xlabel('Frequency [MHz]') plt.ylabel('Normalized spectrum [a.u.]') plt.legend(['Original','Processed'],loc='upper right',frameon=False) plt.xlim([6,11]) plt.tight_layout() plt.show() # - # Beobachtungen: # * Es sind 4 Spektrallinien zu erkennen. 
# * Die Amplitudenverhältnisse kann man aus der Abbildung direkt ablesen: approx. 0.35/0.6/1.0/0.8 # * Da die Signalenvelope mit $e^{-\sigma^2t^2/2}$ exakt kompensiert wird, haben die Linien im apodisierten Spektrum \emph{keine} Gauss'sche Verbreiterung mehr. # Da der FID jedoch mit einem Hamming-Window überlagert ist, werden die Linien nun wiederum verbreitert. # ----------------- # __(b)__ Normalerweise ist der Zerfallsparameter $\sigma$ nicht genau bekannt. Wie verändert sich das Spektrum, wenn Sie anstelle von $\sigma = $ 1.6 MHz von einem grösseren oder einem kleineren $\sigma$ ausgehen? # + #---------------------------------------------------------------------------------------- def process_decay_compensation(sigma, cutoff=3): """ This function performs the same analysis as in in the previous section with a given decay parameter sigma """ # Load the MATLAB-formatted file data = loadmat('FID.mat',squeeze_me=True) t = data['t'] # microseconds fid = data['I1'] # arb. units # Construct frequency axis: for even Npts, (fs/Npts) increment in [-fs/2,fs/2] zerofilling = 3*len(fid) Nfreq = len(fid) + zerofilling # Points in frequency-domain = length of FID + zero-filling of length of FID dt = t[1] - t[0] # FID sampling steps nyq_freq = 1/(dt*2) # MHz freq = np.linspace(-nyq_freq,nyq_freq,Nfreq) # MHz # Get the spectrum, weight first point by 0.5 fidw = fid fidw[0] /= 2 # Envelope function of the FID Gaussian decay apo_sigma = sigma*1.0 # the rise constant of the apodization window envelope_decay = np.exp(apo_sigma**2*t**2/2) # Compensation of the FID Gaussian decay fid_comp = fid*envelope_decay # Signal truncation fid_cut = fid_comp[t<=cutoff] # Cut the FID vector at t_cut t_cut = t[t<=cutoff] # Signal apodization n = np.arange(len(t_cut)) hamming_win = 0.54 + 0.46*np.cos(np.pi*n/max(n)) fid_apo = fid_cut*hamming_win # Get the spectrum spc_apo = fftshift(fft(fid_apo,Nfreq)) spc_apo /= max(spc_apo) # normalize to maximum return freq,spc_apo #---------------------------------------------------------------------------------------- # List of sigma values to evaluate sigmas = [1.2, 1.4, 1.6, 1.8, 2] # MHz plt.figure(figsize=[5,8]) for n,sigma in enumerate(sigmas): # Process the FID data freq,spec = process_decay_compensation(sigma) # Plot the processed spectrum plt.plot(freq,2.2*n + spec.real,color='k', linewidth=1) # Add text next to spectrum plt.annotate(f'$\sigma$ = {sigma} MHz', xy=(6,0.5+2.2*n), xytext=(1.02*6, 0.3+2.2*n), color='k') plt.yticks(ticks=1.1*np.arange(2*len(sigmas)),labels=[0,1,0,1,0,1,0,1,0,1]) plt.xlabel('Frequency [MHz]') plt.ylabel('Spectra [a.u.]') plt.xlim([6,11]) plt.tight_layout() plt.show() # - # Bei unbekanntem $\sigma$ ist die Wahl einer geeigneten Zerfallskonstante $\sigma_\text{apo}$ zur Apodisation kritisch. Zum einen kann die Zerfallskonstante unterschätzt werden. In diesem Falle wird die Gauss'sche Envelope nicht vollständig kompensiert. Demnach ist das apodisierte Zeitsignal $e^{-(\sigma^2-\sigma_\text{apo}^2)t^2/2}$ eine abfallende Funktion. Die spektralen Linien werden also breiter wenn $\sigma_\text{apo} < \sigma$ als wenn $\sigma_\text{apo} = \sigma$. # # Es kann ebenfalls vorkommen, dass $\sigma$ überschätzt wird, also dass $\sigma_\text{apo} > \sigma$. In diesem Falle wird die Gauss'sche Envelope überkompensiert. Das apodisierte Zeitsignal $e^{-(\sigma^2-\sigma_\text{apo}^2)t^2/2}$ ist demnach eine ansteigende Funktion. Im Absorptionsspektrum können daher *gestauchte* Linien auftreten, die zur Seite hin zu negativen Werten ausschlagen. 
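# The effect of mis-estimating sigma can be made concrete with the net envelope exp(-(sigma^2 - sigma_apo^2) t^2 / 2): it keeps decaying when sigma_apo < sigma and grows when sigma_apo > sigma. A minimal sketch with illustrative values:
# +
import numpy as np

sigma_true = 1.6                   # MHz, true decay parameter
t_demo = np.linspace(0, 3, 4)      # microseconds
for sigma_apo in [1.4, 1.6, 1.8]:  # under-, exactly-, over-compensated
    net_envelope = np.exp(-(sigma_true**2 - sigma_apo**2) * t_demo**2 / 2)
    print('sigma_apo = {} MHz:'.format(sigma_apo), np.round(net_envelope, 2))
# -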
Wenn Linien mit unterschiedlichen Zerfallseigenschaften überlappen und voneinander getrennt werden müssen, gilt es immer einen geeigneten Kompromiss zu finden. # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + [markdown] id="G6X5asjHzpsf" colab_type="text" # INF8770 Technologies multimédias # # Polytechnique Montréal # # Exemple du calcul de gradients et d'extraction d'arêtes # + id="A9KKNFmhzpsg" colab_type="code" colab={} import numpy as np # + [markdown] id="Zic_VnUqzpso" colab_type="text" # Matrice 3 x 3. On calculera les gradients pour la valeur du centre. # + id="j7r9SZLPzpsp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e7a34f37-4cbe-4dfa-f6f4-2ed1df5a8e54" Matrice = [[50,60,35],[10,30,15],[50,70,30]] print(Matrice) # + [markdown] id="9SS_-oUhzpsv" colab_type="text" # Calcul du gradient. # + id="Sr5mW3Anzpsw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="deb45028-3040-4025-e463-8ec1553cb0fa" Gx = 0 Gy = 0 #Filtres Sobel Sobelx= [[-1.0,0.0,1.0],[-2.0,0.0,2.0],[-1.0,0.0,1.0]] Sobely= [[-1.0,-2.0,-1.0],[0.0,0.0,0.0],[1.0,2.0,1.0]] for i in range(-1,2): for j in range(-1,2): #Convolutions. On calcule en X et Y simultanément. Gx += np.multiply(Sobelx[i+1][j+1],Matrice[1-i][1-j]) Gy += np.multiply(Sobely[i+1][j+1],Matrice[1-i][1-j]) print(Gx) print(Gy) # + [markdown] id="8VOcE0xmzps5" colab_type="text" # Force et angle du gradient: # + id="Xh_YkJBszps6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="4a29f5c6-b1ae-42c3-91f8-c021a0a93864" ForceGradient = np.sqrt(np.power(Gx,2)+np.power(Gy,2)) AngleGradient = np.arctan2(Gy,Gx)* 180 / np.pi print(ForceGradient) print(AngleGradient) # + id="8fnfIlc-zptB" colab_type="code" colab={} # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- import glob from platform import python_version import matplotlib import numpy as np import pandas as pd import sklearn import torch import heartpy import biosppy import time import matplotlib.pyplot as plt import warnings warnings.filterwarnings('ignore') from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.utils.data as data_utils import torch import scipy.io from scipy.signal import savgol_filter from scipy.signal import find_peaks import sys sys.path.append('..') from transform.indexed_transform import IndexedTransformer from transform.interpolate import SplineInterpolate from transform.pca import PCATransformer from transform.pca_tensors import PCATransformer_tensor from transform.scale import ScaleTransform from transform.series_to_curve import CurveProjection, IndicesWindow from transform.transformer import SequentialTransformer from transform.triangle_pattern import TrianglePattern from add_func_pressure import transform_data from add_func_pressure import plot_sequence from add_func_pressure import generate_sequence from add_func_pressure import to_dataframe from Model.LSTM import Model from Model.Optim import Optimization # + import heartpy as hp mat = scipy.io.loadmat('blood_pressure_data/part_3.mat') 
fig=plt.figure(figsize=(18,10)) plt.subplot(311) plt.plot(mat['p'][0][0][0][450:900]) plt.title("PPG") plt.subplot(312) plt.plot(mat['p'][0][0][1][450:900]) plt.title("ABP") plt.subplot(313) plt.plot(mat['p'][0][0][2][450:900]) plt.title("ECG II") plt.tight_layout() print('ppg signal length',len(mat['p'][0][0][0])) print('ecg signal length',len(mat['p'][0][0][2])) print('abp signal length',len(mat['p'][0][0][1])) ppg=mat['p'][0][0][0] ecg=mat['p'][0][0][2] abp=mat['p'][0][0][1] wd_ppg, m_ppg = hp.process(ppg, sample_rate = 125.0) wd_ecg, m_ecg = hp.process(ecg, sample_rate = 125.0) # + projection_step = 1 smooth_transform = SequentialTransformer( ScaleTransform(0, 1), SplineInterpolate(0.02) ) smooth_ecg=smooth_transform(ecg) smooth_ppg=smooth_transform(ppg) smooth_abp=smooth_transform(abp) # - from sklearn.preprocessing import StandardScaler seq_len = 300 x_train, y_train = transform_data(smooth_ecg, seq_len) x_train_, y_train_ = transform_data(smooth_ppg, seq_len) fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(14, 7)) plot_sequence(axes, 0, x_train, y_train,seq_len) plot_sequence(axes, 1, x_train_, y_train_,seq_len) # + model_1 =torch.load('weights/model_new_ppg_new_.ckpt') model_1.load_state_dict(torch.load('weights/model_new_weights_ppg_new_.ckpt')) # model_1 = Model(input_size=1, hidden_size=100, output_size=1)#if you train model again, use this row and comment weights downloading loss_fn_1 = nn.MSELoss() loss_fn_2 = nn.L1Loss() optimizer_1 = optim.Adam(model_1.parameters(), lr=1e-3) scheduler_1 = optim.lr_scheduler.StepLR(optimizer_1, step_size=4, gamma=0.1) optimization_1 = Optimization(model_1, loss_fn_1, loss_fn_2,optimizer_1, scheduler_1) # - optimization_1.train(x_train,x_train_, do_teacher_forcing=True) actual_1, predicted_1, test_loss_1 = optimization_1.evaluate(x_train, x_train_, future=5, batch_size=300) df_result_1 = to_dataframe(actual_1, predicted_1) df_result_1[:1000]['predicted'].plot(figsize=(18, 6)) print("Test loss %.4f" % test_loss_1) plt.plot(smooth_ppg[:1000]) plt.show() df_result_1[:1000]['predicted'].plot(figsize=(18, 6)) plt.plot(smooth_ecg[:1000]) plt.show() plt.figure(figsize=(18,6)) plt.plot(smooth_ppg[:1000]) plt.plot(smooth_ecg[:1000]) plt.show() # + prediction=df_result_1['predicted'] yhat = savgol_filter(prediction, 51, 3) peaks, _ = find_peaks(yhat, height=np.mean(yhat)+np.std(yhat)/2) peaks_ecg,_ = find_peaks(ecg, height=np.mean(ecg)+1.2*np.std(ecg)) peaks_ECG = biosppy.signals.ecg.christov_segmenter(signal=ecg, sampling_rate=125)[0] peaks_abp, _ = find_peaks(abp, height=np.mean(abp)+np.std(abp)/2) peaks_abp_min, _ = find_peaks(-1*(abp),height=np.mean(-1*(abp))+0.89*np.std(-1*(abp))) # - fig=plt.figure(figsize=(18,6)) plt.plot(yhat[:1000],label='PPG transformed') plt.plot(smooth_ecg[:1000],label='ECG') plt.legend() plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # name: Python [Root] # --- # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} import re import random from collections import OrderedDict import pandas as pd import matplotlib.pyplot as plt import seaborn as sns plt.style.use('ggplot') # %matplotlib inline import numpy as np import sklearn from sklearn import preprocessing from sklearn.ensemble import RandomForestClassifier from sklearn.pipeline import make_pipeline from sklearn import preprocessing from sklearn.svm import SVC from sklearn.grid_search import GridSearchCV, 
RandomizedSearchCV from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import chi2 from mpl_toolkits.mplot3d import Axes3D from sklearn.cluster import KMeans from sklearn.decomposition import PCA from fancyimpute import MICE import scipy # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} train = pd.read_csv('~/personal/ai/projects/titanic/train.csv') test = pd.read_csv('~/personal/ai/projects/titanic/test.csv') # + [markdown] ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} # ### Ideas for exploration # # 1. Are the classes balanced? In other words, are the number of survivals approximately proportionate to the number of deaths in the training data? # 2. Can I see a 3D vizualization of the data? # 3. What are the main sources of variation in the data? # 4. Am I dropping features that are actually relevant? # + [markdown] ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} # #### Are the classes balanced? # # From below it appears that 61.6% of the data are deaths. I'm not sure if that counts as being imbalanced, but it is probably worth looking into different cost functions. Ng's course mentioned F1 score, though sklearn has a huge number of metrics to use. # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} counts = train['Survived'].value_counts() counts[0]/ float(counts[0] + counts[1]) # + [markdown] ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} # #### Can I visualize the data in 3D? # # # + [markdown] ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} # ## Feature engineering # # I'll be joining train and test for this part # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} train.info() print('-'*40) test.info() # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} full = train.append(test, ignore_index=True) # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} full.info() # + [markdown] ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} # ### Drop Cabin, Name, PassengerId, and Ticket # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} title_regex = re.compile('''\, (\w+)\.''') title_regex.findall('Braund, Mr. 
') # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} def get_title(s): res = title_regex.findall(s) if len(res) > 0: return res[0] return 'no_title' # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} full['Title'] = full['Name'].apply(lambda s: get_title(s)) # + ein.tags=["worksheet-0"] slideshow={"slide_type": "slide"} full[['Title', 'Survived']].groupby('Title').mean() # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} full.loc[full['Title'] == 'Dona', 'Title'] = 'Mrs' full.loc[full['Title'] == 'Mme', 'Title'] = 'Mrs' full.loc[full['Title'] == 'Mlle', 'Title'] = 'Ms' full.loc[full['Title'] == 'Miss', 'Title'] = 'Ms' # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} full.drop(['Cabin', 'Name', 'PassengerId', 'Ticket'], axis=1, inplace=True) # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} mlb = preprocessing.MultiLabelBinarizer() binary_features = mlb.fit_transform(full[['Embarked', 'Sex', 'Title']].as_matrix()) binary_features full = full.drop(['Embarked', 'Sex', 'Title'], axis=1) full = full.join(pd.DataFrame(binary_features[:, 1:], columns=mlb.classes_[1:])) # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} full = full.apply(lambda x: x.apply(lambda y: float(y))) # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} full.info() # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} full_imp = MICE().complete(full.drop('Survived', axis=1).as_matrix()) full_complete = pd.DataFrame(full_imp, index=full.index, columns=full.drop('Survived', axis=1).columns) full_complete.describe() # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} X_train = full_complete[:891] X_test = full_complete[891:] y_train = full['Survived'][:891] # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} clf = make_pipeline(preprocessing.Imputer(strategy='median', missing_values='NaN'), preprocessing.StandardScaler(), PCA(n_components=3)) X_r = clf.fit_transform(X_train) # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} sinkers = X_r[np.ix_(y_train == 0, [0, 1, 2])] floaters = X_r[np.ix_(y_train == 1, [0, 1, 2])] # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} threedee = plt.figure().gca(projection='3d') threedee.scatter(sinkers[:, 0], sinkers[:, 1], sinkers[:, 2], c='r') threedee.scatter(floaters[:, 0], floaters[:, 1], floaters[:, 2], c='g') plt.rcParams['figure.figsize'] = [10, 10] plt.show() # + [markdown] ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} # # Methodology # # One of the things I am trying to figure out at this early stage is to develop at least in very general terms an algorithm of steps that can be used to approach a new dataset. I'll try to enumerate what I have come up with so far: # # 1. Explore data manually / graphically # 2. Clean data # 3. Scale data # 4. Fit a model to the data # - cross-validation # - grid search # - try a number of different estimators # 5. Evaluate the generality of that model # # ### Feature Imputation # # From what I've read, it is only acceptable to perform impution on the training data. This confuses me, because I was under the impression that predictors would need all values to be present. Anyway, random forests can handle missing values, so I'm just going to leave this out for now and come back and do impution only if necessary. # # ### Hyperparameter Grid Search With Pipeline # # The choice of parameters can be justified as follows. # # #### Impution # # All of the missing features are NaN. The choice between median and mean is more interesting. 
I have read in several places that a good rule of thumb is to impute numerical features with the median of that feature, and categorical features with the mode of that feature. # # #### Robust Scaler # # I chose to use Robust scaler because the fare features are pretty skewed. If they weren't skewed I would have used NormalScaler. # # #### SVC # # I chose to use [SVC](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html#sklearn.svm.SVC) because it is one of the algorithms covered in Andrew Ng's Coursera machine learning course. That gave me a bit more confidence in choosing parameters. I'll also return to exprementing with ensemble methods if this doesn't show promise. [This Resource](http://scikit-learn.org/stable/auto_examples/svm/plot_rbf_parameters.html) suggested using a logarithmic grid for C and gamma between $10^{-3} and 10^{3}$. I'm also trying both a linear kernel and the rbf (radial basis function) kernel. C is a regularization paremeter. Gamma can be though of as restricting the size of the radius of influence of each individual training example. # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} # params = [ # dict(imputer__strategy=['median', 'most_frequent'], # randomforestclassifier__n_estimators=[i for i in range(20, 180, 10)], # randomforestclassifier__max_features=['auto', 'sqrt', 'log2']) # ] # params = [ # dict(randomforestclassifier__n_estimators=[i for i in range(20, 180, 10)], # randomforestclassifier__max_features=['auto', 'sqrt', 'log2']) # ] # clf = make_pipeline( # # preprocessing.Imputer(missing_values='NaN'), # RandomForestClassifier(), # ) # grid_search = GridSearchCV(clf, params, verbose=1, cv=5, n_jobs=4) # grid_search.fit(X_train.fillna(-1), y_train) # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.svm import SVC from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.naive_bayes import GaussianNB from sklearn.neural_network import MLPClassifier from sklearn.lda import LDA from sklearn.metrics import accuracy_score, precision_score, recall_score import matplotlib.pyplot as plt import seaborn as sns plt.style.use('ggplot') # %matplotlib inline X_ready = preprocessing.RobustScaler().fit_transform(X_train) random_seed = 7 num_instances = len(X_ready) n_folds = 10 classifiers = [ KNeighborsClassifier(), DecisionTreeClassifier(), RandomForestClassifier(), AdaBoostClassifier(), SVC(), GaussianProcessClassifier(), GaussianNB(), MLPClassifier(), LDA(), ] results, names = [], [] for clf in classifiers: kfold = sklearn.cross_validation.KFold(n=num_instances, n_folds=n_folds, random_state=random_seed) cv_results = sklearn.cross_validation.cross_val_score(clf, X_ready, y_train, cv=kfold, scoring='accuracy') results.append(cv_results) names.append(clf.__class__.__name__) print('{}: {} ({})'.format(clf.__class__.__name__, cv_results.mean(), cv_results.std())) # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} fig = plt.figure() fig.suptitle('Algorithm Comparison') ax = fig.add_subplot(111) plt.boxplot(results) ax.set_xticklabels(names) plt.rcParams['figure.figsize'] = [20, 8] plt.show() # + [markdown] ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} # It looks like K-nearest-neighbors is doing well due to its low variance. Let's try and get a good result out of that. 
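# A minimal tuning sketch for the k-nearest-neighbours idea, written against the current scikit-learn API (sklearn.model_selection rather than the deprecated sklearn.grid_search imported elsewhere in this notebook). The parameter ranges are illustrative guesses rather than tuned values, and the cell assumes the X_train / y_train frames built above.
# + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"}
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn import preprocessing

knn_params = dict(
    kneighborsclassifier__n_neighbors=list(range(5, 55, 5)),  # must be integers
    kneighborsclassifier__weights=['uniform', 'distance'],
    kneighborsclassifier__p=[1, 2],  # Manhattan vs Euclidean distance
)
knn_pipe = make_pipeline(preprocessing.RobustScaler(), KNeighborsClassifier())
knn_search = GridSearchCV(knn_pipe, knn_params, cv=10, n_jobs=4, verbose=1)
knn_search.fit(X_train, y_train)
print(knn_search.best_score_, knn_search.best_params_)
# -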
# + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} # params = [ # dict(kneighborsclassifier__n_neighbors=np.linspace(5, 50, 10), # kneighborsclassifier__weights=['uniform', 'distance'], # kneighborsclassifier__p=[1,2,3,4], # kneighborsclassifier__leaf_size=np.linspace(10, 100, 10), # ) # ] # # # clf = make_pipeline( # preprocessing.RobustScaler(), # KNeighborsClassifier(), # ) # grid_search = GridSearchCV(clf, params, verbose=1, cv=10, n_jobs=4) # grid_search.fit(X_train, y_train) # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} params = dict(pca__n_components=range(1, len(X_train.columns), 2), svc__C=scipy.stats.expon(scale=100), svc__gamma=scipy.stats.expon(scale=.1), svc__kernel=['rbf', 'sigmoid'], svc__class_weight=['balanced', None], svc__decision_function_shape=['ovo', 'ovr'], ) # clf = make_pipeline( preprocessing.StandardScaler(), PCA(), SVC(cache_size=1024) ) grid_search = RandomizedSearchCV(clf, params, verbose=1, cv=10, n_jobs=4, n_iter=300) grid_search.fit(X_train, y_train) # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} grid_search.best_score_ # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} grid_search.best_params_ # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} from sklearn.externals import joblib joblib.dump(grid_search, '/tmp/randomsearch-scv-5.pkl') # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} grid_search = joblib.load('/tmp/randomsearch-scv-1.pkl') # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} grid_search.get_params() # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} print(X_test.info()) # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} print(X_test.isnull().sum()) # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} predictions = grid_search.predict(X_test) # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} predictions # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} output = test.join(pd.DataFrame(predictions.astype(int), columns=['Survived'])) # + ein.tags=["worksheet-0"] slideshow={"slide_type": "-"} output[['PassengerId', 'Survived']].to_csv('~/personal/ai/projects/titanic/svc-pca-predictions-4.csv', sep=',', index=False) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + id="1QwBmDT-fZg3" aux = 0 X = 0 class NodoArbol: def __init__( self , value , left = None , right = None ): self.data = value self.left = left self.right = right def recorrido( self , contador = 0): global aux global X #print( self.data , "El nivel es: ", contador ) if self.left: self.left.recorrido( contador + 1) if self.right: self.right.recorrido( contador + 1) if aux < contador: aux = contador X = self.data # + colab={"base_uri": "https://localhost:8080/"} id="xK2gvBZrS_ym" outputId="a3f14451-3159-4826-c72d-2a50a1cd0862" arbol = NodoArbol( 8 , NodoArbol( 3 , NodoArbol( 1 ) , NodoArbol( 6 , NodoArbol( 4 ) , NodoArbol( 7 ))) , NodoArbol( 10 , None , NodoArbol( 14 , NodoArbol( 13 ))) ) aux = 0 arbol.recorrido() print("\n") print("El nodo hoja más abajo es: ", X , "y su nivel es: ", aux) print("\n") # + colab={"base_uri": "https://localhost:8080/"} id="cl7nJKwdTC5d" outputId="2048f615-0729-4275-af97-1b36d9fff8b5" arbol_2 = NodoArbol( 10 , NodoArbol( 7 , NodoArbol( 3 ) , NodoArbol( 8 , NodoArbol( 5 ) , NodoArbol( 9 , None , NodoArbol( 12 )))),NodoArbol( 11 ) ) 
aux = 0 arbol_2.recorrido() print("\n") print("El nodo hoja más abajo es: ", X , "y su nivel es: ", aux) print("\n") # + colab={"base_uri": "https://localhost:8080/"} id="KEH3IIPpTLee" outputId="1d7ca23f-569b-4eea-c597-3f316d5db37c" arbol_3 = NodoArbol( 2 , NodoArbol( 7 , NodoArbol( 2 ) , NodoArbol( 6 , NodoArbol( 5 ), NodoArbol( 11 ))) , NodoArbol( 5 , None , NodoArbol( 9 , NodoArbol( 4 )))) aux = 0 arbol_3.recorrido() print("\n") print("El nodo hoja más abajo es: ", X , "y su nivel es: ", aux) print("\n") # + colab={"base_uri": "https://localhost:8080/"} id="5h9-1GmiUNIf" outputId="f1b0237b-a681-4526-fc42-ce6380c3af5b" arbol_4 = NodoArbol( 60 , NodoArbol( 41 , NodoArbol( 16 , NodoArbol( 25 )) , NodoArbol( 53 , NodoArbol( 46 , NodoArbol( 42 ) ) , NodoArbol( 55 )) ) , NodoArbol( 74 , NodoArbol( 65 , NodoArbol( 63 , NodoArbol( 62 ) , NodoArbol( 64 ) ), NodoArbol( 70 )))) aux = 0 arbol_4.recorrido() print("\n") print("El nodo hoja más abajo es: ", X , "y su nivel es: ", aux) print("\n") # --- # title: "Cross Tab in Pandas" # author: "Charles" # date: 2020-09-06 # description: "-" # type: technical_note # draft: false # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: kagglevil # language: python # name: kagglevil # --- import pandas as pd df = pd.read_csv('car data.csv') df.head() pd.crosstab(index=df["Fuel_Type"], columns=df["Transmission"]) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Visualize Solar Radation Data # # The data in this notebook come from the [National Solar Radiation Data Base](http://rredc.nrel.gov/solar/old_data/nsrdb/), specifically the [1991 - 2010 update to the National Solar Radiation Database](http://rredc.nrel.gov/solar/old_data/nsrdb/1991-2010/). The data set consists of CSV files [measured at USAF weather stations](http://rredc.nrel.gov/solar/old_data/nsrdb/1991-2010/hourly/list_by_USAFN.html) # ## Setup # # Run the `download_sample_data.py` script to download Lidar from [Puget Sound LiDAR consortium](http://pugetsoundlidar.ess.washington.edu) and other example data sets. # # From your local clone of the `datashader` repository: # ``` # # cd examples # conda env create environment.yml # source activate ds # python download_sample_data.py # ``` # Note on Windows, replace `source activate ds` with `activate ds`. 
# + import glob import os import re from collections import defaultdict from dask.distributed import Client from holoviews.operation import decimate from holoviews.operation.datashader import dynspread import dask import dask.dataframe as dd import holoviews as hv import numpy as np import pandas as pd hv.notebook_extension('bokeh') decimate.max_samples=1000 dynspread.max_px=20 dynspread.threshold=0.5 client = Client() # - NUM_STATIONS = None # adjust to and integer limit to subset of SOLAR_FILES # + SOLAR_FNAME_PATTERN = os.path.join('data', '72*', '*solar.csv') SOLAR_FILES = glob.glob(SOLAR_FNAME_PATTERN) META_FILE = os.path.join('data', 'NSRDB_StationsMeta.csv') get_station_yr = lambda fname: tuple(map(int, os.path.basename(fname).split('_')[:2])) STATION_COMBOS = defaultdict(lambda: []) for fname in SOLAR_FILES: k, v = get_station_yr(fname) STATION_COMBOS[k].append([v, fname]) choices = tuple(STATION_COMBOS) if NUM_STATIONS: choices = choices[:NUM_STATIONS] STATION_COMBOS = {k: STATION_COMBOS[k] for k in choices} files_for_station = lambda station: [x[1] for x in STATION_COMBOS[station]] station_year_files = lambda station, year: [x for x in files_for_station(station) if '_{}_'.format(year) in x] # - def clean_col_names(dframe): cols = [re.sub('_$', '', re.sub('[/:\(\)_\s^-]+', '_', col.replace('%', '_pcent_'))).lower() for col in dframe.columns] dframe.columns = cols return dframe meta_df = clean_col_names(pd.read_csv(META_FILE, index_col='USAF')) meta_df.loc[list(STATION_COMBOS)] # + keep_cols = ['date', 'y', 'x', 'julian_hr', 'year', 'usaf', 'month', 'hour'] @dask.delayed def read_one_fname(usaf_station, fname): dframe = clean_col_names(pd.read_csv(fname)) station_data = meta_df.loc[usaf_station] hour_offset = dframe.hh_mm_lst.map(lambda x: pd.Timedelta(hours=int(x.split(':')[0]))) keep = keep_cols + [col for col in dframe.columns if ('metstat' in col or col in keep_cols) and 'flg' not in col] dframe['date'] = pd.to_datetime(dframe.yyyy_mm_dd) + hour_offset dframe['month'] = dframe.date.dt.month dframe['hour'] = dframe.date.dt.hour dframe['usaf'] = usaf_station dframe['y'], dframe['x'] = station_data.nsrdb_lat_dd, station_data.nsrdb_lon_dd dframe['julian_hr'] = dframe.date.dt.hour + (dframe.date.dt.dayofyear - 1) * 24 dframe['year'] = dframe.date.dt.year dframe[dframe <= -999] = np.NaN return dframe.loc[:, keep] def read_one_station(station): '''Read one USAF station's 1991 to 2001 CSVs - dask.delayed for each each year''' files = files_for_station(station) return dd.from_delayed([read_one_fname(station, fname) for fname in files]).compute() # - example_usaf = tuple(STATION_COMBOS)[0] df = read_one_station(example_usaf) df.head() df.describe() desc = df.date.describe() desc # The next cell makes some labels for the time series groupby operations' plots and boxplots. 
direct, dif_h, glo_h = ('Direct Normal', 'Diffuse Horizontal', 'Global Horizontal',) labels = {} watt_hrs_m2_cols = [col for col in df.columns if 'wh_m_2' in col and not 'suny' in col] for col in watt_hrs_m2_cols: label_1 = "Clear Sky " if 'csky' in col else "Measured " label_2 = direct if '_dir_' in col else glo_h if '_glo_' in col else dif_h labels[col] = label_1 + label_2 labels def get_station_quantiles(station=None, grouper='julian_hr', usaf_data=None): '''Given a station name or dataframe do groupby on time bins Parameters: station: Integer name of a USAF weather station (folder names holding years' CSVs) groupby: One of "julian_hr" "hour" "month_hour" (Note the julian_hr does not standardize relative to leap years: non-leap years have 8760 hrs, leap years 8784 hrs) usaf_data: Give CSVs' dataframe instead of station name Returns: summary_df Dataframe with 25%, 50%, 75% for each column ''' if usaf_data is None: usaf_data = read_one_station(station) if grouper == 'hour': group_var = usaf_data.date.dt.hour elif grouper == 'month': group_var = usaf_data.date.dt.month elif grouper == 'month_hour': group_var = [usaf_data.date.dt.month, usaf_data.date.dt.hour] else: group_var = grouper usaf_data = usaf_data.groupby(group_var) usaf_data = usaf_data[keep_cols + watt_hrs_m2_cols] low = usaf_data.quantile(0.25) median = usaf_data.median() hi = usaf_data.quantile(0.75) median[grouper] = median.index.values median['usaf'] = station # For the low, hi quartiles subset the columns # for smaller joins - do not include 3 copies of x,y,date, etc join_arg_cols = [col for col in low.columns if col not in keep_cols] summary_df = median.join(low[join_arg_cols], rsuffix='_low').join(hi[join_arg_cols], rsuffix='_hi') return summary_df # Get Julian day of year summary for one USAF station using `pandas.DataFrame.groupby`. 
julian_summary = get_station_quantiles(station=example_usaf, grouper='julian_hr',) julian_summary.head() # The function `get_station_quantiles` returns a `DataFrame` with # * spatial coordinates `x` and `y` # * columns related to clear sky solar radiation (columns with `_csky_` as a token) # * measured solar radiation (columns without `_csky_` as a token) # * some date / time related columns helpful for `groupby` operations julian_summary.columns def plot_gen(station=None, grouper='julian_hr', usaf_data=None): '''Given a station name or dataframe do groupby on time bins Parameters: station: Integer name of a USAF weather station (folder names holding years' CSVs) groupby: One of "julian_hr" "hour" "month_hour" usaf_data: Give CSVs' dataframe instead of station name Returns: curves: Dictionary of hv.Curve objects showing 25%, 50%, 75% percentiles ''' summary_df = get_station_quantiles(station=station, grouper=grouper, usaf_data=usaf_data) curves = {} kw = dict(style=dict(s=2,alpha=0.5)) for col, label in zip(watt_hrs_m2_cols, labels): dates = pd.DatetimeIndex(start=pd.Timestamp('2001-01-01'), freq='H', periods=summary_df.shape[0]) median_col = summary_df[col] low_col = summary_df[col + '_low'] hi_col = summary_df[col + '_hi'] hi = hv.Curve((dates, hi_col), label=label + ' (upper quartile)')(**kw) low = hv.Curve((dates, low_col),label=label + ' (lower quartile)')(**kw) median = hv.Curve((dates, median_col), label=label)(**kw) plot_id = tuple(col.replace('metstat_', '').replace('_wh_m_2', '').split('_')) curves[plot_id] = low * median * hi curves[plot_id].group = labels[col] return curves # Run `plot_gen` (function above) with an example USAF station to get a dictionary of `holoviews.Curve` objects that have been combined with the overloaded `holoviews` `*` operator for `Curves` or other `holoviews.element` objects. The `*` operator is used to show 25%, 50%, and 75% time series. hour_of_year = plot_gen(station=example_usaf) # Now we have a dictionary with short keys for different plots of 25%, 50%, 75% of: # * `(glo,)`: Measured Global Horizontal # * `(dir,)`: Measured Direct Normal # * `(dif,)`: Measured Diffuse Horizontal # * `('csky', 'glo')`: Clear Sky Global Horizontal # * `('csky', 'dir')`: Clear Sky Direct Normal # * `('csky', 'dif')`: Clear Sky Diffuse Horizontal list(hour_of_year) # %%opts Curve [width=700 height=500] # %%opts Layout [tabs=True] hour_of_year[('dir',)] + hour_of_year[('csky', 'dir')] # %%opts Curve [width=700 height=500 ] # %%opts Layout [tabs=True] hour_of_year[('glo',)] + hour_of_year[('csky', 'glo')] + hour_of_year[('dif',)] + hour_of_year[('csky', 'dif',)] # The next cells repeat the groupby operations for hour of day. usaf_data = read_one_station(example_usaf) hour_of_day = plot_gen(grouper='hour', usaf_data=usaf_data) # %%opts Curve [width=700 height=500] # %%opts Layout [tabs=True] hour_of_day[('dir',)] + hour_of_day[('csky', 'dir')] # When grouping by hour of day or month of year, the number of groups on the horizontal axis is small enough for box plots to show distributions legibly. The next cell uses `holoviews.BoxWhisker` plots to show the direct normal radiation. 
# %%opts BoxWhisker [width=600 height=600] # %%opts Layout [tabs=True] (hv.BoxWhisker(usaf_data, kdims=['hour'], vdims=['metstat_dir_wh_m_2'], group='Direct Normal - Hour of Day') + hv.BoxWhisker(usaf_data, kdims=['month'], vdims=['metstat_dir_wh_m_2'], group='Direct Normal - Month of Year')) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sys sys.path.append('../') # or just install the module sys.path.append('../../fuzzy-tools') # or just install the module sys.path.append('../../astro-lightcurves-handler') # or just install the module # + from fuzzytools.files import search_for_filedirs from synthsne import _C root_folder = '../../surveys-save' filedirs = search_for_filedirs(root_folder, fext=_C.EXT_SPLIT_LIGHTCURVE) # + # %load_ext autoreload # %autoreload 2 import numpy as np from fuzzytools.files import load_pickle, save_pickle from fuzzytools.files import get_dict_from_filedir filedir = '../../surveys-save/alerceZTFv7.1/survey=alerceZTFv7.1°bands=gr°mode=onlySNe.splcds' filedict = get_dict_from_filedir(filedir) root_folder = filedict['*rootdir*'] cfilename = filedict['*cfilename*'] survey = filedict['survey'] lcdataset = load_pickle(filedir) print(lcdataset) # + # %load_ext autoreload # %autoreload 2 from synthsne.distr_fittings import ObsErrorConditionalSampler from synthsne.plots.samplers import plot_obse_samplers kf = 2 set_name = f'{kf}@train' band_names = lcdataset[set_name].band_names obse_sampler_bdict = {b:ObsErrorConditionalSampler(lcdataset, set_name, b) for b in band_names} plot_obse_samplers(lcdataset, set_name, obse_sampler_bdict, original_space=1) plot_obse_samplers(lcdataset, set_name, obse_sampler_bdict, original_space=0) plot_obse_samplers(lcdataset, set_name, obse_sampler_bdict, original_space=1, add_samples=1) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:picasso] # language: python # name: conda-env-picasso-py # --- # # Sample Notebook 2 for Picasso # This notebook shows some basic interaction with the picasso library. It assumes to have a working picasso installation. To install jupyter notebooks in a conda picasso environment use `conda install nb_conda`. # The sample data was created using Picasso:Simulate. You can download the files here: http://picasso.jungmannlab.org/testdata.zip # ## Load Localizations # + from picasso import io path = 'testdata_locs.hdf5' locs, info = io.load_locs(path) print('Loaded {} locs.'.format(len(locs))) # - # ## Info file # The info file is now a list of dictionaries. Each step in picasso adds an element to the list. # + for i in range(len(info)): print(info[i]['Generated by']) # extract width and height: width, height = info[0]['Width'], info[0]['Height'] print('Image height: {}, width: {}'.format(width, height)) # - # ## Filter localizations # # Filter localizations, i.e., via sx and sy: Remove all localizations that are not within a circle around a center position. # + sx_center = 0.82 sy_center = 0.82 radius = 0.04 to_keep = (locs.sx-sx_center)**2 + (locs.sy-sy_center)**2 < radius**2 filtered_locs = locs[to_keep] print('Length of locs before filtering {}, after filtering {}.'.format(len(locs),len(filtered_locs))) # - # ## Saving localizations # Add new info to the yaml file and save everything. 
# + import os.path as _ospath # Create a new dictionary for the new info new_info = {} new_info["Generated by"] = "Picasso Jupyter Notebook" new_info["Filtered"] = 'Circle' new_info["sx_center"] = sx_center new_info["sy_center"] = sy_center new_info["radius"] = radius info.append(new_info) base, ext = _ospath.splitext(path) new_path = base+'_jupyter'+ext io.save_locs(new_path, filtered_locs, info) print('{} locs saved to {}.'.format(len(filtered_locs), new_path)) # - # ## Manually export images # Use the picasso functions to render images. # + # Get minimum / maximum localizations to define the ROI to be rendered import numpy as np from picasso import render import matplotlib.pyplot as plt x_min = np.min(locs.x) x_max = np.max(locs.x) y_min = np.min(locs.y) y_max = np.max(locs.y) viewport = (y_min, x_min), (y_max, x_max) oversampling = 10 len_x, image = render.render(locs, viewport = viewport, oversampling=oversampling, blur_method='smooth') plt.imsave('test.png', image, cmap='hot', vmax=10) # Cutom ROI with higher oversampling viewport = (5, 5), (10, 10) oversampling = 20 len_x, image = render.render(locs, viewport = viewport, oversampling=oversampling, blur_method='smooth') plt.imsave('test_zoom.png', image, cmap='hot', vmax=10) # - # ## Calculate kinetics # Use the picasso functions to calculate kinetics. # + from picasso import postprocess # Note: to calculate dark times you need picked localizations of single binding sites path = 'testdata_locs_picked_single.hdf5' picked_locs, info = io.load_locs(path) # Link localizations and calcualte dark times linked_locs = postprocess.link(picked_locs, info, r_max=0.05, max_dark_time=1) linked_locs_dark = postprocess.compute_dark_times(linked_locs) print('Average bright time {:.2f} frames'.format(np.mean(linked_locs_dark.n))) print('Average dark time {:.2f} frames'.format(np.mean(linked_locs_dark.dark))) # Compare with simulation settings: integration_time = info[0]['Camera.Integration Time'] tau_b = info[0]['PAINT.taub'] k_on = info[0]['PAINT.k_on'] imager = info[0]['PAINT.imager'] tau_d = 1/(k_on*imager)*10**9*1000 print('------') print('ON Measured {:.2f} ms \t Simulated {:.2f} ms'.format(np.mean(linked_locs_dark.n)*integration_time, tau_b)) print('OFF Measured {:.2f} ms \t Simulated {:.2f} ms'.format(np.mean(linked_locs_dark.dark)*integration_time, tau_d)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:pytorch36] * # language: python # name: conda-env-pytorch36-py # --- import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import random torch.__version__ device = 'cuda' if torch.cuda.is_available() else 'cpu' random.seed(777) torch.manual_seed(777) if device == 'cuda': torch.cuda.manual_seed_all(777) import sys sys.version_info from mnist import * # parameters learning_rate = 0.001 training_epochs = 2 batch_size = 100 drop_prob = 0.3 mymnist = MyMNIST(batch_size) class DropoutModel(nn.Module): def __init__(self, drop_prob): super().__init__() self.layer1 = torch.nn.Sequential( torch.nn.Linear(784, 512, bias=True), torch.nn.ReLU(), torch.nn.Dropout(p=drop_prob) ) self.layer2 = torch.nn.Sequential( torch.nn.Linear(512, 512, bias=True), torch.nn.ReLU(), torch.nn.Dropout(p=drop_prob) ) self.layer3 = torch.nn.Sequential( torch.nn.Linear(512, 512, bias=True), torch.nn.ReLU(), torch.nn.Dropout(p=drop_prob) ) self.layer4 = torch.nn.Sequential( torch.nn.Linear(512, 512, 
bias=True), torch.nn.ReLU(), torch.nn.Dropout(p=drop_prob) ) self.layer5 = torch.nn.Sequential( torch.nn.Linear(512, 10, bias=True), ) torch.nn.init.xavier_uniform_(self.layer1[0].weight) torch.nn.init.xavier_uniform_(self.layer2[0].weight) torch.nn.init.xavier_uniform_(self.layer3[0].weight) torch.nn.init.xavier_uniform_(self.layer4[0].weight) torch.nn.init.xavier_uniform_(self.layer5[0].weight) def forward(self, x): out = self.layer1(x) out = self.layer2(out) out = self.layer3(out) out = self.layer4(out) out = self.layer5(out) return out dropout_model = DropoutModel(drop_prob).to(device) dropout_model # define cost/loss & optimizer criterion = torch.nn.CrossEntropyLoss().to(device) # Softmax is internally computed. optimizer = torch.optim.Adam(dropout_model.parameters(), lr=learning_rate) # + total_batch = len(mymnist.train_data_loader) dropout_model.train() # set the model to train mode (dropout=True) for epoch in range(training_epochs): avg_cost = 0 for X, Y in mymnist.train_data_loader: # reshape input image into [batch_size by 784] # label is not one-hot encoded X = X.view(-1, 28 * 28).to(device) Y = Y.to(device) optimizer.zero_grad() hypothesis = dropout_model(X) cost = criterion(hypothesis, Y) cost.backward() optimizer.step() avg_cost += cost / total_batch print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost)) print('Learning finished') # - # Test model and check accuracy with torch.no_grad(): dropout_model.eval() # set the model to evaluation mode (dropout=False) # Test the model using test sets X_test = mymnist.mnist_test.data.view(-1, 28 * 28).float().to(device) Y_test = mymnist.mnist_test.targets.to(device) prediction = dropout_model(X_test) # print(prediction.shape) correct_prediction = torch.argmax(prediction, 1) == Y_test accuracy = correct_prediction.float().mean() print('Accuracy:', accuracy.item()) # Get one and predict r = random.randint(0, len(mymnist.mnist_test) - 1) X_single_data = mymnist.mnist_test.data[r:r + 1].view(-1, 28 * 28).float().to(device) Y_single_data = mymnist.mnist_test.targets[r:r + 1].to(device) print('Label: ', Y_single_data.item()) single_prediction = dropout_model(X_single_data) print('Prediction: ', torch.argmax(single_prediction, 1).item()) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Qiskit v0.34.1 (ipykernel) # language: python # name: python3 # --- # + import numpy as np # Importing standard Qiskit libraries from qiskit import QuantumCircuit, transpile, Aer, IBMQ, qiskit from qiskit.tools.jupyter import * from qiskit.visualization import * from ibm_quantum_widgets import * from qiskit.providers.aer import QasmSimulator from qiskit.quantum_info import state_fidelity from qiskit.quantum_info import DensityMatrix # Loading your IBM Quantum account(s) provider = IBMQ.load_account() # - def execute_circuit_sv(quantum_circuit): statevector_simulator=Aer.get_backend('statevector_simulator') result=qiskit.execute(quantum_circuit,statevector_simulator).result() statevector_results=result.get_statevector(quantum_circuit) circuit_diagram=quantum_circuit.draw() q_sphere=plot_state_qsphere(statevector_results) return statevector_results, circuit_diagram, q_sphere qc=QuantumCircuit(1) qc.z(0) result, img, qsphere = execute_circuit_sv(qc) print(result) result img qsphere result.draw('hinton') plot_state_city(result,title='Density Matrix',color=['orange','black']) result.draw('latex') result.draw('bloch') 
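# As a quick sanity check (a minimal sketch, assuming `Statevector` from `qiskit.quantum_info` is available): the Z gate leaves |0⟩ unchanged, so the fidelity between the circuit's output statevector and a fresh |0⟩ state should be 1.
from qiskit.quantum_info import Statevector
reference_state = Statevector.from_label('0')  # the |0> basis state
print(state_fidelity(result, reference_state))  # expected: 1.0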
print(state_fidelity(result,result)) DensityMatrix(result) from qiskit.visualization import visualize_transition from qiskit.visualization import * from qiskit.visualization.bloch import Bloch from qiskit.visualization.exceptions import VisualizationError visualize_transition(qc, trace=True, saveas="3 z gate.mp4", fpg=30, spg=2) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import numpy as np from sympy import * x = Symbol('x') y = Symbol('y') z = Symbol('z') # ### Calculate the Jacobian of the function # - f(x,y,z) = (x^2)*cos(y) + exp(z)*sin(y) # - evaluate at the point (x,y,z)=(π,π,1). f = x**2*cos(y) + exp(z)*sin(y) # + J = np.array( [diff(f, x), diff(f, y), diff(f, z)]) print J # - print np.array([ diff(f, x).subs({x:pi, y:pi, z:1}), diff(f, y).subs({x:pi, y:pi, z:1}), diff(f, z).subs({x:pi, y:pi, z:1})]) # ### Calculate the Jacobian of the vector valued functions # # - u(x,y) = (x^2)*y − cos(x)*sin(y) and v(x,y)=exp(x+y) # - evaluate at the point (0,π) u = x**2*y - cos(x)*sin(y) v = exp(x+y) # + J = np.array([ [diff(u, x), diff(u, y)], [diff(v, x), diff(v, y)] ]) print J # - print np.array([ [diff(u, x).subs({x:0, y:pi}), diff(u, y).subs({x:0, y:pi})], [diff(v, x).subs({x:0, y:pi}), diff(v, y).subs({x:0, y:pi})] ]) # ### Calculate the Hessian for the function. # - f(x,y) = (x^3)*cos(y) − x*sin(y) f = x**3*cos(y) + x*sin(y) # + H = np.array([ [diff(diff(f, x), x), diff(diff(f, x), y)], [diff(diff(f, y), x), diff(diff(f, y), y)] ]) print H # - # ### Calculate the Hessian for the function # - f(x,y,z) = xy + sin(y)sin(z) + (z^3)*exp(x) f = x*y + sin(y)*sin(z) + (z**3)*exp(x) # + H = np.array([ [diff(diff(f, x), x), diff(diff(f, x), y), diff(diff(f, x), z)], [diff(diff(f, y), x), diff(diff(f, y), y), diff(diff(f, y), z)], [diff(diff(f, z), x), diff(diff(f, z), y), diff(diff(f, z), z)] ]) print H # - # ### Calculate the Hessian for the function # - f(x,y,z) = xycos(z) − sin(x)*exp(y)*(z^3) # - evaluate at the point (x,y,z)=(0,0,0) f = x*y*cos(z) - sin(x)*exp(y)*(z**3) # + H = np.array([ [diff(diff(f, x), x), diff(diff(f, x), y), diff(diff(f, x), z)], [diff(diff(f, y), x), diff(diff(f, y), y), diff(diff(f, y), z)], [diff(diff(f, z), x), diff(diff(f, z), y), diff(diff(f, z), z)] ]) print H # - print np.array([ [diff(diff(f, x), x).subs({x:0, y:0, z:0}), diff(diff(f, x), y).subs({x:0, y:0, z:0}), diff(diff(f, x), z).subs({x:0, y:0, z:0})], [diff(diff(f, y), x).subs({x:0, y:0, z:0}), diff(diff(f, y), y).subs({x:0, y:0, z:0}), diff(diff(f, y), z).subs({x:0, y:0, z:0})], [diff(diff(f, z), x).subs({x:0, y:0, z:0}), diff(diff(f, z), y).subs({x:0, y:0, z:0}), diff(diff(f, z), z).subs({x:0, y:0, z:0})] ]) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Putting it in Practice: Wine Dataset 🍷 # This dataset includes measurable attributes of different wines as well as their rated quality. We are going to fit the data with multiple linear regression model using ```statsmodel``` library. # # General Workflow: # 1. Checking the data set to make sure it is ready to fit into the model # 2. Clean or manipulate the data if needed # 3. Create the predictors (independent) and target (dependent) matrices # 4. 
Add the constant column to the predictor matrix using ```sm.add_constant()``` # 5. Fit the data using ```sm.OLS()``` # 6. Print the regression summary table using ```.summary()``` method # # Note: If we were running the ```sm.OLS()``` without creating the constant column in the predictors dataframe, it will return a different result, which has a completely different interpretation. # # Model without Constant: # $$y = \beta1 x_1 + \beta_2 x_2 + ... \beta_k x_k$$ # # Model with Constant: # $$y = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + ... \beta_k x_k$$ # Import dependencies import numpy as np import pandas as pd import statsmodels.api as sm # + # Import the 'wine' data set wine = pd.read_csv('data/wine.csv') # Extract the head of the data set wine.head() # - # Check the data set info wine.info() # Print the statistical summary of the data set wine.describe() # Imagine we want to attempt to estimate the perceived quality of a wine using these attributes. # Count each unique wine quality rating in the data set wine['quality'].value_counts() # Count the total of red wine / non-red wine in the data set wine['red_wine'].value_counts() # ## 🧠 **Knowledge Check** # > Why are we using "quality" as the dependent variable (target)? Would it make sense for another feature to be the target instead? # ## Running the Regression # First, we'll separate the data into our predictors (X) and target (y) # + # Create the target and predictors for regression wine_preds = wine.drop('quality', axis = 1) wine_target = wine['quality'] # Check the predictors data frame wine_preds.head() # - # Now we can perform our (multiple) linear regression! Since we already used `statsmodels`, let's use that again to fit the model and then check the summary: # + # use sm.add_constant() to add constant term/y-intercept predictors = sm.add_constant(wine_preds) # Check if the constant column has been added to the data frame predictors # - # Create the OLS model and fit the data model = sm.OLS(wine_target, predictors).fit() # > Alright! So we fitted our model! Take a look at the summary and look if you can understand the different parts. # Print the model summary table model.summary() # # Scaling - The Missing & Helpful Step # When you looked at the summary after we did the linear regression, you might have noticed something interesting. # # Observing the coefficients, you might notice there are two relatively large coefficients and nearly rest are less than 1. # ## What's Going on Here? # In a word, it's useful to have all of our variables be on the same scale, so that the resulting coefficients are easier to interpret. If the scales of the variables are very different one from another, then some of the coefficients may end up on very large or very tiny scales. # # This happens since the coefficients will effectively attempt to "shrink" or "expand" the features before factoring their importance to the model. # # ![](img/shrinkinator.jpeg) # # This can make it more difficult for interpretation and identifying coefficients with the most "effect" on the prediction. # # For more on this, see [this post](https://stats.stackexchange.com/questions/32649/some-of-my-predictors-are-on-very-different-scales-do-i-need-to-transform-them). # ## A Solution: Standard Scaling # One solution is to *scale* our features. There are a few ways to do this but we'll focus on **standard scaling**. 
For more about scaling the data, check out this [link](https://www.analyticsvidhya.com/blog/2020/04/feature-scaling-machine-learning-normalization-standardization/) # # When we do **standard scaling**, we're really scaling it to be the features' respective $z$-scores. # # Benefits: # # - This tends to make values relatively small (mean value is at $0$ and one standard deviation $\sigma$ from the mean is $1$). # - Easier interpretation: larger coefficients tend to be more influential # # ![](img/standard_scaling.png) # # Next time, let's *scale* our columns as $z$-scores first. # ## Redoing with Standard Scaling # Let's try standard scaling the model with our wine dataset now. # # *Z*-Score Formula: # # $$z = \frac{x - \bar{x}}{sd(x)}$$ # # where # # - $x$ is the actual value of $x$ # - $\bar{x}$ is the average of $x$ # - $sd(x)$ is the sample standard deviation of $x$. # We are scaling all colmuns using the z-scores formula wine_preds_scaled = (wine_preds - np.mean(wine_preds)) / np.std(wine_preds) # Check the statistial summary of the scaled data set wine_preds_scaled.describe() # Let's run the model with the standardized data predictors_scaled = sm.add_constant(wine_preds_scaled) model = sm.OLS(wine_target, predictors_scaled).fit() model.summary() # > Check how well this model did with the one before scaling. Does it perform any differently? # ## 🧠 **Knowledge Check** # > After standard scaling, what would it mean when all the $x_i$ are all $0$? # ## 🧠 **Knowledge Check** # ### Follow-Up # > What does this mean for the constant term $\hat{\beta}_0$? Could we check this? # # Multiple Regression in Scikit-Learn # It's great that we tried out multiple linear regression with `statsmodels`; now let's try it with `sklearn`! # # The Sklearn library provides us with a Linear Regression model that will fit a line to our data. Sklearn follows a consistent API where you define a model object, fit the model to the data, and then make predictions with the model. # # ![sklearn](img/sklearn_api.png) # # For standard scaling with *sklearn*, we follow below workflow. # # ``` Python # # First create a StandardScaler object # ss = StandardScaler() # # # Use the .fit() method to compute the mean and sd # data_scaler = ss.fit(data) # # # Use the .transform() method to perform standardization # data_scaled = data_scaler.transform(data) # ``` # # Note: The object returned from ```.transform()``` is a *Numpy* array / matrix, instead of *Pandas* dataframe. # # SciKit-Learn: [StandardScaler()](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html) # ## Scale the Data # Import Dependencies import pandas as pd from matplotlib import pyplot as plt import seaborn as sns from sklearn.linear_model import LinearRegression from sklearn.preprocessing import StandardScaler import sklearn.metrics as metrics # Let's create a StandardScaler object to scale our data for us. # Now we'll apply it to our data by using the .fit() and .transform() methods. # Previous scaled data was stored in Pandas dataframe wine_preds_scaled.head() # sklearn StandardScaler() returns Numpy array / matrix # Check that the scaling worked about the same as when we did it by hand # ## Fit the Model # Now we can fit a `LinearRegression` object to our training data! 
# # Here is some standard code for using the *sklearn* regression model: # # ``` Python # # Create the linear regression object # lr = LinearRegression() # # # Fit the data using the linear regression object # lr.fit(X, y) # # # Extract the estimated coefficients # lr.coef_ # # # Extract the estimated intercept coefficient # lr.intercept_ # # # Extract the R^2 of the model # lr.score(X, y) # # # Make predictions of the target values using the predictors # lr.predict(X) # ``` # # Note: We can pass either a *Pandas* dataframe or a *Numpy* array to the regression model. Also, the *sklearn* regression model does not require adding a constant column to the **predictors** matrix or dataframe. # # SciKit-Learn: [LinearRegression()](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) # + # Create the linear regression object # Fit the training data using the linear regression object # + # Extract the coefficient values using the .coef_ attribute # + # Extract the intercept coefficient # + # Extract the R^2 score of the model # + # Use the training data set to predict the target values # - # All that's left is to evaluate our model to see how well it did! # ## Evaluate Performance # ### Observing Residuals # We can check the residuals like we would for a simple linear regression model. # # SciKit-Learn: [metrics](https://scikit-learn.org/stable/modules/model_evaluation.html) # + # Create the predicted values array y_hat = lr.predict(wine_preds_st_scaled) # Calculate the residuals (Actual - Predicted) resid = (wine_target - y_hat) # Display the residuals across all predicted values plt.scatter(x=range(y_hat.shape[0]), y=resid, alpha=0.1) # - # ### Sklearn Metrics # The metrics module in sklearn has a number of metrics that we can use to measure the accuracy of our model, including the $R^2$ score, the mean absolute error, and the mean squared error. Note that the default 'score' on our model object is the $R^2$ score. Let's go back to our wine dataset: # Calculate the R^2 of the model metrics.r2_score(wine_target, lr.predict(wine_preds_st_scaled)) # Let's make sure this metric is properly calibrated. If we simply put $\bar{y}$ as our prediction, then we should get an $R^2$ score of *0*. And if we predict, say, $\bar{y} + 1$, then we should get a *negative* $R^2$ score. # + # Calculate the average target value avg_quality = np.mean(wine_target) # Calculate the total number of observations num = len(wine_target) # Check the R^2 with the average of the target values metrics.r2_score(wine_target, avg_quality * np.ones(num)) # - # Check the R^2 with the average of the target values + 1 metrics.r2_score(wine_target, (avg_quality + 1) * np.ones(num)) # Mean Absolute Error: # # $$MAE = \frac{\sum_{i=1}^{n}|\hat{y_i} - y_i|}{n}$$ # # where # # - $\hat{y_i}$ is the predicted value # - $y_i$ is the true value # - $n$ is the total number of observations # Check the mean absolute error (MAE) metrics.mean_absolute_error(wine_target, lr.predict(wine_preds_st_scaled)) # Mean Squared Error: # # $$MSE = \frac{\sum_{i=1}^{n}(\hat{y_i} - y_i)^2}{n}$$ # # where # # - $\hat{y_i}$ is the predicted value # - $y_i$ is the true value # - $n$ is the total number of observations # Check the mean squared error (MSE) metrics.mean_squared_error(wine_target, lr.predict(wine_preds_st_scaled)) # ## Practice with a Partner: (20 minutes) # # We have an interesting data set that studies our brain. In this exercise we are trying to use multiple features to predict brain **weight**. # # Source: (1905).
"A Study of the Relations of the Brain to the Size of the Head", Biometrika, Vol. 4, pp105-123 # # Description: Brain weight (grams) and head size (cubic cm) for 237 adults classified by gender and age group. # # Variables/Columns: # # GENDER: Gender Male or Female # AGE: Age Range 20-46 or 46+ # SIZE: Head size ($cm^3$) 21-24 # WEIGHT: Brain weight (grams) 29-32 # # ### Objectives: # - Follow the standard workflow, such as checking the data type, cleaning the data, prepare data for regression model. # - Create dummy variables for the categorical data using either Pandas ```pd.get_dummy()``` or sklearn ```OneHotEncoder()```. # - Scale the data using sklearn ```standardScaler()```. # - Fit the data in multiple linear regression using sklearn module. # - Evaluate the model using different metrics, $R^2$, MAE, MSE. # # Follow the instructions and complete a coding exercise with a partner in the class (in the random breakout room). Instructor will go into the rooms to help answering questions from students. # Import dependencies import matplotlib.pyplot as plt import numpy as np import pandas as pd from sklearn.preprocessing import OneHotEncoder, StandardScaler from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error import sklearn.metrics as metrics # Read the csv file into a pandas DataFrame brain = pd.read_csv('data/brain_categorical.csv') brain.head() # ### Step 1: Checking the data # Use the .info() method to check the data set # Question: # # 1. Do we have any missing data? # 2. What are the data type for each variable (column)? # 3. Do we need to clean the data? # Use the .describe() method to print the statistical summary of the data set # Question: # # 1. Why are we see the statistical summary returning only two columns (size and weight)? # # Let's check the count of each category (level) in both categorical columns. # Count of male and female # Count of different age groups # Next, let's create the predictors and target dataframes. # # Example: # # ``` Python # # Create predictors dataframe # predictors = data.drop('target_name', axis=1) # # # Create target dataframe # target = data['target_name'] # ``` # Create the predictors and target dataframes # ### Step 2: Create dummy variables for categorical data # # Now that we have studied the data set and knowing that there are two categorical variables (columns). We need to create dummy columns. # # Feel free to use either *Pandas* ```pd.get_dummies()``` or *sklearn* ```OneHotEncoder()``` for this task. # # Example: # # ``` Python # # Create dummy columns with Pandas function # data_encoded = pd.get_dummies(data, drop_first = True) # # # Create dummy columns with sklearn OneHotEncoder # ohe = OneHotEncoder(drop = 'first') # data_trans = ohe.fit_transform(data['categorical_columns']) # data_encoded = pd.DataFrame(data_trans.todense(), columns = ohe.get_feature_names()) # data_encoded.join(data['numerical_columns']) # data_encoded # ``` # # Note: *Remember when we are fitting / training the data with the OLS regression model, we need to exclude one level from each categorical variable as the reference level.* # Create the categorical columns with Pandas DataFrame # Create the categorical columns with sklearn OneHotEncoder (optional) # Check the encoded predictors dataframe to make sure the dummy columns are created and the original columns are removed. # # Let's run the sklearn OLS with the encoded data and see the results. 
# + # Create the OLS Regression object # Fit the training data using the OLS regression object # - # Extract the coefficient values using the .coef_ attribute # Extract the intercept coefficient # ### Step 3: Scaling the data # # If we look at the estimated coefficients, there are two relatively large coefficients (gender_male and age_46+) and a coefficient (size) less than 1. # # Remember, we can use the sklearn ```StandardScaler()``` to standardize the data, so the variables will be on the same scale. # # Example: # # ``` Python # # Create the StandardScaler object # ss = StandardScaler() # # ## Apply it to our data by using the .fit() and .transform() methods # data_scaler = ss.fit(data) # data_scaled = data_scaler.transform(data) # ``` # # Note: *The StandardScaler expects a 2D array as input. If we are scaling our target variable, we need to reshape it using ```target.values.reshape(-1, 1)``` before passing it to the .fit() and .transform() methods.* # + # Create the StandardScaler object # Apply it to the predictors and target by using the .fit() and .transform() methods # Create the scaled dataframes # Print the scaled predictors and target # - # ### Step 4: Fitting the multiple regression model # # Let's run the sklearn OLS regression on the scaled data again. # + # Create the OLS Regression object # Fit the training data using the OLS regression object # - # Extract the coefficient values using the .coef_ attribute # Extract the intercept coefficient # ### Step 5: Evaluate the model # # Let's evaluate the model with a residual graph and different metrics, such as $R^2$, MAE, and MSE. # + # Create the predicted values array # Calculate the residual (Actual - Predicted) # Display the residuals across all predicted values # - # Extract the R^2 score of the model # Check the mean absolute error (MAE) # Check the mean squared error (MSE) # **Final Challenge:** # # Let's use statsmodels OLS regression to confirm our results from the previous exercise. Run two OLS regressions, one with the encoded data and one with the scaled data. # # Check the coefficient and $R^2$ values. They should be the same as for the sklearn OLS regression models. If they are not, something is wrong!!
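# A minimal sketch of the two confirmation cells below (assuming the encoded predictors, the scaled predictors, and the target are in `X_encoded`, `X_scaled`, and `target`; your variable names may differ):
#
# ``` Python
# import statsmodels.api as sm
#
# # OLS on the encoded (unscaled) data
# encoded_model = sm.OLS(target, sm.add_constant(X_encoded)).fit()
# print(encoded_model.summary())
#
# # OLS on the standard-scaled data
# scaled_model = sm.OLS(target, sm.add_constant(X_scaled)).fit()
# print(scaled_model.summary())
# ```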
# Create the OLS model and fit the encoded data # Create the OLS model and fit the scaled data # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Car Detection import cv2 # + cap = cv2.VideoCapture('video.avi') car_cascade = cv2.CascadeClassifier('cars.xml') # + while True: ret, img = cap.read() if (type(img) == type(None)): break gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) cars = car_cascade.detectMultiScale(gray, 1.1, 2) for (x,y,w,h) in cars: cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,255),2) cv2.imshow('video', img) if cv2.waitKey(33) == 27: break cv2.destroyAllWindows() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd from matplotlib import pyplot as plt # %matplotlib inline # - log = pd.read_csv("../build/log.csv") log.head() fig = plt.figure(figsize=(9, 6)) plt.xlabel("time step, k") plt.ylabel("Cross track error") plt.grid() plt.plot(log["cte"], '-', color="c",label="cte") plt.hlines(0,0,len(log["cte"]), color="r",label="reference cte") plt.legend(loc=1) plt.title("Cross Track Error") plt.show() fig = plt.figure(figsize=(9, 6)) plt.xlabel("time step, k") plt.ylabel("Orientation error (radian)") plt.grid() plt.plot(log["epsi"], '-', color="c",label="epsi") plt.hlines(0,0,len(log["epsi"]), color="r",label="reference epsi") plt.legend(loc=1) plt.title("Orientation Error") plt.show() fig = plt.figure(figsize=(9, 6)) plt.xlabel("time step, k") plt.ylabel("Speed (mph)") plt.grid() plt.plot(log["v"], '-', color="c",label="speed") plt.hlines(100,0,len(log["v"]), color="r",label="reference speed") plt.legend(loc=5) plt.title("Speed") plt.show() fig = plt.figure(figsize=(9, 6)) plt.xlabel("time step, k") plt.ylabel("Steering angle (radian)") plt.grid() plt.plot(log["steer"], '-', color="c",label="steering angle") plt.legend(loc=1) plt.title("Steering angle") plt.show() fig = plt.figure(figsize=(9, 6)) plt.xlabel("time step, k") plt.ylabel("Throttle") plt.grid() plt.plot(log["throttle"], '-', color="c",label="throttle") plt.legend(loc=5) plt.title("Throttle") plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np import math from sklearn import metrics from sklearn.model_selection import GridSearchCV, RandomizedSearchCV, StratifiedKFold, KFold, train_test_split from sklearn.preprocessing import LabelEncoder, OneHotEncoder, MinMaxScaler from sklearn import decomposition, cluster from scipy import stats import multiprocessing import joblib from joblib import Parallel, delayed import pickle import seaborn as sns # %matplotlib inline # - # # Parameters path_data = 'data/' # # Helper functions # + class MacOSFile(object): def __init__(self, f): self.f = f def __getattr__(self, item): return getattr(self.f, item) def read(self, n): # print("reading total_bytes=%s" % n, flush=True) if n >= (1 << 31): buffer = bytearray(n) idx = 0 while idx < n: batch_size = min(n - idx, 1 << 31 - 1) # print("reading bytes [%s,%s)..." 
% (idx, idx + batch_size), end="", flush=True) buffer[idx:idx + batch_size] = self.f.read(batch_size) # print("done.", flush=True) idx += batch_size return buffer return self.f.read(n) def write(self, buffer): n = len(buffer) print("writing total_bytes=%s..." % n, flush=True) idx = 0 while idx < n: batch_size = min(n - idx, 1 << 31 - 1) print("writing bytes [%s, %s)... " % (idx, idx + batch_size), end="", flush=True) self.f.write(buffer[idx:idx + batch_size]) print("done.", flush=True) idx += batch_size def pickle_dump(obj, file_path): with open(file_path, "wb") as f: return pickle.dump(obj, MacOSFile(f), protocol=pickle.HIGHEST_PROTOCOL) def pickle_load(file_path): with open(file_path, "rb") as f: return pickle.load(MacOSFile(f)) # - def reduce_mem_usage(df, verbose=False): numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] start_mem = df.memory_usage().sum() / 1024**2 for col in df.columns: col_type = df[col].dtypes if col_type in numerics: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) end_mem = df.memory_usage().sum() / 1024**2 if verbose: print('Mem. 
usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem)) return df # # Model functions # + class prepareData: def __init__(self,path,validation_ratio=0,folds=5, duplicate_low_label=False, add_kernels=False, use_log_for_kernel_diff=False, inverse_kde=False, ratio_inverse_kde=False, use_diff_kde=False, perform_pca=False, pca_variance_threshold=0.95, pca_for_kde=False, use_train_test_for_norm=True,cpu=None): self.path = path self.folds = 5 self.use_log_for_kernel_diff = use_log_for_kernel_diff if cpu is None: cpu = multiprocessing.cpu_count() else: cpu = min(cpu,multiprocessing.cpu_count()) print('import data') self._import_data(self.path) self._num_features = list(set(self.X_train.columns) - set(['sig_id','cp_type','cp_dose','cp_time'])) print('transform cat features') self.X_train = self._transform_cat_features(self.X_train) self.X_test = self._transform_cat_features(self.X_test) if add_kernels: print('kde kernels calculations') self.kde_kernels = self._calculate_kde_kernels(self.X_train,self.X_test,ratio_inverse_kde) print('kde features') self.X_train = self._process_kde_parallelized(self.X_train,self.kde_kernels,use_log_for_kernel_diff,inverse_kde,use_diff_kde,cpu) self.X_test = self._process_kde_parallelized(self.X_test,self.kde_kernels,use_log_for_kernel_diff,inverse_kde,use_diff_kde,cpu) print('perform pca') if perform_pca: self._fit_pca([self.X_train,self.X_test],pca_for_kde) self.X_train = self._transform_pca(self.X_train,pca_variance_threshold) self.X_test = self._transform_pca(self.X_test,pca_variance_threshold) print('normalize features') if use_train_test_for_norm: _ = self._normalize_features(pd.concat([self.X_train,self.X_test],axis=0)) self.X_train = self._normalize_features(self.X_train,is_test=True) self.X_test = self._normalize_features(self.X_test,is_test=True) else: self.X_train = self._normalize_features(self.X_train) self.X_test = self._normalize_features(self.X_test,is_test=True) if validation_ratio>0: self.X_train, self.X_holdout, self.y_train, self.y_holdout = train_test_split(self.X_train,self.y_train,test_size=validation_ratio) if duplicate_low_label: self.X_train, self.y_train = self._duplicate_data_for_imbalanced_labels(self.X_train,self.y_train,self.folds) print('create dataset with non scored data') self.X_train_nonscored, self.y_train_nonscored = self._add_nonscored_targets(self.X_train) if validation_ratio>0: self.X_holdout_nonscored, self.y_holdout_nonscored = self._add_nonscored_targets(self.X_holdout) print('order datasets') self.X_train.sort_values(by=['sig_id'],inplace=True) self.X_train_nonscored.sort_values(by=['sig_id'],inplace=True) self.y_train.sort_values(by=['sig_id'],inplace=True) self.y_train_nonscored.sort_values(by=['sig_id'],inplace=True) if validation_ratio>0: self.X_holdout.sort_values(by=['sig_id'],inplace=True) self.X_holdout_nonscored.sort_values(by=['sig_id'],inplace=True) self.y_holdout.sort_values(by=['sig_id'],inplace=True) self.y_holdout_nonscored.sort_values(by=['sig_id'],inplace=True) print('remove sig_id') self.X_train_ids = self.X_train['sig_id'].copy() self.X_train = self.X_train.drop(['sig_id'],axis=1) self.X_train_nonscored = self.X_train_nonscored.drop(['sig_id'],axis=1) self.X_test_ids = self.X_test['sig_id'].copy() self.X_test = self.X_test.drop(['sig_id'],axis=1) self.y_train = self.y_train.drop(['sig_id'],axis=1) self.y_train_nonscored = self.y_train_nonscored.drop(['sig_id'],axis=1) self.y_train_labels = self.y_train.columns if validation_ratio>0: self.X_holdout_ids = 
self.X_holdout['sig_id'].copy() self.X_holdout = self.X_holdout.drop(['sig_id'],axis=1) self.y_holdout = self.y_holdout.drop(['sig_id'],axis=1) self.X_holdout_nonscored = self.X_holdout_nonscored.drop(['sig_id'],axis=1) self.y_holdout_nonscored = self.y_holdout_nonscored.drop(['sig_id'],axis=1) print('calculate std') X_list = [self.X_train,self.X_test] if validation_ratio>0: X_list.append(self.X_holdout) self._calculate_features_std(X_list) def _import_data(self,path): self.X_train = pd.read_csv(path+'train_features.csv') self.X_test = pd.read_csv(path+'test_features.csv') self.y_train = pd.read_csv(path+'train_targets_scored.csv') self.X_train_additional = pd.read_csv(path+'train_targets_nonscored.csv') self.sample_submission = pd.read_csv(path+'sample_submission.csv') def _transform_cat_features(self,X): X['cp_type'] = X['cp_type'].map({'trt_cp':0,'ctl_vehicle':1}) X['cp_dose'] = X['cp_dose'].map({'D1':0,'D2':1}) X['cp_time'] = X['cp_time'].map({24:0,48:0.5,72:1}) return X def _normalize_features(self,X,is_test=False): cols_to_normalize = list(set(self.X_train.columns) - set(['sig_id','cp_type','cp_dose','cp_time'])) if is_test==False: self.normalizer_dict = {} for col in cols_to_normalize: if is_test: scaler = self.normalizer_dict[col] X[col] = (scaler.transform([X[col]])).flatten() else: a = X[col].values scaler = MinMaxScaler() a = scaler.fit_transform(a.reshape(-1, 1)) self.normalizer_dict[col] = scaler X[col] = a return X def _calculate_kde_kernels(self,X1,X2,ratio_inverse_kde): X = pd.concat([X1,X2]) X_control = X[X['cp_type']==1] X_treatment = X[X['cp_dose']==0] kernels = {} cols = self._num_features for col in cols: #Calculate kernels x_control = X_control[col].values x_treatment = X_treatment[col].values kde_control_kernel = stats.gaussian_kde(x_control) kde_treatment_kernel = stats.gaussian_kde(x_treatment) kernels[col+'_control'] = kde_control_kernel kernels[col+'_treatment'] = kde_treatment_kernel #Calculate max ratio so that when calculating kde features based on the ratio of treatement/control, we have a threshold for values x_control_mean = x_control.mean() x_control_std = x_control.std() x_treatment_mean = x_treatment.mean() #As b is not usually normal we use only a std to create range kde_range = [min(x_control_mean - 2*x_control_std, x_treatment_mean - 2*x_control_std),max(x_control_mean + 2*x_control_std, x_treatment_mean + 2*x_control_std)] kde_sample = np.arange(kde_range[0],kde_range[1],(kde_range[1]-kde_range[0])/100) x_control_kde_sample = kde_control_kernel.pdf(kde_sample) x_treatment_kde_sample = kde_treatment_kernel.pdf(kde_sample) if ratio_inverse_kde: max_ratio = (x_control_kde_sample/x_treatment_kde_sample).max() else: max_ratio = (x_treatment_kde_sample/x_control_kde_sample).max() kernels[col+'_ratio'] = max_ratio return kernels def _build_batch(self,X,kernels,use_log_for_kernel_diff,inverse_kde,use_diff_kde,cpu_count): batch_list = [] col_size = len(self._num_features) if col_size>=cpu_count: batch_size = int(col_size/cpu_count) else: batch_size = 1 cpu_count = col_size for i in range(cpu_count): if i == cpu_count-1: batch_list.append((self._num_features[i*batch_size:],X,kernels,use_log_for_kernel_diff,inverse_kde,use_diff_kde)) else: batch_list.append((self._num_features[i*batch_size:(i+1)*batch_size],X,kernels,use_log_for_kernel_diff,inverse_kde,use_diff_kde)) return batch_list def _process_individual_batch(self,batch): ratio_multiplier = 10 cols = batch[0] X = batch[1] kernels = batch[2] use_log_for_kernel_diff = batch[3] inverse_kde = batch[4] 
use_diff_kde = batch[5] series_list = [] for col in cols: kde_control_kernel = kernels[col+'_control'] kde_treatment_kernel = kernels[col+'_treatment'] if use_diff_kde: a_kde = kde_control_kernel.pdf(X[col].values) b_kde = kde_treatment_kernel.pdf(X[col].values) a = (b_kde-a_kde)/np.max((a_kde,b_kde),axis=0) a = a.clip(-1,1) a = np.nan_to_num(a,nan=0.0) else: if inverse_kde: a = kde_control_kernel.pdf(X[col].values)/kde_treatment_kernel.pdf(X[col].values) else: a = kde_treatment_kernel.pdf(X[col].values)/kde_control_kernel.pdf(X[col].values) a = np.nan_to_num(a,nan=ratio_multiplier*kernels[col+'_ratio']) a = a.clip(0,ratio_multiplier*kernels[col+'_ratio']) if use_log_for_kernel_diff: a = np.log1p(a) a = pd.Series(a,name=col+'_kde_diff',dtype='float32') series_list.append(a) return series_list def _run_batch(self,batch): return self._process_individual_batch(batch) def _process_batch_list(self,batch_list,cpu): return joblib.Parallel(n_jobs=cpu)(joblib.delayed(self._run_batch)(batch) for batch in batch_list) def _process_kde_parallelized(self,X,kernels,use_log_for_kernel_diff,inverse_kde,use_diff_kde,cpu): batch_list = self._build_batch(X,kernels,use_log_for_kernel_diff,inverse_kde,use_diff_kde,cpu) results = self._process_batch_list(batch_list,cpu) for series_list in results: for s in series_list: X[s.name] = s.values return X def _fit_pca(self,X_list,pca_for_kde): X = pd.concat(X_list,axis=0) all_cols = X.columns pca_cols = [] pca_names = ['g_pca','c_pca'] pca_cols.append([x for x in all_cols if ('g-' in x) & (not '_kde_diff' in x)]) pca_cols.append([x for x in all_cols if ('c-' in x) & (not '_kde_diff' in x)]) if pca_for_kde: pca_cols.append([x for x in all_cols if ('g-' in x) & ('_kde_diff' in x)]) pca_cols.append([x for x in all_cols if ('c-' in x) & ('_kde_diff' in x)]) pca_names.append('g_kde_pca') pca_names.append('c_kde_pca') self.pca_cols_dict = {} self.pca_dict = {} for name,cols in zip(pca_names,pca_cols): if len(cols)>0: X_pca = X[cols] pca = decomposition.PCA(n_components=X_pca.shape[1], whiten=True, svd_solver='full', random_state=42 ) pca.fit(X_pca) self.pca_cols_dict[name] = cols self.pca_dict[name] = pca def _calculate_pca_components_to_keep(self,explained_variance_ratio_,pca_variance_threshold): explained_variance_ratio_cum = explained_variance_ratio_.cumsum() return np.argmax(explained_variance_ratio_cum>=pca_variance_threshold) + 1 def _transform_pca(self,X,pca_variance_threshold): pca_names = list(self.pca_cols_dict.keys()) for name in pca_names: #Recover cols and fit pca cols = self.pca_cols_dict[name] pca = self.pca_dict[name] #Transform to current data X_pca = pca.transform(X[cols]) #Keep only necessary data + transform into pd variance_limit = self._calculate_pca_components_to_keep(pca.explained_variance_ratio_,pca_variance_threshold) X_pca = X_pca[:,:variance_limit] new_cols = [name+'_'+str(i) for i in range(variance_limit)] X_pca = pd.DataFrame(X_pca,columns=new_cols) #Adjust X X.drop(cols,axis=1,inplace=True) X = pd.concat([X,X_pca],axis=1) return X def _duplicate_data_for_imbalanced_labels(self,X,y,folds): cols_with_not_enough_data = np.where(y.iloc[:,1:].sum().values 0: n_duplicates = folds//n_rows + 1 X_duplicate_pd = X.iloc[rows,:].copy() y_duplicate_pd = y.iloc[rows,:].copy() X = pd.concat([X] + [X_duplicate_pd]*n_duplicates) y = pd.concat([y] + [y_duplicate_pd]*n_duplicates) return X,y def _add_nonscored_targets(self,X): X = pd.merge(X,self.X_train_additional,on='sig_id') y = X[self.X_train_additional.columns].copy() return X,y def 
_calculate_features_std(self,X_list): X_array = [] for X in X_list: X = X.values mask = X[:,0]==0 X_array.append(X[mask][:,3:]) X_array = np.concatenate(X_array,axis=0) self.features_std = X_array.std(axis=0) def create_cv(self,folds=5): cv = MultilabelStratifiedKFold(n_splits=folds, shuffle=True, random_state=SEED) oof_idx = [] for (train_idx, val_idx) in cv.split(self.X_train, self.y_train): oof_idx.append((train_idx, val_idx)) return oof_idx def add_control_test_to_train(prepared_data): X_test = prepared_data.X_test X_train = prepared_data.X_train y_train = prepared_data.y_train X_test_control = X_test[X_test['cp_type']==1] X_train = pd.concat([X_train,X_test_control],axis=0) y_test_control = pd.DataFrame(np.zeros((X_test_control.shape[0],y_train.shape[1])),columns=y_train.columns.tolist()) y_train = pd.concat([y_train,y_test_control],axis=0) return X_train, y_train # - # # Run # #%debug prepared_data = prepareData(path_data,validation_ratio=0.0,folds=5, duplicate_low_label=False,add_kernels=False, perform_pca=True, pca_variance_threshold=0.95 ) # + active="" # pickle_dump(prepared_data,'data/prepared_data') # - prepared_data = pickle_load('data/prepared_data') prepared_data_normal_kde = pickle_load('data/prepared_data_normal_kde') # # KDE features X_train = pd.read_csv(path_data+'train_features.csv') X_test = pd.read_csv(path_data+'test_features.csv') X_train = pd.concat([X_train,X_test]) # + exp = 'g-50' a0 = X_train[X_train['cp_type']=='ctl_vehicle'][exp].values b0 = X_train[X_train['cp_type']=='trt_cp'][exp].values c0 = X_train[exp].values a = prepared_data.X_train[prepared_data.X_train['cp_type']==1][exp].values b = prepared_data.X_train[prepared_data.X_train['cp_type']==0][exp].values c = prepared_data.X_train[exp].values d = prepared_data.X_train[exp+'_kde_diff'].values a1 = prepared_data_normal_kde.X_train[prepared_data_normal_kde.X_train['cp_type']==1][exp].values b1 = prepared_data_normal_kde.X_train[prepared_data_normal_kde.X_train['cp_type']==0][exp].values c1 = prepared_data_normal_kde.X_train[exp].values d1 = prepared_data_normal_kde.X_train[exp+'_kde_diff'].values # - sns.set(rc={'figure.figsize':(11.7,8.27)}) sns.distplot(a0,hist=False,color='blue') sns.distplot(b0,hist=False,color='red') a0.std() (np.random.randn(100)) sns.set(rc={'figure.figsize':(11.7,8.27)}) sns.distplot(np.clip(a0+np.random.randn(a0.shape[0])*a0.std()/4,-11,5),hist=False,color='blue') sns.distplot(np.clip(b0+np.random.randn(b0.shape[0])*b0.std()/4,-11,5),hist=False,color='red') sns.set(rc={'figure.figsize':(11.7,8.27)}) sns.scatterplot(x=c,y=d) sns.scatterplot(x=c1,y=d1) # + a_mean = a0.mean() a_std = a0.std() b_mean = b0.mean() #As b is not usually normal we use only a std to create range kde_range = [min(a_mean - 2*a_std, b_mean - 2*a_std),max(a_mean + 2*a_std, b_mean + 2*a_std)] kde_sample = np.arange(kde_range[0],kde_range[1],(kde_range[1]-kde_range[0])/100) # + a_kde_kernel = stats.gaussian_kde(a0) b_kde_kernel = stats.gaussian_kde(b0) a_kde_sample = a_kde_kernel.pdf(kde_sample) b_kde_sample = b_kde_kernel.pdf(kde_sample) max_ratio = np.abs((b_kde_sample - a_kde_sample)/np.max((a_kde_sample,b_kde_sample),axis=0)).max() a_kde = a_kde_kernel.pdf(c0) b_kde = b_kde_kernel.pdf(c0) c_diff = (b_kde-a_kde)/np.max((a_kde,b_kde),axis=0) c_diff_clipped = c_diff.clip(0,max=10*max_ratio) c_diff_log = np.log1p(c_diff_clipped) # - sns.set(rc={'figure.figsize':(11.7,8.27)}) sns.scatterplot(x=c0,y=c_diff) c_diff = (b_kde+a_kde)/(a_kde*b_kde) c_diff_clipped = c_diff.clip(0,max=10*max_ratio) c_diff_log = 
np.log1p(c_diff) sns.set(rc={'figure.figsize':(11.7,8.27)}) sns.scatterplot(x=c,y=c_diff_clipped) def _calculate_kde_kernels(X1,X2): X = pd.concat([X1,X2]) X_control = X[X['cp_type']==1] X_treatment = X[X['cp_dose']==0] kernels = {} cols = ['g-50'] for col in cols: #Calculate kernels x_control = X_control[col].values x_treatment = X_treatment[col].values kde_control_kernel = stats.gaussian_kde(x_control) kde_treatment_kernel = stats.gaussian_kde(x_treatment) kernels[col+'_control'] = kde_control_kernel kernels[col+'_treatment'] = kde_treatment_kernel #Calculate max ratio so that when calculating kde features based on the ratio of treatement/control, we have a threshold for values x_control_mean = x_control.mean() x_control_std = x_control.std() x_treatment_mean = x_treatment.mean() #As b is not usually normal we use only a std to create range kde_range = [min(x_control_mean - 2*x_control_std, x_treatment_mean - 2*x_control_std),max(x_control_mean + 2*x_control_std, x_treatment_mean + 2*x_control_std)] kde_sample = np.arange(kde_range[0],kde_range[1],(kde_range[1]-kde_range[0])/100) x_control_kde_sample = kde_control_kernel.pdf(kde_sample) x_treatment_kde_sample = kde_treatment_kernel.pdf(kde_sample) max_ratio = (x_treatment_kde_sample/x_control_kde_sample).max() kernels[col+'_ratio'] = max_ratio return kernels kernels = _calculate_kde_kernels() # # Data augmentation introduction X_train = prepared_data_normal_kde.X_train.copy() cols = X_train.columns.tolist() X_train = X_train.values # + rng = np.random.default_rng(seed=42) granularity = 100 #Means that we granularize the space into 100 distinct values max_dev = 0.1 #max_dev is the absolute max value we can add or substract from x in the augmented vector max_dev_steps = int(max_dev*granularity) #max_dev_steps converts max_dev to the number of "steps" to reach max_dev given the granularity normal_std_dev = 0.1 normal_p = np.arange(-max_dev*granularity,max_dev*granularity+1,1) normal_p = normal_p/granularity normal_p = 1/(normal_std_dev)*np.exp(-(normal_p*normal_p)/normal_std_dev**2) normal_p = normal_p.astype(np.float16) # - v_to_augment = np.array([vehicle,vehicle]).transpose() #### prob_distribution_overall = vehicle_dist.astype(np.float16) prob_distribution_overall = np.repeat(prob_distribution_overall[np.newaxis,:], v_to_augment.shape[1], axis=0) #p is the probability distribution for each of the columns ### prob_distribution_overall = np.repeat(prob_distribution_overall[np.newaxis,:], v_to_augment.shape[0], axis=0) #Extend prob_distribution_overall to each dimension normal_p = np.repeat(normal_p[np.newaxis,:], v_to_augment.shape[1], axis=0) normal_p = np.repeat(normal_p[np.newaxis,:,:], v_to_augment.shape[0], axis=0) # + #Transform v so that it rounds to the desired granularity v_rounded = (np.round(v_to_augment*granularity)).astype(int) # + #For each and every value x in v_rounded, we want to calculate a vector of probability of size 2n+1 such as #The probability value at index 0 is the probability that we remove max_dev to x i_steps = np.arange(-max_dev_steps,max_dev_steps+1,1) #initialization vector for the steps i_initial = np.tile(np.array([[i_steps]]),(v_to_augment.shape[0],v_to_augment.shape[1],1)) v_rounded_repeated = np.repeat(v_rounded[:, :, np.newaxis], i_steps.shape[0], axis=2) idx = i_initial + v_rounded_repeated idx = np.clip(idx,0,granularity-1) #For each prob_candidates = prob_distribution_overall[0,0,idx].copy() prob_candidates = prob_candidates*normal_p prob_candidates = 
prob_candidates/prob_candidates.sum(axis=2)[:,:,np.newaxis] # + # %%time additional = 100 var = np.zeros([v_to_augment.shape[0],v_to_augment.shape[1],additional]) for i in range(v_to_augment.shape[0]): for k in range(v_to_augment.shape[1]): var[i,k,:] = rng.choice(i_steps,size=additional,p=prob_candidates[i,k,:]) var = var/max_dev_steps*max_dev # + i = 3 exp = cols[i] print(exp) vehicle_mask = X_train[:,0]==0 vehicle = X_train[vehicle_mask][:,i] control = X_train[vehicle_mask==False][:,i] kernel_control = stats.gaussian_kde(vehicle) x_for_dist = np.arange(0,1,0.01) vehicle_dist = kernel_control.pdf(x_for_dist) vehicle_std = vehicle.std() var_normal = np.random.randn(vehicle.shape[0])*vehicle_std*0.4 vehicle_augmented = vehicle + var[:,0,0] print(vehicle_std) print(np.abs(var_normal).mean()) print(np.abs(var[:,0,0]).mean()) sns.set(rc={'figure.figsize':(11.7,8.27)}) sns.kdeplot(vehicle,color='blue') sns.kdeplot(vehicle_augmented,color='red') sns.kdeplot(vehicle + var_normal,color='grey') # sns.lineplot(x_for_dist,vehicle_dist,color='green') #sns.distplot(control,hist=False,color='grey') # - # # Remove non differentiating columns X_train = pd.read_csv(path_data+'train_features.csv') X_train.drop(['sig_id'],axis=1,inplace=True) X_train['cp_type'] = X_train['cp_type'].map({'trt_cp':0,'ctl_vehicle':1}) X_train['cp_dose'] = X_train['cp_dose'].map({'D1':0,'D2':1}) X_train['cp_time'] = X_train['cp_time'].map({24:0,48:0.5,72:1}) cols = X_train.columns.tolist() X_train = X_train.values # + i = 14 exp = cols[i] print(exp) vehicle_mask = X_train[:,0]==0 vehicle = X_train[vehicle_mask][:,i] control = X_train[vehicle_mask==False][:,i] vehicle_kde = stats.gaussian_kde(vehicle) control_kde = stats.gaussian_kde(control) rg = np.arange(-10,10,0.1) vehicle_kde_sample = vehicle_kde.pdf(rg) control_kde_sample = control_kde.pdf(rg) diff = (np.abs(vehicle_kde_sample-control_kde_sample)).mean() print(diff) sns.set(rc={'figure.figsize':(11.7,8.27)}) # sns.distplot(vehicle,hist=False,color='blue') # sns.distplot(control,hist=False,color='grey') sns.lineplot(x=rg,y=vehicle_kde_sample,color='blue') sns.lineplot(x=rg,y=control_kde_sample,color='grey') # - # # PCA all_cols = prepared_data.X_train.columns g_cols = [x for x in all_cols if ('g-' in x) & (not '_kde_diff' in x)] X = prepared_data.X_train[g_cols] pca_g = decomposition.PCA(n_components=X.shape[1], whiten=True, svd_solver='full', random_state=42 ) pca_g.fit(X) a = pca_g.explained_variance_ratio_ a_sum = a.cumsum() np.argmax(a_sum>0.95),a.shape[0] X_pca = pca_g.transform(X) X_pca[:,0].max() def _fit_pca(X_list): X = pd.concat(X_list,axis=0) all_cols = X.columns pca_names = ['g','c','g_kde','c_kde'] g_cols = [x for x in all_cols if ('g-' in x) & (not '_kde_diff' in x)] c_cols = [x for x in all_cols if ('c-' in x) & (not '_kde_diff' in x)] g_kde_cols = [x for x in all_cols if ('g-' in x) & ('_kde_diff' in x)] c_kde_cols = [x for x in all_cols if ('c-' in x) & ('_kde_diff' in x)] self.pca_cols_dict = {} self.pca_dict = {} for name,cols in zip(pca_names,[g_cols,c_cols,g_kde_cols,c_kde_cols]): if len(cols)>0: X_pca = X[cols] pca = decomposition.PCA(n_components=X_pca.shape[1], whiten=True, svd_solver='full', random_state=42 ) pca.fit(X_pca) self.pca_cols_dict[name] = cols self.pca_dict[name] = pca # + def _calculate_pca_components_to_keep(self,explained_variance_ratio_,pca_variance_threshold): explained_variance_ratio_cum = explained_variance_ratio_.cumsum() return np.argmax(explained_variance_ratio_cum>pca_variance_threshold) def 
_transform_pca(self,X,pca_variance_threshold): pca_names = list(self.pca_cols_dict.keys()) for name in pca_names: #Recover cols and fit pca cols = self.pca_cols_dict[name] pca = self.pca_dict[name] #Transform to current data X_pca = pca.transform(X[cols]) #Keep only necessary data + transform into pd variance_limit = self._calculate_pca_components_to_keep(pca.explained_variance_ratio_,pca_variance_threshold) X_pca = X_pca[:,:variance_limit] new_cols = [name+'_'+str(i) for i in range(variance_limit)] X_pca = pd.DataFrame(X_pca,new_cols) #Adjust X X.drop(cols,axis=1,inplace=True) X = pd.concat([X,X_pca],axis=1) return X # - a = { 'a':1, 'b':2 } list(a.keys()) # # OOF # #%debug prepared_data = prepareData(path_data,validation_ratio=0.0,folds=5, duplicate_low_label=False,add_kernels=False, perform_pca=True, pca_variance_threshold=0.95 ) a = prepared_data.X_train.values b = prepared_data.y_train.values c = prepared_data.X_train_nonscored.values vhcl = np.concatenate([a[a[:,0]==0,:][:,3:],b[a[:,0]==0,:],c[a[:,0]==0,:]],axis=1) oof_assignment = np.zeros(a.shape[0],dtype='int8') kmeans = cluster.MiniBatchKMeans(n_clusters=3000, n_init=10, verbose=5, batch_size=6, init_size=3000,random_state=42) #kmeans = cluster.KMeans(n_clusters=3000, n_init=5, verbose=5, max_iter=300, random_state=42, n_jobs=-1) kmeans.fit(vhcl) vhcl_1 = kmeans.predict(vhcl) ctrl_1 = kmeans.predict(ctrl) def naive_oof_assignment(vhcl_1,folds=5): folds = np.arange(5) folds_count = np.zeros(5) unique, counts = np.unique(vhcl_1, return_counts=True) r = np.array([unique,counts]).transpose() r = r[r[:,1].argsort()] oof_idx = np.zeros(vhcl_1.shape[0],dtype='int8') for i in r[:,0][::-1]: fold_to_fill = np.argmin(folds_count) oof_idx[vhcl_1==i] = fold_to_fill folds_count[fold_to_fill] += (vhcl_1==i).sum() return oof_idx # + vhcl_oof = naive_oof_assignment(vhcl_1) ctrl_oof = naive_oof_assignment(ctrl_1) oof_assignment[a[:,0]==0] = vhcl_oof oof_assignment[a[:,0]==1] = ctrl_oof # - SHIFT = 2222 oof_assignment_shift = np.roll(oof_assignment,SHIFT) oof_assignment_pd = pd.DataFrame(oof_assignment_shift,columns=['oof']) oof_assignment_pd['sig_id'] = prepared_data.X_train_ids.values oof_assignment_pd.to_csv('data/oof.csv') r[:,r[1,:]==5] r[1,:]==5 a = vhcl[(vhcl_1==2615)] b = vhcl[(vhcl_1!=2615)] vhcl_1==2615 b.shape a.std(axis=1).mean(),b[np.random.randint(0,b.shape[0],size=5)].std(axis=1).mean() # + sns.set(rc={'figure.figsize':(11.7,8.27)}) size = 50 sns.lineplot(x=np.arange(0,size),y=a[0,:size],color='red') sns.lineplot(x=np.arange(0,size),y=a[1,:size],color='red') sns.lineplot(x=np.arange(0,size),y=a[2,:size],color='red') sns.lineplot(x=np.arange(0,size),y=a[3,:size],color='red') sns.lineplot(x=np.arange(0,size),y=a[4,:size],color='red') sns.lineplot(x=np.arange(0,size),y=b[0,:size],color='blue') sns.lineplot(x=np.arange(0,size),y=b[1,:size],color='blue') sns.lineplot(x=np.arange(0,size),y=b[2,:size],color='blue') sns.lineplot(x=np.arange(0,size),y=b[3,:size],color='blue') sns.lineplot(x=np.arange(0,size),y=b[4,:size],color='blue') # - # # Post processing - antagonist MoA y1 = pd.read_csv(path_data+'train_targets_scored.csv') y2 = pd.read_csv(path_data+'train_targets_nonscored.csv') y = pd.DataFrame(np.concatenate([y1.values,y2.values],axis=1),columns=y1.columns.tolist()+y2.columns.tolist()) y_cols = y.columns effect = [x.split('_')[-1] for x in y_cols[1:]] names = ['_'.join(x.split('_')[:-1]) for x in y_cols[1:]] antagonist_pairs = [] names_pd = pd.DataFrame(y_cols[1:],columns=['initial']) names_pd['after'] = names names_pd = 
names_pd.groupby(['after'],as_index=False).agg( {'initial':[list,'count']} ) names_pd[names_pd['initial']['count']==2]['initial']['list'].tolist() pd.options.display.max_rows = 999 y[y['membrane_permeability_inhibitor']==1]\ [['membrane_permeability_enhancer', 'membrane_permeability_inhibitor']] exclusivity_tuples = [ ['potassium_channel_activator', 'potassium_channel_antagonist', 'potassium_channel_agonist', 'potassium_channel_blocker'], ['atp-sensitive_potassium_channel_antagonist', 'atp-sensitive_potassium_channel_agonist', 'atp-sensitive_potassium_channel_inhibitor'], ['gaba_receptor_agonist', 'gaba_receptor_modulator'], ['glutamate_receptor_agonist', 'glutamate_receptor_antagonist', 'glutamate_receptor_modulator'], ['nitric_oxide_donor', 'nitric_oxide_scavenger', 'nitric_oxide_stimulant'], ['prostanoid_receptor_antagonist', 'prostanoid_receptor_agonist', ' prostanoid_receptor_inhibitor'], ['sodium_channel_inhibitor', 'sodium_channel_activator', 'sodium_channel_blocker'], ['acetylcholine_receptor_agonist', 'acetylcholine_receptor_antagonist'], ['adenosine_receptor_agonist', 'adenosine_receptor_antagonist'], ['adenylyl_cyclase_activator', 'adenylyl_cyclase_inhibitor'], ['adrenergic_receptor_agonist', 'adrenergic_receptor_antagonist'], ['aldehyde_dehydrogenase_inhibitor', 'aldehyde_dehydrogenase_activator'], ['ampk_activator', 'ampk_inhibitor'], ['androgen_receptor_agonist', 'androgen_receptor_antagonist'], ['angiotensin_receptor_antagonist', 'angiotensin_receptor_agonist'], ['apoptosis_stimulant', 'apoptosis_inhibitor'], ['aryl_hydrocarbon_receptor_agonist', 'aryl_hydrocarbon_receptor_antagonist'], ['atp_channel_activator', 'atp_channel_blocker'], ['benzodiazepine_receptor_agonist', 'benzodiazepine_receptor_antagonist'], ['calcium_channel_blocker', 'calcium_channel_activator'], ['cannabinoid_receptor_agonist', 'cannabinoid_receptor_antagonist'], ['car_agonist', 'car_antagonist'], ['caspase_activator', 'caspase_inhibitor'], ['cc_chemokine_receptor_antagonist', 'cc_chemokine_receptor_agonist'], ['cftr_channel_agonist', 'cftr_channel_antagonist'], ['chloride_channel_blocker', 'chloride_channel_activator'], ['cholinergic_receptor_antagonist', 'cholinergic_receptor_agonist'], ['complement_antagonist', 'complement_inhibitor'], ['corticosteroid_agonist', 'corticosteroid_antagonist'], ['dopamine_receptor_agonist', 'dopamine_receptor_antagonist'], ['estrogen_receptor_agonist', 'estrogen_receptor_antagonist'], ['fatty_acid_receptor_agonist', 'fatty_acid_receptor_antagonist'], ['fxr_agonist', 'fxr_antagonist'], ['g_protein-coupled_receptor_agonist', 'g_protein-coupled_receptor_antagonist'], ['glucocorticoid_receptor_agonist', 'glucocorticoid_receptor_antagonist'], ['glucokinase_activator', 'glucokinase_inhibitor'], ['gonadotropin_receptor_agonist', 'gonadotropin_receptor_antagonist'], ['guanylate_cyclase_activator', 'guanylate_cyclase_stimulant'], ['histamine_receptor_agonist', 'histamine_receptor_antagonist'], ['hsp_inhibitor', 'hsp_inducer'], ['icam1_antagonist', 'icam1_inhibitor'], ['membrane_permeability_enhancer', 'membrane_permeability_inhibitor'], ['mineralocorticoid_receptor_antagonist', 'mineralocorticoid_receptor_agonist'], ['neurotensin_receptor_agonist', 'neurotensin_receptor_antagonist'], ['nfkb_inhibitor', 'nfkb_activator'], ['opioid_receptor_agonist', 'opioid_receptor_antagonist'], ['oxytocin_receptor_agonist', 'oxytocin_receptor_antagonist'], ['p53_activator', 'p53_inhibitor'], ['phospholipase_inhibitor', 'phospholipase_activator'], ['pka_activator', 'pka_inhibitor'], 
['ppar_receptor_agonist', 'ppar_receptor_antagonist'], ['progesterone_receptor_agonist', 'progesterone_receptor_antagonist'], ['protein_kinase_inhibitor', 'protein_kinase_activator'], ['protein_synthesis_inhibitor', 'protein_synthesis_stimulant'], ['retinoid_receptor_agonist', 'retinoid_receptor_antagonist'], ['serotonin_receptor_agonist', 'serotonin_receptor_antagonist'], ['sigma_receptor_agonist', 'sigma_receptor_antagonist'], ['sirt_activator', 'sirt_inhibitor'], ['smoothened_receptor_antagonist', 'smoothened_receptor_agonist'], ['src_inhibitor', 'src_activator'], ['thyroid_hormone_inhibitor', 'thyroid_hormone_stimulant'], ['tlr_agonist', 'tlr_antagonist'], ['trace_amine_associated_receptor_agonist', 'trace_amine_associated_receptor_antagonist'], ['transient_receptor_potential_channel_antagonist', 'transient_receptor_potential_channel_agonist'], ['trpv_agonist', 'trpv_antagonist'], ['urotensin_receptor_agonist', 'urotensin_receptor_antagonist'], ['vasopressin_receptor_agonist', 'vasopressin_receptor_antagonist'], ['wnt_inhibitor', 'wnt_agonist'] ] # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Waste Calculator # # This jupyter notebook will calculate the solid e-waste produced by solar, wind, and nuclear energy on a per unit energy basis. Sources for assumptions will be included. # # First we will calculate the waste produced by the solar farm at UIUC # ## Solar: # Cost per MWh: $45.99 \[[source](https://fs.illinois.edu/services/utilities-energy/production/solar-farms)\] # # Mass of Commercial Panel: ~50 pounds \[[source](https://brightstarsolar.net/common-sizes-of-solar-panels/), # [source](https://www.vivintsolar.com/learning-center/how-big-is-a-solar-panel)\] # # Average Capacity Factor: ~17\% \[[source](https://go.illinois.edu/solar)\] # # Expected Lifespan of Module: 30 years \[[source](https://www.irena.org/publications/2016/Jun/End-of-life-management-Solar-Photovoltaic-Panels)\] # # [UIUC Solar Farm Data](https://icap.sustainability.illinois.edu/files/project/175/Solar%20Farm%20Fact%20Sheet_0.pdf) rated_power = 4.68 # MW average_cf = 0.17 n_mod = 18867 mod_weight = 50 / 2.205 # pounds to kg life = 30 # years def expected_output(power, capfac, lifespan): hours = 8760 expected = (power*hours)*capfac*lifespan return expected sf_output = expected_output(rated_power, average_cf, life) print(f"Expected Power Output of UIUC solar farm 1.0: {sf_output} MWh") def waste_per_mwh(mass_waste, generation): """ Calculates waste per MWh. Generation is expected in MWh and mass_waste is expected in kg. """ waste = mass_waste/generation return waste sf_mass = n_mod * mod_weight # kg sf_waste = waste_per_mwh(sf_mass, sf_output) print(f"UIUC Solar Farm 1.0 will produce {sf_waste} kg/MWh of solar panel waste.") # ## Wind Power # # UIUC has a PPA with Railsplitter Wind Farm. Which uses GE SLE 1.5MW wind turbines. 
# # The weight to power ratio (t/MW): ~ 12.9 t/MW [source](https://www.sciencedirect.com/science/article/pii/S0956053X17300491) # # Lifespan: ~ 20 years [source](https://www.sciencedirect.com/science/article/pii/S0956053X17300491) # # Power = 100.5 MW [source](https://railsplitterwindfarm.com/) # # Average Capacity Factor IL: 35\% [source](https://icap.sustainability.illinois.edu/files/project/2235/RailSplitter-Wind%20Farm-9.6.16.pdf) rated_power = 100.5 # MW weight_power = 12.9*1000 # kg/MW average_cf = 0.35 life = 20 # years wf_output = expected_output(rated_power, average_cf, life) print(f"Expected Power Output of Railsplitter Wind Farm: {sf_output} MWh") wf_mass = weight_power * rated_power wf_waste = waste_per_mwh(wf_mass, wf_output) print(f"Railsplitter Wind Farm will produce {wf_waste} kg/MWh of turbine waste.") # ## Nuclear Power # A slightly different calculation needs to be done because the nuclear waste is "spent nuclear fuel." # The only figure I can find for nuclear waste produced annually is from a DOE factsheet that states "2000 tons per year." [source](https://www.energy.gov/ne/articles/5-fast-facts-about-spent-nuclear-fuel) # # In 2019, U.S. nuclear produced 809,409,000 MWh of electricity. [source](https://www.eia.gov/energyexplained/nuclear/us-nuclear-industry.php#:~:text=In%202019%2C%20the%20nuclear%20share,electricity%20generation%20was%20about%2020%25.) # # For this calculation, though, we care about waste per MWh(th) because the nuclear technology in TEMOA produces steam alone (which can then be used to produce electricity or district heating). np_mass = 2000 * 1000 # tons --> kg nuc_gen = 809409000/0.33 # MWh(th) np_waste = waste_per_mwh(np_mass, nuc_gen) print(f"Nuclear plants will produce approximately {np_waste} kg/MWh(th) of spent nuclear fuel.") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Course Human-Centered Data Science ([HCDS](https://www.mi.fu-berlin.de/en/inf/groups/hcc/teaching/winter_term_2020_21/course_human_centered_data_science.html)) - Winter Term 2020/21 - [HCC](https://www.mi.fu-berlin.de/en/inf/groups/hcc/index.html) | [Freie Universität Berlin](https://www.fu-berlin.de/) # *** # # A2 - Reproducibility Workflow # # Your assignment is to create a graph that looks a lot like the one below one, starting from scratch, and following best practices for reproducible research. # # ![wikipedia_pageViews_2008-2020.png](img/wikipedia_pageViews_2008-2020.png) # # ## Before you start # 1. Read all instructions carefully before you begin. # 1. Read all API documentation carefully before you begin. # 1. Experiment with queries in the sandbox of the technical documentation for each API to familiarize yourself with the schema and the data. # 1. Ask questions if you are unsure about anything! # 1. When documenting your project, please keep the following questions in your mind: # * _If I found this GitHub repository, and wanted to fully reproduce the analysis, what information would I want?_ # * _What information would I need?_ # ## Step 1️⃣: Data acquisition # In order to measure Wikipedia traffic from January 2008 until October 2020, you will need to collect data from two different APIs: # # 1. 
The **Legacy Pagecounts API** ([documentation](https://wikitech.wikimedia.org/wiki/Analytics/AQS/Legacy_Pagecounts), [endpoint](https://wikimedia.org/api/rest_v1/#!/Pagecounts_data_(legacy)/get_metrics_legacy_pagecounts_aggregate_project_access_site_granularity_start_end)) provides access to desktop and mobile traffic data from December 2007 through July 2016. # 1. The **Pageviews API** ([documentation](https://wikitech.wikimedia.org/wiki/Analytics/AQS/Pageviews), [endpoint](https://wikimedia.org/api/rest_v1/#!/Pageviews_data/get_metrics_pageviews_aggregate_project_access_agent_granularity_start_end)) provides access to desktop, mobile web, and mobile app traffic data from July 2015 through last month. # # For each API, you need to collect data for all months where data is available and then save the raw results into five (3+2) separate `JSON`files (one file per API query type) before continuing to step 2. # # To get you started, you can use the following **sample code for API calls**: # + # Source: https://public.paws.wmcloud.org/User:Jtmorgan/data512_a1_example.ipynb?format=raw import json import requests endpoint_legacy = 'https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/{project}/{access-site}/{granularity}/{start}/{end}' endpoint_pageviews = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}' # SAMPLE parameters for getting aggregated legacy view data # see: https://wikimedia.org/api/rest_v1/#!/Legacy_data/get_metrics_legacy_pagecounts_aggregate_project_access_site_granularity_start_end example_params_legacy = {"project" : "en.wikipedia.org", "access-site" : "desktop-site", "granularity" : "monthly", "start" : "2001010100", # for end use 1st day of month following final month of data "end" : "2020100100" } # SAMPLE parameters for getting aggregated current standard pageview data # see: https://wikimedia.org/api/rest_v1/#!/Pageviews_data/get_metrics_pageviews_aggregate_project_access_agent_granularity_start_end example_params_pageviews = {"project" : "en.wikipedia.org", "access" : "desktop", "agent" : "user", "granularity" : "monthly", "start" : "2001010100", # for end use 1st day of month following final month of data "end" : '2020101000' } # Customize these with your own information headers = { 'User-Agent': 'https://github.com/mvrcx', 'From': '' } def api_call(endpoint,parameters): call = requests.get(endpoint.format(**parameters), headers=headers) response = call.json() return response # - example_monthly_pageviews = api_call(endpoint_pageviews, example_params_pageviews) example_monthly_pageviews example_monthly_legacy = api_call(endpoint_legacy, example_params_legacy) example_monthly_legacy # Your `JSON`-formatted source data file must contain the complete and un-edited output of your API queries. The naming convention for the source data files is: `apiname_accesstype_firstmonth-lastmonth.json`. For example, your filename for monthly page views on desktop should be: `pagecounts_desktop-site_200712-202010.json` # # ### Important notes❗ # 1. As much as possible, we're interested in *organic* (user) traffic, as opposed to traffic by web crawlers or spiders. The Pageview API (but not the Pagecount API) allows you to filter by `agent=user`. You should do that. # 1. There is about one year of overlapping traffic data between the two APIs. You need to gather, and later graph, data from both APIs for this period of time. # ### Query, collect, and store data # 1. 
Setting Parameters for Pageview desktop/mobilesite/mobileapp and Pagecount desktop/mobile # 2. Query the data by calling the api with respective parameters # 3. Creating needed folders to directory # 4. Saving the collected data to JSON in `raw_data/` directory # + # Setting parameters for pageview desktop pageviews_desktop_param = {"project" : "en.wikipedia.org", "access" : "desktop", "agent" : "user", "granularity" : "monthly", "start" : "2001010100", "end" : '2020101000' } # Setting parameters for pageview mobile site pageviews_mobilesite_param = {"project" : "en.wikipedia.org", "access" : "mobile-web", "agent" : "user", "granularity" : "monthly", "start" : "2001010100", "end" : '2020101000' } # Setting parameters for pageview mobile app pageviews_mobileapp_param = {"project" : "en.wikipedia.org", "access" : "mobile-app", "agent" : "user", "granularity" : "monthly", "start" : "2001010100", "end" : '2020101000' } # Setting parameters for legacy desktop legacy_desktop_param = {"project" : "en.wikipedia.org", "access-site" : "desktop-site", "granularity" : "monthly", "start" : "2001010100", "end" : "2020100100" } # Setting parameters for legacy mobile legacy_mobile_param = {"project" : "en.wikipedia.org", "access-site" : "mobile-site", "granularity" : "monthly", "start" : "2001010100", "end" : "2020100100" } # Querying the data pageviews_monthly_desktop = api_call(endpoint_pageviews, pageviews_desktop_param) pageviews_monthly_mobilesite = api_call(endpoint_pageviews, pageviews_mobilesite_param) pageviews_monthly_mobileapp = api_call(endpoint_pageviews, pageviews_mobileapp_param) legacy_monthly_desktop = api_call(endpoint_legacy, legacy_desktop_param) legacy_monthly_mobile = api_call(endpoint_legacy, legacy_mobile_param) ################################################### # I MEAN THIS COULD'VE BEEN DONE MORE EFFICIENTLY # ################################################### # Creating directories, # Source: https://stackoverflow.com/questions/11373610/save-matplotlib-file-to-a-directory def mkdir_p(mypath): '''Creates a directory. equivalent to using mkdir -p on the command line''' from errno import EEXIST from os import makedirs,path try: makedirs(mypath) except OSError as exc: # Python >2.5 if exc.errno == EEXIST and path.isdir(mypath): pass else: raise # Create directory for all raw json files mkdir_p('raw data') mkdir_p('raw data/json') # Saving the queries to files with open('raw data/json/pagecounts_desktop-site_200101_202010.json', 'w', encoding='utf-8') as file: json.dump(pageviews_monthly_desktop, file, ensure_ascii=False, indent=4) with open('raw data/json/pagecounts_mobile-site_200101_202010.json', 'w', encoding='utf-8') as file: json.dump(pageviews_monthly_mobilesite, file, ensure_ascii=False, indent=4) with open('raw data/json/pagecounts_mobile-app_200101_202010.json', 'w', encoding='utf-8') as file: json.dump(pageviews_monthly_mobileapp, file, ensure_ascii=False, indent=4) with open('raw data/json/legacy_desktop-site_200101_202010.json', 'w', encoding='utf-8') as file: json.dump(legacy_monthly_desktop, file, ensure_ascii=False, indent=4) with open('raw data/json/legacy_mobile-site_200101_202010.json', 'w', encoding='utf-8') as file: json.dump(legacy_monthly_mobile, file, ensure_ascii=False, indent=4) # - # ## Step 2: Data processing # # You will need to perform a series of processing steps on these data files in order to prepare them for analysis. These steps must be followed exactly in order to prepare the data for analysis. 
At the end of this step, you will have a single `CSV`-formatted data file `en-wikipedia_traffic_200712-202010.csv` that can be used in your analysis (step 3) with no significant additional processing. # # * For data collected from the Pageviews API, combine the monthly values for `mobile-app` and `mobile-web` to create a total mobile traffic count for each month. # * For all data, separate the value of `timestamp` into four-digit year (`YYYY`) and two-digit month (`MM`) and discard values for day and hour (`DDHH`). # # Combine all data into a single CSV file with the following headers: # # | year | month |pagecount_all_views|pagecount_desktop_views|pagecount_mobile_views|pageview_all_views|pageview_desktop_views|pageview_mobile_views| # |------| ------|-------------------|-----------------------|----------------------|------------------|----------------------|---------------------| # | YYYY | MM |num_views |num_views |num_views |num_views |num_views |num_views | # # # # # # ### Creating corresponding dataframes, merging mobilesite and mobileapp data and doing some reindexing # + import pandas as pd ################################### # BEGINNING OF FIRST BULLET POINT # ################################### # Pageviews mobile site views into dataframe pv_mobilesite_df = pd.DataFrame((list(pageviews_monthly_mobilesite.values())[0]),columns = ['access','timestamp', 'views']) #pv_mobilesite_df # Pageviews mobile app views into dataframe pv_mobileapp_df = pd.DataFrame((list(pageviews_monthly_mobileapp.values())[0]),columns = ['access','timestamp', 'views']) #pv_mobileapp_df # Merging the two dataframes of mobile-site and mobile-app access new = pv_mobilesite_df.merge(pv_mobileapp_df, on='timestamp') #new # Swapping columns bc i didnt like it the other way around columns_titles = ["timestamp","access_x","views_x","access_y","views_y"] pv_total_mobile_df = new.reindex(columns=columns_titles) # Adding new column "total mobile" as a sum of views_x and views_y pv_total_mobile_df['total_mobile'] = pv_total_mobile_df.loc[:,['views_x','views_y']].sum(axis=1) #pv_total_mobile_df ################################### # END OF FIRST BULLET POINT # ################################### # - # ### Merging, remaining dataframes # Pretty sure the following could've been done more efficiently   ¯\\_(ツ)_/¯
# Strip the year and month out of `timestamp` and add them as new columns.
# Also add `mobile-site + mobile-app` views as a `total_mobile` column.
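#
# The repeated per-source boilerplate below could also be collapsed into one small helper.
# The sketch here is only illustrative (`to_year_month_df` is a hypothetical name, not part
# of this notebook); it assumes the step-1 API responses are still in memory and uses the
# pandas import from the cell above. The cells that follow do the same thing source by source.

# +
def to_year_month_df(response, value_col='views'):
    """Turn one API response into a dataframe with year/month columns."""
    df = pd.DataFrame(list(response.values())[0])
    df['year'] = df['timestamp'].str[:4]
    df['month'] = df['timestamp'].str[4:6]
    return df[['year', 'month', value_col]]

# Example (commented out; assumes the responses from step 1 are in memory):
# mobile = to_year_month_df(pageviews_monthly_mobilesite).merge(
#     to_year_month_df(pageviews_monthly_mobileapp),
#     on=['year', 'month'], suffixes=('_site', '_app'))
# mobile['total_mobile'] = mobile['views_site'] + mobile['views_app']
# -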
See resulting dataframe below to understand what I mean. # + #################################### # BEGINNING OF SECOND BULLET POINT # #################################### # Pageviews mobile site views into dataframe pv_mobilesite_df = pd.DataFrame((list(pageviews_monthly_mobilesite.values())[0]),columns = ['access','timestamp', 'views']) #pv_mobilesite_df #Split year (first 4 characters) and month (5th and 6th character) pv_mobilesite_df['year'] = "" pv_mobilesite_df['month'] = "" pv_mobilesite_df['year'] = pv_mobilesite_df.timestamp.str[:4] pv_mobilesite_df['month'] = pv_mobilesite_df.timestamp.str[4:6] new_pv_mobilesite_df = pv_mobilesite_df #new_pv_mobilesite_df # Swapping columns columns_titles = ["year", "month", "access","views"] new_pv_mobilesite_df = new_pv_mobilesite_df.reindex(columns=columns_titles) #new_pv_mobilesite_df # Doing basically the same for pageviews mobile app views pv_mobileapp_df = pd.DataFrame((list(pageviews_monthly_mobileapp.values())[0]), columns = ['access','timestamp','views']) pv_mobileapp_df['year'] = "" pv_mobileapp_df['month'] = "" pv_mobileapp_df['year'] = pv_mobileapp_df.timestamp.str[:4] pv_mobileapp_df['month'] = pv_mobileapp_df.timestamp.str[4:6] new_pv_mobileapp_df = pv_mobileapp_df new_pv_mobileapp_df = new_pv_mobileapp_df.reindex(columns=["year", "month", "access","views"]) #new_pv_mobileapp_df # Pageviews total mobile pv_total_mobile_df['year'] = "" pv_total_mobile_df['month'] = "" pv_total_mobile_df['year'] = pv_total_mobile_df.timestamp.str[:4] pv_total_mobile_df['month'] = pv_total_mobile_df.timestamp.str[4:6] pv_total_mobile_df['access'] = "mobile" new_pv_totalmobile_df = pv_total_mobile_df new_pv_totalmobile_df = new_pv_totalmobile_df.reindex(columns=["year", "month", "access", "total_mobile"]) #new_pv_totalmobile_df # Pageviews desktop views pv_desktop_df = pd.DataFrame((list(pageviews_monthly_desktop.values())[0]),columns = ['access','timestamp', 'views']) pv_desktop_df['year'] = "" pv_desktop_df['month'] = "" pv_desktop_df['year'] = pv_desktop_df.timestamp.str[:4] pv_desktop_df['month'] = pv_desktop_df.timestamp.str[4:6] new_pv_desktop_df = pv_desktop_df new_pv_desktop_df = new_pv_desktop_df.reindex(columns=["year", "month", "access","views"]) #new_pv_desktop_df # Legacy mobile views lg_mobile_df = pd.DataFrame((list(legacy_monthly_mobile.values())[0]), columns = ['access-site', 'timestamp', 'count']) lg_mobile_df['year'] = lg_mobile_df['month'] = "" lg_mobile_df['year'] = lg_mobile_df.timestamp.str[:4] lg_mobile_df['month'] = lg_mobile_df.timestamp.str[4:6] new_lg_mobile_df = lg_mobile_df new_lg_mobile_df= new_lg_mobile_df.reindex(columns=["year", "month", "access-site", "count"]) #new_lg_mobile_df # Legacy Desktop views lg_desktop_df = pd.DataFrame((list(legacy_monthly_desktop.values())[0]), columns = ['access-site', 'timestamp', 'count']) lg_desktop_df['year'] = lg_desktop_df['month'] = "" lg_desktop_df['year'] = lg_desktop_df.timestamp.str[:4] lg_desktop_df['month'] = lg_desktop_df.timestamp.str[4:6] new_lg_desktop_df = lg_desktop_df new_lg_desktop_df= new_lg_desktop_df.reindex(columns=["year", "month", "access-site", "count"]) #new_lg_desktop_df #new_lg_mobile_df new_pv_totalmobile_df #new_pv_desktop_df ################################### # END OF SECOND BULLET POINT # ################################### # - # ## Combining all data # # The goal is to have a dataframe that looks like this: # # | year | month 
|pagecount_all_views|pagecount_desktop_views|pagecount_mobile_views|pageview_all_views|pageview_desktop_views|pageview_mobile_views| # |------| ------|-------------------|-----------------------|----------------------|------------------|----------------------|---------------------| # | YYYY | MM |num_views |num_views |num_views |num_views |num_views |num_views | # # 1. For this, a new dataframe is being initialized containing two columns: `year`, `month` # 2. Setting the year and month range by copying the dataframe with the maximum amount years. I end up with this # # | year | month | # |------|-------| # | 2015 | 07 | # | .... | .. | # | 2020 | 09 | # # 3. Start joining dataframes to combine all data into single dataframe using outer joins # 4. Calculate the `pageview_all_views` as sum of `pageview_desktop_views + pageview_mobile_views` # 5. Calculate the `pagecount_all_views` as sum of `pagecount_desktop_views + pagecount_mobile_views` # 6. Creating new directory for clean data # 7. Saving the dataframe to `clean_data/result.csv` and to `clean_data/result.xlsx` because I like excel # + ############################################ # COMBINING ALL DATA INTO SINGLE DATAFRAME # ############################################ # Creating DataFrame with Columns: Year, Month (will be needed for joining later) result = pd.DataFrame(columns=['year', 'month']) # Initialize year and month range result['year'] = new_lg_desktop_df['year'] result['month'] = new_lg_desktop_df['month'] # Merging result table with Pagecount mobile views and renaming column afterwards result = pd.merge(result, new_lg_mobile_df[['year','month','count']], on=['year','month'], how='outer') result = result.rename(columns = {'count': 'pagecount_mobile_views'}) # Merging result table wirth pagecount desktop views, renaming and rearrangeing result = pd.merge(result, new_lg_desktop_df[['year','month','count']], on=['year','month'], how='outer') result = result.rename(columns = {'count': 'pagecount_desktop_views'}) result = result.reindex(columns=['year','month','pagecount_desktop_views', 'pagecount_mobile_views']) # Adding pagecount desktop + mobile sum_pagecount = result["pagecount_desktop_views"] + result["pagecount_mobile_views"] result["pagecount_all_views"] = sum_pagecount result = result.reindex(columns=['year','month','pagecount_all_views','pagecount_desktop_views', 'pagecount_mobile_views']) # Adding column for pageview_all_views result['pageview_all_views']="" # Adding pageview_desktop_views result = pd.merge(result, new_pv_desktop_df[['year', 'month', 'views']], on=['year','month'], how='outer') result = result.rename(columns = {'views': 'pageview_desktop_views'}) # Adding pageview_mobile_views result = pd.merge(result, new_pv_totalmobile_df[['year','month','total_mobile']], on=['year','month'],how='outer') result = result.rename(columns={'total_mobile':'pageview_mobile_views'}) # Summing pageview desktop+mobile sum_pageview = result['pageview_desktop_views']+result['pageview_mobile_views'] result['pageview_all_views'] = sum_pageview final_result = result # Making directory for csv file mkdir_p('clean data') # Exporting dataframe to Excel final_result.to_excel('clean data/result.xlsx', index=False) # Exporting dataframe to csv final_result.to_csv('clean data/result.csv', index=False) #final_result final_result # - # ## Step 3: Analysis # # For this assignment, the "analysis" will be fairly straightforward: you will visualize the dataset you have created as a **time series graph**. 
Your visualization will track three traffic metrics: mobile traffic, desktop traffic, and all traffic (mobile + desktop). In order to complete the analysis correctly and receive full credit, your graph will need to be the right scale to view the data; all units, axes, and values should be clearly labeled; and the graph should possess a legend and a title. You must also generate a .png or .jpeg formatted image of your final graph. # Please graph the data in your notebook, rather than using an external application! # ### Analyzing the given data # # Ok, so i figured out, that if you have data from pageview_mobile and data from pagecount_mobile it doesnt make sense to sum those up, since they are basically counting the same access type thats why i decided to calculate the average of the two provided data sources and plot it. And thats exactly whats happening here: # + # Extract all needed data: mobiletraffic, desktoptraffic, alltrafic step3 = pd.DataFrame(columns=['year', 'month']) step3[['year','month']] = final_result[['year','month']] # Calculating the mean for months that provide pageview and pagecount data (desktop): column_desktop = final_result.loc[: , ["pagecount_desktop_views","pageview_desktop_views"]] step3['desktop traffic'] = column_desktop.mean(axis=1) #step3 # Calculating the mean for months that provide pageviews and pagecount data (mobile): column_mobile = final_result.loc[:,["pagecount_mobile_views", "pageview_mobile_views"]] step3['mobile traffic'] = column_mobile.mean(axis=1) # Adding mobile mean + desktop mean traffic step3['all traffic'] = step3.fillna(0)['desktop traffic'] + step3.fillna(0)['mobile traffic'] # Displaying all rows pd.set_option('display.max_rows', step3.shape[0]+1) # As a result I get a Dataframe with the following columns: step3 # <-- Year | Month | desktop traffic | mobile traffic | all traffic # Exporting result to csv mkdir_p('result') step3.to_csv('result/result data.csv', index=False) # - # ## Plotting data # + import matplotlib.pyplot as plt import matplotlib.ticker as ticker df = pd.read_csv("result/result data.csv") ax = step3.plot(figsize=(20,10), x='year', colormap='tab20c', title='Pageviews on english Wikipedia') ax.set_xlabel("Year", size=20) ax.set_ylabel("Views [in Billions]", size=20) #################################### ### Some beauty work from now on ### #################################### # Scaling y_axis to billions = 10^9 # Source: https://stackoverflow.com/questions/10171618/changing-plot-scale-by-a-factor-in-matplotlib scale_y = 1e9 ticks_y = ticker.FuncFormatter(lambda y, pos: '{0:g}'.format(y/scale_y)) ax.yaxis.set_major_formatter(ticks_y) # Limiting y_axis with y_min = 0 and y_max = 12.000.000.000 = 12e9] # Source: https://stackoverflow.com/questions/3777861/setting-y-axis-limit-in-matplotlib ax.set_ylim([0,12e9]) # Changing position/size of legend because why tf did it cover my graph # Source: https://stackoverflow.com/questions/7125009/how-to-change-legend-size-with-matplotlib-pyplot ax.legend(loc=2, prop={'size': 17}) # Making title bigger ax.set_title(label='Pageviews on english Wikipedia', fontsize=20) ax = ax.plot() # Creating directory for img file (if not existent) mkdir_p('img') # Saving figure to png plt.savefig('result/result graph.png', dpi=300) # - # *** # # #### Credits # # This exercise is slighty adapted from the course [Human Centered Data Science (Fall 2019)](https://wiki.communitydata.science/Human_Centered_Data_Science_(Fall_2019)) of [Univeristy of 
Washington](https://www.washington.edu/datasciencemasters/) by [](https://wiki.communitydata.science/User:Jtmorgan). # # Same as the original inventors, we release the notebooks under the [Creative Commons Attribution license (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/). # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## 10.2 Code-Scheduling Constraints # ### 10.2.1 # # > The assignments in Fig. 10.5 have certain dependences. For each of the following pairs of statements, classify the dependence. # # > ``` # 1) a = b # 2) c = d # 3) b = c # 4) d = a # 5) c = d # 6) a = b # ``` # # > a) Statements (1) and (4). # True dependence. # > b) Statements (3) and (5). # Antidependence. # > c) Statements (1) and (6). # Output dependence. # > d) Statements (3) and (6). # True dependence. # > e) Statements (4) and (6). # Antidependence. # ### 10.2.2 # # > Evaluate the expression `((u+v)+(w+x))+(y+z)` exactly as parenthesized. Give register-level machine code to provide the maximum possible parallelism. # | `r1 = u` | `r2 = v` | `r3 = w` | `r4 = x` | `r5 = y` | `r6 = z` | # |:-|:-|:-|:-|:-|:-| # | `r7 = r1+r2` | `r8 = r3+r4` | `r9 = r5+r6` | | | | # | `r10 = r7+r8` | | | | | | # | `r11 = r10+r9` | | | | | | # ### 10.2.3 # # > Repeat Exercise 10.2.2 for the following expressions: # # > a) `(u + (v + (w + x))) + (y + z)`. # | `r1 = u` | `r2 = v` | `r3 = w` | `r4 = x` | `r5 = y` | `r6 = z` | # |:-|:-|:-|:-|:-|:-| # | `r7 = r3+r4` | `r8 = r5+r6` | | | | | # | `r9 = r2+r7` | | | | | | # | `r10 = r1+r9` | | | | | | # | `r11 = r10+r8` | | | | | | # > b) `(u + (v + w)) + (x + (y + z))`. # | `r1 = u` | `r2 = v` | `r3 = w` | `r4 = x` | `r5 = y` | `r6 = z` | # |:-|:-|:-|:-|:-|:-| # | `r7 = r2+r3` | `r8 = r5+r6` | | | | | # | `r9 = r1+r7` | `r10 = r4+r8` | | | | | # | `r11 = r9+r10` | | | | | | # # Minimum: 3 registers, 8 saved. # ### 10.2.4 # # > The expression of Exercise 10.2.2 can be executed by the sequence of instructions shown in Fig. 10.6. If we have as much parallelism as we need, how many steps are needed to execute the instructions? # | `r1 = u` | `r2 = v` | `r3 = x` | # |:-|:-|:-| # | `r1 = r1+r2` | | | # | `r2 = w` | | | # | `r2 = r2+r3` | | | # | `r1 = r1+r2` | `r3 = z` | | # | `r2 = y` | | | # | `r2 = r2+r3` | | | # | `r1 = r1+r2` | | | # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (teaching) # language: python # name: teaching # --- # + [markdown] slideshow={"slide_type": "slide"} # # Fundamentals CS block 3 Programming project # # ## Agenda # ### 1) Administrative stuff: schedule, exam # ### 2) Lecture 1 Editors # ### 3) Lecture 2 Virtual environments # ### 4) Choose projects and get started # + [markdown] slideshow={"slide_type": "slide"} # # Administration # ## Schedule # ### This week # Thursday 3 - 0630pm # # Friday 8 - 10am or 5 - 7pm # # Saturday off / work from home. # # ### Next week # Tue: aroudn 9:30 # # Thursday 2 - 6pm # # Friday 2 - 6pm # # # + [markdown] slideshow={"slide_type": "slide"} # # Choosing an editor # + [markdown] slideshow={"slide_type": "slide"} # # Pycharm # https://www.jetbrains.com/pycharm/ # # **Pro**: A full IDE, good for larger projects, all in one place: scripts, terminal, larger code files, version control, debugging, testing, plug-ins. 
# # **Contra**: heavy, sometimes slow, a lot to configure, costs some time to set up # + [markdown] slideshow={"slide_type": "slide"} # # Pycharm Demo # + [markdown] slideshow={"slide_type": "slide"} # # Atom # https://atom.io/ # # **Pro**: more lightweight, can be just a text editor. But can be configured into a full IDE with git and github integration, debugging, etc. # # **Contra**: Seems to be very neat, but needs time to be set up. # # There is an Atom IDE, that is, atom configured to be like an IDE: https://ide.atom.io/ # # There is teletype: https://teletype.atom.io/ # # Works with packages. you download and install atom and then install packages to add features to atom. # + [markdown] slideshow={"slide_type": "slide"} # # Emacs # https://www.gnu.org/software/emacs/ # # **Pro**: very lightweight basic text editor that you can configure like hell to be productive like hell. # # **Contra**: Less user friendly than atom or pycharm, the geek solution. # + [markdown] slideshow={"slide_type": "slide"} # # VI # The very basic text editor on unix systems. Sometimes it opens as a default, e.g. when for entering a git commit message. # # open `vi filename` # # exit `:q` or `:q!` when you want to overwrite changes. short tutorial: https://www.guru99.com/the-vi-editor.html#5 # + [markdown] slideshow={"slide_type": "slide"} # # Jupyter lab # https://blog.jupyter.org/jupyterlab-is-ready-for-users-5a6f039b8906 # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="0q7bjLTa2ksR" # & # # COSC 4P80 - Seminar Demo # # March 29, 2021 # + id="z51tQN3V-5ZQ" colab={"base_uri": "https://localhost:8080/"} outputId="22068881-39b4-4744-a547-fccdc4109bda" # Music output # !sudo apt-get install fluidsynth # !pip install midi2audio # !pip install mingus from mingus.containers import Note, NoteContainer, Track from mingus.midi.midi_file_out import write_NoteContainer, write_Track from midi2audio import FluidSynth fsy = FluidSynth() # imports for data manipulation import numpy as np import pandas as pd from sklearn.model_selection import train_test_split # imports for machine learning import keras from keras.models import Sequential from keras.layers import Dense, LSTM # + id="wR_1giDiDHN4" # read in the notes, make an array with 0's, except for the current note def read_and_format(input_filepath): input_data = [] with open(input_filepath) as input_file: for line in input_file: values = line.split(",") for value in values: tmp = [0.0] * 88 v = int(value) tmp[v-1] = 1.0 input_data.append(tmp) return input_data input_data = read_and_format("k330-allegro-moderato.csv") # + id="ZZmz0vCDrdfe" colab={"base_uri": "https://localhost:8080/"} outputId="5537646d-4eac-43a2-916e-f34f08778d33" # get the previous 20 notes, predict the next note def generate_datasets(input_array, n_prev = 20): temp_x = [input_array[i:i+n_prev] for i in range(len(input_array) - n_prev)] temp_y = [input_array[i+n_prev] for i in range(len(input_array) - n_prev)] return np.array(temp_x), np.array(temp_y) x, y = generate_datasets(input_data) x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20, shuffle=False) print(x_train.shape, y_train.shape) print(x_test.shape, y_test.shape) print(y_train[0]) # + id="Be0VQFAlqlPA" # build the model itself model = Sequential() model.add(LSTM(30)) model.add(Dense(88, activation="softmax")) 
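# Shape note: each training sample is the 20 previous notes, one-hot encoded over
# 88 piano keys, so the LSTM receives input of shape (20, 88) per sample and the
# Dense softmax layer outputs a probability distribution over the 88 candidate
# next notes. Without an explicit input_shape, Keras builds the layer weights on
# the first call to fit().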
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # + id="odZ6VCKIsLJ8" colab={"base_uri": "https://localhost:8080/"} outputId="8bd85a38-3c02-4f7c-bfeb-4bc50ec0d82c" # train the model model.fit(x_train, y_train, batch_size=10, epochs=100, validation_split=0.05) # + id="ucPR1Hp5vIUT" colab={"base_uri": "https://localhost:8080/"} outputId="3387e71a-a564-4e31-fb17-0ce5aad749f6" # test the model model.evaluate(x_test, y_test) # + id="kPvGpGVwNIoD" colab={"base_uri": "https://localhost:8080/"} outputId="a3ce2bca-0629-47db-e4ab-99f89413b3cf" # See incorrectly predicted predictions = model.predict(x_test) incorrect_indices = [] for (index, (prediction, target)) in enumerate(zip(predictions, y_test)): pred = np.argmax(prediction) tar = np.argmax(target) if pred != tar: incorrect_indices.append(index) print(", ".join(map(str, incorrect_indices))) # + id="X2WwsRUFm5u0" colab={"base_uri": "https://localhost:8080/"} outputId="e862edbd-0cd8-4a3f-bee7-35ca9ad1c6e1" # Predict song test_in = x_test[0] test_out = y_test[0] # initial - provide inital 20 notes # n - how many predicted notes to add (i.e. expand by this number) def make_big_song(initial, n): res =[ x for x in initial] for _ in range(n): next = model.predict(np.array([res[-20:],]))[0] res.append(next) return np.array(res) test = make_big_song(test_in, 60) print(test.shape) # + id="u7AKlNEV4THd" colab={"base_uri": "https://localhost:8080/"} outputId="b93a2709-cecf-4ca0-a316-f39deebce80e" # Expects n x 88 def vector_to_midi(arr, filename="nice.midi"): track = Track() for note_arr in arr: note_num = int(np.argmax(note_arr)) note = Note() note.from_int(note_num - 3) track.add_notes(note) write_Track(filename, track) print("Done!") vector_to_midi(test) # + colab={"base_uri": "https://localhost:8080/"} id="aXdjXdBXQ1Hs" outputId="8139d50e-4a23-4c01-b0e1-407d3fe2e3a4" def predict_to_file(first_20_notes, expected, filename="nice"): next = model.predict(np.array([first_20_notes])) actual_next = np.array([expected]) next_file = filename + "_predicted_note" actual_next_file = filename + "_actual_note" orig_file = filename + "_first_20_notes" vector_to_midi(next, next_file + ".midi") vector_to_midi(actual_next, actual_next_file + ".midi") vector_to_midi(first_20_notes, orig_file + ".midi") # This conversion not seem to work # fsy.midi_to_audio(next_file + ".midi", next_file + ".mp3") # fsy.midi_to_audio(actual_next_file + ".midi", actual_next_file + ".mp3") # fsy.midi_to_audio(orig_file + ".midi", orig_file + ".mp3") predict_to_file(test_in, test_out) inci = incorrect_indices[0] predict_to_file(x_test[inci], y_test[inci], 'first_incorrect') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![Logo_unad](https://upload.wikimedia.org/wikipedia/commons/5/5f/Logo_unad.png) # # #

# # Escuela de Ciencias Básicas, Tecnología e Ingeniería (ECBTI)
# ## Course: Introduction to the Python Programming Language
# ## February 2020
# ## Session 09 - Sorting and Searching
# ### Docente: # # *, I.C. Ph.D.* # # Ordenamiento # - **Ordenación o Clasificación de datos ($sort$, en inglés):** Operación consistente en disponer un conjunto —estructura— de datos en algún determinado orden con respecto a uno de los campos de elementos del conjunto. # # - **Ejemplo:** cada elemento del conjunto de datos de una guía telefónica tiene un campo nombre, un campo dirección y un campo número de teléfono; la guía telefónica está dispuesta en orden alfabético de nombres; los elementos numéricos se pueden ordenar en orden creciente o decreciente de acuerdo al valor numérico del elemento. # # # - En terminología de ordenación, el elemento por el cual está ordenado un conjunto de datos (o se está buscando) se denomina clave. # - Una colección de datos (Estructura) puede ser almacenada en un archivo, un arreglo (lista, tupla, diccionario) u otra estructura especial. # # # - Un arreglo se dice que está ordenado por la clave $k$ si está en orden ascendente o descendente con respecto a la clave. # # - El arreglo se dice que está en orden ascendente si: # $𝑖<𝑗$, esto implica que $𝑘_𝑖 \leq 𝑘_j$ # # - y se dice que está en orden descendente si: # $𝑖>𝑗$, esto implica que $𝑘_𝑖 \geq 𝑘_j$ # # # ## Algoritmos de Ordenamiento # Existen muchos algoritmos de ordenamiento, cuál es el “mejor”? # # **Eficiencia:** factor que mide la calidad y rendimiento de un algoritmo. El más eficiente es el que: # - Resuelve en menor tiempo de ejecución en computador; # - Menor número de instrucciones. # # # - A veces no se cuenta con un mecanismo que permita cuantificar la $eficiencia$. # # # - El mejor criterio para medirla es aislar una operación específica clave en la ordenación. # # # - Se empleará como medida de $eficiencia$ el número de comparaciones efectuada entre elementos. # # - $A$ será más eficiente que $B$ si se requiere un menor número de comparaciones. # - En el caso de ordenar elementos de un arreglo vectorial, el número de comparaciones será función del número de elementos, $n$, del arreglo: $n+4$, $n^2$, etc. # # - Los métodos de ordenación se suelen dividir en dos grandes grupos: # # - **Directos:** `Burbuja, Selección, Inserción`. # # - **Indirectos:** `Shell, Ordenación Rápida, Ordenación por Mezcla, Radixsort`. # ## Ordenamiento por intercambio # - Basado en la lectura sucesiva de la lista a ordenar, comparando el elemento inferior de la lista con los restantes y efectuando el intercambio de posiciones cuando el orden de la comparación no sea el correcto. # # **Ejemplo:** Ordenar el arreglo $[8, 4, 6, 2]$ # ![Ordena_Intercambio0.PNG](attachment:Ordena_Intercambio0.PNG) # - El elemento de índice $0$ se compara con cada elemento posterior del arreglo de índices $1$, $2$ y $3$. # # # - En cada comparación se comprueba si el elemento siguiente es menor que el del índice $0$, en este caso se intercambian. # # # - Después de terminar todas las comparaciones, el elemento menor se localiza en el índice $0$. # # ![Ordena_Intercambio1.PNG](attachment:Ordena_Intercambio1.PNG) # - El algoritmo continua comparando el elemento de índice $1$ con los elementos posteriores de índices $2$ y $3$. En cada comparación, si el elemento mayor está en el índice $1$ se intercambian los elementos. # # # - Después de hacer todas las comparaciones, el segundo elemento de menor valor del arreglo se almacena en el índice $1$. # # ![Ordena_Intercambio2.PNG](attachment:Ordena_Intercambio2.PNG) # La sublista a considerar es $8$, $6$. Una única comparación produce el arreglo ordenado. 
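#
# A direct implementation of the exchange sort described above, as a sketch (this cell is
# not part of the original notebook; it simply replays the $[8, 4, 6, 2]$ example):

# +
# Exchange sort: compare the element at index i with every later element j
# and swap whenever the later element is smaller.
miLista = [8, 4, 6, 2]
n = len(miLista)
for i in range(0, n - 1):
    for j in range(i + 1, n):
        if miLista[j] < miLista[i]:
            aux = miLista[i]
            miLista[i] = miLista[j]
            miLista[j] = aux
print("sorted list: ", miLista)
# -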
# - El método utiliza dos bucles anidados. # # # - Suponiendo que el arreglo es de tamaño $n$, el rango del ciclo externo irá desde el índice $0$ hasta $n-2$. # # # - Por cada índice $i$, se comparan los elementos posteriores de índices $j=i+1, i+2, \ldots, n-1$. # El intercambio ($swap$) de los elementos $a_i$, $a_j$ se realiza en un bloque: # # $$aux = a_i$$ # $$a_i = a_j$$ # $$a_j = aux$$ # # ### - Ordenamiento de Burbuja: # ![Ordena_Burbuja.PNG](attachment:Ordena_Burbuja.PNG) # + # Ordenamiento burbuja miLista =[8,4,6,2] print("lista desordenada: ", miLista) n = len(miLista) for i in range(1,n): for j in range(0,n-i): if miLista[j] > miLista[j+1]: aux = miLista[j] miLista[j] = miLista[j+1] miLista[j+1] = aux print("lista ordenada: ", miLista) # - # ## Ordenamiento por Selección # Los pasos del algoritmo son: # # 1. Seleccionar el elemento de menor valor del arreglo, intercambiarlo con el primer elemento, $a_0$. Con esto, la entrada de menor valor está en la primera posición del arreglo. # # 2. Considerar las posiciones del arreglo $a_1, a_2, \ldots$, seleccionar el elemento de menor valor e intercambiarlo con $a_1$. Las dos primeras entradas están en orden. # # 3. Continuar este proceso encontrando o seleccionando el elemento de menor valor de los restantes elementos del arreglo, intercambiándolos adecuadamente. # # **Ejemplo:** Ordenar el arreglo $[51, 21, 39, 80, 36]$ # # ![Ordena_Seleccion3.PNG](attachment:Ordena_Seleccion3.PNG) # ![Ordena_Seleccion_Seudocodigo.PNG](attachment:Ordena_Seleccion_Seudocodigo.PNG) # + # Ordenamiento Selección miLista = [51,21,39,80,36] print("lista desordenada: ", miLista) n = len(miLista) for i in range(0,n-1): minimo = i for j in range(i+1, n): if miLista[j] < miLista[minimo]: minimo = j aux = miLista[i] miLista[i] = miLista[minimo] miLista[minimo] = aux print("lista ordenada: ", miLista) # - # ## Ordenamiento por Insersión # Los pasos del algoritmo son: # # 1. El primer elemento, $a_0$ se considera ordenado; es decir el arreglo inicial consta de un elemento. # # 2. Se inserta $a_1$ en la posición correcta, delante o detrás de $a_0$, dependiendo de que sea mayor o menor. # # 3. Por cada ciclo o iteración $i$ (𝑑𝑒𝑠𝑑𝑒 $i = 1$ hasta $n-1$) se explora la sublista $a_{i-1} \ldots, a_{0}$ buscando la posición correcta de inserción; a la vez se mueve hacia la derecha en la sublista una posición todos los elementos mayores que el elemento a insertar $a_i$, para dejar vacía esa posición. # # 4. Insertar el elemento en la posición correcta. # **Ejemplo:** Ordenar el arreglo $[50, 20, 40, 80, 30]$ # # ![Ordena_Insersion.PNG](attachment:Ordena_Insersion.PNG) # ![Ordena_Insersion_Seudocodigo.PNG](attachment:Ordena_Insersion_Seudocodigo.PNG) # + # Algoritmo de insersion miLista = [50,20,40,80,30] print("LIsta desordenada: ", miLista) n = len(miLista) for i in range(1,n): actual = miLista[i] j = i while j > 0 and miLista[j-1] > actual: miLista[j] = miLista[j-1] j = j - 1 miLista[j] = actual print("LIsta ordenada: ", miLista) # - # ## Ordenamiento Shell # **Inserción directa:** Cada elemento se compara con los elementos contiguos a su izquierda, uno tras otro. # # - Si el elemento a insertar es el menor hay qué realizar muchas comparaciones antes de colocar en su lugar definitivo. # # **Shell:** modifica los saltos contiguos resultantes de las comparaciones por saltos de mayor tamaño. # # - Generalmente se toma como salto inicial $n/2$ ($n$, número de elementos), luego se reduce el salto a la mitad en cada repetición hasta que le salto es de tamaño $1$. 
# # **Ejemplo:** ordenar el arreglo $[6, 1, 5, 2, 3, 4, 0]$ # - $n = 7$, salto inicial $n/2 \approx 3$ # ![Ordena_Shell.PNG](attachment:Ordena_Shell.PNG) # ![Ordena_Shell_Seudocodigo.PNG](attachment:Ordena_Shell_Seudocodigo.PNG) # + #Ordenamiento Shell miLista = [6,1,5,2,3,4,0] print("LIsta desordenada: ", miLista) n = len(miLista) intervalo = n//2 while intervalo > 0 : for i in range(intervalo, n): j = i aux = miLista[i] while j >= intervalo and miLista[j - intervalo] > aux: miLista[j] = miLista[j - intervalo] j = j - intervalo miLista[j] = aux if intervalo == 2: intervalo = 1 else: intervalo = intervalo // 2 print("LIsta sordenada: ", miLista) # - # # # # Búsqueda # Cuando se trabaja con grandes cantidades de datos es necesario determinar si una estructura contiene un valor que coincida con un cierto valor clave. El proceso de encontrar un elemento específico se denomina búsqueda. # # Se analizarán dos técnicas de búsqueda: # # - Lineal o secuencial: más sencilla, # # - Binaria o dicotómica: más eficiente. # # **Ejemplo:** encontrar un nombre en una guía telefónica para consultar su teléfono. # # ## Búsqueda Secuencial # # - El algoritmo de búsqueda secuencial compara cada elemento del conjunto de datos con la clave de búsqueda. # # # - Dado que el conjunto no está en un orden particular, es probable que el elemento a buscar sea el primer elemento, el último o cualquier otro. # # # - En promedio, el algoritmo tendrá que comparar la clave de búsqueda con al menos la mitad de los elementos. # # # - Este método funciona bien con conjuntos pequeños o no ordenados. La eficiencia de la búsqueda secuencial o lineal es pobre. # **Ejemplo:** Buscar el elemento `u`: # # ![Busqueda01.PNG](attachment:Busqueda01.PNG) # ### Pseudocódigo: # ![Busqueda03.PNG](attachment:Busqueda03.PNG) #lista = ['c','o','m','p','u','t','a','c','i','o','n'] clave = input("Ingrese el elemento a encontrar") lista = "computacion" n = len(lista) for i in range(n): if lista[i] == clave: print("la clave está en la posición: ", i) # ![Busqueda04.PNG](attachment:Busqueda04.PNG) mayor = lista[0] for i in range(1,n): if lista[i] > mayor: mayor = lista[i] j = i print("el mayor es: ", mayor, " y se encuentra en la posición ", j) # ![Busqueda05.PNG](attachment:Busqueda05.PNG) mayor = lista[0] for i in range(1,n): if lista[i] < mayor: mayor = lista[i] print("el mayor es: ", mayor) # ### Eficiencia y Complejidad # Considerando la cantidad de comparaciones # # **- Mejor caso: 1.** El elemento buscado está en la primera posición, es decir, se hace una sola comparación. # # **- Peor caso: n.** El elemento buscado está en la última posición, necesitando igual número de comparaciones que la cantidad total de elementos en el arreglo. # # **- Caso promedio: n/2.** El elemento buscado estará cerca de la mitad, necesitando, en promedio, la mitad de comparaciones. # # La velocidad de ejecución depende linealmente del tamaño del arreglo. # # ## Búsqueda Binaria # # - La búsqueda secuencial se aplica a cualquier lista. # # # - Si la lista está ordenada, la búsqueda binaria proporciona una técnica de búsqueda mejorada. # # # - Una búsqueda binaria típica es la búsqueda de una palabra en un diccionario. # # # - Una idea similar se aplica en la búsqueda en una lista ordenada. # # - Se sitúa la lectura en el centro de la lista y se comprueba si la clave coincide con el valor del elemento central. Si no se encuentra el valor de la clave, se sigue la búsqueda uno en la mitad inferior o superior del elemento central de la lista. 
# # - En general, si los datos de la lista están ordenados se puede utilizar esa información para acortar el tiempo de búsqueda. # # **Algoritmo:** Suponiendo la lista almacenada en un arreglo, los índices de la lista son: `bajo=0` y `alto=n-1`, y `n` es el número de elementos en el arreglo. # # 1. Calcular el índice del punto central del arreglo: `central = mod((bajo+alto)/2)` (división entera) # # 2. Comparar el valor de este elemento central con la clave: # # ![Binaria01.PNG](attachment:Binaria01.PNG) # - Si `a[central]clave`, la nueva sublista de búsqueda tiene por valores extremos de su rango `bajo … central-1 = alto`. # ![Binaria02.PNG](attachment:Binaria02.PNG) # El algoritmo se termina bien o porque se ha encontrado la clave o porque el valor `bajo` excede a `alto` y el algoritmo devuelve el indicador de fallo `-1` (búsqueda no encontrada). # **Ejemplo:** Buscar la clave `40` en el siguiente arreglo: # ![Binaria03.PNG](attachment:Binaria03.PNG) # ![Binaria04.PNG](attachment:Binaria04.PNG) # Buscar en la sublista derecha de Central: # ![Binaria05.PNG](attachment:Binaria05.PNG) # Buscar en la sublista derecha de Central: # ![Binaria06.PNG](attachment:Binaria06.PNG) # El algoritmo ha requerido 3 comparaciones frente a 8 comparaciones que se hubieran requerido con la búsqueda secuencial. # ### Algoritmo # ![Binaria07.PNG](attachment:Binaria07.PNG) # + code_folding=[] lista = [-8, 1, 3, 6, 9, 15, 23, 87, 109] dato = 87 inf = 0 sup = len(lista) - 1 while inf <= sup: centro = (sup - inf) // 2 + inf if lista[centro] == dato: print('el dato está en la posición ', centro) break else: if dato < lista[centro]: sup = centro - 1 else: inf = centro + 1 else: print('El elemento no se encuentra en la lista') # - # ### Eficiencia y Complejidad # # Considerando la cantidad de comparaciones # # **- Mejor caso: 1.** El elemento buscado está en el centro, por lo tanto se hace una sola comparación. # # # **- Peor caso: log(n).** El elemento buscado está en una esquina. # # # **- Caso promedio: log(n/2).** # # # La velocidad de ejecución depende logarítmicamente del tamaño del arreglo. # # ### Comparación: Búsqueda Binaria y Secuencial # ![ComparacionBinariaSecuencial.png](attachment:ComparacionBinariaSecuencial.png) # ### Una breve, brevísima introducción a la generación de números aleatorios... import random as rnd # Todas las funciones del módulo dependen de la función básica *random()*, que genera **UN** valor aleatorioa de punto flotante que cumple con una distribución uniforme en el intervamo semi-abierto $[0.0,1.0)$ a = rnd.random() print(a) # Si lo que se quiere es generar **un** valor entero aleatorio en un rango especificado: b = rnd.randint(165,324) print(b) a = rnd.randrange(0,100, 2) print(a) # Si lo que se quiere es generar una lista de valores aleatorios... # + listarnd = [] b = rnd.randint(0,100) print(b) for i in range(b): c = rnd.randint(0,10) listarnd.append(c) print(listarnd) # - # Y si se quiere seleccionar un valor aleatoriamente de la lista... print(rnd.choice(listarnd)) # Otra forma alternativa (y más eficiente) de generar una lista de valores usando *list comprenhension*... a = rnd.randint(0,100) b = rnd.randint(0,100) print("rango aleatorio de los números aleatorios: ",a) print("cantidad aleatoria de números aleatorioas a ser creados ",b) z = [rnd.randint(0,a) for i in range(b)] print(z) # Como estas, existen muchas otras funciones para generar números aleatorios. Los invito a que las exploren. 
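#
# As a short end-to-end exercise combining the topics of this session (random data, sorting
# and binary search), the sketch below is not part of the original notebook: it generates a
# random list, sorts it with the bubble sort shown earlier, and then locates one of its
# values with the binary search shown earlier.

# +
import random as rnd

miLista = [rnd.randint(0, 100) for _ in range(15)]

# Bubble sort, as presented above
n = len(miLista)
for i in range(1, n):
    for j in range(0, n - i):
        if miLista[j] > miLista[j + 1]:
            aux = miLista[j]
            miLista[j] = miLista[j + 1]
            miLista[j + 1] = aux

# Binary search for a value that is known to be in the (now sorted) list
clave = rnd.choice(miLista)
inf, sup, pos = 0, len(miLista) - 1, -1
while inf <= sup:
    centro = (inf + sup) // 2
    if miLista[centro] == clave:
        pos = centro
        break
    elif clave < miLista[centro]:
        sup = centro - 1
    else:
        inf = centro + 1

print("sorted list: ", miLista)
print("key", clave, "found at position", pos)
# -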
# + def busqueda2(a,x): print(a) n = len(a) for i in range(n): if a[i] == x: print("el elemento está en la posición:", i) #break lista = "paralelepipedo" busqueda2(lista,"e") # + def BusquedaMayor(a): n = len(a) mayor = "a" for i in range(n): if a[i] > mayor: mayor = a[i] print("La letra de mayor valor es: ", mayor) lista = "paralelepipedo" BusquedaMayor(lista) # + def BusquedaMenor(a): n = len(a) menor = "z" for i in range(n): if a[i] < menor: menor = a[i] print("La letra de mayor valor es: ", menor) lista = "paralelepipedo" BusquedaMenor(lista) # + numero = int(input("Ingrese el número: ")) x = 2 resto = numero % x while (resto != 0) and (x < numero): x = x + 1 resto = numero % x if x == numero: print("El número {0} es primo".format(numero)) else: print("El número {0} no es primo".format(numero)) # - # # copiado de listas # + import timeit COUNT = 50000000 print("Array duplicating. Tests run", COUNT, "times") setup = 'a = [0,1,2,3,4,5,6,7,8,9]; import copy' print("b = list(a)\t\t", timeit.timeit(stmt='b = list(a)', setup=setup, number=COUNT)) print("b = copy.copy(a)\t", timeit.timeit(stmt='b = copy.copy(a)', setup=setup, number=COUNT)) print("b = a.copy()\t\t", timeit.timeit(stmt='b = a.copy()', setup=setup, number=COUNT)) print("b = a[:]\t\t", timeit.timeit(stmt='b = a[:]', setup=setup, number=COUNT)) print("b = a[0:len(a)]\t\t", timeit.timeit(stmt='b = a[0:len(a)]', setup=setup, number=COUNT)) print("*b, = a\t\t\t", timeit.timeit(stmt='*b, = a', setup=setup, number=COUNT)) print("b = []; b.extend(a)\t", timeit.timeit(stmt='b = []; b.extend(a)', setup=setup, number=COUNT)) print("b = []; for item in a: b.append(item)\t", timeit.timeit(stmt='b = []\nfor item in a: b.append(item)', setup=setup, number=COUNT)) print("b = [i for i in a]\t", timeit.timeit(stmt='b = [i for i in a]', setup=setup, number=COUNT)) print("b = [*a]\t\t", timeit.timeit(stmt='b = [*a]', setup=setup, number=COUNT)) print("b = a * 1\t\t", timeit.timeit(stmt='b = a * 1', setup=setup, number=COUNT)) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sys sys.path.append('/home/jovyan/rocketry/open_rocketry/library') from nosecone_library.specific_noses import BNC50J from bodytubes.semroc import bt20, bt50, bt60 from bodytubes.modelrockets_us import _29mm from nosecone.nosecone_bases import HollowBaseWithScrewHole from nosecone.transition import Transition from viewscad_connector.renderer import Renderer vr = Renderer() t = Transition(1.23975, bodytube1=_29mm, bodytube2=bt50, shoulder=.75, thickness=1 / 16., bodytube3=bt20, open_end=True, fudge=0.02) vr.render(t.transition, outfile="bt50t.stl") t = Transition(1.67725, bodytube1=bt60, bodytube2=_29mm, shoulder=.75, thickness=1 / 16., bodytube3=bt50, open_end=True) vr.render(t.transition, outfile="29mmt.stl") # + nc = BNC50J(thickness=1. / 16, scale_bodytube=_29mm) nc = HollowBaseWithScrewHole(nc, shoulder=0.75, screw_diameter=1 / 16., screw_length=0.25) vr.render(nc.cone, outfile="bt50n.stl") # - nc = BNC50J(thickness=1. 
/ 16, scale_bodytube=bt60) nc = HollowBaseWithScrewHole(nc, shoulder=0.75, screw_diameter=1 / 16., screw_length=0.25) vr.render(nc.cone, outfile="29mmn.stl") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + [markdown] id="nmOFggjpY9ac" # # The purpose of this *.ipynb* is to create *.csv*-files containing filenames and corresponding labels (deducted from the directory structure) in a specified split of a dataset contained in a specified folder to be used as datasets for training and validating in machine learning algorithms. # Depending on your implementation of a dataset for your ML algorithm you might be required to move the actual files into a specific directory structure. This notebook does not move the files in any way, it only creates *.csv*-files and directories, if specified. # + id="bzyKmmeswG2H" #@title import necessary modules { form-width: "15%", display-mode: "form" } import torch from torch.utils.data import Dataset, IterableDataset from torchvision import transforms, datasets import random import numpy as np from typing import List, TypeVar, Iterable import os import csv import bisect import functools random.seed(222) np.random.seed(222) # + colab={"base_uri": "https://localhost:8080/"} id="lH55r_3RxCNG" outputId="351751f4-a7f3-4a48-cdf7-b8bb6ba856a2" #@title Mount Google Drive { form-width: "15%", display-mode: "form" } #@markdown This block requires you to go through the login process of your Google Account to access Google Drive where your dataset should be stored. from google.colab import drive base_path = '/content/data' drive.mount(base_path, force_remount=True) wd = os.path.join(base_path, "MyDrive") # + id="q4Wg7-cdvf_w" #@title CombinedDataset Class { form-width: "15%", display-mode: "form" } T_co = TypeVar('T_co', covariant=True) class CombinedDataset(Dataset[T_co]): # modified version of torch.utils.data.dataset.ConcatDataset r"""Dataset as a combination of multiple datasets. This class is a modified version of torch.utils.data.dataset.ConcatDataset, which is useful to assemble different existing datasets. 
Args: datasets (sequence): List of datasets to be concatenated """ datasets: List[Dataset[T_co]] cumulative_sizes: List[int] @staticmethod def cumsum(sequence): r, s = [], 0 for e in sequence: l = len(e) r.append(l + s) s += l return r @staticmethod def labels_classes(sequence): label = 0 classes = [] for ds in sequence: label += len(ds.classes) classes.append(ds.classes) return list(range(label)), classes @staticmethod def mapped_labels(labels, classes): map = {} cidx = 0 max_label_idx = len(labels) for idx in range(len(classes)): for idc in range(0,len(classes[idx])): if cidx >= max_label_idx: return map map.update({labels[cidx] : (classes[idx][idc], idc)}) cidx += 1 return map @staticmethod def mapped_items_per_label(datasets, map): items_per_label = {} max_idx = len(map.keys()) idx = 0 offset = 0 offset2 = 0 for ds in datasets: labels = [instance[1] for instance in ds.imgs] for i in range(idx, idx+len(ds.classes)): items_per_label.update({i : [j+offset2 for j in range(len(labels)) if labels[j] == map.get(i)[1]]}) offset = len(ds.classes) offset2 += len(labels) idx += offset #offset2 -= 1 return items_per_label @staticmethod def modified_classes(datasets, grouped_classes): mod_classes = [] for d in range(len(grouped_classes)): p = os.path.basename(datasets[d].root) for c in grouped_classes[d]: mod_classes.append(p+" / "+c) return mod_classes def __init__(self, datasets: Iterable[Dataset]) -> None: super(CombinedDataset, self).__init__() self.datasets = list(datasets) assert len(self.datasets) > 0, 'datasets should not be an empty iterable' # type: ignore[arg-type] for d in self.datasets: assert not isinstance(d, IterableDataset), "CobinedDataset does not support IterableDataset" self.cumulative_sizes = self.cumsum(self.datasets) #self.imgs, self.possible_labels, self.grouped_classes = self.labels_classes(self.datasets) self.possible_labels, self.grouped_classes = self.labels_classes(self.datasets) self.mapped_labels = self.mapped_labels(self.possible_labels, self.grouped_classes) self.items_per_label = self.mapped_items_per_label(self.datasets, self.mapped_labels) self.mod_classes = self.modified_classes(self.datasets, self.grouped_classes) def __len__(self): return self.cumulative_sizes[-1] def __getitem__(self, idx): if idx < 0: if -idx > len(self): raise ValueError("absolute value of index should not exceed dataset length") idx = len(self) + idx dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) if dataset_idx == 0: sample_idx = idx else: sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] return (self.datasets[dataset_idx].imgs[sample_idx][0], self.labels[idx]) # + id="F1-r2FmXvxn0" #@title def create_data_csv{ form-width: "15%", display-mode: "form" } #@markdown The version **create_data_csv()** saves the relevant data in the form of **rel_path/filename, label** def create_data_csv(save_to_path, _filename, _label): with open(save_to_path, 'w', newline='') as csvfile: header_key = ['filename','label'] new_val = csv.DictWriter(csvfile, fieldnames=header_key) new_val.writeheader() for idx in range(len(_filename)): new_val.writerow({'filename': os.path.relpath(_filename[idx], start=drive_path),'label': _label[idx]}) #@markdown The version **create_data_csv_1** saves the relevant data in the form of **filename, label** def create_data_csv_1(save_to_path, _filename, _label): with open(save_to_path, 'w', newline='') as csvfile: header_key = ['filename','label'] new_val = csv.DictWriter(csvfile, fieldnames=header_key) new_val.writeheader() for idx in range(len(_filename)): 
new_val.writerow({'filename': os.path.basename(_filename[idx]),'label': _label[idx]}) #@markdown By selecting the *keep_rel_path_in_filename checkbox* you switch from using **create_data_csv_1** (default) to using **create_data_csv()** # + id="W_xKsdY9uzWV" #@title Provide a path to the dataset { form-width: "15%", display-mode: "form" } #@markdown If your dataset consists of multiple folders for different classes with subfolders for their respective labels, please choose CombinedDataset. If you have one folder with one class containing subfolders for lables, choose ImageFolder. dataset_type = "ImageFolder" #@param ["ImageFolder", "CombinedDataset"] #@markdown Please provide a path to your dataset, relative to your Google Drive "root" level. rel_data_path = "images" #@param {type:"string"} #@markdown Please select how to split your dataset into training and validation set max_train_data = 10 #@param {type:"integer"} max_val_data = 30 #@param {type:"integer"} keep_rel_path_in_filename = True #@param {type:"boolean"} #@markdown Please select this checkbox if want to create a folder for the *.csv* files under the **parent directory** of *rel_data_path* create_folder = True #@param {type:"boolean"} drive_path = os.path.join(wd, rel_data_path) custom_mode = False if dataset_type == "CombinedDataset": custom_mode = True # + id="lCTD_VX-wBxz" #@title Create datasets { form-width: "15%", display-mode: "form" } d_transform = transforms.Compose([]) if custom_mode: all_folders = [os.path.join(drive_path, f) for f in sorted(os.listdir(drive_path))] #create dataset from all folders in the specified path test_fp = os.path.join(drive_path, "test") train_fp = os.path.join(drive_path, "train") if train_fp in all_folders and test_fp in all_folders: folder_list = [os.path.join(train_fp, f) for f in sorted(os.listdir(train_fp))] folder_list2 = [os.path.join(test_fp, f) for f in sorted(os.listdir(test_fp))] all_folders.remove(train_fp) all_folders.remove(test_fp) all_folders.extend(folder_list) all_folders.extend(folder_list2) dataset_list = [] for folder in all_folders: _ds = datasets.ImageFolder(root=folder, transform=d_transform) dataset_list.append(_ds) ds = CombinedDataset(dataset_list) _filename = [] _label = [] for im in range(len(ds)): _filename.append(ds[im][0]) _label.append(ds.mod_classes[ds[im][1]]) items_per_label = ds.items_per_label else: ds = datasets.ImageFolder(root=drive_path, transform=d_transform) items_per_label = {} max_idx = len(ds.classes) labels = ds.targets for i in range(max_idx): items_per_label.update({i : [j for j in range(len(labels)) if labels[j] == i]}) _filename = [] _label = [] for im in range(len(ds)): _filename.append(ds.imgs[im][0]) _label.append(ds.classes[ds.imgs[im][1]]) lens = [] for l in items_per_label.keys(): lens.append(len(items_per_label[l])) # + id="afKxNh_B0wF2" #@title Split datasets { form-width: "15%", display-mode: "form" } random.seed(222) np.random.seed(222) _val_filename = [] val_label = [] #_test_filename = [] #test_label = [] _train_filename = [] train_label = [] for idx in range(len(lens)): l = lens[idx] k = min(l, max_train_data) m = min(l-k, max_val_data) cur_class = ds.classes[idx] data = items_per_label[idx] np.random.shuffle(data) train_slice = data[:k] #test_slice = data[k:k+m] val_slice = data[k:k+m] _train_filename.extend(train_slice) train_label.extend([cur_class]*k) #_test_filename.extend(test_slice) #test_label.extend([cur_class]*m) _val_filename.extend(val_slice) val_label.extend([cur_class]*m) val_filename = [_filename[i] for i in 
_val_filename] #test_filename = [_filename[i] for i in _test_filename] train_filename = [_filename[i] for i in _train_filename] # + id="nvCWmoMmP2yj" colab={"base_uri": "https://localhost:8080/"} outputId="85a980e5-b6bd-4abb-94e4-da585ec7e48e" #@title Save *.csv files { form-width: "15%", display-mode: "form" } if create_folder: d = os.path.dirname(drive_path) b = os.path.basename(drive_path) save_to = os.path.join(d, "ProtoNet: "+b+" {}-{}".format(max_train_data, max_val_data)) os.makedirs(save_to, exist_ok=True) else: save_to = drive_path all_sample_path = os.path.join(save_to, "all_samples.csv") val_sample_path = os.path.join(save_to, "val.csv") train_sample_path = os.path.join(save_to, "train.csv") #test_sample_path = os.path.join(save_to, "test.csv") if keep_rel_path_in_filename: create_data_csv(all_sample_path, _filename, _label) create_data_csv(val_sample_path, val_filename, val_label) #create_data_csv(test_sample_path, test_filename, test_label) create_data_csv(train_sample_path, train_filename, train_label) else: create_data_csv_1(all_sample_path, _filename, _label) create_data_csv_1(val_sample_path, val_filename, val_label) #create_data_csv_1(test_sample_path, test_filename, test_label) create_data_csv_1(train_sample_path, train_filename, train_label) print(f"Created 2 CSV files at \"{save_to}\".\n") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Norms, conditions numbers and Eigensystems # # In linear-algebra calculations, we are sometimes very unfortunate and have to solve a problem like $Ax = b$ (given fixed $A$), where small changes in $b$ produce extremely large changes in $x$. Such problems are said to be **ill-conditioned**. # # This notebook explores this phenomenon. Along the way we will have to calculate condition numbers and eigenvalues. # ## Preliminaries # # Let's load numpy as usual, and the linear algebra package from numpy as we will find some functions in it useful. We also use the `GaussianElimination()` from one of the other notebooks and define the $L_2$-norm import numpy as np from numpy import linalg as la def GaussianElimination(A, b): n = A.shape[1] # Append the vector b as a column to the matrix A A1 = np.c_[A,b] i = 0 while i < n - 1: j = i+1 while j < n: A1[j, i+1:] = A1[j, i+1:] - A1[i, i+1:]*A1[j,i]/A1[i,i] j += 1 i += 1 x = np.zeros(n) i = n-1 while i >= 0: j = i x[i] = A1[i,n] while j < n-1: x[i] -= A1[i,j+1]*x[j+1] j += 1 x[i] = x[i]/A1[i,i] i -= 1 return x def MatrixInverseViaGaussianElimination(A): n = A.shape[1] A1 = np.hstack((A,np.identity(n))) i = 0 while i < n: j = 0 while j < n: if(j == i): j += 1 continue A1[j] = (A1[j] - A1[i]*A1[j,i]/A1[i,i]) A1[j] = A1[j]/A1[j,j] j += 1 i += 1 return A1[:,n:2*n] def L2Norm(v): return la.norm(v,2) # # Norms # # The $L^2$-norm of a matrix $A$ is formally defined by # # $$\lVert A \rVert_2 = \sup_{x\neq0}\frac{\lVert Ax \rVert_2}{\lVert x \rVert} $$ # # For practical calculation, this is not a particularly useful definition. We derived a more useful formula: # # $$ \lVert A \rVert_2 = \sqrt{\lambda_\text{max}} $$ # # where $\lambda_\text{max}$ is the maximum eigenvector of $A A^T$. # # Let's check that NumPy's definitions agree with these formula. 
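# As an additional sanity check of the supremum definition itself, here is a minimal sketch (using only `numpy`) that samples many random vectors $x$ and compares the largest observed ratio $\lVert Ax \rVert_2 / \lVert x \rVert_2$ with `la.norm(A, 2)` and with the largest singular value of $A$; the sampled maximum should approach the true norm from below.

# +
import numpy as np
from numpy import linalg as la

rng = np.random.default_rng(0)
A = rng.random((6, 6))

# Sample random directions and record the stretch factor ||Ax|| / ||x||.
xs = rng.standard_normal((10000, 6))
ratios = la.norm(xs @ A.T, axis=1) / la.norm(xs, axis=1)

# The sampled maximum is a lower bound on the L2 norm, which in turn equals
# the largest singular value of A.
print(ratios.max(), la.norm(A, 2), la.svd(A, compute_uv=False)[0])
# -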
# + A = np.random.rand(6,6) lmax = np.max(la.eig(A@A.T)[0]) la.norm(A,2) - np.sqrt(lmax) # - # Note that by default NumPy's `la.norm()` function computes the *Frobenius* norm for matrices. If you want the $L^2$-norm you need to call it as `la.norm(A,2)`, as above. # ## Ill-condition system - example case # # Let's look at an example where the matrix $A$ is ill-conditioned # + A = np.array([[1.002,1],[1,0.998]]) b = np.array([2.002,1.998]) x = GaussianElimination(A,b) print(x) # - # Slightly perturbing $b$ causes a huge change in the value of $x$ # + bp = np.array([2.0021,1.998]) xp = GaussianElimination(A,bp) print(xp) # - print("Change in b = %.4f%%" % (100*L2Norm(bp-b)/L2Norm(b))) print("Change in x = %.2f%%" % (100*L2Norm(xp-x)/L2Norm(x))) # ## Condition number # # The condition number of a matrix $A$ is defined as # # $$ \kappa(A) = \lVert A \rVert_2 \lVert A^{-1} \rVert_2$$ # # We learned in the lectures that another practical way to compute this is via # # $$\kappa(A) = \sqrt{\frac{\lambda_\text{max}}{\lambda_\text{min}}} $$ # # where $\lambda_\text{min/max}$ is the maximum eigenvalue of $A A^T$. # # Let's use these two methods along with NumPy's built in method `la.cond()` A = np.random.rand(6,6) eigenvalues = la.eig(A.T@A)[0] lmax = np.max(eigenvalues) lmin = np.min(eigenvalues) print("Condition number computed via norm definition:\t", la.norm(A,2)*la.norm(la.inv(A),2)) print("Condition number comuted via eigenvalue formula:", np.sqrt(lmax/lmin)) print("Condition number comuted via la.cond(A,2):\t", la.cond(A,2)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:anaconda3] # language: python # name: conda-env-anaconda3-py # --- # # Dark Sky API: Current Weather # This script gets and prints current weather data from [Dark Sky API](https://darksky.net/dev) in the CLI. An API key is required. # ### Shebang/Interpreter Directive # + # #!/usr/bin/env python3 # - # ### Import [urllib3](https://pypi.python.org/pypi/urllib3), [simplejson](https://pypi.python.org/pypi/simplejson/) and [datetime](https://docs.python.org/3/library/datetime.html) # If `import json` does not work, then try `simplejson`. My Jupyter Notebook needed to import `json`. import urllib3 as ul3 import json import datetime # ### Disable Warnings # SSL certification verification returns `InsecureRequestWarning`, so `urllib3.disable_warnings()` can be used to suppress the warning. ul3.disable_warnings(ul3.exceptions.InsecureRequestWarning) # ### API Key and Geographic Coordinates # The following strings fill out the Dark Sky API _key_, followed by geographic _coordinates_ of the weather location. k = '' # example c = '40.7306, -73.9352' # ### urllib3.PoolManager, request() # `urllib3.PoolManager()` class is used for making the server request. h = ul3.PoolManager() r = h.request('GET', 'https://api.darksky.net/forecast/' + k + '/' + c) # ### Convert Angle to Cardinal Direction # `windBearing`'s value is an integer which indicates angle of wind direction with true north at 0°. So the following function converts that angle to a cardinal direction letter(s). def degrees_to_cardinal(d): dirs = ["N", "NNE", "NE", "ENE", "E", "ESE", "SE", "SSE", "S", "SSW", "SW", "WSW", "W", "WNW", "NW", "NNW"] ix = int((d + 11.25)/22.5 - 0.02) return dirs[ix % 16] # ### Format Weather Data # The following print calls format the weather data for the CLI. 
Unfortunately, there are two `KeyError`'s which indicate that data was unavailable at the present time. Which is the purpose for the two try/except clauses below. Based on that, additional try/except clauses can be added, if other `KeyError` exceptions are experienced. print("\n\033[1;34mCurrent Weather\033[0;0m (data provided thru Dark Sky API)\n") print("Date/Time:", datetime.datetime.fromtimestamp(json.loads(r.data.decode('utf-8'))['currently']['time'])) print("Summary :", json.loads(r.data.decode('utf-8'))['currently']['summary'], '\n') print("Distance to nearest storm:", json.loads(r.data.decode('utf-8'))['currently']['nearestStormDistance'], "miles") print("\033[1;34mPrecipitation:\033[0;0m") print("* intensity :", json.loads(r.data.decode('utf-8'))['currently']['precipIntensity'], "mm/h") try: print("* intensity error margin:", json.loads(r.data.decode('utf-8'))['currently']['precipIntensityError'], "mm/h") except KeyError: print("KeyError: omitted data") print("* probability :", json.loads(r.data.decode('utf-8'))['currently']['precipProbability'] * 100, "percent") try: print("* type :", json.loads(r.data.decode('utf-8'))['currently']['precipType'], '\n') except KeyError: print("KeyError: omitted data") print("\033[1;34mTemperature-related conditions:\033[0;0m") print("Temperature :", json.loads(r.data.decode('utf-8'))['currently']['temperature'], "deg F") print("Apparent temp.:", json.loads(r.data.decode('utf-8'))['currently']['apparentTemperature'], "deg F") print("Dew point :", json.loads(r.data.decode('utf-8'))['currently']['dewPoint'], "deg F") print("Humidity :", json.loads(r.data.decode('utf-8'))['currently']['humidity'] * 100, "percent\n") print("\033[1;34mAir-related conditions:\033[0;0m") print("Air pressure:", json.loads(r.data.decode('utf-8'))['currently']['pressure'], "mb") print("\033[1;34mWind:\033[0;0m") print("* speed :", json.loads(r.data.decode('utf-8'))['currently']['windSpeed'], "mph") print("* gust :", json.loads(r.data.decode('utf-8'))['currently']['windGust'], "mph") print("* bearing:", degrees_to_cardinal(json.loads(r.data.decode('utf-8'))['currently']['windBearing']), "degrees\n") print("\033[1;34mCeiling-related conditions:\033[0;0m") print("Cloud cover :", json.loads(r.data.decode('utf-8'))['currently']['cloudCover'] * 100, "percent") print("UV index :", json.loads(r.data.decode('utf-8'))['currently']['uvIndex']) print("Visibility :", json.loads(r.data.decode('utf-8'))['currently']['visibility'], "miles") print("Ozone density:", json.loads(r.data.decode('utf-8'))['currently']['ozone'], "DU") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import torch import argparse import cv2 import numpy as np import torch from torch.autograd import Function from torchvision import models, transforms class FeatureExtractor(): """ Class for extracting activations and registering gradients from targetted intermediate layers """ def __init__(self, model, target_layers): self.model = model self.target_layers = target_layers self.gradients = [] def save_gradient(self, grad): self.gradients.append(grad) def __call__(self, x): outputs = [] self.gradients = [] for name, module in self.model._modules.items(): x = module(x) if name in self.target_layers: x.register_hook(self.save_gradient) outputs += [x] return outputs, x class ModelOutputs(): """ Class for making a forward pass, and getting: 1. 
The network output. 2. Activations from intermeddiate targetted layers. 3. Gradients from intermeddiate targetted layers. """ def __init__(self, model, feature_module, target_layers): self.model = model self.feature_module = feature_module self.feature_extractor = FeatureExtractor(self.feature_module, target_layers) def get_gradients(self): return self.feature_extractor.gradients def __call__(self, x): target_activations = [] for name, module in self.model._modules.items(): if module == self.feature_module: target_activations, x = self.feature_extractor(x) elif "avgpool" in name.lower(): x = module(x) x = x.view(x.size(0),-1) else: x = module(x) return target_activations, x # + def preprocess_image(img): normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) preprocessing = transforms.Compose([ transforms.ToTensor(), normalize, ]) return preprocessing(img.copy()).unsqueeze(0) def show_cam_on_image(img, mask): heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET) heatmap = np.float32(heatmap) / 255 cam = heatmap + np.float32(img) cam = cam / np.max(cam) return np.uint8(255 * cam) # - class GradCam: def __init__(self, model, feature_module, target_layer_names, use_cuda): self.model = model self.feature_module = feature_module self.model.eval() self.cuda = use_cuda if self.cuda: self.model = model.cuda() self.extractor = ModelOutputs(self.model, self.feature_module, target_layer_names) def forward(self, input_img): return self.model(input_img) def __call__(self, input_img, target_category=None): if self.cuda: input_img = input_img.cuda() features, output = self.extractor(input_img) if target_category == None: target_category = np.argmax(output.cpu().data.numpy()) one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32) one_hot[0][target_category] = 1 one_hot = torch.from_numpy(one_hot).requires_grad_(True) if self.cuda: one_hot = one_hot.cuda() one_hot = torch.sum(one_hot * output) self.feature_module.zero_grad() self.model.zero_grad() one_hot.backward(retain_graph=True) grads_val = self.extractor.get_gradients()[-1].cpu().data.numpy() target = features[-1] target = target.cpu().data.numpy()[0, :] weights = np.mean(grads_val, axis=(2, 3))[0, :] cam = np.zeros(target.shape[1:], dtype=np.float32) for i, w in enumerate(weights): cam += w * target[i, :, :] cam = np.maximum(cam, 0) cam = cv2.resize(cam, input_img.shape[2:]) cam = cam - np.min(cam) cam = cam / np.max(cam) return cam # + """ python grad_cam.py 1. Loads an image with opencv. 2. Preprocesses it for VGG19 and converts to a pytorch variable. 3. Makes a forward pass to find the category index with the highest score, and computes intermediate activations. Makes the visualization. """ img_path = 'cat_dog.png' model = models.resnet50(pretrained=True) grad_cam = GradCam(model=model, feature_module=model.layer4, target_layer_names=["2"], use_cuda=True) # - model img = cv2.imread(img_path, 1) img = np.float32(img) / 255 # Opencv loads as BGR: img = img[:, :, ::-1] input_img = preprocess_image(img) # + # If None, returns the map for the highest scoring category. # Otherwise, targets the requested category. 
target_category = None grayscale_cam = grad_cam(input_img, target_category) grayscale_cam = cv2.resize(grayscale_cam, (img.shape[1], img.shape[0])) cam = show_cam_on_image(img, grayscale_cam) # - import matplotlib.pyplot as plt plt.imshow(cam) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.10.1 64-bit # language: python # name: python3 # --- # ## Instructions # # Build a number guessing game # + logo = """ / _ \_ _ ___ ___ ___ /__ \ |__ ___ /\ \ \_ _ _ __ ___ | |__ ___ _ __ / /_\/ | | |/ _ \/ __/ __| / /\/ '_ \ / _ \ / \/ / | | | '_ ` _ \| '_ \ / _ \ '__| / /_\\| |_| | __/\__ \__ \ / / | | | | __/ / /\ /| |_| | | | | | | |_) | __/ | \____/ \__,_|\___||___/___/ \/ |_| |_|\___| \_\ \/ \__,_|_| |_| |_|_.__/ \___|_| """ # + import random print(logo) print("Welcome to the Number Guessing Game \n") number = random.randint(1,101) attempt = 1 print("I'm thinking of a number between 1 and 100 \n") difficulty = input("Choose a difficulty. Type 'easy' or 'hard' \n") if difficulty == 'easy': max_attempts = 10 print(f"You have {max_attempts} attempts remaining to guess the number.") else: max_attempts = 5 print(f"You have {max_attempts} attempts remaining to guess the number") attempt = 0 guess = 0 while guess != number: guess = int(input("Make a guess: ")) if attempt == max_attempts: print("You've run out of guesses. You lose. \n") break if guess < number: print("Too low") attempt += 1 print(f"You have {max_attempts - attempt} attempts left.") elif guess > number: print("Too high") attempt += 1 print(f"You have {max_attempts - attempt} attempts left.") else: print(f"You got it! The answer is {number}") attempt += 0 # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### データ読み込み import pandas as pd import numpy as np train_data = pd.read_csv("01.original_data/titanic/train.csv") test_data = pd.read_csv("01.original_data/titanic/test.csv") train_data.head(10) # 欠損値 有無確認 train_data.isnull().sum() # ### 学習データ train_data["Age"] = train_data["Age"].fillna(train_data["Age"].median()) train_data["Embarked"] = train_data["Embarked"].fillna("S") train_data.loc[train_data.Sex == 'male', 'Sex'] = 0 train_data.loc[train_data.Sex == 'female', 'Sex'] = 1 train_data.loc[train_data.Embarked == 'S', 'Embarked'] = 0 train_data.loc[train_data.Embarked == 'C', 'Embarked'] = 1 train_data.loc[train_data.Embarked == 'Q', 'Embarked'] = 2 train_data = train_data.drop(columns='PassengerId') train_data = train_data.drop(columns='Name') train_data = train_data.drop(columns='Ticket') train_data = train_data.drop(columns='Cabin') train_data.rename(columns={'Survived': 'y:Survived'}, inplace=True) train_data.rename(columns={'Pclass': 'x__0:Pclass'}, inplace=True) train_data.rename(columns={'Sex': 'x__1:Sex'}, inplace=True) train_data.rename(columns={'Age': 'x__2:Age'}, inplace=True) train_data.rename(columns={'SibSp': 'x__3:SibSp'}, inplace=True) train_data.rename(columns={'Parch': 'x__4:Parch'}, inplace=True) train_data.rename(columns={'Fare': 'x__5:Fare'}, inplace=True) train_data.rename(columns={'Embarked': 'x__6:Embarked'}, inplace=True) train_data.head(10) # ### テストデータ test_data["Age"] = test_data["Age"].fillna(test_data["Age"].median()) test_data["Embarked"] = test_data["Embarked"].fillna("S") test_data.loc[test_data.Sex == 
'male', 'Sex'] = 0 test_data.loc[test_data.Sex == 'female', 'Sex'] = 1 test_data.loc[test_data.Embarked == 'S', 'Embarked'] = 0 test_data.loc[test_data.Embarked == 'C', 'Embarked'] = 1 test_data.loc[test_data.Embarked == 'Q', 'Embarked'] = 2 test_data = test_data.drop(columns='PassengerId') test_data = test_data.drop(columns='Name') test_data = test_data.drop(columns='Ticket') test_data = test_data.drop(columns='Cabin') # test_data.rename(columns={'Survived': 'y:Survived'}, inplace=True) test_data.rename(columns={'Pclass': 'x__0:Pclass'}, inplace=True) test_data.rename(columns={'Sex': 'x__1:Sex'}, inplace=True) test_data.rename(columns={'Age': 'x__2:Age'}, inplace=True) test_data.rename(columns={'SibSp': 'x__3:SibSp'}, inplace=True) test_data.rename(columns={'Parch': 'x__4:Parch'}, inplace=True) test_data.rename(columns={'Fare': 'x__5:Fare'}, inplace=True) test_data.rename(columns={'Embarked': 'x__6:Embarked'}, inplace=True) test_data.head() # ### 学習/検証データ分割 from sklearn.model_selection import train_test_split train, validation = train_test_split(train_data, train_size=0.75, random_state=42, shuffle=True) # ### CSV書き出し train.to_csv('02.data/train.csv', index=False) validation.to_csv('02.data/validation.csv', index=False) test_data.to_csv('02.data/test.csv', index=False) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from sklearn.feature_extraction.text import CountVectorizer vectorizer = CountVectorizer(min_df = 1) contents= ['상처받은 아이들은 너무 일찍 커버려', '내가 상처받은 거 아는 사람 불편해', '잘 사는 사람들은 좋은 사람 되기 쉬워', '아무 일도 아니야 괜찮아'] #형태소 분석 from konlpy.tag import Twitter t=Twitter() contents_tokens = [t.morphs(row) for row in contents] contents_tokens contents_for_vectorize=[] for content in contents_tokens: sentence='' for word in content: sentence = sentence + ' ' + word contents_for_vectorize.append(sentence) contents_for_vectorize #백터라이즈 X= vectorizer.fit_transform(contents_for_vectorize) num_samples, num_features = X.shape num_samples, num_features vectorizer.get_feature_names() X.toarray().transpose() # + new_post = ['상처받기 싫어 괜찮아'] new_post_tokens = [t.morphs(row) for row in new_post] new_post_for_vectorize = [] for content in new_post_tokens: sentence = '' for word in content: sentence = sentence + ' ' + word new_post_for_vectorize.append(sentence) new_post_for_vectorize # - new_post_vec = vectorizer.transform(new_post_for_vectorize) new_post_vec.toarray() import scipy as sp def dist_raw(v1, v2): delta = v1 - v2 return sp.linalg.norm(delta.toarray()) dist = [dist_raw(each, new_post_vec) for each in X] dist print("Best post is", dist.index(min(dist)), ',dist=', min(dist)) print('test post is --->',new_post) print('vest dist post is ---->', contents[dist.index(min(dist))]) #유클리드 유사도 for i in range(0,len(contents)): print(X.getrow(i).toarray()) print('--------------------') print(new_post_vec.toarray()) # + # TfidfVectorizer from sklearn.feature_extraction.text import TfidfVectorizer vectorizer = TfidfVectorizer(min_df=1, decode_error='ignore') X = vectorizer.fit_transform(contents_for_vectorize) num_samples, num_features = X.shape num_samples, num_features # - X.toarray().transpose() #문장 적용 new_post_vec = vectorizer.transform(new_post_for_vectorize) new_post_vec.toarray() #거리 구하는 법 def dist_norm(v1,v2): v1_normalized = v1 / sp.linalg.norm(v1.toarray()) v2_normalized = v2 / sp.linalg.norm(v1.toarray()) delta = v1_normalized - 
v2_normalized return sp.linalg.norm(delta.toarray()) dist = [dist_norm(each, new_post_vec) for each in X] print("Best post is", dist.index(min(dist)), ',dist=', min(dist)) print('test post is --->',new_post) print('vest dist post is ---->', contents[dist.index(min(dist))]) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib.pyplot as plt import numpy as np #

# No 1 : Multiple Subplots

# # Dengan data di bawah ini buatlah visualisasi seperti expected output : x = np.linspace(2*-np.pi, 2*np.pi, 200) tan = np.tan(x)/10 cos = np.cos(x) sin = np.sin(x) # + fig = plt.figure(figsize=[20,8]) ax = fig.subplots(nrows=1,ncols=3, sharex=True, sharey=True) ax[0].plot(x,tan, color='blue', label ='tanX') ax[0].set_title('Grafik tanx') ax[1].plot(x,sin, color='red', label = 'sinX') ax[1].set_title('Grafix sinx') ax[2].plot(x,cos, color='green', label ='cosX') ax[2].set_title('Grafik consx') fig.legend(loc='upper center') plt.show() # - # ![title](image/day3-expected-output1.png) #

# No 2 : Nested Axis

# # Dengan data di bawah ini, buatlah visualisasi seperti expected output : x = np.linspace(2*-np.pi, 2*np.pi, 100) y = np.cos(x) y2 = np.cos(x**2) y3 = np.cos(x**3) y4 = np.cos(x**4) y5 = np.cos(x**5) # + fig = plt.figure(figsize=[20,10]) ax = fig.subplots() ax.plot(x,y) ax.set_title('Grafik CosX', fontweight='bold') # WIDTH X HEIGHT, WIDTHSIZE X HEIGHTSIZE i2 = ax.inset_axes([-0.1, -0.1, 0.25, 0.25]) i2.plot(x,y2,color='r') i2.plot(x,y) i2.set_title('Grafik CosX^2', fontweight='bold') i3 = ax.inset_axes([0.8, -0.1, 0.25, 0.25]) i3.plot(x,y3,color='g') i3.plot(x,y) i3.set_title('Grafik CosX^3', fontweight='bold') i4 = ax.inset_axes([0.8, 0.8, 0.25, 0.25]) i4.plot(x,y4,color='black') i4.plot(x,y) i4.set_title('Grafik CosX^4', fontweight='bold') i5 = ax.inset_axes([-0.1, 0.8, 0.25, 0.25]) i5.plot(x,y5,color='lightgreen') i5.plot(x,y) i5.set_title('Grafik CosX^5', fontweight='bold') plt.show() # - # ![title](image/day3-expected-output2.png) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: me # language: python # name: me # --- # + import os import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline # - pd.options.display.float_format = '{:20,.2f}'.format imf_exports = list(filter(lambda x: x.endswith(".csv"), os.listdir('imf'))) y = range(1960, 2018) # ### Data dictionary # | name | grouping | period |granularity| n_X |countries| contents |meta cols| comments | # | -------|-----------|-----------|---- -----|-------|---------|-------------------------------------|---------|---------------------------------------------------------------------| # | PPLT | country |1960-2017 |monthly | 85 | 208 | trade and labour statistics | 9 (705) | Reference periods vary from country to country | # | ITS | country |1960-2015 |yearly | 76 | 153 | services import and export shares | 5 (61) | - | # | IRFCL | country |1990-2017 |monthly |272 | 84 | central bank reserves | 8 (344) | - | # | COMMP | world |1980-2017M6|monthly | 63 | - |commodities prices | 6 (456) | Data only available at world level | # | FDI | country |1980-2017 |yearly | 9 |188 | financial institutions | 4 (42) | - | # | CPI | country |1960-2017 |monthly | 102 |189 | consumer price indices | 7 (703) | - | # | GFSR | country |1972-2017 |yearly | 84 | 169 | government revenues | 11 (57) | Uses 'Classification Name' rather than 'Indicator Name' | # | BOP | country |1960-2017 |quarterly | 5517 | 155 |government balance of payment | 6 (232) | - | # #### Analysis scripts # + # set(pd.read_csv('data/imf/%s' % 'BOP_11-25-2018 19-15-19-60_timeSeries.csv').fillna(0)['Indicator Name'].values.tolist()) # + country_df_list = [] for file_name in imf_exports: if 'COMMP' not in file_name and 'GFSR' not in file_name: raw_df = pd.read_csv('imf/%s' % file_name).fillna(0) # if 'GFSR' in file_name: # country_df = raw_df.rename(index=str, columns={"Classifcation Name": "Indicator Name"}) # else: # country_df = raw_df country_df = raw_df cleaned_df = country_df.drop(columns=['Country Code', 'Indicator Code']).loc[country_df['Attribute'] == 'Value'].drop(columns=['Attribute']) country_df_list.append(cleaned_df) # - df = pd.concat(country_df_list, axis=1, ignore_index = True) df.size df.columns df[df['Unit Code']].head(50) indicators = set(df['Indicator Name'].values.tolist()) len(indicators) print(indicators) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: 
'1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from SimPEG import Mesh, Maps from simpegEM1D import * import numpy as np # %pylab inline nearthick = np.logspace(-1, 1, 5) deepthick = np.logspace(1, 2, 10) hx = np.r_[nearthick, deepthick] mesh1D = Mesh.TensorMesh([hx], [0.]) depth = -mesh1D.gridN[:-1] LocSigZ = -mesh1D.gridCC nlay = depth.size topo = np.r_[0., 0., 30.] FDsurvey = EM1DSurveyFD( rx_location = np.array([0., 0., 30.]), src_location = np.array([0., 0., 30.]), field_type = 'secondary', rx_type = 'Hz', src_type = 'VMD', depth = depth, topo = topo, frequency = np.r_[100.], offset = np.r_[8.] ) sig_half = 1e-2 chi_half = 0. Expmap = Maps.ExpMap(mesh1D) # Conductivity prob = EM1D( mesh1D, sigmaMap=Expmap, jacSwitch=False, chi=np.zeros(FDsurvey.n_layer) ) if prob.ispaired: prob.unpair() if FDsurvey.ispaired: FDsurvey.unpair() prob.pair(FDsurvey) m_1D = np.log(np.ones(nlay)*sig_half) Hz = FDsurvey.dpred(m_1D) # + import numpy as np from scipy.interpolate import InterpolatedUnivariateSpline as iuSpline from empymod.filters import key_201_2009 as fhtfilt # Chance this to your choice def get_spline_values(filt, inp): """Return required calculation points.""" # Number per decade from filter.factor nr_per_dec = 1/np.log(filt.factor) # Get min and max required out-values (depends on filter and inp-value) outmax = filt.base[-1]/inp.min() outmin = filt.base[0]/inp.max() # Number of out-values nout = int(np.ceil(np.log(outmax/outmin)*nr_per_dec) + 1) # The cubic InterpolatedUnivariateSpline needs at least 4 points if nout-filt.base.size < 3: nout = filt.base.size+3 # Calculate output values out = np.exp(np.arange(np.log(outmin), np.log(outmin) + nout/nr_per_dec, 1/nr_per_dec)) # Only necessary if standard spline is used. We need to calculate the new # input values, as spline is carried out in the input domain. Else spline # is carried out in output domain and the new input values are not used. new_inp = inp.max()*np.exp(-np.arange(nout - filt.base.size + 1) / nr_per_dec) # Return output values return np.atleast_2d(out), new_inp # 1. COMPUTE REQUIRED LAMBDAS for given hankel-filter-base from empymod.filters import key_201_2009 fht_filter = key_201_2009() off = FDsurvey.offset lambd, ioff = get_spline_values(fht_filter, off) print (lambd.size, ioff.size) # - ioff PJ0 = prob.hz_kernel_vertical_magnetic_dipole( lambd/FDsurvey.offset, FDsurvey.frequency, FDsurvey.n_layer, prob.sigma, prob.chi, FDsurvey.depth, FDsurvey.h, FDsurvey.z, 'secondary', output_type='response' ) # + # PJ0 = np.empty((off.size, lambd.size), dtype=complex) # PJ1 = np.empty((off.size, lambd.size), dtype=complex) # + # 2. CALL THE KERNEL # Here comes your PJ0, PJ1 calculation def rearrange_PJ(PJ, noff, nfilt): """Return re-arranged PJ with shape (noff, nlambd). Each row starts one 'lambda' higher.""" outarr = np.concatenate((np.tile(PJ, noff).squeeze(), np.zeros(noff))) return outarr.reshape(noff, -1)[:, :nfilt] PJ0 = rearrange_PJ(PJ0, ioff.size, fht_filter.base.size) # PJ1 = rearrange_PJ(PJ1, ioff.size, fht_filter.base.size) # 3. DLF # EM_int = np.dot(PJ1, fht_filter.j1)/ioff + np.dot(PJ0, fht_filter.j0) EM_int = np.dot(PJ0, fht_filter.j0) # 4. Interpolation real_EM = iuSpline(np.log10(ioff[::-1]), EM_int.real[::-1]) imag_EM = iuSpline(np.log10(ioff[::-1]), EM_int.imag[::-1]) fEM = real_EM(np.log10(off)) + 1j*imag_EM(np.log10(off)) # Normalize by offset fEM /= off # - print (Hz) print (fEM) # + # iuSpline?? 
# - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="ZVk0N_dEdpIz" colab_type="text" # # JFT Notes # # This file was forked from an official Tensorflow notebook, components.ipynb ([GitHub](https://github.com/tensorflow/tfx/blob/master/docs/tutorials/tfx/components.ipynb), [Colab](https://colab.research.google.com/github/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_interactive.ipynb)). # # ## 2019-11 TFX talk # **[TFX tech talk](https://youtu.be/TA5kbFgeUlk?t=1562)**, YouTube, 2019-11 # # Presenter demos [a Jupyter notebook on Colab](https://colab.research.google.com/github/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_interactive.ipynb) demoing interactive dev via InteractiveContext: # # ```python # from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext # ``` # # That file is what was used to seed this file. # # ## 2020-03 Google Cloud AI Platform Pipepiles # # At TensorFlow Dev Summit 2020 they beta'd Google Cloud AI Platform Pipelines (the Pipelines is the new part). [The tech press release](https://cloud.google.com/blog/products/ai-machine-learning/introducing-cloud-ai-platform-pipelines) is the best primer doc. # # Pipelines is TFX end to end pipelines running on Google Cloud AI Platform, as a new feature of the platform. Where TFX is platform agnostic, Pipelines it TFX deployed specifically atop Google Cloud AI Platform, e.g. KubeFlow happens to be the executor. That's provided as an easy-to-use fully managed service. # # Intro and demo talk, [TFX: Production ML with TensorFlow in 2020 (TF Dev Summit '20)](https://youtu.be/I3MjuFGmJrg?list=PLQY2H8rRoyvzuJw20FG82Lgm2SZjTdIXU). The PM intro pitch is only the first 4 minutes. # # # + [markdown] colab_type="text" id="wdeKOEkv1Fe8" # ##### Copyright © 2019 The TensorFlow Authors. # + id="Ka6rbmXIdtTt" colab_type="code" colab={} # + cellView="form" colab_type="code" id="c2jyGuiG1gHr" colab={} #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="23R0Z9RojXYW" # # TFX Component Tutorial # # ***A Component-by-Component Introduction to TensorFlow Extended (TFX)*** # + [markdown] colab_type="text" id="LidV2qsXm4XC" # Note: We recommend running this tutorial in a Colab notebook, with no setup required! Just click "Run in Google Colab". # # # + [markdown] colab_type="text" id="KAD1tLoTm_QS" # # This Colab-based tutorial will interactively walk through each built-in component of TensorFlow Extended (TFX). # # It covers every step in an end-to-end machine learning pipeline, from data ingestion to pushing a model to serving. # # When you're done, the contents of this notebook can be automatically exported as TFX pipeline source code, which you can orchestrate with Apache Airflow and Apache Beam. 
# # Note: This notebook and its associated APIs are **experimental** and are # in active development. Major changes in functionality, behavior, and # presentation are expected. # + [markdown] colab_type="text" id="sfSQ-kX-MLEr" # ## Background # This notebook demonstrates how to use TFX in a Jupyter/Colab environment. Here, we walk through the Chicago Taxi example in an interactive notebook. # # Working in an interactive notebook is a useful way to become familiar with the structure of a TFX pipeline. It's also useful when doing development of your own pipelines as a lightweight development environment, but you should be aware that there are differences in the way interactive notebooks are orchestrated, and how they access metadata artifacts. # # ### Orchestration # # In a production deployment of TFX, you will use an orchestrator such as Apache Airflow, Kubeflow Pipelines, or Apache Beam to orchestrate a pre-defined pipeline graph of TFX components. In an interactive notebook, the notebook itself is the orchestrator, running each TFX component as you execute the notebook cells. # # ### Metadata # # In a production deployment of TFX, you will access metadata through the ML Metadata (MLMD) API. MLMD stores metadata properties in a database such as MySQL or SQLite, and stores the metadata payloads in a persistent store such as on your filesystem. In an interactive notebook, both properties and payloads are stored in an ephemeral SQLite database in the `/tmp` directory on the Jupyter notebook or Colab server. # + [markdown] colab_type="text" id="2GivNBNYjb3b" # ## Setup # First, we install and import the necessary packages, set up paths, and download data. # + [markdown] colab_type="text" id="MZOYTt1RW4TK" # ### Install TFX # # Note: Because of package updates, you must use the button at the bottom of the output of this cell to restart the runtime. Following restart, please rerun this cell. # + colab_type="code" id="S4SQA7Q5nej3" colab={} # !pip install "tfx>=0.21.1,<0.22" "tensorflow>=2.1,<2.2" "tensorboard>=2.1,<2.2" # + [markdown] colab_type="text" id="N-ePgV0Lj68Q" # ### Import packages # We import necessary packages, including standard TFX component classes. # + colab_type="code" id="YIqpWK9efviJ" colab={} import os import pprint import tempfile import urllib import absl import tensorflow as tf import tensorflow_model_analysis as tfma tf.get_logger().propagate = False pp = pprint.PrettyPrinter() import tfx from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import ExampleValidator from tfx.components import Pusher from tfx.components import ResolverNode from tfx.components import SchemaGen from tfx.components import StatisticsGen from tfx.components import Trainer from tfx.components import Transform from tfx.dsl.experimental import latest_blessed_model_resolver from tfx.orchestration import metadata from tfx.orchestration import pipeline from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext from tfx.proto import pusher_pb2 from tfx.proto import trainer_pb2 from tfx.types import Channel from tfx.types.standard_artifacts import Model from tfx.types.standard_artifacts import ModelBlessing from tfx.utils.dsl_utils import external_input # %load_ext tfx.orchestration.experimental.interactive.notebook_extensions.skip # + [markdown] colab_type="text" id="wCZTHRy0N1D6" # Let's check the library versions. 
# + colab_type="code" id="eZ4K18_DN2D8" colab={} print('TensorFlow version: {}'.format(tf.__version__)) print('TFX version: {}'.format(tfx.__version__)) # + [markdown] colab_type="text" id="ufJKQ6OvkJlY" # ### Set up pipeline paths # + colab_type="code" id="ad5JLpKbf6sN" colab={} # This is the root directory for your TFX pip package installation. _tfx_root = tfx.__path__[0] # This is the directory containing the TFX Chicago Taxi Pipeline example. _taxi_root = os.path.join(_tfx_root, 'examples/chicago_taxi_pipeline') # This is the path where your model will be pushed for serving. _serving_model_dir = os.path.join( tempfile.mkdtemp(), 'serving_model/taxi_simple') # Set up logging. absl.logging.set_verbosity(absl.logging.INFO) # + [markdown] colab_type="text" id="n2cMMAbSkGfX" # ### Download example data # We download the example dataset for use in our TFX pipeline. # # The dataset we're using is the [Taxi Trips dataset](https://data.cityofchicago.org/Transportation/Taxi-Trips/wrvz-psew) released by the City of Chicago. The columns in this dataset are: # # # # # # # # #
# | pickup_community_area | fare | trip_start_month |
# | trip_start_hour | trip_start_day | trip_start_timestamp |
# | pickup_latitude | pickup_longitude | dropoff_latitude |
# | dropoff_longitude | trip_miles | pickup_census_tract |
# | dropoff_census_tract | payment_type | company |
# | trip_seconds | dropoff_community_area | tips |
# # With this dataset, we will build a model that predicts the `tips` of a trip. # + colab_type="code" id="BywX6OUEhAqn" colab={} _data_root = tempfile.mkdtemp(prefix='tfx-data') DATA_PATH = 'https://raw.githubusercontent.com/tensorflow/tfx/master/tfx/examples/chicago_taxi_pipeline/data/simple/data.csv' _data_filepath = os.path.join(_data_root, "data.csv") urllib.request.urlretrieve(DATA_PATH, _data_filepath) # + [markdown] colab_type="text" id="blZC1sIQOWfH" # Take a quick look at the CSV file. # + colab_type="code" id="c5YPeLPFOXaD" colab={} # !head {_data_filepath} # + [markdown] colab_type="text" id="QioyhunCImwE" # *Disclaimer: This site provides applications using data that has been modified for use from its original source, www.cityofchicago.org, the official website of the City of Chicago. The City of Chicago makes no claims as to the content, accuracy, timeliness, or completeness of any of the data provided at this site. The data provided at this site is subject to change at any time. It is understood that the data provided at this site is being used at one’s own risk.* # + [markdown] colab_type="text" id="8ONIE_hdkPS4" # ### Create the InteractiveContext # Last, we create an InteractiveContext, which will allow us to run TFX components interactively in this notebook. # + colab_type="code" id="0Rh6K5sUf9dd" colab={} # Here, we create an InteractiveContext using default parameters. This will # use a temporary directory with an ephemeral ML Metadata database instance. # To use your own pipeline root or database, the optional properties # `pipeline_root` and `metadata_connection_config` may be passed to # InteractiveContext. Calls to InteractiveContext are no-ops outside of the # notebook. context = InteractiveContext() # + [markdown] colab_type="text" id="HdQWxfsVkzdJ" # ## Run TFX components interactively # In the cells that follow, we create TFX components one-by-one, run each of them, and visualize their output artifacts. # + [markdown] colab_type="text" id="L9fwt9gQk3BR" # ### ExampleGen # # The `ExampleGen` component is usually at the start of a TFX pipeline. It will: # # 1. Split data into training and evaluation sets (by default, 2/3 training + 1/3 eval) # 2. Convert data into the `tf.Example` format # 3. Copy data into the `_tfx_root` directory for other components to access # # `ExampleGen` takes as input the path to your data source. In our case, this is the `_data_root` path that contains the downloaded CSV. # # Note: In this notebook, we can instantiate components one-by-one and run them with `InteractiveContext.run()`. By contrast, in a production setting, we would specify all the components upfront in a `Pipeline` to pass to the orchestrator (see the "Export to Pipeline" section). # + colab_type="code" id="PyXjuMt8f-9u" colab={} example_gen = CsvExampleGen(input=external_input(_data_root)) context.run(example_gen) # + [markdown] colab_type="text" id="OqCoZh7KPUm9" # Let's examine the output artifacts of `ExampleGen`. This component produces two artifacts, training examples and evaluation examples: # # Note: The `%%skip_for_export` cell magic will omit the contents of this cell in the exported pipeline file (see the "Export to pipeline" section). This is useful for notebook-specific code that you don't want to run in an orchestrated pipeline. 
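# As a brief aside: the 2/3 train / 1/3 eval split that `ExampleGen` applies by default can be customized through an `output_config`. The cell below is a minimal sketch, assuming the standard `example_gen_pb2` proto helpers shipped with this TFX release; it only constructs the config, and this notebook keeps the default split.

# +
# %%skip_for_export
from tfx.proto import example_gen_pb2

# Hash examples into 4 buckets: 3 for training and 1 for evaluation (a 3:1 split).
_custom_output_config = example_gen_pb2.Output(
    split_config=example_gen_pb2.SplitConfig(splits=[
        example_gen_pb2.SplitConfig.Split(name='train', hash_buckets=3),
        example_gen_pb2.SplitConfig.Split(name='eval', hash_buckets=1),
    ]))

# Passing it to the component would look like this (left commented out so the
# default 2:1 split from the cell above stays in effect):
# example_gen = CsvExampleGen(input=external_input(_data_root),
#                             output_config=_custom_output_config)
# -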
# + colab_type="code" id="880KkTAkPeUg" colab={} # %%skip_for_export artifact = example_gen.outputs['examples'].get()[0] print(artifact.split_names, artifact.uri) # + [markdown] colab_type="text" id="J6vcbW_wPqvl" # We can also take a look at the first three training examples: # + colab_type="code" id="H4XIXjiCPwzQ" colab={} # %%skip_for_export # Get the URI of the output artifact representing the training examples, which is a directory train_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'train') # Get the list of files in this directory (all compressed TFRecord files) tfrecord_filenames = [os.path.join(train_uri, name) for name in os.listdir(train_uri)] # Create a `TFRecordDataset` to read these files dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP") # Iterate over the first 3 records and decode them. for tfrecord in dataset.take(3): serialized_example = tfrecord.numpy() example = tf.train.Example() example.ParseFromString(serialized_example) pp.pprint(example) # + [markdown] colab_type="text" id="2gluYjccf-IP" # Now that `ExampleGen` has finished ingesting the data, the next step is data analysis. # + [markdown] colab_type="text" id="csM6BFhtk5Aa" # ### StatisticsGen # The `StatisticsGen` component computes statistics over your dataset for data analysis, as well as for use in downstream components. It uses the [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started) library. # # `StatisticsGen` takes as input the dataset we just ingested using `ExampleGen`. # + colab_type="code" id="MAscCCYWgA-9" colab={} statistics_gen = StatisticsGen( examples=example_gen.outputs['examples']) context.run(statistics_gen) # + [markdown] colab_type="text" id="HLI6cb_5WugZ" # After `StatisticsGen` finishes running, we can visualize the outputted statistics. Try playing with the different plots! # + colab_type="code" id="tLjXy7K6Tp_G" colab={} # %%skip_for_export context.show(statistics_gen.outputs['statistics']) # + [markdown] colab_type="text" id="HLKLTO9Nk60p" # ### SchemaGen # # The `SchemaGen` component generates a schema based on your data statistics. (A schema defines the expected bounds, types, and properties of the features in your dataset.) It also uses the [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started) library. # # `SchemaGen` will take as input the statistics that we generated with `StatisticsGen`, looking at the training split by default. # + colab_type="code" id="ygQvZ6hsiQ_J" colab={} schema_gen = SchemaGen( statistics=statistics_gen.outputs['statistics'], infer_feature_shape=False) context.run(schema_gen) # + [markdown] colab_type="text" id="zi6TxTUKXM6b" # After `SchemaGen` finishes running, we can visualize the generated schema as a table. # + colab_type="code" id="Ec9vqDXpXeMb" colab={} # %%skip_for_export context.show(schema_gen.outputs['schema']) # + [markdown] colab_type="text" id="kZWWdbA-m7zp" # Each feature in your dataset shows up as a row in the schema table, alongside its properties. The schema also captures all the values that a categorical feature takes on, denoted as its domain. # # To learn more about schemas, see [the SchemaGen documentation](https://www.tensorflow.org/tfx/guide/schemagen). # + [markdown] colab_type="text" id="V1qcUuO9k9f8" # ### ExampleValidator # The `ExampleValidator` component detects anomalies in your data, based on the expectations defined by the schema. 
It also uses the [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started) library. # # `ExampleValidator` will take as input the statistics from `StatisticsGen`, and the schema from `SchemaGen`. # # By default, it compares the statistics from the evaluation split to the schema from the training split. # + colab_type="code" id="XRlRUuGgiXks" colab={} example_validator = ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema']) context.run(example_validator) # + [markdown] colab_type="text" id="855mrHgJcoer" # After `ExampleValidator` finishes running, we can visualize the anomalies as a table. # + colab_type="code" id="TDyAAozQcrk3" colab={} # %%skip_for_export context.show(example_validator.outputs['anomalies']) # + [markdown] colab_type="text" id="znMoJj60ybZx" # In the anomalies table, we can see that the `company` feature takes on new values that were not in the training split. This information can be used to debug model performance, understand how your data evolves over time, and identify data errors. # # In our case, this anomaly is innocuous, so we move on to the next step of transforming the data. # + [markdown] colab_type="text" id="JPViEz5RlA36" # ### Transform # The `Transform` component performs feature engineering for both training and serving. It uses the [TensorFlow Transform](https://www.tensorflow.org/tfx/transform/get_started) library. # # `Transform` will take as input the data from `ExampleGen`, the schema from `SchemaGen`, as well as a module that contains user-defined Transform code. # # Let's see an example of user-defined Transform code below (for an introduction to the TensorFlow Transform APIs, [see the tutorial](https://www.tensorflow.org/tfx/tutorials/transform/simple)). First, we define a few constants for feature engineering: # # Note: The `%%writefile` cell magic will save the contents of the cell as a `.py` file on disk. This allows the `Transform` component to load your code as a module. # # # + colab_type="code" id="PuNSiUKb4YJf" colab={} _taxi_constants_module_file = 'taxi_constants.py' # + colab_type="code" id="HPjhXuIF4YJh" colab={} # %%skip_for_export # %%writefile {_taxi_constants_module_file} # Categorical features are assumed to each have a maximum value in the dataset. MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12] CATEGORICAL_FEATURE_KEYS = [ 'trip_start_hour', 'trip_start_day', 'trip_start_month', 'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area', 'dropoff_community_area' ] DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds'] # Number of buckets used by tf.transform for encoding each feature. FEATURE_BUCKET_COUNT = 10 BUCKET_FEATURE_KEYS = [ 'pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude' ] # Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform VOCAB_SIZE = 1000 # Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed. 
OOV_SIZE = 10 VOCAB_FEATURE_KEYS = [ 'payment_type', 'company', ] # Keys LABEL_KEY = 'tips' FARE_KEY = 'fare' def transformed_name(key): return key + '_xf' # + [markdown] colab_type="text" id="Duj2Ax5z4YJl" # Next, we write a `preprocessing_fn` that takes in raw data as input, and returns transformed features that our model can train on: # + colab_type="code" id="4AJ9hBs94YJm" colab={} _taxi_transform_module_file = 'taxi_transform.py' # + colab_type="code" id="MYmxxx9A4YJn" colab={} # %%skip_for_export # %%writefile {_taxi_transform_module_file} import tensorflow as tf import tensorflow_transform as tft import taxi_constants _DENSE_FLOAT_FEATURE_KEYS = taxi_constants.DENSE_FLOAT_FEATURE_KEYS _VOCAB_FEATURE_KEYS = taxi_constants.VOCAB_FEATURE_KEYS _VOCAB_SIZE = taxi_constants.VOCAB_SIZE _OOV_SIZE = taxi_constants.OOV_SIZE _FEATURE_BUCKET_COUNT = taxi_constants.FEATURE_BUCKET_COUNT _BUCKET_FEATURE_KEYS = taxi_constants.BUCKET_FEATURE_KEYS _CATEGORICAL_FEATURE_KEYS = taxi_constants.CATEGORICAL_FEATURE_KEYS _FARE_KEY = taxi_constants.FARE_KEY _LABEL_KEY = taxi_constants.LABEL_KEY _transformed_name = taxi_constants.transformed_name def preprocessing_fn(inputs): """tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. """ outputs = {} for key in _DENSE_FLOAT_FEATURE_KEYS: # Preserve this feature as a dense float, setting nan's to the mean. outputs[_transformed_name(key)] = tft.scale_to_z_score( _fill_in_missing(inputs[key])) for key in _VOCAB_FEATURE_KEYS: # Build a vocabulary for this feature. outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary( _fill_in_missing(inputs[key]), top_k=_VOCAB_SIZE, num_oov_buckets=_OOV_SIZE) for key in _BUCKET_FEATURE_KEYS: outputs[_transformed_name(key)] = tft.bucketize( _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT, always_return_num_quantiles=False) for key in _CATEGORICAL_FEATURE_KEYS: outputs[_transformed_name(key)] = _fill_in_missing(inputs[key]) # Was this passenger a big tipper? taxi_fare = _fill_in_missing(inputs[_FARE_KEY]) tips = _fill_in_missing(inputs[_LABEL_KEY]) outputs[_transformed_name(_LABEL_KEY)] = tf.where( tf.math.is_nan(taxi_fare), tf.cast(tf.zeros_like(taxi_fare), tf.int64), # Test if the tip was > 20% of the fare. tf.cast( tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64)) return outputs def _fill_in_missing(x): """Replace missing values in a SparseTensor. Fills in missing values of `x` with '' or 0, and converts to a dense tensor. Args: x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1 in the second dimension. Returns: A rank 1 tensor where missing values of `x` have been filled in. """ default_value = '' if x.dtype == tf.string else 0 return tf.squeeze( tf.sparse.to_dense( tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]), default_value), axis=1) # + [markdown] colab_type="text" id="wgbmZr3sgbWW" # Now, we pass in this feature engineering code to the `Transform` component and run it to transform your data. # + colab_type="code" id="jHfhth_GiZI9" colab={} transform = Transform( examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], module_file=os.path.abspath(_taxi_transform_module_file)) context.run(transform) # + [markdown] colab_type="text" id="fwAwb4rARRQ2" # Let's examine the output artifacts of `Transform`. 
This component produces two types of outputs: # # * `transform_graph` is the graph that can perform the preprocessing operations (this graph will be included in the serving and evaluation models). # * `transformed_examples` represents the preprocessed training and evaluation data. # + colab_type="code" id="SClrAaEGR1O5" colab={} transform.outputs # + [markdown] colab_type="text" id="vyFkBd9AR1sy" # Take a peek at the `transform_graph` artifact. It points to a directory containing three subdirectories. # + colab_type="code" id="5tRw4DneR3i7" colab={} train_uri = transform.outputs['transform_graph'].get()[0].uri os.listdir(train_uri) # + [markdown] colab_type="text" id="4fqV54CIR6Pu" # The `transformed_metadata` subdirectory contains the schema of the preprocessed data. The `transform_fn` subdirectory contains the actual preprocessing graph. The `metadata` subdirectory contains the schema of the original data. # # We can also take a look at the first three transformed examples: # + colab_type="code" id="pwbW2zPKR_S4" colab={} # Get the URI of the output artifact representing the transformed examples, which is a directory train_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'train') # Get the list of files in this directory (all compressed TFRecord files) tfrecord_filenames = [os.path.join(train_uri, name) for name in os.listdir(train_uri)] # Create a `TFRecordDataset` to read these files dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP") # Iterate over the first 3 records and decode them. for tfrecord in dataset.take(3): serialized_example = tfrecord.numpy() example = tf.train.Example() example.ParseFromString(serialized_example) pp.pprint(example) # + [markdown] colab_type="text" id="q_b_V6eN4f69" # After the `Transform` component has transformed your data into features, and the next step is to train a model. # + [markdown] colab_type="text" id="OBJFtnl6lCg9" # ### Trainer # The `Trainer` component will train a model that you define in TensorFlow (either using the Estimator API or the Keras API with [`model_to_estimator`](https://www.tensorflow.org/api_docs/python/tf/keras/estimator/model_to_estimator)). # # `Trainer` takes as input the schema from `SchemaGen`, the transformed data and graph from `Transform`, training parameters, as well as a module that contains user-defined model code. 
# # Let's see an example of user-defined model code below (for an introduction to the TensorFlow Estimator APIs, [see the tutorial](https://www.tensorflow.org/tutorials/estimator/premade)): # + colab_type="code" id="N1376oq04YJt" colab={} _taxi_trainer_module_file = 'taxi_trainer.py' # + colab_type="code" id="nf9UuNng4YJu" colab={} # %%skip_for_export # %%writefile {_taxi_trainer_module_file} import tensorflow as tf import tensorflow_model_analysis as tfma import tensorflow_transform as tft from tensorflow_transform.tf_metadata import schema_utils import taxi_constants _DENSE_FLOAT_FEATURE_KEYS = taxi_constants.DENSE_FLOAT_FEATURE_KEYS _VOCAB_FEATURE_KEYS = taxi_constants.VOCAB_FEATURE_KEYS _VOCAB_SIZE = taxi_constants.VOCAB_SIZE _OOV_SIZE = taxi_constants.OOV_SIZE _FEATURE_BUCKET_COUNT = taxi_constants.FEATURE_BUCKET_COUNT _BUCKET_FEATURE_KEYS = taxi_constants.BUCKET_FEATURE_KEYS _CATEGORICAL_FEATURE_KEYS = taxi_constants.CATEGORICAL_FEATURE_KEYS _MAX_CATEGORICAL_FEATURE_VALUES = taxi_constants.MAX_CATEGORICAL_FEATURE_VALUES _LABEL_KEY = taxi_constants.LABEL_KEY _transformed_name = taxi_constants.transformed_name def _transformed_names(keys): return [_transformed_name(key) for key in keys] # Tf.Transform considers these features as "raw" def _get_raw_feature_spec(schema): return schema_utils.schema_as_feature_spec(schema).feature_spec def _gzip_reader_fn(filenames): """Small utility returning a record reader that can read gzip'ed files.""" return tf.data.TFRecordDataset( filenames, compression_type='GZIP') def _build_estimator(config, hidden_units=None, warm_start_from=None): """Build an estimator for predicting the tipping behavior of taxi riders. Args: config: tf.estimator.RunConfig defining the runtime environment for the estimator (including model_dir). hidden_units: [int], the layer sizes of the DNN (input layer first) warm_start_from: Optional directory to warm start from. Returns: A dict of the following: - estimator: The estimator that will be used for training and eval. - train_spec: Spec for training. - eval_spec: Spec for eval. - eval_input_receiver_fn: Input function for eval. """ real_valued_columns = [ tf.feature_column.numeric_column(key, shape=()) for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS) ] categorical_columns = [ tf.feature_column.categorical_column_with_identity( key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0) for key in _transformed_names(_VOCAB_FEATURE_KEYS) ] categorical_columns += [ tf.feature_column.categorical_column_with_identity( key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0) for key in _transformed_names(_BUCKET_FEATURE_KEYS) ] categorical_columns += [ tf.feature_column.categorical_column_with_identity( # pylint: disable=g-complex-comprehension key, num_buckets=num_buckets, default_value=0) for key, num_buckets in zip( _transformed_names(_CATEGORICAL_FEATURE_KEYS), _MAX_CATEGORICAL_FEATURE_VALUES) ] return tf.estimator.DNNLinearCombinedClassifier( config=config, linear_feature_columns=categorical_columns, dnn_feature_columns=real_valued_columns, dnn_hidden_units=hidden_units or [100, 70, 50, 25], warm_start_from=warm_start_from) def _example_serving_receiver_fn(tf_transform_graph, schema): """Build the serving in inputs. Args: tf_transform_graph: A TFTransformOutput. schema: the schema of the input data. Returns: Tensorflow graph which parses examples, applying tf-transform to them. 
""" raw_feature_spec = _get_raw_feature_spec(schema) raw_feature_spec.pop(_LABEL_KEY) raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn( raw_feature_spec, default_batch_size=None) serving_input_receiver = raw_input_fn() transformed_features = tf_transform_graph.transform_raw_features( serving_input_receiver.features) return tf.estimator.export.ServingInputReceiver( transformed_features, serving_input_receiver.receiver_tensors) def _eval_input_receiver_fn(tf_transform_graph, schema): """Build everything needed for the tf-model-analysis to run the model. Args: tf_transform_graph: A TFTransformOutput. schema: the schema of the input data. Returns: EvalInputReceiver function, which contains: - Tensorflow graph which parses raw untransformed features, applies the tf-transform preprocessing operators. - Set of raw, untransformed features. - Label against which predictions will be compared. """ # Notice that the inputs are raw features, not transformed features here. raw_feature_spec = _get_raw_feature_spec(schema) serialized_tf_example = tf.compat.v1.placeholder( dtype=tf.string, shape=[None], name='input_example_tensor') # Add a parse_example operator to the tensorflow graph, which will parse # raw, untransformed, tf examples. features = tf.io.parse_example(serialized_tf_example, raw_feature_spec) # Now that we have our raw examples, process them through the tf-transform # function computed during the preprocessing step. transformed_features = tf_transform_graph.transform_raw_features( features) # The key name MUST be 'examples'. receiver_tensors = {'examples': serialized_tf_example} # NOTE: Model is driven by transformed features (since training works on the # materialized output of TFT, but slicing will happen on raw features. features.update(transformed_features) return tfma.export.EvalInputReceiver( features=features, receiver_tensors=receiver_tensors, labels=transformed_features[_transformed_name(_LABEL_KEY)]) def _input_fn(filenames, tf_transform_graph, batch_size=200): """Generates features and labels for training or evaluation. Args: filenames: [str] list of CSV files to read data from. tf_transform_graph: A TFTransformOutput. batch_size: int First dimension size of the Tensors returned by input_fn Returns: A (features, indices) tuple where features is a dictionary of Tensors, and indices is a single Tensor of label indices. """ transformed_feature_spec = ( tf_transform_graph.transformed_feature_spec().copy()) dataset = tf.data.experimental.make_batched_features_dataset( filenames, batch_size, transformed_feature_spec, reader=_gzip_reader_fn) transformed_features = ( tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()) # We pop the label because we do not want to use it as a feature while we're # training. return transformed_features, transformed_features.pop( _transformed_name(_LABEL_KEY)) # TFX will call this function def trainer_fn(trainer_fn_args, schema): """Build the estimator using the high level API. Args: trainer_fn_args: Holds args used to train the model as name/value pairs. schema: Holds the schema of the training examples. Returns: A dict of the following: - estimator: The estimator that will be used for training and eval. - train_spec: Spec for training. - eval_spec: Spec for eval. - eval_input_receiver_fn: Input function for eval. 
""" # Number of nodes in the first layer of the DNN first_dnn_layer_size = 100 num_dnn_layers = 4 dnn_decay_factor = 0.7 train_batch_size = 40 eval_batch_size = 40 tf_transform_graph = tft.TFTransformOutput(trainer_fn_args.transform_output) train_input_fn = lambda: _input_fn( # pylint: disable=g-long-lambda trainer_fn_args.train_files, tf_transform_graph, batch_size=train_batch_size) eval_input_fn = lambda: _input_fn( # pylint: disable=g-long-lambda trainer_fn_args.eval_files, tf_transform_graph, batch_size=eval_batch_size) train_spec = tf.estimator.TrainSpec( # pylint: disable=g-long-lambda train_input_fn, max_steps=trainer_fn_args.train_steps) serving_receiver_fn = lambda: _example_serving_receiver_fn( # pylint: disable=g-long-lambda tf_transform_graph, schema) exporter = tf.estimator.FinalExporter('chicago-taxi', serving_receiver_fn) eval_spec = tf.estimator.EvalSpec( eval_input_fn, steps=trainer_fn_args.eval_steps, exporters=[exporter], name='chicago-taxi-eval') run_config = tf.estimator.RunConfig( save_checkpoints_steps=999, keep_checkpoint_max=1) run_config = run_config.replace(model_dir=trainer_fn_args.serving_model_dir) estimator = _build_estimator( # Construct layers sizes with exponetial decay hidden_units=[ max(2, int(first_dnn_layer_size * dnn_decay_factor**i)) for i in range(num_dnn_layers) ], config=run_config, warm_start_from=trainer_fn_args.base_model) # Create an input receiver for TFMA processing receiver_fn = lambda: _eval_input_receiver_fn( # pylint: disable=g-long-lambda tf_transform_graph, schema) return { 'estimator': estimator, 'train_spec': train_spec, 'eval_spec': eval_spec, 'eval_input_receiver_fn': receiver_fn } # + [markdown] colab_type="text" id="GY4yTRaX4YJx" # Now, we pass in this model code to the `Trainer` component and run it to train the model. # + colab_type="code" id="429-vvCWibO0" colab={} trainer = Trainer( module_file=os.path.abspath(_taxi_trainer_module_file), transformed_examples=transform.outputs['transformed_examples'], schema=schema_gen.outputs['schema'], transform_graph=transform.outputs['transform_graph'], train_args=trainer_pb2.TrainArgs(num_steps=10000), eval_args=trainer_pb2.EvalArgs(num_steps=5000)) context.run(trainer) # + [markdown] colab_type="text" id="6Cql1G35StJp" # #### Analyze Training with TensorBoard # Optionally, we can connect TensorBoard to the Trainer to analyze our model's training curves. # + colab_type="code" id="bXe62WE0S0Ek" colab={} # %%skip_for_export # Get the URI of the output artifact representing the training logs, which is a directory model_dir = trainer.outputs['model'].get()[0].uri # %load_ext tensorboard # %tensorboard --logdir {model_dir} # + [markdown] colab_type="text" id="FmPftrv0lEQy" # ### Evaluator # The `Evaluator` component computes model performance metrics over the evaluation set. It uses the [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) library. The `Evaluator` can also optionally validate that a newly trained model is better than the previous model. This is useful in a production pipeline setting where you may automatically train and validate a model every day. In this notebook, we only train one model, so the `Evaluator` automatically will label the model as "good". # # `Evaluator` will take as input the data from `ExampleGen`, the trained model from `Trainer`, and slicing configuration. The slicing configuration allows you to slice your metrics on feature values (e.g. how does your model perform on taxi trips that start at 8am versus 8pm?). 
See an example of this configuration below: # + colab_type="code" id="fVhfzzh9PDEx" colab={} eval_config = tfma.EvalConfig( model_specs=[ # Using signature 'eval' implies the use of an EvalSavedModel. To use # a serving model remove the signature to defaults to 'serving_default' # and add a label_key. tfma.ModelSpec(signature_name='eval') ], metrics_specs=[ tfma.MetricsSpec( # The metrics added here are in addition to those saved with the # model (assuming either a keras model or EvalSavedModel is used). # Any metrics added into the saved model (for example using # model.compile(..., metrics=[...]), etc) will be computed # automatically. metrics=[ tfma.MetricConfig(class_name='ExampleCount') ], # To add validation thresholds for metrics saved with the model, # add them keyed by metric name to the thresholds map. thresholds = { 'accuracy': tfma.MetricThreshold( value_threshold=tfma.GenericValueThreshold( lower_bound={'value': 0.5}), change_threshold=tfma.GenericChangeThreshold( direction=tfma.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-10})) } ) ], slicing_specs=[ # An empty slice spec means the overall slice, i.e. the whole dataset. tfma.SlicingSpec(), # Data can be sliced along a feature column. In this case, data is # sliced along feature column trip_start_hour. tfma.SlicingSpec(feature_keys=['trip_start_hour']) ]) # + [markdown] colab_type="text" id="9mBdKH1F8JuT" # Next, we give this configuration to `Evaluator` and run it. # + colab_type="code" id="Zjcx8g6mihSt" colab={} # Use TFMA to compute a evaluation statistics over features of a model and # validate them against a baseline. # The model resolver is only required if performing model validation in addition # to evaluation. In this case we validate against the latest blessed model. If # no model has been blessed before (as in this case) the evaluator will make our # candidate the first blessed model. model_resolver = ResolverNode( instance_name='latest_blessed_model_resolver', resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver, model=Channel(type=Model), model_blessing=Channel(type=ModelBlessing)) context.run(model_resolver) evaluator = Evaluator( examples=example_gen.outputs['examples'], model=trainer.outputs['model'], #baseline_model=model_resolver.outputs['model'], # Change threshold will be ignored if there is no baseline (first run). eval_config=eval_config) context.run(evaluator) # + [markdown] colab_type="text" id="Y5TMskWe9LL0" # After `Evaluator` finishes running, we can show the default visualization of global metrics on the entire evaluation set. # + colab_type="code" id="U729j5X5QQUQ" colab={} # %%skip_for_export context.show(evaluator.outputs['evaluation']) # + [markdown] colab_type="text" id="t-tI4p6m-OAn" # To see the visualization for sliced evaluation metrics, we can directly call the TensorFlow Model Analysis library. # + colab_type="code" id="pyis6iy0HLdi" colab={} # %%skip_for_export import tensorflow_model_analysis as tfma # Get the TFMA output result path and load the result. PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri tfma_result = tfma.load_eval_result(PATH_TO_RESULT) # Show data sliced along feature column trip_start_hour. tfma.view.render_slicing_metrics( tfma_result, slicing_column='trip_start_hour') # + [markdown] colab_type="text" id="7uvYrUf2-r_6" # This visualization shows the same metrics, but computed at every feature value of `trip_start_hour` instead of on the entire evaluation set. 
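# + [markdown]
# As an aside (not part of the original tutorial), the sliced metrics can also be read
# programmatically from the loaded result. The sketch below assumes that
# `tfma_result.slicing_metrics` is a list of `(slice_key, metrics)` pairs, where each
# slice key is a tuple of `(feature, value)` pairs, which is how recent TFMA releases
# expose them.

# +
# %%skip_for_export
# Print every slice and its metrics from the result loaded above.
for slice_key, slice_metrics in tfma_result.slicing_metrics:
    # The overall slice is represented by an empty slice key.
    slice_name = ', '.join('{}={}'.format(feature, value)
                           for feature, value in slice_key) or 'Overall'
    print(slice_name)
    pp.pprint(slice_metrics)
# -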
# # TensorFlow Model Analysis supports many other visualizations, such as Fairness Indicators and plotting a time series of model performance. To learn more, see [the tutorial](https://www.tensorflow.org/tfx/tutorials/model_analysis/tfma_basic). # # Since we added thresholds to our config, validation output is also available. The precence of a `blessing` artifact indicates that our model passed validation. Since this is the first validation being performed the candidate is automatically blessed. # + colab_type="code" id="FXk1MA7sijCr" colab={} # %%skip_for_export blessing_uri = evaluator.outputs.blessing.get()[0].uri # !ls -l {blessing_uri} # + [markdown] colab_type="text" id="76Mil-7FlF_y" # Now can also verify the success by loading the validation result record: # + colab_type="code" id="k4GghePOTJxL" colab={} # %%skip_for_export PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri print(tfma.load_validation_result(PATH_TO_RESULT)) # + [markdown] colab_type="text" id="T8DYekCZlHfj" # ### Pusher # The `Pusher` component is usually at the end of a TFX pipeline. It checks whether a model has passed validation, and if so, exports the model to `_serving_model_dir`. # + colab_type="code" id="r45nQ69eikc9" colab={} pusher = Pusher( model=trainer.outputs['model'], model_blessing=evaluator.outputs['blessing'], push_destination=pusher_pb2.PushDestination( filesystem=pusher_pb2.PushDestination.Filesystem( base_directory=_serving_model_dir))) context.run(pusher) # + [markdown] colab_type="text" id="ctUErBYoTO9I" # Let's examine the output artifacts of `Pusher`. # + colab_type="code" id="pRkWo-MzTSss" colab={} # %%skip_for_export pusher.outputs # + [markdown] colab_type="text" id="peH2PPS3VgkL" # In particular, the Pusher will export your model in the SavedModel format, which looks like this: # + colab_type="code" id="4zyIqWl9TSdG" colab={} # %%skip_for_export push_uri = pusher.outputs.model_push.get()[0].uri latest_version = max(os.listdir(push_uri)) latest_version_path = os.path.join(push_uri, latest_version) model = tf.saved_model.load(latest_version_path) for item in model.signatures.items(): pp.pprint(item) # + [markdown] colab_type="text" id="3-YPNUuHANtj" # We're finished our tour of built-in TFX components! # # After you're happy with experimenting with TFX components and code in this notebook, you may want to export it as a pipeline to be orchestrated with Apache Airflow or Apache Beam. See the final section. # + [markdown] colab_type="text" id="qGNDOG1o1Tht" # ## Export to pipeline # # To export the contents of this notebook as a pipeline to be orchestrated with Airflow or Beam, follow the instructions below. # # If you're using Colab, make sure to **save this notebook to Google Drive** (`File` → `Save a Copy in Drive`) before exporting. # + [markdown] colab_type="text" id="DDbff6EdQ0iJ" # ### 1. Mount Google Drive (Colab-only) # # If you're using Colab, this notebook needs to mount your Google Drive to be able to access its own `.ipynb` file. # + cellView="form" colab_type="code" id="CH8yu7Un1Thu" colab={} # %%skip_for_export #@markdown Run this cell and enter the authorization code to mount Google Drive. import sys if 'google.colab' in sys.modules: # Colab. from google.colab import drive drive.mount('/content/drive') # + [markdown] colab_type="text" id="iFrJ8nOIRIJ0" # ### 2. 
Select an orchestrator # + cellView="form" colab_type="code" id="CO7erulbvUNi" colab={} _runner_type = 'beam' #@param ["beam", "airflow"] _pipeline_name = 'chicago_taxi_%s' % _runner_type # + [markdown] colab_type="text" id="d64gdS2u1Thw" # ### 3. Set up paths for the pipeline # + colab_type="code" id="X_dZL1lS1Thx" colab={} # For Colab notebooks only. # TODO(USER): Fill out the path to this notebook. _notebook_filepath = ( '/content/drive/My Drive/Colab Notebooks/taxi_pipeline_interactive.ipynb') # For Jupyter notebooks only. # _notebook_filepath = os.path.join(os.getcwd(), # 'taxi_pipeline_interactive.ipynb') # TODO(USER): Fill out the paths for the exported pipeline. _pipeline_name = 'taxi_pipeline' _tfx_root = os.path.join(os.environ['HOME'], 'tfx') _taxi_root = os.path.join(os.environ['HOME'], 'taxi') _serving_model_dir = os.path.join(_taxi_root, 'serving_model') _data_root = os.path.join(_taxi_root, 'data', 'simple') _pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name) _metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name, 'metadata.db') # + [markdown] colab_type="text" id="stNBJAIPvUNq" # ### 4. Choose components to include in the pipeline # + colab_type="code" id="DNc0Iks2vUNq" colab={} # TODO(USER): Specify components to be included in the exported pipeline. components = [ example_gen, statistics_gen, schema_gen, example_validator, transform, trainer, model_resolver, evaluator, pusher ] # + [markdown] colab_type="text" id="6nATTNYZ1Thy" # ### 5. Generate pipeline files # + cellView="form" colab_type="code" id="SsfNFi6iHMSp" colab={} # %%skip_for_export #@markdown Run this cell to generate the pipeline files. if get_ipython().magics_manager.auto_magic: print('Warning: %automagic is ON. Line magics specified without the % prefix ' 'will not be scrubbed during export to pipeline.') _pipeline_export_filepath = 'export_%s.py' % _pipeline_name context.export_to_pipeline(notebook_filepath=_notebook_filepath, export_filepath=_pipeline_export_filepath, runner_type=_runner_type) # + [markdown] colab_type="text" id="qL4RQQwSSt0y" # ### 6. Download pipeline files # + cellView="form" colab_type="code" id="FeRJyHly1Th3" colab={} # %%skip_for_export #@markdown Run this cell to download the pipeline files as a `.zip`. if 'google.colab' in sys.modules: from google.colab import files import zipfile zip_export_path = os.path.join( tempfile.mkdtemp(), 'export.zip') with zipfile.ZipFile(zip_export_path, mode='w') as export_zip: export_zip.write(_pipeline_export_filepath) export_zip.write(_taxi_constants_module_file) export_zip.write(_taxi_transform_module_file) export_zip.write(_taxi_trainer_module_file) files.download(zip_export_path) # + [markdown] colab_type="text" id="Po3wc1dMTJHw" # To learn how to run the orchestrated pipeline with Apache Airflow, please refer to the [TFX Orchestration Tutorial](https://www.tensorflow.org/tfx/tutorials/tfx/airflow_workshop). 
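# + [markdown]
# For orientation, the exported Beam pipeline file is roughly of the following shape.
# This is a hand-written sketch rather than the generated file itself (the export step
# above produces the authoritative version); it reuses `_pipeline_name`, `_pipeline_root`,
# `_metadata_path` and the `components` list defined in the cells above, and assumes a
# TFX release where `BeamDagRunner` lives under `tfx.orchestration.beam.beam_dag_runner`.

# +
# %%skip_for_export
from tfx.orchestration import metadata, pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner


def _create_pipeline():
  """Assembles the interactively defined components into a runnable pipeline."""
  return pipeline.Pipeline(
      pipeline_name=_pipeline_name,
      pipeline_root=_pipeline_root,
      components=components,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          _metadata_path),
      enable_cache=True)


# Uncomment to run the whole pipeline with the Beam orchestrator:
# BeamDagRunner().run(_create_pipeline())
# -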
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # Open In Colab # + [markdown] colab_type="text" id="CVGBphEakB1c" # # KenLM Framework for Language Modeling # + [markdown] colab_type="text" id="_mu9NmEikGja" # ## Install KenLM # # ### Reference: https://github.com/kpu/kenlm # + # # !pip install https://github.com/kpu/kenlm/archive/master.zip # - import sys sys.path.append('utils/') # + colab={"base_uri": "https://localhost:8080/", "height": 317} colab_type="code" id="90dlx6MzkrRB" outputId="6d2e42e5-3675-4d87-b79a-ff562b9e4c27" import kenlm import os import re import utils.ngram_utils as ngram_utils import numpy as np # + # Read data from .txt files and create lists of reviews train_data = [] # create a list of all the reviews with open('../data/amazon_train.txt', 'r') as f: train_data = [review for review in f.read().split('\n') if review] valid_data = [] # create a list of all the reviews with open('../data/amazon_valid.txt', 'r') as f: valid_data = [review for review in f.read().split('\n') if review] # - # Tokenize the Datasets # TODO: this takes a really long time !! why? train_data_tokenized, all_tokens_train = ngram_utils.tokenize_dataset(train_data) valid_data_tokenized, all_tokens_valid = ngram_utils.tokenize_dataset(valid_data) vocab = list(set(all_tokens_train)) len(vocab) train_data = [] for t in train_data_tokenized: train_data.append(' '.join(t)) train_data[:3] valid_data = [] for t in valid_data_tokenized: valid_data.append(' '.join(t)) valid_data[:3] len(train_data), len(valid_data) # + # # Change directory where you have the data # path_to_data = '../data/' # os.chdir(path_to_data) # path_to_data # - # ## 3-gram model with KenLM # cat ../data/amazon_train.txt | ../../kenlm/build/bin/lmplz -o 3 > amazonLM3.arpa # !../../kenlm/build/bin/build_binary amazonLM3.arpa amazonLM3.klm model_3n = kenlm.LanguageModel('amazonLM3.klm') model_3n # ## 5-gram KenLM # cat ../data/amazon_train.txt | ../../kenlm/build/bin/lmplz -o 5 > amazonLM5.arpa # !../../kenlm/build/bin/build_binary amazonLM5.arpa amazonLM5.klm model_5n = kenlm.LanguageModel('amazonLM5.klm') model_5n # ## Perplexity (Train + Valid Data) # + [markdown] colab_type="text" id="ov42EMhflktI" # ### The KenLM model reports negative log likelihood, not perplexity. So we'll be converting the score and report net perplexity. The following function calculate the perpelxity. # # ### Pereplexity is defined as follows, $$ PPL = b^{- \frac{1}{N} \sum_{i=1}^N \log_b q(x_i)} $$ # # ### All probabilities here are in log base 10 so to convert to perplexity, we do the following # # ### $$PPL = 10^{-\log(P) / N} $$ # # ### where $P$ is the total NLL, and $N$ is the word count. 
# + colab={} colab_type="code" id="KLsISNQNlKff" def get_ppl(lm, sentences): """ Assume sentences is a list of strings (space delimited sentences) """ total_nll = 0 total_wc = 0 for sent in sentences: sent = re.sub(r"([\w/'+$\s-]+|[^\w/'+$\s-]+)\s*", r"\1 ", sent) words = sent.strip().split() score = lm.score(sent, bos=False, eos=False) word_count = len(words) total_wc += word_count total_nll += score ppl = 10**-(total_nll/total_wc) return ppl # - # 3-gram train_ppl = get_ppl(model_3n, train_data) valid_ppl = get_ppl(model_3n, valid_data) train_ppl, valid_ppl # 5-gram train_ppl = get_ppl(model_5n, train_data) valid_ppl = get_ppl(model_5n, valid_data) train_ppl, valid_ppl # ## Score Sentences sentences = ['i like this product very much .'] ppl3 = get_ppl(model_3n, sentences) ppl5 = get_ppl(model_5n, sentences) ppl3, ppl5 sentences = ['i like pandas'] ppl3 = get_ppl(model_3n, sentences) ppl5 = get_ppl(model_5n, sentences) ppl3, ppl5 # + [markdown] colab_type="text" id="4unImHqblPQ9" # Function for loading the data # - sentences = ['this color is very ugly'] ppl3 = get_ppl(model_3n, sentences) ppl5 = get_ppl(model_5n, sentences) ppl3, ppl5 sentences = ['kigali is an awesome city !'] ppl3 = get_ppl(model_3n, sentences) ppl5 = get_ppl(model_5n, sentences) ppl3, ppl5 sentences = ['i want to get a refund'] ppl3 = get_ppl(model_3n, sentences) ppl5 = get_ppl(model_5n, sentences) ppl3, ppl5 sentences = ['this watch is not what i expected'] ppl3 = get_ppl(model_3n, sentences) ppl5 = get_ppl(model_5n, sentences) ppl3, ppl5 sentences = ['this dress fits me perfectly !'] ppl3 = get_ppl(model_3n, sentences) ppl5 = get_ppl(model_5n, sentences) ppl3, ppl5 sentences = ['my wife loves the color of this dress'] ppl3 = get_ppl(model_3n, sentences) ppl5 = get_ppl(model_5n, sentences) ppl3, ppl5 # ## Generate Sentences def generate(lm, context='', max_num_tokens=20): generated_tokens = [] cur_sent = context for j in range(max_num_tokens): scores = [] for i, token in enumerate(vocab): sent = cur_sent + ' ' + token if token == '': eos = True else: eos = False token_score = lm.score(sent, bos=True, eos=eos) scores.append(token_score) best_token = vocab[np.argmax(scores)] generated_tokens.append(best_token) cur_sent = cur_sent + ' ' + best_token if best_token == '': break return generated_tokens s3 = generate(model_3n) s5 = generate(model_5n) print(' '.join(word for word in s3)) print(' '.join(word for word in s5)) context = ' i will' s3 = generate(model_3n, context=context) s5 = generate(model_5n, context=context) print(' '.join(word for word in s3)) print(' '.join(word for word in s5)) context = ' i like' s3 = generate(model_3n, context=context) s5 = generate(model_5n, context=context) print(' '.join(word for word in s3)) print(' '.join(word for word in s5)) context = ' i am' s3 = generate(model_3n, context=context) s5 = generate(model_5n, context=context) print(' '.join(word for word in s3)) print(' '.join(word for word in s5)) context = ' this' s3 = generate(model_3n, context=context) s5 = generate(model_5n, context=context) print(' '.join(word for word in s3)) print(' '.join(word for word in s5)) context = ' this dress' s3 = generate(model_3n, context=context) s5 = generate(model_5n, context=context) print(' '.join(word for word in s3)) print(' '.join(word for word in s5)) context = ' this animal' s3 = generate(model_3n, context=context) s5 = generate(model_5n, context=context) print(' '.join(word for word in s3)) print(' '.join(word for word in s5)) context = ' what' s3 = generate(model_3n, context=context) s5 
= generate(model_5n, context=context) print(' '.join(word for word in s3)) print(' '.join(word for word in s5)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # Create a SparkSession from pyspark.sql import SparkSession spark = SparkSession.builder.master("local").appName("Word Count")\ .config("spark.some.config.option", "some-value")\ .getOrCreate() # - spark # + # Creating a DataFrame with the range command produces 8 partitions by default df1 = spark.range(2, 10000000, 2) df2 = spark.range(2, 10000000, 4) # Repartition step1 = df1.repartition(5) step12 = df2.repartition(6) step2 = step1.selectExpr("id * 5 as id") # Perform the join; spark.sql.shuffle.partitions defaults to 200, so the shuffle uses 200 partitions step3 = step2.join(step12, ["id"]) step4 = step3.selectExpr("sum(id)") step4.collect() # - step4.explain() df1.printSchema() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Shortest path # https://www.wikiwand.com/en/Shortest_path_problem # # - ### Single-source # - Dijkstra's # - Bellman-Ford # - Breadth-first (unweighted) # - Topological sorting (weighted DAGs) # - ### Single pair # - A* (using heuristics) # - ### All pairs # - Floyd-Warshall # - Johnson's (potentially faster on sparse graphs) # - ### Stochastic # - Viterbi # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + from tueplots import fonts, figsizes import matplotlib.pyplot as plt # Increase the resolution of all the plots below plt.rcParams.update({"figure.dpi": 150}) # "Better" figure size to display the font-changes plt.rcParams.update(figsizes.icml2022(column="half")) # - # Fonts in `tueplots` follow the same interface as the other settings. # # There are some pre-defined font recipes for a few journals, and they return dictionaries that are compatible with `matplotlib.pyplot.rcParams.update()`.
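# Because the recipes are plain dictionaries, they can be inspected and merged like any
# other dict before being applied. The short example below (an illustrative aside, not
# part of the original walkthrough) combines the `icml2022` font and figure-size recipes
# already used in this notebook; it only prints the keys and does not change the current
# rcParams, so the font comparisons that follow still start from the defaults.

# +
# Merge a font recipe with a figure-size recipe into a single rcParams-style dict.
combined = {**fonts.icml2022(), **figsizes.icml2022(column="half")}
# Inspect which rcParams entries the recipes would touch.
print(sorted(combined.keys()))
# The merged dict could then be passed to plt.rcParams.update(combined),
# or applied temporarily via plt.rc_context(combined).
# -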
fonts.neurips2021() # Compare the following default font to some of the alternatives that we provide: fig, ax = plt.subplots() ax.plot([1.0, 2.0], [3.0, 4.0]) ax.set_title("Title") ax.set_xlabel("xlabel $\int f(x) dx$") ax.set_ylabel("ylabel $x \sim \mathcal{N}(x)$") plt.show() plt.rcParams.update(fonts.jmlr2001_tex(family="serif")) fig, ax = plt.subplots() ax.plot([1.0, 2.0], [3.0, 4.0]) ax.set_title("Title") ax.set_xlabel("xlabel $\int_a^b f(x) dx$") ax.set_ylabel("ylabel $x \sim \mathcal{N}(x)$") plt.show() plt.rcParams.update(fonts.jmlr2001_tex(family="sans-serif")) fig, ax = plt.subplots() ax.plot([1.0, 2.0], [3.0, 4.0]) ax.set_title("Title") ax.set_xlabel("xlabel $\int_a^b f(x) dx$") ax.set_ylabel("ylabel $x \sim \mathcal{N}(x)$") plt.show() plt.rcParams.update(fonts.neurips2021()) fig, ax = plt.subplots() ax.plot([1.0, 2.0], [3.0, 4.0]) ax.set_title("Title") ax.set_xlabel("xlabel $\int_a^b f(x) dx$") ax.set_ylabel("ylabel $x \sim \mathcal{N}(x)$") plt.show() plt.rcParams.update(fonts.neurips2021(family="sans-serif")) fig, ax = plt.subplots() ax.plot([1.0, 2.0], [3.0, 4.0]) ax.set_title("Title") ax.set_xlabel("xlabel $\int_a^b f(x) dx$") ax.set_ylabel("ylabel $x \sim \mathcal{N}(x)$") plt.show() plt.rcParams.update(fonts.neurips2021_tex(family="sans-serif")) fig, ax = plt.subplots() ax.plot([1.0, 2.0], [3.0, 4.0]) ax.set_title("Title") ax.set_xlabel("xlabel $\int_a^b f(x) dx$") ax.set_ylabel("ylabel $x \sim \mathcal{N}(x)$") plt.show() plt.rcParams.update(fonts.neurips2021_tex(family="serif")) fig, ax = plt.subplots() ax.plot([1.0, 2.0], [3.0, 4.0]) ax.set_title("Title") ax.set_xlabel("xlabel $\int_a^b f(x) dx$") ax.set_ylabel("ylabel $x \sim \mathcal{N}(x)$") plt.show() plt.rcParams.update(fonts.beamer_moml()) fig, ax = plt.subplots() ax.plot([1.0, 2.0], [3.0, 4.0]) ax.set_title("Title") ax.set_xlabel("xlabel $\int_a^b f(x) dx$") ax.set_ylabel("ylabel $x \sim \mathcal{N}(x)$") plt.show() with plt.rc_context(fonts.icml2022()): fig, ax = plt.subplots() ax.plot([1.0, 2.0], [3.0, 4.0]) ax.set_title("Title") ax.set_xlabel("xlabel $\int_a^b f(x) dx$") ax.set_ylabel("ylabel $x \sim \mathcal{N}(x)$") plt.show() with plt.rc_context(fonts.icml2022_tex(family="sans-serif")): fig, ax = plt.subplots() ax.plot([1.0, 2.0], [3.0, 4.0]) ax.set_title("Title") ax.set_xlabel("xlabel $\int_a^b f(x) dx$") ax.set_ylabel("ylabel $x \sim \mathcal{N}(x)$") plt.show() with plt.rc_context(fonts.icml2022_tex(family="serif")): fig, ax = plt.subplots() ax.plot([1.0, 2.0], [3.0, 4.0]) ax.set_title("Title") ax.set_xlabel("xlabel $\int_a^b f(x) dx$") ax.set_ylabel("ylabel $x \sim \mathcal{N}(x)$") plt.show() plt.rcParams.update(fonts.aistats2022_tex(family="serif")) fig, ax = plt.subplots() ax.plot([1.0, 2.0], [3.0, 4.0]) ax.set_title("Title") ax.set_xlabel("xlabel $\int_a^b f(x) dx$") ax.set_ylabel("ylabel $x \sim \mathcal{N}(x)$") plt.show() plt.rcParams.update(fonts.aistats2022_tex(family="sans-serif")) fig, ax = plt.subplots() ax.plot([1.0, 2.0], [3.0, 4.0]) ax.set_title("Title") ax.set_xlabel("xlabel $\int_a^b f(x) dx$") ax.set_ylabel("ylabel $x \sim \mathcal{N}(x)$") plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="FDyyuoqLf4ho" # # SC1015 Mini-Project: Predicting Natural Disasters from Increasing Temperatures # ### Group 6 Members: , , # # --- # + [markdown] id="4y-at1tgu1Ct" # # Multivariate Linear Regression 
to Predict Frequency of Natural Disasters using CO2, CH4, N2O and Temperature Change (Model 1) # - # ## Cleaning and Preparing CO2 Dataset # Finding the average Atmospheric CO2 level per year (from 2001): # + colab={"base_uri": "https://localhost:8080/", "height": 677} id="uLV89rU_t6vb" outputId="462f2345-4d77-4c85-e240-7611e0d5980a" # co2Data_model.iloc[(co2Data_model.index >= "2001") & (co2Data_model.index < "2022")] avg_co2 = {} for year in range (2001, 2021): avg_co2[year] = co2Data_model.iloc[(co2Data_model.index >= str(year)) & (co2Data_model.index < str(year+1))].mean() co2Data_LF = pd.DataFrame.from_dict(avg_co2, orient="index").rename(columns={"average" : "CO2 Average"}) co2Data_LF # - # ## Cleaning and Preparing CH4 Dataset # Finding the average Atmospheric CH4 level per year (from 2001): # + colab={"base_uri": "https://localhost:8080/", "height": 677} id="YBITAdGRvhAv" outputId="7a2d4616-d81d-4f4f-a973-b112a8e7e300" # ch4Data_model.iloc[(ch4Data_model.index >= "2001") & (ch4Data_model.index < "2022")] avg_ch4 = {} for year in range (2001, 2021): avg_ch4[year] = ch4Data_model.iloc[(ch4Data_model.index >= str(year)) & (ch4Data_model.index < str(year+1))].mean() ch4Data_LF = pd.DataFrame.from_dict(avg_ch4, orient="index").rename(columns={"average" : "CH4 Average"}) ch4Data_LF # - # ## Cleaning and Preparing N2O Dataset # Finding the average Atmospheric N2O level per year (from 2001): # + colab={"base_uri": "https://localhost:8080/", "height": 677} id="kZRKc7bewCz8" outputId="1d3bcf66-d150-4877-81ee-c2576dfa22a0" # n2oData_model.iloc[(n2oData_model.index >= "2001") & (n2oData_model.index < "2022")] avg_n2o = {} for year in range (2001, 2021): avg_n2o[year] = n2oData_model.iloc[(n2oData_model.index >= str(year)) & (n2oData_model.index < str(year+1))].mean() n2oData_LF = pd.DataFrame.from_dict(avg_n2o, orient="index").rename(columns={"average" : "N2O Average"}) n2oData_LF # - # ## Cleaning and Preparing Temperature Change Dataset # Finding the average temperature change per year (from 2001): # + colab={"base_uri": "https://localhost:8080/", "height": 677} id="ylMD7oNwwE1k" outputId="22c6cf25-a1c8-43d6-ea63-2d0dab365aad" # tempData.iloc[(tempData.index >= "2001") & (tempData.index < "2022")].drop(columns=['Entity', 'Code']) avg_temp = {} for year in range (2001, 2021): avg_temp[year] = tempData.iloc[(tempData.index >= str(year)) & (tempData.index < str(year+1))].mean() tempData_LF = pd.DataFrame.from_dict(avg_temp, orient="index") tempData_LF # - # ## Concatenating all datasets into a single dataframe # Concatenating all datasets (CO2, CH4, N2O, Temperature Change) with Number of Disasters per year into a single dataframe: # + colab={"base_uri": "https://localhost:8080/", "height": 677} id="C2sTW3YcxQ66" outputId="9875618d-47e6-4727-b346-9a200677a26b" total_LF = pd.concat([co2Data_LF, ch4Data_LF, n2oData_LF, tempData_LF, disasterCountPerYear[disasterCountPerYear.index >= 2001]], axis=1) total_LF # - # ## Multivariate Linear Regression Model 1 # Predictors: CO2, CH4, N2O, Temperature Change # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="YDUjVc7-3T5-" outputId="bc91e69b-91bf-431f-8b2f-6f9dec0d3a0c" from sklearn.model_selection import train_test_split predictors = ["CO2 Average", "CH4 Average", "N2O Average", "temperature_anomaly"] y = pd.DataFrame(total_LF["No. 
Of Disasters"]) x = pd.DataFrame(total_LF[predictors]) # split in to train test data x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2) # Linear Regression using Train Data linreg = LinearRegression() # create the linear regression object linreg.fit(x_train, y_train) # train the linear regression model # Coefficients of the Linear Regression line print('Intercept of Regression \t: b = ', linreg.intercept_) print('Coefficients of Regression \t: a = ', linreg.coef_) print() # Print the Coefficients against Predictors print(pd.DataFrame(list(zip(x_train.columns, linreg.coef_[0])), columns = ["Predictors", "Coefficients"])) print() # Predict Response corresponding to Predictors y_train_pred = linreg.predict(x_train) y_test_pred = linreg.predict(x_test) # Plot the Predictions vs the True values f, axes = plt.subplots(1, 2, figsize=(24, 12)) axes[0].scatter(y_train, y_train_pred, color = "blue") axes[0].plot(y_train, y_train, 'r', linewidth = 1) axes[0].set_xlabel("True values of the Response Variable (Train)") axes[0].set_ylabel("Predicted values of the Response Variable (Train)") axes[1].scatter(y_test, y_test_pred, color = "green") axes[1].plot(y_test, y_test, 'r', linewidth = 1) axes[1].set_xlabel("True values of the Response Variable (Test)") axes[1].set_ylabel("Predicted values of the Response Variable (Test)") plt.show() # Check the Goodness of Fit (on Train Data) print("Goodness of Fit of Model \t\tTrain Dataset") print("Explained Variance (R^2) \t\t:", linreg.score(x_train, y_train)) print("Mean Squared Error (MSE) \t\t:", mean_squared_error(y_train, y_train_pred)) print("Root Mean Squared Error (RMSE) \t:", np.sqrt(mean_squared_error(y_train, y_train_pred))) print() # Check the Goodness of Fit (on Test Data) print("Goodness of Fit of Model \t\tTest Dataset") print("Explained Variance (R^2) \t\t:", linreg.score(x_test, y_test)) print("Mean Squared Error (MSE) \t\t:", mean_squared_error(y_test, y_test_pred)) print("Root Mean Squared Error (RMSE) \t:", np.sqrt(mean_squared_error(y_test, y_test_pred))) print() # - # Creating a seperate dataframe for CO2 predicted data (2021 - 2030): # + colab={"base_uri": "https://localhost:8080/", "height": 363} id="bzbH4a_X4div" outputId="5388efd1-f1a8-42ad-ad84-2b0cbdbcd696" co2Data_pred = forecast_co2.to_frame().iloc[forecast_co2.index >= "2021"] avg_co2_pred = {} for year in range (2021, 2031): avg_co2_pred[year] = co2Data_pred.iloc[(co2Data_pred.index >= str(year)) & (co2Data_pred.index < str(year+1))].mean() co2Data_PredAvg = pd.DataFrame.from_dict(avg_co2_pred, orient="index").rename(columns={"predicted_mean" : "CO2 Average"}) co2Data_PredAvg # - # Creating a seperate dataframe for CH4 predicted data (2021 - 2030): # + colab={"base_uri": "https://localhost:8080/", "height": 363} id="wFdBt7aT-rIh" outputId="cdd89e1e-285a-4a3e-fa8a-9f6d9c56e701" # forecast_ch4 ch4Data_pred = forecast_ch4.to_frame().iloc[forecast_ch4.index >= "2021"] avg_ch4_pred = {} for year in range (2021, 2031): avg_ch4_pred[year] = ch4Data_pred.iloc[(ch4Data_pred.index >= str(year)) & (ch4Data_pred.index < str(year+1))].mean() ch4Data_PredAvg = pd.DataFrame.from_dict(avg_ch4_pred, orient="index").rename(columns={"predicted_mean" : "CH4 Average"}) ch4Data_PredAvg # - # Creating a seperate dataframe for N2O predicted data (2021 - 2030): # + colab={"base_uri": "https://localhost:8080/", "height": 363} id="phhqiwuS-uBI" outputId="07af34c3-05f7-42ac-ea59-5856dd83eb41" # forecast_n2o n2oData_pred = forecast_n2o.to_frame().iloc[forecast_n2o.index >= "2021"] 
avg_n2o_pred = {} for year in range (2021, 2031): avg_n2o_pred[year] = n2oData_pred.iloc[(n2oData_pred.index >= str(year)) & (n2oData_pred.index < str(year+1))].mean() n2oData_PredAvg = pd.DataFrame.from_dict(avg_n2o_pred, orient="index").rename(columns={"predicted_mean" : "N2O Average"}) n2oData_PredAvg # - # Creating a seperate dataframe for Temperature Change predicted data (2021 - 2030): # + colab={"base_uri": "https://localhost:8080/", "height": 363} id="BX0FfiDf-wW0" outputId="b9d5ad95-dd39-494d-faea-3eb91feb58f0" forecast_temp tempData_pred = forecast_temp.to_frame().iloc[forecast_temp.index >= "2021"] avg_temp_pred = {} for year in range (2021, 2031): avg_temp_pred[year] = tempData_pred.iloc[(tempData_pred.index >= str(year)) & (tempData_pred.index < str(year+1))].mean() tempData_PredAvg = pd.DataFrame.from_dict(avg_temp_pred, orient="index").rename(columns={"predicted_mean" : "temperature_anomaly"}) tempData_PredAvg # - # Combining all predicted values (CO2, CH4, N2O, Temperature Change) into a single dataframe: # + colab={"base_uri": "https://localhost:8080/", "height": 363} id="he2A8maO-xYW" outputId="3bda0960-a672-4ecf-a357-9f713709d796" total_pred = pd.concat([co2Data_PredAvg, ch4Data_PredAvg, n2oData_PredAvg, tempData_PredAvg], axis=1) total_pred # - # Using predicted values of (CO2, CH4, N2O and Temperature Change), we used the Multivariate Linear Regression model to predict the amount of Natural Disasters: # + colab={"base_uri": "https://localhost:8080/"} id="MjwVVPLDCaxa" outputId="fed4cd82-1821-456f-abc7-5591e394d9a4" x_pred = pd.DataFrame(total_pred[predictors]) y_pred = linreg.predict(x_pred) y_pred # + colab={"base_uri": "https://localhost:8080/", "height": 363} id="Njbs-ZUFUDjd" outputId="a9d0f9f2-82a9-4c00-e8e3-0b477197c5f6" total_pred = pd.concat([total_pred, pd.DataFrame(y_pred, columns = ["Predicted Amount of Natural Disasters"], index = total_pred.index)], axis=1) total_pred # - # Plotting the graph of actual Natural Disasters (1970 - 2020) with predicted Natural Disasters (2021 - 2030): # + colab={"base_uri": "https://localhost:8080/", "height": 518} id="fjopsOtriAxn" outputId="6d7f58af-56df-4df5-e4b5-93fe1206d27b" # total_pred["Predicted Amount of Natural Disasters"].rename(columns={"Predicted Amount of Natural Disasters" : "No. Of Disasters"}) # pd.concat([disasterCountPerYear['No. Of Disasters'], total_pred["Predicted Amount of Natural Disasters"].rename("No. Of Disasters")], axis=0).to_frame() # join_line = pd.concat([disasterCountPerYear['No. Of Disasters'].iloc[-1:], total_pred["Predicted Amount of Natural Disasters"].rename("No. Of Disasters").iloc[:1]], axis=0).to_frame() # join_line plt.figure(figsize=(15,8)) plt.title("No. Of Disasters from 1970 - 2030") plt.xlabel('Year') plt.ylabel('No. Of Occurrences') plt.plot(disasterCountPerYear['No. Of Disasters'], 'r') # plt.plot(pd.concat([disasterCountPerYear['No. Of Disasters'], total_pred["Predicted Amount of Natural Disasters"].rename("No. Of Disasters")], axis=0).to_frame(), 'r') plt.plot(total_pred["Predicted Amount of Natural Disasters"], "b") plt.plot(pd.concat([disasterCountPerYear['No. Of Disasters'].iloc[-1:], total_pred["Predicted Amount of Natural Disasters"].rename("No. 
Of Disasters").iloc[:1]], axis=0), "b") plt.legend(["Actual", "Prediction"], loc=2, prop={"size":15}) plt.show() # + [markdown] id="lmZkkMuQNHt1" # # Multivariate Linear Regression to Predict Frequency of Natural Disasters using CO2 and Temperature Change Only (Model 2) # - # Finding the average Atmospheric CO2 level per year (from 1970): # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="DXdQKIM0DGPR" outputId="486add98-79e3-423d-b17e-1739cfbe521e" # co2Data_model avg_co2 = {} for year in range (1970, 2021): avg_co2[year] = co2Data_model.iloc[(co2Data_model.index >= str(year)) & (co2Data_model.index < str(year+1))].mean() co2Data_LF = pd.DataFrame.from_dict(avg_co2, orient="index").rename(columns={"average" : "CO2 Average"}) co2Data_LF # - # Finding the average Temperature Change per year (from 1970): # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="z0ZfJvCwQArd" outputId="0e913fc8-4fb3-4df1-9f12-5a6264c04cbb" # tempData_model avg_temp = {} for year in range (1970, 2021): avg_temp[year] = tempData.iloc[(tempData.index >= str(year)) & (tempData.index < str(year+1))].mean() tempData_LF = pd.DataFrame.from_dict(avg_temp, orient="index").rename(columns={"average" : "CO2 Average"}) tempData_LF # - # ## Concatenating all datasets into a single dataframe # Concatenating all datasets (CO2, Temperature Change) with Number of Disasters per year into a single dataframe: # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="D1FXGYjUNVtr" outputId="0d0bc776-8ca2-4587-fa48-a313cbf8c262" total_LF = pd.concat([co2Data_LF, tempData_LF, disasterCountPerYear[disasterCountPerYear.index >= 1970]], axis=1) total_LF # - # ## Multivariate Linear Regression Model 2 # # Predictors: CO2, Temperature Change # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="TBh8hp10OP3A" outputId="879097eb-2526-4285-d285-0b3ca606f3bd" predictors = ["CO2 Average", "temperature_anomaly"] y = pd.DataFrame(total_LF["No. 
Of Disasters"]) x = pd.DataFrame(total_LF[predictors]) # split in to train test data x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2) # Linear Regression using Train Data linreg = LinearRegression() # create the linear regression object linreg.fit(x_train, y_train) # train the linear regression model # Coefficients of the Linear Regression line print('Intercept of Regression \t: b = ', linreg.intercept_) print('Coefficients of Regression \t: a = ', linreg.coef_) print() # Print the Coefficients against Predictors print(pd.DataFrame(list(zip(x_train.columns, linreg.coef_[0])), columns = ["Predictors", "Coefficients"])) print() # Predict Response corresponding to Predictors y_train_pred = linreg.predict(x_train) y_test_pred = linreg.predict(x_test) # Plot the Predictions vs the True values f, axes = plt.subplots(1, 2, figsize=(24, 12)) axes[0].scatter(y_train, y_train_pred, color = "blue") axes[0].plot(y_train, y_train, 'r', linewidth = 1) axes[0].set_xlabel("True values of the Response Variable (Train)") axes[0].set_ylabel("Predicted values of the Response Variable (Train)") axes[1].scatter(y_test, y_test_pred, color = "green") axes[1].plot(y_test, y_test, 'r', linewidth = 1) axes[1].set_xlabel("True values of the Response Variable (Test)") axes[1].set_ylabel("Predicted values of the Response Variable (Test)") plt.show() # Check the Goodness of Fit (on Train Data) print("Goodness of Fit of Model \t\tTrain Dataset") print("Explained Variance (R^2) \t\t:", linreg.score(x_train, y_train)) print("Mean Squared Error (MSE) \t\t:", mean_squared_error(y_train, y_train_pred)) print("Root Mean Squared Error (RMSE) \t:", np.sqrt(mean_squared_error(y_train, y_train_pred))) print() # Check the Goodness of Fit (on Test Data) print("Goodness of Fit of Model \t\tTest Dataset") print("Explained Variance (R^2) \t\t:", linreg.score(x_test, y_test)) print("Mean Squared Error (MSE) \t\t:", mean_squared_error(y_test, y_test_pred)) print("Root Mean Squared Error (RMSE) \t:", np.sqrt(mean_squared_error(y_test, y_test_pred))) print() # - # Combining predicted values (CO2, Temperature Change) into a single dataframe: # + colab={"base_uri": "https://localhost:8080/", "height": 363} id="teF-xPVHPC8-" outputId="028b6413-b529-45de-af98-ff135f58ee71" total_pred = pd.concat([co2Data_PredAvg, tempData_PredAvg], axis=1) total_pred # - # Using predicted values of (CO2, CH4, N2O and Temperature Change), we used the Multivariate Linear Regression model to predict the amount of Natural Disasters: # + colab={"base_uri": "https://localhost:8080/"} id="F5JVB7RuRbfs" outputId="192d0b98-d39d-448d-fb2b-a142db5a1322" x_pred = pd.DataFrame(total_pred[predictors]) y_pred = linreg.predict(x_pred) y_pred # + colab={"base_uri": "https://localhost:8080/", "height": 363} id="fqvDgafYSmAm" outputId="a01b5a77-54de-4ae9-8623-148133145f51" total_pred = pd.concat([total_pred, pd.DataFrame(y_pred, columns = ["Predicted Amount of Natural Disasters"], index = total_pred.index)], axis=1) total_pred # - # Plotting the graph of actual Natural Disasters (1970 - 2020) with predicted Natural Disasters (2021 - 2030): # + colab={"base_uri": "https://localhost:8080/", "height": 518} id="9GVjcCINpBiy" outputId="70fea6c4-391a-4034-e894-50428633d341" plt.figure(figsize=(15,8)) plt.title("No. Of Disasters from 1970 - 2030") plt.xlabel('Year') plt.ylabel('No. Of Occurrences') plt.plot(disasterCountPerYear['No. Of Disasters'], 'r') # plt.plot(pd.concat([disasterCountPerYear['No. 
Of Disasters'], total_pred["Predicted Amount of Natural Disasters"].rename("No. Of Disasters")], axis=0).to_frame(), 'r') plt.plot(total_pred["Predicted Amount of Natural Disasters"], "b") plt.plot(pd.concat([disasterCountPerYear['No. Of Disasters'].iloc[-1:], total_pred["Predicted Amount of Natural Disasters"].rename("No. Of Disasters").iloc[:1]], axis=0), "b") plt.legend(["Actual", "Prediction"], loc=2, prop={"size":15}) plt.show() # - # ## Summary: # Model 2 is the favoured model. # # **Model 1 (Predictors: CO2, CH4, N2O, Temperature Change):** # - Very poor and fluctuating R2 value when model was ran multiple times, due to low amount of data points # # **Model 2(Predictors: CO2, Temperature Change):** # - Stable R2 value even when model was ran multiple times, decent goodness of fit (RMSE) # - Used data since 1970 # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Heroes Of Pymoli Data Analysis # * Of the 1163 active players, the vast majority are male (84%). There also exists, a smaller, but notable proportion of female players (14%). # # * Our peak age demographic falls between 20-24 (44.8%) with secondary groups falling between 15-19 (18.60%) and 25-29 (13.4%). # ----- # ### Note # * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. # + # Dependencies and Setup import pandas as pd import numpy as np # File to Load (Remember to Change These) file_to_load = "Resources/purchase_data.csv" # Read Purchasing File and store into Pandas data frame purchase_data = pd.read_csv(file_to_load) # - # Display a few elements of the file stored in a Data frame purchase_data.head() # Display a statistical overview of the Dataframe purchase_data.describe() # ## Player Count # * Display the total number of players # player_count=len(purchase_data["SN"].unique()) player_count_display=pd.DataFrame({"Total Players": [player_count]}) player_count_display # ## Purchasing Analysis (Total) # * Run basic calculations to obtain number of unique items, average price, etc. 
# # # * Create a summary data frame to hold the results # # # * Optional: give the displayed data cleaner formatting # # # * Display the summary data frame # # + # Number of unique items item_count=len(purchase_data["Item ID"].unique()) # Average Price average_price=purchase_data["Price"].mean() # Number of Purchases number_purchases=len(purchase_data["Purchase ID"]) # Total Revenue total_revenue=purchase_data["Price"].sum() # Summary Data Frame summary_table = pd.DataFrame({"Number of Unique Items": [item_count], "Average Price": [average_price], "Number of Purchases": [number_purchases], "Total Revenue": [total_revenue]}) # Display numbers in cleaner format #pd.options.display.float_format = '${:,.2f}'.format summary_table ["Average Price"] = summary_table ["Average Price"].map("${:,.2f}".format) summary_table ["Total Revenue"] =summary_table ["Total Revenue"].map("${:,.2f}".format) # Display Data Frame summary_table # - # ## Gender Demographics # * Percentage and Count of Male Players # # # * Percentage and Count of Female Players # # # * Percentage and Count of Other / Non-Disclosed # # # # + # Data Frame to remove duplicates by SN Column #clean_data=pd.DataFrame.drop_duplicates() #clean_data=purchase_data.groupby(["SN","Gender"]).count() #clean_data_male=clean_data.loc[clean_data["Gender"]=="Male"] #clean_data #clean_data_new=clean_data[["SN", "Gender"]] #clean_data_new # Finding the Counts by Gender player_count=len(purchase_data["SN"].unique()) male_player_count=len(purchase_data[purchase_data["Gender"]=="Male"]["SN"].unique()) female_player_count=len(purchase_data[purchase_data["Gender"]=="Female"]["SN"].unique()) remaining_count=player_count-(male_player_count + female_player_count) # Finding the percentages male_player_percent=round(((male_player_count / player_count) * 100),2) female_player_percent=round(((female_player_count / player_count) * 100),2) remaining_percent= round(100 - (male_player_percent + female_player_percent),2) # Creating DataFrame to display results gender_table = pd.DataFrame([ {"":"Male","Total Count":male_player_count,"Percentage of Players":male_player_percent}, {"":"Female","Total Count":female_player_count,"Percentage of Players":female_player_percent}, {"":"Other/Non-Disclosed","Total Count":remaining_count,"Percentage of Players":remaining_percent}]) # Setting Gender as the Index gender_table_display = gender_table.set_index("") # Display Table columnsTitles = ["Total Count","Percentage of Players"] gender_table_display = gender_table_display.reindex(columns=columnsTitles) gender_table_display # - # # ## Purchasing Analysis (Gender) # * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. 
by gender # # # # # * Create a summary data frame to hold the results # # # * Optional: give the displayed data cleaner formatting # # # * Display the summary data frame # + # Total purchases total_purchases=purchase_data["Purchase ID"].count() # Purchase Count by Gender male_purchases=purchase_data[purchase_data["Gender"]=="Male"]["Purchase ID"].count() female_purchases=purchase_data[purchase_data["Gender"]=="Female"]["Purchase ID"].count() remaining_purchases=total_purchases-(male_purchases + female_purchases) # Average Price by Gender male_price=purchase_data[purchase_data["Gender"]=="Male"]["Price"].mean() female_price=purchase_data[purchase_data["Gender"]=="Female"]["Price"].mean() remaining_price=purchase_data[purchase_data["Gender"]=="Other / Non-Disclosed"]["Price"].mean() # Total Purchase Value male_purchases_value=male_purchases * male_price female_purchases_value=female_purchases * female_price remaining_purchases_value=remaining_purchases * remaining_price # Average Total Purchase Per Person male_purchase_average = male_purchases_value / male_player_count female_purchase_average = female_purchases_value / female_player_count remaining_purchase_average = remaining_purchases_value / remaining_count # Creating DataFrame to display results purchasing_analysis_table = pd.DataFrame([ {"Gender":"Female","Purchase Count":female_purchases,"Average Purchase Price":female_price, "Total Purchase Value":female_purchases_value,"Avg Total Purchase Per Person":female_purchase_average}, {"Gender":"Male","Purchase Count":male_purchases,"Average Purchase Price":male_price, "Total Purchase Value":male_purchases_value,"Avg Total Purchase Per Person":male_purchase_average}, {"Gender":"Other/Non-Disclosed","Purchase Count":remaining_purchases,"Average Purchase Price":remaining_price, "Total Purchase Value":remaining_purchases_value,"Avg Total Purchase Per Person":remaining_purchase_average}]) # Setting Gender as the Index purchasing_analysis_display = purchasing_analysis_table.set_index("Gender") # Display numbers in cleaner format #pd.options.display.float_format = '${:,.2f}'.format purchasing_analysis_display["Average Purchase Price"]=purchasing_analysis_display["Average Purchase Price"].map("${:,.2f}".format) purchasing_analysis_display["Total Purchase Value"]=purchasing_analysis_display["Total Purchase Value"].map("${:,.2f}".format) purchasing_analysis_display["Avg Total Purchase Per Person"]=purchasing_analysis_display["Avg Total Purchase Per Person"].map("${:,.2f}".format) # Display Table columnsTitles = ["Purchase Count","Average Purchase Price","Total Purchase Value","Avg Total Purchase Per Person"] purchasing_analysis_display = purchasing_analysis_display.reindex(columns=columnsTitles) purchasing_analysis_display # - # ## Age Demographics # * Establish bins for ages # # # * Categorize the existing players using the age bins. 
Hint: use pd.cut() # # # * Calculate the numbers and percentages by age group # # # * Create a summary data frame to hold the results # # # * Optional: round the percentage column to two decimal points # # # * Display Age Demographics Table # # + #Figure out min and max age #print(purchase_data["Age"].max()) #print(purchase_data["Age"].min()) # Create Bins bins = [0,9.9,14.9,19.9,24.9,29.9,34.9,39.9,50] #Create group labels for these bins group_labels = ["<10", "10-14","15-19","20-24","25-29","30-34","35-39","40+"] #Create a new dataframe, keeping only SN, Gender and Age columns purchase_data_new = purchase_data[["SN","Gender","Age"]] #purchase_data_new #Drop Duplicates from new Data Frame purchase_data_new = purchase_data_new.drop_duplicates() #purchase_data_new.describe() #Place the data series into a new column inside of the DataFrame purchase_data_new["Age Groups"] = pd.cut(purchase_data_new["Age"], bins, labels=group_labels) #purchase_data_new.head() #Exporting to a CSV File to test that bin grouping is working #purchase_data_new.to_csv("fileOld.csv", index=False, header=True) #Calculate the numbers and percentages by age group age_count=purchase_data_new["Age Groups"].value_counts() age_percent=round(age_count / player_count * 100,2) #Create a summary data frame to hold the results age_demographics = pd.DataFrame({"Total Count": age_count,"Percentage of Players": age_percent}) #Display the table and sort Index from lower to higher Bin age_demographics age_demographics.sort_index() # - # ## Purchasing Analysis (Age) # * Bin the purchase_data data frame by age # # # * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below # # # * Create a summary data frame to hold the results # # # * Optional: give the displayed data cleaner formatting # # # * Display the summary data frame # + #Place the data series into a new column inside of the DataFrame purchase_data["Age Groups"] = pd.cut(purchase_data["Age"], bins, labels=group_labels) # Create a GroupBy object based upon "Age Groups" purchase_group = purchase_data.groupby("Age Groups") # Calculations purchase_count = purchase_group["Purchase ID"].count() avg_purchase_price = purchase_group["Price"].mean() total_purchase_age = purchase_group["Price"].sum() avg_total_value_person = total_purchase_age / (age_demographics["Total Count"]) #Create a summary data frame to hold the results purchasing_analysis_age = pd.DataFrame({"Purchase Count": purchase_count,"Average Purchase Price": avg_purchase_price, "Total Purchase Value":total_purchase_age, "Avg Total Purchase Per Person":avg_total_value_person}) # Display numbers in cleaner format #pd.options.display.float_format = '${:,.2f}'.format purchasing_analysis_age["Average Purchase Price"]=purchasing_analysis_age["Average Purchase Price"].map("${:,.2f}".format) purchasing_analysis_age["Total Purchase Value"]=purchasing_analysis_age["Total Purchase Value"].map("${:,.2f}".format) purchasing_analysis_age["Avg Total Purchase Per Person"]=purchasing_analysis_age["Avg Total Purchase Per Person"].map("${:,.2f}".format) #Display the table and sort Index from lower to higher Bin purchasing_analysis_age purchasing_analysis_age.sort_index() # - # ## Top Spenders # * Run basic calculations to obtain the results in the table below # # # * Create a summary data frame to hold the results # # # * Sort the total purchase value column in descending order # # # * Optional: give the displayed data cleaner formatting # # # * Display a preview of the 
summary data frame # # # + #Calculations total_purchase_count_SN = purchase_data.groupby("SN").count()["Price"] avg_purchase_price_SN = purchase_data.groupby("SN").mean()["Price"] total_purchase_value_SN = purchase_data.groupby("SN").sum()["Price"] #Create a summary data frame to hold the results total_spenders = pd.DataFrame({"Purchase Count": total_purchase_count_SN,"Average Purchase Price": avg_purchase_price_SN, "Total Purchase Value":total_purchase_value_SN}) # Display numbers in cleaner format pd.options.display.float_format = '${:,.2f}'.format # total_spenders["Average Purchase Price"]=total_spenders["Average Purchase Price"].map("${:,.2f}".format) # total_spenders["Total Purchase Value"]=total_spenders["Total Purchase Value"].map("${:,.2f}".format) #Display a summary of the dataframe by chooding the first 5 Names sorted from Highest to lowerst by Purchase Value total_spenders.sort_values("Total Purchase Value", ascending=False).head(5) # - # ## Most Popular Items # * Retrieve the Item ID, Item Name, and Item Price columns # # # * Group by Item ID and Item Name. Perform calculations to obtain purchase count, item price, and total purchase value # # # * Create a summary data frame to hold the results # # # * Sort the purchase count column in descending order # # # * Optional: give the displayed data cleaner formatting # # # * Display a preview of the summary data frame # # # + #Retrieve the Item ID, Item Name, and Item Price columns by creating a new dataframe popular_items=purchase_data[["Item ID","Item Name","Price"]] #Group by Item ID and Item Name.Perform calculations to obtain purchase count, item price, and total purchase value item_purchase_count = popular_items.groupby(["Item ID","Item Name"]).count()["Price"] item_purchase_price = popular_items.groupby(["Item ID","Item Name"]).mean()["Price"] item_purchase_value = popular_items.groupby(["Item ID","Item Name"]).sum()["Price"] #Summary data frame to hold the results items_summary = pd.DataFrame({"Purchase Count": item_purchase_count,"Item Price": item_purchase_price, "Total Purchase Value":item_purchase_value}) # Display numbers in cleaner format pd.options.display.float_format = '${:,.2f}'.format # items_summary["Item Price"]=items_summary["Item Price"].map("${:,.2f}".format) # items_summary["Total Purchase Value"]=items_summary["Total Purchase Value"].map("${:,.2f}".format) #Display the table, sorting by Purchase Count, and listing the top 5 items items_summary.sort_values("Purchase Count", ascending=False).head(5) # - # ## Most Profitable Items # * Sort the above table by total purchase value in descending order # # # * Optional: give the displayed data cleaner formatting # # # * Display a preview of the data frame # # #Sort the above table by total purchase value in descending order items_summary.sort_values("Total Purchase Value", ascending=False).head(5) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- ''' The goal of this program is to predict whether a student will get good or bad grades based on their attributes. These attributes are the column names in the given csv data and are as follows: gender race/ethnicity parental level of education lunch test preparation course The grades are based on the following: math score reading score writing score The data is named Student Performance in Exams from the website Kaggle. 
The data can be downloaded here: https://www.kaggle.com/spscientist/students-performance-in-exams ''' # + # Temporary global variable above_avg_observations = {'female': 0, 'male': 0, 'group A': 0, 'group B':0, 'group C':0, 'group D': 0, 'group E': 0, "master's degree": 0, "bachelor's degree": 0, "associate's degree": 0, "some college": 0, "high school": 0, "some high school": 0, 'standard': 0, "free/reduced": 0, 'none': 0, 'completed': 0} below_avg_observations = {'female': 0, 'male': 0, 'group A': 0, 'group B':0, 'group C':0, 'group D': 0, 'group E': 0, "master's degree": 0, "bachelor's degree": 0, "associate's degree": 0, "some college": 0, "high school": 0, "some high school": 0, 'standard': 0, "free/reduced": 0, 'none': 0, 'completed': 0} above_avg_probabilities = {'female': 0, 'male': 0, 'group A': 0, 'group B':0, 'group C':0, 'group D': 0, 'group E': 0, "master's degree": 0, "bachelor's degree": 0, "associate's degree": 0, "some college": 0, "high school": 0, "some high school": 0, 'standard': 0, "free/reduced": 0, 'none': 0, 'completed': 0} below_avg_probabilities = {'female': 0, 'male': 0, 'group A': 0, 'group B':0, 'group C':0, 'group D': 0, 'group E': 0, "master's degree": 0, "bachelor's degree": 0, "associate's degree": 0, "some college": 0, "high school": 0, "some high school": 0, 'standard': 0, "free/reduced": 0, 'none': 0, 'completed': 0} index_selection = {'2': 'female', '3':'male', '4':'group A', '5':'group B', '6':'group C', '7':'group D', '8':'group E', '9':"master's degree", '10':"bachelor's degree", '11':"associate's degree", '12':"some college", '13':"high school", '14':"some high school", '15':'standard', '16':"free/reduced", '17':'none', '18':'completed'} # + # Here we open the csv file and read from it as a dictionary. While reading it in, # we are also gathering the minimum and maximum math score, and the average math score import csv import statistics as std import re with open('StudentsPerformance.csv', 'r') as file: csv_dict = csv.DictReader(file) total_math_scores = 0 math_score_list = [] avg_prob = 0 total_above_average = 0 total_below_average = 0 score = 0 user_choices = [] for row in csv_dict: math_score = int(row['math score']) math_score_list = append_math_score(row, math_score_list) total_math_scores += math_score # get the total math score for calculating the mean mean_of_math_scores = total_math_scores / len(math_score_list) # get the average math score file.seek(0) # start at the beginning of the csv_dict file next(csv_dict) # and skip the header for row in csv_dict: total_above_average += count_above_avg(row, mean_of_math_scores) total_below_average += count_below_avg(row, mean_of_math_scores) file.seek(0) # start at the beginning of the csv_dict file next(csv_dict) # and skip the header for row in csv_dict: x_given_above_avg(row, mean_of_math_scores) x_given_below_avg(row, mean_of_math_scores) calculate_probability_of_above_avg(total_above_average) # calculate probability for each occurences of p(x | given above average) by dividing by the average of above average math scores print(above_avg_probabilities) calculate_probability_of_below_avg(total_below_average) print(below_avg_probabilities) print(total_above_average) print(total_below_average) print(above_avg_observations) print(below_avg_observations) probability_given_user_input(int(user_input_score()), user_input(user_choices)) # - def user_input_score(): score = input("Please enter 0 for above average or 1 for below average") return score # + # get user input of constraints def 
user_input(user_choices): gender = input("Enter 2 for female, 3 for male or 'S' to skip") group = input("Enter 4 for group A, 5 for group B, 6 for group C, 7 for group D, 8 for group E or 'S' to skip") parent_ed = input("Enter 9 for master's degree, 10 for bachelor's degree, 11 for associate's degree, 12 for some college, 13 for high school, 14 some high school or 'S' to skip") lunch = input("Enter 15 for free/reduced lunch, 16 for standard lunch or 'S' to skip") test_prep = input("Enter 17 for no test preparation course, 18 for completed test preparation course or 'S' to skip") user_choices.extend([gender, group, parent_ed, lunch, test_prep]) return user_choices # - def probability_given_user_input(score, user_choices): probability = 0 convert_number_responses = [] if(user_choices[0].isnumeric()): probability = user_choices[0] choices = iter(user_choices) next(choices, None) for choice in choices: if(choice.isnumeric()): convert_number_responses.append(index_selection[choice]) print(convert_number_responses) if(score == 0): for choice in convert_number_responses: print(above_avg_probabilities[choice]) probability *= above_avg_probabilities[choice] else: for choice in choices: if(choice.isnumeric()): probability *= below_avg_probabilities[choice] return probability def calculate_probability_of_below_avg(total_below_average): for x in below_avg_observations: val = below_avg_observations[x] below_avg_probabilities[x] = float(val / total_below_average) def x_given_below_avg(row, mean_of_math_scores): for x in row: if(below_average(row, mean_of_math_scores)): if(x != 'math score' and x != 'reading score' and x != 'writing score'): below_avg_observations[row[x]] += 1 def count_below_avg(row, mean_of_math_scores): if(below_average(row, mean_of_math_scores)): return 1 return 0 # + # calculates the percent of above average math scores def count_above_avg(row, mean_of_math_scores): if(above_average(row, mean_of_math_scores)): return 1 return 0 # + # count all occurences of x given above average def x_given_above_avg(row, mean_of_math_scores): for x in row: if(above_average(row, mean_of_math_scores)): if(x != 'math score' and x != 'reading score' and x != 'writing score'): above_avg_observations[row[x]] += 1 # + # calculate probability for each occurences of p(x | given above average) by dividing by the average of above average math scores def calculate_probability_of_above_avg(total_above_average): for x in above_avg_observations: val = above_avg_observations[x] above_avg_probabilities[x] = float(val / total_above_average) # + # is x given y true? 
def x_given_y(x, y):
    return x and y


# +
# generate math score list for the standard deviation method
def append_math_score(row, score_list):
    math_score = int(row['math score'])
    score_list.append(math_score)
    return score_list


# +
# return standard deviation of math scores
def standard_deviation(scores):
    stan = std.stdev(scores)
    return stan


# +
# Returns true if the row's math score is at least the average math score
def above_average(row, mean_of_math_scores):
    above = float(row['math score'])
    return above >= mean_of_math_scores
# -

def below_average(row, mean_of_math_scores):
    below = int(row['math score'])
    return below < mean_of_math_scores


# +
# race/ethnicity is group A
def is_group_a(row):
    group = row['race/ethnicity']
    return group == 'group A'
# -

def is_group_b(row):
    group = row['race/ethnicity']
    return group == 'group B'


def is_group_c(row):
    group = row['race/ethnicity']
    return group == 'group C'


def is_group_d(row):
    group = row['race/ethnicity']
    return group == 'group D'


def is_group_e(row):
    group = row['race/ethnicity']
    return group == 'group E'


def is_female(row):
    female = row['gender']
    return female == 'female'


def parents_bachelors_degree(row):
    education = row['parental level of education']
    return education == "bachelor's degree"


def parents_masters(row):
    education = row['parental level of education']
    return education == "master's degree"


def parents_some_college(row):
    education = row['parental level of education']
    return education == 'some college'


def parents_high_school(row):
    education = row['parental level of education']
    return education == 'high school'


def parents_associates(row):
    education = row['parental level of education']
    return education == "associate's degree"


def free_lunch(row):
    free = row['lunch']
    return free == 'free/reduced'


def prep_course(row):
    prep = row['test preparation course']
    return prep == 'completed'

# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:tf2p0]
#     language: python
#     name: conda-env-tf2p0-py
# ---

import gym
import matplotlib.pyplot as plt
import matplotlib.animation as animation

env_name = 'Breakout-v0'
env = gym.make(env_name)

# +
frames = []  # array to store state space at each step
env.reset()
done = False
for _ in range(300):
    #print(done)
    frames.append(env.render(mode='rgb_array'))
    obs, reward, done, _ = env.step(env.action_space.sample())
    if done:
        break
# -

patch = plt.imshow(frames[0])
plt.axis('off')


def animate(i):
    patch.set_data(frames[i])


anim = animation.FuncAnimation(plt.gcf(), animate, \
                               frames=len(frames), interval=10)
anim.save('random_agent.gif', writer='imagemagick')

import gym
env = gym.make("Breakout-v0")
env = gym.wrappers.Monitor(env, 'recording', force=True)
observation = env.reset()
for _ in range(1000):
    #env.render()
    action = env.action_space.sample()  # your agent here (this takes random actions)
    observation, reward, done, info = env.step(action)

    if done:
        observation = env.reset()
env.close()

# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernel_info:
#     name: dev
#   kernelspec:
#     display_name: 'Python 3.8.5 64-bit (''base'': conda)'
#     metadata:
#       interpreter:
#         hash: dca0ade3e726a953b501b15e8e990130d2b7799f14cfd9f4271676035ebe5511
#     name: python3
# ---

# Update sklearn to prevent version mismatches
import sys
# !{sys.executable} -m pip install sklearn --upgrade
# install joblib. This will be used to save your model.
# Restart your kernel after installing # !pip install joblib import pandas as pd import numpy as np pd.set_option('display.max_columns', None) df = pd.read_csv("exoplanet_data.csv") # Drop the null columns where all values are null df = df.dropna(axis='columns', how='all') # Drop the null rows df = df.dropna() df.head() # # Read the CSV and Perform Basic Data Cleaning # # Select your features (columns) # Set features. This will also be used as your x values. selected_features = df.iloc[:,1:] selected_features.head() # # Create a Train Test Split # # Use `koi_disposition` for the y values from sklearn.model_selection import train_test_split X = selected_features y = df["koi_disposition"] X_train, X_test, y_train, y_test = train_test_split( X, y, random_state=1) X_train.head() # # Pre-processing # # Scale the data using the MinMaxScaler and perform some feature selection # Scale your data from sklearn.preprocessing import MinMaxScaler X_scaler = MinMaxScaler().fit(X_train) X_train_scaled = X_scaler.transform(X_train) X_test_scaled = X_scaler.transform(X_test) from sklearn.ensemble import RandomForestClassifier scores=[] for n in range(1,200,2): rf = RandomForestClassifier(n_estimators=n) rf = rf.fit(X_train_scaled, y_train) scores.append([n,rf.score(X_test_scaled, y_test)]) from operator import itemgetter sorted(scores,key =itemgetter(1),reverse=True)[0:2] model = RandomForestClassifier(n_estimators=189) model = model.fit(X_train_scaled, y_train) # # Train the Model # # print(f"Training Data Score: {model.score(X_train_scaled, y_train)}") print(f"Testing Data Score: {model.score(X_test_scaled, y_test)}") # # Hyperparameter Tuning # # Use `GridSearchCV` to tune the model's parameters # Create the GridSearchCV model from sklearn.model_selection import GridSearchCV param_grid = { 'n_estimators': list(range(1,300,10)), 'max_features': ['auto', 'sqrt', 'log2'] } grid = GridSearchCV(estimator=model, param_grid=param_grid, cv= 5) # Train the model with GridSearch grid.fit(X_train, y_train) print(grid.best_params_) print(grid.best_score_) from sklearn.metrics import classification_report final_model = RandomForestClassifier(n_estimators=211,max_features= 'sqrt') final_model.fit(X_train_scaled, y_train) pred = final_model.predict(X_test_scaled) print(final_model.score(X_test_scaled,y_test)) print(classification_report(y_test, pred, target_names=list(y.unique()))) # # Save the Model # save your model by updating "your_name" with your name # and "your_model" with your model variable # be sure to turn this in to BCS # if joblib fails to import, try running the command to install in terminal/git-bash import joblib filename = '../models/RF_exoplanet.sav' joblib.dump(final_model, filename) # + """ Vehicles IV """ # Create an instance of each child class. Access all their attributes and methods, including those inherited from their parent class Vehicle. # TAKEAWAY! - Vehicle is the baseline class for other more specific types of vehicles. Typically, you wouldn't instantiate a Vehicle because the child classes are more useful for storing information about vehicles. The Vehicle class serves to create a relationship between its children. However, "submarine" might be created as a Vehicle because it's so rare that you might not need a full Submarine class! 
class Vehicle(): def __init__(self, name, owner): self.name = name self.owner = owner category = 'transportation' def start_engine(self): print('Vrrrrrooomm!') class Car(Vehicle): def __init__(self, name, owner): self.name = name self.owner = owner motion = 'drive' terrain = 'land' def honk_horn(self): print('HONK!') class Plane(Vehicle): def __init__(self, name, owner): self.name = name self.owner = owner motion = 'fly' terrain = 'air' def take_off(self): print('Fasten your seatbelts!') class Boat(Vehicle): def __init__(self, name, owner): self.name = name self.owner = owner motion = 'sail' terrain = 'water' def drop_anchor(self): print('Anchors away!') # CAR INSTANCE car1 = Car('The Batmobile','Batman') print(car1.category) # transportation print(car1.owner, 'can', car1.motion, car1.name, 'on', car1.terrain) # The Batmmobile can drive on land car1.start_engine() # Vrrrrrooomm! car1.honk_horn() # HONK! print('\n') # PLANE INSTANCE plane1 = Plane('The Canary', 'Amelia Earhart') print(plane1.category) # transportation print(plane1.owner, 'can', plane1.motion, plane1.name, 'through the', plane1.terrain) # Amelia Earhart can fly The Canary through the air plane1.start_engine() # Vrrrrrooomm! plane1.take_off() # Fasten your seatbelts! print('\n') # BOAT INSTANCE boat1 = Boat('Jenny', '') print(boat1.category) # transportation print(boat1.owner, 'can', boat1.motion, boat1.name, 'on', boat1.terrain) # Forrest Gump can sail Jenny on water boat1.start_engine() # Vrrrrrooomm! boat1.drop_anchor() # Anchors Away! # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Series # A series is a 1D labelled array. It represents a single column of data. import pandas as pd import glob # ### Create a pandas series object from a list # pd.series is a constructor method # object is the internal python lingo for string ice_cream = ['chocolate', 'vanilla', 'rum_raisin', 'hazelnut'] pd.Series(ice_cream) # One of the advantages of a pandas series over a list is that a pandas series can have indices which are other than numbers like strings, datetime objects etc. Numeric index is the default that is set when no index is mentioned in particular. lottery = 4,8,15, 17, 23, 56 pd.Series(lottery) registrations = [True, False, False, False, True] pd.Series(registrations) # ### Intro to attributes # Objects in python have attributes and methods. A pandas series is just one type of object. Attributes do not modify an object but instead they carry some information about the object. 
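# For a quick side-by-side look at that distinction, here is a tiny hypothetical
# series (not part of the lesson data): an attribute is looked up without
# parentheses and only reports information, while a method is called with
# parentheses and computes a new value from the data.

# +
nums = pd.Series([1, 2, 3])
nums.size    # attribute: number of elements, nothing is computed or changed
nums.sum()   # method: called with parentheses, returns a freshly computed value (6)
# -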
about_me = ['smart', 'handsome', 'brilliant', 'humble'] about_me = pd.Series(about_me) about_me about_me.values about_me.index about_me.dtype # ### Intro to methods prices = [2.99, 3.45, 6.99] prices = pd.Series(prices) prices.sum() prices.product() prices.mean() # ### Parameters and Arguments # + fruits = ['apple', 'orange', 'mango', 'grape', 'blueberry'] weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday'] # first argument - values, second argument - index pd.Series(fruits, weekdays) # - fruits = pd.Series(data=fruits, index=weekdays) fruits # index labels in a pandas series do not have to be unique fruits = ['apple', 'orange', 'mango', 'grape', 'blueberry', 'watermelon'] weekdays_ = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'monday'] fruits = pd.Series(data = fruits, index=weekdays_) fruits glob.glob('./data/*.csv') pd.read_csv('./data/pokemon.csv') # If we wanted to import specific columns we can `usecols` pd.read_csv('./data/pokemon.csv', usecols=['Pokemon']) # If you want it to be a series object, use the squeeze arg pd.read_csv('./data/pokemon.csv', usecols=['Pokemon'], squeeze=True) pokemon = pd.read_csv('./data/pokemon.csv', usecols=['Pokemon'], squeeze=True) google = pd.read_csv('./data/google_stock_price.csv', squeeze=True) google pokemon.head() pokemon.head(10) pokemon.tail(7) # ### Python Builtin functions # Returns all of the available methods and attributes for that object dir(pokemon) sorted(pokemon) # numeric values in ascending order sorted(google) # index - key, value - value dict(google) # Alphabetically the largest coz of Z print(max(pokemon)) print(min(pokemon)) print(max(google)) print(min(google)) # Are there any duplicates in the series? pokemon.is_unique # Share price may have been same across different days google.is_unique # A series is always a 1D data! 
pokemon.ndim google.ndim #721 rows x 1column pokemon.shape google.shape # size counts null values pokemon.size google.size #name of the series pokemon.name google.name pokemon.name = 'Pocket Monsters' pokemon.head() # alphabetical order pokemon.sort_values().head() google.sort_values().head() pokemon.sort_values(ascending=False).head() google.sort_values(ascending=False).head() pokemon.sort_values(ascending=False, inplace=True) pokemon.head() # to get back original order pokemon.sort_index() pokemon.sort_index(ascending=False) # ### `in` keyword # By default the in operation checks the index "Hoopa" in pokemon 100 in pokemon "Hoopa" in pokemon.values pokemon.sort_index(inplace=True) google.sort_index(inplace=True) # Extract values by index position pokemon[1] pokemon[[100,200,300]] pokemon[50:55] pokemon[:5] pokemon[715:] pokemon[-20:] pokemon[-30:-10] pokemon = pd.read_csv('./data/pokemon.csv', index_col='Pokemon', squeeze=True) pokemon.head() pokemon[0] pokemon[[130, 100]] pokemon['Bulbasaur'] pokemon['Ditto'] pokemon[['Charizard', 'Jolteon']] pokemon['Charmander':'Weedle'] pokemon[:'Weedle'] pokemon['Charmander':'Weedle':2] # Pikachu exists and digimon doesn't pokemon[['Pikachu', 'Digimon']] pokemon.reindex(index=['Pikachu', 'Digimon']) # It is a good idea to sort your index because pandas provides many methods that can # take advantage of a sorted indices # Pandas works faster on sorted index pokemon.sort_index(inplace=True) pokemon.get('Moltres') pokemon.get([0,5]) pokemon.get(key='Digimon', default='This is not a pokemon') pokemon.get(key='Charizard', default='This is not a pokemon') pokemon.get(key=['Charizard', 'Meowth'], default='This is not a pokemon') pokemon.get(key=['Charizard', 'Digimon'], default='This is not a pokemon') # ### Math methods google = pd.read_csv('./data/google_stock_price.csv', squeeze=True) # count the number of non-NA values google.count() google.sum() google.mean() google.std() google.min() google.max() google.median() google.mode() google.describe() # ### `idxmax` and `idxmin` print(google.max()) print(google.min()) google.idxmax() google[google.idxmax()] google.idxmin() google[google.idxmin()] # ### `value_counts` pokemon pokemon.value_counts() pokemon.value_counts().sum() pokemon.value_counts(ascending=True) # ### `apply` method def performance_class(number): if number < 300: return 'OK' elif number >= 3000 and number < 650: return 'Satisfactory' else: return 'Incredible' google.apply(performance_class) # ### `map` method pokemon_names = pd.read_csv('./data/pokemon.csv', usecols=['Pokemon'], squeeze=True) pokemon_names.head(3) pokemon_types = pd.read_csv('./data/pokemon.csv', index_col=['Pokemon'], squeeze=True) pokemon_types pokemon_names.map(pokemon_types) pokemon_names = pd.read_csv('./data/pokemon.csv', usecols=['Pokemon'], squeeze=True) pokemon_types = pd.read_csv('./data/pokemon.csv', index_col=['Pokemon'], squeeze=True).to_dict() pokemon_types pokemon_names.map(pokemon_types) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="eCQGHreTsnMi" # # Homework 8 - # + [markdown] id="5_HSEjtAuA9b" # ## Preliminaries # + [markdown] id="K3cljo7Ytf8P" # Download [this version](https://utexas.instructure.com/courses/1319193/files/folder/Data?preview=63145828) of our survey data with reasonable column names in the header line. Then upload it (if using Colab) so that the follow code works. 
If you are using Jupyter, could can use `read_csv()` directly after downloading the data. # + id="EpF2UsDwB-xI" # %load_ext sql # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="OYnDQXtICdcT" outputId="ba127530-4396-4f9e-a245-528555301c90" # %sql sqlite:// # + id="_vS4tBSr0tBW" import pandas as pd # + id="8OLhve4z3Fps" survey = pd.read_csv("survey.csv") # + colab={"base_uri": "https://localhost:8080/", "height": 53} id="DtnEgwXT3H6c" outputId="fdd1327a-81f8-41e3-c514-ed4f236b57c4" # %sql persist survey # + colab={"base_uri": "https://localhost:8080/", "height": 97} id="2XVV-_TNt88X" outputId="f3e751a2-5eab-46ed-89f1-4cb12b058b8e" language="sql" # SELECT COUNT(*) FROM survey # + colab={"base_uri": "https://localhost:8080/", "height": 566} id="eaSNC_DOR4A-" outputId="9e51335b-f2e7-4abb-f5fc-466b87e44756" language="sql" # SELECT * # FROM survey # LIMIT 5 # + [markdown] id="3gmFhFzcrUuH" # ## Problem 1 # + [markdown] id="PAnc7ny8rhuo" # Write a SQL query that shows the top three `siblings` values (in descending order). # + id="BArZbWWRrf_W" colab={"base_uri": "https://localhost:8080/", "height": 139} outputId="4bb69c4a-567f-49ee-fd62-44565b809cc6" language="sql" # SELECT siblings # FROM survey # ORDER BY siblings DESC # LIMIT 3 # + [markdown] id="JhXuC8PbrkU9" # ## Problem 2 # + [markdown] id="GCKLJks8rlvH" # Write a SQL query that shows the number of survey responses by degree in descending order of those counts. # + id="X9qWJPvqrlFO" colab={"base_uri": "https://localhost:8080/", "height": 225} outputId="17bf5055-4a8b-4b19-a3a5-2f1c62bbf2a8" language="sql" # SELECT degree, COUNT(*) AS survey_resp # FROM survey # GROUP BY degree # ORDER BY survey_resp DESC # + [markdown] id="wJfFhwfJrqIo" # ## Problem 3 # + [markdown] id="6D1A89Odrsnu" # Write a query that returns the mean instructor greatness responses by degree. The data frame should be in descending order of mean greatness. # + id="fd_dLF1JrrfF" colab={"base_uri": "https://localhost:8080/", "height": 225} outputId="a92ae554-6bf8-448d-8c5e-1fc4e47f7d23" language="sql" # SELECT degree, AVG(instructor) AS mean_instructor # FROM survey # GROUP BY degree # ORDER BY mean_instructor DESC # + [markdown] id="Oy2OtMYqrwPM" # ## Problem 4 # + [markdown] id="G0N8EJU0rxtg" # Another query: For Business majors, the count of responses by computer type. # # + id="aTc0BQYFwq0n" colab={"base_uri": "https://localhost:8080/", "height": 118} outputId="2112c38e-45fd-40a8-d7aa-6d02a4131ac2" language="sql" # SELECT computer, COUNT(computer) AS resp_count # FROM survey # WHERE degree = 'Business' # GROUP BY computer # ORDER BY resp_count DESC # + [markdown] id="nWVdPITbr3Hw" # ## Problem 5 # + [markdown] id="RrKbDLcwr5n-" # Another query: For Business majors with Macs, the mean number of siblings by queso. # + id="k5Z00vZirw-e" colab={"base_uri": "https://localhost:8080/", "height": 118} outputId="aa3616a7-526a-45cb-e546-09c0d38ba33c" language="sql" # SELECT queso, AVG(siblings) AS mean_siblings # FROM survey # WHERE degree = 'Business' AND computer = 'Mac' # GROUP BY queso # ORDER BY mean_siblings desc # + [markdown] id="g45BWGl-Wp09" # : # + [markdown] id="oEcNXWqTr_Fe" # ## Problem 6 # + [markdown] id="BgOzklZFr-yQ" # Consider a strange metric that is `math+instructor`. This metric, which of course is the # sum of the "how much you like math" response and the "how great is the instructor response", # might have tremendous predictive value. Let's investigate. # # Compute the mean value of this metric by `computer` value. 
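# Optional cross-check (not required by the problem): the same aggregation can be
# run directly on the `survey` DataFrame with pandas. This is only a sketch; it
# assumes the `math`, `instructor`, and `computer` column names used in the SQL
# answer below.

# +
(survey.assign(math_instructor=survey["math"] + survey["instructor"])
       .groupby("computer")["math_instructor"]
       .mean()
       .sort_values(ascending=False))
# -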
# # + id="qXNOXWY5sFmf" colab={"base_uri": "https://localhost:8080/", "height": 139} outputId="02f51f6a-58bc-427d-eea7-7d2869fec95c" language="sql" # SELECT computer, AVG(math + instructor) AS math_instructor # FROM survey # GROUP BY computer # ORDER BY math_instructor desc # + [markdown] id="0LYn0OnLsGe1" # ## Problem 7 # + [markdown] id="gsAjCVgKsI7m" # Write a query that returns the maximum math+instructor score. # + id="1tLJUiJ9sHqv" colab={"base_uri": "https://localhost:8080/", "height": 97} outputId="fa2d61d3-96a0-4267-9063-009a3a7f9f16" language="sql" # SELECT MAX(math + instructor) AS max_math_instructor # FROM survey # ORDER BY max_math_instructor desc # + [markdown] id="_lmKs1XushZ4" # ## Problem 8 # + [markdown] id="NERiByzesi49" # Another query: For responses reporting one or two siblings, count votes for each `queso`. # + id="9mmw7DihsicY" colab={"base_uri": "https://localhost:8080/", "height": 139} outputId="11ad679d-f84c-4d2c-c2cd-8887a8d4520f" language="sql" # SELECT queso, COUNT(*) as count # FROM survey # WHERE siblings = 1 OR siblings = 2 # GROUP BY queso # ORDER BY count DESC # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + [markdown] id="IWP75K_G2avq" # ##For Loop # + colab={"base_uri": "https://localhost:8080/"} id="6s981Dry0yYJ" outputId="bf8368af-cca6-46e7-d937-11849461ea7f" week = ["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"] for x in week: print(x) # + [markdown] id="woJSvHZk3C-i" # The Break Statement # + colab={"base_uri": "https://localhost:8080/"} id="6CoTegQE3StD" outputId="a73f590b-20b3-43d4-a8ee-5673e202a103" for x in week: print(x) if x=="Thursday": break # + colab={"base_uri": "https://localhost:8080/"} id="3wxPUlty4Z97" outputId="6ec79a33-7cb3-46d9-a752-2f64da206fc4" for x in week: if x=="Thursday": break print(x) # + [markdown] id="z9jp8lsK5E4k" # Looping through a String # + colab={"base_uri": "https://localhost:8080/"} id="-FkE8hXs5My7" outputId="e9a85177-9de1-46b4-e950-2464c1c23dfd" for x in "Programming Logic and Design": print(x) # + [markdown] id="XW9DsF5V5fzK" # The range() Function # + colab={"base_uri": "https://localhost:8080/"} id="Ro81iZLX5l73" outputId="d5a6d96d-bac8-47cc-bf88-82d65f0e274f" for x in range(6): print(x) # + colab={"base_uri": "https://localhost:8080/"} id="l4YmVE886DKc" outputId="b5c2be7d-82c2-4f8b-dfec-5466423613fd" for x in range(2,6): print(x) # + [markdown] id="V86pRMVQ6Onq" # Nested Loops # + colab={"base_uri": "https://localhost:8080/"} id="aY8ndvuB6QKo" outputId="d3f744eb-2e6d-44f6-da61-a055aa2a7545" adjective = ["red", "big", "tasty"] fruits = ["apple", "banana", "cherry"] for x in adjective: for y in fruits: print(x,y) # + [markdown] id="sz__CiSi70NN" # ##While loop # + colab={"base_uri": "https://localhost:8080/"} id="nqtIiRx6749s" outputId="9c520874-08d6-426e-cb8e-bb9f7deb8a0e" i=1 while i<6: print(i) i+=1 #The same as i=i+1,#Assignment operator for addition # + [markdown] id="mJlyyazd9B-J" # The Break Statement # + colab={"base_uri": "https://localhost:8080/"} id="56Qa3Vry89Ha" outputId="c5f8febd-1ddd-4131-ec86-7346f2f247e6" i=1 while i<6: print(i) if i==3: break else: i+=1 # + [markdown] id="nK_bx5o_91ve" # The continue Statement # + colab={"base_uri": "https://localhost:8080/"} id="F2NesYmL95Xg" outputId="a78b803b-8cd6-4ca5-8502-d10d95f0ef8e" i=0 while i<6: i+=1 
if i==3: continue print(i) # + [markdown] id="YuNpq7fr-rDl" # The else statement # + colab={"base_uri": "https://localhost:8080/"} id="wEDRFCIb-uF_" outputId="ff30d14b-06a7-4ff7-f958-6ac68ed80d4c" i=1 while i<6: print(i) i+=1 else: print("i is no longer less than 6") # + [markdown] id="Cz4pbYx8_SuX" # Application 1 # + colab={"base_uri": "https://localhost:8080/"} id="qb4D56Ek_URD" outputId="bd0a1022-6079-4ac2-89b7-6f21f4f2e971" value = ["Value 1","Value 2","Value 3","Value 4","Value 5","Value 6","Value 7","Value 8","Value 9","Value 10"] for x in value: print(x) # + colab={"base_uri": "https://localhost:8080/"} id="XIadKB-oAcVk" outputId="d13c5aa5-909a-4d78-ba1f-488e60fca840" value = ["Value"] number = ["0","1","2","3","4","5","6","7","8","9","10"] for x in value: for y in number: print(x,y) i=0 while i<=10: print("Value",i) i+=1 # + [markdown] id="jjN5JlxTBYOl" # Application 2 # + colab={"base_uri": "https://localhost:8080/"} id="wq3looD5BZtD" outputId="69ce6db8-cf77-4f95-81b6-423bc7f1c0ad" i=4 while i<20: print(i) i+=1 # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #pip install umap-learn #pip install plotly #pip install kaleido # - import numpy as np import pandas as pd import umap import plotly.express as px dataraw = pd.read_csv("MapRateFiltered_tiny_75.csv") dataraw.head() datafilter= [col for col in dataraw if col.startswith('AT')] dataquant = dataraw[datafilter] dataquant.head() UMAP_2d = umap.UMAP(n_neighbors=20,min_dist=.1,n_components=2) UMAP_3d = umap.UMAP(n_neighbors=20,min_dist=.1,n_components=3) proj_2d = UMAP_2d.fit_transform(dataquant) proj_3d = UMAP_3d.fit_transform(dataquant) print(proj_2d.shape) print(proj_3d.shape) # + fig_2d = px.scatter( proj_2d, x=0, y=1, color=dataraw.Tissue, labels={'color': 'tissue type'} ) fig_3d = px.scatter_3d( proj_3d, x=0, y=1, z=2, color=dataraw.Tissue, labels={'color': 'tissue type'} ) fig_3d.update_traces(marker_size=3) fig_2d.show() fig_3d.show() # - fig_2d.write_image("UMAP_1000Gene_2D.png") fig_3d.write_image("UMAP_1000Gene_3D.png") fig_3d.write_html("UMAP_1000Gene.html") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + id="bVYUQYyGigyK" import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # + id="msjrt9f2jBsA" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="d4c0a957-00e2-44c9-eb33-2d01e42d7159" train =pd.read_csv('/content/sample_data/california_housing_train.csv') test= pd.read_csv('/content/sample_data/california_housing_test.csv') train.head() # + id="lEN6_mxijPW3" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="0490ee80-d8d0-48d7-989a-6c1196aa2f2a" test.head() # + colab={"base_uri": "https://localhost:8080/", "height": 300} id="F7areousk6sr" outputId="2165845a-8351-4fc2-c892-c6144a5b5b1b" train.describe() # + colab={"base_uri": "https://localhost:8080/", "height": 771} id="x2kPRTXEk9TK" outputId="8fc69aa5-47ff-4ba0-9340-517d733598e1" train.hist(figsize=(15,13), grid=False, bins=50) plt .show() # + id="xL6OX6QDl-Vn" correlation=train.corr() # + colab={"base_uri": "https://localhost:8080/", "height": 691} id="jjHi5JaEmLFX" outputId="0634a1ff-4ca1-4ab4-83ba-c289f8246efb" 
plt.figure(figsize=(10,10)) sns.heatmap(correlation, annot =True) plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np from matplotlib import pyplot as plt import scipy.stats as st import statsmodels.api as sm from matplotlib import rcParams from matplotlib.pyplot import matshow # ## Loading the data and preparing the DataFrame from the csv file ## data_frame = pd.read_csv('framingham.csv') df = pd.DataFrame(data_frame) df_pre=pd.DataFrame(data_frame) df df.isnull() df.isnull().sum() df.sum() df.drop(['education'], axis = 1, inplace = True) df.size df.shape df.head() df.isnull().sum() rcParams['figure.figsize'] = 6,5 plt.bar(df.TenYearCHD.unique(), df.TenYearCHD.value_counts(), color = ['purple', 'blue']) plt.xticks([0, 1]) plt.xlabel('Target Classes') plt.ylabel('Count') plt.title('Count of each Target Class') print(df.TenYearCHD.value_counts()) # A total of 4240 data with 15 columns, 644 observations to be risked to heart disease, and 388 data are missing or invalid. # ## Data Preparation ## # Dropping the missing data # + df_test=df df_test.dropna(axis=0,inplace=True) df_test.shape rcParams['figure.figsize'] = 6,5 plt.bar(df_test.TenYearCHD.unique(), df_test.TenYearCHD.value_counts(), color = ['purple', 'blue']) plt.xticks([0, 1]) plt.xlabel('Target Classes') plt.ylabel('Count') plt.title('Count of each Target Class after Dropping the missing observations') print(df_test.TenYearCHD.value_counts()) # - # Dropping so many observations in this case might cause irrelevance in the training the model. So we impute the data. # ## Imputation and Scaling using Pipeline ## data_frame = pd.read_csv('framingham.csv') df = pd.DataFrame(data_frame) df.drop(['education'], axis = 1, inplace = True) df.shape from sklearn.impute import SimpleImputer from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline # + cols=["male","age","currentSmoker","cigsPerDay","BPMeds","prevalentStroke","prevalentHyp","diabetes","totChol","sysBP","diaBP","BMI","heartRate","glucose"] # - X_components=df.columns[:-1] ddf=df[X_components] ddf # + pipe1=Pipeline([("imputer",SimpleImputer(strategy="mean")),("scaler",StandardScaler())]) df1=pipe1.fit_transform(ddf) df_mean=pd.DataFrame(data=df1[0:,0:], columns=cols) pipe2=Pipeline([("imputer",SimpleImputer(strategy="median")),("scaler",StandardScaler())]) df2=pipe1.fit_transform(ddf) df_median=pd.DataFrame(data=df2[0:,0:], columns=cols) pipe3=Pipeline([("imputer",SimpleImputer(strategy="most_frequent")),("scaler",StandardScaler())]) df3=pipe1.fit_transform(ddf) df_most=pd.DataFrame(data=df3[0:,0:], columns=cols) #imp1=SimpleImputer(strategy="mean") #imp2=SimpleImputer(strategy="median") #imp3=SimpleImputer(strategy="most_frequent") # - df_mean.shape df_mean # This is the preprocessed data # ## Exploratory Analysis ## # ## Histogram ## # + from ipywidgets import widgets feature_desc={'age':'Age of person', 'cigsPerDay':'No of average ciggarete taken per day', 'BPMeds':'BPMeds', 'prevalentStroke':'prevalentStroke', 'prevalentHype':'prevalentHype', 'diabetes':'diabetes', 'totChol':'Total Cholesterol Value Measured', 'sysBP':'sysBP', 'diaBP':'diaBP', 'BMI':'Body Mass Index', 'heartRate':'Heart Rate', 'glucose':'Glucose', 'TenYearCHD':'Ten Year CHD'} def hist_feature(column): df[column].hist(bins=20,facecolor='midnightblue') plt.show() 
dropdown_menu = {v:k for k,v in feature_desc.items()} widgets.interact(hist_feature, column=dropdown_menu) # - # ## Correlation Matrix Visualization ## # + from matplotlib import rcParams from matplotlib.pyplot import matshow rcParams['figure.figsize'] = 3,8 plt.matshow(df.corr()) plt.yticks(np.arange(df_mean.shape[1]), df.columns) plt.xticks(np.arange(df_mean.shape[1]), df.columns) plt.colorbar() # - rcParams['figure.figsize'] = 8,6 plt.bar(df.TenYearCHD.unique(), df.TenYearCHD.value_counts(), color = ['purple', 'blue']) plt.xticks([0, 1]) plt.xlabel('Target Classes') plt.ylabel('Count') plt.title('Count of each Target Class') df_mean.describe() # ### Conclusion of Exploratory Analysis # Out of 3715 observations over 500 observation (Patient) are at the risk of heart disease. def draw_histograms(dataframe, features, rows, cols): fig=plt.figure(figsize=(20,20)) for i, feature in enumerate(features): ax=fig.add_subplot(rows,cols,i+1) dataframe[feature].hist(bins=20,ax=ax,facecolor='midnightblue') ax.set_title(feature+" Distribution",color='DarkRed') fig.tight_layout() plt.show() draw_histograms(df,df.columns,6,3) # ## Logistic Regression ## from sklearn.linear_model import LogisticRegression from sklearn.feature_selection import RFE, RFECV, SelectFromModel from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import StratifiedKFold, train_test_split from sklearn.utils import shuffle from sklearn.metrics import confusion_matrix,accuracy_score,classification_report from statsmodels.tools import add_constant df_constant = add_constant(df) df_constant.head() # # ## Why adding a constant column? # It’s because you expect your dependent variable to take a nonzero value when all the otherwise included regressors are set to zero. # # Suppose you want to model the wage as a function of years of secondary schooling (in years). You’d estimate an equation of the form # # yi=α+xiβ+εi # # because one can reasonably expect the wage to take, on average, a positive value even if one’s secondary schooling is null. This value would show up as a constant. # # Note however that a constant may take an absurd value while being relevant for the estimation, or may be irrelevant altogether. Suppose further you’re interested in estimating the model above with variables as deviations from their mean. # # yi−y¯=(α−α¯)+(xi−x¯)β~+νi # # Obviously, the constant equals its average value so that the first term on the right-hand side cancels out. You end up with # # yi−y¯=(xi−x¯)β~+νi # # that is a model without constant. In practice, including one would probably not be of any concern (for a reasonable amount of observations), but would be theoretically injustified. # # Remember that you should always know whether what you estimate makes sense, both from a real and statistical point of view! # # # ## Feature Selection # ## 1. 
Backward elimination (P-value approach) # + X1_components=df_mean.columns X1=df_mean[X1_components] y1=df.TenYearCHD X2_components=df_median.columns X2=df_median[X2_components] y2=df.TenYearCHD # - X1.shape column_list=['male','age','currentSmoker','cigsPerDay' ,'BPMeds','prevalentStroke','prevalentHyp' ,'diabetes', 'totChol', 'sysBP', 'diaBP', 'BMI','heartRate', 'glucose'] df_mean.isnull().sum() # + def feature_selection(data_frame, dependent_variable, column_list): while len(column_list)>0: model = sm.Logit(dependent_variable, data_frame[column_list]) result = model.fit(disp = 0) largest_pvalue = round(result.pvalues, 3).nlargest(1) if largest_pvalue[0] < (0.05): return result break else: column_list = column_list.drop(largest_pvalue.index) cols = df_mean.columns[:-1] result1 = feature_selection(df_mean, y1, cols) print("This is the result using the imputation for mean values") result1.summary() # - column_list=["male","age","cigsPerDay","prevalentStroke","diabetes","sysBP"] new=df_mean[column_list] new # + from matplotlib import rcParams from matplotlib.pyplot import matshow column_list=["male","age","cigsPerDay","prevalentStroke","diabetes","sysBP"] new=df_mean[column_list] rcParams['figure.figsize'] = 20, 14 plt.matshow(new.corr()) plt.yticks(np.arange(new.shape[1]), df.columns) plt.xticks(np.arange(new.shape[1]), df.columns) plt.colorbar() # + result2 = feature_selection(df_median, y2, cols) print("This is the result using the imputation for median values") result2.summary() # - # ## Without KFold # + column_list=["male","age","cigsPerDay","prevalentStroke","diabetes","sysBP"] X=df_mean[column_list] y=df.TenYearCHD X_train,X_test,y_train,y_test=train_test_split(*shuffle(X,y), test_size=0.2, random_state=5) log_model=LogisticRegression() log_model.fit(X_train,y_train) # - log_model.score(X_train,y_train) log_model.score(X_test,y_test) results=confusion_matrix(y,log_model.predict(X)) results classification_report(y_train, log_model.predict(X_train)) classification_report(y_test, log_model.predict(X_test)) # ## 2. 
Recursive Feature Elimination with Cross Validation df_mean.columns # + rfc = RandomForestClassifier() rfecv = RFECV(estimator = rfc, step = 1, cv = StratifiedKFold(10), scoring = 'r2',verbose=1) X_components=df_mean.columns X=df_mean[X_components] y=df.TenYearCHD print(X.shape) print(y.shape) rfecv.fit(X, y) # - X_components=df_mean.columns X=df[X_components] X X.columns # + # X_components=df_mean.columns # X=df[X_components] # dset = df # print(dset.shape) # print(X) # dset['attr'] = X.columns # dset['importance'] = rfecv.estimator_.feature_importances_ # dset = dset.sort_values(by='importance', ascending=False) # plt.figure(figsize=(16, 14)) # plt.barh(y=dset['attr'], width=dset['importance'], color='#1976D2') # plt.title('RFECV - Feature Importances', fontsize=20, fontweight='bold', pad=20) # plt.xlabel('Importance', fontsize=14, labelpad=20) # plt.show() # + # rfecv_unscaled = RFECV(estimator = rfc, # step = 1, # cv = StratifiedKFold(10), # scoring = 'accuracy',verbose=1) # X_components=df_pre.columns # X_unscaled=df_pre[X_components] # y_unscaled=df_pre.TenYearCHD # rfecv_unscaled.fit(X_unscaled, y_unscaled) # - X_components=df_mean.columns X=df_mean[X_components] rfecv_array = [True, True, False,True,False,False,True,False,True,True,True,True,True,True] res = [i for i, val in enumerate(rfecv_array) if not val] X.drop(X.columns[res], axis=1, inplace=True) X_train,X_test,y_train,y_test=train_test_split(*shuffle(X,y), test_size=0.2, random_state=5) log_model=LogisticRegression() log_model.fit(X_train,y_train) log_model.score(X_train,y_train) log_model.score(X_test,y_test) rfecv.estimator_.feature_importances_ # + X_components=df.columns[:-1] X=df[X_components] dset = pd.DataFrame() dset['attr'] = X.columns dset['importance'] = rfecv.estimator_.feature_importances_ dset = dset.sort_values(by='importance', ascending=False) plt.figure(figsize=(16, 14)) plt.barh(y=dset['attr'], width=dset['importance'], color='#1976D2') plt.title('RFECV - Feature Importances', fontsize=20, fontweight='bold', pad=20) plt.xlabel('Importance', fontsize=14, labelpad=20) plt.show() # - # ## 3. Coefficient values X_components=df.columns[:-1] X=df[X_components] y=df.TenYearCHD X_train, X_test, y_train, y_test= train_test_split(X, y, random_state = 0) logreg = LogisticRegression(fit_intercept = False) logreg.fit(X_train, y_train) np.round(logreg.coef_, decimals = 2) > 0 # logreg.coef_ # + # Calculating Accuracy of coefficient values # - # print(np.where(rfecv.support_ == False)[0]) coefficient_array = [ True, True, False, True, True, True, True, True, False,True, False, False, False, False] res = [i for i, val in enumerate(coefficient_array) if not val] X.drop(X.columns[res], axis=1, inplace=True) X X_train,X_test,y_train,y_test=train_test_split(*shuffle(X,y), test_size=0.2, random_state=5) log_model=LogisticRegression() log_model.fit(X_train,y_train) log_model.score(X_train, y_train) log_model.score(X_test, y_test) # ## 4. 
Recursive Feature Extraction X_components=df.columns[:-1] X=df[X_components] y=df.TenYearCHD X_train, X_test, y_train, y_test= train_test_split(X, y, random_state = 0) predictors = X_train selector = RFE(logreg, n_features_to_select = 1) selector = selector.fit(predictors, y_train) order = selector.ranking_ order feature_ranks = [] for i in order: feature_ranks.append(f"{i}.{df.columns[i]}") feature_ranks rfe_array = [True,True,True,True,True,True,True,True,False,False, False, True,True,False] res = [i for i, val in enumerate(rfe_array) if not val] X.drop(X.columns[res], axis=1, inplace=True) X X_train,X_test,y_train,y_test=train_test_split(*shuffle(X,y), test_size=0.2, random_state=5) log_model=LogisticRegression() log_model.fit(X_train,y_train) log_model.score(X_train, y_train) log_model.score(X_test, y_test) # ## 5. Feature Extraction Using SFM X_components=df.columns[:-1] X=df[X_components] y=df.TenYearCHD X_train, X_test, y_train, y_test= train_test_split(X, y, random_state = 0) smf = SelectFromModel(logreg, threshold = -np.inf, max_features = 8) smf.fit(X_train, y_train) feature_idx = smf.get_support() feature_idx # feature_name = df.columns[feature_idx] # feature_name sfm_array =[ True, True, True, False, True, False, True, True, False,False, True, True, False, False] res = [i for i, val in enumerate(sfm_array) if not val] X.drop(X.columns[res], axis=1, inplace=True) X_train,X_test,y_train,y_test=train_test_split(*shuffle(X,y), test_size=0.2, random_state=5) log_model=LogisticRegression() log_model.fit(X_train,y_train) log_model.score(X_train, y_train) log_model.score(X_test, y_test) # ## Naive Bayes ## def separate_by_class(dataset): separated = dict() for i in range(len(dataset)): vector = dataset[i] class_value = vector[-1] if (class_value not in separated): separated[class_value] = list() separated[class_value].append(vector) return separated separate_by_class(df) # ## Using K-fold for cross validation # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %load_ext autoreload # %autoreload 2 # %matplotlib inline import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.cluster import KMeans from sklearn.svm import SVC from sklearn.metrics import roc_auc_score, roc_curve from ast import literal_eval import os import itertools from sklearn import preprocessing from sklearn.linear_model import LogisticRegressionCV, LinearRegression from sklearn.preprocessing import MinMaxScaler import warnings import numpy as np from collections import OrderedDict from lob_data_utils import lob, db_result, overview, roc_results from lob_data_utils.svm_calculation import lob_svm sns.set_style('whitegrid') warnings.filterwarnings('ignore') # - # data_length = 10000 # TODO: not used plot_dir = 'results_additional_features_all' stocks = list(roc_results.results_10000.keys()) # + def rescale(df): scaler = MinMaxScaler() df[['mid_price']] = scaler.fit_transform(df[['mid_price']]) for c in [c for c in df.columns if 'prev' in c or 'next' in c]: df[[c]] = scaler.transform(df[[c]]) return df def svc(clf, df, df_cv, columns): df = rescale(df) df_cv = rescale(df_cv) X = df[columns] y = df['mid_price_indicator'] clf.fit(X, y) pred = clf.predict(X) pred_cv = clf.predict(df_cv[columns]) return roc_auc_score(y, pred), roc_auc_score(df_cv['mid_price_indicator'], pred_cv) # - def plot_results(clf, df, df_cv, 
plot_title='', stock=None): x_columns = {} nums = [2, 5, 10, 20, 50] x_columns['queue_imb'] = [c for c in df.columns if 'queue_im' in c] for n in nums: x_columns['prev_{}'.format(n)] = [ c for c in df.columns if 'prev_mid_price_avg_{}'.format(n) == c] x_columns['prev_{}_and_imb'.format(n)] = [ c for c in df.columns if 'prev_mid_price_avg_{}'.format(n) == c or 'queue_im' in c] results = [] titles = [] for features, x_cols in x_columns.items(): res = svc(clf, df, df_cv, x_cols) titles.append(features) results.append({'train': res[0], 'cv': res[1], 'stock': stock, 'features': features}) df_res = pd.DataFrame(results, index=titles) df_res[['train', 'cv']].plot(kind='bar') plt.legend(loc='lower left') plt.ylim(0, 1) plt.title(plot_title) plt.savefig(os.path.join(plot_dir, '{}_{}.png'.format(stock, plot_title))) return results # %%capture results = [] for stock in stocks: df, df_cv, df_test = lob.load_prepared_data( stock, data_dir='../queue_imbalance/data/avg_mid_bool/', cv=True, include_test=True, length=None) df.dropna(inplace=True) df_cv.dropna(inplace=True) df_test.dropna(inplace=True) df.rename(columns={'Unnamed: 0': 'datetime'}, inplace=True) df.index = df['datetime'] df = df.sort_index() title = 'Logistic Regression for {}'.format(stock) print(title) clf = LogisticRegressionCV() result = plot_results(clf, df, df_cv, plot_title=title, stock=stock) results += result df_res = pd.DataFrame(results) df_res.to_csv('{}/results.csv'.format(plot_dir)) df_res.sort_values(by='cv', ascending=False).groupby('stock').first().groupby( 'features').count().plot(kind='bar') df_best = df_res.sort_values(by='cv', ascending=False).groupby('stock').first() best_features = list(df_best.groupby('features').count().nlargest(10, 'cv').index) # + df_compare = df_res.sort_values(by=['stock', 'cv'], ascending=False).groupby('stock').head( len(df_res['features'].unique())) results = [] feature_combinations = list(itertools.combinations(list(range(len(best_features))), 2)) for s in stocks: r = {'stock': s} for comb in feature_combinations: df_stock = df_compare[df_compare['stock'] == s] a = df_stock[df_compare['features'] == best_features[comb[0]]]['cv'].values[0] b = df_stock[df_compare['features'] == best_features[comb[1]]]['cv'].values[0] r['{} vs {} cv'.format(best_features[comb[0]], best_features[comb[1]])] = a-b r['{} vs {} cv'.format(best_features[comb[1]], best_features[comb[0]])] = b-a results.append(r) df_s = pd.DataFrame(results) #df_s.plot(figsize=(16, 16)) df_s[[c for c in df_s.columns if 'vs queue_imb' in c]].describe() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import labnote as lb import os # + parser = lb.ArgumentParser() parser.add_argument('--epochs',type=int,default=20) parser.add_argument('--batch_size',type=int,default=128) parser.add_argument('--gpu_dev',type=str,default='0') params = parser.parse_args() # + params.num_classes = 10 script_name=None # <- required only for jupyter with password authentification if lb.utils.is_executed_on_ipython(): script_name = 'keras_mnist_sample.ipynb' note = lb.Note('./log_dir',script_name=script_name) note.set_params(params) # - os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = note.params.gpu_dev # + '''Trains a simple deep NN on the MNIST dataset. Gets to 98.40% test accuracy after 20 epochs (there is *a lot* of margin for parameter tuning). 
2 seconds per epoch on a K520 GPU. ''' import keras from keras.datasets import mnist from keras.models import Sequential from keras.layers import Dense, Dropout from keras.optimizers import RMSprop from keras.callbacks import ModelCheckpoint, CSVLogger # + # the data, split between train and test sets (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = x_train.reshape(60000, 784) x_test = x_test.reshape(10000, 784) x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, note.params.num_classes) y_test = keras.utils.to_categorical(y_test, note.params.num_classes) model = Sequential() model.add(Dense(512, activation='relu', input_shape=(784,))) model.add(Dropout(0.2)) model.add(Dense(512, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(note.params.num_classes, activation='softmax')) model.summary() model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy']) # - note.save(memo='sample code for keras mnist. I just want to explain how to use note with general deep learning framework.') with note.record() as rec: print(rec.dirname) csv_name = os.path.join(rec.dirname,"history.csv") model_name = os.path.join(rec.dirname,"mnist_models.pkl") cb_csv = CSVLogger(csv_name) cb_mcp = ModelCheckpoint(model_name,period=5) history = model.fit(x_train, y_train, batch_size=note.params.batch_size, epochs=note.params.epochs, verbose=1, validation_data=(x_test, y_test), callbacks=[cb_csv,cb_mcp] ) score = model.evaluate(x_test, y_test, verbose=0) with open(os.path.join(rec.dirname,"score.txt"),'w') as f: f.write("Test loss: %f\n"%score[0]) f.write("Test accuracy: %f\n"%score[1]) last_exp = rec.dirname with open(os.path.join(last_exp,"score.txt")) as f: for l in f: print(l) exit() !!! STATEMENT !!!**This notebook is done by (QI) from Shanghai Maritime University, as the final project for course _Machine Learning: Theory and Application_ by Peter the Great St. Petersburg Polytechnic University Winter School 2022.** It's open sourced under MIT license, you may check it on GitHub: https://github.com/tnqzh123/mymusicalpreferences .The competiton score(accuracy of prediction) may be effected by the randomness of `train_test_split` and model fitting. 
All predictions are generated by the same code(okay maybe some variable and syntax has a little difference but actually all codes are doing the same thing), whether score is high or low.**HAPPY 2022 LUNAR NEW YEAR !!!** Preparation Import Often-used Libraries!pip install deep-forestCollecting deep-forest Downloading deep_forest-0.1.5-cp37-cp37m-manylinux2010_x86_64.whl (2.4 MB) |████████████████████████████████| 2.4 MB 597 kB/s [?25hRequirement already satisfied: joblib>=0.11 in /opt/conda/lib/python3.7/site-packages (from deep-forest) (1.1.0) Requirement already satisfied: scikit-learn>=0.22 in /opt/conda/lib/python3.7/site-packages (from deep-forest) (0.23.2) Requirement already satisfied: scipy>=0.19.1 in /opt/conda/lib/python3.7/site-packages (from deep-forest) (1.7.3) Collecting numpy<1.20.0,>=1.16.0 Downloading numpy-1.19.5-cp37-cp37m-manylinux2010_x86_64.whl (14.8 MB) |████████████████████████████████| 14.8 MB 66.4 MB/s [?25hRequirement already satisfied: threadpoolctl>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from scikit-learn>=0.22->deep-forest) (3.0.0) Installing collected packages: numpy, deep-forest Attempting uninstall: numpy Found existing installation: numpy 1.20.3 Uninstalling [...]_Don't worry about this ERROR, it doesn't contain packages we need to use in this notebook._import pandas as pd import numpy as np import missingnoRead DataPATH = '../input/mymusicalprefrences/' dataAll = pd.concat([pd.read_csv(PATH + 'train.csv'), pd.read_csv(PATH + 'test.csv')]).reset_index(drop = True) dataAll.rename(columns = {'Vocal ': 'Vocal'}, inplace = True) # Remove the spaceCheck the shape of data.dataAll.shapeMake a list of features.dataAll.columns features = { 'cat': { 'single': ['Track', 'Version', 'Album', 'Album_type', 'Key'], 'multiple': ['Artists', 'Artists_Genres', 'Vocal', 'Labels', 'Country'] }, 'num': ['Category', 'Duration', 'Release_year', 'BPM', 'Energy', 'Dancebility', 'Happiness'] }Data Preprocessing Fill Missing Valuemissingno.bar(dataAll)One row of training data is mostly filled with `NaN`. It's useless so I droped it.dataAll[dataAll['Artists'].isna()] dataAll = dataAll.drop(661).reset_index(drop = True)Fill some missing data with true value irl. These data are collected by searching in Google.- AlbumdataAll[dataAll['Album'].isna()] dataAll.at[250, 'Album'] = 'Hello' dataAll.at[501, 'Album'] = 'Ready to Die'- LabelsdataAll[dataAll['Labels'].isna()] dataAll.at[85, 'Labels'] = 'Universe International' dataAll.at[195, 'Labels'] = 'Virgin' dataAll.at[250, 'Labels'] = 'Mercury' dataAll.at[501, 'Labels'] = 'Bad Boy' dataAll.at[508, 'Labels'] = 'Playground Music' dataAll.at[769, 'Labels'] = 'Virgin'- VocaldataAll[dataAll['Vocal'].isna()] dataAll.at[736, 'Vocal'] = 'M'- Country Filling missing value in this column with `NA` because I don't really understand what is this and cannot found it by Google.`NA` in this column means "Not Applicable", not North America or Namibia.dataAll['Country'].fillna('NA', inplace = True)- Version & Album_typeFilling missing value in these two columns with `N/A` because not all music are applicable to these two attribute.dataAll['Version'].fillna('N/A', inplace = True) dataAll['Album_type'].fillna('N/A', inplace = True) missingno.bar(dataAll)Great! There are no missing value now, except "Category" column because missing value in this column means we need to predict it. 
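As a quick numeric complement to the missingno chart above (just a sanity-check sketch), the remaining NaN counts can also be listed per column; after the fills, only Category should still contain NaNs, namely the rows whose category we have to predict.

dataAll.isna().sum()              # per-column NaN counts; all zero except Category
dataAll['Category'].isna().sum()  # rows still to be predicted (the test part)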
Rename Value`X♭` is thus enharmonic to `(X-1)`, so I rename `X♭` value to `(X-1)` for lower complexity and better accuracy.dataAll['isMajor'], dataAll['Key'] = [item[1] for item in dataAll['Key'].str.split()], [item[0] for item in dataAll['Key'].str.split()] dataAll['Key'].replace({"D♭": "C#", "E♭": "D#", "G♭": "F#", "A♭": "G#","B♭":"A#"}, inplace = True) features['cat']['single'].append('isMajor')Feature Engineering Functions DefinationThese funtions are copied from sample solution, I changed it only a little.from sklearn.manifold import TSNE def split_to_onehot(df, col): """ This method converts features separated by '|' into one-hot vectors. Additionally it drops unnecessary values, which present only in test set / train set or have only one value. """ # Getting all unique ganres values. unique = [] for i in df.index: unique.extend(df.loc[i,col].split("|")) if "" in unique: unique.remove("") unique = list(set(unique)) # Putting values into binary form onehot = df.loc[:,["Category"]] onehot = onehot.fillna('none').replace({0:"dislike",1:"like"}) onehot[unique] = np.zeros((len(unique)), dtype = np.int8) for i in df.index: g = set(df.loc[i,col].split("|")) for j in g: if j!="": onehot.loc[i,j] = 1 # Dropping unnecessary values _a = onehot.groupby("Category").sum() only_one = list(_a.sum()[_a.sum()==1].index) only_train = list(_a.loc["none"][_a.loc["none"]==0].index) only_test = list(_a.loc[["like",'dislike']].sum()[_a.loc[["like",'dislike']].sum()==0].index) _a = set(only_one + only_train + only_test) onehot = onehot.drop(_a, axis=1) return onehot.drop(columns = 'Category') def onehot_to_tsne2(df, title): """ This method converts one-hot representation into two tsne values. Such operation is needed to shrink the dimentionality of the dataset """ onehot = df #.drop("Category",axis=1) embedded = TSNE(n_components=2, init="pca").fit_transform(onehot) embedded = pd.DataFrame(embedded,columns=[f"{title}_tsne1",f"{title}_tsne2"]) return embeddedEncodingDo label encoding and scaling for "Duration" and "Release_year".for i in dataAll.index: dataAll.at[i, 'Duration'] /= 60000 # Milliseconds to minutes, isn't it a kind of scaling? XD if(dataAll.at[i, 'Release_year'] < 1980): dataAll.at[i, 'Release_year'] = 0 # Label encoding continue dataAll.at[i, 'Release_year'] -= dataAll.at[i, 'Release_year'] % 10 # Years to decades dataAll.at[i, 'Release_year'] = (dataAll.at[i, 'Release_year'] - 1970) / 10 # Label encodingLabel encoding features which only contains single value in the cell by with `LabelEncoder`.from sklearn.preprocessing import LabelEncoder for key in features['cat']['single']: if key == 'Track': continue # Why I skip "Track"? Please refer to "Build Model" section. dataAll[key] = LabelEncoder().fit_transform(dataAll[key])One-Hot encoding features which contains multiple value in the cell with `split_to_onehot`, then reduce the dimensionality with `onehot_to_tsne2`. (Actually it's not really a One-Hot Encode, just kind similar to it.)for key in features['cat']['multiple']: onehot = split_to_onehot(dataAll, key) tsne2 = onehot_to_tsne2(onehot, key) dataAll = pd.concat([dataAll.drop(columns = key), tsne2], axis = 1)/opt/conda/lib/python3.7/site-packages/pandas/core/frame.py:3678: PerformanceWarning: DataFrame is highly fragmented. This is usually the result of calling `frame.insert` many times, which has poor performance. Consider joining all columns at once using pd.concat(axis=1) instead. 
To get a de-fragmented frame, use `newframe = frame.copy()` self[col] = igetitem(value, i) /opt/conda/lib/python3.7/site-packages/pandas/core/frame.py:3678: PerformanceWarning: DataFrame is highly fragmented. This is usually the result of calling `frame.insert` many times, which has poor performance. Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()` self[col] = igetitem(value, i)_Please ignore these warnings... I don't know why they don't appear in the example solution, and it seems to be a lot of work to fix them, so I just left them here, because "programmers care about errors, not warnings" XDDD_ Build Models Split Data Used For Training And Predictingfrom sklearn.model_selection import train_test_split dataBuilding = dataAll.drop(columns = ['Id', 'Track']).copy() # I dropped the "Track" feature because people won't like or dislike a song just because of its name trainData, predictData = dataBuilding.loc[: 663], dataBuilding.loc[664:].drop(columns = ['Category']).copy() dataTraining, dataTesting = dict(), dict() dataTraining['feature'], dataTesting['feature'], dataTraining['result'], dataTesting['result'] = train_test_split(trainData.drop(columns = 'Category'), trainData['Category'], test_size = 0.2, shuffle = True)Combine different models into a `VotingClassifier`, then train it.from sklearn.model_selection import cross_validate from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from xgboost import XGBClassifier from catboost import CatBoostClassifier from deepforest import CascadeForestClassifier from sklearn.ensemble import VotingClassifier voting = VotingClassifier([ ('SVM', SVC(probability = True)), ('RandomForest', RandomForestClassifier()), ('KNeighbors', KNeighborsClassifier()), ('DecisionTree', DecisionTreeClassifier()), ('XGBoost', XGBClassifier(use_label_encoder = False, eval_metric = ['logloss', 'auc', 'error'])), ('CatBoost', CatBoostClassifier(verbose = False)), ('DeepForest', CascadeForestClassifier(verbose = False)) ], flatten_transform = True) voting.fit(dataTraining['feature'].to_numpy(), dataTraining['result'].to_numpy())Make PredictionFinally, we can make the prediction and export the answer to `submission.csv`.**CAUTION**: some classifiers may return floats as results, but Kaggle doesn't accept floats as predictions - it will judge a float prediction as a wrong answer - at least in this competition. We need to convert the floats to integers. 
(I got 5 "Score: 0" submissions just because of this stupid problem!!!)answer = pd.read_csv(PATH + 'sample_submition.csv') answer['Category'] = voting.predict(predictData.to_numpy()) answer['Category'] = answer['Category'].astype(int) answer.to_csv('submission.csv', index = False)BHSA version mappingsIn this notebook we map the nodes between all the extant versions of the BHSA dataset. The resulting mappings can be used for writing version-independent programs that process the BHSA data. Those programs can only be version independent to a certain extent, because in general, node mappings between versions cannot be made perfect. If one imagines what may change between versions, it seems intractable to make a device that overcomes differences in the encoding of the texts and their syntax. However, we are dealing with versions of a very stable text, which is linguistically annotated by means of a consistent method, so there is reason to be optimistic. This notebook shows that this optimism is well-founded. In another notebook, [versionPhrases](versionPhrases.ipynb), we show how one can use the mappings to analyze phrase encodings across versions of the data. OverviewWe create the mappings in two distinct stages, each based on a particular insight, and each dealing with a set of difficult cases.* [Slot nodes](Slot-nodes): we restrict ourselves to the *slot* nodes, the nodes that correspond to the individual words;* [Nodes in general](Nodes-in-general): we extend the slot mapping in a generic way to a mapping between all nodes. Those other nodes are the ones that correspond to higher-level textual objects, such as phrases, clauses, and sentences. This is a big notebook; here are links to some locations in the computation.* [start of the computation](Computing)* [start of making slot mappings](Making-slot-mappings)* [start of expanding them to node mappings](Extending-to-node-mappings) Nodes, edges, mappingsIn the [text-fabric data model](https://github.com/Dans-labs/text-fabric/wiki/Data-model), nodes correspond to the objects in the text and its syntax, and edges correspond to relationships between those objects. Normally, these edges are **intra**-dataset: they are between nodes in the same dataset. Now, each version of the BHSA in text-fabric is its own dataset. The mappings between nodes of one version and corresponding nodes in another version are **inter**-dataset edges. Nodes in text-fabric are abstract; they are just numbers, starting with 1 for the first slot (word), increasing by one for each slot up to the last slot, and then just continuing beyond that for the non-slot nodes. So an edge is just a mapping between numbers, and it is perfectly possible to have just any mapping between numbers in a dataset. We store mappings as ordinary TF edge features, so you can use the mapping in both ways, by

```
nodesInVersion4 = Es('omap@3-4').f(nodeInVersion3)
nodesInVersion3 = Es('omap@3-4').t(nodeInVersion4)
```

respectively. When one version supersedes another, we store the mapping between the older and newer version as an edge in the new version, leaving the older version untouched. We store the node mapping with a bit more information than the mere correspondence between nodes. We also add an integer to each correspondence, which indicates how problematic that correspondence is. If the correspondence is perfect, we do not add any value. If it is a simple discrepancy, confined to an equal number of slots in both versions, we add the value `0`. If the discrepancy is more complicated, we add a higher number. The details of this will follow. 
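To make the bookkeeping concrete, here is a small illustrative sketch (not part of the original notebook; the node numbers are invented) of how such a mapping with dissimilarity values can be held in plain Python before it is saved as a TF edge feature:

```python
# Illustrative sketch: an inter-version mapping as {source_node: {target_node: dissimilarity}}.
#   None  -> perfect correspondence (no value stored on the edge)
#   0     -> simple discrepancy, confined to an equal number of slots
#   n > 0 -> a more complicated discrepancy
omap_sketch = {
    1: {1: None},     # slot 1 corresponds cleanly to slot 1
    2: {2: 0},        # lexeme changed, but still one-to-one
    3: {3: 1, 4: 1},  # slot 3 was split into slots 3 and 4
}

for source, targets in omap_sketch.items():
    for target, dis in targets.items():
        label = "perfect" if dis is None else f"dissimilarity {dis}"
        print(f"{source} -> {target}: {label}")
```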
Slot nodesThe basic idea in creating a slot mapping is to walk through the slots of both versions in parallel,and upon encountering a difference, to take one of a few prescribed actions, that may lead to catching upslots in one of the two versions.The standard behaviour is to stop at each difference encountered, unless the difference conformsto a "predefined" case. When there is no match, the user may add a case to the list of cases.It might be needed to add a different systematic kind of case, and for that programming is needed.This notebook shows the patterns and the very small lists of cases that were needed to do the job for 4version transitions, each corresponding to 1 year or more of encoding activity. DifferencesWhen we compare versions, our aim is not to record all differences in general, but to recordthe correspondence between the slots of the versions, and exactly where and how thiscorrespondence is disturbed.We use the lexeme concept as an anchor point for the correspondence.If we compare the two versions, slot by slot, and as long as we encounter the same lexemes,we have an undisturbed correspondence.In fact, we relax this a little bit, because the very concept of lexeme might change between versions.So we reduce the information in lexemes considerably, before we compare them, so that wedo not get disturbed by petty changes.While being undisturbed, we just create an edge between the slot in the one version that we are at,to the node in the other version that we are at,and we assign no value to such an edge.But eventually, we encounter real disturbances.They manifest themselves in just a few situations:1. ![1](diffs/diffs.001.png)2. ![2](diffs/diffs.002.png)3. ![3](diffs/diffs.003.png)In full generality, we can say:$i$ slots in the source $V$ version correspondto $j$ slots in the target version $W$,where $i$ and $j$ may be 0, but not at the same time:1. ![4](diffs/diffs.004.png)If $i$ slots in version $V$, starting at $n$get replaced by $j$ slots in the version $W$, starting at $m$,we create edges between all $n, ..., n+i-1$ on the one handand all $m, ..., m+j-1$ on the other hand,and associate them all with the same number $j-i$.But so far, it turns out that the only things we have to deal with,are specific instances of 1, 2, and 3 above.We have a closer look at those cases. Lexeme changeWhen a lexeme changes at a particular spot $n, m$,we have $i=j=1$, leading to exactly one edge $(n, m)$ with value $0$. Slot splittingWhen slot $n\in V$ splits into $m, ..., m+j \in W$, we create edges from $n$ to each of the $m, ..., m+j$,each carrying the number $j$. The larger $j$ is,the greater the dissimilarity between node $n\in V$and each of the $m, ..., m+j \in W$. Slot collapseWhen slots $n, ..., n+i \in V$ collapse into $m\in W$, we create edges from each of the $n, ..., n+i$ to $m$,each carrying the number $j$. The larger $j$ is,the greater the dissimilarity between the nodes $n, ..., n+i\in V$and $m \in W$. Slot deletionWhen slot $n$ is deleted from $V$, we have $i=1, j=0$, leading to zero edges from $n$.But so far, we have not encountered this case. Slot additionWhen slot $m$ is added to $W$, we have $i=0, j=1$, again leading to zero edges to $m$.But so far, we have not encountered this case. 
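Summarizing the general $i$-to-$j$ rule above in code: the following is a minimal sketch (not the notebook's own implementation, which lives in `doDiffs` further below) that produces the edges and their shared value $j-i$ for a single disturbance.

```python
# Sketch: i slots starting at n in version V correspond to j slots starting at m in version W.
# Every pair of involved slots gets an edge carrying the same value j - i.
def disturbance_edges(n, i, m, j):
    value = j - i
    return {
        (source, target): value
        for source in range(n, n + i)
        for target in range(m, m + j)
    }

# Example: one slot (n=10) splits into two slots (m=10, 11): both edges carry the value 1.
print(disturbance_edges(10, 1, 10, 2))
# Example: two slots (n=20, 21) collapse into one slot (m=19): both edges carry the value -1.
print(disturbance_edges(20, 2, 19, 1))
```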
Nodes in generalThe basic idea we use for the general case is that that nodes are linked to slots.In text-fabric, the standard `oslots` edge feature lists for each non-slot node the slots it is linked to.Combining the just created slot mappings between versions and the `oslots` feature,we can extend the slot mapping into a general node mapping.In order to map a node $n$ in version $V$, we look at its slots $s$,use the already established *slot mapping* to map these to slots $t$ in version $W$,and collect the nodes $m$ in version $W$ that are linked to those $t$.They are good candidates for the mapping.![5](diffs/diffs.005.png) RefinementsWhen we try to match nodes across versions, based on slot containment, we also respecttheir `otype`s. So we will not try to match a `clause` to a `phrase`.We make implicit use of the fact that for most `otype`s, the members contain disjoint slots. Multiple candidatesOf course, things are not always as neat as in the diagram. Textual objects may have split, or shifted,or collapsed.In general we find 0 or more candidates.Even if we find exactly one candidate, it does not have to be a perfect match.A typical situation is this:![6](diffs/diffs.006.png)We do not find a node $m\in W$ that occupies the mapped slots exactly.Instead, we find that the target area is split between two candidates whoalso reach outside the target area.In such cases, we make edges to all such candidates, but we add a dissimilarity measure.If $m$ is the collection of slots, mapped from $n$, and $m_1$ is a candidate for $n$, meaning $m_1$ hasoverlap with $m$, then the *dissimilarity* of $m_1$ is defined as:$$|m_1\cup m| - |m_1\cap m|$$In words: the number of slots in the union of $m_1$ and $m$ minus the number of slots in their intersection.In other words: $m_1$ gets a penalty for* each slot $s\in m_1$ that is not in the mapped slots $m$;* each mapped slot $t\in m$ that is not in $m_1$.If a candidate occupies exactly the mapped slots, the dissimilarity is 0.If there is only one such candidate of the right type, the case is completely clear, and wedo not add a dissimilarity value to the edge.If there are more candidates, all of them will get an edge, and those edges will contain the dissimilarityvalue, even if that value is $0$. SubphrasesThe most difficult type to handle in our dataset is the `subphrase`,because they nest and overlap.But it turns out that the similarity measure almost always helps out: when looking for candidatesfor a mapped subphrase, usually one of them has a dissimilarity of 0.That's the real counterpart. ReportingWe report the success in establishing the match between non-slot nodes.We do so per node type, and for each node type we list a few statistics,both in absolute numbers and in percentage of the total amount of nodes of thattype in the source version.We count the nodes that fall in each of the following cases.The list of cases is ordered by decreasing success of the mapping.1. **unique, perfect**: there is only one match for the mapping and it is a perfect one in terms of slots linked to it;2. **multiple, one perfect**: there are multiple matches, but at least one is perfect; this occurs typically if nodes of a type are linked to nested and overlapping sequences of slots, such as `subphrase`s;3. 
**unique, imperfect**: there is only one match, but it is not perfect; this indicates that some boundary reorganization has happened between the two versions, and that some slots of the source node have been cut off in the target node; yet the fact that the source node and the target node correspond is clear;4. **multiple, cleanly composed**: in this case the source node corresponds to a bunch of matches, that together cleanly cover the mapped slots of the source node; in other words: the original node has been split in several parts;5. **multiple, non-perfect**: all remaining cases where there are matches; these situations can be the result of more intrusive changes; if it turns out to be a small set they do require closer inspection;6. **not mapped**: these are nodes for which no match could be found. Computingimport os import collections from functools import reduce from utils import caption from tf.fabric import FabricWe specify our versions and the subtle differences between them as far as they are relevant.REPO = os.path.expanduser("~/github/etcbc/bhsa") baseDir = "{}/tf".format(REPO) tempDir = "{}/_temp".format(REPO) versions = """ 3 4 4b 2016 2017 c """.strip().split() # work only with selected versions # remove this if you want to work with all versions versions = """ 2017 2021 """.strip().split() versionInfo = { "": dict( OCC="g_word", LEX="lex", ), "3": dict( OCC="text_plain", LEX="lexeme", ), }Load all versions in one go!TF = {} api = {} for v in versions: for (param, value) in versionInfo.get(v, versionInfo[""]).items(): globals()[param] = value caption(4, "Version -> {} <- loading ...".format(v)) TF[v] = Fabric(locations="{}/{}".format(baseDir, v), modules=[""]) api[v] = TF[v].load("{} {}".format(OCC, LEX)) # noqa F821 caption(4, "All versions loaded").............................................................................................. . 5m 27s Version -> 2017 <- loading ... . .............................................................................................. This is Text-Fabric 9.1.7 Api reference : https://annotation.github.io/text-fabric/tf/cheatsheet.html 115 features found and 0 ignored 0.00s loading features ... | 0.00s Dataset without structure sections in otext:no structure functions in the T-API 18s All features loaded/computed - for details use TF.isLoaded() .............................................................................................. . 5m 45s Version -> 2021 <- loading ... . .............................................................................................. This is Text-Fabric 9.1.7 Api reference : https://annotation.github.io/text-fabric/tf/cheatsheet.html[...]We want to switch easily between the APIs for the versions.def activate(v): for (param, value) in versionInfo.get(v, versionInfo[""]).items(): globals()[param] = value api[v].makeAvailableIn(globals()) caption(4, "Active version is now -> {} <-".format(v))Inspect the amount of slots in all versions.nSlots = {} for v in versions: activate(v) nSlots[v] = F.otype.maxSlot caption(0, "\t {} slots".format(nSlots[v])).............................................................................................. . 6m 09s Active version is now -> 2017 <- . .............................................................................................. | 6m 09s 426584 slots .............................................................................................. . 6m 09s Active version is now -> 2021 <- . 
.............................................................................................. | 6m 09s 426590 slotsMethodWhen we compare two versions, we inspect the lexemes found at corresponding positions in the versions.We start at the beginning, and when the lexemes do not match, we have a closer look.However, in order not to be disturbed by minor discrepancies in the lexemes, we mask the lexemes: weapply a few transformations to it, such as removing alephs and wavs, and finally even turning them intoordered sets of letters, thereby loosing the order and multiplicity of letter.We also strip the disambiguation marks.We maintain a current mapping between the slots of the two versions, and we update it if we encounterdisturbances.Initially, this map is the identity map.What we encounter as remaining differences boils down to the following:* a lexeme is split into two lexemes with the same total material, typically involving `H`, `MN`, or `B`* the lexeme is part of a special case, listed in the `cases` table (which has been found by repeatedly chasing for the first remaining difference.* the both lexemes differ, but that's it, no map updates have to be done.The first two types of cases can be solved by splitting a lexeme into `k` parts or combining `k` lexemes into one.After that the mapping has to be shifted to the right or to the left from a certain point onwards.The loop then is as follows:* find the first slot with a lexeme in the first version that is different from the lexeme at the mapped slot in the second version* analyze what is the case: * if the disturbance is recognized on the basis of existing patterns and cases, update the map and consider this case solved * if the disturbance is not recognized, the case is unsolved, and we break out of the loop. More analysis is needed, and the outcome of that has to be coded as an extra pattern or case.* if the status is solved, go back to the first stepWe end up with a mapping from the slots of the first version to those of the other version that linksslots with approximately equal lexemes together. Making slot mappings Lexeme maskingWe start by defining our masking function, and compile lists of all lexemes and masked lexemes for all versions.masks = [ (lambda lex: lex.rstrip("[/="), "strip disambiguation"), (lambda lex: lex[0:-2] if lex.endswith("JM") else lex, "remove JM"), (lambda lex: lex[0:-2] if lex.endswith("WT") else lex, "remove WT"), (lambda lex: lex.replace("J", ""), "remove J"), (lambda lex: lex.replace(">", ""), "remove Alef"), (lambda lex: lex.replace("W", ""), "remove W"), (lambda lex: lex.replace("Z", "N"), "identify Z and N"), (lambda lex: lex.rstrip("HT"), "strip HT"), ( lambda lex: ("".join(sorted(set(set(lex))))) + "_" * lex.count("_"), "ignore order and multiplicity", ), ] def mask(lex, trans=None): """Apply a single masking operation or apply them all. Parameters ---------- lex: string The text of the lexem trans: integer, optional `None` If given, it is an index in the `masks` list, and the corresponding mask transformation will be applied to `lex`. If `None`, all transformations in the `masks` list will be applied in that order. 
Returns ------- string The result of transforming `lex` """ if trans is not None: return masks[trans][0](lex) for (fun, desc) in masks: lex = fun(lex) return lexCarry out the lexeme masking for all versions.lexemes = {} caption(4, "Masking lexemes") for v in versions: activate(v) lexemes[v] = collections.OrderedDict() for n in F.otype.s("word"): lex = Fs(LEX).v(n) # noqa F821 lexemes[v][n] = (lex, mask(lex, trans=0), mask(lex)) caption(0, "Done").............................................................................................. . 6m 14s Masking lexemes . .............................................................................................. .............................................................................................. . 6m 14s Active version is now -> 2017 <- . .............................................................................................. .............................................................................................. . 6m 15s Active version is now -> 2021 <- . .............................................................................................. | 6m 16s DoneNow for each version `v`, `lexemes[v]` is a mapping from word nodes `n` to lexeme information of the word at node `n`.The lexeme information is a tuple with members* **fullLex** the full disambiguated lexeme* **lex** the lexeme without the disambiguation marks* **maskedLex** the fully transformed lexeme Cases and mappingsIn `cases` we store special cases that we stumbled upon.Every time we encountered a disturbance which did not follow a recognized pattern,we turned it into a case.The number is the slot number in the first version where the case will be applied.Cases will only be applied at these exact slot numbers and nowhere else.In `mappings` we build a mapping between corresponding nodes across a pair of versions.At some of those correspondences there are disturbances, there we add a measure of thedissimilarity to the mapped pair.Later, we extend those slot mappings to *node* mappings, which are maps between versions where*all* nodes get mapped, not just slot nodes.We deliver those node mappings as formal edges in TF.Then these edges will be added in the second version, so that each newer version knowshow to link to the previous version.We build the node maps in `edges`.We store the dissimilarities in a separate dictionary, `dissimilarity`.All these dictionaries are keyed by a 2-tuple of versions.cases = {} mappings = {} dissimilarity = {} edges = {}AlgorithmHere is the code that directly implements the method.Every pair of distinct versions can be mapped.We store the mappings in a dictionary, keyed by tuples like `(4, 4b)`,for the mapping from version `4` to `4b`, for instance.The loop is in `doDiffs` below.def inspect(v, w, start, end): """Helper function for inspecting the situation in a given range of slots. Parameters ---------- v: string First version w: string Second version start: integer Slot number (in first version) where we start the inspection. end: integer Slot number (in first version) where we end the inspection. 
Returns ------- None The situation will be printed as a table with a row for each slot and columns: slot number in version 1, lexeme of that slot in version 1, lexeme of the corresponding slot in version 2 """ mapKey = (v, w) mapping = mappings[mapKey] version1Info = versionInfo.get(v, versionInfo[""]) version2Info = versionInfo.get(w, versionInfo[""]) for slot in range(start, end): print( "{:>6}: {:<8} {:<8}".format( slot, api[v].Fs(version1Info["LEX"]).v(slot), api[w].Fs(version2Info["LEX"]).v(mapping[slot]), ) ) def inspect2(v, w, slot, k): """Helper function for inspecting the edges in a given range of slots. Not used, currently. Parameters ---------- v: string First version w: string Second version slot: integer Slot number (in first version) in the center of the inspection k: integer Amount of slots left and right from the center where we inspect. Returns ------- None The situation will be printed as a table with a row for each slot and columns: slot number in version 1, the edge at that slot number, or X if there is no edge """ mapKey = (v, w) edge = edges[mapKey] for i in range(slot - k, slot + k + 1): print(f"EDGE {i} =>", edge.get(i, "X")) def firstDiff(v, w, start): """Find the first discrepancy after a given position. First we walk quickly through the slots of th first version, until we reach the starting position. Then we continue walking until the current slot is either * a special case * a discrepancy Parameters ---------- v: string First version w: string Second version start: integer start position Returns ------- int or None If there is no discrepancy, None is returned, otherwise the position of the first discrepancy. """ mapKey = (v, w) mapping = mappings[mapKey] theseCases = cases[mapKey] fDiff = None for (slot, (lex1, bareLex1, maskedLex1)) in lexemes[v].items(): if slot < start: continue maskedLex2 = lexemes[w][mapping[slot]][2] if slot in theseCases or maskedLex1 != maskedLex2: fDiff = slot break return fDiff def printDiff(v, w, slot, k): """Prints the situation around a discrepancy. We also show phrase atom boundaries. WE show the bare lexemes in the display, not the masked lexemes. Parameters ---------- v: string First version w: string Second version slot: integer position of the discrepancy k: integer amount of slots around the discrepancy to include in the display Returns ------- A plain text display of the situation around the discrepancy. 
""" mapKey = (v, w) mapping = mappings[mapKey] comps = {} prevChunkV = None prevChunkW = None # gather the comparison material in comps # which has as keys the versions and as value a list of display items for i in range(slot - k, slot + k + 1): # determine if we are at a phrase atom boundary in version 1 chunkV = None if i not in mapping else api[v].L.u(i, otype="phrase_atom") boundaryV = prevChunkV is not None and prevChunkV != chunkV prevChunkV = chunkV # determine if we are at the actual discrepancy in version 1 currentV = i == slot # determine if we are at a phrase atom boundary in version 2 j = mapping.get(i, None) chunkW = None if j is None else api[w].L.u(j, otype="phrase_atom") boundaryW = prevChunkW is not None and prevChunkW != chunkW prevChunkW = chunkW # determine if we are at the actual discrepancy in version 2 currentW = j == mapping[slot] lvTuple = lexemes[v].get(i, None) lwTuple = None if j is None else lexemes[w].get(j, None) lv = "□" if lvTuple is None else lvTuple[1] # bare lexeme lw = "□" if lwTuple is None else lwTuple[1] # bare lexeme comps.setdefault(v, []).append((lv, currentV, boundaryV)) comps.setdefault(w, []).append((lw, currentW, boundaryW)) # turn the display items into strings and store them in rep # which is also keyed by the versions rep = {} for version in comps: rep[version] = printVersion(version, comps[version]) # compose the display out of the strings per version # and make a header of sectional information and slot positions print( """{} {}:{} ==> slot {} ==> {} {} {} """.format( *api[v].T.sectionFromNode(slot), slot, mapping[slot], rep[v], rep[w], ) ) def printVersion(v, comps): """Generate a string displaying a stretch of lexemes around a position. Parameters ---------- comps: list of tuple For each slot there is a comp tuple consisting of * the bare lexeme * whether the slot is in the discrepancy position * whether the slot is at a phrase atom boundary Returns ------- string A sequence of lexemes with boundary characters in between. """ rep = "" for (lex, isCurrent, boundary) in comps: rep += "┫┣" if boundary else "╋" rep += f"▶{lex}◀" if isCurrent else lex rep += "╋" return repdoDiffsThis function contains the loop to walk through all differences.We walk from discrepancy to discrepancy, and stop if there are no more discrepancies or when wehave reached an artificial upper boundary of discrepancies.We try to solve the discrepancies.If we hit a discrepancy that we cannot solve, we break out the loop too. MAX_ITERThe articial limit is MAX_ITER.You determine it experimentally.Keep it low at first, when you are meeting the initial discrepancies.When you have dealt with them and discover that you can dealt with that amount of discrepancies,increase the limit. 
CasesWe will encounter discrepancies, and we will learn how to solve them.There are some generic ways of solving them, and these we collect in a dictionary of cases.The keys of the cases are either slot positions or lexemes.When the algorithms walks through the corpus, it will consider slotswhose number or whose lexeme is in the cases as solved.The value of a case is a tuple consisting of* the name of an *action** a parameterHere are the actionskey | action | parameters | description--- | --- | --- | ---slot | `ok` | `None` | the discrepancy is ok, nothing to worry about; we set the dissimilarity to 0, which is worse than `None`slot | `split` | `n` integer | split the lexeme in version 1 into `n` lexemes in version 2; set the dissimilarity to `n`slot | `collapse` | `n` integer | collapse `n` lexemes in version 1 into one lexeme in version 2; dissimilarity `-n`lex | `ok` | `alt` string | the discrepancy is ok if version 2 has *alt* instead of *lex*; dissimilarity set to 0lex | `split` | `n` integer | split *lex* in version 1 into `n` extra slots in version 2; set the dissimilarity to `n`If a discrepancy falls through all these special cases, we have a few generic rules that will also be applied:* if a lexeme in version 1 contains `_`, we split on it and treat it as separate lexemes. In fact, we perform the action `split` with parameter the number of parts separated by `_`.* if the lex in version 1 equals the lex in version 2 plus the next lex in version 2, and if the lex in version 2 is `H`, we split the lex in version 1 into that `H` and the rest.* if the set of letters in the masked lexeme in version 1 is the union of the sets of the corresponding masked lexeme in version 2 plus that of the next lexeme in version 2, and if the corresponding lexeme in version 2 is either `B` or `MN`, we split the lex in version 1 into that `B` or `MN` and the rest. 
Note that these rules are very corpus dependent, and have been distilled from experience with the BHSA versions involved.If you aree in the process of applying this algorithm to other corpora, you can leave out these rules, and add yourown depending on what you encounter.MAX_ITER = 250 def doDiffs(v, w): mapKey = (v, w) thisDissimilarity = {} dissimilarity[mapKey] = thisDissimilarity thisMapping = dict(((n, n) for n in api[v].F.otype.s("word"))) mappings[mapKey] = thisMapping theseCases = cases.get(mapKey, {}) iteration = 0 start = 1 solved = True while True: # try to find the next difference from where you are now n = firstDiff(v, w, start) if n is None: print(f"No more differences.\nFound {iteration} points of disturbance") break if iteration > MAX_ITER: print("There might be more disturbances: increase MAX_ITER") break iteration += 1 # there is a difference: we have to do work # we print it as a kind of logging printDiff(v, w, n, 5) # we try to solve the discrepancy # first we gather the information of about the lexemes at this position in both versions (lex1, bareLex1, maskedLex1) = lexemes[v][n] (lex2, bareLex2, maskedLex2) = lexemes[w][thisMapping[n]] # and at the next position (lex1next, bareLex1next, maskedLex1next) = lexemes[v][n + 1] (lex2next, bareLex2next, maskedLex2next) = lexemes[w][thisMapping[n + 1]] # the discrepancy is not solved unless we find it in a case or in a rule solved = None skip = 0 # first check the explicit cases if n in theseCases: (action, param) = theseCases[n] if action == "collapse": plural = "" if param == 1 else "s" solved = f"{action} {param} fewer slot{plural}" thisDissimilarity[n] = -param skip = param for m in range(api[v].F.otype.maxSlot, n + param, -1): thisMapping[m] = thisMapping[m - param] for m in range(n + 1, n + param + 1): thisMapping[m] = thisMapping[n] elif action == "split": plural = "" if param == 1 else "s" solved = f"{action} into {param} extra slot{plural}" thisDissimilarity[n] = param for m in range(n + 1, api[v].F.otype.maxSlot + 1): thisMapping[m] = thisMapping[m] + param elif action == "ok": solved = "incidental variation in lexeme" thisDissimilarity[n] = 0 elif lex1 in theseCases: (action, param) = theseCases[lex1] if action == "ok": if lex2 == param: solved = "systematic variation in lexeme" thisDissimilarity[n] = 0 elif action == "split": plural = "" if param == 1 else "s" solved = f"systematic {action} into {param} extra slot{plural}" thisDissimilarity[n] = param for m in range(n + 1, api[v].F.otype.maxSlot + 1): thisMapping[m] = thisMapping[m] + param # then try some more general rules elif "_" in lex1: action = "split" param = lex1.count("_") plural = "" if param == 1 else "s" solved = f"{action} on _ into {param} extra slot{plural}" thisDissimilarity[n] = param for m in range(n + 1, api[v].F.otype.maxSlot + 1): thisMapping[m] = thisMapping[m] + param elif lex1 == lex2 + lex2next: if lex2 == "H": solved = "split article off" thisDissimilarity[n] = 1 for m in range(n + 1, api[v].F.otype.maxSlot + 1): thisMapping[m] = thisMapping[m] + 1 elif set(maskedLex1) == set(maskedLex2) | set(maskedLex2next): if lex2 == "B" or lex2 == "MN": solved = "split preposition off" thisDissimilarity[n] = 1 for m in range(n + 1, api[v].F.otype.maxSlot + 1): thisMapping[m] = thisMapping[m] + 1 print(f"Action: {solved if solved else 'BLOCKED'}\n") # stop the loop if the discrepancy is not solved # The discrepancy has already been printed to the output, # so you can see immediately what is happening there if not solved: break # if the discrepancy 
was solved, # advance to the first position after the discrepancy # and try to find a new discrepancy in the next iteration start = n + 1 + skip if not solved: print(f"Blocking difference in {iteration} iterations")The mappings itself are needed elsewhere in Text-Fabric, let us write them to file.We write them into the dataset corresponding to the target version.So the map `3-4` ends up in version `4`.def edgesFromMaps(): edges.clear() for ((v, w), mp) in sorted(mappings.items()): caption(4, "Make edge from slot mapping {} => {}".format(v, w)) edge = {} dm = dissimilarity[(v, w)] for n in range(1, api[v].F.otype.maxSlot + 1): m = mp[n] k = dm.get(n, None) if k is None: if n in edge: if m not in edge[n]: edge[n][m] = None else: edge.setdefault(n, {})[m] = None else: if k > 0: for j in range(m, m + k + 1): edge.setdefault(n, {})[j] = k elif k < 0: for i in range(n, n - k + 1): edge.setdefault(i, {})[m] = k else: edge.setdefault(n, {})[m] = 0 edges[(v, w)] = edgeRunningHere we run the mapping between `3` and `4`. 3 => 4Here are the special cases for this conversion.cases.update( { ("3", "4"): { "CXH[": ("ok", "XWH["), "MQYT/": ("split", 1), 28730: ("ok", None), 121812: ("ok", None), 174515: ("ok", None), 201089: ("ok", None), 218383: ("split", 2), 221436: ("ok", None), 247730: ("ok", None), 272883: ("collapse", 1), 353611: ("ok", None), }, } ) doDiffs("3", "4")Genesis 18:2 ==> slot 7840 ==> 7840 ╋MN╋PTX╋H╋>HL┫┣W┫┣▶CXH◀┫┣>RY┫┣W┫┣>MR┫┣>DNJ┫┣>M╋ ╋MN╋PTX╋H╋>HL┫┣W┫┣▶XWH◀┫┣>RY┫┣W┫┣>MR┫┣>DNJ┫┣>M╋ Action: systematic variation in lexeme Genesis 19:1 ==> slot 8447 ==> 8447 ╋W┫┣QWM┫┣L╋QR>┫┣W┫┣▶CXH◀┫┣>P┫┣>RY┫┣W┫┣>MR┫┣HNH╋ ╋W┫┣QWM┫┣L╋QR>┫┣W┫┣▶XWH◀┫┣>P┫┣>RY┫┣W┫┣>MR┫┣HNH╋ Action: systematic variation in lexeme Genesis 21:14 ==> slot 9856 ==> 9856 ╋HLK┫┣W┫┣TR_CB<◀┫┣W┫┣KLH┫┣H╋MJM┫┣MN╋ ╋HLK┫┣W┫┣TR◀╋CB<┫┣W┫┣KLH┫┣H╋MJM╋ Action: split on _ into 1 extra slot Genesis 21:31 ==> slot 10174 ==> 10175 ╋L╋H╋MQWM╋H╋HW>┫┣▶B>R_CB<◀┫┣KJ┫┣CM┫┣CB<┫┣CNJM┫┣W╋ ╋L╋H╋MQWM╋H╋HW>┫┣▶B>R◀╋CB<┫┣KJ┫┣CM┫┣CB<┫┣CNJM╋ Action: split on _ into 1 extra slot Genesis 21:32 ==> slot 10183 ==> 10185 ╋CNJM┫┣W┫┣KRT┫┣BRJT┫┣B╋▶B>R_CB<◀┫┣W┫┣QWM┫┣>BJMLK╋W╋PJKL╋ ╋CNJM┫┣W┫┣KRT┫┣BRJT┫┣B╋▶B>R◀╋CB<┫┣W┫┣QWM┫┣>BJMLK╋W╋ Action: split on _ into 1 extra slot Genesis 21:33 ==> slot 10200 ==> 10203 ╋PLCTJ┫┣W┫┣NV<┫┣>CL┫┣B╋▶B>R_CB<◀┫┣W[...]RunningHere we run the mapping between `4` and `4b`.The points of disturbance will be written into the output cell. 
4 => 4bHere are the special cases for this conversion.cases.update( { ("4", "4b"): { 214730: ("collapse", 3), 260028: ("split", 1), 289948: ("ok", None), 307578: ("split", 1), 323067: ("ok", None), 389774: ("ok", None), 407543: ("split", 1), 408429: ("split", 1), }, } ) doDiffs("4", "4b")Genesis 24:65 ==> slot 12369 ==> 12369 ╋H╋JC╋▶HLZH◀┫┣H┫┣HLK┫┣B╋H╋FDH╋ ╋H╋JC╋▶H◀╋LZH┫┣H┫┣HLK┫┣B╋H╋ Action: split article off Genesis 37:19 ==> slot 20514 ==> 20515 ╋>X┫┣HNH┫┣B┫┣W┫┣X┫┣HNH┫┣B┫┣W┫┣ slot 130846 ==> 130848 ╋W┫┣NWX┫┣>L╋H╋SL<╋▶HLZ◀┫┣W┫┣>T╋H╋MRQ┫┣CPK╋ ╋W┫┣NWX┫┣>L╋H╋SL<╋▶H◀╋LZ┫┣W┫┣>T╋H╋MRQ╋ Action: split article off 1_Samuel 14:1 ==> slot 148319 ==> 148322 ╋MYB╋PLCTJ┫┣>CR┫┣MN╋B┫┣L>┫┣NGD╋ ╋MYB╋PLCTJ┫┣>CR┫┣MN╋B┫┣L>╋ Action: split article off 1_Samuel 17:26 ==> slot 151331 ==> 151335 ╋>CR┫┣NKH┫┣>T╋H╋PLCTJ╋▶HLZ◀┫┣W┫┣SWR┫┣XRPH┫┣MN╋CR┫┣NKH┫┣>T╋H╋PLCTJ╋▶H◀╋LZ┫┣W┫┣SWR┫┣XRPH┫┣MN╋ Action: split article off 1_Samuel 20:19 ==> slot 153816 ==> 153821 ╋W┫┣JCB┫┣>YL╋H╋>BN┫┣▶H>ZL◀┫┣W┫┣>NJ┫┣CLC╋H╋XY╋ ╋W┫┣JCB┫┣>YL╋H╋>BN╋▶H◀╋>ZL┫┣W┫┣>NJ┫┣CLC╋H╋ Action: split article off[...]4b => 2016We need other cases.cases.update( { ("4b", "2016"): { 28423: ("split", 2), 28455: ("split", 2), 91193: ("split", 1), 91197: ("split", 1), 122218: ("split", 1), 122247: ("split", 1), 123160: ("split", 1), 184086: ("split", 1), 394186: ("collapse", 1), 395150: ("ok", None), 395190: ("ok", None), 401036: ("split", 2), 404503: ("ok", None), 419138: ("split", 2), }, } ) doDiffs("4b", "2016")Genesis 50:10 ==> slot 28423 ==> 28423 ╋KBD╋M>D┫┣W┫┣BW>┫┣VD◀┫┣>CR┫┣B╋D┫┣W┫┣BW>┫┣VD┫┣>CR┫┣B╋ slot 28455 ==> 28457 ╋KNT╋H╋>BL┫┣B╋▶GRN_>VD◀┫┣W┫┣>MR┫┣>BL╋KBD┫┣ZH╋ ╋KNT╋H╋>BL┫┣B╋▶GRN◀╋H╋>VD┫┣W┫┣>MR┫┣>BL╋ Action: split into 2 extra slots Numbers 33:45 ==> slot 91193 ==> 91197 ╋MN╋ slot 91197 ==> 91202 ╋B╋DJBWN_GD┫┣W┫┣NS<┫┣MN╋▶DJBWN_GD◀┫┣W┫┣XNH┫┣B╋ slot 122218 ==> 122224 ╋GBWL╋H╋JPLVJ┫┣ slot 122247 ==> 122254 ╋GBWL╋NXLH┫┣MZRX┫┣<[...]2016 => 2017We need other cases.cases.update( { ("2016", "2017"): { 16562: ("split", 1), 392485: ("split", 2), }, } ) doDiffs("2016", "2017")Genesis 31:11 ==> slot 16562 ==> 16562 ╋>MR┫┣>L┫┣ML>K╋H╋>LHJM┫┣▶B◀╋XLWM┫┣JMR┫┣HNH╋ ╋>MR┫┣>L┫┣ML>K╋H╋>LHJM┫┣▶B◀╋H╋XLWM┫┣JMR╋ Action: split into 1 extra slot 1_Chronicles 2:52 ==> slot 392485 ==> 392486 ╋L╋CWBL┫┣>B╋QRJT_JH┫┣▶XYJ_HMNXWT◀┫┣W┫┣MCPXH╋QRJT_JB╋QRJT_JH┫┣▶XYJ◀╋H╋MNWXH┫┣W┫┣MCPXH╋QRJT_J2017 => 2021No changes expected right now.cases.update( { ("2017", "2021"): {}, } ) doDiffs("2017", "2021")Genesis 24:10 ==> slot 11325 ==> 11325 ╋W┫┣QWM┫┣W┫┣HLK┫┣>L╋▶>RM_NHRJM◀┫┣>L╋L╋▶>RM◀╋NHR┫┣>L╋ slot 105981 ==> 105982 ╋BLRM_NHRJM◀┫┣L╋QLL┫┣W┫┣L>┫┣>BH╋ ╋BLRM◀╋NHR┫┣L╋QLL┫┣W┫┣L>╋ Action: split on _ into 1 extra slot Judges 3:8 ==> slot 128871 ==> 128873 ╋MKR┫┣B╋JD╋KWCN_RCRM_NHRJM◀┫┣W┫┣L┫┣>T╋ ╋MKR┫┣B╋JD╋KWCN_RCRM◀╋NHR┫┣W┫┣L╋ Action: split on _ into 1 extra slot Psalms 60:2 ==> slot 320252 ==> 320255 ╋L╋LMD┫┣B╋NYH┫┣>T╋▶>RM_NHRJM◀╋W╋>T╋>RM╋YWB>┫┣W╋ ╋L╋LMD┫┣B╋NYH┫┣>T╋▶>RM◀╋NHR╋W╋>T╋>RM╋YWB>╋ Action: split on _ into 1 extra slot 1_Chronicles 19:6 ==> slot 401289 ==> 401293 ╋KSP┫┣L╋FKR┫┣L┫┣MN╋▶>RM_NHRJM◀╋W╋MN╋>RM_MRM◀╋NHR╋W╋MN╋>RM╋M slot 401292 ==> 401297 [...]c => 2021No changes expected right now.cases.update( { ("c", "2021"): {}, } ) doDiffs("c", "2021")Genesis 24:10 ==> slot 11325 ==> 11325 ╋W┫┣QWM┫┣W┫┣HLK┫┣>L╋▶>RM_NHRJM◀┫┣>L╋L╋▶>RM◀╋NHR┫┣>L╋ slot 105981 ==> 105982 ╋BLRM_NHRJM◀┫┣L╋QLL┫┣W┫┣L>┫┣>BH╋ ╋BLRM◀╋NHR┫┣L╋QLL┫┣W┫┣L>╋ Action: split on _ into 1 extra slot Judges 3:8 ==> slot 128871 ==> 128873 ╋MKR┫┣B╋JD╋KWCN_RCRM_NHRJM◀┫┣W┫┣L┫┣>T╋ ╋MKR┫┣B╋JD╋KWCN_RCRM◀╋NHR┫┣W┫┣L╋ Action: split on _ into 1 extra slot Psalms 
60:2 ==> slot 320252 ==> 320255 ╋L╋LMD┫┣B╋NYH┫┣>T╋▶>RM_NHRJM◀╋W╋>T╋>RM╋YWB>┫┣W╋ ╋L╋LMD┫┣B╋NYH┫┣>T╋▶>RM◀╋NHR╋W╋>T╋>RM╋YWB>╋ Action: split on _ into 1 extra slot 1_Chronicles 19:6 ==> slot 401289 ==> 401293 ╋KSP┫┣L╋FKR┫┣L┫┣MN╋▶>RM_NHRJM◀╋W╋MN╋>RM_MRM◀╋NHR╋W╋MN╋>RM╋M slot 401292 ==> 401297 [...]Clearly, the only difference between versions `c` and `2021` isthat some composite words in `c` have been split in version `2021`.edgesFromMaps().............................................................................................. . 6m 40s Make edge from slot mapping 2017 => 2021 . ..............................................................................................Extending to node mappingsnodeMapping = {} diagnosis = {} statLabels = collections.OrderedDict( b="unique, perfect", d="multiple, one perfect", c="unique, imperfect", f="multiple, cleanly composed", e="multiple, non-perfect", a="not mapped", ) def makeNodeMapping(nodeType, v, w, force=False): caption(2, "Mapping {} nodes {} ==> {}".format(nodeType, v, w)) mapKey = (v, w) edge = edges[mapKey] if not force and mapKey in nodeMapping and nodeType in nodeMapping[mapKey]: mapping = nodeMapping[mapKey][nodeType] diag = diagnosis[mapKey][nodeType] else: mapping = {} diag = {} caption( 0, "Extending slot mapping {} ==> {} for {} nodes".format(*mapKey, nodeType) ) for n in api[v].F.otype.s(nodeType): slots = api[v].E.oslots.s(n) mappedSlotsTuple = reduce( lambda x, y: x + y, [tuple(edge.get(s, ())) for s in slots], (), ) mappedSlots = set(mappedSlotsTuple) mappedNodes = reduce( set.union, [set(api[w].L.u(s, nodeType)) for s in mappedSlots], set(), ) result = {} nMs = len(mappedNodes) if nMs == 0: diag[n] = "a" elif nMs >= 1: theseMSlots = {} for m in mappedNodes: mSlots = set(api[w].E.oslots.s(m)) dis = len(mappedSlots | mSlots) - len(mappedSlots & mSlots) result[m] = dis theseMSlots[m] = mSlots mapping[n] = result # we wait further case analysis before we put these counterparts of n into the edge if nMs == 1: m = list(mappedNodes)[0] dis = result[m] if dis == 0: diag[n] = "b" edge[n] = { m: None } # this is the most freqent case, hence an optimization: no dis value. 
# all other cases require the dis value to be passed on, even if 0 else: diag[n] = "c" edge[n] = {m: dis} else: edge[n] = result dis = min(result.values()) if dis == 0: diag[n] = "d" else: allMSlots = reduce( set.union, [set(theseMSlots[m]) for m in mappedNodes], set(), ) composed = allMSlots == mappedSlots and sum( result.values() ) == len(mappedSlots) * (len(mappedNodes) - 1) if composed: diag[n] = "f" else: diag[n] = "e" diagnosis.setdefault(mapKey, {})[nodeType] = diag nodeMapping.setdefault(mapKey, {})[nodeType] = mapping caption(0, "\tDone") def exploreNodeMapping(nodeType, v, w, force=False): caption(4, "Statistics for {} ==> {} ({})".format(v, w, nodeType)) mapKey = (v, w) diag = diagnosis[mapKey][nodeType] total = len(diag) if total == 0: return reasons = collections.Counter() for (n, dia) in diag.items(): reasons[dia] += 1 caption(0, "\t{:<30} : {:6.2f}% {:>7}x".format("TOTAL", 100, total)) for stat in statLabels: statLabel = statLabels[stat] amount = reasons[stat] if amount == 0: continue perc = 100 * amount / total caption(0, "\t{:<30} : {:6.2f}% {:>7}x".format(statLabel, perc, amount)) # ntypes = api["3"].F.otype.all ntypes = api["2021"].F.otype.all for (i, v) in enumerate(versions): if i == 0: continue prev = versions[i - 1] ntypes = api[v].F.otype.all for ntype in ntypes[0:-1]: makeNodeMapping(ntype, prev, v, force=False) exploreNodeMapping(ntype, prev, v)********************************************************************************************** * * * 6m 46s Mapping book nodes 2017 ==> 2021 * * * ********************************************************************************************** | 6m 46s Extending slot mapping 2017 ==> 2021 for book nodes | 6m 57s Done .............................................................................................. . 6m 57s Statistics for 2017 ==> 2021 (book) . .............................................................................................. | 6m 57s TOTAL : 100.00% 39x | 6m 57s unique, perfect : 100.00% 39x *********************[...]Writing mappings as TF edgesdef writeMaps(): for ((v1, v2), edge) in sorted(edges.items()): fName = "omap@{}-{}".format(v1, v2) caption(4, "Write edge as TF feature {}".format(fName)) edgeFeatures = {fName: edge} metaData = { fName: { "description": "⚠️ Maps the nodes of version {} to {}".format( v1, v2 ), "encoder": "Dirk Roorda by a semi-automatic method", "see": "https://github.com/ETCBC/bhsa/blob/master/programs/versionMappings.ipynb", "valueType": "int", "edgeValues": True, } } activate(v2) TF.save( nodeFeatures={}, edgeFeatures=edgeFeatures, metaData=metaData, ) caption(4, "Write mappings as TF edges") for (v1, v2) in sorted(mappings.keys()): caption(0, "\t {:>4} ==> {:<4}".format(v1, v2)) writeMaps().............................................................................................. . 7m 45s Write mappings as TF edges . .............................................................................................. | 7m 45s 2017 ==> 2021 .............................................................................................. . 7m 45s Write edge as TF feature omap@2017-2021 . .............................................................................................. .............................................................................................. . 7m 45s Active version is now -> 2021 <- . .............................................................................................. 
0.00s Exporting 0 node and 1 edge and 0 config features to ~/github/etcbc/bhsa/tf/2021: | 3.44s T omap@201[...]Exercise 1'''Function to create a new node if it doesn't exist, and get the vertex if already present in the graph''' def getOrCreate(name): # Treating None as no node/relationship in the graph if name == 'None': return None # Getting vertex if already present vertex = g.V().has(name, 'name', name).toList() if len(vertex) != 0: return vertex[0] # Creating vertex if not present with property 'name' return g.addV(name).property('name', name).next() '''Creating the graph from alerts.csv''' with open('data/alerts.csv', 'r') as file: for line in file: # CSV is comma delimited columns = line.strip().split(',') # Getting vertices corresponding to the columns in the CSV v1 = getOrCreate(columns[0]) v2 = getOrCreate(columns[1]) v3 = getOrCreate(columns[2]) # Creating the edge between the first and second column e1 = g.V(v2).addE('is').to(v1).property('type', 'is').iterate() # Some vertices in the third column are None indicating no relationship # Edges are being created only when the vertex in the third column is Not None # Condition applied only for third node but is easily extendible to all nodes if the contain None if v3 != None: e2 = g.V(v3).addE('is').to(v1).property('type', 'is').iterate() e3 = g.V(v2).addE('knows').to(v3).property('type', 'knows').iterate()Exercise 2# List of vertices to iterate over verticesList = g.V().valueMap().toList() '''The solutions are written to a file named exercise2.txt''' with open('exercise2.txt', 'w') as file: file.write("QUERY 1:\n") for vertex in verticesList: nodeName = vertex['name'][0] # Getting degree (in) inDegree = g.V().has(nodeName, 'name', nodeName).inE().count().toList()[0] # Getting degree (out) outDegree = g.V().has(nodeName, 'name', nodeName).outE().count().toList()[0] #Getting degree (in/out) # overallDegree = g.V().has(nodeName, 'name', nodeName).both().count() # More efficient to just add the inDegree and outDegree overallDegree = inDegree + outDegree file.write("For node {:20s}\n".format(nodeName)) file.write("In degree -> {:20s}\n".format(str(inDegree))) file.write("Out degree -> {:20s}\n".format(str(outDegree))) file.write("Overall degree -> {:20s}\n".format(str(overallDegree))) file.write("\n\n") file.write("*"*40) file.write("\n\n") file.write("QUERY 2:\n") # Initializing the max length and vertex the maximum chain length belongs to maximumChainLength = 0 maximumChainVertex = '' for vertex in verticesList: nodeName = vertex['name'][0] # The last path emited is the longest path for that node # The chain count is a combination of the vertex count and the edge count # This was done to print not just the vertex but also the relationship between the vertex # Easily changable to get only the vertex count longestChainLength = g.V().has(nodeName, 'name',nodeName).repeat(__.inE().outV()).emit().tail().path().unfold().count().toList()[0] # The longest chain length for each Node is written file.write("For node {:20s}\n".format(nodeName)) file.write("Longest Chain for this Node -> {}\n".format(longestChainLength)) # Finding the maximum chain length, and the vertex that chain belongs to if longestChainLength >= maximumChainLength: maximumChainLength = longestChainLength maximumChainVertex = nodeName file.write("*"*20) file.write("\nLongest Chain -> {}\n".format(maximumChainLength)) file.write("Longest Path:\n") # There can be multiple paths that have the maximum length, to get all those chains # All chains or paths of the vertex is iterated 
through for path in g.V().has(maximumChainVertex, 'name', maximumChainVertex).repeat( __.inE().outV()).emit().path().by(__.label()).toList(): # When the length of that path matches the maximum length calculated above # The path is written to the file as the solution if len(path) == maximumChainLength: for node in path: file.write("{} ".format(node)) file.write("<- ") file.write("start\n") file.write("\n\n") file.write("*"*40) file.write("\n\n") file.write("QUERY 3:\n") # Finding the number of vertices connected to 'ztf4' node connectedVerticesCount = g.V().has('ztf4', 'name', 'ztf4').both().count().toList()[0] # Finding the vertices connected to 'ztf4' node connectedVertices = g.V().has('ztf4', 'name', 'ztf4').both().valueMap().toList() file.write("Vertices connected to ztf4 -> {}\n".format(connectedVerticesCount)) file.write("The vertices are ") for vertex in connectedVertices: file.write("{} ".format(vertex['name'][0])) file.write("\n\n") file.write("*"*40) file.write("\n\n") file.write("QUERY 4:\n") # Getting a subgraph in gremlin-python wasn't straight forward like it would have been in pure gremlin # So a workaround was used here, which gets the edges and vertices connected to the 'unknown' node # A gremlin solution "g.V().has('unknown', 'name', 'unknown').bothE().subgraph('subgraph').outV().bothE().subgraph( # 'subgraph').cap('subgraph').next()" can be used for a simple subgraph retrieval # A solution using connecting to the client and submitting a query to the gremlin server and retrieving just the # the result was considered, but I also read somewhere that that isn't recommended, if this solution isn't # up to the mark, that solution can also be used subGraph = g.V().has('unknown', 'name', 'unknown').bothE().otherV().as_( 'vertex2').bothE().dedup().project('v', 'IN', 'OUT').by( __.valueMap(True)).by(__.inV().label().fold()).by( __.outV().label().fold()).toList() # dedup has been used because of the presence of parallel edges # valueMap along with chained by clauses have been used to output # the source and destination of the edge file.write("Nodes and edges in subgraph:\n") # properties = [] # for node in subGraph: # print(node) for node in subGraph: file.write("{} {} {}\n".format(node['OUT'][0], node['v']['type'], node['IN'][0]))"Geo Data Science with Python" Notebook Exercise 4 Python Dictionaries--- Part A: Writing a Dictionary (4 points)Create a dictionary object, `dict_DaysInMonth`, that maps a month's name (the dictionary key) to the respective number of days in that month (the value) for January, February, ..., December) assuming it’s not a leap-year. Enter the code in the cell below."""Create a dictionary containing Number-of-Months, mapping month names to number of days in that month:""" ### YOUR CODE HEREHow can you list only the values from `dict_DaysInMonth`? 
Assign the values of the dictionary to a variable `DaysInMonth`.Enter the code in the cell below."""Assign the values of the dictionary to a variable DaysInMonth:""" ### YOUR CODE HERE # Check the output of your code print(DaysInMonth) # Check the result of your code len(dict_DaysInMonth) == 12 # note the dictionary comprehensions in the following tests len([d for d in list(DaysInMonth) if d == 31]) == 7 len([d for d in list(DaysInMonth) if d == 30]) == 4 assert type(dict_DaysInMonth) == dict assert type(DaysInMonth) == type({'z':1}.values())--- Part B: Writing, Accessing and Manipulating Complex Dictionaries Task B1 (5 points)For this task, you should write the data base of the Finnland Meteorological Institue into a dictionary, that contains table header and content in one data structure.| StatName | ID | Lat | Long ||:-:|:-:|:-:|:-:|| Helsinki Kaisaniemi | 100971 | 60.18 | 24.94 || Helsinki Kaivopuisto | 132310 | 60.15 | 24.96 || Helsinki Kumpula | 101004 | 60.20 | 24.96 || Helsinki Malmi airfield | 101009 | 60.25 | 25.05 || Helsinki lighthouse | 101003 | 59.95 | 24.93 |The dictionary should be named `FIMdata` and it should be **a dictionary of 5 dictionaries**, with each of these 5 dictionaries containing a row of the data table above. Use the station ID's as key of the outer dictionary (instead of the names). Then use the remaining header lines as keys of the inner dictionaries and the remaing columns as values of the inner dictionaries. Make sure to use the same key names as in the table header given above. Use appropriate object types for the content numbers and strings."""Write the dictionary FIMdata containing the table above.""" ### YOUR CODE HERE # Check the content of your dataset. assert type(FIMdata) == dict assert FIMdata[100971]['StatName'] == 'Helsinki Kaisaniemi' assert FIMdata[101009]['Lat'] == 60.25 assert [e for e in FIMdata] == [100971, 132310, 101004, 101009, 101003] assert len(FIMdata) == 5 assert len(FIMdata[100971]) == 3--- Task B2 (2 points)Now, use a **method** available for dictionaries to delete only the station with the ID `132310` from the dictionary (for the station `'Helsinki Kaivopuisto').`"""Use a dictionary method to delete the station with the ID 132310 """ ### YOUR CODE HERE # Check the result of your code. FIMdata # Check the result of your code. assert len(FIMdata) == 4 assert len(FIMdata[100971]) == 3 assert [e for e in FIMdata] == [100971, 101004, 101009, 101003]--- Task B3 (2 points)Now, update the name of the station `Helsinki lighthouse` to a shorter name: `lighthouse`. Use the method `update().`"""Update the name of the station Helsinki lighthouse lighthouse""" ### YOUR CODE HERE--- Task B4 (2 point)As you can see, dictionaries are very useful in manipulating datasets. If an entry is deleted, you do not need to update the access key for preceding entries, as you would have to do for lists.In the next step, assign a variable `FIMkeys` with the keys of the dictionary. **Use a dictionary method (not a list comprehension)**."""Assign the keys of the dictionary FIMdata to a variable FIMkeys.""" ### YOUR CODE HERE """Check the result of your code.""" assert list(FIMkeys) == [100971, 101004, 101009, 101003]--- Part C: Dictionaries of Lists, or better the Other Way Around? Task C.1 (4 points)You have given two different data structures containing inventory lists. First, investigate the data structures and execute the data cell. 
Here a short description of the abbreviations:* SKU: Stock Keeping Unit (i.e., a specific item) * Cost: \$ per unit* Location: Aisle in a warehouse * Qty: Quantity on hand Then, study the access operations for both data structures, given in the code in the cell further below. Learn, to discriminate how to access the two different data structures. Try and experiment with the example code in the empty code cells below.# Two Different Inventory List Data Structures inv1 = [ {'sku': 'A1004', 'cost': 3.45, 'location': 'A104', 'qty': 12} ,{'sku': 'A1012', 'cost': 19.68, 'location': 'A105', 'qty': 92} ,{'sku': 'B1638', 'cost': 33.14, 'location': 'A106', 'qty': 2} ,{'sku': 'B1849', 'cost': 0.78, 'location': 'A107', 'qty': 18} ,{'sku': 'C1098', 'cost': 1.29, 'location': 'C204', 'qty': 1} ] inv2 = { 'A1004': [ 3.45, 'A104', 12] ,'A1012': [19.68, 'A105', 92] ,'B1638': [33.14, 'A106', 2] ,'B1849': [ 0.78, 'A107', 18] ,'C1098' :[ 1.29, 'C204', 1] } # Examples for Data Structures # Show all Items in the Dataset # ... for inv1 for r in inv1: print(r) # ... for inv2 for k in inv2: print(k, inv2[k]) # List all SKUs in the Dataset # ... for inv1 [r['sku'] for r in inv1] # ... for inv2 inv2.keys() #Total Inventory Value # ... for inv1 sum([r['cost']*r['qty'] for r in inv1]) # ... for inv2 sum([inv2[k][0]*inv2[k][2] for k in inv2]) #On‐hand inventory for SKU B1638 # ... for inv1 sku = 'B1638' inv1[[s['sku'] for s in inv1].index(sku)]['qty'] # ... for inv2 sku = 'B1638' inv2[sku][2] print('\nWhat is your favorite? A dictionary of lists, or a list of dictionaries?\n')Try the examples by copying & pasting them into the code cells below. Study their output. You may also add more code cells, if you like. This exercise will not be graded, but you will need the expertise in the following tasks.### Try the examples in this code cell. # (This will not be graded for accuracy, but you receive up to 4 points # for experimenting with the code.) ### Try the examples in this code cell. ### Try the examples in this code cell.--- Task C.2 ( 2 points)After studying the access operations for dictionaries, let's practise a few of them.We keep working with both inventary dictionaries `inv1` and `inv2`.Assign two variables `costA1004_inv1` and `costA1004_inv2` with the cost of the stock unit A1004, one for each of the two dictionaries. **Use mapping keys and list indexes on both dictionaries for that!**"""Assign variables costA1004_inv1 and costA1004_inv2 with cost information from the dictionaries inv1 and inv2.""" ### YOUR CODE HERE #Check the result of your code assert costA1004_inv1 == 3.45 assert costA1004_inv2 == 3.45--- Task C.3 ( 2 points)Now, for the stock unit C1098, change the available quantity from 1 to 20. Do this for both inventory dictionaries `inv1` and `inv2`."""Change quantity of stock unit C1098 to 20 in both dictionaries.""" ### YOUR CODE HERE #Check the result of your code. inv2 # also try: inv1--- Task C.4 ( 2 points)Now, delete the following items from the data structure:* the *location* of the last entry from the data structure `inv1`* the *entire last entry* from the data structure `inv2`! It is not enough to delete the content. You have to remove the entire entry for location or unit, respectively. 
(Note: We the data structures are again copied in the cell below, for easier access.)# Two Different Inventory List Data Structures inv1 = [ {'sku': 'A1004', 'cost': 3.45, 'location': 'A104', 'qty': 12} ,{'sku': 'A1012', 'cost': 19.68, 'location': 'A105', 'qty': 92} ,{'sku': 'B1638', 'cost': 33.14, 'location': 'A106', 'qty': 2} ,{'sku': 'B1849', 'cost': 0.78, 'location': 'A107', 'qty': 18} ,{'sku': 'C1098', 'cost': 1.29, 'location': 'C204', 'qty': 1} ] inv2 = { 'A1004': [ 3.45, 'A104', 12] ,'A1012': [19.68, 'A105', 92] ,'B1638': [33.14, 'A106', 2] ,'B1849': [ 0.78, 'A107', 18] ,'C1098' :[ 1.29, 'C204', 1] } """Delete requested items in the dictionaries.""" ### YOUR CODE HERE # Check the result of your code for the first data structure. inv1 #Check the result of your code for the second data structure. inv2 #Check the result of your code. assert len(inv1) == 5 assert len(inv2) == 4from google.colab import drive drive.mount('/content/drive') ! pip install spacy==2.3.2 ! pip install scispacy==0.2.5 ! pip install transformers==3.0.2 ! pip install numpy==1.19.2 ! pip install seqeval==1.2.2 ! pip install fastapi==0.61.2 ! pip install uvicorn==0.12.2 ! pip install pandas==1.1.2 ! pip install filelock==3.0.12 ! pip install pydantic==1.7.2 ! pip install allennlp==1.1.0 ! pip install scikit-learn==0.23.2 ! pip install networkx==2.5 ! pip install matplotlib==3.3.1 ! pip install seaborn==0.11.0 ! pip install gunicorn==20.0.4 ! pip install uvicorn==0.12.2 %cd /content/drive/MyDrive/NER from utils import read_data, save_pickle, read_ade_data from biobert_ner.utils_ner import generate_input_files from biobert_re.utils_re import generate_re_input_files from typing import List, Iterator, Dict import warnings import os labels = ['B-DRUG', 'I-DRUG', 'B-STR', 'I-STR', 'B-DUR', 'I-DUR', 'B-ROU', 'I-ROU', 'B-FOR', 'I-FOR', 'B-ADE', 'I-ADE', 'B-DOS', 'I-DOS', 'B-REA', 'I-REA', 'B-FRE', 'I-FRE', 'O'] def ner_generator(files: Dict[str, tuple],max_seq_len) -> None: """Generates files for NER""" # Generate train, dev, test files for filename, data in files.items(): generate_input_files(ehr_records=data[0], ade_records=data[1], filename="dataset/" + filename + '.' + "txt", max_len=max_seq_len, sep=" ") save_pickle("dataset/" + filename, {"EHR": data[0], "ADE": data[1]}) # Generate labels file with open("dataset/" + 'labels.txt', 'w') as file: output_labels = map(lambda x: x + '\n', labels) file.writelines(output_labels) filenames = [name for files in map( lambda x: [x + '.' + "txt", x + '.pkl'], list(files.keys())) for name in files] print("\nGenerating files successful. Files generated: ", ', '.join(filenames), ', labels.txt', sep=" ") def re_generator(files: Dict[str, tuple]): """Generates files for RE""" for filename, data in files.items(): generate_re_input_files(ehr_records=data[0], ade_records=data[1], filename="dataset/" + filename + '.' + "txt", max_len=512, sep=" ", is_test=data[2], is_label=data[3]) save_pickle("dataset/" + 'train', {"EHR": files['train'][0], "ADE": files['train'][1]}) save_pickle("dataset/" + 'test', {"EHR": files['test'][0], "ADE": files['test'][1]}) print("\nGenerating files successful. 
Files generated: ", 'train.tsv,', 'dev.tsv,', 'test.tsv,', 'test_labels.tsv,', 'train_rel.pkl,', 'test_rel.pkl,', 'test_labels_rel.pkl', sep=' ') from transformers import AutoTokenizer biobert = AutoTokenizer.from_pretrained("dmis-lab/biobert-base-cased-v1.1") # args.max_seq_len -= biobert.num_special_tokens_to_add() # tokenizer = biobert.tokenize max_seq_len = 512 max_seq_len -= biobert.num_special_tokens_to_add() tokenizer = biobert.tokenize ade_train_dev = None ade_train = None ade_devel = None print("\nReading data\n") train_dev, test = read_data(data_dir="data/",tokenizer=tokenizer, verbose=1) # Data is already shuffled, just split for dev set dev_split_idx = int((1 - 0.1) * len(train_dev)) train = train_dev[:dev_split_idx] devel = train_dev[dev_split_idx:] files = {'train': (train, ade_train), 'train_dev': (train_dev, ade_train_dev),'devel': (devel, ade_devel), 'test': (test, None)} ner_generator(files,max_seq_len) %cd /content/drive/MyDrive/NER !python generate_data.py \ --task ner \ --input_dir data/ \ --target_dir dataset/ \ --max_seq_len 512 \ --dev_split 0.1 \ --tokenizer biobert-base \ --ext txt \ --sep " " \ files = {'train': (train, ade_train, False, True), 'dev': (devel, ade_devel, False, True), 'test': (test, None, True, False), 'test_labels': (test, None, True, True)} re_generator(files)Variable successfully saved in dataset/train_rel.pkl Variable successfully saved in dataset/dev_rel.pkl Variable successfully saved in dataset/test_rel.pkl Variable successfully saved in dataset/test_labels_rel.pkl Variable successfully saved in dataset/train.pkl Variable successfully saved in dataset/test.pkl Generating files successful. Files generated: train.tsv, dev.tsv, test.tsv, test_labels.tsv, train_rel.pkl, test_rel.pkl, test_labels_rel.pklProgramming Assignment: Готовим LDA по рецептам Как вы уже знаете, в тематическом моделировании делается предположение о том, что для определения тематики порядок слов в документе не важен; об этом гласит гипотеза «мешка слов». Сегодня мы будем работать с несколько нестандартной для тематического моделирования коллекцией, которую можно назвать «мешком ингредиентов», потому что на состоит из рецептов блюд разных кухонь. Тематические модели ищут слова, которые часто вместе встречаются в документах, и составляют из них темы. Мы попробуем применить эту идею к рецептам и найти кулинарные «темы». Эта коллекция хороша тем, что не требует предобработки. Кроме того, эта задача достаточно наглядно иллюстрирует принцип работы тематических моделей.Для выполнения заданий, помимо часто используемых в курсе библиотек, потребуются модули *json* и *gensim*. Первый входит в дистрибутив Anaconda, второй можно поставить командой *pip install gensim*Построение модели занимает некоторое время. На ноутбуке с процессором Intel Core i7 и тактовой частотой 2400 МГц на построение одной модели уходит менее 10 минут. Загрузка данных Коллекция дана в json-формате: для каждого рецепта известны его id, кухня (cuisine) и список ингредиентов, в него входящих. 
Загрузить данные можно с помощью модуля json (он входит в дистрибутив Anaconda):import json with open("recipes.json") as f: recipes = json.load(f) print recipes[0]{u'cuisine': u'greek', u'id': 10259, u'ingredients': [u'romaine lettuce', u'black olives', u'grape tomatoes', u'garlic', u'pepper', u'purple onion', u'seasoning', u'garbanzo beans', u'feta cheese crumbles']}Составление корпусаfrom gensim import corpora, models import numpy as npC:\Users\Factorion\Anaconda2\lib\site-packages\gensim\utils.py:865: UserWarning: detected Windows; aliasing chunkize to chunkize_serial warnings.warn("detected Windows; aliasing chunkize to chunkize_serial")Наша коллекция небольшая, и целиком помещается в оперативную память. Gensim может работать с такими данными и не требует их сохранения на диск в специальном формате. Для этого коллекция должна быть представлена в виде списка списков, каждый внутренний список соответствует отдельному документу и состоит из его слов. Пример коллекции из двух документов: [["hello", "world"], ["programming", "in", "python"]]Преобразуем наши данные в такой формат, а затем создадим объекты corpus и dictionary, с которыми будет работать модель.texts = [recipe["ingredients"] for recipe in recipes] dictionary = corpora.Dictionary(texts) # составляем словарь corpus = [dictionary.doc2bow(text) for text in texts] # составляем корпус документов print texts[0] print corpus[0][u'romaine lettuce', u'black olives', u'grape tomatoes', u'garlic', u'pepper', u'purple onion', u'seasoning', u'garbanzo beans', u'feta cheese crumbles'] [(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1)]У объекта dictionary есть полезная переменная dictionary.token2id, позволяющая находить соответствие между ингредиентами и их индексами. Обучение моделиВам может понадобиться [документация](https://radimrehurek.com/gensim/models/ldamodel.html) LDA в gensim. __Задание 1.__ Обучите модель LDA с 40 темами, установив количество проходов по коллекции 5 и оставив остальные параметры по умолчанию. Затем вызовите метод модели *show_topics*, указав количество тем 40 и количество токенов 10, и сохраните результат (топы ингредиентов в темах) в отдельную переменную. Если при вызове метода *show_topics* указать параметр *formatted=True*, то топы ингредиентов будет удобно выводить на печать, если *formatted=False*, будет удобно работать со списком программно. Выведите топы на печать, рассмотрите темы, а затем ответьте на вопрос:Сколько раз ингредиенты "salt", "sugar", "water", "mushrooms", "chicken", "eggs" встретились среди топов-10 всех 40 тем? При ответе __не нужно__ учитывать составные ингредиенты, например, "hot water".Передайте 6 чисел в функцию save_answers1 и загрузите сгенерированный файл в форму.У gensim нет возможности фиксировать случайное приближение через параметры метода, но библиотека использует numpy для инициализации матриц. Поэтому, по утверждению автора библиотеки, фиксировать случайное приближение нужно командой, которая написана в следующей ячейке. 
__Перед строкой кода с построением модели обязательно вставляйте указанную строку фиксации random.seed.__np.random.seed(76543) # здесь код для построения модели: %time model = models.ldamodel.LdaModel(corpus, id2word=dictionary, num_topics = 40, passes = 5) topics = model.show_topics(num_topics = 40,num_words = 10, formatted = True) print topics topics = model.show_topics(num_topics = 40,num_words = 10, formatted = False) c_salt = 0 c_sugar = 0 c_water = 0 c_mushrooms = 0 c_chicken = 0 c_eggs = 0 for i in range(40): check = topics[i][1] for j in check: word = j[0] if word == "salt": c_salt += 1 elif word == "sugar": c_sugar += 1 elif word == "water": c_water += 1 elif word == "mushrooms": c_mushrooms += 1 elif word == "chicken": c_chicken += 1 elif word == "eggs": c_eggs += 1 print c_salt, c_sugar, c_water, c_mushrooms, c_chicken, c_eggs def save_answers1(c_salt, c_sugar, c_water, c_mushrooms, c_chicken, c_eggs): with open("cooking_LDA_pa_task1.txt", "w") as fout: fout.write(" ".join([str(el) for el in [c_salt, c_sugar, c_water, c_mushrooms, c_chicken, c_eggs]])) save_answers1(c_salt, c_sugar, c_water, c_mushrooms, c_chicken, c_eggs)Фильтрация словаряВ топах тем гораздо чаще встречаются первые три рассмотренных ингредиента, чем последние три. При этом наличие в рецепте курицы, яиц и грибов яснее дает понять, что мы будем готовить, чем наличие соли, сахара и воды. Таким образом, даже в рецептах есть слова, часто встречающиеся в текстах и не несущие смысловой нагрузки, и поэтому их не желательно видеть в темах. Наиболее простой прием борьбы с такими фоновыми элементами — фильтрация словаря по частоте. Обычно словарь фильтруют с двух сторон: убирают очень редкие слова (в целях экономии памяти) и очень частые слова (в целях повышения интерпретируемости тем). Мы уберем только частые слова.import copy dictionary2 = copy.deepcopy(dictionary)__Задание 2.__ У объекта dictionary2 есть переменная *dfs* — это словарь, ключами которого являются id токена, а элементами — число раз, сколько слово встретилось во всей коллекции. Сохраните в отдельный список ингредиенты, которые встретились в коллекции больше 4000 раз. Вызовите метод словаря *filter_tokens*, подав в качестве первого аргумента полученный список популярных ингредиентов. Вычислите две величины: dict_size_before и dict_size_after — размер словаря до и после фильтрации.Затем, используя новый словарь, создайте новый корпус документов, corpus2, по аналогии с тем, как это сделано в начале ноутбука. 
Вычислите две величины: corpus_size_before и corpus_size_after — суммарное количество ингредиентов в корпусе (для каждого документа вычислите число различных ингредиентов в нем и просуммируйте по всем документам) до и после фильтрации.Передайте величины dict_size_before, dict_size_after, corpus_size_before, corpus_size_after в функцию save_answers2 и загрузите сгенерированный файл в форму.Frequent_ingredients = [] for i in dictionary2.dfs: if dictionary2.dfs[i] > 4000: Frequent_ingredients.append(i) Frequent_ingredients dict_size_before = len(dictionary2.dfs) dictionary2.filter_tokens(Frequent_ingredients) dict_size_after = len(dictionary2.dfs) print dict_size_before, dict_size_after corpus2 = [dictionary2.doc2bow(text) for text in texts] # составляем корпус документов corpus_size_before = 0 for i in corpus: corpus_size_before += len(i) corpus_size_after = 0 for i in corpus2: corpus_size_after += len(i) print corpus_size_before, corpus_size_after def save_answers2(dict_size_before, dict_size_after, corpus_size_before, corpus_size_after): with open("cooking_LDA_pa_task2.txt", "w") as fout: fout.write(" ".join([str(el) for el in [dict_size_before, dict_size_after, corpus_size_before, corpus_size_after]])) save_answers2(dict_size_before, dict_size_after, corpus_size_before, corpus_size_after)Сравнение когерентностей__Задание 3.__ Постройте еще одну модель по корпусу corpus2 и словарю dictionary2, остальные параметры оставьте такими же, как при первом построении модели. Сохраните новую модель в другую переменную (не перезаписывайте предыдущую модель). Не забудьте про фиксирование seed!Затем воспользуйтесь методом *top_topics* модели, чтобы вычислить ее когерентность. Передайте в качестве аргумента соответствующий модели корпус. Метод вернет список кортежей (топ токенов, когерентность), отсортированных по убыванию последней. Вычислите среднюю по всем темам когерентность для каждой из двух моделей и передайте в функцию save_answers3.np.random.seed(76543) # здесь код для построения модели: %time model2 = models.ldamodel.LdaModel(corpus2, id2word=dictionary2, num_topics = 40, passes = 5) top_topics1 = model.top_topics(corpus) top_topics2 = model2.top_topics(corpus2) def topics_mean(all_topics): mean_topics = [] for one_topics in all_topics: mean_topics.append(one_topics[1]) return np.mean(mean_topics) coherence = topics_mean(top_topics1) coherence2 = topics_mean(top_topics2) print coherence, coherence2 def save_answers3(coherence, coherence2): with open("cooking_LDA_pa_task3.txt", "w") as fout: fout.write(" ".join(["%3f"%el for el in [coherence, coherence2]])) save_answers3(coherence, coherence2)Считается, что когерентность хорошо соотносится с человеческими оценками интерпретируемости тем. Поэтому на больших текстовых коллекциях когерентность обычно повышается, если убрать фоновую лексику. Однако в нашем случае этого не произошло. Изучение влияния гиперпараметра alpha В этом разделе мы будем работать со второй моделью, то есть той, которая построена по сокращенному корпусу. Пока что мы посмотрели только на матрицу темы-слова, теперь давайте посмотрим на матрицу темы-документы. Выведите темы для нулевого (или любого другого) документа из корпуса, воспользовавшись методом *get_document_topics* второй модели:doc_top = model.get_document_topics(corpus2[0]) doc_topТакже выведите содержимое переменной *.alpha* второй модели:model.alphaУ вас должно получиться, что документ характеризуется небольшим числом тем. 
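To build intuition for the next step, one can sample document-topic distributions directly from a Dirichlet prior. The sketch below is only an illustration (not part of the assignment): a small symmetric alpha (gensim's default is 1/num_topics) yields sparse distributions, while alpha = 1 yields nearly uniform ones.

```python
import numpy as np

np.random.seed(0)
num_topics = 40

# Sparse prior: most of the probability mass sits on a few topics
theta_sparse = np.random.dirichlet([1.0 / num_topics] * num_topics)
# Flat prior (alpha = 1): mass is spread over many topics
theta_flat = np.random.dirichlet([1.0] * num_topics)

print((theta_sparse > 0.01).sum())  # typically only a handful of topics
print((theta_flat > 0.01).sum())    # typically a large fraction of the 40 topics
```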
Попробуем поменять гиперпараметр alpha, задающий априорное распределение Дирихле для распределений тем в документах. __Задание 4.__ Обучите третью модель: используйте сокращенный корпус (corpus2 и dictionary2) и установите параметр __alpha=1__, passes=5. Не забудьте про фиксацию seed! Выведите темы новой модели для нулевого документа; должно получиться, что распределение над множеством тем практически равномерное. Чтобы убедиться в том, что во второй модели документы описываются гораздо более разреженными распределениями, чем в третьей, посчитайте суммарное количество элементов, __превосходящих 0.01__, в матрицах темы-документы обеих моделей. Другими словами, запросите темы модели для каждого документа с параметром *minimum_probability=0.01* и просуммируйте число элементов в получаемых массивах. Передайте две суммы (сначала для модели с alpha по умолчанию, затем для модели в alpha=1) в функцию save_answers4.np.random.seed(76543) # здесь код для построения модели: %time model3 = models.ldamodel.LdaModel(corpus2, id2word=dictionary2, num_topics = 40, passes = 5,alpha = 1) print model3.get_document_topics(corpus2[0]) def get_doc_topics_summa(model,corpus): summa = 0 for i in corpus: top = model.get_document_topics(i, minimum_probability=0.01) summa += len(top) return summa count_model2 = get_doc_topics_summa(model2,corpus2) count_model3 = get_doc_topics_summa(model3,corpus2) print count_model2, count_model3 def save_answers4(count_model2, count_model3): with open("cooking_LDA_pa_task4.txt", "w") as fout: fout.write(" ".join([str(el) for el in [count_model2, count_model3]])) save_answers4(count_model2, count_model3)Таким образом, гиперпараметр __alpha__ влияет на разреженность распределений тем в документах. Аналогично гиперпараметр __eta__ влияет на разреженность распределений слов в темах. LDA как способ понижения размерностиИногда, распределения над темами, найденные с помощью LDA, добавляют в матрицу объекты-признаки как дополнительные, семантические, признаки, и это может улучшить качество решения задачи. Для простоты давайте просто обучим классификатор рецептов на кухни на признаках, полученных из LDA, и измерим точность (accuracy).__Задание 5.__ Используйте модель, построенную по сокращенной выборке с alpha по умолчанию (вторую модель). Составьте матрицу $\Theta = p(t|d)$ вероятностей тем в документах; вы можете использовать тот же метод get_document_topics, а также вектор правильных ответов y (в том же порядке, в котором рецепты идут в переменной recipes). Создайте объект RandomForestClassifier со 100 деревьями, с помощью функции cross_val_score вычислите среднюю accuracy по трем фолдам (перемешивать данные не нужно) и передайте в функцию save_answers5.from sklearn.ensemble import RandomForestClassifier from sklearn.cross_validation import cross_val_score X = np.zeros((len(recipes),40)) y = [] for r in recipes: y.append(r['cuisine']) for i in range(len(recipes)): top = model2.get_document_topics(corpus2[i]) for t in top: X[i,t[0]] = t[1] RFC = RandomForestClassifier(n_estimators = 100) estimator = cross_val_score(RFC,X,y,cv = 3).mean() print estimator def save_answers5(accuracy): with open("cooking_LDA_pa_task5.txt", "w") as fout: fout.write(str(accuracy)) save_answers5(estimator)Для такого большого количества классов это неплохая точность. Вы можете попроовать обучать RandomForest на исходной матрице частот слов, имеющей значительно большую размерность, и увидеть, что accuracy увеличивается на 10–15%. 
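This comparison can be checked directly. The sketch below is an optional illustration (not part of the graded tasks); it assumes `corpus2`, `dictionary2` and the label vector `y` built in the cell above, and on recent scikit-learn the import lives in `sklearn.model_selection` rather than `sklearn.cross_validation`.

```python
from gensim import matutils
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score  # older sklearn: sklearn.cross_validation

# Document-term count matrix (documents x ingredients) built from the filtered corpus
X_counts = matutils.corpus2csc(corpus2, num_terms=len(dictionary2)).T

rf = RandomForestClassifier(n_estimators=100)
print(cross_val_score(rf, X_counts, y, cv=3).mean())  # compare with the LDA-feature accuracy above
```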
Таким образом, LDA собрал не всю, но достаточно большую часть информации из выборки, в матрице низкого ранга. LDA — вероятностная модельМатричное разложение, использующееся в LDA, интерпретируется как следующий процесс генерации документов.Для документа $d$ длины $n_d$:1. Из априорного распределения Дирихле с параметром alpha сгенерировать распределение над множеством тем: $\theta_d \sim Dirichlet(\alpha)$1. Для каждого слова $w = 1, \dots, n_d$: 1. Сгенерировать тему из дискретного распределения $t \sim \theta_{d}$ 1. Сгенерировать слово из дискретного распределения $w \sim \phi_{t}$. Подробнее об этом в [Википедии](https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation).В контексте нашей задачи получается, что, используя данный генеративный процесс, можно создавать новые рецепты. Вы можете передать в функцию модель и число ингредиентов и сгенерировать рецепт :)def generate_recipe(model, num_ingredients): theta = np.random.dirichlet(model.alpha) for i in range(num_ingredients): t = np.random.choice(np.arange(model.num_topics), p=theta) topic = model.show_topic(t, topn=model.num_terms) topic_distr = [x[1] for x in topic] terms = [x[0] for x in topic] w = np.random.choice(terms, p=topic_distr) print w generate_recipe(model, 5) generate_recipe(model2, 5) generate_recipe(model3, 5)grated parmesan cheese red wine vinegar shrimp zucchini chilesИнтерпретация построенной моделиВы можете рассмотреть топы ингредиентов каждой темы. Большиснтво тем сами по себе похожи на рецепты; в некоторых собираются продукты одного вида, например, свежие фрукты или разные виды сыра.Попробуем эмпирически соотнести наши темы с национальными кухнями (cuisine). Построим матрицу $A$ размера темы $x$ кухни, ее элементы $a_{tc}$ — суммы $p(t|d)$ по всем документам $d$, которые отнесены к кухне $c$. Нормируем матрицу на частоты рецептов по разным кухням, чтобы избежать дисбаланса между кухнями. Следующая функция получает на вход объект модели, объект корпуса и исходные данные и возвращает нормированную матрицу $A$. Ее удобно визуализировать с помощью seaborn.import pandas import seaborn from matplotlib import pyplot as plt %matplotlib inline def compute_topic_cuisine_matrix(model, corpus, recipes): # составляем вектор целевых признаков targets = list(set([recipe["cuisine"] for recipe in recipes])) # составляем матрицу tc_matrix = pandas.DataFrame(data=np.zeros((model.num_topics, len(targets))), columns=targets) for recipe, bow in zip(recipes, corpus): recipe_topic = model.get_document_topics(bow) for t, prob in recipe_topic: tc_matrix[recipe["cuisine"]][t] += prob # нормируем матрицу target_sums = pandas.DataFrame(data=np.zeros((1, len(targets))), columns=targets) for recipe in recipes: target_sums[recipe["cuisine"]] += 1 return pandas.DataFrame(tc_matrix.values/target_sums.values, columns=tc_matrix.columns) def plot_matrix(tc_matrix): plt.figure(figsize=(10, 10)) seaborn.heatmap(tc_matrix, square=True) # Визуализируйте матрицу matr1 = compute_topic_cuisine_matrix(model, corpus, recipes) plot_matrix(matr1) matr2 = compute_topic_cuisine_matrix(model2, corpus2, recipes) plot_matrix(matr2) matr3 = compute_topic_cuisine_matrix(model3, corpus2, recipes) plot_matrix(matr2)The `auto_plot` function%matplotlib inline from IPython.display import set_matplotlib_formats set_matplotlib_formats('retina')OverviewThe easiest way to plot stuff with :py:obj:`vplot` is using the :py:class:`vplot.auto_plot` function. 
Given a path to a directory containing a :py:obj:`vplanet` run, :py:class:`vplot.auto_plot` will parse the output and generate plots of all of the simulated quantities as a function of time for all of the bodies.Tar up the example folderimport globimport tarfileimport oswith tarfile.open("examples/CircumbinaryOrbit.tar.gz", "w:gz") as tar: for file in glob.glob("examples/CircumbinaryOrbit/*.in"): tar.add(file, arcname=os.path.basename(file))Let's run :py:class:`vplot.auto_plot` on the :download:`CircumbinaryOrbit ` example.import vplotWhen we call :py:class:`vplot.auto_plot`, we pass the directory to the :py:obj:`vplanet` run:from figure_grid import FigureGridFigureGrid( vplot.auto_plot("examples/CircumbinaryOrbit", show=False, figsize=(5, 3), dpi=300)).display().. note:: You must actually run :py:obj:`vplanet` before calling :py:obj:`vplot`!By default, parameters are grouped by *parameter name*. This means that if there are multiple bodies with the same parameter, they will all show up in the same plot, with labels indicating the body they correspond to. We can disable grouping by runningFigureGrid( vplot.auto_plot("examples/CircumbinaryOrbit", show=False, figsize=(5, 3), dpi=300, group="none") ).display()Alternatively, we can group by *physical type*. This means that everything that is an angle will be grouped into one plot, everything that has units of distance will be grouped into a different plot, and so forth. It isn't always useful, particularly if you have *lots* of parameters of the same physical type (as is the case here).FigureGrid( vplot.auto_plot("examples/CircumbinaryOrbit", show=False, figsize=(5, 3), dpi=300, group="type") ).display()Useful options We can plot only specific parameters:FigureGrid( vplot.auto_plot( "examples/CircumbinaryOrbit", show=False, figsize=(5, 3), dpi=300, params=["eccentricity", "CBPR"], ) ).display()And we can plot only specific bodies:FigureGrid( vplot.auto_plot( "examples/CircumbinaryOrbit", show=False, figsize=(5, 3), dpi=300, params=["eccentricity", "CBPR"], bodies="earth", ), ).display().. note:: Body and parameter names are case-insensitive, but otherwise they must match the values in the ``.in`` files exactly. Note that the parameter names are those specified in the ``saOutputOrder`` line of the ``.in`` files.We can also plot things logarithmically:FigureGrid( vplot.auto_plot( "examples/CircumbinaryOrbit", show=False, figsize=(5, 3), dpi=300, params=["eccentricity", "CBPR"], bodies="earth", xlog=True, ylog=False ),).display()Finally, note that :py:class:`vplot.auto_plot` also accepts any keyword arguments accepted by :py:class:`vplot.VPLOTFigure` and :py:class:`matplotlib.figure.Figure`, such as ``figsize`` and ``dpi``.Python Intro Purpose: This notebook will introduce you to Python and basic funcionality What is Python and why are we using it? Python is an object oriented programming language used across the world in almost every kind of field. It can be used for data analysis, scripting, artificial intelligence, real-time control, and more. Data Types Python only allows for certain types of data, the most common are: Integer4 i = 4What is the difference between the previous two lines?type(i)Floatf = 3.14What is the type of i*f?type(i*f)BooleanTrue b = FalseStrings = 'Sojourner'What is the type of i*s?type(i*s)What is i*s?i*sWhat is the type of f*s?type(f*s)Listl = [1,2,3,4]What is the result of i*l?i*lHow do you index a list? 
INDEX starts at 0!!!l[0] l[1]: index means all, so i[1:] means 1 to the endl[1:]what would l[1:3] return?l[1:3]The list returns the starting index to the endind index minus 1 Dictionary Dicionaries are indexed using the keys The keys in this dictionary are 'Name', 'Room', and 'Experience'with types inside those keys as: string, integer, float, listd = {'Name':'Tony', 'Room':162, 'Experience':11.1,'Missions':['MAVEN','PSP','SWFO']} d['Name'] d['Missions'][1]Bytesby = b'\x01\x02\x03\x04' bytes(l) by == bytes(l)LSMTThis [tutorial](https://machinelearningmastery.com/how-to-develop-lstm-models-for-time-series-forecasting/) is divided into four parts; they are:- Univariate LSTM Models - [Data Preparation](Data_Preparation) - [Vanilla LSTM](Vanilla_LSTM) - [Stacked_LSTM](Stacked_LSTM) - [Bidirectional_LSTM](Bidirectional_LSTM) - [CNN_LSTM](CNN_LSTM) - [ConvLSTM](ConvLSTM)- [Multivariate LSTM Models](Multivariate_LSTM_Models) - Multiple Input Series - Multiple Parallel Series- [Multi-Step LSTM Models](Multi_Step_LSTM_Models) - Data Preparation - Vector Output Model - Encoder-Decoder Model- Multivariate Multi-Step LSTM Models - Multiple Input Multi-Step Output. - Multiple Parallel Input and Multi-Step Output Univariate LSTM Models- [Data Preparation](Data_Preparation)- [Vanilla LSTM](Vanilla_LSTM)- [Stacked_LSTM](Stacked_LSTM)- [Bidirectional_LSTM](Bidirectional_LSTM)- [CNN_LSTM](CNN_LSTM)- [ConvLSTM](ConvLSTM) Data Preparation# split a univariate sequence def split_sequence(sequence, n_steps): X, y = list(), list() for i in range(len(sequence)): # find the end of this pattern end_ix = i + n_steps # check if we are beyond the sequence if end_ix > len(sequence)-1: break # gather input and output parts of the pattern seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] X.append(seq_x) y.append(seq_y) return array(X), array(y) # reshape from [samples, timesteps] into [samples, timesteps, features] n_features = 1 X = X.reshape((X.shape[0], X.shape[1], n_features)) print(f"X:\n{X.flatten()} \n\ny:{y.flatten()}")X: [10 20 30 20 30 40 30 40 50 40 50 60 50 60 70 60 70 80] y:[40 50 60 70 80 90]Vanilla LSTM# univariate lstm example from numpy import array from tensorflow import keras as keras from keras.models import Sequential from keras.layers import LSTM from keras.layers import Dense # define model model = Sequential() model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features))) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=200, verbose=0) # demonstrate prediction x_input = array([70, 80, 90]) x_input = x_input.reshape((1, n_steps, n_features)) yhat = model.predict(x_input, verbose=0) print(f"y:{y.flatten()} and yhat:{yhat.flatten()}")y:[40 50 60 70 80 90] and yhat:[102.517494]Stacked LSTMMultiple hidden LSTM layers can be stacked one on top of another in what is referred to as a Stacked LSTM model> However, LSTM layer requires a three-dimensional input and LSTMs by default will produce a two-dimensional output as an interpretation from the end of the sequence`Stacking LSTM hidden layers makes the model deeper, more accurately earning the description as a deep learning techniqueIt is the depth of neural networks that is generally attributed to the success of the approach on a wide range of challenging prediction problems`> Additional hidden layers can be added to a Multilayer Perceptron neural network to make it deeper> The additional hidden layers are understood to recombine the learned representation from prior layers and create new 
representations at high levels of abstraction> e.g. from lines to shapes to objects- _We can address this by setting the return_sequences=True argument on the layer and having the LSTM output a value for each time step in the input data_- _This allows us to have 3D output from hidden LSTM layer as input to the next_We can therefore define a Stacked LSTM as follows:```python define modelmodel = Sequential()model.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(n_steps, n_features)))model.add(LSTM(50, activation='relu'))model.add(Dense(1))model.compile(optimizer='adam', loss='mse')```# define model model = Sequential() # Pay attention to return_sequences=True model.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(n_steps, n_features))) model.add(LSTM(50, activation='relu')) # 1 neuron per feature col and total features = n_features model.add(Dense(n_features)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=200, verbose=0) # demonstrate prediction x_input = array([70, 80, 90]) # Making 3D array for LSTM input x_input = x_input.reshape((1, n_steps, n_features)) yhat = model.predict(x_input, verbose=0) print(yhat)[[102.34008]]Bidirectional LSTMOn some sequence prediction problems, it can be beneficial to > allow the LSTM model to learn the input sequence both forward and backwards and concatenate both interpretations> We can implement a Bidirectional LSTM for univariate time series forecasting by wrapping the first hidden layer in a wrapper layer called Bidirectional- __In problems where all timesteps of the input sequence are available, Bidirectional LSTMs train two instead of one LSTMs on the input sequence.__- __The first on the input sequence as-is and the second on a reversed copy of the input sequence__- __This can provide additional context to the network and result in faster and even fuller learning on the problem.__1) Bidirectional LSTMs are supported in Keras via the Bidirectional layer wrapper - This wrapper takes a recurrent layer (e.g. the first LSTM layer) as an argument 2) One could specify the merge mode, that is how the forward and backward outputs should be combined before being passed on to the next layer. 
The options are: - __‘sum‘__: The outputs are added together - __‘mul‘__: The outputs are multiplied together - __‘concat‘__: The outputs are concatenated together (the default), providing double the number of outputs to the next layer - __‘ave‘__: The average of the outputs is taken More Information in the [blog](https://machinelearningmastery.com/develop-bidirectional-lstm-sequence-classification-python-keras/)# define input sequence raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] # choose a number of time steps n_steps = 3 # split into samples X, y = split_sequence(raw_seq, n_steps) # reshape from [samples, timesteps] into [samples, timesteps, features] n_features = 1 X = X.reshape((X.shape[0], X.shape[1], n_features)) from keras.layers import Bidirectional # define model model = Sequential() model.add(Bidirectional(LSTM(50, activation='relu'), input_shape=(n_steps, n_features))) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=200, verbose=0) # demonstrate prediction x_input = array([70, 80, 90]) x_input = x_input.reshape((1, n_steps, n_features)) yhat = model.predict(x_input, verbose=0) print(yhat)[[102.624146]]CNN LSTM> A convolutional neural network, or CNN for short, is a type of neural network developed for working with two-dimensional image data, spatial data`The CNN can be very effective at automatically extracting and learning features from one-dimensional sequence data such as univariate time series data`_CNN LSTMs(also called as Long-term RCN)_ are a class of models that is both spatially and temporally deep, and has the flexibility to be applied to a variety of vision tasks involving sequential inputs and outputsCNN LSTMs were developed for visual time series prediction problems and the application of generating textual descriptions from sequences of images, like:- __Activity Recognition__: Generating a textual description of an activity demonstrated in a sequence of images- __Image Description__: Generating a textual description of a single image- __Video Description__: Generating a textual description of a sequence of images`CNN LSTM: A CNN model can be used in a hybrid model with an LSTM backend where the CNN is used to interpret subsequences of input that together are provided as a sequence to an LSTM model to interpret`> A CNN LSTM can be defined by adding CNN layers on the front end followed by LSTM layers with a Dense layer on the output > It is helpful to think of this architecture as defining two sub-models: the CNN Model for feature extraction and the LSTM Model for interpreting the features across time steps ___ __CNN Model__```pythoncnn = Sequential()cnn.add(Conv2D(1, (2,2), activation='relu', padding='same', input_shape=(10,10,1)))cnn.add(MaxPooling2D(pool_size=(2, 2)))cnn.add(Flatten())```- The snippet above expects to read in 10×10 pixel images with 1 channel (e.g. 
black and white)- The Conv2D will read the image in 2×2 snapshots and output one new 10×10 interpretation of the image- The MaxPooling2D will pool the interpretation into 2×2 blocks reducing the output to a 5×5 consolidation- The Flatten layer will take the single 5×5 map and transform it into a 25-element vector ready for some other layer to deal with, such as a Dense for outputting a prediction> All these makes sense for image classification and other computer vision tasks___ __LSTM Model__- The CNN model above is only capable of handling a single image, transforming it from input pixels into an internal matrix or vector representation- We need to repeat this operation across multiple images and allow the LSTM to build up internal state and update weights using BPTT across a sequence of the internal vector representations of input images- We want to apply the CNN model to each input image and pass on the output of each input image to the LSTM as a single time step- __We can achieve this by wrapping the entire CNN input model (one layer or more) in a TimeDistributed layer__```pythonmodel.add(TimeDistributed(...))model.add(LSTM(...))model.add(Dense(...))```- __TimeDistributed layer__ layer achieves the desired outcome of applying the same layer or layers multiple times- In this case, applying it multiple times to multiple input time steps and in turn providing a sequence of “image interpretations” or “image features” to the LSTM model to work on___ __CNN LSTM Model__We can define a CNN LSTM model in Keras by first defining the CNN layer or layers, wrapping them in a TimeDistributed layer and then defining the LSTM and output layers```pythonmodel = Sequential() define CNN modelmodel.add(TimeDistributed(Conv2D(...))model.add(TimeDistributed(MaxPooling2D(...)))model.add(TimeDistributed(Flatten())) define LSTM modelmodel.add(LSTM(...))model.add(Dense(...))```1) Split the input sequences into subsequences that can be processed by the CNN model - e.g., we can first split our univariate time series data into input/output samples with four steps as input and one as output 2) Each sample can then be split into two sub-samples, each with two time steps 3) The CNN can interpret each subsequence of two time steps and provide a time series of interpretations of the subsequences to the LSTM model to process as input ```python choose a number of time stepsn_steps = 4 split into samplesX, y = split_sequence(raw_seq, n_steps) reshape from [samples, timesteps] into [samples, subsequences, timesteps, features]n_features = 1 number of subsequences as n_seq; number of time steps per subsequence as n_stepsn_seq = 2; n_steps = 2X = X.reshape((X.shape[0], n_seq, n_steps, n_features))```- We want to reuse the same CNN model when reading in each sub-sequence of data separately- This can be achieved by wrapping the entire CNN model in a TimeDistributed wrapper that will apply the entire model once per input, in this case, once per input subsequence```pythonmodel.add(TimeDistributed(Conv1D(filters=64, kernel_size=1, activation='relu'), input_shape=(None, n_steps, n_features)))model.add(TimeDistributed(MaxPooling1D(pool_size=2)))model.add(TimeDistributed(Flatten()))```- The CNN model first has a convolutional layer for reading across the subsequence that requires a number of filters and a kernel size to be specified- The number of filters is the number of reads or interpretations of the input sequence- The kernel size is the number of time steps included of each ‘read’ operation of the input sequence- The convolution 
layer is followed by a max pooling layer that distills the filter maps down to 1/2 of their size that includes the most salient features- These structures are then flattened down to a single one-dimensional vector to be used as a single input time step to the LSTM layer- Next, we can define the LSTM part of the model that interprets the CNN model’s read of the input sequence and makes a prediction```pythonmodel.add(LSTM(50, activation='relu'))model.add(Dense(1))```___from numpy import array from keras.models import Sequential from keras.layers import LSTM, Dense, Flatten, TimeDistributed from keras.layers.convolutional import Conv1D, MaxPooling1D # define input sequence raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] # choose a number of time steps n_steps = 4 # split into samples X, y = split_sequence(raw_seq, n_steps) # reshape from [samples, timesteps] into [samples, subsequences, timesteps, features] n_features = 1 n_seq = 2 n_steps = 2 X = X.reshape((X.shape[0], n_seq, n_steps, n_features)) # define model model = Sequential() model.add(TimeDistributed(Conv1D(filters=64, kernel_size=1, activation='relu'), input_shape=(None, n_steps, n_features))) model.add(TimeDistributed(MaxPooling1D(pool_size=2))) model.add(TimeDistributed(Flatten())) model.add(LSTM(50, activation='relu')) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=500, verbose=0) # demonstrate prediction x_input = array([60, 70, 80, 90]) x_input = x_input.reshape((1, n_seq, n_steps, n_features)) yhat = model.predict(x_input, verbose=0) print(yhat)[[101.208145]]Multivariate LSTM Models- Multiple Input Series- Multiple Parallel Series __A problem may have:__- [x] two or more parallel input time series and- [x] an output time series that is dependent on the input time series> The input time series are parallel because each series has an observation at the same time steps# multivariate data preparation from numpy import array from numpy import hstack # define input sequence in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) # convert to [rows, columns] structure in_seq1 = in_seq1.reshape((len(in_seq1), 1)) in_seq2 = in_seq2.reshape((len(in_seq2), 1)) out_seq = out_seq.reshape((len(out_seq), 1)) # horizontally stack columns dataset = hstack((in_seq1, in_seq2, out_seq)) datasetWorked example: Symmetry reductionimport numpy as np import sympy as sp import scipy.linalg as laSome routines for creating rotation matrices and transpositionsdef get_rotation_matrix(phi, axis): cp = np.cos(phi) sp = np.sin(phi) r1, r2, r3 = axis Rm = np.array( [ [ r1 ** 2 * (1 - cp) + cp, r1 * r2 * (1 - cp) - r3 * sp, r1 * r3 * (1 - cp) + r2 * sp, ], [ r1 * r2 * (1 - cp) + r3 * sp, r2 ** 2 * (1 - cp) + cp, r2 * r3 * (1 - cp) - r1 * sp, ], [ r3 * r1 * (1 - cp) - r2 * sp, r2 * r3 * (1 - cp) + r1 * sp, r3 ** 2 * (1 - cp) + cp, ], ] ) # Clean spurious terms for ii, jj in np.ndindex(Rm.shape): if abs(Rm[ii, jj]) < 1e-10: Rm[ii, jj] = 0 sp = np.sin(phi) return Rm def get_transposition(nd, verbose=0): # Make a basis: basis_nd = [] for ii in range(nd): for jj in range(nd): emp = np.zeros([nd, nd], dtype="int8") bb = emp bb[ii, jj] = 1 basis_nd.append(bb) # # Build transpose and return: transp = np.array([bb.T.flatten() for bb in basis_nd]) return transp # pretty print matrix def pprint(M, maxl=80): M = np.atleast_2d(M) dim1, dim2 = M.shape lengths = [len(str(el)) for el in M.flatten()] maxl = 
min(maxl, max(lengths)) for ii in range(dim1): print( (f" | " + " , ".join([f"{str(el)[:maxl]:{maxl}s}" for el in M[ii]]) + " |") )Start: Define a matrix $A$* Define a general 3x3 matrix $A$ and its 9x1 vectorized representation$$ \boldsymbol{a} = {\textbf{vec}} (A)$$* $A$ and $\boldsymbol a$ can, e.g., represent a physical tensor (conductivity, stress, ...)ndim = 3 sym = sp.Symbol A = sp.ones(ndim, ndim) for ii in range(ndim): for jj in range(ndim): A[ndim * ii + jj] = sym(f"a_{ii}{jj}") A = np.array(A) a = A.flatten() print("Matrix A:") pprint(A) print("vec(A):") pprint(a)Matrix A: | a_00 , a_01 , a_02 | | a_10 , a_11 , a_12 | | a_20 , a_21 , a_22 | vec(A): | a_00 , a_01 , a_02 , a_10 , a_11 , a_12 , a_20 , a_21 , a_22 |Case 1: Cubic system* Define 3 rotation matrices $M_i$ that implement a 90° rotation about $x$, $y$, and $z$ axis* Construct the rotation matrices in the flattened representatin by Roth's Relationship, i.e.$$ M_\text{flat} = M \otimes M $$* Add transposition (not necessary in cubic case)r1 = get_rotation_matrix(np.pi / 2, [1, 0, 0]) r2 = get_rotation_matrix(np.pi / 2, [0, 1, 0]) r3 = get_rotation_matrix(np.pi / 2, [0, 0, 1]) pprint(r1)| 1.0 , 0.0 , 0.0 | | 0.0 , 0.0 , -1.0 | | 0.0 , 1.0 , 0.0 |Construct big matrices implementing rotations (+transposition) of the vectorized tensorR1 = np.kron(r1, r1) R2 = np.kron(r2, r2) R3 = np.kron(r3, r3) T = get_transposition(ndim)Now sum up the matrices we want to invariant under, i.e.$$ \sum_i (\mathbf 1 - M_i)~a = 0$$id = np.eye(len(a)) inv = 4*id - R1 - R2 - R3 - TConsctruct nullspace by SVD__[scipy-cookbook.readthedocs.io/items/RankNullspace.html](http://scipy-cookbook.readthedocs.io/items/RankNullspace.html)__u, s, vh = la.svd(inv, lapack_driver="gesdd") rank = (s > 1e-12).sum() print(f"Initial Dimension: {len(a)}") print(f"Rank of Nullspace (= No. irred. elements): {len(a) - rank}")Initial Dimension: 9 Rank of Nullspace (= No. irred. elements): 1Construct matrices translating between full and reduced representation* $S$ reconstructs the full representation $a$ from a given reduced $\tilde a$. 
One can think of $\tilde a$ as being the components of $a$ in the basis given by the vectors in $S$:$$ a = S \, \tilde{a}$$* $S^+$ (pseudo-inverse, often just transpose) projects a given $a$ onto the irreducible components $\tilde a$ $$\tilde{a} = S^+ a$$S = vh[rank:].T Sp = S.TBuild projectors* $P = S^\vphantom{+} S^+$* $\boldsymbol{1}_\text{irred} = S^+ S^\phantom{+}$P = S @ Sp id_irred = Sp @ S print("Projector onto invariant space") pprint(P, 4) print("Identity within invariant space") pprint(id_irred)Projector onto invariant space | 0.33 , 0.0 , 1.77 , -6.9 , 0.33 , 1.01 , 1.29 , -5.2 , 0.33 | | 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 | | 1.77 , 0.0 , 9.49 , -3.6 , 1.77 , 5.44 , 6.91 , -2.8 , 1.77 | | -6.9 , 0.0 , -3.6 , 1.43 , -6.9 , -2.1 , -2.6 , 1.09 , -6.9 | | 0.33 , 0.0 , 1.77 , -6.9 , 0.33 , 1.01 , 1.29 , -5.2 , 0.33 | | 1.01 , 0.0 , 5.44 , -2.1 , 1.01 , 3.11 , 3.96 , -1.6 , 1.01 | | 1.29 , 0.0 , 6.91 , -2.6 , 1.29 , 3.96 , 5.04 , -2.0 , 1.29 | | -5.2 , 0.0 , -2.8 , 1.09 , -5.2 , -1.6 , -2.0 , 8.28 , -5.2 | | 0.33 , 0.0 , 1.77 , -6.9 , 0.33 , 1.01 , 1.29 , -5.2 , 0.33 | Identity within invariant space | 1.0000000000000002 |Symmetrize the tensor with the projector obtained$$ \text{sym} (a) = P a = S^\vphantom{+} S^+ a = S \, \tilde a $$aT = np.dot(Sp, a) aS = np.dot(S, aT) # = np.dot(P, a)How does the matrix $A$ now look like?$$A = \text{unvec} \left( \text{sym} (a) \right)$$As = aS.reshape(3,3) print('1/3*') pprint(3*As)1/3* | 1.0*a_00 + 5.33729362479519e-33*a_02 - 2.07749100017982e-35*a_10 + 1.0*a_11 + 3. , 0 , 5.33729362479519e-33*a_00 + 2.84867032372794e-65*a_02 - 1.10881794708291e-67*a_1 | | -2.07749100017982e-35*a_00 - 1.10881794708291e-67*a_02 + 4.31596885582815e-70*a_ , 1.0*a_00 + 5.33729362479519e-33*a_02 - 2.07749100017982e-35*a_10 + 1.0*a_11 + 3. , 3.05816280424746e-34*a_00 + 1.63223128386957e-66*a_02 - 6.35330570290877e-69*a_1 | | 3.8891592043194e-34*a_00 + 2.07575846270274e-66*a_02 - 8.07969324524005e-69*a_10 , -1.57643859172956e-33*a_00 - 8.41391564551927e-66*a_02 + 3.2750369866543e-68*a_1 , 1.0*a_00 + 5.33729362479519e-33*a_02 - 2.07749100017982e-35*a_10 + 1.0*a_11 + 3. 
|$= \frac{1}{3} \text{Tr A}$ How about hexagonal?Start with defining three-fold rotations in $x,y$ plane, i.e., about $z$ axisr1 = get_rotation_matrix(np.pi / 6, [0, 0, 1]) r2 = get_rotation_matrix(np.pi / 3, [0, 0, 1])Construct the matrices in the flatten represenation as beforeR1 = np.kron(r1, r1) R2 = np.kron(r2, r2) T = get_transposition(ndim) id = np.eye(len(a)) inv = 3*id - R1 - R2 - T # Consctruct nullspace u, s, vh = la.svd(inv, lapack_driver="gesdd") rank = (s > 1e-12).sum() print(f"Dimension: {len(a)}") print(f"Rank of Nullspace: {len(a) - rank}") # Nullspace: S = vh[rank:].T # clean S for ii, jj in np.ndindex(S.shape): if abs(S[ii, jj]) < 1e-10: S[ii, jj] = 0 Sp = S.THow do the projectors look like?print(S@Sp) #print(Sp) #print(S@Sp) print(Sp@S) # Projector P = S@Sp # Symmetrize a aS = np.dot(P, a) # Restore shape As = aS.reshape(3,3) pprint(As)| 0.5*a_00 + 0.5*a_11 , 0 , 0 | | 0 , 0.5*a_00 + 0.5*a_11 , 0 | | 0 , 0 , 1.0*a_22 |SEPARATE NUMERICAL AND CATEGORICAL COLUMNSid_columns = ["Id"] numeric_features = ["SepalLength", "SepalWidth", "PetalLength", "PetalWidth"] categorical_features = ["Color"]* ~~X and y~~* ~~TRAIN TEST SPLIT~~* ~~IMPUTE~~* ~~Handling class IMBALANCE~~* ~~Magical COLUMN TRANSFORMER~~ * ~~ENCODE and SCALE (cat and num columns) in a single go~~* MODEL TRAIN * PREDICT X and YX = df.drop(id_columns + ['Species'], axis = 1) y = df.SpeciesTrain-Test SplitWe do this before scaling/feature transformation to prevent data leakage from test set into training dataX_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 42, test_size = 0.2)num > impute > balance > scale cat > balance > encode Imputeimputer = SimpleImputer(strategy='median', missing_values=np.nan) X_imputed = imputer.fit_transform(X_train[numeric_features].values) X_train_imputed = pd.DataFrame(X_imputed, index=X_train.index, columns=X_train[numeric_features].columns) # Reconstructing the dataset for balancing X_train_imputed["Color"] = X_train.Color X_train_imputed.head()Class Imbalance* Synthetic Minority Over-sampling Technique for Nominal and Continuous (SMOTE-NC) * Use SMOTE for just continuous dataprint('Original dataset samples per class {}'.format(Counter(y_train))) sm = SMOTENC(random_state=42 ,categorical_features=[4] ,sampling_strategy = 'minority') X_bal, y_bal = sm.fit_resample(X_train_imputed, y_train) print('Resampled dataset samples per class {}'.format(Counter(y_bal))) X_bal.shape, y_bal.shape # more data was createdEncoding (categorical features) and Scaling (numeric features)oh_encoder = OneHotEncoder() rob_scaler = RobustScaler() ## choose your scaler depending on the kind of data you have # mine has a lot of outliers. from sklearn.compose import ColumnTransformer column_transformer = ColumnTransformer( transformers = [ ('ohe', oh_encoder, [4]), # for a dataframe use categorical_features ('sca', rob_scaler, [0,1,2,3]) # for a dataframe use numeric_features ] ) X_final = column_transformer.fit_transform(X_bal) X_final.shape # four numerical scaled columns, 4 new categorical columns (one for every category)PREPROCESSING COMPLETE !! 
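One possible way to carry this preprocessing over to the held-out test set is sketched below. It assumes the already-fitted `imputer` and `column_transformer` from the cells above; the classifier `clf` is hypothetical, since no model has been trained at this point.

```python
import pandas as pd

# Reuse the fitted imputer and column transformer; never re-fit on test data
X_test_num = imputer.transform(X_test[numeric_features].values)
X_test_prep = pd.DataFrame(X_test_num, index=X_test.index, columns=numeric_features)
X_test_prep["Color"] = X_test.Color  # same column order as X_bal: 4 numeric + 1 categorical

# Note: OneHotEncoder() with default settings raises on categories unseen during fitting
X_test_final = column_transformer.transform(X_test_prep)

# Hypothetical next step once a model is trained on the balanced data:
# clf.fit(X_final, y_bal)
# y_pred = clf.predict(X_test_final)
```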
👻X_bal.shapeNational Migration Data- 2020-01-01 to 2020-03-15folder = 'Data' file_path = os.path.join(folder, 'country_flow.csv') flow_df = pd.read_csv(file_path) flow_df['date2020'] = flow_df['date2020'].astype(str) flow_df['date2019'] = flow_df['date2019'].astype(str) flow_df.head() plot_df = flow_df.copy() # fig, ax = plt.subplots(figsize=(12, 3)) fig, ax = plt.subplots(figsize=(12, 2.75)) ax.patch.set_alpha(0.015) ax.set_facecolor('#2b3679') text_color = '#46ac88' marker_size = 7 # 2020 # -------------------- x = plot_df['date2020'] y = plot_df['migration_index_2020'] ax.plot(x, y, '-o', color=text_color, linewidth=2, markersize=marker_size, clip_on=False, markeredgecolor=text_color, markeredgewidth=1, markerfacecolor='None', label='2020', # alpha=0.9, zorder=100, ) # 2019 # -------------------- x = plot_df['date2020'] y = plot_df['migration_index_2019'] ax.plot(x, y, '-o', color='#666666', linewidth=1.75, markersize=marker_size, clip_on=False, # markeredgecolor='slategrey', markeredgecolor='#666666', markeredgewidth=1, markerfacecolor='None', label='2019 (in lunar calendar)', alpha=0.9, zorder=90, ) ymin=-50 ymax = 800 line_style = (0, (7.5, 5)) ax.vlines(x=list(x).index('20200123'), ymin=ymin, ymax=ymax, linestyles=line_style, linewidth=1, color='grey') ax.text(x=list(x).index('20200123')-0.25, y=ymax/20, s='Wuhan\nlockdown', fontsize=12, ha='right', color='grey') line_style = (0, (1, 2)) ax.vlines(x=list(x).index('20200125'), ymin=ymin, ymax=ymax, linestyles=line_style, linewidth=1.5, color='grey') ax.text(x=list(x).index('20200125')+0.4, y=ymax/20, s='Lunar\nNew Year', fontsize=12, ha='left', color='grey') line_style = (0, (1, 2)) ax.vlines(x=list(x).index('20200208'), ymin=ymin, ymax=ymax, linestyles=line_style, linewidth=1.5, color='grey') ax.text(x=list(x).index('20200208')+0.4, y=ymax/1.35, s='Lantern\nFestival', fontsize=12, ha='left', color='grey') N = len(x) every_nth = 1 locs = range(0, N, every_nth) ax.xaxis.set_minor_locator(ticker.FixedLocator(locs)) every_nth = 5 locs = range(0, N, every_nth) ax.xaxis.set_major_locator(ticker.FixedLocator(locs)) locs = range(0, 800, 250) ax.yaxis.set_major_locator(ticker.FixedLocator(locs)) ticks = list(x) ticks = list(x.str[-4:]) # ticks.insert(0, '') ticks.append('0316') ticks = [t[:2]+'-'+t[-2:] for t in ticks] # string date # ticks = list(plot_df['date2020_str']) # ticks.append(pd.to_datetime('20200316').strftime('%b %d')) plt.xticks(list(range(0, N+1, every_nth)), ticks[::every_nth], rotation='0', horizontalalignment='center', # va = 'center', position=(0, 0) # rotation_mode="anchor" ) real_legend = plt.legend(numpoints=1, fontsize=13, frameon=False, loc='best') # ax.set_xlabel('Date', fontsize=13) ax.set_ylabel('National migration index', fontsize=14.5) border_width = 1.5 ax.spines['bottom'].set_linewidth(border_width) ax.spines['left'].set_linewidth(border_width) ax.spines['top'].set_linewidth(border_width) ax.spines['right'].set_linewidth(border_width) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) # # custom ticks ax.tick_params(axis='x', which='major', top=False, right=False, pad=3, labelsize=13, length=6, width=1.5, direction='out') ax.tick_params(axis='y', which='major', top=False, right=False, pad=3, labelsize=14, length=6, width=1.5, direction='out') ax.tick_params(axis='both', which='minor', top=False, right=False, length=3.5, width=1, direction='out') plt.xlim(-1, 75) plt.ylim(0, 800) plt.tight_layout(pad=0.1) # plt.tight_layout(pad=0.1) out_dir = 'Outputs' # plt.savefig(os.path.join(out_dir, 
'fig4-national_migration_2020_vs_2019.pdf'), pad_inches=0.01, bbox_inches='tight')Mobility Netork over Time- 2020-01-01 to 2020-03-15net_df = pd.read_csv('Data/mobility_network_over_time.csv') net_df['date'] = net_df['date'].astype(str) net_df.shape net_df.head() fig, ax = plt.subplots(figsize=(12, 2.75)) plot_df = net_df.copy() marker_size=5.5 ax.patch.set_alpha(0.015) ax.set_facecolor('#2b3679') # ----------- col = 'average_degree' x = plot_df['date'] y = plot_df[col] text_color = '#74c476' # text_color = '#46ac88' # normlize? norm_max = plot_df[col].max() norm_min = plot_df[col].min() plot_df[col] = plot_df[col].map(lambda x: (x-norm_min)/(norm_max-norm_min)) x = plot_df['date'] y = plot_df[col] ax.plot(x, y, '-s', color=text_color, linewidth=1.9, markersize=marker_size, clip_on=False, markeredgecolor=text_color, markeredgewidth=1, markerfacecolor='None', label='Average degree', alpha=0.9, zorder=100, ) # ----------- col = 'average_path_length' x = plot_df['date'] y = plot_df[col] text_color = '#9e9ac8' # normlize? norm_max = plot_df[col].max() norm_min = plot_df[col].min() plot_df[col] = plot_df[col].map(lambda x: (x-norm_min)/(norm_max-norm_min)) x = plot_df['date'] y = plot_df[col] ax.plot(x, y, '-s', color=text_color, linewidth=1.85, markersize=marker_size, clip_on=False, markeredgecolor=text_color, markeredgewidth=1, markerfacecolor='None', # label='Average shortest path length', label='Average path length', alpha=0.9, zorder=100, ) # ----------- ymin=-0.1 ymax=1.1 line_style = (0, (7.5, 5)) ax.vlines(x=list(x).index('20200123'), ymin=ymin, ymax=ymax, linestyles=line_style, linewidth=1, color='grey') # ax.text(x=list(x).index('20200123')-0.25, y=0.08, s='Wuhan\nlockdown', fontsize=12, ha='right', color='dodgerblue') line_style = (0, (1, 2)) ax.vlines(x=list(x).index('20200125'), ymin=ymin, ymax=ymax, linestyles=line_style, linewidth=1.5, color='grey') # ax.text(x=list(x).index('20200125')+0.4, y=0.375, s='Lunar\nNew Year', fontsize=12, ha='left', color='dodgerblue') line_style = (0, (1, 2)) ax.vlines(x=list(x).index('20200208'), ymin=ymin, ymax=ymax, linestyles=line_style, linewidth=1.5, color='grey') # ax.text(x=list(x).index('20200208')+0.4, y=0.375, s='Lantern\nFestival', fontsize=12, ha='left', color='dodgerblue') N = len(x) every_nth = 1 locs = range(0, N, every_nth) ax.xaxis.set_minor_locator(ticker.FixedLocator(locs)) every_nth = 5 locs = range(0, N, every_nth) ax.xaxis.set_major_locator(ticker.FixedLocator(locs)) locs = range(0, 150, 25) locs = [i/100.0 for i in locs] ax.yaxis.set_major_locator(ticker.FixedLocator(locs)) ticks = list(x) ticks = list(x.str[-4:]) # ticks.insert(0, '') ticks.append('0316') ticks = [t[:2]+'-'+t[-2:] for t in ticks] # string date plt.xticks(list(range(0, N+1, every_nth)), ticks[::every_nth], rotation='0', horizontalalignment='center', # va = 'center', position=(0, 0) # rotation_mode="anchor" ) real_legend = ax.legend(numpoints=1, fontsize=11.5, markerscale=1.25, labelspacing=0.25, frameon=False, loc='best') # ax.set_xlabel('Date', fontsize=13) # ax.set_ylabel('Metrics (normalized)', fontsize=13) ax.set_ylabel('Network metrics', fontsize=13) border_width = 1.5 ax.spines['bottom'].set_linewidth(border_width) ax.spines['left'].set_linewidth(border_width) ax.spines['top'].set_linewidth(border_width) ax.spines['right'].set_linewidth(border_width) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) # # custom ticks ax.tick_params(axis='x', which='major', top=False, right=False, pad=3, labelsize=11.5, length=6, width=1.5, 
direction='out') ax.tick_params(axis='y', which='major', top=False, right=False, pad=3, labelsize=11.5, length=6, width=1.5, direction='out') ax.tick_params(axis='both', which='minor', top=False, right=False, length=3.5, width=1, direction='out') ax.set_xlim(-1, 75) ax.set_ylim(-0.05, 1.075) # plt.tight_layout(pad=0.5) plt.tight_layout(pad=0.1) out_dir = 'Outputs' # plt.savefig(os.path.join(out_dir, 'fig4-mobility_network_change.pdf'), pad_inches=0.01, bbox_inches='tight')Nipype ShowcaseWhat's all the hype about Nipype? Is it really that good? Short answer: Yes!Long answer: ... well, let's consider a very simple fMRI preprocessing workflow that just performs:1. slice time correction2. motion correction3. smoothing Preparing the preprocessing workflow First, we need to import the main Nipype tools: `Node` and `Workflow`from nipype import Node, WorkflowNow, we can import the interfaces that we want to use for the preprocessing.from nipype.interfaces.fsl import SliceTimer, MCFLIRT, SmoothNext, we will put the three interfaces into a node and define the specific input parameters.# Initiate a node to correct for slice wise acquisition slicetimer = Node(SliceTimer(index_dir=False, interleaved=True, time_repetition=2.5), name="slicetimer") # Initiate a node to correct for motion mcflirt = Node(MCFLIRT(mean_vol=True, save_plots=True), name="mcflirt") # Initiate a node to smooth functional images smooth = Node(Smooth(fwhm=4), name="smooth")After creating the nodes, we can now create the preprocessing workflow.preproc01 = Workflow(name='preproc01', base_dir='.')Now, we can put all the nodes into this preprocessing workflow. We specify the data flow / execution flow of the workflow by connecting the corresponding nodes to each other.preproc01.connect([(slicetimer, mcflirt, [('slice_time_corrected_file', 'in_file')]), (mcflirt, smooth, [('out_file', 'in_file')])])To better understand what we did we can write out the workflow graph and visualize it directly in this notebook.preproc01.write_graph(graph2use='orig') # Visualize graph from IPython.display import Image Image(filename="preproc01/graph_detailed.png")Run the workflow on one functional imageNow, that we've created a workflow, let's run it on a functional image.For this, we first need to specify the input file of the very first node, i.e. the `slicetimer` node.slicetimer.inputs.in_file = '/data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz'To show off Nipype's parallelization power, let's run the workflow in parallel, on 5 processors and let's show the execution time:%time preproc01.run('MultiProc', plugin_args={'n_procs': 5})ConclusionNice, the whole execution took ~2min. But wait... The parallelization didn't really help.That's true, but because there was no possibility to run the workflow in parallel. Each node depends on the output of the previous node. Results of `preproc01`So, what did we get? Let's look at the output folder `preproc01`:!tree preproc01 -I '*js|*json|*pklz|_report|*.dot|*html'Rerunning of a workflow Now, for fun. Let's run the workflow again, but let's change the `fwhm` value of the Gaussian smoothing kernel to `2`.smooth.inputs.fwhm = 2And let's run the workflow again.%time preproc01.run('MultiProc', plugin_args={'n_procs': 5})ConclusionInteresting, now it only took ~15s to execute the whole workflow again. **What happened?**As you can see from the log above, Nipype didn't execute the two nodes `slicetimer` and `mclfirt` again. 
This is because their input values didn't change since the last execution. The `preproc01` workflow therefore only had to rerun the node `smooth`. Running a workflow in parallel Ok, ok... Rerunning a workflow is faster. That's nice and all, but I want more. **You spoke of parallel execution!**We saw that the `preproc01` workflow takes about 2 min to execute completely. So if we ran the workflow on five functional images, it should take about 10 min in total, assuming, of course, that the executions happen sequentially. Now, let's see how long it takes if we run it in parallel.# First, let's copy/clone 'preproc01' preproc02 = preproc01.clone('preproc02') preproc03 = preproc01.clone('preproc03') preproc04 = preproc01.clone('preproc04') preproc05 = preproc01.clone('preproc05')We now have five different preprocessing workflows. If we want to run them in parallel, we can put them all in another workflow.metaflow = Workflow(name='metaflow', base_dir='.') # Now we can add the five preproc workflows to the bigger metaflow metaflow.add_nodes([preproc01, preproc02, preproc03, preproc04, preproc05])**Note:** We now have a workflow (`metaflow`) that contains five other workflows (`preproc0?`), each of them containing three nodes.To better understand this, let's visualize this `metaflow`.# As before, let's write the graph of the workflow metaflow.write_graph(graph2use='flat') # And visualize the graph from IPython.display import Image Image(filename="metaflow/graph_detailed.png")Ah... so now we can see that the `metaflow` has potential for parallelization. So let's put it to the test.%time metaflow.run('MultiProc', plugin_args={'n_procs': 5})This time we can see that Nipype uses all available processors.And if all went well, the total execution time should still be around 2 min.That's why Nipype is so amazing. The days of opening multiple SPMs, FSLs, AFNIs, etc. are over! Results of `metaflow`!tree metaflow -I '*js|*json|*pklz|_report|*.dot|*html'Softmax Regression: PyTorch Implementation> The code in this section looks a bit complex, but you only need to master the most essential parts.>> We use the [d2l library](https://github.com/d2l-ai/d2l-zh) from the Dive into Deep Learning course to load the dataset, with thanks to its authors.import torch from torch import nn from d2l import torch as d2l from utils import train # load the dataset batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) # PyTorch does not implicitly reshape its inputs, # so we define a Flatten layer net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10)) def init_weights(m): if type(m) == nn.Linear: nn.init.normal_(m.weight, std=0.01) net.apply(init_weights) # initialize the hyperparameters loss = nn.CrossEntropyLoss() # cross-entropy loss trainer = torch.optim.SGD(net.parameters(), lr=0.1) # optimizer # training if __name__ == '__main__': num_epochs = 10 train(net, train_iter, test_iter, loss, num_epochs, trainer)ResNetX> a folded resnetThe key distinguishing feature of our proposed architecture is the use of concatenation-skip connections (as in DenseNet) or additive-skip connections (as in ResNet), but with selective long-range and short-range skip connections rather than dense connectivity.Despite various parameter-efficient depthwise-convolution-based designs, the ResNet architecture provides a comparable or better speed-accuracy trade-off for GPU-based deployment.Ref:XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB CameraThe proposed network reduces computation by 20% with equivalent or even superior accuracy on the ImageNet dataset, and significantly outperforms state-of-the-art approaches in terms of AP_50 on the MS COCO object detection dataset.
Ref:CSPNet: A new backbone that can enhance learning capability of CNNis more accurate and more computationally efficient than the state of art ResNets networks.which achieve much better accuracy and efficiency than previous ConvNets.A residual network with multiple direct pathsIn order to compare ResNetX with ResNet, we using ablation method. As ResNet is an special ResNetX when fold=1, we first express ResNet as ResNetX, then we change fold from 1, 2, 3, 4 to evaluate its performance. We first use transfer learning, we got pre-trained model of resnet152, then we fill the weights of ResNetX model with pretrained model, then fine tuning them, we got an better result ; Second method is training the model from scratch, we https://petewarden.com/2017/10/29/how-do-cnns-deal-with-position-differences/As you go deeper into a network, the number of channels will typically increase, but the size of the image will shrink. This shrinking is done using pooling layers, traditionally with average pooling but more commonly using maximum pooling these days.#export def get_pred(l:int, d:int=1, start_id:int=None, end_id:int=None): "get predecessor layer id." if start_id is None: start_id = d if end_id is None: end_id = l assert l >= 1 and start_id >= d and end_id > start_id if l < start_id or l > end_id or d == 1: # if the current layer index is less than the fold depth, or if fold depth == 1 pred = l - 1 else: remainder = (l-1-(start_id-d)) % (d-1) pred = l - 2 * (1+remainder) return predParameters:- l : current layer id.- start_id : index of the starting node- end_id : index of the ending node- d : fold depth.Return:- The previous layer id that directly link to the current layer.\begin{equation}\label{eq:resnetx} i = \left\{ \begin{array}{ll} 1 & l < d \lor d=1 ; \\ 2 * (1 + (l-1) \pmod{d-1}) & \textrm{else} . \end{array} \right.\end{equation}get_pred(l=17, d=2, start_id=13) get_pred(l=50, d=5, start_id=8) test_eq(get_pred(l=12, d=1, start_id=1), 11) test_eq(get_pred(l=8, d=5, start_id=7), 4) test_eq(get_pred(l=12, d=4, start_id=6), 10) #export def layer_diff(cur:int, pred:int, num_nodes:tuple): "layer difference between the current layer and the predecessor layer." assert cur > pred num_nodes = (1,) + num_nodes cumsum = 0 # start with 0 for i, num in enumerate(num_nodes): if cumsum <= cur < cumsum + num: cur_layer = i if cur == cumsum: first = True else: first = False if cumsum <= pred < cumsum + num: pred_layer = i cumsum += num diff = cur_layer - pred_layer return diff, first num_nodes = (3,4,6,3) cur, pred = 9,0 layer_diff(cur, pred, num_nodes)Parameters:- Stem : the stemming stage, which accept original images, transform them, then input into the backbone network.- Unit : the operation at nodes.- Conn : the connections between nodes- fold : the fold depth- ni : number of input channels of the backbone network.- *num_stages : number of stages in the backbone network.*- num_nodes : number of nodes of every stage in the backbone network.- start_id : index of starting node of ResNetX- base : standard width of channels in the backbone network.- exp : expansion along with the increase of stages.- bottle_scale : bottleneck scale- first_downsample: dose down-sample at the start of the first stage.- deep_stem : using 7x7 or 3 3x3 conv in stemming stage.- c_in : number of input channels of the Start layer- c_out : number of classes in the output of the final classifier.- kwargs : arguments translate into `Unit`#export class ResNetX(nn.Module): "A folded resnet." 
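    # Overview of the constructor defined next: one Unit block is created for every
    # node of every stage, and for each node get_pred() picks the earlier node whose
    # output feeds its skip connection. layer_diff() then classifies that predecessor:
    # diff == 0 means the same stage (identity-style Conn with stride 1), diff == 1
    # means the previous stage (Conn with the stage's stride to match channels and
    # resolution), and diff == 2 with pred == 0 means the skip comes from the stem.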
def __init__(self, Stem, Unit, Conn, Tail, fold:int, ni:int, num_nodes:tuple, start_id:int=None, end_id:int=None, base:int=64, exp:int=2, bottle_scale:int=1, first_downsample:bool=False, c_in:int=3, c_out:int=10, **kwargs): super(ResNetX, self).__init__() # fold depth should be less than the sum length of any two neighboring stages if start_id < fold: start_id = fold origin_ni = ni num_stages = len(num_nodes) nhs = [base * exp ** i for i in range(num_stages)] nos = [int(nh * bottle_scale) for nh in nhs] strides = [1 if i==0 and not first_downsample else 2 for i in range(num_stages)] # print('nhs=', nhs, 'nos=', nos, 'nus=', nus, 'strides=', strides) self.stem = Stem(c_in, no=ni) # , deep_stem units = [] idmappings = [] cur = 1 for i, (nh, no, nu, stride) in enumerate(zip(nhs, nos, num_nodes, strides)): for j in range(nu): if j == 0: # the first node(layer) of each stage units += [Unit(ni, no, nh, stride=stride, **kwargs)] else: units += [Unit(no, no, nh, stride=1, **kwargs)] pred = get_pred(cur, fold, start_id, end_id) # diff, first = layer_diff(cur, pred, num_nodes) assert diff == 0 or diff == 1 or (diff == 2 and pred == 0), \ 'cur={}, pred={}, diff={} is not allowed.'.format(cur, pred, diff) # print('fold = {} , cur = {} , pred = {} ,diff = {}'.format(fold, cur, pred, diff)) if diff == 0: idmappings += [Conn(no, no, stride=1)] elif diff == 1: # if first: idmappings += [Conn(ni, no, stride=stride)] # else: # idmappings += [Conn(no, no, stride=1)] elif diff == 2: idmappings += [Conn(origin_ni, no, stride=stride)] cur += 1 ni = no self.units = nn.ModuleList(units) self.idmappings = nn.ModuleList(idmappings) self.classifier = Tail(nos[-1], c_out) self.fold, self.start_id, self.end_id = fold, start_id, end_id self.num_nodes = num_nodes init_cnn(self) def forward(self, x): results = {} results[0] = self.stem(x) cur = 0 for i, (unit, idmapping) in enumerate(zip(self.units, self.idmappings)): cur += 1 pred = get_pred(cur, self.fold, self.start_id, self.end_id) diff, first = layer_diff(cur, pred, self.num_nodes) # if diff == 0: results[cur % (2*self.fold-1)] = unit(results[(cur-1) % (2*self.fold-1)]) + idmapping(results[pred % (2*self.fold-1)]) # else: # results[cur % (2*self.fold-1)] = unit(results[(cur-1) % (2*self.fold-1)]) + idmapping(results[(cur-1) % (2*self.fold-1)]) x = results[cur % (2*self.fold-1)] x = self.classifier(x) return x def my_load_state_dict(self, state_dict, local_to_pretrained): error_msgs = [] def load(module, prefix=''): local_name_params = itertools.chain(module._parameters.items(), module._buffers.items()) local_state = {k: v.data for k, v in local_name_params if v is not None} new_prefix = local_to_pretrained.get(prefix, 'none') for name, param in local_state.items(): key = new_prefix + name if key in state_dict: # print(key) input_param = state_dict[key] if input_param.shape != param.shape: # local shape should match the one in checkpoint error_msgs.append('size mismatch for {}: copying a param with shape {} from checkpoint, ' 'the shape in current model is {}.' .format(key, input_param.shape, param.shape)) continue try: param.copy_(input_param) except Exception: error_msgs.append('While copying the parameter named "{}", ' 'whose dimensions in the model are {} and ' 'whose dimensions in the checkpoint are {}.' 
.format(key, param.size(), input_param.size())) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') load(self) load = None # break load->load reference cycle #export def resnet_local_to_pretrained(num_nodes, fold, start_id, end_id): "mapping from local state_dict to pretrained state_dict. the pretrained model is restricted to torchvision.models.resnet." local_to_pretrained = { # mapping from the names of local modules to the names of pretrained modules 'stem.0.': 'conv1.', 'stem.1.': 'bn1.', } cumsum = 0 for i, num in enumerate(num_nodes): for j in range(num): key = 'units.' + str(cumsum + j) + '.' value = 'layer' + str(i+1) + '.' + str(j) + '.' downsample0 = 'layer' + str(i+1) + '.0.' + 'downsample.0.' downsample1 = 'layer' + str(i+1) + '.0.' + 'downsample.1.' pred = get_pred(cumsum + j + 1, fold, start_id, end_id) # diff = layer_diff(cumsum + j + 1, pred, num_nodes) if diff == 1: idmapping0 = 'idmappings.' + str(cumsum + j) + '.unit.0.' idmapping1 = 'idmappings.' + str(cumsum + j) + '.unit.1.' # print(idmapping0, downsample0) # print(idmapping1, downsample1) local_to_pretrained[idmapping0] = downsample0 local_to_pretrained[idmapping1] = downsample1 for a, b in zip(['1.','2.','4.','5.','7.','8.'], ['conv1.','bn1.','conv2.','bn2.','conv3.','bn3.']): # print (key + a, value + b) local_to_pretrained[key + a] = value + b cumsum += num return local_to_pretrainedThree priority levels to set configuration:- `default_cfg` the default configuration, which set all the option names and their default values- `cfg_file` the configuration file, which will override the default configuration- `cfg_list` the configuration list, which will override all the previous configurations.#export def resnetx(default_cfg:dict, cfg_file:str=None, cfg_list:list=None, pretrained:bool=False, **kwargs): "wrapped resnetx" assert default_cfg.__class__.__module__ == 'yacs.config' and default_cfg.__class__.__name__ == 'CfgNode' cfg = default_cfg if cfg_file is not None: cfg.merge_from_file(cfg_file) if cfg_list is not None: cfg.merge_from_list(cfg_list) assert_cfg(cfg) cfg.freeze() Stem = getattr(sys.modules[__name__], cfg.GRAPH.STEM) Unit = getattr(sys.modules[__name__], cfg.GRAPH.UNIT) Conn = getattr(sys.modules[__name__], cfg.GRAPH.CONN) Tail = getattr(sys.modules[__name__], cfg.GRAPH.TAIL) # start_id >= fold + 1, fold <= 6 model = ResNetX(Stem=Stem, Unit=Unit, Conn=Conn, Tail=Tail, fold=cfg.GRAPH.FOLD, ni=cfg.GRAPH.NI, num_nodes=cfg.GRAPH.NUM_NODES, start_id=cfg.GRAPH.START_ID, end_id=cfg.GRAPH.END_ID, base=cfg.GRAPH.BASE, exp=cfg.GRAPH.EXP, bottle_scale=cfg.GRAPH.BOTTLE_SCALE, first_downsample=cfg.GRAPH.FIRST_DOWNSAMPLE, **kwargs) if pretrained: state_dict = load_state_dict_from_url(cfg.URL) local_to_pretrained = resnet_local_to_pretrained(cfg.GRAPH.NUM_NODES, cfg.GRAPH.FOLD,cfg.GRAPH.START_ID,cfg.GRAPH.END_ID) model.my_load_state_dict(state_dict, local_to_pretrained) for param in model.parameters(): # freeze all param.requires_grad = False return model cfg num_nodes = (3, 8, 36, 3) num_all_nodes = sum(num_nodes) fold = 3 start_id = num_nodes[0] + num_nodes[1] + fold + 1 end_id = num_nodes[0] + num_nodes[1] + num_nodes[0] + num_nodes[2] - 3 cfg_list = ["GRAPH.STEM", "resnet_stem", "GRAPH.UNIT", "mbconv", # resnet_bottleneck "GRAPH.CONN", "IdentityMapping", "GRAPH.TAIL", "Classifier", "GRAPH.NUM_NODES", num_nodes, "GRAPH.FOLD", fold, "GRAPH.START_ID", start_id, "GRAPH.END_ID", end_id, "GRAPH.NI", 64, "GRAPH.BASE", 64, "GRAPH.EXP", 2, "GRAPH.BOTTLE_SCALE", 0.5, # 4 "URL", 
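    # torchvision's ResNet-152 checkpoint: resnetx() downloads it with
    # load_state_dict_from_url and copies it into the folded model via
    # my_load_state_dict() only when pretrained=True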
'https://download.pytorch.org/models/resnet152-b121ed2d.pth', ] model = resnetx(cfg, cfg_list=cfg_list, pretrained=False, c_out=100, ks=5) num_nodes = (24, 24, 24, 24) num_all_nodes = sum(num_nodes) fold = 4 start_id = fold end_id = num_all_nodes cfg_list = ["GRAPH.STEM", "conv_bn", "GRAPH.UNIT", "mbconv", # resnet_bottleneck "GRAPH.CONN", "IdentityMappingMaxPool", "GRAPH.TAIL", "Classifier", "GRAPH.NUM_NODES", num_nodes, "GRAPH.FOLD", fold, "GRAPH.START_ID", start_id, "GRAPH.END_ID", end_id, "GRAPH.NI", 64, "GRAPH.BASE", 64, "GRAPH.EXP", 1, "GRAPH.BOTTLE_SCALE", 1., # 4 "URL", 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', ] model = resnetx(cfg, cfg_list=cfg_list, pretrained=False, c_out=100, ks=3) num_params(model)**Tip** : Three methods to get `class` or `function` object from its string name:- `getattr(sys.modules[__name__], cfg.GRAPH.STEM)`- `globals()[cfg.GRAPH.STEM]`- `eval(cfg.GRAPH.STEM)`x = torch.randn(2,3,64,64) with torch.autograd.set_detect_anomaly(True): out = model(x) out.mean().backward() "{:,}".format(num_params(model))Faces of GISThis sample notebook tries to re-create Esri's [Faces of GIS](https://www.flickr.com/photos/esri/sets/72157627065236829/) Flickr stream using public ArcGIS Online profiles.It iterates over a list of common last names from a .csv file and searches ArcGIS Online for users matching those last names. Then it displays the user's profile if the users' profile contains a thumbnail. Organization administrators can search for members in their ArcGIS Online organization or an internal portal in a similar manner and update their properties as needed.**Note**: To run this sample, you need the ``pandas`` library in your conda environment. If you don't have the library, install it by running the following command from cmd.exe or your shell```conda install pandas```import pandas as pd from arcgis.gis import GIS from IPython.display import displayData preparationTo run through the sample, we prepare a .csv file that contains a list of common last names. 
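If you do not have that file at hand, a minimal stand-in with the same single `Surname` column (the column name the search loop below relies on) can be written out first; this is just a sketch, and the surnames listed are arbitrary examples:

```python
import pandas as pd

# minimal placeholder for data/lastnames.csv with the expected 'Surname' column
sample = pd.DataFrame({"Surname": ["Smith", "Garcia", "Kim", "Nguyen", "Patel"]})
sample.to_csv("data/lastnames.csv", index=False)
```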
Then we use a Python data analysis library called pandas to read the file as input to do a search for users from ArcGIS Online.df = pd.read_csv('data/lastnames.csv') df[:3] gis = GIS() for index, row in df.iterrows(): if index == 6: break users = gis.users.search(row['Surname']) for user in users: if user['thumbnail'] is not None: display(user)Study of the iterative calculation of surface Green's functionI will use this notebook to study the convergence properties of the surface Green's function and the problems associated with it because of a finite superconducting order parameter.import numpy as np import matplotlib.pyplot as plt %matplotlib inline import scipy.optimize %load_ext line_profiler def calc_surface_g(E,alpha,beta,eta,eps=1e-8,max_iter=1000,kappa=0.5): def func_g(g,E,alpha,beta,eta): return np.linalg.inv((E + 1j*eta)*np.eye(alpha.shape[0])- alpha - beta @ g @ np.conj(beta).T) g0 = np.zeros(alpha.shape) g = np.zeros(alpha.shape) err_vec = [] for i in range(max_iter): g = func_g(g,E,alpha,beta,eta) g = ((1-kappa)*g + kappa*g0) err = np.linalg.norm(g - g0) err_vec.append(err) g0 = np.copy(g) if err < eps: break return g,err_vec t = 100e-3 mu = 5e-3 Delta = 1e-3 alpha = np.array([[2*t - mu,Delta],[np.conj(Delta),-2*t + mu]]) beta = np.array([[-t,0],[0,t]]) E = 2e-3 eta = 1e-4 %lprun -f calc_surface_g g,err_vec = calc_surface_g(E,alpha,beta,eta,max_iter=100000,kappa=0.5) print(g) plt.semilogy(err_vec) plt.ylabel('Error') plt.xlabel('Number of iterations')[[-9.67076974-2.90198431j -0.03648546-1.25601797j] [-0.03648546-1.25601797j 9.77603149-2.12938467j]]Notes- I am abandoning the solution on fixed point or root solvers, moving to recursion written by me; as correctness is more important than speed- The iterative method is not converging outside the gap; it converges if eta is set to a large valueAs the eta value is decreased, the number of iterations required to converge also increases.- time is 0.50s for eta = 1e-4 and eps = 1e-8 and takes about 4000 iterations- updating g = 0.5*(g + g0) improves time to 0.050sfor kappa in np.linspace(0.1,1,10): print(kappa) %timeit g,err_vec = calc_surface_g(E,alpha,beta,eta,max_iter=100000,kappa=kappa)0.1 91.1 ms ± 2.23 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) 0.2 67.1 ms ± 1.12 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) 0.3 59.1 ms ± 3.16 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) 0.4 58.6 ms ± 9.05 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) 0.5 53.5 ms ± 1.56 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) 0.6 63.8 ms ± 7.74 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) 0.7 69.6 ms ± 6.29 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) 0.8 89.2 ms ± 3.06 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) 0.9 167 ms ± 15.5 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) 1.0 67.4 µs ± 2.82 µs per loop (mean ± std. dev. 
of 7 runs, 10000 loops each)Seoul Public Transportation Trafficimport pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib import seaborn as sns import matplotlib.font_manager as fm %matplotlib inline from matplotlib import font_manager, rc plt.rcParams["font.family"] = 'NanumGothicCoding' # 2019 df1 = pd.read_csv("2019.csv") traffic_m_2019 = df1.pivot_table("count", "월", "시간", aggfunc=np.sum) traffic_m_2019 traffic_m_2019.loc[:, "total"] = traffic_m_2019.sum(axis=1) traffic_m_2019 traffic_r_2019 = df1.pivot_table("count", "지명", "시간", aggfunc=np.sum) traffic_r_2019 traffic_r_2019.loc[:, "total"] = traffic_r_2019.sum(axis=1) traffic_r_2019 # 2020 df2 = pd.read_csv("2020.csv") traffic_m_2020 = df2.pivot_table("count", "월", "시간", aggfunc=np.sum) traffic_m_2020 traffic_m_2020.loc[:, "total"] = traffic_m_2020.sum(axis=1) traffic_m_2020 traffic_r_2020 = df2.pivot_table("count", "지명", "시간", aggfunc=np.sum) traffic_r_2020 traffic_r_2020.loc[:, "total"] = traffic_r_2020.sum(axis=1) traffic_r_20201. 결측 데이터 검색df1.isna().sum() df2.isna().sum() pip install missingno import missingno as msno %matplotlib inline %config InlineBackend.figure_formats = {'png', 'retina'} msno.matrix(df1) plt.show() warnings.filterwarnings('ignore') msno.matrix(df2) plt.show() warnings.filterwarnings('ignore')2. 2019-2020 전체 서울 대중 교통량 비교y1 = traffic_m_2019["total"].sum() y2 = traffic_m_2020["total"].sum() print("percentage :", (((y2 - y1) / y1 * 100))) x1 = [0] x2 = [1] y1 = traffic_m_2019["total"].sum() #2019 Total 교통량 y2 = traffic_m_2020["total"].sum() #2020 Total 교통량 plt.bar(x1,y1, label='2019 traffic', color='b') plt.bar(x2,y2, label='2020 traffic', color='g') plt.legend() plt.ylim([0,100000000000/6]) plt.title("rate : -26.07%", fontsize = 20) plt.show()3. 2019-2020 월별 서울 대중 교통량 비교X1 = (1,2,3,4,5,6,7,8,9,10,11,12) X2 = (1,2,3,4,5,6,7,8,9,10,11,12) y1 = traffic_m_2019.total y2 = traffic_m_2020.total plt.plot(X1,y1, label='2019 traffic(month)', color='b') plt.plot(X2,y2, label='2020 traffic(month)', color='g') plt.legend() plt.title("2019-2020 Traffic Counting") #plt.title("2019_Traffic") #plt.xlabel('region') #plt.ylabel('Traffic') plt.show()4_1. 요일별 대중교통 이용량df3 = pd.read_csv("2019.csv") df_y = df3.drop(columns=["시간", "지명", "발생량", "도착량", "월"]) df_y data, weeks = [], ["월", "화", "수", "목", "금", "토", "일"] for week in weeks: data.append(df_y[df_y["날짜"].str.contains(week)]["count"].sum()) ytraffic_2019 = pd.DataFrame(data=data, index=weeks).reset_index() ytraffic_2019.columns = ["요일", "count"] ytraffic_2019 df4 = pd.read_csv("2020.csv") df_u = df4.drop(columns=["시간", "지명", "발생량", "도착량", "월"]) df_u data, weeks = [], ["월", "화", "수", "목", "금", "토", "일"] for week in weeks: data.append(df_u[df_u["날짜"].str.contains(week)]["count"].sum()) ytraffic_2020 = pd.DataFrame(data=data, index=weeks).reset_index() ytraffic_2020.columns = ["요일", "count"] ytraffic_2020 #df_y["날짜"][df_y["날짜"].str.contains("수")]4_2. 
요일별 대중교통 이용량 비교(np.sum(ytraffic_2020["count"][-2:]) - np.sum(ytraffic_2019["count"][-2:]))\ / np.sum(ytraffic_2019["count"][-2:]) * 100 plt.bar("2019주말",np.sum(ytraffic_2019["count"][-2:])) plt.bar("2020주말",np.sum(ytraffic_2020["count"][-2:])) a = np.sum(ytraffic_2019["count"][-2:]) b = np.sum(ytraffic_2020["count"][-2:]) plt.text("2019주말",80*10000000, a, ha="center", fontsize = 20) plt.text("2020주말", 80*10000000, b, ha="center", fontsize = 20) plt.title("rate : -35.35%", fontsize = 20) plt.show() aplt.bar(ytraffic_2019["요일"][-2:], ytraffic_2019["count"][-2:]) plt.show() plt.bar(ytraffic_2020["요일"][-2:], ytraffic_2020["count"][-2:]) plt.show() (np.sum(ytraffic_2020["count"][:-2]) - np.sum(ytraffic_2019["count"][:-2]))\ / np.sum(ytraffic_2019["count"][:-2]) * 100 plt.bar("2019평일",np.sum(ytraffic_2019["count"][:-2])) plt.bar("2020평일",np.sum(ytraffic_2020["count"][:-2])) a = np.sum(ytraffic_2019["count"][:-2]) b = np.sum(ytraffic_2020["count"][:-2]) plt.text("2019평일",80*10000000, a, ha="center", fontsize = 20) plt.text("2020평일", 80*10000000, b, ha="center", fontsize = 20) plt.title("rate : -23.57%", fontsize = 20) plt.show() plt.bar(ytraffic_2019["요일"][:-2], ytraffic_2019["count"][:-2]) plt.bar(ytraffic_2020["요일"][:-2], ytraffic_2020["count"][:-2]) plt.show() plt.bar(ytraffic_2019["요일"][:-2], ytraffic_2019["count"][:-2]) plt.show() plt.bar(ytraffic_2020["요일"][:-2], ytraffic_2020["count"][:-2]) plt.show()5. 지역별 대중 교통량 비교# column 추가 traffic_r_2019.loc[:,'region'] = ['강남구', '강동구', '강북구', '강서구', '관악구', '광진구', '구로구', '금천구', \ '노원구', '도봉구', '동대문구', '동작구', '마포구', '서대문구', '서초구', '성동구', \ '성북구', '송파구', '양천구', '영등포구', '용산구', '은평구', '종로구', '중구', '중랑구' ] traffic_r_2019 traffic_r_2020.loc[:,'region'] = ['강남구', '강동구', '강북구', '강서구', '관악구', '광진구', '구로구', '금천구', \ '노원구', '도봉구', '동대문구', '동작구', '마포구', '서대문구', '서초구', '성동구', \ '성북구', '송파구', '양천구', '영등포구', '용산구', '은평구', '종로구', '중구', '중랑구' ] traffic_r_2020 # 그래프 # 우분투 폰트설정 import matplotlib.pyplot as plt %matplotlib inline from matplotlib import font_manager, rc plt.rcParams["font.family"] = 'NanumGothicCoding' plt.figure(figsize=(20, 5)) plt.bar(traffic_r_2019["region"], traffic_r_2019["total"]) plt.bar(traffic_r_2020["region"], traffic_r_2020["total"]) plt.show() traffic_r_2019[["region", "total"]].plot(kind='bar') plt.title("2019_Public_Transportation_Traffic") plt.xlabel('region') plt.ylabel('Traffic') plt.show() traffic_r_2020['total'].plot(kind='bar') plt.title("2020_Public_Transportation_Traffic") plt.xlabel('region') plt.ylabel('Traffic') plt.show() from numpy.random import randn fig = plt.figure() ax1 = fig.add_subplot(2,2,1) ax2 = fig.add_subplot(2,2,2) ax1.plot((traffic_m_2019.total),'k') ax2.plot((traffic_m_2020.total),'r-')Clustering analysisIn this first notebook, we conduct a k-means clustering analysis on using included masks of each of the DMN regions.We use the simple neurosynth.analysis.cluster.magic function for clustering analysis. By default, the magic() function performs a k-means co-activation based analysis using the same methods as our manuscript. First lets import some basic necessities:%matplotlib inline import seaborn as sns from nilearn import plotting as niplt from matplotlib.colors import ListedColormap import numpy as npNext, I load a previously generated Neurosynth dataset. This dataset was generated using version 0.6 of Neurosynth and the features are 60 topics generated using latent Dirichlet allocation (LDA). 
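For reference, a pickled dataset like this is normally assembled from the Neurosynth database dump and a feature file roughly as follows; this is a sketch based on the classic `neurosynth` 0.x usage, and the input file names are placeholders:

```python
from neurosynth.base.dataset import Dataset

# build the dataset from the activation database, attach per-study feature
# loadings (here, LDA topic weights), and pickle it for later reuse
dataset = Dataset('data/database.txt')
dataset.add_features('data/features.txt')
dataset.save('data/neurosynth_60_0.6.pkl')
```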
One can also generate a dataset using the latest version of Neurosynth and plug it into this analysis.from neurosynth.base.dataset import Dataset dataset = Dataset.load("data/neurosynth_60_0.6.pkl")Here, we use the magic function to perform the clustering analaysis. For each N we specify, an image is generated and placed in images/. Note that I specifiy that at least 80 studies must activate each voxel to be included in the analysis to ensure a robust classification. For each DMN region (MPFC,PCC,left-TPJ and right-TPJ), the following analysis will compute co-activation between each voxel within each region and the rest of the brain (reduced into 100 PCA components), and use the resulting distance matrix for classification. Note that this step may be computationally intensive. You may use the Clusterable class in neurosynth.analysis.cluster for a more custom analysis to avoid repeating the PCA for each classification if you desire.The number of clusters is chosen on the basis of sihouette score, consistent with the description in the manuscript.#from neurosynth.analysis.cluster import magic #magic(dataset, roi_mask='masks/MPFC.nii.gz', min_studies_per_voxel=80, output_dir='images/MPFC/', n_clusters=2) #magic(dataset, roi_mask='masks/PCC.nii.gz', min_studies_per_voxel=80, output_dir='images/PCC/', n_clusters=3) #magic(dataset, roi_mask='masks/lTPJ.nii.gz', min_studies_per_voxel=80, output_dir='images/lTPJ/', n_clusters=3) #magic(dataset, roi_mask='masks/rTPJ.nii.gz', min_studies_per_voxel=80, output_dir='images/rTPJ/', n_clusters=3)Next, we use nilearn's plotting functions to display the results on saggital and coronal slices. Note that the figure shown on the manuscript was generated with MRICroGL with the same mask used here.# Generate color palette colors = ["purple","blue",'red','green'] colors = sns.xkcd_palette(colors) niplt.plot_roi('images/DMN/DMN.nii.gz', cut_coords=[4], display_mode='x', draw_cross=False, cmap = ListedColormap(colors), alpha=0.8) colors = ["red","blue",'red','blue'] niplt.plot_roi('images/DMN/DMN.nii.gz', cut_coords=[-60], display_mode='y', draw_cross=False, cmap = ListedColormap(colors), alpha=0.8) colors = colors = ["amber","dark blue grey"] colors = sns.xkcd_palette(colors) niplt.plot_roi('images/MPFC/MPFCk2.nii.gz', cut_coords=[4], display_mode='x', draw_cross=False, cmap = ListedColormap(colors), alpha=0.8) # Generate color palette colors = ["blue","golden yellow","red"] colors = sns.xkcd_palette(colors) niplt.plot_roi('images/PCC/PCCk3.nii.gz', cut_coords=[4], display_mode='x', draw_cross=False, cmap = ListedColormap(colors), alpha=0.8) #Generate color palette colors = ["green","sunshine yellow","red"] colors = sns.xkcd_palette(colors) niplt.plot_roi('images/lTPJ/lTPJk3.nii.gz', cut_coords=[-66], display_mode='y', draw_cross=False, cmap = ListedColormap(colors), alpha=0.8) # Generate color palette colors = ["indigo","forest green","orange"] colors = sns.xkcd_palette(colors) niplt.plot_roi('images/rTPJ/rTPJk3.nii.gz', cut_coords=[-66], display_mode='y', draw_cross=False, cmap = ListedColormap(colors), alpha=0.8)a = input() ls = sorted(list(set(a))) middle = '' half = '' x = 0 for i in ls: n = a.count(i) if n%2==0: half += i* (n//2) elif x==1: half = 0 print("NO SOLUTION") break; else: x =1 middle = i half += i* (n//2) if half !=0: print(half+middle+half[::-1])Mother NO 
SOLUTION![CS480_w.png](attachment:CS480_w.png)Assignment 3%pylab inlinePopulating the interactive namespace from numpy and matplotlib**Task 1:** Download and install 3D Slicer [15 Points]# 3D Slicer is available for Windows, Linux, and Mac here: https://slicer.org # Please use the stable version 4.11.2xxxxx !**Task 2:** Let's load some Ultrasound data! [15 Points]# Please download this file: https://cs480.org/data/ultrasound.nrrd ! # Then, drag and drop the file into 3D Slicer and click OK. # TODO Is this a traditional ultrasound image? # TODO: YOUR_ANSWER (YES or NO and WHY)Yes, this appears to be a standard 2D ultrasound image.**Task 3:** Visualize the ultrasound data! [30 Points]# Please use the Volume Rendering module in 3D slicer to render the data. # Then, activate Cropping and adjust the bounding box to show the fetus. # Hint: The MRI Default color map creates a nice visual. # TODO Please post a screenshot of your rendering in the text box below by # using the Upload image button after double-click.![Screen Shot 2021-03-22 at 12.38.40 PM.png](attachment:Screen-Shot-2021-03-22-at-12.38.40-PM.png)
7UT3hOJRCbRlugN92IqMZs4nbiYuIG4l9hEbCU+JnaTSCRDkiMpgBRL4pHyScWktaTdpGOkK6R2Uo+GpoaZhptGuEaahkRjrkaZxi6NoxpXNJ5p9JK1ydZkP3IsWUCeRl5K3kZuIF8it5N7KToUW0oAJZGSTZlDKafUUE5R7lHeampqWmj6ao7WFGsWaZZr7tM8q/lQ8yNVl+pA5VDHURXUJdQd1CbqbepbGo1mQwumpdHyaUtoVbQTtAe0HjqD7kzn0gX02fQKei39Cv2VFlnLWoutNUGrUKtM64DWJa1ObbK2jTZHm6c9S7tC+7D2Te1uHYaOq06sTp7OYp1dOud0nuuSdG10w3QFuvN1t+qe0H3MwBiWDA6Dz5jH2MY4xWjXI+rZ6nH1svVK9fboteh16evqe+gn60/Vr9A/ot/GxJg2TC4zl7mUuZ95g/lpiMkQ9hDhkEVDaoZcGfLBYKhBsIHQoMRgr8F1g0+GLMMwwxzD5YZ1hveNcCMHo9FGU4w2Gp0y6hyqN9R/KH9oydD9Q+8Yo8YOxvHG0423Gl807jYxNYkwkZqsNTlh0mnKNA02zTZdZXrUtMOMYRZoJjZbZXbM7AVLn8Vm5bLKWSdZXebG5pHmCvMt5i3mvRa2FkkWcy32Wty3pFj6WGZarrJstuyyMrMaZTXDqtrqjjXZ2sdaZL3G+oz1BxtbmxSbBTZ1Ns9tDWy5toW21bb37Gh2QXaT7SrtrtkT7X3sc+w32F92QB08HUQOFQ6XHFFHL0ex4wbH1mGEYb7DJMMqh910ojqxnQqcqp0eOjOdo53nOtc5vxpuNTxt+PLhZ4Z/dfF0yXXZ5nLXVdd1pOtc1wbXN24Obny3Crdr7jT3cPfZ7vXurz0cPYQeGz1ueTI8R3ku8Gz2/OLl7SXzqvHq8LbyTvde733TR88nzmexz1lfgm+I72zfRt+Pfl5++X77/f7yd/LP8d/l/3yE7QjhiG0jHgdYBPACtgS0BbIC0wM3B7YFmQfxgiqDHgVbBguCtwc/Y9uzs9m72a9CXEJkIYdCPnD8ODM5TaFYaERoSWhLmG5YUti6sAfhFuFZ4dXhXRGeEdMjmiIJkVGRyyNvck24fG4Vt2uk98iZI09GUaMSotZFPYp2iJZFN4xCR40ctXLUvRjrGElMXSyI5caujL0fZxs3Oe630cTRcaMrRj+Nd42fEX8mgZEwMWFXwvvEkMSliXeT7JIUSc3JWsnjkquSP6SEpqxIaRszfMzMMRdSjVLFqfVppLTktO1p3WPDxq4e2z7Oc1zxuBvjbcdPHX9ugtGE3AlHJmpN5E08kE5IT0nflf6ZF8ur5HVncDPWZ3TxOfw1/JeCYMEqQYcwQLhC+CwzIHNF5vOsgKyVWR2iIFGZqFPMEa8Tv86OzN6U/SEnNmdHTl9uSu7ePI289LzDEl1JjuTkJNNJUye1Sh2lxdK2yX6TV0/ukkXJtssR+Xh5fb4e/Km/qLBT/KR4WBBYUFHQMyV5yoGpOlMlUy9Oc5i2aNqzwvDCX6bj0/nTm2eYz5gz4+FM9swts5BZGbOaZ1vOnj+7vSiiaOccypycOb/PdZm7Yu67eSnzGuabzC+a//iniJ+qi+nFsuKbC/wXbFqILxQvbFnkvmjtoq8lgpLzpS6lZaWfF/MXn//Z9efyn/uWZC5pWeq1dOMy4jLJshvLg5bvXKGzonDF45WjVtauYq0qWfVu9cTV58o8yjatoaxRrGkrjy6vX2u1dtnaz+tE665XhFTsXW+8ftH6DxsEG65sDN5Ys8lkU+mmT5vFm29tidhSW2lTWbaVuLVg69NtydvO/OLzS9V2o+2l27/skOxo2xm/82SVd1XVLuNdS6vRakV1x+5xuy/vCd1TX+NUs2Uvc2/pPrBPse/Fr+m/3tgftb/5gM+BmoPWB9cfYhwqqUVqp9V21Ynq2upT61sPjzzc3ODfcOg35992NJo3VhzRP7L0KOXo/KN9xwqPdTdJmzqPZx1/3Dyx+e6JMSeunRx9suVU1Kmzp8NPnzjDPnPsbMDZxnN+5w6f9zlfd8HrQu1Fz4uHfvf8/VCLV0vtJe9L9Zd9Lze0jmg9eiXoyvGroVdPX+Neu3A95nrrjaQbt26Ou9l2S3Dr+e3c26/vFNzpvVt0j3Cv5L72/bIHxg8q/7D/Y2+bV9uRh6EPLz5KeHT3Mf/xyyfyJ5/b5z+lPS17Zvas6rnb88aO8I7LL8a+aH8pfdnbWfynzp/rX9m9OvhX8F8Xu8Z0tb+Wve57s/it4dsd7zzeNXfHdT94n/e+90NJj2HPzo8+H898Svn0rHfKZ9Ln8i/2Xxq+Rn2915fX1yflyXj9vwIYbGhmJgBvdgBASwWAAc9tlLGqs2C/IKrzaz8C/wmrzov94gVADeyUv/GcJgD2wWZTBLlhU/7CJwYD1N19sKlFnunupuKiwpMQoaev760JAKQGAL7I+vp6N/T1fdkGg70NQNNk1RlUKUR4ZtgcqETXDQRF4AdRnU+/y/HHHigj8AA/9v8CkaePkVE2P08AAACKZVhJZk1NACoAAAAIAAQBGgAFAAAAAQAAAD4BGwAFAAAAAQAAAEYBKAADAAAAAQACAACHaQAEAAAAAQAAAE4AAAAAAAAAkAAAAAEAAACQAAAAAQADkoYABwAAABIAAAB4oAIABAAAAAEAAAQ4oAMABAAAAAEAAAL0AAAAAEFTQ0lJAAAAU2NyZWVuc2hvdCvbIeUAAAAJcEhZcwAAFiUAABYlAUlSJPAAAAHXaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA2LjAuMCI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOmV4aWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vZXhpZi8xLjAvIj4KICAgICAgICAgPGV4aWY6UGl4ZWxZRGltZW5zaW9uPjc1NjwvZXhpZjpQaXhlbFlEaW1lbnNpb24+CiAgICAgICAgIDxleGlmOlBpeGVsWERpbWVuc2lvbj4xMDgwPC9leGlmOlBpeGVsWERpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6VXNlckNvbW1lbnQ+U2NyZWVuc2hvdDwvZXhpZjpVc2VyQ29tbWVudD4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+ConW/b0AAAAcaURPVAAAAAIAAAAAAAABegAAACgAAAF6AAABegACQD3bI84aAABAAElEQVR4Aey9ebRs2V3ft2uuutMbetKADCQ4OBaCgNEEYhBgMIZ4gVkJWg4CHAcIMR6WhdTzpFaruyVhJyQrODE4Dg4J
zkoIdmznDxZGagnNQkKyEJrVEpJa3VJ3v+lONdx8P79zd3e9p/vuu/dW3brnVH1P932n6px99v7tz673Xv2+7/f77drtb1zfST5MwARMwARMwARMwARMwARMwARMwARMoMIEare+4aIFjgovoE03ARMwARMwARMwARMwARMwARMwARNIqXbzA09a4PAnwQRMwARMwARMwARMwARMwARMwARMoNIELHBUevlsvAmYgAmYgAmYgAmYgAmYgAmYgAmYAARqtzx4zhEc/iyYgAmYgAmYgAmYgAmYgAmYgAmYgAlUmkDt1ofOW+Co9BLaeBMwARMwARMwARMwARMwARMwARMwARcZ9WfABEzABEzABEzABEzABEzABEzABEyg8gScolL5JfQETMAETMAETMAETMAETMAET//B29Sa+kwnwBc9f6jINn02g2gRufXWRj/rd3/6X0+Nf/Gg6Xd9Oz1vrpo4EhS054X20Cx2x04jeSytIy22lr6zK6dfrC5fYTlXigKI3LmypUGfc76SeioeO2LK1qfQSbRvb7J5Susqp1JXA0dJPu6tdVBQhMdi6oKiKzbS1eSnEAcZCaBhK7JBCooiP50ogWVLbngSJYhtZhA5ZJKFjtzipBIUhNTa0ywrjUw8DoQHxpEaUiISKqL+hnVhULSO1dI1dYUeK0pCSof5VF0T2E5mBoIF4wXa1RHUQw8L9qNehUaPWh1JxEEw4GA/BhfEQZCiOKsskihABs/dxyy+/KG4gdvDsA288WEFS2tN/RJHs3bWvzohA/n2ThzvoGub2PpuACZiACZSPQBax7eeUb21s0eEJWOA4BDMLHIeA5aYmUGIC2Un7kR/46fS5T747bV96It3Qrafre9oBpaGtUyVobO/oLCe+gUgg133Atqqa0+qytmBVlEddNzuq1UGKCbU61odKC1GDZTn4K73lNGx0oxZHaq9FTQuFQKSRanO0JHa0tMNKjfoYiqzY0u4mNQkE6lQFQTdVI2MjCpIuLZ+WyPE1qbMsQURFSomMINoDRz8iOzTO9uaF1FdR1EJzkDihXV04+qqxscOOLLqzrdSUHY0V6ShKS2krMkSJNdEO0aK12zfKRZH2QhoKekdRL4NUlfE6HDxIFIYM0VnyDukmmveIGh+kzpDKsnsgTOwVpZH5jxTRQbTIg296d34kzlnQkIIUz0cNEY1JdElD9vg4eQJ5DbMlFjoyCZ9NwARMoLoE7OtUd+1s+TMELHA8w2LfV/4Nvy8e3zSBShHIztmLnv8SbQv72XR9U9vCKu2kqegN6RshZPDLUBENitNQeorSPCRwLHflXEuIuLSuaAI16DYbqsWha3LEt0eK/JDC0ZLo0cYRV/2MlqI1GhIU6npda7FzSlfRHQgnpLUo+kLpH8P+esgNpMBss+0rkQoSHZa1nWyXLVslhnQQRRBNSG3ZuhTXahIWhtpadnv9qUifIZKiwba1KhI6UHoKIkSkr6h3Cpn2N87HlrU9CSdElNSJMkHUkL11bUuLYIE4gR2RcqIVpSBpT1EniCtEguR6HSw2dTeINiHlpniPWFFElsQF2uwhcGBnTnPJ65DbjTvJOTLkmb5k7h61R/J9n0+GQF7DPPr4GuZrPpuACZiACVSHgKM5qrNWtnRvAhY49ubyVVctcHwVEl8wgUoSyA7Zj3z/K9NnPvGOVN98Kv3Fs8tpta3in4rGWFdBjf6uY644BR0SCCQEbGg3ldMrSkGRqLGlkI115bAMKBAagkhDNTkoS6q6FYowIAWlo4KdddWtUJ6K0lO0o4qEgIjcUGpIX5EXF9cvSuRQlAVpIXomiooSySHhpKdtWle0xSxCB6JFvaWID4kjFBtlvLrukw4ylGixsyuQMPZwVzxgRxPeI6LUJb7UFc0h1SPVVPC0Ljuaep60l5bsRFwg4qMmoaOxW8AUkYQ+EEhaGpfIFQ1bCCCyFQEk1/6IuiAaIxrolxyxgXiDYJLf6/GrHnlN9hY6GJ0ir4UV9Jf7JKJFN4ookqv27huzIJDXMI9loSOT8NkETMAEqknAQkc1181W6yupa3Bc+2NgcePajNzCBKpCIDtiL3nByxS98Zl0Q6OfnrPSTcudRuopOgNHfVMiB4LGQM5zW4IGQsZIoR2RkiKfulEbpa0+W8KqVof8bgqLjvR6KJGjKaGgKceeWhQ7DdW4kFhQk9DRJQqD+hwSDfrrSi1RKspAIgIpMA1qa6gdtTXYNYU6Hm22iJUIUZPoUFckCGkzI4kG9NtqS4DQ+22lp4wU0REFRdUH9xoam11dmMdQaSOk0FCPo6G+GuqHiBCiI7Cju3JG4yv6RHVAEDWkHeiHNJNGIWroXmwjKw5NCSO0CdFC16kfEkVH9ZoaHAzYly1Ee9TVjtctiTzYnwWJ/BkJsQIRRuIO4kWO6Lj5VS+MtlnoyKkrRXutgcbBRiJmlHejMbQ7jKJIKP7aFOP96n/ksX0+XgL591cexUJHJuGzCZiACVSTgIWOaq7bIlttgeMAq2+B4wCQ3MQEKkAgO18/+oM/mz7zsXekwcUvp69bbafrVVejL8GgJUeZ3VPa+iGiAoGDGhFRaLTw/tP6pnYK0WsFXyhiQpEMEjfwuQdyvhE5uMdWq+2OIi5SU7U4JByo2Ghn6Ywc8K5qYmxrN5ONSBsh5YKoCYqHjiRMdBSx0ZYQ0VFxzyIyQvKCiozW9TPQc7WdQURU1OTcD1RbA8Fjc/28xAb1h5ihPrCmoXEQTmR9/MiwInKjtxrjkwYTO8UokgMbSDfpENkhcaUmW+KaxgjhJYp7ykaJFqgLLT0T4o0AEF2CgIHAMZANWxceUxqM7iPO0JcEDLanpVgq4g2iTFHno6jjEcVJNU4RnSGNZ3eMvE58pLj34JveFTbSHpGjSF9RPRL1jXgSMR4SPaIYa9To0DU9RzuLHifzG3N8DbHAQsfJrINHNQETMIFpEbDQMS2S7ue4CdQe8C4q+zK+RTunPPiGg1X637cj3zQBEzhxArfs7pzyHd/68nTuy4+kU8OL6cYlRUw062lT4oSCM+J1U+IAURuhXOhaV7U3NpWSsr4xUPqJHPrIlpCUIEFBbnm8RyToa7cSeeJRm4NCpDUVGiUyYyinf9BcVeiHnH6ekFChRIu0I6cfUWFHNTCI+iByo0Eqi4QOqR4SIzblvJM6o34lKFAjpLu0Fk79QMVJ+0rRiEgQvR4NNlIbUUMRDQgaETmhcesSSyhwGjuwqM+tbbaQvUTgRAgWpLEQmdFWVEhHQgR1Q6ROqL2iJDQnJsfrGpNWkdLOyvWakkQM6nJInGCL26ZqjKhASER11HS9JlGHYqARuREpOHCiH86Yp/ojeo4zvDgonoq4ggCCPXo43fLql8T4iBWDfj899Ma3ap5qp3s1mOxGgSDk1EjlCXGGGiJFBAusi0O1RkL4yO99nhWB/Hsuj/fgAXfOye19NgETMAETKBcBfCMO+0flWhdb8wwBCxzPsNjzlQWOPbH4oglUjkB2tH7sr/7X6dMf+8O0de6xdFNnJ620KCKqiAwJGlv6IQKj15KosKtvNB
TN0VH6yiVtBTtU2gr3O3KmSRNR/onSVHZiq9iRojlqcripd8EZAYEtWZVUkkYdbRWrFAoc/BaOv34UXyCBQr9KTOioL1JLuF6TWNBWlATFPokK2VT6CLU3SOagNgdFQhFFIkVFtTy2tAMMkRzsytKQANEKkUPCRwgTcvolWqSWUl0UBcI2tOsqSjrU7i1EcYTIIFtJhyG1piU7OxJQeK3JSbDY1jwU/aE2bEXbPfVsdaVIFAkcRHmwLS1zou4IhVhhM5AAUVeqDCktxFYgX+yIDREcRMMMlJpD6gxCBaIQIonMFh9FuogFgom6UfsiagRh4lV//0Vpe2sznfvKV2TOIP2vv/XxSK2JCBXZRj0SUnCKaI0ieiPEDcbUi4jmiMgPpdrINh+zJ5B//+WRLXRkEj6bgAmYQDUJWOio5rotgtUWOPZZZYsb+8DxLROoGIHsYL3s234gPfn4p1Jr61y6ToVFpV2E2MAWrwMJDmyhilCAYEGqyrJ2V9mW8LEpgYMUlq6iPZY6bFequhmKVGBHFFWBUIoLnrRqQejU396S8CEBQT0X0QQSA0hb6S5pm1kVDpXXfXFTEQ8SIYhWaIZgolQU9YkzjleuX8PZp8YGYktD9Sy6S9oBhQgJOet1iRJEMmxvnIsaG9JpZEch1EieUUSFxpeA0JZYEVEVEmM2JS6sK6WlryiOGtvHkvIisaQru4hGGWlLWwqjUiSVuRMlsS3xZCBxoY2Nazel9sp1qb18JgQLxIbYmpZ5SExgR5m+xJM6KTKyj6KnRGkMtPsLaT6RYiKRBXFlR4IJ6TRMlygS+kRUoZ4GYgg1PmAR29WS7qKV+c/+0+ekXq+XlldWVbh1Kf3Pv/HhaLOjiA3ScppEq+hAOMnRI5yLlBYJIbInxkBI0nUfsyeQfx/mkS10ZBI+m4AJmEA1CVjoqOa6zbPVFjj2WV0LHPvA8S0TqBCB7FT92A//Qvrcp/8oDZ/6fDrVHKSefFxEC9JREBsolYkTvCVFIepvyK1utsINV1pJSkuK7FhuE2mhQpwSDIq6FBJAJHJQcJToC5xz6mwM5Mj3VdxTYQnhyFOsdERUgqIpEBBw+HG2uR+pKruCRV3jqHJFREIgVBBZMlR6ClvWtrV1bFtOfEtCSBvBQyktQ4kdRHBsSEQg7YXtYIcSMHY0NqIHEShSGyg7GnUyiAgZ6R73JYdENAh9DrXby3ZNaSdNRUIgcqioKraM+pdU60JiA1EevTOpp61rl07dKAYSY/gMiNtg60IhcEhkYGcWuLAdLik21AjZluhBZMdwgGihKStqo8GPhAsYIIhQZ0QTVYTISghAEd2h57e15S1iTlPsmuJODZD/6m+9UJEssrFb8P7V//F9Ef1CjROEmqJ+CTEvlx9Z6IA5AodFjsv5zPJd/j2Zx7TQkUn4bAImYALVJGCho5rrNo9WW+C4yqpa3LgKGF82gQoSyM7Ud3/7D6VLTzyS6htPpSXSOfSDzhBCgIQOIiuoxUHKyoZ23Pj9PQAAQABJREFUUmnpRlOvES/QCVp6TRFS0jMUQyEHX6KFztwnkqMpJ51ojJ1aSzux9LXTynbqaMeVJYkkcSiKo6koB3nzcvQ1sPpBlMDxZrcVxAHSW3JtCuzhpy8RQ5UutAuKIheWTqW2IjmIumjIUadYKPUvEBK2FJWxqZ1V2IaWlBVsJbKCYqADRZhEIdKhhA1+RkqewQTdb+m+5BCl00hwkI1J89hRpAjzVeVQCRwbxTWN31w6mzq904qkkBBBSomEipEiQ6SEMJOIpNA+tCF+YNOG7BkoCgVRhZSWqA0iGC0JEdQpIc0kBA6JHdTyID0HDhQn3dnWVrqIMdTU0Fgd7UTTXZINEjzasuVv/zcvDMGkL2GEhfyN31TqCmJJiBxIOpcfUaBULCl+SrpM3r3l8lZ+N0sC+fdmHtNCRybhswmYgAlUk4CFjmqu2zxZXXvgje+Mf4Sbp0lNYy586fIXrWmQdB8mcLIEbvnlohjWT/y1X0of+9Dvp872uXRK0QkDogUkcCBRcCAG9BRlwdGXYNGX600tDpz2vsSOpp7pKqWFPzC/cqkfaSMtOejbehxRRGEYinpAICCyQ+krEjG2dJ1NW9cklPDsJiEiuk7NDSI8eEwhB7Gdajj1Gk2evQQORWzoh7Gp6UHaxqDe1o4sipDQjizdFW05K0eeVA8EkT67qShKgpoZI0VJDBTFUdTOQMBRnIYEFyJAGkpJGahtXWKEJBZFr6gmBcoNgo3aUKtjR5EZ1BUhQiPSTkYb4qFkG9kdURZsdysbYncZpYJQf6Om/kYSWYRTfUk4EAf4kgrTl8gx6F+UUKJIEN1uSLwgcqMlRggkOzyveSHwEPlBZEqbLWnVerSl6A+l+2iWaifhRVvcdlVrY23tBkVwnKI3dI30s698vqJLemlpReKI2P4P/9OHd6ND1GT3oGbJSKJJIbKwFa7mo7GLNBhHc2ROJ3XOv0/z+Oyc48METMAETKC6BOxLVXftqm65BY49VtC/IfeA4ksmUFEC2XF66bd8V3rqsU+ls/VBWpZgQGrKel/bpepf9ElH6coxx9knegOvuaedU4jsQNyICAtd6ygSg+cQNDZUtKPfp8Al6S0ke5DqIo1AYgHOM078gOtyrDuNnbQmgWMkESSiESRqkHpCGoc8fjntyxI9JFjQl1I5tre07auEC4qAks7B675SYnb0w64spGI0tZsLhUhJ62A3FVJVovaEhIb2Tn837UZOvcYpUjFw4ilsihgjYUJihzSb3YgUokq0pa1EjJGEFMQUGDTVvoe4Ij5sO0uqS19iRFNCCOk3RGXQByJRiDGK/IAntTewha1sSW+pDfWjXV7gobCN1EHcUG0Q7I9Ulyb1R5TWgqiCyKH7iDrsCEOdD2p0RB0R7QrT7q2pBsfZtCyRg3nUsEPkGip++nf+7nempWW2sZWAop83/Xd/pH6K6Blqegworqrxix1hFDmi8SKaA7Fjt11FP+ZzY3b+/ZonZKEjk/DZBEzABKpHIEfp+R+Nq7d2VbbYAsceq2eBYw8ovmQCFSSQnaWf/OuvSh/+o3+d0saT6ayKafTklZOeIW88XdgeSKxQpIUcannpEiO0dat+eh0KW2oXFN0fSmRoKbpjqV3XrisIFSo6qmf6en5TIof+j4iO2PlEYQpNCQU7cpiLIbSdqQ6e62rcraHKZUrgGMi550w6S0sCB2IC8SOx44ccd3YT2Y6IA6XHSPjo6xrFRtkxhJSObggcEgFkA5ERGB/RCBI4SIuhaOq2foi+wBJsQaggJQSxgugVBBwiV+J52YLIwa4vfQkcFEWtSQRpiwvixYrGo7/tETVD9CPRYUepLsgHEQRChIhEHewLwUWRIrH1q6I7GvqJgqNqi5DQJG0mUlIUNSJWjNunXwkOUjLifogURFhIvhhJlGlIPGGnFKI8WkpR6a5er7N2apFwEQKHIkuaSp3pKLLjlld/V8y3r61lEXf+0X//vhBbiDIhJYcdWoKkXtcQjbTeRHMgdhT8C1HINTqE6YSO/Hs3D2+hI5Pw2
QRMwASqR8BCR/XWrMoWS+B4V3y3rfIkpmn7La9+kVJT3j3NLt2XCZjACRG45ZdfFCOzc8pjX/hIWhptqbioojO0EwrpGluKzkCoYJcUCojK/w+HuNuVcy/nf9AvinyOJC60JE709GwHcUQePXU3SEHh+QFREUQj6GegKA1EEoqI4phL+pCzXUsriuDgIPqDHVpwsknPaMtp78hhJxqB2hvEQiAakHZycVNFORUR0ZJgQdQBf1jHVqsag/oVbAmLaDJA5NB4FEtFYujKznXZflERKtTw0GUmFtEkUhPiNek4y9oNBqGHaBLsV9lPRXAU6Sp9oi30jEIsYoyIKMFGNSVtpqhCgpiDYCJ7sUlzwv6+0mRy+kxdQkpdtsMQkUO3o123regJiRWRzqKL0pF0XzfVNuqCKIqjrXQT+kf66SOEILrIzkjb0X22rG1FRIvqnqh+R6u7lpbOPCt1FeXRlFjyc3/zz4cAhFDR39pOv/7P/qQQN8QNcUtvImIkjELIkf20ZQwEEI7idbz0LydAIP8ezkM/+Cb//ZxZ+GwCJmACVSOAn8VhX6tqK1ctey1wXLFeFjiuAOK3JlBRAtkx+umfem36wDt+O1069+V0vaI3lhW2QD0GnGSc3PDhNUciG3BuKdzZULTCSOe+RAIcbqId5P8qdQJxoxAghhI2Luo+4oiSQ9RMdTbk6VOAlEgOoi1IGxlSz0LPnkLg0JloEVznOlEDctp7KhaKwBG7uMgeokNIIxkpOgKR45Icc+INuqqXocfTlvxyJBPas5Uru4sQfcL7KDiK/fLPiX3YJH1FKS9SP0KUoQ1GaAoRsdBRFEUHm9VWZqVN/Ywi+oSIiiKKASGmo8KoIwlCiC2KcVDfSnHRGIgnjF3Tz6bmScFVIiAoPEptEGp8sOUuDLclJiHEINB0lD7SFmPSdUg9iQgYjcc2vVHvQ8YQ4RH1OpQ+ImM1qoQNteWZOlz1vkkUiNix6wupOy1tNdvlh0KsnZWIQCHy4xd/7j+J1J8uu65oLr/2Gx8RB5af2iFFzRMiOEivaSoqpBdb1kaTp39B4OFz01A7H7MnkH8/55EtdGQSPpuACZhA9QhY6KjemlXJ4ppyovjG60MEbpaq+JCjN/xZMIG5IHDzbvTGy1/8w+kLn/twag0upeuVYsK/zTflrLZVF4PXW3KmES82JCos4XiznauubyvHg6gINAE2QWkqigJhoMm/+utM5MYlCRw49A2c/F1RgwKdS/L+KT6Ks47j35WYcX2X2hP1tKFnLmksCngO5ayPlCKxomgGUjewiH62qTuhgqFDBA6lWVDHo6sxEAvUnaIlZIAO0ksodNrWs9iMKBF1OUipkc0tCTLU4EBkiSKb0Ua/8DiRGRIIliW8IHDAYV2d1HQdoaOPgCBbSNdpB4dBRFO0UTYQMfQMz2GDhojCpAgssCl4aHy1gnMUbcUOteN5dpLZUd87mtOOIj8QPohq0UmCjCJjJIZwRKFT2cicGhI3EIQQSHJdEQSWpmqXFMKHIkKUptLTT3tpTbvMrEa6Sk87zlAThXn9ws9/S9Qu6XSV2qJnf+UfvUM7wGwquuNC2tb2uezY0l29Ma2cvil2c6FNTlkh3YbXxTppLSR0RM0T2eNjdgTy7+s84kOO6MgofDYBEzCByhHA9+Kw/1W5pSu1wRY4xpbHAscYDL80gQoTyE7QK19xZ/rAu/5FGm5cSDe2FL2hf3wnhQNnFSeZpBGiGtYlOLAd7JKiCtg5hRQUHP4tiRzcRwChQCfOO04vdSlIBzmnvArdjrQVneJ1UfdCQQd6r6ZKYymeW5JwQroF17flyUcEgsaTxx7pLqSzkJpB6gkiB87zBs63nP0iPUQRG3qeiIko+Cl5AbGFqBKiRkhJiQgDDYqdCBxdiQm6G5EmkcYiY7GBVJAGkRsaB9GB9hRXpW4HW8NuSGjY0A3ukwaDY4+uwbNt8WFeRFuoa70uUmAQOUhDgSPpPLl9RHAwZ4k9tKlpXGpw0AeRKIgipMgMxBvJBJEmtrTVM7TnddTtoBAoPxIqeB/ihuzpsJuMrqu6RuwE01HKD1EcHUVxLJ9+tqIxzip1ZVnPqoipRugofeXVr3qpdniReHTxQjr35FPpn/yTd6WRttYdqJ5HE5FkVbu0qIgpxVzDTtVCGZJ2I/sQWTiIzsEO6oHsSFDCNh+zI5B/j+cRLXRkEj6bgAmYQPUIWOio3pqV2WILHLurY3GjzB9T22YChyOQnZ+Xfdv3pS8/+vG0qo1G16QlUEMjUWhSji7/4/RHDQ29Jr1kSdEMOPQICTj1xBEQfUD1C5xtRABEA0QGnj0vUWFLNxAReD7SS+SoI34gcRAVsiphA4efK6Sz4MCrc0VzFGIGu4XwH6IHogr9Et1BXEVfTjRhDZG2oedx6kkpQWjABmqJ0C/jXUDg0BvGwn76pc4GQg6pNNTwQIzQ5Yi64DneYz/1RLjfVWRFS8rIusJPiDzBBt2IuiIIKZG+k5Ub+pItwUl9xNhipyF5SmNIEJF9JHTAhTFIRWHciPDQmUAUxBpEnJHG22KuYgC7EE6wjagOnRFxOtQskY2kzCCSkBITKUESF9jmdiDxg6KlNdpJ6Fg9+7y0KpEDsYNUlbbOCBysBhEyr/iJ56SL58/pI7GtXWwvpt/9V5+Iwq9N2q5eFzu1kLJCbRR2YWEt+CxwkK5DZA1pNJ2ets2NWWvumo+P2RHIv9fziBY6MgmfTcAETKB6BCx0VG/NymixBY7dVbHAUcaPp20ygcMTyA7P3/qZB9L73v6/p+72+XRjR7oGaR9yshsjdtKQHyw/lcgBIiw4enLWG3LIERF2dB1HndcciAQ42RTJpO4FIgJu/EZcI+JAbq+89YiOwAnmun5qusZziBI8R60KRI5LivygQCmpKxQfRSLBDiIoqF1R9EdkCAIGtR8Ym0gS3GilhSBgyATuI2RsSiBg29qI6NA1zGvs2siziCZYTPoMIg9jIiBgGyJMCBy639WkEUWe3NqtlyEBAUki7IKL3pGSEqIMEDUG82QMUFFctUOx1l0hoKvXbbXhOVJzGEdN46Af/R+KR00XY+tdtWGNmCciD0JQsNczoXtoFk2JF9hI9AQ7rrCjC3VIiOKgfgjlTyOlRcJDW0VI21GT46zSVRTVoe1lu13V5hCz/sbFNNCuOqzZj/+1P6/dcpSUoznQ77/8t4/Eji5tRX/09AypMpqUfhCcirSkGmfZOGSXGo3VVVoMRUqJthlsb8icbqSxFLP1r8dNIP++z+NY6MgkfDYBEzCB6hGw0FG9NSuTxRY4tBoWN8r0kbQtJjAZgezo/MDLfjw9+rkPpTO1QXruiopmDvppS/9q36rJIZXDjMNNZAESAk5uT1EBOOnUxCSGQMknkYZBhAJaAedIN5FfjRCAkx/pGmqL+IBYIAkjnPgQKPR+iy1k5bCjoawqHYXIBfqWaxzCAs4740c9DrWRriA/WoUu1Q4nv4gkYWwJA2DROHFPL2VCRJsoiyTSU4hEocIIu7ygHGCv/g8hQ1ci2oBzTxEa0a9eYyfK
B+fY2UWviYwgkoRbjBXKiNrqbUS0AIn5EL0BA64jStA38+OHmiA82iKCA3t0hECkVogt0bnmRPQGER5EQsR9dTZUXwhLRNLwOoQdPUf0B9vcErHBPewoIjiaWg8VG9X6RQSIjON6W4VH26q1UW8tpbbSTuoUHe2cUvrK6VRTrQ31HjVCmi12a+np+Ub6qz/0dRK5ZJHGYDeZ3/k3f1Y8q/ofFD2NyTIZ2c78EM2o3VGTsNPqrKVWT6kwWpDB1kWJG3qG7X/Vt4/ZEci///OIFjoyCZ9NwARMoHoE7KNVb83KYHFNe8vz/XShD74Q+UvQQn8EPPk5IXDzq4piVUznJS/4TqUdPJme01PqiUpm9vsbaUv/so5jilOdRQgFM0TdCWpSoAjgaBNpQKQFTj6RHdSz4A/KwlXXC3VCFAARCogIFM4kQoPimOc3tXOJnke4QKTgNfUxiHTA+SdtA+dcvxZ+fthC0dKh2pEqokgIOdva7EX1LHRTAzA2dvNMYUcRFRL9q1+iQRAdiCxR13Lw5fwjJOg1kSV0wxyK+A/62n2FHbTRfVJkuIo4wdx5zXMURo2z+iZlh3tEW1CANSI5dI2xQ4DRa0QR0nUQNuASKSh6hjGIBBlIwKE/olz44QZ6RUTOaK4hYmimrFHMV21oVsy1EEIoGEp6C1YS0cGWs6SuhFCjxohTIxUvjZod2j6WnVa0xUra6Z5S0Ec3Uk7Y5rYr4WJ59XRakujB2lCwFdnk+77/67UpSy91Tp1WPY5T6X/75x8I+xFXBFYDa/tddm9R5MZIohmLBue6Cp6yRS21VHRT41MYVX3y3sdMCYz/WcDAD/2Kt5ed6QJ4MBMwAROYEoEsXNtXmxLQBehm4QUOixsL8Cn3FBeGQHZqfuQHXpm+8NkPpqaKiT67p9SFrfMKIdiQ8yrHFAVAjjSiAI42CSI46DjbRD+QurGuCIYtpR505HnHrh+6tiwxg2fUVC6wHGg5xKRKxC4iuoZTz1aoRIbQF34wESLh+Ov5ogYF6SXyh9Uv6SSMT4QCJmEDKSRbSl+JlA/d66gtzj1jUvuDAyEENxxBhbgOalSoixAqEBfoizFlqsSVIvoEsYE2UTBU97CKSAPsQxQIO+hcd5gzUgjREAxMn0u7qSZ0jrjBM9p8NsbCNmznGgVGu5ogtuaUHAQO7EEYCSGGxhoXwUKgJGJQ26P4wRCEIw5qgtT1E8KQ7mMVkSX0g1BSzKXou6V1YFcT+uRxPRa7tMB5KKFjtKO1E7Xtei/SSqglQhROS3U6lpdWVIhUtTkUbcGDoyhAK8YSQ378P3+xhIyOxLGB7Bml3/2Xfxo7rkBslZ1WemeUyiIpRs8MJXSw5W1TBU1XtBNLQ5EhmowYFfOJSe3+UkTXFOtOIVcfx0cg/5mQR7DQkUn4bAImYALVImCho1rrdZLWWuBw9MZJfv48tglMjcC4I/O9L/rhdP6pL6ZVpaf0tL1rksBRH25pLMQMogCKNIi6/tWdnT8QH3DOcebREUhFwVle1rayONL8IDrgqOOvUpgTZ5x/9edA5MChJ/0iojV2nXgEAiISaMsjIz2MYBKOOH3ph6gI+e1y8ov3aiY/Wyks1MGQmEEbam0wLl0hJCAUYCf34oZeIGZgPyIN7fr6hRgMrvGeaIsQNOI6JHbH54V+EFqo7UEUCH1FDRENqqnreQkXYkF0B+k1zAU7QizSM7ymPUIIER/MkQNBAoED0QJ2CBPspkLqCWMRoYKV1OnATrggOyEQIXCEvWoLj0KMKTjqbcGQvtUvUTFc0//CofWSyIKIQZgHIgfpLet97ZYzYD67zyhaI4qJIm6oSGhPqSSN2AlFspHWsa+ipao0GkVE/8oPfaPWYpA2Ll5Ml556Mr3/g19RbY8bUnft+tTRdrQquqHoDcQTPaN5LGsHlu7KdbIlVkhWFcdo2I++RxHFIltkAxEe2B1RH7mhz1MnMP7nA51b6Jg6YndoAiZgAjMhYKFjJpgrPchCCxyO3qj0Z9fGm8BlBLID81OvuCN99lPvS4P1c6ndv5BGG+dSc6joDaUVEDWBsyw3OaIOcKJxzHGeSU0hHQVnG9GAKISVtqIC5LASmUGNCg6EAEQLXda18KEjpYReiW6gL2km4byf3yJipHDGERxw9nH+qU0RRTLpUNfxcFc6qhMiYYF+olClXpPyMlBECHIBYgqCAc56nZ1NdJ++MQshg3H1MgQThAeEBu6rdbRn61vSZRBHOBiHfhEV8MMRWRAqmBOPca04S3zQs8w3bFen2MkPB6INB7z4ieKhuoY9RMFwSFso5rXbFhsYu6NO4YutRFRwMEfmBnPasF7wZjxpLLFeWA5DxogdZ9QHVsAIkakgIaGEDvULqSdSL9RPwYDnYbSjVJetHe240l1L1ykdRb2pWOsg6rXweeB+k1QTpbXUVUD0m7/pulSXOLG0spKWTp1J7/njdYkjK6mj1JSadl5RXkuIJS2JHVBuKyqkoagOYA5VA2awva7zIK1feCyWvaX7vZXTarfy9OeBQqVXCiNMw8d0COQ/J3JvFjoyCZ9NwARMoFoELHRUa71maa0Fjjc5L3eWHziPZQLHQWDcafkrL39FeuLxz6aWtoPd2d5M2+tfTm3tnNKlsqhcWGpb4A0TkUFkBo40fjfChf4Ph5t0E9rgaBKZIb9ZzjFOdCFsIAogjGzIGUdgoBYHDi3tcdqJTsAxp/DnhW2lO9CX7odYojHYjnapo+1M1X5d42MZNSdw8RmPKBBsiUgCiQGx+4gcfIpx0g8iBP1jB1EQnAsxQyILN3Xg7BP5wRlhh/lhI88xdxz4IuJD9/QaEYG6GoxFHRDaEXmBEMCYQFqS4BMRHogQ9MFQaoOocEoCjZpIOqAIqfqifxmFkIOgESKFrmErP/S5RKSF7hfFSBUBw1zVN/axVlsK+UDkwT7aF/VQZJPGJFJDo+hZdk3hWjwS60l6jbrSobH0wzyL9nqvlJKBrhZKjkQS9TNMKgiqehlsL4tosqlniQCpKfpnR4VEo1CsxI6WhA7aPP/5z01rZ69LX/sffkN69vOep0iQVmwx20TQaK+EKKJuZINSVySIkD7DIg+VJjXYuKAdWy5INOnoknatie1orw++LdX1YDxAIn5cKXQgfDmlhXWd/Bj/M4PeLHRMztQ9mIAJmMBJELDQcRLUyz1m7aEFLTL6GqWmvMHiRrk/nbbOBA5I4DW7xUV/5qfuSY98/N1ppGKiNf1r+dbFx1Vo9KnU1PaeS1ICECj4j/9XqCshPxdHXf5kuqTQCBxz+cIhDBCgoP/jPQ1wqjkQKXCocYSJKuAy0QerXUUCyMvGmSYKoRARRumiBI7oh4f13CU57KRyrHYVGSDnfhPRgv71bIgGqsFBCgfPI56s6/ksOoTAom4QHxAeopinxsMyno0X3NcLHH7SbhApQgDQBeZ3SeIJdtMe2xFjIgWmMI9fox3bx9IOIYS6IWwBGwKH2iNY9DUPOmBu2AIfojei8Kqu9WGg8eifuhmIQTxHG0SSqIGh1yEKKRoDrkyhr6gP+huofY4
42dDaIAAtqV0v2iJuFOvDdrgFv0LMQFiBF8JIXgf6ZctebCMtBGJIRURlRI0OiReIUdRegVVHgkVH6SqyQj+K3tEawWFT2sNQqSt1bQFbV9THD/3wS9NfeP7z03KPiA3Wq5H+n3/9qUQEB/U9RkpdGSgtZUe7tgz7W5q36sFsnks1xDcJKCMVPu2QzqJdXlpKk0F8iV10FDUyktCBmNImBQYr1Ac7tlDboylxJCJDdMfHZATynx25lze4GGlG4bMJmIAJVIoAvh2H/btKLduxGGuB41iwulMTMIFZERh3UP76j/5i+srjj6QVObHbG+fT449+UkLHJTnHAznaRXoKcQw4/ri5pHogDhTRDTjIOriggzY40UVtCL3gGcIkdOYHR52mRIDQhoKjpHJwGwcdEQIHPYQHHtcPosmXN/tpTREcUQtCzxVRERI31J6tSnHWv3JpK5xqPH6EDkQKohsQDeiTSA/EDbZ71f9hJ9cZhV9jNxWdmR22hTijdvTN9q5EiCAC8CDzEJqInBiGoFHUzWA7WeYfO5+oLa+LWiDF3HiPgMGcsCPGUV+ICLwe6ZmIDNGZmiWIG9jWhpPu51ok2BQ2yD6iN3iGfjlCJBJfCrciiPAchjAfBKGWxqKpughxqYi2kRggQeKiODMmggfzyrbCnUKqsEG0iDKnEjpI/UGEISKGqBLGQxRrKfKH2aPnkI6zISAbquWxI3GitXpDWlOx0R/70W9Lp8+cSZe2+uni+nrs6vL2dz4RqS0DCRs7g/XY7aWldJVRf1OfMwQspS8pNSV1VOdDYklTQkZsV6udWbCRqA9s1wMiIXtl1xCxhPwniSMNiSwWOuJjMpVfxv8coUMLHVPB6k5MwARMYOYELHTMHHnpBlxIgcPRG6X7HNogEzgygeyY/OIv/Gr6s898QDUrlI6iiI3HPv+RdO6pL6Sh0lROS1DAwY1/t9e5cN0LwaGo2SA/U04looZOEXWAQbjZ8itDYMCLJoUDJzuiOHRGVKDfEEx0j2gF0i0QO4gUoVgpYgSOOu1wqC/KWaePcNIZjAF08L7XLopUMg6iBuLJjvpgBw89HuLEEJFD1/vqhxoiPI6djB126Fykg9BrBFmEc4zAg90IAzjqPIMzj1041Fw7J1EAYSAiJSQedJWSQjQJwg3tL6imCJERvAmxQdciHUXv0X4QL0i/YS6ICuyCgsATtUOwFUbiE0KL2iACETGBmMDrDe1WckHFVXUrRA91H8IOZ/pHYFGzoh/Z19ZYpN9E9IdEDQQOBBa4EfWxqR84EYERM9CzwhdCS2gHeo+gwu41RHQgHOjXGJs1ROShrghpIUU79afnN9V+fSSbG9TcWEvL2ma2s6QtaOvt9Jf+0rNjLt1OJ3W73fTuP3oyBKCWokJaFCFVXwP9wJw0ldjyVgLLSD+IHL3VM1ojXZfAMVQUErYjjPB5ILoD+1i9KCJbU8oQ29VSA0QcfUxOIP95knuy0JFJ+GwCJmAC1SJgoaNa6zVNay1wTJOm+zIBE5gpgXFn5Gd/6r507skvpGU5pOtPPpo+/cl3py3VOzilohs4vrh/pD/s7Dq7ONQcehv3KC4qvzWcx2U5tjigRRpHIWhsqT1OJZICzjYH9/ErERforaU3OP5rqkfBmFxT9+HwXpLjviHHmzoSCB04z0QKsKNLRHnoWQSCjpz2TksJGXj5/Gi8qL3BWBq/puc51hXlcUliBOIJESMIDWEXc9BjOLzFNfpQe3n2YaMaca+HQKDxcv2LED10nZogGxIyiMrAuacdD2LDpsQCCqciWGBFRHnoXtQi0XuEEQQU2MZY6g+xg61tKRyKmBE7nsgk6SbRjgKjiBREqBCpAQtdCo6sGu8RoZjFEqKD+LBOTdkVES/qn37ZnYXaIbRnfISbbfrDVt3HXn64Hrz1GmGDvnhPtAft2CmGoqSsH5EwzAfupNnoU6GnFMURGo9SdQirUFoKW8wiMjQkWBDxwm4q3/yCr0mrq6vp+rNn0uqpU+ld73o8mMTOKarlMWp0ZZBEDfWPiBSfLD3fXb1e29ae0Wf3vIqSbihSoycbBxHN0dAYLbXRm/jsSPHS8C3ZqJ+I/EBO8zENAuN/ttCfhY5pUHUfJmACJjB7AhY6Zs/8pEdcOIHD0Rsn/ZHz+CYwPQLZCbn11v8rfeqj75Kz31d6Si195k/+ID31xOdV92Az3dBrh6OPY8q/hsv7lQGFw8xrIv7ZWhWRAkcdRxqxA9EAcYILiB34sirBUAgWOkfqA06vriNqRPSHnqH37BwTBZCLaz4pMYKbOa2ENlFrQjaF80wfcqw7RA3Iead4ZkM/8qVDEEEYWFNh0k3V6OBnS0ID28jyOoQPjY1wQlQJUySFpaMQDSIPqKWxHeMUzryms3ufVBk59RpvdbmTVpfakbrC85HGIoGAaBEccKIniMhA5EAMyPVHIhIBNhoDsYT6IlEsNJx2Ii/EUc9F6onmwoHoAVho4eCTokLUBSkqIUToNuJPCBXYD2c9sSJxg/5ZlxH26Kez+35dBTIQYbAL0IyBuIEYwzjYwXpgB+uQRRi4MFeiMrCKdhEponexHvos0J4dckLekKARaTu6hlBEdAcRFp1mU0KDIjGI0FDcjvSstNPspRd/+zekm268Ma2uranOxnJ6yzseDZGirjobLYkSzGyoPjkaqt3RVTQIqSc7qtMxHGymviKQEOZiG1lxrOmZTlvFTKXmEL1RV60P0lWo34Et3m42UE7tl/xnTO7QQkcm4bMJmIAJVIuAfcBqrdck1i6UwOEP9iQfFT9rAuUiMO54/L2/++vpS5//aOpoO9jNrzySHvn0B9K6toclNQX/FKcWvzfSReRSstNJM7YKKZxs2nDQDqGDtwgTeiScWP0ixzXexJamUWtDNxET+Ff+aKjbtKcPdg/hP4QPPRrO8xMSOJbljOMkE+nAOEV0giIKNBY7kiBikK5BfYqaHHzSEpbYPlb3EAnok/FIu1jfUAFLOe+MgaBAvxzYQIHQjd3dW7q6r0fjOk46gglTlzYQYyGk7KgNc+ooJeXsWjeiNhAy6Avnf11iyraEFLasBQMCB8JBuOW7XGiMOMNbokp4nogI+mB0XYrX3EdAQICIdBm9p7YI0RsYyjyYE6IHaHmOCA4eZu4IQ0Ru8BPRFXqP0PN0UdFdTjyb2SCQsP5dXSxqlyglR+2KYqSFwMScEFiYAM8iXhDJEcKOLhINw03WLMzRfS2T2oqCXrdV/JN1i7lrKrE1rlJI+kpbqTeX0wu+5WvTdTfclJYU1XH67Nn08Fs/r2c1R55jxxRFajS72nK2u6xxdg8VFt3UdsebikSqS/Ag+oO6HU0EDf10JIiQ5lLXsw0JHU3e87lRvz6mS2D8zxt6ttAxXb7uzQRMwARmQcDRHLOgfPJj1LQ1Gt/aFuJ4zateqC8l71mIuXqSJjDvBF7zD14YU7z/gbekj334LemJLz+SlrUd7PnPvj+dP/dFOc19OaeSBHb/hIt/udcTOMnsxJEjDXCq5c+G843QQCQGqSbIG9TQCBGCkdQI8aErJ5
*[embedded screenshot data omitted]*

**Task 4:** Segment the fetus! [40 Points]

# Switch to the Legacy->Editor module and click OK to create the GenericAnatomyColors label map.
# Select the PAINT EFFECT and activate THRESHOLD PAINTING.
# Hint: Reduce the RADIUS for a smaller brush.
# Hint: Adjust the lower threshold so that the black background is not included.
# Hint: Switch to 'Red Slice only' view and use the arrow keys to navigate through the slices.
#
# TODO: Mark the fetus by drawing on each slice!
# Hint: Start at slice ~83mm and work through to ~112mm.
# TODO: Take a screenshot of one slice after painting and upload it below.

# TODO: Use the MAKEMODELEFFECT and click APPLY.
# This will create a 3D mesh of your segmentation.
# TODO: Switch to '3D Only' view and take a screenshot of the segmented fetus in 3D!
# TODO: Upload the screenshot below.

*[Embedded screenshot: Screen Shot 2021-03-22 at 12.59.11 PM.png (base64 image data omitted)]*
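If you would rather grab the requested screenshots from Python than with Slicer's screenshot button, the sketch below shows one possible way. It assumes the ScreenCapture module that ships with recent Slicer versions, and the output paths are placeholders.

```python
# Hedged sketch for capturing the requested views from Python.
# Assumptions: running inside Slicer with the bundled ScreenCapture module,
# and the /tmp/... paths are placeholders for wherever you want the images.
import slicer
import ScreenCapture

layoutManager = slicer.app.layoutManager()
captureLogic = ScreenCapture.ScreenCaptureLogic()

# 'Red Slice only' layout, as suggested for painting slice by slice
layoutManager.setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutOneUpRedSliceView)
captureLogic.captureImageFromView(None, '/tmp/fetus_slice.png')

# '3D Only' layout for the screenshot of the segmented fetus mesh
layoutManager.setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutOneUp3DView)
captureLogic.captureImageFromView(None, '/tmp/fetus_3d.png')
```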
JGFqR5cv74SB09GuH4cTHOtJsAcwTNznqVbrqOZHorPTLfycq7Xzfpi12gesBVuP/L06mPpYZ4Y2+WaJzTTeSUqS1+uv/3mf6wpsTwqJdBOCZTAuR0CK414O4TVyaLdIevv//JIjbD4iLMDD4yrXryxWMEiB071uuUGuV5+RXqV06jIy04EPO28mrYAooFD0nE2AsDRoXl1r+sOJaVbeYAeI1GqBx28Wwyq507IaAJKBW5je3KExZdXKcAmjtQ+tEec2qeUpVX0wcUaeXZn52XgjdEzgvKsPdGMbXj//eaGKRoAUbVTBZpFs+r383Y8zvtU0QdrL9WH19hv0uALPgXYrRwvXgGeuYYAsrwcyGoIAGheHGQFjaULlobPnf25YmTcaMC/94uNVHbY9Hj1uT1f9714XlpNQfNUbW4rAVB11XeuCl86/vr2kFurynaHDVirOtxJZriRYsoPgJkR5LMOPivssNcOuh4ZXeYpCC8RMnWDEWk+gGh2JmT9cAA0166mfjBVA9DsH0acow3g+vfrFl1MwfSM9w0oJ1WxcoSkZ3bMlCrqSM+UW/lFOjqELhMA/JRHj3jxMbdVRQn7pplY3nlUmrIKHtJ55Im6JXhGCmXoiARK4NwOqZWGvB3C6mTR7pD1j37zGY3YjNh6RBgwfEAx2ozDiHOcMbbJ6HOshNodS+VqZxepmY9puVhTwVptOnB0B1YBbI2wQGsTiYqmAHuMbtKVVI+TGvWcvjsj9bFaDtaOt+fOFCdWAZyNdoXDK5qTbHPa3h50eKlJ4sYJE6BpDtSnaQA6maJCWZwxAJb6qU84eT4iUvnbqR9G0vsDeS8HPbVlMceU8XJejzYgy00FZWmf5eWe+ttTmnYBCIH/FctWaBdBNjZ59ZVXtYQY4PmoM48SYIEebdCvB29+MOz/3nNhpV3hT0+codUUaJPRR8APfQG0X3jchRW0uurJTUUjDTzpDhvQQHbXClJ3Pnu2XhDkuvrdJb/TU47h7x6eADIAdPG8xYGXV7m5YvUWpgqxOQo7WPp1VL00Xbo5R6ci+HR9RAcuPelSAd4vXPwFyUF6lEvEyrjuUi+3D657FOdYOhH1T22YzaIutgvg/4uzfxG+9uumF2mtivSQGN6cP50rU4Qhb2cU0mEJnAsxlN8dkMB6toWxXdJlaE0Cx/97Nk3j++U0jdbk1Zn87pL1j3/7WT3KxLHgOHAOBOYLylFEI5uMrWxuk+FVunc0GmM/rYjbqGG5M/H6qW1PMFo4FgecJMtZRIcmGi20xxQNRpx9JMmdiMjHek4fh0hwoCiHVCUDyrijS8desaiu7ySrKCfOvQ+JrgNJ65/mYGdOEHBKORwpPAOc4QvwSh6fHDTr2MrHxmktOUy1ZxneLmWcP7l0nLa1U13Oz/3aUFkrxwje9MnTw+N3PS4QyyNzgPOqFau0c9u2u22raRmMpOH42Wr79N+cLiCgGxmb7nH9xdeHLx5zjdht79edU8/WVCMevQOcofnYHY8JiOe65HQvfYvYL+f9rcKvy3dNxmxLzkY7gOAHbnxAN26oY68BvUL/If2Lec+mM0vnL9VW8cx/ZsSZVTZ4kZVRaIC020C9P8HSdHHEmRHlx659KMxdukAj2zxZQVcpv+fH9yx02mxRMxNgPOTAuZmMUM+or8SyKRFgSxfjTbCns0snq9Hw+cARH2gCzrGc6Evl7YsQIx26XbCTfi+VUzaQSRnaJ4ESOLdRXqURb6OgGlCsu2T9s+s/L+A8YpsReksdgw0oktOIo4p1ARf9rGOYkwjMWbQlCOzWKejOxLNxHDglgU53UMaHA0Xle+GqmDxAs0Amj2BjH1Us8ur1kQMAzOmqryYf8ZM5IeeHOD9OTecyIjHWhY4+ODqna9nQYCRMRemfBfJJA3jiRPMnAvBIf3xKiTv4nGb6nbK2m6VFxrwfxNBQOfgrKlTwDG/ws2zxMgFk5jQzuuzglWXneGmQOfS8wMVyhzOfnim5fvLLn9RjaEDHglkLwp5bnga5Dof75/6PpmwwT5WNMd47+JQKWq5TeWItQHrjQ6dpM43R////LsS2QRpU9gQWOrAvKh3yAf9T27IjsAEHz0b1n7F6FemXuZtjP0+mzf0FDHPh+30P5VGwuAIOrJVgIAIzKnzpz0wH5cgDLjC/AoAGkLU2Tn4EzjzPoaV8Aie5i1+LAwAK3i40KqpP9Zywk0G8j1PcriwT7oofcSLJ62YKwClNnawJMj6GHxxartc4ksvFYe7ehleLn0J/o0i2vxEwE6ZVMfeNVLiQZ+eTqFldzPk+Y4OfJDEznQX+CJviKrM/OK9VvvO5SPPJEj9Hlq8dJDL9lLrf+ucz9dkgycV+57z6kbvNAPl9Zdc+ch+ogQW+HYssFiq9v8vZeWbJG/iUigAM5NpCEbSjUaquKsrXz4kiCfgn3mrmd0dBKffvWtEL732ZUVgElKC+WVgGdXZCozKjbCUGxc3Y+iEcAtz5sow5Rv8uNQ+ig3JnOUqehxj6LkIwaWbtrb06TQeTkJRUwYn6Dla3AoworOyi131LEcVBLG/3pxIl134i5XZABocP4Iz130Ov953TKx5nmU3sM8s5OxcJdHSsfL8+TVXdN+4mnUrkYXy+OksZPEB3tMsZ59Y9OTPVmdrw+NPVUnPWA1Ezi2fpYfRxgBEESREUAKPrBcYoEc9q9h4eD//dtiyxw67szAOeKALOic/uPTlWd5GSjeXPn6fOFIu/dYsE8Jz0Mu/l54+5W3w24H7Rae+vdTOhu440YdlQewx35ytmC89txrOjMYwNqlTxdtVaANZOlljJns0iMeGXvISyDSAa2VjPzUB82vvke/oc/Y+GS8k48/gXcD7rS5LNcWBzClPMYrdAnn9Ajy8pcCZ1XS6OLoj/RF8uT9kDKNPxYA8MMxeVjWWfzjKAeg6bzAD475yMcQT7L4E2i2fABuyoAXFuOUSVrowifbWsY+M7ZOX+tToUv588hrp4f1O6yv9vlk/idh6w2OWUqKWfbr7jos9Nu+n7aC0JbFVo1lItYmQaRZAOfGDuYaU09r7LJmEu7Uu5Meyw7/73BNmoACFAjKBuWBcmIidcVYye9xtB0Khv/yR8WDvxw0Z8kWpVWGCj+uEHlDHQsZ+QDMKEksSIABlCKKH95Jz4tn8L7RFhtVoGhBkb/ySPFeNbA8ZNneSwTVMFRDSYBDgEDq1A4e4HRT0hF8KAnh6b3ni1fkmLZRLpuUXlme8tvyvpLHG2lACEfDTRgzQUcj8qLTt7c6M09SF8/Dr50m0Ez7+4IP0IOM3HIoelEmAl2x7izMprw5JXy1+wmLLfKlWRdnR4TFBRzyP/OnZy6X7XCvf3mtxqQWgsa7FjR2BXCy7YGnLeNGjNO1/479ddwY2y0YFw/d8FDYYuctQpvObSQP6jz01qFhx+/vqD4ASJXsbBw5fdoe2oBMQDBOwNqssYxlyiceYJk67kmP9ZdxSn4AM34H0wKnNj6xajPXkBZZaiEc86vvWBvmfcgK8TqrX8Y5Se3MwsjuNVcYHazKc96bE6ZNmBben/
K+joWjXvxx7Cb8UAf6Bv3Qy8biiixkYbayqR90MShAW3vEzVpPf+Fp1tH735RWvdH7L79pvzDoO4PyJyqNvkJFBZaJBArgvEzEWBBBAg6a8S8vixNlLUv331dODF37dtUWiEdvejS07dxWflmf7LFlDkJqAM7wkyu3CExcsaFsBJgtv+9PzAGYMtauNihjlCqnZ3BGM4oO5Y9SnvLWFD2WfuSmR8JuB+6mD2s8/9/nw09+9xNZZSqWYOyUuyXhq5zGkt4jv7o6BwoCuDFzCZ2kLRRdim8qFufAJKcT2ZJs6s6iygDYQK+chgO+d954Jzzyj0fCgJ0HhBcfejGcfeawirxVFwhoxmrpiz0spVgavd8JCJbJgv4k4Mc2AgNtM9+ZGcY8PSYc9qPrqism8MIdgJSziwFW9Gc+TMHC7SsbHldtvmURwefC+VKej0do0u4+Lv5y8F90hNkGHTbQo/apb0/V+OCDGuTBkti6kx09ZgsL5OHbNiQbo0Ub8Yjet2fRNgK9BjAZZzjk6l+YU5tai5IOJ17gx/4I8/FJXvxYpgWkTdYsaFnosi2CeUZbDqwcxrLaJdbLaXr/UUHJj/oU/cp4p62pC/yrbLNuA2xpG9574MpLkeRhAd69f3d99IWyCWMBzkIcOmwH42QR6MAzQBq5cs9TkX8O+Wej/6R6IsYq3gdGnRy+udmpVcKLgOYrgQI4N9+2X+Y1d+DcWEEzArn10V+GPtv0kWK49JeXhoGDB+p8Zx53S1FigeIoNxSU/zkQsnsHyMTJDzxyv1+zCMm/HDwpbdoyRsad0vqNXQV1DPCgwFBmKH8es2O1RHmyV/bNl94MWM4H/M8AvfhYhb7TS8pZFJQEJl6Pr9erlVde39qUR70dvFSpq9OkLmWgGVmm5XGPK2+fPCzz8FtnB1/iDX7oE+Yc9PGy1qRXJ4WHb3xY4G7imInhhF/W/SUr9vRiFQTwaF9z3KOKRdHLVMFRFvQheMDyyBfnOIqNfb9c9/7GRUpa/jN8xgUCryzcBJwtAZZNrI+8HFZf1sdnp52XW7nZr+2yhH/AHuDun+f8U0CXbRkcV+YWU+rPuwt8rATrLkCVPiMQa/uJ2eMLPQeegE/o0qcAvbQPfsIBmU5XixKjA9DFeXvKesuTIMvLdganQXmiawCa8gHzsobbaRZqI+t5nsZpiS7j3SzC3oa0m5ftV+KcJvMC/AKS33vnvfD0XU8LqLOwmjllpj72kr6EevnN+4me88Z8ggM8aw60p25sXYMmVv2ffP1CxRc/hQSakwSaPHBuCmCusXTIpiDr8y7/Qdj+e9vrmDcsdqf98LSw6/67Zh8zMPCMFQplJOUUARBKyvRclTApN0UtAkq0pSs95cl+1MSWSnS8vfN0eYB7SLYoLUoMJYviBOjMnDxT9+zVvPmsm8P0CdPDMZceo48xLKKQ+IxUJacyPKKaNB5dL1dVsW4FIzN/jF4uP79PQXIKkOUH93i5FYoukUldKp3Sgr73ndiOAirWjgCriWMnhnGvjNNCDaDGU4Wj9r2xLqUp7dW3HxQG7DRAVlUsj4A89V36qy8c8PrLY5Rvlkb2/nKaDHIECPO5YfYJH7nPDTkP7G1mbzBbFQBU0AbEAQ5ZuHE6Bx8X4auVWM2XxKUL8Bc/vEjgknIQGVfq5AsB2kXbIewpzG3n3qZFA1uuAHzINpghmLqwmIBfxjZXACv0ALzk9z6SXv1lXL1AF9OweNaeX2tHNWFsT+opoBvBNmXq1Ir4+Wtk6ydoYFEGfFMPeGEBApj3OvleZ6dJm3k/YcwjaxY6Hk4cFnSALTS0tcxoE89Le9edeF3Y4utb5Pe//vkdS9IsRZ5CAs1eAgVwbvZdYNkIoCmAZpfEo2+coReesNYBNFCqz9z9jKxUWIVQblicZOFBYRoQyYFQ9KfK1ONE3/QszhWz0mU/WXjiL0mnWDJmHoACNAB78AdoYYsGj82xJKGg+aogVvPtv7t92HGvHaVMnUzJNdIsCbMbysidewFc7s8j69FjZZXwUcuiABG5jMkT6ThQrkIm1isvaxnXMafrvESgJR6tLAE3A0OA5FeGvqJH6Q6AsADzpUg7c78K24sLuOfF3wc+yw7QBEjx1KS8/bBgsg0B0A6oA0DSjwCXOMAf+4X5sh7WWl7Egm9fRMIn9wBBgD509Kjf+iRhAGi2SrCAm/jqRNE8/rrjw5ADhshPOXylk/235e6A0w7QyTYAXMAg4w7+sMp379c9B5m09yXHXCKgDGjf7Gubqf8LfBpv8MSYYPwyrilT45gtDfZXIhMAKv9sQUG4W7EZY8gxHe+ko+7ejrQz/AFsAczIk09PIwPu4c23SFCuzyPUmxN84Im6GFmdK6062+kXgGs5Cxd9K4MnTNDmhVLysnimXtBl7zaLBPiFhwljJ+glPngn34HfuyqjV/wWEigkUGcJrGRfdrKh2HTdYQdnHz258m/FR0/qs5Wbkpz/8+xvQ5e+XfTiHUADRXbWvmeF3Q/ZXYAC6xAgJFegFYCzx6EA/fEv8heAMmUsvynczFMWjraOUSXpstSKgw5lwJtby4hGkWLx4w13QM6Vx18ZDj/3cFkHVbbTSK9JWR5cktbjmSnc7wmXx9XKLOFnMWUCYgAXAjOeNuXb6+EzXzn9NK3nX8ZXB2u6Gm1f/ACsOP0BSySgD8DEqQWELclX+G595FjtX4UWoAq6AmZeH5MB975fHoBM3wbIsodYQNHkCbgGBNOfkS38Avqwuq6yup1hbFsOiAegArSJE6C2lmMxQB3om3ykgjqyVQIA6xZX6M75YI7SwSsAl320fIhDANPAYA5Ord/Tvy/55SXhhL+fEC499lJZbAGZ8MvxZNDesMeG4oGq+tcBAZjaEmFgXxZZ4x9eNV5j5wYMA3z5QzZcWRDo09lsDbHy3SEHssGb9zfSIwfqy/5h/ghDPsgRXuCfK3Q5lYSFBfKCN+oLyGabyUYDN1IfgFd3GvMG6rE2v/7C6+Hxfz6uKNdxx/16UOi5Wc+w+U6biyZbZ/baPlukOI3iWkigkMCSS6BJA+emBOaWvImXT86mJOvr/nO4viDYrms7PbJGgigpLIFYcXikquOYImBG0TlQlgKN4eQruY/61hWsx3OV8o2KO/cv0s+l8SRHUaPAI5ARDcItbPyo8QIn7O184YEXdBSXHkl7ovJrWk6MqwRU3bpWnn253BuPlXiqrmzARS7cjJVyAABAAElEQVRn6mdyquLqSLNKfg+oID+Pqu4Kb95vHJzRxwBQ915xb+i3Qz+BQYAZL3Id9uNrqyO12PDnZ16o/speZwFnoynAZzlpU/5jTQXEsS0DP8BWWxrYmmTnFJPe8wCyvS9QDyydbrEGHFIPgB5jBbAOfcAzlk+svqTViRX21UKALnIAUIq+yZJ7FgxcCfOxJcBo7QpA5iVEzrl+6eGXNB7hAZoAT0Aqabpt2k31FfA1MM+ebeIZvwByyuaPtOorSTtSLjS1EDC5zJo5S+86UC/4Id75oQHIz5/y2Rc7Of6N7RG8ZMk+bGSIHEYMHRF6b9lb8
kUOgOtRw0aJ57PPfEJtySfLWSSdelZ2rjwyZAGvcq3+tA9/lLFz798Hn3uVOf44iE7DCn8hgUICy0YCBXBeNnJs1lR84m4qk/Wfzv122PZb24ZOG9mxdKZkUZBYgEY/OTpXvDxSRelKqUeg7Ao+V6KWT9asCJIW4WIhtrzPSGnHu9wflXh+T7yyeUQGMAhGWcsRb2U9evOj+noXlrstBm8ROvToIBCSJarwG0mmMSlIFbiKkSX8pBmWhz+t/9KUV6G+Ti6tt4fVx5W+AjCkTQF2DlxHPzU6jHlqjL5iB3Blzzpnil9wwXNLxMY/H/tV6Ll5T20BAHwBSH0rQU7Qug9h9HHAGOCW0yLoP4BRf/zvlllZnslMPqyrfETDwCVAk7yATayn7IFm/ADY6aOEU0/6ELR8bDgAhRZ/3OdxsW8T5nHQgofpk6bL0g3vyJF92Xztky9/Akr5JD0WccokPbzBi4Cz8cciQsAdi7PxpPHFlapZGZTHIoB3BliMcoIIe6ZJ6/EaGyYHB7XkYYHAAoQFD/nZ0gJgZ/HKli/80OHJ0DEH3qzyqvsZOu7P2SLD8iIT31ZDfXjx98DvXZln9Xk4DzBPU5mT0zoV/kICK1oCTRY4p5NIMXnUbzdzWTclOd/+xP/pzGOUP4qZz+/eevatYeef7KzHwLLGrWX7Cf3Tsw6eTalKsZqiFzhCKds/lB6K2Z0AWnofFbbipcP1k91WEyfgRQpwc6QFryh17q8/6frwg+N+IMUtQpV+Yr7yKAeQOWiO4KA83XK/l1iqYXppmVkCsi6nuhbN9h36DoBQwM4AFqce/Pdv/xXg7LpJV/UftjXst8dlFck/Zh/r4eQU2pw9v3tseUaVdHcN/53OJQcA63G/tSMvuwn4ATjtXv3FcvKyGeAZgMsf/ZgFoufFMgsQdaDLFWAIaAYs+otwbMfgiQxHOfK1Pn1908CjwK+VKZBq5TE+5IwHHKATWcgRBW/8A8RyfJtZckmjdGahZu8xNOAJGRDOOHOLMoAXGvDooB6grL3EtijQvmlbSJCf8SK+4lV1tHLhh8ULT5s4mYYzjymT8ig/d5aP8gG0WJoBzRwriMwJQza8KHnGyY/kWWrrGT7jr2H+vPkC8NBg68dvazhpxefjlH5TmpvTehX+QgIrQgJNHjgXE0b9dqt0km5Ksr7mzkNlreWRNdYrvrr36rOv5h9HYQ8lFjkseChclKmuDnJNYaPEHTBLOdMUAILoBLrSe89LPAo8+1Hq1K9oSyvais1+UNwofHevDX9N+1v1iWUPLL8m5adRlJeD5hjhgCdNt0L8Lpv6LLwauXiRag+/qcvV6NJGgGbazwEYVtuxT4/VC57sB+67bV9ZSJ+555lw7jlPVyzhOTsODuCMdZejwXp/sV+VdEPf+rOsm1g9OQmCPkJ/BsgBSOGBK0CeOLcc8wIZvHHF+ow1l76ORZrFJIAaMAooZAsEYJG9tJtuv6ks6YDT5+55Ti/osYeXD/XAJ+W5lbecWQer8JFv3bBE8Mc9QBUgC19aBFgcYN7DeJlPcWZVHvfyuNC5T+cccJOXRQF9WGN3PQPO0eKcjl3iNYYiaIc+8sLiDP+8cCurPTwaTcrDkQc+kA/7xPmIzXuT3wsnnnCf4lfETzo3e/lNaY72OhXXQgLLWwIFcF7eEm9i5fnk3NQm5CtuOSBss/s2ekkKZfj2qLfDd7c7K9xw389Dm45tZLXyPaCp4qV5UaKEARIcMANasH4RzhVMjKsEiLMYRZbEp2lR8P6oH3oocCn8mBnwAaBB2dcIeCsAxHLQrPrACbw3FGesSB7Lmp/FVHGJykxpAshY3FgYAJJ2wiI6a/qs8OKDL2rbAyCYl8L4YMWhP7ym2ho+9c5fAvvwOYsYANtz4T5V0j495Vy95EcfwIpMukHJx0nufv73Ap9sxwBIAoYBigBmgKas0GZNhmesqCcenwFBH/dsX9hmt21ySzRlACx9mwf971/n/0svqvGyKp+AJ4w05c77GeH0Z3hBPoRz5R6wSl6sxcgPOQKs+SMP4JV9xZQDuAZwu7Uaqzg02Cqx9nqLXkxENvpj8QvB2F6EMYZZZEwYPUHs8tKwvzBMWfwhL+SKrADlXO++7O5w0gn/VZ6G8OPtlfLS1ObstG6Fv5BAfUqgSQJnnySKiaE+u05Gu6nK+qrbDgoD/2egvi7GI/S7L787nH7Sw2HIxXuFjbfeWAoSKxyPpAEdrnxR8vwBmAHLgFtXwEgsB866yWSYKmsFm8LOnfS4a3J0ugJyRU86QEW5A4RtOmhTWQsF1MsT+H1SlAeVXAH55qiDA4osZMX/iqcKbAiAGb9YzCWvCmkqBtUgizrRceLl9CJohj8cQFXA2UAXFk3OOwbUcXQZAO0ng//qlCpez/jLHmGnH+6UvWhmgHDLVkeVpPv3sycEzvKmjG3aHFMSV35z9FFb6UMhgEAAc4fuHdS3jz3k1vKkwcc8ET854ScaC1hm6R8AU0AtdcPKzd/f/vC3sPnXNtcpEWyjwFrdoqVt3aBjRpl4IchGY8ZArFua4d/BL36BZjsmElDr25XIB4gV4LdtDYBmB9PkIY4FCcAbazgAV1/9s3Hq4xNaaht1dQPNcbE7d/ZctQ8nYTCmRNsYZ5GgPd22yAAss0Xltede07aKk397v1epQV3TtnPGCj3pkiiuhQRqJ4ECONdOTkWqChLwSbgpTrycrIE1rVXbVnp8vlPP35ZI4F/Dfi0AoMe+BgSwojkISK3N+FHMAhZGoQrYi+DKAbEXUpIuKnLFeXqjiaIHFKTh+Aljbyyg3i3eWaKy30irLDS7zbCdgAS8NChrszOcysXD7OrAtESGSXxFbw2yqDNorkQrAkR48z8Hd1icOf3kpYdeCr0G9tKeWizJ+37r0oqsLsvAx948U33lufueC+yrZqsGgBAA2q1fN+3TZbsSf4D6Pbc5MwfOfM69bZe22TF3Bpa1sLJ68uIbfRNr7/UnX6/j1jb5yiY6Xg7QDPClbbAQO6h1EMoRbToKz16MZMzoSUq0JgPKcfRp4gScDTxDS5Zfi2fRAVhXPmRNXhsPyJgnRzjGKnXxkyqwYEOLdiaf93fnkU9Us/UCCzfbTlgUQA/QDE1O3Jg2fpq2aJCHBXZDdz53p3w2xXk8rV/hLySwrCRQAOdlJclmSMcn36Y44f79niN1JBhKmLf099qh6jmoNz9oX+Ozs24Bz/qsr1nBcLI2myJ2KxwgwoEz8SjXEhdv6wKeoeEAUTSFWiBuL3mZQgdk8Ai/RlfGRp42gmbuvQyARYN0VgfkBp+pXLmHZ0BTGl6xDtXJwRIvE9BsdABw8OR/LLIAYPAG2Hv6P09riwGAWeG24Br5+MjgR5RV5HspA9nqQfkP3fBQ2GS7TXT6CoAZUIhlln3JHCkHwOdDQFqoGbAcN2KcTv7YeJuNZZmGX/Y48yIgizX1FavfLWfdomMd+WAKYBjQDC2BVPqvyURbKayNsHQDWum7nDbBeAKAkx7n1mOu8Iw1mHHmT3B4cRAaAGgH21rI2g/WarZbYBUmr58U
At+AYdGxPd5qI6OPg0fainE0c9JMfZyFsY48KAM+eWGY7Ss8kYLP3/3qLuVtbD8+jzvfTXE+97oV10ICy0ICTQ44+yRQDP5l0T1qptGUZc1WDd6iR3k+e/ez4ZcH31JFGA+MOkVnyQIYOEEAxQq4cKXuYBnl7n6IkAZnMGoRMIvgrS7gWUSSH/ICRLCCsX0EsAN4qNZVAoxJeuoO0DCy2bVaQis+Aj7hN3UAIkBOjfxXkkFCZFkAZ/gSKItXyHPPVgH6wrvj3g3D7hgmGbfv1l77g9nfzEkZ7NU9/hd3JhwtG+/j48/Sgu/14a/r9Bj262vrglly4U19NMoG/gUwLQ45Ayanjpuqo9X4SIe//Me+ZvLRr++44I7Qd7u+ennR68ki1BcFyBW6LGwAuuqnVh4Ad8obU8QLLz4CxHGiS/poEQbYpgAcnvxlPWh6X/AyALr4qZvv5wbsMi5Fx/iBtuibnzFLer7aOGPyDC0MANykpyxOtwA0C6hbeTWdcrFsWqz+qfh8npZU6NFUGoW/kEAmgQI4Fz1hiSTgk2xTnVjPOGeP8LUffi3MeW9OGLjLwNB5zg+ryGnIRXvJKoci5W37jj076pxYlKsrZACBgLSFydm9viQIcBBaANdFhJJfzBP95BF4zXIrPE+vSI/IrgAEXlTicbTAT2l06V1SRmQlj4c3QAYAYrF08lwrxpOC5txvcqYNlitwTuUZReFgLb0ia+4BkYA2TmG465K71JcAzOyh52kB4Ax3xN7XR2rL5sILgYDLjba0r9LZ4gIe3BKMH94czOKHX4HL2BcevP5B9QuAMv0ey7RAs+Ul7dW/uzrs+P0dtZh0izFtwUIOAO792cvBIkw8ji0byIN91ixGeZrD+CEf6fkjP32SvomfujAG6a8Cvwlwhh/CiOePtGzRAMznwBmwH/OSBgBOmdB2CzgWdYFpS4f/jRfeEM+MtXOGPLlsGqaBUPG5PWWnqc7zaR0LfyGB2kqgAM61lVSRrkQCPrk21Qn1F0dvHXb56S46MYCKVwLOtz56rPZ43nnhnWHjLTfWnmhABCCDfZOAEhQyoIB7HIpfSt+UsoCzKXZAcg6GI/jSfQLEHGyICFksf+6iF1DAUWaUx4ceAA01OicR08EP5Yovtk7HeOjBd0N0yMHBlPPHPX8cw4cF3uNLZEZir79nLLvmbVIWXu1tGT3KpQ0AbvQD8WVXnPiLQFJnhA+5VY/+v77v1wXoAHVYNLnu/Y0Lqy1ySSJufODosNXXt9LWCfqqQLIRQl6AQ/qR5Aav9p+tG2yfeO/d90L3ft0F+OlnAH/1NeMROjjODt9q161C5407ZwuDaIGmz2tcRIBcRRYGjGkPtooATNm+wouEPMWhHPZFk0dtaOm46s/4ZXHkwBfe8eNID//0Z7Zx5KDYeMWSDYAWQLaCqS90Pp79sc6L9gUFeeCFBQ28caYzfHFyx3GH36ZymuqPz/Fp/ZrqfJ/WsfAXElicBFa64m8js1lmcSkbQfxhB20mLq+8ZlQj4LbxsuhypgZNVdZnXfDdMHDngdpriUJ94cEXqmzXuOLmAwIvPWEZ48it/l/tr1MMckuWKWgBZwPNfpYyShyFD5DAz385wIAjuQjAdB/9pBFoyFILZCjM89gNAAdLHdZKAXWn7XnKr9COacSLx8OWgQ+ADuHw2iBd5B+5AP4AcQAn6gTfAvxWD8LcOllSj0S2JeF2k7dFeURN9yk9eDDgJtBsV+Sp+4Q/gBtWWD5ocdOfbtJJGgBOQB197u2Rb2urxn57Vv4ASk2sVBd36d/3DYO+O0gvx9FPBJTN4otDXro3WVL++Yeer68O7rDXDkrPKTLEq19QH/vHdhKszTyZcZBLe1AvwCd7jwWarQ95GyATtZOViVyU1sYI+Xg5kWMU582ZJ2CLLHybSEk/NFlzr7YHLPMvtrUAM7QBztFhRQYEsxUEx8uBgHGvC+GMHUAyVmTO1eZcaPZawzefxuacbT6FzrF2D1z3QDDdGak3/Us653ttm+rc7/UrroUEqpPASlc2IeB8aATOVxXAubr2XibhzUHO1/3nCJ0qAJjggw9YvfqufECJ/K667WAdT4VyZd/ksH8N00tWvATlIETAgBeQAAb2DwWPQ2HLLdLtAsM5YIsgrBw8kwewIJdfMg8vPwEQ4AVlH4vK0tbwK57gAzJ2dcBBOTmfNeRfYVHOM6xHmcC78+/AGWsi8f6X8xvll99HT94G5RG1uU9k6GAZfhwsyiIagTT8sW+WkySuO/G6sMmgTbTdBwCK1ZUP7hB/1L431KbkKmmesy/OAQQBpgs/sVMg3pkb2g3sqIUVn6YGuOPu+OsdsiTTZwDM7HfmFA2dU259yV8yRY70BwCr5GxAlfS4/17z3zDo24MyGduLdoBdB6XI3dtFsjCgKzlYflmTsVzzdMb+AUoBrVh3kRl7vPX0xmTl/Zl00PS+SXsRJnkbTcrAEcZ/HOX5aSFYmaGLXKiLvkhoiwfqwgkZfPFvy8FbhhFDR4S2ndqGVu1bqT4vPfKSxjCf0/6/I5q2tTmTWtVfn/vTmELfptIo/M1BAgacR8WppXFX99CD+ucVuOqa0bm/8Cx7Cbism7Kcbxt6nM6dxSo16fVJYdCeg8ID1z4QDtrrqlyg5135w7Dt7tvKKsWe4lFPjApd+3YNq7awvZz22BsgKuDMPY+icdLl2TnPDlSziPhreXLgFoGd7iMgIw+gocTZLeXw+BwwABhxwAB4yOmVZMpuUnDh0QAWB0e+t9TjGuKVugOuUismchJwBrRGi6pbPEvqUEWUZQEliWt3k4JEB3PIVNsBbGHj4NGBM3w/ftvjoXXH1vpjK8D0CdPD2GfH6ot9R/z0+toVXE2ql2dfGj6Y/oG2E016zV44NGAMcKaNH77h4bDVN7YKLdfO9sQDRnmJD96QocuUK/LLgbPJnPoQDh3c+YefH3Y/eHdtAaGP0g8B/pIBYDbSc5qUIeCM5dfALOFYm3FsrcAx/lZZPW57oi1NjvRtB84OnkkrwBzHV9r/8cMDwBlwTD30VCbWkXjoKNzGMYsNtmTw9UG2XgGs773iXtWNkz6gs9tmp1Jks3WuA1IBNGV9kNaz8BcSaHLAuRi89dupfcJs6nK+8f6jdaYu1mTOcN14q43D68+/HnYfcFou4D+eNjh8c/9vyiINcMZSpQ8rGGDwl4tQ2ICDHDhbbhS8K2pX8LoasJCzaw52PcyjImguB888hkahs01EYMLAAK4m4FwJNDv4AETgHHzqpiH9RLDjVlDk57xzFbAyMIYDLNEO1InwctkB6NzlcveAOl5F38qBB8oE6Ak0G2CGD9pJR68ZSKRPaC+6lTH17amKp7/xgQ4WQZx4Mfz+4eH8c5+tExfsTeVDHQBx+gPgr1PvTmG91utp37ROmKBnGH/wwxF4AFwAMH1HfcksvsiLMGSMfHmhDiBLOHkl19iXnUHC773qXtHDqrtBxw3COq3WyaK9X3ONtJGXTtsw2jwtYcsEYWrXyI+3G0Rcplzpmz6O8jZN+j08p07bLywf4doeQn4
rQ3Ssr2D1ZtFLH+ApAHvPeUGTMmgXnjy5nNjW0WelA1LyzdLv+iCtfFPXDWldC3/zlEABnJtnuy9xrX2ibA6T43B71A3oYOsDLwcNXPvIKnJ77K0zpVAFWE35fvT+Rzm4QOHjfC+rZxawi8DBw1DmArkRGAPmKoE4AYSMLAmUBno82uYKH4SXg4a8nOghbRVnQYA85xvAALDI03oWL78KgeUTAD/iy0AQfgAofOMXr8YncvLFCwBPFlMLI9zrl3Mb61NJ3nmaWnhov5VXyiyX3gbwkwNnA6CAZrZNfPrJpzlP9C/AG5ZOwJna0Mrjoxrbd/q/Gku+58U/KM8OnX8dzr7wezpajvwOcrEus0943Q1sa4a9MIos6CuUxTFrgGaAOhZYgWbjD/AKiAW88uQEWshYIDXKkDho0Q784ag/gNbB7U1n3qSPnlAnLNxunYYO9dVWCfP71wTZW4y8HARDNwfN1kbsOSaecgH36p/00dhPuUZGxIv7NU6Mb/JYhMYHIBna1I+nFc6TaFga6oeMAM5cdcKHycn3XFOXe6+817bR/CMrs/gNrhtSUTQHPZHWt/A3Dwk0CeDsA7YYpPXfaZuTrF94/yJ9chvlyud6N1n1oCoCvn3Y/2UnCJglDiAAwMCih1J2UOHAQ2AKChGAorw9TIAvUnfg4ODLC1W4AQgBvAS8khewI4uZ8eD0PV/5NS0rjRPoMVqAB1n04gtbefrIt/Oc5q13fwQ88OL1F48GwBywUW/i+QeABTjTBgAmwJaDZl0NSJU4l2tJYO1vKJP/JWDPssOPQL0BSgd8AET8gC8AKsAVmRNGHNsBoAV4mzl5Zti27S+rZYSPmNx+3u2B0zjop/RBALKAKRZtA9EOzLHs0nbz58wPs9+frTRYUHlaQhy8L1xoL8YZsCYfNModsqNO8MrV60s6yd7q6fXlHqd+a1fyXP3bq/XCIbSJF6i2j4qwQKVMpTUe1X4AYdrFxpL6ZkLbZYXcGHPQE7+WXi4rOvNDI/JNAP2HIyEZL+TP/wDqlg7+ZXW3Jzh8NbBDjw4KHzVslPZ+Iy/KKv/EeVZY8es6IpVEoZtTaRT+xi6BAjg39hZcjvz7hNhcJsHn37swtOvWTuCAvZd9Vz6wirT/fP53wnbf3k5KH9Cy5tr2dbQ1V5eVCuXPo99yizNEAA1u9fL7lLiDDYUZCACY5dY3C3TwSHxuHTQAQRpAF055QB7RObjz+/TqoIdyBVSMPwCJgx+lLQMjaf5683uZVCsCMYAOoBMHj/jFP/H8tyuATsDZ0ko+Btpwqp/VEbGUyFiRUa5KWfufVK4OJNUORgLABwCmLAFl40NbNdjCYXxTF0Ar6Vzu82bNC+u1y8AzAI5tAf1bHFqFIY5DfPLOJwOfvsYSitVY2zMMOLNHmv4IMIY+5ftCgkUg5VE28tM+Y9tO5Dx7QS5j0nmfQm6EUwfSE0edXf7QFXC2NhBAtTSqu9WXdmA7EYs8TuGYPmm6ntC06dwm8OEXXor0fi260LYxhCsH48iUEzig2WKtDPRTF/GSM7uojeEBfrni8v4BWLZwtUXi1ye17eMnzgdAufcX+ylv8VM7Cbi+SFM3F92R1rnwNz0JFMC56bVpvdXIJ8LmMvk9O+38sGGvDQVw57w/J2y62sEVZXvX8N/pU7wAEIAOXzzDz1mvHNXlj809swCgATxXyoQrzBNUc5XiF+LLEgAyAG08+seCBnCQi2AzBc4puKtC3tJ/8WV2/i084QQszF/CV6Sb588wSH67TD1pWVaO8wF/PLIHSFFf6gVYs8siv/Mf963K4mxAS+A0AVAOonK+Y30E3vLAxXvgjT/oITe1k4ExHIAM6yjhOAAfYQKYdqU5dbqFXR3YiZ7VkTj8WIDnz5tfslXo50duEXb60U4CyH226aMX8thTjLWafciAYxz9ENAn4Gj8Qc95QnakRabargDvloZ4ZOb9U/LIqiM5+ykUio/1Fc/IIQJnlZcAVaeJRZ2TMziZgu1P+PmMdff+3UOLlplFXHI0nrwPOmh2QK62tnI+mvVRmPb2tPxEEBYK1BVecF5n3XBv/Hj7OO9qF+Rs9HyBw6KA4/BYLCMH9RvLu0H7DUL/llUXME6/uFYvAdcdaYrmokfSOhf+piGBRg+cfUAWg7B+O6TLmVKai6x5DM65uoAK9jlusspBFYV82+PH6UMoKGEei7OvlMfOvPAFkKlicc70eqbIDSCg4F3Zq4AIAtPCchAAwAJRAWQiHYEcKxuLooeRF2Dk4LokPAKLlD7AxgELfkBIJb7SPPLDx7J0sU45SaMPcFGVTU44vwc4w6uDNurochToAjgbkEI+nKzhwEjgCVr8jzTT8vDXFjznZZPHaIk2gNH+cF4uQI14B4HUgbzc64U754eSY34WRPAI/1hWAXK+PeDa/xymPtaqbSuBZizL9DvOJ6a/fr7Atn3Ytgt/qkHZXlfKhC+uyIc/B5KUp74Sr/Qh/juoFwA2YEm6vK5G2+VAvVgoeP+hTIXZgoFwtjHxSXiAMydXUC/2PzPOtJXEihfdZCsFZdJ22uNszFAW7TNt4rTw/H3Pa5sK/NMfqDtx+o9ME6c2MNlCX/WN4Jwk0IQ/LOKM9a3WPzrJWXiXlQRSPeI0m4s+8foW18YvgQI4N/42XC418AmvOU1yD4w+JfTeqrcUOsq+urfoL75+n9Bn2z5Kh4UZAOOWPsCBLM6r2RFx5gQwhEwygODAhvAsQZYmu1n068BFIcIFBkjsH6ACJ1AAUMjJRA+RqdfLITw6gI1AVNxzDS+AOVzOl24UVPWnFJ9Uja9NSMpjZNhBl7JbGdSR+vIn/izM6y8+jYbzK3AUgTPgDOAFMHIAqSsAtQxceVkCX4vhm7IcWOJXmdECmwPnCFIdtFG1FDR7Pq7wyVncE8dM1LX/jv0FBqEL/2wfmPLGFIHSya9NDjwF4fPcbFnouJGdz2x7b+l79CncWy+/Fbpu2rWkzuIZviNwdzk4mMz7GXzaP/oFaQHXpEXeWInp315H4pyutqHQPli6DcTi1L8sDGszllz4njV9lqzilMtWDd9iQhniJT4toAzKpO24qr2s3ZEVp90AxPmAzMBdBqpPUCb5M+ycdEz6j4XrD8Ac/1RfS+z8s1jpuXAf8V381J8EXJ+kJTQn3ZLWu/A3PgkUwLnxtdkK4dgnuuY0ud362LFhs69uFrDqcTzYRp/vW1H2fzjZPs29+zZ60YljqwDPWNxQ0ih7jrbybRQCLAZIAGap8hbhCPxQ4qkTuAHg8b8M6AGoHAw4Da4CdJlHwRawKCwLycMBRzj4oWzoAYZwOS+lLCmu5CfBKCXhtbmJtHOeLQ/1VNnEGW3AoKyOBuLgE/6Qrafh6n8USXqlw/oaQRdX0kieETTnsiWTu1iXmsCzyrX0eZnGp9rTZCcwZvRxDvrgF7l6+vwKgLU/jkoDBAJKebmPtIDP9CU98gCS+fT1/X+7X5ZW0tDf/AU/7ulr8MCLhXrR0PhI+42XLQaNb/ELYAVMRv6JIx2WadoF/qEBMAaosq8Y5+chO01OlQ
G8wrdbnakfbceWE6zmvJjIHmfGFHyzv5mPBukFxQhoJS/z41yGWIOpG3wiK17WIx+nj2z9za0FnJFdlTa1pvDxRl29b3g7Kb2lYLsSPPZcUABnCX45/LheSYtqTjomrXfhbzwSaNTA2QddMdDqt8M1Zzk///6F+pob1r7u8/euVtA3PfgLvX3fvmt7vajlj4zZD/rZp/Znj9tR1AAMQCx+AZUIVggDoPjj6LQgKXgDLQ5+/JrSIg003EHL75XOI8qusgYacHbABEhx0E/SPG9Cu4zEotsIOBcFLMYXaYrXJCn187oRjJy4l/XT9v5iwQcI6d7CPS1X+S2PA1XawYGXgLOBOAElk5dfKcNlil8u1oVyyp2XkZcnUS962RNZuqNs+BTwjG3k+ZA9jjRYY7E282Rj3CvjwqbbbyqABxDMt3JYWl6se/C6B8PWu22tF/8A2izWHKgCLB04Ay75NPS3DvkWyFH9QbxHfilbfcTiqH8KnLknLbxxhX/cJ/bC3IczPsyBNKdiEEcagC38s2UIfqAHSIUW/R/eAc5YnLlyz6J0gw525KO9wOinz6jt4riALqAbkM0fdaNNsVi/NeKtgOX9Bz/9Tpi7xgKdF00crqQ9vX7WLiVjzsJpK8KQA2N8+sTpNZ5iIuLFT71IwPVMSrzQ7ak0Cn9DkUABnBtKSzRgPnxCa46T2BMTzg5dN+kqhc2ZrgPXqXqWM0138d9tu8bWfXQKB58rdmUMOAAUAdouOvqisO23tg1f+fZXclBBOoFeiJjyBiSkDgDgf4CfFBAAvMrzZ2SEjEQG4FGdIw4aXAE4OKxuelnN/CV5qyejfPox/mrlIi0ruUpy6qdyY5TqboAzBaAAK4FmgKf+Z3VwIApRgS8DRQ7qkL//5QsRB89WJi6VbRagXxP7ooqJZ8pEdsg2+vP8sY28HvBNG+kvAmrlRe4Z8+IL0Al/gF0AIlt8nrzjSZ2zzP5fTs3Auop1mXqyl56tCj0375mBTrZGmFz4c+AMPb5m2W/HfqqDl5fznlVPdXCZaAFl+4sF/q1u9EdkDU3oXX7c5aHbpt10XnSbTm3COq3XEbCH5uyZswWG4c954IoVnLwCz2Yxp67UEQC9fgc749nOmM5Bs1urLR8yZEsGLwECyAHb8DV3tu2RnvqB9iLzEiRf9dTiIbE2520Zm4572kBAnnaPixjvR7TTmy+/qY/O7Pj9HbMj/GxMsBd7xy6/cUkV1+UgAdc3aVHNUfek9S/8DUcC9M+V7DvzNj02PnfIgYs+sX31tcUntuuzBV3WzVHO1/z7sLDD93bQFwF5aajvygdUK+o7njpeoAIwAGAAQGDFEugzxYySxv/CAy/oJS6+7lYCqCx9DpwBYPwzhe+gJsFv4kHxDgIIiSNZgI5bgJ07vBFEeDqdpGEWXOjDB/zih3dclfwKXcyPl1EpWQk7yU1MS10pkz/qhgMIIhN4IxwQRjrk6GmFP2O+SEr5AFkpcCYP4A3SHHPmcoUeYbo6Ab8qyn6iq1KmFQ5vbGHAQQO6Lkvn2bcHkMbrgh+eWFzBF1Za6NBn4Jsv17EIc2uypweEEo/lFyAtq7Ttoffj9zjPGR4AnZzsAsB1Bz+4vB66iXW3ago4x/7gcoY3+scbL7yhEyywjHOaRetOrXUiBvyxhYQPqgBuWSjqC3uWB0sy/GlhZvSpO/xrgWBbmKg/oJc+R52wGHO6hrac2HYn6jB1wtTw3jvv6QQRPg/O58MnvzpZX+o88E8Hhnffejd7ysPij/by9tSNqptblr1+8AAvyJp9zZNenyRrOosUzpT2D8IwZqnPlDenhDFPjQk//9nfM4LFb71LwPVOWlBz1EFp/Qv/ipcA/bLRA+diINVvR/LJqznLeeTHV0qZomB7fFr9do2rbjs4bPKVTbRlAzCB0mWbBmDAgYpvxRiy/5CAZWu7PbbLrHvWjA6svUUFwiIQc78DH9IAZhz8ORhOrYqiUxWfZsEGoCgP8ABggQ4gBgADXVxaltNXxOJ+FuHMRSkjH+JvUWjuo36SkfGT4R3bi2p8SSbwaQAfOXoYfAswZz+L8kLRyiddDpwtDUBJJ1wAmKzukqfVOb0qK6ArdfGWdDhkJLlQfATrWFABVw4KKYvtLsgS0IgF32UMDeWXxz5GYsfMYU0FKOpDJQb+HDjDvxYKxie0KVt8GCtvvvhmBvBsj7Dom2xoN/7cqgrg5UxoPrCSlxv59vucFwuQLGJ/Ix461IUX+m78041h79/urZcPiWNfMfTZn4y1ePaM2TpF5sPpHxKtvdXsrwaAso+Z8UB9WAQxjvhyIttMWCggK+TDHzKDHoCbfLgZE2eEya9P1gu45GEx8MRtT4RDzjokjB89PrTp2EZ9I+eftuK/X+2G/o1skCN8+1gD8LMgJp4XKzkaj3aQLOPiChkhexYFbA/Zc6s/ia/iZ/lJwPWQl9ic9ZHLoLgufwl4PyyA8/KXfaMq0TtKc56ohs+8MLTv3l5ADSXbv+UhFdvw2GO/EnY9YFdt7eAzwihcgEFuJQUARtDGFWvafVffF3ps1kMAq2vfrlLoThzF7xYygQCLIB/Ow7mmjnhPUx3YBbwKiBk/0AUgkQcwAcApL0v0s2LTomr2ZzhTMiNhFcCc0AO0wCvlO2gGuOAALAo3fy7HCF5VT8vn9fF6q04JcIYOABDQJgANAAVIVbA6k9brjx+n+1gfB17wS3mAXPb9QhtAhqXcQZq/sKfFiO37zdsq8gxoAzQDIrEsY6XN6Vt5ApF2ugnlO9BDPs/e/Wzo0qeLPhgCwCSdFgoROHvfoO8B9gDOXid4pgxdza96IEEWItQVuVifojyuH334kSzfvMzn50UjQ85ghiZ8s3DA2syJH5TXqn2r0KFbB1mB6U9+ygzp4Unbl6LckJ/CkZ3FA6LZ0712q+zz1vQDrM0AZ7aIsL3jtedeCxPHTgxbfH0LhdEvvA/DP3T8z/sTfQS+KZ9xp3DSWXr6GHKkLC1erO+QdsLoCfqs9rnnPCXZTFnvX2rnt195O/xPr98qrPhZvhJwfZSW2px1UyqHwl//EvD+1yiBszNfDJj67SguZ0ppzrJ+dvoFgZf+sHgCnPutfnC1gr/hvqN0RBgvPAECAFOytplSdoshSh3gQhxAgxe4OvbsKIDTtkvbAOjGAVxQ5qRH0TtY417hEeQoMdGAIsAQLoKz7Kbs1+IciDngEm9Gz4+hEwmnpZsyGnW4BbSUuOSWD7cEw8sOjh3QyNoZt2jAG+CIOqegT3WlnrHeXnfko3ohPwOsBo8ywGxgUODJ5C5waelyoBUBl/iUuBG4OffHW8p3GoAw2tDbRckjTcqXhdwWJfDtf6JpP4BNTqd47ObHdHILoLDl2i3FF/T5c1BMvdLtKuNHjldfYZuGQCOW+FgO8vQFgQPnNddbcxGQjAxIZg6g7YqMAfCvPvtqGLDTAN2zJYO9xCMfHxkGfXdQWL/d+toawvnQAH4cFnX2Nr877l1tZyCs14BesjgjK74GyDaHdl3byToNaMU6z0uzbNfgnrJpf2Qp67sBZ7aBA
MqpH+nZa8wVizbWdvaAb//d7fVyIRZqjRErm6v/eV+ijbUdhvayP/qEy0uLGsaYpdFCxcrDkX7s02Nlzfc91n237Ru69esWeNfh4RseDr8+8nalLX6WvwRS3eSlN2cd5TIorvUngbTPFcC5/uTc6Cl7R2nuE9LTU84LHXp2kLKdNXNWGLDW4TW27Z1PnxB6DugZ+JIb+4hbrmGAyJQ1AEGAFyBnoJATAThBYb89Lg03PXiMgEGHHh10rB2KHHDHC2EOCgRADSgKhFmcp4EZ4mQ1jKDU0wIeyh3p4IU4AARX7t1ySXoHocqbAN1yWjXdi4c0QRkd6iFesDS7M17Yo+tgyi2fDui5F29GS+BPqNX8EQRCBrlUAc4mb9G049UAbUoXQa4DTeTgsi65Wrg70TBZAfIon/vcGU+U7cDNwbJAuofblTyAxrsvv1t73QfuPFD7kEkH38QDnP2FOermwPn1518XqCQt2yB8m0OlsuCRrQ2UBcDVvt24kIB3ypAMzc/Z0XxeHiA74rERemrCoo78gN+OvTrqJT7APW3AB1bYZgKYnTFpRpj02qQwddzU0KVvF9WFLQ+kw3LMyRlY35+56xktKgmjPuT1uvpigTBOtsBKTZnkg0fa/4UHX9CYoDws4VvvurXisRKrf8T2S9uAtkGmAGYWvaJl4BhAjuyQG+1FOAtcxgC8QH/EoyP0RUNeZCQv21J4UZiFwmO3PhaOPfiWvOkLz4qRgOuotPTmrq9SWRT+ZScB72v0rwI4Lzu5NjlKaUdpcpWrQ4WemDgkdO7dWQoWy9dW6x+12Nx3v/AHfQ0Nxc8jeAEhU/4oagAaiph9o2ec9LBonXXh90LvLXtrP6kec1sewA5KHYUOOEC5c18CklD6KXjEj0tAKgAKkKBgo8F+YQCLW978JUGAGoDM08mjG8gtemEvD6/GszjATDbqUAk0C8hbPPwBvLi6xd7BMXLgjzq63++hXQ6cCVOdAXwGiviDtgPmFGjhd+cAmPaSs/IEGgGcAHX7J1AcgZfz5+mJU3wCmqFJ+VhMiaNvsF0BUEvZyEV1t3q7lZ2y2Z89+qnR+efcSYtcZNk2YJj2CS8f0MsLhmOeHiMwCzAG0FIW8gJMXv3bqwUGOekFevA3dfxUWX7pc/DHiR4sAgHQAFT485MxWEiSHiswn81muxHAeP7c+frjyQt/AG7kI9lbfoAzdGhDZEofBTRD95033hHYxqLOmdUA10dufCT8+IQfywLOdg0swn236Rsev+3xwIdievTvIRCtNovyRg70J2Tliwj4AhwztrgiNy2y7ErduIcP9lFTd+rHwgOwzJYqngjhf+rOp/TRme/vMCTrG8XvCpeA66uUkQJEp9Io/EsjAe9fjRI4p8wvjRCKvDVLoJDzIvk8OOY0gQIUcO9qPoKyKPUi3z0v/lGgBMAAcJWyxppqQGHSq5PCzhv9Lk989NFbh8H7DJaFmZeh+HQywIXH9QCWq0+4Ohx7+bGZogcMxCPDAAqVwCOgji0KAAaV38JOWrB/lO0AT4DBwBsgBhAHUHMnIJrfgG8M4ZiDRnXO05TEZ9lKggQO4SOCX48kHBk7cIRPwgA00FY9LQxW5Jcn+gk3RzrqQt2gxdXTuywcvBGfAq383mRKOFVN60uZ8Ia8lM/SwF8sWE8X4NlddbT/n737ALykqg4/PvwtBKT3XhZ26UVYQYoUFRRU7AULoGDDGE3UJPo3GstfjRpjT2JD7A0jdhSVLkXK0uuC9CpVRYnJ/3zOm/OYfTyWXbb8frs7d/e9mblzyznnzu773jNn7pDDi0JO/+npzb4H75ttKkvu7KO1TYGdvLN/cXZ6PAFsyhXlne+Gg5T8+leHnl6+c9OVNyUIgsZ1p66b1+KJR53YbLjFhnmdrbr2qhn6AZxdHwWwgJFcBdCAuwl12eAPd/4hr2GvlV97k7Wb1ddbvRESYoIiqWvZOGFHa6y/RsJ3yVT2ty17kR00+wjHcC2CfP0rIx7cZMMH8JPB3Rje+l9+45fNtB2m5eoc7JF2b+1pciixuYcS2QN069t15fownvp6VLsyiUntid89sRGaQRbL5bG5SYF1qy2Bd8J3TmgOf8mXsu3+a3JZoH67ulL1AN21Rr8/txaoa6quo0XO4zyqwNwaoC8/Zxbo7Xy/ncQt1yt9Z7cc3f01Bnv/8fVDmmk7TktQcesZWCzzmGXS23zmz89sDjngP2apctRJbxl4+MLTNeO4GQkbwNnriGuJO7e9wRrIAht+0IEh4LGVAIokD+AAoooDlQc6tFHeXeAAwnwqVRt5HM11obgLk853z1X9kmV43O7olwzaB0QgJ1NsQAzZ5GeZgLBHLztYjaHK55Z+/rZ6ZvkIiak+U7cCZ6EokZT1VjhbfdC5vJEFnAVcZVP1Ur5WRP2ASuULanMMWllS5iiTKeqwU7bZjhMPtyRURHwzgPOQH7nVlcjmo132MM7Onfy9k/OORMKemGbnXAf1aa8H/ZGzbAhErYrhhSEe4AOFQjC22nWrhGXtgVRbfeqLd5tsYpwBplU7TPzUA41k4ol1bVtKTn22VFcZ0KxPnllwK/zI9c8e7M6GOQbRtvaHKezwh3v+kG88BKjq0JNHnvf89htvz7p0JqtYY55ycD7j+BkJ6N14bnV9yAWcATd7eJjTJIKMt113W47F2lPWzkluiJgPI3Y9yd867u9Sz7U2Wis94eTZpH8t93DYJutO/YZ15Svw6eb1+70FHsoCdS3V9dOD80NZbAk9P3qhLKFmGKp9yvUfaXZd5++Gx3O68/0z3pbLZVmaC0T4Iec12+avXvmAJr7xizfmbW5lgJmHBt0mB8+WuQNDYGKT7TdJYOpCm3NSwSQY413zI68+uAEcgEWZhIoWduSBa+crDdvJRrHdoP06X/A8mj9SLItXmYIrZcBMQmnbIH31CawqrEIeYJOfHzrm38Fx7nfPtW3Rg34Jny046w805cNosQ/e2CUhLGCzC7il29Aj2oKz4wyriJj1tGfFC4cgJZ9tN2W71T6ojT/asUycycxyK8ZScdF+1XfOZwjmUccxkBXfTGarZHhwjo7kz0/AbHqso33gS1/jChbBqElDeZLJrn3hEyZyxl5bZEjA5J0N+wBnkMnbm+EKcd0CTp7YBNu4E2LNZV5depJTWEUCdjw8KKzJNW9FGl7bal9b9HHNko+X2H49jOgapx9Pt/EnC2C/53f3ZB9e901+Dy7yZvN4C6u47KzL0oNesdyuN7YhV3qco520R/y7MIEQ5qGdQ993aP5bEX4xbp3m75z45vT2Czmp2O0Zv5rRHLDT+7pD3e9PUgvUb1lXvAKgbl6/31tgnAXq+uleM4sUOI9TYJyifd68WaC387zZb7T20ae/NR90AgNingHNWcee1XS9Wuq86wP7NVN3nJplAB6YES8KlL7+/q836266brZjhYJNd9g0YSWBq4XHYKZBim2tDww8eBOFe4ATfSdQAsuAnToGU44lcDdsy/EIDGah0a/qu83PNjplRqHZqeqPnuBxCHwBffoHiWRNHUP20jXFa2Xq
nqvuwJI2u+AMeNkUsAEpEF0eTXIoW/KUbMr5JIC2MAkEgWaCbcBq2arkoEc3VZvZfpxUzuRA/64FdqkEdsmoDPkL+hJiAzblXfjrCzPO1l2ILjDb15e2S2bjLU+75XEtoJavf5CZIB3XiUTXsg/ALe8x8HYtFdSqA1YrNCIhOGykPHjmkRZf7dq1Ig3gJAM45j0G1yDcGNfkSJ425Ylv1nbaIGzCe/3nP/05x8k5kwRjKfn3QC46WbbOvxltlH14+MF/TZxMKC8/+/L0InsQcZ+X7ZM6zzxvZnP1hVc3rz/kq9luff1kxjub1dZbLWHe+BsfD/Y+nIl0tdlvJ8YC9dvW7b0LRN38fr+3AAvUNVPXieMenPtr4wEWGL1QHlCgz5hrCwjb8ES+WFDxy0Bqs+bgse187AsHDh+wUo63GSwkEIU30Rq2K6wWMZl1mz0gxq1myY86QLCsVxdMeLwLnAEJYFMWKIFA8FJJ/jDF7igED8/VTqe4rNHyCc3RjwTcKgEdACRemxwlnzLlFSVLykMO8OwP+fxttx5udJwptvnQYwec1bn37sFaywAKcIFRYQZgNCE7ZBlCr7biMGEwPKKgm3xk4gUFZHQiv1QyOu7m5cn4yvwYRzIWuNrmOPyfwaQh2wj9wGW1rz5ZwWaG3FgZI2DXq7bXn7Z+6knGAkRtqs9j70/aNvqlX3ra2ThsppxrSVsFrl2AV0Y7bOTjenJ9lK1s9Vl3KXjoKwQjQTsAWGywB+xWWm2lZtMdNx2EckT/oFqIBb1cdzXO+pPYV2hIjWHqFC8RArt0UJ7cErnYpmK0/XuwSo1/X8pkOyFn2i/6TWCP8Ayeah5z/7YAubAVyZ0gL1R5xbM+k8e+vn38m9KrbW3qBPKQj33EqG/5qFcMy/U7i5YF6jeuK3WBUTev31+yLVDXSffaWKTAuRQwjF0lluxhnf/a93Z+cJv+7r+/21z2u2uandd4w4MXms2Zzx11WMICqPjbV35zNiXvP/XKV2yT6+JuueuWw9crgzgwBqAkAAiseeskcAECJOW8chlIgBCQU5487fjUrfqsEF/AYJhiF4SNTSPZw3KdfIAF7iT9VyrAJEvCXMhRIRrOkQnYJCyHPMqkXNFU7Y8eV9sFeOrXBMFKDCBQe2xDbyskJLwCwQD4TG37wJmteCvJ41PwWP3I68oHBOVJXVtkHm6mQ/QPWiXyVb/Okcm2wFRZIOplIyZCYM+E6YpzrsjYaOW6NhWGkDJFGwnfcT494zEG2WfHdo5z4hXhIvSiq5QyktMn+q9jl0Dtl3yWDczxjZPuFvAIg1NhFTy5ZHZnRFw+XbXhnLHowjgbkIccQN71yxbq0ElZYE03MtcDf3UHIe/khOfcw7fq6dv1ntd8yKVP4K1td2LUU5ZtXQu87rXMnPCSS8+8tHnH3/847fHjGe8YxE6Hp9m/LzLSg0x9nHOaaJH+6v7elSI9X5Ql+m1dH3VN1HF4nM+P/wYmfzrs5QOvwOeOuGDyC7sIS9jbefaDd/H/HpGwM+1/D559wQc5+4tL35s//qusvUqz9dIPjHN+kGqZ/bmjDs0fbw8K8n4BCT/kv/jKLxKOeZ0BAbiwDq3zjq0GMISTgDuwiJPADjjhmUuwaTsHSMMUu4FPw8PcecBhmzGS/2DQrA2ypRxRB+wAwNzGPpkBX4Ga7ms/tyNAl+c7MiY48wgD0xbcQRIPqDyvi+aN33KXLYfeTfIARXYpiCRnxe8WiKadOvKU7Mqyu74z4dAolx/7dqN95Xln9efT8mqed8446dODhGzCW+qhODDoroEH1NxxMNZspAyoFEqhvbRdAC3bkjk/0VbXfmRSjy4gVZ+pfwvLPPhDGxCRbTogneWjbgJ01HV9gFEwKdzCes48vzvtv9NwZQxygGNgLeb6T7//00C2VldtuA7JQ3b9aRM4009dkyDnSh529Ep7YUigmLdameO+dVzepQHEyovTtvazNtkMUGujXqpiZRCynvaj0/K88BKhJcI/LOFnUmps1c3xDZuQZ+N7X5zj1n8tHhao376uNj1vdK2xZO13r4e6DipvqSBp/71P+nToIQNw/vwXe3BekIPV23n21j3nnv9IDxoP1paPfHi3aj2lb1mw3dZ90+w7e5Cz//bZF2asszhXt6XLO/uZv//MAKA3juXBwpsKFr1EwsNZBaUJJuAu/tUDDzAgr5sAxjDF7hCcO9l1Ps+NyQdKCWeBYLb660JiAlB7roAywTHqgT9gU/US2oAbueKv/PKGOiZDgl4LqF1wrna0felvLs0QAefB50777ZQPyKnLFkBNH8CQrEAMMAHMgsSENj0WBAdAgSjlgV/2zb5kobNNR25l1c3zcU57leSzC9vJV9YYukPh7XvgUNiIOFtvmjRpYgsPGsrnEQWFZE5gjnZKl5Iht61M9o2/VOdLLnldm6be9JCIHzbx0Y9jcFtLxl1w8gXN9KdMH0I82yrH4y9Uw4duGY4RoFsykoVObGi8CpjBs5Q6gfzot3u+9Ka7uiUre7ILubwmW5/uzvg3o202zT5i3MEyDzlgXm6V5Zpll1t2GPJRXmby50SDzpE2vvfA3PZfi5cF6jewq1XPHV1rLBn7dR3U2Ncx7RcJcC6BS4ElY9gWvpa9nR/a5sde8t582QLA4dXdZi69xg/dw5yV+JdPPDuX+fJShoq9BDC8hR97zccar3D2FjcvVXErGyCACnACWIRFAAkwRpdKBVZ5HO0lGDuI/W56sHxlAA6wSYAJeBm22TJiAZB85cBfbmO/4CfhMtrK+OXg7opj7kJzlUkIiz71N4TcOAZkVd45LwIRX+u1ycd947hmv0P3G76YQ1tkIJs2ckLBg9yCbAIZWGzJMmUmd3jt2ZZX09J5Vo9QXyoYZatsvy2bJ+vrfm5OefVDBok8YoF5yq3tDThNKkAecAZxytBJHSuwWCUjPapkjU8ugaePMWOZ8sW50qlV7QHHZGG/SuppW17Z2KRDGIQHAi0PR86MgW4nIeQ0CQD5vOhSXY/VVtpYm6EL+xpX9bK/kIodE7KjLogFxMbH2tZ0rpAj7alT4SNCNTwMaMKR8dBxt0ayJCBvM3vSxb8jy9mBZrakAw82Ge763V3pTTcpyX6i360edWi2038tnhao38Oudj2DdK2x+O53x77GvPIc9+C8+I79XGvWvTDmuvISVOGsOz+dayvnD2rEaz7UK7gXhGne++GnN1O2nZIxr2Jf/fiXpxGkgY6Pvuajzas//OqEY/GnoKoAkOxSwk0Lao4TpuxEyv25AGZ1EnqibSCS8BIAlGB2P3cN40TJkABqG5+E1Yi5VZeXMtsA3qA22gF/to61XfLdd999CVQFYGUHsmQIQ4CUulddeFVz/eXXN5vvvHlunbdaAjiSElqFDgTsgdKcZLTwXZCnTiUyl6eavQG0uwBDuUNGqewALpWrPDKBVeWl9LaGLKWHsj4eYLvlugE868MDpu4kKO88j7Q7IK4DYQW5wgRoDtm1lWkgSnQZO+1+ytnKmHL4koab2Kl9Mtrv1HVsbI0Jucg589yZzWa
P2yxlI5/z9M/JRYAz+Ae9ZHPeh03JUnYgQumu3CMeHTaJP/R0vbK76xkYW1JPCEtex8qaBFKx7dP4m3hcdf5VeZfHa7OBfXmihewYW5Bsab26BtiR7YzZTVfflCEme035x+Z7p/7j4A5N6P607d9D1D4t5hao38WumgVT3bx+f/GxQI15jXMd07AH58VnnOeLJnVxIIhRpAAAQABJREFU1MUyXxpdDBv51eXvzx9ht4hBAI/Wtsu+aqFqCpwBx0ZbbZRhGbXU3dBDGgAG6sAFryXY4EUDBpJj+wCxCywJowq08GG3Uhe6Kq+7JY92g3GyfsJu7IKezIt9MCopV0BsX9mut5kc9cmyjgG0eiPnQFnBr9v6JhFkAV28nNqmI88sO9FDrK2X0Ky8xsqDhycDwBKU23pW+qiJhvraAmdAT9KefPIU5LnVz6vpdn+lbjlyJvTTw0ogTBVwpoy7APmwXWwdS8oaQ95s15iHQT1YZz1hSxOyqf6tSey112LZQSQPrHbJzA7dcavxtaWvrTYqVd+DIRuMm7zMz8NWtmjVuFZ9AMp7C97rLoZzpS9QNRbKyTde5ZU20WFvY0cWH+VtxSmzOTvwuruWTRLoBoL1l9Dc3kWhb173UVf8s5ec3HjVjflSlWnTp2UdS9aVnNr276AedEy7RRvy3ZXx9sPes1xXR7+t38iuJfrfy641Fo/9Guca29HjSe9xHhV48RiWyadFb+c5H5Mv/ejwZqvdtkrvoh9qt4zn5lXcc97TQ5d89wf3b7bbc7uM3eRpLI+ZLTixtaYuOBGzS95gnoQP5UFHJUCTCTQrNDwc7lTRB2y12wWw3O+AFhAjSyXnCzztA/iCeCAK1JxPWO5sTQSkhLKQ1wNp6msbVPEweqCL7toQTgPYQLkVKYyV8sqBKh5ooQPsANLYyP7Q48xeIXuFQKSMLZSSQVvkBcUmUcA5Pd4BdmnDVtaE6/BeVqJb2jfaBoE5XqEDcOumap8OPK5CIXh2heGQU7tAEEyTzZYM2mED41LjmlvDGnLrM+Gw1SEBN/bpKiUox1Y5bdVxyqcNwsdf8tEd2F976bXNetPWSznUI1uNoXEyAWBrMpF9aOu4NvWrLWMIoJUXMw6u67qwdvK5J56bbyzMdZVXXXHYV9VPewVkg2ZtAV+vty9PvDsCJjY5SQrYfkQsB0gen9KztuQXWrLNMnP3EG8asP9arC1Qv5ddJQuyunn9/qJngRrb7niO5vXgvOiN6wKRePTCWCCdLEaN/vyi9zTrbLpO3valFm/jRK7r+s/vf2q+fjihoLxo4VEsIAN1QAkggE+ePw9JgRKp4Gq4H1CU+y0gDY7Gf2ebAT2VwMsQwOz424KcMoBEf8pl2cgDUWSt/ATn9nwCdse7W2AFfuilLI8tb6xQgE0fu2nqqS1xrNbttZwfmOKNBInqkUNcbr6MIzzQIJsnWn6CFHgPGRwD7j1fsOcwlrbgKnUI2cAju7IxKASOQ3DmOY42EpYZAHOGbCk3YIt+qj37BaXsVh5by+IBz5Sl9bquts5qWsuxvPXaW3MsrbgCEsnPntodDOEAdLWnXecAPjm0KZ8X3JYnOf+yK3AOb3DK2MqpzywferPjfffel8u6KSN0wrhU0jbbmKjwFNtXriYYdU2m3q2dTBKAs3Mmd9WeMrWiiPqpX+TVNcRL7drI/qK+a8HY8tRfffHVzXPe8Jz896pdurs7o80a65TZdR/qS9p1TTx2+dcOMvrv3gIjFqjfzW52F7i6+f3+omGBGtMax9FjWvTgvGiM5QKTsi6K6qAuljrutw9ugROv/lC+tcyPOEAAz1svfdiDV1iAZ35y7jubn37hpxl2sN1eAw+0W+agAyiAjAIp3kHAINbzAeDcQh1RhwD3YHJH2YTmAKgCwa4nVbWCsIKfBM0AkgS2gFjH6eENkAF56aGMW+TghU2rvG1CWIAR2E3PcfSb+oWO2hOKIWxhhyfvkLqCqFuvj4cB40FJAAiolMu+tR2gywMqjlXMrK12tc9bK0893mreSxMNExPhHgVt2krZQheAqnyFy6TZIo9OZK+kDP3SIyw0w0Ek4GdcJO3at83jgEoyOCaj/tktPbmhA+A/6b9Oap70kidlqAYZ0useNlUn24mmbNVlN2Ukdkn52vMJ15FvPIwveQvsU/Y4VzqBZsvL5XJ5YRcToBoT9sgxi/bT8x/g75oC9mzKbuRJeNZH6EsfwJseZxMQ/ccHoP/4cz9unv+m5+cY5IocoYc+ciIVY6WMevosUDeZ4qWn43pT18v1mv27cN3rl7z69SlZa1Lg+nD9eFvj6176ZabqU2+BsRYY/R1VqP8tHWuqSZ1Z49gdu3F5kxqcxwk8qa2+CAhXNp0TUbsXz5yUX9LKfPbbh2bIhlf++gEGPrddd1vzuFVfv9BN8brX7dg8+2+enUvlff0DX2+mPnZqLkNXL0dJmAHQAUsA0+oP4jylhKrcwSEtZLXHNpWG5doMYJagy0sJ0HgMAyALDAsIAYpyEjjRTsJJgJN6AFCMacFy9gO6WrAueFZfO3kcUOM2vLAFsdsA7uZrbk4P8y4H7JIrJIBJedY+pmvJUGA0BCUrjERfCYcwNmQqaLUFrOJ3z/nlOc1eL9yrWWWdVZq/WibWAqZzC6XazASIA+jUk6ovgEc/+erVccGtfHBXupINMGZbykcC9s67zuTbT3AOD23pwubgPsGZRzX+DGUMWY1NgnOMiWtC0i/7pd3j2FZ7yvmoQ5ayizolB4g3WTn5eyfnWsigOPtu6ylLXp5/MPuoePhTDLZrgmxpC+aIMfDHmJnYabfkSLvEeK+18VoZzw96yXPLNbdkTLd2jR8dyg7s7q4KT7Pz1uzmjf/Bp3/Q7PWivTIOnjc749hNIGJ8yMmrD/LBttjoVz7v81ToU2+BObbAuN/Y/rd0js03oQVr7Gq8Ro9LuB6cyxKL8bYGf5yKdYHUuXFlR8tU2X47sMCZd3wqwQwMgIjLzrysedbjPzDh5vni91+TL8oAAsDRiyKAGu+gh8tAdcESYXM/AALA+NtN3XLygUuBTYJylE+IbuEsmCjBCHiBoyHwtl7GPA4YSq942K3aA2RDOUBpAJBjbein2uFldOuflx+M8WCWJ1FMK/28FQ6IASbxzQXn2gRa2gBa+kxABIcBsPb1KR98ATlrQIP06U+d3qwYsbWWLksdo7yyKSMo9mlBN/OjL+0UeFoeznnnJLLkuag3hD8QGN5T+a6penBNHfCcMrdebODshSO842vH+t0mcWLa06PM9iPy0c1Hu7ZSedwTPKPv7CO26ZXlma46sZ/txcVRdYRVgOKTvntSPjDrxSw88mQv4CZjvskyVPbgJtvxGDtf8K6MmHOvsrb1inHhKcaI3mKTxU+7ZtVjQ+OhL8nkRh9sU+MsTOOUo0/JVVTqQUI6GVP2TXCOB3zlud6rDXXeeOg3st3+q7fAw7VA/1v6cC03MfW641XMU3l1XJJNWnAugQk6KnQJ328f3AJd+42WmhN7jqs/J/VG+1oSjn8y450Z7+xH3Q86mPv5l3
7evOEVX59w9b/xizc2a2+ydoIlaPaAGfjxAhawWQCXrNwC4KjQwzLtCVCkLdCUt/tbaAaI9Ac2oNLW+YTA2Ac1QDEBuwVG3mZlyKR+gbL6lZzzkZf1o5y29J+gFBCoHX3LE+9rAqM8D6gXwIBdx9FS6qwe2NSeegWn9uXxcvJ6gmZr+IJxYL7Jdps0K66xYq71qyy50j5t2yWryUfKGoDrPFsU8KUuoZx8H3DIJuwE9AEcG9dEYAjB0R+5cg1ugBs6XH/F9c1NV92UE6MZx89oXvy2FyfIgt2E12gn5YsxGgIwGG+BmI3T4xx9s9udt905eJFLjInyvPlsY3KSNqYv+wTU8uTy0Or3iS9+YsKnsI1jvnBMs+q6q+aayaCUXuyx3IrLJfh64M84+Msm+gfOd916V77NT3iFPrxdUxiUhwI9DOnOgRhqerle3QUwth7qpKN+jH/aKOprR3yzBwkt4VcrdJg4esU2W+d6zfEQovjuO266o/ncWz/XfPrTZ9Wl1297C8yzBfrf0nk24UJpoMapOGf0uCvEpAfnUqIrdL8/3gI10KNn58WG49qcl/ZGZVtcjo+b+YH0cvphFxtpebDHr/nGSaGetWetNGGVAl5N3ryNtt5oAFQkDAhJ0Av4sD9Mbf7wOHYSmgOoePNAEQACHXSWCg5BkfMFw7b6BkQ+oA+4K6PNhMjIB5DZTuQVXDkuQFUu2wK+AW6gGwTqV/vk8Cpl8ikLPk0cCvq0lW1EOTJI+geG9fBggmTc9gdh+cBheLZNOHhL19wglkALD7Z21UsIbqFZW2BaSrgMICRrwnT7gJ1zJh0pQ+s1rrWNgR+59SVsgE48yPozdmmn1kZkpKfwkb0P3DvHzVJ0VoGwBdMJz+HZNabqspW+2TxtFnlSTiICXEH4WT8/K221/KqDeG6TwXwzX8Ap4NWm8mQFu7zNlkTkQdYPudNrHHqc86tzso67HTzGK60ebVkSMWSo8eRdLuCld4JuPPTowVvgLBzDpGWT7TfJpeTSUx719f2Tz/+k2ftFe2doivHTf45dK5s7EiYD7CE+nc7sXpMG54WNCLNS11gv7GUlcwD6ryXGAv3v6eQc6u64FN9UXh13Je/BuWuNRWy/Bnac2OMGe1y5Oc0b19f87mNOZZmM5cApGAUJwMADZid854Tm0Gd/dqGL+/Vj35C3od26ftl+n2o+993Dmg232DC9d8AHnLqtDyKk3MZuHQ/z8uz9X0AOfP3mmN802+yxzQAaox441CZorFAEcCaBQlADtsCksuAKvAAyIASi1C+oznYKSu/vPvfImOAcIQvgfAivIYc+tAOIQK9zXmoBnBJeWw+xMgn6A8ZNQDVxkJcQFw/b8fq6pS8koTzP4NurzI0x/dgy22294QR0LBWM2ibAhu0ytX2CdDL+/u7f5wRAOXWrnr4BKeD81Td+lcvMiV0GgGLUCwS9Nlo9UL/Z9M2a2264LT29j1z6kflK6QLNhOaQl91rRQn2kXi89XfF2Vfkm/NyQuNhzWjX2xZXWnOlfOjUZCGvH+NtHANQeZs9kAjsjY0PLz2wZTtlyMJ22W/IQBbXCe80L7NQInWuOOeKZuen7ZwTKt7hlC/G9YSjTmi22X2btH2FmLiu1APGU7abcv8kLOwMntkxr5PY5nVnzNvr1HVMTm2Rm1dbGSEu2y/3mrRJ/9VbYEFaoP89XZDWnfu2azyKaepYS5XXbXWpzx9xfvzXNLnSKw7ZKgX6QrzasE+zWqBsM2vu4Ghh2WtUhoXV7zidJ0veoS/fujn4XQfn7WReNT/e3la2d7xtbDKkI398eLP+tPUTWnjzyuvJ01bAU3I6Hk3y0usZnkUe6/IwgtCCPsALQAAP+AJPeTs9XlShjA+QkkBYARqPtXMFzoBGGwWhQ1laWXmWQVkCWECQfhOKWqBLeUC8cwF/GU8b7dUxXZQH3s776Ju8gBnQ5muhoz/l7AtJ0I4l4MTVJjgHAOo728jGBu2xCRjVVsrStl92BXzic3k4wZp4bPAGLE12nAeGIJ3HVWiBa2m3Z++W/YsRBsoJuwGbPMVii3mngeTjn/74jGfnkb/2smsHS7AFMINeti2ALdnJS9YzfnJGhlgsv9LygxANYBnA+52PfKdZf7P1mzXWj/Wx6Rz6UPf2G29vNt9p8/TYalMeHQua2cAkSf6jHhV3FsJ+7iJoQxmwrI0LT72wmb7v9CHMgnsf/bCza4Z8rpcCdJ55EySTLx5jITlkpR+9XNf0Mq4SuewbD/lSXg9RVl3jzIO/44qH57n+q7fAwrDA6G+pPvvf04Vh+Vn7qHEo248ez1o6/kv7/BcnITgf3ILzkT04G7BXtPYYHTzHX5hAG43KNZGyjLPNws77/m/+by55xSPph583cfPm4IUtxmz7+2W89ZAnMyEumIKntfZVzP3cub8ZYAu03ao//cenJ9CBDV7PBFYgEm0BHMdCC0CK2/gewAK6zvlon220xasIqjLGN7y9+gFL4KgAqBuuoW7CT7Snv/Tk8prGfoZpAKVYnQEYJXiDWn9iW5Ck/WqHhgVzgCo9wBE6wMMM7MhAnwI3/Qk3oFOFLRTga7faVkf8rLsOdNNf2inyxRCDRUsBygeGQDjBOfatAiE0QTwvONQ+T7iQBg/HAUX6idm1FZphcgKuH/fUxyVgAnHlQKt1j/UFKsnP+192604k6Hv8t4/PMamH8PKOQIzVdz/+3eaFb3lh2vFdz31Xs+sBu6a8vOHg3rVATmPJ3hJ9TR5tJfZ1rfE0k9tzAEIzTv2vU5qnvGq/LMceYtNNHOiurvjmXZ+5a9ZJeWJMpD/9fjApEUt945U3pm2ANYh2raYY7fWdFeLLtVPjkHAdeeQG/eTzMpzd139TFe+3vQUWmgVGf0t1vKT/ni4s45fty951PLsx6MF5YY3OXPbTHbzRqjXAo/kTdTxO1skm48KyzS8ve38+sFRgxIO47SR689jPLnxPviAk4TFABXTal2orL4+hTsAGIAQlp//k9HxICxCvtu5q6TVWjkeWd5NXUEysiQPwAW0eQtQeKE2ICq7ibQZyPoAFYOrHermbbr/pECILrPVfiYzqgD+A5OUc9bBc6hTt2ILCBLkWaBNsgXR8qpw2C7LAMdgFbWQFVAC/vNsF0saV/ADNeeWGbUdf2lbWuPPMg1eeTC9h8ZAaudku5Yv+gTM9M0Y3jnmilalYXTY77Uen5SQFpAo7SW92wD0vrfZBtZAHaw6zIxl5o3mfrQJiLNhaXyUzvVP2sIdk7M478by0gYmPWG6hISYWHg4kE3uWXHQ874TzMvbY0obsyg7gmAxlY/vGSz0f14JjD1yeeNSJzVNf8dQEaNDKu24chIZok4196KtuthWTJhMr15mVUnj/xXl7nbbJB7gma/eaoV/KFmMD2rVT41YTKnYE6XtvMjnuEJG5T0umBfrf04U77mXvYpbR43HSTDpwnhOhxymyOOSV7qO61ICO5k+243HyLyqyzy9bvuUf98j1lEGSH3E/+rdce0uz02oLf23ncTq9+4NPa/Z52T6Nh78AG
8ABM4AnU+3HFmRIIMtDj+DGbXYwx2PI4wgir7n0mgQunlNQCGB4QcFRrWbBDjy4wI2HrzygygA2rzAHf5effXnGils+LG+/h2c6oSwpOIQhX8hMjoTeyEoYirwENtAWf8qbOgrPjquN2Bu2DQQTQCN8olZb0L62ye48iEtvcxdC21CRAjC2AaG8qT7GXjzz5o/fPO3NU13QrH82SSiNfeEn4FYIgkQfwMeb/K0PfSsfmBPrC4rJs87UeIBuzVWGHmvtkrdgP/ejTSEh+mV7fXU/6XkNm+rz5qtvTo8vsDWOHs4zvsqoQ54CZ9sj33Fkc8DrDsj+jadyyuR4xFZij5Qn2qzJFXuaVJGHhxkouyYsl8hjbXwL8P90b3iWQwe6gXhbkzjXmq3r4NQfnNps84RtcmzIm3crwm6V0o7tw5na1kdeUyEvgHY9sOnl51zePP2x76lq/ba3wIRaoP89XfDmLxt3OWVc3qgkPTiPWmQhHtcAjeuyO5Djzk/mvHF6Lcr6zK2tP/Xlg/Ihp1oNAeTcfcfdzfaPefXcNvWA8u//2LOat77hew/In5uME6/+cHrrQAZPm23CpK9IQCM/ARnAAqCc8dMzMiZ1i523SKiZed7MjM3Nh9QC/i769UXN6husng+xAZ3f/Ow3jTcYXnnelQk0QIsHE2CBIltxvOtOWzc9sx4UU085nlJleFiXfky85a0TH0s+EAeiJGAGlDJEI0I9yC0BzvoEJ+V+nogv8JQAFWWAGNAGdbzNIBPoLrPCMllHe2wE9kwIwDPII582gG7K03qeQae2xC7ThadZ7DEb8pQ+4tHhDU2BBjKxb3o/I095HxOa4755XL7wg8x09Nl6962b3140AM5zTzi3mbLtlLTp0Z88Os9tuNWGzTpT1kl99EE2yYQnJyrRt8lA2Y9dJbHGdCe7j3FRRrgIO/A4p83CFs6LqRZjzSY86zUZ4DEW32/ZOVtwmuAcdcqj76U1l55xacZSqz9tx2kJzOxadw7KhmCeLNpKm8c4G680Xzt29LrsrMuaqTtOvR/g41yNsS0Z2JjuGZrTXhtp99gH+jz9F59+cXPgkz6aNum/egtMFgss6b+nC3IcyrbFJ6PHD9Z3D84PZpkFlF8DM675Grxx5xbFvHG6Lm46Pti4/NevB6tsuKUMsoDDlD8d+GDF5yj/4ubI5kOHfCie8j1/jsqPK3TE0a9pNt1h07ydD0hARXpb43b+qmuvOvAYAtCIFS4PX5UTe8vDBwBBh7fyibUFNVfMuCJBi5cSWLnVD0i0D7I9WKY+TzxvIUa3Bi+4A4s82Mo65vnlrU1QjVUxgD2QK88ne4InUAuyyFeANAs4B7hlson+lFE+UxzTr7zW4ppBs7AKuum7ygFnIKof+idsR9/0Iy94s5WvfaEn4Nk+b+uyy8WDf21sMZiXalvtySvQpq999V036R2NeiCSNxtwugOwylqrZN/k8AY8axC/6B9flKALRBOWeYpDb2PFbgWP7GmMrWXt5S7eyifVmBlTdhAKYWLyy6/9stnj+XvkpIJ+HpYkC7gV2sLuVvswhmKk1REPTgd9pEc37iZ4OdD+r9w/vefnn3h+YyIGwtlPe65FE5fbbrwtPf9W2tDWBptvkPqkkMY+dGJjYSlk8Fp1Y0bu0oEN67pgb+2wQdqhHUftsSf77L5eH9+c9u2/Jq0FluTf1Pk9KGXLLpOMyxvX76QC5zkVepwikzmv9BonY3fQxp1fHPLG6b8k6H3Mhe9ODyCvLFABZ5v+90se1pB+/IsvabZ4/BbNPpu//WHVr0qf/PLL8rZ2en/j9j2gBCu15BpgFcMMukD/VrtulcABCgHQzHNnJhA5vviMi4cPrPE2AjngbAkz9cAtkDr/pPMTnMGRFT1AFi8ngGEbcAh+lE2wCcABsWQq2GE/+7b61rbzCV0BSwApQTpAtaA0Pbut4rzKAAkEZz/RVsJhtKU8G/Cegn4AJkxEPmh1Tr+AFpQJKdG/Y23wilv2jnwpVzy4Zgk29ZUnl8mGeOyuTM4XOJPv+G8en/aVR3+rtABPcrPp1B2mppefSsprF7SyA51O+/FpqS3PNB14rsHvEMJjbWr6SGULkKuuVTtAvvKScagJAb0Aquviq+/9aoKvuwP69iIU9dmCbdnKMQ+7NvTt9dUXnXpRXivG3wod5BOWY98Y6otM4rFNXr794W/nmPp/4j0ffnqu4cwmQjtMIISS8OQL7xAP7dozDmGWvJ5AeMoX/adMMQ7KG6+c7PD+x5gaL+1de+m1zdN36MM0cvD7r0lvgSX1N3V+DkzZsFhk9Hh2ffXgPDvrzMO5GoRxTdRAjTu3uOeN2mVxt8WvrvhAhhyIjeXt463b7H8Pmuth/t5pb22etfP757reuApf/ulfN5s/bvOENiB37x/vbW6ceWOCEfgAdDyEvMC8rCDJG/PS4xo6JPwGqApDAG/XXHJNgt6mj9106JFOIAwIA08nf+/kXMnDQ4NAGaxYDQIU8kBbIeG+++5rbrvutka4ATAWZyy+FVQ71k5BJrgDRbyE62yyTrZDZkAEhnyANIiqBODyIcY//DkBFqj5aIsNAKDb9cCZ93bZFWOliOhXGW1luEZ4Qk0sbrzqxgx/0B7b7P6c3VOOirklC5AcgnGnDzpLidABsWRNj3Lk8LKefezZGdNsYsF77JpRJwE6dBCisfVuW2d8eYGfV2ULtRBOQ5ZrLrom44Z5ncEm7zD984HFgER3Fowle5WHFnybGJGZTeTTx5bdjTMAdV04b6yFNgBdYTrb7rltThK0Q5+81qMs2DZWJmr6dA2IjTeeeWciYtnzDkRcE643YRcv2ffjNWyz3f792/bMicT2e2+f4UAFyz/8zx82G2+zcXrLta1voP79T38/veb6z7CZsL1x9bKYpz323bPtqz/ZW2AyWmD095SMi/tv6vwYh67dyl6VV8ez62fSgHMJvSgPfFeHrtHnZCC65ZeE/VFbLc42+tE570hvl9hPMLLxvS+aFEN8zPnvTuAAoM/Z9V9Sps98+9BcCQM0AkPgwxNoCTtQDSTr9dQJnQFavIC8lR5i46W2DjDYAnYA6fv//v1m5/13TmgygQBnbutr3yoatuAU5A3joCOuth4qA6dSho4EtAFiYC2OWPvDuOPwjPKOJhCH/Lyy/vL6Arr0zMY+YKQf+clZZejKc85javUK8vM8KwPqfOiongkAWYEZnY76t6Myzpj3FDQrQxa2Iiewved39zR7vWivBNDUJ2QocHYMMC2txrsMIA9650HZBnsBZ2BbL5g59ivHNk971dMGnvEAVQB70ndPanbcd8d82NJ4GTdygl3gfPFpF6dtgCP9ncuJAUtF2Ib+xZqrx64ebgWcdDEG7Ehenmzec3YCq+wGSt1l8Gps9mTrk753UrP7s3fPMB6wXg+Oqi8u2p2IHfbZIT3GJmGXnHFJc9hzP8sUzak3fSzevPmG3J/d1/s++szmCc99QoaAePmLNwx6kctbDv9Oc+ghW2dVYSju1tDZg4ResiIen3684fNrQjo7OftzvQUWpAVG
f0/1tTj/ps6rLcteZaM6nlO7BThfEP/NTXx6xcFbphBfOPLCiRdmDiUomccVX5T0GCf/wsobZ8PF1XY3/+nbzQXXXd7sPeWtC8u8D6uf75/xtvQOAqaEo4AgEAiiPFD26GXjNnd4QstTyosOnobeygBTHtOE4LglDlQvPfPShCogquwxXzymee7fPncApgFjwBngeQWy8hlSAHoDztJLGx7Jat+x2+tg1HnwV95T2wRpbcQf8Kw8kOM1zVCNAEBwx5sK4KQ6B4xBvVv/9AVXIBLc84gLKdGH2F/gCZLZptoA+9ZdBplALWVuV4MAwyB7gy02yPLsy44hZsrDo0snK5J4OPB5b3reAFIDjNMOAa/sRFb98DR7gJInVz4IBPfa5xlnS+2BajqYePDQC5swPmQXgpE2CBm0ZwxMhHin6SdOmX7kVN/ET1iKfXKwi7Z468moT956gM3G2jNWv/7Br5vHPumx2SflE9hDZjYmh7AN61BvFuuen3nnp3I1FrbedplXpa0e7MsbvqyooR/e9ies/5YHK9rn9xZYIiywJP2mPtwBLRsVa4weP1S7PTg/lIVGzpeBR7LzsAZh3Lk+b/YWGGfX3p6zt9mCOvvO9z2l2eN5ewxu0QfcuNUPYngMARTQAj7gCLABPt7KCgUAWmAIwN5x0x3NWlPWamYcNyMfkgOLO+2/0yDuNkAw41IpEpDF06ltkAc4ASU4KwhNfTuAB9jAHu+nW+8ewKsQA4CnL+2CZfBcXlMygznylR4pQuSDRnHcdAOg9KCz1SB4g8EhaLaGcHpOww7ZDxWiPlkLchOII7+82yYD9j/wsg80b/3qW7OP1LOtSz52JgPv+0WnXdTs9qzdBrZoy2QfIQM4P/nok5u9X7T3IGwjvOJ/+fNfmgt+fUF6Xel1zBHHZH1j49iHRzzXfI7JgX5AN6+5CY1wFXby0J3wBdBtzOsB17RXjU/oYmx4nP94V4T1xFjoh87g2XKHxkKojTadN2HSvzEpOzm2igrwtSyhBxDFPbMxWx39qaOb1x/81dD+geln8QxBPdRosvPETd/2wEJ9Tm+BJdQC/W/q+IEvuxRf1LHSlTe+5v25kwKcS/A5Ffp+8RfOXsk3rrfJKvM4WReFvHG27m08MSP3k3PfmSDLk/mCPT/SHHP+uwbhGgGpwNTKDiARQIEtoQRuuft4CyCw5LXkZT3+W8c3+xy0zwB0Wk8koAJzlUBhQnNAmQQqeYx5jtN72z5cB5JANhAF7WDTecBOLuBmq31hFsoWgNvyloqxBsXq8CADPueU9SKRk446Kd9Y54EyctHJ2sPaBWvDkJKon/AbOimX8BkALDQiZW4nAOA6j1t41h+9pIJh8qrPO85LDGx9rFySRQFn2Du95rEFqhtvu/FgLMLWzgFuq5J4BbfQDDHjwP/Mn52ZExirWCgnfIRO5KQ/eOZdz9CVkAnkWkqQvtbUds5Y1fjk2IS+xl14h5e9uPPAfgA7w1gCioWqKOvaKL2srCF0wthmik2OQRxYRWOjrTfKyQD7bP5/DhmUGfN96k0fTdup89KnfnJMiT6rt0Bvgf43ddZroOxRXDF6PGvp8UdLxbu52/+9xhdYGLkvb8M0jphEYRol06j+k0nGUdkWp+Nx9u9tP/Ej/JPz/nkWLyWgA5xCB2x5GMEe77PVEUBsQmZ4KmfOmJkwCpgsOcarW8BUmqX3tqBywJUJnM4DSy/p4L1eb7P1EuSshuAV0B4047UEbcAwQwzCc60OryoPJ5AHy4CbN/Xu2+4ehFtEnQTGiODQP11A5QUnX9BMf8r0hDMPQF532XWDl7PEUnsJkQHN5XVPGwDfANEE4Ngm8AekA+ZRcDaxUIesWSdgu7ywyoJNtqST8BGJjPJ4jMHn9k/cPh/K074JhPaEaAgJcY6X17KAJhpTtpuSsqqvfeXBMT3VW3GNCD2JsA0TCbBLFhB+wndOaDbfafPhWtT0rQkA4NcegNeXuHPysq+YZnHibF7QnxOEaNf1cflZlzdb7rplyqRNH316Hfkq66yStn7KFu9Ivfuv3gK9BebdAv1vatOUDbosMS7voay9VARHTzw4HzSIbz7iSxMb3/zyVo5Ro020XKPyLEnH48akH4+JuwL+81uvyAcFKwYY8GRcbwAgz6Z9XkgABU55q8WxgjLQyqO60morpQeSl5MHc5iAcvxvlB5c8MzjHFAHLHlZPbjnTXy5FFmsCw1ewZg8nmEv+gBwjkHfk178pAwlAIoAHlQDQ33wUgNc4RH50GN40RNYA/rArE/BJdC77vLrEsatKJKx3hGGQHbea+0oDyJ5XCUeVnnsQ05tF3DqvyC5IJsc9BVuku0GDOe5sJdwlKsvvjq9sbzdzpuMeBAOtNKXbcir7VO+f0rz9Fc/PYGV57nW2qYDO0lk45kWSmOMhFaA3XoYkizau/m3N+eqKfo15kJp6FRjYzUUnuSaLIFnupm4iL8W5pFlY5yMFfnoBdotXcgmJlKuHdeGGG0TFPWM+1O2nBWejzrp73PStPPqf5N69F+9BXoLzL0FltTf1dK7GGL0eE4tOeHg/HAFn1MFZ1eu+h5Xpgw77lyfNzEWGB2vfowW/jh89PMHJvjscsAuCZCAEPTxhnpwEPyAORBZ4CkONqEsABqk2QecYBqEZWq9zMBbAlsJbwGgYMzDZ+BZ6AdoBOG81ekpDhgTRiFeVmwuULXsGcC1L+wBOINm3tCE1ogRBvjalK+c9vRJBt5i5cQ16w/0yfMX7KlD9gTn0J+u2gN7kofmwKl2eV0LnNVP0I62nffRNnAEpWRImGxBUywzecAzu4p79oBfvdJdX6DVRIWHnzfahIUnWYy2GGhyitsGsjlxUClsRkZed6tRkJ39hNWYmJhkJLjHRIAHm13kGTPhG9oxqcmJUuguZrzWAHc9WEpw3U3XzbAWME6n0t15YR2XnH7J4A5EtOOc9lwr7iq4TkwGjG+uXBLrNQsXMRZstPu6/ctKDGOfegvMiwVGf1O1tbj+rpauXf3G5c2JPZc4cC5DjTNO16Djzvd5k8MCo2PYj9vCG5f3/dszm7U3WTvhypv/ABX4A1Ue0ANbYl7lSUAOuIJYD3yBMFDL8wmCE6iiHKAFkAWT2gVT6gJGnlwAVkvigSegDgp5LHmav/OR7zR7PHePIeCBQIB31+/uStBWVrs+II1XGMQB5ATx8PY6B34Bq6QfcK3/qktWcoNmWzpb0q/AmR5Al018gDMvcUJ72MXkIoE5ZACG2qiJQHq96R5JO/mJcsqDSp5yb2C0jF95fxOqYy1uMtLR9sh3HNm8+Yg3J7jT9aIfn9sst8lgMuGhPbJ46PPO2+7MSQnoFvcMWPd83p7NRtvE8oMxOWGbGkP9p5xRHzyTjU1Aculu61g5AEx3Y20c9Mne7MXbbVIyfd/pw7EH8/ozEfBRX5gNGxrrtE20oU/23HfLf0o79V+9BXoLzJsFRn9Ttba4/a6WjqXX6PHcWHCJAOcy0DjDlBHHnevzJrcFxo1rP54Lfsw+991XJlwCWWEAIEqIgWNAw/tZadX
/Wa656NqZCTte9e0WvfjWaY+blt7LhNEWhkBTgbN953hOgSzwBmPlGS4QVQbAgTuQy+sJtuzzjkoZQhFtgDeQWgANMHlM9ZnwGoCmLfn6B5z6A2zaAPJdeROgl4qXtYT39IKTLmiuPP/K9Ihm/wGXK6+1cnpJwSN5tAeawaN+cgWRiCcmj/MFl/oriLS+ctkFcJZHVkz3Dk/aIWVLJUM2AE0m/WhLv8DcJODSsy5NHUxuTCjoYpx8jIktOTw0ycMr/nitjQav4da+fk1iyAC8y/4mLexVkw2TDN53HmgQLjZcmIe2sk6Ml/juWqnFeFlOrmv/uoac065zOXbCXcLebFeyH7T/p1L97tdlj/hKM/UvL+1m9fu9BXoLzKEFFsff1a5OxQiVV8dzaJ4sNqHgPC+CP5SS1fZouYdjpNE2+uPJZYFxY92P84IbI3HOoIyX17JsFQbhNjp4LsAkAajKB8cCzKzoAKas6Txt+rTBkmMBQ2BU6kJz7ge8ilUGWWJwAR/A5H0EiFLCaMAVwF5uleXSOw3sQCDYVk4MLpiTn0vWtd5LfQAzbYBTcNaVgVzAWb5yCc2Rl/JG9xm6ETJ4eM/ayLs+c9fmpqsH60Bfc/E1OanwspH0mgb0A+YEzOhXnraFaPBcg0XAXJ/SjVzlFSdnwbPY5OtnXt9cee6VzdNe/bShbOoDS+351ESgXsICjK2GwoYgF+AaL/WMFThmc7DrjXzsCFyl8tDb1zbbCqOxrrcl6bRXITrGjFfZQ6LAXNs8zyZZPMj01i/deNEtRVchIHu9YK+0jXz2qkmEcSSvdssWZKDb295wNLH61Fugt8B8ssDi9LtauhQXjB7PrckWG3AuQ4wzQBlr3Lk+b/GxwLhroB/7+T++nz3qsIReQMQTaeUGkApoALUVLwqGgRTvoFAA8GR5tHN+eU6u1CDUA5Qpy3MJCoErL+tf/if2w1MK5MTsCvMAzgldAXOglbcYWIE3K0d4tffz/u552QbgA3c8rvoFXAAYrGrDvjYSZFs49cY+b78DsuW9zTrhpc0EllvIry0wNRkAzt7axytOVw8qWvmCviYWj4kYXfoBPtCcXvoAecAK/hKYwxZ5XHAenWpfHdvyPNOH3dOuMWHwCnAx4PKtSLHCKitkmIP8A996YMJsThLiPFum9zzapgOvd75+OuwETD1oeOEpFzbLrLBMs9N+O+WydilT2CplIUeMkUmICYP+ahWOGmPlrJpSr0wH4tZqZieTm5XXGLx6vbzsBeaWlVtv2nopH+C277pKfUO222++PSHb8njyhPB0vdHaf+2BX8yh6r96C/QWmD8WWNR/V7vyFw9UXh3PraUmDJxLcAI/XOG7bYwq/nDbHG2nP170LDDuuuivh/k3jh874sUJNQWsFZLA6yt+ebOdNksoBVDlhQRV4lZ5pC0n59XT609bP0MXUrKIby1vbAEi+JV+e8FvE9BAOTAFmuANiGZYQHgmpYtPv7jZ9Rm7pmcUFCaQBowCTw/CFZxm2Ad4DrDWhn7F5nqADpRJgLlAG1iqG0qlXlkgvrRPR8B59i/PTmDN9YmjP5AnnMJbAIGiB+883OaTMbvhOSdjThyiHW0LQxj208KzCUWm2HTBFfz68NiyMT0qkUvb7G3CAFzPPeHcDGM57AOHZVm66yv1NMFg6+hD2ImwlzN/fmZ6cg947QEZi648m/uop73NHrdZTgDU1R8ZjHPqEMLIY1uA61oRegFuTXiE+FgVxFiI2XZdgGEPeYJ5K3h4OJDdTHbUpy97uA7IrX3H+q+Pvp6zy+D18WWPfttboLfAvFtgUf1dLbmLAeqYRSpvbq0z4eA8t4J3lR5Vdm7bGq3fHy9eFhh3rfTXyPwZ43/+wH75hrf0PkZ8My+xB82A83Z7bZcg4xwYArfCNYA1uAJHe71wr1x1wTlAKAFAkKseIOLVdA7QWqGBhxJU5wNoAasgCYTxDutX2Ii6+vABViAOnDlO8NJX/E14DggHrGQEZryxAFEeOSQAB9K0KyXIxintFiBq13rSZx17VvPklz05AVp/gNXDkDy/G2+zcbNGLKGXnmbxzPqRkpnjq/ZD5gJf8leqff2TTfvkTr3CbuRLfVsPvnb1keE0EUID0NlX+APvNJ1qJQ8TgzwOAAWp6nqhyRUzrkjwN55sD/r1ceJRJzY7P23nYQhK2cFYkQOsD+0TtkqwD8BPmSPGm6ffGs/G95IzLskXzoiBdnfAcnSX/ubSjA83pu44aE+7dZ0kiMddDLL6iJEGzvpkO+MB0F+w57+W+fptb4HeAvPRAovSb2vJWr/9o8cPxyyLBDiXouMULGOMO9fn9RYoC4xeQ/11U5aZt+1Xf/Y3CaGWgeNRtEzaY5/02Hy9NlASF1tvlfNyC+AmX0gETzXw4ZUEU4Anvc0Bx2AOKCl71QVXNRtvvXGGUGjLbXxACJIAIRC0D8Z4jHlhATXIE+LBm0kOkA6aC8CETJQnlUc6QbwFPJ5toMj7SQZtqucD8NTTt5RAGnAnTldM80ZbxYoU4dEWagLGrXDx/Lc8P+00hFQxvm0qGE84LoYG0MB55FgeGdgp5QrZAHSBJZlAZIZBhIy/v/v32c4sXtqozw75EpaY8Pij3erPJIg9edIvPPXC5vQfnZ62XT9ehW1JO6uhsOswbCbKll3JRi5wTxbtlLw1GXG+Jg7G0oQDJPPMC+/gKRd6Ywzoxr5CU/TBHuQ0piY/jldbZ7WMn9ZX6hDF9JmrjcS4Pmlq/yruutb6bW+B+WmB0d9VbU+m39aSr2Sq43mVc0LAuYQvZcYNZJUZPTe7OqNl++PeAqMWGL2u+utp1EJzd3zkjw5PryxQPff4c5tNttsk31IHgsAqT64H0oApLyAPopdbePiMh9ALOjIFFCkP2vJhufCIFhCqp5yYXn1sFK9kBobAzAoQy664bEIST3aCY4AtoOJR5ckE5/fGcm1NOI0LzEFcwmV7yx+0A2tb+flpPd6AOr27oSMvKZguD6f+Mhwh9BPXLLxACAKw1AZI/M0xv8lXTFeb6hakDq3dQmEddwHQfn4CDPUn0aM8z2xf4Exn7Rfc1yQCSCqvXMFnt82SSZ6y1QeI9TBn2rmFVd5p4EqflAc3q+NvbNmD59jDosO+YgeoJ+zHJER/ZDEBEs5ijIGwBwhdL0JdgLvy1v0WIy7+2uRMTL0JSsVne0U6L/XeB+6dMuQYR9vGTPv7bP721MfXRz77wgz/0MffHvaNYX6/01ugt8DDt8Do76qWJsNva8lVsoweP1yNJw04l0LjFCmlx53r83oLPBwLjLve+uts7i353n99RnqYQS3IAcObTd8swQosA2deRR5DYMkrOGXbKekZPv/k89PzDGwBFg8h6BHOAMiAlWMQ5i1zwh7SYx3gJZaYF1V/yumL59mazZIH1nig1QODgLhSQSRZAFz1xbMJ/qy8IZ8+9LJChHPkBGvgWb8+CZ/RvvARkGY953pJiIfnEtxb0E6oDahPz2gSZUnUbvFq2GFcqn66XtWEZ0Ackw37qU+7WkUBNl0BKPuzE53Swx
8y8+iCYN7fCtWgk5RjGWOg7g1X3pCTEGCb8N9OLMp2yqdNW3imA7uZROQr0WO1kSpDBv2nJ7mdrABiHmYTFy9f4X0W5iJEI20edqcb8LeCBlt7CYzxJYN6N1xxQ65oAqCf84bnpB3pBfz33+5d2f8nvvTSHA/hN5YFvPPmO5sX7NWHc6Rx+q/eAvPJAqO/rRP1u1pydPsfl/dw1J5QcJ6dwF1lZ1euP9dbYF4sUP+Qum30117XGrPf53EWfywO2Zvq3M7n7QSXYEwSwhA+0yzDW8zbzGP473/777nqQy5DFpADgIAZoJNAllCBKdtMyRU1xNwCSN5IfblFD9JAMbgFW7zWoAxQVViBY21livJkAXrgkIzlNRVakTAYq2ooD9gBoI+wDXIKtaAfwObp1b99MGiSAJ6V56H1KdAmt7IFpnk8kOj+b+AsabNSJ2/YRjxEWGVqEgCc6VEwX/rRkX3YxqonZORBdwxSjQU4NV70Kv1Aqj7YriYrBe01Xvpir4LmrBNyl5xUYI/TfnRas8/L9sly5GRnIF+xytonj4mPNrVvkqHd1CegGQSThZ2Fv/BCGxNyg2Dl9QXChQ15vTi993z+ns0dt96Rq57svP/OjZVcVlx1xWzXOPF2v2Tfj5e1+21vgd4C88kCE/3bWv3X7/no8byoudQRR15Q/zXPSztzXPeQg7Z80LJf/NKFD3quP9FbYEFaYNx12V+Pc2bxE6/71wRNt+ZBL9gBOiCYp/a6S69LT6EHwcALKLVMGw+yB+YyNCM8xGJtwZC6IAyYgiCAB3DBufM82Dy5PI4S4C4gtJWsVVyvjuY1LbgDdT6OyVGgDKYlMsvPZfDCW8mTzbOdax/HObKpI6WMAaZWkqg+wJjYX2CnLLl9Eib9T6ub2mpkND3Y/8YtTBeUlryqd3Vje58M0QjZeGhnzpiZD/qJvfYSmuVXWj5l/MXXftFsuMWGGVJhwkEvIRhsrA12oBuQTe92jEl5zekGuLuTCHKUfW2rDWNS8cgpa7QDahOeY8LDZjzD3hToGkgbB8Try746vM+SyYgxEbLB/lb3ePqrnz70opsouD54+3n+yagv1+FKq6/UrLPpOqmfMXJXwh2KfTuhHNlJ/9VboLfAfLPARPy2dvus3/HKq+N5UXCpI760cMGZsIe87H54/uKXe1ielwHs685fC3SvzWq5v0bLEuO3J17zr0MYAV9gCvSAUF5EIOP1yiBn34P3TU8vmBG3zNuoTt2K14P4YOB1/knnZ7w0mANPYpgBD2CyTBmgA5CgCHyBVxAtud0PlEC5fBCojW7SR/XNW+tYqrCPoQc8+jQhoBeQA4TgN72n0a71rC09BwQ33HLDhE9l0hatbtV2t/+x+7OKmEXApFRt1DYz4yv1UiTEzz7D9iYd4F/sMIi0zbcNPnmHZr2p6+ULZT548AebQ95zSK4zzZMrvtjdgXyRTYRl3HnRrc3l1/x2ED4RD3KmV9r4xgeQmkzU3YF8eLKV00a4C5sCWZ57cejp2Y9zNQ7s5zowdrV6BpDlkc4Jh4kHcI4/8tKbHLpM3WFqXhtgnKxAX1uut2Ues0xeD14QA67dmdCW8Tcm1tROaI48OpDF9fmsnd5X5uy3vQV6CywACyzM39bqq367R4/nVb0JB+dSoBSs437bW2AiLVD/0Loy9Ndo1xr3759+2ycTQoaeQnAV8CYBIWAJfM489sxm+ydun+ALXoDUtz/87eal//TSBGeQ5cP7eNX5VzXTnzI9IRrg8IiKgwWN4AgQKwvSgNWffj94IYg8QCeOFSSBSeAMDAF3fqK8drSbIBxtSfnSjoBft/ydB1rKAMVxIAy6tKdPAAjelFO36ld4g+M5SgB4TKJjpXFtFYwWONMb+FsCEDSDyAyHCEgUYy5UxvrY2+61ba6AQk4TERArhIKHF3Dng5FhO5DKntWP8QOs8uitHDiV5Cuf4BwAry3lhYHoJ5NNq1LV14aQGxMXsOsaSltGHfXY2eTkvj/GutXRLv3EO4Nu4RrG0oofy624XPPIpQew7DqqF+foV5uOs/12XPX79Me+O8Xqv3oL9BZY8BZYkL+v3bbrN7vy6nheNZwQcO4KXQpV3vxSrNrrt70F5sUCo9entvprdFaLnnzdvyWg8vCmt7kFEjGp4lGBM2gCb5effXmzxoZrNL/82i9zzeMNNt9gAFMBUcAIfPFCe0EG+LJkHBgEprddd1s+qLb17ltnW7yNILu8zeoCLA8EenseWNOm8/XgYUJnQBsgrjjsgl1lgZvVH7KtFp7TUxp1EuRAnPyAfnAqKcvrDSprwmA/yygQ5UZht44LRBUbpvsZeZg1ugOks422rHb80X9BPn1MAry1754778llAa217WFCHmA28hKTfBgSpIZuJhhsZQxANyjNWO4QgHe/wm+MrT7Btn3t6xs0WzXFHQHH4BYIKwNYhVpow5hmYsPQwQTkvvtiZZMA4bIdeeTb0hVgp2e7nayYTAFpEyNwTmaTqgJlcKy+mHgPKubDpDFJUoacZFDGOKl7wPT3DmTqv3sL9BZYKBZYEL+v1Wb9TtcxhSpvXpWbcHAuBbrKzU8Fq/1+21tgXi3QX6PjLfizi96bgOTBQCBSnmcABmw9lOaWPqATxwpshV/cdPVNzdJ/NXgJCfjkleZJTk9mQBYoA7jqgUDg7S1zHkZcac2VEny1Z9UHwAWYwDRw4wkF8WQAdQnO0Yc2gSJoTmAOKLOVQBY59AUgwVqWiW3BL3j0t6A4ga4FbfULqMGfsuUpTsjtmC+PW2jMvqP8LGnkcJZz7UG1DTwlx/n2wbBhgSm7GIO774hxiCXYQCgZTUhMTpQrG2cboQPvLRgVZmPSwxa1BB+dHANi0MqLzV7sKe586ccsnWOrvPEEtODdBEq/xg2wuyuQDyK2AJ12Dy+yLX3SIxzjx6YlV27DTgBaGVsTKjqSBaCLpzZmINnESRkyWG2D7sbf9VUf14LrlU7gvvc8p7n7r94CC9UCo7+tOn+4kFttVf3R4/mh2KQB51KmlKzjUr6O+21vgYm2QH+NzjoCv5r5L83eU/6h+c0dn05vYnlywWqBVXp4w9MIKHkcARL4EncLoNKrGTD06GUfnV5JKx+ALwCVcBrlvSgDNFvTFxg556UqXnctLnrtTdZOaCtoAriA6p7f3ZNgBJB4WskCGLWbgAuMI9kHZGQBbI4LkLWV0Kyg/bZO5Sub9Z0fTdGmcpk67QzbiBMJwbonQzfVofq13zlf9WTVPt1MQHh8QS0I5jlmb30WBPP+kqu81NlG6F0TBxMe4KyeNtmV7YwdDzvvr/psZWK01CMGUJ5hGW053v70BgPomMCwp7HSd91NALry9esDdnOCE9dJ2SgBurVBQTZb5eQktmSsMA8QrM0aYxMHD0maCJDH9alvDw/auv7Kk84G+23zTps+9RboLbCQLTD626r7uWHAql916nhu23kotScdOJfAXYXnt9LVR7/tLTAvFhi9RrVV/2Dnpd1Fre4pN3y02XXtNzYn/PbDCbUJyQEnbqXfdetdCbjABLAAIWAKb
**Bonus Task:** Use Slicer Python to analyze the image data. [33 Points]

# Activate the console with View -> Python Interactor.
# Now, you can run Python commands in 3D Slicer (don't run the commands here but in 3D Slicer!).
#
# 1) Let's install mahotas into the 3D Slicer environment.
pip_install('mahotas')
#
# 2) Grab the image data as a numpy array.
# Hint: You need to replace $MRML_ID with the correct value.
# Hint: You can find that value in the DATA module, if you activate 'Show MRML IDs'
node = slicer.mrmlScene.GetNodeByID('$MRML_ID')
data_as_array = slicer.util.arrayFromVolume(node)
#
# 3) Now, data_as_array is a numpy array containing the image data.
# We can print the shape and also the max value.
print(data_as_array.shape)
print(data_as_array.max())
#
# 4) Please use mahotas to generate the histogram of the ultrasound data.
import mahotas as mh
pixelcounts = mh.full_histogram(data_as_array)
#
# TODO: Please answer the following questions.
# TODO: Max. Intensity Value in data_as_array:
# YOUR_ANSWER
# TODO: How many pixels with this max. intensity value?
# YOUR_ANSWER
#
# You did it!!
#      ,------.
#     `-____-'     ,-----------.
#      ,i--i.      |           |
#     / @  @ \    / HEE HEE!   |
#    | -.__.- | ___-'          J
#     \.    ,/ """"""""""""""""""'
#     ,\""""/.
#   ,'  `--'  `.
#  (_,i'    `i._)
#     |      |
#     |  ,.  |
#     | |  | |
#     `-'  `-'        -bodom-

A Hovmoller diagram for T (vs longitude 30-100E), for the period 1995-2015, using an appropriate frequency for the output.

#import modules
#allows plots to appear beneath cell
%matplotlib notebook
import numpy as np
import numpy.ma as ma
from matplotlib import *
import netCDF4 as nc4
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from matplotlib import gridspec
import matplotlib.colors as colors
from matplotlib import colors, ticker, cm
import matplotlib.patches as mpatches
import cartopy as cart
from mpl_toolkits.basemap import Basemap
import xarray as xrr
from numpy.ma import masked_where
import calendar
import datetime
from matplotlib import dates as mdates
import pandas as pd

#The region is between 30E and 100E, so we need to split the download in
#two slices due to the grid indexes.
#Also, for both cases we should split in two in the temporal dimension
#due to the different name of some variables from 2013 onwards...
m_list = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']

sst11=np.zeros((120,516,216))
va=0
for i in range(1995,2013):
    for m in m_list:
        fyd=nc4.Dataset('/group_workspaces/jasmin2/nemo/vol1/ORCA0083-N006/means/'+str(i)+'/ORCA0083-N06_'+str(i)+'m'+str(m)+'T.nc','r',format='NETCDF4')
        sst11_=fyd.variables['sst'][0,1373:1493,3805:4321]
        sst11[:,:,va]=sst11_
        va=va+1

sst12=np.zeros((120,325,216))
va=0
for i in range(1995,2013):
    for m in m_list:
        fyd=nc4.Dataset('/group_workspaces/jasmin2/nemo/vol1/ORCA0083-N006/means/'+str(i)+'/ORCA0083-N06_'+str(i)+'m'+str(m)+'T.nc','r',format='NETCDF4')
        sst12_=fyd.variables['sst'][0,1373:1493,0:325]
        sst12[:,:,va]=sst12_
        va=va+1

sst11[np.abs(sst11) > 3000.] = np.nan
sst11[sst11 == 0.] = np.nan
sst12[np.abs(sst12) > 3000.] = np.nan
sst12[sst12 == 0.] = np.nan
sst_1=np.concatenate((sst11,sst12),axis=1) #(120, 841, 216)

sst21=np.zeros((120,516,36))
va=0
for i in range(2013,2016):
    for m in m_list:
        fyd=nc4.Dataset('/group_workspaces/jasmin2/nemo/vol1/ORCA0083-N006/means/'+str(i)+'/ORCA0083-N06_'+str(i)+'m'+str(m)+'T.nc','r',format='NETCDF4')
        sst21_=fyd.variables['tos'][0,1373:1493,3805:4321]
        sst21[:,:,va]=sst21_
        va=va+1

sst22=np.zeros((120,325,36))
va=0
for i in range(2013,2016):
    for m in m_list:
        fyd=nc4.Dataset('/group_workspaces/jasmin2/nemo/vol1/ORCA0083-N006/means/'+str(i)+'/ORCA0083-N06_'+str(i)+'m'+str(m)+'T.nc','r',format='NETCDF4')
        sst22_=fyd.variables['tos'][0,1373:1493,0:325]
        sst22[:,:,va]=sst22_
        va=va+1

sst21[np.abs(sst21) > 3000.] = np.nan
sst21[sst21 == 0.] = np.nan
sst22[np.abs(sst22) > 3000.] = np.nan
sst22[sst22 == 0.] = np.nan
sst_2=np.concatenate((sst21,sst22),axis=1) #(120, 841, 36)

sst = np.concatenate((sst_1,sst_2),axis=2) #(120, 841, 252)

lonS1=fyd.variables['nav_lon'][1373:1493,3805:4321]
lonS2=fyd.variables['nav_lon'][1373:1493,0:325]
lonS=np.concatenate((lonS1,lonS2),axis=1) #(120,841)
lons_m = np.mean(lonS,axis=0)

timee = pd.date_range(start='1/1/1995',end='1/1/2016',freq='M') #(252)
timee_l=list(timee)
#this is for the tick label in the figure:
years = mdates.YearLocator()
months = mdates.MonthLocator()
#this is for the y axis ticks ..
time_i = pd.date_range(start='1994-12-31',end='2016-01-01',freq='6M')

sst_m=np.nanmean(sst,axis=0) #(841,252) average through the latitudes axis.
sst_mt=np.transpose(sst_m)

fig , ax = plt.subplots(1,1,figsize=(12,6))
ax = plt.axes()
ticks=[24,24.5,25,25.5,26,26.5,27,27.5,28,28.5,29,29.5,30,30.5,31]
low=24
high=31.25
ran=0.15
P1 = ax.contourf(lons_m,timee,sst_mt,np.arange(low, high, ran),xdate=True, extend='both',cmap=plt.cm.nipy_spectral)
plt.title('Hovmoller diagram - SST 0-10$^\circ$S, 1995-2015',fontsize=10)
plt.xlim((35,100))
plt.xlabel('E longitude',fontsize=9)
cbar = plt.colorbar(P1,pad=0.08)
cbar.set_ticks(ticks)
cbar.set_label('$^\circ$C',rotation=0,labelpad=6)
font_size=8
cbar.ax.tick_params(labelsize=font_size)
plt.yticks(time_i)
ax.yaxis.set_major_locator(years)
plt.tick_params(top=True,right=True,labelright=True)
#ax.yaxis.set_minor_locator(plt.MultipleLocator)
#ax.yaxis.set_minor_locator(mdates.MonthLocator(interval=6))
#ax.yaxis.set_minor_locator(mdates.DayLocator(interval=183))

Copyright 2020 Google

#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

QAOA Tasks

Setup

Install the ReCirq package:

try:
    import recirq
except ImportError:
    !pip install git+https://github.com/quantumlib/ReCirq

And import ReCirq:

import recirq

Problem generation

First, we generate and save all of the random instances of the problem. This is not computationally intensive but very important to do first so we have a fixed set of random instances.

from recirq.qaoa.experiments.problem_generation_tasks import \
    SKProblemGenerationTask, HardwareGridProblemGenerationTask, ThreeRegularProblemGenerationTask, \
    generate_3_regular_problem, generate_sk_problem, generate_hardware_grid_problem

pgen_dataset_id = '2020-03-tutorial'
hardware_grid_problem_tasks = [
    HardwareGridProblemGenerationTask(
        dataset_id=pgen_dataset_id, device_name='Sycamore23', instance_i=i, n_qubits=n)
    for i in range(5) for n in range(2, 8 + 1, 2)
]
recirq.display_markdown_docstring(HardwareGridProblemGenerationTask)

sk_problem_tasks = [
    SKProblemGenerationTask(dataset_id=pgen_dataset_id, instance_i=i, n_qubits=n)
    for i in range(5) for n in range(3, 7 + 1, 2)
]
recirq.display_markdown_docstring(SKProblemGenerationTask)

three_regular_problem_tasks = [
    ThreeRegularProblemGenerationTask(dataset_id=pgen_dataset_id, instance_i=i, n_qubits=n)
    for i in range(5) for n in range(3, 8 + 1) if 3 * n % 2 == 0
]

Run the tasks

for task in hardware_grid_problem_tasks:
    generate_hardware_grid_problem(task)
for task in sk_problem_tasks:
    generate_sk_problem(task)
for task in three_regular_problem_tasks:
    generate_3_regular_problem(task)

Angle precomputation

from recirq.qaoa.experiments.angle_precomputation_tasks import \
    AnglePrecomputationTask, precompute_angles

apre_dataset_id = '2020-03-tutorial'
precompute_tasks = [
    AnglePrecomputationTask(dataset_id=apre_dataset_id, generation_task=gen_task, p=p)
    for gen_task in recirq.roundrobin(hardware_grid_problem_tasks, sk_problem_tasks, three_regular_problem_tasks)
    for p in range(1, 3 + 1)
]
recirq.display_markdown_docstring(AnglePrecomputationTask)

for task in precompute_tasks:
    precompute_angles(task)

Precomputed angle data collection

from recirq.qaoa.experiments.precomputed_execution_tasks import \
    PrecomputedDataCollectionTask, collect_data

dcol_dataset_id = '2020-03-tutorial'
data_collection_tasks = [
    PrecomputedDataCollectionTask(dataset_id=dcol_dataset_id, precomputation_task=pre_task,
                                  device_name='Syc23-simulator', n_shots=50_000, structured=True)
    for pre_task in precompute_tasks
]
recirq.display_markdown_docstring(PrecomputedDataCollectionTask)

await recirq.execute_in_queue(collect_data, data_collection_tasks, num_workers=2)

Landscape data collection

from recirq.qaoa.experiments.p1_landscape_tasks import \
    P1LandscapeDataCollectionTask, \
    get_data_collection_tasks_on_a_grid, \
    collect_either_landscape_or_cal

recirq.display_markdown_docstring(P1LandscapeDataCollectionTask)

hardware_grid_problem_task = HardwareGridProblemGenerationTask(
    dataset_id=pgen_dataset_id, device_name='Sycamore23', instance_i=0, n_qubits=4)
data_collection_tasks = get_data_collection_tasks_on_a_grid(
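    # gamma_res and beta_res below set the resolution of the (gamma, beta) grid scanned for the
    # p=1 landscape: 11 sampled values for each of the two QAOA angles for this problem instance.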
    pgen_task=hardware_grid_problem_task,
    dataset_id=dcol_dataset_id,
    gamma_res=11,
    beta_res=11,
    device_name='Syc23-simulator',
    epoch="grid")
await recirq.execute_in_queue(collect_either_landscape_or_cal, data_collection_tasks, num_workers=2)

sk_problem_task = SKProblemGenerationTask(
    dataset_id=pgen_dataset_id, instance_i=0, n_qubits=3)
data_collection_tasks = get_data_collection_tasks_on_a_grid(
    pgen_task=sk_problem_task,
    dataset_id=dcol_dataset_id,
    gamma_res=11,
    beta_res=11,
    device_name='Syc23-simulator',
    epoch="sk")
await recirq.execute_in_queue(collect_either_landscape_or_cal, data_collection_tasks, num_workers=2)

three_regular_problem_task = ThreeRegularProblemGenerationTask(
    dataset_id=pgen_dataset_id, instance_i=0, n_qubits=4)
data_collection_tasks = get_data_collection_tasks_on_a_grid(
    pgen_task=three_regular_problem_task,
    dataset_id=dcol_dataset_id,
    device_name='Syc23-simulator',
    gamma_res=11,
    beta_res=11,
    epoch="tr")
await recirq.execute_in_queue(collect_either_landscape_or_cal, data_collection_tasks, num_workers=2)

Optimization Data Collection

from recirq.qaoa.experiments.optimization_tasks import \
    OptimizationAlgorithm, \
    OptimizationTask, \
    collect_optimization_data

recirq.display_markdown_docstring(OptimizationTask)

optimization_algorithm = OptimizationAlgorithm(
    method='MGD', n_shots=25000,
    options={'max_iterations': 10, 'rate': 0.3, 'sample_radius': 0.1, 'n_sample_points_ratio': 1.0,
             'rate_decay_exponent': 0.4, 'stability_constant': 250, 'sample_radius_decay_exponent': 0.08})
hardware_grid_optimization_task = OptimizationTask(
    dataset_id=dcol_dataset_id, generation_task=hardware_grid_problem_task,
    device_name='Syc23-simulator', p=1, algorithm=optimization_algorithm, x0=[0.3, 0.2])
collect_optimization_data(hardware_grid_optimization_task)

optimization_algorithm = OptimizationAlgorithm(
    method='MGD', n_shots=25000,
    options={'max_iterations': 10, 'rate': 0.3, 'sample_radius': 0.1, 'n_sample_points_ratio': 1.0,
             'rate_decay_exponent': 0.3, 'stability_constant': 200, 'sample_radius_decay_exponent': 0.08})
sk_optimization_task = OptimizationTask(
    dataset_id=dcol_dataset_id, generation_task=sk_problem_task,
    device_name='Syc23-simulator', p=1, algorithm=optimization_algorithm, x0=[0.3, 0.2])
collect_optimization_data(sk_optimization_task)

optimization_algorithm = OptimizationAlgorithm(
    method='MGD', n_shots=25000,
    options={'max_iterations': 10, 'rate': 0.2, 'sample_radius': 0.1, 'n_sample_points_ratio': 1.0,
             'rate_decay_exponent': 0.4, 'stability_constant': 250, 'sample_radius_decay_exponent': 0.08})
three_regular_optimization_task = OptimizationTask(
    dataset_id=dcol_dataset_id, generation_task=three_regular_problem_task,
    device_name='Syc23-simulator', p=1, algorithm=optimization_algorithm, x0=[0.3, 0.2])
collect_optimization_data(three_regular_optimization_task)

Softmax regression with Airbnb data

The goal in this challenge is to build a softmax regression classifier to predict the room type of New York City Airbnb listings using other features. Use PyTorch to build, train, and evaluate the model.

Challenges:
1. Load and prepare the Airbnb dataset.
2. Build the model.
3. Train the model.
4. Evaluate the model.
5. Draw conclusions.

# import the libraries we need
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# PyTorch
import torch
import torch.nn as nn

1. Load and prepare the data

Load the dataset into a pandas dataframe, and prepare it for the model.

Hints:
- Define the features ($\mathbf{x}$) and labels ($y$).
  You will probably want to use the Pandas `get_dummies` function to convert the `room_type` column to the proper numerical representation, think *one-hot encoding*. The model will predict whether the listing is an entire home/apartment, private room, or shared room.
- Split the dataset into training and test sets.
- Separate the features and labels in training set and test set.

data_url = 'https://raw.githubusercontent.com/BreakoutMentors/Data-Science-and-Machine-Learning/main/datasets/AB_NYC_2019.csv'
# your code here

2. Build your model

Build a model to model the relationship between the features $x$ (multiple features) and labels $y$ (Type 1).

Hints:
- use the `nn.Linear` class to define your linear model layer
- define your loss function with the [`nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html) class from PyTorch, which has the `nn.Softmax` activation function built in, so there is no need to use it in the model
- configure the optimization algorithm with stochastic gradient descent
- track the accuracy metric

# your code here
class Logistic_Model(nn.Module):
    # Constructor
    def __init__(self, num_features, num_classes):
        # Todo

    # Forward Method
    def forward(self, x):
        # Todo
        return x

num_features = #
num_classes = #
model = Logistic_Model(num_features, num_classes)
loss_fn = #
optimizer = #

3. Train your model

Now that you have a model, it's time to train it. Train your model for 100 epochs (i.e., iterations), and record the training, validation, and accuracy metrics in lists.

# your code here

Visualize the accuracy metric or crossentropy over the training process. Hint: create a line chart with the epoch (x) and the accuracy (y).

# your code here

4. Evaluate the model

Now that the model is trained, it's time to evaluate it using the test dataset, which you did not use when training the model. This gives you a sense of how well the model predicts unseen data, which is the case when you use it in the real world. Make sure to evaluate the model and visualize its predictions against the true values.

Hints:
- Calculate test accuracy.

# your code here

Lorem Ipsum

"Neque porro quisquam est qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit..."

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris ut tellus in eros lobortis mollis. Sed vestibulum porttitor est, convallis varius mi efficitur non. Suspendisse tempus lectus tempus feugiat consequat. Proin quis aliquam orci. Donec sit amet turpis nec neque tempor vehicula sit amet ac arcu. Mauris congue sem ut nunc suscipit posuere. Pellentesque ante risus, auctor id consectetur ac, feugiat eu massa. Fusce tortor neque, mattis placerat enim ac, luctus commodo augue. Etiam iaculis turpis arcu. Nam ornare varius rhoncus. Curabitur sed molestie metus. Etiam eget nunc tempus est convallis aliquet vel sit amet nulla. Sed accumsan, orci vel porttitor finibus, lacus erat faucibus risus, ac ornare augue mi sed felis. Aenean vel porta orci.

Donec tincidunt tempor augue. Donec suscipit, diam a tincidunt vehicula, erat ex lacinia leo, quis pharetra elit tellus et nibh. Duis eros nisi, ullamcorper sed leo in, blandit suscipit dolor. Nulla sit amet urna turpis. Sed in justo non tortor vulputate congue. Maecenas viverra consequat suscipit.
Sed interdum ut magna non sodales.

class Lorem(object):
    def __init__(self):
        pass

    def show(self):
        print('''Lorem ipsum dolor sit amet, consectetur adipiscing elit.\n Mauris ut tellus in eros lobortis mollis.\n Sed vestibulum porttitor est, convallis varius mi efficitur non.\n Suspendisse tempus lectus tempus feugiat consequat.\n Proin quis aliquam orci.\n Donec sit amet turpis nec neque tempor vehicula sit amet ac arcu.\n Mauris congue sem ut nunc suscipit posuere.\n Pellentesque ante risus, auctor id consectetur ac, feugiat eu massa.\n Fusce tortor neque, mattis placerat enim ac, luctus''')

def main():
    l = Lorem()
    l.show()

main()

Applications: Replication QWI Statistics with MO Wage Records

----

Table of Contents
- [Introduction](Introduction)
- [Basic Concepts](Basic-Concepts)
- [Python Setup](Python-Setup)
- [The Six Starter Files](The-Six-Starter-Files)
- [Longitudinal Linkage](Longitudinal-Linkage)
- [QWI Metrics](QWI-Metrics)

Introduction
- Back to [Table of Contents](Table-of-Contents)

This notebook presents an application of creating variables, as taught in the "Variables" notebook. Here we will generate the relevant metrics of the Quarterly Workforce Indicators (QWI) framework, for the overall Missouri economy and by business EIN. QWI is a robust method of quantifying the dynamic employment patterns of workers and firms. It was developed by the Longitudinal Employer-Household Dynamics (LEHD) program at the Census in collaboration with state employment security agencies across the US. LEHD collects administrative unemployment insurance (UI) and employer (Quarterly Census of Employment and Wages) data from the state agencies. It then standardizes and cleans the data, applies some statistical imputation procedures, calculates statistics, and finally makes aggregates available to the public and microdata available to researchers. This procedure is described in detail in "The LEHD Infrastructure Files and the Creation of the Quarterly Workforce Indicators" (Abowd et al. 2009, in "Producer Dynamics: New Evidence from Micro Data").

In this notebook, we present code for turning simple Wage Records files into QWI files, as well as examples and suggestions for optimal use.

> In the following steps, we will code the QWI metrics for a given quarter (2010 quarter 1). If you need these metrics for project work, the results for all quarters have been run and stored on the ADRF, in the `ada_18_uchi` schema.

Basic Concepts
- Back to [Table of Contents](Table-of-Contents)

The QWI framework is focused on **jobs**, which are the combination of **people** and **employers**. If John works at both McDonalds and a second employer, this will generate two completely separate jobs in the QWI data. If John quits his job at McDonalds, that job will no longer exist in the QWI data. If John quits both of his jobs, he will not be observed in the QWI data at all. The Wage Records files might contain "jobs" where no money changes hands in a given quarter. From the perspective of QWI, a job only exists if it involves wages greater than one dollar.

In the ADRF environment, **People** are identified by their (hashed) Social Security Number and (hashed) name. In a perfect world, tracking people over time would simply be a matter of tracking their unique SSN. However, SSNs can be corrupted by data entry errors or used by multiple people. For the time being, we do not address these issues.
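If one did want to flag such cases before linking, a quick check is to count how many distinct hashed names appear for each SSN in the Wage Records. The minimal pandas sketch below assumes a dataframe of job records with columns named `ssn` and `name_hash`; both the dataframe and the `name_hash` column are hypothetical names used for illustration, not fields defined elsewhere in this notebook.

import pandas as pd

def flag_shared_ssns(wage_records):
    # Count distinct hashed names per SSN; more than one suggests a mis-keyed or shared SSN.
    names_per_ssn = wage_records.groupby('ssn')['name_hash'].nunique()
    return names_per_ssn[names_per_ssn > 1].index

Jobs attached to the flagged SSNs could then be reviewed, or dropped before the linkage step, without changing anything else in the routine.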
We only longitudinally link two jobs if they have identical SSNs.Due to the fact that people who are unemployed or out of the labor force do not appear in QWI data, these data are not suitable for calculating population-wide employment rates of the kind produced by the Bureau of Labor Statistics. However, under the assumption that people who do not appear in the Wage Records data do not have jobs, QWI data can be used to calculate employment rates and other employment statistics for a subpopulation of people with known (hashed) SSNs. In this QWI routine, **Employers** are identified by their Employer Identification Number (EIN). Other methodologies use the Unemployment Insurance Account Number (UI Account Number).Each QWI file is specific to a single **quarter**. It includes a job if and only if it existed in that quarter. It includes information from prior and later quarters, but this is only to help construct the employment dynamics. The QWI file for 2010 quarter 2 gives a complete picture of the employment dynamics for that quarter but an incomplete (and likely misleading) picture of the employment dynamics for 2010 quarter 1 (or any other quarter). The Wage Records data tell us whether a job existed in a given quarter, but without more information we cannot differentiate a job that lasted the entire quarter from one that only lasted a day. Python Setup- Back to [Table of Contents](Table-of-Contents)# general use imports import datetime import glob import inspect import numpy import os import six import warnings import math from itertools import izip # pandas-related imports import pandas as pd import sqlalchemy # CSV file reading-related imports import csv # database interaction imports import sqlalchemy import datetime # to create a connection to the database, we need to pass the name of the database and host of the database connection_string = "postgresql://10.10.2.10/appliedda" conn = sqlalchemy.create_engine(connection_string) ## makeDataDict: A function to split a dataset into 6 quarterly datasets for use in QWI calculations ## Inputs: ## -connection: Connection to the database ## -keyYr: The year to caculate QWI stats for ## -keyQ: The quarter to calculate QWI stats for ## Output: ## -res: A dictionary with keys m4, m3, m2, m1, t, and p1 containing subsets of the longDat that condition ## on keyRy/keyQ (for entry 't'), 4 lags ('m1'-'m4'), and one lead ('p1') def makeDataDict(connection, keyYr, keyQ): keys = ['m4', 'm3', 'm2', 'm1', 't', 'p1'] res = {} for i in range(0,6): ##Find the right year and quarter, with i=0 corresponding to the 4th lag yr = int(keyYr - 1 + math.floor((keyQ+i-1)/4)) q = int(keyQ + i - 4*math.floor((keyQ+i-1)/4)) # Query the dataset on the given year and quarter, keeping only identifiers and wage as columns query=''' SELECT ssn , ein , 1 as wage FROM il_des_kcmo.il_wage WHERE year = {} and quarter = {} '''.format(yr, q) res[keys[i]] = pd.read_sql(query, connection) return res ## linkData: A function to longitudinally link the datasets supplied in the form of a dictionary output by ## makedataDict. Currently links by a deterministic left merge (where 't' is always on the left) by SSN and EIN. ## Input: ## -dataDict: A dictionary with keys m4, m3, m2, m1, t, and p1 containing subsets of the longDat that condition ## on keyRy/keyQ (for entry 't'), 4 lags ('m1'-'m4'), and one lead ('p1') ## Output: ## -res: A single dataframe with columns SSN, EIN, and wage_m4-wage_p1, the results of the ## longitudinal linkage. A job is included iff it exists in period 't'. 
When that job does not exist in a different ## period, the record describing that job will have a missing value in the column for that period. def linkData(dataDict): for time in dataDict: dataDict[time] = dataDict[time][(dataDict[time]['wage'].notnull())] res = dataDict['t'] for time in dataDict: if time != 't': ##Define the suffix to add to the time period being merged in suff = '_' + time ##Merge on iddssn, uiacctno. Keep all records in 't'. Drop unmatched records in the other period. res = pd.merge(left=res, right=dataDict[time], on=['ssn','ein'], how='left', suffixes=('',suff)) res = res[['ssn', 'ein', 'wage', 'wage_m4', 'wage_m3', 'wage_m2', 'wage_m1', 'wage_p1']] return res ## makeStatistics: A function to calculate QWI statistics ## (employment-stable employment, accessions and separations therefrom, etc.). ## Input: ## -linkData: A single dataframe with columns idssn, uiacctno, and wage_m4-wage_p1. ## Missing entries will be interpreted as the non-existence of a job. ## Output: ## -res: The input dataset with columns appended for each statistic. def makeStatistics(linkData): res = linkData #Flow Employment res['qwmt'] = 1*(res['wage'] > 0) res['qwmtm4'] = 1*(res['wage_m4'] > 0) res['qwmtm3'] = 1*(res['wage_m3'] > 0) res['qwmtm2'] = 1*(res['wage_m2'] > 0) res['qwmtm1'] = 1*(res['wage_m1'] > 0) res['qwmtp1'] = 1*(res['wage_p1'] > 0) #Beginning of Quarter Employment res['qwbt'] = 1*((res['qwmtm1']==1) & (res['qwmt']==1)) #End of Quarter Employment res['qwet'] = 1*((res['qwmt']==1) & (res['qwmtp1']==1)) #Full Quarter Employment res['qwft'] = 1*((res['qwmtm1']==1) & (res['qwmt']==1) & (res['qwmtp1']==1)) #Accessions res['qwat'] = 1*((res['qwmtm1']==0) & (res['qwmt']==1)) #Accessions to Consecutive Quarter Status res['qwa2t'] = 1*((res['qwat']==1) & (res['qwmtp1']==1)) #Accessions to Full Quarter Status res['qwa3t'] = 1*((res['qwmtm2']==0) & (res['qwmtm1']==1) & (res['qwmt']==1) & (res['qwmtp1']==1)) #Separations res['qwst'] = 1*((res['qwmt']==1) & (res['qwmtp1']==0)) #New Hires res['qwht'] = 1*((res['qwmtm4']==0) & (res['qwmtm3']==0) & (res['qwmtm2']==0) & (res['qwmtm1']==0) & (res['qwmt']==1)) #Recalls res['qwrt'] = 1*((res['qwmtm1']==0) & (res['qwmt']==1) & (res['qwht']==0)) ##Replace technical names by more explicit names names_tech = (['qwmt','qwmtm4','qwmtm3','qwmtm2','qwmtm1','qwmtp1','qwbt' ,'qwet','qwft','qwat','qwa2t','qwa3t','qwst','qwht','qwrt']) names_def =(['emp_current_qrt','emp_4qtrs_ago','emp_3qtrs_ago','emp_2qtrs_ago' ,'emp_prev_qtr', 'emp_next_qtr','emp_begin_qtr','emp_end_qtr' ,'emp_full_qtr','accessions_current', 'accessions_consecutive_qtr' ,'accessions_full_qtr','separations','new_hires','recalls']) rn_dict = dict(izip(names_tech, names_def)) res = res.rename(index=str, columns=rn_dict) return res qwi_vars =(['emp_current_qrt','emp_4qtrs_ago','emp_3qtrs_ago','emp_2qtrs_ago','emp_prev_qtr' , 'emp_next_qtr','emp_begin_qtr','emp_end_qtr','emp_full_qtr','accessions_current' , 'accessions_consecutive_qtr','accessions_full_qtr','separations','new_hires','recalls']) for i in range(2012,2016): full_data = makeDataDict(conn, i, 1) print("Downloaded " + str(i) + " data") linked_data = linkData(full_data) print("Linked "+ str(i) + " data") qwi_stats = makeStatistics(linked_data) qwi_ein_stats = qwi_stats.groupby(['ein'])[qwi_vars].mean().reset_index() qwi_ein_count = qwi_stats.groupby(['ein']).size().reset_index() qwi_ein_count['nb_empl'] = qwi_ein_count[0] del qwi_ein_count[0] qwi_ein = pd.merge(qwi_ein_count, qwi_ein_stats, on = 'ein') table_name = 'qwi_ein_' + str(i) + 
"_1" conn = sqlalchemy.create_engine(connection_string) qwi_ein.to_sql(table_name, conn, schema = 'ada_18_uchi') print("Uploaded " + str(i) + " data")Navigation---You are welcome to use this coding environment to train your agent for the project. Follow the instructions below to get started! 1. Start the EnvironmentRun the next code cell to install a few packages. This line will take a few minutes to run!!pip -q install ./pythonThe environment is already saved in the Workspace and can be accessed at the file path provided below. Please run the next code cell without making any changes.from unityagents import UnityEnvironment import numpy as np # please do not modify the line below env = UnityEnvironment(file_name="/data/Banana_Linux_NoVis/Banana.x86_64")INFO:unityagents: 'Academy' started successfully! Unity Academy name: Academy Number of Brains: 1 Number of External Brains : 1 Lesson number : 0 Reset Parameters : Unity brain name: BananaBrain Number of Visual Observations (per agent): 0 Vector Observation space type: continuous Vector Observation space size (per agent): 37 Number of stacked Vector Observation: 1 Vector Action space type: discrete Vector Action space size (per agent): 4 Vector Action descriptions: , , ,Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.# get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] print('brain name: {}'.format(brain_name)) print() print('action space - size: {:2d} \t(type: {})'.format(brain.vector_action_space_size, brain.vector_action_space_type)) print('observation space - size: {:2d} \t(type: {})'.format(brain.vector_observation_space_size, brain.vector_observation_space_type))brain name: BananaBrain action space - size: 4 (type: discrete) observation space - size: 37 (type: continuous)2. Examine the State and Action SpacesRun the code cell below to print some information about the environment.# reset the environment env_info = env.reset(train_mode=True)[brain_name] # number of agents in the environment print('Number of agents:', len(env_info.agents)) # number of actions action_size = brain.vector_action_space_size print('Number of actions:', action_size) # examine the state space state = env_info.vector_observations[0] print('States look like:', state) state_size = len(state) print('States have length:', state_size)Number of agents: 1 Number of actions: 4 States look like: [ 1. 0. 0. 0. 0.84408134 0. 0. 1. 0. 0.0748472 0. 1. 0. 0. 0.25755 1. 0. 0. 0. 0.74177343 0. 1. 0. 0. 0.25854847 0. 0. 1. 0. 0.09355672 0. 1. 0. 0. 0.31969345 0. 0. ] States have length: 373. 
Dueling Double-DQNThe implementation is based on [Wang et al](https://arxiv.org/abs/1511.06581)'s work.%load_ext autoreload %autoreload 2 import pickle import os import torch from collections import deque from datetime import datetime import numpy as np from agents.dueling_ddqn_agent import Dueling_DDQN_Agent from task import Task n_episodes=2000 ### model file model_name='dueling_ddqn' # logging dir_logs='./logs/' filename_log='scores_{}.pickles'.format(model_name) path_log = dir_logs + filename_log dir_models='./models/' filename_model='checkpoint_{}.pth'.format(model_name) path_model = dir_models + filename_model if not os.path.exists(dir_logs): os.makedirs(dir_logs) if not os.path.exists(dir_models): os.makedirs(dir_models) LENGTH_MOVING_AVERAGE=100 def dueling_ddqn(unity_env, path_model, n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995, seed=0, length_moving_average=LENGTH_MOVING_AVERAGE, n_printing_period=100): task = Task(unity_env=unity_env) agent = Dueling_DDQN_Agent(task.state_size, task.action_size, seed=seed, is_clip=False) scores = [] scores_window = deque(maxlen=length_moving_average) eps = eps_start time_start = datetime.now() for i_episode in range(1, n_episodes+1): state = task.reset() score = 0.0 for _ in range(max_t): # apply epsilon-greedy algorithm action = agent.act(state, eps) next_state, reward, done, info = task.step(action) agent.step(state, action, reward, next_state, done) state = next_state score += reward if done: break scores_window.append(score) scores.append(score) eps = max(eps_end, eps_decay * eps) # print training message without newline print('\rEpisode {}\t Average Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="") # print training message if i_episode % n_printing_period == 0: print('\rEpisode {}\tAverage Score: {:.2f}, execution time={}'.format(i_episode, np.mean(scores_window), datetime.now() - time_start)) time_start = datetime.now() # ending criterion if np.mean(scores_window) >= 13.0: print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-length_moving_average, np.mean(scores_window))) break # save model torch.save(agent.qnetwork_local.state_dict(), path_model) return scores scores = dueling_ddqn(unity_env=env, path_model=path_model, n_episodes=n_episodes) with open(path_log, 'wb') as f: pickle.dump(scores, f)The autoreload extension is already loaded. To reload it, use: %reload_ext autoreload Episode 100 Average Score: 0.62, execution time=0:01:39.731444 Episode 200 Average Score: 3.42, execution time=0:01:40.281683 Episode 300 Average Score: 7.10, execution time=0:01:41.126893 Episode 400 Average Score: 10.90, execution time=0:01:43.151091 Episode 480 Average Score: 13.04 Environment solved in 380 episodes! 
Average Score: 13.04Eps decay=0.98, Eps end=0.1%load_ext autoreload %autoreload 2 import pickle import os import torch from collections import deque from datetime import datetime import numpy as np from agents.dueling_ddqn_agent import Dueling_DDQN_Agent from task import Task n_episodes=2000 ### model file model_name='dueling_ddqn_epsDecay_098_epsEnd_01' # logging dir_logs='./logs/' filename_log='scores_{}.pickles'.format(model_name) path_log = dir_logs + filename_log dir_models='./models/' filename_model='checkpoint_{}.pth'.format(model_name) path_model = dir_models + filename_model if not os.path.exists(dir_logs): os.makedirs(dir_logs) if not os.path.exists(dir_models): os.makedirs(dir_models) LENGTH_MOVING_AVERAGE=100 def dueling_ddqn(unity_env, path_model, n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995, seed=0, length_moving_average=LENGTH_MOVING_AVERAGE, n_printing_period=100): task = Task(unity_env=unity_env) agent = Dueling_DDQN_Agent(task.state_size, task.action_size, seed=seed, is_clip=False, clip=10.0) scores = [] scores_window = deque(maxlen=length_moving_average) eps = eps_start time_start = datetime.now() for i_episode in range(1, n_episodes+1): state = task.reset() score = 0.0 for _ in range(max_t): # apply epsilon-greedy algorithm action = agent.act(state, eps) next_state, reward, done, info = task.step(action) agent.step(state, action, reward, next_state, done) state = next_state score += reward if done: break scores_window.append(score) scores.append(score) eps = max(eps_end, eps_decay * eps) # print training message without newline print('\rEpisode {}\t Average Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="") # print training message if i_episode % n_printing_period == 0: print('\rEpisode {}\tAverage Score: {:.2f}, execution time={}'.format(i_episode, np.mean(scores_window), datetime.now() - time_start)) time_start = datetime.now() # ending criterion if np.mean(scores_window) >= 13.0: print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-length_moving_average, np.mean(scores_window))) break # save model torch.save(agent.qnetwork_local.state_dict(), path_model) return scores scores = dueling_ddqn(unity_env=env, path_model=path_model, n_episodes=n_episodes, eps_decay=0.98, eps_end=0.1) with open(path_log, 'wb') as f: pickle.dump(scores, f)Episode 100 Average Score: 2.46, execution time=0:01:41.023919 Episode 200 Average Score: 7.04, execution time=0:01:39.939979 Episode 300 Average Score: 10.72, execution time=0:01:39.859323 Episode 400 Average Score: 11.97, execution time=0:01:41.214624 Episode 442 Average Score: 13.00 Environment solved in 342 episodes! 
Average Score: 13.00Eps decay=0.98, Eps end=0.1import matplotlib.pyplot as plt %matplotlib inline import numpy as np import pickle def plot_result(path_log, n_moving_window): with open(path_log, 'rb') as f: scores = pickle.load(f) moving_average = np.convolve(scores, np.ones((n_moving_window,))/n_moving_window, mode='valid') fig = plt.figure() ax = fig.add_subplot(111) plt.plot(np.arange(len(scores)), scores, label='original') plt.plot(np.arange(len(moving_average)), moving_average, label='moving average (window size={})'.format(n_moving_window)) plt.legend() plt.ylabel('Score') plt.xlabel('Episode #') plt.show() n_moving_window = 100 plot_result(path_log, n_moving_window) %load_ext autoreload %autoreload 2 import pickle import os import torch from collections import deque from datetime import datetime import numpy as np from agents.dueling_ddqn_agent import Dueling_DDQN_Agent from task import Task n_episodes=2000 ### model file model_name='dueling_ddqn_epsDecay_098_epsEnd_01' # logging dir_logs='./logs/' filename_log='scores_{}.pickles'.format(model_name) path_log = dir_logs + filename_log dir_models='./models/' filename_model='checkpoint_{}.pth'.format(model_name) path_model = dir_models + filename_model if not os.path.exists(dir_logs): os.makedirs(dir_logs) if not os.path.exists(dir_models): os.makedirs(dir_models) LENGTH_MOVING_AVERAGE=100 def dueling_ddqn(unity_env, path_model, n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995, seed=0, length_moving_average=LENGTH_MOVING_AVERAGE, n_printing_period=100): task = Task(unity_env=unity_env) agent = Dueling_DDQN_Agent(task.state_size, task.action_size, seed=seed, is_clip=False, clip=10.0) scores = [] scores_window = deque(maxlen=length_moving_average) eps = eps_start time_start = datetime.now() cnt_steps = 0 for i_episode in range(1, n_episodes+1): state = task.reset() score = 0.0 for _ in range(max_t): cnt_steps += 1 # apply epsilon-greedy algorithm action = agent.act(state, eps) next_state, reward, done, info = task.step(action) agent.step(state, action, reward, next_state, done) state = next_state score += reward if done: break scores_window.append(score) scores.append(score) eps = max(eps_end, eps_decay * eps) # print training message without newline print('\rEpisode {}\t Average Score: {:.2f}\tAccumulated steps:{}'.format(i_episode, np.mean(scores_window), cnt_steps), end="") # print training message if i_episode % n_printing_period == 0: print('\rEpisode {}\tAverage Score: {:.2f}, execution time={}'.format(i_episode, np.mean(scores_window), datetime.now() - time_start)) time_start = datetime.now() # ending criterion if np.mean(scores_window) >= 13.0: print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-length_moving_average, np.mean(scores_window))) break print('total steps={}'.format(cnt_steps)) # save model torch.save(agent.qnetwork_local.state_dict(), path_model) return scores scores = dueling_ddqn(unity_env=env, path_model=path_model, n_episodes=n_episodes, eps_decay=0.98, eps_end=0.1) with open(path_log, 'wb') as f: pickle.dump(scores, f)The autoreload extension is already loaded. 
To reload it, use: %reload_ext autoreload Episode 100 Average Score: 0.53, execution time=0:01:53.825174 Episode 200 Average Score: 3.90, execution time=0:01:55.571172 Episode 300 Average Score: 7.60, execution time=0:01:55.952221 Episode 400 Average Score: 10.78, execution time=0:01:58.031732 Episode 500 Average Score: 12.10, execution time=0:01:59.545478 Episode 600 Average Score: 12.78, execution time=0:01:58.383572 Episode 616 Average Score: 13.05 Accumulated steps:184800 Environment solved in 516 episodes! Average Score: 13.05 total steps=184800Eps decay=0.98, Eps end=0.1, Clip=10.0%load_ext autoreload %autoreload 2 import pickle import os import torch from collections import deque from datetime import datetime import numpy as np from agents.dueling_ddqn_agent import Dueling_DDQN_Agent from task import Task n_episodes=2000 ### model file model_name='dueling_ddqn_epsDecay_098_epsEnd_01_Clip_10' # logging dir_logs='./logs/' filename_log='scores_{}.pickles'.format(model_name) path_log = dir_logs + filename_log dir_models='./models/' filename_model='checkpoint_{}.pth'.format(model_name) path_model = dir_models + filename_model if not os.path.exists(dir_logs): os.makedirs(dir_logs) if not os.path.exists(dir_models): os.makedirs(dir_models) LENGTH_MOVING_AVERAGE=100 def dueling_ddqn(unity_env, path_model, is_clip=False, clip=10.0, n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995, seed=0, length_moving_average=LENGTH_MOVING_AVERAGE, n_printing_period=100): task = Task(unity_env=unity_env) agent = Dueling_DDQN_Agent(task.state_size, task.action_size, seed=seed, is_clip=is_clip, clip=clip) scores = [] scores_window = deque(maxlen=length_moving_average) eps = eps_start time_start = datetime.now() for i_episode in range(1, n_episodes+1): state = task.reset() score = 0.0 for _ in range(max_t): # apply epsilon-greedy algorithm action = agent.act(state, eps) next_state, reward, done, info = task.step(action) agent.step(state, action, reward, next_state, done) state = next_state score += reward if done: break scores_window.append(score) scores.append(score) eps = max(eps_end, eps_decay * eps) # print training message without newline print('\rEpisode {}\t Average Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="") # print training message if i_episode % n_printing_period == 0: print('\rEpisode {}\tAverage Score: {:.2f}, execution time={}'.format(i_episode, np.mean(scores_window), datetime.now() - time_start)) time_start = datetime.now() # ending criterion if np.mean(scores_window) >= 13.0: print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-length_moving_average, np.mean(scores_window))) break # save model torch.save(agent.qnetwork_local.state_dict(), path_model) return scores scores = dueling_ddqn(unity_env=env, path_model=path_model, n_episodes=n_episodes, eps_decay=0.98, eps_end=0.1, is_clip=True, clip=10.0) with open(path_log, 'wb') as f: pickle.dump(scores, f)The autoreload extension is already loaded. To reload it, use: %reload_ext autoreload Episode 100 Average Score: 0.25, execution time=0:01:39.132597 Episode 200 Average Score: 3.58, execution time=0:01:41.867505 Episode 300 Average Score: 6.42, execution time=0:01:45.943246 Episode 400 Average Score: 11.29, execution time=0:01:46.175104 Episode 477 Average Score: 13.00 Environment solved in 377 episodes! 
Average Score: 13.00Eps decay=0.98, Eps end=0.1, Clip=10.0import matplotlib.pyplot as plt %matplotlib inline import numpy as np import pickle def plot_result(path_log, n_moving_window): with open(path_log, 'rb') as f: scores = pickle.load(f) moving_average = np.convolve(scores, np.ones((n_moving_window,))/n_moving_window, mode='valid') fig = plt.figure() ax = fig.add_subplot(111) plt.plot(np.arange(len(scores)), scores, label='original') plt.plot(np.arange(len(moving_average)), moving_average, label='moving average (window size={})'.format(n_moving_window)) plt.legend() plt.ylabel('Score') plt.xlabel('Episode #') plt.show() n_moving_window = 100 plot_result(path_log, n_moving_window)Eps decay=0.98, Eps end=0.1, Clip=20.0%load_ext autoreload %autoreload 2 import pickle import os import torch from collections import deque from datetime import datetime import numpy as np from agents.dueling_ddqn_agent import Dueling_DDQN_Agent from task import Task n_episodes=2000 ### model file model_name='dueling_ddqn_epsDecay_098_epsEnd_01_Clip_20' # logging dir_logs='./logs/' filename_log='scores_{}.pickles'.format(model_name) path_log = dir_logs + filename_log dir_models='./models/' filename_model='checkpoint_{}.pth'.format(model_name) path_model = dir_models + filename_model if not os.path.exists(dir_logs): os.makedirs(dir_logs) if not os.path.exists(dir_models): os.makedirs(dir_models) LENGTH_MOVING_AVERAGE=100 def dueling_ddqn(unity_env, path_model, is_clip=False, clip=10.0, n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995, seed=0, length_moving_average=LENGTH_MOVING_AVERAGE, n_printing_period=100): task = Task(unity_env=unity_env) agent = Dueling_DDQN_Agent(task.state_size, task.action_size, seed=seed, is_clip=is_clip, clip=clip) scores = [] scores_window = deque(maxlen=length_moving_average) eps = eps_start time_start = datetime.now() for i_episode in range(1, n_episodes+1): state = task.reset() score = 0.0 for _ in range(max_t): # apply epsilon-greedy algorithm action = agent.act(state, eps) next_state, reward, done, info = task.step(action) agent.step(state, action, reward, next_state, done) state = next_state score += reward if done: break scores_window.append(score) scores.append(score) eps = max(eps_end, eps_decay * eps) # print training message without newline print('\rEpisode {}\t Average Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="") # print training message if i_episode % n_printing_period == 0: print('\rEpisode {}\tAverage Score: {:.2f}, execution time={}'.format(i_episode, np.mean(scores_window), datetime.now() - time_start)) time_start = datetime.now() # ending criterion if np.mean(scores_window) >= 13.0: print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-length_moving_average, np.mean(scores_window))) break # save model torch.save(agent.qnetwork_local.state_dict(), path_model) return scores scores = dueling_ddqn(unity_env=env, path_model=path_model, n_episodes=n_episodes, eps_decay=0.98, eps_end=0.1, is_clip=True, clip=20.0) with open(path_log, 'wb') as f: pickle.dump(scores, f)Episode 100 Average Score: 0.46, execution time=0:01:41.376187 Episode 200 Average Score: 3.25, execution time=0:01:38.063651 Episode 300 Average Score: 7.45, execution time=0:01:40.381402 Episode 400 Average Score: 11.10, execution time=0:01:42.959677 Episode 500 Average Score: 12.95, execution time=0:01:41.835743 Episode 513 Average Score: 13.12 Environment solved in 413 episodes! 
Average Score: 13.12Eps decay=0.98, Eps end=0.1, Clip=20.0import matplotlib.pyplot as plt %matplotlib inline import numpy as np import pickle def plot_result(path_log, n_moving_window): with open(path_log, 'rb') as f: scores = pickle.load(f) moving_average = np.convolve(scores, np.ones((n_moving_window,))/n_moving_window, mode='valid') fig = plt.figure() ax = fig.add_subplot(111) plt.plot(np.arange(len(scores)), scores, label='original') plt.plot(np.arange(len(moving_average)), moving_average, label='moving average (window size={})'.format(n_moving_window)) plt.legend() plt.ylabel('Score') plt.xlabel('Episode #') plt.show() n_moving_window = 100 plot_result(path_log, n_moving_window)Free run - Eps decay=0.98, Eps end=0.1%load_ext autoreload %autoreload 2 import pickle import os import torch from collections import deque from datetime import datetime import numpy as np from agents.dueling_ddqn_agent import Dueling_DDQN_Agent from task import Task n_episodes=2000 ### model file model_name='dueling_ddqn_free_run_epsDecay_098_epsEnd_01' # logging dir_logs='./logs/' filename_log='scores_{}.pickles'.format(model_name) path_log = dir_logs + filename_log dir_models='./models/' filename_model='checkpoint_{}.pth'.format(model_name) path_model = dir_models + filename_model if not os.path.exists(dir_logs): os.makedirs(dir_logs) if not os.path.exists(dir_models): os.makedirs(dir_models) LENGTH_MOVING_AVERAGE=100 def dueling_ddqn(unity_env, path_model, n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995, seed=0, length_moving_average=LENGTH_MOVING_AVERAGE, n_printing_period=100): task = Task(unity_env=unity_env) agent = Dueling_DDQN_Agent(task.state_size, task.action_size, seed=seed, is_clip=False, clip=10.0) scores = [] scores_window = deque(maxlen=length_moving_average) eps = eps_start time_start = datetime.now() is_print=False for i_episode in range(1, n_episodes+1): state = task.reset() score = 0.0 for _ in range(max_t): # apply epsilon-greedy algorithm action = agent.act(state, eps) next_state, reward, done, info = task.step(action) agent.step(state, action, reward, next_state, done) state = next_state score += reward if done: break scores_window.append(score) scores.append(score) eps = max(eps_end, eps_decay * eps) # print training message without newline print('\rEpisode {}\t Average Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="") # print training message if i_episode % n_printing_period == 0: print('\rEpisode {}\tAverage Score: {:.2f}, execution time={}'.format(i_episode, np.mean(scores_window), datetime.now() - time_start)) time_start = datetime.now() # ending criterion if np.mean(scores_window) >= 13.0 and is_print == False: print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-length_moving_average, np.mean(scores_window))) is_print = True # break # save model torch.save(agent.qnetwork_local.state_dict(), path_model) return scores scores = dueling_ddqn(unity_env=env, path_model=path_model, n_episodes=n_episodes, eps_decay=0.98, eps_end=0.1) with open(path_log, 'wb') as f: pickle.dump(scores, f) import matplotlib.pyplot as plt %matplotlib inline import numpy as np import pickle def plot_result(path_log, n_moving_window): with open(path_log, 'rb') as f: scores = pickle.load(f) moving_average = np.convolve(scores, np.ones((n_moving_window,))/n_moving_window, mode='valid') fig = plt.figure() ax = fig.add_subplot(111) plt.plot(np.arange(len(scores)), scores, label='original') plt.plot(np.arange(len(moving_average)), 
moving_average, label='moving average (window size={})'.format(n_moving_window)) plt.legend() plt.ylabel('Score') plt.xlabel('Episode #') plt.show() n_moving_window = 100 plot_result(path_log, n_moving_window)Tratando outliersimport pandas as pd import matplotlib.pyplot as plt plt.rc("figure", figsize = (20, 10)) dados = pd.read_csv("../data/aluguel_residencial.csv", sep= ";") valor = dados['Valor'] q1 = valor.quantile(.25) q3 = valor.quantile(.75) box = q3 - q1 limite_inf = q1 - 1.5*box limite_sup = q3 + 1.5*box selecao = (valor >= limite_inf) & (valor <= limite_sup) dados_new = dados[selecao] dados_new2 = dados_new.query(f"Valor >= {limite_inf} and Valor <= {limite_sup} ") dados_new2.hist(["Valor"]) dados_new.hist(["Valor"]) grupo_tipo = dados.groupby("Tipo")["Valor"] grupo_tipo.groups q1 = grupo_tipo.quantile(.25) q3 = grupo_tipo.quantile(.75) box = q1 - q3 lim_inferior = q1 - (1.5*box) lim_superior = q3 + (1.5*box ) dados_new = pd.DataFrame() for tipo in grupo_tipo.groups.keys(): eh_tipo = dados["Tipo"] == tipo intervalo = ((dados["Valor"] >= lim_inferior[tipo]) & (dados["Valor"]<= lim_superior[tipo])) selecao = eh_tipo & intervalo dados_selecao = dados[selecao] dados_new = pd.concat([dados_new,dados_selecao]) dados_new.boxplot(["Valor"], by= ["Tipo"])FOR Loop Statementweek = ["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"] for x in week: print(x)Sunday Monday Tuesday Wednesday Thursday Friday SaturdayThe `break` Statementfor x in week: print(x) if x == "Thursday": break for x in week: if x == "Thursday": break print(x)Sunday Monday Tuesday WednesdayLooping through a Stringfor x in "week": print(x)w e e kThe `range()` Functionfor x in range(6): print("Example 1:", x) for x in range(2,6): print("Example 2:", x)Example 1: 0 Example 1: 1 Example 1: 2 Example 1: 3 Example 1: 4 Example 1: 5 Example 2: 2 Example 2: 3 Example 2: 4 Example 2: 5Nested FOR Loopadjective = ["red","big","tasty"] fruits = ["apple","banana","cherry"] for x in adjective: for y in fruits: print(x, y)red apple red banana red cherry big apple big banana big cherry tasty apple tasty banana tasty cherryWHILE Loop Statementi = 1 while i < 6: print(i) i += 11 2 3 4 5The `break` Statementi = 1 while i < 6: print(i) if i == 3: break i += 11 2 3The `continue` Statementi = 1 while i < 6: i += 1 if i == 3: continue else: print(i)2 4 5 6The `else` Statementi = 1 while i < 6: print(i) i += 1 else: print("i is no longer less than 6.")1 2 3 4 5 i is no longer less than 6.Application of Loop Statements Application 1Create a Python program that displays:---Hello 0Hello 1Hello 2Hello 3Hello 4Hello 5Hello 6Hello 7Hello 8Hello 9Hello 10# `for` Loop for x in range(11): print("Hello", x) # `while` Loop i = 0 while i <= 10: print("Hello", i) i += 1Hello 0 Hello 1 Hello 2 Hello 3 Hello 4 Hello 5 Hello 6 Hello 7 Hello 8 Hello 9 Hello 10Application 2Create a Python program that displaysintegers less than 10 but not less than 3.# `for` Loop for x in range(3,10): print(x) # `while` Loop i = 3 while i < 10: print(i) i += 13 4 5 6 7 8 9*출처 : https://futureskill.io/ Probability - 확률변수 : 확률적으로 값이 결정되는 변수- 확률 분포 : 확률 변수가 어떤 값일 확률을 나타내는 함수- 확률 : 어떤 사건이 일어났을 지에 대한 가능성을 0~1 사이의 숫자로 표현한 값def pmf_coin(outcome): ''' 본 함수는 동전을 던졌을 때 나오는 결과(Head 혹은 Tail)를 입력값으로 받는다. 입력값 outcome이 Head와 Tail 둘 중 하나일 때는 0.5, 그 외에는 0이 확률이 된다. 확률 변수의 형식으로, 주어진 outcome에 대한 확률을 출력한다. 
''' if outcome in ('Head', 'Tail'): p = 0.5 else: p = 0.0 print(f"P(X = x) = {p:.2f}") pmf_coin('Head') pmf_coin('Tail') pmf_coin('etc')P(X = x) = 0.50 P(X = x) = 0.50 P(X = x) = 0.00Probability distributiondef pmf_bern(p, x): ''' 주어진 p와 x에 관한 베르누이 분포의 확률 값을 반환한다. x는 0과 1만이 가능하다. ''' if x in (0, 1): prob = (p**x)*((1-p)**(1-x)) else: prob = 0 print(f"P(X={x}; p={p}) = {prob:.2f}") return prob pmf_bern(p=0.7, x=1) pmf_bern(p=0.2, x=1) pmf_bern(p=0.2, x=0)P(X=1; p=0.7) = 0.70 P(X=1; p=0.2) = 0.20 P(X=0; p=0.2) = 0.80pmf vs pdfdef pdf_unif(x, a=0, b=1): ''' 주어진 a, b, x에 관한 균등 분포의 확률 값을 출력한다. ''' if x < a or x > b: prob = 0 else: prob = (1/(b - a)) * (x - a) print("P(X=%s; a=%s, b=%s) = %.2f"%(x, a, b, prob)) pdf_unif(x=0.5) pdf_unif(x=0, a=-1, b=2) pdf_unif(x=2.5, a=0, b=3.5)P(X=0.5; a=0, b=1) = 0.50 P(X=0; a=-1, b=2) = 0.33 P(X=2.5; a=0, b=3.5) = 0.71Normal distributionfrom scipy.stats import norm # scipy 패키지를 사용합니다. def pdf_norm(x, mu=0, sigma=1): ''' 주어진 x, mu, sigma에 관한 정규 분포의 확률 값을 출력한다. ''' prob = norm(loc=mu, scale=sigma).pdf(x) print(f"P(X={x}; mu={mu}, sigma={sigma}) = {prob:.2f}") pdf_norm(0) pdf_norm(1.96, 0, 1) pdf_norm(-1, 2, 10)P(X=0; mu=0, sigma=1) = 0.40 P(X=1.96; mu=0, sigma=1) = 0.06 P(X=-1; mu=2, sigma=10) = 0.04Binomial distributionfrom scipy.stats import binom # scipy 패키지를 사용합니다. def pdf_binom(x, n, p): ''' 주어진 x, n, p 관한 이항 분포의 확률 값을 출력한다. ''' prob = binom(n=n, p=p).pmf(x) print(f"P(X={x}; n={n}, p={p}) = {prob:.2f}") pdf_binom(x=3, n=10, p=0.3) pdf_binom(x=7, n=10, p=0.7) pdf_binom(x=50, n=100, p=0.1)P(X=3; n=10, p=0.3) = 0.27 P(X=7; n=10, p=0.7) = 0.27 P(X=50; n=100, p=0.1) = 0.00Poisson distributionfrom scipy.stats import poisson import matplotlib.pyplot as plt def pmf_poisson_graphing(lamb): ''' 주어진 lambda에 대한 확률값을 시각화한다. x의 범위는 [0, 3*lambda]이다. 
''' xs = range(0, lamb*3+1) ps = [poisson(mu=lamb).pmf(x) for x in xs] fig, ax = plt.subplots() ax.plot( xs, ps, marker='o' ) plt.show() pmf_poisson_graphing(3) pmf_poisson_graphing(7)Standardization and Normalizationfrom sklearn import datasets import pandas as pd import matplotlib.pyplot as plt iris = datasets.load_iris() # 데이터 꺼내기 X = pd.DataFrame(iris.data, columns = iris['feature_names']) # pandas df로 변환 sepal_length = X['sepal length (cm)'] sepal_length_normalized = (sepal_length - sepal_length.mean()) / sepal_length.var() sepal_length_standarized = (sepal_length - sepal_length.min()) / (sepal_length.max() - sepal_length.min()) fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(15,5)) ax1.hist(sepal_length, color='skyblue') ax1.set_title('Original') ax2.hist(sepal_length_normalized, color='skyblue') ax2.set_title('Normalization') ax3.hist(sepal_length_standarized, color='skyblue') ax3.set_title('Standardization') plt.show()Approximationfrom scipy.stats import binom, poisson, norm import matplotlib.pyplot as plt def poisson_binom_plot(n, p): lamb = n*p mu = n*p std = n*p*(1-p) x_min = int(-n*0.2) x_max = int(n*1.2) probs_binom = [binom.pmf(i,n,p) for i in range(x_min, x_max+1)] probs_poisson = [poisson.pmf(i, lamb) for i in range(x_min, x_max+1)] probs_norm = [norm.pdf(i, mu, std) for i in range(x_min, x_max+1)] plt.plot(range(x_min, x_max+1), probs_binom, alpha=0.5) plt.plot(range(x_min, x_max+1), probs_poisson, alpha=0.5) plt.plot(range(x_min, x_max+1), probs_norm, alpha=0.5) plt.legend(['binom', 'poisson', 'norm']) plt.title(f'N={n}, p={p}') plt.show() poisson_binom_plot(5, 0.9) poisson_binom_plot(20, 0.4) poisson_binom_plot(20, 0.9) poisson_binom_plot(100, 0.0001)Negative binomial distributiontry_counts = [] for _ in tqdm(range(n)): try_count = 0 ''' code here ''' try_counts.append(try_count) return try_counts import numpy as np from tqdm import tqdm import matplotlib.pyplot as plt from collections import Counter from scipy.stats import binom def try_till_success(n): ''' 1. 1회씩 시도(Bernoulli)하여 성공 횟수가 10회 누적될 때까지 try count를 누적시킨다 2. 10번 성공하면 try_counts에 append 한 뒤 초기화 3. 1~2를 n회 반복 *numpy에는 Bernoulli가 없으니 이항 분포에서 n=1로 하여 대신 사용한다. 
''' try_counts = [] try_count = 0 for _ in tqdm(range(n)): if binom(n=1, p=0.7).rvs() == 1: try_count += 1 else: continue if try_count == 10: try_counts.append(try_count) try_count = 0 return try_counts def bar_plotter(nums): ''' matplotlib이 제공하는 히스토그램은 여러 개를 겹쳐그릴 때 이쁘지 않아 bar plot으로 대신 그렸다 ''' num_counter = Counter(nums) plt.bar( num_counter.keys(), num_counter.values(), width=0.9, alpha=0.3 ) plt.show() n = 100000 p = 0.7 try_list = try_till_success(n) nb_list = [10+np.random.negative_binomial(10, p) for _ in range(n)] binom_list = [np.random.binomial(int(10/0.7**2), p) for _ in range(n)] try_counter = Counter(try_list) nb_counter = Counter(nb_list) binom_counter = Counter(binom_list) plt.figure(figsize=(12,10)) plt.subplot(2,1,1) plt.bar(try_counter.keys(), try_counter.values(), alpha=0.3) plt.bar(nb_counter.keys(), nb_counter.values(), alpha=.3) plt.legend(['try till sucess', 'negative binomial']) plt.subplot(2,1,2) plt.bar(try_counter.keys(), try_counter.values(), alpha=0.3) plt.bar(binom_counter.keys(), binom_counter.values(), alpha=.3) plt.legend(['try till sucess', 'binomial']) plt.show()100%|██████████| 100000/100000 [00:42<00:00, 2365.01it/s]How To: Adding Hunting Bookmarks from Notebooks__Notebook Version:__ 1.0__Python Version:__ Python 3.6 (including Python 3.6 - AzureML)__Required Packages:__ azure 4.0.0, azure-cli-profile 2.1.4__Platforms Supported:__ - Azure Notebooks Free Compute - Azure Notebooks DSVM__Data Source Required:__ - no DescriptionThe sample notebook shows how to add hunting bookmarks from Jupyter notebooks to Sentinel portal.!pip install Azure-Sentinel-Utilities --upgrade !pip install jsons from SentinelPortal import Constants, BookmarkProperties, BookmarkModel, BookmarkHelper from azure.common.credentials import get_azure_cli_credentials # please enter your tenant domain below, for Microsoft, using: microsoft.onmicrosoft.com !az login --tenant 'microsoft.onmicrosoft.com' # Retrieve access token creds, sub_id = get_azure_cli_credentials() token = creds._token_retriever() access_token = token[2]['accessToken'] # User Input subscription_id = input('Subscription Id: ') resource_group_name = input('Resource Group: ') workspace_name = input('LA Workspace Name: ') entity_mappings = {} entity_mappings.update({"550a6d02-d667-49d8-969a-e709cce03293": "Account"}) entity_mappings.update({"192.168.3.11": "Host"}) # Construct properties properties = BookmarkProperties("New Test 001", query="AzureActivity | take 1", query_result_dict=entity_mappings, tag_list=None, notes=None, event_time=None, query_start_time=None, query_end_time=None) #properties = BookmarkProperties("New nb test 10021621", query="AzureActivity | take 1", query_result="{\"__entityMapping\":{\"550a6d02-d667-49d8-969a-e709cce03293\":\"Account\", \"201.12.34.88\":\"Host\"}}", tag_list=None, notes=None, event_time=None, query_start_time=None, query_end_time=None) model = BookmarkModel("New Test 001", subscription_id, resource_group_name, workspace_name, properties) helper = BookmarkHelper(access_token) result = helper.get_bookmarks(model) print(result.text) result = helper.add_bookmark(model) print(result.text)pandas - "A melhor biblioteca do Python para Dados"- Quase sempre que você for trabalhar com dados no Python você vai usar o pandas, então é importante você saber trabalhar com essa biblioteca muito bem- Vamos aprender os principais comandos e os princípios básicos do pandas- Faça o download do Gabarito desse arquivo e use como uma cartilha de consulta (Link na descrição) Importando o pandasimport 
pandas as pdCriando um dataframe a partir de um dicionário# dataframe = pd.DataFrame() venda = {'data': ['15/02/2021', '16/02/2021'], 'valor': [500, 300], 'produto': ['feijao', 'arroz'], 'qtde': [50, 70]} print(venda) vendas_df = pd.DataFrame(venda){'data': ['15/02/2021', '16/02/2021'], 'valor': [500, 300], 'produto': ['feijao', 'arroz'], 'qtde': [50, 70]}Visualização dos Dados - print - displayprint(vendas_df) display(vendas_df)Importando arquivos e bases de dadosvendas_df = pd.read_excel("Vendas.xlsx") display(vendas_df)Resumos de Visualização de Dados simples e úteis - head - shape - describedisplay(vendas_df.head()) display(vendas_df.head(10)) print(vendas_df.shape) display(vendas_df.describe())Pegar 1 coluna (e os pd.Series)produtos = vendas_df[['Produto', 'ID Loja']] display(produtos).loc, um método muito importante - Pegar 1 linha - Pegar linhas de acordo com alguma condição - Pegar linhas e colunas específicas - Pegar 1 valor específico# pegar uma linha # display(vendas_df.loc[0:4]) # pegar linhas que correspondem a uma condição vendas_norteshopping_df = vendas_df.loc[vendas_df['ID Loja'] == 'Norte Shopping'] display(vendas_norteshopping_df) # pegar várias linhas e colunas usando o Loc vendas_norteshopping_df = vendas_df.loc[vendas_df['ID Loja'] == 'Norte Shopping', ['ID Loja', 'Produto', 'Quantidade']] display(vendas_norteshopping_df) # pegar um valor específico print(vendas_df.loc[1, 'Produto'])CamisetaAdicionar 1 coluna# a partir de uma coluna que já existe vendas_df['Comissão'] = vendas_df['Valor Final'] * 0.05 display(vendas_df) # criar uma coluna com valor padrão vendas_df.loc[:, ['Imposto']] = 0 display(vendas_df)Adicionar 1 linha - Linhas de um complemento da base de dadosvendas_dez_df = pd.read_excel("Vendas - Dez.xlsx") display(vendas_dez_df) vendas_df = vendas_df.append(vendas_dez_df) vendas_dfExcluir linhas e colunasvendas_df = vendas_df.drop("Imposto", axis = 1) display(vendas_df) vendas_df = vendas_df.drop(1, axis = 0) display(vendas_df)Valores Vazios - Deletar linhas/colunas vazias - Deletar linhas que possuem valores vazios - Preencher valores vazios (média e último valor)# deletar linhas e colunas completamente vazias vendas_df = vendas_df.dropna(how='all', axis=1) # deletar linhas que possuem pelo menos um valor vazio vendas_df = vendas_df.dropna() display(vendas_df) display(vendas_df) # preencher valores vazios com a média da coluna vendas_df['Comissão'] = vendas_df['Comissão'].fillna(vendas_df['Comissão'].mean()) display(vendas_df) # preencher com o último valor vendas_df = vendas_df.ffill()Calcular Indicadores - Groupby - Value Counts# value counts transacoes_loja = vendas_df['ID Loja'].value_counts() display(transacoes_loja) # group by faturamento_produto = vendas_df[['Produto', 'Valor Final']].groupby('Produto').sum() display(faturamento_produto)Mesclar 2 dataframes (Procurar informações de um dataframe em outro)gerentes_df = pd.read_excel('Gerentes.xlsx') vendas_df = vendas_df.merge(gerentes_df) display(vendas_df)Correlation function of DR72 SDSS VAGC Catalog First import all the modules such as healpy and astropy needed for analyzing the structureimport healpix_util as hu import astropy as ap import numpy as np from astropy.io import fits from astropy.table import Table import astropy.io.ascii as ascii from astropy.io import fits from astropy.constants import c import matplotlib.pyplot as plt import math as m from math import pi #from scipy.constants import c import scipy.special as sp from astroML.decorators import pickle_results from scipy import 
integrate import warnings from sklearn.neighbors import BallTree import pickle import multiprocessing as mp import time from lco07metric import * from progressbar import * from tqdm import * from functools import partial import pymangle #from astroML.datasets import fetch_sdss_specgals #from astroML.correlation import bootstrap_two_point_angular %matplotlib inline # Getting back the objects: with open('datsLCf.pkl') as f: # Python 3: open(..., 'rb') dat = pickle.load(f) dat bins=np.arange(0.,0.08,0.005) print bins Nbins=len(bins) Nbins binsq=(bins*0.007)**2 binsq LCometric07(dat[0],dat[1]) %%time BT_DLCo = BallTree(dat,metric='pyfunc',func=LCometric07,leaf_size=5) with open('BTDdatsLCo07.pkl', 'w') as f: pickle.dump(BT_DLCo,f) with open('BTDdatsLCo07.pkl') as f: BTDLCo = pickle.load(f) BTDLCo %%time start_time=time.time() counts_DD=BTDLCo.two_point_correlation(dat,binsq) print counts_DD end_time=time.time() tottime=end_time-start_time print "Total run time:" print tottime with open('BTDcDDLCo07.pkl', 'w') as f: pickle.dump(counts_DD,f) with open('BTDcDDLCo07.pkl') as f: counts_DD = pickle.load(f) counts_DD DD=np.diff(counts_DD) DD plt.plot(bins[1:len(bins)],DD,'ro-')BallTree.two_point_correlation works almost 10 times faster! with leaf_size=5 Going with it to the random catalog# Getting back the objects: with open('rDR7200kLCsrarf.pkl') as f: # Python 3: open(..., 'rb') datR = pickle.load(f) datR %%time BT_RLCo = BallTree(datR,metric='pyfunc',func=LCometric07,leaf_size=5) with open('BTR200kdatsLCo07.pkl', 'w') as f: pickle.dump(BT_RLCo,f) with open('BTR200kdatsLCo07.pkl') as f: BTRLCo = pickle.load(f) BTRLCo %%time start_time=time.time() counts_RR=BTRLCo.two_point_correlation(datR,binsq) print counts_RR end_time=time.time() tottime=end_time-start_time print "Total run time:" print tottime with open('BTR200kcRRLCo07.pkl', 'w') as f: pickle.dump(counts_RR,f) with open('BTR200kcRRLCo07.pkl') as f: counts_RR = pickle.load(f) counts_RR RR=np.diff(counts_RR) RR plt.plot(bins[1:len(bins)],RR,'bo-') RR_zero = (RR == 0) RR[RR_zero] = 1 %%time start_time=time.time() counts_DR=BTRLCo.two_point_correlation(dat,binsq) print counts_DR end_time=time.time() tottime=end_time-start_time print "Total run time:" print tottime with open('BTR200kcDRLCo07.pkl', 'w') as f: pickle.dump(counts_DR,f) with open('BTR200kcDRLCo07.pkl') as f: counts_DR = pickle.load(f) counts_DR DR=np.diff(counts_DR) DR corrells=(4.0 * DD - 4.0 * DR + RR) / RR corrells plt.plot(bins[1:len(bins)],corrells,'go-') plt.plot(bins[1:len(bins)],bins[1:len(bins)]*bins[1:len(bins)]*corrells*(c*1e-5)**2,'go-') plt.plot(bins[2:len(bins)],bins[2:len(bins)]*bins[2:len(bins)]*corrells[1:len(bins)]*(c*1e-5)**2,'go-') plt.plot(bins[2:len(bins)],corrells[1:len(bins)],'go-') plt.plot(bins[2:len(bins)],corrells[1:len(bins)],'go-') plt.savefig("correl2xlsLCo07.pdf") plt.plot(bins[2:len(bins)]*c/1e5,corrells[1:len(bins)],'bo-') plt.savefig("correl2x1lsLCo07.pdf") plt.yscale('log') plt.plot(bins[1:len(bins)]*c/1e5,corrells,'bo-') plt.savefig("correllsfiglogLCo07.pdf") plt.yscale('log') plt.plot(bins[2:len(bins)]*c/1e5,corrells[1:len(bins)],'ro-') plt.savefig("correllslog2xLCo07.pdf") plt.yscale('log') plt.xscale('log') plt.plot(bins[1:len(bins)]*c/1e5,corrells,'bo-') plt.savefig("correllsloglogLCo07.pdf")Project descriptionThe gym chain Model Fitness is developing a customer interaction strategy based on analytical data.One of the most common problems gyms and other services face is customer churn. How do you know if a customer is no longer with you? 
You can calculate churn based on people who get rid of their accounts or don't renew their contracts. However, sometimes it's not obvious that a client has left: they may walk out on tiptoes.For a gym, it makes sense to say a customer has left if they don't come for a month. Of course, it's possible they're in Cancun and will resume their visits when they return, but that's not a typical case. Usually, if a customer joins, comes a few times, then disappears, they're unlikely to come back.In order to fight churn, Model Fitness has digitized a number of its customer profiles. Your task is to analyze them and come up with a customer retention strategy.You should:- Learn to predict the probability of churn (for the upcoming month) for each customer- Draw up typical user portraits: select the most outstanding groups and describe their main features- Analyze the factors that impact churn most- Draw basic conclusions and develop recommendations on how to improve customer service:- Identify target groups- Suggest measures to cut churn- Describe any other patterns you see with respect to interaction with customers The project goal is to analyze customer profiles of the Model Fitness chain and come up with a customer retention strategy in order to fight churn. Step 1. Download the dataimport pandas as pd import seaborn as sns from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier import matplotlib.pyplot as plt from sklearn.metrics import accuracy_score, precision_score, recall_score from scipy.cluster.hierarchy import dendrogram, linkage from sklearn.cluster import KMeans import sys import warnings if not sys.warnoptions: warnings.simplefilter("ignore") data = pd.read_csv('/datasets/gym_churn_us.csv') data.info() data.shape data.columns data.head(10) # Check for duplicates. data.duplicated() data.duplicated().sum()ConclusionIn this step we download the data and check it for duplicates; none are found. The data types are proper; they could be downcast from int64, but this is not necessary because the total data volume is quite small. Step 2. Carry out exploratory data analysis (EDA) - Look at the dataset: does it contain any missing features? Study the mean values and standard deviation (use the describe() method).- Look at the mean feature values in two groups: for those who left (churn) and for those who stayed (use the groupby() method).- Plot bar histograms and feature distributions for those who left (churn) and those who stayed.- Build a correlation matrix and display it.# look at data: for col in data.columns: temp_series = data[col].value_counts().sort_index().sort_values() print(temp_series) # Look at the dataset: mean values and standard deviation. data.describe(include='all')The typical client is about 29 years old, probably has a 6-month contract, lives (or works) nearby, visits the gym about 2 times a month, and spends about 150 USD a month on additional charges. The client could be of either gender, with almost equal probability.# Look at the mean feature values in two groups: for those who left (churn) and for those who stayed. data.groupby('Churn').mean()We can see that some mean feature values differ between the two groups. 
For those who stayed, means of 'Near Location', 'Partner', 'Promo Friends', 'Contract period', 'Group Visits', 'Age', 'Avg additional charges total', 'Month to end contract', 'Lifetime', 'Avg class frequency total', 'Avg class frequency current month' are higher than for those who churn. Means of 'gender' and 'Phone' does not differ considerably.# Plot bar histograms and feature distributions for those who stayed - first bunch, # and for those who left (churn) - second bunch. data.groupby('Churn').hist(figsize=(14,14)) ""We plot bar histograms and feature distributions for those who stayed (first bunch of bar histograms) and for those who left (second bunch of bar histograms). Feature 'Age' has normal distribution for both groups. Features 'Avg additional charges total', 'Avg class frequency current month', 'Avg class frequency total' shows skewed normal distribution. 'Avg class frequency current month' for those who left demonstrates alarming peak on '0'. That must be very important feature so. 'Contract Period' could be telling a lot on user intentions, as we see predominant one-two-month contracts among those who left. On the contrary, we can see fair distribution of short and six-twelf-month contracts in those who stayed. Feature 'Group Visits' supports those who stayed, as well as feature 'Partner'. Feature 'Lifetime' naturally go longer for those who stayed. Feature 'Month to end contract' seem to just follow the 'Contract Period' feature. Features 'Near Location', 'Phone', 'Promo Friends' and 'Gender' seem to be insignificant and not influencing factors.# build and render a correlation matrix cm = data.corr() # calculate correlation matrix fig, ax = plt.subplots(figsize=(10,10)) # plot an annotated heatmap for the correlation matrix sns.heatmap(cm, annot = True, square = True) plt.show()We build a correlation matrix and display it in the form of heatmap. From this matrix we can see that there is a strong correlation between features 'Avg class frequency total'and 'Avg class frequency current month', namely, 0.95; and 'Month to end contract' and 'Contract period', 0.97. Other features do not demonstrate any considerable correlation. Conclusion The data look good. There are no missing values, all min, max, means and standard deviation look meaningful, that means there are no outliers in the dataset provided. The data types are proper, although could be reduced from int64 , this is not necessary because total data volume is quite small.We look at the mean feature values in two groups: for those who left (churn) and for those who stayed (using the groupby() method).We plot bar histograms and feature distributions for those who stayed (first bunch of bar histograms) and for those who left (second bunch of bar histograms). Feature 'Age' has normal distribution for both groups. Features 'Avg additional charges total', 'Avg class frequency current month', 'Avg class frequency total' shows skewed normal distribution. 'Avg class frequency current month' for those who left demonstrates alarming peak on '0'. That must be very important feature so. 'Contract Period' could be telling a lot on user intentions, as we see predominant one-two-month contracts among those who left. On the contrary, we can see fair distribution of short and six-twelf-month contracts in those who stayed. Feature 'Group Visits' supports those who stayed, as well as feature 'Partner'. Feature 'Lifetime' naturally go longer for those who stayed. 
Feature 'Month to end contract' seem to just follow the 'Contract Period' feature. Features 'Near Location', 'Phone', 'Promo Friends' and 'Gender' seem to be insignificant and not influencing factors.We build a correlation matrix and display it in the form of heatmap. From this matrix we can see that there is a strong correlation between features 'Avg class frequency total'and 'Avg class frequency current month', namely, 0.95; and 'Month to end contract' and 'Contract period', 0.97. Other features do not demonstrate any considerable correlation. Step 3. Build a model to predict user churnBuild a binary classification model for customers where the target feature is the user's leaving next month.- Divide the data into train and validation sets using the train_test_split() function.- Train the model on the train set with two methods: - logistic regression - random forest- Evaluate accuracy, precision, and recall for both models using the validation data. Use them to compare the models. Which model gave better results?Remember to indicate the random_state parameter when dividing data and defining the algorithm.# divide the data into features (the X matrix) and a target variable (y) X = data.drop('Churn', axis = 1) y = data['Churn'] # divide the data into train and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) # create a StandardScaler object and apply it to the train set scaler = StandardScaler() X_train_st = scaler.fit_transform(X_train) # train the scaler and transform the matrix for the train set # apply standardization to the feature matrix for the test set X_test_st = scaler.transform(X_test) # define the model's algorithm Logistic Regression model = LogisticRegression(random_state=0) # train your model model.fit(X_train, y_train) # binary prediction predictions = model.predict(X_test) # print the studied metrics for the resulting prediction print('Logistic Regression Model:') print('Accuracy: {:.2f}'.format(accuracy_score(y_test, predictions))) print('Precision: {:.2f}'.format(precision_score(y_test, predictions))) print('Recall: {:.2f}'.format(recall_score(y_test, predictions))) # define the model's algorithm Random Forest model = RandomForestClassifier(random_state=0) # train your model model.fit(X_train, y_train) # binary prediction predictions = model.predict(X_test) # print the studied metrics for the resulting prediction print('Random Forest Model:') print('Accuracy: {:.2f}'.format(accuracy_score(y_test, predictions))) print('Precision: {:.2f}'.format(precision_score(y_test, predictions))) print('Recall: {:.2f}'.format(recall_score(y_test, predictions)))Random Forest Model: Accuracy: 0.91 Precision: 0.83 Recall: 0.78ConclusionWe build a binary classification model for customers where the target feature is the user's leaving next month.We divide the data into train and validation sets using the train_test_split() function. We indicate the random_state = 0 parameter when dividing data and defining the algorithm. We train the model on the train set with two methods:- logistic regression- random forestWe evaluate accuracy, precision, and recall for both models using the validation data. We use them to compare the models. Logistic Regression Model (Accuracy: 0.93, Precision: 0.86, Recall: 0.83) gave better results than Random Forest Model (Accuracy: 0.91, Precision: 0.83, Recall: 0.78). All metric for Logistic Regression Mode exceed ones for other model. I believe Recall is the most important metric for our task. Step 4. 
Create user clustersSet aside the column with data on churn and identify object (user) clusters:- Standardize the data.- Use the linkage() function to build a matrix of distances based on the standardized feature matrix and plot a dendrogram. Note: rendering the dendrogram may take time! Use the resulting graph to estimate the number of clusters you can single out.- Train the clustering model with the K-means algorithm and predict customer clusters. (Let the number of clusters be n=5).- Look at the mean feature values for clusters. Does anything catch your eye?- Plot distributions of features for the clusters. Do you notice anything?- Calculate the churn rate for each cluster (use the groupby() method). Do they differ in terms of churn rate? Which clusters are prone to leaving, and which are loyal?# the standardization of data before passing it to the algorithm sc = StandardScaler() X_sc = sc.fit_transform(X) # the linkage() function to build a matrix of distances based on the standardized feature matrix linked = linkage(X_sc, method = 'ward') # plot a dendrogram plt.figure(figsize=(15, 10)) dendrogram(linked, orientation='top') plt.title('Hierarchical clustering for Model Fitness') plt.show()We plot a dendrogram to visualize clustering and use the plot to estimate the number of clusters we can single out. From the dendrogram we can see that 4 would be the optimal number of clusters.# now launch the faster K-means algorithm with n_clusters = 5. km = KMeans(n_clusters = 5, random_state=0) # setting the number of clusters as 5 labels = km.fit_predict(X_sc) # applying the algorithm to the data and forming a cluster vector labels len(list(labels)) data['clusters'] = labels data data['clusters'].value_counts().sort_index().sort_values() # look at the mean feature values for clusters data.drop('Churn', axis = 1).groupby('clusters').mean().sort_values(by='clusters', ascending=True) # plot distributions of features for the clusters plt.figure() sns.distplot(data[data['clusters'] == 0]['gender']) sns.distplot(data[data['clusters'] == 1]['gender']) sns.distplot(data[data['clusters'] == 2]['gender']) sns.distplot(data[data['clusters'] == 3]['gender']) sns.distplot(data[data['clusters'] == 4]['gender']) plt.show() plt.figure() sns.distplot(data[data['clusters'] == 0]['Near_Location']) sns.distplot(data[data['clusters'] == 1]['Near_Location']) sns.distplot(data[data['clusters'] == 2]['Near_Location']) sns.distplot(data[data['clusters'] == 3]['Near_Location']) sns.distplot(data[data['clusters'] == 4]['Near_Location']) plt.show() plt.figure() sns.distplot(data[data['clusters'] == 0]['Partner']) sns.distplot(data[data['clusters'] == 1]['Partner']) sns.distplot(data[data['clusters'] == 2]['Partner']) sns.distplot(data[data['clusters'] == 3]['Partner']) sns.distplot(data[data['clusters'] == 4]['Partner']) plt.show() plt.figure() sns.distplot(data[data['clusters'] == 0]['Promo_friends']) sns.distplot(data[data['clusters'] == 1]['Promo_friends']) sns.distplot(data[data['clusters'] == 2]['Promo_friends']) sns.distplot(data[data['clusters'] == 3]['Promo_friends']) sns.distplot(data[data['clusters'] == 4]['Promo_friends']) plt.show() plt.figure() sns.distplot(data[data['clusters'] == 0]['Phone']) sns.distplot(data[data['clusters'] == 1]['Phone']) sns.distplot(data[data['clusters'] == 2]['Phone']) sns.distplot(data[data['clusters'] == 3]['Phone']) sns.distplot(data[data['clusters'] == 4]['Phone']) plt.show() plt.figure() sns.distplot(data[data['clusters'] == 0]['Contract_period']) 
sns.distplot(data[data['clusters'] == 1]['Contract_period']) sns.distplot(data[data['clusters'] == 2]['Contract_period']) sns.distplot(data[data['clusters'] == 3]['Contract_period']) sns.distplot(data[data['clusters'] == 4]['Contract_period']) plt.show() plt.figure() sns.distplot(data[data['clusters'] == 0]['Group_visits']) sns.distplot(data[data['clusters'] == 1]['Group_visits']) sns.distplot(data[data['clusters'] == 2]['Group_visits']) sns.distplot(data[data['clusters'] == 3]['Group_visits']) sns.distplot(data[data['clusters'] == 4]['Group_visits']) plt.show() plt.figure() sns.distplot(data[data['clusters'] == 0]['Age']) sns.distplot(data[data['clusters'] == 1]['Age']) sns.distplot(data[data['clusters'] == 2]['Age']) sns.distplot(data[data['clusters'] == 3]['Age']) sns.distplot(data[data['clusters'] == 4]['Age']) plt.show() plt.figure() sns.distplot(data[data['clusters'] == 0]['Avg_additional_charges_total']) sns.distplot(data[data['clusters'] == 1]['Avg_additional_charges_total']) sns.distplot(data[data['clusters'] == 2]['Avg_additional_charges_total']) sns.distplot(data[data['clusters'] == 3]['Avg_additional_charges_total']) sns.distplot(data[data['clusters'] == 4]['Avg_additional_charges_total']) plt.show() plt.figure() sns.distplot(data[data['clusters'] == 0]['Month_to_end_contract']) sns.distplot(data[data['clusters'] == 1]['Month_to_end_contract']) sns.distplot(data[data['clusters'] == 2]['Month_to_end_contract']) sns.distplot(data[data['clusters'] == 3]['Month_to_end_contract']) sns.distplot(data[data['clusters'] == 4]['Month_to_end_contract']) plt.show() plt.figure() sns.distplot(data[data['clusters'] == 0]['Lifetime']) sns.distplot(data[data['clusters'] == 1]['Lifetime']) sns.distplot(data[data['clusters'] == 2]['Lifetime']) sns.distplot(data[data['clusters'] == 3]['Lifetime']) sns.distplot(data[data['clusters'] == 4]['Lifetime']) plt.show() plt.figure() sns.distplot(data[data['clusters'] == 0]['Avg_class_frequency_total']) sns.distplot(data[data['clusters'] == 1]['Avg_class_frequency_total']) sns.distplot(data[data['clusters'] == 2]['Avg_class_frequency_total']) sns.distplot(data[data['clusters'] == 3]['Avg_class_frequency_total']) sns.distplot(data[data['clusters'] == 4]['Avg_class_frequency_total']) plt.show() plt.figure() sns.distplot(data[data['clusters'] == 0]['Avg_class_frequency_current_month']) sns.distplot(data[data['clusters'] == 1]['Avg_class_frequency_current_month']) sns.distplot(data[data['clusters'] == 2]['Avg_class_frequency_current_month']) sns.distplot(data[data['clusters'] == 3]['Avg_class_frequency_current_month']) sns.distplot(data[data['clusters'] == 4]['Avg_class_frequency_current_month']) plt.show()We plot distributions of features for the clusters. 
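As an aside, the battery of repeated sns.distplot calls above can be produced with a short loop; a minimal sketch, assuming the same data frame, the 'clusters' column added earlier, and the seaborn/matplotlib imports already in scope (note that newer seaborn versions deprecate distplot in favour of histplot/displot):

```python
# Sketch: draw the per-cluster distribution of every feature with one loop
# (equivalent to the repeated sns.distplot calls above).
feature_cols = [col for col in data.columns if col not in ('Churn', 'clusters')]
for col in feature_cols:
    plt.figure()
    for cluster in sorted(data['clusters'].unique()):
        sns.distplot(data.loc[data['clusters'] == cluster, col], label='cluster %d' % cluster)
    plt.title(col)
    plt.legend()
    plt.show()
```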
Distributions of features for different clusters mostly repeat their shapes but differ in quantity.# calculate the churn rate for each cluster data.groupby('clusters').mean().sort_values(by='Churn', ascending=False)usa.gov 数据集 数据读入——处理json格式数据集"""获取filename""" path = 'D:/PythonWorkSpace/Python for Data Analysis/Python for Data Analysis Source Code/ch02/usagov_bitly_data2012-03-16-1331923249.txt' # 以txt格式读取文件 # 单行读取 ##with open(path,'r') as fileobject: ## print(fileobject.readline()) ### 逐行读取 ### for line in fileobject: ### print(line.rstrip()) open(path).readline() """以json格式读取文件""" import json # 列表推导式 -- 将单行数据储存分别作为元素存储到列表中,records现在保存一个字典列表(数据集格式本身即为每行属于一个对象字典),其操作同下 records = [json.loads(line) for line in open(path)] # 列表头元素 records[0] # 列表头元素的字典的tz-时区key records[0]['tz'] # 列表中所有含key-tz的字典的对应value组成列表time_zones time_zones = [record['tz'] for record in records if 'tz' in record] # 前十tz:value time_zones[:10]采用 手写纯函数/调用python标准库函数/调用Pandas 分别来处理刚才获取的数据实现对 '时区key-value的遍历计数'"""手写纯函数体处理""" def get_counts(sequence): # 创建一个计数存储字典 counts = {} # 遍历 for x in sequence: if x in counts: #找到时,计数值+1 counts[x] += 1 else: # 找不到时,新建键值对 counts[x] = 1 return counts # 调用计数函数 counts = get_counts(time_zones) # 获取纽约地区的统计次数 counts['America/New_York'] """调用python标准库函数处理""" from collections import defaultdict def get_counts2(sequence): # 所有值均会被初始化为0,省去了上种方式中每个第一次匹配的键名都需新创建一个键值对的冗余步骤。 counts = defaultdict(int) for x in sequence: counts[x] += 1 return counts # 同上例 counts2 = get_counts2(time_zones) counts['America/New_York'] """只获取前十位的时区及其计数值""" def top10_counts(count_dict,n=10): # 列表推导式 -- 于count_dict中获取一个 key:value - tz:count,以(count,tz)存储在列表中 value_key_pairs = [(count,tz) for tz,count in count_dict.items()] # 对列表中二元组进行排序 value_key_pairs.sort() # 返回排序过后的前十 [-n:] 时区二元组 return value_key_pairs[-n:] top10_counts(counts) """调用python标准库函数来处理上述的 获取前十位的问题""" from collections import Counter # 将time_zones列表传入Counter中,新建其对象counts然后调用函数most_common()获取其中前十位置 counts = Counter(time_zones) counts.most_common(10) """调用Pandas + Numpy处理""" from pandas import DataFrame,Series import pandas as pd import numpy as np # DataFrame是Pandas中最重要的数据结构,用于将数据表示为一个表格 # 此处即为将原始数据创建为一个表格 frame = DataFrame(records) frame # 将表格表示为一个摘要视图 summary view,获取前十位元素字典的时区键值 frame['tz'][:10] # 通过 frame['tz'] 返回的 series对象所含的 value_counts()方法轻松实现上面的获取前十时区及其计数操作 tz_counts = frame['tz'].value_counts() tz_counts[:10]运用绘图库Matplotlib为数据可视化# 注意到在之前的排序中有许多的缺失值,比如下方计数为521的时区项 from IPython.display import Image Image(filename="ch2_1.png") # 通过 通过 frame['tz'] 返回的 series对象所含的 pandas.DataFrame.fillna()方法,替换缺失值 NA # 对于缺失值 -- 空字符串,可以通过 布尔型数组索引--'Missing' 加以替换 clean_tz = frame['tz'].fillna('Missing') clean_tz[clean_tz == ''] = 'Unknown' tz_counts = clean_tz.value_counts() tz_counts[:10] # 通过tz_counts对象(此时为一个字典对象)的plot()方法,绘制一张 水平条形图 # 进入pylab模式,其效果为自动在当前生命周期中集成matplotlib其效果与下句直接引入 matplotlib.pyplot()一致,具体作用看evernote %pylab inline # import matplotlib.pyplot as plot tz_counts[:10].plot(kind='barh',rot=0) # 调用a字段数据可查看,执行url短缩操作的 # 浏览器、设备、应用程序相关信息 frame['a'][0] frame['a'][1] frame['a'][2]解析数据得出信息# 该a字段,即浏览器的USER_AGENT信息,我们接下来尝试解析这些信息 # Series 是一个一维数组对象,类似于 NumPy 的一维 array # 它除了包含一组数据还包含一组索引,所以可以把它理解为一组带索引的数组 # 利用列表推导式将a字段中数据进行 split()切分,只获取x.split()返回的列表中的第一段[0]数据,即浏览器信息 # 随后调用pandas.DataFrame.dropna()除去 a字段列中含缺失值的行 results = Series([x.split()[0] for x in frame.a.dropna()]) results[:5] # 以frame.a结构对比上方列表推导式所得结果 frame.a # 获取Series对象results中各个字符串的出现次数,并只获取前8切片 # 从而获得按照浏览器分类的时区统计信息 results.value_counts()[:8] # 接下来按照a字段中是否出现"windows"来判断用户的操作系统 # 从而将用户按照windows和非windows用户进行分类,来统计时区信息 # 
将原始数据中所有a字段为空值的数据行舍弃,然后重新构造为一个frame-cframe,aka-cleanframe cframe = frame[frame.a.notnull()] # 处理后效果如下 cframe # 通过np.where()对cframe中a字段进行分类,condition=cframe.a.str.contains('Windows') xfield='Windows' yfield='Not Windows' # 并将结果保存为Series结构 operating_system = pd.Series(np.where(cframe.a.str.contains('Windows'),'Windows','Not Windows'),name='os') operating_system[:5] # 通过pandas.groupby()对数据按照时区列表和新得的操作系统列表进行 分组 by_tz_os = cframe.groupby(['tz',operating_system]) # 通过size()对分组结果进行计数(类同于value_counts()函数效果,但因为value_counts()无法作用DataFrame对象,所以使用size()) # 通过unstack()对计数结果进行重构(原结构模式如下cell) # 最后通过fillna()将所有的空值替换为0 agg_counts = by_tz_os.size().unstack().fillna(0) agg_counts[:10] # 未使用unstack()重构结构的情况 agg_counts1 = by_tz_os.size() agg_counts1[:10] # 通过agg_counts中的行数构造一个间接索引数组来选取最常出现时区 # 通过sum()函数按照时区进行累加且规范化为'总计为1',并以argsort()升序排列 indexer = agg_counts.sum(1).argsort() indexer[:10] # 通过take以该顺序截取最后10行 count_subset = agg_counts.take(indexer)[-10:] count_subset # 生成条形图 count_subset.plot(kind = 'barh') # 生成堆积条形图 count_subset.plot(kind = 'barh',stacked = True) # 因为上两图中较小分组的相对比例看不太清楚,可以将各行 规范化为‘总计为1’,再重新绘图 normed_subset = count_subset.div(count_subset.sum(1),axis=0) normed_subset.plot(kind = 'barh',stacked=True)IntroductionThis notebook includes experiments on auxiliary learning. Please see the corresponding [repository](https://github.com/vivien000/auxiliary-learning) and the associated blog post.# Set to True to save the experiments' results on Google Drive google_drive = True #@title Tensorboard launch and utilities import tarfile import urllib.request import json import os import time from IPython.core.display import display, HTML get_ipython().system_raw('tensorboard --logdir ./log --host 0.0.0.0 --port 6006 &') if os.path.exists('/content'): get_ipython().system_raw('wget -q https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip') get_ipython().system_raw('unzip -qq ngrok-stable-linux-amd64.zip') get_ipython().system_raw('pip install -U -q PyDrive') get_ipython().system_raw('./ngrok http 6006 &') loop = True while loop: try: ngrok_details = urllib.request.urlopen('http://localhost:4040/api/tunnels') url = json.load(ngrok_details)['tunnels'][0]['public_url'] loop = False except (urllib.error.URLError, IndexError): time.sleep(1) if google_drive: from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from google.colab import auth from oauth2client.client import GoogleCredentials auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) else: url = 'http://0.0.0.0:6006' display(HTML("Link to Tensorboard")) def read_csv(file, *args, **kwargs): if google_drive: auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) listed = drive.ListFile({'q': "title contains '"+file+"'"}).GetList() drive.CreateFile({'id': listed[0]['id']}).GetContentFile(file) return pd.read_csv(file, *args, **kwargs) def append_to_log(file, line, header): if google_drive: auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) listed = drive.ListFile({'q': "title contains '"+file+"'"}).GetList() if len(listed) > 0: file = drive.CreateFile({'id': listed[0]['id']}) content = file.GetContentString() + line + '\n' else: file = drive.CreateFile({'title': file}) content = header + '\n' + line + '\n' file.SetContentString(content) file.Upload() else: if os.path.isfile(file): with 
open(file, 'a') as f: f.write(line) else: with open(file, 'w') as f: f.write(header + '\n' + line + '\n') def number_log_lines(file): if google_drive: auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) listed = drive.ListFile({'q': "title contains '"+file+"'"}).GetList() if len(listed) > 0: file = drive.CreateFile({'id': listed[0]['id']}) content = file.GetContentString() else: return 0 else: if os.path.isfile(file): with open(file, 'a') as f: content = f.read() else: return 0 try: return len(content.split('\n'))-2 except ValueError: return 0 def save_folder(folders, file): if google_drive: auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) with tarfile.open(file, 'w|gz') as tar: for folder in folders: tar.add(folder) listed = drive.ListFile({'q': "title contains '"+file+"'"}).GetList() if len(listed) > 0: uploaded = drive.CreateFile({'id': listed[0]['id']}) else: uploaded = drive.CreateFile({'title': file}) uploaded.SetContentFile(file) uploaded.Upload() os.remove(file) import numpy as np np.random.seed(seed=0) import tensorflow as tf import tensorflow.contrib.eager as tfe tf.enable_eager_execution() tf.set_random_seed(0) import pandas as pd import sklearn import matplotlib.cm as cm import matplotlib.pyplot as plt import seaborn as sns import time import shutil from IPython.display import clear_output try: step_counter = tf.train.create_global_step() except ValueError: step_counter.assign(0) x_train1 = tf.constant(np.random.rand(1000, 250), dtype=tf.float32) x_train2 = tf.constant(np.random.rand(1000, 250), dtype=tf.float32) x_val = tf.constant(np.random.rand(1000, 250), dtype=tf.float32) x_test = tf.constant(np.random.rand(10000, 250), dtype=tf.float32) b = tf.constant(10*np.random.randn(250, 100), dtype=tf.float32) other_b = tf.constant(10*np.random.randn(250, 100), dtype=tf.float32) noisy_b = tf.constant(b + 3.5*np.random.randn(250, 100), dtype=tf.float32) def synthetic_function(x, matrix): return tf.tanh(tf.tensordot(x, matrix, axes=1)) y_train1 = synthetic_function(x_train1, b) y_val = synthetic_function(x_val, b) y_test = synthetic_function(x_test, b) y_train2_same = synthetic_function(x_train2, b) y_train2_other = synthetic_function(x_train2, other_b) y_train2_noisy = synthetic_function(x_train2, noisy_b) ds_train1 = tf.data.Dataset.from_tensor_slices((x_train1, y_train1)) ds_train2_same = tf.data.Dataset.from_tensor_slices((x_train2, y_train2_same)) ds_train2_other = tf.data.Dataset.from_tensor_slices((x_train2, y_train2_other)) ds_train2_noisy = tf.data.Dataset.from_tensor_slices((x_train2, y_train2_noisy)) ds_train1 = ds_train1.shuffle(1000).batch(100) ds_train2_same = ds_train2_same.shuffle(1000).batch(100) ds_train2_other = ds_train2_other.shuffle(1000).batch(100) ds_train2_noisy = ds_train2_noisy.shuffle(1000).batch(100) class SyntheticModel(tf.keras.Model): def __init__(self): super(SyntheticModel, self).__init__() kwargs = {'activation': 'relu'} self.dense1 = tf.keras.layers.Dense(100, **kwargs) self.dense2 = tf.keras.layers.Dense(100, **kwargs) self.dense3 = tf.keras.layers.Dense(100, **kwargs) self.dense4 = tf.keras.layers.Dense(100, **kwargs) self.dense5_primary = tf.keras.layers.Dense(100) self.dense5_auxiliary = tf.keras.layers.Dense(100) def call(self, x): y = self.dense1(x) y = self.dense2(y) y = self.dense3(y) y = self.dense4(y) return self.dense5_primary(y), self.dense5_auxiliary(y) # Initializes 
a series of networks to be used in the experiments, so that each # combination of hyperparameters is tested with the same initial weights if not os.path.exists('synthetic'): os.mkdir('synthetic') for i in range(20): model = SyntheticModel() _ = model(x_val[:1, :]) model.save_weights('synthetic/model_%i.h5' % i) del model def censored_vector(u, v, mode): """Adjusts the auxiliary loss gradient Adjusts the auxiliary loss gradient before adding it to the primary loss gradient and using a gradient descent-based method Args: u: A tensorflow variable representing the auxiliary loss gradient v: A tensorflow variable representing the primary loss gradient mode: The method used for the adjustment: - Single task: the auxiliary loss gradient is ignored - Multitask: the auxiliary loss gradient is kept as it is - Unweighted cosine: cf. https://arxiv.org/abs/1812.02224 - Weighted cosine: cf. https://arxiv.org/abs/1812.02224 - Projection: cf. https://github.com/vivien000/auxiliary-learning - Parameter-wise: same as projection but at the level of each parameter Returns: A tensorflow variable representing the adjusted auxiliary loss gradient """ if mode == 'Single task' or u is None: return 0 if mode == 'Multitask' or v is None: return u if len(u.shape.as_list()) == 1: u_dot_v, l_u, l_v = tf.reduce_sum(u*v), tf.norm(u), tf.norm(v) else: a, b = tf.reshape(u, [-1]), tf.reshape(v, [-1]) u_dot_v, l_u, l_v = tf.reduce_sum(a*b), tf.norm(a), tf.norm(b) if l_u.numpy() == 0 or l_v.numpy() == 0: return u if mode == 'Unweighted cosine': return u if u_dot_v > 0 else tf.zeros_like(u) if mode == 'Weighted cosine': return tf.maximum(u_dot_v, 0)*u/l_u/l_v if mode == 'Projection': return u - tf.minimum(u_dot_v, 0)*v/l_v/l_v if mode == 'Parameter-wise': return u*((tf.math.sign(u*v)+1)/2) def combined_grads(primary_grad, average_primary_grad, auxiliary_grad, mode, overall=False, lam=1): """Combines auxiliary loss gradients and primary loss gradients Combines a sequence of auxiliary loss gradients and a sequence of primary loss gradients before performing a gradient descent step Args: primary_grad: A list of tensorflow variables corresponding to the primary loss gradient for the network's Keras variables average_primary_grad: A list of tensorflow variables corresponding to exponential moving averages of the elements above auxiliary_grad: A list of tensorflow variables corresponding to the auxiliary loss gradient for the network's Keras variables mode: The method used for the adjustment: - Single task: the auxiliary loss gradient is ignored - Multitask: the auxiliary loss gradient is kept as it is - Unweighted cosine: cf. https://arxiv.org/abs/1812.02224 - Weighted cosine: cf. https://arxiv.org/abs/1812.02224 - Projection: cf. https://github.com/vivien000/auxiliary-learning - Parameter-wise: same as projection but at the level of each parameter overall: True if the transformation takes place at the level of the whole parameter vector, i.e. 
the concatenation of all the Keras variables of the network lambda: Float balancing the primary loss and the auxiliary loss Returns: A list of tensorflow variables combining the primary loss gradients and the auxiliary loss gradients and that can directly be used for the next gradient descent step """ result = [0]*len(primary_grad) a = tf.constant([], dtype=tf.float32) aa = tf.constant([], dtype=tf.float32) b = tf.constant([], dtype=tf.float32) shapes = [] for i in range(len(primary_grad)): if auxiliary_grad[i] is None or mode == 'Single task': result[i] = primary_grad[i] elif primary_grad[i] is None: result[i] = lam*auxiliary_grad[i] elif mode == 'Multitask': result[i] = primary_grad[i] + lam*auxiliary_grad[i] elif not overall: if average_primary_grad is None: result[i] = (primary_grad[i] + lam*censored_vector(auxiliary_grad[i], primary_grad[i], mode)) else: result[i] = (primary_grad[i] + lam*censored_vector(auxiliary_grad[i], average_primary_grad[i], mode)) else: a = tf.concat([a, tf.reshape(primary_grad[i], [-1])], axis=0) if average_primary_grad is not None: aa = tf.concat([aa, tf.reshape(average_primary_grad[i], [-1])], axis=0) b = tf.concat([b, tf.reshape(auxiliary_grad[i], [-1])], axis=0) shapes.append((primary_grad[i].shape, np.product(primary_grad[i].shape.as_list()), i)) if len(shapes) > 0: if average_primary_grad is None: c = a + lam*censored_vector(b, a, mode) else: c = a + lam*censored_vector(b, aa, mode) start = 0 for i in range(len(shapes)): shape, length, index = shapes[i] result[index] = tf.reshape(c[start:start+length], shape) start += length return result def train_iteration(model, average_primary_grad, alpha, optimizer, ds_train2, writer, step_counter, mode, overall=False, lam=1): if mode != 'Single task': train_iterator2 = ds_train2.make_one_shot_iterator() with writer.as_default(), tf.contrib.summary.always_record_summaries(): for x1, y1 in ds_train1.make_one_shot_iterator(): if mode != 'Single task': x2, y2 = train_iterator2.get_next() with tf.GradientTape(persistent=True) as tape: y1_hat = model(x1)[0] primary_loss = tf.reduce_mean((y1_hat-y1)**2) if mode != 'Single task': y2_hat = model(x2)[1] auxiliary_loss = tf.reduce_mean((y2_hat-y2)**2) tf.contrib.summary.scalar('primary_loss', primary_loss) primary_grad = tape.gradient(primary_loss, model.variables) if mode == 'Single task': optimizer.apply_gradients(zip(primary_grad, model.variables), global_step=step_counter) else: tf.contrib.summary.scalar('auxiliary_loss', auxiliary_loss) auxiliary_grad = tape.gradient(auxiliary_loss, model.variables) if alpha != 1: if average_primary_grad is None: average_primary_grad = primary_grad else: for i in range(len(average_primary_grad)): if primary_grad[i] is not None: average_primary_grad[i] = ((1 - alpha)*average_primary_grad[i] + alpha*primary_grad[i]) grad = combined_grads(primary_grad, average_primary_grad, auxiliary_grad, mode, overall=overall, lam=lam) optimizer.apply_gradients(zip(grad, model.variables), global_step=step_counter) return average_primary_grad def get_metrics(dataset, model, writer, step_counter): x, y = (x_val, y_val) if dataset == 'val' else (x_test, y_test) with writer.as_default(), tf.contrib.summary.always_record_summaries(): y_hat = model(x)[0] primary_loss = tf.reduce_mean((y_hat-y)**2) tf.contrib.summary.scalar('primary_loss', primary_loss) return primary_loss.numpy() def run_experiment(name, model, alpha, ds_train2, mode, overall, lam, output): train_writer = tf.contrib.summary.create_file_writer('./log/train/' + name, flush_millis=10000) 
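# Note on run_experiment (comments added for clarity): the three writers created
# here stream train/validation/test summaries to TensorBoard. The loop further
# down implements early stopping: training repeats until the validation loss has
# not improved for 10 consecutive passes over the training data, the best weights
# are checkpointed after every improvement, and that best checkpoint is restored
# before the final test-set evaluation.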
val_writer = tf.contrib.summary.create_file_writer('./log/val/' + name, flush_millis=10000) test_writer = tf.contrib.summary.create_file_writer('./log/test/' + name, flush_millis=10000) step_counter.assign(0) optimizer = tf.train.AdamOptimizer() checkpoint_dir = 'model_synthetic' shutil.rmtree(checkpoint_dir, ignore_errors=True) checkpoint_prefix = os.path.join(checkpoint_dir, 'model.ckpt') root = tf.contrib.eager.Checkpoint(optimizer=optimizer, model=model, optimizer_step=step_counter) average_primary_grad = None iteration, not_better, best_loss = 1, 0, np.Inf while not_better < 10: average_primary_grad = train_iteration(model, average_primary_grad, alpha, optimizer, ds_train2, train_writer, step_counter, mode, overall=overall, lam=lam) val_loss = get_metrics('val', model, val_writer, step_counter) clear_output() print(output) print(iteration, val_loss) if val_loss < best_loss: not_better, best_loss = 0, val_loss root.save(file_prefix=checkpoint_prefix) else: not_better += 1 iteration += 1 root.restore(tf.train.latest_checkpoint(checkpoint_dir)) return model, get_metrics('test', model, test_writer, step_counter) def run_experiments(configs, filename): already_done = number_log_lines(filename) runs_list = [] header = 'situation,mode,overall,alpha,lam,test_loss' output = '' current_iteration = -1 for iteration, mode, overall, lam, case, alpha in configs[already_done:]: start = time.time() model = SyntheticModel() _ = model(x_val[:1, :]) model.load_weights('synthetic/model_%i.h5' % iteration) if case == 'Same task': ds_train2 = ds_train2_same elif case == 'Similar task': ds_train2 = ds_train2_noisy elif case == 'Unrelated task': ds_train2 = ds_train2_other output += 'Iteration #%d: %s, %s (overall: %s, %f)\n' % (iteration, mode, case, overall, lam) name = '%s-%s-%s-%s-%f-%f' % (iteration, mode, case, overall, lam, alpha) test_loss = run_experiment(name, model, alpha, ds_train2, mode, overall, lam, output)[1] template = 'Loss: %.3f (%d seconds)\n\n' output += template % (test_loss, time.time()-start) dict1 = {'case': case, 'mode': mode, 'lam': lam, 'overall': overall, 'alpha': alpha, 'test_loss': test_loss} runs_list.append(dict1) line = '%s,%s,%s,%f,%f,%f' % (case, mode, overall, alpha, lam, test_loss) append_to_log(filename, line, header) return pd.DataFrame(runs_list) iterations = 20 filename = 'synthetic_dataset_experiments.csv' configs = [(iteration, mode, overall, lam, case, alpha) for iteration in range(iterations) for lam in [0] for overall in [True] for mode in ['Single task'] for case in ['Same task'] for alpha in [1]] configs += [(iteration, mode, overall, lam, case, alpha) for iteration in range(iterations) for lam in [.1, .3, 1, 3, 10] for overall in [True] for mode in ['Multitask'] for case in ['Same task', 'Similar task', 'Unrelated task'] for alpha in [1]] configs += [(iteration, mode, overall, lam, case, alpha) for iteration in range(iterations) for lam in [.1, .3, 1, 3, 10] for overall in [True] for mode in ['Projection'] for case in ['Same task', 'Similar task', 'Unrelated task'] for alpha in [0.01, 0.1, 1]] configs += [(iteration, mode, overall, lam, case, alpha) for iteration in range(iterations) for lam in [.1, .3, 1, 3, 10] for overall in [True] for mode in ['Weighted cosine', 'Unweighted cosine'] for case in ['Same task', 'Similar task', 'Unrelated task'] for alpha in [0.01]] configs += [(iteration, mode, overall, lam, case, alpha) for iteration in range(iterations) for lam in [.1, .3, 1, 3, 10] for overall in [False] for mode in ['Projection', 'Parameter-wise'] for 
case in ['Same task', 'Similar task', 'Unrelated task'] for alpha in [0.01]] configs += [(iteration, mode, overall, lam, case, alpha) for iteration in range(iterations) for lam in [.1, .3, 1, 3, 10] for overall in [True] for mode in ['Weighted cosine', 'Unweighted cosine'] for case in ['Same task', 'Similar task', 'Unrelated task'] for alpha in [0.1, 1]] run_experiments(configs, filename)Data Cleaningnumeric_cols = preprocess.dtype_select(dtypes=np.number) cat_cols = preprocess.dtype_select(dtypes='object') #Impute Numeric Values X[numeric_cols] = preprocess.impute(numeric_cols=numeric_cols) #Scale Numeric Values X[numeric_cols] = preprocess.minMax(numeric_cols=numeric_cols) X.describe() #Replacing NaN With Unknown So As To Avoid The Encoding To Throw An Error X[cat_cols] = X[cat_cols].fillna('Unknown') X[cat_cols].isna().sum() #Encode Categorical Column Values X[cat_cols] = preprocess.one_hot(cat_cols=cat_cols) for col in cat_cols: X.pop(col) X #Check Y For NaN Values print(Y.isna().sum()) # Change NaN To Most Common Category Y = Y.replace(np.nan, 'No') #ReCheck Y For NaN Values Y.isna().sum()RainTomorrow 0 dtype: int64Data Exploration/Data AnalysisX.describe() exploration_copy = X.copy() exploration_copy['RainTomorrow'] = df['RainTomorrow'] #Mapping Y Values raintom_vals = {'No': 0, 'Yes': 1} exploration_copy['RainTomorrow'] = df.RainTomorrow.map(raintom_vals) Y['RainTomorrow'] = df.RainTomorrow.map(raintom_vals) exploration_copy.plot(x='RainToday_Yes',y='RainTomorrow',kind='hist') XTrain/Test Splityear = pd.to_datetime(df.Date).dt.year train_X = exploration_copy[year<2015] val_X = exploration_copy[year==2015] test_X = exploration_copy[year>2015] train_Y = train_X.pop('RainTomorrow') val_Y = val_X.pop('RainTomorrow') test_Y = test_X.pop('RainTomorrow')Modelfrom sklearn.tree import DecisionTreeClassifier model = DecisionTreeClassifier(max_depth=3, random_state=42) %%time model.fit(train_X, train_Y) from sklearn.metrics import accuracy_score, confusion_matrix val_preds = model.predict(val_X) accuracy_score(val_Y, val_preds)Finding Best `max_depth` Fitdef maxdepth_error(md): model = DecisionTreeClassifier(max_depth=md, random_state=42) model.fit(train_X, train_Y) train_preds = model.predict(train_X) val_preds = model.predict(val_X) test_preds = model.predict(test_X) train_acc = accuracy_score(train_Y, train_preds) val_acc = accuracy_score(val_Y, val_preds) test_acc = accuracy_score(test_Y, test_preds) return {"Max Depth ": md, "Train Acc":train_acc, "Val Acc":val_acc, "Test Acc":test_acc, "Avg Acc":(val_acc+test_acc)/2} %%time errors_df = pd.DataFrame([maxdepth_error(md) for md in range(1, 21)]) errors_df importance_df = pd.DataFrame({ 'feature': X.columns, 'importance': model.feature_importances_ }).sort_values('importance', ascending=False) importance_df.head(10)Visualising Modelsfrom sklearn.tree import plot_tree, export_text import matplotlib.pyplot as plt plt.figure(figsize=(80,20)) plot_tree(model, feature_names=X.columns, filled=True); tree_text = export_text(model, max_depth=10, feature_names=list(X.columns)) print(tree_text[:5000]) aus_rain = { 'model': model, 'train_X': train_X, 'train_Y': train_Y, 'num_cols': numeric_cols, 'cat_cols': cat_cols, } import joblib joblib.dump(aus_rain, '../models/trees_ausRain.joblib')Applying topological data analysis to corpus comparison This is a demostration of some experiments given in my paper.So, let's load some packages:import re from gensim.models import word2vec from sklearn.manifold import TSNE import matplotlib.pyplot as plt import 
dionysus as dn import numpy as np import pymorphy2 import nltk from string import punctuationPrepare texts Then we create some usefull functions:def tsne_plot(model): """Creates and TSNE model and plots it""" labels = [] tokens = [] for word in model.wv.vocab: tokens.append(model[word]) labels.append(word) perplexitys = [5, 30, 50, 100] # perplexitys = [0, 3] position = 221 plt.figure(1, figsize=(10, 10)) for i in range(len(perplexitys)): tsne_model = TSNE(perplexity=perplexitys[i], n_components=2, init='pca', n_iter=5000, random_state=23) new_values = tsne_model.fit_transform(tokens) x = [] y = [] for value in new_values: x.append(value[0]) y.append(value[1]) print(position) plt.subplot(position) plt.title('Perplexity = {}'.format(perplexitys[i])) for i in range(len(x)): plt.scatter(x[i],y[i]) # plt.annotate(labels[i], # xy=(x[i], y[i]), # xytext=(5, 2), # textcoords='offset points', # ha='right', # va='bottom') position += 1 plt.show() PATH = '/Users/alexey/Documents/GitHub/dialog_persistent/stop words/sw.txt' with open(PATH) as f: stop_words = f.read().splitlines() def check_sentence(sentence): '''Check for stop words''' for element in sentence: if element in stop_words: sentence.remove(element) return sentence def tokenize_and_lemmatize(path, text=None): ''' This function is used to tokenize and lemmatize texts. ''' if text: file = text else: file = open(path).read() morph = pymorphy2.MorphAnalyzer() tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') sentences = tokenizer.tokenize(file) sentenses_splitted = [] for s in sentences: s = s.translate(str.maketrans('','', punctuation)) sentenses_splitted.append(s.lower().split()) for sentence in sentenses_splitted: for i in range(len(sentence)): p = morph.parse(sentence[i])[0] sentence[i] = p.normal_form for i in range(len(sentenses_splitted)): sentenses_splitted[i] = check_sentence(sentenses_splitted[i]) return sentenses_splittedGet vectors from word2vec models:def get_vectors(model, dimensions=100): ''' This function is used to extract vectors ''' tokens = [] for word in model.wv.vocab: tokens.append(model[word]) result_array = np.empty((0, dimensions)) for token in tokens: result_array = np.append(result_array, [token], axis=0) return result_arrayTransform coordinates (escape all negatives due to algoritm problems):def transorm(array): ''' Transalate data cloud to positive coordinates ''' if array.min() < 0: new_array = array - array.min() return new_array else: raise ValueError('This array has no negative values')Then some experiments:def analyze_text(path, min_count=10, transf=False, text_f=None): if text_f: text = tokenize_and_lemmatize(path=None, text=text_f) else: text = tokenize_and_lemmatize(path=path) model = word2vec.Word2Vec(text, size=100, window=20, min_count=min_count, workers=4) vectors = get_vectors(model) if transf is True: vectors = transorm(vectors) f_lower_star = dn.fill_freudenthal(vectors) p = dn.homology_persistence(f_lower_star) dgms = dn.init_diagrams(p, f_lower_star) return dgms, model, vectorsHere we apply our function to out text (Nabokov's Mashenka):text_1 = tokenize_and_lemmatize('/Users/alexey/Documents/GitHub/dialog_persistent/texts/mashenka.txt') text_1[1:3]Then we create a model:model_1 = word2vec.Word2Vec(text_1, size=100, window=20, min_count=10, workers=4) # tsne_plot(model_1)Here I obtain vectors from the model 1 -- with Nabokov's Mashenka:vectors_1 = get_vectors(model_1) vectors_1 = transorm(vectors_1)Then create a lower star filtration:f_lower_star = dn.fill_freudenthal(vectors_1) p = 
dn.homology_persistence(f_lower_star) dgms1 = dn.init_diagrams(p, f_lower_star) # dn.plot.plot_bars(dgms1[0], show = True, order='death')Then let's compare it with another Nabokov's book -- Drugie Berega (which is bigger):text_2 = tokenize_and_lemmatize('/Users/alexey/Documents/GitHub/dialog_persistent/texts/drugieberega.txt') model_2 = word2vec.Word2Vec(text_2, size=100, window=20, min_count=10, workers=4) vectors_2 = transorm(get_vectors(model_2)) len(vectors_2) f_lower_star = dn.fill_freudenthal(vectors_2) p = dn.homology_persistence(f_lower_star) dgms2 = dn.init_diagrams(p, f_lower_star) # dn.plot.plot_bars(dgms2[0], show = True, order='death')Let's compare them:# print(dn.wasserstein_distance(dgms1[0], dgms2[0])) # dn.bottleneck_distance(dgms1[0], dgms2[0]) dn.wasserstein_distance(dgms1[0], dgms2[0]) text_2 = tokenize_and_lemmatize(sentence='Я пришел домой', path=11) dgms_db = analyze_text('/Users/alexey/Documents/GitHub/dialog_persistent/texts/drugieberega.txt', transf=True) dgms_m = analyze_text('/Users/alexey/Documents/GitHub/dialog_persistent/texts/mashenka.txt', transf=True)With transofrmationdn.wasserstein_distance(dgms_db[0], dgms_m[0])Without:dgms_db_nt = analyze_text('/Users/alexey/Documents/GitHub/dialog_persistent/texts/drugieberega.txt') dgms_m_nt = analyze_text('/Users/alexey/Documents/GitHub/dialog_persistent/texts/mashenka.txt') dn.wasserstein_distance(dgms_db_nt[0], dgms_m_nt[0]) dn.plot.plot_bars(dgms_db_nt[0], show = True, order='death') dn.plot.plot_bars(dgms_db[0], show = True, order='death') dn.plot.plot_diagram_density(dgms_db_nt[0], show = True) dn.plot.plot_diagram_density(dgms_db[0], show = True)Experiment with mixing In this experiment I will take 60 percent from one text and 40% from another (0.6 of shinel 0,4 of vii):dgms_mashenka = analyze_text('/Users/alexey/Documents/GitHub/dialog_persistent/texts/mashenka.txt') dgms_sh = analyze_text('/Users/alexey/Documents/GitHub/dialog_persistent/texts/shinel') dn.wasserstein_distance(dgms_mashenka[0], dgms_sh[0]) for i in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]: text_shinel = open('/Users/alexey/Documents/GitHub/dialog_persistent/texts/shinel').read() text_vii = open('/Users/alexey/Documents/GitHub/dialog_persistent/texts/mashenka.txt').read() text_sh_06 = text_shinel[1:int(len(text_shinel)*i)] text_vii_04 = text_vii[1:int(len(text_shinel) - len(text_shinel)*i)] text_100 = text_sh_06 + text_vii_04 dgms_100 = analyze_text(text_f=text_100, path=None) print(i, ' для шинели') print('Для Машеньки: ', dn.wasserstein_distance(dgms_mashenka[0], dgms_100[0]), '; Для шинели: ', dn.wasserstein_distance(dgms_sh[0], dgms_100[0])) dn.plot.plot_bars(dgms_100[0], show = True, order='death') dgms_1, model_1, vectors_1 = analyze_text('/Users/alexey/Documents/GitHub/dialog_persistent/texts/Anna Karenina.txt', min_count=100) dgms_2, model_2, vectors_2 = analyze_text('/Users/alexey/Documents/GitHub/dialog_persistent/texts/Woskr.txt', min_count=100)/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/ipykernel_launcher.py:8: DeprecationWarning: Call to deprecated `__getitem__` (Method will be removed in 4.0.0, use self.wv.__getitem__() instead).Distance between and Woskreseniedn.wasserstein_distance(dgms_1[1], dgms_2[1])Plots for :vectors = transorm(vectors_1) f_lower_star = dn.fill_freudenthal(vectors) p = dn.homology_persistence(f_lower_star) dgms = dn.init_diagrams(p, f_lower_star) dn.plot.plot_bars(dgms[1], show = True, order='death') dn.plot.plot_diagram_density(dgms[1], show = True) 
tsne_plot(model_1)Plots for Woskresenievectors = transorm(vectors_2) f_lower_star = dn.fill_freudenthal(vectors) p = dn.homology_persistence(f_lower_star) dgms = dn.init_diagrams(p, f_lower_star) dn.plot.plot_bars(dgms[1], show = True, order='death') dn.plot.plot_diagram_density(dgms[1], show = True) tsne_plot(model_2) dn.plot.plot_bars(dgms[1], show = True, order='death')Gogol partdgms_3, model_3, vectors_3 = analyze_text('/Users/alexey/Documents/GitHub/dialog_persistent/texts/shinel') dgms_4, model_4, vectors_4 = analyze_text('/Users/alexey/Documents/GitHub/dialog_persistent/texts/vii') vectors = transorm(vectors_3) f_lower_star = dn.fill_freudenthal(vectors) p = dn.homology_persistence(f_lower_star) dgms = dn.init_diagrams(p, f_lower_star) dn.plot.plot_bars(dgms[0], show = True, order='death') vectors = transorm(vectors_4) f_lower_star = dn.fill_freudenthal(vectors) p = dn.homology_persistence(f_lower_star) dgms = dn.init_diagrams(p, f_lower_star) dn.plot.plot_bars(dgms[0], show = True, order='death') dn.wasserstein_distance(dgms_3[0], dgms_4[0]) dn.wasserstein_distance(dgms_1[0], dgms_4[0]) dn.wasserstein_distance(dgms_2[0], dgms_4[0]) dn.wasserstein_distance(dgms_2[0], dgms_3[0]) dn.wasserstein_distance(dgms_1[0], dgms_3[0])Nabokov partdgms_5, model_5, vectors_5 = analyze_text('/Users/alexey/Documents/GitHub/dialog_persistent/texts/mashenka.txt') dgms_6, model_6, vectors_6 = analyze_text('/Users/alexey/Documents/GitHub/dialog_persistent/texts/dar.txt', min_count=20) print(dn.wasserstein_distance(dgms_5[1], dgms_6[1])) print(len(vectors_6), len(vectors_5))3.6027541160583496 522 508Воскресение и Машенькаdn.wasserstein_distance(dgms_2[1], dgms_5[1]) dn.wasserstein_distance(dgms_1[1], dgms_6[1]) dn.wasserstein_distance(dgms_2[1], dgms_6[1]) dn.wasserstein_distance(dgms_1[1], dgms_2[1])Анна Каренина и Дарdn.wasserstein_distance(dgms_1[1], dgms_6[1]) dn.wasserstein_distance(dgms_1[1], dgms_5[1]) vectors = transorm(vectors_5) f_lower_star = dn.fill_freudenthal(vectors) p = dn.homology_persistence(f_lower_star) dgms = dn.init_diagrams(p, f_lower_star) dn.plot.plot_bars(dgms[0], show = True, order='death') vectors = transorm(vectors_6) f_lower_star = dn.fill_freudenthal(vectors) p = dn.homology_persistence(f_lower_star) dgms = dn.init_diagrams(p, f_lower_star) dn.plot.plot_bars(dgms[0], show = True, order='death') dn.wasserstein_distance(dgms_5[0], dgms_6[0]) print(len(vectors_5), len(vectors_6))Single continuous treatment.Valid estimation models include- DoubleML- DeepIV- CausalTree The dataset is generated by the following process [1]Below we use the einstein notation to alleviate the headaches of specifiying dimensions of tensors.\begin{align*} x & = \beta_i w^i \\ y & = x \theta(v^i) + \gamma_j w^j + \epsilon \\ w & \sim \text{Normal}(0, I_{n_w})\\ v & \sim \text{Uniform}(0, 1)^{n_v}\end{align*}train, val, treatment_effect = single_continuous_treatment() adjustment = train.columns[:-4] covariate = 'c_0' outcome = 'outcome' treatment = 'treatment' def exp_te(x): return np.exp(2*x) dat = np.array(list(product(np.arange(0, 1, 0.01), repeat=1))).ravel() data_test = pd.DataFrame({'c_0': dat}) true_te = np.array([exp_te(xi) for xi in data_test[covariate]]) adjustment = train.columns[:-4] covariate = 'c_0' outcome = 'outcome' treatment = 'treatment' dml = DML4CATE( x_model=RandomForestRegressor(), y_model=RandomForestRegressor(), cf_fold=1, covariate_transformer = PolynomialFeatures(degree=3,include_bias=False) ) dml.fit( train, outcome, treatment, adjustment, covariate, ) from 
ylearn.effect_interpreter.ce_interpreter import CEInterpreter cei = CEInterpreter(max_depth=2,) cei.fit(data=data_test, est_model=dml) cei.plot(feature_names=['a', 'b', 'c']) plt.show() interpret_result = cei.interpret(data=data_test[4:6]) print(interpret_result['sample_0'])decision node 0: (covariate [0, 1] = 0.0015999999595806003) <= 0.44225001335144043 decision node 1: (covariate [0, 2] = 6.399999983841553e-05) <= 0.0664605014026165Table of Contents* **Getting Started**: importing/setting up the data* **The Long Way** : manually doing all the typical steps of pre-processing/training/predicting, using **oo-learning** classes * Splitting * Transforming * Training * Predicting * Evaluating* **The Short Way**: presents the **`ModelTrainer`** class, which encapsulates all of the previously mentioned steps. Note: this notebook is meant to be a demo of some of the capabilities of **`oo-learning`** (https://github.com/shane-kercheval/oo-learning); it is not meant to show the best approach to exploring/cleaning/modeling this particular dataset. Also, with most graphs (e.g. correlations/box-plots/etc.) I will spend very little time commenting on the significance of any interesting or patterns. Again, the intent is to show a demo, not a guide to data analysis. Getting StartedIn the last Notebook, https://github.com/shane-kercheval/oo-learning/blob/master/examples/classification-titanic/1-Exploring%20the%20Titanic%20Dataset.ipynb, I showed how to get started and explore the dataset. In this Notebook, we'll look at basic operations like training models, transformations, etc. Set Up Environment# !pip install oolearning --upgrade from oolearning import * import pandas as pd import matplotlib.pyplot as plt %matplotlib inline pd.set_option('display.max_colwidth', None) pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) width = 10 plt.rcParams['figure.figsize'] = [width, width/1.333]Import Data `ExploreRegressionDataset` is a convenience class described in the [first notebook of this series](https://github.com/shane-kercheval/oo-learning/blob/master/examples/regression-insurance/1-Exploring.ipynb).csv_file = '../data/insurance.csv' target_variable = 'expenses' explore = ExploreRegressionDataset.from_csv(csv_file_path=csv_file, target_variable=target_variable) explore.dataset.head()The Long Way The following sections provide a demo of the basic individual classes that are used when training a model. In the **`"The Short Way"`** section, next, I demo some of the classes that encapsulate the logic and take care of the mundane and repetitive tasks. Splitting the Data Let's start out by splitting our data into a training set and holdout set.To do this, we can use, for example, the **`RegressionStratifiedDataSplitter`**, which as the name suggests, splits the data into training/holdout sets while maintaining a similar distribution (think histogram) of the target variable. 
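To make the stratification idea concrete: a continuous target is typically binned first, and the split is then stratified on the bins. Below is a minimal sketch of that idea in plain scikit-learn and pandas; it illustrates the concept only and is not oo-learning's internal implementation (the bin count and seed are arbitrary choices):

```python
# Conceptual sketch of regression-stratified splitting: bin the continuous
# target, then stratify an ordinary train/holdout split on those bins.
# (Illustrative only; not the RegressionStratifiedDataSplitter source.)
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

def stratified_regression_split(y, holdout_ratio=0.20, n_bins=10, seed=42):
    bins = pd.qcut(y, q=n_bins, labels=False, duplicates='drop')  # quantile bins
    train_idx, holdout_idx = train_test_split(
        np.arange(len(y)), test_size=holdout_ratio, stratify=bins, random_state=seed)
    return train_idx, holdout_idx
```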
Technically, the Splitter will give us the indexes that should go into each set, and we will make the split.splitter = RegressionStratifiedDataSplitter(holdout_ratio=0.20) # set aside 20% of the data for the holdout set training_indexes, holdout_indexes = splitter.split(target_values=explore.dataset.expenses) training_y = explore.dataset.iloc[training_indexes][target_variable] training_x = explore.dataset.iloc[training_indexes].drop(columns=target_variable) holdout_y = explore.dataset.iloc[holdout_indexes][target_variable] holdout_x = explore.dataset.iloc[holdout_indexes].drop(columns=target_variable) training_x.shape holdout_x.shapeLet's check to make sure the Splitter did the other part of its job, which was to split the data in a stratified way, keeping the proportions of the target variable the same.training_y.hist() holdout_y.hist()Pretty close! Transforming the DataFrequently, we need to transform the data before we can train it with various models.Additionally, we'll want to *fit* transformations on the training set (i.e. the transformation object will extract the necessary attributes from the training set (e.g. median values for each column for a transformer that imputes missing values)) and then apply those same transformations to the holdout set (i.e. use the medians calculated/fitted on the training set, rather than the holdout set).Let's create a list of transformation objects that we want to apply to the training and holdout sets.Please see the [classification](http://localhost:8888/notebooks/examples/classification-titanic/2-Basic%20Modeling.ipynb) example for a more elaborate example and additional information.transformations = [DummyEncodeTransformer(CategoricalEncoding.DUMMY)] # create dummy variables for categoric featuresIn order to help manage all of the transformations we want to do (especially when we have more than one transformation object), we'll use the **`TransformerPipeline`** class and fit/transform the training data, then transform (without fitting) the holdout data.pipeline = TransformerPipeline(transformations=transformations) transformed_training_data = pipeline.fit_transform(training_x) # fit & transform transformed_holdout_data = pipeline.transform(holdout_x) # fit only training_x.head() transformed_training_data.head()Training a Model, & PredictingFinally, let's train a ridge regression model.The pattern for training a model includes two classes in most cases. One class specifies the model, in this case **`RidgeRegressor`**, and the other class species the hyper-parameters we want to use in the model. The corresponding class for the Ridge model is **`RidgeRegressorHP`**.The corresponding **`HP`** classes define the specific parameters that can be tuned, specified in the **`__init__(...)`** function. Instantiating the class without providing any values will result in default values being used.In most cases, the model classes, such as `RidgeRegressor`, are wrappers around `scikit-learn` classes, and the `HP` classes' default values for the hyper-parameters are the same values that the underlying `scikit-learn` classes use.model = RidgeRegressor() model.train(data_x=transformed_training_data, data_y=training_y, hyper_params=RidgeRegressorHP()) # default hyper-parameters predictions = model.predict(data_x=transformed_holdout_data) predictions[0:10]Evaluating PerformanceWe've trained our model on the training data, and predicted on the holdout. 
Now we need some way of evaluating performance.We could use, for example, `scikit-learn`'s MSE or MAE score via `mean_squared_error` or `mean_absolute_error`.from sklearn.metrics import mean_squared_error, mean_absolute_error import math math.sqrt(mean_squared_error(y_true=holdout_y, y_pred=predictions)) mean_absolute_error(y_true=holdout_y, y_pred=predictions)**`Evaluator`** object Great. But we can also use, for example, `oo-learning`'s **`RegressionEvaluator`** class. This gives us common quality metrics as well as convenient graphs.evaluator = RegressionEvaluator() evaluator.evaluate(actual_values=holdout_y, predicted_values=predictions) # note: we pass the actual and predicted values directly evaluator.all_quality_metrics evaluator.plot_residuals_vs_fits() evaluator.plot_predictions_vs_actuals() evaluator.plot_residuals_vs_actuals()The Short WayWhat we did is pretty standard, and we'll probably do the same set of generic steps time and time again.**`ModelTrainer`** is a class that encapsulates the boring details of what we did, so we can do it much more quickly, with more concise and descriptive code.The following code produces the identical result to all the work we did above.trainer = ModelTrainer(model=RidgeRegressor(), model_transformations=[DummyEncodeTransformer(CategoricalEncoding.DUMMY)], splitter= RegressionStratifiedDataSplitter(holdout_ratio=0.2), evaluator=RegressionEvaluator()) trainer.train_predict_eval(data=explore.dataset, target_variable='expenses', hyper_params=RidgeRegressorHP()) # access the holdout metrics (a `training_evaluator` is also available) and get a summary trainer.holdout_evaluator.all_quality_metrics trainer.holdout_evaluator.plot_residuals_vs_fits()If-statement in Pythonguests = ['Alice', 'Bob', 'Charlie'] max_guests = 3 if len(guests) > max_guests: # Too many guests on the list print('There are too many guests') else: # 0, 1, 2, ... max_guests guests in the list print('Found', len(guests)) get_cosine_distance get_mean_difference get_mean_ratio get_median_difference get_median_ratio get_pearson_correlation get_signal_to_noisefor fu in ( kwat.vector_vector.get_cosine_distance, kwat.vector_vector.get_mean_difference, kwat.vector_vector.get_mean_ratio, kwat.vector_vector.get_median_difference, kwat.vector_vector.get_median_ratio, kwat.vector_vector.get_pearson_correlation, kwat.vector_vector.get_signal_to_noise, ): print(fu(ve0, ve1))** This file gives a brief overview of the capabilities of the code. *** If you want to predict the spectrum of a single or binary star with particular labels, you'll want the "spectral_model" package.* If you want to fit an observed spectrum, see the "fitting" package.* Downloading and processing APOGEE spectra is handled by the "process_spectra" package.* The "utils" package contains some general-purpose functions used by the other packages.Many of the functions require you to pass them a particular neural network (really, a list of biases and weights parameterizing the network), so we read in all the networks we'll be using at the beginning and then pass them to various functions as we go. 
This is a bit cumbersome, but the advantage is that if you train a new network (with architechture compatible with the existing code) you can just pass it to the relevant functions without having to rewrite everything.from __future__ import absolute_import, division, print_function # Python2 compatibility import numpy as np import matplotlib.pyplot as plt %matplotlib inline from binspec import utils from binspec import spectral_model from binspec import fitting # read in the standard wavelength grid onto which we interpolate spectra. wavelength = utils.load_wavelength_array() # read in all individual neural networks we'll need. NN_coeffs_norm = utils.read_in_neural_network(name = 'normalized_spectra') NN_coeffs_flux = utils.read_in_neural_network(name = 'unnormalized_spectra') NN_coeffs_R = utils.read_in_neural_network(name = 'radius') NN_coeffs_Teff2_logg2 = utils.read_in_neural_network(name = 'Teff2_logg2')Let's use the data-driven spectral model to predict the APOGEE-like spectrum of a single star similar to the Sun.spec_err = 1e-2*np.ones(len(wavelength)) # for a single-star model, the format of "labels" is [Teff, logg, [Fe/H], [Mg/Fe], v_macro, v_los]. real_labels = [5800, 4.44, 0, 0, 5, 10] # redshift by 10 km/s. real_spec = spectral_model.get_normalized_spectrum_single_star(labels = real_labels, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux, spec_err = spec_err) # zoom in on a small region of the spectrum so we can see what's going on. lambda_min, lambda_max = 15350, 15450# for plotting m = (wavelength < lambda_max) & (wavelength > lambda_min) plt.figure(figsize=(14, 4)) plt.plot(wavelength[m], real_spec[m], 'k', lw=0.5) plt.xlim(lambda_min, lambda_max) plt.ylim(0.7, 1.05)Now let's add some noise to this model spectrum, and then fit it to see if we can recover the labels we put in.data_spec = real_spec + 0.01*np.random.randn(len(real_spec)) popt, pcov, model_spec = fitting.fit_normalized_spectrum_single_star_model(norm_spec = data_spec, spec_err = spec_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux, p0 = None, num_p0 = 1) plt.figure(figsize=(14, 4)) m = (wavelength < lambda_max) & (wavelength > lambda_min) plt.plot(wavelength[m], data_spec[m], 'k', lw=0.5, label = '"data" spec') plt.plot(wavelength[m], model_spec[m], 'r--', lw=0.5, label = 'best-fit model') plt.xlim(lambda_min, lambda_max) plt.legend(loc = 'best', frameon = False, fontsize = 18) # verify that our best-fit labels are close to what we put in. print(popt)[ 5.79605530e+03 4.44239327e+00 -8.39633505e-03 4.67740152e-03 4.96638546e+00 1.00293424e+01]Now let's predict the spectrum of an unresolved binary.# predict a binary spec # for a binary, the labels are [Teff1, logg1, [Fe/H], [Mg/Fe], mass ratio, v_macro1, v_macro2, v_los1, v_los2] real_bin_labels = [5800, 4.44, 0, 0, 0.7, 2, 5, -10, 10] specerr = 1e-2*np.ones(len(wavelength)) real_bin_spec = spectral_model.get_normalized_spectrum_binary(labels = real_bin_labels, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux, NN_coeffs_R = NN_coeffs_R, NN_coeffs_Teff2_logg2 = NN_coeffs_Teff2_logg2, spec_err = specerr) plt.figure(figsize=(14, 4)) m = (wavelength < lambda_max) & (wavelength > lambda_min) plt.plot(wavelength[m], real_bin_spec[m], 'k', lw=0.5) plt.xlim(lambda_min, lambda_max) plt.ylim(0.75, 1.05)Again, let's add some noise and then fit the spectrum. We'll fit it with both a single-star model and a binary model, and then compare the fits. Notice that we always pass the fitting function an arguement "num_p0". 
This determines how many different "walkers" to initialize for the optimizer, to minimize the chance of it's converging on a local mininimum. For a simple single-star model, there's little danger of this happening, but it's more likely for more complicated models with more labels. How long the code takes to run scales linearly with the number of walkers.data_bin_spec = real_bin_spec + 0.01*np.random.randn(len(real_bin_spec)) # fit single-star model popt_single, pcov, single_spec = fitting.fit_normalized_spectrum_single_star_model(norm_spec = data_bin_spec, spec_err = specerr, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux, p0 = None, num_p0 = 1) # fit binary model. # use the best-fit single-star model ("popt_single") as a starting guess. popt_binary, pcov, bin_spec = fitting.fit_normalized_spectrum_binary_model(norm_spec = data_bin_spec, spec_err = specerr, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux, NN_coeffs_R = NN_coeffs_R, NN_coeffs_Teff2_logg2 = NN_coeffs_Teff2_logg2, p0_single = popt_single, num_p0 = 10) plt.figure(figsize=(14, 4)) m = (wavelength < lambda_max) & (wavelength > lambda_min) plt.plot(wavelength[m], data_bin_spec[m], 'k', lw=0.5, label = '"data" spec') plt.plot(wavelength[m], single_spec[m], 'r', lw=0.5, label = 'single-star model') plt.plot(wavelength[m], bin_spec[m], 'b', lw=0.5, label = 'binary model') plt.xlim(lambda_min, lambda_max) plt.legend(loc = 'best', frameon = False, fontsize= 18) # unsurprisingly, the single-star model isn't a very good fit, but the binary model is. # verify that our best-fit labels are close to what we put in. print(popt_binary)[ 5.77891029e+03 4.43720910e+00 -5.81747659e-03 1.24170328e-02 7.01505412e-01 2.24734991e+00 4.94390273e+00 -9.93189388e+00 1.01599828e+01]Now that we've seen how to generate and fit model spectra, let's download an actual APOGEE spectrum. Here we'll download a "combined" spectrum. We'll start with a target that is likely a binary, but is not an "obvious" one. I.e., there's no large velocity offset.from binspec import process_spectra apogee_id = '2M18513961+4338099' spec, spec_err = process_spectra.get_combined_spectrum_single_object(apogee_id = apogee_id, catalog = None, save_local = False) plt.figure(figsize=(14, 4)) m = (spec_err < 0.1) & (wavelength < lambda_max) & (wavelength > lambda_min) plt.plot(wavelength[m], spec[m], 'k', lw=0.5) plt.ylim(0.75, 1.05) plt.xlim(lambda_min, lambda_max)Now let's fit this spectrum with a single-star model and a binary model!# fit single-star model popt_single, pcov, single_spec = fitting.fit_normalized_spectrum_single_star_model(norm_spec = spec, spec_err = spec_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux, p0 = None, num_p0 = 1) # fit binary model. popt_binary, pcov, bin_spec = fitting.fit_normalized_spectrum_binary_model(norm_spec = spec, spec_err = spec_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux, NN_coeffs_R = NN_coeffs_R, NN_coeffs_Teff2_logg2 = NN_coeffs_Teff2_logg2, p0_single = popt_single, num_p0 = 10) plt.figure(figsize=(14, 4)) plt.plot(wavelength[m], spec[m], 'k', lw=0.5, label = 'APOGEE spectrum') plt.plot(wavelength[m], single_spec[m], 'r', lw=0.5, label = 'single-star model') plt.plot(wavelength[m], bin_spec[m], 'b', lw=0.5, label = 'binary model') plt.xlim(lambda_min, lambda_max) plt.ylim(0.7, 1.1) plt.legend(loc = 'best', frameon = False, fontsize= 18)The binary model looks like a better fit, though the differences are sublte. 
You can change the axis limits to zoom in on particular lines or explore other parts of the spectrum. Let's compare the $\chi^2$ of the single and binary model.chi2_diff = utils.get_chi2_difference(norm_spec=spec, spec_err=spec_err, norm_model_A = single_spec, norm_model_B = bin_spec) print(chi2_diff)5414.108844086688Now that we've fit a not-obvious binary (one with a small velocity offset), let's look at one with a bigger velocity offset between the two stars. We'll download the spectrum and fit it in one go.apogee_id = '2M13080617+1753494' spec, spec_err = process_spectra.get_combined_spectrum_single_object(apogee_id = apogee_id, catalog = None, save_local = False) # fit single-star model popt_single, pcov, single_spec = fitting.fit_normalized_spectrum_single_star_model(norm_spec = spec, spec_err = spec_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux, p0 = None, num_p0 = 1) # fit binary model. popt_binary, pcov, bin_spec = fitting.fit_normalized_spectrum_binary_model(norm_spec = spec, spec_err = spec_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux, NN_coeffs_R = NN_coeffs_R, NN_coeffs_Teff2_logg2 = NN_coeffs_Teff2_logg2, p0_single = popt_single, num_p0 = 10) m = (spec_err < 0.1) & (wavelength < lambda_max) & (wavelength > lambda_min) plt.figure(figsize=(14, 4)) plt.plot(wavelength[m], spec[m], 'k', lw=0.5, label = 'APOGEE spectrum') plt.plot(wavelength[m], single_spec[m], 'r', lw=0.5, label = 'single-star model') plt.plot(wavelength[m], bin_spec[m], 'b', lw=0.5, label = 'binary model') plt.xlim(15350, 15450) plt.ylim(0.7, 1.1) plt.legend(loc = 'best', frameon = False, fontsize= 18)Here, the differences between the best-fit binary and single-star models are more obvious. Since the velocity offset between the two stars in the best-fit binary model appears non-negligible, we should fit the spectra from individual visits, in case the spectrum changes significantly from one visit to the next.Let's download and plot the spectra from each visit.all_specs, all_err, all_snr, all_hjd, all_vhelio = \ process_spectra.download_visit_spectra_single_object_and_renormalize( apogee_id = apogee_id, p0_single_combined = popt_single, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux, allvisit_cat = None, snr_min = 30) plt.figure(figsize=(14, 4)) for i, spec in enumerate(all_specs): m = (all_err[i] < 0.1) & (wavelength < lambda_max) & (wavelength > lambda_min) plt.plot(wavelength[m], spec[m] + 0.2*i, 'k', lw=0.5) plt.xlim(lambda_min, lambda_max) plt.ylim(0.7, 1.6)Yup, the spectrum definitely looks like it's changing significantly from one visit to the next (most significantly, from the bottom spectrum to the first two).
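To put a number on that visual impression, here is a small illustrative check (not part of the original tutorial) that measures the RMS difference of each visit spectrum relative to the first visit, using only pixels with small reported uncertainties in both visits:

```python
import numpy as np

for i, visit_spec in enumerate(all_specs):
    good = (all_err[0] < 0.1) & (all_err[i] < 0.1)
    rms = np.sqrt(np.mean((visit_spec[good] - all_specs[0][good]) ** 2))
    print('visit %d: RMS difference from visit 0 = %.4f' % (i, rms))
```

Differences well above the typical per-pixel uncertainty indicate that the line profiles really are shifting between visits.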
Let's fit these visit spectra simultaneously using an SB2 model.sb2_labels, pcov, sb2_models = fitting.fit_visit_spectra_sb2_model( norm_spectra = all_specs, spec_errs = all_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux, NN_coeffs_R = NN_coeffs_R, NN_coeffs_Teff2_logg2 = NN_coeffs_Teff2_logg2, v_helios = all_vhelio, p0_combined = popt_binary, num_p0 = 5) plt.figure(figsize=(14, 4)) for i, spec in enumerate(all_specs): m = (all_err[i] < 0.1) & (wavelength < lambda_max) & (wavelength > lambda_min) if i == 0: data_label, model_label = 'data', 'SB2 model' else: data_label, model_label = None, None plt.plot(wavelength[m], spec[m] + 0.2*i, 'k', lw=0.5, label = data_label) plt.plot(wavelength[m], sb2_models[i][m] + 0.2*i, 'r', lw=0.5, label = model_label) plt.xlim(lambda_min, lambda_max) plt.ylim(0.7, 1.6) plt.legend(loc = 'upper left', frameon = False, ncol = 2, fontsize = 18)Looks like a good fit. Let's see what the best-fit labels are. The format of the label vector returned for an SB2 model is [Teff1, logg1, [Fe/H], [Mg/Fe], q_spec, v_macro1, v_macro2, q_dyn, gamma, dv_i], where i = 1...N_visits and dv_i is the velocity of the primary at each visit. If you aren't sure what the labels for a particular model are, you can check in spectral_model.pyprint(sb2_labels)[ 5.81209038e+03 4.44233921e+00 1.06612376e-01 -5.44079000e-02 8.31312281e-01 2.37523955e+00 7.17323488e-01 8.36045903e-01 1.05971627e+01 2.79507848e+01 1.68375081e+01 1.58818318e+01]We note that q_spec and q_dyn are both about 0.83, so the spectroscopic and dynamical mass-ratio estimates agree, which is a good consistency check. Finally, let's try fitting an SB1. First, we'll download and fit the combined spectrum.apogee_id = '2M13381097+5620250' spec, spec_err = process_spectra.get_combined_spectrum_single_object(apogee_id = apogee_id, catalog = None, save_local = False) # fit single-star model popt_single, pcov, single_spec = fitting.fit_normalized_spectrum_single_star_model(norm_spec = spec, spec_err = spec_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux, p0 = None, num_p0 = 1) # fit binary model. popt_binary, pcov, bin_spec = fitting.fit_normalized_spectrum_binary_model(norm_spec = spec, spec_err = spec_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux, NN_coeffs_R = NN_coeffs_R, NN_coeffs_Teff2_logg2 = NN_coeffs_Teff2_logg2, p0_single = popt_single, num_p0 = 10) m = (spec_err < 0.1) & (wavelength < lambda_max) & (wavelength > lambda_min) plt.figure(figsize=(14, 4)) plt.plot(wavelength[m], spec[m], 'k', lw=0.5, label = 'APOGEE spectrum') plt.plot(wavelength[m], single_spec[m], 'r', lw=0.5, label = 'single-star model') plt.plot(wavelength[m], bin_spec[m], 'b', lw=0.5, label = 'binary model') plt.xlim(lambda_min, lambda_max) plt.ylim(0.7, 1.1) plt.legend(loc = 'best', frameon = False, fontsize= 18) chi2_diff = utils.get_chi2_difference(norm_spec=spec, spec_err=spec_err, norm_model_A = single_spec, norm_model_B = bin_spec) print(chi2_diff)67.59678569121024Hmmm, for this target, the binary model fit is not obviously better, and the $\chi^2$ difference for the combined spectrum is very small (small enough that it wouldn't pass our model selection criteria to consider it a reliable binary). However, if we look at the APOGEE-supplied v_helios, we'll find that this target is RV variable.
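As a quick illustration of that kind of check (this is not the paper's actual selection criterion, and the 1 km/s threshold below is arbitrary), one can look at the spread of the per-visit heliocentric velocities, e.g. the `all_vhelio` array returned by the visit download in the next cell:

```python
import numpy as np

v_helios = np.asarray(all_vhelio)
delta_v = v_helios.max() - v_helios.min()  # peak-to-peak velocity spread across visits
print('max - min v_helio = %.2f km/s' % delta_v)
print('RV variable?', delta_v > 1.0)  # illustrative threshold only
```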
Therefore, we'll download the individual-visit spectra, and we'll try fitting them with both an SB1 model and a genuine single-star model.# get the visit spectra all_specs, all_err, all_snr, all_hjd, all_vhelio = \ process_spectra.download_visit_spectra_single_object_and_renormalize( apogee_id = apogee_id, p0_single_combined = popt_single, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux, allvisit_cat = None, snr_min = 30) # fit them with a single-star model single_labels, pcov, single_models = fitting.fit_visit_spectra_single_star_model( norm_spectra = all_specs, spec_errs = all_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux, v_helios = all_vhelio, p0 = popt_single, num_p0 = 1) # fit them with an SB1 model sb1_labels, pcov, sb1_models = fitting.fit_visit_spectra_sb1_model( norm_spectra = all_specs, spec_errs = all_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux, v_helios = all_vhelio, p0 = popt_single, num_p0 = 5) plt.figure(figsize=(14, 4)) for i, spec in enumerate(all_specs): m = (all_err[i] < 0.1) & (wavelength < lambda_max) & (wavelength > lambda_min) if i == 0: data_label, sb1_label, single_label = 'data', 'SB1 model', 'single-star model' else: data_label, sb1_label, single_label = None, None, None plt.plot(wavelength[m], spec[m] + 0.2*i, 'k', lw=0.5, label = data_label) plt.plot(wavelength[m], single_models[i][m] + 0.2*i, 'r', lw=0.5, label = single_label) plt.plot(wavelength[m], sb1_models[i][m] + 0.2*i, 'b', lw=0.5, label = sb1_label) plt.xlim(lambda_min, lambda_max) plt.ylim(0.75, 1.6) plt.legend(loc = 'upper left', frameon = False, ncol = 3, fontsize = 18)It's pretty clear that the spectrum is changing from one visit to the next, so the single-star model (which requires constant v_helio across visits) won't be able to get a good fit. But the SB1 model does achieve a good fit, and if you tried an SB2 model, you'd find that it couldn't do any better. Let's look at the labels of the best-fit SB1 model, which are in the format [Teff, logg, [Fe/H], [Mg/Fe], v_macro, dv_i], where i = 1..N_visits is the velocity at each visit.print(sb1_labels)[ 6.03814420e+03 4.21286962e+00 4.08804375e-02 -1.12199395e-01 8.39337302e+00 -1.43412469e+01 1.69912803e+01 -2.59064042e+00]{{ title }}: Job {{ cookiecutter.jenkins_job }} By and AbstractThis brief report describes measurements of interest that were carried out for Job {{ cookiecutter.jenkins_job }} of the Science Pipeline.In short, the astrometric performance is excellent in comparison to both the per cycle ramps as well as the design level KPMs. The photometric performance is not as good. The inclusion of [jointcal](https://github.com/lsst/jointcal) in the processing in future cycles will improve photometric performance. Current reprocessing of HSC data using a precursor to [jointcal](https://github.com/lsst/jointcal) called [meas_mosaic](https://github.com/lsst/meas_mosaic) shows significant improvement over single epoch processing. IntroductionMeasured using [validation_data_hsc](https://github.com/lsst/validation_data_hsc), which consists of 8 HSC engineering images: 2 *r*-band, 4 *i’*-band, and 2 *y*-band. Measurements were made on individual, separately-processed, single frame images: [jointcal](https://github.com/lsst/jointcal) and/or [meas_mosaic](https://github.com/lsst/meas_mosaic) were not run. 
For comparison, we provide the SRD required “design” value of each metric as defined in the [Science Requirements Document [LPM-17]](https://ls.st/LPM-17), and, where available, the target for this release as defined in the [Data Management Development Milestone Roadmap [LDM-240]](https://ls.st/LDM-240). All values were computed using the [examples/runHscTest.sh](https://github.com/lsst/validate_drp/blob/master/examples/runHscTest.sh) script in the [validate_drp](https://github.com/lsst/validate_drp) package.Some KPMs (AF1, AD1) involve thresholds that are different for “design”, “minimum”, and “stretch” specifications. Thus comparing one of these metrics against a given target number is a two-level process: both the threshold used in the calculation and the requirement on the computed number depend on the chosen specification.The metrics in this report have all been computed relative to the “design” thresholds. The values of these KPMs would be different if computed against different thresholds.Note also that the photometric performance of the pipelines in the *y*-band is an underestimate of the expected delivered performance. For these tests, the *y*-band data was calibrated with *z*-band photometry. This is due to the lack of a reference catalog containing *y*-band information at this time. We recognize that the bandpass mismatch is certainly not the only source of scatter in the *y*-band photometry. These metric measurements are still worth noting in this report as a historical benchmark to track relative performance.The per cycle target numbers come from the “KPMs” sheet of [LDM-240](https://ls.st/LDM-240). Photometric and Astrometric Performance_Submitted by and -Vasey_procCalRep corresponds to requirement OSS-REQ-0275 (defined in [LSE-30](http://ls.st/LSE-30)). All other photometric performance metrics follow LSS-REQ-0093 ([LSE-29](http://ls.st/LSE-29)) and [LPM-17](http://ls.st/LPM-17) table 14.from astropy.table import Table, Column import numpy as np band_map = {'HSC-R': 'r', 'HSC-I': 'i'} jobs_ids = {'HSC-R': {{ cookiecutter.hsc_r_job_id }}, 'HSC-I': {{ cookiecutter.hsc_i_job_id }}} data = {'Metric':['PA1', 'PA1', 'AM1', 'AM1'], 'band':['HSC-R', 'HSC-I', 'HSC-R', 'HSC-I'], 'metric value':[np.nan, np.nan, np.nan, np.nan], 'spec operator':['<=', '<=', '<=', '<='], 'spec value':[np.nan, np.nan, np.nan, np.nan]} table = Table(data) import requests jobs = {} for band, job_id in jobs_ids.items(): r = requests.get("https://squash-restful-api.lsst.codes/job/%i"%job_id) jobs[band] = r.json() def make_get_str(metric_name, band): if metric_name.startswith('AM'): get_str = 'https://squash-restful-api.lsst.codes/spec/validate_drp.%s.design'%(metric_name) elif metric_name.startswith('PA'): get_str = 'https://squash-restful-api.lsst.codes/spec/validate_drp.%s.hsc_design_%s'%(metric_name, band) else: raise ValueError('Only AM and PA metrics supported currently') return get_str units = [] for i, v in enumerate(zip(table['Metric'], table['band'])): for measurement in jobs[v[1]]['measurements']: if measurement['metric'] == 'validate_drp.'+v[0]: table['metric value'][i] = measurement['value'] units.append(measurement['unit']) r = requests.get(make_get_str(v[0], band_map[v[1]])) result = r.json() table['spec value'][i] = result['threshold']['value'] col = Column(units, name='unit') table.add_column(col) table.show_in_notebook()ONLY ITEM 1 IS MANDATORY FOR THE PRESENTATION! History of the Olympics - Part 2_(credits to prof.
)_ You recently worked (or are currently working) on an analysis of historical Olympic Games data, using Pandas to help you.Since you started working on that project, some very powerful new tools have been taught! Your role now is to use these new tools to produce visualizations that will make certain information much clearer.Use any of the libraries studied (```matplotlib```, ```seaborn``` and ```plotly```) to carry out the proposed activities. It is fine to use just one of them for all the activities, or to use a different one for each activity - follow your personal preference!Use the (many) parameters accepted by each function and/or the attributes of the objects provided by the libraries to create a cohesive visual identity for the whole project. Use titles, legends and axis labels to make the plots truly informative. And don't forget that simply choosing the right colors can make the plots even more interesting!You will use the same dataset provided in the previous project. It is fine to reuse code from the previous project to save time and focus your effort on producing the plots.To start, import Pandas and load the ```athlete_events.csv``` file provided in the previous project.import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import numpy as np %matplotlib inline atletas = pd.read_csv("C:\\Users\\Fabio\\Desktop\\Data Science\\Módulo 3\\athlete_events.csv") atletas.head(5)1. Brazil at the Olympics**1.1** Let's start by filtering, once again, the data we will work with. Create a DataFrame containing only information about Brazilian **medal-winning** athletes.sem_nan = atletas.dropna() sem_nan atletas_medalhistas_brasileiros = sem_nan[sem_nan["Team"] == "Brazil"] df_atletas_medalhistas_brasileiros = pd.DataFrame(atletas_medalhistas_brasileiros) df_atletas_medalhistas_brasileiros.head(5) plt.rcParams['figure.figsize']=15,5 # Parameter used to stretch and/or enlarge the figure. This single setting applies to all of the plotsLet's characterize our medalists physically, checking whether there is any correlation between performance in certain sports and the athletes' body type. **1.2** Produce a bar chart with the different sports on the X axis and the athletes' height on the Y axis. Use side-by-side bars to separate male and female athletes.height = sns.barplot(x="Sport", y="Height", data=df_atletas_medalhistas_brasileiros, hue="Sex", palette='YlOrBr_r'); plt.title("Esporte x Altura (cm)", loc="center"); height.set_xticklabels(height.get_xticklabels(), rotation=55) # Rotate the categories on the X axis height;**1.3** Now produce a similar chart showing the athletes' weight.# ANSWER 1.3: weight = sns.barplot(x="Sport", y="Weight", data=df_atletas_medalhistas_brasileiros, hue="Sex", palette='YlOrBr_r'); plt.title('Esporte x Peso', loc='center'); weight.set_xticklabels(weight.get_xticklabels(), rotation=55) # Rotate the categories on the X axis weight;Now let's analyze the medals our athletes brought home.Find the top Brazilian medalists by **total number of medals**.**1.4** Next, make a stacked bar chart. Put the athletes' names on the X axis and the number of medals on the Y axis.
Use the stacked bars to show, respectively, each athlete's bronze, silver and gold medals.maiores_medalhistas_brasileiros = atletas_medalhistas_brasileiros.groupby(by=["Name"])["Medal"].size().sort_values(ascending=False).head(4) maiores_medalhistas_brasileiros lista = atletas_medalhistas_brasileiros.groupby(by=["Name", "Medal"]).size().sort_values(ascending=False) lista.head(65) ouro = [2, 2, 0, 2] prata = [2, 2, 2, 2] bronze =[1, 1, 2, 0] nomes =["", "", "", "Sérgio 'Escadinha' "] plt.bar(nomes,bronze,color="brown",label="Bronze") plt.bar(nomes,prata,color="silver",bottom=np.array(bronze),label="Silver") plt.bar(nomes,ouro,color="gold",bottom=np.array(bronze)+np.array(prata),label="Gold") plt.title("Atletas x Número de medalhas") plt.ylabel("Número de medalhas") plt.legend(loc="lower left",bbox_to_anchor=(0.8,1.0)) plt.show()**1.5** Now produce the same stacked bar chart, replacing the athletes' names with the names of all the sports in which Brazil has ever won medals.**TIP:** be very careful in this analysis: each **sporting event** yields 1 medal. For example, when the football team wins, that counts as 1 medal, even though there are around 20 medal-winning athletes on the team.atletas.head(3) # STILL TO DO atletas = atletas.filter(items=["Team", "Sport", "Event", "Medal"]) atletas.fillna(0, inplace=True) atletas_times_brasil = atletas[atletas["Team"] == "Brazil"] atletas_times_brasil.drop_duplicates().reset_index(drop=True) atletas_times_brasil = atletas_times_brasil.groupby(by=["Sport"])["Medal"].value_counts() df_atletas_times_brasil = pd.DataFrame(atletas_times_brasil) df_atletas_times_brasil = df_atletas_times_brasil.rename(columns={"Medal": "Quantidade"}) df_atletas_times_brasil = df_atletas_times_brasil.reset_index() df_atletas_times_brasil_aux = df_atletas_times_brasil.groupby(by=["Sport"]).cumsum() df_atletas_times_brasil["Acumulativos de medalhas"] = df_atletas_times_brasil_aux df_atletas_times_brasil.head(5)**1.6** One more stacked bar chart: now show the **sporting events** that earned medals for Brazil.Remember: each "category" within a sport is considered an event.
For example, within "athletics" we have a men's 100m competition, a women's 100m, a men's 4 x 100m relay, a women's 4 x 100m relay, a men's 400m competition, a women's 400m, a men's marathon, a women's marathon, and so on.df_medalhas_brasil = df_atletas_medalhistas_brasileiros.drop_duplicates(subset=['Medal', 'Event', 'Games']).copy() df_eventos_brasil = df_medalhas_brasil.filter(items = ['Sport', 'Event', 'Medal']) df_eventos_brasil = df_eventos_brasil.groupby(by=['Sport', 'Event']).count() df_2 = df_eventos_brasil.unstack('Event').fillna(0) df_2.plot(kind = 'bar', stacked = True) plt.legend(ncol=3, loc="lower left", bbox_to_anchor=(0, 1)) plt.show()**1.7** Use a distribution plot (such as a histogram, for example) to show Brazil's total number of medals per sport.df_eventos_brasil.head(5) df_medalhas_brasil = df_atletas_medalhistas_brasileiros.drop_duplicates(subset=['Medal', 'Event', 'Games']).copy() colunas_selecionadas= ['Sport','Medal'] melhadas_por_esporte =df_medalhas_brasil.filter(items= colunas_selecionadas) # Answer 1.7 (HISTOGRAM): plt.hist(melhadas_por_esporte['Sport'], rwidth=0.5) plt.title("Quantidade Total de Medalhas do Brasil por Esporte") plt.xlabel("Esportes") plt.ylabel("Medalhas") plt.show() # Answer 1.7 (scatter plot with points): df_medalhas_brasil = df_atletas_medalhistas_brasileiros.drop_duplicates(subset=['Medal', 'Event', 'Games']).copy() df_eventos_brasil = df_medalhas_brasil.filter(items = ['Sport', 'Event', 'Medal']) df_eventos_brasil = df_eventos_brasil.groupby(by=['Sport']).count() df_eventos_brasil.reset_index(inplace = True) x=df_eventos_brasil['Sport'] y=df_eventos_brasil['Medal'] plt.scatter(x,y) plt.title("Quantidade Total de Medalhas do Brasil por Esporte") plt.xlabel("Esportes") plt.ylabel("Medalhas") plt.show()**1.8** Repeat the procedure above, but for gold medals only.# Answer 1.8 (HISTOGRAM): # Filtering the relevant data: df_medalhas_brasil = df_atletas_medalhistas_brasileiros.drop_duplicates(subset=['Medal', 'Event', 'Games']).copy() colunas_selecionadas= ['Sport','Medal'] melhadas_por_esporte =df_medalhas_brasil.filter(items= colunas_selecionadas) melhadas_de_ouro_por_esporte= melhadas_por_esporte[melhadas_por_esporte['Medal']=='Gold'] # Creating the plot: plt.hist(melhadas_de_ouro_por_esporte['Sport'], rwidth=0.5) plt.title("Quantidade Total de Medalhas de OURO do Brasil por Esporte") plt.xlabel("Esportes") plt.ylabel("Medalhas de Ouro") plt.show() melhadas_de_ouro_por_esporte.head(5) melhadas_de_ouro_por_esporte = melhadas_de_ouro_por_esporte.groupby(['Sport']).count()**1.9** Now make a pie chart showing the distribution of Brazil's gold medals by sport.# Answer 1.9: # .pie: pie chart, parameter: medal counts plt.pie(melhadas_de_ouro_por_esporte['Medal'], labels=melhadas_de_ouro_por_esporte.index , autopct= '%1.2f%%') plt.title("Medalhas de Ouro por esporte") plt.rcParams['figure.figsize']=20,10 melhadas_de_ouro_por_esporte**1.10** To wrap up Brazil's story, let's look at the time series of Brazilian medals. Create a line chart containing 3 lines: gold, silver and bronze.
Put the Olympic edition on the X axis (in chronological order) and the total number of medals of each type on the Y axis.colunas_selecionadas2=['Games','Medal'] df_medalhas_brasil = df_medalhas_brasil.filter(items= colunas_selecionadas2) medalhas_edicao = df_medalhas_brasil.sort_values(by='Games') medalha_ouro= medalhas_edicao[medalhas_edicao['Medal']=='Gold'] medalha_ouro= medalha_ouro.groupby(['Games']).count() medalha_prata= medalhas_edicao[medalhas_edicao['Medal']=='Silver'] medalha_prata= medalha_prata.groupby(['Games']).count() medalha_bronze= medalhas_edicao[medalhas_edicao['Medal']=='Bronze'] medalha_bronze =medalha_bronze.groupby(['Games']).count() medalhas_por_games= medalhas_edicao.groupby(['Games']).count() # GOLD MEDALS PER EDITION (GAMES) medalha_ouro.head(5) # SILVER MEDALS PER EDITION (GAMES) medalha_prata.head(5) # BRONZE MEDALS PER EDITION (GAMES) medalha_bronze.head(5) # PLOTTING THE SEPARATE MEDAL CHARTS medalha_ouro.plot() medalha_prata.plot() medalha_bronze.plot() # ADDING THE GOLD, SILVER AND BRONZE MEDAL COLUMNS TO THE MEDALHAS_POR_GAMES DATAFRAME medalhas_por_games['Medalhas de ouro'] = medalha_ouro['Medal'] medalhas_por_games['Medalhas de prata'] = medalha_prata['Medal'] medalhas_por_games['Medalhas de bronze'] = medalha_bronze['Medal'] # DROPPING THE TOTAL-MEDALS COLUMN medalhas_por_games.drop(columns="Medal") # Answer 1.10: medalhas_por_games= medalhas_por_games.drop(columns="Medal") medalhas_por_games.plot() plt.title("Medalhas brasileiras por edição dos jogos") plt.xlabel("Edição dos jogos") plt.ylabel("Número de medalhas")Demonstrates the usage of the interactive input formThe goal of the interactive input form is to assist the `z2jh_cost_simulator` by collecting estimates from its user in a friendly way, for example the maximum number of users for every given hour during a full day.# For live changes to the z2jh_cost_simulator package - by avoiding caching %load_ext autoreload %autoreload 2 from z2jh_cost_simulator.input_form import InteractiveInputForm workweek_day = InteractiveInputForm() weekend_day = InteractiveInputForm() display(workweek_day.get_input_form("Maximum number of users per day on a weekday")) display(weekend_day.get_input_form("Maximum number of users per day on a weekend")) workweek_day.get_data() weekend_day.get_data()Learndf_train = df X = df_train['image_name'].values y = df_train[tag_columns].values n_features = 1 n_classes = y.shape[1] X, y = shuffle(X, y) X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.1) print('We\'ve got {} feature rows and {} labels'.format(len(X_train), len(y_train))) print('Each row has {} features'.format(n_features)) print('and we have {} classes'.format(n_classes)) assert(len(y_train) == len(X_train)) print('We use {} rows for training and {} rows for validation'.format(len(X_train), len(X_valid))) print('Each image has the shape:', INPUT_SHAPE) print('So far, so good') print('Memory usage (train) kB', X_train.nbytes//(1024)) print('Memory usage (valid) kB', X_valid.nbytes//(1024)) def generator(X, y, batch_size=32): X_copy, y_copy = X, y while True: for i in range(0, len(X_copy), batch_size): X_result, y_result = [], [] for x, y in zip(X_copy[i:i+batch_size], y_copy[i:i+batch_size]): rx, ry = [load_image(x)], [y] rx = np.array([preprocess(x) for x in rx]) ry = np.array(ry) X_result.append(rx) y_result.append(ry) X_result, y_result = np.concatenate(X_result), np.concatenate(y_result) yield shuffle(X_result, y_result) X_copy, y_copy = shuffle(X_copy,
y_copy) from keras import backend as K def fbeta(y_true, y_pred, threshold_shift=0): beta = 2 # just in case of hipster activation at the final layer y_pred = K.clip(y_pred, 0, 1) # shifting the prediction threshold from .5 if needed y_pred_bin = K.round(y_pred + threshold_shift) tp = K.sum(K.round(y_true * y_pred_bin)) + K.epsilon() fp = K.sum(K.round(K.clip(y_pred_bin - y_true, 0, 1))) fn = K.sum(K.round(K.clip(y_true - y_pred, 0, 1))) precision = tp / (tp + fp) recall = tp / (tp + fn) beta_squared = beta ** 2 return (beta_squared + 1) * (precision * recall) / (beta_squared * precision + recall + K.epsilon()) # define the model model = Sequential() model.add(Conv2D(48, (8, 8), strides=(2, 2), input_shape=INPUT_SHAPE, activation='elu')) model.add(BatchNormalization()) model.add(Conv2D(64, (8, 8), strides=(2, 2), activation='elu')) model.add(BatchNormalization()) model.add(Conv2D(96, (5, 5), strides=(2, 2), activation='elu')) model.add(BatchNormalization()) model.add(Conv2D(96, (3, 3), activation='elu')) model.add(BatchNormalization()) model.add(Flatten()) model.add(Dropout(0.3)) model.add(Dense(256, activation='elu')) model.add(BatchNormalization()) model.add(Dense(64, activation='elu')) model.add(BatchNormalization()) model.add(Dense(n_classes, activation='sigmoid')) model.compile( optimizer='adam', loss='binary_crossentropy', metrics=[fbeta, 'accuracy'] ) model.summary() EPOCHS = 3 BATCH = 32 PER_EPOCH = 256 X_train, y_train = shuffle(X_train, y_train) X_valid, y_valid = shuffle(X_valid, y_valid) filepath="weights-improvement-{epoch:02d}-{val_fbeta:.3f}.hdf5" checkpoint = ModelCheckpoint(filepath, monitor='val_fbeta', verbose=1, save_best_only=True, mode='max') callbacks_list = [checkpoint] history = model.fit_generator( generator(X_train, y_train, batch_size=BATCH), steps_per_epoch=PER_EPOCH, epochs=EPOCHS, validation_data=generator(X_valid, y_valid, batch_size=BATCH), validation_steps=len(y_valid)//(4*BATCH), callbacks=callbacks_list )Epoch 1/3 256/256 [==============================] - 53s 206ms/step - loss: 0.4385 - fbeta: 0.6512 - acc: 0.8379 - val_loss: 0.2846 - val_fbeta: 0.6352 - val_acc: 0.8922 Epoch 00001: val_fbeta improved from -inf to 0.63521, saving model to weights-improvement-01-0.635.hdf5 Epoch 2/3 256/256 [==============================] - 50s 197ms/step - loss: 0.2103 - fbeta: 0.6777 - acc: 0.9191 - val_loss: 0.2628 - val_fbeta: 0.5954 - val_acc: 0.8754 Epoch 00002: val_fbeta did not improve from 0.63521 Epoch 3/3 256/256 [==============================] - 51s 198ms/step - loss: 0.1960 - fbeta: 0.6957 - acc: 0.9216 - val_loss: 0.1999 - val_fbeta: 0.7223 - val_acc: 0.9223 Epoch 00003: val_fbeta improved from 0.63521 to 0.72228, saving model to weights-improvement-03-0.722.hdf5`33 Days of Parallel Reading` An initiation to Machine Learning A comprehensive guide to Numpy for Data manipulation Author: [](https://github.com/gabayae) Link to the notebook on the repo: [A comprehensive guide to Numpy](https://github.com/gabayae/-66DaysOfData-KennethJee/edit/main/README.md) ToC - [NumPy](NumPy) - [Overview](Overview) - [Getting Started](Getting-Started) - [Load packages](Load-packages) - [Introduction to Numpy](Introduction-to-Numpy) - [NumPy Arrays](NumPy-Arrays) - [NumPy Arrays from Python Lists](NumPy-Arrays-from-Python-Lists) - [NumPy Arrays using the function arange()](NumPy-Arrays-using-the-function-arange()) - [NumPy Arrays using the function linspace()](NumPy-Arrays-using-the-function-linspace()) - [NumPy Array Attributes](NumPy-Array-Attributes) - [Some Numpy Special 
Arrays](Some-Numpy-Special-Arrays) - [NumPy Arrays using the function empty()](NumPy-Arrays-using-the-function-empty()) - [Operations on Arrays](Operations-on-Arrays) - [Array Concatenation and Splitting](Array-Concatenation-and-Splitting) - [Numpy Arrays Universal Functions](NumPy-Arrays-Universal-Functions) - [Aggregation Functions](Aggregation-Functions) - [Broadcasting](Broadcasting) - [Boolean Masks](Boolean-masks) - [Additional Functionality](Additional-Functionality) - [Linear Algebra Using Numpy](Linear-Algebra-Using-Numpy) OverviewData manipulation in Python is nearly synonymous with NumPy array manipulation: even newer tools like Pandas ([next lesson]()) are built around the NumPy array. This notebook will present several examples of using NumPy array manipulation to access data and subarrays, and to split, reshape, and join the arrays. Get to know them well![NumPy](https://en.wikipedia.org/wiki/NumPy) (pronounced /ˈnʌmpaɪ/ (NUM-py) or sometimes /ˈnʌmpi/ (NUM-pee)) is a library for the Python programming language, adding support for large, multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate on these arrays.[NumPy](https://en.wikipedia.org/wiki/NumPy) is a first-rate library for numerical programming- Widely used in academia, finance and industry. - Mature, fast, stable and under continuous development. Getting Started Load packagesIn order to be able to use numpy we need to import the library using the special word `import`. Also, to avoid typing `numpy` every time we want to use one if its functions we can provide an alias using the special word `as`:import numpy as npIntroduction to NumpyNow, we have access to all the functions available in `numpy` by typing `np.name_of_function`. For example, the equivalent of `1 + 1` in Python can be done in `numpy`:np.add(1,1)Although this might not at first seem very useful, even simple operations like this one can be much quicker in `numpy` than in standard Python when using lots of numbers (large arrays).To access the documentation explaining how a function is used, its input parameters and output format we can press `Shift+Tab` after the function name. Try this in the cell belownp.addNumPy ArraysThe core concept in numpy is the `array` which is equivalent to lists of numbers but can be multidimensional. The essential problem that NumPy solves is fast array processing. The most important structure that NumPy defines is an array data type formally called a [numpy.ndarray](http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html). NumPy arrays power a large proportion of the scientific Python ecosystem. Let us list ways of creating Numpy arrays one by one, with Examples. NumPy Arrays from Python Lists# making a python list li li = [8,5,6,9,4,2] # creating array out of it numpy_array_from_list = np.array(li) # printing the array print(numpy_array_from_list) # checking the type of numpy_array_from_list type(numpy_array_from_list)We can also pass a list manually without explicitly defining it.numpy_array_from_list1 = np.array([5,7,4,1,5,6]) print(numpy_array_from_list1) type(numpy_array_from_list1)Other arguments which are optional can also be included while creating the Numpy arrays. One of the optional parameter, that you might find useful is: dtype : This argument specifies the data-type of the array being created. Since, unlike lists, all the elements in the Numpy array are of the same data-type. 
The datatypes ranging from float to complex, all are acceptable.Let us see with an example:numpy_array_float = np.array([1,8,5,59,8,98], dtype = 'float') print(numpy_array_float) print("======================================") # let's try Numpy array with string as input str = ['the','air','after','summer','rain'] numpy_array_str = np.array(str, dtype = 'str') print(numpy_array_str) # In a similar manner, other data-types can also be used to create a Numpy array.[ 1. 8. 5. 59. 8. 98.] ====================================== ['the' 'air' 'after' 'summer' 'rain']A short documentation about the `np.array` function can be found by doing `np.array?`#np.array? # Uncomment to run the cell.NumPy Arrays using the function arange()The `arange()` function is one of the Numpy's most used method for creating an array within a specified range. The first argument takes the **starting point** of the array you want to create, second is the **stop point** and the third is the **step** (just like python list slicing function). The last argument is again *dtype*, which is optional: `arange(start, end, step, dtype)`# creating an array starting from 0 ending with 10 # and increasing with a step of 2 arange_array1 = np.arange(0,11,2) # writing 11 instead of 10 since range is exclusive print("First array:", arange_array1) print("======================================") # array starting from 50 going till 120 with step of 4 arange_array2 = np.arange(50,121,4) print("Second Array:", arange_array2) print("======================================") # starting from 0(by default), stopping at 14(don't forget 15 is exclusive), step=1(by default) arange_array3 = np.arange(15) print("Third Array:", arange_array3) print("======================================") # okay now lets print backward counting: 20 to 1 arange_array4 = np.arange(20,0,-1) print("Reverse Array:", arange_array4)First array: [ 0 2 4 6 8 10] ====================================== Second Array: [ 50 54 58 62 66 70 74 78 82 86 90 94 98 102 106 110 114 118] ====================================== Third Array: [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14] ====================================== Reverse Array: [20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]NumPy Arrays using the function linspace()Like the `arange()` function, the `linspace()` function can also be used to create Numpy array. In the `arange()` function, we had control over where to start the Numpy array from, where to stop and step points but with `linspace()` we can maintain a proper linear stepping or spacing between array elements value while generating the array. The `linspace()` function takes arguments: **start index**, **end index** and the **number of elements** to be outputted. These number of elements would be linearly spaced in the range mentioned: `linspace(start_index, end_index, num_of_elements)`. Now let's take a code example,# printing arr consisting of 10 values in between # range 15, 75 spaced appropriately. arr = np.linspace(15, 75, 10) print("First Array\n", arr) print("=========================================================") # properly spaced array of 25 elements arr1 = np.linspace(50,100,25) print("Second Array\n", arr1)First Array [15. 21.66666667 28.33333333 35. 41.66666667 48.33333333 55. 61.66666667 68.33333333 75. ] ========================================================= Second Array [ 50. 52.08333333 54.16666667 56.25 58.33333333 60.41666667 62.5 64.58333333 66.66666667 68.75 70.83333333 72.91666667 75. 
77.08333333 79.16666667 81.25 83.33333333 85.41666667 87.5 89.58333333 91.66666667 93.75 95.83333333 97.91666667 100. ]NumPy Array AttributesBefore we continue in showing other ways of generating Numpy arrays, let's discuss some useful array attributes. We'll start by defining three random arrays, a one-dimensional, two-dimensional, and three-dimensional array. We'll use NumPy's random number generator, which we will seed with a set value in order to ensure that the same random arrays are generated each time this code is run:np.random.seed(0) # seed for reproducibility x1 = np.random.randint(10, size=6) # One-dimensional array x2 = np.random.randint(10, size=(3, 4)) # Two-dimensional array x3 = np.random.randint(10, size=(3, 4, 5)) # Three-dimensional arrayEach array has attributes `ndim` (the number of dimensions), `shape` (the size of each dimension), and `size` (the total size of the array):print("x3 ndim: ", x3.ndim) print("x3 shape:", x3.shape) print("x3 size: ", x3.size)x3 ndim: 3 x3 shape: (3, 4, 5) x3 size: 60Another useful attribute is the `dtype`, the data type of the array (which we already discussed).print("dtype:", x3.dtype)dtype: int64Other attributes include ``itemsize``, which lists the size (in bytes) of each array element, and ``nbytes``, which lists the total size (in bytes) of the array:print("itemsize:", x3.itemsize, "bytes") print("nbytes:", x3.nbytes, "bytes")itemsize: 8 bytes nbytes: 480 bytesIn general, we expect that ``nbytes`` is equal to ``itemsize`` times ``size``. Array Indexing: Accessing Single ElementsIf you are familiar with Python's standard list indexing, indexing in NumPy will feel quite familiar.In a one-dimensional array, the $i^{th}$ value (counting from zero) can be accessed by specifying the desired index in square brackets, just as with Python lists:x1 print(x1[0]) print("-----------") print(x1[4]) print("-----------") print(x1[-1]) print("-----------") print(x1[-2]) print("-----------")5 ----------- 7 ----------- 9 ----------- 7 -----------In a multi-dimensional array, items can be accessed using a comma-separated tuple of indices:x2 print(x2[0,0]) print("-----------") print(x2[2,0]) print("-----------") print(x2[2,-1]) print("-----------") print(x2[-2]) print("-----------")3 ----------- 1 ----------- 7 ----------- [7 6 8 8] -----------Values can also be modified using any of the above index notation:x2[0, 0] = 12 x2Keep in mind that, unlike Python lists, NumPy arrays have a fixed type.This means, for example, that if you attempt to insert a floating-point value to an integer array, the value will be silently truncated. Don't be caught unaware by this behavior!x1[0] = 3.14159 # this will be truncated! x1Array Slicing: Accessing SubarraysJust as we can use square brackets to access individual array elements, we can also use them to access subarrays with the *slice* notation, marked by the colon (``:``) character.The NumPy slicing syntax follows that of the standard Python list; to access a slice of an array ``x``, use this:``` pythonx[start:stop:step]```If any of these are unspecified, they default to the values ``start=0``, ``stop=``*``size of dimension``*, ``step=1``.We'll take a look at accessing sub-arrays in one dimension and in multiple dimensions. 
One-dimensional subarraysx = np.arange(10) x x[:5] # first five elements x[5:] # elements after index 5 x[4:7] # middle sub-array x[::2] # every other element x[1::2] # every other element, starting at index 1A potentially confusing case is when the ``step`` value is negative.In this case, the defaults for ``start`` and ``stop`` are swapped.This becomes a convenient way to reverse an array:x[::-1] # all elements, reversed x[5::-2] # reversed every other from index 5Multi-dimensional subarraysMulti-dimensional slices work in the same way, with multiple slices separated by commas.For example:x2 x2[:2, :3] # two rows, three columns x2[:3, ::2] # all rows, every other columnFinally, subarray dimensions can even be reversed together:x2[::-1, ::-1]Accessing array rows and columnsOne commonly needed routine is accessing of single rows or columns of an array.This can be done by combining indexing and slicing, using an empty slice marked by a single colon (``:``):print(x2[:, 0]) # first column of x2 print(x2[0, :]) # first row of x2[12 5 2 4]In the case of row access, the empty slice can be omitted for a more compact syntax:print(x2[0]) # equivalent to x2[0, :][12 5 2 4]Subarrays as no-copy viewsOne important–and extremely useful–thing to know about array slices is that they return *views* rather than *copies* of the array data.This is one area in which NumPy array slicing differs from Python list slicing: in lists, slices will be copies.Consider our two-dimensional array from before:print(x2)[[12 5 2 4] [ 7 6 8 8] [ 1 6 7 7]]Let's extract a $2 \times 2$ subarray from this:x2_sub = x2[:2, :2] print(x2_sub)[[12 5] [ 7 6]]Now if we modify this subarray, we'll see that the original array is changed! Observe:x2_sub[0, 0] = 99 print(x2_sub) print(x2)[[99 5 2 4] [ 7 6 8 8] [ 1 6 7 7]]This default behavior is actually quite useful: it means that when we work with large datasets, we can access and process pieces of these datasets without the need to copy the underlying data buffer. Creating copies of arraysDespite the nice features of array views, it is sometimes useful to instead explicitly copy the data within an array or a subarray. This can be most easily done with the ``copy()`` method:x2_sub_copy = x2[:2, :2].copy() print(x2_sub_copy)[[99 5] [ 7 6]]If we now modify this subarray, the original array is not touched:x2_sub_copy[0, 0] = 42 print(x2_sub_copy) print(x2)[[99 5 2 4] [ 7 6 8 8] [ 1 6 7 7]]Reshaping of ArraysAnother useful type of operation is reshaping of arrays.The most flexible way of doing this is with the ``reshape`` method.For example, if you want to put the numbers 1 through 9 in a $3 \times 3$ grid, you can do the following:grid = np.arange(1, 10).reshape((3, 3)) print(grid)[[1 2 3] [4 5 6] [7 8 9]]Note that for this to work, the size of the initial array must match the size of the reshaped array. 
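For instance (a quick illustration of that constraint, not from the original text), trying to reshape 9 elements into a shape that holds a different number of elements raises an error:

```python
import numpy as np

try:
    np.arange(1, 10).reshape((2, 4))  # 9 elements cannot fill a 2 x 4 = 8 element grid
except ValueError as err:
    print("ValueError:", err)
```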
Where possible, the ``reshape`` method will use a no-copy view of the initial array, but with non-contiguous memory buffers this is not always the case.Another common reshaping pattern is the conversion of a one-dimensional array into a two-dimensional row or column matrix.This can be done with the ``reshape`` method, or more easily done by making use of the ``newaxis`` keyword within a slice operation:x = np.array([1, 2, 3]) # row vector via reshape x.reshape((1, 3)) # row vector via newaxis x[np.newaxis, :] # column vector via reshape x.reshape((3, 1)) # column vector via newaxis x[:, np.newaxis]Some Numpy Special Arrays Numpy Zeros: numpy.zeros()Numpy zeros function returns a numpy array of only zeros with specified shape and data type.Syntax zeros(shape, dtype=float, order='C') 1. shape – This is the shape of the required array, input has to be an int or a tuple of int. 2. dtype (optional) – This is the required data type of the array, by default it is float. 3. order (optional) – This specifies how the array will be stored in the memory. It can be either ‘C’ for row-major or ‘F’ for column-major. By default, it is ‘C’.# Notice that we did not specify the data type, so by default, it assumed float. # 1-D Array np_1d_zero_array = np.zeros(4) print('Output- ') print(np_1d_zero_array) print('Shape- ', np_1d_zero_array.shape) print('Data type -', np_1d_zero_array.dtype) print("======================================================") # 2-D Array np_2d_zero_array = np.zeros((4,2)) print('Output- ') print(np_2d_zero_array) print('Shape- ', np_2d_zero_array.shape) print('Data type -', np_2d_zero_array.dtype) print("======================================================") # 3-D Array np_3d_zero_array = np.zeros((4,2,3), int) print('Output- ') print(np_3d_zero_array) print('Shape- ', np_3d_zero_array.shape) print('Data type -', np_3d_zero_array.dtype)Output- [0. 0. 0. 0.] Shape- (4,) Data type - float64 ====================================================== Output- [[0. 0.] [0. 0.] [0. 0.] [0. 0.]] Shape- (4, 2) Data type - float64 ====================================================== Output- [[[0 0 0] [0 0 0]] [[0 0 0] [0 0 0]] [[0 0 0] [0 0 0]] [[0 0 0] [0 0 0]]] Shape- (4, 2, 3) Data type - int64Numpy Ones: numpy.ones()This is very similar to Numpy Zero. Here Numpy zeros function returns a numpy array of only ones with specified shape and data type.Syntax ones(shape, dtype=float, order=’C’)# 1-D Array np_1d_ones_array = np.ones(4,int) print('Output- ') print(np_1d_ones_array) print('Shape- ', np_1d_ones_array.shape) print('Data type -', np_1d_ones_array.dtype) print("======================================================") # 2-D Array np_2d_ones_array = np.ones((4,2)) print('Output- ') print(np_2d_ones_array) print('Shape- ', np_2d_ones_array.shape) print('Data type -', np_2d_ones_array.dtype) # 3-D Array np_3d_ones_array = np.ones((4,2,3)) print('Output- ') print(np_3d_ones_array) print('Shape- ', np_3d_ones_array.shape) print('Data type -', np_3d_ones_array.dtype)Output- [1 1 1 1] Shape- (4,) Data type - int64 ====================================================== Output- [[1. 1.] [1. 1.] [1. 1.] [1. 1.]] Shape- (4, 2) Data type - float64 Output- [[[1. 1. 1.] [1. 1. 1.]] [[1. 1. 1.] [1. 1. 1.]] [[1. 1. 1.] [1. 1. 1.]] [[1. 1. 1.] [1. 1. 1.]]] Shape- (4, 2, 3) Data type - float64Numpy Eye: numpy.eye()Numpy eye function helps to create a 2-D array where the diagonal has all ones and zeros elsewhere.Syntax eye(N, M=None, k=0, dtype='float', order='C') 1. 
N – It is the number of rows in the array. It has to be int. 2. M (optional) – It is the number of columns in the array. If not specified then it will default to N. 3. K (optional) – It denotes the position of diagonal ones. By default is zero i.e. in middle. A positive value denotes upper diagonal and negative value lower diagonal. 4. dtype (optional) – This is the required data type of the array, by default it is float. 5. order (optional) – This specifies how the array will be stored in the memory. It can be either ‘C’ for row-major or ‘F’ for column-major. By default, it is ‘C’.np_identity_matrix_oder_4 = np.eye(4) print('Output- ') print(np_identity_matrix_oder_4) print('Shape- ', np_identity_matrix_oder_4.shape) print('Data type -', np_identity_matrix_oder_4.dtype)Output- [[1. 0. 0. 0.] [0. 1. 0. 0.] [0. 0. 1. 0.] [0. 0. 0. 1.]] Shape- (4, 4) Data type - float64Miscellaneous Examples of Numpy Eyenp.eye(4, 5) np.eye(4, 5, k = 2)NumPy Arrays using the function empty()The `empty()` function is used to create arrays when we don't really have any values to create an array. What it does is, it takes the shape of the array as desired and the array is then filled with random values. The trick here is that without even using the random module we are able to build an array full of random values: `empty(shape, dtype)`# arr of shape (5,2) with datatype=float filled with random values empty_arr = np.empty((5,2), dtype=float) print("Array with Float values\n", empty_arr) print("======================================") # observe what happens when executed empty_arr1 = np.empty((4,4), dtype=int) print("Second Array \n", empty_arr1)Array with Float values [[15. 21.66666667] [28.33333333 35. ] [41.66666667 48.33333333] [55. 61.66666667] [68.33333333 75. ]] ====================================== Second Array [[4607182418800017408 0 0 0] [ 0 4607182418800017408 0 0] [ 0 0 4607182418800017408 0] [ 0 0 0 4607182418800017408]]Operations on Arrays Array Concatenation and SplittingAll of the preceding routines worked on single arrays. It's also possible to combine multiple arrays into one, and to conversely split a single array into multiple arrays. We'll take a look at those operations here. 
Concatenation of arraysConcatenation, or joining of two arrays in NumPy, is primarily accomplished using the routines ``np.concatenate``, ``np.vstack``, and ``np.hstack``.``np.concatenate`` takes a tuple or list of arrays as its first argument, as we can see here:x = np.array([1, 2, 3]) y = np.array([3, 2, 1]) print(np.concatenate([x, y])) print("========================================") # You can also concatenate more than two arrays at once: z = [99, 99, 99] print(np.concatenate([x, y, z])) print("========================================") #It can also be used for two-dimensional arrays: grid = np.array([[1, 2, 3],[4, 5, 6]]) # concatenate along the first axis print(np.concatenate([grid, grid])) print("========================================") # concatenate along the second axis (zero-indexed) print(np.concatenate([grid, grid], axis=1))[1 2 3 3 2 1] ======================================== [ 1 2 3 3 2 1 99 99 99] ======================================== [[1 2 3] [4 5 6] [1 2 3] [4 5 6]] ======================================== [[1 2 3 1 2 3] [4 5 6 4 5 6]]For working with arrays of mixed dimensions, it can be clearer to use the ``np.vstack`` (vertical stack) and ``np.hstack`` (horizontal stack) functions:x = np.array([1, 2, 3]) grid = np.array([[9, 8, 7], [6, 5, 4]]) # vertically stack the arrays np.vstack([x, grid]) # horizontally stack the arrays y = np.array([[99], [99]]) np.hstack([grid, y])Splitting of arraysThe opposite of concatenation is splitting, which is implemented by the functions ``np.split``, ``np.hsplit``, and ``np.vsplit``. For each of these, we can pass a list of indices giving the split points:x = [1, 2, 3, 99, 99, 3, 2, 1] x1, x2, x3 = np.split(x, [3, 5]) print(x1, x2, x3)[1 2 3] [99 99] [3 2 1]Notice that *N* split-points, leads to *N + 1* subarrays.The related functions ``np.hsplit`` and ``np.vsplit`` are similar:grid = np.arange(16).reshape((4, 4)) grid upper, lower = np.vsplit(grid, [2]) print(upper) print(lower) left, right = np.hsplit(grid, [2]) print(left) print(right)[[ 0 1] [ 4 5] [ 8 9] [12 13]] [[ 2 3] [ 6 7] [10 11] [14 15]]Similarly, ``np.dsplit`` will split arrays along the third axis. Numpy Arrays Universal Functions Computation on NumPy arrays can be very fast, or it can be very slow.The key to making it fast is to use *vectorized* operations, generally implemented through NumPy's *universal functions* (ufuncs).This section motivates the need for NumPy's ufuncs, which can be used to make repeated calculations on array elements much more efficient.It then introduces many of the most common and useful arithmetic ufuncs available in the NumPy package. 
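As a small motivating example (my own illustration, not part of the original text), compare a plain Python loop with the equivalent ufunc call on a large array; the vectorized version runs in compiled code and is typically orders of magnitude faster:

```python
import numpy as np

big = np.random.random(1_000_000)

def add_one_loop(values):
    # element-by-element loop in pure Python: every iteration goes through
    # the interpreter, which is what makes this slow
    out = np.empty_like(values)
    for i in range(len(values)):
        out[i] = values[i] + 1.0
    return out

%timeit add_one_loop(big)
%timeit np.add(big, 1.0)
```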
Array arithmeticNumPy's ufuncs feel very natural to use because they make use of Python's native arithmetic operators.The standard addition, subtraction, multiplication, and division can all be used:x = np.arange(4) print("x =", x) print("x + 5 =", x + 5) print("x - 5 =", x - 5) print("x * 2 =", x * 2) print("x / 2 =", x / 2) print("x // 2 =", x // 2) # floor division # There is also a unary ufunc for negation, # and a ``**`` operator for exponentiation, # and a ``%`` operator for modulus print("-x = ", -x) print("x ** 2 = ", x ** 2) print("x % 2 = ", x % 2) # In addition, these can be strung together however you wish, # and the standard order of operations is respected: -(0.5*x + 1) ** 2 #Each of these arithmetic operations are simply convenient wrappers around specific functions built into NumPy; # for example, the ``+`` operator is a wrapper for the ``add`` function np.add(x, 2)The following table lists the arithmetic operators implemented in NumPy:| Operator | Equivalent ufunc | Description ||---------------|---------------------|---------------------------------------||``+`` |``np.add`` |Addition (e.g., ``1 + 1 = 2``) ||``-`` |``np.subtract`` |Subtraction (e.g., ``3 - 2 = 1``) ||``-`` |``np.negative`` |Unary negation (e.g., ``-2``) ||``*`` |``np.multiply`` |Multiplication (e.g., ``2 * 3 = 6``) ||``/`` |``np.divide`` |Division (e.g., ``3 / 2 = 1.5``) ||``//`` |``np.floor_divide`` |Floor division (e.g., ``3 // 2 = 1``) ||``**`` |``np.power`` |Exponentiation (e.g., ``2 ** 3 = 8``) ||``%`` |``np.mod`` |Modulus/remainder (e.g., ``9 % 4 = 1``)|Additionally there are Boolean/bitwise operators; we will explore these in [Comparisons, Masks, and Boolean Logic](). Absolute valueJust as NumPy understands Python's built-in arithmetic operators, it also understands Python's built-in absolute value function:x = np.array([-2, -1, 0, 1, 2]) abs(x) # The corresponding NumPy ufunc is ``np.absolute``, which is also available under the alias ``np.abs``: print(np.absolute(x)) print("===============================") print(np.absolute(x)) # This ufunc can also handle complex data, in which the absolute value returns the magnitude: x = np.array([3 - 4j, 4 - 3j, 2 + 0j, 0 + 1j]) np.abs(x)Trigonometric functionsNumPy provides a large number of useful ufuncs, and some of the most useful for the data scientist are the trigonometric functions.We'll start by defining an array of angles:theta = np.linspace(0, np.pi, 3) print("theta = ", theta) print("sin(theta) = ", np.sin(theta)) print("cos(theta) = ", np.cos(theta)) print("tan(theta) = ", np.tan(theta)) # The values are computed to within machine precision, which is why values # that should be zero do not always hit exactly zero. #Inverse trigonometric functions are also available: x = [-1, 0, 1] print("x = ", x) print("arcsin(x) = ", np.arcsin(x)) print("arccos(x) = ", np.arccos(x)) print("arctan(x) = ", np.arctan(x))x = [-1, 0, 1] arcsin(x) = [-1.57079633 0. 1.57079633] arccos(x) = [3.14159265 1.57079633 0. ] arctan(x) = [-0.78539816 0. 0.78539816]Exponents and logarithmsAnother common type of operation available in a NumPy ufunc are the exponentials:x = [1, 2, 3] print("x =", x) print("e^x =", np.exp(x)) print("2^x =", np.exp2(x)) print("3^x =", np.power(3, x))x = [1, 2, 3] e^x = [ 2.71828183 7.3890561 20.08553692] 2^x = [2. 4. 8.] 
3^x = [ 3 9 27]The inverse of the exponentials, the logarithms, are also available.The basic ``np.log`` gives the natural logarithm; if you prefer to compute the base-2 logarithm or the base-10 logarithm, these are available as well:x = [1, 2, 4, 10] print("x =", x) print("ln(x) =", np.log(x)) print("log2(x) =", np.log2(x)) print("log10(x) =", np.log10(x))x = [1, 2, 4, 10] ln(x) = [0. 0.69314718 1.38629436 2.30258509] log2(x) = [0. 1. 2. 3.32192809] log10(x) = [0. 0.30103 0.60205999 1. ]Ufuncs: Learning MoreMore information on universal functions (including the full list of available functions) can be found on the [NumPy](http://www.numpy.org) and [SciPy](http://www.scipy.org) documentation websites.Recall that you can also access information directly from within IPython by importing the packages and using IPython's tab-completion and help (``?``) functionality Aggregation FunctionsOften when faced with a large amount of data, a first step is to compute summary statistics for the data in question.Perhaps the most common summary statistics are the mean and standard deviation, which allow you to summarize the "typical" values in a dataset, but other aggregates are useful as well (the sum, product, median, minimum and maximum, quantiles, etc.).NumPy has fast built-in aggregation functions for working on arrays; we'll discuss and demonstrate some of them here. Summing the Values in an ArrayAs a quick example, consider computing the sum of all values in an array.Python itself can do this using the built-in ``sum`` function:L = np.random.random(100) sum(L) # The syntax is quite similar to that of NumPy's ``sum`` function, # and the result is the same in the simplest case: np.sum(L)However, because it executes the operation in compiled code, NumPy's version of the operation is computed much more quickly:big_array = np.random.rand(1000000) %timeit sum(big_array) %timeit np.sum(big_array)142 ms ± 3.48 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) 495 µs ± 5.27 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)One has to be careful, though: the ``sum`` function and the ``np.sum`` function are not identical, and this can sometimes lead to confusion!In particular, we point out that their optional arguments have different meanings. Moreover ``np.sum`` is aware of multiple array dimensions.help(sum(big_array)) help(np.sum(big_array))Help on float64 object: class float64(floating, builtins.float) | float64(x=0, /) | | Double-precision floating-point number type, compatible with Python `float` | and C ``double``. | | :Character code: ``'d'`` | :Canonical name: `numpy.double` | :Alias: `numpy.float_` | :Alias on this platform (Linux x86_64): `numpy.float64`: 64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa. | | Method resolution order: | float64 | floating | inexact | number | generic | builtins.float | builtins.object | | Methods defined here: | | __abs__(self, /) | abs(self) | | __add__(self, value, /) | Return self+value. | | __bool__(self, /) | self != 0 | | __divmod__(self, value, /) | Return divmod(self, value). | | __eq__(self, value, /) | Return self==value. 
| | __float__(self, /) | float(self) | | __floordiv__(self, value, /) [...]Minimum and MaximumSimilarly, Python has built-in ``min`` and ``max`` functions, used to find the minimum value and maximum value of any given array:min(big_array), max(big_array) # NumPy's corresponding functions have similar syntax, and again operate much more quickly: np.min(big_array), np.max(big_array) %timeit min(big_array), max(big_array) %timeit np.min(big_array), np.max(big_array)172 ms ± 1.01 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) 1.04 ms ± 7.38 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)For ``min``, ``max``, ``sum``, and several other NumPy aggregates, a shorter syntax is to use methods of the array object itself:print(big_array.min(), big_array.max(), big_array.sum())1.4057692298008462e-06 0.9999994392723005 500202.5348847683Whenever possible, make sure that you are using the NumPy version of these aggregates when operating on NumPy arrays! Multi dimensional aggregatesOne common type of aggregation operation is an aggregate along a row or column.Say you have some data stored in a two-dimensional array:M = np.random.random((3, 4)) print(M)[[0.50063048 0.07383653 0.49018646 0.72521956] [0.84926562 0.10226215 0.99559424 0.59250301] [0.53509 0.88518089 0.25518136 0.13130483]]By default, each NumPy aggregation function will return the aggregate over the entire array:M.sum()It is possible, for aggregation functions, to add an argument specifying the axis along which the aggregate is computed. For example, we can find the minimum value within each column by specifying axis=0:M.min(axis=0)The function returns four values, corresponding to the four columns of numbers.You could test the similar axis specification with the `max` aggregate. The way the axis is specified here can be confusing to users coming from other languages.The ``axis`` keyword specifies the *dimension of the array that will be collapsed*, rather than the dimension that will be returned.So specifying ``axis=0`` means that the first axis will be collapsed: for two-dimensional arrays, this means that values within each column will be aggregated. Other aggregation functionsNumPy provides many other aggregation functions. 
The following table provides a list of useful aggregation functions available in NumPy:

|Function Name | NaN-safe Version | Description |
|-------------------|---------------------|-----------------------------------------------|
| ``np.sum`` | ``np.nansum`` | Compute sum of elements |
| ``np.prod`` | ``np.nanprod`` | Compute product of elements |
| ``np.mean`` | ``np.nanmean`` | Compute mean of elements |
| ``np.std`` | ``np.nanstd`` | Compute standard deviation |
| ``np.var`` | ``np.nanvar`` | Compute variance |
| ``np.min`` | ``np.nanmin`` | Find minimum value |
| ``np.max`` | ``np.nanmax`` | Find maximum value |
| ``np.argmin`` | ``np.nanargmin`` | Find index of minimum value |
| ``np.argmax`` | ``np.nanargmax`` | Find index of maximum value |
| ``np.median`` | ``np.nanmedian`` | Compute median of elements |
| ``np.percentile`` | ``np.nanpercentile`` | Compute rank-based statistics of elements |
| ``np.any`` | N/A | Evaluate whether any elements are true |
| ``np.all`` | N/A | Evaluate whether all elements are true |

Example: What is the Average Height of a list of heights?heights = np.array([189,170,189,163, 183, 171, 185, 168, 173, 183, 173, 173, 175, 178, 183, 193, 178, 173, 174, 183, 183, 168, 170, 178, 182, 180, 183, 178, 182, 188, 175, 179, 183, 193, 182, 183, 177, 185, 188, 188, 182, 185])Now that we have this data array, we can compute a variety of summary statistics:print("Mean height: ", heights.mean()) print("Standard deviation:", heights.std()) print("Minimum height: ", heights.min()) print("Maximum height: ", heights.max()) # We may also wish to compute quantiles: print("25th percentile: ", np.percentile(heights, 25)) print("Median: ", np.median(heights)) print("75th percentile: ", np.percentile(heights, 75))25th percentile: 174.25 Median: 182.0 75th percentile: 183.0We see that the median height is $182$ cm, or just shy of six feet.Sometimes it's more useful to have a visual representation of the data, which we can accomplish using the Matplotlib tool (we'll discuss Matplotlib more fully towards the end of this week). For example, this code generates the following chart:import matplotlib.pyplot as plt import seaborn; seaborn.set() # set plot style plt.hist(heights) plt.title('Height Distribution') plt.xlabel('height (cm)') plt.ylabel('number');Short Exercise First, let's make a common array to work with.

```python
import numpy as np

np.random.seed(21)  # This guarantees the code will generate the same set of random numbers whenever executed
random_integers = np.random.randint(1, high=500000, size=(20, 5))
random_integers
```

1. What is the average value of the second column?
2. What is the average value of the first 5 rows of the third and fourth columns?

BroadcastingNumPy's universal functions can be used to *vectorize* operations and thereby remove slow Python loops.Another means of vectorizing operations is to use NumPy's *broadcasting* functionality.Broadcasting is simply a set of rules for applying binary ufuncs (e.g., addition, subtraction, multiplication, etc.) on arrays of different sizes.# Recall that for arrays of the same size, binary operations # are performed on an element-by-element basis: a = np.array([0, 1, 2]) b = np.array([5, 5, 5]) a + bBroadcasting allows these types of binary operations to be performed on arrays of different sizes–for example, we can just as easily add a scalar (think of it as a zero-dimensional array) to an array:a + 5We can think of this as an operation that stretches or duplicates the value ``5`` into the array ``[5, 5, 5]``, and adds the results.
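To make that mental model concrete, here is a minimal sketch (reusing ``a`` from the cell above) showing that the scalar form and the explicitly stretched form give identical results. NumPy never actually materializes the stretched array in memory; the duplication is only conceptual.

```python
import numpy as np

a = np.array([0, 1, 2])
scalar_sum = a + 5                        # broadcasting a scalar
stretched_sum = a + np.array([5, 5, 5])   # the explicitly "stretched" version
print(scalar_sum)                                  # [5 6 7]
print(np.array_equal(scalar_sum, stretched_sum))   # True
```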
We can similarly extend this to arrays of higher dimension. Observe the result when we add a one-dimensional array to a two-dimensional array:M = np.ones((3, 3)) M M + aHere the one-dimensional array ``a`` is stretched, or broadcast across the second dimension in order to match the shape of ``M``.While these examples are relatively easy to understand, more complicated cases can involve broadcasting of both arrays. Consider the following example:a = np.arange(3) b = np.arange(3)[:, np.newaxis] print(a) print(b) a + bJust as before we stretched or broadcasted one value to match the shape of the other, here we've stretched *both* ``a`` and ``b`` to match a common shape, and the result is a two-dimensional array!The geometry of these examples is visualized in the following figure The light boxes represent the broadcasted values There are a few rules attached to this *broadcasting* idea. Broadcasting in NumPy follows a strict set of rules to determine the interaction between the two arrays:- Rule 1: If the two arrays differ in their number of dimensions, the shape of the one with fewer dimensions is *padded* with ones on its leading (left) side.- Rule 2: If the shape of the two arrays does not match in any dimension, the array with shape equal to 1 in that dimension is stretched to match the other shape.- Rule 3: If in any dimension the sizes disagree and neither is equal to 1, an error is raised.To make these rules clear, let's consider a few examples in detail. Broadcasting example 1Let's look at adding a two-dimensional array to a one-dimensional array:M = np.ones((2, 3)) a = np.arange(3)Let's consider an operation on these two arrays. The shape of the arrays are- ``M.shape = (2, 3)``- ``a.shape = (3,)``We see by rule 1 that the array ``a`` has fewer dimensions, so we pad it on the left with ones:- ``M.shape -> (2, 3)``- ``a.shape -> (1, 3)``By rule 2, we now see that the first dimension disagrees, so we stretch this dimension to match:- ``M.shape -> (2, 3)``- ``a.shape -> (2, 3)``The shapes match, and we see that the final shape will be ``(2, 3)``:M + aBroadcasting example 2Let's take a look at an example where both arrays need to be broadcast:a = np.arange(3).reshape((3, 1)) b = np.arange(3)Again, we'll start by writing out the shape of the arrays:- ``a.shape = (3, 1)``- ``b.shape = (3,)``Rule 1 says we must pad the shape of ``b`` with ones:- ``a.shape -> (3, 1)``- ``b.shape -> (1, 3)``And rule 2 tells us that we upgrade each of these ones to match the corresponding size of the other array:- ``a.shape -> (3, 3)``- ``b.shape -> (3, 3)``Because the result matches, these shapes are compatible. We can see this here:a + bBroadcasting example 3Now let's take a look at an example in which the two arrays are not compatible:M = np.ones((3, 2)) a = np.arange(3)This is just a slightly different situation than in the first example: the matrix ``M`` is transposed.How does this affect the calculation? 
The shape of the arrays are- ``M.shape = (3, 2)``- ``a.shape = (3,)``Again, rule 1 tells us that we must pad the shape of ``a`` with ones:- ``M.shape -> (3, 2)``- ``a.shape -> (1, 3)``By rule 2, the first dimension of ``a`` is stretched to match that of ``M``:- ``M.shape -> (3, 2)``- ``a.shape -> (3, 3)``Now we hit rule 3–the final shapes do not match, so these two arrays are incompatible, as we can observe by attempting this operation:M + aNote the potential confusion here: you could imagine making ``a`` and ``M`` compatible by, say, padding ``a``'s shape with ones on the right rather than the left.But this is not how the broadcasting rules work!That sort of flexibility might be useful in some cases, but it would lead to potential areas of ambiguity.If right-side padding is what you'd like, you can do this explicitly by reshaping the array (we'll use the ``np.newaxis`` keyword.a[:, np.newaxis].shape M + a[:, np.newaxis]Also note that while we've been focusing on the ``+`` operator here, these broadcasting rules apply to *any* binary ``ufunc``.For example, here is the ``logaddexp(a, b)`` function, which computes ``log(exp(a) + exp(b))`` with more precision than the naive approach:np.logaddexp(M, a[:, np.newaxis])We saw that ufuncs allow a NumPy user to remove the need to explicitly write slow Python loops. Broadcasting extends this ability.One commonly seen example is when *centering an array* of data.Imagine you have an array of 10 observations, each of which consists of 3 values.We'll store this in a $10 \times 3$ array:X = np.random.random((10, 3)) # We can compute the mean of each feature using the ``mean`` aggregate across the #first dimension: Xmean = X.mean(0) print(Xmean) # And now we can center the ``X`` array by subtracting the mean (this is a broadcasting operation): X_centered = X - Xmean # we can check that the centered array has near zero mean: # To within machine precision, the mean is now zero. X_centered.mean(0)[0.48773885 0.42332575 0.50590496]Another place that broadcasting is very useful is in displaying images based on two-dimensional functions.If we want to define a function $z = f(x, y)$, broadcasting can be used to compute the function across the grid:# x and y have 50 steps from 0 to 5 x = np.linspace(0, 5, 50) y = np.linspace(0, 5, 50)[:, np.newaxis] z = np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x) # We have used Matplotlib to plot this two-dimensional array (these tools will be discussed in full later) import matplotlib.pyplot as plt plt.imshow(z, origin='lower', extent=[0, 5, 0, 5], cmap='viridis') plt.colorbar(); #The result is a compelling visualization of the two-dimensional function.Boolean MasksThis section covers the use of Boolean masks to examine and manipulate values within NumPy arrays.Masking comes up when you want to extract, modify, count, or otherwise manipulate values in an array based on some criterion: for example, you might wish to count all values greater than a certain value, or perhaps remove all outliers that are above some threshold.In NumPy, Boolean masking is often the most efficient way to accomplish these types of tasks. In the sections addressing `Universal Functions`(02.03-Computation-on-arrays-ufuncs.ipynb) we introduced ufuncs, and focused in particular on arithmetic operators. 
We saw that using ``+``, ``-``, ``*``, ``/``, and others on arrays leads to element-wise operations.NumPy also implements comparison operators such as ``<`` (less than) and ``>`` (greater than) as element-wise ufuncs.The result of these comparison operators is always an array with a Boolean data type.All six of the standard comparison operations are available:x = np.array([1, 2, 3, 4, 5]) x < 3 # less than x > 3 # greater than x <= 3 # less than or equal # x >= 3 # greater than or equal # Uncomment and run the cell. x == 3 # equal # x != 3 # not equal # Uncomment and run the cell. #It is also possible to do an element-wise comparison of two arrays, # and to include compound expressions: (2 * x) == (x ** 2)As in the case of arithmetic operators, the comparison operators are implemented as ufuncs in NumPy; for example, when you write ``x < 3``, internally NumPy uses ``np.less(x, 3)``. A summary of the comparison operators and their equivalent ufunc is shown here:

| Operator | Equivalent ufunc |
|----------|----------------------|
| ``==``   | ``np.equal``         |
| ``!=``   | ``np.not_equal``     |
| ``<``    | ``np.less``          |
| ``<=``   | ``np.less_equal``    |
| ``>``    | ``np.greater``       |
| ``>=``   | ``np.greater_equal`` |

Just as in the case of arithmetic ufuncs, these will work on arrays of any size and shape.Here is a two-dimensional example:rng = np.random.RandomState(0) x = rng.randint(10, size=(3, 4)) x x < 6In each case, the result is a Boolean array, and NumPy provides a number of straightforward patterns for working with these Boolean results. Working with Boolean ArraysGiven a Boolean array, there are a host of useful operations you can do.We'll work with ``x``, the two-dimensional array we created earlier.print(x)[[5 0 3 3] [7 9 3 5] [2 4 7 6]]To count the number of ``True`` entries in a Boolean array, ``np.count_nonzero`` is useful:# how many values less than 6? np.count_nonzero(x < 6) # Another way to get at this information is to use np.sum; in this case, # False is interpreted as 0, and True is interpreted as 1: np.sum(x < 6)The benefit of ``np.sum`` is that, like other NumPy aggregation functions, this summation can be done along rows or columns as well:# how many values less than 6 in each row? np.sum(x < 6, axis=1)This counts the number of values less than 6 in each row of the matrix. If we're interested in quickly checking whether any or all the values are true, we can use ``np.any`` or ``np.all``:# are there any values greater than 8? np.any(x > 8) # are there any values less than zero? np.any(x < 0) # are all values less than 10? np.all(x < 10) # are all values equal to 6? np.all(x == 6)``np.all`` and ``np.any`` can be used along particular axes as well. For example:# are all values in each row less than 8?
np.all(x < 8, axis=1) # Here all the elements in the first and third rows are less than 8, # while this is not the case for the second row.Boolean Arrays as MasksA more powerful pattern is to use Boolean arrays as masks, to select particular subsets of the data themselves.Returning to our ``x`` array from before, suppose we want an array of all values in the array that are less than, say, 5:x # We can obtain a Boolean array for this condition easily, as we've already seen: x < 5Now to *select* these values from the array, we can simply index on this Boolean array; this is known as a *masking* operation:x[x < 5]What is returned is a one-dimensional array filled with all the values that meet this condition; in other words, all the values in positions at which the mask array is ``True``. We are then free to operate on these values as we wish. Additional Functionality Linear Algebra Using NumpyLinear algebra is an integral part of the domain of Machine Learning. Most of the algorithms we will deal with can be concisely expressed using the operations of linear algebra. One of the most widely used operations in linear algebra is the dot product. This can be performed on two compatible `ndarrays` by using the `dot` function.A = np.array([[1,2,3],[4,5,6],[7,8,9]]) B = np.array([[9,8,7],[6,5,4],[1,2,3]]) A.dot(B)With Anaconda’s scientific Python package based around Python 3.5 and above,one can use the `@` symbol for matrix multiplication, as follows:A = np.ones((2, 2)) B = np.ones((2, 2)) A @ BAnother popular matrix operation is transpose of a matrix. This can be easily achieved by using the `T` functionA = np.arange(15).reshape(3,5) A.TOftentimes, we need to find out decomposition of a matrix into its constituents factors. This is called matrix factorization. This can be achieved by the appropriate functions. A popular matrix factorization method is `SVD factorization`, which returns decomposition of a matrix into three different matrices. This can be done using `linalg.svd` function.np.linalg.svd(A)Linear algebra is often also used to solve a system of equations. Using the matrix notation of system of equations and the provided function of numpy, we can easily solve such a system of equation. 
Consider the system of equations:\begin{cases} 7x + 5y -3z = 16 \\ 3x - 5y + 2z = -8 \\ 5x + 3y - 7z = 0 \end{cases} This can be represented as two matrices: the coefficient matrix (a in the example) and the constants vector (b in the example).a = np.array([[7,5,-3], [3,-5,2],[5,3,-7]]) b = np.array([16,-8,0]) x = np.linalg.solve(a, b) xWe can also check if the solution is correct using the `np.allclose` function.np.allclose(np.dot(a, x), b)First time users stay time 以下の3通りに分けて初回起動ユーザーの平均起動時間を計算する+ daily+ weekly+ monthly Input Parameters+ DATE 集計期間の終わりの日+ DEBUG 手動実行時のみTrue+ FREQUENCY 実行頻度+ BIGQUERY_PROJECT_ID bigqueryのプロジェクト名+ BIGQUERY_DATASET bigqueryのデータセット+ PACKAGE_NAME bigqueryのパッケージ名+ OUTPUT_BIGQUERY_PROJECT_ID 出力先のBQのプロジェクト名+ ISLATEST 最新の日付を対象にする場合はTrue,任意の日付を指定する場合はFalse Output Range+ dailyDATEの1日前を対象に集計 ex.DATE="2021-02-02"の場合は"2021-02-01を対象に集計"+ weeklyDATEの1日前から7日を対象に集計 ex.DATE="2021-02-22"の場合は"2021-02-15"から"2021-02-21を対象に集計"+ monthlyDATEの1日前から1ヶ月を対象に集計 ex.DATE="2021-02-01"の場合は"2021-01-01"から"2021-01-31"を対象に集計" Output Data+ date 集計の開始日+ android_first_users_stay_time Android初回起動ユーザーの平均滞在時間+ ios_first_users_stay_time iOS初回起動ユーザーの平均滞在時間+ all_first_users_stay_time 全ユーザーの平均滞在時間 ParametersDATE = "2020-01-01" # @param {type: "date"} DEBUG = True # @param {type: "boolean"} 手動実行時のみTrueにする。Cloud FunctionsからFalseを渡される。 FREQUENCY = "monthly" # @param {type: "string"} BIGQUERY_PROJECT_ID = "fl-komtar-herbert-offer" # @param {type: "string"} BIGQUERY_DATASET = "analytics_211559993.events_*" # @param {type: "string"} PACKAGE_NAME = "jp.co.hardoff.renk.app.offer" # @param {type: "string"} OUTPUT_BIGQUERY_PROJECT_ID = "fl-komtar-analytics-dashboard" # @param {type: "string"} IS_LATEST = False# @param {type:"boolean"}ConstantsSESSION_TIMEOUT_MINS = 30 #@param {type:"number"} METRICS_NAME = "first-time-users-stay-time"VersionVERSION = "7"Authorizeif DEBUG: from google.colab import auth auth.authenticate_user()Importsimport pandas as pd import numpy as np from datetime import timedelta,datetime from pytz import timezoneGet Input Datasets データの取得期間if IS_LATEST: date = (datetime.now(timezone("Asia/Tokyo"))-timedelta(days=1)) else: date = datetime.strptime(DATE,"%Y-%m-%d") - timedelta(days=1) if FREQUENCY == "daily": start = date end = date elif FREQUENCY =="weekly": start = date-timedelta(days=6) end = date elif FREQUENCY == "monthly": end = date start = datetime(end.year,end.month,1) else: raise Exception("Invalid frequency value") start_date = start.strftime("%Y%m%d") end_date = end.strftime("%Y%m%d") base_start = start-timedelta(days=1) base_end = end+timedelta(days=1) start_date, end_dateGBQからデータを取得するduration_func = """ var current = arr[0]; var result = []; for (var i = 0; i < arr.length - 1; i ++) { var diff = arr[i + 1] - arr[i]; if ((diff / 1000000) >= 1800) { result.push({ event_timestamp: current, duration: (arr[i] - current) / 1000000, }); current = arr[i + 1]; } } result.push({ event_timestamp: current, duration: (arr[arr.length - 1] - current) / 1000000, }); result = result.filter((r) => r.duration > 0) return result; """ query = f""" CREATE TEMP FUNCTION DURATION(arr ARRAY) RETURNS ARRAY> LANGUAGE js AS ''' {duration_func} '''; WITH base_table as ( SELECT DISTINCT FORMAT_TIMESTAMP("%Y%m%d", TIMESTAMP_MICROS(event_timestamp), 'Asia/Tokyo') AS JST,user_pseudo_id,platform, event_timestamp,user_first_touch_timestamp FROM `{BIGQUERY_PROJECT_ID}.{BIGQUERY_DATASET}` WHERE app_info.id like "{PACKAGE_NAME}%" AND _table_suffix BETWEEN "{base_start.strftime("%Y%m%d")}" AND 
"{base_end.strftime("%Y%m%d")}" ), event_arr AS ( SELECT DATE(TIMESTAMP_MICROS(event_timestamp), "Asia/Tokyo") AS date, user_pseudo_id, UPPER(platform) as OS, user_first_touch_timestamp, DURATION(ARRAY_AGG(event_timestamp ORDER BY event_timestamp)) AS duration_arr FROM ( SELECT * FROM base_table WHERE JST BETWEEN "{start_date}" AND "{end_date}" UNION ALL SELECT * FROM base_table WHERE JST BETWEEN "{start_date}" AND "{end_date}" ) GROUP BY date, user_pseudo_id, user_first_touch_timestamp, OS ), events AS ( SELECT date, TIMESTAMP_MICROS(event_timestamp) AS Time, user_pseudo_id, user_first_touch_timestamp, TIMESTAMP_MICROS(user_first_touch_timestamp) AS First_touch_Time, OS, darr.event_timestamp, darr.duration FROM event_arr CROSS JOIN UNNEST(event_arr.duration_arr) AS darr ) SELECT * FROM events ORDER BY user_pseudo_id """ df_session_duration = pd.DataFrame(columns = ['date', 'Time', 'user_pseudo_id', 'user_first_touch_timestamp','First_touch_Time', 'OS', 'event_timestamp', 'duration']) df_gbq = pd.io.gbq.read_gbq(query, project_id = BIGQUERY_PROJECT_ID) df_session_duration = pd.concat([df_session_duration, df_gbq]) df_session_duration = df_session_duration[(df_session_duration["date"] >= start_date) & (df_session_duration["date"] <= end_date)] df_session_duration期間内の初回起動者を取得unique_first_users = df_session_duration[df_session_duration["Time"]==df_session_duration["First_touch_Time"]]["user_pseudo_id"].unique() unique_first_users期間内のユニークなユーザーの行取得df_first_users_duration = df_session_duration[df_session_duration["user_pseudo_id"].isin(unique_first_users)] df_first_users_durationAll Device Durationsession_timeout = pd.Timedelta('%d min' % SESSION_TIMEOUT_MINS).total_seconds() session_timeout session_duration_list = [] # ユーザ毎に滞在時間を求める。 for g_user_id, df_g_user in df_first_users_duration.groupby(["user_pseudo_id","OS"]): previous_row = None session_duration = pd.Timedelta('0 min') # 各ユーザのイベントを1つ1つ確認する。 for row in df_g_user.itertuples(): # 一番初めのイベントの値を保存する。 if previous_row is None: previous_row = row session_duration = pd.Timedelta(seconds = previous_row.duration) continue # 前後のイベント間隔が30分以上ひらいた時、滞在時間を求める。 diff_session = datetime.fromtimestamp(row.event_timestamp/1000000)-datetime.fromtimestamp(previous_row.event_timestamp/1000000) #追加 diff_session = diff_session.total_seconds() #追加 if diff_session > session_timeout: session_duration_list.append({ "date": previous_row.date, "event_timestamp": previous_row.event_timestamp, "user_pseudo_id": g_user_id[0], "OS" : g_user_id[1], "session_duration": session_duration, }) session_duration = pd.Timedelta(seconds = row.duration) else: # 30分以下の場合は同じセッションに該当するとみなす。 session_duration += pd.Timedelta(seconds = row.duration) previous_row = row # 一番最後のイベントを計測する。 session_duration = pd.Timedelta(seconds = row.duration) session_duration_list.append({ "date" : previous_row.date, "event_timestamp": previous_row.event_timestamp, "user_pseudo_id" : g_user_id[0], "OS" : g_user_id[1], "session_duration": session_duration, }) df_session_duration_list = pd.DataFrame(session_duration_list) df_session_duration_list = df_session_duration_list.sort_values("date").reset_index(drop=True) df_session_duration_list["session_duration"] = (df_session_duration_list.session_duration.astype(np.int64) / 1000000000).astype("int64") df_session_duration_list.sort_values("user_pseudo_id") all_first_users_stay_time = df_session_duration_list["session_duration"].mean() all_first_users_stay_timeAndroiddf_android_users = df_session_duration_list[df_session_duration_list["OS"]=="ANDROID"] 
df_android_users android_avg_session_duration = df_android_users["session_duration"].mean() android_avg_session_durationIOSdf_ios_users = df_session_duration_list[df_session_duration_list["OS"]=="IOS"] df_ios_users ios_avg_session_duration = df_ios_users["session_duration"].mean() ios_avg_session_durationOutputdf_output = pd.DataFrame(columns=["android_first_users_stay_time","ios_first_users_stay_time","all_first_users_stay_time"],index=[0]) df_output.insert(0, "date", start.strftime(format="%Y-%m-%d")) df_output["date"] = pd.to_datetime(df_output["date"], format="%Y-%m-%d").dt.date df_output["android_first_users_stay_time"] = android_avg_session_duration df_output["ios_first_users_stay_time"] = ios_avg_session_duration df_output["all_first_users_stay_time"] = all_first_users_stay_time df_output = df_output.round(3) df_output.to_gbq(f"""{PACKAGE_NAME.replace(".","_")}_{METRICS_NAME.replace("-","_")}.{FREQUENCY}_events_{start.strftime(format="%Y-%m-%d").replace("-","")}""", if_exists="replace", table_schema=[{'name': 'date','type': 'DATE'}, {'name': 'android_first_users_stay_time','type': 'FLOAT64'}, {'name': 'ios_first_users_stay_time','type': 'FLOAT64'}, {'name': 'all_first_users_stay_time','type': 'FLOAT64'} ], project_id=OUTPUT_BIGQUERY_PROJECT_ID) df_output1it [00:04, 4.28s/it]1d state space model of a mass-spring system in continuous timeFrom https://srush.github.io/annotated-s4/(code at https://github.com/srush/annotated-s4/blob/main/s4/s4.py)!pip install celluloid from functools import partial import jax import jax.numpy as np #from flax import linen as nn #from jax.nn.initializers import lecun_normal from jax.numpy.linalg import eig, inv, matrix_power from jax.scipy.signal import convolve rng = jax.random.PRNGKey(1) def random_SSM(rng, N): a_r, b_r, c_r = jax.random.split(rng, 3) A = jax.random.uniform(a_r, (N, N)) B = jax.random.uniform(b_r, (N, 1)) C = jax.random.uniform(c_r, (1, N)) return A, B, C def discretize(A, B, C, step): I = np.eye(A.shape[0]) BL = inv(I - (step / 2.0) * A) Ab = BL @ (I + (step / 2.0) * A) Bb = (BL * step) @ B return Ab, Bb, C def scan_SSM(Ab, Bb, Cb, u, x0): def step(x_k_1, u_k): x_k = Ab @ x_k_1 + Bb @ u_k y_k = Cb @ x_k return x_k, y_k return jax.lax.scan(step, x0, u)[1] def run_SSM(A, B, C, u): L = u.shape[0] N = A.shape[0] Ab, Bb, Cb = discretize(A, B, C, step=1.0 / L) # Run recurrence return scan_SSM(Ab, Bb, Cb, u[:, np.newaxis], np.zeros((N,))) def example_mass(k, b, m): A = np.array([[0, 1], [-k / m, -b / m]]) B = np.array([[0], [1.0 / m]]) C = np.array([[1.0, 0]]) return A, B, C @partial(np.vectorize, signature="()->()") def example_force(t): x = np.sin(10 * t) return x * (x > 0.5) def example_ssm(): # SSM ssm = example_mass(k=40, b=5, m=1) # L samples of u(t). L = 100 step = 1.0 / L ks = np.arange(L) u = example_force(ks * step) # Approximation of y(t). 
y = run_SSM(*ssm, u) # Plotting --- import matplotlib.pyplot as plt import seaborn from celluloid import Camera seaborn.set_context("paper") fig, (ax1, ax2, ax3) = plt.subplots(3) camera = Camera(fig) ax1.set_title("Force $u_k$") ax2.set_title("Position $y_k$") ax3.set_title("Object") ax1.set_xticks([], []) ax2.set_xticks([], []) # Animate plot over time for k in range(0, L, 2): ax1.plot(ks[:k], u[:k], color="red") ax2.plot(ks[:k], y[:k], color="blue") ax3.boxplot( [[y[k, 0] - 0.04, y[k, 0], y[k, 0] + 0.04]], showcaps=False, whis=False, vert=False, widths=10, ) camera.snap() anim = camera.animate() #anim.save("line.gif", dpi=150, writer="imagemagick") example_ssm() def example_ssm2(): # SSM ssm = example_mass(k=40, b=5, m=1) # L samples of u(t). L = 100 step = 1.0 / L ks = np.arange(L) u = example_force(ks * step) # Approximation of y(t). y = run_SSM(*ssm, u) # Plotting --- import matplotlib.pyplot as plt import seaborn from celluloid import Camera seaborn.set_context("paper") fig, (ax1, ax2) = plt.subplots(2, figsize=(20,10)) camera = Camera(fig) ax1.set_title("Force $u_k$") ax2.set_title("Position $y_k$") #ax3.set_title("Object") ax1.set_xticks([], []) ax2.set_xticks([], []) # Animate plot over time for k in range(0, L, 2): ax1.plot(ks[:k], u[:k], color="red") ax2.plot(ks[:k], y[:k], color="blue") camera.snap() anim = camera.animate() #anim.save("line.gif", dpi=150, writer="imagemagick") example_ssm2()Developing an AI applicationGoing forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications. In this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories, you can see a few examples below. The project is broken down into multiple steps:* Load and preprocess the image dataset* Train the image classifier on your dataset* Use the trained classifier to predict image contentWe'll lead you through each part which you'll implement in Python.When you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new.First up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. 
As you work through this notebook and find you need to import a package, make sure to add the import up here.# Imports here ## Imports to load the data from torchvision import datasets, transforms import torch import json ## Imports to build and train the model from torchvision import models from torch import nn, optim import torch.nn.functional as F ##Imports for image processing from PIL import Image import matplotlib.pyplot as plt import numpy as npLoad the dataHere you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook, otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts, training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks.The validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size.The pre-trained networks you'll use were trained on the ImageNet dataset where each color channel was normalized separately. For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1.data_dir = 'flowers' train_dir = data_dir + '/train' valid_dir = data_dir + '/valid' test_dir = data_dir + '/test' # TODO: Define your transforms for the training, validation, and testing sets ## The transforms for the training dataset include random rotation, flip and resize/crop as well as normalization ## The PIL image is converted into a Tensor train_data_transforms = transforms.Compose([transforms.RandomRotation(25), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) ## The transforms for bothe the testing and validation sets are the same ## They do not involve flipping or rotation, but resize/crop and normalzation ## The PIL image is converted into a Tensor test_data_transforms = transforms.Compose([transforms.Resize(255), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) # TODO: Load the datasets with ImageFolder train_image_datasets = datasets.ImageFolder(data_dir + '/train', transform=train_data_transforms) test_image_datasets = datasets.ImageFolder(data_dir + '/test', transform=test_data_transforms) valid_image_datasets = datasets.ImageFolder(data_dir + '/valid', transform=test_data_transforms) # TODO: Using the image datasets and the trainforms, define the dataloaders ## We choose a batch size of 64 images ## To train the model, we want the images to be randomly chosen (shuffle=True), as the model will have a propency to learn better ## It is not the case for the testing and validation datasets train_dataloaders = torch.utils.data.DataLoader(train_image_datasets, batch_size=64, shuffle=True) test_dataloaders = 
torch.utils.data.DataLoader(test_image_datasets, batch_size=64) valid_dataloaders = torch.utils.data.DataLoader(valid_image_datasets, batch_size=64)Label mappingYou'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers.#import json with open('cat_to_name.json', 'r') as f: cat_to_name = json.load(f)Building and training the classifierNow that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features.We're going to leave this part up to you. Refer to [the rubric](https://review.udacity.com/!/rubrics/1663/view) for guidance on successfully completing this section. Things you'll need to do:* Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use)* Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout* Train the classifier layers using backpropagation using the pre-trained network to get the features* Track the loss and accuracy on the validation set to determine the best hyperparametersWe've left a cell open for you below, but use as many as you need. Our advice is to break the problem up into smaller parts you can run separately. Check that each part is doing what you expect, then move on to the next. You'll likely find that as you work through each part, you'll need to go back and modify your previous code. This is totally normal!When training make sure you're updating only the weights of the feed-forward network. You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. Save those hyperparameters to use as default values in the next part of the project.One last important tip if you're using the workspace to run your code: To avoid having your workspace disconnect during the long-running tasks in this notebook, please read in the earlier page in this lesson called Intro toGPU Workspaces about Keeping Your Session Active. You'll want to include code from the workspace_utils.py module.**Note for Workspace users:** If your network is over 1 GB when saved as a checkpoint, there might be issues with saving backups in your workspace. Typically this happens with wide dense layers after the convolutional layers. 
If your saved checkpoint is larger than 1 GB (you can open a terminal and check with `ls -lh`), you should reduce the size of your hidden layers and train again.# TODO: Build and train your network ## Before starting to build and train the model, we add this command to be able to use the GPU if available device = torch.device("cuda" if torch.cuda.is_available() else "cpu") ## Note: we will have to remember to move the model and the data to "device" to be able to run our code ##First step, we create our model from a pretrained model from torchvision ## We choose to use the pretrained model VGG-16 as its overall performance is good without requiring too many layers model = models.vgg16(pretrained=True) ## We look at the details of this pretrained model, in particular to see the details of its default classifier model ## We see that the classifier of the pretrained network VGG-16 has two hidden layers with ReLU activation functions # and one output layer without activation function, that returns values for 1000 classes ## We are going to "freeze" the features, and use a different classifier, which input will still be of size 25088 ## The reason is that we do not need to work with 1000 classes (model pretrained on ImageNet with 1000 classes), but # we only have 102 categories of flowers to classify ## Freeze the parameters, by turning off gradient descent # (no backpropagation on the features layers, only on the classifier) for param in model.parameters(): param.requires_grad = False ## We define a new classifier from nn.Module ## We decide to keep the same number of layers as the initial pretrained model ## we use ReLU for the hidden layers and LogSoftmax for the output layer class Classifier(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(25088, 256) #hidden_layer_1 with same input as first layer of VGG classifier self.fc2 = nn.Linear(256, 128) #hidden_layer_2 self.fc3 = nn.Linear(128, 102) #output layer with 102 classes # Dropout module with 0.2 drop probability self.dropout = nn.Dropout(p=0.2) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = F.log_softmax(self.fc3(x), dim=1) return x ## We replace the classifier used by our model model.classifier = Classifier() ## We define the criterion using NLLoss because the output activation of the classifier is LogSoftmax criterion = nn.NLLLoss() ## We define the optimizer to only train the classifier parameters, the feature parameters are frozen optimizer = optim.SGD(model.classifier.parameters(), lr=0.003) # We move our model to the GPU if available model.to(device) ## We verify here that our new model uses the classifier we defined ## Now that we have defined our model, we are going to train and validate it ## For readability and reusability, we define a few functions that we will use to train and test the model def stage_model(dataset, loop_type): stage_loss = 0 for images, labels in dataset: ## We move the data to the GPU if available images, labels = images.to(device), labels.to(device) if loop_type == 'train': ## Set back the gradient to 0 for each new epoch optimizer.zero_grad() ## Increment the stage_loss counter with the value of the loss at each epoch stage_loss += calc_loss(apply_model(images), labels) ## Use the optimizer to apply corrections on the weights and bias optimizer.step() else: stage_loss += calc_loss(apply_model(images), labels) return stage_loss def calculate_accuracy(dataset): accuracy = 0 for images, labels in dataset: ## We move the data to the GPU if available images, 
labels = images.to(device), labels.to(device) ## The output of the model is log, so to get probabilities we use the exponential function output = torch.exp(apply_model(images)) ## We want to use only the top probability and top classes top_p, top_class = output.topk(1, dim=1) equals = top_class == labels.view(*top_class.shape) ## We use the mean of the distribution to determine the accuracy accuracy += torch.mean(equals.type(torch.FloatTensor)).item() return accuracy def apply_model(image): ## Apply our model to the input images model_output = model(image) return model_output def calc_loss(output, label): ## Calculate the loss and apply backpropagation loss = criterion(output, label) loss.backward() return loss ## We will train our network with 40 epochs epochs = 50 steps = 0 for e in range(epochs): ## First we train the model on the train_dataloaders dataset training_loss = stage_model(train_dataloaders, 'train') ## Then we test our model on the testing set ## To increase the speed of this part of the code, we turn off dropout (as we do not need it for testing), using the model.eval() mode model.eval() testing_loss = stage_model(test_dataloaders, 'test') accuracy = calculate_accuracy(test_dataloaders) model.train() ## At each epoch, we print the performance metrics print("Epoch: {}/{}.. ".format(e+1, epochs), "Training Loss: {:.3f}.. ".format(training_loss/len(train_dataloaders)), "Test Loss: {:.3f}.. ".format(testing_loss/len(test_dataloaders)), "Test Accuracy: {:.3f}".format(accuracy/len(test_dataloaders))) ## Initially, we trained our model for 15 epochs. We got to an accuracy score of about 71-74%. ## But we noticed that between an epoch and the next, the increase of the score was a of few points ## We meant that the model was learning "fast" between epochs and did not reach its optimal point ## Testing our model on the testing set with 30 epochs, we get to an accuracy score of about 83%, with a progression still reasonably high ## When trying with a total of 50 epochs, we see that the score seems to be converging to around 88% ## On the validation set (see below), we obtain an even better score of 89,9% ##We probably got close to the best combination of parameters with VGG16, and we are satisfied with this accuracy score. ##We could explore the improvements of using another pretrained model (such as VGG19 or ResNet18) with a limited number of hidden layersTesting your networkIt's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give you a good estimate for the model's performance on completely new images. Run the test images through the network and measure the accuracy, the same way you did validation. 
You should be able to reach around 70% accuracy on the test set if the model has been trained well.# TODO: Do validation on the test set model.eval() with torch.no_grad(): valid_loss = 0 accuracy = 0 for images, labels in valid_dataloaders: ## the code here is very similar to the training "for loop" images, labels = images.to(device), labels.to(device) log_out = model.forward(images) loss = criterion(log_out, labels) valid_loss += loss.item() output = torch.exp(log_out) top_p, top_class = output.topk(1, dim=1) equals = top_class == labels.view(*top_class.shape) accuracy += torch.mean(equals.type(torch.FloatTensor)).item() ## We print the value of the accuracy score for each image in the validation dataset print("Validation loss:{:.3f}".format(valid_loss/len(valid_dataloaders)), "Validation Accuracy: {:.3f}".format(accuracy/len(valid_dataloaders)))Validation loss:0.024 Validation Accuracy: 0.071 Validation loss:0.061 Validation Accuracy: 0.139 Validation loss:0.104 Validation Accuracy: 0.203 Validation loss:0.153 Validation Accuracy: 0.264 Validation loss:0.186 Validation Accuracy: 0.331 Validation loss:0.211 Validation Accuracy: 0.403 Validation loss:0.216 Validation Accuracy: 0.480 Validation loss:0.232 Validation Accuracy: 0.552 Validation loss:0.263 Validation Accuracy: 0.621 Validation loss:0.272 Validation Accuracy: 0.696 Validation loss:0.304 Validation Accuracy: 0.761 Validation loss:0.362 Validation Accuracy: 0.828 Validation loss:0.389 Validation Accuracy: 0.899Save the checkpointNow that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on.```model.class_to_idx = image_datasets['train'].class_to_idx```Remember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now.# TODO: Save the checkpoint model.class_to_idx = train_image_datasets.class_to_idx checkpoint = {'architecture':'vgg16', 'input_size': 25088, 'hidden_size':[256, 128], 'output_size':102, 'state_dict': model.state_dict(), 'optimizer':optimizer.state_dict(), 'class_to_idx':model.class_to_idx} torch.save(checkpoint, 'checkpoint.pth')Loading the checkpointAt this point it's good to write a function that can load a checkpoint and rebuild the model. 
That way you can come back to this project and keep working on it without having to retrain the network.# TODO: Write a function that loads a checkpoint and rebuilds the model class Classifier(nn.Module): def __init__(self, input_size, hidden_size, output_size): super().__init__() self.fc1 = nn.Linear(input_size, hidden_size[0]) #hidden_layer_1 with same input as first layer of VGG classifier self.fc2 = nn.Linear(hidden_size[0], hidden_size[1]) #hidden_layer_2 self.fc3 = nn.Linear(hidden_size[1], output_size) #output layer with 102 classes # Dropout module with 0.2 drop probability self.dropout = nn.Dropout(p=0.2) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = F.log_softmax(self.fc3(x), dim=1) return x def load_checkpoint(checkpoint): ## we create the model again with the parameters saved if checkpoint['architecture'] == 'vgg16': model = models.vgg16() else: print('We could not build a model from a pretrained model - pretrained model unknown') input_size = checkpoint['input_size'] hidden_size = checkpoint['hidden_size'] output_size = checkpoint['output_size'] model.classifier = Classifier(input_size, hidden_size, output_size) criterion = nn.NLLLoss() ## we load the state_dict model.load_state_dict(checkpoint['state_dict']) ## we load the optimizer optimizer = optim.SGD(model.classifier.parameters(), lr=0.003) optimizer.load_state_dict(checkpoint['optimizer']) ## we freeze the parameters again, as we will still want to train later on only with the classifier for parameter in model.parameters(): parameter.requires_grad = False ## we reuse the same class to index attribute model.class_to_idx = checkpoint['class_to_idx'] return model, criterion, optimizer, checkpoint['class_to_idx'] ## we load the checkpoint file checkpoint = torch.load('checkpoint.pth') ## To load the model, we execute the following command new_model, criterion, optimizer, class_to_idx = load_checkpoint(checkpoint) ## We print the details of our model and verify that it matches what we saved ## Note that if we try to train the network again, the accuracy score will not start from a value close to 0 again new_model ##Let's test our model again on the validation set ##For this, we first copy our model to the GPU if available and turn off dropout (eval mode) new_model.to(device) new_model.eval() with torch.no_grad(): valid_loss = 0 accuracy = 0 for images, labels in test_dataloaders: ## the code here is very similar to the training "for loop" images, labels = images.to(device), labels.to(device) log_out = new_model.forward(images) loss = criterion(log_out, labels) valid_loss += loss.item() output = torch.exp(log_out) top_p, top_class = output.topk(1, dim=1) equals = top_class == labels.view(*top_class.shape) accuracy += torch.mean(equals.type(torch.FloatTensor)).item() ## We print the value of the accuracy score for each image in the validation dataset print("Validation Accuracy: {:.3f}".format(accuracy/len(test_dataloaders))) ##We obtain an accuracy score comparable to the one we had when we ran our model on the validation setInference for classificationNow you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. 
It should look like ```pythonprobs, classes = predict(image_path, model)print(probs)print(classes)> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]> ['70', '3', '45', '62', '55']```First you'll need to handle processing the input image such that it can be used in your network. Image PreprocessingYou'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training. First, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.htmlPIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.htmlPIL.Image.Image.thumbnail) methods. Then you'll need to crop out the center 224x224 portion of the image.Color channels of images are typically encoded as integers 0-255, but the model expected floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`.As before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation. And finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions.def process_image(image): ''' Scales, crops, and normalizes a PIL image for a PyTorch model, returns an Numpy array ''' # TODO: Process a PIL image for use in a PyTorch model ##First, we get as an input the size of the image width, height = image.size ## First we resize the image ## If the height is bigger than the width, then we assign the size 256 to the width ## In order to keep the ratio between the two dimensions, we use a ratio variable if height > width : ratio = int(height/width * 256) new_size = 256, ratio image.thumbnail(new_size) else: ratio = int(width/height *256) new_size = ratio, 256 image.thumbnail(new_size) ## Then we crop from the center width, height = image.size left_margin = (width - 224)/2 right_margin = left_margin + 224 bottom_margin = (height - 224)/2 top_margin = bottom_margin + 224 image = image.crop((left_margin, bottom_margin, right_margin, top_margin)) ## We convert the color channels to floats 0-1 np_image = np.array(image) image = np_image/255 ## We normalize the image mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) image = (image - mean)/std ## Transpose the color channel, with the color channel in the first position image = image.transpose((2, 0, 1)) return imageTo check your work, the function below converts a PyTorch tensor and displays it in the notebook. 
If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).def imshow(image, ax=None, title=None): """Imshow for Tensor.""" if ax is None: fig, ax = plt.subplots() # PyTorch tensors assume the color channel is the first dimension # but matplotlib assumes is the third dimension image = image.transpose((1, 2, 0)) # Undo preprocessing mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) image = std * image + mean # Image needs to be clipped between 0 and 1 or it looks like noise when displayed image = np.clip(image, 0, 1) ax.imshow(image) return ax ## Let's test the two functions image_path = 'flowers/train/1/image_06735.jpg' image = Image.open(image_path) processed_image = process_image(image) imshow(processed_image) image_path = 'flowers/test/28/image_05230.jpg' image = Image.open(image_path) processed_image = process_image(image) imshow(processed_image)Class PredictionOnce you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values.To get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.htmltorch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](Save-the-checkpoint)). Make sure to invert the dictionary so you get a mapping from index to class as well.Again, this method should take a path to an image and a model checkpoint, then return the probabilities and classes.```pythonprobs, classes = predict(image_path, model)print(probs)print(classes)> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]> ['70', '3', '45', '62', '55']```def predict(image_path, model, topk=5): ''' Predict the class (or classes) of an image using a trained deep learning model. ''' # TODO: Implement the code to predict the class from an image file ## We process the raw image image = Image.open(image_path) processed_image = process_image(image) image = torch.FloatTensor(processed_image) ## We move the image to the GPU if available image = image.to(device) image.unsqueeze_(0) ## We only want to predict, so we can disable dropout new_model.eval ## We apply our model on the image, and calculate the top probabilities log_output = model.forward(image) ps = torch.exp(log_output) top_ps, top_class = ps.topk(topk) ## Convert index to class idx_to_class = {} for key in class_to_idx.keys(): idx_to_class[class_to_idx[key]] = key # Extract an array with the top labels ## Convert tensor to numpy array top_class = top_class.to('cpu') top_class = top_class.numpy() top_class = [idx_to_class[x] for x in top_class[0]] #Extract an array with the top probabilities top_ps = top_ps.to('cpu') top_ps = top_ps.numpy() top_ps = top_ps[0] return top_ps, top_class probs, classes = predict('flowers/train/1/image_06735.jpg', new_model) print(probs) print(classes)[ 0.25527748 0.24951909 0.15289307 0.12909067 0.07009785] ['86', '1', '76', '83', '34']Sanity CheckingNow that you can use a trained model for predictions, check to make sure it makes sense. 
Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this:You can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above.# TODO: Display an image along with the top 5 classes def sanity_checking(image_path, model): ## First we load an image, process it to apply the model image = Image.open(image_path) processed_image = process_image(image) ## We calculate the top classes top_ps, top_class = predict(image_path, model) ## For each class, we extract the corresponding name top_class_names = [] for item in top_class: top_class_names.append(cat_to_name[item]) ## We prepare the plotting fig = plt.figure(figsize=(5, 10)) axis_1 = plt.subplot(2,1,1) # The title of the image is the name of the flower with the highest probability image_title = axis_1.set_title(top_class_names[0]) imshow(processed_image, axis_1, image_title) # We plot the graph of the probabilities axis_2 = plt.subplot(2,1,2) axis_2 = plt.yticks(range(5), top_class_names[::-1]) axis_2 = plt.barh(range(5), top_ps[::-1]) ## We render the plotting plt.show() ## We test with one example sanity_checking('flowers/test/28/image_05230.jpg',new_model) sanity_checking('flowers/valid/102/image_08002.jpg',new_model) #Note: #The following resources have been used to build the code of this notebook. #https://github.com/fxzero/Flower-Image-Classifier/blob/master/Image%20Classifier%20Project.ipynb #https://github.com/rajesh-iiith/AIPND-ImageClassifier/blob/master/Image%20Classifier%20Project.ipynb #https://medium.com/@josh_2774/deep-learning-with-pytorch-9574e74d17ad #https://discuss.pytorch.org/t/expected-stride-to-be-a-single-integer-value-or-a-list/17612 #https://discuss.pytorch.org/t/runtimeerror-expected-object-of-type-torch-floattensor-but-found-type-torch-cuda-floattensor-for-argument-2-weight/27483Deep LearningThis notebook demonstrates various deep learning architectures using the MNIST dataThe code uses Tensorflow / Keras, which you may need to installfrom sklearn.datasets import fetch_openml from tensorflow import keras from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split X, y = fetch_openml('mnist_784', version=1, return_X_y=True) y = np.int32(y) X_train = X[:60000] X_test = X[60000:] y_train = y[:60000] y_test = y[60000:]Performing so-called "one hot" encoding on the outputsy_cat_test = keras.utils.to_categorical(y_test) y_cat_train = keras.utils.to_categorical(y_train) models = {}Below are multiple architecture examples. 
Try out different ones and build your own# name = 'simple' # inputs = keras.Input(shape=(X_train.shape[1],)) # h = keras.layers.Dense(128, activation="relu")(inputs) # h = keras.layers.Dense(64, activation="relu")(h) # h = keras.layers.Dense(32, activation="relu")(h) # outputs = keras.layers.Dense(10, activation='softmax')(h) # models[name] = keras.Model(inputs=inputs, outputs=outputs) # optimizer = keras.optimizers.Adam(0.0001) # models[name].compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) # models[name].summary() # name = 'dropout' # inputs = keras.Input(shape=(X_train.shape[1],)) # h = keras.layers.Dropout(0.01)(inputs) # h = keras.layers.Dense(128, activation="relu")(h) # h = keras.layers.Dropout(0.01)(h) # h = keras.layers.Dense(64, activation="relu")(h) # h = keras.layers.Dropout(0.01)(h) # h = keras.layers.Dense(32, activation="relu")(h) # outputs = keras.layers.Dense(10, activation='softmax')(h) # models[name] = keras.Model(inputs=inputs, outputs=outputs) # optimizer = keras.optimizers.Adam(0.0001) # models[name].compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) # name = 'batchnorm' # inputs = keras.Input(shape=(X_train.shape[1],)) # h = keras.layers.Dense(128, activation="relu")(inputs) # h = keras.layers.BatchNormalization()(h) # h = keras.layers.Dense(64, activation="relu")(h) # h = keras.layers.BatchNormalization()(h) # h = keras.layers.Dense(32, activation="relu")(h) # outputs = keras.layers.Dense(10, activation='softmax')(h) # models[name] = keras.Model(inputs=inputs, outputs=outputs) # optimizer = keras.optimizers.Adam(0.0001) # models[name].compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) # name = 'locally connected' # inputs = keras.Input(shape=(28, 28, 1)) # h = keras.layers.LocallyConnected2D(1, kernel_size=(5, 5), activation="relu")(inputs) # h = keras.layers.LocallyConnected2D(1, kernel_size=(5, 5), activation="relu")(h) # h = keras.layers.Flatten()(h) # h = keras.layers.Dense(32, activation="relu")(h) # h = keras.layers.Dense(16, activation="relu")(h) # outputs = keras.layers.Dense(10, activation='softmax')(h) # models[name] = keras.Model(inputs=inputs, outputs=outputs) # optimizer = keras.optimizers.Adam(0.0001) # models[name].compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) # name = 'cnn_simple' # inputs = keras.Input(shape=(28, 28, 1)) # h = keras.layers.Conv2D(1, kernel_size=(5, 5), activation="relu")(inputs) # h = keras.layers.Conv2D(1, kernel_size=(5, 5), activation="relu")(h) # h = keras.layers.Flatten()(h) # h = keras.layers.Dense(32, activation="relu")(h) # h = keras.layers.Dense(16, activation="relu")(h) # outputs = keras.layers.Dense(10, activation='softmax')(h) # models[name] = keras.Model(inputs=inputs, outputs=outputs) # optimizer = keras.optimizers.Adam(0.0001) # models[name].compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) name = 'CNN' inputs = keras.Input(shape=(28, 28, 1)) h = keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu")(inputs) h = keras.layers.MaxPool2D(pool_size=(2,2))(h) h = keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu")(h) h = keras.layers.MaxPool2D(pool_size=(2,2))(h) h = keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu")(h) h = keras.layers.Flatten()(h) h = keras.layers.Dense(16, activation="relu")(h) outputs = keras.layers.Dense(10, activation='softmax')(h) models[name] = keras.Model(inputs=inputs, outputs=outputs) optimizer 
= keras.optimizers.Adam(0.0001) models[name].compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) models['CNN'].summary() # name = 'CNN + Dropout + Batchnorm' # inputs = keras.Input(shape=(28, 28, 1)) # h = keras.layers.Dropout(0.01)(inputs) # h = keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu")(h) # h = keras.layers.BatchNormalization()(h) # h = keras.layers.MaxPool2D(pool_size=(2,2))(h) # h = keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu")(h) # h = keras.layers.BatchNormalization()(h) # h = keras.layers.MaxPool2D(pool_size=(2,2))(h) # h = keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu")(h) # h = keras.layers.BatchNormalization()(h) # h = keras.layers.Flatten()(h) # h = keras.layers.Dense(16, activation="relu")(h) # outputs = keras.layers.Dense(10, activation='softmax')(h) # models[name] = keras.Model(inputs=inputs, outputs=outputs) # optimizer = keras.optimizers.Adam(0.0001) # models[name].compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) # models[name].summary()We can now train the model for several epochs (1 epoch = one full pass through the dataset)NB: depending on the model, you need to shape the inputs differently!Training for 30 epochs (depending on the model and your computer hardware) can take a while# train 1d models (dense etc) # models[name].fit(X_train, y_cat_train, epochs=30, validation_data=(X_test, y_cat_test), batch_size=64) # train 2d models (CNNs etc) models[name].fit(X_train.reshape(-1, 28, 28, 1), y_cat_train, epochs=30, validation_data=(X_test.reshape(-1, 28, 28, 1), y_cat_test), batch_size=64)Looking at the training history can help gain some insight and spot overfitting, for examplefor name in models.keys(): #['simple', 'CNN + Dropout + Batchnorm']: #'dropout', 'batchnorm']: #bl = plt.plot(models[name].history.history['accuracy'], ls='--', label='Training Accuracy %s'%name) #plt.plot(models[name].history.history['val_accuracy'], ls='-', c=bl[0].get_color(), label='Testing Accuracy %s'%name) try: bl = plt.plot(models[name].history.history['loss'], ls='--', label='Training Loss %s'%name) plt.plot(models[name].history.history['val_loss'], ls='-', c=bl[0].get_color(), label='Testing Loss %s'%name) except AttributeError: pass plt.gca().set_xlabel('Epoch') plt.gca().set_ylabel('Loss') plt.legend() plt.gca().set_yscale('log') #plt.savefig('NN_history_cnn_best.png', bbox_inches='tight') # predict 1d model #y_pred = models[name].predict(X_test) # predict 2d model y_pred = models[name].predict(X_test.reshape(-1, 28, 28, 1))The confusion matrix shows how well the digits are assigned to their respective classescm = confusion_matrix(y_test, np.argmax(y_pred,axis=1)) plt.imshow(cm.T, cmap='YlGnBu', origin='lower') plt.gca().set_xlabel('True label') plt.gca().set_ylabel('Predicted label') plt.savefig('NN_confusion_%s.png'%name, bbox_inches='tight')-> Try out different models and architectures and compare them!
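To follow up on that comparison, here is a minimal sketch (assuming every entry in the `models` dict has been fitted in this session, so Keras has attached a `History` object as `model.history`, as used in the plotting loop above) that prints the final validation accuracy and loss of each model side by side; models that were never trained are simply skipped.
for name, model in models.items():
    hist = getattr(model, 'history', None)  # History callback attached by fit()
    if hist is None or 'val_accuracy' not in hist.history:
        continue  # skip models that were not trained in this session
    print(f"{name}: final val. accuracy = {hist.history['val_accuracy'][-1]:.4f}, "
          f"final val. loss = {hist.history['val_loss'][-1]:.4f}")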
Auto encoderAs discussed in the lecture, a different application of NNs is the auto encoder.We first look at a linear auto encoder, which just replicates our good old PCA# linear inputs = keras.Input(shape=(X_train.shape[1],)) h = keras.layers.Dense(20, activation="linear", use_bias=False)(inputs) outputs = keras.layers.Dense(X_train.shape[1], activation='linear', use_bias=False)(h) ae = keras.Model(inputs=inputs, outputs=outputs) optimizer = keras.optimizers.Adam(0.00001) ae.compile(loss='mse', optimizer=optimizer, metrics=['accuracy']) ae.fit(X, X, epochs=30, batch_size=32) encode = keras.Model(inputs=inputs, outputs=h) reduced_data = encode(X).numpy() plt_data = plt.scatter(reduced_data[:, 0], reduced_data[:, 1], s=0.1, c=y ,cmap=plt.cm.get_cmap('Spectral', 10)) plt.colorbar() #plt.savefig('mnist_encoded_true_labels.png', bbox_inches='tight')And here is what our 20d reconstructed data looks like... pretty similar to our 20d PCA!Exercise: compare this NN to PCA in 2dX_reco = ae(X).numpy() fig, ax = plt.subplots(5,5) for i in range(25): axis = ax[i//5, i%5] axis.imshow(X_reco[i].reshape(28,28), cmap='Greys')Non-linear AEIt gets much more powerful when adding back in non-linearitiesinputs = keras.Input(shape=(X_train.shape[1],)) encoded = keras.layers.Dense(256, activation="relu")(inputs) encoded = keras.layers.Dense(64, activation="relu")(encoded) encoded = keras.layers.Dense(2, activation="relu")(encoded) decoder1 = keras.layers.Dense(64, activation="relu") decoded = decoder1(encoded) decoder2 = keras.layers.Dense(256, activation="relu") decoded = decoder2(decoded) decoder_out = keras.layers.Dense(X_train.shape[1], activation='linear') outputs = decoder_out(decoded) ae = keras.Model(inputs=inputs, outputs=outputs) optimizer = keras.optimizers.Adam(0.001) ae.compile(loss='mse', optimizer=optimizer, metrics=['accuracy']) ae.fit(X_train, X_train, epochs=30, validation_data=(X_test, X_test), batch_size=64)We can split up our model into the encoder and the decoder parts:encode = keras.Model(inputs=inputs, outputs=encoded) dec_inp = keras.Input(shape=2,) decoded_i = decoder1(dec_inp) decoded_i = decoder2(decoded_i) outputs_i = decoder_out(decoded_i) decode = keras.Model(inputs=dec_inp, outputs=outputs_i) reduced_data = encode(X).numpy() reduced_dataFor this 2d encoder, the digits separate much more nicely than in the PCA case, and the reconstructed images also look fantasticplt_data = plt.scatter(reduced_data[:, 0], reduced_data[:, 1], s=0.1, c=y ,cmap=plt.cm.get_cmap('Spectral', 10)) plt.colorbar() #plt.savefig('mnist_encoded_linear_true_labels.png', bbox_inches='tight') X_reco = ae(X) X_plot = X_reco.numpy() fig, ax = plt.subplots(5,5) for i in range(25): axis = ax[i//5, i%5] axis.imshow(X_plot[i].reshape(28,28), cmap='Greys')Generate digitsWe can try to use the decoder as a generator, and generate artificial digits.
The issue here is that this may not work very well (see lecture) and should be done via _variational_ AEs (see according notebook).inp = np.array([[100., 100.],]).astype(np.float32) o = decode(inp).numpy() # display a 2D manifold of the digits n = 15 # figure with 15x15 digits digit_size = 28 figure = np.zeros((digit_size * n, digit_size * n)) grid_x = np.linspace(-100., 1600, n) grid_y = np.linspace(-100., 1200, n) for i, yi in enumerate(grid_x): for j, xi in enumerate(grid_y): z_sample = np.array([[xi, yi]]).astype(np.float32) x_decoded = decode.predict(z_sample) digit = x_decoded[0].reshape(digit_size, digit_size) figure[i * digit_size: (i + 1) * digit_size, j * digit_size: (j + 1) * digit_size] = digit plt.figure(figsize=(15, 15)) plt.imshow(figure, cmap='Greys') plt.gca().get_xaxis().set_visible(False) plt.gca().get_yaxis().set_visible(False) #plt.savefig('AE_mnist.png', bbox_inches='tight')Python入门(上)1. [简介](简介)2. [变量、运算符与数据类型](变量、运算符与数据类型) [1. 注释](1.-注释) [2. 运算符](2.-运算符) [3. 变量和赋值](3.-变量和赋值) [4. 数据类型与转换](4.-数据类型与转换) [5. print()函数](5.-print()-函数) 3. [位运算](位运算) [1. 原码、反码和补码](1.-原码、反码和补码) [2. 按位运算](2.-按位运算) [3. 利用位运算实现快速计算](3.-利用位运算实现快速计算) [4. 利用位运算实现整数集合](4.-利用位运算实现整数集合) 4. [条件语句](条件语句) [1. if 语句](1.-if-语句) [2. if - else 语句](2.-if---else-语句) [3. if - elif - else 语句](3.-if---elif---else-语句) [4. assert 关键词](4.-assert-关键词) 5. [循环语句](循环语句) [1. while 循环](1.-while-循环) [2. while - else 循环](2.-while---else-循环) [3. for 循环](3.-for-循环) [4. for - else 循环](4.-for---else-循环) [5. range() 函数](5.-range()-函数) [6. enumerate()函数](6.-enumerate()函数) [7. break 语句](7.-break-语句) [8. continue 语句](8.-continue-语句) [9. pass 语句](9.-pass-语句) [10. 推导式](10.-推导式) 6. [异常处理](异常处理) [1. Python 标准异常总结](1.-Python-标准异常总结) [2. Python 标准警告总结](2.-Python标准警告总结) [3. try - except 语句](3.-try---except-语句) [4. try - except - finally 语句](4.-try---except---finally-语句) [5. try - except - else 语句](5.-try---except---else-语句) [6. raise语句](6.-raise语句) 简介Python 是一种通用编程语言,其在科学计算和机器学习领域具有广泛的应用。如果我们打算利用 Python 来执行机器学习,那么对 Python 有一些基本的了解就是至关重要的。本 Python 入门系列体验就是为这样的初学者精心准备的。天池官方为大家准备钉钉学习交流群,在学习过程中,大家有任何教程内容或者平台使用问题都可以在群内提出,扫码即可加入: 本实验包括以下内容:- 变量、运算符与数据类型 - 注释 - 运算符 - 变量和赋值 - 数据类型与转换 - print() 函数- 位运算 - 原码、反码和补码 - 按位非操作 ~ - 按位与操作 & - 按位或操作 | - 按位异或操作 ^ - 按位左移操作 << - 按位右移操作 >> - 利用位运算实现快速计算 - 利用位运算实现整数集合- 条件语句 - if 语句 - if - else 语句 - if - elif - else 语句 - assert 关键词- 循环语句 - while 循环 - while - else 循环 - for 循环 - for - else 循环 - range() 函数 - enumerate()函数 - break 语句 - continue 语句 - pass 语句 - 推导式- 异常处理 - Python 标准异常总结 - Python 标准警告总结 - try - except 语句 - try - except - finally 语句 - try - except - else 语句 - raise语句 变量、运算符与数据类型 1. 注释- 在 Python 中,`` 表示注释,作用于整行。【例子】单行注释# 这是一个注释 print("Hello world") # Hello worldHello world- `''' '''` 或者 `""" """` 表示区间注释,在三引号之间的所有内容被注释【例子】多行注释''' 这是多行注释,用三个单引号 这是多行注释,用三个单引号 这是多行注释,用三个单引号 ''' print("Hello china") # Hello china """ 这是多行注释,用三个双引号 这是多行注释,用三个双引号 这是多行注释,用三个双引号 """ print("hello china") # hello chinaHello china hello china【我是测试题1】请在下方代码块中打印(print)出 hello+你的姓名如:print("hello 老表")# 写下你的答案2. 
运算符算术运算符操作符 | 名称 | 示例:---:|:---:|:---:`+` | 加 | `1 + 1``-` | 减 | `2 - 1``*` | 乘 | `3 * 4``/` | 除 | `3 / 4``//`| 整除(地板除)| `3 // 4``%` | 取余| `3 % 4``**`| 幂 | `2 ** 3`【例子】print(1 + 1) # 2 print(2 - 1) # 1 print(3 * 4) # 12 print(3 / 4) # 0.75 print(3 // 4) # 0 print(3 % 4) # 3 print(2 ** 3) # 82 1 12 0.75 0 3 8比较运算符操作符 | 名称 | 示例:---:|:---:|:---:`>` |大于| `2 > 1``>=`|大于等于| `2 >= 4``<` |小于| `1 < 2``<=`|小于等于| `5 <= 2``==`|等于| `3 == 4``!=`|不等于| `3 != 5`【例子】print(2 > 1) # True print(2 >= 4) # False print(1 < 2) # True print(5 <= 2) # False print(3 == 4) # False print(3 != 5) # TrueTrue False True False False True逻辑运算符操作符 | 名称 | 示例:---:|:---:|:---:`and`|与| `(3 > 2) and (3 < 5)``or` |或| `(1 > 3) or (9 < 2)``not`|非| `not (2 > 1)`【例子】print((3 > 2) and (3 < 5)) # True print((1 > 3) or (9 < 2)) # False print(not (2 > 1)) # FalseTrue False False位运算符操作符 | 名称 | 示例:---:|:---:|:---:`~` |按位取反|`~4``&` |按位与 |`4 & 5``|` |按位或 |`4 | 5``^` |按位异或|`4 ^ 5``<<`|左移 |`4 << 2``>>`|右移 |`4 >> 2`【例子】有关二进制的运算,参见“位运算”部分的讲解。print(bin(4)) # 0b100 print(bin(5)) # 0b101 print(bin(~4), ~4) # -0b101 -5 print(bin(4 & 5), 4 & 5) # 0b100 4 print(bin(4 | 5), 4 | 5) # 0b101 5 print(bin(4 ^ 5), 4 ^ 5) # 0b1 1 print(bin(4 << 2), 4 << 2) # 0b10000 16 print(bin(4 >> 2), 4 >> 2) # 0b1 10b100 0b101 -0b101 -5 0b100 4 0b101 5 0b1 1 0b10000 16 0b1 1三元运算符【例子】x, y = 4, 5 if x < y: small = x else: small = y print(small) # 44有了这个三元操作符的条件表达式,你可以使用一条语句来完成以上的条件判断和赋值操作。【例子】x, y = 4, 5 small = x if x < y else y print(small) # 44其他运算符操作符 | 名称 | 示例:---:|:---:|:---:`in`|存在| `'A' in ['A', 'B', 'C']``not in`|不存在|`'h' not in ['A', 'B', 'C']``is`|是| `"hello" is "hello"``not is`|不是|`"hello" is not "hello"`【例子】letters = ['A', 'B', 'C'] if 'A' in letters: print('A' + ' exists') if 'h' not in letters: print('h' + ' not exists') # A exists # h not existsA exists h not exists【例子】比较的两个变量均指向不可变类型。a = "hello" b = "hello" print(a is b, a == b) # True True print(a is not b, a != b) # False FalseTrue True False False【例子】比较的两个变量均指向可变类型。a = ["hello"] b = ["hello"] print(a is b, a == b) # False True print(a is not b, a != b) # True FalseFalse True True False注意:- is, is not 对比的是两个变量的内存地址- ==, != 对比的是两个变量的值- 比较的两个变量,指向的都是地址不可变的类型(str等),那么is,is not 和 ==,!= 是完全等价的。- 对比的两个变量,指向的是地址可变的类型(list,dict,tuple等),则两者是有区别的。运算符的优先级| 运算符 | 描述 ||-----|-----|| ** | 指数(最高优先级) || ~+- | 按位翻转,一元加号和减号 || * / % // | 乘,除,取模和取整除) || + - | 加法减法 || >> << | 右移,左移运算符 ||&| 位‘AND’|| ^\| | 位运算符 || >=| 比较运算符 || ==!= | 等于运算符 || =%=/=//=-=+=*=**= | 赋值运算符 || is is not | 身份运算符 || in not in | 成员运算符 || not and or | 逻辑运算符 |【例子】print(-3 ** 2) # -9 print(3 ** -2) # 0.1111111111111111 print(1 << 3 + 2 & 7) # 0 print(-3 * 2 + 5 / -2 - 4) # -12.5 print(3 < 4 and 4 < 5) # True-9 0.1111111111111111 0 -12.5 True【我是测试题2】下面这段代码的运行结果是什么?# 运行一下结果就出来了 a = "hello" b = "hello" print(a is b, a == b)3. 变量和赋值- 在使用变量之前,需要对其先赋值。- 变量名可以包括字母、数字、下划线、但变量名不能以数字开头。- Python 变量名是大小写敏感的,foo != Foo。【例子】teacher = "老马的程序人生" print(teacher) # 老马的程序人生老马的程序人生【例子】first = 2 second = 3 third = first + second print(third) # 55【例子】myTeacher = "老马的程序人生" yourTeacher = "小马的程序人生" ourTeacher = myTeacher + ',' + yourTeacher print(ourTeacher) # 老马的程序人生,小马的程序人生老马的程序人生,小马的程序人生【我是测试题3】运行下面一段代码看看结果是什么?# 运行一下就好啦 set_1 = {"欢迎", "学习","Python"} print(set_1.pop())4. 
数据类型与转换类型 | 名称 | 示例:---:|:---:|:---:int | 整型 ``| `-876, 10`float | 浮点型``| `3.149, 11.11`bool | 布尔型`` | `True, False`整型【例子】通过 `print()` 可看出 `a` 的值,以及类 (class) 是`int`。a = 1031 print(a, type(a)) # 1031 1031 Python 里面万物皆对象(object),整型也不例外,只要是对象,就有相应的属性 (attributes) 和方法(methods)。【例子】b = dir(int) print(b) # ['__abs__', '__add__', '__and__', '__bool__', '__ceil__', '__class__', # '__delattr__', '__dir__', '__divmod__', '__doc__', '__eq__', # '__float__', '__floor__', '__floordiv__', '__format__', '__ge__', # '__getattribute__', '__getnewargs__', '__gt__', '__hash__', # '__index__', '__init__', '__init_subclass__', '__int__', '__invert__', # '__le__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__', # '__neg__', '__new__', '__or__', '__pos__', '__pow__', '__radd__', # '__rand__', '__rdivmod__', '__reduce__', '__reduce_ex__', '__repr__', # '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__', '__ror__', # '__round__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__', # '__rtruediv__', '__rxor__', '__setattr__', '__sizeof__', '__str__', # '__sub__', '__subclasshook__', '__truediv__', '__trunc__', '__xor__', # 'bit_length', 'conjugate', 'denominator', 'from_bytes', 'imag', # 'numerator', 'real', 'to_bytes']['__abs__', '__add__', '__and__', '__bool__', '__ceil__', '__class__', '__delattr__', '__dir__', '__divmod__', '__doc__', '__eq__', '__float__', '__floor__', '__floordiv__', '__format__', '__ge__', '__getattribute__', '__getnewargs__', '__gt__', '__hash__', '__index__', '__init__', '__init_subclass__', '__int__', '__invert__', '__le__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__', '__neg__', '__new__', '__or__', '__pos__', '__pow__', '__radd__', '__rand__', '__rdivmod__', '__reduce__', '__reduce_ex__', '__repr__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__', '__ror__', '__round__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__', '__rxor__', '__setattr__', '__sizeof__', '__str__', '__sub__', '__subclasshook__', '__truediv__', '__trunc__', '__xor__', 'bit_length', 'conjugate', 'denominator', 'from_bytes', 'imag', 'numerator', 'real', 'to_bytes']对它们有个大概印象就可以了,具体怎么用,需要哪些参数 (argument),还需要查文档。看个`bit_length()`的例子。【例子】找到一个整数的二进制表示,再返回其长度。a = 1031 print(bin(a)) # 0b10000000111 print(a.bit_length()) # 110b10000000111 11浮点型【例子】print(1, type(1)) # 1 print(1., type(1.)) # 1.0 a = 0.00000023 b = 2.3e-7 print(a) # 2.3e-07 print(b) # 2.3e-071 1.0 2.3e-07 2.3e-07有时候我们想保留浮点型的小数点后 `n` 位。可以用 `decimal` 包里的 `Decimal` 对象和 `getcontext()` 方法来实现。import decimal from decimal import DecimalPython 里面有很多用途广泛的包 (package),用什么你就引进 (import) 什么。包也是对象,也可以用上面提到的`dir(decimal)` 来看其属性和方法。【例子】`getcontext()` 显示了 `Decimal` 对象的默认精度值是 28 位 (`prec=28`)。a = decimal.getcontext() print(a) # Context(prec=28, rounding=ROUND_HALF_EVEN, Emin=-999999, Emax=999999, # capitals=1, clamp=0, flags=[], # traps=[InvalidOperation, DivisionByZero, Overflow]) b = Decimal(1) / Decimal(3) print(b) # 0.33333333333333333333333333330.3333333333333333333333333333【例子】使 1/3 保留 4 位,用 `getcontext().prec` 来调整精度。decimal.getcontext().prec = 4 c = Decimal(1) / Decimal(3) print(c) # 0.33330.3333布尔型布尔 (boolean) 型变量只能取两个值,`True` 和 `False`。当把布尔型变量用在数字运算中,用 `1` 和 `0` 代表 `True` 和 `False`。【例子】print(True + True) # 2 print(True + False) # 1 print(True * False) # 02 1 0除了直接给变量赋值 `True` 和 `False`,还可以用 `bool(X)` 来创建变量,其中 `X` 可以是- 基本类型:整型、浮点型、布尔型- 容器类型:字符串、元组、列表、字典和集合【例子】`bool` 作用在基本类型变量:`X` 只要不是整型 `0`、浮点型 `0.0`,`bool(X)` 就是 `True`,其余就是 `False`。print(type(0), bool(0), bool(1)) # False True print(type(10.31), bool(0.00), bool(10.31)) # 
False True print(type(True), bool(False), bool(True)) # False True False True False True False True【例子】`bool` 作用在容器类型变量:`X` 只要不是空的变量,`bool(X)` 就是 `True`,其余就是 `False`。print(type(''), bool(''), bool('python')) # False True print(type(()), bool(()), bool((10,))) # False True print(type([]), bool([]), bool([1, 2])) # False True print(type({}), bool({}), bool({'a': 1, 'b': 2})) # False True print(type(set()), bool(set()), bool({1, 2})) # False True False True False True False True False True False True确定`bool(X)` 的值是 `True` 还是 `False`,就看 `X` 是不是空,空的话就是 `False`,不空的话就是 `True`。- 对于数值变量,`0`, `0.0` 都可认为是空的。- 对于容器变量,里面没元素就是空的。获取类型信息- 获取类型信息 `type(object)`【例子】print(isinstance(1, int)) # True print(isinstance(5.2, float)) # True print(isinstance(True, bool)) # True print(isinstance('5.2', str)) # TrueTrue True True True注:- `type()` 不会认为子类是一种父类类型,不考虑继承关系。- `isinstance()` 会认为子类是一种父类类型,考虑继承关系。如果要判断两个类型是否相同推荐使用 `isinstance()`。**类型转换**- 转换为整型 `int(x, base=10)`- 转换为字符串 `str(object='')`- 转换为浮点型 `float(x)`【例子】print(int('520')) # 520 print(int(520.52)) # 520 print(float('520.52')) # 520.52 print(float(520)) # 520.0 print(str(10 + 10)) # 20 print(str(10.1 + 5.2)) # 15.3520 520 520.52 520.0 20 15.35. print() 函数print(*objects, sep=' ', end='\n', file=sys.stdout, flush=False)- 将对象以字符串表示的方式格式化输出到流文件对象file里。其中所有非关键字参数都按`str()`方式进行转换为字符串输出;- 关键字参数`sep`是实现分隔符,比如多个参数输出时想要输出中间的分隔字符;- 关键字参数`end`是输出结束时的字符,默认是换行符`\n`;- 关键字参数`file`是定义流输出的文件,可以是标准的系统输出`sys.stdout`,也可以重定义为别的文件;- 关键字参数`flush`是立即把内容输出到流文件,不作缓存。【例子】没有参数时,每次输出后都会换行。shoplist = ['apple', 'mango', 'carrot', 'banana'] print("This is printed without 'end'and 'sep'.") for item in shoplist: print(item) # This is printed without 'end'and 'sep'. # apple # mango # carrot # bananaThis is printed without 'end'and 'sep'. apple mango carrot banana【例子】每次输出结束都用`end`设置的参数`&`结尾,并没有默认换行。shoplist = ['apple', 'mango', 'carrot', 'banana'] print("This is printed with 'end='&''.") for item in shoplist: print(item, end='&') print('hello world') # This is printed with 'end='&''. # apple&mango&carrot&banana&hello worldThis is printed with 'end='&''. apple&mango&carrot&banana&hello world【例子】`item`值与`'another string'`两个值之间用`sep`设置的参数`&`分割。由于`end`参数没有设置,因此默认是输出解释后换行,即`end`参数的默认值为`\n`。shoplist = ['apple', 'mango', 'carrot', 'banana'] print("This is printed with 'sep='&''.") for item in shoplist: print(item, 'another string', sep='&') # This is printed with 'sep='&''. # apple&another string # mango&another string # carrot&another string # banana&another stringThis is printed with 'sep='&''. apple&another string mango&another string carrot&another string banana&another string位运算 1. 原码、反码和补码二进制有三种不同的表示形式:原码、反码和补码,计算机内部使用补码来表示。**原码**:就是其二进制表示(注意,有一位符号位)。 ```python00 00 00 11 -> 310 00 00 11 -> -3``` **反码**:正数的反码就是原码,负数的反码是符号位不变,其余位取反(对应正数按位取反)。 ```python00 00 00 11 -> 311 11 11 00 -> -3``` **补码**:正数的补码就是原码,负数的补码是反码+1。 ```python00 00 00 11 -> 311 11 11 01 -> -3``` **符号位**:最高位为符号位,0表示正数,1表示负数。在位运算中符号位也参与运算。 2. 
按位运算 - 按位非操作 ~ ```python~ 1 = 0~ 0 = 1``` `~` 把`num`的补码中的 0 和 1 全部取反(0 变为 1,1 变为 0)有符号整数的符号位在 `~` 运算中同样会取反。 ```python00 00 01 01 -> 5~---11 11 10 10 -> -611 11 10 11 -> -5~---00 00 01 00 -> 4``` - 按位与操作 & ```python1 & 1 = 11 & 0 = 00 & 1 = 00 & 0 = 0``` 只有两个对应位都为 1 时才为 1 ```python00 00 01 01 -> 5&00 00 01 10 -> 6---00 00 01 00 -> 4``` - 按位或操作 | ```python1 | 1 = 11 | 0 = 10 | 1 = 10 | 0 = 0```只要两个对应位中有一个 1 时就为 1```python00 00 01 01 -> 5|00 00 01 10 -> 6---00 00 01 11 -> 7``` - 按位异或操作 ^ ```python1 ^ 1 = 01 ^ 0 = 10 ^ 1 = 10 ^ 0 = 0```只有两个对应位不同时才为 1```python00 00 01 01 -> 5^00 00 01 10 -> 6---00 00 00 11 -> 3```异或操作的性质:满足交换律和结合律```pythonA: 00 00 11 00B: 00 00 01 11A^B: 00 00 10 11B^A: 00 00 10 11A^A: 00 00 00 00A^0: 00 00 11 00A^B^A: = A^A^B = B = 00 00 01 11```- 按位左移操作 <<`num << i` 将`num`的二进制表示向左移动`i`位所得的值。```python00 00 10 11 -> 1111 << 3---01 01 10 00 -> 88 ```- 按位右移操作 >>`num >> i` 将`num`的二进制表示向右移动`i`位所得的值。```python00 00 10 11 -> 1111 >> 2---00 00 00 10 -> 2 ``` 3. 利用位运算实现快速计算通过 `>` 快速计算2的倍数问题。```pythonn 计算 n*2n >> 1 -> 计算 n/2,负奇数的运算不可用n 计算 n*(2^m),即乘以 2 的 m 次方n >> m -> 计算 n/(2^m),即除以 2 的 m 次方1 2^n``` 通过 `^` 快速交换两个整数。通过 `^` 快速交换两个整数。```pythona ^= bb ^= aa ^= b```通过 `a & (-a)` 快速获取`a`的最后为 1 位置的整数。```python00 00 01 01 -> 5&11 11 10 11 -> -5---00 00 00 01 -> 100 00 11 10 -> 14&11 11 00 10 -> -14---00 00 00 10 -> 2``` 4. 利用位运算实现整数集合一个数的二进制表示可以看作是一个集合(0 表示不在集合中,1 表示在集合中)。比如集合 `{1, 3, 4, 8}`,可以表示成 `01 00 01 10 10` 而对应的位运算也就可以看作是对集合进行的操作。元素与集合的操作:```pythona | (1 把 i 插入到集合中a & ~(1 把 i 从集合中删除a & (1 判断 i 是否属于该集合(零不属于,非零属于)```集合之间的操作:```pythona 补 -> ~aa 交 b -> a & ba 并 b -> a | ba 差 b -> a & (~b)```注意:整数在内存中是以补码的形式存在的,输出自然也是按照补码输出。【例子】C语言输出负数。class Program { static void Main(string[] args) { string s1 = Convert.ToString(-3, 2); Console.WriteLine(s1); // 11111111111111111111111111111101 string s2 = Convert.ToString(-3, 16); Console.WriteLine(s2); // fffffffd } }【例子】 Python 的`bin()` 输出。print(bin(3)) # 0b11 print(bin(-3)) # -0b11 print(bin(-3 & 0xffffffff)) # 0b11111111111111111111111111111101 print(bin(0xfffffffd)) # 0b11111111111111111111111111111101 print(0xfffffffd) # 42949672930b11 -0b11 0b11111111111111111111111111111101 0b11111111111111111111111111111101 4294967293是不是很颠覆认知,我们从结果可以看出:- Python中`bin`一个负数(十进制表示),输出的是它的原码的二进制表示加上个负号,巨坑。- Python中的整型是补码形式存储的。- Python中整型是不限制长度的不会超范围溢出。所以为了获得负数(十进制表示)的补码,需要手动将其和十六进制数`0xffffffff`进行按位与操作,再交给`bin()`进行输出,得到的才是负数的补码表示。 条件语句 1. if 语句 ```pythonif expression: expr_true_suite``` - if 语句的 `expr_true_suite` 代码块只有当条件表达式 `expression` 结果为真时才执行,否则将继续执行紧跟在该代码块后面的语句。- 单个 if 语句中的 `expression` 条件表达式可以通过布尔操作符 `and`,`or`和`not` 实现多重条件判断。【例子】if 2 > 1 and not 2 > 3: print('Correct Judgement!') # Correct Judgement!Correct Judgement!2. if - else 语句 ```pythonif expression: expr_true_suiteelse: expr_false_suite```- Python 提供与 if 搭配使用的 else,如果 if 语句的条件表达式结果布尔值为假,那么程序将执行 else 语句后的代码。【例子】temp = input("猜一猜小姐姐想的是哪个数字?") guess = int(temp) # input 函数将接收的任何数据类型都默认为 str。 if guess == 666: print("你太了解小姐姐的心思了!") print("哼,猜对也没有奖励!") else: print("猜错了,小姐姐现在心里想的是666!") print("游戏结束,不玩儿啦!")猜一猜小姐姐想的是哪个数字?666 你太了解小姐姐的心思了! 哼,猜对也没有奖励! 游戏结束,不玩儿啦!`if`语句支持嵌套,即在一个`if`语句中嵌入另一个`if`语句,从而构成不同层次的选择结构。【例子】Python 使用缩进而不是大括号来标记代码块边界,因此要特别注意`else`的悬挂问题。hi = 6 if hi > 2: if hi > 7: print('好棒!好棒!') else: print('切~') # 无输出【例子】temp = input("猜一猜小姐姐想的是哪个数字?") guess = int(temp) if guess > 8: print("大了,大了") else: if guess == 8: print("你太了解小姐姐的心思了!") print("哼,猜对也没有奖励!") else: print("小了,小了") print("游戏结束,不玩儿啦!")猜一猜小姐姐想的是哪个数字?8 你太了解小姐姐的心思了! 哼,猜对也没有奖励! 游戏结束,不玩儿啦!3. 
if - elif - else 语句```pythonif expression1: expr1_true_suiteelif expression2: expr2_true_suite . .elif expressionN: exprN_true_suiteelse: expr_false_suite```- elif 语句即为 else if,用来检查多个表达式是否为真,并在为真时执行特定代码块中的代码。【例子】temp = input('请输入成绩:') source = int(temp) if 100 >= source >= 90: print('A') elif 90 > source >= 80: print('B') elif 80 > source >= 60: print('C') elif 60 > source >= 0: print('D') else: print('输入错误!')请输入成绩:99 A4. assert 关键词- `assert`这个关键词我们称之为“断言”,当这个关键词后边的条件为 False 时,程序自动崩溃并抛出`AssertionError`的异常。【例子】my_list = ['lsgogroup'] my_list.pop(0) assert len(my_list) > 0 # AssertionError【例子】在进行单元测试时,可以用来在程序中置入检查点,只有条件为 True 才能让程序正常工作。assert 3 > 7 # AssertionError循环语句 1. while 循环`while`语句最基本的形式包括一个位于顶部的布尔表达式,一个或多个属于`while`代码块的缩进语句。```pythonwhile 布尔表达式: 代码块````while`循环的代码块会一直循环执行,直到布尔表达式的值为布尔假。如果布尔表达式不带有`、==、!=、in、not in`等运算符,仅仅给出数值之类的条件,也是可以的。当`while`后写入一个非零整数时,视为真值,执行循环体;写入`0`时,视为假值,不执行循环体。也可以写入`str、list`或任何序列,长度非零则视为真值,执行循环体;否则视为假值,不执行循环体。【例子】count = 0 while count < 3: temp = input("猜一猜小姐姐想的是哪个数字?") guess = int(temp) if guess > 8: print("大了,大了") else: if guess == 8: print("你太了解小姐姐的心思了!") print("哼,猜对也没有奖励!") count = 3 else: print("小了,小了") count = count + 1 print("游戏结束,不玩儿啦!")猜一猜小姐姐想的是哪个数字?8 你太了解小姐姐的心思了! 哼,猜对也没有奖励! 游戏结束,不玩儿啦!【例子】布尔表达式返回0,循环终止。string = 'abcd' while string: print(string) string = string[1:] # abcd # bcd # cd # dabcd bcd cd d--- 2. while - else 循环```pythonwhile 布尔表达式: 代码块else: 代码块```当`while`循环正常执行完的情况下,执行`else`输出,如果`while`循环中执行了跳出循环的语句,比如 `break`,将不执行`else`代码块的内容。 【例子】count = 0 while count < 5: print("%d is less than 5" % count) count = count + 1 else: print("%d is not less than 5" % count) # 0 is less than 5 # 1 is less than 5 # 2 is less than 5 # 3 is less than 5 # 4 is less than 5 # 5 is not less than 50 is less than 5 1 is less than 5 2 is less than 5 3 is less than 5 4 is less than 5 5 is not less than 5【例子】count = 0 while count < 5: print("%d is less than 5" % count) count = 6 break else: print("%d is not less than 5" % count) # 0 is less than 50 is less than 5--- 3. for 循环`for`循环是迭代循环,在Python中相当于一个通用的序列迭代器,可以遍历任何有序序列,如`str、list、tuple`等,也可以遍历任何可迭代对象,如`dict`。```pythonfor 迭代变量 in 可迭代对象: 代码块```每次循环,迭代变量被设置为可迭代对象的当前元素,提供给代码块使用。【例子】for i in 'ILoveLSGO': print(i, end=' ') # 不换行输出 # I L o v e L S G OI L o v e L S G O【例子】member = ['张三', '李四', '刘德华', '刘六', '周润发'] for each in member: print(each) # 张三 # 李四 # 刘德华 # 刘六 # 周润发 for i in range(len(member)): print(member[i]) # 张三 # 李四 # 刘德华 # 刘六 # 周润发张三 李四 刘德华 刘六 周润发 张三 李四 刘德华 刘六 周润发【例子】dic = {'a': 1, 'b': 2, 'c': 3, 'd': 4} for key, value in dic.items(): print(key, value, sep=':', end=' ') # a:1 b:2 c:3 d:4a:1 b:2 c:3 d:4【例子】dic = {'a': 1, 'b': 2, 'c': 3, 'd': 4} for key in dic.keys(): print(key, end=' ') # a b c da b c d【例子】dic = {'a': 1, 'b': 2, 'c': 3, 'd': 4} for value in dic.values(): print(value, end=' ') # 1 2 3 41 2 3 4--- 4. for - else 循环```pythonfor 迭代变量 in 可迭代对象: 代码块else: 代码块```当`for`循环正常执行完的情况下,执行`else`输出,如果`for`循环中执行了跳出循环的语句,比如 `break`,将不执行`else`代码块的内容,与`while - else`语句一样。【例子】for num in range(10, 20): # 迭代 10 到 20 之间的数字 for i in range(2, num): # 根据因子迭代 if num % i == 0: # 确定第一个因子 j = num / i # 计算第二个因子 print('%d 等于 %d * %d' % (num, i, j)) break # 跳出当前循环 else: # 循环的 else 部分 print(num, '是一个质数') # 10 等于 2 * 5 # 11 是一个质数 # 12 等于 2 * 6 # 13 是一个质数 # 14 等于 2 * 7 # 15 等于 3 * 5 # 16 等于 2 * 8 # 17 是一个质数 # 18 等于 2 * 9 # 19 是一个质数10 等于 2 * 5 11 是一个质数 12 等于 2 * 6 13 是一个质数 14 等于 2 * 7 15 等于 3 * 5 16 等于 2 * 8 17 是一个质数 18 等于 2 * 9 19 是一个质数--- 5. 
range() 函数```pythonrange([start,] stop[, step=1])```- 这个BIF(Built-in functions)有三个参数,其中用中括号括起来的两个表示这两个参数是可选的。- `step=1` 表示第三个参数的默认值是1。- `range` 这个BIF的作用是生成一个从`start`参数的值开始到`stop`参数的值结束的数字序列,该序列包含`start`的值但不包含`stop`的值。【例子】for i in range(2, 9): # 不包含9 print(i) # 2 # 3 # 4 # 5 # 6 # 7 # 82 3 4 5 6 7 8【例子】for i in range(1, 10, 2): print(i) # 1 # 3 # 5 # 7 # 91 3 5 7 9--- 6. enumerate()函数```pythonenumerate(sequence, [start=0])```- sequence:一个序列、迭代器或其他支持迭代对象。- start:下标起始位置。- 返回 enumerate(枚举) 对象【例子】seasons = ['Spring', 'Summer', 'Fall', 'Winter'] lst = list(enumerate(seasons)) print(lst) # [(0, 'Spring'), (1, 'Summer'), (2, 'Fall'), (3, 'Winter')] lst = list(enumerate(seasons, start=1)) # 下标从 1 开始 print(lst) # [(1, 'Spring'), (2, 'Summer'), (3, 'Fall'), (4, 'Winter')][(0, 'Spring'), (1, 'Summer'), (2, 'Fall'), (3, 'Winter')] [(1, 'Spring'), (2, 'Summer'), (3, 'Fall'), (4, 'Winter')]`enumerate()`与 for 循环的结合使用。```pythonfor i, a in enumerate(A) do something with a ```用 `enumerate(A)` 不仅返回了 `A` 中的元素,还顺便给该元素一个索引值 (默认从 0 开始)。此外,用 `enumerate(A, j)` 还可以确定索引起始值为 `j`。【例子】languages = ['Python', 'R', 'Matlab', 'C++'] for language in languages: print('I love', language) print('Done!') # I love Python # I love R # I love Matlab # I love C++ # Done! for i, language in enumerate(languages, 2): print(i, 'I love', language) print('Done!') # 2 I love Python # 3 I love R # 4 I love Matlab # 5 I love C++ # Done!I love Python I love R I love Matlab I love C++ Done! 2 I love Python 3 I love R 4 I love Matlab 5 I love C++ Done!--- 7. break 语句`break`语句可以跳出当前所在层的循环。【例子】import random secret = random.randint(1, 10) #[1,10]之间的随机数 while True: temp = input("猜一猜小姐姐想的是哪个数字?") guess = int(temp) if guess > secret: print("大了,大了") else: if guess == secret: print("你太了解小姐姐的心思了!") print("哼,猜对也没有奖励!") break else: print("小了,小了") print("游戏结束,不玩儿啦!")猜一猜小姐姐想的是哪个数字?8 你太了解小姐姐的心思了! 哼,猜对也没有奖励! 游戏结束,不玩儿啦!--- 8. continue 语句`continue`终止本轮循环并开始下一轮循环。【例子】for i in range(10): if i % 2 != 0: print(i) continue i += 2 print(i) # 2 # 1 # 4 # 3 # 6 # 5 # 8 # 7 # 10 # 92 1 4 3 6 5 8 7 10 9--- 9. pass 语句`pass` 语句的意思是“不做任何事”,如果你在需要有语句的地方不写任何语句,那么解释器会提示出错,而 `pass` 语句就是用来解决这些问题的。【例子】def a_func(): # SyntaxError: unexpected EOF while parsing【例子】```pythondef a_func(): pass````pass`是空语句,不做任何操作,只起到占位的作用,其作用是为了保持程序结构的完整性。尽管`pass`语句不做任何操作,但如果暂时不确定要在一个位置放上什么样的代码,可以先放置一个`pass`语句,让代码可以正常运行。--- 10. 
推导式**列表推导式**```python[ expr for value in collection [if condition] ]```【例子】x = [-4, -2, 0, 2, 4] y = [a * 2 for a in x] print(y) # [-8, -4, 0, 4, 8][-8, -4, 0, 4, 8]【例子】x = [i ** 2 for i in range(1, 10)] print(x) # [1, 4, 9, 16, 25, 36, 49, 64, 81][1, 4, 9, 16, 25, 36, 49, 64, 81]【例子】x = [(i, i ** 2) for i in range(6)] print(x) # [(0, 0), (1, 1), (2, 4), (3, 9), (4, 16), (5, 25)][(0, 0), (1, 1), (2, 4), (3, 9), (4, 16), (5, 25)]【例子】x = [i for i in range(100) if (i % 2) != 0 and (i % 3) == 0] print(x) # [3, 9, 15, 21, 27, 33, 39, 45, 51, 57, 63, 69, 75, 81, 87, 93, 99][3, 9, 15, 21, 27, 33, 39, 45, 51, 57, 63, 69, 75, 81, 87, 93, 99]【例子】a = [(i, j) for i in range(0, 3) for j in range(0, 3)] print(a) # [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)][(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)]【例子】x = [[i, j] for i in range(0, 3) for j in range(0, 3)] print(x) # [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]] x[0][0] = 10 print(x) # [[10, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]][[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]] [[10, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]]【例子】a = [(i, j) for i in range(0, 3) if i < 1 for j in range(0, 3) if j > 1] print(a) # [(0, 2)][(0, 2)]**元组推导式**```python( expr for value in collection [if condition] )```【例子】a = (x for x in range(10)) print(a) # at 0x0000025BE511CC48> print(tuple(a)) # (0, 1, 2, 3, 4, 5, 6, 7, 8, 9) at 0x0000014CEC2E28B8> (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)**字典推导式**```python{ key_expr: value_expr for value in collection [if condition] }```【例子】b = {i: i % 2 == 0 for i in range(10) if i % 3 == 0} print(b) # {0: True, 3: False, 6: True, 9: False}{0: True, 3: False, 6: True, 9: False}**集合推导式**```{ expr for value in collection [if condition] }```【例子】c = {i for i in [1, 2, 3, 4, 5, 5, 6, 4, 3, 2, 1]} print(c) # {1, 2, 3, 4, 5, 6}{1, 2, 3, 4, 5, 6}**其它**- `next(iterator[, default])` Return the next item from the iterator. If default is given and the iterator is exhausted, it is returned instead of raising StopIteration.【例子】e = (i for i in range(10)) print(e) # at 0x0000007A0B8D01B0> print(next(e)) # 0 print(next(e)) # 1 for each in e: print(each, end=' ') # 2 3 4 5 6 7 8 9 at 0x0000014CEC389C78> 0 1 2 3 4 5 6 7 8 9【例子】s = sum([i for i in range(101)]) print(s) # 5050 s = sum((i for i in range(101))) print(s) # 50505050 5050异常处理异常就是运行期检测到的错误。计算机语言针对可能出现的错误定义了异常类型,某种错误引发对应的异常时,异常处理程序将被启动,从而恢复程序的正常运行。 1. 
Python 标准异常总结- BaseException:所有异常的 **基类**- Exception:常规异常的 **基类**- StandardError:所有的内建标准异常的基类- ArithmeticError:所有数值计算异常的基类- FloatingPointError:浮点计算异常- OverflowError:数值运算超出最大限制- ZeroDivisionError:除数为零- AssertionError:断言语句(assert)失败- AttributeError:尝试访问未知的对象属性- EOFError:没有内建输入,到达EOF标记- EnvironmentError:操作系统异常的基类- IOError:输入/输出操作失败- OSError:操作系统产生的异常(例如打开一个不存在的文件)- WindowsError:系统调用失败- ImportError:导入模块失败的时候- KeyboardInterrupt:用户中断执行- LookupError:无效数据查询的基类- IndexError:索引超出序列的范围- KeyError:字典中查找一个不存在的关键字- MemoryError:内存溢出(可通过删除对象释放内存)- NameError:尝试访问一个不存在的变量- UnboundLocalError:访问未初始化的本地变量- ReferenceError:弱引用试图访问已经垃圾回收了的对象- RuntimeError:一般的运行时异常- NotImplementedError:尚未实现的方法- SyntaxError:语法错误导致的异常- IndentationError:缩进错误导致的异常- TabError:Tab和空格混用- SystemError:一般的解释器系统异常- TypeError:不同类型间的无效操作- ValueError:传入无效的参数- UnicodeError:Unicode相关的异常- UnicodeDecodeError:Unicode解码时的异常- UnicodeEncodeError:Unicode编码错误导致的异常- UnicodeTranslateError:Unicode转换错误导致的异常异常体系内部有层次关系,Python异常体系中的部分关系如下所示:![](https://tianchi-public.oss-cn-hangzhou.aliyuncs.com/public/files/forum/162210513255214581622105132094.png)--- 2. Python标准警告总结- Warning:警告的基类- DeprecationWarning:关于被弃用的特征的警告- FutureWarning:关于构造将来语义会有改变的警告- UserWarning:用户代码生成的警告- PendingDeprecationWarning:关于特性将会被废弃的警告- RuntimeWarning:可疑的运行时行为(runtime behavior)的警告- SyntaxWarning:可疑语法的警告- ImportWarning:用于在导入模块过程中触发的警告- UnicodeWarning:与Unicode相关的警告- BytesWarning:与字节或字节码相关的警告- ResourceWarning:与资源使用相关的警告 --- 3. try - except 语句```pythontry: 检测范围except Exception[as reason]: 出现异常后的处理代码```try 语句按照如下方式工作:- 首先,执行`try`子句(在关键字`try`和关键字`except`之间的语句)- 如果没有异常发生,忽略`except`子句,`try`子句执行后结束。- 如果在执行`try`子句的过程中发生了异常,那么`try`子句余下的部分将被忽略。如果异常的类型和`except`之后的名称相符,那么对应的`except`子句将被执行。最后执行`try - except`语句之后的代码。- 如果一个异常没有与任何的`except`匹配,那么这个异常将会传递给上层的`try`中。【例子】try: f = open('test.txt') print(f.read()) f.close() except OSError: print('打开文件出错') # 打开文件出错打开文件出错【例子】try: f = open('test.txt') print(f.read()) f.close() except OSError as error: print('打开文件出错\n原因是:' + str(error)) # 打开文件出错 # 原因是:[Errno 2] No such file or directory: 'test.txt'打开文件出错 原因是:[Errno 2] No such file or directory: 'test.txt'一个`try`语句可能包含多个`except`子句,分别来处理不同的特定的异常。最多只有一个分支会被执行。【例子】try: int("abc") s = 1 + '1' f = open('test.txt') print(f.read()) f.close() except OSError as error: print('打开文件出错\n原因是:' + str(error)) except TypeError as error: print('类型出错\n原因是:' + str(error)) except ValueError as error: print('数值出错\n原因是:' + str(error)) # 数值出错 # 原因是:invalid literal for int() with base 10: 'abc'数值出错 原因是:invalid literal for int() with base 10: 'abc'【例子】dict1 = {'a': 1, 'b': 2, 'v': 22} try: x = dict1['y'] except LookupError: print('查询错误') except KeyError: print('键错误') else: print(x) # 查询错误查询错误`try-except-else`语句尝试查询不在`dict`中的键值对,从而引发了异常。这一异常准确地说应属于`KeyError`,但由于`KeyError`是`LookupError`的子类,且将`LookupError`置于`KeyError`之前,因此程序优先执行该`except`代码块。所以,使用多个`except`代码块时,必须坚持对其规范排序,要从最具针对性的异常到最通用的异常。【例子】dict1 = {'a': 1, 'b': 2, 'v': 22} try: x = dict1['y'] except KeyError: print('键错误') except LookupError: print('查询错误') else: print(x) # 键错误键错误【例子】一个 `except` 子句可以同时处理多个异常,这些异常将被放在一个括号里成为一个元组。try: s = 1 + '1' int("abc") f = open('test.txt') print(f.read()) f.close() except (OSError, TypeError, ValueError) as error: print('出错了!\n原因是:' + str(error)) # 出错了! # 原因是:unsupported operand type(s) for +: 'int' and 'str'出错了! 原因是:unsupported operand type(s) for +: 'int' and 'str'--- 4. 
try - except - finally 语句try: 检测范围except Exception[as reason]: 出现异常后的处理代码finally: 无论如何都会被执行的代码不管`try`子句里面有没有发生异常,`finally`子句都会执行。【例子】如果一个异常在`try`子句里被抛出,而又没有任何的`except`把它截住,那么这个异常会在`finally`子句执行后被抛出。def divide(x, y): try: result = x / y print("result is", result) except ZeroDivisionError: print("division by zero!") finally: print("executing finally clause") divide(2, 1) # result is 2.0 # executing finally clause divide(2, 0) # division by zero! # executing finally clause divide("2", "1") # executing finally clause # TypeError: unsupported operand type(s) for /: 'str' and 'str'result is 2.0 executing finally clause division by zero! executing finally clause--- 5. try - except - else 语句如果在`try`子句执行时没有发生异常,Python将执行`else`语句后的语句。```pythontry: 检测范围except: 出现异常后的处理代码else: 如果没有异常执行这块代码```使用`except`而不带任何异常类型,这不是一个很好的方式,我们不能通过该程序识别出具体的异常信息,因为它捕获所有的异常。try: 检测范围except(Exception1[, Exception2[,...ExceptionN]]]): 发生以上多个异常中的一个,执行这块代码else: 如果没有异常执行这块代码 【例子】try: fh = open("testfile.txt", "w") fh.write("这是一个测试文件,用于测试异常!!") except IOError: print("Error: 没有找到文件或读取文件失败") else: print("内容写入文件成功") fh.close() # 内容写入文件成功内容写入文件成功注意:`else`语句的存在必须以`except`语句的存在为前提,在没有`except`语句的`try`语句中使用`else`语句,会引发语法错误。--- 6. raise语句Python 使用`raise`语句抛出一个指定的异常。【例子】try: raise NameError('HiThere') except NameError: print('An exception flew by!') # An exception flew by!An exception flew by!Ecogym> Ecogym is a simple ecosystem simulator based on OpenAI gym. This file will become your README and also the index of your documentation. InstallExecute the commands below in the root folder of the directory. `conda env create -f=requirement.yml -n ecogym``nbdev_install_git_hooks` For the animations to work ffmpeg needs to be installed. Installation instructions for Windows 10 can be found [here](https://www.wikihow.com/Install-FFmpeg-on-Windows). For Linux and MacOS ffmpeg's [home page](https://www.ffmpeg.org/download.html) is a good starting point. How to use Fill me in please! Don't forget code examples:1+1Sketching AutoEncoder with a Catmull-Rom SplineThis notebook demonstrates an autoencoder that predicts both a list of 2d points describing a Catmull-Rom spline. Curves between each possible pair of points (excluding the first an last points which act purely as control points) are drawn into separate rasters before being merged into a single image with a compositing function. Only the encoder network has learnable parameters; the decoder is entirely deterministic, but differentiable.The network is defined below; the number of points can be configured.import torch import torch.nn as nn try: from dsketch.raster.disttrans import catmull_rom_spline, curve_edt2_bruteforce from dsketch.raster.raster import exp from dsketch.raster.composite import softor except: !pip install git+https://github.com/jonhare/DifferentiableSketching.git from dsketch.raster.disttrans import catmull_rom_spline, curve_edt2_bruteforce from dsketch.raster.raster import exp from dsketch.raster.composite import softor class AE(nn.Module): def __init__(self, npoints=16, hidden=64, sz=28): super(AE, self).__init__() # build the coordinate grid: r = torch.linspace(-1, 1, sz) c = torch.linspace(-1, 1, sz) grid = torch.meshgrid(r, c) grid = torch.stack(grid, dim=2) self.register_buffer("grid", grid) # this is a list of quads of "connections" 0-1-2-3, 1-2-3-4, 2-3-4-5, ... 
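# note: each quad indexes four consecutive control points; a Catmull-Rom segment
# is only drawn between the two middle points of a quad, which is why the very
# first and very last of the npoints act purely as control points and are never drawn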
self.coordpairs = torch.stack([torch.arange(0, npoints-3, 1), torch.arange(1, npoints-2, 1), torch.arange(2, npoints-1, 1), torch.arange(3, npoints, 1)], dim=1) self.enc = nn.Sequential( nn.Linear(sz**2, hidden), nn.ReLU(), nn.Linear(hidden, hidden), nn.ReLU(), nn.Linear(hidden, npoints*2), nn.Tanh() ) def forward(self, inp, sigma=7e-3): # the encoding process will flatten the input and # push it through the encoder networks bs = inp.shape[0] x = inp.view(bs, -1) pts = self.enc(x) #[batch, npoints*2] pts = pts.view(bs, -1, 2) # expand -> [batch, npoints, 2] # compute all valid permuations of line start and end points lines = torch.cat((pts[:,self.coordpairs[:,0]], pts[:,self.coordpairs[:,1]], pts[:,self.coordpairs[:,2]], pts[:,self.coordpairs[:,3]]), dim=-1) #[batch, nlines, 8] lines = lines.view(-1, 4, 2) # flatten -> [batch * nlines, 4, 2] # Rasterisation steps # draw the lines (for every input in the batch) rasters = exp(curve_edt2_bruteforce(lines, self.grid, 2, 10, cfcn=catmull_rom_spline), sigma) # -> [batch * nlines, 28, 28] rasters = rasters.view(bs, -1, rasters.shape[-2], rasters.shape[-1]) # then reshape back to images [batch, nlines, rows, cols] # composite return softor(rasters)We'll do a simple test on MNIST and try and train the AE to be able to reconstruct digit images (and of course at the same time perform image vectorisation/autotracing of polylines). Hyperparameters are pretty arbitrary (defaults for Adam; 256 batch size) and the line width is fixed to a value that works well for MNIST.import matplotlib.pyplot as plt from torchvision.datasets.mnist import MNIST from torchvision import transforms import torchvision batch_size = 16 transform = transforms.Compose([ transforms.ToTensor(), transforms.Lambda(lambda x: x.view(28, 28)) ]) trainset = torchvision.datasets.MNIST('/tmp', train=True, transform=transform, download=True) trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=0) testset = torchvision.datasets.MNIST('/tmp', train=False, transform=transform, download=True) testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=0) device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') model = AE(npoints=16).to(device) opt = torch.optim.Adam(model.parameters()) for epoch in range(10): for images, classes in trainloader: images = images.to(device) opt.zero_grad() out = model(images) loss = nn.functional.mse_loss(out, images) loss.backward() opt.step() print(loss)tensor(0.0322, device='cuda:0', grad_fn=) tensor(0.0347, device='cuda:0', grad_fn=) tensor(0.0302, device='cuda:0', grad_fn=) tensor(0.0275, device='cuda:0', grad_fn=) tensor(0.0291, device='cuda:0', grad_fn=) tensor(0.0230, device='cuda:0', grad_fn=) tensor(0.0299, device='cuda:0', grad_fn=) tensor(0.0203, device='cuda:0', grad_fn=) tensor(0.0235, device='cuda:0', grad_fn=) tensor(0.0273, device='cuda:0', grad_fn=)Finally here's a visualisation of a set of test inputs and their rendered reconstructions:batch = iter(testloader).next()[0][0:64] out = model(batch.to(device)) plt.figure() inputs = torchvision.utils.make_grid(batch.unsqueeze(1)) plt.title("Inputs") plt.imshow(inputs.permute(1,2,0)) plt.figure() outputs = torchvision.utils.make_grid(out.detach().cpu().unsqueeze(1)) plt.title("Outputs") plt.imshow(outputs.permute(1,2,0))Temporal Evolution of Combustion TemperaturefieldminMaxFile="./fieldMinMax.dat" with open(fieldminMaxFile,"r") as fp: comment=fp.readline() header=fp.readline() 
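# the second line of fieldMinMax.dat holds the (presumably '#'-prefixed) column names;
# the slice below drops the leading character and the trailing newline before
# splitting the line into those names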
header=header[1:-1].split() indexs_processor=[] for i,name in enumerate(header): if header[i]=="processor": indexs_processor.append(i) indexs_processor.reverse() data=pd.read_csv(fieldminMaxFile,comment='#', sep='\t',header=None) data=data.drop(indexs_processor,axis=1) data.rename(columns=lambda x:header[x],inplace=True) data.head() sampling_rate=10 data_sampling=data[data.index%sampling_rate==0] data_sampling.shape fig, ax = plt.subplots() ax.plot(data["Time"],data["max"]/Tref,lineStyle="-",color="r",label="Maximum Temperature") ax.set_xlabel(f"Time (s)") ax.set_ylabel(f"Dimensionless T") ax.set_title(f"Combustion Tempereature Evolution") ax.legend(loc="upper right"):2: MatplotlibDeprecationWarning: Case-insensitive properties were deprecated in 3.3 and support will be removed two minor releases later ax.plot(data["Time"],data["max"]/Tref,lineStyle="-",color="r",label="Maximum Temperature")Transversely averaged O2 fraction and temperature distributions at three typical time instantsdef readOpenFoamField(file,nx,ny,normizedValue=1): with open(file,"r") as fp: lines=fp.readlines() for i,line in enumerate(lines): if line.startswith("internalField"): start=i+3 elif line.startswith("boundaryField"): end=i-4 break field=[] for i in np.arange(start,end+1): value=float(lines[i].replace('\n', '')) field.append(value/normizedValue) field=np.array(field).reshape(ny,nx) return field def show(timeInstant): cokeField=readOpenFoamField(f"../{str(timeInstant)}/coke",lx,ly) O2Field=readOpenFoamField(f"../{str(timeInstant)}/O2",lx,ly) TField=readOpenFoamField(f"../{str(timeInstant)}/T",lx,ly,Tref) fig,axs=plt.subplots(nrows=3, sharex=True, figsize=(13, 6)) fig.tight_layout() plt.rcParams.update({'mathtext.default': 'regular' }) # fig.suptitle(f"Field contours at time instant of {str(timeInstant)} s", fontsize=20) fig.text(0.55, 1.02, f'Field contours at time instant of {str(timeInstant)} s', transform=fig.transFigure, horizontalalignment='center', fontsize=18) im0=axs[0].imshow(cokeField,cmap="coolwarm") axs[0].set_title("coke fraction") bbox_ax0 = axs[0].get_position() loc_cbar0 = fig.add_axes([bbox_ax0.x1*1.01, bbox_ax0.y0, 0.02, bbox_ax0.y1-bbox_ax0.y0]) cbar0 = fig.colorbar(im0, cax=loc_cbar0) im1=axs[1].imshow(O2Field,cmap="coolwarm") plt.rcParams.update({'mathtext.default': 'regular' }) axs[1].set_title("${O_2}$ fraction") bbox_ax1 = axs[1].get_position() loc_cbar1 = fig.add_axes([bbox_ax1.x1*1.01, bbox_ax1.y0, 0.02, bbox_ax1.y1-bbox_ax1.y0]) cbar1 = fig.colorbar(im1, cax=loc_cbar1) im2=axs[2].imshow(TField,cmap="coolwarm") axs[2].set_title("Temperature") bbox_ax2 = axs[2].get_position() loc_cbar2 = fig.add_axes([bbox_ax2.x1*1.01, bbox_ax2.y0, 0.02, bbox_ax2.y1-bbox_ax2.y0]) cbar2 = fig.colorbar(im2, cax=loc_cbar2) # show(t1) t1=0.2 t2=0.6 t3=1 show(t1) show(t2) show(t3) cokeField0=readOpenFoamField(f"../{str(t1)}/coke",lx,ly) O2Field0=readOpenFoamField(f"../{str(t1)}/O2",lx,ly) TField0=readOpenFoamField(f"../{str(t1)}/T",lx,ly,Tref) cokeField1=readOpenFoamField(f"../{str(t2)}/coke",lx,ly) O2Field1=readOpenFoamField(f"../{str(t2)}/O2",lx,ly) TField1=readOpenFoamField(f"../{str(t2)}/T",lx,ly,Tref) cokeField2=readOpenFoamField(f"../{str(t3)}/coke",lx,ly) O2Field2=readOpenFoamField(f"../{str(t3)}/O2",lx,ly) TField2=readOpenFoamField(f"../{str(t3)}/T",lx,ly,Tref) fig,axs=plt.subplots(nrows=3, sharex=True, figsize=(10, 6)) fig.tight_layout() axs[0].plot(np.mean(cokeField0,axis=0),linestyle="-.",color="k",label=fr"$\mathit{{t}}\ $ = {str(t1)} s") 
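# np.mean(field, axis=0) averages each field over the transverse (row) direction,
# leaving a 1d profile along the flow direction for each of the three time instants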
axs[0].plot(np.mean(cokeField1,axis=0),linestyle="--",color="b",label=fr"$\mathit{{t}}\ $ = {str(t2)} s") axs[0].plot(np.mean(cokeField2,axis=0),linestyle="-",color="r",label=fr"$\mathit{{t}}\ $ = {str(t3)} s") axs[0].set_ylabel(f"Coke Fraction") axs[0].legend() axs[1].plot(np.mean(O2Field0,axis=0),linestyle="-.",color="k",label=fr"$\mathit{{t}}\ $ = {str(t1)} s") axs[1].plot(np.mean(O2Field1,axis=0),linestyle="--",color="b",label=fr"$\mathit{{t}}\ $ = {str(t2)} s") axs[1].plot(np.mean(O2Field2,axis=0),linestyle="-",color="r",label=fr"$\mathit{{t}}\ $ = {str(t3)} s") axs[1].set_ylabel(f"$O_{2}$ Fraction") axs[1].legend() axs[2].plot(np.mean(TField0,axis=0),linestyle="-.",color="k",label=fr"$\mathit{{t}}\ $ = {str(t1)} s") axs[2].plot(np.mean(TField1,axis=0),linestyle="--",color="b",label=fr"$\mathit{{t}}\ $ = {str(t2)} s") axs[2].plot(np.mean(TField2,axis=0),linestyle="-",color="r",label=fr"$\mathit{{t}}\ $ = {str(t3)} s") axs[2].set_ylabel(f"Temperature") axs[2].legend() axs[2].set_xlim([0,400])Temporal Evolution of Volme-averaged residual coke and reaction ratedef readOpenFoamUField(file,nx,ny,normizedValue=1,component=0): with open(file,"r") as fp: lines=fp.readlines() for i,line in enumerate(lines): if line.startswith("internalField"): start=i+3 elif line.startswith("boundaryField"): end=i-4 break field=[] for i in np.arange(start,end+1): values=lines[i].replace('\n', '').split() values=[float(value.replace('(', '').replace(')', '')) for value in values] value=values[component] field.append(value/normizedValue) field=np.array(field).reshape(ny,nx) return field times=np.arange(timeStep,endTime+timeStep,timeStep) stimes=[f"{t:.2f}".rstrip('.0') for t in times] volumeAveragedCoke=[] volumeAveragedReactionRate=[] sumReactionRate=[] inletfluxs=[] for t in stimes: cokeField=readOpenFoamField(f"../{str(t)}/coke",lx,ly) volumeAveragedCoke.append(np.mean(cokeField)) cokeReactionRateField=readOpenFoamField(f"../{str(t)}/cokeRectionRate",lx,ly) volumeAveragedReactionRate.append(np.mean(cokeReactionRateField)) sumReactionRate.append(np.sum(cokeReactionRateField)) densityField=readOpenFoamField(f"../{str(t)}/rho",lx,ly) UxField=readOpenFoamUField(f"../{str(t)}/U",lx,ly) inletFluxProfile=densityField[:,0]*UxField[:,0] inletfluxs.append(np.sum(inletFluxProfile)) fig,ax=plt.subplots() ax.plot(np.array(sumReactionRate)*(pixelResolution*pixelResolution)*-1/MCoke*MO2,linestyle="-",color="b") ax.set_xlabel('time (s)') ax.set_ylabel("Total $O_2$ Reaction Rate (kg/s)",color="b") ax.set_ylim([1e-7,1e-5]) ax.set_yscale('log') ax.tick_params(axis='y', labelcolor="b") ax2 = ax.twinx() ax2.plot(np.array(inletfluxs)*pixelResolution*YO2,linestyle="--",color="r") ax2.set_ylabel("Total $O_{2}$ Flux by convection",color="r") ax2.set_ylim([1e-7,1e-5]) ax2.set_yscale('log') ax2.tick_params(axis='y', labelcolor="r") fig.tight_layout() fig,ax=plt.subplots() ax.plot(volumeAveragedCoke,linestyle="-",color="b") ax.set_xlabel('time (s)') ax.set_ylabel("Total residual coke fraction")Alunos* * import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import pandas.core.algorithms as algos from pandas import Series import scipy.stats.stats as stats import re import traceback import string from sklearn import metrics from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV from sklearn import preprocessing import seaborn as sns import warnings # semente e 
remocao de warnings seed=42 warnings.filterwarnings('ignore') # Caminho de leitura da base os.environ["DATASET_PATH"] = os.getcwd().replace('\\','/').replace('training','data/') + 'sample_products.csv' #lendo dados # DATASET_PATH = os.getcwd().replace('\\','/').replace('training','data/') + 'sample_products.csv' data = pd.read_csv(os.environ["DATASET_PATH"]) print(data.shape) data.head() # categorizando as variáveis le = preprocessing.LabelEncoder() le.fit(data['category']) data['Target'] = le.transform(data['category'])As categorias da variável estão codificadas da seguinte maneira:* 0: Bebê* 1: Bijuterias e Jóias* 2: Decoração* 3: Lembrancinhas* 4: Outros* 5: Papel e CiaX_train, X_test, y_train, y_test = train_test_split(data.drop(['category', 'Target'], axis=1), data['Target'], test_size=.2, random_state=seed) print('amostras em treino: %i' % X_train.shape[0], 'amostras em teste: %i' % X_test.shape[0], 'número de características: %i' % X_train.shape[1], 'número de classes: %i' % (np.max(y_train) + 1), sep='\n', end='\n\n') cols = X_train.dtypes[(X_train.dtypes == 'int64') | (X_train.dtypes == 'float64')].index X_train[cols] = X_train[cols].fillna(-99999) X_test[cols] = X_test[cols].fillna(-99999) plt.figure(figsize=(16, 4)) plt.subplot(121) plt.title('Frequencia das classes no conjunto de treinamento (%i amostras)' % len(X_train)) labels, counts = np.unique(y_train, return_counts=True) sns.barplot(labels, counts) plt.subplot(122) plt.title('Frequencia das classes no conjunto de teste (%i amostras)' % len(X_test)) labels, counts = np.unique(y_test, return_counts=True) sns.barplot(labels, counts);C:\Users\Avell\AppData\Roaming\Python\Python37\site-packages\seaborn\_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation. FutureWarning C:\Users\Avell\AppData\Roaming\Python\Python37\site-packages\seaborn\_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation. FutureWarningObservamos acima que as classes estão bem desbalanceadas entre sim, o que pode comprometer o treinamento do modelo.Faremos um modelo com peso distintos para cada categoria Treinamentodef evaluate(y, p, probabilities, labels=None): from sklearn import metrics # Cálculo das métricas de acerto. print('Accuracy:', metrics.accuracy_score(y, p).round(3)) print('Accuracy (balanced):', metrics.balanced_accuracy_score(y, p).round(3)) # Calculo da matriz de confusão. c = metrics.confusion_matrix(y, p) r = c / c.sum(axis=1, keepdims=True) # Impressão dos gráficos. (plt .figure(figsize=(16, 12)) .suptitle('Matriz de confusão', fontsize=20)) sns.heatmap(r, cmap="YlGnBu", linewidths=.5, annot=True, fmt=".1%", xticklabels=labels, yticklabels=labels, cbar=False) (plt .figure(figsize=(16, 12)) .suptitle('Distribuição de confiança para cada classe', fontsize=20)) for i in np.unique(y): # Para cada classe `i`, seleciona suas amostras e filtra a confiança # do modelo em predizer estas amostras como sendo da classe `i`. 
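# i.e. for each true class i, plot the smoothed distribution of the softmax
# confidence that the model assigns to class i on the samples whose true label is i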
sns.distplot(probabilities[y.ravel() == i][:, i], hist=False, label=labels[i]) parameters = {'penalty':('l1', 'l2', 'elasticnet'), 'C':[0.01, 0.1, 1], 'max_iter':[100, 200, 500]} lr = LogisticRegression(class_weight='balanced', solver='liblinear', tol=0.1) clf_lr = GridSearchCV(lr, parameters, scoring='balanced_accuracy', verbose=2) clf_lr.fit(X_train[cols[2:]], y_train) evaluate(y_test, clf_lr.predict(X_test[cols[2:]]), clf_lr.predict_proba(X_test[cols[2:]]), labels=le.classes_) # Fazendo GridSearchCV # parameters = {'criterion':('gini', 'entropy'), # 'max_features':('auto', 'sqrt', 'log2'), # 'max_depth':[2, 3, 5, 10], # 'min_samples_split':[50, 75, 100], # 'min_samples_leaf':[15, 25, 50, 75, 100]} # RFC = RandomForestClassifier(class_weight='balanced_subsample', n_jobs=-1) # # clf_RFC = GridSearchCV(RFC, parameters, scoring='balanced_accuracy', verbose=2) # clf_RFC.fit(X_train[cols[2:]], y_train) from sklearn.ensemble import RandomForestClassifier RFC = RandomForestClassifier(class_weight='balanced', n_jobs=-1, criterion = 'gini', max_depth = 10, max_features = 'log2', min_samples_leaf = 15, min_samples_split = 50) clf_RFC = RFC.fit(X_train[cols[2:]], y_train) import pickle with open('trainer.pkl', 'wb') as model_file: pickle.dump(clf_RFC, model_file) # Cálculo das métricas de acerto. os.environ["METRICS_PATH"] = 'Accuracy: ' + str(metrics.accuracy_score(y_test, clf_RFC.predict(X_test[cols[2:]])).round(3)) os.environ["MODEL_PATH"] = os.getcwd().replace('\\','/') + '/trainer.pkl' evaluate(y_test, clf_RFC.predict(X_test[cols[2:]]), clf_RFC.predict_proba(X_test[cols[2:]]), labels=le.classes_)Accuracy: 0.566 Accuracy (balanced): 0.48A notebook to practice simulating a card game# import libraries here import random import numpy as npThe game - modified warSetup: shuffle and evenly distribute a deck of cards between two playersGameplay: - players simultaneously show the top card of their deck - if one of the cards has a numerical value that is higher than the other, the higher player takes both revealed cards and puts them at the bottom of their deck - (adjustment) if the players are tied, the return the card to the bottom of their deck Ending: - The game ends when one player has possession of all of the cards in their hand 1. Generating a shuffled deck of cardsWrite a function that makes a list of cards. Here we'll say that a deck is a list of ordered pairs of the form `(number,suit)`.def make_deck(num_cards=52,num_suits=4): a_deck = [(n,suit) for n in range(1,num_cards//num_suits+1) for suit in range(num_suits)] return a_deck def shuffle_deck(deck): random.shuffle(deck) return deck2. 
Write a function that deals the cardsdef deal_deck(deck,number_of_hands): hands = [ [] for hand in range(number_of_hands)] while len(deck)!=0: for hand in hands: card = deck.pop(0) hand = hand.append(card) return hands deck = shuffle_deck(make_deck()) deal_deck(deck,2)Control and environment settings# try to crop the lung area only bool_crop = True # scale each image indepently - set the darkest pixel to zero and the lightest pixel to 255 bool_scale = False # Random seed used in the notebook RAND_SEED = 42 # Train InceptionV3 from scratch or load existing model REBUILD_INCEPTIONV3 = False # Train Standard-CNN-Model from scratch or load existing model REBUILD_CNN = False # Path variables source_dir = '/content/curated_data/curated_data/' train_dir = str(source_dir + 'train/') test_dir = str(source_dir + 'test/') validation_dir = str(source_dir + 'validation/') # Path used to store and restore trained models target_path = '/content/drive/.shortcut-targets-by-id/1lp7vo6EG60jba1Gk_tuNpvDm7q5ZXlKX/Telecom_Specialization_ML3/Students/Peter_Christoph_Marko_Martin/program/'Imports!pip install scikit-plot # Import required packages from google.colab import drive from google.colab.patches import cv2_imshow import pylab as pl import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import glob import time import os import shutil import tensorflow as tf import cv2 as cv2 from IPython import display from datetime import datetime import scikitplot as skplt from sklearn.metrics import log_loss from sklearn.linear_model import LogisticRegression from sklearn.metrics import plot_confusion_matrix from sklearn.metrics import classification_report from sklearn.metrics import accuracy_score import joblib import lightgbm as lgb from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.layers import Dense, Flatten, Dropout, InputLayer, ReLU from tensorflow.keras.layers import BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, MaxPool2D from tensorflow.keras.models import Sequential, Model from tensorflow.keras.optimizers import Adam from tensorflow.keras.callbacks import ModelCheckpoint from tensorflow.keras.models import load_model from tensorflow.keras.applications import InceptionV3 from tensorflow.python.keras.callbacks import History %matplotlib inline !rm -r curated_data !rm meta_data_cap.csv !rm meta_data_covid.csv !rm meta_data_normal.csvHelper Functionsdef load_imagelist_label(main_dir, sub_dirs, data_format='/*.png'): """ helper function to create a img list and a label list from the data files takes use of the global variables classes_list and data_fromat """ img_list = [] label_list = [] for case in sub_dirs:#in original version, just a hint -> dict_case_df.keys():#dict_case_lst can be delete after final run file_list = glob.glob(os.path.join(main_dir, case + data_format)) for f in range(len(file_list)): img = cv2.imread(file_list[f], cv2.IMREAD_GRAYSCALE) #img = cv2.resize(img, (224, 224))#uncommit for final version, done in other function img_list.append(img) label_list.append(case) return np.array(img_list), np.array(label_list) # Find a filename of an image correctly predicted with highest confidence def get_best_image_name(model, generator, class_string): """ model: trained model generator: fitted generator class_string: one of generator.class_indices: {'CAP': 0, 'COVID': 1, 'NonCOVID': 2} Returns: - image_file_path of (one of) the image(s) in generator which was correctly predicted as belonging to the given class - 
i.e. prediction for given class is highest value. - softmax predictions for all classes for this image. """ print(f"generator.directory: {generator.directory}") print(f"generator.class_indices: {generator.class_indices}") class_n = generator.class_indices[class_string] y_true = generator.classes # Predictions for data in generator y_predict_softmax = model.predict(generator) # Choose index of maximum y_predict_max = np.argmax(y_predict_softmax, axis=-1) # Filter where model correctly predicts class_n correct_pred_bool = np.logical_and(y_predict_max == class_n, y_true == class_n) # Set wrong predictions to Zero correct_preds_float = y_predict_softmax[:,class_n] * correct_pred_bool # Find index of maximum prediction index_of_max = np.argmax(correct_preds_float) predictions_at_max = y_predict_softmax[index_of_max] filename_at_max = generator.filenames[index_of_max] filepath = str(generator.directory + filename_at_max) print(f"True class of image: {y_true[index_of_max]}") print(f"Predicted class of image: {y_predict_max[index_of_max]}") return filepath, predictions_at_max # Test code: # Uncomment one line: # filepath, predictions = get_best_image_name(best_model_cnn, test_generator, 'CAP') # filepath, predictions = get_best_image_name(best_model_cnn, test_generator, 'COVID') # filepath, predictions = get_best_image_name(best_model_cnn, test_generator, 'NonCOVID') # print(filepath) # print(predictions) # img = plt.imread(filepath) # plt.imshow(img, cmap='gray') # Test code for loop over classes: # for cl in test_generator.class_indices: # print(cl) # filepath, predictions = get_best_image_name(best_model_cnn, test_generator, cl) # print(filepath) # print(predictions) # img = plt.imread(filepath) # plt.xticks([]) # plt.yticks([]) # plt.imshow(img, cmap='gray') # plt.show() # Find a filename of an image wrongly predicted with highest confidence def get_worst_image_name(model, generator, class_string): """ model: trained model generator: fitted generator class_string: one of generator.class_indices: {'CAP': 0, 'COVID': 1, 'NonCOVID': 2} Returns: - image_file_path of (one of) the image(s) in generator which was incorrectly predicted as NOT belonging to the given class with highest confidence - i.e. prediction for given class is lowest value. - softmax predictions for all classes for this image. 
""" print(f"generator.directory: {generator.directory}") print(f"generator.class_indices: {generator.class_indices}") class_n = generator.class_indices[class_string] y_true = generator.classes # Predictions for data in generator y_predict_softmax = model.predict(generator) # Choose index of maximum y_predict_max = np.argmax(y_predict_softmax, axis=-1) # Searching for images where model predicts class_n, but class_n not true filter_bool = np.logical_and(y_predict_max == class_n, y_true != class_n) # Filter out the other predictions (set to Zero) wrong_preds_float = y_predict_softmax[:,class_n] * filter_bool # Find index of maximum prediction index_of_max = np.argmax(wrong_preds_float) predictions_at_max = y_predict_softmax[index_of_max] filename_at_max = generator.filenames[index_of_max] filepath = str(generator.directory + filename_at_max) print(f"True class of image: {y_true[index_of_max]}") print(f"Predicted class of image: {y_predict_max[index_of_max]}") return filepath, predictions_at_max # Test code: # Uncomment one line: # filepath, predictions = get_worst_image_name(best_model_cnn, test_generator, 'CAP') # filepath, predictions = get_worst_image_name(best_model_cnn, test_generator, 'COVID') # filepath, predictions = get_worst_image_name(best_model_cnn, test_generator, 'NonCOVID') #print(filepath) #print(predictions) #img = plt.imread(filepath) #plt.imshow(img, cmap='gray') # Test code for loop over classes # for cl in test_generator.class_indices: # print(cl) # filepath, predictions = get_worst_image_name(best_model_cnn, test_generator, cl) # print(filepath) # print(predictions) # img = plt.imread(filepath) # plt.xticks([]) # plt.yticks([]) # plt.imshow(img, cmap='gray') # plt.show() # Plot the training history of a neural network def plot_training_history(history: History): fig, axs = plt.subplots(1, 2, figsize=(12,4)) axs[0].plot(history.history['accuracy'], color='blue', label='train') axs[0].plot(history.history['val_accuracy'], color='orange', label='validation') axs[0].legend() axs[0].set_title('Classification Accuracy') axs[1].plot(history.history['loss'], color='blue', label='train') axs[1].plot(history.history['val_loss'], color='orange', label='validation') axs[1].legend() axs[1].set_title('Classification Loss') plt.show() # Save best model on Google Drive def save_model_in_drive(model, model_name): # Initialize target path #target_path = '/content/drive/.shortcut-targets-by-id/1lp7vo6EG60jba1Gk_tuNpvDm7q5ZXlKX/Telecom_Specialization_ML3/Students/Peter_Christoph_Marko_Martin/program/' # Get Timestamp dateTimeObj = datetime.now() str_timestamp = dateTimeObj.strftime("%d_%b_%Y_%H_%M_%S") # Create model filename model_filename = str(model_name + "_" + str_timestamp) # Create complete filepath model_filepath = str(target_path + model_filename) # Save model model.save(model_filepath) print(f"Model saved: {model_filepath}") return # Test code # save_model_in_drive(best_model_cnn, "best_model_cnn") def plot_confusion_matrix_x(labels,predictions,classes_name): """ example: predictions=max_predict # np.argmax(y_predict, axis=-1) labels=test_generator.classes classes_name = artist_list """ normalize = False # con_mat = tf.math.confusion_matrix(labels=labels, predictions=np.argmax(predictions, axis=-1) ).numpy() con_mat = tf.math.confusion_matrix(labels=labels, predictions=predictions).numpy() con_mat_norm = np.around(con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis], decimals=2) if normalize == True: con_mat_df = pd.DataFrame(con_mat_norm,index = classes_name,columns = 
classes_name) else: con_mat_df = pd.DataFrame(con_mat,index = classes_name,columns = classes_name) figure = plt.figure(figsize=(4, 4)) sns.heatmap(con_mat_df, annot=True,fmt='g',cmap=plt.cm.Blues) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') plt.show()Loading data# mount google drive to access dataset food drive.mount('/content/drive', force_remount=True) # unzip the data on the virtual machine %cd /content/ !unzip '/content/drive/.shortcut-targets-by-id/1lp7vo6EG60jba1Gk_tuNpvDm7q5ZXlKX/Telecom_Specialization_ML3/Students/Peter_Christoph_Marko_Martin/data/archive.zip' ##### Inconsistency between file names and file names in meta data ##### ## ==> rename files to the names used in meta_data_covid.csv !mv "/content/curated_data/curated_data/2COVID/17_Zhao_Recurrence-of-positive-SARS-CoV-2-RNA-in-C_2020_International-Journal-of-Inf-p1-21%0.png" "/content/curated_data/curated_data/2COVID/17_Zhao_Recurrence-of-COVID-19-SARS-CoV-2-RNA-in-C_2020_International-Journal-of-Inf-p1-21%0.png" !mv "/content/curated_data/curated_data/2COVID/17_Zhao_Recurrence-of-positive-SARS-CoV-2-RNA-in-C_2020_International-Journal-of-Inf-p1-21%1.png" "/content/curated_data/curated_data/2COVID/17_Zhao_Recurrence-of-COVID-19-SARS-CoV-2-RNA-in-C_2020_International-Journal-of-Inf-p1-21%1.png" # Load data as dataframes df_meta_cap = pd.read_csv('meta_data_cap.csv') df_meta_covid = pd.read_csv('meta_data_covid.csv', encoding = "ISO-8859-1") df_meta_normal = pd.read_csv('meta_data_normal.csv')Split data into training, valdiation and test datanp.random.seed(RAND_SEED) # Configuration of sizes test_size = 0.1 validation_size = 0.1 # List of classes classes_list = [('1NonCOVID','NonCOVID'), ('2COVID','COVID'), ('3CAP','CAP')] # Get patient to image mappings # Covid patients covid_patient_to_image = df_meta_covid.groupby(['Patient ID'])['File name'].apply(list).reset_index() # Normal patients normal_patient_to_image = df_meta_normal.groupby(['Patient ID'])['File name'].apply(list).reset_index() # CAP patients cap_patient_to_image = df_meta_cap.groupby(['Patient ID'])['File name'].apply(list).reset_index() def split_ids(patient_to_image, val_size, test_size): ''' ''' np.random.seed(RAND_SEED) # Calculate the number of training, valdiation and test samples len_all = len(patient_to_image) nb_test = np.floor(len_all * test_size).astype('int') nb_val = np.floor(len_all * val_size).astype('int') nb_train = len_all - (nb_test + nb_val).astype('int') # Create sub-samples of patient_to_image patient_ids = patient_to_image.index.values train_ids = np.random.choice(patient_ids, nb_train, replace=False) val_ids = np.random.choice(list(set(patient_ids)-set(train_ids)), nb_val, replace=False) test_ids = np.random.choice(list(set(patient_ids)-set(train_ids)-set(val_ids)), nb_test, replace = False) return train_ids, val_ids, test_ids # Split data into training, validation and test subsets sample_id_dict = {} for _, cl_dest in classes_list: if cl_dest == 'COVID': # Split covid patients covid_train_ids, covid_val_ids, covid_test_ids = \ split_ids(covid_patient_to_image, validation_size, test_size) # Add data to dictionary sample_id_dict[cl_dest] = {'IMG':covid_patient_to_image, 'TRAIN':covid_train_ids, 'VAL':covid_val_ids, 'TEST':covid_test_ids} elif cl_dest == 'NonCOVID': # Split normal patients norm_train_ids, norm_val_ids, norm_test_ids = \ split_ids(normal_patient_to_image, validation_size, test_size) # Add data to dictionary sample_id_dict[cl_dest] = {'IMG':normal_patient_to_image, 'TRAIN':norm_train_ids, 
'VAL':norm_val_ids, 'TEST':norm_test_ids} else: # CAP # Split cap patients cap_train_ids, cap_val_ids, cap_test_ids = \ split_ids(cap_patient_to_image, validation_size, test_size) # Add data to dictionary sample_id_dict[cl_dest] = {'IMG':cap_patient_to_image, 'TRAIN':cap_train_ids, 'VAL':cap_val_ids, 'TEST':cap_test_ids} # Create new directories for training, validation and test data !mkdir $train_dir !mkdir $test_dir !mkdir $validation_dir # Create subdirectories for each class for _, cl in classes_list: c_train = str(train_dir + cl) !mkdir $c_train c_test = str(test_dir + cl) !mkdir $c_test c_validation = str(validation_dir + cl) !mkdir $c_validation def move_files(patient_to_image, patient_ids, from_dir, to_dir): ''' ''' # Set a limit to mv-strings MAX_LEN = 20000 # Check if at least one patient id was provided if len(patient_to_image) < 1: print('No files to move!') return None fs_to_move_batch = [] # move files batch-wise to improve processing time fs_to_move = '' for id in patient_ids: file_names = f' {from_dir}'.join([fn for fn in patient_to_image.at[id,'File name']]) # Extend fs_to_move by new file names and from_dir at the beginning fs_to_move = fs_to_move + f' {from_dir}' + file_names if len(fs_to_move) > MAX_LEN: fs_to_move_batch.append(fs_to_move) fs_to_move = '' if len(fs_to_move) > 0: # Append the last fs_to_move string to the file_stack fs_to_move_batch.append(fs_to_move) for fs_to_move in fs_to_move_batch: !mv $fs_to_move $to_dir # Move all data to new destinations %%time for cl_src, cl_dest in classes_list: from_dir = source_dir + cl_src + '/' imgs = sample_id_dict[cl_dest]['IMG'] # Move training data to_dir = train_dir + cl_dest ids = sample_id_dict[cl_dest]['TRAIN'] move_files(imgs, ids, from_dir, to_dir) # Move validation data to_dir = validation_dir + cl_dest ids = sample_id_dict[cl_dest]['VAL'] move_files(imgs, ids, from_dir, to_dir) # Move test data to_dir = test_dir + cl_dest ids = sample_id_dict[cl_dest]['TEST'] move_files(imgs, ids, from_dir, to_dir)CPU times: user 227 ms, sys: 591 ms, total: 818 ms Wall time: 6.88 sresearch / the plan1. 
using some U-Net to create a mask for the lung1.1 create the U-Net on kaggle1.2 there are some training data avaliablek-means / DBSCANopencvmask-r-cnnhttps://towardsdatascience.com/computer-vision-instance-segmentation-with-mask-r-cnn-7983502fcad1https://www.analyticsvidhya.com/blog/2019/07/computer-vision-implementing-mask-r-cnn-image-segmentation/Image Segmentation with U-net and Keras!https://www.youtube.com/watch?v=7qJzp3i62S8177 - Semantic segmentation made easy (using segmentation models library)https://www.youtube.com/watch?v=J_XSd_u_Yew&t=135shttps://github.com/bnsreenu/python_for_microscopists/blob/master/177_semantic_segmentation_made_easy_using_segm_models.py google search how to segments lungs from ct scan pythonhow to install segmentation_models on kaggleAttributeError: module 'keras.utils' has no attribute 'generic_utils' vgg16_unethttps://www.youtube.com/watch?v=mgdB7WezqbUhttps://github.com/nikhilroxtomar/Semantic-Segmentation-Architecture/blob/main/TensorFlow/vgg16_unet.pyfrom tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input from tensorflow.keras.models import Model from tensorflow.keras.applications import VGG16 def conv_block(input, num_filters): x = Conv2D(num_filters, 3, padding="same")(input) x = BatchNormalization()(x) x = Activation("relu")(x) x = Conv2D(num_filters, 3, padding="same")(x) x = BatchNormalization()(x) x = Activation("relu")(x) return x def decoder_block(input, skip_features, num_filters): x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(input) x = Concatenate()([x, skip_features]) x = conv_block(x, num_filters) return x def build_vgg16_unet(input_shape): """ Input """ inputs = Input(input_shape) """ Pre-trained VGG16 Model """ vgg16 = VGG16(include_top=False, weights="imagenet", input_tensor=inputs) """ Encoder """ s1 = vgg16.get_layer("block1_conv2").output ## (512 x 512) s2 = vgg16.get_layer("block2_conv2").output ## (256 x 256) s3 = vgg16.get_layer("block3_conv3").output ## (128 x 128) s4 = vgg16.get_layer("block4_conv3").output ## (64 x 64) """ Bridge """ b1 = vgg16.get_layer("block5_conv3").output ## (32 x 32) """ Decoder """ d1 = decoder_block(b1, s4, 512) ## (64 x 64) d2 = decoder_block(d1, s3, 256) ## (128 x 128) d3 = decoder_block(d2, s2, 128) ## (256 x 256) d4 = decoder_block(d3, s1, 64) ## (512 x 512) """ Output """ outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d4) model = Model(inputs, outputs, name="VGG16_U-Net") return model if __name__ == "__main__": input_shape = (512, 512, 3) model = build_vgg16_unet(input_shape) model.summary()Model: "VGG16_U-Net" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input_2 (InputLayer) [(None, 512, 512, 3) 0 __________________________________________________________________________________________________ block1_conv1 (Conv2D) (None, 512, 512, 64) 1792 input_2[0][0] __________________________________________________________________________________________________ block1_conv2 (Conv2D) (None, 512, 512, 64) 36928 block1_conv1[0][0] __________________________________________________________________________________________________ block1_pool (MaxPooling2D) (None, 256, 256, 64) 0 block1_conv2[0][0] [...]predicttarget_size = (512, 512) input_shape = (512, 512, 3) batch_size = 32 test_datagen = 
ImageDataGenerator(rescale=1./255) test_generator = test_datagen.flow_from_directory( test_dir, target_size=target_size, batch_size=batch_size, class_mode='sparse', shuffle=False) model = build_vgg16_unet(input_shape) # ============================================================================= # Test train_generator (to visualize augmentations performed) # ============================================================================= data_batch = train_generator.next() imgs = data_batch[0] labels = data_batch[1] # Predict for test data y_predict = model.predict(test_generator)Preprocessing Models using pre-cropped images Standard CNN Model# ============================================================================= # ImageDataGenerator # ============================================================================= # ============================================================================= # Paths and variables # ============================================================================= target_size = (224, 224) batch_size = 128 # batch_size = 32 # ============================================================================= # Set up data generators # ============================================================================= train_datagen = ImageDataGenerator( rescale=1./255, shear_range=0.2, zoom_range=0., brightness_range = (0.7, 1.3), horizontal_flip=True, vertical_flip=False, rotation_range=0) val_datagen = ImageDataGenerator(rescale=1./255) test_datagen = ImageDataGenerator(rescale=1./255) # read data from directory train_generator = train_datagen.flow_from_directory( train_dir, target_size=target_size, batch_size=batch_size, class_mode='sparse', shuffle=False) validation_generator = val_datagen.flow_from_directory( validation_dir, target_size=target_size, batch_size=batch_size, class_mode='sparse', shuffle=False) test_generator = test_datagen.flow_from_directory( test_dir, target_size=target_size, batch_size=batch_size, class_mode='sparse', shuffle=False) # ============================================================================= # Test train_generator (to visualize augmentations performed) # ============================================================================= data_batch = train_generator.next() imgs = data_batch[0] labels = data_batch[1] # The generator remembers the filenames from the flow_from_directory call!
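# (Added sketch, not part of the original notebook.) The comparison loop below pairs the
# i-th image of this first batch with train_generator.filenames[i]; that pairing only
# holds because the generator was created with shuffle=False and data_batch is the first
# batch drawn. A quick sanity check, assuming a standard Keras DirectoryIterator:
assert not train_generator.shuffle, "index-based pairing requires shuffle=False"
assert len(imgs) <= len(train_generator.filenames)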
orig_img_fn_list = train_generator.filenames for i in range(batch_size): img = imgs[i] orig_img = plt.imread(str(train_dir + orig_img_fn_list[i])) f, ax = plt.subplots(1,2) ax[0].imshow(img, cmap='gray') ax[0].set_title('Augmented') ax[1].imshow(orig_img, cmap='gray') ax[1].set_title('Original') plt.show() print(f"Original shape: {orig_img.shape}") print(f"Augmented shape: {img.shape}") break train_generator.shuffle = True # ============================================================================= # Define Model # ============================================================================= input_shape = (224, 224, 3) # Model parameters learning_rate = 0.001 dec_rate = 0 optim = Adam(lr=learning_rate, decay=dec_rate) def CNN_ConvLayers(): model = Sequential() model.add(Conv2D(16, (2,2), padding='same', activation='relu', input_shape=input_shape)) model.add(BatchNormalization()) model.add(MaxPool2D()) # now: 112 x 112 model.add(Conv2D(32, (2,2), padding='same', activation='relu')) model.add(BatchNormalization()) model.add(MaxPool2D()) # Try: model.add(Dropout(0.3)) # now: 56 x 56 model.add(Conv2D(64, (2,2), padding='same', activation='relu')) model.add(BatchNormalization()) model.add(MaxPool2D()) # Try: model.add(Dropout(0.3)) # now: 28 x 28 model.add(Flatten()) model.add(Dense(1024, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.5)) model.add(Dense(64, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.5)) model.add(Dense(3, activation='softmax')) return model # Compile Model model = CNN_ConvLayers() model.compile(loss='sparse_categorical_crossentropy', optimizer=optim, metrics=['accuracy']) model.summary() # Fit Model and Save Best Model if REBUILD_CNN: mcp = ModelCheckpoint('best_model_cnn', monitor='val_accuracy', save_best_only=True, verbose=1) # STEP_SIZE_TRAIN = 20 STEP_SIZE_TEST = 5 epochs=30 history = model.fit(train_generator, validation_data = validation_generator, validation_steps = STEP_SIZE_TEST, epochs=epochs, callbacks=[mcp], workers=8) # have a look at the predictions of the "best_model_cnn" if REBUILD_CNN: from keras.models import load_model best_model_cnn = load_model('best_model_cnn') # save best model to drive (timestamp appended to filename) save_model_in_drive(best_model_cnn, "best_model_cnn_final") else: # reload previously saved model from drive # model_source_path = '/content/drive/.shortcut-targets-by-id/1lp7vo6EG60jba1Gk_tuNpvDm7q5ZXlKX/Telecom_Specialization_ML3/Students/Peter_Christoph_Marko_Martin/program/' model_source_path = target_path model_source_file = 'best_model_cnn_final_29_Apr_2021_09_06_20' # print(str(model_source_path + model_source_file)) best_model_cnn = load_model(str(model_source_path + model_source_file)) best_model_cnn.evaluate(test_generator) test_generator.shuffle=False # True classes y_true = test_generator.classes # Predict for test data y_predict = best_model_cnn.predict(test_generator) # Choose index of maximum y_predict = np.argmax(y_predict, axis=-1) plot_confusion_matrix_x(labels=y_true,predictions=y_predict,classes_name=['CAP', 'COVID', 'NonCOVID']) # Plot History if REBUILD_CNN: plot_training_history(history) # Create Classification Report from sklearn.metrics import classification_report y_true = test_generator.classes y_predict = best_model_cnn.predict(test_generator) # Choose index of maximum y_predict = np.argmax(y_predict, axis=-1) print(classification_report(y_true, y_predict))precision recall f1-score support 0 0.69 0.97 0.80 277 1 0.78 0.67 0.72 586 2 0.87 0.83 0.85 642 
accuracy 0.79 1505 macro avg 0.78 0.82 0.79 1505 weighted avg 0.80 0.79 0.79 1505Build machine learning workflow to predict new data with Amazon SageMaker and AWS Step Functions This script creates a Step Function state machine to preprocess the inference data and predict with the images in ECR. Import modulesimport uuid import boto3 import sagemaker from sagemaker.amazon.amazon_estimator import get_image_uri from sagemaker.s3 import S3Uploader from sagemaker import get_execution_role from sagemaker.sklearn.processing import SKLearnProcessor from sagemaker.processing import Processor, ProcessingInput, ProcessingOutput import stepfunctions from stepfunctions.steps import ( Chain, ProcessingStep, TransformStep ) from stepfunctions.inputs import ExecutionInput from stepfunctions.workflow import WorkflowSetup Modify according to your configurations.# Bucket name in S3 bucket = "hermione-sagemaker" # Set session region_name="us-east-1" boto3.setup_default_session(region_name=region_name) # Get user role role = get_execution_role() # Role to create and execute step functions # paste the AmazonSageMaker-StepFunctionsWorkflowExecutionRole ARN workflow_execution_role = "" # SageMaker expects unique names for each job, model and endpoint. # Otherwise, the execution will fail. The ExecutionInput creates # dynamically names for each execution. execution_input = ExecutionInput( schema={ "PreprocessingJobName": str, "TransformJobName": str } ) # Get AWS Account ID account_number = boto3.client("sts").get_caller_identity()["Account"] # Processor image name previous uploaded in ECR image_name_processor = "hermione-processor" # Inference image name previous uploaded in ECR image_name_inference = "hermione-inference" # Input and output paths to execute train and inference paths = { 'expectations': f"s3://{bucket}/PREPROCESSING/EXPECTATIONS", 'preprocessing': f"s3://{bucket}/PREPROCESSING/PREPROCESSING", 'test_raw': f"s3://{bucket}/TEST_RAW", 'inference_processed': f"s3://{bucket}/PREPROCESSING/INFERENCE_PROCESSED", 'validations': f"s3://{bucket}/PREPROCESSING/VALIDATIONS", 'model': f"s3://{bucket}/PREPROCESSING/MODEL/Hermione-train-2021-05-26-12-41-29-505/output/model.tar.gz", 'output_path': f"s3://{bucket}/PREPROCESSING/OUTPUT" } # instance to run the code instance_type_preprocessing="ml.t3.medium" instance_type_inference="ml.m5.large"Preprocessing Step# Processor image previous uploaded in ECR image_uri_processor = f"{account_number}.dkr.ecr.{region_name}.amazonaws.com/{image_name_processor}" # Creates the processor to access the ECR image processor = Processor(image_uri=image_uri_processor, role=role, instance_count=1, instance_type=instance_type_preprocessing) # Creates input and output objects for ProcessingStep inputs=[ ProcessingInput(source=paths['test_raw'], destination='/opt/ml/processing/input/raw_data', input_name='raw_data'), ProcessingInput(source=paths['preprocessing'], destination='/opt/ml/processing/input/preprocessing', input_name='preprocessing'), ProcessingInput(source=paths['expectations'], destination='/opt/ml/processing/input/expectations', input_name='expectations') ] outputs = [ ProcessingOutput( source="/opt/ml/processing/output/processed/inference", destination=paths['inference_processed'], output_name="inference_data", ), ProcessingOutput( source="/opt/ml/processing/output/validations", destination=paths['validations'], output_name="validations", ) ] # Creates the ProcessingStep processing_step = ProcessingStep( "SageMaker Preprocessing step", processor=processor, 
job_name=execution_input["PreprocessingJobName"], inputs=inputs, outputs=outputs, container_arguments=["--step", "test"] )Inference Step# Inference image previous uploaded in ECR image_uri_inference = f"{account_number}.dkr.ecr.{region_name}.amazonaws.com/{image_name_inference}" # Creates input and output objects for TransformStep input_path = paths['inference_processed'] model_path = paths['model'] output_path = paths['output_path'] # Creates the model to access the ECR image model = sagemaker.model.Model( image_uri = image_uri_inference, model_data=model_path, role=role) # Creates a transformer object from the trained model transformer = model.transformer( instance_count=1, instance_type=instance_type_inference, output_path=output_path, accept = 'text/csv') # Creates the TransformStep transform_step = TransformStep( "Inference Step", transformer=transformer, job_name=execution_input["TransformJobName"], data=input_path, content_type='text/csv', wait_for_completion=True, model_name=model.name )Create Workflow and Execute# Creates Fail state to mark the workflow failed in case any of the steps fail. failed_state_sagemaker_processing_failure = stepfunctions.steps.states.Fail( "ML Workflow failed", cause="SageMakerProcessingJobFailed" ) # Adds the Error handling in the workflow catch_state_processing = stepfunctions.steps.states.Catch( error_equals=["States.TaskFailed"], next_step=failed_state_sagemaker_processing_failure, ) processing_step.add_catch(catch_state_processing) transform_step.add_catch(catch_state_processing) # Creates workflow with Pre-Processing Job and Transform Job workflow_graph = Chain([processing_step, transform_step]) branching_workflow = Workflow( name="SFN_Hermione_Inference", definition=workflow_graph, role=workflow_execution_role, ) branching_workflow.create() # Generates unique names for Pre-Processing Job and Training Job # Each job requires a unique name preprocessing_job_name = "Hermione-Preprocessing-{}".format( uuid.uuid1().hex ) inference_job_name = "Hermione-Inference-{}".format( uuid.uuid1().hex ) # Executes the workflow execution = branching_workflow.execute( inputs={ "PreprocessingJobName": preprocessing_job_name, "TransformJobName": inference_job_name } ) execution_output = execution.get_output(wait=False) execution.render_progress()Resultsimport pandas as pd pd.read_csv('s3://hermione-sagemaker/PREPROCESSING/OUTPUT/inference.csv.out')Data Processingimport findspark findspark.init() import pyspark from pyspark.sql import SparkSession EXE_MEMORY="2G" DRIVER_MEMORY="24G" spark = SparkSession.builder.appName("AWS").config("spark.executor.memory", EXE_MEMORY).config("spark.executor.cores", "4").config("spark.driver.memory", DRIVER_MEMORY).getOrCreate() df = spark.read.json('C:/Users/salon/Downloads/All_Amazon_Review.json') df1 = df.select('overall','reviewText') df1.show(2) df1.to_json(r'D:\Project\review.json')Giving ID as idx to each row as reviewerID is not uniquefrom pyspark.sql.window import Window as W from pyspark.sql import functions as F df = df1.withColumn("idx", F.monotonically_increasing_id()) windowSpec = W.orderBy("idx") df.withColumn("idx", F.row_number().over(windowSpec)).show(2)+-------+--------------------+---+ |overall| reviewText|idx| +-------+--------------------+---+ | 1.0|Alexa is not able...| 1| | 4.0|Alexa works great...| 2| +-------+--------------------+---+ only showing top 2 rowsRemoving Null Values in reviewTextdf=df.where(df.reviewText.isNotNull())Removing Punctuations For cleaning text, in this we removed punctuations along with 
trailing and leading spaces. We also lower-cased all of the text.from pyspark.sql.functions import regexp_replace, trim, col, lower def removePunctuation(column): """Removes punctuation, changes to lower case, and strips leading and trailing spaces. Note: Only spaces, letters, and numbers should be retained. Other characters should be eliminated (e.g. it's becomes its). Leading and trailing spaces should be removed after punctuation is removed. Args: column (Column): A Column containing a sentence. Returns: Column: A Column named 'sentence' with clean-up operations applied. """ return trim(lower(regexp_replace(column, r'[^\sa-zA-Z0-9]', ''))).alias('reviewText') df=df.select("IDX", "overall", (removePunctuation(col('reviewText')))) df.show(5, False)[truncated table output: IDX | overall | reviewText]Data processingHere we tokenized the text and removed all stop words given in "StopWordsRemover" (the list of words is shown below). After this we lemmatized the text, then stemmed it, and finally removed words of length 3 or less.from pyspark.sql import SparkSession from pyspark.sql.functions import udf, col, lower, regexp_replace from pyspark.ml.feature import Tokenizer, StopWordsRemover from nltk.stem.snowball import SnowballStemmer from pyspark.sql.types import * from nltk import WordNetLemmatizer import nltk nltk.download('wordnet') # Tokenize text tokenizer = Tokenizer(inputCol='reviewText', outputCol='words_token') df_words_token = tokenizer.transform(df).select('idx','overall','words_token') df_words_token.show(5) # Remove stop words remover = StopWordsRemover(inputCol='words_token', outputCol='words_clean') df_words_no_stopw = remover.transform(df_words_token).select('idx', 'overall','words_clean') df_words_no_stopw.show(5) # Lemmatization lemm=WordNetLemmatizer() lemm_udf=udf(lambda tokens:[lemm.lemmatize(token) for token in tokens], ArrayType(StringType())) df_lemm = df_words_no_stopw.withColumn("lemmi", lemm_udf("words_clean")).select('IDX',"overall", 'lemmi') df_lemm.show(5) # Stem text stemmer = SnowballStemmer(language='english') stemmer_udf = udf(lambda tokens: [stemmer.stem(token) for token in tokens], ArrayType(StringType())) df_stemmed = df_lemm.withColumn("words_stemmed", stemmer_udf("lemmi")).select('IDX',"overall",'words_stemmed') df_stemmed.show(5) # Keep only words of length > 3 filter_length_udf = udf(lambda row: [x for x in row if len(x) > 3], ArrayType(StringType())) df_final_words = df_stemmed.withColumn('words', filter_length_udf(col('words_stemmed'))).select('IDX',"overall", 'words') df_final_words.show(5)[nltk_data] Downloading package wordnet to [nltk_data] C:\Users\salon\AppData\Roaming\nltk_data... 
[nltk_data] Unzipping corpora\wordnet.zip.Final DataframeHere is the final dataframe after cleaning, showing the top 10 rows.df_final_words.show(10, False)[truncated table output: IDX | overall | words]List of Stop Wordsfrom pyspark.ml.feature import StopWordsRemover # Define a list of stop words or use default list remover = StopWordsRemover() stopwords = remover.getStopWords() # Display default list print(stopwords[:200]) from pyspark.ml.feature import CountVectorizer from pyspark.mllib.linalg import Vectors cv=CountVectorizer(inputCol="words", outputCol="features") model=cv.fit(df_final_words) result=model.transform(df_final_words) result.show(10, False)[truncated table output]stoi%%time d = dstrs.stoi() %%time h = hstrs.str.isdecimal()stof%%time d = dstrs.stof() %%time h = hstrs.str.isdecimal()Joint plotssteels = error_analysis('matbench_steels','sigma_error') steels.plot_feat_distance_all(name='steels', n_neighbours = 200, n_feat = 200, scaling= 'n',ymax=1000, jointplot=True,xlog=True) dielectric = error_analysis('matbench_dielectric','n_error') dielectric.plot_feat_distance_all(name='dielectric', n_neighbours = 5, n_feat = 7, scaling= 'n',ymax=100, jointplot=True,xlog=True) expt_gap = error_analysis('matbench_expt_gap','E_g_error') expt_gap.plot_feat_distance_all(name='expt. gap', n_neighbours = 6000, n_feat = 500, scaling= 'n', jointplot=True,xlog=True) exf = error_analysis('matbench_jdft2d','E_error') exf.plot_feat_distance_all(name='exf. 
energy', n_neighbours = 5, n_feat = 500, scaling= 'n', jointplot=True,xlog=True) phonons = error_analysis('matbench_phonons','w_error') phonons.plot_feat_distance_all(name='phonons', n_neighbours = 5, n_feat = 50, scaling= 'n', jointplot=True, xlog=True) k = error_analysis('matbench_elastic','K_error',multi=['K_error','G_error']) k.plot_feat_distance_all(name='bulk mod.', n_neighbours = 5, n_feat = 150, scaling= 'n', jointplot=True,xlog=True)欢迎来到线性回归项目若项目中的题目有困难没完成也没关系,我们鼓励你带着问题提交项目,评审人会给予你诸多帮助。所有选做题都可以不做,不影响项目通过。如果你做了,那么项目评审会帮你批改,也会因为选做部分做错而判定为不通过。其中非代码题可以提交手写后扫描的 pdf 文件,或使用 Latex 在文档中直接回答。 1 矩阵运算 1.1 创建一个 4*4 的单位矩阵# 这个项目设计来帮你熟悉 python list 和线性代数 # 你不能调用任何NumPy以及相关的科学计算库来完成作业 # 本项目要求矩阵统一使用二维列表表示,如下: A = [[1,2,3], [2,3,3], [1,2,5]] B = [[1,2,3,5], [2,3,3,5], [1,2,5,1]] # 向量也用二维列表表示 C = [[1], [2], [3]] #TODO 创建一个 4*4 单位矩阵 I = [[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]]1.2 返回矩阵的行数和列数# TODO 返回矩阵的行数和列数 def shape(M): return len(M), len(M[0]) # 运行以下代码测试你的 shape 函数 %run -i -e test.py LinearRegressionTestCase.test_shape. ---------------------------------------------------------------------- Ran 1 test in 0.001s OK1.3 每个元素四舍五入到特定小数数位# TODO 每个元素四舍五入到特定小数数位 # 直接修改参数矩阵,无返回值 def matxRound(M, decPts=4): for row in range(len(M)): for col in range(len(M[row])): M[row][col] = round(M[row][col], decPts) # 运行以下代码测试你的 matxRound 函数 %run -i -e test.py LinearRegressionTestCase.test_matxRound. ---------------------------------------------------------------------- Ran 1 test in 0.010s OK1.4 计算矩阵的转置# TODO 计算矩阵的转置 def transpose(M): rows, cols = shape(M) new = [] for col in range(cols): new.append([]) for row in range(rows): new[col].append(0) for row in range(rows): for col in range(cols): new[col][row] = M[row][col] return new # Pythonic approach # return [list(col) for col in zip(*M)] # 运行以下代码测试你的 transpose 函数 %run -i -e test.py LinearRegressionTestCase.test_transpose. ---------------------------------------------------------------------- Ran 1 test in 0.030s OK1.5 计算矩阵乘法 AB# TODO 计算矩阵乘法 AB,如果无法相乘则raise ValueError def dot(a, b): return sum(i * j for i, j in zip(a, b)) def matxMultiply(A, B): n, m = shape(A) p, q = shape(B) if m != p: raise ValueError('dimension of matrices don\'t match') result = [] for row in range(n): result.append([]) for col in range(q): # ith row jth column = dot product of ith row of A and jth column of B # im not using [row][col] to index but rather just appending... # just because i can result[row].append(dot(A[row], [i[col] for i in B])) return result # 运行以下代码测试你的 matxMultiply 函数 %run -i -e test.py LinearRegressionTestCase.test_matxMultiply. ---------------------------------------------------------------------- Ran 1 test in 0.124s OK--- 2 Gaussian Jordan 消元法 2.1 构造增广矩阵$ A = \begin{bmatrix} a_{11} & a_{12} & ... & a_{1n}\\ a_{21} & a_{22} & ... & a_{2n}\\ a_{31} & a_{22} & ... & a_{3n}\\ ... & ... & ... & ...\\ a_{n1} & a_{n2} & ... & a_{nn}\\\end{bmatrix} , b = \begin{bmatrix} b_{1} \\ b_{2} \\ b_{3} \\ ... \\ b_{n} \\\end{bmatrix}$返回 $ Ab = \begin{bmatrix} a_{11} & a_{12} & ... & a_{1n} & b_{1}\\ a_{21} & a_{22} & ... & a_{2n} & b_{2}\\ a_{31} & a_{22} & ... & a_{3n} & b_{3}\\ ... & ... & ... & ...& ...\\ a_{n1} & a_{n2} & ... & a_{nn} & b_{n} \end{bmatrix}$# TODO 构造增广矩阵,假设A,b行数相同 def augmentMatrix(A, b): # assert n == shape(b)[0] # this is in the assumption! new = [A[row] + b[row] for row in range(len(A))] return new # 运行以下代码测试你的 augmentMatrix 函数 %run -i -e test.py LinearRegressionTestCase.test_augmentMatrix. 
---------------------------------------------------------------------- Ran 1 test in 0.004s OK2.2 初等行变换- 交换两行- 把某行乘以一个非零常数- 把某行加上另一行的若干倍:# TODO r1 <---> r2 # 直接修改参数矩阵,无返回值 def swapRows(M, r1, r2): if r1 != r2: M[r1], M[r2] = M[r2], M[r1] # 运行以下代码测试你的 swapRows 函数 %run -i -e test.py LinearRegressionTestCase.test_swapRows ## TODO r1 <--- r1 * scale # scale为0是非法输入,要求 raise ValueError # 直接修改参数矩阵,无返回值 def scaleRow(M, r, scale): if scale == 0: raise ValueError('cannot scale a matrix by zero') for col in range(len(M[r])): M[r][col] *= scale # 运行以下代码测试你的 scaleRow 函数 %run -i -e test.py LinearRegressionTestCase.test_scaleRow # TODO r1 <--- r1 + r2*scale # 直接修改参数矩阵,无返回值 def addScaledRow(M, r1, r2, scale): if scale == 0: raise ValueError('cannot scale a matrix by zero') for col in range(len(M[r1])): M[r1][col] += scale * M[r2][col] # 运行以下代码测试你的 addScaledRow 函数 %run -i -e test.py LinearRegressionTestCase.test_addScaledRow. ---------------------------------------------------------------------- Ran 1 test in 0.002s OK2.3 Gaussian Jordan 消元法求解 Ax = b 2.3.1 算法步骤1 检查A,b是否行数相同步骤2 构造增广矩阵Ab步骤3 逐列转换Ab为化简行阶梯形矩阵 [中文维基链接](https://zh.wikipedia.org/wiki/%E9%98%B6%E6%A2%AF%E5%BD%A2%E7%9F%A9%E9%98%B5.E5.8C.96.E7.AE.80.E5.90.8E.E7.9A.84-.7Bzh-hans:.E8.A1.8C.3B_zh-hant:.E5.88.97.3B.7D-.E9.98.B6.E6.A2.AF.E5.BD.A2.E7.9F.A9.E9.98.B5) 对于Ab的每一列(最后一列除外) 当前列为列c 寻找列c中 对角线以及对角线以下所有元素(行 c~N)的绝对值的最大值 如果绝对值最大值为0 那么A为奇异矩阵,返回None (你可以在选做问题2.4中证明为什么这里A一定是奇异矩阵) 否则 使用第一个行变换,将绝对值最大值所在行交换到对角线元素所在行(行c) 使用第二个行变换,将列c的对角线元素缩放为1 多次使用第三个行变换,将列c的其他元素消为0 步骤4 返回Ab的最后一列**注:** 我们并没有按照常规方法先把矩阵转化为行阶梯形矩阵,再转换为化简行阶梯形矩阵,而是一步到位。如果你熟悉常规方法的话,可以思考一下两者的等价性。 2.3.2 算法推演为了充分了解Gaussian Jordan消元法的计算流程,请根据Gaussian Jordan消元法,分别手动推演矩阵A为***可逆矩阵***,矩阵A为***奇异矩阵***两种情况。 推演示例 $Ab = \begin{bmatrix} -7 & 5 & -1 & 1\\ 1 & -3 & -8 & 1\\ -10 & -2 & 9 & 1\end{bmatrix}$$ --> $$\begin{bmatrix} 1 & \frac{1}{5} & -\frac{9}{10} & -\frac{1}{10}\\ 0 & -\frac{16}{5} & -\frac{71}{10} & \frac{11}{10}\\ 0 & \frac{32}{5} & -\frac{73}{10} & \frac{3}{10}\end{bmatrix}$$ --> $$\begin{bmatrix} 1 & 0 & -\frac{43}{64} & -\frac{7}{64}\\ 0 & 1 & -\frac{73}{64} & \frac{3}{64}\\ 0 & 0 & -\frac{43}{4} & \frac{5}{4}\end{bmatrix}$$ --> $$\begin{bmatrix} 1 & 0 & 0 & -\frac{3}{16}\\ 0 & 1 & 0 & -\frac{59}{688}\\ 0 & 0 & 1 & -\frac{5}{43}\end{bmatrix}$ 推演有以下要求:1. 展示每一列的消元结果, 比如3*3的矩阵, 需要写三步2. 用分数来表示3. 分数不能再约分4. 我们已经给出了latex的语法,你只要把零改成你要的数字(或分数)即可5. 检查你的答案, 可以用[这个](http://www.math.odu.edu/~bogacki/cgi-bin/lat.cgi?c=sys), 或者后面通过单元测试后的`gj_Solve`_你可以用python的 [fractions](https://docs.python.org/2/library/fractions.html) 模块辅助你的约分_ 以下开始你的尝试吧!# 不要修改这里! from helper import * A = generateMatrix(3,seed,singular=False) b = np.ones(shape=(3,1),dtype=int) # it doesn't matter Ab = augmentMatrix(A.tolist(),b.tolist()) # 请确保你的增广矩阵已经写好了 printInMatrixFormat(Ab,padding=3,truncating=0)-10, 9, 5 || 1 -4, 3, -4 || 1 -2, 3, 5 || 1请按照算法的步骤3,逐步推演***可逆矩阵***的变换。在下面列出每一次循环体执行之后的增广矩阵。要求:1. 做分数运算2. 使用`\frac{n}{m}`来渲染分数,如下: - $\frac{n}{m}$ - $-\frac{a}{b}$$ Ab = \begin{bmatrix} -10 & 9 & 5 & 1 \\ -4 & 3 & -4 & 1 \\ -2 & 3 & 5 & 1 \end{bmatrix}$$ --> \begin{bmatrix} 1 & -\frac{9}{10} & -\frac{5}{10} & -\frac{1}{10} \\ 0 & -\frac{3}{5} & -6 & \frac{3}{5} \\ 0 & \frac{6}{5} & 4 & \frac{4}{5} \end{bmatrix}$ $ --> \begin{bmatrix} 1 & 0 & \frac{5}{2} & \frac{1}{2} \\ 0 & 1 & \frac{10}{3} & \frac{2}{3} \\ 0 & 0 & -4 & 1 \end{bmatrix}$ $ --> \begin{bmatrix} 1 & 0 & 0 & \frac{9}{8} \\ 0 & 1 & 0 & \frac{3}{2} \\ 0 & 0 & 1 & -\frac{1}{4} \end{bmatrix}$# 不要修改这里! 
A = generateMatrix(3,seed,singular=True) b = np.ones(shape=(3,1),dtype=int) Ab = augmentMatrix(A.tolist(),b.tolist()) # 请确保你的增广矩阵已经写好了 printInMatrixFormat(Ab,padding=3,truncating=0)6, 1, 9 || 1 0, 1, 1 || 1 -6, 1, -7 || 1请按照算法的步骤3,逐步推演***奇异矩阵***的变换。在下面列出每一次循环体执行之后的增广矩阵。要求:1. 做分数运算2. 使用`\frac{n}{m}`来渲染分数,如下: - $\frac{n}{m}$ - $-\frac{a}{b}$$ Ab = \begin{bmatrix} 6 & 1 & 9 & 1 \\ 0 & 1 & 1 & 1 \\ -6 & 1 & -7 & 1 \end{bmatrix}$$ --> \begin{bmatrix} 1 & \frac{1}{6} & \frac{3}{2} & \frac{1}{6} \\ 0 & 1 & 1 & 1 \\ 0 & 2 & 2 & 2 \end{bmatrix}$ $ --> \begin{bmatrix} 1 & 0 & \frac{4}{3} & 0 \\ 0 & 1 & 1 & 0 \\ 0 & 0 & 0 & 0 \end{bmatrix}$ 2.3.3 实现 Gaussian Jordan 消元法# TODO 实现 Gaussain Jordan 方法求解 Ax = b """ Gaussian Jordan 方法求解 Ax = b. 参数 A: 方阵 b: 列向量 decPts: 四舍五入位数,默认为4 epsilon: 判读是否为0的阈值,默认 1.0e-16 返回列向量 x 使得 Ax = b 返回None,如果 A,b 高度不同 返回None,如果 A 为奇异矩阵 """ from pprint import pprint def gj_Solve(A, b, decPts=4, epsilon=1.0e-16): size = len(A) if size != len(b): return None C = augmentMatrix(A, b) for c in range(size): absCol = [abs(row[c]) for row in C[c:]] maxNum = max(absCol) if maxNum < epsilon: # singular matrix return None swapRows(C, absCol.index(maxNum) + c, c) scaleRow(C, c, 1 / C[c][c]) for r in range(size): if r == c or abs(C[r][c]) < epsilon: continue addScaledRow(C, r, c, -C[r][c]) solution = [[row[-1]] for row in C] return solution # 运行以下代码测试你的 gj_Solve 函数 %run -i -e test.py LinearRegressionTestCase.test_gj_Solve. ---------------------------------------------------------------------- Ran 1 test in 4.122s OK(选做) 2.4 算法正确判断了奇异矩阵:在算法的步骤3 中,如果发现某一列对角线和对角线以下所有元素都为0,那么则断定这个矩阵为奇异矩阵。我们用正式的语言描述这个命题,并证明为真。证明下面的命题:**如果方阵 A 可以被分为4个部分: ** $ A = \begin{bmatrix} I & X \\ Z & Y \\\end{bmatrix} , \text{其中 I 为单位矩阵,Z 为全0矩阵,Y 的第一列全0}$,**那么A为奇异矩阵。**提示:从多种角度都可以完成证明- 考虑矩阵 Y 和 矩阵 A 的秩- 考虑矩阵 Y 和 矩阵 A 的行列式- 考虑矩阵 A 的某一列是其他列的线性组合 **TODO** 证明:由于 $A,\ I$ 为方阵,则 $Y$ 为方阵,易知,$X$ 的行数与 $Z$ 的列数一致。 设 $n$ 为 $I$ 的大小,并设 $x_1,\ x_2,\dots,\ x_n$ 为矩阵 $X$ 的第一列的元素;设 $A_i$ 为矩阵 $A$ 的第 $i$ 列。由于 $Y$ 的第一列均为 0,那么 $A_{n+1}$($Y$ 所在的这一列)可以被表示为$$ A_{n+1} = x_1 A_1 + x_2 A_2 + \cdots + x_n A_n = \sum_{i=1}^n x_i A_i $$因此 $A_{n+1}$ 这一列并不是线性独立的(是 $A$ 前 $n$ 列的线性组合),又因 $A$ 为方阵,则得出 $A$ 不满秩,即 $A$ 是奇异矩阵,证毕。 3 线性回归 3.1 随机生成样本点# 不要修改这里! # 运行一次就够了! from helper import * from matplotlib import pyplot as plt %matplotlib inline X,Y = generatePoints(seed,num=100) ## 可视化 plt.xlim((-5,5)) plt.xlabel('x',fontsize=18) plt.ylabel('y',fontsize=18) plt.scatter(X,Y,c='b') plt.show()3.2 拟合一条直线 3.2.1 猜测一条直线#TODO 请选择最适合的直线 y = mx + b m1 = 2/5 b1 = 13.8 # 不要修改这里! 
plt.xlim((-5,5)) x_vals = plt.axes().get_xlim() y_vals = [m1*x+b1 for x in x_vals] plt.plot(x_vals, y_vals, '-', color='r') plt.xlabel('x',fontsize=18) plt.ylabel('y',fontsize=18) plt.scatter(X,Y,c='b') plt.show()3.2.2 计算平均平方误差 (MSE) 我们要编程计算所选直线的平均平方误差(MSE), 即数据集中每个点到直线的Y方向距离的平方的平均数,表达式如下:$$MSE = \frac{1}{n}\sum_{i=1}^{n}{(y_i - mx_i - b)^2}$$# TODO 实现以下函数并输出所选直线的MSE def calculateMSE(X,Y,m,b): error = 0 for x, y in zip(X, Y): diff = (y - m * x - b) ** 2 error += diff error /= len(X) return error # Pythonic approach # return sum([(y - m * x - b) ** 2 for x, y in zip(X, Y)]) / len(Y) print(calculateMSE(X,Y,m1,b1))1.4893792890684213.2.3 调整参数 $m, b$ 来获得最小的平方平均误差你可以调整3.2.1中的参数 $m1,b1$ 让蓝点均匀覆盖在红线周围,然后微调 $m1, b1$ 让MSE最小。 3.3 (选做) 找到参数 $m, b$ 使得平方平均误差最小**这一部分需要简单的微积分知识( $ (x^2)' = 2x $ )。因为这是一个线性代数项目,所以设为选做。**刚刚我们手动调节参数,尝试找到最小的平方平均误差。下面我们要精确得求解 $m, b$ 使得平方平均误差最小。定义目标函数 $E$ 为$$E = \frac{1}{2}\sum_{i=1}^{n}{(y_i - mx_i - b)^2}$$因为 $E = \frac{n}{2}MSE$, 所以 $E$ 取到最小值时,$MSE$ 也取到最小值。要找到 $E$ 的最小值,即要找到 $m, b$ 使得 $E$ 相对于 $m$, $E$ 相对于 $b$ 的偏导数等于0. 因此我们要解下面的方程组。$$\begin{cases}\displaystyle\frac{\partial E}{\partial m} =0 \\\\\displaystyle\frac{\partial E}{\partial b} =0 \\\end{cases}$$ 3.3.1 计算目标函数相对于参数的导数首先我们计算两个式子左边的值证明/计算:$$\frac{\partial E}{\partial m} = \sum_{i=1}^{n}{-x_i(y_i - mx_i - b)}$$$$\frac{\partial E}{\partial b} = \sum_{i=1}^{n}{-(y_i - mx_i - b)}$$ **TODO** 证明:设 $u_i = (y_i - mx_i - b)$,我们可以重写目标函数 $E$ 为$$ E = \frac{1}{2} \sum_{i=1}^n u_i^2 $$设 $E_i = \frac{1}{2} u_i^2$,我们可以再重写目标函数 $E$ 为(trivially)$$ E = \sum_{i=1}^n E_i$$则,我们有$$ \frac{\partial E}{\partial m} = \sum_{i=1}^n \frac{\partial E_i}{\partial m} \quad\textrm{和}\quad \frac{\partial E}{\partial b} = \sum_{i=1}^n \frac{\partial E_i}{\partial b} $$我们先求 $E_i$ 关于 $m$ 的偏导数,即 $\frac{\partial E_i}{\partial m}$,根据链式法则,我们有,对于任意的 $i \in \{1,2,\dots,n\}$$$ \frac{\partial E_i}{\partial m} = \frac{\mathrm{d} E_i}{\mathrm{d} u_i} \cdot \frac{\partial u_i}{\partial m} $$在 $u_i$ 中,只有一项 $-mx_i$ 是和 $m$ 相关的,因此 $\frac{\partial u_i}{\partial m} = -x_i$,即$$ \frac{\partial E_i}{\partial m} = u_i\cdot (-x_i) = -x_i(y_i - mx_i - b) $$即$$ \frac{\partial E}{\partial m} = \sum_{i=1}^n -x_i(y_i - mx_i - b) $$同理,我们可得$$ \frac{\partial E_i}{\partial b} = \frac{\partial E_i}{\partial u_i}\cdot\frac{\partial u_i}{\partial b} = u_i\cdot (-1) = -(y_i - mx_i - b) $$即$$ \frac{\partial E}{\partial b} = \sum_{i=1}^n -(y_i - mx_i - b) $$证毕。 3.3.2 实例推演现在我们有了一个二元二次方程组$$\begin{cases}\displaystyle\sum_{i=1}^{n}{-x_i(y_i - mx_i - b)} =0 \\\displaystyle\sum_{i=1}^{n}{-(y_i - mx_i - b)} =0 \\\end{cases}$$为了加强理解,我们用一个实际例子演练。我们要用三个点 $(1,1),\ (2,2),\ (3,2)$ 来拟合一条直线 $y = mx + b$, 请写出- 目标函数 $E$, - 二元二次方程组,- 并求解最优参数 $m, b$ **TODO** 写出目标函数,方程组和最优参数1. 目标函数 $E$\begin{align} E &= \frac{1}{2}\sum_{i=1}^n(y_i - mx_i - b)^2 \\ &= \frac{1}{2}\left((1 - m - b)^2 + (2 - 2m - b)^2 + (3 - 2m - b)^2\right) \\ &= \frac{1}{2}\left(9m^2 - 22m + 10mb - 12b + 3b^2 + 14\right) \\ &= \frac{9}{2}m^2 - 11m + 5mb - 6b + \frac{3}{2}b^2 + 7\end{align}2. 二元二次方程组$$\begin{cases} \displaystyle \sum_{i=1}^n -x_i(y_i - mx_i - b) = 0 \\ \displaystyle \sum_{i=1}^n -(y_i - mx_i - b) = 0 \\\end{cases} \; \Rightarrow \;\begin{cases} \displaystyle -(1 - m - b) - 2(2 - 2m - b) - 3(2 - 3m - b) = 0 \\ \displaystyle -(1 - m - b) - (2 - 2m - b) - (2 - 3m - b) = 0 \\\end{cases} \; \Rightarrow \;\begin{cases} \displaystyle -11 + 14m + 6b = 0 \\ \displaystyle -5 + 8m + 3b = 0 \\\end{cases}$$3. 
求解最优参数 $m,\ b$\begin{cases} -11 + 14m + 6b = 0 \\ -5 + 8m + 3b = 0 \\\end{cases}$$ (1) + -2 \times (2) $$$$ \Downarrow $$\begin{cases} \displaystyle -1 - 2m = 0 \quad \\ \displaystyle -5 + 8m + 3b = 0 \quad \\\end{cases}$$ \therefore\ m = -\frac{1}{2} $$$$ \therefore\ b = \frac{11 - 14m}{6} = \frac{18}{6} = 3 $$$$\therefore\begin{cases} \displaystyle m = -\frac{1}{2}\\ \displaystyle b = 3\\\end{cases}$$ 3.3.3 将方程组写成矩阵形式我们的二元二次方程组可以用更简洁的矩阵形式表达,将方程组写成矩阵形式更有利于我们使用 Gaussian Jordan 消元法求解。请证明 $$\begin{bmatrix} \frac{\partial E}{\partial m} \\ \frac{\partial E}{\partial b} \end{bmatrix} = X^TXh - X^TY$$其中向量 $Y$, 矩阵 $X$ 和 向量 $h$ 分别为 :$$Y = \begin{bmatrix} y_1 \\ y_2 \\ ... \\ y_n\end{bmatrix},X = \begin{bmatrix} x_1 & 1 \\ x_2 & 1\\ ... & ...\\ x_n & 1 \\\end{bmatrix},h = \begin{bmatrix} m \\ b \\\end{bmatrix}$$ **TODO** 证明:首先,$$ X^\top X = \begin{bmatrix} \displaystyle \sum_{i=1}^n x_i^2 & \displaystyle \sum_{i=1}^n x_i \\ \displaystyle \sum_{i=1}^n x_i & \displaystyle \sum_{i=1}^n 1\end{bmatrix}$$因此,$$ X^\top Xh = \begin{bmatrix} \displaystyle \sum_{i=1}^n mx_i^2 + \sum_{i=1}^n bx_i \\ \displaystyle \sum_{i=1}^n mx_i + \sum_{i=1}^n b\end{bmatrix}$$另外,$$ X^\top Y = \begin{bmatrix} \displaystyle \sum_{i=1}^n x_i y_i \\ \displaystyle \sum_{i=1}^n y_i\end{bmatrix}$$因此,$$ X^\top Xh - X^\top Y = \begin{bmatrix} \displaystyle \sum_{i=1}^n mx_i^2 + \sum_{i=1}^n bx_i - \sum_{i=1}^n x_i y_i \\ \displaystyle \sum_{i=1}^n mx_i + \sum_{i=1}^n b - \sum_{i=1}^n y_i\end{bmatrix} = \begin{bmatrix} \displaystyle \sum_{i=1}^n (mx_i^2 + bx_i - x_i y_i) \\ \displaystyle \sum_{i=1}^n (mx_i + b - y_i)\end{bmatrix} = \begin{bmatrix} \displaystyle \sum_{i=1}^n -x_i(y_i - mx_i - b) \\ \displaystyle \sum_{i=1}^n -(y_i - mx_i - b)\end{bmatrix} = \begin{bmatrix} \displaystyle \frac{\partial E}{\partial m} \\ \displaystyle \frac{\partial E}{\partial b} \end{bmatrix}$$证毕。 至此我们知道,通过求解方程 $X^TXh = X^TY$ 来找到最优参数。这个方程十分重要,他有一个名字叫做 **Normal Equation**,也有直观的几何意义。你可以在 [子空间投影](http://open.163.com/movie/2010/11/J/U/M6V0BQC4M_M6V2AJLJU.html) 和 [投影矩阵与最小二乘](http://open.163.com/movie/2010/11/P/U/M6V0BQC4M_M6V2AOJPU.html) 看到更多关于这个方程的内容。 3.4 求解 $X^TXh = X^TY$ 在3.3 中,我们知道线性回归问题等价于求解 $X^TXh = X^TY$ (如果你选择不做3.3,就勇敢的相信吧,哈哈)# TODO 实现线性回归 ''' 参数:X, Y 存储着一一对应的横坐标与纵坐标的两个一维数组 返回:m,b 浮点数 ''' def linearRegression(X,Y): X = [[i, 1] for i in X] Y = [[i] for i in Y] A = matxMultiply(transpose(X), X) b = matxMultiply(transpose(X), Y) x = gj_Solve(A, b) return x[0][0], x[1][0] m2,b2 = linearRegression(X,Y) assert isinstance(m2,float),"m is not a float" assert isinstance(b2,float),"b is not a float" print(m2,b2)0.30111301039028665 13.867374963487162你求得的回归结果是什么?请使用运行以下代码将它画出来。# 请不要修改下面的代码 x1,x2 = -5,5 y1,y2 = x1*m2+b2, x2*m2+b2 plt.xlim((-5,5)) plt.xlabel('x',fontsize=18) plt.ylabel('y',fontsize=18) plt.scatter(X,Y,c='b') plt.plot((x1,x2),(y1,y2),'r') plt.title('y = {m:.4f}x + {b:.4f}'.format(m=m2,b=b2)) plt.show()你求得的回归结果对当前数据集的MSE是多少?print(calculateMSE(X,Y,m2,b2))1.402604878826932Predicting Customer Churn*************** HistoryChurn is a measurement of the percentage of accounts that cancel or choose not to renew their subscriptions. A high churn rate can negatively impact Monthly Recurring Revenue (MRR) and can also indicate dissatisfaction with a product or service.Churn is the measure of how many customers stop using a product. This can be measured based on actual usage or failure to renew (when the product is sold using a subscription model). Often evaluated for a specific period of time, there can be a monthly, quarterly, or annual churn rate. 
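As a quick illustration before the worked example below, here is a minimal sketch (not from the original notebook; the helper name and the numbers are purely illustrative) of a period churn rate computed as customers lost divided by customers at the start of the period:

# Illustrative helper: churn rate for one period, returned as a fraction
def churn_rate(customers_start, customers_lost):
    return customers_lost / customers_start

print(f"Example churn rate: {churn_rate(2000, 150):.1%}")  # -> 7.5%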
How is Churn Rate Calculated? In its most simplistic form, the churn rate is the percentage of total customers that stop using/paying over a period of time. So, if there were 7000 total customers in March and 1800 of them stopped being customers, the monthly churn rate would be about 26%.**********************Annual Churn Loss** = (Average Monthly Bill * Total Customers) * Churn Rate **Annual Churn Loss** = (MRR) * Churn Rate# Calculating churn rate improvement a = (1500*7000)*0.27 b = (1500*7000)*0.26 print (f"Initial Churn Rate Loss = ${int(a)}") print (f"Improved Churn Rate Loss = ${int(b)}") print (f"Difference = ${int(a-b)}")Initial Churn Rate Loss = $2835000 Improved Churn Rate Loss = $2730000 Difference = $105000Importing Librariesimport re # to handle datasets import pandas as pd import numpy as np # for visualization import seaborn as sns import matplotlib.pyplot as plt # to divide train and test set from sklearn.model_selection import train_test_split # feature scaling from sklearn.preprocessing import StandardScaler # to build the models from sklearn.linear_model import LogisticRegression # to evaluate the models from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score # to persist the model and the scaler import joblib # to visualise all the columns in the dataframe pd.pandas.set_option('display.max_columns', None)Prepare the data set# load the data - it is available open source and online data = pd.read_csv('WA_Fn-UseC_-Telco-Customer-Churn.csv') # cast target variable to int data['Churn'] = data['Churn'].map({'Yes': 1, 'No': 0}) # Cast TotalCharges to float data['TotalCharges'] = pd.to_numeric(data['TotalCharges'],errors = 'coerce') # drop unnecessary variables data.drop(labels=['customerID'], axis=1, inplace=True) # display data data.head() data.shapeData Explorationdata.Churn.value_counts() plt.figure(figsize=(12,8)) turnover = data.Churn.value_counts(1) sns.barplot(y=turnover.values, x=turnover.index, alpha=0.6) plt.title('Distribution of Customer Churn') plt.ylabel('Count', fontsize=16);Find numerical and categorical variablesvars_num = ['SeniorCitizen', 'tenure', 'MonthlyCharges', 'TotalCharges'] vars_cat = ['gender', 'Partner', 'Dependents', 'PhoneService', 'PaperlessBilling', 'Contract', 'DeviceProtection', 'InternetService', 'MultipleLines', 'OnlineBackup', 'OnlineSecurity', 'PaymentMethod', 'StreamingMovies', 'StreamingTV', 'TechSupport'] print('Number of numerical variables: {}'.format(len(vars_num))) print('Number of categorical variables: {}'.format(len(vars_cat)))Number of numerical variables: 4 Number of categorical variables: 15Find missing values in variables# first in numerical variables data[vars_num].isnull().mean() # now in categorical variables data[vars_cat].isnull().mean()Determine cardinality of categorical variablesdata[vars_cat].nunique(dropna=False).sort_values(ascending=True)Determine the distribution of numerical variablesdata[vars_num].hist(bins=30, figsize=(10,10)) plt.show()Separate data into train and testX_train, X_test, y_train, y_test = train_test_split( data.drop('Churn', axis=1), # predictors data['Churn'], # target test_size=0.2, # percentage of obs in test set random_state=0) # seed to ensure reproducibility X_train.shape, X_test.shapeFeature Engineering Fill in Missing data in numerical variables:- Add a binary missing indicator- Fill NA in original variable with 0# add missing indicator X_train['TotalCharges_NA'] = np.where(X_train['TotalCharges'].isnull(), 1, 0) X_test[['TotalCharges_NA']] = 
np.where(X_test['TotalCharges'].isnull(), 1, 0) X_train['TotalCharges'].fillna(0, inplace=True) X_test['TotalCharges'].fillna(0, inplace=True)Perform one hot encoding of categorical variables into k-1 binary variables- k-1, means that if the variable contains 9 different categories, we create 8 different binary variables- Remember to drop the original categorical variable (the one with the strings) after the encodingfor var in vars_cat: # to create the binary variables, we use get_dummies from pandas X_train = pd.concat([X_train, pd.get_dummies(X_train[var], prefix=var, drop_first=True)], axis=1) X_test = pd.concat([X_test, pd.get_dummies(X_test[var], prefix=var, drop_first=True)], axis=1) X_train.drop(labels=vars_cat, axis=1, inplace=True) X_test.drop(labels=vars_cat, axis=1, inplace=True)X_train.shape, X_test.shape X_train.head() X_test.head()Scale the variables- Use the standard scaler from Scikit-learnvariables = [c for c in X_train.columns] # create scaler scaler = StandardScaler() # fit the scaler to the train set scaler.fit(X_train[variables]) # transform the train and test set X_train = scaler.transform(X_train[variables]) X_test = scaler.transform(X_test[variables])Train the Logistic Regression model- Set the regularization parameter to 0.0005- Set the seed to 0# set up the model # remember to set the random_state / seed model = LogisticRegression(C=0.0005, random_state=0) # train the model model.fit(X_train, y_train)Make predictions and evaluate model performance **Important**: - To determine the accuracy, you need the outcome 0, 1, referring to churn or not-churn. - But to determine the roc-auc you need the probability of churn.***********Determine**:- roc-auc (probability)- accuracy# make predictions for test set class_ = model.predict(X_train) pred = model.predict_proba(X_train)[:,1] # determine mse and rmse print('train roc-auc: {}'.format(roc_auc_score(y_train, pred))) print('train accuracy: {}'.format(accuracy_score(y_train, class_))) print() # make predictions for test set class_ = model.predict(X_test) pred = model.predict_proba(X_test)[:,1] # determine mse and rmse print('test roc-auc: {}'.format(roc_auc_score(y_test, pred))) print('test accuracy: {}'.format(accuracy_score(y_test, class_))) print() # Confusion Matrix for Random Forest confusion_matrix(y_test, model.predict(X_test))Simple baseline to compare spaCy document classification models againstfrom __future__ import division from sklearn.feature_extraction.text import CountVectorizer from sklearn.naive_bayes import MultinomialNB from sklearn.feature_selection import chi2, SelectKBest from sklearn.model_selection import GridSearchCV, KFold, cross_val_score, train_test_split from sklearn.pipeline import Pipeline from sklearn.preprocessing import binarize from sklearn import metrics from itertools import islice from pathlib import Path import pandas as pd import re import string DATA_DIR = Path('../../data/wiki10') TEXT_DIR = DATA_DIR / 'text' LABELS_PATH = DATA_DIR / 'clf0-singlelabel.csv'Load datadf = pd.read_csv(CLF_LABELS_PATH) y = df.tag texts = [TEXT_DIR.joinpath(id).read_text() for id in df.id] def tokenizer(text): return re.sub(f'([{string.punctuation}])', r' \1 ', text.lower()).split()Creating sparse feature matrix and split into test/train sets (also split input text for easy reference/model debugging)cvec = CountVectorizer(tokenizer=tokenizer, min_df=3, stop_words='english') X = cvec.fit_transform(texts) X_train, X_test, y_train, y_test, text_train, text_test = train_test_split(X, y, texts, test_size = 
0.3, random_state = 0)Define simple training pipeline, using chi-squared feature selectionpipeline = Pipeline([ ('kbest_feat', SelectKBest(chi2, k=2500)), ('classifier', MultinomialNB()) ]) pipeline.fit(X_train, y_train) labels = pipeline.classes_ pred_y_test = pipeline.predict(X_test) print(metrics.classification_report(y_test, pred_y_test)) def confusion_df(y_true, y_pred, labels): confusion_df = pd.DataFrame(metrics.confusion_matrix(y_true, y_pred, labels=labels), columns=labels, index=labels) confusion_df.columns.name = 'predicted' confusion_df.index.name = 'actual' return confusion_df confusion_df(y_test, pred_y_test, labels)Cross-validating result to check variance across multiple train foldscross_val_score(pipeline, X, y, cv=5, scoring='f1_weighted')Investigating a few incorrect predictionsdf_test = pd.DataFrame({'text': text_test, 'y_true': y_test, 'y_pred': pred_y_test}) df_test.head() software_fn = df_test.loc[(df_test.y_pred != df_test.y_true) & (df_test.y_true == 'software')] software_fn for _, row in software_fn.iterrows(): print(row['y_pred']) print(row['text'][:1000], end='\n\n')politics Google Street View is a feature of Google Maps and Google Earth that provides for many streets in the world 360° horizontal and 290° vertical panoramic views from a row of positions along the street (one in every 10 or 20 metres, or so), from a height of about two metres. It was launched on May 25, 2007, and is gradually expanded to include more cities, and in these cities more streets, and also some rural areas. These photographs are currently available for countries including the United States, the United Kingdom, the Netherlands, France, Italy, Spain, Australia, New Zealand and Japan. Coverage is shown by dragging "pegman" from its position, on a map of any scale. Google Street View displays photos taken from a fleet of Chevrolet Cobalts in United States, Opel Astras in Europe and Australia, Vauxhall Astras in the United Kingdom and Toyota Prius cars in Japan. Pedestrian areas, narrow streets and park alleys that cannot be accessed by car are not always covered. However, so[...]This notebook uses sensitivity analysis results to produce *global* stability plots i.e., violin plots in the main paper.import os import copy import re import numpy as np from epimodel import preprocess_data from mpl_toolkits.axes_grid1 import make_axes_locatable import matplotlib.gridspec as gridspec from matplotlib.font_manager import FontProperties from matplotlib.ticker import PercentFormatter import matplotlib.pyplot as plt import seaborn as sns sns.set_style('ticks') %matplotlib inline data = preprocess_data('../merged_data/data_final_nov.csv', last_day='2020-05-30', smoothing=1) data.mask_reopenings(print_out = False) results_base_dir = "../server_final_nov2/" def tag_fname_to_label(tag, fname): if tag == "cases_threshold": r = re.search(r"(cases_t)(\d*)", fname) return r.groups()[1] if tag == "deaths_threshold": r = re.search(r"(deaths_t)(\d*)", fname) return r.groups()[1] if tag == "scalings": if "simple" in fname: return "Random Constant Scaling" else: return "Time-Varying Correction" if tag == "npi_leaveout": npi_names = data.CMs r = re.search(r"(\d*)", fname)[0] label = f"{npi_names[int(r[0])]}" for npi in r[1:]: label += f",\n{npi_names[int(npi)]}" return label if tag == "growth_noise": npi_names = data.CMs r = re.search(r"(growth_noise)-(\d*.\d*)", fname) return r.groups()[1] if tag == "iceswe": return "Iceland and Sweden Excluded" if tag == "NPI_prior": if "icl" in fname: return "Flaxman et. al." 
if "half_normal" in fname: return "$\\alpha_i \sim$Half Normal$(0.2^2)$" if "normal" in fname and "0.2" in fname: return "$\\alpha_i \sim$Normal$(0, 0.2^2)$" if "normal" in fname and "10" in fname: return "$\\alpha_i \sim$Normal$(0, 10^2)$" if tag == "oxcgrt": npi_names = ['Mask Wearing', 'Travel Screen/Quarantine', 'Travel Bans', 'Public Transport Limited', 'Internal Movement Limited', 'Public Information Campaigns', 'Symptomatic Testing'] r = re.search(r"(\d*)", fname)[0] label = f"{npi_names[int(r[0])]}" for npi in r[1:]: label += f",\n{npi_names[int(npi)]}" return label if tag == "R_prior": r = re.search(r"(R_prior-)(\d*.\d*)", fname) val = r.groups()[1] return val if tag == "alpha_noise_Scale": r = re.search(r"(alpha_noise-)(\d*.\d*)", fname) val = r.groups()[1] return val if tag == "region_holdout": return fname[:2] if tag == "structural": if "default" in fname: return "Fixed Effects" if "additive" in fname: return "Additive" if "cases" in fname: return "Only Case Data" if "deaths" in fname: return "Only Death Data" if "noisy" in fname: return "Noisy-R (Fixed Effects)" if "discrete_renewal_fixed_gi" in fname: return "Discrete Renewal$^*$" if tag == "alpha_noise_scale": an = re.search(r"(alpha_noise-)(\d*.\d*)", fname).groups()[1] print(an) return an return f"{tag} - {fname}" def load_tagged_traces(result_base_dir, tag, extension='-cs.txt'): path = os.path.join(results_base_dir, tag) all_traces = [] for filedir, _, files in os.walk(path): for f in files: if extension in f and 'CasesDelay' not in f and 'DeathsDeath' not in f and '_GI_' not in f: try: trace = np.loadtxt(os.path.join(filedir, f)) all_traces.append([trace, tag_fname_to_label(tag, f)]) except Exception as e: print(e) return all_traces def search_tagged_traces(result_base_dir, tag, search_key, extension='-cs.txt'): path = os.path.join(results_base_dir, tag) for filedir, _, files in os.walk(path): for f in files: if extension in f and search_key in f: trace = np.loadtxt(os.path.join(filedir, f)) label = tag_fname_to_label(tag, f) return (trace, label) print('No experiment found') return None # notes - ignore additive model categories = ['npi_leaveout', 'cases_threshold', 'deaths_threshold', 'region_holdout', 'oxcgrt', 'epiparam_prior', 'R_prior', 'npi_prior', 'structural_mean', 'scaling', 'alpha_noise_scale'] derived_features = [ ("Gatherings limited to\n1000 people or less", [0]), ("Gatherings limited to\n100 people or less", [1, 0]), ("Gatherings limited to\n10 people or less", [2, 1, 0]), ("Some businesses\nclosed", [3]), ("Most nonessential\nbusinesses closed", [4, 3]), ("Schools and universities\nclosed", [5, 6]), ("Stay-at-home order\n(with exemptions)", [7]), ] fp2 = FontProperties(fname=r"../../fonts/Font Awesome 5 Free-Solid-900.otf") cols = sns.cubehelix_palette(3, start=0.2, light=0.6, dark=0.1, rot=0.2) sns.palplot(cols) cm_plot_style = [ ("\uf0c0", cols[0]), # ppl ("\uf0c0", cols[1]), # ppl ("\uf0c0", cols[2]), # ppl ("\uf07a", cols[0]), # shop 1 ("\uf07a", cols[2]), # shop2 ("\uf549", "black"), # school ("\uf19d", "black"), # university ("\uf965", "black") # home ] def combine_trace(trace, derived_features): nS, _ = trace.shape nCMs = len(derived_features) derived_samples = np.zeros((nS, nCMs)) for f_i, (f, prodrows) in enumerate(derived_features): samples = np.ones(nS) for r in prodrows: samples = samples * trace[:, r] derived_samples[:, f_i] = samples res = copy.deepcopy(derived_samples) res = 100*(1-res) return res def bucket_npis_medians(l, u, medians): nCMs = len(derived_features) # low, medium, high 
bucket_array = np.zeros((nCMs, 3), dtype=np.float64) nT, nCM = medians.shape for t in range(nT): for i in range(nCMs): median = medians[t, i] if median > u: bucket_array[i, 2] += 1 elif median > l: bucket_array[i, 1] += 1 elif not np.isnan(median): # just neglect the nans bucket_array[i, 0] += 1 return bucket_arraynumericsloaded_dict = {} nCMs = len(derived_features) for c_i, c in enumerate(categories): ts = load_tagged_traces(results_base_dir, c) rts, labels = zip(*ts) if c == 'npi_leaveout': for l_i, label in enumerate(labels): for cm_i, cm in enumerate(data.CMs): if cm in label and '<' not in cm: rts[l_i][:, cm_i] = np.nan elif cm == label: rts[l_i][:, cm_i] = np.nan if c == 'oxcgrt': for l_i, label in enumerate(labels): if label in ['Symptomatic Testing', 'Mask Wearing']: temp = copy.deepcopy(rts[l_i]) rts[l_i][:, :8] = temp[:, 1:9] if c == 'scalings': rts = [np.exp(-t) for t in rts] medians = np.zeros((0, nCMs)) for t in rts: comb_t = np.median(combine_trace(t, derived_features), axis=0).reshape((1, nCMs)) medians = np.append(comb_t, medians, axis=0) loaded_dict[c] = mediansviolin plotsimport pandas as pd def categories_to_df(cats): xs = [] ys = [] for c in cats: dt = loaded_dict[c] nT, _ = dt.shape for t in range(nT): for cm in range(nCMs): if not np.isnan(dt[t, cm]): xs.append(dt[t, cm]) ys.append(derived_features[cm][0]) df = pd.DataFrame.from_dict({'npi': ys, 'med': xs}) return df def violin_plot(l, u, data): plt.plot([0, 0], [-1, 10], color=cols[2], linestyle='--', alpha=0.75, zorder=-5, linewidth=0.75) plt.plot([l, l], [-1, 10], color=cols[2], linestyle='--', alpha=0.75, zorder=-5, linewidth=0.75) plt.plot([u, u], [-1, 10], color=cols[2], linestyle='--', alpha=0.75, zorder=-5, linewidth=0.75) sns.stripplot(x="med", y="npi", data=data, color=[0.37647059, 0.37647059, 0.37647059, 1.], order=[l for l, _ in derived_features], size=3.5, zorder=-4, jitter=False) sns.violinplot(x="med", y="npi", data=data, scale='width', color=cols[0], saturation=1, order=[l for l, _ in derived_features], cut=0, inner='box'); x_min = -10 x_max = 60 plt.xlim([x_min, x_max]) xtick_vals = [0, 17.5, 35, 52.5] xtick_str = [f"{x:.1f}%" for x in xtick_vals] plt.ylim([6.5, -0.5]) x_r = np.abs(x_min - x_max) for cm in range(nCMs): for i, val in enumerate(derived_features[cm][1]): if val < len(cm_plot_style): col = cm_plot_style[val][1] else: col = "k" plt.text(x_min - 4.5 - 6*i, cm, cm_plot_style[val][0], horizontalalignment='center', verticalalignment='center', fontproperties=fp2, fontsize=10, color=col) ax = plt.gca() plt.yticks(np.arange(nCMs), [f"{f[0]}" for f in derived_features], fontsize=8, ha="left") yax = ax.get_yaxis() yax.set_tick_params(pad=120) for i, (ticklabel, tickloc) in enumerate(zip(ax.get_yticklabels(), ax.get_yticks())): if i < len(cm_plot_style): col = cm_plot_style[i][1] else: col = "k" ticklabel.set_color(col) plt.xticks(xtick_vals, xtick_str, fontsize=8) plt.xlabel("Median reduction in $R_t$", fontsize=8) plt.ylabel(None) def small_violin_plot(l, u, data, xlabel=True): plt.plot([0, 0], [-1, 10], color=cols[2], linestyle='--', alpha=0.75, zorder=-5, linewidth=0.75) plt.plot([l, l], [-1, 10], color=cols[2], linestyle='--', alpha=0.75, zorder=-5, linewidth=0.75) plt.plot([u, u], [-1, 10], color=cols[2], linestyle='--', alpha=0.75, zorder=-5, linewidth=0.75) sns.stripplot(x="med", y="npi", data=data, color=[0.37647059, 0.37647059, 0.37647059, 1.], order=[l for l, _ in derived_features], size=2, zorder=-4, jitter=False) sns.violinplot(x="med", y="npi", data=data, scale='width', color=cols[0], 
saturation=1, order=[l for l, _ in derived_features], linewidth=0.5, inner=None, cut=0) x_min = -10 x_max = 60 plt.xlim([x_min, x_max]) xtick_vals = [0, 17.5, 35, 52.5] if xlabel is not False else [] xtick_str = [f"{x:.1f}%" for x in xtick_vals] if xlabel is not False else [] plt.ylim([7.5, -0.5]) x_r = np.abs(x_min - x_max) for cm in range(nCMs): for i, val in enumerate(derived_features[cm][1]): if val < len(cm_plot_style): col = cm_plot_style[val][1] else: col = "k" plt.text(x_min - 6.5 - 8.5*i, cm, cm_plot_style[val][0], horizontalalignment='center', verticalalignment='center', fontproperties=fp2, fontsize=7, color=col) ax = plt.gca() for i, (ticklabel, tickloc) in enumerate(zip(ax.get_yticklabels(), ax.get_yticks())): if i < len(cm_plot_style): col = cm_plot_style[i][1] else: col = "k" ticklabel.set_color(col) plt.xticks(xtick_vals, xtick_str, fontsize=6) plt.xlabel("Median reduction in $R_t$" if xlabel else None, fontsize=7) plt.ylabel(None) plt.yticks([]) def scatter_plot(l, u, data, xlabel=True): plt.plot([0, 0], [-1, 10], color=cols[2], linestyle='--', alpha=0.75, zorder=-5, linewidth=0.75) plt.plot([l, l], [-1, 10], color=cols[2], linestyle='--', alpha=0.75, zorder=-5, linewidth=0.75) plt.plot([u, u], [-1, 10], color=cols[2], linestyle='--', alpha=0.75, zorder=-5, linewidth=0.75) sns.swarmplot(data['med'], data['npi'], color=cols[0], size=3) # sns.violinplot(x="med", y="npi", data=data, scale='width', color=cols[0], saturation=1, order=[l for l, _ in derived_features], # linewidth=0.5, inner=None) x_min = -10 x_max = 60 plt.xlim([x_min, x_max]) xtick_vals = [0, 17.5, 35, 52.5] if xlabel is not False else [] xtick_str = [f"{x:.1f}%" for x in xtick_vals] if xlabel is not False else [] plt.ylim([6.5, -0.5]) x_r = np.abs(x_min - x_max) for cm in range(nCMs): for i, val in enumerate(derived_features[cm][1]): if val < len(cm_plot_style): col = cm_plot_style[val][1] else: col = "k" plt.text(x_min - 6.5 - 8.5*i, cm, cm_plot_style[val][0], horizontalalignment='center', verticalalignment='center', fontproperties=fp2, fontsize=7, color=col) ax = plt.gca() for i, (ticklabel, tickloc) in enumerate(zip(ax.get_yticklabels(), ax.get_yticks())): if i < len(cm_plot_style): col = cm_plot_style[i][1] else: col = "k" ticklabel.set_color(col) plt.xticks(xtick_vals, xtick_str, fontsize=6) plt.xlabel("Median reduction in $R_t$" if xlabel else None, fontsize=7) plt.ylabel(None) plt.yticks([]) len(categories) # npi_leaveout: 7+1 # cases_threshold: 5 # deaths_threshold: 4 # region_holdout: 41 # oxcgrt: 6 # epiparam_prior: 125 # R_prior: 4 # npi_prior: 3 # structural_mean: 5 # scaling: 2 # alpha_noise_scale: 4 l = 17.5 u = 35 fig = plt.figure(figsize=(7, 3), dpi=300) gs = fig.add_gridspec(2, 4) ax1 = fig.add_subplot(gs[:, :2]) data_all = categories_to_df(categories) violin_plot(l, u, data_all) plt.title('All Sensitivity Analyses (206 conditions)', fontsize=10) ax2 = fig.add_subplot(gs[0, 2]) data_subpanel = categories_to_df(['structural_mean']) scatter_plot(l, u, data_subpanel, False) plt.title('Model Structure\n(5 conditions)', fontsize=8) ax3 = fig.add_subplot(gs[0, 3]) data_subpanel = categories_to_df(['region_holdout', 'cases_threshold', 'deaths_threshold']) small_violin_plot(l, u, data_subpanel, False) plt.title('Data and Preprocessing\n(52 conditions)', fontsize=8) ax4 = fig.add_subplot(gs[1, 2]) data_subpanel = categories_to_df(['epiparam_prior', 'R_prior', 'npi_prior']) small_violin_plot(l, u, data_subpanel) plt.title('Epidemiological Priors\n(135 conditions)', fontsize=8) ax5 = fig.add_subplot(gs[1, 
3]) data_subpanel = categories_to_df(['oxcgrt', 'npi_leaveout']) scatter_plot(l, u, data_subpanel) plt.title('Unobserved Factors\n(14 conditions)', fontsize=8) plt.tight_layout(w_pad=-0) plt.savefig(f'figs/violins.pdf', bbox_inches='tight')bucket plotsdef bucket_array_plot(bucket_array, title, ylabel=True): bucket_array_p = 100*bucket_array / np.sum(bucket_array, axis=1)[:, None] im = plt.imshow(bucket_array_p, cmap='inferno', aspect='auto') for cm in range(nCMs): amax = np.argmax(bucket_array_p[cm, :]) plt.text(amax, cm, f'{int(bucket_array_p[cm, amax]):d}%', ha='center', va='center') for j in range(3): if j != amax: plt.text(j, cm, f'{int(np.round(bucket_array_p[cm, j])):d}%', ha='center', va='center', color='white') for cm in range(nCMs): for i, val in enumerate(derived_features[cm][1]): if val < len(cm_plot_style): col = cm_plot_style[val][1] else: col = "k" plt.text(-0.7 - 0.25*i, cm, cm_plot_style[val][0], horizontalalignment='center', verticalalignment='center', fontproperties=fp2, fontsize=12, color=col) ax = plt.gca() if ylabel: plt.yticks(np.arange(nCMs), [f"{f[0]}" for f in derived_features], fontsize=10, ha="left") yax = ax.get_yaxis() yax.set_tick_params(pad=165) for i, (ticklabel, tickloc) in enumerate(zip(ax.get_yticklabels(), ax.get_yticks())): if i < len(cm_plot_style): col = cm_plot_style[i][1] else: col = "k" ticklabel.set_color(col) plt.gca().xaxis.tick_top() plt.gca().xaxis.set_label_position('top') # plt.xlabel(f'NPI Effectiveness') plt.xticks([0, 1, 2], ["Low", "Medium", "High"], fontsize=10) plt.title(title, fontsize=12) # ax = plt.gca() # divider = make_axes_locatable(ax) # cax = divider.append_axes("right", size="5%", pad=0.05) # cbr = plt.colorbar(im, cax=cax, format=PercentFormatter()) # ax = plt.gca() # ax.tick_params(axis="both", which="major", labelsize=10) # cbr.set_ticks([0, 25, 50, 75, 100]) def bucket_array_plot_small(bucket_array, title): bucket_array_p = 100*bucket_array / np.sum(bucket_array, axis=1)[:, None] im = plt.imshow(bucket_array_p, cmap='inferno', aspect='auto') for cm in range(nCMs): for i in range(3): amax = np.argmax(bucket_array_p[cm, :]) plt.text(amax, cm, f'{int(bucket_array_p[cm, amax]):d}%', ha='center', va='center', fontsize=7) for j in range(3): if j != amax: plt.text(j, cm, f'{int(np.round(bucket_array_p[cm, j])):d}%', ha='center', va='center', color='white', fontsize=7) plt.yticks(np.arange(nCMs), []) for cm in range(nCMs): for i, val in enumerate(derived_features[cm][1]): if val < len(cm_plot_style): col = cm_plot_style[val][1] else: col = "k" plt.text(-0.8 - 0.3*i, cm, cm_plot_style[val][0], horizontalalignment='center', verticalalignment='center', fontproperties=fp2, fontsize=8, color=col) ax = plt.gca() for i, (ticklabel, tickloc) in enumerate(zip(ax.get_yticklabels(), ax.get_yticks())): if i < len(cm_plot_style): col = cm_plot_style[i][1] else: col = "k" ticklabel.set_color(col) plt.gca().xaxis.tick_top() plt.gca().xaxis.set_label_position('top') plt.xticks([0, 1, 2], ["Low", "Medium", "High"], fontsize=6) plt.title(title, fontsize=8) # ax = plt.gca() # divider = make_axes_locatable(ax) # cax = divider.append_axes("right", size="5%", pad=0.05) # cbr = plt.colorbar(im, cax=cax, format=PercentFormatter()) # ax = plt.gca() # ax.tick_params(axis="both", which="major", labelsize=6) # cbr.set_ticks([0, 25, 50, 75, 100]) l = 17.5 u = 35 bucket_dict = {} for c_i, c in enumerate(categories): bucket_array = bucket_npis_medians(l, u, loaded_dict[c]) n = int(np.sum(bucket_array, axis=1)[0]) print(f'{c}: {n}') bucket_dict[c] = bucket_array 
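# (added note) The per-category bucket counts computed above are summed below into a single
# array so the left-hand panel shows all sensitivity-analysis conditions combined.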
fig = plt.figure(figsize=(10, 6), dpi=300) nCMs = len(derived_features) gs = fig.add_gridspec(2, 4) ax1 = fig.add_subplot(gs[:, :2]) ba = np.zeros((nCMs, 3)) for _, ba_c in bucket_dict.items(): ba += ba_c n = int(np.sum(ba, axis=1)[0]) bucket_array_plot(ba, f'All Sensitivity Analyses ({n+1} conditions)') plt.tight_layout(w_pad=-0.25) plt.savefig(f'figs/buckets_{l}_{u}.pdf', bbox_inches='tight')Stuff that makes the notebook workfrom IPython.core.display import HTML HTML(""" """)Motivation- I'd like to enable this kind of robot design loop:from graphviz import Digraph dot = Digraph() dot.node('A', 'Collect observations of environment') dot.node('B', 'Simulate robot in (quantifiably) similarly distributed environments') dot.node('C', 'Seek out failures in sim to guide future environment observations') dot.edge('A', 'B') dot.edge('B', 'C') dot.edge('C', 'A') dotFocus on one particular unsolved part of this problem: how do we generate **samples** from a distribution of environments?Parameterize environment as a collection of object identities and poses. "Feasible" environment configurations lie on a **manifold** of **nonpenetrating, statically stable poses.**from IPython.display import HTML HTML('')Forward simulation is great for generating, but really hard to "tune" to generate samples distributed like a set of observations.Generative networks are good at that! Can we use them?- **:)** Specifically designed to generate samples from a distribution- **:(** Don't play well with manifold constraintsNeed a modified GAN setup instead: project generator outputs are projected to feasibility before discrimination. Feasibility Projection for Battleship**Input**: A list of *N* battleship poses (x, y, $\theta$) and ship lengths ($l \in \mathbb{Z}_{> 0}$), plus game board width and height.**Output**: A list of *N* battleship poses that is as close as possible to the input, satisfying:- $x, y \in \mathbb{Z}$- $\theta \in \{0, \dfrac{\pi}{2}, \pi, \dfrac{3\pi}{2}\}$- No part of any ship extends outside of the game board.- Each cell on the board is occupied by at most one ship.![projectionexample](groupmeetingfigs/battleship_board_projection.png) NLP versionWithout the integer constraints on $x, y, \theta$, this can be tackled as an NLP:import battleship_board_rbt as bsrbt import random import matplotlib.pyplot as plt random.seed(42) fig, axs = plt.subplots(1, 2) fig.set_size_inches(12, 12) board_width = 10 board_height = 10 rbt, q0 = bsrbt.spawn_rbt(board_width, board_height, max_length=5, N=20) bsrbt.draw_board_state(axs[0], rbt, q0, board_width, board_height) axs[0].set_title("Initial board state") q_sol, info, dqf_dq0 = \ bsrbt.projectToFeasibilityWithIK(rbt, q0, board_width, board_height) bsrbt.draw_board_state(axs[1], rbt, q_sol, board_width, board_height) print("Info %d" % info) axs[1].set_title("Projection");Info 1I like this! But this suffers from all the problems of nonlinear optimization:- It fails frequently- It does not provide consistent solutions- Adding really hard manifold constraints (e.g. the integer contraints, or static stability constraints) will make these problems worse.(How much worse, in practice? Not sure yet.) 
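To make the "project to feasibility" step concrete outside of the battleship-specific code, here is a minimal sketch (not the `battleship_board_rbt` helpers used above): find the point nearest a generator output q0 that satisfies an inequality constraint g(q) >= 0, standing in for the non-penetration / static-stability constraints. The helper `project_to_feasibility` and the constraint `g` are hypothetical names, and SLSQP via `scipy.optimize.minimize` is just one generic off-the-shelf NLP solver for this.

import numpy as np
from scipy.optimize import minimize

def project_to_feasibility(q0, g):
    # Minimize the squared distance to q0 subject to g(q) >= 0 (elementwise).
    objective = lambda q: np.sum((q - q0) ** 2)
    res = minimize(objective, q0, method='SLSQP',
                   constraints=[{'type': 'ineq', 'fun': g}])
    return res.x, res.success

# Toy usage: project the point (2, 1) onto the unit disk, g(q) = 1 - ||q||^2 >= 0.
q_proj, ok = project_to_feasibility(np.array([2.0, 1.0]),
                                    lambda q: 1.0 - np.sum(q ** 2))
print(q_proj, ok)

Like the IK-based projection above, this inherits the usual local-solver caveats: it can fail or return different local minima depending on the starting point q0.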
MIP VersionRecalling Pang's formulation of forward simulation as a mixed integer program (MIP), what if we tackled this with a similar strategy?For full rigid body dynamics, I'd follow his formulation, but this specific game has an alternative formulation as an MIP that is kind of cute.Very briefly:- For the $n^{th}$ ship, allocate one binary variable $b^n_{i, j, k}$ to represent the ship being at the $\{i, j, k\}^{th}$ bin, imposing $ \sum_{i, j, k} b^n_{i, j, k} = 1 $.- For each location on the grid, create a continuous variable $o_{i, j}$, and impose $o_{i, j} \leq 1$.- Each location on the grid $o_{i, j}$ is a logical combination of variables $b$. (e.g., for 1-length ships, $o_{i, j} = \sum_{\text{ship } n} \sum_{\text{angle }k} b^n_{i, j, k}$- Depending on the length of the ship, forbid combinations of $b^n$ that represent part of the ship leaving the boundaries.All of these relationships are linear (although creating logical relationships requires adding additional intermediate binary variables). Since the continuous optimized pose of each ship is a linear function of $b^n$, we can penalize the L2 norm between the optimized pose and the initial pose of each ship.Does it work?%load_ext autoreload %autoreload 2 import battleship_board as bs random.seed(43) board = bs.Board(10, 10) board.spawn_N_ships(20, max_length=5) fig, (ax1, ax2) = plt.subplots(1, 2) fig.set_size_inches(12, 12) board.draw(ax1) fig.show() board.ships = board.project_to_feasibility_mip(board.ships) board.draw(ax2) plt.show()Basic End to End Machine Learning ProjectIn this Notebook, I'll be showing the general basic steps to build a house pricing prediction model. Get the DataFor this project, a csv with data on house pricing will be used.from six.moves import urllib import tarfile import os DATA_FILE_URL = 'https://raw.githubusercontent.com/ageron/handson-ml/master/datasets/housing/housing.tgz' DEST_DATA_PATH = 'Data' if not os.path.isdir(DEST_DATA_PATH): os.makedirs(DEST_DATA_PATH) tgz_path = os.path.join(DEST_DATA_PATH, "housing.tgz") urllib.request.urlretrieve(DATA_FILE_URL, tgz_path) housing_tgz = tarfile.open(tgz_path) housing_tgz.extractall(path=DEST_DATA_PATH) housing_tgz.close()--- Import all the necessary librariesimport re import json import string import datetime import itertools from collections import defaultdict from wordsegment import load, segment from nltk import TweetTokenizer from nltk.corpus import stopwords from textblob import TextBlob import pandas as pd from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer from datetime import datetime import tqdm load()--- Load the depression lexicon to seed the LDA topics# reading Depression PHQ-9 Lexicon with open("depression_lexicon.json") as f: seed_terms = json.load(f) # read all seed terms into a list removing the underscore from all seeds seed_terms_col = [ seed.replace("_", " ") for seed in list( itertools.chain.from_iterable( [seed_terms[signal] for signal in seed_terms.keys()])) ]--- Prepare other lexicons and resources required to filter and pre-process the tweets# Other lexicons and resources emojies = [":‑)", ":)", ":D", ":o)", ":]", ":3", ":c)", ":>", "=]", "8)", "=)", ":}", ":^)", ":っ)", ":‑D", "8‑D", "8D", "x‑D", "xD", "X‑D", "XD", "=‑D", "=D", "=‑3", "=3", "B^D", ":-))", ">:[", ":‑(", ":(", ":‑c", ":c", ":‑<", ":っC", ":<", ":‑[", ":[", ":{", ";(", ":-||", ":@", ">:(", ":'‑(", ":'(", ":'‑)", ":')", "D:<", "D:", "D8", "D;", "D=", "DX", "v.v", "D‑':", ">:O", ":‑O", ":O", ":‑o", ":o", "8‑0", "O_O", "o‑o", "O_o", "o_O", "o_o", 
"O-O", ":*", ":-*", ":^*", "(", "}{'", ")", ";‑)", ";)", "*-)", "*)", ";‑]", ";]", ";D", ";^)", ":‑,", ">:P", ":‑P", ":P", "X‑P", "x‑p", "xp", "XP", ":‑p", ":p", "=p", ":‑Þ", ":Þ", ":þ", ":‑þ", ":‑b", ":b", "d:", ">:\\", ">:/", ":‑/", ":‑.", ":/", ":\\", "=/", "=\\", ":L", "=L", ":S", ">.<", ":|", ":‑|", ":$", ":‑X", ":X", ":‑#", ":#", "O:‑)", "0:‑3", "0:3", "0:‑)", "0:)", "0;^)", ">:)", ">;)", ">:‑)", "}:‑)", "}:)", "3:‑)", "3:)", "o/\o", "^5", ">_>^", "^<_<", "|;‑)", "|‑O", ":‑J", ":‑&", ":&", "#‑)", "%‑)", "%)", ":‑###..", ":###..", "<:‑|", "<*)))‑{", "><(((*>", "><>", "\o/", "*\0/*", "@}‑;‑'‑‑‑", "@>‑‑>‑‑", "~(_8^(I)", "5:‑)", "~:‑\\", "//0‑0\\\\", "*<|:‑)", "=:o]", "7:^]", ",:‑)", "--- Load and clean the 1.6M tweets datatweets_df = pd.read_csv('Data/tweets.csv', encoding="ISO-8859-1", names=["sentiment", "tweet_id", "created_at", "query", "username", "text"]) def convert_date(date): return datetime.strptime(date.replace(' PDT', ''), "%a %b %d %H:%M:%S %Y") tweets_df['created_at'] = tweets_df['created_at'].apply(convert_date) tweets_df = tweets_df.sort_values( ["username", "created_at"]).reset_index(drop=True) user_tweet_counts=tweets_df[['tweet_id', 'username', 'created_at']].groupby(['username']).agg('count').reset_index() users_50 = list(user_tweet_counts['username'][user_tweet_counts['tweet_id']>=50]) users_70 = list(user_tweet_counts['username'][user_tweet_counts['tweet_id']>=70]) users_100 = list(user_tweet_counts['username'][user_tweet_counts['tweet_id']>=100]) def user_tweets_50(username): if username in users_50: return 1 else: return 0 def user_tweets_70(username): if username in users_70: return 1 else: return 0 def user_tweets_100(username): if username in users_100: return 1 else: return 0 def user_tweets_180(username): if username in users_180: return 1 else: return 0 tweets_df['_50'] = tweets_df['username'].apply(user_tweets_50) tweets_df['_70'] = tweets_df['username'].apply(user_tweets_70) tweets_df['_100'] = tweets_df['username'].apply(user_tweets_100) tweets_df=tweets_df.drop_duplicates()*** Pre-process tweets by filtering the text and recording the sentiments of each tweetanalyzer = SentimentIntensityAnalyzer() def deEmojify(text): emoji_pattern = re.compile("[" u"\U0001F600-\U0001F64F" u"\U0001F300-\U0001F5FF" u"\U0001F680-\U0001F6FF" u"\U0001F1E0-\U0001F1FF" u"\U00002500-\U00002BEF" u"\U00002702-\U000027B0" u"\U00002702-\U000027B0" u"\U000024C2-\U0001F251" u"\U0001f926-\U0001f937" u"\U00010000-\U0010ffff" u"\u2640-\u2642" u"\u2600-\u2B55" u"\u200d" u"\u23cf" u"\u23e9" u"\u231a" u"\ufe0f" u"\u3030" "]+", flags=re.UNICODE) return emoji_pattern.sub(r'', text) def de_abbreviate(token): if token == 'u': return 'you' if token == 'r': return 'are' if token == 'some1': return 'someone' if token == 'yrs': return 'years' if token == 'hrs': return 'hours' if token == 'mins': return 'minutes' if token == 'secs': return 'seconds' if token == 'pls' or token == 'plz': return 'please' if token == '2morow' or token == '2moro': return 'tomorrow' if token == '2day': return 'today' if token == '4got' or token == '4gotten': return 'forget' if token in ['hahah', 'hahaha', 'hahahaha']: return 'haha' if token == "mother's": return "mother" if token == "mom's": return "mom" if token == "dad's": return "dad" if token == 'bday' or token == 'b-day': return 'birthday' if token in ["i'm", "don't", "can't", "couldn't", "aren't", "wouldn't", "isn't", "didn't", "hadn't", "doesn't", "won't", "haven't", "wasn't", "hasn't", "shouldn't", "ain't", "they've"]: return token.replace("'", "") if token in 
['lmao', 'lolz', 'rofl']: return 'lol' if token == '<3': return 'love' if token == 'thanx' or token == 'thnx': return 'thanks' if token == 'goood': return 'good' if token in ['amp', 'quot', 'lt', 'gt', '½25', '..', '. .', '. . .', '...']: return ' ' else: return token def de_slang(tweet): tweet = tweet.replace("idk", "i dont know") tweet = tweet.replace("i'll", "i will") tweet = tweet.replace("you'll", "you will") tweet = tweet.replace("we'll", "we will") tweet = tweet.replace("it'll", "it will") tweet = tweet.replace("it's", "it is") tweet = tweet.replace("i've", "i have") tweet = tweet.replace("you've", "you have") tweet = tweet.replace("we've", "we have") tweet = tweet.replace("they've", "they have") tweet = tweet.replace("you're", "you are") tweet = tweet.replace("we're", "we are") tweet = tweet.replace("they're", "they are") tweet = tweet.replace("let's", "let us") tweet = tweet.replace("she's", "she is") tweet = tweet.replace("he's", "he is") tweet = tweet.replace("that's", "that is") tweet = tweet.replace("i'd", "i would") tweet = tweet.replace("you'd", "you would") tweet = tweet.replace("there's", "there is") tweet = tweet.replace("what's", "what is") tweet = tweet.replace("how's", "how is") tweet = tweet.replace("who's", "who is") tweet = tweet.replace("y'all", "you all") tweet = tweet.replace("ya'll", "you all") return tweet def preprocess_text(tweet): # replace seeds (as phrases) to unigrams. for seed in seed_terms_col: if seed in tweet and " " in seed: tweet = tweet.replace(seed, seed.replace(" ", "_")) # remove retweet handler if tweet[:2] == "RT": tweet = tweet[tweet.index(":") + 2:] # remove url from tweet tweet = re.sub( r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', tweet) # remove short notations tweet = de_slang(tweet) # remove non-ascii characters tweet = ''.join((filter(lambda x: x in printable, tweet))) # additional preprocessing tweet = tweet.replace("\n", " ").replace(" https", "").replace("http", "") # remove all mentions mentions = re.findall(r"@\w+", tweet) for mention in mentions: tweet = tweet.replace(mention, "") # clean usernames and hashtags for term in re.findall(r"#\w+", tweet): # remove any punctuations from the hashtag and mention token = term[1:].translate(str.maketrans('', '', string.punctuation)) segments = ' '.join(segment(token)) tweet = tweet.replace(term, segments) # remove all punctuations tweet = re.sub(r""" ["""+"".join(punctuation)+"""]+ """, " ", tweet, flags=re.VERBOSE) # remove trailing spaces tweet = tweet.strip() # remove numbers tweet = re.sub(r'[\d-]+', 'NUM', tweet) # pad NUM with spaces tweet = tweet.replace("NUM", " NUM ") # remove emoticons tweet = deEmojify(tweet) # remove all stop words or emojis tweet = " ".join([de_abbreviate(word.lower()) for word in tweet_token.tokenize(tweet) if word.lower( ) not in stop_words_extended and word.lower() not in emojies and len(word) > 1]) # remove multiple spaces tweet = re.sub(' +', ' ', tweet) return tweet def preprocess(tweets): processed_tweets = [] for index, tweet in tqdm.tqdm(tweets.iterrows()): cleaned_text = preprocess_text(tweet['text']) sent_score = TextBlob(tweet['text']).sentiment.polarity vader_compound_score = analyzer.polarity_scores(tweet['text'])[ 'compound'] vader_positive_score = analyzer.polarity_scores(tweet['text'])['pos'] vader_negative_score = analyzer.polarity_scores(tweet['text'])['neg'] vader_neutral_score = analyzer.polarity_scores(tweet['text'])['neu'] sent_score_2 = TextBlob(cleaned_text).sentiment.polarity vader_compound_score_2 = 
analyzer.polarity_scores(cleaned_text)[ 'compound'] vader_positive_score_2 = analyzer.polarity_scores(cleaned_text)['pos'] vader_negative_score_2 = analyzer.polarity_scores(cleaned_text)['neg'] vader_neutral_score_2 = analyzer.polarity_scores(cleaned_text)['neu'] processed_tweets.append([tweet['tweet_id'], tweet['created_at'], tweet['text'], cleaned_text, sent_score, vader_compound_score, vader_positive_score, vader_neutral_score, vader_negative_score, sent_score_2, vader_compound_score_2, vader_positive_score_2, vader_neutral_score_2, vader_negative_score_2]) return pd.DataFrame(processed_tweets, columns=['tweet_id', 'created_at', 'text', 'cleaned_text', 'polarity_raw', 'vader_compound_raw', 'vader_pos_raw', 'vader_neu_raw', 'vader_neg_raw', 'polarity_cleaned', 'vader_compound_cleaned', 'vader_pos_cleaned', 'vader_neu_cleaned', 'vader_neg_cleaned']) preprocessed_tweets = preprocess(tweets_df[["tweet_id", "created_at", "text"]])6838it [00:14, 456.47it/s]*** Merge the tweets to get the usernames, and filter for tweets countpreprocessed_tweets=pd.merge(preprocessed_tweets, tweets_df[["tweet_id","created_at","username","_50","_70", "_100"]], on=["tweet_id",'created_at']) preprocessed_tweets=preprocessed_tweets.drop_duplicates() preprocessed_tweets = preprocessed_tweets.sort_values(["username", "created_at"]).reset_index(drop=True) preprocessed_tweets.to_csv('Data/tweets_cleaned.csv', header=True, index=False)YOLOv5 Pseudo LabelingAccording to the results of [this notebook](https://www.kaggle.com/nvnnghia/fasterrcnn-pseudo-labeling) FaterRCNN seems to work well with Pseudo Labeling.In this notebook I am going to test Pseudo labeling technique on Yolov5.import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os from tqdm.auto import tqdm import shutil as shGetting yolov5 repo#!git clone https://github.com/ultralytics/yolov5 #!mv yolov5/* ./ !cp -r ../input/yolov5train/* . 
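# (note) the yolov5 repo is copied from an attached Kaggle dataset rather than cloned
# (the git clone lines above are commented out), presumably so the kernel can run offline.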
!pip install --no-deps '../input/weightedboxesfusion/' > /dev/nullConvert train data to yolov5 formatBased on [this notebook](https://www.kaggle.com/orkatz2/yolov5-train)def convertTrainLabel(): df = pd.read_csv('../input/global-wheat-detection/train.csv') bboxs = np.stack(df['bbox'].apply(lambda x: np.fromstring(x[1:-1], sep=','))) for i, column in enumerate(['x', 'y', 'w', 'h']): df[column] = bboxs[:,i] df.drop(columns=['bbox'], inplace=True) df['x_center'] = df['x'] + df['w']/2 df['y_center'] = df['y'] + df['h']/2 df['classes'] = 0 from tqdm.auto import tqdm import shutil as sh df = df[['image_id','x', 'y', 'w', 'h','x_center','y_center','classes']] index = list(set(df.image_id)) source = 'train' if True: for fold in [0]: val_index = index[len(index)*fold//5:len(index)*(fold+1)//5] for name,mini in tqdm(df.groupby('image_id')): if name in val_index: path2save = 'val2017/' else: path2save = 'train2017/' if not os.path.exists('convertor/fold{}/labels/'.format(fold)+path2save): os.makedirs('convertor/fold{}/labels/'.format(fold)+path2save) with open('convertor/fold{}/labels/'.format(fold)+path2save+name+".txt", 'w+') as f: row = mini[['classes','x_center','y_center','w','h']].astype(float).values row = row/1024 row = row.astype(str) for j in range(len(row)): text = ' '.join(row[j]) f.write(text) f.write("\n") if not os.path.exists('convertor/fold{}/images/{}'.format(fold,path2save)): os.makedirs('convertor/fold{}/images/{}'.format(fold,path2save)) sh.copy("../input/global-wheat-detection/{}/{}.jpg".format(source,name),'convertor/fold{}/images/{}/{}.jpg'.format(fold,path2save,name))Some useful functionsTTA, WBF, etcfrom ensemble_boxes import * def run_wbf(boxes, scores, image_size=1023, iou_thr=0.5, skip_box_thr=0.7, weights=None): #boxes = [prediction[image_index]['boxes'].data.cpu().numpy()/(image_size-1) for prediction in predictions] #scores = [prediction[image_index]['scores'].data.cpu().numpy() for prediction in predictions] labels = [np.zeros(score.shape[0]) for score in scores] boxes = [box/(image_size) for box in boxes] boxes, scores, labels = weighted_boxes_fusion(boxes, scores, labels, weights=None, iou_thr=iou_thr, skip_box_thr=skip_box_thr) #boxes, scores, labels = nms(boxes, scores, labels, weights=[1,1,1,1,1], iou_thr=0.5) boxes = boxes*(image_size) return boxes, scores, labels def TTAImage(image, index): image1 = image.copy() if index==0: rotated_image = cv2.rotate(image1, cv2.ROTATE_90_CLOCKWISE) return rotated_image elif index==1: rotated_image2 = cv2.rotate(image1, cv2.ROTATE_90_CLOCKWISE) rotated_image2 = cv2.rotate(rotated_image2, cv2.ROTATE_90_CLOCKWISE) return rotated_image2 elif index==2: rotated_image3 = cv2.rotate(image1, cv2.ROTATE_90_CLOCKWISE) rotated_image3 = cv2.rotate(rotated_image3, cv2.ROTATE_90_CLOCKWISE) rotated_image3 = cv2.rotate(rotated_image3, cv2.ROTATE_90_CLOCKWISE) return rotated_image3 elif index == 3: return image1 def rotBoxes90(boxes, im_w, im_h): ret_boxes =[] for box in boxes: x1, y1, x2, y2 = box x1, y1, x2, y2 = x1-im_w//2, im_h//2 - y1, x2-im_w//2, im_h//2 - y2 x1, y1, x2, y2 = y1, -x1, y2, -x2 x1, y1, x2, y2 = int(x1+im_w//2), int(im_h//2 - y1), int(x2+im_w//2), int(im_h//2 - y2) x1a, y1a, x2a, y2a = min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2) ret_boxes.append([x1a, y1a, x2a, y2a]) return np.array(ret_boxes) def detect1Image(im0, imgsz, model, device, conf_thres, iou_thres): img = letterbox(im0, new_shape=imgsz)[0] # Convert img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 img = np.ascontiguousarray(img) img = 
torch.from_numpy(img).to(device) img = img.float() # uint8 to fp16/32 img /= 255.0 if img.ndimension() == 3: img = img.unsqueeze(0) # Inference pred = model(img, augment=False)[0] # Apply NMS pred = non_max_suppression(pred, conf_thres, iou_thres) boxes = [] scores = [] for i, det in enumerate(pred): # detections per image # save_path = 'draw/' + image_id + '.jpg' if det is not None and len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() # Write results for *xyxy, conf, cls in det: boxes.append([int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3])]) scores.append(conf) return np.array(boxes), np.array(scores)Make pseudo labels for Yolov5from utils.datasets import * from utils.utils import * def makePseudolabel(): source = '../input/global-wheat-detection/test/' weights = '../input/yolov5/bestv4.pt' imgsz = 1024 conf_thres = 0.5 iou_thres = 0.6 is_TTA = True imagenames = os.listdir(source) device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') # Load model model = torch.load(weights, map_location=device)['model'].float() # load to FP32 model.to(device).eval() dataset = LoadImages(source, img_size=imgsz) path2save = 'train2017/' if not os.path.exists('convertor/fold0/labels/'+path2save): os.makedirs('convertor/fold0/labels/'+path2save) if not os.path.exists('convertor/fold0/images/{}'.format(path2save)): os.makedirs('convertor/fold0/images/{}'.format(path2save)) for name in imagenames: image_id = name.split('.')[0] im01 = cv2.imread('%s/%s.jpg'%(source,image_id)) # BGR if im01.shape[0]!=1024 or im01.shape[1]!=1024: continue assert im01 is not None, 'Image Not Found ' # Padded resize im_w, im_h = im01.shape[:2] if is_TTA: enboxes = [] enscores = [] for i in range(4): im0 = TTAImage(im01, i) boxes, scores = detect1Image(im0, imgsz, model, device, conf_thres, iou_thres) for _ in range(3-i): boxes = rotBoxes90(boxes, im_w, im_h) enboxes.append(boxes) enscores.append(scores) boxes, scores, labels = run_wbf(enboxes, enscores, image_size = im_w, iou_thr=0.6, skip_box_thr=0.43) boxes = boxes.astype(np.int32).clip(min=0, max=im_w) else: boxes, scores = detect1Image(im01, imgsz, model, device, conf_thres, iou_thres) boxes[:, 2] = boxes[:, 2] - boxes[:, 0] boxes[:, 3] = boxes[:, 3] - boxes[:, 1] boxes = boxes[scores >= 0.05].astype(np.int32) scores = scores[scores >=float(0.05)] lineo = '' for box in boxes: x1, y1, w, h = box xc, yc, w, h = (x1+w/2)/1024, (y1+h/2)/1024, w/1024, h/1024 lineo += '0 %f %f %f %f\n'%(xc, yc, w, h) fileo = open('convertor/fold0/labels/'+path2save+image_id+".txt", 'w+') fileo.write(lineo) fileo.close() sh.copy("../input/global-wheat-detection/test/{}.jpg".format(image_id),'convertor/fold0/images/{}/{}.jpg'.format(path2save,image_id)) convertTrainLabel() makePseudolabel() !lsRetrain yolov5 with pseudo dataif len(os.listdir('../input/global-wheat-detection/test/'))<11: pass #!python train.py --img 1024 --batch 4 --epochs 1 --data ../input/configyolo5/wheat0.yaml --cfg ../input/yolov5/v5/v5/models/yolov5x.yaml --weights ../input/yolov5/bestv4.pt else: !python train.py --img 1024 --batch 4 --epochs 10 --data ../input/configyolo5/wheat0.yaml --cfg ../input/yolov5/v5/v5/models/yolov5x.yaml --weights ../input/yolov5/bestv4.pt !rm -rf convertor def format_prediction_string(boxes, scores): pred_strings = [] for j in zip(scores, boxes): pred_strings.append("{0:.4f} {1} {2} {3} {4}".format(j[0], j[1][0], j[1][1], j[1][2], j[1][3])) return " ".join(pred_strings)Final predictiondef 
detect(): source = '../input/global-wheat-detection/test/' weights = 'weights/best.pt' if not os.path.exists(weights): weights = '../input/yolov5/bestv4.pt' imgsz = 1024 conf_thres = 0.5 iou_thres = 0.6 is_TTA = True imagenames = os.listdir(source) device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') # Load model model = torch.load(weights, map_location=device)['model'].float() # load to FP32 model.to(device).eval() dataset = LoadImages(source, img_size=imgsz) results = [] fig, ax = plt.subplots(5, 2, figsize=(30, 70)) count = 0 # img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img #for path, img, im0s, _ in dataset: for name in imagenames: image_id = name.split('.')[0] im01 = cv2.imread('%s/%s.jpg'%(source,image_id)) # BGR assert im01 is not None, 'Image Not Found ' # Padded resize im_w, im_h = im01.shape[:2] if is_TTA: enboxes = [] enscores = [] for i in range(4): im0 = TTAImage(im01, i) boxes, scores = detect1Image(im0, imgsz, model, device, conf_thres, iou_thres) for _ in range(3-i): boxes = rotBoxes90(boxes, im_w, im_h) if 1: #i<3: enboxes.append(boxes) enscores.append(scores) boxes, scores = detect1Image(im01, imgsz, model, device, conf_thres, iou_thres) enboxes.append(boxes) enscores.append(scores) boxes, scores, labels = run_wbf(enboxes, enscores, image_size = im_w, iou_thr=0.6, skip_box_thr=0.5) boxes = boxes.astype(np.int32).clip(min=0, max=im_w) else: boxes, scores = detect1Image(im01, imgsz, model, device, conf_thres, iou_thres) boxes[:, 2] = boxes[:, 2] - boxes[:, 0] boxes[:, 3] = boxes[:, 3] - boxes[:, 1] boxes = boxes[scores >= 0.05].astype(np.int32) scores = scores[scores >=float(0.05)] if count<10: #sample = image.permute(1,2,0).cpu().numpy() for box, score in zip(boxes,scores): cv2.rectangle(im0, (box[0], box[1]), (box[2]+box[0], box[3]+box[1]), (220, 0, 0), 2) cv2.putText(im0, '%.2f'%(score), (box[0], box[1]), cv2.FONT_HERSHEY_SIMPLEX , 0.5, (255,255,255), 2, cv2.LINE_AA) ax[count%5][count//5].imshow(im0) count+=1 result = { 'image_id': image_id, 'PredictionString': format_prediction_string(boxes, scores) } results.append(result) return results results = detect() test_df = pd.DataFrame(results, columns=['image_id', 'PredictionString']) test_df.to_csv('submission.csv', index=False) test_df.head()请点击[此处](https://ai.baidu.com/docs/AIStudio_Project_Notebook/a38e5576)查看本环境基本用法. 
Please click [here](https://ai.baidu.com/docs/AIStudio_Project_Notebook/a38e5576) for more detailed instructions.#train['os'].value_counts() train['lan'].value_counts() #lan_map = {'zh-CN': 1, } train['lan'].value_counts().index lan_map = {'zh-CN': 1, 'zh_CN':2, 'Zh-CN': 3, 'zh-cn': 4, 'zh_CN_#Hans':5, 'zh': 6, 'ZH': 7, 'cn':8, 'CN':9, 'zh-HK': 10, 'tw': 11, 'TW': 12, 'zh-TW': 13,'zh-MO':14, 'en':15, 'en-GB': 16, 'en-US': 17, 'ko': 18, 'ja': 19, 'it': 20, 'mi':21} train['lan'] = train['lan'].map(lan_map) test['lan'] = test['lan'].map(lan_map) test['lan'].value_counts() train['lan'].fillna(22, inplace=True) test['lan'].fillna(22, inplace=True) # In[7]: remove_list = ['os', 'sid'] col = features for i in remove_list: col.remove(i) col from datetime import datetime # lambda defines a one-line anonymous function train['timestamp'] = train['timestamp'].apply(lambda x: datetime.fromtimestamp(x/1000)) #1559892728241.7212 #1559871800477.1477 #1625493942.538375 #import time #time.time() test['timestamp'] = test['timestamp'].apply(lambda x: datetime.fromtimestamp(x/1000)) test['timestamp'] def version_trans(x): if x=='V3': return 3 if x=='v1': return 1 if x=='P_Final_6': return 6 if x=='V6': return 6 if x=='GA3': return 3 if x=='GA2': return 2 if x=='V2': return 2 if x=='50': return 5 return int(x) train['version'] = train['version'].apply(version_trans) test['version'] = test['version'].apply(version_trans) train['version'] = train['version'].astype('int') test['version'] = test['version'].astype('int') # feature selection features = train[col] # construct the fea_hash_len feature features['fea_hash_len'] = features['fea_hash'].map(lambda x: len(str(x))) features['fea1_hash_len'] = features['fea1_hash'].map(lambda x: len(str(x))) # Thinking: why map very large, very long fea_hash values to 0? # if fea_hash is very long, set it to 0; otherwise keep its original value features['fea_hash'] = features['fea_hash'].map(lambda x: 0 if len(str(x))>16 else int(x)) features['fea1_hash'] = features['fea1_hash'].map(lambda x: 0 if len(str(x))>16 else int(x)) features test_features = test[col] # construct the fea_hash_len feature test_features['fea_hash_len'] = test_features['fea_hash'].map(lambda x: len(str(x))) test_features['fea1_hash_len'] = test_features['fea1_hash'].map(lambda x: len(str(x))) # Thinking: why map very large, very long fea_hash values to 0?
# if fea_hash is very long, set it to 0; otherwise keep its original value test_features['fea_hash'] = test_features['fea_hash'].map(lambda x: 0 if len(str(x))>16 else int(x)) test_features['fea1_hash'] = test_features['fea1_hash'].map(lambda x: 0 if len(str(x))>16 else int(x)) test_features # extract multi-scale time features from the training set timestamp # use to_datetime for date type conversion # create a datetime index temp = pd.DatetimeIndex(features['timestamp']) features['year'] = temp.year features['month'] = temp.month features['day'] = temp.day features['week_day'] = temp.weekday # day of the week features['hour'] = temp.hour features['minute'] = temp.minute # compute the time difference start_time = features['timestamp'].min() features['time_diff'] = features['timestamp'] - start_time features['time_diff'] = features['time_diff'].dt.days + features['time_diff'].dt.seconds/3600/24 features[['timestamp', 'year', 'month', 'day', 'week_day', 'hour', 'minute', 'time_diff']] # create a datetime index temp = pd.DatetimeIndex(test_features['timestamp']) test_features['year'] = temp.year test_features['month'] = temp.month test_features['day'] = temp.day test_features['week_day'] = temp.weekday # day of the week test_features['hour'] = temp.hour test_features['minute'] = temp.minute # compute the time difference #start_time = features['timestamp'].min() test_features['time_diff'] = test_features['timestamp'] - start_time test_features['time_diff'] = test_features['time_diff'].dt.days + test_features['time_diff'].dt.seconds/3600/24 #test_features[['timestamp', 'year', 'month', 'day', 'week_day', 'hour', 'minute', 'time_diff']] test_features['time_diff'] #test['version'].value_counts() #features['version'].value_counts() features['dev_height'].value_counts() features['dev_width'].value_counts() # construct a screen-area feature features['dev_area'] = features['dev_height'] * features['dev_width'] test_features['dev_area'] = test_features['dev_height'] * test_features['dev_width'] # In[13]: # feature: ppi divided by area features['ppivsarea'] = features['dev_ppi'].astype('float') / features['dev_area'].astype('float') test_features['ppivsarea'] = test_features['dev_ppi'].astype('float') / test_features['dev_area'].astype('float') """ Thinking: can dev_ppi and dev_area be combined into new features? features['dev_ppi'].value_counts() features['dev_area'].astype('float') / features['dev_ppi'].astype('float') """ #features['ntt'].value_counts() features['carrier'].value_counts() features['package'].value_counts() # version - osv: difference between the app version and the OS version features['osv'].value_counts() features['version_osv'] = features['osv'] - features['version'] test_features['version_osv'] = test_features['osv'] - test_features['version'] # In[14]: features = features.drop(['timestamp'], axis=1) test_features = test_features.drop(['timestamp'], axis=1) train.head() # find the key values of the selected column in train def find_key_feature(train,selected): temp0=train[train['label']==0] temp=pd.DataFrame(columns=[0,1]) temp[0]=temp0[selected].value_counts()/len(temp0)*100 temp1=train[train['label']==1] temp[1]=temp1[selected].value_counts()/len(temp1)*100 temp[2]=temp[1]/temp[0] # keep key values whose frequency ratio is greater than 10x result=temp[temp[2]>10].sort_values(2,ascending=False).index return result key_feature={} key_feature['osv']=find_key_feature(train,'osv') key_feature['dev_ppi']=find_key_feature(train,'dev_ppi') key_feature['apptype']=find_key_feature(train,'apptype') train['media_id']=features['media_id'] key_feature['media_id']=find_key_feature(train,'media_id') key_feature features def f(x,selected): if x in key_feature[selected]: return 1 else: return 0 features['osv1']=features['osv'].apply(f,args=('osv',)) test_features['osv1']=test_features['osv'].apply(f,args=('osv',))
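# (added note) osv1 and dev_ppi1 below are binary indicator features: 1 if the row's value is one of
# the "key" values returned by find_key_feature (at least 10x more frequent among label==1 rows
# than among label==0 rows), else 0.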
features['dev_ppi1']=features['dev_ppi'].apply(f,args=('dev_ppi',)) test_features['dev_ppi1']=test_features['dev_ppi'].apply(f,args=('dev_ppi',)) # features['apptype1']=features['apptype'].apply(f,args=('apptype',)) # test_features['apptype1']=test_features['apptype'].apply(f,args=('apptype',)) # features['media_id']=features['media_id'].apply(f,args=('media_id',)) # test_features['media_id']=test_features['media_id'].apply(f,args=('media_id',)) # print(test_features['media_id'].value_counts()) # features['osv1'].value_counts() features.columns import numpy as np import lightgbm as lgb from sklearn.model_selection import StratifiedKFold from sklearn.metrics import accuracy_score # skf = StratifiedKFold(n_splits=3, random_state=2021) def xgb_model(clf,train_x,train_y,test): prob=[] mean_acc=0 nums=5 sk=StratifiedKFold(n_splits=nums, shuffle=True) for k,(train_index,val_index) in enumerate(sk.split(train_x,train_y)): train_x_real=train_x.iloc[train_index] train_y_real=train_y.iloc[train_index] clf.fit(train_x_real,train_y_real) val_x=train_x.iloc[val_index] val_y=train_y.iloc[val_index] val_y_pred=clf.predict(val_x) acc_val=accuracy_score(val_y,val_y_pred) print('Fold {} accuracy: {}'.format(k,acc_val)) mean_acc+=acc_val/nums test_y_pred=clf.predict_proba(test) prob.append(test_y_pred) print(mean_acc) mean_prob=sum(prob)/nums print(prob) return mean_prob # # This run scored 89.0627, with the key-feature indicators disabled # import lightgbm as lgb # import xgboost as xgb # # model = lgb.LGBMClassifier() # xgb = xgb.XGBClassifier( # max_depth=15, learning_rate=0.005, n_estimators=3000, # objective='binary:logistic', tree_method='gpu_hist', # subsample=0.7, colsample_bytree=0.7, # min_child_samples=3, eval_metric='auc', reg_lambda=0.5 # ) # prob=xgb_model(xgb,features,train['label'],test_features) features['heightvswidth'] = features['dev_height'].astype('float') / features['dev_width'].astype('float') test_features['heightvswidth'] = test_features['dev_height'].astype('float') / test_features['dev_width'].astype('float') # Reached 89.132 using key-feature extraction and 5-fold cross-validation import lightgbm as lgb import xgboost as xgb # model = lgb.LGBMClassifier() xgb = xgb.XGBClassifier( max_depth=15, learning_rate=0.005, n_estimators=3000, objective='binary:logistic', tree_method='gpu_hist', subsample=0.7, colsample_bytree=0.7, min_child_samples=3, eval_metric='auc', reg_lambda=0.5 ) prob=xgb_model(xgb,features,train['label'],test_features) # # Also tried adding the apptype key-feature indicator; it did not help # import lightgbm as lgb # import xgboost as xgb # # model = lgb.LGBMClassifier() # xgb = xgb.XGBClassifier( # max_depth=15, learning_rate=0.005, n_estimators=3000, # objective='binary:logistic', tree_method='gpu_hist', # subsample=0.7, colsample_bytree=0.7, # min_child_samples=3, eval_metric='auc', reg_lambda=0.5 # ) # prob=xgb_model(xgb,features,train['label'],test_features) result=np.argmax(prob,axis=1) result #features['version'].value_counts() res = pd.DataFrame(test['sid']) res['label'] = result res.to_csv('./basel6++deep001.csv', index=False) resWellSome preliminaries...import numpy as np import matplotlib.pyplot as plt %matplotlib inline import welly welly.__version__ import os env = %envLoad a well from LASUse the `from_las()` method to load a well by passing a filename as a `str`.
This is really just a wrapper for `lasio` but instantiates a `Header`, `Curve`s, etc.from welly import Well w = Well.from_las('P-129_out.LAS') tracks = ['MD', 'GR', 'RHOB', ['DT', 'DTS'], 'MD'] w.plot(tracks=tracks)Add a striplogfrom striplog import Legend, Striplog legend = Legend.builtin('NSDOE') strip = Striplog.from_image('P-129_280_1935.png', 280, 1935, legend=legend) strip.plot() w.data['strip'] = strip tracks = ['MD', 'strip', 'GR', 'RHOB', ['DT', 'DTS'], 'MD'] w.plot(tracks=tracks)HeaderMaybe should be called 'meta' as it's not really a header...w.header w.header.name w.uwi # Fails because not present in this file. See one way to add it in a minute.Location and CRSw.location from welly import CRS w.location.crs = CRS.from_epsg(2038) w.location.crsRight now there's no position log — we need to load a deviation survey.w.location.positionAdd deviation data to a wellp = Well.from_las('P-130_out.LAS') dev = np.loadtxt('P-130_deviation_survey.csv', delimiter=',', skiprows=1)The columns are MD, inclination, azimuth, and TVD.dev[:5]`add_deviation` assumes those are the columns, and computes a position log.p.location.add_deviation(dev[:, :3], td=2618.3)The columns in the position log are _x_ offset, _y_ offset, and TVD.p.location.position[:5]Export curves to data matrixMake a NumPy array:w.data_as_matrix()Export curves to pandasPandas is an optional dependency. You'll need it to make this work.df = w.df() df.head() df.GR.plot()This also gives us another path to getting a matrix:w.df().valuesYou'll have to get depth separately:w.df().index.valuesTo get the UWI of the well as well, e.g. if you want to combine multiple wells (maybe using `welly.Project.df()`):df = w.df(uwi=True) df.head()Making slides in Jupyter []() Text elements As you would expect, text can be in *italics* or **bold**, and you can use 1. ordered lists, 1. unordered lists, and 1. LaTeX 1. code fragments LaTeX formulas The distance between two point $p$ and $q$ in two dimensional space is given by $d(p, q) = \sqrt{(p_x - q_x)^2 + (p_y - q_y)^2}$. For more complicated formulas, you may want them on a separate line.$$n! 
= \prod_{i=1}^n i$$ Python code Of course, Python code can be embedded as code cells.import numpy as np import matplotlib.pyplot as plt %matplotlib inlineCreate and array, compute some values, make a plot.x = np.linspace(0.0, 1.0, 101) y = np.sqrt(x) _ = plt.plot(x, y)Formule 1 Data Collection from ERGAST Webimport pandas as pd import numpy as np from pprint import pprint import requests # I will use this function later to calculate points and wins prior to the race def lookup (df, team, points): df['lookup1'] = df.season.astype(str) + df[team] + df['round'].astype(str) df['lookup2'] = df.season.astype(str) + df[team] + (df['round']-1).astype(str) new_df = df.merge(df[['lookup1', points]], how = 'left', left_on='lookup2',right_on='lookup1') new_df.drop(['lookup1_x', 'lookup2', 'lookup1_y'], axis = 1, inplace = True) new_df.rename(columns = {points+'_x': points+'_after_race', points+'_y': points}, inplace = True) new_df[points].fillna(0, inplace = True) return new_dfRacesraces = {'season': [], 'round': [], 'circuit_id': [], 'lat': [], 'long': [], 'country': [], 'date': [], 'url': []} for year in list(range(1950,2020)): url = 'https://ergast.com/api/f1/{}.json' r = requests.get(url.format(year)) json = r.json() for item in json['MRData']['RaceTable']['Races']: try: races['season'].append(int(item['season'])) except: races['season'].append(None) try: races['round'].append(int(item['round'])) except: races['round'].append(None) try: races['circuit_id'].append(item['Circuit']['circuitId']) except: races['circuit_id'].append(None) try: races['lat'].append(float(item['Circuit']['Location']['lat'])) except: races['lat'].append(None) try: races['long'].append(float(item['Circuit']['Location']['long'])) except: races['long'].append(None) try: races['country'].append(item['Circuit']['Location']['country']) except: races['country'].append(None) try: races['date'].append(item['date']) except: races['date'].append(None) try: races['url'].append(item['url']) except: races['url'].append(None) races = pd.DataFrame(races) print(races.shape) races.head() races.tail() races.to_csv('./data/races.csv', index = False)Roundsrace = pd.read_csv('./data/races.csv') rounds = [] for year in np.array(race.season.unique()): rounds.append([year, list(race[race.season == year]['round'])]) rounds[:5]Resultsresults = {'season': [], 'round':[], 'circuit_id':[], 'driver': [], 'date_of_birth': [], 'nationality': [], 'constructor': [], 'grid': [], 'time': [], 'status': [], 'points': [], 'podium': [], 'url': []} for n in list(range(len(rounds))): for i in rounds[n][1]: url = 'http://ergast.com/api/f1/{}/{}/results.json' r = requests.get(url.format(rounds[n][0], i)) json = r.json() for item in json['MRData']['RaceTable']['Races'][0]['Results']: try: results['season'].append(int(json['MRData']['RaceTable']['Races'][0]['season'])) except: results['season'].append(None) try: results['round'].append(int(json['MRData']['RaceTable']['Races'][0]['round'])) except: results['round'].append(None) try: results['circuit_id'].append(json['MRData']['RaceTable']['Races'][0]['Circuit']['circuitId']) except: results['circuit_id'].append(None) try: results['driver'].append(item['Driver']['driverId']) except: results['driver'].append(None) try: results['date_of_birth'].append(item['Driver']['dateOfBirth']) except: results['date_of_birth'].append(None) try: results['nationality'].append(item['Driver']['nationality']) except: results['nationality'].append(None) try: results['constructor'].append(item['Constructor']['constructorId']) except: 
results['constructor'].append(None) try: results['grid'].append(int(item['grid'])) except: results['grid'].append(None) try: results['time'].append(int(item['Time']['millis'])) except: results['time'].append(None) try: results['status'].append(item['status']) except: results['status'].append(None) try: results['points'].append(int(item['points'])) except: results['points'].append(None) try: results['podium'].append(int(item['position'])) except: results['podium'].append(None) try: results['url'].append(json['MRData']['RaceTable']['Races'][0]['url']) except: results['url'].append(None) results = pd.DataFrame(results) print(results.shape) results.head() results.tail() results.to_csv('./data/results.csv', index = False)Driver Standingsdriver_standings = {'season': [], 'round':[], 'driver': [], 'driver_points': [], 'driver_wins': [], 'driver_standings_pos': []} for n in list(range(len(rounds))): for i in rounds[n][1]: url = 'https://ergast.com/api/f1/{}/{}/driverStandings.json' r = requests.get(url.format(rounds[n][0], i)) json = r.json() for item in json['MRData']['StandingsTable']['StandingsLists'][0]['DriverStandings']: try: driver_standings['season'].append(int(json['MRData']['StandingsTable']['StandingsLists'][0]['season'])) except: driver_standings['season'].append(None) try: driver_standings['round'].append(int(json['MRData']['StandingsTable']['StandingsLists'][0]['round'])) except: driver_standings['round'].append(None) try: driver_standings['driver'].append(item['Driver']['driverId']) except: driver_standings['driver'].append(None) try: driver_standings['driver_points'].append(int(item['points'])) except: driver_standings['driver_points'].append(None) try: driver_standings['driver_wins'].append(int(item['wins'])) except: driver_standings['driver_wins'].append(None) try: driver_standings['driver_standings_pos'].append(int(item['position'])) except: driver_standings['driver_standings_pos'].append(None) driver_standings = pd.DataFrame(driver_standings) print(driver_standings.shape) driver_standings = lookup(driver_standings, 'driver', 'driver_points') driver_standings = lookup(driver_standings, 'driver', 'driver_wins') driver_standings = lookup(driver_standings, 'driver', 'driver_standings_pos') driver_standings.head() driver_standings.tail() driver_standings.to_csv('./data/driver_standings.csv', index = False)Constructor Standingsconstructor_rounds = rounds[8:] constructor_standings = {'season': [], 'round':[], 'constructor': [], 'constructor_points': [], 'constructor_wins': [], 'constructor_standings_pos': []} for n in list(range(len(constructor_rounds))): for i in constructor_rounds[n][1]: url = 'https://ergast.com/api/f1/{}/{}/constructorStandings.json' r = requests.get(url.format(constructor_rounds[n][0], i)) json = r.json() for item in json['MRData']['StandingsTable']['StandingsLists'][0]['ConstructorStandings']: try: constructor_standings['season'].append(int(json['MRData']['StandingsTable']['StandingsLists'][0]['season'])) except: constructor_standings['season'].append(None) try: constructor_standings['round'].append(int(json['MRData']['StandingsTable']['StandingsLists'][0]['round'])) except: constructor_standings['round'].append(None) try: constructor_standings['constructor'].append(item['Constructor']['constructorId']) except: constructor_standings['constructor'].append(None) try: constructor_standings['constructor_points'].append(int(item['points'])) except: constructor_standings['constructor_points'].append(None) try: 
constructor_standings['constructor_wins'].append(int(item['wins'])) except: constructor_standings['constructor_wins'].append(None) try: constructor_standings['constructor_standings_pos'].append(int(item['position'])) except: constructor_standings['constructor_standings_pos'].append(None) constructor_standings = pd.DataFrame(constructor_standings) print(constructor_standings.shape) constructor_standings = lookup(constructor_standings, 'constructor', 'constructor_points') constructor_standings = lookup(constructor_standings, 'constructor', 'constructor_wins') constructor_standings = lookup(constructor_standings, 'constructor', 'constructor_standings_pos') constructor_standings.head() constructor_standings.tail() constructor_standings.to_csv('./data/constructor_standings.csv', index = False)Tool Asta Fantacalcio Per prima cosa occorre scaricare i due dataset che verranno utilizzati:- quotazioni- votazioni''' quotazioni: https://www.fantacalcio.it/quotazioni-fantacalcio votazioni: https://www.fantacalcio.it/statistiche-serie-a/2021-22/fantacalcio/medie ''' import pandas as pd import os quotazioni_df = pd.read_excel("data/input/Quotazioni_Fantacalcio.xlsx", header = 1, engine='openpyxl') quotazioni_df.head() quotazioni_df.columns def prune(dataset): temp = pd.DataFrame() temp['Nome'] = dataset['Nome'] temp['Pg'] = dataset['Pg'] temp['Mf'] = dataset['Mf'] return tempRimuovo colonne non utilizzatequotazioni_df = quotazioni_df.drop(['Id', 'Diff.'], axis=1) stat_df1 = pd.read_excel("data/input/Statistiche_Fantacalcio_2021-22.xlsx", header = 1, engine='openpyxl') stat_df2 = pd.read_excel("data/input/Statistiche_Fantacalcio_2020-21.xlsx", header = 1, engine='openpyxl') stat_df3 = pd.read_excel("data/input/Statistiche_Fantacalcio_2019-20.xlsx", header = 1, engine='openpyxl') stat_df1.head(2) stat_df2.head(2) stat_df3.head(2) calciatori_oggi = list(stat_df1['Nome'] + '/' + stat_df1['Squadra']) calciatori_old = list(stat_df2['Nome'] + '/' + stat_df2['Squadra']) calciatori_old_dict = dict(zip(stat_df2['Nome'].to_list(), stat_df2['Squadra'].to_list())) giveatry = [i.split('/')[0] for i in calciatori_oggi if i not in calciatori_old] list(stat_df1.columns.values) stat_df1 = prune(stat_df1) stat_df2 = prune(stat_df2) stat_df3 = prune(stat_df3) df_q = pd.DataFrame(quotazioni_df) df_s1 = pd.DataFrame(stat_df1) df_s2 = pd.DataFrame(stat_df2) df_s3 = pd.DataFrame(stat_df3) print(df_q.columns) print(df_s1.columns) print(df_s2.columns) print(df_s3.columns) df = df_q.merge(df_s1, on='Nome').merge(df_s2, on='Nome').merge(df_s3, on='Nome') df.head() media_giocatori = []; for index, row in df.iterrows(): if row.Pg_x > 0 or row.Pg_y > 0: media_w_pg = (row.Pg_x/38 * row.Mf_x)*0.20 + (row.Pg_y/38 * row.Mf_y)*0.80 media_giocatori.append(media_w_pg) else: media_giocatori.append(0) df['mediaGiocatori'] = media_giocatori media = [] for index, row in df.iterrows(): if row.mediaGiocatori > 0: media.append(row.mediaGiocatori / row["Qt. 
I"]) else: media.append(0) df['media'] = media giocateMax = 0 for index, row in df.iterrows(): if row.Pg > giocateMax: giocateMax = row.Pg print("In questa stagione sono state disputate", giocateMax ,"partite") if giocateMax == 0: giocateMax = 1 # se campionato non iniziato evito divisione per zero media_giocatori = []; for index, row in df.iterrows(): if row.Pg > 0: media_w_pg = (row.Pg_x/38 * row.Mf)*0.20 + (row.Pg_y/38 * row.Mf)*0.40 + (row.Pg/giocateMax * row.Mf)*0.40 media_giocatori.append(media_w_pg) else: media_giocatori.append(0) df['mediaGiocatori_today'] = media_giocatori media = [] for index, row in df.iterrows(): if row.mediaGiocatori_today > 0: media.append(row.mediaGiocatori / row["Qt. I"]) else: media.append(0) df['media_today'] = media prob = [] for index, row in df.iterrows(): if int(row.Pg_y) <= int(row.Pg_x): if round(int(row.Pg_y)/38*100-int(row.Pg_x+1)/int(row.Pg_y+1)) < 0 : prob.append(round(-1/2*(int(row.Pg_y)/38*100-(int(row.Pg_x+1)/int(row.Pg_y+1))))) else: prob.append(round(int(row.Pg_y)/38*100-(int(row.Pg_x+1)/int(row.Pg_y+1)))) else: if round(int(row.Pg_y)/38*100+(int(row.Pg_y)/int(row.Pg_x+1))) > 100: prob.append(99.5) else: prob.append(round(int(row.Pg_y)/38*100+(int(row.Pg_y)/int(row.Pg_x+1)))) df['Probabile_titolarità_futura'] = prob # (occhio se il giocatore ha cambiato squadra) prob = [] for index, row in df.iterrows(): if row.Nome in giveatry: prob.append('Y - ex '+calciatori_old_dict[row.Nome]) else: prob.append('N') df['Squadra_nuova'] = prob total = [] for index, row in df.iterrows(): if row.mediaGiocatori > 0: total.append(row.mediaGiocatori * row.media * row['Mf_y']) else: total.append(0) df['Convenienza_inizio_campionato'] = total # (considera solo le due annate precedenti concluse) total = [] for index, row in df.iterrows(): if row.mediaGiocatori > 0: total.append(row.mediaGiocatori_today * row.media_today * row['Mf']) else: total.append(0) df['Convenienza_today'] = total if giocateMax < 2: result = df.sort_values(by='Convenienza_inizio_campionato', ascending=False) else: result = df.sort_values(by='Convenienza_today', ascending=False) result = result.drop(['mediaGiocatori', 'media', 'mediaGiocatori_today', 'media_today'], axis=1)outputdf_P = result[result['R'] == "P"] df_P.to_excel('data/output/dataset_P.xlsx') df_D = result[result['R'] == "D"] df_D.to_excel('data/output/dataset_D.xlsx') df_C = result[result['R'] == "C"] df_C.to_excel('data/output/dataset_C.xlsx') df_A = result[result['R'] == "A"] df_A.to_excel('data/output/dataset_A.xlsx')Lists and Loops [demonstration]The code below shows some ways of using python **lists** (known as **arrays** in many languages). Note how these lists are "sliced" from index i to j using `my_list[i:j]` notation.# How to print a list structure my_list = [1, 2, 3, "a", "b", "c"] print("my_list is:", my_list) # Prints each element of a list individually print("Looping through a list...") for item in my_list: print("item is", item) # Prints the number of elements in a list print("The len function is important!") num_elements = len(my_list) print("my_list has", num_elements, "elements") # Could also be done with out the intermediate variable print("The len function is important!") print("my_list has", len(my_list), "elements") # Using range to loop through a list by index print("A less great way to loop through a list...") for index in range(len(my_list)): item = my_list[index] # accessing an element in a list! 
print("item is", item) # Looping through a partial list print("Slicing a list from beginning...") for item in my_list[:3]: print("item is", item) # Looping through a partial list again print("Slicing a list to the end...") for item in my_list[3:]: print("item is", item) # Looping through a partial list again print("Slicing a list in the middle...") for item in my_list[2:4]: print("item is", item) print("Enumerating a list...") for i, item in enumerate(my_list): print("item number", i, "is", item) print("Another way to enumerate using a list 'method'...") for item in my_list: index = my_list.index(item) print("item", item, "has index", index)Another way to enumerate using a list 'method'... item 1 has index 0 item 2 has index 1 item 3 has index 2 item a has index 3 item b has index 4 item c has index 5[03] 람다 표현식(익명 함수(lambda function))# 두 수의 합을 return 해주는 add() def add(a, b): return a + b print('add() :', add(1, 2)) # Lambda를 이용해 add() 구현, 보통 한줄함수 표현에 사용 add = lambda a, b: a + b print(add(1,2)) # 제곱을 표현하는 func1() def func1(x): return x**2 print(func1(10)) # Lambda를 이용해 func1() 구현 func2 = lambda x: x**2 print(func2(9)) # 조건문이 포함된 func3() def func3(x): if x < 5: return x**3 else: return x**2 print(func3(10)) # 100 print(func3(4)) # 64 # Lambda를 이용해 func3() 구현, 3항연산자 사용 # x가 true(x**3) 조건 그외 false(X**2) func3 = lambda x: x**3 if x < 5 else x**2 print(func3(10)) # 100 print(func3(4)) # 64 # 요소간 결합 a = [1, -2, 3, -4, 5] # list b = [9, 8, -7, -6, -5] for i in range(len(a)): # 0 ~ 4 print(a[i], b[i]) # list에 list append시 2차원[[]] data=[] a = [1, -2, 3, -4, 5] b = [9, 8, -7, -6, -5] for i in range(len(a)): # 0 ~ 4 data.append([a[i], b[i]]) # append로 list+ list = 2차원 [[]] print(data) # Lambda로 list + list 표현 data=[] a = [1, -2, 3, -4, 5] b = [9, 8, -7, -6, -5] # Lambda로 만드는 과정 # data2 = [a[i], b[i] for i in range(len(a))] # Error # data2 = [(a[i], b[i]) for i in range(len(a))] # list안에 tuple 추가 # data2 = [lambda x, y: [x, y] (a[i], b[i]) for i in range(len(a))] data2 = [(lambda x, y: [x, y]) (a[i], b[i]) for i in range(len(a))] print(data2) # [(1, 9), (-2, 8), (3, -7), (-4, -6), (5, -5)] # zip() 함수 사용 : 순차적으로 요소 결합 a = [1, -2, 3, -4, 5] b = [9, 8, -7, -6, -5] for x, y in zip(a, b): print(x, y) # Lamada와 동일 결과 a = [1, -2, 3, -4, 5] b = [9, 8, -7, -6, -5] data = [x**2 + y**2 for x, y in zip(a, b)] print(data) # [82, 68, 58, 52, 50] # Lamada로 표현 a = [1, -2, 3, -4, 5] b = [9, 8, -7, -6, -5] data2 = [(lambda x, y: x**2 + y**2)(x, y) for x, y in zip(a, b)] print(data2) # [82, 68, 58, 52, 50][82, 68, 58, 52, 50] [82, 68, 58, 52, 50]Environment Setup Guide to work with Qiskit Textbook This is a comprehensive guide for setting up your environment on your personal computer for working with Qiskit Textbook. This will help you reproduce the results as you see them on the textbook website. The Qiskit Textbook is written in [Jupyter notebooks](https://jupyter.org/install). Notebooks and [the website](https://qiskit.org/textbook/preface.html) are the only media in which the Textbook is fully supported. Installing the qiskit_textbook PackageThe Qiskit Textbook provides some tools and widgets specific to the Textbook. This is not part of Qiskit and is available through the `qiskit_textbook` package. 
The quickest way to install this with [Pip](http://pypi.org/project/pip/) and [Git](http://git-scm.com/) is through the command:```codepip install git+https://github.com/qiskit-community/qiskit-textbook.gitsubdirectory=qiskit-textbook-src```Alternatively, you can download the folder [qiskit-textbook-src](https://github.com/qiskit-community/qiskit-textbook) from the Github and run:```codepip install ./qiskit-textbook-src```from the directory that contains this folder. Steps to reproduce exact prerendered output as given in qiskit textbook (Optional) 1. Setting up default drawer to MatPlotLibThe default backend for QuantumCircuit.draw() or qiskit.visualization.circuit_drawer() is the text backend. However, depending on your local environment you may want to change these defaults to something better suited for your use case. This is done with the user config file. By default the user config file should be located in ~/.qiskit/ and is the settings.conf file.Qiskit Textbook uses default circuit drawer as MatPlotLib. To reproduce visualizations as given in qiskit textbook create a settings.conf file (usually found in ~/.qiskit/) with contents: ```code[default]circuit_drawer = mpl``` 2. Setting up default image type to svgOptionally, you can add the following line of code to the ipython_kernel_config.py file (usually found in ~/.ipython/profile_default/) to set the default image format from PNG to the more scaleable SVG format: ```codec.InlineBackend.figure_format = 'svg'``` 3. Installing the LaTeX parserTe get a rendering similar to the Qiskit Textbook, optionally install the pylatexenc library.You can do this with [Pip](http://pypi.org/project/pip/) and [Git](http://git-scm.com/) through the command : pip install pylatexenc 4. Syncing with the Qiskit versions used in the TextbookYou will find a code snippet at the end of the most tutorials which will contain the information on which versions of qiskit packages are used in the tutorial. If you find inconsistency in syntax and/or outputs, try to use the same version.To check the version installed in your computer, run the following in Python shell or Jupyter Notebook:import qiskit qiskit.__qiskit_version__Shapley Value> Calculate the exact Shapley Values for an individual $x$ in a game based on a reference $r$ and the reward function $fc$. Theory Shapley Value definitionIn Collaborative Game Theory, Shapley Values ([Shapley,1953]) can distribute a reward among players in a fairly way according to their contribution to the win in a cooperative game. We note $\mathcal{M}$ a set of $d$ players. Moreover, $v : P(\mathcal{M}) \rightarrow R_v$ a reward function such that $v(\emptyset) = 0$. The range $R_v$ can be $\Re$ or a subset of $\Re$. $P(\mathcal{M})$ is a family of sets over $\mathcal{M}$. If $S \subset \mathcal{M}\text{, } v(S)$ is the amount of wealth produced by coalition $S$ when they cooperate.The Shapley Value of a player $j$ is a fair share of the global wealth $v(\mathcal{M})$ produced by all players together:$$\phi_j(\mathcal{M},v) = \sum_{S \subset \mathcal{M}\backslash \{j\}}\frac{(d -|S| - 1)!|S|!}{d!}\left(v(S\cup \{j\}) - v(S)\right),$$with $|S| = \text{cardinal}(S)$, i.e. the number of players in coalition $S$. Shapley Values as contrastive local attribute importance in Machine LearningLet be $X^*\subset\Re^d$ a dataset of individuals where a Machine Learning model $f$ is trained and/or tested and $d$ the dimension of $X^*$. $d>1$ else we do not need to compute Shapley Value. 
We consider the attribute importance of an individual $\mathbf{x^*} = \{x_1^*, \dots, x_d^*\} \in X^*$ according to a given reference $\mathbf{r} = \{r_1, \dots, r_d\}\in X^*$. We're looking for $\boldsymbol{\phi}=(\phi_j)_{j\in\{1, \dots, d\}}\in \Re^d$ such that:$$ \sum_{j=1}^{d} \phi_j = f(\mathbf{x^*}) - f(\mathbf{r}), $$ where $\phi_j$ is the attribute contribution of feature indexed $j$. We loosely identify each feature by its column number. Here the set of players $\mathcal{M}=\{1, \dots, d\}$ is the feature set.In Machine Learning, a common choice for the reward is $ v(S) = \mathbb{E}[f(X) | X_S = \mathbf{x_S^*}]$, where $\mathbf{x_S^*}=(x_j^*)_{j\in S}$ and $X_S$ the element of $X$ for the coalition $S$. For any $S\subset\mathcal{M}$, let's define $ z(\mathbf{x^*},\mathbf{r},S)$ such that $z(\mathbf{x^*},\mathbf{r},\emptyset) = \mathbf{r}$, \ $z(\mathbf{x^*},\mathbf{r},\mathcal{M}) = \mathbf{x^*}$ and$$ z(\mathbf{x^*},\mathbf{r},S) = (z_1,\dots, z_d) \text{ with } z_i = x_i^* \text{ if } i \in S \text{ and } r_i \text{ otherwise }$$ As explain in [Merrick,2019], each reference $\textbf{r}$ sets a single-game with $ v(S) = f(z(\mathbf{x^*},\mathbf{r},S)) - f(\mathbf{r}) $, $v(\emptyset) = 0 $ and $v(\mathcal{M}) = f(\mathbf{x^*}) - f(\mathbf{r}) $.Furthermore, we can extend the previous result by using several references well chosen. In that case, the final Shapley Values obtained are simply the average of those calculated on each reference independantly. But, in order to accelerate the estimation, we modify the algorithm to take into account this situation. References[Shapley,1953] _A value for n-person games_. . In Contributions to the Theory of Games, 2.28 (1953), pp. 307 - 317.[Merrick,2019] _The Explanation Game: Explaining Machine Learning Models with Cooperative Game Theory_. , , 2019. Function __Parameters__* `x`: pandas Series. The instance $\mathbf{x^*}$ for which we want to calculate Shapley value of each attribute,* `fc`: python function. The reward function $v$,* `ref`: pandas Series or pandas DataFrame. Either one or several references $\mathbf{r}$. The Shapley values (attribute importance) is a contrastive explanation according to these individual(s).__Returns__* `Φ`: pandas Series. Shapley values of each attribute#export def ShapleyValues(x, fc, ref): """ Calculate the exact Shapley Values for an individual x in a game based on a reference r and the reward function fc. 
""" # Get general information feature_names = list(x.index) d = len(feature_names) # dimension set_features = set(feature_names) # Store Shapley Values in a pandas Series Φ = pd.Series(np.zeros(d), index=feature_names) # Individual reference or dataset of references def output_single_ref(coalition, feature_names): z = np.array([x[col] if col in coalition else ref.loc[col] for col in feature_names]) return fc(z) def output_several_ref(coalition, feature_names): rewards = [] idxs = np.random.choice(ref.index, size=len(ref), replace=False) for idx in idxs: z = np.array([x[col] if col in coalition else ref.loc[idx, col] for col in feature_names]) rewards.append(fc(z)) return np.mean(rewards) if isinstance(ref, pd.core.series.Series): individual_ref = True output = output_single_ref elif isinstance(ref, pd.core.frame.DataFrame): if ref.shape[0] == 1: ref = ref.iloc[0] individual_ref = True output = output_single_ref else: individual_ref = False output = output_several_ref # Start computation (number of coalitions: 2**d - 1) for cardinal_S in tqdm(range(0, d)): # weight ω = factorial(cardinal_S) * (factorial(d - cardinal_S - 1)) ω /= factorial(d) # iter over all combinations of size cardinal_S for S in combinations(feature_names, cardinal_S): S = list(S) f_S = output(S, feature_names) # Consider only features outside of S features_out_S = set_features - set(S) for j in features_out_S: S_union_j = S + [j] f_S_union_j = output(S_union_j, feature_names) # Update Shapley value of attribute i Φ[j] += ω * (f_S_union_j - f_S) return ΦExample We use a simulated dataset from the book _Elements of Statistical Learning_ ([hastie,2009], the Radial example). $X_1, \dots , X_{d}$ are standard independent Gaussian. The model is determined by:$$ Y = \prod_{j=1}^{d} \rho(X_j), $$where $\rho\text{: } t \rightarrow \sqrt{(0.5 \pi)} \exp(- t^2 /2)$. The regression function $f_{regr}$ is deterministic and simply defined by $f_r\text{: } \textbf{x} \rightarrow \prod_{j=1}^{d} \phi(x_j)$. For a reference $\mathbf{r^*}$ and a target $\mathbf{x^*}$, we define the reward function $v_r^{\mathbf{r^*}, \mathbf{x^*}}$ such as for each coalition $S$, $v_r^{\mathbf{r^*}, \mathbf{x^*}}(S) = f_{regr}(\mathbf{z}(\mathbf{x^*}, \mathbf{r^*}, S)) - f_{regr}(\mathbf{r^*}).$ [hastie,2009] _The Elements of Statistical Learning: Data Mining, Inference, and Prediction, Second Edition_. Hastie, Trevor and and . 
Springer Series in Statistics, 2009.d, n_samples = 5, 100 mu = np.zeros(d) Sigma = np.zeros((d,d)) np.fill_diagonal(Sigma, [1] * d) X = np.random.multivariate_normal(mean=mu, cov=Sigma, size=n_samples) X = pd.DataFrame(X, columns=['x'+str(i) for i in range(1, d+1)]) def fc(x): phi_x = np.sqrt(.5 * np.pi) * np.exp(-0.5 * x ** 2) return np.prod(phi_x) y = np.zeros(len(X)) for i in range(len(X)): y[i] = fc(X.values[i]) n = 2**d - 2 print("dimension = {0} ; nb of coalitions = {1}".format(str(d), str(n)))dimension = 5 ; nb of coalitions = 30Pick an individual x to explainx = X.iloc[np.random.choice(len(X), size=1)[0],:] xSingle referencereference = X.iloc[np.random.choice(len(X), size=1)[0],:] reference true_shap = ShapleyValues(x=x, fc=fc, ref=reference) true_shapSeveral referencesreferences = X.iloc[np.random.choice(len(X), size=10, replace=False),:] references true_shaps = ShapleyValues(x=x, fc=fc, ref=references) true_shapsTestsx_pred = fc(x.values) reference_pred = fc(reference.values) fcs = [] for r in references.values: fcs.append(fc(r)) references_pred = np.mean(fcs) assert np.abs(true_shap.sum() - (x_pred - reference_pred)) <= 1e-10 assert np.abs(true_shaps.sum() - (x_pred - references_pred)) <= 1e-10Export-#hide from nbdev.export import notebook2script notebook2script()Converted index.ipynb. Converted inspector.ipynb. Converted monte_carlo_shapley.ipynb. Converted plots.ipynb. Converted sgd_shapley.ipynb. Converted shapley_values.ipynb.Authorizationfrom pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from google.colab import auth from oauth2client.client import GoogleCredentials auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) from google.colab import drive drive.mount('/content/drive')Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly Enter your authorization code: ·········· Mounted at /content/driveLibrariesimport os import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline from scipy.stats import pearsonr/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead. 
import pandas.util.testing as tmHelper Functions# Correlation every week def corr_week(weeks,df_metric,df_outcome,left_on,right_on,directory_out): for w in weeks: df_metric_temp = df_metric.loc[df_metric['week']==w] df_metric_temp.sort_values(by=right_on,inplace=True) df_metric_temp.reset_index(inplace=True,drop=True) df_outcome_temp = df_outcome.loc[df_outcome['week']==w] df_outcome_temp.sort_values(by=left_on,inplace=True) df_outcome_temp.reset_index(inplace=True,drop=True) df_corr = df_outcome_temp.merge(df_metric_temp, left_on=left_on, right_on=right_on) drop = list(set(left_on + right_on)) df_corr = df_corr.drop(drop,axis=1) df_corr.dropna(axis=1,how="all",inplace=True) df_corr.dropna(axis=0,how="any",inplace=True) df_coeff = pd.DataFrame(index=df_corr.columns, columns=df_corr.columns) df_pvalue = pd.DataFrame(index=df_corr.columns, columns=df_corr.columns) for i in df_corr.columns: for j in df_corr.columns: corrtest = pearsonr(df_corr[i], df_corr[j]) df_coeff.loc[i,j] = corrtest[0] df_pvalue.loc[i,j] = corrtest[1] df_coeff.to_csv('{}/df_coeff_{}.csv'.format(directory_out,w)) df_pvalue.to_csv('{}/df_pvalue_{}.csv'.format(directory_out,w)) # Determine Signficant Correlations def sign_corr(alphas,weeks,outcome,directory_out): for a in alphas: df_significant = pd.DataFrame(columns=['week','metric','outcome','correlation','pvalue']) for w in weeks: df_coeff = pd.read_csv('{}/df_coeff_{}.csv'.format(directory_out,w),index_col=0) df_pvalue = pd.read_csv('{}/df_pvalue_{}.csv'.format(directory_out,w),index_col=0) metric = list(df_pvalue.columns) if df_pvalue.empty == True: print(w) else: for i in outcome: metric.remove(i) for i in metric: for j in outcome: if i <= j: continue elif df_pvalue.loc[i,j] < a: df_significant = df_significant.append({'week':w,'metric':i,'outcome':j, 'correlation':df_coeff.loc[i,j],'pvalue':df_pvalue.loc[i,j]} ,ignore_index=True) df_significant.to_csv('{}/df_significant_{}.csv'.format(directory_out,a))Data Preparations# read df_metric file_in = './drive/Shared drives/2018-Makerspace-Personalization/Analyses/CS205-Parallel Computing/Data/Processed Data/df_weekly.csv' df_metric = pd.read_csv(file_in,index_col=0) # reduce to three types (individual, students, instructors) old_types = ['Individual', 'Instructor', 'Instructors', 'Mixed', 'Student', 'Students'] new_types = ['Individual', 'Instructors', 'Instructors', 'Instructors', 'Students', 'Students'] type_dict = dict(zip(old_types, new_types)) df_metric['type'] = df_metric['type'].map(type_dict) df_metric = df_metric.groupby(['name','identity','week','type'])['time'].sum() df_metric = df_metric.reset_index() # subset students and drop identity column df_metric = df_metric.loc[df_metric['identity']=='Student'] df_metric.drop('identity',axis=1,inplace=True) # create individual_time, instructor_time, student_time df_metric['individual_time'] = 0 df_metric['instructor_time'] = 0 df_metric['student_time'] = 0 for i, row in df_metric.iterrows(): if df_metric.at[i,'type'] == 'Individual': df_metric.at[i,'individual_time'] = df_metric.at[i,'time'] if df_metric.at[i,'type'] == 'Instructors': df_metric.at[i,'instructor_time'] = df_metric.at[i,'time'] if df_metric.at[i,'type'] == 'Students': df_metric.at[i,'student_time'] = df_metric.at[i,'time'] df_metric = df_metric.groupby(['name','week'])['individual_time', 'instructor_time', 'student_time'].sum() df_metric = df_metric.reset_index() df_metric # read df_outcome file_in = './drive/Shared drives/2018-Makerspace-Personalization/Analyses/CS205-Parallel Computing/Data/Survey 
Data/df_survey.csv' df_outcome = pd.read_csv(file_in,index_col=0) df_outcome.columns = map(str.lower, df_outcome.columns) df_outcomeCalculate Correlation# Correlations between social interaction and survey directory_out = './drive/Shared drives/2018-Makerspace-Personalization/Analyses/CS205-Parallel Computing/Analysis/Social Interaction' weeks = list(set(df_outcome['week'])) alphas = [0.05,0.01,0.001] outcome = list(df_outcome.columns) outcome.remove('name') outcome.remove('week') left_on = ['name','week'] right_on = ['name','week'] corr_week(weeks,df_metric,df_outcome,left_on,right_on,directory_out) sign_corr(alphas,weeks,outcome,directory_out)/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:4: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy after removing the cwd from sys.path. /usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:8: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copyVisualizations# read data directory_out = './drive/Shared drives/2018-Makerspace-Personalization/Analyses/CS205-Parallel Computing/Analysis/Social Interaction' w = 3 corr = pd.read_csv('{}/df_coeff_{}.csv'.format(directory_out,w),index_col=0) # Correlation Matrix f = plt.figure(figsize=(15, 15)) plt.matshow(corr, fignum=f.number) plt.xticks(range(corr.shape[1]), corr.columns, rotation='vertical') plt.yticks(range(corr.shape[1]), corr.columns) cb = plt.colorbar() cb.ax.tick_params(labelsize=14) # Heatmap sns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns) plt.show() # Scatterplots x = 'student_time' y = 'makerspace' f,ax = plt.subplots(figsize=(10, 10)) ax = sns.scatterplot(x=x, y=y, data=corr) plt.xlabel(x) plt.ylabel(y) plt.show()Matching results from analytical solutions In [Schuss et al.](https://www.pnas.org/content/104/41/16098) they show that for D = 400, a = 0.1 and v = 1, the mean escape time is 0.00625 s.We can use this model to test if that is a resonable result%matplotlib notebook import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from PyEscape.escape_plan import escape from PyEscape.escape_points import fibonacci_spheres from PyEscape.escape_utility import sphere_vol_to_r, calculate_delta from tqdm import tqdm D = 400 v = 1 a = 0.1 n_pores = 1 pores = fibonacci_spheres(n_pores, v) N = 2 results = [] for i in tqdm(range(N)): results.append(escape(D, v, a, pores, dt=1e-8)) escape(D,v,a,pores, dt=1e-7, with_exit_loc=True)We can see from the above figure, that for our parameters we can accurately, and quickly arrive at a viable solution to Schuss et al. 
Visualising escape pathsvalues from PyEscape.escape_drawing import draw_sphere from PyEscape.escape_points import random_points_on_ellipsoid np.random.seed(1) ABC=np.array([0.62,.62,.62]) pores = random_points_on_ellipsoid(ABC) values = escape(D, v,a, pores, with_path=True, dt=1e-7, shape='ellipsoid', ABC=ABC) eo=1000 x = np.zeros(len(values[::eo,0])+1) y = np.zeros(len(values[::eo,0])+1) z = np.zeros(len(values[::eo,0])+1) x[:-1] = values[::eo,0] y[:-1] = values[::eo,1] z[:-1] = values[::eo,2] x[-1] = values[-1,0] y[-1] = values[-1,1] z[-1] = values[-1,2] len(x) import imageio import os fig = plt.figure(figsize=(8,8)) ax = fig.add_subplot(111, projection='3d') ax.set_box_aspect([1,1,1]) #ax.axis('off') #ax.legend() filenames = [] for i,_ in enumerate(x): # plot the line chart ax.clear() draw_sphere(v, ax) ax.plot3D(x[:i],y[:i],z[:i], c='b') for idx, p in enumerate(pores): ax.scatter(p[0], p[1], p[2], c='r', alpha=0.8, s=100, label='Escape Pore' if idx == 0 else "" ) # create file name and append it to a list filename = f'{i}.png' filenames.append(filename) # save frame fig.savefig(filename) # build gif with imageio.get_writer('mygif.gif', mode='I') as writer: for filename in filenames: image = imageio.imread(filename) writer.append_data(image) # Remove files for filename in set(filenames): os.remove(filename) from PyEscape.escape_drawing import draw_sphere from PyEscape.escape_points import fibonacci_spheres fig = plt.figure(figsize=(8,8)) ax = fig.add_subplot(111, projection='3d') ax.set_box_aspect([1,1,1]) np.random.seed(1) draw_sphere(1, ax) ABC=np.array([0.62,.62,.62]) pores = fibonacci_spheres(1000) px = pores[:,0] py = pores[:,1] pz = pores[:,2] ax.scatter(px, py,pz, c='b', s=1) for idx, p in enumerate(pores): ax.scatter(p[0], p[1], p[2], c='b', alpha=0.8, s=10, label='Escape Pore' if idx == 0 else "" ) ax.axis('off') fig.show() len(filenames) len(x) from PyEscape.escape_drawing import draw_sphere from PyEscape.escape_points import random_points_on_ellipsoid fig = plt.figure(figsize=(8,8)) ax = fig.add_subplot(111, projection='3d') np.random.seed(1) ABC=np.array([0.26467467, 0.33265048, 2.71150778]) pores = random_points_on_ellipsoid(ABC) phi = np.linspace(0,2*np.pi, 256).reshape(256, 1) # the angle of the projection in the xy-plane theta = np.linspace(0, np.pi, 256).reshape(-1, 256) # the angle from the polar axis, ie the polar angle radius = 4 # Transformation formulae for a spherical coordinate system. 
X = ABC[0]*np.sin(theta)*np.cos(phi) Y = ABC[1]*np.sin(theta)*np.sin(phi) Z = ABC[2]*np.cos(theta) values = escape(D, v,a, pores, with_path=True, dt=1e-7, shape='ellipsoid', ABC=ABC) eo=10 ax.plot(np.append(values[:,0][::eo],values[:,0][-1] ) , np.append(values[:,1][::eo], values[:,1][-1]), np.append(values[:,2][::eo],values[:,2][-1]), alpha=1, linewidth=0.5, label='Escape Path') max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(X.max()+X.min()) Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(Y.max()+Y.min()) Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(Z.max()+Z.min()) # Comment or uncomment following both lines to test the fake bounding box: for xb, yb, zb in zip(Xb, Yb, Zb): ax.plot([xb], [yb], [zb], 'w') #draw_sphere(v, ax) ax.plot_surface(X, Y, Z, color='b', alpha=0.2) for idx, p in enumerate(pores): ax.scatter(p[0], p[1], p[2], c='r', alpha=0.8, s=50, label='Escape Pore' if idx == 0 else "" ) ax.legend()Making clusters:def vol_ellipsoid(a,b,c): return 4/3*np.pi*a*b*c vol=1 ABC=np.array([1.,1., 9.93103448]) volN = vol_ellipsoid(*ABC) cbrt_diff = vol/np.cbrt(volN) np.array(ABC * cbrt_diff) from PyEscape.escape_points import make_clusters fig = plt.figure(figsize=(8,8)) ax = fig.add_subplot(111, projection='3d') clusters = make_clusters(50, nclusters=4) for c in clusters: x,y,z = c ax.scatter(x,y,z, s=0.1) draw_sphere(1, ax) indices npoints = 2 vec = np.random.randn(3, npoints) vec /= np.linalg.norm(vec, axis=0) print(vec)CNN Architectures 1D CNN are often use for sensor data, or for time series data. ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAArwAAADSCAYAAACo2xNAAAAgAElEQVR4AexdB3gV1db1PX1P/6egiCAdFEGqIr0XAekQSCAQeu+9i9KLSEda6FVAVDoIEpAagoIkoEiRolIEaaGIdf3f2uFcJjcJhNyZa27Y8307M3cyc+bMOmdm1t5nn70fgy6KgCKgCCgCioAioAgoAopAEkbgsSR8b3prioAioAgoAoqAIqAIKAKKAJTwaidQBBQBRUARUAQUAUVAEUjSCCjhTdLNqzenCCgCioAioAgoAoqAIqCEV/tAokDgr7/+wtWrV3H58mX8+eefiaJOTlTi5s2buHTpEi5evOiSW7du4e+//3biclqmIqAIKAKKgCKgCADq0qC9IHEgcP36dYwaNQq9evXCzz//nDgq5UAtVqxYgWbNmqF+/foiDRo0wNq1a/Hbb785cDUtUhFQBBQBRUARUASIgFp4tR8kCgRIcsuXL49XX30VJ0+eTBR1cqISixYtgp+fH6pUqYJs2bLhscceE6J/+/ZtJy6nZSoCioAioAgoAoqAEl7tA4kFAQ7xV65cGXnz5sWpU6cSS7Vsr8evv/4KWrNp0R09ejRSpEiB999/H0p4bYdaC1QEFAFFQBFQBFwIqIXXBYVu/JMIeEJ46f/6xx9/iO/vw/jC8lj6DvPc+53H/9Gv+EHHET8ey+Mo7ov7NSZPnoyUKVMq4XUHSn8rAoqAIqAIKAI2I6CE12ZAtbiEIRAX4SVJvHLlCr799lt8//334AQv68KJbocOHcLmzZsREhIix924ccN6iGyTgPJ8Crd5DLf37Nkj5x4+fNhVNi2wR44ckUllvD7dLfbu3YvPP/8c4eHhYqF1vwCPu3btGr755hts2rRJ6sIyIyMj4yTTkyZNUsLrDqT+VgQUAUVAEVAEHEBACa8DoGqRD49AbISX1teffvoJAwcORI4cOdC+fXshqab0c+fOYciQIciSJQuSJ08ukjNnTkyZMkUiPpjjuCZppctEtWrV8N1332HGjBkoXbo0nn/+efGjLVCgAMLCwuQUTiKjL/H48eNx7NgxuS5dD/71r38hbdq0mDhxYgzSy/rTRSF79uzipvDss8/ipZdewoQJE+Ta7tZdXkgJr7WFdFsRUAQUAUVAEXAOASW8zmGrJT8EAu6ElwSRFtiePXsiffr0aNy4MY4ePeoqkcf37t0bmTJlQpMmTcDJYHPnzpWJb3QTmDNnTjS3ApLjokWLgoS4a9euKFasGFq0aIERI0agcOHCIrT2cmEkhRdeeEGuGRQUJMf27dsXQ4cORZ48eZAqVSqsX7/eZbmlZXfkyJFSF0ZdWLhwIRYsWIAaNWogTZo0mDZtGui76056lfC6mlM3FAFFQBFQBBQBRxFQwusovFp4fBGwEl4SXboUtG3bFhkzZkSHDh2iWXZ///13zJs3D+nSpUPr1q2jhTFbuXIlMmfOjKpVq+L06dOuy5PwlixZUqy0JL7z588XEsq4uLxO2bJlXRbejz/+WIgqrcaMpkBXCbPQisv97733nssF4sMPP5R6BgYGRrtmaGgoXn75ZbEqM/KEEl6Doq4VAUVAEVAEFAHvIqCE17t469XiQMAQ3ty5c2PdunVo2LAhXnzxRXTr1g1nzpyJdhaTU9C9gZbfDz74QEjmiRMnZL1lyxYUL15c3BwOHDjgOo+Et0SJEkiWLJm4JJioCPSxpQX23XffFfcFklISXlp46UaxceNGVxncWLx4MTJkyIB+/fqJfy73sS7//e9/XS4QJOysD69fpEgR5MuXD7t371bCGw1J/aEIKAKKgCKgCHgPASW83sNar3QfBEh4aU2
lj2z16tXx+OOPgxbTH374IcZZ3FexYkU899xzKFSoECpUqOCScuXKieWX5ezcudN1Lgkv3Rh4PAmpu7XVHMhoDCS89NkdMGAAaE22LsuWLRPXhR49egjhPX/+vLgu0L+XZbNepj6sC+tBf2ASZ/drqkuDFVndVgQUAUVAEVAEnENACa9z2GrJD4GAIby0njZt2lSsu3RL2LdvXzRfXBZJ6+kbb7yB1KlTo169eujcuTM6derkki5dughZZWQHsxgfXlp/SVLjWgzhpR8w/Xu5WImqIbzdu3eXSA90vShTpgyeeuqpWOvCug0aNEiiO1jLYblKeONqBd2vCCgCioAioAjYi4ASXnvx1NISiIBxaSCR3b9/vxBWElpaexk9wRrX9scff0SlSpVA94ft27ff94qGZBoLLwkvt+NaYiO81mOthJfhy1jv2rVri6vEtm3brIc+cFsJ7wMh0gMUAUVAEVAEFAFbEFDCawuMWoinCBjCy0xrdFng7z59+oil19/fH19//bUkf+B1GEN32LBhYuF9++23Y4QgoxvCpUuXcOfOHVe1nCC8jAHMhREa6AJBf2MrmSbZZkY1xhHWKA2uptANRUARUAQUAUXA6wgo4fU65HrB2BCwEl6TWpjkkREaGCuXoceOHz/uOpXJJhhTlzF4x40bByZ5oKsD16tXrxY3AuvxThJe+gQHBARIpAaSX5Jz1oV1ZF0Yz5fbxtpsboIWXt4bXTB27dol7huM5kCSrIsioAgoAoqAIqAI2IeAEl77sNSSPECAhJc+u4xoYAgviyNppZ8uEzlwotiFCxdcV6E7A/1nOXmNiSNq1aolUREYm5cTx5hgwiwkvIzSUKpUqWhWWPN/s6ZLwyeffCLWY5JX92X58uWSUILxgY2Fl8fQDYNuFqxnwYIF4efnJ9dj6DTWiyTYnfAytTCjQVCyZs0qhJmxhZkkQxdFQBFQBBQBRUARsA8BJbz2YakleYAA4+EyWQPDjNEFwLrQakvyydi3nCRmXb788ksJKcYwZkz6QIvw1KlTERERES3CAsOPzZw5U4TbcS3M7mau98UXX8Q4jOWOHTsWGzZsgAltZojswYMHxdWiUaNGqF+/voQrI6nlflptzXGmUCa64MQ4umVQSHaZ9OJ+9TPn6loRUAQUAUVAEVAE4o+AEt74Y6VHOoSAOxHkZbgvtv1x/Y8TyGhxJWG1LnGVE1fZ1nPdt2M7J7Z9zLzGulgn2sWnLOsxsZVr/b9uKwKKgCKgCCgCikD8EVDCG3+s9MhEiEBcxDCu/U7dwv2ud7//OVUfLVcRUAQUAUVAEVAE7iGghPceFrqlCCgCioAioAgoAoqAIpAEEVDCmwQbVW9JEVAEFAFFQBFQBBQBReAeAkp472GhW4qAIqAIKAKKgCKgCCgCSRABJbxJsFH1lhQBRUARUAQUAUVAEVAE7iGghPceFrqlCCgCioAioAgoAoqAIpAEEVDCmwQbVW9JEVAEFAFFQBFQBBQBReAeAkp472GhW48YAgwXxsxqTCDx+++/xxn39xGDRW9XEVAEFAFFQBFIcggo4U1yTao3FF8ESHjPnj2LD5cskexqd+7cie+pepwioAgoAoqAIqAI+BACSnh9qLG0qvYiQOvugQMH0LZ1G6xauRJXr1y19wJamiKgCCgCioAioAgkCgSU8CaKZtBK/BMIkOCuXbMWlSq+hfFjxuHUyVP/RDX0moqAIqAIKAKKgCLgMAJKeB0GWItPXAjQjcGk+j154ntMGDceJYoURaf27bH/q68SV2W1NoqAIqAIKAKKgCJgCwJKeG2BUQvxFQSshHdvaCgCavohc/oMyJ41KzasX+8rt6H1VAQUAUVAEVAEFIGHQEAJ70OApYf6NgLGsvvX33/h2tVrmDkjGKUKF8XCefPxep7XMGLYMPz0448SucG371RrrwgoAoqAIqAIKAJWBJTwWtHQ7UcCgevXrmHtqjVo37othgwchGtXr2LWjJloHNQQU6dMwYULFx4JHPQmFQFFQBFQBBSBRwUBJbyPSkvrfQoCDD329f4DqOdfF107d0bY3r0Sg/fEiRMYN2YsChcogI3rN+D69euKmCKgCCgCioAioAgkEQSU8CaRhvynb8P4xibW9V9//YWffvoJq1euxNv9+qF54yb4fPNm3Lp1S6D7448/sP+r/RjQ7220b9se8+fNw4njx8HzEus9mXr9022v11cEFAFFQBFQBBI7Akp4E3sL+Uj9SL6YrYxZy27evPmPS2RkJK5cuYLz58/j5PffIyI8AtOmTEGDuoFo27q1TFDj/7n8/XcUyLdu3sK+sDD07d0bftWqY+Tw4QgLC8PxY8clQcXlXy6L5Zf3d+sfvkcS9d9++80VccJHuolWUxFQBBQBRUAR+EcQUML7j8Du+xclwaV7AIf+f/nlFyGWR44cEcK4a8cO7N6585+RXbuwc8cObA0JwepPV2Lm9Ono37cvmjdpipbNmmP8mLGIiIgQYs57sC7mnk6ePIk5s2ahVfPmcl73Ll0xeeJEfLJiBT7f/Dl2bt+OPbt2/TP3t3MniC8jTBwKP4SffvwJly5dkkl4VDZokdZFEVAEFAFFQBFQBKIjoIQ3Oh76K54I/PH77zLkv+zDD9GnVy8E1a+PyhUr4s3SZVDhn5QyZVChTDlUKl8BftVqICigHnp374Hg6dNx8OuvcfXqVbGMmts0pNesuZ/3duNGJI58ewRLFi0RN4cmDYJQu0ZNVClfERXLlEUFXucfu8+yKF+qtNxnPf8A8UUmQWfWOJJeXRQBRUARUAQUAUUgOgJKeKPjob8egACJIaMY0P91zOjRaNGkKdq1ao2JEybgk48/wfp167B+3XqsX7s+as1tb8radVi/fj02bNiAzzdtwhfbtklCiTOnT0cjug+4Tfn3n3/9ifPnziP8YDh2bN8u97xxw0a5x3W8jjfvy1zL4Lp+PVatWoUZ06ajR+euaNKgIQb074/Vq1bhhx9+0NBq8WlgPUYRUAQUAUXgkUFACe8j09Se3yjJLofP58yajRKFC6NGtWoYP3Ycdu/ahV8uXfL8Al4owWrJvd/l4nvc/crwxv8ir0fi6wMHMCt4loRVy5EmE94f9R7OnDkjPtXeqINeQxFQBBQBRUARSOwIKOFN7C2USOr3559/4uLFixg5YjhKFi6CUSNG4vTp04i8cUN8efl/XyGJiQRSj6tBvOmz+9tvd3Dzxk1RRoKnTUfxfPnRr3dvnDp1SkmvxyhrAYqAIqAIKAJJAQElvEmhFb1wDz9fuACSqdLFSmD0e6MlxBdJri6JCwFOIJwVHIzi+Qtg8LsDcer06cRVQa2NIqAIKAKKgCLwDyCghPcfAN2XLmmsiHRbqFC6LN4bMUosh9yvFt3E1ZKmPeh2Mm/uXKR/+mmsXrUav97+VaM3JK6m0tooAoqAIqAIeBkBJbxeBtzXLkcrLkNfBU8PRokiRXH06DFXMgZfu5ekXt+/cU8JIeltFNQQ/Xr1xtcHvlbXhqTe+Hp/ioAioAgoAvdFQAnvfeHRfzID2caNG9GuZWsMev
dd8RNVVBIpApawwky8MW/OXDRt2AhzZ8/GnV9/TaSV1mopAoqAIqAIKALOI6CE13mMffoKf/z5JyZNnIBGgfXx+abNuHHzhk/fz6NS+d/u3MG3h7+RrHHdOncGs8jpoggoAoqAIqAIPKoIKOF9VFs+nvfNdMG9evREj27dILFsf/89nmfqYf8kAn//9Rfu3P4VU6dMQfnSZcQy//df91we/sm66bUVAUVAEVAEFAFvI6CE19uI+9j1bt2+jS6dO2PM++9HZfGyDJv72K08UtU1zfTRRx+hWuUqOH70KH7/7XedaPhI9QK9WUVAEVAEFAGDgBJeg4SuY0Xg1s2bWLH8I2zZ/LmLLJloALGeoDsTFQJfffkVli5ZirM//gT6Y2vbJarm0cooAoqAIqAIeAkBJbxeAtpXL0OXhh9/+BE/n78gt6CEybda8srlK/jxxx9x+9Ztja7hW02ntVUEFAFFQBGwEQElvDaCmZSKMsSWa2bz+uvvv5LS7T0y90K/Xbafac9H5sb1RhUBRUARUAQUAQsCSngtYOhmdATcSZL77+hH66/EhkBs7RXbvsRWb62PIqAIKAKKgCJgNwJKeO1GVMtTBBQBRUARUAQUAUVAEUhUCCjhTVTNoZVRBBQBRUARUAQUAUVAEbAbASW8diMaV3l/Q/woxR+WPrEqioH2Ae0D2ge0D2gfsK0PqMtWXARE9xMBJbxe6Ad8CBkD9ebNm7h+/bqKYqB9QPuA9gHtA9oHbOwDN27cwJ07d4Q8e+GzrpfwQQSU8Hqh0X69cwdf7fsSn6z4GHPnzkXwjBmYMX26rLmdUJkxYwZmz5qFhQsXYsmSJZg1c6Yt5Zr6zJ41G0uWLMbCBQswd84czLFDZs+WshYvXgzKgvnz7Sn3bt0Ei8VLsGjRIsHarjrPnzdP6kucpc6zZ3te79lzpI7El+Wy7nPsKHfOHMF43ty5UmdiMTM4GDOmJ7yvmT7BPse+y7JZZ65nBs8E95tjErpm/WbNmoXFixZL+7HteC1Pyzb3vWDBAixm+y1YgODgYM/rSyxmzBCsFy1cKFiz/nY922yzefPm4cMlSzB3zlxMnz7N4zq72iY4WDBmvfnesAuP6dOnY/78+dI3+MxIv/Owb0T1uRnyvPGdwf7M54Tt6nHfmDEDM2fOlD4h/XnePPAe7CiXWPO9uXjRIhDn4BnBHpfrar8ZM+72jQ/vYuH594Rls+/OmjkLSxYvwcIFC+V5lP02tCH7WNT7eTHYN+wql3WePXs2li1dim1btyIyMtILX3W9hC8ioITXC63GB3DShAl4KW1aJE+eHLVr1ULdgAAE1PFPuPhHnfvWm+XxzNNP47HHHkPVSpXRILA+/GvXSXi5dfxR199f6le4UCEp94VnU6BwgYIoVrgwihUu4pGULFZcymJ9KWlTpUbRQoVQvEhRj8otUbQYihcpgieffFLKfeLxx/Fajlzg9TytM8vOmD69lMs6Z0qdBqVLlvS4XNbtjddeR7J//1fKTpEsOUoUK+ZxuYJlocLIn/d1KZf9o1b1GqjrH4CAu/3Gk74X1KABXkqdRsrO9Uo21KxWzeP+7F/HH4F166FM8eL4192+UaxQYdQLCJD+yP8npM58Flhu7Vp+SP1sCqlz+jRpUbtmLSk7oeWyLsSzbh1/FCsY9Zw88fgTqFC2HOrXq5egulrvj89gzerVkTZNFM6F8xdAwwYNPC5X6h0QIP0h2f/9Dy+keB6V36ok7wzpHwnF+S4eDQIDkfHFqDqnT5dO7qFeQF2P610/sB4K5H1d+sa///1vlCxWTNrVitnDbvvXZp+rK+/NF55PKX0jW5aXpf2IRUL7Bs/jPdf280PRgoWl3BTPPouaNWp43OfMPbJfZ38pq5RdXJ6TurY82/XrBeLNMmXx+L8fxxOPPYbKFSpK23naN/gc87uXOuULUudXX8kmzyTxT+j3ijizXnzeXn05q/SNxvXq4cKFqJjxXvi06yV8DAElvF5oMA61zJg2Dc0bN8b8uXNx6dIlGcq6evUqPJWj332HMe+9B3+/2jh9+jRu37rlcZms0/Vr1/D111/LC2/s2LHYuXMnQkNDRfbs2YOEypdffomdu3aiXZs2qB8QgGVLl2H37t3Yu3dvgstkXcLCwhAWthej33sP1StXxuCBA/H5559j37592BMa6lHZLGPFRx+hQ5s2qOtXB8uXLcf+AwdswSJkSwg++GAKqlepipHDh4P4EOeE4svzQu9iuTVkK7p07IxmjZvg4s8/29Lnrl27JimmN2/ejML53sBnGzfip59+wvVr1z3ud1QMjx49iratWuOd/m8jdM8eqTOv6clzwufv4qWLmD1zFpoGNcSs4GBcvHjRYzyuXY+6Z9Zz0Lvvor5/XRw9egw3bt70qL68V94zcV2/dh1ez5lLnr9ff/3V43Ll2b5+HT///DNaNGmKXt174NTp07hy5Ypc0xOc6a5169YtsXRXLFMW69aswVn2jbs4JbRsYnHz5i15TzQOCpK+sf+r/WC7eto32Of43gyeNh2Btf3x8YqPXa5nCa2vwfiXX36RZ7lbpy7o2qmTYO4pFqZOl69cxtp165Dp+VQIj4hApIcYm3Lpdnf8+DEUy19ArL18Hvk/T3Hms3Lp4kW8N3wkWjZthlUrV8p3kPibaydkzffOjcgbCAkJEYzf7tsX58+f98JXXS/hiwgo4fVCq0WS8E6fjrYtWuLjjz7Cb7/9Zs9V//5biMyUyZPRqEGQvJTsKTiqlFMnT4pWPn3aNISHh+Pbb78V+eabb5BQOX78OCIiItCtS1c0a9wYa9euxeHDh3HkyJEEl8m6fPfddzjy3XeYPGkS/GvVxuiRo4RE84XNeie0vjyPZWzYsAHdu3RB4/oNsH7dOnz//fe2lEsyPWf2HLEUTpwwAceOHfO4XLnfw9+AaYV79+yNju3a486vv9rZNYSMlileArt37ZIsbnYVTiLWrXMXDB8yVNrMrnL/+vMvLF3yIdq0aCUuArY9gwC+OXwYo0aMQPPGTWy1Lt2+fRvbt20DLd0R4eF2QSHlkDx37dIFQwcPtv29sWrVKtSoXAVfhGzDr7ft63eHDx1Cm5YtMWLIMHx//IRteERej8TC+QvQpEFDbPrsM9vK5dyNb7/5Fv1698GAfv1EUbStcABffLEdeXLkFMJuZ7kXLpxHhTJlsXD+fPm+2FU2n7kPJk5Cx7btEbJli62+tl/u+xK9e/QQZUgJr10tlvTKUcLrhTYl4Q2ePh2tmzXH8qXLwI+NHcuff/6Jc+fOYfLEiQgKrI/Lly/bUayU8Tf+xonjJ+BXoyamTpmCAwcO4NChQyIkrAkVEltajrt27oymDRti9apV8jEn6U1omTzPEFqSxto1amLk8BHYs3u3kEfW25OySSDXrV0rFoSG9QKxZs0aIaYel3vkCPaG7hWfuQC/2hg/dpwQf0/LNeeH7Q1Dzx490b5tW7H829Y5AOzatQulihXHzu3bccNGnzn25y4dO2HooEE4FBFhW5U5aXTJosVo1awFFi9aaNszyGgr7Fsjhg1Dk
4aNcPbsWVvqTLJEC+bWLVtQtEAheWZsKfhuIbTEdunUCYMHDpL3Bu/DruXTTz5B9UqVEPL5FrkHu2bOk/S3btECQwcNwbGjR22pLut29coVzJ8zD40CG2DD+g22lMtC/vjjDxyKOIQ+PXuhX58+Yjm2o3DWme9+juDkzv4qTp48KcXahfPZc2dRvlRpGY08f+6cbcSU371J4yegfZu22LxpE5i23q6F79Ge3brjnf791cJrF6hJsBwlvF5oVF8kvITlxAkS3hqxEl6SqocVEgMX4e10j/DSemwI78OWacgdCS/LsBJeukqQrCakTOs5DyK81mPju22woCvHrOCZiEZ4I6IIenzLsh7Hcvk7IjwCYaFh6NndQcJbvDh2+Arh/d1CeBfaTHjDw4XwNrWb8EZG3iW8BZ0hvB1JeAc6Rni3bLaX8IYfPBhFeAcPkVEXO17dJIl055g/Z+5dwrvejmKlDMcJ79bohNeuip89e+4u4Z0HuwnvRBLe1lGE185Rlr2hoUp47eoASbgcJbxeaFwlvPcsw9EIb1CUhZeEV0jaXbJmJXDx2SbJ82nCO3NWdMJ71yIdn3t3P0YI72ElvO6PNa1JLguv3YT3roU3ivCec790gn6TiImFN4QWXmcJ75XLl22z4vFmXRZeuwmvsfA6QniNhVcJr4vwznOI8N618NpLeNXCm6AXzSN2khJeLzS4El4lvLERU5L/KAuvG+H1GQtvMZ+y8DLcmbg02E141cIb7S1qCK/tFl4lvIKzy6VBLbyufqcWXhcUunEfBJTw3gccu/6lhFcJ730Jr1p4XY+aYz68PmvhDREL78Gvv3ZhZMeG+PDedWm4cvmKYxbemzduSIZJO+rMkSDx4VULb5QPr9OEVy28dnRbr5dBhYgTXhlZgyNb/K1LFAJKeL3QE5TwKuFNmoS3hE9ZeNWl4d7LTglvFBYkA1E+vOrSYHqHujQYJHxzTd9xJrjq2LEj9u/fr4TX0oxKeC1gOLWphFcJ7/0JbyyT1g5FTT5zP+9Bv707aU0Jr0RpcMqlgVEavODDy+gujkRp2LwF9lp4nZy0poTXfP9chHfuPJw7d9a2vsEoDTJpTX14DdSOrIlzQEAAXnzxRcl6yIgeukQhoITXCz1BCW8SJLzHj3k80c7lw6suDa6nUF0aoqCg5TEpTFpTwutgWDKnXRrm6qQ114vJhzZIcJkYaOLEiRIvXl0a7jWeEt57WDi2pYQ3CRLeY4mY8GpYshjPsqNRGsKj4vDaH5ZMLbzWhlQf3ig0SGAkDq8SXlf30Di8LigStGH6FEd6kjJBVsKboO7xcCcp4VXC6+6KQNcDxyy8SnhjPKAkvBql4R4sxod3kC/F4XV00prG4TW9w+rS4Egc3kTq0kCyx/TGP/74I5gW2uoKQBLIERem++YxnrgAsSy+jy5duoQzZ86IFZbprbnNLHF37tyRpuCkM2tdrESUdeH/6I7EevJ/5vgffvgBFI6WcfKa9TzTxlxzP90feBxjzTNiEGPZsx6mXOvxSWFbCa8XWtFRwnvWmUxrhMXxxBMah/duWDJf9eHVxBP88EVEWBNP2BiH18cTT4TYHYfX0cQT6sNrPoVWwksy5Am5M2Vy7fLhTaSJJ0hC58+fj9KlS6NLly5CRE39Wffg4GBUqlQJM2fOdJFS8/+HWTP+8Pbt29GqVSvkzp0bmTJlQrZs2ZA3b14EBgZKVlOWt2zZMrz55pvo0KEDTp065SKuPH/27NkoU6YMpk6dKkScpHfKlCkoWrQoChUqhDfeeAM1a9aUjJhxtR+V3o8//hgVKlRAhgwZkDlzZpE8efJgwIABtmWNfBhsnD5WCa/TCANwlPA6lFqYsCjhjbJMO59pLXocXlp/I3xi0ppvEV7HLLw+THidsPB+cje1sO1xeB0lvGrhNZ/CR5HwGisov3l16tTBM888gxEjRog1l7iEhIQIIS1fvrwkOeLx5hyDW3zWPOeLL76Qsp5//nkh10FBQahevTr4m6SXZJjHXbx4UQhwsvCvcnoAACAASURBVGTJMGTIELHgcv/WrVvluFKlSklac17XRGZgOSTlJLBZs2bFunXr4lRYdu7ciZdffhlp0qSRSW4DBw4Ucs1y27dv70pZHZ/78pVjlPB6oaWU8KpLw0O5NDDxhCeEVzOtxXiqHfXhjUZ4z8a4dkJ28MMmk9a2RGVaczIOr91RGgzhtd3C66hLgzcsvLcS0hVinMO+4S0f3nNnHbDwtmmDzZs2IbFlWiOuXHbt2iWWUlpeP/zwQwntRRKZK1curF+/PhqB5Dm0oMYlpkzTiPRDp2WWFt1Zs2ZFw6BRo0ZimSXhNVZZhhUrUaIE0qdPL6HGaAzx8/NDjhw5sGHDBinW/Rrc2bNnT7Eer1271lWWqYNZDx06FMmTJ0f//v2juW/w/5GRkdHqZs7x9bUSXi+0oKOEV10aRMv16dTCwerSYB5DJ6M0OGbhdSosmY+6NDhGeNXCK48JCY7XCK8jLg1OEN5Q9OzWHe/07y9+sOZ98rBrQx4/+ugjvPrqq8ifPz/Kli0rQ/10GTD+teY4knZahd0NGvxNcktfYLPQn7Zr1654+umnMXr06GiEki4T9evXj0F4SXw//fRTIch0NahatSpeeeUVTJo0CTdv3pSiTV3MdVinzp07C0G/H+El4U6bNq24PtBqfOHCBfH5NeVw7V629X++uK2E1wut5ijhVZcG3ye8GpbM9RQ6SXi9k3jCGQvv1w5mWrPbwmtSC9PCq2HJNCyZebhdPryJdNIa62kIHkdXRo4cieeeew6PPfaYDPFzkpn1GG5zElvLli1RpEiRGPL6669jxYoVcg7/0DpbsmRJIaz79u1z7ec1YyO8pi4cnaJ7Bevy+OOPo0mTJi7/WnOMqzBAiHR8CC8nypFkp0uXTgh1w4YNsXDhQhw7dgz0742tbOt1fHFbCa8XWk0Jr7o0uFsAHI3S4DWXBk08QQsM23LEsGGICktm46Q1S+IJJ10aNLXwFcyf4w2XhiiLnKefHBIRr1h4H+HUwvSJ5aQxkkESXhJIQ3it7Udlcdq0aejXr5+4BtA9wAjdCvbs2eM6nFZUujKUK1cOR48elf2GVJLw1q1bF/ny5RMfXuPSYE5mXbJkySKElwSbRNuca44x6/haeHk+DQzjx48XNwu6TdD6XLx4cXHluH79uikyyayV8HqhKR0lvOrS4PsWXneXBoYVi7Aj09pe9OzeE+3btsXtW/b4D5rHhX5upYoX09TCThJei0vDAbXwIjzcyUxrOmnNPNvWSWuOhCVrnXhdGoiBUWIrVqwoE7/oQsAJZYyIQLcEs8RFOM3/zdoc99lnn4lrBN0Svv/+e/NvWXNidLFixVyT1qyEl6HCGEmBhJeEmW4IdGmIi5A+DOE1lSCZZ4QKf39/pEqVCqlTp8bKlStj+Paa4311rYTXCy3nKOFVl4YkQHijR2mgNdgewhvmMOEtjp3bt+NGZKRtT5FPujRESzxho4U38ga23p20pi4NEJ/I1i1aYOjgIS4Lmacdj2TkyhW18FpxdJ7w
tk2Uk9YMBoxv27p1ayG7M2bMwPLly8UflpPWVq9e7fLjNcfHdx0aGioWXIYiCwsLk9PY/+hawOs9+eSTMtGMURxIePk/vg/btm0rURdIuHl9hhzjpLVVq1ZF8wM29YgP4WXZdNvgsdw2C10Zhg0bJlbtPn36xEmqzfG+tlbC64UWU8KrLg33dWkI9l3Cu0MJLyKiTVqzk/BGKuG1vJ8101oUGCQoXnFpcCy1sBMW3r22TFpjdAISPkZo6NWrl0w6ow/t9OnTZR8trSSrxP9hF7oh1KtXD//73//AEGDfffcdvvzyS3GXoG8vSSzDktESTMLLuowZM0aswnSPoBWWbT937lxkzJhRQprRZcJKWFknQ3izZ8+OOXPmuBJb0G3CLJx8R1cMWnFpbWbCC5bPOo4aNQpPPfWU+A2biXHmPF9fK+H1Qgs6SnjVpcH3Lbwz3aI0eGrhdfnwOu3S4FtxeB2btOYY4dXUwtbXsxLeKDS8SXhpYbQOr1vb42G3XZPWHHFp8JzwksQuWrRILLu1atUSQmruke4DtHjStYE+tLQCJ2RhmDNGf0iZMiUKFy4sBJdkl5FNWD5JL/116UNMa27OnDkligJdHgyxZaY3JobgJLYGDRrEcI8g4WU0iBQpUuC1114Tn2HG52UkI7OwLWrXro2XXnpJklswCUbv3r3B0GiMzctJeIzTm9QWJbxeaFFHCa+6NCQBwuurFl6dtEYy4I1Ja066NPjOpDUnfXh10pr5FD6qLg0My8XYtPRj3bZtm4FD1iSbtIQy6xlJJmPlJmShtXjevHmSHKJAgQKS5GLLli1ilaW1tXHjxlizZo34CjMyAzOvbdq0SQiw9XqMpNCtWzfUqFFDkmJYLc4ky3TFIGmvXLkyqlSpIvdE0mwWvrcYsowJJpjNjdnZWB9OWCPp5QQ7Euektijh9UKLKuFVl4ak6dKgk9ZiEl4bw5JZJq05SXg1LJk3fHh7u+KmevrJ8X0LrxM+vJ7F4TXWU2vbcF9s+80x9/ufOca6th5PVwG6D5Ccxncx55u1+3lx7bcex2Pcj6N7ww8//CDxhK1xg3me+7HWsnxxWwmvF1pNCa8S3vsSXrvj8NIlIjwCYaHOT1pTH15aeMMtYclsJLwSliwERQsUhBJenbRmPlUkIb7tw+sE4fXcpcHg+6isH0RmH/R/X8RJCa8XWk0JrxJeJbzxe9B8M0qDlfDaPWnNdwnvls1bZCa4XR9OZ314NSyZeUIdd2lIxIknDAa6TpoIKOH1Qrs6Snh10prv+/A6FodXLbzm8abvnGOT1pyy8IpLQxTh9dU4vAx9ZB/hVR9e9mfi6dMW3tZOWHg9c2kw7wldJ20ElPB6oX0dJbw6ac33Ca/dLg2uKA1OE16dtBbTh9dGC69mWov2dnbWwquT1gzYLgvvI5xpzWCh66SFgBJeL7SnEl51aXgol4aIQ4g45EGmNSW8MZ5q71l4nSG8Tvrw+k6UhnBo4gkvWniV8MZ4j+gO30ZACa8X2k8JrxLehyK8tsXhVQuvebydJbwRlklrSng//eQTVK9UCSGbt+CmrS4NSnjZn73m0qCE17w+dJ1EEFDC64WGVMKrhFcJb/weNJ+ctBahhNfaukp4o9BgyKlDEYfQp2cv9OvTx/fCkinhtXZr3U4CCCjh9UIjKuFVwntfwuuWWpiJDCju58TntzlPw5JFf7AdtfA6mWltyxYNS3a3KcMP6qQ1QuE1C+/cefCdTGs6aS36G09/xYaAEt7YULF5H2cqT5s6FS0aN8HihYvA1IAM9sz0fhRuU5jZxCr8SMcmtBxQeCxTHE6aMAEN6gXi4sWL8jLkRBojfDnGJvG5xRMnTsCvRg1MnTIFBw4ccBEwQ6riQ8Csx/C8I0eOSEzRrp06o2lQQ6xetQqcjMLjPCmXaRMPHz6MiRMmoHaNmhg5fAR2794NZpex1iEh2yxj3dq16NqpExrWC5RMOMx042mdicXevXsxy2cnrWlqYZ20FvNNohbeKEzUwhu9b7hSC2tYsujA6C+vIaCE1wtQ37p1CwP690ehAgVQL6AuBg4ciOHDh4OpA0eNGoX33nsPo0ePxpgxYzB27FiMGzcOEyZMwMSJEzF58mR88MEHmDJlCqZNm4bp06cjODgYM2fOFBk/fjyCGjRA0cJFMHXqVMkFznzgS5YsAfN2L126FMuXL8dHH32Ejz/+WHJ2f/rpp2Aaw1WrVokwZzeFKQ0pTDm4bt06zJ49G8WKFJF0iqwDr82UhUZYD2tdZs2aBQrPo8yZM0dk7ty5MMK6cX9AHX9UrlgRo0aOxPz586XeCxcudNV/8eLFoPA+zL3wfsw98b6Yc5zC+1uxYoVI75498WaZsujcsRMWzF8g98k85RTetxHr/ROH2DAwOGzcuFGwbta4MWpWrYaZM4OxJSQE3E/57LPPRJgCkrJ582aRzz//HBSmjjQSEhIiaRuZunHHjh1Yv349xo0dh+pVqmLo4MFC0rnfCPOZU3bt2uUSEvk9e/a4JDQ0FBSSZ0rYvjBZb9+2HV07d0Hrli1x+ZdfXAqWu5JllCqjSDHkUWwKk/VRYX1KFVfC6y3C6+SkNd/JtKY+vHwGvWbhdcyloQ02b9pka+ravaGaeML6ftbt2BFQwhs7LrbuvX37Njp36oQsmTLjtddeQ/ny5VGhQgVZM491uXLlULZsWZQpUwalSpVCyZIlRUqUKIFixYqJFC1aFIULFxYpWLCg5L1+4403kCdPHmRInx4pnnsOuXLlQt68eZE7d24R/s6ZMydy5MiBV199FdmyZcMrr7yCrFmzuuTll19GXJIxY0Y8/b//IWXKlEifPn28JF26dKDw+Li206ZNi2TJkuHpp59G6lSpwN/xEWt5GTJkAIV1zJQpEzJnzozMWbIgderUeOaZZ5AqVSpkyZJF7pf3TOH9Z8+eXbAgHsSF+FCIFXEjnsSQwrZ6/fXXkS9fPjkmXdq0SPVCKuTOlUtyj7MdmIOcwrYpUqSICNvKtBvbkMI2ZdtSSpcuLW3NdufvfK/nQ7o06ZA3Tx5UrFgxmrz11luSd93kRGde9KpVq6JatWoi1atXl3zqzKles2ZNyZ/u5+cHSs0aNeR+cubIgRbNm6NNmzZo27Yt2rVrJ8K88JSOHTuiU6dO6Ny5M7p06YKuXbtKnvbu3bujR48eIj179kSvXr3Qu3dv9O/fX/KtZ0yfHo0bNZL977zzDijvvvuuKHSDBg3C4MGDMWTIEMlPP2zYMJeSN3LkyGiK3vvvv+9S9nheqZIlUaVyZfF7pMJnVfqo1BnFj4qXUbiMokVlisoV89VTkVqwYAGoSHG7Q/sOKFe6DDq2by/7jLJEZZAKU2wKoVEEjfJDBWXDhg3RFB3WoUWzZqhYvoIoll988QW2bdsGrrdv3y7Ki1FajLJiFJSwsDDs27dP5KuvvsL+/ftlNIUEN3TPHsyeNQuv5cqNj1eskNEKjmRwxIGjA9999x2OHj0KjjZQOCJDOXnyJE6dOoXTp0+LMG0ohalMKRym5jGtWrQAFUSW8fPPP4NpRUl+r1y
5IqNQ165dw/Xr10UiIyMliQSVd4oZnTIjUlSaqDBxUQtv1OdDLbzRP6Nq4Y2Oh/7yPgJKeL2AOV0aJk+cKMPh06ZMxfHjx+UDdObMGddHiR8nfqQo/BhRvv/+e5fwHCPmA8eP3u5du/DOgAFiIaR1jx8vfhCN8ANphvs55G+G9Ok+QKE7gZGDBw+KuwE/tnRhoLW3bOnS6Ne3r1hZaUmlkCjQwuouxhrLtbHQck2rLoXEg/8jGanrf8/Ca8gJ10aMRdhYiY3VmJZtEgySHVqcSX4MCaIVnB/xooUKoWGDIIwYPkIs5bSa03pOKzqt6bSqk3TRwk5LO8kYiRkJGgkXLfAkbiRwAwYMkDWJYpnSpVAg3xto2bIl+vXrB5JAEsJu3boJSSRZJGkkeSSJJJls3769kEySzdatW6NVq1ZyfosWLWTdsGFDIbjZsr4iRLhx48YICgpCgwYNUL9+fdSrVw9169ZFQEAA/P39UadOHdSuXTuK0NasKWSXpJcEmESYQlJcqVIllH+zPLK9kg0vvfSSiwyTFFuPJ5HmsSTWRgkzChiJuVHASNqLFy/uIvJUEJI984woDfnz5xcFrECBAuA2FTEqCVQWqDRQqEBQmaBS4a6IUQmhUCGhkvJs8uRIkSKFKE2sO/dRqaFQwTHKFxUgKkpp0qTBiy++KMoOFR4qOy+88EI04b7kyZLjqaeeQvLkyeVYnsNz3ZUto7DxOkaxsipXpi6iZGXOLIodFc5nnn5a6mgUSKtiaRQu3qNRvNyVLypgRgkjRtzOkjkz/u+pp/BK1qwuRcxdISPORjEj7sSfwrYwwrahgmaEv19MnVqw5HZsyhqVNrY5xaq4GYXcKG/sJ0bYd9jOKVOkwBv58okiz37lrrixv7IfWpU09m32cfZ39vvAwEB5Dvg88Lng8dleeUXK9a/jL88Pnyc+V0aZ4/PmrsDx+TTKG59ZKm19+vSRZ5jKG/fVrF4DeXPnRsOgIAwZOtSlqPH9EJuC5j4SN2nSpBgjcXw38X1S+a1KqFqliryvzLvOjGaZUSwzYmVVvsxInFG6OOpmFC4qXVQUs2TIiEWLF4uCxVEjq5LFURgqWO7Klbtixfc/vwX8Phz57gh2bN+BYgUL4b1Ro0Tp4nfHqkRReaIrHZWns2fPigJ14cIFcam7dOmSS3Gi6x6VJqMsUZka+/77aNuyFTasWyeKE5UCjijRau3JohZeT9B7dM5VwuuFttZJa/ZNWjMk3RB289v4004YNw5+1WtgxNBh2LFjpxB8EnlD5knkSeiN8LexqnHb/DYfBf6P59Jq1aldO9QPqCvWNl73yy+/FMsc12bbarGj5Y6/xc0gLEy2zW9+hHgNujh8MGkyalWvIX7HLCd0Tyj2hEa5LBiLoHFh4G/zIeO2+W0siMb1gS4Rmz/bjI7tO6BZ0yaiYF365Rfwg0RrHv29jfC3Vfjxssr58+flo8Y1hWXwI1ykQAGs/PRTHD92TD58/PhR+CE0H0Rumw8k91GMxdGsjeLH9Zf79qF5k6bo0a0bNn32mRxrlECrQkhl0KoY0rJpFERj6eSHmgogLaFsr/ffGw1/v9oYNWKEtBv7ANvXtB3biTgTU+JJDGmhJZFgO9Ethe4qdFsh4SD5oFL4weQP0CgoCGXLlMGc2XPEbYbExSiGdMMxih8tziQ9VOioxNEyTQXOqrzRfYgEigpah/btkTlDRiFsVNqosBlljYra0KFDRVEzShoVtLffflvIXN++fYXYuStmVMqowJE4kwA3a9ZMSCMVOSpizZs3R9OmTYVkNmrUKJoCZpQvd8XLKFEksiTbL6RMiQL5C6BsubIykmVGsKxKlFGgzOgVR0rclSYqSyTQFCoSzz77LF5I+YIoQVQYzIgVFSMqIEYpooLirhAZJej5558Xheq5554T5YejTU8++SSeeOIJPPXkkzJCxFEiCkehuOYxZk2FyQj3c5v1MmuWa8SUTUXL7OP1jVCx4wiaEe7ntqmr2abCZhQ5s83yHn/8cTmWypsRKnFGuM+q0BkFz4yW8X9WBY9KHo958r//RYrnUsi5RtmjsmlG1IzSZ9ZG+eNvblNJpZhto7jy/pInSybX4D6jHHJNBdEog1T2qPRRzGibUeaoxFlH1diPXsubV67XqX0HeUd54bOul/BBBJTweqHRlPDaR3iNhdp9LVbsb74BJ63VqVnL1klrtKRz0lo3TloLDMTaNWtkCNm9Dg/7m0SMJIvD1gG164BknfsOH7pniX/YMnn84W8OSzikfXv3oVePXmjfri1+vX3b1p5OUli6eAns3LEDt27etK1sEm1OaBw2eDC+OXzYtnJpSfpw8RK0bt4CixctlEmi9yvcOtHT6s9strmmZYpD+QcPfo3hQ4eicVBDIfQ8l9czYob7+dv4S5u1cQmwrs0k1su/XMamjRtRJH8B7AsLk/KMKwHXdJUyv7ltFeN6wH1mm+ubN2+KUGnp0LYdBvTrL3U2VjiORlmF+61idXEw22ZNax7LX7xoESqVL481q1aLAkRLnxHjLsHf3DZC619cQlcLHk8FpHHDhujfpy/C9obJubwPI0aBs66tihy3rYqc2aZSNGnCRATU8sOSRYuEMBnFzIy8UZmiIsXRNaNEcRSN7x0+c7SSGqWaShSVWSq1Hy75EI2CGqJxw0aiNFkVKbq9GEWKvv5UpDgfgPMCqEzRjYYKFecYcM4BLb50uzHKFEehMqRNi0kTJ8rcBlqNKUah4ugYFSozKkaliqNhHAkzShXniXC+COeNmDkkVJqyZnkJAf7+ojhRqXJXrDj6RcWKFnKOdtFiTpcnd+WKo14c8eJoF63wJYqXQK4cOWUUiooVFSwqXFSwmjRpEquSFdsoF122atWqJSMEdOdiudmzZUfnjh2V8N7vxfaI/08Jrxc6gKOE9+w5cZcICqwvHww7b4cveI3ScEjcQxyN0hA8EwF+tTF+7DjxzeQHlBbJhJBdOc9rmdaKYcf27bgRGWlbt7MzDq91mJQEc8mixWjVrAUWL1woRNGOSsuktfB7cXhZf7uWmzduYltIiIQlO/j113YVK+VQAaJiMWTQICGOVqw8vdCqlStRvVJlbN2yFbdv3fK0ONf5hw5FoE3Llhg+ZCiOHz/h2u/pBgn7wnkL0Lh+kIwqeFqeOZ8K0aHwQ+gtcXh7izJgVaS4bVWgYttmGRSjYHGbihMVJJLk3NlfxYnjx+WSRsEyipVZG+XKurYqWGabihbPIckvV7IUZgfPwpnTp0WR4v+McnW/tVXpct+m0jJm9Gi0adES69asEXcHo4SZtVHIrGurAmbdNooYz922dSs6d+iEAf36KeE1HVDXMRBQwhsDEvt3OEp4zynhJckzfso+GZbMLQ4vg9V7lFqYhDk8AmGcudy9J9q3bWsr8eAT4otRGhY7RXgjwi2Z1s7a8gIhGaIisVXj8LrwDA/37Ti8JGZ2LIYo09WGhPfUyZN2FOsq49y58yhfugzmz52H8+fOe+xfawomaaYlvUObtvh882Yh1+Z/nq737d2LXt26453+byvh9RTMJH
![A 1D Global max pooling](https://peltarion.com/static/1d_global_max_pooling.png) A 1D Global max pooling The difference here versus the previous models, which consisted only of Dense layers, is the presence of the convolution layers for which this type of network is named. A convolution layer passes a kernel over one set of values to produce the next, calculating a dot product as it goes and creating one value from multiple values. We can choose the number of kernels (the functions that slide over the array to calculate the dot product), the size of the window each kernel uses to calculate the dot product, and the size of its stride. Because each convolution creates a new value from several, we would lose dimensionality every time we add a Conv layer, so we use padding. After the first Conv1D we add a pooling layer.
This is to reduce the spatial size of the convolved features and also helps reduce overfitting, which is a problem with CNNs. Here we use max pooling instead of average pooling because taking the maximum instead of the average value from our kernel reduces noise by discarding noisy activations, and so is better suited than average pooling here. Basic Convolutional Neural Network# Basic CNN model = Sequential() model.add(Conv1D(64, 2, activation="relu", input_shape=(32,1))) model.add(Flatten()) model.add(Dense(5, activation = 'softmax')) model.compile(loss = 'sparse_categorical_crossentropy', optimizer = "adam", metrics = ['accuracy']) model.summary() model.fit(x_train, y_train, batch_size=32,epochs=30, verbose=2, validation_split=0.2, callbacks=[tf.keras.callbacks.EarlyStopping('loss', patience=3)]) acc = model.evaluate(x_test, y_test) print("Loss:", acc[0], " Accuracy:", acc[1])Epoch 1/30 12468/12468 - 44s - loss: 0.2602 - accuracy: 0.9065 - val_loss: 0.4388 - val_accuracy: 0.8419 Epoch 2/30 12468/12468 - 44s - loss: 0.2596 - accuracy: 0.9064 - val_loss: 0.3199 - val_accuracy: 0.8983 Epoch 3/30 12468/12468 - 45s - loss: 0.2592 - accuracy: 0.9069 - val_loss: 0.4657 - val_accuracy: 0.8281 Epoch 4/30 12468/12468 - 44s - loss: 0.2582 - accuracy: 0.9075 - val_loss: 0.5101 - val_accuracy: 0.8149 Epoch 5/30 12468/12468 - 44s - loss: 0.2578 - accuracy: 0.9074 - val_loss: 0.4511 - val_accuracy: 0.8448 Epoch 6/30 12468/12468 - 46s - loss: 0.2562 - accuracy: 0.9075 - val_loss: 0.4868 - val_accuracy: 0.8307 Epoch 7/30 12468/12468 - 45s - loss: 0.2571 - accuracy: 0.9077 - val_loss: 0.4163 - val_accuracy: 0.8631 Epoch 8/30 12468/12468 - 45s - loss: 0.2565 - accuracy: 0.9081 - val_loss: 0.3188 - val_accuracy: 0.8960 Epoch 9/30 12468/12468 - 44s - loss: 0.2550 - accuracy: 0.9083 - val_loss: 0.3496 - val_accuracy: 0.8855 Epoch 10/30 12468/12468 - 44s - loss: 0.2556 - accuracy[...]Intermediate Convolutional Neural Networkmodel = Sequential() model.add(Conv1D(28, 2, activation="relu", input_shape=(32,1))) model.add(Conv1D(28, 2, activation="relu")) model.add(MaxPooling1D(2)) model.add(Conv1D(64, 2, activation="relu")) model.add(Conv1D(64, 2,activation="relu")) model.add(MaxPooling1D(2)) model.add(BatchNormalization()) model.add(Flatten()) model.add(Dropout(0.3)) model.add(Dense(6, activation = 'softmax')) model.compile(loss = 'sparse_categorical_crossentropy', optimizer = "adam", metrics = ['accuracy']) model.fit(x_train, y_train, batch_size=32,epochs=30, verbose=2, validation_split=0.2, callbacks=[tf.keras.callbacks.EarlyStopping('loss', patience=3)]) acc = model.evaluate(x_test, y_test) print("Loss:", acc[0], " Accuracy:", acc[1])3336/3336 [==============================] - 7s 2ms/step - loss: 0.3265 - accuracy: 0.8874 Loss: 0.32654300332069397 Accuracy: 0.8874179124832153Principles of CNN Design:- by convention the kernel (filter) size stays the same throughout the network- the number of filters should start low and increase throughout the network- keep adding layers until we over-fit, then regularize using l1/l2 regularisation, dropout, batch norm- be inspired by patterns in classic networks such as Conv-Pool-Conv-Pool or Conv-Conv-Poolcallbacks_list = [ keras.callbacks.ModelCheckpoint( filepath='best_model.{epoch:02d}-{val_loss:.2f}.h5', monitor='val_loss', save_best_only=True), keras.callbacks.EarlyStopping(monitor='accuracy', patience=1)] model.fit(x_train, y_train, batch_size=32,epochs=50, verbose=2, callbacks=callbacks_list,
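          # ModelCheckpoint above writes the model from the epoch with the lowest val_loss to disk,
          # while EarlyStopping halts training once training accuracy stops improving for more than one epoch;
          # the validation_split below supplies the val_loss being monitored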
validation_split=0.2) acc = model.evaluate(x_test, y_test) print("Loss:", acc[0], " Accuracy:", acc[1]) def build_model(hp): model = Sequential() model.add(Conv1D(hp.Int('n_filt_1', 4, 32, 4), 2, activation="relu", input_shape=(32,1))) model.add(Conv1D(hp.Int('n_filt_1', 4, 32, 4), 2, activation="relu")) model.add(MaxPooling1D()) for i in range(hp.Int('n_layers', 1, 12)): filt_nb = hp.Int(f'conv_{i}_units', min_value=4, max_value=32, step=4) model.add(Conv1D(filt_nb, hp.Int(f'kernal_{i}_size', 1, 4), activation="relu")) model.add(Conv1D(filt_nb, hp.Int(f'kernal_{i}_size', 1, 4), activation="relu")) model.add(MaxPooling1D()) model.add(Flatten()) model.add(BatchNormalization()) model.add(Dense(6, activation = 'softmax')) adam=keras.optimizers.Adam(hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])) model.compile(loss = 'sparse_categorical_crossentropy', optimizer = adam, metrics = ['accuracy']) return model from kerastuner import Hyperband tuner = Hyperband(build_model, max_epochs=150, objective="val_accuracy",project_name="cnn", executions_per_trial=2) # Display search space summary tuner.search_space_summary() tuner.search(x=x_train,y=y_train, validation_data=(x_test,y_test), callbacks=[tf.keras.callbacks.EarlyStopping('val_loss', patience=3)] ) tuner.results_summary() tuner.get_best_hyperparameters(num_trials=1)Auto-Optimised Models Here, before attempting to use more appropriate CNN and RNNs, we are going to attempt one last time to get the best performance using only dense/fully-connected layers, by using the keras-tuner package to tune the hyperparameters of our model. Usually in machine learning we manually change each of these through trial and error, but with this package we can automate the combinatory process of optimising each hyperparameter. Here we have decided to auto-optimise the hyperperameters controlling the number of hidden layers, and the nb of neurons in each of those hidden layers!pip install keras-tuner from kerastuner import RandomSearch def build_model(hp): d1 = hp.Int("d1_units", min_value=6, max_value=256, step=16) model = keras.models.Sequential() model.add(tf.keras.layers.Dense(d1, activation='relu', input_dim=32)) for i in range(hp.Int('n_layers', 1, 8)): # adding variation of layers. 
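    # each pass through this loop appends one more hidden Dense layer; the tuner searches over
    # both the loop count (n_layers, 1 to 8) and each layer's width (6 to 256 units, in steps of 16)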
model.add(Dense(hp.Int(f'conv_{i}_units', min_value=6, max_value=256, step=16), activation='relu')) model.add(Dense(6, activation='softmax')) model.compile(optimizer="adam",loss="sparse_categorical_crossentropy",metrics=["accuracy"]) return model tuner = RandomSearch(build_model, objective="val_accuracy", max_trials=5,executions_per_trial=5) tuner.search(x=x_train,y=y_train,epochs=15,validation_data=(x_test,y_test))Trial 5 Complete [00h 50m 10s] val_accuracy: 0.917763352394104 Best val_accuracy So Far: 0.9192193508148193 Total elapsed time: 04h 20m 50s INFO:tensorflow:Oracle triggered exitBuild the Modelmodel = Sequential() model.add(Conv1D(64, 2, activation="relu", input_shape=(4,1))) model.add(Conv1D(64, 2, activation="relu")) model.add(Dense(16, activation="relu")) model.add(MaxPooling1D()) model.add(Flatten()) model.add(Dense(3, activation = 'softmax')) model.compile(loss = 'sparse_categorical_crossentropy', optimizer = "adam", metrics = ['accuracy']) model.summary() model.fit(xtrain, ytrain, batch_size=16,epochs=100, verbose=2, validation_split=0.2)Epoch 1/100 7/7 - 0s - loss: 1.0363 - accuracy: 0.3465 - val_loss: 0.9948 - val_accuracy: 0.2692 Epoch 2/100 7/7 - 0s - loss: 0.9452 - accuracy: 0.3465 - val_loss: 0.9041 - val_accuracy: 0.5769 Epoch 3/100 7/7 - 0s - loss: 0.8996 - accuracy: 0.6238 - val_loss: 0.8725 - val_accuracy: 0.3846 Epoch 4/100 7/7 - 0s - loss: 0.8596 - accuracy: 0.6337 - val_loss: 0.8560 - val_accuracy: 0.4231 Epoch 5/100 7/7 - 0s - loss: 0.8324 - accuracy: 0.6436 - val_loss: 0.8400 - val_accuracy: 0.4231 Epoch 6/100 7/7 - 0s - loss: 0.8110 - accuracy: 0.6337 - val_loss: 0.7942 - val_accuracy: 0.5385 Epoch 7/100 7/7 - 0s - loss: 0.7920 - accuracy: 0.7525 - val_loss: 0.8010 - val_accuracy: 0.5000 Epoch 8/100 7/7 - 0s - loss: 0.7828 - accuracy: 0.6832 - val_loss: 0.8196 - val_accuracy: 0.5000 Epoch 9/100 7/7 - 0s - loss: 0.7653 - accuracy: 0.7624 - val_loss: 0.7344 - val_accuracy: 0.9231 Epoch 10/100 7/7 - 0s - loss: 0.7511 - accuracy: 0.9208 - val_loss: 0.7240 - val_accuracy: 0.8077 Epoch 11/100 7/7 - 0s - loss:[...]Session 9: Introduction to Support Vector Machines------------------------------------------------------Introduction to Data Science & Machine Learning* *------------------------------------------------------ [Support vector machines (SVMs)](http://scikit-learn.org/stable/modules/svm.html) are a particularly powerful and flexible class of supervised algorithms for both classification and regression.In this section, we will develop the intuition behind support vector machines and their use in classification problems.Recommended Bibliography:- Chapter 7 of Pattern Recognition and Machine Learning, , 2006- Chapter 17 of [Bayesian Reasoning and Machine Learning](http://web4.cs.ucl.ac.uk/staff/D.Barber/textbook/090310.pdf), , 2010- Chapter 14 of Machine Learning: a probabilistic perspective, 2012- This excellent [post](http://efavdb.com/svm-classification/) by Andrew Ng. We begin with the standard imports:%matplotlib inline import numpy as np import matplotlib.pyplot as plt from scipy import stats # use seaborn plotting defaults import seaborn as sns; sns.set()Consider the simple case of a classification task, in which the two classes of points are well separated.
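A linear discriminative classifier of the kind plotted in the next cells assigns a class from the sign of a score $y(\mathbf{x})=\mathbf{w}^T\mathbf{x}+w_0$. As a tiny preview of that rule, here is a numeric sketch; the variable names and query points are made up for illustration, and the coefficients simply re-express the black candidate line $y=1.5x-9.3$ from the next cell in implicit form:
import numpy as np
w_vec, w0_val = np.array([1.5, -1.0]), -9.3   # the line x2 = 1.5*x1 - 9.3 written as w.x + w0 = 0
pts = np.array([[8.0, 0.0], [2.0, 6.0]])      # two made-up query points
scores = pts @ w_vec + w0_val                 # positive on one side of the line, negative on the other
print(scores, np.sign(scores))                # the sign gives the predicted class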
Check out first what the [make blobs](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html) function does.Also, to learn about color maps in matplotlib check [here](https://matplotlib.org/users/colormaps.html)from sklearn.datasets.samples_generator import make_blobs X, y = make_blobs(n_samples=100, centers=2, random_state=10, cluster_std=3) #With random_state we fix the random seed #We separate away some data for test X_test = X[50:-1,:] y_test = y[50:-1] X = X[:50,:] y = y[:50] print("The shape of X is ",X.shape) print("y is a label vector. The first 10 labels are:", y[:10]) plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu') plt.rcParams["figure.figsize"] = [8,8]The shape of X is (50, 2) y is a label vector. The first 10 labels are: [1 1 1 0 1 1 0 1 0 0]A linear discriminative classifier would attempt to draw a straight line separating the two sets of data, and thereby create a model for classification. ** However, note there are many possible solutions!! **xfit = np.linspace(-5, 15) plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu') m,b = (1.5, -9.3) plt.plot(xfit, m * xfit + b, '-k') m,b = (1, -5.5) plt.plot(xfit, m * xfit + b, '-m') m,b = (-0.2,0.1) plt.plot(xfit, m * xfit + b, '-g') plt.rcParams["figure.figsize"] = [8,8]** Which one do you think separates best the data? **Lets plot some *test data* with the right category.xfit = np.linspace(-5, 15) plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu',label='Train') plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, s=50, cmap='RdBu',marker='x',label='Test') plt.legend() m,b = (1.5, -9.3) plt.plot(xfit, m * xfit + b, '-k') m,b = (1, -5.5) plt.plot(xfit, m * xfit + b, '-m') m,b = (-0.2,0.1) plt.plot(xfit, m * xfit + b, '-g') plt.rcParams["figure.figsize"] = [8,8]Maximizing the *Margin*Support vector machines offer one way to improve on this.The intuition is this: rather than simply drawing a zero-width line between the classes, we can draw around each line a *margin* of some width, up to the nearest point. The largest the margin is, the more robust the model is and generalizes better.Here is an example of how this might look:xfit = np.linspace(-5, 15) plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu') m,b,d = (1.5, -9.3,0.01) yfit = m * xfit + b plt.plot(xfit,yfit, '-k') plt.fill_between(xfit, yfit - d, yfit + d, edgecolor='none',color='#AAAAAA', alpha=0.4) m,b,d = (1, -5.5,1) yfit = m * xfit + b plt.plot(xfit,yfit, '-m') plt.fill_between(xfit, yfit - d, yfit + d, edgecolor='none',color='#AAAAAA', alpha=0.4) m,b,d = (-0.2,0.1,0.05) yfit = m * xfit + b plt.plot(xfit,yfit, '-g') plt.fill_between(xfit, yfit - d, yfit + d, edgecolor='none',color='#AAAAAA', alpha=0.4) plt.rcParams["figure.figsize"] = [8,8]In support vector machines, the line that maximizes this margin is the one we will choose as the optimal model.Support vector machines are an example of such a *maximum margin* estimator. A little bit of geometry: hyperplanes HyperplanesFirst of all, for $\mathbf{x}\in\mathbb{R}^D$, consider the function\begin{align}y(\mathbf{x}) = \mathbf{w}^T\mathbf{x}+w_0\end{align}Then, the set of points $\mathbf{x}\in\mathbb{R}^D$ such that $y(\mathbf{x})=0$ is called an [**hyperplane**](http://mathworld.wolfram.com/Hyperplane.html), a subspace of dimension $D-1$. E.g., for $D=2$, then the hyperplan is a line. For $D=3$ is a plane. 
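To make the definition concrete, here is a small numeric check for $D=2$; the particular $\mathbf{w}$ and $w_0$ below are arbitrary illustrative values, and the names (w_ex, w0_ex, y_ex) are ours. The zero set of $y(\mathbf{x})$ is the line $x_1+x_2=1$: points on it return exactly zero, and points off it return a nonzero value whose sign indicates the side.
import numpy as np
w_ex, w0_ex = np.array([1.0, 1.0]), -1.0      # hyperplane x1 + x2 - 1 = 0 (a line, since D=2)
y_ex = lambda x: w_ex @ x + w0_ex
print(y_ex(np.array([0.5, 0.5])))             #  0.0 -> on the hyperplane
print(y_ex(np.array([2.0, 2.0])))             #  3.0 -> positive side
print(y_ex(np.array([0.0, 0.0])))             # -1.0 -> negative side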
Some useful results with hyperplanesA) If $\mathbf{x}_A$ and $\mathbf{x}_B$ are two points in the hyperplane, then the vector $\mathbf{v}=\alpha(\mathbf{x}_A-\mathbf{x}_B)$ is *orthogonal* to $\mathbf{w}$ for $\alpha\in\mathbb{R}$. Thus, $\mathbf{w}$ is orthogonal to any vector **contained** in the hyperplane.B) The normal distance between the origin $\mathbf{0}$ and the closest point in the hyperplane is given by $-\frac{w_0}{||\mathbf{w}||_2}$, where a negative distance denotes that the hyperplane lies below the origin, and $||\mathbf{w}||_2=\sqrt{\mathbf{w}^T\mathbf{w}}$.C) The normal distance of any point $\mathbf{x}^*\in\mathbb{R}^D$ to the hyperplane is \begin{align}\frac{y(\mathbf{x}^*)}{||\mathbf{w}||_2}\end{align}This distance is positive for points **above** the hyperplane, and negative for points **below** the hyperplane. Some useful results with hyperplanes (Proofs)*Proof for A):* if $\mathbf{x}_A$ and $\mathbf{x}_B$ are two points in the hyperplane, then $\mathbf{w}^T\mathbf{x}_A+w_0=\mathbf{w}^T\mathbf{x}_B+w_0=0$. Thus, $\mathbf{w}^T\left(\mathbf{x}_A-\mathbf{x}_B\right)=0$.*Proof for B):* Note that, for any $\mathbf{x}$ (not necessarily in the hyperplane), the projection of $\mathbf{x}$ onto $\mathbf{w}$ is \begin{align}\frac{ \mathbf{w}^T\mathbf{x}}{||\mathbf{w}||_2} = \frac{||\mathbf{w}||_2||\mathbf{x}||_2\cos(\phi)}{||\mathbf{w}||_2}=||\mathbf{x}||_2\cos(\phi),\end{align}where $\phi$ is the angle between $\mathbf{x}$ and $\mathbf{w}$. Note that this projection is the normal distance between the $\mathbf{0}$ point and the hyperplane orthogonal to $\mathbf{w}$ that passes through $\mathbf{x}$.Further, if $\mathbf{x}$ belongs to the hyperplane $y(\mathbf{x}) = \mathbf{w}^T\mathbf{x}+w_0=0$, then \begin{align}||\mathbf{x}||_2\cos(\phi) = -\frac{w_0}{||\mathbf{w}||_2}\end{align}*Proof for C):* Given any point $\mathbf{x}$ in the hyperplane $y(\mathbf{x}) = \mathbf{w}^T\mathbf{x}+w_0=0$, the normal distance of $\mathbf{x}^*$ to the hyperplane is the projection of $(\mathbf{x}^*-\mathbf{x})$ onto $\mathbf{w}$ (since $\mathbf{w}$ is perpendicular to the plane). Hence, using $\mathbf{w}^T\mathbf{x}=-w_0$,\begin{align}\text{distance} = \frac{\mathbf{w}^T(\mathbf{x}^*-\mathbf{x})}{||\mathbf{w}||_2}=\frac{\mathbf{w}^T\mathbf{x}^*-\mathbf{w}^T\mathbf{x}}{||\mathbf{w}||_2}=\frac{\mathbf{w}^T\mathbf{x}^*+w_0}{||\mathbf{w}||_2}=\frac{y(\mathbf{x}^*)}{||\mathbf{w}||_2}\end{align} Support Vector MachinesAssume there exists an hyperplane that separates our data ** in a transformed space** $\mathcal{D}=(\phi(\mathbf{x}^{(i)}),t^{(i)})$, $i=1,\ldots,N$, where $\mathbf{x}\in\mathbb{R}^D$ and $t^{(i)}\in\{-1,+1\}$. Then, there must exist an hyperplane $y(\mathbf{x}) = \mathbf{w}^T\phi(\mathbf{x})+w_0=0$ that verifies\begin{align}t^{(i)}y(\mathbf{x}^{(i)})\geq 0, ~~ \forall (\mathbf{x}^{(i)},t^{(i)})\end{align} The SVM: maximizing the marginThe SVM optimization problem reads as follows\begin{align}\arg\max_{\mathbf{w},w_0} \left\{ \min_{i} \frac{t^{(i)}y(\mathbf{x}^{(i)})}{||\mathbf{w}||_2}\right\}\end{align}However, this can be written in a simpler way. Since the distance of any point $\mathbf{x}^{(i)}$ to the hyperplane is invariant to a scale of the form $\mathbf{w}\leftarrow \eta\mathbf{w}$, $w_0\leftarrow\eta w_0$, **we can freely fix $t^{(i)}y(\mathbf{x}^{(i)})=1$ for the point closest to the hyperplane, so that the minimum distance becomes $\frac{1}{||\mathbf{w}||_2}$**, rescaling the whole problem.
Thus, any point in the training set must verify\begin{align}t^{(i)}y(\mathbf{x}^{(i)})\geq 1, ~~ \forall (\mathbf{x}^{(i)},t^{(i)})\end{align}And the equivalent problem can be written follows:\begin{align}&\min_{\mathbf{w},w_0} = \frac{1}{2} ||\mathbf{w}||^2_2\\&\text{subject to}&t^{(i)}y(\mathbf{x}^{(i)})\geq 1, ~ i=1,\ldots,N\end{align}where we have introduced $||\mathbf{w}||^2_2$ as objective function and the factor $\frac{1}{2}$ for later convenience. This optimization problem is an example of a [**Quadratic Programming (QP)**](https://sites.math.washington.edu/~burke/crs/408f/notes/qp/qp1.pdf) optimization problem. Very efficient solvers are known for these kind of problems. Complexity scales cubic in the input dimension, i.e., $\mathcal{O}(D^3)$.Lets visualize the solution for our running example.from sklearn.svm import SVC # "Support vector classifier" model = SVC(kernel='linear', C=1E10) # We use a linear kernel (no transformation). # Also, we explain below the role of C model.fit(X, y)The following function plots the SVM decision boundaries for us:def plot_svc_decision_function(model, ax=None, plot_support=True): """Plot the decision function for a 2D SVC""" if ax is None: #If no figure handle is provided, it opens the current figure ax = plt.gca() xlim = ax.get_xlim() ylim = ax.get_ylim() # create grid to evaluate model x = np.linspace(xlim[0], xlim[1], 30) #30 points in the grid axis y = np.linspace(ylim[0], ylim[1], 30) Y, X = np.meshgrid(y, x) # We create a grid with the x,y coordinates defined above # From the grid to a list of (x,y) values. # Check Numpy help for ravel() xy = np.vstack([X.ravel(), Y.ravel()]).T P = model.decision_function(xy).reshape(X.shape) # plot decision boundary and margins # In levels we provide a list of floating point numbers indicating #the level curves to draw, in increasing order; e.g., to draw just the zero contour pass ax.contour(X, Y, P, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--']) # plot support vectors if plot_support: ax.scatter(model.support_vectors_[:, 0], model.support_vectors_[:, 1], s=300, linewidth=1, marker='p') ax.set_xlim(xlim) ax.set_ylim(ylim) plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn') plot_svc_decision_function(model,plot_support=False) plt.rcParams["figure.figsize"] = [8,8]Understanding the SVM solution: kernels and the dual problemThere's lot more that we can say about how SVM perform. To this end, we have to go deeper into the optimization problem itself:\begin{align}&\min_{\mathbf{w},w_0} = \frac{1}{2} ||\mathbf{w}||^2_2\\&\text{subject to}\\&t^{(i)}y(\mathbf{x}^{(i)})\geq 1, ~ i=1,\ldots,N\end{align} Introducing Lagrange MultipliersGiven the constraints of the problem, the Lagrange function that has to be optimized is of the form\begin{align}\mathcal{L}(\mathbf{w},w_0,\mathbf{a}) = \frac{||\mathbf{w}||^2_2}{2} -\sum_{i=1}^N a_i (t^{(i)}y(\mathbf{x}^{(i)})-1),\end{align}where $a_i\geq 0$, $i=1,\ldots,N$ are the Lagrange multipliers. If we compute the gradient of $\mathcal{L}(\mathbf{w},w_0,\mathbf{a})$ w.r.t. 
$\mathbf{w}$ and $w_0$ and set them equal to zero, we get the following conditions\begin{align}\mathbf{w} &=\sum_{i=1}^N a_i t^{(i)} \phi(\mathbf{x}^{(i)}), ~~~~~~0 = \sum_{i=1}^{N} a_i t^{(i)}\end{align} The Dual ProblemIf we introduce the above expressions in the Lagrange function, our optimization problem reads\begin{align}\mathcal{L}(\mathbf{a}) &= \sum_{i=1}^{N}a_i -\frac{1}{2}\sum_{i=1}^N \sum_{j=1}^N a_i a_j t^{(i)} t^{(j)} k(\mathbf{x}^{(i)},\mathbf{x}^{(j)})\\&\text{subject to} \\a_i&\geq 0, ~ i=1,\ldots,N\\0 &= \sum_{i=1}^{N} a_i t^{(i)}\end{align}where $k(\mathbf{x}^{(i)},\mathbf{x}^{(j)})=\phi(\mathbf{x}^{(i)})^T\phi(\mathbf{x}^{(j)})$ is the **kernel** between points $\mathbf{x}^{(i)}$ and $\mathbf{x}^{(j)}$. This problem is another instance of **Quadratic Programming**. Solving the problem in this dual space has complexity $\mathcal{O}(N^3)$. Given the solution, we classify a new point according to the sign of\begin{align}y(\mathbf{x}^*) = \sum_{i=1}^{N} a_i t^{(i)} k(\mathbf{x}^{(i)},\mathbf{x}^*)+w_0\end{align} Let's visualize the fit of a kernel SVM with the **RBF** kernelfrom sklearn.datasets.samples_generator import make_circles Xc, yc = make_circles(20, factor=0.25, noise=.1) model2 = SVC(kernel='rbf',C=1e6).fit(Xc, yc) f,ax = plt.subplots(1,2) ax[0].scatter(Xc[:, 0], Xc[:, 1], c=yc, s=50, cmap='autumn') ax[1].scatter(Xc[:, 0], Xc[:, 1], c=yc, s=50, cmap='autumn') plot_svc_decision_function(model2, ax=ax[1],plot_support=False) plt.rcParams["figure.figsize"] = [8,8]SVMs are sparse! There is even more we can say about the SVM solution. In fact, we will see that the SVM solution is determined only by a **subset** of training points, which are known as **support vectors**.Although we do not prove it here, given the problem\begin{align}\mathcal{L}(\mathbf{a}) &= \sum_{i=1}^{N}a_i -\frac{1}{2}\sum_{i=1}^N \sum_{j=1}^N a_i a_j t^{(i)} t^{(j)} k(\mathbf{x}^{(i)},\mathbf{x}^{(j)})\\&\text{subject to} \\a_i&\geq 0, ~ i=1,\ldots,N\\0 &= \sum_{i=1}^{N} a_i t^{(i)}\end{align}the [**Karush-Kuhn-Tucker (KKT)**](http://www.onmyphd.com/?p=kkt.karush.kuhn.tucker) conditions require that the solution of the problem must verify the following. For $i=1,\ldots,N$,- $a_i\geq 0$- $t^{(i)}y(\mathbf{x}^{(i)})-1\geq 0$- $a_i\left(t^{(i)}y(\mathbf{x}^{(i)})-1\right)=0$If you want to understand how to prove these results, check out Appendix E in Bishop's book. Support VectorsThe third condition above implies that, for any point in our training set, either $a_i=0$ or $t^{(i)}y(\mathbf{x}^{(i)})=1$. This means that either the point **does not participate in the SVM solution** or it lies **exactly on the margin**. Points for which $a_i>0$ are called **support vectors** and are the only ones defining the separation hyperplane and the prediction for future values!:\begin{align}\mathbf{w} &=\sum_{i=1}^N a_i t^{(i)} \phi(\mathbf{x}^{(i)})\\w_0 &= \frac{1}{N_S}\sum_{i: a_i>0} \left(t^{(i)}-\sum_{j: a_j>0} a_j t^{(j)}k(\mathbf{x}^{(i)},\mathbf{x}^{(j)})\right)\\y(\mathbf{x}^*) &= \sum_{i=1}^{N} a_i t^{(i)} k(\mathbf{x}^{(i)},\mathbf{x}^*)+w_0\end{align}The fraction of support vectors w.r.t. the total number of training points must be read as a measure of the complexity of the model and how much it is exposed to **overfitting**.
The more we have, the poorer the generalization we can expect.Let's plot the support vectors in the two examples we have done before.f,ax = plt.subplots(1,2) ax[0].scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn') plot_svc_decision_function(model, ax=ax[0],plot_support=True) ax[1].scatter(Xc[:, 0], Xc[:, 1], c=yc, s=50, cmap='autumn') plot_svc_decision_function(model2, ax=ax[1],plot_support=True)Dealing with Non-separable datasetsSo far, the whole SVM formulation builds on the assumption that the data is separable by an hyperplane in a transformed space. I.e., that there exists an hyperplane $y(\mathbf{x}) = \mathbf{w}^T\phi(\mathbf{x})+w_0=0$ that verifies\begin{align}t^{(i)}y(\mathbf{x}^{(i)})\geq 1, ~~ \forall (\mathbf{x}^{(i)},t^{(i)}),\end{align}where points with equality are the support vectors. In order to **relax** this assumption and prevent **overfitting**, we could allow certain **training** points to be misclassified. We introduce the so-called **slack** variables:\begin{align}t^{(i)}y(\mathbf{x}^{(i)})\geq 1-\xi_i, ~~ \forall (\mathbf{x}^{(i)},t^{(i)})\end{align}where $\xi_i\geq 0$:- Training points for which $\xi_i\leq 1$ are correctly classified.- Training points for which $\xi_i > 1$ are on the wrong side of the decision boundary $y(\mathbf{x})=0$. Optimization problem with slack variablesThe optimization problem can now be written as follows:\begin{align}&\min_{\mathbf{w},w_0,\boldsymbol{\xi}} \frac{1}{2} ||\mathbf{w}||^2_2 + C \sum_{i=1}^{N}\xi_i\\&\text{subject to}\\&t^{(i)}y(\mathbf{x}^{(i)})\geq 1-\xi_i, ~ i=1,\ldots,N\\&\xi_i\geq 0, ~ i=1,\ldots,N\end{align}where the regularization is controlled by the $C$ parameter. For $C\rightarrow\infty$ the only admissible solution tends to $\xi_i=0$ for $i=1,\ldots,N$, and the original (separable) SVM formulation is recovered.The Lagrange function to be optimized is now\begin{align}\mathcal{L}(\mathbf{w},w_0,\mathbf{a},\mathbf{b}) = \frac{||\mathbf{w}||^2_2}{2} + C \sum_{i=1}^{N}\xi_i -\sum_{i=1}^N a_i (t^{(i)}y(\mathbf{x}^{(i)})-1+\xi_i)-\sum_{i=1}^{N}b_i\xi_i,\end{align} KKT conditionsThe KKT conditions associated with the new optimization problem are as follows. For $i=1,\ldots,N$$$\xi_i\geq 0$$$$a_i\geq 0$$$$b_i\geq 0$$$$t^{(i)}y(\mathbf{x}^{(i)})-1+\xi_i\geq 0$$$$b_i\xi_i=0$$$$a_i\left[t^{(i)}y(\mathbf{x}^{(i)})-1+\xi_i\right]=0$$ Support VectorsAs before, the last condition implies that, for any point in our training set, either $a_i=0$ or $t^{(i)}y(\mathbf{x}^{(i)})=1-\xi_i$. This means that either the point **does not participate in the SVM solution** or its margin constraint is **active** (it lies on the margin, or violates it when $\xi_i>0$). Points for which $a_i>0$ are called **support vectors** and are the only ones defining the separation hyperplane and the prediction for future values! Dual problemIf we compute the gradient of the Lagrange function w.r.t. $\mathbf{w}$, $w_0$ and $\xi_i$ and set it equal to zero, we get the following conditions\begin{align}\mathbf{w} &=\sum_{i=1}^N a_i t^{(i)} \phi(\mathbf{x}^{(i)}), ~~~~~~0 = \sum_{i=1}^{N} a_i t^{(i)}, ~~~~~~~ a_i = C-b_i\end{align}If we substitute them into the Lagrange function, we derive the dual optimization problem, again with complexity $\mathcal{O}(N^3)$.
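Since the fraction of support vectors is the complexity measure discussed above, one quick way to see the effect of $C$ is to refit the classifier for several values and count them. This is only a minimal sketch, assuming the Xc, yc circles data from the earlier cells are still in scope; smaller $C$ allows more slack, so more points typically end up as support vectors:
for C_val in [0.1, 10, 1e6]:
    clf = SVC(kernel='rbf', C=C_val).fit(Xc, yc)
    frac = len(clf.support_) / len(Xc)   # clf.support_ holds the indices of the support vectors
    print(f"C={C_val:g}: {len(clf.support_)} support vectors ({frac:.0%} of the training points)")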
Lets play with parameter $C$ with another exampleX, y = make_circles(100, factor=0.5, noise=.2) model_lin = SVC(kernel='linear',C=10^8 ).fit(X, y) f,ax = plt.subplots(1,2) ax[0].scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn') ax[1].scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn') plot_svc_decision_function(model_lin, ax=ax[1],plot_support=True) # Try to plot the suppor vectors by changing the flag plt.rcParams["figure.figsize"] = [8,8]Clearly, a linear Kernel does not do the jobmodel_rbf = SVC(kernel='rbf',C=1e6).fit(X, y) f, axes = plt.subplots(1, 2) axes[0].scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn') plot_svc_decision_function(model_rbf ,ax=axes[0], plot_support=False) axes[1].scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn') plot_svc_decision_function(model_rbf ,ax=axes[1], plot_support=True) plt.rcParams["figure.figsize"] = [20,15]And now with $C=10$ ...model_rbf_2 = SVC(kernel='rbf',C=1e1).fit(X, y) f, axes = plt.subplots(1, 2) axes[0].scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn') plot_svc_decision_function(model_rbf_2,ax=axes[0], plot_support=False) axes[1].scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn') plot_svc_decision_function(model_rbf_2,ax=axes[1], plot_support=True)Predicting Housing Prices*Using Linear/Nonlinear Regression* ---by **** Import Librariesimport numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn import datasets from sklearn import metrics from sklearn.naive_bayes import GaussianNB from sklearn.model_selection import train_test_split import sklearn.model_selection as model_selection from sklearn import preprocessing from sklearn.linear_model import Ridge import statsmodels.api as smData CollectionThis data was provided by CNM Ingenuity. **train & test_data**from google.colab import drive drive.mount('/content/drive') paste_train_path_here = '/content/drive/My Drive/Colab Notebooks/csv files/Housing-Data.csv' paste_blind_path_here = '/content/drive/My Drive/Colab Notebooks/csv files/Housing_Data_Blind_Test.csv' train = pd.read_csv(paste_train_path_here) test_data = pd.read_csv(paste_blind_path_here) print(test_data.shape, train.shape)(246, 79) (2637, 81)Perameterspd.set_option('display.max_rows', 150) pd.set_option('display.max_columns', 150)Data Cleaning Let's have a look at our dataframe...train.head() train.info() train_copy = train.copy()Drop appropriate rows and columns **Drop row 2001 because basement information is incomplete.**train_copy = train_copy.drop(2001,axis=0)**Drop rows 162 and 329 because Year Garage Built information is inconsistent.**train_copy = train_copy.drop(162,axis=0) train_copy = train_copy.drop(329,axis=0)**Drop row 1555 because it is the only instance with a tennis court ('TenC')**train_copy = train_copy.drop(1555,axis=0)**Remove 'PID' feature.**train_copy.drop('PID',axis=1,inplace=True)**Drop 'Garage Area'** We will drop 'Garage Area' and see if it improves the prediction, supposing that it is over correlated to 'Garage Cars' and the amount of cars the garage holds is more important than how big it is.train_copy.drop('Garage Area',axis=1,inplace=True)Dealing with NaNs **First let's fill null values with appropriate substitutions...**train_copy['Lot Frontage'] = train_copy['Lot Frontage'].fillna(0.0) train_copy['Alley'] = train_copy['Alley'].fillna('No Alley') train_copy['Mas Vnr Type'] = train_copy['Mas Vnr Type'].fillna('None') train_copy['Mas Vnr Area'] = train_copy['Mas Vnr Area'].fillna(0.0) train_bsmt_null = train_copy[train_copy['BsmtFin Type 
2'].isnull()].copy() train_copy['Bsmt Qual'] = train_copy['Bsmt Qual'].fillna('No Basement') train_copy['Bsmt Cond'] = train_copy['Bsmt Cond'].fillna('No Basement') train_copy['Bsmt Exposure'] = train_copy['Bsmt Exposure'].fillna('No Basement') train_copy['BsmtFin Type 1'] = train_copy['BsmtFin Type 1'].fillna('No Basement') train_copy['BsmtFin Type 2'] = train_copy['BsmtFin Type 2'].fillna('No Basement') train_copy['BsmtFin SF 1'] = train_copy['BsmtFin SF 1'].fillna(0.0) train_copy['BsmtFin SF 2'] = train_copy['BsmtFin SF 2'].fillna(0.0) train_bsmt_null = train_copy[train_copy['Bsmt Unf SF'].isnull()].copy() train_copy['Bsmt Unf SF'] = train_copy['Bsmt Unf SF'].fillna(0.0) train_copy['Total Bsmt SF'] = train_copy['Total Bsmt SF'].fillna(0.0) train_copy['Bsmt Full Bath'] = train_copy['Bsmt Full Bath'].fillna(0.0) train_copy['Bsmt Half Bath'] = train_copy['Bsmt Half Bath'].fillna(0.0) train_copy['Fireplace Qu'] = train_copy['Fireplace Qu'].fillna('No Fireplace') train_copy['Garage Type'] = train_copy['Garage Type'].fillna('No Garage') train_copy['Garage Yr Blt'].fillna(train_copy['Year Built'], inplace=True) train_copy['Garage Finish'] = train_copy['Garage Finish'].fillna('No Garage') train_copy['Garage Qual'] = train_copy['Garage Qual'].fillna('No Garage') train_copy['Garage Cond'] = train_copy['Garage Cond'].fillna('No Garage') train_copy['Pool QC'] = train_copy['Pool QC'].fillna('No Pool') train_copy['Fence'] = train_copy['Fence'].fillna('No Fence') train_copy['Misc Feature'] = train_copy['Misc Feature'].fillna('No Misc Features') train_copy.isna().sum().sum()The devil is in the details... **Note that feature 'MS Subclass' is actually a categorical feature. Change numerical values to 'String' values.**train_copy['MS SubClass'] = train_copy['MS SubClass'].apply(str) train_copy['Full Bath'] = train_copy['Full Bath'].apply(str) train_copy['Half Bath'] = train_copy['Half Bath'].apply(str)**Found outliers due to NON "Normal" data instances. Removing all NON "Normal" data instances.**train_copy = train_copy[train_copy['Sale Condition']=='Normal']Cleaned Data **Let's make a new variable for the cleaned dataset.**train_clean = train_copy.copy()**All NaNs have been removed.**train_clean.info() Int64Index: 2163 entries, 0 to 2636 Data columns (total 79 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 MS SubClass 2163 non-null object 1 MS Zoning 2163 non-null object 2 Lot Frontage 2163 non-null float64 3 Lot Area 2163 non-null int64 4 Street 2163 non-null object 5 Alley 2163 non-null object 6 Lot Shape 2163 non-null object 7 Land Contour 2163 non-null object 8 Utilities 2163 non-null object 9 Lot Config 2163 non-null object 10 Land Slope 2163 non-null object 11 Neighborhood 2163 non-null object 12 Condition 1 2163 non-null object 13 Condition 2 2163 non-null object 14 Bldg Type 2163 non-null object 15 House Style 2163 non-null object 16 Overall Qual 2163 non-null int64 17 Overall Cond [...]**Re-index the dataframe.**train_clean = train_clean.reset_index(drop=True)Exploratory Data Analysis **Make a new dataframe with just the numeric features. 
We will do some analysis before adding features with categorical values (using onehot encoding).**train_numeric = train_clean.select_dtypes(include=['int64','float64']) train_numeric.info() RangeIndex: 2163 entries, 0 to 2162 Data columns (total 33 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Lot Frontage 2163 non-null float64 1 Lot Area 2163 non-null int64 2 Overall Qual 2163 non-null int64 3 Overall Cond 2163 non-null int64 4 Year Built 2163 non-null int64 5 Year Remod/Add 2163 non-null int64 6 Mas Vnr Area 2163 non-null float64 7 BsmtFin SF 1 2163 non-null float64 8 BsmtFin SF 2 2163 non-null float64 9 Bsmt Unf SF 2163 non-null float64 10 Total Bsmt SF 2163 non-null float64 11 1st Flr SF 2163 non-null int64 12 2nd Flr SF 2163 non-null int64 13 Low Qual Fin SF 2163 non-null int64 14 Gr Liv Area 2163 non-null int64 15 Bsmt Full Bath 2163 non-null float64 16 Bsmt Half Bath 2163 non-null float64 17 Bedroom AbvGr [...]**Create a variable that shows correlations to the sales price.**train_numeric_corr = train_numeric.corr()['SalePrice'] type(train_numeric_corr)Check out some of the correlations with the heat map...sns.set() plt.figure(figsize=(20,12)) sns.heatmap(train.corr(), cmap='BuPu', annot=False);**Make a 'golden features list' that has the absolute values of the correlations from highest to lowest, the drop the sales price column because we don't care that it's correlated to itself.** Golden Features List **numeric values with high correlation**golden_features_list = train_numeric_corr.abs().sort_values(ascending=False) golden_features_list = golden_features_list.drop('SalePrice',axis=0) golden_features_list golden_features_list.shape golden_features_list.index[:4] #assign sales price as variable 'outcomes' outcomes = train_numeric['SalePrice']Trying out different models **Regression with one numerical feature (top correlation).**#created a dataframe with one feature from goldenf features list train_numeric_one_feature = pd.DataFrame(train_numeric[golden_features_list.index[:1]].copy()) #add a '1' for the y intercept train_numeric_one_feature['Ones'] = 1 train_numeric_one_featureCreate a OLS (ordinary least squares) model with sm (statsmodels.api)#fit the model model_one_feature = sm.OLS(outcomes,train_numeric_one_feature).fit() #Store prediction in this variable predictions_one_feature = model_one_feature.predict(train_numeric_one_feature) model_one_feature.summary() #Check the R-squared value for a good fit...(Above) has an R-squared value of 0.628... not greatplt.figure(figsize=(12,10)) plt.scatter(outcomes,predictions_one_feature) plt.plot([0, max(predictions_one_feature)],[0, max(predictions_one_feature)], c='red');We can see that the result(above) is... okay - kind of choppy. **Let's add another feature. Create a new dataframe with two features...**train_numeric_two_features = pd.DataFrame(train_numeric[golden_features_list.index[:2]].copy()) #adding our '1's again... 
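# as before, the explicit constant column is needed because sm.OLS does not add an intercept on its own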
train_numeric_two_features['Ones'] = 1 train_numeric_two_features**Create the model with two features and check the fit...**model_two_features = sm.OLS(outcomes,train_numeric_two_features).fit() predictions_two_features = model_two_features.predict(train_numeric_two_features) model_two_features.summary()(above) R-squared value has increased a bit, but still not impressiveplt.figure(figsize=(12,10)) plt.scatter(outcomes,predictions_two_features) plt.plot([0, max(predictions_two_features)],[0, max(predictions_two_features)], c='red'); plt.xlabel('Actual Sales Price',fontsize=18); plt.ylabel('Predictions',fontsize=18); plt.axis('equal'); plt.axis('square');(Above) We can see it's starting to line up, but there are some outliers and nonlinear patterns. Observing change in R-squared **Let's investigate the change in R-squared values when adding new features...**numLoops = 37 r_squared_array = np.zeros(numLoops) for idx in range(0,numLoops): train_numeric_temp_df = pd.DataFrame(train_numeric[golden_features_list.index[:(idx+1)]].copy()) #add '1's for y intercept train_numeric_temp_df['Ones'] = 1 model_temp = sm.OLS(outcomes,train_numeric_temp_df).fit() r_squared_array[idx] = model_temp.rsquared #Declare plot size plt.figure(figsize=(12,10)); #Plot 'r_squared_array' plt.plot(r_squared_array); #label the x & y axes plt.xlabel('Number of Features',fontsize=18); plt.ylabel('R-Squared',fontsize=18); #Change font for the ticks plt.xticks(fontsize=12); plt.yticks(fontsize=12);(Above) Visualize the increase in R-Squared as you increase the number of features in the regression. We can see that the R-squared value caps at about ~25 features. **Visualize the increase in prediction (on the same training data) as you increase the number of features in the regression.**#create a function for the percentage of error def root_mean_squared_percentage_error(y_true, y_predicted): #root mean squared percentage errror is rmspe rmspe = np.sqrt(np.mean(np.square((y_true - y_predicted)/y_true)))*100 return rmspeLook at the percentage of error for prediction with 1 feature... it's 26.1%y_predict_one_feature = model_one_feature.predict(train_numeric_one_feature) total_percent_error = root_mean_squared_percentage_error(outcomes, y_predict_one_feature) print(total_percent_error,"%")26.088533668732804 %Do the same with two features... it's 22.5%... 
a lil bettery_predict_two_features = model_two_features.predict(train_numeric_two_features) total_percent_error = root_mean_squared_percentage_error(outcomes, y_predict_two_features) print(total_percent_error,"%")22.45263019837068 %**Make a 'for loop' showing the percentage error with each feature added...**numLoops = 37 percentage_error_array = np.zeros(numLoops) for idx in range(0,numLoops): train_numeric_temp_df = pd.DataFrame(train_numeric[golden_features_list.index[:(idx+1)]].copy()) train_numeric_temp_df['Ones'] = 1 model_temp = sm.OLS(outcomes, train_numeric_temp_df).fit() predict_temp = model_temp.predict(train_numeric_temp_df) percentage_error_array[idx] = root_mean_squared_percentage_error(outcomes, predict_temp) plt.figure(figsize=(12,10)); #Plot array, add argument for dots instead of a line plt.plot(percentage_error_array,'.'); plt.xlabel('Number of Features', fontsize=18); plt.ylabel('Percentage Error', fontsize=18); plt.xticks(fontsize=12); plt.yticks(fontsize=12); y_predict_two_features[y_predict_two_features>400000] train_clean.iloc[670,:]Data Processing Cross Validation (Numeric Values) **Let's see cross validation for the numerical columns from the golden features list.**res = 100 percentage_error_mean = np.zeros(36) for idx in range(0,36): print(idx) train_numeric_temp_df = pd.DataFrame(train_numeric[golden_features_list.index[:(idx+1)]]).copy() train_numeric_temp_df['Ones'] = 1 percentage_error_array = np.zeros(res) for idx2 in range(0, res): x_train, x_test, y_train, y_test = model_selection.train_test_split(train_numeric_temp_df,outcomes,test_size=0.1) model_temp = sm.OLS(y_train, x_train).fit() predict_temp = model_temp.predict(x_test) percentage_error_array[idx2] = root_mean_squared_percentage_error(y_test, predict_temp) percentage_error_mean[idx] = percentage_error_array.mean() print(percentage_error_mean) print(percentage_error_mean.mean()) plt.figure(figsize=(12,10)); plt.plot(percentage_error_mean);Scaling (Robust)train_numeric_all_features = pd.DataFrame(train_numeric[golden_features_list.index[:36]].copy()) scaler = preprocessing.RobustScaler() train_numeric_robust = scaler.fit_transform(train_numeric_all_features) train_numeric_robust = pd.DataFrame(train_numeric_robust, columns=train_numeric_all_features.columns) train_numeric_robust['Ones'] = 1 train_numeric_robust.head() model_robust_features = sm.OLS(outcomes,train_numeric_robust).fit() predictions_robust_features = model_robust_features.predict(train_numeric_robust) model_robust_features.summary() type(train_numeric_robust) train_numeric_robust type(model_robust_features)One-Hot Encoding An example with one feature one-hot encodedtrain_neighborhood = pd.DataFrame(train_clean['Neighborhood'].copy()) train_one_hot = pd.get_dummies(train_neighborhood['Neighborhood']) train_one_hot train_numeric_one_hot = pd.concat([train_numeric, train_one_hot], axis=1) train_numeric_one_hot.drop(['SalePrice'], axis=1, inplace=True) train_numeric_one_hot['Ones'] = 1 model_one_hot = sm.OLS(outcomes, train_numeric_one_hot).fit() predict_one_hot = model_one_hot.predict(train_numeric_one_hot) percentage_error = root_mean_squared_percentage_error(outcomes, predict_one_hot) percentage_errorMaking a list of all the non-numeric featurestrain_non_numeric = train_clean.select_dtypes(include=['object']) train_non_numeric.info() train_non_numericOne hot encoding all non-numeric featurestrain_objects = pd.DataFrame(train_non_numeric.copy()) train_one_hot_all = pd.get_dummies(train_objects) train_one_hot_all['SalePrice'] = 
train_clean['SalePrice'] train_one_hot_all.info() RangeIndex: 2163 entries, 0 to 2162 Columns: 290 entries, MS SubClass_120 to SalePrice dtypes: int64(1), uint8(289) memory usage: 627.5 KBAdding those features to our select numeric featurestrain_numeric_one_hot_all = pd.concat([train_numeric_robust, train_one_hot_all], axis=1) train_numeric_one_hot_all.drop(['SalePrice'], axis=1, inplace=True) train_numeric_one_hot_all['Ones'] = 1 model_one_hot_all = sm.OLS(outcomes, train_numeric_one_hot_all).fit() predict_one_hot_all = model_one_hot_all.predict(train_numeric_one_hot_all) percentage_error_total_one_hot = root_mean_squared_percentage_error(outcomes, predict_one_hot_all) percentage_error_total_one_hotNonlinear Fitting Determine the best power to use for nonlinear fitting (outcomes) with cross-validation# Look for 'powNum' that gives us the optimal (lowest) percentage error. intArray = 1/np.arange(1,11,1) print(intArray) numInnerLoops = 100 # Pre-allocate outer loop result array percentage_error_mean = np.zeros(len(intArray)) for outerIdx, powNum in enumerate(intArray): print('idx = ',outerIdx) # Create dataframe for prediction #train_nonlinear = train_numeric_one_hot_all.drop(['SalePrice'], axis=1).copy() #train_nonlinear['Ones'] = 1 # Pre-allocate inner result array percentage_error = np.zeros(numInnerLoops) # Cross validation loop for innerIdx in range(0,numInnerLoops): # Cross validation process: Split data, train model, predict from model, check accuracy X_train, X_test, y_train, y_test = model_selection.train_test_split(train_numeric_one_hot_all, outcomes, test_size=0.2) # For training use X_train, y_train y_train_with_power = y_train.pow(powNum) # Set training outcome to a power (powNum) of the 'SalePrice' model_loop_temp = sm.OLS(y_train_with_power, X_train).fit() # For prediction use X_test (didn't train on it) predict_loop_temp = model_loop_temp.predict(X_test) final_predict_loop_temp = predict_loop_temp.pow(1/powNum) # Set final prediction outcome to the power (1/powNum) # Check accuracy of predict_loop_temp versus y_test (real outcomes) percentage_error[innerIdx] = root_mean_squared_percentage_error(y_test, final_predict_loop_temp) percentage_error_mean[outerIdx] = percentage_error.mean() plt.plot(percentage_error_mean);Above shows us that best power number is 1/3percentage_error_mean.mean()Ridge Regressionmodel_one_hot = sm.OLS(outcomes, train_numeric_one_hot_all).fit() predict_one_hot = model_one_hot.predict(train_numeric_one_hot_all) percentage_error_one_hot = root_mean_squared_percentage_error(outcomes,predict_one_hot) percentage_error_one_hot alpha_value = [1e-3, 1e-2, 1e-1, 1, 10, 100, 1000] percentage_error_ridge = np.zeros(len(alpha_value)) numLoops = 250 for idx, alpha_val in enumerate(alpha_value): print('idx = ',idx) percentage_error_temp = np.zeros(numLoops) ridgereg = Ridge(alpha=alpha_val, normalize=False) for innerIdx in range(0,numLoops): X_train, X_test, y_train, y_test = model_selection.train_test_split(train_numeric_one_hot_all, outcomes, test_size=0.1) ridgereg.fit(X_train, y_train) y_pred = ridgereg.predict(X_test) percentage_error_temp[innerIdx] = root_mean_squared_percentage_error(y_test, y_pred) percentage_error_ridge[idx] = percentage_error_temp.mean() plt.plot(percentage_error_ridge); print(percentage_error_ridge) print(percentage_error_ridge.mean()) percentage_error_ridge.mean()Above shows best alpha value is 10alpha_value = [10] best_powNum = 1/3 percentage_error_ridge = np.zeros(len(alpha_value)) numLoops = 1000 for idx, alpha_val in 
enumerate(alpha_value): print('idx = ',idx) percentage_error_temp = np.zeros(numLoops) ridgereg = Ridge(alpha=alpha_val, normalize=False) for innerIdx in range(0,numLoops): X_train, X_test, y_train, y_test = model_selection.train_test_split(train_numeric_one_hot_all, outcomes, test_size=0.1) ridgereg.fit(X_train, y_train.pow(best_powNum)) y_pred = ridgereg.predict(X_test) full_y_pred = np.power(y_pred, 1/best_powNum) percentage_error_temp[innerIdx] = root_mean_squared_percentage_error(y_test, full_y_pred) percentage_error_ridge[idx] = percentage_error_temp.mean() # plt.plot(percentage_error_ridge); print(percentage_error_ridge) print(percentage_error_ridge.mean()) train_numeric_one_hot_allFinal Model# Set our optimized variables. best_powNum = 1/3 best_alpha = 10 # Save final outcomes outcomes_final = train_numeric['SalePrice'].copy() # Save final numeric features train_numeric_final = train_numeric.drop(['SalePrice'], axis=1).copy()Produce final model using ridge regression.model_final_ridge = Ridge(alpha=best_alpha, normalize=False) model_final_ridge.fit(train_numeric_one_hot_all, outcomes_final.pow(best_powNum)) train_numeric_one_hot_all.shapeProcess the blind test data. Load blind test data. Examine blind test data.test_data.shapeInsure there are no NaNs.test_data.isna().sum().sum()Change 'MS Subclass' from int64 to str.test_data['MS SubClass'] = test_data['MS SubClass'].apply(str) test_data['Full Bath'] = test_data['Full Bath'].apply(str) test_data['Half Bath'] = test_data['Half Bath'].apply(str)Make a dataframe of the numerical features used to train the final model.test_data_numeric = test_data[train_numeric_final.columns]Scaling the test data (Robust)test_numeric_all_features = pd.DataFrame(test_data_numeric[golden_features_list.index[:36]].copy()) scaler = preprocessing.RobustScaler() test_numeric_robust = scaler.fit_transform(test_numeric_all_features) test_numeric_robust = pd.DataFrame(test_numeric_robust, columns=test_numeric_all_features.columns) test_numeric_robust['Ones'] = 1 test_numeric_robust.head() type(test_numeric_robust) test_numeric_robust.head()One-Hot the Test Data Make a dataframe of the categorical features (before one hot encoding) used to train the final model.test_data_category = test_data[train_non_numeric.columns]One hot encode the test_data categories used to train the final model.test_category_one_hot = pd.get_dummies(test_data_category) test_data_one_hot = pd.concat([test_numeric_robust, test_category_one_hot], axis=1) test_data_one_hot['Ones'] = 1 test_data_one_hot.shapeCreate DataFrame with Correct Size Get the number of rows (data instances) in the test_data.numRows = test_data_one_hot.shape[0] numRowsCreat a dataframe of all zeros with the size of: Number of rows equal to the number of data instances in the test_data; and Columns equal to the features used to train the final model. 
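As a quick aside (not part of the notebook's own pipeline), the zero-frame-plus-intersection step can also be expressed in a single call with pandas reindex; this is only a sketch, assuming the test_data_one_hot and train_numeric_one_hot_all dataframes built above.

```python
# Hypothetical alternative to the zeros dataframe + intersection assignment below:
# align the one-hot encoded test data to the training columns in one step.
# Columns missing from the test set are filled with 0; columns the model never saw are dropped.
test_data_aligned = test_data_one_hot.reindex(
    columns=train_numeric_one_hot_all.columns, fill_value=0
)
```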
You will get an error if the sizes are not the same.test_data_full = pd.DataFrame(0, index=np.arange(numRows), columns=train_numeric_one_hot_all.columns)We need to find the intersection of the features used to train the model (train_numeric_one_hot_all) and the features in the test_data set (test_data_one_hot).column_intersection = test_data_one_hot.columns.intersection(train_numeric_one_hot_all.columns)Populate DataFrame with Intersection Values Populate the dataframe of zeros with the intersection features from the code block above.test_data_full[column_intersection] = test_data_one_hot[column_intersection] test_data_full.head()Final Prediction Predict using ridge regression.test_predict_final_ridge = model_final_ridge.predict(test_data_full) full_test_predict_final_ridge = np.power(test_predict_final_ridge, 1/best_powNum) full_test_predict_final_ridge_result = pd.DataFrame(full_test_predict_final_ridge,columns=['Result']) full_test_predict_final_ridge_result.to_csv('/content/drive/My Drive/Colab Notebooks/csv files/Blind_Test_Predictions.csv', index=False) temp = pd.read_csv('/content/drive/My Drive/Colab Notebooks/csv files/Blind_Test_Predictions.csv') temp.head()Conclusion - We can increase prediction accuracy by using numerical features that are highly correlated with the sales price.- Scaling the numerical values can help by giving us more useful coefficients.- Nonlinear fitting can decrease the error percentage by dealing with outliers that don't fit the linear model.- We can improve the model even more by breaking out string-type values into additional features with one-hot encoding.- Too many features can lead to over-fitting the model, which decreases its predictive power, but...- We can address over-fitting with a regularized regression technique such as ridge regression.Using PyEDF library for Tabular data generationThe pyedflib reader can be used to create an object that reads EDF files and builds a dataframe of channels, with each sample represented as a column and each channel of the multichannel data as a row. This dataframe can be transposed to create a dataframe with `n-channel` columns and `number of samples` rows. 
This can be compared to see the uniformity in the dataset.import pyedflib import numpy as np import pandas as pd import os import warnings warnings.filterwarnings("ignore") def read_edf(): ''' Reads the edf files from the data folder ''' path = os.getcwd() + '\\data\\' files = [] print("Reading files...") for f in os.listdir(path): files.append(pyedflib.EdfReader(path+f)) print(f"Reading {f} now..") print('Reading successfully completed!') return files def get_shape(files): ''' Gives the number of samples in the EEG data ''' for index, f in enumerate(files): n = f.signals_in_file signal_labels = f.getSignalLabels() sigbufs = np.zeros((n, f.getNSamples()[0])) for i in np.arange(n): sigbufs[i, :] = f.readSignal(i) print(f'Number of samples in EEG data of patient {index+1}: {sigbufs.shape[1]}') return ("Completed Reading") def show_frame(files): ''' Displays the dataframe version and describes the data for each patient ''' for index, f in enumerate(files): print(f'Patient {index+1}: ') n = f.signals_in_file signal_labels = f.getSignalLabels() sigbufs = np.zeros((n, f.getNSamples()[0])) df = pd.DataFrame(sigbufs, index = signal_labels) df_transpose = df.transpose() print(df_transpose.head(4)) print("-----------------------------"*2) print(df_transpose.info()) print("-----------------------------"*2) print(df_transpose.describe()) print("-----------------------------"*2) return "All frames displayed!" files = read_edf() get_shape(files) show_frame(files)script to apply SSK BO over a set of candidate strings we demonstrate on the task of finding moelcules with high scoresimport numpy as np import pickle import gzip import emukit import re import matplotlib.pyplot as plt from emukit.core.initial_designs import RandomDesign from emukit.core import ParameterSpace from emukit.core.optimization import RandomSearchAcquisitionOptimizer from emukit.bayesian_optimization.loops import BayesianOptimizationLoop from emukit.bayesian_optimization.acquisitions import ExpectedImprovement from emukit.core.loop import FixedIterationsStoppingCondition import warnings warnings.filterwarnings('ignore') #import our code from boss.code.parameters.candidate_parameter import CandidateStringParameter from boss.code.optimizers.StringGeneticAlgorithmAcquisitionOptimizer import StringGeneticProgrammingOptimizer from boss.code.emukit_models.emukit_bow_model import BOW_model from boss.code.emukit_models.emukit_linear_model import linear_model from boss.code.emukit_models.emukit_ssk_model import SSK_modelExplain Methods# we perform optimziation using our SSK-approach and random search # VAE baselines are availible for Grammar VAEs and Character VAES at https://github.com/mkusner/grammarVAECollect candidate strings# get 250,000 candidate molecules file = gzip.GzipFile("../example_data/SMILES/SMILES.gzip", 'rb') data = file.read() smiles_full = pickle.loads(data) file.close() # get their scores file = gzip.GzipFile("../example_data/SMILES/TARGETS.gzip", 'rb') data = file.read() targets_full = pickle.loads(data) file.close() # for tutorial only keep strings <40 length (for quick SSK) smiles=[] targets=[] for i in range(0,len(smiles_full)): if len(smiles_full[i])<40: smiles.append(smiles_full[i]) targets.append(targets_full[i]) smiles=np.array(smiles) targets=np.array(targets) #seperate all character with blank space smiles = np.array([" ".join(list(smile)) for smile in smiles]).reshape(-1,1)Define problem (objective and space)# define an objective function (to be minimized) and space def objective(x): # return score of the 
molecules # *-1 so we can minimize return -targets[np.where(smiles==x)[0][0]] objective=np.vectorize(objective) # define search space space = ParameterSpace([CandidateStringParameter("string",smiles)])Collect initial points# collect initial design (uniform sample) np.random.seed(1234) random_design = RandomDesign(space) initial_points_count = 15 X_init = random_design.get_samples(initial_points_count) Y_init = objective(X_init)Perform BO with SSK# build BO loop # fit SSK model # just a single restart when fitting kernel params for demo # (we recommend at least 3 for high performance) model =SSK_model(space,X_init,Y_init,max_subsequence_length=5,n_restarts=1) # Load core elements for Bayesian optimization expected_improvement = ExpectedImprovement(model) # use random search to optimize acqusition function optimizer = RandomSearchAcquisitionOptimizer(space,100) bayesopt_loop_ssk = BayesianOptimizationLoop(model = model, space = space, acquisition = expected_improvement, acquisition_optimizer = optimizer) # add loop summary def summary(loop, loop_state): print("Performing BO step {}".format(loop.loop_state.iteration)) bayesopt_loop_ssk.iteration_end_event.append(summary) # run BO loop for 35 steps stopping_condition = FixedIterationsStoppingCondition(i_max = 35) bayesopt_loop_ssk.run_loop(objective, stopping_condition)Optimization restart 1/1, f = 19.187225084021044 Performing BO step 1 Optimization restart 1/1, f = 19.467343308047404 Performing BO step 2 Optimization restart 1/1, f = 22.16853567860297 Performing BO step 3 Optimization restart 1/1, f = 22.957946170821735 Performing BO step 4 Optimization restart 1/1, f = 23.740146166171456 Performing BO step 5 Optimization restart 1/1, f = 24.650012279227404 Performing BO step 6 Optimization restart 1/1, f = 25.825125558890214 Performing BO step 7 Optimization restart 1/1, f = 26.60063518581489 Performing BO step 8 Optimization restart 1/1, f = 27.367796030291274 Performing BO step 9 Optimization restart 1/1, f = 27.823581210645564 Performing BO step 10 Optimization restart 1/1, f = 28.452241448965133 Performing BO step 11 Optimization restart 1/1, f = 29.638513087002877 Performing BO step 12 Optimization restart 1/1, f = 30.127600865423073 Performing BO step 13 Optimization restart 1/1, f = 30.387036896158236 Performing BO step 14 Optimization rest[...]Perform random search# also see performance of random search #(starting from the initialization used by the other approaches) np.random.seed(1234) Y_random=np.vstack([Y_init,objective(random_design.get_samples(35))])plot results# plot results from the two methods # recall that first 15 points are a random sample shared by all the methods plt.plot(-np.minimum.accumulate(bayesopt_loop_ssk.loop_state.Y),label="Split SSk") plt.plot(-np.minimum.accumulate(Y_random),label="Random Search") plt.ylabel('Current best') plt.xlabel('Iteration') plt.legend()Homework solution - 02 experiment trackingIn this notebook are the answers of the homework of the module 02 of the course (Experiment Tracking).# Built-in imports import os # External imports from mlflow.entities import ViewType from mlflow.tracking import MlflowClient # Own imports from scripts import get_path_dir as gpd # Define the path to the data directory DATA_DIR = gpd.get_desired_folder_path("data")Q1. Install MLflowmlflow_version = os.popen('mlflow --version') print(f"The version of MLflow that i installed is the following: {mlflow_version.read()}")The version of MLflow that i installed is the following: mlflow, version 1.26.1Q2. 
Download and preprocess the data# Run the file in order to preprocess the datasets _ = os.system(f'python ../scripts/preprocess_pipeline.py --raw_data_path "{DATA_DIR}" --dest_path ../artifacts' ) artifacts_dir_ls = os.popen('ls ../artifacts/ | wc -l') print(f"The number of resulted files is: {int(artifacts_dir_ls.read()) - 1}")The number of resulted files is: 4Q3. Train a model with autolog# Run the file in order to train a simple random forest model to predict the time duration of a taxi ride _ = os.system(f'python ../scripts/train.py --data_path ../artifacts') # Define the MLflow client API client = MlflowClient() # Get the run_id os the first run runs = client.search_runs( experiment_ids='0', run_view_type=ViewType.ACTIVE_ONLY, max_results=10, order_by=["attribute.start_time ASC"] ) id_first_run = runs[0].info.run_id params_dir_ls = os.popen(f'ls mlruns/0/{id_first_run}/params/ | wc -l') print(f"The number of logged params is: {int(params_dir_ls.read())}")The number of logged params is: 17Q4. Launch the tracking server locally In addition to `backend-store-uri`, it is important to use the `default-artifact-root` param too. Q5. Tune the hyperparameters of the model# Run the file in order to make a hyperparameter optimization of the Random Forest model _ = os.system(f'python ../scripts/hpo.py --data_path ../artifacts') # Define important constant variables MLFLOW_TRACKING_URI = "http://127.0.0.1:5000" HPO_EXPERIMENT_NAME = "random-forest-hyperopt" # Define the MLflow client API client = MlflowClient(tracking_uri=MLFLOW_TRACKING_URI) # Get the run with the lowest rsme experiment = client.get_experiment_by_name(HPO_EXPERIMENT_NAME) runs = client.search_runs( experiment_ids=experiment.experiment_id, run_view_type=ViewType.ACTIVE_ONLY, max_results=1, order_by=["metrics.rmse ASC"] ) lowest_rmse = runs[0].data.metrics['rmse'] print(f"The lowest value of rmse got is: {lowest_rmse:.4f}")The lowest value of rmse got is: 6.6284Q6. Promote the best model to the model registry# Run the file in order to registry the best model in the model registry _ = os.system(f'python ../scripts/register_model.py --data_path ../artifacts') # Define important constant variables HPO_EXPERIMENT_NAME = "random-forest-best-models" # Get the run with the lowest test_rsme experiment = client.get_experiment_by_name(HPO_EXPERIMENT_NAME) runs = client.search_runs( experiment_ids=experiment.experiment_id, run_view_type=ViewType.ACTIVE_ONLY, max_results=1, order_by=["metrics.rmse ASC"] ) lowest_test_rmse = runs[0].data.metrics['test_rmse'] print(f"The lowest value of test rmse got is: {lowest_test_rmse:.4f}")The lowest value of test rmse got is: 6.548914장 합성곱신경망: 컴퓨터비전 (2부) 감사의 글자료를 공개한 저자 오렐리앙 제롱과 강의자료를 지원한 한빛아카데미에게 진심어린 감사를 전합니다. ResNet-34 CNN 구현 * 앞서 소개된 대부분의 CNN 모델들을 케라스가 기본으로 지원함. * 하지만 실제로 케라스로 구현하는 것도 대부분 매우 쉬움. 잔차 유닛(RU) 구현 * ResNet-34 모델에 사용되는 ResidualUnit 층을 직접 구현하는 것도 간단함. * ResidualUnit 클래스 구성 요소 * `main_layers`: 오른쪽 모듈 * `skip_layers`: 왼쪽 모듈. 보폭이 1보다 큰 경우에만 합성곱 모델 적용. 
```pythonDefaultConv2D = partial(keras.layers.Conv2D, kernel_size=3, strides=1, padding="SAME", use_bias=False)class ResidualUnit(keras.layers.Layer): def __init__(self, filters, strides=1, activation="relu", **kwargs): super().__init__(**kwargs) self.activation = keras.activations.get(activation) self.main_layers = [ DefaultConv2D(filters, strides=strides), keras.layers.BatchNormalization(), self.activation, DefaultConv2D(filters), keras.layers.BatchNormalization()] self.skip_layers = [] if strides > 1: self.skip_layers = [ DefaultConv2D(filters, kernel_size=1, strides=strides), keras.layers.BatchNormalization()]``` ```python def call(self, inputs): main_layers Z = inputs for layer in self.main_layers: Z = layer(Z) skip_layers skip_Z = inputs for layer in self.skip_layers: skip_Z = layer(skip_Z) return self.activation(Z + skip_Z)``` ResNet-34 구현 * Sequential 클래스 활용* 잔차 유닛을 하나의 층으로 취급 가능 ```pythonmodel = keras.models.Sequential()model.add(DefaultConv2D(64, kernel_size=7, strides=2, input_shape=[224, 224, 3]))model.add(keras.layers.BatchNormalization())model.add(keras.layers.Activation("relu"))model.add(keras.layers.MaxPool2D(pool_size=3, strides=2, padding="SAME"))prev_filters = 64for filters in [64] * 3 + [128] * 4 + [256] * 6 + [512] * 3: strides = 1 if filters == prev_filters else 2 model.add(ResidualUnit(filters, strides=strides)) prev_filters = filtersmodel.add(keras.layers.GlobalAvgPool2D())model.add(keras.layers.Flatten())model.add(keras.layers.Dense(10, activation="softmax"))``` 케라스 제공 사전훈련된 모델 활용 * 많은 모델이 `keras.applications` 에서 기본제공됨. * ResNet 모델 변종 * Inception-v3, Xception 등 GoogLeNet 모델 변종 * VGGNet 모델 변종 * MobileNet, MobileNetV2 등 모바일 전용 모델 예제: ResNet-50 모델 모델 불러오기 ```pythonmodel = keras.applications.resnet50.ResNet50(weights="imagenet")``` * `224x224` 모양의 이미지를 입력값으로 사용해야 함. 주의사항: 입력 이미지 모양 * 모델에 따라 입력 이미지의 모양이 달리짐. * 입력모양 변경: `tf.image.resize()`, `tf.image.crop_and_resize()` 등 다양한 함수 이용 가능. * 일반적으로 가로, 세로 비율을 유지하지는 않음. ```pythonimages_resized = tf.image.resize(images, [224, 224])또는images_resized = tf.image.crop_and_resize(images, [china_box, flower_box], [0, 1], [224, 224])``` 주의사항: 입력 데이터 값 * 모델에 따라 입력 이미지에 사용되는 값는 0에서1 또는 -1에서 1 사이로 스케일링 된 것을 기대. * 모델 마다 `preprocess_input()` 제공. * 이미지에 사용된 값이 0에서 255 사이인 것을 기대 * 예를 들어, 이미지에 사용된 값이 0에서 1사이의 값이라면, 255를 곱해서 입력해 주어야 함. ```pythoninputs = keras.applications.resnet50.preprocess_input(images_resized * 255)``` 예측 ```pythonY_proba = model.predict(inputs)``` 예측결과 확인 * `decode_predictions()` 메서드 활용 * 이미지별로 지정된 수 만큼의 최상위 예측 클래스를 보여줌. * 아래 코드는 두 이미지 각각에 대한 최상위 3개의 클래스를 보여줌. * 클래스 수: 1,000개 ```pythontop_K = keras.applications.resnet50.decode_predictions(Y_proba, top=3)for image_index in range(len(images)): print("Image {}".format(image_index)) for class_id, name, y_proba in top_K[image_index]: print(" {} - {:12s} {:.2f}%".format(class_id, name, y_proba * 100)) print()``` * 결과 * 정확하지는 않지만 사원(monastery), 데이지(daisy) 등 유사한 클래스가 탑 3 항목에 포함되었음. 
```pythonImage 0 n03877845 - palace 43.39% n02825657 - bell_cote 43.07% n03781244 - monastery 11.70%Image 1 n04522168 - vase 53.96% n07930864 - cup 9.52% n11939491 - daisy 4.97%``` 사전훈련된 모델 활용 전이학습 * 이미지넷에 없는 이미지 클래스를 감지하는 분류기를 만들고자 하는 경우 활용 예제: 사전훈련된 Xception 모델 활용 * 사전훈련된 Xception 모델을 활용한 꽃 이미지 분류기 모델 훈련하기 데이터 불러오기 * `tensorflow_datasets` 모듈의 `load()` 함수 활용하여 샘플 데이터셋 불러오기 ```pythonimport tensorflow_datasets as tfdsdataset, info = tfds.load("tf_flowers", as_supervised=True, with_info=True)``` * `tf_flowers` 데이터셋 * 훈련 세트만 존재하는 데이터셋 * 5개의 꽃 클래스: `['dandelion', 'daisy', 'tulips', 'sunflowers', 'roses']` * 샘플 수: 3,670 * 테스트 세트(10%), 검증 세트(15%), 훈련 세트(75%)로 분리하기 ```pythontest_set_raw, valid_set_raw, train_set_raw = tfds.load( "tf_flowers", split=["train[:10%]", "train[10%:25%]", "train[25%:]"], as_supervised=True)``` 전처리 * 배치 활용: 크기는 32* 섞기(shuffle) 실행* 선택 기능: `randomize=True`: 무작위적으로 사진자르기와 수평뒤집기 등을 활용한 데이터 증식* `224x224` 모양으로 변환 후 `preprocess_input()` 메서드로 전치리 실행* `prefetch(1)`: 배치 미리 준비시키기 ```pythondef preprocess(image, label, randomize=False): if randomize: cropped_image = random_crop(image) cropped_image = tf.image.random_flip_left_right(cropped_image) else: cropped_image = central_crop(image) resized_image = tf.image.resize(cropped_image, [224, 224]) final_image = keras.applications.xception.preprocess_input(resized_image) return final_image, labelbatch_size = 32train_set = train_set_raw.shuffle(1000).repeat()train_set = train_set.map(partial(preprocess, randomize=True)).batch(batch_size).prefetch(1)valid_set = valid_set_raw.map(preprocess).batch(batch_size).prefetch(1)test_set = test_set_raw.map(preprocess).batch(batch_size).prefetch(1)``` ImageNet에서 사전훈련된 Xception 모델 활용 * 분리 합성곱 층들을 제외한 (최상위에 위치한) 층 제거 * 전역평균 층 * 밀집 출력층 * 제거된 층 대신 새로은 층 두 개 추가 * 전역평균층 * softmax 활성화함수를 사용하는 밀집 출력층 ```pythonbase_model = keras.applications.xception.Xception(weights="imagenet", include_top=False)avg = keras.layers.GlobalAveragePooling2D()(base_model.output)output = keras.layers.Dense(n_classes, activation="softmax")(avg)model = keras.models.Model(inputs=base_model.input, outputs=output)``` 1차 훈련 * 사전훈련된 층의 가중치를 동결 후 훈련* 학습률: `lr=0.2`* 성능: 검증 세트에 대한 정확도가 88% 정도 ```python 사전훈련된 층의 가중치 동결하기for layer in base_model.layers: layer.trainable = False 컴파일 후 훈련 시작optimizer = keras.optimizers.SGD(lr=0.2, momentum=0.9, decay=0.01)model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])history = model.fit(train_set, steps_per_epoch=int(0.75 * dataset_size / batch_size), validation_data=valid_set, validation_steps=int(0.15 * dataset_size / batch_size), epochs=5)``` 2차 훈련 * 사전훈련된 층의 가중치 동결 해제 후 다시 훈련* 학습률 작게: `lr=0.0.01`* 성능: 검증 세트에 대한 정확도가 94.3% 정도 ```python 사전훈련된 층의 가중치 동결 해제for layer in base_model.layers: layer.trainable = True 재 컴파일 후 훈련 시작optimizer = keras.optimizers.SGD(learning_rate=0.01, momentum=0.9, nesterov=True, decay=0.001)model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])history = model.fit(train_set, steps_per_epoch=int(0.75 * dataset_size / batch_size), validation_data=valid_set, validation_steps=int(0.15 * dataset_size / batch_size), epochs=40)``` 분류와 위치추정(classification and localization) * 사진에 포함된 꽃을 분류하면서 해당 꽃의 위치 추청하기 * 위치추정은 회귀 모델로 구현 가능 * 탐색대상인 객체의 주위로 네모 모양의 __경계상자__(bounding box) 그리기 * 네모상자의 중심좌표와 높이와 너비(세로와 가로), 즉, 네 개의 값을 예측해야 함. 경계상자 추정 모델 구현 * 앞서 사용된 분류 모델에 위치추정 기능을 갖는 출력층을 추가하면 됨. * 위치추정 출력층: 값예측을 위한 네 개의 뉴런 사용. 
```pythonbase_model = keras.applications.xception.Xception(weights="imagenet", include_top=False)avg = keras.layers.GlobalAveragePooling2D()(base_model.output)class_output = keras.layers.Dense(n_classes, activation="softmax")(avg)loc_output = keras.layers.Dense(4)(avg)model = keras.models.Model(inputs=base_model.input, outputs=[class_output, loc_output])model.compile(loss=["sparse_categorical_crossentropy", "mse"], loss_weights=[0.8, 0.2], 어느 출력에 보다 집중할지 결정 optimizer=optimizer, metrics=["accuracy"])``` 경계상자 레이블링 * 경계상자 추정 모델을 (지도)학습시키려면 모든 이미지에 경계상자 레이블이 있어야 하거나 추가되어야함. * 하지만 가장 어려우면서 고비용이 요구되는 작업이 선행되어야 함. 경계상자 레이블링 도구 소개 * 오픈소스 프로그램 * VGG Image Annotation * LabelImg * OpenLabeler * ImgLab * 상용 프로그램 * LabelBox * Supervisely * 크라우드소싱(crowdsourcing) 플랫폼 * Amazon Mechanical Turk * 아주 많은 양의 이미지에 경계상자 등 표기할 경우 * 많은 준비작업이 요구됨. * 참조 논문: [Crowdsourcing in Computer Vision, Adriana Kovashka et al.](https://arxiv.org/abs/1611.02145) 이미지 전처리 * 경계상자 레이블링이 완성된 후 입력되는 샘플이 두 개의 레이블을 사용하도록 입력값 형식을 다음과 같이 변경해야 함: ```python (images, (class_labels, bounding_boxes)) ``` 경계상자 평가지표: IoU * 보통 MSE 사용 * 하지만 __합집합에 대한 교집합의 비율__(intersection over union, IoU)이 경계상자 예측값을 보다 정확하게 평가함. * IoU: 전체 영역에서 겹치는 부분의 비율 객체탐지 * 하나의 이미지에서 여러 개의 물체를 분류하고 위치를 추정하는 작업 기본 아이디어 * CNN 모델이 사진의 전체영역을 훑어 보도록 함. * 훑어보는 각각의 탐색영역에서 하나의 객체를 탐색하는 것을 목표로 삼음. * 탐색 영역의 크기를 변화시키며 객체 탐색 * `3x3`, `4x4` 등등 * 그 중에 가장 적절한 탐색결과를 이용하여 객체 탐지 * 단점 * 조금씩 다른 위치에서 동일한 물체를 여러 번 감지하여 불필요한 경계상자를 제거하는 사후 작업 필요 * 경계상자 안에 __객체가 존재할 가능성__(objectness)을 계산하여 일정 값 이하의 경계상자 제거 * CNN을 여러 번 작동시켜야 하기에 꽤 느림. 완전 합성곱신경망(Fully Convolutional Networks, FCN) * CNN을 여러 번 작동시키는 단점 해소. * 기본 아이디어: CNN의 최상위 밀집층을 합성곱 층으로 교체 예제 * 가정: 아래 형식을 따르는 밀집층이 사용된 CNN 모델 * 입력값: `224 x 224` 크기의 이미지 * 마지막 합성곱 층/풀링 층의 결과: `7 x 7` 크기의 특성지도로 이루어짐. * 밀집층: 10개의 뉴런으로 구성 * 밀집층 교체 * 새로 추가되는 첫재 합성곱층: `7 x 7` 크기의 필터 10개 사용 * 동일한 모델로 다양한 크기의 입력사진을 다룰 수 있음.* 또한 목적에 맞추어 다른 합성곱 층으로 구성 가능. * 예를 들어, 하나의 이미지에서 여러 객체의 탐지 가능 FCN의 장점 * 이미지를 단 한 번만 처리 * YOLO(You Only Look Once): 인기 높은 객체탐지 기법 YOLO * 2015년에 소개된 후 2016년 YOLOv2, 2018년 YOLOv3로 기능 향상됨. * 심층신경망 훈련 전에 K-평균 비지도 학습을 이용하여 대상 주의로 앵커 박스(anchor box)를 먼저 표시함. * 앵커 박스가 이후 훈련과정에서 보다 빠르고 정확한 경계상자를 예측하도록 도와줌. * 학습된 모델은 실시간으로 비디오에 적용 가능할 정도로 빠름. 아래 데모 참조from IPython.display import YouTubeVideo # a talk about IPython at Sage Days at U. Washington, Seattle. # Video credit: . 
YouTubeVideo('MPU2HistivI')Run recovery analysis jobs with```$ python3 src/processing/recovery_analysis_jobs.py --dataset=EBA --save_path=$SAVE_PATH --save_every=n --read_checkpoints=True```This script checkpoints every n records and can be restarted from saved checkpoint state.Final results are saved in $SAVE_PATH/all.featherSAVE_PATH = pathlib.Path("/gws/nopw/j04/forecol/ah2174/biomass_recovery/EBA") eba_data = pd.read_feather(SAVE_PATH / "all.feather") eba_data quality_eba = eba_data[(eba_data.overlap_quality != 0) & (eba_data.recovery_period >= 3) & (eba_data.recovery_period <= 22)] len(quality_eba) lin_chm_med = linregress(quality_eba.recovery_period, quality_eba.chm_med) lin_acd_longo2016 = linregress(quality_eba.recovery_period, quality_eba.acd_longo2016) lin_acd_asner2014 = linregress(quality_eba.recovery_period, quality_eba.acd_asner2014) plt.figure(1, (15,10)) ax = sns.boxplot(x="recovery_period", y="chm_med", data=quality_eba) ax.set_ylabel("EBA Canopy height median (m)") xs = range(0, 20) ax.plot(xs, lin_chm_med.intercept + lin_chm_med.slope * xs, 'r') text_label = ( "R-score: {0:.2f}\n" "slope: {1:.2f} m/yr\n" "intercept: {2:.2f} m").format( lin_chm_med.rvalue, lin_chm_med.slope, lin_chm_med.intercept) ax.text(x=0.2, y=25.2, s=text_label) plt.figure(2, (15,10)) ax = sns.boxplot(x="recovery_period", y="acd_longo2016", data=quality_eba) ax.set_ylabel("Longo (2016) Aboveground Carbon Density (MgC/ha)") xs = range(0, 20) ax.plot(xs, lin_acd_longo2016.intercept + lin_acd_longo2016.slope * xs, 'r') text_label = ( "R-score: {0:.2f}\n" "slope: {1:.2f} MgC/ha/yr\n" "intercept: {2:.2f} MgC/ha").format( lin_acd_longo2016.rvalue, lin_acd_longo2016.slope, lin_acd_longo2016.intercept) ax.text(x=0.2, y=150, s=text_label)Session 5: Edge Detectionimport os import sys sys.path.append(os.path.join("..")) import cv2 import numpy as np from utils.imutils import jimshow from utils.imutils import jimshow_channel import matplotlib.pyplot as plt__Import image__fname = os.path.join("..", "data", "img", "coins.png") image = cv2.imread(fname) jimshow(image)__Make the image greyscale__# Converting the image into a greyscale image grey_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # Because we now have a greyscale image, we have to use the jimshow_channel function jimshow_channel(grey_image)Sobel Operators We need two Sobel kernels. One for the horizontal axis (x-axis) and one for the vertical axis (y-axis). For that we use this function: ```cv2.Sobel(image, kernel x-axis, y-axis)```When specifying the Sobel kernel we are creating a performing Sobel kernel on the image using 64bit floating numbers. The numbers for arrays in OpenCV are unsigned 8bit integers, which means that they can only be positive. We want negative numbers as well when performing edge detection, because we are working with a decreasing slope (going from a high intensity, white, area to a low intensity area, black), and therefore we need negative values as well, otherwise we would not be able to detect when moving from light to dark.# We create our two Sobel kernels sobelX = cv2.Sobel(grey_image, cv2.CV_64F, 1, 0) # 1 means that we "turn on" the x-axis (horizontal) and work along that sobelY = cv2.Sobel(grey_image, cv2.CV_64F, 0, 1) # Now we are turining on the y-axis (vertical) and working along that # When we define the sobel kernels, the convolutional operations are done also behind the scenes. sobelX # This is the array of the gradient. The convolution kernel operations has been done.Now we no longer want to have negative values. 
Hence, we want to go back to unsigned values. The original image only uses positive values, so we also only want positive values when we extract values from the image, which is why we go back to unsigned values. Hence, if something is -5 we only want to return 5. "Unsigned integers": positive whole numbers. __Why are we only interested in positive values?__ Once we want to visualize the image, pixel values are all positive values (0-255), which means that if we want to plot an image, we only want positive values.# We take our array and define the absolute magnitude in order to get rid of negative values sobelX = np.uint8(np.absolute(sobelX)) sobelY = np.uint8(np.absolute(sobelY)) # Now we have essentially performed our edge detection. # We use the hstack() to put the edges detected by the two kernels side by side sobel_edges = np.hstack([sobelX, sobelY]) jimshow_channel(sobel_edges, "Horizontal vs. Vertical Sobel")Left: we have detected horizontal edges. Right: we have found vertical edges. __Combined Sobel__ We take the horizontal and vertical Sobel results and combine them to show the combined Sobel operator on an image. For this we use the bitwise OR operation. Bitwise OR gives a TRUE value when either pixel is greater than 0.sobel_combined = cv2.bitwise_or(sobelX, sobelY) # either sobelX or sobelY can be above 0 jimshow_channel(sobel_combined, "Sobel combined")Here we see all the points of the original picture where either the horizontal or the vertical Sobel returns a positive value. Hence, by combining them, we get a much more nuanced edge detection. Now we actually get some internal structure of the coins as well. Laplacian Operator Using the Laplacian kernel we do not have to define an x and a y.laplace = cv2.Laplacian(grey_image, cv2.CV_64F) # Making sure we are working with unsigned integers laplace = np.uint8(np.absolute(laplace)) jimshow_channel(laplace, "Laplacian")With the Laplacian operator we have less information about internal structure than we did with the combined Sobel operators, because the Laplacian is less sensitive to noise. With the Laplacian operator we get a clear outline of the coins. Hence, when we plot the image, we are only interested in the edges. Canny Edge Detection Edge detection is a process with multiple steps: 1. Blurring 2. Sobel 3. Non-maximum suppression 4. Thresholding# Blurring blurred = cv2.GaussianBlur(grey_image, (5,5), 0) # 5x5 kernel. The 0 is the standard deviation (sigma); 0 tells OpenCV to compute it from the kernel size # Canny edge detection with manually set thresholds 30 and 150 canny = cv2.Canny(blurred, 30, 150) jimshow_channel(canny, "Canny edge detection")Here we see that we have far less detail than the original image. We now only have the outline (edges) of the coins. Find Contours Now we want to find the contours in the image. A contour is an edge which forms a continuous and unbroken line around an object. Hence, the edges inside the coins will not be contours, while the outline of the coins will be detected as contours. For finding contours we use this function:```cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) ```cv2.RETR_EXTERNAL has to do with how contours are structured hierarchically. 
It performs a hierarchical structuring - we filter internal structures out and focus only on the external contours. Hence, if there are contours inside the object we filter those out, and only focus on the contours that surrounds the object itself. OpenCV finds contours using the CHAIN_APPROX_SIMPLE method.# Finding contours using a np function called "copy" that takes a copy of the image instead of the image itself so that we do not destroy the original image (contours, _) = cv2.findContours(canny.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # Since we are only interested in the contours, we use a dummy variable for the other thing that the function returns contoursDrawing Contours We use this function to draw to contours on the original image:```cv2.drawContours(image, contours, fill, color, thickness)```If you set "fill" to -1 it draws a contour for every object in the image. If you set it to 0, it only contours the first object, which allows us to look at the objects one after another.# We use jimshow and not jimshow_channel because we want to see the contours on the original image and not on the greyscale iamge. jimshow(cv2.drawContours(image.copy(), # draw contours on original image contours, # our list of contours -1, # which contours to draw (0,255,0), # 2))We have found edges and with those edges we have found contours and we have mapped these contours on the original image.# The contours are just a list type(list) # We can count the lenght of the list of contours which tells us how many objects are in the image len(contours) print(f"I can count {len(contours)} coins in the image!")I can count 9 coins in the image!Tensorsimport torch import numpy as np data = [[1,2],[3,4]] x_data = torch.tensor(data) x_data np_array = np.array(data) x_np = torch.from_numpy(np_array) x_np x_ones = torch.ones_like(x_data) print(x_ones) x_rand = torch.rand_like(x_data, dtype=torch.float) print(x_rand) shape = (2,3,) # just a convention, it seems rand_tensor = torch.rand(shape) ones_tensor = torch.ones(shape) zeros_tensor = torch.zeros(shape) print(rand_tensor, '\n', ones_tensor, '\n',zeros_tensor) tensor = torch.rand(3,4) print(f'shape: {tensor.shape}') print(f'datatype: {tensor.dtype}') print(f'device tensor is stored on: {tensor.device}') if torch.cuda.is_available(): tensor = tensor.to('cuda') print('GPU AVAILABLE') tensor = torch.ones(4,4) print(tensor[0]) print(tensor[...,-1]) t2 = torch.cat([tensor, tensor, tensor], dim=0) t2, t2.shape t1 = torch.stack([tensor, tensor, tensor], dim=1) t1, t1.shape`.cat`: Concatenates the given sequnce of tensors **in the given dimension**`.stack`: Concatenates sequence of tensors along a **new dimension**# Matric Multiplication; y1, y2, y3 will have the same value y1 = tensor @ tensor.T y2 = tensor.matmul(tensor.T) y3 = torch.rand_like(tensor) print(y3) torch.matmul(tensor, tensor.T, out=y3) # basically it just overwrites it print(y1) print(y2) print(y3) # Element-wise product; z1, z2, z3 will have the same value z1 = tensor * tensor z2 = tensor.mul(tensor) z3 = torch.rand_like(tensor) torch.mul(tensor, tensor, out=z3) agg = tensor.sum() agg_item = agg.item() print(agg_item, type(agg_item)) # In-place operations: stores the result into the operand. 
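# Note: in-place operations are marked by a trailing underscore (add_, mul_, copy_)
# and overwrite the tensor's existing storage instead of allocating a new tensor.
# This saves memory, but the original values are lost, which can interfere with
# autograd when those values are needed for the backward pass.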
print(tensor) tensor.add_(5) print(tensor)tensor([[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.]]) tensor([[6., 6., 6., 6.], [6., 6., 6., 6.], [6., 6., 6., 6.], [6., 6., 6., 6.]])Bridge with Numpy Tensors on the CPU and NumPy arrays can share their underlying memory locations, and changing one will change the other.t = torch.ones(5) n = t.numpy() print(f"t: {t}") print(f"n: {n}") t.add_(1) print(f"t: {t}") print(f"n: {n}") n = np.ones(5) t = torch.from_numpy(n) np.add(n, 1, out=n) print(f"t: {t}") print(f"n: {n}")t: tensor([2., 2., 2., 2., 2.], dtype=torch.float64) n: [2. 2. 2. 2. 2.]SST (Spatial Stress Test) Behavioral Analysis Data Collected 2014import pandas as pd import moss from scipy import stats import scipy as sp import seaborn as sns import numpy as np import matplotlib import matplotlib.pyplot as plt import os.path as op import re #widget ability from IPython.html.widgets import interact, interactive, fixed from IPython.html import widgets from IPython.display import clear_output, display, HTML # Gather project info & functions from sst_setup_project import * # for plotting sns.set(style='whitegrid', context='poster') %matplotlib inline # R for stats %load_ext rpy2.ipython %R require(lme4) %R require(lmerTest)Set up directories & exp-specific informationdirs = dict() dirs['basedir'] = op.join(op.expanduser('~'), 'Experiments/SST') dirs['datadir'] = op.join(dirs['basedir'], 'data_pilot1/') dirs['analydir'] = op.join(dirs['basedir'], 'analysis') dirs['subj_info_file'] = op.join(dirs['datadir'], 'subj_info.csv') proj = gather_experiment_info(exp_name='SST', dirs=dirs) projRead in subject informationsubj_info = pd.read_csv(dirs['subj_info_file']) subj_info = subj_info[pd.isnull(subj_info.remove)] subj_infoRead in data filesdf = pd.DataFrame() # dataframe of subjs x envs test = True questionnaires_shock = False questionnaires_post = False # iterate through subjects for subid in subj_info.subid: print subid if test: # iterate through environments for env in proj['envs']: # print env # add test file test_file = op.join(dirs['datadir'], env, 'data', subid, 'session_0', 'log.txt') output = [] with open(test_file) as f: data = f.readlines() for line in data: columns = re.split('\t|\r|\n', line) output.append(columns) d2 = pd.DataFrame(output, columns = ['time', 'c2', 'command', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8']) # include subid and env d2['subid'] = subid d2['env'] = env # force time to be integer, diff from end to beginning d2.time = d2.time.astype(int) d2.time = d2.time - d2.time.min() df = df.append(d2, ignore_index=True) # Compiled group data if questionnaires_shock: q_file = op.join(dirs['basedir'], 'data/Quest/Questionnaires_shockblock_group.csv') dq_shock = pd.read_csv(q_file, index_col=0) if questionnaires_post: q_file = op.join(dirs['basedir'], 'data/Quest/Questionnaires_group.csv') dq_post = pd.read_csv(q_file, index_col=0)Look at datadf.head()Look at 2D position in spacedp = df[df.command == 'VROBJECT_POS'].reset_index() dp = dp.drop(['c5', 'c6', 'c7', 'c8', 'command', 'index'], 1) # remove unncessary cols dp.head() dp = dp.join(pd.DataFrame(dp.c4.str.split('Point3|, |\(|\)').tolist()).reindex(columns=[2, 3, 4])) dp.rename(columns={2: 'x', 3: 'y', 4: 'z'}, inplace=True) dp = dp.drop(['c4', 'c2'], 1) dp[['x', 'y', 'z']] = dp[['x', 'y', 'z']].astype(float) dp.head() plot_paths('env1', 's1') for i, sub in enumerate(subj_info.subid): fig, ax = plot_paths('env1', sub)Data for 1 subject, 1 environmentdsub = dp.query('env == "env3" & subid=="s1" & 
c3=="PandaEPL_avatar"') dsub.head() plt.plot(dsub.time) dsub.time.max() plt.scatter(dsub.time/1000, dsub.x.astype(float), s=10, marker='.') plt.scatter(dsub.time/1000, dsub.y.astype(float), s=10, marker='.') plt.scatter(dsub.time/1000, dsub.z.astype(float), s=10, marker='.')Heading Directiondh = df[(df.command == 'VROBJECT_HEADING') & (df.c3 == 'PandaEPL_avatar')].reset_index() dh = dh.drop(['c5', 'c6', 'c7', 'c8', 'command', 'index'], 1) # remove unncessary cols dh.head()Look at data just for 1 sub in 1 envdsub = dh.query('env == "env3" & subid=="s1"') dsub.head() plt.scatter(dsub.time/1000, dsub.c4.astype(float), s=10, marker='.')Text Eventsdt = df[(df.command.isin(['ASSIGNED'])) | df.command.isin(['ARRIVED'])].reset_index() dt.ix[dt.command == 'ARRIVED', 'c3'] = 'between_trials' dt = dt.drop(['c2', 'c4', 'c5', 'c6', 'c7', 'c8', 'command', 'index'], 1) # remove unncessary cols dt = dt.rename(columns={'c3': 'instructions'}) dt.head() dt['total_time'] = (dt.time - dt.time.shift(1)).shift(-1) dt.head()Average time to navigate to goalmeans = dt[dt.instructions != 'between_trials'].merge(subj_info).groupby(['subid', 'group', 'env']).mean().reset_index() sns.factorplot(x='env', y='total_time', hue='group', units='subid', ci=68, dodge=.1, data=means, palette=proj['palette'])Shock Eventsds = df[(df.command == 'SHOCK')].reset_index() ds = ds.drop(['c4', 'c5', 'c6', 'c7', 'c8', 'command', 'index'], 1) # remove unncessary cols ds.head() ds.groupby('subid').env.count()Input Eventsdi = df[(df.command == 'INPUT_EVENT')].reset_index() di = di.drop(['c4', 'c5', 'c6', 'c7', 'c8', 'command', 'index'], 1) # remove unncessary cols di = di.query('(c3 != "dismiss") & (c3 != "exit")') di.head() counts = di.merge(subj_info).groupby(['subid', 'group','env', 'c3']).count().reset_index() sns.factorplot(x='c3', y='time', hue='group', aspect=1.5, units='subid', ci=68, data=counts, palette=proj['palette']) plt.ylabel('Count (time stamps)') plt.xlabel('Key press')Merge dataframesdpt = dp.merge(dt, on=['subid', 'env', 'time'], how='left') dpt.ix[0,'instructions'] = 'intro' dpt = dpt.fillna(method='ffill') dpt.head() for i, sub in enumerate(subj_info.subid): fig, ax = plot_path('env1', sub, 'George_Clooney') plot_path_group('env1', subj_info[subj_info.group == 'stress'].subid, 'George_Clooney') plot_path_group('env1', subj_info[subj_info.group == 'control'].subid, 'George_Clooney') plot_path_group('env2', subj_info[subj_info.group == 'stress'].subid, 'duck') plot_path_group('env2', subj_info[subj_info.group == 'control'].subid, 'duck')Calculate Path Lengthsdpt.head() dplen = pd.DataFrame(columns=['subid', 'env', 'goal_type', 'path_len']) for env in proj['envs']: goals = set(dpt[dpt.env == env].instructions) goals.discard('between_trials') goals.discard('intro') for subid in subj_info.subid: # print subid for goal in goals: # print goal goal_type = dict((v,k) for k,v in proj['goals'][env].items()).get(goal) # print goal_type data_sub = dpt[(dpt.subid == subid) & (dpt.env == env) & (dpt.instructions == goal) & (dpt.c3 == 'PandaEPL_avatar')] distance = sp.spatial.distance.euclidean(data_sub.x, data_sub.y) row = pd.Series({'subid': subid, 'env': env, 'goal_type': goal_type, 'path_len': distance}) dplen = dplen.append(row, ignore_index=True) dplen.head() means = dplen.merge(subj_info).groupby(['subid', 'group', 'goal_type']).mean().reset_index() sns.factorplot(x='goal_type', y='path_len', hue='group', units='subid', ci=68, dodge=.1, data=means, palette=proj['palette']) means = dplen.merge(subj_info).groupby(['subid', 
'group', 'env']).mean().reset_index() sns.factorplot(x='env', y='path_len', hue='group', units='subid', ci=68, dodge=.1, data=means, palette=proj['palette']) data = dplen.merge(subj_info) %R -i data %%R #print(str(data)) data_agg = with(data, aggregate(path_len ~ subid+group, FUN=mean)) print(data_agg) res1 = lm(path_len~group, data=data_agg) print(summary(res1)) print(anova(res1))Otherx = np.array(dp.query('env == "env1" & subid=="s1" & c3=="PandaEPL_avatar"').x.astype(float).reset_index()) y = np.array(dp.query('env == "env1" & subid=="s1" & c3=="PandaEPL_avatar"').y.astype(float).reset_index()) def animate(nframe): plt.scatter(x[range(nframe)], y[range(nframe)]) plt.ylim(-50,50) plt.xlim(-50,50) fig = plt.figure() ani = animation.FuncAnimation(fig, animate, frames=2500, interval=200, blit=True) plt.show() ani.save('/Users/steph-backup/Desktop/path.gif', writer='imagemagick', fps=50)Little Sister's Vocabulary - Strings - Solved#task 1 def add_prefix_un(word): """ :param word: str of a root word :return: str of root word with un prefix This function takes `word` as a parameter and returns a new word with an 'un' prefix. """ return 'un'+word add_prefix_un('happy') #task 2 #make_word_groups(['en', 'close', 'joy', 'lighten']) #'en :: enclose :: enjoy :: enlighten' def make_word_groups(vocab_words): """ :param vocab_words: list of vocabulary words with a prefix. :return: str of prefix followed by vocabulary words with prefix applied, separated by ' :: '. This function takes a `vocab_words` list and returns a string with the prefix and the words with prefix applied, separated by ' :: '. """ prefix = vocab_words[0] return prefix + ' :: ' + ' :: '.join(prefix + x for x in vocab_words[1:]) make_word_groups(['en', 'close', 'joy', 'lighten']) #task3 def remove_suffix_ness(word): """ :param word: str of word to remove suffix from. :return: str of word with suffix removed & spelling adjusted. This function takes in a word and returns the base word with `ness` removed. """ base = word[:-4] if base[-1] == 'i': return base[:1]+'y' return base #task4 def noun_to_verb(sentence, index): ''' :param sentence: str that uses the word in sentence :param index: index of the word to remove and transform :return: str word that changes the extracted adjective to a verb. A function takes a `sentence` using the vocabulary word, and the `index` of the word once that sentence is split apart. The function should return the extracted adjective as a verb. ''' return sentence.split()[index].rstrip(punctuation+whitespace) + "en"Darts - Numbers - Solved#circulo externo raio de 10 uni #circulo meio raio de 5 uni #circulo interno raio 1uni def score(x, y): distance = (x**2 + y**2)**(1/2) if distance > 10: return 0 elif distance <=10 and distance > 5: return 1 elif distance <=5 and distance > 1: return 5 else: return 10Card Games - Lists - Solved#task2 def get_rounds(number): """ :param number: int - current round number. :return: list - current round and the two that follow. """ return [number,number+1,number+2] get_rounds(27) #task3 def concatenate_rounds(rounds_1, rounds_2): """ :param rounds_1: list - first rounds played. :param rounds_2: list - second set of rounds played. :return: list - all rounds played. """ return rounds_1+rounds_2 concatenate_rounds([27, 28, 29], [35, 36]) #task 4 def list_contains_round(rounds, number): """ :param rounds: list - rounds played. :param number: int - round number. :return: bool - was the round played? 
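Example (illustrative): list_contains_round([27, 28, 29, 35, 36], 29) -> True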
""" if number in rounds: return True else: return False list_contains_round([27, 28, 29, 35, 36], 29) #task 5 def card_average(hand): """ :param hand: list - cards in hand. :return: float - average value of the cards in the hand. """ total_sum=0 for i in hand: total_sum += i return total_sum/len(hand) card_average([5, 6, 7]) #task 6 def approx_average_is_average(hand): """ :param hand: list - cards in hand. :return: bool - is approximate average the same as true average? """ mean_first_last = (hand[0] + hand[-1])/2 if card_average(hand) == mean_first_last: return True else: return False approx_average_is_average([1, 2, 3, 5, 9]) #task 7 def average_even_is_average_odd(hand): """ :param hand: list - cards in hand. :return: bool - are even and odd averages equal? """ even_list = [] odd_list = [] for i in range(len(hand)): if i % 2 == 0: even_list.append(hand[i]) else: odd_list.append(hand[i]) if card_average(even_list) == card_average(odd_list): return True else: return False average_even_is_average_odd([1, 2, 3]) #task 8 def maybe_double_last(hand): """ :param hand: list - cards in hand. :return: list - hand with Jacks (if present) value doubled. """ if hand[-1] == 11: hand[-1] = hand[-1]*2 return hand hand = [5, 9, 11] maybe_double_last(hand)Resistor Color - List - Solveddef color_code(color): colors_list = colors() return colors_list.index(color) def colors(): return [ "black", "brown", "red", "orange", "yellow", "green", "blue", "violet", "grey", "white" ]Little Sister's Essay - Strings#task 1 def capitalize_title(title): """ :param title: str title string that needs title casing :return: str title string in title case (first letters capitalized) """ return title.title() capitalize_title("my hobbies") #task 2 def check_sentence_ending(sentence): """ :param sentence: str a sentence to check. :return: bool True if punctuated correctly with period, False otherwise. """ return sentence.endswith('.') check_sentence_ending("I like to hike, bake, and read.") #task3 def clean_up_spacing(sentence): """ :param sentence: str a sentence to clean of leading and trailing space characters. :return: str a sentence that has been cleaned of leading and trailing space characters. """ return sentence.strip(' ') clean_up_spacing(" I like to go on hikes with my dog. ") #task 4 def replace_word_choice(sentence, old_word, new_word): """ :param sentence: str a sentence to replace words in. 
:param new_word: str replacement word :param old_word: str word to replace :return: str input sentence with new words in place of old words """ return sentence.replace(old_word,new_word) replace_word_choice("I bake good cakes.", "good", "amazing")Reinforcement learning - TME 4 - DQN L'objectif du TME est d'implémenter les algorithmes de renforcement value-based étudiés en cours (Q-learning et ses variantes) et de les tester dans un framework classique (gym de open-ai, MDP GridWorld).import matplotlib #from matplotlib import pyplot as plt matplotlib.use("TkAgg") import gym import gridworld from gym import wrappers, logger import numpy as np import copy import torch from torch import nnImplémentation des algorithmesclass RandomAgent(object): """The world's simplest agent!""" def __init__(self, action_space): self.action_space = action_space def act(self, observation, reward, done): return np.random.choice([0, 1]) def learn(self, observation, reward, done): return class NN(nn.Module): def __init__(self, inSize, outSize, layers=[]): super(NN, self).__init__() self.layers = nn.ModuleList([]) for x in layers: self.layers.append(nn.Linear(inSize, x)) inSize = x self.layers.append(nn.Linear(inSize, outSize)) def forward(self, x): x = self.layers[0](x) for i in range(1, len(self.layers)): x = torch.nn.functional.leaky_relu(x) x = self.layers[i](x) return xParamètres CartPoleepsilon=0.01, epsilonDecay=0.99999gamma=0.999btachSize=100, capacity=100000ctarget=100layers=[200]lr=0.001LunarLander (convergence après environ 10000 episodes):epsilon=0.1, epsilonDecay=0.99999gamma=0.99btachSize=1, capacity=1ctarget=1000layers=[200]lr=0.0001 Pour Gridworld (convergence après environ 2000 episodes sur plan0 avec rewards={0:-0.001,3:1,4:1,5:-1,6:-1}):epsilon=0.1, epsilonDecay=0.9999 (epsilon multiplié par epsilonDecay à chaque passage dans act)gamma=0.99batchSize=10, capacity=1000000ctarget=1000 (fréquence de mise à jour du réseau cible)layers=[30,30]lr=0.0001 (learning rate)class DQN(object): """Implementing a DQN learning agent""" def __init__(self, env, params): self.env = env self.N = params['N'] self.D = np.zeros((self.N, 4 + 1 + 1 + 4 + 1)) self.C = params['C'] self.batch = params['batch'] self.Q = NN(5, 1, params['layers']) self.Qhat = NN(5, 1, params['layers']) self.eps = params['eps'] self.epsDecay = params['epsDecay'] self.state = [] self.step = -1 self.gamma = params['gamma'] self.loss = torch.nn.SmoothL1Loss() self.optim = torch.optim.Adam(params=self.Q.parameters(), lr=params['lr']) def act(self, observation, reward, done): self.state = observation if np.random.rand() < self.eps: self.action = np.random.choice([0, 1]) else: input0 = torch.tensor(np.append(observation, [0])).float() input1 = torch.tensor(np.append(observation, [1])).float() self.action = 0 if self.Q(input0) > self.Q(input1) else 1 self.eps *= self.epsDecay return self.action def learn(self, observation, reward, done): self.step += 1 self.D[self.step % self.N] = list(self.state) + [self.action, reward] + list(observation) + [int(done)] inputs = self.D[np.random.randint(0, min(self.N, self.step+1), self.batch)] x = torch.from_numpy(inputs[:, 0:5]).float() # print(x) rewards = inputs[:, 5] input0 = torch.from_numpy(np.append(inputs[:, 6:10], np.zeros((self.batch, 1)), axis=1)).float() input1 = torch.from_numpy(np.append(inputs[:, 6:10], np.ones((self.batch, 1)), axis=1)).float() y = rewards + self.gamma * (1-inputs[:, -1]) * np.max([self.Qhat(input0).detach().numpy(), self.Qhat(input1).detach().numpy()], axis=0).flatten() 
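# The line above builds the TD target y = r + gamma * max_a' Qhat(s', a'):
# the (1 - done) factor removes the bootstrap term for terminal transitions,
# and Qhat is the target network that is copied from Q every C steps below,
# which keeps the regression target stable while Q itself is being updated.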
self.optim.zero_grad() l = self.loss(torch.from_numpy(y).float(), self.Q(x).flatten()) l.backward() self.optim.step() if (self.step % self.C) == 0: self.Qhat = copy.deepcopy(self.Q) env = gym.make('CartPole-v1') env.seed(0) # Initialise le seed du pseudo-random np.random.seed(5) params = {'eps':0.01, 'epsDecay':0.99999, 'batch':100, 'C':100, 'lr':0.005, 'layers':[200], 'gamma':0.999, 'N':100000} agent = DQN(env, params) #agent = RandomAgent(env.action_space) outdir = outdir = 'cartpole-v0/random-agent-results' envm = wrappers.Monitor(env, directory=outdir, force=True, video_callable=False) for i in range(500): rsum = 0 obs = envm.reset() reward = 0 done = False while True: action = agent.act(obs, reward, done) obs, reward, done, _ = envm.step(action) agent.learn(obs, reward, done) rsum += reward if done: break print(rsum) env.close()10.0 9.0 8.0 11.0 8.0 9.0 10.0 9.0 10.0 9.0 8.0 11.0 8.0 9.0 10.0 8.0 10.0 8.0 9.0 11.0 8.0 9.0 10.0 10.0 9.0 10.0 10.0 8.0 9.0 10.0 10.0 9.0 9.0 9.0 9.0 10.0 9.0 9.0 9.0 10.0 9.0 9.0 9.0 10.0 9.0 9.0 9.0 10.0 9.0 11.0 10.0 10.0 9.0 10.0 11.0 10.0 9.0 8.0 10.0 10.0 16.0 29.0 10.0 43.0 31.0 10.0 62.0 8.0 8.0 8.0 11.0 29.0 11.0 24.0 37.0 51.0 49.0 51.0 32.0 40.0 41.0 37.0 13.0 47.0 31.0 24.0 26.0 22.0 12.0 14.0 15.0 17.0 15.0 15.0 16.0 14.0 18.0 18.0 80.0 17.0 100.0 101.0 104.0 94.0 107.0 115.0 134.0 138.0 158.0 152.0 186.0 141.0 131.0 126.0 119.0 106.0 118.0 128.0 112.0 120.0 121.0 117.0 122.0 135.0 158.0 138.0 114.0 105.0 125.0 126.0 136.0 129.0 124.0 115.0 128.0 117.0 118.0 164.0 103.0 146.0 110.0 126.0 117.0 120.0 124.0 110.0 166.0 195.0 100.0 107.0 196.0 95.0 92.0 92.0 95.0 104.0 92.0 108.0 114.0 122.0 123.0 106.0 117.0 117.0 116.0 115.0 109.0 97.0 105.0 113.0 97.0 122.0 103.0 115.0 114.0 107.0 118.0 120.0 129.0 117.0 116.0 145.0 353.0 282.0 316.0 23.0 92.0 94.0 91.0 107.0 103.0 116[...]Build colab-katago-gd client binariesGIT_REPOSITORY_URL = 'https://github.com/mildinvestor/katago-colab.git' # Install Golang and git !add-apt-repository ppa:longsleep/golang-backports -y 1>/dev/nul !apt-get update 1>/dev/nul !apt-get install --yes git golang-go 1>/dev/nul %env GOPATH=/root/go # Display version !git version !go version %cd /content # Clone katago-colab !rm -rf katago-colab !git clone $GIT_REPOSITORY_URL # Build colab-katago %cd katago-colab !chmod +x ./package.sh !./package.sh # File list !ls -l ./bin/ # Zip and donwload !zip -r ./colab-katag-gd-bin.zip ./bin/*.zip from google.colab import files files.download("./colab-katag-gd-bin.zip")Compile KataGo OPENCL for colab-katagohttps://github.com/lightvector/KataGo/blob/master/Compiling.md#@markdown KataGo version TAG = "v1.9.1" #@param {type:"string"} # Install libraries !apt-get update 1>/dev/nul !apt-get install --yes git libzip-dev cmake 1>/dev/nul import subprocess gpu_name = str(subprocess.check_output("nvidia-smi -q | grep \"Product Name\" | cut -d\":\" -f2 | tr -cd '[:alnum:]._-'", shell=True), encoding='utf-8') print("GPU: {}".format(gpu_name)) %cd /content # Checkout source of KataGo !git clone https://github.com/lightvector/KataGo.git !git checkout $TAG # Compile %cd KataGo/cpp !cmake . -DUSE_BACKEND=OPENCL #!cmake . 
-DUSE_BACKEND=OPENCL -DUSE_TCMALLOC=1 !make # Display version !./katago version # Download KataGo binary !zip -r ./katago.zip ./katago from google.colab import files files.download("./katago.zip")Create a tuning file of KataGoWEIGHT_URL = 'https://media.katagotraining.org/uploaded/networks/models/kata1/kata1-b40c256-s9854456576-d2405111631.bin.gz' !wget --quiet $WEIGHT_URL -O /content/weight.bin.gz %cd /content !./KataGo/cpp/katago version # New tuning features in KataGo v1.9.0 and later !./KataGo/cpp/katago tuner -model ./weight.bin.gz -config ./KataGo/cpp/configs/gtp_example.cfg #!./KataGo/cpp/katago benchmark -model ./weight.bin.gz -config ./KataGo/cpp/configs/gtp_example.cfg -v 300 !zip -r ./opencltuning.zip /root/.katago/opencltuning from google.colab import files files.download("./opencltuning.zip")The following sections are for debugging only, you can ignore. GPU information!nvidia-smiHierarchical models for predicting IoT lifetimeimport arviz as az import numpy as np import pandas as pd from cmdstanpy import CmdStanModel import matplotlib.pyplot as plt import matplotlib as mpl import scipy.stats as stats from ploting_functions import visualise_continuous_predictions, visualise_integer_predictions, plot_failures_batches, plot_individual, plot_resets_batches %matplotlib inline plt.style.context('seaborn-white') mpl.rcParams['figure.dpi'] = 200 LIGHT = "#FFFCDC" LIGHT_HIGHLIGHT = "#FEF590" MID = "#FDED2A" MID_HIGHLIGHT = "#F0DC05" DARK = "#EECA02" DARK_HIGHLIGHT = "#BB9700" GREEN = "#00FF00" LIGHT_GREY = "#DDDDDD"Data generationdata_gen2 = CmdStanModel(stan_file='generate_multilevel_data2.stan') data2 = data_gen2.sample(data={'N_batch': 4, 'N': 200}, fixed_param=True, iter_warmup=0, iter_sampling=1, chains=1, seed=30082021) my_dict = data2.stan_variables() [my_dict.pop(x, None) for x in ['batch_coef']] print('batch_coef: {}'.format(data2.stan_variable('batch_coef'))) data_dict = {key: my_dict[key][0] for key in my_dict.keys()} df = pd.DataFrame(data_dict) df = df.astype({'resets': 'int32', 'batch': 'int32'}) df.to_csv('training_data.csv') di = {1: LIGHT_HIGHLIGHT, 2: MID, 3: DARK, 4: DARK_HIGHLIGHT} df['batch_c'] = df.batch.map(di) fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = df.plot.scatter(x='resets', y='failure_time', c='batch_c', ax=ax) ax.set_title('Collected data (colored by batch)') ax.set_ylabel('Failure time') fig.savefig('dataset_colored.png') plt.close() fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = df.plot.scatter(x='resets', y='failure_time', c=DARK, ax=ax) ax.set_title('Collected data') ax.set_ylabel('Failure time') fig.savefig('dataset.png') plt.close()Simpler case - no batch 3df_restricted=df[df['batch']!=3] fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = df_restricted.plot.scatter(x='resets', y='failure_time', c='batch_c', ax=ax) ax.set_title('Collected data (colored by batch)') ax.set_ylabel('Failure time') fig.savefig('dataset_restricted_colored.png')Poisson modeldata_for_inf = {'N': len(df_restricted), 'resets': df_restricted.resets.values, 'failure_time': df_restricted.failure_time.values} inference = CmdStanModel(stan_file='inference_simple.stan') simple_restricted_result = inference.sample(data_for_inf, seed=29082021)INFO:cmdstanpy:found newer exe file, not recompiling INFO:cmdstanpy:CmdStan start procesing chain 1 | | 00:00 Status   chain 1 |█████▉ | 00:00 Iteration: 1001 / 2000 [ 50%] (Sampling) chain 1 |██████████| 00:00 Sampling completed chain 2 |██████████| 00:00 Sampling completed chain 3 |██████████| 00:00 Sampling completed chain 4 
|██████████| 00:00 Sampling completedLatex tablesimple_restricted_summary = az.summary(simple_restricted_result, var_names=[ '~pred_failure_time', '~pred_resets'], filter_vars='like').drop(['mcse_sd','hdi_3%','hdi_97%'], axis=1) simple_restricted_summary.index = ['$\lambda_{R}$', '$\kappa$','$\\beta$'] simple_restricted_summary.columns = ['mean','st. dev.', '$\widehat{\mathrm{MCSE}}$', 'ESS (bulk)', 'ESS (tail)', '$\widehat{R}$'] tex_file = open("table_simple_restricted_posterior_summary.tex", "w") n = tex_file.write(simple_restricted_summary.to_latex(escape=False)) tex_file.close() simple_restricted_summary resets_sample = simple_restricted_result.stan_variable('pred_resets') failures_sample = simple_restricted_result.stan_variable('pred_failure_time') fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = visualise_integer_predictions(df_restricted.resets,resets_sample,ax) fig.savefig('simple_restricted_ppd_resets.png') plt.close() fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = visualise_continuous_predictions(df_restricted.failure_time,failures_sample,ax) fig.savefig('simple_restricted_ppd_failure_time.png') plt.close() plot_individual(resets_sample, failures_sample, df_restricted, name_prefix='simple_restricted_',close=True) fig, axes = plt.subplots(2, 2, figsize=(7, 4)) axes = axes.flatten() plot_failures_batches(failures_sample,df_restricted,fig,axes,name_prefix='simple_restricted_',close=True) fig, axes = plt.subplots(2, 2, figsize=(7, 4)) axes = axes.flatten() plot_resets_batches(resets_sample,df_restricted,fig,axes,name_prefix='simple_restricted_',close=True)Negative Binomial modelinference = CmdStanModel(stan_file='inference_dispersed.stan') dispersed_restricted_result = inference.sample(data_for_inf, seed=29082021,refresh=1000, max_treedepth=50)INFO:cmdstanpy:found newer exe file, not recompiling INFO:cmdstanpy:CmdStan start procesing chain 1 | | 00:00 Status    chain 1 |█████ | 00:03 Iteration: 1 / 2000 [ 0%] (Warmup)  chain 1 |██████████| 00:06 Sampling completed chain 2 |██████████| 00:06 Sampling completed chain 3 |██████████| 00:06 Sampling completed chain 4 |██████████| 00:06 Sampling completedLatex tabledispersed_restricted_summary = az.summary(dispersed_restricted_result, var_names=[ '~pred_failure_time', '~pred_resets'], filter_vars='like').drop('inv_phi').drop(['mcse_sd','hdi_3%','hdi_97%'], axis=1) dispersed_restricted_summary.index = ['$\lambda_{R}$', '$\kappa$','$\\beta$','$\phi$'] dispersed_restricted_summary.columns = ['mean','st. 
dev.', '$\widehat{\mathrm{MCSE}}$', 'ESS (bulk)', 'ESS (tail)', '$\hat{R}$'] tex_file = open("table_dispersed_posterior_summary.tex", "w") n = tex_file.write(dispersed_restricted_summary.to_latex(escape=False)) tex_file.close() dispersed_restricted_summaryFiguresresets_sample = dispersed_restricted_result.stan_variable('pred_resets') failures_sample = dispersed_restricted_result.stan_variable('pred_failure_time') fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = visualise_integer_predictions(df_restricted.resets,resets_sample,ax) fig.savefig('dispersed_restricted_ppd_resets.png') plt.close() fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = visualise_continuous_predictions(df_restricted.failure_time,failures_sample,ax) fig.savefig('dispersed_restricted_ppd_failure_time.png') plt.close() plot_individual(resets_sample, failures_sample, df_restricted, name_prefix='dispersed_restricted_',close=True) fig, axes = plt.subplots(2, 2, figsize=(7, 4)) axes = axes.flatten() plot_failures_batches(failures_sample,df_restricted,fig,axes,name_prefix='dispersed_restricted_',close=True) fig, axes = plt.subplots(2, 2, figsize=(7, 4)) axes = axes.flatten() plot_resets_batches(resets_sample,df_restricted,fig,axes,name_prefix='dispersed_restricted_',close=True)Return to full dataset Poisson modeldata_for_inf = {'N': 200, 'resets': df.resets.values, 'failure_time': df.failure_time.values} inference = CmdStanModel(stan_file='inference_simple.stan') simple_result = inference.sample(data_for_inf, seed=29082021,refresh=1000)INFO:cmdstanpy:found newer exe file, not recompiling INFO:cmdstanpy:CmdStan start procesing chain 1 | | 00:00 Status  chain 1 |█████ | 00:00 Iteration: 1 / 2000 [ 0%] (Warmup)   chain 1 |██████████| 00:00 Iteration: 1001 / 2000 [ 50%] (Sampling) chain 1 |██████████| 00:00 Sampling completed chain 2 |██████████| 00:00 Sampling completed chain 3 |██████████| 00:00 Sampling completed chain 4 |██████████| 00:00 Sampling completedLatex tablesimple_summary = az.summary(simple_result, var_names=[ '~pred_failure_time', '~pred_resets'], filter_vars='like').drop(['mcse_sd','hdi_3%','hdi_97%'], axis=1) simple_summary.index = ['$\lambda_{R}$', '$\kappa$','$\\beta$'] simple_summary.columns = ['mean','st. 
dev.', '$\widehat{\mathrm{MCSE}}$', 'ESS (bulk)', 'ESS (tail)', '$\hat{R}$'] tex_file = open("table_simple_posterior_summary.tex", "w") n = tex_file.write(simple_summary.to_latex(escape=False)) tex_file.close() simple_summaryFiguresresets_sample = simple_result.stan_variable('pred_resets') failures_sample = simple_result.stan_variable('pred_failure_time') fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = visualise_integer_predictions(df.resets,resets_sample,ax) fig.savefig('simple_ppd_resets.png') plt.close() fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = visualise_continuous_predictions(df.failure_time,failures_sample,ax) fig.savefig('simple_ppd_failure_time.png') plt.close() plot_individual(resets_sample, failures_sample, df, name_prefix='simple_',close=True) fig, axes = plt.subplots(2, 2, figsize=(7, 4)) axes = axes.flatten() plot_failures_batches(failures_sample,df,fig,axes,name_prefix='simple_',close=True) fig, axes = plt.subplots(2, 2, figsize=(7, 4)) axes = axes.flatten() plot_resets_batches(resets_sample,df,fig,axes,name_prefix='simple_',close=True)Negative binomial modeldata_for_inf = {'N': 200, 'resets': df.resets.values, 'failure_time': df.failure_time.values} inference = CmdStanModel(stan_file='inference_dispersed.stan') dispersed_result = inference.sample(data_for_inf, seed=29082021,refresh=1000, max_treedepth=50)INFO:cmdstanpy:found newer exe file, not recompiling INFO:cmdstanpy:CmdStan start procesing chain 1 | | 00:00 Status  chain 1 |█████ | 00:06 Iteration: 1 / 2000 [ 0%] (Warmup)    chain 1 |██████████| 00:16 Sampling completed chain 2 |██████████| 00:16 Sampling completed chain 3 |██████████| 00:16 Sampling completed chain 4 |██████████| 00:16 Sampling completedLatex tabledispersed_summary = az.summary(dispersed_result, var_names=[ '~pred_failure_time', '~pred_resets'], filter_vars='like').drop('inv_phi').drop(['mcse_sd','hdi_3%','hdi_97%'], axis=1) dispersed_summary.index = ['$\lambda_{R}$', '$\kappa$','$\\beta$','$\phi$'] dispersed_summary.columns = ['mean','st. 
dev.', '$\widehat{\mathrm{MCSE}}$', 'ESS (bulk)', 'ESS (tail)', '$\hat{R}$'] tex_file = open("table_dispersed_posterior_summary.tex", "w") n = tex_file.write(dispersed_summary.to_latex(escape=False)) tex_file.close() dispersed_summaryFiguresresets_sample = dispersed_result.stan_variable('pred_resets') failures_sample = dispersed_result.stan_variable('pred_failure_time') fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = visualise_integer_predictions(df.resets,resets_sample,ax) fig.savefig('dispersed_ppd_resets.png') plt.close() # plt.close() fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = visualise_continuous_predictions(df.failure_time,failures_sample,ax) fig.savefig('dispersed_ppd_failure_time.png') plt.close() plot_individual(resets_sample, failures_sample, df, name_prefix='dispersed_',close=True) fig, axes = plt.subplots(2, 2, figsize=(7, 4)) axes = axes.flatten() plot_failures_batches(failures_sample,df,fig,axes,name_prefix='dispersed_',close=True) fig, axes = plt.subplots(2, 2, figsize=(7, 4)) axes = axes.flatten() plot_resets_batches(resets_sample,df,fig,axes,name_prefix='dispersed_',close=True)Multilevel model inference Prior predictive checkmultilevel_ppc = CmdStanModel(stan_file='multilevel_ppc_2.stan') result_ppc = multilevel_ppc.sample(data={'N_batch': 4, 'N': 200, 'batch': df.batch.values}, fixed_param=True, iter_warmup=0, iter_sampling=1000, chains=1, seed=30082021) ppc_summary = az.summary(result_ppc, var_names=[ '~pred_failure_time', '~pred_resets'], filter_vars='like').drop('r_hat', axis=1).drop(['mcse_sd','hdi_3%','hdi_97%'], axis=1) index_of_variables = ['$\lambda_{R}$', '$\kappa$', '$\mu_{batch}$', '$\sigma_{batch}$', '$\\tilde{\\alpha}_{batch,1}$', '$\\tilde{\\alpha}_{batch,2}$', '$\\tilde{\\alpha}_{batch,3}$', '$\\tilde{\\alpha}_{batch,4}$', '$\\beta_1$', '$\\beta_2$', '$\\beta_3$', '$\\beta_4$' ] ppc_summary.index = index_of_variables ppc_summary.columns = ['mean','st. 
dev.', '$\widehat{\mathrm{MCSE}}$', 'ESS (bulk)', 'ESS (tail)'] tex_file = open("table_ppc_summary.tex", "w") n = tex_file.write(ppc_summary.to_latex(escape=False)) tex_file.close() ppc_summary y_ppc_1 = result_ppc.stan_variable('pred_resets') fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = visualise_integer_predictions(df.resets,y_ppc_1,ax,PPC=True) fig.savefig('ppc_resets.png') plt.close() y_ppc_2 = result_ppc.stan_variable('pred_failure_time') fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = visualise_continuous_predictions(df.failure_time,y_ppc_2,ax,PPC=True) fig.savefig('ppc_failure_time.png') plt.close()Simulation based calibrationranks_np = np.genfromtxt('new_ranks_N1000.csv', delimiter=',') R = len(ranks_np) sbc_low = stats.binom.ppf(0.005, R, 25.0 / 500) sbc_mid = stats.binom.ppf(0.5, R, 25.0 / 500) sbc_high = stats.binom.ppf(0.995, R, 25.0 / 500) bar_x = [-10, 510, 500, 510, -10, 0, -10] bar_y = [sbc_high, sbc_high, sbc_mid, sbc_low, sbc_low, sbc_mid, sbc_high] fig, axes = plt.subplots(4, 2, figsize=(7, 4)) list_of_variables = ['$\lambda_{{reset}}$', '$\kappa$', '$\mu_{{batch}}$', '$\sigma_{{batch}}$', '$\\tilde{{\\alpha}}_{{batch,1}}$', '$\\tilde{{\\alpha}}_{{batch,2}}$', '$\\tilde{{\\alpha}}_{{batch,3}}$', '$\\tilde{{\\alpha}}_{{batch,4}}$'] axes = axes.flatten() for i in range(8): ax = axes[i] name = list_of_variables[i] sbc_ranks = ranks_np[:, i] ax.fill(bar_x, bar_y, color="#DDDDDD", ec="#DDDDDD") ax.plot([0, 500], [sbc_mid, sbc_mid], color="#999999", linewidth=2) ax.hist(sbc_ranks, bins=[25 * x - 0.5 for x in range(21)], color=DARK, ec=DARK_HIGHLIGHT, zorder=3) ax.set_title(name) ax.set_yticks([]) fig.tight_layout() fig.savefig('sbc_results.png') plt.close()Posterior inferencemultilevel_infer2 = CmdStanModel(stan_file='multilevel_inference2.stan') data_for_inf = {'N': 200, 'N_batch': 4, 'batch': df.batch.values, 'resets': df.resets.values, 'failure_time': df.failure_time.values} multileve_result2 = multilevel_infer2.sample( data_for_inf, seed=29082021, adapt_delta=.99, max_treedepth=50, refresh=1000) posterior_summary = az.summary(multileve_result2, var_names=[ '~pred_failure_time', '~pred_resets'], filter_vars='like').drop(['mcse_sd','hdi_3%','hdi_97%'], axis=1) # -0.373246 0.123104 2.01696 -0.771476 posterior_summary.columns = ['mean','st. 
dev.', '$\widehat{\mathrm{MCSE}}$', 'ESS (bulk)', 'ESS (tail)', '$\widehat{R}$'] index_of_variables = ['$\lambda_{R}$', '$\kappa$', '$\mu_{batch}$', '$\sigma_{batch}$', '$\\tilde{\\alpha}_{batch,1}$', '$\\tilde{\\alpha}_{batch,2}$', '$\\tilde{\\alpha}_{batch,3}$', '$\\tilde{\\alpha}_{batch,4}$', '$\\beta_1$', '$\\beta_2$', '$\\beta_3$', '$\\beta_4$' ] posterior_summary.index = index_of_variables tex_file = open("table_posterior_summary.tex", "w") n = tex_file.write(posterior_summary.to_latex(escape=False)) tex_file.close() posterior_summary resets_sample = multileve_result2.stan_variable('pred_resets') failures_sample = multileve_result2.stan_variable('pred_failure_time') fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = visualise_integer_predictions(df.resets,resets_sample,ax) fig.savefig('ppd_resets.png') plt.close() fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = visualise_continuous_predictions(df.failure_time,failures_sample,ax,binwidth=2.5) fig.savefig('ppd_failure_time.png') plt.close() plot_individual(resets_sample, failures_sample, df, name_prefix='multilevel_',close=True) fig, axes = plt.subplots(2, 2, figsize=(7, 4)) axes = axes.flatten() plot_failures_batches(failures_sample,df,fig,axes,name_prefix='multilevel_',close=True) fig, axes = plt.subplots(2, 2, figsize=(7, 4)) axes = axes.flatten() plot_resets_batches(resets_sample,df,fig,axes,name_prefix='multilevel_',close=True)Poststratification Data for poststratificationfrom numpy.random import default_rng rng = default_rng(seed=31082021) batch_PS = rng.choice([1, 2, 3, 4], 1000, p=[0.6, 0.2, 0.1, 0.1])Poststratification computationpostratification = CmdStanModel( stan_file='multilevel_inference2_poststrat.stan') data_for_PS = {'N': 200, 'N_batch': 4, 'batch': df.batch.values, 'resets': df.resets.values, 'failure_time': df.failure_time.values, 'N_PS': 1000, 'batch_PS': batch_PS} poststrat_result = postratification.sample( data_for_PS, seed=29082021, adapt_delta=.99, max_treedepth=50,refresh=1000) poststrat_summary = az.summary(poststrat_result, var_names=['quant20', 'median']).drop(['mcse_sd','hdi_3%','hdi_97%'], axis=1) poststrat_summary.index = ['$q_{20}$','median'] poststrat_summary.columns = ['mean','st. 
dev.', '$\widehat{\mathrm{MCSE}}$', 'ESS (bulk)', 'ESS (tail)', '$\hat{R}$'] tex_file = open("table_poststrat_summary.tex", "w") n = tex_file.write(poststrat_summary.to_latex(escape=False)) tex_file.close() poststrat_summary fig, (ax,ax2) = plt.subplots(2, 1, figsize=(7, 4),sharex=True) ax.hist(poststrat_result.stan_variable('quant20'), color=DARK, ec=DARK_HIGHLIGHT, density=True) ax.set_xlabel('Time after 20% of devices fail') #ax.set_xlabel('Failure time') ax.set_yticks([]) ax2.hist(poststrat_result.stan_variable('median'), color=DARK, ec=DARK_HIGHLIGHT, density=True) ax2.set_xlabel('Time after 50% of devices fail') #ax2.set_xlabel('Failure time') ax2.set_yticks([]) fig.tight_layout() fig.savefig('poststrat_results.png')Robustness analysis Quarter of datadf_quarter = df.sample(frac=.25,random_state=42) df_quarter.batch.value_counts() fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = df_quarter.plot.scatter(x='resets', y='failure_time', c='batch_c', ax=ax) ax.set_title('Collected data (colored by batch)') ax.set_ylabel('Failure time') fig.savefig('dataset_restricted_colored.png') data_for_inf_q = {'N': len(df_quarter), 'N_batch': 4, 'batch': df_quarter.batch.values, 'resets': df_quarter.resets.values, 'failure_time': df_quarter.failure_time.values} multileve_result_q = multilevel_infer2.sample( data_for_inf_q, seed=29082021, adapt_delta=.99, max_treedepth=50, refresh=1000) print(multileve_result_q.diagnose()) posterior_summary_q = az.summary(multileve_result_q, var_names=[ '~pred_failure_time', '~pred_resets'], filter_vars='like') index_of_variables = ['$\lambda_{R}$', '$\kappa$', '$\mu_{batch}$', '$\sigma_{batch}$', '$\\tilde{\\alpha}_{batch,1}$', '$\\tilde{\\alpha}_{batch,2}$', '$\\tilde{\\alpha}_{batch,3}$', '$\\tilde{\\alpha}_{batch,4}$', '$\\beta_1$', '$\\beta_2$', '$\\beta_3$', '$\\beta_4$' ] posterior_summary_q.index = index_of_variables tex_file = open("table_posterior_summary_q.tex", "w") n = tex_file.write(posterior_summary.to_latex(escape=False)) tex_file.close() posterior_summary resets_sample_q = multileve_result_q.stan_variable('pred_resets') failures_sample_q = multileve_result_q.stan_variable('pred_failure_time') fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = visualise_integer_predictions(df_quarter.resets,resets_sample_q,ax) fig.savefig('q_ppd_resets.png') plt.close() fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = visualise_continuous_predictions(df_quarter.failure_time,failures_sample_q,ax,binwidth=2.5) fig.savefig('q_ppd_failure_time.png') plt.close() plot_individual(resets_sample_q, failures_sample_q, df_quarter, name_prefix='q_multilevel_',close=True) fig, axes = plt.subplots(2, 2, figsize=(7, 4)) axes = axes.flatten() plot_failures_batches(failures_sample_q, df_quarter,fig,axes,name_prefix='q_multilevel_',close=True) fig, axes = plt.subplots(2, 2, figsize=(7, 4)) axes = axes.flatten() plot_resets_batches(resets_sample_q,df_quarter,fig,axes,name_prefix='q_multilevel_',close=True) data_for_PS_q = {'N': len(df_quarter), 'N_batch': 4, 'batch': df_quarter.batch.values, 'resets': df_quarter.resets.values, 'failure_time': df_quarter.failure_time.values, 'N_PS': 1000, 'batch_PS': batch_PS} poststrat_result_q = postratification.sample( data_for_PS_q, seed=29082021, adapt_delta=.99, max_treedepth=50,refresh=1000) poststrat_summary_q = az.summary(poststrat_result_q, var_names=['quant20', 'median'])Tenth of datadf_tenth = df.sample(frac=.1,random_state=42) df_tenth.batch.value_counts() fig, ax = plt.subplots(1, 1, figsize=(7, 4)) ax = df_tenth.plot.scatter(x='resets', 
y='failure_time', c='batch_c', ax=ax) ax.set_title('Collected data (colored by batch)') ax.set_ylabel('Failure time') fig.savefig('dataset_restricted_colored.png') data_for_PS_t = {'N': len(df_tenth), 'N_batch': 4, 'batch': df_tenth.batch.values, 'resets': df_tenth.resets.values, 'failure_time': df_tenth.failure_time.values, 'N_PS': 1000, 'batch_PS': batch_PS} poststrat_result_t = postratification.sample( data_for_PS_t, seed=29082021, adapt_delta=.99, max_treedepth=50,refresh=1000) poststrat_summary_t = az.summary(poststrat_result_t, var_names=['quant20', 'median']) df_90 = df.sample(frac=.9,random_state=42) data_for_PS_90 = {'N': len(df_90), 'N_batch': 4, 'batch': df_90.batch.values, 'resets': df_90.resets.values, 'failure_time': df_90.failure_time.values, 'N_PS': 1000, 'batch_PS': batch_PS} poststrat_result_90 = postratification.sample( data_for_PS_90, seed=29082021, adapt_delta=.99, max_treedepth=50,refresh=1000) poststrat_summary_90 = az.summary(poststrat_result_90, var_names=['quant20', 'median']) poststrat_summary_90 poststrat_summary = az.summary(poststrat_result, var_names=['quant20', 'median']) poststrat_summary def plot_conf_from_summary(summary,variable,ax,value=1): line=summary.loc[variable,'mean']+np.array([-1,1])*summary.loc[variable,'sd'] ax.plot(line,[value,value],c=DARK,zorder=0) ax.scatter(summary.loc[variable,'mean'],value,c=DARK,ec=DARK_HIGHLIGHT,zorder=1) return ax ax = plot_conf_from_summary(poststrat_summary_90,'quant20',ax,3) fig, (ax,ax2) = plt.subplots(2,1,figsize=(7,4),sharex=True) ax = plot_conf_from_summary(poststrat_summary,'quant20',ax,4) ax = plot_conf_from_summary(poststrat_summary_90,'quant20',ax,3) ax = plot_conf_from_summary(poststrat_summary_q,'quant20',ax,2) ax = plot_conf_from_summary(poststrat_summary_t,'quant20',ax,1) ax.set_yticks([1,2,3,4]) ax.set_yticklabels(['10% of data','25% of data','90% of data','Full dataset']) ax.set_xlabel('Time after 20% of devices will fail') ax2 = plot_conf_from_summary(poststrat_summary,'median',ax2,4) ax2 = plot_conf_from_summary(poststrat_summary_90,'median',ax2,3) ax2 = plot_conf_from_summary(poststrat_summary_q,'median',ax2,2) ax2 = plot_conf_from_summary(poststrat_summary_t,'median',ax2,1) ax2.set_yticks([1,2,3,4]) ax2.set_yticklabels(['10% of data','25% of data','90% of data','Full dataset']) ax2.set_xlabel('Time after 50% of devices will fail') fig.tight_layout() fig.savefig('poststrat_summary.png') lista_df = [poststrat_summary_t.iloc[:,:2],poststrat_summary_q.iloc[:,:2],poststrat_summary_90.iloc[:,:2],poststrat_summary.iloc[:,:2]] poststrat_over_datasets = pd.concat(lista_df,keys= ['10\% of data','25\% of data','90\% of data','Full dataset'],axis=0) tex_file = open("table_poststrat_robustness.tex", "w") n = tex_file.write(poststrat_over_datasets.to_latex(escape=False).replace('quant20','$q_{20}$').replace('sd','st. dev.')) tex_file.close() poststrat_over_datasetsImport the library and read the dataimport numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns plt.style.use('ggplot')In this part, I only focus on multiple choice responses so I import it. 
The rate file will be used later to caculate the salary.fx_rates = pd.read_csv('conversionRates.csv', encoding = "ISO-8859-1", low_memory = False) df = pd.read_csv('multipleChoiceResponses.csv', encoding = "ISO-8859-1", low_memory = False) df.head()Define functions to deal with multiple value or column First of all, I'll write function to deal with multiple value and multiple columns response.def total_count(df): ''' This function receives a column with multiple values separated by a comma. It counts the number of occurrences of each value in the column. INPUT: df - the pandas dataframe contain multiple values OUTPUT: new_df - a dataframe of each value with the count of how often it shows up ''' new_df = df.str.split(',') new_df_set=[] for i in new_df.dropna(): new_df_set.extend(i) new_df = pd.Series(new_df_set).value_counts().sort_values(ascending = False).to_frame() return new_df def count_multi_col(df, colname, delcol=[]): ''' This function searches for all columns in the data that contain a particular string. These columns have to have the same range. Function calculates the number of each value in each column. And then it calculates the proportion of each column. INPUT: df - the pandas dataframe contain multiple columns to extract colname - a string as the prefix of columns names to be extracted delcol - a list contains colnames to be removed OUTPUT: new_df - a dataframe of each value with the ratio of this option be selected ''' target_features = [x for x in df.columns if x.find(colname) != -1] if len(delcol) != 0: for col in delcol: target_features.remove(col) new_df_dict = {} for feature in target_features: feature_value = df[feature].value_counts() feature_value_ratio = feature_value / feature_value.sum() new_df_dict[feature[len(colname):]] = feature_value_ratio new_df = pd.DataFrame(new_df_dict).transpose() return new_dfPlot learning platform and learning platform usefulness Let's see what learning platform we can choose and how useful they are.f, ax = plt.subplots(1, 2, figsize = (16, 8)) learn_platform_plt = total_count(df['LearningPlatformSelect']) sns.barplot(learn_platform_plt[0], learn_platform_plt.index, ax = ax[0]) ax[0].set_title('Learning Platform') LP_usefulness = count_multi_col(df, 'LearningPlatformUsefulness') LP_usefulness = LP_usefulness.sort_values('Very useful', ascending = False) sns.heatmap(LP_usefulness, ax = ax[1], annot = True, fmt = ".2f") ax[1].set_title('Learning Platform Usefulness') plt.show()Plot course platform and blogs etc. 
Let's see what specific choices do we have in the courses, blogs, podcasts and newsletters.f, ax = plt.subplots(1, 2, figsize = (15, 6)) learn_platform_plt = total_count(df['CoursePlatformSelect']) sns.barplot(learn_platform_plt[0], learn_platform_plt.index, ax = ax[0]) ax[0].set_title('Course Platform') BPN_plt = total_count(df['BlogsPodcastsNewslettersSelect']) BPN_plt = BPN_plt.iloc[:5] sns.barplot(BPN_plt[0], BPN_plt.index, ax = ax[1], palette = sns.color_palette('inferno', 5)) ax[1].set_title('Blogs Podcasts Newsletters') plt.subplots_adjust(wspace = 1) plt.show()Plot job skill importance and work tools frequency We alse can learn from what data scientist think most important skill and which tools they most frequently used.f, ax = plt.subplots(1, 2, figsize = (20, 10)) JS_importance = count_multi_col(df, 'JobSkillImportance', ['JobSkillImportanceOtherSelect1', 'JobSkillImportanceOtherSelect2', 'JobSkillImportanceOtherSelect3']) JS_importance = JS_importance.sort_values('Necessary', ascending = False) sns.heatmap(JS_importance, ax = ax[0], annot = True, fmt = ".2f") ax[0].set_title('Job Skill Importance') ax[0].set_yticklabels(ax[0].get_yticklabels(), rotation = 0, fontsize = 15) ax[0].tick_params(axis = 'x', labelsize = 15) WT_frequency = count_multi_col(df, 'WorkToolsFrequency', ['WorkToolsFrequencySelect1', 'WorkToolsFrequencySelect2']) WT_frequency = WT_frequency.sort_values('Most of the time', ascending = False) sns.heatmap(WT_frequency[:15], ax = ax[1], annot = True, fmt = ".2f") ax[1].set_title('Work Tools Frequency') ax[1].set_yticklabels(ax[1].get_yticklabels(), rotation = 0, fontsize = 15) ax[1].tick_params(axis = 'x', labelsize = 15) plt.show()Plot work challenges and time spentLet's see which challenges is biggest and which work cost most of time.f, ax = plt.subplots(1, 1, figsize = (16, 8)) #learn_platform_plt = total_count(df['WorkChallengesSelect']) #sns.barplot(learn_platform_plt[0], learn_platform_plt.index, ax = ax[0]) #ax[0].set_title('WorkChallengesSelect') time_spent = ['TimeFindingInsights', 'TimeVisualizing', 'TimeGatheringData', 'TimeModelBuilding'] df_time_spent = df[time_spent].copy() df_time_spent = df_time_spent[(df_time_spent > 100).sum(axis = 1) == 0] sns.violinplot(data = df_time_spent, ax = ax) plt.xticks(fontsize = 20) plt.yticks(fontsize = 20) plt.show() f, ax = plt.subplots(1, 1, figsize = (16, 8)) learn_platform_plt = total_count(df['WorkChallengesSelect']) sns.barplot(learn_platform_plt[0], learn_platform_plt.index, ax = ax) plt.show()Predict salary This model will predict whether the salary is above the median salary. Clean the salary and transform it to numeric type. 
Then calculate dollar compensation through the exchange rate.df_salary = df.copy() df_salary['CompensationAmount'] = df_salary['CompensationAmount'].str.replace(',', '') df_salary['CompensationAmount'] = df_salary['CompensationAmount'].str.replace('-', '') df_salary = df_salary.merge(fx_rates, left_on = 'CompensationCurrency', right_on = 'originCountry', how = 'left') df_salary['Salary'] = pd.to_numeric(df_salary['CompensationAmount']) * df_salary['exchangeRate'] df_salary.drop(['CompensationAmount', 'exchangeRate', 'Unnamed: 0'], axis =1, inplace = True)Clean the job satisfaction and transform it to numeric type.df_salary['JobSatisfaction'].replace({'10 - Highly Satisfied':'10', '1 - Highly Dissatisfied':'1','I prefer not to share': np.NaN},inplace = True) df_salary['JobSatisfaction'] = df_salary['JobSatisfaction'].astype(float)Select the demographic and numeric features.dem_features = ['GenderSelect','Country','Age', 'FormalEducation','MajorSelect','ParentsEducation', 'EmploymentStatus', 'CurrentJobTitleSelect', 'DataScienceIdentitySelect','CodeWriter', 'JobFunctionSelect', 'SalaryChange','RemoteWork','WorkMLTeamSeatSelect', 'Tenure','EmployerIndustry','EmployerSize'] num_features = df_salary.select_dtypes(include=['float', 'int']).columns df_num_dem = df_salary[list(set(num_features) | set(demographic_features))].copy()Drop NA salary and fill other numeric NA.df_num_dem = df_num_dem.dropna(axis = 0, subset = ['Salary']) # Use median to fill nas for col in num_features: df_num_dem.loc[:, col] = df_num_dem[col].fillna((df_num_dem[col].median()))Use One-hot to recode the categorical features.df_num_dem = pd.get_dummies(data = df_num_dem)Prepare the data for modeling.X = df_num_dem.drop('Salary', axis=1) y = df_num_dem['Salary'] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 42) # Replace salary amount to whether salary is above the median. y_median = y_train.median() y_train = y_train > y_median y_test = y_test > y_medianUse random forest to train and predict the salary sataus.from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier(n_estimators = 200) model.fit(X_train, y_train) y_pred = model.predict(X_test)The median is balance distributed so we can use accuracy as measures.y_pred = model.predict(X_test) from sklearn.metrics import accuracy_score accuracy_score(y_test, y_pred)Plot the importance of the features.features = X_train.columns importances = model.feature_importances_ indices = np.argsort(importances) plt.title('Feature Importances') plt.barh(range(len(indices[-9:])), importances[indices[-9:]], color='b', align='center') plt.yticks(range(len(indices[-9:])), features[indices[-9:]]) plt.xlabel('Relative Importance') plt.show()Use logistic regression to train and predict the salary again. 
We can see the result is similar.from sklearn.linear_model import LogisticRegression import operator model = LogisticRegression(penalty = 'l1', C = 0.05, tol = 0.0001, random_state = 42, solver = 'liblinear') model.fit(X_train,y_train) # Use coefs as feature importance coefs = np.transpose(model.coef_) feature_importance={} for (feature, k) in zip(list(X_train), range(len(coefs))) : feature_importance[feature] = abs(coefs[k]) sorted_features = sorted(feature_importance.items(), key=operator.itemgetter(1)) top5 = sorted_features[-5::] print(top5) # Predit the salary and y_pred = model.predict(X_test) print(accuracy_score(y_test, y_pred))[('Country_United Kingdom', array([0.54814768])), ('Country_Germany', array([0.7042979])), ('EmployerIndustry_Academic', array([1.009369])), ('Country_India', array([1.43855345])), ('Country_United States', array([2.10467018]))] 0.8144424131627057Cross validation and Grid Search didn't really improve the classification accuracy score on the test set. Let's see which are the results that are getting falsely classifiedconf_mx = confusion_matrix(y_test, y_test_pred) conf_mxIt seems like the issue is with mainly between the numbers 1, 7 and 9 Lets try to create more data (data augmentation) to see if that will improve the score The way we are going to create more data is by rotating the images by 10deg on both - right and left sidesfrom scipy.ndimage.interpolation import rotate def img_augmenter(img) -> list: img = img.reshape(28, 28) rot_img_left = rotate(img, angle=10, reshape=False) rot_img_right = rotate(img, angle=-10, reshape=False) rot_img_left = rot_img_left.reshape(1, -1) rot_img_right = rot_img_right.reshape(1, -1) return [rot_img_left, rot_img_right] # Check for a sample image sample_img = X_train[7] sample_out_left, sample_out_right = tuple(img_augmenter(sample_img)) fig, ax = plt.subplots(1, 3, figsize=(10, 10)) #sample_out_left.shape img_left = sample_out_left.reshape(28, 28) ax[0].imshow(img_left, cmap='gray') original_img = sample_img.reshape(28, 28) ax[1].imshow(original_img, cmap='gray') img_right = sample_out_right.reshape(28, 28) ax[2].imshow(img_right, cmap='gray')If you carefully notice, the left and right rotated images (new images) are slightly less in contrast compared tooriginal image. This may be the effect of aliasing due to rotation. 
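If you want to probe that interpolation effect, one optional check (not part of the original workflow, shown only as a sketch that reuses the sample_img from the cell above) is to compare interpolation orders in scipy's rotate: order=0 (nearest neighbour) keeps the original pixel values, while the default cubic spline (order=3) interpolates and tends to soften them.

from scipy.ndimage import rotate
rot_nearest = rotate(sample_img.reshape(28, 28), angle=10, reshape=False, order=0)  # no interpolation
rot_cubic = rotate(sample_img.reshape(28, 28), angle=10, reshape=False, order=3)    # default spline
print(rot_nearest.min(), rot_nearest.max())  # same value range as the input
print(rot_cubic.min(), rot_cubic.max())      # range can shrink or overshoot slightly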
Let's ignore this issue and go ahead for now.new_data = list() new_labels = list() for i, img in enumerate(X_train): label = y_train[i] new_data.extend(img_augmenter(img)) new_labels.extend([label] * 2) new_data = np.asarray(new_data) new_labels = np.asarray(new_labels) print(new_data.shape) print(new_labels.shape) new_data = new_data.reshape(-1, 784) sample = 14 img = new_data[sample].reshape(28, 28) plt.imshow(img, cmap='gray') new_labels[sample] new_data.shape X_train = list(X_train) X_train.extend(list(new_data)) X_train = np.asarray(X_train) y_train = list(y_train) y_train.extend(list(new_labels)) y_train = np.asarray(y_train) y_train.shape from sklearn.utils import shuffle X_train, y_train = shuffle(X_train, y_train) knn_clf = KNeighborsClassifier(n_neighbors=5, weights='distance') knn_clf.fit(X_train, y_train) y_test_pred = knn_clf.predict(X_test) accuracy_score(y_test, y_test_pred)Q01 - Sorteie 10 inteiros entre 1 e 100 para uma lista e descubra o maior e o menor valor, sem usar as funções max e minimport random numeros = [random.randint(1,100) for _ in range(10)] max, min = 0, 100 for num in numeros: if num > max: max = num if num < min: min = num print(numeros) print('O maior número é {} e o menor é {}'.format(max, min))[18, 91, 44, 2, 27, 31, 23, 91, 89, 43] O maior número é 91 e o menor é 2Q02 - Sorteie 20 inteiros entre 1 e 100 num a lista. Armazene os números ares na lista PAR e os números ímpares na lista IMPAR. Imprima as três listas.import random numeros = [random.randint(1, 100) for _ in range(20)] lista_par, lista_impar = [], [] for num in numeros: if num % 2 == 0: lista_par.append(num) else: lista_impar.append(num) print(numeros, len(numeros)) print(lista_par, len(lista_par)) print(lista_impar, len(lista_impar))[94, 95, 72, 98, 91, 65, 58, 48, 70, 88, 30, 24, 89, 9, 97, 11, 8, 56, 96, 91] 20 [94, 72, 98, 58, 48, 70, 88, 30, 24, 8, 56, 96] 12 [95, 91, 65, 89, 9, 97, 11, 91] 8Q03 - Faça um programa que crie dois vetores com 10 elementos aleatórios entre 1 e 100. Gere um terceiro vetor de 20 elementos, cujos valores deverão ser compostos pelos elementos intercalados dos dois outros vetores. Imprima os três vetoreslista1, lista2 = [random.randint(1,100) for _ in range(10)], [random.randint(1,100) for _ in range(10)] lista3 = [] pos = 0 while len(lista3) < 20: lista3.append(lista1[pos]) lista3.append(lista2[pos]) pos += 1 print(lista1, len(lista1)) print(lista2, len(lista2)) print(lista3, len(lista3))[7, 86, 76, 2, 32, 59, 64, 26, 8, 69] 10 [89, 37, 82, 14, 72, 53, 31, 88, 62, 29] 10 [7, 89, 86, 37, 76, 82, 2, 14, 32, 72, 59, 53, 64, 31, 26, 88, 8, 62, 69, 29] 20Q04 - Seja o statement sobre diversidade: “The Python Software Foundation and the global Python community welcome and encourage participation by everyone. Our community is based on mutual respect, tolerance, and encouragement, and we are working to help each other live up to these principles. We want our community to be more diverse : whoever you are, and whatever your background, we welcome you.”. Gere uma lista de palavras deste texto com split(), a seguir crie uma lista com as palavras que começam ou terminam com uma das letras “python”. Imprima a lista resultante. Não se esqueça de remover antes os caracteres especiais e cuidado com maiúsculas e minúsculas.import re texto = '''The Python Software Foundation and the global Python community welcome and encourage participation by everyone. 
Our community is based on mutual respect, tolerance, and encouragement, and we are working to help each other live up to these principles. We want our community to be more diverse : whoever you are, and whatever your background, we welcome you.''' texto = re.sub('[!@#$,.]', '', texto) palavras = texto.split() lista = [x for x in palavras if x[0].lower() in 'python' or x[-1].lower() in 'python'] print(lista)['The', 'Python', 'Foundation', 'the', 'Python', 'community', 'participation', 'by', 'Our', 'community', 'on', 'respect', 'tolerance', 'encouragement', 'to', 'help', 'each', 'other', 'up', 'to', 'these', 'principles', 'want', 'our', 'community', 'to', 'you', 'your', 'you']Q05 - Seja o mesmo texto acima “splitado”. Calcule quantas palavras possuem uma das letras “python” e que tenham mais de 4 caracteres. Não se esqueça de transfor mar maiúsculas para minúsculas e de remover antes os caracteres especiais.texto = '''The Python Software Foundation and the global Python community welcome and encourage participation by everyone. Our community is based on mutual respect, tolerance, and encouragement, and we are working to help each other live up to these principles. We want our community to be more diverse : whoever you are, and whatever your background, we welcome you.''' texto = re.sub('[!@#$,.]', '', texto) palavras = texto.split() lista = [x for x in palavras if x[0].lower() in 'python' or x[-1].lower() in 'python'] lista = [i for i in lista if len(i)>4] print(len(lista), lista)13 ['Python', 'Foundation', 'Python', 'community', 'participation', 'community', 'respect', 'tolerance', 'encouragement', 'other', 'these', 'principles', 'community']Nerf PytorchThis file runs the standard nerf model on a dynamic scene.!git clone https://github.com/rohaldb/nerf-pytorch.git %cd nerf-pytorch !pip install -r requirements.txt !gdown https://drive.google.com/uc?id=12xA1xKZ7QHBl4-7oK2TyOATkPQk0vIJA !unzip "nerf_data.zip"; rm "nerf_data.zip" #connect to google drive from google.colab import drive drive.mount('/content/gdrive')Mounted at /content/gdriveTraining a new model#train the model !python run_nerf.py --config configs/kid-running.txt --i_weights 1000 --i_testset 5000 while True: pass #save results #copy results to google drive %cp -R /content/nerf-pytorch/logs/* "/content/drive/My Drive/Thesis/Standard Nerf/"Eval a pretrained model%cd /content/nerf-pytorch !mkdir -p "./logs/kid-running_test/" %cp -R "/content/gdrive/MyDrive/Thesis/Standard Nerf/kid-running_test/." /content/nerf-pytorch/logs/kid-running_test %cd /content/nerf-pytorch #remember to change i_test to the appropriate indicies in run_nerf.py !python run_nerf.py --config configs/kid-running.txt --render_only --render_test %cp -R /content/nerf-pytorch/logs/kid-running_test/renderonly_test_009999/* "/content/gdrive/My Drive/Thesis/Standard Nerf/Eval"quant-econ Solutions: Markov Asset Pricing Solutions for http://quant-econ.net/py/markov_asset.html%matplotlib inline import numpy as np import quantecon as qe import matplotlib.pyplot as pltNext we load the code from the lectures into a cell to have access to the functions. 
This next line assumes that you have the file `asset_pricing.py` from [QuantEcon.applications](https://github.com/QuantEcon/QuantEcon.applications) in the same directory as this notebook.from asset_pricing import *Exercise 1 First let's enter the parameters:n = 5 P = 0.0125 * np.ones((n, n)) P += np.diag(0.95 - 0.0125 * np.ones(5)) s = np.array([0.95, 0.975, 1.0, 1.025, 1.05]) # state values mc = qe.MarkovChain(P, state_values=s) gamma = 2.0 beta = 0.94 zeta = 1.0 p_s = 150.0Next we'll create an instance of `AssetPriceModel` to feed into the functions.apm = AssetPriceModel(beta=beta, mc=mc, gamma=gamma, g=lambda x: x)Now we just need to call the relevent functions on the data:tree_price(apm) consol_price(apm, zeta) call_option(apm, zeta, p_s)Let's show the last two functions as a plot.fig, ax = plt.subplots() ax.plot(s, consol_price(apm, zeta), label='consol') ax.plot(s, call_option(apm, zeta, p_s), label='call option') ax.legend()Exercise 2 Here's a suitable function:def finite_horizon_call_option(ap, zeta, p_s, k): """ Computes k period option value. """ # == Simplify names, set up matrices == # beta, gamma, P, y = ap.beta, ap.gamma, ap.mc.P, ap.mc.state_values M = P * ap.g(y)**(- gamma) # == Make sure that a unique solution exists == # ap.test_stability(M) # == Compute option price == # p = consol_price(ap, zeta) w = np.zeros(ap.n) for i in range(k): # == Maximize across columns == # w = np.maximum(beta * M @ w, p - p_s) return wNow let's compute the option values at `k=5` and `k=25`fig, ax = plt.subplots() for k in [5, 25]: w = finite_horizon_call_option(apm, zeta, p_s, k) ax.plot(s, w, label=r'$k = {}$'.format(k)) ax.legend()Import Data and Preprocessdf = pd.read_csv("dataset_comb.csv") df df = df.drop(['id'],axis=1) df['Class'] = [0 if x == 'jasmine' else 1 for x in df['Class']] X = df.iloc[:,0:-1] Y = df.iloc[:,-1]Normalize datafor col in X.columns: if col!='Class': max_val = X[col].max() min_val = X[col].min() for val in X[col]: norm_val = (max_val - val)/(max_val - min_val) X[col] = X[col].replace(val, norm_val) X = X.values Y = Y.valuesImport Classifiers from Sklearnfrom sklearn.model_selection import train_test_split,KFold cv = KFold(n_splits = 7,random_state = 3,shuffle = True) from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.linear_model import Perceptron from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import LogisticRegression from sklearn.neural_network import MLPClassifier from sklearn.svm import SVC from sklearn.metrics import accuracy_scoreInitializing Classifier Objectsfc = LinearDiscriminantAnalysis() pc = Perceptron() nb = GaussianNB() lr = LogisticRegression() ann = MLPClassifier() svm = SVC() fc_score_train = [] pc_score_train = [] nb_score_train = [] lr_score_train = [] ann_score_train = [] svm_score_train = [] fc_score_test = [] pc_score_test = [] nb_score_test = [] lr_score_test = [] ann_score_test = [] svm_score_test = []Train, Test and Calculate Accuracyfor train_index,test_index in cv.split(X): X_train,X_test = X[train_index], X[test_index] Y_train,Y_test = Y[train_index], Y[test_index] #train all the classifiers with train data fc.fit(X_train,Y_train) pc.fit(X_train,Y_train) nb.fit(X_train,Y_train) lr.fit(X_train,Y_train) ann.fit(X_train,Y_train) svm.fit(X_train,Y_train) #preditct train data Y_pred_fc_train = fc.predict(X_train) Y_pred_pc_train = pc.predict(X_train) Y_pred_nb_train = nb.predict(X_train) Y_pred_lr_train = lr.predict(X_train) Y_pred_ann_train = ann.predict(X_train) Y_pred_svm_train = 
svm.predict(X_train) #calculate train accuraciies and appento train_accuracy fc_score_train.append(accuracy_score(Y_train,Y_pred_fc_train)) pc_score_train.append(accuracy_score(Y_train,Y_pred_pc_train)) nb_score_train.append(accuracy_score(Y_train,Y_pred_nb_train)) lr_score_train.append(accuracy_score(Y_train,Y_pred_lr_train)) ann_score_train.append(accuracy_score(Y_train,Y_pred_ann_train)) svm_score_train.append(accuracy_score(Y_train,Y_pred_svm_train)) #predict test data Y_pred_fc_test = fc.predict(X_test) Y_pred_pc_test = pc.predict(X_test) Y_pred_nb_test = nb.predict(X_test) Y_pred_lr_test = lr.predict(X_test) Y_pred_ann_test = ann.predict(X_test) Y_pred_svm_test = svm.predict(X_test) #calculate test accuraciies and appento test_accuracy fc_score_test.append(accuracy_score(Y_test,Y_pred_fc_test)) pc_score_test.append(accuracy_score(Y_test,Y_pred_pc_test)) nb_score_test.append(accuracy_score(Y_test,Y_pred_nb_test)) lr_score_test.append(accuracy_score(Y_test,Y_pred_lr_test)) ann_score_test.append(accuracy_score(Y_test,Y_pred_ann_test)) svm_score_test.append(accuracy_score(Y_test,Y_pred_svm_test))Print Accuraciesnames = ['Linear Discriminant'] test_dict = {'Linear Discriminant': fc_score_test , 'Perceptron' : pc_score_test, 'Naive-Bayes' : nb_score_test, 'Logistic Regression' : lr_score_test,'ANN' : ann_score_test,'SVM' : svm_score_test} test_acc = pd.DataFrame(test_dict) train_dict = {'Linear Discriminant': fc_score_train , 'Perceptron' : pc_score_train, 'Naive-Bayes' : nb_score_train, 'Logistic Regression' : lr_score_train,'ANN' : ann_score_train,'SVM' : svm_score_train} train_acc = pd.DataFrame(train_dict) train_acc test_accBox Plotsimport matplotlib.pyplot as plt %matplotlib inline import seaborn as sns accuracy = list([fc_score_test,pc_score_test,nb_score_test,lr_score_test,ann_score_test,svm_score_test]) fig,ax = plt.subplots(figsize=(20,10)) ax.boxplot(accuracy) ax.set_ylim([0.94,1.0]) ax.set_title('Box Plots') ax.set_xlabel('Classifcation Models') ax.set_ylabel('Accuracy') xticklabels = ["Fischer Linear Discriminant","Perceptron","Naive-Bayes","Logistic Regression","ANN","SVM"] ax.set_xticklabels(xticklabels) ax.yaxis.grid(True) plt.show()Exercise 1 (5 points)This exercise is a quiz with 14 multiple answer questions.In the cell below, we've declared a dictionary called `answers`.You should fill in that dictionary with your answers, using as keys the question number, like `question_x`, and as values the number from `1` to `4` that corresponds to the right answer.For each question, you should provide only one answer, i.e, the dict values should have type **int**.For example, if you want to answer Question 1 with choice number 2, then you do:```answers["question_1"] = 2```answers = {}Question 1In the terminal of a Unix based machine, which of these commands will take you to your home directory?1. cd ..2. ls ..3. cd ~/4. ls ~/# YOUR CODE HERE ### BEGIN SOLUTION answers["question_1"] = 3 ### END SOLUTION # answers["question_1"] = ...Question 2In the terminal of a Unix based machine, which of these commands would you use to see the entire content of a file named `data.csv`?1. head -5 data.csv2. tail -5 data.csv3. cat data.csv4. ls -la data.csv# YOUR CODE HERE ### BEGIN SOLUTION answers["question_2"] = 3 ### END SOLUTION # answers["question_2"] = ...Question 3After running one of these commands in the terminal of a Unix based machine, you won't have a file named `data.csv` in your current directory. Which command is it?1. cp dataset.csv data.csv2. rm data.csv3. touch data.csv4. 
mv dataset.csv data.csv# YOUR CODE HERE ### BEGIN SOLUTION answers["question_3"] = 2 ### END SOLUTION # answers["question_3"] = ...Question 4In the terminal of a Unix based machine, if you want to see the size of the files in the current directory, in a human readable format, which command is the best option?1. ls2. ls .3. ls -la4. ls -lh# YOUR CODE HERE ### BEGIN SOLUTION answers["question_4"] = 4 ### END SOLUTION # answers["question_4"] = ...Question 5There are certain types of files that you usually don't want to have in your git repositories, like cache files, or local configuration files that contain passwords. In order to have git not tracking these files, you can use the `.gitignore` file. Say you're currently in the root directory of your project, and you have a `.gitignore` file there. In the terminal of a Unix based machine, which of these commands should you use to list the files and see the `.gitignore` file on that list?1. ls -la2. ls .3. ls -lh4. ls -l# YOUR CODE HERE ### BEGIN SOLUTION answers["question_5"] = 1 ### END SOLUTION # answers["question_5"] = ...Question 6Consider the following git commands: `add`, `commit`, `status`, `pull`, `push`. Which is the most natural order to use them?1. `pull` > `status` > `add` > `commit` > `push`2. `status` > `commit` > `add` > `push` > `pull`3. `push` > `add` > `status` > `commit` > `pull`4. `pull` > `push` > `status` > `add` > `commit`# YOUR CODE HERE ### BEGIN SOLUTION answers["question_6"] = 1 ### END SOLUTION # answers["question_6"] = ...Question 7How do you change from the `master` branch to an existing `feature_dev` branch?1. git checkout master2. git checkout master feature_dev3. git checkout -b feature_dev4. git checkout feature_dev# YOUR CODE HERE ### BEGIN SOLUTION answers["question_7"] = 4 ### END SOLUTION # answers["question_7"] = ...Question 8Which of these commands should you use to find the hash of your previous commit?1. git status2. git log3. git diff4. git commit# YOUR CODE HERE ### BEGIN SOLUTION answers["question_8"] = 2 ### END SOLUTION # answers["question_8"] = ...Question 9Which one of these is false:1. You can use a tuple as a dictionary key.2. You can add new elements to a tuple.3. Tuples can have duplicated elements.4. You can define a tuple with length 0.# YOUR CODE HERE ### BEGIN SOLUTION answers["question_9"] = 2 ### END SOLUTION # answers["question_9"] = ...Question 10If you want to incorporate the master branch into another branch called `dev`, which 2 commands should you run sequentially?1. git checkout dev; git merge master2. git merge master; git checkout dev3. git checkout master; git merge dev4. git merge dev; git checkout master# YOUR CODE HERE ### BEGIN SOLUTION answers["question_10"] = 1 ### END SOLUTION # answers["question_10"] = ...Question 11Which command should you use to bring back changes that you've stashed?1. git stash2. git stash pop3. git revert4. git revert apply# YOUR CODE HERE ### BEGIN SOLUTION answers["question_11"] = 2 ### END SOLUTION # answers["question_11"] = ...Question 12Which one of these files will you always find in the root directory of a Python package?1. README.md2. requirements.txt3. \_\_init\_\_.py4. setup.py# YOUR CODE HERE ### BEGIN SOLUTION answers["question_12"] = 3 ### END SOLUTION # answers["question_12"] = ...Question 13How do you import function `my_function` from module `my_module`?1. import my_function2. import my_module3. from my_function import my_module4. 
from my_module import my_function# YOUR CODE HERE ### BEGIN SOLUTION answers["question_13"] = 4 ### END SOLUTION # answers["question_13"] = ...Question 14Which one of these should not raise an Error?1. '4' + 'None'2. 4 + None3. 4 + 'None'4. '4' + None# YOUR CODE HERE ### BEGIN SOLUTION answers["question_14"] = 1 ### END SOLUTION # answers["question_14"] = ... # Run this cell to see if your answers are correct utils.exercise_1(answers)Answer is correct. Good Job.Exercise 2Consider a csv file with four columns: `city`, `latitude`, `longitude`, `country`.File `cities_locations.csv` is an example of such file and was included in the zip file you downloaded with this notebook, so you can preview it using some command line arguments. Part I (2 points)Implement a function that reads a file with the same format as the `cities_locations.csv` file, and stores the data in a dictionary with the following structure:```{ "city_name": { "idx": row number in the input file, not considering the header, i.e, the first row with data values has idx=0 (type: int), "lat": latitude value (type: float), "lng": longitude value (type: float), "country": country (type: str) },}```The function should be called `read_cities`, receives an argument called `filepath` (which is the file that the function should read the data from), and returns the dictionary that was created.def read_cities(filepath): """ Reads the file in filepath, parses it and returns the data in a dictionary. Parameters: filepath (str): Path to the input file to be parsed. Returns: cities_parsed_data (dict): Cities locations dataset stored as a dictionary, where the keys are city names and the values are data about the city. """ # YOUR CODE HERE ### BEGIN SOLUTION with open(filepath, 'r') as f: reader = csv.reader(f, delimiter=',', quotechar='"') next(reader) data = {} for i, line in enumerate(reader): data[line[0]] = { "idx": i, "lat": float(line[1]), "lng": float(line[2]), "country": line[3] } return data ### END SOLUTION cities_locations_data = read_cities('cities_locations.csv') assert len(cities_locations_data) == 10 assert 'Barcelona' in cities_locations_data barcelona = cities_locations_data['Barcelona'] assert barcelona['idx'] == 5 assert barcelona['country'] == 'Spain' np.testing.assert_almost_equal(barcelona['lat'], 41.3833, decimal=3) np.testing.assert_almost_equal(barcelona['lng'], 2.1834, decimal=3) from more_tests import test_exercise_2_I test_exercise_2_I(read_cities)Part II (3 points)We want to calculate the distance in km between each pair of cities from our dataset, and store this in a numpy array.In order to calculate these distances, you should use the **geodesic** function from the python package [geopy](https://pypi.org/project/geopy/).You'll need to read the package's docs, in order to install it in your local environment and to learn how to use the **geodesic** function. The version of geopy that is installed in our grader is `1.22.0`, so, to make sure that you'll have the same behaviour in your local environment and in our grader, make sure to install the same version.The matrix with the distances will have shape `(n_cities, n_cities)`, where `n_cities` is the number of cities in the dataset. Each city's position in the matrix's rows and columns should be the `idx` value from the dataset. 
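As a quick orientation for the geodesic function mentioned above (a minimal sketch, assuming geopy 1.22.0 as stated; it is not part of the exercise solution): it takes two (latitude, longitude) tuples and exposes the distance through attributes such as .km.

from geopy.distance import geodesic

lisbon = (38.7227, -9.1449)  # (lat, lng)
porto = (41.15, -8.62)
print(geodesic(lisbon, porto).km)  # roughly 274 km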
Implement a function called `create_dist_matrix` that receives as input a dictionary like the one we created in the first part of this exercise, and returns the numpy array representing the distances matrix.from geopy.distance import geodesic def create_dist_matrix(cities_data): """ Computes the geodesic distance between each pair of cities in the input dataset, and returns a matrix with the computed distances. Parameters: cities_data (dict): Dictionary with the city locations dataset. Returns: matrix (np.array): Matrix with shape (n_cities, n_cities) where the value in position (i, j) represents the geodesic distance in km between city_i and city_j. """ # YOUR CODE HERE ### BEGIN SOLUTION n_cities = len(cities_data) dists = np.zeros((n_cities, n_cities)) for city_i, data_i in cities_data.items(): city_i_coords = (data_i['lat'], data_i['lng']) city_i_idx = data_i['idx'] for city_j, data_j in cities_data.items(): city_j_coords = (data_j['lat'], data_j['lng']) city_j_idx = data_j['idx'] dist = geodesic(city_i_coords, city_j_coords).km dists[city_i_idx][city_j_idx] = dist return dists ### END SOLUTION cities_locations_data = { 'Rome': {'idx': 0, 'lat': 41.896, 'lng': 12.4833, 'country': 'Italy'}, 'Milan': {'idx': 1, 'lat': 45.47, 'lng': 9.205, 'country': 'Italy'}, 'Lisbon': {'idx': 2, 'lat': 38.7227, 'lng': -9.1449, 'country': 'Portugal'}, 'Porto': {'idx': 3, 'lat': 41.15, 'lng': -8.62, 'country': 'Portugal'}, 'Madrid': {'idx': 4, 'lat': 40.4, 'lng': -3.6834, 'country': 'Spain'}, 'Barcelona': {'idx': 5, 'lat': 41.3833, 'lng': 2.1834, 'country': 'Spain'} } dists = create_dist_matrix(cities_locations_data) assert type(dists) == np.ndarray assert dists.shape == (6, 6) for i in range(dists.shape[0]): assert dists[i][i] == 0 np.testing.assert_almost_equal(dists[0][1], 476.93, decimal=1) np.testing.assert_almost_equal(dists[0][5], 859.51, decimal=1) from more_tests import test_exercise_2_II test_exercise_2_II(create_dist_matrix)Part III (2 points)Create a function called `furthest_cities` that finds in our distances matrix, the pair of cities furthest away from each other.It should return a tuple of city indexes, where the first one is smaller than the second.def furthest_cities(dist_matrix): """ Finds the pair of cities that is furthest away from each other. Parameters: dist_matrix (np.array): Matrix that stores geodesic distances between cities. Returns: furthest_cities_idx (tuple): Indexes of the two cities. The first element in the tuple is the lower index. """ # YOUR CODE HERE ### BEGIN SOLUTION idx_1, idx_2 = sorted(np.argwhere(dist_matrix == dist_matrix.max())[0]) return idx_1, idx_2 ### END SOLUTION # Creating a random distances matrix for this test np.random.seed(19) dists = np.random.rand(5, 5) * 1000 for i in range(dists.shape[0]): dists[i][i] = 0 for j in range(dists.shape[1]): if j > i: dists[i][j] = dists[j][i] idx_1, idx_2 = furthest_cities(dists) assert idx_1 == 1 assert idx_2 == 4 from more_tests import test_exercise_2_III test_exercise_2_III(furthest_cities)Exercise 3In this exercise, we'll use object oriented programming concepts to model [Pokemons](https://en.wikipedia.org/wiki/Pok%C3%A9mon).In case you're not familiar with Pokemon, they are wild (imaginary) creatures. These are some examples.When they're living in the wild, Pokemon can be captured by Pokemon Trainers. Once they belong to a Trainer, they will obey the Trainer's commands. 
They're usually sent out to non-lethal battles against other Pokémon, in order to gain experience and level up.When they reach certain levels, they can undergo a form of metamorphosis and transform into a similar but stronger species of Pokémon: this is called evolution. Part I (4 points)Your first assignment is to implement a class that represents a `Pokemon`.You'll need to store the following information about a Pokemon:* `name`: the Pokemon's name.* `max_health`: the number of health points that this Pokemon has with full health.* `speed`: a measure of how fast this Pokemon is. Faster Pokemons usually attack first in battles.* `hp`: current number of health points. Pokemons may lose health points during battles.* `level`: Pokemon's current level. This is a measure of the Pokemon's experience. Pokemons in higher levels have more chances of winning battles.Some additional information:* All the stats points described above (`max_health`, `speed`, `hp`, `level`) should be measured with non-negative integers.* When a Pokemon is born, its `level` is always 1 and its `hp` is always the same as `max_health`, but its `name`, `max_health`and `speed` vary from Pokemon to Pokemon.Our Pokemon class should implement 4 methods, described below. 1. Method `is_knocked_out`During battles, Pokemon take damage, which translates into losing health points.Method `is_knocked_out` receives no arguments, and checks whether the Pokemon is knocked out by checking if the Pokemon's `hp` is equal to 0.This method should return a bool. 2. Method `level_up`When a Pokemon wins a battle, it will level up.Method `level_up` receives no arguments, and doesn't return anything.This method should:* Increase the Pokemon's `level` by one.* Increase the Pokemon's `max_health` by 20 points.* Increase the Pokemon's `speed` by 10%. But speed must be an integer, so round it down.* It shouldn't change the Pokemon's `hp`! 3. Method `take_damage`As explained above, during battles, Pokemon take damage, which translates into losing health points.Method `take_damage` receives as argument the integer `damage_points`, and doesn't return anything.This method should decrease the Pokemon's `hp` by `damage_points`. Make sure that the Pokemon's `hp`doesn't fall below 0. 4. Method `attack_damage`This method calculates the number of damage points that our Pokemon's attack will inflict on an enemy Pokemon, during a battle. More experienced Pokemon inflict more damage and have higher chances of having successful attacks than less experienced Pokemon. Method `attack_damage` receives as argument an `enemy_pokemon` (which is another instance of the Pokemon class) and returns an integer representing the number of damage points.This method should compute the damage points in the following way:* Create a variable `level_diff` that stores the difference in levels of the two Pokemon. If our Pokemon is more experienced, `level_diff` should be positive. If our Pokemon is less experienced, `level_diff` should be 0.* Create a variable `max_level` that stores the maximum level between the levels of the two Pokemon.* Create a variable called `p_success`, that represents the probability of success of your Pokemon's attack, and set it to: $$0.5 + \frac{level\_diff}{2 * max\_level}$$* Create a variable called `attack_success`, that represents whether the attack was successful or not. 
Calculate its value by drawing one sample from a binomial distribution: use numpy's [binomial function](https://numpy.org/doc/stable/reference/random/generated/numpy.random.binomial.html), with parameters `n=1` and `p=p_success`. This function will output a 0 (which means the attack was not successful) or a 1 (which means the attack was successful). * Create a variable called `damage_points`. Calculate its value by multiplying your Pokemon's `level` by `attack_success`.* Return `damage_points`.class Pokemon: # YOUR CODE HERE ### BEGIN SOLUTION def __init__(self, name, max_health, speed): self.name = name self.max_health = max_health self.speed = speed self.hp = max_health self.level = 1 def is_knocked_out(self): return self.hp == 0 def level_up(self): self.level += 1 self.max_health += 20 self.speed = int(self.speed * 1.1) def take_damage(self, damage_points): self.hp = max(0, self.hp - damage_points) def attack_damage(self, enemy_pokemon): level_diff = max(0, self.level - enemy_pokemon.level) max_level = max(self.level, enemy_pokemon.level) p_success = 0.5 + level_diff / (2 * max_level) attack_success = np.random.binomial(1, p_success) damage_points = attack_success * self.level return damage_points ### END SOLUTION score = 0 try: pika = Pokemon(name='Pikachu', max_health=20, speed=5) assert pika.name == 'Pikachu' assert pika.max_health == 20 assert pika.speed == 5 assert pika.hp == pika.max_health assert pika.level == 1 assert not pika.is_knocked_out() except AssertionError: pass else: score += 1 try: pika.level_up() assert pika.max_health == 40 assert pika.hp == 20 assert pika.speed == 5 assert pika.level == 2 except AssertionError: pass else: score += 1 try: enemy = Pokemon(name='Squirtle', max_health=10, speed=10) np.random.seed(42) assert pika.attack_damage(enemy) == 2 assert pika.attack_damage(enemy) == 0 assert pika.attack_damage(enemy) == 2 np.random.seed(13) assert pika.attack_damage(enemy) == 0 assert pika.attack_damage(enemy) == 2 assert pika.attack_damage(enemy) == 0 except AssertionError: pass else: score += 1 try: pika.take_damage(5) assert pika.hp == 15 pika.take_damage(20) assert pika.hp == 0 assert pika.is_knocked_out() except AssertionError: pass else: score += 1 if score == 0: raise AssertionError("Not enough correct answers to score points :(") scorePart II (2 points)Now we'll implement a battle between two Pokemon.Write a function called `battle` that receives as arguments `p1` and `p2`, both instances of the Pokemon class, and returns the Pokemon that wins the battle and the Pokemon that loses the battle (in this order!).A battle is a sequence of attacks. In the first turn, the Pokemon who attacks is the fastest (if they have the same speed, `p1` attacks first); the slowest Pokemon attacks in the second turn, and then the fastest Pokemon attacks again. No Pokemon is allowed to attack twice in a row; they always switch places as the attacker.In an attack, the attacker inflicts as many damage points as indicated by the `attack_damage` method. Remember that this method has a random component, so you have to explicitly call it every time the Pokemon attacks.The defender may suffer damage points. 
In order to record that, you should use the `take_damage` method.The battle ends as soon as one of the Pokemon is knocked out.Grader Tip: If you see a `KeyboardInterrupt` error on the grader feedback, that is because your cell is taking too long to run, which is probably due to an infinite loop.def battle(p1, p2): """ Represents a battle between two Pokemon, where the Pokemon take turns attacking. Parameters: p1 (Pokemon): A Pokemon fighting in the battle. p2 (Pokemon): The other Pokemon fighting in the battle. Returns: winner (Pokemon): The winner Pokemon loser (Pokemon): The loser Pokemon """ # YOUR CODE HERE ### BEGIN SOLUTION if p1.speed >= p2.speed: p_attack, p_defense = p1, p2 else: p_attack, p_defense = p2, p1 while (not p_attack.is_knocked_out()) and (not p_defense.is_knocked_out()): damage_points = p_attack.attack_damage(p_defense) p_defense.take_damage(damage_points) p_attack, p_defense = p_defense, p_attack if p2.is_knocked_out(): winner, loser = p1, p2 else: winner, loser = p2, p1 return winner, loser ### END SOLUTION pika = Pokemon(name='Pikachu', max_health=20, speed=5) squirtle = Pokemon(name='Squirtle', max_health=10, speed=10) np.random.seed(19) winner, loser = battle(pika, squirtle) assert winner.name == "Pikachu" assert loser.name == "Squirtle" assert winner.hp == 15 assert loser.hp == 0 from more_tests import test_exercise_3_II test_exercise_3_II(battle, Pokemon)Exercise 4Consider that you have a matrix that represents a field where you can go for a walk.In order to refer to a specific position in the matrix, we'll use the notation `(i,j)`, where `i` represents the row number, `j` represents the column number, using 0-indexing (the first row has index 0).You start walking at some position in the matrix, and the value on that position will guide you towards your next position. The possible values in the matrix are:* `l`: means that you'll move to the column on your **left**, in the same row* `r`: means that you'll move to the column on your **right**, in the same row* `u`: means that you'll move to the row **above**, in the same column* `d`: means that you'll move to the row **below**, in the same columnYou'll be able to keep walking until you're guided to a position outside the boundaries of the field.Let's see an example. This is a field.[['r', 'r', 'd', 'l', 'u'], ['r', 'd', 'l', 'd', 'd'], ['u', 'r', 'd', 'r', 'l'], ['l', 'd', 'd', 'u', 'r']]If you start your walk in position (0,0), your path will be:* `(0,0)`* `r` => `(0,1)`* `r` => `(0,2)`* `d` => `(1,2)`* `l` => `(1,1)`* `d` => `(2,1)`* `r` => `(2,2)`* `d` => `(3,2)`* `d` => `end`In red you can see the positions where you passed.[['r', 'r', 'd', 'l', 'u'], ['r', 'd', 'l', 'd', 'd'], ['u', 'r', 'd', 'r', 'l'], ['l', 'd', 'd', 'u', 'r']]The length of your path is the number of positions where you have been in the field.The path in the example has length 8.*** Part I (1 point)Write a function named `walk` that receives as arguments:* a numpy matrix that represents a **field**, and* a tuple `(i, j)` that represents a **starting position**,and returns the length of the path that you get if you start walking at the starting position and continue until you leave the field.You can assume that the field is not empty, i.e., it has at least one position, and that the starting position is within the field. 
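Before writing the full function, it can help to check how a single step of the walk works. This is only an illustrative sketch using the example field above, not part of the required solution:

import numpy as np

field = np.array([['r', 'r', 'd', 'l', 'u'],
                  ['r', 'd', 'l', 'd', 'd'],
                  ['u', 'r', 'd', 'r', 'l'],
                  ['l', 'd', 'd', 'u', 'r']])

i, j = 0, 0
n_rows, n_cols = field.shape
print(field[i][j])                          # 'r', so the next position is (0, 1)
print(0 <= i < n_rows and 0 <= j < n_cols)  # True: (0, 0) is still inside the field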
Part II (1 point)For an extra point, add the following validations to your function.If you find an unknown instruction while walking your path, raise a **ValueError** with message: `Invalid instruction found on position (i, j)`, where `i` and `j` are the row and column numbers of the position where you found the unknown instruction. If the field has unknown instructions, but you don't stumble upon them while walking, no exception should be raised.And be careful! Your field might tell you to start walking in circles! If during your walk, you find yourself in a position where you have been before, you should raise a **ValueError** with message: `"I'm running in circles"`Grader Tip: If you see a `KeyboardInterrupt` error on the grader feedback, that is because your cell is taking too long to run, which is probably due to an infinite loop.def walk(field, start): """ Returns the length of the path that begins on a starting position and continues according to the field's instructions. Parameters: field (np.array): Matrix with instructions that describes a field where one can go for a walk. start (tuple): Position where to start walking. The first value is the row index and the second is the column index. Returns: walk_length (int): Length of the walk. Raises: ValueError: if an invalid instruction is found. ValueError: if the walk has a loop. """ # YOUR CODE HERE ### BEGIN SOLUTION i, j = start max_i, max_j = field.shape path = [] while (0 <= i < max_i) and (0 <= j < max_j): if (i, j) in path: raise ValueError("I'm running in circles") path.append((i, j)) instruction = field[i][j] if instruction == 'r': j += 1 elif instruction == 'l': j -= 1 elif instruction == 'u': i -= 1 elif instruction == 'd': i += 1 else: raise ValueError(f"Invalid instruction found on position ({i}, {j})") return len(path) ### END SOLUTION # test field with single position field = np.array([['d']]) assert walk(field, (0, 0)) == 1 # test field with successful walk field = np.array([ ['d', 'l', 'd'], ['d', 'u', 'l'], ['d', 'r', 'd'], ['d', 'd', 'd'], ]) assert walk(field, (1, 2)) == 7 assert walk(field, (2, 2)) == 2 # test another field with successful walk field = np.array([ ['r', 'r', 'd', 'l', 'u'], ['r', 'd', 'l', 'd', 'd'], ['u', 'r', 'd', 'r', 'l'], ['l', 'd', 'd', 'u', 'r'], ]) assert walk(field, (0, 0)) == 8 # test field with loop field = np.array([ ['r', 'r', 'd', 'l', 'u'], ['r', 'd', 'l', 'd', 'd'], ['u', 'r', 'd', 'r', 'l'], ['l', 'd', 'd', 'u', 'r'], ]) try: walk(field, (1, 3)) raise AssertionError("Should raise a ValueError when running in circles!") except ValueError as e: assert str(e) == "I'm running in circles" else: raise AssertionError("Should have raised the right error type!") # test another field with loop field = np.array([ ['d', 'l', 'l'], ['d', 'w', 'u'], ['d', 'u', 'u'], ['r', 'r', 'u'], ]) try: walk(field, (3, 1)) raise AssertionError("Should raise a ValueError when running in circles!") except ValueError as e: assert str(e) == "I'm running in circles" else: raise AssertionError("Should have raised the right error type!") # test field with error field = np.array([ ['d', 'l', 'l'], ['d', 'w', 'u'], ['d', 'u', 'u'], ['r', 'r', 'u'], ]) try: walk(field, (2, 1)) raise AssertionError("Should raise a ValueError when an invalid instruction is found!") except ValueError as e: assert str(e) == "Invalid instruction found on position (1, 1)" else: raise AssertionError("Should have raised the right error type!") # test another field with error field_with_error = np.array([ ['r', 'x', 'd', 'l', 'u'], ['r', 'd', 
'l', 'd', 'd'], ['u', 'r', 'd', 'r', 'l'], ['l', 'd', 'd', 'u', 'r'], ]) try: walk(field_with_error, (0, 0)) raise AssertionError("Should raise a ValueError when an invalid instruction is found!") except ValueError as e: assert str(e) == "Invalid instruction found on position (0, 1)" else: raise AssertionError("Should have raised the right error type!")--- Last but not least, submit your work! To submit your work, fill your slack ID in the `slack_id` variable (as a string).Example: `slack_id = "x-men"`Help: if you forgot your slack ID, [read this](https://moshfeu.medium.com/how-to-find-my-member-id-in-slack-workspace-d4bba942e38c).# Submit your work! #slack_id = ### BEGIN SOLUTION slack_id = "ADMINSLU18_3" ### END SOLUTION from submit import submit assert isinstance(slack_id, str) slu = 18_3 submit(slack_id, slu)SuccessComputational Examples: Limits# The following line will initialize the notebook by loading some libraries necessary to run the code below %run limits_estimate.pyEstimating limits by evaluating a function In this example, we will randomly generate a function $g(x)$. We will not know the function $g(x)$ explicitely, but we can evaluate $g$ for specific $x$ to estimate limits. Let's generate a function $g(x)$ and a number $s$:g,s = generate_example()We first compute a table with values of $g(x)$ for numbers $x$ close, *but not equal to*, $s$:show_table(g, [s-0.1, s-0.01, s-0.001, s+0.001, s+0.01, s+0.1])It appears that the left-hand limit is not equal to the right-hand limit.For further detail, we can evaluate $g$ at additional inputs:g(s-0.0000001) g(s+0.0000001)These values should give us a good estimate of the limits. Let's confirm this by computing the limit formally:lim_left(g,s) lim_right(g,s)Finally, note that in this example both limits are **different from the value of $g$ at $s$**:g(s)Diameter of a Binary Tree is the distance between two farthest nodesIf one node is on left side and one node is on right side then --> leftHeight + rightHeight is the answerBoth nodes on the Left side --> Tree diameter is equal to the left sub tree diameterBoth nodes on the right side --> Tree diameter is equal to the right sub tree diameterTime Complexity is O(n*h)import queue class BinaryTreeNode: def __init__(self, data): self.data = data self.left = None self.right = None def height(root): if root == None: return 0 return 1 + max(height(root.right), height(root.left)) def diameter(root): if root == None: return 0 option1 = height(root.left) + height(root.right) option2 = diameter(root.left) option3 = diameter(root.right) return max(option1, option2, option3) def buildLevelTree(levelorder): index = 0 length = len(levelorder) if length<=0 or levelorder[0]==-1: return None root = BinaryTreeNode(levelorder[index]) index += 1 q = queue.Queue() q.put(root) while not q.empty(): currentNode = q.get() leftChild = levelorder[index] index += 1 if leftChild != -1: leftNode = BinaryTreeNode(leftChild) currentNode.left =leftNode q.put(leftNode) rightChild = levelorder[index] index += 1 if rightChild != -1: rightNode = BinaryTreeNode(rightChild) currentNode.right =rightNode q.put(rightNode) return root # Main levelOrder = [int(i) for i in input().strip().split()] root = buildLevelTree(levelOrder) print(1 + diameter(root))8 3 10 1 6 -1 14 -1 -1 4 7 13 -1 -1 -1 -1 -1 -1 -1 7missing value : age, embarked, deck, embark_town¶df['age'].fillna(29) df.info() df_deck = df.dropna(subset=['deck'], how='any', axis='index') df_deck.info() df_age = df['age'].fillna(29) type(df_age), df_age.shape df['age'] = 
df_age df.info() df['deck'].value_counts() df['deck'] = df['deck'].fillna('B') df.info() df['embarked'].value_counts() df['embarked'] = df['embarked'].fillna('C') df.info() df['embark_town'].value_counts() df['embark_town'] = df['embark_town'].fillna('Cherbourg') df.info() RangeIndex: 891 entries, 0 to 890 Data columns (total 15 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 survived 891 non-null int64 1 pclass 891 non-null int64 2 sex 891 non-null object 3 age 891 non-null float64 4 sibsp 891 non-null int64 5 parch 891 non-null int64 6 fare 891 non-null float64 7 embarked 891 non-null object 8 class 891 non-null category 9 who 891 non-null object 10 adult_male 891 non-null bool 11 deck 891 non-null category 12 embark_town 891 non-null object 13 alive 891 non-null object 14 alone 891 non-null bool dtypes: bool(2), category(2), float64(2), int64(4), object(5) memory usage: 80.6+ KBResidual Networksby: source: deeplearning.aiBuild very deep convolutional networks, using Residual Networks (ResNets). In theory, very deep networks can represent very complex functions; but in practice, they are hard to train. Residual Networks, introduced by [He et al.](https://arxiv.org/pdf/1512.03385.pdf), allow you to train much deeper networks than were previously practically feasible. Changelog- Setup notebookimport numpy as np from keras import layers from keras.layers import (Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D) from keras.optimizers import Adam from keras.models import Model, load_model from keras.preprocessing import image from keras.utils import layer_utils from keras.utils.data_utils import get_file from keras.applications.imagenet_utils import preprocess_input import pydot from IPython.display import SVG from keras.utils.vis_utils import model_to_dot from keras.utils import plot_model from keras.initializers import glorot_uniform import scipy.misc import matplotlib.pyplot as plt from matplotlib.pyplot import imshow, imread %matplotlib inline from mymods.lauthom import * import keras.backend as K K.set_image_data_format('channels_last') K.set_learning_phase(1) get_path('pycode', 'resnets_utils', add_path=True) from resnets_utils import *1 - The problem of very deep neural networksLast week, you built your first convolutional neural network. In recent years, neural networks have become deeper, with state-of-the-art networks going from just a few layers (e.g., AlexNet) to over a hundred layers.The main benefit of a very deep network is that it can represent very complex functions. It can also learn features at many different levels of abstraction, from edges (at the lower layers) to very complex features (at the deeper layers). However, using a deeper network doesn't always help. A huge barrier to training them is vanishing gradients: very deep networks often have a gradient signal that goes to zero quickly, thus making gradient descent unbearably slow. More specifically, during gradient descent, as you backprop from the final layer back to the first layer, you are multiplying by the weight matrix on each step, and thus the gradient can decrease exponentially quickly to zero (or, in rare cases, grow exponentially quickly and "explode" to take very large values). 
During training, you might therefore see the magnitude (or norm) of the gradient for the earlier layers descrease to zero very rapidly as training proceeds: **Figure 1** : **Vanishing gradient** The speed of learning decreases very rapidly for the early layers as the network trains You are now going to solve this problem by building a Residual Network! 2 - Building a Residual NetworkIn ResNets, a "shortcut" or a "skip connection" allows the gradient to be directly backpropagated to earlier layers: **Figure 2** : A ResNet block showing a **skip-connection** The image on the left shows the "main path" through the network. The image on the right adds a shortcut to the main path. By stacking these ResNet blocks on top of each other, you can form a very deep network. We also saw in lecture that having ResNet blocks with the shortcut also makes it very easy for one of the blocks to learn an identity function. This means that you can stack on additional ResNet blocks with little risk of harming training set performance. (There is also some evidence that the ease of learning an identity function--even more than skip connections helping with vanishing gradients--accounts for ResNets' remarkable performance.)Two main types of blocks are used in a ResNet, depending mainly on whether the input/output dimensions are same or different. You are going to implement both of them. 2.1 - The identity blockThe identity block is the standard block used in ResNets, and corresponds to the case where the input activation (say $a^{[l]}$) has the same dimension as the output activation (say $a^{[l+2]}$). To flesh out the different steps of what happens in a ResNet's identity block, here is an alternative diagram showing the individual steps: **Figure 3** : **Identity block.** Skip connection "skips over" 2 layers. The upper path is the "shortcut path." The lower path is the "main path." In this diagram, we have also made explicit the CONV2D and ReLU steps in each layer. To speed up training we have also added a BatchNorm step. Don't worry about this being complicated to implement--you'll see that BatchNorm is just one line of code in Keras! In this exercise, you'll actually implement a slightly more powerful version of this identity block, in which the skip connection "skips over" 3 hidden layers rather than 2 layers. It looks like this: **Figure 4** : **Identity block.** Skip connection "skips over" 3 layers.Here're the individual steps.First component of main path: - The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (1,1). Its padding is "valid" and its name should be `conv_name_base + '2a'`. Use 0 as the seed for the random initialization. - The first BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2a'`.- Then apply the ReLU activation function. This has no name and no hyperparameters. Second component of main path:- The second CONV2D has $F_2$ filters of shape $(f,f)$ and a stride of (1,1). Its padding is "same" and its name should be `conv_name_base + '2b'`. Use 0 as the seed for the random initialization. - The second BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2b'`.- Then apply the ReLU activation function. This has no name and no hyperparameters. Third component of main path:- The third CONV2D has $F_3$ filters of shape (1,1) and a stride of (1,1). Its padding is "valid" and its name should be `conv_name_base + '2c'`. Use 0 as the seed for the random initialization. - The third BatchNorm is normalizing the channels axis. 
Its name should be `bn_name_base + '2c'`. Note that there is no ReLU activation function in this component. Final step: - The shortcut and the input are added together.- Then apply the ReLU activation function. This has no name and no hyperparameters. **Exercise**: Implement the ResNet identity block. We have implemented the first component of the main path. Please read over this carefully to make sure you understand what it is doing. You should implement the rest. - To implement the Conv2D step: [See reference](https://keras.io/layers/convolutional/conv2d)- To implement BatchNorm: [See reference](https://faroit.github.io/keras-docs/1.2.2/layers/normalization/) (axis: Integer, the axis that should be normalized (typically the channels axis))- For the activation, use: `Activation('relu')(X)`- To add the value passed forward by the shortcut: [See reference](https://keras.io/layers/merge/add)def identity_block(X, f, filters, stage, block): """ Implementation of the identity block as defined in Figure 3 Arguments: X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev) f -- integer, specifying the shape of the middle CONV's window for the main path filters -- python list of integers, defining the number of filters in the CONV layers of the main path stage -- integer, used to name the layers, depending on their position in the network block -- string/character, used to name the layers, depending on their position in the network Returns: X -- output of the identity block, tensor of shape (n_H, n_W, n_C) """ # defining name basis conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' # Retrieve Filters F1, F2, F3 = filters # Save the input value. You'll need this later to add back to the main path. X_shortcut = X # First component of main path X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X) X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X) X = Activation('relu')(X) # Second component of main path X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X) X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X) X = Activation('relu')(X) # Third component of main path X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X) X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X) # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines) X = Add()([X, X_shortcut]) X = Activation('relu')(X) return X tf.reset_default_graph() with tf.Session() as test: np.random.seed(1) A_prev = tf.placeholder("float", [3, 4, 4, 6]) X = np.random.randn(3, 4, 4, 6) A = identity_block(A_prev, f=2, filters=[2, 4, 6], stage=1, block='a') test.run(tf.global_variables_initializer()) out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0}) print("out = " + str(out[0][1][1][0]))**Expected Output**: **out** [ 0.94822985 0. 1.16101444 2.747859 0. 1.36677003] 2.2 - The convolutional blockYou've implemented the ResNet identity block. Next, the ResNet "convolutional block" is the other type of block. You can use this type of block when the input and output dimensions don't match up. 
The difference with the identity block is that there is a CONV2D layer in the shortcut path: **Figure 4** : **Convolutional block** The CONV2D layer in the shortcut path is used to resize the input $x$ to a different dimension, so that the dimensions match up in the final addition needed to add the shortcut value back to the main path. (This plays a similar role as the matrix $W_s$ discussed in lecture.) For example, to reduce the activation dimensions's height and width by a factor of 2, you can use a 1x1 convolution with a stride of 2. The CONV2D layer on the shortcut path does not use any non-linear activation function. Its main role is to just apply a (learned) linear function that reduces the dimension of the input, so that the dimensions match up for the later addition step. The details of the convolutional block are as follows. First component of main path:- The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (s,s). Its padding is "valid" and its name should be `conv_name_base + '2a'`. - The first BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2a'`.- Then apply the ReLU activation function. This has no name and no hyperparameters. Second component of main path:- The second CONV2D has $F_2$ filters of (f,f) and a stride of (1,1). Its padding is "same" and it's name should be `conv_name_base + '2b'`.- The second BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2b'`.- Then apply the ReLU activation function. This has no name and no hyperparameters. Third component of main path:- The third CONV2D has $F_3$ filters of (1,1) and a stride of (1,1). Its padding is "valid" and it's name should be `conv_name_base + '2c'`.- The third BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2c'`. Note that there is no ReLU activation function in this component. Shortcut path:- The CONV2D has $F_3$ filters of shape (1,1) and a stride of (s,s). Its padding is "valid" and its name should be `conv_name_base + '1'`.- The BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '1'`. Final step: - The shortcut and the main path values are added together.- Then apply the ReLU activation function. This has no name and no hyperparameters. **Exercise**: Implement the convolutional block. We have implemented the first component of the main path; you should implement the rest. 
As before, always use 0 as the seed for the random initialization, to ensure consistency with our grader.- [Conv Hint](https://keras.io/layers/convolutional/conv2d)- [BatchNorm Hint](https://keras.io/layers/normalization/batchnormalization) (axis: Integer, the axis that should be normalized (typically the features axis))- For the activation, use: `Activation('relu')(X)`- [Addition Hint](https://keras.io/layers/merge/add)def convolutional_block(X, f, filters, stage, block, s=2): """ Implementation of the convolutional block as defined in Figure 4 Arguments: X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev) f -- integer, specifying the shape of the middle CONV's window for the main path filters -- python list of integers, defining the number of filters in the CONV layers of the main path stage -- integer, used to name the layers, depending on their position in the network block -- string/character, used to name the layers, depending on their position in the network s -- Integer, specifying the stride to be used Returns: X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C) """ # defining name basis conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' # Retrieve Filters F1, F2, F3 = filters # Save the input value X_shortcut = X ##### MAIN PATH ##### # First component of main path X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X) X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X) X = Activation('relu')(X) # Second component of main path X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X) X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X) X = Activation('relu')(X) # Third component of main path X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X) X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X) ##### SHORTCUT PATH #### X_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '1', kernel_initializer=glorot_uniform(seed=0))(X_shortcut) X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut) # Final step: Add shortcut value to main path, and pass it through a RELU activation X = Add()([X, X_shortcut]) X = Activation('relu')(X) return X tf.reset_default_graph() with tf.Session() as test: np.random.seed(1) A_prev = tf.placeholder("float", [3, 4, 4, 6]) X = np.random.randn(3, 4, 4, 6) A = convolutional_block(A_prev, f=2, filters=[2, 4, 6], stage=1, block='a') test.run(tf.global_variables_initializer()) out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0}) print("out = " + str(out[0][1][1][0]))**Expected Output**: **out** [ 0.09018463 1.23489773 0.46822017 0.0367176 0. 0.65516603] 3 - Building your first ResNet model (50 layers)You now have the necessary blocks to build a very deep ResNet. The following figure describes in detail the architecture of this neural network. "ID BLOCK" in the diagram stands for "Identity block," and "ID BLOCK x3" means you should stack 3 identity blocks together. 
**Figure 5** : **ResNet-50 model** Here're some other functions we used in the code below:- Conv2D: [See reference](https://keras.io/layers/convolutional/conv2d)- BatchNorm: [See reference](https://keras.io/layers/normalization/batchnormalization) (axis: Integer, the axis that should be normalized (typically the features axis))- Zero padding: [See reference](https://keras.io/layers/convolutional/zeropadding2d)- Max pooling: [See reference](https://keras.io/layers/pooling/maxpooling2d)- Average pooling [see reference](https://keras.io/layers/pooling/averagepooling2d)- Fully conected layer: [See reference](https://keras.io/layers/core/dense)- Addition: [See reference](https://keras.io/layers/merge/add)def ResNet50(input_shape=(64, 64, 3), classes=6): """ Implementation of the popular ResNet50 the following architecture: CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3 -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER Arguments: input_shape -- shape of the images of the dataset classes -- integer, number of classes Returns: model -- a Model() instance in Keras """ # Define the input as a tensor with shape input_shape X_input = Input(input_shape) # Zero-Padding X = ZeroPadding2D((3, 3))(X_input) # Stage 1 X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1', kernel_initializer=glorot_uniform(seed=0))(X) X = BatchNormalization(axis=3, name='bn_conv1')(X) X = Activation('relu')(X) X = MaxPooling2D((3, 3), strides=(2, 2))(X) # Stage 2 X = convolutional_block(X, f=3, filters=[64, 64, 256], stage=2, block='a', s=1) X = identity_block(X, 3, [64, 64, 256], stage=2, block='b') X = identity_block(X, 3, [64, 64, 256], stage=2, block='c') # Stage 3 X = convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2) X = identity_block(X, 3, [128, 128, 512], stage=3, block='b') X = identity_block(X, 3, [128, 128, 512], stage=3, block='c') X = identity_block(X, 3, [128, 128, 512], stage=3, block='d') # Stage 4 X = convolutional_block(X, f=3, filters=[256, 256, 1024], stage=4, block='a', s=2) X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f') # Stage 5 X = X = convolutional_block(X, f=3, filters=[512, 512, 2048], stage=5, block='a', s=2) X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b') X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c') # AVGPOOL X = AveragePooling2D(pool_size=(2, 2), padding='same')(X) # output layer X = Flatten()(X) X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer=glorot_uniform(seed=0))(X) # Create model model = Model(inputs=X_input, outputs=X, name='ResNet50') return modelRun the following code to build the model's graph. If your implementation is not correct you will know it by checking your accuracy when running `model.fit(...)` below.model = ResNet50(input_shape=(64, 64, 3), classes=6)As seen in the Keras Tutorial Notebook, prior training a model, you need to configure the learning process by compiling the model.adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.01, amsgrad=False) model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])The model is now ready to be trained. The only thing you need is a dataset. Let's load the SIGNS Dataset. 
**Figure 6** : **SIGNS dataset**X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset() # Normalize image vectors X_train = X_train_orig / 255. X_test = X_test_orig / 255. # Convert training and test labels to one hot matrices Y_train = convert_to_one_hot(Y_train_orig, 6).T Y_test = convert_to_one_hot(Y_test_orig, 6).T print("number of training examples = " + str(X_train.shape[0])) print("number of test examples = " + str(X_test.shape[0])) print("X_train shape: " + str(X_train.shape)) print("Y_train shape: " + str(Y_train.shape)) print("X_test shape: " + str(X_test.shape)) print("Y_test shape: " + str(Y_test.shape))Train modelRun the following cell to train your model on 2 epochs with a batch size of 32. On a CPU (2,9 GHz Intel Core i5) it should take you around 2min per epoch.model.fit(X_train, Y_train, epochs=10, batch_size=16)**Expected Output**: ** Epoch 1/2** loss: between 1 and 5, acc: between 0.2 and 0.5, although your results can be different from ours. ** Epoch 2/2** loss: between 1 and 5, acc: between 0.2 and 0.5, you should see your loss decreasing and the accuracy increasing. Let's see how this model (trained on only two epochs) performs on the test set.preds = model.evaluate(X_test, Y_test) print("Loss = " + str(preds[0])) print("Test Accuracy = " + str(preds[1]))**Expected Output**: **Test Accuracy** between 0.16 and 0.25 For the purpose of this assignment, we've asked you to train the model only for two epochs. You can see that it achieves poor performances. Please go ahead and submit your assignment; to check correctness, the online grader will run your code only for a small number of epochs as well. After you have finished this official (graded) part of this assignment, you can also optionally train the ResNet for more iterations, if you want. We get a lot better performance when we train for ~20 epochs, but this will take more than an hour when training on a CPU. Using a GPU, we've trained our own ResNet50 model's weights on the SIGNS dataset. You can load and run our trained model on the test set in the cells below. It may take ≈1min to load the model. Load pretrained model# model = load_model('ResNet50.h5') preds = model.evaluate(X_test, Y_test) print("Loss = " + str(preds[0])) print("Test Accuracy = " + str(preds[1]))ResNet50 is a powerful model for image classification when it is trained for an adequate number of iterations. We hope you can use what you've learnt and apply it to your own classification problem to perform state-of-the-art accuracy.Congratulations on finishing this assignment! You've now implemented a state-of-the-art image classification system! 4 - Test on your own image If you wish, you can also take a picture of your own hand and see the output of the model. To do this: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Write your image's name in the following code 4. 
Run the code and check if the algorithm is right!img_path = '../data/conv_images/thumbs_up.jpg' #'images/my_image.jpg' img = image.load_img(img_path, target_size=(64, 64)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) print('Input image shape:', x.shape) my_image = imread(img_path) _ = imshow(my_image) print("class prediction vector [p(0), p(1), p(2), p(3), p(4), p(5)] = ") print(model.predict(x))You can also print a summary of your model by running the following code.model.summary()Finally, run the code below to visualize your ResNet50. You can also download a .png picture of your model by going to "File -> Open...-> model.png".plot_model(model, to_file='model.png') SVG(model_to_dot(model).create(prog='dot', format='svg'))3. Sequence Pattern Mining# ! pip install gsppya)def _mystrip(x): return x[:-1].split(',') import pandas as pd df = pd.read_csv('Sequence.csv', sep='\n', header=None) contains = df[df[0].str.contains(",Bread,Sweet")] df[0]=df[0].apply(_mystrip) df.head() # trans = df.values.tolist() trans = list(df[0]) trans from csv import reader list_of_rows=[] with open('Sequence.csv', 'r') as read_obj: csv_reader = reader(read_obj) list_of_rows = list(csv_reader) display(list_of_rows)First, we read the file as a dataset with a single column and, using commas as separators, convert each row into a list so that it matches the input format expected by the function. b)from gsppy.gsp import GSP _gsp = GSP(trans) result = _gsp.search(0.3) display(result)We run the function with a support threshold of 0.3. As we can see, it does not give us much information: the number of records in the frequent sets is large, so the ratio of a pattern's occurrences to the total can become a negligible number that falls below this support threshold. c)_gsp2 = GSP(trans) result2 = _gsp2.search(0.001) result2To find that sequence, we lowered the support threshold. Searching the results, we see that the following values are found: ('Panner', 'Bread', 'Sweet'): 10, ('Cheese', 'Bread', 'Sweet'): 9. Lowering the threshold further yields even more sequences, but the running time becomes extremely long.contains.head(60) print("Lassi :",contains[0].str.count("Lassi,Bread,Sweet,").sum()) print("Ghee :",contains[0].str.count("Ghee,Bread,Sweet,").sum()) print("Panner :",contains[0].str.count("Panner,Bread,Sweet,").sum()) print("Butter :",contains[0].str.count("Butter,Bread,Sweet,").sum()) print("Coffee Powder :",contains[0].str.count("Coffee Powder,Bread,Sweet,").sum()) print("Milk :",contains[0].str.count("Milk,Bread,Sweet,").sum()) print("Sugar :",contains[0].str.count("Sugar,Bread,Sweet,").sum()) print("Cheese :",contains[0].str.count("Cheese,Bread,Sweet,").sum()) print("Tea Powder :",contains[0].str.count("Tea Powder,Bread,Sweet,").sum()) print("Yougurt :",contains[0].str.count("Yougurt,Bread,Sweet,").sum())Lassi : 6 Ghee : 6 Panner : 10 Butter : 6 Coffee Powder : 7 Milk : 4 Sugar : 7 Cheese : 9 Tea Powder : 3 Yougurt : 3Exercise notebook:import warnings warnings.simplefilter('ignore', FutureWarning) import pandas as pd from datetime import datetimeExercise 1: Dataframes and CSV filesA CSV file is a plain text file that is used to hold tabular data. The acronym CSV is short for `‘comma-separated values’`.To read a CSV file into a dataframe you need to call the pandas function called read_csv(). The simplest usage of this function is with a single argument, a string that holds the name of the CSV file, for example.df = pd.read_csv('WHO POP TB all.csv')The above code creates a dataframe from the data in the file `WHO POP TB all.csv` and assigns it to the variable `df`. 
This is the simplest usage of the `read_csv()` function, justusing a single argument, a string that holds the name of the CSV file.However the function can take many additional arguments (some of which you’ll uselater), which determine how the file is to be read. Dataframe attributesA dataframe attribute is like a variable that can only be accessed in the context of a dataframe. One such attribute is columns which holds a dataframe's column names.So the expression `df.columns` evaluates to the value of the columns attribute insidethe dataframe `df`. The following code will get and display the names of the columns in thedataframe df:df.columnsGetting and displaying dataframe rowsDataframes can have hundreds or thousands of rows, so it is not practical to display awhole dataframe.However, there are a number of dataframe attributes and methods that allow you to getand display either a single row or a number of rows at a time. Three of the most usefulmethods are: **iloc(), head() and tail()**. Note that to distinguish methods andattributes, we write () after a method’s name. Dataframe rowsA dataframe has a default integer index for its rows, which starts at zero 0. The `iloc` attribute can be used to obtain the row at the given index.**The iloc attribute**You can get and display any single row in a dataframe by using the `iloc` attribute with the index of therow you want to access as its argument. For example, the following code will get anddisplay the first row of data in the dataframe df, which is at index 0:df.iloc[0] # first row, index 0Similarly, the following code will get and display the third row of data in the dataframe df,which is at index 2:df.iloc[2] # third row, index 2The head() methodThe `head()` method returns a dataframe with the first rows, as many as given in the argument. By default, if the argument is missing, it returns the first five rows.The first few rows of a dataframe can be printed out with the head() method.You can tell `head()` is a method, rather than an attribute such as columns, because ofthe parentheses (round brackets) after the property name.If you don’t give any argument, i.e. don’t put any number within those parentheses, thedefault behaviour is to return the `first five rows of the dataframe`. If you give an argument,it will print that number of rows (starting from the row indexed by 0).For example, executing the following code will get and display the first five rows in thedataframe df.df.head() # first five rowsAnd, executing the following code will get and display the first seven rows in thedataframe df.df.head(7) # first seven rowsThe tail() methodThe tail() method is similar to the head() method. If no argument is used, the last five rows of the dataframe are returned, otherwise the number of rows returned is dependent on the argument.df.tail() # last five rowsGetting and displaying dataframe columnsYou learned that you can get and display a single column of a dataframe byputting the name of the column (in quotes) within square brackets immediately after thedataframe’s name.For example, like this:df['TB deaths']Notice that although there is an index, there is no column heading. This is because what isreturned is not a new dataframe with a single column but an example of the Series datatype. 
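A quick way to confirm this is to check the type of the result (a small sketch, assuming the same df as above):

type(df['TB deaths'])  # pandas.core.series.Series, not a DataFrame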
Each column in a dataframe is an example of a seriesThe Series data type is a collection of values with an integer index that starts from zero.In addition, the Series data type has many of the same methods and attributes asthe DataFrame data type, so you can still execute code like:df['TB deaths'].head() df['TB deaths'].iloc[2]However, pandas does provide a mechanism for you to get and display one or moreselected columns as a new dataframe in its own right. To do this you need to use a list. A list in Python consists of one or more items separated by commas and enclosed withinsquare brackets, for example `['Country'] or ['Country', 'Population(1000s)']`. This list is then put within outer square brackets immediately after thedataframe’s name, like this:df[['Country']].head()Note that the column is now named. The expression `df[['Country']]`(with two squarebrackets) evaluates to a new dataframe (which happens to have a single column) ratherthan a series.To get a new dataframe with multiple columns you just need to put more column names inthe list, like this:df[['Country', 'Population (1000s)']].head()Tutorial: Quantifying allele-specific expression This is the second tutorial based on [De novo and inherited loss-of-functionvariants in *TLK2*: identification, clinical delineation andgenotype-phenotype evaluation of a distinct neurodevelopmental disorder](https://www.sciencedirect.com/science/article/pii/S0002929718301617)by , et al and in particular Figure 4 from that paper.The experimental data used here was generated by and .Here, we will discuss how amplimap was used to quantify allele-specific expressionof two loss-of-function mutations found in patients, and thus measure their effecton nonsense-mediated decay (NMD).We will be using a subset of the samples, looking at a single replicate from two different mutations, both of which generate a premature stop codon in *TLK2*:- p.Ser330\*, which is expected to result in a truncated product leading to NMD- p.Arg698\*, which is expected to escape NMD because it is located in the last exonFor both of these mutations, we will look at the balance between the mutation allele and the wild-type allele.If no NMD occured, both the wild-type and the mutation allele should be observed in 50% of the reads. Ifone of the alleles was targeted for NMD, we should be seeing an inbalance in the read counts. Analysis overviewStarting from the raw sequencing reads, we would like to:- Trim off primer sequences- Align reads to the reference genome, taking into account gaps introduced by spliced introns- Count how often each allele was observed at the mutation site Initial setupTo run this tutorial amplimap needs to be installed and configured already.Please see [Installation](https://amplimap.readthedocs.io/en/latest/installation.html)and [Configuration](https://amplimap.readthedocs.io/en/latest/configuration.html) for details.In particular, you need to have the hg19 (GRCh37) reference FASTA genome and theassociated indices prepared for use with STAR (see [Reference genome paths](https://amplimap.readthedocs.io/en/latest/configuration.htmlreference-genome-paths)). Preparing the working directoryFor every experiment that we want to process, we create a new workingdirectory. This will contain all the input files required, as well asthe output generated by amplimap. 
This makes it easy to keep track ofthe data for each experiment, as well as to rerun analyses if required.To create a directory, we use the standard ``mkdir`` unix command andchange into it with ``cd``: mkdir TLK2_NMD cd TLK2_NMDAll further commands should now be run inside this working directory. reads_inThe first input we need to provide to amplimap is of course thesequencing data. These can be obtained directly from the sequencer as``.fastq.gz`` files and should be placed in a directory called [``reads_in``](https://amplimap.readthedocs.io/en/latest/usage.htmlreads-in).[Download the sample data from this tutorial](http://userweb.molbiol.ox.ac.uk/public/koelling/amplimap/tutorial_data/TLK2_ASE.tar)and extract the ``reads_in`` directory into your working directory. There are many different ways of doing this butwe recommend using ``wget`` and ``tar`` on the command line: wget http://userweb.molbiol.ox.ac.uk/public/koelling/amplimap/tutorial_data/TLK2_ASE.tar tar xf TLK2_ASE.tarYou can use ``ls`` to check that the files have been extracted to the correct subdirectory: ls reads_inThis should display a list of four fastq.gz files, which represent read 1and read 2 of two samples: Sample1_Ser330_L001_R1_001.fastq.gz Sample1_Ser330_L001_R2_001.fastq.gz Sample2_Arg698_L001_R1_001.fastq.gz Sample2_Arg698_L001_R2_001.fastq.gz probes.csvNext, we need to provide a [probes.csv file](https://amplimap.readthedocs.io/en/latest/usage.htmlprobes-csv) that describes the usedprimer sequences and the regions they are supposed to capture. This canbe created with spreadsheet software such as Excel, as long as the file issaved as plain text. However, we recommend always checking the file manuallyusing a plain text editor such as ``nano`` or ``vim``, to make sure it is actually in the rightformat.Create a new plain text file called ``probes.csv`` (for example using ``nano``or ``vim``) in your working directory and copy the following text into it: id,first_primer_5to3,second_primer_5to3,chr,target_start,target_end,strand TLK2_cDNA_1,TGCAAGACCGCTTGAGACTG,CAGCTCTGCCTGGATCTCTG,chr17,60642418,60655843,+ TLK2_cDNA_2,GCATGCATGTAGGGAATACCG,ACTGTTATTGGACGCCCCAG,chr17,60673966,60689893,+ snps.txtIn this case we have two specific genomic positions that we want to look at andalso know the alleles that we expect to see. Thus, we can provide a[snps.txt](https://amplimap.readthedocs.io/en/latest/usage.htmlsnps-txt) fileand obtain allele counts specifically for these positions, rather thanscreening a whole genomic region. This both speeds up the processingand simplifies the downstream analysis.Create a new plain text file called ``snps.txt`` (for example using ``nano``or ``vim``) in your working directory and copy the following text into it: chr17 60650596 TLK2_Ser330 C A chr17 60689765 TLK2_Arg698 C T config.yamlFinally, we create a config.yaml file to set some experiment-specific settings.We could set [a lot more options](https://amplimap.readthedocs.io/en/latest/configuration.html)here but in this case set a few of them. 
All other options will be leftas specified in the default configuration.Create a new plain text file called ``config.yaml`` (for example using ``nano``or ``vim``) in your working directory and copy the following text into it: general: genome_name: "hg19" align: aligner: "star"This tells amplimap to use the reference genome ``hg19``, as specified in your[default configuration](https://amplimap.readthedocs.io/en/latest/configuration.htmldefault-configuration).If you do not have this reference genome set up there, you can also specify the necessary paths directlyin the ``config.yaml`` by adding the following additional lines and editing the paths to match your local setup: paths: hg19: star: "/INSERT/PATH/TO/GENOME" fasta: "/INSERT/PATH/TO/FASTA"For ``star`` you would provide the path to the Genome directory generated by ``STAR --runMode genomeGenerate``.For ``fasta`` you would provide the path to the corresponding FASTA file, which needs to have been indexed with ``samtools faidx``.Note that we also specify the **STAR aligner** instead of the normal option of BWA/bowtie2. This is becausewe are dealing with spliced cDNA here, which means that ourreads will only contain the exonic sequence. We need to use an aligner that work with spliced dataand create alignments with long gaps to account for introns.For a real-world analysis, we might also want to use a custom reference genome in which we have masked the target SNPs to avoid reference bias. However, for the purposes of this example, we will stick with the standard reference. Running amplimapNow we can run amplimap. In our case, we want to obtain coverage values(“coverages”) and annotated variant calls (“variants”). This will alsoautomatically run the other parts of the pipeline that are required,such as trimming the primers and aligning reads to the genome.First we will do a dry-run to confirm that all input files can be found: amplimap pileups This should output a long list of commands, ending with these lines: Job counts: count jobs 2 align_pe 1 copy_probes 1 copy_snps 2 do_pileup_snps 4 link_reads 2 parse_reads_pe 1 pileup_snps_agg 1 pileups 1 start_analysis 2 stats_alignment 1 stats_alignment_agg 1 stats_reads_agg 1 stats_samples_agg 2 tool_version 22 amplimap dry run successful. Set --run to run!You can see how amplimap is planning to run 2 alignment jobs (align_pe) and 2 SNP pileup jobs (do_pileup_snps),corresponding to the 2 samples we are analysing.Having confirmed that everything looks as expected, we can run amplimap: amplimap pileups --runThis will take a few minutes to complete. It would be much faster if weran jobs in parallel (for example using a cluster), but we are notdoing that for the purposes of this tutorial. Analysing the resultsamplimap has now processed our reads, aligned them to the reference genome, called germline variants, annotated themand produced a summary table with the variant calls.All of the output files have been placed into the ``analysis`` directory.Let's explore some of the output. Most analyses in amplimap produce one or more CSV file with a table of results. In this tutorial, we will use Python and pandas to process and visualize these files. However, the same thing could also be done in R or Excel. 
analysis/reads_parsed/This directory contains results from the first step of the pipeline whichidentified primer arms in reads, trimmed them off and calculated somerun statistics.For example, the ``stats_samples.csv`` file tells us about the number of reads in each sample and how many of these contained the expected primer sequences:import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import scipy.stats %matplotlib inline pd.read_csv('analysis/reads_parsed/stats_samples.csv')And for a more detailed look at the number of reads observed per probe in each sample, there is ``stats_reads.csv``:pd.read_csv('analysis/reads_parsed/stats_reads.csv')We can see that Sample 1 had a lot more reads than Sample 2, but both of them have good coverage. As expected, Sample 1 only contained reads for the first probe targeting Ser330, while Sample 2 only contained reads for the second probe targeting Arg698. analysis/pileup_snps/Based on the details that we provided in ``snps.txt``, amplimap has generated pileup tables that contains the read counts for each of the alleles at each of the SNPs.Let's have a look at the detailed summary table, which tells us the count for each possible nucleotide at each position and each sample:d = pd.read_csv('analysis/pileups_snps/target_snps_pileups_long_detailed.csv') d = d.loc[d.number_called_hq > 0].sort_values(['pos', 'sample']) d[ ['sample', 'chr', 'pos', 'snp_ref', 'snp_alt', 'number_called_hq', 'snp_alt_hq_count_fraction', 'count_hq_A', 'count_hq_C', 'count_hq_G', 'count_hq_T'] ]To test whether the alt allele fraction differs significantly from the null expectation of 50% (ie. the fraction we would expect if there was no NMD), we can use a binomial test:d['count_hq_alt'] = [getattr(row, 'count_hq_%s' % row.snp_alt) for row in d.itertuples()] d['p_binom'] = [scipy.stats.binom_test(x = row.count_hq_alt, n = row.number_called_hq, p = 0.5) for row in d.itertuples()] d[ ['sample', 'chr', 'pos', 'snp_ref', 'snp_alt', 'number_called_hq', 'count_hq_alt', 'snp_alt_hq_count_fraction', 'p_binom'] ]For Sample 1, we observed 5238 cDNA reads covering the Ser330\* mutation site. Of these, 1113 (21%) carried the nucleotide creating the mutation, while 3999 carried the reference nucleotide. This deviation from a balanced 50%/50% allele frequency ratio is highly significant $(p < 10^{-323})$ and suggests that mRNA carrying the mutation is degraded through nonsense-mediated decay, as expected.On the other hand, we did not see a similar effect for the second mutation, which we expected to escape NMD. Here, the allele counts are roughly equal, with 51% of reads carrying the mutation $(p = 0.75)$. 
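As a quick sanity check of the Ser330 figure quoted above, we can rerun the binomial test directly on those counts (1113 alternate-allele reads out of 5238 high-quality calls); this simply reproduces the value already shown in the table:

import scipy.stats

print(scipy.stats.binom_test(x=1113, n=5238, p=0.5))  # effectively 0, far below any usual significance threshold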
We can also visualise this with a bar plot:sns.barplot( data=d[ ['sample', 'snp_ref_hq_count_fraction', 'snp_alt_hq_count_fraction'] ] \ .rename(columns = { 'snp_ref_hq_count_fraction': 'ref', 'snp_alt_hq_count_fraction': 'alt' }) \ .melt(id_vars = ['sample'], var_name = 'Allele', value_name = 'fraction'), x='fraction', y='sample', hue='Allele', orient='h', ) plt.title("Fraction of cDNA reads with given allele") plt.xlabel("Fraction of reads") plt.ylabel("")Appendix#high-res version of plot plt.figure(figsize=(5, 3), dpi=400) sns.barplot( data=d[ ['sample', 'snp_ref_hq_count_fraction', 'snp_alt_hq_count_fraction'] ] \ .rename(columns = { 'snp_ref_hq_count_fraction': 'ref', 'snp_alt_hq_count_fraction': 'alt' }) \ .melt(id_vars = ['sample'], var_name = 'Allele', value_name = 'fraction'), x='fraction', y='sample', hue='Allele', orient='h', ) plt.title("Fraction of cDNA reads with given allele") plt.xlabel("Fraction of reads") plt.ylabel("") sns.despine(bottom=True) !amplimap --version !grep '_amplimap' analysis/versions.yamlamplimap 0.4.5 _amplimap: 0.4.5Sparkify Project WorkspaceThis workspace contains a tiny subset (128MB) of the full dataset available (12GB). Feel free to use this workspace to build your project, or to explore a smaller subset with Spark before deploying your cluster on the cloud. Instructions for setting up your Spark cluster is included in the last lesson of the Extracurricular Spark Course content.You can follow the steps below to guide your data analysis and model building portion of this project.# import libraries import pyspark from pyspark import SparkConf from pyspark.sql import SparkSession from pyspark.sql import Window from pyspark.sql.types import StringType, DoubleType, IntegerType from pyspark.sql.functions import desc from pyspark.sql.functions import asc from pyspark.sql.functions import udf from pyspark.sql.functions import sum as Fsum from pyspark.sql.functions import max as Fmax from pyspark.sql.functions import isnan, count, when, col, desc, udf, col, sort_array, asc, avg import datetime import numpy as np import pandas as pd %matplotlib inline import matplotlib.pyplot as plt from pyspark.ml.feature import VectorAssembler, Normalizer, StandardScaler, StringIndexer, PCA from pyspark.ml.classification import LogisticRegression from pyspark.ml import Pipeline from pyspark.ml.tuning import CrossValidator, ParamGridBuilder from pyspark.ml.evaluation import MulticlassClassificationEvaluator # create a Spark session spark_session = SparkSession.builder.appName('Sparkify').getOrCreate() #Let's check if the change went through spark_session.sparkContext.getConf().getAll() spark_sessionLoad and Clean DatasetWe begin with provided mini-dataset file called `mini_sparkify_event_data.json`. We load and clean the dataset, checking for invalid or missing data - for example, records without userids or sessionids.# read file path = "mini_sparkify_event_data.json" user_log = spark_session.read.json(path) user_log.take(5) user_log.printSchema() user_log.describe() user_log.show(n=1) user_log.count() # Drop Rows with Missing Values user_log_valid = user_log.dropna(how = "any", subset = ["userId", "sessionId"]) user_log_valid.count()As we can see all the log lines have userId and sessionId. 2. Exploratory Data AnalysisWhen working with the full dataset, it is interesting to perform EDA by loading a small subset of the data and doing basic manipulations within Spark. In this workspace, we are provided with a small subset of data to explore. 
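Note that ``dropna`` only removes records where ``userId`` or ``sessionId`` is actually null. As we will see below, this log also contains rows whose ``userId`` is an empty string (events from unregistered users), so a quick extra check is useful early on; a short sketch reusing the ``user_log`` DataFrame loaded above:

```python
from pyspark.sql.functions import col

# count rows whose userId is an empty string rather than null
empty_user_rows = user_log.filter(col("userId") == "").count()
print("Rows with empty userId:", empty_user_rows)
```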
Define ChurnOnce we've done some preliminary analysis, we will create a column `Churn` to use as the label for our model. The values `Cancellation Confirmation`events are to define the churn, which happen for both paid and free users. We will also look into the `Downgrade` events. Explore DataOnce we've defined churn, we perform some exploratory data analysis to observe the behavior for users who stayed vs users who churned. We start by exploring aggregates on these two groups of users, observing how much of a specific action they experienced per a certain time unit or number of songs played. In order to be able to create the Churn and Downgrade columns we need to check the existing page column's values# Lets check all the pages visited by the users pages_df = user_log.select("page").dropDuplicates().sort("page") pages_df.show() def flag_row(df, column_name, column_value, new_column_name): ''' Creates a new flag column called new_column_name, based on the value passed with the column_value. If the value of the column column_name equals to column_value the corresponding value of new_column_name wille be 1. othervise 0. :param: df : dataframe :param: column_name : The name of the column to explore. :param: column_value : The searched value :param: new_column_name : The name of the new flag column :type column_name: The name of the column to explore :type column_value: The type defined for the column column_name :type new_column_name: String, the name of the new flag column :return: the modified dataframe :rtype: dataframe ''' flag_downgrade_event = udf(lambda x: 1 if x == column_value else 0, IntegerType()) df = df.withColumn(new_column_name, flag_downgrade_event(column_name)) return df2.1. Users perform Churnuser_log.filter("page = 'Cancellation Confirmation'").show(3) user_log = flag_row(user_log, "page", 'Cancellation Confirmation', "Churn") user_log.show(3)+----------------+---------+---------+------+-------------+--------+---------+-----+--------------------+------+--------+-------------+---------+-----------------+------+-------------+--------------------+------+-----+ | artist| auth|firstName|gender|itemInSession|lastName| length|level| location|method| page| registration|sessionId| song|status| ts| userAgent|userId|Churn| +----------------+---------+---------+------+-------------+--------+---------+-----+--------------------+------+--------+-------------+---------+-----------------+------+-------------+--------------------+------+-----+ | |Logged In| Colin| M| 50| Freeman|277.89016| paid| Bakersfield, CA| PUT|NextSong|1538173362000| 29| Rockpools| 200|1538352117000|Mozilla/5.0 (Wind...| 30| 0| |Five Iron Frenzy|Logged In| Micah| M| 79| Long|236.09424| free|Boston-Cambridge-...| PUT|NextSong|153[...]2.2 Users Downgrade Their AccountsLet's find the users who downgraded their accounts and flag those log entries. 
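As an aside, the flag that ``flag_row`` (used above for churn and again below for downgrades) produces with a Python UDF can also be expressed with Spark's built-in ``when``/``otherwise`` functions, which are already imported and avoid UDF serialization overhead. A minimal sketch of the equivalent churn flag:

```python
from pyspark.sql.functions import when, col

# equivalent to flag_row(user_log, "page", "Cancellation Confirmation", "Churn")
flagged = user_log.withColumn(
    "Churn", when(col("page") == "Cancellation Confirmation", 1).otherwise(0)
)
```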
A window function and cumulative sum is used to distinguish each user's data as either pre or post downgrade events.user_log.filter("page = 'Submit Downgrade'").show(3) user_log = flag_row(user_log, "page", 'Submit Downgrade', "Downgraded") user_log.show(3)+----------------+---------+---------+------+-------------+--------+---------+-----+--------------------+------+--------+-------------+---------+-----------------+------+-------------+--------------------+------+-----+----------+ | artist| auth|firstName|gender|itemInSession|lastName| length|level| location|method| page| registration|sessionId| song|status| ts| userAgent|userId|Churn|Downgraded| +----------------+---------+---------+------+-------------+--------+---------+-----+--------------------+------+--------+-------------+---------+-----------------+------+-------------+--------------------+------+-----+----------+ | |Logged In| Colin| M| 50| Freeman|277.89016| paid| Bakersfield, CA| PUT|NextSong|1538173362000| 29| Rockpools| 200|1538352117000|Mozilla/5.0 (Wind...| 30| 0| 0| |Five Iron Frenzy|Logged In| Micah| M| 79| Long|236.09424| f[...]As we added new columns, we can go on with our analysis. Let's begin with userId.# Lets check all the user ids in our dataset user_ids_df = user_log.select("userId").dropDuplicates().sort("userId") user_ids_df.show() user_ids_df.count() user_log.select(["userId", "sessionId", "page", "song"]).where(user_log.userId == "10").collect() # For the rest of project we will work with not empty userIds users' logs. user_log = user_log.filter(user_log["userId"] != "") user_log.count() #Check the result df = user_log.select("userId").groupby("userId").count().orderBy(desc("count")) df.show()+------+-----+ |userId|count| +------+-----+ | 39| 9632| | 92| 7230| | 140| 6880| |300011| 5732| | 124| 4825| |300021| 4659| |300017| 4428| | 85| 4370| | 42| 4257| |200023| 3769| | 6| 3761| | 29| 3603| | 54| 3437| | 100| 3214| | 9| 3191| | 126| 3102| |300015| 3051| | 91| 3014| | 98| 2891| | 74| 2887| +------+-----+ only showing top 20 rowsOnce filtered all the unregistrated users' logs we can go on our analysis.#Column analyse: artist df = user_log.select("artist").groupby("artist").count().orderBy(desc("count")) df.show() df.count() # Get missing value rate for artist column 50046/278154 #Let's replace null with values 'unknown_artist' user_log = user_log.na.fill('unknown_artist', "artist") # How many songs were played from the most played artist? 
most_played_artist_count = user_log.select("artist").groupby("artist").count().orderBy(desc("count")) most_played_artist_count.show() #Column analyse : auth df = user_log.select("auth").groupby("auth").count().orderBy(desc("count")) df.show() df.count() #Column analyse : firstName df = user_log.select("firstName").groupby("firstName").count().orderBy(desc("count")) df.show() df.count() #Column analyse :lastName df = user_log.select("lastName").groupby("lastName").count().orderBy(desc("count")) df.show() df.count()+----------+-----+ | lastName|count| +----------+-----+ | Campbell|14060| | Reed| 9284| | Williams| 8410| | Taylor| 7230| | Johnson| 6106| | Larson| 6105| | House| 5732| | Thompson| 5217| | Jones| 4831| | Beck| 4825| | Allen| 4659| | Stewart| 4461| | Miller| 4428| | Thomas| 4370| |Richardson| 4257| | Phillips| 4002| | Robinson| 3890| | Daniels| 3769| | Roberson| 3761| | Long| 3483| +----------+-----+ only showing top 20 rowsAs we have unique user identifier userId we can skip the use of the columns firstName and lastName.#Column analyse : gender df = user_log.select("gender").groupby("gender").count().orderBy(desc("count")) df.show() df.count() user_log = user_log.withColumn("gender", \ when(user_log["gender"] == 'F', 'Female').otherwise(user_log["gender"])) user_log = user_log.withColumn("gender", \ when(user_log["gender"] == 'M', 'Male').otherwise(user_log["gender"])) user_log.select("gender").groupby("gender").count().orderBy(desc("count")).show() # Column analyse : itemInSession df = user_log.select("itemInSession").groupby("itemInSession").count().orderBy(desc("count")) df.show() df.count() # Column analyse : length user_log.select("length").groupby("length").count().orderBy(desc("count")).show() # It seems to be the length of listedned part of the song so it will be interesting to keep this information. # Lets replace null values by value 0. user_log = user_log.na.fill(0.0, "length") user_log.select("length").groupby("length").count().orderBy(desc("count")).show() # Column analyse : level user_log.select("level").groupby("level").count().orderBy(desc("count")).show() #Column analyze : location df = user_log.select("location").groupby("location").count().orderBy(desc("count")) df.show() df.count() #Column analyze : method df = user_log.select("method").groupby("method").count().orderBy(desc("count")) df.show() df.count()+------+------+ |method| count| +------+------+ | PUT|257818| | GET| 20336| +------+------+It seems to be technical column so it may give us some insights about user's interaction with the platform. Let's keep this column for further analysis.#Column analyse : sessionId user_log.describe("sessionId").show() df = user_log.select("sessionId").dropDuplicates().sort("sessionId") df.show() df.count() # Column analyse : page df = user_log.select("page").dropDuplicates().sort("page") df.show() df.count() #Column analyze : registration df = user_log.select("registration").groupby("registration").count().orderBy(desc("count")) df.show() df.count() #Check the year of registration get_year = udf(lambda x: datetime.datetime.fromtimestamp(x / 1000.0). year) df = user_log.withColumn("registration_year", get_year(user_log.registration)) df.head() df = df.select("registration_year").groupby("registration_year").count().orderBy(desc("count")) df.show()+-----------------+------+ |registration_year| count| +-----------------+------+ | 2018|278154| +-----------------+------+As there is only one year let's check month. 
But with a bigger dataset we may have several valuesfor year, so let's keep it.user_log = user_log.withColumn("registration_year", get_year(user_log.registration)) #Check the month of registration get_month = udf(lambda x: datetime.datetime.fromtimestamp(x / 1000.0). month) df = user_log.withColumn("registration_month", get_month(user_log.registration)) df.head() df = df.select("registration_month").groupby("registration_month").count().orderBy(desc("count")) df.show() df.count()+------------------+------+ |registration_month| count| +------------------+------+ | 9|165635| | 8| 48386| | 7| 33519| | 6| 20523| | 3| 3761| | 5| 2391| | 11| 2257| | 10| 1682| +------------------+------+Lets add this column to our dataset.user_log = user_log.withColumn("registration_month", get_month(user_log.registration)) #Column analyze : song df = user_log.select("song").groupby("song").count().orderBy(desc("count")) df.show() df.count()+--------------------+-----+ | song|count| +--------------------+-----+ | null|50046| | You're The One| 1153| | Undo| 1026| | Revelry| 854| | Sehr kosmisch| 728| |Horn Concerto No....| 641| |Dog Days Are Over...| 574| | Secrets| 466| | Use Somebody| 459| | Canada| 435| | Invalid| 424| | Ain't Misbehavin| 409| | Représente| 393| |Sincerité Et J...| 384| |Catch You Baby (S...| 373| | Yellow| 343| | Somebody To Love| 343| | Hey_ Soul Sister| 334| | The Gift| 327| | Fireflies| 312| +--------------------+-----+ only showing top 20 rowsIn the column song, the are too many missing values as well as to many different values. So for the moment we wont use this column.#Column analyze : status df = user_log.select("status").groupby("status").count().orderBy(desc("count")) df.show() #df.count()+------+------+ |status| count| +------+------+ | 200|254718| | 307| 23184| | 404| 252| +------+------+The column status seems to be a technical column. If we check the list of HTTP status codes, we see that :* 2xx Success, 200 OK * 3xx Redirection, 307 Temporary Redirect (since HTTP/1.1)* 4xx Client errors, 404 Not FoundThe users experiencing technical errors are more susceptive not to be happy with the platform and to churn. So let's keep this column as categorical column.#Column analyze : userAgent df = user_log.select("userAgent").groupby("userAgent").count().orderBy(desc("count")) #Column analyze : userAgent df.show() df.count()+--------------------+-----+ | userAgent|count| +--------------------+-----+ |"Mozilla/5.0 (Win...|22751| |"Mozilla/5.0 (Mac...|19611| |"Mozilla/5.0 (Mac...|18448| |"Mozilla/5.0 (Mac...|17348| |Mozilla/5.0 (Wind...|16700| |"Mozilla/5.0 (Win...|15395| |"Mozilla/5.0 (Win...|14598| |Mozilla/5.0 (Maci...|10300| |"Mozilla/5.0 (iPa...| 8912| |Mozilla/5.0 (comp...| 8624| |"Mozilla/5.0 (Mac...| 8094| |"Mozilla/5.0 (Win...| 7923| |"Mozilla/5.0 (Mac...| 7906| |"Mozilla/5.0 (Win...| 7624| |"Mozilla/5.0 (iPh...| 6417| |Mozilla/5.0 (Wind...| 5989| |"Mozilla/5.0 (Mac...| 5716| |"Mozilla/5.0 (Win...| 5238| |"Mozilla/5.0 (Win...| 4917| |Mozilla/5.0 (Wind...| 4663| +--------------------+-----+ only showing top 20 rowsLet's do some more analysis to understand better our data set.# Distribution of genders in the data set.. 
nb_female = user_log.select(["userId"]).where("gender = 'Female'").dropDuplicates().count() nb_male = user_log.select(["userId"]).where("gender = 'Male'").dropDuplicates().count() nb_unkown_gender = user_log.select(["userId"]).where("gender = 'unknown_gender'").dropDuplicates().count() print('There are {} female, {} male and {} unknow_gender users and in our dataset.'.format(nb_female, nb_male, nb_unkown_gender)) # How many songs were played from the most played artist? most_played_artist_count = user_log.select("artist").groupby("artist").count().orderBy(desc("count")).show()+--------------------+-----+ | artist|count| +--------------------+-----+ | unknown_artist|50046| | Kings Of Leon| 1841| | Coldplay| 1813| |Florence + The Ma...| 1236| | | 1135| | Björk| 1133| | The Black Keys| 1125| | Muse| 1090| | | 1044| | | 1007| | Eminem| 953| | Radiohead| 884| | Alliance Ethnik| 876| | Train| 854| | | 840| | OneRepublic| 828| | The Killers| 822| | | 787| | Evanescence| 781| | Harmonia| 729| +--------------------+-----+ only showing top 20 rowsCalculating Statistics by Hourget_hour = udf(lambda x: datetime.datetime.fromtimestamp(x / 1000.0). hour) user_log = user_log.withColumn("hour", get_hour(user_log.ts)) user_log.head() songs_in_hour = user_log.filter(user_log.page == "NextSong").groupby(user_log.hour).count().orderBy(user_log.hour.cast("float")) songs_in_hour.show() songs_in_hour_pd = songs_in_hour.toPandas() songs_in_hour_pd.hour = pd.to_numeric(songs_in_hour_pd.hour) plt.scatter(songs_in_hour_pd["hour"], songs_in_hour_pd["count"]) plt.xlim(-1, 24); plt.ylim(0, 1.2 * max(songs_in_hour_pd["count"])) plt.xlabel("Hour") plt.ylabel("Songs played"); windowval = Window.partitionBy("userId").orderBy(desc("ts")).rangeBetween(Window.unboundedPreceding, 0) user_log = user_log.withColumn("phase", Fsum("downgraded").over(windowval)) #user_log.select(["userId", "firstname", "page", "level", "song", "phase"]).where(user_log.userId == "141").collect() def save_as_csv(data, path): data.write.save(path, format="csv", header=True) def load_from_csv(spark, path): data = spark.read.csv(path, header=True) return data path = "data/sparkify_log_small_6.csv" save_as_csv(user_log, path) path = "data/sparkify_log_small_6.csv" user_log = load_from_csv(spark_session, path) user_log.printSchema()root |-- artist: string (nullable = true) |-- auth: string (nullable = true) |-- firstName: string (nullable = true) |-- gender: string (nullable = true) |-- itemInSession: string (nullable = true) |-- lastName: string (nullable = true) |-- length: string (nullable = true) |-- level: string (nullable = true) |-- location: string (nullable = true) |-- method: string (nullable = true) |-- page: string (nullable = true) |-- registration: string (nullable = true) |-- sessionId: string (nullable = true) |-- song: string (nullable = true) |-- status: string (nullable = true) |-- ts: string (nullable = true) |-- userAgent: string (nullable = true) |-- userId: string (nullable = true) |-- Churn: string (nullable = true) |-- Downgraded: string (nullable = true) |-- registration_year: string (nullable = true) |-- registration_month: string (nullable = true) |-- hour: string (nullable = true) |-- phase: string (nullable = true)3. Feature EngineeringOnce we have familiarized ourself with the data, we build out the features to train your model on. 
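The ``Churn`` column above flags only the single ``Cancellation Confirmation`` event itself. For a per-user label, one option is to propagate that flag to all of a user's rows with a window aggregation, analogous to the ``phase`` column computed above. A sketch of that idea (the column name ``user_churned`` is just an illustrative choice; ``Fmax`` is already imported):

```python
from pyspark.sql import Window
from pyspark.sql.functions import col, max as Fmax

# 1 on every row of a user who has at least one Cancellation Confirmation event, else 0
user_window = Window.partitionBy("userId")
labeled = user_log.withColumn(
    "user_churned", Fmax(col("Churn").cast("int")).over(user_window)
)
```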
To work with the full dataset, we can follow the following steps.- Write a script to extract the necessary features from the smaller subset of data- Ensure that our script is scalable, using the best practices discussed in Lesson 3- Try our script on the full data set, debugging your script if necessaryLet's begin by defining numerical and categorical columns.numerical_columns = [ 'itemInSession', 'length'] categorical_columns =[ "auth", "artist", "gender", "level", "page", "status", "userId", "method", "Churn", "Downgraded", "phase", "registration_year", "registration_month", "hour", ] all_columns = categorical_columns.copy() for col in numerical_columns: all_columns.append(col) all_columns registreted_user_log = user_log.select(all_columns) # Change the type of numerical features tu integer for col in numerical_columns: registreted_user_log = registreted_user_log.withColumn(col,registreted_user_log[col].cast(IntegerType())) assembler = VectorAssembler(inputCols=numerical_columns, outputCol="num_features") registreted_user_log = assembler.transform(registreted_user_log) standard_scaler = StandardScaler(inputCol="num_features", outputCol="SScaler_num_features", withMean=True, withStd=True) standard_scaler_model = standard_scaler.fit(registreted_user_log) registreted_user_log = standard_scaler_model.transform(registreted_user_log) for col in categorical_columns: indexer = StringIndexer(inputCol=col, outputCol="Idx_"+col) registreted_user_log = indexer.fit(registreted_user_log).transform(registreted_user_log) registreted_user_log.printSchema() new_categorical_columns = [] for col in categorical_columns: new_categorical_columns.append("Idx_" + col) new_categorical_columns new_numerical_columns = "SScaler_num_features" new_all_columns = new_categorical_columns.copy() new_all_columns.append(new_numerical_columns) new_all_columns #The list of column to keep processed_user_log = registreted_user_log.select(new_all_columns) processed_user_log.columns4. ModelingSplit the full dataset into train, test, and validation sets. Test out several of the machine learning methods you learned. Evaluate the accuracy of the various models, tuning parameters as necessary. Determine your winning model based on test accuracy and report results on the validation set. Since the churned users are a fairly small subset, I suggest using F1 score as the metric to optimize. 4.1. Train Test Splitrest, validation = processed_user_log.randomSplit([0.7, 0.3], seed = 42) print("Training Dataset Count: " + str(rest.count())) print("Test Dataset Count: " + str(validation.count()))Training Dataset Count: 194863 Test Dataset Count: 832914.2. Build Pipelinefeatures = [] for col in processed_user_log.columns: if col not in ["Idx_Churn", 'Idx_Downgraded'] : features.append(col) features assembler = VectorAssembler(inputCols = features, outputCol='features') pca = PCA(k=10, inputCol='features', outputCol='pcaFeature') churn_indexer = StringIndexer(inputCol="Idx_Churn", outputCol="label") lr = LogisticRegression(maxIter=10, regParam=0.3) churn_pipeline = Pipeline (stages=[assembler, pca, churn_indexer, lr]) assembler = VectorAssembler(inputCols = features, outputCol='features') pca = PCA(k=10, inputCol='features', outputCol='pcaFeature') indexer = StringIndexer(inputCol="Idx_Downgraded", outputCol="label") lr = LogisticRegression(maxIter=10, regParam=0.3) downgraded_pipeline = Pipeline (stages=[assembler, pca, indexer, lr])4.3. 
Tune Model
paramGrid = ParamGridBuilder() \ .addGrid(lr.maxIter,[100, 50]) \ .addGrid(lr.regParam,[0.0, 0.1]) \ .build() churn_crossval = CrossValidator(estimator=churn_pipeline, estimatorParamMaps=paramGrid, evaluator=MulticlassClassificationEvaluator(), numFolds=3) downgraded_crossval = CrossValidator(estimator=downgraded_pipeline, estimatorParamMaps=paramGrid, evaluator=MulticlassClassificationEvaluator(), numFolds=3)
Note that MulticlassClassificationEvaluator uses F1 as its default metric, which matches the suggestion above. After training the models, we check the average cross-validation metrics via avgMetrics.churn_model = churn_crossval.fit(rest) print(churn_model.avgMetrics) churn_results = churn_model.transform(validation) downgraded_model = downgraded_crossval.fit(rest) print(downgraded_model.avgMetrics) downgraded_results = downgraded_model.transform(validation) churn_counts_Pred_OK = churn_results.filter(churn_results.label == churn_results.prediction).count() churn_counts_Total = churn_results.count() print(churn_counts_Pred_OK) print(churn_counts_Total) downgraded_counts_Pred_OK = downgraded_results.filter(downgraded_results.label == downgraded_results.prediction).count() downgraded_counts_Total = downgraded_results.count() print(downgraded_counts_Pred_OK) print(downgraded_counts_Total)83277 83291
Scikit-learn
In this tutorial, we will build a model with the Python [`scikit-learn`](https://scikit-learn.org/stable/) module. Additionally, you will learn how to create a data preprocessing pipeline.
Data preparation
# See section "Data" for details about data preprocessing from case_duke_data_prep import * df
Data preprocessing pipeline
# Modules from sklearn.compose import ColumnTransformer from sklearn.compose import make_column_selector as selector from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn import set_config from sklearn.preprocessing import StandardScaler, OneHotEncoder # for numeric features # we use Pipeline(), SimpleImputer with median numeric_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(strategy='median')), ('scaler', StandardScaler()) ]) ## the imputer fills empty cells, here with the median ## the scaler brings all values onto a common scale # for categorical features # use OneHotEncoder categorical_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(strategy='constant', fill_value='missing')), ('onehot', OneHotEncoder(handle_unknown='ignore')) ]) ## one-hot encoding turns the variable into a binary format; scikit-learn expects categorical values in binary form # Pipeline # ColumnTransformer preprocessor = ColumnTransformer(transformers=[ ('num', numeric_transformer, selector(dtype_exclude="category")), ('cat', categorical_transformer, selector(dtype_include="category")) ]) ## ColumnTransformer is used to apply these transformations column-wise
Simple regression
# Select features for simple regression features = ['area'] X = df[features] # Create response y = df["price"] # check feature X.info() # check label y # check for missing values print("Missing values X:",X.isnull().any(axis=1).sum()) print("Missing values Y:",y.isnull().sum())Missing values X: 0 Missing values Y: 0
Data splitting
from sklearn.model_selection import train_test_split # Train Test Split # Use random_state to make this notebook's output identical at every run X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) X_train
Modeling
from sklearn.linear_model import LinearRegression # Create pipeline with model lm_pipe = Pipeline(steps=[ ('preprocessor', preprocessor), ('lm', LinearRegression()) ]) # show pipeline
set_config(display="diagram") # Fit model lm_pipe.fit(X_train, y_train) # Obtain model coefficients lm_pipe.named_steps['lm'].coef_
Evaluation with training data
There are various options to evaluate a model in scikit-learn. Review this overview about [metrics and scoring: quantifying the quality of predictions](https://scikit-learn.org/stable/modules/model_evaluation.html).X_train.head() y_pred = lm_pipe.predict(X_train) from sklearn.metrics import r2_score r2_score(y_train, y_pred) ## shows that this is not a very good model, since it explains only about 35% of the variance from sklearn.metrics import mean_squared_error mean_squared_error(y_train, y_pred) # RMSE mean_squared_error(y_train, y_pred, squared=False) from sklearn.metrics import mean_absolute_error mean_absolute_error(y_train, y_pred) ## the average absolute error the model makes %matplotlib inline import seaborn as sns sns.set_theme(style="ticks") # Plot with Seaborn # We first need to create a DataFrame df_train = pd.DataFrame({'x': X_train['area'], 'y':y_train}) sns.lmplot(x='x', y='y', data=df_train, line_kws={'color': 'darkred'}, ci=False); ## we can see a few outliers that may distort the fit, so regression diagnostics should be applied import plotly.io as pio import plotly.offline as py import plotly.express as px # Plot with Plotly Express fig = px.scatter(x=X_train['area'], y=y_train, opacity=0.65, trendline='ols', trendline_color_override='darkred'); fig.show() sns.residplot(x=y_pred, y=y_train, scatter_kws={"s": 80});Let's take a look at the predictions with the largest errors:# create dataframe df_error = pd.DataFrame( { "y": y_train, "y_pred": y_pred, "error": y_pred - y_train }) # sort by error, select top 10 and get index error_index = df_error.sort_values(by=['error']).nlargest(10, 'error').index # show corresponding data observations df.iloc[error_index] ## these are the observations with the largest errors
Evaluation with test data
y_pred = lm_pipe.predict(X_test) print('MSE:', mean_squared_error(y_test, y_pred)) print('RMSE:', mean_squared_error(y_test, y_pred, squared=False)) # Plot with Plotly Express fig = px.scatter(x=X_test['area'], y=y_test, opacity=0.65, trendline='ols', trendline_color_override='darkred') fig.show()Model generalization on unseen data (see [plotly documentation](https://plotly.com/python/ml-regression/))import numpy as np import plotly.graph_objects as go x_range = pd.DataFrame({ 'area': np.linspace(X_train['area'].min(), X_train['area'].max(), 100)}) y_range = lm_pipe.predict(x_range) go.Figure([ go.Scatter(x=X_train.squeeze(), y=y_train, name='train', mode='markers'), go.Scatter(x=X_test.squeeze(), y=y_test, name='test', mode='markers'), go.Scatter(x=x_range.area, y=y_range, name='prediction') ])
Multiple regression
# Select features for multiple regression features= [ 'bed', 'bath', 'area', 'year_built', 'cooling', 'lot' ] X = df[features] X.info() print("Missing values:",X.isnull().any(axis = 1).sum()) # Create response y = df["price"] # Data splitting X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # Create pipeline with model lm_pipe = Pipeline(steps=[ ('preprocessor', preprocessor), ('lm', LinearRegression()) ]) # show pipeline set_config(display="diagram") # Fit model lm_pipe.fit(X_train, y_train) # Obtain model coefficients lm_pipe.named_steps['lm'].coef_Evaluation with test data:y_pred = lm_pipe.predict(X_test) r2_score(y_test, y_pred)**Let's start with importing the libraries as follows:**!pip install unidecode import
unidecode import string import random import math import torch import torch.nn as nn from torch.autograd import Variable from keras.datasets import reutersRequirement already satisfied: unidecode in /usr/local/lib/python3.6/dist-packages (1.0.23)**As input and output, we can use any character:**all_characters = string.printable input_size = len(all_characters) output_size = input_size print(input_size)100**We need to define the hyperparameters before moving on:**n_steps = 2000 batch_size = 512 hidden_size = 100 n_layers = 2 learning_rate = 0.01 len_text = 200 print_every = 50**We will be using the Reuters datasets from Keras**data = reuters.load_data() len_data = len(data)**Let's define a function that transforms characters to tensors:**def char_to_tensor(string): tensor = torch.zeros(len(string)).long() for c in range(len(string)): try: tensor[c] = all_characters.index(string[c]) except: continue return tensor**Next, we define a batch generator:**def batch_gen(length_text, batch_size): X = torch.LongTensor(batch_size, length_text) y = torch.LongTensor(batch_size, length_text) for i in range(batch_size): start_index = random.randint(0, len_data - length_text) end_index = start_index + length_text + 1 text = data[start_index : end_index] X[i] = char_to_tensor(text[:-1]) y[i] = char_to_tensor(text[1:]) X = Variable(X) y = Variable(y) X = X.cuda() y = y.cuda() return X, y**We are now ready to define our network architecture:**class create_model(nn.Module): def __init__(self, input_size, hidden_size, output_size, n_layers = 1): super(create_model, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.output_size = output_size self.n_layers = n_layers self.encoder = nn.Embedding(input_size, hidden_size) self.rnn = nn.GRU(hidden_size, hidden_size, n_layers) self.decoder = nn.Linear(hidden_size, output_size) def forward(self, input, hidden): batch_size = input.size(0) encoded = self.encoder(input) output, hidden = self.rnn(encoded.view(1, batch_size, -1), hidden) output = self.decoder(output.view(batch_size, -1)) return output, hidden def init_hidden(self, batch_size): return Variable(torch.zeros(self.n_layers, batch_size, self.hidden_size))**We continue by creating our model and defining the optimizer and loss function as follows:**decoder_model = create_model( input_size, hidden_size, output_size, n_layers = n_layers, ) opt = torch.optim.Adam(decoder_model.parameters(), lr = learning_rate) loss = nn.CrossEntropyLoss() decoder_model.cuda()**We also create a function that we can use to generate text during training:**def generate_text(decoder, start = 'The', predict_len = 100): hidden = decoder.init_hidden(1).cuda() prime_input = Variable(char_to_tensor(start).unsqueeze(0)).cuda() predicted = start for p in range(len(start) - 1): _, hidden = decoder(prime_input[:, p], hidden) x = prime_input[:, -1] for p in range(predict_len): output, hidden = decoder(x, hidden) output_dist = output.data.view(-1).div(0.8).exp() # Add some randomness top_i = torch.multinomial(output_dist, 1)[0] predicted_char = all_characters[top_i] predicted += predicted_char x = Variable(char_to_tensor(predicted_char).unsqueeze(0)).cuda() return predicted**Finally, let's start training:**loss_avg = 0 for i in range(n_steps): X, y = batch_gen(len_text, batch_size) hidden = decoder_model.init_hidden(batch_size).cuda() decoder_model.zero_grad() loss_total = 0 for c in range(len_text): output, hidden = decoder_model(X[:, c], hidden) loss_total += loss(output.view(batch_size, -1), y[:, c]) 
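# at this point loss_total is the cross-entropy summed over all len_text character positions
# of the batch; the single backward()/step() pair below therefore performs one parameter
# update per batch, backpropagating through the whole unrolled sequence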
loss_total.backward() opt.step() loss_value = loss_total.data[0] / len_text loss_avg += loss_value if i % print_every == 0: print('Epoch {}: loss {}'.format(i, loss_avg)) print(generate_text(decoder_model, 'The', 100), '\n')# Imports import pandas as pd # Url black_friday_csv_link = "https://raw.githubusercontent.com/pierretd/datasets-1/master/BlackFriday.csv" # Reading in csv black_friday_df = pd.read_csv(black_friday_csv_link)The above dataset has already been loaded in for you, answer the following questions. If you get done quickly, move onto stretch goals and super stretch goals. Work on improving your model until 8:40. 1) Clean the data set and drop the Null/NaN values. Rename Product_Category_1-3 columns with an actual Product.df = black_friday_df #Making Variable Name Shorter to make the code clean and easier to work with df.head() #verifying the data looks 'somewhat' correct df.info() # How many NaNs? df.isna().sum() # Looking at NaNs df[['Product_Category_2','Product_Category_3']] # Dropping NaNs clean_df = df.dropna() # Making sure that it is clean clean_df.isna().sum() # Renaming Product Categories clean_df = clean_df.rename(index=str, columns={'Product_Category_1': 'Bacon', 'Product_Category_2': 'Eggs', 'Product_Category_3': 'Waffles'}) # CHecking that it worked list(clean_df)2) How many unique user_ids does the data set contain?# Unique values clean_df['User_ID'].unique() # Unique elements - user id clean_df['User_ID'].nunique()3) How many unique age brackets are in the dataset. Which Age bracket has the most entries? Which has the least?# Unique elements - age clean_df['Age'].nunique() # Index of first occurance of maximum clean_df['Age'].value_counts().idxmax() # Index of first occurance of minimum clean_df['Age'].value_counts().idxmin()4) Transform the Gender categorical variable into a numerical variable. Then transform that numerical value into a Boolean.# Unique values - gender clean_df['Gender'].unique() # Another way to do this #clean_df['Gender'] = clean_df['Gender'].astype(int) # Encode gender as 1 or 0 clean_df = clean_df.replace({'F': 1, 'M': 0}) # Checking my work clean_df['Gender'].unique() # Change to boolean clean_df['Gender'] = clean_df['Gender'].astype(bool) # Checking my work clean_df['Gender'].unique() # Renaming columns clean_df = clean_df.rename(index=str, columns={"Gender": "IsFemale"}) # Checking my work list(clean_df)5) What is the average Occupation score? What is the Standard Deviation? What is the maximum and minimum value?# Examining Occupation clean_df['Occupation'].describe()6) Group Age by Gender and print out a cross tab with age as the y axis# Crosstab Time! 
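# pd.crosstab below uses Age as the row index (the y axis) and IsFemale as the columns,
# counting how many records fall into each combination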
pd.crosstab(clean_df['Age'], clean_df['IsFemale']) # Simulating customers clean_df = pd.get_dummies(clean_df) clean_df.head() from sklearn.linear_model import LinearRegression X = clean_df.drop('Purchase', axis = 1) y = clean_df.Purchase lm = LinearRegression() lm lm.fit(X, y) import matplotlib.pyplot as pltStretch Goal: Build a linear regression model to predict the purchase amount given the other features in the data set with scikit learn.Super Stretch Goals: Plot the actual values vs the predicted values.plt.figure(figsize=(9,9)) plt.scatter(y, lm.predict(X)) plt.xlabel("Actual Purchase") plt.ylabel("Predicted Purchase") plt.title("Actual vs Predicted Purchase");Find a good way to measure your model's predictive power.Co-Ordinate distance Create columndef distance(df): df["d1"] = np.abs(df.X1 - df.Y1) df["d2"] = np.abs(df.X2 - df.Y2) df["d3"] = np.abs(df.X3 - df.Y3) df["d4"] = np.abs(df.X4 - df.Y4) df["d5"] = np.abs(df.X5 - df.Y5) df["d6"] = np.abs(df.X6 - df.Y6) df["d7"] = np.abs(df.X7 - df.Y7) df["d8"] = np.abs(df.X8 - df.Y8) df["d9"] = np.abs(df.X9 - df.Y9) df["d10"] = np.abs(df.X10 - df.Y10) df["d11"] = np.abs(df.X11 - df.Y11) df["d12"] = np.abs(df.X12 - df.Y12) df["d13"] = np.abs(df.X13 - df.Y13) df["d14"] = np.abs(df.X14 - df.Y14) df["d15"] = np.abs(df.X15 - df.Y15) df["d16"] = np.abs(df.X16 - df.Y16) distance(df_sydney) df_sydney.head(10)Creating Angle with tanhdef angle(df): df["ang1"] = (np.arctan(np.divide(df.Y1 , df.X1))*180)/np.pi df["ang2"] = (np.arctan(np.divide(df.Y2 , df.X2))*180)/np.pi df["ang3"] = (np.arctan(np.divide(df.Y3 , df.X3))*180)/np.pi df["ang4"] = (np.arctan(np.divide(df.Y4 , df.X4))*180)/np.pi df["ang5"] = (np.arctan(np.divide(df.Y5 , df.X5))*180)/np.pi df["ang6"] = (np.arctan(np.divide(df.Y6 , df.X6))*180)/np.pi df["ang7"] = (np.arctan(np.divide(df.Y7 , df.X7))*180)/np.pi df["ang8"] = (np.arctan(np.divide(df.Y8 , df.X8))*180)/np.pi df["ang9"] = (np.arctan(np.divide(df.Y9 , df.X9))*180)/np.pi df["ang10"] =(np.arctan(np.divide(df.Y10 , df.X10))*180)/np.pi df["ang11"] =(np.arctan(np.divide(df.Y11 , df.X11))*180)/np.pi df["ang12"] =(np.arctan(np.divide(df.Y12 , df.X12))*180)/np.pi df["ang13"] =(np.arctan(np.divide(df.Y13 , df.X13))*180)/np.pi df["ang14"] =(np.arctan(np.divide(df.Y14 , df.X14))*180)/np.pi df["ang15"] =(np.arctan(np.divide(df.Y15 , df.X15))*180)/np.pi df["ang16"] =(np.arctan(np.divide(df.Y16 , df.X16))*180)/np.pi angle(df_sydney) df_sydney.head(10) df_sydneydrop=df_sydney.drop(['X1','X2','X3','X4','X5','X6','X7','X8','X9','X10','X11','X12','X13','X14','X15','X16', 'Y1','Y2','Y3','Y4','Y5','Y6','Y7','Y8','Y9','Y10','Y11','Y12','Y13','Y14','Y15','Y16'], axis=1) df_sydneydrop=df_sydneydrop.fillna(0.000000) basic_details(df_sydneydrop) df_sydneydrop.head(50) tot_cat_col = list(df_sydneydrop.select_dtypes(include=['category']).columns) num_col = [c for c in df_sydneydrop.columns if c not in tot_cat_col] #finding the outliers which is excluded from our prediction from dataset def outlier(df,columns): for i in columns: quartile_1,quartile_3 = np.percentile(df[i],[25,75]) quartile_f,quartile_l = np.percentile(df[i],[1,99]) IQR = quartile_3-quartile_1 lower_bound = quartile_1 - (1.5*IQR) upper_bound = quartile_3 + (1.5*IQR) print(i,lower_bound,upper_bound,quartile_f,quartile_l) df[i].loc[df[i] < lower_bound] = quartile_f df[i].loc[df[i] > upper_bound] = quartile_l outlier(df_sydneydrop,num_col)P1 73632.15068749999 110890.54598750002 77895.104 106733.40925999999 P2 70596.11585 118040.56024999998 78170.7885 107342.248981 P3 71336.89092499999 114284.370325 
77908.215727 107205.97099999999 P4 75698.09855000001 108917.52495 77868.171498 106651.66337899999 P5 72354.63617499998 114712.53397500001 78548.24900000001 106975.3728 P6 70864.1050375 113113.4957375 77760.372366 107508.972459 P7 74364.79249999995 111548.16030000002 77701.6398 107342.241478 P8 73221.486775 112287.99097500002 77694.24926999999 106443.58951199999 P9 70814.5899625 113861.94926250001 77933.469022 107332.4152 P10 72509.43898749996 112497.79468750003 77887.8969 106691.8291 P11 72636.59418750001 114264.27368749998 77956.6274 107080.36862299996 P12 72003.620425 112852.54302499999 77949.10665500001 106599.77994199998 P13 70898.068825 115444.45962499999 77793.275187 107086.4462 P14 75609.5973125 114103.93941250001 78521.31545600001 107493.97730499998 P15 70820.19600000003 113527.90719999999 77734.61597 107232.507356 P1[...]Don't need to run the below cell. it is statistical features increase actually#increase the attrivute to do astatistical analysis, don't need to run this cell def descrictive_stat_feat(df): df = pd.DataFrame(df) dcol= [c for c in df.columns if df[c].nunique()>=10] d_median = df[dcol].median(axis=0) d_mean = df[dcol].mean(axis=0) q1 = df[dcol].apply(np.float32).quantile(0.25) q3 = df[dcol].apply(np.float32).quantile(0.75) #Add mean and median column to data set having more then 10 categories for c in dcol: df[c+str('_median_range')] = (df[c].astype(np.float32).values > d_median[c]).astype(np.int8) df[c+str('_mean_range')] = (df[c].astype(np.float32).values > d_mean[c]).astype(np.int8) df[c+str('_q1')] = (df[c].astype(np.float32).values < q1[c]).astype(np.int8) df[c+str('_q3')] = (df[c].astype(np.float32).values > q3[c]).astype(np.int8) return df df_test = descrictive_stat_feat(df_sydneydrop) # df_test # df_test=df_test.drop(['Total_median_range','Total_mean_range','Total_q1', 'Total_q3'], axis = 1) # df_test import seaborn as sns #visualize the features for corelation too much attribute cannot predict plt.figure(figsize=(50, 50)) corr = df_sydneydrop.apply(lambda x: pd.factorize(x)[0]).corr() ax = sns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns, linewidths=.2) #now do the listing for Real Price Paid (Our Target) corr['Total'].sort_values(ascending= False) df_sydneylabel = pd.DataFrame(df_sydneydrop, columns=[ 'd1','d2','d3','d4','d5','d6','d7','d8','d9','d10','d11','d12','d13','d14','d15','d16', 'ang1','ang2','ang3','ang4','ang5','ang6','ang7','ang8','ang9','ang10','ang11','ang12','ang13','ang14','ang15','ang16','P1','P2', 'P3','P4','P5','P6','P7','P8','P9','P10','P11','P12','P13','P14','P15','P16','Total']) final_label = 'Total' print(df_sydneylabel.describe())d1 d2 d3 d4 d5 \ count 72000.000000 72000.000000 72000.000000 72000.000000 72000.000000 mean 202.110332 266.557699 238.413002 191.919437 220.898621 std 151.759878 157.236389 173.860879 153.348400 149.512734 min 0.000000 0.000000 0.000000 0.000000 0.000000 25% 67.354125 131.496500 87.442700 67.992125 92.497125 50% 180.204700 280.363300 213.064150 148.100300 213.878800 75% 321.311100 386.375600 366.801800 299.148925 324.293950 max 566.000000 566.000000 566.000000 566.000000 566.000000 d6 d7 d8 d9 d10 \ count 72000.000000 72000.000000 72000.000000 72000.000000 72000.000000 mean 266.369163 196.456050 192.228228 253.432656 228.275827 std 179.567405 161.445585 138.404332 [...]BaseLine Modeldef train_validate_test_split(df, train_part=.6, validate_part=.2, test_part=.2, seed=None): np.random.seed(seed) total_size = train_part + validate_part + test_part train_percent = train_part / total_size 
validate_percent = validate_part / total_size test_percent = test_part / total_size perm = np.random.permutation(df.index) m = len(df) train_end = int(train_percent * m) validate_end = int(validate_percent * m) + train_end train = perm[:train_end] validate = perm[train_end:validate_end] test = perm[validate_end:] return train, validate, test train_size, valid_size, test_size = (80, 20, 0) kc_train, kc_valid, kc_test = train_validate_test_split(df_sydneylabel, train_part=train_size, validate_part=valid_size, test_part=test_size, seed=2020) kc_y_train = df_sydneylabel.loc[kc_train, [final_label]] kc_x_train = df_sydneylabel.loc[kc_train, :].drop(final_label, axis=1) kc_y_valid = df_sydneylabel.loc[kc_valid, [final_label]] kc_x_valid = df_sydneylabel.loc[kc_valid, :].drop(final_label, axis=1) print('Size of training set: ', len(kc_x_train)) print('Size of validation set: ', len(kc_x_valid)) print('Size of test set: ', len(kc_test), '(not converted)') #Function to get statistics about a data frame def norm_stats(df1, df2): dfs = df1.append(df2) minimum = np.min(dfs) maximum = np.max(dfs) mu = np.mean(dfs) sigma = np.std(dfs) return (minimum, maximum, mu, sigma) #Function to Z-normalise the entire data frame - note stats for Z transform passed in. def z_score(col, stats): m, M, mu, s = stats df = pd.DataFrame() for c in col.columns: df[c] = (col[c]-mu[c])/s[c] return df stats = norm_stats(kc_x_train, kc_x_valid) arr_x_train = np.array(z_score(kc_x_train, stats)) arr_y_train = np.array(kc_y_train) arr_x_valid = np.array(z_score(kc_x_valid, stats)) arr_y_valid = np.array(kc_y_valid) print('Training shape:', arr_x_train.shape) print('Training samples: ', arr_x_train.shape[0]) print('Validation samples: ', arr_x_valid.shape[0]) import keras from keras import metrics from keras import regularizers from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Activation from keras.layers import Conv2D, MaxPooling2D from keras.optimizers import Adam, RMSprop from keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint from keras.utils import plot_model from keras.models import load_model #This baseline. it extends Nadam optimizer, dropouts and L1/L2 regularisers. def baseline(x_size, y_size): t_model = Sequential() t_model.add(Dense(80, activation="tanh", kernel_initializer='normal', input_shape=(x_size,))) t_model.add(Dropout(0.2)) t_model.add(Dense(120, activation="relu", kernel_initializer='normal', kernel_regularizer=regularizers.l1(0.01), bias_regularizer=regularizers.l1(0.01))) t_model.add(Dropout(0.1)) t_model.add(Dense(20, activation="relu", kernel_initializer='normal', kernel_regularizer=regularizers.l1_l2(0.01), bias_regularizer=regularizers.l1_l2(0.01))) t_model.add(Dropout(0.1)) t_model.add(Dense(10, activation="relu", kernel_initializer='normal')) t_model.add(Dropout(0.0)) t_model.add(Dense(y_size)) t_model.compile( loss='mean_squared_error', optimizer='nadam', metrics=[metrics.mae]) return(t_model) model = baseline(arr_x_train.shape[1], arr_y_train.shape[1]) model.summary() #Define how many epochs of training should be done and what is the batch size epochs = 500 batch_size = 128 print('Epochs: ', epochs) print('Batch size: ', batch_size) #Specify Keras callbacks which allow additional functionality while the model is being fitted. #ModelCheckpoint allows to save the models as they are being built or improved. #TensorBoard interacts with TensorFlow interactive reporting system. 
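# note: ModelCheckpoint and TensorBoard are left commented out in the list below,
# so EarlyStopping is the only callback actually passed to model.fit()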
#EarlyStopping watches one of the model measurements and stops fitting when no improvement. keras_callbacks = [ # ModelCheckpoint('/tmp/keras_checkpoints/model.{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', save_best_only=True, verbose=2) # ModelCheckpoint('/tmp/keras_checkpoints/model.{epoch:02d}.hdf5', monitor='val_loss', save_best_only=True, verbose=0) # TensorBoard(log_dir='/tmp/keras_logs/model_3', histogram_freq=0, write_graph=True, write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None), EarlyStopping(monitor='val_mean_absolute_error', patience=20, verbose=0) ] #Fit the model and record the history of training and validation. #As we specified EarlyStopping with patience=20, with luck the training will stop in less than 200 epochs. #Be patient, the fitting process takes time, use verbose=2 for visual feedback. history = model.fit(arr_x_train, arr_y_train, batch_size=batch_size, epochs=epochs, shuffle=True, verbose=2, # Change it to 2, if wished to observe execution validation_data=(arr_x_valid, arr_y_valid), callbacks=keras_callbacks) #Evaluate and report performance of the trained model train_score = model.evaluate(arr_x_train, arr_y_train, verbose=0) valid_score = model.evaluate(arr_x_valid, arr_y_valid, verbose=0) print('Train MAE: ', round(train_score[1], 4), ', Train Loss: ', round(train_score[0], 4)) print('Val MAE: ', round(valid_score[1], 4), ', Val Loss: ', round(valid_score[0], 4)) def plot_hist(h, xsize=6, ysize=10): # Prepare plotting fig_size = plt.rcParams["figure.figsize"] plt.rcParams["figure.figsize"] = [xsize, ysize] fig, axes = plt.subplots(nrows=4, ncols=4, sharex=True) # summarize history for MAE plt.subplot(211) plt.plot(h['mean_absolute_error']) plt.plot(h['val_mean_absolute_error']) plt.title('Training vs Validation MAE') plt.ylabel('MAE') plt.xlabel('Epoch') plt.legend(['Train', 'Validation'], loc='upper left') # summarize history for loss plt.subplot(212) plt.plot(h['loss']) plt.plot(h['val_loss']) plt.title('Training vs Validation Loss') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(['Train', 'Validation'], loc='upper left') # Plot it all in IPython (non-interactive) plt.draw() plt.show() return plot_hist(history.history, xsize=8, ysize=12)LSTM Start# !pip install keras==2.1.6 #pip uninstall keras # !pip install git+git://github.com/fchollet/keras.git --upgrade #!pip install keras==2.2.4 !pip3 install tensorflow==2.2.0 import torch import torch.nn as nn from torch.autograd import Variable from sklearn.preprocessing import MinMaxScaler training_set = df_sydneylabel.values #LSTM Dataloading def sliding_windows(data, seq_length): x = [] y = [] for i in range(len(data)-seq_length-1): _x = data[i:(i+seq_length)] _y = data[i+seq_length] x.append(_x) y.append(_y) return np.array(x),np.array(y) sc = MinMaxScaler() training_data = sc.fit_transform(training_set) seq_length = 4 x, y = sliding_windows(training_data, seq_length) train_size = int(len(y) * 0.67) test_size = len(y) - train_size dataX = Variable(torch.Tensor(np.array(x))) dataY = Variable(torch.Tensor(np.array(y))) trainX = Variable(torch.Tensor(np.array(x[0:train_size]))) trainY = Variable(torch.Tensor(np.array(y[0:train_size]))) testX = Variable(torch.Tensor(np.array(x[train_size:len(x)]))) testY = Variable(torch.Tensor(np.array(y[train_size:len(y)]))) trainY.shape # Pytorch LSTM Model class LSTM(nn.Module): def __init__(self, num_classes, input_size, hidden_size, num_layers): super(LSTM, self).__init__() self.num_classes = num_classes self.num_layers = 
num_layers self.input_size = input_size self.hidden_size = hidden_size self.seq_length = seq_length self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=True) self.fc = nn.Linear(hidden_size, num_classes) def forward(self, x): h_0 = Variable(torch.zeros( self.num_layers, x.size(0), self.hidden_size)) c_0 = Variable(torch.zeros( self.num_layers, x.size(0), self.hidden_size)) # Propagate input through LSTM ula, (h_out, _) = self.lstm(x, (h_0, c_0)) h_out = h_out.view(-1, self.hidden_size) out = self.fc(h_out) return out #Training LSTM num_epochs = 500 learning_rate = 0.001 input_size = 49 hidden_size = 2 num_layers = 1 num_classes = 49 lstm = LSTM(num_classes, input_size, hidden_size, num_layers) criterion = torch.nn.MSELoss() # mean-squared error for regression optimizer = torch.optim.Adam(lstm.parameters(), lr=learning_rate) #optimizer = torch.optim.SGD(lstm.parameters(), lr=learning_rate) # Train the model for epoch in range(num_epochs): outputs = lstm(trainX) optimizer.zero_grad() # obtain the loss function loss = criterion(outputs, trainY) loss.backward() optimizer.step() if epoch % 100 == 0: print("Epoch: %d, loss: %1.5f" % (epoch, loss.item()))Epoch: 0, loss: 0.55257 Epoch: 100, loss: 0.34200 Epoch: 200, loss: 0.17132 Epoch: 300, loss: 0.10911 Epoch: 400, loss: 0.08589Score prediction bt model evaluating#Evalutating the data . the best 50. lstm.eval() train_predict = lstm(dataX) data_predict = train_predict.data.numpy() dataY_plot = dataY.data.numpy() data_predict = sc.inverse_transform(data_predict) dataY_plot = sc.inverse_transform(dataY_plot) #plt.axvline(x=test_size, c='r', linestyle='--') plt.plot(dataY_plot[50], c = 'r', linestyle='--') plt.plot(data_predict[50], c= 'g') plt.suptitle('Time-Series Prediction') plt.show()This is the Keras model but keras has recently a problem , If you can fix it . it will be best# df_sydneylstm = pd.DataFrame(df_sydneydrop, columns=[ # 'Total','d1','d2','d3','d4','d5','d6','d7','d8','d9','d10','d11','d12','d13','d14','d15','d16', # 'ang1','ang2','ang3','ang4','ang5','ang6','ang7','ang8','ang9','ang10','ang11','ang12','ang13','ang14','ang15','ang16','P1','P2', # 'P3','P4','P5','P6','P7','P8','P9','P10','P11','P12','P13','P14','P15','P16']) # df_sydneylstm.head(5) # def series_to_supervised(data, n_in=1, n_out=1, dropnan=True): # n_vars = 1 if type(data) is list else data.shape[1] # dff = pd.DataFrame(data) # cols, names = list(), list() # # input sequence (t-n, ... t-1) # for i in range(n_in, 0, -1): # cols.append(dff.shift(i)) # names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)] # # forecast sequence (t, t+1, ... 
t+n) # for i in range(0, n_out): # cols.append(dff.shift(-i)) # if i == 0: # names += [('var%d(t)' % (j+1)) for j in range(n_vars)] # else: # names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)] # # put it all together # agg = pd.concat(cols, axis=1) # agg.columns = names # # drop rows with NaN values # if dropnan: # agg.dropna(inplace=True) # return agg # values = df_sydneylstm.values # scaler = MinMaxScaler(feature_range=(0, 1)) # scaled = scaler.fit_transform(values) # # frame as supervised learning # reframed = series_to_supervised(scaled, 1, 1) # values.shape # reframed.head(5) # reframed.drop(reframed.columns[[50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73, # 74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97]], axis=1, inplace=True) # # split into train and test sets # values = reframed.values # #n_train_time = 72000*0.7 # train = values[:50400, :] # test = values[50400:, :] # ##test = values[n_train_time:n_test_time, :] # # split into input and outputs # train_X, train_y = train[:, :-1], train[:, -1] # test_X, test_y = test[:, :-1], test[:, -1] # # reshape input to be 3D [samples, timesteps, features] # train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1])) # test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1])) # print(train_X.shape, train_y.shape, test_X.shape, test_y.shape) # # We reshaped the input into the 3D format as expected by LSTMs, namely [samples, timesteps, features]. # model = Sequential() # model.add(LSTM(100, input_shape=(train_X.shape[1], train_X.shape[2]))) # model.add(Dropout(0.2)) # # model.add(LSTM(70)) # # model.add(Dropout(0.3)) # model.add(Dense(1)) # model.compile(loss='mean_squared_error', optimizer='adam') # # fit network # history = model.fit(train_X, train_y, epochs=20, batch_size=70, validation_data=(test_X, test_y), verbose=2, shuffle=False) # # summarize history for loss # plt.plot(history.history['loss']) # plt.plot(history.history['val_loss']) # plt.title('model loss') # plt.ylabel('loss') # plt.xlabel('epoch') # plt.legend(['train', 'test'], loc='upper right') # plt.show() # # make a prediction # yhat = model.predict(test_X) # test_X = test_X.reshape((test_X.shape[0], 7)) # # invert scaling for forecast # inv_yhat = np.concatenate((yhat, test_X[:, -6:]), axis=1) # inv_yhat = scaler.inverse_transform(inv_yhat) # inv_yhat = inv_yhat[:,0] # # invert scaling for actual # test_y = test_y.reshape((len(test_y), 1)) # inv_y = np.concatenate((test_y, test_X[:, -6:]), axis=1) # inv_y = scaler.inverse_transform(inv_y) # inv_y = inv_y[:,0] # # calculate RMSE # rmse = np.sqrt(mean_squared_error(inv_y, inv_yhat)) # print('Test RMSE: %.3f' % rmse)
Merge Intervals
Given a collection of intervals, merge all overlapping intervals.
Analysis
Problem source: [LeetCode Merge Intervals - 56](https://leetcode.com/problems/merge-intervals/)
Thinking about it mathematically, two intervals can relate to each other in one of the following ways: 1. no overlap, 2. complete overlap, 3. partial overlap (hanging over on the left or the right). But if we work through each of these cases one by one, it gets very messy (at least messy enough that on LeetCode this would be a Hard rather than a Medium). The trick to this problem lies in two key points: sorting the list up front, and controlling the flow of the merge loop. The most important one is the first point.
def merge(intervals): cur = 0 intervals.sort(key=lambda item: item[0]) while cur < len(intervals): zone = intervals[cur] merge_cur = cur + 1 while merge_cur < len(intervals): if intervals[cur][1]>=intervals[merge_cur][0]: intervals[cur][1] = max(intervals[cur][1],intervals[merge_cur][1]) intervals.remove(intervals[merge_cur]) continue merge_cur += 1 cur += 1 return intervals print(merge([[1,3],[2,6],[8,10],[15,18]]))
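The in-place solution above calls ``list.remove`` inside the scan, which makes it quadratic in the worst case. A common alternative (a sketch, not part of the original write-up) keeps the same two key points (sort first, then control the merge) but appends to a separate result list, so the work after sorting is a single linear pass:

```python
def merge_sorted(intervals):
    # sort by start point, then sweep once, extending the last merged interval on overlap
    intervals.sort(key=lambda item: item[0])
    merged = []
    for start, end in intervals:
        if merged and start <= merged[-1][1]:
            merged[-1][1] = max(merged[-1][1], end)
        else:
            merged.append([start, end])
    return merged

print(merge_sorted([[1, 3], [2, 6], [8, 10], [15, 18]]))  # [[1, 6], [8, 10], [15, 18]]
```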
Custom methods and problems
In order to add a new algorithm to the registry, you must first implement a CustomMethod object that adheres to the specifications of the other TigerForecast methods. As long as your class inherits the tigerforecast CustomMethod object and has the three core TigerForecast Method methods implemented — *initialize*, *predict*, and *update* — the class will interface as expected with all the other features in the framework. These methods are:* Initialize — initializes method parameters and other local variables, and is called at the beginning of every problem run.* Predict — a function that takes an input observation 'x' and returns a prediction which can depend on the parameters defined in initialize.* Update — takes the true label with respect to the last observed value 'x' and performs some correction over the initialized parameters.In this notebook, we demonstrate how to implement and add your own methods to TigerForecast. We start as usual by installing the package.!git clone https://github.com/johnhallman/tigerforecast.git !pip install -e tigerforecast import tigerforecast import jax import jax.numpy as np import jax.random as random from tigerforecast.utils import generate_key import matplotlib.pyplot as plt tigerforecast.set_key(1)Below, we implement the most trivial method possible, with no initialize or update and with a predict method which returns the previously observed value.class TrivialMethod(tigerforecast.CustomMethod): def initialize(self): pass def predict(self, x): return x def update(self, y_true): passTigerForecast comes with a built-in ```register_custom_method``` method that allows users to register method classes with custom names.tigerforecast.register_custom_method(TrivialMethod, "TrivialMethod")Next, we demonstrate how to implement and register a recurrent neural network (RNN) using JAX and the existing Adagrad optimizer. In order to use the existing TigerForecast optimizers, the custom class needs to provide a method which takes parameters $\theta$ and input $x$ and maps them to a prediction using solely JAX and regular arithmetic API. Because TigerForecast methods are provided only with the most recent observation as input for predict, and because RNNs need to take a history of observations in order to make predictions, we need to implement a private helper method to support RNN's predict.
In the code below, we will call this private method \_predict, which takes parameters $\theta$ in the form of a list of matrices, and $x$ which is a list of the previous $k$ observations $x_t$.# get glorot initialization from JAX import jax.experimental.stax as stax glorot_init = stax.glorot() class MyRNN(tigerforecast.CustomMethod): # input_dim: dimension of RNN inputs, hidden_dim: dimension of hidden state/output # memory_length: length of observation history to store # optimizer: TigerForecast optimizer class or instance (used in update method) def initialize(self, input_dim, hidden_dim, memory_length, optimizer): # store arguments self.d_in = input_dim self.d_hid = hidden_dim self.mem = memory_length self.x_history = np.zeros((memory_length, input_dim)) # initialize and store method parameters W_h = glorot_init(generate_key(), (hidden_dim, hidden_dim)) W_x = glorot_init(generate_key(), (hidden_dim, input_dim)) b = random.normal(generate_key(), shape=(hidden_dim,)) self.params = [W_h, W_x, b] # initialize predict helper method (x is a history in this case) def _predict(params, x): W_h, W_x, b = params # unroll parameters hid = np.zeros(hidden_dim) for x_t in x: hid = np.tanh(np.dot(W_h, hid) + np.dot(W_x, x_t) + b) return hid self._predict = jax.jit(_predict) # jit compiles code and improves efficiency # store optimizer in method by providing helper predict self._store_optimizer(optimizer, self._predict) # x is an input_dim length ndarray def predict(self, x): # store new observation in history temp = np.roll(self.x_history, self.d_in) self.x_history = jax.ops.index_update(temp, jax.ops.index[0,:], x) # return prediction via helper method return self._predict(self.params, self.x_history) # y_true is the next observation/label def update(self, y): self.params = self.optimizer.update(self.params, self.x_history, y) returnEvery TigerForecast method, including those inheriting the CustomMethod class, come with a built-in \_store\_optimizer method which can be called to load the optimizer into the method. 
Optimizers as well as \_store\_optimizer must take a prediction method in order to be able to compute gradients to optimize against, and because optimizers depend on JAX's autograd functionality, any predict method passed to an optimizer must be implemented using JAX NumPy.We now register and run our method on an ARMA(5, 3) problem and compare its performance to LastValue.from tigerforecast.utils.optimizers import Adagrad, mse # register and initialize custom RNN method tigerforecast.register_custom_method(MyRNN, "MyRNN") my_method = tigerforecast.method("MyRNN") d_in, d_hid, memory = 1, 1, 6 optimizer = Adagrad(learning_rate=0.01) my_method.initialize(d_in, d_hid, memory, optimizer)Once our method has been initialized, we run the comparison against the LastValue method.# initialize LastValue method and ARMA problem pz_method = tigerforecast.method("LastValue") pz_method.initialize() T = 2500 p, q = 5, 3 problem = tigerforecast.problem("ARMA-v0") x = problem.initialize(p, q) # run comparison my_results = [] pz_results = [] for i in range(T): my_y_pred = my_method.predict(x) pz_y_pred = pz_method.predict(x) y_true = problem.step() my_loss = mse(y_true, my_y_pred) pz_loss = mse(y_true, pz_y_pred) my_results.append(my_loss) pz_results.append(pz_loss) my_method.update(y_true) pz_method.update(y_true) x = y_trueFinally, we plot the results of our comparison.def smoothen(data, k=50): return [np.sum(data[i:i+k])/k for i in range(len(data) - k)] # plot method performance plt.plot(smoothen(my_results), label="MyRNN") plt.plot(smoothen(pz_results), label="LastValue") plt.legend() plt.title("MyRNN vs LastValue on ARMA problem") plt.show()As we can see, LastValue performs better than the RNN, though the RNN improves over time due to the optimizer.The code below will generate images after training our network, using just random noise as input (no MFCC coefficients). The model has mode collapse problems, so the generated faces are always more or less the same. 
You can change the variable images_to_generate to generate more faces.(Please restart the kernel each time you run the code)import tensorlayer as tl import os from tensorlayer.layers import * import argparse from PIL import Image import matplotlib.pyplot as plt from IPython.display import display def denorm_img(norm): return tf.clip_by_value((norm + 1)*127.5, 0, 255) def restore_model(sess, checkpoint_path): # Get the state of the checkpoint and then restore using ckpt path ckpt = tf.train.get_checkpoint_state(checkpoint_path) if checkpoint_path is not None: restorer = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="generator")) restorer.restore(sess, ckpt.model_checkpoint_path) def generator(z, reuse, hidden_number=64, kernel=3): w_init = tf.random_normal_initializer(stddev=0.02) with tf.variable_scope("generator", reuse=reuse): tl.layers.set_name_reuse(reuse) # DECODER BEGINS x = InputLayer(z, name="in") x = DenseLayer(x, n_units=8 * 8 * hidden_number, name='Generator/dense2') arguments = {'shape': [-1, 8, 8, hidden_number], 'name': 'Generator/reshape1'} x = LambdaLayer(x, fn=tf.reshape, fn_args=arguments) x = Conv2dLayer(x, shape=[kernel, kernel, hidden_number, hidden_number], strides=[1, 1, 1, 1], padding='SAME', W_init=w_init, act=tf.nn.elu, name='Generator/conv1') x = Conv2dLayer(x, shape=[kernel, kernel, hidden_number, hidden_number], strides=[1, 1, 1, 1], padding='SAME', W_init=w_init, act=tf.nn.elu, name='Generator/conv2') x = UpSampling2dLayer(x, size=[2, 2], is_scale=True, method=1, name='Generator/UpSampling1') # method= 1 NN x = Conv2dLayer(x, shape=[kernel, kernel, hidden_number, hidden_number], strides=[1, 1, 1, 1], padding='SAME', W_init=w_init, act=tf.nn.elu, name='Generator/conv3') x = Conv2dLayer(x, shape=[kernel, kernel, hidden_number, hidden_number], strides=[1, 1, 1, 1], padding='SAME', W_init=w_init, act=tf.nn.elu, name='Generator/conv4') x = UpSampling2dLayer(x, size=[2, 2], is_scale=True, method=1, name='Encoder/UpSampling2') # method= 1 NN x = Conv2dLayer(x, shape=[kernel, kernel, hidden_number, hidden_number], strides=[1, 1, 1, 1], padding='SAME', W_init=w_init, act=tf.nn.elu, name='Generator/conv5') x = Conv2dLayer(x, shape=[kernel, kernel, hidden_number, hidden_number], strides=[1, 1, 1, 1], padding='SAME', W_init=w_init, act=tf.nn.elu, name='Generator/conv6') x = UpSampling2dLayer(x, size=[2, 2], is_scale=True, method=1, name='Generator/UpSampling3') # method= 1 NN x = Conv2dLayer(x, shape=[kernel, kernel, hidden_number, hidden_number], strides=[1, 1, 1, 1], padding='SAME', W_init=w_init, act=tf.nn.elu, name='Generator/conv7') x = Conv2dLayer(x, shape=[kernel, kernel, hidden_number, hidden_number], strides=[1, 1, 1, 1], padding='SAME', W_init=w_init, act=tf.nn.elu, name='Generator/conv8') x = Conv2dLayer(x, shape=[kernel, kernel, hidden_number, 3], strides=[1, 1, 1, 1], padding='SAME', W_init=w_init, name='Generator/convLAST') return x def test(images_to_generate, z_num = 256, reuse = False, checkpoint = "storagecheckpoints_trained_collapsed_good_quality"): # ##========================== DEFINE INPUT DATA ============================### z = tf.placeholder('float32', [None, z_num], name='t_noise_generator') # ##========================== DEFINE MODEL ============================### net_gen = generator(z=z, reuse=reuse) output_gen = denorm_img(net_gen.outputs) # Denormalization with tf.Session() as sess: print("Restoring model from checkpoint") restore_model(sess, checkpoint) for iteration in range(0, images_to_generate): input_z = 
np.random.uniform(-1., 1., size=[1, z_num]) output_image = sess.run(output_gen, feed_dict={z: input_z})[0] ima = Image.fromarray(output_image.astype(np.uint8), 'RGB') display(ima) iteration += 1 if __name__ == '__main__': test(images_to_generate = 2)Table of contents- Introduction- Data Wrangling - GDP in PPP dollars - GDP in current USD - Total population - Fuel exports - Democracy index - Join the data sets IntroductionIn this project we investigate data sets on GDP per capita, democracy indices, total population, and fuel exports, available from [www.gapminder.org](https://www.gapminder.org/) and [World Bank Open Data](https://data.worldbank.org/). The data was available under the [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/) license from both sources, see [free material from Gapminder](https://www.gapminder.org/free-material/) and [Summary of the Terms of Use for Datasets Listed in The World Bank Data Catalog](https://data.worldbank.org/summary-terms-of-use). Data Wranglingfrom common import * import pandas as pd %load_ext autoreload %autoreload print(f"We are going to analyze data sets for the year {the_year}.")We are going to analyze data sets for the year 2018.Helper functionsFirst we define helper functions for data wrangling.def read_csv(filepath, usecols = None, col_names = None, indexcol = None, header = 0): """ Convenience function to read selected columns from a CSV and assign them custom names. :param filepath: Path to the CSV file to be read :param usecols: Original columns to be used. If None then all columns will be read :param col_names: Custom column names to be set after reading :param indexcol: Column to be set as index column :param header: Row number to be used as header containing the original column names :return: """ df = pd.read_csv(filepath, usecols=usecols, header=header) if usecols is not None: df.columns = col_names if indexcol is not None: df.set_index(indexcol, inplace=True) return dfGDP data, in PPP dollarsWe investigate the data set "GD001: GDP per capita, constant PPP dollars", available [here](https://www.gapminder.org/data/documentation/gd001/) from Gapminder. We consider the data on GDP per capita in [PPP (Purchasing Power Parity)](https://en.wikipedia.org/wiki/Purchasing_power_parity) dollars for the year 2018, compiled by [Gapminder](https://www.gapminder.org/) from the data provided by the World Bank [here](https://data.worldbank.org/indicator/NY.GDP.PCAP.PP.KD). 
We will use pre-defined column names.print_declared_class_attributes(ColGdp) df_gdp_ppp = read_csv('../resources/project/GM-GDP-per-capita_Dataset-v26_data-for-contries-by-year.csv', header=0, usecols=["geo","name","time","Income per person"], col_names=[ColGdp.country_code, ColGdp.country, ColGdp.year, ColGdp.gdp_per_capita_ppp], indexcol=ColGdp.country_code) df_gdp_ppp = df_gdp_ppp[df_gdp_ppp[ColGdp.year] == the_year] df_gdp_ppp.head()Which countries are there in the dataset?print(f"There are {df_gdp_ppp[ColGdp.country].nunique()} countries in the data set:\n" f"{df_gdp_ppp[ColGdp.country].unique()}") df_gdp_ppp.info() Index: 195 entries, afg to zwe Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Country 195 non-null object 1 Year 195 non-null int64 2 GDP per capita (PPP dollars) 195 non-null int64 dtypes: int64(2), object(1) memory usage: 6.1+ KBThere are 195 rows and no missing values in the data set.df_gdp_ppp.duplicated().sum()There are no duplicated rows in the data set.df_gdp_ppp[ColGdp.country].duplicated().sum()There are also no duplicate country values: each country has exactly one entry for the considered year 2018. GDP data, in current USDWe investigate the data set "GDP per capita (current US$)", available [here](https://data.worldbank.org/indicator/NY.GDP.PCAP.CD) from the World Bank. We consider the data on GDP per capita in current US dollars for the year 2018.df_gdp_curr = read_csv('../resources/project/API_NY.GDP.PCAP.CD_DS2_en_csv_v2_2055804_WorldBank_GDP_per_capita_current_USD.csv', header=2, usecols=["Country Name", "Country Code", "2018"], col_names=[ColGdp.country, ColGdp.country_code, ColGdp.gdp_per_capita], indexcol=ColGdp.country_code) # the actual header row in the dataset is preceded by some other information df_gdp_curr.head()Convert the country codes to lower case so that they match the country codes in `df_gdp_ppp`.df_gdp_curr.index = df_gdp_curr.index.str.lower() df_gdp_curr.head() df_gdp_curr.info() Index: 264 entries, abw to zwe Data columns (total 2 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Country 264 non-null object 1 GDP per capita 249 non-null float64 dtypes: float64(1), object(1) memory usage: 6.2+ KBThere are some missing values. We will deal with them after joining the two GDP data frames in the next subsection. Join the GDP data framesWe join the `df_gdp_curr` data frame containing GDP per capita in current USD and the `df_gdp_ppp` data frame containing GDP in PPP dollars.df_gdp = df_gdp_ppp.join(df_gdp_curr[[ColGdp.gdp_per_capita]]) df_gdp.head() df_gdp.describe()There are a few countries where the GDP per capita is missing.df_gdp[df_gdp[ColGdp.gdp_per_capita].isnull()]We will substitute the missing values with the available values in PPP dollars: although this is not very accurate, it should be sufficient for the envisioned analysis. We will only make more precise manual adjustments for two of these countries.df_gdp[ColGdp.gdp_per_capita].fillna(df_gdp[ColGdp.gdp_per_capita_ppp], inplace=True) df_gdp.isnull().sum()Now there are no more missing values. Manual adjustments for selected countries We separately adjust the data points for [Venezuela](https://en.wikipedia.org/wiki/Venezuela) and [Taiwan](https://en.wikipedia.org/wiki/Taiwan). 
[Venezuela](https://en.wikipedia.org/wiki/Venezuela) is a country with large [oil and natural gas reserves](https://en.wikipedia.org/wiki/Venezuela#Petroleum_and_other_resources) that has experienced a significant decline in both its [GDP](https://countryeconomy.com/gdp/venezuela#:~:text=The%20GDP%20per%20capita%20of,2017%2C%20when%20it%20was%20%244%2C894.) and its [democracy index](https://en.wikipedia.org/wiki/Democracy_Index#By_country) since 2015. The GDP per capita fell from ca. 10,000 USD in 2015 to less than 4,000 USD in 2018. We will approximate the missing GDP value with the latter figure.df_gdp.at['ven', ColGdp.gdp_per_capita] = 4000 df_gdp[ColGdp.gdp_per_capita]['ven'][Taiwan](https://en.wikipedia.org/wiki/Taiwan), officially the Republic of China (ROC), is a developed democratic country in East Asia. Its political status is disputed because China considers it one of its provinces. Only a few other countries maintain official diplomatic relations with it. Yet many countries, including China, cooperate with it economically. Taiwan's high-tech industry plays an important role in economies worldwide. As an example, its semiconductor contract manufacturers have [over 50% of world market share](https://www.cnbc.com/2021/03/16/2-charts-show-how-much-the-world-depends-on-taiwan-for-semiconductors.html). Taiwan's disputed political status could also be the reason why it is missing from some data sets, especially the ones from the World Bank, so we have to collect its data separately from other sources, e.g., [Wikipedia](https://en.wikipedia.org/wiki/Economy_of_Taiwan). Based on various sources on the Internet, we will approximate its GDP per capita for 2018 as 25,000 in current USD.df_gdp.at['twn', ColGdp.gdp_per_capita] = 25000 df_gdp[ColGdp.gdp_per_capita]['twn']The total population data setWe investigate the data set "Total population", available [here](https://data.worldbank.org/indicator/SP.POP.TOTL) from the World Bank. We consider the data on total population for the year 2018.print_declared_class_attributes(ColPop) df_pop = read_csv('../resources/project/API_SP.POP.TOTL_DS2_en_csv_v2_2163507_WorldBank_total_population.csv', header=2, usecols=["Country Name", "Country Code", f"{the_year}"], col_names=[ColPop.country, ColPop.country_code, ColPop.population], indexcol=ColPop.country_code) df_pop.head()Convert the country codes to lower case so that they match the country codes in the other data frames.df_pop.index = df_pop.index.str.lower() df_pop.head() df_pop.info() df_pop[df_pop[ColPop.population].isnull()]There are two rows with missing population figures. 
We will omit these rows from our analysis.df_pop.dropna(inplace=True) df_pop.sort_values(by=ColPop.population, ascending=False)It turns out that the data set contains more values than just the population by country. For example, it has a row for the total world population and also rows for population by region and income. This data would only disturb our analysis, so we filter the data set to contain only countries that are also present in the other data sets.df_pop = df_pop.filter(df_gdp.index, axis='index') df_pop.info() Index: 193 entries, afg to zwe Data columns (total 2 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Country 193 non-null object 1 Population 193 non-null float64 dtypes: float64(1), object(1) memory usage: 4.5+ KBThe `population` column should contain integers, but was imported as `float64`, probably because of the rows with missing values. We will convert the column to `int64`.df_pop[ColPop.population] = df_pop[ColPop.population].astype('int64') df_pop.head() df_pop.info() Index: 193 entries, afg to zwe Data columns (total 2 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Country 193 non-null object 1 Population 193 non-null int64 dtypes: int64(1), object(1) memory usage: 4.5+ KBAs for GDP, the data for Taiwan is missing. We approximate it as 23 million.df_pop.at['twn', ColPop.population] = 23000000 df_pop[ColPop.population]['twn']Now the population data set has been cleaned for our intended analysis. The fuel exports data setWe investigate the data set "Fuel exports (% of merchandise exports)", available [here](https://data.worldbank.org/indicator/TX.VAL.FUEL.ZS.UN) from the World Bank. We consider the data on fuel exports for the year 2018.print_declared_class_attributes(ColFuel)class ColFuel: country_code = "Country code" country = "Country" fuel_exports = "Fuel exports (% of merchandise exports)"We expect some missing values for 2018, so we also import the data for the 2 previous years in order to fill the gaps.fuel_exports_2_years_ago = "Fuel exports 2 years ago" fuel_exports_1_year_ago = "Fuel exports 1 year ago" df_fuel = read_csv('../resources/project/API_TX.VAL.FUEL.ZS.UN_DS2_en_csv_v2_2167156_WorldBank_fuel_exports.csv', header=2, usecols=["Country Name", "Country Code", f"{the_year - 2}", f"{the_year - 1}", f"{the_year}"], col_names=[ColFuel.country, ColFuel.country_code, fuel_exports_2_years_ago, fuel_exports_1_year_ago, ColFuel.fuel_exports], indexcol=ColFuel.country_code) df_fuel.head()Convert the country codes to lower case so that they match the country codes in the other data frames.df_fuel.index = df_fuel.index.str.lower() df_fuel.head() df_fuel[df_fuel[ColFuel.fuel_exports].isnull()]There are a lot of missing values. We will try to fill them from the previous years.df_fuel.loc[:, fuel_exports_2_years_ago:] = df_fuel.loc[:, fuel_exports_2_years_ago:].fillna(method='ffill', axis='columns') df_fuel[ColFuel.fuel_exports].isnull().sum()There are still a lot of missing values: we now assume that those are no longer relevant for our analysis and fill them with 0.df_fuel.fillna(0, inplace=True)We drop the columns for previous years because we only needed them to fill missing values.df_fuel.drop(columns=[fuel_exports_2_years_ago, fuel_exports_1_year_ago], inplace=True) df_fuel.head() df_fuel.describe() df_fuel.loc[['wld', 'mea', 'oed']]This data set, similarly to the population data set, contains more values than just the fuel exports by country. For example, it has a row for the world and for some world regions. This data 
would only disturb our analysis, so we filter the data set to contain only countries that are also present in the other data sets.df_fuel = df_fuel.filter(df_gdp.index, axis='index') df_fuel.info() Index: 194 entries, afg to zwe Data columns (total 2 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Country 194 non-null object 1 Fuel exports (% of merchandise exports) 194 non-null float64 dtypes: float64(1), object(1) memory usage: 4.5+ KBNow the fuel export data set has been cleaned for our intended analysis. The democracy index data setWe investigate the data set "Democracy indices" published by [The Economist Intelligence Unit](https://www.eiu.com/), available [here](https://data.worldbank.org/indicator/NY.GDP.PCAP.CD) from the World Bank. We consider the overall democracy index scores for the year 2018, calculated from five category indices measuring electoral process and pluralism, functioning of government, political participation, political culture, and civil liberties. Further freely available explanations of the democracy index can be found in the [Wikipedia article](https://en.wikipedia.org/wiki/Democracy_Index).print_declared_class_attributes(ColDem) df_dem = read_csv('../resources/project/EIU-Democracy-Indices_Dataset-v3_data-for-countries-by-year.csv', usecols=["geo", "name", "time", "Democracy index (EIU)"], col_names=[ColDem.country_code, ColDem.country, ColDem.year, ColDem.democracy_index], indexcol=ColDem.country_code) df_dem = df_dem[df_dem[ColDem.year] == the_year].copy() df_dem.head() df_dem.info() Index: 167 entries, afg to zwe Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Country 167 non-null object 1 Year 167 non-null int64 2 Democracy index 167 non-null float64 dtypes: float64(1), int64(1), object(1) memory usage: 5.2+ KBThere are 167 rows and no missing values in the data set. We now calculate the regime types. The EIU differentiates between full democracy, flawed democracy, hybrid regime, and authoritarian regime, cf. 
[EIU's democracy index visualization](https://infographics.economist.com/2018/DemocracyIndex/) or the [Wikipedia article on Democracy Index](https://en.wikipedia.org/wiki/Democracy_Index#By_regime_type).We simplify this differentiation to the three categories Democracy, Hybrid, and Authoritarian:| Type of Regime | Democracy Index ||:---------------|:----------------|| Democracy | 6 - 10 || Hybrid | 4 - 6 || Authoritarian | 0 - 4 |Note that the index values in this data set are stored on a 0-100 scale, which is why the bin edges below are 0, 40, 60, and 100.print_declared_class_attributes(RegimeType) regime_types = [RegimeType.authoritarian, RegimeType.hybrid, RegimeType.democracy] regime_type_edges = [0, 40, 60, 100] df_dem[ColDem.regime_type] = pd.cut(df_dem[ColDem.democracy_index], bins=regime_type_edges, labels=regime_types) df_dem.head()Assess missing data in democracy data frameprint(f"Number of countries in GDP data frame: {len(df_gdp)}") print(f"Number of countries in Democracy data frame: {len(df_dem)}") print(f"There are {df_gdp.index.difference(df_dem.index).size} countries with GDP data but no democracy index data")Number of countries in GDP data frame: 195 Number of countries in Democracy data frame: 167 There are 28 countries with GDP data but no democracy index dataThe democracy index data is missing for 28 countries. We want to check how much of GDP and population they represent, to assess if we can omit them from our analysis.df_dem_missing = df_gdp.loc[df_gdp.index.difference(df_dem.index)]\ .join(df_pop[ColPop.population]) df_dem_missingIt looks like almost all of those countries are quite small, most of them having a population of less than 1M, and some even less than 100,000. We now calculate the share of their population and GDP relative to all countries in the GDP data set.pop_total = df_pop[ColPop.population].sum() pop_total_missing_in_df_dem = df_dem_missing[ColPop.population].sum() gdp_total = (df_gdp[ColGdp.gdp_per_capita] * df_gdp.join(df_pop[ColPop.population])[ColPop.population]).sum() gdp_total_missing_in_df_dem = (df_dem_missing[ColGdp.gdp_per_capita] * df_dem_missing[ColPop.population]).sum()The following data frame shows the population, the GDP, and the percentage of population and GDP in countries with missing democracy data in relation to all countries in the GDP data set:pd.DataFrame( data=[[pop_total, gdp_total], [pop_total_missing_in_df_dem, gdp_total_missing_in_df_dem], [f"{pop_total_missing_in_df_dem / pop_total * 100:.2f}%", f"{gdp_total_missing_in_df_dem / gdp_total * 100:.2f}%"]], index=["All countries", "Countries without democracy index data", "Percentage for countries without democracy index data "], columns=["Total Population", "Total GDP"])The countries with missing democracy data represent around 0.4% of the world population and around 0.1% of world GDP. Hence, leaving them out would not significantly impact the outcome of our analysis. 
Join all data setsJoin the GDP per capita in the `df_gdp` data frame with the `df_pop` data frame containing population values, the `df_fuel` data frame containing fuel export values, and the `df_dem` data frame containing democracy index values. We will use an inner join to consider only data on countries for which every data frame has values available. Only for fuel exports, we will use a left join and fill missing values with 0.df = df_gdp.join(df_pop[ColPop.population], how="inner")\ .join(df_dem[[ColDem.democracy_index, ColDem.regime_type]], how="inner")\ .join(df_fuel[ColFuel.fuel_exports], how="left")\ df[ColFuel.fuel_exports].fillna(0, inplace=True) df.head() df.info() Index: 166 entries, afg to zwe Data columns (total 8 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Country 166 non-null object 1 Year 166 non-null int64 2 GDP per capita (PPP dollars) 166 non-null int64 3 GDP per capita 166 non-null float64 4 Population 166 non-null float64 5 Democracy index 166 non-null float64 6 Regime type 166 non-null category 7 Fuel exports (% of merchandise exports) 166 non-null float64 dtypes: category(1), float64(4), int64(2), object(1) memory usage: 15.6+ KBWe now have a data frame of 166 countries containing data on GDP per capita (in PPP dollars and in current USD), population, democracy level, and fuel exports for the year 2018. Derive additional columnsWe have already added the `regime_type` column derived from the democracy index. We now add columns with total GDP per country, calculated from GDP per capita and population size.df[ColGdp.gdp_total] = df[ColGdp.gdp_per_capita] * df[ColPop.population] df[ColGdp.gdp_total_ppp] = df[ColGdp.gdp_per_capita_ppp] * df[ColPop.population] df.head()Save data frame as CSV for further usageFinally, we save the cleaned data set into a CSV file.import os if not os.path.exists(output_folder): os.mkdir(output_folder) df.to_csv(f"{output_folder}/democracy_and_gdp.csv")!git clone https://github.com/superspray/study.git import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) from sklearn.preprocessing import LabelEncoder import lightgbm as lgb from tqdm import tqdm from sklearn.ensemble.partial_dependence import partial_dependence, plot_partial_dependence !pip install pdpbox from pdpbox import pdp, get_dataset, info_plots from sklearn.model_selection import GridSearchCV from sklearn.pipeline import Pipeline from sklearn.linear_model import LogisticRegression from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.naive_bayes import MultinomialNB from sklearn.metrics import accuracy_score, precision_score, recall_score from sklearn.pipeline import Pipeline from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from sklearn.svm import LinearSVC,SVC from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score, confusion_matrix, classification_report from sklearn.feature_selection import SelectKBest, chi2, f_classif, mutual_info_classif train_data=pd.read_csv('/content/study/train2.csv')#, encoding='latin_1')#, engine='python', error_bad_lines=False) #train_data = pd.read_csv('/content/Corona_NLP_train.csv', encoding='latin_1') test_data = pd.read_csv('/content/study/Corona_NLP_test.csv') train_data.columns=test_data.columns print('train data shape is :', train_data.shape) print('test data shape is :', test_data.shape) train_data.tail() #train_data.drop_duplicates(inplace= True) train_data=train_data.drop_duplicates() test_data.drop_duplicates(inplace=True) train_data=train_data[train_data.Sentiment.notnull()] #train_data = train_data[train_data['Sentiment'].notna()] #train_data = train_data[train_data['Sentiment'].notna()] print('train data shape is :', train_data.shape) print('test data shape is :', test_data.shape) train_data.tail() train_data.head() import nltk nltk.download('stopwords') import re from nltk.corpus import stopwords stopWords = stopwords.words('english') def clean(text): # remove urls text = re.sub(r'http\S+', " ", str(text)) text = re.sub(r"i'm", "i am", text) text = re.sub(r"you'll", "you will", text) text = re.sub(r"i'll", "i will", text) text = re.sub(r"she'll", "she will", text) text = re.sub(r"he'll", "he will", text) text = re.sub(r"he's", "he is", text) text = re.sub(r"she's", "she is", text) text = re.sub(r"that's", "that is", text) text = re.sub(r"what's", "what is", text) text = re.sub(r"where's", "where is", text) text = re.sub(r"there's", "there is", text) text = re.sub(r"here's", "here is", text) text = re.sub(r"who's", "who is", text) text = re.sub(r"how's", "how is", text) text = re.sub(r"\'ll", " will", text) text = re.sub(r"\'ve", " have", text) text = re.sub(r"\'re", " are", text) text = re.sub(r"\'d", " would", text) text = re.sub(r"can't", "cannot", text) text = re.sub(r"won't", "will not", text) text = re.sub(r"don't", "do not", text) text = re.sub(r"shouldn't", "should not", text) text = re.sub(r"n't", " not", text) # remove mentions text = re.sub(r'@\w+',' ',text) # remove hastags text = re.sub(r'#\w+', ' ', text) # remove digits text = re.sub(r'\d+', ' ', text) # remove html tags text = re.sub('r<.*?>',' ', text) text = re.sub(r'[^(A-Za-z)]',r' ',text) text = re.sub('[\d]',r'',text) text = re.sub('[()]',r'',text) text = re.sub(r'(<.*?>)',r'',text) text = re.sub(r' ',' ',text) # remove stop words text = text.split() text = " ".join([word.lower() for word in text if not word in stopWords]) text = re.sub(r' ',' ',text) text= re.sub(r"\s+"," ",text).strip() return text 
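As a quick sanity check, the cleaning function can be tried on a single string before it is applied to the whole column. The tweet below is a made-up example (not taken from the data set), and the expected output is only indicative:
```
sample = "just ordered groceries online https://t.co/abc #COVID19 @store 2020 prices are insane!!"
print(clean(sample))
# URLs, mentions, hashtags, digits, punctuation and stop words are stripped and the text is lowercased,
# leaving something like: "ordered groceries online prices insane"
```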
train_df=train_data.copy().dropna(subset=['OriginalTweet']) test_df=test_data.copy() train_df['OriginalTweet'] = train_df['OriginalTweet'].apply(lambda x: clean(x)) test_df['OriginalTweet'] = test_df['OriginalTweet'].apply(lambda x: clean(x)) train_df= train_df[train_df['OriginalTweet'].notna()] train_df= train_df[train_df['OriginalTweet']!=''] train_df= train_df[train_df['OriginalTweet']!=' '] train_df= train_df[train_df['OriginalTweet']!=' '] test_df= test_df[test_df['OriginalTweet']!=''] test_df= test_df[test_df['OriginalTweet']!=''] print('train data shape is :', train_df.shape) print('test df shape is :', test_df.shape) train_df.head(10) df_train = train_df.iloc[:,4:].reset_index(drop=True) df_test = test_df.iloc[:,4:].reset_index(drop=True) l = {"Neutral":0, "Positive":1,"Extremely Positive":1,"Negative":-1, "Extremely Negative":-1 } df_train=df_train.replace({"Sentiment": l}) df_test=df_test.replace({"Sentiment": l}) df_train.head() y_train = df_train['Sentiment'].copy() y_test = df_test['Sentiment'].copy() X_text_train = df_train['OriginalTweet'].copy() X_text_test = df_test['OriginalTweet'].copy() X_test=X_text_test X_train=X_text_train X_train.shape, y_train.shape,X_test.shape, y_test.shape from sklearn.model_selection import train_test_split from sklearn.naive_bayes import MultinomialNB from sklearn.linear_model import LogisticRegression import datetime ### K-fold cross-validation using pipeline ### from sklearn.model_selection import cross_val_score import time from sklearn.base import TransformerMixin from sklearn.feature_selection import SelectKBest, chi2 !pip install xgboost #%%time from xgboost import XGBClassifier ngram=(1,1) ! pip install catboost from catboost import CatBoostClassifier,Pool # helper function to format elapsed time def format_time(elapsed): # round to the nearest second elapsed_rounded = int(round((elapsed))) # format as hh:mm:ss return str(datetime.timedelta(seconds=elapsed_rounded)) class DenseTransformer(TransformerMixin): def fit(self, X, y=None, **fit_params): return self def transform(self, X, y=None, **fit_params): return X.toarray() y_train.shape from sklearn.model_selection import StratifiedKFold ngram=(1,1) preproc = Pipeline([('tfidf',TfidfVectorizer(ngram_range = ngram,min_df = 5, max_df=0.6,lowercase = True)),('to_dense', DenseTransformer()), ('selec',SelectKBest(k=1000,score_func=chi2))]) X_train1=preproc.fit_transform(X_train,y_train) X_test1=preproc.transform(X_test) train_dataset=Pool(data=X_train1, label=y_train) eval_dataset=Pool(data=X_test1, label=y_test) param_grid={ 'max_depth':[6,8], 'iterations':[500,1000], 'random_strength':[1,10], 'max_bin':[16,32], } clf=CatBoostClassifier(random_seed=0, logging_level='Silent', l2_leaf_reg=None, iterations= 1000, max_bin=16, max_depth= 8, random_strength= 1) X_df=pd.DataFrame(X_train1, columns=select_k_voca) clf.fit(X_df,y_train) #print(clf.score(X_train1,y_train)) print('Model Training Accuracy w/o CV: %.3f' % clf.score(train_dataset)) print('Model Test Accuracy w/o CV: %.3f' % clf.score(eval_dataset)) X_train.shape tfidf = TfidfVectorizer(ngram_range = (1,1),min_df = 5, max_df=0.6,lowercase = True) X_train_tfidf=tfidf.fit_transform(X_train) tfidf_dict=tfidf.get_feature_names() tfidf_dict[:10] tfidf_voca=tfidf.vocabulary_ tfidf_voca= dict((v,k) for k,v in tfidf_voca.items()) tfidf_voca selector = SelectKBest(k=1000,score_func=chi2) X_selec=selector.fit_transform(X_train_tfidf.toarray(), y_train) # Get columns to keep and create new dataframe with those only cols = selector.get_support(indices=True) select_k_voca=[tfidf_voca[k]for k in cols] 
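# select_k_voca holds the vocabulary terms for the 1000 columns kept by SelectKBest, in column order, so the feature importances computed below can be labelled with the corresponding words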
print(select_k_voca[:10], select_k_voca[-10:]) important_features_dict = {} for x,i in enumerate(clf.feature_importances_): name=select_k_voca[x] important_features_dict[name]=i data=pd.DataFrame(important_features_dict.items(), columns=['feature_names', 'feature_importance']) data.sort_values(by=['feature_importance'], ascending=False,inplace=True) data.tail(50) #doctors def plot_feature_importance(data,model_type): import matplotlib.pyplot as plt import seaborn as sns #Create arrays from feature importance and feature names fi_df=data #Sort the DataFrame in order of decreasing feature importance fi_df.sort_values(by=['feature_importance'], ascending=False,inplace=True) #Define size of bar plot plt.figure(figsize=(10,8)) #Plot Seaborn bar chart sns.barplot(x='feature_importance', y='feature_names', data=fi_df[:15])#, order=fi_df[:20]['feature_importance']) # for p in splot.patches: # splot.annotate(format(p.get_width(), '.1f'), # ( p.get_width(), p.get_height()+p.get_x()), # ha = 'center', va = 'center', # xytext = (0, 9), # textcoords = 'offset points') #Add chart labels plt.title(model_type + 'FEATURE IMPORTANCE') plt.xlabel('FEATURE IMPORTANCE') plt.ylabel('FEATURE NAMES') return fi_df[:20] plot_feature_importance(data,"CatBoost ") ngram=(1,1) preproc = Pipeline([('tfidf',TfidfVectorizer(ngram_range = ngram,min_df = 5, max_df=0.6,lowercase = True)),('to_dense', DenseTransformer()), ('selec',SelectKBest(k=1000,score_func=chi2))]) X_train1=preproc.fit_transform(X_train,y_train) X_test1=preproc.transform(X_test) X_df=pd.DataFrame(X_train1, columns=select_k_voca) X_df.head() lgb_clf=lgb.LGBMClassifier(random_state=0,silent=True, verbose_eval=50, max_depth= 50, min_data_in_leaf= 10, n_estimators= 500, num_leaves= 30, reg_alpha= 0.01) lgb_clf.fit(X_df, y_train) #print(clf.score(X_train1,y_train)) print('Model Training Accuracy w/o CV: %.3f' % lgb_clf.score(X_train1, y_train)) print('Model Test Accuracy w/o CV: %.3f' % lgb_clf.score(X_test1, y_test)) important_features_dict = {} for x,i in enumerate(lgb_clf.feature_importances_): name=select_k_voca[x] important_features_dict[name]=i data=pd.DataFrame(important_features_dict.items(), columns=['feature_names', 'feature_importance']) data.sort_values(by=['feature_importance'], ascending=False,inplace=True) data.tail() plot_feature_importance(data,"LightGBM ") background=[i for i,x in enumerate(X_train1) if x[620]!=0] y_back=y_train[background] # 2436 rows X_back=X_train[background] # 2436 rows y_df=pd.DataFrame(y_back) y_df=y_df.replace({'Sentiment': {-1: 'Negative', 0:'Neutral',1: 'Positive'}}) import matplotlib.pyplot as plt import seaborn as sns sns.countplot(y_df.Sentiment) plt.title("Count plot of Sentiments with 'panic'") plt.xlabel('Sentiment') plt.ylabel('Count') # print(X_back[6]) print(X_back[7]) print(X_back[29]) background=[i for i,x in enumerate(X_train1) if x[180]!=0] y_back=y_train[background] # 2436 rows X_back=X_train[background] # 2436 rows y_df=pd.DataFrame(y_back) y_df=y_df.replace({'Sentiment': {-1: 'Negative', 0:'Neutral',1: 'Positive'}}) import matplotlib.pyplot as plt import seaborn 
as sns sns.countplot(y_df.Sentiment) plt.title("Count plot of Sentiments with 'covid'") plt.xlabel('Sentiment') plt.ylabel('Count') X_back print(X_back[0]) print(X_back[5]) print(X_back[8]) background=[i for i,x in enumerate(X_train1) if x[192]!=0] y_back=y_train[background] # 2436 rows X_back=X_train[background] # 2436 rows y_df=pd.DataFrame(y_back) y_df=y_df.replace({'Sentiment': {-1: 'Negative', 0:'Neutral',1: 'Positive'}}) import matplotlib.pyplot as plt import seaborn as sns sns.countplot(y_df.Sentiment) plt.title("Count plot of Sentiments with 'crisis'") plt.xlabel('Sentiment') plt.ylabel('Count') X_back print(X_back[3]) print(X_back[31]) print(X_back[82]) background=[i for i,x in enumerate(X_train1) if x[412]!=0] y_back=y_train[background] # 2436 rows X_back=X_train[background] # 2436 rows y_df=pd.DataFrame(y_back) y_df=y_df.replace({'Sentiment': {-1: 'Negative', 0:'Neutral',1: 'Positive'}}) import matplotlib.pyplot as plt import seaborn as sns sns.countplot(y_df.Sentiment) plt.title("Count plot of Sentiments with 'hand'") plt.xlabel('Sentiment') plt.ylabel('Count') X_back print(X_back[2]) print(X_back[91]) print(X_back[153]) print(X_back[160]) print(X_back[41037]) background=[i for i,x in enumerate(X_train1) if x[528]!=0] y_back=y_train[background] # 2436 rows X_back=X_train[background] # 2436 rows y_df=pd.DataFrame(y_back) y_df=y_df.replace({'Sentiment': {-1: 'Negative', 0:'Neutral',1: 'Positive'}}) import matplotlib.pyplot as plt import seaborn as sns sns.countplot(y_df.Sentiment) plt.title("Count plot of Sentiments with 'like'") plt.xlabel('Sentiment') plt.ylabel('Count') X_back print(X_back[41]) print(X_back[65]) print(X_back[72]) #########PDP@##################### #class 0:negative/ 1: neutral pdp_dist pdp_dist = pdp.pdp_isolate(model=lgb_clf, dataset=X_df , model_features=select_k_voca , feature='panic') pdp.pdp_plot(pdp_dist, 'panic', ncols=3); pdp_dist = pdp.pdp_isolate(model=lgb_clf, dataset=X_df , model_features=select_k_voca , feature='covid') pdp.pdp_plot(pdp_dist, 'covid', ncols=3); pdp_dist = pdp.pdp_isolate(model=lgb_clf, dataset=X_df , model_features=select_k_voca , feature='crisis') pdp.pdp_plot(pdp_dist, 'crisis', ncols=3); #CAT pdp_dist = pdp.pdp_isolate(model=clf, dataset=X_df , model_features=select_k_voca , feature='crisis') pdp.pdp_plot(pdp_dist, 'crisis', ncols=3); pdp_dist = pdp.pdp_isolate(model=clf, dataset=X_df , model_features=select_k_voca , feature='panic') pdp.pdp_plot(pdp_dist, 'panic', ncols=3); pdp_dist = pdp.pdp_isolate(model=clf, dataset=X_df , model_features=select_k_voca , feature='hand') pdp.pdp_plot(pdp_dist, 'hand', ncols=3);Name(s)**** **Instructions:** This is an individual assignment. Complete the following code and push to get your score. I am providing the autograder answers locally so you may test your code before pushing. I will be reviewing your submissions, and if I find you are circumventing the autograder in any manner, you will receive a 0 on this assignment and your case will be reported to the honor board for review. In other words, approach the assignment in a genuine manner and you have nothing to worry about. **Question 1.**When will new material be available each week? You can answer the question by defining an anonymous function. This creates a function that I can test using pytest. You don't have to worry about the details. You just need to answer the question by changing the string argument that is currently set to "D". I know this is a bit weird, but I want you to get used to submitting code as early as possible.# Nothing to modify in this cell def question_1(answer): answers = { "A": "Monday morning", "B": "Sunday night", "C": "Monday evening", "D": "I don't know" } try: return answers[answer] except: return "Not a valid answer" # YOUR SOLUTION HERE # Sample incorrect answer answer_question_1 = lambda: question_1("C")**Question 2.**Do I need to buy the textbook?# Nothing to modify in this cell def question_2(answer): answers = { "A": "No", "B": "Maybe", "C": "Yes. 
You will struggle with some of the chapters without the textbook", } try: return answers[answer] except: return "Not a valid answer" # YOUR SOLUTION HERE # Sample incorrect answer answer_question_2 = lambda: question_2("C")**Question 3.**Are there any required times when I need to be online?# Nothing to modify in this cell def question_3(answer): answers = { "A": "Yes", "B": "No" } try: return answers[answer] except: return "Not a valid answer" # YOUR SOLUTION HERE # Sample incorrect answer answer_question_3 = lambda: question_3("A")**Question 4.**What software will I use to complete the assignments?# Nothing to modify in this cell def question_4(answer): answers = { "A": "Java", "B": "Netbeans", "C": "Anaconda" } try: return answers[answer] except: return "Not a valid answer" # YOUR SOLUTION HERE # Sample incorrect answer answer_question_4 = lambda: question_4("C")**Question 5.**Do I need to participate in this class or can I just do the labs and assignments?# Nothing to modify in this cell def question_5(answer): answers = { "A": "Yes. If you want to get anything higher than a C, you'll need to do more than the labs and assignments", "B": "No", } try: return answers[answer] except: return "Not a valid answer" # YOUR SOLUTION HERE # Sample incorrect answer answer_question_5 = lambda: question_5("A") # Don't forget to push!Churn predictionBased on the blog https://medium.com/@pushkarmandot/build-your-first-deep-learning-neural-network-model-using-keras-in-python-a90b5864116dimport numpy as np import pandas as pd from sklearn.preprocessing import LabelEncoder, OneHotEncoder from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.metrics import confusion_matrix import keras from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout np.random.seed(301) churn_data = pd.read_csv("Churn_Modelling.csv") churn_data.head() churn_data.tail() churn_data.shape X = churn_data.iloc[:,3:13].values # .values converts this sliced df into ndarray y = churn_data.iloc[:, 13].values X[:,1] = LabelEncoder().fit_transform(X[:, 1]) X[:, 2] = LabelEncoder().fit_transform(X[:, 2]) X.shape X onehotencoder = OneHotEncoder(categorical_features = [1]) X = onehotencoder.fit_transform(X).toarray() X = X[:, 1:] X.shape X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2) X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.1) sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) X_val = sc.transform(X_val) X_train churner_nn = Sequential() dense_classes = [15, 10, 5] drops = [0.15, 0.1] churner_nn.add(Dense(dense_classes[0], activation = 'relu', input_dim = 11)) churner_nn.add(Dropout(drops[1])) churner_nn.add(Dense(dense_classes[1], activation = 'relu')) churner_nn.add(Dropout(drops[1])) churner_nn.add(Dense(dense_classes[1], activation = 'relu')) churner_nn.add(Dropout(drops[1])) churner_nn.add(Dense(dense_classes[2], activation = 'relu')) churner_nn.add(Dense(output_dim = 1, activation = 'sigmoid')) churner_nn.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy']) churner_nn.fit(X_train, y_train, batch_size = 100, nb_epoch = 70) y_pred_val = churner_nn.predict(X_val) y_pred_val = (y_pred_val > 0.5) cm1 = confusion_matrix(y_val, y_pred_val) cm1 (cm1[0][0]+cm1[1][1])/cm1.sum() y_pred_test = churner_nn.predict(X_test) y_pred_test = (y_pred_test > 0.5) cm2 = confusion_matrix(y_test, y_pred_test) 
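# as with the validation set above, test-set accuracy is computed below by hand from the confusion matrix diagonal, (cm2[0][0] + cm2[1][1]) / cm2.sum()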
(cm2[0][0]+cm2[1][1])/cm2.sum() cm2 cm1 score1 = churner_nn.evaluate(X_val, y_val) score1 score2 = churner_nn.evaluate(X_test, y_test) score2Special cases Geant4 analysis tools outputfrom physt.compat.geant4 import load_csv %matplotlib inline h = load_csv("../tests/data/geant-h1.csv") h h.plot("line", show_stats=True); h2 = load_csv("../tests/data/geant-h2.csv") h h2.plot(show_zero=False)Line Chartx_sc = LinearScale() y_sc = LinearScale() ax_x = Axis(label='Test X', scale=x_sc, grid_lines='solid') ax_y = Axis(label='Test Y', scale=y_sc, orientation='vertical', grid_lines='solid') line = Lines(x=y_data_3, y=y_data_3, scales={'x': x_sc, 'y': y_sc}) fig = Figure(axes=[ax_x, ax_y], marks=[line]) display(fig)Line Chart with Date xdates = np.arange('2005-02', '2005-03', dtype='datetime64[D]') size = len(dates) prices = 100 + 5 * np.cumsum(np.random.randn(size)) dt_x = DateScale() lin_y = LinearScale() x_ax = Axis(label='Date', scale=dt_x, tick_format='%b-%Y', grid_lines='solid') x_ay = Axis(label=('Price'), scale=lin_y, orientation='vertical', tick_format='0.2f', grid_lines='solid') lc = Lines(x=dates, y=prices, scales={'x': dt_x, 'y': lin_y}, colors=['DodgerBlue']) fig = Figure(marks=[lc], axes=[x_ax, x_ay], fig_color='Green') display(fig) fig.fig_color = 'Black'Scatter Chartsc_x = LinearScale() sc_y = LinearScale() scatter = Scatter(x=x_data, y=y_data, scales={'x': sc_x, 'y': sc_y}) ax_x = Axis(label='Test X', scale=sc_x) ax_y = Axis(label='Test Y', scale=sc_y, orientation='vertical', tick_format='0.2f') fig = Figure(axes=[ax_x, ax_y], marks=[scatter]) display(fig)Histogramscale_x = LinearScale() scale_y = LinearScale() hist = Hist(sample=y_data, scales={'sample': scale_x, 'count': scale_y}) ax_x = Axis(label='Test X', scale=scale_x, tick_format='0.2f') ax_y = Axis(label='Test Y', scale=scale_y, orientation='vertical', grid_lines='solid') fig = Figure(axes=[ax_x, ax_y], marks=[hist]) display(fig)Bar Chartsc_x1 = OrdinalScale() sc_y1 = LinearScale() bar_x = Axis(label='Test X', scale=sc_x1) bar_y = Axis(label='Test Y', scale=sc_y1, orientation='vertical', tick_format='0.2f', grid_lines='solid') bar_chart = Bars(x=['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U'], y=np.abs(y_data[:20]), scales={'x': sc_x1, 'y': sc_y1}) fig = Figure(axes=[bar_x, bar_y], marks=[bar_chart], padding_x=0) display(fig)Work for model step versioning, May 2018 | Python 3.6This notebook was created during development of the model step versioning feature in UrbanSim Templates. It contains working code and tests.---- Feature description:Stored model step instances should include both the model manager codebase version (at the file level) and the template codebase version (at the model step level). The rationale for this is that eventually we'll need/want to make changes that introduce incompatibilities with older versions of saved model steps. Tracking version numbers will help us deal with (convert, warn, etc) old saved model steps.import os; os.chdir('../') from urbansim_templates import modelmanager as mm from urbansim_templates.models import OLSRegressionStep import orca mm.list_steps() step = mm.get_step('ols-test') step.version step.name = 'ols-test-2' mm.add_step(step.to_dict()) mm.remove_step('ols-test-2') step.register() mm.list_steps() orca.list_steps() step = mm.get_step('ols-test-2')Part I: The Babble Labble Pipeline The purpose of this notebook is to introduce the basic pipeline of a Babble Labble application. 
Our task is to classify candidate mentions of spouses from news articles. That is, given a sentence with two identified entities (people), we want to classify whether or not the two people were/are/will soon be married (according to the text). A classifier trained on this task could be used, for example, to populate a knowledge base.This notebook consists of five steps: 1. Load candidates 2. Collect explanations 3. Parse and filter 4. Aggregate labels 5. Train classifier Let's get started! Step 1: Load Candidates%load_ext autoreload %autoreload 2The autoreload extension is already loaded. To reload it, use: %reload_ext autoreloadFirst, we load the candidates and target labels.import pickle DATA_FILE = 'data/tutorial_data.pkl' with open(DATA_FILE, 'rb') as f: Cs, Ys = pickle.load(f)Our data is now divided into three splits (80/10/10), which we'll refer to as the training, dev(elopment), and test splits. In these tutorials, we will do the bulk of our analysis on the dev split to protect the integrity of the held-out test set.The variables `Cs` and `Ys` are each lists of length 3, corresponding to the three splits; each `C` is a list of candidates, and each `Y` is a numpy array of gold labels. Our labels are categorical (1=True, 2=False).print(f"Train Size: {len(Cs[0])}") print(f"Dev Size: {len(Cs[1])}") print(f"Test Size: {len(Cs[2])}")Train Size: 8000 Dev Size: 1000 Test Size: 1000Each candidate consists of two spans from the same sentence (which we refer to as X and Y in explanations). These spans correspond to tokens identified as people using a standard NER tagger. Our first candidate from the train split does appear to be an actual pair of spouses, so it should be classified as True by our classifier.candidate = Cs[0][0] print(f"Sentence:\n{candidate.text}") print(f"Candidate:\nX: {candidate[0]}\nY: {candidate[1]}")Sentence: His mother Joanna, 36, who lives with husband Ian in a detached house, declined to comment when approached yesterday. Candidate: X: EntityMention(doc_id=14945: 'Joanna'(11:17) Y: EntityMention(doc_id=14945: 'Ian'(46:49)Step 2: Collect Explanations We now collect a small number of **natural language explanations** for why candidates should be labeled in a certain way. In Part II of this tutorial, you can look at examples from the dataset and write your own explanations. In this first notebook, we load 10 sample explanations as an example.To improve the coverage of explanations, users may provide aliases, sets of words that can be referred to with a single term. For example, you may define "spouse" words as "husband, wife, spouse, bride, groom" and then refer to these terms collectively in an explanation like "There is at least one spouse word between person1 and person2." We store these user-provided aliases in a dictionary. 
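For orientation, an alias entry like the "spouse" example above might be written roughly as follows. This is only a sketch of the format; the actual `aliases` dictionary imported below from data.sample_explanations may be structured or populated differently:
```
# hypothetical illustration: an alias name mapping to the words it stands for
aliases = {
    "spouse": ["husband", "wife", "spouse", "bride", "groom"],
}
```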
Load existing explanationsfrom data.sample_explanations import explanations, aliasesHere are the first five explanations in our set:for exp in explanations[:5]: print(exp)Explanation(LF_and_married: 1, "the word 'and' is between X and Y and 'married' within five words of Y") Explanation(LF_third_wheel: 2, "there is a person between X and Y") Explanation(LF_married_two_people: 1, "the word 'married' is in the sentence and there are only two people in the sentence") Explanation(LF_same_person: 2, "X and Y are identical") Explanation(LF_husband_wife: 1, "there is at least one spouse word between X and Y")Step 3: Parse Explanations & Apply Filter Bank The conversion from Explanations into Labeling Functions (LFs) is performed by an instance of the `Babbler` class. This class includes a semantic parser and filter bank chained together. The semantic parser creates (possibly multiple) candidate LFs for each Explanation, and the filter bank removes as many of these as it can. (See the paper for a description of the different filters).from babble import Babbler babbler = Babbler(Cs, Ys, aliases=aliases)Grammar construction complete.In this case, we see that our 10 explanations become 32 parses (labeling functions) that are then filtered back down to 10:babbler.apply(explanations, split=0)Building list of target candidate ids... Collected 10 unique target candidate ids from 10 explanations. Gathering desired candidates... Found 10/10 desired candidates Linking explanations to candidates... Linked 10/10 explanations 10 explanation(s) out of 10 were parseable. 32 parse(s) generated from 10 explanation(s). 17 parse(s) remain (15 parse(s) removed by DuplicateSemanticsFilter). 14 parse(s) remain (3 parse(s) removed by ConsistencyFilter). Applying labeling functions to investigate labeling signature. [========================================] 100% 14 parse(s) remain (0 parse(s) removed by UniformSignatureFilter: (0 None, 0 All)). 11 parse(s) remain (3 parse(s) removed by DuplicateSignatureFilter). 10 parse(s) remain (1 parse(s) removed by LowestCoverageFilter). Added 10 parse(s) from 10 explanations to set. (Total # parses = 10) Applying labeling functions to split 1 [========================================] 100% Added 986 labels to split 1: L.nnz = 986, L.shape = (1000, [...]Apply LFs Now that we have our final (filtered) set of LFs, we can label all three splits of our data to get our label matrices, which we'll store in a list called `Ls`, similar to our Cs and Ys lists.Ls = [] for split in [0,1,2]: L = babbler.get_label_matrix(split) Ls.append(L)Retrieved label matrix for split 0: L.nnz = 7838, L.shape = (8000, 10) Retrieved label matrix for split 1: L.nnz = 986, L.shape = (1000, 10) Retrieved label matrix for split 2: L.nnz = 980, L.shape = (1000, 10)Each label matrix is an \[n x m\] sparse matrix where L\[i,j\] = the label given by labeling function j to candidate i. Most of the entries in L are 0 (representing an abstention), since most labeling functions apply to only a small portion of the candidates.Ls[0]Step 4: Aggregate Labels We now aggregate the noisy labels in L into one label per example. 
We do this with the `LabelModel` class from [Snorkel MeTaL](https://github.com/HazyResearch/metal), which implements a new matrix approximation approach to data programming with significantly improved speed and scaling properties.To run the label model with a single setting, we can do the following:from metal import LabelModel label_aggregator = LabelModel() label_aggregator.train(Ls[0], n_epochs=50, lr=0.01) label_aggregator.score(Ls[1], Ys[1])Computing O... Estimating \mu... [E:0] Train Loss: 4.957 [E:10] Train Loss: 0.101 [E:20] Train Loss: 0.267 [E:30] Train Loss: 0.127 [E:40] Train Loss: 0.023 [E:49] Train Loss: 0.032 Finished Training Accuracy: 0.287Or we can perform a random search to identify the hyperparameters that result in the best F1 score on the dev set.from metal.tuners import RandomSearchTuner search_space = { 'n_epochs': [50, 100, 500], 'lr': {'range': [0.01, 0.001], 'scale': 'log'}, 'show_plots': False, } tuner = RandomSearchTuner(LabelModel, seed=123) label_aggregator = tuner.search( search_space, train_args=[Ls[0]], X_dev=Ls[1], Y_dev=Ys[1], max_search=20, verbose=False, metric='f1')============================================================ [SUMMARY] Best model: [5] Best config: {'n_epochs': 100, 'show_plots': False, 'lr': 0.0037849826648026384, 'seed': 127} Best score: 0.6968838526912181 ============================================================Notice that our labeling functions have limited coverage. In fact, over 40% of our candidates do not have a single label.from metal.analysis import label_coverage print(f"Fraction of dev data with at least one label: {label_coverage(Ls[1])}")Fraction of dev data with at least one label: 0.58Instead, we'll use our label aggregator to generate approximate labels for our training set, which will then be used to train a discriminative classifier. In a typical data programming pipeline, we would generate probabilistic labels here. In this tutorial, we want to take advantage of scikit-learn's blazing fast LogisticRegression classifier, so we'll just use normal hard labels.Y_p = label_aggregator.predict(Ls[0])Step 5: Train Classifier There are a variety of reasons why we might find it advantageous to train a discriminative model rather than use the label aggregator directly. Some of these include:* **increased coverage**: As alluded to above, our labeling functions often do not provide labels for all examples in our training set. A trained discriminative model, however, can make informed predictions about any candidate that has features for which it has learned weights.* **improved generalization**: One of the long-standing success stories in weak supervision is distant supervision (e.g., using a database of known spouses to vote positive on those candidates). However, the goal of distant supervision is to _generalize_ beyond the known examples, not memorize them. Similarly, passing supervision information from the user to the model in the form of a dataset--rather than hard rules--facilitates such generalization.* **larger feature set**: The label model uses only those "features" described in labeling functions; by training a discriminative model, however, we open the door to using larger sets of features known to be helpful in our domain, or learning features appropriate for the problem via deep learning.* **faster execution**: For the label model to make a prediction on a new example, it must execute all of its labeling functions, some of which may be expensive (e.g., requiring database lookups). 
A trained discriminative model, however, requires only a single forward pass through the network, often making it faster to execute.* **servable features**: Sometimes, there are features that are convenient to supervise over, but hard to use in a servable model (e.g., statistics aggregated over time, features generated by heavy-weight third-party tools, etc.). Training a discriminative model on the label model's outputs allows us to transfer the supervision signal to a new serving environment.For additional discussion of this topic in the larger context of a shift toward "Software 2.0" systems, see our [technical report](https://ajratner.github.io/assets/papers/software_2_mmt_vision.pdf). In this tutorial, we use a very simple feature set with a simple logistic regression model for the sake of simplicity and fast runtimes. However, these can easily be swapped out for more advanced features and more sophisticated models. Generate Features Our feature set is simply a bag of ngrams (size 1-3) for the text between the two entities in a relation, plus a small amount of additional context on either side. The text is preprocessed by lowercasing, removing stopwords, and replacing entities with generic markers.from metal.contrib.featurizers.ngram_featurizer import RelationNgramFeaturizer featurizer = RelationNgramFeaturizer(min_df=3) featurizer.fit(Cs[0]) Xs = [featurizer.transform(C) for C in Cs][nltk_data] Downloading package stopwords to [nltk_data] /Users/bradenjh/nltk_data... [nltk_data] Package stopwords is already up-to-date!The resulting `X` objects (one per split) are sparse one-hot matrices.Xs[0]Train Model Once again, we perform random search over hyperparameters to select the best model.from babble.disc_model import LogisticRegressionWrapper from metal.metrics import metric_score search_space = { 'C': {'range': [0.0001, 1000], 'scale': 'log'}, 'penalty': ['l1', 'l2'], } tuner = RandomSearchTuner(LogisticRegressionWrapper, seed=123) disc_model = tuner.search( search_space, train_args=[Xs[0], Y_p], X_dev=Xs[1], Y_dev=Ys[1], max_search=20, verbose=False, metric='f1')============================================================ [SUMMARY] Best model: [19] Best config: {'penalty': 'l2', 'C': 25.184688168733086, 'seed': 141} Best score: 0.6931818181818181 ============================================================Evaluation In this case, even with a very simple model class and feature set, we see that the discriminative model performs on par with the label aggregator. 
In other words, the supervision signal provided via natural language explanations has been successfully transferred to a more transportable, generalizable discriminative model via an auto-generated labeled training set!pr, re, f1 = label_aggregator.score(Ls[1], Ys[1], metric=['precision', 'recall', 'f1']) pr, re, f1 = disc_model.score(Xs[1], Ys[1], metric=['precision', 'recall', 'f1'])Precision: 0.762 Recall: 0.635 F1: 0.693SavingBefore we move on to our next notebook, we'll save the `Ls` and training set predictions `Y_p` in pickles so we can use them in the other notebooks without having to repeat the parsing and labeling process.import pickle with open("Ls.pkl", 'wb') as f: pickle.dump(Ls, f) with open("Y_p.pkl", 'wb') as f: pickle.dump(Y_p, f)This notebook illustrates running the deAlmeida overland flow component in an extremely simple-minded way on a real topography, then shows it creating a flood sequence along an inclined surface with an oscillating water surface at one end.First, import what we'll need:import numpy as np from pymt.models import OverlandFlow➡ models: Avulsion, Plume, Sedflux3D, Subside, FrostNumber, Ku, ExponentialWeatherer, Flexure, FlowAccumulator, FlowDirectorD8, FlowDirectorDINF, FlowDirectorSteepest, FlowRouter, LinearDiffuser, OverlandFlow, SoilMoisture, StreamPowerEroder, TransportLengthHillslopeDiffuser, Vegetation, Hydrotrend, Child, Cem, WavesPick the initial and run conditionsrun_time = 60.0 * 60.0 # duration of run, (s) time_step = 60.0 h_init = 0.1 # initial thin layer of water (m)Get the [example DEM](https://github.com/landlab/tutorials/blob/release/overland_flow/Square_TestBasin.asc). This DEM is an ESRII ASCII file so we can use numpy to load it into a numpy array (note that there are 5 header lines). The following command will download it for you.import urllib.request path_to_dem, _ = urllib.request.urlretrieve( "https://raw.githubusercontent.com/landlab/tutorials/release/overland_flow/Square_TestBasin.asc" ) z = np.loadtxt(path_to_dem, skiprows=5) n_rows, n_cols = z.shape spacing = 30.0 # The spacing of the DEM is 30 m overland_flow = OverlandFlow() config_file, config_dir = overland_flow.setup( grid_row_spacing=spacing, grid_column_spacing=spacing, grid_rows=n_rows, grid_columns=n_cols, clock_start=0.0, clock_stop=run_time, clock_step=time_step, steep_slopes=True, # For stability in steeper environments, we set the steep_slopes flag to True ) overland_flow.initialize(config_file, config_dir) status_at_node = np.zeros_like(z) status_at_node[(0, -1), :] = 4 status_at_node[:, (0, -1)] = 4 my_outlet_node = 100 # This DEM was generated using Landlab and the outlet node ID was known status_at_node[0, my_outlet_node] = 1 # 1 is the code for fixed value overland_flow.set_value("boundary_condition_flag", status_at_node) overland_flow.set_value("topographic__elevation", z) overland_flow.set_value("surface_water__depth", h_init) overland_flow.quick_plot("topographic__elevation") for _ in range(10): overland_flow.update() overland_flow.quick_plot("surface_water__depth") water_depth = overland_flow.get_value("surface_water__depth").reshape(z.shape) water_depth[:50, :] = 10.0 * h_init overland_flow.set_value("surface_water__depth", water_depth) for _ in range(10): overland_flow.update() overland_flow.quick_plot("surface_water__depth") for _ in range(10): overland_flow.update() overland_flow.quick_plot("surface_water__depth") while overland_flow.time < overland_flow.end_time: overland_flow.update() overland_flow.quick_plot("surface_water__depth")Chapter 
7=============================== Linear Regression with Normal equationimport numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline from sklearn.datasets import load_boston boston = load_boston() boston.keys() boston.feature_names df = pd.DataFrame(boston.data, columns=boston.feature_names) df.head() X = df.values y = boston.target from sklearn.linear_model import LinearRegression lr_ne = LinearRegression(fit_intercept=True) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) lr_ne.fit(X_train, y_train) y_hat = lr_ne.predict(X_test) y_true = y_test rmse = np.sqrt((((y_hat - y_true) ** 2).sum() / len(y_true))) rmse import sklearn mse = sklearn.metrics.mean_squared_error(y_hat, y_true) mse plt.scatter(y_true, y_hat, s=10) plt.xlabel("Prices: $Y_i$") plt.ylabel("Predicted prices: $\hat{Y}_i$") plt.title("Prices vs Predicted prices: $Y_i$ vs $\hat{Y}_i$") lr_ne.coef_ boston.feature_namesLinear Regression with SGDfrom sklearn.linear_model import SGDRegressor lr_SGD = SGDRegressor() from sklearn.preprocessing import StandardScaler std_scaler = StandardScaler() std_scaler.fit(X) X_scaled = std_scaler.transform(X) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)- Downside: there are many hyperparameters to set.lr_SGD.fit(X_train, y_train) y_hat = lr_SGD.predict(X_test) y_true = y_test- The RMSE and MSE values got larger. The model did not train well. The cause is using unscaled data.mse = sklearn.metrics.mean_squared_error(y_hat, y_true) rmse = np.sqrt((((y_hat - y_true) ** 2).sum() / len(y_true))) rmse, mse plt.scatter(y_true, y_hat, s=10) plt.xlabel("Prices: $Y_i$") plt.ylabel("Predicted prices: $\hat{Y}_i$") plt.title("Prices vs Predicted prices: $Y_i$ vs $\hat{Y}_i$")- Always scale the data before using SGD.from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.33, random_state=42) lr_SGD.fit(X_train, y_train) y_hat = lr_SGD.predict(X_test) y_true = y_test mse = sklearn.metrics.mean_squared_error(y_hat, y_true) rmse = np.sqrt((((y_hat - y_true) ** 2).sum() / len(y_true))) rmse, mse plt.scatter(y_true, y_hat, s=10) plt.xlabel("Prices: $Y_i$") plt.ylabel("Predicted prices: $\hat{Y}_i$") plt.title("Prices vs Predicted prices: $Y_i$ vs $\hat{Y}_i$") from sklearn.linear_model import Lasso, Ridge X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) ridge = Ridge(fit_intercept=True, alpha=0.5) ridge.fit(X_train,y_train) y_hat = ridge.predict(X_test) y_true = y_test mse = sklearn.metrics.mean_squared_error(y_hat, y_true) rmse = np.sqrt((((y_hat - y_true) ** 2).sum() / len(y_true))) rmse, mse plt.scatter(y_true, y_hat, s=10) plt.xlabel("Prices: $Y_i$") plt.ylabel("Predicted prices: $\hat{Y}_i$") plt.title("Prices vs Predicted prices: $Y_i$ vs $\hat{Y}_i$")Regularization and Shrinkage: Why do They Matter?🔗 Materials: https://github.com/Imperial-College-Data-Science-Society/workshops/tree/master/notebooks/workshop-7💻 Content covered:- Motivation- From linear regression to Ridge- Connections with Principal Component Analysis- Lasso- Example👾 This lecture will be held online on Microsoft Teams.🔴 The event will be recorded and will be publicly available.🎉 Attendance is FREE for members!
Whether you are a student at Imperial College or not, sign up to be a member at https://www.icdss.club/joinus⭐️ We encourage participants of this workshop to have looked at our previous sessions on YouTube. 📖 A schedule of our lecture series is currently available Motivation and SetupGiven covariates $x_i = (x_{i1}, x_{i2}, \ldots, x_{id})^\intercal \in \mathbb{R}^d$ and response variables $y_i$, $i = 1, \ldots, n$, so that $$y_i = f(x_i) + \epsilon_i,$$where $\epsilon_i \sim \mathcal{N}(0, \sigma^2)$ is Gaussian noise. For convenience, we write everything in matrix form: for $\epsilon \sim \mathcal{N}(0, \sigma^2 I_n)$,$$Y = f(X) + \epsilon,$$where $Y = (y_1, \ldots, y_n)^\intercal$, and $X \in \mathbb{R}^{n \times d}$ whose $i$-th row is $x_i$. E.g. $f(X) = \alpha_0 \mathbf{1} + X \beta_0$, $Y = \alpha_0 \mathbf{1} + X \beta_0 + \epsilon$. In this case, $(\alpha_0, \beta_0) \in \mathbb{R}^{d + 1}$ are the true parameters. We want to learn $f$. Linear regression Assume $Y = \alpha_0 \mathbf{1} + X\beta_0 + \epsilon$. Least-square (LS) solution: Find the optimal $(\hat{\alpha}, \hat{\beta}) \in \mathbb{R}^{d + 1}$ by minimizing the least-square error:$$(\hat{\alpha}, \hat{\beta}) = \mathrm{argmin}_{(\alpha, \beta) \in \mathbb{R}^{d + 1}} \| Y - \alpha \mathbf{1} - X\beta \|_2^2. $$ Let $\tilde{X} = [\mathbf{1} \ X]$ be the design matrix with an intercept. Simple linear algebra gives the LS solution:$$(\hat{\alpha}, \hat{\beta}^\intercal)^\intercal = (\tilde{X}^\intercal \tilde{X})^{-1} \tilde{X}^\intercal Y.$$ Caveat: For the LS solution to be well-defined, the $(d + 1) \times (d + 1)$ matrix $\tilde{X}^\intercal \tilde{X}$ needs to be invertible, or equivalently, $\tilde{X}$ needs to have full column rank ($n \geq d + 1$). Problem: What about when $d \gg n$? E.g. 1. In cancer prediction problems, it is common to have thousands of gene expressions as your covariates, but only a few hundred patient records. E.g. 2. In a customer behaviour analysis where you are given whether a customer has purchased a product from Amazon as the response, and some features (type of product, price, time of visit etc.) as covariates. You don't really want to *predict* buy/not buy, but to understand which features, amongst a handful of them, are most correlated with the purchase behaviour. SummaryLS is good, but does not give us an answer when:1. The problem is high-dimensional and we have more features than cases.2. We are interested in selecting the features that are most "important". Ridge Regression Ridge solution: Find the optimal $(\hat{\alpha}, \hat{\beta}) \in \mathbb{R}^{d + 1}$ by minimizing the least-square error **with $L_2$-penalty**:$$(\hat{\alpha}, \hat{\beta}) = \mathrm{argmin}_{(\alpha, \beta) \in \mathbb{R}^{d + 1}} \| Y - \alpha \mathbf{1} - X\beta \|_2^2 \color{red}{ + \lambda \| \beta \|_2^2 },$$where $\| \beta \|_2^2 = \sum_{j = 1}^d \beta_j^2$ is called the penalty/regularization term, and $\lambda > 0$ is a hyperparameter we need to choose (often by cross-validation). Why does adding a regularization term help?Under some conditions (columns of $X$ are standardized to have zero mean and unit variance), the Ridge solution can be derived analytically:\begin{align*}\hat{\alpha} &= \frac{1}{n} \sum_{i = 1}^{n} Y_i = 0, \\\hat{\beta} &= (X^\intercal X + \lambda I_d)^{-1} X^{\intercal}Y,\end{align*} **Key observations**: Comparing with LS solution $\hat{\beta} = (X^\intercal X)^{-1} X^{\intercal}Y$,- $(X^\intercal X + \lambda I_d)$ is always invertible for **any** $X$, as long as $\lambda > 0$.
So the Ridge solution is always well-defined.- Adding the penalty term **shrinks** the fitted coefficients in $\hat{\beta}$ towards zero (more on this later).- $\hat{\beta}$ is now biased, but always has a smaller variance for a judicious choice of $\lambda$ (bias-variance trade-off):Mean-Square Error = Variance + $\textrm{Bias}^2$.- We normally do **not** penalize the intercept term! Choosing $\lambda$ The optimal $\lambda$ is often chosen by cross-validation:1. Split training data into various subsets, called **folds**.2. For each $\lambda$ over a pre-defined grid of values $\lambda_1, \ldots, \lambda_k$, calculate the Ridge solution from all but one fold, compute the out-of-sample error on the held-out fold, and repeat to get an averaged loss.3. Pick the $\lambda$ that gave the smallest averaged cross-validation loss. Connections with PCA Idea of Principal Component Analysis (PCA): Find the directions along which the **features** $X$ have the largest variance (i.e. most informative), and only look at the first few of them. - The variance is quantified by the **eigenvalues** of the matrix $X^\intercal X$. - The directions are given by the **eigenvectors**, called principal components (PCs). **Principal component regression**: Use the first, say $s$, eigenvectors as the covariates, and perform a least-squares fit to find $\hat{\beta}$. Principal component regression: - 1: Perform PCA to create PCs as our new input features- 2: Use these PCs as input features to train our model for a least-squares fit.- 3: Transform these PCs back to the original input features, in order to make predictions on the actual dataset. Adding the penalty term in Ridge regression effectively shrinks the Ridge solution $\hat{\beta}$ according to the **eigenvalues** of the matrix $X^\intercal X$. - Let $\hat{\beta}^{(LS)}$ = LS solution, $\hat{\beta}_s^{(PC)}$ = PC regression with $s$ PCs, $\hat{\beta}_\lambda^{(R)}$ = Ridge solution with regularization parameter $\lambda > 0$. - Let $D_1 \geq D_2 \geq \ldots \geq D_d$ be the eigenvalues of $X^\intercal X$ with corresponding eigenvectors $u_1, \ldots, u_d$. **Fact**: The fitted values can be rewritten as\begin{align*}X \hat{\beta}^{(LS)} &= \sum_{j = 1}^{d} (u_j^\intercal Y) u_j \\X \hat{\beta}_s^{(PC)} &= \sum_{j = 1}^s (u_j^\intercal Y) u_j \\X \hat{\beta}_\lambda^{(R)} &= \sum_{j = 1}^d \color{blue}{\frac{D_j^2}{D_j^2 + \lambda}} (u_j^\intercal Y) u_j \end{align*} **Key observations**: - Ridge shrinks the directions with the smallest eigenvalues the most.- $\lambda$ ↑, shrinkage ↑ Lasso Regression Lasso solution: Find the optimal $(\hat{\alpha}, \hat{\beta}) \in \mathbb{R}^{d + 1}$ by minimizing the least-square error **with $L_1$-penalty**:$$(\hat{\alpha}, \hat{\beta}) = \mathrm{argmin}_{(\alpha, \beta) \in \mathbb{R}^{d + 1}} \frac{1}{2}\| Y - \alpha \mathbf{1} - X\beta \|_2^2 \color{red}{ + \lambda \| \beta \|_1 },$$where $\| \beta \|_1 = \sum_{j = 1}^d |\beta_j|$ and $\lambda > 0$ is a hyperparameter we need to choose.
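As a quick aside (not part of the original workshop materials), here is a minimal sketch on synthetic data, assuming only NumPy and scikit-learn, that illustrates the effect of the $L_1$ penalty: as the regularization strength grows, more and more coefficients are driven to exactly zero.
# Minimal illustration: L1 regularization produces exact zeros in the coefficients.
import numpy as np
from sklearn.linear_model import Lasso
rng = np.random.RandomState(0)
n, d = 100, 20
X_syn = rng.randn(n, d)
beta_true = np.zeros(d)
beta_true[:3] = [2.0, -1.5, 1.0]              # only 3 informative features
y_syn = X_syn @ beta_true + 0.1 * rng.randn(n)
for alpha in [0.001, 0.01, 0.1, 1.0]:
    lasso = Lasso(alpha=alpha).fit(X_syn, y_syn)
    n_zero = int(np.sum(lasso.coef_ == 0))    # count coefficients shrunk to exactly zero
    print(f"alpha={alpha:<6} -> {n_zero}/{d} coefficients exactly zero")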
**Key observations**:- Lasso is more likely to give rise to entries of $\hat{\beta}$ that are **exactly** zero.- Lasso solutions have nice theoretical properties: with a judicious choice of $\lambda$ and under regularity conditions, $\hat{\beta} \approx \beta_0$ with high probability.- Lasso can be combined with Ridge to give the *elastic net* penalty: for $\alpha \in [0, 1]$$$\lambda \left( \alpha \| \beta \|_2^2 + (1 - \alpha) \| \beta \|_1 \right).$$ Experiments (Prostate Cancer)See the notebook here:https://github.com/Imperial-College-Data-Science-Society/workshops/tree/master/notebooks/workshop-7 Adapted from *Elements of Statistical Learning, Example 3.2.1*url = "https://web.stanford.edu/~hastie/ElemStatLearn/datasets/prostate.data" import pandas as pd data = pd.read_csv(url, delimiter="\t") data = data.iloc[:, 1:] print("Shape:", data.shape) data.head() data_train = data.loc[data.train == "T"].drop("train", axis=1) data_test = data.loc[data.train != "T"].drop("train", axis=1) print("Train set:", data_train.shape, "Test set:", data_test.shape) trainX = data_train.drop("lpsa", axis=1) trainY = data_train.lpsa testX = data_test.drop("lpsa", axis=1) testY = data_test.lpsaTrain set: (67, 9) Test set: (30, 9)Utility functionsdef compare_loss(model, alphas): '''Plot train and test errors for different values of alpha. ''' train_loss = [] test_loss = [] for a in alphas: ridge = model.set_params(alpha=a) ridge.fit(trainX, trainY) train_predY = ridge.predict(trainX) test_predY = ridge.predict(testX) train_loss.append(mean_squared_error(trainY, train_predY)) test_loss.append(mean_squared_error(testY, test_predY)) ax = plt.gca() ax.plot(alphas, train_loss, label="train") ax.plot(alphas, test_loss, label="test") ax.set_xscale('log') plt.xlabel('alpha') plt.ylabel('MSE') plt.title('Loss as a function of the regularization') plt.axis('tight') plt.legend() def solution_path(model, alphas): '''Plot paths of fitted coefficients for different values of alpha. ''' coefs = [] for a in alphas: ridge = model.set_params(alpha=a) ridge.fit(trainX, trainY) coefs.append(ridge.coef_) # Display results ax = plt.gca() ax.plot(alphas, coefs) ax.set_xscale('log') plt.xlabel('alpha') plt.ylabel('Coefficients') plt.title('Coefficients as a function of regularization') plt.axis('tight') return coefs def plot_cvloss(cvres, model): '''Plot CV loss for each value of alpha.
''' ax = plt.gca() ax.set_xscale('log') ax.plot(cvres.param_alpha, - cvres.mean_test_score) plt.title("CV loss for different alpha (%s)" % model) plt.xlabel("alpha") plt.ylabel("MSE") plt.tight_layout()Linear regressionfrom sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet from sklearn.metrics import mean_squared_error import matplotlib.pyplot as plt linreg = LinearRegression() linreg.fit(trainX, trainY) coeffs_df = pd.DataFrame(linreg.coef_, trainX.columns, columns=["lr"]) coeffs_df.lr.plot(kind="barh") plt.ylabel("Coefficients") plt.show() linreg_predY = linreg.predict(testX) linreg_loss = mean_squared_error(testY, linreg_predY) print('MSE: %.5f' % linreg_loss)MSE: 0.52127Ridge regressionimport numpy as np from sklearn.model_selection import GridSearchCV, RepeatedKFold ridgereg = Ridge(normalize=True) # define model evaluation method cv = RepeatedKFold(n_splits=5, n_repeats=3, random_state=0) # define grid grid = {"alpha": np.arange(0.01, 5, 0.01)} # define search search = GridSearchCV(ridgereg, grid, scoring="neg_mean_squared_error", cv=cv, n_jobs=2, return_train_score=True) # perform the search ridge_res = search.fit(trainX, trainY) ridgecv = pd.DataFrame(ridge_res.cv_results_) ridge = ridge_res.best_estimator_ ridge_predY = ridge.predict(testX) ridge_loss = mean_squared_error(testY, ridge_predY) print('MSE: %.5f' % ridge_loss) print('Best alpha: %s' % ridge_res.best_params_) plot_cvloss(ridgecv, "Ridge") alphas = np.logspace(-5, 5, 200) compare_loss(Ridge(normalize=True), alphas = alphas) alphas = np.logspace(-5, 5, 200) ridge_sol_path = solution_path(Ridge(normalize=True), alphas = alphas)Lassolassoreg = Lasso(normalize=True) # define search grid = {"alpha": np.arange(0.0001, 0.05, 0.0001)} lasso_search = GridSearchCV(lassoreg, grid, scoring="neg_mean_squared_error", cv=cv, n_jobs=2) # perform the search lasso_res = lasso_search.fit(trainX, trainY) lassocv = pd.DataFrame(lasso_res.cv_results_) lasso = lasso_res.best_estimator_ lasso_predY = lasso.predict(testX) lasso_loss = mean_squared_error(testY, lasso_predY) print('MSE: %.5f' % lasso_loss) print('Best alpha: %s' % lasso_res.best_params_) plot_cvloss(lassocv, "Lasso") alphas = np.logspace(-5, 5, 200) compare_loss(Lasso(normalize=True), alphas = alphas) lasso_sol_path = solution_path(Lasso(normalize=True), alphas = alphas)Elastic netenet = ElasticNet(normalize=True) # define search grid = { "alpha": np.arange(0.001, 0.04, 0.001), "l1_ratio": [1e-3, 1e-2, 1e-1, 0.5, 0.75] } enet_search = GridSearchCV(enet, grid, scoring="neg_mean_squared_error", cv=cv, n_jobs=2) # perform the search enet_res = enet_search.fit(trainX, trainY) enetcv = pd.DataFrame(enet_res.cv_results_) enet = enet_res.best_estimator_ enet_predY = enet.predict(testX) enet_loss = mean_squared_error(testY, enet_predY) print('MSE: %.5f' % enet_loss) print('Best params: %s' % enet_res.best_params_) enet_sol_path = solution_path( ElasticNet(normalize=True, l1_ratio=enet_res.best_params_["l1_ratio"]), alphas = alphas )Comparing solution pathsfig = plt.figure(figsize=(20, 14)) ax = fig.add_subplot(321) ax2 = fig.add_subplot(322) ax3 = fig.add_subplot(323) ax.plot(alphas, ridge_sol_path) ax.set_xscale('log') ax.set_xlabel('alpha') ax.set_ylabel('Coefficients') ax.set_title("Ridge") ax2.plot(alphas, lasso_sol_path) ax2.set_xscale('log') ax2.set_xlabel('alpha') ax2.set_ylabel('Coefficients') ax2.set_title("Lasso") ax3.plot(alphas, enet_sol_path) ax3.set_xscale('log')
ax3.set_xlabel('alpha') ax3.set_ylabel('Coefficients') ax3.set_title("Elastic Net") fig.tight_layout() plt.show()Plot fitted coefficientscoeffs_df["Ridge"] = ridge.coef_ coeffs_df["Lasso"] = lasso.coef_ coeffs_df["ElasNet"] = enet.coef_ coeffs_df.plot(kind="barh") plt.ylabel("Coefficients") plt.title("Fitted coefficients of different models") plt.show()Redução de vibrações em sistemas com desbalanceamento rotativo A presença de desbalanceamento em máquinas rotativas, tais como, turbinas, bombas centrígugas, ventiladores e outros, é uma fonte comum de excitação vibratória harmônica em sistema mecânicos. Nesses casos, o desbalancemento é causado pelo desvio entre o eixo de rotação e o centro de massa do rotor, que pode ser causado pela perda ou adição de massa ao rotor em assimetria ao eixo de rotação ou por empenamento do eixo. Em máquinas de alta rotação, pequenos desvios podem causar grandes problemas vibratórios na estrutura que suporta a máquina, ou mesmo esforços alternativos no eixo, podendo acelerar o processo de fadiga, levando à eventual falha não prevista no projeto.Na Figura a seguir, é mostrado o modelo unidimensional de um sistema excitado por desbalanceamento. ![image.png](attachment:image.png)Nesse modelo, $M$ representa a massa total do sistema, $m$ a massa de desbalanceamento localizada no centro de massa do sistema $G$ que está deslocado do eixo de rotação de uma distância $e$, denominada excentricidade. Considerando que a massa de desbalanceamento rotaciona a uma velocidade angular $\omega$ e o sistema está restrito ao movimento vertical, a equação do sistema desbalanceado pode ser escrita na forma:$M \ddot{x} + c \dot{x} + kx = F_0 sin(\omega t) = m e \omega^2 sin (\omega t)$ A magnitude da força transmitida é da por:$F_T = [ (kx)^2 + (c \dot x)^2 ]^{1/2} = X \sqrt{(k^2+w^2 c^2)} $ A razão de transmissibilidade de força é dada por$$ T_f = \frac{F_T}{F_0} = \frac{ (k^2+w^2 c^2)^{1/2}}{[(k-mw^2)^2 + w^2 c^2]^{1/2}} = \left \{ \frac{1 + (2 \zeta r)^2}{[1- r^2]^{1/2} + (2 \zeta r)^{1/2}} \right \}^{1/2}$$ou $$T_f = \frac{F_T}{F_0} = \frac{F_T}{me \omega^2} = \frac{F_T}{me r^2 \omega_n^2}$$ou ainda,$$ \frac{F_T}{me \omega_n^2} = r^2 \left \{ \frac{1 + (2 \zeta r)^2}{[1- r^2]^{1/2} + (2 \zeta r)^{1/2}} \right \}^{1/2}$$onde $ r = \omega / \omega_n$ é a razão de frequências e $\zeta = c/(2 m \omega _n)$ é a razão de amortecimento. TDEUma bomba centrífuga, com massa de 50kg e rotação de 3000 rpm, está montada no centro de uma viga de aço biapoiada de 100cm de comprimento, 20cm de largura e 1,5 cm de espessura. A razão de amortecimento da viga pode ser considerada como $\zeta = 0,05$. O rotor da bomba tem uma massa de 5kg com excentricidade de 1 mm. Se a máxima deflexão da permitida para a viga é de 1.5 mm, determine:1. A viga utilizada para o suporte é adequada?2. Em caso negativo, o que poderia ser feito para diminuir a deflexão da viga?3. Considerando somente a geometria da viga, qual a espessura adequada para a viga?4. Qual outra geometria de seção da viga seria adequada para o suporte sem alterar sua massa?5. Considerando somente a introdução de elementos de amortecimento (coxins) na montagem da bomba centrífuga na viga, qual seria a menor razão de amortecimento necessária?6. Considerando que após muitos ciclos vibratórios a viga do suporte iniciou uma trinca que reduziu 10% da sua espessura: 6.1 A viga ainda suporta o carregamento estático exercido pela bomba centrífuga? 6.2 A deflexão da viga ainda atende a máxima deflexão perimitida? 
Instruções para apresentaçãoO grupo deve preparar slides para uma apresentação que será realizada para a Gerência de Projetos e Manutenção de uma empresa que contratou o seu grupo para resolver o problema acima proposto. A apresentação tem no máximo dez minutos, e deve conter as fórmulas utilizadas e as respostas obtidas para as questões propostas. Não será necessária a elaboração de relatório. Análise de VibraçõesA rigidez de flexão da viga biapoiada, ou constante de mola, é dada por:$$ k = \frac{48 E I}{l^3} $$Onde $E$ é a constante de rigidez do aço ($207 \times 10^9$ Pa), $l$ é o comprimento da viga e $I$ é o momento de inércia e pode ser calculado para a viga de seção retangular, de largura $w$ e espessura $t$ como:$$I = \frac{1}{12}wt^3 $$Usando a densidade do aço, que é $ \rho = 7,85 g/cm^3 $, a massa da viga pode ser determinada, uma vez que:$$ \rho = \frac{m_v}{V_v} $$onde $m_v$ é a massa da viga e $V_v$ é o volume da viga.Assim, a massa total do sistema é soma das massas da bomba centrífuga e da massa efetiva da viga no seu centro de massa, $M = m+\frac{17}{35}m_v$ Lembrando que a frequência natural é dada por$$ w_n = \sqrt{\frac{k}{m}} $$E que a amplitude da oscilação do sistema pode ser calculada:$$ \frac{kX}{F_0} = \left \{ \frac{1 }{[1- r^2]^{2} + (2 \zeta r)^{2}} \right \}^{1/2} $$$$ X = \frac{m e w^2}{k} \left \{ \frac{1 }{[1- r^2]^{2} + (2 \zeta r)^{2}} \right \}^{1/2}$$A deflexão estática da viga sob o peso da bomba centrífuga ($ W_b$ pode ser calculada como:$$ \delta_b = \frac{W_b}{k} $$Assim a deflexão total do sistema é:$$ \delta_{total} = X + \delta_b $$%matplotlib inline #%matplotlib notebook from numpy import sin, cos import numpy as np import matplotlib.pyplot as plt import scipy.integrate as integrate import matplotlib.animation as animation from matplotlib.widgets import Slider from ipywidgets import interactive from ipywidgets import widgets from IPython.display import display from ipywidgets import FloatSlider mb = 50. # kg Massa da bomba centrífuga m = 10. # kg Massa do desbalanceamento rho = 7.85e3 # kg/m^3 Densidade do aço lv = 100.e-2 # m Comprimento da viga wv = 20.e-2 # m Largura da viga tv = 1.5e-2 # m Espessura da viga Vv = lv*wv*tv # m^3 Volume da viga retangular (Qual o volume da viga de seção em U ou T?) mv = rho * Vv # kg Massa da viga print('mv= ', mv) M = mb + mv*17./35. print('M= ', M) e = 0.001 # m Excentricidade rotacao = 3000. #rpm Rotação w = rotacao*2*np.pi/60. # rad/s Frequência angular print('w =',w) I = (wv*tv**3)/12. # m^4 Momento de inércia da viga print('I= ', I) E = 207.e9 # Pa Constante de rigidez do aço print('E= ',E) k = 48.*E*I/lv**3 # N/m Rigidez da viga print('k= ', k) wn = np.sqrt(k/M) # rad/s Frequência natural print('wn= ', wn) zeta = 0.05 # Razão de amortecimento c = 2*zeta * M *wn # Amortecimento viscoso r = w/wn # Razão de frequências print('r= ', r) F0 = m*e*wn**2 # N Amplitude da Força de excitação print('F0 = ', F0) X = (F0/k)/np.sqrt(((1-r*r)**2+(2*zeta*r)**2)) print('X= ',X) delta_b = mb*9.81/k print('delta_b= ', delta_b) delta_t = X + delta_b print('delta_t= ', delta_t) def derivs(state, t): dydx = np.zeros_like(state) dydx[0] = state[1] dydx[1] = (m*e*w*w*sin(w*t) -c*dydx[0]-k*state[0])/M return dydx # create a time array from 0..100 sampled at 0.05 second steps dt = 0.01 t = np.arange(0, 5, dt) # initial state state = [0.0, 0.0] #np.radians([th1, w1, th2, w2]) # integrate your ODE using scipy.integrate. 
y = integrate.odeint(derivs, state, t) y1 = y[:, 0] y2 = y[:, 1] fa_wd = widgets.FloatSlider(min=0.0, max=1.0, step=0.05, value=zeta, description='Fator de Amortecimento',readout_format='.3f') rpm_wd = widgets.FloatSlider(min=1.0, max=4000.0, step=10.0, value=rotacao, description='Rotação (rpm)',readout_format='.3f') e_wd = widgets.FloatSlider(min=0.0, max=0.5, step=0.001, value=e, description='Excentricidade (m)',readout_format='.3f') def updatewidget(vfa_wd, vrpm_wd, ve_wd): global zeta, rotacao, w, wn, c, e, dt zeta = vfa_wd rotacao = vrpm_wd w = rotacao*2.0*np.pi/60.0 dt = 1.0/(2*w) wn = np.sqrt(k/M) r = w/wn c = 2.0*zeta * M *wn e = ve_wd I = (wv*tv**3)/12. # m^4 Momento de inércia da viga wn = np.sqrt(k/M) # rad/s Frequência natural c = 2*zeta * M *wn # Amortecimento viscoso r = w/wn # Razão de frequências F0 = m*e*wn**2 # N Amplitude da Força de excitação X = (F0/k)/np.sqrt(((1-r*r)**2+(2*zeta*r)**2)) delta_b = mb*9.81/k delta_t = X + delta_b Ft = m*e*w*w*np.sqrt((1+(2*zeta*r)*(2*zeta*r))/((1-r*r)*(1-r*r)+(2*zeta*r)*(2*zeta*r))) print('Fator de Amortecimento: ', zeta) print('Rotação: ', rotacao) print('Excentricidade: ', e) print('==================Resultados=================') print('Razão de frequências: ', w/wn) print('Força excitação (N): ', F0) print('Força transmitida (N): ', Ft) print('Deflexão Total (m): ', delta_t) print('Deflexão Estática (m): ', delta_b) print('=============================================') return e v = interactive(updatewidget, vfa_wd=fa_wd, vrpm_wd= rpm_wd, ve_wd=e_wd) display(v)mv= 23.55 M= 61.43857142857143 w = 314.1592653589793 I= 5.624999999999999e-08 E= 207000000000.0 k= 558899.9999999999 wn= 95.37762422999184 r= 3.293846621734061 F0 = 90.96891203757527 X= 1.651601518802553e-05 delta_b= 0.0008776167471819647 delta_t= 0.0008941327623699902Hyperparameter Tuning using HyperDrive Azure ML importsimport logging import os import csv import pkg_resources import joblib import numpy as np import pandas as pd from matplotlib import pyplot as plt from sklearn import datasets from sklearn.metrics import confusion_matrix import azureml.core from azureml.core.compute import AmlCompute from azureml.core.compute import ComputeTarget from azureml.core.compute_target import ComputeTargetException from azureml.core.dataset import Dataset from azureml.core.experiment import Experiment from azureml.core.workspace import Workspace from azureml.train.hyperdrive.parameter_expressions import uniform, choice from azureml.train.hyperdrive.policy import BanditPolicy from azureml.train.hyperdrive.runconfig import HyperDriveConfig from azureml.train.hyperdrive.run import PrimaryMetricGoal from azureml.train.hyperdrive.sampling import RandomParameterSampling from azureml.train.sklearn import SKLearn from azureml.widgets import RunDetails # Check core SDK version number print("SDK version:", azureml.core.VERSION)SDK version: 1.27.0Initialize workspaceInitialize a workspace object from persisted configuration.ws = Workspace.from_config() print( 'Workspace name: ' + ws.name, 'Azure region: ' + ws.location, 'Subscription id: ' + ws.subscription_id, 'Resource group: ' + ws.resource_group, sep = '\n' )Workspace name: udacity-ml-capstone-ws Azure region: eastus Subscription id: b329467a-d1f8-4c9b-b3dc-95cdc7bff7fa Resource group: udacity-ml-capstone-rgCreate an Azure HyperDrive experimentLet's create an experiment named `heart-failure-hd-exp` and a folder to hold the training scripts. 
The script runs will be recorded under the experiment in Azure.The best practice is to use separate folders for scripts and its dependent files for each step and specify that folder as the `source_directory for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the source_directory would trigger a re-upload of the snapshot, this helps keep the reuse of the step when there are no changes in the source_directory of the step.# Choose a name for the run history container in the workspace experiment_name = 'heart-failure-hd-exp' project_folder = './heart-failure-hd-proj' experiment = Experiment(ws, experiment_name) experimentCreate or attach an AmlCompute clusterYou will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecturecompute-target) for your HyperDrive run.# Choose a name for your CPU cluster compute_cluster_name = "compute-cluster" # Verify that cluster does not exist already try: compute_target = ComputeTarget(workspace=ws, name=compute_cluster_name) print('Found existing cluster, use it.') except ComputeTargetException: compute_config = AmlCompute.provisioning_configuration( vm_size='STANDARD_D2_V2',# for GPU, use "STANDARD_NC6" #vm_priority = 'lowpriority', # optional min_nodes=0, max_nodes=5) compute_target = ComputeTarget.create(ws, compute_cluster_name, compute_config) compute_target.wait_for_completion(show_output=True) # For a more detailed view of current AmlCompute status, use get_status() print(compute_target.get_status().serialize())Found existing cluster, use it. Succeeded AmlCompute wait for completion finished Minimum number of nodes requested have been provisioned {'currentNodeCount': 4, 'targetNodeCount': 4, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 0, 'idleNodeCount': 4, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2021-05-11T16:21:15.704000+00:00', 'errors': [{'error': {'code': 'ClusterCoreQuotaReached', 'message': 'Operation results in exceeding quota limits of Total Cluster Dedicated Regional vCPUs. Maximum allowed: 10, Current in use: 10, Additional requested: 2. Click here to view and request for quota: https://portal.azure.com/#resource/subscriptions/b329467a-d1f8-4c9b-b3dc-95cdc7bff7fa/resourceGroups/udacity-ml-capstone-rg/providers/Microsoft.MachineLearningServices/workspaces/udacity-ml-capstone-ws/quotaUsage'}}], 'creationTime': '2021-05-11T12:45:11.596800+00:00', 'modifiedTime': '2021-[...]DatasetThe data is loaded into the workspace using `TabularDataFactory` in the `train.py` script. Hyperdrive ConfigurationFor the HyperDrive experiment, we chose the [`LogisticRegression`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) classifier from scikit-learn.The script `train.py` takes care of data collection, cleansing and splitting, model training and testing. Hyperparameter sampling and applying the early stopping policy is performed by HyperDrive. Data collection, cleansing and splittingThe dataset is loaded using `TabularDatasetFactory`. The cleansing process drops rows with empty values and performs one hot encoding for categorical columns (our dataset does not have any). The dataset is split into train and test sets. 70% of the data is used for training and 30% for testing. 
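To make the description above more concrete, here is a hypothetical sketch of what the data handling in `train.py` could look like; the actual script may differ, and both the dataset URL and the `DEATH_EVENT` target column name are assumptions for illustration, not taken from this notebook.
# Hypothetical sketch of the data loading, cleansing and splitting described above.
from azureml.data.dataset_factory import TabularDatasetFactory
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
data_url = "https://example.com/heart_failure_clinical_records_dataset.csv"  # placeholder URL
ds = TabularDatasetFactory.from_delimited_files(path=data_url)
df = ds.to_pandas_dataframe().dropna()      # drop rows with empty values
y = df.pop("DEATH_EVENT")                   # assumed target column
x_train, x_test, y_train, y_test = train_test_split(df, y, test_size=0.3, random_state=42)
model = LogisticRegression(C=1.0, max_iter=100).fit(x_train, y_train)
print("Test accuracy:", model.score(x_test, y_test))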
Hyperparameter samplingThe project uses two hyperparameters:- `--C`: inverse regularization strength- `--max_iter`: maximum iteration to converge for the scikit-learn Logistic Regression modelI use [random parameter sampling](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.hyperdrive.randomparametersampling?view=azure-ml-py). Random sampling supports discrete and continuous hyperparameters. It supports early termination of low-performance runs. In random sampling, hyperparameter values are randomly selected from the defined search space. Random parameter sampling is good approach for discovery learning as well as hyperparameter combinations. Model training and testingModel training and testing is performed using scikit-learn's Logistical Regression model. In `train.py`, metrics are generated and logged. The accuracy is used to benchmark the model. Applying early stopping policyThe execution of the pipeline is stopped if the conditions specified by the policy are met.The model uses [BanditPolicy](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.hyperdrive.banditpolicy?view=azure-ml-py).Bandit policy is based on slack factor/slack amount and evaluation interval. Bandit ends runs when the primary metric isn't within the specified slack factor/slack amount of the most successful run.See [HyperDriveConfig Class](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.hyperdrive.hyperdriveconfig?view=azure-ml-py) for a complete list of configuration parameters.# Early termination policy (not required if using Bayesian sampling) early_termination_policy = BanditPolicy(evaluation_interval=2, slack_factor=0.1) # Params that you will be using during training param_sampling = RandomParameterSampling({ "--C": uniform(0.001, 100), "--max_iter": choice(10, 50, 100, 150, 200) }) # Training directory and script train_dir = "./training" train_script = "train.py" # SKLearn estimator for use with train.py estimator = SKLearn( source_directory=train_dir, entry_script=train_script, compute_target=compute_cluster_name ) # HyperDriveConfig using the estimator, hyperparameter sampler, and policy hyperdrive_run_config = HyperDriveConfig( estimator=estimator, hyperparameter_sampling=param_sampling, primary_metric_name='Accuracy', primary_metric_goal=PrimaryMetricGoal.MAXIMIZE, max_total_runs=25, max_concurrent_runs=5, policy=early_termination_policy, ) # Submit your experiment hyperdrive_run = experiment.submit(hyperdrive_run_config)WARNING:root:If 'script' has been provided here and a script file name has been specified in 'run_config', 'script' provided in ScriptRunConfig initialization will take precedence.Run DetailsUse the `RunDetails` widget to show the different experiments.RunDetails(hyperdrive_run).show() hyperdrive_run.wait_for_completion(show_output=True) hyperdrive_runBest ModelGet the best model from the hyperdrive experiments and display all the properties of the model.# Get your best run best_run = hyperdrive_run.get_best_run_by_primary_metric() print(f"Best run arguments: {best_run.get_details()['runDefinition']['arguments']}") print(f"Best run metrics: {best_run.get_metrics()}") print(f"Best run file names: {best_run.get_file_names()}") # Save the best model joblib.dump(value=best_run.id, filename="./outputs/hyperdrive_model.joblib")Train a LR on the training examples of the few-shot task and evaluate it on the query samples.# Loader train_loader = get_dataloader('train', IBC_path, parcel, split_dir, meta=False, 
batch_size=1, sampler=None) test_loader = get_dataloader('test', IBC_path, parcel, split_dir, meta=False, batch_size=1, sampler=None) # Reshape the data samples. base_mean, data = extract_feature(train_loader, test_loader, model, 'best') # Train a LR on 10000 few-shot problems and evaluate it. ## Essayer avec normalisation et sans normalisation. Et aussi avec centrage et normalisation. acc_list = [] # You can use the same normalization as in SimpleShot ('CL2N') or only divide each example by its norm ('L2N'). norm_type = None # Iterate over several episodes. for episode in range(n_episode): # Retrieve train and test from test_loader train_data, test_data, train_labels, test_labels = sample_case(data, n_shot, n_way, n_query) #print("train_data", train_data.shape) # Examples associated with the same class follow each other. #print("train_labels", train_labels.shape) # (Optional) Normalize the data samples. if norm_type == 'CL2N': train_data = train_data - base_mean train_data = train_data / LA.norm(train_data, 2, 1)[:, None] test_data = test_data - base_mean test_data = test_data / LA.norm(test_data, 2, 1)[:, None] elif norm_type == 'L2N': train_data = train_data / LA.norm(train_data, 2, 1)[:, None] test_data = test_data / LA.norm(test_data, 2, 1)[:, None] train_data = torch.from_numpy(train_data) train_labels = torch.from_numpy(train_labels) test_data = torch.from_numpy(test_data) test_labels = torch.from_numpy(test_labels) # Rename the labels for criterion #print(train_labels) unique_labels = torch.sort(torch.unique(train_labels)) #print(unique_labels.values) for new_l, l in enumerate(unique_labels.values): train_labels[train_labels == l] = torch.ones_like(train_labels[train_labels == l]) * new_l test_labels[test_labels == l] = torch.ones_like(test_labels[test_labels == l]) * new_l # Initialize the LR on the train for a few epochs model = LR(n_way) # Optimization criterion = nn.CrossEntropyLoss() optimizer, scheduler = get_optimizer(model, lr, n_epoch) # Train the LR model.train() losses = [] train_accs = [] for epoch in range(n_epoch+1): # Train for one epoch. # Zero the parameter gradients. optimizer.zero_grad() # Forward + backward + optimize. outputs = model(train_data) loss = criterion(outputs, train_labels) loss.backward() optimizer.step() scheduler.step() acc = compute_accuracy(outputs.clone().detach(), train_labels) # Statistics. losses.append(loss.item()) train_accs.append(acc*100 / train_labels.shape[0]) # Compute the accuracy on test_data optimizer.zero_grad() model.eval() with torch.no_grad(): outputs = model(test_data) # Compute the accuracy. 
acc = compute_accuracy(outputs.clone().detach(), test_labels) print('Acc on episode {} : {:.2f}.'.format(episode, acc*100 / test_labels.shape[0]), end='\r') acc_list.append(acc*100 / test_labels.shape[0]) acc_mean, acc_conf = compute_confidence_interval(acc_list) print('The baseline has an average accuracy of {:.2f}% over {} tasks with 95% confidence interval {:.2f}.'.format(np.round(acc_mean, 2), n_episode, np.round(acc_conf, 2)))The baseline has an average accuracy of 52.94% over 10000 tasks with 95% confidence interval 0.19.Programación para *Data Science*============================Unidad 5: Adquisición de datos en Python--------------------------------------En este Notebook encontraréis dos conjuntos de ejercicios: un primer conjunto de **ejercicios para practicar** y un segundo conjunto de **actividades evaluables** como PEC de la asignatura.En cuanto al conjunto de ejercicios para practicar, éstos no puntúan para la PEC, pero os recomendamos que los intentéis resolver como parte del proceso de aprendizaje. Encontraréis ejemplos de posibles soluciones a los ejercicios al propio notebook, pero es importante que intentéis resolverlos vosotros antes de consultar las soluciones. Las soluciones os permitirán validar vuestras respuestas, así como ver alternativas de resolución de las actividades. También os animamos a preguntar cualquier duda que surja sobre la resolución de los **ejercicios para practicar** en el foro del aula.Además, veréis que todas las actividades tienen una etiqueta que indica los recursos necesarios para llevarla a cabo. Hay tres posibles etiquetas:* NM **Sólo materiales**: las herramientas necesarias para realizar la actividad se pueden encontrar en los materiales de la asignatura. * EG **Consulta externa guiada**: la actividad puede requerir hacer uso de herramientas que no se encuentran en los materiales de la asignatura, pero el enunciado contiene indicaciones de dónde o cómo encontrar la información adicional necesaria para resolver la actividad.* EI **Consulta externa independente**: la actividad puede requerir hacer uso de herramientas que no se encuentran en los materiales de la asignatura, y el enunciado puede no incluir la descripción de dónde o cómo encontrar esta información adicional. Será necesario que el estudiante busque esta información utilizando los recursos que se han explicado en la asignatura.Es importante notar que estas etiquetas no indican el nivel de dificultad del ejercicio, sino únicamente la necesidad de consulta de documentación externa para su resolución. Además, recordad que las **etiquetas son informativas**, pero podréis consultar referencias externas en cualquier momento (aunque no se indique explícitamente) o puede ser que podáis hacer una actividad sin consultar ningún tipo de documentación. Por ejemplo, para resolver una actividad que sólo requiera los materiales de la asignatura, puedéis consultar referencias externas si queréis, ya sea tanto para ayudaros en la resolución como para ampliar el conocimiento!En cuanto a la consulta de documentación externa en la resolución de los ejercicios, recordad **citar siempre la bibliografía utilizada** para resolver cada actividad. --- Ejercicios para practicar**Los siguientes 3 ejercicios no puntúan para la PEC**, pero os recomendamos que los intentéis resolver antes de pasar a los ejercicios propios de la PEC. También encontraréis las soluciones a estos ejercicios al final del Notebook. 
Ejercicio 1Queremos saber los crímenes que se han producido en Reino Unido en una localización (latitud, longitud) y fecha concretas. Identificad qué métodos de la API siguiente podemos utilizar para obtener la información y contestad a las siguientes preguntas.NM 1. ¿A qué URL haremos la petición?2. ¿Qué tipo de petición HTTP (qué acción) deberemos realizar contra la API para obtener los datos deseados?3. ¿En qué formato obtendremos la respuesta de la API?4. ¿Qué parámetros deberemos proporcionar en la petición a la API? **Respuesta** Ejercicio 2 Programad una función que retorne el estado meteorológico actual en una cierta localización, definida por su código postal (**zip code**) y código de país (e.g: us, uk, es, fr, etc). La función debe devolver una lista de tuplas de dos elementos, correspondientes al resumen del estado actual del tiempo **(weather.main)** y a la descripción extendida **(weather.description)**. Utilizad la API de [openweathermap](https://openweathermap.org/api) para obtener las predicciones.Para utilizar la API necesitareis registraros y obtener una API key. Podéis registraros [aquí](https://home.openweathermap.org/users/sign_up) y obtener vuestra API key [aquí](https://home.openweathermap.org/api_keys) una vez registrados. Tened en cuenta que la API key puede tardar un rato en funcionar después de registraros, y la API os devolverá un error 401 conforme la clave no es valida:`{"cod":401, "message": "Invalid API key. Please see http://openweathermap.org/faqerror401 for more info."}`Simplemente esperad un rato antes de utilizar la clave.NM**Hints**: - Veréis que en general la API esta documentada sin incluir la API key, aun que esta es necesaria. Deberéis incluir la API key en la llamada como uno de los parámetros de la URL (&appid=your_api_key): http://example_url.com?param1=value1¶m2=value2&appid=your_api_key - Os animamos a que paséis por el proceso de registro para que veáis de que trata y cómo se generan las API keys. Aún así, os proporcionamos una API key en caso de que tengáis problemas con el proceso. owm_api_key = '' **Respuesta** Ejercicio 3[CoinMarketCap](https://coinmarketcap.com/) es una web con contenido acerca delas 100 criptomonedas con más capitalización de mercado. Programad un _crawler_ que extraiga los nombres y la capitalización de todas les monedas que se muestran en CoinMarketCap. Utilizad la estructura de _crawler_ que hemos visto en el Notebook de esta unidad **modificando únicamente dos lineas de código**:EG- URL de inicio.- La expresión XPath que selecciona el contenido a capturar.**Pista**: tal vez os puede ser de utilidad investigar sobre la scrapy shell y utilizarla para encontrar la expresión XPath que necesitas para resolver el ejercicio.**Nota**: si la ejecución del _crawler_ os devuelve un error `ReactorNotRestartable`, reiniciad el núcleo del Notebook (en el meú: `Kernel` - `Restart`). **Respuesta** --- Ejercicios y preguntas teóricas para la PECA continuación, encontraréis los **ejercicios y preguntas teóricas que debéis completar en esta PEC** y que forman parte de la evaluación de esta unidad. Ejercicio 1La librería Tweepy nos permite interactuar con la API de Twitter de una forma sencilla. Utilizando la librería Tweepy, recuperad la descripción, la fecha de creación y localización de vuestra cuenta de Twitter. Si lo preferís, podéis obtener dicha información de la cuenta del usuario de Twitter de la Python Software Foundation, `ThePSF`( en vez de utilizar vuestra cuenta). 
**(1 punto)** NM**Nota**: Necesitáis las claves ***Consumer API keys y Access token & acces token secret***. Para obtener las claves, seguid las indicaciones que encontraréis en el Notebook de esta unidad. Podéis utilizar el código presente en el Notebook, adaptándolo para resolver el ejercicio. **Respuesta** Ejercicio 2Implementad un conjunto de funciones para obtener la **secuencia de ADN** del organismo *Homo sapiens* del cromosoma 1 (**chr1**) desde la posición 100000 hasta 101000 para la referencia **hg19**. Para realizar el ejercicio utilizad la API de [UCSC](https://genome.ucsc.edu/goldenPath/help/api.html). **(1.5 puntos)** NM**Nota**: El genoma de referencia de una célula es un repositorio de secuencias de ADN ( ácido desoxirribonucleico) empaquetado en forma de cromosoma. El ADN es un ácido nucleico que contiene la información genética que dirige el desarrollo y el funcionamiento de todos los seres vivos. El ADN se puede entender como una secuencia de nucleótidos (A, C, T y G) de una determinada longitud. Este material hereditario codifica los genes que, una vez descifrados, son indispensables para la síntesis de las proteínas. Un genoma de referencia es la representación de la secuencia de ADN del genoma de una especie. En el caso del organismo *Homo sapiens*, existen diferentes versiones del genoma de referencia. La última versión, hg38, se publicó en el 2014 y es la más detallada y precisa.UCSC es un navegador de la Universidad de Santa Cruz de California que ofrece acceso a secuencias genómicas y su correspondiente anotación (genes, mRNAs, CpG,…) de una gran variedad de organismos, vertebrados e invertebrados. Referencia: [Genómica Computacional](http://discovery.uoc.edu/iii/encore/record/C__Rb1046448__Sgenomica%20Computacional__Orightresult__U__X7?lang=cat&suite=def). . Barcelona, Universitat Oberta de Catalunya, 2011.**Importante**: No es necesario entender toda la información que podéis obtener a través de la API de UCSC. Fijaros bien con lo que os pide el enunciado ( prestad atención a la palabras clave en negrita), y revisad los ejemplos de acceso a los datos que hay en la web de [UCSC](https://genome.ucsc.edu/goldenPath/help/api.html). **Respuesta**import requests import json import tweepy from IPython.display import HTML def secuencia_adn(chromosome, start, end, version="hg19"): URL='https://api.genome.ucsc.edu/getData/sequence?genome={};track=knownGene;chrom=chr{};start={};end={}'.format(version, chromosome, start, end) req = requests.get(URL) data_adn = req.text sec_adn = json.loads(data_adn)["dna"] return sec_adn def long_crom(chromosome, version="hg19"): url='https://api.genome.ucsc.edu/getData/sequence?genome={};track=knownGene;chrom=chr{};'.format(version, chromosome) resp = requests.get(url=url) data_adn = resp.text sec_adn = json.loads(data_adn)["dna"] return len(sec_adn) chromosome=1 start=100000 end=101000 secuencia_adn(chromosome, start, end)Ejercicio 3Dada la API de UCSC del ejercicio anterior, obtened la longitud del chr1 del organismo *Homo sapiens* según la versión del genoma de referencia hg19. Calculad la diferencia entre la longitud del cromosoma chr1 entre las versiones hg19 y hg18. **(1.5 puntos)** NM **Respuesta**chromosome=1 sec_hg19 = long_crom(chromosome, version="hg19") sec_hg38 = long_crom(chromosome, version="hg38") len_difference = abs(sec_hg19-sec_hg38) len_differenceEjercicio 4La [NASA](https://www.nasa.gov) mediante su [API](https://api.nasa.gov) publica cada día una imagen de astronomía. 
Implementad una función para descargar y visualizar la imagen dentro del notebook. **(2 puntos)** NM **Respuesta**import urllib from IPython.display import Image from IPython.core.display import HTML URL = "https://api.nasa.gov/planetary/apod?api_key=DEMO_KEY" resp = requests.get(URL) imagen = json.loads(resp.text)["url"] urllib.request.urlretrieve(imagen, "photo.jpg") #DOWNLOADS IMAGE Image(url=imagen) #Mostrar la imagenEjercicio 5[Scimago Journal](https://www.scimagojr.com/journalrank.php) es una web para consultar la información de las principales revistas de la comunidad científica. Programad un crawler que devuelva una tupla con el código y la área de todas las revistas que se muestran en la web. Utilizad la estructura de crawler que hemos visto en el Notebook de esta unidad modificando únicamente dos líneas de código:- URL de inicio.- La expresión XPath que selecciona el contenido a capturar.**Nota**: si la ejecución del _crawler_ os devuelve un error `ReactorNotRestartable`, reiniciad el núcleo del Notebook (en el menú: `Kernel` - `Restart`). **(2 puntos)** EG **Respuesta**import scrapy from scrapy.crawler import CrawlerProcess class uoc_spider(scrapy.Spider): # asignamos un nombre a la araña name = "uoc_spider" # Indicamos la URL que queremos analizar. # Incluimos aquí la URL de inicio: 7 ################################################ start_urls = [ "https://www.scimagojr.com/journalrank.php" ] ################################################ # Definimos el analizador. def parse(self, response): # Extraer el nombre de la moneda # Incluir la expresión 'xpath' que nos devuelve los nombres de las␣ ################################################ for journal in response.xpath('//td[@class="tit"]/a/text()'): ################################################ yield { 'journal': journal.extract() } # Creamos un crawler. process = CrawlerProcess({ 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)', 'DOWNLOAD_HANDLERS': {'s3': None}, 'LOG_ENABLED': True }) # Inicializamos el crawler con la muestra de la araña. process.crawl(uoc_spider) # Lanzamos la araña. process.start()Ejercicio 6Queremos conocer la Agenda de actos de la Anella Olímpica de la ciudad de Barcelona. Imprimid por pantalla el nombre del grupo o cantante que celebrará un concierto en la Anella Olímpica durante el año 2020. Para realizar el ejercicio, consultad el portal de datos abiertos del Ayuntamiento de Barcelona mediante la siguiente [url](https://opendata-ajuntament.barcelona.cat/es/). Primero tenéis que identificar qué métode utilizar para descargar los datos. Seguidamente, descargad los datos y procesarlos para responder la pregunta. **(2 puntos)** EG **Respuesta**URL="https://api.bsmsa.eu/ext/api/ao/waos/ca/actosanella.json" resp = requests.get(URL) data = json.loads(resp.text) bands=[] for info in data["actes"]["acte"]: if "2020" in info["date"]: bands.append(info["name"]) set(bands)Ejercicio Opcional Modificad la función del ejercicio 5 para obtener las 10 primeras revistas asociadas en la área Computer Science del 2017.EI **Respuesta** --- Soluciones ejercicios para practicar Ejercicio 1Queremos saber los crímenes que se han producido en Reino Unido en una localización (latitud, longitud) y fecha concretas. Identificad qué métodos de la API siguiente podemos utilizar para obtener la información y contestad a las siguientes preguntas.NM 1. ¿A qué URL haremos la petición?2. ¿Qué tipo de petición HTTP (qué acción) deberemos realizar contra la API para obtener los datos deseados?3. 
¿En qué formato obtendremos la respuesta de la API?4. ¿Qué parámetros deberemos proporcionar en la petición a la API? **Respuesta**1. https://data.police.uk/docs/method/crimes-at-location/2. Tenemos que realizar una petición tipo GET3. La respuesta la obtendremos en formato JSON4. Tenemos que proporcionar la fecha (date), latitud (lat) y longitud (lng) Ejercicio 2 Programad una función que retorne el estado meteorológico actual en una cierta localización, definida por su código postal (**zip code**) y código de país (e.g: us, uk, es, fr, etc). La función debe devolver una lista de tuplas de dos elementos, correspondientes al resumen del estado actual del tiempo **(weather.main)** y a la descripción extendida **(weather.description)**. Utilizad la API de [openweathermap](https://openweathermap.org/api) para obtener las predicciones.NMPara utilizar la API necesitareis registraros y obtener una API key. Podéis registraros [aquí](https://home.openweathermap.org/users/sign_up) y obtener vuestra API key [aquí](https://home.openweathermap.org/api_keys) una vez registrados. Tened en cuenta que la API key puede tardar un rato en funcionar después de registraros, y la API os devolverá un error 401 conforme la clave no es valida:`{"cod":401, "message": "Invalid API key. Please see http://openweathermap.org/faqerror401 for more info."}`Simplemente esperad un rato antes de utilizar la clave.**Hints**: - Veréis que en general la API esta documentada sin incluir la API key, aun que esta es necesaria. Deberéis incluir la API key en la llamada como uno de los parámetros de la URL (&appid=your_api_key): http://example_url.com?param1=value1¶m2=value2&appid=your_api_key- Os animamos a que paséis por el proceso de registro para que veáis de que trata y cómo se generan las API keys. Aún así, os proporcionamos una API key en caso de que tengáis problemas con el proceso. owm_api_key = '' **Respuesta**Lo primero que haremos será revisar la API de openweathermap para identificar qué endpoints nos pueden ser útiles. El enunciado nos pide devolver el estado meteorológico actual dado un código postal, podemos utilizar https://openweathermap.org/current.Existe un método que nos devuelve el estado meteorológico a partir del código postal y el código del país separado por coma:api.openweathermap.org/data/2.5/weather?zip=zip_code,country_codeimport json import requests def parse_response(response): data = None if response.status_code == 200: # Data is formatted as JSON but received as string. Load it as JSON object data = json.loads(response.content) # Raise an error otherwise else: raise Exception("Unexpected response (%s: %s)." 
Exercise 2
Write a function that returns the current weather at a given location, defined by its postal code (**zip code**) and country code (e.g. us, uk, es, fr, etc.). The function must return a list of two-element tuples containing the summary of the current weather **(weather.main)** and the extended description **(weather.description)**. Use the [openweathermap](https://openweathermap.org/api) API to obtain the forecasts. NM

To use the API you will need to register and obtain an API key. You can register [here](https://home.openweathermap.org/users/sign_up) and, once registered, obtain your API key [here](https://home.openweathermap.org/api_keys). Bear in mind that the API key can take a while to start working after you register; until then the API will return a 401 error stating that the key is not valid: `{"cod":401, "message": "Invalid API key. Please see http://openweathermap.org/faqerror401 for more info."}` Simply wait a while before using the key.

**Hints**:
- You will see that, in general, the API is documented without including the API key, even though it is required. You must include the API key in the call as one of the URL parameters (&appid=your_api_key): http://example_url.com?param1=value1&param2=value2&appid=your_api_key
- We encourage you to go through the registration process to see what it involves and how API keys are generated. Even so, we provide an API key in case you run into problems with the process.

owm_api_key = ''

**Answer**
The first thing we will do is review the openweathermap API to identify which endpoints may be useful. The statement asks us to return the current weather given a postal code, so we can use https://openweathermap.org/current. There is a method that returns the current weather from the postal code and the country code separated by a comma: api.openweathermap.org/data/2.5/weather?zip=zip_code,country_code

import json
import requests

def parse_response(response):
    data = None
    if response.status_code == 200:
        # Data is formatted as JSON but received as string. Load it as JSON object
        data = json.loads(response.content)
    # Raise an error otherwise
    else:
        raise Exception("Unexpected response (%s: %s)."
                        % (response.status_code, response.reason))
    return data

def get_weather_zip(zip_code, country, api_key):
    # Query the data from the API
    base_url = 'http://api.openweathermap.org/data/2.5/weather?zip=%s,%s&appid=%s'
    # We also add the API KEY to the request
    response = requests.get(base_url % (zip_code, country, api_key))
    # Check the response code and act accordingly
    data = parse_response(response)
    # If the data was properly processed
    if data:
        weather = data.get('weather')
        r = [(w.get('main'), w.get('description')) for w in weather]
    else:
        raise Exception("Couldn't get weather data.")
    return r

api_key = ''
zip_code = '08018'
country_code = 'es'
weather_data = get_weather_zip(zip_code, country_code, api_key)
print(weather_data)

[('Clouds', 'scattered clouds')]

Exercise 3
[CoinMarketCap](https://coinmarketcap.com/) is a website with information about the 100 cryptocurrencies with the largest market capitalisation. Write a _crawler_ that extracts the names and the market capitalisation of all the currencies shown on CoinMarketCap. To do so, use the _crawler_ structure we saw in this unit's notebook, **modifying only two lines of code**:
- The start URL.
- The XPath expression that selects the content to capture.
**Hint**: it may be useful to look into the scrapy shell and use it to find the XPath expression you need to solve the exercise.
**Note**: if running the _crawler_ returns a `ReactorNotRestartable` error, restart the notebook kernel (menu: `Kernel` - `Restart`). NM

**Answer**
import scrapy
from scrapy.crawler import CrawlerProcess

# Create the spider.
class uoc_spider(scrapy.Spider):
    # Give the spider a name
    name = "uoc_spider"
    # Indicate the URL we want to analyse.
    # Put the start URL here:
    ################################################
    start_urls = [
        "https://coinmarketcap.com/"
    ]
    ################################################
    # Define the parser.
    def parse(self, response):
        # Extract the currency names
        # Add the 'xpath' expression that returns the currency names.
        ################################################
        for currency in response.xpath('//td[@class="no-wrap currency-name"]/@data-sort'):
        ################################################
            yield {'currency': currency.extract()}

if __name__ == "__main__":
    # Create a crawler.
    process = CrawlerProcess({
        'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
        'DOWNLOAD_HANDLERS': {'s3': None},
        'LOG_ENABLED': True
    })
    # Initialise the crawler with the spider.
    process.crawl(uoc_spider)
    # Launch the spider.
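    # Note: to persist the scraped items, one could additionally pass a feed-export
    # setting to the CrawlerProcess above, e.g. 'FEEDS': {'currencies.json': {'format': 'json'}}
    # on recent Scrapy releases, or 'FEED_FORMAT': 'json' with 'FEED_URI': 'currencies.json'
    # on older ones; the file name 'currencies.json' is only an example.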
process.start()2019-11-16 13:49:08 [scrapy.utils.log] INFO: Scrapy 1.7.4 started (bot: scrapybot) 2019-11-16 13:49:08 [scrapy.utils.log] INFO: Versions: lxml 4.4.1.0, libxml2 2.9.9, cssselect 1.1.0, parsel 1.5.2, w3lib 1.21.0, Twisted 19.7.0, Python 3.7.3 (default, Mar 27 2019, 09:23:15) - [Clang 10.0.1 (clang-1001.0.46.3)], pyOpenSSL 19.0.0 (OpenSSL 1.1.1d 10 Sep 2019), cryptography 2.8, Platform Darwin-19.0.0-x86_64-i386-64bit 2019-11-16 13:49:08 [scrapy.crawler] INFO: Overridden settings: {'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'} 2019-11-16 13:49:08 [scrapy.extensions.telnet] INFO: Telnet Password: 2019-11-16 13:49:08 [scrapy.middleware] INFO: Enabled extensions: ['scrapy.extensions.corestats.CoreStats', 'scrapy.extensions.telnet.TelnetConsole', 'scrapy.extensions.memusage.MemoryUsage', 'scrapy.extensions.logstats.LogStats'] 2019-11-16 13:49:08 [scrapy.middleware] INFO: Enabled downloader middlewares: ['scrapy.downloadermiddlewares.httpauth.HttpAuthM[...]Carregando dadosdata_df = pd.read_csv(PATH) data_dfDemonstração básica de funcionalidadesdata_df.head() data_df.tail() data_df.columns data_df.index name_df = data_df.set_index("Name") name_df name_df.index # Analisando colunas específicas data_df["Name"][:10] # Selecionando multiplas colunas new_data_df = data_df[["Name", "Survived"]] new_data_df # Removendo colunas data_df.drop(columns=["PassengerId", "Ticket"]) # data_df.drop(columns=["PassengerId", "Ticket"], inplace=True) # Indexando example = data_df.iloc[0] example example["Fare"] # Indexando for i in range(len(data_df)): print(data_df.iloc[i]) if i == 5: break # Indexação usando strings name_df.loc["Braund, Mr. "]Operações sobre colunas de um DataFramedata_df["Fare"].describe() data_df["Fare"].mean() data_df["Sex"].describe() data_df["Sex"].unique() len(data_df["Cabin"].unique()) # Explicação sobre numpy data_df["Fare"] + 100 data_df["Fare"] + 2*data_df["Fare"] data_df["Sex"] + "OQWNUDJU" data_df["Sex"][:5] # Select data_df["Sex"] == "male" is_male = data_df["Sex"] == "male" data_df[is_male] is_male.sum()Consultando DataFrames Estabelecendo perguntas para as consultas:- Quantos passageiros sobreviveram?- Quantos passageiros são mulheres? Quantos homens?- Quantos passageiros sao menores de idade? (Exercício)- Qual a idade média dos passageiros? - Faixa etária que mais sobreviveu?- Classe de passageiros que mais sobreviveu?- Idade média de cada classe de passageiros?- Preço médio da tarifa por classe? Quantos passageiros sobreviveram?# Jeito fácil survived = data_df["Survived"] == 1 survived.sum() data_df[survived]Quantos passageiros são mulheres? Quantos homens?# "Select" no pandas is_male = data_df["Sex"] == "male" is_female = data_df["Sex"] == "female" is_male.sum(), is_female.sum() data_df[is_male].head(10) data_df[is_female].head(10)Quantos passageiros sao menores de idade? 
(Exercício)is_underage = data_df["Age"] < 18 is_underage.sum() data_df[is_underage].tail(10)Qual a idade média dos passageiros?data_df["Age"].mean()Qual a faixa etária que teve mais sobreviventes?survived_df = data_df[data_df["Survived"] == 1] survived_df["Age"].describe() survived_df["Age"].plot.hist()Classe de passageiros que mais sobreviveu?survived_df.head() survived_df["Pclass"].unique() (survived_df["Pclass"] == 3).sum() survived_df.groupby("Pclass").count()Qual é a idade média de cada classe de passageiros?data_df[["Pclass", "Age"]].groupby("Pclass").mean() data_df[["Pclass", "Age"]].groupby("Pclass").max()Qual o preço médio das tarifas por classe?data_df[["Pclass", "Fare"]].groupby("Pclass").mean() data_df[["Pclass", "Fare"]].groupby("Pclass").max()Desafio: Predizendo sobrevidentes baseado no gênero do passageiro# Cirando uma nova coluna inicializada com zeros data_df["Prediction"] = 0 data_df.head(5) sex = "female" # Primeira forma for i in data_df.index: if data_df.loc[i, "Sex"] == sex: data_df.loc[i, "Prediction"] = 1 data_df.head() # Segunda forma data_df["Prediction"] = data_df["Sex"] data_df["Prediction"].replace({"male": 0, "female": 1}, inplace=True) data_df.head(5) corrects = data_df["Survived"] == data_df["Prediction"] corrects.sum() / len(data_df)Linear Regression Methoddf = df.sort_index() # Get sizes of each of the datasets num_cv = int(cv_size*len(df)) num_test = int(test_size*len(df)) num_train = len(df) - num_cv - num_test print("num_train = " + str(num_train)) print("num_cv = " + str(num_cv)) print("num_test = " + str(num_test)) # Split into train, cv, and test train = df[:num_train] cv = df[num_train:num_train+num_cv] train_cv = df[:num_train+num_cv] test = df[num_train+num_cv:] print("train.shape = " + str(train.shape)) print("cv.shape = " + str(cv.shape)) print("train_cv.shape = " + str(train_cv.shape)) print("test.shape = " + str(test.shape)) # Plot adjusted close over time rcParams['figure.figsize'] = 18, 8 # width 10, height 8 ax = train.plot(x='Date', y='Close', style='b-', grid=True) ax = cv.plot(x='Date', y='Close', style='y-', grid=True, ax=ax) ax = test.plot(x='Date', y='Close', style='g-', grid=True, ax=ax) ax.legend(['train', 'dev', 'test']) ax.set_xlabel("Date") ax.set_ylabel("Price") RMSE = [] R2 = [] mape = [] for N in range(1, Nmax+1): # N is no. of samples to use to predict the next value est_list = get_preds_lin_reg(train_cv, 'Close', N, 0, num_train) cv['est' + '_N' + str(N)] = est_list RMSE.append(math.sqrt(mean_squared_error(est_list, cv['Close']))) R2.append(r2_score(cv['Close'], est_list)) mape.append(mf.get_mape(cv['Close'], est_list)) print('RMSE = ' + str(RMSE)) print('R2 = ' + str(R2)) print('MAPE = ' + str(mape)) cv.head() # Plot RMSE versus N rcParams.update({'font.size': 14}) plt.figure(figsize=(12, 8), dpi=80) plt.plot(range(1, Nmax+1), RMSE, 'x-') plt.grid() plt.xlabel('N') plt.ylabel('RMSE') plt.xlim([2, 30]) # Plot R2 versus N. Note for R2 larger better. rcParams.update({'font.size': 14}) plt.figure(figsize=(12, 8), dpi=80) plt.plot(range(1, Nmax+1), R2, 'x-') plt.grid() plt.xlabel('N') plt.ylabel('R2') # Plot MAPE versus N. Note for MAPE smaller better. 
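# Note: get_preds_lin_reg and mf.get_mape are defined elsewhere in the original
# notebook. The sketch below shows what they are assumed to do here: fit a straight
# line to the N closing prices preceding each prediction point, extrapolate one step
# ahead, and score with the mean absolute percentage error. The *_sketch names and the
# reading of the third/fourth call arguments as a prediction floor and a start index
# are our assumptions, not the original implementation.
import numpy as np
from sklearn.linear_model import LinearRegression

def get_preds_lin_reg_sketch(df, target_col, N, pred_min, offset):
    # Predict df[target_col][offset:], one value at a time, from the N previous values.
    regr = LinearRegression(fit_intercept=True)
    values = df[target_col].values
    preds = []
    for i in range(offset, len(df)):
        X_train = np.arange(N).reshape(-1, 1)        # e.g. [0 1 2 3 4]
        y_train = values[i - N:i].reshape(-1, 1)     # the N closes before day i
        regr.fit(X_train, y_train)
        pred = regr.predict(np.array([[N]]))[0][0]   # extrapolate to step N
        preds.append(max(pred, pred_min))            # clip at the assumed floor
    return np.array(preds)

def get_mape_sketch(y_true, y_pred):
    # Mean absolute percentage error, in percent.
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100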
plt.figure(figsize=(12, 8), dpi=80) plt.plot(range(1, Nmax+1), mape, 'x-') plt.grid() plt.xlabel('N') plt.ylabel('MAPE') from datetime import date, datetime, time, timedelta # Specify the day you are interested in day = pd.Timestamp(date(2018, 8, 20)) # Specify the maximum N you want to plot (If Nmax2 is too large it gets very cluttered) Nmax2 = 5 df_temp = cv[cv['Date'] <= day] plt.figure(figsize=(12, 8), dpi=80) plt.plot(range(1,Nmax2+2), df_temp[-Nmax2-1:]['Close'], 'bx-') plt.plot(Nmax2+1, df_temp[-1:]['Close'], 'ys-') legend_list = ['Close', 'actual_value'] # Plot the linear regression lines and the predictions color_list = ['r', 'g', 'k', 'y', 'm', 'c', '0.75'] marker_list = ['x', 'x', 'x', 'x', 'x', 'x', 'x'] regr = LinearRegression(fit_intercept=True) # Create linear regression object for N in range(5, Nmax2+1): # Plot the linear regression lines X_train = np.array(range(len(df_temp['Close'][-N-1:-1]))) # e.g. [0 1 2 3 4] y_train = np.array(df_temp['Close'][-N-1:-1]) # e.g. [2944 3088 3226 3335 3436] X_train = X_train.reshape(-1, 1) y_train = y_train.reshape(-1, 1) regr.fit(X_train, y_train) # Train the model y_est = regr.predict(X_train) # Get linear regression line plt.plot(range(Nmax2+1-N,Nmax2+2), np.concatenate((y_est, np.array(df_temp['est_N'+str(N)][-1:]).reshape(-1,1))), color=color_list[N%len(color_list)], marker=marker_list[N%len(marker_list)]) legend_list.append('est_N'+str(N)+'_lr') # Plot the predictions plt.plot(Nmax2+1, df_temp['est_N'+str(N)][-1:], color=color_list[N%len(color_list)], marker='o') legend_list.append('est_N'+str(N)) plt.grid() plt.xlabel('timestep') plt.ylabel('Price') plt.legend(legend_list, bbox_to_anchor=(1.05, 1)) matplotlib.rcParams.update({'font.size': fontsize}) # Plot adjusted close over time rcParams['figure.figsize'] = 10, 8 # width 10, height 8 ax = train.plot(x='Date', y='Close', style='bx-', grid=True) ax = cv.plot(x='Date', y='Close', style='yx-', grid=True, ax=ax) ax = test.plot(x='Date', y='Close', style='gx-', grid=True, ax=ax) ax = cv.plot(x='Date', y='est_N1', style='rx-', grid=True, ax=ax) ax = cv.plot(x='Date', y='est_N5', style='mx-', grid=True, ax=ax) ax.legend(['train', 'dev', 'test', 'predictions with N=1', 'predictions with N=5']) ax.set_xlabel("date") ax.set_ylabel("USD") ax.set_xlim([date(2018, 11, 1), date(2018, 12, 30)]) ax.set_ylim([120, 240]) ax.set_title('Zoom in to dev set') optN = 1 est_list = get_preds_lin_reg(df, 'Close', optN, 0, num_train+num_cv) test['est' + '_N' + str(optN)] = est_list print("RMSE = %0.3f" % math.sqrt(mean_squared_error(est_list, test['Close']))) print("R2 = %0.3f" % r2_score(test['Close'], est_list)) print("MAPE = %0.3f%%" % mf.get_mape(test['Close'], est_list)) test.head() # Plot adjusted close over time rcParams['figure.figsize'] = 18, 8 # width 10, height 8 ax = train.plot(x='Date', y='Close', style='b-', grid=True) ax = cv.plot(x='Date', y='Close', style='y-', grid=True, ax=ax) ax = test.plot(x='Date', y='Close', style='g-', grid=True, ax=ax) ax = test.plot(x='Date', y='est_N1', style='r-', grid=True, ax=ax) ax.legend(['train', 'dev', 'test', 'predictions with N_opt=1']) ax.set_xlabel("date") ax.set_ylabel("USD") # Plot adjusted close over time rcParams['figure.figsize'] = 18, 8 # width 10, height 8 ax = train.plot(x='Date', y='Close', style='bx-', grid=True) ax = cv.plot(x='Date', y='Close', style='yx-', grid=True, ax=ax) ax = test.plot(x='Date', y='Close', style='gx-', grid=True, ax=ax) ax = test.plot(x='Date', y='est_N1', style='rx-', grid=True, ax=ax) ax.legend(['train', 'dev', 'test', 
'predictions with N_opt=1']) ax.set_xlabel("date") ax.set_ylabel("USD") ax.set_xlim([date(2019, 5, 1), date(2019, 6, 30)]) ax.set_ylim([120, 240]) ax.set_title('Zoom in to test set') # Plot adjusted close over time, only for test set rcParams['figure.figsize'] = 18, 8 # width 10, height 8 rcParams.update({'font.size': 14}) ax = test.plot(x='Date', y='Close', style='gx-', grid=True) ax = test.plot(x='Date', y='est_N1', style='rx-', grid=True, ax=ax) ax.legend(['test', 'predictions using linear regression'], loc='upper left') ax.set_xlabel("date") ax.set_ylabel("USD") ax.set_xlim([date(2019, 1, 2), date(2019, 7, 3)]) ax.set_ylim([120, 240]) test.tail()Step 0.1: Getting sample points for one-time partial radiussamples = 1000 sample_points = [] samples_indeces = [] t0 = time.time() for i in np.arange(samples): random_object = np.random.choice(point_collection_indices) object_name = new_contiguous_point_collection[random_object][0] pointCloud = new_contiguous_point_collection[random_object][1] triangles = new_contiguous_point_collection[random_object][2] triangle_index = np.random.choice(np.arange(len(triangles))) vertex_index = triangles[triangle_index,1] original_vertex = pointCloud[vertex_index] sample_points.append([ random_object, object_name, original_vertex ]) samples_indeces.append(random_object) print("Done generating",len(sample_points),"samples in {:.3f} seconds.".format(time.time()-t0)) with open('sample_points.pickle','wb') as f: pickle.dump(sample_points,f) plt.title("Distribution of the sample spaces") plt.xlabel("Count") plt.ylabel("Sample space") plt.hist(samples_indeces,bins = np.arange(0,8))Step 0.2: Creating a synthetic set of successive partial spacesSimilar to the partial case above, we use the same sample points, i.e. centroids, for successive releases but will only vary the size of the partial space for every release.samples = 100 releases = 100 nearby_range = 2.0 t1 = time.time() successive_sample_points = [] for i in np.arange(samples):# random_object = np.random.choice(point_collection_indices) #reference_ransac = np.random.randint(5) object_name = new_contiguous_point_collection[random_object][0] pointCloud = new_contiguous_point_collection[random_object][1] triangles = new_contiguous_point_collection[random_object][2] current_vertex = pointCloud[np.random.randint(len(pointCloud))] growing_point_collection_vertices = [[ random_object, object_name, current_vertex ]] nbrs = NearestNeighbors(n_neighbors=min(20000,len(pointCloud)),algorithm='kd_tree').fit(pointCloud[:,:3]) for release in np.arange(releases-1): distances, indices = nbrs.kneighbors([current_vertex[:3]]) cand_indices = indices[0,np.where(distances[0]<(nearby_range))[0]] distribution = np.sort(abs(np.random.normal(nearby_range*0.5,nearby_range*0.3,len(cand_indices)))) current_vertex = pointCloud[ np.random.choice( cand_indices, p = distribution/np.sum(distribution) ) ] growing_point_collection_vertices.append([ random_object, object_name, current_vertex ]) successive_sample_points.append([ [random_object, object_name], growing_point_collection_vertices ]) if i % 33 == 1: print(" Done with successive {} sample_points extraction in {:.3f} seconds".format(i,time.time()-t1)) t1 = time.time() with open('successive_sample_points.pickle','wb') as f: pickle.dump(successive_sample_points,f) t1 = time.time() try: with open('successive_sample_points.pickle','rb') as f: successive_point_collection = pickle.load(f) samples = len(successive_point_collection) releases = len(successive_point_collection[0][1]) 
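# Note: based on how the pickle was written above, each element of
# successive_point_collection is expected to have the form
# [[random_object, object_name], [[random_object, object_name, vertex_1], ..., [random_object, object_name, vertex_R]]]
# i.e. index 0 holds the object metadata and index 1 the per-release list of sampled
# vertices, which is why len(successive_point_collection[0][1]) gives the number of
# releases. (The print below also assumes a `radius` variable from an earlier cell is
# still in scope.)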
print(samples,"samples for radius",radius) print(releases,"releases each") except Exception as e1: print(e1) successive_sample_points_per_release = [[]] for k, [obj_, growing_point_collection] in enumerate(successive_point_collection): t2 = time.time() successive_sample_points = [] reference_ransac = np.random.randint(5) for i, obj_meta in enumerate(growing_point_collection): successive_sample_points.append([obj_meta, reference_ransac]) try: successive_sample_points_per_release[i].append(successive_sample_points) except: successive_sample_points_per_release.append([successive_sample_points]) #print(len(successive_sample_points_per_release[i]),len(successive_sample_points_per_release[i][k])) with open('successive_sample_points_per_release.pickle','wb') as f: pickle.dump(successive_sample_points_per_release,f) print(" Done with successive sample_points extraction in {:.3f} seconds".format(time.time()-t1))Step 0.3: Create submaps for pointnetvlad using same samplesspatial_span = 2.0 interval = 0.5 num_points = 4096 cutoff = 0.5 with open('sample_points.pickle','rb') as f: sample_points = pickle.load(f)Step 0.3.1: Generate the reference dataset using the raw datasetbaseline_path = 'pointnetvlad_submaps/' if not os.path.exists(baseline_path): os.mkdir(baseline_path) raw_path = os.path.join(baseline_path,"raw_dataset") raw_pc_path = os.path.join(raw_path,"pointcloud_4m_0.25") if not os.path.exists(raw_path): os.mkdir(raw_path) if not os.path.exists(raw_pc_path): os.mkdir(raw_pc_path) t0 = time.time() csvfile = open(raw_path+"/pointcloud_centroids_4m_0.25.csv",'w',newline = '') csv_writer = csv.writer(csvfile, delimiter = ',') csv_writer.writerow(['timestamp', 'northing', 'easting','alting','obj']) for obj_, [object_name, pointCloud, triangles] in enumerate(new_contiguous_point_collection): if object_name == "Reception-Data61-L5.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) + 50 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 0 elif object_name == "Driveway.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) - 25 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) - 50 elif object_name == "Apartment.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) + 25 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) - 50 elif object_name == "Workstations-Data61-L4.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) - 50 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 0 elif object_name == "Kitchen-Data61-L4.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) + 0 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 0 elif object_name == "HallWayToKitchen-Data61-L4.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) - 25 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 50 elif object_name == "StairWell-Data61-L4.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) + 25 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 50 new_Y = pointCloud[:,1] new_object_pointcloud = np.stack((new_X,new_Z,new_Y)).T nbrs = NearestNeighbors(n_neighbors=20000, algorithm='kd_tree').fit(new_object_pointcloud) round_new_pointcloud = 0.25*100*np.around((0.01/0.25)*new_object_pointcloud,decimals=2) unq_round_pointcloud = np.unique(round_new_pointcloud[:,:3],axis = 0) raw_centroids = unq_round_pointcloud#+np.random.normal(0,0.25,unq_round_pointcloud.shape) for northing, easting, alting in raw_centroids: # Getting the points around our centroid defined by [northing, easting] distances, indices = nbrs.kneighbors([[northing, easting, alting]]) #if max(distances[0]) < 
0.5*spatial_span: continue submap_pointcloud = new_object_pointcloud[indices[0,np.where(distances[0,:]<=spatial_span)[0]]] if len(submap_pointcloud) == 0: continue # Centering and rescaling submap_pointcloud = (submap_pointcloud - [northing, easting, alting])/spatial_span if len(submap_pointcloud) > num_points: submap_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points)] elif len(submap_pointcloud) < num_points and len(submap_pointcloud) >= cutoff*num_points : #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud))] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) elif len(submap_pointcloud) < cutoff*num_points : #continue #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud), True)] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) timestamp = int(10**16*(time.time())) csv_writer.writerow([timestamp,northing,easting,alting,obj_]) with open(raw_pc_path+'/{}.pickle'.format(timestamp),'wb') as f: pickle.dump(submap_pointcloud.T,f) print("Done with submap generation for object ({}) {} in {:.3f} seconds".format(obj_,object_name,time.time()-t0)) csvfile.close()Step 0.3.2: Generate a reference dataset using a sample RANSAC dataset# First, we need to create the reference dataset using the raw dataset and a (randomly chosen) ransac dataset. num_points = 4096 baseline_path = 'pointnetvlad_submaps/' if not os.path.exists(baseline_path): os.mkdir(baseline_path) try: trial = np.random.randint(5) with open("../ransac_pc/ransac_point_collection_{}.pickle".format(trial),'rb') as f: ransac_trial_point_collection = pickle.load(f) object_, pointCloud_, tri_ = ransac_trial_point_collection[0] print("Chosen ransac trial",trial) except Exception as ex: print("Error:",ex) raw_path = os.path.join(baseline_path,"ransac_dataset") raw_pc_path = os.path.join(raw_path,"pointcloud_4m_0.25") if not os.path.exists(raw_path): os.mkdir(raw_path) if not os.path.exists(raw_pc_path): os.mkdir(raw_pc_path) t0 = time.time() csvfile = open(raw_path+"/pointcloud_centroids_4m_0.25.csv",'w',newline = '') csv_writer = csv.writer(csvfile, delimiter = ',') csv_writer.writerow(['timestamp', 'northing', 'easting','alting','obj']) for obj_, [object_name, pointCloud, triangles] in enumerate(ransac_trial_point_collection): if object_name == "Reception-Data61-L5.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) + 50 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 0 elif object_name == "Driveway.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) - 25 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) - 50 elif object_name == "Apartment.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) + 25 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) - 50 elif object_name == "Workstations-Data61-L4.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) - 50 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 0 elif object_name == "Kitchen-Data61-L4.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) + 0 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 0 elif object_name == "HallWayToKitchen-Data61-L4.obj": new_X = 
pointCloud[:,0] - np.mean(pointCloud[:,0]) - 25 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 50 elif object_name == "StairWell-Data61-L4.obj": new_X = pointCloud[:,0] - np.mean(pointCloud[:,0]) + 25 new_Z = pointCloud[:,2] - np.mean(pointCloud[:,2]) + 50 new_Y = pointCloud[:,1] new_object_pointcloud = np.stack((new_X,new_Z,new_Y)).T nbrs = NearestNeighbors(n_neighbors=20000, algorithm='kd_tree').fit(new_object_pointcloud) round_new_pointcloud = 0.25*100*np.around((0.01/0.25)*new_object_pointcloud,decimals=2) unq_round_pointcloud = np.unique(round_new_pointcloud[:,:3],axis = 0) raw_centroids = unq_round_pointcloud#+np.random.normal(0,0.25,unq_round_pointcloud.shape) for northing, easting, alting in raw_centroids: # Getting the points around our centroid defined by [northing, easting] distances, indices = nbrs.kneighbors([[northing, easting, alting]]) #if max(distances[0]) < 0.5*spatial_span: continue submap_pointcloud = new_object_pointcloud[indices[0,np.where(distances[0,:]<=spatial_span)[0]]] if len(submap_pointcloud) == 0: continue # Centering and rescaling submap_pointcloud = (submap_pointcloud - [northing, easting, alting])/spatial_span if len(submap_pointcloud) > num_points: submap_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points)] elif len(submap_pointcloud) < num_points and len(submap_pointcloud) >= cutoff*num_points : #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud))] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) elif len(submap_pointcloud) < cutoff*num_points : #continue #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud), True)] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) timestamp = int(10**16*(time.time())) csv_writer.writerow([timestamp,northing,easting,alting,obj_]) with open(raw_pc_path+'/{}.pickle'.format(timestamp),'wb') as f: pickle.dump(submap_pointcloud.T,f) print("Done with submap generation for object ({}) {} in {:.3f} seconds".format(obj_,object_name,time.time()-t0)) csvfile.close()Step 0.3.3: Generate the test submaps: - using Raw spaces for validation - using Ransac spaces for evaluation - using Ransac spaces for the successive case# One-time releases Raw partial spaces baseline_path = 'pointnetvlad_submaps/' if not os.path.exists(baseline_path): os.mkdir(baseline_path) for radius in np.arange(0.25,3.1,0.25): per_radius_partial_length = [] t1 = time.time() partial_path = os.path.join(baseline_path,"raw_partial_radius_"+str(radius)+"_"+str(num_points))+"_unassisted" pointcloud_partial_path = os.path.join(partial_path,"pointcloud_4m") #pointcloud_partial_bin_path = os.path.join(partial_path,"pointcloud_4m_npy") if not os.path.exists(partial_path): os.mkdir(partial_path) if not os.path.exists(pointcloud_partial_path): os.mkdir(pointcloud_partial_path) #if not os.path.exists(pointcloud_partial_bin_path): os.mkdir(pointcloud_partial_bin_path) print(" ",pointcloud_partial_path) #""" csvfile = open(partial_path+"/pointcloud_centroids_4m.csv",'w',newline = '') csv_writer = csv.writer(csvfile, delimiter = ',') csv_writer.writerow(['timestamp', 'northing', 
'easting', 'alting','obj']) #""" count = 0 for obj_, object_name, original_vertex in sample_points: new_partial_pointcloud = [] new_vX = [] new_vZ = [] try: object_, ransac_pointCloud, tri_ = new_contiguous_point_collection[int(obj_)] ransac_nbrs = NearestNeighbors(n_neighbors=min(20000,len(ransac_pointCloud)), algorithm='kd_tree').fit(ransac_pointCloud[:,:3]) dist_, ind_ = ransac_nbrs.kneighbors([original_vertex[:3]]) pointCloud = ransac_pointCloud[ind_[0,np.where(dist_[0,:]<=radius)[0]]] except: print("Can't get ransac samples for",trial,obj_meta[0],dist_.shape,ind_.shape) continue #if len(gen_planes) == 0: continue if len(pointCloud) == 0: continue if object_name == "Reception-Data61-L5.obj": new_X = pointCloud[:,0] + 50 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] + 50 new_vZ = original_vertex[2] + 0 elif object_name == "Driveway.obj": new_X = pointCloud[:,0] - 25 new_Z = pointCloud[:,2] - 50 new_vX = original_vertex[0] - 25 new_vZ = original_vertex[2] - 50 elif object_name == "Apartment.obj": new_X = pointCloud[:,0] + 25 new_Z = pointCloud[:,2] - 50 new_vX = original_vertex[0] + 25 new_vZ = original_vertex[2] - 50 elif object_name == "Workstations-Data61-L4.obj": new_X = pointCloud[:,0] - 50 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] - 50 new_vZ = original_vertex[2] + 0 elif object_name == "Kitchen-Data61-L4.obj": new_X = pointCloud[:,0] + 0 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] + 0 new_vZ = original_vertex[2] + 0 elif object_name == "HallWayToKitchen-Data61-L4.obj": new_X = pointCloud[:,0] - 25 new_Z = pointCloud[:,2] + 50 new_vX = original_vertex[0] - 25 new_vZ = original_vertex[2] + 50 elif object_name == "StairWell-Data61-L4.obj": new_X = pointCloud[:,0] + 25 new_Z = pointCloud[:,2] + 50 new_vX = original_vertex[0] + 25 new_vZ = original_vertex[2] + 50 else: print("Error:",obj_meta) new_Y = pointCloud[:,1] new_partial_pointcloud = np.stack((new_X,new_Z,new_Y)).T max_known_span = max(np.amax(new_partial_pointcloud, axis = 0) - np.amin(new_partial_pointcloud, axis = 0)) nbrs = NearestNeighbors(n_neighbors=min(2*num_points,len(new_partial_pointcloud)), algorithm='kd_tree').fit(new_partial_pointcloud) # Get submap "centroids" by quantizing by 0.25m, i.e. round then unique if max_known_span > 3*spatial_span: round_new_partial_pointcloud = 100*np.around(0.01*new_partial_pointcloud,decimals=2) unq_round_partial_pointcloud = np.unique(round_new_partial_pointcloud[:,:3],axis = 0) # raw_partial_centroids = unq_round_partial_pointcloud c_nbrs = NearestNeighbors(n_neighbors = min(25,len(raw_partial_centroids)), algorithm='kd_tree').fit(raw_partial_centroids) c_dist, c_ind = c_nbrs.kneighbors(raw_partial_centroids) ia1, ia2 = np.where(c_dist < 1.73) dist_bins = np.bincount(ia1) max_dist = max(np.bincount(ia1)) raw_partial_centroids = raw_partial_centroids[[i for i, j in enumerate(dist_bins) if j == max_dist]] raw_partial_centroids = raw_partial_centroids+np.random.normal(0,interval,raw_partial_centroids.shape) else: # Correcting this, because the attacker is supposed to not know the true centroid # and has to estimate it instead. 
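# Note: in the large-span branch above, the candidate centroids appear to be the
# grid-quantized points that have the largest number of other candidates within
# 1.73 (~sqrt(3)) units, i.e. those lying in the densest regions, and they are then
# jittered with Gaussian noise of standard deviation `interval`. In the small-span
# branch below, the centroid is simply estimated as the mean of the released partial
# point cloud, since the attacker is not assumed to know the true centroid.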
#raw_partial_centroids = [[new_vX, new_vZ, original_vertex[1]]] raw_partial_centroids = [np.mean(new_partial_pointcloud, axis = 0)] for northing, easting, alting in raw_partial_centroids: # Getting the points around our centroid defined by [northing, easting] distances, indices = nbrs.kneighbors([[northing, easting, alting]]) #if max(distances[0]) < 0.5*spatial_span: continue submap_pointcloud = new_partial_pointcloud[indices[0,np.where(distances[0,:]<=spatial_span)[0]]] if len(submap_pointcloud) == 0: continue # Centering and rescaling submap_pointcloud = (submap_pointcloud - [northing, easting, alting])/spatial_span if len(submap_pointcloud) > num_points: submap_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points)] elif len(submap_pointcloud) < num_points and len(submap_pointcloud) >= cutoff*num_points : #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud))] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) elif len(submap_pointcloud) < cutoff*num_points : #continue #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud), True)] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) timestamp = int(10**16*(time.time())) csv_writer.writerow([timestamp,northing,easting,alting,obj_]) with open(pointcloud_partial_path+'/{}.pickle'.format(timestamp),'wb') as f: pickle.dump(submap_pointcloud.T,f) count += 1 print(" Done with submap generation for radius {} ( {} samples) in {:.3f} seconds".format(radius,count,time.time()-t1)) csvfile.close() # One-time releases RANSAC partial spaces baseline_path = 'pointnetvlad_submaps/' if not os.path.exists(baseline_path): os.mkdir(baseline_path) for radius in np.arange(0.25,3.1,0.25): t1 = time.time() partial_path = os.path.join(baseline_path,"ransac_partial_radius_"+str(radius)+"_"+str(num_points))+"_unassisted" pointcloud_partial_path = os.path.join(partial_path,"pointcloud_4m") #pointcloud_partial_bin_path = os.path.join(partial_path,"pointcloud_4m_npy") if not os.path.exists(partial_path): os.mkdir(partial_path) if not os.path.exists(pointcloud_partial_path): os.mkdir(pointcloud_partial_path) #if not os.path.exists(pointcloud_partial_bin_path): os.mkdir(pointcloud_partial_bin_path) print(" ",pointcloud_partial_path) #""" csvfile = open(partial_path+"/pointcloud_centroids_4m.csv",'w',newline = '') csv_writer = csv.writer(csvfile, delimiter = ',') csv_writer.writerow(['timestamp', 'northing', 'easting', 'alting','obj']) #""" count = 0 for obj_, object_name, original_vertex in sample_points: new_partial_pointcloud = [] new_vX = [] new_vZ = [] try: trial = np.random.randint(5) with open("../ransac_pc/ransac_point_collection_{}.pickle".format(trial),'rb') as f: ransac_trial_point_collection = pickle.load(f) object_, ransac_pointCloud, tri_ = ransac_trial_point_collection[int(obj_)] ransac_nbrs = NearestNeighbors(n_neighbors=min(20000,len(ransac_pointCloud)), algorithm='kd_tree').fit(ransac_pointCloud[:,:3]) dist_, ind_ = ransac_nbrs.kneighbors([original_vertex[:3]]) pointCloud = ransac_pointCloud[ind_[0,np.where(dist_[0,:]<=radius)[0]]] except: print("Can't get ransac samples 
for",trial,obj_meta[0]) continue #if len(gen_planes) == 0: continue if len(pointCloud) == 0: continue if object_name == "Reception-Data61-L5.obj": new_X = pointCloud[:,0] + 50 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] + 50 new_vZ = original_vertex[2] + 0 elif object_name == "Driveway.obj": new_X = pointCloud[:,0] - 25 new_Z = pointCloud[:,2] - 50 new_vX = original_vertex[0] - 25 new_vZ = original_vertex[2] - 50 elif object_name == "Apartment.obj": new_X = pointCloud[:,0] + 25 new_Z = pointCloud[:,2] - 50 new_vX = original_vertex[0] + 25 new_vZ = original_vertex[2] - 50 elif object_name == "Workstations-Data61-L4.obj": new_X = pointCloud[:,0] - 50 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] - 50 new_vZ = original_vertex[2] + 0 elif object_name == "Kitchen-Data61-L4.obj": new_X = pointCloud[:,0] + 0 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] + 0 new_vZ = original_vertex[2] + 0 elif object_name == "HallWayToKitchen-Data61-L4.obj": new_X = pointCloud[:,0] - 25 new_Z = pointCloud[:,2] + 50 new_vX = original_vertex[0] - 25 new_vZ = original_vertex[2] + 50 elif object_name == "StairWell-Data61-L4.obj": new_X = pointCloud[:,0] + 25 new_Z = pointCloud[:,2] + 50 new_vX = original_vertex[0] + 25 new_vZ = original_vertex[2] + 50 else: print("Error:",obj_meta) new_Y = pointCloud[:,1] new_partial_pointcloud = np.stack((new_X,new_Z,new_Y)).T max_known_span = max(np.amax(new_partial_pointcloud, axis = 0) - np.amin(new_partial_pointcloud, axis = 0)) nbrs = NearestNeighbors(n_neighbors=min(2*num_points,len(new_partial_pointcloud)), algorithm='kd_tree').fit(new_partial_pointcloud) # Get submap "centroids" by quantizing by 0.25m, i.e. round then unique if max_known_span > 3*spatial_span: round_new_partial_pointcloud = 100*np.around(0.01*new_partial_pointcloud,decimals=2) unq_round_partial_pointcloud = np.unique(round_new_partial_pointcloud[:,:3],axis = 0) # raw_partial_centroids = unq_round_partial_pointcloud c_nbrs = NearestNeighbors(n_neighbors = min(25,len(raw_partial_centroids)), algorithm='kd_tree').fit(raw_partial_centroids) c_dist, c_ind = c_nbrs.kneighbors(raw_partial_centroids) ia1, ia2 = np.where(c_dist < 1.73) dist_bins = np.bincount(ia1) max_dist = max(np.bincount(ia1)) raw_partial_centroids = raw_partial_centroids[[i for i, j in enumerate(dist_bins) if j == max_dist]] raw_partial_centroids = raw_partial_centroids+np.random.normal(0,interval,raw_partial_centroids.shape) else: # Correcting this, because the attacker is supposed to not know the true centroid # and has to estimate it instead. 
#raw_partial_centroids = [[new_vX, new_vZ, original_vertex[1]]] raw_partial_centroids = [np.mean(new_partial_pointcloud, axis = 0)] for northing, easting, alting in raw_partial_centroids: # Getting the points around our centroid defined by [northing, easting] distances, indices = nbrs.kneighbors([[northing, easting, alting]]) #if max(distances[0]) < 0.5*spatial_span: continue submap_pointcloud = new_partial_pointcloud[indices[0,np.where(distances[0,:]<=spatial_span)[0]]] if len(submap_pointcloud) == 0: continue # Centering and rescaling submap_pointcloud = (submap_pointcloud - [northing, easting, alting])/spatial_span if len(submap_pointcloud) > num_points: submap_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points)] elif len(submap_pointcloud) < num_points and len(submap_pointcloud) >= cutoff*num_points : #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud))] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) elif len(submap_pointcloud) < cutoff*num_points : #continue #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud), True)] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) timestamp = int(10**16*(time.time())) csv_writer.writerow([timestamp,northing,easting,alting,obj_]) with open(pointcloud_partial_path+'/{}.pickle'.format(timestamp),'wb') as f: pickle.dump(submap_pointcloud.T,f) count += 1 print(" Done with submap generation for radius {} ( {} samples) in {:.3f} seconds".format(radius,count,time.time()-t1)) csvfile.close() # Successive release of RANSAC partial spaces baseline_path = 'pointnetvlad_submaps/' skip = 5 t0 = time.time() with open('successive_sample_points_per_release.pickle','rb') as f: successive_sample_points_per_release = pickle.load(f) for radius in np.arange(0.5,2.1,0.5): t1 = time.time() successive_path = os.path.join(baseline_path,"successive_radius_"+str(radius)) if not os.path.exists(successive_path): os.mkdir(successive_path) for i in np.arange(1,100,skip): # releases successive_release_path = os.path.join(successive_path,"release_"+str(i)) pointcloud_successive_path = os.path.join(successive_release_path,"pointcloud_4m") if not os.path.exists(successive_release_path): os.mkdir(successive_release_path) if not os.path.exists(pointcloud_successive_path): os.mkdir(pointcloud_successive_path) csvfile = open(successive_release_path+"/pointcloud_centroids_4m.csv",'w',newline = '') csv_writer = csv.writer(csvfile, delimiter = ',') csv_writer.writerow(['timestamp', 'northing', 'easting', 'alting','obj']) count = 0 for successive_sample_points_per_release_per_obj in successive_sample_points_per_release[i]: #print(" ",len(successive_sample_points_per_release_per_obj),"releases") growing_point_cloud = [] new_vX = [] new_vZ = [] ransac_pointCloud = [] for [obj_, object_name, original_vertex], reference_ransac in successive_sample_points_per_release_per_obj[:i]: try: if len(ransac_pointCloud) == 0: # if empty, open. 
This only happens at beginning with open("../ransac_pc/ransac_point_collection_{}.pickle".format(reference_ransac),'rb') as f: ransac_trial_point_collection = pickle.load(f) object_, ransac_pointCloud, tri_ = ransac_trial_point_collection[int(obj_)] ransac_nbrs = NearestNeighbors(n_neighbors=min(20000,len(ransac_pointCloud)), algorithm='kd_tree').fit(ransac_pointCloud[:,:3]) except: print("Can't get ransac samples for",i,obj_, object_name) continue dist_, ind_ = ransac_nbrs.kneighbors([original_vertex[:3]]) pointCloud = ransac_pointCloud[ind_[0,np.where(dist_[0,:]<=radius)[0]]] if len(pointCloud) == 0: continue #Regular Accumulation if len(growing_point_cloud) == 0: growing_point_cloud = pointCloud else: growing_point_cloud = np.concatenate( (growing_point_cloud,pointCloud), axis=0 ) if len(growing_point_cloud) == 0: continue pointCloud = np.unique(growing_point_cloud,axis=0) if object_name == "Reception-Data61-L5.obj": new_X = pointCloud[:,0] + 50 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] + 50 new_vZ = original_vertex[2] + 0 elif object_name == "Driveway.obj": new_X = pointCloud[:,0] - 25 new_Z = pointCloud[:,2] - 50 new_vX = original_vertex[0] - 25 new_vZ = original_vertex[2] - 50 elif object_name == "Apartment.obj": new_X = pointCloud[:,0] + 25 new_Z = pointCloud[:,2] - 50 new_vX = original_vertex[0] + 25 new_vZ = original_vertex[2] - 50 elif object_name == "Workstations-Data61-L4.obj": new_X = pointCloud[:,0] - 50 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] - 50 new_vZ = original_vertex[2] + 0 elif object_name == "Kitchen-Data61-L4.obj": new_X = pointCloud[:,0] + 0 new_Z = pointCloud[:,2] + 0 new_vX = original_vertex[0] + 0 new_vZ = original_vertex[2] + 0 elif object_name == "HallWayToKitchen-Data61-L4.obj": new_X = pointCloud[:,0] - 25 new_Z = pointCloud[:,2] + 50 new_vX = original_vertex[0] - 25 new_vZ = original_vertex[2] + 50 elif object_name == "StairWell-Data61-L4.obj": new_X = pointCloud[:,0] + 25 new_Z = pointCloud[:,2] + 50 new_vX = original_vertex[0] + 25 new_vZ = original_vertex[2] + 50 else: print("Error:",[obj_, object_name, original_vertex]) new_Y = pointCloud[:,1] new_partial_pointcloud = np.stack((new_X,new_Z,new_Y)).T nbrs = NearestNeighbors(n_neighbors=min(2*num_points,len(new_partial_pointcloud)), algorithm='kd_tree').fit(new_partial_pointcloud) # Get submap "centroids" by quantizing by 0.25m, i.e. 
round then unique #if radius > spatial_span: round_new_partial_pointcloud = 100*np.around(0.01*new_partial_pointcloud,decimals=2) unq_round_partial_pointcloud = np.unique(round_new_partial_pointcloud[:,:3],axis = 0) # partial_centroids = unq_round_partial_pointcloud+np.random.normal(0,interval,unq_round_partial_pointcloud.shape) c_nbrs = NearestNeighbors(n_neighbors = min(25,len(partial_centroids)), algorithm='kd_tree').fit(partial_centroids) c_dist, c_ind = c_nbrs.kneighbors(partial_centroids) ia1, ia2 = np.where(c_dist < 1.73) dist_bins = np.bincount(ia1) max_dist = max(np.bincount(ia1)) partial_centroids = partial_centroids[[i for i, j in enumerate(dist_bins) if j == max_dist]] for northing, easting, alting in partial_centroids: # Getting the points around our centroid defined by [northing, easting] distances, indices = nbrs.kneighbors([[northing, easting, alting]]) #if max(distances[0]) < 0.5*spatial_span: continue submap_pointcloud = new_partial_pointcloud[indices[0,np.where(distances[0,:]<=spatial_span)[0]]] if len(submap_pointcloud) == 0: continue # Centering and rescaling submap_pointcloud = (submap_pointcloud - [northing, easting, alting])/spatial_span #per_radius_partial_length.append([northing, easting, alting, len(submap_pointcloud)]) if len(submap_pointcloud) > num_points: submap_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points)] elif len(submap_pointcloud) < num_points and len(submap_pointcloud) >= cutoff*num_points : #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud))] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) elif len(submap_pointcloud) < cutoff*num_points : #continue #print(i,submap_pointcloud.shape) additional_pointcloud = submap_pointcloud[np.random.choice(len(submap_pointcloud),num_points-len(submap_pointcloud), True)] additional_pointcloud = additional_pointcloud + np.random.normal(0,0.05,additional_pointcloud.shape) submap_pointcloud = np.concatenate((submap_pointcloud,additional_pointcloud),axis = 0) timestamp = int(10**16*(time.time())) csv_writer.writerow([timestamp,northing,easting,alting,obj_]) with open(pointcloud_successive_path+'/{}.pickle'.format(timestamp),'wb') as f: pickle.dump(submap_pointcloud.T,f) count += 1 if i % 10 == 1: print(" Done with submap generation for iteration {}, radius {} ({} submaps) in {:.3f} seconds".format(i,radius,count,time.time()-t1)) t1 = time.time() csvfile.close() print(" Done with generalized submap generation for radius {} in {:.3f} seconds".format(radius,time.time()-t0)) t0 = time.time()Step 0.3.4: Building database and query files for evaluation with pointnetVLAD - the combined Raw and RANSAC referece database - for validation with one-time released Raw spaces - for testing with one-time released RANSAC spaces - for testing with successive RANSAC spacesbase_path= "pointnetvlad_submaps/"#"../partial_dataset/" construct_query_and_database_sets( base_path, ['raw_dataset', 'ransac_dataset'], "/pointcloud_4m_0.25/", "pointcloud_centroids_4m_0.25.csv")#, all_folders[index]) # For validation with raw queries. 
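# Note: construct_query_and_database_sets, construct_query_sets and
# construct_successive_query_sets are assumed to be helper functions defined earlier
# in the original notebook, in the spirit of PointNetVLAD's query-generation scripts:
# they read the centroid CSVs written above, mark nearby submaps as positives, and
# pickle the resulting query/database dictionaries. The three loops below differ only
# in which submap folders they point at (raw one-time releases, RANSAC one-time
# releases, and successive RANSAC releases).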
for radius in np.arange(0.25,3.1,0.25): partial_path = 'raw_partial_radius_'+str(radius)+"_4096_unassisted"# print(partial_path) construct_query_sets(partial_path, "/pointcloud_4m/", "pointcloud_centroids_4m.csv")#, all_folders[index]) # For the Ransac queries. for radius in np.arange(0.25,3.1,0.25): partial_path = 'ransac_partial_radius_'+str(radius)+"_4096_unassisted"# print(partial_path) construct_query_sets(partial_path, "/pointcloud_4m/", "pointcloud_centroids_4m.csv")#, all_folders[index]) # For the successive queries. successive_dir = os.path.join(base_path,"successive_queries") if not os.path.exists(successive_dir): os.mkdir(successive_dir) for radius in np.arange(0.5,2.1,0.5): successive_path = 'successive_radius_'+str(radius) for release in np.arange(1,100,5): partial_path = 'release_'+str(release) print(partial_path) construct_successive_query_sets(successive_path,partial_path, "/pointcloud_4m/", "pointcloud_centroids_4m.csv")#, all_folders[index]) #print(all_folders) #print("training:",train_folders) #Pie chart vignette===================Demo pie chart with matplotlib and style the figure.import numpy as np import matplotlib.pyplot as plt n = 20 X = np.ones(n) X[-1] *= 2 plt.pie(X, explode=X*.05, colors = ['%f' % (i/float(n)) for i in range(n)]) fig = plt.gcf() w, h = fig.get_figwidth(), fig.get_figheight() r = h / float(w) plt.xlim(-1.5, 1.5) plt.ylim(-1.5 * r, 1.5 * r) plt.xticks([]) plt.yticks([]) # Add a title and a box around it from matplotlib.patches import FancyBboxPatch ax = plt.gca() ax.add_patch(FancyBboxPatch((-0.05, .87), width=.66, height=.165, clip_on=False, boxstyle="square,pad=0", zorder=3, facecolor='white', alpha=1.0, transform=plt.gca().transAxes)) plt.text(-0.05, 1.02, " Pie Chart: plt.pie(...)\n", horizontalalignment='left', verticalalignment='top', size='xx-large', transform=plt.gca().transAxes) plt.text(-0.05, 1.01, "\n\n Make a pie chart of an array ", horizontalalignment='left', verticalalignment='top', size='large', transform=plt.gca().transAxes) plt.show()Navigation---This notebook contains a solution for the Navigation project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893). This solution was prepared on local machine under Ubuntu-20.01. Instructions for environment setup are located in README.md file. 1. Start the EnvironmentWe begin by importing some necessary packages. If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/).from collections import deque from unityagents import UnityEnvironment import numpy as np import matplotlib.pyplot as plt import time import torch %matplotlib inlineNext, if not already done, fetch environment files and start it.# fetch the Banana Linux environment if not present ![! -d "Banana_Linux"] && wget https://s3-us-west-1.amazonaws.com/udacity-drlnd/P1/Banana/Banana_Linux.zip && unzip Banana_Linux.zip env = UnityEnvironment(file_name="/data/Banana_Linux_NoVis/Banana.x86_64") brain_name = env.brain_names[0] brain = env.brains[brain_name] env_info = env.reset(train_mode=True)[brain_name]INFO:unityagents: 'Academy' started successfully! 
Unity Academy name: Academy Number of Brains: 1 Number of External Brains : 1 Lesson number : 0 Reset Parameters : Unity brain name: BananaBrain Number of Visual Observations (per agent): 0 Vector Observation space type: continuous Vector Observation space size (per agent): 37 Number of stacked Vector Observation: 1 Vector Action space type: discrete Vector Action space size (per agent): 4 Vector Action descriptions: , , ,2. Examine the State and Action SpacesThe simulation contains a single agent that navigates a large environment. At each time step, it has four actions at its disposal:- `0` - walk forward - `1` - walk backward- `2` - turn left- `3` - turn rightThe state space has `37` dimensions and contains the agent's velocity, along with ray-based perception of objects around agent's forward direction. A reward of `+1` is provided for collecting a yellow banana, and a reward of `-1` is provided for collecting a blue banana. Run the code cell below to print some information about the environment.# reset the environment env_info = env.reset(train_mode=True)[brain_name] # number of agents in the environment print('Number of agents:', len(env_info.agents)) # number of actions action_size = brain.vector_action_space_size print('Number of actions:', action_size) # examine the state space state = env_info.vector_observations[0] print('States look like:', state) state_size = len(state) print('States have length:', state_size)Number of agents: 1 Number of actions: 4 States look like: [ 0. 1. 0. 0. 0.16895212 0. 1. 0. 0. 0.20073597 1. 0. 0. 0. 0.12865657 0. 1. 0. 0. 0.14938059 1. 0. 0. 0. 0.58185619 0. 1. 0. 0. 0.16089135 0. 1. 0. 0. 0.31775284 0. 0. ] States have length: 373. Initialize DQN agentDQN learing algorithm and model is contained in the Agent class located in dqn_agent.py. The cell below initializes learning agent and prints model structure:from dqn_agent import Agent device = "cuda:0" if torch.cuda.is_available() else "cpu" agent = Agent(state_size=state_size, action_size=action_size, seed=0, device=device, use_double_dqn=True) agent.qnetwork_targetPerform a test run for a single episode and display the total score of untrained agent. Score over 13 is considered success.env_info = env.reset(train_mode=True)[brain_name] state = env_info.vector_observations[0] score = 0 for j in range(500): action = agent.act(state) # select an action env_info = env.step(action)[brain_name] # send the action to the environment next_state = env_info.vector_observations[0] # get the next state reward = env_info.rewards[0] # get the reward done = env_info.local_done[0] # see if episode has finished score += reward # update the score state = next_state # roll over the state to next time step if done: # exit loop if episode finished break print("Score: {}, steps: {}".format(score, j))Score: 0.0, steps: 2994. Train the agentNow run DQN learning algorithm and display average score over episodes benchmark.def dqn(n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995, solved_score=13): """Deep Q-Learning. 
Params ====== n_episodes (int): maximum number of training episodes max_t (int): maximum number of timesteps per episode eps_start (float): starting value of epsilon, for epsilon-greedy action selection eps_end (float): minimum value of epsilon eps_decay (float): multiplicative factor (per episode) for decreasing epsilon """ scores = [] # list containing scores from each episode scores_window = deque(maxlen=100) # last 100 scores eps = eps_start # initialize epsilon for i_episode in range(1, n_episodes+1): env_info = env.reset(train_mode=True)[brain_name] state = env_info.vector_observations[0] score = 0 for t in range(max_t): action = agent.act(state, eps) # select an action env_info = env.step(action)[brain_name] # send the action to the environment next_state = env_info.vector_observations[0] # get the next state reward = env_info.rewards[0] # get the reward done = env_info.local_done[0] # see if episode has finished agent.step(state, action, reward, next_state, done) state = next_state score += reward if done: break scores_window.append(score) # save most recent score scores.append(score) # save most recent score eps = max(eps_end, eps_decay*eps) # decrease epsilon print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="") if i_episode % 100 == 0: print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window))) if np.mean(scores_window)>=solved_score: print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window))) torch.save(agent.qnetwork_local.state_dict(), 'trained_weights.pth') break return scores start = time.time() scores = dqn() learning_took = time.time() - start print("Learning took: {}, per episode: {}".format(learning_took, learning_took/len(scores))) # plot the scores fig = plt.figure() ax = fig.add_subplot(111) plt.plot(np.arange(len(scores)), scores) plt.ylabel('Score') plt.xlabel('Episode #') plt.show()Episode 100 Average Score: 0.80 Episode 200 Average Score: 3.81 Episode 300 Average Score: 7.80 Episode 400 Average Score: 10.53 Episode 469 Average Score: 13.00 Environment solved in 369 episodes! Average Score: 13.00 Learning took: 578.329110622406, per episode: 1.2331111100690965. Evaluate the agentPerform a test run for a single episode and display the total score of untrained agent. 
Score over 13 is considered success.agent.qnetwork_local.load_state_dict(torch.load('trained_weights.pth')) env_info = env.reset(train_mode=True)[brain_name] state = env_info.vector_observations[0] score = 0 for j in range(500): action = agent.act(state) # select an action env_info = env.step(action)[brain_name] # send the action to the environment next_state = env_info.vector_observations[0] # get the next state reward = env_info.rewards[0] # get the reward done = env_info.local_done[0] # see if episode has finished score += reward # update the score state = next_state # roll over the state to next time step if done: # exit loop if episode finished break print("Score: {}, steps: {}".format(score, j))Score: 18.0, steps: 299When finished, you can close the environment.env.close()Null models & Jaccard coefficients etcfor i in range(0, 100): filename = ('/home/zxu/Documents/mscbioinfo/Data Project/scripts/Jupyter/null_models/random/' + 'model' + str(i) + '.tsv') null_model = make_null_model(pathway_2_compounds, list(test_compounds), True) #save_null_model(null_model, filename) print(null_model['path:eco00010']) print(len(null_model['path:eco00010'] & null_model['path:eco00030'])) print(len(pathway_2_compounds['path:eco00010'] & pathway_2_compounds['path:eco00030'])) null_jaccard = [] kegg_jaccard = [] for i in combinations(pathway_2_compounds, 2): null_jaccard.append(len(null_model[i[0]] & null_model[i[1]]) / len(null_model[i[0]] | null_model[i[1]])) kegg_jaccard.append(len(pathway_2_compounds[i[0]] & pathway_2_compounds[i[1]]) / len(pathway_2_compounds[i[0]] | pathway_2_compounds[i[1]])) np.var(null_jaccard) bins = np.arange(0, 0.5, 0.01) hist, bin_edges = np.histogram(null_jaccard, bins=bins) plt.clf() plt.bar(bins[:-1],hist,width=np.diff(bins)) plt.show() bins = np.arange(0, 0.5, 0.01) hist, bin_edges = np.histogram(kegg_jaccard, bins=bins) plt.clf() plt.bar(bins[:-1],hist,width=np.diff(bins)) plt.show()Generating results using null modelssig_count = 0 for ko_number in range(0, len(all_knockouts)): nullmod_pval, nullmod_pathway_id, nullmod_sizes = oras_ko(ko_number, ecoli_pathways, zamboni_bg, null_model, pos_annot, pos_mod, neg_annot, neg_mod, 2, False, False, 0, []) for i in nullmod_pval: if i < 0.05: sig_count += 1 print(sig_count)6215Analysis & Stuffs3717 Knockouts in total (0 to 3716)'C00186' in zamboni_bg build_metabo_input(2281, pos_annot, pos_mod, neg_annot, neg_mod, 5) fh = open('/home/zxu/Documents/mscbioinfo/Data Project/Zamboni/modzscore_neg_annotated.tsv', 'r') lines = fh.readlines() ko_scores = [] for line in lines: #print(len(line.rstrip().split('\t'))) score = line.rstrip().split('\t')[2281] ko_scores.append(score) plt.scatter(np.arange(len(ko_scores)), ko_scores) plt.show() all_knockouts.index('ybjO') build_metabo_input(1541, pos_annot, pos_mod, neg_annot, neg_mod, 10) for ko_number in range(0, len(all_knockouts)): fh = open('./Backgrounds/KO' + str(ko_number) + '.tsv', 'r') nobg_pval = [] nobg_pathways = [] nobg_size = [] zamboni_pval = [] lines = fh.readlines() for line in lines: fields = line.rstrip().split('\t') nobg_pval.append(float(fields[1])) nobg_pathways.append(fields[0]) nobg_size.append(fields[2]) zamboni_pval.append(float(fields[3])) fh.close() if len(nobg_pval) == 0: continue elif max(nobg_pval) < 1.30 and max(zamboni_pval) < 1.30: continue print(''.format(ko_number, all_knockouts[ko_number]), end='')Preparation for analysis# Stating the annotation files & modzscore files pos_annot = '/home/zxu/Documents/mscbioinfo/Data Project/Zamboni/annotation_pos.txt' 
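# Note: these four files are assumed to be the positive- and negative-ionisation
# annotations and the corresponding modified z-score matrices of the Zamboni knockout
# dataset; loadTsv, get_all_compounds, make_null_model, build_metabo_input and oras_ko
# are helper functions defined elsewhere in the project. A minimal sketch of the
# per-pathway enrichment test that oras_ko is assumed to perform (a hypergeometric /
# one-sided Fisher test on the overlap between a pathway's compounds and the
# metabolites flagged as significant for a knockout) would be:
#   from scipy.stats import hypergeom
#   # k = hits in the pathway, M = background size, n = pathway size, N = flagged set size
#   pval = hypergeom.sf(k - 1, M, n, N)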
pos_mod = '/home/zxu/Documents/mscbioinfo/Data Project/Zamboni/modzscore_pos_annotated.tsv' neg_annot = '/home/zxu/Documents/mscbioinfo/Data Project/Zamboni/annotation_neg.txt' neg_mod = '/home/zxu/Documents/mscbioinfo/Data Project/Zamboni/modzscore_neg_annotated.tsv' # Initialise KEGG instance k = KEGG() k.organism = "eco" # Initialise both backgrounds test_compounds = get_all_compounds('eco') zamboni_bg = loadTsv('/home/zxu/Documents/mscbioinfo/Data Project/Zamboni/annotation_all.txt') zamboni_bg = zamboni_bg & test_compounds # build {pathway: compounds} dictionary for E.coli ecoli_pathways = k.pathwayIds pathway_2_compounds = dict() for pathway in ecoli_pathways: parsed_output = k.parse(k.get(pathway)) # parsed_ouput has lots of information about the pathway try: compounds = set(parsed_output['COMPOUND'].keys()) pathway_2_compounds[pathway] = compounds except KeyError: # Some pathways do not have defined compounds #name = parsed_output['NAME'] #print(pathway, name) pass # Translate KO number to gene name sample_id_all = '/home/zxu/Documents/mscbioinfo/Data Project/Zamboni/sample_id_modzscore.tsv' all_knockouts = []# End product fh_sample_id_all = open(sample_id_all, 'r') for knockout in fh_sample_id_all: all_knockouts.append(knockout.rstrip()) fh_sample_id_all.close() #print(all_knockouts)Analysis 0: Translate compound names (Raboniwitz) Manual KEGG translationfh = open('rabinowitz.txt', 'r') rabinowitz_lines = fh.readlines() for line in rabinowitz_lines[80:85]: compound = line.rstrip() if compound.endswith(')'): compound = compound.split(' (')[0] print(compound) print(k.find('compound', compound)) print('=' * 20) fh.close() met_conc = {} with open('/home/zxu/Documents/mscbioinfo/Bioinfo Project/rabinowitz_conc.csv', 'r') as fh: lines = fh.readlines() for line in lines: fields = line.rstrip().split('\t') # Converting the concentration to actual numbers conc = fields[0] value = conc.split(' ')[0] power = conc.split('− ')[1] number = float(value) * 10 ** (-int(power)) metabolite = fields[1] met_conc[metabolite] = number # Might use if we want to cut off based on concentrations rab_met = sorted(met_conc, key=met_conc.get, reverse=True)Generating the small background resultsfor ko_number in range(0, len(all_knockouts)): rabbg_pval, rabbg_pathway_id, rabbg_sizes = oras_ko(ko_number, ecoli_pathways, zamboni_bg & set(rab_met), pathway_2_compounds, pos_annot, pos_mod, neg_annot, neg_mod, 2, True, False, 0, []) with open('./rabinowitz/full/KO' + str(ko_number) + '.tsv', 'w') as fh: for i in range(0, len(rabbg_pval)): fh.write(rabbg_pathway_id[i][5:] + '\t' + str(rabbg_pval[i]) + '\n')Comparing the small background results with the original results* p-value distribution* number of significant hitszamboni_pval = [] rabinowitz_pval = [] for ko_number in range(0, len(all_knockouts)): ora_results = {} rabinowitz_results = './rabinowitz/full/KO' + str(ko_number) + '.tsv' zamboni_results = './allresult/KO' + str(ko_number) + '.tsv' with open(rabinowitz_results, 'r') as rabinowitz_fh: for line in rabinowitz_fh.readlines(): fields = line.rstrip().split('\t') pathname = fields[0] pathpval = float(fields[1]) ora_results[pathname] = pathpval with open(zamboni_results, 'r') as zamboni_fh: for line in zamboni_fh.readlines(): fields = line.rstrip().split('\t') pathname = fields[0] pathpval = float(fields[1]) try: rabinowitz_pval.append(ora_results[pathname]) zamboni_pval.append(pathpval) except KeyError: pass print(len(zamboni_pval), len(rabinowitz_pval)) zamboni_pval = list(map(np.log10, zamboni_pval)) 
zamboni_pval = list(map(np.negative, zamboni_pval)) rabinowitz_pval = list(map(np.log10, rabinowitz_pval)) rabinowitz_pval = list(map(np.negative, rabinowitz_pval)) xedges = np.arange(0, 2, 0.05) yedges = np.arange(0, 2, 0.05) heatmap, xedges, yedges = np.histogram2d(zamboni_pval, rabinowitz_pval, bins=(xedges, yedges)) extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]] plt.clf() plt.plot([0,10], [0,10], color="Black", label='y=x') plt.imshow(heatmap.T, extent=extent, origin='lower', norm=colors.LogNorm(), aspect='auto') plt.set_cmap('rainbow') plt.colorbar(orientation="horizontal", pad=0.20) plt.ylabel('Rabinowitz Background') plt.xlabel('Zamboni Background') plt.title('-log(P) in two backgrounds') plt.legend(bbox_to_anchor=(1, 1.22)) plt.savefig('minuslog_zamboni_rabinowitz', transparent=False) #plt.scatter(nobg, zamboni) #plt.plot([0,10], [0,10]) #plt.xlim([0, 20]) #plt.ylim([0, 10]) #plt.show()Analysis 1: Background metabolites Verdict:1) A more specified background tends to make p-values less significant2) Some KOs survived the multiple testing correctionpath_2_pathname = {} for path in ecoli_pathways: pathname = path[5:] path_2_pathname[pathname] = k.parse(k.get(pathname))['NAME'][0][:-31] path_2_pathname out_fh = open('datajs2.js', 'w') for ko_number in range(0, 3717): fh = open('./Backgrounds/KO' + str(ko_number) + '.tsv', 'r') nobg_pval = [] nobg_pathways = [] nobg_size = [] zamboni_pval = [] lines = fh.readlines() for line in lines: fields = line.rstrip().split('\t') nobg_pval.append(float(fields[1])) nobg_pathways.append(fields[0]) nobg_size.append(fields[2]) zamboni_pval.append(float(fields[3])) fh.close() if len(nobg_pval) == 0: continue elif max(nobg_pval) < 1.30 and max(zamboni_pval) < 1.30: continue out_fh.write('else if(selVal == \"' + str(ko_number) + '\")') out_fh.write('{options.series = [{data: [') # Plotting for i in range(0, len(nobg_pval)): if nobg_pval[i] > 1.30 or zamboni_pval[i] > 1.30: name = path_2_pathname[nobg_pathways[i]] out_fh.write('{ ' + 'x: {}, y: {}, z: {}, name: "{}", country: "{}"'.format(nobg_pval[i], zamboni_pval[i], nobg_size[i], nobg_pathways[i], name) + ' }') if i == len(nobg_pval)-1: pass else: out_fh.write(',') #name = path_2_pathname[nobg_pathways[-1]] #out_fh.write('{ ' + # 'x: {}, y: {}, z: {}, name: "{}", country: "{}"'.format(nobg_pval[-1], zamboni_pval[-1], nobg_size[-1], # nobg_pathways[-1], name) + # ' }') out_fh.write(']}], options.title = {text: \'OverRepresentation Analysis of ' + all_knockouts[ko_number] + '\'}}' + '\n') out_fh.close() # Background Analysis for ko_number in range(2406, 3717): nobg_pval, nobg_pathway_id, nobg_sizes = oras_ko(ko_number, ecoli_pathways, test_compounds, pathway_2_compounds, pos_annot, pos_mod, neg_annot, neg_mod, 2, True, True, 0, []) zamboni_pval, zamboni_pathway_id, zamboni_sizes = oras_ko(ko_number, ecoli_pathways, zamboni_bg, pathway_2_compounds, pos_annot, pos_mod, neg_annot, neg_mod, 2, True, True, 0, []) result_file = './Backgrounds/KO' + str(ko_number) + '.tsv' fh = open(result_file, 'w') for i in range(0, len(nobg_pathway_id)): fh.write('{}\t{}\t{}\t{}\t{}\n'.format(nobg_pathway_id[i][5:], nobg_pval[i], nobg_sizes[i], zamboni_pval[i], zamboni_sizes[i])) fh.close() print(len(fp), len(fn)) nobg = [] zamboni = [] fp = [] fn = [] for ko_number in range(0, 3717): result_file = './Backgrounds/KO' + str(ko_number) + '.tsv' fh = open(result_file, 'r') lines = fh.readlines() for line in lines: fields = line.rstrip().split('\t') nobg_pval = float(fields[1]) zamboni_pval = float(fields[3]) 
nobg.append(nobg_pval) zamboni.append(zamboni_pval) if (nobg_pval > zamboni_pval) and (nobg_pval > 1.301) and (zamboni_pval < 1.301): # fp fp.append(1) elif (nobg_pval < zamboni_pval) and (zamboni_pval > 1.301) and (nobg_pval < 1.301): # fn fn.append(1) else: pass nobg xedges = np.arange(0, 6, 0.05) yedges = np.arange(0, 3, 0.05) heatmap, xedges, yedges = np.histogram2d(nobg, zamboni, bins=(xedges, yedges)) extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]] plt.clf() plt.plot([0,10], [0,10], color="Black", label='y=x') plt.imshow(heatmap.T, extent=extent, origin='lower', norm=LogNorm(), aspect='auto') plt.set_cmap('rainbow') plt.colorbar(orientation="horizontal", pad=0.20) plt.ylabel('Specified Background') plt.xlabel('Unspecified Background') plt.title('-log(P) in two backgrounds') plt.legend(bbox_to_anchor=(1, 1.22)) plt.savefig('contour', transparent=True) #plt.scatter(nobg, zamboni) #plt.plot([0,10], [0,10]) #plt.xlim([0, 20]) #plt.ylim([0, 10]) #plt.show() for met in build_metabo_input(0, pos_annot, pos_mod, neg_annot, neg_mod, 2): print(met)C00644 C06193 C05818 C00296 C03274 C01103 C03090 C03406 C00082 C00719 C00855 C00037 C00417 C06840 C01233 C00246 C16186 C05822 C04332 C00048 C04438 C00314 C06735 C00460 C06156Analysis 2: Pval distribution and Pathway Size distribution Verdict:1) Pval distribution is just another form of Analysis 1: Background metabolites so verdicts followed2) Pathway Size distribution showed pathway size experienced a shift when the background was applied# pval distribution for ko_number in range(0, 101): ko_gene = all_knockouts[ko_number] ko_metabolites = loadTsv('/home/zxu/Documents/mscbioinfo/Data Project/scripts/metaboAnalystqueries/maKO' + str(ko_number) + 'Cutoff2.tsv') ecoli_pathways = k.pathwayIds nobg_pval = [] zamboni_pval = [] for pathway_index in range(0, len(ecoli_pathways)): pathway = ecoli_pathways[pathway_index] #print(pathway_index) nobg_ora_res = ora(ko_metabolites, pathway, test_compounds, pathway_2_compounds) if len(nobg_ora_res) == 2: nobg_pval.append(nobg_ora_res[0]) zamboni_pval.append(ora(ko_metabolites, pathway, zamboni_bg, pathway_2_compounds)[0]) ''' nobg_pval = list(map(np.log10, nobg_pval)) zamboni_pval = list(map(np.log10, zamboni_pval)) nobg_pval = list(map(np.negative, nobg_pval)) zamboni_pval = list(map(np.negative, zamboni_pval)) ''' # Plotting fig, ax = plt.subplots(nrows=2, ncols=1) fig.subplots_adjust(bottom=-0.5) binsize = 0.05 ax[0].hist(nobg_pval, bins=np.arange(0, 1+binsize, binsize)) ax[1].hist(zamboni_pval, bins=np.arange(0, 1+binsize, binsize)) ax[0].set_title('P-value distribution (all compounds) for knockout ' + ko_gene) ax[1].set_title('P-value distribution (Zamboni background) for knockout ' + ko_gene) ax[0].get_xaxis().set_visible(False) for axe in ax: axe.patches[0].set_color('r') #ax.set_xlabel('No specified background') #ax.set_ylabel('Zamboni background') plt.tight_layout() fig.savefig('./pvaldist/KO' + str(ko_number) + '.png') #plt.show() fig.clf() size_dist = [] for pathway in pathway_2_compounds: #if len(pathway_2_compounds[pathway]) == 1: # print(pathway) size_dist.append(len(pathway_2_compounds[pathway])) zamboni_size_dist = [] for pathway in pathway_2_compounds: compounds = pathway_2_compounds[pathway] cmpd_count = 0 for compound in compounds: if compound in zamboni_bg: cmpd_count += 1 zamboni_size_dist.append(cmpd_count) print(min(zamboni_size_dist), max(zamboni_size_dist)) plt.hist(zamboni_size_dist, bins=range(0, 145, 5)) plt.ylim(0, 40) plt.xlabel('Pathway size') plt.ylabel('Number of pathways') 
plt.title('Pathway size distribution (Zamboni background)') plt.show() print(min(size_dist)) print(max(size_dist)) plt.hist(size_dist, bins=range(0, 145, 5)) plt.ylim(0, 40) plt.xlabel('Pathway size') plt.ylabel('Number of pathways') plt.title('Pathway size distribution (all compounds)') plt.show()1 138Analysis 3: Random metabolite misidentification-Data noise-misindentification*Repeat it for many times*Summarize the results (Number of significant pathways; Rank KOs based on length of list) Zamboni background length: 4135% = 2010% = 4120% = 8250% = 206random_knockouts = np.random.randint(3717, size=50) random_knockouts = np.array([2673, 470, 3457, 859, 2461, 2776, 514, 1537, 3114, 2120, 2880, 1312, 484, 3494, 110, 29, 1514, 791, 1925, 1131, 2776, 1274, 1342, 875, 2235, 2938, 1460, 2957, 718, 1214, 3058, 509, 3215, 2066, 2598, 3622, 3627, 436, 2223, 2691, 2442, 3439, 2490, 1223, 90, 1902, 1893, 929, 3349, 746]) print(zero, one, two, more) print(20) print(zero, one, two, more) print(10) import os zero = {} one = {} two = {} more = {} for file in os.listdir('./mis_ident50/3more'): path = './mis_ident10/3more/' + file fh = open(path, 'r') lines = fh.readlines() for line in lines: fields = line.rstrip().split('\t') total_hits = int(fields[0]) false_pos = int(fields[1]) false_neg = int(fields[2]) if total_hits == 0: zero['fp'] = zero.get('fp', 0) + false_pos zero['fn'] = zero.get('fn', 0) + false_neg elif total_hits == 1: one['fp'] = one.get('fp', 0) + false_pos one['fn'] = one.get('fn', 0) + false_neg elif total_hits == 2: two['fp'] = two.get('fp', 0) + false_pos two['fn'] = two.get('fn', 0) + false_neg elif total_hits >= 3: more['fp'] = more.get('fp', 0) + false_pos more['fn'] = more.get('fn', 0) + false_neg fh.close() # Random metabolite mutation mutation_rate = 0.5 filename = './mis_ident/new/mrate50.tsv' fh = open(filename, 'w') for ko_number in random_knockouts: fp = 0 fn = 0 ora_results = [] for i in range(0, 51): ora_results.append([]) (pvals, pathwayids, junk) = oras_ko(ko_number, ecoli_pathways, zamboni_bg, pathway_2_compounds, pos_annot, pos_mod, neg_annot, neg_mod, 2, True, False, 0, []) for ind in range(0, len(pvals)): if pvals[ind] < 0.05: ora_results[0].append(pathwayids[ind]) for k in range(0, 50): # Number of mutations per ko #print(k) (pvals_mut, pathwayids_mut, junk) = oras_ko(ko_number, ecoli_pathways, zamboni_bg, pathway_2_compounds, pos_annot, pos_mod, neg_annot, neg_mod, 2, True, False, int(mutation_rate * 413), test_compounds) for ind in range(0, len(pvals_mut)): if pvals_mut[ind] < 0.05: ora_results[k+1].append(pathwayids_mut[ind]) # write ora_result to a file for i in range(1, len(ora_results)): result = ora_results[i] fp += len(set(result) - set(ora_results[0])) fn += len(set(ora_results[0]) - set(result)) fh.write('\t'.join([str(len(ora_results[0])), str(fp), str(fn), str(ko_number)])) fh.write('\n') fh.close() fh = open('./mis_ident/new/mrate50.tsv', 'r') lines = fh.readlines() fp_all = [] fn_all = [] for line in lines: fields = line.rstrip().split('\t') tp = int(fields[0]) fp = int(fields[1]) fn = int(fields[2]) fp_all.append(fp) if tp != 0: fn_all.append(fn) fh.close() print(np.mean(fp_all)/50, np.mean(fn_all)/50, sum(fn_all)/2500) count = -1 for k in ora_results: if k == ora_results[0]: count += 1 print(count)0Analysis 4: Getting 'correct' answerOnly include metabolic enzyme (EC number)Annotation (missing/incorrect)Dont use a cutoffBiological?Chemical?Statistical?eco_enzymes eco_enzymes = [] for i in range(0, len(all_knockouts)): print(i) try: if 'Enzymes' in 
k.parse(k.get(k.find('eco', all_knockouts[i])[:9])).get('BRITE', []): eco_enzymes.append(i) except AttributeError: pass outfile = './locality/allKO.tsv' fh = open(outfile, 'w') for ko_number in eco_enzymes: print(ko_number) ko_gene = all_knockouts[ko_number] gene_id = k.find("eco", ko_gene)[:9] try: gene_pathways = k.parse(k.get(gene_id))['PATHWAY'].keys() except KeyError: gene_pathways = [] except TypeError: gene_pathways = [] if len(gene_pathways) > 0: fh.write(str(ko_number) + '\t' + ko_gene + '\t') fh.write(' '.join(list(gene_pathways))) fh.write('\n') fh.close()4 5 11 12 13 14 16 17 18 25 30 36 38 40 43 45 46 49 50 53 54 55 56 57 58 59 60 65 68 69 70 72 73 75 78 80 81 82 85 86 87 95 98 99 100 101 102 103 104 105 106 108 109 113 114 115 116 117 118 120 121 122 123 134 136 137 143 144 145 147 148 149 150 151 152 153 154 155 159 160 161 162 163 165 167 169 174 176 177 181 182 183 184 188 189 190 191 192 193 194 196 204 205 207 208 211 213 218 226 235 236 238 242 243 245 246 248 249 250 252 254 257 258 259 261 262 263 265 269 271 273 274 275 277 278 280 281 282 283 284 285 286 288 289 291 292 293 294 295 297 306 331 332 337 341 343 344 346 347 349 350 352 353 354 355 356 357 358 359 360 362 368 370 372 375 377 378 387 393 397 398 399 400 401 402 404 405 406 407 411 413 414 418 421 423 428 429 431 432 436 439 447 449 454 455 458 466 471 480 481 482 483 484 490 491 495 497 498 507 511 512 514 515 517 518 519 520 521 524 529 530 533 548 557 561 577 579 580 607 622 623 624 625 626 627 632 634 636 638 639 640 641 643 644 646 647 649 650 651 654 655 65[...]MethodsGather the following statistics:How many got ranked significant?How many significant in ORA but not related?What is the average rank of related pathways?from operator import itemgetter # Get KO number # Load rankings for that KO # Get significant/not related/average rank kegg_results = './locality/allKO.tsv' fh = open(kegg_results, 'r') result_lines = fh.readlines() fh.close() out_fh = open('./locality/outresult_sig.tsv', 'w') out_fh.write('KO_number\tGene\tSigpathways\tNot_KEGG\tSigKEGG\tRank\tPath_count\n') for line in result_lines: fields = line.rstrip().split('\t') ko_number = fields[0] ko_name = fields[1] ko_kegg_pathways = fields[2].split() ko_ora_results = './allresult/KO' + ko_number + '.tsv' fh = open(ko_ora_results, 'r') ora_lines = fh.readlines() fh.close() ora_pvals = [] ora_pathways = [] ora_sigpathways = [] for oraline in ora_lines: ora_pathway_result = oraline.rstrip().split('\t') ora_pvals.append(ora_pathway_result[1]) ora_pathways.append(ora_pathway_result[0]) if float(ora_pathway_result[1]) < 0.05: ora_sigpathways.append(ora_pathway_result[0]) pathway_rank = dict(zip(ora_pathways, list(dup_argsort(ora_pvals)))) # Sigpathway not_related_pathways = len(set(ora_sigpathways) - set(ko_kegg_pathways)) missing_pathways = len(set(ko_kegg_pathways) - set(ora_sigpathways)) # average rank ranksum = 0 path_count = 0 for path in ko_kegg_pathways: try: rank = pathway_rank[path] ranksum += rank path_count += 1 except KeyError: pass if path_count != 0: rankavg = ranksum/path_count else: rankavg = 'N' output_str = '\t'.join([ko_number, ko_name, str(len(ora_sigpathways)), str(not_related_pathways), str(len(ora_sigpathways) - not_related_pathways), str(rankavg), str(path_count)]) if len(ora_sigpathways) > 0: out_fh.write(output_str+'\n') out_fh.close() a2 = np.array([4,2,1,1,2]) def dup_argsort(in_val): u, v = np.unique(in_val, return_inverse=True) out_ind = (np.cumsum(np.concatenate(([0], np.bincount(v)))))[v] return out_ind 
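To make the tie-aware ranking above concrete, here is a small worked example (a self-contained sketch that duplicates the helper): the rank returned for each element equals the number of strictly smaller values, and ties share the same rank.

```python
import numpy as np

def dup_argsort(in_val):
    # rank of each element in ascending order; equal values share the same rank
    u, v = np.unique(in_val, return_inverse=True)
    return (np.cumsum(np.concatenate(([0], np.bincount(v)))))[v]

# a2 = [4, 2, 1, 1, 2]: the two 1s get rank 0, the two 2s rank 2, and the 4 rank 4
print(dup_argsort(np.array([4, 2, 1, 1, 2])))   # -> [4 2 0 0 2]
```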
dup_argsort(a2) fh = open('./2281.tsv', 'w') fh.write('Gene\tKegg\tKegg_path\tSig_path(0.1)\tMinPval\n') for ko_number in range(2281, 2282): print(ko_number) ko_gene = all_knockouts[ko_number] gene_id = k.find("eco", ko_gene)[:9] try: gene_pathways = k.parse(k.get(gene_id))['PATHWAY'].keys() except KeyError: gene_pathways = [] except TypeError: gene_pathways = [] if len(gene_pathways) >= 0: #print(ko_gene, gene_id) #print(list(gene_pathways)) significant_pathways = [] (pvals, pathwayids, junk) = oras_ko(ko_number, ecoli_pathways, zamboni_bg, pathway_2_compounds, pos_annot, pos_mod, neg_annot, neg_mod, 2, False, False, 0, []) for ind in range(0, len(pvals)): if pvals[ind] < 0.1: significant_pathways.append(pathwayids[ind]) fh.write('{}\t{}\t{}\t{}\t{}\n'.format(ko_gene, gene_id, list(gene_pathways), significant_pathways, min(pvals))) fh.close()2281Analysis 5: Rank analysis ORA vs MetaboAnalystfor metabolite in build_metabo_input(0, pos_annot, pos_mod, neg_annot, neg_mod, 2): print(metabolite) (pval, pathwayid, pathwaysize) = oras_ko(0, ecoli_pathways, zamboni_bg, pathway_2_compounds, pos_annot, pos_mod, neg_annot, neg_mod, 2, False, False, 0) def kegg_2_name(kegg_id, kegg_instance): return kegg_instance.parse(kegg_instance.get(kegg_id))['NAME'][0].split(' - ')[0] kegg_2_name('path:eco00330', k) len(k.parse(k.get('path:eco00401'))['COMPOUND']) pval = np.array(pval) pvalind = np.argsort(pval) for i in pvalind: size = pathwaysize[i] ptw = pathwayid[i] ptwname = kegg_2_name(ptw, k) print(ptw, size, ptwname, pval[i]) for ko_number in range(0, 3717): result_file = './Backgrounds/KO' + str(ko_number) + '.tsv' in_fh = open(result_file, 'r') lines = in_fh.readlines() out_fh = open(('./allresult/KO') + str(ko_number) + '.tsv', 'w') for line in lines: fields = line.rstrip().split('\t') pathway_id = fields[0] zamboni_pval = float(fields[3]) pvalue = 10**(np.negative(zamboni_pval)) out_fh.write('{}\t{}\n'.format(pathway_id, pvalue)) out_fh.close() in_fh.close() number_of_hits = {} for ko_number in range(0, 3717): result_file = './allresult/KO' + str(ko_number) + '.tsv' fh = open(result_file, 'r') lines = fh.readlines() sig_pathway = 0 for line in lines: fields = line.rstrip().split('\t') pvalue = float(fields[1]) if pvalue < 0.05: sig_pathway += 1 number_of_hits[ko_number] = sig_pathway oras_ko(333, ecoli_pathways, zamboni_bg, pathway_2_compounds, pos_annot, pos_mod, neg_annot, neg_mod, 2, True, False, 0, []) no_hits = [] for ko in number_of_hits: if number_of_hits[ko] == 0: no_hits.append(ko) no_hits total = 0 for ko in number_of_hits: total += number_of_hits[ko] totalSchema As we have seen, the schema changed. 
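One quick way to confirm the change is to print the schema of an early month next to a recent one (a sketch; the HDFS path pattern is the one used by the loading code below, and 2009/01 vs. 2017/01 are just illustrative months):

```python
# Old layout (2009) vs. new layout (2017), read straight from the raw parquet files
spark.read.parquet("/taxi/raw_parquet/2009/01.parquet").printSchema()
spark.read.parquet("/taxi/raw_parquet/2017/01.parquet").printSchema()
```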
Let us build a function which picks the common columns we need for future queries and rename them to the same column name Old:```bash+-----------+--------------------+---------------------+---------------+------------------+-------------------+------------------+---------+-----------------+-------------------+------------------+------------+------------------+---------+-------+-------+---------+------------------+|vendor_name|Trip_Pickup_DateTime|Trip_Dropoff_DateTime|Passenger_Count| Trip_Distance| Start_Lon| Start_Lat|Rate_Code|store_and_forward| End_Lon| End_Lat|Payment_Type| Fare_Amt|surcharge|mta_tax|Tip_Amt|Tolls_Amt| Total_Amt|+-----------+--------------------+---------------------+---------------+------------------+-------------------+------------------+---------+-----------------+-------------------+------------------+------------+------------------+---------+-------+-------+---------+------------------+| VTS| 2009-01-04 02:52:00| 2009-01-04 03:02:00| 1|2.6299999999999999|-73.991956999999999| 40.721567| null| null| -73.993803|40.695922000000003| CASH|8.9000000000000004| 0.5| null| 0| 0|9.4000000000000004|| VTS| 2009-01-04 03:31:00| 2009-01-04 03:38:00| 3|4.5499999999999998|-73.982101999999998|40.736289999999997| null| null|-73.955849999999998|40.768030000000003| Credit| 12.1| 0.5| null| 2| 0| 14.6|+-----------+--------------------+---------------------+---------------+------------------+-------------------+------------------+---------+-----------------+-------------------+------------------+------------+------------------+---------+-------+-------+---------+------------------+```New:```bash+--------+--------------------+---------------------+---------------+-------------+----------+------------------+------------+------------+------------+-----------+-----+-------+----------+------------+---------------------+------------+|VendorID|tpep_pickup_datetime|tpep_dropoff_datetime|passenger_count|trip_distance|RatecodeID|store_and_fwd_flag|PULocationID|DOLocationID|payment_type|fare_amount|extra|mta_tax|tip_amount|tolls_amount|improvement_surcharge|total_amount|+--------+--------------------+---------------------+---------------+-------------+----------+------------------+------------+------------+------------+-----------+-----+-------+----------+------------+---------------------+------------+| 1| 2017-01-09 11:13:28| 2017-01-09 11:25:45| 1| 3.30| 1| N| 263| 161| 1| 12.5| 0| 0.5| 2| 0| 0.3| 15.3|| 1| 2017-01-09 11:32:27| 2017-01-09 11:36:01| 1| .90| 1| N| 186| 234| 1| 5| 0| 0.5| 1.45| 0| 0.3| 7.25|+--------+--------------------+---------------------+---------------+-------------+----------+------------------+------------+------------+------------+-----------+-----+-------+----------+------------+---------------------+------------+```We pick: ```Trip_Pickup_DateTime tpep_pickup_datetimeTrip_Dropoff_DateTime tpep_dropoff_datetimePassenger_Count passenger_countTrip_Distance trip_distancePayment_Type payment_typeTip_Amt tip_amountTotal_Amt total_amount``` Create Folder in HDFS!hdfs dfs -mkdir /taxi/by_month/testing our approach firstyear = "2009" month = "01" df = spark.read.parquet(f"/taxi/raw_parquet/{year}/{month}.parquet")\ .withColumnRenamed("Trip_Pickup_DateTime","pickup_datetime")\ .withColumnRenamed("Trip_Dropoff_DateTime","dropoff_datetime")\ .withColumnRenamed("tpep_pickup_datetime","pickup_datetime")\ .withColumnRenamed("tpep_dropoff_datetime","dropoff_datetime")\ .withColumnRenamed("Passenger_Count","passenger_count")\ .withColumnRenamed("Trip_Distance","trip_distance")\ 
.withColumnRenamed("Payment_Type","payment_type")\ .withColumnRenamed("Tip_Amt","tip_amount")\ .withColumnRenamed("Total_Amt","total_amount")\ .selectExpr(\ "cast(pickup_datetime as timestamp)", \ "cast(dropoff_datetime as timestamp)", \ "cast(passenger_count as int)", \ "cast(trip_distance as double)", \ "cast(payment_type as string)", \ "cast(tip_amount as double)", \ "cast(total_amount as double)" \ ) df.printSchema() df.show(2) year = "2021" month = "03" df = spark.read.parquet(f"/taxi/raw_parquet/{year}/{month}.parquet")\ .withColumnRenamed("Trip_Pickup_DateTime","pickup_datetime")\ .withColumnRenamed("Trip_Dropoff_DateTime","dropoff_datetime")\ .withColumnRenamed("tpep_pickup_datetime","pickup_datetime")\ .withColumnRenamed("tpep_dropoff_datetime","dropoff_datetime")\ .withColumnRenamed("Passenger_Count","passenger_count")\ .withColumnRenamed("Trip_Distance","trip_distance")\ .withColumnRenamed("Payment_Type","payment_type")\ .withColumnRenamed("Tip_Amt","tip_amount")\ .withColumnRenamed("Total_Amt","total_amount")\ .selectExpr(\ "cast(pickup_datetime as timestamp)", \ "cast(dropoff_datetime as timestamp)", \ "cast(passenger_count as int)", \ "cast(trip_distance as double)", \ "cast(payment_type as string)", \ "cast(tip_amount as double)", \ "cast(total_amount as double)" \ ) df.printSchema() df.show(2)+-------------------+-------------------+---------------+-------------+------------+----------+------------+ | pickup_datetime| dropoff_datetime|passenger_count|trip_distance|payment_type|tip_amount|total_amount| +-------------------+-------------------+---------------+-------------+------------+----------+------------+ |2021-03-05 14:47:10|2021-03-05 15:16:13| 1| 5.4| 1| 6.3| 31.6| |2021-03-05 16:46:03|2021-03-05 16:52:01| 1| 0.97| 1| 0.0| 10.3| +-------------------+-------------------+---------------+-------------+------------+----------+------------+ only showing top 2 rowsRead and Writedef read_and_write(year, month): df = spark.read.parquet(f"/taxi/raw_parquet/{year}/{month}.parquet")\ .withColumnRenamed("Trip_Pickup_DateTime","pickup_datetime")\ .withColumnRenamed("Trip_Dropoff_DateTime","dropoff_datetime")\ .withColumnRenamed("tpep_pickup_datetime","pickup_datetime")\ .withColumnRenamed("tpep_dropoff_datetime","dropoff_datetime")\ .withColumnRenamed("Passenger_Count","passenger_count")\ .withColumnRenamed("Trip_Distance","trip_distance")\ .withColumnRenamed("Payment_Type","payment_type")\ .withColumnRenamed("Tip_Amt","tip_amount")\ .withColumnRenamed("Total_Amt","total_amount")\ .selectExpr(\ "cast(pickup_datetime as timestamp)", \ "cast(dropoff_datetime as timestamp)", \ "cast(passenger_count as int)", \ "cast(trip_distance as double)", \ "cast(payment_type as string)", \ "cast(tip_amount as double)", \ "cast(total_amount as double)" \ ) df.write.parquet(f"/taxi/by_month/{year}/{month}.parquet") for year in ["2009", "2010", "2011", "2012", "2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020", "2021", "2022"]: for month in ["01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12"]: !echo processing {year}/{month} read_and_write(year, month)processing 2009/01Stopping Sparkspark.stop()import matplotlib.animation as mpla import matplotlib.pyplot as plt import numpy as np def f1(a, b): c = a / 3 d = b / 3 return c, d def f2(a, b): c = (a / 3) / 2 - (b / 3) * (np.sqrt(3)) / 2 + 1 / 3 d = (a / 3) * (np.sqrt(3)) / 2 + (b / 6) return c, d def f3(a, b): c = (a / 3) / 2 + (b / 3) * (np.sqrt(3)) / 2 + 1 / 2 d = -(a / 6) * (np.sqrt(3)) + (b / 6) + (np.sqrt(3)) / 6 
return c, d def f4(a, b): c = a / 3 + 2 / 3 d = b / 3 return c, d x = np.zeros(50000) y = np.zeros(50000) x[0] = 0 y[0] = 0 x[1] = 1 y[1] = 0 a = np.zeros(50000) b = a c = a d = a e = a f = a g = a h = a i = 2 for marhale in range(0, 7, 1): while i < 2 * (4 ** marhale): a, b = f1(x, y) c, d = f2(x, y) e, f = f3(x, y) g, h = f4(x, y) j = 0 while j != i: x[j] = a[j] y[j] = b[j] j = j + 1 while j != 2 * i: x[j] = c[j - i] y[j] = d[j - i] j = j + 1 while j != 3 * i: x[j] = e[j - 2 * i] y[j] = f[j - 2 * i] j = j + 1 while j != 4 * i: x[j] = g[j - 3 * i] y[j] = h[j - 3 * i] j = j + 1 i = 4 * i x_values = np.zeros(i) y_values = np.zeros(i) j = 0 while j != i: x_values[j] = x[j] y_values[j] = y[j] j = j + 1 var = str(marhale) plt.plot(x_values, y_values) plt.yticks(np.arange(0, 1, 0.2)) plt.xticks(np.arange(0, 1.2, 0.2)) matplotlib.pyplot.title('Total steps = ' + var) matplotlib.pyplot.legend(['0', '1', '2', '3', '4', '5', '6'])Exercise 6-3 LSTMThe following two cells will create a LSTM cell with one neuron.We scale the output of the LSTM linear and add a bias.Then the output will be wrapped by a sigmoid activation.The goal is to predict a time series where every $n^{th}$ ($5^{th}$ in the current example) element is 1 and all others are 0.a) Please read and understand the source code below.b) Consult the output of the predictions. What do you observe? How does the LSTM manage to predict the next element in the sequence?import tensorflow as tf import numpy as np from matplotlib import pyplot as plt tf.reset_default_graph() tf.set_random_seed(12314) epochs=50 zero_steps = 5 learning_rate = 0.01 lstm_neurons = 1 out_dim = 1 num_features = 1 batch_size = zero_steps window_size = zero_steps*2 time_steps = 5 x = tf.placeholder(tf.float32, [None, window_size, num_features], 'x') y = tf.placeholder(tf.float32, [None, out_dim], 'y') lstm = tf.nn.rnn_cell.LSTMCell(lstm_neurons) state = lstm.zero_state(batch_size, dtype=tf.float32) regression_w = tf.Variable(tf.random_normal([lstm_neurons])) regression_b = tf.Variable(tf.random_normal([out_dim])) outputs, state = tf.contrib.rnn.static_rnn(lstm, tf.unstack(x, window_size, 1), state) output = outputs[-1] predicted = tf.nn.sigmoid(output * regression_w + regression_b) cost = tf.reduce_mean(tf.losses.mean_squared_error(y, predicted)) optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost) forget_gate = output.op.inputs[1].op.inputs[0].op.inputs[0].op.inputs[0] input_gate = output.op.inputs[1].op.inputs[0].op.inputs[1].op.inputs[0] cell_candidates = output.op.inputs[1].op.inputs[0].op.inputs[1].op.inputs[1] output_gate_sig = output.op.inputs[0] output_gate_tanh = output.op.inputs[1] X = [ [[ (shift-n) % zero_steps == 0 ] for n in range(window_size) ] for shift in range(batch_size) ] Y = [[ shift % zero_steps == 0 ] for shift in range(batch_size) ] with tf.Session() as sess: sess.run(tf.initializers.global_variables()) loss = 1 epoch = 0 while loss >= 1e-5: epoch += 1 _, loss = sess.run([optimizer, cost], {x:X, y:Y}) if epoch % (epochs//10) == 0: print("loss %.5f" % (loss), end='\t\t\r') print() outs, stat, pred, fg, inpg, cell_cands, outg_sig, outg_tanh = sess.run([outputs, state, predicted, forget_gate, input_gate, cell_candidates, output_gate_sig, output_gate_tanh], {x:X, y:Y}) outs = np.asarray(outs) for batch in reversed(range(batch_size)): print("input:") print(np.asarray(X)[batch].astype(int).reshape(-1)) print("forget\t\t%.4f\ninput gate\t%.4f\ncell cands\t%.4f\nout gate sig\t%.4f\nout gate tanh\t%.4f\nhidden state\t%.4f\ncell 
state\t%.4f\npred\t\t%.4f\n\n" % ( fg[batch,0], inpg[batch,0], cell_cands[batch,0], outg_sig[batch,0], outg_tanh[batch,0], stat.h[batch,0], stat.c[batch,0], pred[batch,0]))loss 0.00001 input: [0 0 0 0 1 0 0 0 0 1] forget 0.0135 input gate 0.9997 cell cands 0.9994 out gate sig 1.0000 out gate tanh 0.7586 hidden state 0.7586 cell state 0.9928 pred 0.0000 input: [0 0 0 1 0 0 0 0 1 0] forget 0.8747 input gate 0.9860 cell cands -0.0476 out gate sig 1.0000 out gate tanh 0.6759 hidden state 0.6759 cell state 0.8215 pred 0.0000 input: [0 0 1 0 0 0 0 1 0 0] forget 0.8504 input gate 0.9877 cell cands -0.1311 out gate sig 0.9999 out gate tanh 0.5148 hidden state 0.5147 cell state 0.5692 pred 0.0000 input: [0 1 0 0 0 0 1 0 0 0] forget 0.7921 input gate 0.9903 cell cands -0.2875 out gate sig 0.9999 out gate tanh 0.1646 hidden state 0.1646 cell state 0.1661 pred 0.0052 input: [1 0 0 0 0 1 0 0 0 0] forget 0.6150 input gate 0.9943 cell cands -0.5732 out gate sig 0.9997 out gate tanh -0.4363 hidden state -0.4361 cell state -0.4676 pred 0.9953Predict Cancer - Benign or MalignantDataset of 569 people who were diagnosed with malignant or benign cancer. This dataset is provided by Wisconsin, it is available in both kaggle and UCI Machine Learning Repository. Import Libraries# load libraries import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn import preprocessing from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import accuracy_score, confusion_matrix import matplotlib.pyplot as plt import seaborn as snsLoad Datasetcancer_data = pd.read_csv('breastcancer/data.csv') cancer_data.head()Find more about datacancer_data.describe() cancer_data.isna().sum()**Here most of the data in the unnamed: 32 are empty. 
We also donot want the data in coumn id as it doesnot contribute much to our prediction** Removing less useful data from our datasetdel_columns=['Unnamed: 32', 'id'] cancer_data.drop(del_columns, axis = 1, inplace= True) cancer_data.head(2)Let us find the number of benign and malignant class# Data to plot labels = 'Benign', 'Malignant' sizes = cancer_data['diagnosis'].value_counts() colors = ['lightskyblue', 'orange'] explode= [0.4,0] # Plot plt.pie(sizes, explode=explode, labels=labels,radius= 1400 ,colors=colors, autopct='%1.1f%%', shadow=True, startangle=90) plt.axis('equal') fig = plt.gcf() fig.set_size_inches(7,7) plt.show()Split data into X and yX = cancer_data.drop(['diagnosis'], axis = 1) y = cancer_data['diagnosis'] X.head(2) y.head(2)Normalize dataX = preprocessing.StandardScaler().fit(X).transform(X.astype(float)) XSplit data into training and testing setX_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state=4) print('train_set:', X_train.shape, y_train.shape) print('test_set:', X_test.shape, y_test.shape)train_set: (455, 30) (455,) test_set: (114, 30) (114,)Implement KNN algorithmk = 4 knn_model = KNeighborsClassifier(n_neighbors=k).fit(X_train,y_train) knn_model y_pred = knn_model.predict(X_test) y_pred[0:5] print("Train set Accuracy: ", accuracy_score(y_train, knn_model.predict(X_train))) print("Test set Accuracy: ", accuracy_score(y_test, y_pred)) cm_KNN = confusion_matrix(y_test, y_pred) print(cm_KNN)[[80 0] [ 2 32]]To find Best K value for the KNN algorithmtrain_scores = [] # Create a list of test scores test_scores = [] # Create a list of different values for n_neighbors neighbors = range(1, 30) # 1 to 30 # Setup algorithm knn = KNeighborsClassifier() # Loop through different neighbors values for i in neighbors: knn.set_params(n_neighbors = i) # set neighbors value # Fit the algorithm knn.fit(X_train, y_train) # Update the training scores train_scores.append(knn.score(X_train, y_train)) # Update the test scores test_scores.append(knn.score(X_test, y_test)) print(f"Maximum KNN score on the test data: {max(test_scores)*100:.2f}%") print("With K =", test_scores.index(max(test_scores))+1)Maximum KNN score on the test data: 99.12% With K = 15Refresher Course on Matrix Analysis and Optimization . 2018 Chap. 2 - Refresher Course ``2. Numerical Optimization``--- Package check and StylingOutline    a) the Gradient Method     b) Application to Regression     c) an Adaptive Gradient Algorithm     d) Application to Classification     e) Newton's Algorithm     f) To go Further a) the Gradient Method Go to topWe consider two $\mathbb{R}^2 \to \mathbb{R}$ convex functions with the same global minimizer $(3,1)$ but quite different *shapes* and see how this impacts the performance of gradient-based algorithms. The considered functions $f$ and $g$ and their 3D are:\begin{array}{rrcll}f: & \mathbb{R}^2 & \to &\mathbb{R}\\& (x_1,x_2) & \mapsto & 4 (x_1-3)^2 + 2(x_2-1)^2\end{array} \begin{array}{rrcll}g: & \mathbb{R}^2 & \to &\mathbb{R}\\& (x_1,x_2) & \mapsto & \log( 1 + \exp(4 (x_1-3)^2 ) + \exp( 2(x_2-1)^2 ) ) - \log(3) .\end{array} **Note:** For convenience, we provide custom plotting functions in lib/custom_plot_lib.pyfrom lib.custom_plot_lib import *Question 1: Observe how is written the function f that return $f(x)$ from input vector $x$. Observe the 3D plot and level plot with custom_3dplot... and level_plot.... 
Do the same for function g.# f def f(x): """ Function f """ x1 = x[0] x2 = x[1] return 4*(x1-3)**2+2*(x2-1)**2 f_plot_param = {'x1_min' : -0.5, 'x1_max' : 5.5, 'x2_min' : -0.5, 'x2_max' : 5.5, 'nb_points' : 200, 'v_min' : 0, 'v_max' : 80, 'levels' : [0.5,1,2,5,10,15], 'title' : 'f: a simple function' } custom_3dplot( f, f_plot_param ) level_plot( f, f_plot_param ) def g(x): return 0 ## To complete g_plot_param = {'x1_min' : -0.5, 'x1_max' : 5.5, 'x2_min' : -0.5, 'x2_max' : 5.5, 'nb_points' : 500, 'v_min' : 0, 'v_max' : 100, 'levels' : [0.5,1,2,5,10,15], 'title' : 'g: a harder function' } custom_3dplot( g, g_plot_param ) #level_plot( g, g_plot_param )Question 2: Fill the function f_grad that return $\nabla f(x)$ from input vector $x$. Do the same for function g_grad.def f_grad(x): # ................................... x1 = x[0] x2 = x[1] g = np.array( [ 0.0 , 0.0 ] ) return g #### def g_grad(x): # ................................... g = np.array( [ 0.0 , 0.0 ] ) return g ####Question 3: implement a constant stepsize gradient method gradient_algorithm(f , f_grad , x0 , step , PREC , ITE_MAX ) that takes: f and f_grad: respectively functions and gradient simulators; x0: starting point; step: a stepsize; PREC and ITE_MAX: stopping criteria for sought precision and maximum number of iterations;and return x, the final value, and x_tab, the matrix of all vectors stacked vertically.def gradient_algorithm(f , f_grad , x0 , step , PREC , ITE_MAX ): # ....................................... x = np.copy(x0) x_tab = np.copy(x) return x,x_tabQuestion 4: Test your gradient descent function on $f$ and $g$: i) Verify that the final point is close to the sought minimizer $(3,1)$; ii) observe the behavior of the iterates with level_points_plot. Change the stepsize and give the values for which the algorithm (i) diverges and (ii) oscillates. Compare with theoretical limits by computing the Lipschitz constant of the gradients.step = 0 # ......................... PREC = 1e-5 # ......................... ITE_MAX = 10 # ......................... x0 = np.array([0,0]) # ......................... x,x_tab = gradient_algorithm(f , f_grad , x0 , step , PREC , ITE_MAX ) level_points_plot( f , x_tab , f_plot_param ) step = 0 # ......................... PREC = 1e-5 # ......................... ITE_MAX = 10 # ......................... x0 = np.array([0,0]) # ......................... x,x_tab = gradient_algorithm(g , g_grad , x0 , step , PREC , ITE_MAX ) level_points_plot( g , x_tab , g_plot_param )b) Application to Regression Go to topWe now get back to the problem of predicting the final grade of a student from various features treated in the Matrix part of the course.We remind that mathematically, from the $m_{learn} \times (n+1)$ *learning matrix* $A_{learn}$($m_{learn} = 300$, $n=27$) comprising of the features values of each training student in line, and the vector of the values of the target features $b_{learn}$; we seek a size-$(n+1)$ *regression vector* that minimizes the squared error between $A_{learn} x$ and $b_{learn}$. This problem boils down to the following least square problem:$$ \min_{x\in\mathbb{R}^{n+1}} s(x) = \frac{1}{2} \| A_{learn} x - b_{learn} \|_2^2 . 
$$import numpy as np # File reading dat_file = np.load('data/student.npz') A_learn = dat_file['A_learn'] b_learn = dat_file['b_learn'] A_test = dat_file['A_test'] b_test = dat_file['b_test'] m = 395 # number of read examples (total:395) n = 27 # features m_learn = 300Question 1: Construct the suitable function $s$ and gradient simulator as in the previous section. Question 2: Compute the Lipschitz constant of the gradient of $s$. Find a solution to the minimization of $s$ using your gradient algorithm. Compare with Numpy's Least Square routine. Question 3: Generate a random Gaussian matrix/vector couple $A,b$ with increasing size. Create simulators to compare the execution time of constant stepsize gradien and pseudo-inverse computation \emph{via} SVD on the least squares problem $\min_x \|Ax-b\|_2^2$. Notably change the *shape* of $A$ from *tall* (nb. of rows $>\!>$ nb. of cols.) to *fat* (nb. of rows $<\!<$ nb. of cols.). c) an Adaptive Gradient Algorithm Go to topThe *Rosenbrock* function $r$ writes \begin{array}{rrcll}r: & \mathbb{R}^2 & \to &\mathbb{R}\\& (x_1,x_2) & \mapsto & (1-x_1)^2 + 100(x_2-x_1^2)^2 .\end{array} Question 1: Fill the functions r that return $r(x)$ from input vector $x$; and r_grad that return $\nabla r(x)$ from input vector $x$. Observe the 3D plot and level plot of the function.##### Definition of function r def r(x): """ Rosenbrock.""" x1 = x[0] x2 = x[1] return (1-x1)**2+100*(x2-x1**2)**2 r_plot_param = {'x1_min' : -1.5, 'x1_max' : 1.55, 'x2_min' : -0.2, 'x2_max' : 1.5, 'nb_points' : 200, 'v_min' : 0, 'v_max' : 120, 'levels' : [0.05,1,5,15,50,200], 'title' : 'r: Rosenbrock function' } custom_3dplot( r,r_plot_param) level_plot( r, r_plot_param) def r_grad(x): # ......................... g = np.array( [ 0.0 , 0.0 ] ) return gQuestion 2: Try to minimize $r$ using your constant stepsize gradient function gradient_algorithm. Can you find a stepsize for which the algorithm converges? Question 3: Implement an *adaptive* stepsize gradient method gradient_adaptive_algorithm(f , f_grad , x0 , step , PREC , ITE_MAX ) that takes the same inputs and returns the same as the gradient method but implements a *stepsize adaptation method*. For instance, one can use this rule:\begin{align*}\mathbf{if} f(x_{k+1})>f(x_k)&:\\x_{k+1} &= x_k\\step &= step/2\end{align*}which halves the stepsize if a gradient step makes the functional value increase.Test your method on $r$: i) Verify that the final point is close to the sought minimizer $(1,1)$; ii) observe the behavior of the iterates with level_points_plot.step = 0 # ......................... PREC = 1e-5 # ......................... ITE_MAX = 10 # ......................... x0 = np.array([0,0]) # ......................... x,x_tab = gradient_adaptive_algorithm(r , r_grad , x0 , step , PREC , ITE_MAX ) #level_points_plot( r , x_tab , r_plot_param)d) Application to Classification Go to topBinary classification is another popular problem in machine learning. Instead of predicting a numerical value, the goal is now to classify the student into two classes: $+1$ -- *pass* i.e. final grade $\geq 10$; and $-1$ -- *fail*. To this purpose, we create a class vector $c_{learn}$ from the observation vector $b_{learn}$ by simply setting $c_{learn}(i) = +1 $ if $b_{learn}(i)\geq10$ and $-1$ otherwise. 
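This thresholding is a one-liner with NumPy (a sketch, assuming `b_learn` is the vector loaded from `data/student.npz` above):

```python
import numpy as np

# +1 = pass (final grade >= 10), -1 = fail
c_learn = np.where(b_learn >= 10, 1.0, -1.0)
```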
Then, the most common approach is to minimize the logistic loss regularized by a squared norm:\begin{equation}\min_{x\in\mathbb{R}^{n+1}} \ell(x) = \sum_{i=1}^{m_{learn} } \log\left( 1 + \exp\left( -c_{learn}(i) a_i^{\mathrm{T}} x \right) \right) + \frac{1}{m}\|x\|^2\end{equation}where $a^\mathrm{T}_i$ is the $i$-th row of $A_{learn}$.Then, from a solution $x^\star$ of this problem, one can classify a new example, represented by its feature vector $a$, as such: the quantity $p(a) = \frac{1}{1+\exp(- a^\mathrm{T} x^\star)}$ estimates the probability of belonging to class $1$; thus, one can decide class $+1$ if for instance $ p(a) \geq 0.5$; otherwise, decide class $-1$. Question 1: Compute the gradient of $q(t) = \log(1+\exp(t))$. Is the function is convex? Deduce that $\ell$ is convex and its gradient. Question 2: Construct the suitable function and gradient simulators in order to use your gradient_adaptive_algorithm to minimize $\ell$. Question 3: From a final point of the optimization algorithm above, generate a decision vector corresponding to the testing set $A_{test}$. Evaluate the classification error. e) Newton's Algorithm Go to top Question 1: Fill the function f_grad_hessian that return $\nabla f(x)$ and $H_f(x)$ from input vector $x$. Same thing for $g$.def f_grad_hessian(x): # ................................... g = np.array( [ 0.0 , 0.0 ] ) H = np.array( [ ( 0.0 , 0.0 ) , ( 0.0 , 0.0 ) ] ) return g,H #### def g_grad_hessian(x): # ................................... g = np.array( [ 0.0 , 0.0 ] ) H = np.array( [ ( 0.0 , 0.0 ) , ( 0.0 , 0.0 ) ] ) return g,H ####Question 2: Implement Newton's method in newton_algorithm(f , f_grad_hessian , x0 , PREC , ITE\_MAX ) that takes as an input: f and f_grad_hessian: respectively functions and gradient + Hessian simulators; x0: starting point; PREC and ITE_MAX: stopping criteria for sought precision and maximum number of iterations;x the final value, and x_tab, the matrix of all vectors stacked vertically.def newton_algorithm(f , f_grad_hessian , x0 , PREC , ITE_MAX ): # ....................................... x = np.copy(x0) x_tab = np.copy(x) return x,x_tabQuestion 3: Test your method on $f$ and $g$: i) Verify that the final point is close to the sought minimizer $(3,1)$; ii) observe the behavior of the iterates with level_points_plot. Compare graphically constant stepsize gradient and Newton's algorithms with level_2points_plot. Newton's algorithm should take exactly one iteration to converge for function $f$. Why so? Is it the case for function $g$? Question 4: Compare Newton method with the adaptive gradient in the case of the classification problem f) To go Further Go to topWe introduce two functions:\begin{array}{rrcll}t: & \mathbb{R}^2 & \to &\mathbb{R}\\& (x_1,x_2) & \mapsto & (0.6 x_1 + 0.2 x_2)^2 \left((0.6 x_1 + 0.2 x_2)^2 - 4 (0.6 x_1 + 0.2 x_2)+4\right) \\& & & + (-0.2 x_1 + 0.6 x_2)^2\end{array} \begin{array}{rrcll}p: & \mathbb{R}^2 & \to &\mathbb{R}\\& (x_1,x_2) & \mapsto & \left| x_1-3 \right| + 2\left| x_2-1\right| \end{array} Question: Test adaptive gradient methods on these functions from different starting points. What do you observe? 
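Only the formulas for $t$ and $p$ are given, so here is a minimal sketch of the corresponding simulators in the same style as `f` and `r` above. Writing $u = 0.6x_1 + 0.2x_2$ and $v = -0.2x_1 + 0.6x_2$, note that $t = u^2(u-2)^2 + v^2$ has two valleys (at $u=0$ and $u=2$), while $p$ is non-smooth with minimizer $(3,1)$:

```python
import numpy as np

def t(x):
    # t(x) = u^2 (u - 2)^2 + v^2 with u, v the scaled/rotated coordinates above
    u = 0.6 * x[0] + 0.2 * x[1]
    v = -0.2 * x[0] + 0.6 * x[1]
    return u**2 * (u - 2)**2 + v**2

def p(x):
    # p(x) = |x1 - 3| + 2|x2 - 1|: piecewise linear, not differentiable at the minimizer (3, 1)
    return np.abs(x[0] - 3) + 2 * np.abs(x[1] - 1)
```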
--- Package Check and StylingGo to topimport lib.notebook_setting as nbs packageList = ['IPython', 'numpy', 'scipy', 'matplotlib', 'cvxopt'] nbs.packageCheck(packageList) nbs.cssStyling()Une affaire de caninesC’est en 1897 que publie *Dracula*, un roman épistolaire relatant l’histoire d’un vampire des Carpates émigrant dans un Londres encore meurtri par les crimes de Jack l’Éventreur.Le texte du roman, disponible dans le répertoire *data*, est extrait du [corpus Gutenberg](https://www.gutenberg.org/).Au cours de cet exercice, vous vous intéresserez aux vingt-cinq adverbes les plus fréquents du roman. Préparer les donnéesDans un premier temps, importez dans une variable `text` le contenu du fichier :path = '../data/dracula.txt' # your code hereImportez ensuite la librairie *NLTK* ainsi que la fonction `word_tokenize()` qui permet de segmenter du texte en mots :# your code herePour utiliser la fonction `word_tokenize()`, vous aurez peut-être besoin de charger la ressource nommée `punkt` :# your code hereAppliquez maintenant la fonction au texte pour récupérer une liste de mots dans une variable `words` :# your code hereÉtiquetageLa seconde étape consiste à lancer l’étiqueteur pré-entraîné de NLTK sur la liste des mots. Vous aurez besoin d’importer la fonction `pos_tag()` :# your code hereEt de charger la ressource `averaged_perceptron_tagger` :# your code hereConstituez une liste de tuples `tagged_words` au format `(word, tag)` grâce à la fonction `pos_tag()` :# your code hereRépétez l’opération mais en choisissant un jeu d’étiquettes personnalisé, le *universal part-of-speech tagset*, qu’il est nécessaire de télécharger au préalable :# your code hereFiltrer les donnéesComme seuls nous importent les adverbes, filtrez la liste afin de ne retenir que les mots étiquetés comme adverbes :# your code hereDénombrer Importez la structure de données `FreqDist` qui appartient au *package* `nltk.probability` et convertissez votre liste d’adverbes :# your code hereAffichez maintenant les 25 résultats les plus fréquents :# your code hereThe Normal Equation Para encontrar el valor de **θ** que minimiza la función de costo, existe una solución de forma cerrada, en otras palabras, una ecuación matemática que da el resultado directamente. A esto se le llama Ecuación Normal: Ecuación normal: $\theta = (X^{T}X)^{-1}X^{T}Y$ * $\theta$ es el valor de θ que minimiza la función de costo.* Y es el vector de valores objetivo que contiene $Y^{(1)}$ hacia $Y^{(m)}$ Primero, importemos algunos módulos comunes, asegurémonos de que MatplotLib traza las figuras en línea y preparemos una función para guardar las figuras. También verificamos que Python 3.5 o posterior esté instalado (aunque Python 2.x puede funcionar, está obsoleto, por lo que le recomendamos encarecidamente que use Python 3 en su lugar), así como Scikit-Learn ≥0.20.# Verificamos que Python ≥3.5 import sys assert sys.version_info >= (3, 5) # Verificamos que Scikit-Learn ≥0.20 import sklearn assert sklearn.__version__ >= "0.20" # Importamos bibliotecas comunes import numpy as np import os # Para que salgan resultados semejantes np.random.seed(42) # Para pintar bonitas figuras %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # Lugar donde se guardaran las figuras PROJECT_ROOT_DIR = "." 
CHAPTER_ID = "training_linear_models" IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID) os.makedirs(IMAGES_PATH, exist_ok=True) def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300): path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension) print("Saving figure", fig_id) if tight_layout: plt.tight_layout() plt.savefig(path, format=fig_extension, dpi=resolution) # Para ignorar adventencias que no son necesarias import warnings warnings.filterwarnings(action="ignore", message="^internal gelsd")Vamos a generar algunos datos de apariencia lineal para probar esta ecuación:import numpy as np X = 2 * np.random.rand(100, 1) y = 4 + 3 * X + np.random.randn(100, 1)Mostremos el conjunto de datos lineales generados aleatoriamenteplt.plot(X, y, "b.") plt.xlabel("$x_1$", fontsize=18) plt.ylabel("$y$", rotation=0, fontsize=18) plt.axis([0, 2, 0, 15]) save_fig("generated_data_plot") plt.show()Saving figure generated_data_plotAhora calculemos $\theta$ usando la ecuación normal. Usaremos la función inv() del módulo de Álgebra lineal de NumPy (np.linalg) para calcular la inversa de una matriz, y el método dot() para la multiplicación de matrices:X_b = np.c_[np.ones((100, 1)), X] # agregue x0 = 1 a cada instancia theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)La función real que usamos para generar los datos es y = 4 + 3x1 + ruido gaussiano. Veamos qué encontró la ecuación:theta_bestHabríamos esperado $\theta_{0}$ = 4 y $\theta_{1}$ = 3 en lugar de $\theta_{0}$ = 4.215 y $\theta_{1}$ = 2.770. Lo suficientemente cerca, pero el ruido hizo imposible recuperar los parámetros exactos de la función original. Ahora harémos predicciones usando θ:X_new = np.array([[0], [2]]) X_new_b = np.c_[np.ones((2, 1)), X_new] # agregue x0 = 1 a cada instancia y_predict = X_new_b.dot(theta_best) y_predictTracemos las predicciones de este modelo''' plt.plot(X_new, y_predict, "r-") plt.plot(X, y, "b.") plt.axis([0, 2, 0, 15]) plt.show() '''Realizar una regresión lineal usando Scikit-Learn es bastante simple:plt.plot(X_new, y_predict, "r-", linewidth=2, label="Predictions") plt.plot(X, y, "b.") plt.xlabel("$x_1$", fontsize=18) plt.ylabel("$y$", rotation=0, fontsize=18) plt.legend(loc="upper left", fontsize=14) plt.axis([0, 2, 0, 15]) save_fig("linear_model_predictions_plot") plt.show() from sklearn.linear_model import LinearRegression lin_reg = LinearRegression() lin_reg.fit(X, y) lin_reg.intercept_, lin_reg.coef_ lin_reg.predict(X_new)La clase LinearRegression se basa en la función scipy.linalg.lstsq () (el nombre significa "mínimos cuadrados"), a la que puede llamar directamente:theta_best_svd, residuals, rank, s = np.linalg.lstsq(X_b, y, rcond=1e-6) theta_best_svdEsta función calcula $\theta$ = $X^+$ + y, donde $X^+$ + es el pseudoinverso de X (específicamente el inverso de Moore-Penrose). Puede usar np.linalg.pinv () para calcular la pseudoinversa directamente:np.linalg.pinv(X_b).dot(y)El pseudoinverso en sí mismo se calcula usando una técnica de factorización de matrices estándar llamada Descomposición de valores singulares (SVD) que puede descomponer la matriz **X** del conjunto de entrenamiento en la multiplicación de tres matrices **U Σ** $V^T$ (ver numpy.linalg.svd ()). La pseudoinversa se calcula como $X^+$ = $VΣ^+U^T$. Para calcular la matriz $Σ^+$, el algoritmo toma Σ y pone a cero todos los valores menores que un pequeño valor de umbral, luego reemplaza todos los valores distintos de cero con su inverso, y finalmente transpone la matriz resultante. 
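The same computation can be written out by hand and checked against `np.linalg.pinv()` (a sketch, reusing the `X_b` and `y` defined above; the `1e-10` cutoff for near-zero singular values is an illustrative assumption):

```python
import numpy as np

# Pseudoinverse via SVD: X_b = U @ diag(s) @ Vt, so X_b^+ = V @ diag(s^+) @ U.T
U, s, Vt = np.linalg.svd(X_b, full_matrices=False)
s_inv = np.where(s > 1e-10, 1.0 / s, 0.0)      # zero out tiny singular values, invert the rest
X_b_pinv = Vt.T @ np.diag(s_inv) @ U.T

print(np.allclose(X_b_pinv, np.linalg.pinv(X_b)))   # expected: True
print(X_b_pinv.dot(y))                               # should match the theta computed above
```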
Este enfoque es más eficiente que calcular la ecuación normal, y además maneja bien los casos extremos: de hecho, la ecuación normal puede no funcionar si la matriz $X^T$X no es invertible (es decir, singular), como si m < n o si algunas características son redundante, pero el pseudoinverso siempre está definido. Complejidad computacionalLa ecuación normal calcula la inversa de $X^T X$, que es una matriz (n + 1) × (n + 1) (donde n es el número de características). La complejidad computacional de invertir tal matriz es típicamente de aproximadamente O ($n^2.4$) a O ($n^3$) (dependiendo de la implementación). En otras palabras, si duplica el número de características, multiplica el tiempo de cálculo por aproximadamente $2^2,4$ = 5,3 a $2^3$ = 8. El enfoque SVD utilizado por la clase LinearRegression de Scikit-Learn es aproximadamente O ($n^2$). Si duplica la cantidad de funciones, multiplica el tiempo de cálculo por aproximadamente 4. >Tanto la ecuación normal como el enfoque SVD se vuelven muy lentos cuando la cantidad de características aumenta (por ejemplo, 100.000). En el lado positivo, ambos son lineales con respecto al número de instancias en el conjunto de entrenamiento (son O (m)), por lo que manejan grandes conjuntos de entrenamiento de manera eficiente, siempre que puedan caber en la memoria. Además, una vez que haya entrenado su modelo de regresión lineal (usando la ecuación normal o cualquier otro algoritmo), las predicciones son muy rápidas: la complejidad computacional es lineal con respecto a la cantidad de instancias en las que desea hacer predicciones y la cantidad de características. En otras palabras, hacer predicciones en el doble de instancias (o el doble de funciones) llevará aproximadamente el doble de tiempo. Ahora veremos formas muy diferentes de entrenar un modelo de regresión lineal, más adecuado para casos donde hay una gran cantidad de características o demasiadas instancias de entrenamiento para caber en la memoria. Gradient DescentGradient Descent es un algoritmo de optimización muy genérico capaz de encontrar soluciones óptimas a una amplia gama de problemas. La idea general de Gradient Descent es ajustar los parámetros de forma iterativa para minimizar una función de coste.Suponga que está perdido en las montañas en una densa niebla; solo puedes sentir la pendiente del suelo debajo de tus pies. Una buena estrategia para llegar rápidamente al fondo del valle es descender en dirección a la pendiente más pronunciada. Esto es exactamente lo que hace Gradient Descent: mide el gradiente local de la función de error con respecto al vector de parámetros θ, y va en la dirección del gradiente descendiente. Una vez que el gradiente es cero, ¡ha alcanzado un mínimo!Concretamente, comienza llenando θ con valores aleatorios (esto se llama inicialización aleatoria), y luego lo mejora gradualmente, dando un pequeño paso a la vez, cada paso intentando disminuir la función de costo (por ejemplo, el MSE), hasta que el algoritmo converja al mínimo: ![1](https://user-images.githubusercontent.com/63415652/104545505-d68c5d80-55ef-11eb-943c-fe01aed20c8b.PNG) Un parámetro importante en Gradient Descent es el tamaño de los pasos, determinado por el hiperparámetro de tasa de aprendizaje. 
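For the quadratic MSE cost used here there is a handy reference point for that step size: the gradient $\frac{2}{m}\mathbf{X}^T(\mathbf{X}\theta - \mathbf{y})$ is Lipschitz with constant $L = \frac{2}{m}\lambda_{max}(\mathbf{X}^T\mathbf{X})$, and for this quadratic a constant step converges whenever $\eta < 2/L$. A small sketch to compute that bound on the synthetic data above (this bound is a standard result, not something stated in this notebook):

```python
import numpy as np

m = X_b.shape[0]                    # number of training instances (100 here)
# Hessian of the MSE cost is (2/m) X_b.T X_b; its largest eigenvalue is the Lipschitz constant L
L = 2 / m * np.linalg.eigvalsh(X_b.T @ X_b).max()
print("L =", L, "-> constant steps converge for eta <", 2 / L)
```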
Si la tasa de aprendizaje es demasiado pequeña, el algoritmo tendrá que pasar por muchas iteraciones para converger, lo que llevará mucho tiempo: ![2](https://user-images.githubusercontent.com/63415652/104545665-3a168b00-55f0-11eb-9a6c-eb2014d0d9cc.PNG) Por otro lado, si la tasa de aprendizaje es demasiado alta, podría saltar a través del valle y terminar en el otro lado, posiblemente incluso más alto de lo que estaba antes. Esto podría hacer que el algoritmo diverja, con valores cada vez mayores, sin encontrar una buena solución: ![3](https://user-images.githubusercontent.com/63415652/104545887-a2656c80-55f0-11eb-9d96-c42aaf3c97f1.PNG) Por último, no todas las funciones de coste se ven como buenos cuencos normales. Puede haber huecos, crestas, mesetas y todo tipo de terrenos irregulares, lo que dificulta la convergencia al mínimo. La siguiente figura muestra los dos desafíos principales con Gradient Descent: si la inicialización aleatoria inicia el algoritmo de la izquierda, entonces convergerá a un mínimo local, que no es tan bueno como el mínimo global. Si comienza a la derecha, tomará mucho tiempo cruzar la meseta, y si se detiene demasiado pronto, nunca alcanzará el mínimo global. ![4](https://user-images.githubusercontent.com/63415652/104546323-89a98680-55f1-11eb-8bc4-de9801badab1.PNG) Afortunadamente, la función de costo MSE para un modelo de regresión lineal resulta ser una función convexa, lo que significa que si elige dos puntos cualesquiera en la curva, el segmento de línea que los une nunca cruza la curva. Esto implica que no hay mínimos locales, solo un mínimo global. También es una función continua con una pendiente que nunca cambia abruptamente. Estos dos hechos tienen una gran consecuencia: Se garantiza que el descenso del gradiente se acercará arbitrariamente al mínimo global (si espera lo suficiente y si la tasa de aprendizaje no es demasiado alta) .De hecho, la función de costo tiene la forma de un cuenco, pero puede ser un cuenco alargado si las características tienen escalas muy diferentes. La siguiente figura muestra Gradient Descent en un conjunto de entrenamiento donde las características 1 y 2 tienen la misma escala (a la izquierda), y en un conjunto de entrenamiento donde la característica 1 tiene valores mucho más pequeños que la característica 2 (a la derecha).Gradient Descent con y sin escala de características: ![5](https://user-images.githubusercontent.com/63415652/104548011-7dbfc380-55f5-11eb-9e38-0904a7b5a2fa.PNG) Como puede ver, a la izquierda el algoritmo Gradient Descent va directo hacia el mínimo, alcanzándolo rápidamente, mientras que a la derecha primero va en una dirección casi ortogonal a la dirección del mínimo global, y termina con una larga marcha. por un valle casi plano. Eventualmente alcanzará el mínimo, pero llevará mucho tiempo. >Al usar Gradient Descent, debe asegurarse de que todas las funciones tengan una escala similar (por ejemplo, usando la clase StandardScaler de Scikit-Learn), o de lo contrario, la convergencia llevará mucho más tiempo. Este diagrama también ilustra el hecho de que entrenar un modelo significa buscar una combinación de parámetros del modelo que minimice una función de costo (sobre el conjunto de entrenamiento). Es una búsqueda en el espacio de parámetros del modelo: cuantos más parámetros tiene un modelo, más dimensiones tiene este espacio y más difícil es la búsqueda: buscar una aguja en un pajar de 300 dimensiones es mucho más complicado que en tres dimensiones . 
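As the tip above notes, putting all features on a similar scale is cheap with Scikit-Learn's `StandardScaler`; a minimal sketch (using the synthetic `X` from earlier, which has a single feature, so this is purely illustrative):

```python
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)                     # zero mean, unit variance per feature
print(X_scaled.mean(axis=0), X_scaled.std(axis=0))     # roughly [0.] and [1.]
```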
Fortunately, since the cost function is convex in the case of Linear Regression, the needle is simply at the bottom of the bowl. Batch Gradient Descent: to implement Gradient Descent, you need to compute the gradient of the cost function with regard to each model parameter $\theta_j$. In other words, you need to calculate how much the cost function will change if you change $\theta_j$ just a little bit. This is called a partial derivative. It is like asking "What is the slope of the mountain under my feet if I face east?" and then asking the same question facing north (and so on for all the other dimensions, if you can imagine a universe with more than three dimensions). The following equation computes the partial derivative of the cost function with regard to parameter $\theta_j$, noted $\frac{\partial}{\partial \theta_j}$ MSE(θ). This is the equation of the partial derivatives of the cost function: ![6](https://user-images.githubusercontent.com/63415652/104643870-aafe8700-5672-11eb-9a4a-675bf3894df2.PNG) Instead of computing these partial derivatives individually, you can use the following equation to compute them all in one go. The gradient vector, noted $∇_θ$ MSE(θ), contains all the partial derivatives of the cost function (one per model parameter). Equation of the gradient vector of the cost function: ![7](https://user-images.githubusercontent.com/63415652/104644345-47c12480-5673-11eb-831a-484352dd9531.PNG) >Notice that this formula involves calculations over the full training set X at each Gradient Descent step. This is why the algorithm is called Batch Gradient Descent: it uses the whole batch of training data at every step (actually, Full Gradient Descent would probably be a better name). As a result, it is terribly slow on very large training sets (but we will see much faster Gradient Descent algorithms shortly). However, Gradient Descent scales well with the number of features; training a Linear Regression model when there are hundreds of thousands of features is much faster using Gradient Descent than using the Normal Equation or SVD decomposition. Once you have the gradient vector, which points uphill, just go in the opposite direction to go downhill. This means subtracting $∇_θ$MSE(**θ**) from **θ**. This is where the learning rate η comes into play: multiply the gradient vector by η to determine the size of the downhill step. Gradient Descent step equation: ![8](https://user-images.githubusercontent.com/63415652/104644993-201e8c00-5674-11eb-86c4-c78012d305af.PNG) Let's look at a quick implementation of this algorithm: eta = 0.1 # learning rate n_iterations = 1000 m = 100 theta = np.random.randn(2,1) # random initialization for iteration in range(n_iterations): gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y) theta = theta - eta * gradients Let's look at the resulting theta: theta X_new_b.dot(theta) That is exactly what the Normal Equation found. Gradient Descent worked perfectly. But what if you had used a different learning rate eta?
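Before looking at different learning rates, here is a plain-LaTeX transcription of the three equations that appear above only as linked images (the standard MSE partial derivative, gradient vector, and update step for Linear Regression; this matches the `gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)` line in the code):

\begin{align*}
\frac{\partial}{\partial \theta_j} \mathrm{MSE}(\theta) &= \frac{2}{m} \sum_{i=1}^{m} \left( \theta^T \mathbf{x}^{(i)} - y^{(i)} \right) x_j^{(i)} \\
\nabla_{\theta}\, \mathrm{MSE}(\theta) &= \frac{2}{m} \mathbf{X}^T \left( \mathbf{X}\theta - \mathbf{y} \right) \\
\theta^{(\text{next step})} &= \theta - \eta\, \nabla_{\theta}\, \mathrm{MSE}(\theta)
\end{align*}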
The following plot shows the first 10 steps of Gradient Descent using three different learning rates (the dashed line represents the starting point). Now let's look at Gradient Descent with several learning rates: theta_path_bgd = [] def plot_gradient_descent(theta, eta, theta_path=None): m = len(X_b) plt.plot(X, y, "b.") n_iterations = 1000 for iteration in range(n_iterations): if iteration < 10: y_predict = X_new_b.dot(theta) style = "b-" if iteration > 0 else "r--" plt.plot(X_new, y_predict, style) gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y) theta = theta - eta * gradients if theta_path is not None: theta_path.append(theta) plt.xlabel("$x_1$", fontsize=18) plt.axis([0, 2, 0, 15]) plt.title(r"$\eta = {}$".format(eta), fontsize=16) np.random.seed(42) theta = np.random.randn(2,1) # random initialization plt.figure(figsize=(10,4)) plt.subplot(131); plot_gradient_descent(theta, eta=0.02) plt.ylabel("$y$", rotation=0, fontsize=18) plt.subplot(132); plot_gradient_descent(theta, eta=0.1, theta_path=theta_path_bgd) plt.subplot(133); plot_gradient_descent(theta, eta=0.5) save_fig("gradient_descent_plot") plt.show() Saving figure gradient_descent_plot On the left, the learning rate is too low: the algorithm will eventually reach the solution, but it will take a long time. In the middle, the learning rate looks pretty good: in just a few iterations it has already converged to the solution. On the right, the learning rate is too high: the algorithm diverges, jumping all over the place and actually getting farther and farther away from the solution at every step. To find a good learning rate, you can use grid search. However, you may want to limit the number of iterations so that grid search can eliminate models that take too long to converge. You may wonder how to set the number of iterations. If it is too low, you will still be far from the optimal solution when the algorithm stops, but if it is too high, you will waste time while the model parameters no longer change. A simple solution is to set a very large number of iterations but interrupt the algorithm when the gradient vector becomes tiny, that is, when its norm becomes smaller than a tiny number ϵ (called the tolerance), because this happens when Gradient Descent has (almost) reached the minimum. Convergence rate: when the cost function is convex and its slope does not change abruptly (as is the case for the MSE cost function), Batch Gradient Descent with a fixed learning rate will eventually converge to the optimal solution, but you may have to wait a while: it can take O(1/ϵ) iterations to reach the optimum within a range of ϵ, depending on the shape of the cost function. If you divide the tolerance by 10 to get a more precise solution, the algorithm may have to run about 10 times longer. Stochastic Gradient Descent: the main problem with Batch Gradient Descent is the fact that it uses the whole training set to compute the gradients at every step, which makes it very slow when the training set is large. At the opposite extreme, Stochastic Gradient Descent picks a random instance in the training set at every step and computes the gradients based only on that single instance. Obviously, this makes the algorithm much faster, since it has very little data to manipulate at every iteration (a minimal one-step sketch follows this block).
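Here is a minimal sketch of what one Stochastic Gradient Descent step looks like, assuming `X_b` (bias-augmented features), `y`, `theta`, and a learning rate `eta` already exist as in the earlier cells; the full implementation with a learning schedule follows below.

```python
import numpy as np

# One SGD step: estimate the gradient from a single, randomly chosen training instance.
m = len(X_b)
random_index = np.random.randint(m)            # pick one training instance at random
xi = X_b[random_index:random_index + 1]        # keep 2-D shape (1, n+1)
yi = y[random_index:random_index + 1]
gradients = 2 * xi.T.dot(xi.dot(theta) - yi)   # noisy gradient estimate from that instance
theta = theta - eta * gradients                # one small, noisy step downhill
```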
It also makes it possible to train on huge training sets, since only one instance needs to be in memory at each iteration (SGD can be implemented as an out-of-core algorithm). On the other hand, due to its stochastic (i.e., random) nature, this algorithm is much less regular than Batch Gradient Descent: instead of decreasing smoothly until it reaches the minimum, the cost function bounces up and down, decreasing only on average. Over time it ends up very close to the minimum, but once it gets there it keeps bouncing around and never settles down. So once the algorithm stops, the final parameter values are good, but not optimal. Stochastic Gradient Descent: ![9](https://user-images.githubusercontent.com/63415652/104658893-54e90e00-5689-11eb-9976-c23bff509ab7.PNG) When the cost function is very irregular, this can actually help the algorithm jump out of local minima, so Stochastic Gradient Descent has a better chance of finding the global minimum than Batch Gradient Descent does. Randomness is therefore good to escape from local optima, but bad because it means the algorithm can never settle at the minimum. One solution to this dilemma is to gradually reduce the learning rate. The steps start out large (which helps make quick progress and escape local minima), then get smaller and smaller, allowing the algorithm to settle at the global minimum. This process is akin to simulated annealing, an algorithm inspired by the metallurgical process of annealing, in which molten metal is slowly cooled down. The function that determines the learning rate at each iteration is called the learning schedule. If the learning rate is reduced too quickly, you may get stuck in a local minimum, or even end up frozen halfway to the minimum. If the learning rate is reduced too slowly, you may jump around the minimum for a long time and end up with a suboptimal solution if you halt training too early. This code implements Stochastic Gradient Descent using a simple learning schedule: theta_path_sgd = [] m = len(X_b) np.random.seed(42) By convention we iterate by rounds of m iterations; each round is called an epoch.
Whereas the Batch Gradient Descent code iterated 1,000 times through the whole training set, this code goes through the training set only 50 times and reaches a fairly good solution: theta Here are the first 20 steps of training (notice how irregular the steps are). n_epochs = 50 t0, t1 = 5, 50 # learning schedule hyperparameters def learning_schedule(t): return t0 / (t + t1) theta = np.random.randn(2,1) # random initialization for epoch in range(n_epochs): for i in range(m): if epoch == 0 and i < 20: # not shown in the book y_predict = X_new_b.dot(theta) # not shown style = "b-" if i > 0 else "r--" # not shown plt.plot(X_new, y_predict, style) # not shown random_index = np.random.randint(m) xi = X_b[random_index:random_index+1] yi = y[random_index:random_index+1] gradients = 2 * xi.T.dot(xi.dot(theta) - yi) eta = learning_schedule(epoch * m + i) theta = theta - eta * gradients theta_path_sgd.append(theta) # not shown plt.plot(X, y, "b.") # not shown plt.xlabel("$x_1$", fontsize=18) # not shown plt.ylabel("$y$", rotation=0, fontsize=18) # not shown plt.axis([0, 2, 0, 15]) # not shown save_fig("sgd_plot") # not shown plt.show() # not shown Saving figure sgd_plot Note that since instances are picked randomly, some instances may be picked several times per epoch, while others may not be picked at all. If you want to be sure the algorithm goes through every instance at each epoch, another approach is to shuffle the training set (making sure to shuffle the input features and the labels jointly), then go through it instance by instance, then shuffle it again, and so on (a short sketch of this variant appears at the end of this block). However, this approach generally converges more slowly. >When using Stochastic Gradient Descent, the training instances must be independent and identically distributed (IID) to ensure that the parameters are pulled toward the global optimum, on average. A simple way to ensure this is to shuffle the instances during training (e.g., pick each instance randomly, or shuffle the training set at the beginning of each epoch). If you do not do this, for example if the instances are sorted by label, then SGD will start by optimizing for one label, then the next, and so on, and it will not settle close to the global minimum. To perform Linear Regression using SGD with Scikit-Learn, you can use the SGDRegressor class, which by default optimizes the squared error cost function. The following code runs for a maximum of 1,000 epochs (max_iter = 1000) or until the loss drops by less than 1e-3 during one epoch (tol = 1e-3), starting with a learning rate of 0.1 (eta0 = 0.1), using the default learning schedule (different from the previous one), and does not use any regularization (penalty = None; more details on this shortly): from sklearn.linear_model import SGDRegressor sgd_reg = SGDRegressor(max_iter=1000, tol=1e-3, penalty=None, eta0=0.1, random_state=42) sgd_reg.fit(X, y.ravel()) Once again, it finds a solution fairly close to the one returned by the Normal Equation: sgd_reg.intercept_, sgd_reg.coef_ Mini-batch Gradient Descent: the last Gradient Descent algorithm we will look at is called Mini-batch Gradient Descent.
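Before moving on to Mini-batch GD, here is a minimal sketch of the shuffle-every-epoch SGD variant mentioned above. It assumes `X_b`, `y`, and the `learning_schedule` function from the previous cells; each epoch visits every instance exactly once, in a freshly shuffled order.

```python
import numpy as np

n_epochs = 50
m = len(X_b)
theta = np.random.randn(2, 1)                      # random initialization

for epoch in range(n_epochs):
    shuffled_indices = np.random.permutation(m)    # shuffles features and labels together
    for i, idx in enumerate(shuffled_indices):
        xi = X_b[idx:idx + 1]
        yi = y[idx:idx + 1]
        gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
        eta = learning_schedule(epoch * m + i)
        theta = theta - eta * gradients
```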
It is quite simple to understand once you know Batch and Stochastic Gradient Descent: at each step, instead of computing the gradients based on the full training set (as in Batch GD) or based on just one instance (as in Stochastic GD), Mini-batch GD computes the gradients on small random sets of instances called mini-batches. The main advantage of Mini-batch GD over Stochastic GD is that you can get a performance boost from hardware optimization of matrix operations, especially when using GPUs. The algorithm's progress in parameter space is less erratic than with SGD, especially with fairly large mini-batches. As a result, Mini-batch GD ends up walking around a bit closer to the minimum than SGD. On the other hand, it may be harder for it to escape from local minima (in the case of problems that suffer from local minima, unlike Linear Regression, as we saw earlier). The following plot shows the paths taken by the three Gradient Descent algorithms in parameter space during training. They all end up near the minimum, but Batch GD's path actually stops at the minimum, while both Stochastic GD and Mini-batch GD keep walking around. However, do not forget that Batch GD takes a lot of time to take each step, and Stochastic GD and Mini-batch GD would also reach the minimum if you used a good learning schedule. Gradient Descent paths in parameter space: theta_path_mgd = [] n_iterations = 50 minibatch_size = 20 np.random.seed(42) theta = np.random.randn(2,1) # random initialization t0, t1 = 200, 1000 def learning_schedule(t): return t0 / (t + t1) t = 0 for epoch in range(n_iterations): shuffled_indices = np.random.permutation(m) X_b_shuffled = X_b[shuffled_indices] y_shuffled = y[shuffled_indices] for i in range(0, m, minibatch_size): t += 1 xi = X_b_shuffled[i:i+minibatch_size] yi = y_shuffled[i:i+minibatch_size] gradients = 2/minibatch_size * xi.T.dot(xi.dot(theta) - yi) eta = learning_schedule(t) theta = theta - eta * gradients theta_path_mgd.append(theta) theta theta_path_bgd = np.array(theta_path_bgd) theta_path_sgd = np.array(theta_path_sgd) theta_path_mgd = np.array(theta_path_mgd) plt.figure(figsize=(7,4)) plt.plot(theta_path_sgd[:, 0], theta_path_sgd[:, 1], "r-s", linewidth=1, label="Stochastic") plt.plot(theta_path_mgd[:, 0], theta_path_mgd[:, 1], "g-+", linewidth=2, label="Mini-batch") plt.plot(theta_path_bgd[:, 0], theta_path_bgd[:, 1], "b-o", linewidth=3, label="Batch") plt.legend(loc="upper left", fontsize=16) plt.xlabel(r"$\theta_0$", fontsize=20) plt.ylabel(r"$\theta_1$ ", fontsize=20, rotation=0) plt.axis([2.5, 4.5, 2.3, 3.9]) save_fig("gradient_descent_paths_plot") plt.show() Saving figure gradient_descent_paths_plot PYTHON Pandas Basic Functions: So far we have learned about the three pandas data structures and how to create them. Because of its importance in real-time data processing, we will now focus on DataFrame objects, and we will also touch on a few of the other data structures.
Basic functions of Series: - axes : Returns a list of the row axis labels. - dtype : Returns the dtype object. - empty : Returns True if the Series is empty, otherwise False. - ndim : Returns the number of dimensions of the underlying data, which is 1 by definition. - size : Returns the number of elements in the underlying data. - values : Returns the Series as an ndarray. - head : Returns the first n rows. - tail : Returns the last n rows. Now let's reinforce the structures above with examples. **Example 1** import pandas as pd import numpy as np s = pd.Series(np.random.randn(4)) print(s) 0 1.373370 1 -0.960120 2 0.143523 3 0.553266 dtype: float64 **Example 2 (axes)** Returns a list of the row axis labels. import pandas as pd import numpy as np s = pd.Series(np.random.randn(4)) print(s.axes) # start: beginning index # stop: end index # step: increment [RangeIndex(start=0, stop=4, step=1)] **Example 3 (empty)** Returns True if the Series is empty, otherwise False. import pandas as pd import numpy as np s = pd.Series(np.random.randn(4)) print(s.empty) # True if empty # False if not empty False **Example 4 (ndim)** Returns the number of dimensions of the underlying data, which is 1 by definition. import pandas as pd import numpy as np s = pd.Series(np.random.randn(4)) print(s.ndim) 1 **Example 5 (size)** Returns the number of elements in the underlying data. import pandas as pd import numpy as np s = pd.Series(np.random.randn(4)) print(s.size) 4 **Example 6 (values)** Returns the Series as an ndarray. import pandas as pd import numpy as np s = pd.Series(np.random.randn(4)) print(s.values) [0.22313449 0.40836713 1.72190058 0.84874151] **Example 7 (head)** Returns the first n rows. import pandas as pd import numpy as np s = pd.Series(np.random.randn(4)) print(s.head(2)) 0 0.405320 1 2.096544 dtype: float64 **Example 8 (tail)** Returns the last n rows. import pandas as pd import numpy as np s = pd.Series(np.random.randn(4)) print(s.tail(2)) 2 1.790909 3 -1.439368 dtype: float64 PYTHON Pandas Basic Functions: Now that we have covered basic Series functionality, the list below contains the important attributes and methods that help with basic DataFrame functionality. Basic functions of DataFrame: - T : Transposes rows and columns. - axes : Returns a list with the row axis labels and the column axis labels. - dtypes : Returns the dtype of each column. - empty : Returns True if the DataFrame is empty, otherwise False. - ndim : Number of axes / array dimensions. - shape : Returns a tuple representing the dimensionality of the DataFrame. - size : Returns the number of elements in the DataFrame. - values : Returns the DataFrame as a NumPy array. - head : Returns the first n rows. - tail : Returns the last n rows. Now let's reinforce the topic in the same way with examples of the structures above.
**Example 1** import pandas as pd import numpy as np # Build a dict of Series data = { 'Ad': pd.Series(['Furkan','Kemal','Osman','Zeynep','Fatma','Buse']), 'Yaş': pd.Series([21,25,30,24,28,26]), 'Meslek': pd.Series(['mühendis','doktor','polis','mimar','avukat','öğretmen']) } df = pd.DataFrame(data) df **Example 1 (T)** Transposes rows and columns. import pandas as pd import numpy as np data = { 'Ad': pd.Series(['Furkan','Kemal','Osman','Zeynep','Fatma','Buse']), 'Yaş': pd.Series([21,25,30,24,28,26]), 'Meslek': pd.Series(['mühendis','doktor','polis','mimar','avukat','öğretmen']) } df = pd.DataFrame(data) df.T **Example 2 (axes)** Returns a list with the row axis labels and the column axis labels. import pandas as pd import numpy as np data = { 'Ad': pd.Series(['Furkan','Kemal','Osman','Zeynep','Fatma','Buse']), 'Yaş': pd.Series([21,25,30,24,28,26]), 'Meslek': pd.Series(['mühendis','doktor','polis','mimar','avukat','öğretmen']) } df = pd.DataFrame(data) print(df.axes) # index: row labels # dtype: data type [RangeIndex(start=0, stop=6, step=1), Index(['Ad', 'Yaş', 'Meslek'], dtype='object')] **Example 3 (dtypes)** Returns the dtype of each column. import pandas as pd import numpy as np data = { 'Ad': pd.Series(['Furkan','Kemal','Osman','Zeynep','Fatma','Buse']), 'Yaş': pd.Series([21,25,30,24,28,26]), 'Meslek': pd.Series(['mühendis','doktor','polis','mimar','avukat','öğretmen']) } df = pd.DataFrame(data) df.dtypes **Example 4 (empty)** Returns True if the DataFrame is empty, otherwise False. import pandas as pd import numpy as np data = { 'Ad': pd.Series(['Furkan','Kemal','Osman','Zeynep','Fatma','Buse']), 'Yaş': pd.Series([21,25,30,24,28,26]), 'Meslek': pd.Series(['mühendis','doktor','polis','mimar','avukat','öğretmen']) } df = pd.DataFrame(data) print(df.empty) # is it empty?
# no -> False # yes -> True False **Example 5 (ndim)** Number of axes / array dimensions. import pandas as pd import numpy as np data = { 'Ad': pd.Series(['Furkan','Kemal','Osman','Zeynep','Fatma','Buse']), 'Yaş': pd.Series([21,25,30,24,28,26]), 'Meslek': pd.Series(['mühendis','doktor','polis','mimar','avukat','öğretmen']) } df = pd.DataFrame(data) print(df.ndim) 2 **Example 6 (shape)** Returns a tuple representing the dimensionality of the DataFrame. import pandas as pd import numpy as np data = { 'Ad': pd.Series(['Furkan','Kemal','Osman','Zeynep','Fatma','Buse']), 'Yaş': pd.Series([21,25,30,24,28,26]), 'Meslek': pd.Series(['mühendis','doktor','polis','mimar','avukat','öğretmen']) } df = pd.DataFrame(data) print(df.shape) # (number of rows, number of columns) (6, 3) **Example 7 (size)** Returns the number of elements in the DataFrame. import pandas as pd import numpy as np data = { 'Ad': pd.Series(['Furkan','Kemal','Osman','Zeynep','Fatma','Buse']), 'Yaş': pd.Series([21,25,30,24,28,26]), 'Meslek': pd.Series(['mühendis','doktor','polis','mimar','avukat','öğretmen']) } df = pd.DataFrame(data) print(df.size) 18 **Example 8 (values)** Returns the DataFrame as a NumPy array. import pandas as pd import numpy as np data = { 'Ad': pd.Series(['Furkan','Kemal','Osman','Zeynep','Fatma','Buse']), 'Yaş': pd.Series([21,25,30,24,28,26]), 'Meslek': pd.Series(['mühendis','doktor','polis','mimar','avukat','öğretmen']) } df = pd.DataFrame(data) df.values **Example 9 (head)** Returns the first n rows. import pandas as pd import numpy as np data = { 'Ad': pd.Series(['Furkan','Kemal','Osman','Zeynep','Fatma','Buse']), 'Yaş': pd.Series([21,25,30,24,28,26]), 'Meslek': pd.Series(['mühendis','doktor','polis','mimar','avukat','öğretmen']) } df = pd.DataFrame(data) df.head(3) **Example 10 (tail)** Returns the last n rows. import pandas as pd import numpy as np data = { 'Ad': pd.Series(['Furkan','Kemal','Osman','Zeynep','Fatma','Buse']), 'Yaş': pd.Series([21,25,30,24,28,26]), 'Meslek': pd.Series(['mühendis','doktor','polis','mimar','avukat','öğretmen']) } df = pd.DataFrame(data) df.tail(2) Fault Tree with qubovert *qubovert* must be pip installed. We will encode the Fault Tree problem shown below. The inputs are `e0`, `e1`, `e2`, and `e3`, which are boolean variables; for example, `e0 == 1` if Error 0 occurs, otherwise it is 0. The output is `p0`, which is a boolean indicating top level failure; it is 1 if a top level failure occurs, otherwise it is 0. The goal of this analysis is to find the minimum number of errors (`e0`, `e1`, `e2`, `e3`) that must be 1 in order for `p0` to be 1.![](https://files.slack.com/files-pri/T24940PQV-FFR7K200K/fault_tree.png?pub_secret=f370b437dc)The Fault Tree image above can be summarized by the following:\begin{align*}z0 &= e0 \text{ OR } e3\\z1 &= e2 \text{ OR } z0\\z2 &= e0 \text{ OR } e1\\p0 &= z1 \text{ AND } z2\end{align*}We will solve this problem in two ways:1. Solving with the `qubovert.sat` library1. Solving with `qubovert.PCBO` Solving with the `qubovert.sat` libraryLet's create the variables and use the `qubovert.sat` library to encode `p0`.from qubovert import boolean_var from qubovert.sat import OR, AND e0, e1 = boolean_var('e0'), boolean_var('e1') e2, e3 = boolean_var('e2'), boolean_var('e3') z0 = OR(e0, e3) z1 = OR(z0, e2) z2 = OR(e0, e1) p0 = AND(z1, z2) print(p0){('e0',): 1, ('e0', 'e1', 'e3'): -1, ('e1', 'e3'): 1, ('e0', 'e1', 'e3', 'e2'): 1, ('e1', 'e3', 'e2'): -1, ('e0', 'e1', 'e2'): -1, ('e1', 'e2'): 1}We want to find the minimum number of errors that will lead to a top level error.
Thus, we want to minimize `H`.H = e0 + e1 + e2 + e3 print(H){('e0',): 1, ('e1',): 1, ('e2',): 1, ('e3',): 1}We now subject `H` to the constraint that `p0 == 1`, or equivalently `1 - p0 == 0`, where we notice that `1 - p0` is bounded below by 0 and above by 1. We will add a penalty `lam` to the PCBO to enforce this constraint. For now, let's make it a symbol that we can tune later.#!pip install sympy from sympy import Symbol lam = Symbol("lam", positive=True) H.add_constraint_eq_zero(1 - p0, lam=lam, bounds=(0, 1)) print("PCBO:\n", H, "\n") print("Constraints:\n", H.constraints)PCBO: {('e0',): 1 - lam, ('e1',): 1, ('e2',): 1, ('e3',): 1, ('e0', 'e1', 'e3'): lam, ('e1', 'e3'): -lam, ('e0', 'e1', 'e3', 'e2'): -lam, ('e1', 'e3', 'e2'): lam, ('e0', 'e1', 'e2'): lam, ('e1', 'e2'): -lam, (): lam} Constraints: {'eq': [{('e0',): -1, ('e0', 'e1', 'e3'): 1, ('e1', 'e3'): -1, ('e0', 'e1', 'e3', 'e2'): -1, ('e1', 'e3', 'e2'): 1, ('e0', 'e1', 'e2'): 1, ('e1', 'e2'): -1, (): 1}]}Notice there is one equality constraint coming from the requirement that `1 == p0`. The `H.is_solution_valid` function will take in a proposed solution to the problem and ensure that this constraint is satisfied.The `'eq'` key of the constraints dictionary indicates that the quantity equals zero, and the `'lt'` key of the constraints dictionary indicates that the quantity is less than zero. Other possible keys are `'le'`, `'gt'`, and `'ge'`. See the docstrings for `PCBO.add_constraint_eq_zero`, `PCBO.add_constraint_lt_zero`, `PCBO.add_constraint_le_zero`, `PCBO.add_constraint_gt_zero`, and `PCBO.add_constraint_ge_zero` for info. For testing purposes, let's solve this bruteforce to make sure everything is working.solutions = H.solve_bruteforce(all_solutions=True) print(solutions)[{'e0': 1, 'e1': 0, 'e2': 0, 'e3': 0}]Notice that there is one unique solution that minimizes the objective function and obeys all the constraints. If just the error `e0` occurs, then a top level failure will occur.solution = solutions[0] print("Minimum number of failures that leads to a top level failure:", H.value(solution)) print("p0 =", p0.value(solution))Minimum number of failures that leads to a top level failure: 1 p0 = 1Now let's solve this problem with a generic QUBO solver. Notice that the degree of problem is more than two, making `H` not a natural Quadratic Unconstrained Boolean Optimization Problem (QUBO).H.degreeWe can convert it to a QUBO (note that there are some options for the reduction from PUBO to QUBO, see the `H.to_qubo` method for details). Ancilla bits will need to be added, and bit labels are mapped to integers.Q = H.to_qubo() print("num PUBO variables", H.num_binary_variables) print("num QUBO variables", Q.num_binary_variables) print() print(Q)num PUBO variables 4 num QUBO variables 6 {(0,): 1 - lam, (1,): 1, (2,): 1, (3,): 1, (4,): 9*lam + 9, (1, 3): 2*lam + 3, (1, 4): -6*lam - 6, (3, 4): -6*lam - 6, (0, 4): lam, (5,): 6*lam + 6, (0, 2): 2*lam + 2, (0, 5): -4*lam - 4, (2, 5): -4*lam - 4, (4, 5): -lam, (2, 4): lam, (1, 5): lam, (1, 2): -lam, (): lam}For testing purposes, let's solve this with bruteforce to see what the proper value of $\lambda$ should be to enforce the constraints. 
Notice how we remap the QUBO solution to the PCBO solution with `H.convert_solution(x)`.for l in (1, 2, 3): Q_temp = Q.subs({lam: l}) solutions = Q_temp.solve_bruteforce(all_solutions=True) solutions = [H.convert_solution(x) for x in solutions] print('lam', l) for s in solutions: print("\t", s, "is", "valid" if H.is_solution_valid(s) else "invalid") print()lam 1 {'e0': 0, 'e1': 0, 'e2': 0, 'e3': 0} is invalid {'e0': 1, 'e1': 0, 'e2': 0, 'e3': 0} is valid lam 2 {'e0': 1, 'e1': 0, 'e2': 0, 'e3': 0} is valid lam 3 {'e0': 1, 'e1': 0, 'e2': 0, 'e3': 0} is validWe see that $\lambda = 2$ is sufficient to enforce the constraints. So let's update our QUBO.Q_good = Q.subs({lam: 2})Now let's solve the QUBO with D'Wave's simulated annealer.#!pip install dwave-neal from neal import SimulatedAnnealingSampler sampler = SimulatedAnnealingSampler()Note that their software package takes in a specific form for QUBOs, namely, the keys of the dictionary must be two element tuples. This form can be accessed from `Q` and `Q_good` with `Q.Q` or `Q_good.Q`.qubo_sample = sampler.sample_qubo(Q_good.Q, num_reads=100) print("objective function:", qubo_sample.first.energy + Q_good.offset, "\n") qubo_solution = qubo_sample.first.sample print("qubo solution:", qubo_solution, "\n") solution = H.convert_solution(qubo_solution) print("pcbo solution:", solution) print("objective function:", H.value(solution), "\n") print("The solution is", "valid" if H.is_solution_valid(solution) else "invalid")objective function: 1.0 qubo solution: {0: 1, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0} pcbo solution: {'e0': 1, 'e1': 0, 'e2': 0, 'e3': 0} objective function: 1 The solution is validThis matches the result of `H.solve_bruteforce()`. Recall that the objective function is equal to the minimum number of failures that will lead to a top level failure.Now we'll solve an QUSO formulation of our problem. Again we'll take $\lambda = 2$.L = H.to_quso().subs({lam: 2}) # note that we cannot do H.subs({lam: 2}).to_quso()!! This is because H.subs({lam: 2}) # creates a new PCBO object, and it's mapping from variables labels to integers may be # different than H's mapping. For example, try H.mapping == H.subs({lam: 2}).mapping a # few times. They will often be different. print("num PUBO variables", H.num_binary_variables) print("num QUSO variables", L.num_binary_variables) print() print(L)num PUBO variables 4 num QUSO variables 6 {(0,): 1.5, (): 14.25, (1,): 2.25, (2,): 1.0, (3,): 2.25, (4,): -5.0, (1, 3): 1.75, (1, 4): -4.5, (3, 4): -4.5, (0, 4): 0.5, (5,): -3.0, (0, 2): 1.5, (0, 5): -3.0, (2, 5): -3.0, (4, 5): -0.5, (2, 4): 0.5, (1, 5): 0.5, (1, 2): -0.5}Similar to their QUBO solver, D'Wave's QUSO solver accepts a specific form for QUSO models, namely a linear term dictionary and a quadratic term dictionary. These can be accessed with `L.h` and `L.J`.quso_sample = sampler.sample_ising(L.h, L.J, num_reads=100) print("objective function:", quso_sample.first.energy + L.offset, "\n") quso_solution = quso_sample.first.sample print("quso solution:", quso_solution, "\n") solution = H.convert_solution(quso_solution) print("pcbo solution:", solution) print("objective function:", H.value(solution), "\n") print("The solution is", "valid" if H.is_solution_valid(solution) else "invalid")objective function: 1.0 quso solution: {0: -1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1} pcbo solution: {'e0': 1, 'e1': 0, 'e2': 0, 'e3': 0} objective function: 1 The solution is validAgain this matches the result of `H.solve_bruteforce()`. 
Solving with `qubovert.PCBO`We want to find the minimum number of errors that will lead to a top level error. Thus, we want to minimize `H`. We could make `H` the same way that we did in the previous section, by creating variables with ``qubovert.boolean_var``, but instead for illistration we will make it as a dictionary instead.from qubovert import PCBO H = PCBO( {(x,): 1 for x in ('e0', 'e1', 'e2', 'e3')} ) print(H){('e0',): 1, ('e1',): 1, ('e2',): 1, ('e3',): 1}Now let's enforce the constraints. We will need to enforce the constraints with a penalty factor $\lambda$. Let's create a symbol here that we can tune later.#!pip install sympy from sympy import Symbol lam = Symbol("lam", positive=True)Now let's enforce the constraints (reproduced here for reference):\begin{align*}z0 &= e0 \text{ OR } e3\\z1 &= e2 \text{ OR } z0\\z2 &= e0 \text{ OR } e1\end{align*}H.add_constraint_eq_OR( 'z0', 'e0', 'e3', lam=lam ).add_constraint_eq_OR( 'z1', 'e2', 'z0', lam=lam ).add_constraint_eq_OR( 'z2', 'e0', 'e1', lam=lam ) print(H){('e0',): 2*lam + 1, ('e1',): lam + 1, ('e2',): lam + 1, ('e3',): lam + 1, ('z0',): 2*lam, ('e0', 'e3'): lam, ('e0', 'z0'): -2*lam, ('z0', 'e3'): -2*lam, ('z1',): lam, ('z0', 'e2'): lam, ('e2', 'z1'): -2*lam, ('z0', 'z1'): -2*lam, ('z2',): lam, ('e0', 'e1'): lam, ('e0', 'z2'): -2*lam, ('e1', 'z2'): -2*lam}Finally, we want to make $p0 = z1 \text{ AND } z2$ energetically favorable. We can do this with the following.H.add_constraint_AND('z1', 'z2', lam=lam) print("PCBO:\n", H, "\n") print("Constraints:\n", H.constraints)PCBO: {('e0',): 2*lam + 1, ('e1',): lam + 1, ('e2',): lam + 1, ('e3',): lam + 1, ('z0',): 2*lam, ('e0', 'e3'): lam, ('e0', 'z0'): -2*lam, ('z0', 'e3'): -2*lam, ('z1',): lam, ('z0', 'e2'): lam, ('e2', 'z1'): -2*lam, ('z0', 'z1'): -2*lam, ('z2',): lam, ('e0', 'e1'): lam, ('e0', 'z2'): -2*lam, ('e1', 'z2'): -2*lam, ('z2', 'z1'): -lam, (): lam} Constraints: {'eq': [{('z0',): 1, ('e0',): 1, ('e3',): 1, ('e0', 'e3'): 1, ('e0', 'z0'): -2, ('z0', 'e3'): -2}, {('z1',): 1, ('e2',): 1, ('z0',): 1, ('z0', 'e2'): 1, ('e2', 'z1'): -2, ('z0', 'z1'): -2}, {('z2',): 1, ('e0',): 1, ('e1',): 1, ('e0', 'e1'): 1, ('e0', 'z2'): -2, ('e1', 'z2'): -2}, {('z2', 'z1'): -1, (): 1}]}The `H.is_solution_valid` function will take in a proposed solution to the problem and ensure that these constraints are satisfied. For testing purposes, let's solve this bruteforce to make sure everything is working.solutions = H.solve_bruteforce(all_solutions=True) print(solutions)[{'e0': 1, 'e1': 0, 'e2': 0, 'e3': 0, 'z0': 1, 'z1': 1, 'z2': 1}]Notice that there is one unique solution that minimizes the objective function and obeys all the constraints. If just the error `e0` occurs, then a top level failure will occur.solution = solutions[0] print("Minimum number of failures that leads to a top level failure:", H.value(solution)) print("p0 =", H.value(solution))Minimum number of failures that leads to a top level failure: 1 p0 = 1Now let's solve this problem with a generic QUBO solver. 
Notice that the degree of problem is two, making `H` a natural Quadratic Unconstrained Boolean Optimization Problem (QUBO).H.degree Q = H.to_qubo() print(Q){(0,): 2*lam + 1, (1,): lam + 1, (2,): lam + 1, (3,): lam + 1, (4,): 2*lam, (0, 3): lam, (0, 4): -2*lam, (3, 4): -2*lam, (5,): lam, (2, 4): lam, (2, 5): -2*lam, (4, 5): -2*lam, (6,): lam, (0, 1): lam, (0, 6): -2*lam, (1, 6): -2*lam, (5, 6): -lam, (): lam}For testing purposes, let's solve this with bruteforce to see what the proper value of $\lambda$ should be to enforce the constraints. Notice how we remap the QUBO solution to the PCBO solution with `H.convert_solution(x)`.for l in (1, 2, 3): Q_temp = Q.subs({lam: l}) solutions = Q_temp.solve_bruteforce(all_solutions=True) solutions = [H.convert_solution(x) for x in solutions] print('lam', l) for s in solutions: print("\t", s, "is", "valid" if H.is_solution_valid(s) else "invalid") print()lam 1 {'e0': 0, 'e1': 0, 'e2': 0, 'e3': 0, 'z0': 0, 'z1': 0, 'z2': 0} is invalid {'e0': 1, 'e1': 0, 'e2': 0, 'e3': 0, 'z0': 1, 'z1': 1, 'z2': 1} is valid lam 2 {'e0': 1, 'e1': 0, 'e2': 0, 'e3': 0, 'z0': 1, 'z1': 1, 'z2': 1} is valid lam 3 {'e0': 1, 'e1': 0, 'e2': 0, 'e3': 0, 'z0': 1, 'z1': 1, 'z2': 1} is validWe see that $\lambda = 2$ is sufficient to enforce the constraints. So let's update our QUBO.Q_good = Q.subs({lam: 2})Now let's solve the QUBO with D'Wave's simulated annealer.#!pip install dwave-neal from neal import SimulatedAnnealingSampler sampler = SimulatedAnnealingSampler()Note that their software package takes in a specific form for QUBOs, namely, the keys of the dictionary must be two element tuples. This form can be accessed from `Q` and `Q_good` with `Q.Q` or `Q_good.Q`.qubo_sample = sampler.sample_qubo(Q_good.Q, num_reads=100) print("objective function:", qubo_sample.first.energy + Q_good.offset, "\n") qubo_solution = qubo_sample.first.sample print("qubo solution:", qubo_solution, "\n") solution = H.convert_solution(qubo_solution) print("pcbo solution:", solution) print("objective function:", H.value(solution), "\n") print("The solution is", "valid" if H.is_solution_valid(solution) else "invalid")objective function: 1.0 qubo solution: {0: 1, 1: 0, 2: 0, 3: 0, 4: 1, 5: 1, 6: 1} pcbo solution: {'e0': 1, 'e1': 0, 'e2': 0, 'e3': 0, 'z0': 1, 'z1': 1, 'z2': 1} objective function: 1 The solution is validThis matches the result of `H.solve_bruteforce()`. Recall that the objective function is equal to the minimum number of failures that will lead to a top level failure.Now we'll solve an QUSO formulation of our problem. Again we'll take $\lambda = 2$.L = H.to_quso().subs({lam: 2}) # note that we cannot do H.subs({lam: 2}).to_quso()!! This is because H.subs({lam: 2}) # creates a new PCBO object, and it's mapping from variables labels to integers may be # different than H's mapping. For example, try H.mapping == H.subs({lam: 2}).mapping a # few times. They will often be different. print("num QUSO variables", L.num_binary_variables) print() print(L)num QUSO variables 7 {(0,): -1.5, (): 8.0, (1,): -1.0, (2,): -1.0, (3,): -1.0, (0, 3): 0.5, (0, 4): -1.0, (3, 4): -1.0, (2, 4): 0.5, (4,): 0.5, (2, 5): -1.0, (4, 5): -1.0, (5,): 1.5, (0, 1): 0.5, (0, 6): -1.0, (1, 6): -1.0, (6,): 1.5, (5, 6): -0.5}Similar to their QUBO solver, D'Wave's QUSO solver accepts a specific form for QUSO models, namely a linear term dictionary and a quadratic term dictionary. 
These can be accessed with `L.h` and `L.J`.quso_sample = sampler.sample_ising(L.h, L.J, num_reads=100) print("objective function:", quso_sample.first.energy + L.offset, "\n") quso_solution = quso_sample.first.sample print("quso solution:", quso_solution, "\n") solution = H.convert_solution(quso_solution) print("pcbo solution:", solution) print("objective function:", H.value(solution), "\n") print("The solution is", "valid" if H.is_solution_valid(solution) else "invalid")objective function: 1.0 quso solution: {0: -1, 1: 1, 2: 1, 3: 1, 4: -1, 5: -1, 6: -1} pcbo solution: {'e0': 1, 'e1': 0, 'e2': 0, 'e3': 0, 'z0': 1, 'z1': 1, 'z2': 1} objective function: 1 The solution is valid*IPCC SR15 scenario assessment* Analysis of carbon capture and sequestration (CCS)This notebook computes indicators and diagnostics of the deployment of CCS by fueland the total amount of CO2 stored as shown in **Figure 2.17**in the IPCC's _"Special Report on Global Warming of 1.5°C"_.The scenario data used in this analysis can be accessed and downloaded at [https://data.ene.iiasa.ac.at/iamc-1.5c-explorer](https://data.ene.iiasa.ac.at/iamc-1.5c-explorer). Load `pyam` package and other dependenciesimport pandas as pd import numpy as np import io import itertools import yaml import math import matplotlib.pyplot as plt %matplotlib inline import pyam from utils import boxplot_by_catImport scenario data, categorization and specifications filesThe metadata file with scenario categorisation and quantitative indicators can be downloaded at [https://data.ene.iiasa.ac.at/iamc-1.5c-explorer](https://data.ene.iiasa.ac.at/iamc-1.5c-explorer). Alternatively, it can be re-created using the notebook `sr15_2.0_categories_indicators`.The last cell of this section loads and assigns a number of auxiliary lists as defined in the categorization notebook.sr1p5 = pyam.IamDataFrame(data='../data/iamc15_scenario_data_world_r2.0.xlsx') sr1p5.load_meta('sr15_metadata_indicators.xlsx') with open("sr15_specs.yaml", 'r') as stream: specs = yaml.load(stream, Loader=yaml.FullLoader) rc = pyam.run_control() for item in specs.pop('run_control').items(): rc.update({item[0]: item[1]}) cats = specs.pop('cats') all_cats = specs.pop('all_cats') subcats = specs.pop('subcats') all_subcats = specs.pop('all_subcats') plotting_args = specs.pop('plotting_args') marker= specs.pop('marker')Downselect scenario ensemble to categories of interest for this assessmentUse all years (as of 2020) to correctly compute the total amount of CO2 stored in each scenario.years = range(2020, 2101, 5) cats.remove('Above 2C') df = sr1p5.filter(category=cats, year=years)Set specifications for filter and plotting and initialize a data listplot_ylabel = '{} with CCS (EJ)' save_name = 'output/fig2.17{}.{}' figure_format = 'png' filter_args = dict(df=sr1p5, category=cats, marker=None, join_meta=True) def plotting_args(name, panel_label=None, filetype=figure_format): return {'categories': cats, 'column': 'category', 'years': range(2020, 2101, 10), 'add_marker': marker, 'ylabel': plot_ylabel.format(name), 'save': save_name.format(name if panel_label is None else '{}_{}'.format(panel_label, name), filetype)} data = []Add IEA's 'Faster Transition Scenario' to the set of marker scenarios for comparisonm = 'IEA WEM' col = 'marker' sr1p5.set_meta(m, col, sr1p5.filter(model='IEA World Energy Model 2017', scenario='Faster Transition Scenario')) rc.update({'marker': {col: {m: 'o'}}, 'c': {col: {m: 'red'}}, 'edgecolors': {col: {m: 'black'}}} ) marker += [m]Extract CCS timeseries data by fuelccs_bio = 
( pyam.filter_by_meta( df.filter(variable='Primary Energy|Biomass|Modern|w/ CCS') .timeseries(), **filter_args) ) name = 'bioenergy' fig = boxplot_by_cat(ccs_bio, **plotting_args(name, 'a')) data.append(('Bioenergy with CCS', ccs_bio)) ccs_coal = ( pyam.filter_by_meta( df.filter(variable='Primary Energy|Coal|w/ CCS') .timeseries(), **filter_args) ) name = 'coal' boxplot_by_cat(ccs_coal, **plotting_args(name, 'b'), legend=False) data.append(('Coal with CCS', ccs_coal)) ccs_gas = ( pyam.filter_by_meta( df.filter(variable='Primary Energy|Gas|w/ CCS') .timeseries(), **filter_args) ) name = 'gas' boxplot_by_cat(ccs_gas, **plotting_args(name, 'c'), legend=False) data.append(('Gas with CCS', ccs_gas))Compute cumulative CO2 storedccs = ( df.filter(variable='Carbon Sequestration|CCS') .convert_unit('Mt CO2/yr', 'Gt CO2/yr') .timeseries() ) ccs.index = ccs.index.droplevel([2, 3, 4]) cum_ccs = pd.DataFrame() for i in range(2030, 2100, 10): cum_ccs[i] = ccs.apply(pyam.cumulative, raw=False, axis=1, first_year=2020, last_year=i) cum_ccs = pyam.filter_by_meta(cum_ccs, **filter_args) boxplot_by_cat( cum_ccs, categories=cats, column='category', years=range(2030, 2100, 10), ylabel='cumulative CO2 stored (Gt)', save=save_name.format('d_cumulative_ccs', figure_format), ymax=2050, add_marker=marker, legend=False) data.append(('Cumulative CCS', cum_ccs))Export timeseries data to `xlsx`writer = pd.ExcelWriter('output/fig2.17_data_table.xlsx') for (name, _df) in data: pyam.utils.write_sheet(writer, name, _df, index=True) writer.save()Import Modulesimport pandas as pd import numpy as np import spacy import string from scipy import stats from matplotlib import pyplot as plt from progress.bar import BarData Preparation# filepath = '../data/mytrain.csv' filepath = 'toy_set.csv' # a small set of 2000 questions for testing df_data = pd.read_csv(filepath) nlp = spacy.load('en_core_web_sm') PUNCT_DICT = {'all_punctuation': string.punctuation, 'commas': ',', \ 'periods': '.', 'quotation_marks': '\'\"', 'question_marks': '?', \ 'exclamation_marks': '!', 'other_punctuations': [s for s in string.punctuation if s not in ',.\'\"?!']} POS_LIST = ['ADJ', 'ADV', 'INTJ', 'NOUN', 'PROPN', 'VERB', 'ADP', 'AUX', \ 'CCONJ', 'DET', 'NUM', 'PART', 'PRON', 'SCONJ', 'SYM', 'X'] # Reference: https://universaldependencies.org/u/pos/ ENT_LIST = ['PERSON', 'NORP', 'FAC', 'ORG', 'GPE', 'LOC', 'PRODUCT', 'EVENT', \ 'WORK_OF_ART', 'LAW', 'LANGUAGE', 'DATE', 'TIME', 'PERCENT', 'MONEY', 'QUANTITY', 'ORDINAL', 'CARDINAL'] #reference: https://spacy.io/api/annotation#section-named-entitiesBuild dictionaries for statistical information for one sentence- Currently this covers 3 aspects: punctuation, pos, and entities- The statistical information are all counts of occurrances - **Might need more statistics?**- The lists of pos and entity types are cited below- Note that spacy provides a PUNCT tag in the pos-tagger, but here we define finer-grained categorizationdef build_count_dict(sentence): """ Return count dictionaries for sentence mapping from labels to the count of words that satisfies a corresponding condition: 1 - char in char_type (punctuation) 2 - taken.pos_ == pos (part of speech) 3 - ent.label_ == ent (named entites) """ punc = {key: 0 for key in PUNCT_DICT.keys()} pos = {pos: 0 for pos in POS_LIST} ent = {ent: 0 for ent in ENT_LIST} doc = nlp(sentence) ents = doc.ents for word in sentence: for key, value in PUNCT_DICT.items(): if word in value: punc[key] += 1 for token in doc: if token.pos_ in POS_LIST: pos[token.pos_] += 1 for e in ents: 
if e.label_ in ENT_LIST: ent[e.label_] += 1 return punc, pos, entCollect statistical data for all sentencesdef data_collection(dataframe): """ Return statistical data of sentences with label, which is 0 for negative and 1 for positive. """ sentences = dataframe['question_text'].values # punctuations punc = dict((key, []) for key in PUNCT_DICT.keys()) # punc_count = dict((key, 0) for key in PUNCT_DICT.keys()) pos = dict((pos, []) for pos in POS_LIST) # pos_count = dict((pos, 0) for pos in POS_LIST) ent = dict((ent, []) for ent in ENT_LIST) data_container = [punc, pos, ent] bar = Bar("Collecting data over sentences", max=len(sentences)) for s in sentences: # punctuations punc_dict, pos_dict, ent_dict = build_count_dict(s) data = [punc_dict, pos_dict, ent_dict] for i in range(len(data)): for key, value in data[i].items(): data_container[i][key].append(value) bar.next() bar.finish() for container in data_container: for key, value in container.items(): dataframe[key] = pd.Series(value, index=dataframe.index)Two Sample KS TestingThe purpose is to extract features of which the distributions in positive and negative datasets are significantly different. P-value threshold: 0.01 (standard for two-tailed test)def ks_test(set1, set2, theme): """ Conduct KS test to compare set1 and set2. Print the results and return True iff set1 and set2 are significantly different at 0.001 level. Theme is a text label for the comparison. """ ks_test_score, ks_p_value = stats.ks_2samp(set1, set2) print("===== KS test for {} =====".format(theme)) print("KS statistic: {}\np-value: {}".format(ks_test_score, ks_p_value)) # Since it is a two-tailed test, the difference is considered significant # when p value is smaller thatn 0.01 if ks_p_value < 0.01: print("The two distributions are significantly different. ") return True return FalseExecuting the methods: main() Getting raw data from data_collection functiondata_collection(df_data) df_positive, df_negative = df_data[df_data['target']==1], df_data[df_data['target'] == 0]Containers for punctuation marks/PoS/entities countsfeatures = {'punctuation':[PUNCT_DICT.keys(), []], 'pos_tag':[POS_LIST, []], 'ent':[ENT_LIST, []]}Getting statistical infofor key, value in features.items(): for label in value[0]: if ks_test(df_positive[label].values, df_negative[label].values, label): value[1].append(label) df = df_data[value[1]] df['target'] = df_data['target'] # filename = '{}.csv'.format(key) # df.to_csv(filename, index=0)print test resultsfor key, value in features.items(): print('{} test results: {}'.format(key, value[1])) ===== KS test for all_punctuation ===== KS statistic: 0.20649401214523827 p-value: 0.0 The two distributions are significantly different. ===== KS test for commas ===== KS statistic: 0.14935060881751216 p-value: 0.0 The two distributions are significantly different. ===== KS test for periods ===== KS statistic: 0.060830283120621775 p-value: 1.1036355412102202e-195 The two distributions are significantly different. ===== KS test for quotation_marks ===== KS statistic: 0.0896970770949378 p-value: 0.0 The two distributions are significantly different. ===== KS test for question_marks ===== KS statistic: 0.0675250841708418 p-value: 4.996541012816196e-241 The two distributions are significantly different. ===== KS test for exclamation_marks ===== KS statistic: 0.005223242826926211 p-value: 0.07267448091854482 ===== KS test for other_punctuations ===== KS statistic: 0.044723076204157164 p-value: 5.71709290637383e-106 The two distributions are significantly different. 
corpus_stats.py:134: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy df['target'] = df_data['target'] ===== KS test for ADJ ===== KS statistic: 0.1935908580556876 p-value: 0.0 The two distributions are significantly different. ===== KS test for ADV ===== KS statistic: 0.2741942028242801 p-value: 0.0 The two distributions are significantly different. ===== KS test for INTJ ===== KS statistic: 0.005810648571668464 p-value: 0.03306683461969816 ===== KS test for NOUN ===== KS statistic: 0.07901515248192403 p-value: 0.0 The two distributions are significantly different. ===== KS test for PROPN ===== KS statistic: 0.19957844130372066 p-value: 0.0 The two distributions are significantly different. ===== KS test for VERB ===== KS statistic: 0.23490756135964508 p-value: 0.0 The two distributions are significantly different. ===== KS test for ADP ===== KS statistic: 0.1425457752970124 p-value: 0.0 The two distributions are significantly different. ===== KS test for AUX ===== KS statistic: 0.0 p-value: 1.0 ===== KS test for CCONJ ===== KS statistic: 0.14024186182060316 p-value: 0.0 The two distributions are significantly different. ===== KS test for DET ===== KS statistic: 0.0981772568175312 p-value: 0.0 The two distributions are significantly different. ===== KS test for NUM ===== KS statistic: 0.028908604027122253 p-value: 1.5941384267652604e-44 The two distributions are significantly different. ===== KS test for PART ===== KS statistic: 0.09337191361933339 p-value: 0.0 The two distributions are significantly different. ===== KS test for PRON ===== KS statistic: 0.06995257073260086 p-value: 1.2265959395833205e-258 The two distributions are significantly different. ===== KS test for SCONJ ===== KS statistic: 0.0 p-value: 1.0 ===== KS test for SYM ===== KS statistic: 0.004367102937732015 p-value: 0.19690057964334792 ===== KS test for X ===== KS statistic: 0.0034428522295403274 p-value: 0.4674693650568761 ===== KS test for PERSON ===== KS statistic: 0.09407880548837111 p-value: 0.0 The two distributions are significantly different. ===== KS test for NORP ===== KS statistic: 0.3093578779584073 p-value: 0.0 The two distributions are significantly different. ===== KS test for FAC ===== KS statistic: 0.0001372413203811762 p-value: 0.9999999999999998 ===== KS test for ORG ===== KS statistic: 0.013272179404682394 p-value: 1.0137461597272936e-09 The two distributions are significantly different. ===== KS test for GPE ===== KS statistic: 0.08024333595431177 p-value: 0.0 The two distributions are significantly different. ===== KS test for LOC ===== KS statistic: 0.01593505246154603 p-value: 7.978103928117553e-14 The two distributions are significantly different. ===== KS test for PRODUCT ===== KS statistic: 0.003329168579452646 p-value: 0.5110730082239134 ===== KS test for EVENT ===== KS statistic: 0.002687291471993425 p-value: 0.7726293163529684 ===== KS test for WORK_OF_ART ===== KS statistic: 0.0018686404864821649 p-value: 0.9837798597348894 ===== KS test for LAW ===== KS statistic: 0.002270596561190663 p-value: 0.9127969713412695 ===== KS test for LANGUAGE ===== KS statistic: 0.0013589432188911843 p-value: 0.9998745643907627 ===== KS test for DATE ===== KS statistic: 0.016170799755453547 p-value: 3.180582105070732e-14 The two distributions are significantly different. 
===== KS test for TIME ===== KS statistic: 0.001761013430913283 p-value: 0.9917271436649813 ===== KS test for PERCENT ===== KS statistic: 0.002277878972795744 p-value: 0.9108623253635588 ===== KS test for MONEY ===== KS statistic: 0.001213247931561634 p-value: 0.9999914531426718 ===== KS test for QUANTITY ===== KS statistic: 0.0010873408185718691 p-value: 0.9999996752966367 ===== KS test for ORDINAL ===== KS statistic: 0.003573301528227546 p-value: 0.4198663662283268 ===== KS test for CARDINAL ===== KS statistic: 0.011329958638504944 p-value: 3.367636618202617e-07 The two distributions are significantly different. punctuation test results: ['all_punctuation', 'commas', 'periods', 'quotation_marks', 'question_marks', 'other_punctuations'] pos_tag test results: ['ADJ', 'ADV', 'NOUN', 'PROPN', 'VERB', 'ADP', 'CCONJ', 'DET', 'NUM', 'PART', 'PRON'] ent test results: ['PERSON', 'NORP', 'ORG', 'GPE', 'LOC', 'DATE', 'CARDINAL']Importing the dataimport pandas as pd import matplotlib.pyplot as plt import numpy as np import seaborn as sns file_ = "/content/sample_data/IMDB_movies.csv" df = pd.read_csv(file_, low_memory=False) # Note: This is a shortcut for using cleaned data so I don't have to rerun the # cleaning tasks every time I want to run this notebook import pandas as pd import matplotlib.pyplot as plt import numpy as np import seaborn as sns from sklearn.model_selection import train_test_split file_ = "/content/sample_data/IMDB_movies_cleaned.csv" df = pd.read_csv(file_, low_memory=False) # Now split into Training and Test data X = df.iloc[:, 0:-1] y = df['success'] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=1)Munging the data# Remove all rows that don't have Budget and worlwide_gross_income [sic] df = df.dropna(subset=['budget','worlwide_gross_income']) # Remove all rows where the budget isn't in $ (because it would be too time consuming to convert currency) df = df[pd.DataFrame([df['budget'].str.contains('\$')]).T.any(axis=1)] # Remove $ and , from those columns df['budget'] = df['budget'].str.replace('$', '').str.replace(',', '').astype(int) df['worlwide_gross_income'] = df['worlwide_gross_income'].str.replace('$', '').str.replace(',', '').astype(int) # Drop all movies whose budget < 10,000 (ex, The Devil's Doorway has a budget of $1) df.drop(df.index[df['budget'] < 10000], inplace = True) # Get the month from the date_published field df['month'] = pd.DatetimeIndex(df['date_published']).month # Drop columns I don't need df.drop(['imdb_title_id', 'title','description','date_published','production_company','usa_gross_income','language','country','year'], axis=1, inplace=True) # Useful to see but not needed in the model df.drop(['original_title'], axis=1, inplace=True) # Now since there are only a handful of rows with NAN left, we can drop them df = df.dropna(axis=0) # What is a success? Any move that grossed > 2.5x its budget df['success'] = np.where(df['worlwide_gross_income'] > 2.5 * df['budget'], 1, 0) # Drop variables subject to hindsight bias df.drop(['avg_vote','metascore','worlwide_gross_income','votes','reviews_from_users','reviews_from_critics'], axis=1, inplace=True) df = df.reset_index(drop=True) # Director, Writer, Actor are also comma-delimited lists – unable to create dummy variables because would create too many features # Suggestion: Chunk the categories. Ex. Top Actors / Middle Actors. 
# Do this via iterating through dataframe, building a dictionary, and joining it back into the original dataframe from collections import defaultdict def parse_and_append(column): person_dict = defaultdict(int) for index, row in df.loc[df['success'] == 1].iterrows(): for person in row[column].split(', '): person_dict[person] += 1 # Low success person: 1 - 2 # Mid success person 3 - 10 # High Success person > 10 successful_people = {} for index, row in df.iterrows(): data = {'low': 0, 'mid': 0, 'high': 0 } for person in row[column].split(', '): #print(person + ":" + str(person_dict[person])) if (person_dict[person] > 0 and person_dict[person] <= 2): data['low'] = 1 if (person_dict[person] > 2 and person_dict[person] <= 9): data['mid'] = 1 if (person_dict[person] > 9): data['high'] = 1 successful_people[index] = data return successful_people df_success = pd.DataFrame(parse_and_append('actors')).transpose() df = df.join(df_success) df.rename(columns={'low': 'low_success_actor', 'mid': 'mid_success_actor', 'high': 'high_success_actor'}, inplace=True) df_success = pd.DataFrame(parse_and_append('writer')).transpose() df = df.join(df_success) df.rename(columns={'low': 'low_success_writer', 'mid': 'mid_success_writer', 'high': 'high_success_writer'}, inplace=True) df_success = pd.DataFrame(parse_and_append('director')).transpose() df = df.join(df_success) df.rename(columns={'low': 'low_success_director', 'mid': 'mid_success_director', 'high': 'high_success_director'}, inplace=True) # Move Success back to the end df = df[[c for c in df if c not in ['success']] + ['success']] # Drop columns I no longer need df.drop(['director', 'writer','actors'], axis=1, inplace=True)Exploratory Data Analysisdf['success'].value_counts() sns.countplot(df['success']) x = df['month'] y = df[df['success'] == 1]['month'] bins = np.linspace(1, 12) plt.hist([y, x], bins, label=['Success', 'Released'], histtype='stepfilled', color=['firebrick','royalblue'], edgecolor='k') plt.legend(loc='upper center') plt.title("Movies released per month") plt.show() x = df['duration'] y = df[df['success'] == 1]['duration'] bins = np.linspace(60, 200, 30) plt.hist([y, x], bins, label=['Success', 'Released'], histtype='stepfilled', color=['firebrick','royalblue'], edgecolor='k') plt.legend(loc='upper center') plt.title("Duration of Movie") plt.show() x = df['budget'] y = df[df['success'] == 1]['budget'] bins = np.linspace(100000000, 360000000, 50) plt.hist([y, x], bins, label=['Success', 'Released'], histtype='stepfilled', color=['firebrick','royalblue'], edgecolor='k') plt.legend(loc='upper center') plt.title("Budget: \$100M - \$360M") plt.show() plt.title("Duration (minutes)") boxplot = df.where(df["success"] == 1).boxplot(column=['duration']) plt.show() plt.title("Budget (Millions)") boxplot = df.where(df["success"] == 1).boxplot(column=['budget']) plt.show() # Drop all Object columns df = df.select_dtypes(exclude=['object']) X, y = df.iloc[:, 0:-1].values, df.iloc[:, -1].values print(list(df)) feature_names = ['duration', 'budget', 'month'] target_name = 'success' # https://heartbeat.fritz.ai/analyzing-machine-learning-models-with-yellowbrick-37795733f3ee from yellowbrick.features import Rank1D visualizer = Rank1D(features=feature_names, algorithm='shapiro') visualizer.fit(X, y) visualizer.transform(X) visualizer.poof() from yellowbrick.features import Rank2D visualizer = Rank2D(features=feature_names, algorithm='covariance') #visualizer = Rank2D(features=feature_names, algorithm='pearson') visualizer.fit(X, y) visualizer.transform(X) 
visualizer.poof() cols = ['duration', 'budget', 'month', 'success'] sns.pairplot(df[cols], size=2.5) plt.tight_layout() plt.show() cm = np.corrcoef(df[cols].values.T) sns.set(font_scale=1.5) hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 15}, yticklabels=cols, xticklabels=cols) plt.show() # We can verify these results using the density plots for each feature with relevance to the outcome. This is not that complex since we only have two outcomes: 0 or 1. So it would look like this in code # In the plots, when the green and red curves are almost the same (overlapping), it means the feature does not separate the outcomes. # When you can see some separation this is in agreement with the correlation values. import pandas as pd from pylab import rcParams import matplotlib.pyplot as plt def plot_densities(data): ''' Plot features densities depending on the outcome values ''' # change fig size to fit all subplots beautifully rcParams['figure.figsize'] = 15, 20 # separate data based on outcome values outcome_0 = data[data['success'] == 0] outcome_1 = data[data['success'] == 1] # init figure fig, axs = plt.subplots(10, 1) fig.suptitle('Features densities for Success 0 vs. 1') plt.subplots_adjust(left = 0.25, right = 0.9, bottom = 0.1, top = 0.95, wspace = 0.2, hspace = 0.9) # plot densities for outcomes for column_name in names[:-1]: ax = axs[names.index(column_name)] #plt.subplot(4, 2, names.index(column_name) + 1) outcome_0[column_name].plot(kind='density', ax=ax, subplots=True, sharex=False, color="red", legend=True, label=column_name + ' for Success = 0') outcome_1[column_name].plot(kind='density', ax=ax, subplots=True, sharex=False, color="green", legend=True, label=column_name + ' for Success = 1') ax.set_xlabel(column_name + ' values') ax.set_title(column_name + ' density') ax.grid('on') plt.show() fig.savefig('densities.png') # load your data data = df names = list(data.columns) # plot correlation & densities plot_densities(data)Creating dummy variables for Genre and Month# Converting pandas column of comma-separated strings into dummy variables - 21 columns # Remove spaces b/c after converting, genre_ Action and genre_Action were entered as 2 different columns df['genre'] = df['genre'].str.replace(' ', '') df = pd.concat([df, df['genre'].str.get_dummies(sep=',').add_prefix('genre_')], axis = 1) # Even though Months are numbers, it is categorical so convert it to dummies df_months = pd.get_dummies(df['month'], prefix='month') df = pd.concat([df, df_months], axis=1) # Move Success back to the end df = df[[c for c in df if c not in ['success']] + ['success']] # Drop columns I no longer need df.drop(['genre','month'], axis=1, inplace=True)Looking at the cleaned data setdf.head() df.shape df.info() df.describe().T print(list(df)) #df.to_csv(r'/content/sample_data/IMDB_movies_cleaned.csv', index = False) RangeIndex: 6368 entries, 0 to 6367 Data columns (total 45 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 duration 6368 non-null int64 1 budget 6368 non-null int64 2 low_success_actor 6368 non-null int64 3 mid_success_actor 6368 non-null int64 4 high_success_actor 6368 non-null int64 5 low_success_writer 6368 non-null int64 6 mid_success_writer 6368 non-null int64 7 high_success_writer 6368 non-null int64 8 low_success_director 6368 non-null int64 9 mid_success_director 6368 non-null int64 10 high_success_director 6368 non-null int64 11 genre_Action 6368 non-null int64 12 genre_Adventure 6368 non-null int64 13 genre_Animation 6368 
non-null int64 14 genre_Biography 6368 non-null int64 15 genre_Comedy 6368 non-null [...]Building, Training, and Testing the models PCAfrom sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.ensemble import RandomForestClassifier # Now split into Training and Test data X = df.iloc[:, 0:-1] y = df['success'] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=1) print(X.shape, y.shape, X_train.shape, y_train.shape, X_test.shape, y_test.shape) # Scale the data stdsc = StandardScaler() X_train_std = stdsc.fit_transform(X_train) X_test_std = stdsc.transform(X_test) feat_labels = df.columns[0:-1] forest = RandomForestClassifier(random_state=1) forest.fit(X_train_std, y_train) importances = forest.feature_importances_ indices = np.argsort(importances)[::-1] for f in range(X_train_std.shape[1]): print("%2d) %-*s %f" % (f + 1, 30, feat_labels[indices[f]], importances[indices[f]])) plt.title('Feature Importance') plt.bar(range(X_train_std.shape[1]), importances[indices], align='center') plt.xticks(range(X_train_std.shape[1]), feat_labels[indices], rotation=90) plt.xlim([-1, X_train_std.shape[1]]) plt.tight_layout() plt.show() # Get a basis to determine if I should use PCA (use StandardScaler and LogisticRegression) from sklearn.pipeline import Pipeline, make_pipeline from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score pipe = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000, random_state=1)) pipe.fit(X_train, y_train) y_pred = pipe.predict(X_test) print('Test Accuracy: %.3f' % pipe.score(X_test, y_test)) scores = cross_val_score(estimator=pipe, X=X_train, y=y_train, cv=5, n_jobs=1) print('CV accuracy scores: {}'.format(scores)) print('CV accuracy mean:{} and std:{}'.format(np.mean(scores), np.std(scores))) # Now see if PCA improved or compromised my model? 
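Before committing to `n_components=2`, it can help to check how much variance the principal components actually retain. A minimal sketch (not part of the original notebook) that reuses the `X_train_std` array scaled above:

```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

# Fit PCA with all components to inspect the explained-variance profile
pca_full = PCA()
pca_full.fit(X_train_std)

cum_var = np.cumsum(pca_full.explained_variance_ratio_)
plt.plot(range(1, len(cum_var) + 1), cum_var, marker='o')
plt.axhline(0.95, ls='--', color='gray')  # e.g. a ~95% retained-variance target
plt.xlabel('Number of components')
plt.ylabel('Cumulative explained variance')
plt.show()
```

With 44 mostly one-hot features, two components will typically retain only a small share of the variance, which is consistent with the drop to 0.637 test accuracy for the two-component pipeline in the next cell (versus roughly 0.78 with scaling alone in the transformer comparison further down).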
from sklearn.decomposition import PCA # Now add PCA into my pipeline pipe = make_pipeline(StandardScaler(), PCA(n_components=2), LogisticRegression(random_state=1)) pipe.fit(X_train, y_train) y_pred = pipe.predict(X_test) print('Test Accuracy: %.3f' % pipe.score(X_test, y_test)) scores = cross_val_score(estimator=pipe, X=X_train, y=y_train, cv=5, n_jobs=1) print('CV accuracy scores: {}'.format(scores)) print('CV accuracy mean:{} and std:{}'.format(np.mean(scores), np.std(scores)))Test Accuracy: 0.637 CV accuracy scores: [0.65919283 0.63004484 0.65768799 0.671156 0.66105499] CV accuracy mean:0.655827331612085 and std:0.013724590971716088Comparing different Transformers# Use different Transformers from sklearn import preprocessing import json output = [] def process_model(name, input_model): if (name == "[None]"): pipe = make_pipeline(LogisticRegression(max_iter=1000, random_state=1)) else: pipe = make_pipeline(input_model, LogisticRegression(max_iter=1000, random_state=1)) pipe.fit(X_train, y_train) y_pred = pipe.predict(X_test) test_Accuracy = pipe.score(X_test, y_test) return {"name": name, "accuracy": round(test_Accuracy, 3)} output.append(process_model("[None]", "")) output.append(process_model("StandardScaler", preprocessing.StandardScaler())) output.append(process_model("MinMaxScaler", preprocessing.MinMaxScaler())) output.append(process_model("RobustScaler", preprocessing.RobustScaler())) output.append(process_model("PowerTransformer", preprocessing.PowerTransformer())) output.append(process_model("FunctionTransformer", preprocessing.FunctionTransformer())) output.append(process_model("MaxAbsScaler", preprocessing.MaxAbsScaler())) output.append(process_model("Normalizer", preprocessing.Normalizer())) output.append(process_model("Binarizer", preprocessing.Binarizer())) output.append(process_model("PolynomialFeatures", preprocessing.PolynomialFeatures())) output.append(process_model("QuantileTransformer", preprocessing.QuantileTransformer())) print(json.dumps(output, indent=4)) import csv csv_columns = ['name', 'accuracy'] csv_file = "Transformers.csv" try: with open(csv_file, 'w') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() for data in output: writer.writerow(data) except IOError: print("I/O error")[ { "name": "[None]", "accuracy": 0.614 }, { "name": "StandardScaler", "accuracy": 0.779 }, { "name": "MinMaxScaler", "accuracy": 0.774 }, { "name": "RobustScaler", "accuracy": 0.78 }, { "name": "PowerTransformer", "accuracy": 0.802 }, { "name": "FunctionTransformer", "accuracy": 0.614 }, { "name": "MaxAbsScaler", "accuracy": 0.776 }, { "name": "Normalizer", "accuracy": 0.614 }, { "name": "Binarizer", "accuracy": 0.769 }, { "name": "PolynomialFeatures", "accuracy": 0.386 }, { "name": "QuantileTransformer", "accuracy": 0.804 } ]Comparing different Classifiers# Use different Linear Models from sklearn.preprocessing import QuantileTransformer from sklearn import linear_model from sklearn import naive_bayes from sklearn import neighbors from sklearn import svm from sklearn import tree from sklearn import ensemble from sklearn.metrics import accuracy_score import json output = [] def process_model(name, input_model): pipe = make_pipeline(QuantileTransformer(random_state=1), input_model) pipe.fit(X_train, y_train) y_pred = pipe.predict(X_test) test_Accuracy = pipe.score(X_test, y_test) return {"name": name, "accuracy": round(test_Accuracy, 3)} output.append(process_model("Perceptron", linear_model.Perceptron(tol=1e-3, random_state=1))) 
output.append(process_model("LogisticRegression", linear_model.LogisticRegression(max_iter=1000, random_state=1))) output.append(process_model("LogisticRegressionCV", linear_model.LogisticRegressionCV(cv=5, max_iter=1000, random_state=1))) output.append(process_model("RidgeClassifier", linear_model.RidgeClassifier(random_state=1))) output.append(process_model("RidgeClassifierCV", linear_model.RidgeClassifierCV(cv=5, alphas=[1e-3, 1e-2, 1e-1, 1]))) output.append(process_model("SGDClassifier", linear_model.SGDClassifier(max_iter=1000, tol=1e-3, random_state=1))) output.append(process_model("LinearRegression", linear_model.LinearRegression())) output.append(process_model("BayesianRidge", linear_model.BayesianRidge())) output.append(process_model("BernoulliNB", naive_bayes.BernoulliNB())) output.append(process_model("CategoricalNB", naive_bayes.CategoricalNB())) output.append(process_model("ComplementNB", naive_bayes.ComplementNB())) output.append(process_model("GaussianNB", naive_bayes.GaussianNB())) output.append(process_model("MultinomialNB", naive_bayes.MultinomialNB())) output.append(process_model("KNeighborsClassifier(3)", neighbors.KNeighborsClassifier(n_neighbors=3))) output.append(process_model("KNeighborsClassifier(5)", neighbors.KNeighborsClassifier(n_neighbors=5))) output.append(process_model("KNeighborsRegressor(3)", neighbors.KNeighborsRegressor(n_neighbors=3))) output.append(process_model("KNeighborsRegressor(5)", neighbors.KNeighborsRegressor(n_neighbors=5))) output.append(process_model("RadiusNeighborsClassifier", neighbors.RadiusNeighborsClassifier(radius=10.0))) output.append(process_model("NearestCentroid", neighbors.NearestCentroid())) output.append(process_model("SVC", svm.SVC(gamma='auto'))) output.append(process_model("LinearSVC", svm.LinearSVC(random_state=0, tol=1e-5))) output.append(process_model("NuSVC", svm.NuSVC())) output.append(process_model("DecisionTreeClassifier", tree.DecisionTreeClassifier())) output.append(process_model("DecisionTreeRegressor", tree.DecisionTreeRegressor())) output.append(process_model("ExtraTreeClassifier", tree.ExtraTreeClassifier())) output.append(process_model("AdaBoostClassifier", ensemble.AdaBoostClassifier(n_estimators=100, random_state=1))) output.append(process_model("BaggingClassifier-SVC", ensemble.BaggingClassifier(base_estimator=svm.SVC(), n_estimators=10, random_state=0))) output.append(process_model("BaggingClassifier-NuSVC", ensemble.BaggingClassifier(base_estimator=svm.NuSVC(), n_estimators=10, random_state=0))) output.append(process_model("ExtraTreesClassifier", ensemble.ExtraTreesClassifier(n_estimators=100, random_state=0))) output.append(process_model("GradientBoostingClassifier", ensemble.GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0))) output.append(process_model("RandomForestClassifier", ensemble.RandomForestClassifier(max_depth=10, random_state=1))) print(json.dumps(output, indent=4)) import csv csv_columns = ['name', 'accuracy'] csv_file = "Classifiers.csv" try: with open(csv_file, 'w') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() for data in output: writer.writerow(data) except IOError: print("I/O error") # Neural Network from sklearn import neural_network from sklearn.model_selection import cross_val_score nn = neural_network.MLPClassifier(hidden_layer_sizes=(10,10,10),max_iter=1000) #nn = neural_network.MLPClassifier(hidden_layer_sizes=(20,20,20),max_iter=1000) #nn = 
neural_network.MLPRegressor(hidden_layer_sizes=(10,10,10),max_iter=1000) #nn = make_pipeline(QuantileTransformer(random_state=1), neural_network.MLPClassifier(hidden_layer_sizes=(10,10,10),max_iter=1000)) ##nn = make_pipeline(QuantileTransformer(random_state=1), neural_network.MLPRegressor(hidden_layer_sizes=(20,20,20),max_iter=1000)) nn.fit(X_train, y_train) y_pred = nn.predict(X_test) print('Test Accuracy: %.3f' % nn.score(X_test, y_test)) scores = cross_val_score(estimator=nn, X=X_train, y=y_train, cv=5, n_jobs=1) print('CV accuracy scores: {}'.format(scores)) print('CV accuracy mean:{} and std:{}'.format(np.mean(scores), np.std(scores))) # Does Transformer help? Yes from sklearn.ensemble import BaggingClassifier #pipe = make_pipeline(BaggingClassifier(base_estimator=svm.NuSVC(), n_estimators=10, random_state=0)) pipe = make_pipeline(QuantileTransformer(random_state=1), BaggingClassifier(base_estimator=svm.SVC(random_state=1), n_estimators=10, random_state=0)) pipe.fit(X_train, y_train) y_pred = pipe.predict(X_test) print('Test Accuracy: %.3f' % pipe.score(X_test, y_test)) scores = cross_val_score(estimator=pipe, X=X_train, y=y_train, cv=5, n_jobs=1) print('CV accuracy scores: {}'.format(scores)) print('CV accuracy mean:{} and std:{}'.format(np.mean(scores), np.std(scores)))Test Accuracy: 0.811 CV accuracy scores: [0.82623318 0.79484305 0.80808081 0.80359147 0.81481481] CV accuracy mean:0.8095126652675232 and std:0.010582976224868168Using GridSearchCV to tweak hyperparameters on model with best Transformer/Classifier# Gridsearch CV on BaggingClassifier with QuantileTransformer from sklearn.pipeline import Pipeline, make_pipeline from sklearn.preprocessing import QuantileTransformer from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV param_grid = { 'baggingclassifier__base_estimator__C': [0.1, 1, 10, 100, 1000], 'baggingclassifier__base_estimator__gamma': [1, 0.1, 0.01, 0.001, 0.0001], 'baggingclassifier__base_estimator__kernel': ['rbf'], 'quantiletransformer__n_quantiles': [100, 1000, 3565], 'quantiletransformer__output_distribution': ['uniform','normal'] } gs = GridSearchCV(make_pipeline(QuantileTransformer(random_state=1), BaggingClassifier(base_estimator=svm.SVC(random_state=1), n_estimators=10, random_state=1)), param_grid=param_grid, cv= 5, verbose=True) # for param in gs.get_params().keys(): # print(param) gs = gs.fit(X_train, y_train) print(gs.best_score_) # print best parameter after tuning print(gs.best_params_) # print how our model looks after hyper-parameter tuning print(gs.best_estimator_)Fitting 5 folds for each of 150 candidates, totalling 750 fitsUsing tuned hyperparameters, generate ROC-AOC curve and Confusion Matrixfrom sklearn.ensemble import BaggingClassifier from sklearn.metrics import roc_curve, roc_auc_score from sklearn.pipeline import Pipeline, make_pipeline from sklearn.preprocessing import QuantileTransformer from sklearn.svm import SVC optimized_rfc = make_pipeline(QuantileTransformer(copy=True, ignore_implicit_zeros=False, n_quantiles=100, output_distribution='uniform', random_state=1, subsample=100000), BaggingClassifier(base_estimator=SVC(C=1000, break_ties=False, cache_size=200, class_weight=None, coef0=0.0, decision_function_shape='ovr', degree=3, gamma=0.001, kernel='rbf', max_iter=-1, probability=False, random_state=1, shrinking=True, tol=0.001, verbose=False), bootstrap=True, bootstrap_features=False, max_features=1.0, max_samples=1.0, n_estimators=10, 
n_jobs=None, oob_score=False, random_state=1, verbose=0, warm_start=False)) optimized_rfc.fit(X_train, y_train); y_score = optimized_rfc.predict_proba(X_test)[:,1] false_positive_rate1, true_positive_rate1, threshold1 = roc_curve(y_test, y_score) print('roc_auc_score for BaggingClassifier w. SVC: ', roc_auc_score(y_test, y_score)) plt.subplots(1, figsize=(10,10)) plt.title('Receiver Operating Characteristic - BaggingClassifier') plt.plot(false_positive_rate1, true_positive_rate1) plt.plot([0, 1], ls="--") plt.plot([0, 0], [1, 0] , c=".7"), plt.plot([1, 1] , c=".7") plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.show() from sklearn.ensemble import BaggingClassifier from sklearn.pipeline import Pipeline, make_pipeline from sklearn.preprocessing import QuantileTransformer from sklearn.svm import SVC from sklearn.model_selection import cross_val_score pipe = make_pipeline(QuantileTransformer(copy=True, ignore_implicit_zeros=False, n_quantiles=100, output_distribution='uniform', random_state=1, subsample=100000), BaggingClassifier(base_estimator=SVC(C=1000, break_ties=False, cache_size=200, class_weight=None, coef0=0.0, decision_function_shape='ovr', degree=3, gamma=0.001, kernel='rbf', max_iter=-1, probability=False, random_state=1, shrinking=True, tol=0.001, verbose=False), bootstrap=True, bootstrap_features=False, max_features=1.0, max_samples=1.0, n_estimators=10, n_jobs=None, oob_score=False, random_state=1, verbose=0, warm_start=False)) pipe.fit(X_train, y_train) y_pred = pipe.predict(X_test) print('Test Accuracy: %.3f' % pipe.score(X_test, y_test)) scores = cross_val_score(estimator=pipe, X=X_train, y=y_train, cv=5, n_jobs=1) print('CV accuracy scores: {}'.format(scores)) print('CV accuracy mean:{} and std:{}'.format(np.mean(scores), np.std(scores))) from sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay optimized_rfc.fit(X_train, y_train) pred_optimized_rfc = optimized_rfc.predict(X_test) print(confusion_matrix(y_test, pred_optimized_rfc)) print(classification_report(y_test, pred_optimized_rfc)) cm = confusion_matrix(y_test, pred_optimized_rfc, normalize='all') cmd = ConfusionMatrixDisplay(cm, display_labels=['Successful','Not Successful']) cmd.plot()[[971 202] [163 575]] precision recall f1-score support 0 0.86 0.83 0.84 1173 1 0.74 0.78 0.76 738 accuracy 0.81 1911 macro avg 0.80 0.80 0.80 1911 weighted avg 0.81 0.81 0.81 1911What is the prediction of a potential new movie?What is the prediction for my mid-summer, $35 Million, 2 hour action/musical, starring low and moderately successful actors/actresses, written by a first-time writer, and directed by a highly successful director?new_movie = { 'duration' : 120, 'budget' : 35000000, 'low_success_actor' : 1, 'mid_success_actor' : 1, 'high_success_actor' : 0, 'low_success_writer' : 1, 'mid_success_writer' : 0, 'high_success_writer' : 0, 'low_success_director' : 0, 'mid_success_director' : 0, 'high_success_director' : 1, 'genre_Action' : 1, 'genre_Adventure' : 0, 'genre_Animation' : 0, 'genre_Biography' : 0, 'genre_Comedy' : 0, 'genre_Crime' : 0, 'genre_Drama' : 0, 'genre_Family' : 0, 'genre_Fantasy' : 0, 'genre_Film-Noir' : 0, 'genre_History' : 0, 'genre_Horror' : 0, 'genre_Music' : 0, 'genre_Musical' : 1, 'genre_Mystery' : 0, 'genre_Romance' : 0, 'genre_Sci-Fi' : 0, 'genre_Sport' : 0, 'genre_Thriller' : 0, 'genre_War' : 0, 'genre_Western' : 0, 'month_1' : 0, 'month_2' : 0, 'month_3' : 0, 'month_4' : 0, 'month_5' : 0, 'month_6' : 0, 'month_7' : 1, 'month_8' : 0, 'month_9' : 0, 
'month_10' : 0, 'month_11' : 0, 'month_12' : 0} df_new_movie = pd.DataFrame(new_movie, index=[0]) pred_new_movie = pipe.predict(df_new_movie) if pred_new_movie[0] == 0: print("Movie will not be a success") else: print("Movie will be a success")Movie will not be a successUtilisation des scriptsCe notebook vise à montrer l'utilisation des scripts du projet afin d'effectuer une inférence d'une image et d'une vidéo. Installation du projet `bfc` et accès aux donnéesLes login et mot de passe sont nécessaires pour accéder à des repos privés à partir de Google/Colab. Cela ne serait pas le cas en accès public.import os from getpass import getpass user = getpass('GitHub user') password = getpass('') os.environ['GITHUB_AUTH'] = user + ':' + password %%shell git clone https://$GITHUB_AUTH@github.com/tibocour/IA.gitCloning into 'IA'... remote: Enumerating objects: 115, done. remote: Counting objects: 100% (115/115), done. remote: Compressing objects: 100% (82/82), done. remote: Total 115 (delta 54), reused 86 (delta 27), pack-reused 0 Receiving objects: 100% (115/115), 71.69 MiB | 23.61 MiB/s, done. Resolving deltas: 100% (54/54), done.Installation des dépendances%%shell pip install -r IA/requirements.txtRequirement already satisfied: imgaug in /usr/local/lib/python3.7/dist-packages (from -r IA/requirements.txt (line 1)) (0.2.9) Collecting pascal-voc-writer Downloading pascal_voc_writer-0.1.4-py2.py3-none-any.whl (4.0 kB) Collecting tflite-model-maker-nightly Downloading tflite_model_maker_nightly-0.3.4.dev202109080509-py3-none-any.whl (621 kB)  |████████████████████████████████| 621 kB 4.2 MB/s [?25hRequirement already satisfied: pycocotools in /usr/local/lib/python3.7/dist-packages (from -r IA/requirements.txt (line 4)) (2.0.2) Requirement already satisfied: opencv-python in /usr/local/lib/python3.7/dist-packages (from -r IA/requirements.txt (line 5)) (4.1.2.30) Requirement already satisfied: imageio in /usr/local/lib/python3.7/dist-packages (from imgaug->-r IA/requirements.txt (line 1)) (2.4.1) Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from imgaug->-r IA/requirements.txt (line 1)) (1.4.1) Requirement already satisfied: Shapely in /u[...]Inférence d'une image%%shell rm -rf /tmp/images /tmp/annotations unzip -q IA/data/valid_megots150images.zip -d /tmp ls /tmp/images %%shell cd IA/python/google-coral-inference/ python detect_image.py \ --model /content/IA/data/efficientdet-lite-bfc.tflite \ --labels /content/IA/data/bfc-labels.txt \ --input /tmp/images/1608644746.jpg \ --output /content/1608644746_processed.jpg----INFERENCE TIME---- Note: The first inference is slow because it includes loading the model into Edge TPU memory. 
3029.35 ms 3027.69 ms 3014.00 ms 3024.42 ms 3031.91 ms -------RESULTS-------- megot id: 0 score: 0.9921875 bbox: BBox(xmin=248, ymin=104, xmax=396, ymax=289)Affichage de l'image de prédictionfrom PIL import Image im = Image.open("1608644746_processed.jpg") import matplotlib.pyplot as plt plt.imshow(im) plt.show()Inférence d'une vidéo%%shell cd IA/python/google-coral-inference/ python detect_video.py \ --model /content/IA/data/efficientdet-lite-bfc.tflite \ --labels /content/IA/data/bfc-labels.txt \ --input /content/IA/data/2megot.mp4 \ --output /content/2megot_processed.mp4OpenCV: FFMPEG: tag 0x5634504d/'MP4V' is not supported with codec id 12 and format 'mp4 / MP4 (MPEG-4 Part 14)' OpenCV: FFMPEG: fallback to use tag 0x7634706d/'mp4v' inference time of the frame 0: 3030.933705 ms No objects detected in the frame 0 write the frame 0 inference time of the frame 1: 3031.248131 ms No objects detected in the frame 1 write the frame 1 inference time of the frame 2: 3004.098480 ms No objects detected in the frame 2 write the frame 2 inference time of the frame 3: 3025.358577 ms No objects detected in the frame 3 write the frame 3 inference time of the frame 4: 3016.593952 ms No objects detected in the frame 4 write the frame 4 inference time of the frame 5: 3018.479107 ms No objects detected in the frame 5 write the frame 5 inference time of the frame 6: 3034.967351 ms No objects detected in the frame 6 write the frame 6 inference time of the frame 7: 3001.463171 ms No objects detected in the frame 7 write the frame 7 inference time of the fram[...]Affichage de la vidéo de prédictionfrom IPython.display import HTML from base64 import b64encode # compress video os.system(f"ffmpeg -i 2megot_processed.mp4 -vcodec libx264 compressed_2megot_processed.mp4") # Show video mp4 = open("compressed_2megot_processed.mp4",'rb').read() data_url = "data:video/mp4;base64," + b64encode(mp4).decode() HTML(""" """ % data_url)Testing on SemEval test setdf_test = df4.append(df3) df_test.info df_test = df_test[df_test['Sentence'].notna()] # Report the number of sentences. print('Number of test sentences: {:,}\n'.format(df.shape[0])) # Create sentence and label lists sentences = df_test.Sentence.values labels = df_test.Label.values # Tokenize all of the sentences and map the tokens to thier word IDs. input_ids = [] attention_masks = [] # For every sentence... for sent in sentences: # `encode_plus` will: # (1) Tokenize the sentence. # (2) Prepend the `[CLS]` token to the start. # (3) Append the `[SEP]` token to the end. # (4) Map tokens to their IDs. # (5) Pad or truncate the sentence to `max_length` # (6) Create attention masks for [PAD] tokens. encoded_dict = tokenizer.encode_plus( sent, # Sentence to encode. add_special_tokens = True, # Add '[CLS]' and '[SEP]' max_length = 128, # Pad & truncate all sentences. pad_to_max_length = True, return_attention_mask = True, # Construct attn. masks. return_tensors = 'pt', # Return pytorch tensors. truncation=True ) # Add the encoded sentence to the list. input_ids.append(encoded_dict['input_ids']) # And its attention mask (simply differentiates padding from non-padding). attention_masks.append(encoded_dict['attention_mask']) # Convert the lists into tensors. input_ids = torch.cat(input_ids, dim=0) attention_masks = torch.cat(attention_masks, dim=0) labels = torch.tensor(labels) # Set the batch size. batch_size = 16 # Create the DataLoader. 
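# Note (added): SequentialSampler (rather than RandomSampler) is used because this is
# inference only -- it keeps the batches in the original sentence order, so the stacked
# predictions line up with the rows of df_test.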
prediction_data = TensorDataset(input_ids, attention_masks, labels) prediction_sampler = SequentialSampler(prediction_data) prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size) # Prediction on test set print('Predicting labels for {:,} test sentences...'.format(len(input_ids))) # Put model in evaluation mode model.eval() # Tracking variables predictions , true_labels = [], [] # Predict for batch in prediction_dataloader: # Add batch to GPU batch = tuple(t.to(device) for t in batch) # Unpack the inputs from our dataloader b_input_ids, b_input_mask, b_labels = batch # Telling the model not to compute or store gradients, saving memory and # speeding up prediction with torch.no_grad(): # Forward pass, calculate logit predictions outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask) logits = outputs[0] # Move logits and labels to CPU logits = logits.detach().cpu().numpy() label_ids = b_labels.to('cpu').numpy() # Store predictions and true labels predictions.append(logits) true_labels.append(label_ids) print(' DONE.') from sklearn.metrics import classification_report,confusion_matrix,accuracy_score # Combine the results across all batches. flat_predictions = np.concatenate(predictions, axis=0) # For each sample, pick the label (0 or 1) with the higher score. flat_predictions = np.argmax(flat_predictions, axis=1).flatten() # Combine the correct labels for each batch into a single list. flat_true_labels = np.concatenate(true_labels, axis=0) matrix = confusion_matrix(flat_true_labels,flat_predictions) print(matrix) score = accuracy_score(flat_true_labels,flat_predictions) print(score) report = classification_report(flat_true_labels, flat_predictions) print(report)Testing on Climate Val Setos.chdir('/content/drive/MyDrive/climateMind') df_test = pd.read_csv('Validation Dataset- Cause_Effects - Classifier (1).csv') df_test['Sentence(s)'] df_test['Nikita_has_cause_effect'] for x in range(0, len(df_test)): df_test.iloc[x]['Nikita_has_cause_effect'] = (int)(df_test.iloc[x]['Nikita_has_cause_effect']) df_test.sample(10) df_test.columns = ['original curator', 'keep', 'Sentence', 'if edge, edge type (causes or inhibits/inhibited by) ?', 'Nikita_has_cause_effect', 'Shweta_has_cause_effect', 'key word that indicates edge type', 'if edge, node(s) 1 [start node(s)]', 'if edge, node(s) 2 [end node(s)]', 'Easy / Hard Label', 'Comments', 'Unnamed: 11'] df_test.Sentence.values df_test.Nikita_has_cause_effect.values # Report the number of sentences. print('Number of test sentences: {:,}\n'.format(df_test.shape[0])) # Create sentence and label lists labels = df_test.Nikita_has_cause_effect.values sentences = df_test.Sentence.values # Tokenize all of the sentences and map the tokens to thier word IDs. input_ids = [] attention_masks = [] # For every sentence... for sent in sentences: # `encode_plus` will: # (1) Tokenize the sentence. # (2) Prepend the `[CLS]` token to the start. # (3) Append the `[SEP]` token to the end. # (4) Map tokens to their IDs. # (5) Pad or truncate the sentence to `max_length` # (6) Create attention masks for [PAD] tokens. encoded_dict = tokenizer.encode_plus( sent, # Sentence to encode. add_special_tokens = True, # Add '[CLS]' and '[SEP]' max_length = 128, # Pad & truncate all sentences. pad_to_max_length = True, return_attention_mask = True, # Construct attn. masks. return_tensors = 'pt', # Return pytorch tensors. truncation=True ) # Add the encoded sentence to the list. 
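# Note (added): with return_tensors='pt', encoded_dict['input_ids'] is a (1, 128) tensor,
# so the torch.cat(..., dim=0) call below stacks the list into one (num_sentences, 128) tensor.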
input_ids.append(encoded_dict['input_ids']) # And its attention mask (simply differentiates padding from non-padding). attention_masks.append(encoded_dict['attention_mask']) # Convert the lists into tensors. input_ids = torch.cat(input_ids, dim=0) attention_masks = torch.cat(attention_masks, dim=0) labels = torch.tensor(labels) # Set the batch size. batch_size = 16 # Create the DataLoader. prediction_data = TensorDataset(input_ids, attention_masks, labels) prediction_sampler = SequentialSampler(prediction_data) prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size) # Prediction on test set print('Predicting labels for {:,} test sentences...'.format(len(input_ids))) # Put model in evaluation mode model.eval() # Tracking variables predictions , true_labels = [], [] # Predict for batch in prediction_dataloader: # Add batch to GPU batch = tuple(t.to(device) for t in batch) # Unpack the inputs from our dataloader b_input_ids, b_input_mask, b_labels = batch # Telling the model not to compute or store gradients, saving memory and # speeding up prediction with torch.no_grad(): # Forward pass, calculate logit predictions outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask) logits = outputs[0] # Move logits and labels to CPU logits = logits.detach().cpu().numpy() label_ids = b_labels.to('cpu').numpy() # Store predictions and true labels predictions.append(logits) true_labels.append(label_ids) print(' DONE.') (predictions) probabilities = tf.nn.softmax(predictions[0], axis=-1) probabilities probabilities = [] for x in range(0, len(predictions)): probabilities.append(tf.nn.softmax(predictions[x], axis=-1)) len(probabilities) str(probabilities[0][0].numpy()[0]) dataF = pd.DataFrame(probabilities[0], columns = ['a', 'b']) dataF l = [] for x in range(0, len(probabilities)): for y in range(0, len(probabilities[x])): l.append(str(probabilities[x][y].numpy()[0])+', '+str(probabilities[x][y].numpy()[1])) l import numpy as np from sklearn.metrics import classification_report,confusion_matrix,accuracy_score # Combine the results across all batches. flat_predictions = np.concatenate(predictions, axis=0) # For each sample, pick the label (0 or 1) with the higher score. flat_predictions = np.argmax(flat_predictions, axis=1).flatten() # Combine the correct labels for each batch into a single list. flat_true_labels = np.concatenate(true_labels, axis=0) matrix = confusion_matrix(flat_true_labels,flat_predictions) print(matrix) score = accuracy_score(flat_true_labels,flat_predictions) print(score) report = classification_report(flat_true_labels, flat_predictions) print(report) flat_predictions df_test['confidence'] = l df_test['bert_pred'] = flat_predictions df_test df_test.to_excel('bertPred_on_val_set.xlsx', columns = df_test.columns) import os # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() output_dir = './causalRel/' # Create output directory if needed if not os.path.exists(output_dir): os.makedirs(output_dir) print("Saving model to %s" % output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) # Good practice: save your training arguments together with the trained model # torch.save(args, os.path.join(output_dir, 'training_args.bin')) os.getcwd() output_dir = '/content/drive/MyDrive/climateMind/causalRel' model = BertForSequenceClassification.from_pretrained(output_dir) tokenizer = BertTokenizer.from_pretrained(output_dir) model.to(device) os.chdir('/content/drive/MyDrive/climateMind') os.getcwd() import pandas as pd os.listdir() df_pocket = pd.read_csv('pocket_effect_tag_texts_only_split_on_sentences_with_ids_en-core-web-md (1).csv') df_pocket.sample(10) list = [] for x in range(0,len(df_pocket)): list.append(1) df_pocket.insert(1, "label", list, True) df_pocket.sample(20) df_pocket = df_pocket[df_pocket['text'].notna()] type(df_pocket.iloc[0]['text']) # Report the number of sentences. print('Number of test sentences: {:,}\n'.format(df_pocket.shape[0])) # Create sentence and label lists labels = df_pocket.label.values sentences = df_pocket.text.values # Tokenize all of the sentences and map the tokens to thier word IDs. input_ids = [] attention_masks = [] # For every sentence... for sent in sentences: # `encode_plus` will: # (1) Tokenize the sentence. # (2) Prepend the `[CLS]` token to the start. # (3) Append the `[SEP]` token to the end. # (4) Map tokens to their IDs. # (5) Pad or truncate the sentence to `max_length` # (6) Create attention masks for [PAD] tokens. encoded_dict = tokenizer.encode_plus( sent, # Sentence to encode. add_special_tokens = True, # Add '[CLS]' and '[SEP]' max_length = 128, # Pad & truncate all sentences. pad_to_max_length = True, return_attention_mask = True, # Construct attn. masks. return_tensors = 'pt', # Return pytorch tensors. truncation=True ) # Add the encoded sentence to the list. input_ids.append(encoded_dict['input_ids']) # And its attention mask (simply differentiates padding from non-padding). attention_masks.append(encoded_dict['attention_mask']) # Convert the lists into tensors. input_ids = torch.cat(input_ids, dim=0) attention_masks = torch.cat(attention_masks, dim=0) labels = torch.tensor(labels) # Set the batch size. batch_size = 16 # Create the DataLoader. 
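# Note (added): the 'label' column of df_pocket was filled with 1s above as a placeholder,
# so the confusion matrix / accuracy computed afterwards only reflects the fraction of
# sentences the model predicts as class 1, not performance against gold labels.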
prediction_data = TensorDataset(input_ids, attention_masks, labels) prediction_sampler = SequentialSampler(prediction_data) prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size) # Prediction on test set print('Predicting labels for {:,} test sentences...'.format(len(input_ids))) # Put model in evaluation mode model.eval() # Tracking variables predictions , true_labels = [], [] # Predict for batch in prediction_dataloader: # Add batch to GPU batch = tuple(t.to(device) for t in batch) # Unpack the inputs from our dataloader b_input_ids, b_input_mask, b_labels = batch # Telling the model not to compute or store gradients, saving memory and # speeding up prediction with torch.no_grad(): # Forward pass, calculate logit predictions outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask) logits = outputs[0] # Move logits and labels to CPU logits = logits.detach().cpu().numpy() label_ids = b_labels.to('cpu').numpy() # Store predictions and true labels predictions.append(logits) true_labels.append(label_ids) print(' DONE.') import numpy as np from sklearn.metrics import classification_report,confusion_matrix,accuracy_score # Combine the results across all batches. flat_predictions = np.concatenate(predictions, axis=0) # For each sample, pick the label (0 or 1) with the higher score. flat_predictions = np.argmax(flat_predictions, axis=1).flatten() # Combine the correct labels for each batch into a single list. flat_true_labels = np.concatenate(true_labels, axis=0) matrix = confusion_matrix(flat_true_labels,flat_predictions) print(matrix) score = accuracy_score(flat_true_labels,flat_predictions) print(score) report = classification_report(flat_true_labels, flat_predictions) print(report) len(flat_predictions) df_pocket.insert(1, "Pred", flat_predictions, True) df_pocket.head(50) df_pocket = df_pocket.drop('label', axis=1) df_pocket.to_csv('pred_pocket_effect_tag_texts_only_split_on_sentences_with_ids_en-core-web-md.csv') os.getcwd()Introduction to MygeopackageMygeopackage is a python package for geographic data mining. Useful tools like converting common GIS data, like Geojson or Shapefile, to numpy array are offered in this package to speed up data preprocessing.Mygeopackage offers functionalities for data mining, including supervised machine learning, unsupervised machine learning, regressino, etc, based on the scikit-learn module. These functionalities are designed for specifically for geographic data. Getting started Import mygeopackage to your python script.import mygeopackage from mygeopackage import GeoCreate a Geo classgeojson = Geo(r'https://github.com/yungming0119/mygeopackage/blob/main/docs/notebooks/data/sample_points.geojson?raw=true') geojsonGeo object is the fundamental component for mygeopackage. It stores metadata, sptail geometry and attribute data for spatial data mining. First, we will create a Geo object and assign it to the *goejson* variable. 
When we create the class, we will give a URI as the argument.GeoJson data will be fetched from the URI.More on Geo(), please see Main Module notebook.geojson.show(top=20)Census Hierarchy![](assets/census_hierarchy.jpg)https://www.census.gov/newsroom/blogs/random-samplings/2014/07/understanding-geographic-relationships-counties-places-tracts-and-more.html# Hand built by clicking around on speedupamerica.com's results page # Next time I will use https://tigerweb.geo.census.gov/tigerweb/ eugene_tracts = [ 41039002201, 41039002202, 41039002301, 41039002401, 41039002302, 41039002403, 41039002404, 41039002501, 41039002503, 41039002504, 41039002600, 41039002700, 41039002800, 41039002902, 41039002903, 41039002904, 41039003000, 41039003101, 41039003102, 41039003600, 41039003700, 41039003800, 41039004000, 41039005400, 41039004100, 41039004200, 41039004300, 41039004401, 41039004403, 41039004404, 41039004405, 41039004501, 41039004502, 41039004600, 41039004700, 41039004800, 41039004900, 41039005000, 41039005100, 41039005300, ] springfield_tracts = [ 41039001801, 41039001803, 41039001804, # Thurston 41039001902, 41039001903, 41039001904, # Centeral Springfield 41039002001, 41039002002, # North Springfield 41039002101, 41039002102, # Gateway 41039003201, 41039003202, # West Springfield (Centenial) 41039003301, 41039003302, # Springfield 41039003400, # East of Mohawk 41039003500 # and ] # Set 'lane county' on all rows lane['lane_region'] = 'lane county' # Set 'eugene' on rows with a Eugene Census Tract eugene = lane['census_tract'].isin(eugene_tracts) lane.loc[eugene, 'lane_region'] = 'eugene' # Set 'springfield' on rows with a Eugene Census Tract springfield = lane['census_tract'].isin(springfield_tracts) lane.loc[springfield, 'lane_region'] = 'springfield' ############################################################### # With additional list of Census Tract lists more cities # could be added. Pull requests welcome! 
############################################################### aggs = { "id": ["count"], "rating": ["mean", "median", "count"], # 1 to 7 scale "actual_down_speed": ["mean", "median", "count"], # actual_down_speed / monthly_price "price_per_mbps": ["mean", "median", "count"] } # Aggregate by `lane_region`, showing the mean, median, and count for # rating, actual download speed, montly price, and actual price lane.groupby('lane_region').agg(aggs).sort_values(('id', 'count'), ascending=False)Eugeneeugene = lane[lane['lane_region'] == 'eugene'] agg = eugene.groupby('provider').agg(aggs) agg.sort_values(('id', 'count'), ascending=False).head(10) eugene.groupby('census_tract').agg(aggs).sample(10)Springfieldspringfield = lane[lane['lane_region'] == 'springfield'] agg = springfield.groupby('provider').agg(aggs) agg.sort_values(('id', 'count'), ascending=False).head(10) springfield.groupby('census_tract').agg(aggs).sample(10)Lane Countycounty = lane[lane['lane_region'] == 'lane county'] agg = county.groupby('provider').agg(aggs) agg.sort_values(('id', 'count'), ascending=False).head(10) county.groupby('census_tract').agg(aggs).sample(10)Train custom segmentation model with `IceVision`, `OpenImages`, and `SageMaker` Serving PyTorch Models In Production With BYOC And Amazon SagemakerSources:- https://github.com/aws-samples/amazon-sagemaker-endpoint-deployment-of-fastai-model-with-torchserve%reload_ext autoreload %autoreload 2 %matplotlib inlineImport librariesimport base64 import json import io import requests import numpy as np import matplotlib.pyplot as plt from PIL import Image from pathlib import Path from urllib.request import urlopenConfigurationimport boto3 import sagemaker from sagemaker import get_execution_roleIAM RoleYour `IAM Role` needs to have the following policies: - `AmazonS3FullAccess` - `AmazonEC2ContainerRegistryFullAccess` - `AmazonSageMakerFullAccess`role = get_execution_role() account_id = role.split(':')[4] region = boto3.Session().region_name sagemaker_session = sagemaker.session.Session() bucket = sagemaker_session.default_bucket() print(f'account_id: "{account_id}"') print(f'region: "{region}"') print(f'role: "{role}"') print(f'bucket: "{bucket}"')account_id: "849118573017" region: "eu-west-2" role: "arn:aws:iam::849118573017:role/service-role/AmazonSageMaker-ExecutionRole-20210113T160042" bucket: "sagemaker-eu-west-2-849118573017"Build and Push ContainerWe are now ready to build this container and push it to Amazon ECR. This task is executed using a shell script stored in the ../script/ folder. Let's take a look at this script and then execute it. Configure Amazon Elastic Container Registry (`ECR`)ecr_namespace = 'torchserve-sagemaker/' prefix = 'background-removal' ecr_repository_name = ecr_namespace + prefix image = f"{account_id}.dkr.ecr.{region}.amazonaws.com/{ecr_repository_name}:latest" imagePytorch Model ArtifactsCreate a compressed `*.tar.gz` file from the `*.mar` file per requirement of Amazon SageMaker and upload the model to your Amazon S3 bucket. 
Your file needs to be in the top-level (https://github.com/shashankprasanna/torchserve-examples/issues/3)model_artifacts_folder = '../model_store' model_file_name = [x.stem for x in Path(model_artifacts_folder).glob('*.mar')][0] !cd {model_artifacts_folder} && tar cvzf {model_file_name}.tar.gz {model_file_name}.mar !cd {model_artifacts_folder} && aws s3 cp {model_file_name}.tar.gz s3://{bucket}/torchserve_model_store/mask-rcnn-remove-bkg.mar upload: ./mask-rcnn-remove-bkg.tar.gz to s3://sagemaker-eu-west-2-849118573017/torchserve_model_store/mask-rcnn-remove-bkg.tar.gzBuild Torchserve Docker Container and Push to Amazon ECRpath_to_dockerfile_folder = '../deployment_code/' !aws ecr get-login-password --region {region} | docker login --username AWS --password-stdin {account_id}.dkr.ecr.{region}.amazonaws.com !aws ecr describe-repositories --repository-names $ecr_repository_name || aws ecr create-repository --repository-name $ecr_repository_name !docker build -t {ecr_repository_name} {path_to_dockerfile_folder} !docker tag {ecr_repository_name}:latest {image} !docker push {image}WARNING! Your password will be stored unencrypted in /home/ec2-user/.docker/config.json. Configure a credential helper to remove this warning. See https://docs.docker.com/engine/reference/commandline/login/#credentials-store Login Succeeded { "repositories": [ { "repositoryArn": "arn:aws:ecr:eu-west-2:849118573017:repository/torchserve-sagemaker/background-removal", "registryId": "849118573017", "repositoryName": "torchserve-sagemaker/background-removal", "repositoryUri": "849118573017.dkr.ecr.eu-west-2.amazonaws.com/torchserve-sagemaker/background-removal", "createdAt": 1617816642.0, "imageTagMutability": "MUTABLE", "imageScanningConfiguration": { "scanOnPush": false }, "encryptionConfiguration": { "encryptionType": "AES256" } } ] } Sending build context to Docker daemon 78.34kB Step 1/16 : FROM pytorch/pytorc[...]Inference Endpoint Create Sagemaker Modelmodel_data = f"s3://{bucket}/torchserve_model_store/{model_file_name}.tar.gz" sm_model_name = model_file_name container = {"Image": image, "ModelDataUrl": model_data} create_model_response = sagemaker_session.create_model( name=sm_model_name, role=role, container_defs=container)Endpoint configuration**Note**: choose your preferred `InstanceType`: https://aws.amazon.com/sagemaker/pricing/deployment_instance_type = "ml.g4dn.xlarge" import time endpoint_config_name = f"{model_file_name}-endpoint-config-" + time.strftime( "%Y-%m-%d-%H-%M-%S", time.gmtime() ) print(endpoint_config_name) create_endpoint_config_response = sagemaker_session.create_endpoint_config( name=endpoint_config_name, model_name = sm_model_name, initial_instance_count = 1, instance_type = deployment_instance_type, )mask-rcnn-remove-bkg-endpoint-config-2021-04-12-10-57-41Deployment time will take around 7-10 minutesendpoint_name = f"{model_file_name}-endpoint-" + time.strftime( "%Y-%m-%d-%H-%M-%S", time.gmtime() ) print(endpoint_name) create_endpoint_response = sagemaker_session.create_endpoint( endpoint_name=endpoint_name, config_name=endpoint_config_name )mask-rcnn-remove-bkg-endpoint-2021-04-12-10-57-43 ---------------!Testingdef get_image_bytes(path:str): try: response = requests.get(path) data = urlopen(path) except Exception: data = open(path,'rb') return data.read() # path_to_image = 'https://df2sm3urulav.cloudfront.net/tenants/ca/uploads/images/0-4999/1601/5d82a21c1abf4.jpg' path_to_image = 'test_images/FindID_161098.jpg' payload = get_image_bytes(path_to_image) %%time client = 
boto3.client("runtime.sagemaker") response = client.invoke_endpoint( EndpointName=endpoint_name, ContentType="application/x-image", Body=payload, ) response = json.loads(response["Body"].read()) target_image = Image.open(io.BytesIO(payload)) predicted_image = Image.open(io.BytesIO(base64.b64decode(response['base64_prediction']))) f = plt.figure(figsize=(10,10)) f.add_subplot(1,2,1) plt.imshow(target_image) f.add_subplot(1,2, 2) plt.imshow(predicted_image) plt.show(block=True)Clean-upclient = boto3.client("sagemaker") client.delete_model(ModelName=sm_model_name) client.delete_endpoint(EndpointName=endpoint_name) client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)SCMsThis notbook contains solutions to some problems presented by al. in **Elements of Causal Inference** (2017) Joint sample from SCMConsider the SCM:$$\large X := Y^2 + N_X$$$$\large Y := N_Y$$where:$N_X, N_Y \sim N(0, 1)$ Generate an iid sample of 200 observation from joint distr. $(X, Y)$.# Sample from Y y = stats.norm(0, 1).rvs(200) # Compute X x = y**2 + stats.norm(0, 1).rvs(200) # Plot histograms plt.hist(x, bins=15, density=True, label='X', alpha=.7) plt.hist(y, bins=15, density=True, label='Y', alpha=.7) plt.legend() plt.show() # Joint sample xy = np.hstack([x[:, np.newaxis], y[:, np.newaxis]]) plt.hist2d(x, y, density=True, bins=15) plt.xlabel('$X$') plt.ylabel('$Y$') plt.colorbar() plt.show()C:\Users\aleks\AppData\Local\Temp/ipykernel_7100/973994305.py:1: MatplotlibDeprecationWarning: Auto-removal of grids by pcolor() and pcolormesh() is deprecated since 3.5 and will be removed two minor releases later; please call grid(False) first. plt.hist2d(x, y, density=True, bins=15) C:\Users\aleks\AppData\Local\Temp/ipykernel_7100/973994305.py:4: MatplotlibDeprecationWarning: Auto-removal of grids by pcolor() and pcolormesh() is deprecated since 3.5 and will be removed two minor releases later; please call grid(False) first. 
plt.colorbar()Determine Whether a movie review is good sentiment or bad Each (unique)word is encoded by a number representing how common it is in the dataset%tensorflow_version 2.x from keras.datasets import imdb from keras.preprocessing import sequence import tensorflow as tf import os import numpy as np import keras # set some parameters VOCAB_SIZE = 88584 MAXLEN = 250 # each movie review has differnt lengths(shorter ones we padd with 0 longer ones we truncate) BATCH_SIZE = 64 (train_data,train_labels),(test_data,test_label) = imdb.load_data(num_words=VOCAB_SIZE)* **lets have a look at a single movie review**train_data[0] len(train_data[0]) print(train_labels[0]) print(train_labels[2]) print(train_labels[10])1 0 1more preprocessing making the lengths uniform* MAX_LENGTH = 250 WORDS* if the length is shorter than 250 we add 0's to the left(padding)* if the length is longer than 250 words we cut of the extrastrain_data = sequence.pad_sequences(train_data,MAXLEN) test_data = sequence.pad_sequences(test_data,MAXLEN) print(train_data[1])[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 194 1153 194 8255 78 228 5 6 1463 4369 5012 134 26 4 715 8 118 1634 14 394 20 13 119 954 189 102 5 207 110 3103 21 14 69 188 8 30 23 7 4 249 126 93 4 114 9 2300 1523 5 647 4 116 9 35 8163 4 229 9 340 1322 4 118 9 4 130 4901 19 4 1002 5 89 29 952 46 37 4 455 9 45 43 38 1543 1905 398 4 1649 26 6853 5 163 11 3215 10156 4 1153 9 194 775 7 8255 11596 349 2637 [...]CREATING THE MODELmodel = tf.keras.Sequential([ tf.keras.layers.Embedding(VOCAB_SIZE,32), tf.keras.layers.LSTM(32), tf.keras.layers.Dense(1,activation='sigmoid') ]) model.summary()Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding (Embedding) (None, None, 32) 2834688 _________________________________________________________________ lstm (LSTM) (None, 32) 8320 _________________________________________________________________ dense (Dense) (None, 1) 33 ================================================================= Total params: 2,843,041 Trainable params: 2,843,041 Non-trainable params: 0 _________________________________________________________________Trainingmodel.compile(loss = 'binary_crossentropy',optimizer = 'rmsprop',metrics = ['acc'] ) history = model.fit(train_data,train_labels,epochs=10,validation_split=0.2)Epoch 1/10 625/625 [==============================] - 40s 64ms/step - loss: 0.4164 - acc: 0.8101 - val_loss: 0.2961 - val_acc: 0.8776 Epoch 2/10 625/625 [==============================] - 40s 63ms/step - loss: 0.2324 - acc: 0.9110 - val_loss: 0.2978 - val_acc: 0.8824 Epoch 3/10 625/625 [==============================] - 39s 63ms/step - loss: 0.1802 - acc: 0.9330 - val_loss: 0.2694 - val_acc: 0.8922 Epoch 4/10 625/625 [==============================] - 39s 63ms/step - loss: 0.1479 - acc: 0.9488 - val_loss: 0.3084 - val_acc: 0.8874 Epoch 5/10 625/625 [==============================] - 39s 62ms/step - loss: 0.1261 - acc: 0.9570 - val_loss: 0.2888 - val_acc: 0.8800 Epoch 6/10 625/625 [==============================] - 39s 63ms/step - loss: 0.1052 - acc: 0.9641 - val_loss: 0.3012 - val_acc: 0.8834 Epoch 7/10 625/625 [==============================] - 39s 62ms/step - loss: 0.0942 - acc: 0.9689 - val_loss: 0.3348 - val_acc: 0.8888 Epoch 8/10 625/625 [==============================] - 39s 63ms[...]Evaluating Model on Test 
datasetresults = model.evaluate(test_data,test_label) print(results)782/782 [==============================] - 13s 16ms/step - loss: 0.4738 - acc: 0.8568 [0.4737767279148102, 0.8568000197410583]Predictions * get the imdb Dictionay(with those words)* function that chops the text into words only* loop through the words * check if the word is in the dictionary * take its integer representation in the dictionary and place it into a list called (token )* if word is not in dictionary then in the token list we replace it with 0word_index = imdb.get_word_index() def encode_text(text): # chop the review(sentence) into tokens(individual words) tokens = keras.preprocessing.text.text_to_word_sequence(text) tokens = [word_index[word] if word in word_index else 0 for word in tokens] return sequence.pad_sequences([tokens],MAXLEN)[0] text = 'this is a movie i like' encoded = encode_text(text) print(encoded)[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 6 3 17 10 37]Decode Functionreverse_word_index = {value:key for (key,value) in word_index.items()} # loops through all words in vocab get key(integer representation) and value(word) def decode_integer(integers): PAD = 0 text = " " for num in integers: if num != 0: text += reverse_word_index[num] + " " # gets the word from the dictionary concats it with a space the concat it with (text) to make a sentence return text[:-1] # return everything in the sentence except the last character (which is a space) print(decode_integer(encoded))this is a movie i likePrediction Functiondef predict(text): encoded_text = encode_text(text) pred = np.zeros((1,250)) # blank nupy array filled with 0's and of 250 length(what model expects) pred[0] = encoded_text # pick first array in the pred (array of arrays) and (set it to the array from encoding function) results = model.predict(pred) print(results[0]) # Example of positive reviews and negative review positive_review = "i really really love it its awesome" negative_review = " i hate that movie it sucks" def predict(text): encoded_text = encode_text(text) pred = np.zeros((1,250)) # blank nupy array filled with 0's and of 250 length(what model expects) pred[0] = encoded_text # pick first array in the pred (array of arrays) and (set it to the array from encoding function) results = model.predict(pred) review_status = results[0][0] if review_status > 0.5: print('positive sentiment') else: print('negative sentiment') print(predict(positive_review)) print(predict(negative_review))Prepare data Using dataset ``lpd_5_cleansed`` from [Lakh Pianoroll Dataset](https://salu133445.github.io/lakh-pianoroll-dataset/dataset).I cut each song into 400 time steps, removed the lowest 20 pitches and the highest 24 pitches, and made a .npy file called ``lpd_5_cleansed.npy``. 
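The cropping itself is not shown in this notebook; a minimal sketch of what that preprocessing might look like, assuming each song is already a pianoroll array of shape (time, 128, 5) (the function and variable names are illustrative):

```python
import numpy as np

def crop_pianoroll(roll, n_steps=400, low_cut=20, high_cut=24):
    """Keep the first n_steps time steps and drop the lowest/highest pitches.

    roll: one multitrack pianoroll of shape (time, 128, 5).
    Returns an array of shape (n_steps, 128 - low_cut - high_cut, 5) = (400, 84, 5).
    """
    cropped = roll[:n_steps, low_cut:128 - high_cut, :]
    # Zero-pad songs that are shorter than n_steps time steps
    if cropped.shape[0] < n_steps:
        pad = np.zeros((n_steps - cropped.shape[0],) + cropped.shape[1:], dtype=cropped.dtype)
        cropped = np.concatenate([cropped, pad], axis=0)
    return cropped
```

Stacking 21,425 such arrays gives the dataset described next.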
Shape is: 21425(song) × 400(time step) × 84(note) × 5(track)import numpy as np data_file = './data/lpd_5_cleansed.npy' data = np.load(data_file) print(np.shape(data)) data = np.reshape(data, [21425, 400, -1]) print(np.shape(data)) data.dtype import gc from utils.dataset_helper import read_data_sets dataset = read_data_sets(data) train_set = dataset.train develop_set = dataset.develop test_set = dataset.test # release space del data del dataset gc.collect()Create Modelfrom models.rnn import RNN model = RNN()Trainlog_tag = "20180518-1030" model.train(train_set, develop_set, log_tag)Trying to restore saved checkpoints from ./logdir/20180518-1030/train ... No checkpoint found. Epoch 1 [batch 100] > train loss: 0.0405 develop loss: 0.0438 Epoch 1 [batch 200] > train loss: 0.0544 develop loss: 0.0571 Epoch 1 [batch 300] > train loss: 0.0472 develop loss: 0.0535 Epoch 1 [batch 400] > train loss: 0.0443 develop loss: 0.0511 Epoch 1 [batch 500] > train loss: 0.0562 develop loss: 0.0533 Epoch 1 [batch 600] > train loss: 0.0394 develop loss: 0.0473 Epoch 1 [batch 700] > train loss: 0.0493 develop loss: 0.0424 Epoch 1 [batch 800] > train loss: 0.0517 develop loss: 0.0517 Epoch 1 [batch 900] > train loss: 0.0489 develop loss: 0.0513 Epoch 1 [batch 1000] > train loss: 0.0547 develop loss: 0.0503 Storing checkpoint to ./logdir/20180518-1030/train ... Done. Epoch 2 [batch 100] > train loss: 0.0449 develop loss: 0.0418 Epoch 2 [batch 200] > train loss: 0.0323 develop loss: 0.0469 Epoch 2 [batch 300] > train loss: 0.0395 develop loss: 0.0420 Epoch 2 [batch 400] > [...]**Plot figures**from IPython.display import display, Image from utils.ops import plot_gif plot_gif(fig_dir='./logdir/%s/results' % log_tag) Image(url='./logdir/%s/results/Train.gif' % log_tag)Testmodel.test(test_set, log_tag) Image(url='./logdir/%s/results/Test.png' % log_tag)Test loss: 0.0009 Write to midi file './logdir/20180518-1030/results/Test.mid' done.Embadding Layer Embedding Layer Toy Example Importsimport numpy as np from keras.preprocessing.text import one_hot from keras.preprocessing.sequence import pad_sequences from keras.models import Sequential from keras.layers import Dense, Embedding, FlattenDatareviews = [ 'never comming back', 'horrible service', 'rude waitress', 'cold foot', 'horrible food', 'awesome', 'awesome service', 'rocks', 'poor work', 'could not have done better' ] # (1 = Negative, 0 = Positive) labels = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])Encode Datavocab_size = 50 encoded_reviews = [one_hot(review, n = vocab_size) for review in reviews] print('Encoded Reviews:', encoded_reviews)Encoded Reviews: [[32, 21, 10], [47, 41], [24, 36], [37, 25], [47, 12], [15], [15, 41], [9], [4, 3], [2, 3, 22, 12, 39]]Pad Datamaxlen = 4 padded_reviews = pad_sequences(sequences = encoded_reviews, maxlen = maxlen, padding = 'post') print('Padded Reviews:', padded_reviews)Padded Reviews: [[32 21 10 0] [47 41 0 0] [24 36 0 0] [37 25 0 0] [47 12 0 0] [15 0 0 0] [15 41 0 0] [ 9 0 0 0] [ 4 3 0 0] [ 3 22 12 39]]Build Modelmodel = Sequential() model.add(Embedding(input_dim = vocab_size, output_dim = 8, input_length = maxlen)) model.add(Flatten()) model.add(Dense(units = 1, activation = 'sigmoid')) model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['acc']) print(model.summary())Model: "sequential_2" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding_1 (Embedding) (None, 4, 8) 400 
_________________________________________________________________ flatten_1 (Flatten) (None, 32) 0 _________________________________________________________________ dense_1 (Dense) (None, 1) 33 ================================================================= Total params: 433 Trainable params: 433 Non-trainable params: 0 _________________________________________________________________ NoneTrain Modelmodel.fit(padded_reviews, labels, epochs = 100, verbose = False)Evaluate Modelloss, acc = model.evaluate(padded_reviews, labels, verbose = False) print(f'Loss: {loss}, Accuracy: {acc}')Loss: 0.4816771149635315, Accuracy: 1.0Data Science Unit 2 Sprint Challenge 4 — Model Validation Follow the instructions for each numbered part to earn a score of 2. See the bottom of the notebook for a list of ways you can earn a score of 3. Predicting Blood DonationsOur dataset is from a mobile blood donation vehicle in Taiwan. The Blood Transfusion Service Center drives to different universities and collects blood as part of a blood drive.The **goal** is to predict the **last column** = whether the donor made a **donation in March 2007**, using information about each donor's history. We'll measure success using **_recall score_ as the model evaluation metric**.Good data-driven systems for tracking and predicting donations and supply needs can improve the entire supply chain, making sure that more patients get the blood transfusions they need. Run this cell to load the data:# initial imports import pandas as pd import numpy as np from sklearn.metrics import accuracy_score, recall_score from sklearn.model_selection import train_test_split as tts from sklearn.model_selection import cross_val_score, GridSearchCV from sklearn.feature_selection import SelectKBest from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC # from sklearn.feature_selection import f_classif from sklearn.linear_model import LogisticRegression df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/blood-transfusion/transfusion.data') df = df.rename(columns={ 'Recency (months)': 'months_since_last_donation', 'Frequency (times)': 'number_of_donations', 'Monetary (c.c. blood)': 'total_volume_donated', 'Time (months)': 'months_since_first_donation', 'whether he/she donated blood in March 2007': 'made_donation_in_march_2007' }) print(df.shape) # 748 rows by 5 columns print(df.isna().sum()) # zero nan's; thanks, ! df.head()(748, 5) months_since_last_donation 0 number_of_donations 0 total_volume_donated 0 months_since_first_donation 0 made_donation_in_march_2007 0 dtype: int64Part 1.1 — Begin with baselinesWhat **accuracy score** would you get here with a **"majority class baseline"?** (You don't need to split the data into train and test sets yet. 
You can answer this question either with a scikit-learn function or with a pandas function.)# make copy of df, work w copy going forward df1 = df.copy() # will refrain, in this cell, from yet doing tts on df1 # Hat Tip to /LSDS X = df1.drop('made_donation_in_march_2007', axis='columns') y_true = df1.made_donation_in_march_2007 majority_class = y_true.mode()[0] y_pred = np.full(shape=y_true.shape, fill_value=majority_class) # validate print(y_true.shape, y_pred.shape) all(y_pred == majority_class) # compute accuracy_score print('accuracy score is:', accuracy_score(y_true, y_pred))accuracy score is: 0.7620320855614974What **recall score** would you get here with a **majority class baseline?**(You can answer this question either with a scikit-learn function or with no code, just your understanding of recall.)# compute recall_score # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html ''' The recall is the ratio tp / (tp + fn) where tp is the number of true positives and fn the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. ''' print('recall score is:', recall_score(y_true, y_pred, average=None))recall score is: [1. 0.]Part 1.2 — Split dataIn this Sprint Challenge, you will use "Cross-Validation with Independent Test Set" for your model evaluation protocol.First, **split the data into `X_train, X_test, y_train, y_test`**, with random shuffle. (You can include 75% of the data in the train set, and hold out 25% for the test set.)# generate cross_val_score_model # cf. https://github.com/johnpharmd/DS-Unit-2-Sprint-4-Model-Validation/blob/master/module-1-begin-modeling-process/LS_DS_241_Begin_modeling_process_LIVE_LESSON.ipynb X_train, X_test, y_train, y_test = tts(X, y_true, shuffle=True)Part 2.1 — Make a pipelineMake a **pipeline** which includes:- Preprocessing with any scikit-learn [**Scaler**](https://scikit-learn.org/stable/modules/classes.htmlmodule-sklearn.preprocessing)- Feature selection with **[`SelectKBest`](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.SelectKBest.html)([`f_classif`](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.f_classif.html))**- Classification with [**`LogisticRegression`**](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html)# make pipeline, which is kernel_svm # hat tip to R/LSDS for following URL: # https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb cls = SVC(C=10.0, kernel='rbf', gamma=0.1, decision_function_shape='ovr') kernel_svm = Pipeline([('std', StandardScaler()), ('svc', cls)]) # select features using SelectKBest features = SelectKBest(f_classif, k=3) # perform classification using LogReg log_reg = LogisticRegression().fit(X_train, y_train)C:\Users\jhump\Anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning. FutureWarning)Part 2.2 — Do Grid Search Cross-ValidationDo [**GridSearchCV**](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) with your pipeline. 
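(As an illustrative aside, here is a minimal sketch of a pipeline plus grid search matching the requirements spelled out in this part; the step names 'scale', 'kbest', and 'lr' are arbitrary, and it assumes the X_train/y_train split from Part 1.2.)

from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

pipe = Pipeline([('scale', StandardScaler()),
                 ('kbest', SelectKBest(f_classif)),
                 ('lr', LogisticRegression(solver='liblinear'))])
param_grid = {'kbest__k': [1, 2, 3, 4],
              'lr__class_weight': [None, 'balanced'],
              'lr__C': [.0001, .001, .01, .1, 1.0, 10.0, 100.0, 1000.0, 10000.0]}
gs = GridSearchCV(pipe, param_grid, scoring='recall', cv=5)
gs.fit(X_train, y_train)
print(gs.best_score_, gs.best_params_)
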
Use **5 folds** and **recall score**.Include these **parameters for your grid:** `SelectKBest`- `k : 1, 2, 3, 4` `LogisticRegression`- `class_weight : None, 'balanced'`- `C : .0001, .001, .01, .1, 1.0, 10.0, 100.00, 1000.0, 10000.0`**Fit** on the appropriate data.# perform GridSearchCV # make param_grid param_grid = [{'svc__C': [.0001, .001, .01, .1, 1.0, 10.0, 100.00, 1000.0, 10000.0], 'svc__gamma': [0.001, 0.0001], 'svc__kernel': ['rbf']},] param_grid_adjust = [{'k': [1, 2, 3, 4], 'class_weight': [None, 'balanced'], 'svc__C': [.0001, .001, .01, .1, 1.0, 10.0, 100.00, 1000.0, 10000.0]},] # make gs object gs = GridSearchCV(estimator=kernel_svm, param_grid=param_grid_adjust, scoring='recall', n_jobs=-1, cv=5, verbose=1, refit=True, pre_dispatch='2*n_jobs') # run gs gs.fit(X_train, y_train)Fitting 5 folds for each of 72 candidates, totalling 360 fitsPart 3 — Show best score and parametersDisplay your **best cross-validation score**, and the **best parameters** (the values of `k, class_weight, C`) from the grid search.(You're not evaluated here on how good your score is, or which parameters you find. You're only evaluated on being able to display the information. There are several ways you can get the information, and any way is acceptable.)# display best gscv score, and the best parameters, from the gs # best cv score print('Best GS Score %.2f' % gs.best_score_) # best parameters COMMENT: need to refactor param_grid for k, class_weight, and C print('best GS Params %s' % gs.best_params_)Best GS Score 0.09 best GS Params {'svc__C': 10000.0, 'svc__gamma': 0.001, 'svc__kernel': 'rbf'}Part 4 — Calculate classification metrics from a confusion matrixSuppose this is the confusion matrix for your binary classification model: Predicted Negative Positive Actual Negative 85 58 Positive 8 36 Calculate accuracy# accuracy == (TP + TN)/Total accuracy = (36 + 85)/187 print('accuracy is:', accuracy)accuracy is: 0.6470588235294118Calculate precision# precision == TP/(TP + FP) precision = 36/(36 + 58) print('precision is:', precision)precision is: 0.3829787234042553Calculate recall# recall == sensitivity == TP/P recall = 36/44 print('recall is:', recall)recall is: 0.8181818181818182Logical Operators and Findimport numpy as np from numpy import mean, stdExercise 1data = np.load("exercise3_compressed.npz") dates = data["dates"] SP500 = data["SP500"] XOM = data["XOM"] print("sum(SP500<0):") sum(SP500 < 0) print("sum(XOM<0):") sum(XOM < 0)Exercise 2SP500big = SP500 > (2 * SP500.std()) SP500small = SP500 < (-2 * SP500.std()) print("mean(SP500[SP500big]):") mean(SP500[SP500big]) print("mean(SP500[SP500small]):") mean(SP500[SP500small]) XOMbig = XOM > (2 * std(XOM)) XOMsmall = XOM < (-2 * std(XOM)) print("mean(XOM[XOMbig]):") mean(XOM[XOMbig]) print("mean(XOM[XOMsmall]):") mean(XOM[XOMsmall])Exercise 3bothNeg = np.logical_and(SP500 < 0, XOM < 0) data = np.vstack((SP500, XOM)).T corr = np.corrcoef(data.T) negCorr = np.corrcoef(data[bothNeg, :].T) print("corr:") corr print("negCorr:") negCorrExercise 4oneNeg = np.logical_or(SP500 < 0, XOM < 0) oneNegCorr = np.corrcoef(data[oneNeg, :].T) print("oneNegCorr:") oneNegCorrExercise 5def myany(x): """Returns True if any value in the input is True""" return not np.all(np.logical_not(x)) def myall(x): """Returns True if all values in the input is True""" return not np.any(np.logical_not(x)) np.any(SP500 < 0) myany(SP500 < 0) np.any(SP500 < -0.5) myany(SP500 < -0.5)[Oregon Curriculum Network](http://www.4dsolutions.net/ocn) [Discovering Math with Python](Introduction.ipynb) 
Chapter 8: TRANSFORMATIONSWe only need to talk about three types of transformation in this chapter: translation, rotation, and scaling.To translate a polyhedron is to slide it around in space without having it turn around an axis. Just adding the same translation vector to every vertex vector effectively slides it.Rotation is most easily defined about the origin, i.e. we center our polyhedron around the center of our coordinate system before applying our rotation algorithm. If the polyhedron was not at centered at the origin to begin with, we may translated it (slide it without rotation), do the rotation, and translate it back to where we found it.We'll be looking at two ways to rotate our polyhedron:* applying a rotation matrix to each vertex vector* applying a quaternion to each vertex vector, and then its inverseThe second technique will be the topic of Chapter 11.Scaling means having the polyhedron grow or shrink. The same translation trick may be applied, as once a polyhedron is centered at the origin, we need only multiply each vertex vector by a scaler, thereby elongating or shortening all the vectors. See Chapter 6.class Vector: passImportação Train Treino e Validaçãodf_train = pd.read_csv('train.csv') df_train.head() df_test = pd.read_csv('test.csv') df_test.head(10) df_train.shape df_x = df_train[df_test.columns] df_x['NU_NOTA_MT'] = df_train['NU_NOTA_MT'] mtx = msno.matrix(df_x) df_x.head() df_x = df_x.drop('SG_UF_RESIDENCIA', axis=1) df_x.head() dfck = df_x.isna().sum().to_frame().reset_index() dfck.columns = ['Coluna', 'Qtd_NAN'] dfck['Percentual'] = round((dfck['Qtd_NAN'] / df_train.shape[0]) * 100, 2) dfck.loc[21:30] colsTo0 = ['NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_CH', 'NU_NOTA_LC', 'NU_NOTA_MT', 'TP_STATUS_REDACAO', 'NU_NOTA_COMP1', 'NU_NOTA_COMP2', 'NU_NOTA_COMP3', 'NU_NOTA_COMP4', 'NU_NOTA_COMP5', 'NU_NOTA_REDACAO'] for i in colsTo0: df_x[i] = df_x[i].replace(np.nan, 0) df_x.head() df_x['TP_ENSINO'] = df_x['TP_ENSINO'].replace(np.nan, -1) df_x['TP_DEPENDENCIA_ADM_ESC'] = df_x['TP_DEPENDENCIA_ADM_ESC'].replace(np.nan, -1) df_x['Q027'] = df_x['Q027'].replace(np.nan, -1) df_x.head() mtx = msno.matrix(df_x) df_x['CO_PROVA_CN'].value_counts() co_provas = ['CO_PROVA_CN', 'CO_PROVA_CH', 'CO_PROVA_LC', 'CO_PROVA_MT'] df_co_prova = df_x[co_provas] df_co_prova.head() df_co_prova = pd.get_dummies(df_co_prova) df_co_prova.head() df_x = df_x.drop(co_provas, axis=1) df_x = pd.concat([df_x, df_co_prova], axis=1) df_x.head() df_x = pd.concat([df_x, pd.get_dummies(df_x['TP_SEXO'])], axis=1) df_x = df_x.drop('TP_SEXO', axis=1) df_x.head() qColsFact = ['Q001', 'Q002', 'Q006', 'Q024', 'Q025', 'Q026', 'Q027', 'Q047'] for i in qColsFact: df_x[i], uniques = pd.factorize(df_x[i], sort=True) df_x.head() df_nu_inscricao = df_x['NU_INSCRICAO'] df_x = df_x.drop('NU_INSCRICAO', axis=1) df_x.head()Correlações#fig, ax = plt.subplots(figsize=(20,10)) corr = df_x.corr() #hm = sns.heatmap(corr, xticklabels=True, yticklabels=True, annot=True, linewidths=.5, ax=ax) #hmRFE""" X = df_x y = df_x['NU_NOTA_MT'] estimator = SVR(kernel="linear") rfe_selector = RFE(estimator=estimator, n_features_to_select=10, step=10, verbose=5) rfe_selector.fit(X, y) rfe_support = rfe_selector.get_support() rfe_feature = X.loc[:,rfe_support].columns.tolist() print(str(len(rfe_feature)), 'selected features') """Modeloregr = RandomForestRegressor(n_estimators=2000, n_jobs=-1, max_depth=100, min_samples_leaf=4, min_samples_split=10, random_state=0) y = df_x['NU_NOTA_MT'] df_x = df_x.drop('NU_NOTA_MT', axis=1) X = df_x regr.fit(X, y) 
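# Note: the R² printed below is computed on the training data itself, so it is an optimistic
# in-sample estimate. A hedged sketch of an out-of-sample check (assuming the same X and y):
# from sklearn.model_selection import cross_val_score
# print(cross_val_score(regr, X, y, cv=3, n_jobs=-1).mean())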
print('R² = {}'.format(regr.score(X, y).round(2)))R² = 0.97Testedf_t = df_test df_t.head() for i in colsTo0: if (i != 'NU_NOTA_MT'): df_t[i] = df_t[i].replace(np.nan, 0) df_t.head() df_t['TP_ENSINO'] = df_t['TP_ENSINO'].replace(np.nan, -1) df_t['TP_DEPENDENCIA_ADM_ESC'] = df_t['TP_DEPENDENCIA_ADM_ESC'].replace(np.nan, -1) df_t['Q027'] = df_t['Q027'].replace(np.nan, -1) df_t.head() mtx = msno.matrix(df_t) co_provas = ['CO_PROVA_CN', 'CO_PROVA_CH', 'CO_PROVA_LC', 'CO_PROVA_MT'] df_co_prova = df_t[co_provas] df_co_prova.head() df_co_prova = pd.get_dummies(df_co_prova) df_t = df_t.drop(co_provas, axis=1) df_t = pd.concat([df_t, df_co_prova], axis=1) df_t.head() df_t = pd.concat([df_t, pd.get_dummies(df_t['TP_SEXO'])], axis=1) df_t = df_t.drop('TP_SEXO', axis=1) df_t.head() qColsFact = ['Q001', 'Q002', 'Q006', 'Q024', 'Q025', 'Q026', 'Q027', 'Q047'] for i in qColsFact: df_t[i], uniques = pd.factorize(df_t[i], sort=True) df_t.head() answer = df_t['NU_INSCRICAO'] df_t = df_t.drop('NU_INSCRICAO', axis=1) df_t = df_t.drop('SG_UF_RESIDENCIA', axis=1) print(answer.head(5)) df_t.head(5) print(df_x.shape) print(df_t.shape) for i in df_x.columns: if i not in df_t.columns: print(i) df_t['CO_PROVA_CN_a27a1efea095c8a973496f0b57a24ac6775d95b0'] = 0 df_t['CO_PROVA_CH_d5f6d17523d2cce3e4dc0a7f0582a85cec1c15ee'] = 0 df_t.head()Prediçãopred = regr.predict(df_t)Exportaçãodf_csv = pd.DataFrame(answer) df_pred = pd.DataFrame(pred) df_csv['NU_NOTA_MT'] = round(df_pred, 2) df_csv.head(10) for idx, row in df_csv.iterrows(): nuInscricao = row['NU_INSCRICAO'] dfcn = df_test.query('NU_INSCRICAO == @nuInscricao')['NU_NOTA_CN'] if (dfcn.values <= 0): df_csv.loc[idx, 'NU_NOTA_MT'] = 0 df_csv.head(10) df_csv.to_csv('answer.csv', index = False, header=True)Modeltrain_X = np.swapaxes(train_X,2,1) train_X.shape train_X = np.expand_dims(train_X,axis=4) model = Sequential() model.add(ConvLSTM2D(128,(47,8),input_shape=(1034,128,1,1),padding='same',kernel_initializer='he_uniform' ,dropout=0.6,recurrent_dropout=0.6,go_backwards=True)) model.add(Dense(41,activation='softmax')) model.summary()***kk = np.array([[[0],[1]],[[0],[1]],[[0],[1]]]) kk.shape np.swapaxes(kk,0,2).shapeQuestions to answer, given hiring funnel data1. How biased is the hiring funnel?2. Which steps in the flow introduces most of the bias?3. 
What can I do to reduce the bias introduced by this step?import pandas as pd from scipy import stats from plotly import graph_objects as go # Data courtesy of , CEO of Webiks: https://webiks.com/ # Trans & Non-Binary data added for illustration recruitment_finnel_dict = {'Fullstack Dev': {'Male': {'CV': 1448, 'Phone Interview': 40, 'Professional Interview 1': 23, 'Professional Interview 2': 13, 'CEO Interview': 11, 'Offered': 8, 'Signed': 4}, 'Female': {'CV': 493, 'Phone Interview': 24, 'Professional Interview 1': 12, 'Professional Interview 2': 2, 'CEO Interview': 2, 'Offered': 2, 'Signed': 1}, 'Non-Binary': {'CV': 50, 'Phone Interview': 2, 'Professional Interview 1': 1, 'Professional Interview 2': 1, 'CEO Interview': 1, 'Offered': 0, 'Signed': 0}, 'Trans': {'CV': 50, 'Phone Interview': 2, 'Professional Interview 1': 1, 'Professional Interview 2': 1, 'CEO Interview': 1, 'Offered': 1, 'Signed': 1}, }, 'Data Scientist': {'Male': {'CV': 22, 'Phone Interview': 7, 'Professional Interview 1': 1, 'Professional Interview 2': 0, 'CEO Interview': 3, 'Offered': 0, 'Signed': 0}, 'Female': {'CV': 5, 'Phone Interview': 0, 'Professional Interview 1': 0, 'Professional Interview 2': 0, 'CEO Interview': 0, 'Offered': 0, 'Signed': 0} } } fullstack_flow_df = pd.DataFrame(recruitment_finnel_dict['Fullstack Dev']) fullstack_flow_df.eval("Percent_Female = Female * 100.0 / (Female + Male)")1. We can see that only 20% of the hires (1/5) are Female, despite the fact that 25.4% of applicants were Female.2. We can also see that there is no representation of Non-Binary Genders in the dataset (which either speaks to the data collection process or to lack of participation from non-binary people at the process)3. What are the reasons for disqualifying male vs female?def generate_funnel(flow_df): fig = go.Figure() for gender in flow_df.columns: fig.add_trace(go.Funnel( name = gender, y = flow_df.index, x = flow_df[gender], textinfo = 'value+percent previous', )) fig.show() generate_funnel(fullstack_flow_df) def hiring_prob(flow_df, gender): top_of_funnel = fullstack_flow_df[gender][0] end_of_funnel = fullstack_flow_df[gender][-1] return end_of_funnel * 100.0 / top_of_funnel for gender in fullstack_flow_df.columns: print(f"Prob. of getting hired, given that a {gender} person sent a CV: {round(hiring_prob(fullstack_flow_df, gender),2)}%") def compute_score(flow_df, marginalized_group, hegemonic_group): marg_group_prob = hiring_prob(flow_df, marginalized_group) heg_group_prob = hiring_prob(flow_df, hegemonic_group) return marg_group_prob * 100.0 / heg_group_prob check_disc = 'Female' baseline_reference = 'Male' discrimination_score = compute_score(fullstack_flow_df, check_disc, baseline_reference) print(f"Your score: {round(discrimination_score,2)}") print(f"How to interpret your score? In your hiring flow {check_disc} candidates who are sending a CV are {round(100-discrimination_score,2)}%±5% less likely to get hired when compared to {baseline_reference} candidates with similar credentials.")Your score: 73.43 How to interpret your score? In your hiring flow Female candidates who are sending a CV are 26.57%±5% less likely to get hired when compared to Male candidates with similar credentials.Remaining questions1. How many "eligible" candidates are there prior to the CV stage? How different is the drop off in answering the job ad?2. What happenes in "Proffesional Interview 2"?Biases in job ads (based on the [Gender Decoder](http://gender-decoder.katmatfield.com/)) Methodology1. Get ad 2. 
If ad is not in English: translate to English (via Google Translate)3. Feed ad to Gender Decoder and identify biases Use case 1: DS @ the Ministry of Justice1. Ad: [DS @ the Ministry of Justice](https://www.linkedin.com/jobs/view/2228989246/?refId=2c659612-86f0-3204-854d-b67d06d53b1a)2. Result: the Ministry of Justice ad is **strongly masculine-coded**) (see [full report](http://gender-decoder.katmatfield.com/results/e2fcad8c-bea6-4539-99f1-0e14c6589d50)) Use case 2: Computer Vision Engineer @ Webiks1. Ad: [Computer Vision Engineer @ Webiks](https://www.linkedin.com/jobs/view/2227800643/?refId=3845504191604142176566&trackingId=19bD1N3ahvrh%2BBsENcDXkw%3D%3D)2. Result: the Webiks ad is **subtly feminine-coded**) (see [full report](http://gender-decoder.katmatfield.com/results/b844b2db-2d94-4e51-a4b3-aae06f00c37f)Vibration Folder Analysis | 20161121This notebook will explore how to create a generic program to analyize vibration data. Using a calibration run that recorded the ESI truck vibration profile for testing purposes. Goals* Import CSV data* Find grms, zrms, Gs per sigma, Z per sigma* Plot PSD, Transmissibility, comparisons with profiles, impact histogram (sigmas), per axis* Output folder summary%matplotlib inline import scipy as sp import numpy as np import matplotlib.pyplot as plt from numpy import shapeImport Datadef csv_to_data(path): """Given the path to a folder containing .csv files, csv_to_data reads the files and outputs a data variable. Data is of the form: data = list[(event1), (event2), ... (eventn)] eventn = np.array([time, x, y, z], dtype=float) The subsequent programs need all the events to have the same length so I cut off the ends of each recording to match the smallest file. Alternatively I could look into adding zeros to match the longest recording This program assumes the csv files all contain valid data. This program also ignores the header of the csv file which should contain [Sample Time, X, Y, Z]""" ## Import libraries import glob import csv import scipy as sp from itertools import islice ## Create a list of the files in the folder filels = glob.glob1(path, '*.csv') # Initialize variables d = [] data1 = [0.]*len(filels) datapnts = [0.]*len(filels) # The number of points recorded in the csv file. # To compare the fft, all files need to have the # same number of points. 
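# Each CSV file becomes one "event"; the returned data array has shape
# (n_events, max_points, 4) with columns [time, x, y, z], zero-padded up to the longest file.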
# Finds the length of each csv file for i in range(len(filels)): r = csv.reader(open(path + '\\' + filels[i])) datapnts[i] = len([line for line in r]) ## Records the data file using the minimum length csv file as a limit #for i in range(len(filels)): # r = csv.reader(open(path + '\\' + filels[i])) # data[i] = [line for line in islice(r, 1,min(datapnts))] # data[i] = scipy.array(data[i], dtype=float) # Records the data file using the maximum length csv file as a limit for i in range(len(filels)): r = csv.reader(open(path + '\\' + filels[i])) next(r, None) # Skips the headers data1[i] = [line for line in r] data1[i] = sp.array(data1[i]) data = sp.zeros((len(filels), max(datapnts), 4)) for i in range(len(data1[:])): for j in range(len(data1[i][:,0])): for k in range(4): data[i,j,k] = float(data1[i][j,k]) for i in range(len(data[:,0,0])): dt = data[i,1,0]-data[i,0,0] for j in range(len(data[i,:,0])-1): data[i,j+1,0] = data[i,j,0]+dt for i in range(1,4): data[:,:,i] = data[:,:,i] - sp.mean(data[:,:,i]) return(data) ## Set folder path containing csv files path = 'C:\\RWork\\Dell\\lattice_20170111\\logger1' #path = 'C:\\slamstick\\Tests\\lattice_20161220\\Ch32' data = csv_to_data(path) def vib_peaks(data): import scipy pkx = [0]*len(data) pky = [0]*len(data) pkz = [0]*len(data) for i in range(len(data)): pkx[i] = max(abs(data[i,:,1])) pky[i] = max(abs(data[i,:,2])) pkz[i] = max(abs(data[i,:,3])) peaks = scipy.array([pkx, pky, pkz]).T return(peaks) peaks = vib_peaks(data) ## Calculate the STD and mean of the vibration data def vib_mean_std(peaks): N = len(peaks[0,:]) # The number of values mean = sp.mean(peaks,0) # [mean_x, mean_y, mean_z] for i in range(N): std = (peaks[0,:] - mean)**2 std_samp = ((1/(N-1))*std)**0.5 std_pop = ((1/N)*std)**0.5 return(mean, std_samp, std_pop) pks_mean, pks_std, pks_std_pop = vib_mean_std(peaks) ## This can be calucated using sp.std(peaks, axis=0, ddof=1) # ddof = 1 ~ sampling and ddof = 0 ~ population def sigma_range(sigma, mean, std): sig_plus = mean + sigma * std sig_minus = mean - sigma * std return(sig_plus, sig_minus) def data_to_hist(peaks): ## Histogram of the peak impacts from the vibration data import scipy import matplotlib.mlab as mlab import matplotlib.pylab as plt sig3 = scipy.array(sigma_range(3, sp.mean(peaks,0), sp.std(peaks, axis=0, ddof=1))) pkx = peaks[:,0] pky = peaks[:,1] pkz = peaks[:,2] # Plot Results plt.figure('Histogram', figsize=(8,8)) #plt.title('Histogram of Peak Impacts On Each Axis') # Combined results plt.subplot(2,2,1) nz, binsz, patchesz = plt.hist(pkz, 30, normed=0, facecolor='green', alpha=0.50, label='Z') ny, binsy, patchesy = plt.hist(pky, 30, normed=0, facecolor='red', alpha=0.50, label='Y') nx, binsx, patchesx = plt.hist(pkx, 30, normed=0, facecolor='blue', alpha=0.50, label='X') plt.xlabel('Peak Acceleration [G]') plt.ylabel('Probability') plt.legend(loc='upper right', fontsize = 'small') plt.grid(True) # Z Axis plt.subplot(2,2,2) nz, binsz, patchesz = plt.hist(pkz, 30, normed=0, facecolor='green', alpha=0.50) plt.plot((scipy.mean(pkz),scipy.mean(pkz)), (0, max(nz)), 'k--', linewidth=2, label=('Mean (%.2f)' % scipy.mean(pkz))) plt.plot((sig3[0,2],sig3[0,2]), (0, max(nz)), 'k-.', linewidth=2, label=('3$\sigma$ (%.2f - %.2f)' %(sig3[1,2], sig3[0,2]))) plt.plot((sig3[1,2],sig3[1,2]), (1, max(nz)), 'k-.', linewidth=2) plt.legend(loc='upper right', fontsize='small') plt.grid(True) # X Axis plt.subplot(2,2,3) nx, binsx, patchesx = plt.hist(pkx, 30, normed=0, facecolor='blue', alpha=0.50) 
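# Overlay the mean and the 3-sigma bounds for the X axis, mirroring the Z-axis subplot above.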
plt.plot((scipy.mean(pkx),scipy.mean(pkx)), (0, max(nx)), 'k--', linewidth=2, label=('Mean (%.2f)' % scipy.mean(pkx))) plt.plot((sig3[0,0],sig3[0,0]), (0, max(nx)), 'k-.', linewidth=2, label=('3$\sigma$ (%.2f - %.2f)' %(sig3[1,0], sig3[0,0]))) plt.plot((sig3[1,0],sig3[1,0]), (1, max(nx)), 'k-.', linewidth=2) plt.legend(loc='upper right', fontsize='small') plt.xlabel('Peak Acceleration [G]') plt.ylabel('Probability') plt.grid(True) # Y Axis plt.subplot(2,2,4) ny, binsy, patchesy = plt.hist(pky, 30, normed=0, facecolor='red', alpha=0.50) plt.plot((scipy.mean(pky),scipy.mean(pky)), (0, max(ny)), 'k--', linewidth=2, label=('Mean (%.2f)' % scipy.mean(pky))) plt.plot((sig3[0,1],sig3[0,1]), (0, max(ny)), 'k-.', linewidth=2, label=('3$\sigma$ (%.2f - %.2f)' %(sig3[1,1], sig3[0,1]))) plt.plot((sig3[1,1],sig3[1,1]), (1, max(ny)), 'k-.', linewidth=2) plt.legend(loc='upper right', fontsize='small') plt.xlabel('Peak Acceleration [G]') plt.grid(True) #add a best fit line curve #y = mlab.normpdf(bins, mu, sigma) #l = plt.plot(bins, y, 'r--', linewidth=1) plt.savefig('hist_data') plt.show() data_to_hist(peaks) ## Vibration Profiles def vib_profiles(profile): import csv import scipy if profile == 'input_psd': r = csv.reader(open('C:\\python\\Vibration\\input_psd3.csv')) vibls = [line for line in r] vibls = sp.array(vibls, dtype=float).T vibls = vibls[:,1:].tolist() return (vibls) else: r = csv.reader(open('C:\\python\\Vibration\\vib_profiles.csv')) vibls = [line for line in r] vibls = scipy.array(vibls) for i in range(len(vibls[0,:])): if vibls[0,i].upper() == profile.upper(): f = [var for var in vibls[1:,i] if var] p = [var for var in vibls[1:,i+1] if var] return([f,p]) ## Transform the data into frequency domain. G vs freq. and PSD vs freq. def fft_data(data, printi=0): """Given a set of acceleration and time recordings, fft_data outputs PSD and frequency. Data is input in the form: data = list[(event1), (event2), ... (eventn)] eventn = np.array([time, x, y, z], dtype=float) fft_data outputs the data in the same format as the input. """ import scipy import matplotlib.pyplot as plt# Define Time from scipy.signal import welch ## If the time is in ms, convert to s if max(data[0,:,0] > 10): data[:,:,0] = data[:,:,0]/1000 # Calculate time variables, use the same variables across all events. dt = (data[0,2,0] - data[0,1,0]) # Delta t, [s] sample_rate = 1./dt # Sampling rate, [Hz] sig_len = len(data[0,:,:])/sample_rate # [s] Defined by recorder df = 1./sig_len # Freq. between points in freq. domain, [Hz] t = scipy.arange(0, sig_len, dt) # The time vector n_t = len(t) # Length of the time vector # Initialize the variable to hold the FFT of the data variable. Same format. 
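# data_fft[i] will hold columns [freq, FFT_x, FFT_y, FFT_z]; data_psd[i] will hold
# [freq, PSD_x, PSD_y, PSD_z], estimated with Welch's method (Hanning window, density scaling).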
data_fft = [0]*len(data) data_psd = [0]*len(data) for i in range(len(data)): sig = data[i] # Compute fourier transform of signal fx = scipy.fft(sig[1:-1,1]) fy = scipy.fft(sig[1:-1,2]) fz = scipy.fft(sig[1:-1,3]) # Define frequencies freq = df*scipy.arange(0, len(fx), dtype='d') # The frequency vector n_freq = len(freq) data_fft[i] = scipy.array([freq, fx, fy, fz]).T # Compute the PSD of the signal frx, px = welch(sig[:,1], fs=sample_rate, nperseg=len(sig), detrend=None, scaling='density', window='hanning') fry, py = welch(sig[:,2], fs=sample_rate, nperseg=len(sig), detrend=None, scaling='density', window='hanning') frz, pz = welch(sig[:,3], fs=sample_rate, nperseg=len(sig), detrend=None, scaling='density', window='hanning') data_psd[i] = scipy.array([frx, px, py, pz]).T data_fft = scipy.array(data_fft) data_psd = sp.array(data_psd) if 1 == printi: # plot input data y against time plt.subplot(2,1,1) plt.plot(data[0][:,0], data[0][:,3], label='input data') plt.xlabel('time [s]') plt.ylabel('signal') # plot frequency spectrum plt.subplot(2,1,2) plt.loglog(data_fft[0][:,0], abs(data_fft[0][:,3]), label='abs(fourier transform)') plt.xlabel('frequency [Hz]') plt.ylabel('abs(DFT(signal))') # Print plot plt.show() return(data_fft, data_psd) data_fft, data_psd = fft_data(data) ## Plot the average PSD response avg_psd = sp.array([[sp.mean(data_psd[:,i,j]) for j in range(len(data_psd[0,0,:]))] for i in range(len(data_psd[0,:,0]))]) max_psd = sp.array([[max(abs(data_psd[:,i,j])) for j in range(len(data_psd[0,0,:]))] for i in range(len(data_psd[0,:,0]))]) min_psd = sp.array([[min(abs(data_psd[:,i,j])) for j in range(len(data_psd[0,0,:]))] for i in range(len(data_psd[0,:,0]))]) ## Print Results plt.figure('PSD') esi_truck_profile = vib_profiles('ista air ride') plt.loglog(esi_truck_profile[0], esi_truck_profile[1], 'k', label='Input') plt.loglog(avg_psd[:,0], (avg_psd[:,1]), 'b', label='X') plt.loglog(avg_psd[:,0], avg_psd[:,2], 'r', label='Y') plt.loglog(avg_psd[:,0], (avg_psd[:,3]), 'g', label='Z') plt.xlabel('Frequency [Hz]') plt.ylabel('PSD [G^2/Hz]') plt.title('PSD Response') plt.axis([1,200,1e-6, 1e-1]) #plt.axis([200,1000,1e-7, 1e-2]) plt.show() # Save the psd response to file if it is the input of other responses. # This will allow for the transmissibility to be calculated. use_input_psd = 0 if use_input_psd: import csv ofile = open('input_psd3.csv', 'w') writer = csv.writer(ofile, lineterminator = '\n') for row in avg_psd: writer.writerow(row) ofile.close() ## Interpolate values of one profile across frequency range of another response. def vib_trans(resp, profile): """Interpolate the values of the profile across the frequency range of the response. The profile consists of two lists, a frequency and amplitude. The response consists of the same. This program finds the amplitudes of the profile at the frequencies of the response. This allows you to compare the amplitudes of the response and the profile at the same frequencies. resp = [frequency, amplitude] profile = [frequency, amplitude] Returns the transmissibility results Respose / Input Profile. 
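The input profile is interpolated piecewise in log-log space using the slope between its
break points, and the transmissibility is Q = sqrt(response PSD / interpolated input PSD).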
return([frequency, transmissibility amplitude]) """ from scipy import log10 import scipy as sp # Convert the input from a list into an array resp = sp.array(resp) profile = sp.array(profile, dtype=float) m0 = [] # Finding the slope of the input profile for i in range(len(profile[0,:])-1): m0.append((log10(profile[1,i+1])-log10(profile[1,i]))/(log10(profile[0,i+1])-log10(profile[0,i]))) freq = [] # Initialize the frequency variable resp_c = [] # Initialize the clipped response variable m = [] # Initialize the slope variable x1 = [] # Initialize the frequency used in the point slope equation y1 = [] # Initialize the amplitude used in the point slope equation # Find the frequencies and response where which lie within the profile frequency range for i in range(len(resp[0,:])): if resp[0,i] >= float(min(profile[0,:])) and resp[0,i] <= float(max(profile[0,:])): freq.append(resp[0,i]) resp_c.append(resp[1,i]) for j in range(len(profile[0,:])-1): if resp[0,i] <= profile[0,j+1] and resp[0,i] > profile[0,j]: m.append(m0[j]) x1.append(profile[0,j+1]) y1.append(profile[1,j+1]) # Make sure the slope is recording across the appropriate values. if len(m)!= len(freq): print('Error finding slope, len(m) != len(freq)') resp_int = [] # Initializing the interpolated response variable. # Calculating the interpolated response given the slope and input profile point for i in range(len(freq)): resp_int.append(10**(m[i]*(log10(freq[i])-log10(x1[i])) + log10(y1[i]))) # Converting the list to an array resp_int = sp.array(resp_int) resp_c = sp.array(resp_c) ## From Steinberg 1988 # P_out = Q^2 * P # Solving for Q -> trans = (resp_c/resp_int)**0.5 # Q ~ Transmissibility of system return([freq, trans]) #input_profile_label = 'input_psd' input_profile_label = 'ista air ride' input_profile = vib_profiles(input_profile_label) if input_profile_label == 'input_psd': trans = vib_trans([avg_psd[:,0], avg_psd[:,1]], [input_profile[0], input_profile[1]]) #transx = vib_trans([avg_psd[:,0], avg_psd[:,1]], [input_profile[0], input_profile[1]])[1] trans.append(vib_trans([avg_psd[:,0], avg_psd[:,2]], [input_profile[0], input_profile[2]])[1]) trans.append(vib_trans([avg_psd[:,0], avg_psd[:,3]], [input_profile[0], input_profile[3]])[1]) #trans = [transf, transx, transy, transz] else: trans = vib_trans([avg_psd[:,0], avg_psd[:,3]], input_profile) ## Calculating the Grms of a shaped random vibration input curve. # Sec. 8.8, Eqns. 8.4 - 8.6. def grms (freq, PSD): """Returns the Grms value for a shaped random vibration input curve. Input the frequency and PSD values as a list in the form grms(freq, PSD). 
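The area under the PSD curve is accumulated segment by segment using the slope in dB/octave
(per Steinberg Eqns. 8.4 - 8.6), and the Grms is the square root of that total area.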
The frequency and PSD list must have the same number of elements.""" from math import log10, log A = 0 if len(freq)!=len(PSD): print("Error: The number of elements in the Frequency and PSD lists do not match.") else: for i in range(1,len(freq)): # Calculate the slope dB = 10 * log10(PSD[i]/PSD[i-1]) # dB OCT = log10(freq[i]/freq[i-1])/log10(2) # Octave S = dB/OCT # Slope # Calculate the area in units of [G^2] if S == 0: A = A + PSD[i] * (freq[i] - freq[i-1]) elif S == -3: A = A + -freq[i] * PSD[i] * log(freq[i-1] / freq[i]) else: A = A + (3 * PSD[i]/(3 + S)) * (freq[i] - (freq[i-1]/freq[i])**(S/3) * freq[i-1]) # Calculate the Grms [G] grms = A**(0.5) return(grms) ## Print results from the data above print("The input GRMS is : %.3f" %(grms(sp.array(input_profile[0], dtype=float),sp.array(input_profile[1], dtype=float)))) print("The response GRMS is : %.3f" %(grms(avg_psd[:,0], avg_psd[:,3]))) print("Out of %i events, the maximum impacts are \n" %(len(data)), "X : %.2f, Y : %.2f, Z : %.2f [G]" %(max(peaks[:,0]), max(peaks[:,1]), max(peaks[:,2]))) print('The average peak impact by axis are \n', "X : %.2f, Y : %.2f, Z : %.2f [G]" %(sp.mean(peaks[:,0]), sp.mean(peaks[:,1]), sp.mean(peaks[:,2]))) sig3 = sp.array(sigma_range(3, sp.mean(peaks,0), sp.std(peaks, axis=0, ddof=1))) print('The 3 sigma values are \n', "X : %.2f, Y : %.2f, Z : %.2f [G]" %(sig3[0,0], sig3[0,1], sig3[0,2])) data_to_hist(peaks) ista_air = vib_profiles('ista air ride') plt.figure('PSD', figsize=(8,4)) if len(trans)>2: plt.loglog(ista_air[0], ista_air[1], 'k', label='ISTA Air (Ref)') plt.loglog(input_profile[0], input_profile[1], '--b', label='Input') plt.loglog(input_profile[0], input_profile[2], '--r', label='') plt.loglog(input_profile[0], input_profile[3], '--g', label='') plt.loglog(avg_psd[:,0], avg_psd[:,1], 'b', label='X') plt.loglog(avg_psd[:,0], avg_psd[:,2], 'r', label='Y') plt.loglog(avg_psd[:,0], avg_psd[:,3], 'g', label='Z') else: plt.loglog(input_profile[0], input_profile[1], 'k', label='Input') plt.loglog(avg_psd[:,0], avg_psd[:,1], 'b', label='X') plt.loglog(avg_psd[:,0], avg_psd[:,2], 'r', label='Y') plt.loglog(avg_psd[:,0], avg_psd[:,3], 'g', label='Z') plt.xlabel('Frequency [Hz]') plt.ylabel('PSD [G^2/Hz]') plt.title('PSD Response') plt.legend(loc='best', fontsize='small') plt.axis([1,200,1e-6, 1e-1]) plt.savefig('psd') plt.show() plt.figure('Trans', figsize=(8,4)) plt.loglog(trans[0], trans[1], 'b', label='X') if len(trans)>2: plt.loglog(trans[0], trans[2], 'r', label='Y') plt.loglog(trans[0], trans[3], 'g', label='Z') plt.legend(loc='best', fontsize='small') plt.loglog([1, 200], [1, 1], 'k') plt.xlabel('Frequency [Hz]') plt.ylabel('Transmissibility') plt.title('Transmissibility of the Response') plt.axis([1,200,0.1,30]) plt.savefig('trans') plt.show() # Create a report title = 'Lattice Testing 01/11/2017, Logger #1' import docx doc = docx.Document() doc.add_heading('Shock and Vibration Testing', 0) doc.add_heading(title,1) doc.add_paragraph(text ="\nThe input GRMS is : %.3f\nThe response GRMS is : %.3f\n" %(grms(sp.array(input_profile[0], dtype=float), sp.array(input_profile[1], dtype=float)), grms(avg_psd[:,0], avg_psd[:,3]))) doc.add_paragraph(('''Out of %i events, the maximum impacts are: X : %.2f, Y : %.2f, Z : %.2f [G]\n''') %(len(data), max(peaks[:,0]), max(peaks[:,1]), max(peaks[:,2]))) doc.add_paragraph('''The average peak impact by axis are: X : %.2f, Y : %.2f, Z : %.2f [G]\n''' %(sp.mean(peaks[:,0]), sp.mean(peaks[:,1]), sp.mean(peaks[:,2]))) doc.add_paragraph('''The 3 sigma values are: X : 
%.2f, Y : %.2f, Z : %.2f [G]\n\n''' %(sig3[0,0], sig3[0,1], sig3[0,2])) doc.add_picture('psd.png', width=docx.shared.Inches(6), height=docx.shared.Inches(4)) #doc.paragraphs[6].runs[0].add_break(docx.enum.text.WD_BREAK.PAGE) doc.add_picture('trans.png', width=docx.shared.Inches(6), height=docx.shared.Inches(2.5)) doc.add_paragraph('\nHistogram the peak impact accelerations and their frequency:') doc.add_picture('hist_data.png', width=docx.shared.Inches(6), height=docx.shared.Inches(5)) doc.save('lattice_20170111-logger1.docx')C:\Anaconda3\lib\site-packages\ipykernel\__main__.py:21: RuntimeWarning: divide by zero encountered in double_scalarsLoad model# neuralnet network = NeuralNetClassifier( patchproject=project, nameproject=name, no_cuda=no_cuda, seed=seed, gpu=gpu ) cudnn.benchmark = True # load model if network.load( pathmodels ) is not True: assert(False) name_dataset=FactoryDataset. subset=FactoryDataset.validation nactores=10 kfold=0 idenselect = np.arange(nactores) + kfold*nactores breal=False if breal: data = Dataset( data=FactoryDataset.factory( pathname=pathname, name=name_dataset, subset=subset, idenselect=idenselect, download=True ), num_channels=num_input_channels, transform=get_transforms_det(network.size_input) #network.size_input ) else: data = SyntheticFaceDataset( data=FactoryDataset.factory( pathname=pathname, name=name_dataset, subset=subset, idenselect=idenselect, download=True ), pathnameback='~/.datasets/coco', generate='image_and_label', ext='jpg', count=2000, num_channels=network.num_input_channels, iluminate=True, angle=45, translation=0.3, warp=0.2, factor=0.2, #iluminate=True, angle=45, translation=0.1, warp=0.0, factor=0.0, transform_image=get_transforms_det( network.size_input ) ) # data = SyntheticFaceDataset( # data=FactoryDataset.factory( # pathname=pathname, # name=name_dataset, # subset=subset, # download=True # ), # pathnameback='~/.datasets/coco', # generate='image_and_label', # ext='jpg', # count=2000, # num_channels=3, # iluminate=True, angle=45, translation=0.3, warp=0.2, factor=0.2, # #iluminate=True, angle=45, translation=0.1, warp=0.0, factor=0.0, # transform=get_transforms_det(network.size_input) # ) dataloader = DataLoader(data, batch_size=batch_size, shuffle=False, num_workers=workers ) print('Data loader ') print(len(dataloader)) print(len(data)) import matplotlib.pyplot as plt img = np.transpose( data[0]['image'], (1,2,0) ) print( img.shape, img.min(), img.max() ) plt.figure() plt.imshow( img[:,:,0] ) plt.show() Yhat, Y = network.test( dataloader ) df = pd.DataFrame( np.concatenate((Yhat, Y), axis=1) ) df.to_csv( os.path.join(project , '{}_{}_{}_dp.csv'.format(subset,name,name_dataset)), index=False, encoding='utf-8') print('dir: {}'.format(project)) print('DONE!!!') df = pd.read_csv( os.path.join(project , '{}_{}_{}_dp.csv'.format(subset, name, name_dataset)) ) df.head() result = df.as_matrix() yhat = np.argmax( result[:,:-1], axis=1 ) y = result[:,-1] acc = metrics.accuracy_score(y, yhat) precision = metrics.precision_score(y, yhat, average='weighted') recall = metrics.recall_score(y, yhat, average='weighted') f1_score = 2*precision*recall/(precision+recall) # print('Accuracy : %f' % acc) # print('Precision : %f' % precision) # print('Recall : %f' % recall) # print('F1 score : %f' % f1_score) # print("") print('|Acc\t|Prec\t|Rec\t|F1\t|') print( '|{:0.3f}\t|{:0.3f}\t|{:0.3f}\t|{:0.3f}\t|'.format(acc,precision,recall,f1_score).replace('.',',') ) print() print( 
'{:0.3f}\n{:0.3f}\n{:0.3f}\n{:0.3f}'.format(acc*100,precision*100,recall*100,f1_score*100).replace('.',',') ) metric = metrics.classification_report(y, yhat) print(metric) mc = metrics.confusion_matrix(y, yhat) # print(mc) print( np.diag( mc ) ) import itertools import matplotlib.pyplot as plt import matplotlib %matplotlib inline def plot_confusion_matrix(cm, classes, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] cm = cm*100 print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') #print(cm) print( np.diag(cm) ) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) #plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') matplotlib.style.use('bmh') plt.figure( figsize=(8,8)) emotion = np.array(['NE', 'HA', 'SU', 'SA', 'AN', 'DI', 'FR', 'CO']) plot_confusion_matrix(mc, classes=emotion, title=' ', normalize=True) # plot_confusion_matrix(mc, classes=data.data.classes, title=' ') plt.grid('off') plt.show() print(plt.style.available) import cv2 def imageshow( image ): plt.figure( figsize=(10,10)) plt.imshow(image, cmap='gray') plt.axis('off') plt.ioff() plt.show() def roi( image, box ): imgray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) roi_gray = imgray[box[1]:box[3], box[0]:box[2]] return roi_gray def vistensor(tensor, ch=0, allfeature=False, nrow=8, padding=1, brgb=True, scol=1, srow=1): """ vistensor: visuzlization tensor @ch: visualization channel @allkernels: visualization all tensores """ n,c,w,h = tensor.shape if allfeature: tensor = tensor.view(n*c,-1,w,h ) elif brgb: tensor = tensor[:,ch:(ch+3),:,:] else: tensor = tensor[:,ch,:,:].unsqueeze(dim=1) rows = np.min( (tensor.shape[0]//nrow + 1, 64 ) ) grid = utils.make_grid(tensor, nrow=nrow, normalize=True, padding=padding) #print(grid.shape) #print(nrow,rows) plt.figure( figsize=(nrow*scol,rows*srow) ) plt.imshow(grid.numpy().transpose((1, 2, 0))) plt.axis('off') plt.ioff() plt.show() # pathnameimage = '../out/image960x640.jpg' pathnameimage = '../out/ferproblem.png' image = cv2.imread(pathnameimage)[:,:,(2,1,0)] # bbox = np.array([380, 161, 602, 383], dtype=np.int32) # imface = roi(image, bbox) imface = image imageshow(imface) print(imface.shape) def mean_normalization(image, mean, std): tensor = image.float()/255.0 result_tensor = [] for t, m, s in zip(tensor, mean, std): result_tensor.append(t.sub_(m).div_(s)) return torch.stack(result_tensor, 0) ntc = network.net imsize=32 x = imface x = x.mean(axis=2) x = np.stack( (x,x,x), axis=2 ) x = cv2.resize( x, (imsize,imsize) ) x = torch.from_numpy(x).permute( (2,0,1) ).unsqueeze(0).float().cuda() # x = x/255.0 x = mean_normalization(x, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ntc.eval() out1 = ntc.conv1(x) out2 = ntc.layer1(out1) out3 = ntc.layer2(out2) out4 = ntc.layer3(out3) out5 = ntc.layer4(out4) print('ntc.conv1(latt_pool)') print(out1.shape) vistensor( out1.cpu().detach(), nrow=16 , allfeature=True, brgb=False, scol=3, srow=1.5 ) 
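# vistensor tiles the per-channel activation maps of the stage above into a grayscale grid
# (allfeature=True gives every channel its own tile).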
print('ntc.layer1(out1)') print(out2.shape) vistensor( out1.cpu().detach(), nrow=16 , allfeature=True, brgb=False, scol=3, srow=1.5 ) print('ntc.layer1(out2)') print(out3.shape) vistensor( out3.cpu().detach(), nrow=16 , allfeature=True, brgb=False, scol=3, srow=1.5 ) print('ntc.layer1(out3)') print(out4.shape) vistensor( out4.cpu().detach(), nrow=32 , allfeature=True, brgb=False, scol=2, srow=1.5 ) print('ntc.layer1(out4)') print(out5.shape) vistensor( out5.cpu().detach(), nrow=64 , allfeature=True, brgb=False, scol=1, srow=1 ) import torch.nn.functional as TF import operator emotion_name = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear', 'contempt'] ntc = network.net imsize=32 x = imface x = x.mean(axis=2) x = np.stack( (x,x,x), axis=2 ) x = cv2.resize( x, (imsize,imsize) ) x = torch.from_numpy(x).permute( (2,0,1) ).unsqueeze(0).float().cuda() # x = x/255.0 x = mean_normalization(x, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) yhat = ntc(x) print(yhat) labelhat = TF.softmax( yhat, dim=1 )[0,:] score = dict(zip(emotion_name, labelhat )) curremot = max(score.items(), key=operator.itemgetter(1)) print(labelhat) print(curremot) print('face: ') print('----------------------------') for e,v in score.items(): print(e, '{:.4f}'.format(v*100) )tensor([[ 0.8298, -0.1605, 0.7665, 2.0047, -0.1070, -0.4725, 0.5100, -2.9196]], device='cuda:1', grad_fn=) tensor([0.1436, 0.0534, 0.1348, 0.4651, 0.0563, 0.0391, 0.1043, 0.0034], device='cuda:1', grad_fn=) ('sadness', tensor(0.4651, device='cuda:1', grad_fn=)) face: ---------------------------- neutral 14.3647 happiness 5.3360 surprise 13.4830 sadness 46.5100 anger 5.6294 disgust 3.9060 fear 10.4329 contempt 0.3380Windows Usersexecutable_path = {'executable_path': 'chromedriver.exe'} browser = Browser('chrome', **executable_path, headless=False) url = 'https://mars.nasa.gov/news/' browser.visit(url) html = browser.html soup = BeautifulSoup(html, 'html.parser') slide = soup.find('li', class_='slide') news_title = slide.find('div', class_='content_title').text news_p = slide.find('div',class_='article_teaser_body').text print(news_title) print(news_p) url = 'https://www.jpl.nasa.gov/spaceimages/?search=mars&category=#submit' browser.visit(url) html = browser.html soup = BeautifulSoup(html, 'html.parser') full_image = browser.find_by_id('full_image') full_image.click() browser.is_element_present_by_text('more info', wait_time=1) more_info = browser.links.find_by_partial_text('more info') more_info.click() html = browser.html soup = BeautifulSoup(html, 'html.parser') mars_image = soup.find('figure', class_='lede').a.get("href") mars_image featured_image_url = 'https://www.jpl.nasa.gov' + mars_image featured_image_url url = 'https://twitter.com/marswxreport?lang=en' browser.visit(url) html = browser.html weather_soup = BeautifulSoup(html, 'html.parser') weather_tweet = weather_soup.find_all('span', class_='css-901oao css-16my406 r-1qd0xha r-ad9z0x r-bcqeeo r-qvutc0') weather_tweet for element in weather_tweet: if 'InSight' in element.text: weather = element.text break print(weather) facts_df=pd.read_html('https://space-facts.com/mars/')[0] facts_df.columns=['Attribute', 'Value'] facts_df.set_index('Attribute', inplace=True) facts_df url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced' browser.visit(url) html = browser.html soup = BeautifulSoup(html, 'html.parser') hem = soup.find_all('h3') hemisphere_image_urls=[] for i in range(len(hem)): hemisphere = {} browser.find_by_css('h3')[i].click() #retrieve 
hemisphere name hemisphere['title'] = browser.find_by_css('h2').text #retrieve image url hem_url = browser.find_link_by_text('Sample').first hemisphere['img_url'] = hem_url['href'] #add hname to dictionary hemisphere_image_urls.append(hemisphere) #add image url to dictionary browser.back() hemisphere_image_urlsSequence to sequence model (S2S)(c) , 2018. Based on ([Sutskever et al. 2014](https://papers.nips.cc/paper/5346-sequence-to-sequence-learning-with-neural-networks.pdf)).S2S models learn to map input sequences to output sequences using an encoder and a decoder RNN. Note that this is an instructional example written in low-level Julia/Knet and it is slow to train. For a faster and high-level implementation please see `@doc RNN`.using Pkg; haskey(Pkg.installed(),"Knet") || Pkg.add("Knet") using Knet(image source)# S2S model definition function initmodel(H, V; atype=(gpu()>=0 ? KnetArray{Float32} : Array{Float32})) init(d...)=atype(xavier(d...)) model = Dict{Symbol,Any}() model[:state0] = [ init(1,H), init(1,H) ] model[:embed1] = init(V,H) model[:encode] = [ init(2H,4H), init(1,4H) ] model[:embed2] = init(V,H) model[:decode] = [ init(2H,4H), init(1,4H) ] model[:output] = [ init(H,V), init(1,V) ] return model end; # S2S loss function and its gradient function s2s(model, inputs, outputs) state = initstate(inputs[1], model[:state0]) for input in inputs input = onehotrows(input, model[:embed1]) input = input * model[:embed1] state = lstm(model[:encode], state, input) end EOS = eosmatrix(outputs[1], model[:embed2]) input = EOS * model[:embed2] sumlogp = 0 for output in outputs state = lstm(model[:decode], state, input) ypred = predict(model[:output], state[1]) ygold = onehotrows(output, model[:embed2]) sumlogp += sum(ygold .* logp(ypred,dims=2)) input = ygold * model[:embed2] end state = lstm(model[:decode], state, input) ypred = predict(model[:output], state[1]) sumlogp += sum(EOS .* logp(ypred,dims=2)) return -sumlogp end s2sgrad = gradloss(s2s);(image source)# A LSTM implementation in Knet function lstm(param, state, input) weight,bias = param hidden,cell = state h = size(hidden,2) gates = hcat(input,hidden) * weight .+ bias forget = sigm.(gates[:,1:h]) ingate = sigm.(gates[:,1+h:2h]) outgate = sigm.(gates[:,1+2h:3h]) change = tanh.(gates[:,1+3h:4h]) cell = cell .* forget + ingate .* change hidden = outgate .* tanh.(cell) return (hidden,cell) end; # S2S helper functions function predict(param, input) input * param[1] .+ param[2] end function initstate(idx, state0) h,c = state0 h = h .+ fill!(similar(value(h), length(idx), length(h)), 0) c = c .+ fill!(similar(value(c), length(idx), length(c)), 0) return (h,c) end function onehotrows(idx, embeddings) nrows,ncols = length(idx), size(embeddings,1) z = zeros(Float32,nrows,ncols) @inbounds for i=1:nrows z[i,idx[i]] = 1 end oftype(value(embeddings),z) end let EOS=nothing; global eosmatrix function eosmatrix(idx, embeddings) nrows,ncols = length(idx), size(embeddings,1) if EOS==nothing || size(EOS) != (nrows,ncols) EOS = zeros(Float32,nrows,ncols) EOS[:,1] .= 1 EOS = oftype(value(embeddings), EOS) end return EOS end end; # Use reversing English words as an example task # This loads them from /usr/share/dict/words and converts each character to an int. 
function readdata(file="words") isfile(file) || (file=download("http://people.csail.mit.edu/deniz/models/tutorial/words","words")) global strings = map(chomp,readlines(file)) global tok2int = Dict{Char,Int}() global int2tok = Vector{Char}() push!(int2tok,'\n'); tok2int['\n']=1 # We use '\n'=>1 as the EOS token sequences = Vector{Vector{Int}}() for w in strings s = Vector{Int}() for c in collect(w) if !haskey(tok2int,c) push!(int2tok,c) tok2int[c] = length(int2tok) end push!(s, tok2int[c]) end push!(sequences, s) end return sequences end; sequences = readdata(); for x in (sequences, strings, int2tok, tok2int); println(summary(x)); end for x in strings[501:505]; println(x); end # Minibatch sequences putting equal length sequences together: function minibatch(sequences, batchsize) table = Dict{Int,Vector{Vector{Int}}}() data = Any[] for s in sequences n = length(s) nsequences = get!(table, n, Any[]) push!(nsequences, s) if length(nsequences) == batchsize push!(data, [[ nsequences[i][j] for i in 1:batchsize] for j in 1:n ]) empty!(nsequences) end end return data end batchsize, statesize, vocabsize = 128, 128, length(int2tok) data = minibatch(sequences,batchsize) summary(data) # Training loop function train(model, data, opts) sumloss = cntloss = 0 for sequence in data grads,loss = s2sgrad(model, sequence, reverse(sequence)) update!(model, grads, opts) sumloss += loss cntloss += (1+length(sequence)) * length(sequence[1]) end return sumloss/cntloss end file = "rnnreverse113.jld2"; model = opts = nothing; Knet.gc() # clean memory from previous run if (print("Train from scratch? ");readline()[1]=='y') # Initialize model and optimization parameters model = initmodel(statesize,vocabsize) opts = optimizers(model,Adam) @time for epoch=1:10 @time loss = train(model,data,opts) # ~17 sec/epoch println((epoch,loss)) end Knet.save(file,"model",model) else isfile(file) || download("http://people.csail.mit.edu/deniz/models/tutorial/$file",file) model = Knet.load(file,"model") end summary(model) # Test on some examples: function translate(model, str) state = model[:state0] for c in collect(str) input = onehotrows(tok2int[c], model[:embed1]) input = input * model[:embed1] state = lstm(model[:encode], state, input) end input = eosmatrix(1, model[:embed2]) * model[:embed2] output = Char[] for i=1:100 #while true state = lstm(model[:decode], state, input) pred = predict(model[:output], state[1]) i = argmax(vec(Array(pred))) i == 1 && break push!(output, int2tok[i]) input = onehotrows(i, model[:embed2]) * model[:embed2] end String(output) end; translate(model,"capricorn")Importsimport numpy as np from PIL import ImageGrab #grabbing image from PIL import Image import cv2 #opencv import io import time from matplotlib import pyplot as plt plt.rcParams['figure.figsize'] = (30, 30) import seaborn as sns import pandas as pd import numpy as np from random import randint import os from selenium import webdriver from selenium.webdriver.chrome.options import Options from selenium.webdriver.common.keys import Keys #keras imports %matplotlib inline from keras.models import model_from_json from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation, Flatten from keras.layers.convolutional import Conv2D, MaxPooling2D from keras.optimizers import SGD , Adam from keras.callbacks import TensorBoard from collections import deque import random import pickle import json #variaveis path game_url = "game/dino.html" chrome_driver_path = "chromedriver.exe" loss_file_path = "objects/loss_df.csv" 
actions_file_path = "objects/actions_df.csv" scores_file_path = "objects/scores_df.csv" ''' * Game class: Selenium interfacing between the python and browser * __init__(): Launch the broswer window using the attributes in chrome_options * get_crashed() : return true if the agent as crashed on an obstacles. Gets javascript variable from game decribing the state * get_playing(): true if game in progress, false is crashed or paused * restart() : sends a signal to browser-javascript to restart the game * press_up(): sends a single to press up get to the browser * get_score(): gets current game score from javascript variables. * pause(): pause the game * resume(): resume a paused game if not crashed * end(): close the browser and end the game ''' class Game: def __init__(self,custom_config=True): chrome_options = Options() chrome_options.add_argument("disable-infobars") self._driver = webdriver.Chrome(executable_path = chrome_driver_path,chrome_options=chrome_options) self._driver.set_window_position(x=-10,y=0) self._driver.set_window_size(200, 300) self._driver.get(os.path.abspath(game_url)) #modifying game before trainNetworkining if custom_config: self._driver.execute_script("Runner.config.ACCELERATION=0") def get_crashed(self): return self._driver.execute_script("return Runner.instance_.crashed") def get_playing(self): return self._driver.execute_script("return Runner.instance_.playing") def restart(self): self._driver.execute_script("Runner.instance_.restart()") time.sleep(0.25)# no actions are possible # for 0.25 sec after game starts, # skip learning at this time and make the model wait def press_up(self): self._driver.find_element_by_tag_name("body").send_keys(Keys.ARROW_UP) def get_score(self): score_array = self._driver.execute_script("return Runner.instance_.distanceMeter.digits") score = ''.join(score_array) # the javascript object is of type array with score in the formate[1,0,0] which is 100. 
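# e.g. the digit array ['1', '0', '0'] is joined to '100' and returned below as the integer 100.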
return int(score) def pause(self): return self._driver.execute_script("return Runner.instance_.stop()") def resume(self): return self._driver.execute_script("return Runner.instance_.play()") def end(self): self._driver.close() class DinoAgent: def __init__(self,game): #takes game as input for taking actions self._game = game; self.jump(); #to start the game, we need to jump once time.sleep(.5) # no action can be performed for the first time when game starts def is_running(self): return self._game.get_playing() def is_crashed(self): return self._game.get_crashed() def jump(self): self._game.press_up() def duck(self): self._game.press_down() class Game_sate: def __init__(self,agent,game): self._agent = agent self._game = game self._display = show_img() #display the processed image on screen using openCV, implemented using python coroutine self._display.__next__() # initiliaze the display coroutine def get_state(self,actions): actions_df.loc[len(actions_df)] = actions[1] # storing actions in a dataframe score = self._game.get_score() reward = 0.1*score/10 # dynamic reward calculation is_over = False #game over if actions[1] == 1: self._agent.jump() reward = 0.1*score/11 image = grab_screen() self._display.send(image) #display the image on screen if self._agent.is_crashed(): scores_df.loc[len(loss_df)] = score # log the score when game is over self._game.restart() reward = -11/score is_over = True return image, reward, is_over #return the Experience tuple def save_obj(obj, name ): with open('objects/'+ name + '.pkl', 'wb') as f: #dump files into objects folder pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name ): with open('objects/' + name + '.pkl', 'rb') as f: return pickle.load(f) def grab_screen(_driver = None): screen = np.array(ImageGrab.grab(bbox=(40,180,440,400))) #bbox = region of interset on the entire screen image = process_img(screen)#processing image as required return image def process_img(image): #game is already in grey scale canvas, canny to get only edges and reduce unwanted objects(clouds) image = cv2.resize(image, (0,0), fx = 0.15, fy = 0.10) # resale image dimensions image = image[2:38,10:50] #img[y:y+h, x:x+w] #crop out the dino agent from the frame image = cv2.Canny(image, threshold1 = 100, threshold2 = 200) #apply the canny edge detection return image def show_img(graphs = False): """ Show images in new window """ while True: screen = (yield) window_title = "logs" if graphs else "game_play" cv2.namedWindow(window_title, cv2.WINDOW_NORMAL) imS = cv2.resize(screen, (800, 400)) cv2.imshow(window_title, screen) if (cv2.waitKey(1) & 0xFF == ord('q')): cv2.destroyAllWindows() break #Intialize log structures from file if exists else create new loss_df = pd.read_csv(loss_file_path) if os.path.isfile(loss_file_path) else pd.DataFrame(columns =['loss']) scores_df = pd.read_csv(scores_file_path) if os.path.isfile(loss_file_path) else pd.DataFrame(columns = ['scores']) actions_df = pd.read_csv(actions_file_path) if os.path.isfile(actions_file_path) else pd.DataFrame(columns = ['actions']) # training variables saved as checkpoints to filesystem to resume training from the same step def init_cache(): """initial variable caching, done only once""" save_obj(INITIAL_EPSILON,"epsilon") t = 0 save_obj(t,"time") D = deque() save_obj(D,"D") #game parameters ACTIONS = 2 # possible actions: jump, do nothing GAMMA = 0.99 # decay rate of past observations original 0.99 OBSERVATION = 50000. 
# timesteps to observe before training EXPLORE = 100000 # frames over which to anneal epsilon FINAL_EPSILON = 0.0001 # final value of epsilon INITIAL_EPSILON = 0.1 # starting value of epsilon REPLAY_MEMORY = 50000 # number of previous transitions to remember BATCH = 32 # size of minibatch FRAME_PER_ACTION = 1 LEARNING_RATE = 1e-4 img_rows , img_cols = 40,20 img_channels = 4 #We stack 4 frames def buildmodel(): print("Now we build the model") model = Sequential() model.add(Conv2D(32, (8, 8), strides=(4, 4), padding='same',input_shape=(img_cols,img_rows,img_channels))) #20*40*4 model.add(Activation('relu')) model.add(Conv2D(64, (4, 4), strides=(2, 2), padding='same')) model.add(Activation('relu')) model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same')) model.add(Activation('relu')) model.add(Flatten()) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dense(ACTIONS)) adam = Adam(lr=LEARNING_RATE) model.compile(loss='mse',optimizer=adam) print("We finished building the model") return model buildmodel().summary() ''' main training module Parameters: * model => Keras Model to be trained * game_state => Game State module with access to game environment and dino * observe => flag to indicate whether the model is to be trained (weight updates), else just play ''' def trainNetwork(model,game_state,observe=False): last_time = time.time() # store the previous observations in replay memory D = load_obj("D") #load from file system # get the first state by doing nothing do_nothing = np.zeros(ACTIONS) do_nothing[0] =1 #0 => do nothing, #1=> jump x_t, r_0, terminal = game_state.get_state(do_nothing) # get next step after performing the action s_t = np.stack((x_t, x_t, x_t, x_t), axis=2) # stack 4 images to create placeholder input s_t = s_t.reshape(1, s_t.shape[0], s_t.shape[1], s_t.shape[2]) #1*20*40*4 initial_state = s_t if observe : OBSERVE = 999999999 #We keep observing, never train epsilon = FINAL_EPSILON print ("Now we load weights") model.load_weights("model_final.h5") adam = Adam(lr=LEARNING_RATE) model.compile(loss='mse',optimizer=adam) print ("Weights loaded successfully") else: #We go to training mode OBSERVE = OBSERVATION epsilon = load_obj("epsilon") model.load_weights("model_final.h5") adam = Adam(lr=LEARNING_RATE) model.compile(loss='mse',optimizer=adam) t = load_obj("time") # resume from the previous time step stored in file system while (True): #endless running loss = 0 Q_sa = 0 action_index = 0 r_t = 0 #reward at t a_t = np.zeros([ACTIONS]) # action at t #choose an action epsilon greedy if t % FRAME_PER_ACTION == 0: #parameter to skip frames for actions if random.random() <= epsilon: #randomly explore an action print("----------Random Action----------") action_index = random.randrange(ACTIONS) a_t[action_index] = 1 # mark the randomly chosen action else: # predict the output q = model.predict(s_t) #input a stack of 4 images, get the prediction max_Q = np.argmax(q) # choosing index with maximum q value action_index = max_Q a_t[action_index] = 1 # 0 => do nothing, 1 => jump #We reduce epsilon (the exploration parameter) gradually if epsilon > FINAL_EPSILON and t > OBSERVE: epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE #run the selected action and observe the next state and reward x_t1, r_t, terminal = game_state.get_state(a_t) print('loop took {} seconds'.format(time.time()-last_time)) # helpful for measuring frame rate last_time = time.time() x_t1 = x_t1.reshape(1, x_t1.shape[0], x_t1.shape[1], 1) #1x20x40x1 s_t1 = np.append(x_t1, s_t[:, :, :, :3], axis=3) # append the new image to input stack and remove the first one
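# note on the input stack: s_t1 keeps the newest processed frame in channel 0 followed by
# the three most recent channels of s_t, so the network always sees a sliding window of
# the last 4 frames (the 1*20*40*4 stack noted above)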
# store the transition in D D.append((s_t, action_index, r_t, s_t1, terminal)) if len(D) > REPLAY_MEMORY: D.popleft() #only train if done observing if t > OBSERVE: #sample a minibatch to train on minibatch = random.sample(D, BATCH) inputs = np.zeros((BATCH, s_t.shape[1], s_t.shape[2], s_t.shape[3])) #32, 20, 40, 4 targets = np.zeros((inputs.shape[0], ACTIONS)) #32, 2 #Now we do the experience replay for i in range(0, len(minibatch)): state_t = minibatch[i][0] # 4D stack of images action_t = minibatch[i][1] #This is action index reward_t = minibatch[i][2] #reward at state_t due to action_t state_t1 = minibatch[i][3] #next state terminal = minibatch[i][4] #wheather the agent died or survided due the action inputs[i:i + 1] = state_t targets[i] = model.predict(state_t) # predicted q values Q_sa = model.predict(state_t1) #predict q values for next step if terminal: targets[i, action_t] = reward_t # if terminated, only equals reward else: targets[i, action_t] = reward_t + GAMMA * np.max(Q_sa) loss += model.train_on_batch(inputs, targets) loss_df.loc[len(loss_df)] = loss else: # artificial time delay as training done with this delay time.sleep(0.12) s_t = initial_state if terminal else s_t1 #reset game to initial frame if terminate t = t + 1 # save progress every 1000 iterations if t % 1000 == 0: print("Now we save model") model.save_weights("model_final.h5", overwrite=True) save_obj(D,"D") #saving episodes save_obj(t,"time") #caching time steps save_obj(epsilon,"epsilon") #cache epsilon to avoid repeated randomness in actions loss_df.to_csv("./objects/loss_df.csv",index=False) scores_df.to_csv("./objects/scores_df.csv",index=False) actions_df.to_csv("./objects/actions_df.csv",index=False) with open("model.json", "w") as outfile: json.dump(model.to_json(), outfile) # print info state = "" if t <= OBSERVE: state = "observe" elif t > OBSERVE and t <= OBSERVE + EXPLORE: state = "explore" else: state = "train" print("TIMESTEP", t, "/ STATE", state, "/ EPSILON", epsilon, "/ ACTION", action_index, "/ REWARD", r_t, "/ Q_MAX " , np.max(Q_sa), "/ Loss ", loss) print("Episode finished!") print("************************") def playGame(observe=False): game = Game() dino = DinoAgent(game) game_state = Game_sate(dino,game) model = buildmodel() try: trainNetwork(model,game_state,observe=observe) except StopIteration: game.end() playGame(observe=False); def show_plots(): fig, axs = plt.subplots(ncols=1,nrows =2,figsize=(15,15)) axs[0].set_title('Loss') axs[1].set_title('Game Score progress') loss_df = pd.read_csv("./objects/loss_df.csv").clip(0,50).tail(100000) scores_df = pd.read_csv("./objects/scores_df.csv").head(190000) actions_df = pd.read_csv("./objects/actions_df.csv").tail(100000) loss_df['loss'] = loss_df['loss'].astype('float') loss_df.plot(use_index=True,ax=axs[0]) scores_df.plot(ax=axs[1]) # sns.distplot(actions_df,ax=axs[2]) imgg = fig.canvas.draw() show_plots() #training_data_final_working.npy file contains the the keystrokes and gameframes recording for a score of 500 supervised_frames = np.load("training_data_final_working.npy") frame = supervised_frames[0][0] action_index = supervised_frames[0][1] #plotting a sample frame from human recorded gameplay plt.imshow(frame) print('Action taken at this frame : Action index = {} i.e. 
jump'.format(str(action_index))) supervised_actions = [] for frame in supervised_frames: supervised_actions.append(frame[1]) fig, axs = plt.subplots(ncols=1,nrows =2,figsize=(15,15)) sns.distplot(supervised_actions,ax=axs[0]) axs[1].set_title('AI gameplay distribution') axs[0].set_title('Human gameplay distribution') actions_df = pd.read_csv("./objects/actions_df.csv") sns.distplot(actions_df,ax=axs[1])C:\Users\\Anaconda3\lib\site-packages\seaborn\distributions.py:198: RuntimeWarning: Mean of empty slice. line, = ax.plot(a.mean(), 0) C:\Users\\Anaconda3\lib\site-packages\numpy\core\_methods.py:85: RuntimeWarning: invalid value encountered in double_scalars ret = ret.dtype.type(ret / rcount) C:\Users\\Anaconda3\lib\site-packages\numpy\lib\histograms.py:893: RuntimeWarning: invalid value encountered in true_divide return n/db/n.sum(), bin_edgesCopyright 2019 The TensorFlow Authors.#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.TensorBoard 性能分析: 在 Keras 中对基本训练指标进行性能分析 在 TensorFlow.org 上查看 在 Google Colab 上运行 在 GitHub 上查看源代码 下载此 notebook 总览在机器学习中性能十分重要。TensorFlow 有一个内置的性能分析器可以使您不用费力记录每个操作的运行时间。然后您就可以在 TensorBoard 的 *Profile Plugin* 中对配置结果进行可视化。本教程侧重于 GPU ,但性能分析插件也可以按照[云 TPU 工具](https://cloud.google.com/tpu/docs/cloud-tpu-tools)来在 TPU 上使用。本教程提供了非常基础的示例以帮助您学习如何在开发 Keras 模型时启用性能分析器。您将学习如何使用 Keras TensorBoard 回调函数来可视化性能分析结果。“其他性能分析方式”中提到的 *Profiler API* 和 *Profiler Server* 允许您分析非 Keras TensorFlow 的任务。 事先准备* 在你的本地机器上安装最新的[TensorBoard](https://tensorflow.google.cn/tensorboard)。* 在 Notebook 设置的加速器的下拉菜单中选择 “GPU”(假设您在Colab上运行此notebook)>![Notebook 设置](https://github.com/tensorflow/tensorboard/blob/master/docs/images/profiler-notebook-settings.png?raw=1) 设置try: # %tensorflow_version 只在 Colab 中存在。 %tensorflow_version 2.x except Exception: pass # 加载 TensorBoard notebook 扩展。 %load_ext tensorboard from __future__ import absolute_import from __future__ import division from __future__ import print_function from datetime import datetime from packaging import version import functools import tensorflow as tf import tensorflow_datasets as tfds from tensorflow.python.keras import backend from tensorflow.python.keras import layers import numpy as np print("TensorFlow version: ", tf.__version__)TensorFlow version: 2.0.0-dev20190424确认 TensorFlow 可以看到 GPU。device_name = tf.test.gpu_device_name() if not tf.test.is_gpu_available(): raise SystemError('GPU device not found') print('Found GPU at: {}'.format(device_name))Found GPU at: /device:GPU:0使用 TensorBoard callback 运行一个简单的模型你将使用 Keras 来构建一个使用 ResNet56 (参考: [用于图像识别的深度残差学习](https://arxiv.org/abs/1512.03385))来分类[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html)图像集的简单模型。从 [TensorFlow 模型园](https://github.com/tensorflow/models/blob/master/official/resnet/keras/resnet_cifar_model.py)复制 ResNet 模型代码。BATCH_NORM_DECAY = 0.997 BATCH_NORM_EPSILON = 1e-5 L2_WEIGHT_DECAY = 2e-4 def identity_building_block(input_tensor, kernel_size, filters, stage, block, training=None): """标识块是一种在捷径上没有卷积层的块。 参数: input_tensor:输入张量 kernel_size:默认为3,内核大小为 主路径上的中间卷积层 过滤器:整数列表,主路径上3个卷积层的过滤器 
stage:整数,当前阶段标签,用于生成层名称 block:当前块标签,用于生成层名称 training:仅在使用 Estimator 训练 keras 模型时使用。 在其他情况下,它是自动处理的。 返回值: 输出块的张量。 """ filters1, filters2 = filters if tf.keras.backend.image_data_format() == 'channels_last': bn_axis = 3 else: bn_axis = 1 conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' x = tf.keras.layers.Conv2D(filters1, kernel_size, padding='same', kernel_initializer='he_normal', kernel_regularizer= tf.keras.regularizers.l2(L2_WEIGHT_DECAY), bias_regularizer= tf.keras.regularizers.l2(L2_WEIGHT_DECAY), name=conv_name_base + '2a')(input_tensor) x = tf.keras.layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON)( x, training=training) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same', kernel_initializer='he_normal', kernel_regularizer= tf.keras.regularizers.l2(L2_WEIGHT_DECAY), bias_regularizer= tf.keras.regularizers.l2(L2_WEIGHT_DECAY), name=conv_name_base + '2b')(x) x = tf.keras.layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON)( x, training=training) x = tf.keras.layers.add([x, input_tensor]) x = tf.keras.layers.Activation('relu')(x) return x def conv_building_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2), training=None): """在捷径中具有卷积层的块。 参数: input_tensor:输入张量 kernel_size:默认为3,内核大小为 主路径上的中间卷积层 filters:整数列表,主路径上3个卷积层的过滤器 stage:整数,当前阶段标签,用于生成层名称 block:当前块标签,用于生成层名称 training:仅在使用 Estimator 训练 keras 模型时使用。在其他情况下,它是自动处理的。 返回值: 输出块的张量。 请注意,从第3阶段开始, 主路径上的第一个卷积层的步长=(2,2) 而且捷径的步长=(2,2) """ filters1, filters2 = filters if tf.keras.backend.image_data_format() == 'channels_last': bn_axis = 3 else: bn_axis = 1 conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' x = tf.keras.layers.Conv2D(filters1, kernel_size, strides=strides, padding='same', kernel_initializer='he_normal', kernel_regularizer= tf.keras.regularizers.l2(L2_WEIGHT_DECAY), bias_regularizer= tf.keras.regularizers.l2(L2_WEIGHT_DECAY), name=conv_name_base + '2a')(input_tensor) x = tf.keras.layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON)( x, training=training) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same', kernel_initializer='he_normal', kernel_regularizer= tf.keras.regularizers.l2(L2_WEIGHT_DECAY), bias_regularizer= tf.keras.regularizers.l2(L2_WEIGHT_DECAY), name=conv_name_base + '2b')(x) x = tf.keras.layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON)( x, training=training) shortcut = tf.keras.layers.Conv2D(filters2, (1, 1), strides=strides, kernel_initializer='he_normal', kernel_regularizer= tf.keras.regularizers.l2(L2_WEIGHT_DECAY), bias_regularizer= tf.keras.regularizers.l2(L2_WEIGHT_DECAY), name=conv_name_base + '1')(input_tensor) shortcut = tf.keras.layers.BatchNormalization( axis=bn_axis, name=bn_name_base + '1', momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON)( shortcut, training=training) x = tf.keras.layers.add([x, shortcut]) x = tf.keras.layers.Activation('relu')(x) return x def resnet_block(input_tensor, size, kernel_size, filters, stage, conv_strides=(2, 2), training=None): """一个应用层后跟多个标识块的块。 参数: input_tensor:输入张量 size:整数,构成转化卷积/身份块的数量。 一个卷积层使用后,再跟(size-1)个身份块。 kernel_size:默认为3,内核大小为 主路径上的中间卷积层 
filters:整数列表,主路径上3个卷积层的过滤器 stage:整数,当前阶段标签,用于生成层名称 conv_strides:块中第一个卷积层的步长。 training:仅在使用 Estimator 训练 keras 模型时使用。其他情况它会自动处理。 返回值: 应用层和身份块后的输出张量。 """ x = conv_building_block(input_tensor, kernel_size, filters, stage=stage, strides=conv_strides, block='block_0', training=training) for i in range(size - 1): x = identity_building_block(x, kernel_size, filters, stage=stage, block='block_%d' % (i + 1), training=training) return x def resnet(num_blocks, classes=10, training=None): """实例化ResNet体系结构。 参数: num_blocks:整数,每个块中的卷积/身份块的数量。 ResNet 包含3个块,每个块包含一个卷积块 后面跟着(layers_per_block - 1) 个身份块数。 每 卷积/理想度块具有2个卷积层。 用输入 卷积层和池化层至最后,这带来了 网络的总大小为(6 * num_blocks + 2) classes:将图像分类为的可选类数 training:仅在使用 Estimator 训练 keras 模型时使用。其他情况下它会自动处理。 返回值: Keras模型实例。 """ input_shape = (32, 32, 3) img_input = layers.Input(shape=input_shape) if backend.image_data_format() == 'channels_first': x = layers.Lambda(lambda x: backend.permute_dimensions(x, (0, 3, 1, 2)), name='transpose')(img_input) bn_axis = 1 else: # channel_last x = img_input bn_axis = 3 x = tf.keras.layers.ZeroPadding2D(padding=(1, 1), name='conv1_pad')(x) x = tf.keras.layers.Conv2D(16, (3, 3), strides=(1, 1), padding='valid', kernel_initializer='he_normal', kernel_regularizer= tf.keras.regularizers.l2(L2_WEIGHT_DECAY), bias_regularizer= tf.keras.regularizers.l2(L2_WEIGHT_DECAY), name='conv1')(x) x = tf.keras.layers.BatchNormalization(axis=bn_axis, name='bn_conv1', momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON)( x, training=training) x = tf.keras.layers.Activation('relu')(x) x = resnet_block(x, size=num_blocks, kernel_size=3, filters=[16, 16], stage=2, conv_strides=(1, 1), training=training) x = resnet_block(x, size=num_blocks, kernel_size=3, filters=[32, 32], stage=3, conv_strides=(2, 2), training=training) x = resnet_block(x, size=num_blocks, kernel_size=3, filters=[64, 64], stage=4, conv_strides=(2, 2), training=training) x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x) x = tf.keras.layers.Dense(classes, activation='softmax', kernel_initializer='he_normal', kernel_regularizer= tf.keras.regularizers.l2(L2_WEIGHT_DECAY), bias_regularizer= tf.keras.regularizers.l2(L2_WEIGHT_DECAY), name='fc10')(x) inputs = img_input # 创建模型 model = tf.keras.models.Model(inputs, x, name='resnet56') return model resnet20 = functools.partial(resnet, num_blocks=3) resnet32 = functools.partial(resnet, num_blocks=5) resnet56 = functools.partial(resnet, num_blocks=9) resnet110 = functools.partial(resnet, num_blocks=18)从 [TensorFlow 数据集](https://tensorflow.google.cn/datasets)下载 CIFAR-10 数据集。cifar_builder = tfds.builder('cifar10') cifar_builder.download_and_prepare()建立数据输入线性通信模型并编译 ResNet56 模型。HEIGHT = 32 WIDTH = 32 NUM_CHANNELS = 3 NUM_CLASSES = 10 BATCH_SIZE = 128 def preprocess_data(record): image = record['image'] label = record['label'] # 调整图像大小以在每侧增加四个额外的像素。 image = tf.image.resize_with_crop_or_pad( image, HEIGHT + 8, WIDTH + 8) # 随机裁剪图像的 [HEIGHT,WIDTH] 部分。 image = tf.image.random_crop(image, [HEIGHT, WIDTH, NUM_CHANNELS]) # 随机水平翻转图像。 image = tf.image.random_flip_left_right(image) # 减去均值并除以像素方差。 image = tf.image.per_image_standardization(image) label = tf.compat.v1.sparse_to_dense(label, (NUM_CLASSES,), 1) return image, label train_data = cifar_builder.as_dataset(split=tfds.Split.TRAIN) train_data = train_data.repeat() train_data = train_data.map( lambda value: preprocess_data(value)) train_data = train_data.shuffle(1024) train_data = train_data.batch(BATCH_SIZE) model = resnet56(classes=NUM_CLASSES) model.compile(optimizer='SGD', 
loss='categorical_crossentropy', metrics=['categorical_accuracy'])当你创建 TensorBoard 回调时,您可以指定您想要进行性能分析的批次。默认情况下,TensorFlow 将对第二个批次进行性能分析,因为第一个批次的时候会运行很多一次性的图优化。您可以通过设置 `profile_batch` 对其进行修改。您还可以通过将其设置为 0 来关闭性能分析。这时候,您将会对第三批次进行性能分析。log_dir="logs/profile/" + datetime.now().strftime("%Y%m%d-%H%M%S") tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1, profile_batch = 3)开始使用 [Model.fit()](https://https://tensorflow.google.cn/api_docs/python/tf/keras/models/Modelfit) 进行训练。model.fit(train_data, steps_per_epoch=20, epochs=5, callbacks=[tensorboard_callback])Epoch 1/5 1/20 [>.............................] - ETA: 14:27 - loss: 5.4251 - categorical_accuracy: 0.0859使用 TensorBoard 可视化性能分析结果不幸的是,由于[#1913](https://github.com/tensorflow/tensorboard/issues/1913), 您无法在 Colab 中使用 TensorBoard 来可视化性能分析结果。您需要下载日志目录并在本地计算机上启动 TensorBoard。压缩下载日志:!tar -zcvf logs.tar.gz logs/profile/在“文件”选项卡中右键单击以下载 `logdir.tar.gz`。![下载](https://github.com/tensorflow/tensorboard/blob/master/docs/images/profiler-download-logdir.png?raw=1)请保证在你本地的机器安装最新的 [TensorBoard](https://tensorflow.google.cn/tensorboard)。在你的本地机器上执行下面的命令:```> cd download/directory> tar -zxvf logs.tar.gz> tensorboard --logdir=logs/ --port=6006``` 在您的Chrome浏览器中打开一个新标签,然后导航至[localhost:6006](http://localhost:6006),单击 “Profile” 标签。您可能会看到以下性能分析结果:![跟踪视图](https://github.com/tensorflow/tensorboard/blob/master/docs/images/profiler-trace-viewer.png?raw=1) 跟踪查看器当您单击性能分析选项卡后,您将看到跟踪查看器。该页面显示了聚合期间 CPU 和加速器上发生的不同事件的时间轴。跟踪查看器在垂直轴上显示多个 *事件组*。 每个事件组都有多个水平 *跟踪*,其中填充了跟踪事件。*跟踪* 事件是在线程或 GPU 流上执行的基本时间线,。单个事件是时间轴轨道上的彩色矩形块。时间从左到右移动。您可以使用 `w`(放大),`s`(缩小),`a`(向左滚动),`d`(向右滚动)浏览结果。单个矩形代表 *跟踪事件* :从这个时间的开始到结束时间。 要研究单个矩形,可以在浮动工具栏中选择鼠标光标图标后单击它。 这将显示有关矩形的信息,例如其开始时间和持续时间。除了点击之外,您还可以拖动鼠标以选择覆盖一组跟踪事件的矩形。这将为您提供与该矩形相交并汇总的事件列表。 `m` 键可用于测量所选事件的持续时间。![List of Events](https://github.com/tensorflow/tensorboard/blob/master/docs/images/profiler-trace-viewer-select.png?raw=1)跟踪事件是从三个来源收集的:* **CPU:** CPU事件位于名为`/host:CPU`的事件组下。每个轨道代表 CPU 上的一个线程。例如,输入线性通信模型事件,GPU 操作调度事件, CPU 操作执行事件等。* **GPU:** GPU 事件位于以 `/device:GPU:`为前缀的事件组下。 除了 `stream:all`,每个事件组都代表在 GPU 上一个流。 `stream::all`将所有事件汇集到一个 GPU 上。例如。 内存复制事件,内核执行事件等。* **TensorFlow 运行时间:** 运行时事件在以 `/job:`为前缀的事件组下。运行事件表示 python 程序调用的 TensorFlow ops。 例如, tf.function 执行事件等。 调试性能现在,您将使用 Trace Viewer 来改善您的模型的性能。让我们回到刚刚捕获的分析结果。![GPU kernel](https://github.com/tensorflow/tensorboard/blob/master/docs/images/profiler-idle-gpu.png?raw=1)GPU 事件表明,GPU 在该步骤的上半部分什么都没有做。![CPU events](https://github.com/tensorflow/tensorboard/blob/master/docs/images/profiler-input-cpu.png?raw=1)CPU 事件表明,在此步骤的开始的时候,CPU 被数据输入管道占用。![Runtime](https://github.com/tensorflow/tensorboard/blob/master/docs/images/profiler-blocking-runtime.png?raw=1)在 TensorFlow 运行时中,有一个叫 `Iterator::GetNextSync`的大阻塞,这是从数据输入管道中获取下一批的阻塞调用。而且它阻碍了训练步骤。 因此,如果您可以在 `s-1` 的时候为 `s` 步骤准备输入数据,则可以更快地训练该模型。您也可以通过使用 [tf.data.prefetch](https://tensorflow.google.cn/api_docs/python/tf/data/Datasetprefetch).train_data = cifar_builder.as_dataset(split=tfds.Split.TRAIN) train_data = train_data.repeat() train_data = train_data.map( lambda value: preprocess_data(value)) train_data = train_data.shuffle(1024) train_data = train_data.batch(BATCH_SIZE) # 它将在(s-1)步骤中预取数据 train_data = train_data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)重新运行模型。log_dir="logs/profile/" + datetime.now().strftime("%Y%m%d-%H%M%S") tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1, profile_batch = 3) model.fit(train_data, steps_per_epoch=20, epochs=5, callbacks=[tensorboard_callback])Epoch 1/5 
20/20 [==============================] - 5s 265ms/step - loss: 3.4081 - categorical_accuracy: 0.1055 Epoch 2/5 20/20 [==============================] - 4s 205ms/step - loss: 3.3122 - categorical_accuracy: 0.1141 Epoch 3/5 20/20 [==============================] - 4s 200ms/step - loss: 3.2795 - categorical_accuracy: 0.1199 Epoch 4/5 20/20 [==============================] - 4s 204ms/step - loss: 3.2237 - categorical_accuracy: 0.1469 Epoch 5/5 20/20 [==============================] - 4s 201ms/step - loss: 3.1888 - categorical_accuracy: 0.1465Woohoo! 你刚刚把训练性能从 *~235ms/step* 提高到 *~200ms/step*。!tar -zcvf logs.tar.gz logs/profile/再一次下载 `logs` 目录来查看 TensorBoard的新的分析结果。![TF Runtime](https://github.com/tensorflow/tensorboard/blob/master/docs/images/profiler-prefetch-runtime.png?raw=1)`Iterator::GetNextSync`大阻塞不再存在。做得好!显然,这依旧不是最佳性能。请自己尝试,看看是否可以有更多的改进。有关性能调整的一些有用参考:* [数据输入线性通信模型](https://tensorflow.google.cn/guide/data_performance)* [训练表现: 更快收敛的用户指南 (TensorFlow Dev Summit 2018)](https://www.youtube.com/watch?v=SxOsJPaxHME) 其他分析方式除了 TensorBoard 回调外,TensorFlow 还提供了其他两种方式来手动触发分析器:*Profiler APIs* 和 *Profiler Service*。注意:请不要同时运行多个分析器。如果您想将 Profiler API 或 Profiler Service 与 TensorBoard 回调一起使用,请确保将`profile_batch` 参数设置为0。 Profiler APIs# 内容管理接口 with tf.python.eager.profiler.Profiler('logdir_path'): # 进行你的训练 pass # 功能接口 tf.python.eager.profiler.start() # 进行你的训练 profiler_result = tf.python.eager.profiler.stop() tf.python.eager.profiler.save('logdir_path', profiler_result)Profiler Service# 此 API 将在您的 TensorFlow 作业上启动 gRPC 服务器,该 API 可以按需接收分析请求。 tf.python.eager.profiler.start_profiler_server(6009) # 在这里写你的 TensorFlow 项目**TODO**- create a better control stuc for internal parameters to, look as SKiDl's lib file that does the conversion from SKiDl to pyspice for inspiration#Library import statements from skidl.pyspice import * #can you say cheeky import PySpice as pspice #becouse it's written by a kiwi you know import lcapy as kiwi import numpy as np import pandas as pd import matplotlib.pyplot as plt import warnings from IPython.display import YouTubeVideo, display import traceback #notebook specific loading control statements %matplotlib inline #tool to log notebook internals #https://github.com/jrjohansson/version_information %load_ext version_information %version_information skidl, PySpice,lcapy, sympy, numpy, matplotlib, pandas, scipy #import the op read tool from last subchapter from DC_1_Codes import op_results_collectWorking with SKiDl elements The following example, to be honest, is pedantic; but it serves to introduce the current source in SPICE which can be a real headache. It also shows how to let Python do the work of interacting with elements that we will readily make great use of down the road.So why is thus so pedantic? source transformations are mostly an analytic simplification tool. And while yes there is some practicality in being able to find a matching current source to a voltage source and vice versa with equivalent power from the standpoint of Thevenin and Norton's theorem. There are, however, serious limation with how the real circuits handle inputs of voltage and current differently. And frankly, from SPICE’s point of view for this exercise, it doesn't care, it's going to solve regardless. So if you need a refresher on source transformations with an illustration of why there a really great analytical tool please watch ALL ABOUT ELECTRONICS YT video on "Source Transformations" where this example is pulled from. 
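Before getting into the example, here is the bare arithmetic of the two transformation rules as a quick sketch in plain Python (no SPICE involved; the 4 V / 6 Ohm and 2 A / 12 Ohm values are the ones used in the example circuit built below):

```
# Thevenin -> Norton: a voltage source V_s in series with R becomes a current
# source of V_s / R in parallel with the same R, and vice versa.
V_s, R_ser = 4.0, 6.0        # 4 V source with a 6 Ohm series resistor
I_norton = V_s / R_ser       # -> ~0.667 A current source in parallel with 6 Ohm

I_s, R_par = 2.0, 12.0       # 2 A source with a 12 Ohm parallel resistor
V_thevenin = I_s * R_par     # -> 24 V voltage source in series with 12 Ohm

print(I_norton, V_thevenin)  # 0.666... 24.0
```

These are the same numbers that show up in the warnings printed by the helper functions further down.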
Example 1 from "Source Transformation" @ ~6:30 minYouTubeVideo('FtEU5ZoO-fg', width=500, height=400, start=390)A very important word about current source in SPICE Before building the circuit above a word about any current sources in SPICE. Recalling the discussion around the surefire method of measuring currents in SPICE using a 0V Liner Voltage Souce (aka the SPICE ammeter trick) in SPICE current flow is in the positive direction from a positive voltage to a negative voltage. So by convention, we draw independent and dependent sources with an arrow in the direction of how current is being added. While that means that all current sources are polarized such that the positive terminal is at the tail of the drawn current arrow and the head is the negative terminal. When you use a schematic editor with built-in SPICE it does all the terminal work in the background. But when we are indirectly working with netlist, via SKiDL, you will have to make sure you remember this. Or else this will bite you in the butt and keep costing you time till have this arrow and terminal connection for current ingrained into you.reset() vs_4=V(ref='s_4', dc_value=4@u_V) vs_8=V(ref='s_8', dc_value=8@u_V) cs_2=I(ref='s_2', dc_value=2@u_A) r1=R(ref='1', value=6@u_Ohm) r2=R(ref='2', value=12@u_Ohm) r3=R(ref='3', value=12@u_Ohm) (gnd&vs_4['p', 'n']&r1) |r2 vs_8['p', 'n']+=r2[2], r3[2] (gnd & cs_2 | r3) circ=generate_netlist() print(circ) preop_sim=op_results_collect(circ) #store the results for comperasion to post souce transfromations pre_tf_res=preop_sim.results_df pre_tf_res.title Vs_4 0 N_1 4V Vs_8 N_2 N_3 8V Is_2 0 N_3 2A R1 N_1 N_2 6Ohm R2 0 N_2 12Ohm R3 0 N_3 12OhmSKiDl's Diabetic Syntax `&`, `|` Notice above that usage of Pythons logical AND operator & and logical OR operator | in creating the circuit. Since & and | are just operators in python we can do what is called operator extensions to them to make them act in special ways in a certain context. In SKiDls particular case the logical and (&) is a shorthand for putting two elements in series. And the logical OR (|) is shorthand for putting two elements in parral. Furthermore, these operators and parentheses sensitive, and are not polarized sensitive and so polarized elements need to have their terminals called out when using the. There called Diabetic Syntical Suger in light of their release announcement entitled ["SWEETENING SKIDL"](http://xess.com/skidl/docs/_site/blog/sweetening-skidl). Using is up to your codding style and that of your colleagues. All joking aside they are extremely useful operators to know how to use, and we will use them in this book. Crafting the transfomation tool We are not going into that much detial about these tool. The important thing is that we can take advante that all our elements (SKiDl part) and nets are objects in Python. And therefore have methods and attriputs that are accesable and therefore more usable then helping produce part of a line of a SPICE netlist. 
For instiance Voltage and Current souce store there dcvalue in `.dc_value` where resitors store there resistince in `R.value`.This then alows us to use the elements to perform calculation outside of SPICE and even better assisit in creating new elements as we have done below.#%%writefile -a DC_1_Codes.py #chapter 1 section 2 get_skidl_spice_ref function #used for getting the name of the element as it would appear in a #generated netlist def get_skidl_spice_ref(skidle_element): """ Helper function to retrieve SKiDL element name as appears in the final netlist Args: skidle_element (skidl.part.Part): SKiDl part to get the netlist name from Returns: returns a string with the netlist name of `skidle_element`, or throws an error if `skidle_element` is not a SKiDl part """ assert repr(type(skidle_element))=="", '`skidle_element` must be a SKiDl part' if skidle_element.ref_prefix!=skidle_element.ref[0]: return skidle_element.ref_prefix+skidle_element.ref else: return skidle_element.ref #%%writefile -a DC_1_Codes.py #chapter 1 section 2 dc_cs2vs function #creates a voltage source element to the current source based on the #value if the input DC current element and it's parallel resistor #via the source transformation rules def dc_cs2vs(dc_cs, cs_par_res): """ Create a new equivalent voltage source to the current source with parallel resistance Args: dc_cs (SKiDl current source): the current source to transform to a voltage source cs_par_res (SKiDl resistor): the parrel resistor to the current source to be transformed Returns: returns an equivalent DC voltage source element to the current source based on the value if the input DC current element and it's parallel resistor via the source transformation rules TODO: -figure out how to do assertion check that cs_par_res is in parallel to dc_cs Future: -make into subcircuit with net inputs to automatically add the new source and resistance to the circuit """ #do assertion checks to insure that passed in objects are what they are supposed to be assert dc_cs.ref_prefix=='I', ' was not a current source' assert cs_par_res.ref_prefix=='R', ' was not a resistor' old_maxpower=float((dc_cs.dc_value**2)*cs_par_res.value) new_vs_val=float(dc_cs.dc_value*cs_par_res.value) assert np.around(float(new_vs_val*dc_cs.dc_value), 6)==np.around(old_maxpower, 6), "Um, something wrong since before and after max power not equal" new_vs_ref=dc_cs.ref if new_vs_ref[0]!='I': new_vs_ref='I'+new_vs_ref new_vs_ref=f"V{new_vs_ref[1:]}_f_{new_vs_ref}" print(new_vs_ref) eq_vs=V(dc_value=new_vs_val@u_V, ref=new_vs_ref) warnings.warn(f"""New voltage source values: {new_vs_val} [V] with max aviabel power {old_maxpower} [W] \n transformed creation statment will be like: \n`(gnd & ['n', 'p'] & )`""") return eq_vs #%%writefile -a DC_1_Codes.py #chapter 1 section 2 dc_vs2cs function #creats current source element to the voltage source based on the #value if the input DC current element and it's series resistor #via the source transformation rules def dc_vs2cs(dc_vs, vs_ser_res): """ Create a new equivalent current source to voltage source with series resistance Args: dc_vs (SKiDl voltage source): the voltage source to transform to a current source vs_ser_res (SKiDl resistor): the serries resistor to the voltage source to be transformed Returns: TODO: -figure out how to do assertion check that vs_ser_res is in serries to dc_vs Future: -make into subcircuit with net inputs to automatically add the new source and resistance to the circuit """ #do assertion checks to insure that passed in objects 
are what they are supposed to be assert dc_vs.ref_prefix=='V', ' was not a voltage source' assert vs_ser_res.ref_prefix=='R', ' was not a resistor' old_maxpower=float((dc_vs.dc_value**2)/vs_ser_res.value) new_cs_val=float(dc_vs.dc_value/vs_ser_res.value) assert np.around(float(new_cs_val*dc_vs.dc_value), 6)==np.around(old_maxpower, 6), "Um, something wrong since before and after max power not equal" new_cs_ref=dc_vs.ref if new_cs_ref[0]!='V': new_cs_ref='V'+new_cs_ref new_cs_ref=f"I{new_cs_ref[1:]}_f_{new_cs_ref}" #print(new_cs_ref) eq_cs=I(dc_value=new_cs_val@u_A, ref=new_cs_ref)# might still need this: , circuit=vs_ser_res.circuit) warnings.warn(f"""New current source values: {new_cs_val} [A] with max aviabel power {old_maxpower} [W] \n transformed creation statment will be like:\n `(gnd & ['n', 'p'] | )` \n""") return eq_csvalidate the transform¶For this, we are to transform the left voltage source and series resistors and the right current source and parral resistor simultaneously which halfway deviates from what ALL ABOUT ELECTRONICS did working the example analytically. We will have the center parallel resistor and voltage source as a reference network.reset() r1=R(ref='1', value=6@u_Ohm) r2=R(ref='2', value=12@u_Ohm) r3=R(ref='3', value=12@u_Ohm) vs_8=V(ref='s_8', dc_value=8@u_V) cs_4_f_cs_4=dc_vs2cs(vs_4, r1) vs_2_f_cs_2=dc_cs2vs(cs_2, r3) (gnd&cs_4_f_cs_4['n', 'p']|r1) |r2 vs_8['p', 'n']+=r2[2], r3[2] (gnd & vs_2_f_cs_2['n', 'p'] & r3) circ=generate_netlist() print(circ) postop_sim=op_results_collect(circ) #store the results for comperaion to pre souce transfromations post_tf_res=postop_sim.results_df post_tf_res/home/iridium/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:43: UserWarning: New current source values: 0.6666666666666666 [A] with max aviabel power 2.6666666666666665 [W] transformed creation statment will be like: `(gnd & ['n', 'p'] | )` /home/iridium/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:44: UserWarning: New voltage source values: 24.0 [V] with max aviabel power 48.0 [W] transformed creation statment will be like: `(gnd & ['n', 'p'] & )` No errors or warnings found during netlist generation.Since we stored the results from the pre transformed circuit we can try to do an eyeball compersion between the two dataframes, however, since the net names are no longer the same we only can look at the branch current of vs_8 which remained constantpre_tf_res (pre_tf_res.loc[get_skidl_spice_ref(vs_8)]==post_tf_res.loc[get_skidl_spice_ref(vs_8)]).all()Thus we can assume that the circuits are source equivalents of each other, but this book is about cultivating analog design verification. And assuming can yield performance hits and even worse the need for a SPIN. Therefore DONT ASSUME, figure out a way to VERIFY via Quantifiable answers. internal parameters:ngspice (in fact most SPICE engines) elements have what are called internal parameters. Most of the setup parameters like dc_value, resistance, ect along with nitty-gritty parameters for more advanced SPICE simulations that will get too. What we are after of now are the internal values that store results of simulations as we have alluded to in the non surefire way to save internal parameters. For instance, resistors have a way of measuring the internal current flowing through them the but the caveat is that it only returns real values in ngspice, which will be an issue when doing AC simulations. But for DC simulations is a tool we should utilize. 
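Before the full helper class below, the bare save-and-read pattern looks roughly like this (a minimal sketch assembled from the same PySpice calls the class uses; `circ` is assumed to be a netlist from `generate_netlist()` that contains a resistor `R1`):

```
# sketch: save one element's internal parameters and read them back after a .op run
op_sim = circ.simulator()
op_sim.save_internal_parameters('@R1[i]', '@R1[p]')  # internal current and power of R1
op_res = op_sim.operating_point()
r1_current = op_res['@R1[i]'].as_ndarray()[0]  # [A]
r1_power = op_res['@R1[p]'].as_ndarray()[0]    # [W]
```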
Also at the time of writing this PySPICE has a quark that internal values are not retrieved at the same time the branch currents and net voltages are. So to get both the simulation has to be run twice and the author is not sure if this is quark of ngspice or PySPICE but the author will look into it at a later time.For now, just know internal parameters have a string calls of `@[]` that is passed to a PySPICE simulation objects `.save_internal_parameters` method and then are returned in the results as the original string call to the results super dictionary.#%%writefile -a DC_1_Codes.py #chapter 1 section 2 op_internal_ivp class # class to get both the branch currents and node voltages, # along with the internal parameters values for # DC operating point analysis class op_internal_ivp(): """ Class for creating a SPICE simulation on the whole circuits internal parameters for current, voltage, and power for dc operating point (.op) simulations. Will only collect internal parameters and not global voltage and currents of the circuit TODO: Make this inheritable from op_results_collect """ def __init__(self, op_sim_circ, display_results=False): """ Basic class to get pyspice operating_point (ngspice `.op`) simulation results for internal parameters for Resistors, Current Source, Voltage Source current, voltage, power respectively Args: op_sim_circ (pspice.Spice.Netlist.Circuit): the Netlist circuit produced from SKiDl's `generate_netlist()` display_results (bool; False): option to have the simulation results stored in `self.results_df` automatically displayed from a jupyter notebook ell Returns: will create a simulation in `self.op_sim`, raw results of dc operating point simulation will be stored in `self.op_sim_results`, the tablized results will be stored in pandas dataframe `self.results_df` TODO: - add kwargs to the simulator - add an assertion that only a pyspice netlist circuit obj can be passed into op_sim_circ """ #need to add assertions for op_sim_circ ==pspice.Spice.Netlist.Circuit #store the circuit internally self.op_sim_circ=op_sim_circ #create the sim obj self.op_sim=self.op_sim_circ.simulator() #store bool to display results dataframe self.display_results=display_results #create the internal parameters to save self.create_savable_items() #run the sim for .op for internal parameters and record results self.sim_and_record() def create_savable_items(self): """ Helper method to create a listing of internal parameters and the table of the results. 
Right now only creates savable internal parameters for: Linear Dependent Voltage Sources: current, power Linear Dependent Current Sources: current, voltage, power Standard Resistor: current, voltage, power Linear Dependent Current Sources: current, voltage, power VCCS: current, voltage, power VCVS: current, voltage, power CCVS: current, voltage, power CCCS:currrent, voltage, power See ngspice manual typically chapter 31 "Model and Device Parameters" for more deitals about deice intiernal parmaters """ self.results_df=pd.DataFrame(columns=['Circ_Item', 'Item_Type', 'Value', 'Unit']) self.results_df.set_index('Circ_Item', drop=True, append=False, inplace=True, verify_integrity=False) #helper row creation statement def add_row(index, unit): self.results_df.at[index, ['Item_Type', 'Unit']]=['internal', unit] for e in self.op_sim_circ.element_names: """ Ref: ngspice documentation chapter 31 (typically): Model and Device Parameters """ #resistors if e[0]=='R': add_row(f'@{e}[i]', 'A') add_row(f'@{e}[p]', 'W') #independnt current source elif e[0]=='I': add_row(f'@{e}[c]', 'A') add_row(f'@{e}[v]', 'V') add_row(f'@{e}[p]', 'W') #independ Voltage source elif e[0]=='V': add_row(f'@{e}[i]', 'A') add_row(f'@{e}[p]', 'W') #controlled sources elif e[0] in ['F', 'H', 'G', 'E']: add_row(f'@{e}[i]', 'A') add_row(f'@{e}[v]', 'V') add_row(f'@{e}[p]', 'W') else: warnings.warn(f"Circ Element {e} is not setup to have internals read, skiping") def sim_and_record(self): """ run the .op simulation and get the internal values Args: None Returns: `self.internal_opsim_res` store the raw results of the .op for internal pamtyers whereas `self.results_df` stores the pandas dataframe of internal parameters results TODO: figure out how to do this at the same time as the main node branch sim this doing separately is getting old """ save_items=list(self.results_df.index) self.op_sim.save_internal_parameters(*save_items) self.internal_opsim_res=self.op_sim.operating_point() for save in save_items: self.results_df.at[save, 'Value']=self.internal_opsim_res[save].as_ndarray()[0] if self.display_results: print('.op sim internal parmter results') display(self.results_df)pre transform_internalsreset() vs_4=V(ref='s_4', dc_value=4@u_V) vs_8=V(ref='s_8', dc_value=8@u_V) cs_2=I(ref='s_2', dc_value=2@u_A) r1=R(ref='1', value=6@u_Ohm) r2=R(ref='2', value=12@u_Ohm) r3=R(ref='3', value=12@u_Ohm) (gnd&vs_4['p', 'n']&r1) |r2 vs_8['p', 'n']+=r2[2], r3[2] (gnd & cs_2 | r3) circ=generate_netlist() print(circ) preop_ivp_sim=op_internal_ivp(circ) pre_ivp_res=preop_ivp_sim.results_df pre_ivp_resUnit is None for @is_2[p] powerpost transform internalsreset() r1=R(ref='1', value=6@u_Ohm) r2=R(ref='2', value=12@u_Ohm) r3=R(ref='3', value=12@u_Ohm) vs_8=V(ref='s_8', dc_value=8@u_V) cs_f_vs_4=dc_vs2cs(vs_4, r1) vs_f_cs_2=dc_cs2vs(cs_2, r3) (gnd&cs_f_vs_4['n', 'p']|r1) |r2 vs_8['p', 'n']+=r2[2], r3[2] (gnd & vs_f_cs_2['n', 'p'] & r3) circ=generate_netlist() print(circ) postop_ivp_sim=op_internal_ivp(circ) post_ivp_res=postop_ivp_sim.results_df post_ivp_resUnit is None for @vs_2_f_is_2[p] powerQuantitive comparison ¶Since our results are stored in Pandas dataframes we can make use of the power of Pandas to do data analysis to get insight into what is going on. Where below we get a merger of the two dataframes side by side for all the elements that remained the same in the circuit pre and post-transformation. 
And we then follow that up with color-coding of what values remained the same between the pre and post-transformation of the circuitpre_post_comp=pd.concat([pre_ivp_res, post_ivp_res], join='inner', axis='columns', keys=['Pre', 'Post']) pre_post_comp def color_results(row): is_equal=(row['Pre']==row['Post']).all() if is_equal: return ['background-color: lightgreen']*len(row) else: return ['background-color: yellow']*len(row) pre_post_comp.style.apply(color_results, axis=1)使用 RNN 和 CNN 处理序列 击球手击球。外野手立即开始奔跑,预测球的轨迹。他跟踪它,调整他的动作,最后抓住它(在雷鸣般的掌声中)。预测未来是你一直在做的事情,无论你是在完成朋友的一句话,还是在期待早餐时咖啡的味道。在本章中,我们将讨论循环神经网络 (RNN),这是一类可以预测未来的网络(当然,在一定程度上)。他们可以分析股票价格等时间序列数据,并告诉您何时买入或卖出。在自动驾驶系统中,他们可以预测汽车轨迹并帮助避免事故。更一般地说,它们可以处理任意长度的序列,而不是像我们目前考虑的所有网络那样处理固定大小的输入。例如,它们可以将句子、文档或音频样本作为输入,这使得它们对于自动翻译或语音到文本等自然语言处理应用非常有用。 在本章中,我们将首先了解 RNN 的基本概念以及如何使用时间反向传播来训练它们,然后我们将使用它们来预测时间序列。 之后我们将探讨 RNN 面临的两个主要困难:* 不稳定梯度(在第 11 章中讨论),可以使用各种技术来缓解,包括循环 dropout 和循环层归一化*(非常)有限的短期记忆,可以使用 LSTM 和 GRU 单元进行扩展 RNN 并不是唯一能够处理序列数据的神经网络类型:对于小序列,常规密集网络可以做到这一点; 对于非常长的序列,例如音频样本或文本,卷积神经网络实际上也可以很好地工作。 我们将讨论这两种可能性,我们将通过实现 WaveNet 来结束本章:这是一种 CNN 架构,能够处理数万个时间步长的序列。 在第 16 章中,我们将继续探索 RNN,看看如何将它们用于自然语言处理,以及基于注意力机制的最新架构。 让我们开始吧! 循环神经元和层 到目前为止,我们一直专注于前馈神经网络,其中激活仅在一个方向上流动,从输入层到输出层(附录 E 中讨论了一些例外)。 循环神经网络看起来非常像前馈神经网络,除了它也有指向后的连接。 让我们看一下最简单的 RNN,它由一个神经元接收输入、产生输出并将该输出发送回自身组成,如图 15-1(左)所示。 在每个时间步长 t(也称为帧),这个循环神经元接收输入 x(t) 以及它自己来自前一个时间步长 y(t-1) 的输出。 由于在第一个时间步没有以前的输出,所以一般设置为 0。我们可以将这个微小的网络表示在时间轴上,如图 15-1(右)所示。 这称为随时间展开网络(它是每个时间步表示一次的相同循环神经元)。 ![](https://tva1.sinaimg.cn/large/008i3skNgy1gtld2yoy0kj61bk0ik76j02.jpg) 您可以轻松创建一层循环神经元。 在每个时间步 t,每个神经元都接收输入向量 x(t) 和来自前一个时间步 y(t–1) 的输出向量,如图 15-2 所示。 请注意,现在输入和输出都是向量(当只有一个神经元时,输出是标量)。 ![](https://tva1.sinaimg.cn/large/008i3skNgy1gtld40uugyj61ca0jetbj02.jpg) 每个循环神经元都有两组权重:一组用于输入 x(t),另一组用于前一时间步长 y(t-1) 的输出。 我们称这些权重向量为 $w_x$ 和 $w_y$。 如果我们考虑整个循环层而不是一个循环神经元,我们可以将所有权重向量放在两个权重矩阵 $W_x$ 和 $W_y$ 中。 然后,整个循环层的输出向量几乎可以按照您的预期进行计算,如公式 15-1 所示(b 是偏置向量,$φ(·)$ 是激活函数(例如 ReLU1)。 ![](https://tva1.sinaimg.cn/large/008i3skNgy1gtld61bzc3j61ag06i74w02.jpg) 与前馈神经网络一样,我们可以通过将时间步 t 的所有输入放入输入矩阵 X(t) 中(参见公式 15-2),一次性计算整个小批量的循环层输出。 ![](https://tva1.sinaimg.cn/large/008i3skNgy1gtld6wtb51j61b20cemya02.jpg) 在这个等式中:* $\mathbf{Y}_{(t)}$ 是一个 $m \times n$ 神经元矩阵,其中包含小批量中每个实例在时间步长 $t$ 时层的输出($m$ 是小批量中的实例数,$n$ 个神经元是神经元数)。* $\mathbf{X}_{(t)}$ 是一个 $m \times n$ 输入矩阵,包含所有实例的输入(n 个输入是输入特征的数量)。* $\mathbf{W}_x$ 是一个 $n_{inputs} \times n_{neurons}$ 矩阵,包含当前时间步长输入的连接权重。* $\mathbf{W}_y$ 是一个神经元 $\times$ 神经元矩阵,包含前一时间步输出的连接权重。* $b$ 是包含每个神经元偏置项的大小神经元向量。* 权重矩阵 $\mathbf{W}_x$ 和 $\mathbf{W}_y$ 通常垂直串联成一个形状为 $(n_{inputs} + n_{neurons}) \times n_{neurons}$ 的权重矩阵 $\mathbf{W}$(参见公式 15-2 的第二行)。* 符号$[\mathbf{X}_{(t)} \space \mathbf{Y}_{(t-1)}]$ 表示矩阵$\mathbf{X}_{(t)}$ 和$\mathbf{Y}_{(t-1)}$ 的水平串联。 请注意,$\mathbf{Y}_{(t)}$ 是 $\mathbf{X}_{(t)}$ 和 $\mathbf{Y}_{(t-1)}$ 的函数,它是 $\mathbf{X}_{(t-1)}$ 和 $\mathbf{Y}_{(t-2)}$ 的函数,它是 $\mathbf{X}_{(t- 2)}$ 和 $\mathbf{Y}_{(t–3)}$,依此类推。 这使得 $\mathbf{Y}_{(t)}$ 成为自时间 $t = 0$ 起所有输入的函数(即 $\mathbf{X}_{(0)}, \mathbf{X}_{(1)}, \cdots, \mathbf{X}_{(t)}$)。 在第一个时间步 $t = 0$,没有先前的输出,因此通常假设它们全为零。 记忆细胞 由于循环神经元在时间步 t 的输出是先前时间步的所有输入的函数,因此您可以说它具有某种形式的记忆。 跨时间步长保留某种状态的神经网络的一部分称为记忆单元(或简称为单元)。 单个循环神经元或循环神经元层是一个非常基本的细胞,只能学习短模式(通常长约 10 步,但这取决于任务)。 在本章的后面,我们将研究一些更复杂、更强大的细胞类型,它们能够学习更长的模式(大约长 10 倍,但这同样取决于任务)。 通常,单元格在时间步 $t$ 的状态,表示为 $\mathbf{h}_{(t)}$(“$\mathbf{h}$”代表“隐藏”),是该时间步的某些输入及其在前一时间步的状态的函数:$\mathbf{h}_{(t)} = f(\mathbf{h}_{(t–1)}, \mathbf{x}_{(t)})$。 它在时间步长 $t$ 的输出,表示为 $\mathbf{y}_{(t)}$ ,也是先前状态和当前输入的函数。 在我们目前讨论的基本单元的情况下,输出只是等于状态,但在更复杂的单元中情况并非总是如此,如图 15-3 所示。 
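To make the recurrent-layer equation (Equation 15-1) concrete, here is a minimal NumPy sketch of a single time step for a whole layer; the shapes are illustrative only and not taken from the book's code:

```
import numpy as np

m, n_inputs, n_neurons = 2, 3, 5              # batch of 2 instances, 3 input features, 5 recurrent neurons
X_t = np.random.rand(m, n_inputs)             # inputs at time step t
Y_prev = np.zeros((m, n_neurons))             # outputs at t-1 (all zeros at t = 0)
W_x = np.random.rand(n_inputs, n_neurons)     # weights for the current inputs
W_y = np.random.rand(n_neurons, n_neurons)    # weights for the previous outputs
b = np.zeros(n_neurons)                       # one bias term per neuron

Y_t = np.tanh(X_t @ W_x + Y_prev @ W_y + b)   # phi = tanh, the Keras SimpleRNN default
print(Y_t.shape)                              # (2, 5)
```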
![](https://tva1.sinaimg.cn/large/008i3skNgy1gtldydoq3lj61bu0juwg602.jpg) 输入和输出序列 RNN 可以同时接受一系列输入并产生一系列输出(参见图 15-4 中左上角的网络)。 这种类型的序列到序列网络对于预测诸如股票价格之类的时间序列非常有用:您将过去 N 天的价格提供给它,并且它必须输出未来一天的价格(即,从 N – 1天前到明天)。 或者,您可以为网络提供一系列输入并忽略除最后一个之外的所有输出(参见图 15-4 中右上角的网络)。 换句话说,这是一个序列到向量的网络。 例如,您可以向网络提供与电影评论相对应的单词序列,网络将输出情绪分数(例如,从 –1 [仇恨] 到 +1 [爱])。相反,您可以在每个时间步上一遍又一遍地向网络提供相同的输入向量,并让它输出一个序列(参见图 15-4 的左下角网络)。 这是一个向量到序列的网络。 例如,输入可以是图像(或 CNN 的输出),输出可以是该图像的标题。 最后,你可以有一个序列到向量网络,称为编码器,然后是一个向量到序列网络,称为解码器(见图 15-4 右下角的网络)。 例如,这可用于将句子从一种语言翻译成另一种语言。 你会向网络输入一种语言的句子,编码器将这个句子转换成单个向量表示,然后解码器将这个向量解码成另一种语言的句子。 这种称为编码器-解码器的两步模型比尝试使用单个序列到序列 RNN(如左上角表示的那个)即时翻译要好得多:句子的最后一个词可以影响 翻译的第一个词,所以你需要等到你看到整个句子再翻译。 我们将在第 16 章中看到如何实现编码器-解码器(正如我们将看到的,它比图 15-4 中建议的要复杂一些)。 ![](https://tva1.sinaimg.cn/large/008i3skNgy1gtle1depv9j614c0t8jv202.jpg) 听起来很有希望,但是如何训练循环神经网络呢? 训练 RNN 要训练 RNN,诀窍是通过时间展开它(就像我们刚刚做的那样),然后简单地使用常规的反向传播(见图 15-5)。这种策略称为时间反向传播 (BPTT)。就像在常规的反向传播中一样,有第一次前向通过展开的网络(由虚线箭头表示)。然后使用成本函数 $C(\mathbf{Y}_{(0)}, \mathbf{Y}_{(1)}, \cdots, \mathbf{Y}_{(T)})$ 评估输出序列)(其中 $T$ 是最大时间步长)。请注意,此成本函数可能会忽略某些输出,如图 15-5 所示(例如,在序列到向量 RNN 中,除了最后一个输出之外,所有输出都将被忽略)。然后,该成本函数的梯度通过展开的网络(由实线箭头表示)向后传播。最后,使用 BPTT 期间计算的梯度更新模型参数。请注意,梯度通过成本函数使用的所有输出向后流动,而不仅仅是通过最终输出(例如,在图 15-5 中,成本函数是使用网络的最后三个输出 $\mathbf{Y}_{(2)}$ 计算的, $\mathbf{Y}_{(3)}$ 和 $\mathbf{Y}_{(4)}$,所以梯度流经这三个输出,但不流经 $\mathbf{Y}_{(0)}$ 和 $\mathbf{Y}_{(1)}$)。此外,由于在每个时间步使用相同的参数 $\mathbf{W}$ 和 $\mathbf{b}$,反向传播将做正确的事情并在所有时间步上求和。 ![](https://tva1.sinaimg.cn/large/008i3skNgy1gtle8jzjhvj614k0judhi02.jpg) 预测时间序列 假设您正在研究您网站上每小时的活跃用户数量,或您所在城市的每日温度,或您公司的财务状况,每季度使用多个指标进行测量。 在所有这些情况下,数据将是每个时间步长一个或多个值的序列。 这称为时间序列。 在前两个示例中,每个时间步长只有一个值,因此它们是单变量时间序列,而在财务示例中,每个时间步长有多个值(例如,公司的收入、债务等),因此是 多元时间序列。 一个典型的任务是预测未来值,这称为预测。 另一个常见的任务是填补空白:预测(或者说“postdict”)过去的缺失值。 这称为插补。 例如,图 15-6 显示了 3 个单变量时间序列,每个序列有 50 个时间步长,这里的目标是预测每个时间步长(由 X 表示)的值。 ![](https://tva1.sinaimg.cn/large/008i3skNgy1gtleavei94j61440ecjtd02.jpg) 为简单起见,我们使用由 `generate_time_series()` 函数生成的时间序列,如下所示:import numpy as np def generate_time_series(batch_size, n_steps): freq1, freq2, offsets1, offsets2 = np.random.rand(4, batch_size, 1) time = np.linspace(0, 1, n_steps) series = 0.5 * np.sin((time - offsets1) * (freq1 * 10 + 10)) # wave 1 series += 0.2 * np.sin((time - offsets2) * (freq2 * 20 + 20)) # + wave 2 series += 0.1 * (np.random.rand(batch_size, n_steps) - 0.5) # + noise return series[..., np.newaxis].astype(np.float32)time: 2.56 ms (started: 2021-08-20 17:39:05 +08:00)此函数根据请求创建尽可能多的时间序列(通过 batch_size 参数),每个序列的长度为 n_steps,并且每个序列中的每个时间步只有一个值(即,所有序列都是单变量)。 该函数返回一个形状为 [批量大小,时间步长,1] 的 NumPy 数组,其中每个序列是两个固定幅度但随机频率和相位的正弦波的总和,加上一点噪声。 > 在处理时间序列(以及其他类型的序列,例如句子)时,输入特征通常表示为形状 [批量大小、时间步长、维数] 的 3D 数组,其中对于单变量时间序列维数为 1,对于多变量时间维数更多 系列。 现在让我们使用这个函数创建一个训练集、一个验证集和一个测试集:n_steps = 50 series = generate_time_series(10000, n_steps + 1) X_train, y_train = series[:7000, :n_steps], series[:7000, -1] X_valid, y_valid = series[7000:9000, :n_steps], series[7000:9000, -1] X_test, y_test = series[9000:, :n_steps], series[9000:, -1]time: 95.5 ms (started: 2021-08-20 17:39:08 +08:00)X_train 包含 7,000 个时间序列(即其形状为 [7000, 50, 1]),而 X_valid 包含 2,000 个(从第 7,000 个时间序列到第 8,999 个)和 X_test 包含 1,000 个(从第 9,000 个到第 9,999 个) . 
由于我们想为每个系列预测单个值,因此目标是列向量(例如,y_train 的形状为 [7000, 1])。 基线指标 在我们开始使用 RNN 之前,拥有一些基线指标通常是个好主意,否则我们最终可能会认为我们的模型效果很好,但实际上它比基本模型做得更差。 例如,最简单的方法是预测每个系列中的最后一个值。 这被称为天真的预测,有时要超越它是非常困难的。 在这种情况下,它给我们的均方误差约为 0.020:y_pred = X_valid[:, -1] np.mean(keras.losses.mean_squared_error(y_valid, y_pred))另一种简单的方法是使用完全连接的网络。 由于每个输入都需要一个扁平的特征列表,我们需要添加一个 Flatten 层。 让我们用一个简单的线性回归模型使每个预测都是时间序列中的值:model = keras.models.Sequential([ keras.layers.Flatten(input_shape=[50, 1]), keras.layers.Dense(1) ])time: 83.3 ms (started: 2021-08-20 17:42:27 +08:00)如果我们使用 MSE 损失和默认的 Adam 优化器编译这个模型,然后将其拟合到训练集上 20 个时期并在验证集上对其进行评估,我们会得到大约 0.004 的 MSE。 这比幼稚的方法要好得多! 实现一个简单的 RNN 让我们看看我们是否可以用一个简单的 RNN 打败它:model = keras.models.Sequential([ keras.layers.SimpleRNN(1, input_shape=[None, 1]) ])time: 129 ms (started: 2021-08-20 17:44:04 +08:00)这确实是您可以构建的最简单的 RNN。正如我们在图 15-1 中看到的那样,它只包含一个层和一个神经元。我们不需要指定输入序列的长度(与之前的模型不同),因为循环神经网络可以处理任意数量的时间步长(这就是我们将第一个输入维度设置为 None 的原因)。默认情况下,SimpleRNN 层使用双曲正切激活函数。它的工作原理与我们之前看到的完全一样:初始状态 h(init) 设置为 0,并将它与第一个时间步的值 x(0) 一起传递给单个循环神经元。神经元计算这些值的加权和,并将双曲正切激活函数应用于结果,这给出了第一个输出 y0。在一个简单的 RNN 中,这个输出也是新状态 h0。这个新状态与下一个输入值 x(1) 一起传递给同一个循环神经元,并重复该过程直到最后一个时间步。然后该层只输出最后一个值,y49。所有这些都是针对每个时间序列同时执行的。 > 默认情况下,Keras 中的循环层仅返回最终输出。 为了让它们在每个时间步返回一个输出,您必须设置 return_sequences=True,正如我们将看到的。 如果你编译、拟合和评估这个模型(就像之前,我们使用 Adam 训练了 20 个 epochs),你会发现它的 MSE 只达到 0.014,所以它比朴素的方法好,但它没有击败简单的线性 模型。 请注意,对于每个神经元,线性模型的每个输入和每个时间步都有一个参数,加上一个偏置项(在我们使用的简单线性模型中,总共有 51 个参数)。 相比之下,对于简单 RNN 中的每个循环神经元,每个输入和每个隐藏状态维度只有一个参数(在简单 RNN 中,这只是层中循环神经元的数量),加上一个偏置项。 在这个简单的 RNN 中,总共只有三个参数。 **趋势和季节性**--- 还有许多其他模型可以预测时间序列,例如加权移动平均模型或自回归积分移动平均 (ARIMA) 模型。其中一些要求您首先删除趋势和季节性。例如,如果您正在研究网站上的活跃用户数量,并且每月增长 10%,则您必须从时间序列中删除此趋势。一旦模型经过训练并开始进行预测,您就必须重新添加趋势以获得最终预测。同样,如果您试图预测每个月的防晒乳液销量,您可能会观察到很强的季节性:因为它每年夏天都卖得很好,所以每年都会重复类似的模式。您必须从时间序列中去除这种季节性,例如通过计算每个时间步的值与一年前的值之间的差异(这种技术称为差分)。同样,在模型经过训练并做出预测后,您必须重新添加季节性模式以获得最终预测。使用 RNN 时,通常没有必要做所有这些,但在某些情况下它可能会提高性能,因为模型不必学习趋势或季节性。--- 显然,我们的简单 RNN 太简单了,无法获得良好的性能。 所以让我们尝试添加更多的循环层! 
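As a quick check of the parameter counts just discussed (51 for the linear model, 3 for the single-neuron SimpleRNN), the following sketch simply builds both models and asks Keras to count; it assumes `keras` is imported from TensorFlow as in the earlier cells:

```
from tensorflow import keras

linear = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[50, 1]),
    keras.layers.Dense(1),                              # 50 weights + 1 bias = 51
])
simple_rnn = keras.models.Sequential([
    keras.layers.SimpleRNN(1, input_shape=[None, 1]),   # 1 input weight + 1 recurrent weight + 1 bias = 3
])
print(linear.count_params(), simple_rnn.count_params())  # 51 3
```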
深度 RNN 堆叠多层单元是很常见的,如图 15-7 所示。 这为您提供了深度 RNN。 ![](https://tva1.sinaimg.cn/large/008i3skNgy1gtnem0u4dnj61420js76t02.jpg) 使用 tf.keras 实现深度 RNN 非常简单:只需堆叠循环层。 在这个例子中,我们使用了三个 SimpleRNN 层(但我们可以添加任何其他类型的循环层,例如 LSTM 层或 GRU 层,我们将在稍后讨论):model = keras.models.Sequential([ keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]), keras.layers.SimpleRNN(20, return_sequences=True), keras.layers.SimpleRNN(1) ])time: 208 ms (started: 2021-08-20 17:57:06 +08:00)> 确保为所有循环层设置 return_sequences=True (除了最后一个,如果你只关心最后一个输出)。 如果你不这样做,他们将输出一个 2D 数组(只包含最后一个时间步的输出)而不是一个 3D 数组(包含所有时间步的输出),并且下一个循环层会抱怨你没有喂它 预期 3D 格式的序列。 如果您编译、拟合和评估此模型,您会发现它达到了 0.003 的 MSE。我们终于成功击败了线性模型!请注意,最后一层并不理想:它必须有一个单位,因为我们要预测单变量时间序列,这意味着每个时间步长必须有一个输出值。但是,具有单个单元意味着隐藏状态只是一个数字。这真的不多,而且可能没那么有用;据推测,RNN 将主要使用其他循环层的隐藏状态来携带它在时间步到时间步所需的所有信息,并且不会非常多地使用最后一层的隐藏状态。此外,由于 SimpleRNN 层默认使用 tanh 激活函数,因此预测值必须在 –1 到 1 的范围内。但是如果您想使用另一个激活函数怎么办?由于这两个原因,最好用 Dense 层替换输出层:它会运行得稍微快一点,准确率大致相同,并且允许我们选择我们想要的任何输出激活函数。如果您进行此更改,还要确保从第二个(现在是最后一个)循环层中删除 return_sequences=True :model = keras.models.Sequential([ keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]), keras.layers.SimpleRNN(20), keras.layers.Dense(1) ])time: 157 ms (started: 2021-08-20 18:02:17 +08:00)如果你训练这个模型,你会发现它收敛得更快,性能也一样好。 另外,您可以根据需要更改输出激活函数。 提前预测几个时间步 到目前为止,我们只预测了下一个时间步的值,但是我们可以通过适当地更改目标(例如,要预测提前 10 步,只需将目标值更改为 10 领先一步而不是领先一步)。 但是如果我们想预测接下来的 10 个值呢?第一种选择是使用我们已经训练过的模型,让它预测下一个值,然后将该值添加到输入中(就像这个预测值确实发生过一样),然后再次使用该模型来预测下一个值,并且 依此类推,如以下代码所示:series = generate_time_series(1, n_steps + 10) X_new, Y_new = series[:, :n_steps], series[:, n_steps:] X = X_new for step_ahead in range(10): y_pred_one = model.predict(X[:, step_ahead:])[:, np.newaxis, :] X = np.concatenate([X, y_pred_one], axis=1) Y_pred = X[:, n_steps:]正如您所料,下一步的预测通常比后续时间步的预测更准确,因为错误可能会累积(如图 15-8 所示)。 如果您在验证集上评估此方法,您会发现 MSE 约为 0.029。 这比之前的模型要高得多,但也是一项艰巨的任务,因此比较没有多大意义。 将此性能与朴素预测(仅预测时间序列将在 10 个时间步长内保持不变)或与简单的线性模型进行比较更有意义。 天真的方法很糟糕(它给出的 MSE 约为 0.223),但线性模型给出的 MSE 约为 0.0188:它比使用我们的 RNN 一步一步地预测未来要好得多,而且训练和训练速度也快得多。 跑。 不过,如果您只想提前预测几个时间步,在更复杂的任务上,这种方法可能会很有效。 ![](https://tva1.sinaimg.cn/large/008i3skNgy1gtnewx8zqaj61fc0qg41p02.jpg) 第二种选择是训练 RNN 一次预测所有 10 个下一个值。 我们仍然可以使用序列到向量模型,但它会输出 10 个值而不是 1。但是,我们首先需要将目标更改为包含接下来 10 个值的向量:series = generate_time_series(10000, n_steps + 10) X_train, Y_train = series[:7000, :n_steps], series[:7000, -10:, 0] X_valid, Y_valid = series[7000:9000, :n_steps], series[7000:9000, -10:, 0] X_test, Y_test = series[9000:, :n_steps], series[9000:, -10:, 0]time: 99.7 ms (started: 2021-08-20 18:11:48 +08:00)现在我们只需要输出层有 10 个单元而不是 1 个:model = keras.models.Sequential([ keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]), keras.layers.SimpleRNN(20), keras.layers.Dense(10) ])time: 158 ms (started: 2021-08-20 18:13:24 +08:00)训练此模型后,您可以非常轻松地一次预测接下来的 10 个值:Y_pred = model.predict(X_new)该模型运行良好:接下来 10 个时间步的 MSE 约为 0.008。 这比线性模型好得多。 但是我们仍然可以做得更好:实际上,我们可以训练模型在每个时间步预测接下来的 10 个值,而不是仅在最后一个时间步训练模型来预测接下来的 10 个值。 换句话说,我们可以把这个序列到向量的 RNN 变成一个序列到序列的 RNN。 这种技术的优点是损失将包含每个时间步的 RNN 输出项,而不仅仅是最后一个时间步的输出。 这意味着将有更多的误差梯度流经模型,而且它们不必只流经时间; 它们也将从每个时间步的输出中流出。 这将稳定并加速训练。 需要明确的是,在时间步长 0 时,模型将输出包含时间步长 1 到 10 的预测的向量,然后在时间步长 1 时,模型将预测时间步长 2 到 11,依此类推。 所以每个目标必须是一个与输入序列长度相同的序列,每一步都包含一个10维的向量。 让我们准备这些目标序列:Y = np.empty((10000, n_steps, 10)) # each target is a sequence of 10D vectors for step_ahead in range(1, 10 + 1): Y[:, :, step_ahead - 1] = series[:, step_ahead:step_ahead + n_steps, 0] Y_train = Y[:7000] Y_valid = Y[7000:9000] Y_test = Y[9000:]> 目标将包含出现在输入中的值可能令人惊讶(X_train 和 Y_train 之间有很多重叠)。 这不是作弊吗? 
幸运的是,一点也不:在每个时间步,模型只知道过去的时间步,所以它不能向前看。 据说这是一个因果模型。 要将模型变成序列到序列模型,我们必须在所有循环层(甚至最后一层)中设置 return_sequences=True,并且我们必须在每个时间步应用输出 Dense 层。 Keras 为此提供了一个 TimeDistributed 层:它包装任何层(例如,密集层)并在其输入序列的每个时间步长处应用它。它通过重塑输入来有效地做到这一点,以便每个时间步都被视为一个单独的实例(即,它将输入从 [批量大小、时间步长、输入维度] 重塑为 [批量大小 × 时间步长、输入维度];在这个例子中,输入维数为 20,因为前面的 SimpleRNN 层有 20 个单元),然后运行 ​​Dense 层,最后将输出重新整形回序列(即,将输出从 [batch size × time步骤,输出维度]到[批量大小,时间步长,输出维度];在这个例子中,输出维度的数量是10,因为密集层有10个单位)。这是更新后的模型:model = keras.models.Sequential([ keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]), keras.layers.SimpleRNN(20, return_sequences=True), keras.layers.TimeDistributed(keras.layers.Dense(10)) ])Dense 层实际上支持序列作为输入(甚至更高维度的输入):它像 TimeDistributed(Dense(...)) 一样处理它们,这意味着它只应用于最后一个输入维度(独立于所有时间步)。 因此,我们可以只用 Dense(10) 替换最后一层。 然而,为了清楚起见,我们将继续使用 TimeDistributed(Dense(10)),因为它清楚地表明 Dense 层在每个时间步独立应用,并且模型将输出一个序列,而不仅仅是单个向量。 训练期间需要所有输出,但只有最后一个时间步的输出对预测和评估有用。 因此,尽管我们将依赖于所有输出的 MSE 进行训练,但我们将使用自定义指标进行评估,只计算最后一个时间步输出的 MSE:def last_time_step_mse(Y_true, Y_pred): return keras.metrics.mean_squared_error(Y_true[:, -1], Y_pred[:, -1]) optimizer = keras.optimizers.Adam(lr=0.01) model.compile(loss="mse", optimizer=optimizer, metrics=[last_time_step_mse])time: 70.3 ms (started: 2021-08-20 19:02:28 +08:00)我们得到了大约 0.006 的验证 MSE,比之前的模型好 25%。 您可以将此方法与第一种方法结合使用:只需使用此 RNN 预测接下来的 10 个值,然后将这些值连接到输入时间序列并再次使用模型预测接下来的 10 个值,并根据需要重复该过程多次 . 使用这种方法,您可以生成任意长的序列。 对于长期预测,它可能不是很准确,但如果您的目标是生成原创音乐或文本,这可能很好,我们将在第 16 章中看到。 在预测时间序列时,在您的预测中包含一些误差线通常很有用。 为此,一种有效的技术是 MC Dropout,在第 11 章介绍:在每个存储单元中添加一个 MC Dropout 层,丢弃部分输入和隐藏状态。 训练后,要预测新的时间序列,请多次使用该模型并计算每个时间步预测的均值和标准差。 简单的 RNN 可以很好地预测时间序列或处理其他类型的序列,但它们在长时间序列或序列上表现不佳。 让我们讨论原因,看看我们能做些什么。 处理长序列 要在长序列上训练 RNN,我们必须在许多时间步上运行它,使展开的 RNN 成为一个非常深的网络。 就像任何深度神经网络一样,它可能会遇到梯度不稳定问题,这在第 11 章中讨论过:训练可能需要很长时间,或者训练可能不稳定。 此外,当 RNN 处理一个长序列时,它会逐渐忘记序列中的第一个输入。 让我们看看这两个问题,从不稳定梯度问题开始。 解决不稳定梯度问题 我们在深度网络中用于缓解不稳定梯度问题的许多技巧也可以用于 RNN:良好的参数初始化、更快的优化器、dropout 等。然而,非饱和激活函数(例如 ReLU)在这里可能没有多大帮助;事实上,它们实际上可能导致 RNN 在训练过程中更加不稳定。为什么?好吧,假设梯度下降以一种在第一时间步长略微增加输出的方式更新权重。因为在每个时间步都使用相同的权重,所以第二个时间步的输出也可能会略微增加,第三步的输出,依此类推,直到输出爆炸——非饱和激活函数并不能阻止这一点。您可以通过使用较小的学习率来降低这种风险,但您也可以简单地使用像双曲正切这样的饱和激活函数(这解释了为什么它是默认值)。以同样的方式,梯度本身可以爆炸。如果您注意到训练不稳定,您可能需要监控梯度的大小(例如,使用 TensorBoard)并可能使用 Gradient Clipping。 此外,RNN 不能像深度前馈网络那样有效地使用批归一化。事实上,你不能在时间步之间使用它,只能在循环层之间使用。更准确地说,在技术上可以将 BN 层添加到存储单元(我们将很快看到),以便在每个时间步(在该时间步的输入和隐藏状态)上应用它上一步)。但是,无论输入和隐藏状态的实际规模和偏移如何,每个时间步都将使用相同的 BN 层,具有相同的参数。在实践中,这不会产生好的结果,正如 César Laurent 等人所证明的那样。在 2015 年的一篇论文中:作者发现 BN 仅在应用于输入时略微有益,而不是应用于隐藏状态。换句话说,当应用在循环层之间(即图 15-7 中的垂直),而不是循环层内(即水平)时,它总比没有好。在 Keras 中,这可以通过在每个循环层之前添加一个 Batch Normalization 层来完成,但不要期望太多。 另一种形式的归一化通常更适合 RNN:层归一化。 这个想法是由 等人提出的。 在 2016 年的一篇论文中:它与 Batch Normalization 非常相似,但它不是跨批处理维度进行归一化,而是跨特征维度进行归一化。 一个优点是它可以在每个时间步独立地为每个实例动态计算所需的统计数据。 这也意味着它在训练和测试期间的行为方式相同(与 BN 不同),并且不需要使用指数移动平均值来估计训练集中所有实例的特征统计数据。 与 BN 一样,Layer Normalization 为每个输入学习一个尺度和一个偏移参数。 在 RNN 中,它通常在输入和隐藏状态的线性组合之后立即使用。 让我们使用 tf.keras 在一个简单的内存单元中实现层归一化。 为此,我们需要定义一个自定义内存单元。 它就像一个普通层,除了它的 call() 方法有两个参数:当前时间步的输入和前一时间步的隐藏状态。 请注意, states 参数是一个包含一个或多个张量的列表。 在一个简单的 RNN 单元的情况下,它包含一个等于前一个时间步的输出的张量,但其他单元可能有多个状态张量(例如,LSTMCell 有一个长期状态和一个短期状态,因为我们 很快就会看到)。 单元格还必须具有 state_size 属性和 output_size 属性。 在简单的 RNN 中,两者都简单地等于单元数。 以下代码实现了一个自定义内存单元,它的行为类似于 SimpleRNNCell,除了它还会在每个时间步应用层归一化:class LNSimpleRNNCell(keras.layers.Layer): def __init__(self, units, activation="tanh", **kwargs): super().__init__(**kwargs) self.state_size = units self.output_size = units self.simple_rnn_cell = keras.layers.SimpleRNNCell(units, activation=None) self.layer_norm = keras.layers.LayerNormalization() 
self.activation = keras.activations.get(activation) def call(self, inputs, states): outputs, new_states = self.simple_rnn_cell(inputs, states) norm_outputs = self.activation(self.layer_norm(outputs)) return norm_outputs, [norm_outputs]time: 1.97 ms (started: 2021-08-20 19:10:55 +08:00)代码非常简单。我们的 LNSimpleRNNCell 类继承自 keras.layers.Layer 类,就像任何自定义层一样。构造函数获取单元数和所需的激活函数,并设置 state_size 和 output_size 属性,然后创建一个没有激活函数的 SimpleRNNCell(因为我们希望在线性操作之后但在激活函数之前执行层归一化)。然后构造函数创建 LayerNormalization 层,最后它获取所需的激活函数。 call() 方法首先应用简单的 RNN 单元,它计算当前输入和先前隐藏状态的线性组合,并返回两次结果(实际上,在 SimpleRNNCell 中,输出刚好等于隐藏状态) :换句话说,new_states[0] 等于输出,因此我们可以安全地忽略 call() 方法其余部分中的 new_states。接下来, call() 方法应用层归一化,然后是激活函数。最后,它返回两次输出(一次作为输出,一次作为新的隐藏状态)。要使用这个自定义单元格,我们需要做的就是创建一个 keras.layers.RNN 层,并传递一个单元格实例:model = keras.models.Sequential([ keras.layers.RNN(LNSimpleRNNCell(20), return_sequences=True, input_shape=[None, 1]), keras.layers.RNN(LNSimpleRNNCell(20), return_sequences=True), keras.layers.TimeDistributed(keras.layers.Dense(10)) ])同样,您可以创建一个自定义单元格以在每个时间步之间应用 dropout。 但是有一个更简单的方法:所有循环层(除了 keras.layers.RNN)和 Keras 提供的所有单元格都有一个 dropout 超参数和一个 recurrent_dropout 超参数:前者定义了应用于输入的 dropout 率(在每个时间步) ,后者定义了隐藏状态的丢失率(也在每个时间步)。 无需创建自定义单元来在 RNN 中的每个时间步应用 dropout。使用这些技术,您可以缓解不稳定梯度问题并更有效地训练 RNN。 现在让我们看看如何处理短期记忆问题。 解决短期记忆问题 由于数据在遍历 RNN 时经历的转换,每个时间步都会丢失一些信息。 一段时间后,RNN 的状态几乎不包含第一个输入的痕迹。 这可能是一个亮点。 想象一下小鱼多莉试图翻译一个长句; 当她读完它时,她不知道它是如何开始的。 为了解决这个问题,已经引入了具有长期记忆的各种类型的细胞。 事实证明,它们非常成功,以至于基本电池不再使用太多。 让我们首先看看这些长期记忆单元中最受欢迎的:LSTM 单元。 LSTM 单元 长短期记忆 (LSTM) 单元是由 于 1997 年提出的,并在多年来逐渐得到了几位研究人员的改进,例如 。 如果将 LSTM 单元视为黑盒,则它可以非常像基本单元那样使用,只是性能会好得多; 训练会更快收敛,并且会检测数据中的长期依赖关系。 在 Keras 中,您可以简单地使用 LSTM 层而不是 SimpleRNN 层:model = keras.models.Sequential([ keras.layers.LSTM(20, return_sequences=True, input_shape=[None, 1]), keras.layers.LSTM(20, return_sequences=True), keras.layers.TimeDistributed(keras.layers.Dense(10)) ])time: 440 ms (started: 2021-08-20 19:24:35 +08:00)或者,您可以使用通用的 keras.layers.RNN 层,将 LSTMCell 作为参数:model = keras.models.Sequential([ keras.layers.RNN(keras.layers.LSTMCell(20), return_sequences=True, input_shape=[None, 1]), keras.layers.RNN(keras.layers.LSTMCell(20), return_sequences=True), keras.layers.TimeDistributed(keras.layers.Dense(10)) ])time: 183 ms (started: 2021-08-20 19:25:10 +08:00)但是,LSTM 层在 GPU 上运行时使用了优化的实现(参见第 19 章),因此通常最好使用它(RNN 层在定义自定义单元时最有用,就像我们之前所做的那样)。 那么 LSTM 单元是如何工作的呢? 其架构如图15-9所示。 如果你不看盒子里面的东西,LSTM 单元看起来就像一个普通的单元,除了它的状态被分成两个向量:h(t) 和 c(t)(“c”代表“单元” )。 您可以将 h(t) 视为短期状态,将 c(t) 视为长期状态。 ![](https://tva1.sinaimg.cn/large/008i3skNgy1gtnh8xmw9oj61360pm0va02.jpg) 现在让我们打开盒子! 关键思想是网络可以学习在长期状态下存储什么、丢弃什么以及从中读取什么。 当长期状态 c(t-1) 从左到右遍历网络时,可以看到它首先通过一个遗忘门,丢弃一些记忆,然后通过加法运算添加一些新的记忆(增加 由输入门选择的记忆)。 结果 c(t) 直接发送出去,没有任何进一步的变换。 因此,在每个时间步长中,都会删除一些记忆并添加一些记忆。 而且,在加法运算之后,长期状态被复制并通过tanh函数,然后结果被输出门过滤。 这会产生短期状态 h(t)(它等于该时间步长中单元格的输出 y(t))。 现在让我们看看新记忆从何而来以及门是如何工作的。 首先,当前输入向量 x(t) 和之前的短期状态 h(t-1) 被馈送到四个不同的全连接层。 它们都有不同的用途:* 主层是输出 g(t) 的层。 它通常具有分析当前输入 x(t) 和先前(短期)状态 h(t-1) 的作用。 在基本单元格中,除了这一层之外别无其他,其输出直接输出到 y(t) 和 h(t) 。 相比之下,在 LSTM 单元中,该层的输出不会直接输出,而是将其最重要的部分存储在长期状态中(其余部分被丢弃)。* 其他三层是门控制器。 由于它们使用逻辑激活函数,因此它们的输出范围从 0 到 1。如您所见,它们的输出被馈送到逐元素乘法运算,因此如果它们输出 0,则关闭门,如果输出 1,则打开门 . 
Specifically: - The forget gate (controlled by f(t)) controls which parts of the long-term state should be erased. - The input gate (controlled by i(t)) controls which parts of g(t) should be added to the long-term state. - Finally, the output gate (controlled by o(t)) controls which parts of the long-term state should be read and output at this time step, both to h(t) and to y(t).

In short, an LSTM cell can learn to recognize an important input (that's the role of the input gate), store it in the long-term state, preserve it for as long as it is needed (that's the role of the forget gate), and extract it whenever it is needed. This explains why these cells have been amazingly successful at capturing long-term patterns in time series, long texts, audio recordings, and more. Equation 15-3 summarizes how to compute the cell's long-term state, its short-term state, and its output at each time step for a single instance (the equations for a whole mini-batch are very similar). ![](https://tva1.sinaimg.cn/large/008i3skNgy1gtnhdytpoyj61ge0l6wgr02.jpg)

In this equation: * $\mathbf{W}_{xi}$, $\mathbf{W}_{xf}$, $\mathbf{W}_{xo}$, and $\mathbf{W}_{xg}$ are the weight matrices of each of the four layers for their connection to the input vector $x_{(t)}$. * $\mathbf{W}_{hi}$, $\mathbf{W}_{hf}$, $\mathbf{W}_{ho}$, and $\mathbf{W}_{hg}$ are the weight matrices of each of the four layers for their connection to the previous short-term state $h_{(t-1)}$. * $b_{i}$, $b_{f}$, $b_{o}$, and $b_{g}$ are the bias terms for each of the four layers. Note that TensorFlow initializes $b_{f}$ to a vector full of 1s instead of 0s. This prevents forgetting everything at the beginning of training.

Peephole Connections

In a regular LSTM cell, the gate controllers can look only at the input x(t) and the previous short-term state h(t-1). It may be a good idea to give them a bit more context by letting them peek at the long-term state as well. This idea was proposed by Felix Gers and Jürgen Schmidhuber in 2000. They proposed an LSTM variant with extra connections called peephole connections: the previous long-term state c(t-1) is added as an input to the controllers of the forget gate and the input gate, and the current long-term state c(t) is added as an input to the controller of the output gate. This often improves performance, but not always, and there is no clear pattern for which tasks are better off with or without them: you will have to try it on your task and see if it helps. In Keras, the LSTM layer is based on the keras.layers.LSTMCell cell, which does not support peepholes. The experimental tf.keras.experimental.PeepholeLSTMCell does, however, so you can create a keras.layers.RNN layer and pass a PeepholeLSTMCell to its constructor. There are many other variants of the LSTM cell. One particularly popular variant is the GRU cell, which we will look at now.

GRU Cells

The Gated Recurrent Unit (GRU) cell (see Figure 15-10) was proposed by Kyunghyun Cho et al. in a 2014 paper that also introduced the Encoder-Decoder network we discussed earlier. ![](https://tva1.sinaimg.cn/large/008i3skNgy1gtnhlo50d5j61260qumyz02.jpg) The GRU cell is a simplified version of the LSTM cell, and it seems to perform just as well (which explains its growing popularity). These are the main simplifications: * Both state vectors are merged into a single vector h(t). * A single gate controller z(t) controls both the forget gate and the input gate. If the gate controller outputs a 1, the forget gate is open (= 1) and the input gate is closed (1 - 1 = 0). If it outputs a 0, the opposite happens. In other words, whenever a memory must be stored, the location where it will be stored is erased first. This is actually a frequent variant of the LSTM cell in and of itself. * There is no output gate; the full state vector is output at every time step. However, there is a new gate controller r(t) that controls which part of the previous state will be shown to the main layer (g(t)). Equation 15-4 summarizes how to compute the cell's state at each time step for a single instance. ![](https://tva1.sinaimg.cn/large/008i3skNgy1gtni0gar6gj61gi0emjt102.jpg)

Keras provides a keras.layers.GRU layer (based on the keras.layers.GRUCell memory cell); using it is just a matter of replacing SimpleRNN or LSTM with GRU. LSTM and GRU cells are one of the main reasons behind the success of RNNs. Yet while they can tackle much longer sequences than simple RNNs, they still have a fairly limited short-term memory, and they have a hard time learning long-term patterns in sequences of 100 time steps or more, such as audio samples, long time series, or long sentences. One way to solve this is to shorten the input sequences, for example using 1D convolutional layers.

Using 1D Convolutional Layers to Process Sequences

In Chapter 14 we saw that a 2D convolutional layer works by sliding several fairly small kernels (or filters) across an image, producing multiple 2D feature maps (one per kernel). Similarly, a 1D convolutional layer slides several kernels across a sequence, producing a 1D feature map per kernel. Each kernel will learn to detect a single very short sequential pattern (no longer than the kernel size). If you use 10 kernels, the layer's output will be composed of 10 one-dimensional sequences (all of the same length), or equivalently you can view this output as a single 10-dimensional sequence. This means that you can build a neural network composed of a mix of recurrent layers and 1D convolutional layers (or even 1D pooling layers). If you use a 1D convolutional layer with a stride of 1 and "same" padding, the output sequence will have the same length as the input sequence. But if you use "valid" padding or a stride greater than 1, the output sequence will be shorter than the input sequence, so make sure you adjust the targets accordingly. For example, the model below is the same as the previous one, except it starts with a 1D convolutional layer that downsamples the input sequence by a factor of 2, using a stride of 2. The kernel size is larger than the stride, so all inputs will be used to compute the layer's output, and therefore the model can learn to preserve the useful information and drop only the unimportant details. By shortening the sequences, the convolutional layer may help the GRU layers detect longer patterns. Note that we must also crop off the first three time steps in the targets (since the kernel's size is 4, the first output of the convolutional layer will be based on input time steps 0 to 3) and downsample the targets by a factor of 2 (a short sanity check at the end of this section illustrates the alignment):

model = keras.models.Sequential([ keras.layers.Conv1D(filters=20, kernel_size=4, strides=2, padding="valid", input_shape=[None, 1]), keras.layers.GRU(20, return_sequences=True), keras.layers.GRU(20, return_sequences=True), keras.layers.TimeDistributed(keras.layers.Dense(10)) ]) model.compile(loss="mse", optimizer="adam", metrics=[last_time_step_mse]) history = model.fit(X_train, Y_train[:, 3::2], epochs=20, validation_data=(X_valid, Y_valid[:, 3::2]))

If you train and evaluate this model, you will find that it is the best model so far. The convolutional layer really helps. In fact, it is actually possible to use only 1D convolutional layers and drop the recurrent layers entirely!
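To make the target alignment above concrete, here is a minimal sanity check. It is not part of the original notebook and only assumes TensorFlow 2 is available; the sequence length of 50 is an arbitrary choice for illustration. It verifies that a "valid"-padded Conv1D with kernel_size=4 and strides=2 produces (T - 4) // 2 + 1 output steps, which is exactly the number of targets kept by the [:, 3::2] slice used above.

import numpy as np
from tensorflow import keras

T = 50  # arbitrary sequence length, just for the check
X = np.random.rand(1, T, 1).astype(np.float32)
conv = keras.layers.Conv1D(filters=20, kernel_size=4, strides=2, padding="valid")
n_out = conv(X).shape[1]              # (50 - 4) // 2 + 1 = 24 output time steps
n_targets = np.arange(T)[3::2].size   # targets aligned with input steps 3, 5, ..., 49 -> 24
print(n_out, n_targets)               # both are 24, so the shapes line up

Output step t is computed from input steps 2t through 2t + 3, so the matching target is the one attached to input step 2t + 3; that is why the targets are cropped and downsampled with [:, 3::2].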
WaveNet

In a 2016 paper, Aaron van den Oord and other DeepMind researchers introduced an architecture called WaveNet. They stacked 1D convolutional layers, doubling the dilation rate (how spread apart each neuron's inputs are) at every layer: the first convolutional layer gets a glimpse of just two time steps at a time, the next one sees four time steps (its receptive field is four time steps long), the next one sees eight time steps, and so on (see Figure 15-11). This way, the lower layers learn short-term patterns, while the higher layers learn long-term patterns. Thanks to the doubling dilation rate, the network can process extremely large sequences very efficiently. ![](https://tva1.sinaimg.cn/large/008i3skNgy1gtnidaakonj61420hwq7d02.jpg)

In the WaveNet paper, the authors stacked 10 convolutional layers with dilation rates of 1, 2, 4, 8, ..., 256, 512, then they stacked another group of 10 identical layers (again with dilation rates 1, 2, 4, 8, ..., 256, 512), then yet another identical group of 10 layers. They justified this architecture by pointing out that a single stack of 10 convolutional layers with these dilation rates acts like a super-efficient convolutional layer with a kernel of size 1,024 (except way faster, more powerful, and using significantly fewer parameters), which is why they stacked 3 such blocks. They also left-padded the input sequences with a number of zeros equal to the dilation rate before every layer, to preserve the same sequence length throughout the network. Here is how to implement a simplified WaveNet to tackle the same sequences as earlier:

model = keras.models.Sequential() model.add(keras.layers.InputLayer(input_shape=[None, 1])) for rate in (1, 2, 4, 8) * 2: model.add(keras.layers.Conv1D(filters=20, kernel_size=2, padding="causal", activation="relu", dilation_rate=rate)) model.add(keras.layers.Conv1D(filters=10, kernel_size=1)) model.compile(loss="mse", optimizer="adam", metrics=[last_time_step_mse]) history = model.fit(X_train, Y_train, epochs=20, validation_data=(X_valid, Y_valid))

prime numbers between 1 and 200for num in range(1,201): if num > 1: for i in range(2,num): if(num % i) == 0: break else: print(num) else: print("DONE!!!!")2 3 5 7 11 13 17 19 23 29 31 37 41 43 47 53 59 61 67 71 73 79 83 89 97 101 103 107 109 113 127 131 137 139 149 151 157 163 167 173 179 181 191 193 197 199 DONE!!!!

Reflect Tables into SQLAlchemy ORM# Python SQL toolkit and Object Relational Mapper import sqlalchemy import pandas as pd # pandas is used below (pd.read_sql, pd.to_datetime) from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func # create engine to hawaii.sqlite engine = create_engine('sqlite:///hawaii.sqlite') # reflect an existing database into a new model Base = automap_base() # reflect the tables Base.prepare(engine, reflect=True) # View all of the classes that automap found Base.classes.keys() # Save references to each table station = Base.classes.station measurement = Base.classes.measurement # Create our session (link) from Python to the DB session = Session(engine)Exploratory Precipitation Analysis# Finding column names of both tables station_first_row = session.query(station).first() station_first_row.__dict__ # Finding column names of both tables measurement_first_row = session.query(measurement).first() measurement_first_row.__dict__ # Find the most recent date in the data set. data = engine.execute("SELECT MAX(date) FROM measurement") for record in data: print(record) # Design a query to retrieve the last 12 months of precipitation data and plot the results # Starting from the most recent data point in the database. # Calculate the date one year from the last date in data set # Perform a query to retrieve the data and precipitation # Save the query results as a Pandas DataFrame and set the index to the date column data = pd.read_sql("SELECT date, prcp FROM measurement WHERE date >= DATE('2017-08-23', '-12 month')", engine) data # Sort the dataframe by date data['date'] = pd.to_datetime(data['date']) df_sort = data.sort_values('date') df_sort # Use Pandas Plotting with Matplotlib to plot the data df_sort.plot(x='date', y='prcp', kind = 'bar', xticks = []); # Use Pandas to calculate the summary statistics for the precipitation data df_sort.describe()["prcp"]Exploratory Station Analysis# Design a query to calculate the total number of stations in the dataset data_station = engine.execute("SELECT COUNT(DISTINCT station) FROM measurement") for record in data_station: print(record) # Design a query to find the most active stations (i.e.
what stations have the most rows?) data_station = pd.read_sql("SELECT station, COUNT(*) AS count FROM measurement GROUP BY station", engine) # List the stations and the counts in descending order. data_station.sort_values('count', ascending = False) # Using the most active station id from the previous query, calculate the lowest, highest, and average temperature. pd.read_sql("SELECT MIN(tobs) AS Lowest, MAX(tobs) AS Highest, AVG(tobs) AS Average\ FROM measurement WHERE station = 'USC00519281'", engine) # Using the most active station id # Query the last 12 months of temperature observation data for this station data = pd.read_sql("SELECT * FROM measurement\ WHERE date >= DATE('2017-08-23', '-12 month')\ AND station = 'USC00519281'", engine) data['date'] = pd.to_datetime(data['date']) df_sort = data.sort_values('date') # And plot the results as a histogram df_sort['tobs'].plot(kind = 'hist', bins = 12);Close session# Close Session session.close()Describe how the equilibrium interest rate depends of the intertemporal discount factor,$\beta$, and intertemporal substitution/risk aversion, $\sigma$.import numpy as np def util_func(x): return (x**(1-sigma))/(1-sigma) def parameters_objects(): global beta, sigma,delta,alpha,pi,y_domain, a_domain, V, iG, n_y, n_a, w,r beta = 0.90 sigma = 2 delta = 0.08 alpha = 0.44 pi = np.array([[0.4 , 0.5 , 0.1], [0.3 , 0.2 , 0.5 ], [0.2 , 0.4 , 0.4]]) r = 0.05 #we will need some interest rate to maximize our objective function w = 1 #we will need the wage too # the income domain was not defined we will crete one to make our program works properly y_min = 1e-2 y_max = 1 n_y = len(pi) #must have one income for each state y_domain = np.linspace(y_min , y_max, n_y) # neither the assets domain was defined a_max = 10 n_a = 9 #three assets distributions for each income state # we need a non-Ponzi condition for this case barA = 0 # no borrow allowed a_domain = np.linspace(-barA, a_max, n_a) #Now we just neeed some place to store our value function and policy function V = np.zeros((n_y, n_a)) #value iG = np.zeros((n_y, n_a), dtype = np.int) def build_objective(V): global n_y, n_a, w, r F_OBJ = np.zeros((n_y, n_a, n_a)) #one dimension to each income, asset, and future asset #looping through all income, assets and future assets for i_y in range(n_y): y = y_domain[i_y] for i_a in range(n_a): a = a_domain[i_a] for i_a_line in range(n_a): #i_a_line stands for future assets aa = a_domain[i_a_line] c = w*y + a - aa/(1+r) #consumption if c <= 0: F_OBJ[i_y, i_a, i_a_line] = -np.inf else: F_OBJ[i_y, i_a, i_a_line] = util_func(c) + beta*(np.dot(pi[i_y, :],V[:, i_a_line])) return F_OBJ def maximize_TV_IG(F_OBJ): #maximizing for time t TV = np.zeros((n_y, n_a)) T_iG = np.zeros((n_y, n_a), dtype = np.int) for i_y, y in enumerate(y_domain): for i_a, a in enumerate(a_domain): TV[i_y, i_a] = np.max(F_OBJ[i_y, i_a, :]) # max value of f_obj T_iG[i_y, i_a] = np.argmax(F_OBJ[i_y, i_a, :]) # position associated to (y,a) pair that maximizes F_OBJ return TV, T_iG def compute_V_G_est(): global V norm, tol = 2, 1e-7 while norm>tol: F_OBJ = build_objective(V) TV, T_iG = maximize_TV_IG(F_OBJ) norm = np.max(abs(TV - V)) V = np.copy(TV) iG = np.copy(T_iG) return V, iG def compute_Q(iG): #This function gives a markov transition function, Q_{r} to compute, a stationary measure phi_{r} #associated associated to this transition function Q = np.zeros((n_y*n_a, n_y*n_a)) for i_y in range(n_y): for i_a in range(n_a): c_state = i_y*n_a + i_a for i_y_line in range(n_y): for i_a_line in range(n_a): n_state = 
i_y_line*n_a + i_a_line if iG[i_y, i_a] == i_a_line: Q[c_state, n_state] += pi[i_y, i_y_line] return Q def compute_phi(Q): global phi phi = np.ones(n_y*n_a) / (n_y*n_a) norm_Q, tol_Q = 1, 1e-6 while norm_Q > tol_Q: T_phi = np.dot(phi, Q) norm_Q = max(abs(T_phi - phi)) phi = np.copy(T_phi) return phi def compute_Ea(phi): Ea = 0 for i_y in range(n_y): for i_a in range(n_a): s_index = iG[i_y, i_a] savings = a_domain[s_index] t_index = i_y * n_a + i_a size = phi[t_index] Ea += savings*size return Ea def compute_L(): L = 0 for i_y in range(n_y): for i_a in range(n_a): labor_supply = y_domain[i_y] t_index = i_y*n_a + i_a size_l = phi[t_index] L += labor_supply*size_l return L def compute_k(r): k =(alpha/(r+delta))**(1/(1-alpha)) return k def compute_w(k): w = (1-alpha)*(k**alpha) return w def compute_d(phi): k = compute_k(r) L = compute_L() K = k*L ea = compute_Ea(phi) d = K - ea return d def compute_equilibrium(): global r, w, V, iG, Q, phi, L, k rho = beta**(-1)-1 r_1, r_2 = -delta, rho norm_r, tol_r = 1, 1e-10 while norm_r>tol_r: r = (r_1+r_2)/2 k = compute_k(r) V, iG = compute_V_G_est() Q = compute_Q(iG) phi = compute_phi(Q) d = compute_d(phi) if d>0: r_1 = r elif d<0: r_2 = r norm_r = abs(r_1-r_2) print('[d,r_L,r_H,norm]=[{:9.6f},{:9.6f},{:9.6f},{:9.6f}]'.format(d,r_1,r_2,norm_r)) parameters_objects() compute_equilibrium()O capital de equilíbrio para essa taxa de juros é 15.285508234945167 [d,r_L,r_H,norm]=[ 5.200731, 0.015556, 0.111111, 0.095556] O capital de equilíbrio para essa taxa de juros é 7.410221785681968 [d,r_L,r_H,norm]=[-0.648122, 0.015556, 0.063333, 0.047778] O capital de equilíbrio para essa taxa de juros é 10.261864804092587 [d,r_L,r_H,norm]=[ 1.838470, 0.039444, 0.063333, 0.023889] O capital de equilíbrio para essa taxa de juros é 8.655871080723847 [d,r_L,r_H,norm]=[ 0.430048, 0.051389, 0.063333, 0.011944] O capital de equilíbrio para essa taxa de juros é 7.995346467855122 [d,r_L,r_H,norm]=[ 0.079274, 0.057361, 0.063333, 0.005972] O capital de equilíbrio para essa taxa de juros é 7.694114939130894 [d,r_L,r_H,norm]=[-0.080695, 0.057361, 0.060347, 0.002986] O capital de equilíbrio para essa taxa de juros é 7.8424750020861955 [d,r_L,r_H,norm]=[-0.001908, 0.057361, 0.058854, 0.001493] O capital de equilíbrio para essa taxa de juros é 7.918335257752005 [d,r_L,r_H,norm]=[ 0.038377, 0.0[...]Exploring Netflix Data This notebook follows the [CRISP-DM](https://paginas.fe.up.pt/~ec/files_0405/slides/02%20CRISP.pdf) process to analyze the [Netflix Movies and TV Shows on Kaggle.](https://www.kaggle.com/shivamb/netflix-shows)This dataset consists of tv shows and movies available on Netflix as of 2019. The dataset is collected from Flixable which is a third-party Netflix search engine.In 2018, they released an interesting [report](https://flixable.com/netflix-museum/) which shows that the number of TV shows on Netflix has nearly tripled since 2010. The streaming service’s number of movies has decreased by more than 2,000 titles since 2010, while its number of TV shows has nearly tripled. It will be interesting to explore what all other insights can be obtained from the same dataset.Integrating this dataset with other external datasets such as IMDB ratings, rotten tomatoes can also provide many interesting findings. 
Business UnderstandingThese are the guiding questions that drive this analysis* In what month of the year, does the most new content arrives on netflix?* What is the average delay between release of a show and its arrival on netflix?* How does the different genres relate to the target audience belonging to different age groups?* Which Country has most content available on netflix? Data Understanding Setupimport matplotlib.pyplot as plt import pandas as pd import seaborn as sns from matplotlib.ticker import FuncFormatter from collections import Counter %matplotlib inline pd.set_option('display.max_columns', 100) pd.set_option('display.max_rows', 100)As the data is already collected for us by Flixable, and made accessible by Kaggle, we move onto next steps in the CRISP-DM cycle. Data Readingdf = pd.read_csv('netflix_titles.csv') df.head()Getting to know Data First we will have a look at the total number of rows and column in the datadf.shapeThe data types for every column can be found as:df.info() RangeIndex: 7787 entries, 0 to 7786 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 show_id 7787 non-null object 1 type 7787 non-null object 2 title 7787 non-null object 3 director 5398 non-null object 4 cast 7069 non-null object 5 country 7280 non-null object 6 date_added 7777 non-null object 7 release_year 7787 non-null int64 8 rating 7780 non-null object 9 duration 7787 non-null object 10 listed_in 7787 non-null object 11 description 7787 non-null object dtypes: int64(1), object(11) memory usage: 730.2+ KBBelow we can see the total missing values in each of the fields.df.isna().sum()Data Wrangling First we will convert the string column to datetime and separate out the month and year columns for our analysis.For this analysis, we will remove any rows with missing values for the *date_added* column.df.dropna(subset=['date_added'], inplace=True) df['date_added'] = pd.to_datetime(df['date_added']) df['added_year']= df['date_added'].dt.year df['added_month']= df['date_added'].dt.month_name().str.slice(stop=3) df.head()Finding Results In what month of the year, does the most new content arrives on netflix?hue_order = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] def plot_month_wise_data(data, hue=None, hue_order=None): """ Plots Number of media items added per month to netflix :param data: dataframe :param hue: list :param hue_order: string :return: None """ plt.figure(figsize=(8 , 8)) ax = sns.countplot(data=data, x='added_month', order=hue_order, hue=hue) ax.set(xlabel='Months', ylabel='Number of items added', title='Media Added per Month on Netflix') plt.show() plot_month_wise_data(df, hue_order=hue_order)The above graph shows the arrival of content each month on netflix. For a better comparison, let's create another graph that displays data for movies and TV shows separately.plot_month_wise_data(df, hue_order=hue_order, hue='type')From the above graphs, it can be seen that most content gets uploaded to netlflix in December followed by October and then January. This can lead us to conclusion that Netflix adds new data near holiday season. What is the average delay between release of a show and its arrival on netflix. 
Let's start with finding the number of shows released each year and the numberof shows added per year in our data.release_year = df.release_year.value_counts() added_year = df.added_year.value_counts() def create_line_plot_between_added_released_year(rel_year, add_year): """ Plots a line graph between the added and released year data :param rel_year: series :param add_year: series :return: None """ plt.figure(figsize=(15 , 6)) ax = sns.lineplot(data=rel_year) ax.xaxis.set_major_formatter(FuncFormatter(lambda x, _: int(x))) sns.lineplot(data=add_year) plt.legend(['Release Year', 'Added Year']) plt.show() create_line_plot_between_added_released_year(release_year, added_year)As we can see from above graph, Netflix had media that got released in late 1940s, but this graph does not provides any meaningful insight due to the fact of the large difference between oldest data available and release on netflix. Therefore, we take into account only the media added or released during last 2 decades.create_line_plot_between_added_released_year(release_year[:20], added_year)Since there are outliers in the release data, mean might not be the most accurate measure to use here. We therefore plot the median of difference between movies released and added every year since 2000. In easy words, This graph will tell us the difference of how many years it took on average for a media released in that year to arrive on netflix.diff_med = {} for year in range(2000, 2021): rows = df[df['release_year'] == year] dif = rows['added_year'].median() - year diff_med[year] = dif diff_median = pd.Series(diff_med) diff_median plt.figure(figsize=(12 , 8)) ax = sns.lineplot(data=diff_median, marker='o') ax.xaxis.set_major_formatter(FuncFormatter(lambda x, _: int(x))) ax.set(xlabel='Year of Release', ylabel='Average difference in years', title='Median of difference between release and added date of media') plt.show()How does the different genres relate to the target audience belonging to different age groups?As we can see from the data, that a media might belong to multiple genres. For reference, we print the first 10 members of the `listed_in` parameter. 
And then we proceed on to finding all the unique genres.all_genres = df['listed_in'].unique() all_genres[:10] genres_set = set() for gen in all_genres: separate_genres = [word.strip() for word in gen.split(',')] genres_set.update(separate_genres) genres = list(genres_set) genresWe then try to find out the total number of shows belonging to each unique genre.all_genre_list = df['listed_in'].str.split(',').explode().str.strip().to_list() genre_counts = Counter(all_genre_list) genre_countsAs we can see, this data is unsorted and is difficult to understand in first glance, we sort the genres in the ascending order, and then plot a bar graph for visual representation.sorted_genres = dict(sorted(genre_counts.items(), key=lambda item: item[1], reverse=True)) sorted_genres plt.figure(figsize=(10 , 10)) plt.bar(sorted_genres.keys(), sorted_genres.values()) plt.xticks(rotation='vertical') plt.xlabel('Genre') plt.ylabel('Count of Shows') plt.title('Count of Shows per Genre') plt.show()Since there are total of 42 unique genres, We pick the top 5 most popular genres and plot the various genres the shows belong to.top_five = list(sorted_genres.keys())[:5] top_five df['rating'].value_counts() for top in top_five: plt.figure(figsize=(4 , 4)) rating_records = df[df['listed_in'].str.contains(top)] ax = sns.countplot(data=rating_records, x='rating') ax.set(xlabel='Rating', ylabel='Number of movies/shows', title=top) plt.xticks(rotation='vertical') plt.show()Which Country has most content available on netflix?For this comparison, we being by dropping records that do not have country data available. Furthermore, we also pick the first country where multiple countries are listed for a better graphical approach.Finally, we plot a pie chart of the top 20 countries with percent of data available. We are not including all countries as the graph becomes difficult to understand.df['country'].value_counts() df.dropna(subset=['country'], inplace=True) df['country'] = df.country.str.split(',').str[0] plt.figure(figsize=(15 , 8)) ax = sns.countplot(data=df, x='country') plt.xticks(rotation='vertical') plt.show()As evident from above, the United States is a clear winner. However, there are se many countries that it is very hard to read the graph. For a clearer representation, we take the top 20 countries and display pie chart with the precent of data for each country.df['country'].value_counts()[:20].plot.pie(autopct='%1.1f%%',figsize=(10,10));HetSim DemoWe will now demonstrate the use of HetSim for an example scenarios. * We will first change an existing target model* We will then write an application for it and run it through a) detailed simulation, and b) HetSim Install Dependencies* Install LLVM (version > 10.0) by following instructions from https://llvm.org/docs/GettingStarted.html* Install any cross-compilers required for the target * This example uses an Arm gcc that you can install by running `sudo apt install g++-arm-linux-gnueabihf` Build gem5We will build gem5 by running a convenience script inside `scripts/`.%cd scripts !VERBOSE=0 CC=/usr/bin/gcc CXX=/usr/bin/g++ bash build-gem5.sh %cd ../home/subh/research/hetsim-rel/scripts [0]: Starting gem5 build for TimingSimpleCPU [1]: gem5 build succeeded [2]: Compiling m5threads library Makefile:50: warning: overriding recipe for target 'test_omp' Makefile:42: warning: ignoring old recipe for target 'test_omp' make: '../pthread.o' is up to date. 
[3]: build-gem5.sh successfully exiting /home/subh/research/hetsim-relConstruct a gem5 Model for the TargetConsider a programmable target composed of two types of PEs - **worker** and **manager**. * All PEs share a D-Cache, a DSPM, and the main memory* Each PE has a private instruction cache* The manager distributes work to the workers via _FIFO queues_ Tweak the Existing ModelWe will make two changes to the existing model.* Change number of workers from 4 &8594; 16* Change the depth of each work queue from 4 &8594; 6 Step 1: Change Macros and Python Bindings%cd example/model # change the relevant define in params.h !sed -i 's/#define NUM_WORKER.*/#define NUM_WORKER 8/g' params.h !sed -i 's/#define WQ_DEPTH.*/#define WQ_DEPTH 6/g' params.h # generate Python bindings !make %cd ../../home/subh/research/hetsim-rel/example/model swig -python -module params params.h gcc -c -fpic params_wrap.c -I/usr/include/python2.7 -o params_wrap.o gcc -shared params_wrap.o -o _params.so /home/subh/research/hetsim-relStep 2: Reflect Changes in User Spec and Libraries * Assign PE IDs to the new worker PEs and queue IDs for the queues corresponding the new workers# this step should be done manually! !sed -i 's/\[1, 2, 3, 4\]/[1, 2, 3, 4, 5, 6, 7, 8]/g' spec/spec.json* Generate code to connect the queues in the `gem5` model and the emulation and TRE libraries%cd scripts !python2 populate_init_queues.py !VERBOSE=0 bash build-gem5.sh %cd ../home/subh/research/hetsim-rel/scripts [0]: Starting gem5 build for TimingSimpleCPU [1]: gem5 build succeeded [2]: Compiling m5threads library Makefile:50: warning: overriding recipe for target 'test_omp' Makefile:42: warning: ignoring old recipe for target 'test_omp' make: '../pthread.o' is up to date. [3]: build-gem5.sh successfully exiting /home/subh/research/hetsim-relStep 3: Regenerate Compiler PluginFinally, we regenerate the compiler plugin and build the tracing library.%cd scripts !python generate_model.py ../spec/spec.json > /dev/null %cd ../tracer %mkdir -p build %cd build !cmake .. > /dev/null && make %cd ../../home/subh/research/hetsim-rel/scripts /home/subh/research/hetsim-rel/tracer /home/subh/research/hetsim-rel/tracer/build /usr/bin/ar: creating t.a Scanning dependencies of target LLVMHetsim [ 20%] Building CXX object compiler-pass/CMakeFiles/LLVMHetsim.dir/hetsim-analysis.cpp.o [ 40%] Building CXX object compiler-pass/CMakeFiles/LLVMHetsim.dir/hetsim-codegen.cpp.o [ 60%] Linking CXX shared module LLVMHetsim.so [ 60%] Built target LLVMHetsim Scanning dependencies of target hetsim_default_rt [ 80%] Building CXX object runtime/default/CMakeFiles/hetsim_default_rt.dir/hetsim_default_rt.cpp.o [100%] Linking CXX shared library libhetsim_default_rt.so [100%] Built target hetsim_default_rt /home/subh/research/hetsim-relWrite Application for the TargetWe will write a program to do **vector addition** on the target hardware. Header Files```Cppinclude "params.h" // import parameters of target hardware as macrosinclude "util.h" // import primitive definitionsinclude include ``` Boilerplate Initialization```Cppvoid *work(void *arg) { // manager "spawns" worker threads with tid=1,2,3... unsigned tid = *(unsigned *)(arg); __register_core_id(tid); ...}int main() { __init_queues(WQ_DEPTH); __register_core_id(0); // manager is assigned core-id 0 ... 
__teardown_queues(); return 0;}``` NoteThe `core_id` must be the same across the application, `spec.json` and `target.py`.# spec.json !grep -A1 -B1 "id" spec/spec.json # target.py !grep -A1 -B1 "id=" example/model/target.pysystem.mgr = TRE( id=0, # This must correspond to the ID assigned in the user spec file. queue_depth=WQ_DEPTH, -- wrkr.append(TRE( id=i+1, # This must correspond to the ID assigned in the user spec file. max_outstanding_addrs=MAX_OUTSTANDING_REQSManager PE code: `main()`Memory allocation is done at the beginning part of `main()`.```Cpp // main memory allocation // in this example, we are working with 3 float arrays each of size N size_t RAM_SIZE_BYTES = 3 * N * sizeof(float); char *ram = (char *)mmap((void *)(RAM_BASE_ADDR), RAM_SIZE_BYTES, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANON | MAP_PRIVATE, 0, 0);``` ```Cpp // scratchpad memory allocationifdef EMULATION // for emulation char *dspm = (char *)mmap((void *)(SPM_BASE_ADDR), SPM_SIZE_BYTES, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANON | MAP_PRIVATE, 0, 0);else // !EMULATION // the model uses physically-addressed scratchpad that does not need explicit allocation char *dspm = (char *)SPM_BASE_ADDR;endif // EMULATION``` We "allocate" the input and output vectors at the pre-allocated main memory and initialize them.```Cpp // allocate the vectors and populate them float *a = (float *)(ram); float *b = (float *)(ram + N * sizeof(float)); float *c = (float *)(ram + 2 * N * sizeof(float)); for (int i = 0; i < N; ++i) { a[i] = float(i + 1); b[i] = float(i + 1); c[i] = 0.0; }``` For illustration, we allocate a barrier in the shared DSPM.```Cpp // allocate barrier object for synchronization pthread_barrier_t *bar = (pthread_barrier_t *)(dspm); // initialize barrier with participants = 1 manager + NUM_WORKER workers __barrier_init(bar, NUM_WORKER + 1); ``` Next, we allocate and spawn the worker threads.```Cpp // allocate thread objects for each "worker" PE pthread_t *workers = new pthread_t[NUM_WORKER];``` ```Cpp // create vector of core IDs to send to each thread unsigned *tids = new unsigned[NUM_WORKER]; for (int i = 0; i < NUM_WORKER; ++i) { tids[i] = i + 1; // spawn worker thread pthread_create(workers + i, NULL, work, &tids[i]); }``` The most important part of the code for the manager -- distribute and push work to the workers!```Cpp // partition the work and push work "packets" for (int i = 0; i < NUM_WORKER; ++i) { // each worker is assigned floor(N / NUM_WORKER) elements int n = N / NUM_WORKER; int start_idx = i * n; int end_idx = (i + 1) * n - 1; // handle trailing elements by assigning to final worker if (i == NUM_WORKER - 1) { end_idx = N - 1; }``` ```Cpp // push through work queues __push(i + 1, (uintptr_t)(a)); __push(i + 1, (uintptr_t)(b)); __push(i + 1, (uintptr_t)(c)); __push(i + 1, (unsigned)(start_idx)); __push(i + 1, (unsigned)(end_idx)); __push(i + 1, (uintptr_t)(bar)); }``` ```Cpp// ----- ROI begin ----- __reset_stats(); // begin recording time here for (int i = 0; i < NUM_WORKER; ++i) { __push(i + 1, 0); // start signal, value is ignored } __barrier_wait(bar); // synchronize with worker threads __dump_reset_stats(); // end recording time here// ----- ROI end -----``` ```Cpp // join with all threads for (int tid = 0; tid < NUM_WORKER; ++tid) { pthread_join(workers[tid], NULL); }``` ```Cpp // clean upifdef EMULATION munmap(dspm, SPM_SIZE_BYTES);endif // EMULATION munmap(ram, RAM_SIZE_BYTES); delete[] workers; delete[] tids; __teardown_queues();} // end of main()``` Worker PE Code: `work()` ```Cppvoid 
*work(void *arg) { unsigned tid = *(unsigned *)(arg); __register_core_id(tid); // retrieve variables from work queue volatile float *a = (volatile float *)__pop(0); volatile float *b = (volatile float *)__pop(0); volatile float *c = (volatile float *)__pop(0); int start_idx = (int)__pop(0); int end_idx = (int)__pop(0); pthread_barrier_t *bar = (pthread_barrier_t *)__pop(0);``` ```Cpp // receive start signal __pop(0); // perform actual computation for (int i = start_idx; i <= end_idx; ++i) { c[i] += a[i] + b[i]; } // synchronize with manager __barrier_wait(bar); return NULL;} // end of work()``` Verify Functionality of Emulated Code# build emulator library %cd emu !mkdir -p build %cd build !cmake .. > /dev/null && make %cd ../.. # build application with emulation library %cd example/app %rm -rf build %mkdir -p build %cd build !CC=/usr/bin/gcc CXX=/usr/bin/g++ MODE=EMU cmake .. > /dev/null && make %cd ../../.. # run emulated application for functional verification %cd example/app/build !./vector_add %cd ../../../home/subh/research/hetsim-rel/example/app/build == Vector Add Test with N = 100000, NUM_WORKER = 8 == Test Passed == /home/subh/research/hetsim-relSimulation on Detailed Model%set_env CMAKE_C_COMPILER=/usr/bin/arm-linux-gnueabihf-gcc %set_env CMAKE_CXX_COMPILER=/usr/bin/arm-linux-gnueabihf-g++ # build application with emulation library %cd example/app %rm -rf build %mkdir -p build %cd build !MODE=SIM cmake .. > /dev/null && make %cd ../../../ # run gem5 simulation %cd scripts !MODE=SIM APP=vector_add bash run-gem5.sh > /dev/null !echo "..." && tail -n20 ../gem5/m5out/run.log %cd ../ ### Gather Runtime %mkdir -p res %cp gem5/m5out/stats.txt res/stats.det.txt !grep sim_ticks res/stats.det.txt | head -n1 | tr -s ' ' | cut -d' ' -f2 > ticks.det.txt %set_env CMAKE_C_COMPILER=/usr/bin/gcc %set_env CMAKE_CXX_COMPILER=/usr/bin/g++env: CMAKE_C_COMPILER=/usr/bin/gcc env: CMAKE_CXX_COMPILER=/usr/bin/g++Run Trace GenerationWe will first make the required changes to enable tracing in the CPP program that we wrote and then run the trace generation step. ```Cppif defined(AUTO_TRACING) || defined(MANUAL_TRACING)include "hetsim_default_rt.h"endifvoid *work(void *arg) { unsigned tid = *(unsigned *)(arg); __register_core_id(tid);if defined(AUTO_TRACING) || defined(MANUAL_TRACING) __open_trace_log(tid);endif // AUTO_TRACING || MANUAL_TRACING // ROI begin ... // ROI endif defined(AUTO_TRACING) || defined(MANUAL_TRACING) __close_trace_log(tid);endif // AUTO_TRACING || MANUAL_TRACING}``` ```Cppint main() { ...if defined(AUTO_TRACING) || defined(MANUAL_TRACING) __open_trace_log(0); // use core-id as argumentendif ... __barrier_init(bar, NUM_WORKER + 1); // set number of participants to 1 manager + NUM_WORKER worker PEs ... __dump_reset_stats(); // end recording time here // ----- ROI end -----if defined(AUTO_TRACING) || defined(MANUAL_TRACING) __close_trace_log(0);endif // AUTO_TRACING || MANUAL_TRACING ...} // end of main()```# run trace generation %cd example/app/build !mkdir -p traces %rm -f CMakeCache.txt !MODE=EMU_AUTO_TRACE cmake .. 
> /dev/null && make !./vector_add ### Trace Format !head -n10 traces/pe_[01].trace %cd ../../../==> traces/pe_0.trace <== BARINIT 0xe0101000 9 10 ST @11 0x1dfdf60 ( ) ST @12 0x1dfdf64 ( ) ST @13 0x1dfdf68 ( ) ST @14 0x1dfdf6c ( ) ST @15 0x1dfdf70 ( ) ST @16 0x1dfdf74 ( ) ST @17 0x1dfdf78 ( ) ST @18 0x1dfdf7c ( ) PUSH 1 1 ==> traces/pe_1.trace <== POP 0 1 POP 0 1 POP 0 1 POP 0 1 POP 0 1 POP 0 1 POP 0 1 STALL 3 ( ) LD @1 0x40000000 ( ) LD @2 0x40061a80 ( 0x40000000 ) /home/subh/research/hetsim-relRun Trace ReplayWe will now run the generated traces through the TRE-enabled `gem5` model.%cd scripts !MODE=EMU_TRACE APP=vector_add bash run-gem5.sh > /dev/null !echo "..." && tail -n20 ../gem5/m5out/run.log %cd ../home/subh/research/hetsim-rel/scripts ... TRE[6]: halted @694256563 after completing 100005 trace entries Number of TREs IDLE now: 1/9 TRE[8]: halted @694258565 after completing 100005 trace entries Number of TREs IDLE now: 2/9 TRE[1]: halted @694260567 after completing 100005 trace entries Number of TREs IDLE now: 3/9 TRE[3]: halted @694262569 after completing 100005 trace entries Number of TREs IDLE now: 4/9 TRE[2]: halted @694264571 after completing 100005 trace entries Number of TREs IDLE now: 5/9 TRE[7]: halted @694266573 after completing 100005 trace entries Number of TREs IDLE now: 6/9 TRE[5]: halted @694268575 after completing 100005 trace entries Number of TREs IDLE now: 7/9 TRE[4]: halted @694270577 after completing 100005 trace entries Number of TREs IDLE now: 8/9 TRE[0]: triggered DMPRST TRE[0]: halted @694273580 after completing 0 trace entries Number of TREs IDLE now: 9/9 Exiting @ tick 694273580 because all TREs are done /home/subh/research/hetsim-relComparison between Detailed and HetSim RunsAs the final step, we will compare the runtime between the detailed `gem5` run and the TRE-enabled `gem5` run.%cp gem5/m5out/stats.txt res/stats.tre.txt !grep sim_ticks res/stats.tre.txt | head -n1 | tr -s ' ' | cut -d' ' -f2 > ticks.tre.txt !cat ticks.det.txt !cat ticks.tre.txt752808056 694044351import pandas as pd import numpy as np import os import re import urllib.request as urllib import networkx as nx from io import BytesIO from zipfile import ZipFile url = 'http://www-personal.umich.edu/~mejn/netdata/dolphins.zip' #dolphin connectivity within a social group with urllib.urlopen(url) as stream: with ZipFile(BytesIO(stream.read())) as archive: archive.printdir() #looked at what files were in the zipfile with urllib.urlopen(url) as stream: with ZipFile(BytesIO(stream.read())) as archive: #extracted files as strings txt = archive.read('dolphins.txt').decode() gml = archive.read('dolphins.gml').decode() print(txt) #the data I choose to use as Marine Biology is my field print(gml) #list of nodes and edges with individuals ids G = nx.parse_gml(gml) nx.draw_networkx(G) #graph is messy and difficult to see connections, made more easily understandable below colors = ['black' for n in G] colors[16] = 'cyan' #id 'Hook' options = { 'node_color': colors, 'node_size': 50, 'linewidths': 0, 'width': 0.1, 'with_labels': False } nx.draw_networkx(G, **options) print(nx.info(G)) #average degree of relatedness nx.average_clustering(G) #the degree to which take any node and look at all of nodes linked to, on average how many of those nodes linked to each other nx.degree_centrality(G) #how many links as a proportion of how many you could have (if totally connected = 1 or 100%) 'Hook' is not very connected but want to see the effect of removing Hook from the population, could have used these numbers to 
remove the most connected individual however Hook lost the popular vote of which dolphin got voted out of the group so it wasn't really up to me. Something about being friends with a Crocodile. list(G.nodes) #list of all ids G.remove_node('Hook') #already removed colors = ['black' for n in G] #new network map minus 'Hook', RIP options = { 'node_color': colors, 'node_size': 50, 'linewidths': 0, 'width': 0.1, 'with_labels': False } nx.draw_networkx(G, **options) list(G.nodes) #Hook is now gone from the group nx.average_clustering(G) #average_clustering w/ Hook was 0.2589582460550202 print(nx.info(G)) #average degree w/ Hook was 5.1290 nx.degree_centrality(G)Table of Contents Table of Contents Table of Contents# default_exp exec.normalize_audio # export import argparse import os import sys from uberduck_ml_dev.utils.audio import normalize_audio, trim_audio def run(dirname, backup, top_db): """Normalize all the audio files in a directory.""" old_dirname = dirname if backup: old_dirname = f"{os.path.normpath(old_dirname)}_backup" os.rename(dirname, old_dirname) for dirpath, _, filenames in os.walk(old_dirname): rel_path = os.path.relpath(dirpath, old_dirname) for filename in filenames: if not filename.endswith(".wav"): continue old_path = os.path.join(dirpath, filename) new_path = os.path.join(dirname, rel_path, filename) if not os.path.exists(os.path.join(dirname, rel_path)): os.makedirs(os.path.join(dirname, rel_path)) trim_audio(old_path, new_path, top_db) def parse_args(args): parser = argparse.ArgumentParser() parser.add_argument( "-d", "--dirname", help="Path to the directory which contains audio files to normalize.", ) parser.add_argument("--backup", dest="backup", action="store_true") parser.add_argument("--no-backup", dest="backup", action="store_false") parser.add_argument("--top-db", type=int) parser.set_defaults(backup=True, top_db=20) return parser.parse_args(args) # export try: from nbdev.imports import IN_NOTEBOOK except: IN_NOTEBOOK = False if __name__ == "__main__" and not IN_NOTEBOOK: args = parse_args(sys.argv[1:]) run(args.dirname, args.backup, args.top_db)Project 1 - Analysing Seattle and Boston Airbnb datasets In this project we will try to answer this 4 folowing questions:1) When is there more (or less) availability in Boston and Seattle? Is there a difference between the two cities?2) When is it cheaper to stay in Boston or Seattle? Is there a difference between the two cities?3) Do neighborhoods influence prices? And the score rating?4) What features do the top rated (score rating +90) properties have in common? What is different from the other properties? 
In this notebook, we will answer 1) questionsimport pandas as pd # import data df_boston_calendar = pd.read_csv('./airbnb-boston/calendar.csv') df_seattle_calendar = pd.read_csv('./airbnb-seattle/calendar.csv')Analyzing Boston data:# looking into data df_boston_calendar.head() df_boston_calendar.dtypes # convert date to datetime type df_boston_calendar['date'] = pd.to_datetime(df_boston_calendar['date']) df_boston_calendar.dtypes # convert availabe column into binary df_boston_calendar['available'] = df_boston_calendar['available'].apply(lambda x: 1 if x == 't' else 0) # calculate percentage of available properties df_boston_calendar['available'].mean() # calculate percentage of availability per day df_daily_boston = df_boston_calendar[['date','available']].groupby('date').mean() df_daily_boston.head() # look at the highest availability rate df_daily_boston.sort_values(by='available', ascending=False).head(1) df_daily_boston.plot.line(figsize=(15,5))- We can see that the availability rate starts low (15%) and grows until the beginning of December where it reaches +60%- The highest availability rate happens on 2016-12-04 where it reaches 61%- At the beginning of December there is a big drop in the rate, which is stable around 55% until March- At the beginning of March there is another big drop in the rate, which is stable just below 50% until September- There is a drop in availability at the end of April, where it drops ~ 6% and returns to the level before in just 3 days. It may be explained by the Boston marathon (https://en.wikipedia.org/wiki/2017_Boston_Marathon)# create a new column with the weekday name of the date df_boston_calendar['week_day'] = df_boston_calendar["date"].dt.weekday.astype(str)+'-'+df_boston_calendar["date"].dt.day_name() # calculate percentage of availability per week day df_week_day_boston = df_boston_calendar[['week_day','available']].groupby('week_day').mean() df_week_day_boston.plot.line(figsize=(10,5))- Between Thursdays and Saturdays there is the lowest availability- On Sundays and Mondays there is the highest availability Analyzing Seattle data:# looking into data df_seattle_calendar.head() df_seattle_calendar.dtypes # convert date to datetime type df_seattle_calendar['date'] = pd.to_datetime(df_seattle_calendar['date']) df_seattle_calendar.dtypes # convert availabe column into binary df_seattle_calendar['available'] = df_seattle_calendar['available'].apply(lambda x: 1 if x == 't' else 0) # calculate percentage of available properties df_seattle_calendar['available'].mean() # calculate percentage of availability per day df_daily_seattle = df_seattle_calendar[['date','available']].groupby('date').mean()#.reset_index() df_daily_seattle.head() # look at the highest availability rate df_daily_seattle.sort_values(by='available', ascending=False).head(1) df_daily_seattle.plot.line(figsize=(15,5))- We can see that the availability rate starts low in January and grows until the beginning of April where it reaches +70%- At the beginning of April there is a big drop in the rate, which is stable around 67% until March- At the beginning of July there is another big drop in the rate that comes close to 60%- Between July and January the rate grows evenly until the end of December, reaching a peak of 76% and falls again# create a new column with the weekday name of the date df_seattle_calendar['week_day'] = df_seattle_calendar["date"].dt.weekday.astype(str)+'-'+df_seattle_calendar["date"].dt.day_name() # calculate percentage of availability per weekday df_week_day_seattle = 
df_seattle_calendar[['week_day','available']].groupby('week_day').mean() df_week_day_seattle.plot.line(figsize=(10,5))https://github.com/abhirooptalasila/ML-Projects/tree/master/Image%20Denoising%20using%20Convolutional%20Autoencodersimport os import numpy as np import pandas as pd from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D from tensorflow.keras.models import Model from tensorflow.keras.datasets import mnist from tensorflow.keras.callbacks import EarlyStopping import matplotlib.pyplot as plt (x_train, _), (x_test, _) = mnist.load_data() x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) #add noises noise_factor = 0.5 x_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape) x_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape) x_train_noisy = np.clip(x_train_noisy, 0., 1.) x_test_noisy = np.clip(x_test_noisy, 0., 1.) #plot noisy data n = 10 plt.figure(figsize=(20, 2)) for i in range(1,n): ax = plt.subplot(1, n, i) plt.imshow(x_test_noisy[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() #model input_img = Input(shape=(28, 28, 1)) x = Conv2D(32, (3, 3), activation='relu', padding='same', strides=2)(input_img) # x = MaxPooling2D((2, 2), padding='same')(x) x = Conv2D(32, (3, 3), activation='relu', padding='same', strides=2)(x) # encoded = MaxPooling2D((2, 2), padding='same')(x) # at this point the representation is (7, 7, 32) x = Conv2D(32, (3, 3), activation='relu', padding='same')(x) x = UpSampling2D((2, 2))(x) x = Conv2D(32, (3, 3), activation='relu', padding='same')(x) x = UpSampling2D((2, 2))(x) decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x) autoencoder = Model(input_img, decoded) autoencoder.summary() #compile and Fit autoencoder.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc']) early = EarlyStopping(monitor="val_loss", mode="min", patience=5) history = autoencoder.fit(x_train_noisy, x_train, epochs=50, batch_size=128, shuffle=True, validation_data=(x_test_noisy, x_test), callbacks=[early]) #plot accuracy and loss curves def plot_history(history): acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'r', label='Training accuracy') plt.plot(epochs, val_acc, 'b', label='Validation accuracy') plt.title('Training and validation accuracy') plt.legend(loc=0) plt.figure() plt.show() plt.plot(epochs, loss, 'r', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend(loc=0) plt.figure() plt.show() plot_history(history) #predict on sample test_img = np.expand_dims(x_test_noisy[0].reshape(28, 28, 1), axis=0) img_pred = autoencoder.predict(test_img) %matplotlib inline plt.imshow(np.squeeze(x_test_noisy[0].reshape(28, 28, 1)), interpolation='nearest') plt.show() plt.imshow(np.squeeze(img_pred), interpolation='nearest') plt.show()Importing Librariesimport tensorflow as tf from keras.preprocessing.image import ImageDataGenerator tf.__version__Data Preprocessing Preprocessing trainingset- preprocessing training set helps prevent overfitting- generatig new images with feature scaling (rescale param)- data augmentation transformations: i) 
shear ii) zoom iii) horizontal flip**taget_size is the final image size when they get fed in to the CNN (Bigger images are slower)**train_datagen = ImageDataGenerator( rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True) training_set = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary')Found 8000 images belonging to 2 classes.Preprocessing the test set- Only do feature scaling- dont apply transformationstest_datagen = ImageDataGenerator(rescale=1./255) test_set = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary')Found 2000 images belonging to 2 classes.Building the CNN model Initialising the CNN modelcnn = tf.keras.models.Sequential()Add Convolution layercnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu',input_shape=[64,64,3]))Add Pooling Layer to convolutional layer (max pooling)cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))Add second Convolutional Layercnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu')) cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))Add flattening layercnn.add(tf.keras.layers.Flatten())Add Fully Connected Layer#units refers to hidden neurons cnn.add(tf.keras.layers.Dense(units=128, activation='relu'))Output layer#units =1 because this is a binary classification cnn.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))Compiling the CNNcnn.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])Train the CNN- train on training set and evaluating on the test setcnn.fit(x=training_set, validation_data=test_set, epochs=25)Epoch 1/25 250/250 [==============================] - 164s 655ms/step - loss: 0.6678 - accuracy: 0.5794 - val_loss: 0.6396 - val_accuracy: 0.6655 Epoch 2/25 250/250 [==============================] - 40s 159ms/step - loss: 0.5990 - accuracy: 0.6762 - val_loss: 0.5561 - val_accuracy: 0.7285 Epoch 3/25 250/250 [==============================] - 41s 165ms/step - loss: 0.5586 - accuracy: 0.7126 - val_loss: 0.5258 - val_accuracy: 0.7385 Epoch 4/25 250/250 [==============================] - 43s 172ms/step - loss: 0.5260 - accuracy: 0.7366 - val_loss: 0.5068 - val_accuracy: 0.7600 Epoch 5/25 250/250 [==============================] - 43s 172ms/step - loss: 0.4977 - accuracy: 0.7574 - val_loss: 0.4884 - val_accuracy: 0.7630 Epoch 6/25 250/250 [==============================] - 43s 171ms/step - loss: 0.4780 - accuracy: 0.7738 - val_loss: 0.4813 - val_accuracy: 0.7710 Epoch 7/25 250/250 [==============================] - 41s 163ms/step - loss: 0.4618 - accuracy: 0.7763 - val_loss: 0.5157 - val_a[...]Making a single predictionimport numpy as np from keras.preprocessing import image test_image = image.load_img('dataset/single_prediction/cat_or_dog_2.jpg', target_size=(64,64)) test_image = image.img_to_array(test_image) #add the batch dimension to test image since images were trained in batches test_image = np.expand_dims(test_image, axis=0) result = cnn.predict(test_image) print(training_set.class_indices) #in result[0][0] the first index represents the batch and the second index represents the actual prediction if result[0][0] > 0.5: prediction = 'dog' else: prediction = 'cat' print(prediction){'cats': 0, 'dogs': 1} [[3.485288e-37]] catType dispatch> Basic single and dual parameter dispatch Helpers#exports def type_hints(f): "Same as `typing.get_type_hints` but returns `{}` if not allowed type" return typing.get_type_hints(f) if 
isinstance(f, typing._allowed_types) else {} #export def anno_ret(func): "Get the return annotation of `func`" if not func: return None ann = type_hints(func) if not ann: return None return ann.get('return') #hide def f(x) -> float: return x test_eq(anno_ret(f), float) def f(x) -> typing.Tuple[float,float]: return x test_eq(anno_ret(f), typing.Tuple[float,float]) def f(x) -> None: return x test_eq(anno_ret(f), NoneType) def f(x): return x test_eq(anno_ret(f), None) test_eq(anno_ret(None), None) #export cmp_instance = functools.cmp_to_key(lambda a,b: 0 if a==b else 1 if issubclass(a,b) else -1) td = {int:1, numbers.Number:2, numbers.Integral:3} test_eq(sorted(td, key=cmp_instance), [numbers.Number, numbers.Integral, int]) #export def _p2_anno(f): "Get the 1st 2 annotations of `f`, defaulting to `object`" hints = type_hints(f) ann = [o for n,o in hints.items() if n!='return'] while len(ann)<2: ann.append(object) return ann[:2] def _f(a): pass test_eq(_p2_anno(_f), (object,object)) def _f(a, b): pass test_eq(_p2_anno(_f), (object,object)) def _f(a:None, b)->str: pass test_eq(_p2_anno(_f), (NoneType,object)) def _f(a:str, b)->float: pass test_eq(_p2_anno(_f), (str,object)) def _f(a:None, b:str)->float: pass test_eq(_p2_anno(_f), (NoneType,str)) def _f(a:int, b:int)->float: pass test_eq(_p2_anno(_f), (int,int)) def _f(self, a:int, b:int): pass test_eq(_p2_anno(_f), (int,int)) def _f(a:int, b:str)->float: pass test_eq(_p2_anno(_f), (int,str)) test_eq(_p2_anno(attrgetter('foo')), (object,object))TypeDispatch - The following class is the basis that allows us to do type dipatch with type annotations. It contains a dictionary type -> functions and ensures that the proper function is called when passed an object (depending on its type).#export class _TypeDict: def __init__(self): self.d,self.cache = {},{} def _reset(self): self.d = {k:self.d[k] for k in sorted(self.d, key=cmp_instance, reverse=True)} self.cache = {} def add(self, t, f): "Add type `t` and function `f`" if not isinstance(t,tuple): t=(t,) for t_ in t: self.d[t_] = f self._reset() def all_matches(self, k): "Find first matching type that is a super-class of `k`" if k not in self.cache: types = [f for f in self.d if k==f or (isinstance(k,type) and issubclass(k,f))] self.cache[k] = [self.d[o] for o in types] return self.cache[k] def __getitem__(self, k): "Find first matching type that is a super-class of `k`" res = self.all_matches(k) return res[0] if len(res) else None def __repr__(self): return self.d.__repr__() def first(self): return next(iter(self.d.values())) #export class TypeDispatch: "Dictionary-like object; `__getitem__` matches keys of types using `issubclass`" def __init__(self, *funcs): self.funcs = _TypeDict() for o in funcs: self.add(o) self.inst = None def add(self, f): "Add type `t` and function `f`" a0,a1 = _p2_anno(f) t = self.funcs.d.get(a0) if t is None: t = _TypeDict() self.funcs.add(a0, t) t.add(a1, f) def first(self): return self.funcs.first().first() def returns(self, x): return anno_ret(self[type(x)]) def returns_none(self, x): r = anno_ret(self[type(x)]) return r if r == NoneType else None def _attname(self,k): return getattr(k,'__name__',str(k)) def __repr__(self): r = [f'({self._attname(k)},{self._attname(l)}) -> {v.__name__}' for k in self.funcs.d for l,v in self.funcs[k].d.items()] return '\n'.join(r) def __call__(self, *args, **kwargs): ts = L(args).map(type)[:2] f = self[tuple(ts)] if not f: return args[0] if self.inst is not None: f = types.MethodType(f, self.inst) return f(*args, **kwargs) def 
__get__(self, inst, owner): self.inst = inst return self def __getitem__(self, k): "Find first matching type that is a super-class of `k`" k = L(k if isinstance(k, tuple) else (k,)) while len(k)<2: k.append(object) r = self.funcs.all_matches(k[0]) if len(r)==0: return None for t in r: o = t[k[1]] if o is not None: return o return None def f_col(x:typing.Collection): return x def f_nin(x:numbers.Integral)->int: return x+1 def f_ni2(x:int): return x def f_bll(x:(bool,list)): return x def f_num(x:numbers.Number): return x t = TypeDispatch(f_nin,f_ni2,f_num,f_bll) t.add(f_ni2) #Should work even if we add the same function twice. test_eq(t[int], f_ni2) test_eq(t[np.int32], f_nin) test_eq(t[str], None) test_eq(t[float], f_num) test_eq(t[bool], f_bll) test_eq(t[list], f_bll) t.add(f_col) test_eq(t[str], f_col) test_eq(t[np.int32], f_nin) o = np.int32(1) test_eq(t(o), 2) test_eq(t.returns(o), int) assert t.first() is not None t def m_nin(self, x:(str,numbers.Integral)): return str(x)+'1' def m_bll(self, x:bool): self.foo='a' def m_num(self, x:numbers.Number): return x t = TypeDispatch(m_nin,m_num,m_bll) class A: f = t a = A() test_eq(a.f(1), '11') test_eq(a.f(1.), 1.) test_is(a.f.inst, a) a.f(False) test_eq(a.foo, 'a') def f1(x:numbers.Integral, y): return x+1 def f2(x:int, y:float): return x+y t = TypeDispatch(f1,f2) test_eq(t[int], f1) test_eq(t[int,int], f1) test_eq(t[int,float], f2) test_eq(t[float,float], None) test_eq(t[np.int32,float], f1) test_eq(t(3,2.0), 5) test_eq(t(3,2), 4) test_eq(t('a'), 'a') ttypedispatch Decorator#export class DispatchReg: "A global registry for `TypeDispatch` objects keyed by function name" def __init__(self): self.d = defaultdict(TypeDispatch) def __call__(self, f): nm = f'{f.__qualname__}' self.d[nm].add(f) return self.d[nm] typedispatch = DispatchReg() @typedispatch def f_td_test(x, y): return f'{x}{y}' @typedispatch def f_td_test(x:numbers.Integral, y): return x+1 @typedispatch def f_td_test(x:int, y:float): return x+y test_eq(f_td_test(3,2.0), 5) test_eq(f_td_test(3,2), 4) test_eq(f_td_test('a','b'), 'ab')Export -#hide from local.notebook.export import notebook2script notebook2script(all_fs=True)Converted 00_test.ipynb. Converted 01_core.ipynb. Converted 01a_utils.ipynb. Converted 01b_dispatch.ipynb. Converted 01c_torch_core.ipynb. Converted 02_script.ipynb. Converted 03_dataloader.ipynb. Converted 04_transform.ipynb. Converted 05_data_core.ipynb. Converted 06_data_transforms.ipynb. Converted 07_vision_core.ipynb. Converted 08_pets_tutorial.ipynb. Converted 09_vision_augment.ipynb. Converted 10_data_block.ipynb. Converted 11_layers.ipynb. Converted 11a_vision_models_xresnet.ipynb. Converted 12_optimizer.ipynb. Converted 13_learner.ipynb. Converted 14_callback_schedule.ipynb. Converted 14a_callback_data.ipynb. Converted 15_callback_hook.ipynb. Converted 16_callback_progress.ipynb. Converted 17_callback_tracker.ipynb. Converted 18_callback_fp16.ipynb. Converted 19_callback_mixup.ipynb. Converted 20_metrics.ipynb. Converted 21_tutorial_imagenette.ipynb. Converted 22_vision_learner.ipynb. Converted 23_tutorial_transfer_learning.ipynb. Converted 30_text_core.ipynb. 
Converted 31_tex[...]Cust-close with CPFP Generate all the priv/public keys# Generate example priv/pubkeys funding_privkey_hex = "1111111111111111111111111111111100000000000000000000000000000000" funding_pubkey_hex = privkey_to_pubkey(bytes.fromhex(funding_privkey_hex)).hex() change_privkey_hex = "1111111111111111111111111111111111111111111111111111111111111111" change_pubkey_hex = privkey_to_pubkey(bytes.fromhex(change_privkey_hex)).hex() merch_privkey_hex = "3911111111111111111111111111111111111111111111111111111111111111" merch_pubkey_hex = privkey_to_pubkey(bytes.fromhex(merch_privkey_hex)).hex() cust_privkey_hex = "7911111111111111111111111111111111111111111111111111111111111111" cust_pubkey_hex = privkey_to_pubkey(bytes.fromhex(cust_privkey_hex)).hex() cust_payout_privkey_hex = "7711111111111111111111111111111111111111111111111111111111111111" cust_payout_pubkey_hex = privkey_to_pubkey(bytes.fromhex(cust_payout_privkey_hex)).hex() merch_payout_privkey_hex = "3711111111111111111111111111111111111111111111111111111111111111" merch_payout_pubkey_hex = privkey_to_pubkey(bytes.fromhex(merch_payout_privkey_hex)).hex() merch_disp_privkey_hex = "3111111111111111111111111111111111111111111111111111111111111111" merch_disp_pubkey_hex = privkey_to_pubkey(bytes.fromhex(merch_disp_privkey_hex)).hex() revocation_secret_hex = "4011111111111111111111111111111111111111111111111111111111111111" RL = hashlib.sha256(bytes.fromhex(revocation_secret_hex)).digest() revocation_lock_hex = RL.hex() merch_cpfp_privkey_hex = "2222222222222222222222222222222277777777777777777777777777777777" merch_cpfp_pubkey_hex = privkey_to_pubkey(bytes.fromhex(merch_cpfp_privkey_hex)).hex() cust_cpfp_privkey_hex = "3322222222222222222222222222222277777777777777777777777777777777" cust_cpfp_pubkey_hex = privkey_to_pubkey(bytes.fromhex(merch_cpfp_privkey_hex)).hex() # merch_fee_privkey_hex = "2222222222222222222222222222222266666666666666666666666666666666" # merch_fee_pubkey_hex = privkey_to_pubkey(bytes.fromhex(merch_fee_privkey_hex)).hex() # cust priv/pubkeys for executing cpfp cust1_privkey_hex = "0881111111111111111111111111111100000000000000000000000000000000" cust1_privkey = bytes.fromhex(merch1_privkey_hex) cust1_pubkey = privkey_to_pubkey(merch1_privkey) cust2_privkey_hex = "8881111111111111111111111111111100000000000000000000000000000000" cust2_privkey = bytes.fromhex(merch2_privkey_hex) cust2_pubkey = privkey_to_pubkey(merch2_privkey).hex() cust3_privkey_hex = "7771111111111111111111111111111100000000000000000000000000000000" cust3_privkey = bytes.fromhex(merch3_privkey_hex) cust3_pubkey = privkey_to_pubkey(merch3_privkey).hex()Start up regtest modeDelete any history so we are starting from scratch. 
Mine 101 blocks so we can spend some btc.# Make sure bitcoind is not already running os.system("bitcoin-cli -regtest stop") time.sleep(2) # Delete any previous files to restart regtest os.system("rm -rfv $HOME/Library/Application\ Support/Bitcoin/regtest/") # start up bitcoind in regtest mode # os.system("bitcoind -regtest -daemon -minrelaytxfee=0") os.system("bitcoind -regtest -daemon") time.sleep(2) # generate 101 blocks so we can fund transactions os.system("bitcoin-cli -regtest generate 101") blockcount = subprocess.getoutput("bitcoin-cli -regtest getblockcount") print("blockcount: " + str(blockcount))blockcount: 101Generate base58 address for the escrow funder# Generate p2sh-p2wpkh address to fund the escrow funder privkey = bytes.fromhex(funding_privkey_hex) public_key = privkey_to_pubkey(privkey) p2sh_p2wpkh_address = pk_to_p2sh_p2wpkh(public_key, testnet = True) # print("Private key: " + privkey.hex()) # print("Public key: " + public_key.hex()) print("Address: " + p2sh_p2wpkh_address)Address: 2MuuVDMJqfh2J9iS3DtfUeU1tEfff1SNahiSend btc to the escrow fundertxid_1 = subprocess.getoutput("bitcoin-cli -regtest sendtoaddress " + p2sh_p2wpkh_address + " 3.0") print(txid_1) # Find which output index the btc was sent to raw_tx = subprocess.getoutput("bitcoin-cli -regtest getrawtransaction " + txid_1) decoded = subprocess.getoutput("bitcoin-cli -regtest decoderawtransaction " + raw_tx) d = json.loads(decoded) # print(decoded) if d["vout"][0]["scriptPubKey"]["addresses"][0] == p2sh_p2wpkh_address: index = 0 else: index = 1 print("index: " + str(index)) os.system("bitcoin-cli -regtest generate 1");Create Funding txraw_escrow_tx = subprocess.getoutput("python funding_tx_with_changev2.py" + " --txid " + txid_1 + " --index " + str(index) + " --input_amount_btc " + "3.0" + " --funding_privkey " + funding_privkey_hex + " --escrow_value_btc " + "2.1" + " --cust_pubkey " + cust_pubkey_hex + " --merch_pubkey " + merch_pubkey_hex + " --cust_change_value_btc " + "0.89" + " --cust_change_pubkey " + change_pubkey_hex) print("serialized funding tx:\n" + raw_escrow_tx) # Broadcast funding tx escrow_txid = subprocess.getoutput("bitcoin-cli -regtest sendrawtransaction " + raw_escrow_tx + " true") # "true" flag means we are okay with an 'absurdly' high tx fee print("\nfunding txid:\n"+escrow_txid) os.system("bitcoin-cli -regtest generate 1");Decode escrow funding transaction# raw_escrow_tx1 = subprocess.getoutput("bitcoin-cli -regtest getrawtransaction " + escrow_txid) # decoded = subprocess.getoutput("bitcoin-cli -regtest decoderawtransaction " + raw_escrow_tx1) # print(decoded)Cust-close from Escrow with CPFPcust_close_cpfp_tx = subprocess.getoutput("python cust_close_with_cpfp.py" + " --spend_from " + "escrow" + " --txid_str " + escrow_txid + " --index " + "0" + " --input_amount_btc " + "2.1" + " --cust_privkey " + cust_privkey_hex + " --merch_privkey " + merch_privkey_hex + " --cust_script_value_btc " + "0.98" + " --cust_payout_pubkey " + cust_payout_pubkey_hex + " --to_self_delay " + "05cf" + " --merch_payout_value_btc " + "1" + " --merch_payout_pubkey " + merch_payout_pubkey_hex + " --revocation_lock " + revocation_lock_hex + " --merch_dispute_pubkey " + merch_disp_pubkey_hex + " --cust_cpfp_value_btc " + "0.099" + " --cust_cpfp_pubkey " + cust_pubkey_hex) print("cust close with cpfp tx:\n" + cust_close_cpfp_tx)cust close with cpfp tx: 
020000000001013e3cf638e43b8a95b07c2f6345b66ee960ce985d2ea76a4afea286cd06b18dda0000000000ffffffff04805cd7050000000022002067cb20e705c4eb4363194a74d2f743afc1c9ee3cd741d45e21268b16add04f8b00e1f50500000000160014d4354803d10e77eccfc3bf06c152ae694d05d3810000000000000000436a41f8345a21a55dc665b65c8dcfb49488b8e4f337d5c9bb843603f7222a892ce94103195e272df2310ded35f9958fd0c2847bf73b5b429a716c005d465009bd768641e00f9700000000001600145d6f6add4b70012131dbb8f0a7b067b70ec6a76f0400483045022100bd93490dedb197c3a736b92cf2c5862ee94fcd589e30e6328cb3a1aca2f1fe7702202fdbb6c019c1230f25bb4d72048bf5486f326376885a034f1ff6d6328706569e0147304402206dee41f5d0f50097498e48438cff68804d1118b941d2fbe0053a95e4322fb78802206d5dcd44bf5fdbab77d2bef5c40b042fbb193d1bf46e29fbfbf578ae688c5e9c0147522102f3d17ca1ac6dcf42b0297a71abb87f79dfa2c66278cbb99c1437e6570643ce902103fc43b44cd953c7b92726ebefe482a272538c7e40fdcde5994a62841525afa8d752ae00000000This tx could be broadcast by itself and it would be a valid tx (cell below). If the fee was too small, we can effectively bump up the fee by creating a 'child' tx that spends from itcust_close_cpfp_txid = subprocess.getoutput("bitcoin-cli -regtest sendrawtransaction " + cust_close_cpfp_tx) print(cust_close_cpfp_txid) os.system("bitcoin-cli -regtest generate 1"); mined_cust_close_cpfp_tx = subprocess.getoutput("bitcoin-cli -regtest getrawtransaction " + cust_close_cpfp_txid) decoded = subprocess.getoutput("bitcoin-cli -regtest decoderawtransaction " + mined_cust_close_cpfp_tx) print(decoded){ "txid": "fa8687450943044bad065f0503e87b100eed1e0d6e015454d2b8e7af3f01c83f", "hash": "7eaf76af8f868e1b9573eaf252f393836054321691b355bc811bc487689981ae", "version": 2, "size": 453, "vsize": 288, "weight": 1149, "locktime": 0, "vin": [ { "txid": "da8db106cd86a2fe4a6aa72e5d98ce60e96eb645632f7cb0958a3be438f63c3e", "vout": 0, "scriptSig": { "asm": "", "hex": "" }, "txinwitness": [ "", "", "", "" ], "sequence": 4294967295 } ], "vout": [ { "value[...]Cust-fee input tx fund another input that the merchant will use to add a large fee to the 'child' transaction.# address to fund the merchant child tx cust1_p2sh_p2wpkh_address = pk_to_p2sh_p2wpkh(cust1_pubkey, testnet = True) # print("Address: " + cust1_p2sh_p2wpkh_address) # Fund the merchant child input txid_2 = subprocess.getoutput("bitcoin-cli -regtest sendtoaddress " + cust1_p2sh_p2wpkh_address + " 1.01") print("txid:" + txid_2) # Find which output index the btc was sent to raw_tx = subprocess.getoutput("bitcoin-cli -regtest getrawtransaction " + txid_2) decoded = subprocess.getoutput("bitcoin-cli -regtest decoderawtransaction " + raw_tx) d = json.loads(decoded) # print(decoded) if d["vout"][0]["scriptPubKey"]["addresses"][0] == cust1_p2sh_p2wpkh_address: index = 0 else: index = 1 print("index: " + str(index)) os.system("bitcoin-cli -regtest generate 1"); cust1_tx = subprocess.getoutput("python p2nsh_to_p2wpkh.py" + " --txid " + txid_2 + " --index " + str(index) + " --input_amount_btc " + "1.01" + " --input_privkey " + merch1_privkey_hex + " --payout_value_btc " + "1" + " --payout_pubkey " + merch2_pubkey) # print(cust1_tx) cust1_txid = subprocess.getoutput("bitcoin-cli -regtest sendrawtransaction " + cust1_tx) # "true" flag means we are okay with absurdly high tx fee print("cust fee outpoint:\n" + cust1_txid)cust fee outpoint: 5102f9d77b313e6838afd8463537f9a3b35f0469fd96392c8c4ee3bd1d9b85e9Create child transactionInput[0]: Child outpoint Input[1]: Merch p2wpkh outpointOutput[0]: Merch p2wpkhcust_child_tx = subprocess.getoutput("python merch_close_child_tx.py" + " 
--child_txid_str " + cust_close_cpfp_txid + " --child_index " + "3" + " --child_privkey " + cust_cpfp_privkey_hex + " --child_input_amount_btc " + "0.099" + " --merch_txid_str " + cust1_txid + " --merch_index " + "0" + " --merch_privkey " + cust2_privkey_hex + " --merch_input_amount_btc " + "1" + " --output_value_btc " + "1.008" + " --payout_pubkey " + cust3_pubkey) print(cust_child_tx)020000000001023fc8013fafe7b8d25454016e0d1eed0e107be803055f06ad4b044309458786fa0300000000ffffffffe9859b1dbde34e8c2c3996fd69045fb3a3f9373546d8af38683e317bd7f902510000000000ffffffff0100160206000000001600140cbd8eef39d742140b81cf2f7fbade71af58a182024730440220240080e953a67d68c6aaf7f7709d7ee0500d6a8b35795d27c9c7a363e24e09cb02202f8d4ae266a1f61776a6df7b4c512c308e4d6ab183547d77c463ca08a54fce440121029ee20dd6f7d7385b0816b88ae7519369256ceae2431f59473ab7a0917902b590024730440220241ae27c027f880f10db173447e582893de75aca5bfca9d547d95a592c9a53ec0220456e9291b0cf4c3e31b4a20bd14ae25cd8b175e5c6cf293db117887551e988370121033d954c5326559213b14e37cf029db0f8a75bb7bbac4644ce19a4834b1c71dee900000000Broadcast child txmerch_child_txid = subprocess.getoutput("bitcoin-cli -regtest sendrawtransaction " + merch_child_tx + " true") print(merch_child_txid) mined_merch_child_txid = subprocess.getoutput("bitcoin-cli -regtest getrawtransaction " + merch_child_txid) decoded = subprocess.getoutput("bitcoin-cli -regtest decoderawtransaction " + mined_merch_child_txid) print(decoded){ "txid": "defc3281489701d40517c8a8119eb07e874dc5145edceeb0380a64a9cb667136", "hash": "c774120b46f3b9cf87b1be082434893f558e64dbabc1f77055a9169b0a4f6410", "version": 2, "size": 339, "vsize": 177, "weight": 708, "locktime": 0, "vin": [ { "txid": "9cef5178b6464d566532aad2510608a6c4c715490c7751b8d91387ba909905e6", "vout": 1, "scriptSig": { "asm": "", "hex": "" }, "txinwitness": [ "304402203902507c229075f32eb6f9e2cc1c586219fcf993c12bc77724bf0492fd563043022024ff8a53a4d999f64793584aba73332db462b95e5eb35d7489dcd369129fa91201", "025bdf02f6aeb3a5cbfdece604956a49571d823b8888f5c4d3ceb58e453c044e57" ], "sequence": 4294967295 }, { "txid": "514c7ef5d1d94d6774753d78296a2b62b1b7f6e61cddbde6918bf7e243b536d9", "vout": 0, "scriptSig": { "asm": "", "hex": "" }, "txinwitness": [ "30440220335e18eff0525006a03e78be8657e3addf6e2cb0b7b53f98ed4db32a30dd5de9022[...]Plotting DataVisualizing the metadata is very useful to get a first look at the nature and quality of the run.First we need a `DataFrame` with the meta data. You can make one with `porekit.gather_metadata` once, and then load it later from a hdf file or something similar.df = pd.read_hdf("../examples/data/ru9_meta.h5", "meta")Read length distributionporekit.plots.read_length_distribution(df);This is a histogram showing the distribution of read length. In this case it's the max of template and complement length. This plots ignores a small part of the longest reads in order to be more readable. Reads over timeporekit.plots.reads_vs_time(df);Yield Curvesporekit.plots.yield_curves(df);This plot shows the sequence yields in Megabases over time. Template length vs complement lengthporekit.plots.template_vs_complement(df);In the standard 2D library preparation, a "hairpin" is attached to one end of double stranded DNA. Then, when the strand goes through the nanopore, first one strand translocates, then the hairpin and finally the complement. 
Because template and complement both carry the same information, they can be used to improve accuracy of the basecalling.However, not all molecules have a hairpin attached, not all have a complement strand, and in most cases, the template and complement length does not match completely. This can be seen in the plot above, where most data points are on a diagonal with template and complement length being almost the same. There are more points under the diagonal than above it, and there is a solid line at the bottom, showing reads with no complement. Occupancyporekit.plots.occupancy(df);This shows the occupancy of pores over time. In General, pores break over time, which is a major factor in limiting the total yield over the lifetime of a flowcell. Squiggle DotsThe `squiggle_dots` function takes a Fast5 File and outputs a plot of all event means as dots on a graph. This way of plotting event data does a better job at characterizing a long read than the traditional "squiggle" plot. In this example there is a marked difference between the traces of the template and the complement, as segmented by the detected hairpin section.fast5 = porekit.Fast5File(df.iloc[1002].absolute_filename) porekit.plots.squiggle_dots(fast5) fast5.close()Customizing plotsThe plots inside `porekit.plots` are designed to work best inside the Jupyter notebook when exploring nanopore data interactively, and showing nanopore data as published notebooks or presentations. This is why they use colors and a wide aspect ratio.But the plots can be customized somewhat using standard matplotlib. Every plot function returns a figure and an axis object:f, ax = porekit.plots.read_length_distribution(df) f.suptitle("Hello World"); f.set_figwidth(6)Sometimes you want to subdivide a figure into multiple plots. You can do it like this:f, axes = plt.subplots(1,2) f.set_figwidth(14) ax1, ax2 = axes porekit.plots.read_length_distribution(df, ax=ax1); porekit.plots.yield_curves(df, ax=ax2);The below steps show how the default cifar10 models run on arm cmsis nn, and it works well generally.cwd = os.path.abspath(os.curdir) def RunSysCmd(cmd): import subprocess p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) (output, err) = p.communicate() p_status = p.wait() print(output.decode('utf-8')) def Download(url, force=False): tgt = os.path.basename(url) if(os.path.exists(tgt) and force): RunSysCmd('rm -f %s'%(tgt)) if(not os.path.exists(tgt)): RunSysCmd('wget %s'%(url)) return tgt def ToList(d): sz=1 for s in d.shape: sz = sz*s return d.reshape(sz).tolist() def q2f(d, Q): '''To convert a number from Qm.n format to floating point: 1. Convert the number to floating point as if it were an integer, in other words remove the binary point 2. 
Multiply by 2−n ''' if(type(d) is list): D = [] for v in d: D.append(float(v*math.pow(2,-Q))) elif(type(d) is np.ndarray): D = d*math.pow(2,-Q) else: D = float(d*math.pow(2,-Q)) return D def show(w): if(type(w) is np.ndarray): aL = ToList(w) else: aL = list(w) plt.figure(figsize=(18, 3)) plt.subplot(121) plt.title('green is real, red is sort') plt.plot(aL,'g') plt.grid() aL.sort() plt.plot(aL,'r') plt.grid() plt.subplot(122) plt.hist(aL,100) plt.title('hist') plt.grid() plt.show() def compare(a,b): if(type(a) is np.ndarray): aL = ToList(a) else: aL = list(a) if(type(b) is np.ndarray): bL = ToList(b) else: bL = list(b) assert(len(aL) == len(bL)) Z = list(zip(aL,bL)) Z.sort(key=lambda x: x[0]) aL,bL=zip(*Z) plt.figure(figsize=(18, 3)) plt.subplot(131) plt.plot(aL,'r') plt.grid() plt.subplot(133) plt.plot(bL,'g') plt.plot(aL,'r') plt.grid() plt.subplot(132) bL=list(bL) bL.sort() plt.plot(bL,'g') plt.grid() RunSysCmd('scons') defaults = [] reImg = re.compile('#define IMG_DATA \{([\w\d,]+)\}') with open('arm_nnexamples_cifar10_inputs.h') as f: for l in f.readlines(): if(reImg.search(l)): data = eval('['+reImg.search(l).groups()[0]+']') data = np.asarray(data,dtype=np.uint8).reshape(32,32,3) defaults.append(data) fig, axs = plt.subplots(1, len(defaults)) for i,dft in enumerate(defaults): axs[i].imshow(dft) for i,dft in enumerate(defaults): data = dft data.tofile('img.bin') RunSysCmd('./cifar10 img.bin') # CATs #url = 'http://p5.so.qhimgs1.com/bdr/_240_/t011b628e47ccf9983b.jpg' #url = 'http://p3.so.qhmsg.com/bdr/_240_/t01067394101dcd6278.jpg' #url = 'http://p5.so.qhimgs1.com/bdr/_240_/t01425873ec4207251b.jpg' # AIRPLANEs #url = 'http://p0.so.qhimgs1.com/bdr/_240_/t01a45e71a8867f2354.jpg' #url = 'http://p4.so.qhmsg.com/bdr/_240_/t01cbd2106353872279.jpg' # DOGs #url = 'http://p0.so.qhimgs1.com/bdr/_240_/t0180d8b6dbb9eb54b0.jpg' #url = 'http://p5.so.qhimgs1.com/bdr/_240_/t015ac334b42ef829db.jpg' url = 'http://p3.so.qhimgs1.com/bdr/_240_/t017f279f05b2c73b93.jpg' img = Download(url,True) # ref plot: https://www.jianshu.com/p/2b2caa2cf381 im = Image.open(img) im = im.convert('RGB') fig, axs = plt.subplots(1, 2) axs[0].imshow(im) im = im.resize((32,32)) im.save('img.png') axs[1].imshow(im) data = np.asarray(im) data = data.astype(np.int8) data.tofile('img.bin') RunSysCmd('./cifar10 img.bin')start execution airplane: 0.0% automobile: 0.0% bird: 0.0% cat: 0.0% deer: 0.0% dog: 99.2% frog: 0.0% horse: 0.0% ship: 0.0% truck: 0.0%Now research about how the cifar10 model was quantized and run on arm cmsis nn with Q Format.Firstly, It's much more better to follow page [cifar10 convert tools](https://github.com/ARM-software/ML-examples/tree/master/cmsisnn-cifar10) to study about how to quantize a model and generated C files.But need to modify the models/cifar10_m7_train_test.prototxt to point to the right input data and then run below command:```shcd ML-examples/cmsisnn-cifar10python nn_quantizer.py --model models/cifar10_m7_train_test.prototxt \ --weights models/cifar10_m7_iter_300000.caffemodel.h5 \ --save models/cifar10_m7.pklpython code_gen.py --model models/cifar10_m7.pkl --out_dir code/m7```m7=True if(m7): model_file ='ML-examples/cmsisnn-cifar10/models/cifar10_m7_train_test.prototxt' weight_file='ML-examples/cmsisnn-cifar10/models/cifar10_m7_iter_300000.caffemodel.h5' genWT='ML-examples/cmsisnn-cifar10/code/m7/weights.h' else: model_file ='ML-examples/cmsisnn-cifar10/models/cifar10_m4_train_test.prototxt' weight_file='ML-examples/cmsisnn-cifar10/models/cifar10_m4_iter_70000.caffemodel.h5' 
genWT='ML-examples/cmsisnn-cifar10/code/m4/weights.h' RunSysCmd('git clone https://github.com/autoas/ML-examples.git') inference_model = 'ML-examples/cmsisnn-cifar10/models/inference.prototxt'Then on need to new a inference model inference.prototxt based on the cifar10_m7_train_test.prototxt by:* 1. Replace the data layer as below:```jsonlayer { name:"data" type:"Input" top:"data" input_param {shape: {dim:1 dim:3 dim:32 dim:32}}}```* 2. And then remove the layer accuracy and loss, and add below softmax layer:```jsonlayer { name: "prob" type: "Softmax" bottom: "ip1" top: "prob"}```# load caffe model caffe.set_mode_cpu() net = caffe.Net(inference_model,weight_file,caffe.TEST)All of the below method is not given the right prediction, I don't know why, need more research.# caffe method 1 #caffe_model_file = '/home/parai/workspace/caffe/examples/cifar10/cifar10_quick.prototxt' #caffe_weight_file = '/home/parai/workspace/caffe/examples/cifar10/cifar10_quick_iter_5000.caffemodel.h5' caffe_model_file = inference_model caffe_weight_file = weight_file #caffeTestImg = '${CAFFE}/examples/images/cat.jpg' caffeTestImg = 'img.png' cmd = 'export CAFFE=${HOME}/workspace/caffe' cmd += ' && export PYTHONPATH=${CAFFE}/python:$PYTHONPATH' cmd += ' && python2 ${CAFFE}/python/classify.py --model_def %s' \ ' --pretrained_model %s' \ ' --center_only %s result'%(caffe_model_file, caffe_weight_file, caffeTestImg) RunSysCmd(cmd) CIFAR10_LABELS_LIST = [ "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck" ] r = np.load('result.npy').tolist()[0] R = list(zip(CIFAR10_LABELS_LIST,r)) print(R) # caffe method 2 if(1): im = Image.open(img) im = im.convert('RGB') im = im.resize((32,32)) else: im = defaults[0] im = np.asarray(im) #fig, axs = plt.subplots(1, 3) #axs[0].imshow(im) #print(im[2][:10]) im = im - (125,123,114) #print(im[2][:10]) im = np.asarray(im.transpose(2,0,1)) #axs[1].imshow(im.transpose(1,2,0)) net.blobs['data'].data[...] = im.astype(np.float32) #axs[2].imshow(im.transpose(1,2,0)+(125,123,114)) out = net.forward() print(out) # save output of each layer RunSysCmd('mkdir -p caffe_out2 caffe_out3 out') for name,blob in net.blobs.items(): d = blob.data if(len(d.shape)==4): d = blob.data.transpose((0,2,3,1)) d.tofile('caffe_out2/%s.raw'%(name)) print('layer %s shape: %s'%(name, blob.data.shape)) # caffe method 3 # run inference, https://www.zhihu.com/question/38107945 transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape}) # change dimensition from HWC to CHW transformer.set_transpose('data', (2,0,1)) # subtract mean mean = np.asarray((125,123,114)) # RGB transformer.set_mean('data', mean) # scale to range 0-255 transformer.set_raw_scale('data', 255) transformer.set_channel_swap('data', (2,1,0)) # if using RGB instead of BGR im=caffe.io.load_image(img) #fig, axs = plt.subplots(1, 2) #axs[0].imshow(im) im = transformer.preprocess('data',im) net.blobs['data'].data[...] 
= im out = net.forward() #axs[1].imshow(im.transpose(1,2,0)) print(out) # load quantized weights weights = {} reWT = re.compile('#define\s+(\w+)\s+\{([-\w\d,]+)\}') with open(genWT) as f: for l in f.readlines(): if(reWT.search(l)): grp = reWT.search(l).groups() name = grp[0] data = eval('['+grp[1]+']') weights[name] = data for name, p in net.params.items(): for i,blob in enumerate(p): d = blob.data print('%s weiths[%s]: max=%s, min=%s, shape=%s'%(name,i,d.max(),d.min(),d.shape)) show(net.params['conv1'][0].data) CONV1_WT = q2f(weights['CONV1_WT'],7) CONV1_BIAS = q2f(weights['CONV1_BIAS'],7) compare(net.params['conv1'][0].data.transpose(0,2,3,1), CONV1_WT) compare(net.params['conv1'][1].data, CONV1_BIAS) CONV2_WT = q2f(weights['CONV2_WT'],8) CONV2_BIAS = q2f(weights['CONV2_BIAS'],8) compare(net.params['conv2'][0].data.transpose(0,2,3,1), CONV2_WT) compare(net.params['conv2'][1].data, CONV2_BIAS) CONV3_WT = q2f(weights['CONV3_WT'],9) CONV3_BIAS = q2f(weights['CONV3_BIAS'],8) compare(net.params['conv3'][0].data.transpose(0,2,3,1), CONV3_WT) compare(net.params['conv3'][1].data, CONV3_BIAS) RunSysCmd('scons --m7') RunSysCmd('./cifar10 img.bin') compare( np.fromfile('caffe_out2/data.raw', dtype=np.float32), q2f(np.fromfile('out/data.raw', dtype=np.int8),7) ) compare( np.fromfile('caffe_out2/conv1.raw', dtype=np.float32), q2f(np.fromfile('out/conv1.raw', dtype=np.int8),7) ) compare( np.fromfile('caffe_out2/conv2.raw', dtype=np.float32), q2f(np.fromfile('out/conv2.raw', dtype=np.int8),7) ) compare( np.fromfile('caffe_out2/conv3.raw', dtype=np.float32), q2f(np.fromfile('out/conv3.raw', dtype=np.int8),7) )study of the cifar10 traning data and test data, try to know how data was feed to caffeimport lmdb #env = lmdb.open('/home/parai/workspace/caffe/examples/cifar10/cifar10_train_lmdb', readonly=True) env = lmdb.open('/home/parai/workspace/caffe/examples/cifar10/cifar10_test_lmdb', readonly=True) RunSysCmd('mkdir -p testimg') with env.begin() as txn: cursor = txn.cursor() for i, (key, value) in enumerate(cursor): if(i!=1):continue datum = caffe.proto.caffe_pb2.Datum() datum.ParseFromString(value) flat_x = np.frombuffer(datum.data, dtype=np.uint8) x = flat_x.reshape(datum.channels, datum.height, datum.width) y = datum.label #Image.fromarray(x.transpose(1,2,0)).save('testimg/%s_%s.png'%(CIFAR10_LABELS_LIST[int(y)],i)) plt.imshow(x.transpose(1,2,0)) break inference_model = 'ML-examples/cmsisnn-cifar10/models/cifar10_m7_train_test.prototxt' weight_file='ML-examples/cmsisnn-cifar10/models/cifar10_m7_iter_300000.caffemodel.h5' caffe.set_mode_cpu() net = caffe.Net(inference_model,weight_file,caffe.TEST) out = net.forward() print(out) im = net.blobs['data'].data.transpose(0,2,3,1)[1] im = im + (125, 123, 114) # RGB im = im.astype(np.uint8) plt.imshow(im)**hu_wiwi_grades - usage / examples** Please also refer to the docstrings of the functions for further information. **Install from author's repo and import**pip install git+https://github.com/NDelventhal/hu_wiwi_grades.git import hu_wiwi_grades as hu***list_sources()***Scrapes URL sources that list grading overviews and returns a dictionary containing the semesters as keys and the URLs as values.hu.list_sources()***scrape_overview()***Scrapes the latest grading overview and returns the overview or a subset based on the entered exam specification. 
The exam arguments defaults to "" (no filtering).hu.scrape_overview()***scrape_overview(exam = "Statistik")***Scrapes the latest grading overview and returns the overview or a subset based on the entered exam specification. In this example solely "Statistik" exams are returned.hu.scrape_overview(exam = "Statistik")**hu.scrape_all_overviews(exam = "Applied Econometric")** Same as above, but instead of solely the latest overview all available historical overviews are pulled. Typically, a few semesters are available.hu.scrape_all_overviews(exam = "Applied Econometric")***scrape_all_overviews()***The function defaults to no exam specification (all exams).hu.scrape_all_overviews()***get_grading(exam="", only_current_semester = True)*** Scrapes the grades from the URLs listed in the overview pages of either only the latest semester (only_current_semester = True) or all (only_current_semester = False). An exam filter may be specified as in the examples above or not. Returns a dataframe listing the number of participants, the examiner and all grades as variables. In case of errors in the extraction process, the link of the unprocessed exam is printed.**Please note:** In case no exam is specified, all exams of all available semesters (currently over 1,000) are processed. This will take a while.df = hu.get_grading(exam="Finance", only_current_semester = False) df.sample(3)***prepare_for_analysis(df)*** Prepares the dataframe output of get_grading() for further analysis, such as visualisations, descriptive statistics or regression analysis.df2 = hu.prepare_for_analysis(df) df2.sample(3)Examples for further analysis **descriptive statistics**kw = "Finance Theory" df = hu.get_grading(exam=kw, only_current_semester = False) df2 = hu.prepare_for_analysis(df) def first_quartile(x): return x.quantile(0.25) def third_quartile(x): return x.quantile(0.75) def failure_rate(x): return "{:.2%}".format((x==5).sum()/len(x)) df2[['exam', 'semester', 'round', 'examiner', 'grade']].groupby(['exam', 'examiner', 'semester', 'round']).agg([first_quartile , 'mean', 'median', third_quartile, 'std', 'var', 'count', failure_rate])**visualisations examples**import seaborn as sns import matplotlib.pyplot as plt import pandas.testing as tm sns.stripplot(x="semester", y="grade", hue="round", data=df2, jitter=True, palette="Set3", dodge=True,linewidth=1,edgecolor='gray').set_title('Grading HU - Exam: ' + kw) ax = sns.boxplot(x="semester", y="grade", hue="round", data=df2,palette="Set3",fliersize=0) han, lab = ax.get_legend_handles_labels() l = plt.legend(han[0:2], lab[0:2], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.).set_title('Round') df = hu.get_grading(only_current_semester = False) df2 = hu.prepare_for_analysis(df) sns.boxplot(y='semester',x='grade',data=df2,orient="exam",hue='round').set_title('Grading Humboldt University - WiWi') plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.).set_title('Round') import statsmodels.formula.api as sm result = sm.ols(formula="grade ~ C(round)", data=df2).fit() print(result.params)Intercept 2.880122 C(round)[T.02] 0.185354 dtype: float64Lambda School Data Science Module 141 Statistics, Probability, and Inference Prepare - examine what's available in SciPyAs we delve into statistics, we'll be using more libraries - in particular the [stats package from SciPy](https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html).from scipy import stats dir(stats) # As usual, lots of stuff here! 
There's our friend, the normal distribution norm = stats.norm() print(norm.mean()) print(norm.std()) print(norm.var()) # And a new friend - t t1 = stats.t(5) # 5 is df "shape" parameter print(t1.mean()) print(t1.std()) print(t1.var())0.0 1.2909944487358056 1.6666666666666667![T distribution PDF with different shape parameters](https://upload.wikimedia.org/wikipedia/commons/4/41/Student_t_pdf.svg)*(Picture from [Wikipedia](https://en.wikipedia.org/wiki/Student's_t-distribution/media/File:Student_t_pdf.svg))*The t-distribution is "normal-ish" - the larger the parameter (which reflects its degrees of freedom - more input data/features will increase it), the closer to true normal.t2 = stats.t(30) # Will be closer to normal print(t2.mean()) print(t2.std()) print(t2.var())0.0 1.0350983390135313 1.0714285714285714Why is it different from normal? To better reflect the tendencies of small data and situations with unknown population standard deviation. In other words, the normal distribution is still the nice pure ideal in the limit (thanks to the central limit theorem), but the t-distribution is much more useful in many real-world situations.History sidenote - this is "Student":![](https://upload.wikimedia.org/wikipedia/commons/4/42/William_Sealy_Gosset.jpg)*(Picture from [Wikipedia](https://en.wikipedia.org/wiki/File:William_Sealy_Gosset.jpg))*His real name is , and he published under the pen name "Student" because he was not an academic. He was a brewer, working at Guinness and using trial and error to determine the best ways to yield barley. He's also proof that, even 100 years ago, you don't need official credentials to do real data science! Live Lecture - let's perform and interpret a t-testWe'll generate our own data, so we can know and alter the "ground truth" that the t-test should find. We will learn about p-values and how to interpret "statistical significance" based on the output of a hypothesis test.# TODO - during class, but please help! # TODO - your code here! survey_data= [0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0] import numpy as np import pandas as pd df = pd.DataFrame(survey_data) df.describe()zero = cokeone = pepsithe mean above = sample mean# histogram helps us see the overall distribution # more 1s than 0s df.plot.hist() # Now with confidence import scipy scipy.stats.ttest_1samp(survey_data, 0.5) scipy.stats.ttest_1samp?? # above tells us that we reject null hypothesys https://homepage.stat.uiowa.edu/~mbognar/applets/t.html <- use statistic and pvalue on two-tailed test (df.mean() - 0.5) / (df.std()* len(survey_data)**(0.5)) # Science: Reproducibility... 
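Before regenerating data, it is worth checking the hand-computed t statistic against `scipy.stats.ttest_1samp`. The denominator is the standard error `std / sqrt(n)`; the quick calculation in the cell above multiplies by `sqrt(n)` instead of dividing, which is why it does not reproduce the ~2.36 statistic. A minimal sketch, assuming `survey_data` from the cells above:

```python
# Sketch: reproduce the t statistic by hand and confirm it matches scipy.
# Standard error = sample std / sqrt(n); ttest_1samp uses ddof=1 internally.
import numpy as np
from scipy import stats

data = np.array(survey_data)                     # defined earlier in this notebook
stderr = data.std(ddof=1) / np.sqrt(len(data))
t_by_hand = (data.mean() - 0.5) / stderr
print(t_by_hand)                                 # ~2.364
print(stats.ttest_1samp(data, 0.5).statistic)    # should agree
```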
import random # creating more coke & pepsi data def make_soda_data(n=50): # Fair version: # return pd.DataFrame([random.randint(0, 1) for _ in range(n)]) # Unfair version: return pd.DataFrame(np.random.binomial(n=1, p=0.6, size=n)) sample_stderr = 0.478518 /np.sqrt(len(survey_data)) sample_mean = 0.660000 null_hypothesis_mean = 0.5 # We want to calculate --> tstat = 2.364321853156195 # our null hypothesis is not that is centered at zero # it's centered at 0.5 # result would be more precise if we were doing a z-test # but this is the kind of calculation that happens in t-statistics # you divide the error by the difference stderr / (sample_mean -0.5) # In the right direction t_stat = (sample_mean - null_hypothesis_mean) / sample_stderr print(t_stat) make_soda_data(n=500).describe() # Running some hypothesis tests t_statistics = [] p_values = [] n_experiments = 10000 for _ in range(n_experiments): df = make_soda_data(n=1000) ttest = scipy.stats.ttest_1samp(df, 0.5) # like a binomial dist with fair coin (0.5) t_statistics.append(ttest.statistic) p_values.append(ttest.pvalue) pd.DataFrame(t_statistics).describe() pd.DataFrame(p_values).describe() dir(random) help(random.choice) random.choice([0, 1, 1]) #Unfair coins # gives us the sum of 100 coin flips np.random.binomial(100, .5) # n, p np.random.binomial(1, 0.6)Assignment - apply the t-test to real dataYour assignment is to determine which issues have "statistically significant" differences between political parties in this [1980s congressional voting data](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records). The data consists of 435 instances (one for each congressperson), a class (democrat or republican), and 16 binary attributes (yes or no for voting for or against certain issues). Be aware - there are missing values!Your goals:1. Load and clean the data (or determine the best method to drop observations when running tests)2. Using hypothesis testing, find an issue that democrats support more than republicans with p < 0.013. Using hypothesis testing, find an issue that republicans support more than democrats with p < 0.014. Using hypothesis testing, find an issue where the difference between republicans and democrats has p > 0.1 (i.e. there may not be much of a difference)Note that this data will involve *2 sample* t-tests, because you're comparing averages across two groups (republicans and democrats) rather than a single group against a null hypothesis.Stretch goals:1. Refactor your code into functions so it's easy to rerun with arbitrary variables2. 
Apply hypothesis testing to your personal project data (for the purposes of this notebook you can type a summary of the hypothesis you formed and tested)data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data', header=None) data = data.replace({'y': 1, 'n': 0, '?': 0.5}) data.describe() data.head() cols = data.columns.tolist() colsRepublicansdata_rep = data[data[0] == 'republican'] data_rep.describe()Democratsdata_dem = data[data[0] == 'democrat'] data_dem.describe()Comparing Republicans VS Democrats# Histogram - Republicans data_rep.hist(); # Histogram - Democrats data_dem.hist();Q1: Using hypothesis testing, find an issue that democrats support more than republicans with p < 0.01data_dem[8].describe() dem_s_stderr = 0.376602 / np.sqrt(len(data_dem[8])) dem_s_mean = 0.823970 dnull_hyp_mean = 0.5 dem_t_stat = (dem_s_mean - dnull_hyp_mean) / dem_s_stderr print(dem_t_stat) scipy.stats.ttest_1samp(data_dem[8], 0.5) scipy.stats.ttest_1samp(data_rep[8], 0.5)Similar Calculation with `stats.ttest_ind`scipy.stats.ttest_ind(data_rep[8], data_dem[8])Q2: Using hypothesis testing, find an issue that republicans support more than democrats with p < 0.01data_rep[4].describe() s_stderr = 0.126611 / np.sqrt(len(data_rep[4])) s_mean = 0.979167 null_hyp_mean = 0.5 rep_t_stat = (s_mean - null_hyp_mean) / s_stderr print(rep_t_stat) scipy.stats.ttest_1samp(data_rep[4], 0.5)Q3: Using hypothesis testing, find an issue where the difference between republicans and democrats has p > 0.1 (i.e. there may not be much of a difference)scipy.stats.ttest_1samp(data_rep[10], 0.5) scipy.stats.ttest_1samp(data_dem[10], 0.5)Stretch Goals 1. Refactor your code into functions so it's easy to rerun with arbitrary variablesdef ttesting(data): scipy.stats.ttest_1samp(data, 0.5)2. Apply hypothesis testing to your personal project data (for the purposes of this notebook you can type a summary of the hypothesis you formed and tested)The graph above articulates that there is a greater number of Private construction projects compared to public construction. During the recession in 2008, you see a dip in private construction, but a slight increase in public. I predicted that private construction would drop, however was surprised to see the increase in public construction.Year = df_CTS['Month-Year'].str[:3] #strip the year from the date column df_CTS['Month'] = Year #add column to df df_CTS df_CTS.groupby(['Month'])['Total Construction'].mean() AVG_Monthly_Total_Construction= df_CTS.groupby(df_CTS.Month)['Total Construction'].transform('mean') #get yearly average df_CTS['AVG_Monthly_Total_Construction'] =Monthly_Total_Construction #add column to df df_drop1=df_CTS.drop_duplicates(subset='Month', keep= "first") #drop duplicates df_drop1 ax = df_drop1.set_index('Month')['AVG_Monthly_Total_Construction'].plot() ax.set_ylabel("Amount of Construction") #ylabel plt.suptitle('Monthly Construction from 2002-2014', fontsize=12) #names titles at axis ax.axvspan(5,8,ymin=0,ymax=300,facecolor='k',alpha = 0.1,label='Summer Months') ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))Tensorflow-keras Functional API When building models with the functional API, layers are callable (on a tensor), and return a tensor as output. These input tensor(s) and output tensor(s) can then be used to define a model. 
For example:import tensorflow as tf from tensorflow.keras import layers, activations tf.__version__ inputs = tf.keras.Input(shape=(32,)) # A layer instance is callable on a tensor, and returns a tensor. hidden = layers.Dense(64, activation='relu')(inputs) hidden = layers.Dense(64, activation='relu')(hidden) predictions = layers.Dense(10, activation='softmax')(hidden) # Instantiate the model given inputs and outputs. model = tf.keras.Model(inputs=inputs, outputs=predictions) model.summary()Model: "model" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, 32)] 0 _________________________________________________________________ dense (Dense) (None, 64) 2112 _________________________________________________________________ dense_1 (Dense) (None, 64) 4160 _________________________________________________________________ dense_2 (Dense) (None, 10) 650 ================================================================= Total params: 6,922 Trainable params: 6,922 Non-trainable params: 0 _________________________________________________________________Fully customizable models can be built by using the Model Subclassing API, You define your own forward pass imperatively in this style, in the body of a class method. For example:num_classes = 9 class MyModel(tf.keras.Model): def __init__(self): super(MyModel, self).__init__() # Define your layers here. self.dense_1 = layers.Dense(32, activation='relu') self.dense_2 = layers.Dense(num_classes, activation='sigmoid') def call(self, inputs): # Define your forward pass here, # using layers you previously defined in `__init__` x = self.dense_1(inputs) return self.dense_2(x)Coding Challenge 2: Natural Language Processing A common task in NLP is to determine the similarity between documents or words. In order to facilitate the comparison between documents or words, you will leverage the learnings from Coding Challenge 1 to create vectors. Once you have a document term matrix, comparisons are possible since you can measure the difference between the numbers.In this Coding Challenge, you will utilize the "**Gensim**" library, which is a free Python library to determine document similarity.**"Gensim" Reference**: https://radimrehurek.com/project/gensim/ **Install Gensim**:# https://radimrehurek.com/gensim/install.html !pip install --upgrade gensim import gensim**Install NLTK:**# Import the NLTK package import nltk # Get all the data associated with NLTK – could take a while to download all the data nltk.download('all')[nltk_data] Downloading collection 'all' [nltk_data] | [nltk_data] | Downloading package abc to /content/nltk_data... [nltk_data] | Package abc is already up-to-date! [nltk_data] | Downloading package alpino to /content/nltk_data... [nltk_data] | Package alpino is already up-to-date! [nltk_data] | Downloading package biocreative_ppi to [nltk_data] | /content/nltk_data... [nltk_data] | Package biocreative_ppi is already up-to-date! [nltk_data] | Downloading package brown to /content/nltk_data... [nltk_data] | Package brown is already up-to-date! [nltk_data] | Downloading package brown_tei to [nltk_data] | /content/nltk_data... [nltk_data] | Package brown_tei is already up-to-date! [nltk_data] | Downloading package cess_cat to /content/nltk_data... [nltk_data] | Package cess_cat is already up-to-date! [nltk_data] | Downloading package cess_esp to /content/nltk_data... 
[nltk_data] | Package cess_esp is already up-to[...]**Import the requiste NLTK packages:**#Import word tokenizer from nltk.tokenize import word_tokenize**Dataset:**#For the purposes of this challenge, each line represents a document. In all, there are 8 documents raw_documents = ['The dog ran up the steps and entered the owner\'s room to check if the owner was in the room.', 'My name is , commander of the Machine Learning program at Lambda school.', 'I am creating the curriculum for the Machine Learning program and will be teaching the full-time Machine Learning program.', 'Machine Learning is one of my favorite subjects.', 'I am excited about taking the Machine Learning class at the Lambda school starting in April.', 'When does the Machine Learning program kick-off at Lambda school?', 'The batter hit the ball out off AT&T park into the pacific ocean.', 'The pitcher threw the ball into the dug-out.']**Step 1**: **Create a document that contains a list of tokens**tokens = [word_tokenize(doc) for doc in raw_documents]**Step 2: Use the document to create a dictionary - a dictionary maps every word to a number**dct = gensim.corpora.Dictionary(tokens)**Step 3: Convert the list of tokens from the document (created above in Step 1) into a bag of words. The bag of words highlights the term frequency i.e. each element in the bag of words is the index of the word in the dictionary and the of times it occurs**corpus = [dct.doc2bow(doc) for doc in tokens] print(corpus)[[(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 2), (10, 1), (11, 2), (12, 1), (13, 4), (14, 1), (15, 1), (16, 1)], [(1, 1), (13, 1), (17, 1), (18, 1), (19, 1), (20, 1), (21, 1), (22, 1), (23, 1), (24, 1), (25, 1), (26, 1), (27, 1), (28, 1), (29, 1), (30, 1)], [(1, 1), (3, 1), (13, 3), (20, 2), (21, 2), (29, 2), (31, 1), (32, 1), (33, 1), (34, 1), (35, 1), (36, 1), (37, 1), (38, 1), (39, 1)], [(1, 1), (20, 1), (21, 1), (26, 1), (28, 1), (40, 1), (41, 1), (42, 1), (43, 1)], [(1, 1), (8, 1), (13, 2), (19, 1), (20, 1), (21, 1), (24, 1), (30, 1), (31, 1), (32, 1), (44, 1), (45, 1), (46, 1), (47, 1), (48, 1), (49, 1)], [(13, 1), (19, 1), (20, 1), (21, 1), (24, 1), (29, 1), (30, 1), (50, 1), (51, 1), (52, 1), (53, 1)], [(1, 1), (2, 1), (13, 2), (54, 1), (55, 1), (56, 1), (57, 1), (58, 1), (59, 1), (60, 1), (61, 1), (62, 1), (63, 1), (64, 1), (65, 1)], [(1, 1), (2, 1), (13, 2), (57, 1), (60, 1), (66, 1), (67, 1), (68, 1)]]**Step 4: Use the "*Gensim*" library to create a TF-IDF module for the bag of words**model = gensim.models.TfidfModel(corpus)**Step 5: a) Output the 5th document, b) Output the bag of words for the fifth document i.e. term frequency, c) Review the Inverse Document Frequency (IDF) for each term in the bag of words for the 5th document**print('Document:', raw_documents[4]) print('Term Frequency:', corpus[4]) print('Tfidf:', model[corpus[4]])Document: I am excited about taking the Machine Learning class at the Lambda school starting in April. 
Term Frequency: [(1, 1), (8, 1), (13, 2), (19, 1), (20, 1), (21, 1), (24, 1), (30, 1), (31, 1), (32, 1), (44, 1), (45, 1), (46, 1), (47, 1), (48, 1), (49, 1)] Tfidf: [(1, 0.02253010613488428), (8, 0.2339027435896511), (13, 0.04506021226976856), (19, 0.16549057668178024), (20, 0.07930143947845378), (21, 0.07930143947845378), (24, 0.16549057668178024), (30, 0.16549057668178024), (31, 0.2339027435896511), (32, 0.2339027435896511), (44, 0.35085411538447664), (45, 0.35085411538447664), (46, 0.35085411538447664), (47, 0.35085411538447664), (48, 0.35085411538447664), (49, 0.35085411538447664)]**Step 6: Determine document similarity** - Identify the most similar document and the least similar document to the body of text below.*Good Reference for review*: https://radimrehurek.com/gensim/similarities/docsim.html# Step 6 # Document to compare: "Machine Learning at Lambda school is awesome" from gensim.similarities import MatrixSimilarity test_doc = "Machine Learning at Lambda school is awesome" query = model[dct.doc2bow(word_tokenize(test_doc))] index = MatrixSimilarity(model[corpus]) sims = index[query] most = sims.argmax() least = sims.argmin() print('Most similar document:\n{}\ \nLeast similar document:\n{}'.format(raw_documents[most], raw_documents[least]))Most similar document: My name is Thomson Comer, commander of the Machine Learning program at Lambda school. Least similar document: The dog ran up the steps and entered the owner's room to check if the owner was in the room.Pandas is a Python package providing fast, flexible, and expressive data structures designed to make working with structured (tabular, multidimensional, potentially heterogeneous) and time series data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical, real world data analysis in Python. Pandas is designed to make it easier to work with structured data. Most of the analyses you might perform will likely involve using tabular data, e.g., from .csv files or relational databases (e.g., SQL). The DataFrame object in pandas is "a two-dimensional tabular, column-oriented data structure with both row and column labels."Pandas Cheatsheet: https://assets.datacamp.com/blog_assets/PandasPythonForDataScience.pdfHere are just a few of the things that pandas does well:* Robust IO tools for loading data from flat files (CSV and delimited), Excel files, databases, and saving / loading data from the ultrafast HDF5 format* Easy handling of missing data (represented as NaN) in floating point as well as non-floating point data* Size mutability: columns can be inserted and deleted from DataFrame and higher dimensional objects* Automatic and explicit data alignment: objects can be explicitly aligned to a set of labels, or the user can simply ignore the labels and let Series, DataFrame, etc. 
automatically align the data for you in computations* Powerful, flexible group by functionality to perform split-apply-combine operations on data sets, for both aggregating and transforming data* Make it easy to convert ragged, differently-indexed data in other Python and NumPy data structures into DataFrame objects* Intelligent label-based slicing, fancy indexing, and subsetting of large data sets* Intuitive merging and joining data sets* Flexible reshaping and pivoting of data sets* Hierarchical labeling of axes (possible to have multiple labels per tick)** Time series-specific functionality: date range generation and frequency conversion, moving window statistics, moving window linear regressions, date shifting and lagging, etc.If you're curious:The pandas name itself is derived from panel data, an econometrics term for multidimensional structured data sets, and Python data analysis itself. After getting introduced, you can consult the full pandas documentation. https://pandas.pydata.org/pandas-docs/stable/?v=20190919220335 Today we will cover * Introduction of series and dataframe* Load csv, excel and json file from location* Saving file to location#Import package import pandas as pd import numpy as np # Version of pandas pd.__version__ # Help help(pd.DataFrame.sum) # Can also refer https://pandas.pydata.org/pandas-docs/stable/?v=20190913225125Help on function sum in module pandas.core.frame: sum(self, axis=None, skipna=None, level=None, numeric_only=None, min_count=0, **kwargs) Return the sum of the values for the requested axis. This is equivalent to the method ``numpy.sum``. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. skipna : bool, default True Exclude NA/null values when computing the result. level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a Series. numeric_only : bool, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. min_count : int, default 0 The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the resu[...]Introduction of series and dataframe Series: One dimension listData Frame: A Data frame is a two-dimensional data structure, i.e., data is aligned in a tabular fashion in rows and columns. Pandas DataFrame consists of three principal components, the data, rows, and columns.# Series s = pd.Series([2, 3, 5, 6, 8],index=['a', 'b', 'd','e','f']) #print(s.astype('int')) print(s[0]) print(s['a']) #Data Frame df = pd.DataFrame(np.random.randint(4, 6,(3,4)), columns=['Ax','B','C','D']) print(df) df['Ax'] # initialize list of lists data = np.array([['amar', 10], ['akbar', 15], ['anthony', 14]]) print(data.shape) # Create the pandas DataFrame df = pd.DataFrame(data, columns = ['Name', 'Age']) # print dataframe. df # intialise data of lists. data = {'Name':['Tom', 'nick', 'don', 'harry'], 'Age':[20, 21, 19, 18]} # Create DataFrame df = pd.DataFrame(data) # Print the output. df # Intialise data to Dicts of series. d = {'one' : pd.Series([10, 20, 30, 40], index =['a', 'b', 'c', 'd']), 'two' : pd.Series([10, 20, 30, 40], index =['a', 'b', 'c', 'e'])} # creates Dataframe. df = pd.DataFrame(d) # print the data. df # initialise data of lists. data = {'Name':['Tom', 'don', 'nick', 'juli'], 'marks':[99, 98, 95, 90]} # Creates pandas DataFrame. 
df = pd.DataFrame(data, index =['rank1', 'rank2', 'rank3', 'rank4']) # print the data df # # Initialise data to lists. # data = [{'a': 1, 'b': 2, 'c':3}, {'a':10, 'b': 20, 'c': 30}] # # Creates DataFrame. # df = pd.DataFrame(data) # # Print the data # df # Exmaple if number of columns are different data = [{'b': 2, 'c':3}, {'a': 10, 'b': 20, 'c': 30}] df = pd.DataFrame(data, index =['first', 'second']) df # List1 Name = ['tom', 'krish', 'nick', 'juli'] # List2 Age = [ 34,54,67,np.nan ] # get the list of tuples from two lists. # and merge them by using zip(). list_of_tuples = list(zip(Name, Age)) # Assign data to tuples. # list_of_tuples # ## Converting lists of tuples into pandas Dataframe. df = pd.DataFrame(list_of_tuples, columns = ['Name', 'Age'],index=list('abcd')) # ['a','b','c','d'] # Print data. df df.index df = pd.DataFrame(np.random.randn(15, 4), columns=['A','B','C','D']) df # Head, tail, columns, index df.head() # First 5 values print(df.tail(2)) # Last 5 values print(df.columns) print(df.index) print(df) print(df.info()) RangeIndex: 15 entries, 0 to 14 Data columns (total 4 columns): A 15 non-null float64 B 15 non-null float64 C 15 non-null float64 D 15 non-null float64 dtypes: float64(4) memory usage: 560.0 bytes NoneRenamedf df.rename({'Name':'Rename'},axis=1,inplace = True) # df df2=df.rename({'Name':'Rename'},axis=1) df2Load csv, excel and Json file from location Loading CSV filecols=['id', 'belongs_to_collection', 'budget', 'genres', 'homepage', 'imdb_id', 'original_language', 'original_title', 'overview', 'popularity', 'poster_path', 'production_companies', 'production_countries', 'release_date', 'runtime', 'spoken_languages', 'status', 'tagline', 'title', 'Keywords', 'cast', 'crew', 'revenue'] #C:\Users\koser\Documents\Floatint Courses\Machine Learning From Scratch\C01L09- Chug Data, Spit Frames\data df=pd.read_csv('./data/train.csv', header=None,names=cols,sep=',') # df.head() df.columns type(df['belongs_to_collection']) type(df)Loading excel file# Excel file file=pd.read_excel('./data/data_info.xlsx',sheet_name='train') # By default first sheet type(file) file.info() RangeIndex: 6 entries, 0 to 5 Data columns (total 2 columns): Columns 6 non-null object Description 6 non-null object dtypes: object(2) memory usage: 176.0+ bytesLoading Json filedf = pd.read_json("./data/students.json",lines= True) type(df) print(df.head()) print(df.tail())_id exam homework name quiz 0 0 1.5 35.874035 12 1 1 2.8 36.000000 15 2 2 3.5 40.000000 10 3 3 3.6 49.000000 20 4 4 3.7 54.000000 20 _id exam homework name quiz 6 6 4.8 100.0 16 7 7 3.2 54.0 17 8 8 3.1 43.0 13 9 9 4.6 90.0 19 10 10 3.9 67.0 18Topic 3: Saving file to location Writing file to CSVdf.to_csv('./data/df_batch2.csv',sep=',',index=True,header=True)Writing file to Excel#df.to_excel('C:/Users/koser/Documents/Floatint Courses/Machine Learning From Scratch/C01L09- Chug Data, Spit Frames/data/df_batch2.xlsx',sheet_name='batch1') #Multiple sheet in one sheet with pd.ExcelWriter('./data/output_batch1.xlsx') as writer: df.to_excel(writer, sheet_name='Sheet_name_1_batch') df.to_excel(writer, sheet_name='Sheet_name_2')Writing file to jsondf # Converting to json df.to_json('./data/df_batch2.json',orient='index') # orient='index' , orient='columns'Writing file to picklePython pickle module is used for serializing and de-serializing a Python object structure. Any object in Python can be pickled so that it can be saved on disk. What pickle does is that it “serializes” the object first before writing it to file. 
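As a quick illustration (a standard-library sketch, independent of pandas), any picklable Python object can be written to disk and read back:

```python
import pickle

record = {'Name': ['Tom', 'nick'], 'Age': [20, 21]}   # any picklable object
with open('record.pkl', 'wb') as f:
    pickle.dump(record, f)             # serialize ("pickle") to a file
with open('record.pkl', 'rb') as f:
    restored = pickle.load(f)          # de-serialize ("unpickle")
assert restored == record
```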
Pickling is a way to convert a python object (list, dict, etc.) into a character stream. The idea is that this character stream contains all the information necessary to reconstruct the object in another python script.df.to_pickle("./data/df_batch.pkl") df=pd.read_pickle('./data/df_batch.pkl') dfRunning, Debugging, Testing & Packaging!code ./1-helloconnectedworldIs Cutthroat Kitchen Predictable?%matplotlib inline import csv from matplotlib import pyplot as plt with open('../data/cutthroat_kitchen.csv', 'rb') as csvfile: cutthroat = csv.reader(csvfile, delimiter=',') count = 0 first_round_pre_commercial_order_count_arr = [] first_round_post_commercial_order_count_arr = [] for row in cutthroat: chef_arr = [] count += 1 # Skip header if (count == 1): continue chef_arr.append(row[1]) chef_arr.append(row[2]) chef_arr.append(row[3]) chef_arr.append(row[4]) ######################################################################### # ROUND ONE ######################################################################### first_round_loser = row[6] first_round_alton_talking_pre_commercial_order_arr = list(row[8]) first_round_alton_talking_post_commercial_order_arr = list(row[9]) first_round_pre_commercial_order_arr = list(row[10]) first_round_post_commercial_order_arr = list(row[11]) for i in range(0, len(first_round_pre_commercial_order_arr)): if (first_round_pre_commercial_order_arr[i] == first_round_loser): first_round_pre_commercial_order_count_arr.append(i + 1) for i in range(0, len(first_round_alton_talking_pre_commercial_order_arr)): if (first_round_alton_talking_pre_commercial_order_arr[i] == first_round_loser): first_round_pre_commercial_order_count_arr.append(i + 1) for i in range(0, len(first_round_post_commercial_order_arr)): if (first_round_post_commercial_order_arr[i] == first_round_loser): first_round_post_commercial_order_count_arr.append(i + 1) for i in range(0, len(first_round_alton_talking_post_commercial_order_arr)): if (first_round_alton_talking_post_commercial_order_arr[i] == first_round_loser): first_round_post_commercial_order_count_arr.append(i + 1) second_round_loser = row[12] final_round_loser = row[18] print first_round_pre_commercial_order_count_arr print first_round_post_commercial_order_count_arr plt.hist(first_round_pre_commercial_order_count_arr, histtype='bar', stacked=True, color='blue', label='pre-commercial') plt.hist(first_round_post_commercial_order_count_arr, histtype='bar', stacked=True, color='green', label='post-commercial') plt.legend() plt.show() with open('../data/cutthroat_kitchen.csv', 'rb') as csvfile: cutthroat = csv.reader(csvfile, delimiter=',') count = 0 second_round_pre_commercial_order_count_arr = [] second_round_post_commercial_order_count_arr = [] for row in cutthroat: chef_arr = [] count += 1 # Skip header if (count == 1): continue chef_arr.append(row[1]) chef_arr.append(row[2]) chef_arr.append(row[3]) chef_arr.append(row[4]) ######################################################################### # ROUND TWO ######################################################################### second_round_loser = row[12] second_round_alton_talking_pre_commercial_order_arr = list(row[14]) second_round_alton_talking_post_commercial_order_arr = list(row[15]) second_round_pre_commercial_order_arr = list(row[16]) second_round_post_commercial_order_arr = list(row[17]) for i in range(0, len(second_round_pre_commercial_order_arr)): if (second_round_pre_commercial_order_arr[i] == second_round_loser): second_round_pre_commercial_order_count_arr.append(i + 1.1) for i 
in range(0, len(second_round_alton_talking_pre_commercial_order_arr)): if (second_round_alton_talking_pre_commercial_order_arr[i] == second_round_loser): second_round_pre_commercial_order_count_arr.append(i + 1.1) for i in range(0, len(second_round_post_commercial_order_arr)): if (second_round_post_commercial_order_arr[i] == second_round_loser): second_round_post_commercial_order_count_arr.append(i + 1) for i in range(0, len(second_round_alton_talking_post_commercial_order_arr)): if (second_round_alton_talking_post_commercial_order_arr[i] == second_round_loser): second_round_post_commercial_order_count_arr.append(i + 1) final_round_loser = row[18] plt.hist(second_round_pre_commercial_order_count_arr, color='blue', label='pre-commercial') plt.hist(second_round_post_commercial_order_count_arr, color='green', label='post-commercial') plt.legend() plt.show() with open('../data/cutthroat_kitchen.csv', 'rb') as csvfile: cutthroat = csv.reader(csvfile, delimiter=',') count = 0 final_round_order_count_arr = [] for row in cutthroat: chef_arr = [] count += 1 # Skip header if (count == 1): continue chef_arr.append(row[1]) chef_arr.append(row[2]) chef_arr.append(row[3]) chef_arr.append(row[4]) ######################################################################### # FINAL ROUND ######################################################################### final_round_loser = row[18] final_round_alton_talking_order_arr = list(row[20]) for i in range(0, len(final_round_alton_talking_order_arr)): if (final_round_alton_talking_order_arr[i] == second_round_loser): final_round_order_count_arr.append(i + 1.1) plt.hist(final_round_order_count_arr, color='blue', label='pre-commercial') plt.legend() plt.show()Project 1 - Working with list and dictionary (DEADLINE: 06.00 WIB 8 JUNI 2020)Pada project ini peserta pelatihan python akan membuat code-code untuk mengolah `list` dan `dictionary` Project items yang dibutuhkan:- Pemahaman dan pembuatan `fungsi`, `variabel`, dan `tipe data`- pemahaman `list` dan `dictionary`- logika dasar pemrograman**Note: Tidak diperkenankan untuk menggunakan library/module lain, kecuali disediakan atau didefinisikan**>>**BOLEH BERDISKUSI TANPA MEMBERIKAN JAWABAN!!!** 1. Buatlah fungsi `letter_catalog` dengan sebuah positional argument berupa list dan keyword argument `letter` untuk nilai default 'A'. Fungsi `letter_catalog` akan mengembalikan sebuah list yang berisi nama-nama buah yang dimulai dengan huruf yang ada keyword argument `letter`. Jika tidak ada item di list inputan tersebut yang diawali dengan huruf yang didefinisikan di keyword `letter` maka fungsi mengembalikan list kosong.#Graded def letter_catalog(items,letter='A'): newList =[] for comp in items : if comp[0] == letter: newList += [comp] return newList # Cek output kode anda letter_catalog(['Apple','Avocado','Banana','Blackberries','Blueberries','Cherries'],letter='C')Expected output:```['Apple', 'Avocado']``` 2. Buatlah fungsi `counter_item` yang memiliki sebuah input argument berupa list. Fungsi ini mengembalikan sebuah dictionary yang menghitung jumlah buah dalam list input, dengan `key` berupa nama buah tersebut dan `value` berupa jumlah nama buah tersebut muncul di list input.#Graded def counter_item(items): dictBuah ={} for buah in items : dictBuah[buah] = dictBuah.get(buah,0)+1 return dictBuah # MULAI KODEMU DI SINI # Cek output kode anda counter_item(['Apple','Apple','Apple','Blueberries','Blueberries','Blueberries'])Expected output:```{'Apple': 3, 'Blueberries': 3}``` 3. 
Di bawah ini sudah ada tiga variables `fruits`, `prices`, dan `chart`. - Buatlah sebuah dictionary yang berupa daftar harga buah dengan `key` berupa nama buah di variable `fruits` dan dengan `value` berupa harga dari buah tersebut di variable `price` (sudah diurutkan sesuai dengan nama-nama buah di varibale `fruits`, kemudian Dictionary tersebut disimpan di dalam variable `fruit_price`.- Selanjutnya, Buatlah fungsi `total_price` dengan dua input yaitu: 1) dictionary yang merupakan keluaran dari fungsi `counter_item` dan 2) dictionary harga buah `fruit_price`. Fungsi ini mengeluarkan sebuah total harga dari daftar buah di dictionary keluaran dari `counter_item`.**Hint**: Gunakan fungsi `counter_item` di soal nomor 2.#Graded # dua variable berikut jangan diubah fruits = ['Apple','Avocado','Banana','Blackberries','Blueberries','Cherries','Date Fruit','Grapes','Guava','Jackfruit','Kiwifruit'] prices = [6,5,3,10,12,7,14,15,8,7,9] # list buah chart = ['Blueberries','Blueberries','Grapes','Apple','Apple','Apple','Blueberries','Guava','Jackfruit','Blueberries','Jackfruit'] #counterBuah = counter_item(chart) # MULAI KODEMU DI SINI fruit_price = dict(zip(fruits,prices)) def total_price(dcounter,fprice): totalBelanja = 0 for buah, jumlah in dcounter.items(): totalBelanja += jumlah*fprice[buah] return totalBelanja # MULAI KODEMU DI SINI # Cek output kode anda total_price(counter_item(chart),fruit_price)Expected output:```103``` 4. Buatlah fungsi `discounted_price` dengan dua positional arguments input dari keluaran fungsi totalprice dan discount dalam persen(80 berarti 80%, dst), dan satu keyword argument `minprice` yang menunjukkan hanya dengan minimum price tersebut yang hanya di-discount, set default value `minprice` ke 100. Fungsi tersebut menghitung nilai harga akhir setelah di-discount sebesar discount variable (argument kedua). Untuk harga total yang kurang dari `minprice` maka keluarannya sama dengan harga total tersebut tanpa discount.#Graded def discounted_price(total,discount,minprice=100): if total >= minprice : hargaDiskon = total-(total*discount/100) return hargaDiskon else : return total # MULAI KODEMU DI SINI # Cek output kode anda discounted_price(total_price(counter_item(chart),fruit_price),10,minprice=100)Expected output:```92.7``` 5. Buatlah fungsi `print_summary` dengan 2 posisional argument input, list nama2 buah (cth: seperti variable `chart`) dan dictionary harga buah (cth: seperti variable `fruit_price`, yang mengeluarkan tulisan ringakasan dari pembelian buah seperti berikut ini.cth keluaran:```3 Apple : 184 Blueberries : 481 Grapes : 151 Guava : 82 Jackfruit : 14total : 103discount price : 92.7```dengan format `jumlahbuah`\`namabuah`\`totalhargabuah\n` dengan nama nama buah berurutan sesuai abjad `ascending`, kemudian diikuti total harga semuanya dan discount price dengan besaran 10% dengan minimum harga pembelian 100.**Note**: dipisahkan hanya satu spasi. 
Output harus sama dengan contoh diatas untuk pemanggilan fungsi berikut ini.```print_summary(chart,fruit_price)```#Graded def print_summary(items,fprice): counterBuah = counter_item(sorted(items)) for buah, jumlah in counterBuah.items(): print(jumlah, buah, ":", jumlah*fprice[buah]) print("total :", total_price(counterBuah,fprice)) print("discount price :", discounted_price(total_price(counterBuah,fprice),10,minprice=100)) # MULAI KODEMU DI SINI # Cek output kode anda print_summary(chart,fruit_price)3 Apple : 18 4 Blueberries : 48 1 Grapes : 15 1 Guava : 8 2 Jackfruit : 14 total : 103 discount price : 92.7Expected output:```3 Apple : 184 Blueberries : 481 Grapes : 151 Guava : 82 Jackfruit : 14total : 103discount price : 92.7``` Cara Submit0. Isikan variable priority dengan nilai yang lebih besar dari submisi sebelumnya untuk revisi.1. Tuliskan email akun netacad anda di variable email (tentunya dengan tanda kutip) 2. Copy-paste **SEMUA** cell yang ada komentar `Graded` ke cell code di bawah ini.3. Simpan kode tersebut sebagai p1.py4. Submit file ke google form berikut: https://forms.gle/6AQ8BYM8UoMK7kJg65. Pengisian form membutuhkan anda login ke akun google anda.# nama file p1.py # Isikan email anda dan copy semua cell code yang dengan komentar #Graded # untuk revisi dan resubmisi sebelum deadline # silakan di resubmit dengan nilai variable priority yang lebih besar dari # nilai priority submisi sebelumnya # JIKA TIDAK ADA VARIABLE priority DIANGGAP priority=0 priority = 0 #netacad email cth: '' email='' # copy-paste semua #Graded cells YANG SUDAH ANDA KERJAKAN di bawah iniInitial data exploration for my datasets Note: this notebook is just for Noah's purposes and is not tidied up More information about this project is available in my github repo here: https://github.com/Noah-Baustin/sf_crime_data_analysis#import modules import pandas as pd #import altair as alt # import csv into a variable historical_data = pd.read_csv('raw_data/SFPD_Incident_Reports_2003-May2018/Police_Department_Incident_Reports__Historical_2003_to_May_2018.csv', dtype=str)Let's take a look at our data:historical_data historical_data.info() RangeIndex: 2129525 entries, 0 to 2129524 Data columns (total 35 columns): # Column Dtype --- ------ ----- 0 PdId object 1 IncidntNum object 2 Incident Code object 3 Category object 4 Descript object 5 DayOfWeek object 6 Date object 7 Time object 8 PdDistrict object 9 Resolution object 10 Address [...]I also see that I've got some date columns that need to be reformatted: for sure 'Date'. Let's reformat that:historical_data['Date'] = pd.to_datetime(historical_data['Date']) historical_data.head()Let's take a closer look at our columns:historical_data.columnsWe can see here that there's a bunch of extra columns in the data that's not included in the documentation and are not essential to our analysis. Let's get rid of those columns.#get rid of all those extra columns we don't need historical_data = historical_data[['PdId', 'IncidntNum', 'Incident Code', 'Category', 'Descript', 'DayOfWeek', 'Date', 'Time', 'PdDistrict', 'Resolution', 'Address', 'X', 'Y', 'location']].copy() historical_data.columnsNow that our columns are cleaned up and our dates are formatted correctly, let's take a look at our date range included in the data:historical_data['Date'].min() historical_data['Date'].max()We can see here that it appears that we most likely have complete data beginning in 2003. 
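To check coverage year by year rather than just the endpoints, one option (a minimal sketch, assuming the `historical_data` frame with the parsed `Date` column from above) is to count incidents per calendar year:
```
# Tally incidents per calendar year; a partial year (e.g. 2018) will stand out
incidents_per_year = historical_data['Date'].dt.year.value_counts().sort_index()
print(incidents_per_year)
```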
BUT we see here that the 2018 data is incomplete, so if we want to do an annual analysis, we'll need to exclude 2018. We've got 2,129,525 entries. Let's check out our three different columns that have incident indentification codes to see how many unique values are in each:historical_data['IncidntNum'].nunique() historical_data['Incident Code'].nunique() historical_data['PdId'].nunique()Our documentation tells us that the PdId column is equivelant to the row_id column in the new data (see downloaded pdf titled: Change Notice - Police Incident Reports). And the documentation for the newer dataset tells us that the row_id is the unique identifier for each row. It's a good sign that there's exaclty as many PdId unique values as there are rows in the dataset.It also makes sense that there are duplicate values in the IncidntNum column. The IncidntNum refers to the case number. So if a supplemental report was filed after the incident was initially entered in this dataset, it would show up with a new PdId BUT the IncidntNum would be the same. But that does mean that I need to make sure I'm not counting the same incident multiple times if it shows up in this dataset multiple times. Most likely, my analysis will focus on a unique set of IncidntNum. Let's double check that there's no duplicate rows:historical_data[historical_data.duplicated()]No duplicate rows, that's great. Let's find out how many duplicated IncidntNum values we have:historical_data[historical_data['IncidntNum'].duplicated()] historical_data['IncidntNum'].nunique() + len(historical_data[historical_data['IncidntNum'].duplicated()])There's 425,899 duplicates for 'IncidntNum'. We we add that to the number of unique values (above) it is equal to the number of rows in our dataset... that tells us that 1,703,626 is the actual number of incidents that we're working with. NOTE: explain this step better b/c confusion in Soo meeting Let's take a look at some of our duplicates: First we'll create a dataframe with our duplicated cases:dupe_cases = historical_data[historical_data['IncidntNum'].duplicated()].copy()Now we create a `list` or `array` (the numpy version of a list) of those unique PdId's:dupe_IncidentNum = dupe_cases['IncidntNum'].to_list()Finally, we're displaying a subset our of original data that just includes the duplicate incident numbers:dupe_cases_full = historical_data[historical_data['IncidntNum'].isin(dupe_IncidentNum)].sort_values(by='IncidntNum') dupe_cases_full.head(30)We can see here anecdotely that additional entries for duplicated IncidentNum's include additional entries in the descript column in some cases, but sometimes they remain the same. come back to this! We're going to want to isolate our incidents that include marijuana crimes. So let's take a look at the unique values in the two columns that might contain information about marijuana crimes:historical_data['Category'].unique()We can see here that there's a DRUG/NARCOTIC category, so that's probably where we're going to find the marijuana crimes. But nothing specific about marijuana here. That's going to show up in our 'Descript' column. 
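As a quick sanity check before filtering, one could peek at the description strings inside the DRUG/NARCOTIC category (a small sketch, assuming the `historical_data` frame above):
```
# Sample a few 'Descript' values within the DRUG/NARCOTIC category
drug_descripts = historical_data.loc[
    historical_data['Category'] == 'DRUG/NARCOTIC', 'Descript'
].unique()
print(len(drug_descripts), drug_descripts[:10])
```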
There's too many unique values in that column to list, so let's create a subset:#create dataframe with all our marijuana incidents historical_data_marijuana = historical_data[ historical_data['Descript'].str.contains('MARIJUANA') ].copy()Soo meeting notesdrug_narcotic_incidents = historical_data[ historical_data['Category'] == 'DRUG/NARCOTIC' ].reset_index(drop=True) # needed if I decide to answer my 'extra' question: Compare marijuana arrests to other types of crimes, like narcotics. #drug_narcotic_incidents.to_csv('drug_narcotic_incidents_historical.csv', index=False) drug_narcotic_incidents.head() #how to isolate the marijuana duplicate incidentnum's historical_data[ historical_data['Descript'].str.contains('MARIJUANA') & historical_data['IncidntNum'].isin(dupe_IncidentNum) ].sort_values(by='IncidntNum') historical_data[historical_data['IncidntNum'] == "000123436"] #show info about the first row in the dataframe [iloc means index location] incident_duplicate.iloc[0] #what unique values are in this marijuana data frame for types of arrests historical_data_marijuana['Descript'].unique() #dropping duplicate incident numbers #going to need to write an explainer historical_data_marijuana[historical_data_marijuana['IncidntNum'].duplicated()] historical_data_marijuana[historical_data_marijuana['IncidntNum'] == '160676737'] # export the df to a csv #historical_data_marijuana.to_csv("historical_data_marijuana.csv", index=False)Bring in the more recent datasetnewer_data.columns historical_data.columns # import csv into a variable newer_data = pd.read_csv('raw_data/SFPD_Incident_Reports_2018-10.14.21/Police_Department_Incident_Reports__2018_to_Present(1).csv', dtype=str) historical_data['Resolution'].unique() newer_data.columns newer_data['Incident Description'] = newer_data['Incident Description'].str.upper()Now we need to figure out where the marijuana cases are organized in the newer datasetfind_marijuana_1 = newer_data[ newer_data['Incident Description'].str.contains('MARIJUANA') ].reset_index(drop=True) find_marijuana_1 find_marijuana_1['Incident Description'].unique()Unfortunately this shows us that the Incident Description column does not contain information categorizing crimes by the term 'marijuana', unlike the older data.find_marijuana_2 = newer_data[ newer_data['Incident Subcategory'].str.contains('MARIJUANA', na=False) ].reset_index(drop=True) find_marijuana_2No marijuana strings in the incident subcategory either.find_marijuana_3 = newer_data[ newer_data['Incident Category'].str.contains('MARIJUANA', na=False) ].reset_index(drop=True) find_marijuana_3 find_marijuana_4 = newer_data[ newer_data['Report Type Description'].str.contains('MARIJUANA', na=False) ].reset_index(drop=True) find_marijuana_4Frustratingly I'm not finding any marijuana information in any of these columnsupper_test = newer_data upper_test = upper_test.upper() test = 'hello' print(test) test.upper() newer_data.columns incident_descript_upper = newer_data['Incident Subcategory'].str.upper() find_marijuana_5 = incident_descript_upper.str.contains('MARIJUANA').reset_index(drop=True) find_marijuana_5.unique() newer_data.info() newer_data['Incident Description'].unique()Compare EC orphan drugs with all EC registered active drugsSource of data: 1. Active: https://ec.europa.eu/health/documents/community-register/html/reg_od_act.htm?sort=n2. Withdrawn, suspended, expired, not renewed: https://ec.europa.eu/health/documents/community-register/html/reg_hum_nact.htm?sort=a3. 
Rejected: https://ec.europa.eu/health/documents/community-register/html/reg_hum_refus.htm Import data EC all drugsimport os import numpy as np import pandas as pd path = os.path.dirname(os.getcwd()) data_all_drugs = pd.read_csv(path + '/data/ec_all_drugs.csv') data_all_drugs['EU #'] = data_all_drugs['EU #'].astype(str) data_all_drugs['Brand name'] = data_all_drugs['Brand name'].str.upper() data_all_drugsEC orphan drug designationspath = os.path.dirname(os.getcwd()) data_od_raw = pd.read_csv(path + '/data/ec_orphan_designations.csv', header=2) data_od_raw['EU #'] = data_od_raw['EU #'].astype(str) data_od_rawOnly keep orphan drug designations between 2001 and 2019years = pd.to_datetime( data_od_raw['Designation date']).apply(lambda x: x.year) data_od = data_od_raw.where((years > 2000) & (years < 2020)) data_od = data_od.dropna() data_od['EU #'] = data_od['EU #'].astype(str) data_od1. How many ODs can be identified by EU in the all drugs registryimport plotly.colors import plotly.graph_objects as go # Get all EU # in all drug registry eu_numbers_all_drugs = data_all_drugs['EU #'].unique() # Get all EU # in OD drug registry eu_numbers_od_drugs = data_od['EU #'].unique() # Check whether OD drug number can be found is_found = np.zeros(shape=len(eu_numbers_od_drugs), dtype=bool) for idx, number in enumerate(eu_numbers_od_drugs): if number in eu_numbers_all_drugs: is_found[idx] = True # Create dataframe matched_data = pd.DataFrame(columns=['EU #', 'Match status']) matched_data = matched_data.append(pd.DataFrame({ 'EU #': eu_numbers_od_drugs[is_found], 'Match status': 'In all drugs registry'})) matched_data = matched_data.append(pd.DataFrame({ 'EU #': eu_numbers_od_drugs[~is_found], 'Match status': 'Not in all drugs registry'})) # Create figure fig = go.Figure() colours = plotly.colors.qualitative.Plotly n_in_all_drugs = len( matched_data[matched_data['Match status'] == 'In all drugs registry']) n_not_in_all_drugs = len( matched_data[matched_data['Match status'] == 'Not in all drugs registry']) fig.add_trace(go.Pie( labels=['In all drugs registry', 'Not in all drugs registry'], values=[n_in_all_drugs, n_not_in_all_drugs], sort=False, pull=[0, 0.2], marker=dict( colors=colours[:2], line=dict(color='#000000', width=1)))) fig.show()The EC all drugs registry does not include any OD applications 2. How many OD drugs are repurposed drugs?We check how many of the OD drug products (i.e. the international non-proprietary name which identifies the active ingredient of the medicine (INN)) are identical to those of drugs in the EC all drugs registry.TODO:1. Get INNs of EC drugs by cross-referencing EC drugs with EMA dataset.2. Then check how many EC ODs products are in EC all drugs.Procedure:- Filter NaNs- Capitalise all product names / INNs- Check whether OD product is identical to an INN in all drugs dataset 2.1 Get INN / product names from EMA datasetThe EC dataset does not keep track of the INN for all drugs, so we would like to match drugs by EU to get the INN from the EMA dataset. But **EU ** is not available for EMA all drugs registry. 
Alternative we attempt to match drugs based on Brand name.path = os.path.dirname(os.getcwd()) data_ema = pd.read_excel(path + '/data/ema_all_drugs.xlsx', header=8) data_ema['Medicine name'] = data_ema['Medicine name'].str.upper() data_ema data_ema.columnsCheck whether some brand names occur multiple timesnon_unique_brands = [] for brand in data_all_drugs['Brand name'].unique(): mask = data_all_drugs['Brand name'] == brand if len(data_all_drugs[mask]) > 1: non_unique_brands.append(brand) non_unique_brandsWe manually checked: each non-unique brand appears only twice, one of which is 'active' the other is either 'not active' or 'rejected'. So it's likely that the same brand name labels the exact same drug and the multiple matches correspond to repeated applications or renewals. Match unique EC brands with EMA dataset and thereby find product numberFor now: If EC brand name is contained in EMA brand name, we match. In the worst case scenario, this includes more active subtances in the resulting dataset than there truly are. But on the other hand, those active may also belong to non-OD drugs according to the EMA database (possibly explicitly exclude OD drugs in EMA database).is_identified = [] inn = [] ec_brand = [] matched_ema_brand = [] brands = data_all_drugs['Brand name'].unique() ema_brands = data_ema['Medicine name'].unique() for brand in brands: is_matched = False for ema_brand in ema_brands: # Check whether ec brand is subset of ema brand if brand in ema_brand: is_identified.append('yes') matched_ema_brand.append(ema_brand) ec_brand.append(brand) # Mask for product mask = data_ema['Medicine name'] == ema_brand product = data_ema[mask]['Active substance'].iloc[0] inn.append(product) is_matched = True if is_matched is True: continue # If not identified is_identified.append('no') matched_ema_brand.append('none') inn.append('none') ec_brand.append(brand) matched_drugs = pd.DataFrame({ 'Brand name': ec_brand, 'EMA brand name': matched_ema_brand, 'is identified': is_identified, 'INN': inn}) matched_drugsCheck unmatched drugsSome EC brand names could not be found. For EC brands of the form "name 1/name 2" or "name 1 (name 2)" the above matching method might miss the drugs, because the complete EC brand name has to be contained in the EMA brand name. We check those candidates manually below.mask = matched_drugs['is identified'] == 'no' matched_drugs[mask]Filter all names which are just one wordcritical_brand_names = [] uncritical_brand_names = [] for brand in matched_drugs[mask]['Brand name']: if (' ' in brand) or ('/' in brand) or ('(' in brand) or ('-' in brand): critical_brand_names.append(brand) else: uncritical_brand_names.append(brand) print('Critical drugs: ', len(critical_brand_names), '\n') print(critical_brand_names) print('\n') print('Uncritical drugs: ', len(uncritical_brand_names), '\n') print(uncritical_brand_names)Critical drugs: 36 ['AMLODIPINE/VALSARTAN MYLAN', 'ARIKAYCE LIPOSOMAL', 'BROPAIR SPIROMAX', 'CLOPIDOGREL TAW PHARMA', 'CLOPIDOGREL/ACETYLSALICYLIC ACID MYLAN', 'COVID-19 VACCINE JANSSEN', 'COVID-19 VACCINE MODERNA', 'EMTRICITABINE /TENOFOVIR DISOPROXIL KRKA D.D.', 'EXPAREL LIPOSOMAL', 'HEPLISAV B', 'IRBESARTAN / HYDROCHLOROTHIAZIDE TEVA', 'IRBESARTAN HCT ZENTIVA', 'LAMIVUDINE / ZIDOVUDINE TEVA', 'LENALIDOMIDE KRKA', 'LENALIDOMIDE KRKA D.D.', 'LENALIDOMIDE KRKA D.D. 
NOVO MESTO', 'LENALIDOMIDE MYLAN', 'OBILTOXAXIMAB SFL', 'RIBAVIRIN TEVA PHARMA BV', 'RIVAROXABAN ACCORD', 'SEFFALAIR SPIROMAX', 'SUNITINIB ACCORD', 'THIOTEPA RIEMSER', 'TRIXEO AEROSPHERE', 'CLOPIDOGREL / ACETYLSALICYLIC ACID TEVA', 'HUMALOG-HUMAJECT', 'HUMALOG-PEN', 'IRBESARTAN HCT BMS', 'RIVASTIGMINE 3M HEALTH CARE LTD.', 'TECNEMAB-K-1', 'TENECTEPLASE BOEHRINGER INGELHEIM PHARMA GMBH & CO. KG', 'VALDYN (EX KUDEQ)', 'MYLOTARG REFUSAL', 'RAMELTEON TAKEDA GLOBAL RESEARCH AND DEVELOPMENT CENTRE (EUROPE) LTD', 'RAXONE REFUSAL[...]Try to match critical brands that are obvious to parse# Parse critical name manually parse_dict = { 'AMLODIPINE/VALSARTAN MYLAN': ['AMLODIPINE', 'VALSARTAN MYLAN'], 'CLOPIDOGREL/ACETYLSALICYLIC ACID MYLAN': ['CLOPIDOGREL', 'ACETYLSALICYLIC ACID MYLAN'], 'EMTRICITABINE /TENOFOVIR DISOPROXIL KRKA D.D.': ['EMTRICITABINE', 'TENOFOVIR DISOPROXIL KRKA D.D.'], 'IRBESARTAN / HYDROCHLOROTHIAZIDE TEVA': ['IRBESARTAN', 'HYDROCHLOROTHIAZIDE TEVA'], 'LAMIVUDINE / ZIDOVUDINE TEVA': ['LAMIVUDINE', 'ZIDOVUDINE TEVA'], 'CLOPIDOGREL / ACETYLSALICYLIC ACID TEVA': ['CLOPIDOGREL', 'ACETYLSALICYLIC ACID TEVA']} # Find parsed names in EMA dataset ema_brands = data_ema['Medicine name'].unique() for brand, parsed_brands in parse_dict.items(): matched_brands = [] inn = [] for parsed_brand in parsed_brands: for ema_brand in ema_brands: # Check whether ec brand is subset of ema brand if parsed_brand in ema_brand: matched_brands.append(ema_brand) # Mask for product mask = data_ema['Medicine name'] == ema_brand product = data_ema[mask]['Active substance'].iloc[0] inn.append(product) # is_matched = True # Make sure that parsed drugs are not matched to different drugs if len(matched_brands) == 2: assert matched_brands[0] == matched_brands[1] # Update if one of the parsed drugs was matched if len(matched_brands) > 0: mask = matched_drugs['Brand name'] == brand matched_drugs.loc[mask, 'EMA brand name'] = matched_brands[0] matched_drugs.loc[mask, 'is identified'] = 'yes' matched_drugs.loc[mask, 'INN'] = inn[0] mask = \ (matched_drugs['Brand name'] == 'AMLODIPINE/VALSARTAN MYLAN') | \ (matched_drugs['Brand name'] == 'CLOPIDOGREL/ACETYLSALICYLIC ACID MYLAN') | \ (matched_drugs['Brand name'] == 'EMTRICITABINE /TENOFOVIR DISOPROXIL KRKA D.D.') | \ (matched_drugs['Brand name'] == 'IRBESARTAN / HYDROCHLOROTHIAZIDE TEVA') | \ (matched_drugs['Brand name'] == 'LAMIVUDINE / ZIDOVUDINE TEVA') | \ (matched_drugs['Brand name'] == 'CLOPIDOGREL / ACETYLSALICYLIC ACID TEVA') matched_drugs[mask]Check double matchesdouble_matches = [] brands = matched_drugs['Brand name'].unique() for brand in brands: mask = matched_drugs['Brand name'] == brand temp = matched_drugs[mask] if len(temp) > 1: double_matches.append(temp['Brand name'].iloc[0]) len(double_matches)Match EC OD drugs based on product 1. 
Identical match Check INN of EC all drugs (mostly single words, 2 words, or even a list of compounds?)critical_inns = [] uncritical_inns = [] inns = matched_drugs['INN'].unique() for inn in inns: # Count number of spaces space_count = 0 for letter in inn: if letter.isspace(): space_count += 1 if (space_count > 1) or (',' in inn) or ('/' in inn): critical_inns.append(inn) else: uncritical_inns.append(inn) print('Critical INNs: ', len(critical_inns), '\n') print(critical_inns) print('\n') print('Uncritical INNs: ', len(uncritical_inns), '\n') print(uncritical_inns)isartan, amlodipine', 'cobicistat on silicon dioxide', 'indacaterol, Glycopyrronium bromide', 'Glycopyrronium bromide, indacaterol maleate', 'meropenem trihydrate, vaborbactam', 'vibrio cholerae, strain cvd 103-hgr, live', 'Diphtheria toxoid, tetanus toxoid, Bordetella pertussis antigens: pertussis toxoid, filamentous haemagglutinin, pertactin, fimbriae Types 2 and 3, hepatitis B surface antigen produced in yeast cells, poliovirus (inactivated): type 1 (Mahoney), type 2 (MEF-1), type 3 (Saukett) produced in Vero cells/ Haemophilus influenzae type b polysaccharide (polyribosylribitol phosphate) conjugated to meningococcal protein.', 'mixture of polynuclear iron(III)-oxyhydroxide, sucrose and starches', 'patiromer sorbitex calcium', 'tenofovir alafenamide fumarate', 'Ombitasvir, paritaprevir, ritonavir', 'recombinant human n-acetylgalactosamine-6-sulfatase (rhgalns)', 'alogliptin benzoate, metformin hydrochloride', 'tenofovir disoproxil fumarate', 'canagliflozin, metformin hydrochloride'[...]Check INN of EC OD drugs (mostly single words, 2 words, or even a list of compounds?)critical_inns = [] uncritical_inns = [] inns = data_od['Product'].unique() for inn in inns: # Count number of spaces space_count = 0 for letter in inn: if letter.isspace(): space_count += 1 if (space_count > 1) or (',' in inn) or ('/' in inn): critical_inns.append(inn) else: uncritical_inns.append(inn) print('Critical INNs: ', len(critical_inns), '\n') print(critical_inns) print('\n') print('Uncritical INNs: ', len(uncritical_inns), '\n') print(uncritical_inns)inant human hepatitis C monoclonal antibody against C4 region of E1', 'Mercaptopurine (oral liquid)', 'Methotrexate (oral liquid)', '4-ethoxy-2-(piperazin-1-yl)-7-(pyridin-4-yl)-5H-pyrimido[5,4-b]indol', 'Adenovirus associated viral vector serotype 4 containing the human RPE65 gene', '4-Amino-1-[5-O-[(2R,4S)-2-oxido-4-(4-pyridinyl)-1,3,2-dioxaphosphorinan-2-yl]-ß-D-arabinofuranosyl]-2(1H)-pyrimidinone', 'Alginate oligosaccharide (G-block) fragment', 'Human coagulation factor X', 'L-threo-3,4-dihydroxyphenylserine', 'Pyridoxalated haemoglobin polyoxyethylene', '1-{3-[3-(4-chlorophenyl)propoxy]propyl}piperidine, hydrochloride', 'Recombinant fusion protein consisting of human coagulation factor IX attached to the Fc domain of human IgG1', 'R-1-[2,3-dihydro-2-oxo-1-pivaloylmethyl-5-(2-pyridyl)-1 H -1,4-benzodiazepin-3-yl]-3-(3-methylaminophenyl)urea', 'Autologous CD34+ cells transfected with lentiviral vector containing the human arylsulfatase A cDNA', 'Antisense Oligonucleotide (TATCCGGAG[...]It's not obvious how to parse most of the INNs, except for deleting additional information such as "(oral use)", "(rectal use)", etc. 1. 
Identical match based on "raw" INNsis_identified = [] matched_all_drug_inns = [] matched_od_inns = [] all_drug_brand = [] od_inns = data_od['Product'].str.upper() all_drugs_inns = matched_drugs['INN'].str.upper().unique() for od_inn in od_inns: is_matched = False for all_drug_inn in all_drugs_inns: # Check whether ec brand is subset of ema brand if od_inn in all_drug_inn: is_identified.append('yes') matched_all_drug_inns.append(all_drug_inn) matched_od_inns.append(od_inn) # Mask for brand mask = matched_drugs['INN'].str.upper() == all_drug_inn brand = matched_drugs[mask]['Brand name'].iloc[0] all_drug_brand.append(brand) is_matched = True if is_matched is True: continue # If not identified is_identified.append('no') matched_all_drug_inns.append('none') matched_od_inns.append(od_inn) all_drug_brand.append('none') matched_ods = pd.DataFrame({ 'OD INN': matched_od_inns, 'All drug INN': matched_all_drug_inns, 'is identified': is_identified, 'All drug brand name': all_drug_brand}) matched_ods mask = matched_ods['is identified'] == 'yes' matched_ods[mask]There are some ODs that are falsely matched to multiple drugs, e.g. CHOLIC ACID is matched OBETICHOLIC ACID. Those will have to be filtered out manually. 1.1 Filter multiple matchesdouble_matches = [] mask = matched_ods['is identified'] == 'yes' inns = matched_ods[mask]['OD INN'].unique() for inn in inns: mask = matched_ods['OD INN'] == inn temp = matched_ods[mask] if len(temp) > 1: double_matches.append(temp['OD INN'].iloc[0]) len(double_matches)How many of those 55 double matches are falsely matched multiple times? Keep only those double matches that are matched to different All drug INNs.double_matches = pd.DataFrame(columns=['OD INN', 'All drug INN', 'is identified', 'All drug brand name']) mask = matched_ods['is identified'] == 'yes' inns = matched_ods[mask]['OD INN'].unique() for inn in inns: mask = matched_ods['OD INN'] == inn temp = matched_ods[mask] if len(temp) > 1: unique_inns = temp['All drug INN'].unique() if len(unique_inns) > 1: double_matches = double_matches.append(temp) double_matches double_matches.iloc[:50] double_matches.iloc[50:] fig = go.Figure() fig.add_trace(go.Pie( labels=['Recycled INNs', 'Not recycled'], values=[219, 1629 - 219], sort=False, pull=[0, 0.2], marker=dict( colors=colours[:2], line=dict(color='#000000', width=1)))) fig.show()Make the matching more strictis_identified = [] inn = [] ec_brand = [] matched_ema_brand = [] brands = data_all_drugs['Brand name'].unique() ema_brands = data_ema['Medicine name'].unique() for brand in brands: is_matched = False for ema_brand in ema_brands: # Check whether ec brand is subset of ema brand if brand == ema_brand: is_identified.append('yes') matched_ema_brand.append(ema_brand) ec_brand.append(brand) # Mask for product mask = data_ema['Medicine name'] == ema_brand product = data_ema[mask]['Active substance'].iloc[0] inn.append(product) is_matched = True if is_matched is True: continue # If not identified is_identified.append('no') matched_ema_brand.append('none') inn.append('none') ec_brand.append(brand) matched_drugs = pd.DataFrame({ 'Brand name': ec_brand, 'EMA brand name': matched_ema_brand, 'is identified': is_identified, 'INN': inn}) matched_drugs len(matched_drugs[matched_drugs['is identified'] == 'yes']) is_identified = [] matched_all_drug_inns = [] matched_od_inns = [] all_drug_brand = [] od_inns = data_od['Product'].str.upper() all_drugs_inns = matched_drugs['INN'].str.upper().unique() for od_inn in od_inns: is_matched = False for all_drug_inn in all_drugs_inns: # 
Check whether ec brand is subset of ema brand if od_inn == all_drug_inn: is_identified.append('yes') matched_all_drug_inns.append(all_drug_inn) matched_od_inns.append(od_inn) # Mask for brand mask = matched_drugs['INN'].str.upper() == all_drug_inn brand = matched_drugs[mask]['Brand name'].iloc[0] all_drug_brand.append(brand) is_matched = True if is_matched is True: continue # If not identified is_identified.append('no') matched_all_drug_inns.append('none') matched_od_inns.append(od_inn) all_drug_brand.append('none') matched_ods = pd.DataFrame({ 'OD INN': matched_od_inns, 'All drug INN': matched_all_drug_inns, 'is identified': is_identified, 'All drug brand name': all_drug_brand}) matched_ods len(matched_ods[matched_ods['is identified'] == 'yes']) fig = go.Figure() fig.add_trace(go.Pie( labels=['Recycled INNs', 'Not recycled'], values=[175, 1629 - 175], sort=False, pull=[0, 0.2], marker=dict( colors=colours[:2], line=dict(color='#000000', width=1)))) fig.show()3. TransformsData does not always come in its final processed form that is required fortraining machine learning algorithms. We use **transforms** to perform somemanipulation of the data and make it suitable for training.All TorchVision datasets have two parameters -``transform`` to modify the features and``target_transform`` to modify the labels - that accept callables containing the transformation logic.The [torchvision.transforms](https://pytorch.org/vision/stable/transforms.html) module offersseveral commonly-used transforms out of the box.The FashionMNIST features are in PIL Image format, and the labels are integers.For training, we need the features as normalized tensors, and the labels as one-hot encoded tensors.To make these transformations, we use ``ToTensor`` and ``Lambda``.import torch from torchvision import datasets from torchvision.transforms import ToTensor, Lambda ds = datasets.FashionMNIST( root="data", train=True, download=True, transform=ToTensor(), target_transform=Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(0, torch.tensor(y), value=1)) )/Users/didi/opt/anaconda3/envs/py3/lib/python3.8/site-packages/torchvision/datasets/mnist.py:498: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ../torch/csrc/utils/tensor_numpy.cpp:180.) return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)ToTensor()[ToTensor](https://pytorch.org/vision/stable/transforms.htmltorchvision.transforms.ToTensor)converts a PIL image or NumPy ``ndarray`` into a ``FloatTensor``. and scalesthe image's pixel intensity values in the range $[0., 1.]$ Lambda TransformsLambda transforms apply any user-defined lambda function. 
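For instance (a small illustrative sketch, not from the original notebook), a `Lambda` can be composed with `ToTensor` to flatten each image into a 784-dimensional vector:
```
from torchvision.transforms import Compose, Lambda, ToTensor

# Flatten each 1x28x28 image tensor into a 784-element vector
flatten_transform = Compose([ToTensor(), Lambda(lambda x: x.view(-1))])
```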
Here, we define a functionto turn the integer into a one-hot encoded tensor.It first creates a zero tensor of size 10 (the number of labels in our dataset) and calls[scatter](https://pytorch.org/docs/stable/generated/torch.Tensor.scatter_.html) which assigns a``value=1`` on the index as given by the label ``y``.target_transform = Lambda(lambda y: torch.zeros( 10, dtype=torch.float).scatter_(dim=0, index=torch.tensor(y), value=1))Mostly based on https://github.com/aksub99/molecular-vae/blob/master/Molecular_VAE.ipynb Additions:- Property prediction segment and auxiliary loss- Different data prep (in load_data.ipynb) that more closely follows the original code https://github.com/aspuru-guzik-group/chemical_vae/- Sigmoid annealing schedule- Slower training it seems (TM)- Validation set and lossTODO:- better data loading (canonical only, less storage space)- teacher forcing gruimport numpy as np import torch from torch import nn, optim from torch.nn import functional as F import pandas as pd # imports the torch_xla package import os TPU = 'COLAB_TPU_ADDR' in os.environ if TPU: !pip install cloud-tpu-client==0.10 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.7-cp36-cp36m-linux_x86_64.whl import torch_xla import torch_xla.core.xla_model as xm torch.manual_seed(42) !git clone https://github.com/loodvn/pytorch-chemicalvae.git !mv pytorch-chemicalvae/data data # !ls data X = np.load('data/train_compressed.npz')['arr_0'] Y = np.load('data/Y_reg.npy') # X = np.load('data/X_100.npy') # Y = np.load('data/Y_reg100.npy') # Put in load_data from torch.utils.data import DataLoader, TensorDataset, DataLoader TMP_TRAIN_SIZE = -1 BATCH_SIZE = 256 if TMP_TRAIN_SIZE < 0: TMP_TRAIN_SIZE = Y.shape[0] # 75/25 split valid_idx = int(TMP_TRAIN_SIZE*0.75) x_train, y_train, x_valid, y_valid = map(torch.tensor, (X[:valid_idx], Y[:valid_idx], X[valid_idx:], Y[valid_idx:])) del(X) # Takes up too much RAM train_ds = TensorDataset(x_train, y_train) valid_ds = TensorDataset(x_valid, y_valid) train_loader = DataLoader(train_ds, batch_size=BATCH_SIZE, shuffle=True) valid_loader = DataLoader(valid_ds, batch_size=2*BATCH_SIZE) class ChemVAE(torch.nn.Module): def __init__(self): super(ChemVAE, self).__init__() self.latent_dims = 196 # p7, VAEs self.num_char = 35 # including +1 for padding # From Methods/Autoencoder architecture section (p13) self.enc_cnn1 = nn.Conv1d(in_channels=120, out_channels=9, kernel_size=9) # 9,9 self.enc_cnn2 = nn.Conv1d(in_channels=9, out_channels=9, kernel_size=9) # 9,9 self.enc_cnn3 = nn.Conv1d(in_channels=9, out_channels=11, kernel_size=10) # 10, 11 (filter size, convolutional kernels) self.enc_fc_mu = nn.Linear(11*10, self.latent_dims) # 11 (out_channels * whatever's left?) self.enc_fc_var = nn.Linear(11*10, self.latent_dims) # 11 self.dec_gru = nn.GRU(input_size=self.latent_dims, hidden_size=488, num_layers=3, batch_first=True) # TODO input_size is latent space? # self.dec_gru_last = nn.GRU(input_size = self.latent_dims, hidden_size=488, ) # output GRU layer had one additional input, corresponding to the character sampled from the softmax output self.dec_fc = nn.Linear(488, self.num_char) self.property_1 = nn.Linear(self.latent_dims, 1000) self.property_2 = nn.Linear(1000, 3) self.property_dropout = nn.Dropout(p=0.2) # TODO activation functions? Assuming tanh not relu? Also, difference between F.relu and nn.ReLU? 
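# Note (added): F.relu is the stateless functional form and nn.ReLU is the
# equivalent Module wrapper; both compute the same element-wise max(0, x),
# so the choice between them is stylistic. Whether the original chemical VAE
# used tanh instead is left as the open question above.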
self.act = F.relu def encode(self, x): # print("initial size:", x.shape) x = self.act(self.enc_cnn1(x)) # print("initial size:", x.shape) x = self.act(self.enc_cnn2(x)) x = self.act(self.enc_cnn3(x)) # print("size after enc_cnns:", x.shape) x = x.view(x.size(0), -1) # Flatten, Keep batch size mu = self.enc_fc_mu(x) var = self.enc_fc_var(x) return mu, var def decode(self, z): # print("size before reshape", z.size) z = z.view(z.size(0), 1, z.size(-1)) # Expand_dims (1, latent_dim) -> (1, 1, latent_dim) # print("size mid-reshape", z.size) z = z.repeat(1, 120, 1) # Repeat latent*120: (1, 1, latent_dim) -> (1, 120, latent_dim) # print("size after reshape", z.size) output, hn = self.dec_gru(z) softmax = self.dec_fc(output) softmax = F.softmax(softmax, dim=1) # print("softmax shape:", softmax.size()) return softmax # Copied from PyTorch VAE example def reparameterize(self, mu, logvar): std = torch.exp(0.5*logvar) eps = torch.randn_like(std) return mu + eps*std def prediction(self, z): # two fully connected layers of 1000 neurons, dropout rate of 0.2 fc1 = self.act(self.property_dropout(self.property_1(z))) # print("prop1 shape: ", fc1.shape) pred = self.act(self.property_dropout(self.property_2(fc1))) # print("prop 2 shape", pred.shape) # output: batch size * 3 return pred def forward(self, x): mu, logvar = self.encode(x) z = self.reparameterize(mu, logvar) return self.decode(z), mu, logvar, zTraining- variational loss (KL divergence) annealed according to sigmoid schedule after 29 epochs, running for a total 120 epochs.- output GRU layer had one additional input, corresponding to the character sampled from the softmax output, trained using teacher forcingGetting output samples from softmax (depending on temperature):https://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.htmlpreparing-for-trainingPytorch training loop over batches:loss.backward()opt.step()opt.zero_grad()Which reconstruction loss?CE loss?def one_hot_array(i, n): return map(int, [ix == i for ix in xrange(n)]) def one_hot_index(vec, charset): return map(charset.index, vec) def from_one_hot_array(vec): oh = np.where(vec == 1) if oh[0].shape == (0, ): return None return int(oh[0][0]) def decode_smiles_from_indexes(vec, charset): return "".join(map(lambda x: charset[x], vec)).strip() charset = ['n', '[', 'o', 'I', '3', 'H', '+', 'S', '@', '8', '4', '1', 's', 'N', 'F', 'P', '/', '=', 'O', 'B', 'C', '\\', '(', '-', ']', '6', ')', 'r', '5', '7', '2', '#', 'l', 'c', ' '] def sigmoid_schedule(time_step, slope=1., start=22): return float(1 / (1. 
+ np.exp(slope * (start - float(time_step))))) sigmoid_schedule()Baseline: Mean prediction# logP = np.mean(np.abs(Y[:,0].mean()-Y[:,0])) print("logP baseline: ", logP) QED = np.mean(np.abs(Y[:,1].mean()-Y[:,1])) print("QED baseline: ", QED) (np.abs(Y.mean(axis=0)-Y)).mean(axis=0) # logP, QED, SASTrain# From other pytorch implementation def vae_loss(x_decoded_mean, x, z_mean, z_logvar): xent_loss = F.binary_cross_entropy(x_decoded_mean, x, reduction='sum') kl_loss = -0.5 * torch.sum(1 + z_logvar - z_mean.pow(2) - z_logvar.exp()) return xent_loss + kl_loss def xent_loss(x_decoded_mean, x): return F.binary_cross_entropy(x_decoded_mean, x, reduction='sum') def kl_loss(z_mean, z_logvar): return -0.5 * torch.sum(1 + z_logvar - z_mean.pow(2) - z_logvar.exp()) # prediction loss: mse def pred_loss(y_pred, y_true): return torch.mean((y_pred - y_true).pow(2)).to(device) def mae(y_pred, y_true): return torch.mean(torch.abs(y_pred - y_true)) print("Starting training") import time start = time.time() device = 'cuda' if torch.cuda.is_available() else 'cpu' # device = xm.xla_device() epochs = 20 #120 model = ChemVAE().to(device) optimizer = optim.Adam(model.parameters()) SIGMOID_ANNEALING = True # From other pytorch implementation TODO reference properly # TODO save checkpoints every 1 hours def train(epoch): model.train() train_loss = 0 for batch_idx, data in enumerate(train_loader): y_true = data[1].to(device) data = data[0].to(device) optimizer.zero_grad() output, mean, logvar, z = model(data) pred = model.prediction(z) # print("pred:", pred.shape, "y: ", y_true.shape) if batch_idx==0: inp = data.cpu().numpy() outp = output.cpu().detach().numpy() lab = data.cpu().numpy() print("Input:") print(decode_smiles_from_indexes(map(from_one_hot_array, inp[0]), charset)) print("Label:") print(decode_smiles_from_indexes(map(from_one_hot_array, lab[0]), charset)) sampled = outp[0].reshape(1, 120, len(charset)).argmax(axis=2)[0] print("Output:") print(decode_smiles_from_indexes(sampled, charset)) # print("pred loss: ", pred_loss(pred, y_true), "shape: ", pred_loss(pred, y_true).shape) sched = torch.tensor(sigmoid_schedule(epoch)).to(device) if SIGMOID_ANNEALING else 1 loss = sched*kl_loss(mean, logvar) + xent_loss(output, data) + sched*pred_loss(pred, y_true) # import pdb; pdb.set_trace() loss.backward() train_loss += loss optimizer.step() if TPU: xm.mark_step() if batch_idx % 100 == 0: print(f'epoch {epoch} / batch {batch_idx}\tFull loss: {loss/BATCH_SIZE:.4f}') # TODO print all of the loss components seperately pred_mae = mae(pred, y_true) print(f'epoch {epoch} / batch {batch_idx}\tPred mae loss: {pred_mae/BATCH_SIZE:.4f}') # print(f'epoch {epoch}: train loss:', (train_loss / len(train_loader.dataset))) return train_loss / len(train_loader.dataset) def eval_model(): model.eval() with torch.no_grad(): eval_loss = 0 eval_pred_loss = 0 logP_loss = 0 QED_loss = 0 for batch_idx, data in enumerate(valid_loader): y_true = data[1].to(device) data = data[0].to(device) output, mean, logvar, z = model(data) pred = model.prediction(z) sched = torch.tensor(sigmoid_schedule(epoch)).to(device) loss = sched*kl_loss(mean, logvar) + xent_loss(output, data) + sched*pred_loss(pred, y_true) eval_loss += loss eval_pred_loss += pred_loss(pred, y_true) logP_loss += torch.sum(torch.abs(pred[:,0] - y_true[:,0])) # MAE loss to reproduce Table 2 QED_loss += torch.sum(torch.abs(pred[:,1] - y_true[:,1])) # MAE loss to reproduce Table 2 return eval_loss / len(valid_loader.dataset), eval_pred_loss / len(valid_loader.dataset), 
torch.sum(logP_loss) / len(valid_loader.dataset), torch.sum(QED_loss / len(valid_loader.dataset)) val_losses = [] val_pred_losses = [] logP_losses = [] qed_losses = [] for epoch in range(1, epochs + 1): e_start = time.time() train_loss = train(epoch) print(f"{epoch} Training loss: {train_loss}") e_end = time.time() print(f"Time per epoch ({epoch}): {e_end-e_start:.3f}s") print("Evaluating...") val_loss, eval_pred_loss, logP_loss, qed_loss = eval_model() print(f"Evaluation loss (training): \n{val_loss}, \n{eval_pred_loss}, \n{logP_loss}, \n{qed_loss}") val_losses.append(val_loss.item()) val_pred_losses.append(eval_pred_loss.item()) logP_losses.append(logP_loss.item()) qed_losses.append(qed_loss.item()) print(f"Elapsed time: {e_end-start:.3f}s") end = time.time() print(f"Total time taken: {int(end-start)//60}m{(end-start)%60:.3f}s")Starting training Input: 44-on6nF(FFFF-r4PP+\c4-4-on6n4-4646o4-46Oo41#444-on6O1c6F( Label: 44-on6nF(FFFF-r4PP+\c4-4-on6n4-4646o4-46Oo41#444-on6O1c6F( Output: BB====ssssOOOOOrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr epoch 1 / batch 0 Full loss: 608.6711 epoch 1 / batch 0 Pred mae loss: 0.0078 epoch 1 / batch 100 Full loss: 534.9409 epoch 1 / batch 100 Pred mae loss: 0.0066 epoch 1 / batch 200 Full loss: 530.1423 epoch 1 / batch 200 Pred mae loss: 0.0047 epoch 1 / batch 300 Full loss: 527.1787 epoch 1 / batch 300 Pred mae loss: 0.0049 epoch 1 / batch 400 Full loss: 529.0965 epoch 1 / batch 400 Pred mae loss: 0.0050 epoch 1 / batch 500 Full loss: 527.3892 epoch 1 / batch 500 Pred mae loss: 0.0049 epoch 1 / batch 600 Full loss: 526.2923 epoch 1 / batch 600 Pred mae loss: 0.0048 epoch 1 / batch 700 Full loss: 526.9380 epoch 1 / batch 700 Pred mae loss: 0.0052 1 Training loss: 531.5980829516166 Time per epoch (1): 224.476s Evaluat[...]Plot lossesdf = pd.DataFrame({"val": val_losses, "val_pred": val_pred_losses, "logP": logP_losses, "qed": qed_losses}) df_total = df['val'] df_pred = df.drop(columns=["val"]) df_pred.plot()tmpTPU Error: /usr/local/lib/python3.6/dist-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables) 130 retain_graph: Optional[bool] = None, 131 create_graph: bool = False,--> 132 only_inputs: bool = True, 133 allow_unused: bool = False 134 ) -> Tuple[torch.Tensor, ...]:RuntimeError: vector::_M_range_check: __n (which is 1) >= this->size() (which is 1)Loss after ~10 mins:Evaluation loss (training): (tensor(513.6445, device='cuda:0', dtype=torch.float64), tensor(0.0252, device='cuda:0', dtype=torch.float64), tensor([0.0208, 0.0050, 0.0183], device='cuda:0', dtype=torch.float64), tensor([0.0208, 0.0050, 0.0183], device='cuda:0', dtype=torch.float64 Starting trainingInput:4@(FF-BFcFFFF-4rO+]\144r4PP\-n6-4-I6-I6I6416Fc6F@(Label:4@(FF-BFcFFFF-4rO+]\144r4PP\-n6-4-I6-I6I6416Fc6F@(Output:ssssssssssn111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111epoch 1 / batch 0 Full loss: 155817.0156epoch 1 / batch 0 Pred loss: 1.9640epoch 1 / batch 100 Full loss: 135395.6875epoch 1 / batch 100 Pred loss: 1.2291epoch 1 / batch 200 Full loss: 135623.5313epoch 1 / batch 200 Pred loss: 1.1131epoch 1 / batch 300 Full loss: 135002.1563epoch 1 / batch 300 Pred loss: 1.1840epoch 1 / batch 400 Full loss: 135334.3438epoch 1 / batch 400 Pred loss: 1.2385epoch 1 / batch 500 Full loss: 134454.6094epoch 1 / batch 500 Pred loss: 1.2087epoch 1 / batch 600 Full loss: 134295.0781epoch 1 / batch 600 Pred loss: 
1.2694epoch 1 / batch 700 Full loss: 134368.5938epoch 1 / batch 700 Pred loss: 1.2350Time per epoch (1): 208.468sEvaluating...Evaluation loss (training): 523.524375040089, 0.004267985728560455, 1.5497679990182962, 0.3987843898209971Elapsed time: 218.533sInput:44O(44rO+]\c444Or4P+\c4(onLabel:44O(44rO+]\c444Or4P+\c4(onOutput:4=(1PFBBNNNNN222221666H(((lepoch 2 / batch 0 Full loss: 134005.5000epoch 2 / batch 0 Pred loss: 1.2578epoch 2 / batch 100 Full loss: 133798.4688epoch 2 / batch 100 Pred loss: 1.2414epoch 2 / batch 200 Full loss: 134115.9375epoch 2 / batch 200 Pred loss: 1.3186epoch 2 / batch 300 Full loss: 134341.1875epoch 2 / batch 300 Pred loss: 1.2813epoch 2 / batch 400 Full loss: 133899.8438epoch 2 / batch 400 Pred loss: 1.2360epoch 2 / batch 500 Full loss: 133523.3281epoch 2 / batch 500 Pred loss: 1.2713epoch 2 / batch 600 Full loss: 134387.4063epoch 2 / batch 600 Pred loss: 1.2330epoch 2 / batch 700 Full loss: 133031.4063epoch 2 / batch 700 Pred loss: 1.2627Time per epoch (2): 223.383sEvaluating...Evaluation loss (training): 518.1136467954343, 0.003951255076182853, 1.5243500593229338, 0.36010640248726383Elapsed time: 462.739sInput:44r4PP+\-4F(FFFFF(6O4-on6O(444r4P+\-4-on6Oc44n44c64(Label:44r4PP+\-4F(FFFFF(6O4-on6O(444r4P+\-4-on6Oc44n44c64(Output:no-]]\((@FFFFFFFFNNNNNO332222222222222221111cc6H(((epoch 3 / batch 0 Full loss: 132805.6719epoch 3 / batch 0 Pred loss: 1.2380epoch 3 / batch 100 Full loss: 133457.9688epoch 3 / batch 100 Pred loss: 1.1281epoch 3 / batch 200 Full loss: 132526.6563epoch 3 / batch 200 Pred loss: 1.0878epoch 3 / batch 300 Full loss: 131957.4688epoch 3 / batch 300 Pred loss: 1.0116epoch 3 / batch 400 Full loss: 132188.6094epoch 3 / batch 400 Pred loss: 1.0222epoch 3 / batch 500 Full loss: 131492.4531epoch 3 / batch 500 Pred loss: 0.9942epoch 3 / batch 600 Full loss: 131535.2813epoch 3 / batch 600 Pred loss: 0.9581epoch 3 / batch 700 Full loss: 130860.2813epoch 3 / batch 700 Pred loss: 1.0071Time per epoch (3): 225.783sEvaluating...Evaluation loss (training): 511.09088476526335, 0.002368703126348485, 1.230272634271861, 0.21858335207106477Elapsed time: 710.049sInput:44-46-46@(FF-4-on6O44c-4-O6on644n44c6F@(Label:44-46-46@(FF-4-on6O44c-4-O6on644n44c6F@(Output:4=--6nnF(FFFrrrPrr]]++\\\\\\111111cccH((lepoch 4 / batch 0 Full loss: 130977.4609epoch 4 / batch 0 Pred loss: 1.0126 Manually push data through networkexample_input = x_train[0] x = example_input x = x.view(1, x.size(0), -1).to(device) print(x.size()) mu, logvar = model.encode(x) print(mu.shape, logvar.shape) z = model.reparameterize(mu, logvar) z.shape output = model.decode(z) print("decoded shape: ", output.shape) out, m, l, z = model.forward(x) vae_loss(out, x, m, l) model.prediction(z).shape # TODO should we still have batch here?Compile csv data into one dataframeos.chdir("data") filepath = "data"Get an idea about the number of watersheds in Ontariowatersheds = pd.read_csv("../ON_hourly_hydrometric_June_July_todate.csv", encoding = "latin") %%time # Rename columns by stripping whitespace ws_cols = [] for colnum in range(len(watersheds.columns)): ws_cols.append(watersheds.columns[colnum].lstrip()) watersheds.rename(columns=dict(zip(watersheds.columns,ws_cols)), \ inplace = True) %%time # Number of watersheds in Ontario num_watersheds = watersheds.groupby(by = "ID").size().reset_index(name = "count").shape print("Number of watersheds in Ontario =", num_watersheds)Number of watersheds in Ontario = (522, 2) Wall time: 39.6 msSample mean daily flow filehydro_df = pd.read_csv("hydro_daily_mean_39.csv", encoding 
= "latin")Number of stations - might not usehydro_df.groupby(by = "STATION_NUMBER").ngroupsEstimate the average number of records per stationhydro_df.groupby(by = "STATION_NUMBER").size().reset_index(name = "count").mean() hydro_df.shapeBuild out code using sample file hydro_daily_mean_39. Get peak (maximum) values for same dates To introduce a new flood exposure index, annual maximum peak flows were extracted from data for the gauging stations. Use this timeseries data to calculate the magnitude of the 100-year flood by using frequency analysis. Analyze 100 years of flood data Load 100 year sflood data%%time hydro_flood_100yr = pd.read_csv("../hydrometric-annual-peaks_100yr_ON.csv") hydro_flood_100yr.columnsAdd new date related columns%%time hydro_flood_100yr["datetime"] = pd.to_datetime(hydro_flood_100yr["DATE"]) hydro_flood_100yr["date"] = hydro_flood_100yr["datetime"].dt.date hydro_flood_100yr["year"] = hydro_flood_100yr["datetime"].dt.yearWall time: 48.4 msCleanup IDENTIFIER column for consistencyhydro_flood_100yr["IDENTIFIER"] = hydro_flood_100yr["IDENTIFIER"].str.split(".") hydro_flood_100yr["IDENTIFIER"].head() identifier = [] for id in range(len(hydro_flood_100yr["IDENTIFIER"])): identifier.append(hydro_flood_100yr["IDENTIFIER"][id][0]) hydro_flood_100yr["IDENTIFIER"] = pd.Series(identifier) ### Years in which the minimum and maximum flow occurs - per station %%time hydro_flood_100yr_minmax_yr = hydro_flood_100yr[(hydro_flood_100yr["PEAK_CODE_EN"] == "Maximum") & \ (hydro_flood_100yr["DATA_TYPE_EN"] == "Flow")].groupby(["x", "y", "STATION_NAME"], \ as_index = False)["year"].agg(["count", "min", \ "max"]).sort_values("count", \ ascending = False).apply(lambda x: \ (x).astype("int")).reset_index() hydro_flood_100yr_minmax_yr.head() hydro_flood_100yr_minmax_yr.rename(columns = {"min": "year_minflow", "max": "year_maxflow"}, inplace = True)Frequency analysis of data via Fast Fourier Transform (FFT) Use 100 years of data. Ideally the past 20 years should be used given the guidelines. 
100 years gives more data and is therefore easier for proof of concept (POC).hydro_flood_100yr.head()Fast Fourier Transform (FFT) to extract magnitude of peakflowdef fft_station_peakflow2(peaks_df, nmax): get_df = peaks_df peaks = np.array(peaks_df["PEAK"]) results = np.fft.fft(peaks, nmax, norm = "ortho") results = np.ndarray.max(np.abs(results)) get_df["mag_100yr_peakflow"] = results return get_dfMagnitude of peakflow over 100 yearsn = hydro_flood_100yr_minmax_yr["year_maxflow"] - hydro_flood_100yr_minmax_yr["year_minflow"] + 1 nmax = max(n) peakflow_100yr_df = hydro_flood_100yr.groupby("STATION_NAME")["STATION_NAME", \ "PEAK", "year"].apply(lambda x: fft_station_peakflow2(x, nmax)) hydro_flood_100yr_minmax_yr["100yr_mag_peakflow"] = peakflow_100yr_df["mag_100yr_peakflow"] peakflow_100yr_df.columns mag_100yr_peakflow2 = peakflow_100yr_df[["STATION_NAME", "mag_100yr_peakflow"]].groupby("STATION_NAME")["mag_100yr_peakflow"].agg("first").reset_index()Remove any magnitude peakflow null valuesmag_100yr_peakflow2 = mag_100yr_peakflow2 = mag_100yr_peakflow2[mag_100yr_peakflow2["mag_100yr_peakflow"].isnull() == False] hydro_flood_100yr = hydro_flood_100yr[hydro_flood_100yr["PEAK"].isnull() == False]Add id column for ease of plottingmag_100yr_peakflow2.sort_values("mag_100yr_peakflow", ascending = True, inplace = True) mag_100yr_peakflow2["id_value"] = np.arange(len(mag_100yr_peakflow2)) + 1 mag_100yr_peakflow2.head() #output_notebook() output_notebook() session_data = ColumnDataSource(data = mag_100yr_peakflow2) id_value = session_data.data['id_value'].tolist() plot = figure(plot_width=750, plot_height = 400, \ min_border = 0) plot.line(x = "id_value", y = "mag_100yr_peakflow", source = session_data, \ line_width = 2, line_color = "blue") plot.title.text = 'Fast Fourier Transform magnitude for 100 years of magnitude peakflow' plot.xaxis.axis_label = 'ID' plot.yaxis.axis_label = 'Peakflow' plot.xaxis.major_label_orientation = 90 hover = HoverTool(tooltips = [('ID', '@id_value'), ('Magnitude 100 year Peak Flow', '@mag_100yr_peakflow')]) plot.add_tools(hover) show(plot) (mag_100yr_peakflow2[mag_100yr_peakflow2["mag_100yr_peakflow"] > outlier].count()/mag_100yr_peakflow2["mag_100yr_peakflow"].count().astype(float)).id_value53% of the peak flow magnitudes are outliers, hence the graph appearing to have lots of values close to 0. Too many values to remove from analysis (over half of the values). Solution:- - Keep outliers - Transofrm y axis by taking log10 Transform the magnitude by log10mag_100yr_peakflow2["log_mag_100yr_peakflow"] = np.log10(mag_100yr_peakflow2["mag_100yr_peakflow"]) output_notebook() session_data = ColumnDataSource(data = mag_100yr_peakflow2) id_value = session_data.data['id_value'].tolist() plot = figure(plot_width=750, plot_height = 400, \ min_border = 0)#, Tooltips = tooltips) plot.line(x = "id_value", y = "log_mag_100yr_peakflow", source = session_data, \ line_width = 2, line_color = "blue") plot.title.text = 'Fast Fourier Transform magnitude for 100 years of magnitude peakflow' plot.xaxis.axis_label = 'ID' plot.yaxis.axis_label = 'Peakflow' plot.xaxis.major_label_orientation = 90 hover = HoverTool(tooltips = [('ID', '@id_value'), ('Magnitude 100 year Peak Flow', '@mag_100yr_peakflow')]) plot.add_tools(hover) show(plot) mag_100yr_peakflow2.shapeCreate actual flood index Load annual mean peakflowsDaily mean peakflow would be better than annual mean peakflows, however this is not available for historical hydrometric data. 
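As an aside, the FFT magnitude extraction used in `fft_station_peakflow2` above can be illustrated on a toy series (a standalone sketch with made-up numbers, not the project data):
```
import numpy as np

# Mirror fft_station_peakflow2: FFT of an annual-peak series (zero-padded to n),
# with orthonormal scaling, keeping the largest absolute coefficient as the magnitude.
peaks = np.array([120.0, 95.0, 180.0, 210.0, 150.0, 300.0])
spectrum = np.fft.fft(peaks, n=8, norm="ortho")
magnitude = np.abs(spectrum).max()
print(round(magnitude, 2))
```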
%%time hydro_annual_peakflow = pd.read_csv("../hydrometric_daily_peaks_ON_47yr.csv") hydro_annual_peakflow.columns ### Select the most important columns hydro_annual_peakflow_cols = hydro_annual_peakflow[['x', 'y', #'IDENTIFIER', \ 'STATION_NAME', \ 'STATION_NUMBER', \ 'PROV_TERR_STATE_LOC', 'DATA_TYPE_EN', 'MAX_DATE', 'MAX_SYMBOL_EN', 'MAX_VALUE']]Rename columnshydro_annual_peakflow_cols = hydro_annual_peakflow_cols.rename(columns = {"MAX_DATE": "DATE_annual_peakflow", \ "MAX_VALUE": "annual_avg_peakflow", \ "DATA_TYPE_EN": "flow_or_level"}) # Extract datetime and year hydro_annual_peakflow_cols["annual_peakflow_datetime"] = pd.to_datetime(hydro_annual_peakflow_cols["DATE_annual_peakflow"]) hydro_annual_peakflow_cols["flow_year"] = hydro_annual_peakflow_cols["annual_peakflow_datetime"].dt.year.astype(int)Join magnitude 100yr peakflow data and annual peak flow. Remove last row (blank row)%%time hydro_index_data = mag_100yr_peakflow2.merge(hydro_annual_peakflow_cols, \ how = "inner", \ on = "STATION_NAME") hydro_index_data.columns flood_index = (hydro_index_data["mag_100yr_peakflow"]/(hydro_index_data["annual_avg_peakflow"].astype(float))) flood_index.replace([np.inf, -np.inf], np.nan, inplace = True) flood_index_log = np.log10(flood_index) #### Remove any null values from the index num_nulls = np.count_nonzero(pd.Series.isnull(flood_index_log).values == True) print("Number of null values =", num_nulls) flood_index_log = flood_index_log[~np.isnan(flood_index_log)].sort_values(ascending = True) round(flood_index_log.quantile([0, 0.25, 0.5, 0.75, 1.0]), 3) flood_index_dict = {"flood_index": flood_index_log, "id_value": np.arange(len(flood_index_log)) + 1} flood_index_log_df = pd.DataFrame(flood_index_dict) #output_notebook() output_notebook() session_data = ColumnDataSource(data = flood_index_log_df) id_value = session_data.data['id_value'].tolist() plot = figure(plot_width=750, plot_height = 400, \ min_border = 0)#, Tooltips = tooltips) plot.line(x = "id_value", y = "flood_index", source = session_data, \ line_width = 2, line_color = "blue") plot.title.text = 'Flood index varies betwwen -2.9 and 3.7. Values roughly between 0 and 1 can be considered medium alerts' plot.xaxis.axis_label = 'ID' plot.yaxis.axis_label = 'Flood Index' plot.xaxis.major_label_orientation = 90 hover = HoverTool(tooltips = [('ID', '@id_value'), ('Flood Index', '@flood_index')]) plot.add_tools(hover) show(plot) A good approximation for index can be any values between 0 and 1 are medium alerts. Less than 0 for low alerts, and > 1 high #output_file("alert_table.html") alert_data = dict( \ alert = ["Low", "Medium", "High"], \ index = ["-2.9 to 0", "0 to 1.06", "1.06 to 3.72"], \ approx_index = ["< 0", "0 to 1", "> 1"] ) source_data = ColumnDataSource(alert_data) alert_columns = [TableColumn(field = "alert", title = "Alert Category", width = 100), \ TableColumn(field = "index", title = "Actual Flood Index", width = 100), \ TableColumn(field = "approx_index", title = "Modified Flood Index", width = 100)] alert_table = DataTable(source = source_data, \ columns = alert_columns, \ fit_columns = True, selectable = True, sortable = True, \ width = 400, height = 400) show(widgetbox(alert_table, height = 100)) Modified flood index should generalise well to othe hydrometric data.Future workModify the following, creating the index from 1 and 2 : 1. Use 20 year historical time series data instead of 100 year 2. Daily mean peakflow or daily peakflow - instead of annual mean peakflow 3. 
Get 48-hour forecasts based on equations - See MeteoHAck2019.pptx and MeteoHack2019 - create index.pptx. - Daily mean peakflow and/or 5-minute real-time data can be used to build this model. - Techniques to experiment with: ARIMA, RNN, etc. 4. Create a separate model for predicting flood alerts. Note that both models will need to take water levels into consideration, something that was not done here. ############################################################################################ Recommendation System Student Name: (dachengw) Introduction This tutorial will introduce an approach to building a simple recommendation system. According to the definition from Wikipedia, a recommendation system is a subclass of information filtering system that seeks to predict the "rating" or "preference" that a user would give to an item. An everyday example is Amazon's recommendation engine: [](http://netdna.webdesignerdepot.com/uploads/amazon//recommended.jpg) Theoretically, Amazon analyzes users' information (purchase history, browse history and more) to recommend what the users may want to buy. Tutorial content In this tutorial, we will build a simple offline recommendation system to recommend movies. This recommendation system is not a practical or sophisticated one for commercial use, but working through this tutorial can give you a sense of how a recommendation system works. We will cover the following topics in this tutorial: - [Expectation](Expectation) - [Downloading and loading data](Downloading-and-loading-data) - [Item-based collaborative filtering](Item-based-collaborative-filtering) - [Recommendation for new users](Recommendation-for-new-users) - [Summary](Summary) Expectation The recommendation system we will build can: 1. Take the existing rating data as input. 2. Recommend, for each user, at most k (k = 5 for this tutorial) movies which haven't been rated by that user. k = 5 Downloading and loading data We are going to use the open dataset provided by MovieLens (https://movielens.org/). The dataset can be downloaded from http://grouplens.org/datasets/movielens/. For this tutorial, we will use the u.data file from the smallest dataset (100K records). According to the ReadMe (http://files.grouplens.org/datasets/movielens/ml-100k/README), this file contains ratings by 943 users on 1682 items. Each user has rated at least 20 movies. Users and items are numbered consecutively from 1. The data is randomly ordered. This is a tab-separated list of: user id | item id | rating | timestamp (a small parsing sketch is shown below, before the download code). Note: 1. An item means a movie, so the item id is the movie id. We consider item and movie interchangeable for this tutorial. 2. For the simple recommendation system we are going to build, we only use the first three fields: user id, item id and rating. That is to say, we ignore the timestamp. The timestamp is indeed valuable information, but we ignore it in this tutorial for simplicity. 3. The range of ratings is 1-5, and 5 means the best. Although not necessary, it would be nice to be able to get the movie title by its id. Therefore we need to download the u.item file.
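(As a quick aside, here is the parsing sketch referenced above for the u.data layout: each record splits on tabs into the four fields. The line below is a made-up example for illustration only, not necessarily a real row from the dataset.)

```python
# Made-up u.data-style record: user id | item id | rating | timestamp, tab-separated.
sample_line = "196\t242\t3\t881250949"

user_id, item_id, rating, timestamp = sample_line.split("\t")
print(int(user_id), int(item_id), float(rating), int(timestamp))
```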
The first two fields of every record in this file are movie id | movie title | ...Let's download these files:import requests def download_file(link_address, filename): response = requests.get(link_address, stream=True) if (response.status_code == requests.codes.ok) : with open(filename, 'wb') as handle: for block in response.iter_content(1024): handle.write(block) print "Successfully downloaded " + filename return True else: print "Sorry, " + filename + " download failed" return False # download user - movie ratings download_file('http://files.grouplens.org/datasets/movielens/ml-100k/u.data', 'u.data') # download movie id - movie map download_file('http://files.grouplens.org/datasets/movielens/ml-100k/u.item', 'u.item')Successfully downloaded u.data Successfully downloaded u.itemThen read the files to memory:# read u.data user_rating_raw = [] with open('u.data') as f: for line in f: fields = line.split('\t') user_rating_raw.append([int(fields[0]), int(fields[1]), float(fields[2]), int(fields[3])]) print "Read u.data, got " + str(len(user_rating_raw)) + " rating records." print print "The first 5 records are:" for row_index in range(5): print user_rating_raw[row_index] print # read u.item movie_title_map = {}; with open('u.item') as f: for line in f: fields = line.split('|') movie_title_map[int(fields[0])] = fields[1] print "Read id-title map for " + str(len(movie_title_map)) + " movies." print print "The first 5 movies in the map are:" for movie_id in range(1, 6): print (movie_id, movie_title_map[movie_id]) printRead id-title map for 1682 movies. The first 5 movies in the map are: (1, 'Toy Story (1995)') (2, 'GoldenEye (1995)') (3, 'Four Rooms (1995)') (4, 'Get Shorty (1995)') (5, 'Copycat (1995)')Item based collaborative filtering Among the multiple recommendation alogrithms, item-based collabrative filtering is one of most popular alogorithm. The recommendation alogrithm used by Amazon and other websites are based on item-based collabrative filtering (https://en.wikipedia.org/wiki/Item-item_collaborative_filtering). * We are going to implement a simple item-based collabrative filtering on thie tutorial.The idea of item-based collabrative filtering is to find similar items, and then recommend items based on the users' history related item. Let's say we found that _Star Wars (1977)_ is similar to _Return of the Jedi (1983)_, we assumes that the users who like _Star Wars (1977)_ are going to enjoy _Return of the Jedi (1983)_ too. Therefore, if we find that there is a user who watched (rated) _Star Wars (1997)_ but haven't watched (rated) _Return of the Jedi (1983)_, we will recommend _Return of the Jedi (1983)_ to the user.For our MovieLens scenario, we need to:1. Compute the similarity between movies based on the ratings2. For each user, recommend movies which are similar to the movies rated by that user, and the recommended movies should not contains those movies which have already rated by that user.Reference: * ., ., & . (2003). Amazon. com recommendations: Item-to-item collaborative filtering. IEEE Internet computing, 7(1), 76-80. Before computing the similarity between movies, let's convert the raw data, user_rating_record, into a matrix (numpy 2d array), movie_user_mat.Each element in the movie_user_mat stores a rating. movie_user_mat is of size num_movie by num_user. num_movie\[i\]\[j\] means the j-th user's rating for i-th movie. 
Therefore, each row stores the ratings for a movie from all users, and each column stores a user's rating.Noted that the the range of the rating is 1-5, so we can use 0 to indicate that a user haven't rated a movie.import numpy as np # number of movies and number of users, # these two numbers are from ReadMe (http://files.grouplens.org/datasets/movielens/ml-100k/README) num_user = 943 num_movie = 1682 movie_user_mat = np.zeros((num_movie, num_user)); for user_rating_record in user_rating_raw: # minus 1 to convert the index (id) to 0 based user_index = user_rating_record[0] - 1 movie_index = user_rating_record[1] - 1 rating = user_rating_record[2] movie_user_mat[movie_index][user_index] = ratingNow that we have the movie-user matrix, we can perform the first step, computing the similarity between movies. We will use cosine similarity that we learned (https://en.wikipedia.org/wiki/Cosine_similarity). Because each row represents the ratings for a movie from all users, we consider treat rows as the input vectors. Noted that the similarity matrix, movie_similarity_mat, is a sysemtric matrix (movie_similarity_mat\[i\]\[j\] = movie_similarity_mat\[j\]\[i\]).import scipy.spatial as scp movie_similarity_mat = np.zeros((num_movie, num_movie)) for i in range(num_movie): movie_i_rating = movie_user_mat[i] for j in range(i, num_movie): movie_j_rating = movie_user_mat[j] cos_similarity = 1.0 - scp.distance.cosine(movie_i_rating, movie_j_rating) movie_similarity_mat[i][j] = cos_similarity movie_similarity_mat[j][i] = cos_similarityFinally, we can compute the what movies should be recommended to the users.In order to achieve this goal, for each user, we need to compute his / her interest in each movie. We represent the interests using a coefficient.The coefficient that indicates j-th user's interest in i-th movie (a large the coefficient means the user is highly interested in that movie)$$ coefficient[i][j]= \sum_{k=1}^n similarity[k-1][i] * rating[k-1][j]$$Where n is the number of movies, similarity\[k-1\]\[i\] is movie_similarity_mat\[k-1\]\[i\] (similarity between k-1 th movie and i-th movie) and rating\[k-1\]\[j\] is movie_user_mat\[k-1\]\[j\] (j-th user's rating on k-1 th movie)Noted that this equation is equivalent to$$ coefficient[i][j]= \sum_{k=1}^n similarity[i][k-1] * rating[k-1][j]$$because movie_similarity_mat is symmetric. It may looks cofusing, so let's take a small dataset (stored in test_rat) as an example.test_rat = np.asarray([[0,1,5], [1,0,5], [5,0,0], [0,5,3]]); test_simi = np.zeros((4, 4)) for i in range(4): movie_i_rating = test_rat[i] for j in range(i, 4): movie_j_rating = test_rat[j] cos_similarity = 1.0 - scp.distance.cosine(movie_i_rating, movie_j_rating) test_simi[i][j] = cos_similarity test_simi[j][i] = cos_similarity print "movie-rating:" print test_rat print print "similarities:" print test_simimovie-rating: [[0 1 5] [1 0 5] [5 0 0] [0 5 3]] similarities: [[ 1. 0.96153846 0. 0.67267279] [ 0.96153846 1. 0.19611614 0.5045046 ] [ 0. 0.19611614 1. 0. ] [ 0.67267279 0.5045046 0. 1. 
]]For the first user (0-th user), his / her interst in the first movie (0-th movie) should be:$$ coefficent[0][0] = rating[0][0] * similarity[0][0] + rating[1][0] * similarity[1][0] + rating[2][0] * similarity[2][0] + rating[3][0] * similarity[3][0] $$$$ coefficent[0][0] = 0 * 1 + 1 * 0.96153846 + 5 * 0 + 0 * 0.67267279 = 0.96153846 $$his / her interst in the last movie (3-th movie) should be:$$ coefficent[3][0] = 0 * 0.67267279 + 1 * 0.5045046 + 5 * 0 + 0 * 1 = 0.5045046 $$because 0.96153846 > 0.5045046, we should recommend the first movie instead of the last movie if we can only recommend one movie.Noted that the equation$$ coefficient[i][j]= \sum_{k=1}^n similarity[i][k-1] * rating[k-1][j]$$is simply a matrix dot operation:$$coefficient = similarity.dot(rating)$$ The last detail we need to take care of is that we shouldn't recommend a movie that have been rated. If a user already rated the movie _Star Wars (1977)_, we should not recomment _Star Wars (1977)_ to this user. We store the coeffiecients in recommendation_coefficient_mat, and store the id of the recommended movies for each user in a dictionary, recommendation_per_user.import heapq # find n elements with largest values from a dictonary # http://www.pataprogramming.com/2010/03/python-dict-n-largest/ def dict_nlargest(d,n): return heapq.nlargest(n, d, key = lambda t: d[t]) # num_movie by num_user = (num_movie by num_movie) * (num_movie by num_user) recommendation_coefficient_mat = movie_similarity_mat.dot(movie_user_mat) recommendation_per_user = {} for user_index in range(num_user): recommendation_coefficient_vector = recommendation_coefficient_mat.T[user_index] # remove the movies that already been rated unrated_movie = (movie_user_mat.T[user_index] == 0) recommendation_coefficient_vector *= unrated_movie recommendation_coefficient_dict = {movie_id:coefficient for movie_id, coefficient in enumerate(recommendation_coefficient_vector)} recommendation_per_user[user_index] = dict_nlargest(recommendation_coefficient_dict, k)So the recommended movie for the first user is:print "(movie id, title)" for movie_id in recommendation_per_user[0]: # movie_id + 1 to convert it backed to 1-based instead of 0-based print (movie_id, movie_title_map[movie_id + 1]) print(movie id, title) (422, 'E.T. the Extra-Terrestrial (1982)') (654, 'Stand by Me (1986)') (567, 'Speed (1994)') (402, 'Batman (1989)') (384, 'True Lies (1994)')Recommendation for new users We mentioned that we can use users's information to recommend movies, but what if we have a new user that we have no information about? The coefficients for that user will be all zeros, it is not reasonable to find the top-5 elements in an array of zeros.What movies should we recommend? An option is to recommend the movies which got rated by the most number of the users. This is similiar to recommending "best seller" on Amazon.com to new users.import collections movie_rated_counter = collections.Counter([rating_record[1] for rating_record in user_rating_raw]) most_rated_movies = movie_rated_counter.most_common(k) print "The most rated 5 movies are:\n" for movie_id, rated_count in most_rated_movies: print (movie_id, movie_title_map[movie_id], rated_count) printThe most rated 5 movies are: (50, 'Star Wars (1977)', 583) (258, 'Contact (1997)', 509) (100, 'Fargo (1996)', 508) (181, 'Return of the Jedi (1983)', 507) (294, ' (1997)', 485)Jupyter Notebook Fundamentals A **notebook** is a collection **cells**. These cells are run to execute code, render formatted text or display graphical visualizations. 
Understanding Code Cells and Markdown Cells The following cell (with the gray text area) is a code cell.# This is a code cell # By default, a new cell added in a notebook is a code cell 1 + 1This notebook is written in Python. Because of this, you need to select the appropriate **Kernel** that you use to run the cells of this notebook.To select your Kernel:1. In the notebook toolbar, select the **Kernel** dropdown.2. From the drop-down, select **Python 3**. ![Kernel dropdown.](https://github.com/solliancenet/MCW-Modernizing-data-analytics-with-SQL-Server-2019/raw/master/Hands-on%20lab/media/ads-notebook-select-kernel.png)The code cell above has not run yet, so the expressions of `1 + 1` has not been evaluated. To run the code cell, select the cell by placing your cursor within the cell text area and do any of the following:- Press `F5` to run the cell- Use the cell Run icon to the left of the cell ![Run Cell](https://github.com/solliancenet/MCW-Modernizing-data-analytics-with-SQL-Server-2019/raw/master/Hands-on%20lab/media/ads-notebook-run.png) The following cell is another example of a code cell. Run it to see its output.# This is also a code cell print("Welcome to your SQL Server 2019 Big Data cluster!")The following cell, which displays its output as formatted text is a text cell that supports [markdown](https://en.wikipedia.org/wiki/Markdown) format. This is a *text* cell.To create a text cell, select the cell command menu on the upper-right (...). In the context menu, select **Insert Text Before** to add a text cell above this one, or **Insert Text After** to add one after this cell.![Cell command menu.](https://github.com/solliancenet/MCW-Modernizing-data-analytics-with-SQL-Server-2019/raw/master/Hands-on%20lab/media/ads-notebook-command.png) Double click on the above cell and notice how the cell changes to an editable code cell.A preview of the markdown is displayed below the cell. To finish editing, simply click somewhere outside of the cell or press `Esc`. Understanding cell output By default, a notebook cell will output the value of evaluating the last line the cell. Run the following cell. Observe that the entire cell is echoed in the output because the cell contains only one line."Hello SQL world!"Next, examine the following cell. What do you expect the output to be? Run the cell and confirm your understanding."Hello SQL world!" "And, hello Jupyter notebook!"If you want to ensure your output displays something, use the `print` method.print("Hello SQL world!") print("And, hello Jupyter notebook!")Not all code lines return a value to be output. Run the following cell to see one such an example.text_variable = "Hello, hello!"Running multiple notebook cells It is not uncommon to need to run (or re-run) a all notebook cells in top to bottom order.To do this, select **Run Cells** in the toolbar above the notebook. This runs all cells starting from the first. Adding code cells You can add new code cells in the same way you add text cells.To do this, select the cell command menu on the upper-right (...). In the context menu, select **Insert Code Before** to add a code cell above this one, or **Insert Code After** to add one after this cell.![Cell command menu.](https://github.com/solliancenet/MCW-Modernizing-data-analytics-with-SQL-Server-2019/raw/master/Hands-on%20lab/media/ads-cell-command-code.png)You can also use this command menu to delete a cell. 
Understanding notebook state When you execute notebook cells, their execution is backed by a process running on a cluster or locally, depending on the Kernel you select. The state of your notebook, such as the values of variables, is maintained in the process. All variables default to a global scope (unless you author your code so it has nested scopes) and this global state can be a little confusing at first when you re-run cells. Run the following two cells in order and take note of the value ouput for the variable `y`:x = 10 y = x + 1 yNext, run the following cell.x = 100Planewave propagation in a Whole-space (frequency-domain) PurposeWe visualizae downward propagating planewave in the homogeneous earth medium. With the three apps: a) Plane wave app, b) Profile app, and c) Polarization ellipse app, we understand fundamental concepts of planewave propagation. Set upPlanewave EM equation can be written as $$\frac{\partial^2 \mathbf{E}}{\partial z^2} + k^2 \mathbf{E} = 0,$$For homogeneous earth, solution can be simply derived:$$\mathbf{E} = \mathbf{E}_0 e^{ikz}$$$$\mathbf{H} = - i \omega \mu \nabla \times (\mathbf{E}_0 e^{ikz}).$$where complex wavenumber $k$ is $$ k = \sqrt{\mu \epsilon \omega^2 - i \mu \sigma \omega}.$$In time domain, the wave travelling in the negative z-direction has the form:$$ \mathbf{e} = \mathbf{e}_0^- e^{i(k z + \omega t)}.$$ax = plotObj3D()Planewave app Parameters:- Field: Type of EM fields ("Ex": electric field, "Hy": magnetic field)- AmpDir: Type of the vectoral EM fields None: $F_x$ or $F_y$ or $F_z$ Amp: $\mathbf{F} \cdot \mathbf{F}^* = |\mathbf{F}|^2$ Dir: Real part of a vectoral EM fields, $\Re[\mathbf{F}]$ - ComplexNumber: Type of complex data ("Re", "Im", "Amp", "Phase") - Frequency: Transmitting frequency (Hz)- Sigma: Conductivity of homogeneous earth (S/m)- Scale: Choose "log" or "linear" scale - Time:dwidget = PlanewaveWidget() Q = dwidget.InteractivePlaneWave() display(Q)Profile appWe visualize EM fields at vertical profile (marked as red dots in the above app). Parameters:- **Field**: Ex, Hy, and Impedance - ** $\sigma$ **: Conductivity (S/m)- **Scale**: Log10 or Linear scale- **Fixed**: Fix the scale or not- **$f$**: Frequency- **$t$**: Timedisplay(InteractivePlaneProfile())Polarization Ellipse appPolarwidget = PolarEllipse(); Polarwidget.Interactive()Use this section to inspect the datametadata_path = 'data/metadata.csv' meta_df = pd.read_csv(metadata_path) meta_df.shape meta_df.head() meta_df.dropna(subset=['sha'], inplace=True) meta_df.shape meta_df.full_text_file.unique()Data manipulationclass Document: def __init__(self, paper_id, abstract, body_text): self.paper_id = paper_id self.abstract = abstract self.body_text = body_text @classmethod def from_json(cls, path): with open(path, 'r') as fd: data = json.load(fd) paper_id = data['paper_id'] abstract = '\n'.join([record['text'] for record in data['abstract']]) body_text = '\n'.join([record['text'] for record in data['body_text']]) return cls(paper_id, abstract, body_text) def __repr__(self): return f'{self.paper_id}: {self.abstract[:200]} ... {self.body_text[:200]} ...' def _repr_html_(self): paper_html = f'Paper ID: {self.paper_id}' abstract_html = ['

<p>' + record + '</p>' for record in self.abstract.split('\n')] abstract_html = '<h3>' + 'Abstract' + '</h3>' + ''.join(abstract_html) body_text_html = ['<p>' + record + '</p>' for record in self.body_text.split('\n')] body_text_html = '<h3>' + 'Body text' + '</h3>

' + ''.join(body_text_html) return paper_html + abstract_html + body_text_html class CollectionLoader: def __init__(self, dirs, spec=''): spec = self._parse_spec(spec, dirs) docfiles = [] for i, dirname in enumerate(dirs): dirfiles = glob.glob(f'{dirname}/**/*.json', recursive=True) limit = spec[i] or len(dirfiles) docfiles.extend(dirfiles[:limit]) self.docfiles = docfiles @staticmethod def _parse_spec(spec, dirs): if not spec: return [None] * len(dirs) spec_to_int = [int(s) if s.isdigit() else None for s in spec.split(':')] if len(dirs) != len(spec_to_int): raise ValueError('length of dirs does not match length of spec') return spec_to_int def __iter__(self): for fname in self.docfiles: yield Document.from_json(fname)Basic usage# specify list of directories # note: if topmost directory does not contain json files, # recursive search is performed dirs = ('./data/dataset/noncomm_use100', './data/dataset/comm_use100', './data/dataset/biorxiv_medrxiv100') # pass above list and spec string # each entry, delimited by :, in spec string represents the number of json files # that will be read from corresponding directory collection_loader = CollectionLoader(dirs, spec='2:1:3') collection = list(collection_loader) # sanity check print(collection[3]) print('number of documents:', len(collection)) # rich output (only available in Jupyter) # collection[3]Preprocessing pipelineimport re import spacy class Pipeline: def __init__(self, model, before_tokenizer=None, after_tokenizer=None): self.model = model self.before_tokenizer = before_tokenizer or [] self.after_tokenizer = after_tokenizer or [] self._build() def _build(self): nlp = self._create_tokenizer() for component in self.after_tokenizer: if isinstance(component, str): # spacy component if component in self._pretrained: obj = self._pretrained[component] else: obj = nlp.create_pipe(component) nlp.add_pipe(obj) else: # user-defined component name, obj = component nlp.add_pipe(obj, name=name) # we dont't need cache anymore del self._pretrained self.nlp = nlp def _create_tokenizer(self): # hacky way of creating spacy pipeline without components nlp = spacy.load(self.model) # we have to cache the pretrained components in case we need them later self._pretrained = {} for pipe in nlp.pipe_names: name, obj = nlp.remove_pipe(pipe) if name in self.after_tokenizer: self._pretrained[name] = obj return nlp def _apply_before_tokenizer(self, text): for func in self.before_tokenizer: text = func(text) return text def __call__(self, texts, n_process=1): pre_tokenizer = (self._apply_before_tokenizer(text) for text in texts) # nlp.pipe returns the generator, so yield from it yield from self.nlp.pipe(pre_tokenizer, n_process=n_process)Helper functions for text normalizationNON_ALPHANUM_REG = re.compile(r"[^A-Za-z']") def lowercase(text): return text.lower() def single_space(text): return re.sub(r'\s+', ' ', text) def remove_non_alphanum(text): return re.sub(NON_ALPHANUM_REG, ' ', text)The next step is to build a class for the convenient access to the tokens of a documentclass DocViewer: def __init__(self, doc): self.doc = doc def __getitem__(self, key): # if the key is a normal attribute, get its value # otherwise ask for forgiveness try: return [getattr(token, key) for token in self.doc] except: pass values = [] for token in self.doc: extension_holder = getattr(token, '_') values.append(getattr(extension_holder, key)) return valuesThere is no stemmer in spacy, so let's provide onefrom nltk.stem.snowball import SnowballStemmer from spacy.tokens import Token class 
Stemmer: def __init__(self, language='english'): self._stemmer = SnowballStemmer(language) Token.set_extension('stem', default=None, force=True) def __call__(self, doc): for token in doc: token._.set('stem', self._stemmer.stem(token.text)) return docUsagepipeline = Pipeline(model='en_core_web_sm', before_tokenizer= [lowercase, remove_non_alphanum, single_space], after_tokenizer= [('stemmer', Stemmer()), 'tagger']) processed = list(pipeline([doc.abstract for doc in collection])) viewer = DocViewer(processed[3]) # print textual representation # print(viewer['text']) # print stemms # print(viewer['stem']) # print lemmas (in spacy, lemmatization is performed by default) # print(viewer['lemma_']) # check if a word is a stopword # print(viewer['is_stop']) # print POS tags # print(viewer['tag_'])Text Harsh by HashingVectorizer* 把文档中的单词HASH到一个字典(固定大小),然后再统计词频from sklearn.feature_extraction.text import HashingVectorizer x_train_o = [ '123 543 hello 999 345', 'i love play football', 'is this a tree', 'ha ha ha ha ', ] y_train_o = [1,0,0,0] x_test_o = ['333 111', 'this is'] vectorizer = HashingVectorizer(decode_error='ignore', n_features=10, non_negative=True) x_train = vectorizer.fit_transform(x_train_o) x_test = vectorizer.transform(x_test_o) x_train.shape x_test.shape x_train.toarray() x_test.toarray()Dilation, Erosion, Opening and Closing https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html- Dilation - add pixel to the boundaries- Erosion - removes pixel from the boundaries- Opening - erosion followed by dilation- Closing - dilation follwed by erosionimport cv2 import numpy as np image = cv2.imread('../images/opencv.png', 0) cv2.imshow('Original', image) cv2.waitKey(0) # Let's define our kernel size kernel = np.ones((5,5), np.uint8) # erode erosion = cv2.erode(image, kernel, iterations = 1) cv2.imshow('Erosion', erosion) cv2.waitKey(0) # dilation = cv2.dilate(image, kernel, iterations = 1) cv2.imshow('Dilation', dilation) cv2.waitKey(0) # opening - Good for removing noise opening = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel) cv2.imshow('Opening', opening) cv2.waitKey(0) # closing - Good for removing noise closing = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel) cv2.imshow('Closing', closing) cv2.waitKey(0) cv2.destroyAllWindows()Nonparametric Latent Dirichlet Allocation_Latent Dirichlet Allocation_ is a [generative](https://en.wikipedia.org/wiki/Generative_model) model for topic modeling. Given a collection of documents, an LDA inference algorithm attempts to determined (in an unsupervised manner) the topics discussed in the documents. It makes the assumption that each document is generated by a probability model, and, when doing inference, we try to find the parameters that best fit the model (as well as unseen/latent variables generated by the model). If you are unfamiliar with LDA, has a [friendly introduction](http://blog.echen.me/2011/08/22/introduction-to-latent-dirichlet-allocation/) you should read.Because LDA is a _generative_ model, we can simulate the construction of documents by forward-sampling from the model. 
The generative algorithm is as follows (following [Heinrich](http://www.arbylon.net/publications/text-est.pdf)):* for each topic $k\in [1,K]$ do * sample term distribution for topic $\overrightarrow \phi_k \sim \text{Dir}(\overrightarrow \beta)$* for each document $m\in [1, M]$ do * sample topic distribution for document $\overrightarrow\theta_m\sim \text{Dir}(\overrightarrow\alpha)$ * sample document length $N_m\sim\text{Pois}(\xi)$ * for all words $n\in [1, N_m]$ in document $m$ do * sample topic index $z_{m,n}\sim\text{Mult}(\overrightarrow\theta_m)$ * sample term for word $w_{m,n}\sim\text{Mult}(\overrightarrow\phi_{z_{m,n}})$ You can implement this with [a little bit of code](https://gist.github.com/tdhopper/521006b60e1311d45509) and start to simulate documents.In LDA, we assume each word in the document is generated by a two-step process:1. Sample a topic from the topic distribution for the document.2. Sample a word from the term distribution from the topic. When we fit the LDA model to a given text corpus with an inference algorithm, our primary objective is to find the set of topic distributions $\underline \Theta$, term distributions $\underline \Phi$ that generated the documents, and latent topic indices $z_{m,n}$ for each word.To run the generative model, we need to specify each of these parameters:vocabulary = ['see', 'spot', 'run'] num_terms = len(vocabulary) num_topics = 2 # K num_documents = 5 # M mean_document_length = 5 # xi term_dirichlet_parameter = 1 # beta topic_dirichlet_parameter = 1 # alphaThe term distribution vector $\underline\Phi$ is a collection of samples from a Dirichlet distribution. This describes how our 3 terms are distributed across each of the two topics.from scipy.stats import dirichlet, poisson from numpy import round from collections import defaultdict from random import choice as stl_choice term_dirichlet_vector = num_terms * [term_dirichlet_parameter] term_distributions = dirichlet(term_dirichlet_vector, 2).rvs(size=num_topics) print(term_distributions)[[ 0.41 0.02 0.57] [ 0.38 0.36 0.26]]Each document corresponds to a categorical distribution across this distribution of topics (in this case, a 2-dimensional categorical distribution). This categorical distribution is a _distribution of distributions_; we could look at it as a Dirichlet process!The base base distribution of our Dirichlet process is a uniform distribution of topics (remember, topics are term distributions).base_distribution = lambda: stl_choice(term_distributions) # A sample from base_distribution is a distribution over terms # Each of our two topics has equal probability from collections import Counter for topic, count in Counter([tuple(base_distribution()) for _ in range(10000)]).most_common(): print("count:", count, "topic:", [round(prob, 2) for prob in topic])count: 5066 topic: [0.40999999999999998, 0.02, 0.56999999999999995] count: 4934 topic: [0.38, 0.35999999999999999, 0.26000000000000001]Recall that a sample from a Dirichlet process is a distribution that approximates (but varies from) the base distribution. In this case, a sample from the Dirichlet process will be a distribution over topics that varies from the uniform distribution we provided as a base. 
If we use the stick-breaking metaphor, we are effectively breaking a stick one time and the size of each portion corresponds to the proportion of a topic in the document.To construct a sample from the DP, we need to [again define our DP class](/dirichlet-distribution/):from scipy.stats import beta from numpy.random import choice class DirichletProcessSample(): def __init__(self, base_measure, alpha): self.base_measure = base_measure self.alpha = alpha self.cache = [] self.weights = [] self.total_stick_used = 0. def __call__(self): remaining = 1.0 - self.total_stick_used i = DirichletProcessSample.roll_die(self.weights + [remaining]) if i is not None and i < len(self.weights) : return self.cache[i] else: stick_piece = beta(1, self.alpha).rvs() * remaining self.total_stick_used += stick_piece self.weights.append(stick_piece) new_value = self.base_measure() self.cache.append(new_value) return new_value @staticmethod def roll_die(weights): if weights: return choice(range(len(weights)), p=weights) else: return NoneFor each document, we will draw a topic distribution from the Dirichlet process:topic_distribution = DirichletProcessSample(base_measure=base_distribution, alpha=topic_dirichlet_parameter)A sample from this _topic_ distribution is a _distribution over terms_. However, unlike our base distribution which returns each term distribution with equal probability, the topics will be unevenly weighted.for topic, count in Counter([tuple(topic_distribution()) for _ in range(10000)]).most_common(): print("count:", count, "topic:", [round(prob, 2) for prob in topic])count: 9589 topic: [0.38, 0.35999999999999999, 0.26000000000000001] count: 411 topic: [0.40999999999999998, 0.02, 0.56999999999999995]To generate each word in the document, we draw a sample topic from the topic distribution, and then a term from the term distribution (topic).topic_index = defaultdict(list) documents = defaultdict(list) for doc in range(num_documents): topic_distribution_rvs = DirichletProcessSample(base_measure=base_distribution, alpha=topic_dirichlet_parameter) document_length = poisson(mean_document_length).rvs() for word in range(document_length): topic_distribution = topic_distribution_rvs() topic_index[doc].append(tuple(topic_distribution)) documents[doc].append(choice(vocabulary, p=topic_distribution))Here are the documents we generated:for doc in documents.values(): print(doc)['see', 'run', 'see', 'spot', 'see', 'spot'] ['see', 'run', 'see'] ['see', 'run', 'see', 'see', 'run', 'spot', 'spot'] ['run', 'run', 'run', 'spot', 'run'] ['run', 'run', 'see', 'spot', 'run', 'run']We can see how each topic (term-distribution) is distributed across the documents:for i, doc in enumerate(Counter(term_dist).most_common() for term_dist in topic_index.values()): print("Doc:", i) for topic, count in doc: print(5*" ", "count:", count, "topic:", [round(prob, 2) for prob in topic])Doc: 0 count: 6 topic: [0.38, 0.35999999999999999, 0.26000000000000001] Doc: 1 count: 3 topic: [0.40999999999999998, 0.02, 0.56999999999999995] Doc: 2 count: 5 topic: [0.40999999999999998, 0.02, 0.56999999999999995] count: 2 topic: [0.38, 0.35999999999999999, 0.26000000000000001] Doc: 3 count: 5 topic: [0.38, 0.35999999999999999, 0.26000000000000001] Doc: 4 count: 5 topic: [0.40999999999999998, 0.02, 0.56999999999999995] count: 1 topic: [0.38, 0.35999999999999999, 0.26000000000000001]To recap: for each document we draw a _sample_ from a Dirichlet _Process_. 
The base distribution for the Dirichlet process is a categorical distribution over term distributions; we can think of the base distribution as an $n$-sided die where $n$ is the number of topics and each side of the die is a distribution over terms for that topic. By sampling from the Dirichlet process, we are effectively reweighting the sides of the die (changing the distribution of the topics).For each word in the document, we draw a _sample_ (a term distribution) from the distribution (over term distributions) _sampled_ from the Dirichlet process (with a distribution over term distributions as its base measure). Each term distribution uniquely identifies the topic for the word. We can sample from this term distribution to get the word.Given this formulation, we might ask if we can roll an _infinite_ sided die to draw from an unbounded number of topics (term distributions). We can do exactly this with a _Hierarchical_ Dirichlet process. Instead of the base distribution of our Dirichlet process being a _finite_ distribution over topics (term distributions) we will instead make it an infinite Distribution over topics (term distributions) by using yet another Dirichlet process! This base Dirichlet process will have as its base distribution a Dirichlet _distribution_ over terms. We will again draw a _sample_ from a Dirichlet _Process_ for each document. The base distribution for the Dirichlet process is itself a Dirichlet process whose base distribution is a Dirichlet distribution over terms. (Try saying that 5-times fast.) We can think of this as a countably infinite die each side of the die is a distribution over terms for that topic. The sample we draw is a topic (distribution over terms).For each word in the document, we will draw a _sample_ (a term distribution) from the distribution (over term distributions) _sampled_ from the Dirichlet process (with a distribution over term distributions as its base measure). Each term distribution uniquely identifies the topic for the word. We can sample from this term distribution to get the word.These last few paragraphs are confusing! Let's illustrate with code.term_dirichlet_vector = num_terms * [term_dirichlet_parameter] base_distribution = lambda: dirichlet(term_dirichlet_vector).rvs(size=1)[0] base_dp_parameter = 10 base_dp = DirichletProcessSample(base_distribution, alpha=base_dp_parameter)This sample from the base Dirichlet process is our infinite sided die. It is a probability distribution over a countable infinite number of topics. The fact that our die is countably infinite is important. The sampler `base_distribution` draws topics (term-distributions) from an uncountable set. If we used this as the base distribution of the Dirichlet process below each document would be constructed from a _completely unique set of topics_. By feeding `base_distribution` into a Dirichlet Process (stochastic memoizer), we allow the topics to be shared across documents. In other words, `base_distribution` will never return the same topic twice; however, every topic sampled from `base_dp` would be sampled an infinite number of times (if we sampled from `base_dp` forever). At the same time, `base_dp` will also return an _infinite number_ of topics. 
In our formulation of the the LDA sampler above, our base distribution only ever returned a finite number of topics (`num_topics`); there is no `num_topics` parameter here.Given this setup, we can generate documents from the _hierarchical Dirichlet process_ with an algorithm that is essentially identical to that of the original _latent Dirichlet allocation_ generative sampler:nested_dp_parameter = 10 topic_index = defaultdict(list) documents = defaultdict(list) for doc in range(num_documents): topic_distribution_rvs = DirichletProcessSample(base_measure=base_dp, alpha=nested_dp_parameter) document_length = poisson(mean_document_length).rvs() for word in range(document_length): topic_distribution = topic_distribution_rvs() topic_index[doc].append(tuple(topic_distribution)) documents[doc].append(choice(vocabulary, p=topic_distribution))Here are the documents we generated:for doc in documents.values(): print(doc)['spot', 'spot', 'spot', 'spot', 'run'] ['spot', 'spot', 'see', 'spot'] ['spot', 'spot', 'spot', 'see', 'spot', 'spot', 'spot'] ['run', 'run', 'spot', 'spot', 'spot', 'spot', 'spot', 'spot'] ['see', 'run', 'see', 'run', 'run', 'run']And here are the latent topics used:for i, doc in enumerate(Counter(term_dist).most_common() for term_dist in topic_index.values()): print("Doc:", i) for topic, count in doc: print(5*" ", "count:", count, "topic:", [round(prob, 2) for prob in topic])Doc: 0 count: 2 topic: [0.17999999999999999, 0.79000000000000004, 0.02] count: 1 topic: [0.23000000000000001, 0.58999999999999997, 0.17999999999999999] count: 1 topic: [0.089999999999999997, 0.54000000000000004, 0.35999999999999999] count: 1 topic: [0.22, 0.40000000000000002, 0.38] Doc: 1 count: 2 topic: [0.23000000000000001, 0.58999999999999997, 0.17999999999999999] count: 1 topic: [0.17999999999999999, 0.79000000000000004, 0.02] count: 1 topic: [0.35999999999999999, 0.55000000000000004, 0.089999999999999997] Doc: 2 count: 4 topic: [0.11, 0.65000000000000002, 0.23999999999999999] count: 2 topic: [0.070000000000000007, 0.65000000000000002, 0.27000000000000002] count: 1 topic: [0.28999999999999998, 0.65000000000000002, 0.070000000000000007] Doc: 3 count: 2 topic: [0.17999999999999999, 0.79000000000000004, 0.02] count: 2 topic: [0.25, 0.55000000000000004, 0.20000000000000001] count: 2 topic: [0.2899999999999999[...]NumPy Basicsimport numpy as np1-D arrayarr_from_list = np.array([1,3,5,7,9,111]) arr_from_tuple = np.array((11,13,51,71,92,98)) print(arr_from_list) print(arr_from_tuple) print(type(arr_from_list))[ 1 3 5 7 9 111] [11 13 51 71 92 98] 2-D Array, & Array propertiesdata = np.array([[1, 2], [3, 4], [5, 6], [20,30]]) print(data) print('data.dtype=', data.dtype) print('data.size=', data.size) print('data.itemsize=', data.itemsize) print('data.nbytes=', data.nbytes) print('data.shape=', data.shape) print('data.shape type', type(data.shape)) ##tuple print('data.shape[0]=', data.shape[0]) print('data.ndim=', data.ndim) print('data.strides=', data.strides)[[ 1 2] [ 3 4] [ 5 6] [20 30]] data.dtype= int64 data.size= 8 data.itemsize= 8 data.nbytes= 64 data.shape= (4, 2) data.shape type data.shape[0]= 4 data.ndim= 2 data.strides= (16, 8)Creation with data typenp.arange(2,20, dtype=np.int8)3-D Array & Access n-dim array itema = np.array([[[1,2,3],[4,5,6]], [[1,2,3],[4,5,6]]] ) print(a) print(type(a)) print(a.ndim) # how many subscripts needed to access this array print(a.shape) print(a[1,0,2]) #access a item in array[[[1 2 3] [4 5 6]] [[1 2 3] [4 5 6]]] 3 (2, 2, 3) 3np arange, reshape, linspacex = np.arange(50) # 0 
to 49 print(x) print("\n*****\n") x = np.arange(20,100,3) # start stop and steps print(x) print("\n*****\n") x = np.arange(50).reshape(5,2,5) # NOTE: shape passed as plain arguments print(x) # start at 10 stop at 20 included and get 15 evenly spaced points between with both included np.linspace(10, 20, 15)zeroes, ones, empty array, identity matrixz_m = np.zeros(3) # one dim zero matrix print(z_m) zero_matrix = np.zeros((4,2)) # NOTE: shape passed as list or tuple for dim > 1 zero_matrix np.zeros((2,3), dtype=np.int8) one_matrix = np.ones([5,3]) one_matrix empty_array = np.empty([2,3]) empty_array identity_matrix = np.eye(3) #takes only one arg since identity matrix is a square matrix of n x n identity_matrixLogistic RegressionNotebook orignially contributed by: [](https://github.com/Avinashshah099) Run in Google Colab View source on GitHub The following notebook uses Pima Indians Diabetes Database in order to determine whether to predict the onset of diabetes based on diagnostic measures. It is followed by a series of steps involving data visualisation and applying **Machine Learning Algorithm (Logistic Regression)**.We are using [Pima Indians Diabetes Database](https://www.kaggle.com/uciml/pima-indians-diabetes-database) originally contributed by [UCI Machine Learning](https://archive.ics.uci.edu/ml/index.php). Import LibraryImporting required libraries for Machine Learning Alogorithm and visualizing.#Import libraries with alias import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as snsGetting the DataThe dataset used here is **diabetes.csv**.The dataset consists of several medical predictor variables and one target variable, **Outcome**. Predictor variables includes the number of pregnancies the patient has had, their BMI, insulin level, age, and so on.- Pregnancies : Number of pregnancies the patient- Glucose : Level of Glucose- Insulin : Insulin level- BMI : BMI value#load dataset pima = pd.read_csv("diabetes.csv") pima.head() #list of top five index pima.shapeThere are 768 rows and 9 columnspima.info() pima.describe().T # let's find out how many 0 values are there in all columns (pima == 0).sum(axis=0) #We will use 'median' to replace 0 for all columns except for 'Insulin' as diff between mean and median was big pima['Pregnancies'].replace(0,pima['Pregnancies'].median(),inplace=True) pima['Glucose'].replace(0,pima['Glucose'].median(),inplace=True) pima['BloodPressure'].replace(0,pima['BloodPressure'].median(),inplace=True) pima['SkinThickness'].replace(0,pima['SkinThickness'].median(),inplace=True) pima['BMI'].replace(0,pima['BMI'].median(),inplace=True) pima['Insulin'].replace(0,pima['Insulin'].mean(),inplace=True) # let's check if all 0 values are replaced now (pima == 0).sum(axis=0) #loading columns of given dataset list(pima.columns)Exploratory Data Analysis(EDA)sns.pairplot(pima) pima.corr()There is no strong correlation between any columns# Data for BMI, Glucose and BloodPressure has normal distribution sns.distplot(pima['Glucose'],kde=True) sns.distplot(pima['BloodPressure'],kde=True) sns.distplot(pima['BMI'],kde=True) # Let's check outliers of the dataset columns, we are droping Insuline columns as it have most number of outlires plt.subplots(figsize=(15,10)) sns.boxplot(data=pima.drop(['Insulin','Outcome'],axis=1)) # Insulin has high number of outliers compared to other columns plt.subplots(figsize=(15,10)) sns.boxplot(data=pima['Insulin'])Selecting FeatureHere, you need to divide the given columns into two types of variables dependent(or 
target variable) and independent variable(or feature variables).#split dataset in features and target variable feature_cols = pima.columns.drop(["Outcome"]) X = pima[feature_cols] # Features y = pima["Outcome"] # Target variableSplitting Data To understand model performance, dividing the dataset into a training set and a test set is a good strategy. Let's split dataset by using function **train_test_split()**. You need to pass 3 parameters **features**, **target**, and **test_set size**. Additionally, you can use random_state to select records randomly.# split X and y into training and testing sets from sklearn.model_selection import train_test_split # training and testing sets are divided in 80:20 ratio X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.30,random_state=0)Here, the Dataset is broken into two parts in a ratio of **70:30**. It means **70%** data will be used for model training and **30%** for model testing. Model Development and Prediction First, import the **Logistic Regression** module and create a Logistic Regression classifier object using **LogisticRegression()** function. Then, fit your model on the train set using **fit()** and perform prediction on the test set using **predict()**.# import the class from sklearn.linear_model import LogisticRegression # instantiate the model (using the default parameters) log_reg = LogisticRegression() # fit the model with data log_reg.fit(X_train,y_train) # predict the model y_pred=log_reg.predict(X_test)/usr/local/lib/python3.6/dist-packages/sklearn/linear_model/_logistic.py:940: ConvergenceWarning: lbfgs failed to converge (status=1): STOP: TOTAL NO. of ITERATIONS REACHED LIMIT. Increase the number of iterations (max_iter) or scale the data as shown in: https://scikit-learn.org/stable/modules/preprocessing.html Please also refer to the documentation for alternative solver options: https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)Model Evaluation using Confusion MatrixA confusion matrix is a table that is used to evaluate the performance of a classification model. You can also visualize the performance of an algorithm. The fundamental of a confusion matrix is the number of correct and incorrect predictions are summed up class-wise.# import the metrics class from sklearn import metrics cnf_matrix = metrics.confusion_matrix(y_test, y_pred) cnf_matrixThe dimension of this matrix is 2*2 because this model is binary classification (0 and 1). Diagonal values represent accurate predictions, while non-diagonal elements are inaccurate predictions. In the output, 141 and 38 are actual predictions, and 36 and 16 are incorrect predictions. 
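As a quick arithmetic check of these counts (assuming scikit-learn's `[[TN, FP], [FN, TP]]` layout, i.e. TN = 141, TP = 38, FN = 36, FP = 16), the evaluation metrics reported further below follow directly:

```python
# Recompute the reported metrics from the confusion-matrix counts above.
# Assumed layout (scikit-learn convention): [[TN, FP], [FN, TP]].
tn, fp, fn, tp = 141, 16, 36, 38

accuracy = (tp + tn) / (tp + tn + fp + fn)   # 179 / 231 ~ 0.775
precision = tp / (tp + fp)                   # 38 / 54  ~ 0.704
recall = tp / (tp + fn)                      # 38 / 74  ~ 0.514

print(f"Accuracy:  {accuracy:.4f}")
print(f"Precision: {precision:.4f}")
print(f"Recall:    {recall:.4f}")
```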
Visualizing Confusion Matrix using Heatmap Let's visualize the results of the model in the form of a confusion matrix using matplotlib and seaborn.class_names=[0,1] # name of classes fig, ax = plt.subplots() tick_marks = np.arange(len(class_names)) plt.xticks(tick_marks, class_names) plt.yticks(tick_marks, class_names) # create heatmap sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap="YlGnBu" ,fmt='g') ax.xaxis.set_label_position("top") plt.tight_layout() plt.title('Confusion matrix', y=2) plt.ylabel('Actual label') plt.xlabel('Predicted label')Let's evaluate the model using model evaluation metrics such as accuracy, precision, and recall.print("Accuracy:",metrics.accuracy_score(y_test, y_pred)) print("Precision:",metrics.precision_score(y_test, y_pred)) print("Recall:",metrics.recall_score(y_test, y_pred))Accuracy: 0.7748917748917749 Precision: 0.7037037037037037 Recall: 0.5135135135135135Well, We got a classification rate of 77.5% accuracy.**Precision**: Precision is about being precise, Logistic Regression model predicted patients are going to suffer from diabetes, that patients have 70.3% of the time.**Recall**: If there are patients who have diabetes in the test set and your Logistic Regression model can identify it 51.3% of the time. ROC CurveReceiver Operating Characteristic(ROC) curve is a plot of the true positive rate against the false positive rate. It shows the tradeoff between sensitivity and specificity.y_pred_proba = log_reg.predict_proba(X_test)[::,1] fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba) auc = metrics.roc_auc_score(y_test, y_pred_proba) plt.plot(fpr,tpr,label="data 1, auc="+str(auc)) plt.legend(loc=4) plt.show()!pip install -q condacolab import condacolab condacolab.install() # check that everything works import condacolab condacolab.check() import numpy as np import matplotlib.pyplot as plt from sklearn.metrics import mean_absolute_error, mean_squared_error # define experimental and calculated values # draw from normal distribution x_mu, x_sigma = 10.0, 1.5 x_values = np.random.normal(x_mu, x_sigma, 1000) y_mu, y_sigma = 11.0, 1.5 y_values = np.random.normal(y_mu, y_sigma, 1000) plt.hist(x_values, alpha=0.5) plt.hist(y_values, alpha=0.5) plt.show() # bootstrap metric def bootstrap_metric(fct, x_values, y_values): assert callable(fct) == True bootstrapped_metric = [] # bootstrap metric to generate test distribution for _ in range(1000): indices = np.random.choice(range(0, len(x_values)), size=len(x_values), replace=True) x_selection = np.take(x_values, indices) y_selection = np.take(y_values, indices) r = fct(x_selection, y_selection) bootstrapped_metric.append(r) # define 90% CI alpha = 10.0 lower_p = alpha / 2.0 # get value at or near percentile (take a look at the definition of percentile if # you have less than 100 values to make sure you understand what is happening) lower = np.percentile(bootstrapped_metric, lower_p) upper_p = (100 - alpha) + (alpha / 2.0) upper = np.percentile(bootstrapped_metric, upper_p) # calculate true mean mean = fct(x_values, y_values) return mean, lower, upper # bootstrap MAE mean, lower, upper = bootstrap_metric(mean_absolute_error, x_values, y_values) print(f'MAE: {round(mean, 2):.2f} [{round(lower,2):.2f}, {round(upper,2):.2f}]') # bootstrap RMSE def calc_rmse(x_values, y_values): from sklearn.metrics import mean_squared_error return np.sqrt(mean_squared_error(x_values, y_values)) mean, lower, upper = bootstrap_metric(calc_rmse, x_values, y_values) print(f'RMSE: {round(mean, 2):.2f} [{round(lower,2):.2f}, 
{round(upper,2):.2f}]')MAE: 1.84 [1.77, 1.92] RMSE: 2.35 [2.26, 2.44]babilim.core.annotations> A collection of helpful annotations.This code is under the MIT License and requires the abc package.#export # MIT License # # Copyright (c) 2019 # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from abc import ABC, abstractmethodhide Class Decorator#export class _ClassDecorator(ABC): def __get__(self, obj, objtype): """ A class decorator is a base class that is used for all annotations that should be usable with python classes. Regular annotations will not work with classes. This is a helper class that can be used when writing annotations. """ import functools return functools.partial(self.__call__, obj) @abstractmethod def __call__(self, *args, **kwargs): passhide RunOnlyOnce#export class RunOnlyOnce(_ClassDecorator): def __init__(self, f): """ A decorator that ensures a function in an object gets only called exactly once. :param f: The function that should be wrapped. """ self.f = f self.called = {} def __call__(self, *args, **kwargs): if args[0] not in self.called: self.called[args[0]] = True self.f(*args, **kwargs)The run only once annotation is fundamental for the build function pattern, whereas it allows to write a function which is only called once, no matter how often it gets called. This behaviour is very usefull for creating variables on the GPU only once in the build and not on every run of the neural network.> Important: This is for use with the build function in a module. Ensuring it only gets called once and does not eat memory on the gpu.Using this in an example function which prints the parameter only yields on printout, even though the function gets called multiple times.@RunOnlyOnce def test_fun(msg): print(msg) test_fun("Foo") test_fun("Foo") test_fun("Foo") test_fun("Foo") test_fun("Foo")FooJupyter Notebook Helpers#export def extend_class(clazz, function_name): """ Extend a class by the function decorated with this decorator. :param clazz: The class that should be decorated. :param function_name: The name that the function in the class should have. (Can be different than unbound name of the class.) """ def _wrapper(func): setattr(clazz, function_name, func) return func return _wrapperThis annotation can be used for developing code in a jupyter notebook. It allows you to define a class in separate cells, like the following example. 
This gives you the exploratory capabilities of jupyter while developing a class that can be later exported (using nbdev) and used in production.# first cell class TestClass(object): def __init__(self, var): self.my_var = var # later cell test = TestClass(42) # again later cell @extend_class(TestClass, "foo") def __foo(self, name): print("self.my_var={}".format(self.my_var)) print("name={}".format(name)) self.name = name # and again later cell test.foo(name="Hello") print(test.name)self.my_var=42 name=Hello HelloInitializing the system with a latticeBefore you can begin a simulation, you must specify the initial conditions of the particles and the periodic box dimensions. HOOMD provides a number of ways to set initial conditions. You can create particles on a lattice, read the configuration from a file, or specify data directly in python. Create a latticeHOOMD can place particles on a lattice. HOOMD includes several standard lattices. Here is a square lattice in 2D:hoomd.context.initialize(''); system = hoomd.init.create_lattice(unitcell=hoomd.lattice.sq(a=1.2), n=16);HOOMD-blue v2.1.8 CUDA (7.5) DOUBLE HPMC_MIXED MPI SSE SSE2 SSE3 Compiled: 07/21/2017 Copyright 2009-2016 The Regents of the University of Michigan. ----- You are using HOOMD-blue. Please cite the following: * , , and . "General purpose molecular dynamics simulations fully implemented on graphics processing units", Journal of Computational Physics 227 (2008) 5342--5359 * , , , , , , , and . "Strong scaling of general-purpose molecular dynamics simulations on GPUs", Computer Physics Communications 192 (2015) 97--107 ----- notice(2): This system is not compute exclusive, using local rank to select GPUs notice(2): Unable to identify node local rank information notice(2): Using global rank to select GPUs HOOMD-blue is running on the following GPU(s): [0] Tesla K20c 13 SM_3.5 @ 0.706 GHz, 5061 MiB DRAM notice(2): Group "all" created containing 256 particlesExamine how the system configuration evolves over time. [ex_render](ex_render.py) is a helper script that builds animated gifs from trajectory files and system snapshots. It is part of the [hoomd-examples](https://github.com/glotzerlab/hoomd-examples) repository and designed only to render these examples.ex_render.render_disk_frame(system.take_snapshot(all=True))Here is a hexagonal lattice:hoomd.context.initialize(''); system = hoomd.init.create_lattice(unitcell=hoomd.lattice.hex(a=1.2), n=[16, 9]); ex_render.render_disk_frame(system.take_snapshot(all=True))And here is fcc.hoomd.context.initialize(''); system = hoomd.init.create_lattice(unitcell=hoomd.lattice.fcc(a=1.2), n=8); ex_render.render_sphere_frame(system.take_snapshot(all=True))Lattice defaultsBy default, the lattice generator names all particles A, and gives them mass 1.0, charge 0, diameter 1, moment of inertia [0,0,0], and orientation [1,0,0,0]. You can change the type name of the basic lattices in the argument list. Here is an example of a rectangular unit cell with two particle types of different sizes. The generic ``unitcell`` command takes in arbitrary unit cell vectors, along with positions, type names, mass, charge, etc... 
for every particle in the unit cell.hoomd.context.initialize(''); uc = hoomd.lattice.unitcell(N=2, a1=[1, 0, 0], a2=[0, 2, 0], a3=[0, 0, 1], dimensions=2, position=[[0,0,0], [0.5, 1, 0]], type_name=['A', 'B'], diameter=[1.0, 0.5]); system = hoomd.init.create_lattice(unitcell=uc, n=[16, 8]); ex_render.render_disk_frame(system.take_snapshot(all=True))A lattice of moleculesYou can use the `unitcell` command to help build a lattice of molecules. As of hoomd v2.0, `unitcell` does not allow topology specification. But you can add bond topology manually using the snapshot.hoomd.context.initialize(''); uc = hoomd.lattice.unitcell(N=2, a1=[2, 0, 0], a2=[0, 2, 0], a3=[0, 0, 1], dimensions=2, position=[[0,0,0], [0.6, 0.6, 0]], type_name=['A', 'B']); # Get a snapshot from the unitcell and add in bond topology snap = uc.get_snapshot(); snap.bonds.resize(1); snap.bonds.group[0] = [0, 1]; snap.bonds.types = ['bondA']; # replicate the lattice and initialize hoomd snap.replicate(4,4,1); system = hoomd.init.read_snapshot(snap); ex_render.render_disk_frame(system.take_snapshot(all=True))1) POS Tagging Basics **Tagging** is a kind of classification, the automatic assignment of description to tokens is called as tagging.The descriptor is called tag, which represents one of the part-of-speech, semantic information and so on. **Part-of-Speech tagging** is the process of assigning one of the parts of speech to the given word.In simple words we can say that, POS tagging is a task of labelling each word in a sentence with its appropriate part of speech Parts of speech include nouns, verb, adverbs, adjectives, pronouns, conjunction and their sub-categories **e.g.** Word: Paper, Tag: Noun POS tagging has applications in Named Entity Recognition (NER), sentiment analysis, question answering, etc# Import spaCy import spacy # load the English language library nlp = spacy.load(name='en_core_web_sm') # create a doc object doc = nlp("Apple is looking at buying U.K. startup for $1 billion") # print entire document text print(doc.text) # we can grab tokens by their index positions print(doc[2]) # Grab POS tag print(doc[2].pos_) # Fine-grained POS tag print(doc[2].tag_) # Spacy documentation link: https://spacy.io/usage/linguistic-features/ # table of information using for loop for token in doc: print(f'{token.text:{10}} {token.lemma_:{8}} {token.pos_:{8}} {token.tag_:{6}} {spacy.explain(token.tag_)}') # numbers in bracket used for space between the columnsApple Apple PROPN NNP noun, proper singular is be AUX VBZ verb, 3rd person singular present looking look VERB VBG verb, gerund or present participle at at ADP IN conjunction, subordinating or preposition buying buy VERB VBG verb, gerund or present participle U.K. U.K. PROPN NNP noun, proper singular startup startup NOUN NN noun, singular or mass for for ADP IN conjunction, subordinating or preposition $ $ SYM $ symbol, currency 1 1 NUM CD cardinal number billion billion NUM CD cardinal number2) Counting POS Tags **doc.count_by() method** accepts a specific token attribute as its argument and returns a frequency count of the given attribute as a dictionary objectdoc = nlp("Apple is looking at buying U.K. 
startup for $1 billion") # Count the frequencies of different coarse-grained POS tags: POS_counts = doc.count_by(spacy.attrs.POS) # attrs for attributes print(POS_counts) # output is a dictionary # these numbers are actually POS code # decode POS code doc.vocab[96].text # checking POS for an individual tokan doc[0].pos_3) Visualizing the Parts of Speech# Import spaCy import spacy # load the English language library nlp = spacy.load(name='en_core_web_sm') # Import the displaCy library from spacy import displacy # Create a simple Doc object doc = nlp("Apple is looking at buying U.K. startup for $1 billion") # Render the dependency displacy.render(doc, style='dep', jupyter=True, options={'distance': 80})import dataimport pandas as pd from sklearn.model_selection import train_test_split df = pd.read_csv("Churn.csv") df.shape df.head(3) df.Churn.unique() x = pd.get_dummies(df.drop(['Churn', 'Customer ID'], axis = 1)) y = df['Churn'].apply(lambda x: 1 if x == 'Yes' else 0) x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2) x_train.head(3) x_test.head(3)import dependenciesfrom tensorflow.keras.models import Sequential, load_model from tensorflow.keras.layers import Dense from sklearn.metrics import accuracy_scorebuild and compile modelmodel = Sequential() model.add(Dense(units = 32, activation = 'relu', input_dim = len(x_train.columns))) model.add(Dense(units = 64, activation = 'relu')) model.add(Dense(units = 1, activation = 'sigmoid')) #final layer terkait dengan banyaknya keluaran/output model.compile(loss = 'binary_crossentropy', optimizer = 'sgd', metrics = 'accuracy')fit, predict, and evaluatemodel.fit(x_train, y_train, epochs = 200, batch_size = 30) y_hat = model.predict(x_test) y_hat = [0 if val < 0.5 else 1 for val in y_hat] y_hat accuracy_score(y_hat, y_test)saving and reloadingmodel.save('tfmodel') model = load_model('tfmodel') #dan dapat digunakan dengan step seperti sebelumnyaBuilding Open Source Geochemical Research Tools in Python , , and ; CSIRO Mineral Resources Contents| [**Abstract**](./00_overview.ipynb) | **Introduction** | [**Examples**](./00_overview.ipynbExamples) ||:-----|:-----|:-----|| | [Software in Geochem](./01_intro.ipynbSoftware-in-Geochemistry) | [pyrolite](./011_pyrolite.ipynb) || | [Development & Tools](./01_intro.ipynbDevelopment-Workflow-&-Tools) | [pyrolite-meltsutil](./012_pyrolite-meltsutil.ipynb) | | | | [interferences](./013_interferences.ipynb) | | | | [autopew](./014_autopew.ipynb) | interferences> Tools for inorganic mass spectra and interference patterns.[![Docs](https://readthedocs.org/projects/interferences/badge/?version=develop)](https://interferences.readthedocs.io/) This is an under-development package to facilitate debugging and identification ofissues during method development and analysis of novel samples for geologically-focusedmass spectrometry. ``interferences`` is centred around building tables which contain sets of molecular ions corresponding to a specific set of elements within your analytical target. Building the tables themselves is relatively straightforward. For examples, to build a table of ions which might be expected from Ca, O, Ar and H with a charge of +1 and up to two atoms within a molecule, you could use: ```pythondf = build_table(["Ca", "O", "Ar", "H"], charges=[1], max_atoms=2)``` These tables can then be displayed graphically, with a rudimentary estimate of relative abundance shown. Currently, ``interferences`` has two such methods, ``stemplot`` and ``spectra``. 
The first simply illustates the relative position of peaks (in m/z), while the second attempts to represent the width of some of these mass peaks given a specified mass resolution.import pandas as pd import numpy as np from interferences.table import build_table import matplotlib.pyplot as plt from pyrolite.geochem.ind import REE window = ("Tm[169]", 0.1) df = build_table(REE() + ["O", "N", "H"], window=window, max_atoms=2) ax = df.mz.stemplot(window=window, max_labels=5, figsize=(8, 4)) ax.figure.suptitle('stemplot: Tm Interferences') plt.show() window = ("B[10]O[16]", 0.05) df = build_table(["C", "B", "N", "O"], window=window, max_atoms=2) ax = df.mz.spectra(window=window, mass_resolution=3000, max_labels=5, figsize=(8, 4)) ax.figure.suptitle('spectra: Cyanide BO Interference') plt.show()We've included some interactive examples below, if you'd like to play around with some of these without getting into the code itself. Note that it takes a second or two to run and refresh on these servers.import ipywidgets as widgets #interact_manual = widgets.interact.options(manual=True, manual_name="Build Plot") def show_table(elements=[], window1=None, window2=0.05, max_atoms=2, max_charge=2): elements = [el.strip() for el in elements.strip(',').split(',')] window = None if window1 is not None: if window1.strip(): window = (window1, window2) df = build_table(elements, window=window, max_atoms=max_atoms, charges=[i+1 for i in range(max_charge)]) print("Table size: {}".format(df.index.size)) return display(df.style.background_gradient(cmap="Blues", axis=0, subset=pd.IndexSlice[:, ['iso_product']])) mode= widgets.ToggleButtons(options=['spectra','stemplot'], value='spectra', description='Mode:') elements=widgets.Text(value="C,B,N,O",description='Elements:') max_atoms=widgets.IntSlider(min=1, max=3, step=1, value=2, description='Atoms:', continuous_update=False) max_charge=widgets.IntSlider(min=1, max=3, step=1, value=2, description='Max Charge:', continuous_update=False ) window1=widgets.Text(value="B[10]O[16]", description='Target:') window2=widgets.FloatLogSlider(min=-2, max=np.log10(2), step=0.1, value=0.05, description='Mass Width', continuous_update=False) mass_resolution=widgets.IntSlider(min=500, max=10000, step=1000, value=3000, description='Resolution:', continuous_update=False) image_ratio = widgets.FloatSlider(min=0, max=1.5, step=0.1, value=0.5, description='Image Ratio:', continuous_update=False) tableui = widgets.VBox([widgets.HTML(value="
Table Generator
"), widgets.HBox([widgets.VBox([elements, max_atoms, max_charge]), widgets.VBox([window1, window2]) ])]) tableout = widgets.interactive_output(show_table, { 'elements': elements, 'max_atoms': max_atoms, 'max_charge': max_charge, 'window1': window1, 'window2': window2 }) tableout.layout.width='500px' display(tableui, tableout)--------def plot_function(elements=[], window1=None, window2=0.05, max_atoms=2, max_charge=2, mass_resolution=3000, image_ratio=0, n_labels=2): try: elements = [el.strip() for el in elements.strip(',').split(',')] window = (window1, window2) df = build_table(elements, window=window, max_atoms=max_atoms, charges=[i+1 for i in range(max_charge)]) ax = df.mz.spectra(window=window, mass_resolution=mass_resolution, image_ratio=image_ratio, max_labels=n_labels, figsize=(8, 4), iter_lim=10) return ax.figure except: pass elements=widgets.Text(value="C,B,N,O",description='Elements:') max_atoms=widgets.IntSlider(min=1, max=3, step=1, value=2, description='Atoms:', continuous_update=False) max_charge=widgets.IntSlider(min=1, max=3, step=1, value=2, description='Max Charge:', continuous_update=False ) n_labels= widgets.IntSlider(min=0, max=5, step=1, value=4, description='# Labels:', continuous_update=False ) window1=widgets.Text(value="B[10]O[16]", description='Target:') window2=widgets.FloatLogSlider(min=-2, max=0, step=0.1, value=0.05, description='Mass Width', continuous_update=False) mass_resolution=widgets.IntSlider(min=1000, max=15000, step=2000, value=3000, description='Resolution:', continuous_update=False) image_ratio = widgets.FloatSlider(min=0, max=1.5, step=0.1, value=0.5, description='Image Ratio:', continuous_update=False) ui = widgets.VBox([widgets.HTML(value="
Spectra Generator
"), widgets.HBox([ widgets.VBox([elements, max_atoms, max_charge, n_labels]), widgets.VBox([window1, window2, mass_resolution, image_ratio]) ]) ]) out = widgets.interactive_output(plot_function, {'n_labels': n_labels, 'elements': elements, 'max_atoms': max_atoms, 'max_charge': max_charge, 'window1': window1, 'window2': window2, 'mass_resolution': mass_resolution, 'image_ratio' :image_ratio }) out.layout.height = '350px' display(ui, out)Exploratory Data Analysis Case Study - Conducted by & 1.Import libraries and set required parameters#import all the libraries and modules import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import re from scipy import stats # Supress Warnings #Enable autocomplete in Jupyter Notebook. %config IPCompleter.greedy=True import warnings warnings.filterwarnings('ignore') import os ## Set the max display columns to None so that pandas doesn't sandwich the output pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 40)Reading and analysing DataapplicationData=pd.read_csv("./application_data.csv") applicationData.head()2. Data Inspection#shape of application_data.csv data applicationData.shape #take information about the data applicationData.info() #get the information about the numerical data applicationData.describe() ## print the column names for application_data.csv applicationData.columns ## print the various datatypes of application_data.csv applicationData.dtypes3. Data Cleaning & Quality Check In this section we will perform various checks and balances on the application_data.csv file. We will:* Perform a check for the number of missing/null values on each column* Perform a check for the percentage of missing/null values of each column * Drop the columns that have a high percentage of null values, i.e. over 60% * Print the names of the dropped columns * Verify that the columns were dropped by comparing the shape of the new dataframe created* For columns with around 13% of null values we will discuss the best way to handle the missing/null values in the columns * Check the data types of these columns and determine if they are categorical in nature or not* Check the data types for all the columns in the dataframe and convert them to numerical data types if required* Check for any outliers in any 3 numerical columns and treat them accordingly* Create a bin for continous variables and analyse them### Let us create a utility function to generate a list of null values in different dataframes ### We will utilize this function extensively througout the notebook. 
def generateNullValuesPercentageTable(dataframe): totalNullValues = dataframe.isnull().sum().sort_values(ascending=False) percentageOfNullValues = round((dataframe.isnull().sum()*100/len(dataframe)).sort_values(ascending=False),2) columnNamesWithPrcntgOfNullValues = pd.concat([totalNullValues, percentageOfNullValues], axis=1, keys=['Total Null Values', 'Percentage of Null Values']) return columnNamesWithPrcntgOfNullValues ## Check the number of null values of each column and display them in ## decending order along with the percentage of null values there is generateNullValuesPercentageTable(applicationData) ### Assess the shape of the dataframe before dropping ### columns with a high percentage of ### null values print("The Initial shape of the DataFrame is: ", applicationData.shape) #Drop all the columns where the ## percentage of missing values is above 60% in application_data.csv droppedColumns = applicationData.columns[applicationData.isnull().mean() > 0.60] applicationDataAfterDroppedColumns = applicationData.drop(droppedColumns, axis = 1) print("The new shape of the DataFrame is: ", applicationDataAfterDroppedColumns.shape) ## analysing the dataframe is correct after dropping columns applicationDataAfterDroppedColumns.head()Observation: As you can see, the shape of the data has changed from (307511, 122) to (307511, 105). Which mean we have dropped 17 columns that had over 60% percent null values. The dropped columns are mentioned below.print("The columns that have been dropped are: ", droppedColumns) ## print the percentage of columns with null values in the ## new data frame after the columns have been dropped generateNullValuesPercentageTable(applicationDataAfterDroppedColumns) #### Check dataframe shape to confirm no other columns were dropped applicationDataAfterDroppedColumns.shapeObservation: As you can see above, there are still a few columns that have a above 30% of null/missing values. We can deal with those null/missing values using various methods of imputation. Some key points:- The columns with above 60% of null values have successfully been dropped- The column with the highest percentage of null values after the drop is "LANDAREA_MEDI" with 59.38% null values. 
Whereas earlier it was "COMMONAREA_MEDI" with 69.87% null values- The new shape of the dataframe is (307511, 105) Checking the datadrame after dropping null valuesapplicationDataAfterDroppedColumns.head() ### Analyzing Columns with null values around 14% to determine ### what might be the best way to impute such values listOfColumnsWithLessValuesOfNull = applicationDataAfterDroppedColumns.columns[applicationDataAfterDroppedColumns.isnull().mean() < 0.14] applicationDataWithLessPrcntgOfNulls = applicationDataAfterDroppedColumns.loc[:, listOfColumnsWithLessValuesOfNull] print(applicationDataWithLessPrcntgOfNulls.shape) applicationDataWithLessPrcntgOfNulls.head(20) ### Analysing columns with around 13.5% null values columnsToDescribe = ['AMT_REQ_CREDIT_BUREAU_QRT', 'AMT_REQ_CREDIT_BUREAU_YEAR', 'AMT_REQ_CREDIT_BUREAU_MON', 'AMT_REQ_CREDIT_BUREAU_DAY', 'AMT_REQ_CREDIT_BUREAU_HOUR','AMT_REQ_CREDIT_BUREAU_WEEK', 'OBS_30_CNT_SOCIAL_CIRCLE', 'OBS_60_CNT_SOCIAL_CIRCLE', 'EXT_SOURCE_2'] applicationDataAfterDroppedColumns[columnsToDescribe].describe() ### Let us plot a boxplot to see the various variables fig, axes = plt.subplots(nrows=3, ncols = 2, figsize=(40,25)) sns.boxplot(data=applicationDataAfterDroppedColumns.AMT_REQ_CREDIT_BUREAU_YEAR, ax=axes[0][0]) axes[0][0].set_title('AMT_REQ_CREDIT_BUREAU_YEAR') sns.boxplot(data=applicationDataAfterDroppedColumns.AMT_REQ_CREDIT_BUREAU_MON, ax=axes[0][1]) axes[0][1].set_title('AMT_REQ_CREDIT_BUREAU_MON') sns.boxplot(data=applicationDataAfterDroppedColumns.AMT_REQ_CREDIT_BUREAU_DAY, ax=axes[1][0]) axes[1][0].set_title('AMT_REQ_CREDIT_BUREAU_DAY') sns.boxplot(applicationDataAfterDroppedColumns.AMT_REQ_CREDIT_BUREAU_HOUR, ax=axes[1][1]) axes[1][1].set_title('AMT_REQ_CREDIT_BUREAU_HOUR') sns.boxplot(applicationDataAfterDroppedColumns.AMT_REQ_CREDIT_BUREAU_WEEK, ax=axes[2][0]) axes[2][0].set_title('AMT_REQ_CREDIT_BUREAU_WEEK') plt.show()ObservationAs you can see above, when we take a look at the columns that have a low number of null values, the shape of the data changes to (307511, 71) compared to (307511, 105). We lose 34 columns in the process. Checking columns having less no. of Null values(around 13% or so) and analysing the best metric to impute the missing/null values in those columns basis if the column/variable is 'Categorical' or 'Continuous'' - AMT_REQ_CREDIT_BUREAU_HOUR (99.4% of the values are 0.0 with 4.0 and 3.0 values being outliers. Its safe to impute the missing values with 0.0) - AMT_REQ_CREDIT_BUREAU_DAY (99.4% of the values are 0.0 with 9.0 and 8.0 values being outliers. Its safe to impute the missing values with 0.0) - AMT_REQ_CREDIT_BUREAU_WEEK (96.8% of the values are 0.0 with 8.0 and 7.0 values being outliers. Its safe to impute the missing values with 0.0) - AMT_REQ_CREDIT_BUREAU_MON (83.6% of the values are 0.0. 
Its safe to impute the missing values with mode : 0.0) - AMT_REQ_CREDIT_BUREAU_YEAR (It seems fine to use the median value 1.0 here for imputing the missing values)### Checking for categorical data categoricalDataColumns = applicationDataAfterDroppedColumns.nunique().sort_values() categoricalDataColumnsObservation:Given the wide number of columns with a less number of unique values, we will convert all columns with upto 5 values into categorical columnslistOfColumnsWithMaxTenUniqueValues = [i for i in applicationDataAfterDroppedColumns.columns if applicationDataAfterDroppedColumns[i].nunique() <= 5] for col in listOfColumnsWithMaxTenUniqueValues: applicationDataAfterDroppedColumns[col] = applicationDataAfterDroppedColumns[col].astype('category') applicationDataAfterDroppedColumns.shape applicationDataAfterDroppedColumns.head() ## Check for datatypes of all columns in the new dataframe applicationDataAfterDroppedColumns.info()Observation:We notice above that after dropping the null columns we still have:- 43 Categorical- 48 Float- 6 Integer - 8 Object data types## Convert the categorical data columns into individual columns with numeric values for better analysis ## we will do this using one-hot-encoding method convertedCategoricalColumnsDataframe = pd.get_dummies(applicationDataAfterDroppedColumns, columns=listOfColumnsWithMaxTenUniqueValues, prefix=listOfColumnsWithMaxTenUniqueValues) convertedCategoricalColumnsDataframe.head() ## Converting these columns has changed the shape of the data to print("Shape of Application Data after categorical column conversion: ", convertedCategoricalColumnsDataframe.shape)ObservationAs you can see above we have successfully converted the varius categorical datatypes into their own columns.The new shape of the data is (307511, 158) compared to (307511, 105). We have introuced 53 new columns. These will help us identify the best possible method to use for imputing values.### Count the number of missing values in the new dataframe generateNullValuesPercentageTable(convertedCategoricalColumnsDataframe)ObservationLet us take the following columns - AMT_REQ_CREDIT_BUREAU_YEAR, AMT_REQ_CREDIT_BUREAU_MON, OBS_30_CNT_SOCIAL_CIRCLE, OBS_60_CNT_SOCIAL_CIRCLE, EXT_SOURCE_2.Determine their datatypes and using the describe above try and identify what values can be used to impute into the null columns.listOfCols = ['AMT_REQ_CREDIT_BUREAU_YEAR', 'AMT_REQ_CREDIT_BUREAU_MON', 'OBS_30_CNT_SOCIAL_CIRCLE', 'OBS_60_CNT_SOCIAL_CIRCLE', 'EXT_SOURCE_2'] convertedCategoricalColumnsDataframe[listOfCols].dtypes applicationDataAfterDroppedColumns['AMT_REQ_CREDIT_BUREAU_HOUR'].fillna(0.0, inplace = True) applicationDataAfterDroppedColumns['AMT_REQ_CREDIT_BUREAU_HOUR'] = applicationDataAfterDroppedColumns['AMT_REQ_CREDIT_BUREAU_HOUR'].astype(int) ## convert DAYS_BIRTH to years def func_age_yrs(x): return round(abs(x/365),0) applicationDataAfterDroppedColumns['DAYS_BIRTH'] = applicationDataAfterDroppedColumns['DAYS_BIRTH'].apply(func_age_yrs)ObservationIn all the selected columns we can see that we can use the median to impute the values in the dataframe. They all correspond to 0.00 except EXT_SOURCE_2. For EXT_SOURCE_2 we observe that the mean and the median values are roughly similar at 5.143927e-01 for mean & 5.659614e-01 for median. So we could use either of those values to impute. 
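If we go with the median route, a minimal sketch of that imputation is shown below, reusing the `listOfCols` list and the `applicationDataAfterDroppedColumns` dataframe from the cells above; this step is illustrative and is not part of the original notebook.

```python
# Sketch only (not executed in the original analysis): fill the remaining nulls
# in the selected columns with their medians, reusing `listOfCols` from above.
for col in listOfCols:
    medianValue = applicationDataAfterDroppedColumns[col].median()
    applicationDataAfterDroppedColumns[col].fillna(medianValue, inplace=True)

# Confirm that these columns no longer contain nulls
applicationDataAfterDroppedColumns[listOfCols].isnull().sum()
```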
Let us now check for outliers on 6 numerical columns.For this we can use our dataset from after we dropped the columns with over 60% null values.### We will use boxplots to handle the outliers on AMT_CREDIT, AMT_ANNUITY, AMT_GOODS_PRICE fig, axes = plt.subplots(nrows=3, ncols = 2, figsize=(50,50)) sns.boxplot(data= applicationDataAfterDroppedColumns.AMT_CREDIT.dropna(), ax=axes[0][0]) axes[0][0].set_title('AMT_CREDIT') sns.boxplot(data= applicationDataAfterDroppedColumns.AMT_ANNUITY.dropna(), ax=axes[0][1]) axes[0][1].set_title('AMT_ANNUITY') sns.boxplot(data= applicationDataAfterDroppedColumns.AMT_GOODS_PRICE.dropna(), ax=axes[1][0]) axes[1][0].set_title('AMT_GOODS_PRICE') sns.boxplot(data= applicationDataAfterDroppedColumns.AMT_INCOME_TOTAL.dropna(), ax=axes[1][1]) axes[1][1].set_title('AMT_INCOME_TOTAL') sns.boxplot(data= applicationDataAfterDroppedColumns.DAYS_BIRTH.dropna(), ax=axes[2][0]) axes[2][0].set_title('DAYS_BIRTH') sns.boxplot(data= applicationDataAfterDroppedColumns.DAYS_EMPLOYED.dropna(), ax=axes[2][1]) axes[2][1].set_title('DAYS_EMPLOYED') plt.show()ObservationWe can easily see in the box plot that there are so many outliers which has to removed for the better calculation. So, In the next part of the code we remove outliers from the function "remove_outliers" which accept dataframe and columns name (In which we want to remove outliers) as argument and return the outliers removed dataframe.Analysing outliers in Numeric variables and Handling/Treating them with appropriate methods.- AMT_REQ_CREDIT_BUREAU_HOUR (99.4% of the values are 0.0 with value '4' and '3' being outliers. Should be retained) Considering that its the number of enquiries made by the company to credit bureau, this could significantly mean that the company was extremely cautious in making a decision of whether to grant loan/credit to this particular client or not. This might imply that it could be a case of 'High Risk' client and can influence the Target variable. Its better to retain these outlier values- AMT_INCOME_TOTAL ( Clearly 117000000.0 is an outlier here.)The above oulier can be dropped in order to not skew with the analysis. We can use IQR to remove this value. - DAYS_BIRTH ( There is no outlier in this column) - DAYS_EMPLOYED ( Clearly 1001 is an outlier here and should be deleted.18% of the column values are 1001)Clearly 1001 is an outlier here. 18% of the column values are 1001. Since , this represents the no. of years of employement as on the application date, these should be deleted. 
Though values above 40 years till 49 years of employment seems questionable as well but lets not drop it for now considering exception cases.Another way to see the distribution of is using a distribution plot.fig, axes = plt.subplots(nrows=3, ncols = 2, figsize=(50,50)) sns.distplot(applicationDataAfterDroppedColumns.AMT_CREDIT.dropna(), ax=axes[0][0]) axes[0][0].set_title('AMT_CREDIT') sns.distplot(applicationDataAfterDroppedColumns.AMT_ANNUITY.dropna(), ax=axes[0][1]) axes[0][1].set_title('AMT_ANNUITY') sns.distplot(applicationDataAfterDroppedColumns.AMT_GOODS_PRICE.dropna(), ax=axes[1][0]) axes[1][0].set_title('AMT_GOODS_PRICE') sns.distplot(applicationDataAfterDroppedColumns.AMT_INCOME_TOTAL.dropna(), ax=axes[1][1]) axes[1][1].set_title('AMT_INCOME_TOTAL') sns.distplot(applicationDataAfterDroppedColumns.DAYS_BIRTH.dropna(), ax=axes[2][0]) axes[2][0].set_title('DAYS_BIRTH') sns.distplot(applicationDataAfterDroppedColumns.DAYS_EMPLOYED.dropna(), ax=axes[2][1]) axes[2][1].set_title('DAYS_EMPLOYED') plt.show()ObservationAs you can see from the distplots above there are a few outliers that aren't properly normalized.The 'DAYS_EMPLOYED' column is heavily skewed in the -ve side of the plot.#Function for removing outliers def remove_outlier(df, col_name): q1 = df[col_name].quantile(0.25) q3 = df[col_name].quantile(0.75) iqr = q3-q1 #Interquartile range l = q1-1.5*iqr h = q3+1.5*iqr dfOutput = df.loc[(df[col_name] > l) & (df[col_name] < h)] return dfOutput cols=['AMT_CREDIT','AMT_ANNUITY', 'AMT_GOODS_PRICE', 'AMT_INCOME_TOTAL', 'DAYS_EMPLOYED'] for i in cols: applicationDataAfterDroppedColumns=remove_outlier(applicationDataAfterDroppedColumns,i) applicationDataAfterDroppedColumns.head() ### Plot the box plot again after removing outliers fig, axes = plt.subplots(nrows=3, ncols = 2, figsize=(50,50)) sns.boxplot(data= applicationDataAfterDroppedColumns.AMT_CREDIT.dropna(), ax=axes[0][0]) axes[0][0].set_title('AMT_CREDIT') sns.boxplot(data= applicationDataAfterDroppedColumns.AMT_ANNUITY.dropna(), ax=axes[0][1]) axes[0][1].set_title('AMT_ANNUITY') sns.boxplot(data= applicationDataAfterDroppedColumns.AMT_GOODS_PRICE.dropna(), ax=axes[1][0]) axes[1][0].set_title('AMT_GOODS_PRICE') sns.boxplot(data= applicationDataAfterDroppedColumns.AMT_INCOME_TOTAL.dropna(), ax=axes[1][1]) axes[1][1].set_title('AMT_INCOME_TOTAL') sns.boxplot(data= applicationDataAfterDroppedColumns.DAYS_BIRTH.dropna(), ax=axes[2][0]) axes[2][0].set_title('DAYS_BIRTH') sns.boxplot(data= applicationDataAfterDroppedColumns.DAYS_EMPLOYED.dropna(), ax=axes[2][1]) axes[2][1].set_title('DAYS_EMPLOYED') plt.show()ObservationAfter dropping the outliers we observe that there very few points mentioned on the box plots above for the outliers.### Plotting the distribution plot after removing the outliers fig, axes = plt.subplots(nrows=3, ncols = 2, figsize=(50,50)) sns.distplot(applicationDataAfterDroppedColumns.AMT_CREDIT.dropna(), ax=axes[0][0]) axes[0][0].set_title('AMT_CREDIT') sns.distplot(applicationDataAfterDroppedColumns.AMT_ANNUITY.dropna(), ax=axes[0][1]) axes[0][1].set_title('AMT_ANNUITY') sns.distplot(applicationDataAfterDroppedColumns.AMT_GOODS_PRICE.dropna(), ax=axes[1][0]) axes[1][0].set_title('AMT_GOODS_PRICE') sns.distplot(applicationDataAfterDroppedColumns.AMT_INCOME_TOTAL.dropna(), ax=axes[1][1]) axes[1][1].set_title('AMT_INCOME_TOTAL') sns.distplot(applicationDataAfterDroppedColumns.DAYS_BIRTH.dropna(), ax=axes[2][0]) axes[2][0].set_title('DAYS_BIRTH') sns.distplot(applicationDataAfterDroppedColumns.DAYS_EMPLOYED.dropna(), 
ax=axes[2][1]) axes[2][1].set_title('DAYS_EMPLOYED') plt.show()ObservationBased on the distplots above you can see that there is a marked difference between the minimum values for various columns, particularly the DAYS_EMPLOYED column where the minimum value increased from -7500 to -6000. This proves that the treatment of outliers was succesfulapplicationDataAfterDroppedColumns.shapeObservationWe observe that after removing the outliers the boxplots show a slight shift in the maximum ranges. The distribution plot gives us a more significant display in changes. There is a significant reduction in the max ranges on the x-axis for all the three variables we chose. As we can see above, after treating the outliers for various columns the shape of our dataset has changed significantly. The shape of the dataframe after dropping columns with high number of null values was (307511, 105) & after treating for outliers is (209624, 105).Let us now create bins for 3 different continous variables and plot them. We will use AMT_INCOME_TOTAL, AMT_CREDIT & DAYS_BIRTH to create our bins.## Creating bins for Income range based on AMT_INCOME_TOTAL bins=[0,100000,200000,300000,400000,500000,600000,20000000] range_period=['0-100000','100000-200000','200000-300000','300000-400000','400000-500000','500000-600000','600000 and above'] applicationDataAfterDroppedColumns['Income_amount_range']=pd.cut(applicationDataAfterDroppedColumns['AMT_INCOME_TOTAL'],bins,labels=range_period) plotIncomeAmountRange = applicationDataAfterDroppedColumns['Income_amount_range'].value_counts().plot(kind='bar', title='Income Range Bins Plot') plotIncomeAmountRange.set_xlabel('Income Range Bins') plotIncomeAmountRange.set_ylabel('Count')ObservationAs you can clearly see from the plot above:- The most number of people earn between 100000-200000- The number of people who earn between 200000-300000 is less than half of the number of people in 100000-200000 range- No one earns above 300000.#create bins for credit anount bins=[0,50000,100000,150000,200000,250000,300000,400000] range_period=['0-50000','50000-100000','100000-150000','150000-200000','200000-250000','250000-300000','300000-400000'] applicationDataAfterDroppedColumns['credit_amount_range']=pd.cut(applicationDataAfterDroppedColumns['AMT_CREDIT'],bins,labels=range_period) plotCreditAmountRange = applicationDataAfterDroppedColumns['credit_amount_range'].value_counts().plot(kind='bar', title='Credit Amount Range Plots') plotCreditAmountRange.set_xlabel('Credit Amount Range Bins') plotCreditAmountRange.set_ylabel('Count')ObservationAs you can see from the plots above- Very less number of people borrow money between 0-50000- Highest number of people are borrowing money between 250000-300000##Creating bins for age range for DAYS_BIRTH in years bins = [10, 20, 30, 40, 50, 60, 70, 80] labels = ['10-20','21-30','31-40','41-50','51-60','61-70','71-80'] applicationDataAfterDroppedColumns['BINNED_AGE'] = pd.cut(applicationDataAfterDroppedColumns['DAYS_BIRTH'], bins=bins,labels=labels) plotAgeRange = applicationDataAfterDroppedColumns['BINNED_AGE'].value_counts().plot(kind='bar', title='Age Range Plot') plotAgeRange.set_xlabel('Age Range') plotAgeRange.set_ylabel('Count')Observation- People between the ages of 71-80 & 10-20 are not borrowing any money.- For people in the age range of 10-20, no borrowing could suggest that children/teenagers/young adults could have just opened new bank accounts with their parents or have just joined university so do not have a need of borrowing money- People in 
between the ages of 31-40 have a significantly higher number of borrowers, this could be suggestive of various personal expenses & it would be beneficial for the firm to identify the reasons why they are borrowing more so that they can introduce newer products at more competitive interest rates to these customers 4. Data Analysis In this section we will perform indepth analysis on the application_data.csv file.This will be achieved by:- Checking the imbalance percentage in the dataset- Dividing the dataset based on the "TARGET" column into 2 separate dataframes- Performing univariate analysis for categorical variables on both Target = 0 & Target = 1 columns- Identifying the correlation between the numerical columns for both Target = 0 & Target = 1 columns- Comparing the results across continous variables- Performing bivariate analysis for numerical variables on both Target = 0 & Target = 1 columns Selecting relevant columns from 'applicationDataAfterDroppedColumns' which would be used for EDA further- Selecting only the relevant columns(25 or so) from 'applicationDataAfterDroppedColumns' i.e. removing those columns which aren't relevant for analysis out of a total of 105 columnsapplicationDataWithRelevantColumns = applicationDataAfterDroppedColumns.loc[:,['SK_ID_CURR', 'TARGET', 'NAME_CONTRACT_TYPE', 'CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY', 'CNT_CHILDREN', 'AMT_INCOME_TOTAL', 'AMT_CREDIT', 'AMT_ANNUITY', 'AMT_GOODS_PRICE', 'NAME_INCOME_TYPE', 'NAME_EDUCATION_TYPE', 'NAME_FAMILY_STATUS', 'NAME_HOUSING_TYPE', 'REGION_POPULATION_RELATIVE', 'BINNED_AGE', 'DAYS_EMPLOYED', 'DAYS_REGISTRATION', 'DAYS_ID_PUBLISH', 'FLAG_CONT_MOBILE', 'OCCUPATION_TYPE', 'CNT_FAM_MEMBERS', 'REGION_RATING_CLIENT', 'REGION_RATING_CLIENT_W_CITY', 'ORGANIZATION_TYPE', 'AMT_REQ_CREDIT_BUREAU_HOUR', 'AMT_REQ_CREDIT_BUREAU_DAY']]We will now use applicationDataWithRelevantColumns as our dataframe to run further analysis### Checking shape of the new dataframe applicationDataWithRelevantColumns.shape applicationDataWithRelevantColumns['CODE_GENDER'].value_counts()Since the number of Females is higher than Males, we can safely impute XNA values with F.applicationDataWithRelevantColumns.loc[applicationDataWithRelevantColumns['CODE_GENDER']=='XNA','CODE_GENDER']='F' applicationDataWithRelevantColumns['CODE_GENDER'].value_counts() #Check the total percentage of target value as 0 and 1. imbalancePercentage = applicationDataWithRelevantColumns['TARGET'].value_counts()*100/len(applicationDataAfterDroppedColumns) imbalancePercentage imbalancePercentage.plot(kind='bar',rot=0)ObservationWe can easily see that this data is very much imbalance. 
Rows with target value 0 is only 90.612239% and with 1 is only 9.387761%.This also means that only 9.38% of all the loan applicants default while paying back their loans.#Splitting the data based on target values one_df = applicationDataWithRelevantColumns.loc[applicationDataWithRelevantColumns['TARGET']==1] zero_df = applicationDataWithRelevantColumns.loc[applicationDataWithRelevantColumns['TARGET']==0] ## Inspecting data with TARGET = 1 one_df.head() one_df.info() one_df.shape ## Inspecting data with TARGET = 0 zero_df.head() zero_df.describe zero_df.shape zero_df.infoWe will now use the following columns to perform Univariate & Bivariate analysis- CODE_GENDER- NAME_CONTRACT_TYPE- NAME_INCOME_TYPE- NAME_EDUCATION_TYPE- NAME_FAMILY_STATUS- NAME_HOUSING_TYPE- OCCUPATION_TYPE- ORGANIZATION_TYPE Univariate Analysis:- Univariate Analysis on one_df dataset#Univariate Analysis for categorical variable 'CODE_GENDER' in dataframe one_df. sns.countplot(x ='CODE_GENDER', data = one_df) plt.title('Number of applications by Gender') plt.ylabel('Number of Applications') plt.xlabel('Gender') plt.show()ObservationAs you can see above the number of Female applicants is higher than the number of Male applicants.#Univariate Analysis for categorical variable 'NAME_EDUCATION_TYPE' in dataframe T1. sns.countplot(x ='NAME_EDUCATION_TYPE', data = one_df) plt.title("Number of applications by Client's Education Level") plt.ylabel('Number of Applications') plt.xlabel("Client's Education Level") plt.xticks(rotation = 90) plt.show()ObservationFrom the plot above we can infer that:- The highest number of applications for credit were made by people having Secondary/ secondary special education and these people defaulted on being able to pay back their loans. This could mean that they face trouble in being able to manage their money effectively or have jobs that pay less/are contractual in nature- People with higher education also applied for a credit and defaulted on their loans#Univariate Analysis for categorical variable 'NAME_CONTRACT_TYPE' in dataframe one_df. sns.countplot(x ='NAME_CONTRACT_TYPE', data = one_df) plt.title('Number of applications by Contract Type') plt.ylabel('Number of Applications') plt.xlabel('Contract Type') plt.show()Observation- A high number of applicants who defaulted applied for cash loans#Univariate Analysis for categorical variable 'NAME_INCOME_TYPE' in dataframe one_df. sns.countplot(x ='NAME_INCOME_TYPE', data = one_df) plt.title("Number of applications by Client's Income Type") plt.ylabel('Number of Applications') plt.xlabel("Client's Income Type") plt.xticks(rotation = 90) plt.show()Observation- Mostly working professionals apply for credit and are also the ones that default on being able to payback the loans on time- State servants have a very low number of defaulters#Univariate Analysis for categorical variable 'NAME_FAMILY_STATUS' in dataframe one_df. sns.countplot(x ='NAME_FAMILY_STATUS', data = one_df) plt.title("Number of applications by Client's Family Status") plt.ylabel('Number of Applications') plt.xlabel("Client's Family Status") plt.xticks(rotation = 90) plt.show()Observation- Married applicants make a higher number of applications as compared to other categories- It would be beneficial for the bank to introduce newer products for people in such a category to attract more customers#Univariate Analysis for categorical variable 'NAME_HOUSING_TYPE' in dataframe one_df. 
sns.countplot(x ='NAME_HOUSING_TYPE', data = one_df) plt.title("Number of applications by Client's Housing Status") plt.ylabel('Number of Applications') plt.xlabel("Client's Housing Status") plt.xticks(rotation = 90) plt.show()Observation- People who live in their own apartment/house apply for loans almost 160 times more than those who live with their parents.- People living in office apartments default significantly less. This could be because their houses are rent free or they pay minimum charges to live in the house.#Univariate Analysis for categorical variable 'OCCUPATION_TYPE' in dataframe one_df. sns.countplot(x ='OCCUPATION_TYPE', data = one_df) plt.title("Number of applications by Client's Occupation Type") plt.ylabel('Number of Applications') plt.xlabel("Client's Occupation Type") plt.xticks(rotation = 90) plt.show()Observation- Labourers apply for a lot of loans and default on being able to repay them. This could be because of the contractual nature of their work and the unsetady + low income they might earn from their daily jobs- IT & HR Staff make very few applications for credit and default the least on their loan applications. This could be, in stark contrast to the labourers, because of the stable job & salaried nature of their work. Thus enabling them to be better at handling monthly expenses.# Since there are subcategories like Type1,2 etc under few categories like Business Entity,Trade etc. # Because of this, there are a lot of categories making it difficult to analyse data # Its better to remove the types and just have the main category there one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Business Entity Type 3", "Business Entity") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Business Entity Type 2", "Business Entity") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Business Entity Type 1", "Business Entity") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Trade: type 7", "Trade") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Trade: type 3", "Trade") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Trade: type 2", "Trade") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Trade: type 1", "Trade") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Trade: type 6", "Trade") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Trade: type 5", "Trade") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Trade: type 4", "Trade") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Transport: type 4", "Transport") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Transport: type 3", "Transport") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Transport: type 2", "Transport") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Transport: type 1", "Transport") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Industry: type 1", "Industry") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Industry: type 2", "Industry") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Industry: type 3", "Industry") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Industry: type 4", "Industry") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Industry: type 5", "Industry") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Industry: type 6", "Industry") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Industry: type 7", "Industry") one_df.ORGANIZATION_TYPE= 
one_df.ORGANIZATION_TYPE.replace("Industry: type 8", "Industry") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Industry: type 9", "Industry") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Industry: type 10", "Industry") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Industry: type 11", "Industry") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Industry: type 12", "Industry") one_df.ORGANIZATION_TYPE= one_df.ORGANIZATION_TYPE.replace("Industry: type 13", "Industry") one_df['ORGANIZATION_TYPE'].value_counts() #Univariate Analysis for categorical variable 'ORGANIZATION_TYPE' in dataframe one_df. plt.figure(figsize = (14,14)) sns.countplot(x ='ORGANIZATION_TYPE', data = one_df) plt.title("Number of applications by Client's Organization Type") plt.ylabel('Number of Applications') plt.xlabel("Client's Organization Type") plt.xticks(rotation = 90) plt.show()Observation- Based on the plot above we can see that Business Entity employees have the maximum number of loan applications- Religious people, priests etc dont seem to be making any credit applications at all- Self-employed people also make a lot of loan applications. This could be to boost their business or to repay other loans. Continuous - Continuous Bivariate Analysis for one_df dataframe## Plotting cont-cont Client Income vs Credit Amount plt.figure(figsize=(12,12)) sns.scatterplot(x="AMT_INCOME_TOTAL", y="AMT_CREDIT", hue="CODE_GENDER", style="CODE_GENDER", data=one_df) plt.xlabel('Income of client') plt.ylabel('Credit Amount of loan') plt.title('Client Income vs Credit Amount') plt.show()Observation- We do see some outliers here wherein Females having income less than 50000 have applied for loan with credit amount 1300000 approx- Most of the loans seem to be concentrated between credit amount of 200000 & 6000000 for income ranging from 50000-150000## Plotting cont-cont Client Income vs Region population plt.figure(figsize=(12,12)) sns.scatterplot(x="AMT_INCOME_TOTAL", y="REGION_POPULATION_RELATIVE", hue="CODE_GENDER", style="CODE_GENDER", data=one_df) plt.xlabel('Income of client') plt.ylabel('Population of region where client lives') plt.title('Client Income vs Region population') plt.show()Observation- Very less no of people live in highly dense/populated region- Most of the clients live between population density of 0.00 to 0.04 Univariate analysis for zero_df dataframe#Univariate Analysis for categorical variable 'CODE_GENDER' in dataframe zero_df. sns.countplot(x ='CODE_GENDER', data = zero_df) plt.title('Number of applications by Gender') plt.ylabel('Number of Applications') plt.xlabel('Gender') plt.show()ObservationAs you can see above the number of Female applicants is higher than the number of Male applicants.#Univariate Analysis for categorical variable 'NAME_CONTRACT_TYPE' in dataframe zero_df. sns.countplot(x ='NAME_CONTRACT_TYPE', data = zero_df) plt.title('Number of applications by Contract Type') plt.ylabel('Number of Applications') plt.xlabel('Contract Type') plt.show()ObservationApplicants prefer to apply more for cash loans rather than revolving loans#Univariate Analysis for categorical variable 'NAME_INCOME_TYPE' in dataframe zero_df. 
sns.countplot(x ='NAME_INCOME_TYPE', data = zero_df) plt.title("Number of applications by Client's Income Type") plt.ylabel('Number of Applications') plt.xlabel("Client's Income Type") plt.xticks(rotation = 90) plt.show()Observation- Working people make the most number of applications and are able to successfully repay their loans as well.- Students, Pensioners, Business men and Maternity leave applicants is close to 0. This could be due to a multitude of reasons.#Univariate Analysis for categorical variable 'NAME_EDUCATION_TYPE' in dataframe zero_df. sns.countplot(x ='NAME_EDUCATION_TYPE', data = zero_df) plt.title("Number of applications by Client's Education Level") plt.ylabel('Number of Applications') plt.xlabel("Client's Education Level") plt.xticks(rotation = 90) plt.show()ObservationFrom the plot above we can infer that:- The highest number of applications for credit were made by people having Secondary/ secondary special education and these people did not default on being able to pay back their loans.- People with higher education also applied for a credit and were able to repay them successfully#Univariate Analysis for categorical variable 'NAME_FAMILY_STATUS' in dataframe zero_df. sns.countplot(x ='NAME_FAMILY_STATUS', data = zero_df) plt.title("Number of applications by Client's Family Status") plt.ylabel('Number of Applications') plt.xlabel("Client's Family Status") plt.xticks(rotation = 90) plt.show()ObservationFrom the plot above we can infer that:- Married people apply for credit the most. - Married people are able to repay their loans without any defaults as well#Univariate Analysis for categorical variable 'NAME_HOUSING_TYPE' in dataframe zero_df. sns.countplot(x ='NAME_HOUSING_TYPE', data = zero_df) plt.title("Number of applications by Client's Housing Status") plt.ylabel('Number of Applications') plt.xlabel("Client's Housing Status") plt.xticks(rotation = 90) plt.show()Observation- People who live in their own apartment/house apply for loans almost 160 times more than those who live with their parents.- People living in office apartments apply for loans significantly less. This could be because their houses are rent free or they pay minimum charges to live in the house.- People in rented apartments apply for loans significantly less. This could be due to the added expenses of paying rent and other utility bills leaves them with not enough capital to payback their loans.#Univariate Analysis for categorical variable 'OCCUPATION_TYPE' in dataframe zero_df. sns.countplot(x ='OCCUPATION_TYPE', data = zero_df) plt.title("Number of applications by Client's Occupation Type") plt.ylabel('Number of Applications') plt.xlabel("Client's Occupation Type") plt.xticks(rotation = 90) plt.show()Observation- Labourers apply for a lot of loans.- IT & HR Staff make very few applications for credit. This could be, in stark contrast to the labourers, because of the stable job & salaried nature of their work. Thus enabling them to be better at handling monthly expenses.# Since there are subcategories like Type1,2 etc under few categories like Business Entity,Trade etc. 
# Because of this, there are a lot of categories making it difficult to analyse data # Its better to remove the types and just have the main category there zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Business Entity Type 3", "Business Entity") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Business Entity Type 2", "Business Entity") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Business Entity Type 1", "Business Entity") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Trade: type 7", "Trade") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Trade: type 3", "Trade") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Trade: type 2", "Trade") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Trade: type 1", "Trade") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Trade: type 6", "Trade") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Trade: type 5", "Trade") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Trade: type 4", "Trade") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Transport: type 4", "Transport") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Transport: type 3", "Transport") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Transport: type 2", "Transport") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Transport: type 1", "Transport") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Industry: type 1", "Industry") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Industry: type 2", "Industry") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Industry: type 3", "Industry") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Industry: type 4", "Industry") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Industry: type 5", "Industry") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Industry: type 6", "Industry") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Industry: type 7", "Industry") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Industry: type 8", "Industry") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Industry: type 9", "Industry") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Industry: type 10", "Industry") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Industry: type 11", "Industry") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Industry: type 12", "Industry") zero_df.ORGANIZATION_TYPE= zero_df.ORGANIZATION_TYPE.replace("Industry: type 13", "Industry") zero_df['ORGANIZATION_TYPE'].value_counts() #Univariate Analysis for categorical variable 'ORGANIZATION_TYPE' in dataframe zero_df. plt.figure(figsize = (14,14)) sns.countplot(x ='ORGANIZATION_TYPE', data = zero_df) plt.title("Number of applications by Client's Organization Type") plt.ylabel('Number of Applications') plt.xlabel("Client's Organization Type") plt.xticks(rotation = 90) plt.show()Observation- Based on the plot above we can see that Business Entity employees have the maximum number of loan applications- Religious people, priests etc dont seem to be making a lot of credit applications at all. They are able to repay their loans on time as well. - Self-employed people also make a lot of loan applications. This could be to boost their business or to repay other loans. 
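As an aside, the long chains of `replace()` calls used above for both `one_df` and `zero_df` could be collapsed into a single regex-based replacement. The sketch below assumes the same dataframes and category naming pattern ("... Type 3", "...: type 7"); `simplify_organization_type` is a hypothetical helper, not part of the original notebook.

```python
# Sketch: collapse the sub-typed organization categories in one step rather than
# one replace() call per sub-type. Assumes one_df / zero_df from the cells above.
def simplify_organization_type(df):
    # "Business Entity Type 3" -> "Business Entity", "Trade: type 7" -> "Trade", etc.
    df['ORGANIZATION_TYPE'] = (df['ORGANIZATION_TYPE']
                               .astype(str)
                               .str.replace(r':?\s*[Tt]ype\s*\d+$', '', regex=True)
                               .str.strip())
    return df

one_df = simplify_organization_type(one_df)
zero_df = simplify_organization_type(zero_df)
one_df['ORGANIZATION_TYPE'].value_counts().head()
```

This keeps the cleaning logic in one place, so any new sub-typed category (e.g. a hypothetical "Trade: type 8") would be handled without adding another `replace()` line.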
Bivariate Analysis for zero_df### Let us create a helper function to help with ### plotting various graphs def uniplot(df,col,title,hue =None): sns.set_style('whitegrid') sns.set_context('talk') plt.rcParams["axes.labelsize"] = 20 plt.rcParams['axes.titlesize'] = 22 plt.rcParams['axes.titlepad'] = 30 plt.figure(figsize=(40,20)) temp = pd.Series(data = hue) fig, ax = plt.subplots() width = len(df[col].unique()) + 7 + 4*len(temp.unique()) fig.set_size_inches(width , 8) plt.xticks(rotation=45) plt.title(title) ax = sns.countplot(data = df, x= col, order=df[col].value_counts().index,hue = hue, palette='magma') plt.show() # PLotting for income range uniplot(zero_df,col='NAME_INCOME_TYPE',title='Distribution of Income type',hue='CODE_GENDER')Observation - For income type ‘working’, ’commercial associate’, and ‘State Servant’ the number of credits are higher than others.- For this Females are having more number of credit applications than males in all the categories.uniplot(zero_df,col='NAME_CONTRACT_TYPE',title='Distribution of contract type',hue='CODE_GENDER')Observation- For contract type ‘cash loans’ is having higher number of credits than ‘Revolving loans’ contract type.- For this also Females are applying for credit a lot more than males.uniplot(zero_df,col='NAME_FAMILY_STATUS',title='Distribution of Family status',hue='CODE_GENDER')Observation- As observed above the number of married females applying for loans is almost 3.5 times the number of single females. - No male widowers are applying for credituniplot(zero_df,col='NAME_EDUCATION_TYPE',title='Distribution of education level',hue='CODE_GENDER')Observation- No person with an 'Academic Degree' is applying for a loan- The number of females with 'Higher Education' that apply for a loan is almost double the number of males for the same categoryuniplot(zero_df,col='NAME_HOUSING_TYPE',title='Distribution of Housing Type',hue='CODE_GENDER')Observation- Females living in their own apartments/houses apply for more loans and are able to successfully payback.- A very small number of females living in Co-op apartments apply for loansuniplot(zero_df,col='OCCUPATION_TYPE',title='Distribution Occupation Type',hue='CODE_GENDER')Observation- Male Labourers & Drivers take more loans and are able to successfully payback in time.- Female Care staff & Sales Staff are also able to take loans and payback in time Bivariate Analysis on one_df Perform correlation between numerical columns for finding correlation which having TARGET value as 1uniplot(one_df,col='NAME_INCOME_TYPE',title='Distribution of Income type',hue='CODE_GENDER')Observation - For income type ‘working’, ’commercial associate’, and ‘State Servant’ the number of credits are higher than others.- Females have more number of credit applications than males in all the categories.uniplot(one_df,col='NAME_CONTRACT_TYPE',title='Distribution of contract type',hue='CODE_GENDER')Observation- For contract type ‘cash loans’ is having higher number of credits than ‘Revolving loans’ contract type.- For this also Females are applying for credit a lot more than males.- Females are also able to payback their loans on timeuniplot(one_df,col='NAME_FAMILY_STATUS',title='Distribution of Family status',hue='CODE_GENDER')Observation- As observed above the number of married females applying for loans is almost 3.5 times the number of single females. 
- No male widowers are applying for credit- The number of males applying for loans and being able to not payback is higher if they are unmarried/single compared to females- A very small number of male widowers are unable to payback their loans afteruniplot(one_df,col='NAME_EDUCATION_TYPE',title='Distribution of education level',hue='CODE_GENDER')Observation- Males with lower secondary education make more loan applications and default more compared to females- There is very little difference between the number of defaulters for males and females with secondary education compared to the non-defaulters we saw aboveuniplot(one_df,col='NAME_HOUSING_TYPE',title='Distribution of Housing Type',hue='CODE_GENDER')Observation- Males living with their parents tend to apply and default more on their loans- Almost an equal number of males and females default on loans if they are living in rented apartmentsuniplot(one_df,col='OCCUPATION_TYPE',title='Distribution Occupation Type',hue='CODE_GENDER')Observations- The number of male applicants who default on paying back their loans is almost double the amount of female applicants- Irrespective of gender, managers seem to default on their loans equally Categorical vs Numerical Analysis# Box plotting for Credit amount for zero_df based on education type and family status plt.figure(figsize=(40,20)) plt.xticks(rotation=45) sns.boxplot(data =zero_df, x='NAME_EDUCATION_TYPE',y='AMT_CREDIT', hue ='NAME_FAMILY_STATUS',orient='v') plt.title('Credit amount vs Education Status') plt.show()Observation- Widows with secondary education have a very high median credit amount borrowing and default on paying back loans as well. It would be better to be vary of lending to them- Widows with an academic degree have a higher median for borrowing as compared to any other category. - People in civil marriages, those who are seperated and widows with secondary education have the same median values and usually borrow in around 400000# Box plotting for Income amount for zero_df based on their education type & family status plt.figure(figsize=(40,20)) plt.xticks(rotation=45) plt.yscale('log') sns.boxplot(data =zero_df, x='NAME_EDUCATION_TYPE',y='AMT_INCOME_TOTAL', hue ='NAME_FAMILY_STATUS',orient='v') plt.title('Income amount vs Education Status') plt.show()Observation- Except widows, the median earning for all other family status types with an incomplete higher education is the same- Median income for all family status categories is the same for people with a secondary education# Box plotting for Credit amount for one_df plt.figure(figsize=(16,12)) plt.xticks(rotation=45) sns.boxplot(data =one_df, x='NAME_EDUCATION_TYPE',y='AMT_CREDIT', hue ='NAME_FAMILY_STATUS',orient='v') plt.title('Credit amount vs Education Status') plt.show()Observation- Widows with secondary education have a very high median credit amount borrowing and default on paying back loans as well. 
It would be better to be vary of lending to them- Married people have a consistently high median across all categories of education except secondary education# Box plotting for Income amount for one_df plt.figure(figsize=(40,20)) plt.xticks(rotation=45) plt.yscale('log') sns.boxplot(data =one_df, x='NAME_EDUCATION_TYPE',y='AMT_INCOME_TOTAL', hue ='NAME_FAMILY_STATUS',orient='v') plt.title('Income amount vs Education Status') plt.show()Observation- The median income for all family status types is the same for people with education type as Secondary/secondary special- The median income for widows is the lowest across all the education types### Perform correlation between CNT_CHILDREN, AMT_INCOME_TOTAL, AMT_CREDIT, AMT_GOODS_PRICE, REGION_POPULATION_RELATIVE ### and AMT_ANNUITY. Then make correlation matrix across the one_df dataframe columns=['CNT_CHILDREN','AMT_INCOME_TOTAL','AMT_CREDIT','AMT_GOODS_PRICE','REGION_POPULATION_RELATIVE', 'AMT_ANNUITY'] corr=one_df[columns].corr() corr.style.background_gradient(cmap='coolwarm')ObservationIn the heatmap above: The closer you are to RED there is a stronger relationship, the closer you are to blue the weaker the relationship.As we can see from the corelation matrix above, there is a very close relationship between AMT_GOODS_PRICE & AMT_CREDIT. AMT_ANNUITY & AMT_CREDIT have a medium/strong relationship. Annuity has a similar relationship with AMT_GOODS_PRICE.### Sorting based on the correlation and extracting top 10 relationships on the defaulters in one_df corrOneDf = corr.where(np.triu(np.ones(corr.shape), k=1).astype(np.bool)).unstack().reset_index() corrOneDf.columns = ['VAR1','VAR2','Correlation'] corrOneDf.sort_values('Correlation', ascending = False).nlargest(10, 'Correlation')ObservationIn the correlation matrix, we can identify-Columns with High Correlation:1.AMT_GOODS_PRICE and AMT_CREDITColumns with Medium Correlation:1.REGION_POPULATION_RELATIVE and AMT_INCOME_TOTAL 2.REGION_POPULATION_RELATIVE and AMT_GOODS_PRICE 3.REGION_POPULATION_RELATIVE and AMT_CREDITColumns with low correlation:1.AMT_INCOME_TOTAL and CNT_CHILDRENWe also observed that the top 10 correlation pairs are:- VAR1 VAR2 Correlation Value- AMT_GOODS_PRICE AMT_CREDIT 0.981276- AMT_ANNUITY AMT_CREDIT 0.748446- AMT_ANNUITY AMT_GOODS_PRICE 0.747315- AMT_ANNUITY AMT_INCOME_TOTAL 0.390809- AMT_GOODS_PRICE AMT_INCOME_TOTAL 0.317123- AMT_CREDIT AMT_INCOME_TOTAL 0.313347- REGION_POPULATION_RELATIVE AMT_INCOME_TOTAL 0.141307- AMT_ANNUITY REGION_POPULATION_RELATIVE 0.065024- REGION_POPULATION_RELATIVE AMT_GOODS_PRICE 0.055120- REGION_POPULATION_RELATIVE AMT_CREDIT 0.050097 Perform correlation between numerical columns for finding correlation which having TARGET value as 0#Perform correlation between CNT_CHILDREN, AMT_INCOME_TOTAL, AMT_CREDIT, AMT_GOODS_PRICE and REGION_POPULATION_RELATIVE #Then make correlation matrix corrZero=zero_df[columns].corr() corrZero.style.background_gradient(cmap='coolwarm')ObservationIn the heatmap above: The closer you are to RED there is a stronger relationship, the closer you are to blue the weaker the relationship.As we can see from the corelation matrix above, there is a very close relationship between AMT_GOODS_PRICE & AMT_CREDIT.AMT_ANNUITY & AMT_CREDIT have a medium/strong relationship. Annuity has a similar relationship with AMT_GOODS_PRICE.This relationship is consistent with the one we saw for the defaulters in the one_df dataframe. 
corrZeroDf = corrZero.where(np.triu(np.ones(corrZero.shape), k=1).astype(bool)).unstack().reset_index() corrZeroDf.columns = ['VAR1','VAR2','Correlation'] # corrOneDf.dropna(subset=['Correlation'], inplace=True) corrZeroDf.sort_values('Correlation', ascending = False).nlargest(10, 'Correlation')In the correlation matrix, we can identify-Columns with High Correlation:1.AMT_GOODS_PRICE and AMT_CREDITColumns with Medium Correlation:1.AMT_INCOME_TOTAL and AMT_CREDIT 2.AMT_INCOME_TOTAL and AMT_GOODS_PRICE Columns with low correlation:1.AMT_GOODS_PRICE and CNT_CHILDRENWe also observed that the top 10 correlation pairs are:- VAR1 VAR2 Correlation- AMT_GOODS_PRICE AMT_CREDIT 0.981276- AMT_ANNUITY AMT_CREDIT 0.748446- AMT_ANNUITY AMT_GOODS_PRICE 0.747315- AMT_ANNUITY AMT_INCOME_TOTAL 0.390809- AMT_GOODS_PRICE AMT_INCOME_TOTAL 0.317123- AMT_CREDIT AMT_INCOME_TOTAL 0.313347- REGION_POPULATION_RELATIVE AMT_INCOME_TOTAL 0.141307- AMT_ANNUITY REGION_POPULATION_RELATIVE 0.065024- REGION_POPULATION_RELATIVE AMT_GOODS_PRICE 0.055120- REGION_POPULATION_RELATIVE AMT_CREDIT 0.050097 Key ObservationWe also observed that the top pair is the same for both data frames, zero_df & one_df:AMT_GOODS_PRICE AMT_CREDIT 0.981276 Analysing Numerical Data#Box plot on the numerical columns having TARGET value as 1 plt.figure(figsize=(25,25)) plt.subplot(2,2,1) plt.title('CHILDREN COUNT') sns.boxplot(one_df['CNT_CHILDREN']) plt.subplot(2,2,2) plt.title('AMT_INCOME_TOTAL') sns.boxplot(one_df['AMT_INCOME_TOTAL']) plt.subplot(2,2,3) plt.title('AMT_CREDIT') sns.boxplot(one_df['AMT_CREDIT']) plt.subplot(2,2,4) plt.title('AMT_GOODS_PRICE') sns.boxplot(one_df['AMT_GOODS_PRICE']) plt.show()Observation- From the box plots above we can safely say that having children has no impact on whether someone defaults on paying back their loans- The amount of credit taken by the defaulters is roughly around 450,000#Box plot on the numerical columns having TARGET value as 0 plt.figure(figsize=(25,25)) plt.subplot(2,2,1) plt.title('CHILDREN COUNT') sns.boxplot(zero_df['CNT_CHILDREN']) plt.subplot(2,2,2) plt.title('AMT_INCOME_TOTAL') sns.boxplot(zero_df['AMT_INCOME_TOTAL']) plt.subplot(2,2,3) plt.title('AMT_CREDIT') sns.boxplot(zero_df['AMT_CREDIT']) plt.subplot(2,2,4) plt.title('AMT_GOODS_PRICE') sns.boxplot(zero_df['AMT_GOODS_PRICE']) plt.show()Observation- From the box plots above we can safely say that having children has no impact on a person's ability to repay their loans- The amount of credit taken by the non-defaulters is also roughly around 450,000- There are no outliers in the amount of goods price- The income median lies just below 150,000 Bivariate Analysis on zero_df for continuous - continuous (Target value =0)## Plotting cont-cont Client Income vs Credit Amount plt.figure(figsize=(12,12)) sns.scatterplot(x="AMT_INCOME_TOTAL", y="AMT_CREDIT", hue="CODE_GENDER", style="CODE_GENDER", data=zero_df) plt.xlabel('Income of client') plt.ylabel('Credit Amount of loan') plt.title('Client Income vs Credit Amount') plt.show()Observation- We do see some outliers here wherein females with income below 50,000 have applied for loans with a credit amount of approximately 1,300,000## Plotting cont-cont Client Income vs Region population plt.figure(figsize=(12,12)) sns.scatterplot(x="AMT_INCOME_TOTAL", y="REGION_POPULATION_RELATIVE", hue="CODE_GENDER", style="CODE_GENDER", data=zero_df) plt.xlabel('Income of client') plt.ylabel('Population of region where client lives') 
plt.title('Client Income vs Region population') plt.show()Observation- Very few people live in highly dense/populated regions (>0.07)- Most of the clients live in regions with a population density between 0.00 and 0.04 5 PREVIOUS DATA Read the dataset file previous_application.csv, which consists of the customers' previous loan applications.previousApplicationData=pd.read_csv("./previous_application.csv") previousApplicationData.head()Analysing previous application datapreviousApplicationData.shape previousApplicationData.describe() previousApplicationData.columns previousApplicationData.dtypes ### Join the previous application data and application data files using merge mergedApplicationDataAndPreviousData = pd.merge(applicationDataWithRelevantColumns, previousApplicationData, how='left', on=['SK_ID_CURR']) mergedApplicationDataAndPreviousData.head()ObservationWe will be merging on the 'SK_ID_CURR' column, as there are duplicate IDs in SK_ID_CURR in previousApplicationData, while in the application_data file all the values are unique.mergedApplicationDataAndPreviousData.shape mergedApplicationDataAndPreviousData.NAME_CONTRACT_STATUS.value_counts(normalize=True)AnalysisWe will be focusing on analysing the NAME_CONTRACT_STATUS column and the various relationships based on that. Univariate Analysisuniplot(mergedApplicationDataAndPreviousData,col='NAME_CONTRACT_STATUS',title='Distribution of contract status type', hue=None)Observation- A large number of applications were approved for the clients- Some clients who received the offer did not use their loan offers- The number of refused & cancelled applications is roughly the same Bivariate Analysisuniplot(mergedApplicationDataAndPreviousData,col='NAME_CONTRACT_STATUS',title='Distribution Occupation Type',hue='NAME_INCOME_TYPE')ObservationBased on the plot above we can conclude that:- Working professionals have the highest number of approved loan applications.- Working professionals also have the highest number of refused or cancelled loan applications- Students, pensioners, businessmen and applicants on maternity leave have statistically low or no application status data presentuniplot(mergedApplicationDataAndPreviousData,col='NAME_CONTRACT_STATUS',title='Distribution based on Gender',hue='CODE_GENDER')Observation - Female applicants make more applications and have a higher number of applications approved- They also have a higher number of applications refused or canceled- The number of male applicant statuses is lower than female ones across the board. This could be because of the low number of males present in the dataset.uniplot(mergedApplicationDataAndPreviousData,col='NAME_CONTRACT_STATUS',title='Distribution Target',hue='TARGET')Observation- Based on the target column, we see that a high number of applicants who have a history of being able to repay their loans are approved for new loans- A very low number of defaulters are approved for new loans. This means that the bank is following a cautious approach to defaulters
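The raw counts above can be complemented with rates. A short sketch (assuming the merged frame `mergedApplicationDataAndPreviousData` from the merge step above, with pandas already imported) that normalises each income type's row to show the share of approved, refused and cancelled applications:

```python
# Approval/refusal/cancellation *rates* per income type, complementing the
# counts plotted above. Sketch only; assumes the merged DataFrame from above.
status_rates = pd.crosstab(mergedApplicationDataAndPreviousData['NAME_INCOME_TYPE'],
                           mergedApplicationDataAndPreviousData['NAME_CONTRACT_STATUS'],
                           normalize='index')
print(status_rates.round(2))
```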
uniplot(mergedApplicationDataAndPreviousData,col='NAME_CONTRACT_STATUS',title='Distribution based on Family Status',hue='NAME_FAMILY_STATUS')Observation- A large number of married people make loan applications & are approved for loans- Separated individuals have a very low number of applications in the unused offer category- The number of single/not married people who apply for loans and are refused or have their applications cancelled, as compared to approved, is less than half.uniplot(mergedApplicationDataAndPreviousData,col='NAME_CONTRACT_STATUS',title='Distribution based Application Start Day',hue='WEEKDAY_APPR_PROCESS_START')Observation- Most applicants start their loan applications on a Saturday and are successfully approved- Applicants who start their applications on Friday have a higher chance of getting rejected or cancelling their application compared to the weekend days, Saturday and Sunday- The number of cancelled applications is highest on Monday. This could suggest that after starting the application on the weekend, the client changed their mind on a workday.uniplot(mergedApplicationDataAndPreviousData,col='NAME_CONTRACT_STATUS',title='Distribution of Age on Loans',hue='BINNED_AGE')Observation- People between the ages of 31-40 apply for the largest number of loans and have consistently higher values across all application statuses- People above the age of 71 & below 20 don't make any loan applications- People in the 31-40 age range could be applying for more loans as they are married or living with a partnerplt.figure(figsize=(40,25)) sns.catplot(x="NAME_CONTRACT_STATUS", hue="TARGET", col="CODE_GENDER", data=mergedApplicationDataAndPreviousData, kind="count")Observation- The female population has a high chance of getting their loans approved- Cancellation of loans by females is significant across defaulters and non-defaulters Continuous & Categorical Plots### Plotting the relationship between NAME_CONTRACT_STATUS vs AMT_CREDIT_x ### from the merged application data and splitting on the basis of family status plt.figure(figsize=(40,25)) plt.xticks(rotation=45) sns.boxplot(data =mergedApplicationDataAndPreviousData, x='NAME_CONTRACT_STATUS',y='AMT_CREDIT_x', hue ='NAME_FAMILY_STATUS',orient='v') plt.title('Credit amount vs Application Status based on Family Status') plt.show()Observation- Married people take a higher amount of credit and have a higher median in the approved category- People in civil marriages, widows & separated applicants have a consistently similar median value across all the application statuses### Plotting the relationship between NAME_CONTRACT_STATUS vs AMT_INCOME_TOTAL ### from the merged application data and splitting on the basis of family status plt.figure(figsize=(40,25)) plt.xticks(rotation=45) plt.yscale('log') sns.boxplot(data =mergedApplicationDataAndPreviousData, x='NAME_CONTRACT_STATUS',y='AMT_INCOME_TOTAL', hue ='NAME_FAMILY_STATUS',orient='v') plt.title('Income amount vs Application status based on Family Status') plt.show()Observation- People who are married, live in civil marriages & are single/not married earn consistently well across all application status types- Their median income is also the same- Widows earn less than all the other categories Continuous & Continuous Plotsplt.figure(figsize=(30,20)) plt.scatter(mergedApplicationDataAndPreviousData.AMT_APPLICATION, mergedApplicationDataAndPreviousData.AMT_CREDIT_y) plt.title("Final Amount Approved vs Credit Amount Applied") 
plt.xlabel("Credit Amount applied by Client") plt.ylabel("Final Amount approved by Bank") plt.show()Observation- The Credit Amount applied vs Final Amount approved shows a good linear relation till 2000000.- However post 2000000, we could see good number of outliers where the approved amount is quite less as compared to amount applied- The number of applications with credit amount > 3500000 are quite less and there are not very good chances that the same amount is going to be approved Conclusion Through this case study we have made the following conclusions:- Most popular days for making applications is Saturday. The bank could focus on keeping offices open longer on Saturday to aid in completion of the applications. - Most popular age group for taking loans or credit is 31-40 with the most number of applications. The firm should focus on exploring more lucrative options for clients in that age range. They could be offered lower interest rates, longer repayment holidays etc.- Married people have the highest chance of making a loan application and being approved for a loan. - Because of the imbalance in the data, Females appear to be making the most number of loan applications. They also have a higher chance of getting approved and being able to repay the loans on time- Widows with secondary education have a very high median credit amount borrowing and default on paying back loans as well. It would be better to be vary of lending to them - Male labourers have high number of applications and also a high number of defaults as compared to females. It would be better for the bank to assess whether the person borrowing in this occupation type could be helped with staged loans or with loans on a lower interest rate than the other categories- The number of applications with credit amount > 3500000 are quite less and there are not very good chances that the same amount is going to be approved- Cancellation of loans by females is significant across defaulters and non defaulterssns.boxplot(data= applicationData.AMT_ANNUITY.head(500000).isnull()) axes[0][1].set_title('AMT_ANNUITY') plt.show() print(applicationDataAfterDroppedColumns.AMT_ANNUITY.head(500000).isnull().sum()) print(applicationData.AMT_ANNUITY.head(500000).isnull().sum()) sns.boxplot(data= applicationDataAfterDroppedColumns.AMT_ANNUITY.dropna()) plt.show()Example use of gore module This notebook demonstrates basic use of the gore module to- Read a fundus image from file- Produce a map where each pixel of the fundus image has been mapped to a point in the eye- Produce an interrupted projection of this map (a set of gores) for assembly; we add a "no-cut zone" between gores, so that the gores are connected by a strip to avoid fiddly cutting. 
Gores with image centred at the "equator"The default behaviour is to position the gores such that they are joined along what would be the equator on a globe.Import `gore` module and other utilitiesimport sys sys.path.insert(0, './gore') import gore from math import piOpen the example input image and displayim = gore.openimage('./img/img1.jpg') gore.fig(im)Map the image to position in the eye using the simple eye model and get its angular size.# parameters focal_length = 24 # crude estimate of focal length fundus_equi, lammax, phimax = gore.equi(im = im, focal_length = focal_length, alpha_max = gore.deg2rad(32), numpoints = 400)Processing coordinate positions: 100%|██████████| 399/399 [00:05<00:00, 67.77it/s]Using the output of the previous function `fundus_equi`, produce a set of six gores using the Cassini projection. We also choose to add a "no-cut zone" at the equator, which is produced using a simple cylindrical projection (Lambert cylindrical equal-area).#parameters num_gores = 6 projection = "cassini" # other projections are available phi_no_cut = pi / 24 # 7.5 degrees fundus_gores = gore.make(fundus_equi, num_gores = num_gores, projection = projection, phi_min = -phimax, phi_max = phimax, lam_min = -lammax, lam_max = lammax, phi_no_cut = phi_no_cut)Processing image rows: 100%|██████████| 400/400 [00:01<00:00, 283.04it/s]Finally, display and save the resulting gores.gore.fig(fundus_gores) fundus_gores.save("fundus_gores.png")Joining at the poleProduce a set of gores joined at the pole. Until now the centre of the fundus image has been centred on the equator of our map of the eye; the function that creates the rotary pattern of gores expects the region of interest to be centred at the "north" pole, so we use the function `swap` to rotate our map of the eye so that it is correctly oriented.fundus_swapped = gore.swap(fundus_equi, phi_extent = phimax, lam_extent = lammax)Processing image rows: 100%|██████████| 400/400 [00:00<00:00, 416.64it/s]Now use the `make` function again, this time with the argument `pole_stitch=True` to join the gores at the pole.fundus_rotary = gore.make(fundus_swapped, num_gores = 6, projection="cassini", pole_stitch=True, alpha_limit = gore.deg2rad(100)) fundus_rotary.save("fundus_rotary.png") gore.fig(fundus_rotary)Processing image rows: 100%|██████████| 400/400 [00:02<00:00, 188.10it/s]Finally, produce a polar cap using a simple projection (azimuthal equidistant) that can be pasted on to the rotary gores, so that there is a region in the centre where no cutting is required.fundus_cap = gore.polecap(fundus_swapped, num_gores=6, phi_cap = pi / 8) fundus_cap.save("fundus_cap.png") gore.fig(fundus_cap)Processing image rows: 100%|██████████| 400/400 [00:02<00:00, 195.68it/s] Processing image rows: 100%|██████████| 400/400 [00:00<00:00, 460.69it/s]Teamwork studyIn this notebook, we use the `teamwork` library to represent a care team's collaborative experience as a network graph. We can then leverage the `networkx` library to calculate the average clustering coefficient for each care team in the dataset. Combining this care team collaboration data with patient discharge data, we can study the correlation between care team collaboration experience and patient outcomes. 
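Before diving into the EHR data, here is a toy example of the average clustering coefficient that the study relies on. It is a standalone sketch with made-up clinician IDs, independent of the `teamwork` library:

```python
# Toy illustration of the average clustering coefficient used in this study.
# Standalone sketch with hypothetical clinician IDs, not part of the EHR code.
import networkx as nx

G = nx.Graph()
# An edge means "these two clinicians have collaborated before".
G.add_edges_from([("dr_a", "rn_b"), ("rn_b", "rn_c"), ("dr_a", "rn_c"),
                  ("dr_a", "pt_d")])
# Per-node clustering: dr_a = 1/3, rn_b = 1, rn_c = 1, pt_d = 0
print(nx.average_clustering(G))  # (1/3 + 1 + 1 + 0) / 4 ≈ 0.58
```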
Import librariesimport pandas as pd import time import matplotlib.pyplot as plt import seaborn as sns import statsmodels.api as sm import os import sys # This allows us to import from src and utils for dir in ['teamwork','utils']: sys.path.append(os.path.join(os.getcwd(), '..', dir)) import teamwork import utilsRead in EHR data and create study runner objectThe study runner is an iterable (generator) object#Get EHR notes data notes_df = pd.read_csv(utils.notes_with_disposition_file, parse_dates=[2]) # set a 90 day window to find collaboration among care teams WINDOW = 90 # identify care teams within 2 day increments STEP = 2 # create the study runner get_care_dates = teamwork.TeamworkStudyRunner(notes_df, WINDOW, STEP)Gather data for each care team identified on each care dateThe `get_careteam_data` utility function uses the `care_team` network graph to calculate the cumulative experience and other metrics for the care team# measure performance start_time = time.perf_counter() # flatten the experience data into a list experience_data_list = [utils.get_careteam_data(care_team) for care_date in get_care_dates for care_team in care_date] stop_time = time.perf_counter()Summarizing performance...print(f"It took {stop_time - start_time} seconds or {(stop_time - start_time) / 60} minutes" + f" to process a total of {len(notes_df.index)} notes. The study walked through the notes {STEP} days at a time" + f" to identify care teams and calculate care team experience within the previous {WINDOW} day window.")It took 118.00482139998348 seconds or 1.9667470233330582 minutes to process a total of 2721 notes. The study walked through the notes 2 days at a time to identify care teams and calculate care team experience within the previous 90 day window.Convert data into DataFrame for analysisTo study care team experience and patient outcomes, we need to tie in the patient info from discharge dataexperience_df = pd.DataFrame(experience_data_list, columns=utils.columns).drop_duplicates() discharges_df = pd.read_csv(utils.discharges_with_disposition_file) experience_master_df = experience_df.merge(discharges_df, left_on='discharge_id', right_on='id', copy=False) print(experience_master_df.shape)(388, 12)Analysis can be performed on the resulting DataFrame to study the correlation between cumulative care team experience and patient outcomesdef get_model(var): return sm.GLM.from_formula(f'disposition ~ {var} + age', family = sm.families.Binomial(), data=experience_master_df) model = get_model('avg_clust') result = model.fit() result.summary() model = get_model('cumulative_experience') result = model.fit() result.summary() model = get_model('avg_cumulative_experience') result = model.fit() result.summary()Neural Networks. Neural Network Architectures Data Preparation In this practical assignment we will solve the task of classifying digits from the `mnist` dataset using a fully connected and a convolutional neural network. For this we will use `keras`, a high-level wrapper around `tensorflow`. First, let us discuss the data. The `mnist` dataset consists of black-and-white images of digits of size $28 \times 28$ pixels. In this case we work with a single channel, whereas for colour images the total number of channels is three. Load the data using the `load_data` function of the `mnist` object from the `keras.datasets` module. 
Before doing this assignment, make sure that your `tensorflow` version is >= 1.4.import tensorflow as tf (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()Normalise the loaded data `x_train` and `x_test`. To do this, divide the numeric value of each pixel by $255$. Then convert `y_train` and `y_test` into a one-hot representation using the function `tf.keras.utils.to_categorical`. Our first task will be to implement a fully connected neural network, so change the dimensionality of the training and test data using the `reshape` method. >> np_vector.shape >> (28, 28) >> np_vector = np_vector.reshape(28 * 28) >> np_vector.shape >> (784,) *SOLUTION*num_classes = 10 y_train = tf.keras.utils.to_categorical(y_train, num_classes) y_test = tf.keras.utils.to_categorical(y_test, num_classes) x_train = x_train / 255 x_test = x_test / 255 x_train = x_train.reshape(60000, 784) x_test = x_test.reshape(10000, 784) y_train.shapeFully connected neural network. In this part of the assignment you are asked to implement an ordinary neural network using the sequential model `tf.keras.models.Sequential`. This model lets you add layers with the built-in `add` function. Our neural network will consist of just one hidden layer with $256$ neurons, the ReLU activation function and `input_shape=(784,)`, which corresponds to the number of neurons in the input layer of our network. The number of neurons in the output layer equals the number of classes, and softmax should be used as its activation function. Do not forget to call `model.compile` after adding the layers. Use `categorical_crossentropy` as the loss function, the `adadelta` optimizer and the `accuracy` metric. *SOLUTION*nn = tf.keras.models.Sequential() nn.add(tf.keras.layers.Dense(256, activation='relu', input_shape=(784, ))) nn.add(tf.keras.layers.Dense(num_classes, activation='softmax')) nn.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])After that, create the model and load the network weights from the file `neural_networks.h5`. How many trainable parameters does this neural network contain? Record this number as the first answer `answer1` for this assignment. Evaluate the quality on the test set and record this value, to three decimal places, in the variable `answer2`.nn.load_weights('neural_networks.h5') answer1 = nn.count_params() loss, accuracy = nn.evaluate(x_test, y_test, verbose=0) print('Test loss:', loss) print('Test accuracy:', accuracy) answer2 = round(accuracy, 3)*SOLUTION*nn.get_config()Convolutional neural network Next, you are asked to implement a convolutional neural network. * Input layer of dimension $(28, 28, 1)$.* Convolutional layer with $32$ channels, $3 \times 3$ convolution kernel.* Max-pooling layer $(2,2)$.* Convolutional layer with $64$ channels, $3 \times 3$ convolution kernel.* Max-pooling layer $(2,2)$.* Feature dimensionality reduction (flattening).* Fully connected layer with 64 neurons.* Output layer with the number of neurons equal to the number of classes.For this, use the classes `Convolution2D`, `MaxPooling2D` and `Flatten` (the latter for reducing the feature dimensionality). All of these classes, like the fully connected layer `Dense`, live in `tf.keras.layers`. Use ReLU as the activation function in all layers where one is needed, except the output layer, which, as in the previous task, uses softmax. As before, call `compile` after adding the layers. 
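As a quick sanity check on the two architectures described above (a hand calculation added here for illustration; it is not part of the original assignment), the trainable-parameter counts can be reproduced with a few lines of arithmetic, and they match the `count_params()` values reported in the answer string further down (203530 and 121930):

```python
# Fully connected net: 784 -> 256 -> 10
dense_params = (784 * 256 + 256) + (256 * 10 + 10)
print(dense_params)  # 203530

# Convolutional net: 28x28x1 -> conv3x3(32) -> pool -> conv3x3(64) -> pool
# -> flatten (5 * 5 * 64 = 1600) -> dense 64 -> dense 10
conv1 = 3 * 3 * 1 * 32 + 32        # 320
conv2 = 3 * 3 * 32 * 64 + 64       # 18496
dense1 = 5 * 5 * 64 * 64 + 64      # 102464
dense2 = 64 * 10 + 10              # 650
print(conv1 + conv2 + dense1 + dense2)  # 121930
```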
*SOLUTION*cnn = tf.keras.models.Sequential() cnn.add(tf.keras.layers.Convolution2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1))) cnn.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2))) cnn.add(tf.keras.layers.Convolution2D(filters=64, kernel_size=(3, 3), activation='relu')) cnn.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2))) cnn.add(tf.keras.layers.Flatten()) cnn.add(tf.keras.layers.Dense(64, activation='relu')) cnn.add(tf.keras.layers.Dense(num_classes, activation='softmax')) cnn.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])Now evaluate the quality of the resulting model on the test set. To do this, change the dimensionality of `x_train` and `x_test` to match the input layer. Load the weights `conv_net.h5`. Record the number of parameters of this convolutional neural network in `answer3`. Compare it with the number of parameters in the fully connected neural network we implemented earlier. Record the quality estimate in `answer4` to three decimal places.x_train = x_train.reshape(60000, 28, 28, 1) x_test = x_test.reshape(10000, 28, 28, 1) answer3 = cnn.count_params()*SOLUTION*cnn.load_weights('conv_net.h5') loss, accuracy = cnn.evaluate(x_test, y_test, verbose=0) print('Test loss:', loss) print('Test accuracy:', accuracy) answer4 = round(accuracy, 3)Answer stringoutput = "nn params {0}\n nn score {1:.3f}\ncnn params {2}\ncnn score {3:.3f}" print(output.format(answer1, answer2, answer3, answer4))nn params 203530 nn score 0.982 cnn params 121930 cnn score 0.993Lab 6: Group, Join, Conditionals, Iteration, Randomness. Welcome to Lab 6! This week, we will get a bit more practice with grouping and joining tables, using iteration and simulations, and practicing the concept of randomness and probability. This material is covered in [Chapter 9](https://www.inferentialthinking.com/chapters/09/randomness.html) and the prior chapters (e.g., [Chapter 8.4](https://www.inferentialthinking.com/chapters/08/4/Joining_Tables_by_Columns)).# Don't change this cell; just run it. import numpy as np from datascience import * %matplotlib inline import matplotlib.pyplot as plots plots.style.use('fivethirtyeight') from client.api.notebook import Notebook ok = Notebook('lab06.ok') _ = ok.auth(inline=True)**Important**: The `ok` tests don't usually tell you that your answer is correct. More often, they help catch careless mistakes. It's up to you to ensure that your answer is correct. If you're not sure, ask someone (not for the answer, but for some guidance about your approach). Basically, for your solution to be correct, it is **necessary** that the tests are passed; however, passing the tests is not **sufficient** for your solution to be correct. 1. The Method `group` and Extravaganza Lineup Review of the method `group`Before we work on UCSB's Extravaganza data, let's work on a toy example to review some basics about `group`.Run the following cell to create a table. This table contains some anonymous ratings (1, 2, 3, 4, or 5 stars) for three flavors of ice creams. 
(Those flavors can be found at [McConnell's Fine Ice Creams](https://mcconnells.com/)!)ratings = Table().with_columns("Flavor", make_array("Banana & Salted Caramel", "Dutchman's Chocolate", "Eureka Lemon & Marionberries", "Banana & Salted Caramel", "Dutchman's Chocolate", "Eureka Lemon & Marionberries", "Banana & Salted Caramel", "Dutchman's Chocolate", "Eureka Lemon & Marionberries", "Banana & Salted Caramel"), "Rating", make_array(2,3,5,1,4,3,4,3,4,5)) ratingsSuppose we want to count how many ratings are collected for each flavor of the ice cream.ratings.group("Flavor")The `group` method with a single argument (the name of a column) counts the number of rows for each category in a column. The result contains one row per unique value in the grouped column. The call to `group` creates a column of counts in each category. The column is called `count` by default, and contains the number of rows in each category.So in the example table above, we have collected 4 ratings for Banana & Salted Caramel, 3 ratings for Dutchman's Chocolate and 3 ratings for Eureka Lemon & Marionberries.Now suppose we would like to look at individual ratings for each flavor of the ice cream.ratings.group("Flavor", list)When we use the `group` method with an optional second argument `list`, for each flavor, we have a list of the ratings this particular flavor received. Banana & Salted Caramel received 2, 1, 4 and 5 stars, Dutchman's Chocolate received 3, 4 and 3 stars, and Eureka Lemon & Marionberries received 5, 3 and 4 stars.There are also other optional second arguments, which will accomplish different objectives. Read the textbook for more!Now suppose that we want to sort in ascending order the list of ratings for each flavor. We will sort the ratings first and then group them, which will maintain their ratings in sorted order. **The order of these operations is important.**ratings.sort("Rating").group("Flavor", list)Compare the two tables above. The second table now has each list of ratings sorted! Extravaganza Data AnalysisEvery spring, UCSB hosts Extravaganza, a one-day on-campus music festival. The following questions are based on last year's festival. The AS Program Board (an on-campus organization tasked with organizing entertainment events) sends out a survey to UCSB students asking for their suggestions for music artists. The instructions in the survey specify that each student should select a first choice artist (rank 1), a second choice artist (rank 2), and a third choice artist (rank 3). Run the following cell to see how the first several students responded.survey = Table().read_table("survey.csv") surveyAfter these responses come in, however, the AS Program Board notices that their survey form does not actually enforce that each participant choose a single first choice artist, a single second choice artist, and a single third choice artist. Run the cell below to see an example of a student who did not follow the survey's instructions.survey.where("Perm Number", are.equal_to(5978341))The AS Program Board decides to identify all students who did not follow the survey's instructions, delete their votes from the table, and email the students to tell them that their votes did not comply with the rules, and that they should revote if they want to have a say in the Extravaganza lineup. 
The email addreses of all students are available in the student database, a portion of which is displayed in the table below.database = Table().read_table("student_data.csv") database**Question 1.1** Use the survey data and the student database information to identify the students who did not follow the survey's instructions. Make an **array** called `violators` that contains the **email addresses** of all students who did not follow the survey's instructions. *Hint 1:* Use the `group` command with second argument `list` to see how each student voted.*Hint 2:* A vote is valid if and only if the participant chose a single first choice artist, a single second choice artist, and a single third choice artist. That means in the table returned by the `group` method, a vote is valid if and only if its rank list is one of `[1, 2, 3]`, `[1, 3, 2]`, ..., `[3, 1, 2]`, i.e., any permutation of `[1, 2, 3]`.*Hint 3:* However, it's kind of tedious to compare with a vote's rank list with each permutation. Sort the data in some fashion before using the `group` method so that each student who voted correctly has a rank list that looks like `[1, 2, 3]`. Your task now is to find those whose vote does not look like this array.*Hint 4:* Once you have all the violators' perm numbers, use the `database` table to **get their email addresses**. Remember that you can use the `where` with the predicate `are.contained_in`.violators = ... violators _ = ok.grade('q1_1')**Question 1.2** Now delete the rows from the table `survey` that correspond to the voters in `violators`. After this, the table `survey` should have only valid votes in it.survey = ... survey _ = ok.grade('q1_2')After months of collecting votes and contacting artists, the 2017-18 UC Santa Barbara Extravaganza lineup has officially been released! Run the following cell to see a table of the scheduled performers and their respective numbers of Instagram followers (in thousands).extravaganza_performers = ["", "", "", "Coast Modern"] extravaganza_instagram = [2105, 3101, 35100, 15.5] extravaganza_lineup = Table().with_columns("Artists", extravaganza_performers, "Instagram", extravaganza_instagram) extravaganza_lineup.show()As we come closer to the event, we get word that one of the performers is unable to make it to Extravaganza, and has been replaced by another performer instead! Run the following cell to see a table of the new lineup and their respective numbers of Twitter followers (in thousands).new_performers = ["", "", "DRAM", "Coast Modern"] new_twitter = [1057, 3249, 123, 8] new_lineup = Table().with_columns("Performers", new_performers, "Twitter", new_twitter) new_lineup.show()**Question 1.3** Use the `join` method to join these two tables together so each row contains the name of the performer, their number of Instagram followers (in thousands), and their number of Twitter followers (in thousands). Save this new table into the variable `lineup_data`.*Hint:* Here is an [example](https://www.inferentialthinking.com/chapters/08/4/Joining_Tables_by_Columns.html) using `join`.lineup_data = ... lineup_data _ = ok.grade('q1_3')**Question 1.4** You should notice that a couple of artists are missing. Which ones are missing and why are they not in the new table? Replace this text with your answer **Question 1.5** Let's add `DRAM` back into the `lineup_data` table so that we can see all the artists who actually performed at Extravaganza last year. DRAM currently has 294.8 thousand Instagram followers and 122.5 thousand Twitter followers.lineup_data = ... 
lineup_data _ = ok.grade('q1_5')2. Getting Hold of Your Friend You are excited to go to Extravaganza but you don't want to go alone! You are trying to get a hold of your friend to see if they want to go to Extravaganza with you. However, each time you call your friend, the probability that they answer their phone is 1/3. If you call your friend two times today, what is the chance that you will talk to them? Here is the equation to help you find the probability:$$P(reaching\ your\ friend\ at\ least\ once\ in\ N\ times) = 1 - P(not\ reaching\ your\ friend\ all\ N\ times)$$You can find out more about this equation in the textbook [here](https://www.inferentialthinking.com/chapters/09/5/Finding_Probabilitiesat-least-one-success) under "At Least One Success" for an example on the probability of rolling a 6 on a die. **Question 2.0** Just to make sure that you are on the right track and are able to compute the probability, tell us, what's the probability that you rolled a 6 if you roll a die once?prob_rolling_six = ... _ = ok.grade('q2_01')Now, what is the probability that you rolled 6 two times in a row? Well, you need to make sure that you roll a 6 first, followed by another 6. Since each roll is independent from each other, the final probability of rolling a 6 twice in a row is the product of individual probabilities. Compute `prob_rolling_six_twice` using `prob_rolling_six` in your answer below.prob_rolling_six_twice = ... _ = ok.grade('q2_02')OK, let's get back to your friend that you want to invite to go to Extravaganza. **Question 2.1** Let's first calculate the probability that your friend will not answer the phone both times you call them.no_answer = ... no_answer _ = ok.grade('q2_1')**Question 2.2** Now that we have the probability of your friend not answering both times, let's calculate the probability that you will reach your friend at least once out of the two calls (using the formula from above).answered = ... answered _ = ok.grade('q2_2')3. Memes Twitter has just hired you to analyze some of its most popular memes! Run the following cell to see a table of information on recent Twitter posts that contained memes of certain popular formats. For each Twitter post, the table contains* The format of the meme in the post. For example, the format *Chemistry Cat* shows a cat dressed up as a scientist in a chemistry lab. The sign above shows a question that is answered below with a witty comment involving a chemical element or a chemistry concept. Two examples of a meme in this format are shown above.* The Twitter handle (username) of the person who made the post.* The number of retweets (shares).* The number of likes.* The number of days from when the post was generated to when you got the dataset.memes = Table.read_table('memes.csv') memes.show()**Question 3.1** Twitter is interested in determining which meme formats get the most retweets and likes. Calculate the total number of retweets and likes associated with each of the meme formats, and save a table of these results in a variable called `retweets_likes`. Your table should have three columns, containing, from left to right:* The format of the meme.* The total number of retweets for all memes with this format.* The total number of likes for all memes with this format.*Hint 1:* You should use the `group` method with an optional second argument called `sum`. Look at the [textbook](https://www.inferentialthinking.com/chapters/08/2/Classifying_by_One_Variable.html) if you are not familiar with this argument. 
(It might remind you of how we used `apply` method with a function name to apply to the columns.)*Hint 2:* The table `retweets_likes` should not have the column called `Number of Days Since Post` or `Posted By`. Remove those columns before using the `group` method.retweets_likes = ... retweets_likes _ = ok.grade('q3_1')**Question 3.2** The total number of retweets and likes should be taken relative to the number of days since the meme was posted, because memes that have been posted for longer will naturally have more of a chance to gather retweets and likes. For each meme format, calculate the number of days since a meme of that format was first posted, and add a column called `age` with these results to the table `retweets_likes`, saving your new table in a variable called `retweets_likes_age`.*Hint:* The number of days since a meme of that format was first posted is the maximum of `Number of Days Since Post` for that particular format. Use the method `group` with a second optional argument called `max`.retweets_likes_age = ... retweets_likes_age _ = ok.grade('q3_2')**Question 3.3** Rank the meme formats by popularity. The popularity of a meme format is measured as the total number of retweets and likes per day since the meme format was originally posted. In other words, we define the popularity of a meme format by the formula below.\begin{equation*}\text{popularity} = (\text{retweets} + \text{like})/\text{age}\end{equation*}Create an array called `popular_memes` that contains the meme formats ranked by popularity, so that the most popular meme is first in the array, and the least popular meme is last.*Hint:* We recommend you to create a new table with an additional column called `popularity` and sort the table.popular_memes = ... popular_memes _ = ok.grade('q3_3')4. Yahtzee In the dice game Yahtzee, players roll and reroll dice, trying to meet certain objectives. A player rolls five dice on the first roll, and after looking at the results, *can choose to* reroll any number of them on the second roll. Similarly, after looking at the results of the second roll, the player can choose to reroll any number of those for the third roll. After the third roll, no more rolling is allowed.One objective in Yahtzee is to roll as many 6's as possible. The standard strategy is as follows:* Roll all five dice.* Keep any that are 6's. Reroll all other dice.* Keep any that are 6's. Reroll all other dice.The number of 6's at the end of this process determined the player's score. **Question 4.1** Create an array called `my_dice` that contains the results of a first Yahtzee roll (that is, five random numbers between 1 and 6).my_dice = ... my_dice _ = ok.grade('q4_1')**Question 4.2** Define a function called `reroll()` that takes an array as an input parameter (e.g., an array that holds the five dice rolls). This function generates an array that contains the contents of the input array with the results after one additional Yahtzee roll. Your function should implement the standard Yahtzee strategy for rolling 6's, that is, keep all dice that were a 6 and reroll all other dice. Your solution would need to check which dice in the array are not 6 and re-roll only those dice.*Hint:* You can test out your function by repeatedly rerolling. Since you are keeping all the 6's you ever roll, eventually you should get all 6's by repeatedly rerolling. 
The dice that were 6 should not change as you reroll._Note: as you are rerolling the dice, even if your function is not returning the array, Python will remember any modifications that you made to the **array** that you passed as the input parameter even after you exit from the function._*Hint:* As you are checking the elements of the array, you _do not_ want to use `for dice in rolled_dice:`, because the variable `dice` would not know _which_ element it is looking at: it will only know the _value_ of each element not its _index_ (e.g., it will know that you are looking at the _value_ 5 but doesn't know if it is in the first or the i-th position in the array). Take a look at the examples of the `for` loop we did in class or in the previous lab to see how to get the _index_ of the value in an array.def reroll( rolled_dice ): ... # You can rerun this cell repeatedly to test your code! reroll( my_dice ) my_dice _ = ok.grade('q4_2')Now, practice taking a complete turn at Yahtzee, and see how many 6's you can get! Re-run the code cell from Question 4.1 to roll new dice. Then use your `reroll( my_dice )` function twice, and calculate the number of 6's you have at the end of your turn.# Practice taking a turn here. How many 6's did you get?**Question 4.3** Now, use a `for` loop to help you take 100,000 turns at Yahtzee. On each turn, you should roll the dice (all five of them), reroll them twice (rerolling means you only reroll the dice that are not sixes; use the `reroll()` function you already defined), and calculate the number of 6's you have at the end of your turn.Create an array called `sixes` that contains the number of 6's you had at the end of each turn. This array should have 100,000 entries.*Hint*: Try taking 10 turns with a `for` loop. Once you are sure you have that figured out, change it to 100,000 turns. It will take a little while (about a minute) for Python to perform the calculations when you are doing 100,000 turns.sixes = ... sixes _ = ok.grade('q4_3')Expected Value (EV)In the next question, we are going to work with a statistical term called _expected value_ (EV). EV is essentially an average, except that EV takes into account the probability of getting each value. Here's an example from the [Statistics How To](https://www.statisticshowto.datasciencecentral.com/probability-and-statistics/expected-value/) website:**You toss a fair coin three times. $X$ is the number of heads which appear. What is the EV?****Step 1**: Figure out the **possible values for $X$**. For a three-coin toss, you could get anywhere from 0 to 3 heads. So your values for X (the number of heads in 3 tosses) are 0,1,2 and 3.**Step 2**: Figure out your **probability of getting each value of $X$**. You may need to use a sample space, which contains all possible outcomes. (The sample space for this problem is: `{HHH TTT TTH THT HTT HHT HTH THH}`). 
The probabilities are: * $1/8$ for 0 heads (only tails came up on each coin toss), * $3/8$ for one head (3 outcomes contain exactly one head in three tosses), * $3/8$ for two heads, and * $1/8$ for 3 heads (three heads in three coin flips can happen in exactly one way).**Step 3**: Multiply your $X$ values in Step 1 by the probabilities from step 2 to get the expected value which is usually written as $E(X)$.$E(X) = 0\times(1/8) + 1\times(3/8) + 2\times(3/8) + 3\times(1/8) = 3/2$.The resulting EV is $3/2 = 1.5$.Thus, if you collected data by repeatedly flipping three coins, you would say that the expected number of heads you see is about 1.5 (which means you would expect 1 or more heads more often)._Note that the expected value is **not** a probability (its values can be way larger than probability's max value of 1)._You can read more about how to calculate expected values in the article "_Finding the Expected Value of a Dice Game_" in the Method 3: https://www.wikihow.com/Calculate-an-Expected-Value. Expected Value (EV) of a specific outcomeThe above example talks about the number of heads in hypothetical coin tosses. How would you compute the expected value of _your specific coin tosses_? Imagine your three coin flips resulted in `HTH` (two heads and one tail). In order to compute the EV value, we would compute the sum of the values of each event (assume `T=01` and `H=1`) and divide by the total number of trials (3 coin tosses), which in this case would give us $(1+0+1)/3 = 2/3$. We are essentially computing _the average value_ we got in this specific experiment. (Make sure you are convinced that the expected _value_ would be different if we change the representation of `H` and `T` to be 1 and 2 respectively.) Let's get back to our Yahtzee rolls.**Question 4.4** Use the data you have collected in your array `sixes` to approximate **the number of 6's** you would you expect to get in one turn using this strategy. Store your result in a variable called `expected_sixes.` Note that this **does not** need to be a whole number. *Hint:* How to calculate expected values based on the array `sixes`? Look at the example above. In this example, if you only took five turns and got 3, 4, 1, 2 and 3 in `sixes`, the expected value of the number of sixes is $(3 + 4 + 1 + 2 + 3) / 5$. Generalize this to the array `sixes` which has 100,000 turns. (_Use the methods that can determine the sum of an array and the length of an array!_)expected_sixes = ... expected_sixes _ = ok.grade('q4_4')**Question 4.5** Use the data you have collected in `expected_sixes` to approximate **the most commonly rolled number of 6's** when taking a single turn using this strategy. Store your result in a variable called `most_common_sixes.`Note that this **does** need to be **a whole number**, because it is impossible to roll a non-integer number of sixes in a single turn of Yahtzee. You are going to look at the valus you collected in `expected_sixes` and see which value came up most often. _Hint: You can count the values or you can also look at the `mode` (used the same way you'd use `sum` and `max`)._most_common_sixes = ... most_common_sixes _ = ok.grade('q4_5')Congratulations, you completed Lab 6!To submit:1. Select `Run All` from the `Cell` menu to ensure that you have executed all cells, including the test cells. 2. **Save and Checkpoint** from the `File` menu,3. Read through the notebook to make sure everything is fine.4. 
Submit using the cell below._ = ok.submit()Orthogonal Learning Simulation Designimport hdmpy import numpy as np import random import statsmodels.api as sm import matplotlib.pyplot as plt import numpy as np from matplotlib import colorsFirst Simulation: B = 50# Set seed np.random.seed(0) B = 50 Naive = np.zeros( B ) Orthogonal = np.zeros( B ) for i in range(0, B): n = 100 p = 100 beta = ( 1 / (np.arange( 1, p + 1 ) ** 2 ) ).reshape( p , 1 ) gamma = ( 1 / (np.arange( 1, p + 1 ) ** 2 ) ).reshape( p , 1 ) mean = 0 sd = 1 X = np.random.normal( mean , sd, n * p ).reshape( n, p ) D = ( X @ gamma ) + np.random.normal( mean , sd, n ).reshape( n, 1 )/4 #We reshape because in r when we sum a vector with a matrix it sum by column # DGP Y = 5*D + ( X @ beta ) + np.random.normal( mean , sd, n ).reshape( n, 1 ) # single selection method r_lasso_estimation = hdmpy.rlasso( np.concatenate( ( D , X ) , axis = 1 ) , Y , post = True ) #Regress main equation by lasso coef_array = r_lasso_estimation.est[ 'coefficients' ].iloc[ 2:, :].to_numpy() # Get "X" coefficients SX_IDs = np.where( coef_array != 0 )[0] # In case all X coefficients are zero, then regress Y on D if sum(SX_IDs) == 0 : Naive[ i ] = sm.OLS( Y , sm.add_constant(D) ).fit().summary2().tables[1].round(3).iloc[ 1, 0 ] # Otherwise, then regress Y on X and D (but only in the selected coefficients) elif sum( SX_IDs ) > 0 : X_D = np.concatenate( ( D, X[:, SX_IDs ] ) , axis = 1 ) Naive[ i ] = sm.OLS( Y , sm.add_constant( X_D ) ).fit().summary2().tables[1].round(3).iloc[ 1, 0] # In both cases we save D coefficient # Regress residuals. resY = hdmpy.rlasso( X , Y , post = False ).est[ 'residuals' ] resD = hdmpy.rlasso( X , D , post = False ).est[ 'residuals' ] Orthogonal[ i ] = sm.OLS( resY , sm.add_constant( resD ) ).fit().summary2().tables[1].round(3).iloc[ 1, 0] Orto_breaks = [-1.2, -1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1, 1.2, 1.4, 1.6, 1.8, 2] Naive_breaks = [-0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1, 1.2] fig, axs = plt.subplots(1, 2, sharex= True, tight_layout=True) # We can set the number of bins with the `bins` kwarg axs[0].hist( Orthogonal - 5 , range = (-2, 2), density = True , bins = Orto_breaks ) axs[1].hist( Naive - 5, range = (-2, 2), density = True , bins = Naive_breaks ) axs[0].title.set_text('Orthogonal') axs[1].title.set_text('Naive') axs[0].set_xlabel( 'Orhtogonal - True' ) axs[1].set_xlabel( 'Naive - True' )Second Simulation: B = 100# Set seed np.random.seed(0) B = 100 Naive = np.zeros( B ) Orthogonal = np.zeros( B ) for i in range(0, B): n = 100 p = 100 beta = ( 1 / (np.arange( 1, p + 1 ) ** 2 ) ).reshape( p , 1 ) gamma = ( 1 / (np.arange( 1, p + 1 ) ** 2 ) ).reshape( p , 1 ) mean = 0 sd = 1 X = np.random.normal( mean , sd, n * p ).reshape( n, p ) D = ( X @ gamma ) + np.random.normal( mean , sd, n ).reshape( n, 1 )/4 #We reshape because in r when we sum a vector with a matrix it sum by column # DGP Y = 5*D + ( X @ beta ) + np.random.normal( mean , sd, n ).reshape( n, 1 ) # single selection method r_lasso_estimation = hdmpy.rlasso( np.concatenate( ( D , X ) , axis = 1 ) , Y , post = True ) #Regress main equation by lasso coef_array = r_lasso_estimation.est[ 'coefficients' ].iloc[ 2:, :].to_numpy() # Get "X" coefficients SX_IDs = np.where( coef_array != 0 )[0] # In case all X coefficients are zero, then regress Y on D if sum(SX_IDs) == 0 : Naive[ i ] = sm.OLS( Y , sm.add_constant(D) ).fit().summary2().tables[1].round(3).iloc[ 1, 0 ] # Otherwise, then regress Y on X and D (but only in the selected coefficients) elif sum( 
SX_IDs ) > 0 : X_D = np.concatenate( ( D, X[:, SX_IDs ] ) , axis = 1 ) Naive[ i ] = sm.OLS( Y , sm.add_constant( X_D ) ).fit().summary2().tables[1].round(3).iloc[ 1, 0] # In both cases we save D coefficient # Regress residuals. resY = hdmpy.rlasso( X , Y , post = False ).est[ 'residuals' ] resD = hdmpy.rlasso( X , D , post = False ).est[ 'residuals' ] Orthogonal[ i ] = sm.OLS( resY , sm.add_constant( resD ) ).fit().summary2().tables[1].round(3).iloc[ 1, 0] Orto_breaks = [-1.2, -1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1, 1.2, 1.4, 1.6, 1.8, 2] Naive_breaks = [-0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1, 1.2] fig, axs = plt.subplots(1, 2, sharex= True, tight_layout=True) # We can set the number of bins with the `bins` kwarg axs[0].hist( Orthogonal - 5 , range = (-2, 2), density = True , bins = Orto_breaks ) axs[1].hist( Naive - 5, range = (-2, 2), density = True , bins = Naive_breaks ) axs[0].title.set_text('Orthogonal') axs[1].title.set_text('Naive') axs[0].set_xlabel( 'Orhtogonal - True' ) axs[1].set_xlabel( 'Naive - True' )Third Simulation: B = 1000# Set seed np.random.seed(0) B = 1000 Naive = np.zeros( B ) Orthogonal = np.zeros( B ) for i in range(0, B): n = 100 p = 100 beta = ( 1 / (np.arange( 1, p + 1 ) ** 2 ) ).reshape( p , 1 ) gamma = ( 1 / (np.arange( 1, p + 1 ) ** 2 ) ).reshape( p , 1 ) mean = 0 sd = 1 X = np.random.normal( mean , sd, n * p ).reshape( n, p ) D = ( X @ gamma ) + np.random.normal( mean , sd, n ).reshape( n, 1 )/4 #We reshape because in r when we sum a vector with a matrix it sum by column # DGP Y = 5*D + ( X @ beta ) + np.random.normal( mean , sd, n ).reshape( n, 1 ) # single selection method r_lasso_estimation = hdmpy.rlasso( np.concatenate( ( D , X ) , axis = 1 ) , Y , post = True ) #Regress main equation by lasso coef_array = r_lasso_estimation.est[ 'coefficients' ].iloc[ 2:, :].to_numpy() # Get "X" coefficients SX_IDs = np.where( coef_array != 0 )[0] # In case all X coefficients are zero, then regress Y on D if sum(SX_IDs) == 0 : Naive[ i ] = sm.OLS( Y , sm.add_constant(D) ).fit().summary2().tables[1].round(3).iloc[ 1, 0 ] # Otherwise, then regress Y on X and D (but only in the selected coefficients) elif sum( SX_IDs ) > 0 : X_D = np.concatenate( ( D, X[:, SX_IDs ] ) , axis = 1 ) Naive[ i ] = sm.OLS( Y , sm.add_constant( X_D ) ).fit().summary2().tables[1].round(3).iloc[ 1, 0] # In both cases we save D coefficient # Regress residuals. resY = hdmpy.rlasso( X , Y , post = False ).est[ 'residuals' ] resD = hdmpy.rlasso( X , D , post = False ).est[ 'residuals' ] Orthogonal[ i ] = sm.OLS( resY , sm.add_constant( resD ) ).fit().summary2().tables[1].round(3).iloc[ 1, 0] Orto_breaks = [-1.2, -1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1, 1.2, 1.4, 1.6, 1.8, 2] Naive_breaks = [-0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1, 1.2] fig, axs = plt.subplots(1, 2, sharex= True, tight_layout=True) # We can set the number of bins with the `bins` kwarg axs[0].hist( Orthogonal - 5 , range = (-2, 2), density = True , bins = Orto_breaks ) axs[1].hist( Naive - 5, range = (-2, 2), density = True , bins = Naive_breaks ) axs[0].title.set_text('Orthogonal') axs[1].title.set_text('Naive') axs[0].set_xlabel( 'Orhtogonal - True' ) axs[1].set_xlabel( 'Naive - True' ) # It can be seen that the data of the orthogonal matrix tend more to the center which is equal to 0. On the other hand, # the Naive data is a little more dispersed. 
The data of both matrices should be centered on zero, # because by subtracting 5 from all the estimates we make them behave like draws centred at the true value (mean = 0). # Since Orthogonal contains the estimates of the D coefficient obtained through partialling out, # the effect of the nuisance variables is eliminated, which allows the data to present a lower variance than the Naive data.Double Lasso - Testing the Convergence Hypothesis Double Lasso Approach The Double Lasso approach is used to select the variables to be included in the analysis in a way that avoids inflated type I errors. The covariates to be included are identified in two steps: first those that predict the dependent variable, then those that predict the independent variable. The second step is important because a covariate that is only a weak predictor of the dependent variable may still be a strong predictor of the independent variable, and excluding it may create an important omitted variable bias. This second step also serves as a test of randomization in experimental data. It is necessary to remember that the variables selected in either step are included in the regression of interest.The penalty term causes the lasso regression to shrink the estimated coefficients toward zero and to set the coefficients of some variables exactly to zero, both of which help reduce overfitting. The lasso, by setting some coefficients to zero, is performing variable selection, so these shrinkage methods allow using Lasso regression even when the number of observations is small relative to the number of predictors.Compared to the plain lasso method, it is better to use the "double lasso" variable selection procedure. This is because the non-zero coefficients estimated by the lasso are often underestimated, and the lasso may erroneously exclude variables with non-zero coefficients, particularly those with moderate effects. Consequently, a regularization bias occurs that negatively affects estimation and inference, given the omission of covariates with moderate but non-zero coefficients; an omitted variable bias arises when these covariates are relevant predictors of the focal variable. Testing the Convergence Hypothesis Example of partialling-out with Lasso to estimate the regression coefficient $\beta_1$ in the high-dimensional linear regression model:$$ Y = \beta_1 D + \beta_2'W + \epsilon. $$Specifically, we are interested in how the rates at which economies of different countries grow ($Y$) are related to the initial wealth levels in each country ($D$) controlling for country's institutional, educational, and other similar characteristics ($W$).The relationship is captured by $\beta_1$, the speed of convergence/divergence, which measures the speed at which poor countries catch up with ($\beta_1 < 0$) or fall behind ($\beta_1 > 0$) rich countries, after controlling for $W$. Our inference question here is: do poor countries grow faster than rich countries, controlling for educational and other characteristics? In other words, is the speed of convergence negative: $\beta_1 < 0$? This is the Convergence Hypothesis predicted by the Solow Growth Model. This is a structural economic model. Under some strong assumptions, that we won't state here, the predictive exercise we are doing here can be given a causal interpretation.The outcome $Y$ is the realized annual growth rate of a country's wealth (Gross Domestic Product per capita). The target regressor ($D$) is the initial level of the country's wealth. 
The target parameter $\beta_1$ is the speed of convergence, which measures the speed at which poor countries catch up with rich countries. The controls ($W$) include measures of education levels, quality of institutions, trade openness, and political stability in the country. Data Analysisimport hdmpy import pandas as pd import numpy as np import pyreadr import math import matplotlib.pyplot as plt import random # I downloaded the data that the author used growth_read = pyreadr.read_r("d:/Users/Manuela/Documents/GitHub/ECO224/Labs/data/GrowthData.RData") # Extracting the data frame from rdata_read growth = growth_read[ 'GrowthData' ] list(growth)The dimension of our data set:growth.shapeThe sample contains $90$ countries and $63$ controls. Thus $p \approx 60$, $n=90$ and $p/n$ is not small. We expect the least squares method to provide a poor estimate of $\beta_1$. We expect the method based on partialling-out with Lasso to provide a high quality estimate of $\beta_1$.To check this hypothesis, we analyze the relation between the output variable $Y$ and the other country's characteristics by running a linear regression in the first step.import statsmodels.api as sm import statsmodels.formula.api as smf # We create the main variables y = growth['Outcome'] X = growth.drop('Outcome', 1) # OLS regression reg_ols = sm.OLS(y, X).fit() print(reg_ols.summary()) # output: estimated regression coefficient corresponding to the target regressor est_ols = reg_ols.summary2().tables[1]['Coef.']['gdpsh465'] # output: std. error std_ols = reg_ols.summary2().tables[1]['Std.Err.']['gdpsh465'] # output: 95% confidence interval lower_ci = reg_ols.summary2().tables[1]['[0.025']['gdpsh465'] upper_ci = reg_ols.summary2().tables[1]['0.975]']['gdpsh465']Summarize OLS resultstable_1 = np.zeros( (1, 4) ) table_1[0,0] = est_ols table_1[0,1] = std_ols table_1[0,2] = lower_ci table_1[0,3] = upper_ci table_1_pandas = pd.DataFrame( table_1, columns = [ "Estimator","Std. Error", "lower bound CI", "upper bound CI" ]) table_1_pandas.index = [ "OLS" ] table_1_html = table_1_pandas.to_html() table_1_html # Create main variables Y = growth['Outcome'] W = growth.drop(['Outcome','intercept', 'gdpsh465'], 1 ) D = growth['gdpsh465']Double Lasso using cross Validation (Sklearn in Python)from sklearn import linear_model # Seat values for Lasso lasso_model = linear_model.Lasso( alpha = 0.00077 ) r_Y = Y - lasso_model.fit( W, Y ).predict( W ) r_Y = r_Y.rename('r_Y') # Part. out d r_D = D - lasso_model.fit( W, D ).predict( W ) r_D = r_D.rename('r_D') # ols partial_lasso_fit = sm.OLS(r_Y, r_D).fit() # # output: estimated regression coefficient corresponding to the target regressor est_lasso = partial_lasso_fit.summary2().tables[1]['Coef.']['r_D'] # # output: std. error std_lasso = partial_lasso_fit.summary2().tables[1]['Std.Err.']['r_D'] # # output: 95% confidence interval lower_ci_lasso = partial_lasso_fit.summary2().tables[1]['[0.025']['r_D'] upper_ci_lasso = partial_lasso_fit.summary2().tables[1]['0.975]']['r_D'] # Regress residuales partial_lasso_fit = sm.OLS(r_Y, r_D).fit() partial_lasso_est = partial_lasso_fit.summary2().tables[1]['Coef.']['r_D'] print( f"Coefficient for D via partialling-out using lasso {partial_lasso_est}" ) # output: estimated regression coefficient corresponding to the target regressor est_lasso = partial_lasso_fit.summary2().tables[1]['Coef.']['r_D'] # output: std. 
error std_lasso = partial_lasso_fit.summary2().tables[1]['Std.Err.']['r_D'] # output: 95% confidence interval lower_ci_lasso = partial_lasso_fit.summary2().tables[1]['[0.025']['r_D'] upper_ci_lasso = partial_lasso_fit.summary2().tables[1]['0.975]']['r_D']Summary Lasso Results:table_2 = np.zeros( (1, 4) ) table_2[0,0] = est_lasso table_2[0,1] = std_lasso table_2[0,2] = lower_ci_lasso table_2[0,3] = upper_ci_lasso table_2_pandas = pd.DataFrame( table_2, columns = [ "Estimator","Std. Error", "lower bound CI", "upper bound CI" ]) table_2_pandas.index = [ "LASSO CROSS VALIDATION" ] table_2_pandas table_3 = table_1_pandas.append(table_2_pandas) table_3 table_3_html = table_3.to_html() print(table_3_html)
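The penalty in the cell above is hard-coded (alpha = 0.00077). As an alternative, not part of the original analysis, the penalty for each auxiliary regression could be chosen by cross-validation; a minimal sketch with scikit-learn's LassoCV follows (the names alpha_Y, alpha_D, r_Y_cv, r_D_cv are illustrative).

```python
# Sketch: pick the Lasso penalty by 5-fold cross-validation instead of hard-coding it.
from sklearn.linear_model import LassoCV

alpha_Y = LassoCV(cv=5, random_state=0).fit(W, Y).alpha_   # penalty for the Y-on-W regression
alpha_D = LassoCV(cv=5, random_state=0).fit(W, D).alpha_   # penalty for the D-on-W regression

# Partial out W from Y and D with the cross-validated penalties, then regress residual on residual.
r_Y_cv = (Y - linear_model.Lasso(alpha=alpha_Y).fit(W, Y).predict(W)).rename('r_Y')
r_D_cv = (D - linear_model.Lasso(alpha=alpha_D).fit(W, D).predict(W)).rename('r_D')
print(sm.OLS(r_Y_cv, r_D_cv).fit(cov_type='HC1').summary2().tables[1])
```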
        Estimator   Std. Error   lower bound CI   upper bound CI
OLS     -0.009378     0.029888        -0.070600         0.051844
LASSO   -0.047747     0.017705        -0.082926        -0.012567
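As a quick check, the $95\%$ interval in the LASSO row can be recomputed from the point estimate and standard error with the normal approximation (a sketch; statsmodels reports t-based intervals, so the endpoints differ slightly).

```python
# Normal-approximation 95% CI from the estimate and standard error reported above.
est, se = -0.047747, 0.017705
print(est - 1.96 * se, est + 1.96 * se)   # roughly (-0.0824, -0.0130)
```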
The least squares method provides a rather noisy estimate of the speed of convergence, so we cannot answer the question of whether poor countries grow faster than rich countries. Least squares does not work well when the ratio $p/n$ is large. In sharp contrast, partialling-out via Lasso provides a more precise estimate. The Lasso-based point estimate is $-5\%$, and the $95\%$ confidence interval for the (annual) rate of convergence, $[-7.8\%, -2.2\%]$, only includes negative numbers. This empirical evidence does support the convergence hypothesis.

Double Lasso using theoretical Lambda (HDM package)

res_Y = hdmpy.rlasso( W, Y, post=True ).est['residuals']
res_D = hdmpy.rlasso( W, D, post=True ).est['residuals']
r_Y = pd.DataFrame(res_Y, columns=['r_Y'])
r_D = pd.DataFrame(res_D, columns=['r_D'])
# OLS regression
reg_ols = sm.OLS(r_Y, r_D).fit()
print(reg_ols.summary())
# output: estimated regression coefficient corresponding to the target regressor
est_lasso = reg_ols.summary2().tables[1]['Coef.']['r_D']
# output: std. error
std_lasso = reg_ols.summary2().tables[1]['Std.Err.']['r_D']
# output: 95% confidence interval
lower_ci_lasso = reg_ols.summary2().tables[1]['[0.025']['r_D']
upper_ci_lasso = reg_ols.summary2().tables[1]['0.975]']['r_D']
table_3 = np.zeros( (1, 4) )
table_3[0,0] = est_lasso
table_3[0,1] = std_lasso
table_3[0,2] = lower_ci_lasso
table_3[0,3] = upper_ci_lasso
table_3_pandas = pd.DataFrame( table_3, columns = [ "Estimator","Std. Error", "lower bound CI", "upper bound CI" ])
table_3_pandas.index = [ "LASSO LAMBDA" ]
table_3_pandas

Double Lasso using method="partialling out"

lasso_direct = hdmpy.rlassoEffect(x=W, y=Y, d=D, method="partialling out")
lasso_direct
est_lasso = lasso_direct["coefficients"]
std_lasso = lasso_direct["se"]
lower_ci_lasso = est_lasso - 1.96*std_lasso
upper_ci_lasso = est_lasso + 1.96*std_lasso
table_4 = np.zeros( (1, 4) )
table_4[0,0] = est_lasso
table_4[0,1] = std_lasso
table_4[0,2] = lower_ci_lasso
table_4[0,3] = upper_ci_lasso
table_4_pandas = pd.DataFrame( table_4, columns = [ "Estimator","Std. Error", "lower bound CI", "upper bound CI" ])
table_4_pandas.index = [ "LASSO_direct" ]
table_4_pandas

Summary and Plot

Here we summarize the four methods used to estimate $\beta_1$ and plot each point estimate with its confidence interval in one figure.

tables=[table_1_pandas,table_2_pandas,table_3_pandas,table_4_pandas]
summary=pd.concat(tables)
summary
# The OLS estimate is the smallest in magnitude and has the largest standard error, so it is the least precise of the four.
# The double-lasso methods are the most precise, and their confidence intervals are consistent with the convergence hypothesis.
for lower,upper,coef,y in zip(summary['lower bound CI'],summary['upper bound CI'],summary['Estimator'],range(len(summary))):
    plt.plot((lower,upper),(y,y),'o-',color='blue')
    plt.plot(coef,y,'^',color='red')
plt.yticks(range(len(summary)),list(['OLS', 'LASSO CROSS VALIDATION', 'LASSO LAMBDA', 'LASSO_direct']))
plt.title('Confidence Intervals for the Speed of Convergence')
plt.show()
# We can see how each method varies in its estimator and its confidence intervals.
The double-lasso is more accurate.Boolean Operatorsa= 1 b= 2 print(a>b) print(aFalse True True TrueBull Functionprint(bool(15)) print(bool(True)) print(bool(1)) print(bool(False)) print(bool(0)) print(bool(None))True True True False False FalseFunctions return a Boleandef myFunction(): return True print(myFunction()) def myFunction():return False if myFunction (): print("True") else: print("False")FalseRelation OperatorArithmetic Operatorprint(10+5) print(10-5) print(10*5) print(10/5) print(10%5) #modulo division that shows the remainder after the division print(10//3) #floor division, 3.33 print(10**2) #concatenation15 5 50 2.0 0 3 100Bitwise Operatorsa=60 #0011 1100 b=13 #0000 1101 print(a & b) print(a|b) print(a^b) print(a<<2)#0011 1100 print(a>>1)12 61 49 240 30Assignment Operatorsa+=3 #Same As a= a+3, a=60, a=63 print(a)63Logical Operatora =True b =False print(a and b) print(a or b) print(not(a or b)) print(a>b and b>a) print(a==a or b==b) print(not(a==a or b==b))False True False False True FalseIdentify Operatorsprint(a is b) print(a is not b)False True* Find out the features that take up multiple values.* One Hot Encode them and see which of them having the maximum impact on the overall performance.* Include those variables with all the other variables.%matplotlib inline import numpy as np import pandas as pd import os, sys import warnings warnings.filterwarnings('ignore') from sklearn.cross_validation import train_test_split from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_absolute_error from sklearn.preprocessing import LabelEncoder from sklearn.feature_extraction import DictVectorizer from sklearn.pipeline import Pipeline from sklearn.externals import joblib import xgboost as xgb basepath = os.path.expanduser('~/Desktop/src/AllState_Claims_Severity/') sys.path.append(os.path.join(basepath, 'src')) from data import * np.random.seed(2016) train = pd.read_csv(os.path.join(basepath, 'data/raw/train.csv')) test = pd.read_csv(os.path.join(basepath, 'data/raw/test.csv')) sample_sub = pd.read_csv(os.path.join(basepath, 'data/raw/sample_submission.csv')) def encode_categorical_features(train, test, feature): train_ = train.copy() test_ = test.copy() encoder = LabelEncoder() encoder.fit(pd.concat((train_[feature], test_[feature]))) train_[feature] = encoder.transform(train_[feature]) test_[feature] = encoder.transform(test_[feature]) return train_[feature], test_[feature] def label_encoding(train, test, features): for feat in features: train[feat], test[feat] = encode_categorical_features(train, test, feat) return train, test** Multi valued features **mv_features = get_multi_valued_features(train, test)** Binary valued features **bv_features = get_binary_valued_features(train, test)** Label Encoding **train, test = label_encoding(train, test, features, encode_categorical_features)** One Hot Encoding **train_vec, test_vec = one_hot_encode_features(train, test, features)** Train Test Split **itrain, itest = train_test_split(range(len(train)), test_size=0.2, random_state=40)** Multi-valued categorical variable feature selection **def evaluate_features(train, test, y, itrain, itest, cols, folderpath): evaluation = [] for col in cols: # encode categorical variable train_vec, test_vec = label_encoding(train, test, [col]) del test_vec X_train = train_vec.iloc[itrain][[col]] X_test = train_vec.iloc[itest][[col]] y_train = y.iloc[itrain] y_test = y.iloc[itest] del train_vec # train model pipeline = Pipeline([ ('model', RandomForestRegressor(n_estimators=10, 
n_jobs=-1, random_state=11)) ]) pipeline.fit(X_train, y_train) ypreds = pipeline.predict(X_test) score = mean_absolute_error(y_test, ypreds) print('Feature Name: %s and MAE: %f'%(col, score)) evaluation.append((col, score)) evaluation = np.array(sorted(evaluation, key=lambda x: x[1])) joblib.dump(evaluation, os.path.join(basepath, 'data/processed/%s'%(folderpath))) evaluate_features(train, test, train.loss, itrain, itest, bv_features, 'binary-valued-features/bv_feat_importance') feat_importance = [79, 78, 56, 117, 122, 11, 80, 127, 129, 126, 104, 99, 71, 111, 105, 118, 120, 100, 109] features = train.columns[1:-1] imp_features = [features[f] for f in feat_importance] train_vec, test_vec = one_hot_encode_features(train, test, ['cat80', 'cat79', 'cat81', 'cat105', 'cat100', 'cat112', 'cat106', 'cat101', 'cat110']) train_vec = train_vec.drop(['cat80_D', 'cat79_D', 'cat81_D', 'cat105_T', 'cat100_O', 'cat112_Y','cat106_R', 'cat101_U', 'cat110_Y'], axis=1) test_vec = test_vec.drop(['cat80_D', 'cat79_D', 'cat81_D', 'cat105_T', 'cat100_O', 'cat112_Y','cat106_R', 'cat101_U', 'cat110_Y'], axis=1) train_lbl, test_lbl = label_encoding(train, test, ['cat57', 'cat12', 'cat72']) train_cont = train[['cont2', 'cont7', 'cont12', 'cont14', 'cont11', 'cont3', 'cont5']] test_cont = test[['cont2', 'cont7', 'cont12', 'cont14', 'cont11', 'cont3', 'cont5']] train_processed = pd.concat((train_lbl[['cat57', 'cat12', 'cat72']], train_vec, train_cont), axis=1) test_processed = pd.concat((test_lbl[['cat57', 'cat12', 'cat72']], test_vec, test_cont), axis=1) X_train = train_processed.iloc[itrain] X_test = train_processed.iloc[itest] y_train = train.iloc[itrain].loss y_test = train.iloc[itest].loss y = train.loss del train_vec, test_vec, train_lbl, test_lbl, train_cont, test_cont, train, test pipeline = Pipeline([ ('model', xgb.XGBRegressor(colsample_bytree=0.8, subsample=0.8, seed=124, gamma=1, max_depth=5, learning_rate=0.1)) ]) pipeline.fit(X_train, y_train)** Public Leaderboard Score: 1760.47142 **ypred = pipeline.predict(X_test) print('MAE on unseen examples: %f'%(mean_absolute_error(y_test, ypred)))MAE on unseen examples: 1304.152417** Training **pipeline.fit(train_processed, train.loss) predictions = pipeline.predict(test_processed)** Submissions **sample_sub['loss'] = predictions sample_sub.to_csv(os.path.join(basepath, 'submissions/basic_simple_features.csv'), index=False)This notebook shows how to use Orchestrator APIs for user experimentsimport os from fabrictestbed.slice_manager import SliceManager, Status, SliceState import json ssh_key_file_priv=os.environ['HOME']+"/.ssh/id_rsa" ssh_key_file_pub=os.environ['HOME']+"/.ssh/id_rsa.pub" ssh_key_pub = None with open (ssh_key_file_pub, "r") as myfile: ssh_key_pub=myfile.read() ssh_key_pub=ssh_key_pub.strip() credmgr_host = os.environ['FABRIC_CREDMGR_HOST'] print(f"FABRIC Credential Manager : {credmgr_host}") orchestrator_host = os.environ['FABRIC_ORCHESTRATOR_HOST'] print(f"FABRIC Orchestrator : {orchestrator_host}")Create Slice Manager ObjectUsers can request tokens with different Project and Scopes by altering `project_name` and `scope` parameters in the refresh call below.slice_manager = SliceManager(oc_host=orchestrator_host, cm_host=credmgr_host , project_name='all', scope='all') # Initialize the slice manager slice_manager.initialize()Orchestrator API example to query for available resourcesstatus, advertised_topology = slice_manager.resources() print(f"Status: {status}") if status == Status.OK: print(f"Toplogy: {advertised_topology}") else: print(f"Error: 
{advertised_topology}") if status == Status.OK: advertised_topology.draw()Create SliceIn Release 1.0, user is expected to assign the IP addresses manually. Please use the example comands indicated below: Configure Slice Parametersslice_name = 'MySlice' site = 'RENC' node1_name = 'Node1' node2_name = 'Node2' network_service_name='bridge1' nic1_name = 'node1-nic1' nic2_name = 'node2-nic1' image = 'default_centos_8' image_type = 'qcow2' cores = 2 ram = 16 disk = 100 from fabrictestbed.slice_editor import ExperimentTopology, Capacities, ComponentType, ComponentModelType, ServiceType # Create topology t = ExperimentTopology() # Add node n1 = t.add_node(name=node1_name, site=site) # Set capacities cap = Capacities() cap.set_fields(core=cores, ram=ram, disk=disk) # Set Properties n1.set_properties(capacities=cap, image_type=image_type, image_ref=image) # Add node n2 = t.add_node(name=node2_name, site=site) # Set properties n2.set_properties(capacities=cap, image_type=image_type, image_ref=image) # Shared Cards n1.add_component(model_type=ComponentModelType.SmartNIC_ConnectX_6, name=nic1_name) n2.add_component(model_type=ComponentModelType.SmartNIC_ConnectX_5, name=nic2_name) # L2Bridge Service t.add_network_service(name=network_service_name, nstype=ServiceType.L2Bridge, interfaces=t.interface_list) # Generate Slice Graph slice_graph = t.serialize() # Request slice from Orchestrator return_status, slice_reservations = slice_manager.create(slice_name=slice_name, slice_graph=slice_graph, ssh_key=ssh_key_pub) if return_status == Status.OK: slice_id = slice_reservations[0].get_slice_id() print("Submitted slice creation request. Slice ID: {}".format(slice_id)) else: print(f"Failure: {slice_reservations}")Get the Sliceimport time def wait_for_slice(slice,timeout=180,interval=10,progress=False): timeout_start = time.time() if progress: print("Waiting for slice .", end = '') while time.time() < timeout_start + timeout: return_status, slices = slice_manager.slices(excludes=[SliceState.Dead,SliceState.Closing]) if return_status == Status.OK: slice = list(filter(lambda x: x.slice_name == slice_name, slices))[0] if slice.slice_state == "StableOK": if progress: print(" Slice state: {}".format(slice.slice_state)) return slice if slice.slice_state == "Closing" or slice.slice_state == "Dead": if progress: print(" Slice state: {}".format(slice.slice_state)) return slice else: print(f"Failure: {slices}") if progress: print(".", end = '') time.sleep(interval) if time.time() >= timeout_start + timeout: if progress: print(" Timeout exceeded ({} sec). Slice: {} ({})".format(timeout,slice.slice_name,slice.slice_state)) return slice return_status, slices = slice_manager.slices(excludes=[SliceState.Dead,SliceState.Closing]) if return_status == Status.OK: slice = list(filter(lambda x: x.slice_name == slice_name, slices))[0] slice = wait_for_slice(slice, progress=True) print() print("Slice Name : {}".format(slice.slice_name)) print("ID : {}".format(slice.slice_id)) print("State : {}".format(slice.slice_state)) print("Lease End : {}".format(slice.lease_end))Get the NodesRetrieve the node information and save the management IP address. Get the Topologyreturn_status, experiment_topology = slice_manager.get_slice_topology(slice_object=slice)Configure Node1Use ssh to configure eth1 on node 1. 
```ip addr add 192.168.10.51/24 dev eth1```node1 = experiment_topology.nodes[node1_name] management_ip_node1 = str(node1.get_property(pname='management_ip')) print("Node Name : {}".format(node1.name)) print("Management IP : {}".format(management_ip_node1)) print() import paramiko key = paramiko.RSAKey.from_private_key_file(ssh_key_file_priv) client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy()) client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) client.connect(management_ip_node1,username='centos',pkey = key) stdin, stdout, stderr = client.exec_command('sudo ip addr add 192.168.10.51/24 dev eth1') print (str(stdout.read(),'utf-8').replace('\\n','\n')) stdin, stdout, stderr = client.exec_command('ifconfig eth1') print (str(stdout.read(),'utf-8').replace('\\n','\n'))Configure Node2Use ssh to configure eth1 on each Node 2. ```ip addr add 192.168.10.52/24 dev eth1```node2 = experiment_topology.nodes[node2_name] management_ip_node2 = str(node2.get_property(pname='management_ip')) print("Node Name : {}".format(node2.name)) print("Management IP : {}".format(management_ip_node2)) print() import paramiko key = paramiko.RSAKey.from_private_key_file(ssh_key_file_priv) client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy()) client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) client.connect(management_ip_node2,username='centos',pkey = key) stdin, stdout, stderr = client.exec_command('sudo ip addr add 192.168.10.52/24 dev eth1') print (str(stdout.read(),'utf-8').replace('\\n','\n')) stdin, stdout, stderr = client.exec_command('ifconfig eth1') print (str(stdout.read(),'utf-8').replace('\\n','\n'))Delete Slicereturn_status, result = slice_manager.delete(slice_object=slice) print("Response Status {}".format(return_status)) print("Response received {}".format(result))Recap: k-Nearest Neighbor* Building the model consists only of storing the training dataset. * To make a prediction, the algorithm finds the _k_ closest data points in the training dataset * Classification: predict the most frequent class of the k neighbors * Regression: predict the average of the values of the k neighbors * Both can be weighted by the distance to each neighbor* Main hyper-parameters: * Number of neighbors (k). Acts as a regularizer. * Choice of distance function (e.g. Euclidean) * Weighting scheme (uniform, distance,...)* Model: - Representation: Store training examples (e.g. 
in KD-tree) - Typical loss functions: * Classification: Accuracy (Zero-One Loss) * Regression: Root mean squared error - Optimization: None (no model parameters to tune)# Auto-setup when running on Google Colab import os if 'google.colab' in str(get_ipython()) and not os.path.exists('/content/master'): !git clone -q https://github.com/ML-course/master.git /content/master !pip install -rq master/requirements_colab.txt %cd master/notebooks # Global imports and settings %matplotlib inline from preamble import * interactive = True # Set to True for interactive plots if interactive: fig_scale = 1.5k-Nearest Neighbor Classificationk=1: look at nearest neighbor only: likely to overfit k>1: do a vote and return the majority (or a confidence value for each class)plt.rcParams["figure.figsize"] = (12*fig_scale,6*fig_scale) mglearn.plots.plot_knn_classification(n_neighbors=3)AnalysisWe can plot the prediction for each possible input to see the _decision boundary_from sklearn.neighbors import KNeighborsClassifier X, y = mglearn.datasets.make_forge() fig, axes = plt.subplots(1, 3, figsize=(10*fig_scale, 3*fig_scale)) for n_neighbors, ax in zip([1, 3, 9], axes): clf = KNeighborsClassifier(n_neighbors=n_neighbors).fit(X, y) mglearn.plots.plot_2d_separator(clf, X, fill=True, eps=0.5, ax=ax, alpha=.4) mglearn.discrete_scatter(X[:, 0], X[:, 1], y, ax=ax) ax.set_title("{} neighbor(s)".format(n_neighbors)) ax.set_xlabel("feature 0") ax.set_ylabel("feature 1") _ = axes[0].legend(loc=3)Using few neighbors corresponds to high model complexity (left), and using many neighbors corresponds to low model complexity and smoother decision boundary (right). Nearest Shrunken Centroid* Nearest Centroid: Represents each class by the centroid of its members. * Parameteric model (while kNN is non-parametric)* Regularization is possible with the `shrink_threshold` parameter * Shrinks (scales) each feature value by within-class variance of that feature * Soft thresholding: if feature value falls below threshold, it is set to 0 * Effectively removes (noisy) featuresfrom sklearn.neighbors import NearestCentroid from sklearn.datasets import make_blobs fig, axes = plt.subplots(1, 3, figsize=(10*fig_scale, 5*fig_scale)) thresholds = [0, 0.4, .8] X, y = make_blobs(centers=2, cluster_std=2, random_state=0, n_samples=50) for threshold, ax in zip(thresholds, axes): ax.set_title(f"shrink_threshold={threshold}") nc = NearestCentroid(shrink_threshold=threshold) nc.fit(X, y) ax.scatter(X[:, 0], X[:, 1], c=y, edgecolor='k') mglearn.tools.plot_2d_classification(nc, X, alpha=.5, ax=ax) ax.scatter(nc.centroids_[:, 0], nc.centroids_[:, 1], c=['b', 'r'], s=50, marker='x') ax.set_aspect("equal")Note: Nearest Centroid suffers when the data is not 'convex'X, y = make_blobs(centers=4, random_state=8) y = y % 2 knn = KNeighborsClassifier(n_neighbors=1).fit(X, y) nc = NearestCentroid().fit(X, y) plt.figure fig, axes = plt.subplots(1, 2, figsize=(12*fig_scale, 5*fig_scale)) for est, ax in [(knn, axes[0]), (nc, axes[1])]: ax.scatter(X[:, 0], X[:, 1], c=y, edgecolor='k') ax.set_title(est.__class__.__name__) mglearn.tools.plot_2d_classification(est, X, alpha=.5, ax=ax) ax.set_aspect("equal")ScalabilityWith n = nr examples and p = nr features* Nearest shrunken threshold * Fit: $O(n * p)$ * Memory: $O(nrclasses * p)$ * Predict: $O(nrclasses * p)$* Nearest neighbors (naive) * Fit: $0$ * Memory: $O(n * p)$ * Predict: $O(n * p)$* Nearest neighbors (with KD trees) * Fit: $O(p * n log n)$ * Memory: $O(n * p)$ * Predict: $O(k * log n)$ k-Neighbors Regressionk=1: 
return the target value of the nearest neighbor (overfits easily) k>1: return the _mean_ of the target values of the _k_ nearest neighborsmglearn.plots.plot_knn_regression(n_neighbors=3)AnalysisWe can again output the predictions for each possible input, for different values of _k_.from sklearn.neighbors import KNeighborsRegressor from sklearn.model_selection import train_test_split # split the wave dataset into a training and a test set X, y = mglearn.datasets.make_wave(n_samples=40) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) fig, axes = plt.subplots(1, 3, figsize=(15*fig_scale, 4*fig_scale)) # create 1000 data points, evenly spaced between -3 and 3 line = np.linspace(-3, 3, 1000).reshape(-1, 1) for n_neighbors, ax in zip([1, 3, 9], axes): # make predictions using 1, 3 or 9 neighbors reg = KNeighborsRegressor(n_neighbors=n_neighbors) reg.fit(X_train, y_train) ax.plot(line, reg.predict(line)) ax.plot(X_train, y_train, '^', c=mglearn.cm2(0), markersize=8) ax.plot(X_test, y_test, 'v', c=mglearn.cm2(1), markersize=8) ax.set_title( "{} neighbor(s)\n train score: {:.2f} test score: {:.2f}".format( n_neighbors, reg.score(X_train, y_train), reg.score(X_test, y_test))) ax.set_xlabel("Feature") ax.set_ylabel("Target") _ = axes[0].legend(["Model predictions", "Training data/target", "Test data/target"], loc="best")!pip install datasets !pip install transformersRequirement already satisfied: datasets in /usr/local/lib/python3.7/dist-packages (1.8.0) Requirement already satisfied: pyarrow<4.0.0,>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from datasets) (3.0.0) Requirement already satisfied: xxhash in /usr/local/lib/python3.7/dist-packages (from datasets) (2.0.2) Requirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from datasets) (20.9) Requirement already satisfied: tqdm<4.50.0,>=4.27 in /usr/local/lib/python3.7/dist-packages (from datasets) (4.41.1) Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from datasets) (1.19.5) Requirement already satisfied: huggingface-hub<0.1.0 in /usr/local/lib/python3.7/dist-packages (from datasets) (0.0.12) Requirement already satisfied: multiprocess in /usr/local/lib/python3.7/dist-packages (from datasets) (0.70.12.2) Requirement already satisfied: dill in /usr/local/lib/python3.7/dist-packages (from datasets) (0.3.4) Requirement alread[...]load datafrom datasets import load_dataset from transformers import AutoTokenizer, DataCollatorWithPadding raw_datasets = load_dataset("glue", "mrpc") checkpoint = "bert-base-uncased" tokenizer = AutoTokenizer.from_pretrained(checkpoint) def tokenize_function(example): return tokenizer(example["sentence1"], example["sentence2"], truncation=True) tokenized_datasets = raw_datasets.map(tokenize_function, batched=True) data_collator = DataCollatorWithPadding(tokenizer=tokenizer)Reusing dataset glue (/root/.cache/huggingface/datasets/glue/mrpc/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad) loading configuration file https://huggingface.co/bert-base-uncased/resolve/main/config.json from cache at /root/.cache/huggingface/transformers/3c61d016573b14f7f008c02c4e51a366c67ab274726fe2910691e2a761acf43e.37395cee442ab11005bcd270f3c34464dc1704b715b5d7d52b1a461abe3b9e4e Model config BertConfig { "architectures": [ "BertForMaskedLM" ], "attention_probs_dropout_prob": 0.1, "gradient_checkpointing": false, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "hidden_size": 768, "initializer_range": 0.02, "intermediate_size": 
3072, "layer_norm_eps": 1e-12, "max_position_embeddings": 512, "model_type": "bert", "num_attention_heads": 12, "num_hidden_layers": 12, "pad_token_id": 0, "position_embedding_type": "absolute", "transformers_version": "4.8.2", "type_vocab_size": 2, "use_cache": true, "vocab_size": 30522 [...]define modelfrom transformers import AutoModelForSequenceClassification training_args = TrainingArguments("test-trainer", evaluation_strategy="epoch") model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)PyTorch: setting up devices The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). loading configuration file https://huggingface.co/bert-base-uncased/resolve/main/config.json from cache at /root/.cache/huggingface/transformers/3c61d016573b14f7f008c02c4e51a366c67ab274726fe2910691e2a761acf43e.37395cee442ab11005bcd270f3c34464dc1704b715b5d7d52b1a461abe3b9e4e Model config BertConfig { "architectures": [ "BertForMaskedLM" ], "attention_probs_dropout_prob": 0.1, "gradient_checkpointing": false, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "hidden_size": 768, "initializer_range": 0.02, "intermediate_size": 3072, "layer_norm_eps": 1e-12, "max_position_embeddings": 512, "model_type": "bert", "num_attention_heads": 12, "num_hidden_layers": 12, "pad[...]define trainerfrom transformers import Trainer from datasets import load_metric import numpy as np def compute_metrics(eval_preds): metric = load_metric("glue", "mrpc") logits, labels = eval_preds predictions = np.argmax(logits, axis=-1) return metric.compute(predictions=predictions, references=labels) trainer = Trainer( model, training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation"], data_collator=data_collator, tokenizer=tokenizer, compute_metrics=compute_metrics )test untrained modelpredictions = trainer.predict(tokenized_datasets["validation"]) preds = np.argmax(predictions.predictions, axis=-1) metric = load_metric("glue", "mrpc") metric.compute(predictions=preds, references=predictions.label_ids)The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: sentence1, sentence2, idx. ***** Running Prediction ***** Num examples = 408 Batch size = 8training modeltrainer.train()The following columns in the training set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: sentence1, sentence2, idx. ***** Running training ***** Num examples = 3668 Num Epochs = 3 Instantaneous batch size per device = 8 Total train batch size (w. 
parallel, distributed & accumulation) = 8 Gradient Accumulation steps = 1 Total optimization steps = 1377test trained modeltrained_model = AutoModelForSequenceClassification.from_pretrained('/content/test-trainer/checkpoint-1000',num_labels=2) trainer = Trainer( trained_model, training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation"], data_collator=data_collator, tokenizer=tokenizer, compute_metrics=compute_metrics ) predictions = trainer.predict(tokenized_datasets["validation"]) preds = np.argmax(predictions.predictions, axis=-1) metric = load_metric("glue", "mrpc") metric.compute(predictions=preds, references=predictions.label_ids)loading configuration file /content/test-trainer/checkpoint-1000/config.json Model config BertConfig { "_name_or_path": "bert-base-uncased", "architectures": [ "BertForSequenceClassification" ], "attention_probs_dropout_prob": 0.1, "gradient_checkpointing": false, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "hidden_size": 768, "initializer_range": 0.02, "intermediate_size": 3072, "layer_norm_eps": 1e-12, "max_position_embeddings": 512, "model_type": "bert", "num_attention_heads": 12, "num_hidden_layers": 12, "pad_token_id": 0, "position_embedding_type": "absolute", "problem_type": "single_label_classification", "transformers_version": "4.8.2", "type_vocab_size": 2, "use_cache": true, "vocab_size": 30522 } loading weights file /content/test-trainer/checkpoint-1000/pytorch_model.bin All model checkpoint weights were used when initializing BertForSequenceClassification. All the weights of BertForSequenceClassification were initialize[...]deep into datasetsraw_datasets raw_datasets['validation'] raw_datasets['validation']['sentence1'][:5] raw_datasets.remove_columns(["sentence1"])CLASSIFICATIE STOMATA OP BEZONDE EN BESCHADUWDE BLADEREN In deze notebook zal je bezonde en beschaduwde bladeren van elkaar scheiden. De twee klassen zijn bij benadering lineair scheidbaar. Krappa of crabwood is een snel groeiende boomsoort die veelvuldig voorkomt in het Amazonegebied. Volwassen exemplaren kunnen een diameter hebben van meer dan een meter en kunnen meer dan 40 meter hoog zijn. Het hout van hoge kwaliteit wordt gebruikt voor het maken van meubelen, vloeren, masten... Uit de schors wordt een koorstwerend middel gehaald. Uit de zaden produceert men een olie voor medicinale toepassingen, waaronder de behandeling van huidziekten en tetanos, en als afweermiddel voor insecten. Foto's: Mauroguanandi [Public domain] [2] en [CC BY-SA 4.0] [3]. Omdat sommige klimaatmodellen een stijging van de temperatuur en een vermindering in regenval voorspellen in de komende decennia, is het belangrijk om te weten hoe deze bomen zich aanpassen aan veranderende omstandigheden. Wetenschappers Camargo en Marenco deden onderzoek in het Amazonewoud [1].Naast de invloed van seizoensgebonden regenval, bekeken ze ook stomatale kenmerken van bladeren onder bezonde en onder beschaduwde condities. Hiervoor werden een aantal planten, opgekweekt in de schaduw, verplaatst naar vol zonlicht gedurende 60 dagen. Een andere groep planten werd in de schaduw gehouden. De kenmerken van de stomata werden opgemeten op afdrukken van de bladeren gemaakt met transparante nagellak. Nodige modules importerenimport pandas as pd import matplotlib.pyplot as plt import numpy as np from sklearn.linear_model import LogisticRegression from matplotlib import animation from IPython.display import HTML1. 
Inlezen van de data Lees met de module `pandas` de dataset in.stomata = pd.read_csv(".data/schaduwzon.dat", header="infer") # in te lezen tabel heeft een hoofding2. Tonen van de ingelezen data 2.1 Tabel met de data Kijk de gegevens in.stomataWelke gegevens zijn kenmerken? Welk gegeven is het label? Deze gegevens kunnen worden gevisualiseerd met een puntenwolk. Welke matrices heb je daarvoor nodig? Antwoord:De plantensoort is overal dezelfde: Carapa. De kenmerken zijn de stomatale dichtheid en de stomatale grootte. Het aantal monsters is 50.Het label is het milieu waarin het monster werd geplukt: zon of schaduw.Om de puntenwolk weer te geven, heb je twee matrices nodig met dimensie 50x1. De onderzoekers zetten de stomatale dichtheid uit tegenover de stomatale lengte. Ga op dezelfde manier te werk. 2.2 De data weergeven in puntenwolkx1 = stomata["stomatale lengte"] # kenmerk: lengte x2 = stomata["stomatale dichtheid"] # kenmerk: dichtheid x1 = np.array(x1) # kenmerk: lengte x2 = np.array(x2) # kenmerk: dichtheid # dichtheid t.o.v. lengte plt.figure() plt.scatter(x1[:25], x2[:25], color="lightgreen", marker="o", label="zon") # zon zijn eerste 25 plt.scatter(x1[25:], x2[25:], color="darkgreen", marker="o", label="schaduw") # schaduw zijn de volgende 25 plt.title("Carapa") plt.xlabel("stomatale lengte (micron)") plt.ylabel("stomatale densiteit (per mm²)") plt.legend(loc="lower left") plt.show()3. Standaardiseren 3.1 Lineair scheidbaar? Er zijn twee groepen te onderscheiden. Ze zijn op enkele punten na lineair scheidbaar. De grootte-orde van deze gegevens is sterk verschillend. De gegevens moeten gestandaardiseerd worden. 3.2 Standaardiseren Meer uitleg over het belang van standaardiseren vind je in de notebook 'Standaardiseren'.x1_gem = np.mean(x1) x1_std = np.std(x1) x2_gem = np.mean(x2) x2_std = np.std(x2) x1 = (x1 - x1_gem) / x1_std x2 = (x2 - x2_gem) / x2_std # dichtheid t.o.v. lengte plt.figure() plt.scatter(x1[:25], x2[:25], color="lightgreen", marker="o", label="zon") # zon zijn eerste 25 plt.scatter(x1[25:], x2[25:], color="darkgreen", marker="o", label="schaduw") # schaduw zijn de volgende 25 plt.title("Carapa") plt.xlabel("gestandaardiseerde stomatale lengte (micron)") plt.ylabel("gestandaardiseerde stomatale densiteit (per mm²)") plt.legend(loc="lower left") plt.show()4. Classificatie met Perceptron 4.1 Geannoteerde data Het ML-systeem zal machinaal leren uit de 50 gelabelde voorbeelden. Lees de labels in.y = stomata["milieu"] # labels: tweede kolom van de oorspronkelijke tabel y = np.array(y) print(y) y = np.where(y == "zon", 1, 0) # labels numeriek maken, zon:1, schaduw:0 print(y) X = np.stack((x1, x2), axis = 1) # omzetten naar gewenste formaat4.2 Perceptron Als twee klassen lineair scheidbaar zijn, kan men een rechte vinden die beide klassen scheidt. Men kan de vergelijking van de scheidingslijn opschrijven in de vorm $ax+by+c=0$. Voor elk punt $(x_{1}, y_{1})$ in de ene klasse is dan $ax_{1}+by_{1}+c \geq 0$ en voor elk punt $(x_{2}, y_{2})$ in de andere klasse is dan $ax_{2} +by_{2}+c Zolang dit niet voldaan is, moeten de coëfficiënten worden aangepast.De trainingset met bijhorende labels wordt enkele keren doorlopen. Voor elk punt worden de coëfficiënten aangepast indien nodig. Er wordt een willekeurige rechte gekozen die de twee soorten bladeren zou moeten scheiden. Dit gebeurt door de coëfficiënten in de vergelijking van de rechte willekeurig te kiezen. Beide kanten van de scheidingslijn bepalen een andere klasse. 
Met systeem wordt getraind met de trainingset en de gegeven labels. Voor elk punt van de trainingset wordt nagegaan of het punt aan de juiste kant van de scheidingslijn ligt. Bij een punt die niet aan de juiste kant van de scheidingslijn ligt, worden de coëfficiënten in de vergelijking van de rechte aangepast. De volledige trainingset wordt een aantal keer doorlopen. Het systeem leert gedurende deze 'pogingen' of *epochs*.def grafiek(coeff_x1, coeff_x2, cte): """Plot scheidingsrechte ('decision boundary') en geeft vergelijking ervan.""" # stomatale densiteit t.o.v. lengte van stomata plt.figure() plt.scatter(x1[:25], x2[:25], color="lightgreen", marker="o", label="zon") # zon zijn eerste 25 (label 1) plt.scatter(x1[25:], x2[25:], color="darkgreen", marker="o", label="schaduw") # schaduw zijn de volgende 25 (label 0) x = np.linspace(-1.5, 1.5, 10) y_r = -coeff_x1/coeff_x2 * x - cte/coeff_x2 print("De grens is een rechte met vgl.", coeff_x1, "* x1 +", coeff_x2, "* x2 +", cte, "= 0") plt.plot(x, y_r, color="black") plt.title("Classificatie Carapa") plt.xlabel("gestandaardiseerde stomatale lengte (micron)") plt.ylabel("gestandaardiseerde stomatale densiteit (per mm²)") plt.legend(loc="lower left") plt.show() class Perceptron(object): """Perceptron classifier.""" def __init__(self, eta=0.01, n_iter=50, random_state=1): """self heeft drie parameters: leersnelheid, aantal pogingen, willekeurigheid.""" self.eta = eta self.n_iter = n_iter self.random_state = random_state def fit(self, X, y): """Fit training data.""" rgen = np.random.RandomState(self.random_state) # kolommatrix van de gewichten ('weights') # willekeurig gegenereerd uit normale verdeling met gemiddelde 0 en standaardafwijking 0.01 # aantal gewichten is aantal kenmerken in X plus 1 (+1 voor bias) self.w_ = rgen.normal(loc=0.0, scale=0.01, size=X.shape[1]+1) # gewichtenmatrix die 3 gewichten bevat print("Initiële willekeurige gewichten:", self.w_) self.errors_ = [] # foutenlijst # plot grafiek met initiële scheidingsrechte print("Initiële willekeurige rechte:") grafiek(self.w_[1], self.w_[2], self.w_[0]) gewichtenlijst = np.array([self.w_]) # gewichten punt per punt aanpassen, gebaseerd op feedback van de verschillende pogingen for _ in range(self.n_iter): print("epoch =", _) errors = 0 teller = 0 for x, label in zip(X, y): # x is datapunt, y overeenkomstig label print("teller =", teller) # tel punten, het zijn er acht print("punt:", x, "\tlabel:", label) gegiste_klasse = self.predict(x) print("gegiste klasse =", gegiste_klasse) # aanpassing nagaan voor dit punt update = self.eta * (label - gegiste_klasse) # als update = 0, juiste klasse, geen aanpassing nodig print("update =", update) # grafiek en gewichten eventueel aanpassen na dit punt if update !=0: self.w_[1:] += update *x self.w_[0] += update errors += update print("gewichten =", self.w_) # bepalen voorlopige 'decision boundary' gewichtenlijst = np.append(gewichtenlijst, [self.w_], axis =0) teller += 1 self.errors_.append(errors) # na alle punten, totale fout toevoegen aan foutenlijst print("foutenlijst =", self.errors_) return self, gewichtenlijst # geeft lijst gewichtenmatrices terug def net_input(self, x): # punt invullen in de voorlopige scheidingsrechte """Berekenen van z = lineaire combinatie van de inputs inclusief bias en de weights voor elke gegeven punt.""" return np.dot(x, self.w_[1:]) + self.w_[0] def predict(self, x): """Gist klasse.""" print("punt ingevuld in vergelijking rechte:", self.net_input(x)) klasse = np.where(self.net_input(x) >=0, 1, 0) return klasse # 
perceptron, leersnelheid 0.0001 en 20 pogingen ppn = Perceptron(eta=0.0001, n_iter=20) gewichtenlijst = ppn.fit(X,y)[1] print("Gewichtenlijst =", gewichtenlijst) # animatie xcoord = np.linspace(-1.5, 1.5, 10) ycoord = [] for w in gewichtenlijst: y_r = -w[1]/w[2] * xcoord - w[0]/w[2] ycoord.append(y_r) ycoord = np.array(ycoord) # type casting fig, ax = plt.subplots() line, = ax.plot(xcoord, ycoord[0]) plt.scatter(x1[:25], x2[:25], color="lightgreen", marker="o", label="zon") # zon zijn eerste 25 (label 1) plt.scatter(x1[25:], x2[25:], color="darkgreen", marker="o", label="schaduw") # schaduw zijn de volgende 25 (label 0) ax.axis([-2,2,-2,2]) def animate(i): line.set_ydata(ycoord[i]) # update de vergelijking van de rechte return line, plt.close() # om voorlopig plot-venster te sluiten, enkel animatiescherm nodig anim = animation.FuncAnimation(fig, animate, interval=1000, repeat=False, frames=len(ycoord)) HTML(anim.to_jshtml())Mooi resultaat! Maar nog niet optimaal. Opdracht 4.2Wellicht bieden meer iteraties nog een beter resultaat. Probeer eens uit. Omdat de klassen niet lineair scheidbaar zijn, zal het Perceptron er natuurlijk niet in slagen de fout op nul te krijgen. Daarom is er geen goede methode om de fout te meten en om te kijken of een optimale scheiding werd bekomen.Men zal daarom in machinaal leren geen Perceptron gebruiken, maar de klassen scheiden op een andere manier: met gradient descent en binary cross entropy. 5. Stap voor stap op zoek naar de scheidingslijn Er wordt nog steeds met gestandaardiseerde data gewerkt.# data x1 = stomata["stomatale lengte"] # kenmerk: lengte x2 = stomata["stomatale dichtheid"] # kenmerk: dichtheid x1 = np.array(x1) # kenmerk: lengte x2 = np.array(x2) # kenmerk: dichtheid y = stomata["milieu"] # labels: tweede kolom van de oorspronkelijke tabel y = np.array(y) y = np.where(y == "zon", 1, 0) # labels numeriek maken, zon:1, schaduw:0 # standaardiseren x1 = (x1 - np.mean(x1)) / np.std(x1) x2 = (x2 - np.mean(x2)) / np.std(x2) X = np.stack((x1, x2), axis = 1) # juiste formaat one_column = np.ones((X.shape[0],1)) X = np.concatenate((one_column, X), axis = 1) # 1 toevoegen bij elk punt # trainingset met input X(x1, x2) en output y print(X) print(y)5.1 Opbouw van het algoritme Zo'n scheidingslijn wordt gezocht met een algoritme. Hier zie je hoe zo'n algoritme is opgebouwd. Het ML-systeem is een neuraal netwerk zonder verborgen laag en met activatiefunctie de sigmoid-functie. Als foutenfunctie gebruikt me binary cross entropy.Om een rechte te vinden die de twee klassen van elkaar scheidt, vertrekt het ML-systeem van een willekeurig gekozen rechte. Dit gebeurt door de richtingscoëfficiënt en het snijpunt met de y-as van deze rechte willekeurig te kiezen. Het systeem wordt *getraind* met de trainingset (de inputs en de corresponderende labels): Voor elk punt van de trainingset wordt nagegaan hoeveel de fout bedraagt. De coëfficiënten in de vergelijking van de rechte worden aangepast totdat de fout minimaal is. De volledige trainingset wordt een aantal keer doorlopen. Zo'n keer noemt men een *epoch*. Het systeem *leert* gedurende deze *pogingen ('epochs')*. Het neuraal netwerk maakt eerst een lineaire combinatie van de input met de weights. Op dit resultaat werkt dan de **activatiefunctie** in. In dit neuraal netwerk is dat *sigmoid*. Voor elk datapunt geeft de sigmoid-functie een waarde terug tussen 0 en 1. 
Deze waarde geeft aan hoe zeker het systeem is dat het punt tot de klasse met label 1 behoort.def sigmoid(x): return 1 / (1 + np.exp(-x)) def predict(kenmerken, weights): """De voorspelling is een waarde die weergeeft hoe zeker het punt tot de klasse met label 1 behoort.""" z = np.dot(kenmerken, weights.T) voorspelling = sigmoid(z) return voorspellingHet systeem moet de fout kunnen berekenen na elke epoch. Daartoe wordt voor elk punt het residu $y-\hat{y}$ berekend. Hierbij is $y$ de gegeven y-waarde en $\hat{y}$ de voorspelde waarde, nl. de waarde die men bekomt door de gegeven x-waarde in te vullen in de vergelijking van de rechte. De kwadraten van de residu's worden bij elkaar opgeteld. Deze som gedeeld door het aantal datapunten is de gezochte fout.def bc(kenmerken, labels, weights): """Fout binary crossentropy berekenen.""" n = len(y) # aantal punten predictions = predict(kenmerken, weights) # huidige voorspelling #Take the error when label=1 class1_cost = - labels * np.log(predictions) #Take the error when label=0 class2_cost = (1 - labels) * np.log(1-predictions) #Take the sum of both costs kost = class1_cost + class2_cost #Take the average cost kost = kost.mean() return kost #def loss(h, y): # return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean() def gradient_descent(kenmerken, labels, weights, eta): """Aanpassing parameters q en m na voltooide epoch met learning rate eta.""" n = len(labels) # aantal punten is aantal waarden in lijst van labels y predictions = predict(kenmerken, weights) # bekijk huidige predictions #2 Transpose kenmerken X from (50, 3) to (3, 50) # So we can multiply with the (50,1) cost matrix. # Returns a (3,1) matrix holding 3 partial derivatives nl. naar w1 en w2 en w0 # berekenen van de partiële afgeleiden gradient = np.dot(kenmerken.T, (predictions - labels)) gradient = gradient / n # waarden weights aanpassen weights = weights - eta *gradient # aangepaste weights teruggeven return weights5.2 Uittesten van het algoritme van gradient descent voor meerdere epochs Neem (0; 1; 0,2) als initiële waarde voor de *weights*. Voer gradient descent uit voor 200 epochs met learning rate 0,01 en waarbij de aanpassingen van de *weights* en de fout na elke *epoch* wordt getoond.# algoritme testen w = np.array([0, 1, 0.2]) eta = 0.01 for j in range(200): fout = bc(X,y,w) # binary crossentropy berekenen na elke epoch print(j, w, fout) # waarden weights en fout tonen na elke epoch w = gradient_descent(X, y, w, eta) # waarden weights aanpassen na elke epoch print("De rechte snijdt de y-as in: %.3f" % (-w[0]/w[2])) print("De rechte heeft als rico: %.3f" % (-w[1]/w[2])) print("Binary crossentropy voor de rechte m.b.t. de data: %.4f" % fout)In het voorbeeld zie je dat het aantal epochs mee zal bepalen hoe nauwkeurig de scheidingslijn wordt bepaald. De rechte die men heeft gevonden na bv. 20 epochs ligt nog zeer ver van de beoogde scheidingslijn. Kijk ook hoe de fout verloopt, zolang deze in absolute waarde blijft dalen is ze nog niet geminimaliseerd, het systeem *underfit* dan. Blijkbaar wordt de fout wel weer groter. Misschien is de *learning rate* te groot. 
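Before moving on, it may help to see how the learning rate affects the final error. A small sketch, assuming the `bc` and `gradient_descent` functions and the standardized `X`, `y` defined above; the learning-rate values, the epoch count, and the names `eta_test`, `w_test` are illustrative.

```python
# Sketch: compare a few learning rates using the bc() and gradient_descent() functions defined above.
for eta_test in [0.001, 0.01, 0.1]:
    w_test = np.array([0, 1, 0.2])                 # same initial weights as in the example
    for _ in range(200):                           # 200 epochs, as in the test run above
        w_test = gradient_descent(X, y, w_test, eta_test)
    print(f"eta = {eta_test}: binary cross entropy after 200 epochs = {bc(X, y, w_test):.4f}")
```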
5.3 Hoe verandert de fout en de stand van de rechte gedurende het proces?def gradient_descent_proces(kenmerken, labels, weights, eta, epochs): """Proces doorlopen en gaandeweg ijsten maken van q, m en fout.""" lijst_fout = [bc(kenmerken, labels, weights)] # foutenlijst declareren en initialiseren lijst_weights = [weights] # lijst van weights declareren en initialiseren # Voor elke epoch lijsten aanvullen for i in range(epochs): weights = gradient_descent(kenmerken, labels, weights, eta) # aangepaste parameters na epoch fout = bc(kenmerken, labels, weights) # kost na epoch lijst_weights.append(weights) # aangepaste q toevoegen lijst_fout.append(fout) # deze kost toevoegen return [lijst_weights, lijst_fout]Het proces doorlopen voor gekozen beginwaarden voor de gewichten, gekozen *learning rate* en gekozen aantal *epochs*.# initialisatie van de weights w = np.array([0, 1, 0.2]) # vastleggen van aantal epochs en learning rate èta eta = 0.01 epochs = 1000 # algoritme lineaire regressie doorlopen voor keuze weights, èta en epochs lijst_weights, lijst_fout = gradient_descent_proces(X, y, w, eta, epochs) # scheidingslijn print ("Doorgang y-as: %.3f" % (-lijst_weights[-1][0]/lijst_weights[-1][2])) print ("Rico: %.3f" % (-lijst_weights[-1][1]/lijst_weights[-1][2])) # gemiddelde kwadratische afwijking regressielijn print ("Geminimaliseerde fout: %.4f" % lijst_fout[-1])Een animatie:# alle rechten xcoord = np.linspace(-2, 2, 30) ycoord = [] for j in range(epochs): y_r = (-lijst_weights[j][1]/lijst_weights[j][2]) * xcoord + (-lijst_weights[j][0]/lijst_weights[j][2]) # y-waarde berekenen van alle x'n uit xcoord voor betreffende rechte ycoord.append(y_r) ycoord = np.array(ycoord) # type casting # plot-venster initialiseren fig, ax = plt.subplots() line, = ax.plot(xcoord, ycoord[0], color="green") # rechte plotten ax.axis([x1.min()-1,x1.max()+1,x2.min()-1,x2.max()+1]) # bereik assen plt.title("Amazone zon-schaduw gestandaardiseerd") plt.xlabel("lengte stomata") # xlabel geeft een omschrijving op de x-as plt.ylabel("stomatale dichtheid") # ylabel geeft een omschrijving op de y-as plt.scatter(x1[:25], x2[:25], color="lightgreen", marker="o", label="zon") # zon zijn eerste 25 (label 1) plt.scatter(x1[25:], x2[25:], color="darkgreen", marker="o", label="schaduw") # schaduw zijn de volgende 25 (label 0) def animate(i): line.set_ydata(ycoord[i]) # update de vergelijking van de rechte return line, plt.close() # om voorlopig plot-venster te sluiten, enkel animatiescherm nodig anim = animation.FuncAnimation(fig, animate, repeat=False, frames=len(ycoord)) HTML(anim.to_jshtml()) # grafiek evolutie fout plt.figure(figsize=(10,8)) plt.plot(lijst_fout) plt.xlabel('epoch') plt.ylabel('binary cross entropy') plt.title('Evolutie van de fout') plt.show()Experimenteer met de *learning rate* en het aantal *epochs*. 6. 
Classificatie met scikit-learn# data x1 = stomata["stomatale lengte"] # kenmerk: lengte x2 = stomata["stomatale dichtheid"] # kenmerk: dichtheid x1 = np.array(x1) # kenmerk: lengte x2 = np.array(x2) # kenmerk: dichtheid y = stomata["milieu"] # labels: tweede kolom van de oorspronkelijke tabel y = np.array(y) y = np.where(y == "zon", 1, 0) # labels numeriek maken, zon:1, schaduw:0 # standaardiseren x1 = (x1 - np.mean(x1)) / np.std(x1) x2 = (x2 - np.mean(x2)) / np.std(x2) X = np.stack((x1, x2), axis = 1) # juiste formaat # dataset met input X(x1, x2) en output y print(X) print(y) # Classificatie met de scikit-learn Logistic Regression Classifier clf = LogisticRegression(C=1e5, solver='lbfgs', multi_class='multinomial') clf.fit(X,y) # bereik assen x_min, x_max = x1.min() - .5, x1.max() + .5 y_min, y_max = x2.min() - .5, x2.max() + .5 h = .01 # stap xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # rooster maken met xx en yy # np.c_[xx.ravel(), yy.ravel() is alle mogelijke coördinaten gevormd met de xx'n en de yy's z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # van al deze punten voorspellen tot welke klasse ze behoren # resultaat plotten # voorspellingen z staan in een rijmatrix, elke voorspelling moet weer overeenkomen met punt in rooster z = z.reshape(xx.shape) plt.figure(figsize=(10, 8)) # plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired) # plot datapunten # plt.scatter(x1, x2, c=y, edgecolors='k', cmap=plt.cm.Paired) plt.scatter(x1[:25], x2[:25], color="lightgreen", marker="o", label="zon") # zon zijn eerste 25 plt.scatter(x1[25:], x2[25:], color="darkgreen", marker="o", label="schaduw") # schaduw zijn de volgende 25 # plot scheidingslijn # voorspelling z heeft voor elk punt in rooster waarde 0 of 1, rooster wordt zo in twee delen verdeeld # grens tussen twee gebieden wordt getekend in zwart plt.contour(xx, yy, z, colors="black") plt.title("Carapa (gestandaardiseerd)") plt.xlabel("stomatale lengte") plt.ylabel("stomatale densiteit") plt.legend(loc="lower left") plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.show()Importing Librariesimport os import mdtraj as md import seaborn as sns import matplotlib.pyplot as plt import nglview as nv/home/diego/Myusr/opt/miniconda3/envs/UIBCDFLab_dev/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 return f(*args, **kwds) /home/diego/Myusr/opt/miniconda3/envs/UIBCDFLab_dev/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 return f(*args, **kwds) /home/diego/Myusr/opt/miniconda3/envs/UIBCDFLab_dev/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 return f(*args, **kwds) /home/diego/Myusr/opt/miniconda3/envs/UIBCDFLab_dev/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. 
Expected 96, got 88 return f(*args, **kwds) /home/diego/Myusr/opt/miniconda3/envs/UIBCDFLab_dev/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dty[...]Initial original CG trajdef adding_path(file_name): #return os.path.join('/home/diego/Projects/MOP_CG/elnedynp',file_name) return os.path.join('/home/diego/Trabajo/Proyectos/MOP_CG/elnedynp',file_name) pdb_file = adding_path('cell.pdb') traj_file = adding_path('cell.xtc') top_file = adding_path('topol.tpr')Test tng piece of trajectory with whole system (200 frames)traj_chunks_iterator = md.iterload(traj_file, top=pdb_file, chunk=200) first_200_frames = next(traj_chunks_iterator) first_200_frames.save('MOP_whole_200.tng') del(traj_chunks_iterator, first_200_frames)At this point `MOP_whole_200.tng` is stored together with the a copy of the original pdb `cell.pdb` renamed as `MOP_whole.pdb` in the directory `Examples/Data/MOP/CG` Test h5 piece of trajectory only with MOP (10000 frames)first_frame = md.load(traj_file, top=pdb_file, frame=0) beads_protein_indices = first_frame.topology.select('protein') mop_traj = md.load(traj_file, top=pdb_file, atom_indices=beads_protein_indices) mop_traj.slice(range(10000)).save('MOP.h5')/home/diego/Myusr/opt/miniconda3/envs/UIBCDFLab_dev/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88 return f(*args, **kwds)https://machinelearningmastery.com/how-to-develop-lstm-models-for-time-series-forecasting/ vanilla lstm# univariate lstm example from numpy import array from keras.models import Sequential from keras.layers import LSTM from keras.layers import Dense # split a univariate sequence into samples def split_sequence(sequence, n_steps): X, y = list(), list() for i in range(len(sequence)): # find the end of this pattern end_ix = i + n_steps # check if we are beyond the sequence if end_ix > len(sequence)-1: break # gather input and output parts of the pattern seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] X.append(seq_x) y.append(seq_y) return array(X), array(y) # define input sequence raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] # choose a number of time steps n_steps = 3 # split into samples X, y = split_sequence(raw_seq, n_steps) # reshape from [samples, timesteps] into [samples, timesteps, features] n_features = 1 X = X.reshape((X.shape[0], X.shape[1], n_features)) print(X.shape, y.shape) # define model model = Sequential() model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features))) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=200, verbose=0) print(model.summary()) print(model.input) # demonstrate prediction x_input = array([175, 186, 199]) x_input = x_input.reshape((1, n_steps, n_features)) yhat = model.predict(x_input, verbose=0) print(yhat)stacked lstmreturn_sequences=True# univariate stacked lstm example from numpy import array from keras.models import Sequential from keras.layers import LSTM from keras.layers import Dense # split a univariate sequence def split_sequence(sequence, n_steps): X, y = list(), list() for i in range(len(sequence)): # find the end of this pattern end_ix = i + n_steps # check if we are beyond the sequence if end_ix > len(sequence)-1: break # gather input and output parts of the pattern seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] X.append(seq_x) y.append(seq_y) return array(X), array(y) # define input sequence raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] # choose a number of time steps 
n_steps = 3 # split into samples X, y = split_sequence(raw_seq, n_steps) # reshape from [samples, timesteps] into [samples, timesteps, features] n_features = 1 X = X.reshape((X.shape[0], X.shape[1], n_features)) # define model model = Sequential() model.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(n_steps, n_features))) model.add(LSTM(50, activation='relu')) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=200, verbose=0) # demonstrate prediction x_input = array([120, 131, 142]) x_input = x_input.reshape((1, n_steps, n_features)) yhat = model.predict(x_input, verbose=0) print(yhat)Bidirectional lstmDoes it improve accuracy ?# univariate bidirectional lstm example from numpy import array from keras.models import Sequential from keras.layers import LSTM from keras.layers import Dense from keras.layers import Bidirectional # split a univariate sequence def split_sequence(sequence, n_steps): X, y = list(), list() for i in range(len(sequence)): # find the end of this pattern end_ix = i + n_steps # check if we are beyond the sequence if end_ix > len(sequence)-1: break # gather input and output parts of the pattern seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] X.append(seq_x) y.append(seq_y) return array(X), array(y) # define input sequence raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] # choose a number of time steps n_steps = 3 # split into samples X, y = split_sequence(raw_seq, n_steps) # reshape from [samples, timesteps] into [samples, timesteps, features] n_features = 1 X = X.reshape((X.shape[0], X.shape[1], n_features)) # define model model = Sequential() model.add(Bidirectional(LSTM(50, activation='relu'), input_shape=(n_steps, n_features))) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=200, verbose=0) # demonstrate prediction x_input = array([113, 123, 133]) x_input = x_input.reshape((1, n_steps, n_features)) yhat = model.predict(x_input, verbose=0) print(yhat)cnn lstm# univariate cnn lstm example from numpy import array from keras.models import Sequential from keras.layers import LSTM from keras.layers import Dense from keras.layers import Flatten from keras.layers import TimeDistributed from keras.layers.convolutional import Conv1D from keras.layers.convolutional import MaxPooling1D # split a univariate sequence into samples def split_sequence(sequence, n_steps): X, y = list(), list() for i in range(len(sequence)): # find the end of this pattern end_ix = i + n_steps # check if we are beyond the sequence if end_ix > len(sequence)-1: break # gather input and output parts of the pattern seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] X.append(seq_x) y.append(seq_y) return array(X), array(y) # define input sequence raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] # choose a number of time steps n_steps = 4 # split into samples X, y = split_sequence(raw_seq, n_steps) # reshape from [samples, timesteps] into [samples, subsequences, timesteps, features] n_features = 1 n_seq = 2 n_steps = 2 X = X.reshape((X.shape[0], n_seq, n_steps, n_features)) # define model model = Sequential() model.add(TimeDistributed(Conv1D(filters=64, kernel_size=1, activation='relu'), input_shape=(None, n_steps, n_features))) model.add(TimeDistributed(MaxPooling1D(pool_size=2))) model.add(TimeDistributed(Flatten())) model.add(LSTM(50, activation='relu')) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=500, verbose=0) # demonstrate 
prediction x_input = array([60, 70, 80, 90]) x_input = x_input.reshape((1, n_seq, n_steps, n_features)) yhat = model.predict(x_input, verbose=0) print(yhat)conv lstm# univariate convlstm example from numpy import array from keras.models import Sequential from keras.layers import LSTM from keras.layers import Dense from keras.layers import Flatten from keras.layers import ConvLSTM2D # split a univariate sequence into samples def split_sequence(sequence, n_steps): X, y = list(), list() for i in range(len(sequence)): # find the end of this pattern end_ix = i + n_steps # check if we are beyond the sequence if end_ix > len(sequence)-1: break # gather input and output parts of the pattern seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] X.append(seq_x) y.append(seq_y) return array(X), array(y) # define input sequence raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] # choose a number of time steps n_steps = 4 # split into samples X, y = split_sequence(raw_seq, n_steps) # reshape from [samples, timesteps] into [samples, timesteps, rows, columns, features] n_features = 1 n_seq = 2 n_steps = 2 X = X.reshape((X.shape[0], n_seq, 1, n_steps, n_features)) # define model model = Sequential() model.add(ConvLSTM2D(filters=64, kernel_size=(1,2), activation='relu', input_shape=(n_seq, 1, n_steps, n_features))) model.add(Flatten()) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=500, verbose=0) # demonstrate prediction x_input = array([60, 70, 80, 90]) x_input = x_input.reshape((1, n_seq, 1, n_steps, n_features)) yhat = model.predict(x_input, verbose=0) print(yhat)multivariate lstm - multiple input# multivariate lstm example from numpy import array from numpy import hstack from keras.models import Sequential from keras.layers import LSTM from keras.layers import Dense # split a multivariate sequence into samples def split_sequences(sequences, n_steps): X, y = list(), list() for i in range(len(sequences)): # find the end of this pattern end_ix = i + n_steps # check if we are beyond the dataset if end_ix > len(sequences): break # gather input and output parts of the pattern seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1, -1] X.append(seq_x) y.append(seq_y) return array(X), array(y) # define input sequence in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) # convert to [rows, columns] structure in_seq1 = in_seq1.reshape((len(in_seq1), 1)) in_seq2 = in_seq2.reshape((len(in_seq2), 1)) out_seq = out_seq.reshape((len(out_seq), 1)) # horizontally stack columns dataset = hstack((in_seq1, in_seq2, out_seq)) # choose a number of time steps n_steps = 3 # convert into input/output X, y = split_sequences(dataset, n_steps) # the dataset knows the number of features, e.g. 
2 n_features = X.shape[2] # define model model = Sequential() model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features))) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=200, verbose=0) # demonstrate prediction x_input = array([[80, 85], [90, 95], [100, 105]]) x_input = x_input.reshape((1, n_steps, n_features)) yhat = model.predict(x_input, verbose=0) print(yhat)multivariate - parallel input# multivariate output stacked lstm example from numpy import array from numpy import hstack from keras.models import Sequential from keras.layers import LSTM from keras.layers import Dense # split a multivariate sequence into samples def split_sequences(sequences, n_steps): X, y = list(), list() for i in range(len(sequences)): # find the end of this pattern end_ix = i + n_steps # check if we are beyond the dataset if end_ix > len(sequences)-1: break # gather input and output parts of the pattern seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix, :] X.append(seq_x) y.append(seq_y) return array(X), array(y) # define input sequence in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) # convert to [rows, columns] structure in_seq1 = in_seq1.reshape((len(in_seq1), 1)) in_seq2 = in_seq2.reshape((len(in_seq2), 1)) out_seq = out_seq.reshape((len(out_seq), 1)) # horizontally stack columns dataset = hstack((in_seq1, in_seq2, out_seq)) # choose a number of time steps n_steps = 3 # convert into input/output X, y = split_sequences(dataset, n_steps) # the dataset knows the number of features, e.g. 2 n_features = X.shape[2] # define model model = Sequential() model.add(LSTM(100, activation='relu', return_sequences=True, input_shape=(n_steps, n_features))) model.add(LSTM(100, activation='relu')) model.add(Dense(n_features)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=400, verbose=0) # demonstrate prediction x_input = array([[70,75,145], [80,85,165], [90,95,185]]) x_input = x_input.reshape((1, n_steps, n_features)) yhat = model.predict(x_input, verbose=0) print(yhat)multistep vector output# univariate multi-step vector-output stacked lstm example from numpy import array from keras.models import Sequential from keras.layers import LSTM from keras.layers import Dense # split a univariate sequence into samples def split_sequence(sequence, n_steps_in, n_steps_out): X, y = list(), list() for i in range(len(sequence)): # find the end of this pattern end_ix = i + n_steps_in out_end_ix = end_ix + n_steps_out # check if we are beyond the sequence if out_end_ix > len(sequence): break # gather input and output parts of the pattern seq_x, seq_y = sequence[i:end_ix], sequence[end_ix:out_end_ix] X.append(seq_x) y.append(seq_y) return array(X), array(y) # define input sequence raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] # choose a number of time steps n_steps_in, n_steps_out = 3, 2 # split into samples X, y = split_sequence(raw_seq, n_steps_in, n_steps_out) # reshape from [samples, timesteps] into [samples, timesteps, features] n_features = 1 X = X.reshape((X.shape[0], X.shape[1], n_features)) # define model model = Sequential() model.add(LSTM(100, activation='relu', return_sequences=True, input_shape=(n_steps_in, n_features))) model.add(LSTM(100, activation='relu')) model.add(Dense(n_steps_out)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=50, verbose=0) # demonstrate 
prediction x_input = array([70, 80, 90]) x_input = x_input.reshape((1, n_steps_in, n_features)) yhat = model.predict(x_input, verbose=0) print(yhat)encoder decoder# univariate multi-step encoder-decoder lstm example from numpy import array from keras.models import Sequential from keras.layers import LSTM from keras.layers import Dense from keras.layers import RepeatVector from keras.layers import TimeDistributed # split a univariate sequence into samples def split_sequence(sequence, n_steps_in, n_steps_out): X, y = list(), list() for i in range(len(sequence)): # find the end of this pattern end_ix = i + n_steps_in out_end_ix = end_ix + n_steps_out # check if we are beyond the sequence if out_end_ix > len(sequence): break # gather input and output parts of the pattern seq_x, seq_y = sequence[i:end_ix], sequence[end_ix:out_end_ix] X.append(seq_x) y.append(seq_y) return array(X), array(y) # define input sequence raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] # choose a number of time steps n_steps_in, n_steps_out = 3, 2 # split into samples X, y = split_sequence(raw_seq, n_steps_in, n_steps_out) # reshape from [samples, timesteps] into [samples, timesteps, features] n_features = 1 X = X.reshape((X.shape[0], X.shape[1], n_features)) y = y.reshape((y.shape[0], y.shape[1], n_features)) # define model model = Sequential() model.add(LSTM(100, activation='relu', input_shape=(n_steps_in, n_features))) model.add(RepeatVector(n_steps_out)) model.add(LSTM(100, activation='relu', return_sequences=True)) model.add(TimeDistributed(Dense(1))) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=100, verbose=0) # demonstrate prediction x_input = array([70, 80, 90]) x_input = x_input.reshape((1, n_steps_in, n_features)) yhat = model.predict(x_input, verbose=0) print(yhat)multivariate multi-step stacked lstm example# multivariate multi-step stacked lstm example from numpy import array from numpy import hstack from keras.models import Sequential from keras.layers import LSTM from keras.layers import Dense # split a multivariate sequence into samples def split_sequences(sequences, n_steps_in, n_steps_out): X, y = list(), list() for i in range(len(sequences)): # find the end of this pattern end_ix = i + n_steps_in out_end_ix = end_ix + n_steps_out-1 # check if we are beyond the dataset if out_end_ix > len(sequences): break # gather input and output parts of the pattern seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1:out_end_ix, -1] X.append(seq_x) y.append(seq_y) return array(X), array(y) # define input sequence in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) # convert to [rows, columns] structure in_seq1 = in_seq1.reshape((len(in_seq1), 1)) in_seq2 = in_seq2.reshape((len(in_seq2), 1)) out_seq = out_seq.reshape((len(out_seq), 1)) # horizontally stack columns dataset = hstack((in_seq1, in_seq2, out_seq)) # choose a number of time steps n_steps_in, n_steps_out = 3, 2 # covert into input/output X, y = split_sequences(dataset, n_steps_in, n_steps_out) # the dataset knows the number of features, e.g. 
2 n_features = X.shape[2] # define model model = Sequential() model.add(LSTM(100, activation='relu', return_sequences=True, input_shape=(n_steps_in, n_features))) model.add(LSTM(100, activation='relu')) model.add(Dense(n_steps_out)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=200, verbose=0) # demonstrate prediction x_input = array([[70, 75], [80, 85], [90, 95]]) x_input = x_input.reshape((1, n_steps_in, n_features)) yhat = model.predict(x_input, verbose=0) print(yhat)multivariate multi-step encoder-decoder lstm example# multivariate multi-step encoder-decoder lstm example from numpy import array from numpy import hstack from keras.models import Sequential from keras.layers import LSTM from keras.layers import Dense from keras.layers import RepeatVector from keras.layers import TimeDistributed # split a multivariate sequence into samples def split_sequences(sequences, n_steps_in, n_steps_out): X, y = list(), list() for i in range(len(sequences)): # find the end of this pattern end_ix = i + n_steps_in out_end_ix = end_ix + n_steps_out # check if we are beyond the dataset if out_end_ix > len(sequences): break # gather input and output parts of the pattern seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix:out_end_ix, :] X.append(seq_x) y.append(seq_y) return array(X), array(y) # define input sequence in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) # convert to [rows, columns] structure in_seq1 = in_seq1.reshape((len(in_seq1), 1)) in_seq2 = in_seq2.reshape((len(in_seq2), 1)) out_seq = out_seq.reshape((len(out_seq), 1)) # horizontally stack columns dataset = hstack((in_seq1, in_seq2, out_seq)) # choose a number of time steps n_steps_in, n_steps_out = 3, 2 # covert into input/output X, y = split_sequences(dataset, n_steps_in, n_steps_out) # the dataset knows the number of features, e.g. 2 n_features = X.shape[2] # define model model = Sequential() model.add(LSTM(200, activation='relu', input_shape=(n_steps_in, n_features))) model.add(RepeatVector(n_steps_out)) model.add(LSTM(200, activation='relu', return_sequences=True)) model.add(TimeDistributed(Dense(n_features))) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=300, verbose=0) # demonstrate prediction x_input = array([[60, 65, 125], [70, 75, 145], [80, 85, 165]]) x_input = x_input.reshape((1, n_steps_in, n_features)) yhat = model.predict(x_input, verbose=0) print(yhat)Prepare data for *Google Cloud AutoML Natural Language* from scikit-learn Overview This notebook demonstrates how to prepare text data available in scikit-learn (or other libraries), so that it can be used in [Google Cloud AutoML Natural Language](https://cloud.google.com/natural-language/automl).The script reads the data into a pandas dataframe, and then makes some minor transformations to ensure that it is compatible with the AutoML Natural Language input specification. Finally, the CSV is saved into a CSV file, which can be downloaded from the notebook server. Dataset This notebook downloads the [20 newsgroups dataset](https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html) using scikit-learn. This dataset contains about 18000 posts from 20 newsgroups, and is useful for text classification. More details on the dataset can be found [here](http://qwone.com/~jason/20Newsgroups/). Objectives There are 3 goals for this notebook:1. Introduce scikit-learn datasets2. 
Explore pandas dataframe text manipulation3. Import data into AutoML Natural Language for text classification What's next? After downloading the CSV at the end of this notebook, import the data into [Google Cloud AutoML Natural Language](https://cloud.google.com/natural-language/automl) to explore classifying text. Importsimport numpy as np import pandas as pd import csv from sklearn.datasets import fetch_20newsgroupsFetch datanewsgroups = fetch_20newsgroups(subset='all') df = pd.DataFrame(newsgroups.data, columns=['text']) df['categories'] = [newsgroups.target_names[index] for index in newsgroups.target] df.head()Clean data# Convert multiple whitespace characters into a space df['text'] = df['text'].str.replace('\s+',' ') # Trim leading and trailing whitespace df['text'] = df['text'].str.strip() # Truncate all fields to the maximum field length of 128kB df['text'] = df['text'].str.slice(0,131072) # Remove any rows with empty fields df = df.replace('', np.NaN).dropna() # Drop duplicates df = df.drop_duplicates(subset='text') # Limit rows to maximum of 100,000 df = df.sample(min(100000, len(df))) df.head()Export to CSVcsv_str = df.to_csv(index=False, header=False) with open("20-newsgroups-dataset.csv", "w") as text_file: print(csv_str, file=text_file)Code Profiling: Analyze the cProfile outputWe will use the `pstats` module to read and analyze the output of the profiler.import pstats mystats = pstats.Stats("profiler_output.txt")Display the top 15 functions that took the longest time to runEven in such a short script, Python called millions of functions! A lot of them are super quick and really not what we are after. Typically, we want to find the few functions that are bottlenecks in our code.The call below sorts the functions by how long they took to run, and prints out only the stats of the top 15 longest-running functions. The columns below mean: * ncalls: number of times this function was called * tottime: the total execution time spent in this code NOT including calls to other functions * percall: this first percall divides tottime by ncalls. The amount of time per call spent solely in this function. * cumtime: the total execution time spent in this code INCLUDING calls to sub functions. `cumtime` >= `tottime`, always. * percall: second percall divides cumtime by ncalls * filename: the name of the function being consideredBoth `tottime` and `cumtime` are useful. A function with a high `tottime` means we should focus on speeding up this function. A function with only a high `cumtime` means we should see what this function is calling to improve runtime. * `_newton_solver()` is a function with a high `tottime`. We should look at the lines in that function to check out how to speed it up. * `_logl()` is a function with a low `tottime` but a very high `cumtime`. We should look at the functions it calls to figure out what takes up so much time.Generally, we want to put some filters on `print_stats`, because otherwise there will be so much printed out that it is unmanageable.mystats.sort_stats("cumtime").print_stats(15)We can also apply multiple filters. For example, let's look at the top 15 numpy functions that took the most time to run. Note that the order of the filtering matters. The command below first selects every function with numpy in the name, followed by taking the top 15. Calling `print_stats(15, 'numpy')` would instead pick the top 15 longest-running functions and then down-select to the ones with numpy in the name from those. That would give us less than 15 numpy functions.
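To make the contrast concrete, the reversed restriction order would look like this (a small sketch, not part of the original notebook, reusing the `mystats` object created above):

# Reversed order: take the 15 slowest functions overall first, then keep
# only the entries with 'numpy' in the name -- usually fewer than 15 rows.
mystats.sort_stats("time").print_stats(15, 'numpy')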
mystats.sort_stats("time").print_stats('numpy', 15)Activity Part 1: Analyze the outputGenerate the output above on your own by running `python -m cProfile -o profiler_output.txt profile_orbitize.py` and answer the following questions by analyzing the output.1. Which function takes up the most runtime (not including calls to sub-functions)?2. Which function takes up the most runtime (including calls to sub-functions)?3. Which function is called the most? Which `orbitize` function is called the most?4. If we had the magical ability to speed up one function by a factor of 2, which function should we speed up? What is the improvement in end-to-end runtime of the script? Activity Part 2: Investigate why `_logl` takes so long`_logl()` is a helper function in `orbitize!` to compute the log likelihood of the data given the model. The `_logl()` function itself has a short runtime, but it calls something that takes a long time, which gives it a long `cumtime`. We can use the `print_callees()` function to look at the stats of all the functions it calls.We can see that `compute_model` is the function with the highest `cumtime`, but its `tottime` is low, so something it calls must be taking all the time. We must dig deeper! Keep digging down recursively to find the function called within `_logl()` that takes the longest.mystats.print_callees('_logl')Activity Part 3: Which function calls `numpy.array` the most?`numpy.array` is a popular function because it gets called anytime a new numpy array gets created. We can use `print_callers()` to see which functions call it to look into potentially reducing the number of array creations to speed up the code. Which function in `orbitize` calls `numpy.array` the most times per function call? (It's harder than it looks.)mystats.print_callers('numpy.array')
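As a closing note on this profiling exercise: the `profiler_output.txt` file analyzed above does not have to come from the command line. The same dump can be produced from inside a script or notebook; here is a minimal sketch, assuming `profile_orbitize.py` exposes a callable entry point (the name `main()` is hypothetical):

import cProfile
import pstats

profiler = cProfile.Profile()
profiler.enable()
main()  # hypothetical entry point; replace with the code you want to profile
profiler.disable()
profiler.dump_stats("profiler_output.txt")

# The dumped file can then be inspected exactly as above.
pstats.Stats("profiler_output.txt").strip_dirs().sort_stats("cumtime").print_stats(15)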
Preprocessing of input data for Yield-gap analysisData sources: - EarthStat yield gap data [source](http://www.earthstat.org/yield-gaps-climate-bins-major-crops/)%matplotlib inline %load_ext autoreload %autoreload 2 from pathlib import Path import math import matplotlib.pyplot as plt import numpy as np import pandas as pd import sys import xarray as xr from io import StringIO import re module_path = Path.cwd().resolve() if module_path not in sys.path: sys.path.append(module_path) data_path = Path('data') # calculate area grid for summary stats def calc_area(lat, pixeldegree): area_km2 = (110.45 * pixeldegree) * (111.1944 * pixeldegree) * math.cos(lat * (math.pi / 180.0)) area_ha = area_km2 * 100 return area_ha def create_area_grid(da, res=0.1): da_area = xr.zeros_like(da) da_area.attrs = {'long_name': 'area', 'units': 'ha'} da_area.name = 'area' for lat in da_area.lat.values: da_area.loc[{'lat': lat}] = calc_area(lat, res) return da_area def fix_coords(da, res=0.1): """Make sure that float32 and float64 have the exact same coordinates so we can compute with them""" ndigits = len(str(res).split('.')[-1])+1 for y, x in [('latitude', 'longitude'),('lat', 'lon'), ('y', 'x')]: if (y in list(da.coords)) and (x in list(da.coords)): return da.assign_coords({y: da[y].astype('float32').round(ndigits), x: da[x].astype('float32').round(ndigits)}) raise NotImplementedErrorYield gap dataygap = xr.open_rasterio(data_path / "maize_yieldgap.tif").squeeze(drop=True) ygap = fix_coords(ygap, res=0.083) ygap = ygap.where(ygap > -1000) ygap = ygap.rename({'x': 'lon', 'y': 'lat'}) ygap.plot(); ygap # load bioclimate data (import of resampled geotiff/ source: ArcGIS resampling) ypot = xr.open_rasterio(data_path / "maize_yieldpotential.tif").squeeze(drop=True) ypot = fix_coords(ypot, res=0.083) ypot = ypot.where(ypot > -1000) ypot = ypot.rename({'x': 'lon', 'y': 'lat'}) ypot.plot(); ypot # store the rectified data with xr.Dataset() as dsout: dsout['yieldgap'] = ygap.astype('float32') dsout['yieldpot'] = ypot.astype('float32') # sort latitude to go from neg to pos dsout = dsout.sortby(dsout.lat) dsout.to_netcdf('yielddata_maize_clean.nc') # load yield data layers again ygap = (xr.open_dataset("yielddata_maize_clean.nc")['yieldgap']).sel(lat=slice(-50,40), lon=slice(-30, 60)) ypot = (xr.open_dataset("yielddata_maize_clean.nc")['yieldpot']).sel(lat=slice(-50,40), lon=slice(-30, 60))Administrative data for summary stats and country maskwith xr.open_dataset(data_path / 'tmworld' / 'tmworld_un_HR.nc') as tm: admin = fix_coords(tm.sel(lat=slice(-50,40), lon=slice(-30, 60)), res=0.083) country = admin.rename({'Band1': 'COUNTRY'}) country.to_netcdf('countries_0083deg.nc') with xr.open_dataset(data_path / 'tmworld' / 'tmworld_region_HR.nc') as tm: admin = fix_coords(tm.sel(lat=slice(-50,40), lon=slice(-30, 60)), res=0.083) admin = admin.rename({'Band1': 'REGION'}) mask = admin.REGION.where(admin.REGION == 2).notnull() mask.to_netcdf('africa_mask_0083deg.nc')Cropping seasons/ rotationsdef read_asc(fname): SIZE = 0.083333333333 HSIZE = SIZE * 0.5 with open(fname, 'r') as f: lines = f.read().splitlines() data = np.array([[int(x) for x in line.split()] for line in lines[5:]]).astype('float') lats = np.arange(-35 + HSIZE, -35 + (876*SIZE), SIZE)[::-1] lons = np.arange(-18 + HSIZE, -18 + (840*SIZE), SIZE) da = xr.DataArray(np.zeros((876, 840), 'float'), coords=[('lat', lats),('lon', lons)], name='rotations') data[data==14] = np.nan da[:] = data - 1 return da # 
cropping seasons rotations = read_asc("data/n_seasons_africa_5m.asc") rotations = rotations.sortby(rotations.lat).reindex_like(mask, method='nearest', tolerance=0.01) rotations.plot();Create joined datasetwith xr.Dataset() as new: new["ygap"] = ygap new["ypot"] = ypot new["mask"] = mask new["ygapm"] = ygap.where(mask == True) new["ypotm"] = ypot.where(mask == True) new["area"] = create_area_grid(ygap, res=0.0833333333) new["rot"] = rotations.where(mask == True) new["country"] = country.COUNTRY.where(mask == True) new.to_netcdf("yieldgap_analysis_0083deg.nc")Extract potential yield for measurement sitesWe use the original dataset since the locations are from all over the world...# coordinates as provided by Sonja (cleaned) coords= """no latitude longitude 1 17° 35' S 31° 14' E 2 0° 6' N 34° 33' E 3 0° 0' N 34° 35' E 4 01° 15' S 36° 46' E 5 17° 42' S 31° 00' E 6 0° 8' N 34° 24' E 7 07° 15' S 37° 48' E 8 00° 47' S 37° 39' E 9 1˚ 05' S 37˚ 0' E 10 0˚ 34' N 34˚ 11' E 11 0˚ 08' N 34˚ 25' E 12 29° 43' S 53° 42' W 13 30° 06' S 51° 4' W 14 28° 15' S 52° 24' W 15 12° 00' S 46° 03' W 16 20° 44' N 101° 19' W 17 5° 17' N 52° 55' W 18 26° 45' N 111° 52' E 19 28° 37' N 116° 26' E 20 28° 19' N 113° 79' E 21 1° 26' S 120° 18' E 22 14° 09' N 121° 15' E 23 26° 58' S 151° 82' E""" # read and clean degree symbols df = pd.read_table(StringIO(coords.replace("˚", "°"))) # parse geo lat lon notation to decimals #https://stackoverflow.com/questions/33997361 def dms2dd(s): # example: s = """0°51'56.29"S""" degrees, minutes, direction = re.split('[°\']+', s) dd = float(degrees) + float(minutes)/60 #+ float(seconds)/(60*60); if direction.strip() in ('S','W'): dd*= -1 return dd df['latitude'] = df['latitude'].apply(dms2dd) df['longitude'] = df['longitude'].apply(dms2dd) df.head()Now extract potential yield from original data file for given coordinateswith xr.open_rasterio(data_path / "maize_yieldpotential.tif") as ypottif: ypottif = ypottif.where(ypottif > -1000) #.sel(x=slice(-30,60), y=slice(40, -50)) ypottif.plot(); # find valid data, searches surrounding pixels for given distances until # it findes valid data (up to a distance of 1.5 degrees) def find_valid(df, idx, row): value = df.loc[idx, "ypot"] = ypottif.sel(x=row.longitude, y=row.latitude, method='nearest').values if np.isnan(value): for eps in [0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 1.5]: for i, j in [(-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1), (-1,-1)]: LO, LA = i*eps, j*eps next_best = ypottif.sel(x=row.longitude+LO, y=row.latitude+LA, method='nearest').values if np.isnan(next_best) == False: print(f"{idx} - found next best with eps={eps} in direction {(LO,LA)}") df.loc[idx, "ypot"] = next_best df.loc[idx, "ypot_dist"] = eps return df else: df.loc[idx, "ypot"] = value return df df["ypot"] = np.nan df["ypot_dist"] = 0 for idx, row in df.iterrows(): df = find_valid(df, idx, row) df.to_csv("locations_with_ypot.csv") df.head()16 - found next best with eps=1.0 in direction (-1.0, 0.0) 18 - found next best with eps=0.025 in direction (-0.025, 0.0) 19 - found next best with eps=0.025 in direction (0.025, 0.025)Python AssertPython provides the assert statement to check if a given logical expression is true or false. Program execution proceeds only if the expression is true and raises the AssertionError when it is false. 
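One caveat worth keeping in mind: assert statements are removed entirely when Python runs with the -O (optimize) flag, so they are best suited to checking internal invariants rather than validating user input, where an explicit exception is safer. A brief sketch illustrating the distinction (not part of the original example):

def mean(values):
    # Internal sanity check: fine as an assert, since our own callers should
    # never pass an empty list. Note this check disappears under `python -O`.
    assert len(values) > 0, "mean() called with an empty list"
    return sum(values) / len(values)

def parse_age(text):
    # User-facing validation: raise a real exception instead of asserting,
    # so the check survives even when assertions are disabled.
    age = int(text)
    if age < 0:
        raise ValueError("age must be non-negative")
    return age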
The following code shows the usage of the assert statement.num=10 assert num>10 try: num=int(input("Enter the even number")) assert num%2==0 print("The number is even") except AssertionError: print("Please enter even number")Enter the even number13 Please enter even number![Imgur](https://i.imgur.com/5pXzCIu.png) Data Science va Sun'iy Intellekt Praktikum Ma'lumotlar tahlili. (NumPy kutubxonasi) NumPy kutubxonasini chaqirib olishimport numpy as npPython list bilan NumPy kutubxonasidagi massivlar (arraylar) hisoblashlari orasidagi farqni ko'ramiz.my_list = list(range(100000)) # python list 0~99999 -->Normal my_array = np.array(range(100000)) # numpy array(massiv) 0~99999 --> Vektorlashgan %time for _ in range(10): [x*2 for x in my_list] # Normal %time for _ in range(10): my_array*2 # Vektorlashgan 105/3.142003 to 2008 NYC Housing Datasetsdf_2003_1 = pd.read_excel('data/NYC_Sales_2003-2020/sales_manhattan_03.xls', header=3) df_2003_2 = pd.read_excel('data/NYC_Sales_2003-2020/sales_bronx_03.xls', header=3) df_2003_3 = pd.read_excel('data/NYC_Sales_2003-2020/sales_queens_03.xls', header=3) df_2003_4 = pd.read_excel('data/NYC_Sales_2003-2020/sales_brooklyn_03.xls', header=3) df_2003_5 = pd.read_excel('data/NYC_Sales_2003-2020/sales_si_03.xls', header=3) df_2003_1.info() df_2003 = pd.concat([df_2003_1, df_2003_2, df_2003_3, df_2003_4, df_2003_5]) df_2003.head() df_2003.shape df_2003.to_csv('data/df_2003.csv') df_2004_1 = pd.read_excel('data/NYC_Sales_2003-2020/sales_manhattan_04.xls', header=3) df_2004_2 = pd.read_excel('data/NYC_Sales_2003-2020/sales_bronx_04.xls', header=3) df_2004_3 = pd.read_excel('data/NYC_Sales_2003-2020/sales_queens_04.xls', header=3) df_2004_4 = pd.read_excel('data/NYC_Sales_2003-2020/sales_brooklyn_04.xls', header=3) df_2004_5 = pd.read_excel('data/NYC_Sales_2003-2020/sales_si_04.xls', header=3) df_2004_1.info() df_2004 = pd.concat([df_2004_1, df_2004_2, df_2004_3, df_2004_4, df_2004_5]) df_2004.head() df_2004.shape df_2004.to_csv('data/df_2004.csv') df_2005_1 = pd.read_excel('data/NYC_Sales_2003-2020/sales_manhattan_05.xls', header=3) df_2005_2 = pd.read_excel('data/NYC_Sales_2003-2020/sales_bronx_05.xls', header=3) df_2005_3 = pd.read_excel('data/NYC_Sales_2003-2020/sales_queens_05.xls', header=3) df_2005_4 = pd.read_excel('data/NYC_Sales_2003-2020/sales_brooklyn_05.xls', header=3) df_2005_5 = pd.read_excel('data/NYC_Sales_2003-2020/sales_si_05.xls', header=3) df_2005 = pd.concat([df_2005_1, df_2005_2, df_2005_3, df_2005_4, df_2005_5]) df_2005.head() df_2005.shape df_2005.to_csv('data/df_2005.csv') df_2006_1 = pd.read_excel('data/NYC_Sales_2003-2020/sales_manhattan_06.xls', header=3) df_2006_2 = pd.read_excel('data/NYC_Sales_2003-2020/sales_bronx_06.xls', header=3) df_2006_3 = pd.read_excel('data/NYC_Sales_2003-2020/sales_queens_06.xls', header=3) df_2006_4 = pd.read_excel('data/NYC_Sales_2003-2020/sales_brooklyn_06.xls', header=3) df_2006_5 = pd.read_excel('data/NYC_Sales_2003-2020/sales_si_06.xls', header=3) df_2006 = pd.concat([df_2006_1, df_2006_2, df_2006_3, df_2006_4, df_2006_5]) df_2006.head() df_2006.shape df_2006.to_csv('data/df_2006.csv') df_2007_1 = pd.read_excel('data/NYC_Sales_2003-2020/sales_2007_manhattan.xls', header=3) df_2007_2 = pd.read_excel('data/NYC_Sales_2003-2020/sales_2007_bronx.xls', header=3) df_2007_3 = pd.read_excel('data/NYC_Sales_2003-2020/sales_2007_queens.xls', header=3) df_2007_4 = pd.read_excel('data/NYC_Sales_2003-2020/sales_2007_brooklyn.xls', header=3) df_2007_5 = 
pd.read_excel('data/NYC_Sales_2003-2020/sales_2007_statenisland.xls', header=3) df_2007 = pd.concat([df_2007_1, df_2007_2, df_2007_3, df_2007_4, df_2007_5]) df_2007.head() df_2007.shape df_2007.to_csv('data/df_2007.csv') df_2008_1 = pd.read_excel('data/NYC_Sales_2003-2020/sales_2008_manhattan.xls', header=3) df_2008_2 = pd.read_excel('data/NYC_Sales_2003-2020/sales_2008_bronx.xls', header=3) df_2008_3 = pd.read_excel('data/NYC_Sales_2003-2020/sales_2008_queens.xls', header=3) df_2008_4 = pd.read_excel('data/NYC_Sales_2003-2020/sales_2008_brooklyn.xls', header=3) df_2008_5 = pd.read_excel('data/NYC_Sales_2003-2020/sales_2008_statenisland.xls', header=3) df_2008 = pd.concat([df_2008_1, df_2008_2, df_2008_3, df_2008_4, df_2008_5]) df_2008.head() df_2008.shape df_2008.to_csv('data/df_2008.csv') df_2003_08 = pd.concat([df_2003, df_2004, df_2005, df_2006, df_2007, df_2008]) df_2003_08.head() df_2003_08.shape df_2003_08.to_csv('data/df_2003_08.csv')2009 to 2015 NYC Housing Datasetdf_2009_1 = pd.read_excel('data/NYC_Sales_2003-2020/2009_manhattan.xls', header=3) df_2009_2 = pd.read_excel('data/NYC_Sales_2003-2020/2009_bronx.xls', header=3) df_2009_3 = pd.read_excel('data/NYC_Sales_2003-2020/2009_queens.xls', header=3) df_2009_4 = pd.read_excel('data/NYC_Sales_2003-2020/2009_brooklyn.xls', header=3) df_2009_5 = pd.read_excel('data/NYC_Sales_2003-2020/2009_statenisland.xls', header=3) df_2009_1.info() df_2009 = pd.concat([df_2009_1, df_2009_2, df_2009_3, df_2009_4, df_2009_5]) df_2009.head() df_2009.to_csv('data/df_2009.csv') df_2010_1 = pd.read_excel('data/NYC_Sales_2003-2020/2010_manhattan.xls', header=3) df_2010_2 = pd.read_excel('data/NYC_Sales_2003-2020/2010_bronx.xls', header=3) df_2010_3 = pd.read_excel('data/NYC_Sales_2003-2020/2010_queens.xls', header=3) df_2010_4 = pd.read_excel('data/NYC_Sales_2003-2020/2010_brooklyn.xls', header=3) df_2010_5 = pd.read_excel('data/NYC_Sales_2003-2020/2010_statenisland.xls', header=3) df_2010 = pd.concat([df_2010_1, df_2010_2, df_2010_3, df_2010_4, df_2010_5]) df_2010.head() df_2010.to_csv('data/df_2010.csv') df_2011_1 = pd.read_excel('data/NYC_Sales_2003-2020/2011_manhattan.xls', header=4) df_2011_2 = pd.read_excel('data/NYC_Sales_2003-2020/2011_bronx.xls', header=4) df_2011_3 = pd.read_excel('data/NYC_Sales_2003-2020/2011_queens.xls', header=4) df_2011_4 = pd.read_excel('data/NYC_Sales_2003-2020/2011_brooklyn.xls', header=4) df_2011_5 = pd.read_excel('data/NYC_Sales_2003-2020/2011_statenisland.xls', header=4) df_2011_1.info() df_2011 = pd.concat([df_2011_1, df_2011_2, df_2011_3, df_2011_4, df_2011_5]) df_2011.head() df_2011.to_csv('data/df_2011.csv') df_2012_1 = pd.read_excel('data/NYC_Sales_2003-2020/2012_manhattan.xls', header=4) df_2012_2 = pd.read_excel('data/NYC_Sales_2003-2020/2012_bronx.xls', header=4) df_2012_3 = pd.read_excel('data/NYC_Sales_2003-2020/2012_queens.xls', header=4) df_2012_4 = pd.read_excel('data/NYC_Sales_2003-2020/2012_brooklyn.xls', header=4) df_2012_5 = pd.read_excel('data/NYC_Sales_2003-2020/2012_statenisland.xls', header=4) df_2012 = pd.concat([df_2012_1, df_2012_2, df_2012_3, df_2012_4, df_2012_5]) df_2012.head() df_2012.columns = ['BOROUGH', 'NEIGHBORHOOD', 'BUILDING CLASS CATEGORY', 'TAX CLASS AT PRESENT', 'BLOCK', 'LOT', 'EASE-MENT', 'BUILDING CLASS AT PRESENT', 'ADDRESS', 'APARTMENT NUMBER', 'ZIP CODE', 'RESIDENTIAL UNITS', 'COMMERCIAL UNITS', 'TOTAL UNITS', 'LAND SQUARE FEET', 'GROSS SQUARE FEET', 'YEAR BUILT', 'TAX CLASS AT TIME OF SALE', 'BUILDING CLASS AT TIME OF SALE', 'SALE PRICE', 'SALE DATE'] df_2012.info() 
df_2012.to_csv('data/df_2012.csv') df_2013_1 = pd.read_excel('data/NYC_Sales_2003-2020/2013_manhattan.xls', header=4) df_2013_2 = pd.read_excel('data/NYC_Sales_2003-2020/2013_bronx.xls', header=4) df_2013_3 = pd.read_excel('data/NYC_Sales_2003-2020/2013_queens.xls', header=4) df_2013_4 = pd.read_excel('data/NYC_Sales_2003-2020/2013_brooklyn.xls', header=4) df_2013_5 = pd.read_excel('data/NYC_Sales_2003-2020/2013_statenisland.xls', header=4) df_2013 = pd.concat([df_2013_1, df_2013_2, df_2013_3, df_2013_4, df_2013_5]) df_2013.head() df_2013.columns = ['BOROUGH', 'NEIGHBORHOOD', 'BUILDING CLASS CATEGORY', 'TAX CLASS AT PRESENT', 'BLOCK', 'LOT', 'EASE-MENT', 'BUILDING CLASS AT PRESENT', 'ADDRESS', 'APARTMENT NUMBER', 'ZIP CODE', 'RESIDENTIAL UNITS', 'COMMERCIAL UNITS', 'TOTAL UNITS', 'LAND SQUARE FEET', 'GROSS SQUARE FEET', 'YEAR BUILT', 'TAX CLASS AT TIME OF SALE', 'BUILDING CLASS AT TIME OF SALE', 'SALE PRICE', 'SALE DATE'] df_2013.to_csv('data/df_2013.csv') df_2014_1 = pd.read_excel('data/NYC_Sales_2003-2020/2014_manhattan.xls', header=4) df_2014_2 = pd.read_excel('data/NYC_Sales_2003-2020/2014_bronx.xls', header=4) df_2014_3 = pd.read_excel('data/NYC_Sales_2003-2020/2014_queens.xls', header=4) df_2014_4 = pd.read_excel('data/NYC_Sales_2003-2020/2014_brooklyn.xls', header=4) df_2014_5 = pd.read_excel('data/NYC_Sales_2003-2020/2014_statenisland.xls', header=4) df_2014 = pd.concat([df_2014_1, df_2014_2, df_2014_3, df_2014_4, df_2014_5]) df_2014.head() df_2014.columns = ['BOROUGH', 'NEIGHBORHOOD', 'BUILDING CLASS CATEGORY', 'TAX CLASS AT PRESENT', 'BLOCK', 'LOT', 'EASE-MENT', 'BUILDING CLASS AT PRESENT', 'ADDRESS', 'APARTMENT NUMBER', 'ZIP CODE', 'RESIDENTIAL UNITS', 'COMMERCIAL UNITS', 'TOTAL UNITS', 'LAND SQUARE FEET', 'GROSS SQUARE FEET', 'YEAR BUILT', 'TAX CLASS AT TIME OF SALE', 'BUILDING CLASS AT TIME OF SALE', 'SALE PRICE', 'SALE DATE'] df_2014.to_csv('data/df_2014.csv') df_2015_1 = pd.read_excel('data/NYC_Sales_2003-2020/2015_manhattan.xls', header=4) df_2015_2 = pd.read_excel('data/NYC_Sales_2003-2020/2015_bronx.xls', header=4) df_2015_3 = pd.read_excel('data/NYC_Sales_2003-2020/2015_queens.xls', header=4) df_2015_4 = pd.read_excel('data/NYC_Sales_2003-2020/2015_brooklyn.xls', header=4) df_2015_5 = pd.read_excel('data/NYC_Sales_2003-2020/2015_statenisland.xls', header=4) df_2015 = pd.concat([df_2015_1, df_2015_2, df_2015_3, df_2015_4, df_2015_5]) df_2015.head() df_2015.columns = ['BOROUGH', 'NEIGHBORHOOD', 'BUILDING CLASS CATEGORY', 'TAX CLASS AT PRESENT', 'BLOCK', 'LOT', 'EASE-MENT', 'BUILDING CLASS AT PRESENT', 'ADDRESS', 'APARTMENT NUMBER', 'ZIP CODE', 'RESIDENTIAL UNITS', 'COMMERCIAL UNITS', 'TOTAL UNITS', 'LAND SQUARE FEET', 'GROSS SQUARE FEET', 'YEAR BUILT', 'TAX CLASS AT TIME OF SALE', 'BUILDING CLASS AT TIME OF SALE', 'SALE PRICE', 'SALE DATE'] df_2015.info() df_2015.to_csv('data/df_2015.csv') df_2009_15 = pd.concat([df_2009, df_2010, df_2011, df_2012, df_2013, df_2014, df_2015]) df_2009_15.head() df_2009_15.info() df_2009_15.shape df_2009_15.to_csv('data/df_2009_15.csv')2016 to 2020 NYC Housing Datasetdf_2016_1 = pd.read_excel('data/NYC_Sales_2003-2020/2016_manhattan.xls', header=4) df_2016_2 = pd.read_excel('data/NYC_Sales_2003-2020/2016_bronx.xls', header=4) df_2016_3 = pd.read_excel('data/NYC_Sales_2003-2020/2016_queens.xls', header=4) df_2016_4 = pd.read_excel('data/NYC_Sales_2003-2020/2016_brooklyn.xls', header=4) df_2016_5 = pd.read_excel('data/NYC_Sales_2003-2020/2016_statenisland.xls', header=4) df_2016 = pd.concat([df_2016_1, df_2016_2, df_2016_3, df_2016_4, 
df_2016_5]) df_2016.head() df_2016.columns = ['BOROUGH', 'NEIGHBORHOOD', 'BUILDING CLASS CATEGORY', 'TAX CLASS AT PRESENT', 'BLOCK', 'LOT', 'EASE-MENT', 'BUILDING CLASS AT PRESENT', 'ADDRESS', 'APARTMENT NUMBER', 'ZIP CODE', 'RESIDENTIAL UNITS', 'COMMERCIAL UNITS', 'TOTAL UNITS', 'LAND SQUARE FEET', 'GROSS SQUARE FEET', 'YEAR BUILT', 'TAX CLASS AT TIME OF SALE', 'BUILDING CLASS AT TIME OF SALE', 'SALE PRICE', 'SALE DATE'] df_2016.info() df_2016.to_csv('data/df_2016.csv') df_2017_1 = pd.read_excel('data/NYC_Sales_2003-2020/2017_manhattan.xls', header=4) df_2017_2 = pd.read_excel('data/NYC_Sales_2003-2020/2017_bronx.xls', header=4) df_2017_3 = pd.read_excel('data/NYC_Sales_2003-2020/2017_queens.xls', header=4) df_2017_4 = pd.read_excel('data/NYC_Sales_2003-2020/2017_brooklyn.xls', header=4) df_2017_5 = pd.read_excel('data/NYC_Sales_2003-2020/2017_statenisland.xls', header=4) df_2017 = pd.concat([df_2017_1, df_2017_2, df_2017_3, df_2017_4, df_2017_5]) df_2017.head() df_2017.columns = ['BOROUGH', 'NEIGHBORHOOD', 'BUILDING CLASS CATEGORY', 'TAX CLASS AT PRESENT', 'BLOCK', 'LOT', 'EASE-MENT', 'BUILDING CLASS AT PRESENT', 'ADDRESS', 'APARTMENT NUMBER', 'ZIP CODE', 'RESIDENTIAL UNITS', 'COMMERCIAL UNITS', 'TOTAL UNITS', 'LAND SQUARE FEET', 'GROSS SQUARE FEET', 'YEAR BUILT', 'TAX CLASS AT TIME OF SALE', 'BUILDING CLASS AT TIME OF SALE', 'SALE PRICE', 'SALE DATE'] df_2017.info() df_2017.to_csv('data/df_2017.csv') dtype={'APARTMENT NUMBER\n': np.int32, 'ZIP CODE\n': np.int32, 'RESIDENTIAL UNITS\n': np.int32, 'COMMERCIAL UNITS\n': np.int32, 'TOTAL UNITS\n': np.int32, 'LAND SQUARE FEET\n': np.int32, 'GROSS SQUARE FEET\n': np.int32, 'YEAR BUILT\n': np.int32 } df_2018_1 = pd.read_excel('data/NYC_Sales_2003-2020/2018_manhattan.xlsx', header=4) df_2018_2 = pd.read_excel('data/NYC_Sales_2003-2020/2018_bronx.xlsx', header=4) df_2018_3 = pd.read_excel('data/NYC_Sales_2003-2020/2018_queens.xlsx', header=4) df_2018_4 = pd.read_excel('data/NYC_Sales_2003-2020/2018_brooklyn.xlsx', header=4) df_2018_5 = pd.read_excel('data/NYC_Sales_2003-2020/2018_statenisland.xlsx', header=4) df_2018 = pd.concat([df_2018_1, df_2018_2, df_2018_3, df_2018_4, df_2018_5]) df_2018.head() df_2018.columns = ['BOROUGH', 'NEIGHBORHOOD', 'BUILDING CLASS CATEGORY', 'TAX CLASS AT PRESENT', 'BLOCK', 'LOT', 'EASE-MENT', 'BUILDING CLASS AT PRESENT', 'ADDRESS', 'APARTMENT NUMBER', 'ZIP CODE', 'RESIDENTIAL UNITS', 'COMMERCIAL UNITS', 'TOTAL UNITS', 'LAND SQUARE FEET', 'GROSS SQUARE FEET', 'YEAR BUILT', 'TAX CLASS AT TIME OF SALE', 'BUILDING CLASS AT TIME OF SALE', 'SALE PRICE', 'SALE DATE'] df_2018.info() df_2018.to_csv('data/df_2018.csv') df_2019_1 = pd.read_excel('data/NYC_Sales_2003-2020/2019_manhattan.xlsx', header=4) df_2019_2 = pd.read_excel('data/NYC_Sales_2003-2020/2019_bronx.xlsx', header=4) df_2019_3 = pd.read_excel('data/NYC_Sales_2003-2020/2019_queens.xlsx', header=4) df_2019_4 = pd.read_excel('data/NYC_Sales_2003-2020/2019_brooklyn.xlsx', header=4) df_2019_5 = pd.read_excel('data/NYC_Sales_2003-2020/2019_statenisland.xlsx', header=4) df_2019 = pd.concat([df_2019_1, df_2019_2, df_2019_3, df_2019_4, df_2019_5]) df_2019.head() df_2019.columns = ['BOROUGH', 'NEIGHBORHOOD', 'BUILDING CLASS CATEGORY', 'TAX CLASS AT PRESENT', 'BLOCK', 'LOT', 'EASE-MENT', 'BUILDING CLASS AT PRESENT', 'ADDRESS', 'APARTMENT NUMBER', 'ZIP CODE', 'RESIDENTIAL UNITS', 'COMMERCIAL UNITS', 'TOTAL UNITS', 'LAND SQUARE FEET', 'GROSS SQUARE FEET', 'YEAR BUILT', 'TAX CLASS AT TIME OF SALE', 'BUILDING CLASS AT TIME OF SALE', 'SALE PRICE', 'SALE DATE'] 
df_2019.to_csv('data/df_2019.csv') df_2020_1 = pd.read_excel('data/NYC_Sales_2003-2020/2020_manhattan.xlsx', header=6) df_2020_2 = pd.read_excel('data/NYC_Sales_2003-2020/2020_bronx.xlsx', header=6) df_2020_3 = pd.read_excel('data/NYC_Sales_2003-2020/2020_queens.xlsx', header=6) df_2020_4 = pd.read_excel('data/NYC_Sales_2003-2020/2020_brooklyn.xlsx', header=6) df_2020_5 = pd.read_excel('data/NYC_Sales_2003-2020/2020_staten_island.xlsx', header=6) df_2020 = pd.concat([df_2020_1, df_2020_2, df_2020_3, df_2020_4, df_2020_5]) df_2020.head() df_2020.columns = ['BOROUGH', 'NEIGHBORHOOD', 'BUILDING CLASS CATEGORY', 'TAX CLASS AT PRESENT', 'BLOCK', 'LOT', 'EASE-MENT', 'BUILDING CLASS AT PRESENT', 'ADDRESS', 'APARTMENT NUMBER', 'ZIP CODE', 'RESIDENTIAL UNITS', 'COMMERCIAL UNITS', 'TOTAL UNITS', 'LAND SQUARE FEET', 'GROSS SQUARE FEET', 'YEAR BUILT', 'TAX CLASS AT TIME OF SALE', 'BUILDING CLASS AT TIME OF SALE', 'SALE PRICE', 'SALE DATE'] df_2020.to_csv('data/df_2020.csv') df_2016_20 = pd.concat([df_2016, df_2017, df_2018, df_2019, df_2020]) df_2016_20.head() df_2016_20.info() df_2016_20.to_csv('data/df_2016_20.csv')2003-2020 NYC Housing Datasetdf_2003_08 = pd.read_csv('data/df_2003_08.csv', index_col=0) df_2003_08.head() df_2009_15 = pd.read_csv('data/df_2009_15.csv', index_col=0) df_2009_15.head() df_2016_20 = pd.read_csv('data/df_2016_20.csv', index_col=0) df_2016_20.head() df = pd.concat([df_2003_08, df_2009_15, df_2016_20]) df.head() df.to_csv('data/df_2003_20.csv')Testing Datasettest_1 = pd.read_excel('data/nyc_sales/rollingsales_manhattan.xlsx', header=4) test_2 = pd.read_excel('data/nyc_sales/rollingsales_bronx.xlsx', header=4) test_3 = pd.read_excel('data/nyc_sales/rollingsales_queens.xlsx', header=4) test_4 = pd.read_excel('data/nyc_sales/rollingsales_brooklyn.xlsx', header=4) test_5 = pd.read_excel('data/nyc_sales/rollingsales_statenisland.xlsx', header=4) test = pd.concat([test_1, test_2, test_3, test_4, test_5]) test.head() test.to_csv('data/test.csv') import pandas as pd new = pd.read_csv('data/NYC_Citywide_Annualized_Calendar_Sales_Update.csv') new.head() new.shape new.BLOCK = new.BLOCK.astype(str) new.BLOCK.value_counts()NAMED ENTITY RECOGNITION:1. The named entities are pre-defined categories chosen according to the use case such as names of people, organizations, places, codes, time notations, monetary values, etc. 
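In token-level datasets like the one used below, these categories are usually encoded as one label per word, typically in a BIO-style scheme (B- marks the beginning of an entity, I- its continuation, O a token outside any entity). A small illustrative example in the sentence_id / words / labels layout prepared further below (the exact tag names in ner_dataset.csv may differ):

import pandas as pd

# One row per token, grouped by sentence_id -- purely illustrative data.
example = pd.DataFrame({
    "sentence_id": [0, 0, 0, 0, 0, 0],
    "words": ["John", "works", "at", "Google", "in", "London"],
    "labels": ["B-PER", "O", "O", "B-ORG", "O", "B-GEO"],
})
print(example)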
Importing Packageimport pandas as pd from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from simpletransformers.ner import NERModel,NERArgs import nltk import string from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer import re data = pd.read_csv("ner_dataset.csv",encoding="latin1" ) data.head() data =data.fillna(method ="ffill") data.head() data["Sentence #"] = LabelEncoder().fit_transform(data["Sentence #"] ) data.head() data.rename(columns={"Sentence #":"sentence_id","Word":"words","Tag":"labels"}, inplace =True) data["labels"] = data["labels"].str.upper() X= data[["sentence_id","words"]] Y =data["labels"] x_train, x_test, y_train, y_test = train_test_split(X,Y, test_size =0.2) train_data = pd.DataFrame({"sentence_id":x_train["sentence_id"],"words":x_train["words"],"labels":y_train}) test_data = pd.DataFrame({"sentence_id":x_test["sentence_id"],"words":x_test["words"],"labels":y_test})Model Traininglabel = data["labels"].unique().tolist() label args = NERArgs() args.num_train_epochs = 1 args.learning_rate = 1e-4 args.overwrite_output_dir =True args.train_batch_size = 32 args.eval_batch_size = 32 model = NERModel('bert', 'bert-base-cased',labels=label,args =args) model.train_model(train_data,eval_data = test_data,acc=accuracy_score) result, model_outputs, preds_list = model.eval_model(test_data) result stop_words = stopwords.words("english") wordnet = WordNetLemmatizer() def clean_data(x): x = ' '.join([word for word in x.split(' ') if word not in stop_words]) x = x.encode('ascii', 'ignore').decode() x = re.sub(r'https*\S+', ' ', x) x = re.sub(r'@\S+', ' ', x) x = re.sub(r'#\S+', ' ', x) x = re.sub(r'\'\w+', '', x) x = re.sub('[%s]' % re.escape(string.punctuation), ' ', x) x = re.sub(r'\w*\d+\w*', '', x) x = re.sub(r'\s{2,}', ' ', x) return x user = input() user_clean_data = clean_data(user) prediction, model_output = model.predict([user_clean_data]) prediction dataframe = pd.DataFrame([[user,user_clean_data,prediction]], columns=["text", "clean","extracted"]) dataframeKlasteryzacja Czym jest klasteryzacja?Najprościej — szukaniem skupień (klastrów).![](https://miro.medium.com/max/561/0*ff7kw5DRQbs_uixR.jpg)Żródło: https://www.kdnuggets.com/2019/09/hierarchical-clustering.html Po co?* Aby znaleźć „naturalne” podziały w zbiorze.* Aby zaproponować podział na klasy.* Aby ułatwić opis (klastrom można przyporządkować etykiety i do pewnego stopnia traktować jako całość).* ... Czym właściwie jest klaster?Nie mamy jednej definicji. Na ogół — grupa podobnych obiektów. Różne algorytmy rożnie „rozumieją” podobieństwo i różnie go szukają.Zasadniczo metody dzielimy na metody hierarchiczne i kombinatoryczne. Zacznijmy od kombinatorycznych. Metoda k-średnichPomysł jest prosty:1. Zakładamy, że w zbiorze jest k klastrów.2. Wybieramy k punktów będącymi początkowymi położeniami środków naszych klastrów.![](https://upload.wikimedia.org/wikipedia/commons/5/5e/K_Means_Example_Step_1.svg)3. Określamy przynależność do klastrów jako przynależność do klastra „generowanego” przez najbliższy środek.![](https://upload.wikimedia.org/wikipedia/commons/a/a5/K_Means_Example_Step_2.svg)4. Aktualizujemy położenie środków klastrów jako środek masy punków należących do klastra.![](https://upload.wikimedia.org/wikipedia/commons/3/3e/K_Means_Example_Step_3.svg)5. Sprawdzamy, czy przynależność jakiegokolwiek punktu zmieniła się po wyznaczeniu nowych środków. Jeśli tak — wracamy do punktu 3. 
Jeśli nie — kończymy działanie.Źródło ilustracji: https://en.wikipedia.org/wiki/K-means_clusteringDziałanie algorytmu wygląda następująco:![](https://miro.medium.com/max/960/1*KrcZK0xYgTa4qFrVr0fO2w.gif)Żródło: https://towardsdatascience.com/the-5-clustering-algorithms-data-scientists-need-to-know-a36d136ef68# Nareszcie kod! from sklearn.datasets import make_blobs import matplotlib.pyplot as plt X, _ = make_blobs(n_samples=300, centers=4, cluster_std=0.9, random_state=314) plt.scatter(X[:,0], X[:,1]) plt.show() from sklearn.cluster import KMeans def plot_kmeans_clusters(X, n_clusters): kmeans = KMeans(n_clusters=n_clusters, random_state=0) kmeans.fit(X) y_kmeans = kmeans.predict(X) plt.scatter(X[:, 0], X[:, 1], c=y_kmeans, s=30, cmap='viridis') centers = kmeans.cluster_centers_ plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.75) plt.title('K-means clusters') plt.show() plot_kmeans_clusters(X, n_clusters=4)A co jeśli podamy złe k?plot_kmeans_clusters(X, n_clusters=6)Zatem jak dobrać liczbę skupień? Metoda łokciaZacznijmy od tego, że algorytm k-średnich minimalizuję wewnątrzklastrową sumę kwadratów (ang. within-cluster sum of squares):$$\underset{\mathbf{S}}{\operatorname{argmin}}\sum_{i=1}^{k}\sum_{\mathbf{x} \in S_i} {\lVert \mathbf{x} - \mu_i \rVert^2} = \underset{\mathbf{S}}{\operatorname{argmin}}\sum_{i=1}^{k} \ S_i \mathrm{Var} (S_i)$$Liczymy sumę odległości punktu od środka skupienia (możemy tutaj użyć różnych metryk, my zastosujemy kwadrat odległości $L_2$, ponieważ jest to naturalna metryka dla naszego algorytmu). Rysujemy wykres tejże odległości w zależności od liczby klastrów i wybieramy punkt „przegięcia”. Wygląda on jak łokieć — stąd nazwa metody.def count_wcss_scores(X, k_max): # WCSS = within-cluster sum of squares scores = [] for k in range(1, k_max+1): kmeans = KMeans(n_clusters=k, random_state=0) kmeans.fit(X) wcss = kmeans.score(X) * -1 # score returns -WCSS scores.append(wcss) return scores wcss_vec = count_wcss_scores(X, 10) x_ticks = list(range(1, len(wcss_vec) + 1)) plt.plot(x_ticks, wcss_vec, 'bx-') plt.xlabel('k') plt.ylabel('Within-cluster sum of squares') plt.title('The Elbow Method showing the optimal k') plt.show()Oczywiście jest to metoda subiektywna i czasami wybór łokcia jest nieoczywisty. Dlatego często stosuje się inną metodę: Metoda silhouetteZdefiniujmy:$$ a(i) := \frac{1}{\ C(i) -1 } \sum_{j \in C(i), i \neq j} d(i, j)$$Gdzie: $i$ — indeks punktu, $C(i)$ — klaster, do którego należy $i$-ty punkt, $d(i, j)$ — odległość między $i$-tym i $j$-tym punktem.Liczbę $a$ możemy interpretować jako średnią odległość od punktu w tym samym klastrze.$$ b(i) := \underset{k: C_k \bigcap C(i) = \emptyset}{min} \frac{1}{\ C_k} \sum_{j \in C_k} d(i, j) $$$C_k$ to $k$-ty klaster. Nie mylić z oznaczeniem $C(i)$, czyli klastrem, do którego należy $i$-ty punkt.Liczba $b$ to po prostu średnia odległość od punktów tego klastra, który jest „drugim najlepszym” dla $i$-tego punktu. Jeśli $\ C(i) \neq 1$ definiujemy: $$ s(i) := \frac{b(i) - a(i)}{\max{ \{ a(i), b(i) \} }} $$Dla kompletności dodajmy, że jeśli $\ C(i) = 1$, wtedy:$$ s(i) := 0 $$Zatem $s(i)$ mówi nam o tym, jak dobrze dany punkt pasuje do klastra, do którego został przyporządkowany w porównaniu z tym klastrem, który jest „drugim wyborem".Ostatecznie nasza miara silhouette to średnia $s(i)$ po wszystkich punktach:$$ \frac{\sum_{i=1}^{n} s(i)}{n} $$Oczywiście $n$ to liczba wszystkich punktów.Największa wada? 
Premiowanie "okrągłych" klastrów.# A w praktyce wygląda to tak: def count_clustering_scores(X, cluster_num, model, score_fun): # Napiszmy tę funkcje tak ogólnie, jak to możliwe. # Zwróćcie uwagę na przekazanie obiektów typu callable: model i score_fun. if isinstance(cluster_num, int): cluster_num_iter = [cluster_num] else: cluster_num_iter = cluster_num scores = [] for k in cluster_num_iter: model_instance = model(n_clusters=k) labels = model_instance.fit_predict(X) wcss = score_fun(X, labels) scores.append(wcss) if isinstance(cluster_num, int): return scores[0] else: return scores from sklearn.metrics import silhouette_score cluster_num_seq = range(2, 11) # Niektóre metryki nie działają gdy mamy tylko jeden klaster silhouette_vec = count_clustering_scores(X, cluster_num_seq, KMeans, silhouette_score) plt.plot(cluster_num_seq, silhouette_vec, 'bx-') plt.xlabel('k') plt.ylabel('Silhouette score') plt.show()Inne, podobne metryki: Indeks Daviesa–Bouldina Indeks Dunna (dla zainteresowanych — kliknij!) $$ DI = \frac{ \underset{i, j}{\min} \delta (C_i, C_j)}{\underset{k}{\min} \Delta (C_k)} $$Gdzie: $\delta (C_i, C_j)$ -- odległość między klastrami. Możemy ją liczyć na wiele sposóbów. Są to między innymi:* Średnia odległość między punktami obu klastrów.* Najmniejsza odległość między punktami obu klastrów.* Maksymalna odległość między punktami obu klastrów.* Odległośc między środkami klastrów.$\Delta (C_k)$ -- wielkość klastra. Znowu mamy wiele pomysłów na jej wyznaczanie:* Maksymalna odległość między punktami.* Średnia odległość między punktami.* Dwukrotność średniej odległości do środka (można o tym myśleć jako o czymś na wzór średnicy).Indeks Dunna "patrzy" tylko na największy klaster i najmniejszą odległość między klastrami. Czyli możemy myśleć o tym jako o rozważaniu najgorszego przypadku. Z drugiej strony jeśli odpowiednich $\delta ()$ i $ \Delta () $ -- do pewnego stopnia uniezależniamy się od premiowania "okrągłych" klastrów.Indeks Calińskiego-Harabasza Metryki interpretowalne>*Gdy dowolny wskaźnik zaczyna być traktowany jako cel, przestaje być dobrym wskaźnikiem.* >           Prawo GoodhartaPowyższe metryki dobrze sprawdzają się przy doborze liczby klastrów. Z drugiej strony są one trudne w interpretacji. Często mamy pewne oczekiwania wobec klastrów. Na przykład:* Dobra separacja klastrów* Małe odległości wewnątrz klastrów* Klastry podobnej wielkości* Dobra reprezentacja klastrów przez środki* Stabilność klasteryzacji* Duża gęstość klastrów![](https://images7.memedroid.com/images/UPLOADED977/5d89a67bc65a0.jpeg)O doborze metryk Często metryka, którą wybierzemy, jest związana z tym, czego szukamy. Przykłady metryk:* Dobra separacja klastrów -> minimalna odległość między punktami różnych klastrów* Małe odległości wewnątrz klastrów -> średnia odległość między punktami* Klastry podobnej wielkości -> wariancja wielkości klastrów (na przykład średniej odległości między punktami)* Dobra reprezentacja klastrów przez ich środki -> średnia odległość między punktem w klastrze a środkiem* Stabilność klasteryzacji -> bootstrap i frakcja punktów, które różnią się* Duża gęstość klastrów -> największa odległość między punktami wewnątrz klastraNiestety metryki te są wrażliwe na liczbę klastrów. Na przykład, jeśli naszym celem jest dobra reprezentacja punktów w klastrze przez środki, oczywiście najlepiej będzie, gdy każdy punkt będzie osobnym klastrem. A to chyba nie o to chodziło. 
Dlatego tego typu metryki najlepiej sprawdzają się, gdy porównujemy różne algorytmy, ale przy ustalonej liczbie klastrów.# Zaimplementujmy zatem kilka wspomnianych metryk. from scipy.spatial import distance import numpy as np # def two_class def min_interclust_dist(X, label): clusters = set(label) global_min_dist = np.inf for cluster_i in clusters: cluster_i_idx = np.where(label == cluster_i) for cluster_j in clusters: if cluster_i != cluster_j: cluster_j_idx = np.where(label == cluster_j) interclust_min_dist = np.min(distance.cdist(X[cluster_i_idx], X[cluster_j_idx])) global_min_dist = np.min([global_min_dist, interclust_min_dist]) return global_min_dist def _inclust_mean_dists(X, label): clusters = set(label) inclust_dist_list = [] for cluster_i in clusters: cluster_i_idx = np.where(label == cluster_i) inclust_dist = np.mean(distance.pdist(X[cluster_i_idx])) inclust_dist_list.append(inclust_dist) return inclust_dist_list def mean_inclust_dist(X, label): inclust_dist_list = _inclust_mean_dists(X, label) return np.mean(inclust_dist_list) def std_dev_of_inclust_dist(X, label): inclust_dist_list = _inclust_mean_dists(X, label) return np.std(inclust_dist_list) def mean_dist_to_center(X, label): clusters = set(label) inclust_dist_list = [] for cluster_i in clusters: cluster_i_idx = np.where(label == cluster_i) cluster_i_mean = np.mean(X[cluster_i_idx], axis=0, keepdims=True) inclust_dist = np.mean(distance.cdist(X[cluster_i_idx], cluster_i_mean)) inclust_dist_list.append(inclust_dist) return np.mean(inclust_dist_list) print(f'Minimal distance between clusters = {count_clustering_scores(X, 4, KMeans, min_interclust_dist):.2f}.') print(f'Average distance between points in the same class = ' f'{count_clustering_scores(X, 4, KMeans, mean_inclust_dist):.2f}.') print(f'Standard deviation of distance between points in the same class = ' f'{count_clustering_scores(X, 4, KMeans, std_dev_of_inclust_dist):.3f}.') print(f'Average distance to cluster center = ' f'{count_clustering_scores(X, 4, KMeans, mean_dist_to_center):.2f}.')Minimal distance between clusters = 0.55. Average distance between points in the same class = 1.64. Standard deviation of distance between points in the same class = 0.107. Average distance to cluster center = 1.15.Uwaga programistyczna:Funkcje `_inclass_mean_dists()` i `mean_dist_to_center()` mają taką samą pętlę. Tak właściwie należałoby napisać wrapper, który robiłby naszą pętlę i przekazywać jako argument funkcję wykonującą ciało (odpowiednio liczenie średniej odległości między punktami i średniej odleglości od środka), ale być może zaciemniłoby to tematykę zajęć, więc się powstrzymam.Dla zainteresowanych: jest to ten sam manewr, który użyliśmy w funkcji `count_clustering_scores()`. Klasteryzacja hierarchiczna![](https://46gyn61z4i0t1u1pnq2bbk2e-wpengine.netdna-ssl.com/wp-content/uploads/2018/03/Hierarchical-clustering-2.png)Źródło: https://www.displayr.com/what-is-hierarchical-clustering/Powyżej pokazano koncepcję klasteryzacji aglomeracyjnej, ale na jakiej podstawie wybieramy, które klastry połączyć? Zasadniczo zawsze wybieramy połączenie tych klastrów, które są najbliżej siebie.Jest kilka pomysłów określania tego, co znaczy „są blisko siebie”. Na przykład:* Połączenie kompletne* Połączenie pojedyncze* Połączenie średnie* Połączenie centroidalne* Połączenie Warda — o ile wzrośnie wariancja nowego klastra względem sumy wariancji starych klastrów![](https://ars.els-cdn.com/content/image/1-s2.0-S2300396016300799-gr6.jpg)Źródło: ., ., & . (2005). Introduction to data mining. 
Boston:Addison-Wesleyfrom sklearn.cluster import AgglomerativeClustering model = AgglomerativeClustering(n_clusters=4) y = model.fit_predict(X) plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap='viridis') plt.show()Co nam jeszcze daje klasteryzacja hierarchiczna: nie musimy znać liczby klastrów. Zauważmy, że model buduje całą hierarchię klastrów. Na przykład:from scipy.cluster import hierarchy # Generujemy nowe dane o mnjejszej liczbie punktów (dla czytelności) X_small, _ = make_blobs(n_samples=20, centers=4, cluster_std=0.9, random_state=314) Z = hierarchy.linkage(X_small, method='average') plt.figure(figsize=(10, 5), dpi= 200, facecolor='w', edgecolor='k') hierarchy.dendrogram(Z) plt.show()Co nam to daje? Dzięki temu możemy nie podawać jawnie liczby klastrów, a powiedzieć na przykład:> Interesują nas klastry, w których odległość między punktami będzie nie większa niż 1,23.# A Wtedy możemy zrobić tak: model = AgglomerativeClustering(n_clusters=None, linkage='single', distance_threshold=1.23) y = model.fit_predict(X) plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap='viridis') plt.show()Warto wspomnieć, że bardzo podobna do klasteryzacji aglomeracyjnej jest klasteryzacja deglomeracyjna.W klasteryzacji aglomeracyjnej zaczynamy od pojedynczych punktów i łączymy je kolejno w klastry.W klasteryzacji deglomeracyjnej zaczynamy od jednego „superklastra” zawierającego wszystkie punkty i dzielimy go na mniejsze klastry.Jeśli spojrzymy na drzewo hierarchii, to w metodzie aglomeracyjnej jest ono budowane „od dołu”, w metodzie deglomeracyjnej „od góry”.Metoda deglomeracyjna jest rzadko wykorzystywana ze względu na większą złożoność. Rozważmy szukanie skupień dla $n$ punktów. Już pierwszej iteracji musimy rozważyć $2^n$ możliwych podziałów.Dla porównania metoda aglomeracyjna w pierwszej iteracji ma do przeanalizowania zaledwie $\frac{n (n-1)}{2}$ możliwych połączeń. 
And what about a somewhat more demanding dataset?

from sklearn.datasets import make_circles

X, _ = make_circles(factor=0.5, noise=0.08, n_samples=200, random_state=3)
plt.scatter(X[:, 0], X[:, 1])
plt.show()


def plot_agglomerative_clustering(X, n_clusters, linkage):
    model = AgglomerativeClustering(n_clusters=n_clusters, linkage=linkage)
    y = model.fit_predict(X)
    plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap='viridis')
    plt.title(f'Clusters from agglomerative_clustering with {linkage} linkage')
    plt.show()


for linkage in {'ward', 'complete', 'average', 'single'}:
    plot_agglomerative_clustering(X, n_clusters=2, linkage=linkage)

plot_kmeans_clusters(X, n_clusters=2)

import pandas as pd
from functools import partial

metrices = {
    'min dist between clusters': min_interclust_dist,
    'mean dist in clust': mean_inclust_dist,
    'std dev dist in clust': std_dev_of_inclust_dist,
    'mean dist to clust center': mean_dist_to_center,
    'silhouette': silhouette_score
}
models = {
    'Agglomerative ward linkage': partial(AgglomerativeClustering, linkage='ward'),
    'Agglomerative complete linkage': partial(AgglomerativeClustering, linkage='complete'),
    'Agglomerative average linkage': partial(AgglomerativeClustering, linkage='average'),
    'Agglomerative single linkage': partial(AgglomerativeClustering, linkage='single'),
    'Kmeans': KMeans
}

df = pd.DataFrame()
for model_key in models:
    for metric_key in metrices:
        df.loc[model_key, metric_key] = count_clustering_scores(
            X=X, cluster_num=2, model=models[model_key], score_fun=metrices[metric_key]
        )
df

# Here the same benchmark in a vectorised version -- again, for the curious.
# Unpack the dictionaries into lists: one list holds the keys, the other the values.
metrices_names, metrices_fun = list(metrices.keys()), list(metrices.values())
models_names, models_constructors = list(models.keys()), list(models.values())

# Build a matrix that is the Cartesian product of models and metrics.
# Along the first axis we have the different models, along the second the different metrics.
# Strictly speaking, stacking the matrices gives a tensor whose third dimension holds the (metric, model) pairs.
# Python numbers axes from 0, so to stack along the (mathematically) third axis we pass axis=2.
arguments_array = np.stack(np.meshgrid(metrices_fun, models_constructors), axis=2)

# An example (metric, model) pair:
print(arguments_array[0, 0, :])

# A function that takes a (metric, model) pair and returns the value of the metric.
# We could define it with `def`, but it is used only once, and a lambda keeps it concise.
benchmark_fun = lambda pair: count_clustering_scores(X=X, cluster_num=2, model=pair[1], score_fun=pair[0])

# Finally, apply the function to every (metric, model) pair in the matrix built above.
scores_arr = np.apply_along_axis(benchmark_fun, axis=-1, arr=arguments_array)
pd.DataFrame(scores_arr, columns=metrices_names, index=models_names)

Cleaning the raw Data

The purpose of this notebook is to take the original PostgreSQL database and clean the data in it.
The output is a set of tables in a new PostgreSQL schema that hold the cleaned data.Cleaning occurs at several levels:- textual data is unified (e.g., spelling, accents, ...)- duplicate rows/records are merged together- numeric columns are checked for plausibility- foreign key relationships are strictly enforcedThe structure of the data can be viewed at the [ORM layer](https://github.com/webartifex/urban-meal-delivery/tree/main/src/urban_meal_delivery/db) in the package.!umd --versionurban-meal-delivery, version 0.2.0Importsfrom urban_meal_delivery import config, db import collections import datetime import hashlib import pandas as pd import pytz as tz import numpy as np import sqlalchemy as saSettings & Globals%load_ext lab_black pd.set_option("display.max_columns", 999) pd.set_option("display.max_rows", 999) connection = db.connectionNew Database Schema As a result of this notebook, a new PostgreSQL schema called `"clean"` is created holding the tables with the cleaned data.config.CLEAN_SCHEMAAll tables with the original data are stored in the default PostgreSQL schema called `"public"`.config.ORIGINAL_SCHEMAUse `alembic` to run the very first database migration script that creates the new tables.%cd -q .. !alembic upgrade f11cd76d2f45 %cd -q researchINFO [alembic.runtime.migration] Context impl PostgresqlImpl. INFO [alembic.runtime.migration] Will assume transactional DDL. INFO [alembic.runtime.migration] Running upgrade -> f11cd76d2f45, Create the database from scratch.Cities While the original database consists of data concerning the UDP's operations in five cities in France, we only look at `"Bordeaux"`, `"Lyon"`, and `"Paris"` in this research project, as the amount of data for `"Lille"` and `"Nantes"` is simply not a lot due to the very short time horizons the UDP had been operating there. Raw Data The following `target_cities` data were manually obtained from Google Maps and mapped to the `"database_id"`s of the cities in the original database where the UDP was operating in.target_cities = { "Bordeaux": { "database_id": 4, "google_maps_data": { "center_latitude": 44.837789, "center_longitude": -0.57918, "northeast_latitude": 44.91670389999999, "northeast_longitude": -0.5333089999999999, "southwest_latitude": 44.810752, "southwest_longitude": -0.638973, "initial_zoom": 13, }, }, "Lyon": { "database_id": 1, "google_maps_data": { "center_latitude": 45.764043, "center_longitude": 4.835659, "northeast_latitude": 45.808425, "northeast_longitude": 4.898393, "southwest_latitude": 45.707486, "southwest_longitude": 4.7718489, "initial_zoom": 13, }, }, "Paris": { "database_id": 2, "google_maps_data": { "center_latitude": 48.856614, "center_longitude": 2.3522219, "northeast_latitude": 48.9021449, "northeast_longitude": 2.4699208, "southwest_latitude": 48.815573, "southwest_longitude": 2.225193, "initial_zoom": 12, }, }, } city_ids = tuple(city["database_id"] for city in target_cities.values())`cities` below holds the cleaned city related data from the original database. 
They come with KML data (i.e., area) associated with a city, which is kept.cities = pd.read_sql_query( f""" SELECT cities.id, cities.name, geo_areas.kml FROM {config.ORIGINAL_SCHEMA}.cities LEFT OUTER JOIN {config.ORIGINAL_SCHEMA}.geo_areas ON cities.geo_area_id = geo_areas.id WHERE cities.id IN %(city_ids)s ORDER BY cities.id """, con=connection, index_col="id", params={"city_ids": city_ids}, )Merge in the data from Google Maps.for city in target_cities.values(): for col, val in city["google_maps_data"].items(): cities.loc[city["database_id"], col] = valCast the columns' types explicitly.cities = cities.astype( { "name": "string", "kml": "string", "center_latitude": float, "center_longitude": float, "northeast_latitude": float, "northeast_longitude": float, "southwest_latitude": float, "southwest_longitude": float, "initial_zoom": int, } )Clean Datacities.head() cities.info() Int64Index: 3 entries, 1 to 4 Data columns (total 9 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 name 3 non-null string 1 kml 3 non-null string 2 center_latitude 3 non-null float64 3 center_longitude 3 non-null float64 4 northeast_latitude 3 non-null float64 5 northeast_longitude 3 non-null float64 6 southwest_latitude 3 non-null float64 7 southwest_longitude 3 non-null float64 8 initial_zoom 3 non-null int64 dtypes: float64(6), int64(1), string(2) memory usage: 320.0 bytesAs this notebook was developed iteratively, we validate that the cleaned data stays unchanged using SHA256 checksums of the cleaned DataFrames and other `assert`s.assert ( hashlib.sha256(cities.to_json().encode()).hexdigest() == "800689a6ba5b6d03f583f258e058eca0b12e6df8e34c98bfe7aec246ed688c92" )Addresses Raw Data Only load addresses with orders in the target cities, excluding the cut-off day.addresses = pd.read_sql_query( f""" SELECT id, created_at, place_id, latitude, longitude, city_id, city_name AS city, zip, street_address AS street, floor, special_instructions FROM {config.ORIGINAL_SCHEMA}.addresses WHERE city_id IN %(city_ids)s AND id IN ( SELECT DISTINCT address_id FROM ( SELECT DISTINCT pickup_address_id AS address_id FROM {config.ORIGINAL_SCHEMA}.orders WHERE created_at < '{config.CUTOFF_DAY}' UNION SELECT DISTINCT dropoff_address_id AS address_id FROM {config.ORIGINAL_SCHEMA}.orders WHERE created_at < '{config.CUTOFF_DAY}' ) AS orders ) ORDER BY id """, con=connection, index_col="id", params={"city_ids": city_ids}, parse_dates=["created_at"], )All columns are `"strings"`, even `zip`.addresses = addresses.astype( { "place_id": "string", "city": "string", "zip": "string", "street": "string", "floor": "string", "special_instructions": "string", } ) addresses.head() addresses.info() assert len(addresses) == 663_082Adjust Time Zones Create a helper function that strips out the microseconds from datetime columns and converts their time zones from UTC to Europe/Paris.def clean_datetime(col): """Strip Microseconds and convert timezone to Europe/Paris.""" return ( col.dt.tz_localize(tz.utc) .dt.tz_convert(tz.timezone("Europe/Paris")) .dt.tz_localize(None) .map( lambda x: datetime.datetime( x.year, x.month, x.day, x.hour, x.minute, x.second ) if x is not pd.NaT else x ) ) addresses["created_at"] = clean_datetime(addresses["created_at"])Clean Place IDs A tiny number of addresses has `latitude` / `longitude` pairs as `place_id`s.addresses["place_id"] = ( addresses["place_id"].str.replace(r"^[\d\.,-]+$", "", regex=True).str.strip() )Discard addresses without a `place_id` by Google Maps. 
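For illustration only (the strings below are made up and not taken from the database), this is what the regular expression used above does to coordinate-like `place_id`s while leaving genuine Google Maps IDs untouched:

import re

# Illustration only: coordinate-like place_ids become empty strings and are discarded below.
examples = ["ChIJExampleFakePlaceId123", "48.85,2.35", "-0.58,44.84"]
print([re.sub(r"^[\d\.,-]+$", "", s).strip() for s in examples])
# -> ['ChIJExampleFakePlaceId123', '', '']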
If even Google does not know where these addresses are geo-located, we do not even try.msk = addresses["place_id"].isnull() | (addresses["place_id"] == "") addresses = addresses[~msk] assert msk.sum() == 139Clean City Names Some customers entered too much data into the `city` part of the address. Unify this column by only keeping the city's name.addresses["city"].unique() addresses["city"] = ( addresses["city"] .str.replace(r"(E Arrondissement|Er Arrondissement)", "", regex=True) .str.replace(r"(\d)", "", regex=True) # Get rid off accents. .str.normalize("NFKD") .str.encode("ascii", errors="ignore") .str.decode("utf8") .astype("string") # Unify hyphens. .str.strip() .str.replace("-", " ") .str.title() .str.replace(" ", "-") )Sub-urban city names surrounding the three big cities in this research project are kept.addresses["city"].value_counts()Clean Zip Codesaddresses["zip"].unique() addresses["zip"] = ( addresses["zip"] .str.replace(r".*(\d{5}).*", r"\1", regex=True) .str.replace(r"\D+", "", regex=True) .replace("", "NaN") .astype(float) )Zip codes with less than 5 digits are invalid. Paris has zip codes with 75xxx (with 92xxx, 93xxx, and 94xxx being suburbs), Lyon 69xxx, and Bordeaux 33xxx (cf., [source](https://en.wikipedia.org/wiki/Postal_codes_in_France)). Keep only valid zip codes in target cities.invalid = addresses["zip"].notnull() & (addresses["zip"] < 10000) assert invalid.sum() == 9 not_in_target_cities = ( addresses["zip"].notnull() & ~invalid & ~( (33000 <= addresses["zip"]) & (addresses["zip"] < 34000) | (69000 <= addresses["zip"]) & (addresses["zip"] < 70000) | (75000 <= addresses["zip"]) & (addresses["zip"] < 76000) | (92000 <= addresses["zip"]) & (addresses["zip"] < 95000) ) ) assert not_in_target_cities.sum() == 10 addresses.loc[invalid | not_in_target_cities, "zip"] = np.NaN addresses["zip"].unique()Discard addresses with missing zip codes because they are hard to geo-code.msk = addresses["zip"].isnull() addresses = addresses[~msk] assert msk.sum() == 21 addresses = addresses.astype({"zip": int})Clean Street Names Remove extra whitespace, HTML encodings, and accents.addresses["street"] = ( addresses["street"] .str.replace("\s+", " ", regex=True) .str.replace("'", "'") # Get rid off accents. .str.normalize("NFKD") .str.encode("ascii", errors="ignore") .str.decode("utf8") .astype("string") .str.strip() .str.title() )There are no addresses without a `street` name.assert not addresses["street"].isnull().any()Parse Floor Numbers Make `floor` an integer column.addresses["floor"].unique()Parse out floors from the `floor` text column.addresses["floor"] = ( addresses["floor"] # Get rid of accents and lower case everything. .str.normalize("NFKD") .str.encode("ascii", errors="ignore") .str.decode("utf8") .astype("string") .str.casefold() # Replace common text that messes up the matching. 
.str.replace(".", "") .str.replace(":", "") .str.replace(";", "") .str.replace("'", "'") .str.replace("36b25", "") .str.replace("n°", "") .str.replace("#", "") .str.replace("face l'assanceur", "") .str.replace("\(drt\)", "") .str.replace("floor", "") .str.replace("et demi", "") .str.replace("et droite", "") .str.replace("droite", "") .str.replace("droit", "") .str.replace("a gauche", "") .str.replace("e gauche", "") .str.replace("gauche", "") .str.replace("entrez", "") .str.replace("serez", "") .str.replace("dussol", "") .str.replace("soler", "") .str.replace("sonner", "") .str.replace("code", "") .str.replace("perez", "") .str.replace("-", "") .str.replace("\s+", " ", regex=True) .str.strip() # Abbreviations. .str.replace( r"^.*?((\d+)\s?(er|ere|em|eme|ele|ieme|bis|(e|g|st|nd|rd|th|z)($|,|\s+))).*", r"\2", regex=True, ) # French written out. .str.replace(r".*(rdc|rez|sol|ground).*", "0", regex=True) .str.replace(r".*(premiere|premier).*", "1", regex=True) .str.replace(r".*(deuxieme).*", "2", regex=True) .str.replace(r".*(troisieme).*", "3", regex=True) .str.replace(r".*(quatrieme).*", "4", regex=True) .str.replace(r".*(cinquieme).*", "5", regex=True) .str.replace(r".*(sixieme).*", "6", regex=True) .str.replace(r".*(septieme).*", "7", regex=True) .str.replace(r".*(huitieme).*", "8", regex=True) .str.replace(r".*(neuvieme).*", "9", regex=True) .str.replace(r".*(dixieme).*", "10", regex=True) .str.replace(r"^.*?((etage|etg) (\d+))($|\D+.*)", r"\3", regex=True) .str.replace(r"^.*?((\d+)(etage| etage|etg| etg)).*", r"\2", regex=True) # Remove apartment info to not confuse it with floor .str.replace( r"(.*)(ap|apt|app|appt|appart|appartment|appartement|chambre|room)\s*\w?\d+(.*)", r"\1 \3", regex=True, ) .str.replace(r"(.*)(code|digicode)\s*\w?\d+(.*)", r"\1 \3", regex=True) # Take number at start. .str.replace(r"^(\d+)(,|\s+).*", r"\1", regex=True) # Ignore anything with non-numeric symbols entirely. .str.replace(r".*\D+.*", "", regex=True) .str.replace("^$", "NaN") .fillna("NaN") .astype(float) )If the `floor` column is empty, parse out floor info from the `special_instructions` column that must have been used before the `floor` column was introduced (slightly different parsing logic than above).addresses["special_instructions"] = ( addresses["special_instructions"] # Get rid of accents and lower case everything. .str.normalize("NFKD") .str.encode("ascii", errors="ignore") .str.decode("utf8") .astype("string") .str.casefold() # Replace common text that messes up the matching. .str.replace(".", "") .str.replace(":", "") .str.replace(";", "") .str.replace("'", "'") .str.replace("36b25", "") .str.replace("n°", "") .str.replace("#", "") .str.replace("face l'assanceur", "") .str.replace("\(drt\)", "") .str.replace("floor", "") .str.replace("et demi", "") .str.replace("et droite", "") .str.replace("droite", "") .str.replace("droit", "") .str.replace("a gauche", "") .str.replace("e gauche", "") .str.replace("gauche", "") .str.replace("entrez", "") .str.replace("serez", "") .str.replace("dussol", "") .str.replace("soler", "") .str.replace("sonner", "") .str.replace("code", "") .str.replace("perez", "") .str.replace("-", "") .str.replace("\s+", " ", regex=True) .str.strip() # Abbreviations. .str.replace( r"^.*?((\d+)\s?(er|ere|em|eme|ele|ieme|bis|(e|g|st|nd|rd|th|z)($|,|\s+))).*", r"\2", regex=True, ) # French written out. 
.str.replace(r".*(rdc|rez|sol|ground).*", "0", regex=True) .str.replace(r".*(premiere|premier).*", "1", regex=True) .str.replace(r".*(deuxieme).*", "2", regex=True) .str.replace(r".*(troisieme).*", "3", regex=True) .str.replace(r".*(quatrieme).*", "4", regex=True) .str.replace(r".*(cinquieme).*", "5", regex=True) .str.replace(r".*(sixieme).*", "6", regex=True) .str.replace(r".*(septieme).*", "7", regex=True) .str.replace(r".*(huitieme).*", "8", regex=True) .str.replace(r".*(neuvieme).*", "9", regex=True) .str.replace(r".*(dixieme).*", "10", regex=True) .str.replace(r"^.*?((etage|etg) (\d+))($|\D+.*)", r"\3", regex=True) .str.replace(r"^.*?((\d+)(etage| etage|etg| etg)).*", r"\2", regex=True) # Remove apartment info to not confuse it with floor. .str.replace( r"(.*)(ap|apt|app|appt|appart|appartment|appartement|chambre|room)\s*\w?\d+(.*)", r"\1 \3", regex=True, ) .str.replace(r"(.*)(code|digicode)\s*\w?\d+(.*)", r"\1 \3", regex=True) # Ignore anything with non-numeric symbols entirely. .str.replace(r".*\D+.*", "", regex=True) .str.replace("^$", "NaN") .fillna("NaN") .astype(float) )Fill in `floor` from `special_instructions` and cast the type.msk = addresses["floor"].isnull() & addresses["special_instructions"].notnull() addresses.loc[msk, "floor"] = addresses.loc[msk, "special_instructions"].values del addresses["special_instructions"] addresses = addresses.astype({"floor": "Int64"})Only keep the realisic numbers.addresses.loc[addresses["floor"].notnull() & (addresses["floor"] > 40), "floor"] = pd.NAMost addresses have no floor number given.assert len(addresses.loc[addresses["floor"].isnull(), "floor"]) == 307_973Most `floor`s are near the ground floor, which is plausible.addresses["floor"].value_counts().sort_index()Deduplicate The number of addresses (ca. 663,000) is inflated, probably due to some sort of automated re-entering.assert len(addresses) == 662_922First, merge all addresses with the same `place_id`, `latitude` / `longitude`, `city`, `zip`, `street`, *and* `floor` into one entry, namely its first occurrence.addresses["floor"] = addresses["floor"].fillna(999) # dummy -> No grouping with NaN's by = ["place_id", "latitude", "longitude", "city_id", "city", "zip", "street", "floor"] addresses = ( addresses.reset_index() .set_index(by) .merge( ( addresses.reset_index() .groupby(by)[["id"]] .min() .rename(columns={"id": "merged_on_id"}) ), left_index=True, right_index=True, ) .reset_index() .astype({"place_id": "string", "city": "string", "street": "string"}) )Keep a dictionary `address_merger` to map the ID's that are merged away to the ones that are kept.address_merger = collections.defaultdict(lambda: np.NaN) address_merger.update( { id_: merged_on_id for _, id_, merged_on_id in addresses[["id", "merged_on_id"]].itertuples() } ) addresses = ( addresses[addresses["id"] == addresses["merged_on_id"]] .set_index("id") .sort_index()[ [ "created_at", "place_id", "latitude", "longitude", "city_id", "city", "zip", "street", "floor", ] ] ) addresses["floor"] = addresses["floor"].replace(999, pd.NA).astype("Int64")Only about 178,000 addresses remain!assert len(addresses) == 178_101Second, many addresses are still redundant as they are referring to *different* `floor`s in the *same* house or their `street` name is written differently. 
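The merging trick used above, and again below, boils down to mapping every row to the smallest `id` within its group of duplicates. A simplified sketch of that idea on a made-up toy frame (the notebook itself uses the more verbose `set_index`/`merge` construction in order to keep the other columns):

import pandas as pd

# Illustration only: two of the three toy rows share the same location key.
toy = pd.DataFrame(
    {"id": [10, 11, 12], "place_id": ["a", "a", "b"], "street": ["Rue X", "Rue X", "Rue Y"]}
)
# Every row receives the smallest id of its (place_id, street) group, i.e. its first occurrence ...
toy["merged_on_id"] = toy.groupby(["place_id", "street"])["id"].transform("min")
# ... and only rows whose id equals merged_on_id are kept; the rest are merged away.
print(toy[toy["id"] == toy["merged_on_id"]])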
We create a `primary_id` column that holds the ID of the first occurrence of an address independent of the exact spelling of the `street` name and the `floor` number.That column is created via grouping the remaining addresses twice, once with their GPS location, and second by a simplified version of `street`. The latter accounts for slightly different `latitude` / `longitude` pairs of the same location, potentially due to an update in the Google Maps database.by = ["place_id", "latitude", "longitude"] addresses = ( addresses.reset_index() .set_index(by) .merge( ( addresses.reset_index() .groupby(by)[["id"]] .min() .rename(columns={"id": "unified1_id"}) ), left_index=True, right_index=True, ) .reset_index() .set_index("id") .sort_index() .astype({"place_id": "string"})[ [ "unified1_id", "created_at", "place_id", "latitude", "longitude", "city_id", "city", "zip", "street", "floor", ] ] ) addresses["street_simple"] = ( addresses["street"] .str.replace("Avenue", "Ave") .str.replace("Place", "Pl") .str.replace(".", "") .str.replace("-", "") .str.replace(" ", "") .str.lower() ) by = ["city_id", "street_simple"] addresses = ( addresses.reset_index() .set_index(by) .merge( ( addresses.reset_index() .groupby(by)[["id"]] .min() .rename(columns={"id": "unified2_id"}) ), left_index=True, right_index=True, ) .reset_index() .set_index("id") .sort_index()[ [ "unified1_id", "unified2_id", "created_at", "place_id", "latitude", "longitude", "city_id", "city", "zip", "street", "floor", ] ] )So, an address may be a duplicate of *two* different earlier addresses and we choose the earliest one.addresses["primary_id"] = addresses[["unified1_id", "unified2_id"]].min(axis=1) del addresses["unified1_id"] del addresses["unified2_id"] addresses = addresses[ [ "primary_id", "created_at", "place_id", "latitude", "longitude", "city_id", "city", "zip", "street", "floor", ] ]A tricky issue is that an address could be identified as a duplicate of an earlier one that itself is a duplicate of an even earlier one. The following loop does the trick and maps each address to its earlierst version._address_unifier = { id_: unified_id for _, id_, unified_id in addresses.reset_index()[["id", "primary_id"]].itertuples() } while True: if (addresses["primary_id"] != addresses["primary_id"].map(_address_unifier)).any(): addresses["primary_id"] = addresses["primary_id"].map(_address_unifier) else: breakOnly about 87,000 of the remaining 178,000 addresses are unique locations disregarding `floor`s and different spellings of the `street` name._addresses = addresses.reset_index() msk = _addresses["id"] == _addresses["primary_id"] del _addresses assert msk.sum() == 87_287To not overwrite a Python built-in in the ORM layer.addresses = addresses.rename(columns={"zip": "zip_code"})Clean Dataaddresses.head() addresses.info() assert ( hashlib.sha256(addresses.to_json().encode()).hexdigest() == "4f9f3b63a9b2472bf07207d0e06f4901619066121d6bb5fd3ad4ebf21b590410" )Restaurants Raw Data Load restaurants associated with *all* addresses in the target cities. 
Further below, *all* restaurants are shown to have a clean address.restaurants = pd.read_sql_query( f""" SELECT id, created_at, name, address_id, estimated_prep_duration FROM {config.ORIGINAL_SCHEMA}.businesses WHERE address_id IN ( SELECT id FROM {config.ORIGINAL_SCHEMA}.addresses WHERE city_id IN %(city_ids)s ) AND created_at < '{config.CUTOFF_DAY}' ORDER BY id """, con=connection, index_col="id", params={"city_ids": city_ids}, parse_dates=["created_at"], ) restaurants["name"] = restaurants["name"].astype("string") restaurants.head() restaurants.info() assert len(restaurants) == 1_654Adjust Time Zonerestaurants["created_at"] = clean_datetime(restaurants["created_at"])Simplify Namesrestaurants["name"] = ( restaurants["name"] .str.replace("\s+", " ", regex=True) .str.replace("'", "'") # Get rid off accents. .str.normalize("NFKD") .str.encode("ascii", errors="ignore") .str.decode("utf8") .astype("string") .str.title() # To find duplicates further below. .str.replace(" & ", " And ") .str.replace("The ", "") .str.replace("Pasta Pizza ", "") .str.replace(" - Bar A Taboule", "") .str.replace("- ", "") .str.replace(" - Petit-Dejeuner", "") .str.replace("Lyon", "") .str.replace("La Burgeria Saint Mande", "La Fromagette Saint Mande") .str.replace("Mansou'", "Mansouria") .str.strip() )Use Merged Addressesrestaurants["address_id"] = restaurants["address_id"].map(address_merger) assert not restaurants["address_id"].isnull().any()Deduplicate Restaurants with the same name at the same (unified) address are merged.restaurants = restaurants.merge( addresses["primary_id"], left_on="address_id", right_index=True ) restaurants = restaurants.rename(columns={"primary_id": "primary_address_id"}) by = ["name", "primary_address_id"] restaurants = ( restaurants.reset_index() .set_index(by) .merge( ( restaurants.reset_index() .groupby(by)[["id"]] .min() .rename(columns={"id": "merged_on_id"}) ), left_index=True, right_index=True, ) .reset_index() .astype({"name": "string"}) )Keep a dictionary to map the ID's that are merged away to the ones that are kept.restaurants_merger = collections.defaultdict(lambda: np.NaN) restaurants_merger.update( { id_: merged_on_id for _, id_, merged_on_id in restaurants[["id", "merged_on_id"]].itertuples() } ) restaurants = ( restaurants[restaurants["id"] == restaurants["merged_on_id"]] .set_index("id") .sort_index()[["created_at", "name", "address_id", "estimated_prep_duration"]] ) assert len(restaurants) == 1_644Clean Datarestaurants.head() restaurants.info() assert ( hashlib.sha256(restaurants.to_json().encode()).hexdigest() == "8eb852690c027e2fcc0d9cf988391741b1cd028e35c494766f8c21d4ea1722b7" )Couriers Raw Data Only load couriers that worked in one of the target cities (i.e., had an order) and include the vehicle information.couriers = pd.read_sql_query( f""" SELECT couriers.id, couriers.created_at, MD5(couriers.name) AS name, vehicle_types.icon as vehicle, couriers.speed, vehicle_bag_types.capacity, couriers.pay_per_hour, couriers.pay_per_order FROM {config.ORIGINAL_SCHEMA}.couriers LEFT OUTER JOIN {config.ORIGINAL_SCHEMA}.vehicles ON couriers.vehicle_id = vehicles.id LEFT OUTER JOIN {config.ORIGINAL_SCHEMA}.vehicle_types ON vehicles.vehicle_type_id = vehicle_types.id LEFT OUTER JOIN {config.ORIGINAL_SCHEMA}.vehicle_bag_types ON vehicles.vehicle_bag_type_id = vehicle_bag_types.id WHERE couriers.id in ( SELECT DISTINCT deliveries.courier_id FROM {config.ORIGINAL_SCHEMA}.orders INNER JOIN {config.ORIGINAL_SCHEMA}.deliveries ON orders.id = deliveries.order_id WHERE 
orders.featured_business_id IN ( SELECT -- Subquery based off the restaurants query above! id FROM {config.ORIGINAL_SCHEMA}.businesses WHERE address_id IN ( SELECT id FROM {config.ORIGINAL_SCHEMA}.addresses WHERE city_id IN %(city_ids)s ) AND created_at < '{config.CUTOFF_DAY}' ) AND deliveries.courier_id IS NOT NULL ) ORDER BY couriers.id """, con=connection, index_col="id", params={"city_ids": city_ids}, parse_dates=["created_at"], ) couriers = couriers.astype({"name": "string", "vehicle": "string"}) couriers.head() couriers.info() assert len(couriers) == 2_471Adjust Time Zonecouriers["created_at"] = clean_datetime(couriers["created_at"])Deduplicate Couriers with the same name either have the same phone number or signed up within a short time window: They are merged.by = ["name"] couriers = ( couriers.reset_index() .set_index(by) .merge( ( couriers.reset_index() .groupby(by)[["id"]] .max() # merged on the latest courier! .rename(columns={"id": "merged_on_id"}) ), left_index=True, right_index=True, ) .reset_index() .astype({"name": "string"}) )Keep a dictionary to map the ID's that are merged away to the ones that are kept.couriers_merger = collections.defaultdict(lambda: np.NaN) couriers_merger.update( { id_: merged_on_id for _, id_, merged_on_id in couriers[["id", "merged_on_id"]].itertuples() } ) couriers = ( couriers[couriers["id"] == couriers["merged_on_id"]] .set_index("id") .sort_index()[ ["created_at", "vehicle", "speed", "capacity", "pay_per_hour", "pay_per_order"] ] ) assert len(couriers) == 2_469Clean Salary The column `pay_per_hour` defaults to `0` in the database definition. The actual default value is EUR 7,50, which is also the mode in the dataset.couriers["pay_per_hour"].value_counts()The column `pay_per_order` defaults to `0` in the database definition. A more realistic value is EUR 2 (i.e., 200 cents), which is the mode in the dataset.couriers["pay_per_order"].value_counts()Whenever a `0` appears in `pay_per_order`, the corresponding `pay_per_hour` is `0` in all cases except one, which is the highest paid courier.assert ((couriers["pay_per_order"] == 0) & (couriers["pay_per_hour"] == 0)).sum() == 158 assert ((couriers["pay_per_order"] == 0) & (couriers["pay_per_hour"] > 0)).sum() == 1 couriers[(couriers["pay_per_order"] == 0) & (couriers["pay_per_hour"] > 0)]Couriers with `0`s in both columns receive the default payment scheme.msk_0_pay = (couriers["pay_per_hour"] == 0) & (couriers["pay_per_order"] == 0) couriers.loc[msk_0_pay, "pay_per_hour"] = 750 couriers.loc[msk_0_pay, "pay_per_order"] = 200Couriers with a `0` in the `pay_per_hour` column, receive a fixed salary of EUR 7,50.couriers.loc[couriers["pay_per_hour"] == 0, "pay_per_hour"] = 750The column `pay_per_order` contains obvious typos that are corrected.couriers.loc[ couriers["pay_per_order"].isin([1, 2, 20, 2000, 20000]), "pay_per_order" ] = 200 couriers.loc[couriers["pay_per_order"] == 1400000, "pay_per_order"] = 400Distribution of the various `pay_per_hour` / `pay_per_order` combinations.collections.Counter( (y, z) for (x, y, z) in couriers[["pay_per_hour", "pay_per_order"]].itertuples() )Clean Datacouriers.head() couriers.info() assert ( hashlib.sha256(couriers.to_json().encode()).hexdigest() == "a1059e93095842120a58c4f74145bb6a14aeb94f47a0d77043896c70f4772afe" )Orders Raw Data The order related data is spread over many different tables in the original database. Also, some data is not even normalized. 
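As one concrete example of that denormalization, the `issues` table can hold several notes per delivery, so the big query below first collapses it to one row per delivery before joining. Pulled out on its own purely for illustration (it is not run separately in this notebook), that sub-pattern looks like this:

# Illustration only: collapse the one-to-many issues table to one "waiting at pickup" note per delivery.
issue_subquery = f"""
    SELECT
        delivery_id,
        MAX(notes) AS issue
    FROM {config.ORIGINAL_SCHEMA}.issues
    WHERE type = 'PickupIssue' AND category = 'waiting'
    GROUP BY delivery_id
"""
# pd.read_sql_query(issue_subquery, con=connection) would return at most one row per delivery_id.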
The following SQL query puts all the data into one big relation that is cleaned further below.orders = pd.read_sql_query( f""" SELECT orders.id, deliveries.id AS delivery_id, MD5(CONCAT(orders.email, orders.phone_number)) as customer_id, -- anonymize the customer data orders.order_placed_at AS placed_at, CASE WHEN orders.preorder IS FALSE THEN TRUE ELSE FALSE END AS ad_hoc, CASE WHEN orders.preorder is TRUE THEN orders.scheduled_dropoff_at ELSE NULL END AS scheduled_delivery_at, deliveries.status, cancellations.cancelled_at, orders.featured_business_id as restaurant_id, orders.order_sent_at AS restaurant_notified_at, orders.order_received_at AS restaurant_confirmed_at, orders.estimated_prep_duration, orders.estimated_prep_buffer, deliveries.courier_id, deliveries.courier_dispatched_at AS dispatch_at, deliveries.courier_notified_at, deliveries.courier_accepted_at, courier_no_accept_confirmed.issue AS courier_no_accept_confirmed_issue, orders.pickup_address_id, orders.scheduled_pickup_at, deliveries.courier_picked_up_at AS pickup_at, left_pickups.left_pickup_at, courier_late_at_pickup.issue AS courier_late_at_pickup_issue, courier_waited_at_pickup.issue AS courier_waited_at_pickup_issue, courier_no_pickup_confirmed.issue AS courier_no_pickup_confirmed_issue, orders.dropoff_address_id AS delivery_address_id, deliveries.first_estimated_dropoff_at AS first_estimated_delivery_at, deliveries.courier_dropped_off_at AS delivery_at, courier_waited_at_delivery.issue AS courier_waited_at_delivery_issue, courier_no_delivery_confirmed.issue AS courier_no_delivery_confirmed_issue, orders.utilization, items_totals.sub_total, orders.delivery_fee, orders.total, deliveries.delivery_distance AS logged_delivery_distance, deliveries.courier_avg_speed AS logged_avg_courier_speed, CAST(deliveries.courier_avg_speed_distance AS INTEGER) AS logged_avg_courier_speed_distance, delivery_timings.accepting_time AS logged_accepting_time, delivery_timings.courier_reaction_time AS logged_reaction_time, delivery_timings.to_pickup_time AS logged_to_pickup_time, delivery_timings.expected_wait_pickup_time AS expected_wait_pickup_time, delivery_timings.wait_pickup_time AS logged_wait_pickup_time, delivery_timings.pickup_time AS logged_pickup_time, delivery_timings.courier_late AS logged_courier_late_time, delivery_timings.vendor_late AS logged_restaurant_late_time, delivery_timings.to_dropoff_time AS logged_to_delivery_time, delivery_timings.expected_dropoff_time AS expected_delivery_time, delivery_timings.dropoff_time AS logged_delivery_time, delivery_timings.delivery_late AS logged_delivery_late_time, delivery_timings.total_time AS logged_total_time, delivery_timings.confirmed_total_time AS logged_confirmed_total_time FROM {config.ORIGINAL_SCHEMA}.orders LEFT OUTER JOIN {config.ORIGINAL_SCHEMA}.deliveries ON orders.id = deliveries.order_id LEFT OUTER JOIN ( SELECT order_id, CAST(100 * SUM(price) AS INTEGER) AS sub_total FROM {config.ORIGINAL_SCHEMA}.order_records GROUP BY order_id ) AS items_totals ON orders.id = items_totals.order_id LEFT OUTER JOIN {config.ORIGINAL_SCHEMA}.delivery_timings ON deliveries.id = delivery_timings.delivery_id LEFT OUTER JOIN ( SELECT delivery_id, MAX(notes) AS issue FROM {config.ORIGINAL_SCHEMA}.issues WHERE type = 'DispatchIssue' AND category = 'no_courier_interaction' GROUP BY delivery_id ) AS courier_no_accept_confirmed ON deliveries.id = courier_no_accept_confirmed.delivery_id LEFT OUTER JOIN ( SELECT delivery_id, MAX(notes) AS issue FROM {config.ORIGINAL_SCHEMA}.issues WHERE type = 
'PickupIssue' AND category = 'waiting' GROUP BY delivery_id ) AS courier_waited_at_pickup ON deliveries.id = courier_waited_at_pickup.delivery_id LEFT OUTER JOIN ( SELECT delivery_id, MAX(notes) AS issue FROM {config.ORIGINAL_SCHEMA}.issues WHERE type = 'PickupIssue' AND category = 'late' GROUP BY delivery_id ) AS courier_late_at_pickup ON deliveries.id = courier_late_at_pickup.delivery_id LEFT OUTER JOIN ( SELECT delivery_id, MAX(notes) AS issue FROM {config.ORIGINAL_SCHEMA}.issues WHERE type = 'PickupIssue' AND category = 'no_courier_interaction' GROUP BY delivery_id ) AS courier_no_pickup_confirmed ON deliveries.id = courier_no_pickup_confirmed.delivery_id LEFT OUTER JOIN ( SELECT delivery_id, MAX(notes) AS issue FROM {config.ORIGINAL_SCHEMA}.issues WHERE type = 'DropoffIssue' AND category = 'waiting' GROUP BY delivery_id ) AS courier_waited_at_delivery ON deliveries.id = courier_waited_at_delivery.delivery_id LEFT OUTER JOIN ( SELECT delivery_id, MAX(notes) AS issue FROM {config.ORIGINAL_SCHEMA}.issues WHERE type = 'DropoffIssue' AND category = 'late' GROUP BY delivery_id ) AS courier_late_at_delivery ON deliveries.id = courier_late_at_delivery.delivery_id LEFT OUTER JOIN ( SELECT delivery_id, MAX(notes) AS issue FROM {config.ORIGINAL_SCHEMA}.issues WHERE type = 'DropoffIssue' AND category = 'no_courier_interaction' GROUP BY delivery_id ) AS courier_no_delivery_confirmed ON deliveries.id = courier_no_delivery_confirmed.delivery_id LEFT OUTER JOIN ( SELECT delivery_id, courier_id, MAX(created_at) AS left_pickup_at FROM ( SELECT delivery_id, (metadata -> 'courier_id')::TEXT::INTEGER AS courier_id, created_at FROM {config.ORIGINAL_SCHEMA}.delivery_transitions WHERE to_state = 'left_pickup' ) AS left_pickups GROUP BY delivery_id, courier_id ) AS left_pickups ON deliveries.id = left_pickups.delivery_id AND deliveries.courier_id = left_pickups.courier_id LEFT OUTER JOIN ( SELECT delivery_id, MAX(created_at) AS cancelled_at FROM {config.ORIGINAL_SCHEMA}.delivery_transitions WHERE to_state = 'cancelled' GROUP BY delivery_id ) AS cancellations ON deliveries.id = cancellations.delivery_id WHERE orders.featured_business_id IN ( SELECT -- Subquery based off the restaurants query above! 
id FROM {config.ORIGINAL_SCHEMA}.businesses WHERE address_id IN ( SELECT id FROM {config.ORIGINAL_SCHEMA}.addresses WHERE city_id IN %(city_ids)s ) AND created_at < '{config.CUTOFF_DAY}' ) AND scheduled_dropoff_at < '{config.CUTOFF_DAY}' AND deliveries.is_primary IS TRUE ORDER BY orders.id """, con=connection, index_col="id", params={"city_ids": city_ids}, parse_dates=[ "placed_at", "scheduled_delivery_at", "cancelled_at", "restaurant_notified_at", "restaurant_confirmed_at", "dispatch_at", "courier_notified_at", "courier_accepted_at", "pickup_at", "left_pickup_at", "first_estimated_delivery_at", "delivery_at", ], ) orders = orders.astype( { "customer_id": "string", "status": "string", "estimated_prep_duration": "Int64", "courier_id": "Int64", "courier_no_accept_confirmed_issue": "string", "courier_late_at_pickup_issue": "string", "courier_waited_at_pickup_issue": "string", "courier_no_pickup_confirmed_issue": "string", "courier_waited_at_delivery_issue": "string", "courier_no_delivery_confirmed_issue": "string", "logged_avg_courier_speed_distance": "Int64", "logged_accepting_time": "Int64", "logged_reaction_time": "Int64", "logged_to_pickup_time": "Int64", "expected_wait_pickup_time": "Int64", "logged_wait_pickup_time": "Int64", "logged_pickup_time": "Int64", "logged_courier_late_time": "Int64", "logged_restaurant_late_time": "Int64", "logged_to_delivery_time": "Int64", "expected_delivery_time": "Int64", "logged_delivery_time": "Int64", "logged_delivery_late_time": "Int64", "logged_total_time": "Int64", "logged_confirmed_total_time": "Int64", } ) orders.head() orders.info() assert len(orders) == 661_314Adjust Time Zonesfor column in [ "placed_at", "scheduled_delivery_at", "cancelled_at", "restaurant_notified_at", "restaurant_confirmed_at", "dispatch_at", "courier_notified_at", "courier_accepted_at", "scheduled_pickup_at", "pickup_at", "left_pickup_at", "first_estimated_delivery_at", "delivery_at", ]: orders[column] = clean_datetime(orders[column])Use Merged Addresses About 0.02 % of the orders belong to discarded addresses and are discarded also.orders["pickup_address_id"] = orders["pickup_address_id"].map(address_merger) orders["delivery_address_id"] = orders["delivery_address_id"].map(address_merger) msk = orders["pickup_address_id"].isnull() | orders["delivery_address_id"].isnull() orders = orders[~msk].astype({"pickup_address_id": int, "delivery_address_id": int,}) assert msk.sum() == 160Use Merged Restaurantsorders["restaurant_id"] = orders["restaurant_id"].map(restaurants_merger) assert not orders["restaurant_id"].isnull().any()Use Merged Couriersorders["courier_id"] = orders["courier_id"].map(couriers_merger).astype("Int64")Verify that the couriers' IDs are the same in `couriers` and `orders`.assert set(couriers.index) == set( orders.loc[orders["courier_id"].notnull(), "courier_id"].unique() )Clean User IDs Convert the MD5 hashed emails and phone numbers into integer ID's.orders["customer_id"] = ( orders["customer_id"] .map({y: x for (x, y) in enumerate(orders["customer_id"].unique(), start=1)}) .astype({"customer_id": int}) )Ad-hoc vs. 
Scheduled Orders Ad-hoc orders never have a `scheduled_delivery_at` value set, and scheduled orders always have it set.assert not ( (orders["ad_hoc"] == True) & orders["scheduled_delivery_at"].notnull() ).any() assert not ( (orders["ad_hoc"] == False) & orders["scheduled_delivery_at"].isnull() ).any()For all adjusted timestamps we add `*_corrected` columns indicating if a correction is made in the following.for column in [ "scheduled_delivery_at", "cancelled_at", "restaurant_notified_at", "restaurant_confirmed_at", "estimated_prep_duration", "dispatch_at", "courier_notified_at", "courier_accepted_at", "pickup_at", "left_pickup_at", "delivery_at", ]: orders[column + "_corrected"] = False orders.loc[orders[column].isnull(), column + "_corrected"] = pd.NASome customers managed to place scheduled orders for the past. These are converted into ad-hoc orders.msk = orders["scheduled_delivery_at"] < orders["placed_at"] orders.loc[msk, "ad_hoc"] = True orders.loc[msk, "scheduled_delivery_at"] = pd.NaT orders.loc[msk, "scheduled_delivery_at_corrected"] = True assert msk.sum() == 11Orders scheduled within the next 30 minutes are treated as ad-hoc orders. With the median fulfillment time of ad-hoc orders being 34 minutes, it is absolutely unrealistic to fulfill such a scheduled order on time. This should not influence the KPIs in a bad way.msk = (orders["ad_hoc"] == False) & ( orders["scheduled_delivery_at"] - orders["placed_at"] < datetime.timedelta(minutes=30) ) orders.loc[msk, "ad_hoc"] = True orders.loc[msk, "scheduled_delivery_at"] = pd.NaT orders.loc[msk, "scheduled_delivery_at_corrected"] = True assert msk.sum() == 3_267For scheduled orders, `scheduled_delivery_at` is mostly set to quarters of an hour. The seconds part is always `0`.assert not ( (orders["ad_hoc"] == False) & (orders["scheduled_delivery_at"].dt.second != 0) ).any()If a customer managed to enter something other than a quarter of an hour as `scheduled_delivery_at`, we adjust that.msk = (orders["ad_hoc"] == False) & ( orders["scheduled_delivery_at"].dt.minute % 15 != 0 ) round_down = msk & (orders["scheduled_delivery_at"].dt.minute % 15 < 8) orders.loc[round_down, "scheduled_delivery_at"] = orders.loc[ round_down, "scheduled_delivery_at" ] - (orders.loc[round_down, "scheduled_delivery_at"].dt.minute % 15).map( lambda m: datetime.timedelta(minutes=m) ) round_up = msk & (orders["scheduled_delivery_at"].dt.minute % 15 >= 8) orders.loc[round_up, "scheduled_delivery_at"] = orders.loc[ round_up, "scheduled_delivery_at" ] + (orders.loc[round_up, "scheduled_delivery_at"].dt.minute % 15).map( lambda m: datetime.timedelta(minutes=(15 - m)) ) orders.loc[msk, "scheduled_delivery_at_corrected"] = True assert msk.sum() == 6Timestamps All timestamps in `orders` must occur in a strict sequence (i.e., order) according to the delivery process. A tiny fraction of the orders has timestamps that do not comply with that and are adjusted in the following. `placed_at` must always be the earliest of all timestamps.for column in [ "scheduled_delivery_at", "cancelled_at", "restaurant_notified_at", "restaurant_confirmed_at", "dispatch_at", "courier_notified_at", "courier_accepted_at", "pickup_at", "left_pickup_at", "first_estimated_delivery_at", "delivery_at", ]: assert not (orders["placed_at"] >= orders[column]).any()Rarely, a restaurant confirmed an order before it was notified about it. 
We keep `restaurant_confirmed_at` in these cases.msk = orders["restaurant_notified_at"] >= orders["restaurant_confirmed_at"] orders.loc[msk, "restaurant_notified_at"] = pd.NaT orders.loc[msk, "restaurant_notified_at_corrected"] = True assert msk.sum() == 47Whenever `restaurant_notified_at` or `restaurant_confirmed_at` is later than `pickup_at`, we discard the values.msk = orders["restaurant_notified_at"] >= orders["pickup_at"] orders.loc[msk, "restaurant_notified_at"] = pd.NaT orders.loc[msk, "restaurant_notified_at_corrected"] = True assert msk.sum() == 73 msk = orders["restaurant_confirmed_at"] >= orders["pickup_at"] orders.loc[msk, "restaurant_confirmed_at"] = pd.NaT orders.loc[msk, "restaurant_confirmed_at_corrected"] = True assert msk.sum() == 2_001If a courier forgot to confirm the pickup, `pickup_at` and `delivery_at` are the same.msk = orders["delivery_at"] == orders["pickup_at"] orders.loc[msk, "pickup_at"] = pd.NaT orders.loc[msk, "pickup_at_corrected"] = True assert msk.sum() == 16 msk = orders["delivery_at"] == orders["left_pickup_at"] orders.loc[msk, "left_pickup_at"] = pd.NaT orders.loc[msk, "left_pickup_at_corrected"] = True assert msk.sum() == 15`delivery_at` must be the latest of all dispatch-related timestamps.for column in [ "dispatch_at", "courier_notified_at", "courier_accepted_at", "pickup_at", "left_pickup_at", ]: assert not (orders["delivery_at"] <= orders[column]).any()In about 14,500 cases the `left_pickup_at` lies before or on `pickup_at`. This only affects orders between September 6 and October 17. We discard these timestamps.msk = orders["left_pickup_at"] < orders["pickup_at"] orders.loc[msk, "left_pickup_at"] = pd.NaT orders.loc[msk, "left_pickup_at_corrected"] = True assert msk.sum() == 14_013 assert orders.loc[msk, "placed_at"].min().date() == datetime.date(2016, 9, 6) assert orders.loc[msk, "placed_at"].max().date() == datetime.date(2016, 10, 17) msk = orders["left_pickup_at"] == orders["pickup_at"] orders.loc[msk, "left_pickup_at"] = pd.NaT orders.loc[msk, "left_pickup_at_corrected"] = True assert msk.sum() == 496 for column in [ "dispatch_at", "courier_notified_at", "courier_accepted_at", "pickup_at", ]: assert not (orders["left_pickup_at"] <= orders[column]).any()Rarely, `pickup_at` is earlier than or equal to `dispatch_at`, `courier_notified_at`, or `courier_accepted_at`. They are discarded.msk = orders["pickup_at"] <= orders["dispatch_at"] orders.loc[msk, "dispatch_at"] = pd.NaT orders.loc[msk, "dispatch_at_corrected"] = True assert msk.sum() == 15 msk = orders["pickup_at"] <= orders["courier_notified_at"] orders.loc[msk, "courier_notified_at"] = pd.NaT orders.loc[msk, "courier_notified_at_corrected"] = True assert msk.sum() == 8 assert set(orders.loc[msk, "status"].unique()) == set(["cancelled"]) msk = orders["pickup_at"] <= orders["courier_accepted_at"] orders.loc[msk, "courier_accepted_at"] = pd.NaT orders.loc[msk, "courier_accepted_at_corrected"] = True assert msk.sum() == 15 for column in ["dispatch_at", "courier_notified_at", "courier_accepted_at"]: assert not (orders["pickup_at"] <= orders[column]).any()For about 66.000 orders `courier_accepted_at` equals `dispatch_at` or lies before it. 
We assume the former is correct and discard the latter.msk = orders["courier_accepted_at"] <= orders["dispatch_at"] orders.loc[msk, "dispatch_at"] = pd.NaT orders.loc[msk, "dispatch_at_corrected"] = True assert msk.sum() == 65_848If `courier_accepted_at` is equal or before `courier_notified_at`, we discard the latter.msk = orders["courier_accepted_at"] <= orders["courier_notified_at"] orders.loc[msk, "courier_notified_at"] = pd.NaT orders.loc[msk, "courier_notified_at_corrected"] = True assert msk.sum() == 165_585 for column in ["dispatch_at", "courier_notified_at"]: assert not (orders["courier_accepted_at"] <= orders[column]).any()For some more orders, `courier_notified_at` lies before `dispatch_at`. Manual analysis reveals that in most of these cases, the courier did not hit "accept". We discard `dispatch_at` as the timings between `courier_notified_at` and `courier_accepted_at` fit the issue messages.msk = orders["courier_notified_at"] <= orders["dispatch_at"] orders.loc[msk, "dispatch_at"] = pd.NaT orders.loc[msk, "dispatch_at_corrected"] = True assert msk.sum() == 3_397Ad-hoc orders that were placed before 11 in the morning and after 23 in the evening are discarded. Most of them were cancelled anyways.msk = (orders["ad_hoc"] == True) & ( (orders["placed_at"].dt.hour <= 10) | (orders["placed_at"].dt.hour >= 23) ) orders = orders[~msk] assert msk.sum() == 337The orders scheduled for 11:15 (=1) and 11:30 (=37) are scheduled for 11:45. Most of them were not delivered until 12 anyways. This is in line with the 30-minute minimum horizon above.msk = (orders["scheduled_delivery_at"].dt.hour == 11) & ( orders["scheduled_delivery_at"].dt.minute == 30 ) orders.loc[msk, "scheduled_delivery_at"] += datetime.timedelta(minutes=15) orders.loc[msk, "scheduled_delivery_at_corrected"] = True assert msk.sum() == 37 msk = (orders["scheduled_delivery_at"].dt.hour == 11) & ( orders["scheduled_delivery_at"].dt.minute == 15 ) orders.loc[msk, "scheduled_delivery_at"] += datetime.timedelta(minutes=30) orders.loc[msk, "scheduled_delivery_at_corrected"] = True assert msk.sum() == 1 assert not ( (orders["scheduled_delivery_at"].dt.hour == 11) & (orders["scheduled_delivery_at"].dt.minute == 0) ).any()Orders with a scheduled delivery before 11 in the morning and after 23 in the evening are discarded.msk = (orders["ad_hoc"] == False) & ( (orders["scheduled_delivery_at"].dt.hour <= 10) | (orders["scheduled_delivery_at"].dt.hour >= 23) ) orders = orders[~msk] assert msk.sum() == 159 ad_hoc = orders["ad_hoc"] == True scheduled = ~ad_hocOrder Stati There are only cancelled and completed orders. We replace the `status` column with a boolean `cancelled` column.assert set(orders["status"].unique()) == set(["cancelled", "completed"]) orders["cancelled"] = False msk = orders["status"] == "cancelled" orders.loc[msk, "cancelled"] = True del orders["status"] assert msk.sum() == 23_552Some cancelled orders still have a `delivery_at` value. All of them have a dummy value for the `cancelled_at` value (cf., below). For roughly two thirds of them, the time between pickup and delivery is so small that it seems unrealistic that they actually were delivered. In these cases, we take `delivery_at` as the realistic `cancelled_at` value. 
The ones that could have been delivered realistically are treated as completed orders.claimed_to_be_delivered = (orders["cancelled"] == True) & orders[ "delivery_at" ].notnull() assert ( orders.loc[claimed_to_be_delivered, "cancelled_at"].min() == orders.loc[claimed_to_be_delivered, "cancelled_at"].max() == datetime.datetime(2016, 10, 18, 9, 52, 45) ) realistically_delivered = ( orders["delivery_at"] - orders["pickup_at"] ).dt.total_seconds() > 120 msk = claimed_to_be_delivered & realistically_delivered orders.loc[msk, "cancelled"] = False orders.loc[msk, "cancelled_at"] = pd.NaT orders.loc[msk, "cancelled_at_corrected"] = True msk = claimed_to_be_delivered & ~realistically_delivered orders.loc[msk, "cancelled_at"] = orders.loc[msk, "delivery_at"] orders.loc[msk, "cancelled_at_corrected"] = True orders.loc[msk, "delivery_at"] = pd.NaT orders.loc[msk, "delivery_at_corrected"] = pd.NA assert claimed_to_be_delivered.sum() == 159 assert (claimed_to_be_delivered & realistically_delivered).sum() == 61 assert (claimed_to_be_delivered & ~realistically_delivered).sum() == 98Only cancelled orders have a `cancelled_at` value.cancelled = orders["cancelled"] == True completed = orders["cancelled"] == False assert not orders.loc[cancelled, "cancelled_at"].isnull().any() assert not orders.loc[completed, "cancelled_at"].notnull().any()Cancelled Orders For about 40% of the orders the `cancelled_at` field was only filled in after a system change on October 18 (i.e., in a batch). For these orders, this field is not meaningful because of that. We discard it.batch = orders["cancelled_at"] == datetime.datetime(2016, 10, 18, 9, 52, 45) orders.loc[cancelled & batch, "cancelled_at"] = pd.NaT orders.loc[cancelled & batch, "cancelled_at_corrected"] = True assert (cancelled & batch).sum() == 9_410When a restaurant was notified about an order after the order was cancelled, we discard `restaurant_notified_at` and `restaurant_confirmed_at`.msk = orders["cancelled_at"] <= orders["restaurant_notified_at"] orders.loc[msk, "restaurant_notified_at"] = pd.NaT orders.loc[msk, "restaurant_notified_at_corrected"] = True assert msk.sum() == 6 msk = orders["cancelled_at"] <= orders["restaurant_confirmed_at"] orders.loc[msk, "restaurant_confirmed_at"] = pd.NaT orders.loc[msk, "restaurant_confirmed_at_corrected"] = True assert msk.sum() == 1_253When an order was dispatched in the moment it was cancelled, we adjust that.msk = orders["cancelled_at"] == orders["dispatch_at"] orders.loc[msk, "dispatch_at"] -= datetime.timedelta(seconds=1) orders.loc[msk, "dispatch_at_corrected"] = True assert msk.sum() == 3When a courier was notified about or accepted an order in the moment it was cancelled, we adjust that.msk = orders["cancelled_at"] == orders["courier_notified_at"] orders.loc[msk, "courier_notified_at"] -= datetime.timedelta(seconds=1) orders.loc[msk, "courier_notified_at_corrected"] = True assert msk.sum() == 1 msk = orders["cancelled_at"] == orders["courier_accepted_at"] orders.loc[msk, "courier_accepted_at"] -= datetime.timedelta(seconds=1) orders.loc[msk, "courier_accepted_at_corrected"] = True assert msk.sum() == 8When a courier picked up an order in the moment it was cancelled, we adjust that.msk = orders["cancelled_at"] == orders["pickup_at"] orders.loc[msk, "pickup_at"] -= datetime.timedelta(seconds=1) orders.loc[msk, "pickup_at_corrected"] = True assert msk.sum() == 1Verify that `cancelled_at` is indeed the latest timestamp in every row.orders["_max_datetime"] = pd.NaT orders["_max_datetime"] = orders[ [ 
"restaurant_notified_at", "restaurant_confirmed_at", "dispatch_at", "courier_notified_at", "courier_accepted_at", "pickup_at", "left_pickup_at", "delivery_at", "cancelled_at", ] ].max(axis=1) assert not ( cancelled & ~batch & (orders["cancelled_at"] != orders["_max_datetime"]) ).any() del orders["_max_datetime"]Timings The times in between the timestamps can be used to obtain timings of individual steps in the delivery process. In the original database, such timings were already logged. In the following, we validate the timestamps against the timings and only keep the timestamps as the timings are then calculated as `@property`s in the ORM layer. `confirmed_total_time` is the difference between `placed_at` and `delivery_at`. It is set only for completed orders and useful only for ad_hoc orders. The `Order` class has a `total_time` property that computes that value.all_data_available = ( orders["logged_confirmed_total_time"].notnull() & orders["delivery_at"].notnull() & orders["placed_at"].notnull() ) good_data = ( orders["logged_confirmed_total_time"] - ((orders["delivery_at"] - orders["placed_at"]).dt.total_seconds().round()) ).abs() <= 5 del orders["logged_confirmed_total_time"] assert (all_data_available & good_data).sum() == 635_768 assert (all_data_available & good_data & completed).sum() == 635_768 assert (all_data_available & good_data & ad_hoc).sum() == 561_340 round( (all_data_available & good_data & ad_hoc).sum() / (all_data_available & ad_hoc).sum(), 3, )The best guess for `accepting_time` is the difference between `dispatch_at` and `courier_accepted_at`. `Order.time_to_accept` models that.all_data_available = ( orders["logged_accepting_time"].notnull() & orders["courier_accepted_at"].notnull() & orders["dispatch_at"].notnull() ) good_data = ( orders["logged_accepting_time"] - ( (orders["courier_accepted_at"] - orders["dispatch_at"]) .dt.total_seconds() .round() ) ).abs() <= 5 assert (all_data_available & good_data).sum() == 345_803 assert (all_data_available & good_data & completed).sum() == 345_803 round((all_data_available & good_data).sum() / all_data_available.sum(), 3)We use `accepting_time` to extrapolate missing values for `dispatch_at`.extrapolate = ( orders["dispatch_at"].isnull() & orders["courier_accepted_at"].notnull() & orders["logged_accepting_time"].notnull() ) accept_time = orders["logged_accepting_time"].map( lambda x: datetime.timedelta(seconds=x) if x is not pd.NA else pd.NaT ) extrapolated_dispatch_at = orders["courier_accepted_at"] - accept_time still_wrong = extrapolated_dispatch_at >= orders["courier_notified_at"] msk = extrapolate & ~still_wrong orders.loc[msk, "dispatch_at"] = extrapolated_dispatch_at.loc[msk] orders.loc[msk, "dispatch_at_corrected"] = True del orders["logged_accepting_time"] assert extrapolate.sum() == 67_372 assert (extrapolate & ~still_wrong).sum() == 61_545The best guess for `reaction_time` is the difference between `courier_notified_at` and `courier_accepted_at`. 
`Order.time_to_react` models that in the ORM.all_data_available = ( orders["logged_reaction_time"].notnull() & orders["courier_accepted_at"].notnull() & orders["courier_notified_at"].notnull() ) good_data = ( orders["logged_reaction_time"] - ( (orders["courier_accepted_at"] - orders["courier_notified_at"]) .dt.total_seconds() .round() ) ).abs() <= 5 assert (all_data_available & good_data).sum() == 165_355 assert (all_data_available & good_data & completed).sum() == 165_355 round((all_data_available & good_data).sum() / all_data_available.sum(), 3)We use `reaction_time` to extrapolate missing values for `courier_notified_at`.extrapolate = ( orders["courier_notified_at"].isnull() & orders["courier_accepted_at"].notnull() & orders["logged_reaction_time"].notnull() ) extrapolated_courier_notified_at = ( orders["courier_accepted_at"] # Some values for logged_reaction_time are <= 0. - orders["logged_reaction_time"].map( lambda x: datetime.timedelta(seconds=x) if x is not pd.NA and x > 0 else pd.NaT ) ) still_wrong = extrapolated_courier_notified_at <= orders["dispatch_at"] msk = extrapolate & ~still_wrong orders.loc[msk, "courier_notified_at"] = extrapolated_courier_notified_at.loc[msk] orders.loc[msk, "courier_notified_at_corrected"] = True assert extrapolate.sum() == 214_043 assert (extrapolate & ~still_wrong).sum() == 213_290No need to extrapolate `courier_accepted_at` from `courier_notified_at`.assert not ( orders["courier_notified_at"].notnull() & orders["courier_accepted_at"].isnull() & orders["logged_reaction_time"].notnull() ).any() del orders["logged_reaction_time"]`estimated_prep_duration` equals `expected_wait_pickup_time`. As the latter is not filled in for cancelled orders, we keep the former.Also, `estimated_prep_duration` is only filled in starting with May 24. It is always a multiple of `60`, so it is stored as full minutes.all_data_available = ( orders["estimated_prep_duration"].notnull() & orders["expected_wait_pickup_time"].notnull() ) good_data = ( orders["estimated_prep_duration"] - orders["expected_wait_pickup_time"] ).abs() <= 5 no_duration = orders["estimated_prep_duration"].isnull() assert not (no_duration & orders["expected_wait_pickup_time"].notnull()).any() assert (~no_duration & orders["expected_wait_pickup_time"].isnull()).sum() == 19_865 assert not ( (orders["placed_at"].dt.date > datetime.date(2016, 5, 24)) & orders["expected_wait_pickup_time"].isnull() & (orders["cancelled"] == False) ).any() del orders["expected_wait_pickup_time"] assert orders.loc[no_duration, "placed_at"].min().date() == datetime.date(2016, 2, 21) assert orders.loc[no_duration, "placed_at"].max().date() == datetime.date(2016, 5, 24) assert orders.loc[~no_duration, "placed_at"].min().date() == datetime.date(2016, 5, 24) assert orders.loc[~no_duration, "placed_at"].max().date() == datetime.date(2017, 1, 31) assert not (~no_duration & (orders["estimated_prep_duration"] % 60 != 0)).any() round((all_data_available & good_data).sum() / all_data_available.sum(), 3)`estimated_prep_duration` is the difference between `restaurant_notified_at` and `scheduled_pickup_at` when allowing up to half a minute of clock skew. `restaurant_confirmed_at` only works in about 40% of the cases. 
So, if and when a restaurant confirms an order, does not affect the dispatching process.all_data_available = ( orders["estimated_prep_duration"].notnull() & orders["restaurant_notified_at"].notnull() & orders["scheduled_pickup_at"].notnull() ) good_data = ( orders["estimated_prep_duration"] - ( (orders["scheduled_pickup_at"] - orders["restaurant_notified_at"]) .dt.total_seconds() .round() ) ).abs() <= 35 assert (all_data_available & good_data).sum() == 539_668 assert (all_data_available & good_data & completed).sum() == 524_709 round((all_data_available & good_data).sum() / all_data_available.sum(), 3)We use `estimated_prep_duration` to correct about a third of the 1.5% of `restaurant_notified_at` that are off for orders after May 24.duration = orders["estimated_prep_duration"].map( lambda x: datetime.timedelta(seconds=x) if x is not pd.NA else pd.NaT ) calc_restaurant_notified_at = orders["scheduled_pickup_at"] - duration not_wrong = ( completed & (orders["placed_at"] < calc_restaurant_notified_at) & (calc_restaurant_notified_at < orders["restaurant_confirmed_at"]) ) msk = all_data_available & ~good_data & not_wrong orders.loc[msk, "restaurant_notified_at"] = calc_restaurant_notified_at.loc[msk] orders.loc[msk, "restaurant_notified_at_corrected"] = True assert (all_data_available & ~good_data).sum() == 7_514 assert msk.sum() == 2_425 assert orders.loc[msk, "placed_at"].min().date() == datetime.date(2016, 5, 24) assert orders.loc[msk, "placed_at"].max().date() == datetime.date(2017, 1, 31)Also, we use `estimated_prep_duration` to extrapolate missing `restaurant_notified_at` values for orders after May 24.duration = orders["estimated_prep_duration"].map( lambda x: datetime.timedelta(seconds=x) if x is not pd.NA else pd.NaT ) extrapolated = orders["scheduled_pickup_at"] - duration extrapolate = ( orders["restaurant_notified_at"].isnull() & orders["scheduled_pickup_at"].notnull() & orders["estimated_prep_duration"].notnull() ) still_wrong = ( (extrapolated <= orders["placed_at"]) | (extrapolated >= orders["restaurant_confirmed_at"]) | (extrapolated >= orders["cancelled_at"]) ) msk = extrapolate & ~still_wrong orders.loc[msk, "restaurant_notified_at"] = extrapolated.loc[msk] orders.loc[msk, "restaurant_notified_at_corrected"] = True assert extrapolate.sum() == 469 assert msk.sum() == 374 assert orders.loc[msk, "placed_at"].min().date() == datetime.date(2016, 5, 29)Vice versa, we extrapolate `estimated_prep_duration` as the difference of `scheduled_pickup_at` and `restaurant_notified_at` for orders before May 24.extrapolated = orders["scheduled_pickup_at"] - orders["restaurant_notified_at"] extrapolated = (extrapolated.dt.total_seconds() // 60 * 60).astype("Int64") extrapolate = ( orders["restaurant_notified_at"].notnull() & orders["scheduled_pickup_at"].notnull() & orders["estimated_prep_duration"].isnull() ) orders.loc[extrapolate, "estimated_prep_duration"] = extrapolated.loc[extrapolate] orders.loc[extrapolate, "estimated_prep_duration_corrected"] = True assert extrapolate.sum() == 108_398 assert orders.loc[extrapolate, "placed_at"].min().date() == datetime.date(2016, 2, 21) assert orders.loc[extrapolate, "placed_at"].max().date() == datetime.date(2016, 5, 24)More than 99.9% of the orders with `estimated_prep_duration` set, have this value be under 45 minutes. 
We view the remaining ones as outliers and adjust them.more_than_45_mins = orders["estimated_prep_duration"].notnull() more_than_45_mins &= orders["estimated_prep_duration"] > 45 * 60 orders.loc[more_than_45_mins, "estimated_prep_duration"] = 45 * 60 orders.loc[more_than_45_mins, "estimated_prep_duration_corrected"] = True assert more_than_45_mins.sum() == 449 round((~more_than_45_mins).sum() / orders["estimated_prep_duration"].notnull().sum(), 5)We create a boolean column `pickup_not_confirmed` out of the text column `courier_no_pickup_confirmed_issue`.orders["courier_no_pickup_confirmed_issue"].value_counts() orders["pickup_not_confirmed"] = False msk = orders["courier_no_pickup_confirmed_issue"].notnull() orders.loc[msk, "pickup_not_confirmed"] = True msk = orders["pickup_at"].isnull() orders.loc[msk, "pickup_not_confirmed"] = pd.NA del orders["courier_no_pickup_confirmed_issue"] assert orders["pickup_not_confirmed"].sum() == 34_966`logged_to_pickup_time` and `logged_pickup_time` constitute the difference between `courier_accepted_at` and `pickup_at`. `logged_pickup_time` is negative in rare cases.assert not (orders["logged_to_pickup_time"] < 0).any() assert (orders["logged_pickup_time"] < 0).sum() == 30 all_data_available = ( orders["logged_to_pickup_time"].notnull() & orders["logged_pickup_time"].notnull() & (orders["logged_pickup_time"] >= 0) & orders["pickup_at"].notnull() & orders["courier_accepted_at"].notnull() ) good_data = ( orders["logged_to_pickup_time"] + orders["logged_pickup_time"] - ((orders["pickup_at"] - orders["courier_accepted_at"]).dt.total_seconds().round()) ).abs() <= 5 pickup_not_confirmed = orders["pickup_not_confirmed"] == True assert (all_data_available & good_data).sum() == 599_195 assert (all_data_available & good_data & completed).sum() == 599_111 assert (all_data_available & (good_data | pickup_not_confirmed)).sum() == 604_483 round((all_data_available & good_data).sum() / all_data_available.sum(), 3)For the 6% where `pickup_at` does not relate back to `courier_accepted_at`, we correct the former. 
Unconfirmed pickups seem to not be the cause of these inconsistencies.calc_pickup_at = ( orders["courier_accepted_at"] + orders["logged_to_pickup_time"].map( lambda x: datetime.timedelta(seconds=x) if x is not pd.NA else pd.NaT ) + orders["logged_pickup_time"].map( lambda x: datetime.timedelta(seconds=x) if x is not pd.NA else pd.NaT ) ) msk = all_data_available & ~good_data orders.loc[msk, "pickup_at"] = calc_pickup_at.loc[msk] orders.loc[msk, "pickup_at_corrected"] = True assert (all_data_available & ~good_data).sum() == 38_015 assert (all_data_available & ~good_data & pickup_not_confirmed).sum() == 5_288Keep other timestamps consistent after the correction.msk = orders["pickup_at"] <= orders["restaurant_notified_at"] orders.loc[msk, "restaurant_notified_at"] = pd.NaT orders.loc[msk, "restaurant_notified_at_corrected"] = True assert msk.sum() == 107 msk = orders["pickup_at"] <= orders["restaurant_confirmed_at"] orders.loc[msk, "restaurant_confirmed_at"] = pd.NaT orders.loc[msk, "restaurant_confirmed_at_corrected"] = True assert msk.sum() == 892With `logged_to_pickup_time` we calculate a new timestamp `reached_pickup_at`.to_pickup_time = orders["logged_to_pickup_time"].map( lambda x: datetime.timedelta(seconds=x) if x is not pd.NA and x > 0 else pd.NaT ) reached_pickup_at = orders["courier_accepted_at"] + to_pickup_time orders["reached_pickup_at"] = pd.NaT msk = ( completed & reached_pickup_at.notnull() & (reached_pickup_at < orders["pickup_at"]) ) orders.loc[msk, "reached_pickup_at"] = reached_pickup_at.loc[msk] assert msk.sum() == 530_724`logged_courier_late_time` and `logged_restaurant_late_time` are always set together. The ca. 110,000 missing values are spread over the entire horizon.assert not ( ( orders["logged_courier_late_time"].notnull() & orders["logged_restaurant_late_time"].isnull() ) | ( orders["logged_courier_late_time"].isnull() & orders["logged_restaurant_late_time"].notnull() ) ).any() assert orders.loc[ orders["logged_courier_late_time"].isnull(), "placed_at" ].min().date() == datetime.date(2016, 2, 22) assert orders.loc[ orders["logged_courier_late_time"].isnull(), "placed_at" ].max().date() == datetime.date(2017, 1, 31) assert orders.loc[ orders["logged_courier_late_time"].notnull(), "placed_at" ].min().date() == datetime.date(2016, 2, 21) assert orders.loc[ orders["logged_courier_late_time"].notnull(), "placed_at" ].max().date() == datetime.date(2017, 1, 31)`logged_courier_late_time` is mostly explained with `reached_pickup_at` and `scheduled_pickup_at`. `Order.courier_early` and `Order.courier_late` model that in the ORM.all_data_available = ( orders["logged_courier_late_time"].notnull() & orders["reached_pickup_at"].notnull() & orders["scheduled_pickup_at"].notnull() ) good_data = ( orders["logged_courier_late_time"] - ( (orders["reached_pickup_at"] - orders["scheduled_pickup_at"]) .dt.total_seconds() .round() ) ).abs() <= 5 assert (all_data_available & good_data).sum() == 471_553 assert (all_data_available & good_data & completed).sum() == 471_553 round((all_data_available & good_data).sum() / all_data_available.sum(), 3)`logged_restaurant_late_time` is mostly explained with `pickup_at` and `scheduled_pickup_at`. `logged_restaurant_late_time` is also `0` quite often, indicating no timing was taken. 
`Order.restaurant_early` and `Order.restaurant_late` model that in the ORM.all_data_available = ( orders["logged_restaurant_late_time"].notnull() & orders["pickup_at"].notnull() & orders["scheduled_pickup_at"].notnull() ) good_data = ( orders["logged_restaurant_late_time"] - ((orders["pickup_at"] - orders["scheduled_pickup_at"]).dt.total_seconds().round()) ).abs() <= 5 restaurant_not_timed = orders["logged_restaurant_late_time"] == 0 assert (all_data_available).sum() == 503_179 assert (all_data_available & good_data).sum() == 245_714 assert (all_data_available & restaurant_not_timed).sum() == 246_362 assert (all_data_available & (good_data | restaurant_not_timed)).sum() == 488_512 restaurant_timed = orders["logged_restaurant_late_time"] != 0 round( (all_data_available & restaurant_timed & good_data).sum() / (all_data_available & restaurant_timed).sum(), 3, )`logged_wait_pickup_time` is unfortunately not a good timing to extrapolate when a meal was ready to picked up by the courier. It is only good to explain the difference between `reached_pickup_at` and `left_pickup_at`, which is not really the time the courier had to wait. Also, the field seems to only be tracked correctly if the courier was late.all_data_available = ( (orders["logged_wait_pickup_time"]).notnull() & orders["reached_pickup_at"].notnull() & orders["left_pickup_at"].notnull() ) good_data = ( orders["logged_wait_pickup_time"] - ( (orders["left_pickup_at"] - orders["reached_pickup_at"]) .dt.total_seconds() .round() ) ).abs() <= 5 round( (all_data_available & good_data).sum() / (all_data_available).sum(), 3, ) all_data_available = ( (orders["logged_wait_pickup_time"]).notnull() & (orders["logged_courier_late_time"] >= 0) & orders["reached_pickup_at"].notnull() & orders["left_pickup_at"].notnull() ) good_data = ( orders["logged_wait_pickup_time"] - ( (orders["left_pickup_at"] - orders["reached_pickup_at"]) .dt.total_seconds() .round() ) ).abs() <= 5 round( (all_data_available & good_data).sum() / (all_data_available).sum(), 3, ) del orders["logged_courier_late_time"] del orders["logged_restaurant_late_time"] del orders["logged_wait_pickup_time"]We create a boolean column `delivery_not_confirmed` out of the text column `courier_no_pickup_confirmed_issue`.orders["courier_no_delivery_confirmed_issue"].value_counts() orders["delivery_not_confirmed"] = False msk = orders["courier_no_delivery_confirmed_issue"].notnull() orders.loc[msk, "delivery_not_confirmed"] = True msk = orders["delivery_at"].isnull() orders.loc[msk, "delivery_not_confirmed"] = pd.NA del orders["courier_no_delivery_confirmed_issue"] assert orders["delivery_not_confirmed"].sum() == 13_817`logged_to_delivery_time` and `logged_delivery_time` constitute the difference between `pickup_at` and `delivery_at`. Without the `pickup_at` corrections above, not 91% but only 86% of the differences would work. 
`Order.time_to_delivery` and `Order.time_at_delivery` model that in the ORM.assert not (orders["logged_to_delivery_time"] < 0).any() assert not (orders["logged_delivery_time"] < 0).any() all_data_available = ( orders["logged_to_delivery_time"].notnull() & orders["logged_delivery_time"].notnull() & orders["delivery_at"].notnull() & orders["pickup_at"].notnull() ) good_data = ( orders["logged_to_delivery_time"] + orders["logged_delivery_time"] - ((orders["delivery_at"] - orders["pickup_at"]).dt.total_seconds().round()) ).abs() <= 5 delivery_not_confirmed = orders["delivery_not_confirmed"] == True assert (all_data_available & good_data).sum() == 572_609 assert (all_data_available & good_data & completed).sum() == 572_609 assert (all_data_available & (good_data | delivery_not_confirmed)).sum() == 581_700 round( (all_data_available & (good_data | delivery_not_confirmed)).sum() / all_data_available.sum(), 3, )`courier_waited_at_delivery_issue` is filled in whenever the courier needed to wait for the customer at delivery. It is also filled in if the courier forgot to confirm the delivery, which mostly happened at the end of a shift. If a courier needed to wait for more than 45 minutes, that is summarized as 'waiting about 1, 2, or 3 hours.'orders["courier_waited_at_delivery_issue"].value_counts().sort_index()We convert `courier_waited_at_delivery_issue` into `courier_waited_at_delivery` to validate it further below.waited_at_delivery = ( orders["courier_waited_at_delivery_issue"] .str.replace(r"\D+", "", regex=True) .fillna("NaN") .replace("", "NaN") .astype(float) .astype("Int64") ) orders["courier_waited_at_delivery"] = pd.NA orders["courier_waited_at_delivery"] = orders["courier_waited_at_delivery"].astype( "Int64" ) hours = orders["courier_waited_at_delivery_issue"].str.contains("hour").fillna(0) orders.loc[hours, "courier_waited_at_delivery"] = ( 60 * 60 * waited_at_delivery.loc[hours] ) mins = orders["courier_waited_at_delivery_issue"].str.contains("minutes").fillna(0) orders.loc[mins, "courier_waited_at_delivery"] = 60 * waited_at_delivery.loc[mins] customer_late = orders["courier_waited_at_delivery_issue"].notnull() del orders["courier_waited_at_delivery_issue"] assert hours.sum() == 254 assert mins.sum() == 30_512For the roughly 9% of orders where `logged_to_delivery_time` and `logged_delivery_time` do not explain `delivery_at`, the latter is corrected. 
However, this is only done if the courier did not have to wait for the customer or forgot to confirm the delivery, which wrongly shows up as waiting for the customer as well.calc_delivery_at = ( orders["pickup_at"] + orders["logged_to_delivery_time"].map( lambda x: datetime.timedelta(seconds=x) if x is not pd.NA else pd.NaT ) + orders["logged_delivery_time"].map( lambda x: datetime.timedelta(seconds=x) if x is not pd.NA else pd.NaT ) ) del orders["logged_delivery_time"] orders["delivery_at_orig"] = orders["delivery_at"] msk = ( all_data_available & ~good_data & (~customer_late | (customer_late & delivery_not_confirmed)) ) orders.loc[msk, "delivery_at"] = calc_delivery_at.loc[msk] orders.loc[msk, "delivery_at_corrected"] = True assert (all_data_available & ~good_data).sum() == 64_543 assert (all_data_available & ~good_data & ~customer_late).sum() == 49_122 assert ( all_data_available & ~good_data & customer_late & delivery_not_confirmed ).sum() == 5_241 assert (all_data_available & ~good_data & delivery_not_confirmed).sum() == 9_091With `logged_to_delivery_time` we calculate a new timestamp `reached_delivery_at`.to_delivery_time = orders["logged_to_delivery_time"].map( lambda x: datetime.timedelta(seconds=x) if x is not pd.NA and x > 0 else pd.NaT ) reached_delivery_at = orders["pickup_at"] + to_delivery_time del orders["logged_to_delivery_time"] orders["reached_delivery_at"] = pd.NaT msk = ( completed & reached_delivery_at.notnull() & (reached_delivery_at < orders["delivery_at"]) ) orders.loc[msk, "reached_delivery_at"] = reached_delivery_at.loc[msk] assert msk.sum() == 608_160Some `left_pickup_at` values conflict with that and are discarded.msk = orders["left_pickup_at"] >= orders["reached_delivery_at"] orders.loc[msk, "left_pickup_at"] = pd.NaT orders.loc[msk, "left_pickup_at_corrected"] = True assert msk.sum() == 4_215`logged_delivery_late_time` is the difference between `scheduled_delivery_at` and `delivery_at` for pre-orders. `Order.delivery_early` and `Order.delivery_late` model that in the ORM.all_data_available = ( scheduled & orders["logged_delivery_late_time"].notnull() & orders["delivery_at"].notnull() & orders["scheduled_delivery_at"].notnull() ) good_data = ( orders["logged_delivery_late_time"] - ( (orders["delivery_at"] - orders["scheduled_delivery_at"]) .dt.total_seconds() .round() ) ).abs() <= 5 assert (all_data_available & good_data).sum() == 73_658 assert (all_data_available & good_data & completed).sum() == 73_658 assert (all_data_available & (good_data | delivery_not_confirmed)).sum() == 73_659 round( (all_data_available & (good_data | delivery_not_confirmed)).sum() / all_data_available.sum(), 3, )`expected_delivery_time` is simply the difference between `placed_at` and `scheduled_delivery_at`, for both ad-hoc and pre-orders. So, the field provides no new information.all_data_available = ( orders["expected_delivery_time"].notnull() & orders["placed_at"].notnull() & orders["scheduled_delivery_at"].notnull() ) good_data = ( orders["expected_delivery_time"] - ( (orders["scheduled_delivery_at"] - orders["placed_at"]) .dt.total_seconds() .round() ) ).abs() <= 5 del orders["expected_delivery_time"] assert (all_data_available & good_data).sum() == 74_386 round((all_data_available & good_data).sum() / all_data_available.sum(), 3)`courier_waited_at_delivery` can be mostly explained as the difference between `reached_delivery_at` and `delivery_at`. 
`Order.courier_waited_at_delivery` models that in the ORM.all_data_available = ( orders["courier_waited_at_delivery"].notnull() & orders["delivery_at"].notnull() & orders["reached_delivery_at"].notnull() ) good_data = ( orders["courier_waited_at_delivery"] - ( (orders["delivery_at"] - orders["reached_delivery_at"]) .dt.total_seconds() .round() ) ).abs() <= 90 imprecise_wait_times = orders["courier_waited_at_delivery"].fillna(0) >= 45 * 60 assert (all_data_available & good_data).sum() == 26_268 assert (all_data_available & good_data & completed).sum() == 26_268 assert (all_data_available & (good_data | imprecise_wait_times)).sum() == 26_499 round( (all_data_available & (good_data | imprecise_wait_times)).sum() / all_data_available.sum(), 3, )We keep `courier_waited_at_delivery` as a boolean field here to be used by `Order.courier_waited_at_delivery`.msk = orders["delivery_at"].notnull() & orders["courier_waited_at_delivery"].notnull() orders["courier_waited_at_delivery"] = pd.NA orders.loc[orders["delivery_at"].notnull(), "courier_waited_at_delivery"] = False orders.loc[msk, "courier_waited_at_delivery"] = True assert orders["courier_waited_at_delivery"].sum() == msk.sum() == 30_658Statistical Columns Keep the columns that log the courier's speed.orders = orders.rename( columns={ "logged_avg_courier_speed": "logged_avg_speed", "logged_avg_courier_speed_distance": "logged_avg_speed_distance", } ) unrealistic = orders["logged_delivery_distance"] > 12_000 orders.loc[unrealistic, "logged_delivery_distance"] = pd.NA assert unrealistic.sum() == 17Clean Dataorders = orders[ [ # Generic columns "delivery_id", "customer_id", "placed_at", "ad_hoc", "scheduled_delivery_at", "scheduled_delivery_at_corrected", "first_estimated_delivery_at", "cancelled", "cancelled_at", "cancelled_at_corrected", # Price related columns "sub_total", "delivery_fee", "total", # Restaurant related columns "restaurant_id", "restaurant_notified_at", "restaurant_notified_at_corrected", "restaurant_confirmed_at", "restaurant_confirmed_at_corrected", "estimated_prep_duration", "estimated_prep_duration_corrected", "estimated_prep_buffer", # Dispatch related columns "courier_id", "dispatch_at", "dispatch_at_corrected", "courier_notified_at", "courier_notified_at_corrected", "courier_accepted_at", "courier_accepted_at_corrected", "utilization", # Pickup related columns "pickup_address_id", "reached_pickup_at", "pickup_at", "pickup_at_corrected", "pickup_not_confirmed", "left_pickup_at", "left_pickup_at_corrected", # Delivery related columns "delivery_address_id", "reached_delivery_at", "delivery_at", "delivery_at_corrected", "delivery_not_confirmed", "courier_waited_at_delivery", # Statistical columns "logged_delivery_distance", "logged_avg_speed", "logged_avg_speed_distance", ] ].sort_index() orders.head() orders.info() for column in orders.columns: if column.endswith("corrected"): print(column, (orders[column] == True).sum()) assert ( hashlib.sha256(orders.to_json().encode()).hexdigest() == "c548084f094bd220f3aff7e9b7072a4964127f6962dffd54f21c8d1f5b846a7f" )All couriers had at least one order.assert set(couriers.reset_index()["id"]) == set( orders.loc[orders["courier_id"].notnull(), "courier_id"].unique() )Only keep restaurants that had at least one order.restaurants = restaurants.reset_index() msk = restaurants["id"].isin(orders["restaurant_id"].unique()) restaurants = restaurants[msk].set_index("id") assert (~msk).sum() == 6Only keep addresses with pickups or deliveries.addresses = addresses.reset_index() msk = 
addresses["id"].isin( set(restaurants["address_id"]) | set(orders["pickup_address_id"]) | set(orders["delivery_address_id"]) ) addresses = addresses[msk].set_index("id") assert (~msk).sum() == 100 discarded_addresses = set(addresses["primary_id"]) - set(addresses.reset_index()["id"]) for old_primary_id in set(addresses["primary_id"]) - set(addresses.reset_index()["id"]): msk = addresses["primary_id"] == old_primary_id new_primary_id = addresses[msk].index.min() addresses.loc[msk, "primary_id"] = new_primary_idStore the Resultsconfig.CLEAN_SCHEMA cities.to_sql( "cities", con=connection, schema=config.CLEAN_SCHEMA, if_exists="append", index=True, ) addresses.to_sql( "addresses", con=connection, schema=config.CLEAN_SCHEMA, if_exists="append", index=True, ) restaurants.to_sql( "restaurants", con=connection, schema=config.CLEAN_SCHEMA, if_exists="append", index=True, ) couriers.to_sql( "couriers", con=connection, schema=config.CLEAN_SCHEMA, if_exists="append", index=True, ) customers = pd.DataFrame({"id": orders["customer_id"].unique()}).set_index("id") customers.to_sql( "customers", con=connection, schema=config.CLEAN_SCHEMA, if_exists="append", index=True, ) orders.to_sql( "orders", con=connection, schema=config.CLEAN_SCHEMA, if_exists="append", index=True, )Exemplos práticos de otimização de portfólio com computação clássica e quântica Esse notebook é um suporte ao nosso artigo na RBEF. "Link do artigo aqui" Portfólio ou CarteiraA **carteira** ou **portfólio** de um investidor é basicamente o seu investimento em diferentes tipos de ativos.Por exemplo, se você tem investimentos (ações) em 5 empresas, digamos: Google, Amazon, Tesla, Petrobras e Vale. Essas 5 empresas compõem sua carteira de investimentos. Ativos, Retorno e Risco**Ativos** pode ser de vários tipos. São exemplos de ativos (lista não euxastiva):- Ações- Criptomoedas (e. g. Bitcoin - BTC)- Dinheiro- Cota de fundos de investimento- Títulos do TesouroNormalmente, quando você constrói um portfólio, é aconselhável diversificar seus ativos ou comprar diferentes tipos de ativos de diferentes empresas. Para todos os ativos, você obterá lucro após um período de tempo especificado. No entanto, o lucro pode não ser o mesmo para cada investimento feito.Esse lucro, ou prejuízo, é o que chamamos de **retorno**. Por exemplo, você obterá retornos de ações quando o valor de mercado subir e/ou com a distribuição de lucros "em dinheiro" na forma de dividendos.Há diversos aspectos de **risco**, como falência das empresas, recuperação judicial, risco regulatório etc. Para o contexto de gestão de portfólio interessa o risco associado às flutuações no valor do ativo, ou volatilidade do ativo. Para certos ativos, seu valor é altamente volátil, ou seja, o valor flutua bastante, e. g. Bitcoin. Enquanto alguns outros ativos, como títulos do governo e certas ações são mais estáveis, sendo relativamente mais resistentes às condições de mercado, mas podem dar retornos menores em comparação com aqueles de alto risco. Otimização de portfólioUm bom portfólio é aquele que nos dá o máximo retorno com o mínimo de risco. Porém, como decidimos, entre as infinitas combinações possíveis de carteiras, aquela que é ótima? 
In other words, how is portfolio optimization carried out?
Modern Portfolio Theory, formulated by Harry Markowitz and also known as mean-variance analysis, is a mathematical framework that allows an investor to maximize returns for a given level of risk. The theory assumes that all investors are risk-averse: given a choice between a low-risk and a high-risk portfolio with the same returns, an investor will choose the low-risk one.
Broadly speaking, the theory encourages diversification of assets. It says that a high-variance asset A, when combined with assets B and C, where A, B and C have little or no correlation with each other, can give us a portfolio with a low variance of returns. This is one crucial point of the theory. Another crucial point is that it shows that seeking higher returns means exposing yourself to more risk (volatility).
Efficient Frontier
We know that each asset in a portfolio has its own expected rate of return and risk. It is possible to create many combinations of assets that provide high returns for a predefined level of risk. Likewise, there may be several portfolios that offer the lowest risk for a predefined expected return. The efficient frontier is determined from the plot of 'return' (Y) versus 'volatility' (X). It shows the set of optimal portfolios that offer the highest expected return for a given level of risk, or the lowest risk for a given level of expected return. Portfolios that lie below the efficient frontier are sub-optimal because they do not provide enough return for their level of risk, or carry more risk for the given rate of return. See the illustrative plot below, which we will reproduce later on.
![fronteira.jpeg](fronteira.jpeg)
Fundamental Concepts in Portfolio Optimization
- **Variance** The variance of a company's stock prices is an important indicator of how volatile that investment will be, i.e., how much its returns can fluctuate. $\sigma^2 = \frac{1}{N-1} \sum_{i = 1}^N (x_i - \bar{x})^2$
- **Volatility** Volatility is measured as the standard deviation of a company's stock returns: $\sigma = \sqrt{ \frac{1}{N-1} \sum_{i = 1}^N (x_i - \bar{x})^2 }$
- **Annual Volatility** The square root of the variance gives the daily standard deviation (the data are daily). To convert it to an annual standard deviation, we multiply the daily standard deviation by $\sqrt{250}$ ($250$ being the number of trading days in a year); equivalently, the variance is multiplied by $250$.
- **Covariance** Covariance measures the directional relationship between the returns of two assets. A positive covariance means that the **returns** of the two assets move together (both fall or both rise), while a negative covariance means they move inversely (when one rises, the other tends to fall, and vice versa).
The risk (taken here as the volatility) of a portfolio can be reduced by combining assets that have a negative covariance.
- **Correlation** Correlation, in finance and investing, is a statistic that measures the degree to which two securities move in relation to each other. Correlations are used in advanced portfolio management and are computed as the correlation coefficient, which takes a value between $-1$ and $+1$. You can think of correlation as a normalized version of covariance, whose values are constrained to lie in $[-1,+1]$. A correlation of $-1$ means a negative relationship: if the correlation between asset A and asset B is $-1$ and asset A rises, asset B falls. A correlation of $+1$ means a positive relationship: if the correlation between asset A and asset B is $+1$ and asset A rises, asset B rises as well. A correlation of $0$ means no relationship: if the correlation between asset A and asset B is $0$, the movement of one says nothing about the movement of the other.
- **Expected returns** The expected return of an asset is simply the mean of the percentage change in its stock price. Therefore, the expected return values we obtain here are daily expected returns. For an expected annual return, you will need to resample the data yearly, as you will see further on. To compute the portfolio's expected return you also need to define weights for the chosen assets. In simpler terms, this means deciding what percentage of your total money you want to hold in each company's stock.
- **Weights** The weights represent the percentage allocation of the investment across the assets in the portfolio. They must sum to 1 (100% of the invested amount). Below we will draw random weight vectors to search for the optimal portfolio. The portfolio optimization problem is therefore nothing more than finding the optimal weights that maximize the expected return while minimizing the risk (standard deviation == volatility).
- **Sharpe Ratio (SR)** This ratio is the average return earned in excess of the risk-free rate per unit of volatility, or total risk. As already seen, volatility is a measure of the price fluctuations of an asset or portfolio. The risk-free rate of return is the return of an investment with zero risk, i.e., the return investors could expect while taking no risk. The optimal risky portfolio is the one with the highest Sharpe Ratio. $ SR = \frac{R_p - R_f}{\sigma_p}, $ where $R_p$ is the portfolio return, $R_f$ is the risk-free rate and $\sigma_p$ is the portfolio's standard deviation (volatility). In the code below, to find the portfolio with the maximum Sharpe Ratio, we use three risk-free rates of return (three qualitatively distinct scenarios): the savings-account (poupança) rate, the CDI, and the IPCA accumulated over the last 12 months.
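As a quick numerical illustration of the Sharpe Ratio formula (the numbers are made up, not market data): a portfolio with an 18% annual return and 25% annualized volatility, evaluated against the 2.86% CDI scenario used further below, gives a Sharpe Ratio of roughly 0.61.
R_p, R_f, sigma_p = 0.18, 0.0286, 0.25  # illustrative values only
SR = (R_p - R_f) / sigma_p
round(SR, 3)  # 0.606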
Otimização Clássica Irrestrita#Install some packges !pip install yfinance # Useful packages import numpy as np import pandas as pd from pandas_datareader import data as pdr import yfinance as yfin yfin.pdr_override() # import matplotlib.pyplot as plt # Read Data # Tesla (TSLA), Facebook (FB), Magazine Luiza e Vale do Rio Doce # Obs: o símbolo das ações brasileiras precisam de um ".SA" para a API funcionar listOfstocks = ['TSLA', 'FB', 'MGLU3.SA', 'VALE3.SA'] test = pdr.get_data_yahoo(listOfstocks, start="2020-01-01", end="2021-09-20") test.head() # Escolhendo apenas o preço de fechamento (Close price) test = test['Close'] test.head() # Plot # Note a diferença de escala dos valores das ações. Uma prática corriqueira é tomar o "log". # Note ainda que as ações brasileiras estão cotadas em Reais e as americanas em Dólar. test.plot(); # log testLog = np.log(test) testLog.plot(); # Vamos escolher apenas ações brasileiras por simplicidade e nos últimos 5 anos #ativos = ['BRKM5.SA', 'ITUB4.SA', 'PETR3.SA', 'VALE3.SA', 'MGLU3.SA', 'KLBN4.SA'] ativos = ['BRKM5.SA', 'ITUB4.SA', 'VALE3.SA', 'KLBN4.SA'] # Braskem, Itaú, Vale e Kablin -> cotação em Reais (R$) #ativos = ['BRKM5.SA', 'ITUB4.SA', 'VALE3.SA', 'CIEL3.SA'] # Braskem, Itaú, Vale e Kablin -> cotação em Reais (R$) #ativos = ['AAPL', 'NKE', 'GOOGL', 'AMZN'] # Apple, Nike, Google e Amazon -> cotação em dólar # df = pdr.get_data_yahoo(ativos, start="2016-01-01", end="2021-09-20") df = df['Close'] # só preço de fechamento # #df.head() #df.plot(title = 'Preço de Fechameto dos Ativos'); df.plot(figsize=[10,10]) plt.xlabel('Data', fontsize=25) plt.ylabel('Preço de Fechamento',fontsize=25) plt.xticks(fontsize=20) plt.yticks(fontsize=20) plt.legend(loc = 'best', fontsize=18); plt.show() np.log(df+1).plot(title = 'Log (Preço)'); # Matriz de covariança #cov_matrix = df.pct_change().apply(lambda x: np.log(1+x)).cov() # Log of percentage change cov_matrix = df.pct_change().cov() # Sem log of percentage change cov_matrix = cov_matrix.dropna() cov_matrix # plot da Matriz de covariança plt.imshow(cov_matrix, interpolation='nearest') plt.colorbar() plt.show() # Matriz de correlação #corr_matrix = df.pct_change().apply(lambda x: np.log(1+x)).corr() corr_matrix = df.pct_change().corr() corr_matrix = corr_matrix.dropna() corr_matrix # plot da Matriz de correlação plt.imshow(corr_matrix, interpolation='nearest') plt.colorbar() plt.show() # Retorno anual ind_er = df.resample('Y').last().pct_change().mean() ind_er # Volatilidade # A volatilidade é dada pelo desvio padrão anual. Multiplicamos a volatilidade diária por 250 porque há 250 dias de negociação / ano. 
#ann_sd = df.pct_change().apply(lambda x: np.log(1+x)).std().apply(lambda x: x*np.sqrt(250)) ann_sd = df.pct_change().std().apply(lambda x: x*np.sqrt(250)) ann_sd vol = df.pct_change().std().apply(lambda x: x*np.sqrt(250)) vol assets = pd.concat([ind_er, ann_sd], axis=1) # Creating a table for visualising returns and volatility of assets assets.columns = ['Returno', 'Risco'] assets p_ret = [] # Define uma "empty list" para os retornos do portfolio p_vol = [] # Define uma "empty list" para a volatilidade do portfolio p_weights = [] # Define uma "empty list" para os pesos dos ativos no portfolio num_assets = len(df.columns) num_portfolios = 20000 for portfolio in range(num_portfolios): weights = np.random.random(num_assets) #Escolhendo pesos aleatórios para os ativos weights = weights/np.sum(weights) p_weights.append(weights) returns = np.dot(weights, ind_er) #Retornos: produto entre os retornos esperados individuais dos ativos e seus pesos p_ret.append(returns) var = cov_matrix.mul(weights, axis=0).mul(weights, axis=1).sum().sum() #Variância do portfolio sd = np.sqrt(var) #Desvio padrão diário ann_sd = sd*np.sqrt(250) #Desvio padrão anual == volatilidade p_vol.append(ann_sd) data = {'Retorno':p_ret, 'Risco':p_vol} for counter, symbol in enumerate(df.columns.tolist()): #print(counter, symbol) data[symbol+' weight'] = [w[counter] for w in p_weights] portfolios = pd.DataFrame(data) portfolios.head(n = 10) # Visualizar os n primeiros portfólio dos 10000 portfolios criados com pesos aleatórios # Plot da Fronteira Eficiente portfolios.plot.scatter(x='Risco', y='Retorno', c='green', marker='o', s=10, alpha=0.15, grid=True, figsize=[10,10]); min_vol_port = portfolios.iloc[portfolios['Risco'].idxmin()] # idxmin() retorna o valor mínimo na coluna especificada min_vol_port max_ret_port = portfolios.iloc[portfolios['Retorno'].idxmax()] # idxmax() retorna o valor máximo na coluna especificada max_ret_port # Portfólio ótimo (melhor Sharpe Ratio) # lucro livre de risco (risk factor). Poupança últimos 12 meses ~ 1.56% rf0 = 0.0156 opt_ris_por0 = portfolios.iloc[((portfolios['Retorno']-rf0)/portfolios['Risco']).idxmax()] opt_ris_por0 # Portfólio ótimo (melhor Sharpe Ratio) # lucro livre de risco (risk factor). CDI/SELIC últimos 12 meses ~ 2.86% rf1 = 0.0286 opt_ris_por1 = portfolios.iloc[((portfolios['Retorno']-rf1)/portfolios['Risco']).idxmax()] opt_ris_por1 # Portfólio ótimo (melhor Sharpe Ratio) # lucro livre de risco (risk factor). 
IPCA últimos 12 meses ~ 10.06% # pode ser pensado como CBDs de 10% de retorno anual protegido pelo fundo garantidor de crédito (FGC) rf2 = 0.1006 opt_ris_por2 = portfolios.iloc[((portfolios['Retorno']-rf2)/portfolios['Risco']).idxmax()] opt_ris_por2 # Plotando o portfólio com volatilidade mínima e o com lucro máximo plt.subplots(figsize=[10,10]) xx = portfolios['Risco'] yy = portfolios['Retorno'] plt.scatter(xx, yy , c='g', marker='o', s=15, alpha=0.2, label = 'Portfólios'); plt.scatter(min_vol_port[1], min_vol_port[0], c='b', marker='v', s=150, alpha=0.75, label = 'Risco Mínimo'); plt.scatter(max_ret_port[1], max_ret_port[0], c='r', marker='^', s=150, alpha=0.75, label = 'Retorno Máximo'); plt.scatter(opt_ris_por0[1], opt_ris_por0[0], c='m', marker='o', s=150, alpha=0.75, label = 'Fator de Risco (Poupança)'); plt.scatter(opt_ris_por1[1], opt_ris_por1[0], c='0', marker='s', s=150, alpha=0.75, label = 'Fator de Risco (CDI)'); plt.scatter(opt_ris_por2[1], opt_ris_por2[0], c='y', marker='*', s=150, alpha=0.75, label = 'Fator de Risco (IPCA)'); plt.xlabel('Risco', fontsize=25) plt.ylabel('Retorno',fontsize=25) plt.xticks(fontsize=18) plt.yticks(fontsize=18) plt.legend(loc = 'best', fontsize=18); new_cov = cov_matrix.to_numpy() new_cov ind_er.to_numpy() mu = np.divide(ind_er.to_numpy() - rf0, vol.values) muMétodos de Computação Quântica - Qiskit%%capture !pip install qiskit from qiskit import Aer from qiskit.circuit.library import TwoLocal from qiskit.aqua import QuantumInstance from qiskit.finance.applications.ising import portfolio from qiskit.optimization.applications.ising.common import sample_most_likely from qiskit.finance.data_providers import RandomDataProvider from qiskit.aqua.algorithms import VQE, QAOA, NumPyMinimumEigensolver from qiskit.aqua.components.optimizers import COBYLA #import numpy as np #import matplotlib.pyplot as plt #import datetimeOtimização Revisitada - Abordagem restrita bináriaResolver o seguinte problema de otimização: \begin{split}\begin{aligned} \min_{x \in \{0, 1\}^n} q x^T \sigma x - \mu^T x\\ \text{com a restrição: } 1^T x = B \end{aligned}\end{split}onde:- $x \in \{0,1\}^n$ denota o vetor de variáveis ​​de decisão binárias, que indicam quais ativos escolher ($x = 1$) e quais não escolher ($x =0$),- $\mu \in \mathbf{R}^n$ define os retornos esperados para os ativos, - $\sigma \in \mathbf{R}^{n\times n}$ especifica a matriz de covariância entre os ativos,- $q>0$ controla o apetite de risco do gestor do portfólio,- e $B$ denota o orçamento, neste caso, o número de ativos a serem selecionados dentre $n$ disponíveis.Observações:1 - simplificações assumidas: i) todos os ativos tem o mesmo preço normalizado como $1$ (pensando no log dos preços, não parece uma simplificação tão absurda), ii) o orçamento total deve ser gasto, ou seja, é necessário selecionar exatamente $B$ ativos. 2- $q$ pode ser visto como uma medida do apetite ao risco pois, ao aumentar $q$, precisamos de maiores retornos para o alcançar o mínimo da função objetiva, o que, pela Fronteira Eficiente, exige maiores riscos!3- A restrição $1^T x = B$ pode ser imaginada como oriunda de um termo de penalidade do tipo $\alpha (1^T x - B)^2$ incluído explicitamente na função objetiva. 
Ou seja, o problema com a restrição acima é idêntico à otimização do seguinte:\begin{split}\begin{aligned} \min_{x \in \{0, 1\}^n} q x^T \sigma x - \mu^T x + \alpha (1^T x - B)^2 \end{aligned}\end{split} pois considerando $\alpha >0$, a menor contribuição do termo de regularização ocorre quando $1^T x = B$.4- O problema resultante pode ser mapeado para um hamiltoniano cujo estado fundamental corresponde à solução ótima.Reference: https://qiskit.org/documentation/finance/tutorials/01_portfolio_optimization.htmlEssa secção mostra como usar o Variational Quantum Eigensolver (VQE) e o Quantum Approximate Optimization Algorithm (QAOA) para encontrar a solução ideal para um determinado conjunto de parâmetros.mu = ind_er.to_numpy() # retorno sigma = cov_matrix.to_numpy() # covariança q = 0.75 # set risk factor budget = num_assets // 2 # set budget penalty = num_assets # set parameter to scale the budget penalty term # qubitOp, offset = portfolio.get_operator(mu, sigma, q, budget, penalty) # Imprimir resultado de forma elegante def index_to_selection(i, num_assets): s = "{0:b}".format(i).rjust(num_assets) x = np.array([1 if s[i]=='1' else 0 for i in reversed(range(num_assets))]) return x def print_result(result): selection = sample_most_likely(result.eigenstate) value = portfolio.portfolio_value(selection, mu, sigma, q, budget, penalty) print('Optimal: selection {}, value {:.4f}'.format(selection, value)) eigenvector = result.eigenstate if isinstance(result.eigenstate, np.ndarray) else result.eigenstate.to_matrix() probabilities = np.abs(eigenvector)**2 i_sorted = reversed(np.argsort(probabilities)) print('\n----------------- Full result ---------------------') print('selection\tvalue\t\tprobability') print('---------------------------------------------------') for i in i_sorted: x = index_to_selection(i, num_assets) value = portfolio.portfolio_value(x, mu, sigma, q, budget, penalty) probability = probabilities[i] print('%10s\t%.4f\t\t%.4f' %(x, value, probability)) # Solução "clássica" com Numpy exact_eigensolver = NumPyMinimumEigensolver(qubitOp) result = exact_eigensolver.run() print_result(result)Optimal: selection [1 0 0 1], value -0.6146 ----------------- Full result --------------------- selection value probability --------------------------------------------------- [1 0 0 1] -0.6146 1.0000 [1 1 1 1] 15.2160 0.0000 [0 1 1 1] 3.5294 0.0000 [1 0 1 1] 3.2719 0.0000 [0 0 1 1] -0.4142 0.0000 [1 1 0 1] 3.3294 0.0000 [0 1 0 1] -0.3569 0.0000 [0 0 0 1] 3.6996 0.0000 [1 1 1 0] 3.5154 0.0000 [0 1 1 0] -0.1707 0.0000 [1 0 1 0] -0.4283 0.0000 [0 0 1 0] 3.8860 0.0000 [1 1 0 0] -0.3710 0.0000 [0 1 0 0] 3.9432 0.0000 [1 0 0 0] 3.6854 0.0000 [0 0 0 0] 16.0000 0.0000QAOA - Qiskitbackend = Aer.get_backend('statevector_simulator') seed = 50 cobyla = COBYLA() cobyla.set_options(maxiter=250) qaoa = QAOA(qubitOp, cobyla, 3) qaoa.random_seed = seed quantum_instance = QuantumInstance(backend=backend, seed_simulator=seed, seed_transpiler=seed) result = qaoa.run(quantum_instance) print_result(result)/usr/local/lib/python3.7/dist-packages/qiskit/utils/deprecation.py:62: DeprecationWarning: Using a qobj for run() is deprecated as of qiskit-aer 0.9.0 and will be removed no sooner than 3 months from that release date. Transpiled circuits should now be passed directly using `backend.run(circuits, **run_options). return func(*args, **kwargs)Observe como o "solver" penaliza carteiras com número de ações diferentes do orçamento == 2. Por exemplo, veja o valor da função objetivo para [0 0 0 0] e [1 1 1 1]. 
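Before moving on to VQE, it can help to evaluate the penalized objective directly for a few candidate selections, to see the budget penalty at work outside the solver. This is only a sketch: it reuses the `mu`, `sigma`, `q`, `budget` and `penalty` defined above together with the penalty form $\alpha (1^T x - B)^2$ discussed earlier, so the values may differ from Qiskit's `portfolio_value` output by a constant offset.
import numpy as np

def penalized_objective(x, mu, sigma, q, budget, penalty):
    # q * x^T sigma x - mu^T x + penalty * (sum(x) - budget)^2
    x = np.asarray(x)
    return q * x @ sigma @ x - mu @ x + penalty * (x.sum() - budget) ** 2

for x in ([1, 0, 0, 1], [0, 0, 0, 0], [1, 1, 1, 1]):
    print(x, round(penalized_objective(x, mu, sigma, q, budget, penalty), 4))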
VQE - Qiskitbackend = Aer.get_backend('statevector_simulator') seed = 50 cobyla = COBYLA() cobyla.set_options(maxiter=500) ry = TwoLocal(qubitOp.num_qubits, 'ry', 'cz', reps=3, entanglement='full') vqe = VQE(qubitOp, ry, cobyla) vqe.random_seed = seed quantum_instance = QuantumInstance(backend=backend, seed_simulator=seed, seed_transpiler=seed) result = vqe.run(quantum_instance) print_result(result)/usr/local/lib/python3.7/dist-packages/qiskit/utils/deprecation.py:62: DeprecationWarning: Using a qobj for run() is deprecated as of qiskit-aer 0.9.0 and will be removed no sooner than 3 months from that release date. Transpiled circuits should now be passed directly using `backend.run(circuits, **run_options). return func(*args, **kwargs)Observe como o "solver" também penaliza carteiras com número de ações diferentes do orçamento == 2. Por exemplo, verifique o valor da função objetivo para [0 0 0 0] e [1 1 1 1]. Métodos de Otimização Quânticos - Cimatec QAOAO QAOA é um algoritmo variacional da computação quântica, desenvolvido por \cite{faihi2014}, adequado para resolver problemas de otimização combinatória. Nesta seção, apresentaremos um problema de otimização combinatória que pode ser modelado como um modelo ferromagnético popular na mecânica estatística: o \textit{Modelo de Ising} (ou \textit{Hamiltoniano de Ising}). O algoritmo consiste em modelar a função objetivo (F.O.) como um \textit{Hamiltoniano de Ising} e transformá-la em operadores unitários, aplicar tais operadores em um estado de entrada, medir os valores esperados, atualizar os parâmetros e repetir o processo até que a função custo seja minimizada. Quando a energia da F.O. é minimizada, a distribuição de probabilidades (obtida após a medição da função de onda $|\psi\rangle$ com os parâmetros ótimos) apresenta uma ocorrência maior do autoestado de $|\psi\rangle$ que fornece a melhor configuração para o problema. Modelo de IsingO modelo do comportamento de materiais ferromagnéticos pode ser descrito pelo \textit{Hamiltoniano de Ising}. O modelo representa uma cadeia de partículas de spin 1/2, que interagem em pares, sendo que cada constituinte está sujeito à um campo magnético de magnitude $h_i$. Como partículas de spin 1/2 podem assumir apenas duas direções de momento de dipolo ("up" e "down") a interação entre cada par de partículas fornece uma energia (positiva ou negativa) de acordo com as orientações dos spins. Para encontrar a configuração dos momentos de dipolo que fornece o menor valor de energia possível do sistema, a seguinte função objetivo deve ser minimizada: \begin{equation} C(z) = - \sum_{}^{}z_iz_j - \sum_{i}^{}h_iz_i .\end{equation} As variáveis $z_i$ da função objetivo podem assumir apenas valores unitários, sejam eles positivos ou negativos, ou seja, $z_i \in \{-1,+1\}$. As variáveis podem ser escritas como operadores de Pauli-Z ($\sigma_z$), já que $\pm 1$ são autovalores de $\sigma_z$, associados às autofunções de spin das partículas. Operador de FasePara que seja possível mapear um problema de variáveis binárias $z_i$ para um problema de otimização de autovalores, uma estratégia é substituir as variáveis $z_i \in \{-1,+1\}$ por operadores unitários (que possuem autovalores $\lambda \in \{-1,+1\}$). Diante disso, é possível criar um Operador de Fase que representa as interações entre as partículas adjacentes, bem como a resposta das partículas ao campo magnético aplicado. Trata-se de escrever a função objetivo do problema como um operador unitário para que possa ser aplicada diretamente no circuito. 
Como visto, o operador descrito na Equação \ref{fo_ising} é não-unitário, visto que não satisfaz o critério da Equação 7. Contudo, cada termo da soma (negligenciando os coeficientes) é um operador unitário. Para descrever este Hamiltoniano define-se o Operador Gamma através da relação\begin{equation} U(C,\gamma)=e^{-i\gamma C(Z)}=e^{i\gamma\sum_{}^{}Z_iZ_j + i\gamma\sum_{i}^{}h_iZ_i} .\end{equation}Como $C(z)$ é expressa em termos dos operadores Pauli-Z e o produto tensorial entre eles geram matrizes diagonais, $U(C,\gamma)$ também é uma matriz diagonal (porém unitária), onde o parâmetro $\gamma$ é um dos parâmetros variacionais usados no QAOA. Desse modo, o operador $U(C,\gamma)$ pode ser escrito como a operação unitária\begin{equation} U(C,\gamma)=\prod_{}^{}e^{i\gamma Z_iZ_j}\prod_{i=0}^{N}e^{i\gamma h_iZ_i} .\end{equation}Esse operador (assim como produtos tensoriais entre matrizes de Pauli-Z) apresenta a forma de uma matriz diagonal, onde os elementos da diagonal são exponenciais complexas que adicionam fases no estado quântico no qual é aplicado. Operador de MisturaO operador $U(B,\beta)$ é aplicado com o objetivo de misturar as amplitudes (gerando interferências destrutivas e construtivas) possibilitando explorar melhor o espaço de busca e aumentando as chances de encontrar, com maior precisão, o ponto ótimo global da função objetivo. O Operador de Mistura pode ser descrito, matematicamente, como\begin{equation} U(B,\beta) = e^{-i\beta \sum_{j=0}^{N} X_j} \equiv \prod_{j=0}^{N}e^{-i\beta X_j}.\end{equation}Tal operador pode ser construído utilizando apenas operações de rotação em \textit{qubits} individuais do tipo RX. Contudo,a eficiência desse operador está diretamente ligada ao tempo de convergência e à acurácia do algoritmo. A Equação \ref{mix} descreve o operador em sua forma mais simples, representado apenas como rotações em torno do eixo $x$ da \textit{Esfera de Bloch}. Função objetivo do QAOA: Medindo Valores EsperadosAs operações unitárias apresentadas nas subseções anteriores podem ser representadas como diagramas de circuitos quânticos. No QAOA, cada conjunto das operações em questão são conhecidas como \textit{layers}. A $i$-ésima \textit{layer} do circuito pode ser escrita como\begin{equation} U(\gamma_i,\beta_i) = U(B,\beta_i)U(C,\gamma_i),\end{equation}e, portanto, a sequência desses operadores representa o operador geral do QAOA. Quanto maior o número de \textit{layers}, maior o número de parâmetros do circuito e, consequentemente, maior a acurácia do resultado. Assim como no VQE, a função objetivo do QAOA também é um valor esperado. A diferença entre eles é que, no VQE, utiliza-se um ansatz genérico e no QAOA, por sua vez, o ansatz é construído à partir da função objetivo. A atuação do operador geral do QAOA sobre o estado de entrada $|s\rangle$ pode ser considerada da seguinte maneira:\begin{equation} |\vec{\gamma},\vec{\beta} \rangle = (U(B,\beta_n)U(C,\gamma_n))...(U(B, \beta_1)U(C,\gamma_1))(U(B,\beta_0)U(C,\gamma_0))|s\rangle.\end{equation}Como é de interesse encontrar a configuração de menor energia para o hamiltoniano, a função objetivo do problema é, novamente, a função dos valores esperados de energia, que se minimizada, retorna o menor autovalor de energia do sistema. A função objetivo do problema pode ser expressa como\begin{equation} \langle C(Z) \rangle \equiv \langle\vec{\gamma},\vec{\beta}|C(Z)|\vec{\gamma},\vec{\beta} \rangle.\end{equation}Como visto nos capitulos anteriores, o valor esperado de energia também pode ser escrito como um valor médio. 
Quando o estado quântico é preparado e medido uma quantidade suficiente de vezes, a distribuição de probabilidades obtida nos possibilita calcular o valor esperado como a soma da quantidade de vezes que cada autoestado da função de onda foi medido, multiplicada por suas respectivas energias associadas. Portanto, a função objetivo também pode ser escrita como\begin{equation} \langle\vec{\gamma},\vec{\beta}|C(Z)|\vec{\gamma},\vec{\beta} \rangle = E_0\alpha_0\langle\vec{\gamma},\vec{\beta}|0\rangle + E_1\alpha_1\langle\vec{\gamma},\vec{\beta}|1\rangle + E_2\alpha_2\langle\vec{\gamma},\vec{\beta}|2\rangle + ... + E_n\alpha_n\langle\vec{\gamma},\vec{\beta}|n\rangle = \sum_{i=0}^{n}E_ip_i.\end{equation}Os parâmetros variacionais, $\gamma$ e $\beta$, são atualizados iterativamente através de métodos clássicos de otimização até que o menor autovalor de energia seja encontrado. Por se tratar de problemas de otimização combinatória, o interesse do problema é no autoestado da base computacional associado ao menor autovalor, que é composto pela \textit{bitstring} formada pela configuração ótima do problema. Como visto, o QAOA pode ser usado para encontrar a configuração que minimiza a energia do \textit{Hamiltoniano de Ising}. Por se tratar de um problema de otimização combinatória, percebe-se que o algoritmo é um bom candidato a resolver problemas dessa natureza. O Método Variacional da Mecânica QuânticaComo discutido no Capítulo 2, informações sobre um estado quântico podem ser obtidas por meio de um conjunto de medidas sobre o sistema. Embora nosso objetivo seja encontrar o menor autovalor de uma matriz, a função objetivo do problema é calculada através valor esperado da matriz em questão, visto que se uma função de onda parametrizada por um conjunto de parâmetros $\theta$ é criada, o princípio variacional nos garante que\begin{equation} \langle \mathcal{H}_\theta \rangle \equiv \langle \psi_\theta | \mathcal{H}|\psi_\theta \rangle \geq \lambda_{min}.\end{equation}Portanto, se o espaço de busca é suficientemente grande e o objetivo do problema é minimizar o valor esperado da matriz $ \mathcal{H}$, existe um conjunto de parâmetros $\theta'$ que minimiza a função objetivo. Diante disso, podemos afirmar que uma boa e confiável aproximação para o menor autovalor $\lambda{min}$ da matriz $\mathcal{H}$ pode ser dada por\begin{equation} \langle \mathcal{H}_{\theta'}\rangle_{min} \approx \lambda_{min}.\end{equation}Uma explicação intuitiva do método variacional pode ser apresentada da seguinte maneira: O valor esperado (ou valor médio) de uma grandeza, pode assumir infinitos valores entre os respectivos máximo e mínimo. Logo, o valor médio de um observável físico nunca será menor que seu autovalor mínimo e nunca será maior que seu autovalor máximo. Otimização de Portfólio usando o QAOAO problema da otimização de portfólio pode ser modelado por meio de uma \textit{Quadratic Unconstrained Binary Optimization} (QUBO) e transformada para um Modelo de Ising através da transformação de variáveis $\{0,+1\} \rightarrow \{-1,+1\}$. 
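A compact sketch of that change of variables (using the convention $x_i = (1 - z_i)/2$ with $z_i \in \{-1,+1\}$; the convention $x_i = (1 + z_i)/2$ is equally common and only flips signs):
\begin{equation}
q\,x^{T}\sigma x - \mu^{T}x \;=\; \frac{q}{4}\,z^{T}\sigma z \;+\; \Big(\tfrac{1}{2}\mu - \tfrac{q}{2}\sigma\mathbf{1}\Big)^{T} z \;+\; \text{const},
\end{equation}
so the quadratic term supplies the pairwise couplings and the linear term the local fields $h_i$ of an Ising Hamiltonian; the constant only shifts all energies and does not change the optimal bitstring.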
To be explained (Gleydson/Anton):
- What it is
- Types
- Examples
- QAOA
- Application to portfolio optimization
New Section
**Exercise**
![circuito_mallas.PNG](figuras/circuito_mallas.PNG)
From this circuit, find the mathematical expressions for the voltage across each resistor as a function of $Vi$.
\begin{align}V1 &= expression\\V2 &= expression\\V3 &= expression\\V4 &= expression\\V5 &= expression\\V6 &= expression\\V7 &= expression\\V8 &= expression\\\end{align}
Assuming all resistors are $1 k\Omega$, find the voltages when $Vi = 15V$
# Code cell
Assuming all resistors are $5 k\Omega$, find the voltages when $Vi = 10V$
# Code cell
Assuming $ R1 = R2 = R4 = 5 k\Omega$, $ R3 = R5 = R8 = 2 k\Omega$ and $ R6 = R7 = 100 \Omega$, find the voltages when $Vi = 12V$
# Code cell
Working with motifs and distributions in Python
Counting stuff in Python
Python's [defaultdict](https://docs.python.org/3/library/collections.html#collections.defaultdict) variant of the standard dictionary is useful for counting stuff, e.g. occurrences of nucleotides in a sequence:
import collections sequence = "CATGATCTCATCGTACGCAACG" counts = collections.defaultdict(int) for char in sequence : counts[char]+=1 counts
Without defaultdict we would have needed to first check if a key is in the dictionary:
sequence = "CATGATCTCATCGTACGCAACG" counts = {} for char in sequence : if char not in counts : counts[char]=0 counts[char]+=1 counts sequence = "CATGATCTCATCGTACGCAACG" counts = {'A':0, 'C':0,'G':0,'T':0} for char in sequence : counts[char]+=1 counts
The final way we can program this uses Python's [Counter](https://docs.python.org/3/library/collections.html#collections.Counter) class:
collections.Counter("CATGATCTCATCGTACGCAACG")
Sampling from a probability distribution
When implementing randomized algorithms we often need to make random choices according to a probability distribution, i.e. we need to *sample* from that distribution. The simplest distribution to sample from is the **multinomial distribution**, which is specified by a vector of parameters $p_1, \ldots, p_n$ where $\sum_{i=1}^n p_i = 1$. To sample from a multinomial distribution, bin the numbers between 0 and 1 into $n$ bins, each with a size $p_i$. Then pick a random number between 0 and 1 and decide the outcome according to which bin it falls into. To implement this, you can use
import random random.random()
which generates a pseudo-random number between 0 and 1.
Exercise
Create a random DNA sequence that follows the probability distribution given by the numbers
```[0.1, 0.2, 0.5,0.2]```
To test your code, verify that it generates nucleotides with the desired probability distribution.
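One possible solution, as a sketch: bin the unit interval by the cumulative probabilities and return the nucleotide whose bin the random number falls into (the function and variable names below are just illustrative).
import random

def sample_nucleotide(distribution, alphabet="ACGT"):
    r = random.random()
    cumulative = 0.0
    for p, char in zip(distribution, alphabet):
        cumulative += p
        if r < cumulative:
            return char
    return alphabet[-1]  # guard against floating-point round-off

distribution = [0.1, 0.2, 0.5, 0.2]
sequence = "".join(sample_nucleotide(distribution) for _ in range(10_000))
# the empirical frequencies should be close to the target distribution
{char: round(sequence.count(char) / len(sequence), 2) for char in "ACGT"}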
As an alternative, we can use numpy's [choice](https://numpy.org/doc/stable/reference/random/generated/numpy.random.choice.html) method of a random number generator:import numpy rng = numpy.random.default_rng() distribution = [0.1, 0.2, 0.5,0.2] rng.choice(['A','C','G','T'], p=distribution)or use numpy's [multinomial](https://numpy.org/doc/stable/reference/random/generated/numpy.random.multinomial.html) method:distribution = [0.1, 0.2, 0.5,0.2] rng.multinomial(1, distribution)Working with motifsIn our last exercise, we will look at the sequences of introns in the model plant arabidopsis.The first few nucleotides and the last few nucleotides in each sequence have signals in them that help delineate the boundaries of the introns.First we will read the sequences of those introns, and then analyze the nucleotide composition at each end of the sequence.""" A parser for FASTA files. It can handle files that are local or on the web. Gzipped files do not need to be unzipped. """ import os from urllib.request import urlopen def myopen(fileName) : if not ( os.path.exists(fileName) and os.path.isfile(fileName) ): raise ValueError('file does not exist at %s' % fileName) import gzip fileHandle = gzip.GzipFile(fileName) gzippedFile = True try : line = fileHandle.readline() fileHandle.close() except : gzippedFile = False if gzippedFile : return gzip.GzipFile(fileName) else : return open(fileName) class MalformedInput : "Exception raised when the input file does not look like a fasta file." pass class FastaRecord : """Represents a record in a fasta file.""" def __init__(self, header, sequence): """Create a record with the given header and sequence.""" self.header = header self.sequence = sequence def __str__(self) : return '>' + self.header + '\n' + self.sequence + '\n' def _fasta_itr_from_file(file_handle) : "Provide an iteration through the fasta records in file." h = file_handle.readline()[:-1] if h[0] != '>': raise MalformedInput() h = h[1:] seq = [] for line in file_handle: line = line[:-1] # remove newline if line[0] == '>': yield FastaRecord(h,''.join(seq)) h = line[1:] seq = [] continue seq.append(line) yield FastaRecord(h,''.join(seq)) def _fasta_itr_from_web(file_handle) : "Iterate through a fasta file posted on the web." h = file_handle.readline().decode("utf-8")[:-1] if h[0] != '>': raise MalformedInput() h = h[1:] seq = [] for line in file_handle: line = line.decode("utf-8")[:-1] # remove newline if line[0] == '>': yield FastaRecord(h,''.join(seq)) h = line[1:] seq = [] continue seq.append(line) yield FastaRecord(h,''.join(seq)) def _fasta_itr_from_name(fname): "Iterate through a fasta file with the given name." f = myopen(fname) for rec in _fasta_itr_from_file(f) : yield rec def _fasta_itr(src): """Provide an iteration through the fasta records in file `src'. Here `src' can be either a file name or a url of a file. 
""" if type(src) == str : if src.find("http")>=0 : file_handle = urlopen(src) return _fasta_itr_from_web(file_handle) else : return _fasta_itr_from_name(src) else: raise TypeError class fasta_itr (object) : """An iterator through a Fasta file""" def __init__(self, src) : """Create an iterator through the records in src.""" self.__itr = _fasta_itr(src) def __iter__(self) : return self def __next__(self) : return self.__itr.__next__()Using this fasta parser, let's read the sequences, which are posted on github:fasta_iterator = fasta_itr("https://raw.githubusercontent.com/asabenhur/CS425/main/data/arabidopsis_introns.fasta") sequences = [fasta_record.sequence for fasta_record in fasta_iterator] len(sequences) sequence_length = 10 donor_sequences = [sequence[:sequence_length] for sequence in sequences] counts = [{'A':0, 'C':0,'G':0,'T':0} for i in range(sequence_length)] for sequence in donor_sequences : for i in range(len(sequence)): counts[i][sequence[i]] += 1 countsAn overview of rigid body dynamics $$% vector\newcommand{\v}[1]{\mathbf{\vec{1}}}% unit vector\newcommand{\u}[1]{\mathbf{\hat{1}}}% dot product\newcommand{\dp}[2]{1 \cdot 2}% cross product\newcommand{\cp}[2]{1 \times 2}% rotation matrix\newcommand{\R}[2]{{}^{1} R ^{2}}% vector derivative\newcommand{\d}[2]{\frac{{}^2d1}{dt}}% angular velocity\newcommand{\av}[2]{{}^{2} \v{\omega} {}^{1}}% angular acceleration\newcommand{\aa}[2]{{}^{2} \v{\alpha} {}^{1}}% position\newcommand{\pos}[2]{\v{r} {}^{2/1}}% velocity\newcommand{\vel}[2]{{}^2 \v{v} {}^{1}}% acceleration\newcommand{\acc}[2]{{}^2 \v{a} {}^{1}}$$ Rigid body dynamics is concerned with describing the motion of systems composed of solid bodies; such as vehicles, skeletons, robots [1-4]:![Examples of rigid body systems](files/figures/example_rigid_body_systems.svg) This document borrows heavily from [5, 6]. Newton's Second Law For all these systems, our goal is to determine how each body's position changes in time. Newton told us that the acceleration $\v{a}$ of a system is proportional to the force $\v{F}$ applied to it:$$\v{F} = m\v{a}$$Newton gave us the bodies' acceleration, but we would really prefer to obtain their position. Thus, Newton gave us a second order ordinary differential equation for the quantity we seek, i.e. $\v{a}=\frac{d^2}{dt^2}\v{x}$. This equation is actually far too simple for the systems we want to study, but it reveals that there are three topics to consider in describing a rigid body system: its kinematics ($\v{a}$), its mass properties ($m$), and the external forces applied to the system ($\v{F}$). In this notebook, we present the tools necessary to mathematically describe a rigid body system. Once equipped with a mathematical description of a system, we can generate equations that describe its motion. Regardless, we always end up with second-order ordinary differential equations in time. Vectors Newton's second law is a vector equation. A vector is a quantity that has a **magnitude** and a **direction**. For example, "5 miles East" is a vector quantity with magnitude 5 miles and direction East. We draw them as arrows:![What does a vector look like?](files/figures/vector_basics.svg)We represent the magnitude of a vector $\v{v}$ as $|\v{v}|$. We represent the direction of a vector $\v{v}$ using a unit vector $\u{u}$ (magnitude of 1) that has the same direction as $\v{v}$: $$\u{u} = \frac{\v{v}}{|\v{v}|}$$ We will work with the following vector quantities: positions, velocities, accelerations, forces, angular velocities, and torques/moments. 
Don't think about these vectors as linear algebra vectors. Our vectors always have a physical interpretation (and thus are always 2 or 3 dimensional), while linear algebra vectors are often more abstract. Addition When we add vector $\v{b}$ to vector $\v{a}$, the result is a vector that starts at the tail of $\v{a}$ and ends at the tip of $\v{b}$:![Vector addition](files/figures/vector_addition.svg)from __future__ import print_function, division from sympy import init_printing init_printing(use_latex='mathjax', pretty_print=False)Physics vectors in SymPy are created by first specifying a reference frame and using the associated unit vectors to construct vectors of arbitrary magnitude and direction. Reference frames will be discussed later on; for now it is only important that `N.x`, `N.y`, and `N.z` are three mutually orthogonal vectors of unit length.from sympy.physics.vector import ReferenceFrame N = ReferenceFrame('N')Simple scalar variables can be imported from SymPy with:from sympy.abc import c, d, e, f, g, hFinally, the unit vectors and scalars can be combined to create vectors.a = c * N.x + d * N.y + e * N.z a a.to_matrix(N) b = f * N.x + g * N.y + h * N.z bThe magnitude of a vector can be found with:a.magnitude() a + bScaling Multiplying a vector by a scalar changes its magnitude, but not its direction. Scaling by a negative number changes a vector's magnitude and reverses its sense (rotates it by 180 degrees).![Vector scaling](files/figures/vector_scaling.svg)b = 2 * a b c = -a cExerciseCreate three vectors that lie in the $XY$ plane, where each vector is:1. of length $l$ and at an angle of $\frac{\pi}{4}$ radians from the $X$ axis2. of length $10$ and in the $-Y$ direction3. of length $l$ and $\theta$ radians from the $Y$ axisFinally, add vectors 1 and 2 and subtract $5$ times the third vector.*Hint: SymPy has variables and trigonometric functions, for example `from sympy import tan, pi`*%load exercise_solutions/n01_vector_addition_scaling.pyDot product (scalar product) The dot product, which yields a scalar quantity, is defined as:$$\v{a} \cdot \v{b} = |\v{a}||\v{b}| \cos{\theta}$$where $\theta$ is the angle between the two vectors.
It is used to determine:* the angle between two vectors: $\theta = \cos^{-1}[\v{a} \cdot \v{b} / (|\v{a}||\v{b}|)]$* a vector's magnitude: $ |\v{v}| = \sqrt{\v{v} \cdot \v{v}} $* the length of a vector along a direction/unit vector $\u{u}$ (called the projection): $ \mbox{proj}_{\u{u}}\v{v} = \v{v} \cdot \u{u}$* if two vectors are perpendicular: $ \v{a} \cdot \v{b} = 0 \mbox{ if } \v{a} \perp \v{b} $* compute power: $ P = \dp{\v{F}}{\v{v}}$Also, dot products are used to convert a vector equation into a scalar equation (by "dotting" an entire equation with a vector).![Vector dot product](files/figures/vector_dot.svg)from sympy.abc import c, d, e, f, g, h from sympy.physics.vector import ReferenceFrame, dot N = ReferenceFrame('N') a = c * N.x + d * N.y + e * N.z b = f * N.x + g * N.y + h * N.z dot(a, b)ExerciseGiven the vectors $\v{v}_1 = a \hat{\mathbf{n}}_x + b\hat{\mathbf{n}}_y + a \hat{\mathbf{n}}_z$ and $\v{v}_2=b \hat{\mathbf{n}}_x + a\hat{\mathbf{n}}_y + b \hat{\mathbf{n}}_z$, find the angle between the two vectors using the dot product.%load exercise_solutions/n01_vector_dot_product.pyCross product (vector product) The cross product, which yields a vector quantity, is defined as:$$ \cp{\v{a}}{\v{b}} = |\v{a}||\v{b}| \sin\theta \u{u}$$where $\theta$ is the angle between the two vectors, and $\u{u}$ is the unit vector perpendicular to both $\v{a}$ and $\v{b}$ whose sense is given by the right-hand rule. It is used to:* obtain a vector/direction perpendicular to two other vectors* determine if two vectors are parallel: $\cp{\v{a}}{\v{b}} = \v{0} \mbox{ if } \v{a} \parallel \v{b}$* compute moments: $ \cp{\v{r}}{\v{F}}$* compute the area of a triangle![Vector cross product](files/figures/vector_cross.svg)from sympy.abc import c, d, e, f, g, h from sympy.physics.vector import ReferenceFrame, cross N = ReferenceFrame('N') a = c * N.x + d * N.y + e * N.z b = f * N.x + g * N.y + h * N.z cross(a, b)ExerciseGiven three points located in reference frame $N$ by:$$\v{p}_1 = 23 \u{n}_x - 12 \u{n}_y \\\v{p}_2 = 16 \u{n}_x + 2 \u{n}_y - 4 \u{n}_z \\\v{p}_3 = \u{n}_x + 14 \u{n}_z$$Find the area of the triangle bounded by these three points using the cross product.*Hint: Search online for the relationship of the cross product to triangle area.*%load exercise_solutions/n01_vector_cross_product.pySome vector properties * The order in which you add vectors does not matter: $\v{a} + \v{b} = \v{b} + \v{a}$* You can distribute a scalar among vectors: $ s (\v{a} + \v{b}) = s\v{a} + s\v{b} $**Dot product*** You can pull out scalars: $ c \v{a} \cdot d \v{b} = cd (\v{a} \cdot \v{b})$* Order does not matter: $\dp{\v{a}}{\v{b}} = \dp{\v{b}}{\v{a}}$* You can distribute: $\dp{\v{a}}{(\v{b} + \v{c})} = \dp{\v{a}}{\v{b}} + \dp{\v{a}}{\v{c}}$**Cross product*** Crossing a vector with itself "cancels" it: $\cp{\v{a}}{\v{a}} = \vec{0}$* You can pull out scalars: $ c \v{a} \times d \v{b} = cd (\v{a} \times \v{b})$* Order DOES matter (because of the right-hand rule): $\cp{\v{a}}{\v{b}} = -\cp{\v{b}}{\v{a}}$* You can distribute: $\cp{\v{a}}{(\v{b} + \v{c})} = \cp{\v{a}}{\v{b}} + \cp{\v{a}}{\v{c}}$* They are NOT associative: $\cp{\v{a}}{(\cp{\v{b}}{\v{c}})} \neq \cp{(\cp{\v{a}}{\v{b}})}{\v{c}}$ Reference frames A reference frame (or simply, frame) is a rigid 3D object. We always attach a reference frame to rigid bodies in order to describe their motion. We may also use "empty" reference frames to make a system easier to model.A reference frame has some *location* in space, but it does *not* have a position.
Reference frames contain points, and those *points* have positions.A reference frame also has an *orientation* in space. To specify its orientation, we choose a vector basis whose orientation is fixed with respect to the reference frame (but there are infinitely many vector bases we *could* label on the frame). In general, we are only interested in the vector bases we attach to reference frames; from here on, we will instead refer to reference frames in the places where we referred vector bases. That is, we express vectors in a reference frame instead of in a vector basis.A reference frame's location and orientation vary in time. Two important attributes of a reference frame are its **angular velocity** $\v{\omega}$ and its **angular acceleration** $\v{\alpha}$; we'll describe these shortly.A **Newtonian reference frame** is one in which Newton's second law holds.![Reference frames](files/figures/reference_frame.svg) Expressing vectors with a vector basis We have shown you what a vector $\v{v}$ looks like, but have yet to express an actual vector mathematically. To do so, we first choose three unit vectors $\u{a}_x$, $\u{a}_y$, and $\u{a}_z$ whose directions we accept as given. Consider the human jumper from above; we choose:* $\u{a}_x$ to point forward,* $\u{a}_y$ to point upwards,* $\u{a}_z$ to point out of the plane (to the subject's right).![Express a vector in different bases](files/figures/vector_express.svg)These three unit vectors are mutually perpendicular. For pratical reasons, we will always make sure that's the case. If so, the three vectors define a vector basis. We can express the position of the subject's hand from its toes in terms of these three vectors:$$ \v{r} = d_x \u{a}_x + d_y \u{a}_y + 0 \u{a}_z$$We call $d_x$ the **measure** of $\v{r}$ along $\u{a}_x$, and it is equal to $\v{r} \cdot \u{a}_x$. Note that a vector basis does not have an origin.We could have chosen a different vector basis, such as $\u{b}_x$, $\u{b}_y$, $\u{b}_z$. Then, we would express $\v{r}$ as:$$ \v{r} = f_x \u{b}_x + f_y \u{b}_y + 0 \u{b}_z$$Using this alternative vector basis does not change the fact that $\v{r}$ is the position of the hand from the toes; it simply changes how we *express* this quantity. It is possible to express a single vector in infinitely many ways, since we can choose to use any valid vector basis. In the next section, we will learn how to relate different vector bases to each other. Operating on vectors expressed in a basisOnce we express a vector in a vector basis, it is easy to perform operations on it with vectors expressed in the same basis. Consider the two vectors:* $\v{a} = a_x \u{n}_x + a_y \u{n}_y + a_z \u{n}_z$* $\v{b} = b_x \u{n}_x + b_y \u{n}_y + b_z \u{n}_z$Here are the addition, dot, and cross operations between these two vectors:$$\v{a} + \v{b} = (a_x + b_x) \u{n}_x + (a_y + b_y) \u{n}_y + (a_z + b_z) \u{n}_z \\\dp{\v{a}}{\v{b}} = a_x b_x + a_y b_y + a_z b_z\\\cp{\v{a}}{\v{b}} = \det{ \begin{bmatrix} \u{n}_x & \u{n}_y & \u{n}_z \\ a_x & a_y & a_z \\ b_x & b_y & b_z \end{bmatrix}}$$ We must specify a vector basisWhen a vector is expressed in typical linear algebra notation, information is lost. For example, we don't know the basis in which the following vector is expressed:$$\v{v} = \begin{bmatrix} v_x \\ v_y \\ v_z \end{bmatrix}$$If we don't know the basis in which $v_x$, $v_y$, and $v_z$ are its measures, we cannot add $\v{v}$ to another vector, etc. To express a vector in matrix form, we must carry along the basis in which it is expressed. 
One option for doing so is the following:$$[\v{v}]_{n} = \begin{bmatrix} v_x \\ v_y \\ v_z \end{bmatrix}_{n} $$The notation $[\v{v}]_{n}$ specifies that $\v{v}$ is expressed in the vector basis $\u{n}_x$, $\u{n}_y$, $\u{n}_z$.from sympy.abc import c, d, e, f, g, h, theta from sympy.physics.vector import ReferenceFrame, dot, cross A = ReferenceFrame('A') B = A.orientnew('B', 'Axis', (theta, A.z)) a = c * A.x + d * A.y + e * A.z b = f * B.x + g * B.y + h * B.z a + b dot(a, b) cross(a, b) (a+b).express(A)Rotation matrices (direction cosine matrices) In almost every problem, we make use of multiple vector bases. The reason is that there is usually a particular basis in which a vector is most conveniently expressed. And, that convenient basis is usually not the same for all vectors we'll deal with. A side effect is that we will often want to change the basis in which a vector is expressed. To do so, we use a rotation matrix (also called a direction cosine matrix). The rotation matrix ${}^a R^b$ allows us to take a vector $\v{v}$ expressed in $\u{b}_x$, $\u{b}_y$, $\u{b}_z$ and re-express it in $\u{a}_x$, $\u{a}_y$, $\u{a}_z$:$$[\v{v}]_{a} = {}^a R^b ~ [\v{v}]_{b}$$The rotation matrix is given by dot products between the two vector bases:$$\R{a}{b} = \begin{bmatrix} \dp{\u{a}_x}{\u{b}_x} & \dp{\u{a}_x}{\u{b}_y} & \dp{\u{a}_x}{\u{b}_z} \\ \dp{\u{a}_y}{\u{b}_x} & \dp{\u{a}_y}{\u{b}_y} & \dp{\u{a}_y}{\u{b}_z} \\ \dp{\u{a}_z}{\u{b}_x} & \dp{\u{a}_z}{\u{b}_y} & \dp{\u{a}_z}{\u{b}_z} \\ \end{bmatrix}$$Because of the nature of vector bases, this matrix is orthogonal. If we instead have a vector in basis $a$ and want to express it in $b$, we can simply use the inverse of $\R{a}{b}$. Since the matrix is orthogonal, its inverse is the same as its transpose.$$\R{b}{a} = (\R{a}{b})^{-1} = (\R{a}{b})^T \\[\v{v}]_{b} = {}^b R^a ~ [\v{v}]_{a} \\[\v{v}]_{b} = ({}^a R^b)^T ~ [\v{v}]_{a}$$The columns of $\R{a}{b}$ are the unit vectors $\u{b}_x$, $\u{b}_y$, $\u{b}_z$ expressed in $a$:$$\R{a}{b} = \begin{bmatrix} [\u{b}_x]_a & [\u{b}_y]_a & [\u{b}_z]_a \end{bmatrix}$$ Successive rotationsWe'll usually need to re-express a vector multiple times. Luckily, we can do so by multiplying rotation matrices together:$$\R{d}{a} = (\R{d}{c} )(\R{c}{b}) (\R{b}{a}) \\[\v{v}]_{d} = \R{d}{a} [\v{v}]_{a} \\[\v{v}]_{d} = (\R{d}{c} )(\R{c}{b}) (\R{b}{a})[\v{v}]_{a} $$ A point of confusion: rotating vs. re-expressingSometimes, rotation matrices are used to rotate vectors; that is, to cause the vector to point somewhere different. That is NOT how we are using rotation matrices here. Rotating a vector changes the vector itself, while we are only changing how the *same* vector is expressed.B.dcm(A)ExerciseCreate two reference frames; the first should be attached to your laptop keyboard surface. For the first frame, the $Z$ axis should be directed from the Q key to the P key. The $Y$ unit vector should be directed from the shift key to the tab key. Now on the screen, attach a reference frame where the $Z$ axis is directed from the right side of the screen to the left and lies in the plane of the screen. The $Y$ axis should be directed from the top of the screen to the hinge.The angle between the laptop and screen is $\theta$ such that $\theta=0$ corresponds to the laptop being closed and $0 < \theta < \pi$ is the laptop being open. With this, create a vector that starts at the bottom left hand corner of the wrist rests and ends at the top right corner of the screen.
Use $w$ for the width and $l$ for the length of the laptop.Print the vector expressed in the keyboard frame.*Hint: You may need to create more than two frames and a simple sketch will help.*%load exercise_solutions/n01_vector_rotation.pyDerivatives of vectors Consider the vector $\u{a}_x$ in the figure above. To an observer sitting on $A$, $\u{a}_x$ never changes; it is fixed rigidly to $A$. Therefore, the observer would say the time derivative of $\u{a}_x$ is $\v{0}$. However, an observer on $N$ would indeed observe that $\u{a}_x$ changes in time. For this reason, when we take the time derivative of a vector, we must specify the frame in which we take the derivative. The derivative of a generic vector $\v{p}$ in frame $N$ is denoted as:$$\d{\v{p}}{N}$$Consider a vector $\v{p}$ expressed in $A$:$$\v{p} = p_x \u{a}_x + p_y \u{a}_y + p_z \u{a}_z$$Its time derivative in frame $A$ is:$$\d{\v{p}}{A} = \dot{p}_x \u{a}_x + \dot{p}_y \u{a}_y + \dot{p}_z \u{a}_z$$Here, we have benefitted from the fact that $\u{a}_x$, $\u{a}_y$, and $\u{a}_z$ are constant in $A$. We are not so fortunate when taking the derivative in $N$, since these basis vectors are not constant in $N$:$$\d{\v{p}}{N} = \dot{p}_x \u{a}_x + p_x \d{\u{a}_x}{N} + \dot{p}_y \u{a}_y + p_y \d{\u{a}_y}{A} + \dot{p}_z \u{a}_z + p_z \d{\u{a}_z}{N}$$This formula for the derivative in $N$ of a vector expressed in $A$ is not so great to use. Once we introduce angular velocity, we will have a much better way to compute such quantities.a a.diff(c, A)Angular velocity and angular acceleration A reference frame's angular velocity describes the rate of change of the frame's orientation. Consider frame $A$. Since angular velocity is a vector quantity, we must specify the frame from which we observe the change in $A$'s orientation. $\av{A}{N}$: the angular velocity of frame $A$ as observed from frame $N$There are some complicated formulas for $\av{A}{N}$, but you usually don't need them. Typically, you know $\av{A}{N}$ by inspection. Take the linkage below:![Angular velocity](files/figures/angular_velocity.svg)In this linkage, the only way that frame/body $B$ can move with respect to $A$ is by rotating about $B_o$ by the angle $q_1$. Thus, by inspection:$$\av{B}{A} = \dot{q}_1 \u{b}_z$$$C$ is attached to $B$ similarly:$$\av{C}{B} = \dot{q}_2 \u{c}_z$$ Angular velocity addition theoremWe can add angular velocities together, similar to how we multiplied reference frames:$$\av{C}{A} = \av{B}{A} + \av{C}{B}$$ Derivative theoremFor any vector $\v{p}$, the following equation relates the derivative of $\v{p}$ in two different reference frames via the angular velocity between these two frames:$$\d{\v{p}}{A} = \d{\v{p}}{B} + \av{B}{A} \times \v{p}$$Again, this works for *any* vector, not just position vectors.This theorem is really important, and is the primary way that we compute derivatives of vectors in other frames. Angular accelerationThe equations of rigid body dynamics will also require angular accelerations $\aa{B}{A}$ of the rigid bodies in the system, but this can usually be computed automatically from $\av{B}{A}$. $\aa{A}{N}$: the angular acceleration of frame $A$ as observed from frame $N$B.ang_vel_in(A) from sympy import Function from sympy.abc import t theta = Function('theta')(t) theta theta.diff(t) B = A.orientnew('B', 'Axis', (theta, A.z)) B.ang_vel_in(A)Position, velocity, and acceleration PositionPosition vectors have the special property that two points must be specified. 
For example, if I want to obtain the position of point $P$ in the figure above, I must specify the point from which I want that position. $\pos{Q}{P}$: the position of point $Q$ with respect to point $P$.In modeling, we often must write down various position vectors via inspection. VelocityThe velocity of a point is the derivative of its position, and must have associated with it the frame in which the derivative is taken.$\vel{Q}{N}$: the velocity of point $Q$ in frame $N$Previously, we used the symbol $\v{v}$ to denote a generic vector. Henceforth, $\v{v}$ refers to a velocity. If $N_o$ is a point fixed in $N$, then:$$\vel{Q}{N}=\d{\pos{Q}{N_o}}{N}$$When using PyDy, we rarely need to use inspection to determine the velocity of points of interest. Instead, we are usually in the situation that we want the velocity (in $N$) of point $Q$ fixed on body $B$, and we already know the velocity of another point $P$ fixed on $B$. In this case, we use the following formula to obtain $\vel{Q}{N}$ (`v2pt_theory` in PyDy):$$\vel{Q}{N} = \vel{P}{N} + \av{B}{N} \times \pos{Q}{P}$$ AccelerationThe acceleration of a point is the derivative of its velocity, and must have associated with it the frame in which the derivative is taken.$\acc{Q}{N}$: the acceleration of point $Q$ in frame $N$Henceforth, $\v{a}$ refers to an acceleration. If the velocity of $Q$ is given, then the acceleration is:$$\acc{Q}{N}=\d{\vel{Q}{N}}{N}$$Similarly to velocity, we rarely need to use inspection to determine the acceleration of points of interest. Instead, we are usually in the situation that we want the acceleration (in $N$) of point $Q$ fixed on body $B$, and we already know the acceleration of another point $P$ fixed on $B$. In this case, we use the following formula to obtain $\acc{Q}{N}$ (`a2pt_theory` in PyDy):$$\acc{Q}{N} = \acc{P}{N} + \aa{B}{N} \times \pos{Q}{P} + \av{B}{N} \times (\av{B}{N} \times \pos{Q}{P})$$ Inertial properties Each particle or rigid body has inertial properties. We will assume that these properties are constant with respect to time. Each particle in a system has a scalar mass, and each rigid body has a scalar mass located at its center of mass and an inertia dyadic (or tensor) that represents how that mass is distributed in space, which is typically defined with respect to the center of mass.Just as we do with vectors above, we will use a basis dependent expression of tensors. The inertia of a 3D rigid body is typically expressed as a tensor (a symmetric 3 x 3 matrix).$$I = \begin{bmatrix} I_{xx} & I_{xy} & I_{xz} \\ I_{xy} & I_{yy} & I_{yz} \\ I_{xz} & I_{yz} & I_{zz} \end{bmatrix}_N$$The three terms on the diagonal are the moments of inertia and represent the resistance to angular acceleration about the respective axis in the subscript. The off-diagonal terms are the products of inertia and represent the coupled resistance to angular acceleration from one axis to another. The $N$ subscript denotes that this tensor is expressed in the $N$ reference frame.We can write this tensor as a dyadic to allow for easy combinations of inertia tensors expressed in different frames, just like we combine vectors expressed in different frames above.
This basis dependent tensor takes the form:$$I = I_{xx} \begin{bmatrix} 1 & 0 & 0 \\ 0 & 0 & 0 \\ 0 & 0 & 0 \end{bmatrix}_N + I_{xy} \begin{bmatrix} 0 & 1 & 0 \\ 0 & 0 & 0 \\ 0 & 0 & 0 \end{bmatrix}_N + I_{xz} \begin{bmatrix} 0 & 0 & 1 \\ 0 & 0 & 0 \\ 0 & 0 & 0 \end{bmatrix}_N + I_{yx} \begin{bmatrix} 0 & 0 & 0 \\ 1 & 0 & 0 \\ 0 & 0 & 0 \end{bmatrix}_N + I_{yy} \begin{bmatrix} 0 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 0 \end{bmatrix}_N + I_{yz} \begin{bmatrix} 0 & 0 & 0 \\ 0 & 0 & 1 \\ 0 & 0 & 0 \end{bmatrix}_N + \\ I_{zx} \begin{bmatrix} 0 & 0 & 0 \\ 0 & 0 & 0 \\ 1 & 0 & 0 \end{bmatrix}_N + I_{zy} \begin{bmatrix} 0 & 0 & 0 \\ 0 & 0 & 0 \\ 0 & 1 & 0 \end{bmatrix}_N + I_{zz} \begin{bmatrix} 0 & 0 & 0 \\ 0 & 0 & 0 \\ 0 & 0 & 1 \end{bmatrix}_N$$These "unit" tensors are simply the outer product of the associated unit vectors and can be written as such:$$ I = I_{xx} \u{n}_x \otimes \u{n}_x +I_{xy} \u{n}_x \otimes \u{n}_y +I_{xz} \u{n}_x \otimes \u{n}_z +I_{yx} \u{n}_y \otimes \u{n}_x +I_{yy} \u{n}_y \otimes \u{n}_y +I_{yz} \u{n}_y \otimes \u{n}_z +I_{zx} \u{n}_z \otimes \u{n}_x +I_{zy} \u{n}_z \otimes \u{n}_y +I_{zz} \u{n}_z \otimes \u{n}_z $$ Inertia dyadics and tensors can be created in the following way:from sympy import symbols from sympy.physics.mechanics import ReferenceFrame, inertia ixx, iyy, izz, ixy, iyz, ixz = symbols('I_xx I_yy I_zz I_xy I_yz I_xz') N = ReferenceFrame('N') I = inertia(N, ixx, iyy, izz, ixy, iyz, ixz) I I.to_matrix(N)Forces and moments/torques Forces are vectors which are applied to specific points (bound vectors) and moments/torques are vectors than describe rotational load applied to a body. Both can simply be described as vectors but either a point or reference frame must be associated with each, respectively.**Equal and Opposite**Don't forget Newton's third law of motion. If there is a force or torque, there is always an equal and opposit force or torque acting on the opposing point or reference frame.from sympy.abc import a, b, c from sympy.physics.vector import ReferenceFrame, Point A = ReferenceFrame('A') P = Point('P') f = a * A.x + b * A.y + b * A.z # We will typically denote a force as tuple of a vector and a point. 
force = (f, P) fAssignment 2**Name:** **Date:** 2/10/2021 Step 1: Asking the user for 10 integersuser_inputs=[] for i in range (1,11): # making the range that so that 10 integer are added user_input = input("Please enter 10 integer:") user_inputs.append(user_input) # add the integers to the list print("The 10 integers have been entered",user_inputs)Please enter 10 integer:1 Please enter 10 integer:2 Please enter 10 integer:3 Please enter 10 integer:4 Please enter 10 integer:5 Please enter 10 integer:6 Please enter 10 integer:7 Please enter 10 integer:8 Please enter 10 integer:9 Please enter 10 integer:10 The 10 integers have been entered ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']Step 2: Reminding them how many integers they have putuser_inputs =[] for i in range(1, 11): user_input = input("Please enter 10 integer:") user_inputs.append(user_input) # add the integers to the list print(f"Integer #{i}:", user_input) #remind them the # of integer print("The 10 integers, you entered are:", user_inputs) # show the list of integersPlease enter 10 integer:1 Integer #1: 1 Please enter 10 integer:2 Integer #2: 2 Please enter 10 integer:3 Integer #3: 3 Please enter 10 integer:4 Integer #4: 4 Please enter 10 integer:5 Integer #5: 5 Please enter 10 integer:6 Integer #6: 6 Please enter 10 integer:7 Integer #7: 7 Please enter 10 integer:8 Integer #8: 8 Please enter 10 integer:9 Integer #9: 9 Please enter 10 integer:10 Integer #10: 10 The 10 integers, you entered are: ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']Step 3: Accepting only integers and stop when reached 10 triesuser_inputs=[] b=[1,2,3,4,5,6,7,8,9,10] while True: for i in range (1,11): try: user_input = int(input("Please enter 10 integer:")) user_inputs.append(user_input) print(f"Integer #{i}:", user_input) break except: pass print("Only integer are allowed. Try again") if len(user_inputs) == len(b): #making sure 10 integers are added break print("You have entered:", user_inputs) print(user_inputs)Please enter 10 integer:1 Integer #1: 1 You have entered: [1] Please enter 10 integer:2 Integer #1: 2 You have entered: [1, 2] Please enter 10 integer:3 Integer #1: 3 You have entered: [1, 2, 3] Please enter 10 integer:4 Integer #1: 4 You have entered: [1, 2, 3, 4] Please enter 10 integer:5 Integer #1: 5 You have entered: [1, 2, 3, 4, 5] Please enter 10 integer:6 Integer #1: 6 You have entered: [1, 2, 3, 4, 5, 6] Please enter 10 integer:7 Integer #1: 7 You have entered: [1, 2, 3, 4, 5, 6, 7] Please enter 10 integer:8 Integer #1: 8 You have entered: [1, 2, 3, 4, 5, 6, 7, 8] Please enter 10 integer:9 Integer #1: 9 You have entered: [1, 2, 3, 4, 5, 6, 7, 8, 9] Please enter 10 integer:10 Integer #1: 10 [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]Step 4: Pausing when entered incorrect value and resumeuser_inputs =[] n=0 while True: try: user_input = int(input("Please enter 10 integer:")) user_inputs.append(user_input) n += 1 print(f"Integer #{n}:", user_input) except: #making an excepion print("Only integer are allowed. Try again") if n==10: break print("The 10 integers, you have entered:", user_inputs)Please enter 10 integer:1 Integer #1: 1 Please enter 10 integer:bob Only integer are allowed. Try again Please enter 10 integer:3 Integer #2: 3 Please enter 10 integer:5 Integer #3: 5 Please enter 10 integer:4.5 Only integer are allowed. 
Try again Please enter 10 integer:7 Integer #4: 7 Please enter 10 integer:9 Integer #5: 9 Please enter 10 integer:10 Integer #6: 10 Please enter 10 integer:11 Integer #7: 11 Please enter 10 integer:12 Integer #8: 12 Please enter 10 integer:12 Integer #9: 12 Please enter 10 integer:13 Integer #10: 13 The 10 integers, you have entered: [1, 3, 5, 7, 9, 10, 11, 12, 12, 13]Step 5: Calculating minimumdef minimum (a): #defining a minimum function minimum_num = a[0] for i in a[1:]: if i < minimum_num: minimum_num = i return (minimum_num) user_inputs =[] n=0 while True: try: user_input = int(input("Please enter 10 integer:")) user_inputs.append(user_input) n += 1 print(f"Integer #{n}:", user_input) except: #making an excepion print("Only integer are allowed. Try again") if n==10: break print("The 10 integers, you have entered:", user_inputs) print("Minimum:",minimum(user_inputs)) #asking for the minPlease enter 10 integer:4 Integer #1: 4 Please enter 10 integer:5 Integer #2: 5 Please enter 10 integer:6 Integer #3: 6 Please enter 10 integer:2 Integer #4: 2 Please enter 10 integer:7 Integer #5: 7 Please enter 10 integer:8 Integer #6: 8 Please enter 10 integer:9 Integer #7: 9 Please enter 10 integer:10 Integer #8: 10 Please enter 10 integer:33 Integer #9: 33 Please enter 10 integer:45 Integer #10: 45 The 10 integers, you have entered: [4, 5, 6, 2, 7, 8, 9, 10, 33, 45] Minimum: 2Step 6: Calculating maximumdef maximum (c):#defining a maximum function maximum_num = c[0] for x in c: if x > maximum_num: maximum_num=x return (maximum_num) user_inputs =[] n=0 while True: try: user_input = int(input("Please enter 10 integer:")) user_inputs.append(user_input) n += 1 print(f"Integer #{n}:", user_input) except: #making an excepion print("Only integer are allowed. Try again") if n==10: break print("The 10 integers, you have entered:", user_inputs) print("Maximum:",maximum(user_inputs))#asking for maxPlease enter 10 integer:1 Integer #1: 1 Please enter 10 integer:3 Integer #2: 3 Please enter 10 integer:55 Integer #3: 55 Please enter 10 integer:6 Integer #4: 6 Please enter 10 integer:7 Integer #5: 7 Please enter 10 integer:43 Integer #6: 43 Please enter 10 integer:64 Integer #7: 64 Please enter 10 integer:7 Integer #8: 7 Please enter 10 integer:3 Integer #9: 3 Please enter 10 integer:2 Integer #10: 2 The 10 integers, you have entered: [1, 3, 55, 6, 7, 43, 64, 7, 3, 2] Maximum: 64Step 7: Rangedef Range (a):#defining a minimum function '''Function return largest number of a list -the lower number of the list''' return (maximum(a)-minimum(a)) user_inputs =[] n=0 while True: try: user_input = int(input("Please enter 10 integer:")) user_inputs.append(user_input) n += 1 print(f"Integer #{n}:", user_input) except: #making an excepion print("Only integer are allowed. Try again") if n==10: break print("The 10 integers, you have entered:", user_inputs) print("Range:",Range(user_inputs))#asking for rangePlease enter 10 integer:4 Integer #1: 4 Please enter 10 integer:6 Integer #2: 6 Please enter 10 integer:66 Integer #3: 66 Please enter 10 integer:2 Integer #4: 2 Please enter 10 integer:1 Integer #5: 1 Please enter 10 integer:7 Integer #6: 7 Please enter 10 integer:9 Integer #7: 9 Please enter 10 integer:10 Integer #8: 10 Please enter 10 integer:33 Integer #9: 33 Please enter 10 integer:bob Only integer are allowed. 
Try again Please enter 10 integer:5 Integer #10: 5 The 10 integers, you have entered: [4, 6, 66, 2, 1, 7, 9, 10, 33, 5] Range: 65Step 8: Meandef Mean(a): '''Function calculate the the sum of values in the list and the total numbers of the list return the mean''' sum_list=sum(a) len_list=len(a) mean_num=c=sum_list/len_list return(mean_num) user_inputs =[] n=0 while True: try: user_input = int(input("Please enter 10 integer:")) user_inputs.append(user_input) n += 1 print(f"Integer #{n}:", user_input) except: #making an excepion print("Only integer are allowed. Try again") if n==10: break print("The 10 integers, you have entered:", user_inputs) print("Mean:",Mean(user_inputs))#asking for meanPlease enter 10 integer:1 Integer #1: 1 Please enter 10 integer:2 Integer #2: 2 Please enter 10 integer:3 Integer #3: 3 Please enter 10 integer:4 Integer #4: 4 Please enter 10 integer:5 Integer #5: 5 Please enter 10 integer:6 Integer #6: 6 Please enter 10 integer:7 Integer #7: 7 Please enter 10 integer:8 Integer #8: 8 Please enter 10 integer:9 Integer #9: 9 Please enter 10 integer:10 Integer #10: 10 The 10 integers, you have entered: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] Mean: 5.5Step 9: Variancedef Variance(a): m=Mean(a) b=sum((x-m)**2 for x in a) len_list=len(a) variance_num=b/len_list return(variance_num) user_inputs =[] n=0 while True: try: user_input = int(input("Please enter 10 integer:")) user_inputs.append(user_input) n += 1 print(f"Integer #{n}:", user_input) except: #making an excepion print("Only integer are allowed. Try again") if n==10: break print("The 10 integers, you have entered:", user_inputs) print("Variance:",Variance(user_inputs))#calculate the variancePlease enter 10 integer:1 Integer #1: 1 Please enter 10 integer:4 Integer #2: 4 Please enter 10 integer:4 Integer #3: 4 Please enter 10 integer:5 Integer #4: 5 Please enter 10 integer:6 Integer #5: 6 Please enter 10 integer:7 Integer #6: 7 Please enter 10 integer:8 Integer #7: 8 Please enter 10 integer:9 Integer #8: 9 Please enter 10 integer:10 Integer #9: 10 Please enter 10 integer:11 Integer #10: 11 The 10 integers, you have entered: [1, 4, 4, 5, 6, 7, 8, 9, 10, 11] Variance: 8.65Confirming with librariesimport statistics test_list=[1, 4, 4, 5, 6, 7, 8, 9, 10, 11] a=statistics.pvariance(test_list) print(a)8.65Step 10: Standard deviationimport math def Standard(a): variance_list=Variance(a) standard_num=math.sqrt(variance_list) return(standard_num) user_inputs =[] n=0 while True: try: user_input = int(input("Please enter 10 integer:")) user_inputs.append(user_input) n += 1 #continue the number if is correct input print(f"Integer #{n}:", user_input) except: #making an excepion print("Only integer are allowed. 
Try again") if n==10: #break when reach 10 break print("The 10 integers, you have entered:", user_inputs) print("Standard deviation:",Standard(user_inputs))# calculate standard deviationPlease enter 10 integer:1 Integer #1: 1 Please enter 10 integer:4 Integer #2: 4 Please enter 10 integer:4 Integer #3: 4 Please enter 10 integer:5 Integer #4: 5 Please enter 10 integer:6 Integer #5: 6 Please enter 10 integer:7 Integer #6: 7 Please enter 10 integer:8 Integer #7: 8 Please enter 10 integer:9 Integer #8: 9 Please enter 10 integer:10 Integer #9: 10 Please enter 10 integer:11 Integer #10: 11 The 10 integers, you have entered: [1, 4, 4, 5, 6, 7, 8, 9, 10, 11] Standard deviation: 2.9410882339705484Step 11: Running all the code togetheruser_inputs =[] n=0 while True: try: user_input = int(input("Please enter 10 integer:")) user_inputs.append(user_input) n += 1 print(f"Integer #{n}:", user_input) except: #making an excepion print("Only integer are allowed. Try again") if n==10: break print("The 10 integers, you have entered:", user_inputs) print("Minimum:",minimum(user_inputs)) #asking for the min print("Maximum:",maximum(user_inputs))#asking for max print("Range:",Range(user_inputs))#asking for range print("Mean:",Mean(user_inputs))#asking for mean print("Variance:",Variance(user_inputs))#calculate the variance print("Standard deviation:",Standard(user_inputs))# calculate standard deviationPlease enter 10 integer:1 Integer #1: 1 Please enter 10 integer:4 Integer #2: 4 Please enter 10 integer:4 Integer #3: 4 Please enter 10 integer:5 Integer #4: 5 Please enter 10 integer:6 Integer #5: 6 Please enter 10 integer:7 Integer #6: 7 Please enter 10 integer:8 Integer #7: 8 Please enter 10 integer:9 Integer #8: 9 Please enter 10 integer:10 Integer #9: 10 Please enter 10 integer:11 Integer #10: 11 The 10 integers, you have entered: [1, 4, 4, 5, 6, 7, 8, 9, 10, 11] Minimum: 1 Maximum: 11 Range: 10 Mean: 6.5 Variance: 8.65 Standard deviation: 2.9410882339705484Imports and model specifications# Import dependencies import torch as th import syft as sy import torch.nn as nn import torch.nn.functional as F import grid as gr # Hook hook = sy.TorchHook(th) me = hook.local_worker me.is_client_worker = False # Connect to nodes alice = gr.WebsocketGridClient(hook, "http://localhost:3001", id="Alice") alice.connect() bob = gr.WebsocketGridClient(hook, "http://localhost:3000", id="Bob") charlie = gr.WebsocketGridClient(hook, "http://localhost:3002", id="James") dan = gr.WebsocketGridClient(hook, "http://localhost:3003", id="Dan") bob.connect() charlie.connect() dan.connect() gr.connect_all_nodes([bob, alice, charlie, dan]) data_shape = (1, 1) data = th.zeros(data_shape) # Model Owner # Support fetch plan + AST tensor class Net(sy.Plan): def __init__(self): super(Net, self).__init__(id="fc3") self.fc1 = nn.Linear(1,1) self.add_to_state(["fc1"]) def forward(self, x): return self.fc1(x) plan = Net() plan.build(data) print(plan(data)) from IPython.display import display_html def restart_kernel() : display_html("",raw=True) restart_kernel() # Import dependencies import torch as th import syft as sy import torch.nn as nn import torch.nn.functional as F import grid as gr # Hook hook = sy.TorchHook(th) me = hook.local_worker me.is_client_worker = False # Connect to nodes alice = gr.WebsocketGridClient(hook, "http://localhost:3001", id="Alice") alice.connect() bob = gr.WebsocketGridClient(hook, "http://localhost:3000", id="Bob") charlie = gr.WebsocketGridClient(hook, "http://localhost:3002", id="James") dan = 
gr.WebsocketGridClient(hook, "http://localhost:3003", id="Dan") bob.connect() charlie.connect() dan.connect() gr.connect_all_nodes([bob, alice, charlie, dan]) plan.fix_prec().share(bob, charlie, crypto_provider=dan) alice.serve_encrypted_model(plan) alice.models model_copy = alice.download_model("fc3") x_sh = data.fix_prec().share(bob, charlie, crypto_provider=dan) model_copy(x_sh).get().float_prec() # TODO: this should be done internally new_state_ids = [] for state_id in fetched_plan.state_ids: a_sh = me._objects[state_id].fix_prec().share(bob, charlie, crypto_provider=dan).get() # TODO: this should be stored automatically me._objects[a_sh.id] = a_sh new_state_ids.append(a_sh.id) fetched_plan.state_ids fetched_plan.replace_ids(fetched_plan.state_ids, new_state_ids) fetched_plan.state_ids = new_state_ids me._objects %%time print(fetched_plan(x_ptr).get().float_prec()) # Support fetching a plan plan_func = False if plan_func: @sy.func2plan(args_shape=[(1,)], state={"bias": th.tensor([3.0])}) def plan_mult_3(x, state): bias = state.read("bias") x = x * bias return x else: class Net(sy.Plan): def __init__(self): super(Net, self).__init__(id="net2") self.fc1 = nn.Linear(1, 1) self.add_to_state(["fc1"]) def forward(self, x): return self.fc1(x) plan_mult_3 = Net() plan_mult_3.build(th.tensor(1)) sent_plan = plan_mult_3.send(alice) print(sent_plan.id) from IPython.display import display_html def restart_kernel() : display_html("",raw=True) restart_kernel() # Import dependencies import torch as th import syft as sy import torch.nn as nn import torch.nn.functional as F import grid as gr # Hook hook = sy.TorchHook(th) me = hook.local_worker me.is_client_worker = False # Connect to nodes alice = gr.WebsocketGridClient(hook, "http://localhost:3001", id="Alice") alice.connect() bob = gr.WebsocketGridClient(hook, "http://localhost:3000", id="Bob") charlie = gr.WebsocketGridClient(hook, "http://localhost:3002", id="James") dan = gr.WebsocketGridClient(hook, "http://localhost:3003", id="Dan") bob.connect() charlie.connect() dan.connect() gr.connect_all_nodes([bob, alice, charlie, dan]) # Fetch plan fetched_plan = alice.fetch_plan("net2") # TODO: this should be done internally new_state_ids = [] for state_id in fetched_plan.state_ids: # TODO: we should not have direct access to the weights a_sh = me._objects[state_id].get() # TODO: this should be stored automatically me._objects[a_sh.id] = a_sh new_state_ids.append(a_sh.id) fetched_plan.replace_ids(fetched_plan.state_ids, new_state_ids) fetched_plan.state_ids = new_state_ids x = th.tensor([1.]) print(fetched_plan(x))tensor([0.2805], requires_grad=True)Rate-control Empirical Analysis
Simple linear regressionWe performed a simple linear regression of the bitrate with respect to the quantizer,operating on the logarithm of both.The data set used was all of the video clips on https://media.xiph.org/video/derf/as well as subset3 (for extra I-frame data).To enable processing an arbitrarily large data set, an online regression algorithm was implemented.In practice, [440MB of text formatted data](https://ba.rr-dav.id.au/data/rav1e/rc-data.tar.xz) were sufficient.The raw final state of the online regression for each segment follows.%matplotlib inline from IPython.display import set_matplotlib_formats set_matplotlib_formats('svg') from matplotlib import pyplot as plt plt.rcParams['svg.fonttype'] = 'none' from glob import glob import numpy as np from pprint import pprint import tarfile from tqdm import tqdm_notebook # Klotz, . "UPDATING SIMPLE LINEAR REGRESSION." # Statistica Sinica 5, no. 1 (1995): 399-403. # http://www.jstor.org/stable/24305577 def online_simple_regression(accumulator, x, y): Ax_, Ay_, Sxy, Sxx, n_, minx, maxx = accumulator or (0, 0, 0, 0, 0, None, None) first = n_ == 0 n = n_ + x.size rt_n, rt_n_ = np.sqrt((n, n_), dtype=np.float128) Ax = (Ax_*n_ + x.sum(dtype=np.float128))/n Ay = (Ay_*n_ + y.sum(dtype=np.float128))/n minx = x.min() if first else min(minx, x.min()) maxx = x.max() if first else max(maxx, x.max()) X = Ax if first else (Ax_*rt_n_ + Ax*rt_n)/(rt_n_ + rt_n) Y = Ay if first else (Ay_*rt_n_ + Ay*rt_n)/(rt_n_ + rt_n) Sxx += np.sum((x - X)**2) Sxy += np.sum((x - X)*(y - Y)) return Ax, Ay, Sxy, Sxx, n, minx, maxx def conv_px(s): w, h = s.split(b'x') return int(w)*int(h) conv_fti = [b'I', b'P', b'B0', b'B1'].index def collect(filename, queues): px, log_target_q, byte_size, frame_type = np.loadtxt( filename, dtype=np.int64, delimiter=',', converters={1: conv_px, 4: conv_fti}, skiprows=1, usecols=range(1, 5), unpack=True) blog64q57_ibpp = np.round(( np.log2(px, dtype=np.float128) - np.log2(byte_size*8, dtype=np.float128) )*2**57).astype(np.int64) # These are the fixed point found by repeating this whole process boundaries = [ [0, 381625*2**40, 655352*2**40, 967797*2**40], [0, 356802*2**40, 848173*2**40, 967797*2**40], [0, 288436*2**40, 671307*2**40, 967797*2**40], [0, 264708*2**40, 622760*2**40, 967797*2**40] ] for fti in np.unique(frame_type): buckets = list(zip(boundaries[fti][:-1], boundaries[fti][1:])) for bi, bucket in enumerate(buckets): low, high = bucket idx = (frame_type==fti) & (log_target_q >= low) & (log_target_q < high) if np.sum(idx, dtype=int) == 0: continue b = (bi << 2) | fti x, y = log_target_q[idx], blog64q57_ibpp[idx] queue = queues.get(b, ([], [])) queue[0].append(x) queue[1].append(y) queues[b] = queue def aggregate(queues, partials): for b, queue in queues.items(): x, y = np.concatenate(queue[0]), np.concatenate(queue[1]) partials[b] = online_simple_regression(partials.get(b, None), x, y) queues.clear() partials = dict() # https://ba.rr-dav.id.au/data/rav1e/rc-data.tar.xz with tarfile.open('rc-data.tar.xz', 'r:xz') as tf: queues, last_name = dict(), None for ti in tqdm_notebook(tf, total=1077*255, leave=False): name = ti.name.split('/')[0] if last_name and name != last_name: aggregate(queues, partials) last_name = name collect(tf.extractfile(ti), queues) aggregate(queues, partials) pprint(partials){0: (2.7695336845023429016e+17, -1.07133222745900214305e+17, 8.397389236899963756e+38, 1.1092601296011764081e+39, 91218, 9581914802246888, 419495824559273192), 1: (2.5070212498929263503e+17, -1.5282410383799979604e+16, 
5.773449575496993589e+39, 5.3433937458969078745e+39, 469305, 0, 389174377867415552), 2: (2.1203328628257383575e+17, 64756017185446.597675, 1.7081708964304293988e+39, 1.3053204002923686526e+39, 222579, 56629159325661976, 317096453837818648), 3: (2.07697267279316528e+17, 2.6367481275468926898e+16, 8.87068088650291795e+38, 6.4929291690119722136e+38, 232617, 113258318651323952, 281862280268830256), 4: (5.5969475326659699072e+17, 1.33446813443680867414e+17, 9.2122867143093983015e+38, 9.627956722873028641e+38, 122946, 422111132843500776, 719865965107815656), 5: (6.456860630541701375e+17, 5.6144108286815278803e+17, [...]Fixed-point approximationThe regression results are converted to a fixed-point representation,with the exponent in Q6 and the scale in Q3.plt.figure(figsize=(7, 6)) plt.axis('equal') plt.xticks([0, 10]) plt.yticks([0, 10]) plt.minorticks_on() plt.grid(b=True, which='major') plt.grid(b=True, which='minor', alpha=0.2) segments = dict() for b, accumulator in partials.items(): Ax, Ay, Sxy, Sxx, n, minx, maxx = accumulator fti = b & 3 beta = Sxy/Sxx alpha = Ay - beta*Ax exp = int(np.round(beta*2**6)) beta_ = exp/2**6 alpha_ = Ay - beta_*Ax scale = int(np.round(np.exp2(3 - alpha_/2**57))) label = ['I', 'P', 'B0', 'B1'][fti] print('%2s: exp=%d scale=%d bucket=%d' % (label, exp, scale, b>>2)) xs, ys = segments.get(label, ([], [])) xs = [minx/2**57, maxx/2**57] ys = [xs[0]*beta_ + alpha_/2**57, xs[1]*beta_ + alpha_/2**57] xs_, ys_ = segments.get(label, ([], [])) xs_.extend(xs) ys_.extend(ys) segments[label] = (xs_, ys_) best = dict() for label, xy in segments.items(): plt.plot(xy[0], xy[1], label=label) plt.legend();I: exp=48 scale=36 bucket=0 I: exp=61 scale=55 bucket=1 I: exp=77 scale=129 bucket=2 P: exp=69 scale=32 bucket=0 B0: exp=84 scale=30 bucket=0 B1: exp=87 scale=27 bucket=0 B1: exp=139 scale=84 bucket=1 B0: exp=120 scale=68 bucket=1 P: exp=104 scale=84 bucket=1 B1: exp=61 scale=1 bucket=2 B0: exp=68 scale=4 bucket=2 P: exp=83 scale=19 bucket=2The endpoints of each linear regression, rounding only the exponent, are detailed in the following output.We use a cubic interpolation of these points to adjust the segment boundaries.pprint(segments){'B0': ([0.39294372842822706, 2.2002986504858395, 2.250924723555803, 5.114042917135398, 5.137773646423531, 7.379457452900416], [-1.4148623981702452337, 0.95729093703037120824, 1.1300545385380504622, 6.4984011514997921106, 6.5594675085524934565, 8.941256552934184157]), 'B1': ([0.7858874568564541, 1.955812458298777, 2.0338149703000394, 4.745877754380938, 4.768869761992487, 7.380207231894769], [-0.7078397903627942796, 0.88252700847286351155, 1.032874654152443289, 6.9231360133281455494, 7.0480332114869880963, 9.536964237487599543]), 'I': ([0.06648789020906937, 2.910837019748877, 2.928984366459142, 4.995073556916916, 5.018565704152715, 7.21693359533783], [-2.1348326785650876126, -0.0015708314102318150611, 0.016045673317236682116, [...]Piecewise-linear fitWe applied a 3-segment piecewise-linear fit. 
The boundaries were aligned to integer values of pixels-per-bit,while optimizing for similarity to a cubic interpolation of the control points(log-quantizer as a function of log-bitrate).plt.figure(figsize=(7, 6)) plt.axis('equal') plt.xticks([0, 10]) plt.yticks([0, 10]) plt.minorticks_on() plt.grid(b=True, which='major') plt.grid(b=True, which='minor', alpha=0.2) from scipy import optimize for ft, xy in segments.items(): f = np.poly1d(np.polyfit(np.array(xy[1]).astype(float), np.array(xy[0]).astype(float), 3)) ys = np.linspace(min(xy[1]), max(xy[1]), 20) def cost(X): y0 = np.array([ys[0], X[0], X[1], ys[-1]]).astype(float) x0 = f(y0) f0 = np.where(ysI [1. 4.] [381625. 655352.] P [ 2. 139.] [356802. 848173.] B0 [ 2. 92.] [288436. 671307.] B1 [ 2. 126.] [264708. 622760.]Tutorial: Ontology interface[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/simphony/docs/master?filepath=docs%2Fsource%2Fjupyter%2Fontology_interface.ipynb "Click to run the tutorial yourself!") This tutorial introduces the interface to the installed ontologies. The code presented is based on [this example](https://github.com/simphony/osp-core/blob/master/examples/ontology_example.py). Background In an ontological framework, ontology entities are used as a knowledge representation form. Those can be further categorized in two groups: ontology individuals ([assertional knowledge](https://en.wikipedia.org/wiki/Abox)), and ontology classes, relationships and attributes ([terminological knowledge](https://en.wikipedia.org/wiki/Tbox)).In a [previous tutorial](./cuds_api.html), we have discussed how to work with CUDS objects, which represent ontology individuals. In this tutorial, we present the API of all the other entities instead: ontology classes, relationships and attributes. These are defined in an ontology installation file in [YAML](../yaml.md) or [OWL](../owl.md) format. The presented API enables you to access the entities and navigate within an ontology. In this tutorial, we will work both with the `city` namespace, the example namespace from OSP-core, and the `math` namespace from the [Elementary Multiperspective Material Ontology (EMMO)](https://github.com/emmo-repo/EMMO), for which an installation file is also provided with OSP-core.Please install the ontologies running the commands below if you have not installed them yet.# Install the ontologies !pico install city !pico install emmoINFO 2021-03-31 16:16:53,174 [osp.core.ontology.installation]: Will install the following namespaces: ['city'] INFO 2021-03-31 16:16:53,187 [osp.core.ontology.yml.yml_parser]: Parsing YAML ontology file /home/jose/.local/lib/python3.9/site-packages/osp/core/ontology/docs/city.ontology.yml INFO 2021-03-31 16:16:53,209 [osp.core.ontology.yml.yml_parser]: You can now use `from osp.core.namespaces import city`. 
INFO 2021-03-31 16:16:53,209 [osp.core.ontology.parser]: Loaded 202 ontology triples in total INFO 2021-03-31 16:16:53,223 [osp.core.ontology.installation]: Installation successful INFO 2021-03-31 16:16:53,753 [osp.core.ontology.installation]: Will install the following namespaces: ['emmo'] INFO 2021-03-31 16:16:53,756 [osp.core.ontology.parser]: Parsing /home/jose/.local/lib/python3.9/site-packages/osp/core/ontology/docs/emmo.yml INFO 2021-03-31 16:16:53,756 [osp.core.ontology.parser]: Downloading https://raw.githubusercontent.com/emmo-repo/emmo-repo.github.io/master/versions/1.0.0[...]Accessing entities: the namespace objectTo access ontology entities, we first need to know the aliases of the installed ontology namespaces. In each ontology [YAML installation file](../yaml.md), the namespace(s) that it contains is(are) stated at the top of the file. For example, at the top of the [city ontology installation file](https://github.com/simphony/osp-core/blob/master/osp/core/ontology/docs/city.ontology.yml) you may find:```yaml---version: "0.0.3"namespace: "city"ontology: ...```Alternatively, you can use [pico ontology installation tool](../utils.mdpico-installs-cuds-ontologies) to see the installed namespaces:!pico listPackages: - city - emmo Namespaces: - xml - rdf - rdfs - xsd - cuba - isq - ns1 - ns2 - owl - city - mereotopology - physical - top - semiotics - perceptual - reductionistic - holistic - physicalistic - math - properties - materials - metrology - models - manufacturing - siunitsOnce we know the name of the namespace that we want to use, we import it in python. For this tutorial, we are importing the namespaces `city` and `math`. Through those imported namespace python objects, the entities within the namespaces can be accessed:from osp.core.namespaces import city from osp.core.namespaces import mathThere are several ways to access an ontology entity in OSP-core, which are summarized by the following list and will be demonstrated shortly after.- By **suffix**. For example, for the namespace `city`, whose [IRI](https://fusion.cs.uni-jena.de/fusion/blog/2016/11/18/iri-uri-url-urn-and-their-differences/) is `http://www.osp-core.com/city`, fetching by the suffix `Citizen` would return the ontology entity with IRI `http://www.osp-core.com/cityCitizen`.- By **label**. Fetchs the entity by the label that has been assigned to it using either the `rdfs:label` or `skos:prefLabel` predicates.- By **IRI**. The full [IRI](https://fusion.cs.uni-jena.de/fusion/blog/2016/11/18/iri-uri-url-urn-and-their-differences/) of an ontology entity is provided in order to fetch it.- By **string**. Using a string, for example `"city.LivingBeing"`. This is only useful in some special cases. The **most convenient way** to access an ontology entity is using the **dot notation** in python. For example, `city.Citizen`. This method is a shorthand for fetching by suffix or label: - When the keyword `reference_by_label` is set to `True` (enabled) in the [ontology YAML installation file](../owl.md), the dot notation is a shorthand for fetching by label. This keyword is **enabled** in the `math` namespace.- When the keyword `reference_by_label` is set to `False` (disabled) or not set, the dot notation is a shorthand for fetching by suffix instead. This keyword is **disabled** in the `city` namespace. To get a list of all the entities available within a namespace, run `list(namespace)`. Tip The dot notation supports IPython autocompletion. 
For example, when working on a Jupyter notebook, once the namespace has been imported, it is possible to get suggestions for the entity names by writing `namespace.` and pressing TAB. **Accessing an ontology entity by suffix**Let's fetch the Citizen class, whose IRI is `http://www.osp-core.com/cityCitizen`. The keyword, `reference_by_label` is set to `False`, so one can just use the dot notation.city.CitizenAnother alternative is using the `get_from_suffix` method from the namespace object. This is useful when the suffix contains characters that Python does not accept as property names, such as spaces or dashes.city.get_from_suffix('Citizen')Note that the suffix is case sensitive, and therefore the following would produce an error.#city.citizen # -> Fails.**Accessing an ontology entity by label**Let's fetch the Integer class, whose IRI is `http://emmo.info/emmo/middle/mathEMMO_f8bd64d5_5d3e_4ad4_a46e_c30714fecb7f`. The keyword `reference_by_label` is set to `True`, so we just use the dot notation.math.IntegerAnother alternative is using the square bracket notation on the namespace object. This is useful when the suffix contains characters that Python does not accept as property names, such as spaces or dashes.math['Integer']Fetching by label is NOT case sensitive when using the dot notation, but it is when using square brackets, so the following behavior is expected.#math['integer'] # -> Fails. math.integer # -> Works.**Accessing an ontology entity by IRI**This is only possible using the `get_from_iri` method from the namespace object. For example, let's fetch the Integer entity again.math.get_from_iri('http://emmo.info/emmo/middle/math#EMMO_f8bd64d5_5d3e_4ad4_a46e_c30714fecb7f')**Access entities using a string**Sometimes you only have a string refering to an entity. Using the `get_entity` function you can get the corresponding python object easily:from osp.core.namespaces import get_entity # noqa: E402 print("\nYou can get an entity with a string") print(get_entity("city.LivingBeing")) print(get_entity("city.LivingBeing") == city.LivingBeing)You can get an entity with a string city.LivingBeing TrueAccessing an entity's name, IRI and namespace Each ontology entity has an associated name which can be accessed using the `name` property.city.LivingBeing.nameThe IRI of an entity might be accessed using the `iri` property.math.Real.iriIn addition, it is possible to get the namespace object to which the entity belongs using the `namespace` property.math.Equation.namespaceAccessing super- and subclassesUsing the properties `superclasses` and `subclasses` it is easy to navigate the ontology. 
Direct superclasses and subclasses can also be accessed:print("\nYou can access the superclasses and the subclasses") print(city.LivingBeing.superclasses) print(city.LivingBeing.subclasses) print("\nYou can access the direct superclasses and subclasses") print(city.LivingBeing.direct_superclasses) print(city.LivingBeing.direct_subclasses) print("\nYou can access a description of the entities") print(city.LivingBeing.description) print("\nYou can test if one entity is a subclass / superclass of another") print(city.Person.is_subclass_of(city.LivingBeing)) print(city.LivingBeing.is_superclass_of(city.Person))You can access the superclasses and the subclasses {, } {, , } You can access the direct superclasses and subclasses {} {} You can access a description of the entities A being that lives You can test if one entity is a subclass / superclass of another True TrueTesting the type of the entitiesIn the ontology, three types of entities can be defined: classes, relationships and attributes. OSP-core has its own vocabulary, the [CUBA namespace](../yaml.mdthe-cuba-namespace), which describes, among other things, such entity types. Relationships are subclasses of `CUBA.RELATIONSHIP` and attributes are subclasses of `CUBA.ATTRIBUTE`. There are different Python objects for the different entity types. You can use both to check which type of entity you are dealing with:from osp.core.namespaces import cuba # noqa: E402 # These are the classes for the ontology entities from osp.core.ontology import ( # noqa: F401, E402 OntologyEntity, OntologyClass, OntologyRelationship, OntologyAttribute ) print("\nYou can test if an entity is a class") print(isinstance(city.LivingBeing, OntologyClass)) print(not city.LivingBeing.is_subclass_of(cuba.relationship) and not city.LivingBeing.is_subclass_of(cuba.attribute)) print("\nYou can test if an entity is a relationship") print(isinstance(city.hasInhabitant, OntologyRelationship)) print(city.hasInhabitant.is_subclass_of(cuba.relationship)) print("\nYou can test if an entity is a attribute") print(isinstance(city.name, OntologyAttribute)) print(city.name.is_subclass_of(cuba.attribute))You can test if an entity is a class True True You can test if an entity is a relationship True True You can test if an entity is a attribute True TrueOperations specific to ontology classesThe different types of entities differ in the operations they offer. For classes, you can access the attributes:print("\nYou can get the attributes of an ontology class and their defaults") print(city.Citizen.attributes) print("\nYou can get the non-inherited attributes and their defaults") print(city.Citizen.own_attributes) print(city.LivingBeing.own_attributes)You can get the attributes of an ontology class and their defaults {: (rdflib.term.Literal(''), False, None), : (rdflib.term.Literal('25', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#integer')), False, None)} You can get the non-inherited attributes and their defaults {} {: (rdflib.term.Literal(''), False, None), : (rdflib.term.Literal('25', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#integer')), False, None)}In addition, OSP-core has special support for the `owl:Restriction` and `owl:Composition` classes of the [Web Ontology Language (OWL)](https://en.wikipedia.org/wiki/Web_Ontology_Language) (check the [OWL ontology specification](https://www.w3.org/TR/owl2-syntax/) for more details). Such OWL classes are represented by the python classes `Restriction` and `Composition`. 
See [operations specific to ontology axioms](Operations-specific-to-ontology-axioms) for more information.For example, in the city ontology, the citizens have a restriction on the name and age attributes: a citizen must have exactly one name and one age. These axioms can be accessed using the `axioms` property, which returns both the restriction and compositions affecting the class.tuple(str(x) for x in city.Citizen.axioms)Operations specific to ontology axioms For restrictions, the quantifier, the target, the restriction type and the relationship/attribute (depending on whether it is a restriction of the relationship type or attribute type) may be accessed.restriction = city.Citizen.axioms[0] print(restriction) print(restriction.quantifier) print(restriction.target) print(restriction.rtype) print(restriction.attribute)city.name QUANTIFIER.EXACTLY 1 QUANTIFIER.EXACTLY 1 RTYPE.ATTRIBUTE_RESTRICTION city.nameFor compositions, both the operator and operands can be accesed.from osp.core.ontology.oclass_composition import Composition composition = tuple(x for x in math.Integer.axioms if type(x) is Composition)[0] print(composition) print(composition.operator) print(composition.operands)(math.Mathematical OPERATOR.AND perceptual.Symbol) OPERATOR.AND [, ]Operations specific to ontology relationshipsYou can access the inverse of relationships.print("\nYou can get the inverse of a relationship") print(city.hasInhabitant.inverse)You can get the inverse of a relationship city.INVERSE_OF_hasInhabitantOperations specific to attributesYou can acces the datatype and the argument name of attributes.print("\nYou can get the argument name of an attribute. " "The argument name is used as keyword argument when instantiating CUDS objects.") print(city.age.argname) print("\nYou can get the datatype of attributes") print(city.age.datatype) print("\nYou can use the attribute to convert values " "to the datatype of the attribute") result = city.age.convert_to_datatype("10") print(type(result), result) print("\nAnd likewise to convert values to the python basic type " "associated with the datatype of the attribute.") result = city.name.convert_to_basic_type(5) print(type(result), result)You can get the argument name of an attribute. The argument name is used as keyword argument when instantiating CUDS objects. age You can get the datatype of attributes http://www.w3.org/2001/XMLSchema#integer You can use the attribute to convert values to the datatype of the attribute 10 And likewise to convert values to the python basic type associated with the datatype of the attribute. 5Check the API Reference for more details on the methods [_convert_to_datatype_](../api_ref.mdosp.core.ontology.attribute.OntologyAttribute.convert_to_datatype) and [_convert_to_basic_type_](../api_ref.mdosp.core.ontology.attribute.OntologyAttribute.convert_to_basic_type). Creating CUDS using ontology classesYou can call ontology classes to create CUDS objects. 
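As a small, hedged sketch of this step, the attribute utilities above feed directly into instantiation (it reuses only calls shown in this tutorial; the name "Peter" is purely illustrative).
```python
# Hedged sketch: convert a raw value with the attribute, then instantiate.
age_value = city.age.convert_to_datatype("42")      # "42" -> 42 (xsd:integer)
peter = city.Citizen(name="Peter", age=age_value)   # keyword arguments follow `argname`
print(peter.oclass)                                 # city.Citizen
print(peter.is_a(city.LivingBeing))                 # True
```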
To learn more, have a look at the [CUDS API tutorial](./cuds_api.html).print("\nYou can instantiate CUDS objects using ontology classes") print(city.Citizen(name="", age=42)) print("\nYou can check if a CUDS object is an instance of a ontology class") print(city.Citizen(name="", age=42).is_a(city.Citizen)) print(city.Citizen(name="", age=42).is_a(city.LivingBeing)) print("\nYou can get the ontology class of a CUDS object.") print(city.Citizen(name="", age=42).oclass)You can instantiate CUDS objects using ontology classes city.Citizen: e0947100-9c40-415f-92c8-a86b796dbb01 You can check if a CUDS object is an instance of a ontology class True True You can get the ontology class of a CUDS object. city.CitizenViterbi Algorithm for Connecting Fragments in Long-Range PathsThe Viterbi Algorithm is a dynamic programming approach to finding an optimal path between a starting state and a goal state. For our neuron reconstruction problem, the states are defined as fragments, while the traversals are the connections made between endpoints using evidence observed from image intensity data. This notebook illustrates a simple example of the Viterbi Algorithm connecting synthetic fragments in a 10x10 grid example.import numpy as np from brainlit.algorithms.connect_fragments.tests.grid_generator import grid_gen from brainlit.algorithms.connect_fragments.dynamic_programming_viterbi import viterbi_algorithm import matplotlib.pyplot as pltRunning a 10x10 Grid Example# Load the 10x10 example from grid_generator img, lbls, _ , somas = grid_gen(10) plt.figure() plt.title("Image data") plt.imshow(img[:,:,0]) plt.figure() plt.title("Pre-generated Labels") plt.imshow(lbls[:,:,0]) # Initiate the algorithm class alg = viterbi_algorithm(img, lbls, somas, [1,1,1]) print("Soma label: ", somas) # Manually identify the endpoints. Note that Labels 2 and 5 are "blobs" and do not have endpoints endpoints = {} endpoints[1] = ((0,0,0),(0,2,0)) endpoints[3] = ((3,3,0),(6,5,0)) endpoints[4] = ((7,7,0),(7,8,0)) # Assign the endpoints alg.end_points = endpointsRunning Viterbi AlgorithmWithin the Viterbi class object, we first need to compute the distance matrices. After that, we can run the Viterbi algorithm to find the best path.def print_path(alg, path): c = alg.connection_mat path_lbls = path[1] for i in range(len(path_lbls)-1): from_lbl = path_lbls[i] to_lbl = path_lbls[i+1] print(f"From {from_lbl} to {to_lbl}: {c[0][from_lbl][to_lbl]}, {c[1][from_lbl][to_lbl]}") alg.compute_all_dists() top_path, sorted_paths = alg.viterbi_frag(1, K=4, somas=alg.somas) print(top_path) print_path(alg, top_path)(1.1241229611526466, [1, 3, 4, 5, 5]) From 1 to 3: [0 2 0], [3 3 0] From 3 to 4: [6 5 0], [7 7 0] From 4 to 5: [7 8 0], [9 9 0] From 5 to 5: [0 0 0], [0 0 0]Novoic ML challenge – image data IntroductionWelcome to the Novoic ML challenge!This is an open-ended ML challenge to help us identify exceptional researchers and engineers. The guidance below describes an open-source dataset that you can use to demonstrate your research skills, creativity, coding ability, scientific communication or anything else you think is important to the role.We recommend you spend around three hours on this (more or less if you wish), which you do not have to do in one go. Please make use of any resources you like. Feel free to shoot us an email with any questions that you might have!This is the image version of the challenge. Also available are text and audio versions. 
You can access all three from [this GitHub repo](https://github.com/novoic/ml-challenge).Best of luck – we're looking forward to seeing what you can do! Prepare the dataCopy the dataset to a local directory – this should take around 10 minutes.!mkdir -p data !gsutil -m cp -r gs://novoic-ml-challenge-image-data/* ./dataData descriptionThe data comprises 17,125 images in jpg format. Each image is of a realistic scene typically containing a number of objects.There are 20 object classes of interest: aeroplane, bicycle, bird, boat, bottle, bus, car, cat, chair, cow, dining table, dog, horse, motorbike, person, potted plant, sheep, sofa, train, TV monitor. Each image is labelled with one of three numbers for each object class:
- -1 (no objects of this class feature in the image)
- 1 (at least one object of this class features in the image)
- 0 (at least one object of this class features in the image but they are all difficult to recognise)
import IPython IPython.display.Image(filename='data/images/2012_004258.jpg') IPython.display.Image(filename='data/images/2008_007739.jpg')Each object class file (e.g. `aeroplane.txt`) contains the name of the image without the extension (e.g. `2008_007739`) followed by a space and then the class label (e.g. `-1`).For more information about the dataset, see its `README.md`.Directory structure:
```
data/
├── images/           dir for jpg files
├── aeroplane.txt     aeroplane object class labels
├── bicycle.txt       bicycle object class labels
├── bird.txt          bird object class labels
├── boat.txt          boat object class labels
├── bottle.txt        bottle object class labels
├── bus.txt           bus object class labels
├── car.txt           car object class labels
├── cat.txt           cat object class labels
├── chair.txt         chair object class labels
├── cow.txt           cow object class labels
├── diningtable.txt   dining table object class labels
├── dog.txt           dog object class labels
├── horse.txt         horse object class labels
├── motorbike.txt     motorbike object class labels
├── person.txt        person object class labels
├── pottedplant.txt   potted plant object class labels
├── sheep.txt         sheep object class labels
├── sofa.txt          sofa object class labels
├── train.txt         train object class labels
├── tvmonitor.txt     TV monitor object class labels
├── LICENSE
└── README.md
```
The challengeThis is an open-ended challenge and we want to witness your creativity. Some obvious suggestions:
- Data exploration/visualization
- Binary/multiclass classification
- Anomaly detection
- Unsupervised clustering
- Model explainability
You're welcome to explore one or more of these topics, or do something entirely different.Create, iterate on, and validate your work in this notebook, using any packages of your choosing.**The role you are interviewing for will involve a fair amount of deep learning. Because of that, we expect your submission to contain some deep learning, using your favorite framework. Our preference is PyTorch.** You can access a GPU via `Runtime -> Change runtime type` in the toolbar. Submission instructionsOnce you're done, send us this `.ipynb` notebook (or a link to it hosted on Google Drive/GitHub with appropriate permissions) by email, ensuring that outputs from cells (text, plots, etc.) are preserved.If you haven't applied already, make sure you submit an application first through our [job board](https://novoic.com/careers/). Your submissionThe cell below imports PyTorch as an example, but feel free to use any framework you like.import torchConvert NASA data to Orion formatIn this notebook we download the data from the Telemanom S3 bucket and reformat it as Orion pipelines expect. 
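Before downloading anything, the target layout can be previewed with a tiny, self-contained sketch. The values here are synthetic; only the column names and the timestamp spacing follow the description in the next section, where the real conversion code lives.
```python
# Hedged preview of the Orion-style layout built below: a `timestamp` column
# starting at 1222819200 (2008-10-01T00:00:00) in 21600 s (6 h) steps,
# plus a single `value` column. The values are random, for illustration only.
import numpy as np
import pandas as pd

n = 5
preview = pd.DataFrame({
    'timestamp': 1222819200 + 21600 * np.arange(n),
    'value': np.random.normal(size=n),
})
print(preview)
```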
Download the dataimport io import os import urllib import zipfile DATA_URL = 'https://s3-us-west-2.amazonaws.com/telemanom/data.zip' if not os.path.exists('data'): response = urllib.request.urlopen(DATA_URL) bytes_io = io.BytesIO(response.read()) with zipfile.ZipFile(bytes_io) as zf: zf.extractall() train_signals = os.listdir('data/train') test_signals = os.listdir('data/test') train_signals == test_signalsConvert the NPY matrices to CSVsWe convert the NPY matrices to CSV files with two columns: `timestamp` and `value`.For this, what we do is loading both the train and test matrices for each signalsand concantenate them to generate a single matrix for each signal.Afterwards, we add a timestamp column by taking the value 1222819200 (2008-10-01T00:00:00)as for the first row and then increasing the timestamp by 21600 seconds (6h) for each other row.import pandas as pd import numpy as np def build_df(data, start=0): index = np.array(range(start, start + len(data))) timestamp = index * 21600 + 1222819200 return pd.DataFrame({'timestamp': timestamp, 'value': data[:, 0]}) data = build_df(np.load('data/train/S-1.npy')) data.head()Store the results as CSVos.makedirs('csv', exist_ok=True) for signal in train_signals: name = signal[:-4] train_np = np.load('data/train/' + signal) test_np = np.load('data/test/' + signal) data = build_df(np.concatenate([train_np, test_np])) data.to_csv('csv/' + name + '.csv', index=False) train = build_df(train_np) train.to_csv('csv/' + name + '-train.csv', index=False) test = build_df(test_np, start=len(train)) test.to_csv('csv/' + name + '-test.csv', index=False) s1 = pd.read_csv('csv/S-1.csv') s1.head() s1.shape s1_train = pd.read_csv('csv/S-1-train.csv') s1_train.head() s1_train.tail() s1_train.shape s1_test = pd.read_csv('csv/S-1-test.csv') s1_test.head() s1_test.shapeData Mining Project by (December 2017) Project Overview*Category: Classificaton**DataSet: Titanic DataSet(Source: Kaggle.com)* Librariesimport numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline import sklearn from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, VotingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import accuracy_scoreImport the datatrain_data = pd.read_csv('C:/Users/chiragrank/Downloads/train.csv')Understanding the data Shapeprint("Training data shape: ", train_data.shape)Training data shape: (891, 12)The data has 12 attributes and 891 rows Overview of the attributes and its values in the datatrain_data.head()Variable Name DescriptionSurvived(target) Survived (1) or died (0) Pclass Passenger’s class Name Passenger’s name Sex Passenger’s sex Age Passenger’s age SibSp Number of siblings/spouses aboard Parch Number of parents/children aboard Ticket Ticket number Fare Fare Cabin Cabin Embarked Port of embarkation(C = Cherbourg, Q = Queenstown, S = Southampton) Attribute Structuretrain_data.info() #calculating the missing values print("\n" + "-"*50 + "\n") print(train_data.isnull().sum()) RangeIndex: 891 entries, 0 to 890 Data columns (total 12 columns): PassengerId 891 non-null int64 Survived 891 non-null int64 Pclass 891 non-null int64 Name 891 non-null object Sex 891 non-null object 
Age 714 non-null float64 SibSp 891 non-null int64 Parch 891 non-null int64 Ticket 891 non-null object Fare 891 non-null float64 Cabin 204 non-null object Embarked 889 non-null object dtypes: float64(2), int64(5), object(5) memory usage: 83.6+ KB -------------------------------------------------- PassengerId 0 Survived 0 Pclass 0 Name 0 Sex 0 Age 177 SibSp 0 Parch 0 Ticket 0 Fare 0 Cabin 687 Embarked 2 dtype: int64Observations: There are 891 entries in total. Age, Cabin and Embarked have 177, 687 and 2 null values respectively. Seven features are integers or floats and five features are strings (object) in the train dataset. Data Exploration (Analyse and Identify the Patterns) Do passengers who pay more have a better survival rate? 1) Survival Rate for Individual Passenger Classprint(train_data[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)) cl_dead = train_data[train_data['Survived']==0]['Pclass'].value_counts() cl_survive = train_data[train_data['Survived']==1]['Pclass'].value_counts() df = pd.DataFrame([cl_survive, cl_dead]) df.index = ['Survived','Dead'] df.plot(kind='bar',figsize=(10,6))Pclass Survived 0 1 0.629630 1 2 0.472826 2 3 0.242363Observation: The survival rate is lowest for passenger class 3, which is clearly visible from the plot. 2) Survival rate for the fare valuestrain_data['FareCategory'] = pd.qcut(train_data['Fare'], 7) print (train_data[['FareCategory', 'Survived']].groupby(['FareCategory'], as_index=False).mean()) figure = plt.figure(figsize=(15,8)) plt.hist([train_data[train_data['Survived']==1]['Fare'],train_data[train_data['Survived']==0]['Fare']], stacked=True, bins = 20 ,label = ['Survived','Dead']) plt.ylabel('Frequency of passengers') plt.xlabel('Fare value') plt.legend()FareCategory Survived 0 (-0.001, 7.75] 0.207143 1 (7.75, 8.05] 0.200000 2 (8.05, 12.475] 0.316327 3 (12.475, 19.258] 0.406250 4 (19.258, 27.9] 0.445312 5 (27.9, 56.929] 0.456000 6 (56.929, 512.329] 0.685039Observation: The survival rate is around 68 percent for the highest fare bracket, at least 20 percentage points higher than the next bracket and considerably higher than the lowest bracket. 3) Does the size of the family affect the survival rate?train_data['FamilySize'] = train_data['SibSp'] + train_data['Parch'] + 1 print (train_data[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean())FamilySize Survived 0 1 0.303538 1 2 0.552795 2 3 0.578431 3 4 0.724138 4 5 0.200000 5 6 0.136364 6 7 0.333333 7 8 0.000000 8 11 0.000000Observation: The survival rate increases initially but then drops as the family size grows beyond 4, so let us analyse further to arrive at a definite conclusion.train_data['Alone'] = 0 train_data.loc[train_data['FamilySize'] == 1, 'Alone'] = 1 print (train_data[['Alone', 'Survived']].groupby(['Alone'], as_index=False).mean())Alone Survived 0 0 0.505650 1 1 0.303538Observation: We can say that travelling alone decreases the survival rate (about 30 percent for passengers travelling alone versus 51 percent otherwise). 4) Does the port of embarkation affect the survival rate?#Fill the missing values with Southampton port train_data['Embarked'] = train_data['Embarked'].fillna('S') # print (train_data[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean()) plt.hist(train_data[train_data['Survived']==1]['Embarked'])Observation: The number of surviving passengers is highest for Southampton. 
Lower for Cherbourg and Lowest for Queenstown 5) Do sex and age affect the survival rate?train_data[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False)The survival rate of the female is 74 percent as against 18 percent of the malesex_survive = train_data[train_data['Survived']==1]['Sex'].value_counts() sex_dead = train_data[train_data['Survived']==0]['Sex'].value_counts() df = pd.DataFrame([sex_survive,sex_dead]) df.index = ['Survived','Dead'] df.plot(kind='bar',figsize=(10,6))Observation:The survival rate of the female is more than the menplt.figure(figsize=(15,8)) ax = plt.subplot() ax.scatter(train_data[train_data['Survived']==1]['Age'],train_data[train_data['Survived']==1]['Fare'],c='green',s=40) ax.scatter(train_data[train_data['Survived']==0]['Age'],train_data[train_data['Survived']==0]['Fare'],c='red',s=40) ax.set_xlabel('Age') ax.set_ylabel('Fare') ax.legend(('survived','dead'),scatterpoints=1,loc='upper right',fontsize=15,)Observation:The survival rate decreases with the age and Increases with the fare value. Data Processing(Feature Engiering and Feature Selection)data = pd.read_csv('C:/Users/chiragrank/Downloads/train.csv') # Remove the target variable from the training data targets = data.Survived data.drop('Survived', 1, inplace=True)Title Variable Processing# Split the entries in the variables, #replace them with four categories: Mr, Miss, Mrs and Master #convert them into numerical values. data['Title'] = data['Name'].map(lambda name:name.split(',')[1].split('.')[0].strip()) data['Title'] = data['Title'].replace(['Lady', 'Countess','Capt', 'Col',\ 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') data['Title'] = data['Title'].replace('Mlle', 'Miss') data['Title'] = data['Title'].replace('Ms', 'Miss') data['Title'] = data['Title'].replace('Mme', 'Mrs') title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5} data['Title'] = data['Title'].map(title_mapping) data['Title'] = data['Title'].fillna(0)Age Variable Processingdata['Age'].isnull().sum()Age variable has 177 null values.# Replacing those null values with random integers within one standard deviation of the mean. null_age = data['Age'].isnull().sum() avg_age = data['Age'].mean() std_age = data['Age'].std() age_random_list = np.random.randint(avg_age - std_age, avg_age + std_age, size = null_age) data['Age'][np.isnan(data['Age'])] = age_random_list data['Age'] = data['Age'].astype(int)C:\Users\chiragrank\AppData\Local\Continuum\anaconda3\lib\site-packages\ipykernel_launcher.py:7: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy import sysFare Variable Processing#Converting the values of Fare variable into four levels with Fare less than 7.91 = 0, #greater than 7.91 and less than 27.9 equals to 1, greater than 27.9 and less than 56.929 equals to 2, #and greater than 56.926 as 3 data.loc[ data['Fare'] <= 7.91, 'Fare'] = 0 data.loc[(data['Fare'] > 7.91) & (data['Fare'] <= 27.9), 'Fare'] = 1 data.loc[(data['Fare'] > 27.9) & (data['Fare'] <= 56.929), 'Fare'] = 2 data.loc[ data['Fare'] > 56.929, 'Fare'] = 3 data['Fare'] = data['Fare'].astype(int)Converting categorical variables into numerical variable.# Embarkment point, Sex and Title variable has categories. 
#So using the label encoder from sklearn to convert the categorial variable into numerical variable labelEncoder=LabelEncoder() data['Embarked'] = data['Embarked'].fillna('S') data['Embarked']=labelEncoder.fit_transform(data['Embarked']) data['Sex']=labelEncoder.fit_transform(data['Sex']) data['Title']=labelEncoder.fit_transform(data['Title'])Dropping the variables Observation:Variables Dropped: PassengerID, Name, Ticket, CabinPassengerID, Name, Ticket because they are identity variable with almost unique values.Cabin variable because it has 687 and 327 missing values which is 77 percent of the training data and 78 percent of the test datadata = data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis = 1)Prediction Splitting the data, Training the model: 70 percent data with 623 entries, Testing the model: 30 percent data with 268 entries Classifiers Used:1) Support Vector Machine2) Random Forest3) KNN 4) Gaussian Nave Bayes Ensemble Classifiers5) Gradient Boosting6) AdaBoost7) Voting Classifier using svc, gnb, abc, rftrain_x , test_x , train_y , test_y = train_test_split( data , targets , train_size = .7 ) print(train_x.shape , test_x.shape , train_y.shape , test_y.shape)(623, 8) (268, 8) (623,) (268,)Support Vector Machinesvc = SVC() svc.fit(train_x, train_y) predicted_svc = svc.predict(test_x) acc_svc = (accuracy_score(test_y, predicted_svc)) * 100 acc_svcRandom Forestrf = RandomForestClassifier(n_estimators=100) rf.fit(train_x, train_y) predicted_rf = rf.predict(test_x) acc_rf = (accuracy_score(test_y, predicted_rf)) * 100 acc_rfKNNknn = KNeighborsClassifier(n_neighbors = 3) knn.fit(train_x, train_y) predicted_knn = knn.predict(test_x) acc_knn = (accuracy_score(test_y, predicted_knn)) * 100 acc_knnGaussian Naive Bayesgsn = GaussianNB() gsn.fit(train_x, train_y) predicted_gsn = gsn.predict(test_x) acc_gsn = (accuracy_score(test_y, predicted_gsn)) * 100 acc_gsnGradient Boostinggbc = GradientBoostingClassifier() gbc.fit(train_x, train_y) predicted_gbc = gbc.predict(test_x) acc_gbc = (accuracy_score(test_y, predicted_gbc)) * 100 acc_gbcAdaBoostabc = AdaBoostClassifier() abc.fit(train_x, train_y) predicted_abc = abc.predict(test_x) acc_abc = (accuracy_score(test_y, predicted_abc)) * 100 acc_abcVoting Classifiervc = VotingClassifier(estimators=[ ('svc', svc), ('abc', abc), ('gbc', gbc), ('rf', rf)], voting='hard') vc.fit(train_x, train_y) predicted_vc = vc.predict(test_x) acc_vc = (accuracy_score(test_y, predicted_vc)) * 100 acc_vcModel Comparisonmodels = pd.DataFrame({ 'Model': ['Support Vector Machines', 'KNN', 'Random Forest', 'Naive Bayes', 'Gradient Boosting', 'AdaBoost', 'Voting Classifier' ], 'Score': [acc_svc, acc_knn, acc_rf, acc_gsn, acc_gbc, acc_abc, acc_vc]}) models.sort_values(by='Score', ascending=False)Installing/ Importing required libraries!pip install transformers import pandas as pd import numpy as np import seaborn as sns import torch import torch.nn as nn from torch.utils.data import Dataset, DataLoader, random_split from sklearn.preprocessing import LabelEncoder import torch.optim as optim import matplotlib.pyplot as plt from transformers import AutoTokenizer from tqdm import tqdm_notebook as tqdm import transformers from transformers import BertForTokenClassification, AdamW,BertModel %matplotlib inline device = "cuda" if torch.cuda.is_available() else "cpu" deviceData# !pip install kaggle # !mkdir ~/.kaggle # !cp kaggle.json ~/.kaggle/ # !chmod 600 ~/.kaggle/kaggle.json # !kaggle datasets download -d abhinavwalia95/entity-annotated-corpus # import zipfile # with 
zipfile.ZipFile('/content/drive/MyDrive/entity-annotated-corpus.zip', 'r') as zip_ref: # zip_ref.extractall("/content/drive/MyDrive") # !pip install transformers df = pd.read_csv('/content/drive/MyDrive/ner.csv', encoding = "ISO-8859-1", error_bad_lines=False) df1 = pd.read_csv('/content/drive/MyDrive/ner_dataset.csv', encoding = "ISO-8859-1", error_bad_lines=False) # df1 = df1.loc[:1000] df1['Sentence #'] = df1['Sentence #'].fillna(method = 'ffill') Label_encoder = LabelEncoder() df1["Tag"] = Label_encoder.fit_transform(df1["Tag"]) sns.countplot(df1['Tag']) plt.xticks(rotation=90) plt.show() sentences = list(df1.groupby("Sentence #")["Word"].apply(list).reset_index()['Word'].values) vals = list(df1.groupby("Sentence #")["Tag"].apply(list).reset_index()['Tag'].values) sentences = [" ".join(s) for s in sentences] # from torch.nn.utils.rnn import pad_sequence,pack_padded_sequence tokenizer = transformers.BertTokenizer.from_pretrained( 'bert-base-cased', do_lower_case=True ) # df1['Tag'] = df1['Tag'] + 1 class ner_dataset(Dataset): def __init__(self,sentences,vals,tokenizer,max_len): self.sentences = sentences self.vals = vals self.tokenizer = tokenizer self.max_len = max_len def __getitem__(self,idx): s = self.sentences[idx].split(" ") v = self.vals[idx] d = {'input_ids':[],'attention_mask':[],'labels':[]} text = [] labels = [] mask = [] for w in range(len(s)) : i, l = self.align_labels(self.tokenizer,s[w],v[w]) text.extend(i['input_ids']) labels.extend(l) mask.extend(i['attention_mask']) d['input_ids'] = [101] + self.pad(text+ [102],self.max_len) d['labels'] = [0] + self.pad(labels+ [0],self.max_len) d['attention_mask'] = [1] + self.pad(mask+ [1],self.max_len) d['input_ids'] = torch.tensor(d['input_ids']) d['labels'] = torch.tensor(d['labels']) d['attention_mask'] = torch.tensor(d['attention_mask']) return d def __len__(self): return len(self.sentences) def align_labels(self,tokenizer,word,label): word = tokenizer(word,add_special_tokens=False) labels = [] for i in range(len(word['input_ids'])): labels.append(label) return word,labels def pad(self,s,max_len): pad_len = max_len - len(s) if pad_len>0: for i in range(pad_len): s.append(0) return s[:max_len-1] dataset = ner_dataset(sentences,vals,tokenizer,100) train_dataset, test_dataset = random_split(dataset,[int(len(dataset)*0.8),len(dataset)-int(len(dataset)*0.8)]) train_dataloader = DataLoader(train_dataset,batch_size=32,shuffle=False) test_dataloader = DataLoader(train_dataset,batch_size=32,shuffle=False) dataset[1]Modelclass ner_model(nn.Module): def __init__(self,num_class): super(ner_model,self).__init__() self.num_class = num_class self.bert = transformers.BertModel.from_pretrained( "bert-base-uncased" ) self.logit = nn.Linear(768,self.num_class) def forward(self,ids,mask): x = self.bert(ids, attention_mask=mask) x = self.logit(x['last_hidden_state']) return x model = ner_model(df1['Tag'].nunique()) num_class = df1['Tag'].nunique() model = model.to(device) # model = TheModelClass(*args, **kwargs) # model = transformers.BertModel.from_pretrained('/content/drive/MyDrive/ner_model.pth') param_optimizer = list(model.named_parameters()) no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"] optimizer_parameters = [ { "params": [ p for n, p in param_optimizer if not any( nd in n for nd in no_decay ) ], "weight_decay": 0.001, }, { "params": [ p for n, p in param_optimizer if any( nd in n for nd in no_decay ) ], "weight_decay": 0.0, }, ] optimizer = optim.AdamW(optimizer_parameters,lr=1e-5) loss_fn = nn.CrossEntropyLoss() def 
loss_fn1(output, target, mask, num_labels): lfn = nn.CrossEntropyLoss() active_loss = mask.view(-1) == 1 active_logits = output.view(-1, num_labels) active_labels = torch.where( active_loss, target.view(-1), torch.tensor(lfn.ignore_index).type_as(target) ) loss = lfn(active_logits, active_labels) return lossTrainingclass train_model: def __init__(self, train_loader, test_loader, task, model, optimizer, criterion, epochs, ): self.train_loader = train_loader self.test_loader = test_loader self.task = task self.optimizer = optimizer self.criterion = criterion self.epochs = epochs self.model = model def train_it(self): """ """ history = {'train_loss':[], 'test_loss':[], 'train_score':[], 'test_score':[]} for epoch in range(self.epochs): model.train() for loader in tqdm(self.train_loader): self.optimizer.zero_grad() d = loader ids, msk, ys= d['input_ids'].to(device), d['attention_mask'].to(device), d['labels'].to(device) preds= self.predict(ids,msk) # print(preds.shape,ys.shape) loss = loss_fn1(preds, ys, msk, num_class) loss.backward() self.optimizer.step() model.eval() # history['train_loss'].append(self.calc_loss(self.train_loader)) history['test_loss'].append(self.calc_loss(self.test_loader)) # history['train_score'].append(self.calc_score(self.train_loader)) # history['test_score'].append(self.calc_score(self.test_loader)) if epoch%1==0: print(f"Iteration : {epoch}") torch.save(model.state_dict(), '/content/drive/MyDrive/model_ner2.pth') fig = plt.figure(figsize = (12,7)) plt.subplot(1, 2, 1) # plt.plot(history['train_loss'], color = 'red', label = 'Training Loss') plt.plot(history['test_loss'], color = 'green', label = 'Validation Loss') plt.legend() # plt.subplot(1, 2, 2) # plt.plot(history['train_score'], color = 'red', label = 'Training Score') # plt.plot(history['test_score'], color = 'green', label = 'Validation Score' ) # plt.legend() plt.show() # fig = plt.figure() # ax = fig.add_subplot(111, projection = '3d') # ax.scatter(X[:,0], X[:,1], model(torch.tensor(X).float())[:,0].detach().numpy()) # plt.show() return self.model, history def r2(self, xs, ys): return r2_score(ys.reshape(-1,), self.predict(xs).numpy()) def acc(self, ys, preds): maxs, cls = torch.max(preds, axis = 1) return torch.sum(cls==ys.reshape(-1,)).item()/ys.shape[0] def predict(self,xs,msk): return self.model(xs,msk) def calc_loss(self,loader): loss = [] for idx, data in enumerate(loader): ids, msk, ys= data['input_ids'].to(device), data['attention_mask'].to(device), data['labels'].to(device) preds,msk,ys = self.predict(ids,msk).reshape(-1,num_class).detach().cpu(),data['attention_mask'].detach().cpu(),ys.detach().cpu() loss.append(loss_fn1(preds, ys, msk, num_class).item()) return sum(loss)/len(loss) def calc_score(self,loader): scores = [] for data in loader: ids, msk, ys= data['input_ids'].to(device), data['attention_mask'].to(device), data['labels'].to(device) preds,ys = self.predict(ids,msk).reshape(-1,num_class).detach().cpu(),ys.detach().cpu() if self.task == 'reg': scores.append(self.r2(ys,preds)) else: scores.append(self.acc(ys,preds)) return np.mean(scores) trainer = train_model(train_dataloader, test_dataloader, 'clf', model, optimizer, loss_fn, 3,) history = trainer.train_it()/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:34: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0 Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`Inferencetest_model = model.to('cpu') test_model.eval() text = """ The Indian central government on Tuesday extended the nationwide 
Covid-19 containment measures till October 31 """ def ner_infer(text): lent = len(text.split(" ")) infer_dataset = ner_dataset([text],[[1]*lent],tokenizer,100) ids, msk, ys= infer_dataset[0]['input_ids'], infer_dataset[0]['attention_mask'], infer_dataset[0]['labels'] out = test_model(ids.unsqueeze(0).to('cpu'),msk.unsqueeze(0).to('cpu'))[0] out_idx = [torch.argmax(o,dim=-1).item() for o in out] pred = [Label_encoder.classes_[i] for i in out_idx] infer_dict = {'Word':[],'len_token':[],'preds':[]} k,j =1,1 for i in text.split(' '): t = len(tokenizer(i,add_special_tokens=False)['input_ids']) k,j = j,j+t infer_dict['Word'].append(i) infer_dict['len_token'].append(t) infer_dict['preds'].append(pred[k:j]) return pd.DataFrame(infer_dict) ner_infer(text)User experience - Gabbar%matplotlib inline %config InlineBackend.figure_format = 'retina' import warnings warnings.filterwarnings("ignore") import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns; sns.set_style('ticks') pd.set_option('display.precision', 2) pd.set_option('display.max_columns', None)Reverting changesetsreverting_path = '../downloads/reverting/' revertings = pd.read_csv(reverting_path + 'attributes.csv') print(revertings.shape) revertings.head() # Drop all duplicate attributes. print('Shape before dropping duplicates: {}'.format(revertings.shape)) revertings = revertings.drop_duplicates(subset='changeset_id') print('Shape after dropping duplicates: {}'.format(revertings.shape)) reverting_by_user = revertings.groupby('user_name')['changeset_id'].count() print('Users with one or more reverting changesets: {}'.format(reverting_by_user.size)) revertings[revertings['user_changesets'] > 100].shape print(revertings[revertings['user_changesets'] < 20].shape) revertings[revertings['user_changesets'] < 20].to_csv(reverting_path + 'new-users-reverting.csv', index=False) def get_reverting_count(user): return reverting_by_user[user] revertings['reverting_changesets'] = revertings['user_name'].apply(get_reverting_count) revertings = revertings.sort_values(by='reverting_changesets', ascending=False) revertings.drop_duplicates(subset='user_name').head() ax = revertings.plot.scatter(x='user_changesets', y='reverting_changesets') ax.set_xscale('log') ax.set_yscale('log') ax.set_xlabel('Number of changesets by user') ax.set_ylabel('Number of reverting changesets') reverting_corr = revertings.corr() reverting_corr reverting_mask = np.zeros_like(reverting_corr, dtype=np.bool) reverting_mask[np.triu_indices_from(reverting_mask)] = True cmap = sns.diverging_palette(220, 10, as_cmap=True) sns.heatmap(reverting_corr, vmax=.8, mask=reverting_mask, cmap=cmap, square=True);Reverted changesetsreverted_path = '../downloads/reverted/' reverteds = pd.read_csv(reverted_path + 'attributes.csv') print(reverteds.shape) reverteds.head() # Drop all duplicate attributes. 
print('Shape before dropping duplicates: {}'.format(reverteds.shape)) reverteds = reverteds.drop_duplicates(subset='changeset_id') print('Shape after dropping duplicates: {}'.format(reverteds.shape)) reverted_by_user = reverteds.groupby('user_name')['changeset_id'].count() print('Users with one or more reverted changesets: {}'.format(reverted_by_user.size)) reverteds[reverteds['user_changesets'] > 100].shape print(reverteds[reverteds['user_changesets'] < 20].shape) reverteds[reverteds['user_changesets'] < 20].to_csv(reverted_path + 'new-users-reverted.csv', index=False) def get_reverted_count(user): return reverted_by_user[user] reverteds['reverted_changesets'] = reverteds['user_name'].apply(get_reverted_count) reverteds = reverteds.sort_values(by='reverted_changesets', ascending=False) reverteds.drop_duplicates(subset='user_name').head() ax = reverteds.plot.scatter(x='user_changesets', y='reverted_changesets') # ax.set_xscale('log') # ax.set_yscale('log') ax.set_xlabel('Number of changesets by user') ax.set_ylabel('Number of reverted changesets') reverteds.sort_values(by='user_changesets', ascending=False).head() # Removing the outliers. ax = reverteds[ (reverteds['reverted_changesets'] < 20) & (reverteds['user_changesets'] < 10000) ].plot.scatter(x='user_changesets', y='reverted_changesets') # ax.set_xscale('log') # ax.set_yscale('log') ax.set_xlabel('Number of changesets by user') ax.set_ylabel('Number of reverted changesets') reverted_corr = reverteds.corr() reverted_corr reverted_mask = np.zeros_like(reverted_corr, dtype=np.bool) reverted_mask[np.triu_indices_from(reverted_mask)] = True sns.heatmap(reverted_corr, vmax=.8, mask=reverted_mask, cmap=cmap, square=True);1. Removing the Rowsdf.dropna()2. Imputersimputer = SimpleImputer(missing_values = np.nan, strategy = 'most_frequent') imputer.fit(df.iloc[:,1:3].values) df.iloc[:,1:3] = imputer.transform(df.iloc[:,1:3].values) dfETL the data in the first out of a list of book reviews dataset from AWS S3 datasets in order to match the schema.# Install Java, Spark, and Findspark !apt-get install openjdk-8-jdk-headless -qq > /dev/null !wget -q http://www-us.apache.org/dist/spark/spark-2.4.7/spark-2.4.7-bin-hadoop2.7.tgz !tar xf spark-2.4.7-bin-hadoop2.7.tgz !pip install -q findspark # Set Environment Variables import os os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["SPARK_HOME"] = "/content/spark-2.4.7-bin-hadoop2.7" # Start a SparkSession import findspark findspark.init() # import postgres connection !wget https://jdbc.postgresql.org/download/postgresql-42.2.9.jar from pyspark.sql import SparkSession spark = SparkSession.builder.appName("AmazonHWBook1").config("spark.driver.extraClassPath","/content/postgresql-42.2.9.jar").getOrCreate() # load csv from AWS S3 from pyspark import SparkFiles # Load in user_data.csv from S3 into a DataFrame url = "https://s3.amazonaws.com/amazon-reviews-pds/tsv/amazon_reviews_us_Books_v1_02.tsv.gz" spark.sparkContext.addFile(url) # make header 'true' and sep '\t' # verify data is connected df = spark.read.option('header', 'true').csv(SparkFiles.get("amazon_reviews_us_Books_v1_02.tsv.gz"), inferSchema=True, sep='\t', timestampFormat="mm/dd/yy") df.show(10) # print df schema df.printSchema()root |-- marketplace: string (nullable = true) |-- customer_id: integer (nullable = true) |-- review_id: string (nullable = true) |-- product_id: string (nullable = true) |-- product_parent: integer (nullable = true) |-- product_title: string (nullable = true) |-- product_category: string (nullable 
= true) |-- star_rating: integer (nullable = true) |-- helpful_votes: integer (nullable = true) |-- total_votes: integer (nullable = true) |-- vine: string (nullable = true) |-- verified_purchase: string (nullable = true) |-- review_headline: string (nullable = true) |-- review_body: string (nullable = true) |-- review_date: timestamp (nullable = true)Count number of records(rows) in this dataset# count rows row_number = df.count() print(f"Number of records in this dataset: {row_number}")Number of records in this dataset: 3105520Create user dataframe to match **review_id_table** table in the schema# make df for review review_df = df.select(["review_id", "customer_id", "product_id", "product_parent","review_date"]) review_df.show(10) # double checking that datatype matches schema review_df.dtypes # change review_date from timestamp to yyyy-mm-dd format and verify change from pyspark.sql.types import DateType review_df = review_df.withColumn("review_date", review_df['review_date'].cast(DateType())) review_df.dtypesCreate user dataframe to match **products** table in the schema# make df for products and drop duplicates product_df = df.select(["product_id", "product_title"]).drop_duplicates() product_df.show(10) # double checking that datatype matches schema product_df.dtypesCreate user dataframe to match **customers** table in the schema# count the number of customers customer = df.groupby('customer_id').agg({"customer_id": "count"}) #rename count(customer_id) column to match schema customer_df = customer.withColumnRenamed('count(customer_id)', 'customer_count') customer_df.show(10) # double checking that datatype matches schema customer_df.dtypes # change customer_count from bigint to int & verify change from pyspark.sql.types import IntegerType customer_df = customer_df.withColumn("customer_count",customer_df["customer_count"].cast(IntegerType())) customer_df.dtypesCreate user dataframe to match **vine_table** table in the schema# make df to match schema vine_df = df.select(["review_id", "star_rating","helpful_votes","total_votes","vine"]) vine_df.show(10) # double checking that datatype matches schema vine_df.dtypesLoad the DataFrames that correspond to tables into an RDS instance# Configure settings for RDS mode = "append" jdbc_url="jdbc:postgresql://awsbookreview.ccll2vhyal1k.us-east-1.rds.amazonaws.com:5432/postgres" config = {"user":"root", "password": "", "driver":"org.postgresql.Driver"} # Write DataFrame to review_df table in RDS review_df.write.jdbc(url=jdbc_url, table='review_id_table', mode=mode, properties=config) # Write DataFrame to product_df table in RDS product_df.write.jdbc(url=jdbc_url, table='products', mode=mode, properties=config) # Write DataFrame to customer_df table in RDS customer_df.write.jdbc(url=jdbc_url, table='customers', mode=mode, properties=config) # Write DataFrame to vine_df table in RDS vine_df.write.jdbc(url=jdbc_url, table='vine_table', mode=mode, properties=config)Are Vine Reviews Trustworthy? 
A Comparison between Vine and Non-Vine Reviewers Count vine members# Load in a sql function to use columns from pyspark.sql.functions import col # Find vine users vine_users = vine_df.filter(col("vine") == "Y") vine_users.show() # count rows to verify viners = vine_users.count() print(f"Number of vine users in this dataset: {viners}")Number of vine users in this dataset: 2Count number of non-vine members# Find non-vine users novine_users = vine_df.filter(col("vine") == "N") novine_users.show() # count rows no_viners = novine_users.count() print(f"Number of non-vine users in this dataset: {no_viners}")Number of non-vine users in this dataset: 3105513Total votes by star rating amongst non-vine usersfrom pyspark.sql.functions import desc novine_reviews_df = novine_users.select(["star_rating","total_votes"])\ .groupBy("star_rating")\ .agg({"total_votes": "count"}) novine_reviews_df.orderBy(desc("count(total_votes)")).show(truncate=False) novine_tots_reviews = novine_users.select(["star_rating","total_votes", "helpful_votes"])\ .groupBy("star_rating")\ .agg({"total_votes": "sum", "helpful_votes": "sum"}) df1 = novine_tots_reviews.orderBy(desc("sum(total_votes)")) df1.show(truncate=False)+-----------+----------------+------------------+ |star_rating|sum(total_votes)|sum(helpful_votes)| +-----------+----------------+------------------+ |5 |20824555 |17667965 | |1 |6321283 |2965719 | |4 |5524832 |4539419 | |3 |3060700 |2139865 | |2 |2777252 |1676047 | +-----------+----------------+------------------+Select columns to find more information about the purchases made by the customers.# select columns and group by product selected_df = df.select("product_id","product_title","star_rating","helpful_votes","total_votes","vine","verified_purchase") grouped_df = selected_df.groupBy("product_id","product_title")\ .agg({"total_votes":"sum","helpful_votes":"sum","star_rating":"avg"})TOP 10 TOTAL VOTES# order by sum of total votes ordered_df = grouped_df.orderBy(desc("sum(total_votes)")) ordered_df.show(10, truncate=False)+----------+------------------------------------------------------------------------------------------------------+----------------+------------------+------------------+ |product_id|product_title |sum(total_votes)|avg(star_rating) |sum(helpful_votes)| +----------+------------------------------------------------------------------------------------------------------+----------------+------------------+------------------+ |0895260174|Unfit For Command: Swift Boat Veterans Speak Out Against |130574 |3.4481835564053536|75801 | |0525947647|Lies and the Lying Liars Who Tell Them |66835 |3.8495873968492123|30753 | |1400050308|Treason: Liberal Treachery from the Cold War to the War on Terrorism |48221 |2.96657381615[...]TOP 10 HELPFUL VOTES# order by sum of helpful votes ordered_df2 = grouped_df.orderBy(desc("sum(helpful_votes)")) ordered_df2.show(10, truncate=False)+----------+------------------------------------------------------------------------------------------------------+----------------+------------------+------------------+ |product_id|product_title |sum(total_votes)|avg(star_rating) |sum(helpful_votes)| +----------+------------------------------------------------------------------------------------------------------+----------------+------------------+------------------+ |0895260174|Unfit For Command: Swift Boat Veterans Speak Out Against |130574 |3.4481835564053536|75801 | |0525947647|Lies and the Lying Liars Who Tell Them |66835 |3.8495873968492123|30753 | |0446677450|, Poor Dad: What the 
Rich Teach Their Kids About Money--That the Poor and Middle Class Do Not!|32725 |3.64426229508[...]TOP 10 AVERAGE STAR RATING# order by sum of helpful votes ordered_df3 = grouped_df.orderBy(desc("avg(star_rating)")) ordered_df3.show(10, truncate=False)+----------+------------------------------------------------------------------------------------------------------------------------+----------------+----------------+------------------+ |product_id|product_title |sum(total_votes)|avg(star_rating)|sum(helpful_votes)| +----------+------------------------------------------------------------------------------------------------------------------------+----------------+----------------+------------------+ |0316141674|The Team That Couldn't Lose: Who is Sending the Plays That Make the Team Unstoppable? (Matt Christopher Sports Classics)|29 |5.0 |28 | |0385148070|The Old Gods Waken |26 |5.0 |22 | |059042808X|The Pike River Phantom [...]Vine and Non-Vine Customers# comparing total number of verified/non-verified vine/non-vine members group_vine = selected_df.groupBy("vine","verified_purchase")\ .agg({"total_votes":"sum","helpful_votes":"sum","star_rating":"avg"}) group_vine.na.drop().show() vine_selected = selected_df.filter(col("vine") == "Y") vine_selected.show() novine_selected = selected_df.filter(col("vine") == "N") novine_selected.show(10, truncate=False)+----------+--------------------------------------------------------------+-----------+-------------+-----------+----+-----------------+ |product_id|product_title |star_rating|helpful_votes|total_votes|vine|verified_purchase| +----------+--------------------------------------------------------------+-----------+-------------+-----------+----+-----------------+ |0385730586|Sisterhood of the Traveling Pants (Book 1) |4 |2 |3 |N |N | |0811828964|The Bad Girl's Guide to Getting What You Want |3 |5 |5 |N |N | |1844161560|Eisenhorn (A Warhammer 40,000 Omnibus) |4 |1 |22 |N |N | |0373836635|Colby Conspiracy (Colby Agency) |5 |2 |2 |N |N | |0262181533|The Psychology of Proof: Dedu[...]CAPSTONE PROJECT: BATTLE OF THE NEIGHBORHOODS Singapore Visitors and Expatriates Venue Recommendation ________ I. PURPOSEThis document provides the details of my final peer reviewed assignment for the IBM Data Science Professional Certificate program – Coursera Capstone.________ II. INTRODUCTIONSingapore is a small country and one of the most visited countries in Asia. There are a lot of websites where travelers can check and retrieve recommendations of places to stay or visit. However, most of these websites provides recommendation simply based on usual tourist attractions or key residential areas that are mostly expensive or already known for travelers based on certain keywords like "Hotel", or "Backpackers" etc. The intention on this project is to collect and provide a data driven recommendation that can supplement the recommendation with statistical data. 
This will also be utilizing data retrieved from Singapore open data sources and FourSquare API venue recommendations.The sample recommender in this notebook will provide the following use case scenario:* A person planning to visit Singapore as a Tourist or an Expat and looking for a reasonable accommodation.* The user wants to receive venue recommendation where he can stay or rent an HDB apartment with close proximity to places of interest or search category option.* The recommendation should not only present the most viable option, but also present a comparison table of all possible town venues.For this demonstration, this notebook will make use of the following data:* Singapore Median Rental Prices by town.* Popular Food venues in the vicinity. (Sample category selection)Note: While this demo makes use of Food Venue Category, Other possible categories can also be used for the same implementation such as checking categories like:* Outdoors and Recreation* Nightlife* Nearby Schools, etc.I will limit the scope of this search as FourSquare API only allows 50 free venue query limit per day when using a free user access. III. DATA ACQUISITIONThis demonstration will make use of the following data sources: Singapore Towns and median residential rental prices.Data will retrieved from Singapore open dataset from median rent by town and flattype from https://data.gov.sg website. The original data source contains median rental prices of Singapore HDB units from 2005 up to 2nd quarter of 2018. I will retrieve rental the most recent recorded rental prices from this data source (Q2 2018) being the most relevant price available at this time. For this demonstration, I will simplify the analysis by using the average rental prices of all available flat type. Singapore Towns location data retrieved using Google maps API.Data coordinates of Town Venues will be retrieved using google API. I also make use of MRT stations coordinate as a more important center of for all towns included in venue recommendations. Singapore Top Venue Recommendations from FourSquare API(FourSquare website: www.foursquare.com)I will be using the FourSquare API to explore neighborhoods in selected towns in Singapore. The Foursquare explore function will be used to get the most common venue categories in each neighborhood, and then use this feature to group the neighborhoods into clusters. The following information are retrieved on the first query:* Venue ID* Venue Name* Coordinates : Latitude and Longitude* Category NameAnother venue query will be performed to retrieve venue ratings for each location. Note that rating information is a paid service from FourSquare and we are limited to only 50 queries per day. With this constraint, we limit the category analysis with only one type for this demo. I will try to retrieve as many ratings as possible for each retrieved venue ID. IV. METHODOLOGY Singapore Towns List with median residential rental prices.The source data contains median rental prices of Singapore HDB units from 2005 up to 2nd quarter of 2018. I will retrive the most recent recorded rental prices from this data source (Q2 2018) being the most relevant price available at this time. For this demonstration, I will simplify the analysis by using the average rental prices of all available flat type.**Data Cleanup and re-grouping.** The retrieved table contains some un-wanted entries and needs some cleanup.The following tasks will be performed:* Drop/ignore cells with missing data.* Use most current data record.* Fix data types. 
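The three cleanup steps just listed can be sketched up front. This is a minimal sketch that assumes the `quarter` and `median_rent` columns of the median-rent dataset used later; the full working cells follow after the library imports.
```python
# Hedged sketch of the cleanup steps listed above, using the column names
# (`quarter`, `median_rent`) of the median-rent dataset loaded further below.
import numpy as np
import pandas as pd

def clean_median_rent(df):
    # 1. Drop/ignore cells with missing data ('na' or '-').
    df = df[~df['median_rent'].isin(['-', 'na'])].copy()
    # 2. Use the most current data record (2018-Q2), then drop the quarter column.
    df = df[df['quarter'] == '2018-Q2'].drop(['quarter'], axis=1)
    # 3. Fix data types: median_rent as float64.
    df['median_rent'] = df['median_rent'].astype(np.float64)
    return df
```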
Importing Python LibrariesThis section imports required python libraries for processing data. While this first part of python notebook is for data acquisition, we will use some of the libraries make some data visualization.!conda install -c conda-forge folium=0.5.0 --yes # comment/uncomment if not yet installed. !conda install -c conda-forge geopy --yes # comment/uncomment if not yet installed import numpy as np # library to handle data in a vectorized manner import pandas as pd # library for data analsysis # Numpy and Pandas libraries were already imported at the beginning of this notebook. pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) import json # library to handle JSON files from geopy.geocoders import Nominatim # convert an address into latitude and longitude values from pandas.io.json import json_normalize # tranform JSON file into a pandas dataframe # Matplotlib and associated plotting modules import matplotlib.cm as cm import matplotlib.colors as colors # import k-means from clustering stage from sklearn.cluster import KMeans import folium # map rendering library import requests # library to handle requests import lxml.html as lh import bs4 as bs import urllib.request print('Libraries imported.') from IPython.display import HTML import base64 # Extra Helper scripts to generate download links for saved dataframes in csv format. def create_download_link( df, title = "Download CSV file", filename = "data.csv"): csv = df.to_csv() b64 = base64.b64encode(csv.encode()) payload = b64.decode() html = '{title}' html = html.format(payload=payload,title=title,filename=filename) return HTML(html)1. Downloading Singapore towns list with and median residential rental pricesimport zipfile import os !wget -q -O 'median-rent-by-town-and-flat-type.zip' "https://data.gov.sg/dataset/b35046dc-7428-4cff-968d-ef4c3e9e6c99/download" zf = zipfile.ZipFile('./median-rent-by-town-and-flat-type.zip') sgp_median_rent_by_town_data = pd.read_csv(zf.open("median-rent-by-town-and-flat-type.csv")) sgp_median_rent_by_town_data.rename(columns = {'town':'Town'}, inplace = True) sgp_median_rent_by_town_data.head()Data Cleanup and re-grouping.The retrieved table contains some un-wanted entries and needs some cleanup.The following tasks will be performed:* Drop/ignore cells with missing data.* Use most current data record.* Fix data types.# Drop rows with rental price == 'na'. sgp_median_rent_by_town_data_filter=sgp_median_rent_by_town_data[~sgp_median_rent_by_town_data['median_rent'].isin(['-','na'])] # Take the most recent report which is "2018-Q2" sgp_median_rent_by_town_data_filter=sgp_median_rent_by_town_data_filter[sgp_median_rent_by_town_data_filter['quarter'] == "2018-Q2"] # Now that all rows reports are "2018-Q2", we dont need this column anymore. sgp_median_rent_by_town_data_filter=sgp_median_rent_by_town_data_filter.drop(['quarter'], axis=1) # Ensure that median_rent column is float64. 
sgp_median_rent_by_town_data_filter['median_rent']=sgp_median_rent_by_town_data_filter['median_rent'].astype(np.float64)* Note: We can separate the analysis HDB unit size to be more accurate, For this demonstration however, We will do a simplier analysis by using a median price for all available rental units regardless of its size.singapore_average_rental_prices_by_town = sgp_median_rent_by_town_data_filter.groupby(['Town'])['median_rent'].mean().reset_index() singapore_average_rental_prices_by_town* Adding geographical coordinates of each town location.# The code was removed by Watson Studio for sharing.google_key=hidden_from_view2. Retrieve town coordinates.Google api will be used to retrive the coordinates (latitude and longitude of each town centers. For this exercise, I just used the MRT stations as the center points of each evaluated towns.The town coordinates will be used in retrieval of Foursquare API location data.singapore_average_rental_prices_by_town['Latitude'] = 0.0 singapore_average_rental_prices_by_town['Longitude'] = 0.0 for idx,town in singapore_average_rental_prices_by_town['Town'].iteritems(): address = town + " MRT station, Singapore" ; # I prefer to use MRT stations as more important central location of each town url = 'https://maps.googleapis.com/maps/api/geocode/json?address={}&key={}'.format(address,google_key) lat = requests.get(url).json()["results"][0]["geometry"]["location"]['lat'] lng = requests.get(url).json()["results"][0]["geometry"]["location"]['lng'] singapore_average_rental_prices_by_town.loc[idx,'Latitude'] = lat singapore_average_rental_prices_by_town.loc[idx,'Longitude'] = lng # Alternative if above does not work. # CODE IS DISABLED <<< if {0}: >>> if (0): geo = Nominatim(user_agent='Mypythonapi') for idx,town in singapore_average_rental_prices_by_town['Town'].iteritems(): coord = geo.geocode(town + ' ' + "Singapore", timeout = 10) if coord: singapore_average_rental_prices_by_town.loc[idx,'Latitude'] = coord.latitude singapore_average_rental_prices_by_town.loc[idx,'Longitude'] = coord.longitude else: singapore_average_rental_prices_by_town.loc[idx,'Latitude'] = NULL singapore_average_rental_prices_by_town.loc[idx,'Longitude'] = NULL singapore_average_rental_prices_by_town.set_index("Town")Generate Singapore basemap.geo = Nominatim(user_agent='My-IBMNotebook') address = 'Singapore' location = geo.geocode(address) latitude = location.latitude longitude = location.longitude print('The geograpical coordinate of Singapore {}, {}.'.format(latitude, longitude)) # create map of Singapore using latitude and longitude values map_singapore = folium.Map(location=[latitude, longitude],tiles="OpenStreetMap", zoom_start=10) # add markers to map for lat, lng, town in zip( singapore_average_rental_prices_by_town['Latitude'], singapore_average_rental_prices_by_town['Longitude'], singapore_average_rental_prices_by_town['Town']): label = town label = folium.Popup(label, parse_html=True) folium.CircleMarker( [lat, lng], radius=4, popup=label, color='blue', fill=True, fill_color='#87cefa', fill_opacity=0.5, parse_html=False).add_to(map_singapore) map_singapore fileName = "singapore_average_rpbt.csv" linkName = "Singapore Average Rental Prices" create_download_link(singapore_average_rental_prices_by_town,linkName,fileName)V. 
Segmenting and Clustering Towns in Singapore Retrieving FourSquare Places of interest.Using the Foursquare API, the **explore** API function was be used to get the most common venue categories in each neighborhood, and then used this feature to group the neighborhoods into clusters. The *k*-means clustering algorithm was used for the analysis.Fnally, the Folium library is used to visualize the recommended neighborhoods and their emerging clusters.In the ipynb notebook, the function **getNearbyVenues** extracts the following information for the dataframe it generates:* Venue ID* Venue Name* Coordinates : Latitude and Longitude* Category NameThe function **getVenuesByCategory** performs the following: 1. **category** based venue search to simulate user venue searches based on certain places of interest. This search extracts the following information: * Venue ID * Venue Name * Coordinates : Latitude and Longitude * Category Name 2. For each retrieved **venueID**, retrive the venues category rating.# The code was removed by Watson Studio for sharing. # The code was removed by Watson Studio for sharing.CLIENT_ID = hidden CLIENT_SECRET = hidden VERSION = 20190102 LIMIT = 801. Exploring Neighbourhood in Singapore Using the following foursquare api query url, search venues on all boroughs in selected Singapore towns.> `https://api.foursquare.com/v2/venues/`**search**`?client_id=`**CLIENT_ID**`&client_secret=`**CLIENT_SECRET**`&ll=`**LATITUDE**`,`**LONGITUDE**`&v=`**VERSION**`&query=`**QUERY**`&radius=`**RADIUS**`&limit=`**LIMIT** Retrieving data from FourSquare API is not so straight forward. It returns a json list top venues to visit to city. The scores however, is retrieved on a separate query to the FourSquare Venue API and is limited to 50 queries per day when using a free FourSquare subscription. The following functions generates the query urls and processes the returned json data into dataframe.The function **getNearbyVenues** extracts the following information for the dataframe it generates:* Venue ID* Venue Name* Coordinates : Latitude and Longitude* Category NameThe function **getVenuesByCategory** performs the following: 1. **category** based venue search to simulate user venue searches based on certain places of interest. This search extracts the following information: * Venue ID * Venue Name * Coordinates : Latitude and Longitude * Category Name 2. For each retrieved **venueID**, retrive the venues category rating.The generated data frame in the second function contains the following column: Column NameDescription TownTown NameTown LatitudeTowns MRT station LatitudeTown LongitudeTown MRT station LatitudeVenueIDFourSquare Venue IDVenueNameVenue NamescoreFourSquare Venue user ratingcategoryCategory group namecatIDCategory IDlatitudeVenue Location - latitudelongitudeVenue Location - longitudeimport time # --------------------------------------------- # The following function retrieves the venues given the names and coordinates and stores it into dataframe. FOURSQUARE_EXPLORE_URL = 'https://api.foursquare.com/v2/venues/explore?' FOURSQUARE_SEARCH_URL = 'https://api.foursquare.com/v2/venues/search?' 
def getNearbyVenues(names, latitudes, longitudes, radius=500): global CLIENT_ID global CLIENT_SECRET global FOURSQUARE_EXPLORE_URL global FOURSQUARE_SEARCH_URL global VERSION global LIMIT venues_list=[] for name, lat, lng in zip(names, latitudes, longitudes): print('getNearbyVenues',names) cyclefsk2() # create the API request URL url = '{}&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format( FOURSQUARE_EXPLORE_URL,CLIENT_ID,CLIENT_SECRET,VERSION, lat,lng,radius,LIMIT) # make the GET request results = requests.get(url).json()["response"]['groups'][0]['items'] # return only relevant information for each nearby venue venues_list.append([( name,lat,lng, v['venue']['id'],v['venue']['name'], v['venue']['location']['lat'],v['venue']['location']['lng'], v['venue']['categories'][0]['name']) for v in results]) time.sleep(2) nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list]) nearby_venues.columns = ['Town','Town Latitude','Town Longitude','Venue','Venue Latitude','Venue Longitude','Venue Category'] return(nearby_venues) FOURSQUARE_SEARCH_URL = 'https://api.foursquare.com/v2/venues/search?' # SEARCH VENUES BY CATEGORY # Dataframe : venue_id_recover # - store venue id to recover failed venues id score retrieval later if foursquare limit is exceeded when getting score. venue_id_rcols = ['VenueID'] venue_id_recover = pd.DataFrame(columns=venue_id_rcols) def getVenuesByCategory(names, latitudes, longitudes, categoryID, radius=500): global CLIENT_ID global CLIENT_SECRET global FOURSQUARE_EXPLORE_URL global FOURSQUARE_SEARCH_URL global VERSION global LIMIT venue_columns = ['Town','Town Latitude','Town Longitude','VenueID','VenueName','score','category','catID','latitude','longitude'] venue_DF = pd.DataFrame(columns=venue_columns) print("[#Start getVenuesByCategory]") for name, lat, lng in zip(names, latitudes, longitudes): cyclefsk2() print(name,",",end='') #print('getVenuesByCategory',categoryID,name) ; # DEBUG: be quiet # create the API request URL url = '{}client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}&categoryId={}'.format( FOURSQUARE_SEARCH_URL,CLIENT_ID,CLIENT_SECRET,VERSION,lat,lng,radius,LIMIT,categoryID) # make the GET request results = requests.get(url).json() # Populate dataframe with the category venue results # Extracting JSON data values for jsonSub in results['response']['venues']: #print(jsonSub) # JSON Results may not be in expected format or incomplete data, in that case, skip! ven_id = 0 try: # If there are any issue with a restaurant, retry or ignore and continue # Get location details ven_id = jsonSub['id'] ven_cat = jsonSub['categories'][0]['pluralName'] ven_CID = jsonSub['categories'][0]['id'] ven_name = jsonSub['name'] ven_lat = jsonSub['location']['lat'] ven_lng = jsonSub['location']['lng'] venue_DF = venue_DF.append({ 'Town' : name, 'Town Latitude' : lat, 'Town Longitude': lng, 'VenueID' : ven_id, 'VenueName' : ven_name, 'score' : 'nan', 'category' : ven_cat, 'catID' : ven_CID, 'latitude' : ven_lat, 'longitude' : ven_lng}, ignore_index=True) except: continue # END OF LOOP, return. print("\n[#Done getVenuesByCategory]") return(venue_DF) FOURSQUARE_SEARCH_URL = 'https://api.foursquare.com/v2/venues/search?' # SEARCH VENUES BY CATEGORY # Dataframe : venue_id_recover # - store venue id to recover failed venues id score retrieval later if foursquare limit is exceeded when getting score. 
venue_id_rcols = ['VenueID','Score'] venue_id_recover = pd.DataFrame(columns=venue_id_rcols) def getVenuesIDScore(venueID): global CLIENT_ID global CLIENT_SECRET global FOURSQUARE_EXPLORE_URL global FOURSQUARE_SEARCH_URL global VERSION global LIMIT global venue_id_recover print("[#getVenuesIDScore]") venID_URL = 'https://api.foursquare.com/v2/venues/{}?client_id={}&client_secret={}&v={}'.format(venueID,CLIENT_ID,CLIENT_SECRET,VERSION) print(venID_URL) venID_score = 0.00 # Process results try: venID_result = requests.get(venID_URL).json() venID_score = venID_result['response']['venue']['rating'] except: venue_id_recover = venue_id_recover.append({'VenueID' : venueID, 'Score' : 0.0}, ignore_index=True) cyclefsk2() return ["error",0.0] return ["success",venID_score] singapore_average_rental_prices_by_town.dtypes venue_columns = ['Town','Town Latitude','Town Longitude','VenueID','VenueName','score','category','catID','latitude','longitude'] singapore_town_venues = pd.DataFrame(columns=venue_columns)Search Venues with recommendations on : Food Venues (Restaurants, Fast Food, etc.)To demonstrate user selection of places of interest, we will use this Food Venues category in our further analysis.* This Foursquare search is expected to collect venues in the following categories: * Food Courts * Coffee Shops * Restaurants * Cafés * Other food venues# Food Venues : Restaurants, Fast Food, etc. # For testing if (0): categoryID = "4d4b7105d754a06377d81259" town_names = ['ANG MO KIO'] lat_list = [1.3699718] lng_list = [103.8495876] tmp = getVenuesByCategory(names=town_names,latitudes=lat_list,longitudes=lng_list,categoryID=categoryID) singapore_town_venues = pd.concat([singapore_town_venues,tmp], ignore_index=True) # Food Venues : Restaurants, Fast Food, etc. categoryID = "4d4b7105d754a06374d81259" town_names = singapore_average_rental_prices_by_town['Town'] lat_list = singapore_average_rental_prices_by_town['Latitude'] lng_list = singapore_average_rental_prices_by_town['Longitude'] singapore_food_venues = getVenuesByCategory(names=town_names,latitudes=lat_list,longitudes=lng_list,categoryID=categoryID)[#Start getVenuesByCategory] ANG MO KIO ,BEDOK ,BISHAN ,BUKIT BATOK ,BUKIT MERAH ,BUKIT PANJANG ,CENTRAL ,CHOA CHU KANG ,CLEMENTI ,GEYLANG ,HOUGANG ,JURONG EAST ,JURONG WEST ,KALLANG/WHAMPOA ,MARINE PARADE ,PASIR RIS ,PUNGGOL ,QUEENSTOWN ,SEMBAWANG ,SENGKANG ,SERANGOON ,TAMPINES ,TOA PAYOH ,WOODLANDS ,YISHUN , [#Done getVenuesByCategory]* Save collected Singapore food venues by town into csv for future use.# Save collected Singapore food venues by town into csv for future use. fileName = "singapore_food_venues.Category.csv" linkName = "IBM Storage Link:singapore_food_venues.Category.csv" create_download_link(singapore_food_venues,linkName,fileName)Search Venues with recommendations on : Outdoors and RecreationNote: * 2nd Test: Retrieve venues for Outdoors and Recreation.* This section can be run separately due to the rate limit encountered when using the free Foursquare API version. I have saved similar results on GitHub to run the same analysis.# Disable for this run demo.
if (0): # Outdoors & Recreation categoryID = "4d4b7105d754a06377d81259" town_names = singapore_average_rental_prices_by_town['Town'] lat_list = singapore_average_rental_prices_by_town['Latitude'] lng_list = singapore_average_rental_prices_by_town['Longitude'] singapore_outdoor_venues_by_town = getVenuesByCategory(names=town_names,latitudes=lat_list,longitudes=lng_list,categoryID=categoryID) # Save collected Singapore Outdoors & Recreation venues by town into csv for future use. # singapore_outdoor_venues_by_town.to_csv('singapore_outdoorAndRecration.Category.csv',index=False) fileName = "singapore_outdoorAndRecration.Category.csv" linkName = "IBM Storage Link:singapore_outdoorAndRecration.Category.csv" create_download_link(singapore_outdoor_venues_by_town,linkName,fileName)Search Venues with recommendations on : Singapore NightlifeNote: * 3rd Test: Retrieve nightlife venues that are accessible at night. This includes places like nightclubs, bars and places of interest operating 24 hours.* This section can be run separately due to the rate limit encountered when using the free Foursquare API version. I have saved similar results on GitHub to run the same analysis.# Disable for this run demo. if (0): #Nightlife Spot = 4d4b7105d754a06376d81259 categoryID = "4d4b7105d754a06376d81259" town_names = singapore_average_rental_prices_by_town['Town'] lat_list = singapore_average_rental_prices_by_town['Latitude'] lng_list = singapore_average_rental_prices_by_town['Longitude'] singapore_Nightlife_by_town = getVenuesByCategory(names=town_names,latitudes=lat_list,longitudes=lng_list,categoryID=categoryID) # Save collected Singapore nightlife venues by town into csv for future use. # singapore_Nightlife_by_town.to_csv('singapore_Nightlife_by_town.Category.csv',index=False) fileName = "singapore_Nightlife_by_town.Category.csv" linkName = "IBM Storage Link:singapore_Nightlife_by_town.Category.csv" create_download_link(singapore_Nightlife_by_town,linkName,fileName) # The code was removed by Watson Studio for sharing.In this section, we use the FourSquare API to retrieve venue scores for each location. Note that the free FourSquare subscription allows a maximum of 50 such queries per day, so use your queries carefully.score_is_NAN = len(singapore_food_venues[singapore_food_venues['score'].isnull()].index.tolist()) print("Current score=NaN count=",score_is_NAN) for idx in singapore_food_venues[singapore_food_venues['score'].isnull()].index.tolist(): venueID = singapore_food_venues.loc[idx,'VenueID'] status,score = getVenuesIDScore(venueID) if status == "success": singapore_food_venues.loc[idx,'score'] = score score_is_NAN = len(singapore_food_venues[singapore_food_venues['score'].isnull()].index.tolist()) print("PostRun score=NaN count=",score_is_NAN) print('Done',end='')Current score=NaN count= 0 PostRun score=NaN count= 0 Done* Note: Re-run continuation: reload the saved csv file. Reloading previously saved runs avoids re-running the FourSquare API.# The code was removed by Watson Studio for sharing.
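The hidden cell above presumably reads the previously saved venue CSV back from storage so the scored results can be reused. A minimal sketch of what such a reload might look like is shown here; the file name matches the save cell above, while reading from the local working directory (rather than IBM Cloud Object Storage) is an assumption for illustration only.

import pandas as pd

# Reload the food-venue collection saved earlier, including the retrieved scores.
singapore_food_venues = pd.read_csv("singapore_food_venues.Category.csv")
print(singapore_food_venues.shape)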
* Combine venues collection into one dataframe : singapore_town_venues# If all categories are called if (0): singapore_town_venues = pd.concat([singapore_food_venues,singapore_outdoor_venues_by_town,singapore_Nightlife_by_town], ignore_index=True) #else singapore_town_venues = singapore_food_venues singapore_town_venues.shapeData cleanup of unneeded entries* Eliminate possible venue duplicates.* Improve the quality of our venue selection by removing venues with no rating or a rating of 0.0# Eliminate possible venue duplicates. singapore_town_venues = singapore_town_venues[venue_columns] # Drop rows with score == 0 singapore_town_venues = singapore_town_venues[singapore_town_venues.score > 0.0] # Drop columns with missing elements singapore_town_venues = singapore_town_venues.dropna(axis='columns') singapore_town_venues.shape singapore_town_venues.head() # Save town venues collection. # This list is already interesting data for display in different webpages. fileName = "recommended.singapore_town_venues.csv" linkName = "IBM Storage Link:recommended_singapore_town_venues.csv" create_download_link(singapore_town_venues,linkName,fileName)Check venue count per town.singapore_town_venues.groupby('Town').count() # Verify the dtypes singapore_town_venues.dtypesHow many unique categories can be curated from all the returned venues?# Count number of categories that can be curated. print('There are {} unique categories.'.format(len(singapore_town_venues['category'].unique())))There are 67 unique categories.What are the top 20 most common venue types?# Check the top 20 most frequently occurring venue types singapore_town_venues.groupby('category')['VenueName'].count().sort_values(ascending=False)[:20]What are the top 20 venues with the highest score rating?# Top 20 venues with the highest given score rating singapore_town_venues.groupby(['Town','category'])['score'].mean().sort_values(ascending=False)[:20]Analyze each Singapore town's nearby recommended venues# one hot encoding sg_onehot = pd.get_dummies(singapore_town_venues[['category']], prefix="", prefix_sep="") # add Town column back to dataframe sg_onehot['Town'] = singapore_town_venues['Town'] # move neighborhood column to the first column fixed_columns = [sg_onehot.columns[-1]] + list(sg_onehot.columns[:-1]) sg_onehot = sg_onehot[fixed_columns] # Check returned one hot encoding data: print('One hot encoding returned "{}" rows.'.format(sg_onehot.shape[0])) # Regroup rows by town and mean of frequency occurrence per category. sg_grouped = sg_onehot.groupby('Town').mean().reset_index() print('One hot encoding re-group returned "{}" rows.'.format(sg_grouped.shape[0])) sg_grouped.head()One hot encoding returned "644" rows.
One hot encoding re-group returned "25" rows.Analyze Singapore Town most visited venuesnum_top_venues = 10 for town in sg_grouped['Town']: print("# Town=< "+town+" >") temp = sg_grouped[sg_grouped['Town'] == town].T.reset_index() temp.columns = ['venue','freq'] temp = temp.iloc[1:] temp['freq'] = temp['freq'].astype(float) temp = temp.round({'freq': 2}) print(temp.sort_values('freq', ascending=False).reset_index(drop=True).head(num_top_venues)) print('\n')# Town=< ANG MO KIO > venue freq 0 Food Courts 0.18 1 Fast Food Restaurants 0.12 2 Japanese Restaurants 0.06 3 Dessert Shops 0.06 4 Sushi Restaurants 0.06 5 Cafés 0.06 6 Snack Places 0.03 7 Noodle Houses 0.03 8 Ramen Restaurants 0.03 9 Restaurants 0.03 # Town=< BEDOK > venue freq 0 Coffee Shops 0.21 1 Food Courts 0.10 2 Sushi Restaurants 0.07 3 Japanese Restaurants 0.07 4 Fast Food Restaurants 0.07 5 American Restaurants 0.03 6 Chinese Restaurants 0.03 7 Sandwich Places 0.03 8 Indonesian Restaurants 0.03 9 Indian Restaurants 0.03 # Town=< BISHAN > venue freq 0 Coffee Shops 0.14 1 Japanese Restaurants 0.11 2 Chinese Restaurants 0.11 3 Food Courts 0.08 4 Fast Food Restaurants 0.08 5 Cafés 0.08 6 Bubble Tea Shops 0.0[...]First, let's write a function to sort the venues in descending order.def return_most_common_venues(row, num_top_venues): row_categories = row.iloc[1:] row_categories_sorted = row_categories.sort_values(ascending=False) return row_categories_sorted.index.values[0:num_top_venues] num_top_venues = 10 indicators = ['st', 'nd', 'rd'] # create columns according to number of top venues columns = ['Town'] for ind in np.arange(num_top_venues): try: columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind])) except: columns.append('{}th Most Common Venue'.format(ind+1)) # create a new dataframe town_venues_sorted = pd.DataFrame(columns=columns) town_venues_sorted['Town'] = sg_grouped['Town'] for ind in np.arange(sg_grouped.shape[0]): town_venues_sorted.iloc[ind, 1:] = return_most_common_venues(sg_grouped.iloc[ind, :], num_top_venues) print(town_venues_sorted.shape) town_venues_sorted.head()(25, 11)Clustering NeighborhoodsRun *k*-means to cluster the Towns into 5 clusters.# set number of clusters kclusters = 5 sg_grouped_clustering = sg_grouped.drop('Town', 1) # run k-means clustering kmeans = KMeans(n_clusters=kclusters, random_state=1).fit(sg_grouped_clustering) # check cluster labels generated for each row in the dataframe print(kmeans.labels_[0:10]) print(len(kmeans.labels_)) town_venues_sorted.head() town_venues_sorted = town_venues_sorted.set_index("Town") sg_merged = singapore_average_rental_prices_by_town.set_index("Town") # add clustering labels sg_merged['Cluster Labels'] = kmeans.labels_ # merge sg_grouped with singapore_average_rental_prices_by_town to add latitude/longitude for each neighborhood sg_merged = sg_merged.join(town_venues_sorted) sg_merged* Save csv copy of merged data# Save town cluster collection. # This list is already interesting data for display in different webpages. 
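# Aside (not part of the original notebook): kclusters = 5 above is a judgement call.
# One quick, hedged way to sanity-check it is to compare silhouette scores for a few
# candidate values of k on the same grouped feature matrix used for the clustering.
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

for k in range(2, 9):
    trial_labels = KMeans(n_clusters=k, random_state=1).fit_predict(sg_grouped_clustering)
    print('k =', k, 'silhouette =', round(silhouette_score(sg_grouped_clustering, trial_labels), 3))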
fileName = "sg_top_clusters.csv" linkName = "IBM Storage Link:" + fileName create_download_link(sg_merged,linkName,fileName) # create map map_clusters = folium.Map(location=[latitude, longitude], tiles="Openstreetmap", zoom_start=11) # set color scheme for the clusters x = np.arange(kclusters) ys = [i+x+(i*x)**2 for i in range(kclusters)] colors_array = cm.rainbow(np.linspace(0, 1, len(ys))) rainbow = [colors.rgb2hex(i) for i in colors_array] # add markers to the map markers_colors = [] for lat, lon, poi, cluster in zip(sg_merged['Latitude'], sg_merged['Longitude'], sg_merged.index.values,kmeans.labels_): label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True) folium.CircleMarker( [lat, lon], radius=10, popup=label, color=rainbow[cluster-1], fill=True, fill_color=rainbow[cluster-1], fill_opacity=1).add_to(map_clusters) map_clusters# The code was removed by Watson Studio for sharing. # The code was removed by Watson Studio for sharing. # The code was removed by Watson Studio for sharing.The junk-pile sorting task**When all documents are treated as documents from a single pile%load_ext autoreload %autoreload 2 %matplotlib inline from dataset import DatasetTextExtractor from dataset import add_pred_target_name from vectorizers import Tfidf, CountVec from sklearn.decomposition import PCA from vizualization import plot_cluster, plot_confusion_matrix random_state = 42Load data# load and save the dataset from pathlib import Path root_path = Path('./') dataset_raw_path = root_path.joinpath('dataset') dataset_save_path = root_path.joinpath('saved_dataset.pickle') dl = DatasetTextExtractor() if dataset_save_path.is_file(): dataset = dl.load(dataset_save_path) else: dataset = dl.read(dataset_raw_path) dl.save(dataset_save_path)Vectorizevectors_path = root_path.joinpath('vectors') vectors_path.mkdir(parents=True, exist_ok=True) # tfidf tf_idf_vectors_save_path = vectors_path.joinpath('tfidf.pickle') tfidf_vectorizer = Tfidf() if tf_idf_vectors_save_path.is_file(): tfidf_vectors = tfidf_vectorizer.load(tf_idf_vectors_save_path) else: tfidf_vectors = tfidf_vectorizer.fit_transform(dataset['text'].values) tfidf_vectorizer.save(tf_idf_vectors_save_path) # count count_vectors_save_path = vectors_path.joinpath('count.pickle') count_vectorizer = CountVec() if count_vectors_save_path.is_file(): count_vectors = count_vectorizer.load(count_vectors_save_path) else: count_vectors = count_vectorizer.fit_transform(dataset['text'].values) count_vectorizer.save(count_vectors_save_path)Clusteringfrom sklearn import metrics from sklearn.cluster import KMeans, AgglomerativeClustering, AffinityPropagation, SpectralClustering, DBSCAN import hdbscan def clusterization(estimator, X, dataset, key): """ estimator - the clustering estimator X - the feature vectors dataset - the source dataset, which is updated with the computed results key - a unique string for the vectorization method and clustering estimator; it is used to build the column names added to dataset Fit the clustering estimator Print the metrics Add the predicted clusters and cluster names to the source dataset Visualize the clusters Print the output """ # train estimator.fit(X) labels = estimator.labels_ # metrics # Number of clusters in labels, ignoring noise if present.
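# Density-based estimators such as DBSCAN and HDBSCAN mark noise points with the label -1,
# which is why -1 is excluded from the cluster count and tallied separately just below.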
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0) n_noise_ = list(labels).count(-1) print(estimator) print('Estimated number of clusters: %d' % n_clusters_) print('Estimated number of noise points: %d' % n_noise_) if n_clusters_: print("Silhouette Coefficient on X: %0.3f" % metrics.silhouette_score(X, labels)) predicted_target_column_name = f'{key}_target' predicted_target_name_column_name = f'{predicted_target_column_name}_name' dataset[predicted_target_column_name] = labels # reduce data pca_2D = PCA(n_components=2, random_state=random_state) reduced_pca_2D = pca_2D.fit_transform(X.toarray()) print("Silhouette Coefficient on reduced 2D tfidf: %0.3f" % metrics.silhouette_score(reduced_pca_2D, labels)) # cluster visualization with cluster ID plot_cluster(features=reduced_pca_2D, y_pred=labels, y_labels=dataset[['file_name']]) add_pred_target_name(dataset, predicted_target_column_name, 'target_name', predicted_target_name_column_name) # plot confusion matrix y_true = dataset.target_name.values y_pred = dataset[predicted_target_name_column_name].values cm_labels = dataset.target_name.unique() plot_confusion_matrix(y_true, y_pred, cm_labels) # cluster visualization with kind names plot_cluster(features=reduced_pca_2D, y_pred=y_true, y_labels=dataset[['file_name']])Clustering with different algorithms The first plot - the distribution of clusters produced by the clustering The second plot - the reference distribution of the data across clusters The table - the intersection of the predicted labels and the reference labels.Task: the minimum number of errors in the current state is 52. Try to cut that number in half. K-meansn_clusters = 5 estimator = KMeans(n_clusters=n_clusters, random_state=42) clusterization(estimator, tfidf_vectors, dataset, 'kmeans_tfidf') from sklearn import preprocessing tfidf_vectors_normalized = preprocessing.normalize(tfidf_vectors, norm='l2') clusterization(estimator, tfidf_vectors_normalized, dataset, 'kmeans_tfidf_normalized')DBSCANestimator = DBSCAN(eps=0.3, min_samples=10) clusterization(estimator, tfidf_vectors, dataset, 'dbscan_tfidf_03_10') estimator = DBSCAN(eps=0.3, min_samples=10, metric='cosine') clusterization(estimator, tfidf_vectors, dataset, 'dbscan_tfidf_03_10_cosine') estimator = DBSCAN(eps=0.3, min_samples=10, metric='cosine', algorithm='brute') clusterization(estimator, tfidf_vectors, dataset, 'dbscan_tfidf_03_10_cosine_brute') estimator = DBSCAN(eps=0.9, min_samples=500, metric='cosine', algorithm='brute') clusterization(estimator, tfidf_vectors, dataset, 'dbscan_tfidf_01_100_cosine_brute')hdbscanimport hdbscan estimator = hdbscan.HDBSCAN() clusterization(estimator, tfidf_vectors, dataset, 'hdbscan')ML Pipeline Preparation Follow the instructions below to help you create your ML pipeline. 1.
Import libraries and load data from database.- Import Python libraries- Load dataset from database with [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html)- Define feature and target variables X and y# import libraries import pandas as pd import numpy as np from sqlalchemy import create_engine from matplotlib import pyplot as plt import re import dill as pickle import time import datetime import pickle from nltk.corpus import stopwords from nltk.stem.wordnet import WordNetLemmatizer from nltk.tokenize import word_tokenize from sklearn.model_selection import train_test_split from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn.multioutput import MultiOutputClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.svm import LinearSVC from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer from sklearn.metrics import classification_report, confusion_matrix # load data from database - change path accordingly engine = create_engine('sqlite:///../data/data_db/disaster_responses.db') df = pd.read_sql_table('DisasterResponses', engine) df.head() df.info() # The messages will be the features, while the categories will be the target variables X = df.message.to_numpy() y = df[df.columns[4:]].to_numpy() # Check size of dataset print("Complete feature dataset size = ", X.shape, "; complete target variables size = ", y.shape) # Check what's in X, y X[0], y[0] # Finally, save the names of the category labels category_names = list(df.columns[4:]) # Check categories category_names2. Write a tokenization function to process your text datadef tokenize(text): # normalize case and remove punctuation text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower()) # tokenize text tokens_raw = word_tokenize(text) # lemmatize and remove stop words tokens = [WordNetLemmatizer().lemmatize(word) for word in tokens_raw if (word not in stopwords.words('english'))] return tokens # Test function for the first 5 messages for message in X[:5]: tokens = tokenize(message) print(message) print(tokens, '\n')Weather update - a cold front from Cuba that could pass over Haiti ['weather', 'update', 'cold', 'front', 'cuba', 'could', 'pas', 'haiti'] Is the Hurricane over or is it not over ['hurricane'] Looking for someone but no name ['looking', 'someone', 'name'] UN reports Leogane 80-90 destroyed. Only Hospital St. Croix functioning. Needs supplies desperately. ['un', 'report', 'leogane', '80', '90', 'destroyed', 'hospital', 'st', 'croix', 'functioning', 'need', 'supply', 'desperately'] says: west side of Haiti, rest of the country today and tonight ['say', 'west', 'side', 'haiti', 'rest', 'country', 'today', 'tonight']3. Split data in test/train# perform train test split X_train, X_test, y_train, y_test = train_test_split(X, y) # Check size of datasets - default split is 75% train / 25% test print("Training feature dataset size = ", X_train.shape, "; training target variables size = ", y_train.shape) print("Testing feature dataset size = ", X_test.shape, "; testing target variables size = ", y_test.shape)Training feature dataset size = (19521,) ; training target variables size = (19521, 36) Testing feature dataset size = (6507,) ; testing target variables size = (6507, 36)4. Build a machine learning pipelineThis machine pipeline should take in the `message` column as input and output classification results on the other 36 categories in the dataset. 
You may find the [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) helpful for predicting multiple target variables.model = Pipeline([ ('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer()), ('clf', MultiOutputClassifier(RandomForestClassifier())), ])5.a. Train pipeline# train classifier, keeping track of time t0= time.clock() model.fit(X_train, y_train); t1 = time.clock() - t0 print('elapsed time = ', t1)elapsed time = 404.6722135.b. Load saved pipelineIn case the pipeline was saved previously you can load it here# Load dictionary filename = '../models/models_files/cv_trained_model.pkl' model_dict = pickle.load(open(filename, 'rb')) # Get content X_train = model_dict['X_train'] y_train = model_dict['y_train'] X_test = model_dict['X_test'] y_test = model_dict['y_test'] model = model_dict['model']6. Test your pipeline - Predict outcomes based on test data - Report the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's `classification_report` on each.# Predict on test data y_pred = model.predict(X_test) # Display example of results print(classification_report(y_test[:,0], y_pred[:,0]))precision recall f1-score support 0 0.71 0.40 0.51 1536 1 0.84 0.95 0.89 4971 accuracy 0.82 6507 macro avg 0.77 0.68 0.70 6507 weighted avg 0.81 0.82 0.80 6507Run the following cells to show the report for all the labels, on screen# Iterate and print on screen and file for ind_1 in range(y_pred.shape[1]): print('-----------------------------------------------------------------------------------------') print('Label = ', category_names[ind_1]) c_rep = classification_report(y_test[:,ind_1], y_pred[:,ind_1], output_dict=True, zero_division=0) kk = list(c_rep.keys()) for ind_2 in range(len(c_rep) - 3): print('Value = ', kk[ind_2], ': precision = ', "{:.2f}".format(c_rep[kk[ind_2]]['precision']), '; recall = ', "{:.2f}".format(c_rep[kk[ind_2]]['recall']), '; f1-s =', "{:.2f}".format(c_rep[kk[ind_2]]['f1-score']), '; support =', c_rep[kk[ind_2]]['support'])You can run the following cells if you want to save the previous report on a text file# You can change this accordingly to your wish output_file_path = '../models/models_files/' # You can pick the format for the score file name output_file_name = 'results_pipeline.txt' output_file_full_name = output_file_path + output_file_name # Iterate and print on file with open(output_file_full_name, "w") as text_file: print('-----------------------------------------------------------------------------------------', file=text_file) print("Date and time when this report was generated: ", datetime.datetime.now(), file=text_file) for ind_1 in range(y_pred.shape[1]): print('-----------------------------------------------------------------------------------------', file=text_file) print('Label = ', category_names[ind_1], file=text_file) c_rep = classification_report(y_test[:,ind_1], y_pred[:,ind_1], output_dict=True, zero_division=0) kk = list(c_rep.keys()) for ind_2 in range(len(c_rep) - 3): print('Value = ', kk[ind_2], ': precision = ', "{:.2f}".format(c_rep[kk[ind_2]]['precision']), '; recall = ', "{:.2f}".format(c_rep[kk[ind_2]]['recall']), '; f1-s =', "{:.2f}".format(c_rep[kk[ind_2]]['f1-score']), '; support =', c_rep[kk[ind_2]]['support'], file=text_file)7. 
Export your model as a pickle file If you have just now created the pipeline, save it and the data in a pickle file# Create dictionary for pickle file model_dict_2 = {'X_train':X_train, 'y_train':y_train, 'X_test':X_test, 'y_test':y_test, 'model':model} # Save dictionary filename = '../models/models_files/pipeline_trained_model.pkl' pickle.dump(model_dict_2, open(filename, 'wb'))8. Improve your modelUse grid search to find better parameters.# Find what are the parameters of the pipeline model.get_params() # Define grid search parameters parameters = { # 'vect__ngram_range': ((1, 1), (1, 2)), 'vect__max_df': (0.5, 0.75, 1.0), # 'vect__max_features': (None, 5000, 10000), # 'tfidf__use_idf': (True, False), # 'clf__estimator__n_estimators': [50, 100, 200], # 'clf__estimator__min_samples_split': [2, 3, 4] } cv = GridSearchCV(model, param_grid=parameters)**NOTE:** Running the grid search on the full parameter space can take a **_lot_** of time# train classifier, keeping track of time t0= time.clock() cv.fit(X_train, y_train); t1 = time.clock() - t0Fashion-MNIST Classification This notebooks shows an example of using `vflow` on the Fashion-MNIST dataset using deep neural networks. It requires installing pytorch and torchvision (`pip install torch torchvision`).%load_ext autoreload %autoreload 2 from vflow import Vset, init_args, build_vset, dict_to_df import torch from torch import nn from torch.utils.data import DataLoader from torchvision import datasets from torchvision.transforms import ToTensor from functools import partial # load data training_data = datasets.FashionMNIST( root="data", train=True, download=True, transform=ToTensor(), ) test_data = datasets.FashionMNIST( root="data", train=False, download=True, transform=ToTensor(), ) # Define model class NeuralNetwork(nn.Module): def __init__(self, fc1=512, fc2=512): super(NeuralNetwork, self).__init__() self.flatten = nn.Flatten() self.linear_relu_stack = nn.Sequential( nn.Linear(28 * 28, fc1), nn.ReLU(), nn.Linear(fc1, fc2), nn.ReLU(), nn.Linear(fc2, 10), nn.ReLU() ) def forward(self, x): x = self.flatten(x) logits = self.linear_relu_stack(x) return logits # Get cpu or gpu device for training. 
device = "cuda" if torch.cuda.is_available() else "cpu" print("Using {} device".format(device)) def train(model, dataloader, loss_fn, epochs, **kwargs): # initialize model with **kwargs if isinstance(model, type): model = model(**kwargs) optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) size = len(dataloader.dataset) for t in range(epochs): # print(f"Epoch {t+1}\n-------------------------------") for batch, (X, y) in enumerate(dataloader): X, y = X.to(device), y.to(device) # Compute prediction error pred = model(X) loss = loss_fn(pred, y) # Backpropagation optimizer.zero_grad() loss.backward() optimizer.step() if batch % 100 == 0: loss, current = loss.item(), batch * len(X) # print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]") return model def test(dataloader, model, loss_fn): size = len(dataloader.dataset) num_batches = len(dataloader) model.eval() test_loss, correct = 0, 0 with torch.no_grad(): for X, y in dataloader: X, y = X.to(device), y.to(device) pred = model(X) test_loss += loss_fn(pred, y).item() correct += (pred.argmax(1) == y).type(torch.float).sum().item() test_loss /= num_batches correct /= size return correctUsing cpu device`build_vset(name, obj, param_dict, verbose=True)` can be used to construct a `Vset` by currying `obj` with all possible combinations of parameters in `param_dict`.We can use it here to simplify hyperparameter tuning:batch_size = 64 loss_fn = nn.CrossEntropyLoss() # Create data loaders. train_dataloader = DataLoader(training_data, batch_size=batch_size) test_dataloader = DataLoader(test_data, batch_size=batch_size) train_data, test_data = init_args((train_dataloader, test_dataloader), names=['train_data', 'test_data']) # fit neural network modeling_set = build_vset('modeling', train, {'fc1': [256, 512], 'fc2': [256, 512]}, NeuralNetwork, loss_fn=loss_fn, epochs=5) modeling_set.fit(train_data)If using `build_vset` with `verbose=True`, we can visualize parameter combinations in our dataframe by passing `param_key=vset_name` to `dict_to_df`:test_nn = partial(test, loss_fn=loss_fn) hard_metrics_set = Vset(name='hard_metrics', modules=[test_nn], module_keys=["acc"]) hard_metrics = hard_metrics_set.evaluate(test_data, modeling_set.out) df = dict_to_df(hard_metrics, param_key='modeling') dfNumerical Operations#Addition a = 10 b = 20 print(a+b) #Subtraction a = 10 b = 20 print(a-b) #Mult a = 10 b = 20 print(a*b) #Division a = 10 b = 20 print(a/b) #Exponents a = 2 b = 3 print(a**b) #Mod: For non-programmers this is probably unfamilar. 
It returns the remainder of a/b a = 3 b = 2 print(a%b)1Boolean Operationsa = True b = False print(a and b) a = True b = False print(a or b)True> Training only on Answered Correctly and Part of Question (not encoded only scaled)x = pd.read_csv('x.csv', header = None) y = pd.read_csv('y.csv', header = None) x2 = pd.read_csv('x2.csv', header = None) x = np.array(x) x2 = np.array(x2) y = np.array(y) y = y.reshape(y.shape[0]) x = x.reshape(297146, 29, 1) x2 = x2.reshape(297146, 29, 1) new_x = np.concatenate((x,x2), axis = 2) new_x.shape from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(new_x, y, test_size = 0.05, random_state = 64) x_train.shape, y_train.shape x_test.shape, y_test.shape classifier = Sequential() classifier.add(LSTM(units = 128, return_sequences = True, input_shape = (x_train.shape[1], 2))) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128)) classifier.add(Dropout(0.2)) classifier.add(Dense(units = 128, activation = 'relu')) classifier.add(Dense(units = 32, activation = 'relu')) classifier.add(Dense(units = 1)) classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy') classifier.fit(x_train, y_train, epochs = 5, batch_size = 64, workers = 6) #0.6543 y_pred = classifier.predict(x_test) y_pred_rounded = [] for i in y_pred: if i >= 0.5: y_pred_rounded.append(1) else: y_pred_rounded.append(0) y_pred_rounded = np.array(y_pred_rounded) accuracy_score(y_test, y_pred_rounded)> Training only on Answered Correctly and Part of Question (one hot encoded)x = pd.read_csv('x.csv', header = None) y = pd.read_csv('y.csv', header = None) x = np.array(x) y = np.array(y) y = y.reshape(y.shape[0]) x = x.reshape(297146, 29, 1) x3 = np.loadtxt("x_part.txt") x3.shape x3 = x3.reshape(297146, 29, 6) x3[0:2] new_x = np.concatenate((x,x3), axis = 2) new_x.shape from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(new_x, y, test_size = 0.05, random_state = 64) x_train.shape, y_train.shape x_test.shape, y_test.shape classifier = Sequential() classifier.add(LSTM(units = 128, return_sequences = True, input_shape = (x_train.shape[1], 7))) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128)) classifier.add(Dropout(0.2)) classifier.add(Dense(units = 128, activation = 'relu')) classifier.add(Dense(units = 32, activation = 'relu')) classifier.add(Dense(units = 1)) classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy') classifier.fit(x_train, y_train, epochs = 5, batch_size = 64, workers = 6) #0.6543 y_pred = classifier.predict(x_test) y_pred_rounded = [] for i in y_pred: if i >= 0.5: y_pred_rounded.append(1) else: y_pred_rounded.append(0) y_pred_rounded = np.array(y_pred_rounded) from sklearn.metrics import confusion_matrix,accuracy_score confusion_matrix(y_test, y_pred_rounded)>> Training Multiple epochsclassifier.fit(x_train, y_train, epochs = 100, batch_size = 64, workers = 6)Epoch 1/100 4411/4411 [==============================] - 515s 
117ms/step - loss: 0.6638 Epoch 2/100 4411/4411 [==============================] - 523s 119ms/step - loss: 0.6518 Epoch 3/100 4411/4411 [==============================] - 504s 114ms/step - loss: 0.6496 Epoch 4/100 4411/4411 [==============================] - 505s 114ms/step - loss: 0.6487 Epoch 5/100 4411/4411 [==============================] - 504s 114ms/step - loss: 0.6478 Epoch 6/100 4411/4411 [==============================] - 519s 118ms/step - loss: 0.6473 Epoch 7/100 4411/4411 [==============================] - 528s 120ms/step - loss: 0.6466 Epoch 8/100 4411/4411 [==============================] - 518s 117ms/step - loss: 0.6461 Epoch 9/100 4411/4411 [==============================] - 515s 117ms/step - loss: 0.6459 Epoch 10/100 4411/4411 [==============================] - 517s 117ms/step - loss: 0.6456 Epoch 11/100 2356/4411 [===============>..............] - ETA: 4:06 - loss: 0.6450> Training on Answered Correctly, Prior Question Had Explanation and Part (one hot encoded)x = pd.read_csv('x.csv', header = None) y = pd.read_csv('y.csv', header = None) x1 = pd.read_csv('x1.csv', header = None) x = np.array(x) x1 = np.array(x1) y = np.array(y) y = y.reshape(y.shape[0]) x = x.reshape(297146, 29, 1) x1 = x1.reshape(297146, 29, 1) new_x = np.concatenate((x,x1), axis = 2) x3 = np.loadtxt("x_part.txt") x3.shape x3 = x3.reshape(297146, 29, 6) new_x = np.concatenate((new_x,x3), axis = 2) new_x.shape from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(new_x, y, test_size = 0.05, random_state = 64) x_train.shape, y_train.shape, x_test.shape, y_test.shape classifier = Sequential() classifier.add(LSTM(units = 128, return_sequences = True, input_shape = (x_train.shape[1], 8))) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128)) classifier.add(Dropout(0.2)) classifier.add(Dense(units = 128, activation = 'relu')) classifier.add(Dense(units = 32, activation = 'relu')) classifier.add(Dense(units = 1)) classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy') classifier.fit(x_train, y_train, epochs = 5, batch_size = 64, workers = 6) y_pred = classifier.predict(x_test) y_pred_rounded = [] for i in y_pred: if i >= 0.5: y_pred_rounded.append(1) else: y_pred_rounded.append(0) y_pred_rounded = np.array(y_pred_rounded) from sklearn.metrics import confusion_matrix,accuracy_score confusion_matrix(y_test, y_pred_rounded) accuracy_score(y_test, y_pred_rounded)>> Training multiple epochsclassifier.fit(x_train, y_train, epochs = 100, batch_size = 64, workers = 6)Epoch 1/100 4411/4411 [==============================] - 569s 129ms/step - loss: 0.6631 Epoch 2/100 4411/4411 [==============================] - 575s 130ms/step - loss: 0.6519 Epoch 3/100 4411/4411 [==============================] - 539s 122ms/step - loss: 0.6491 Epoch 4/100 4411/4411 [==============================] - 519s 118ms/step - loss: 0.6480 Epoch 5/100 4411/4411 [==============================] - 526s 119ms/step - loss: 0.6470 Epoch 6/100 4411/4411 [==============================] - 514s 117ms/step - loss: 0.6463 Epoch 7/100 4411/4411 [==============================] - 513s 116ms/step - loss: 0.6458 Epoch 8/100 4411/4411 [==============================] - 514s 116ms/step - loss: 0.6453 Epoch 9/100 4411/4411 
[==============================] - 515s 117ms/step - loss: 0.6447 Epoch 10/100 4411/4411 [==============================] - 523s 118ms/step - loss: 0.6447 Epoch 11/100 4411/4411 [==============================] - 516s 117ms/step - loss: 0.6438 Epoch 12/100 4411/4411 [======[...]> Changing Random Statex = pd.read_csv('x.csv', header = None) y = pd.read_csv('y.csv', header = None) x1 = pd.read_csv('x1.csv', header = None) x = np.array(x) x1 = np.array(x1) y = np.array(y) y = y.reshape(y.shape[0]) x = x.reshape(297146, 29, 1) x1 = x1.reshape(297146, 29, 1) new_x = np.concatenate((x,x1), axis = 2) x3 = np.loadtxt("x_part.txt") x3.shape x3 = x3.reshape(297146, 29, 6) new_x = np.concatenate((new_x,x3), axis = 2) new_x.shape>> Random State = 1from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(new_x, y, test_size = 0.05, random_state = 1) x_train.shape, y_train.shape, x_test.shape, y_test.shape classifier = Sequential() classifier.add(LSTM(units = 128, return_sequences = True, input_shape = (x_train.shape[1], 8))) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128)) classifier.add(Dropout(0.2)) classifier.add(Dense(units = 128, activation = 'relu')) classifier.add(Dense(units = 32, activation = 'relu')) classifier.add(Dense(units = 1)) classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy') classifier.fit(x_train, y_train, epochs = 20, batch_size = 64, workers = 6)Epoch 1/20 4411/4411 [==============================] - 509s 115ms/step - loss: 0.6642 Epoch 2/20 4411/4411 [==============================] - 807s 183ms/step - loss: 0.6520 Epoch 3/20 4411/4411 [==============================] - 804s 182ms/step - loss: 0.6494 Epoch 4/20 4411/4411 [==============================] - 783s 177ms/step - loss: 0.6481 Epoch 5/20 4411/4411 [==============================] - 489s 111ms/step - loss: 0.6473 Epoch 6/20 4411/4411 [==============================] - 491s 111ms/step - loss: 0.6465 Epoch 7/20 4411/4411 [==============================] - 491s 111ms/step - loss: 0.6463 Epoch 8/20 4411/4411 [==============================] - 492s 111ms/step - loss: 0.6456 Epoch 9/20 4411/4411 [==============================] - 492s 112ms/step - loss: 0.6451 Epoch 10/20 4411/4411 [==============================] - 491s 111ms/step - loss: 0.6447 Epoch 11/20 4411/4411 [==============================] - 492s 111ms/step - loss: 0.6441 Epoch 12/20 4411/4411 [==================[...]>> Random State = 20from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(new_x, y, test_size = 0.05, random_state = 20) x_train.shape, y_train.shape, x_test.shape, y_test.shape classifier = Sequential() classifier.add(LSTM(units = 128, return_sequences = True, input_shape = (x_train.shape[1], 8))) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128)) classifier.add(Dropout(0.2)) classifier.add(Dense(units = 128, activation = 'relu')) classifier.add(Dense(units = 32, activation = 'relu')) 
classifier.add(Dense(units = 1)) classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy') classifier.fit(x_train, y_train, epochs = 20, batch_size = 64, workers = 6)Epoch 1/20 4411/4411 [==============================] - 502s 114ms/step - loss: 0.6670 Epoch 2/20 4411/4411 [==============================] - 517s 117ms/step - loss: 0.6533 Epoch 3/20 4411/4411 [==============================] - 499s 113ms/step - loss: 0.6508 Epoch 4/20 4411/4411 [==============================] - 489s 111ms/step - loss: 0.6488 Epoch 5/20 4411/4411 [==============================] - 504s 114ms/step - loss: 0.6482 Epoch 6/20 2517/4411 [================>.............] - ETA: 3:42 - loss: 0.6465> Implementing GRUclassifier = Sequential() classifier.add(GRU(units = 128, return_sequences = True, input_shape = (x_train.shape[1], 8))) classifier.add(Dropout(0.2)) classifier.add(GRU(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(GRU(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(GRU(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(GRU(units = 128)) classifier.add(Dropout(0.2)) classifier.add(Dense(units = 128, activation = 'relu')) classifier.add(Dense(units = 32, activation = 'relu')) classifier.add(Dense(units = 1)) classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy') classifier.fit(x_train, y_train, epochs = 5, batch_size = 128, workers = 6)Epoch 1/5 2206/2206 [==============================] - 381s 172ms/step - loss: 0.6621 Epoch 2/5 2206/2206 [==============================] - 392s 177ms/step - loss: 0.6519 Epoch 3/5 2206/2206 [==============================] - 411s 186ms/step - loss: 0.6498 Epoch 4/5 2206/2206 [==============================] - 405s 184ms/step - loss: 0.6487 Epoch 5/5 2206/2206 [==============================] - 386s 175ms/step - loss: 0.6477> Increasing Complexity of the Modelx = pd.read_csv('x.csv', header = None) y = pd.read_csv('y.csv', header = None) x1 = pd.read_csv('x1.csv', header = None) x = np.array(x) x1 = np.array(x1) y = np.array(y) y = y.reshape(y.shape[0]) x = x.reshape(297146, 29, 1) x1 = x1.reshape(297146, 29, 1) new_x = np.concatenate((x,x1), axis = 2) x3 = np.loadtxt("x_part.txt") x3.shape x3 = x3.reshape(297146, 29, 6) new_x = np.concatenate((new_x,x3), axis = 2) new_x.shape from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(new_x, y, test_size = 0.05, random_state = 64) x_train.shape, y_train.shape, x_test.shape, y_test.shape classifier = Sequential() classifier.add(LSTM(units = 256, return_sequences = True, input_shape = (x_train.shape[1], 8))) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 256, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 256, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 256, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 256)) classifier.add(Dropout(0.2)) classifier.add(Dense(units = 1)) classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy') classifier.fit(x_train, y_train, epochs = 200, batch_size = 128, workers = 6) classifier = Sequential() classifier.add(LSTM(units = 256, return_sequences = True, input_shape = (x_train.shape[1], 8))) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 256, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 256, return_sequences = True)) 
classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 256, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 256)) classifier.add(Dropout(0.2)) classifier.add(Dense(units = 128, activation = 'relu')) classifier.add(Dense(units = 1)) classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy') classifier.fit(x_train, y_train, epochs = 200, batch_size = 128, workers = 6) y_pred = classifier.predict(x_test) y_pred_rounded = [] for i in y_pred: if i >= 0.5: y_pred_rounded.append(1) else: y_pred_rounded.append(0) y_pred_rounded = np.array(y_pred_rounded) from sklearn.metrics import confusion_matrix,accuracy_score confusion_matrix(y_test, y_pred_rounded) accuracy_score(y_test, y_pred_rounded)Training the Model for 200 Epochsx = pd.read_csv('x.csv', header = None) y = pd.read_csv('y.csv', header = None) x1 = pd.read_csv('x1.csv', header = None) x = np.array(x) x1 = np.array(x1) y = np.array(y) y = y.reshape(y.shape[0]) x = x.reshape(297146, 29, 1) x1 = x1.reshape(297146, 29, 1) new_x = np.concatenate((x,x1), axis = 2) x3 = np.loadtxt("x_part.txt") x3.shape x3 = x3.reshape(297146, 29, 6) new_x = np.concatenate((new_x,x3), axis = 2) new_x.shape from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(new_x, y, test_size = 0.05, random_state = 0) x_train.shape, y_train.shape, x_test.shape, y_test.shape classifier = Sequential() classifier.add(LSTM(units = 128, return_sequences = True, input_shape = (x_train.shape[1], 8))) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128, return_sequences = True)) classifier.add(Dropout(0.2)) classifier.add(LSTM(units = 128)) classifier.add(Dropout(0.2)) classifier.add(Dense(units = 128, activation = 'relu')) classifier.add(Dense(units = 32, activation = 'relu')) classifier.add(Dense(units = 1)) classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy') classifier.fit(x_train, y_train, epochs = 200 , batch_size = 64, workers = 6) plt.figure(figsize=(15,9)) plt.plot(loss_data[0], loss_data[1]) plt.xlabel("Epoch", fontsize = 18) plt.ylabel("Training Loss", fontsize = 18) y_pred = classifier.predict(x_test) y_pred_rounded = [] for i in y_pred: if i >= 0.5: y_pred_rounded.append(1) else: y_pred_rounded.append(0) y_pred_rounded = np.array(y_pred_rounded) accuracy_score(y_test, y_pred_rounded) classifier.save('E:\RNN_Saved')INFO:tensorflow:Assets written to: E:\RNN_Saved\assetsCarcione et al. (2007), Figures 6-9Reproduced by ([@prisae](https://github.com/prisae)).> **., , and , 2007** > Cross-property relations between electrical conductivity and the seismic velocity of rocks. > Geophysics, 72, E193-E204; DOI: [10.1190/1.2762224](https://doi.org/10.1190/1.2762224). Requirements- `NumPy`- `SciPy`- `IPython`- `Jupyter`- `matplotlib`**NOTE:** I created these scripts in the early stage of my PhD, somewhen in 2010/2011 (if you are interested in my thesis you can find it [here](https://werthmuller.org/research), it comes with all source code, unfortunately without the real data due to copyrights). It was my first go at Python, so don't be too harsh ;). Many things would probably be included in `bruges`, `welly`, or another package by now, I don't know. 
The only thing I did at this point was to extract the required functions and translate them from Python 2 to Python 3.import numpy as np from copy import deepcopy as dc import matplotlib.pyplot as plt import vel2resSee the notes above: I quick and dirty translated the Python 2 code to Python 3. By doing this, there might have happened funny things (0- and NaN-checks I did etc, which were not properly translated). To not clutter this notebooks with warnings I ignore all warnings here. To work properly with all the functions one would have to be a bit more careful...np.seterr(all='ignore') # Plot-style adjustments %matplotlib inline plt.rcParams['figure.dpi'] = 100Figure 6 Calculation figures 6 and 8data = vel2res.carc_tab1('shale') vel2res.carc_der(data, 500) data['a_k'] = np.array(-3) data['a_f'] = np.array(1.) data['p_e'] = np.array(.15) rho_b = data['rho_b'] rho_0 = data['rho_0'] vp_b = data['vp_b'] tdata = dc(data) tdata['rho_b'] = rho_0 # Calculation rho_gt = vel2res.in2por2out(data, vel2res.por_v_harm, vel2res.rho_glov) rho_ht = vel2res.in2por2out(data, vel2res.por_v_harm, vel2res.rho_herm) rho_st = vel2res.in2por2out(data, vel2res.por_v_harm, vel2res.rho_self) vel_ar = vel2res.in2por2out(tdata, vel2res.por_r_arch, vel2res.vp_raym) sig_ar = np.nan_to_num(1./vel2res.in2por2out(data, vel2res.por_v_raym, vel2res.rho_arch)) vel_br = vel2res.in2por2out(tdata, vel2res.por_r_hsub2, vel2res.vp_raym) sig_br = np.nan_to_num(1./vel2res.in2por2out(data, vel2res.por_v_raym, vel2res.rho_hsub2)) sig_at = np.nan_to_num(1./vel2res.in2por2out(data, vel2res.por_v_harm, vel2res.rho_arch)) vel_gar = vel2res.in2por2out(tdata, vel2res.por_r_arch, vel2res.vp_gass) sig_gar = np.nan_to_num(1./vel2res.in2por2out(data, vel2res.por_v_gass, vel2res.rho_arch)) vel_ghe = vel2res.in2por2out(data, vel2res.por_r_herm, vel2res.vp_gass) rho_ghe = vel2res.in2por2out(data, vel2res.por_v_gass, vel2res.rho_herm) vel_gcr = vel2res.in2por2out(data, vel2res.por_r_crim, vel2res.vp_gass) rho_gcr = vel2res.in2por2out(data, vel2res.por_v_gass, vel2res.rho_crim) vel_gss = vel2res.in2por2out(data, vel2res.por_r_self, vel2res.vp_gass) rho_gss = vel2res.in2por2out(data, vel2res.por_v_gass, vel2res.rho_self) vel_ghm = vel2res.in2por2out(data, vel2res.por_r_hslb, vel2res.vp_gass) vel_ghp = vel2res.in2por2out(data, vel2res.por_r_hsub, vel2res.vp_gass) rho_ghm = vel2res.in2por2out(data, vel2res.por_v_gass, vel2res.rho_hsub) rho_ghp = vel2res.in2por2out(data, vel2res.por_v_gass, vel2res.rho_hslb)Plotfig6 = plt.figure(6) plt.axvline(data['vp_s'], linewidth=1, color='k') plt.axvline(data['vp_f'], linewidth=1, color='k') plt.plot(vp_b, sig_at, 'b-', label='Archie/time-average') plt.plot(vel_ar, 1./rho_0, 'g--', linewidth=1) plt.plot(vp_b, sig_ar, 'g-', label='Archie/Raymer') plt.plot(vp_b[1:], 1./rho_gt[1:], 'r-', label='Glover/time-average') plt.plot(vp_b, 1./rho_ht, 'c-', label='Hermance/time-average') plt.plot(vp_b, 1./rho_st, 'm-', label='Self-similar/time-average') plt.plot(vel_br, 1./rho_0, 'y--', linewidth=1) plt.plot(vp_b, sig_br, 'y-', label='HS/Raymer') plt.legend() plt.title("Carcione et al., 2007, Figure 6") plt.xlabel("Velocity (km/s)") plt.ylabel("Conductivity (S/m)") plt.axis([1.0, 4.2, 0.0, 0.45]) plt.show()Figure 6. Cross-property relations for different models of the overburden (shale saturated with brine). Original Figure 6![title](Figure6.jpg) Figure 7**Important**: Equation (49) in Carcione et al is wrong. 
It is given as$$v_P = 2.2888\left(Z\frac{\sigma}{\sigma_f}\right)^{1/6}\ ,$$where the P-wave velocity $v_P$ is in km/s, depth $Z$ in km, and the conductivities in S/m.However, the correct equation is$$v_P = 2.2888\left(Z\frac{\sigma_f}{\sigma}\right)^{1/6}\ ,$$as for instance given in *The Rock Physics Handbook* by Mavko et al., 2009.Looking at the figures you might think hey, the curve from the equation in Carcione et al (original figure) looks much better then the curve from the equation by Mavko et al (my figure). It is misleading. The Faust-equation is a function of depth. So the curve will change depending at which depth you are. The other curves are not. So the different curves can not be compared just like that without taking other aspects into the analysis too. Calculationdata2 = vel2res.carc_tab1('sand') vel2res.carc_der(data2, 500) data2['a_k'] = np.array(-3) data2['a_f'] = np.array(1.) data2['m_e'] = np.array(2.) data2['p_e'] = np.array(.15) data2['depth'] = np.array(2.) rho_b2 = data2['rho_b'] vp_b2 = data2['vp_b'] # Calculation rho_gt2 = vel2res.in2por2out(data2, vel2res.por_v_harm, vel2res.rho_glov) rho_ht2 = vel2res.in2por2out(data2, vel2res.por_v_harm, vel2res.rho_herm) rho_st2 = vel2res.in2por2out(data2, vel2res.por_v_harm, vel2res.rho_self) vel_ghe2 = vel2res.in2por2out(data2, vel2res.por_r_herm, vel2res.vp_gass) rho_ghe2 = vel2res.in2por2out(data2, vel2res.por_v_gass, vel2res.rho_herm) vel_gcr2 = vel2res.in2por2out(data2, vel2res.por_r_crim, vel2res.vp_gass) rho_gcr2 = vel2res.in2por2out(data2, vel2res.por_v_gass, vel2res.rho_crim) vel_gss2 = vel2res.in2por2out(data2, vel2res.por_r_self, vel2res.vp_gass) rho_gss2 = vel2res.in2por2out(data2, vel2res.por_v_gass, vel2res.rho_self) vel_ghm2 = vel2res.in2por2out(data2, vel2res.por_r_hslb, vel2res.vp_gass) vel_ghp2 = vel2res.in2por2out(data2, vel2res.por_r_hsub, vel2res.vp_gass) rho_ghm2 = vel2res.in2por2out(data2, vel2res.por_v_gass, vel2res.rho_hsub) rho_ghp2 = vel2res.in2por2out(data2, vel2res.por_v_gass, vel2res.rho_hslb) rho_ft2 = vel2res.rho_faus(data2)Plot# PLOT NON-GASSMANN RELATIONS fig7 = plt.figure(7) plt.axvline(data2['vp_s'], linewidth=1, color='k') plt.axvline(data2['vp_f'], linewidth=1, color='k') plt.plot(vp_b2, 1000./rho_gt2, '-', label='Glover/time-average') plt.plot(vp_b2, 1000./rho_ht2, '-', label='Hermance/time-average') plt.plot(vp_b2, 1000./rho_st2, '-', label='Self-similar/time-average') plt.plot(vp_b2, 1000./rho_ft2, '-', label='Faust') plt.legend() plt.title("Carcione et al., 2007, Figure 7") plt.xlabel("Velocity (km/s)") plt.ylabel("Conductivity (mS/m)") plt.axis([0.0, 6.0, 0.0, 1]) plt.show()Figure 7. Cross-property relations for different models of the reservoir (sandstone saturated with oil). Archie-based relations are not shown because the conductivity is negligible (it is assumed that $\sigma_s$ = 0). The Faust curve corresponds to 2-km depth.**Note:** Y-axis in Figures 7 and 8 by a factor 10 different from Carcione. -> This is a typo in the paper, either in the plot or in Table 1. And again, the Faust-curve by Carcione is wrong, see my comment above. Original Figure 7![title](Figure7.jpg) Figure 8 CalculationWas done above together with Figure 6. 
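As a brief aside to the Faust note above (Figure 7), plugging illustrative numbers into both forms of the relation shows how much the correction matters. The conductivity values below are invented purely for illustration and are not taken from the paper; only the depth of 2 km and the constant 2.2888 come from the text.

# Corrected Faust relation (Mavko et al.): v_P = 2.2888 * (Z * sigma_f / sigma)**(1/6)
Z = 2.0          # depth in km, as used for the Faust curve in Figure 7
sigma_f = 3.0    # illustrative fluid (brine) conductivity in S/m (assumed)
sigma = 0.03     # illustrative bulk-rock conductivity in S/m (assumed)
vp_correct = 2.2888 * (Z * sigma_f / sigma)**(1/6)      # roughly 5.5 km/s for these inputs
vp_as_printed = 2.2888 * (Z * sigma / sigma_f)**(1/6)   # roughly 1.2 km/s, clearly too low
print(vp_correct, vp_as_printed)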
Plot# PLOT GASSMANN RELATIONS fig8 = plt.figure(8) plt.axvline(data['vp_s'], linewidth=1, color='k') plt.axvline(data['vp_f'], linewidth=1, color='k') plt.plot(vel_gar, 1./rho_0, 'b--', linewidth=1) plt.plot(vp_b[1:], sig_gar[1:], 'b-', label='Archie') plt.plot(vel_ghe, 1./rho_b, 'g--', linewidth=1) plt.plot(vp_b[1:], 1./rho_ghe[1:], 'g-', label='Hermance') plt.plot(vel_gcr, 1./rho_b, 'r--', linewidth=1) plt.plot(vp_b[1:], 1./rho_gcr[1:], 'r-', label='CRIM') plt.plot(vel_gss, 1./rho_b, 'c--', linewidth=1) plt.plot(vp_b[1:], 1./rho_gss[1:], 'c-', label='Self-similar') plt.plot(vel_ghm, 1./rho_b, 'm--', linewidth=1) plt.plot(vp_b[1:], 1./rho_ghm[1:], 'm-', label='HS-') plt.plot(vel_ghp, 1./rho_b, 'y--', linewidth=1) plt.plot(vp_b[1:], 1./rho_ghp[1:], 'y-', label='HS+') plt.legend(loc=1) plt.text(1.7, .36, 'Gassmann relations') plt.title("Carcione et al., 2007, Figure 8") plt.xlabel("Velocity (km/s)") plt.ylabel("Conductivity (S/m)") plt.axis([1.0, 4.5, 0.0, 0.4]) plt.show()Figure 8. Cross-property relations for different conductivity models of the overburden (shale saturated with brine), combined with the Gassmann equation. The dashed lines correspond to the HS bounds. Original Figure 8![title](Figure8.jpg) Figure 9 CalculationWas done above together with Figure 7. Plot# PLOT GASSMANN RELATIONS fig9 = plt.figure(9) plt.axvline(data2['vp_s'], linewidth=1, color='k') plt.axvline(data2['vp_f'], linewidth=1, color='k') plt.plot(vel_ghe2, 1000./rho_b2, 'b--', linewidth=1) plt.plot(vp_b2[1:], 1000./rho_ghe2[1:], 'b-', label='Hermance') plt.plot(vel_gcr2, 1000./rho_b2, 'g--', linewidth=1) plt.plot(vp_b2[1:], 1000./rho_gcr2[1:], 'g-', label='CRIM') plt.plot(vel_gss2, 1000./rho_b2, 'r--', linewidth=1) plt.plot(vp_b2[1:], 1000./rho_gss2[1:], 'r-', label='Self-similar') plt.plot(vel_ghm2, 1000./rho_b2, 'm--') plt.plot(vp_b2[1:], 1000./rho_ghm2[1:], 'm-', label='HS-') plt.plot(vel_ghp2, 1000./rho_b2, 'y--') plt.plot(vp_b2[1:], 1000./rho_ghp2[1:], 'y-', label='HS+') plt.legend(loc=5) plt.text(.2, .93, 'Gassmann relations') plt.title("Carcione et al., 2007, Figure 9") plt.xlabel("Velocity (km/s)") plt.ylabel("Conductivity (mS/m)") plt.axis([0.0, 6.0, 0.0, 1]) plt.show()Mentor-Mentee Match, via Vectorization, PCA(Principal Component Analysis), Clustering, and Test Search for the Best Model This notebook is part1, Vectorization, and part2 PCA Goal: modeling the possibility in end-result match of 1 mentee to multiple mentor candidates, or 1 mentor to multiple mentee candidates, or multiple mentor and mentee candidates in the same clustering. By vectorization and PCA preparation, for clustering of mentor and mentee in the similarity group of same interests, by considering gender, time zone, interest of life, and tech stack. One similarity group in one clustering of mentor and mentee; then the client can manually choose within the same clustering for the final match, as the client mentioned currently they would still prefer human intelligence for the final match with various reasons. 
The benefit of this model is one mentor may have bandwidth to coach multiple mentees, and one mentee may have several optional mentors for match-maker's decision; delivered by reading the same clustering the options provided for the match-maker.import pandas as pd pd.set_option('display.max_colwidth', 500) import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.feature_extraction.text import CountVectorizer from sklearn.cluster import AgglomerativeClustering #from sklearn.metrics import calinski_harabaz_score, silhouette_score, davies_bouldin_score from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, classification_report from sklearn.pipeline import Pipeline from tqdm import tqdm_notebook as tqdm # mock-up dataset in csv file with 10 observations # we could also take the excel file from client converted to csv for modeling experiments raw_df = pd.read_csv('Mentor_Mentee.csv')The dataset (could be an excel file from client) consists of personal info, tech stack(For mentee, it is the interest level to learn on scale of 1 to 10. For mentor, it is the proficiency level on scale of 1 to 10.) Bios, could be a check-box type survey or each person's own words; and list of different interests of life on scale of 1 to 10.raw_df.head()Start preparing vectorizationdf = raw_df.drop(['ID', '', '', 'Gender', 'Time zone'],axis=1) df.head() def string_convert(x): """ First converts the lists in the DF into strings """ if isinstance(x, list): return ' '.join(x) else: return x # Looping through the columns and applying the function for col in df.columns: df[col] = df[col].apply(string_convert) dfVectorizationdef vectorization(df, columns): """ Using recursion, iterate through the df until all the categories have been vectorized """ column_name = columns[0] # Checking if the column name has been removed already if column_name not in ['Bios', 'Movies','Religion', 'Music', 'Books', 'Sports']: return df if column_name in ['Religion']: df[column_name.lower()] = df[column_name].cat.codes df = df.drop(column_name, 1) return vectorization(df, df.columns) else: # Instantiating the Vectorizer vectorizer = CountVectorizer() # Fitting the vectorizer to the Bios x = vectorizer.fit_transform(df[column_name]) # Creating a new DF that contains the vectorized words df_wrds = pd.DataFrame(x.toarray(), columns=vectorizer.get_feature_names()) # Concating the words DF with the original DF new_df = pd.concat([df, df_wrds], axis=1) # Dropping the column because it is no longer needed in place of vectorization new_df = new_df.drop(column_name, axis=1) return vectorization(new_df, new_df.columns) # Creating the vectorized DF vect_df = vectorization(df, df.columns)/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function get_feature_names is deprecated; get_feature_names is deprecated in 1.0 and will be removed in 1.2. Please use get_feature_names_out instead. warnings.warn(msg, category=FutureWarning)Scaling the data It will assist our clustering algorithm’s performance, is scaling categories. 
This will potentially decrease the time it takes to fit and transform our clustering algorithm to the dataset.scaler = MinMaxScaler() # vect_df = pd.DataFrame(scaler.fit_transform(new_df), # columns=new_df.columns, # index=new_df.index) vect_df = pd.DataFrame(scaler.fit_transform(vect_df), index=vect_df.index, columns=vect_df.columns) #pd.DataFrame(scaler.fit_transform(vect_df), index=vect_df.index, columns=vect_df.columns) # Creating a new DF that contains the vectorized words vectorizer = CountVectorizer() x = vectorizer.fit_transform(df['Bios']) df_wrds = pd.DataFrame(x.toarray(), columns=vectorizer.get_feature_names()) df_wrds # Concatenating the words DF with the original DF new_df = pd.concat([df, df_wrds], axis=1) # Dropping the Bios because it is no longer needed in place of vectorization new_df.drop('Bios', axis=1, inplace=True) # Viewing the new DF new_df new_df.shapeend of vectorization Part2, PCA(Principal Component Analysis)In order for us to reduce this large feature set, we implement PCA. This technique will reduce the dimensionality of our dataset but still retain much of the variability or valuable statistical information. The following plot will visually tell us the number of features account for the variance. X axis: of Features accounting for % of the Variance Y axis: Percent of variancefrom sklearn.decomposition import PCA # Instantiating PCA pca = PCA() # Fitting and Transforming the DF df_pca = pca.fit_transform(new_df) # Plotting to determine how many features should the dataset be reduced to plt.style.use("bmh") plt.figure(figsize=(14,4)) print(pca.explained_variance_ratio_.cumsum()) plt.plot(np.cumsum((pca.explained_variance_ratio_))) plt.show() print(np.cumsum((pca.explained_variance_ratio_)))[0.46490907 0.64503732 0.78693831 0.85267768 0.90996079 0.9570419 0.97795939 0.99095967 1. 1. ]After running our code, the number of features that account for 95% of the variance is 6. With that number in mind, we can apply it to our PCA function to reduce the number of Principal Components or Features in our last DF to 6 from 10. Finding the Right Number of Clusters Below, we will be running some code that will run our clustering algorithm with differing amounts of clusters.from sklearn.metrics import calinski_harabasz_score, silhouette_score, davies_bouldin_score # Setting the amount of clusters to test out cluster_cnt = [i for i in range(2, 9, 1)] # Establishing empty lists to store the scores for the evaluation metrics ch_scores = [] s_scores = [] db_scores = [] # The DF for evaluation eval_df = df_pca # Looping through different iterations for the number of clusters for i in tqdm(cluster_cnt): # Clustering with different number of clusters clust = AgglomerativeClustering(n_clusters=i, linkage='complete') clust.fit(eval_df) cluster_assignments = clust.labels_ # Appending the scores to the empty lists ch_scores.append(calinski_harabasz_score(eval_df, cluster_assignments)) s_scores.append(silhouette_score(eval_df, cluster_assignments)) db_scores.append(davies_bouldin_score(eval_df, cluster_assignments))/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:16: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0 Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook` app.launch_new_instance()CIFAR-10 Multiple ClassesLet's go over another example of using Keras and building out CNNs. This time will use another famous data set, the CIFAR-10 dataset which consists of 10 different image types. 
----- The DataCIFAR-10 is a dataset of 50,000 32x32 color training images, labeled over 10 categories, and 10,000 test images.from tensorflow.keras.datasets import cifar10 (x_train, y_train), (x_test, y_test) = cifar10.load_data() x_train.shape data=[(x_train, y_train), (x_test, y_test)] import pickle pickle.dump(data,open("cifar_data","wb")) data=pickle.load(open("cifar_data","rb")) x_train[0].shape import matplotlib.pyplot as plt # FROG plt.imshow(x_train[0]) # HORSE plt.imshow(x_train[12])PreProcessingx_train[0] x_train[0].shape x_train.max() x_train = x_train/255 x_test = x_test/255 x_train.shape x_test.shapeLabelsfrom keras.utils import to_categorical y_train.shape y_train[0] y_cat_train = to_categorical(y_train,10) y_cat_train.shape y_cat_train[0] y_cat_test = to_categorical(y_test,10)---------- Building the Modelfrom keras.models import Sequential from keras.layers import Dense, Conv2D, MaxPool2D, Flatten model = Sequential() ## FIRST SET OF LAYERS # CONVOLUTIONAL LAYER model.add(Conv2D(filters=32, kernel_size=(4,4),input_shape=(32, 32, 3), activation='relu',)) # POOLING LAYER model.add(MaxPool2D(pool_size=(2, 2))) ## SECOND SET OF LAYERS # CONVOLUTIONAL LAYER model.add(Conv2D(filters=32, kernel_size=(4,4),input_shape=(32, 32, 3), activation='relu',)) # POOLING LAYER model.add(MaxPool2D(pool_size=(2, 2))) # FLATTEN THE FEATURE MAPS TO A VECTOR BEFORE THE DENSE LAYERS model.add(Flatten()) # 256 NEURONS IN DENSE HIDDEN LAYER (YOU CAN CHANGE THIS NUMBER OF NEURONS) model.add(Dense(256, activation='relu')) # LAST LAYER IS THE CLASSIFIER, THUS 10 POSSIBLE CLASSES model.add(Dense(10, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) model.summary() model.fit(x_train,y_cat_train,verbose=1,epochs=10) # Careful, don't overwrite our file!
# model.save('cifar_10epochs.h5') model.metrics_names model.evaluate(x_test,y_cat_test) from sklearn.metrics import classification_report predictions = model.predict_classes(x_test) print(classification_report(y_test,predictions))precision recall f1-score support 0 0.80 0.59 0.68 1000 1 0.83 0.76 0.80 1000 2 0.44 0.65 0.52 1000 3 0.50 0.40 0.44 1000 4 0.50 0.75 0.60 1000 5 0.52 0.57 0.54 1000 6 0.70 0.74 0.72 1000 7 0.88 0.56 0.69 1000 8 0.82 0.74 0.78 1000 9 0.81 0.68 0.74 1000 avg / total 0.68 0.64 0.65 10000Optional: Large Modelmodel = Sequential() ## FIRST SET OF LAYERS # CONVOLUTIONAL LAYER model.add(Conv2D(filters=32, kernel_size=(4,4),input_shape=(32, 32, 3), activation='relu',)) # CONVOLUTIONAL LAYER model.add(Conv2D(filters=32, kernel_size=(4,4),input_shape=(32, 32, 3), activation='relu',)) # POOLING LAYER model.add(MaxPool2D(pool_size=(2, 2))) ## SECOND SET OF LAYERS # CONVOLUTIONAL LAYER model.add(Conv2D(filters=64, kernel_size=(4,4),input_shape=(32, 32, 3), activation='relu',)) # CONVOLUTIONAL LAYER model.add(Conv2D(filters=64, kernel_size=(4,4),input_shape=(32, 32, 3), activation='relu',)) # POOLING LAYER model.add(MaxPool2D(pool_size=(2, 2))) # FLATTEN IMAGES FROM 28 by 28 to 764 BEFORE FINAL LAYER model.add(Flatten()) # 512 NEURONS IN DENSE HIDDEN LAYER (YOU CAN CHANGE THIS NUMBER OF NEURONS) model.add(Dense(512, activation='relu')) # LAST LAYER IS THE CLASSIFIER, THUS 10 POSSIBLE CLASSES model.add(Dense(10, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) model.fit(x_train,y_cat_train,verbose=1,epochs=20) model.evaluate(x_test,y_cat_test) from sklearn.metrics import classification_report predictions = model.predict_classes(x_test) print(classification_report(y_test,predictions)) model.save('larger_CIFAR10_model.h5')Preparamos Datosle = preprocessing.LabelEncoder() le.fit(cancer_df['diagnosis']) cancer_df['diagnosis_cod'] = le.transform(cancer_df['diagnosis']) cancer_df = cancer_df.drop(['Unnamed: 32','id','diagnosis'], axis=1) cancer_df #le.inverse_transform(iris_df.species_cod) # separamos datos en data y target cancer_data = cancer_df.drop(['diagnosis_cod'], axis=1) cancer_target = cancer_df.diagnosis_cod # Separamos los Datos.... Entrenamiento y test #? train_test_split() X_train, X_test, y_train, y_test = train_test_split(cancer_data, cancer_target, test_size=0.33, random_state=None, shuffle =None) print('Set de datos para Entrenamiento =',len(X_train)) print('Set de datos para Test',len(X_test)) print('Total',len(X_test)+len(X_train)) print('Data Shape=',X_test.shape) print('Target Shape =',y_test.shape) X_train.head() #X_train.columnsSet de datos para Entrenamiento = 381 Set de datos para Test 188 Total 569 Data Shape= (188, 30) Target Shape = (188,)SVM Cancer Data Setmodel=SVC(C = 1.0) model.fit(X_train,y_train) # ? 
cross_val_score() cross_val_score(model, cancer_data, cancer_target) model.fit(X_train,y_train) print ("Score with data Tes",model.score(X_test,y_test)) print ("Score with data Train",model.score(X_train,y_train)) warnings.filterwarnings('ignore') ind = 125 print(cancer_data.iloc[ind]) print('specie',cancer_target.iloc[ind], le.inverse_transform(cancer_target.iloc[ind])) x_new = cancer_data.ix[ind] print('\n======== PREDICTION ========') prediction = model.predict([x_new.values]) print('Specie prediction',prediction, le.inverse_transform(prediction))radius_mean 13.850000 texture_mean 17.210000 perimeter_mean 88.440000 area_mean 588.700000 smoothness_mean 0.087850 compactness_mean 0.061360 concavity_mean 0.014200 concave points_mean 0.011410 symmetry_mean 0.161400 fractal_dimension_mean 0.058900 radius_se 0.218500 texture_se 0.856100 perimeter_se 1.495000 area_se 17.910000 smoothness_se 0.004599 compactness_se 0.009169 concavity_se 0.009127 concave points_se 0.004814 symmetry_se 0.012470 fractal_dimension_se 0.001708 radius_worst 15.490000 texture_worst 23.580000 perimeter_worst 100.300000 area_worst 725.900000 smoothness_worst 0.115700 compactness_worst 0.135000 concavity_wo[...]Clasification Reportwarnings.filterwarnings('ignore') p = model.predict(X_test) print ('Accuracy:', accuracy_score(y_test, p)) print ('\nConfusion Matrix:\n', confusion_matrix(y_test, p)) print ('\nClassification Report:', classification_report(y_test, p))Accuracy: 0.6170212765957447 Confusion Matrix: [[116 0] [ 72 0]] Classification Report: precision recall f1-score support 0 0.62 1.00 0.76 116 1 0.00 0.00 0.00 72 avg / total 0.38 0.62 0.47 188Diabetes Data Set# Cargamos los datos del fichero CSV diabetes_df = pd.read_csv('../../data/06_diabetes.csv') # Imprimimos los datos cargados con pandas #diabetes_df.info() diabetes_df # separamos datos en data y target diabetes_data = diabetes_df.drop(['diagnosis'], axis=1) diabetes_target = diabetes_df.diagnosis # Separamos los Datos.... Entrenamiento y test #? train_test_split() from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(diabetes_data, diabetes_target, test_size=0.33, random_state=None, shuffle =None) print('Set de datos para Entrenamiento =',len(X_train)) print('Set de datos para Test',len(X_test)) print('Total',len(X_test)+len(X_train)) print('Data Shape=',X_test.shape) print('Target Shape =',y_test.shape) X_train.head() #X_train.columns model=SVC(C = 0.1, kernel='linear') model.fit(X_train,y_train) # ? 
cross_val_score() cross_val_score(model, cancer_data, cancer_target) model.fit(X_train,y_train) print ("Score with data Tes",model.score(X_test,y_test)) print ("Score with data Train",model.score(X_train,y_train)) warnings.filterwarnings('ignore') ind = 56 print(diabetes_data.iloc[ind]) print('specie',diabetes_target.iloc[ind], le.inverse_transform(diabetes_target.iloc[ind])) x_new = diabetes_data.ix[ind] print('\n======== PREDICTION ========') prediction = model.predict([x_new.values]) print('Specie prediction',prediction, le.inverse_transform(prediction))Pregnancies 7.000 Glucose 187.000 BloodPressure 68.000 SkinThickness 39.000 Insulin 304.000 BMI 37.700 DiabetesPedigreeFunction 0.254 Age 41.000 Name: 56, dtype: float64 specie 1 M ======== PREDICTION ======== Specie prediction [1] ['M']Clasification Reportp = model.predict(X_test) print ('Accuracy:', accuracy_score(y_test, p)) print ('\nConfusion Matrix:\n', confusion_matrix(y_test, p)) print ('\nClassification Report:', classification_report(y_test, p))Accuracy: 0.7677165354330708 Confusion Matrix: [[140 26] [ 33 55]] Classification Report: precision recall f1-score support 0 0.81 0.84 0.83 166 1 0.68 0.62 0.65 88 avg / total 0.76 0.77 0.77 254DATA PRE-PROCESSING:# Data Preprocessing: def data_preprocessing(movie_metadata, credits): start = time.time() print("Extracting Relevant Metadata...") lim_movie_metadata = movie_metadata[["id", "original_title", "overview", "genres"]] print("Imputing Missing Values in Overview...") lim_movie_metadata["overview"].fillna("", inplace = True) print("Extracting Genres...") lim_movie_metadata["list_of_genres"] = lim_movie_metadata["genres"].apply(extract_info2) print("Changing dtype of 'id' from string to int...") lim_movie_metadata["id"] = lim_movie_metadata["id"].apply(change_type_id) print("Extracting Actor and Character Names for All Movies...") credits["list_of_actors"] = credits["cast"].apply(extract_info1, col_name = "name") credits["list_of_characters"] = credits["cast"].apply(extract_info1, col_name = "character") print("Creating Dataframe of Extracted Credits Data...") lim_credits = credits[["id", "list_of_actors", "list_of_characters"]] print("Size of Credits: {}\tSize of Metadata: {}\n".format(lim_credits.shape, lim_movie_metadata.shape)) print("Merging All Extracted Data...") extracted_movie_data = lim_credits.merge(lim_movie_metadata, on = "id", how = "left") metadata = extracted_movie_data[["id", "original_title", "overview", "list_of_actors", "list_of_characters", "list_of_genres"]] print("Creating More Attributes (Combination of two or more columns)...") metadata['metadata'] = metadata[metadata.columns[1:]].apply(lambda x: ', '.join(x.dropna().astype(str)), axis = 1) # all combined metadata['overview_genre'] = metadata[metadata.columns[[2,5]]].apply(lambda x: ', '.join(x.dropna().astype(str)), axis = 1) # overview + genre metadata['overview_actors'] = metadata[metadata.columns[[2, 3]]].apply(lambda x: ', '.join(x.dropna().astype(str)), axis = 1) # overview + actors print("Size of Merged Data: {}".format(metadata.shape)) end = time.time() hours, rem = divmod(end-start, 3600) minutes, seconds = divmod(rem, 60) print("\nTime Taken for Pre-processing:{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds)) return metadataCOMPUTING SIMILARITY MATRIX:# Function to create Similarity Matrix using Cosine Similarity or Linear Kernel: def compute_similarity_matrix(metadata, col, formula): start = time.time() tfidf_mat = tfidf.fit_transform(metadata[col]) print("\nShape of TFIDF Matrix: 
{}".format(tfidf_mat.shape)) if formula == "linear kernel": sim_mat = linear_kernel(tfidf_mat, tfidf_mat) elif formula == "cosine": sim_mat = cosine_similarity(tfidf_mat, tfidf_mat) else: print("Invalid Formula!") end = time.time() hours, rem = divmod(end-start, 3600) minutes, seconds = divmod(rem, 60) print("\nTime Taken to Compute Similarity Matrix:{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds)) return sim_matRECOMMENDING MOVIES:# Function to compute Content - Based Recommendation System: def content_based_recommender(input_string, sim_mat, topk, metadata, movie_mapper): movie_index = movie_mapper[input_string] sim_score = list(enumerate(sim_mat[movie_index])) sim_score = sorted(sim_score, key = lambda x: x[1], reverse = True) sim_score = sim_score[1:topk] movie_indices = [i[0] for i in sim_score] return (metadata["original_title"].iloc[movie_indices]) def create_movie_mapper(metadata, col_name): movie_mapper = pd.Series(metadata.index, index = metadata[col_name]) return movie_mapper if __name__ == "__main__": # Getting Pre-processed Data: data = data_preprocessing(movie_metadata, credits) # Computing Similarity Matrix: on_col = "overview" sim_mat = compute_similarity_matrix(data, on_col, "cosine") # Creating Movie Names Mapper: movie_mapper = create_movie_mapper(data, "original_title") # Getting Recommendations: topk = 15 for mname in ["Star Wars", "Toy Story", "Jumanji"]: print("\nTop {} Recommendations for {} based on {}:\n".format(topk, mname, on_col.replace("_", " and ").title().replace("And", "and"))) recommendations = content_based_recommender(mname, sim_mat, topk, data, movie_mapper) recs = pd.DataFrame(recommendations) print(recs)Extracting Relevant Metadata... Imputing Missing Values in Overview... Extracting Genres... Changing dtype of 'id' from string to int... Extracting Actor and Character Names for All Movies... Creating Dataframe of Extracted Credits Data... Size of Credits: (45476, 3) Size of Metadata: (45466, 5) Merging All Extracted Data... Creating More Attributes (Combination of two or more columns)... Size of Merged Data: (45538, 9) Time Taken for Pre-processing:00:00:52.93 Shape of TFIDF Matrix: (45538, 75827) Time Taken to Compute Similarity Matrix:00:00:27.53 Top 15 Recommendations for Star Wars based on Overview: original_title 1157 The Empire Strikes Back 30498 The Star Wars Holiday Special 26616 Star Wars: The Force Awakens 1170 Return of the Jedi 34220 Maciste alla corte del Gran Khan 1270 [...]PiPeline> example of ML pipeline. This file will become your README and also the index of your documentation. Install `pip install PipelineMLNbdev` How to use Fill me in please! 
Don't forget code examples:data = get_data("data/new_maisons-nan.csv") data.head() data.info() X = data.iloc[:,:-2] y = data["classe"]Example of our TransformeeMaison Classtrsf = TransformeeMaison(dateTo='age') new_data = trsf.fit(X).transform(X) new_data.head()Let's apply our Pipeline PipyX_train,X_test,y_train,y_test = split_data(X,y,0.2) from sklearn.pipeline import Pipeline from sklearn.decomposition import PCA from sklearn.svm import SVC from sklearn.preprocessing import StandardScaler Pipe = Pipeline([ ('trsf',TransformeeMaison()), ('ss',StandardScaler()), ('pca',PCA()), ('svm',SVC()) ]) ##les params sont pour le gridsearch pour trouver la meilleur combinaison Params ={ 'trsf__dateTo':('age','annee'), 'pca__n_components' : (2,3), 'svm__kernel':('linear','rbf') }initialisation of Pipy Classp =Pipy(Pipe,Params)searching best Paramsp.gridSearchy(X_train,y_train) p.gridBestEstimator()Introduction To PyBrainThis is a tutorial to introduce basic knowledge of Pybrain and follow that by implementing rudimentary neural networks to solve simple classification problems. While neural networks generally takes a while to understand, this tutorial provides an simple and fast way to experience some of the capabilities neural networks have. PyBrain is short for **Py**thon-**B**ased **R**einforcement Learning, **A**rtificial **I**ntelligence and **N**eural Network Library. Though no longer actively developed, PyBrain is simpler than PyLearn and other packages out there since the library features perceptron as well as neural networks. Table of Contents- [Required Packages](Required-Packages)- [Understanding Our Dataset](Understanding-Our-Dataset)- [Getting Data In](Getting-Data-In)- [Creating the Neural Network](Creating-the-Neural-Network)- [Summary and Further Resources](Summary-and-Further-Resources) Required Packages- PyBrain- Numpy- Matplotlib- Scikit-LearnBefore getting started, you'll need to install the various libraries that we will be using. You can install all of the required pacakges through `pip`: > pip install --upgrade pybrain > pip install --upgrade numpy > pip install --upgrade pandas > pip install --upgrade matplotlib > pip install --upgrade scikit-learnimport pybrain import numpy as np import pandas as pd import matplotlib.pyplot as plt import sklearnUnderstanding Our DatasetThe Iris flower data set also known as Fisher's Iris data set is a data set created by in 1936. The data set contains 50 samples from three species of Iris flower. For each sample, four features were used to measure the flower: length and width of the sepals and petals (cm). To understand our data better, let's take a look into the Iris flower data set included in Scikit-Learn.from sklearn import datasets iris = datasets.load_iris() X = iris.data y = iris.target # Print first 5 samples of the data set print ["Sepal Length", "Sepal Width", "Petal Length", "Petal Width"] print X[:5] # Print dimensions of the data set print ["Rows", "Cols"] print X.shape['Sepal Length', 'Sepal Width', 'Petal Length', 'Petal Width'] [[ 5.1 3.5 1.4 0.2] [ 4.9 3. 1.4 0.2] [ 4.7 3.2 1.3 0.2] [ 4.6 3.1 1.5 0.2] [ 5. 3.6 1.4 0.2]] ['Rows', 'Cols'] (150, 4)Stored in our variable ```X```, we have our data, where each row was a sample and the columns' indices being Sepal Length, Sepal Width, Petal Length and Petal Width. Here we see the first 5 samples in the data set. 
The entire data set has 150 samples, with 4 features per sample.# Print target results print y.tolist() yList = y.tolist() # List of unique elements print np.unique(y) # Counts of each type of Iris print "Count of Iris setosa: " + str(yList.count(0)) print "Count of Iris versicolor: " + str(yList.count(1)) print "Count of Iris virginica: " + str(yList.count(2))[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] [0 1 2] Count of Iris setosa: 50 Count of Iris versicolor: 50 Count of Iris virginica: 50Our particular data set includes only three types of irises shown in ```y``` as values from 0 to 2 representing a particular type. The type of irises include: Iris setosa, Iris versicolor, and Iris virginica. In our data set we have 50 samples of each type of Iris.To understand what each flower type's characteristics are like, we can do some exploratory data analysis on our data. Let's first split our data set into separate lists based on its type.# Split iris.data to 3 lists of length 50 listOfIrises = np.split(X, 3) # Load the data into Panda DataFrames IrisOne = pd.DataFrame(listOfIrises[0], columns=["Sepal Length", "Sepal Width", "Petal Length", "Petal Width"], index=range(50)) IrisTwo = pd.DataFrame(listOfIrises[1], columns=["Sepal Length", "Sepal Width", "Petal Length", "Petal Width"], index=range(50)) IrisThree = pd.DataFrame(listOfIrises[2], columns=["Sepal Length", "Sepal Width", "Petal Length", "Petal Width"], index=range(50)) # Retrieve sepal length and petal length sepalLengthOne = IrisOne['Sepal Length'].values sepalLengthTwo = IrisTwo['Sepal Length'].values sepalLengthThree = IrisThree['Sepal Length'].values petalLengthOne = IrisOne['Petal Length'].values petalLengthTwo = IrisTwo['Petal Length'].values petalLengthThree = IrisThree['Petal Length'].values sepalWidthOne = IrisOne['Sepal Width'].values sepalWidthTwo = IrisTwo['Sepal Width'].values sepalWidthThree = IrisThree['Sepal Width'].values petalWidthOne = IrisOne['Petal Width'].values petalWidthTwo = IrisTwo['Petal Width'].values petalWidthThree = IrisThree['Petal Width'].values print("Iris setosa Statistics (Blue)") print(IrisOne.describe()) print print("Iris versicolor Statistics (Green)") print(IrisTwo.describe()) print print("Iris virginica Statistics (Red)") print(IrisThree.describe()) print ############# ## PLOT #1 ## ############# _, ax = plt.subplots() # Plot points in scatter plot ax.scatter(sepalLengthOne.tolist(), petalLengthOne.tolist(), color='blue') ax.scatter(sepalLengthTwo.tolist(), petalLengthTwo.tolist(), color='green') ax.scatter(sepalLengthThree.tolist(), petalLengthThree.tolist(), color='red') # Set x and y labels ax.set_xlabel('Sepal Length (cm)') ax.set_ylabel('Petal Length (cm)') ax.set_title('Sepal Length v Petal Length') plt.show()Iris setosa Statistics (Blue) Sepal Length Sepal Width Petal Length Petal Width count 50.00000 50.000000 50.000000 50.00000 mean 5.00600 3.418000 1.464000 0.24400 std 0.35249 0.381024 0.173511 0.10721 min 4.30000 2.300000 1.000000 0.10000 25% 4.80000 3.125000 1.400000 0.20000 50% 5.00000 3.400000 1.500000 0.20000 75% 5.20000 3.675000 1.575000 0.30000 max 5.80000 4.400000 1.900000 
0.60000 Iris versicolor Statistics (Green) Sepal Length Sepal Width Petal Length Petal Width count 50.000000 50.000000 50.000000 50.000000 mean 5.936000 2.770000 4.260000 1.326000 std 0.516171 0.313798 0.469911 0.197753 min 4.900000 2.000000 3.000000 1.000000 25% 5.600000 2.525000 4.000000 1.200000 50% 5.900000 2.800000 [...]By plotting sepal length on the x-axis and petal length on the y-axis, we can see noticeable differences in the types of Iris flower. The blue-labeled Irises have a mean petal length of 1.46 while the mean petal lengths of green and red labeled Irises each have values of 4.26 and 5.55 respectively. We can also see a difference in the range of sepal lengths of the blue compared to red and green, where blue has a range of (4.3, 5.8) while red and green have (4.9, 7.0) and (4.9, 7.9) respectively. Now let's plot sepal width with petal width and analyze the scatter plot.############# ## PLOT #2 ## ############# import matplotlib.pyplot as plt _ , ax2 = plt.subplots() # Plot points in scatter plot ax2.scatter(sepalWidthOne.tolist(), petalWidthOne.tolist(), color='blue') ax2.scatter(sepalWidthTwo.tolist(), petalWidthTwo.tolist(), color='green') ax2.scatter(sepalWidthThree.tolist(), petalWidthThree.tolist(), color='red') # Set x and y labels ax2.set_xlabel('Sepal Width (cm)') ax2.set_ylabel('Petal Width (cm)') ax2.set_title('Sepal Width v Petal Width') plt.show()By plotting sepal width on the x-axis and petal width on the y-axis, we can see noticeable differences in the types of Iris flower. Similar to the previous plot, we see a discrepancy of blue with red and green. Red in this plot seems to have a little bit more of a distance from green's scatter. The blue-labeled Irises have a mean petal width of 0.24 while the mean petal width of green and red labeled Irises each have values of 1.32 and 2.02 respectively.Similar to how we would discriminate the differences that we see in these plots, a neural network would use similar logic to determine what to classify an input. Getting Data InLet's first import the Python modules from Pybrain that we'll need for creating our neural network.from pybrain.datasets.classification import ClassificationDataSet from pybrain.tools.shortcuts import buildNetwork from pybrain.utilities import percentError from pybrain.supervised.trainers import BackpropTrainer from pybrain.structure.modules import SoftmaxLayerNow that we have taken a look at the data, and we have imported the necessary components of our neural network, let's now load the data and specify the parameters for the target data.# initialize empty data set data = ClassificationDataSet(inp=4, nb_classes=3, class_labels=["Iris setosa", "Iris versicolor", "Iris virginica"]) # append data samples to the data set for i in range(len(X)): data.addSample(X[i], y[i])Here we instantiate our data set to take in our input. The parameter `inp` specifies the dimensionality of our input, which is 4 in our case the four features of the flower. The parameter `nb_classes` is used to explicitly state the number of target classes for our outputs. Lastly, the parameter `class_labels` is used to name the three classes we have targeted previously.After initializing our empty data set, we input the data and target by row into the dataset. 
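As an optional sanity check right after filling the data set, a minimal sketch in the same Python 2 style as the rest of this tutorial; the values in the comments assume the Iris data loaded above:

```python
# quick checks on the data set we just filled
print len(data)                             # 150 samples were added
print data.indim                            # 4 input features per sample
print data['input'][0], data['target'][0]   # first sample and its class index
```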
Common in training neural networks, we'll split our datasets in three: `training`, `validation`, and `testing` data.- The `training` data is used to train our neural network, letting our algorithm adjust parameters based on the data.- The `validation` data is used to eliminate which models paired with their parameters overfits the data. Here we are verifying that an increase in accuracy over the training data, will correspond to an increase in accuracy over data not shown to the neural network before (ie the validation data). Any decrease in accuracy means that we are overfitting to the data in our training dataset.- The `testing` data is used for testing the final model to see the predictive power of the model (usually a float between 0, 1).We can split the data using Pybrain's own function `splitWithProportion`. We will use the proportion of 70%, 15%, 15% respectively as recommended by sources like Mathworks:# Split by 70% followed by 50% training, rest = data.splitWithProportion(0.7) validation, testing = rest.splitWithProportion(0.5) # Check the ratios of our data sets print "Data ratio:" print len(training), ":", len(validation), ":", len(testing) print print "Percentage training data:" print 1.0*len(training)/len(data) print "Percentage validation data:" print 1.0*len(validation)/len(data) print "Percentage testing data:" print 1.0*len(testing)/len(data)Data ratio: 105 : 22 : 23 Percentage training data: 0.7 Percentage validation data: 0.146666666667 Percentage testing data: 0.153333333333Since we are doing a classification problem with neural networks, its best that we encode the classes using one output neuron per class. This is because we will eventually use a Softmaxlayer activation function to determine the class, which returns a binary output.# Convert target data into a binary representation that is suitable for our classification problem training._convertToOneOfMany() validation._convertToOneOfMany() testing._convertToOneOfMany()Doing this converts the target classes into a list of three binary outputs, an encoding that is suitable for three neurons.# Take a look at the first 10 target outputs after the encoding targets = training['target'] for i in range(10): print targets[i][1 0 0] [1 0 0] [1 0 0] [1 0 0] [1 0 0] [1 0 0] [1 0 0] [1 0 0] [1 0 0] [1 0 0]Lastly let's just check all of our data sets for the right dimensionalities. They should all have inputs of 4 dimensions, and outputs of 3 (the three classes we defined). Then let's check if we properly imported all the data from the iris data set into our blank data set.print training.indim, training.outdim print validation.indim, validation.outdim print testing.indim, testing.outdim print len(data['input']), len(data['target'])4 3 4 3 4 3 150 150Everything looks good, so lets move on! Creating the Neural Network Finally reaching the part where we create the neural network, let's first define our neural network. In the code below, we instantiate our network to have 4 inputs, 3 hidden neurons, and 3 output neurons. Neurons are key structures in neural networks, that simulate how our brains work. They take in multiple binary inputs, $n_1, n_2, n_3, ...$ and produces a singular binary output. Our brains work in the same way because we choose to take in certain information when making decisions. To mimic even further how our brains work, there are weights associated to each input $w_1, w_2, w_3, ...$, similar to how we value certain information more than others. 
To create the binary output, the neuron would get a sum of the weighted inputs: $\sum{n_i w_i}$ and case on if it reached a certain total called the $threshold$ $value$. These neurons are the basic building blocks of the construction of neural networks.Lastly, we use the outclass called a `SoftmaxLayer` because we are trying to solve a classification problem. The Softmax activation function used in the final layer of the network converts a vector and an index to a real value, in our case 0/1.Next, we instantiate our backpropagation trainer. This trainer will run training data and calculates the gradient of a loss function for all the weights used in the network. Then the trainer will attempt to optimize the weights using the loss function gradient to minimize the loss function. To think about this, imagine the errors that occur in the later stages of the neural network and how they are closely related to the errors in the previous neurons. This trainer works backwards through the layers to optimize and minimize losses.# Creating our neural network neuralNet = buildNetwork(4, 3, 3, outclass=SoftmaxLayer) # Creating our backpropagation Trainer trainer = BackpropTrainer(neuralNet, dataset=training, momentum=0.1, verbose=False, weightdecay=0.01) # Here we train our backpropagation trainer on our training data set for 100 cycles trainingError, validationError = trainer.trainUntilConvergence(dataset=training, maxEpochs=100) # Let's plot the error with the number of cycles the trainer has gone through _ , ax3 = plt.subplots() ax3.plot(trainingError, 'g', validationError, 'r') ax3.set_xlabel('Epochs') ax3.set_ylabel('Error (Percentage)') ax3.set_title('Training Error (Green) v Validation Error (red)') plt.show()In the graph above, we can see the error percentage trend towards 0, which is a good sign because it mean we are getting better accuracy. In the graph above, we are only training the network for 100 epochs, which is equivalent to 100 cycles. Generally, the more cycles that one iterates through, the better accuracy one gets. Let's try that below by running the trainer on the training set for 500 cycles. You can also turned on verbosity, to see the percentage errors at each cycle. The final total error I had after 500 cycles hovered around 0.02.trainer.trainOnDataset(training, 500)The reason why our percentage error at the end hovered around 0.02 and never went significantly below that level was because of our training data set. Imagine learning from only one textbook. At a certain point, you'll be saturated with information from the one textbook. However, since you only understand ideas from your textbook, when you find information from the web that you've never seen before, you can only get so much right. 
Lastly, let's take a look at the percent error our model has on the remaining part of the data set we never touched: the `testing` data.output = neuralNet.activateOnDataset(testing).argmax(axis=1) percentError(output, testing['class'])from IPython.display import ImageSome simple mathematics operations +, -, *, /, **, %, //a = 5 b = 10 c = 3 d = 2 print(f'a + b = {a+b}, ',f'a - b = {a-b}, ', f'a * b = {a*b}, ', f'a / b = {a/b}')a + b = 15, a - b = -5, a * b = 50, a / b = 0.5Powerprint(f'c ** d = {c**d}')c ** d = 9Integer remainderprint(f'b % a = {b%a}', f"b %% c = {b%c}" )b % a = 0 b %% c = 1Integer divisionprint(f'b // a = {b//a}')b // a = 2Complex numbers: and this is why I recommend avoiding naming your variables "`i`" or "`j`"print(1 + 5j)(1+5j)List: look, it is a list There is another class named "tuple", and tuples are just lists, except that they are harder to manipulate at the element level. If you want to have a list that does not change over iterations, it is better to go with tuples. You can make a list with "[ ]" or "list()". The difference between them is that one is an implicit function, while the other is an explicit function.list_1 = [1,2,3,4,5] list_2 = list((1,2,3,4,5)) print(type(list_1),type(list_2)) print('list_1: ',list_1,'list_2: ',list_2) list_1: [1, 2, 3, 4, 5] list_2: [1, 2, 3, 4, 5]Arbitrariness of a list: it does not matter what you put into the list as long as you keep them a listlist_3 = [1,'p',set(),'1',[8373,9],['f']] print(list_3)[1, 'p', set(), '1', [8373, 9], ['f']]List index: how do you access an element in the list? It is important to note that counting in python starts with "0", in other words, the first position of a list is "0". However, the last position is "-1".my_list = ['p','r','o','b','e'] print(my_list[0]) print(my_list[-1]) nested_list = ["Happy", [2,0,1,5]] print(nested_list[0][1])aNegative indexingtry: print(my_list[-1]) print(my_list[-5]) print(my_list[-10]) except Exception as e: print('Error!! ',e)e p Error!! 
list index out of rangeSlicing the lists: getting a subset of the list's elementsmy_list = ['p','r','o','g','r','a','m','i','z'] # elements 3rd to 5th print(my_list[2:5]) # elements beginning to 4th print(my_list[:-5]) # elements 6th to end print(my_list[5:]) # elements beginning to end print(my_list[:]) Image(url='https://cdn.programiz.com/sites/tutorial2program/files/element-slicling.jpg')Change or add elements to a list# mistake values odd = [2, 4, 6, 8] # change the 1st item odd[0] = 1 # Output: [1, 4, 6, 8] print(odd) # change 2nd to 4th items odd[1:4] = [3, 5, 7] # Output: [1, 3, 5, 7] print(odd)[1, 4, 6, 8] [1, 3, 5, 7]Append: this the language of "adding"odd = [1, 3, 5] odd.append(7) # Output: [1, 3, 5, 7] print(odd) odd.extend([9, 11, 13]) # Output: [1, 3, 5, 7, 9, 11, 13] print(odd)[1, 3, 5, 7] [1, 3, 5, 7, 9, 11, 13]There are more explicit ways: "+" and "*" operationsodd = [1, 3, 5] # Output: [1, 3, 5, 9, 7, 5] print(odd + [9, 7, 5]) #Output: ["re", "re", "re"] print(["re"] * 3)[1, 3, 5, 9, 7, 5] ['re', 're', 're']Remove elements from a list# Output: ['p', 'r', 'b', 'l', 'e', 'm'] my_list = ['p', 'r', 'b', 'l', 'e', 'm'] print(my_list) # delete multiple items del my_list[1:5] # Output: ['p', 'm'] print(my_list) # delete entire list del my_list # Error: List not defined try: print(my_list) except Exception as e: print(e)['p', 'r', 'b', 'l', 'e', 'm'] ['p', 'm'] name 'my_list' is not definedThe error occurs because we deleted "my_list" We can also use `remove()` or `pop()` `pop()` removes and returns the last item if the index is not provided `clear()` can also be used to empty a listmy_list = ['p','r','o','b','l','e','m'] my_list.remove('p') # Output: ['r', 'o', 'b', 'l', 'e', 'm'] print(my_list) # Output: 'o' print(my_list.pop(1)) # Output: ['r', 'b', 'l', 'e', 'm'] print(my_list) # Output: 'm' print(my_list.pop())['r', 'o', 'b', 'l', 'e', 'm'] o ['r', 'b', 'l', 'e', 'm'] m2 more useful list methods: `sort()` and `reverse()`my_list = [3, 8, 1, 6, 0, 8, 4] print('original: ',my_list) my_list.sort() print('sorted: ',my_list) my_list.reverse() print('reverse of the sorted',my_list)original: [3, 8, 1, 6, 0, 8, 4] sorted: [0, 1, 3, 4, 6, 8, 8] reverse of the sorted [8, 8, 6, 4, 3, 1, 0]Important: list methods are applied to the list directly and is not reversable List comprehension: elegant way to create new lists, aka, one-liner for-looppow2 = [2 ** x for x in range(10)] # reads as: for values from 0 to 9, compute 2 to the power of values print(pow2) pow2 = [] for x in range(10): pow2.append(2 ** x) print(pow2)[1, 2, 4, 8, 16, 32, 64, 128, 256, 512]Nested with if-statementodd = [x for x in range(20) if x % 2 == 1] print(odd)[1, 3, 5, 7, 9, 11, 13, 15, 17, 19]Check if an element is in the listmy_list = ['p','r','o','b','l','e','m'] # Output: True print("'p' in my_list:",'p' in my_list) # Output: False print("'a' in my_list: ",'a' in my_list) # Output: True print("'c' not in my_list",'c' not in my_list)'p' in my_list: True 'a' in my_list: False 'c' not in my_list Truefor-loop and listfor fruit in ['apple','banana','mango']: print("I like",fruit)I like apple I like banana I like mangoAdvanced list operation: `all()`, `any()`, `enumerate()`, `len()`my_conditions = [True,True,False] # are all conditions True? print(all(my_conditions)) # are any conditions True? 
print(any(my_conditions))False Trueenumerator and iterator: `enumerate()` is an enumerator that can wrap around an iterator such as a list, so that you can iterate not only through the items in this list but also the index numbers of the itemsfor ii,element in enumerate(['a','b','c','d','e','f']): print(ii,element)0 a 1 b 2 c 3 d 4 e 5 fSet: they are good for comprehending Venn diagramsmy_set = set([1,2,3]) # convert a list to a set print(my_set){1, 2, 3}As you can see, a set is contained by "{}", and you can create a set by directly {1,2,3}. But I don't recommend that. Explicitly calling function `set()` avoids the confusion between sets and dictionaries (will be introduced below)# set of mixed datatypes my_set = {1.0, "Hello", (1, 2, 3)} print(my_set){1.0, 'Hello', (1, 2, 3)}Sets do not have duplicatesmy_set = {1,2,3,4,3,2} print(my_set){1, 2, 3, 4}Manipulate sets# initialize my_set my_set = {1,3} print(my_set) # if you uncomment line 9, # you will get an error # TypeError: 'set' object does not support indexing #my_set[0] # add an element # Output: {1, 2, 3} my_set.add(2) print(my_set) # add multiple elements # Output: {1, 2, 3, 4} my_set.update([2,3,4]) print(my_set) # add list and set # Output: {1, 2, 3, 4, 5, 6, 8} my_set.update([4,5], {1,6,8}) print(my_set) # initialize my_set my_set = {1, 3, 4, 5, 6} print(my_set) # discard an element # Output: {1, 3, 5, 6} my_set.discard(4) print(my_set) # remove an element # Output: {1, 3, 5} my_set.remove(6) print(my_set) # discard an element # not present in my_set # Output: {1, 3, 5} my_set.discard(2) print(my_set) # remove an element # not present in my_set # If you uncomment line 27, # you will get an error. # Output: KeyError: 2 #my_set.remove(2) # initialize my_set # Output: set of unique elements my_set = set("HelloWorld") print(my_set) # pop an element # Output: random element print(my_set.pop()) # pop another element # Output: random element my_set.pop() print(my_set) # clear my_set #Output: set() my_set.clear() print(my_set){'o', 'l', 'e', 'd', 'H', 'W', 'r'} o {'e', 'd', 'H', 'W', 'r'} set()Set operationsA = {1, 2, 3, 4, 5} B = {4, 5, 6, 7, 8}Union: can be operated by " | ", but I don't recommend thisImage(url = 'https://cdn.programiz.com/sites/tutorial2program/files/set-union.jpg') A.union(B) B.union(A)Intersection: can be operated by "&", but I don't recommended thisImage(url='https://cdn.programiz.com/sites/tutorial2program/files/set-intersection.jpg') # initialize A and B A = {1, 2, 3, 4, 5} B = {4, 5, 6, 7, 8} A.intersection(B) B.intersection(A)Difference: can be operated by "-", but I don't recommend thisImage(url='https://cdn.programiz.com/sites/tutorial2program/files/set-difference.jpg') # initialize A and B A = {1, 2, 3, 4, 5} B = {4, 5, 6, 7, 8} A.difference(B) B.difference(A)Symmetric difference: can be operated by "^", but I don't recommend thisImage(url='https://cdn.programiz.com/sites/tutorial2program/files/set-symmetric-difference.jpg') # initialize A and B A = {1, 2, 3, 4, 5} B = {4, 5, 6, 7, 8} A.symmetric_difference(B) B.symmetric_difference(A)Many other set methods:`isdisjoint()`: check if 2 sets have a null intersection`issubset()`: check if another set contains this set`issuperset()`: check if this contains another setAthough there are methods like `add()`, `clear()`, `copy()`, but they are not recommended for use in common scientific computations Dictionary: a special set that consists of both "keys" and "values"my_dictionary = {} print(type(my_dictionary)) my_dictionary = {1} print(type(my_dictionary)) 
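# note: {} creates an empty dict, while braces containing only values (no key: value pairs), such as {1}, create a set,
# so the two type() calls above print <class 'dict'> and <class 'set'> respectively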
my_dictionary = {'name': 'John', 1: [2, 4, 3]} print(my_dictionary) my_dictionary = dict({1:'apple', 2:'ball'}) print(my_dictionary) my_dictionary = dict([(1,'apple'), (2,'ball')]) print(my_dictionary) # I usually use this one: my_dictionary = dict( banana = 'apply', plane = 'ball') print(my_dictionary) # but caution: my_dictionary = dict(1='apple',2='ball')Manipulate dictionarymy_dictionary = {'name':'Jack', 'age': 26} # update value my_dictionary['age'] = 27 #Output: {'age': 27, 'name': 'Jack'} print(my_dictionary) # add item my_dictionary['address'] = 'Downtown' # Output: {'address': 'Downtown', 'age': 27, 'name': 'Jack'} print(my_dictionary){'name': 'Jack', 'age': 27} {'name': 'Jack', 'age': 27, 'address': 'Downtown'}Iterate through a dictionarya_dictionary = {item:item+1 for item in range(20)} print(a_dictionary) a_dictionary = {} # empty dictionary for item in range(20): a_dictionary[item] = item + 1 print(a_dictionary) for key,value in a_dictionary.items(): print(key,value) print(a_dictionary.keys()) print(a_dictionary.values()) # convert the dictionary properties to lists print(list(a_dictionary.keys())) print(list(a_dictionary.values()))[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]Dictionary is very useful when we work with pandas, a Python library that works with tabular data A preview of how useful it is:import pandas as pd results = {'variable1': [1,2,3,4,5,6], 'variable2': ['a','r','d','f','u','j'], 'variable3': [True,True,False,True,True,False]} results_table = pd.DataFrame(results) print(results) print(results_table){'variable1': [1, 2, 3, 4, 5, 6], 'variable2': ['a', 'r', 'd', 'f', 'u', 'j'], 'variable3': [True, True, False, True, True, False]} variable1 variable2 variable3 0 1 a True 1 2 r True 2 3 d False 3 4 f True 4 5 u True 5 6 j FalseSimple mathematics calculations: caution - data typea = 1 b = 2 print(type(a),type(b)) print(b - a) print(a / b) # this will be zero in python2 a = 1.# notice the difference b = 2 print(type(a),type(b)) print(b - a) print(a / b) # this will be a float in python2Query OpenAlex for works authored by a personThis notebook queries the [OpenAlex API](https://docs.openalex.org/api) via its `/works` endpoint for works authored by a person. It takes an ORCID URL as input which is used to filter for works where '`authorships.author.orcid`' matches the given ORCID URL.From the resulting list of works we output all DOIs.# Prerequisites: import requests # dependency to make HTTP callsThe input for this notebook is an ORCID URL, e.g. '`https://orcid.org/0000-0003-2499-7741`'# input parameter example_orcid="https://orcid.org/0000-0003-2499-7741"We use it to query the OpenAlex API for works that specified the ORCID URL within their metadata in the field '`authorships.author.orcid`'. 
Since the API uses [pagination](https://docs.openalex.org/api/get-lists-of-entitiespagination), we need to loop through all pages to get the complete result set.# OpenAlex endpoint to query for works OPENALEX_API_WORKS = "https://api.openalex.org/works" # query all works that are connected to orcid def query_openalex_for_person2works(orcid): page = 1 max_page = 1 while page <= max_page: params = {'filter': 'authorships.author.orcid:'+orcid, 'page': page} response = requests.get(url=OPENALEX_API_WORKS, params=params, headers= {'Accept': 'application/json'}) response.raise_for_status() result=response.json() # calculate max page number in first loop if max_page == 1: max_page = determine_max_page(result) page = page + 1 yield result # calculate max number of result pages def determine_max_page(response_data): item_count = response_data['meta']['count'] items_per_page = response_data['meta']['per_page'] max_page_ceil = item_count // items_per_page + bool(item_count % items_per_page) return max_page_ceil # ---- example execution list_of_pages=query_openalex_for_person2works(example_orcid)From the resulting list of works we extract and print out title and DOI. *Note: works that do not have a DOI assigned, will not be printed.*# from the result pages we get from the OpenAlex API, extract the data about works def extract_works_from_page(page): return [work for work in page.get('results') or []] # extract DOI from work def extract_doi(work): doi=work.get('ids', {}).get('doi') or "" doi_id=doi.replace("https://doi.org/", "") if doi else doi title=work.get('display_name', "") return doi_id, title #--- example execution for page in list_of_pages or []: works=extract_works_from_page(page) for work in works or []: doi,title=extract_doi(work) if doi: print(f"{doi}, {title}")10.3897/rio.7.e66264, OPTIMETA – Strengthening the Open Access publishing system through open citations and spatiotemporal metadata 10.11588/ip.2020.2.73938, Bericht vom 4. VIVO-Workshop 2019 10.15488/9424, ConfIDent – An Open Platform for FAIR Conference Metadata 10.31263/voebm.v72i2.2808, Open Science und die Bibliothek – Aktionsfelder und Berufsbild 10.21105/joss.01182, VIVO: a system for research discovery 10.11588/ip.2019.1.61729, Bericht vom 3. VIVO-Workshop 2018 10.11588/ip.2019.1.49609, Problematische Aspekte bibliometrie-basierter Forschungsevaluierung 10.1016/j.procs.2019.01.074, The Research Core Dataset (KDSF) in the Linked Data context 10.11588/ip.2018.1.49357, Vitro - ein universell einsetzbarer Editor für Ontologien und Instanzen 10.3897/rio.4.e31656, Reference implementation for open scientometric indicators (ROSI) 10.5281/zenodo.1464108, VIVO - eine Einführung 10.11588/ip.2018.1.46819, Anforderungen an Forschungsinformationssysteme in Deutschland durch Forschende und F[...]Marriott Hotel Category Change 2020 Analysis# import pandas import pandas as pd import numpy as np # read data from csv data=pd.read_csv("marriott-category-changes-2020.csv") data.head() print(data.shape)(2185, 7)Question 1Discribe the data types for each feature/column, e.g., xxx feature's data type is String, yyy feature's data type is float, etc.data.dtypesQuestion 2- How many hotels are in this dataset?- The hotels are from how many unique brands?- Which destination/country has the most hotels listed in this dataset? 
List the total number of hotels in that country- How many brands in China have hotel category changes?print(data.shape[0]) print(" ") print(data['Brand'].nunique()) print(" ") print(data['Destination'].value_counts()) print(" ") temp=data.query('Destination=="China"') temp=temp.query('`New Category` != `Current Category`') print(temp['Brand'].nunique())2185 30 USA 1548 Canada 76 China 68 Spain 34 Mexico 26 ... Ethiopia 1 Sri Lanka 1 Macau 1 Suriname 1 Lithuania 1 Name: Destination, Length: 98, dtype: int64 14Question 3- What's the percentage of hotels worldwide with category upgrade in 2020?data.query('`New Category` >`Current Category`').shape[0]/data.shape[0]Question 4- List hotels with category changes greater than 1 if any, such as changing from category 3 to 5 or from category 7 to 4- List all JW Marriott hotels in China that have a category upgradedisplay(data.query('`New Category` - `Current Category`> 1 or `New Category` - `Current Category`< -1' )) temp2=data.query('Destination=="China"') display (temp2.query('`New Category` > `Current Category` & Brand =="JW Marriott"'))Question 5Assume you are in Feburary 2020 and the category changes will take effect on March 4, 2020. You are planning your trip to Florence, Italy and Hong Kong, China in April. You only stay in category 8 hotel (existing category 8 or future category 8) and want to optimize your point spending. Based on the data, which hotel you should book? when should you book your hotels for Florence and Hong Kong? Why?temp3=data.query('Destination in ["China","Italy"] ') display(temp3.query('`New Category` == 8 or `Current Category`==8'))Generate predictions for a new list of chemicals for Instrinic Clearance¶ - Step 1: Identify substances of interest and their SMILES codes - Use KNIME to convert SMILES into a V2000 sdf file- See KNIME workflow presented in models directory (httk/models) for example knwf file generated in KNIME 3.7.2- Step 2: Use sdf file to generate Pubchem and ToxPrint Fingerprints using KNIME and the Chemotyper- Step 3: Use sdf file to generate OPERA descriptors (v2.6)import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline import os import glob from sklearn import model_selection from sklearn import preprocessing from sklearn.metrics import r2_score import pickle def normalizeDescriptors(X): scaler = preprocessing.StandardScaler().fit(X) transformed = scaler.transform(X) x_norm = pd.DataFrame(transformed, index = X.index) x_norm.columns = X.columns return(x_norm) raw_dir = '/home/grace/Documents/python/httk/data/raw/' processed_dir = '/home/grace/Documents/python/httk/data/processed/' interim_dir = '/home/grace/Documents/python/httk/data/interim/' figures_dir = '/home/grace/Documents/python/httk/reports/figures/' external_dir = '/home/grace/Documents/python/httk/data/external/' models_dir = '/home/grace/Documents/python/httk/models/'Load descriptors needed for intrinsic clearance (regression model) Looks like per Table S6 this model only needs Pubchem and ToxPrint fingerprints.pubchem = pd.read_csv(processed_dir+'Fub_Pubchem.csv') pubchem.head() txps = pd.read_excel(processed_dir+'ToxPrints.xlsx') clint_features_reg = pd.read_csv(external_dir+'Clint_Features_Regression.csv') retain = [str(val.replace("'", "").replace(" ", "")) for val in clint_features_reg.loc[0,'Fingerprints'].split(',')] retain[0] = retain[0].replace("[", "") retain[len(retain)-1] = retain[len(retain)-1].replace("]",'') #retain #pubchem.set_index('CASRN', inplace = True) pubchem 
txps.drop(['DTXSID', 'PREFERRED_NAME'], axis =1, inplace = True) txps.set_index('INPUT', inplace = True) #txps ids = list(set(txps.index & pubchem.index)) txps_ = txps.loc[ids] pubchem_ = pubchem.loc[ids] descriptors = pd.concat([pubchem_, txps_], axis = 1) descriptors.shape fingerprints_clintReg = descriptors.loc[:,retain] fingerprints_clintReg.shapeLoad saved modelclint_rf = pickle.load(open(models_dir+'clintReg_rf.sav', 'rb')) len(clint_rf.feature_importances_)Looks like there is a match in terms of number of descriptors expected...predicted_clint_rf = pd.DataFrame((10**clint_rf.predict(fingerprints_clintReg)), fingerprints_clintReg.index ) predicted_clint_rf.columns = ['pred_clint_rf'] predicted_clint_rf[predicted_clint_rf.index == '1007-28-9']MARS News Title and Paragraphcounter = 0 browser.visit(mars_news) html = browser.html soup = BeautifulSoup(html, "html.parser") newsDetails = soup.findAll('ul', attrs={'class': 'item_list'}) for articlesList in newsDetails: for indArticle in articlesList: if counter == 0: news_date = indArticle.find('div', attrs={'class': 'list_date'}).text news_title = indArticle.find('div', attrs={'class': 'content_title'}).text news_p = indArticle.find('div', attrs={'class': 'article_teaser_body'}).text counter = counter + 1 news_p # ----- SUPPORTING CODES ------ #for a in price_box.find_all('a'): # print(a.get('href')) #for getting link # print(a.text) #for getting text between the linkMARS Image#Scraping the URL browser.visit(mars_image) html = browser.html soup = BeautifulSoup(html, "html.parser") #Finding the latest News Title Header featureImage = soup.find('article', attrs={'class':'carousel_item'}) featuredImageBuffer = featureImage.attrs['style'] featured_image_url = "https://www.jpl.nasa.gov" + featuredImageBuffer[featuredImageBuffer.find('url') + 5: len(featuredImageBuffer) - 3]Twitter Details on temperature#This is to get the latest tweet counter = 0 #Scraping the URL browser.visit(twitter_details) html = browser.html soup = BeautifulSoup(html, "html.parser") #Finding all the tweets content allTweetsContainter = soup.findAll('div', attrs={'class': 'js-tweet-text-container'}) #Iterating the tweets to get the temperature details for allTweetsContainter in allTweetsContainter: if("Sol" in allTweetsContainter.find('p').text and "pressure" in allTweetsContainter.find('p').text and "daylight" in allTweetsContainter.find('p').text and counter == 0): mars_weather = allTweetsContainter.find('p').text counter = counter + 1 mars_weatherMars Facts#This is to get the Mars Facts counter = 0 import pandas as pd #Scraping the URL browser.visit(mars_facts) html = browser.html soup = BeautifulSoup(html, "html.parser") #Finding all the facts marsFacts = soup.findAll('table')[0] tables = pd.read_html(mars_facts) tables df = tables[0] df.columns = ['desc', 'facts'] desc = df["desc"].values.tolist() facts = df["facts"].values.tolist()Mars Hemisphere#Scraping the URL browser.visit(mars_hemisphere) html = browser.html soup = BeautifulSoup(html, 'html.parser') #marsHemisphere marsHemisphere = soup.findAll('div', attrs={'class': 'item'}) #Adding the text to the lit, so that you can use the linktext imageTextList = [] #Iterating the class -- item and storing the tex for a in marsHemisphere: for b in a.findAll('a'): if(len(b.text) > 0): imageTextList.append(b.text) # This is use to launch the home page browser.visit(mars_hemisphere) html = browser.html browser.url hemisphere_image_urls = [] for text in imageTextList: #Parent Page browser.visit(mars_hemisphere) html = 
browser.html #Click Action browser.click_link_by_partial_text(text) #Child Page html = browser.html soup = BeautifulSoup(html, 'html.parser') image = soup.findAll('img') for a in image: if('full' in a.get('src')): dictDetails = {'title':text, 'img_url' : 'https://astrogeology.usgs.gov' + a.get('src') } hemisphere_image_urls.append(dictDetails) hemisphere_image_urls marsDetails = { "news_p": news_p, "news_title": news_title, "featured_image_url": featured_image_url, "mars_weather": mars_weather, "desc":desc, "facts":facts, "hemisphere_image_urls":hemisphere_image_urls } marsDetails marsDetails['hemisphere_image_urls'][0]['title'] titleList = [] imageList = [] for i in range(4): titleList.append(hemisphere_image_urls[i]['title']) imageList.append(hemisphere_image_urls[i]['img_url']) for i in range(4): print(i) marsDetails marsDetails['hemisphere_image_urls'][0]['title'] for a in marsDetails: for b in hemisphere_image_urls: print(b)Train a GPT-2 Text-Generating Model This notebook demonstrates how to run the [GPT-2 Text-Generating Model demo](https://github.com/DrSnowbird/gpt-2-simple-docker/). See the link for more details about the model, including evaluation metrics and credits. Modifications by @DrSnowbird1. This notebook has been minorly modifed by @DrSnowbird to use **Tensorflow v2.x**2. Steps is reduced from **1,000 down to 200** since this Demo is just to show concept not for achieving good accuracy fine-tuned model.3. Removed the use of Google-Drive to simplify and avoid the problem in the gpt-2-simple copy-from-gdrive with hard-code path 'My Drive' which not existing. Train a GPT-2 Text-Generating Model w/ GPU For Free by [](http://minimaxir.com)*Last updated: November 10th, 2019*Retrain an advanced text generating neural network on any text dataset **for free on a GPU using Collaboratory** using `gpt-2-simple`!For more about `gpt-2-simple`, you can visit [this GitHub repository](https://github.com/minimaxir/gpt-2-simple). You can also read my [blog post](https://minimaxir.com/2019/09/howto-gpt2/) for more information how to use this notebook!To get started:1. Copy this notebook to your Google Drive to keep it and save your changes. (File -> Save a Copy in Drive)2. Make sure you're running the notebook in Google Chrome.3. Run the cells below:%tensorflow_version 2.x !pip install -q gpt-2-simple import gpt_2_simple as gpt2 from datetime import datetime from google.colab import filesBuilding wheel for gpt-2-simple (setup.py) ... [?25l[?25hdoneGPUColaboratory uses either a Nvidia T4 GPU or an Nvidia K80 GPU. The T4 is slightly faster than the old K80 for training GPT-2, and has more memory allowing you to train the larger GPT-2 models and generate more text.You can verify which GPU is active by running the cell below.!nvidia-smiThu Dec 9 01:57:08 2021 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 495.44 Driver Version: 460.32.03 CUDA Version: 11.2 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. 
| |===============================+======================+======================| | 0 Tesla K80 Off | 00000000:00:04.0 Off | 0 | | N/A 65C P8 32W / 149W | 0MiB / 11441MiB | 0% Default | | | | N/A | +-------------------------------+----------------------+----------------------+ +-------[...]Downloading GPT-2If you're retraining a model on new text, you need to download the GPT-2 model first. There are three released sizes of GPT-2:* `124M` (default): the "small" model, 500MB on disk.* `355M`: the "medium" model, 1.5GB on disk.* `774M`: the "large" model, cannot currently be finetuned with Colaboratory but can be used to generate text from the pretrained model (see later in Notebook)* `1558M`: the "extra large", true model. Will not work if a K80 GPU is attached to the notebook. (like `774M`, it cannot be finetuned).Larger models have more knowledge, but take longer to finetune and longer to generate text. You can specify which base model to use by changing `model_name` in the cells below.The next cell downloads it from Google Cloud Storage and saves it in the Colaboratory VM at `/models/`, e.g., `/models/124M`.This model isn't permanently saved in the Colaboratory VM; you'll have to redownload it if you want to retrain it at a later time.gpt2.download_gpt2(model_name="124M")Fetching checkpoint: 1.05Mit [00:00, 261Mit/s] Fetching encoder.json: 1.05Mit [00:00, 8.21Mit/s] Fetching hparams.json: 1.05Mit [00:00, 808Mit/s] Fetching model.ckpt.data-00000-of-00001: 498Mit [00:12, 39.8Mit/s] Fetching model.ckpt.index: 1.05Mit [00:00, 650Mit/s] Fetching model.ckpt.meta: 1.05Mit [00:00, 8.86Mit/s] Fetching vocab.bpe: 1.05Mit [00:00, 9.97Mit/s]Mounting Google DriveThe best way to get input text to-be-trained into the Colaboratory VM, and to get the trained model *out* of Colaboratory, is to route it through Google Drive *first*.Running this cell (which will only work in Colaboratory) will mount your personal Google Drive in the VM, which later cells can use to get data in/out. (it will ask for an auth code; that auth is not saved anywhere)# gpt2.mount_gdrive()Uploading a Text File to be Trained to ColaboratoryIn the Colaboratory Notebook sidebar on the left of the screen, select *Files*. From there you can upload files:![alt text](https://i.imgur.com/TGcZT4h.png)Upload **any smaller text file** (<10 MB) and update the file name in the cell below, then run the cell.!wget https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt -O "shakespeare.txt" file_name = "shakespeare.txt"If your text file is larger than 10MB, it is recommended to upload that file to Google Drive first, then copy that file from Google Drive to the Colaboratory VM.#gpt2.copy_file_from_gdrive(file_name)Finetune GPT-2The next cell will start the actual finetuning of GPT-2. It creates a persistent TensorFlow session which stores the training config, then runs the training for the specified number of `steps`. (to have the finetuning run indefinitely, set `steps = -1`)The model checkpoints will be saved in `/checkpoint/run1` by default. The checkpoints are saved every 500 steps (can be changed) and when the cell is stopped.The training might time out after 4ish hours; make sure you end training and save the results so you don't lose them!**IMPORTANT NOTE:** If you want to rerun this cell, **restart the VM first** (Runtime -> Restart Runtime). 
You will need to rerun imports, but not re-copy files.Other optional-but-helpful parameters for `gpt2.finetune`:* **`restore_from`**: Set to `fresh` to start training from the base GPT-2, or set to `latest` to restart training from an existing checkpoint.* **`sample_every`**: Number of steps to print example output.* **`print_every`**: Number of steps to print training progress.* **`learning_rate`**: Learning rate for the training. (default `1e-4`, can lower to `1e-5` if you have <1MB input data)* **`run_name`**: subfolder within `checkpoint` to save the model. This is useful if you want to work with multiple models (will also need to specify `run_name` when loading the model)* **`overwrite`**: Set to `True` if you want to continue finetuning an existing model (w/ `restore_from='latest'`) without creating duplicate copies.sess = gpt2.start_tf_sess() gpt2.finetune(sess, dataset=file_name, model_name='124M', steps=200, restore_from='fresh', run_name='run1', print_every=10, sample_every=100, save_every=200 )Loading checkpoint models/124M/model.ckpt INFO:tensorflow:Restoring parameters from models/124M/model.ckpt Loading dataset...After the model is trained, you can copy the checkpoint folder to your own Google Drive.If you want to download it to your personal computer, it's strongly recommended you copy it there first, then download from Google Drive. The checkpoint folder is copied as a `.rar` compressed file; you can download it and uncompress it locally.#gpt2.copy_checkpoint_to_gdrive(run_name='run1')You're done! Feel free to go to the **Generate Text From The Trained Model** section to generate text based on your retrained model. Load a Trained Model CheckpointRunning the next cell will copy the `.rar` checkpoint file from your Google Drive into the Colaboratory VM.# This line is commented out by DrSnowbird. # # gpt2.copy_checkpoint_from_gdrive(run_name='run1')The next cell will allow you to load the retrained model checkpoint + metadata necessary to generate text.**IMPORTANT NOTE:** If you want to rerun this cell, **restart the VM first** (**Runtime -> Restart Runtime**). Don't terminate the Colab session, or you will lose the fine-tuned model trained above! **Otherwise, the `load_gpt2(sess, ...)` call will fail!** **You will need to rerun the imports, as in the following code.**%tensorflow_version 2.x !pip install -q gpt-2-simple import gpt_2_simple as gpt2 from datetime import datetime from google.colab import files sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='run1')Loading checkpoint checkpoint/run1/model-300 INFO:tensorflow:Restoring parameters from checkpoint/run1/model-300Generate Text From The Trained ModelAfter you've trained the model or loaded a retrained model from checkpoint, you can now generate text. `generate` generates a single text from the loaded model.gpt2.generate(sess, run_name='run1') def saveListToFile(textList, outFileName): with open(outFileName, 'w') as f: f.write(textList) text = gpt2.generate(sess, return_as_list=True)[0] saveListToFile(text, 'generated-text-01.txt')If you're creating an API based on your model and need to pass the generated text elsewhere, you can do `text = gpt2.generate(sess, return_as_list=True)[0]`You can also pass in a `prefix` to the generate function to force the text to start with a given character sequence and generate text from there (good if you add an indicator when the text starts).You can also generate multiple texts at a time by specifying `nsamples`.
Unique to GPT-2, you can pass a `batch_size` to generate multiple samples in parallel, giving a massive speedup (in Colaboratory, set a maximum of 20 for `batch_size`).Other optional-but-helpful parameters for `gpt2.generate` and friends:* **`length`**: Number of tokens to generate (default 1023, the maximum)* **`temperature`**: The higher the temperature, the crazier the text (default 0.7, recommended to keep between 0.7 and 1.0)* **`top_k`**: Limits the generated guesses to the top *k* guesses (default 0 which disables the behavior; if the generated output is super crazy, you may want to set `top_k=40`)* **`top_p`**: Nucleus sampling: limits the generated guesses to a cumulative probability. (gets good results on a dataset with `top_p=0.9`)* **`truncate`**: Truncates the input text until a given sequence, excluding that sequence (e.g. if `truncate=''`, the returned text will include everything before the first ``). It may be useful to combine this with a smaller `length` if the input texts are short.* **`include_prefix`**: If using `truncate` and `include_prefix=False`, the specified `prefix` will not be included in the returned text.gpt2.generate(sess, length=250, temperature=0.7, prefix="LORD", nsamples=5, batch_size=5)LORD FITZWATER: A stone is thrown to his head, That blasts forth a loud cry; That is my voice, if this be true. LORD FITZWATER: Your voices are no more. RICHARD: No more. ANGELO: What have you done? LADY CAPULET: I am hanged. ANGELO: What have you done? RICHARD: You know not; You know not. ANGELO: Nay, I do. LADY CAPULET: You know not, You know not, Richmond. ANGELO: Your voices are no more. RICHARD: No more. LADY CAPULET: No more. ANGELO: You have done it, you know not; You have not, you know not, you know not. LADY CAPULET: You have not done it, you know not; There is no more to it. ANGELO: No ==================== LORD: O, the heavens do not think the devil hath set us down So low, As we have set down, So low, So low, So low, To take an upright stand! CORIOLANUS: Well, sir, I'll undertake The exercise of my power in Rome. SICINIUS: That's my mind. CORIOLANUS: My mind! SICINIUS: I'll undertake it with my mind. CORIOLANUS: My mind! SICINIUS: I'll undertake it with m[...]For bulk generation, you can generate a large amount of text to a file and sort out the samples locally on your computer. The next cell will generate a generated text file with a unique timestamp.You can rerun the cells as many times as you want for even more generated texts!gen_file = 'gpt2_gentext_{:%Y%m%d_%H%M%S}.txt'.format(datetime.utcnow()) print(f'gen_file=${gen_file}') gpt2.generate_to_file(sess, destination_path=gen_file, length=500, temperature=0.7, nsamples=100, batch_size=20 ) # may have to run twice to get file to download files.download(gen_file)Generate Text From The Pretrained Model NOT using Fine-Tuned Model at all!If you want to generate text from the pretrained model, not a finetuned model, pass `model_name` to `gpt2.load_gpt2()` and `gpt2.generate()`.This is currently the only way to generate text from the 774M or 1558M models with this notebook. 
Make sure that you 'restart the runtime and then jump here to startUSE_PRE_TRAINED_MODEL=False if USE_PRE_TRAINED_MODEL: model_name = "774M" gpt2.download_gpt2(model_name=model_name) sess2 = gpt2.start_tf_sess() gpt2.load_gpt2(sess2, model_name=model_name, reuse=False) gpt2.generate(sess2, model_name=model_name, prefix="The secret of life is", length=100, temperature=0.7, top_p=0.9, nsamples=5, batch_size=5 )EtceteraIf the notebook has errors (e.g. GPU Sync Fail), force-kill the Colaboratory virtual machine and restart it with the command below:# !kill -9 -1Importing Librariesimport numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as snsReading the Dataset :-dataset=pd.read_csv(r"C:\gctc\Admission_Predict.csv") dataset.head()Checking whether any null values are present in the datasetdataset.isnull().any() dataset.drop('Serial No.',axis=1,inplace=True) #removing serial column dataset.head() dataset.describe() #to view some static details dataset.info() #concise summary of dataset x=dataset.iloc[:,0:7].values #select perticular cell of dataset y=dataset.iloc[:,7:].values x y x.shape # y.shapeData Vizualizationsns.pairplot(dataset) #seaborn data vizualization corr_matrix=dataset.corr() plt.figure(figsize=(12,12)) sns.heatmap(corr_matrix,annot = True) plt.show()spliting data into train and testdataset.columns x= dataset.drop(columns=['Chance of Admit ']) y=dataset['Chance of Admit '] x.shape y.shape x=np.array(x) y=np.array(y) y=y.reshape(-1,1) y.shape from sklearn.preprocessing import StandardScaler x_ss=StandardScaler() X=x_ss.fit_transform(x) y_ss=StandardScaler() y=y_ss.fit_transform(y) from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=10) y_train=(y_train>0.5) y_test=(y_test>0.5) from sklearn.linear_model.logistic import LogisticRegression cls=LogisticRegression(random_state=0) lr=cls.fit(X_train,y_train) ypred=lr.predict(X_test) ypredModel Evaluationfrom sklearn.metrics import accuracy_score,recall_score,roc_auc_score,confusion_matrix print("\nAccuracy score : %f" %(accuracy_score(y_test,ypred)*100)) print("Recall score : %f" %(recall_score(y_test,ypred)*100)) print("ROC score : %f\n" %(roc_auc_score(y_test,ypred)*100)) print(confusion_matrix(y_test,ypred))Accuracy score : 87.500000 Recall score : 92.000000 ROC score : 88.727273 [[47 8] [ 2 23]]Saving the Modelimport pickle pickle.dump(lr,open('university.pkl','wb')) model=pickle.load(open('university.pkl','rb')) modelLeast Squares Regression with Gradient Boostingx = np.linspace(-5,20, 200) data_x = x data_y = 1/np.sqrt(2*np.pi) * np. exp( -(x+2)**2 / (2) ) +\ 1/np.sqrt(2*np.pi) * np. exp( -(x-4)**2 / (2) ) *0.8 +\ 1/np.sqrt(2*np.pi) * np. exp( -(x-15)**2 / (5) ) * 3 data_y -= np.mean(data_y) data_y += np.random.rand(200)/4 plt.figure(figsize=(12,5)) plt.scatter(data_x, data_y);1. 
fit a regressor with high bias We choose a shallow decision tree regressorfrom sklearn.tree import DecisionTreeRegressor def weak_learner(data, target, max_depth=1): tree = DecisionTreeRegressor(criterion='mse', max_depth=max_depth) tree.fit(data[:,None], target) pred = tree.predict(data[:,None]); return predCompute residuals $r_{im} = \frac{\partial}{\partial F(x_i)} L(F(x_i), y_i) = y_i - F(x_i)\text{, for } i = 1,...,n.$pred_y = [] pred_y.append( weak_learner(data_x, data_y) ) plt.figure(figsize=(18,5)) plt.subplot(121); plt.plot(data_x, pred_y[-1], color='orange', linewidth=4); plt.scatter(data_x, data_y); plt.title("orignal data with prediction"); ylim = plt.ylim(); residuals = data_y - pred_y[-1] plt.subplot(122); plt.scatter(data_x, residuals); plt.title("residuals");2. fit next regressor to the residualspred_y.append( weak_learner(data_x, residuals) ) plt.figure(figsize=(18,5)) plt.subplot(121); plt.plot(data_x, pred_y[-1], color='orange', linewidth=4); plt.scatter(data_x, residuals); plt.title("previus residuals with prediction"); residuals = residuals - pred_y[-1] plt.subplot(122); plt.scatter(data_x, residuals); plt.title("residuals");**To simplify, we omit choosing weight for the classifier!** 3. repeat M timesfor i in range(5): pred_y.append( weak_learner(data_x, residuals) ) residuals = residuals - pred_y[-1] plt.figure(figsize=(18,5)) plt.subplot(121); plt.plot(data_x, pred_y[-1], color='orange', linewidth=4); plt.scatter(data_x, residuals); plt.title("previus residuals with prediction"); residuals = residuals - pred_y[-1] plt.subplot(122); plt.scatter(data_x, residuals); plt.title("residuals");We see that residuals become more and more noisy, when there's no information except this noise, overfitting will begin! 4. connect regressorsconnected_predicitons = np.array(pred_y).sum(0) plt.figure(figsize=(9,5)) plt.plot(data_x, connected_predicitons, color='orange', linewidth=4); plt.scatter(data_x, data_y); plt.title("orignal data with prediction"); plt.ylim(ylim);Interactive visualizationdef draw(M, learning_rate=1.0, tree_depth=1): pred_y = [] for i in range(M): #if len(pred_y) < M: residuals = data_y - np.array(pred_y).sum(0) pred_y.append( weak_learner(data_x, residuals * learning_rate, max_depth = tree_depth) ) connected_predicitons = np.array(pred_y[:M]).sum(0) plt.figure(figsize=(12,5)) plt.plot(data_x, connected_predicitons, color='orange', linewidth=4); plt.scatter(data_x, data_y); plt.title("M value: " + str(M)); plt.ylim(ylim); interactive_plot = interactive( draw, M=widgets.IntSlider(min=1,max=200,step=1,value=1, continuous_update=False), learning_rate=widgets.FloatText(step=0.1,value=1,continuous_update=True), tree_depth=widgets.IntText(step=1,value=1, continuous_update=True)) output = interactive_plot.children[-1] output.layout.height = '350px' interactive_plotBinary Classification with Gradient Boosting Loading explanatory datafrom sklearn import datasets dataset = datasets.load_breast_cancer() data = dataset['data'] data -= np.mean(data) targ = dataset['target'] train_x = data[:400] train_y = targ[:400] test_x = data[400:] test_y = targ[400:] print('training shape: ', train_x.shape) print('test set shape: ', test_x.shape) plt.figure(figsize=(10,6)) plt.scatter(data[:, 0], data[:, 1], c=targ, alpha=0.5, cmap='jet', s=50);training shape: (400, 30) test set shape: (169, 30)We will treat classification similarly as regression**We round predicted float value to the closest integer**from sklearn.tree import DecisionTreeRegressor def weak_classify(data, target): tree = 
DecisionTreeRegressor(criterion='mse', max_depth=1) tree.fit(data, target) pred = tree.predict(data); return pred, tree def predict(trees, data, learning_rate=1): pred = trees[0].predict(test_x) for tree in trees[1:]: pred += tree.predict(test_x) * learning_rate return np.round( pred )Create first learnerprediction = [] trees = [] tr_errors = [] ts_errors = [] pred, tree = weak_classify(train_x, train_y) prediction.append(pred) trees.append(tree) resi = train_y - prediction[-1] tr_errors.append( np.sum( np.round(np.array(prediction).sum(0)) == train_y) / train_y.shape[0] ) ts_errors.append( np.sum(predict(trees, test_x) == test_y)/test_y.shape[0] ) print( 'training accuracy: ', tr_errors[-1] ) print( 'test accuracy: ', ts_errors[-1] )training accuracy: 0.925 test accuracy: 0.893491124260355Create next M learners**The same way as before - fit the next learner to the residuals of the previous one!**learning_rate = 0.5 for i in range(100): # M = 100 here pred, tree = weak_classify(train_x, resi) prediction.append(pred * learning_rate) trees.append(tree) resi = (resi - prediction[-1]) # calculate new residuals # save errors to history array tr_errors.append( np.sum(np.round( np.array(prediction).sum(0) ) == train_y) / train_y.shape[0] ) ts_errors.append( np.sum(predict(trees, test_x) == test_y)/test_y.shape[0] ) print( 'training error: ', np.max(np.array(tr_errors)) ) print( 'test error: ', np.max(np.array(ts_errors)) ) plt.figure(figsize=(15,4)) plt.plot(tr_errors, linewidth=5, label='training') plt.plot(ts_errors, linewidth=5, label='validation') plt.xlabel('M value') plt.ylabel('accuracy') plt.legend(loc=4)It can be compared with Random Forest Classifierfrom sklearn.ensemble import RandomForestClassifier as RFC forest = RFC(n_estimators=100) forest.fit(train_x, train_y) forest.score(test_x, test_y)Binary Classification with Adaptive Boosting 0) We introduce a probability distributiondistrib = np.ones(train_x.shape[0]) / train_x.shape[0] print("Probability distribution:", distrib[:10], "...") idx = np.random.choice(range(train_x.shape[0]), size=train_x.shape[0], replace=True, p=distrib) idx = np.unique(idx) temporal_train_x = train_x [ idx ] temporal_train_y = train_y [ idx ] print("Uniques in sampled set:", idx.shape[0])Probability distribution: [0.0025 0.0025 0.0025 0.0025 0.0025 0.0025 0.0025 0.0025 0.0025 0.0025] ... Uniques in sampled set: 2501) Create first classifier on it**Decision Tree stomp**pred, tree = weak_classify(temporal_train_x, temporal_train_y) pred = np.round(tree.predict(train_x)) accuracy = np.sum(pred==train_y) / pred.shape[0] print( 'training accuracy: ', accuracy ) print( 'test accuracy: ', np.sum( np.round(tree.predict(test_x)) == test_y) / test_y.shape[0] )training accuracy: 0.9125 test accuracy: 0.8934911242603552) Compute classifier weight $\alpha_t = \frac12 \ln(\frac{1-\epsilon_t}{\epsilon_t})$def classifier_weight(accuracy): return 1/2 * np.log((accuracy)/(1-accuracy)) weight = classifier_weight(accuracy) print(weight)1.17227464604653873) Compute new data distribution $D_{t+1}(i) = \frac{D_{t} \exp(-\alpha_t y_i h_t (x_i)) }{Z_t}$YtimesH = np.array([-1,1])[(pred == train_y)*1] # swap from [0,1] to [-1,1]! 
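# The next two lines apply the re-weighting rule from above: D_{t+1}(i) is proportional to
# D_t(i) * exp(-alpha_t * y_i * h_t(x_i)), and dividing by the sum plays the role of the
# normalizer Z_t, so misclassified samples (YtimesH == -1) become more likely to be sampled next round.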
distrib = distrib * np.exp( -weight * YtimesH ) distrib /= np.sum(distrib) plt.hist(distrib);4) Sample training datasetidx = np.random.choice(range(train_x.shape[0]), size=train_x.shape[0], replace=True, p=distrib) idx = np.unique(idx) temporal_train_x = train_x [ idx ] temporal_train_y = train_y [ idx ] print("Uniques in sampled set:", idx.shape[0])Uniques in sampled set: 1855) Repeat!def AdaBoost_step(forest, weights, distribution): # Sample from distribution idx = np.random.choice(range(train_x.shape[0]), size=int(train_x.shape[0]/6), replace=False, p=distribution) # A little tweak - add some easy examples to stabilize the training idx2 = np.random.choice(range(train_x.shape[0]), size=int(train_x.shape[0]/6), replace=False, p=(1/distribution)/np.sum(1/distribution)) idx = np.append(idx, idx2) idx = np.unique(idx) temporal_train_x = train_x [ idx ] temporal_train_y = train_y [ idx ] # Classify with new classifier pred, tree = weak_classify(temporal_train_x, temporal_train_y) forest.append(tree) pred = np.round(tree.predict(train_x)) accuracy = np.sum(pred==train_y) / pred.shape[0] # Compute classifier weight weight = classifier_weight(accuracy) weights.append(weight) train_acc = np.sum(forest_predict(forest, weights, train_x)==train_y)/train_y.shape[0] test_acc = np.sum(forest_predict(forest, weights, test_x)==test_y)/test_y.shape[0] # Compute data distribution YtimesH = np.array([-1,1])[(pred == train_y)*1] distribution = distribution * np.exp( -weight * YtimesH ) distribution /= np.sum(distribution) return forest, weights, distribution, train_acc, test_acc def forest_predict(forest, weights, data): prediction = np.zeros(data.shape[0]) #weights = np.array(weights) / sum(weights) for tree, weight in zip(forest, weights): prediction += (tree.predict(data) * 2 - 1) * weight return (prediction>=0)*1we reset the distributionforest = [] weights = [] tr_acc = [] ts_acc = [] distribution = distribution = np.ones(train_x.shape[0]) / train_x.shape[0]and make M iterationsfor i in range(200): forest, weights, distribution, acc, ts = AdaBoost_step(forest, weights, distribution) tr_acc.append(acc) ts_acc.append(ts) print('training accuracy:', max(tr_acc)) print('test accuracy:', max(ts_acc))training accuracy: 0.985 test accuracy: 0.9704142011834319and plot the training curvesplt.figure(figsize=(15,4)) plt.plot(tr_acc, linewidth=5, label='training') plt.plot(ts_acc, linewidth=5, label='validation') plt.xlabel('M value') plt.ylabel('accuracy') plt.legend(loc=4)It can be compared with stable, sklearn AdaBoostfrom sklearn.ensemble import AdaBoostClassifier as ABC forest = ABC(n_estimators=1000) forest.fit(train_x, train_y) forest.score(test_x, test_y)02 geolocation churn > Combining data on geo location level, given that the current calculation is done on planning_area (far too few points), I will just be doing visualisation with powerBI. 
Library# Library #exports import pandas as pd import numpy as np import os from zipfile import ZipFile from scipy import spatial import matplotlib.pyplot as plt from tsfresh import extract_features from tsfresh.feature_selection.relevance import calculate_relevance_table import tsfresh #exports from sklearn.cluster import AgglomerativeClustering from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor, RandomForestClassifier from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.inspection import plot_partial_dependence from sklearn.impute import SimpleImputer imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean') imp_med = SimpleImputer(missing_values=np.nan, strategy='median') from sklearn.preprocessing import OneHotEncoder from sklearn.metrics import balanced_accuracy_score, accuracy_score, classification_report from sklearn.inspection import permutation_importance from collections import defaultdictFunctions#exports def read_tsv(file:str)->pd.DataFrame: return pd.read_csv(file, compression='gzip', sep='\t') def gzip_reading(gzip_file)->dict: 'Read all tsv.gz files in the zip file and returning a dictionary (key:filename, value:data)' archive = ZipFile(gzip_file, 'r') files = {name: archive.open(name) for name in archive.namelist() if (name.endswith('.gz') and not name.startswith('_'))} files_names = [i.split('.')[0] for i in files.keys()] # reading the designated files into dict dt = {} for name, key in zip(files_names, files.keys()): dt[name] = read_tsv(files[key]) return dt def load_directory_files_dict(dir_path)->dict: 'Load all pkl files in the directory into dict' L1file_list = os.listdir(path_load) L1file_list = [i for i in L1file_list if not i.startswith(".")] L1name_list = [i.split("_")[0]+"_"+i.split("_")[1].replace(".pkl","") for i in L1file_list] dt = {} for name, key in zip(L1file_list, L1name_list): dt[key] = pd.read_pickle(os.path.join(path_load,name)) return dtData# data path_load = os.path.join("Data","L1") path_save = os.path.join("Data","L2") dt = load_directory_files_dict(path_load) raw = gzip_reading('telco_demo_datasets.zip')geo profilegeo_train = dt['geo_train'] geo_loc = (dt['geo_location'] .groupby('planning_area', as_index=False) .size() .rename({'size':'visits'}, axis=1) ) geo_census = dt['geo_census'] geo_school = dt['geo_school'] geo_coor = dt['geo_coor'] # combining data geo_dt = (geo_train .merge(geo_coor) .merge(geo_loc) .merge(geo_census) .merge(geo_school) ) # print data geo_dt.head() geo_dt.shapeWith only 21 records and 20 features... I should have perhaps calculate it on finer lat, lon instaed of planning area... 
outputgeo_dt.to_pickle(os.path.join(path_save, "geo_profile.pkl")) geo_dt.to_csv(os.path.join(path_save, "geo_profile.csv"))geo visit Locationclustering on locations visited for users- hclust on lat,lon with 60 clusters- calculate the mean churn % within clustersX_cluster_dt = raw['telco_locations'][['latitude','longitude','msisdn']].merge(dt['user_train'][['msisdn','churn']]) X_cluster_dt.head() cluster = AgglomerativeClustering(n_clusters=60, affinity='euclidean', linkage='ward') cluster.fit_predict(X_cluster_dt[['latitude','longitude']]) X_cluster_dt['cluster'] = cluster.labels_ X_cluster_dt['churn'] = X_cluster_dt.churn X_cluster_agg_dt = X_cluster_dt.groupby('cluster', as_index=False).agg({'latitude':'median','longitude':'median','churn':'mean'}) X_cluster_agg_dt.head()outputX_cluster_agg_dt.to_pickle(os.path.join(path_save, "geo_visit.pkl")) X_cluster_agg_dt.to_csv(os.path.join(path_save, "geo_visit.csv")) # plt.scatter(X['latitude'],X['longitude'], c=cluster.labels_, cmap='rainbow')HIV Model 10/15/2018# Configure Jupyter so figures appear in the notebook %matplotlib inline # Configure Jupyter to display the assigned value after an assignment %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' # import functions from the modsim.py module from modsim import * HIVsystem = System(gamma=1.36, tao=0.2, mu=0.00136, beta=0.00027, alpha=0.036, rho=0.1, pi=100, delta=0.33, sigma=2, t0=0, t_end=120, dt=0.1) HIVstate = State(R=200, L=0, E=0, V=100, CD4=1000) def update_func(state,t,system): unpack(system) r, l, e, v, cd4 = state dr = gamma*tao - mu*r - beta*r*v dl = rho*beta*r*v - mu*l - alpha*l de = (1 - rho)*beta*r*v + alpha*l - delta*e dv = pi*e - sigma*v r += dr * dt l += dl * dt e += de * dt v += dv * dt cd4 = 1000*(1-tao) + r + l + e return State(R=r, L=l, E=e, V=v, CD4=cd4) update_func(HIVstate,1,HIVsystem) def run_simulation(system, update_func, state): unpack(system) frame = TimeFrame(columns=state.index) frame.row[t0] = state for t in linrange(t0,t_end, dt): frame.row[t+dt] = update_func(frame.row[t], t, system) return frame frameData = run_simulation(HIVsystem, update_func, HIVstate) frameData.head() fig1 = plt.figure() ax1 = fig1.add_subplot(111) ax1.plot(frameData.R,'r') ax1.set_ylabel('R (r)') ax2 = ax1.twinx() ax2.set_yscale('linear') ax2.plot(frameData.L,'g') ax2.plot(frameData.E,'b') ax2.set_ylabel('L (g), E (b)') fig2 = plt.figure() ax1 = fig2.add_subplot(111) ax1.plot(frameData.CD4,'g') ax1.set_ylabel('CD4 Lymphocytes (g)') ax2 = ax1.twinx() ax2.set_yscale('log') ax2.plot(frameData.V,'r') ax2.set_ylabel('V (r)')**Необходимые библиотеки**import cv2 import numpy as np**Подготовка изображения к созданию модели**fileStl='example.stl' fileIm=r'12.jpg' stl=open(fileStl, 'w') im=cv2.imread(fileIm) ''' * * Чтобы не было резких границ сгаживаем, а также нормируем чтобы несильно вытянутой была модель * ''' im = cv2.normalize(im, im, 0, 20, norm_type=cv2.NORM_MINMAX) im = cv2.GaussianBlur(im,(0,0),2) im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) im=cv2.resize(im,(300,100)) #gray = 255-gray #blur = cv2.resize(blur,(400,400)) #gray = cv2.normalize(gray, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)**Устройство STL-формата**solid name # название facet normal 0 0 0 outer loop vertex cd_1[0] cd_1[1] cd_1[2] # первая вершина треугольника vertex cd_2[0] cd_2[1] cd_2[2] # вторая вершина треугольника vertex cd_3[0] cd_3[1] cd_3[2] # третья вершина треугольника endloop endfacet endsolid**Начинаем накладывать треугольную сетку** По сути выдавливаем нашу модель 
из изображения ![gignces3tiqs1wmvxzzitehiaya.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgEAAAEVCAYAAAB5STZxAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsIAAA7CARUoSoAAAEyGSURBVHhe7d0HeFNl+wbwp4O2QMveyhaVIeAA/cTNlu36UL6/CIKoCIILcaC4F4oDB7hwIOICFUVEREHcIC72liF7lNH9z/3mfdvTNEmze5Lcv+vKlXNO0jRNC8/9jvOehAIHISIioriTqO+JiIgozjAEEBERxSmGACIiojjFEEBERBSnGAKIiIjiFEMAERFRnGIIICIiilMMAURERHGKIYCIiILyxx9/yDnnnCPVq1fXR0IjKytL7r77bqlbt67MnTtXHy3y2muvSaNGjeSuu+7SRwKD1/nmm2/kwIEDMmbMGPVz/PXXX/pR+9i9e7cMGDBAtm7dqo8EjyGAiCgO7du3T9atW6f3PMvOzpaNGzeqQr9q1So5cuSIfqTISSedJG3atJE9e/boI/7bvn273iqSmpoqAwcOVO/VnT59+kjlypWlSZMm+oj/5s+fL/PmzZMzzjhDKlWqJP3791c/R15enn6Gf44ePSo7d+7Ue8Xt379fli9fLvn5+fqIezk5ObJp0yb1ma9YsaLwM0c4GTVqlEycOFHthwJDABFRHPrwww+lR48ees+zRx99VKZMmSI//vijXH755XLNNde4DQKBWrNmjdx8881y2WWXPRL)# Функция по созданию треугольника def makeTriangle(cd_1, cd_2, cd_3): stl.write(o_1+"facet normal 0 0 0") stl.write(o_2 + "outer loop") stl.write(o_3 + "vertex " + " ".join(cd_1)) stl.write(o_3 + "vertex " + " ".join(cd_2)) stl.write(o_3 + "vertex " + " ".join(cd_3)) stl.write(o_2 + "endloop \n\tendfacet") x=0 y=0 cd_1=['0', '0', '0'] #первая вершина треульника в формате (x, y, intensity) cd_2=['0', '0', '0'] #вторая вершина треульника в формате (x, y, intensity) cd_3=['0', '0', '0'] #третья вершина треульника в формате (x, y, intensity) file='STL_project-1.stl' o_1="\n\t" o_2="\n\t\t" o_3="\n\t\t\t" stl.write("solid") # Делаем основу модели for i in range(im.shape[1]-1): cd_1=[str(i),"0","0"] cd_3=[str(i+1),str(im.shape[0]-1),"0"] cd_2=[str(i),str(im.shape[0]-1),"0"] makeTriangle(cd_1, cd_2, cd_3) for i in range(im.shape[1]-1): cd_1=[str(i+1),str(im.shape[0]-1),"0"] cd_3=[str(i),"0","0"] cd_2=[str(i+1),"0","0"] makeTriangle(cd_1, cd_2, cd_3) # Делаем рельеф for i in range(im.shape[1]): for k in range(im.shape[0]-1): if i!=im.shape[1]-1: try: cd_1=[str(i), str(k), str(im[k, i]) ] cd_2=[str(i + 1), str(k), str(im[k, i+1]) ] cd_3=[str(i+1), str(k+1), str(im[k+1,i+1])] except: print('er') makeTriangle(cd_1, cd_2, cd_3) try: cd_1=[str(i), str(k), str(im[k, i]) ] cd_2=[str(i+1),str(k+1), str(im[k+1, i+1])] cd_3=[str(i), str(k+1), str(im[k+1,i]) ] except: print('er') makeTriangle(cd_1, cd_2, cd_3) print('Done!')Done!4.4 Decentralized SVRG with Exact Diffusion 4.4.1 Problem and decentralized stochastic algorithmsConsider the optimization problem:$$\min_{x \in \mathbb{R}^d} \quad f(x) = \frac{1}{n}\sum_{i=1}^n [f_i(x) = \mathbb{E}_{\xi_i \sim D_i}\{F(x,\xi_i)\}]$$where $f_i(x)$ is local to node $i$, and random variable $\xi_i$ denotes the local data that follows distribution $D_i$. Each node $i$ can locally evaluate stochastic gradient $\nabla F(x;\xi_i)$ (not the real gradient $\nabla f_i(x)$); it must communicate to access information from other nodes.In Section 3.6, we discussed one widely-used approach - ATC-DSGD to solve the above problem. Given the network topology and the weight matrix, the ATC-DSGD will update as follows:\begin{align}x_i^{(k+1)} = \sum_{j=1}^n w_{ij} \Big(x_j^{(k)} - \alpha \nabla F(x_j^{(k)};\xi_j^{(k)}) \Big), \quad i=1,\cdots,n.\end{align}where $\xi_i^{(k)}$ is a realization of the random variable $\xi_i$ at iteration $k$. 
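To make this update concrete, here is a minimal NumPy sketch of a single ATC-DSGD iteration. It is purely illustrative: the matrix `X` stacking the $n$ local iterates, the combination matrix `W`, and the callable `stoch_grad` are placeholder names assumed here, and the demo later in this section uses torch + bluefog rather than NumPy.

import numpy as np

def atc_dsgd_step(X, W, stoch_grad, alpha):
    # X: (n, d) array, row i holds node i's iterate x_i^(k)
    # W: (n, n) combination matrix with entries w_{ij}
    # stoch_grad(i, x): returns a stochastic gradient of node i at x (assumed user-supplied)
    # alpha: step size
    n = X.shape[0]
    # "Adapt": every node takes a local stochastic-gradient step
    Z = np.stack([X[i] - alpha * stoch_grad(i, X[i]) for i in range(n)])
    # "Then combine": node i averages its neighbors' adapted iterates with weights w_{ij}
    return W @ Z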
Furthermore, it is established in Section 3.6 that under smoothness and strongly-convex assumptions, ATC-DSGD suffers from a limiting bias as follows:\begin{align}\label{dsgd-convergence}\lim_{k\to \infty}\frac{1}{n}\sum_{i=1}^n \mathbb{E}\|x_i^{(k)} - x^\star\|^2 = O\Big(\underbrace{\frac{\alpha \sigma^2}{n} + \frac{\rho^2 \alpha^2 \sigma^2}{1-\rho}}_{\rm sto.\ bias} + \underbrace{\frac{\rho^2 \alpha^2 b^2}{(1-\rho)^2}}_{\rm inconsis.\ bias} \Big). \hspace{1cm} \mbox{(ATC-DSGD-Limiting-Bias)}\end{align}It is observed that ATC-DSGD suffers from two sources of bias:- A stochastic limiting bias caused by the stochastic gradient noise $\sigma^2$;- An inconsistency bias caused by the data heterogeneity $b^2 = \frac{1}{n}\sum_{i=1}^n\|\nabla f_i(x^\star)\|^2$This motivates us that, to correct the limiting bias, we have to design algorithms that can remove both the stochastic bias and inconsistency bias. In Section 4.2, we have discussed that SVRG can remove the stochastic bias when the problem is in the finite-sum form. In Section 4.3, we have discussed that Exact-diffusion can remove the inconsistency bias. A natural idea is to integrate SVRG to Exact-diffusion so that the new algorithm can converge **exactly** to the global solution $x^\star$ without any limiting bias. 4.4.2 Exact Diffusion with SVRG In this section we disscuss the new algorithm. First, we assume each local $f_i(x) = \mathbb{E}_{\xi_i \sim D_i}\{F(x,\xi_i)\} = \frac{1}{M}\sum_{m=1}^M F_i(x; \xi_{m,i})$. In other words, we assume each $f_i(x)$ is in a **finite-sum** form. The Diffusion-SVRG algorithm is shown as follows. Each node $i$ will run the following algorithm in a parallel manner:> \begin{align}& \mbox{For $s = 1,2,\cdots$}\\& \hspace{5mm} \tilde{x}_i = \tilde{x}_{i,s-1} \\& \hspace{5mm} \tilde{\mu}_i = \frac{1}{M}\sum_{m=1}^M \nabla F(\tilde{x}_i, \xi_{m,i}) \\& \hspace{5mm} x_{i}^0 = \tilde{x}_i \\& \hspace{5mm} \mbox{For $k = 1,2,\cdots,K$}\\& \hspace{15mm}\mbox{Randomly pick $m_{k} \in \{1,\cdots, M\}$ and compute the stochastic gradient as}\\ & \hspace{15mm}g^k_{i} = \nabla F(x_i^{k-1};\xi_{m_k,i}) - \nabla F(\tilde{x}_i;\xi_{m_k,i}) + \tilde{\mu}_i \\& \hspace{15mm}\mbox{Update local variables $x_i$ as follows} \\& \hspace{15mm}\psi_i^{k} = x_i^{k} - \alpha \nabla g^k_{i} \\& \hspace{15mm}\phi_i^{k} = \psi_i^{k} + x_i^{k} - \psi_i^{k-1} \\& \hspace{15mm}x_i^{k+1} = \sum_{j=1}^n \bar{w}_{ij} \phi_j^{k} \\& \hspace{5mm} \mbox{Set $\tilde{x}_{i,s} = x^K_i$}\end{align}When each $f_i(x)$ is smooth and strongly convex, and the gradient noise is unbiased and bounded, one can show the above Diffusion-SVRG algorithm can converge to the global solution $x^\star$ in a linear rate. 4.4.3 An example: least-square problemIn this section, we will show a demo on how to solve a decentralized least-square problem with decentralized SVRG with Exact Diffusion. Suppose $n$ computing nodes collaborate to solve the following problem:$$\min_x \quad \frac{1}{n}\sum_{i=1}^n \|A_i x - b_i\|^2$$where $\{A_i, b_i\}$ are local data held in node $i$. 
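Each node only needs its own local gradient for this objective. Differentiating $f_i(x) = \|A_i x - b_i\|^2$ gives

$$\nabla f_i(x) = 2 A_i^\top (A_i x - b_i),$$

which is the quantity the code below evaluates (up to the constant factor 2, which only rescales the effective step size $\alpha$).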
4.4.3.1 Set up Environmentimport ipyparallel as ipp rc = ipp.Client(profile="bluefog") dview = rc[:] # A DirectView of all engines dview.block = True %%px import numpy as np import bluefog.torch as bf import torch from bluefog.common import topology_util import networkx as nx import matplotlib.pyplot as plt %matplotlib inline bf.init()4.4.4.2 Generate local data $A_i$ and $b_i$%%px def generate_data(m, n): A = torch.randn(m, n).to(torch.double) x_o = torch.randn(n, 1).to(torch.double) ns = 0.1 * torch.randn(m, 1).to(torch.double) b = A.mm(x_o) + ns return A, b %%px def distributed_grad_descent(A, b, maxite=5000, alpha=1e-1): x_opt = torch.zeros(n, 1, dtype=torch.double) for _ in range(maxite): # calculate local gradient grad_local = A.t().mm(A.mm(x_opt) - b) # global gradient grad = bf.allreduce(grad_local, name="gradient") # distributed gradient descent x_opt = x_opt - alpha * grad grad_local = A.t().mm(A.mm(x_opt) - b) grad = bf.allreduce(grad_local, name="gradient") # global gradient # evaluate the convergence of distributed gradient descent # the norm of global gradient is expected to 0 (optimality condition) global_grad_norm = torch.norm(grad, p=2) if bf.rank() == 0: print( "[Distributed Grad Descent] Rank {}: global gradient norm: {}".format( bf.rank(), global_grad_norm ) ) return x_optIn the following code we run distributed gradient descent to achieve the global solution $x^\star$ to the optimization problem. To validate whether $x^\star$ is optimal, it is enough to examine $\frac{1}{n}\sum_{i=1}^n \nabla f_i(x^\star) = 0$.%%px m, n = 500, 10 A, b = generate_data(m, n) x_opt = distributed_grad_descent(A, b, maxite=1000, alpha=1e-4)[stdout:3] [Distributed Grad Descent] Rank 0: global gradient norm: 1.4937122611144555e-124.4.4.3 One step of SVRG with Exact DiffusionIn this section, we depict the convergence curve of the SVRG algorithm with Exact Diffusion in a decentralized environment. We will utilize the $x^\star$ achieved by distributed gradient descent as the optimal solution. First, we define one step of the SVRG with Exact Diffusion.%%px def SVRG_exact_diffusion_one_step(x, A, b, pre_psi, alpha=1e-2): m = A.size()[0] x_0 = x.clone() grad_0 = A.t().mm(A.mm(x_0) - b)/m # Precompute the full gradient of the step for _ in range(m): # Select a random sample i = torch.randint(m,(1,)).item() sample = A[i:i+1] # Gradient estimation using SVRG grad_est_i = sample.t()*(sample.mm(x)-b[i]) grad_est_0 = sample.t()*(sample.mm(x_0)-b[i]) grad_est = grad_est_i - grad_est_0 + grad_0 # Correct bias using Exact Diffusion psi = x-alpha*grad_est phi = psi + x - pre_psi # Communication with neighbors x = bf.neighbor_allreduce(phi) pre_psi = psi.clone() return x, psiNext we run the SVRG algorithm with Exact Diffusion.%%px # Set topology as ring topology. 
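# With a ring graph, the bf.neighbor_allreduce call inside SVRG_exact_diffusion_one_step combines each node's phi only with its two adjacent neighbors (plus its own value).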
G = topology_util.RingGraph(bf.size()) bf.set_topology(G) max_iter = 300 alpha = 5e-4 x = torch.zeros(n, 1, dtype=torch.double) # Initialize x for exact diffusion psi = x rel_error = torch.zeros((max_iter, 1)) for ite in range(max_iter): if ite % 10 == 0: if bf.rank() == 0: print("Progress {}/{}".format(ite, max_iter)) x, psi = SVRG_exact_diffusion_one_step(x, A, b, psi, alpha=alpha) rel_error[ite] = torch.norm(x - x_opt, p=2) / torch.norm(x_opt, p=2) # collect relative error rel_error = dview.pull("rel_error", block=True) rel_error_avg = sum(rel_error) / len(rc.ids) import matplotlib.pyplot as plt %matplotlib inline plt.semilogy(rel_error_avg) plt.xlabel("Iteration", fontsize=16) plt.ylabel("Relative error", fontsize=16)How to Run UncertaintyForestThis set of four tutorials (`uncertaintyforest_running_example.ipynb`, `uncertaintyforest_posteriorestimates.ipynb`, `uncertaintyforest_conditionalentropyestimates.ipynb`, and `uncertaintyforest_mutualinformationestimates.ipynb`) will explain the UncertaintyForest class. After following these tutorials, you should have the ability to run UncertaintyForest on your own machine and generate Figures 1, 2, and 3 from [this paper](https://arxiv.org/pdf/1907.00325.pdf), which help you to visualize a comparison of the estimated posteriors and conditional entropy values for several different algorithms.If you haven't seen it already, take a look at other tutorials to setup and install the ProgLearn package: `installation_guide.ipynb`.*Goal: Train the UncertaintyForest classifier on some training data and produce a metric of accuracy on some test data* Import required packages and set parameters for the forestfrom proglearn.forest import UncertaintyForest from proglearn.sims import generate_gaussian_parity # Real Params. n_train = 50000 # number of training data points n_test = 1000 # number of testing data points num_trials = 10 # number of trials n_estimators = 100 # number of estimatorsWe've done a lot. Can we just run it now? Yes! Create and train our UncertaintyForest First, generate our data:X, y = generate_gaussian_parity(n_train+n_test)Now, split that data into training and testing data. 
We don't want to accidentally train on our test data.X_train = X[0:n_train] # Takes the first n_train number of data points and saves as X_train y_train = y[0:n_train] # same as above for the labels X_test = X[n_train:] # Takes the remainder of the data (n_test data points) and saves as X_test y_test = y[n_train:] # same as above for the labels import numpy as np import math from sklearn.preprocessing import KBinsDiscretizer def KBinsDiscretize(data_x, n_bins=0, alpha=3.322, encode="ordinal", strategy="uniform"): """Discretize each feature of data_x into bins; n_bins=0 selects the bin count via Sturges' rule (capped at 256).""" # Makes n_bins optional, calculates optimal n_bins by default # Sturges Rule - num_bins = 1 + 3.322 * log_10(num_inputs) if n_bins == 0: # cap bins at 256 n_bins = min(math.floor(1 + alpha * math.log10(data_x.shape[0])), 256) kbins = KBinsDiscretizer(n_bins, encode=encode, strategy=strategy) kbins.fit(data_x) binned_x = kbins.transform(data_x) return binned_x X_train_binned = [] for n_bins in range(0, n_train, 1000): X_train_binned.append(KBinsDiscretize(X_train, n_bins))Then, create our forests and fit each learner, timing the training for each binned dataset:import timeit bins = 1000 x_bins = [] UF_binned = [] training_times = [] for x in X_train_binned: UF = UncertaintyForest(n_estimators = n_estimators) starttime = timeit.default_timer() UF_binned.append(UF.fit(x, y_train)) x_bins.append(bins) training_times.append(timeit.default_timer() - starttime) bins += 1000 import matplotlib.pyplot as plt plt.figure() plt.plot(x_bins, training_times) plt.xlabel("Number of bins") plt.ylabel("Training time") plt.show()Well, we're done. Exciting, right? Produce a metric of accuracy for our learnerWe've now created our learner and trained it. But to actually show if what we did is effective at predicting the class labels of the data, we'll create some test data (with the same distribution as the train data) and see if we classify it correctly.X_test, y_test = generate_gaussian_parity(n_test) # creates the test data predictions = [] accuracies = [] for forest in UF_binned: prediction = forest.predict(X_test) accuracies.append(sum(prediction == y_test)/n_test) UF = UF_binned[0] print(UF) predictions = UF.predict(X_test) # predict the class labels of the test dataTo see the learner's accuracy, we'll now compare the predictions with the actual test data labels.
We'll find the number correct and divide by the number of data.predictions = UF.predict(X_test) # predict the class labels of the test data accuracy = sum(predictions == y_test)/n_testAnd, let's take a look at our accuracy:print(accuracy)0.945Algorithm Clustering Ant%matplotlib inline from random import * import matplotlib import matplotlib.pyplot as plt import matplotlib.animation as animation import numpy as np import random as rd import math as mt from copy import copy, deepcopy from functools import reduce # Estado que cada célula pode assumir NO_ANT = 0 ANT_UNLOADED = 1 ANT_L1 = 2 ANT_L2 = 3 NO_LOAD = 0 L1 = 196 L2 = 46 EMPTY_CELL = (NO_ANT, NO_LOAD) NO_ANT_W_L1 = (NO_ANT, L1) NO_ANT_W_L2 = (NO_ANT, L2) ANT_UNLOADED_NO_LOAD = (ANT_UNLOADED, NO_LOAD) def neighborhood(grid, pair): x = pair[0] min_x = (x - 1) if ((x - 1) >= 0) else x max_x = (x + 1) if ((x + 1) < len(grid)) else x y = pair[1] min_y = (y - 1) if ((y - 1) >= 0) else y max_y = (y + 1) if ((y + 1) < len(grid)) else y return list({ (min_x, min_y), (min_x, y), (min_x, max_y), (max_x, min_y), (max_x, y),(max_x, max_y), (x, min_y), (x, y), (x, max_y) }) # Cálculo da fração de itens percebidos na vizinhaça da formiga # Pode ser percebido como visibilidade de cada formiga def f(grid, ant, load_type): neighbors = neighborhood(grid, ant) quantity = 0 for n in neighbors: value = grid[n[0]][n[1]] if value[1] == load_type: quantity += 1 return quantity/len(neighbors) # Cálculo da probabilidade de uma formiga pegar um item uma vez descarregada e numa célula com item # p_pick = (k1/(k1+f))² def p_pick(grid, k1, ant, load_type): return mt.pow((k1 / (k1 + f(grid, ant, load_type))), 2) # Cálculo da probabilidade de uma formiga deixar um item uma vez carregada e numa célula sem item # p_drop = (f/(k2+f))² def p_drop(grid, k2, ant, load_type): return mt.pow((f(grid, ant, load_type) / (k2 + f(grid, ant, load_type))), 2) def move_ant(grid, ants, ant): possible_positions = neighborhood(grid, ant) condition = True while condition: # Pega uma posição aleatória new_pos = possible_positions[randint(0, len(possible_positions) - 1)] # Valor desta posição aleatória new_value = grid[new_pos[0]][new_pos[1]] if new_value[0] == NO_ANT: # Verifica se a nova posição não há formiga old_pos = ant old_value = grid[old_pos[0]][old_pos[1]] grid[old_pos[0]][old_pos[1]] = (NO_ANT, old_value[1]) grid[new_pos[0]][new_pos[1]] = (old_value[0], new_value[1]) i = ants.index(ant) ants.remove(ant) ants.insert(i, new_pos) condition = False else: possible_positions.remove(new_pos) if (len(possible_positions) == 0): condition = False return grid, ants def ACA(max_it, size_grid, n_ants, n_load, n_load2, k1, k2): grid = [[EMPTY_CELL] * size_grid for i in range(size_grid)] ants = [] # Posição das formigas loads = [] # Posição das cargas # ANT_UNLOADED_NO_LOAD = {ANT_UNLOADED, NO_LOAD} # Posicionamento inicial das formigas no grid ant = 0 while ant < n_ants: x, y = (randint(0, size_grid - 1), randint(0, size_grid - 1)) if grid[x][y] == EMPTY_CELL: grid[x][y] = ANT_UNLOADED_NO_LOAD ants.append((x,y)) ant = ant + 1 # NO_ANT_W_L1 = {NO_ANT, L1} # Posicionamento inicial das cargas no grid load = 0 while load < n_load: x,y = (randint(0, size_grid - 1), randint(0, size_grid - 1)) if grid[x][y] == EMPTY_CELL: grid[x][y] = NO_ANT_W_L1 loads.append((x, y)) load = load + 1 # NO_ANT_W_L2 = {NO_ANT, L2} # Posicionamento inicial das cargas no grid load2 = 0 while load2 < n_load2: x,y = (randint(0, size_grid - 1), randint(0, size_grid - 1)) if grid[x][y] == EMPTY_CELL: grid[x][y] = NO_ANT_W_L2 
loads.append((x, y)) load2 = load2 + 1 init_grid = deepcopy(grid) for t in range(max_it): # Repetir para max_it iterações for a in ants: # Repetir para cada formiga content_pos = grid[a[0]][a[1]] if content_pos[0] == ANT_UNLOADED and content_pos[1] != NO_LOAD: # Se a formiga está descarregada e há um item if rd.random() <= p_pick(grid, k1, a, content_pos[1]): if content_pos[1] == L1: grid[a[0]][a[1]] = (ANT_L1, NO_LOAD) elif content_pos[1] == L2: grid[a[0]][a[1]] = (ANT_L2, NO_LOAD) if content_pos[0] != ANT_UNLOADED and content_pos[1] == NO_LOAD: # Se a formiga está carregada e não há um item load_type = 0 if content_pos[0] == ANT_L1: load_type = L1 elif content_pos[0] == ANT_L2: load_type = L2 if rd.random() <= p_drop(grid, k2, a, load_type): grid[a[0]][a[1]] = (ANT_UNLOADED, load_type) grid, ants = move_ant(grid, ants, a) # Movimentar aleatoriamente a formiga num_ants_loaded = 0 for ant in ants: ant_value = grid[ant[0]][ant[1]] if ant_value[0] != ANT_UNLOADED: num_ants_loaded = num_ants_loaded + 1 return init_grid, grid, num_ants_loaded*100.0/len(ants) def matrix_plot(g): m = [[0] * len(g) for i in range(len(g))] for i in range(len(g)): for j in range(len(g)): value = g[i][j] if value == EMPTY_CELL: m[i][j] = 0 elif value[0] == NO_ANT or value[0] == ANT_UNLOADED: m[i][j] = value[1] else: m[i][j] = 80 return m # Inicialização do algoritmo i,e,p = ACA(max_it=100000, size_grid=35, n_ants=10, n_load=100, n_load2=100, k1=0.05, k2=0.95) plt.rcParams['figure.figsize'] = [5, 5] plt.imshow(matrix_plot(i)) plt.show() plt.rcParams['figure.figsize'] = [5, 5] plt.imshow(matrix_plot(e)) plt.show() print(p) def plotMatrices(times, max_it, size_grid, n_ants, n_load, n_load2, k1, k2): rows = times columns = 2 images = [] p_title = [] ps = [] for i in range(rows): start,end,p = ACA(max_it, size_grid, n_ants, n_load, n_load2, k1, k2) images.append(matrix_plot(start)) images.append(matrix_plot(end)) p_title.append('') p_title.append(p) ps.append(p) for j in range(rows * columns): image = images[j] plt.rcParams['figure.figsize'] = [10, 10] plt.subplot(rows, columns, j + 1) plt.title('{}'.format(p_title[j])) plt.imshow(image) plt.axis('off') plt.tight_layout() plt.show() avg = reduce((lambda x, y: x + y), ps)/(len(ps) * 1.0) print('Average: {}'.format(avg)) plotMatrices(times=50, max_it=100000, size_grid=35, n_ants=100, n_load=100, n_load2=100, k1=0.05, k2=0.95)/usr/local/lib/python3.5/dist-packages/matplotlib/tight_layout.py:209: UserWarning: tight_layout cannot make axes height small enough to accommodate all axes decorations warnings.warn('tight_layout cannot make axes height small enough 'Q1def remove_even(L): return [e for e in L if e % 2 != 0] l = [1, 2, 4, 5, 10, 6, 3] remove_even(l)Q2def merge_arrays(l1, l2): n, m = len(l1), len(l2) res = [None] * (n + m) i = j = k = 0 while i < n and j < m: if l1[i] <= l2[j]: res[k] = l1[i] i += 1 else: res[k] = l2[j] j += 1 k += 1 if i < n: res[j:] = l1[i:] elif j < m: res[i:] = l2[j:] return res arr1 = [1, 3, 4, 5] arr2 = [2, 6, 7, 8] merge_arrays(arr1, arr2)Q3def find_sum(l, value): n = len(l) for i in range(n): diff = value - l[i] if diff in l[i + 1:]: return[l[i], diff] return False l = [1,21,3,14,5,60,7,6] n = 81 find_sum(l, n) def find_sum(l, value): found_values = set() for e in l: diff = value - e if diff in found_values: return [e, diff] found_values.add(e) return False l = [1,21,3,14,5,60,7,6] n = 81 find_sum(l, n)Q4def find_product(arr): n = len(arr) res = [None] * n for i in range(n): tmp = 1 for j in range(n): if i != j: tmp *= arr[j] res[i] = tmp 
return res arr = [1, 2, 3, 4] find_product(arr) def find_product(arr): n = len(arr) ans = [None] * n left = 1 for i, e in enumerate(arr): ans[i] = left left *= e right = 1 for i in range(n - 1, -1, -1): ans[i] *= right right *= arr[i] return ans arr = [1, 2, 3, 4] find_product(arr)Q5def find_min(arr): ans = arr[0] for e in arr[1:]: if e < ans: ans = e return ans arr = [9, 2, 3, 6] find_min(arr)Q6def find_first_uniq(l): for i, e in enumerate(l): if e not in l[:i] and e not in l[i + 1:]: return e arr = [9, 2, 3, 2, 6, 6] find_first_uniq(arr) def find_first_uniq(l): counts = dict.fromkeys(l, 0) for e in l: counts[e] +=1 for e in l: if counts[e] == 1: return e arr = [9, 2, 3, 2, 6, 6] find_first_uniq(arr)Q7def find_second_max(l): max1 = max2 = l[0] for e in l[1:]: if e > max1: max2 = max1 max1 = e elif e > max2 or max1 == max2: max2 = e return max2 arr = [9, 2, 3, 6] find_second_max(arr)Q8def right_rotate(l, n): return l[-n:] + l[:-n] lst = [1, 2, 3, 4, 5] n = 3 right_rotate(lst, n)Q9def re_arrange(l): return [e for e in l if e < 0] + [e for e in l if e >= 0] arr = [10, -1, 20, 4, 5, -9, -6] re_arrange(arr)Q10def max_min(l): res = [] n = len(l) for i in range(0, n // 2, 1): res.append(l[-(i + 1)]) res.append(l[i]) if n % 2 != 0: res.append(l[n // 2]) return res lst = [1,2,3,4,5] max_min(lst)Real# real real=pd.read_csv("../data/elect_data/train/5032AB.csv",header=None,index_col=0) dates=[ "20.05.05", "20.07.25","19.04.02", '19.12.09', '20.04.17',"19.04.12", "19.11.08", "19.12.27", "20.05.15",'19.09.20', "19.10.07", '19.09.19', "20.02.25",'20.06.01','19.12.29','19.12.25'] save_sample_plot(real.loc[dates])Gendates=[ "20.04.14","20.06.13","20.05.15","20.04.20", '20.04.11','20.05.07','20.07.10','20.06.01', '20.06.06',"20.05.05",'20.04.08','20.08.08', "20.04.10","20.07.25","20.04.07",'20.04.09'] generated=pd.read_csv("../output/experiments/generate/5032AB_test_all_true_label_w=0.1.csv",header=None,index_col=0) save_sample_plot(generated.loc[dates])Start of baseline evaluationfrom glob import glob from pathlib import Path from typing import Tuple from fractions import Fraction from bisect import bisect import pandas as pd import numpy as np from harmonic_inference.utils import eval_utils as eu from harmonic_inference.utils import harmonic_utils as hu from harmonic_inference.data.data_types import ChordType, PitchType, KeyMode, TRIAD_REDUCTION, ALL_ONE_TYPE_REDUCTION results = {} for file in glob("baseline/*.csv"): file_path = Path(file) results[file_path.name] = pd.read_csv(file, header=None, names=['on', 'off', 'key', 'degree', 'type', 'inv']) # Output is in quarter notes, labels are in whole notes results[file_path.name]["on"] /= 4 results[file_path.name]["off"] /= 4 keys = set() degrees = set() types = set() inversions = set() for df in results.values(): for k in df['key'].unique(): keys.add(k) for d in df['degree'].unique(): degrees.add(d) for t in df['type'].unique(): types.add(t) for i in df['inv'].unique(): inversions.add(i) def key_to_tonic_mode(key: str, pitch_type: PitchType = PitchType.TPC) -> Tuple[int, KeyMode]: key = key.replace('-', 'b') key = key.replace('+', '#') tonic = hu.get_pitch_from_string(key, pitch_type) mode = KeyMode.MAJOR if key[0].isupper() else KeyMode.MINOR return tonic, mode def type_to_chord_type(type_str: str) -> ChordType: return { 'D7': ChordType.MAJ_MIN7, 'M': ChordType.MAJOR, 'd': ChordType.DIMINISHED, 'd7': ChordType.DIM7, 'm': ChordType.MINOR, 'm7': ChordType.MIN_MIN7, 'Gr+6': ChordType.DIM7, 'h7': ChordType.HALF_DIM7, }[type_str] def get_root_tonic_and_mode( 
degree_str: str, tonic: int, mode: KeyMode, pitch_type: PitchType = PitchType.TPC ) -> Tuple[int, int, KeyMode]: if isinstance(degree_str, int): degree_str = str(degree_str) degree_str = degree_str.replace('-', 'b') degree_str = degree_str.replace('+', '#') if '/' in degree_str: key, degree_str = degree_str.split('/') relative_transposition = hu.get_interval_from_scale_degree(key, False, mode, pitch_type=pitch_type) tonic = hu.transpose_pitch(tonic, relative_transposition, pitch_type=pitch_type) if key in ['5']: mode = KeyMode.MAJOR elif key in ['7']: mode = KeyMode.MINOR elif key in ['1']: mode = mode degree_interval = hu.get_interval_from_scale_degree(degree_str, False, mode, pitch_type=pitch_type) root = hu.transpose_pitch(tonic, degree_interval, pitch_type=pitch_type) return root, tonic, mode def get_all(key: str, degree: str, type_str: str, inv: str) -> Tuple[int, ChordType, int, int, KeyMode]: inv = int(inv) chord_type = type_to_chord_type(type_str) tonic, mode = key_to_tonic_mode(key) root, tonic, mode = get_root_tonic_and_mode(degree, tonic, mode) return root, chord_type, inv, tonic, mode for df in results.values(): roots = [] chord_types = [] invs = [] tonics = [] modes = [] for _, row in df.iterrows(): root, chord_type, inv, tonic, mode = get_all(row['key'], row['degree'], row['type'], row['inv']) roots.append(root) chord_types.append(chord_type) invs.append(inv) tonics.append(tonic) modes.append(mode) df["root_tpc"] = roots df["chord_type"] = chord_types df["inversion"] = invs df["tonic"] = tonics df["mode"] = modes def get_label_df(filename: str) -> pd.DataFrame: filename = filename[:-21] + "results.tsv" file = glob(f'outputs/**/{filename}', recursive=True)[0] return pd.read_csv(file, sep='\t', index_col=0, converters={'duration': Fraction}), file def get_row_at_onset(df, onset): index = min(bisect(list(df['off']), float(onset)), len(df) - 1) return df.iloc[index] def evaluate_df(key, df): label_df, filename = get_label_df(key) root_accs = [] chord_accs = [] triad_accs = [] seventh_accs = [] key_accs = [] full_accs = [] onset = 0 for _, label_row in label_df.iterrows(): est_row = get_row_at_onset(df, onset) onset += label_row['duration'] tonic_str = label_row['gt_key'].split(':')[0] if '/' in tonic_str: tonic_str = tonic_str.split('/')[0] gt_tonic = hu.get_pitch_from_string(tonic_str, pitch_type=PitchType.TPC) gt_mode = KeyMode.MAJOR if label_row['gt_key'][0].isupper() else KeyMode.MINOR gt_chord = label_row['gt_chord'] gt_inv = int(gt_chord[-1]) root_str = gt_chord.split(':')[0] if '/' in root_str: root_str = root_str.split('/')[0] gt_root = hu.get_pitch_from_string(root_str, pitch_type=PitchType.TPC) gt_chord_type = hu.get_chord_type_from_string(gt_chord.split(':')[1].split(',')[0]) chord_dist = eu.get_chord_distance( gt_root, gt_chord_type, gt_inv, est_row['root_tpc'], est_row['chord_type'], est_row['inversion'], ) chord_accs.append(1 - chord_dist) root_dist = eu.get_chord_distance( gt_root, gt_chord_type, 0, est_row['root_tpc'], est_row['chord_type'], 0, reduction=ALL_ONE_TYPE_REDUCTION ) root_accs.append(1 - root_dist) triad_dist = eu.get_chord_distance( gt_root, gt_chord_type, 0, est_row['root_tpc'], est_row['chord_type'], 0, reduction=TRIAD_REDUCTION ) triad_accs.append(1 - triad_dist) seventh_dist = eu.get_chord_distance( gt_root, gt_chord_type, 0, est_row['root_tpc'], est_row['chord_type'], 0, ) seventh_accs.append(1 - seventh_dist) key_dist = eu.get_key_distance( gt_tonic, gt_mode, est_row['tonic'], est_row['mode'], ) key_accs.append(1 - key_dist) full_accs.append(1 
if chord_dist + key_dist == 0 else 0) root_acc = float(np.average(root_accs, weights=label_df['duration'])) chord_acc = float(np.average(chord_accs, weights=label_df['duration'])) key_acc = float(np.average(key_accs, weights=label_df['duration'])) full_acc = float(np.average(full_accs, weights=label_df['duration'])) triad_acc = float(np.average(triad_accs, weights=label_df['duration'])) seventh_acc = float(np.average(seventh_accs, weights=label_df['duration'])) return { "Root": root_acc, "Triad": triad_acc, "Seventh": seventh_acc, "Chord": chord_acc, "Key": key_acc, "Full": full_acc, }, filename results_vals = {} import re for key, df in results.items(): eval_dict, name = evaluate_df(key, df) if "Beethoven" not in name: continue print(name) for acc, val in eval_dict.items(): if acc not in results_vals: results_vals[acc] = [] results_vals[acc].append(val) print(f" {acc}: {val}") for acc, val_list in results_vals.items(): print(f"{acc}: {sum(val_list) / len(val_list)}") from pathlib import Path from fractions import Fraction import pandas as pd from music21.converter import parse m21_score = parse(Path("../functional-harmony/data/BPS/scores/bps_01_01.mxl")) m21_score = m21_score.flattenParts() m21_score = m21_score.stripTies() for note in m21_score.recurse().notes: if note.isChord: chord = note print("Chord") for note in chord.notes: print(note.pitch.name, note.pitch.octave, chord.duration.quarterLength, chord.offset, chord.measureNumber, note.tie, chord.tie) print("End Chord") else: print(note.pitch.name, note.pitch.octave, note.duration.quarterLength, note.offset, note.measureNumber) for offset, measure in m21_score.measureOffsetMap().items(): print(offset, measure[0].timeSignature) import importlib from pathlib import Path import harmonic_inference.data.piece as piece importlib.reload(piece) notes, measures_df = piece.get_score_piece_from_music_xml(Path("../functional-harmony/data/BPS/scores/bps_01_01.mxl"), "") measures_df[40:50] list(note for note in notes if note.onset[0] in [48, 49])Test loading functional-harmony datafrom glob import glob from tqdm import tqdm from pathlib import Path import logging import harmonic_inference.data.piece as piece import importlib importlib.reload(piece) for file_path in tqdm(glob("../functional-harmony/data/**/*.mxl", recursive=True)[173:]): music_xml_path = Path(file_path) label_csv_path = music_xml_path.parent.parent / "chords" / Path(str(music_xml_path.stem) + ".csv") if not label_csv_path.exists(): logging.error(f"Label file {label_csv_path} does not exist.
Skipping.") continue print(music_xml_path) score = piece.get_score_piece_from_music_xml(music_xml_path, label_csv_path)Results / Confusion Matrixfrom glob import glob from tqdm import tqdm from fractions import Fraction import pandas as pd import numpy as np import matplotlib.pyplot as plt from harmonic_inference.data.data_types import PitchType, ChordType, KeyMode from harmonic_inference.utils.harmonic_utils import get_pitch_from_string, get_chord_type_from_string def get_results_df(path): dfs = [] for tsv in tqdm(glob(path, recursive=True)): dfs.append(pd.read_csv(tsv, sep="\t", converters={"duration": Fraction}, index_col=0)) if len(dfs) == 0: return None results_df = pd.concat(dfs, ignore_index=True) for type in ["gt", "est"]: results_df[f"{type}_key_tonic"] = 0 results_df[f"{type}_key_mode"] = 0 results_df[f"{type}_chord_root"] = 0 results_df[f"{type}_chord_type"] = 0 results_df[f"{type}_chord_inv"] = 0 keys = np.concatenate((results_df["gt_key"].unique(), results_df["est_key"].unique())) for key in tqdm(keys, desc="Working on keys..."): key_tonic, key_mode = key.split(":") for type in ["gt", "est"]: results_df.loc[results_df[f"{type}_key"] == key, f"{type}_key_tonic"] = get_pitch_from_string(key_tonic, PitchType.MIDI) results_df.loc[results_df[f"{type}_key"] == key, f"{type}_key_mode"] = KeyMode[key_mode.split(".")[1]] chords = np.concatenate((results_df["gt_chord"].unique(), results_df["est_chord"].unique())) for chord in tqdm(chords, desc="Working on chords..."): inv = int(chord[-1]) chord_str = chord.split(",")[0] chord_root, chord_type = chord_str.split(":") for type in ["gt", "est"]: results_df.loc[results_df[f"{type}_chord"] == chord, f"{type}_chord_root"] = get_pitch_from_string(chord_root, PitchType.TPC) results_df.loc[results_df[f"{type}_chord"] == chord, f"{type}_chord_type"] = get_chord_type_from_string(chord_type) results_df.loc[results_df[f"{type}_chord"] == chord, f"{type}_chord_inv"] = inv return results_df results_df = get_results_df("outputs/dcml-csm-1/**/*_results.tsv") results_df def get_heat_map_matrix(results_df): heat_map = np.zeros((len(ChordType), len(ChordType) + 2)) for i, chord_type in tqdm(enumerate(ChordType)): chord_type_df = results_df.loc[results_df["gt_chord_type"] == chord_type] if len(chord_type_df) == 0: continue total_dur = float(chord_type_df["duration"].sum()) correct_root_df = chord_type_df.loc[chord_type_df["gt_chord_root"] == chord_type_df["est_chord_root"]] heat_map[i, 0] = float(total_dur - correct_root_df['duration'].sum()) for j, est_chord_type in enumerate(ChordType, start=1): selected_df = correct_root_df.loc[correct_root_df["est_chord_type"] == est_chord_type] if est_chord_type == chord_type: correct_type_df = selected_df selected_dur = float(selected_df["duration"].sum()) heat_map[i, j] = selected_dur if len(correct_type_df) > 0: correct_inv_df = correct_type_df.loc[(correct_root_df["gt_chord_inv"] == correct_root_df["est_chord_inv"])] heat_map[i, -1] = 1 - float(correct_inv_df['duration'].sum() / correct_type_df['duration'].sum()) return heat_map def normalize_heat_map(heat_map): for i, row in enumerate(heat_map): if np.sum(row[:-1]) == 0: continue heat_map[i, :-1] /= np.sum(row[:-1]) xticks = [ "M", "m", "o", "+", "MM7", "d7", "mM7", "mm7", "o7", "%7", "+7", "+M7", ] heat_map = get_heat_map_matrix(results_df) normalize_heat_map(heat_map) plt.xlabel("Estimated Chord Type", labelpad=-15) plt.ylabel("Ground Truth Chord Type", rotation=90) plt.xticks(ticks=np.arange(len(ChordType) + 2), labels=["Incorrect Root"] + xticks + ["Incorrect 
Inv."], rotation=90) plt.yticks(ticks=np.arange(len(ChordType)), labels=xticks) plt.tight_layout(pad=0) plt.imshow(heat_map, vmin=0, vmax=1) plt.colorbar() plt.savefig("figs/heatmap.png", pad_inches=0) major_heat_map = get_heat_map_matrix(results_df.loc[results_df["gt_key_mode"] == KeyMode.MAJOR]) normalize_heat_map(major_heat_map) plt.xlabel("Estimated Chord Type", labelpad=-15) plt.ylabel("Ground Truth Chord Type", rotation=90) plt.xticks(ticks=np.arange(len(ChordType) + 2), labels=["Incorrect Root"] + xticks + ["Incorrect Inv."], rotation=90) plt.yticks(ticks=np.arange(len(ChordType)), labels=xticks) plt.tight_layout(pad=0) plt.imshow(major_heat_map, vmin=0, vmax=1) plt.colorbar() plt.savefig("figs/heatmap_major.png", pad_inches=0) minor_heat_map = get_heat_map_matrix(results_df.loc[results_df["gt_key_mode"] == KeyMode.MINOR]) normalize_heat_map(minor_heat_map) plt.xlabel("Estimated Chord Type", labelpad=-15) plt.ylabel("Ground Truth Chord Type", rotation=90) plt.xticks(ticks=np.arange(len(ChordType) + 2), labels=["Incorrect Root"] + xticks + ["Incorrect Inv."], rotation=90) plt.yticks(ticks=np.arange(len(ChordType)), labels=xticks) plt.imshow(minor_heat_map, vmin=0, vmax=1) plt.colorbar() plt.tight_layout(pad=0) plt.savefig("figs/heatmap_minor.png", pad_inches=0) def get_acc_given_inversion(results_df, inv): inv_df = results_df.loc[results_df["gt_chord_inv"] == inv] correct_df = inv_df.loc[inv_df["gt_chord"] == inv_df["est_chord"]] return float(correct_df["duration"].sum() / inv_df["duration"].sum()) results_df = get_results_df("outputs/dcml-csm-1/**/*_results.tsv") for inv in range(4): print(f"Inv {inv} {get_acc_given_inversion(results_df, inv)}") def get_acc(results_df): total_dur = float(results_df["duration"].sum()) correct_dur = float( results_df.loc[ ( (results_df["gt_key"] == results_df["est_key"]) & (results_df["gt_chord"] == results_df["est_chord"]) ), "duration", ].sum() ) return correct_dur / total_dur results_df = get_results_df("outputs/dcml-csm-1/**/*_results.tsv") acc = get_acc(results_df) acc_minor = get_acc(results_df.loc[results_df["gt_key_mode"] == KeyMode.MINOR]) acc_major = get_acc(results_df.loc[results_df["gt_key_mode"] == KeyMode.MAJOR]) print(f"Overall: {acc}") print(f"Minor: {acc_minor}") print(f"Major: {acc_major}") for chord_type in ChordType: try: print(f"{chord_type}: {get_acc(results_df.loc[results_df['gt_chord_type'] == chord_type])}") except: pass mode_type_heat_map = np.zeros((len(KeyMode), len(ChordType))) for i, mode in enumerate(KeyMode): for j, chord_type in enumerate(ChordType): try: acc = get_acc(results_df.loc[(results_df['gt_chord_type'] == chord_type) & (results_df['gt_key_mode'] == mode)]) except: continue print(f"{mode}, {chord_type} = {acc}") mode_type_heat_map[i, j] = acc plt.xlabel("Chord Type", fontsize=12) plt.ylabel("Mode", rotation=90, fontsize=12) plt.xticks(ticks=np.arange(len(ChordType)), labels=xticks, rotation=90, fontsize=12) plt.yticks(ticks=np.arange(len(KeyMode)), labels=["Major", "Minor"], fontsize=12) plt.imshow(mode_type_heat_map, vmin=0, vmax=1) plt.colorbar(orientation="horizontal", shrink=0.5, pad=0.23) plt.tight_layout(pad=0) plt.savefig("figs/acc_by_mode_type.png", pad_inches=0) accs = {} for dir in glob("outputs/dcml-csm-1/*"): results_df_comp = get_results_df(dir + "/**/*_results.tsv") if results_df_comp is None: continue accs[dir.split("/")[-1]] = get_acc(results_df_comp) for key, value in accs.items(): print(f"{key}: {value}")Converting results TSV to chord-eval comparison for ICMPCfrom tqdm import tqdm from glob 
import glob from pathlib import Path import pandas as pd from harmonic_inference.data.data_types import ChordType, PitchType, TRIAD_REDUCTION from harmonic_inference.utils.harmonic_constants import STRING_TO_CHORD_TYPE from harmonic_inference.utils.harmonic_utils import get_pitch_from_string, get_pitch_string in_path = "outputs/icmpc/*/Mozart-Sonatas/*_results.tsv" for results_tsv in tqdm(glob(in_path)): results_df = pd.read_csv(results_tsv, sep="\t") for prefix in ["gt", "est"]: results_df[f"{prefix}_chord_root"] = 0 results_df[f"{prefix}_chord_type"] = 0 results_df[f"{prefix}_chord_inv"] = 0 results_df["root_correct"] = 0 results_df["triad_correct"] = 0 results_df["7th_correct"] = 0 results_df["inv_correct"] = 0 results_df["full_correct"] = 0 for idx, row in results_df.iterrows(): gt_root_str, gt_other_str, gt_inv_str = row["gt_chord"].split(":") gt_chord_type_str, _ = gt_other_str.split(",") gt_root = get_pitch_from_string(gt_root_str, PitchType.MIDI) gt_chord_type = STRING_TO_CHORD_TYPE[gt_chord_type_str] est_root_str, est_other_str, est_inv_str = row["est_chord"].split(":") est_chord_type_str, _ = est_other_str.split(",") est_root = get_pitch_from_string(est_root_str, PitchType.MIDI) est_chord_type = STRING_TO_CHORD_TYPE[est_chord_type_str] results_df.loc[idx, "gt_chord_root"] = gt_root results_df.loc[idx, "gt_chord_type"] = str(gt_chord_type) results_df.loc[idx, "gt_chord_inv"] = gt_inv_str results_df.loc[idx, "est_chord_root"] = est_root results_df.loc[idx, "est_chord_type"] = str(est_chord_type) results_df.loc[idx, "est_chord_inv"] = est_inv_str results_df.loc[idx, "root_correct"] = gt_root == est_root results_df.loc[idx, "triad_correct"] = TRIAD_REDUCTION[gt_chord_type] == TRIAD_REDUCTION[est_chord_type] results_df.loc[idx, "7th_correct"] = gt_chord_type == est_chord_type results_df.loc[idx, "inv_correct"] = gt_inv_str == est_inv_str results_df.loc[idx, "full_correct"] = gt_inv_str == est_inv_str and gt_root == est_root and gt_chord_type == est_chord_type tsv_path = Path(results_tsv) out_path = tsv_path.parent / (tsv_path.name[:-4] + "chord-eval.tsv") results_df.to_csv(out_path, sep="\t")mel scale and frequencyfrom matplotlib import pyplot as plt import numpy as np x = np.linspace(0, 5000, num = 50000) y = 2595 * np.log10(1 + x / 700) # print(len(x)) x0 = 1000 y0 = 2595 * np.log10(1 + x0 / 700) plt.plot(x, y) plt.scatter(x0, y0) plt.plot([x0, x0], [0, y0], 'k--') plt.plot([0, x0], [x0, y0], 'k--') plt.xlabel('f(hz)') plt.ylabel('Mel(f)') plt.title('relationship between linear and mel scale') plt.xlim(0, x[-1]) plt.ylim(0, y[-1]) # plt.savefig('mel_vs_f.png') plt.show()how to use librosa to draw a waveform of a audio filefrom matplotlib import pyplot as plt import numpy as np import librosa # 使用librosa读取音频 inputWavPath = "/home/newdisk/XWLB-GNLBKX/dataset_02/wav/000012.wav" y, sr = librosa.load(inputWavPath) yNum = np.arange(len(y)) # 截取前0.3s的音频 sampleSignal = y[0:int(sr*0.8)] sampleNum = np.arange(len(sampleSignal)) plt.figure(figsize=(11, 7), dpi=500) plt.subplot(211) plt.plot(yNum/sr, y, color='black') plt.plot(sampleNum/sr, sampleSignal, color='blue') plt.xlabel('Time(sec)') plt.ylabel('Amplitude') plt.title('Waveform') plt.subplot(212) plt.plot(sampleNum/sr, sampleSignal, color='blue') plt.xlabel('Time(sec)') plt.ylabel('Amplitude') plt.title('0~0.3s waveform') plt.tight_layout() # plt.savefig('.\\temp\\waveform.png', dpi=500) plt.show()how to draw mel spectrumimport librosa from matplotlib import pyplot as plt import numpy as np inputWavPath = 
"/home/newdisk/XWLB-GNLBKX/dataset_02/wav/000012.wav" sampleRate = 44100 preemphasis = 0.97 nFft = 2205 frameLength = 0.05 frameShift = 0.01 fMin = 0 fMax = sampleRate / 2 eps = 1e-10 nMle = 80 winLength = int(sampleRate * frameLength) hopLength = int(sampleRate * frameShift) melBasis = librosa.filters.mel( sampleRate, nFft, nMle, fmin=fMin, fmax=fMax) def getSpectrogram(inputWavPath): y, sr = librosa.load(inputWavPath) y = np.append(y[0], y[1:]-preemphasis*y[:-1]) #预加重 linear = librosa.stft( y=y, n_fft=nFft, hop_length=hopLength, win_length=winLength) mag = np.abs(linear) mel = np.dot(melBasis, mag) mel = np.log10(np.maximum(eps, mel)) mel = mel.T.astype(np.float32) return mel def plotSpectrogram(spectrogram, filePath): spectrogram = spectrogram.T fig = plt.figure(figsize=(16, 9)) plt.imshow(spectrogram, aspect='auto', origin='lower') plt.colorbar() plt.xlabel('frames') plt.tight_layout() plt.savefig(filePath, dpi=500) plt.show() melspec = getSpectrogram(inputWavPath) plotSpectrogram(melspec, '../temp/mel_spectrogram.png')extract MFCCimport librosa from matplotlib import pyplot as plt import numpy as np from scipy.fftpack import dct num_ceps = 12 #MFCC阶数,可选2~13 mfcc = dct(melspec, type=2, axis=1, norm='ortho')[:, 1:(num_ceps + 1)] plotSpectrogram(mfcc, '../temp/mfcc.png') (nframes, ncoeff) = mfcc.shape cep_lifter = 22 n = np.arange(ncoeff) lift = 1 + (cep_lifter / 2) * np.sin(np.pi * n / cep_lifter) mfcc *= lift plotSpectrogram(mfcc, '../temp/mfcc_list.png')均值方差归一化frame_num = melspec.shape[0] cep_sum = np.sum(melspec, axis=0) cep_squ_sum = np.sum(np.square(melspec), axis=0) cep_mean = cep_sum / frame_num cep_std = cep_squ_sum / frame_num - np.square(cep_mean)pip: [pip wiki](https://es.wikipedia.org/wiki/Pip_(administrador_de_paquetes)) pypi: [pypi](https://pypi.org/) ----[pandas](https://pypi.org/project/pandas/)----[numpy](https://numpy.org/)--- sintaxis```pip install nombre_paquetepip install nombre_paquete==X.Y.Zpip install --upgrade nombre_paquetepip install -r requirements.txtpip install git+https://github.com/repositorio/nombre_paquete```import pandas as pd pip install pandas pip install pandas==1.2.5 import numpy as np pip install numpy pip install --upgrade pip[ejemplo de requirements.txt](https://github.com/Yelp/requirements-tools)# pip install pip==21.1.3 pip list pip freezeabsl-py==0.12.0 aionotify==0.2.0 alabaster==0.7.12 anaconda-client==1.7.2 anaconda-navigator==1.10.0 anaconda-project==0.8.3 anyio==3.0.1 applaunchservices==0.2.1 appnope @ file:///opt/concourse/worker/volumes/live/0291c9e1-4b15-459f-623e-2770f55be269/volume/appnope_1594338395037/work appscript @ file:///opt/concourse/worker/volumes/live/50ca4c96-3090-40bb-6981-3a6114ed0af4/volume/appscript_1594840187551/work argh==0.26.2 argon2-cffi @ file:///opt/concourse/worker/volumes/live/59af29ac-4890-416e-7ab7-794f8d6f7ecd/volume/argon2-cffi_1596828548321/work asn1crypto @ file:///tmp/build/80754af9/asn1crypto_1596577642040/work astroid @ file:///opt/concourse/worker/volumes/live/21fd14a9-2a7e-484b-7394-5a9912cdcf80/volume/astroid_1592498459180/work astropy==4.0.2 astunparse==1.6.3 async-generator==1.10 atomicwrites==1.4.0 attrs @ file:///tmp/build/80754af9/attrs_1604765588209/work autopep8 @ file:///tmp/build/80754af9/autopep8_1596578164842/work Babel @ file:///tmp/build/807[...]Football Predictor Model In this notebook we develop a football match result predictor from scratch with sklearn library, testing differents models and compare performance of each one. 
The idea is then use this model for a more complex system, create a API Rest Load, clean and Transform dataimport numpy as np import matplotlib.pyplot as plt from matplotlib.ticker import NullFormatter import pandas as pd import numpy as np import matplotlib.ticker as ticker from sklearn import preprocessing from sklearn import metrics df = pd.read_csv('dataset.csv') df.head() df['result'] = df.apply(lambda x : 'wins' if x.home_score > x.away_score else 'loses' if x.home_score < x.away_score else 'draws', axis=1) df.head() # dic = eval(df['home_team'].values) teams = list(dict.fromkeys(df['home_team'].values.tolist() + df['away_team'].values.tolist())) dic = { teams[i] : (i+1) for i in range(len(teams)) } df['home_team_id']= df['home_team'].map(dic) df['away_team_id']= df['away_team'].map(dic) df.head() X = df[ ['home_team_id', 'away_team_id'] ].values X[0:5] y = df['result'].values y[0:5] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4) print ('Train set:', X_train.shape, y_train.shape) print ('Test set:', X_test.shape, y_test.shape) classifier = [] accuracy = []Train set: (33268, 2) (33268,) Test set: (8318, 2) (8318,)Compare classifiersfrom sklearn.neighbors import KNeighborsClassifier MAX_K = 50 mean_acc = np.zeros((MAX_K-1)) std_acc = np.zeros((MAX_K-1)) ConfustionMx = []; for n in range(1,MAX_K): #Train Model model = KNeighborsClassifier(n_neighbors = n).fit(X_train,y_train) # Predict yhat = model.predict(X_test) # Measure accuracy from model with K = n mean_acc[n-1] = metrics.accuracy_score(y_test, yhat) std_acc[n-1]=np.std(yhat==y_test)/np.sqrt(yhat.shape[0]) # Now I plot results and choose the best K plt.plot(range(1,MAX_K),mean_acc,'g') plt.fill_between(range(1,MAX_K),mean_acc - 1 * std_acc,mean_acc + 1 * std_acc, alpha=0.10) plt.legend(('Accuracy ', '+/- 3xstd')) plt.ylabel('Accuracy ') plt.xlabel('Number of Nabors (K)') plt.tight_layout() plt.show() print( "The best accuracy was with", mean_acc.max(), "with k=", mean_acc.argmax()+1) KNN_K = mean_acc.argmax()+1 KNN = KNeighborsClassifier(n_neighbors = KNN_K).fit(X_train,y_train) classifier.append("KNeighborsClassifier") accuracy.append(mean_acc.max()) from sklearn.tree import DecisionTreeClassifier MAX_DEPTH = 50 mean_acc = np.zeros((MAX_K-1)) std_acc = np.zeros((MAX_K-1)) ConfustionMx = []; for n in range(1,MAX_DEPTH): #Train Model model = DecisionTreeClassifier(criterion="entropy", max_depth = n).fit(X_train,y_train) # Predict yhat = model.predict(X_test) # Measure accuracy from model with depth = n mean_acc[n-1] = metrics.accuracy_score(y_test, yhat) std_acc[n-1]=np.std(yhat==y_test)/np.sqrt(yhat.shape[0]) # Now I plot results and choose the best K plt.plot(range(1,MAX_DEPTH),mean_acc,'g') plt.fill_between(range(1,MAX_DEPTH),mean_acc - 1 * std_acc,mean_acc + 1 * std_acc, alpha=0.10) plt.legend(('Accuracy ', '+/- 3xstd')) plt.ylabel('Accuracy ') plt.xlabel('Max depth (K)') plt.tight_layout() plt.show() print( "The best accuracy was with", mean_acc.max(), "with depth=", mean_acc.argmax()+1) DT_DEPTH = mean_acc.argmax()+1 DT = DecisionTreeClassifier(criterion="entropy", max_depth = DT_DEPTH).fit(X_train,y_train) classifier.append("DecisionTreeClassifier") accuracy.append(mean_acc.max()) from sklearn.linear_model import LogisticRegression MAX_C = 50 logloss_liblinear = np.zeros((MAX_C-1)) mean_acc_liblinear = np.zeros((MAX_C-1)) for n in range(1,MAX_C): #Train Model lr = LogisticRegression(C=(n/100), 
solver='liblinear').fit(X_train,y_train) # Predict yhat = lr.predict(X_test) # Measure accuracy from model with C = n # logloss_liblinear[n-1] = log_loss(y_test, yhat) mean_acc_liblinear[n-1] = metrics.accuracy_score(y_test, yhat) logloss_sag = np.zeros((MAX_C-1)) mean_acc_sag = np.zeros((MAX_C-1)) std_acc_sag = np.zeros((MAX_C-1)) for n in range(1,MAX_C): #Train Model lr = LogisticRegression(C=(n/100), solver='sag').fit(X_train,y_train) # Predict yhat = lr.predict(X_test) # Measure accuracy from model with C = n # logloss_sag[n-1] = log_loss(y_test, yhat) mean_acc_sag[n-1] = metrics.accuracy_score(y_test, yhat) std_acc_sag[n-1]=np.std(yhat==y_test)/np.sqrt(yhat.shape[0]) # Now I plot results and choose the best model plt.plot(range(1,MAX_C),mean_acc_liblinear,'g') plt.plot(range(1,MAX_C),mean_acc_sag,'b') plt.legend(('liblinear ', 'sag')) plt.ylabel('Accuracy ') plt.xlabel('Regularization (C)') plt.tight_layout() plt.show() print( "The best accuracy liblinear was with", mean_acc_liblinear.max(), "with C=", (mean_acc_liblinear.argmax()+1)/100) print( "The best accuracy sag was with", mean_acc_sag.max(), "with C=", (mean_acc_sag.argmax()+1)/100) classifier.append("LogisticRegression") accuracy.append(mean_acc_liblinear.max()) from sklearn.ensemble import RandomForestClassifier RF = RandomForestClassifier(n_estimators = 200, random_state = 1, class_weight = 'balanced') RF.fit(X_train,y_train) yhat = RF.predict(X_test) mean_acc = metrics.accuracy_score(y_test, yhat) print( "Accuracy RandomForest was", mean_acc) classifier.append("RandomForestClassifier") accuracy.append(mean_acc) from sklearn.ensemble import AdaBoostClassifier AB = AdaBoostClassifier(n_estimators = 200, random_state = 2) AB.fit(X_train,y_train) yhat = AB.predict(X_test) mean_acc = metrics.accuracy_score(y_test, yhat) print( "Accuracy AdaBoost was", mean_acc) classifier.append("AdaBoostClassifier") accuracy.append(mean_acc) from sklearn.naive_bayes import GaussianNB GNB = GaussianNB() GNB.fit(X_train,y_train) yhat = GNB.predict(X_test) mean_acc = metrics.accuracy_score(y_test, yhat) print( "Accuracy GaussianNB was", mean_acc) classifier.append("GaussianNB") accuracy.append(mean_acc) import matplotlib.pyplot as plt; plt.rcdefaults() import numpy as np import matplotlib.pyplot as plt y_pos = np.arange(len(classifier)) plt.barh(y_pos, accuracy, align='center', alpha=0.5) plt.yticks(y_pos, classifier) plt.xlabel('Accuracy') plt.title('Classifier type')Based on resultds we adapt the following classifier for our footbal predictor model:model = AdaBoostClassifier(n_estimators = 200, random_state = 2).fit(X_train,y_train)Test our model Is time to test our model, we first test it in an scenario that never occurs, football match between Argentina and Galicia. We expect to Argentina wins this match :)index = df.index[((df['home_team'] == 'Argentina') & (df['away_team'] == 'Galicia')) | (df['home_team'] == 'Galicia') & (df['away_team'] == 'Argentina') ].tolist() indexWe just verify that in our model this match never occurs. It is time to play our virtual match!home_team = dic['Argentina'] away_team = dic['Galicia'] print("Argentina team idx", home_team) print("Galicia team idx", away_team) x1 = [[home_team,away_team]] yhat = model.predict(x1) yhatArgentina wins! 
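The raw prediction is just one of the three result labels. As a small optional addition (not part of the original notebook), the fitted AdaBoost model also exposes predict_proba, so you can see how confident that call is:
# Optional: inspect the label order and the predicted probability of each outcome
print(model.classes_)
print(model.predict_proba(x1))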
Now we could try with another example more realistichome_team = dic['Paraguay'] away_team = dic['Argentina'] x1 = [[home_team,away_team]] yhat = model.predict(x1) yhatSave model and schema In this part we are goint to save our model for future use and save the schema dictionary that we have use to test our modelfrom sklearn.externals import joblib import sqlite3 # Save model joblib.dump(model, 'football_predictor_model.pkl')Create schemaconn = sqlite3.connect('football.db') # The database will be saved in the location where your 'py' file is saved c = conn.cursor() c.execute(''' DROP TABLE IF EXISTS results; ''') c.execute(''' DROP TABLE IF EXISTS teams; ''') c.execute('''CREATE TABLE teams( [id] INTEGER PRIMARY KEY NOT NULL, [country] TEXT NOT NULL ) ''') c.execute('''CREATE TABLE results( [home_team_id] INTEGER REFERENCES teams(id) NOT NULL, [away_team_id] INTEGER REFERENCES teams(id) NOT NULL, [home_score] INTEGER NOT NULL, [away_score] INTEGER NOT NULL, [result] TEXT NOT NULL ) ''')Save dictionaryteams = pd.DataFrame(dic.items(), columns=['country', 'id']) teams.head() teams.to_sql('teams', conn, if_exists='replace', index = False)Save resultsresults = df[ ['home_team_id', 'away_team_id', 'home_score', 'away_score', 'result'] ] results.head() results.to_sql('results', conn, if_exists='replace', index = False)ニューストピック分類 Base Modelの評価 with GPU Google ドライブの接続 以下を実行。表示されるurlからGoogleアカウントにサイン。表示される認証用のコードを以下に表示されるテキストボックスにコピペ。 → サインインしたGoogleアカウントのGoogleドライブがマウントされる。from google.colab import drive drive.mount("/content/drive/")Mounted at /content/drive/主な設定data_path = '/content/drive/My Drive/Colab Notebooks/data' # 作業用フォルダライブラリのインストール!pip install transformers==4.10 !pip install datasets==1.11 !pip install fugashi==1.1 !pip install ipadic==1.0 import os import torch import pandas as pd from transformers import BertForSequenceClassification, BertJapaneseTokenizerモデルの読み込みloaded_model = BertForSequenceClassification.from_pretrained(data_path) loaded_model.cuda() loaded_tokenizer = BertJapaneseTokenizer.from_pretrained(data_path)テストデータの読み込みcat_info = pd.read_csv(os.path.join(data_path, "cat_info.csv"), index_col=False) cat_names = cat_info['name'] cat_names news_test = pd.read_csv( os.path.join(data_path, "news_test.csv" ), index_col=False) news_test.head()サンプルデータの予測# 形態素解析 MAX_LENGTH = 512 def tokenize(text): words = loaded_tokenizer.tokenize(text) word_ids = loaded_tokenizer.convert_tokens_to_ids(words) # インデックスに変換 word_tensor = torch.tensor([word_ids[:MAX_LENGTH]]) # テンソルに変換 return word_tensor index = 30 sample_text = news_test.loc[index, 'text'] sample_label = news_test.loc[index, 'label'] print('text :', sample_text) print('true label:', sample_label, cat_names[sample_label]) x = tokenize(sample_text) x = x.cuda() # GPU対応 y = loaded_model(x) # 予測 pred = y[0].argmax(-1).item() # 最大値のインデックス print("pred label:", pred, cat_names[pred])text : 
「ゴルフ婚活」や「料理婚活」など、ただ食事やお酒を共にするだけでは無く、イベント性をプラスした"婚活"はもはや定番。共通の話題が生まれやすく、自然と仲良くなれるので、女性からもアプローチがしやすい所が魅力的です。そんな中、結婚情報サイト「youbride(ユーブライド)」が3日、代々木公園の清掃を通して交流を深める、地球にやさしいエコと婚活のイベント「クリーンアップ婚活イベント」を開催しました。エコと婚活の融合なんて、新鮮でイマドキ!清々しい秋晴れの日、集まった「youbride」会員は男性20名、女性20名の計40名。「清掃活動って疲れそう…」と思う方が多いかもしれませんが、「クリーンアップ婚活イベント」はあくまで、外の気持ちの良い空気を楽しみながら、街をキレイにしていく"プチ"エコ活動。高すぎるヒールでなければ、ブーツやパンプスでお洒落をして参加できるのが魅力的です。代々木公園の入口から、それぞれチームを作ってイベントがスタート。男性がゴミをつかむトングを、女性がゴミ袋を持つので自然と連携がとれ、2人は急接近。タバコの吸いがらが道に落ちている事が多かった事から「タバコは吸いますか?」「吸いますが、携帯灰皿を持ち歩いています」等と、普通の婚活パーティでは出ない話題も飛び出し、お互いの事をよく知る事が出来ました。エコ活動終了後は、ワインやお料理が楽しめるカフェバーに移動してパーティがスタート。「お疲れ様でした!」の乾杯の後、自由に会話を楽しみました。温泉旅行やギフト券が当たるゲームは、エコ活動で気が合ったパートナー、その時には話せなかった参加者の垣根を越えて大盛り上がり。終始和やかで楽しいパーティに。気の合った者同士はアドレス交換するなど、しっかり婚活の主旨も忘れていませんでした。イベントに参加した方々からは終了後、参加者に感想を聞いたアンケートでは、ほとんどの方が「こういったイベント型の婚活パーティははじめてだったので楽しかったです」「皆さん、自然に関心がある方で色々とお話が出来ました」と好感触。椅子に座ってテーブル越しにお話をするだけでは分からない、人柄を知ることが出来る新鮮な体験だった様です。普通の婚活じゃつまらない、仲良くなれないと嘆く婚活女子の皆さんは今回の「クリーンアップ婚活」の様な“進化型婚活”をチェックしてみてはいかが?・youbrid[...]精度評価と性能評価from sklearn.metrics import accuracy_score from tqdm import tqdm test_texts = news_test['text'] test_labels = news_test['label'] %%time preds = [] for text in tqdm(test_texts): x = tokenize(text) x = x.cuda() # GPU対応 y = loaded_model(x) # 予測 pred = y[0].argmax(-1).item() # 最大値のインデックス preds.append(pred) print(accuracy_score(test_labels, preds))0.9381107491856677Numerical Integration and Reaction KineticsIn addition to processing experimental data from the real world, Python can also be used to generate simulations of physical systems that change over time. In this notebook, we will practice performing numerical integration to solve systems of differential equations that describe chemical systems. To simulate a physical system, we need to describe how that system changes over time as a function of its current state. This description often takes the form of a system of ordinary differential equations (ODEs). Although solving ODEs analytically is sometimes difficult or impossible, their solutions can be approximated by numerically integrating them over time, given some initial conditions. Python provides a collection of powerful general-purpose numeral integration tools that can be used for solving an initial value problem (IVP) of this kind. We will be using the `solve_ivp` function for this purpose. The `solve_ivp` function takes three inputs:1. An arbitrary function describing the derivative of the variable(s)2. A time span on which to compute the solution to the IVP3. The initial conditions at the beginning of the time spanThe function returns a bundle of information to us. In particular it gives us the following:1. An array of times within the range specified in the input2. The value of the function at every time in the arrayLearn more about how `solve_ivp` works here: https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html Example 1: Radioactive DecayYou have successfully synthesized a 10 mg sample of yttrium-87 and want to how much will be left after a month. Since $^{87}\text{Y}$ undergoes beta decay to $^{87}\text{Sr}$ with a half-life of about $t_{1/2} \approx 3.4\ \text{days}$, we can describe the amount of $^{87}\text{Y}$ over time with the following initial value problem.$$ \frac{\text{d}y}{\text{d}t} = - \frac{\ln(2)}{t_{1/2}}y \qquad \qquad y(0) = y_0 $$Here $y$ is the mass of yttrium-87 that changes over time $t$, while and $y_0 = 10\ \text{mg}$ is the initial amount at $t=0$. 
Here's how we compute the solution in Python:import numpy as np from scipy.integrate import solve_ivp import matplotlib.pyplot as plt # define constants t12 = 3.4 # half-life of 3.4 days y0 = [10] # starting with 10 mg (has to be in a list or array) # the derivitive of y as a function of t and y def yprime(t, y): return - (np.log(2) / t12) * y # we want to see how the system changes over one month t_span = [0, 31] # compute the solution sol = solve_ivp(yprime, t_span, y0) # unpack the pieces we want t = sol.t # an array of times y = sol.y[0] # the value of the function at each time # plot the results plt.figure(figsize=(10,3)) plt.plot(t, y) plt.title("Mass of yttrium-87 over time") plt.xlabel("time (days)") plt.ylabel("mass (mg)") plt.show()The solution makes sense because if we solve this IVP analytically by normal methods of solving differential equations, we obtain a decaying exponential function. Try modifying $t_{1/2}$ and $y_0$ to see how the output changes. Although an analytical solution is easy to obtain for this system, using Python is much easier for more complex IVPs.You may have noticed a couple of strange things in the example above. When specifying the initial value `y0 = [10]` it was required to contain it inside a list or array. Additionally, we extracted the solution with `sol.y[0]`. The reason for both is that `solve_ivp` is designed to work for IVPs with any number of variables. Next we will explore an example of a such a multi-variable IVP. Example 2: Predator-Prey DynamicsIn the nearby area there are populations of both hawks and rabbits. When there are lots of rabbits, the hawks thrive on an abundance of food, decimating the rabbit population. But as their food source dwindles, the hawk population falls, leading to a resurgence of rabbits as they freely reproduce. We can use the [Lotka-Volterra Model](https://en.wikipedia.org/wiki/Lotka%E2%80%93Volterra_equations) to simulate this behavior. If $r$ represents the number of rabbits and $h$ represents the number of hawks, then the population dynamics are described by the following IVP.\begin{align*}\frac{\text{d}r}{\text{d}t} &= a r - b rh & r(0) &= r_0 \\\frac{\text{d}h}{\text{d}t} &= -c h + d rh & h(0) &= h_0 \\\end{align*}For this simulation, let $a=8$, $b=2$, $c=3$, and $d=1$. Assume we start with $r_0 = 50$ rabbits and $h_0 = 50$ hawks.from scipy.integrate import solve_ivp import matplotlib.pyplot as plt # define constants a = 8 b = 2 c = 3 d = 1 # array of initial conditions [r0, h0] y0 = [50, 50] # the derivatives of both r0 and h0 over time def yprime(t, y): r = y[0] # unpack arguments h = y[1] rprime = a*r - b*r*h # compute derivatives hprime = -c*h + d*r*h return [rprime, hprime] # pack them up again # specify time span of solution t_span = [0, 20] # compute the solution sol = solve_ivp(yprime, t_span, y0) # unpack the pieces we want t = sol.t # an array of times r = sol.y[0] # unpack both variables h = sol.y[1] # plot the results plt.figure(figsize=(10,3)) plt.plot(t, r) plt.plot(t, h) plt.title("Lotka-Volterra Model") plt.xlabel("time (years)") plt.ylabel("population (individuals)") plt.legend(["rabbits", "hawks"]) plt.show()As expected, the rabbit and hawk populations oscillate over time. **Python Question 1**You can now apply these concepts to simulate a chemical reaction with first-order kinetics. Consider the following reversible association/dissociation reaction. 
This could represent an acid-base or solubility process, for example.$$ \text{A} + \text{B} \quad {}_{\xleftarrow[k_2]{}}^{ \xrightarrow{k_1}} \quad \text{AB} \\[0.5em] $$Assuming a first order kinetics mechanism, the system is described by the following IVP (make sure you understand how this was derived).$$ \begin{align*}\frac{\text{d}[\text{A}]}{\text{d}t} &= - k_1 [\text{A}][B] + k_2[\text{AB}] & \left [\text{A}] \right |_{t=0} &= [\text{A}]_0 \\\frac{\text{d}[\text{B}]}{\text{d}t} &= - k_1 [\text{A}][\text{B}] + k_2[\text{AB}] & \left [\text{B}] \right |_{t=0} &= [\text{B}]_0 \\\frac{\text{d}[\text{AB}]}{\text{d}t} &= k_1 [\text{A}][\text{B}] - k_2[\text{AB}] & \left [\text{AB}] \right |_{t=0} &= [\text{AB}]_0\end{align*} $$Assume the initial conditions $[\text{A}]_0 = 0.1\ \text{M}$, $[\text{B}]_0 = 0.2\ \text{M}$, and $[\text{AB}]_0 = 0\ \text{M}$. Let the rate constants be $k_1 = 0.5 \ \text{M}^{-1}\text{s}^{-1}$ and $k_2 = 0.01 \ \text{s}^{-1}$. Complete the code below to simulate the reaction over the course of 120 seconds.from scipy.integrate import solve_ivp import matplotlib.pyplot as plt # define constants k1 = ??? k2 = ??? # define initial conditions [A0, B0, AB0] y0 = [???, ???, ???] # the derivatives of all chemical species over time def yprime(t, y): A, B, AB = y[0], y[1], y[2] # unpack arguments Aprime = ??? # compute derivatives Bprime = ??? ABprime = ??? return [Aprime, Bprime, ABprime] # pack them up again # specify time span of solution t_span = [0, 20] # compute the solution sol = solve_ivp(yprime, t_span, y0) # unpack the pieces we want t = sol.t # an array of times A = sol.y[0] # unpack both variables B = sol.y[1] C = sol.y[2] # plot the results plt.figure(figsize=(10,3)) plt.plot(t, A) plt.plot(t, B) plt.plot(t, C) plt.title("First Order Kinetics") plt.xlabel("time (s)") plt.ylabel("concentration (M)") plt.legend(["[A]", "[B]", "[C]"]) plt.show()DAT210x - Programming with Python for DS Module5- Lab7import random, math import pandas as pd import numpy as np import scipy.io from mpl_toolkits.mplot3d import Axes3D import matplotlib import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split matplotlib.style.use('ggplot') # Look Pretty # Leave this alone until indicated: Test_PCA = FalseA Convenience Function This method is for your visualization convenience only. You aren't expected to know how to put this together yourself, although you should be able to follow the code by now:def plotDecisionBoundary(model, X, y): print("Plotting...") fig = plt.figure() ax = fig.add_subplot(111) padding = 0.1 resolution = 0.1 #(2 for benign, 4 for malignant) colors = {2:'royalblue', 4:'lightsalmon'} # Calculate the boundaris x_min, x_max = X[:, 0].min(), X[:, 0].max() y_min, y_max = X[:, 1].min(), X[:, 1].max() x_range = x_max - x_min y_range = y_max - y_min x_min -= x_range * padding y_min -= y_range * padding x_max += x_range * padding y_max += y_range * padding # Create a 2D Grid Matrix. The values stored in the matrix # are the predictions of the class at at said location xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution), np.arange(y_min, y_max, resolution)) # What class does the classifier say? Z = model.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) # Plot the contour map plt.contourf(xx, yy, Z, cmap=plt.cm.seismic) plt.axis('tight') # Plot your testing points as well... 
for label in np.unique(y): indices = np.where(y == label) plt.scatter(X[indices, 0], X[indices, 1], c=colors[label], alpha=0.8) p = model.get_params() plt.title('K = ' + str(p['n_neighbors'])) plt.show()The Assignment Load in the dataset, identify nans, and set proper headers. Be sure to verify the rows line up by looking at the file in a text editor.df = pd.read_csv('./Datasets/breast-cancer-wisconsin.data', header=None, na_values='?') # NaNs in data set are '?' df.columns = ['sample', 'thickness', 'size', 'shape', 'adhesion', 'epithelial', 'nuclei', 'chromatin', 'nucleoli', 'mitoses', 'status'] # Do any dropna - Note later steps tell you to use fillna instead df.head(5)Copy out the status column into a slice, then drop it from the main dataframe. Always verify you properly executed the drop by double checking (printing out the resulting operating)! Many people forget to set the right axis here.If you goofed up on loading the dataset and notice you have a `sample` column, this would be a good place to drop that too if you haven't already.# Retrieve labels as Series labels = pd.Series(df.loc[:, 'status']) # Drop 'sample' and 'status' columns from main dataframe keepdfcols = ['thickness', 'size', 'shape', 'adhesion', 'epithelial', 'nuclei', 'chromatin', 'nucleoli', 'mitoses'] df = df.loc[:, keepdfcols ] df.head(5)With the labels safely extracted from the dataset, replace any nan values with the mean feature / column value:# Fill meanvalues dict with column averages meanvalues = {} for col in df.columns: meanvalues[col] = df.loc[:, col].mean() print("Mean Values:") print(meanvalues) df = df.fillna(value=meanvalues) print("\nNaN as Mean Check: {0:f} (Should display as decimal fraction)".format(df.loc[23, 'nuclei']))Mean Values: {'thickness': 4.417739628040057, 'size': 3.13447782546495, 'shape': 3.207439198855508, 'adhesion': 2.8068669527896994, 'epithelial': 3.216022889842632, 'nuclei': 3.5446559297218156, 'chromatin': 3.4377682403433476, 'nucleoli': 2.866952789699571, 'mitoses': 1.5894134477825466} NaN as Mean Check: 3.544656 (Should display as decimal fraction)Do train_test_split. Use the same variable names as on the EdX platform in the reading material, but set the random_state=7 for reproducibility, and keep the test_size at 0.5 (50%).X = df y = labels X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=7)Experiment with the basic SKLearn preprocessing scalers. We know that the features consist of different units mixed in together, so it might be reasonable to assume feature scaling is necessary. Print out a description of the dataset, post transformation. Recall: when you do pre-processing, which portion of the dataset is your model trained upon? Also which portion(s) of your dataset actually get transformed?# Probably want some form of scaling - start with Normalization. # For pre-processing, scale both training and testing set. Labels don't need to be scaled. 
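# Note on data leakage: fit the chosen scaler on X_train only, then use that same fitted scaler
# to transform both X_train and X_test (which is exactly what the cell below does).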
from sklearn.preprocessing import Normalizer from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import MaxAbsScaler from sklearn.preprocessing import StandardScaler # Rotate through these preprocessors to see effect on scoring # BE SURE TO RESTART FROM TOP INSTRUCTION AND NOT THIS STEP AFTER CHANGING THE PREPROCESSOR # preproc = Normalizer().fit(X_train) preproc = MinMaxScaler().fit(X_train) # preproc = MaxAbsScaler().fit(X_train) # preproc = StandardScaler().fit(X_train) # preproc = X_train # No change print("Prior scaling:") print(X_train.head(5)) X_train = pd.DataFrame(preproc.transform(X_train)) X_test = pd.DataFrame(preproc.transform(X_test)) print("After scaling:") print(X_train.head(5))Prior scaling: thickness size shape adhesion epithelial nuclei chromatin \ 28 2 1 1 1 2 1.0 2 376 1 1 1 1 2 1.0 2 453 4 5 5 8 6 10.0 10 401 3 1 1 1 2 1.0 1 254 9 10 10 1 10 8.0 3 nucleoli mitoses 28 1 1 376 1 1 453 7 1 401 1 1 254 3 1 After scaling: 0 1 2 3 4 5 6 \ 0 0.111111 0.000000 0.000000 0.000000 0.111111 0.000000 0.111111 1 0.000000 0.000000 0.000000 0.000000 0.111111 0.000000 0.111111 2 0.333333 0.444444 0.444444 0.777778 0.555556 1.000000 1.000000 3 0.222222 0.000000 0.000000 0.000000 0.111111 0.000000 0.000000 4 0.888889 [...]Dimensionality Reduction PCA and Isomap are your new best friendsmodel = None if Test_PCA: print('Computing 2D Principle Components') # TODO: Implement PCA here. Save your model into the variable 'model'. # You should reduce down to two dimensions. from sklearn.decomposition import PCA import pandas as pd # pca = PCA(n_components=2, svd_solver='full') pca = PCA(n_components=2) model = pca.fit(X_train) else: print('Computing 2D Isomap Manifold') # TODO: Implement Isomap here. Save your model into the variable 'model' # Experiment with K values from 5-10. # You should reduce down to two dimensions. from sklearn import manifold iso = manifold.Isomap(n_neighbors=5, n_components=2) model = iso.fit(X_train)Computing 2D Isomap ManifoldTrain your model against data_train, then transform both `data_train` and `data_test` using your model. You can save the results right back into the variables themselves.data_train = model.transform(X_train) data_test = model.transform(X_test)Implement and train `KNeighborsClassifier` on your projected 2D training data here. You can name your variable `knmodel`. You can use any `K` value from 1 - 15, so play around with it and see what results you can come up. Your goal is to find a good balance where you aren't too specific (low-K), nor are you too general (high-K). You should also experiment with how changing the weights parameter affects the results.from sklearn.neighbors import KNeighborsClassifier # Uncomment this section to run trials to find best K # accuracies = [] # # for neighbors in range(15): # knn = KNeighborsClassifier(n_neighbors=(neighbors + 1), weights='uniform') # # knn.fit(data_train, y_train) # # acc = knn.score(data_test, y_test) # # accuracies.append(acc) # # print(str(neighbors + 1), ': ', str(acc)) # # pdacc = pd.Series(accuracies) # print(pdacc.unique()) # # knmodel = knn # Put best K value model into this, and run next steps, while commenting out trial loop above. 
# These values manually transcribed from loop output: # - From Normalizer, best K seems to be 3 (acc: 0.845714285714) # - From MinMaxScaler, best K seems to be 3 (acc: 0.965714285714) # - From MaxAbsScaler, best K seems to be 3, 4, or 5 (acc: 0.957142857143) # - From StandardScaler, best K seems to be 3 (acc: 0.957142857143) # Best is K = 3, MinMaxScaler # NOTE: Best weights overall is 'distance', but this lowers the score for K = 3, MinMaxScaler by a couple # tenths of a percent. knn = KNeighborsClassifier(n_neighbors=3, weights='distance') knn.fit(data_train, y_train) knmodel = knnBe sure to always keep the domain of the problem in mind! It's WAY more important to errantly classify a benign tumor as malignant, and have it removed, than to incorrectly leave a malignant tumor, believing it to be benign, and then having the patient progress in cancer. Since the UDF weights don't give you any class information, the only way to introduce this data into SKLearn's KNN Classifier is by "baking" it into your data. For example, randomly reducing the ratio of benign samples compared to malignant samples from the training set. Calculate and display the accuracy of the testing set:knn.score(data_test, y_test) # plotDecisionBoundary(knmodel, X_test, y_test) plotDecisionBoundary(knmodel, data_test, y_test) # Note increating K makes the plot less grainy and the number of blue outliers in the red domain decreasePlotting...This exercise will require you to pull some data from the Qunadl API. Qaundl is currently the most widely used aggregator of financial market data. As a first step, you will need to register a free account on the http://www.quandl.com website. Additional steps added to exercies: 1) Add .env to .gitignore file 2) Create .env file for API key 3) Add your API_KEY to the .env file 4) Intall python-dotenv 5) Import necessary modules to read the key Article with additional info here: http://jonathansoma.com/lede/foundations-2019/classes/apis/keeping-api-keys-secret/# 1) Done in git or github the key should only be on local machines # 2) If values already exist it shouldn't overwrite them. Step varies depending on operating system. # This works on Windows. # !type NUL >> .env # This works on Mac. # !touch .env # 3) Done manually - Your .env file should have PROJECT_API_KEY={Your_Key} # 4) Use pip to install dotenv - Only needs to be done once - First time using the ! to install stuff # !pip install python-dotenv # 5) Need os and dotenv import os from dotenv import load_dotenv load_dotenv()Returning to the original projectAfter you register, you will be provided with a unique API key, that you should store:# Store the API key as a string - according to PEP8, constants are always named in all upper case API_KEY = os.getenv('PROJECT_API_KEY')Qaundl has a large number of data sources, but, unfortunately, most of them require a Premium subscription. Still, there are also a good number of free datasets. For this mini project, we will focus on equities data from the Frankfurt Stock Exhange (FSE), which is available for free. We'll try and analyze the stock prices of a company called Carl Zeiss Meditec, which manufactures tools for eye examinations, as well as medical lasers for laser eye surgery: https://www.zeiss.com/meditec/int/home.html. The company is listed under the stock ticker AFX_X. 
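One optional sanity check before calling the API (an addition, not part of the original exercise): confirm the key really was loaded from the .env file. This assumes the PROJECT_API_KEY variable name used above.
# Optional check: fail early with a clear message if the key did not load from .env
assert os.getenv('PROJECT_API_KEY') is not None, 'PROJECT_API_KEY not found - check your .env file'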
You can find the detailed Quandl API instructions here: https://docs.quandl.com/docs/time-series While there is a dedicated Python package for connecting to the Quandl API, we would prefer that you use the *requests* package, which can be easily downloaded using *pip* or *conda*. You can find the documentation for the package here: http://docs.python-requests.org/en/master/ Finally, apart from the *requests* package, you are encouraged to not use any third party Python packages, such as *pandas*, and instead focus on what's available in the Python Standard Library (the *collections* module might come in handy: https://pymotw.com/3/collections/ ).Also, since you won't have access to DataFrames, you are encouraged to us Python's native data structures - preferably dictionaries, though some questions can also be answered using lists.You can read more on these data structures here: https://docs.python.org/3/tutorial/datastructures.html Keep in mind that the JSON responses you will be getting from the API map almost one-to-one to Python's dictionaries. Unfortunately, they can be very nested, so make sure you read up on indexing dictionaries in the documentation provided above.# First, import the relevant modules import requests import collections import math database_code='FSE' dataset_code='AFX_X' return_format='json' frequency='daily' # Dates are 'YYYY-MM-DD' start_date='2020-11-06' end_date='2020-11-06' order='desc' api_key=API_KEY # Now, call the Quandl API and pull out a small sample of the data (only one day) to get a glimpse # into the JSON structure that will be returne # API usage example: https://docs.quandl.com/docs/in-depth-usage # Frankfurt Stock Exchange: https://www.quandl.com/data/FSE-Frankfurt-Stock-Exchange # url starts with = 'https://www.quandl.com/api/v3/datasets/' # Quandle Guides: https://docs.quandl.com/docs/time-series url = 'https://www.quandl.com/api/v3/datasets/{}/{}.{}?collapse={}&start_date={}&end_date={}&order={}&api_key={}' res = requests.get(url.format(database_code, dataset_code, return_format, frequency, start_date, end_date, order, API_KEY)) print(res.status_code) print(res.headers) # The start and end date are 2020-11-06 print(res.text) # Inspect the JSON structure of the object you created, and take note of how nested it is, # as well as the overall structure json = res.json() print(json) print(json['dataset']['column_names']) print(json['dataset']['data'])[['2020-11-06', 115.2, 115.8, 114.8, 115.1, None, 190.0, 21831.0, None, None, None]]These are your tasks for this mini project:1. Collect data from the Franfurt Stock Exchange, for the ticker AFX_X, for the whole year 2017 (keep in mind that the date format is YYYY-MM-DD).2. Convert the returned JSON object into a Python dictionary.3. Calculate what the highest and lowest opening prices were for the stock in this period.4. What was the largest change in any one day (based on High and Low price)?5. What was the largest change between any two days (based on Closing Price)?6. What was the average daily trading volume during this year?7. (Optional) What was the median trading volume during this year. (Note: you may need to implement your own function for calculating the median.) 
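Task 7 notes that you may need to write your own median function. Since third-party packages are discouraged in this exercise, here is a minimal standard-library sketch (added for reference, not part of the original task list); it also averages the two middle values when the list length is even:
def median(values):
    # Work on a sorted copy so the caller's list is not reordered
    ordered = sorted(values)
    n = len(ordered)
    mid = n // 2
    # Odd length: the middle element; even length: mean of the two middle elements
    return ordered[mid] if n % 2 == 1 else (ordered[mid - 1] + ordered[mid]) / 2
For a 0-indexed Python list of odd length n, the middle element sits at index n // 2, not ceil(n / 2), which is worth keeping in mind when indexing a sorted list directly.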
Task 1Collect data from the Franfurt Stock Exchange, for the ticker AFX_X, for the whole year 2017 (keep in mind that the date format is YYYY-MM-DD).start_date='2017-01-01' end_date='2017-12-31' order='asc' url = 'https://www.quandl.com/api/v3/datasets/{}/{}.{}?collapse={}&start_date={}&end_date={}&order={}&api_key={}' res_2017 = requests.get(url.format(database_code, dataset_code, return_format, frequency, start_date, end_date, order, API_KEY)) assert res_2017.status_code == 200Task 2Convert the returned JSON object into a Python dictionary.# Response as json print(res_2017.text) # Simply do response.json() since the response comes from using the requests library to get a Python dictionary. dict_2017 = res_2017.json() print(dict_2017['dataset']){'id': 10095370, 'dataset_code': 'AFX_X', 'database_code': 'FSE', 'name': 'Carl Zeiss Meditec (AFX_X)', 'description': 'Stock Prices for Carl Zeiss Meditec (2020-11-02) from the Frankfurt Stock Exchange.

Trading System: Xetra

ISIN: DE0005313704', 'refreshed_at': '2020-12-01T14:48:09.907Z', 'newest_available_date': '2020-12-01', 'oldest_available_date': '2000-06-07', 'column_names': ['Date', 'Open', 'High', 'Low', 'Close', 'Change', 'Traded Volume', 'Turnover', 'Last Price of the Day', 'Daily Traded Units', 'Daily Turnover'], 'frequency': 'daily', 'type': 'Time Series', 'premium': False, 'limit': None, 'transform': None, 'column_index': None, 'start_date': '2017-01-01', 'end_date': '2017-12-31', 'data': [['2017-01-02', 34.99, 35.94, 34.99, 35.8, None, 44700.0, 1590561.0, None, None, None], ['2017-01-03', 35.9, 35.93, 35.34, 35.48, None, 70618.0, 2515473.0, None, None, None], ['2017-01-04', 35.48, 35.51, 34.75, 35.19, None, 54408.0, 1906810.0, None, None, None], ['2017-01-0[...]Task 3Calculate what the highest and lowest opening prices were for the stock in this period.# The open is the 2nd column name in the dataset dict_2017['dataset']['column_names'][1] # The corresponding data is located in the dataset < data < date (will need to iterate this) > 2nd location (1 index) # For example, this is the opening price on the second trading day of the year (1/4/2017) dict_2017['dataset']['data'][2][1] # Using list comprehension in Python open = [data[1] for data in dict_2017['dataset']['data']] print(open)[34.99, 35.9, 35.48, 35.02, 34.91, 35.29, 34.8, 34.95, 35.38, 34.98, 34.85, 35.06, 35.04, 35.04, 34.54, 34.04, 34.0, 34.42, 35.07, 34.83, 35.38, 35.24, 34.75, 35.95, 36.02, 36.06, 35.56, 35.98, 36.2, 36.65, 37.37, 38.81, 38.5, 38.8, 38.8, 39.25, 38.85, 39.6, 39.72, 39.77, 39.75, 40.38, 41.19, 41.38, 41.12, 41.25, 41.5, 41.13, 41.61, 41.53, 41.4, 41.2, 41.4, 41.4, 41.47, 41.26, 41.8, 38.25, 39.01, 38.94, 38.73, 38.95, 39.39, 40.02, 39.77, 40.15, 39.5, 41.1, 40.96, 40.9, 41.46, 41.62, 42.02, 42.06, None, None, 42.24, 41.94, 42.5, 41.97, 42.01, 41.93, 41.88, 41.51, 42.17, None, 41.89, 42.2, 41.86, 42.52, 43.0, 41.83, 43.5, 43.4, 45.18, 45.09, 45.15, 45.06, 44.0, 43.74, 44.16, 43.67, 43.92, 44.8, 44.8, 45.61, 45.05, 45.22, 46.12, 46.8, 47.12, 47.01, 47.8, 46.77, 47.31, 46.5, 46.52, 46.34, 45.66, 46.9, 46.48, 47.46, 47.03, 47.29, 46.95, 47.23, 46.68, 45.73, 45.01, 45.29, 45.83, 44.67, 45.5, 44.79, 44.64, 44.94, 44.29, 44.67, 45.07, 45.6, 45.5, 45.06, 45.74, 45.57, 45.31, 44.7, 44.91, 45.16,[...]Planned on using: print("Minimum open was: ", min(open)) print("Maximum open was: ", max(open)) Cannot use the above because of the following error: TypeError: float() argument must be a string or a number, not 'NoneType' This was happening because I have data in my sample that don't have values. Could remove this easily in pandas or numpy; but the directions recommend not using those. Might come back to this later to manually do in Python if time permits.# set min open to largest number to make sure it is replaced with the lowest open float (decimal) during the loop. min_open = float("inf") # set max open to smallest number to make sure it is replaced with the highest open during the loop. 
max_open = float("-inf") for data in dict_2017['dataset']['data']: if data[1] != None: if data[1] < min_open: min_open = data[1] if data[1] > max_open: max_open = data[1] print("Minimum open was:", min_open) print("Maximum open was:", max_open)Minimum open was: 34.0 Maximum open was: 53.11Task 4What was the largest change in any one day (based on High and Low price)?high = [data[2] for data in dict_2017['dataset']['data']] print(high) low = [data[3] for data in dict_2017['dataset']['data']] print(low) # These look like they don't have problem/missing values and are the same length. print(len(high)) print(len(low)) biggest_change = 0.00 for data in dict_2017['dataset']['data']: change = data[2] - data[3] if change > biggest_change: biggest_change = change date = data[0] high = data[2] low = data[3] print(f"The biggest daily change was {biggest_change:.2f} on {date} from a high of {high} to a low of {low}.")The biggest daily change was 2.81 on 2017-05-11 from a high of 46.06 to a low of 43.25.Task 5What was the largest change between any two days (based on Closing Price)?close = [data[4] for data in dict_2017['dataset']['data']] print(close) # Took awhile to figure this out. Solved a simpiler problem with this info: https://stackoverflow.com/questions/33335940/calculate-difference-between-two-values-python x = [5.05, 1.06, 1.6, 3, 3.99] test = [abs(x[i+1]-x[i]) for i in range(len(x)-1)] print(test) print(max(test)) # Looking for the absolute change because the task didn't specify if it had to be a positive or negative daily change. close_diff = [abs(close[i+1]-close[i]) for i in range(len(close)-1)] print(close_diff) max_close_diff = max(close_diff) print(f"The biggest close difference from one day to the next was {max_close_diff:.2f}.")The biggest close difference from one day to the next was 2.56.Task 6What was the average daily trading volume during this year?volume = [data[6] for data in dict_2017['dataset']['data']] print(volume) avg_volume = sum(volume)/len(volume) print(f"The average trading volume during 2017 was {avg_volume:,.0f}.")The average trading volume during 2017 was 89,124.Task 7 (Optional) What was the median trading volume during this year. (Note: you may need to implement your own function for calculating the median.)volume.sort() print(volume) median = int(math.ceil(len(volume)/2)) print(median) print(f"The median trading volume during 2017 was {volume[128]:,.0f}.")The median trading volume during 2017 was 76,600.Building a datasetSx = np.array([0, 1, 2.5, 3, 4, 5], dtype=np.float32) Sy = np.array([0.6, 0, 2, 2.2, 4.7, 5], dtype=np.float32) # Plotting in graph plt.scatter(Sx, Sy) # Graph axis names and grids plt.grid(True) plt.xlabel('Sx') plt.ylabel('Sy')How can we get a line that goes through all the points given in the above graph?We can not achieve this using a single straight line. What if we can combine multiple straight lines? 
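Before building the combination, it is worth checking that claim numerically. The short sketch below is my own illustration (it assumes the Sx and Sy arrays defined above and that numpy is imported as np): it fits a single least-squares line and prints the residuals, which are nonzero, so no single straight line passes exactly through every point.

# Sanity check: fit one straight line y = slope*x + intercept to the sample points.
# Assumes Sx, Sy and numpy (np) are already defined as in the cells above.
slope, intercept = np.polyfit(Sx, Sy, deg=1)
residuals = Sy - (slope * Sx + intercept)   # distance of each point from the fitted line
print("slope:", slope, "intercept:", intercept)
print("residuals:", residuals)              # nonzero residuals -> a single line cannot hit all points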
Let's assume a straight line $$y = wx + c$$We can build a polynomial by merging multiple straight lines using the following equations:$$l_0 = \sigma (w_0x + c_0) \\l_1 = \sigma (w_1x + c_1) \\l_2 = w_2l_0 + w_3l_1 + c_2 \\$$Here,$$\sigma(x) = \frac{1}{1+e^{-x}}$$Now, $l_2$ is a polynomial with multiple straight lines. w0 = tf.Variable(1.2, dtype=tf.float32) w1 = tf.Variable(2., dtype=tf.float32) w2 = tf.Variable(-0.5, dtype=tf.float32) w3 = tf.Variable(1.2, dtype=tf.float32) c0 = tf.Variable(1.2, dtype=tf.float32) c1 = tf.Variable(2, dtype=tf.float32) c2 = tf.Variable(0.8, dtype=tf.float32) def line_fn(x): l0 = tf.nn.sigmoid(w0*x + c0) l1 = tf.nn.sigmoid(w1*x + c1) l2 = w2*l0 + w3*l1 + c2 return l2 # A function which would plot the line def plot_line(): clear_output(wait=True) p = np.arange(0, 5, 0.1) plt.plot(p, line_fn(p).numpy()) # Plotting in graph plt.scatter(Sx, Sy) # Graph axis names and grids plt.grid(True) plt.xlabel('Sx') plt.ylabel('Sy') plt.show() plot_line() Gradient descent algorithm:$$m_{t} = m_{t-1} - lr \; \frac{\partial \;\; loss(l(x), y)}{\partial m} $$$$loss(l(x), y) = (l(x) - y)^2$$ Here,* $t$ = Time step* $x$ = Input* $y$ = Output* $m$ = Updatable variable* $loss(\cdot, \cdot)$ = Loss function* $lr$ = Learning rate* $l(\cdot)$ = Line function # learning rate lr = 1 total_steps = 30000 for step in range(total_steps): #print(f"Step {step+1:2}:") #print("-"*30) with tf.GradientTape() as tape: # Stating what variables need to be partially differentiated and calibrated tape.watch([w0, w1, w2, w3, c0, c1, c2]) # Passing the points to the line function pred_y = line_fn(Sx) # Calculating the difference/loss of the output (pred_y) of the function # w.r.t. the known output (Sy) loss = (pred_y - Sy) * (pred_y - Sy) # Calculating the gradients w.r.t. the partially diff. parameters # and the generated output loss grads = tape.gradient(loss, [w0, w1, w2, w3, c0, c1, c2]) # For some values, the gradient values can be very big, we call it # exploding gradients.
To tackle such problem we are clipping the values to 1 grads = tf.clip_by_norm(grads, 1) # Updating the gradients w0 = w0 - lr * grads[0] w1 = w1 - lr * grads[1] w2 = w2 - lr * grads[2] w3 = w3 - lr * grads[3] c0 = c0 - lr * grads[4] c1 = c1 - lr * grads[5] c2 = c2 - lr * grads[6] if step%1000 == 0: plot_line() print(f"Step {step+1:2}:") print(f"Loss: {sum(loss)}") print(f"Lr: {lr:.4f}") #time.sleep(0.25) lr -= lr * 0.1Lets check the final resultplot_line()$\theta$ Estimation## Calculating MSE match_signatures <- function(tensor_estimate, true_tensor){ ## function to make sure that we are comparing the same signatures by maximizing the cross product between tensors ## assumes that tensor_estimate is in GPU, and true_tensor is already in CPU and base R array tensor_sim <- tensor_estimate dist_models <- true_tensor %*% t(tensor_sim) matching <- solve_LSAP(dist_models, maximum=TRUE) tensor_sim <- tensor_sim[matching,] return(tensor_sim) } ## Plotting results #=========================================== ## Comparing MAP estimates to ground truth for theta library(clue) library(ggplot2) plot_prevalenceQC <- function(x, y, plot_title){ cor_val = round(cor(x,y),2) theta_df <- data.frame(x=x, y=y) theta_df %>% ggplot(aes(x=x, y=y)) + geom_point(alpha=0.5) + geom_abline(slope = 1,color="red") + theme_classic(base_size = 16) + labs(x=" parameter estimate",y="true value", title=paste(plot_title,"correlation:",cor_val)) } library(clue) library(ggplot2) eta_est = mod0$VIparam$lambda$clone() theta_est = torch_cat(c(eta_est, torch_zeros(1,D, device=device)), dim=1) theta_est = nnf_softmax(theta_est, dim=1) theta_true = truth_vals$theta$clone() theta_sim <- as_array(theta_est$cpu()) true_theta_cpu <- as_array(theta_true$cpu()) dist_models <- true_theta_cpu %*% t(theta_sim) matching <- solve_LSAP(dist_models, maximum=TRUE) theta_sim <- theta_sim[matching,] p1 = plot_prevalenceQC(matrix(theta_sim,ncol=1),matrix(true_theta_cpu,ncol=1),plot_title = "Theta estimation") p1$\Gamma$ estimation## compare the estimated Gammas to the real Gammas library(tidyverse) Gamma_pred = as_array(mod0$VIparam$Xi$cpu()$clone()) Gamma_true = as_array(truth_vals$Gamma$cpu()$clone()) pred_zeta = mod0$VIparam$zeta$cpu()$clone() pred_gamma_sigma = as_array(mod0$ Bparam$gamma_sigma$cpu()) perm = as.numeric(matching) ## Match the Gammas to the previous matching with the thetas. 
if ( K %in% perm[1:(K-1)]){ gamS = Gamma_pred[which(perm == (K)),] Gamma_pred = Gamma_pred - gamS Gamma_pred[which(perm == (K)),] = Gamma_pred[which(perm == (K)),,drop=FALSE] - gamS tmp = perm[which(perm == (K))] perm[which(perm == (K))] = perm[K] perm[K] = tmp } perm_index = perm[1:(K-1)] plot_gamma_df = data.frame(gamma_est = matrix(Gamma_pred[perm[1:(K-1)],],ncol=1), gamma_true = matrix(Gamma_true, ncol=1), gamma_sigs = matrix(apply(as_array(pred_zeta[perm_index]), 1, diag), nc=1), gamma_sigs_ = matrix(rep(pred_gamma_sigma[perm[1:(K-1)]], p), nc=1)) head(plot_gamma_df) get_CI <- function(m,v, prcntile){ tmp = rnorm(n=10000,mean = m, sd = v) return(quantile(tmp, prcntile)) } plot_gamma_df = plot_gamma_df %>% rowwise() %>% mutate(upper_CI = get_CI(gamma_est, sqrt(gamma_sigs), 0.975)) %>% mutate(lower_CI = get_CI(gamma_est, sqrt(gamma_sigs), 0.125)) %>% ungroup() p2 = plot_gamma_df %>% ggplot(aes(y= gamma_est, x = gamma_true)) + geom_point(size=2) + geom_linerange(aes(ymin = lower_CI, ymax = upper_CI), alpha=0.5) + geom_abline(slope=1,linetype="dashed", lwd=2, color = "dodgerblue3") + theme_classic(base_size = 16) + labs(x="true value",y="parameter estimate", title="Gamma Estimation") p2T, factors esimationsplotting_factors <- function(sim,true, title){ corr_sim = round(cor(sim,true),2) plot_df = data.frame(x=sim,y=true) plot_df %>% ggplot(aes(x=x,y=y)) + geom_point(alpha=0.5) + geom_abline(slope = 1,color="red") + theme_classic(base_size = 16) + labs(x=" parameter estimate",y="true value", title=paste(title,"correlation:",corr_sim)) } perm_index = perm[1:K] T0_sim = as_array(mod0$Bparam$T0[..,perm_index]$clone()$flatten()$cpu()) T0_true = as_array(truth_vals$T0$clone()$flatten()$cpu()) p3 = plotting_factors(T0_sim,T0_true,"T0") p3 ## factors perm_index = as.numeric(matching) factors_sim = c( as_array(mod0$Bparam$factors$bt$clone()$cpu()[1:2,as.integer(matching)]$flatten()), as_array(mod0$Bparam$factors$br$clone()$cpu()[1:2,as.integer(matching)]$flatten()), as_array(mod0$Bparam$factors$epi$clone()$cpu()[,as.integer(matching)]$flatten()), as_array(mod0$Bparam$factors$nuc$clone()$cpu()[,as.integer(matching)]$flatten()), as_array(mod0$Bparam$factors$clu$clone()$cpu()[,as.integer(matching)]$flatten()) ) factors_true = c( as_array(truth_vals$factors$bt$clone()$cpu()$flatten()), as_array(truth_vals$factors$br$clone()$cpu()$flatten()), as_array(truth_vals$factors$epi$clone()$cpu()$flatten()), as_array(truth_vals$factors$nuc$clone()$cpu()$flatten()), as_array(truth_vals$factors$clu$clone()$cpu()$flatten()) ) p4 = plotting_factors(factors_sim,factors_true,"Factors") p4 library(ggpubr) options(repr.plot.width=10, repr.plot.height=8) px = ggarrange(p1,p2,p3,p4,ncol =2,nrow = 2,labels = c("A","B","C","D")) px ggsave(px, filename="../figures/estimation_simulation.pdf", device="pdf",height=8, width=10)What is the use of SumTime?- Adds time strings- can convert/add time strings from a wide variety of input options- Flexible time additions and assignment- Convert to datetime.timedelta object- A handy tool for quick time calculations! 
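To make the idea behind these features concrete, here is a minimal sketch of time-string addition written for this note (my own illustration using only the standard library, not the ArithTime implementation): it parses a '1hrs 20mins'-style string into a datetime.timedelta and adds two values.

# Minimal illustration of adding time strings (not the ArithTime source).
import re
from datetime import timedelta

def parse_time_string(s):
    """Parse '1hrs 20mins 30secs' (or plain '1 20 30') into a timedelta."""
    hrs = re.search(r'(\d+)\s*hrs', s)
    mins = re.search(r'(\d+)\s*mins', s)
    secs = re.search(r'(\d+)\s*secs', s)
    if not (hrs or mins or secs):                      # fall back to plain 'H M S' numbers
        parts = [int(p) for p in s.split()] + [0, 0, 0]
        return timedelta(hours=parts[0], minutes=parts[1], seconds=parts[2])
    return timedelta(hours=int(hrs.group(1)) if hrs else 0,
                     minutes=int(mins.group(1)) if mins else 0,
                     seconds=int(secs.group(1)) if secs else 0)

print(parse_time_string("1hrs 20mins") + parse_time_string("0hrs 50mins"))  # 2:10:00

The real class goes further, with operator overloading (the += shown below), flexible input flags, and conversion helpers such as to_timedelta and to_sumtime.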
Import class from filefrom ArithTime import SumTime as stdeclare SumTime object# declaring st("1hrs 20mins") is equivalent to st("1 20") s1 = st("1hrs 20mins") s_blank = st() s_blank_2 = st('') print(s_blank.get_hrs(), s_blank.get_minutes(), s_blank.get_secs(), "s_blank =>", s_blank) print(s_blank_2.get_hrs(), s_blank_2.get_minutes(), s_blank_2.get_secs(), "s_blank_2 =>",s_blank_2)0 0 0 s_blank => 0 0 0 s_blank_2 =>Addition with valid time strings [time_value][flag][time_value][flag] - time_value = \d or any Integer - flag = hrs | mins | secs - = not compulsory, can be any character.# perform addition of time strings s1 = st("1 00") print(s1) s1 += ["20mins" ,"1hrs 10mins" , "50mins"] + ["10mins" , "0hrs-60secs" , "10mins"] print(s1) s_mins = st("1hrs 10mins") print(s_mins, 'repr=>',repr(s_mins))1hrs 3hrs 41mins 1hrs 10mins repr=> SumTime('1 10 0')convert to datetime.timedelta objecttd = s1.to_timedelta() print(type(td)) print(td.seconds) 13260Addition on SumTime object__Other than adding 2 SumTime objectsadding strings with SumTime object is also possible (as shown above)__ Possible operations all together!s2 = st("0 59") # 0hrs 59 minutes (read definition in init to know more possible input types) # perform SumTime object addition print(f"s1={s1}") s3 = s1 + s2 print(f"s3={s3}\n") # get individual time flags print(16*'---') print("Printing hrs, mins, secs individually:") print(s3.get_hrs(), 'hrs') print(s3.get_minutes(), 'mins') print(s3.get_secs(), 'secs') # print SumTime object print("s3 as a whole =>",s3) print(16*'---') #store in string format s3_str_fmt = str(s3) print("store in string format:", type(s3_str_fmt), s3_str_fmt,'\n\n') # format conversions sum_time_obj = st.to_sumtime(str(s3), return_type='obj') # return as SumTime object time_list = st.to_sumtime(str(s3), return_type='list') # return list[hrs, mins, secs] time_string = st.to_sumtime(str(s3), return_type='str') # return as str format, (this can be use to create a new instance of class) print(f"object:{sum_time_obj}\nlist_return:{time_list}\nstring_return:{time_string}\n{16*'---'}") new_inst = st(time_string) print("New instance:",repr(new_inst)) print("SumTime object:", type(sum_time_obj), sum_time_obj)s1=3hrs 41mins s3=4hrs 40mins ------------------------------------------------ Printing hrs, mins, secs individually: 4 hrs 40 mins 0 secs s3 as a whole => 4hrs 40mins ------------------------------------------------ store in string format: 4hrs 40mins object:4hrs 40mins list_return:[4, 40, None] string_return:4 40 0 ------------------------------------------------ New instance: SumTime('4 40 0') SumTime object: 4hrs 40minsFunctions:-def say_hello(xyxyxy = 'there!'): #A default value is taken up print(f'Hello {xyxyxy}') say_hello('Yeet') say_hello() def add_num(num1,num2): #don't need to define the type of variable...dynamic setting. return num1+num2 #return obviously, returns the value, so you may store it. i = add_num(2,3) i ''' because of dynamic type setting, it is susceptible to bugs. See this: ''' num1 = input('num1:') num2 = input('num2:') def sum_num(num1,num2): return num1+num2 sum_num(num1,num2) #output is the string concatenation of the input strings. num1 = int(input('num1:')) num2 = int(input('num2:')) def sum_num(num1,num2): return num1+num2 sum_num(num1,num2) def even_check(num): '''check if the number is even or not''' # if num%2: # return false # else: # return true return num%2 == 0 even_check(int(input("What's the number you want to check? 
"))) #Return true if any number is even inside a list mylist = [1,3,5,2] def even_in_list(mylist): for n in mylist: if n%2 == 0: return True else: pass # return False even_in_list(mylist) #Return all the even numbers in the list. mylist = [1,1,2,4,5,7] def even_in_list_2(mylist): evenlist = [] for n in mylist: if n%2==0: evenlist.append(n) else: pass return evenlist even_in_list_2(mylist) #Tuple unpacking with functions. stock_prices = [('A',10),('B',20),('C',30)] for item in stock_prices: print(item) for ticker,prices in stock_prices: print(prices + (0.2*prices)) employee_stats = [('Adam',50),('Bernard',48),('Charlie',56)] def best_employee_month(employeeStats): current_max_hrs = 0 best_employee = '' for employee,hours in employeeStats: if hours>current_max_hrs: current_max_hrs = hours best_employee = employee else: pass return (best_employee,current_max_hrs) # name,hours = best_employee_month(employee_stats) #this is a bit misleading as you might not know how many values the n-tuple returns. employee_best_details = best_employee_month(employee_stats) employee_best_details name,hours = employee_best_details print(f'The best employee of the month was {name} with record-breaking {hours} hours')The best employee of the month was Charlie with record-breaking 56 hoursMonte Cup gameexample = [1,2,3,4,5,6,7] from random import shuffle shuffle(example) example #shuffle doesn't return any list..so we define a slight modified shuffle: def shuffle_list(mylist): shuffle(mylist) return mylist result = shuffle_list(example) result game_scene = ['','O',''] shuffle_list(game_scene) def player_guess(): guess = '' while guess not in ['0','1','2','3']: guess = input('Pick a number 0,1,2 or 3:') return int(guess) def check_guess(game_scene,my_guess): if game_scene[my_guess] == 'O': print("Correct!") else: print("Wrong, ", game_scene, " but your guess was ", my_guess) def play_the_4_cup_game(): #initial list mylist = ['','','O',''] #shuffled list shuffled_list = shuffle_list(mylist) #User guess index = player_guess() #Check if it matches check_guess(shuffled_list, index) play_the_4_cup_game()Pick a number 0,1,2 or 3:1 Wrong, ['O', '', '', ''] but your guess was 1*Args and *kwargs#For an arbitrary number of arguements, we use *args parameter, in functions def myfunc(*arsg): print(sum(arsg)) #by convention, we always use args variable in such functions, so it is more understandable. myfunc(8,58,7,9,2,4,5,7,1) def tuple_returningFn (*args): for n in args: print(n) tuple_returningFn(1,2,4,1) def myfunc(**kwargs): if 'fruit' in kwargs: print("the fruit you mentioned is {}".format(kwargs['fruit'])) else: print('No fruit for you!') return kwargs myfunc(fruit = 'plums', dino = 'sereptosaurus') # def mixed_arguments(*args, **kwargs): def myfunc(*args): even = [] for n in args: if not n%2: even.append(n) return even#We have a data which classified if patients have heart disease or not according to features in it. #We will try to use this data to create a model which tries predict if a patient has this disease or not. #We will use Random Forest (classification) algorithm. 
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier #model evaluation from sklearn.model_selection import train_test_split, cross_val_score from sklearn.model_selection import RandomizedSearchCV, GridSearchCV from sklearn.metrics import confusion_matrix, classification_report from sklearn.metrics import precision_score, recall_score, f1_score from sklearn.metrics import plot_roc_curve df=pd.read_csv("/content/heart.csv") df.head() df.target.value_counts() df.isnull().sum() df.describe() df.info() sns.countplot(x="target", data=df, palette="bwr") plt.show() countNoDisease = len(df[df.target == 0]) countHaveDisease = len(df[df.target == 1]) print("Percentage of Patients Haven't Heart Disease: {:.2f}%".format((countNoDisease / (len(df.target))*100))) print("Percentage of Patients Have Heart Disease: {:.2f}%".format((countHaveDisease / (len(df.target))*100))) sns.countplot(x='sex', data=df, palette="mako_r") plt.xlabel("Sex (0 = female, 1= male)") plt.show() countFemale = len(df[df.sex == 0]) countMale = len(df[df.sex == 1]) print("Percentage of Female Patients: {:.2f}%".format((countFemale / (len(df.sex))*100))) print("Percentage of Male Patients: {:.2f}%".format((countMale / (len(df.sex))*100))) df.groupby('target').mean() pd.crosstab(df.age,df.target).plot(kind="bar",figsize=(20,6)) plt.title('Heart Disease Frequency for Ages') plt.xlabel('Age') plt.ylabel('Frequency') plt.savefig('heartDiseaseAndAges.png') plt.show() pd.crosstab(df.sex,df.target).plot(kind="bar",figsize=(15,6),color=['#1CA53B','#AA1111' ]) plt.title('Heart Disease Frequency for Sex') plt.xlabel('Sex (0 = Female, 1 = Male)') plt.xticks(rotation=0) plt.legend(["Haven't Disease", "Have Disease"]) plt.ylabel('Frequency') plt.show() #Lest compare age, thalach(Max heart rate) and target plt.figure(figsize=(10,6)) #scatter with positive examples plt.scatter(df.age[df.target==1], df.thalach[df.target==1], c="salmon"); #scatter with negative example plt.scatter(df.age[df.target==0], df.thalach[df.target==0], c="black"); plt.title("Heart Disease in function of age and thalach") plt.xlabel("Age") plt.ylabel("Thalache (max heart rate)") plt.legend(["Disease","No Disease"]); # check the dstribution of the age column with a histogram df.age.plot.hist(figsize=(10,6)); pd.crosstab( df.cp,df.target) #make thr cross tab visual pd.crosstab(df.cp, df.target).plot(kind="bar", figsize=(10,6), color=["darkblue","lightblue"]) plt.title("Heart Disease frequency per chest pain type") plt.xlabel("Chest Pain type") plt.ylabel("Amount") plt.legend(["No Disease","Disease"]) plt.xticks(rotation=0); df.corr() # Lets make our corelation matrix a bit prettier corr_matrix= df.corr() fig, ax= plt.subplots(figsize=(10,6)) ax= sns.heatmap(corr_matrix, annot= True, linewidths=0.5, fmt=".2f", cmap="twilight_r"); X=df.drop("target", axis=1) y=df["target"] #split data into training and test split np.random.seed(42) #Split into trin and test set X_train, X_test, y_train,y_test= train_test_split(X,y,test_size=0.2) len(X_train), len(y_train) # Put models in a dictionary models={"Logistic Regression": LogisticRegression(), "KNN":KNeighborsClassifier(), "Random Forest": RandomForestClassifier()} #create a function to fit and score models def fit_and_score(models,X_train, X_test, y_train, y_test): """ Fits and evaluates given 
ML models. models: a dict of diff SciKit learn ML models X_test : test set (no label) X_train: training set (no label) y_train : training labels y_test : test labels """ #set random seed np.random.seed(42) #Make dictionary to keep model scores model_scores={} #Loop through models for name, model in models.items(): #fit model to data model.fit(X_train, y_train) #evaluate the model and store in score dict model_scores[name]= model.score(X_test, y_test) return model_scores model_scores= fit_and_score(models=models, X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test) model_scores model_compare =pd.DataFrame(model_scores, index=["accuracy"]) model_compare.T.plot.bar(); #lets tune KNN train_scores = [] test_scores = [] #Create a list of diff values of n_neighbours neighbors = range(1,21) #Setup KNN Instance knn = KNeighborsClassifier() #Loop thriugh diff n_neighbours for i in neighbors: knn.set_params(n_neighbors = i) #Fit the algo knn.fit(X_train, y_train) #Update the training scores list train_scores.append(knn.score(X_train, y_train)) #Update the test scores test_scores.append(knn.score(X_test, y_test)) train_scores test_scores #Visualize plt.plot(neighbors, train_scores, label="Train score") plt.plot(neighbors, test_scores, label="Test score") plt.xticks(np.arange(1,21,1)) plt.xlabel("Number of neighbors") plt.ylabel("Model score") plt.legend() print(f"Maximum KNN score on the test data :{max(test_scores)*100:.2f}%") # Create a hyperparameter grid for LogisticRegression log_reg_grid = {"C" : np.logspace(-4, 4, 20), "solver": ["liblinear"]} #Create a hyperparam grid for RandomForestClassifier rf_grid = {"n_estimators" : np.arange(10, 1000, 50), "max_depth" : [None,3,5,10], "min_samples_split" : np.arange(2, 20, 2), "min_samples_leaf": np.arange(1, 20, 2)} # Tune LogisticRegression np.random.seed(42) #Setup random hyperparams search for LogisticRegression rs_log_reg= RandomizedSearchCV(LogisticRegression(), param_distributions = log_reg_grid, cv=5, n_iter=20, verbose=True) #Fit random hyperparam search model for LogisticRegression rs_log_reg.fit(X_train, y_train) rs_log_reg.best_params_ rs_log_reg.score(X_test, y_test) np.random.seed(42) #Setup random hyperparam search for RandomFOrestClassifier rs_ref = RandomizedSearchCV(RandomForestClassifier(), param_distributions = rf_grid, cv=5, n_iter= 20, verbose=True) # Fit random hyperparam search model for RandomForestCLassifier rs_ref.fit(X_train, y_train) #Finding best params rs_ref.best_params_ #Evaluate RandomSearchCV search on RandomForestClassifier model rs_ref.score(X_test, y_test) # Different hyperparameters for LR Model log_reg_grid = {"C": np.logspace(-4,4,30), "solver": ["liblinear"]} #Setup grid hyperparameter search for LogisticRegression gs_log_reg = GridSearchCV(LogisticRegression(), param_grid= log_reg_grid, cv=5, verbose=True) #Fit grid hyperparam search model gs_log_reg.fit(X_train, y_train); gs_log_reg.best_params_ # Evaluate GridSearchCV for LR model gs_log_reg.score(X_test, y_test) model_scores # make predictions y_preds=gs_log_reg.predict(X_test) y_preds # Import ROC curve fucntion but we have done this previously. # roc curve and calculate AUC metric plot_roc_curve(gs_log_reg, X_test, y_test) sns.set(font_scale=1.5) def plot_conf_mat(y__test, y_preds): """ Plots a confusion matrix using Seaborn's heatmap(). 
""" fig, ax = plt.subplots(figsize=(3, 3)) ax = sns.heatmap(confusion_matrix(y_test, y_preds), annot=True, # Annotate the boxes cbar=False) plt.xlabel("Predicted label") # predictions go on the x-axis plt.ylabel("True label") # true labels go on the y-axis plot_conf_mat(y_test, y_preds) print(classification_report(y_test, y_preds)) # check our best hyperparams gs_log_reg.best_params_ # create a new classifier with best params clf= LogisticRegression(C=0.20433597178569418, solver="liblinear") # Cross validated accuracy cv_acc= cross_val_score(clf, X, y, cv=5, scoring="accuracy") cv_acc cv_acc=np.mean(cv_acc) cv_acc # Cross validated precision cv_precision= cross_val_score(clf, X, y, cv=5, scoring="precision") cv_precision=np.mean(cv_precision) cv_precision # Cross validated recall cv_recall= cross_val_score(clf, X, y, cv=5, scoring="recall") cv_recall=np.mean(cv_recall) cv_recall # Cross validated f1 cv_f1= cross_val_score(clf, X, y, cv=5, scoring="f1") cv_f1=np.mean(cv_f1) cv_f1 # putting it in a graph visualize cv_metrics= pd.DataFrame({"Accuracy": cv_acc, "Precision": cv_precision, "Recall": cv_recall, "f1": cv_f1}, index=[0]) cv_metrics.T.plot.bar(title="Cross validated classification metrics", legend=False)Unit testingUnit testing is a method for testing code to make sure a set of test cases work as expected. Tests can also be used to help track and plan the functionality of your code. Example: Calculate the distance between two angles (in degrees)Let's start by writing a simple version of our function:def angle_distance(a, b): return abs(b - a)Writing our first testNow we will write a test to see if this works as expected. For this example the test is in the same file as the original code, but in a real case the test should be contained in its own file or folder. As long as the filename has the word `test` in it calling the command `nosetests` will automatically find and run the file.import unittest class TestAngle(unittest.TestCase): def test_small_angles(self): '''Test distance between small angles''' a = 10 b = 90 expected = 80 result = angle_distance(a, b) self.assertEqual(result, expected)Running the testTo run the test inside the notebook we can use this block, typically this would be run from the console with the command `nosetests`.unittest.main(argv=['first-arg-is-ignored'], exit=False);. ---------------------------------------------------------------------- Ran 1 test in 0.001s OKAdd test for edge case 1We can see that our test has worked as expected, but we have only tested one case, we need to also test some edge cases.class TestAngle(unittest.TestCase): def test_small_angles(self): '''Test distance between small angles''' a = 10 b = 90 expected = 80 result = angle_distance(a, b) self.assertEqual(result, expected) def test_large_angles(self): '''Test distance between large angles''' a = 0 b = 270 expected = 90 result = angle_distance(a, b) self.assertEqual(result, expected) unittest.main(argv=['first-arg-is-ignored'], exit=False);F. 
====================================================================== FAIL: test_large_angles (__main__.TestAngle) Test distance between large angles ---------------------------------------------------------------------- Traceback (most recent call last): File "/tmp/ipykernel_9930/4248502634.py", line 16, in test_large_angles self.assertEqual(result, expected) AssertionError: 270 != 90 ---------------------------------------------------------------------- Ran 2 tests in 0.001s FAILED (failures=1)Fix the first bugThis test has failed, we have found a bug in our code! We have not accounted for the angle wrapping around, the largest distance two angles can be is `180` before they start getting closer again. Let's update our function:def angle_distance(a, b): return abs(b - a) % 180 unittest.main(argv=['first-arg-is-ignored'], exit=False);.. ---------------------------------------------------------------------- Ran 2 tests in 0.001s OKAdd test for edge case 2The code should be able to handle distances between angles on either side of the branch cut.class TestAngle(unittest.TestCase): def test_small_angles(self): '''Test distance between small angles''' a = 10 b = 90 expected = 80 result = angle_distance(a, b) self.assertEqual(result, expected) def test_large_angles(self): '''Test distance between large angles''' a = 0 b = 270 expected = 90 result = angle_distance(a, b) self.assertEqual(result, expected) def test_wrapping_angles(self): '''Test distance around wrapping angle''' a = 1 b = 359 expected = 2 result = angle_distance(a, b) self.assertEqual(result, expected) unittest.main(argv=['first-arg-is-ignored'], exit=False);..F ====================================================================== FAIL: test_wrapping_angles (__main__.TestAngle) Test distance around wrapping angle ---------------------------------------------------------------------- Traceback (most recent call last): File "/tmp/ipykernel_9930/4238174130.py", line 24, in test_wrapping_angles self.assertEqual(result, expected) AssertionError: 178 != 2 ---------------------------------------------------------------------- Ran 3 tests in 0.001s FAILED (failures=1)Fix the second bugLooks like we have found another breaking point in our code. We will update it again:def angle_distance(a, b): d = abs(b - a) return min(360 - d, d) unittest.main(argv=['first-arg-is-ignored'], exit=False);... ---------------------------------------------------------------------- Ran 3 tests in 0.001s OKAdd test for edge case 3What about the case where the original angles are larger than `360`? We will write another test:class TestAngle(unittest.TestCase): def test_small_angles(self): '''Test distance between small angles''' a = 10 b = 90 expected = 80 result = angle_distance(a, b) self.assertEqual(result, expected) def test_large_angles(self): '''Test distance between large angles''' a = 0 b = 270 expected = 90 result = angle_distance(a, b) self.assertEqual(result, expected) def test_wrapping_angles(self): '''Test distance around wrapping angle''' a = 1 b = 359 expected = 2 result = angle_distance(a, b) self.assertEqual(result, expected) def test_large_input_angles(self): '''Test distance when input angles are above 360''' a = 720 b = 270 expected = 90 result = angle_distance(a, b) self.assertEqual(result, expected) unittest.main(argv=['first-arg-is-ignored'], exit=False);.F.. 
====================================================================== FAIL: test_large_input_angles (__main__.TestAngle) Test distance when input angles are above 360 ---------------------------------------------------------------------- Traceback (most recent call last): File "/tmp/ipykernel_9930/4200136150.py", line 32, in test_large_input_angles self.assertEqual(result, expected) AssertionError: -90 != 90 ---------------------------------------------------------------------- Ran 4 tests in 0.002s FAILED (failures=1)Fix the third bugTo fix the bug for this case we need to make sure the input values are re-cast in the range `[0, 360)`:def angle_distance(a, b): d = abs((b % 360) - (a % 360)) return min(360 - d, d) unittest.main(argv=['first-arg-is-ignored'], exit=False);.... ---------------------------------------------------------------------- Ran 4 tests in 0.001s OKCleaning up the testsLooking at our tests now we can see that is really the same test four times over, just with different values. `unittest` allows us to create `subTests` for cases like this:class TestAngle(unittest.TestCase): def test_small_angles(self): '''Test distance between angles''' a = [10, 0, 1, 720] b = [90, 270, 359, 270] expected = [80, 90, 2, 90] for i, j, e in zip(a, b, expected): with self.subTest(a=i, b=j): result = angle_distance(i, j) self.assertEqual(result, e) unittest.main(argv=['first-arg-is-ignored'], exit=False);. ---------------------------------------------------------------------- Ran 1 test in 0.001s OKLets explore the groups of the arms:groups = gk.get_group_of_arms() display('Number of groups: %d' % groups.max()) display('Group size : Number of groups') display(Counter(Counter(groups).values()))Shape from anglesmolsys = msm.convert(msm.demo['Met-enkephalin']['vacuum.msmpk']) molsys_cub = msm.build.solvate(molsys, box_geometry='cubic', clearance='14.0 angstroms', engine='PDBFixer') molsys_oct = msm.build.solvate(molsys, box_geometry='truncated octahedral', clearance='14.0 angstroms', engine='PDBFixer') molsys_dod = msm.build.solvate(molsys, box_geometry='rhombic dodecahedral', clearance='14.0 angstroms', engine='PDBFixer') angles_cub = msm.get(molsys_cub, target='system', box_angles=True) msm.pbc.box_shape_from_box_angles(angles_cub) angles_oct = msm.get(molsys_oct, target='system', box_angles=True) msm.pbc.box_shape_from_box_angles(angles_oct) angles_dod = msm.get(molsys_dod, target='system', box_angles=True) msm.pbc.box_shape_from_box_angles(angles_dod) msm.pbc.box_shape_from_box_angles([[70.0, 80.0, 90.0]] * msm.puw.unit('degrees'))Extract plain text from Wikidumpfp = '/home/mostendorff/datasets/wikinews_en/20201201/enwikinews-20201201.json.gz' fp = '/home/mostendorff/datasets/wikinews_en/20201201/enwikinews-20201201-pages-meta-current.xml.bz2' #!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2010 <> # Copyright (C) 2012 <> # Copyright (C) 2018 <> # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html """Construct a corpus from a Wikipedia (or other MediaWiki-based) database dump. Uses multiprocessing internally to parallelize the work and process the dump more quickly. Notes ----- If you have the `pattern `_ package installed, this module will use a fancy lemmatization to get a lemma of each token (instead of plain alphabetic tokenizer). See :mod:`gensim.scripts.make_wiki` for a canned (example) command-line script based on this module. 
""" import bz2 import logging import multiprocessing import re import signal from pickle import PicklingError # LXML isn't faster, so let's go with the built-in solution from xml.etree.ElementTree import iterparse from gensim import utils # cannot import whole gensim.corpora, because that imports wikicorpus... from gensim.corpora.dictionary import Dictionary from gensim.corpora.textcorpus import TextCorpus logger = logging.getLogger(__name__) ARTICLE_MIN_WORDS = 50 """Ignore shorter articles (after full preprocessing).""" # default thresholds for lengths of individual tokens TOKEN_MIN_LEN = 2 TOKEN_MAX_LEN = 15 RE_P0 = re.compile(r'', re.DOTALL | re.UNICODE) """Comments.""" RE_P1 = re.compile(r' ].*?)(|/>)', re.DOTALL | re.UNICODE) """Footnotes.""" RE_P2 = re.compile(r'(\n\[\[[a-z][a-z][\w-]*:[^:\]]+\]\])+$', re.UNICODE) """Links to languages.""" RE_P3 = re.compile(r'{{([^}{]*)}}', re.DOTALL | re.UNICODE) """Template.""" RE_P4 = re.compile(r'{{([^}]*)}}', re.DOTALL | re.UNICODE) """Template.""" RE_P5 = re.compile(r'\[(\w+):\/\/(.*?)(( (.*?))|())\]', re.UNICODE) """Remove URL, keep description.""" RE_P6 = re.compile(r'\[([^][]*)\|([^][]*)\]', re.DOTALL | re.UNICODE) """Simplify links, keep description.""" RE_P7 = re.compile(r'\n\[\[[iI]mage(.*?)(\|.*?)*\|(.*?)\]\]', re.UNICODE) """Keep description of images.""" RE_P8 = re.compile(r'\n\[\[[fF]ile(.*?)(\|.*?)*\|(.*?)\]\]', re.UNICODE) """Keep description of files.""" RE_P9 = re.compile(r' ].*?)(|/>)', re.DOTALL | re.UNICODE) """External links.""" RE_P10 = re.compile(r' ].*?)(|/>)', re.DOTALL | re.UNICODE) """Math content.""" RE_P11 = re.compile(r'<(.*?)>', re.DOTALL | re.UNICODE) """All other tags.""" RE_P12 = re.compile(r'(({\|)|(\|-(?!\d))|(\|}))(.*?)(?=\n)', re.UNICODE) """Table formatting.""" RE_P13 = re.compile(r'(?<=(\n[ ])|(\n\n)|([ ]{2})|(.\n)|(.\t))(\||\!)([^[\]\n]*?\|)*', re.UNICODE) """Table cell formatting.""" RE_P14 = re.compile(r'\[\[Category:[^][]*\]\]', re.UNICODE) """Categories.""" RE_P15 = re.compile(r'\[\[([fF]ile:|[iI]mage)[^]]*(\]\])', re.UNICODE) """Remove File and Image templates.""" RE_P16 = re.compile(r'\[{2}(.*?)\]{2}', re.UNICODE) """Capture interlinks text and article linked""" RE_P17 = re.compile( r'(\n.{0,4}((bgcolor)|(\d{0,1}[ ]?colspan)|(rowspan)|(style=)|(class=)|(align=)|(scope=))(.*))|' r'(^.{0,2}((bgcolor)|(\d{0,1}[ ]?colspan)|(rowspan)|(style=)|(class=)|(align=))(.*))', re.UNICODE ) """Table markup""" IGNORED_NAMESPACES = [ 'Wikipedia', 'Category', 'File', 'Portal', 'Template', 'MediaWiki', 'User', 'Help', 'Book', 'Draft', 'WikiProject', 'Special', 'Talk' ] """MediaWiki namespaces that ought to be ignored.""" def filter_example(elem, text, *args, **kwargs): """Example function for filtering arbitrary documents from wikipedia dump. The custom filter function is called _before_ tokenisation and should work on the raw text and/or XML element information. The filter function gets the entire context of the XML element passed into it, but you can of course choose not the use some or all parts of the context. Please refer to :func:`gensim.corpora.wikicorpus.extract_pages` for the exact details of the page context. Parameters ---------- elem : etree.Element XML etree element text : str The text of the XML node namespace : str XML namespace of the XML element title : str Page title page_tag : str XPath expression for page. text_path : str XPath expression for text. title_path : str XPath expression for title. ns_path : str XPath expression for namespace. pageid_path : str XPath expression for page id. 
Example ------- .. sourcecode:: pycon >>> import gensim.corpora >>> filter_func = gensim.corpora.wikicorpus.filter_example >>> dewiki = gensim.corpora.WikiCorpus( ... './dewiki-20180520-pages-articles-multistream.xml.bz2', ... filter_articles=filter_func) """ # Filter German wikipedia dump for articles that are marked either as # Lesenswert (featured) or Exzellent (excellent) by wikipedia editors. # ********************* # regex is in the function call so that we do not pollute the wikicorpus # namespace do not do this in production as this function is called for # every element in the wiki dump _regex_de_excellent = re.compile(r'.*\{\{(Exzellent.*?)\}\}[\s]*', flags=re.DOTALL) _regex_de_featured = re.compile(r'.*\{\{(Lesenswert.*?)\}\}[\s]*', flags=re.DOTALL) if text is None: return False if _regex_de_excellent.match(text) or _regex_de_featured.match(text): return True else: return False def find_interlinks(raw): """Find all interlinks to other articles in the dump. Parameters ---------- raw : str Unicode or utf-8 encoded string. Returns ------- list List of tuples in format [(linked article, the actual text found), ...]. """ filtered = filter_wiki(raw, promote_remaining=False, simplify_links=False) interlinks_raw = re.findall(RE_P16, filtered) interlinks = [] for parts in [i.split('|') for i in interlinks_raw]: actual_title = parts[0] try: interlink_text = parts[1] except IndexError: interlink_text = actual_title interlink_tuple = (actual_title, interlink_text) interlinks.append(interlink_tuple) legit_interlinks = [(i, j) for i, j in interlinks if '[' not in i and ']' not in i] return legit_interlinks def filter_wiki(raw, promote_remaining=True, simplify_links=True): """Filter out wiki markup from `raw`, leaving only text. Parameters ---------- raw : str Unicode or utf-8 encoded string. promote_remaining : bool Whether uncaught markup should be promoted to plain text. simplify_links : bool Whether links should be simplified keeping only their description text. Returns ------- str `raw` without markup. """ # parsing of the wiki markup is not perfect, but sufficient for our purposes # contributions to improving this code are welcome :) text = utils.to_unicode(raw, 'utf8', errors='ignore') text = utils.decode_htmlentities(text) # '&nbsp;' --> '\xa0' return remove_markup(text, promote_remaining, simplify_links) def remove_markup(text, promote_remaining=True, simplify_links=True): """Filter out wiki markup from `text`, leaving only text. Parameters ---------- text : str String containing markup. promote_remaining : bool Whether uncaught markup should be promoted to plain text. simplify_links : bool Whether links should be simplified keeping only their description text. Returns ------- str `text` without markup. """ text = re.sub(RE_P2, '', text) # remove the last list (=languages) # the wiki markup is recursive (markup inside markup etc) # instead of writing a recursive grammar, here we deal with that by removing # markup in a loop, starting with inner-most expressions and working outwards, # for as long as something changes. 
text = remove_template(text) text = remove_file(text) iters = 0 while True: old, iters = text, iters + 1 text = re.sub(RE_P0, '', text) # remove comments text = re.sub(RE_P1, '', text) # remove footnotes text = re.sub(RE_P9, '', text) # remove outside links text = re.sub(RE_P10, '', text) # remove math content text = re.sub(RE_P11, '', text) # remove all remaining tags text = re.sub(RE_P14, '', text) # remove categories text = re.sub(RE_P5, '\\3', text) # remove urls, keep description if simplify_links: text = re.sub(RE_P6, '\\2', text) # simplify links, keep description only # remove table markup text = text.replace("!!", "\n|") # each table head cell on a separate line text = text.replace("|-||", "\n|") # for cases where a cell is filled with '-' text = re.sub(RE_P12, '\n', text) # remove formatting lines text = text.replace('|||', '|\n|') # each table cell on a separate line(where |{{a|b}}||cell-content) text = text.replace('||', '\n|') # each table cell on a separate line text = re.sub(RE_P13, '\n', text) # leave only cell content text = re.sub(RE_P17, '\n', text) # remove formatting lines # remove empty mark-up text = text.replace('[]', '') # stop if nothing changed between two iterations or after a fixed number of iterations if old == text or iters > 2: break if promote_remaining: text = text.replace('[', '').replace(']', '') # promote all remaining markup to plain text return text def remove_template(s): """Remove template wikimedia markup. Parameters ---------- s : str String containing markup template. Returns ------- str Сopy of `s` with all the `wikimedia markup template `_ removed. Notes ----- Since template can be nested, it is difficult remove them using regular expressions. """ # Find the start and end position of each template by finding the opening # '{{' and closing '}}' n_open, n_close = 0, 0 starts, ends = [], [-1] in_template = False prev_c = None for i, c in enumerate(s): if not in_template: if c == '{' and c == prev_c: starts.append(i - 1) in_template = True n_open = 1 if in_template: if c == '{': n_open += 1 elif c == '}': n_close += 1 if n_open == n_close: ends.append(i) in_template = False n_open, n_close = 0, 0 prev_c = c # Remove all the templates starts.append(None) return ''.join(s[end + 1:start] for end, start in zip(ends, starts)) def remove_file(s): """Remove the 'File:' and 'Image:' markup, keeping the file caption. Parameters ---------- s : str String containing 'File:' and 'Image:' markup. Returns ------- str Сopy of `s` with all the 'File:' and 'Image:' markup replaced by their `corresponding captions `_. """ # The regex RE_P15 match a File: or Image: markup for match in re.finditer(RE_P15, s): m = match.group(0) caption = m[:-2].split('|')[-1] s = s.replace(m, caption, 1) return s def tokenize(content, token_min_len=TOKEN_MIN_LEN, token_max_len=TOKEN_MAX_LEN, lower=True): """Tokenize a piece of text from Wikipedia. Set `token_min_len`, `token_max_len` as character length (not bytes!) thresholds for individual tokens. Parameters ---------- content : str String without markup (see :func:`~gensim.corpora.wikicorpus.filter_wiki`). token_min_len : int Minimal token length. token_max_len : int Maximal token length. lower : bool Convert `content` to lower case? Returns ------- list of str List of tokens from `content`. """ # TODO maybe ignore tokens with non-latin characters? (no chinese, arabic, russian etc.) 
return [ utils.to_unicode(token) for token in utils.tokenize(content, lower=lower, errors='ignore') if token_min_len <= len(token) <= token_max_len and not token.startswith('_') ] def get_namespace(tag): """Get the namespace of tag. Parameters ---------- tag : str Namespace or tag. Returns ------- str Matched namespace or tag. """ m = re.match("^{(.*?)}", tag) namespace = m.group(1) if m else "" if not namespace.startswith("http://www.mediawiki.org/xml/export-"): raise ValueError("%s not recognized as MediaWiki dump namespace" % namespace) return namespace _get_namespace = get_namespace def extract_pages(f, filter_namespaces=False, filter_articles=None): """Extract pages from a MediaWiki database dump. Parameters ---------- f : file File-like object. filter_namespaces : list of str or bool Namespaces that will be extracted. Yields ------ tuple of (str or None, str, str) Title, text and page id. """ elems = (elem for _, elem in iterparse(f, events=("end",))) # We can't rely on the namespace for database dumps, since it's changed # it every time a small modification to the format is made. So, determine # those from the first element we find, which will be part of the metadata, # and construct element paths. elem = next(elems) namespace = get_namespace(elem.tag) ns_mapping = {"ns": namespace} page_tag = "{%(ns)s}page" % ns_mapping text_path = "./{%(ns)s}revision/{%(ns)s}text" % ns_mapping title_path = "./{%(ns)s}title" % ns_mapping ns_path = "./{%(ns)s}ns" % ns_mapping pageid_path = "./{%(ns)s}id" % ns_mapping for elem in elems: if elem.tag == page_tag: title = elem.find(title_path).text text = elem.find(text_path).text if filter_namespaces: ns = elem.find(ns_path).text if ns not in filter_namespaces: text = None if filter_articles is not None: if not filter_articles( elem, namespace=namespace, title=title, text=text, page_tag=page_tag, text_path=text_path, title_path=title_path, ns_path=ns_path, pageid_path=pageid_path): text = None pageid = elem.find(pageid_path).text yield title, text or "", pageid # empty page will yield None # Prune the element tree, as per # http://www.ibm.com/developerworks/xml/library/x-hiperfparse/ # except that we don't need to prune backlinks from the parent # because we don't use LXML. # We do this only for s, since we need to inspect the # ./revision/text element. The pages comprise the bulk of the # file, so in practice we prune away enough. elem.clear() _extract_pages = extract_pages # for backward compatibility def process_article(args, tokenizer_func=tokenize, token_min_len=TOKEN_MIN_LEN, token_max_len=TOKEN_MAX_LEN, lower=True): """Parse a Wikipedia article, extract all tokens. Notes ----- Set `tokenizer_func` (defaults is :func:`~gensim.corpora.wikicorpus.tokenize`) parameter for languages like Japanese or Thai to perform better tokenization. The `tokenizer_func` needs to take 4 parameters: (text: str, token_min_len: int, token_max_len: int, lower: bool). Parameters ---------- args : (str, bool, str, int) Article text, lemmatize flag (if True, :func:`~gensim.utils.lemmatize` will be used), article title, page identificator. tokenizer_func : function Function for tokenization (defaults is :func:`~gensim.corpora.wikicorpus.tokenize`). Needs to have interface: tokenizer_func(text: str, token_min_len: int, token_max_len: int, lower: bool) -> list of str. token_min_len : int Minimal token length. token_max_len : int Maximal token length. lower : bool Convert article text to lower case? 
Returns ------- (list of str, str, int) List of tokens from article, title and page id. """ text, lemmatize, title, pageid = args text = filter_wiki(text) if lemmatize: result = utils.lemmatize(text) else: result = tokenizer_func(text, token_min_len, token_max_len, lower) return result, title, pageid def init_to_ignore_interrupt(): """Enables interruption ignoring. Warnings -------- Should only be used when master is prepared to handle termination of child processes. """ signal.signal(signal.SIGINT, signal.SIG_IGN) def _process_article(args): """Same as :func:`~gensim.corpora.wikicorpus.process_article`, but with args in list format. Parameters ---------- args : [(str, bool, str, int), (function, int, int, bool)] First element - same as `args` from :func:`~gensim.corpora.wikicorpus.process_article`, second element is tokenizer function, token minimal length, token maximal length, lowercase flag. Returns ------- (list of str, str, int) List of tokens from article, title and page id. Warnings -------- Should not be called explicitly. Use :func:`~gensim.corpora.wikicorpus.process_article` instead. """ tokenizer_func, token_min_len, token_max_len, lower = args[-1] args = args[:-1] return process_article( args, tokenizer_func=tokenizer_func, token_min_len=token_min_len, token_max_len=token_max_len, lower=lower ) class WikiCorpus(TextCorpus): """Treat a Wikipedia articles dump as a read-only, streamed, memory-efficient corpus. Supported dump formats: * wiki--pages-articles.xml.bz2 * wiki-latest-pages-articles.xml.bz2 The documents are extracted on-the-fly, so that the whole (massive) dump can stay compressed on disk. Notes ----- Dumps for the English Wikipedia can be founded at https://dumps.wikimedia.org/enwiki/. Attributes ---------- metadata : bool Whether to write articles titles to serialized corpus. Warnings -------- "Multistream" archives are *not* supported in Python 2 due to `limitations in the core bz2 library `_. Examples -------- .. sourcecode:: pycon >>> from gensim.test.utils import datapath, get_tmpfile >>> from gensim.corpora import WikiCorpus, MmCorpus >>> >>> path_to_wiki_dump = datapath("enwiki-latest-pages-articles1.xml-p000000010p000030302-shortened.bz2") >>> corpus_path = get_tmpfile("wiki-corpus.mm") >>> >>> wiki = WikiCorpus(path_to_wiki_dump) # create word->word_id mapping, ~8h on full wiki >>> MmCorpus.serialize(corpus_path, wiki) # another 8h, creates a file in MatrixMarket format and mapping """ def __init__(self, fname, processes=None, lemmatize=utils.has_pattern(), dictionary=None, filter_namespaces=('0',), tokenizer_func=tokenize, article_min_tokens=ARTICLE_MIN_WORDS, token_min_len=TOKEN_MIN_LEN, token_max_len=TOKEN_MAX_LEN, lower=True, filter_articles=None): """Initialize the corpus. Unless a dictionary is provided, this scans the corpus once, to determine its vocabulary. Parameters ---------- fname : str Path to the Wikipedia dump file. processes : int, optional Number of processes to run, defaults to `max(1, number of cpu - 1)`. lemmatize : bool Use lemmatization instead of simple regexp tokenization. Defaults to `True` if you have the `pattern `_ package installed. dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional Dictionary, if not provided, this scans the corpus once, to determine its vocabulary **IMPORTANT: this needs a really long time**. filter_namespaces : tuple of str, optional Namespaces to consider. tokenizer_func : function, optional Function that will be used for tokenization. 
By default, use :func:`~gensim.corpora.wikicorpus.tokenize`. If you inject your own tokenizer, it must conform to this interface: `tokenizer_func(text: str, token_min_len: int, token_max_len: int, lower: bool) -> list of str` article_min_tokens : int, optional Minimum tokens in article. Article will be ignored if number of tokens is less. token_min_len : int, optional Minimal token length. token_max_len : int, optional Maximal token length. lower : bool, optional If True - convert all text to lower case. filter_articles: callable or None, optional If set, each XML article element will be passed to this callable before being processed. Only articles where the callable returns an XML element are processed, returning None allows filtering out some articles based on customised rules. Warnings -------- Unless a dictionary is provided, this scans the corpus once, to determine its vocabulary. """ self.fname = fname self.filter_namespaces = filter_namespaces self.filter_articles = filter_articles self.metadata = False if processes is None: processes = max(1, multiprocessing.cpu_count() - 1) self.processes = processes self.lemmatize = lemmatize self.tokenizer_func = tokenizer_func self.article_min_tokens = article_min_tokens self.token_min_len = token_min_len self.token_max_len = token_max_len self.lower = lower if dictionary is None: self.dictionary = Dictionary(self.get_texts()) else: self.dictionary = dictionary @property def input(self): return self.fname def get_texts(self): """Iterate over the dump, yielding a list of tokens for each article that passed the length and namespace filtering. Uses multiprocessing internally to parallelize the work and process the dump more quickly. Notes ----- This iterates over the **texts**. If you want vectors, just use the standard corpus interface instead of this method: Examples -------- .. sourcecode:: pycon >>> from gensim.test.utils import datapath >>> from gensim.corpora import WikiCorpus >>> >>> path_to_wiki_dump = datapath("enwiki-latest-pages-articles1.xml-p000000010p000030302-shortened.bz2") >>> >>> for vec in WikiCorpus(path_to_wiki_dump): ... pass Yields ------ list of str If `metadata` is False, yield only list of token extracted from the article. (list of str, (int, str)) List of tokens (extracted from the article), page id and article title otherwise. """ articles, articles_all = 0, 0 positions, positions_all = 0, 0 tokenization_params = (self.tokenizer_func, self.token_min_len, self.token_max_len, self.lower) texts = ( (text, self.lemmatize, title, pageid, tokenization_params) for title, text, pageid in extract_pages(bz2.BZ2File(self.fname), self.filter_namespaces, self.filter_articles) ) pool = multiprocessing.Pool(self.processes, init_to_ignore_interrupt) try: # process the corpus in smaller chunks of docs, because multiprocessing.Pool # is dumb and would load the entire input into RAM at once... 
for group in utils.chunkize(texts, chunksize=10 * self.processes, maxsize=1): for tokens, title, pageid in pool.imap(_process_article, group): articles_all += 1 positions_all += len(tokens) # article redirects and short stubs are pruned here if len(tokens) < self.article_min_tokens or \ any(title.startswith(ignore + ':') for ignore in IGNORED_NAMESPACES): continue articles += 1 positions += len(tokens) if self.metadata: yield (tokens, (pageid, title)) else: yield tokens except KeyboardInterrupt: logger.warning( "user terminated iteration over Wikipedia corpus after %i documents with %i positions " "(total %i articles, %i positions before pruning articles shorter than %i words)", articles, positions, articles_all, positions_all, self.article_min_tokens ) except PicklingError as exc: raise PicklingError( f'Can not send filtering function {self.filter_articles} to multiprocessing, ' 'make sure the function can be pickled.' ) from exc else: logger.info( "finished iterating over Wikipedia corpus of %i documents with %i positions " "(total %i articles, %i positions before pruning articles shorter than %i words)", articles, positions, articles_all, positions_all, self.article_min_tokens ) self.length = articles # cache corpus length finally: pool.terminate() import logging import re from smart_open import open from xml.etree import cElementTree import json import pandas as pd from urllib.parse import urlparse from gensim.corpora.wikicorpus import get_namespace, filter_wiki from gensim.scripts.segment_wiki import extract_page_xmls from os import listdir from os.path import isfile, join logger = logging.getLogger(__name__) def find_sources(text, sources_translations, footnote_pattern, url_pattern): sources = [] for footnote in footnote_pattern.findall(text): footnote_title = list(footnote)[0].replace(" ", "").lower() footnote_content = list(footnote)[1].split("\n*")[1:] if footnote_title in sources_translations: for raw_source in footnote_content: sources += url_pattern.findall(raw_source) return sources def clean_sources(sources): cleaned_sources = [] for source in sources: parse = urlparse(source) if ( (parse.path == "" or parse.path == "/") and parse.params == "" and parse.query == "" ): continue cleaned_sources.append(source) return cleaned_sources def get_pages_from_wiki_dump(wiki_dump_path, max_doc_count=0): sources_translations = ["quellen", "sources", "quelle", "source"] category_pattern = re.compile("\[\[(Category|Kategorie|Catégorie):(.*?)\]\]") footnote_pattern = re.compile(r"==(.+?)==(.+?)\n *\n", flags=re.DOTALL) url_pattern = re.compile(r"https?://[^\s|\]]+") blank_pattern = re.compile(r"^\s*$") with open(wiki_dump_path, "rb") as xml_fileobj: page_xmls = extract_page_xmls(xml_fileobj) i = 0 wrong_ns = 0 no_sources = 0 no_text = 0 redirect = 0 docs = [] for i, page_xml in enumerate(page_xmls): elem = cElementTree.fromstring(page_xml) filter_namespaces = ("0",) namespace = get_namespace(elem.tag) ns_mapping = {"ns": namespace} text_path = "./{%(ns)s}revision/{%(ns)s}text" % ns_mapping title_path = "./{%(ns)s}title" % ns_mapping ns_path = "./{%(ns)s}ns" % ns_mapping title = elem.find(title_path).text text = elem.find(text_path).text ns = elem.find(ns_path).text if ns not in filter_namespaces: wrong_ns += 1 continue try: categories = [c for _, c in category_pattern.findall(text)] sources = find_sources( text, sources_translations, footnote_pattern, url_pattern ) cleaned_text = category_pattern.sub("", text) cleaned_text = footnote_pattern.sub("", cleaned_text) # replace Wikipedia template 
links cleaned_text = re.sub(r'\{\{w\|(.*?)\|(.*?)\}\}', r'\2', cleaned_text) cleaned_text = filter_wiki(cleaned_text) passages = [ passage for passage in cleaned_text.split("\n\n") if blank_pattern.match(passage) == None ] sources = clean_sources(sources) if len(" ".join(passages).split()) == 0: no_text += 1 continue if "#REDIRECT" in cleaned_text or "#redirect" in cleaned_text: redirect += 1 continue if sources == []: no_sources += 1 continue docs.append( { "title": title, "text": passages, "categories": categories, "sources": sources, } ) if 0 < max_doc_count < len(docs): break except (TypeError, ValueError) as e: logger.error(f"Cannot read page #{i} - {title}: {e}") print( "Pages read: {}\nPages returned: {}\nWrong namespace: {}\nNo sources: {}\nNo text: {}\nRedirect: {}".format( i + 1, len(docs), wrong_ns, no_sources, no_text, redirect ) ) return docs def stats(json_path, csv_path, save_csv): titles = [] num_words = [] num_sources = [] files = [ join(json_path, f) for f in listdir(json_path) if isfile(join(json_path, f)) and join(json_path, f)[-4:] == "json" ] for filename in files: with open(filename) as json_file: doc = json.load(json_file) title = doc["title"] text = " ".join(doc["text"]) sources = doc["sources"] if len(text.split()) == 0: print(title) print(text) titles.append(title) num_words.append(len(text.split())) num_sources.append(len(sources)) data = {"title": titles, "num_words": num_words, "num_sources": num_sources} df = pd.DataFrame(data=data) if save_csv: df.to_csv(csv_path, index=False) print(df.describe()) def read_index(index_path): with open(index_path, "r") as f: data = f.read() index = {} for line in data.split("\n"): elems = line.split("\t") if len(elems) == 2: index[elems[0]] = int(elems[1]) return index def write_index(index, index_path): with open(index_path, "w") as f: for k, v in index.items(): f.write("{}\t{:06d}\n".format(k, v)) docs = get_pages_from_wiki_dump(fp, 100) docs[0] source = ''' {{date|November 13, 2004}} {{Brazil}} {{w|Hu Jintao|Hu Jintao}}, the {{w|President of the People's Republic of China|President}} of the [[People's Republic of China]] had lunch today with the {{w|President of Brazil|President}} of [[Brazil]], {{w|Luiz Inácio Lula da Silva|Luiz Inácio Lula da Silva}}, at the ''Granja do Torto'', the President's country residence in the {{w|Brazilian Federal District|Brazilian Federal District}}. Lunch was a traditional Brazilian {{w|barbecue|barbecue}} with different kinds of meat. Some Brazilian ministers were present at the event: {{w||}} (Economy), {{w|pt:|E}} ({{w|Ministry of Science and Technology (Brazil)|Science and Technology}}), {{w||}} (Agriculture), {{w||}} (Development), {{w|Celso Amorim|Celso Amorim}} ({{w|Ministry of External Relations (Brazil)|Exterior Relations}}), {{w|Dilma Rousseff|Dilma Rousseff}} (Mines and Energy). Also present were {{w|pt:|}} ({{w|Vale (mining company)|Vale do Rio Doce}} company president) and ({{w|Petrobras|Petrobras}}, government oil company, president). This meeting is part of a new {{w|political economy|political economy}} agreement between Brazil and China where Brazil has recognized mainland China's {{w|socialist market economy|market economy}} status, and China has promised to buy more {{w|economy of Brazil|Brazilian products}}. {{haveyoursay}} == Sources == {{wikipedia|Workers' Party (Brazil)|Brazilian Workers's Party}} *{{source | url = http://br.news.yahoo.com/041113/25/p0en.html | title = Presidente da China almoça churrasco com Lula | author = {{w|Agência Estado}} | pub = Yahoo! 
Notícias | date = November 13, 2004 | lang = pt | brokenURL = true | archiveurl = http://web.archive.org/web/20051030032711/http://br.news.yahoo.com/041113/25/p0en.html | archivedescription = archived on archive.org, 2005-10-30 }} *{{source |url=http://www.highbeam.com/doc/1P1-102429439.html |title=Chinese president treated to typical Brazilian barbecue |author={{w|Associated Press}} |pub=HighBeam Research |date=November 13, 2004 | brokenURL = true | archiveurl = http://web.archive.org/web/20160421214654/https://www.highbeam.com/doc/1P1-102429439.html | archivedescription = archived on archive.org, 2016-04-21}} *{{source|url=http://news.bbc.co.uk/2/low/americas/4008499.stm |title=Brazil backs China on trade bid |author= |pub=BBC News |date=November 12, 2004}} *{{source|url=http://www.chinadaily.com.cn/english/doc/2004-05/24/content_333379.htm |title=Brazil sees market economy in China |author= |pub=China Daily |date=May 24, 2004}} ''' print(source) print(re.sub(r'\{\{w\|(.*?)\|(.*?)\}\}', r'\2', source)){{date|November 13, 2004}} {{Brazil}} , the President of the [[People's Republic of China]] had lunch today with the President of [[Brazil]], , at the ''Granja do Torto'', the President's country residence in the Brazilian Federal District. Lunch was a traditional Brazilian barbecue with different kinds of meat. Some Brazilian ministers were present at the event: (Economy), (Science and Technology), (Agriculture), Luiz (Development), (Exterior Relations), (Mines and Energy). Also present were (Vale do Rio Doce company president) and (Petrobras, government oil company, president). This meeting is part of a new political economy agreement between Brazil and China where Brazil has recognized mainland China's market economy status, and China has promised to buy more Brazilian products. {{haveyoursay}} == Sources == {{wikipedia|Wor[...]Creating Nodes using Python "This is a code that anyone who wants to can use to easiy create, update import or delete data and create relationships in neo4j by using Python"from py2neo import Graph, Node, Relationship import getpass print("Welcome to Neo4j using Python \n") while True: try: username = input("\nPlease Enter your Neo4j Database Username to Continue \n") password = getpass.getpass("\nPlease Enter your Neo4j Database Password to Continue \n") break except: print("\nIncorrect Username or Password Try Again \n") g = Graph(auth = (username,password)) while True: input_val = int(input('\nPlease select one the options given below \n 1. To Create a Node \n 2. To Create a Relationship \n 3. To Load a CSV File \n 4. To Change/Update a Property Name \n 5. To Delete a Node \n Press 0 to Exit \n \n')) if input_val == 1: label_name = input("\nEnter the Label Name \n") property_val = input('\nEnter the Properties \n') query = "CREATE (n:" + label_name + "{" + property_val + "})" g.run(query) print("\nNode Created \n") elif input_val == 2: label_name1 = input("\nEnter the First Label Name \n") label_name2 = input("\nEnter the Second Label Name \n") relationship_condition1 = input("\nEnter the Condition for the First Label\n") relationship_condition2 = input("\nEnter the Condition for the Second Label\n") relationship_name = input("Enter the Name of the Relationship \n") query = "MATCH (n:" + label_name1 + ")," + "(m:" + label_name2 + ")" + "WHERE n." +relationship_condition1 + "=" + " m." 
+ relationship_condition2 +" CREATE " + "(n)-" + "[:" + relationship_name + "]" + "->(m)" g.run(query) print("\nRelationship Created \n") elif input_val == 3: file_name = input("\nEnter the Name of the File \n") label_name = input("\nEnter the Label Name \n") property_val = input("\nEnter the Properties \n") string_list = [] string_val = "" for i in property_val: if i != ",": string_val = string_val + i elif i == ",": string_list.append(string_val) string_val = "" length_string = len(string_list) final_string = "" j = 0 string_val = "" for i in string_list: if string_val == "": string_val = i + ":column[" + str(j) + "]" final_string = final_string + string_val j = j+1 elif string_val != "": string_val = "," + i + ":column[" + str(j) + "]" final_string = final_string + string_val j = j+1 query = 'LOAD CSV FROM "file:///' + file_name + '"' + " AS " + "column" + " CREATE (n:" + label_name + "{" + final_string + "})" g.run(query) print(query) print("\nFile Successfully Imported \n") elif input_val == 4: operation_to_be_performed = int(input("\n Select any one of the Following Options \n 1. To Create a New Property for a Specific Node \n 2. To Update an Existing Property for a Specific Node \n")) if operation_to_be_performed == 1: label_name = input("\nEnter the Label Name \n") label_condition = input('\nEnter the Condition for the Node in the format (Property Name = "Property Value") \n') updated_condition = input('\n Enter the Name of the New Property in the format (Property Name = "Property Value") \n') query = "MATCH (n:" + label_name + ")" + " WHERE n." + label_condition +" SET n." + updated_condition g.run(query) print("\nProperty Created Successfully \n") elif operation_to_be_performed == 2: label_name = input("\nEnter the Label Name \n") label_condition = input('\nEnter the Condition for the Node in the format (Property Name = "Property Value") \n') updated_condition = input('\n Enter the Name of the New Property in the format (Property Name = "Property Value") \n') query = "MATCH (n:" + label_name + ")" + " WHERE n." + label_condition +" SET n." + updated_condition g.run(query) print("\nProperty Updated Successfully \n") elif input_val == 5: label_name = input("\nEnter the Label Name \n") label_condition = input("\nEnter the Condition for the Node in the format (Property Name = Property Value) \n") query = "MATCH (n:" + label_name + ")" + " WHERE n." + label_condition + " DELETE n" g.run(query) print("\nNode Deleted Successfully \n") elif input_val == 0: break print("Successfully Logged Out")Welcome to Neo4j using Python Please Enter your Neo4j Database Username to Continue neo4j Please Enter your Neo4j Database Password to Continue ········ Please select one the options given below 1. To Create a Node 2. To Create a Relationship 3. To Load a CSV File 4. To Change/Update a Property Name 5. To Delete a Node Press 0 to Exit 1 Enter the Label Name TestLabel12345 Enter the Properties name:"Test Label",age:20,info:"This is a Test Node" Node Created Please select one the options given below 1. To Create a Node 2. To Create a Relationship 3. To Load a CSV File 4. To Change/Update a Property Name 5. To Delete a Node Press 0 to Exit 0 Successfully Logged OutFunctions for creating annotated dataset for binary sequence classification. 
The sequence in this case is a sentence, and it is a positive sample if deemed relevant in the data pre-processing# necessary library imports import pandas as pd import numpy as np df_train_cleaned = pd.read_pickle("../data_frames/df_train_cleaned.pkl") df_relevant_sentences = pd.read_pickle("../data_frames/df_relevant_sentences_lemma_stop.pkl") # create the dataset with the corresponding labels def label_data(df, df_relevant_sentences): data_map = {} num_removed = 0 for index, row in df.iterrows(): sentences = row['context_raw'] answer = row['correct_answer_raw'] sent_with_ans_id = row['answer_location'] relevant_sentence_ids = df_relevant_sentences.iloc[index]['ranked_matching_sentence_ids'] sent_ids = [sent_with_ans_id] count = 0 # add (max 3 including sentence with answer) highest ranked sententces for sent_id in relevant_sentence_ids: if count < 2 and sent_id != sent_with_ans_id: sent_ids.append(sent_id) count += 1 # label all sentences and add to map for idx, sent in enumerate(sentences): key = ' '.join(sent) label = 0 if idx in sent_ids: # sentence is relevant! label = 1 if key in data_map: if label > 0: data_map[key]['label'] = label else: data_point = {'context_nr': index, 'label': label, 'sentence': sent } data_map[key] = data_point # format labels and add answer labels for v in data_map.values(): v['label'] = int(v['label']) labeled_data = list(data_map.values()) print('num data points: ', len(labeled_data)) return labeled_data labeled_data = label_data(df_train_cleaned, df_relevant_sentences) labeled_df = pd.DataFrame(labeled_data) labeled_df.to_pickle("./data/labeled_sentence_classification_training_data.pkl") labeled_df.head()num data points: 8280!git clone https://github.com/Namsik-Yoon/pygcn.git import os import sys os.chdir('/content/pygcn') sys.path.insert(1, '/content/pygcn') !python setup.py install import random import numpy as np import scipy.sparse as sp import torch def encode_onehot(labels): classes = set(labels) classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)} labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32) return labels_onehot def normalize(mx): """Row-normalize sparse matrix""" rowsum = np.array(mx.sum(1)) r_inv = np.power(rowsum, -1).flatten() r_inv[np.isinf(r_inv)] = 0. 
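# Left-multiply by a sparse diagonal matrix of the reciprocal row sums, so each row is rescaled to sum to 1 (rows whose sum was zero get a factor of 0).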
r_mat_inv = sp.diags(r_inv) mx = r_mat_inv.dot(mx) return mx def sparse_mx_to_torch_sparse_tensor(sparse_mx): """Convert a scipy sparse matrix to a torch sparse tensor.""" sparse_mx = sparse_mx.tocoo().astype(np.float32) indices = torch.from_numpy( np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64)) values = torch.from_numpy(sparse_mx.data) shape = torch.Size(sparse_mx.shape) return torch.sparse.FloatTensor(indices, values, shape) path="/content/pygcn/data/cora/" dataset="cora" ## content 데이터 ## [논문의 아이디,각 논문의 feature, 논문의 장르] idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset), dtype=np.dtype(str)) idx_features_labels ## 각 논문의 feature features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32) ## 논문의 장르(onehot) labels = encode_onehot(idx_features_labels[:, -1]) idx = np.array(idx_features_labels[:, 0], dtype=np.int32) idx_map = {j: i for i, j in enumerate(idx)} ## 논문 id 맵핑 dict ## cite 데이터 ## [참조된 논문, 참조한 논문] edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset), dtype=np.int32) edges = np.array(list(map(idx_map.get, edges_unordered.flatten())), dtype=np.int32).reshape(edges_unordered.shape) ## cite 데이터로 directed graph의 adj matrix 생성 adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])), shape=(labels.shape[0], labels.shape[0]), dtype=np.float32) adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj) features = normalize(features) adj = normalize(adj + sp.eye(adj.shape[0])) features = torch.FloatTensor(np.array(features.todense())) labels = torch.LongTensor(np.where(labels)[1]) adj = sparse_mx_to_torch_sparse_tensor(adj) features,features.shape labels,labels.shape adj,adj.shape idx_train = torch.LongTensor(range(1200)) idx_val = torch.LongTensor(range(1200,1500)) idx_test = torch.LongTensor(range(1500,2708)) import torch import torch.nn as nn import torch.optim as optim random_seed = 2021 torch.manual_seed(random_seed) torch.cuda.manual_seed(random_seed) torch.cuda.manual_seed_all(random_seed) # if use multi-GPU torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False np.random.seed(random_seed) random.seed(random_seed) class GCN_layer(nn.Module): def __init__(self, in_features, out_features, A): super(GCN_layer, self).__init__() self.in_features = in_features self.out_features = out_features self.A = A self.fc = nn.Linear(in_features, out_features) def forward(self, X): return self.fc(torch.spmm(self.A, X)) #이웃 정보 종합 class GCN(nn.Module): def __init__(self, num_feature, num_class, A): super(GCN, self).__init__() self.feature_extractor = nn.Sequential( GCN_layer(num_feature, 16, A), nn.ReLU(), GCN_layer(16, num_class, A) ) def forward(self, X): return self.feature_extractor(X) class FCN(nn.Module): def __init__(self, num_feature, num_class): super(FCN, self).__init__() self.feature_extractor = nn.Sequential( nn.Linear(num_feature, 16), nn.ReLU(), nn.Linear(16, num_class) ) def forward(self, x): return self.feature_extractor(x) def train(model, Loss, optimizer, num_epochs): train_loss_arr = [] test_loss_arr = [] best_test_loss = 99999999 best_ACC = 0 early_stop, early_stop_max = 0., 10. 
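# Note: the loop below relies on the global `criterion` rather than the `Loss` argument it receives.
# It also never calls optimizer.zero_grad(), so gradients accumulate across epochs; a conventional PyTorch loop would call optimizer.zero_grad() before train_loss.backward().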
for epoch in range(num_epochs): # Forward Pass model.train() output = model(features) train_loss = criterion(output[idx_train], labels[idx_train]) # Backward and optimize train_loss.backward() optimizer.step() train_loss_arr.append(train_loss.data) if epoch % 20 == 0: model.eval() output = model(features) val_loss = criterion(output[idx_val], labels[idx_val]) test_loss = criterion(output[idx_test], labels[idx_test]) val_acc = accuracy(output[idx_val], labels[idx_val]) test_acc = accuracy(output[idx_test], labels[idx_test]) test_loss_arr.append(test_loss) if best_ACC < val_acc: best_ACC = val_acc early_stop = 0 final_ACC = test_acc print('Epoch [{}/{}], Train Loss: {:.4f}, Test Loss: {:.4f}, Test ACC: {:.4f} *'.format(epoch, num_epochs, train_loss.data, test_loss, test_acc)) else: early_stop += 1 print('Epoch [{}/{}], Train Loss: {:.4f}, Test Loss: {:.4f}, Test ACC: {:.4f}'.format(epoch, num_epochs, train_loss.data, test_loss, test_acc)) if early_stop >= early_stop_max: break print("Final Accuracy::", final_ACC) def accuracy(output, labels): preds = output.max(1)[1].type_as(labels) correct = preds.eq(labels).double() correct = correct.sum() return correct / len(labels) # FCN 학습 돌려서 epoch에 따른 Loss 확인 model = FCN(features.size(1) , labels.unique().size(0)) criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=0.01, weight_decay=0.0001) train(model, criterion, optimizer, 1000) # GCN 학습 돌려서 epoch에 따른 Loss 확인 model = GCN(features.size(1) , labels.unique().size(0), adj) criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=0.01, weight_decay=0.0001) train(model, criterion, optimizer, 1000)Generate Sentinel-1 annual medianTo be used as1. geometric reference%matplotlib inline from sarcube import * # set up dask import dask from dask.delayed import delayed from dask.distributed import LocalCluster, Client import tempfile n_workers = 1 mem_per_worker = 4e9 # 8e9 is 8GB (8,000,000,000 bytes) cluster = LocalCluster(local_dir=tempfile.gettempdir(), n_workers=n_workers, threads_per_worker=2, memory_limit=mem_per_worker) client = Client(cluster) client # make a grid of tiles import geopandas shpfile = geopandas.read_file('../../sensor_data_maps/Albers_Australia_Coast_and_Islands.shp') dc = sarcube() from datacube.helpers import write_geotiff from datacube.utils import geometry import os def run_tile(x, y, tilename, outputdir = 's1_median', redo = False): if not os.path.exists(outputdir): os.mkdir(outputdir) band = 'vv' year = '2018' outpath = os.path.join(outputdir,'S1_%s_%s_%s.nc'%(band.upper(), year, tilename.replace(',','_'))) if os.path.exists(outpath): if not redo: return else: os.system('rm %s'%outpath) data = dc.load(product ='s1_gamma0_scene', speckle_filter=None, db=False, group_by = 'solar_day', measurements=[band], resampling='bilinear', dask_chunks={'time': -1, 'x': 500, 'y': 500}, resolution=(-25, 25), crs = 'EPSG:3577', x = x, y = y, time = ('%s-01-01'%year, '%s-12-31'%year), ) if not 'vv' in data: return ref = xr.apply_ufunc(np.nanmedian, data.vv, kwargs = {'axis':-1}, input_core_dims=[['time']], dask='parallelized', output_dtypes=[np.float32]) ref.compute() if (~np.isnan(ref)).sum().values==0: print("Empty tile",tilename) return ref.attrs['crs'] = geometry.CRS(data.attrs['crs']).wkt #write_geotiff(outpath, ref) ref.to_netcdf(outpath) return outpath for index in shpfile.index.values: print("tile",shpfile.loc[index]['label']) x = (shpfile.loc[index]['X_MIN'], shpfile.loc[index]['X_MAX']) y = (shpfile.loc[index]['Y_MIN'], 
shpfile.loc[index]['Y_MAX']) #if not shpfile.loc[index]['label'] == '15,-40': continue outfile = run_tile(x, y, shpfile.loc[index]['label']) !ls /g/data1a/u46/users/fxy120/radar/example_applications/s1_median/ x,y,tilename=x, y, shpfile.loc[index]['label'] year='2017' data = dc.load(product ='s1_gamma0_scene', speckle_filter=None, db=False, group_by = 'solar_day', measurements=['vv','vh'], resampling='bilinear', dask_chunks={'time': -1, 'x': 500, 'y': 500}, resolution=(-25,25), crs = 'EPSG:3577', x = x, y = y, time = ('%s-01-01'%year, '%s-12-31'%year), ) 'vv' in data from hdstats import nangeomedian_pcm import dask def reshape_for_geomedian(ds, axis='time'): dims = set(v.dims for v in ds.data_vars.values()) if len(dims) != 1: raise ValueError("All bands should have same dimensions") dims = dims.pop() if len(dims) != 3: raise ValueError("Expect 3 dimensions on input") if axis not in dims: raise ValueError(f"No such axis: {axis}") dims = tuple(d for d in dims if d != axis) + ('band', axis) nodata = set(getattr(v, 'nodata', None) for v in ds.data_vars.values()) if len(nodata) == 1: nodata = nodata.pop() else: nodata = None # xx: {y, x}, band, time xx = ds.to_array(dim='band').transpose(*dims) if nodata is not None: xx.attrs.update(nodata=nodata) return xx def xr_geomedian(ds, axis='time', where=None, **kwargs): """ :param ds: xr.Dataset|xr.DataArray|numpy array Other parameters: **kwargs -- passed on to pcm.gnmpcm maxiters : int 1000 eps : float 0.0001 num_threads: int| None None """ def norm_input(ds, axis): if isinstance(ds, xr.DataArray): xx = ds if len(xx.dims) != 4: raise ValueError("Expect 4 dimensions on input: y,x,band,time") if axis is not None and xx.dims[3] != axis: raise ValueError(f"Can only reduce last dimension, expect: y,x,band,{axis}") return None, xx, xx.data elif isinstance(ds, xr.Dataset): xx = reshape_for_geomedian(ds, axis) return ds, xx, xx.data else: # assume numpy or similar xx_data = ds if xx_data.ndim != 4: raise ValueError("Expect 4 dimensions on input: y,x,band,time") return None, None, xx_data ds, xx, xx_data = norm_input(ds, axis) is_dask = dask.is_dask_collection(xx_data) if where is not None: if is_dask: raise NotImplementedError("Dask version doesn't support output masking currently") if where.shape != xx_data.shape[:2]: raise ValueError("Shape for `where` parameter doesn't match") set_nan = ~where else: set_nan = None if is_dask: if xx_data.shape[-2:] != xx_data.chunksize[-2:]: xx_data = xx_data.rechunk(xx_data.chunksize[:2] + (-1, -1)) data = da.map_blocks(lambda x: nangeomedian_pcm(x, **kwargs), xx_data, name='geomedian', dtype=xx_data.dtype, drop_axis=3) else: data = nangeomedian_pcm(xx_data, **kwargs) if set_nan is not None: data[set_nan, :] = np.nan if xx is None: return data dims = xx.dims[:-1] cc = {k: xx.coords[k] for k in dims} xx_out = xr.DataArray(data, dims=dims, coords=cc) if ds is None: xx_out.attrs.update(xx.attrs) return xx_out ds_out = xx_out.to_dataset(dim='band') for b in ds.data_vars.keys(): src, dst = ds[b], ds_out[b] dst.attrs.update(src.attrs) return ds_out test = data.apply(xr_geomedian)目標: 使用 Seaborn 自帶的dataset, 利用 PANDAS 處理來繪製資料集的可是畫圖表重點: 不同型態的類別要套用的對應圖像是不一樣的作業: 取得另一個 dataset: titanic, (1) 做條形圖(2) 利用 facet grid 繪圖並分析(3) 繪製小提琴圖import pandas as pd from matplotlib import pyplot as plt import seaborn as sns # 取得資料集 df = sns.load_dataset('titanic') df df.info() # 直接使用 PANDAS DataFrame 當作參數 # 條形圖()顯示分類變數和連續變數之間的關係。數據以矩形條表示,其中條的長度表示該類別中數據的比例。 sns.barplot( x='sex', y ='survived', hue='class', data=df ) 
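# barplot estimates the mean of `survived` for each sex, split by `class`, and draws bootstrapped confidence intervals as error bars.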
plt.show()在上面的示例中,我們可以看到每個班級中男性和女性的平均存活率。從情節中,我們可以理解,女性存活人數比男性多。在男性和女性中,更多的存活率來自頭等艙。barplot 中的特殊情況是顯示每個類別中的觀測值的"否",而不是計算第二個變數的統計資訊。 繪製數據子集的小倍數。 FacetGrid 示例,FacetGrid 類有助於可視化一個變數的分佈,以及使用多個面板在數據集子集中分別顯示多個變數之間的關係。# 瞭解性別在各艙等的分布的存活率 g = sns.FacetGrid( df, col='sex' ) g.map( sns.barplot, 'pclass', 'survived', ci=None, order=[1,2,3], palette='Blues' ) plt.show() # 先檢視各艙位存活人數,此時可以使用groupby函數進行分類 # 其中 survived=1表示存活,survived=0表示死亡,將survived加總即為各艙等生存人數。 df.groupby( 'pclass' ).survived.sum() # 加上性別 survived = df.groupby( ['pclass','sex'] ).survived.sum() survived survived.plot( kind='bar' ) plt.show() # 使用 pd.crosstab 函數繪製交叉表,交叉表可以很直觀的依據艙位等級及性別來查看存活人數及死亡人數。 # 繪製堆疊條形圖,x軸代表依據艙等分成男性及女性,y軸代表人數,其中藍色代表死亡人數,橘色代表存活人數。 survived_counts = pd.crosstab( [df.pclass, df.sex], df.survived ) survived_counts survived_counts.plot.bar( stacked=True ) plt.show() # 直接使用 PANDAS Dataframe 當作參數 # 條形圖()顯示分類變數和連續變數之間的關係。數據以矩形條表示,其中條的長度表示該類別中數據的比例。 sns.violinplot( data=survived_counts ) plt.show() # 瞭解性別在各艙等的分布的存活率 ''' 在這邊coding g = sns.FacetGrid g.map h = sns.FacetGrid h.map ''' g = sns.FacetGrid( df, col='survived' ) g.map( plt.hist, 'pclass' ) plt.show() h = sns.FacetGrid( df, col='survived' ) h.map( plt.hist, 'sex' ) plt.show()Read shapefilesf_path = os.path.join('..', 'vehicle_based_heatmap', 'new_link', 'new_link_latlon') link_sf = shapefile.Reader(sf_path) shapeRecs = link_sf.shapeRecords() link_map = {} for data in shapeRecs: link_map[data.record[1]] = {} link_map[data.record[1]]['points'] = [] for _item in data.shape.points: link_map[data.record[1]]['points'].append([round(_item[0], 5), round(_item[1], 5)]) link_map[data.record[1]]['ffs'] = data.record[4] link_map[data.record[1]]['lanes'] = data.record[5] link_map[data.record[1]]['length'] = data.record[6] link_map[data.record[1]]['isOD'] = data.record[7] # print(data.record) # print(link_map) # breakRead link congestion file && process to geojson formatvehloc_path = os.path.join('..', '..', '..', 'data', 'input_files_MckeesRocks_SPC', 'link_cong', 'link_cong_raw.txt') # create a new python dict to contain our geojson data, using geojson format geojson_car = {'type':'FeatureCollection', 'features':[]} geojson_truck = {'type':'FeatureCollection', 'features':[]} with open(vehloc_path, 'r') as infile: lines = infile.readlines() for line in lines: data = line.rstrip().split(' ') _link_ID = data[0] _interval = int(data[1])//180 if (link_map[_link_ID]['isOD'] == '0') and (_interval % 2 == 0): _interval //= 2 if _interval < 9: _car_flow = float(data[2]) _truck_flow = float(data[3]) _car_fft = float(data[6]) _truck_fft = float(data[7]) _car_cong = float(data[4])/_car_fft _truck_cong = float(data[5])/_truck_fft if data[8] == 'inf': _car_speed = float(link_map[_link_ID]['ffs']) else: _car_speed = float(data[8]) if data[9] == 'inf': _truck_speed = float(link_map[_link_ID]['ffs']) * 0.8 else: _truck_speed = float(data[9]) # create a feature template to fill in feature_car = {'type':'Feature', 'properties':{}, 'geometry':{'type': 'LineString', 'coordinates':[] }} feature_truck = {'type':'Feature', 'properties':{}, 'geometry':{'type': 'LineString', 'coordinates':[] }} for i in range(len(link_map[_link_ID]['points'])): feature_car['geometry']['coordinates'].append(link_map[_link_ID]['points'][i]) feature_truck['geometry']['coordinates'].append(link_map[_link_ID]['points'][i]) feature_car['properties']['ID'] = _link_ID feature_car['properties']['ffs_car'] = link_map[_link_ID]['ffs'] feature_car['properties']['lanes'] = link_map[_link_ID]['lanes'] feature_car['properties']['length'] = 
link_map[_link_ID]['length'] feature_car['properties']['time_point'] = _interval feature_car['properties']['car_flow'] = _car_flow feature_car['properties']['car_fft'] = _car_fft feature_car['properties']['car_cong'] = _car_cong feature_car['properties']['car_speed'] = _car_speed feature_truck['properties']['ID'] = _link_ID feature_truck['properties']['ffs_car'] = link_map[_link_ID]['ffs'] feature_truck['properties']['lanes'] = link_map[_link_ID]['lanes'] feature_truck['properties']['length'] = link_map[_link_ID]['length'] feature_truck['properties']['time_point'] = _interval feature_truck['properties']['truck_flow'] = _truck_flow feature_truck['properties']['truck_fft'] = _truck_fft feature_truck['properties']['truck_cong'] = _truck_cong feature_truck['properties']['truck_speed'] = _truck_speed # add this feature to the list of features inside our dict geojson_car['features'].append(feature_car) geojson_truck['features'].append(feature_truck) geojson_str_car = json.dumps(geojson_car, indent=2) geojson_str_truck = json.dumps(geojson_truck, indent=2) # save the geojson result to a file output_filename_car = 'link_cong_car.geojson' with open(output_filename_car, 'w') as output_file_car: output_file_car.write('var data_cong_car = {};'.format(geojson_str_car)) output_filename_truck = 'link_cong_truck.geojson' with open(output_filename_truck, 'w') as output_file_truck: output_file_truck.write('var data_cong_truck = {};'.format(geojson_str_truck)) # how many features did we save to the geojson file? print('{} geotagged features saved to car file'.format(len(geojson_car['features']))) print('{} geotagged features saved to truck file'.format(len(geojson_truck['features'])))132552 geotagged features saved to car file 132552 geotagged features saved to truck filePythonにおける正規表現指定したパターンに一致する文字列を置換、抽出したい場合、正規表現がよく使われます。 今回は、自然言語処理において便利な正規表現をいくつか紹介します。 正規表現は、文章に前処理を行う際に活躍します。 subによる置換正規表現を使う際は、reをインポートします。 `re.sub`により、文字列の置換を行うことができます。import re s = "私は柴犬が好きです。" s = re.sub("柴犬", "シャム猫", s) # 文字列sの「柴犬」を「シャム猫」に置き換える print(s)複数の文字の指定以下の例では、複数の文字を別の文字に置き換えています。import re s = "私は黒犬と白猫が好きです。" s = re.sub("[犬猫]", "馬", s) # []内の各文字(「犬」、「猫」)を「馬」に置き換える print(s)[ ]内の各文字を、特定の文字に置き換えることができます。 否定以下の例では、ある特定の文字以外を置き換えています。import re s = "私は黒犬と白猫が好きです。" s = re.sub("[^犬猫]", "馬", s) # 「犬」、「猫」以外を「馬」に置き換える print(s)^を使うことで、「犬」、「猫」以外の全ての文字が「馬」と入れ替わりました。 繰り返し以下の例では、\+ の記号を使用することで文字の1回以上の繰り返しを表現しています。import re s = "私は柴犬犬犬犬犬犬犬犬犬犬犬犬犬が好きです。" s = re.sub("犬+", "犬", s) # 「犬」の繰り返しを「犬」に置き換える print(s)「犬」の繰り返しを、「犬」に置き換えました。 ルビの除去これまで学んできた正規表現のルールを組み合わせて、文字列からルビを除去します。import re s = "私は柴犬【しばいぬ】とシャム猫【しゃむねこ】が大好きです。" s = re.sub("【[^】]+】", "", s) # 【と】の間に】以外の文字が複数ある箇所を、空の文字列に置き換える print(s)`[^】]+`の箇所は、】以外の文字の繰り返しを意味します。 従って、`【[^】]+】`は【と】の間に】以外の文字が複数ある箇所を表します。 課題:以下のコードにおける文字列sから、正規表現を使ってルビを除去してみましょう。import re s = "白馬【はくば】に乗って、草原【そうげん】を駆けるのが夢です。"Copyright 2021 DeepMind Technologies Limited.Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License athttps://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
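For reference, here is one possible solution to the ruby-removal exercise above; it simply reuses the `【[^】]+】` pattern introduced earlier in that section (the expected output is shown as a comment):

```python
import re

s = "白馬【はくば】に乗って、草原【そうげん】を駆けるのが夢です。"
# Replace every span of the form 【...】 (the ruby reading) with the empty string.
s = re.sub("【[^】]+】", "", s)
print(s)  # 白馬に乗って、草原を駆けるのが夢です。
```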
This colab accompanies the paper 'Spurious normativity enhances learning of compliance and enforcement behavior in artificial agents' in PNAS 2022 by Koster et al.import numpy as np import matplotlib.pyplot as plt import pickle import scipy.stats import seaborn as sns import tempfile from google.colab import files import warnings warnings.simplefilter('ignore', category=RuntimeWarning)Load Data.f = tempfile.NamedTemporaryFile() !gsutil cp "gs://dm_spurious_normativity/spurious_normativity.pkl" {f.name} with open(f.name, 'rb') as pickle_file: data = pickle.load(pickle_file) population_data = data[0] probe_data = data[1]Population Data population_data contains data from the 3 conditions: * 'no rule', * 'important rule',* 'silly rule'Each of those conditions has 7 variables that were logged for eachpopulation.* Collective Return* Total Berries Eaten* Total Taboo Berries Eaten* Total Punishments* Total Misdirected Punishing* Fraction of Time Spent Marked* Fracton of Time Spent PoisonedEach entry is indexed by a combination of the condition and metric, e.g.:'important rule Collective Return'Each of those entries contains a list, containing different populations.5 for no rule, 15 for the other two conditions.Each population consists of a tuple: the data of the x and y axis to plot this metric in that particular condition of one population. Probe data probe_data contains 15 variables that respond to a probe task in oneexperimental condition. The variables are, for the no rules condition: * 'no_rule_berry_1' - how quickly berry 1 was approached, the actually poisonous berry.* 'no_rule_berry_2' - how quickly berry 2 was approached, the harmless berry that is taboo in the silly rules condition.* 'no_rule_berry_healthy' - how quickly other berries were approached.* 'no_rule_zap_marked' - how quickly a marked player was zapped.* 'no_rule_zap_unmarked' - how quickly the unmarked players were zapped.These metrics are repeated for the important_rule and silly_rule condition:* 'important_rule_berry_1'* 'important_rule_berry_2'* 'important_rule_healthy'* 'important_zap_marked'* 'important_zap_unmarked'* 'silly_rule_berry_1'* 'silly_rule_berry_2'* 'silly_rule_healthy' * 'silly_zap_marked'* 'silly_zap_unmarked'Each entry contains an array with the shape [N, 20]. N is the number ofindependent populations that were run and the 20 refers to the number of samplesalong the training trajectory for which the probes were ran. N = 5 for no_ruleand N = 15 for silly_rule and important_rule. Figure 4n_rows = 2 n_cols = 3 condition_legends = ['no rule', 'important rule', 'silly rule'] colors_per_condition = [(.8, .9, 25./255), (230./255, 25./255, 75./255), (60./255, 180./255, 75./255)] metrics_titles = ['Total Misdirected Punishing', 'Total Punishments', 'Fraction of Time Spent Marked', 'Fracton of Time Spent Poisoned', 'Total Taboo Berries Eaten', 'Collective Return'] alphabet = ['A. ', 'B. ', 'C. ', 'D. ', 'E. ', 'F. '] y_lims_per_metric = [(0, 10), (0, 60), (0, 1), (0, 0.7), (0, 120), (0, 5500)] plotcounter = 1 plt.figure(facecolor='white') fig, ax = plt.subplots(n_rows, n_cols, figsize=(25, n_rows*7), facecolor='w') for metric, letter, y_lims in zip( metrics_titles, alphabet, y_lims_per_metric): plt.subplot(n_rows, n_cols, plotcounter) for condition, line_color in zip(condition_legends, colors_per_condition): entry = condition + ' ' + metric condition_data = population_data[entry] # The data do not have the same shape so we need to put them on a # canvas of nans to concatenate them. 
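# The np.nanmean / np.nanstd calls below then ignore the unused NaN padding when averaging across populations of different lengths.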
data_frame_for_mean = np.empty((int(1e5), len(condition_data))) data_frame_for_mean.fill(np.nan) for p, population in enumerate(condition_data): trajectory = condition_data[p][1] data_frame_for_mean[0:trajectory.shape[0], p] = trajectory y = np.nanmean(data_frame_for_mean, axis=1) # SEM y_error = np.divide( np.nanstd(data_frame_for_mean, axis=1), np.sqrt(len(condition_data))) x = np.arange(0, 1e9, 1e4) plt.plot(x, y, color=line_color) plt.fill_between(x, y-y_error, y+y_error, alpha=0.4, color=line_color, label='_nolegend_') plt.title(letter + metric, fontsize=18, fontweight='bold') plt.legend(condition_legends, loc='best', fontsize=12) plt.xlabel('Timesteps trained', fontsize=14) plt.xlim(0, 1e9) plt.ylim(y_lims) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plotcounter += 1 plt.savefig('fig4.png', dpi=500) files.download('fig4.png') # Statistics for Fig 4 plot, t-tests per timebin populations = 15 timebins = 10 start_x = 0 end_x = 1e8 for tb in range(timebins): silly_means = np.zeros((populations)) important_means = np.zeros((populations)) for p in range(populations): silly_x = population_data['silly rule Collective Return'][p][0] silly_y = population_data['silly rule Collective Return'][p][1] silly_index = (silly_x > start_x) & (silly_x < end_x) silly_mean = np.nanmean(silly_y[silly_index]) silly_means[p] = silly_mean important_x = population_data['important rule Collective Return'][p][0] important_y = population_data['important rule Collective Return'][p][1] important_index = (important_x > start_x) & (important_x < end_x) important_mean = np.nanmean(important_y[important_index]) important_means[p] = important_mean t, p = scipy.stats.ttest_ind(silly_means, important_means) print('For timebin ', tb+1, ' from ', start_x, ' to ', end_x) print('Difference between silly and important rule condition:') print('t =', np.round(t, decimals=3), ', p =', np.round(p, decimals=4)) start_x += 1e8 end_x += 1e8For timebin 1 from 0 to 100000000.0 Difference between silly and important rule condition: t = -22.346 , p = 0.0 For timebin 2 from 100000000.0 to 200000000.0 Difference between silly and important rule condition: t = -15.622 , p = 0.0 For timebin 3 from 200000000.0 to 300000000.0 Difference between silly and important rule condition: t = 1.226 , p = 0.2305 For timebin 4 from 300000000.0 to 400000000.0 Difference between silly and important rule condition: t = 3.941 , p = 0.0005 For timebin 5 from 400000000.0 to 500000000.0 Difference between silly and important rule condition: t = 3.256 , p = 0.003 For timebin 6 from 500000000.0 to 600000000.0 Difference between silly and important rule condition: t = 2.429 , p = 0.0218 For timebin 7 from 600000000.0 to 700000000.0 Difference between silly and important rule condition: t = 0.213 , p = 0.8329 For timebin 8 from 700000000.0 to 800000000.0 Difference between silly and important rule condition: t = [...]Figure 5fig_5_conditions = ['important rule', 'silly rule'] fig_5_metrics = ['Total Punishments', 'Fracton of Time Spent Poisoned'] cutoffs = [(0, 2e8), (2e8, 4e8)] data_for_correlation = {} for condition in fig_5_conditions: for metric, cutoff in zip(fig_5_metrics, cutoffs): entry_name = condition + ' ' + metric data_in_entry = population_data[entry_name] mean_values = np.zeros(len(data_in_entry)) for i, d in enumerate(data_in_entry): x = d[0] y = d[1] index_vec = np.where((x > cutoff[0]) & (x < cutoff[1])) mean_values[i] = np.mean(y[index_vec]) data_for_correlation[entry_name] = mean_values fig = plt.figure(figsize=(5, 5), facecolor='white') ax = 
fig.add_subplot(111) sns.regplot(x=data_for_correlation['silly rule Fracton of Time Spent Poisoned'], y=data_for_correlation['silly rule Total Punishments'], color='green') sns.regplot(x=data_for_correlation['important rule Fracton of Time Spent Poisoned'], y=data_for_correlation['important rule Total Punishments'], color='red') plt.xlabel('Fraction of time spent poisoned (later)', fontsize=12) plt.ylabel('Punishments of players (early)', fontsize=12, labelpad=0) plt.title('Early punishment reduces later poisoning', fontweight='bold') plt.legend(['silly rule', 'important rule'], loc='upper right') plt.xticks([0, .2, .4, .6]) plt.savefig('fig5.png', dpi=500) files.download('fig5.png') # Statistics for Fig 5 plot sr_corr_pop = scipy.stats.pearsonr( data_for_correlation['silly rule Fracton of Time Spent Poisoned'], data_for_correlation['silly rule Total Punishments']) ir_corr_pop = scipy.stats.pearsonr( data_for_correlation['important rule Fracton of Time Spent Poisoned'], data_for_correlation['important rule Total Punishments']) print('Silly Rule: r =', np.round(sr_corr_pop[0], decimals=3), 'p =', np.round(sr_corr_pop[1], decimals=3)) print('Important Rule: r =', np.round(ir_corr_pop[0], decimals=3), 'p =', np.round(ir_corr_pop[1], decimals=3))Silly Rule: r = -0.39 p = 0.151 Important Rule: r = -0.799 p = 0.0Figure 6def error_line(var, color): ym = np.mean(var, axis=0) plt.plot(ym, color=color) ye = np.divide(np.nanstd(var, axis=0), np.sqrt(5)) # x axis is always 20 # because that is how often the agent was sampled during learning plt.fill_between(range(20), ym-ye, ym+ye, alpha=0.2, color=color) def populate_axis(): plt.ylim((0, 1)) y_label = 'Timesteps until termination' plt.ylabel(y_label, fontsize=12, labelpad=-10) plt.yticks([0, 1], [0, 1]) plt.yticks([0, 1], [30, 0]) plt.xlim((0, 20)) plt.xlabel('Timesteps trained', fontsize=12, labelpad=-10) plt.xticks([0, 20], ['0', '1e9']) plt.figure(facecolor='w') fig, ax = plt.subplots(2, 3, figsize=(15, 10), facecolor='w') plt.subplot(2, 3, 1) error_line(probe_data['no_rule_berry_1'], 'pink') error_line(probe_data['no_rule_berry_2'], 'teal') error_line(probe_data['no_rule_berry_healthy'], 'blue') populate_axis() plt.legend(['Poisonous', 'Healthy in this condition', 'Healthy in all conditions']) plt.title('B. Berries: No Rule', fontweight='bold') plt.subplot(2, 3, 2) error_line(probe_data['important_rule_berry_1'], 'pink') error_line(probe_data['important_rule_berry_2'], 'teal') error_line(probe_data['important_rule_healthy'], 'blue') populate_axis() plt.legend(['Poisonous and Taboo', 'Healthy in this condition', 'Healthy in all conditions']) plt.title('C. Berries: Important Rule', fontweight='bold') plt.subplot(2, 3, 3) error_line(probe_data['silly_rule_berry_1'], 'pink') error_line(probe_data['silly_rule_berry_2'], 'teal') error_line(probe_data['silly_rule_healthy'], 'blue') populate_axis() plt.legend(['Poisonous and Taboo', 'Taboo in this condition', 'Healthy in all conditions']) plt.title('D. Berries: Silly Rule', fontweight='bold') plt.subplot(2, 3, 4) error_line(probe_data['important_rule_berry_1'], 'red') error_line(probe_data['silly_rule_berry_1'], 'green') populate_axis() plt.legend(['important rule', 'silly rule']) plt.title('E. Poison berry', fontweight='bold') plt.subplot(2, 3, 5) error_line(probe_data['important_zap_marked'], 'red') error_line(probe_data['silly_zap_marked'], 'green') populate_axis() plt.legend(['important rule', 'silly rule']) plt.title('F. 
Punishing marked player', fontweight='bold') marked_player_important_mean = np.mean( probe_data['important_zap_marked'][:, 0:4], axis=1) marked_player_silly_mean = np.mean( probe_data['silly_zap_marked'][:, 0:4], axis=1) berry1_important_mean = np.mean( probe_data['important_rule_berry_1'][:, 4:8], axis=1) berry_1_silly_mean = np.mean( probe_data['silly_rule_berry_1'][:, 4:8], axis=1) ax = plt.subplot(2, 3, 6) # Multiply values by 30 because the probe episodes have 30 timesteps. sns.regplot(x=berry_1_silly_mean*30, y=marked_player_silly_mean*30, color='green', label='silly rule') sns.regplot(x=berry1_important_mean*30, y=marked_player_important_mean*30, color='red', label='important rule') plt.xlabel('Timesteps to approach poisoned berry (later)', fontsize=12) plt.ylabel('Timesteps to punish marked player (early)', fontsize=12, labelpad=0) plt.xticks([0, 5, 10, 15], [30, 25, 20, 15]) plt.yticks([2, 5, 8], [28, 25, 22]) plt.title('G. Early punishment reduces later poisoning', fontweight='bold') h, l = ax.get_legend_handles_labels() ax.legend(reversed(h), reversed(l), loc='upper right') plt.savefig('fig6.png', dpi=500) files.download('fig6.png') # Stats for Figure 6 F sr_corr_probe = scipy.stats.pearsonr( berry_1_silly_mean, marked_player_silly_mean) ir_corr_probe = scipy.stats.pearsonr( berry1_important_mean, marked_player_important_mean) print('Silly Rule: r =', np.round(sr_corr_probe[0], decimals=3), 'p =', np.round(sr_corr_probe[1], decimals=3)) print('Important Rule: r =', np.round(ir_corr_probe[0], decimals=3), 'p =', np.round(ir_corr_probe[1], decimals=3))Silly Rule: r = -0.46 p = 0.085 Important Rule: r = -0.864 p = 0.0KNN AlgorithmThe k-nearest neighbors (KNN) algorithm is a simple, supervised machine learning algorithm that can be used to solve both classification and regression problems. This algorithm classifies a given set of features based on a majority vote among the classes of its nearest neighbors.An object is classified by a plurality vote of its neighbors, with the object being assigned to the class most common among its ``k`` nearest neighbors (``k`` is a positive integer, typically small and normally an `odd` number). If ``k = 1``, then the object is simply assigned to the class of that single nearest neighbor. How does KNN work?* KNN calculates the Euclidean distance between an object and its neighbors; based on the number of neighbors denoted by `k`, the algorithm then classifies the object by a plurality vote of those neighbors. Consider an object A surrounded by many objects as follows:``` * * * .A o o```If the value of `k` (number of neighbors) is three, the three closest neighbors are ``[*, o, *]``; since the most common neighbor is a `*`, `KNN` will classify point `A` as a `*`. How does ``KNN`` calculate the Euclidean distance?* Euclidean distance is the distance between two points based on the Pythagorean theorem. It can be calculated using either of the following two formulas:1.
![img](https://www.gstatic.com/education/formulas2/397133473/en/euclidean_distance.svg)_**(p,q )**_= two points in Euclidean n-space**_qi_**, **_pi_** = Euclidean vectors, starting from the origin of the space (initial point)_**n**_ = n-space2.![img](https://www.researchgate.net/profile/Young-Sun-Lee-2/publication/263889770/figure/fig1/AS:890653479284740@1589359745492/An-example-of-Euclidean-distance-between-two-objects-on-variables-X-and-Y.png) ImplementationWe are then going to create a simple `KNN` algorithm based on `sk-learn` library from scratch as follows: Importsimport numpy as np from collections import Counter class KNN: def __init__(self, k=3): """ KNN - takes in the number of neighbors default value is 3 """ self.k = k def fit(self, X, y): """ The fit method takes in features and labels and store them """ self.X_train = X self.y_train = y def predict(self, X): """ This function takes in a list of features and returns a list of predicated labels """ y_preds = [self._predict(x) for x in X] return np.array(y_preds) def _euclidean_distance(self, x1, x2): """ a private function that calculates the euclidean distance between two points """ return np.sqrt(np.sum((x1 - x2) ** 2)) def _predict(self, x): """ this function takes a single feature and returns a single prediction. """ # compute the distances between x and all x_train features distances = [self._euclidean_distance(x, x_train) for x_train in self.X_train] # sort the distances and return the indices of the first k nearest neighbors k_idx = np.argsort(distances)[:self.k] # extracting the labels of the nearest neighbor k_neighbors_labels = [self.y_train[i] for i in k_idx] # most common label return Counter(k_neighbors_labels).most_common(1)[0][0] def evaluate(self,y_true, y_pred): """ returns the algorithm's accuracy """ return np.sum(y_true==y_pred)/len(y_pred)Testing the `KNN` algorithm.We are going to use the dataset from `sklearn` the `iris` dataset to test our `KNN` algorithm.from sklearn import datasets from sklearn.model_selection import train_test_split iris = datasets.load_iris() X, y = iris.data, iris.target X.shape, y.shape X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) X_train.shape, X_test.shape, y_train.shape, y_test.shapeA single training row contains 4 features.X_train[0]`KNN` classifier instanceclf = KNN(k=3) clf.fit(X_train, y_train)Classifier Evaluation.preds = clf.predict(X_test) preds[:2] clf.evaluate(y_test, preds)You can play around the number of neighbors the classifier instance can take, but an odd number is always good.Part 2def move_pos(pos, move): return pos[0] + move[0], pos[1] + move[1] def count_neighbors(black_hexagons, pos): return sum(1 for m in MOVES.values() if move_pos(pos, m) in black_hexagons) def get_interesting_positions(black_hexagons): result = set() for pos in black_hexagons: result.add(pos) for move in MOVES.values(): result.add(move_pos(pos, move)) return result def update_hexagons(black_hexagons): result = set() for pos in get_interesting_positions(black_hexagons): is_black = pos in black_hexagons n = count_neighbors(black_hexagons, pos) if is_black and 1 <= n <= 2: result.add(pos) elif not is_black and n == 2: result.add(pos) return result def run_process(black_hexagons, days): for i in range(days): black_hexagons = update_hexagons(black_hexagons) print(f"Day {i+1}: {len(black_hexagons)}") return black_hexagons run_process(flip_hexagons(INPUT.split("\n")), 100)Day 1: 304 Day 2: 339 Day 3: 413 Day 4: 416 Day 5: 406 Day 6: 463 Day 7: 445 
Day 8: 473 Day 9: 486 Day 10: 541 Day 11: 550 Day 12: 560 Day 13: 607 Day 14: 637 Day 15: 658 Day 16: 633 Day 17: 704 Day 18: 705 Day 19: 753 Day 20: 788 Day 21: 827 Day 22: 768 Day 23: 882 Day 24: 825 Day 25: 900 Day 26: 938 Day 27: 975 Day 28: 952 Day 29: 942 Day 30: 1100 Day 31: 1047 Day 32: 1123 Day 33: 1162 Day 34: 1148 Day 35: 1114 Day 36: 1268 Day 37: 1223 Day 38: 1275 Day 39: 1334 Day 40: 1419 Day 41: 1313 Day 42: 1455 Day 43: 1500 Day 44: 1502 Day 45: 1551 Day 46: 1597 Day 47: 1612 Day 48: 1648 Day 49: 1733 Day 50: 1768 Day 51: 1741 Day 52: 1829 Day 53: 1875 Day 54: 1937 Day 55: 1897 Day 56: 1966 Day 57: 2031 Day 58: 2074 Day 59: 2146 Day 60: 2089 Day 61: 2160 Day 62: 2300 Day 63: 2246 Day 64: 2289 Day 65: 2425 Day 66: 2420 Day 67: 2427 Day 68: 2486 Day 69: 2600 Day 70: 2593 Day 71: 2577 Day 72: 2692 Day 73: 2747 Day 74: 2864 Day 75: 2846 Day 76: 2838 Day 77: 2854 Day 78: 2962 Day 79: 3096 Day 80: 319[...]1.4 Grundlagen in Python Von www.python.org - www.python.org, GPL, Link [Python](https://www.python.org/) gehört derzeit zu den beliebtesten Programmiersprachen weltweit und wird vielfältig eingesetzt.Besonders überzeugt Python durch:- eine sehr einfache Syntax- plattformunabhängig- eine starke Entwickler-CommunityWir schauen uns nun die Grundlagen von Python an, damit die folgenden Projekte besser nachvollziehbar sind. Wie im vorangegangenen Kurs gilt, dass Programmierkenntnisse nicht notwendig sind!Wir werden alle Code-Abschnitte so erläutern, dass nicht die Technik dahinter, sondern die Anwendung im Vordergrund steht. Datentypen und VariablenIn der Programmierung arbeiten wir mit verschiedenen **Datentypen**; dazu gehören unter anderem - Zahlen (123, -987, 23.234), - Zeichenketten (""), - oder auch Bedingungen (42 > 0)Wir speichern konkrete Werte dieser Datentypen in sogenannten **Variablen** ab:eine_zahl = 42 noch_eine_zahl = 2 eine_zeichenkette = ""Wir greifen kurz zwei Dinge vorweg, die wir gleich genauer betrachten werden:- `print`: Hiermit können wir allerlei Daten ausgeben lassen- `type`: Zeigt den Datentyp einer Variable anDamit können wir sehen, welche Datentypen unsere Variablen haben:print(type(eine_zahl)) print(type(noch_eine_zahl)) print(type(eine_zeichenkette))Python bietet unter anderem folgende Datentypen an:- `int`: ganze Zahlen (12, -214)- `float`: Dezimalzahlen (0.429, -1241)- `bool`: Wahrheitsangaben (True/False)- `str`: Zeichenketten ("", "Christian", "Johannes")- `list`: Listen von Daten (["", "Christian"])- `dict`: Schlüssel-Wert Angaben ({"anzahl_teilnehmer": 3000})Darüber hinaus gibt es noch weitere, wie etwa `bytes`, `complex`, `tuple`, die bei [W3Schools](https://www.w3schools.com/python/python_datatypes.asp) gefunden werden können. KommentareNeben Variablen und Konstanten gibt es auch die Möglichkeit, Kommentare im Code zu setzen.Wir werden dies direkt nutzen, um im Code selbst Stellen zu erklären.# Kommentare werden über eine Raute eingeleitet print("Das ist eine Konstante") # hier geben wir eine einfache Zeichenkette ausOperationenAuf Variablen können nun verschiedenste **Operationen** ausgeführt werden. 
Für Zahlen liegen etwa arithmetische Operationen nahe:print(eine_zahl + noch_eine_zahl) print(eine_zahl - noch_eine_zahl) print(eine_zahl * noch_eine_zahl) print(eine_zahl / noch_eine_zahl)Wir sehen, dass die letzte Operation eine leicht andere Ausgabe hat, als die ersten drei Operationen.Geben wir hierzu den Datentyp aus, erkennen wir einen Unterschied:print(type(eine_zahl / noch_eine_zahl))Python managed für uns automatisiert den Datentypen.Da eine Division potenziell zu Kommastellen führt, wird ein `int` in einen `float` transformiert.So versteht Python etwa auch, dass wir keine Zahlen zu Sätzen hinzufügen können:print(eine_zeichenkette + eine_zahl)Wenn wir möchten, können wir das aber auch selbst übernehmen, und angeben, welchen Datentyp eine Variable haben soll.So kann Python etwa Zeichenketten über ein `+` miteinander kombinieren. Diese Operation entspricht einer Konkatenation.print(eine_zeichenkette + str(eine_zahl))FunktionenEin solches Verhalten können wir als Python **Funktion** ausdrücken.Eine Funktion entspricht einer Maschine, die Eingaben verarbeitet, um Ausgaben zu produzieren.def addiere_zwei_zahlen(erste_zahl, zweite_zahl): print("Ich addiere " + str(erste_zahl) + " und " + str(zweite_zahl)) summe = erste_zahl + zweite_zahl return summeDie Bezeichnung der Variablen gilt dabei nur im Rahmen der Funktionsdefinition. Wir können nun jede Variable - oder auch Konstante - übergeben, die wir möchten.print(addiere_zwei_zahlen(eine_zahl, noch_eine_zahl)) print(addiere_zwei_zahlen(60, 40))Wenn wir dabei Parameter übergeben, die nicht von unserer Funktion verarbeitet werden können, erzeugen wir einen Fehler:print(addiere_zwei_zahlen("Das wird einen Fehler erzeugen", 40))Das Ergebnis einer Funktion können wir dabei natürlich ebenfalls wieder als Eingabe für eine andere Funktion verwenden, oder erstmal als Variable abspeichern:eine_summe = addiere_zwei_zahlen(eine_zahl, noch_eine_zahl) print(eine_summe)Sandbox RNN for Turkey Electricity demandimport numpy as np import pandas as pd import tensorflow as tf from keras.models import Sequential from keras.layers import Embedding, SimpleRNNLoad dataelectricity = pd.read_csv('../../datasets/public/turkey_elec.csv', parse_dates=[0], names=['date', 'usage']) print(electricity.info())Kerasmodel = Sequential() model.add(Embedding(1000, 32)) model.add(SimpleRNN(32)) model.summary()_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding_2 (Embedding) (None, None, 32) 32000 _________________________________________________________________ simple_rnn_2 (SimpleRNN) (None, 32) 2080 ================================================================= Total params: 34,080 Trainable params: 34,080 Non-trainable params: 0 _________________________________________________________________Chapter 8 -- Sorting in Linear Timeclass Counting(object): def __init__(self, max_value): self.max_value = max_value self._counts = [0] * (self.max_value + 1) def sort(self, iterable): result = [0] * (len(iterable) + 1) for value in iterable: self._counts[value] += 1 for index in range(1, len(self._counts)): self._counts[index] += self._counts[index - 1] for index in list(range(len(iterable)))[::-1]: # we iterate backwards for stable order value = iterable[index] result[self._counts[value]] = value self._counts[value] -= 1 return result[1:] # because python is 0 indexed, and we've been pretending otherwise array = [5,4,3,2,1,0] ceiling = max(array) 
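# Counting sort runs in O(n + k) time for n values drawn from the range [0, k].
# Note that `_counts` lives on the instance, so each Counting object should be used for a single sort; reusing one would start from stale counts.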
Counting(ceiling).sort(array) from sorts import insertion_sort class Bucket(object): def sort(self, iterable): bucket_list = [[] for i in range(10)] for value in iterable: index = int(value * 10) bucket_list[index].append(value) result = [] for bucket in bucket_list: result.extend(bucket) return insertion_sort(result) array = [0.876, 0.589, 0.567, 0.456, 0.345, 0.234, 0.123, 0] Bucket().sort(array)Demonstration of Mlflow via energy forecasting.ML flow is a ML lifecycle management tool and is ideal for logging and the analysis of model results.This is a showcase for ML Flow capabilities, based on the articlehttp://the-odd-dataguy.com/be-more-efficient-to-produce-ml-models-with-mlflowand a github https://github.com/jeanmidevacc/mlflow-energyforecastNOTE: It requires the storage account name AccountName and key AccountKey to be set further below.!pip install pandas --upgrade --user !pip install mlflow --upgrade --user !pip install joblib --upgrade --user !pip install numpy --upgrade --user !pip install scipy --upgrade --user !pip install scikit-learn --upgrade --user !pip install boto3 --upgrade --user import time import json import os from joblib import Parallel, delayed import pandas as pd import numpy as np import scipy from sklearn.model_selection import train_test_split, KFold from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, explained_variance_score from sklearn.exceptions import ConvergenceWarning import mlflow import mlflow.sklearn from mlflow.tracking import MlflowClient from warnings import simplefilter simplefilter(action='ignore', category = FutureWarning) simplefilter(action='ignore', category = ConvergenceWarning) # Ensure Minio access os.environ['MLFLOW_S3_ENDPOINT_URL'] = 'http://minio-service.kubeflow.svc.cluster.local:9000' os.environ['AWS_ACCESS_KEY_ID'] = 'minio' os.environ['AWS_SECRET_ACCESS_KEY'] = 'XXXXXX'Data preparation# Collect the data df_nationalconsumption_electricity_daily = pd.read_csv("https://raw.githubusercontent.com/jeanmidevacc/mlflow-energyforecast/master/data/rtu_data.csv") df_nationalconsumption_electricity_daily.set_index(["day"], inplace = True) # Prepare the training set and the testing set df_trainvalidate_energyconsumption = df_nationalconsumption_electricity_daily[df_nationalconsumption_electricity_daily["datastatus"] == "Définitif"] del df_trainvalidate_energyconsumption["datastatus"] df_test_energyconsumption = df_nationalconsumption_electricity_daily[df_nationalconsumption_electricity_daily["datastatus"] == "Consolidé"] del df_test_energyconsumption["datastatus"] print("Size of the training set : ",len(df_trainvalidate_energyconsumption)) print("Size of the testing set : ",len(df_test_energyconsumption)) # Define the inputs and the output output = "dailyconsumption" allinputs = list(df_trainvalidate_energyconsumption.columns) allinputs.remove(output) print("Output to predict : ", output) print("Inputs for the prediction : ", allinputs) # Build different set of featurws for the model possible_inputs = { "all" : allinputs, "only_allday_inputs" : ["weekday", "month", "is_holiday", "week"], "only_allweatheravg_inputs" : ["avg_min_temperature", "avg_max_temperature", "avg_mean_temperature","wavg_min_temperature", "wavg_max_temperature", "wavg_mean_temperature"], "only_meanweather_inputs_avg" : ["avg_mean_temperature"], "only_meanweather_inputs_wavg" : ["wavg_mean_temperature"], } # Prepare the output of the model array_output_train = np.array(df_trainvalidate_energyconsumption[output]) array_output_test = 
np.array(df_test_energyconsumption[output]) # connect to remote server remote_server_uri = "http://mlflow.mlflow.svc.cluster.local:5000" mlflow.set_tracking_uri(remote_server_uri) # Launch the experiment on mlflow experiment_name = "electricityconsumption-forecast" mlflow.set_experiment(experiment_name) # Define the evaluation function that will do the computation of the different metrics of accuracy (RMSE,MAE,R2) def evaluation_model(y_test, y_pred): rmse = np.sqrt(mean_squared_error(y_test, y_pred)) mae = mean_absolute_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) metrics = { "rmse" : rmse, "r2" : r2, "mae" : mae, } return metricsKNN regressorfrom sklearn.neighbors import KNeighborsRegressor def train_knnmodel(parameters, inputs, tags, log = False): with mlflow.start_run(nested = True): # Prepare the data array_inputs_train = np.array(df_trainvalidate_energyconsumption[inputs]) array_inputs_test = np.array(df_test_energyconsumption[inputs]) # Build the model tic = time.time() model = KNeighborsRegressor(parameters["nbr_neighbors"], weights = parameters["weight_method"]) model.fit(array_inputs_train, array_output_train) duration_training = time.time() - tic # Make the prediction tic1 = time.time() prediction = model.predict(array_inputs_test) duration_prediction = time.time() - tic1 # Evaluate the model prediction metrics = evaluation_model(array_output_test, prediction) # Log in the console if log: print(f"KNN regressor:") print(parameters) print(metrics) # Log in mlflow (parameter) mlflow.log_params(parameters) # Log in mlflow (metrics) metrics["duration_training"] = duration_training metrics["duration_prediction"] = duration_prediction mlflow.log_metrics(metrics) # log in mlflow (model) mlflow.sklearn.log_model(model, f"model") # Tag the model mlflow.set_tags(tags) AccountName='XXXXXXX' AccountKey='XXXXXXX' # Test the different combinations os.environ["AZURE_STORAGE_CONNECTION_STRING"] = "DefaultEndpointsProtocol=https;AccountName="+AccountName+";AccountKey="+AccountKey+";EndpointSuffix=core.windows.net" configurations = [] for nbr_neighbors in [1,2,5,10]: for weight_method in ['uniform','distance']: for field in possible_inputs: parameters = { "nbr_neighbors" : nbr_neighbors, "weight_method" : weight_method } tags = { "model" : "knn", "inputs" : field } configurations.append([parameters, tags]) train_knnmodel(parameters, possible_inputs[field], tags)MLP regressorfrom sklearn.neural_network import MLPRegressor def train_mlpmodel(parameters, inputs, tags, log = False): with mlflow.start_run(nested = True): # Prepare the data array_inputs_train = np.array(df_trainvalidate_energyconsumption[inputs]) array_inputs_test = np.array(df_test_energyconsumption[inputs]) # Build the model tic = time.time() model = MLPRegressor( hidden_layer_sizes = parameters["hidden_layers"], activation = parameters["activation"], solver = parameters["solver"], max_iter = parameters["nbr_iteration"], random_state = 0) model.fit(array_inputs_train, array_output_train) duration_training = time.time() - tic # Make the prediction tic1 = time.time() prediction = model.predict(array_inputs_test) duration_prediction = time.time() - tic1 # Evaluate the model prediction metrics = evaluation_model(array_output_test, prediction) # Log in the console if log: print(f"Random forest regressor:") print(parameters) print(metrics) # Log in mlflow (parameter) mlflow.log_params(parameters) # Log in mlflow (metrics) metrics["duration_training"] = duration_training metrics["duration_prediction"] = duration_prediction 
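# The remaining calls in this function persist everything to the MLflow tracking server
# configured above: log_metrics() records the accuracy and timing numbers, log_model()
# stores the fitted scikit-learn estimator as a run artifact, and set_tags() attaches
# the model family and the name of the feature set to the run.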
mlflow.log_metrics(metrics) # log in mlflow (model) mlflow.sklearn.log_model(model, f"model") # Tag the model mlflow.set_tags(tags) for hiddenlayers in [4,8,16]: for activation in ["identity","logistic",]: for solver in ["lbfgs"]: for nbriteration in [10,100,1000]: for field in possible_inputs: parameters = { "hidden_layers" : hiddenlayers, "activation" : activation, "solver" : solver, "nbr_iteration" : nbriteration } tags = { "model" : "mlp", "inputs" : field } train_mlpmodel(parameters, possible_inputs[field], tags)Use a handmade model (scipy approach)class PTG: def __init__(self, thresholds_x0, thresholds_a, thresholds_b): self.thresholds_x0 = thresholds_x0 self.thresholds_a = thresholds_a self.thresholds_b = thresholds_b def get_ptgmodel(self, x, a, b, x0): return np.piecewise(x, [x < x0, x >= x0], [lambda x: a*x + b , lambda x : a*x0 + b]) def fit(self, dfx, y): x = np.array(dfx) # Define the bounds bounds_min = [thresholds_a[0], thresholds_b[0], thresholds_x0[0]] bounds_max = [thresholds_a[1], thresholds_b[1], thresholds_x0[1]] bounds = (bounds_min, bounds_max) # Fit a model popt, pcov = scipy.optimize.curve_fit(self.get_ptgmodel, x, y, bounds = bounds) # Get the parameter of the model a = popt[0] b = popt[1] x0 = popt[2] self.coefficients = [a, b, x0] def predict(self,dfx): x = np.array(dfx) predictions = [] for elt in x: forecast = self.get_ptgmodel(elt, self.coefficients[0], self.coefficients[1], self.coefficients[2]) predictions.append(forecast) return np.array(predictions) def train_ptgmodel(parameters, inputs, tags, log = False): with mlflow.start_run(nested = True): # Prepare the data df_inputs_train = df_trainvalidate_energyconsumption[inputs[0]] df_inputs_test = df_test_energyconsumption[inputs[0]] # Build the model tic = time.time() model = PTG(parameters["thresholds_x0"], parameters["thresholds_a"], parameters["thresholds_b"]) model.fit(df_inputs_train, array_output_train) duration_training = time.time() - tic # Make the prediction tic1 = time.time() prediction = model.predict(df_inputs_test) duration_prediction = time.time() - tic1 # Evaluate the model prediction metrics = evaluation_model(array_output_test, prediction) # Log in the console if log: print(f"PTG:") print(parameters) print(metrics) # Log in mlflow (parameter) mlflow.log_params(parameters) # Log in mlflow (metrics) metrics["duration_training"] = duration_training metrics["duration_prediction"] = duration_prediction mlflow.log_metrics(metrics) # log in mlflow (model) mlflow.sklearn.log_model(model, f"model") # Tag the model mlflow.set_tags(tags) # Define the parameters of the model thresholds_x0 = [0, 20] thresholds_a = [-200000, -50000] thresholds_b = [1000000, 3000000] parameters = { "thresholds_x0" : thresholds_x0, "thresholds_a" : thresholds_a, "thresholds_b" : thresholds_b } for field in ["only_meanweather_inputs_avg", "only_meanweather_inputs_wavg"]: tags = { "model" : "ptg", "inputs" : field } train_ptgmodel(parameters, possible_inputs[field], tags, log = False)Evaluate mlflow results# Select the run of the experiment df_runs = mlflow.search_runs(experiment_ids="1") print("Number of runs done : ", len(df_runs)) # Quick sorting to get the best models based on the RMSE metric df_runs.sort_values(["metrics.rmse"], ascending = True, inplace = True) df_runs.head() # Get the best one runid_selected = df_runs.head(1)["run_id"].values[0] runid_selectedIndicator Value Variance in Argentinaargentina_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & \ 
(indicator_data['CountryCode'] == 'AR')] argentina_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & \ (indicator_data['CountryCode'] == 'AR')] argentina_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & \ (indicator_data['CountryCode'] == 'AR')] argentina_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & \ (indicator_data['CountryCode'] == 'AR')] argentina_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & \ (indicator_data['CountryCode'] == 'AR')] argentina_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & \ (indicator_data['CountryCode'] == 'AR')] argentina_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & \ (indicator_data['CountryCode'] == 'AR')] argentina_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & \ (indicator_data['CountryCode'] == 'AR')] argentina_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & \ (indicator_data['CountryCode'] == 'AR')] import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(argentina_df_ind1.Year, argentina_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=argentina_df_ind1.Year, y=argentina_df_ind1.Value, data=argentina_df_ind1, ax=ax1, color="#4c337f") ax1.set_title('Agricultural land (% of land area) in Argentina') ax1.set_xlabel("Year") ax1.set_ylabel("Agricultural land (% of land area)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(argentina_df_ind1.Year, argentina_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=argentina_df_ind2.Year, y=argentina_df_ind2.Value, data=argentina_df_ind2, ax=ax1, color="#4c337f") ax1.set_title('Birth rate, crude (per 1,000 people) in Argentina') ax1.set_xlabel("Year") ax1.set_ylabel("Birth rate, crude (per 1,000 people)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(argentina_df_ind1.Year, argentina_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=argentina_df_ind3.Year, y=argentina_df_ind3.Value, data=argentina_df_ind3, ax=ax1, color="#4c337f") ax1.set_title('Age dependency ratio (% of working-age population) in Argentina') ax1.set_xlabel("Year") ax1.set_ylabel("Age dependency ratio (% of working-age population)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(argentina_df_ind1.Year, argentina_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=argentina_df_ind4.Year, y=argentina_df_ind4.Value, data=argentina_df_ind4, ax=ax1, color="#4c337f") ax1.set_title('Exports of goods and services (% of GDP) in Argentina') ax1.set_xlabel("Year") ax1.set_ylabel("Exports of goods and services (% of GDP)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(argentina_df_ind1.Year, argentina_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=argentina_df_ind5.Year, 
y=argentina_df_ind5.Value, data=argentina_df_ind5, ax=ax1, color="#4c337f") ax1.set_title('GDP (current US$) in Argentina') ax1.set_xlabel("Year") ax1.set_ylabel("GDP (current US$)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(argentina_df_ind1.Year, argentina_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=argentina_df_ind6.Year, y=argentina_df_ind6.Value, data=argentina_df_ind6, ax=ax1, color="#4c337f") ax1.set_title('GDP growth (annual %) in Argentina') ax1.set_xlabel("Year") ax1.set_ylabel("GDP growth (annual %)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(argentina_df_ind1.Year, argentina_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=argentina_df_ind7.Year, y=argentina_df_ind7.Value, data=argentina_df_ind7, ax=ax1, color="#4c337f") ax1.set_title('Population growth (annual %) in Argentina') ax1.set_xlabel("Year") ax1.set_ylabel("Population growth (annual %)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(argentina_df_ind1.Year, argentina_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=argentina_df_ind8.Year, y=argentina_df_ind8.Value, data=argentina_df_ind8, ax=ax1, color="#4c337f") ax1.set_title('Total reserves (includes gold, current US$) in Argentina') ax1.set_xlabel("Year") ax1.set_ylabel("Total reserves (includes gold, current US$)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(argentina_df_ind1.Year, argentina_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=argentina_df_ind9.Year, y=argentina_df_ind9.Value, data=argentina_df_ind9, ax=ax1, color="#4c337f") ax1.set_title('Trade (% of GDP) in Argentina') ax1.set_xlabel("Year") ax1.set_ylabel("Trade (% of GDP)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90)Indicator Value Variance in Brazilbrazil_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) &\ (indicator_data['CountryCode'] == 'BR')] brazil_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) &\ (indicator_data['CountryCode'] == 'BR')] brazil_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) &\ (indicator_data['CountryCode'] == 'BR')] brazil_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) &\ (indicator_data['CountryCode'] == 'BR')] brazil_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) &\ (indicator_data['CountryCode'] == 'BR')] brazil_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) &\ (indicator_data['CountryCode'] == 'BR')] brazil_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) &\ (indicator_data['CountryCode'] == 'BR')] brazil_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) &\ (indicator_data['CountryCode'] == 'BR')] brazil_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) &\ (indicator_data['CountryCode'] == 'BR')] import seaborn as sns fig = 
plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(brazil_df_ind1.Year, brazil_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=brazil_df_ind1.Year, y=brazil_df_ind1.Value, data=brazil_df_ind1, ax=ax1, color="#4c337f") ax1.set_title('Agricultural land (% of land area) in brazil') ax1.set_xlabel("Year") ax1.set_ylabel("Agricultural land (% of land area)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(brazil_df_ind1.Year, brazil_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=brazil_df_ind2.Year, y=brazil_df_ind2.Value, data=brazil_df_ind2, ax=ax1, color="#4c337f") ax1.set_title('Birth rate, crude (per 1,000 people) in brazil') ax1.set_xlabel("Year") ax1.set_ylabel("Birth rate, crude (per 1,000 people)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(brazil_df_ind1.Year, brazil_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=brazil_df_ind3.Year, y=brazil_df_ind3.Value, data=brazil_df_ind3, ax=ax1, color="#4c337f") ax1.set_title('Age dependency ratio (% of working-age population) in brazil') ax1.set_xlabel("Year") ax1.set_ylabel("Age dependency ratio (% of working-age population)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(brazil_df_ind1.Year, brazil_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=brazil_df_ind4.Year, y=brazil_df_ind4.Value, data=brazil_df_ind4, ax=ax1, color="#4c337f") ax1.set_title('Exports of goods and services (% of GDP) in brazil') ax1.set_xlabel("Year") ax1.set_ylabel("Exports of goods and services (% of GDP)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(brazil_df_ind1.Year, brazil_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=brazil_df_ind5.Year, y=brazil_df_ind5.Value, data=brazil_df_ind5, ax=ax1, color="#4c337f") ax1.set_title('GDP (current US$) in brazil') ax1.set_xlabel("Year") ax1.set_ylabel("GDP (current US$)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(brazil_df_ind1.Year, brazil_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=brazil_df_ind6.Year, y=brazil_df_ind6.Value, data=brazil_df_ind6, ax=ax1, color="#4c337f") ax1.set_title('GDP growth (annual %) in brazil') ax1.set_xlabel("Year") ax1.set_ylabel("GDP growth (annual %)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(brazil_df_ind1.Year, brazil_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=brazil_df_ind7.Year, y=brazil_df_ind7.Value, data=brazil_df_ind7, ax=ax1, color="#4c337f") ax1.set_title('Population growth (annual %) in brazil') ax1.set_xlabel("Year") ax1.set_ylabel("Population growth (annual %)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = 
fig.add_subplot(1, 1, 1) #ax1.plot(brazil_df_ind1.Year, brazil_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=brazil_df_ind8.Year, y=brazil_df_ind8.Value, data=brazil_df_ind8, ax=ax1, color="#4c337f") ax1.set_title('Total reserves (includes gold, current US$) in brazil') ax1.set_xlabel("Year") ax1.set_ylabel("Total reserves (includes gold, current US$)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(brazil_df_ind1.Year, brazil_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=brazil_df_ind9.Year, y=brazil_df_ind9.Value, data=brazil_df_ind9, ax=ax1, color="#4c337f") ax1.set_title('Trade (% of GDP) in brazil') ax1.set_xlabel("Year") ax1.set_ylabel("Trade (% of GDP)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90)Indicator Value Variance in EcuadorEcuador_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & \ (indicator_data['CountryCode'] == 'EC')] Ecuador_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & \ (indicator_data['CountryCode'] == 'EC')] Ecuador_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & \ (indicator_data['CountryCode'] == 'EC')] Ecuador_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & \ (indicator_data['CountryCode'] == 'EC')] Ecuador_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & \ (indicator_data['CountryCode'] == 'EC')] Ecuador_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & \ (indicator_data['CountryCode'] == 'EC')] Ecuador_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & \ (indicator_data['CountryCode'] == 'EC')] Ecuador_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & \ (indicator_data['CountryCode'] == 'EC')] Ecuador_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & \ (indicator_data['CountryCode'] == 'EC')] import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Ecuador_df_ind1.Year, Ecuador_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Ecuador_df_ind1.Year, y=Ecuador_df_ind1.Value, data=Ecuador_df_ind1, ax=ax1, color="#4c337f") ax1.set_title('Agricultural land (% of land area) in Ecuador') ax1.set_xlabel("Year") ax1.set_ylabel("Agricultural land (% of land area)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Ecuador_df_ind1.Year, Ecuador_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Ecuador_df_ind2.Year, y=Ecuador_df_ind2.Value, data=Ecuador_df_ind2, ax=ax1, color="#4c337f") ax1.set_title('Birth rate, crude (per 1,000 people) in Ecuador') ax1.set_xlabel("Year") ax1.set_ylabel("Birth rate, crude (per 1,000 people)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Ecuador_df_ind1.Year, Ecuador_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Ecuador_df_ind3.Year, y=Ecuador_df_ind3.Value, data=Ecuador_df_ind3, ax=ax1, color="#4c337f") 
ax1.set_title('Age dependency ratio (% of working-age population) in Ecuador') ax1.set_xlabel("Year") ax1.set_ylabel("Age dependency ratio (% of working-age population)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Ecuador_df_ind1.Year, Ecuador_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Ecuador_df_ind4.Year, y=Ecuador_df_ind4.Value, data=Ecuador_df_ind4, ax=ax1, color="#4c337f") ax1.set_title('Exports of goods and services (% of GDP) in Ecuador') ax1.set_xlabel("Year") ax1.set_ylabel("Exports of goods and services (% of GDP)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Ecuador_df_ind1.Year, Ecuador_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Ecuador_df_ind5.Year, y=Ecuador_df_ind5.Value, data=Ecuador_df_ind5, ax=ax1, color="#4c337f") ax1.set_title('GDP (current US$) in Ecuador') ax1.set_xlabel("Year") ax1.set_ylabel("GDP (current US$)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Ecuador_df_ind1.Year, Ecuador_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Ecuador_df_ind6.Year, y=Ecuador_df_ind6.Value, data=Ecuador_df_ind6, ax=ax1, color="#4c337f") ax1.set_title('GDP growth (annual %) in Ecuador') ax1.set_xlabel("Year") ax1.set_ylabel("GDP growth (annual %)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Ecuador_df_ind1.Year, Ecuador_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Ecuador_df_ind7.Year, y=Ecuador_df_ind7.Value, data=Ecuador_df_ind7, ax=ax1, color="#4c337f") ax1.set_title('Population growth (annual %) in Ecuador') ax1.set_xlabel("Year") ax1.set_ylabel("Population growth (annual %)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Ecuador_df_ind1.Year, Ecuador_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Ecuador_df_ind8.Year, y=Ecuador_df_ind8.Value, data=Ecuador_df_ind8, ax=ax1, color="#4c337f") ax1.set_title('Total reserves (includes gold, current US$) in Ecuador') ax1.set_xlabel("Year") ax1.set_ylabel("Total reserves (includes gold, current US$)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Ecuador_df_ind1.Year, Ecuador_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Ecuador_df_ind9.Year, y=Ecuador_df_ind9.Value, data=Ecuador_df_ind9, ax=ax1, color="#4c337f") ax1.set_title('Trade (% of GDP) in Ecuador') ax1.set_xlabel("Year") ax1.set_ylabel("Trade (% of GDP)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90)Indicator Value Variance in IndiaIndia_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & \ (indicator_data['CountryCode'] == 'IN')] India_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & \ (indicator_data['CountryCode'] == 'IN')] India_df_ind3 = 
indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & \ (indicator_data['CountryCode'] == 'IN')] India_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & \ (indicator_data['CountryCode'] == 'IN')] India_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & \ (indicator_data['CountryCode'] == 'IN')] India_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & \ (indicator_data['CountryCode'] == 'IN')] India_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & \ (indicator_data['CountryCode'] == 'IN')] India_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & \ (indicator_data['CountryCode'] == 'IN')] India_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & \ (indicator_data['CountryCode'] == 'IN')] import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(India_df_ind1.Year, India_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=India_df_ind1.Year, y=India_df_ind1.Value, data=India_df_ind1, ax=ax1, color="#4c337f") ax1.set_title('Agricultural land (% of land area) in India') ax1.set_xlabel("Year") ax1.set_ylabel("Agricultural land (% of land area)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(India_df_ind1.Year, India_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=India_df_ind2.Year, y=India_df_ind2.Value, data=India_df_ind2, ax=ax1, color="#4c337f") ax1.set_title('Birth rate, crude (per 1,000 people) in India') ax1.set_xlabel("Year") ax1.set_ylabel("Birth rate, crude (per 1,000 people)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(India_df_ind1.Year, India_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=India_df_ind3.Year, y=India_df_ind3.Value, data=India_df_ind3, ax=ax1, color="#4c337f") ax1.set_title('Age dependency ratio (% of working-age population) in India') ax1.set_xlabel("Year") ax1.set_ylabel("Age dependency ratio (% of working-age population)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(India_df_ind1.Year, India_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=India_df_ind4.Year, y=India_df_ind4.Value, data=India_df_ind4, ax=ax1, color="#4c337f") ax1.set_title('Exports of goods and services (% of GDP) in India') ax1.set_xlabel("Year") ax1.set_ylabel("Exports of goods and services (% of GDP)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(India_df_ind1.Year, India_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=India_df_ind5.Year, y=India_df_ind5.Value, data=India_df_ind5, ax=ax1, color="#4c337f") ax1.set_title('GDP (current US$) in India') ax1.set_xlabel("Year") ax1.set_ylabel("GDP (current US$)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(India_df_ind1.Year, 
India_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=India_df_ind6.Year, y=India_df_ind6.Value, data=India_df_ind6, ax=ax1, color="#4c337f") ax1.set_title('GDP growth (annual %) in India') ax1.set_xlabel("Year") ax1.set_ylabel("GDP growth (annual %)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(India_df_ind1.Year, India_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=India_df_ind7.Year, y=India_df_ind7.Value, data=India_df_ind7, ax=ax1, color="#4c337f") ax1.set_title('Population growth (annual %) in India') ax1.set_xlabel("Year") ax1.set_ylabel("Population growth (annual %)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(India_df_ind1.Year, India_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=India_df_ind8.Year, y=India_df_ind8.Value, data=India_df_ind8, ax=ax1, color="#4c337f") ax1.set_title('Total reserves (includes gold, current US$) in India') ax1.set_xlabel("Year") ax1.set_ylabel("Total reserves (includes gold, current US$)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(India_df_ind1.Year, India_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=India_df_ind9.Year, y=India_df_ind9.Value, data=India_df_ind9, ax=ax1, color="#4c337f") ax1.set_title('Trade (% of GDP) in India') ax1.set_xlabel("Year") ax1.set_ylabel("Trade (% of GDP)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90)Indicator Value Variance in LibyaLibya_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & \ (indicator_data['CountryCode'] == 'LY')] Libya_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & \ (indicator_data['CountryCode'] == 'LY')] Libya_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & \ (indicator_data['CountryCode'] == 'LY')] Libya_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & \ (indicator_data['CountryCode'] == 'LY')] Libya_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & \ (indicator_data['CountryCode'] == 'LY')] Libya_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & \ (indicator_data['CountryCode'] == 'LY')] Libya_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & \ (indicator_data['CountryCode'] == 'LY')] Libya_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & \ (indicator_data['CountryCode'] == 'LY')] Libya_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & \ (indicator_data['CountryCode'] == 'LY')] import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Libya_df_ind1.Year, Libya_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Libya_df_ind1.Year, y=Libya_df_ind1.Value, data=Libya_df_ind1, ax=ax1, color="#4c337f") ax1.set_title('Agricultural land (% of land area) in Libya') ax1.set_xlabel("Year") ax1.set_ylabel("Agricultural land (% of land area)") for ax in fig.axes: plt.sca(ax) 
plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Libya_df_ind1.Year, Libya_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Libya_df_ind2.Year, y=Libya_df_ind2.Value, data=Libya_df_ind2, ax=ax1, color="#4c337f") ax1.set_title('Birth rate, crude (per 1,000 people) in Libya') ax1.set_xlabel("Year") ax1.set_ylabel("Birth rate, crude (per 1,000 people)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Libya_df_ind1.Year, Libya_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Libya_df_ind3.Year, y=Libya_df_ind3.Value, data=Libya_df_ind3, ax=ax1, color="#4c337f") ax1.set_title('Age dependency ratio (% of working-age population) in Libya') ax1.set_xlabel("Year") ax1.set_ylabel("Age dependency ratio (% of working-age population)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Libya_df_ind1.Year, Libya_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Libya_df_ind4.Year, y=Libya_df_ind4.Value, data=Libya_df_ind4, ax=ax1, color="#4c337f") ax1.set_title('Exports of goods and services (% of GDP) in Libya') ax1.set_xlabel("Year") ax1.set_ylabel("Exports of goods and services (% of GDP)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Libya_df_ind1.Year, Libya_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Libya_df_ind5.Year, y=Libya_df_ind5.Value, data=Libya_df_ind5, ax=ax1, color="#4c337f") ax1.set_title('GDP (current US$) in Libya') ax1.set_xlabel("Year") ax1.set_ylabel("GDP (current US$)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Libya_df_ind1.Year, Libya_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Libya_df_ind6.Year, y=Libya_df_ind6.Value, data=Libya_df_ind6, ax=ax1, color="#4c337f") ax1.set_title('GDP growth (annual %) in Libya') ax1.set_xlabel("Year") ax1.set_ylabel("GDP growth (annual %)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Libya_df_ind1.Year, Libya_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Libya_df_ind7.Year, y=Libya_df_ind7.Value, data=Libya_df_ind7, ax=ax1, color="#4c337f") ax1.set_title('Population growth (annual %) in Libya') ax1.set_xlabel("Year") ax1.set_ylabel("Population growth (annual %)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Libya_df_ind1.Year, Libya_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Libya_df_ind8.Year, y=Libya_df_ind8.Value, data=Libya_df_ind8, ax=ax1, color="#4c337f") ax1.set_title('Total reserves (includes gold, current US$) in Libya') ax1.set_xlabel("Year") ax1.set_ylabel("Total reserves (includes gold, current US$)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = 
plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(Libya_df_ind1.Year, Libya_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=Libya_df_ind9.Year, y=Libya_df_ind9.Value, data=Libya_df_ind9, ax=ax1, color="#4c337f") ax1.set_title('Trade (% of GDP) in Libya') ax1.set_xlabel("Year") ax1.set_ylabel("Trade (% of GDP)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90)Indicator Value Variance in South_AfricaSouth_Africa_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & \ (indicator_data['CountryCode'] == 'ZA')] South_Africa_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & \ (indicator_data['CountryCode'] == 'ZA')] South_Africa_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & \ (indicator_data['CountryCode'] == 'ZA')] South_Africa_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & \ (indicator_data['CountryCode'] == 'ZA')] South_Africa_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & \ (indicator_data['CountryCode'] == 'ZA')] South_Africa_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & \ (indicator_data['CountryCode'] == 'ZA')] South_Africa_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & \ (indicator_data['CountryCode'] == 'ZA')] South_Africa_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & \ (indicator_data['CountryCode'] == 'ZA')] South_Africa_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & \ (indicator_data['CountryCode'] == 'ZA')] import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(South_Africa_df_ind1.Year, South_Africa_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=South_Africa_df_ind1.Year, y=South_Africa_df_ind1.Value, data=South_Africa_df_ind1, ax=ax1, color="#4c337f") ax1.set_title('Agricultural land (% of land area) in South_Africa') ax1.set_xlabel("Year") ax1.set_ylabel("Agricultural land (% of land area)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(South_Africa_df_ind1.Year, South_Africa_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=South_Africa_df_ind2.Year, y=South_Africa_df_ind2.Value, data=South_Africa_df_ind2, ax=ax1, color="#4c337f") ax1.set_title('Birth rate, crude (per 1,000 people) in South_Africa') ax1.set_xlabel("Year") ax1.set_ylabel("Birth rate, crude (per 1,000 people)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(South_Africa_df_ind1.Year, South_Africa_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=South_Africa_df_ind3.Year, y=South_Africa_df_ind3.Value, data=South_Africa_df_ind3, ax=ax1, color="#4c337f") ax1.set_title('Age dependency ratio (% of working-age population) in South_Africa') ax1.set_xlabel("Year") ax1.set_ylabel("Age dependency ratio (% of working-age population)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(South_Africa_df_ind1.Year, 
South_Africa_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=South_Africa_df_ind4.Year, y=South_Africa_df_ind4.Value, data=South_Africa_df_ind4, ax=ax1, color="#4c337f") ax1.set_title('Exports of goods and services (% of GDP) in South_Africa') ax1.set_xlabel("Year") ax1.set_ylabel("Exports of goods and services (% of GDP)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(South_Africa_df_ind1.Year, South_Africa_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=South_Africa_df_ind5.Year, y=South_Africa_df_ind5.Value, data=South_Africa_df_ind5, ax=ax1, color="#4c337f") ax1.set_title('GDP (current US$) in South_Africa') ax1.set_xlabel("Year") ax1.set_ylabel("GDP (current US$)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(South_Africa_df_ind1.Year, South_Africa_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=South_Africa_df_ind6.Year, y=South_Africa_df_ind6.Value, data=South_Africa_df_ind6, ax=ax1, color="#4c337f") ax1.set_title('GDP growth (annual %) in South_Africa') ax1.set_xlabel("Year") ax1.set_ylabel("GDP growth (annual %)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(South_Africa_df_ind1.Year, South_Africa_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=South_Africa_df_ind7.Year, y=South_Africa_df_ind7.Value, data=South_Africa_df_ind7, ax=ax1, color="#4c337f") ax1.set_title('Population growth (annual %) in South_Africa') ax1.set_xlabel("Year") ax1.set_ylabel("Population growth (annual %)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(South_Africa_df_ind1.Year, South_Africa_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=South_Africa_df_ind8.Year, y=South_Africa_df_ind8.Value, data=South_Africa_df_ind8, ax=ax1, color="#4c337f") ax1.set_title('Total reserves (includes gold, current US$) in South_Africa') ax1.set_xlabel("Year") ax1.set_ylabel("Total reserves (includes gold, current US$)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) import seaborn as sns fig = plt.figure(figsize=(16,6)) ax1 = fig.add_subplot(1, 1, 1) #ax1.plot(South_Africa_df_ind1.Year, South_Africa_df_ind1.Value, label='orig_cmbnd_ln_to_value', color='c', linestyle='--') ax1 = sns.pointplot(x=South_Africa_df_ind9.Year, y=South_Africa_df_ind9.Value, data=South_Africa_df_ind9, ax=ax1, color="#4c337f") ax1.set_title('Trade (% of GDP) in South_Africa') ax1.set_xlabel("Year") ax1.set_ylabel("Trade (% of GDP)") for ax in fig.axes: plt.sca(ax) plt.xticks(rotation=90) argentina_df_ind1.drop(argentina_df_ind1.columns[[0, 1, 2, 4]], axis=1, inplace=True) plt.plot(argentina_df_ind1) a_df.drop(a_df.columns[[0, 1, 2, 4]], axis=1, inplace=True) a_df new_df = df[(df['Indicator Code'].isin(['ER.FSH.AQUA.MT', 'EN.ATM.CO2E.KD.GD'])) & (df['Country Code'] == 'IND')] new_df.drop(new_df.columns[[0, 1, 2, 3, 4]], axis=1, inplace=True) new_df.head()1. `region_1` and `region_2` are pretty uninformative names for locale columns in the dataset. 
Create a copy of `reviews` with these columns renamed to `region` and `locale`, respectively.

renamed = reviews.rename(columns = {'region_1':'region','region_2':'locale'})
renamed.head(2)

2. Set the index name in the dataset to `wines`.

reindexed = reviews.rename_axis('wines', axis='rows')
reindexed.head(2)

3. The [Things on Reddit](https://www.kaggle.com/residentmario/things-on-reddit/data) dataset includes product links from a selection of top-ranked forums ("subreddits") on reddit.com. Run the cell below to load a dataframe of products mentioned on the *r/gaming* subreddit and another dataframe for products mentioned on the *r/movies* subreddit.

gaming_products = pd.read_csv("gaming.csv")
gaming_products['subreddit'] = "r/gaming"
movie_products = pd.read_csv("movies.csv")
movie_products['subreddit'] = "r/movies"

Create a DataFrame of products mentioned on either subreddit.

combined_products = pd.concat([gaming_products, movie_products])

4. The [Powerlifting Database](https://www.kaggle.com/dansbecker/powerlifting-database) dataset on Kaggle includes one CSV table for powerlifting meets and a separate one for powerlifting competitors. Run the cell below to load these datasets into dataframes:

powerlifting_meets = pd.read_csv("meets.csv")
powerlifting_competitors = pd.read_csv("openpowerlifting.csv")

Both tables include references to a `MeetID`, a unique key for each meet (competition) included in the database. Using this, generate a dataset combining the two tables into one.

powerlifting_combined = powerlifting_meets.set_index("MeetID").join(powerlifting_competitors.set_index("MeetID"))

Visualize

- Learn to use Matplotlib
- Subplots
- Multiple plots
- Bar plots

import pandas as pd

remote_file = "https://raw.githubusercontent.com/LearnPythonWithRune/FinancialDataAnalysisWithPython/main/files/AAPL.csv"
data = pd.read_csv(remote_file, index_col=0, parse_dates=True)
data.head()
data.plot()

import matplotlib.pyplot as plt
%matplotlib notebook

data.plot()
data['Close'].plot()

fig, ax = plt.subplots()
data['Close'].plot(ax=ax)
ax.set_ylabel("Price")
ax.set_title("AAPL")

fig, ax = plt.subplots(2, 2)
data['Open'].plot(ax=ax[0, 0], title="Open")
data['High'].plot(ax=ax[0, 1], title="High")
data['Low'].plot(ax=ax[1, 0], title="Low")
data['Close'].plot(ax=ax[1, 1], title="Close")
plt.tight_layout()

fig, ax = plt.subplots()
data['Volume'].loc['2020-07-01':'2020-08-15'].plot.barh(ax=ax)

Inference Analysis

In this notebook we're looking at the statistical significance of distribution differences between audio features of hits versus non-hits.
This is not testing all features, only those that seemed relevant for the model built in the EDA notebook.import pandas as pd # Import Data train = pd.read_csv('../data/processed/train.csv',sep='\t',parse_dates=['date','entry_date','exit_date','peak_date','max_leap_date']).set_index(['artist','title']) hits = train.loc[train.hit == True,:].copy() nhits = train.loc[train.hit == False,:].copy() import numpy as np import matplotlib as mlp import matplotlib.pyplot as plt import seaborn as sns # Seed Random np.random.seed(500) # Set default for figure sizes mlp.rcParams['figure.figsize'] = (16,6) def bins_rule_of_thumb(data): return int(np.sqrt(len(data))) # Define Comparison Function Difference of Means def mean_diff(data1,data2): mean1 = np.mean(data1) mean2 = np.mean(data2) return mean1 - mean2 # Create Permutation Sample Function def permutation_sample(data1,data2): """ Creates a Permutation Sample from two data sets and returns two permutated samples with the same length as the original sets. """ data_both = np.concatenate((data1,data2)) data_perm = np.random.permutation(data_both) perm_sample_1 = data_perm[:len(data1)] perm_sample_2 = data_perm[len(data1):] return (perm_sample_1,perm_sample_2) # Define function to generate Permutation Replicates def draw_perm_reps(data1,data2,func,size=1): perm_replicates = np.empty(size) for i in range(size): perm_sample_1,perm_sample_2 = permutation_sample(data1,data2) perm_replicates[i] = func(perm_sample_1,perm_sample_2) return perm_replicates def ecdf(data): """ Returns the x,y values for data for plotting as an ecdf. """ # Sort the data along the x-axis x = np.sort(data) # Index the data at equidistant intervals y = np.arange(1, len(x) + 1) / len(x) return x,y def test_mean_diff(data1,data2,mean_diff_val,h0_diff=0): perm_replicates = draw_perm_reps(data1,data2,mean_diff,10000) if mean_diff_val > 0: p = np.sum(perm_replicates >= mean_diff_val) / len(perm_replicates) print("p: {}".format(p)) else: p = np.sum(perm_replicates <= mean_diff_val) / len(perm_replicates) print("p: {}".format(p)) CI = np.percentile(perm_replicates,[2.5,97.5]) print("CI: {}".format(CI)) ME = CI[1] - np.mean(perm_replicates) print("ME: {}".format(ME)) fig, (ax1,ax2) = plt.subplots(1,2,figsize=(18,5)) plt.subplot(1,2,1) dist_ax = sns.distplot(perm_replicates,bins=25,color='gray') max_dist_y = np.array([h.get_height() for h in dist_ax.patches]).max() _ = plt.xlabel('Mean Difference of {}'.format(data1.name)) _ = plt.ylabel('Density') if mean_diff_val >= 0: _ = plt.xlim(-mean_diff_val - 0.005,mean_diff_val + 0.005) else: _ = plt.xlim(mean_diff_val - 0.005,-mean_diff_val + 0.005) _ = plt.legend(['Mean Difference Distribution \nfor p(hits)=p(nhits)'],loc='upper left') _ = plt.annotate(s='Point Estimate',xy=(mean_diff_val,0),xytext=(mean_diff_val,max_dist_y*0.2), arrowprops={'width':1.5,'headwidth':5,'color': 'red'}) ## CI_area = perm_replicates[perm_replicates <= CI[0]] ## _ = plt.hist(CI_area) plt.subplot(1,2,2) x,y = ecdf(perm_replicates) y_interp = np.interp(x,x,y) _ = plt.plot(x,y_interp,color='gray') if mean_diff_val >= 0: _ = plt.xlim(-mean_diff_val - 0.005,mean_diff_val + 0.005) else: _ = plt.xlim(mean_diff_val - 0.005,-mean_diff_val + 0.005) _ = plt.xlabel('Mean Difference of {}'.format(data1.name)) _ = plt.ylabel('Cumulative Probability') _ = plt.legend(['Mean Difference Distribution \nfor p(hits)=p(nhits)'],loc='lower right') if mean_diff_val >= 0: _ = plt.annotate(s='Point Estimate',xy=(mean_diff_val,1),xytext=(mean_diff_val,0.8), 
arrowprops={'width':1.5,'headwidth':5,'color': 'red'}) else: _ = plt.annotate(s='Point Estimate',xy=(mean_diff_val,0),xytext=(mean_diff_val,0.2), arrowprops={'width':1.5,'headwidth':5,'color': 'red'})Z-tests are so-called "robust tests", which means that since we're comparing the distributions of means which tend to be normally distributed we don't necessarily need normality in the data. Nevertheless, it gives some reassurance to know that we're dealing with a normal distribution.import scipy.stats as stats def lreg_line(slope,intercept,test_data): x_lreg = np.array([min(test_data),max(test_data)]) y_lreg = slope * x_lreg + intercept return x_lreg,y_lreg def qq_plot(data): data_no_na = data.dropna() fig, ax = plt.subplots(1,2,figsize=(16, 5)) _ = plt.subplot(1,2,1) (x,y), (slope,intercept,r) = stats.probplot(data_no_na, dist="norm", plot=None) _ = plt.plot(x,y,marker='.',linestyle='none',color='red',alpha=0.5) x_lreg,y_lreg = lreg_line(slope,intercept,x) _ = plt.plot(x_lreg,y_lreg,color='gray') _ = plt.xlabel('Quantiles') _ = plt.ylabel('Ordered Values') _ = plt.title('Normal Probability Plot') _ = plt.subplot(1,2,2) _ = plt.hist(data_no_na, color='gray',bins=30) _ = plt.xlabel('Value') _ = plt.ylabel('Frequency') features = ['acousticness','loudness','instrumentalness','danceability','valence','energy','tempo','duration_ms','time_signature','key','mode'] no_discrete_ft = ['acousticness','loudness','instrumentalness','danceability','valence','energy','tempo','duration_ms'] for feature in no_discrete_ft: qq_plot(train[feature]) plt.title(feature)Unfortunately, we're not dealing with normal distributions in this case. We'll see that normality is achieved by using the mean difference with permutations in the following plots. Danceability Distribution To better understand whether the distributions are statistically significantly different we'll use a Z-Test. H0: The Danceability Distribution for Hits and Non-Hits is the same (i.e. the mean diff is 0). H1: The Danceability Distribution for Hits and Non-Hits is not the same. alpha = 0.05hits_danceability = hits.danceability.dropna() nhits_danceability = nhits.danceability.dropna() hits_nhits_diff = hits_danceability.mean() - nhits_danceability.mean() h0_diff = 0 print("Hits Mean: {}\nNon-Hits Mean: {}\nMean Diff: {}\nH0 Diff: {}".format(hits.danceability.mean(), nhits.danceability.mean(), hits_nhits_diff,h0_diff)) sns.boxplot(x='hit',y='danceability',data=train,order=[True,False]) plt.show() test_mean_diff(hits_danceability,nhits_danceability,hits_nhits_diff)Hits Mean: 0.5957135504885993 Non-Hits Mean: 0.5295059136044526 Mean Diff: 0.06620763688414666 H0 Diff: 0We can reject H0 for alpha = 0.001 and have gathered evidence that the distributions of danceability between Hits and Non-Hits are statistically significant. Energy Distributionhits_energy = hits.energy.dropna() nhits_energy = nhits.energy.dropna() hits_nhits_diff = hits_energy.mean() - nhits_energy.mean() h0_diff = 0 print("Hits Mean: {}\nNon-Hits Mean: {}\nMean Diff: {}\nH0 Diff: {}".format(hits.energy.mean(), nhits.energy.mean(), hits_nhits_diff,h0_diff)) sns.boxplot(x='hit',y='energy',data=train,order=[True,False]) plt.show() test_mean_diff(hits_energy,nhits_energy,hits_nhits_diff)Hits Mean: 0.6176291400651467 Non-Hits Mean: 0.5279292037126051 Mean Diff: 0.08969993635254148 H0 Diff: 0We can reject H0 for alpha = 0.001 and have gathered evidence that the distributions of energy between Hits and Non-Hits are statistically significant. 
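Generating the 10,000 replicates with an explicit Python loop is simple but slow. As a rough cross-check, not part of the original notebook, recent SciPy versions (1.7 or later) ship scipy.stats.permutation_test, which performs the same kind of independent-sample permutation test; the helper name mean_diff_stat is ours:

from scipy.stats import permutation_test

def mean_diff_stat(x, y):
    # same statistic as mean_diff above: difference of sample means
    return np.mean(x) - np.mean(y)

res = permutation_test((hits_energy, nhits_energy), mean_diff_stat,
                       permutation_type='independent',
                       n_resamples=10000, alternative='greater')
print(res.statistic, res.pvalue)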
Acousticness Distributionhits_acousticness = hits.acousticness.dropna() nhits_acousticness = nhits.acousticness.dropna() hits_nhits_diff = hits_acousticness.mean() - nhits_acousticness.mean() h0_diff = 0 print("Hits Mean: {}\nNon-Hits Mean: {}\nMean Diff: {}\nH0 Diff: {}".format(hits.acousticness.mean(), nhits.acousticness.mean(), hits_nhits_diff,h0_diff)) sns.boxplot(x='hit',y='acousticness',data=train,order=[True,False]) plt.show() test_mean_diff(hits_acousticness,nhits_acousticness,hits_nhits_diff)Hits Mean: 0.2987878794723128 Non-Hits Mean: 0.43604737878122823 Mean Diff: -0.13725949930891557 H0 Diff: 0We can reject H0 for alpha = 0.001 and have gathered evidence that the distributions of acousticness between Hits and Non-Hits are statistically significant. Loudness Distributionhits_loudness = hits.loudness.dropna() nhits_loudness = nhits.loudness.dropna() hits_nhits_diff = hits_loudness.mean() - nhits_loudness.mean() h0_diff = 0 print("Hits Mean: {}\nNon-Hits Mean: {}\nMean Diff: {}\nH0 Diff: {}".format(hits.loudness.mean(), nhits.loudness.mean(), hits_nhits_diff,h0_diff)) _ = sns.boxplot(x='hit',y='loudness',data=train,order=[True,False]) plt.show() test_mean_diff(hits_loudness,nhits_loudness,hits_nhits_diff)Hits Mean: -8.802725342019542 Non-Hits Mean: -11.022258743912465 Mean Diff: 2.219533401892921 H0 Diff: 0We can reject H0 for alpha = 0.001 and have gathered evidence that the distributions of loudness between Hits and Non-Hits are statistically significant. Speechiness Distributionhits_speechiness = hits.speechiness.dropna() nhits_speechiness = nhits.speechiness.dropna() hits_nhits_diff = hits_speechiness.mean() - nhits_speechiness.mean() h0_diff = 0 print("Hits Mean: {}\nNon-Hits Mean: {}\nMean Diff: {}\nH0 Diff: {}".format(hits.speechiness.mean(), nhits.speechiness.mean(), hits_nhits_diff,h0_diff)) _ = sns.boxplot(x='hit',y='speechiness',data=train,order=[True,False]) plt.show() test_mean_diff(hits_speechiness,nhits_speechiness,hits_nhits_diff)Hits Mean: 0.06883432573289902 Non-Hits Mean: 0.09073435582822086 Mean Diff: -0.02190003009532182 H0 Diff: 0We can reject H0 for alpha = 0.001 and have gathered evidence that the distributions of speechiness between Hits and Non-Hits are statistically significant. Valence Distributionhits_valence = hits.valence.dropna() nhits_valence = nhits.valence.dropna() hits_nhits_diff = hits_valence.mean() - nhits_valence.mean() h0_diff = 0 print("Hits Mean: {}\nNon-Hits Mean: {}\nMean Diff: {}\nH0 Diff: {}".format(hits.valence.mean(), nhits.valence.mean(), hits_nhits_diff,h0_diff)) _ = sns.boxplot(x='hit',y='valence',data=train,order=[True,False]) plt.show() test_mean_diff(hits_valence,nhits_valence,hits_nhits_diff)Hits Mean: 0.6109236091205211 Non-Hits Mean: 0.5214109423818861 Mean Diff: 0.08951266673863512 H0 Diff: 0We can reject H0 for alpha = 0.001 and have gathered evidence that the distributions of valence between Hits and Non-Hits are statistically significant. 
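With samples this large almost any mean difference comes out as statistically significant, so it can be useful to also report an effect size. A minimal sketch, not part of the original analysis (the helper name cohens_d is ours), using the valence split from above:

def cohens_d(a, b):
    # standardised mean difference using the pooled standard deviation
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    pooled_var = ((len(a) - 1) * a.var(ddof=1) + (len(b) - 1) * b.var(ddof=1)) / (len(a) + len(b) - 2)
    return (a.mean() - b.mean()) / np.sqrt(pooled_var)

print("Cohen's d for valence:", cohens_d(hits_valence, nhits_valence))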
Instrumentalness Distributionhits_instrumentalness = hits.instrumentalness.dropna() nhits_instrumentalness = nhits.instrumentalness.dropna() hits_nhits_diff = hits_instrumentalness.mean() - nhits_instrumentalness.mean() h0_diff = 0 print("Hits Mean: {}\nNon-Hits Mean: {}\nMean Diff: {}\nH0 Diff: {}".format(hits.instrumentalness.mean(), nhits.instrumentalness.mean(), hits_nhits_diff,h0_diff)) _ = sns.boxplot(x='hit',y='instrumentalness',data=train,order=[True,False]) plt.show() test_mean_diff(hits_instrumentalness,nhits_instrumentalness,hits_nhits_diff)Hits Mean: 0.03421712542280131 Non-Hits Mean: 0.15391891948833092 Mean Diff: -0.11970179406552961 H0 Diff: 0We can reject H0 for alpha = 0.001 and have gathered evidence that the distributions of valence between Hits and Non-Hits are statistically significant. Duration (in ms)hits_duration_ms = hits.duration_ms.dropna() nhits_duration_ms = nhits.duration_ms.dropna() hits_nhits_diff = hits_duration_ms.mean() - nhits_duration_ms.mean() h0_diff = 0 print("Hits Mean: {}\nNon-Hits Mean: {}\nMean Diff: {}\nH0 Diff: {}".format(hits.duration_ms.mean(), nhits.duration_ms.mean(), hits_nhits_diff,h0_diff)) _ = sns.boxplot(x='hit',y='duration_ms',data=train,order=[True,False]) plt.show() test_mean_diff(hits_duration_ms,nhits_duration_ms,hits_nhits_diff)Hits Mean: 222314.18592833876 Non-Hits Mean: 235538.301245968 Mean Diff: -13224.115317629243 H0 Diff: 0Preparing the reads[Loose et al] published their raw read files on ENA. This script uses four of these sets which contain reads of amplicons. These were processed using different "read until" scripts (or none at all), but that doesn't matter. What does matter is to get as much real reads as possible.%load_ext autoreload %autoreload 2 import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn import porekit import re import pysam import random import feather %matplotlib inlineLoad metadata for 4 datasetsdirectories = ["AmpliconOddEvenControl", "AmpliconOddReadUntil", "AmpliconEvenReadUntil", "Balanced"] meta_frames = [] for d in directories: print(d) df = porekit.gather_metadata("/home/andi/nanopore/%s" % d, workers=4) print(df.shape[0]) meta_frames.append(df) meta = pd.concat (meta_frames) for df in meta_frames: print (df.shape)(7576, 27) (25999, 27) (20032, 27) (24386, 27)The individual filenames will look like this:meta_frames[0].index.values[0] meta_frames[1].index.values[0] meta_frames[2].index.values[0] meta_frames[3].index.values[0]Merging alignment data[Loose et al] provide all the intermediate data files necessary to recreate their figures. Among these, there are some alignment files in SAM format.Because it doesn't make sense to classify complement sequences/matches in the Read Until context, we only use the "Template" strands.def sam_to_dataframe(file_name): sam = pysam.AlignmentFile(file_name) records = [] for i, segment in enumerate(sam): d = dict() for k in ["query_name", "reference_start", "reference_end", "mapping_quality", ]: d[k] = getattr(segment, k) records.append(d) alignments = pd.DataFrame.from_records(records) return alignments base = "/home/andi/nanopore/RUFigs/data" bams = ["/fig3/RU_dudu/RU_dudu_Template.bam", "/fig3/RU_udud/RU_udud_Template.bam", "/fig3/NO_RU/NO_RU_Template.bam", "/fig4/200/200_Template.bam", ] alignments = pd.concat([sam_to_dataframe(base+file_name) for file_name in bams])Unfortunately filenames and sequence names tend to get a bit mangled when going from Fast5 to SAM, for various reasons. 
As of now, there is no particular convention for naming read files or naming the exported sequences. On the one hand I don't feel like it is a good idea to abuse filenames as character seperated database rows, on the other hand, using the unique read id from the Fast5 File isn't very human-friendly either.To assign genomic coordinates to the reads, a regular expression extracts four numbers from the file name/query name making each read unique and matchable.regexp = re.compile(r'_(?P\d+)_(?P\d+)_ch(?P\d+)_file(?P\d+)') def extract(s): try: return "_".join(regexp.search(s).groups()) except: return "" alignments["alignment_key"] = alignments.query_name.map(extract) meta["alignment_key"] = meta.index.map(extract) alignments["alignment_key"].map(lambda s: s.split("_")[0]).unique() meta["run_number"] = meta["alignment_key"].map(lambda s: s.split("_")[0]) meta2 = meta.reset_index().merge(alignments).set_index("filename") meta2.shape meta = meta2Visualizing the alignmentsThis is just a simple histogram showing where the "reference_start" values fall.f, ax = plt.subplots() f.set_figwidth(13) ax.hist(meta.reference_start, bins=110);Processing the amplicons[Loose et al] pooled 11 amplicons. Each read has to be assigned retroactively to one of these, represented by number from 0 to 10.amplicons = [(52,1980), (2065,3965), (4070,5989), (6059,7981), (8012,9947), (10008,11963), (12006,13941), (14011,15945), (16076,17987), (18022,19972), (20053,21979), ] def amplicon_from_position(pos): for i,c in enumerate(amplicons): a,b = c if a<=pos<=b: return i meta["amplicon"] = meta.reference_start.map(amplicon_from_position)How many reads failed to be assigned?meta.amplicon.isnull().sum()Purge these:meta = meta[np.isnan(meta.amplicon)==False] meta.shapeThe number of viable reads is diminishing quickly. But this can't be helped.How many reads longer than 500 bases are assigned to each amplicon?meta.query("template_length>500").groupby("amplicon").format.count()Unfortunately some amplicons are severely underrepresented, with one going as low as 635 reads.This is a big problem for dividing the data into training and test sets, because blindly sampling from total pool may skew this balance even further. The algorithms will then bias against the least represented amplicons to gain a bit of extra accuracy, which is not what we want. With ten times as much data we could balance both the training and the test set. As it is, I chose to balance the test set only, to get a more realistic view of the performance. My assumption is that, over multiple repetitions of amplification / library preparation and sequencing runs, the amplicons should be roughly equally distributed.To balance the test set, 200 reads from each amplicon are chosen. This makes for a very weak test set. 
But again, this can't be helped at this point.sufficient = meta.query("template_length>=500") all_files = sufficient.index.values test_files = [] for i in range(11): sub = sufficient[sufficient.amplicon==i] test_files += list(np.random.choice(sub.index.values, 200)) training_files = list(set(sufficient.index.values) - set(test_files)) len(training_files), len(test_files) test_data = sufficient.ix[np.array(test_files)] feather.write_dataframe(test_data, "amplicon_test_metadata.feather") training_data = sufficient.ix[np.array(training_files)] feather.write_dataframe(training_data, "amplicon_training_metadata.feather")Sage-Combinat Widgets Demo The Sage-Combinat widgets library provides interactive visual editors for some of the combinatorial objects of the Sage library, based on Jupyter's widgets framework. At this stage, the focus is on objects that have a natural representation as a collection of cells layed upon a grid, like integer partitions, tableaux, aztec diamonds, or matrices.We start by importing the library:from sage_combinat_widgets import * from sage_combinat_widgets.grid_view_widget import * s = SkewTableau([[None, None, 1, 2], [None, 1], [4]]) GridViewWidget(s) sp = SkewPartition([[7, 4, 2, 1],[2, 1, 1]]) PartitionGridViewWidget(sp) #w.addable_cells()Interactive edition of integer partitionsIn the following example, we create an integer partition, and then launch an interactive visual editor for this partition (with buttons):p = Partition([3,3,2,1]) PartitionGridViewWidget(p)Note that `p` itself is left unchanged::pTo recover the new partition, you need to give a name to the editor, and access the new partition from there:w = PartitionGridViewWidget(p) w w.valueInteractive editor as building block for interactive applications Thanks to Jupyter's widgets framework, such an interactive editor is not just a gadget by itself, but can be used as building block for interactive applications. 
Here is a minimal example letting the user interactively explore how the hook lengths of a partition evolve with the partition:%display unicode_art p_input = PartitionGridViewWidget(Partition([2,1])) @interact def f(p = p_input): return Tableau(p.hook_lengths())As a more advanced application, we build a mini application for interactively exploring products of Schur functions:S = SymmetricFunctions(QQ) s = S.s() la_input = PartitionGridViewWidget(Partition([2,1])) mu_input = PartitionGridViewWidget(Partition([2,1])) @interact def f(la = la_input, mu = mu_input): return s[la] * s[mu] la_input.value, mu_input.valueInteractive edition of Young tableaux*(with text inputs)*# A Young tableau t = StandardTableaux(15).random_element() t1 = Tableau(list(t)) # A Grid View for this tableau wt1 = GridViewWidget(t1) wt1-> try to edit/remove a cell-> try to add an integer in an empty cellwt1.value t2 = StandardTableau(list(t)) wt2 = GridViewWidget(t2) wt2-> Now, try to add 18-> Try to add 16 A few Graphs*(with buttons)*from sage.graphs.generators.basic import GridGraph from sage_combinat_widgets.grid_view_widget import DisabledButtonCell gg = GridGraph((4,7)) wgg = GridViewWidget(gg, cell_widget_classes=[DisabledButtonCell]) wggAztec Diamond Graphfrom sage.graphs.generators.families import AztecDiamondGraph az = AztecDiamondGraph(4) waz = GridViewWidget(az, cell_layout=buttoncell_smaller_layout, cell_widget_classes=[DisabledButtonCell], blank_widget_class=BlankButton) wazMatricesfrom sage.matrix.matrix_space import MatrixSpace S = MatrixSpace(ZZ, 4,3) m = S.random_element() m wm = GridViewWidget(m) wm wm.append_row((1,2,3,4)) wm.insert_column(2, [1,1,1]) wm.value wm.remove_row() wm.remove_column() R = PolynomialRing(QQ, 9, 'x') A = matrix(R, 3, 3, R.gens()) wA = GridViewWidget(A) wA wA.append_row() wA.append_column(['x1','x2','x3']) wA.value**Deep Q-Learning with SpaceInvaders** In this assignment, I downloaded the SpaceInvaders ROM, which is supported by OpenAI Gym. I installed and configured Anaconda, PyTorch, and the OpenAI Gym development environment, and imported the game ROM. To apply Deep Q-Learning to the game, I wrote a Deep Q-Network, a game agent, and several policies. By switching between policies and changing hyperparameters, I observe how Deep Q-Learning affects the agent's ability to play the game. SpaceInvaders is a fixed shooter in which the player fires at the enemies; the goal is to destroy all of them. The player has three lives, and if the enemies reach the bottom of the screen the game ends immediately. The enemies fire back to destroy the player. There is fixed defensive cover in the game, which can be destroyed by both the player and the enemies. 
Install Environment!pip install tensorflow==2.5.0rc0 gym keras-rl2 gym[atari] from ale_py import ALEInterface from ale_py.roms import SpaceInvaders ale = ALEInterface() ale.loadROM(SpaceInvaders)Import Libraries%matplotlib inline import gym import math import random import numpy as np import matplotlib import matplotlib.pyplot as plt from collections import namedtuple from itertools import count from PIL import Image import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torchvision.transforms as TSet up displayis_ipython = 'inline' in matplotlib.get_backend() if is_ipython: from IPython import displayDeep Q-Networkclass DQN(nn.Module): def __init__(self, img_height, img_width): super().__init__() self.fc1 = nn.Linear(in_features=img_height*img_width*3, out_features=24) self.fc2 = nn.Linear(in_features=24, out_features=32) self.out = nn.Linear(in_features=32, out_features=6) def forward(self, t): t = t.flatten(start_dim=1) t = F.relu(self.fc1(t)) t = F.relu(self.fc2(t)) t = self.out(t) return tExperience classExperience = namedtuple( 'Experience', ('state', 'action', 'next_state', 'reward') )Replay Memoryclass ReplayMemory(): def __init__(self, capacity): self.capacity = capacity self.memory = [] self.push_count = 0 def push(self, experience): if len(self.memory) < self.capacity: self.memory.append(experience) else: self.memory[self.push_count % self.capacity] = experience self.push_count += 1 def sample(self, batch_size): return random.sample(self.memory, batch_size) def can_provide_sample(self, batch_size): return len(self.memory) >= batch_sizeEpsilon Greedy Strategyclass EpsilonGreedyStrategy(): def __init__(self, start, end, decay): self.start = start self.end = end self.decay = decay def get_exploration_rate(self, current_step): return self.end + (self.start - self.end) * \ math.exp(-1. 
* current_step * self.decay)Reinforcement Learning Agentclass Agent(): def __init__(self, strategy, num_actions, device): self.current_step = 0 self.strategy = strategy self.num_actions = num_actions self.device = device def select_action(self, state, policy_net): rate = strategy.get_exploration_rate(self.current_step) self.current_step += 1 if rate > random.random(): action = random.randrange(self.num_actions) #explore return torch.tensor([action]).to(device) else: with torch.no_grad(): return policy_net(state).argmax(dim=1).to(device) #exploitSpaceInvaders Environment Managerclass SpaceInvadersEnvManager(): def __init__(self, device): self.device = device self.env = gym.make('SpaceInvaders-v0').unwrapped self.env.reset() self.current_screen = None self.done = False def reset(self): self.env.reset() self.current_screen = None def close(self): self.env.close() def render(self, mode='human'): return self.env.render(mode) def num_actions_available(self): return self.env.action_space.n def take_action(self, action): _, reward, self.done, _ = self.env.step(action.item()) return torch.tensor([reward], device=self.device) def just_starting(self): return self.current_screen is None def get_state(self): if self.just_starting() or self.done: self.current_screen = self.get_processed_screen() black_screen = torch.zeros_like(self.current_screen) return black_screen else: s1 = self.current_screen s2 = self.get_processed_screen() self.current_screen = s2 return s2 - s1 def get_screen_height(self): screen = self.get_processed_screen() return screen.shape[2] def get_screen_width(self): screen = self.get_processed_screen() return screen.shape[3] def get_processed_screen(self): screen = self.render('rgb_array').transpose((2, 0, 1)) screen = self.crop_screen(screen) return self.transform_screen_data(screen) def crop_screen(self, screen): screen_height = screen.shape[1] # Strip off top and bottom top = int(screen_height * 0.4) bottom = int(screen_height * 0.8) screen = screen[:, top:bottom, :] return screen def transform_screen_data(self, screen): # Convert to float, rescale, convert to tensor screen = np.ascontiguousarray(screen, dtype=np.float32) / 255 screen = torch.from_numpy(screen) # Use torchvision package to compose image transforms resize = T.Compose([ T.ToPILImage() , T.Resize((40,90)) , T.ToTensor() ]) return resize(screen).unsqueeze(0).to(self.device)Outputs of Experiencedef plot(values, moving_avg_period): plt.figure(2) plt.clf() plt.title('Training...') plt.xlabel('Episode') plt.ylabel('Score') plt.plot(values) moving_avg = get_moving_average(moving_avg_period, values) plt.plot(moving_avg) plt.pause(0.001) print("Episode", len(values), "\n", \ moving_avg_period, "episode score avg:", moving_avg[-1]) if is_ipython: display.clear_output(wait=True) def get_moving_average(period, values): values = torch.tensor(values, dtype=torch.float) if len(values) >= period: moving_avg = values.unfold(dimension = 0, size = period, step = 1) \ .mean(dim=1).flatten(start_dim=0) moving_avg = torch.cat((torch.zeros(period-1), moving_avg)) return moving_avg.numpy() else: moving_avg = torch.zeros(len(values)) return moving_avg.numpy()Tensor Processingdef extract_tensors(experiences): batch = Experience(*zip(*experiences)) t1 = torch.cat(batch.state) t2 = torch.cat(batch.action) t3 = torch.cat(batch.reward) t4 = torch.cat(batch.next_state) return (t1, t2, t3, t4)Q-Value Calculatorclass QValues(): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @staticmethod def get_current(policy_net, states, 
actions): return policy_net(states).gather(dim=1, index=actions.unsqueeze(-1)) @staticmethod def get_next(target_net, next_states): final_state_locations = next_states.flatten(start_dim=1) \ .max(dim=1)[0].eq(0).type(torch.bool) non_final_state_locations = (final_state_locations == False) non_final_states = next_states[non_final_state_locations] batch_size = next_states.shape[0] values = torch.zeros(batch_size).to(QValues.device) values[non_final_state_locations] = target_net(non_final_states).max(dim=1)[0].detach() return valuesMain Program and Answers **1. Establish a baseline performance. How well did your Deep Q-learning do on your problem?**total_episodes = 1000, learning_rate = 0.001, gamma = 0.999, epsilon = 1.0, max_epsilon = 1.0, min_epsilon = 0.01, decay_rate = 0.001. With this baseline configuration, our RL program on SpaceInvaders-v0 reaches a score of 184.7, which is not very good. According to the graph, the score at the beginning is almost as high as the score at the end, which shows that our RL program does not learn to play this game well enough to earn higher scores. This may be because the number of actions is relatively large (6), i.e. there are many choices at every step, and the e-greedy exploration strategy is not strong enough, so more episodes would be needed to obtain better learning results.batch_size = 256 gamma = 0.999 eps_start = 1 eps_end = 0.01 eps_decay = 0.001 target_update = 10 memory_size = 100000 lr = 0.001 num_episodes = 1000 device = torch.device("cuda" if torch.cuda.is_available() else "cpu") em = SpaceInvadersEnvManager(device) strategy = EpsilonGreedyStrategy(eps_start, eps_end, eps_decay) agent = Agent(strategy, em.num_actions_available(), device) memory = ReplayMemory(memory_size) policy_net = DQN(em.get_screen_height(), em.get_screen_width()).to(device) target_net = DQN(em.get_screen_height(), em.get_screen_width()).to(device) target_net.load_state_dict(policy_net.state_dict()) target_net.eval() optimizer = optim.Adam(params=policy_net.parameters(), lr=lr) episode_scores = [] for episode in range(num_episodes): em.reset() state = em.get_state() episode_scores.append(0) for timestep in count(): action = agent.select_action(state, policy_net) reward = em.take_action(action) next_state = em.get_state() memory.push(Experience(state, action, next_state, reward)) state = next_state episode_scores[episode] += reward if(memory.can_provide_sample(batch_size)): experiences = memory.sample(batch_size) states, actions, rewards, next_states = extract_tensors(experiences) current_q_values = QValues.get_current(policy_net, states, actions) next_q_values = QValues.get_next(target_net, next_states) target_q_values = (next_q_values * gamma) + rewards loss = F.mse_loss(current_q_values, target_q_values.unsqueeze(1)) optimizer.zero_grad() loss.backward() optimizer.step() if em.done: plot(episode_scores, 100) break if episode % target_update == 0: target_net.load_state_dict(policy_net.state_dict()) em.close()**2. What are the states, the actions, and the size of the Q-table?**State: each state is an RGB image of the screen, an array of shape (210, 160, 3). The environment returns a new state every k frames, where k is uniformly sampled from {2,3,4}. Actions: ['NOOP', 'FIRE', 'RIGHT', 'LEFT', 'RIGHTFIRE', 'LEFTFIRE']; there are 6 actions in this game. The size of the Q-table is the number of states multiplied by the number of actions (see the quick calculation just below). 
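To make that formula concrete, here is a quick back-of-the-envelope calculation (my own addition, not part of the assignment) showing why an explicit Q-table is infeasible for raw frames, which is why a neural network approximates Q instead:

```python
# Each raw state is a (210, 160, 3) array of 8-bit colour values, so the number of distinct
# states is 256 ** (210 * 160 * 3), and a tabular Q would need that many rows x 6 actions.
n_pixels = 210 * 160 * 3
print(f"Q-table size ~ 256^{n_pixels} states x 6 actions")  # astronomically large
```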
Each state offers as many choices as there are actions.temp = gym.make('SpaceInvaders-v0') print(temp.env.get_action_meanings()) print(temp.env.observation_space.shape)['NOOP', 'FIRE', 'RIGHT', 'LEFT', 'RIGHTFIRE', 'LEFTFIRE'] (210, 160, 3)**3. What are the rewards? Why did you choose them?**The rewards are the scores of the game, because the ultimate goal of the game is to get a higher score. The score expresses how well the player plays; a higher score means a deeper understanding of the game mechanics. Scores are also easy to count and compare, which is convenient for the learning process. **4. How did you choose alpha and gamma in the Bellman equation? Try at least one additional value for alpha and gamma. How did it change the baseline performance?**![formula.svg](attachment:formula.svg)Alpha and gamma appear in the Q-learning update $Q(s,a) \leftarrow Q(s,a) + \alpha\,[r + \gamma \max_{a'} Q(s',a') - Q(s,a)]$. Values of the learning rate (alpha) close to 1 make the changes in $Q$ faster. The learning rate determines the extent to which newly acquired information overrides old information: a factor of 0 makes the agent learn nothing (using only prior knowledge), while a factor of 1 makes the agent consider only the most recent information (ignoring prior knowledge). In a fully deterministic environment, a learning rate of alpha = 1 is optimal. When the problem is stochastic, the algorithm only converges under technical conditions that require the learning rate to decrease to zero. When alpha is 0.001, the score is 184.7; when alpha is changed to 0.999, the score is 123.5. A larger alpha therefore causes a significant drop in score, which suggests that the game environment behaves stochastically: the smaller the alpha, the more the agent relies on previously gathered information and the better the game score. The discount factor gamma determines the importance of future rewards. A factor of 0 makes the agent short-sighted, considering only the current reward, while a factor close to 1 makes it strive for long-term high rewards. If the discount factor reaches or exceeds 1, the action values may diverge; for gamma = 1, if there is no terminal state, or if the agent never reaches one, the environment history becomes infinitely long and the utility of additive, undiscounted rewards generally becomes infinite. When gamma is 0.999, the score is 184.7; when gamma is changed to 0.1, the score is 200.95. The score curve for the low gamma value fluctuates sharply, but the overall score is higher than with the high gamma value. This shows that in this game, future rewards should not be weighted too heavily. 
More use of current rewards can lead to higher scores faster.batch_size = 256 gamma = 0.999 eps_start = 1 eps_end = 0.01 eps_decay = 0.001 target_update = 10 memory_size = 100000 lr = 0.999 num_episodes = 1000 device = torch.device("cuda" if torch.cuda.is_available() else "cpu") em = SpaceInvadersEnvManager(device) strategy = EpsilonGreedyStrategy(eps_start, eps_end, eps_decay) agent = Agent(strategy, em.num_actions_available(), device) memory = ReplayMemory(memory_size) policy_net = DQN(em.get_screen_height(), em.get_screen_width()).to(device) target_net = DQN(em.get_screen_height(), em.get_screen_width()).to(device) target_net.load_state_dict(policy_net.state_dict()) target_net.eval() optimizer = optim.Adam(params=policy_net.parameters(), lr=lr) episode_scores = [] for episode in range(num_episodes): em.reset() state = em.get_state() episode_scores.append(0) for timestep in count(): action = agent.select_action(state, policy_net) reward = em.take_action(action) next_state = em.get_state() memory.push(Experience(state, action, next_state, reward)) state = next_state episode_scores[episode] += reward if(memory.can_provide_sample(batch_size)): experiences = memory.sample(batch_size) states, actions, rewards, next_states = extract_tensors(experiences) current_q_values = QValues.get_current(policy_net, states, actions) next_q_values = QValues.get_next(target_net, next_states) target_q_values = (next_q_values * gamma) + rewards loss = F.mse_loss(current_q_values, target_q_values.unsqueeze(1)) optimizer.zero_grad() loss.backward() optimizer.step() if em.done: plot(episode_scores, 100) break if episode % target_update == 0: target_net.load_state_dict(policy_net.state_dict()) em.close() batch_size = 256 gamma = 0.1 eps_start = 1 eps_end = 0.01 eps_decay = 0.001 target_update = 10 memory_size = 100000 lr = 0.001 num_episodes = 1000 device = torch.device("cuda" if torch.cuda.is_available() else "cpu") em = SpaceInvadersEnvManager(device) strategy = EpsilonGreedyStrategy(eps_start, eps_end, eps_decay) agent = Agent(strategy, em.num_actions_available(), device) memory = ReplayMemory(memory_size) policy_net = DQN(em.get_screen_height(), em.get_screen_width()).to(device) target_net = DQN(em.get_screen_height(), em.get_screen_width()).to(device) target_net.load_state_dict(policy_net.state_dict()) target_net.eval() optimizer = optim.Adam(params=policy_net.parameters(), lr=lr) episode_scores = [] for episode in range(num_episodes): em.reset() state = em.get_state() episode_scores.append(0) for timestep in count(): action = agent.select_action(state, policy_net) reward = em.take_action(action) next_state = em.get_state() memory.push(Experience(state, action, next_state, reward)) state = next_state episode_scores[episode] += reward if(memory.can_provide_sample(batch_size)): experiences = memory.sample(batch_size) states, actions, rewards, next_states = extract_tensors(experiences) current_q_values = QValues.get_current(policy_net, states, actions) next_q_values = QValues.get_next(target_net, next_states) target_q_values = (next_q_values * gamma) + rewards loss = F.mse_loss(current_q_values, target_q_values.unsqueeze(1)) optimizer.zero_grad() loss.backward() optimizer.step() if em.done: plot(episode_scores, 100) break if episode % target_update == 0: target_net.load_state_dict(policy_net.state_dict()) em.close()**5. Try a policy other than e-greedy. How did it change the baseline performance?** I implemented a policy of using random sampling to determine actions. 
In theory, a random sampling strategy means that the larger the number of actions, the smaller the probability of picking the best action at random, so its results should be worse. In practice, comparing the same number of trials, I found that after 1000 episodes the agent's game score is 144.15, which is lower than e-greedy's 184.7. In other words, changing the e-greedy policy to a random sampling policy makes the baseline performance worse.class RandomSamplingStrategy(): def __init__(self, start, end, decay): self.start = start self.end = end self.decay = decay class Agent(): def __init__(self, strategy, num_actions, device): self.current_step = 0 self.strategy = strategy self.num_actions = num_actions self.device = device def select_action(self, state, policy_net): self.current_step += 1 action = random.randrange(self.num_actions) #explore return torch.tensor([action]).to(device) batch_size = 256 gamma = 0.999 eps_start = 1 eps_end = 0.01 eps_decay = 0.001 target_update = 10 memory_size = 100000 lr = 0.001 num_episodes = 1000 device = torch.device("cuda" if torch.cuda.is_available() else "cpu") em = SpaceInvadersEnvManager(device) strategy = RandomSamplingStrategy(eps_start, eps_end, eps_decay) agent = Agent(strategy, em.num_actions_available(), device) memory = ReplayMemory(memory_size) policy_net = DQN(em.get_screen_height(), em.get_screen_width()).to(device) target_net = DQN(em.get_screen_height(), em.get_screen_width()).to(device) target_net.load_state_dict(policy_net.state_dict()) target_net.eval() optimizer = optim.Adam(params=policy_net.parameters(), lr=lr) episode_scores = [] for episode in range(num_episodes): em.reset() state = em.get_state() episode_scores.append(0) for timestep in count(): action = agent.select_action(state, policy_net) reward = em.take_action(action) next_state = em.get_state() memory.push(Experience(state, action, next_state, reward)) state = next_state episode_scores[episode] += reward if(memory.can_provide_sample(batch_size)): experiences = memory.sample(batch_size) states, actions, rewards, next_states = extract_tensors(experiences) current_q_values = QValues.get_current(policy_net, states, actions) next_q_values = QValues.get_next(target_net, next_states) target_q_values = (next_q_values * gamma) + rewards loss = F.mse_loss(current_q_values, target_q_values.unsqueeze(1)) optimizer.zero_grad() loss.backward() optimizer.step() if em.done: plot(episode_scores, 100) break if episode % target_update == 0: target_net.load_state_dict(policy_net.state_dict()) em.close()**6. How did you choose your decay rate and starting epsilon? Try at least one additional value for epsilon and the decay rate. How did it change the baseline performance? What is the value of epsilon when you reach the max steps per episode?** To strike the balance between exploitation and exploration, I use the so-called epsilon greedy strategy. We define an exploration rate e and initially set it to 1. This exploration rate is the probability that our agent explores the environment instead of exploiting it; with e = 1 the agent is certain to start by exploring the environment. At the beginning of each new episode, e decays at the rate we set, so that as the agent learns more about the environment, the probability of exploration becomes smaller and smaller. 
Once there is an opportunity to explore and learn more about the environment, the agent becomes "greedy" in using the environment.To determine whether the agent will choose exploration or exploitation at each time step, we generate a random number between 0 and 1. If this number is greater than epsilon, then the agent will choose its next action via exploitation.I set the decay rate to 0.001 and 0.5 respectively. After experiments, when the decay rate is 0.5, the score is 144.5. When the decay rate is 0.001, the score is 184.7. Therefore, an excessively large decay rate will quickly reduce the possibility of exploration. The agent will continue to reuse the previously not the best aciton to operate. This is not conducive to agent getting higher scores.I also set the starting epsilon to 1 and 0.5 respectively. After experimentation, when starting epsilon is 0.5, the score is 147.6. When starting epsilon is 1, the score is 184.7. Therefore, too small starting epsilon will limit the possibility of exploration at the beginning. The agent will choose bad actions in the early stage, wasting a lot of unnecessary experiments. This is also not conducive to the agent getting a higher score.When decay rate is 0.001 and starting epsilon is 1, I reach the max steps per episode.batch_size = 256 gamma = 0.999 eps_start = 0.5 eps_end = 0.01 eps_decay = 0.001 target_update = 10 memory_size = 100000 lr = 0.001 num_episodes = 1000 device = torch.device("cuda" if torch.cuda.is_available() else "cpu") em = SpaceInvadersEnvManager(device) strategy = EpsilonGreedyStrategy(eps_start, eps_end, eps_decay) agent = Agent(strategy, em.num_actions_available(), device) memory = ReplayMemory(memory_size) policy_net = DQN(em.get_screen_height(), em.get_screen_width()).to(device) target_net = DQN(em.get_screen_height(), em.get_screen_width()).to(device) target_net.load_state_dict(policy_net.state_dict()) target_net.eval() optimizer = optim.Adam(params=policy_net.parameters(), lr=lr) episode_scores = [] for episode in range(num_episodes): em.reset() state = em.get_state() episode_scores.append(0) for timestep in count(): action = agent.select_action(state, policy_net) reward = em.take_action(action) next_state = em.get_state() memory.push(Experience(state, action, next_state, reward)) state = next_state episode_scores[episode] += reward if(memory.can_provide_sample(batch_size)): experiences = memory.sample(batch_size) states, actions, rewards, next_states = extract_tensors(experiences) current_q_values = QValues.get_current(policy_net, states, actions) next_q_values = QValues.get_next(target_net, next_states) target_q_values = (next_q_values * gamma) + rewards loss = F.mse_loss(current_q_values, target_q_values.unsqueeze(1)) optimizer.zero_grad() loss.backward() optimizer.step() if em.done: plot(episode_scores, 100) break if episode % target_update == 0: target_net.load_state_dict(policy_net.state_dict()) em.close() batch_size = 256 gamma = 0.999 eps_start = 1 eps_end = 0.01 eps_decay = 0.5 target_update = 10 memory_size = 100000 lr = 0.001 num_episodes = 1000 device = torch.device("cuda" if torch.cuda.is_available() else "cpu") em = SpaceInvadersEnvManager(device) strategy = EpsilonGreedyStrategy(eps_start, eps_end, eps_decay) agent = Agent(strategy, em.num_actions_available(), device) memory = ReplayMemory(memory_size) policy_net = DQN(em.get_screen_height(), em.get_screen_width()).to(device) target_net = DQN(em.get_screen_height(), em.get_screen_width()).to(device) target_net.load_state_dict(policy_net.state_dict()) 
target_net.eval() optimizer = optim.Adam(params=policy_net.parameters(), lr=lr) episode_scores = [] for episode in range(num_episodes): em.reset() state = em.get_state() episode_scores.append(0) for timestep in count(): action = agent.select_action(state, policy_net) reward = em.take_action(action) next_state = em.get_state() memory.push(Experience(state, action, next_state, reward)) state = next_state episode_scores[episode] += reward if(memory.can_provide_sample(batch_size)): experiences = memory.sample(batch_size) states, actions, rewards, next_states = extract_tensors(experiences) current_q_values = QValues.get_current(policy_net, states, actions) next_q_values = QValues.get_next(target_net, next_states) target_q_values = (next_q_values * gamma) + rewards loss = F.mse_loss(current_q_values, target_q_values.unsqueeze(1)) optimizer.zero_grad() loss.backward() optimizer.step() if em.done: plot(episode_scores, 100) break if episode % target_update == 0: target_net.load_state_dict(policy_net.state_dict()) em.close()**7. What is the average number of steps taken per episode?** In the loop of each episode, steps will be executed continuously until the end of the game. So after a single step at the end of the game, I recorded the number of steps and output it as a chart. From the chart, the average number of steps taken per episode is 618.84.def plot_steps(values, steps_avg_period): plt.figure(2) plt.clf() plt.title('Training...') plt.xlabel('Episode') plt.ylabel('Steps') plt.plot(values) steps_avg = get_steps_average(steps_avg_period, values) plt.pause(0.001) print("Episode", len(values), "\n", \ steps_avg_period, "episode steps avg:", steps_avg[-1]) if is_ipython: display.clear_output(wait=True) def get_steps_average(period, values): values = torch.tensor(values, dtype=torch.float) if len(values) >= period: steps_avg = values.unfold(dimension = 0, size = period, step = 1) \ .mean(dim=1).flatten(start_dim=0) steps_avg = torch.cat((torch.zeros(period-1), steps_avg)) return steps_avg.numpy() else: steps_avg = torch.zeros(len(values)) return steps_avg.numpy() batch_size = 256 gamma = 0.999 eps_start = 1 eps_end = 0.01 eps_decay = 0.001 target_update = 10 memory_size = 100000 lr = 0.001 num_episodes = 1000 device = torch.device("cuda" if torch.cuda.is_available() else "cpu") em = SpaceInvadersEnvManager(device) strategy = EpsilonGreedyStrategy(eps_start, eps_end, eps_decay) agent = Agent(strategy, em.num_actions_available(), device) memory = ReplayMemory(memory_size) policy_net = DQN(em.get_screen_height(), em.get_screen_width()).to(device) target_net = DQN(em.get_screen_height(), em.get_screen_width()).to(device) target_net.load_state_dict(policy_net.state_dict()) target_net.eval() optimizer = optim.Adam(params=policy_net.parameters(), lr=lr) episode_scores = [] steps = [] for episode in range(num_episodes): em.reset() state = em.get_state() episode_scores.append(0) steps.append(0) for timestep in count(): action = agent.select_action(state, policy_net) reward = em.take_action(action) next_state = em.get_state() memory.push(Experience(state, action, next_state, reward)) state = next_state episode_scores[episode] += reward if(memory.can_provide_sample(batch_size)): experiences = memory.sample(batch_size) states, actions, rewards, next_states = extract_tensors(experiences) current_q_values = QValues.get_current(policy_net, states, actions) next_q_values = QValues.get_next(target_net, next_states) target_q_values = (next_q_values * gamma) + rewards loss = F.mse_loss(current_q_values, 
target_q_values.unsqueeze(1)) optimizer.zero_grad() loss.backward() optimizer.step() if em.done: steps[episode] = timestep plot_steps(steps, 100) break if episode % target_update == 0: target_net.load_state_dict(policy_net.state_dict()) em.close()---Title: Autocorrelation time estimationDate: 2017-10-16Category: Data AnalysisSlug: autocorrSummary: this is one of the trickiest parts of any MCMC analysisMath: true---%matplotlib inline %config InlineBackend.figure_format = "retina" from __future__ import print_function from matplotlib import rcParams rcParams["savefig.dpi"] = 100 rcParams["figure.dpi"] = 100 rcParams["font.size"] = 20*This is a cross post from the new* [emcee documentation](http://emcee.readthedocs.io).In this tutorial, we will discuss a method for convincing yourself that your chains are sufficiently converged.This can be a difficult subject to discuss because it isn't formally possible to guarantee convergence for any but the simplest models, and therefore any argument that you make will be circular and heuristic.However, some discussion of autocorrelation analysis is (or should be!) a necessary part of any publication using MCMC.With emcee, we follow [Goodman & Weare (2010)](http://msp.berkeley.edu/camcos/2010/5-1/p04.xhtml) and recommend using the *integrated autocorrelation time* to quantify the effects of sampling error on your results.The basic idea is that the samples in your chain are not independent and you must estimate the effective number of independent samples.There are other convergence diagnostics like the [Gelman–Rubin statistic](http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/305.pdf) (*Note: you should not compute the G–R statistic using multiple chains in the same emcee ensemble because the chains are not independent!*) but, since the integrated autocorrelation time directly quantifies the Monte Carlo error (and hence the efficiency of the sampler) on any integrals computed using the MCMC results, it is the natural quantity of interest when judging the robustness of an MCMC analysis. 
Monte Carlo errorThe goal of every MCMC analysis is to evaluate integrals of the form$$\mathrm{E}_{p(\theta)}[f(\theta)] = \int f(\theta)\,p(\theta)\,\mathrm{d}\theta \quad.$$If you had some way of generating $N$ samples $\theta^{(n)}$ from the probability density $p(\theta)$, then you could approximate this integral as$$\mathrm{E}_{p(\theta)}[f(\theta)] \approx \frac{1}{N} \sum_{n=1}^N f(\theta^{(n)})$$where the sum is over the samples from $p(\theta)$.If these samples are independent, then the sampling variance on this estimator is$$\sigma^2 = \frac{1}{N}\,\mathrm{Var}_{p(\theta)}[f(\theta)]$$and the error decreses as $1/\sqrt{N}$ as you generate more samples.In the case of MCMC, the samples are not independent and the error is actually given by$$\sigma^2 = \frac{\tau_f}{N}\,\mathrm{Var}_{p(\theta)}[f(\theta)]$$where $\tau_f$ is the *integrated autocorrelation time* for the chain $f(\theta^{(n)})$.In other words, $N/\tau_f$ is the effective number of samples and $\tau_f$ is the number of steps that are needed before the chain "forgets" where it started.This means that, if you can estimate $\tau_f$, then you can estimate the number of samples that you need to generate to reduce the relative error on your target integral to (say) a few percent.**Note:** It is important to remember that $\tau_f$ depends on the specific function $f(\theta)$.This means that there isn't just *one* integrated autocorrelation time for a given Markov chain.Instead, you must compute a different $\tau_f$ for any integral you estimate using the samples. Computing autocorrelation timesThere is a great discussion of methods for autocorrelation estimation in [a set of lecture notes by ](https://pdfs.semanticscholar.org/0bfe/9e3db30605fe2d4d26e1a288a5e2997e7225.pdf) and the interested reader should take a look at that for a more formal discussion, but I'll include a summary of some of the relevant points here.The integrated autocorrelation time is defined as$$\tau_f = \sum_{\tau=-\infty}^\infty \rho_f(\tau)$$where $\rho_f(\tau)$ is the normalized autocorrelation function of the stochastic process that generated the chain for $f$.You can estimate $\rho_f(\tau)$ using a finite chain $\{f_n\}_{n=1}^N$ as$$\hat{\rho}_f(\tau) = \hat{c}_f(\tau) / \hat{c}_f(0)$$where$$\hat{c}_f(\tau) = \frac{1}{N - \tau} \sum_{n=1}^{N-\tau} (f_n - \mu_f)\,(f_{n+\tau}-\mu_f)$$and$$\mu_f = \frac{1}{N}\sum_{n=1}^N f_n \quad.$$(Note: In practice, it is actually more computationally efficient to compute $\hat{c}_f(\tau)$ using a fast Fourier transform than summing it directly.)Now, you might expect that you can estimate $\tau_f$ using this estimator for $\rho_f(\tau)$ as$$\hat{\tau}_f \stackrel{?}{=} \sum_{\tau=-N}^{N} \hat{\rho}_f(\tau) = 1 + 2\,\sum_{\tau=1}^N \hat{\rho}_f(\tau)$$but this isn't actually a very good idea.At longer lags, $\hat{\rho}_f(\tau)$ starts to contain more noise than signal and summing all the way out to $N$ will result in a very noisy estimate of $\tau_f$.Instead, we want to estimate $\tau_f$ as$$\hat{\tau}_f (M) = 1 + 2\,\sum_{\tau=1}^M \hat{\rho}_f(\tau)$$for some $M \ll N$.As discussed by Sokal in the notes linked above, the introduction of $M$ decreases the variance of the estimator at the cost of some added bias and he suggests choosing the smallest value of $M$ where $M \ge C\,\hat{\tau}_f (M)$ for a constant $C \sim 5$.Sokal says that he finds this procedure to work well for chains longer than $1000\,\tau_f$, but the situation is a bit better with emcee because we can use the parallel chains to reduce the variance 
and we've found that chains longer than about $50\,\tau$ are often sufficient. A toy problemTo demonstrate this method, we'll start by generating a set of "chains" from a process with known autocorrelation structure.To generate a large enough dataset, we'll use [celerite](http://celerite.readthedocs.io):import numpy as np import matplotlib.pyplot as plt np.random.seed(123456) # Build the celerite model: import celerite from celerite import terms kernel = terms.RealTerm(log_a=0.0, log_c=-6.0) kernel += terms.RealTerm(log_a=0.0, log_c=-2.0) # The true autocorrelation time can be calculated analytically: true_tau = sum(2*np.exp(t.log_a-t.log_c) for t in kernel.terms) true_tau /= sum(np.exp(t.log_a) for t in kernel.terms) true_tau # Simulate a set of chains: gp = celerite.GP(kernel) t = np.arange(2000000) gp.compute(t) y = gp.sample(size=32) # Let's plot a little segment with a few samples: plt.plot(y[:3, :300].T) plt.xlim(0, 300) plt.xlabel("step number") plt.ylabel("$f$") plt.title("$\\tau_\mathrm{{true}} = {0:.0f}$".format(true_tau), fontsize=14);Now we'll estimate the empirical autocorrelation function for each of these parallel chains and compare this to the true function.def next_pow_two(n): i = 1 while i < n: i = i << 1 return i def autocorr_func_1d(x, norm=True): x = np.atleast_1d(x) if len(x.shape) != 1: raise ValueError("invalid dimensions for 1D autocorrelation function") n = next_pow_two(len(x)) # Compute the FFT and then (from that) the auto-correlation function f = np.fft.fft(x - np.mean(x), n=2*n) acf = np.fft.ifft(f * np.conjugate(f))[:len(x)].real acf /= 4*n # Optionally normalize if norm: acf /= acf[0] return acf # Make plots of ACF estimate for a few different chain lengths window = int(2*true_tau) tau = np.arange(window+1) f0 = kernel.get_value(tau) / kernel.get_value(0.0) # Loop over chain lengths: fig, axes = plt.subplots(1, 3, figsize=(12, 4), sharex=True, sharey=True) for n, ax in zip([10, 100, 1000], axes): nn = int(true_tau * n) ax.plot(tau / true_tau, f0, "k", label="true") ax.plot(tau / true_tau, autocorr_func_1d(y[0, :nn])[:window+1], label="estimate") ax.set_title(r"$N = {0}\,\tau_\mathrm{{true}}$".format(n), fontsize=14) ax.set_xlabel(r"$\tau / \tau_\mathrm{true}$") axes[0].set_ylabel(r"$\rho_f(\tau)$") axes[-1].set_xlim(0, window / true_tau) axes[-1].set_ylim(-0.05, 1.05) axes[-1].legend(fontsize=14);This figure shows how the empirical estimate of the normalized autocorrelation function changes as more samples are generated.In each panel, the true autocorrelation function is shown as a black curve and the empricial estimator is shown as a blue line.Instead of estimating the autocorrelation function using a single chain, we can assume that each chain is sampled from the same stochastic process and average the estimate over ensemble members to reduce the variance.It turns out that we'll actually do this averaging later in the process below, but it can be useful to show the mean autocorrelation function for visualization purposes.fig, axes = plt.subplots(1, 3, figsize=(12, 4), sharex=True, sharey=True) for n, ax in zip([10, 100, 1000], axes): nn = int(true_tau * n) ax.plot(tau / true_tau, f0, "k", label="true") f = np.mean([autocorr_func_1d(y[i, :nn], norm=False)[:window+1] for i in range(len(y))], axis=0) f /= f[0] ax.plot(tau / true_tau, f, label="estimate") ax.set_title(r"$N = {0}\,\tau_\mathrm{{true}}$".format(n), fontsize=14) ax.set_xlabel(r"$\tau / \tau_\mathrm{true}$") axes[0].set_ylabel(r"$\rho_f(\tau)$") axes[-1].set_xlim(0, window / true_tau) 
axes[-1].set_ylim(-0.05, 1.05) axes[-1].legend(fontsize=14);Now let's estimate the autocorrelation time using these estimated autocorrelation functions.Goodman & Weare (2010) suggested averaging the ensemble over walkers and computing the autocorrelation function of the mean chain to lower the variance of the estimator and that was what was originally implemented in emcee.Since then, @fardal on GitHub [suggested that other estimators might have lower variance](https://github.com/dfm/emcee/issues/209).This is absolutely correct and, instead of the Goodman & Weare method, we now recommend computing the autocorrelation time for each walker (it's actually possible to still use the ensemble to choose the appropriate window) and then average these estimates.Here is an implementation of each of these methods and a plot showing the convergence as a function of the chain length:# Automated windowing procedure following Sokal (1989) def auto_window(taus, c): m = np.arange(len(taus)) < c * taus if np.any(m): return np.argmin(m) return len(taus) - 1 # Following the suggestion from Goodman & Weare (2010) def autocorr_gw2010(y, c=5.0): f = autocorr_func_1d(np.mean(y, axis=0)) taus = 2.0*np.cumsum(f)-1.0 window = auto_window(taus, c) return taus[window] def autocorr_new(y, c=5.0): f = np.zeros(y.shape[1]) for yy in y: f += autocorr_func_1d(yy) f /= len(y) taus = 2.0*np.cumsum(f)-1.0 window = auto_window(taus, c) return taus[window] # Compute the estimators for a few different chain lengths N = np.exp(np.linspace(np.log(100), np.log(y.shape[1]), 10)).astype(int) gw2010 = np.empty(len(N)) new = np.empty(len(N)) for i, n in enumerate(N): gw2010[i] = autocorr_gw2010(y[:, :n]) new[i] = autocorr_new(y[:, :n]) # Plot the comparisons plt.loglog(N, gw2010, "o-", label="G\&W 2010") plt.loglog(N, new, "o-", label="DFM 2017") ylim = plt.gca().get_ylim() plt.plot(N, N / 50.0, "--k", label=r"$\tau = N/50$") plt.axhline(true_tau, color="k", label="truth", zorder=-100) plt.ylim(ylim) plt.xlabel("number of samples, $N$") plt.ylabel(r"$\tau$ estimates") plt.legend(fontsize=14);In this figure, the true autocorrelation time is shown as a horizontal line and it should be clear that both estimators give outrageous results for the short chains.It should also be clear that the new algorithm has lower variance than the original method based on Goodman & Weare.In fact, even for moderately long chains, the old method can give dangerously over-confident estimates.For comparison, we have also plotted the $\tau = N/50$ line to show that, once the estimate crosses that line, The estimates are starting to get more reasonable.This suggests that you probably shouldn't trust any estimate of $\tau$ unless you have more than $F\times\tau$ samples for some $F \ge 50$.Larger values of $F$ will be more conservative, but they will also (obviously) require longer chains. 
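(A small convenience that is not in the original post: the $N \ge F\,\tau$ rule of thumb can be wrapped in a helper that reuses the `autocorr_new` estimator defined above; the threshold `f` is a judgement call, not a hard rule.)

```python
def tau_is_trustworthy(chains, f=50.0, c=5.0):
    """chains has shape (n_walkers, n_steps); returns (trustworthy, tau_estimate)."""
    tau = autocorr_new(chains, c=c)
    return chains.shape[1] >= f * tau, tau

# For example, on the celerite-simulated chains from above:
ok, tau_hat = tau_is_trustworthy(y[:, :100000])
print(ok, tau_hat, true_tau)
```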
A more realistic exampleNow, let's run an actual Markov chain and test these methods using those samples.So that the sampling isn't completely trivial, we'll sample a multimodal density in three dimensions.import emcee def log_prob(p): return np.logaddexp(-0.5*np.sum(p**2), -0.5*np.sum((p-4.0)**2)) sampler = emcee.EnsembleSampler(32, 3, log_prob) sampler.run_mcmc(np.concatenate((np.random.randn(16, 3), 4.0+np.random.randn(16, 3)), axis=0), 500000, progress=True);100%|██████████| 500000/500000 [08:08<00:00, 1023.55it/s]Here's the marginalized density in the first dimension.chain = sampler.get_chain()[:, :, 0].T plt.hist(chain.flatten(), 100) plt.gca().set_yticks([]) plt.xlabel(r"$\theta$") plt.ylabel(r"$p(\theta)$");And here's the comparison plot showing how the autocorrelation time estimates converge with longer chains.# Compute the estimators for a few different chain lengths N = np.exp(np.linspace(np.log(100), np.log(chain.shape[1]), 10)).astype(int) gw2010 = np.empty(len(N)) new = np.empty(len(N)) for i, n in enumerate(N): gw2010[i] = autocorr_gw2010(chain[:, :n]) new[i] = autocorr_new(chain[:, :n]) # Plot the comparisons plt.loglog(N, gw2010, "o-", label="G\&W 2010") plt.loglog(N, new, "o-", label="DFM 2017") ylim = plt.gca().get_ylim() plt.plot(N, N / 50.0, "--k", label=r"$\tau = N/50$") plt.ylim(ylim) plt.xlabel("number of samples, $N$") plt.ylabel(r"$\tau$ estimates") plt.legend(fontsize=14);As before, the short chains give absurd estimates of $\tau$, but the new method converges faster and with lower variance than the old method.The $\tau = N/50$ line is also included as above as an indication of where we might start trusting the estimates. What about shorter chains?Sometimes it just might not be possible to run chains that are long enough to get a reliable estimate of $\tau$ using the methods described above.In these cases, you might be able to get an estimate using parametric models for the autocorrelation.One example would be to fit an [autoregressive model](https://en.wikipedia.org/wiki/Autoregressive_model) to the chain and using that to estimate the autocorrelation time.As an example, we'll use [celerite](http://celerite.readthdocs.io) to fit for the maximum likelihood autocorrelation function and then compute an estimate of $\tau$ based on that model.The celerite model that we're using is equivalent to a second-order ARMA model and it appears to be a good choice for this example, but we're not going to promise anything here about the general applicability and we caution care whenever estimating autocorrelation times using short chains.from scipy.optimize import minimize def autocorr_ml(y, thin=1, c=5.0): # Compute the initial estimate of tau using the standard method init = autocorr_new(y, c=c) z = y[:, ::thin] N = z.shape[1] # Build the GP model tau = max(1.0, init/thin) kernel = terms.RealTerm(np.log(0.9*np.var(z)), -np.log(tau), bounds=[(-5.0, 5.0), (-np.log(N), 0.0)]) kernel += terms.RealTerm(np.log(0.1*np.var(z)), -np.log(0.5*tau), bounds=[(-5.0, 5.0), (-np.log(N), 0.0)]) gp = celerite.GP(kernel, mean=np.mean(z)) gp.compute(np.arange(z.shape[1])) # Define the objective def nll(p): # Update the GP model gp.set_parameter_vector(p) # Loop over the chains and compute likelihoods v, g = zip(*( gp.grad_log_likelihood(z0, quiet=True) for z0 in z )) # Combine the datasets return -np.sum(v), -np.sum(g, axis=0) # Optimize the model p0 = gp.get_parameter_vector() bounds = gp.get_parameter_bounds() soln = minimize(nll, p0, jac=True, bounds=bounds) gp.set_parameter_vector(soln.x) # 
Compute the maximum likelihood tau a, c = kernel.coefficients[:2] tau = thin * 2*np.sum(a / c) / np.sum(a) return tau # Calculate the estimate for a set of different chain lengths ml = np.empty(len(N)) ml[:] = np.nan for j, n in enumerate(N[1:8]): i = j+1 thin = max(1, int(0.05*new[i])) ml[i] = autocorr_ml(chain[:, :n], thin=thin) # Plot the comparisons plt.loglog(N, gw2010, "o-", label="G\&W 2010") plt.loglog(N, new, "o-", label="DFM 2017") plt.loglog(N, ml, "o-", label="DFM 2017: ML") ylim = plt.gca().get_ylim() plt.plot(N, N / 50.0, "--k", label=r"$\tau = N/50$") plt.ylim(ylim) plt.xlabel("number of samples, $N$") plt.ylabel(r"$\tau$ estimates") plt.legend(fontsize=14);Building a simple model# import some data to play with iris = datasets.load_iris() X = iris.data y = iris.target print(iris.DESCR) X_train, X_test, y_train, y_test = train_test_split(X, y) rfc = RandomForestClassifier(n_estimators=100, n_jobs=2) rfc.fit(X_train, y_train) print("Accuracy = %0.2f" % accuracy_score (y_test, rfc.predict(X_test))) print(classification_report(y_test, rfc.predict(X_test)))Accuracy = 0.97 precision recall f1-score support 0 1.00 1.00 1.00 17 1 1.00 0.92 0.96 13 2 0.89 1.00 0.94 8 micro avg 0.97 0.97 0.97 38 macro avg 0.96 0.97 0.97 38 weighted avg 0.98 0.97 0.97 38Model serialization / marshallingpickle.dump(rfc, open("iris_rfc.pkl", "wb")) my_random_forest = pickle.load(open("iris_rfc.pkl", "rb")) my_random_forest print(classification_report(y_test, my_random_forest.predict(X_test)))precision recall f1-score support 0 1.00 1.00 1.00 17 1 1.00 0.92 0.96 13 2 0.89 1.00 0.94 8 micro avg 0.97 0.97 0.97 38 macro avg 0.96 0.97 0.97 38 weighted avg 0.98 0.97 0.97 38Next we will start a flask service. That's in the file 'flask_demo.py'Once it's started, we can use this bit of code to call it.url = "http://localhost:9000/api" data = json.dumps({'sl':5.84,'sw':3.0, 'pl':3.75,'pw':1.1}) print(data) r = requests.post(url, data) print(r.json()){"sl": 5.84, "sw": 3.0, "pl": 3.75, "pw": 1.1} {'results': {'y_hat': 1}}Recursion, Memoization and Dynamic Programming Remember how we talk about using recursion and dynamic programming. One interesting thing to do is to implement the solution to a common problem called Fibonnaci numbers on these two styles and compare the compute time. The Fibonacci series looks something like: `0, 1, 1, 2, 3, 5, 8, 13, 21 …` and so on. Any person can quickly notice the pattern. `f(n) = f(n-1) + f(n-2)` So, let's walk through a recursive implementation that solves this problem.def fib(n): if n < 2: return n return fib(n-2) + fib(n-1) %time fib(30)CPU times: user 296 ms, sys: 0 ns, total: 296 ms Wall time: 295 msNow, the main problem of this algorithm is that we are computing some of the subproblems more than once. For instance, to compute fib(4) we would compute fib(3) and fib(2). However, to compute fib(3) we also have to compute fib(2). Say hello to memoization. A technique called memoization we are cache the results of previously computed sub problems to avoid unnecessary computations.m = {} def fibm(n): if n in m: return m[n] m[n] = n if n < 2 else fibm(n-2) + fibm(n-1) return m[n] %time fibm(30)CPU times: user 0 ns, sys: 0 ns, total: 0 ns Wall time: 17.6 µsBut the question is, can we do better than this? The use of the array is helpful, but when calculating very large numbers, or perhaps on memory contraint environments it might not be desirable. This is where Dynamic Programming fits the bill. In DP we take a bottom-up approach. 
Meaning, we solve the next Fibonacci number we can with the information we already have.def fibdp(n): if n == 0: return 0 prev, curr = (0, 1) for i in range(2, n+1): newf = prev + curr prev = curr curr = newf return curr %time fibdp(30)CPU times: user 0 ns, sys: 0 ns, total: 0 ns Wall time: 6.44 µsIn this format, we don’t need to recurse or keep up with the memory intensive cache dictionary. These, add up to an even better performance. Let's now give it a try with factorials. Remember `4! = 4 * 3 * 2 * 1 = 24`. Can you give it try?def factr(n): if n < 3: return n return n * factr(n - 1) %time factr(30) m = {} def factm(n): if n in m: return m[n] m[n] = n if n < 3 else n * factr(n - 1) return m[n] %time factm(30) def factdp(n): if n < 3: return n fact = 2 for i in range(3, n + 1): fact *= i return fact %time factdp(30)CPU times: user 0 ns, sys: 0 ns, total: 0 ns Wall time: 7.87 µsLet's think of a slightly different problem. Imagine that you want to find the cheapest way to go from city A to city B, but when you are about to buy your ticket, you see that you could hop in different combinations of route and get a much cheaper price than if you go directly. How do you efficiently calculate the best possible combination of tickets and come up with the cheapest route? We will start with basic recursion and work on improving it until we reach dynamic programming.For this last problem in dynamic programming, create 2 functions that calculates the cheapest route from city A to B. I will give you the recursive solution, you will build one with memoization and the one with dynamic programming.import numpy as npUtility function to get fares between citiesdef get_fares(n_cities, max_fare): np.random.seed(123456) fares = np.sort(np.random.random((n_cities, n_cities)) * max_fare).astype(int) for i in range(len(fares)): fares[i] = np.roll(fares[i], i + 1) np.fill_diagonal(fares, 0) for i in range(1, len(fares)): for j in range(0, i): fares[i][j] = -1 return faresLet's try it out with 4 cities and random fares with a max of 1000.n_cities = 4 max_fare = 1000 fares = get_fares(n_cities, max_fare) fares[1][2] = 50 faresHere is the recursive solution:def cheapestr(s, d, c): if s == d or s == d - 1: return c[s][d] cheapest = c[s][d] for i in range(s + 1, d): tmp = cheapestr(s, i, c) + cheapestr(i, d, c) cheapest = tmp if tmp < cheapest else cheapest return cheapest %time cheapestr(0, len(fares[0]) - 1, fares)CPU times: user 0 ns, sys: 0 ns, total: 0 ns Wall time: 68.7 µsNow, you build the memoization one:m = {} def cheapestm(s, d, c): if s == d or s == d - 1: return c[s][d] if s in m and d in m[s]: return m[s][d] cheapest = c[s][d] for i in range(s + 1, d): tmp = cheapestm(s, i, c) + cheapestm(i, d, c) cheapest = tmp if tmp < cheapest else cheapest m[s] = {} m[s][d] = cheapest return m[s][d] %time cheapestm(0, len(fares[0]) - 1, fares)CPU times: user 0 ns, sys: 0 ns, total: 0 ns Wall time: 22.6 µsFaster, you see?Now, do the dynamic programming version.def cheapestdp(s, d, c): cheapest = c[0] for i in range(2, len(c)): for j in range(1, i): new_route = cheapest[j] + c[j][i] cheapest[i] = new_route if cheapest[i] > new_route else cheapest[i] return cheapest[-1] %time cheapestdp(0, len(fares[0]) - 1, fares)CPU times: user 0 ns, sys: 0 ns, total: 0 ns Wall time: 61.5 µsLet's now try with a larger example:n_cities = 18 # this will take a little before 20 seconds. 
Try not to make it any larger :) max_fare = 1000 fares = get_fares(n_cities, max_fare) fares %time cheapestr(0, len(fares[0]) - 1, fares) %time cheapestm(0, len(fares[0]) - 1, fares) %time cheapestdp(0, len(fares[0]) - 1, fares)CPU times: user 0 ns, sys: 0 ns, total: 0 ns Wall time: 75.8 µsDataset For the dataset, we use [Sentiment140](http://help.sentiment140.com/for-students) And we will build a simple model that decides positive and negative tweets# download dataset import zipfile import urllib.request if not os.path.exists("dataset"): os.makedirs("dataset") if not os.path.exists(os.path.join("dataset", "sentiment140")): urllib.request.urlretrieve("http://cs.stanford.edu/people/alecmgo/trainingandtestdata.zip", os.path.join("dataset", "sentiment140.zip")) with zipfile.ZipFile(os.path.join("dataset", "sentiment140.zip"), 'r') as inFile: inFile.extractall(os.path.join("dataset", "sentiment140")) os.remove(os.path.join("dataset", "sentiment140.zip")) FILE_PATH_TRAINING = os.path.join("dataset", "sentiment140", "training.1600000.processed.noemoticon.csv") FILE_PATH_PROCESSED = os.path.join("dataset", "sentiment140", "processed.csv") FILE_PATH_TEST_NO_USE = os.path.join("dataset", "sentiment140", "testdata.manual.2009.06.14.csv")Preprocessing 1. remove extra space, characters, and process @words 2. pad the sentencesif not os.path.exists(FILE_PATH_PROCESSED): tmp = pd.read_csv(FILE_PATH_TRAINING, names=["Target", "ID", "Date", "QueryInfo", "UserName", "Text"], encoding="latin-1") tmp.to_csv(FILE_PATH_PROCESSED, encoding="utf-8", index=False) training_data = pd.read_csv(FILE_PATH_PROCESSED, encoding="utf-8") training_data = training_data[["Target", "Text"]] import re from nltk.stem.snowball import SnowballStemmer stemmer = SnowballStemmer("english") ch_range = list(range(97, 123)) + list(range(65, 91)) + [ord(' '), ord('\'')] def process_str(raw_string): global ch_range global stemmer # first remove url, @username, etc raw_string = re.sub(r"(@|#)([A-Z]|[a-z]|[0-9]|_)+", "", raw_string) raw_string = re.sub(r"(http|https)://([A-Z]|[a-z]|[0-9]|/|\.)+", "", raw_string) # remove characters other than [a-z][A-Z][0-9]['!?] 
or empty space new_string = "".join([ch.lower() if ord(ch) in ch_range else ' ' for ch in list(raw_string)]) # remove extra space, and also convert plural form to singular new_string = new_string.strip() new_string = " ".join([stemmer.stem(word) for word in new_string.split()]) return new_string training_data["Text"] = training_data["Text"].apply(process_str) training_data["Text"].head(10) # remove empty string rows, or with only one word training_data.drop(training_data["Text"][training_data["Text"] == ""].index, inplace=True) training_data = training_data[training_data["Text"].str.contains(" ")] training_data.drop_duplicates(inplace=True) training_data.reset_index(drop=True) training_data.head(10) training_data.to_csv(os.path.join("dataset", "sentiment140", "final.csv"), index=False) X_train_processed = training_data["Text"].copy() y_train = training_data["Target"].copy() PAD_MAXLEN = 45 MAX_FEATURES = 20000 tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=MAX_FEATURES) tokenizer.fit_on_texts(X_train_processed) X_train_seq = tokenizer.texts_to_sequences(X_train_processed) X_train_pad = tf.keras.preprocessing.sequence.pad_sequences(X_train_seq, padding="post", maxlen=PAD_MAXLEN) TOKEN_VOCAB_SIZE = len(tokenizer.word_index) + 1 TOKEN_VOCAB_SIZE X_train_pad.shape X_train_pad[:2] y_train_processed = y_train.replace(4, 1).to_numpy().ravel() y_train_processed.shape # shuffle the dataset once to prepare for training index_permut = np.random.permutation(len(y_train_processed)) X_train_final = np.array(X_train_pad)[index_permut] y_train_final = np.array(y_train_processed)[index_permut] print(X_train_final[:5]) print(y_train_final[:5]) with open(os.path.join("dataset", "sentiment140", "data.pickle"), "wb") as outFile: pickle.dump([X_train_final, y_train_final, tokenizer], outFile)__Exercise 1__import nltk, re, pprint read_expr = nltk.sem.Expression.fromstring read_expr('Asi -> -Bsu') # Asi: "Agnus sings", Bsu: "Bertie sulks" read_expr('Cr & Cb') # Cr: "Cyril runs", Cb: "Cyril barks" read_expr('-r -> s') # r: "rain", s: "snow" read_expr('-((Oc | Tc) -> Ih)') # Oc: "Olive comes", Tc: "Tofu comes", Ih: "Irene is happy" read_expr('-(Pc | Ps)') # Pc: "Pat coughed", Ps: "Pat sneezed" read_expr('(Ica -> -Yco) -> (Yca -> -Ico)') # Ico: "I come", Yco: "you come", Ica: "I call", Yca: "You call"__Exercise 2__read_expr('like(angus,cyril) & hate(irene,cyril)') read_expr('taller(tofu,bertie)') read_expr('love(bruce,bruce) & love(pat,pat)') # or read_expr('love(bruce,bruce) & love(pat,bruce)') read_expr('see(cyril,bertie) & -see(angus,bertie)') read_expr('fourleggedfriend(cyril)') read_expr('near(tofu,olive) & near(olive,tofu)')__Exercise 3__read_expr('exists x.like(angus,x) & exists y.like(y,julia)') read_expr('exists x.(dog(x) & love(angus,x) & love(x,angus))') read_expr('-exists x.smile(x,pat)') read_expr('exists x.(cough(x) & sneeze(x))') read_expr('-exists x.(cough(x) | sneeze(x))') read_expr('exists x.(love(bruce,x) & -equal(x,bruce))') read_expr('-exists x.(love(x,pat) & -equal(x,matthew))') read_expr('all x.(like(cyril,x) & -equal(x,irene))') read_expr('exists x.asleep(x) & all y.(-equal(x,y) & -asleep(y))')__Exercise 4__read_expr(r'\x.(feed(x,cyril) & give(x,cappuccino,angus))') read_expr(r'\x.give(pat,warandpeace,x)') read_expr(r'all y.(\x.love(y,x))') read_expr(r'all y.(\x.(love(y,x) | detest(y,x)))') read_expr(r'all y.(\x.(love(y,x) & -detest(y,x)))')__Exercise 5__read_expr = nltk.sem.Expression.fromstring e1 = read_expr(r'\x.(exists y.love(x,y))') e2 = read_expr('pat') e3 = 
nltk.sem.ApplicationExpression(e1, e2) print(e3.simplify()) # Translation: Pat loves someone. e1 = read_expr(r'\x.(exists y.(love(x,y) | love(y,x)))') e2 = read_expr('pat') e3 = nltk.sem.ApplicationExpression(e1, e2) print(e3.simplify()) # Translation: Pat loves someone or someone loves Pat. e1 = read_expr(r'\x.(walk(fido))') e2 = read_expr('pat') e3 = nltk.sem.ApplicationExpression(e1, e2) print(e3.simplify())walk(fido)__Exercise 6__e1 = read_expr(r'\P.\x.all y.(dog(y) -> P(x,pat))') e2 = read_expr('chase') e3 = nltk.sem.ApplicationExpression(e1, e2) print(e3.simplify()) # \x.all y.(dog(y) -> chase(x,pat)) e1 = read_expr(r'\P.\x.exists y.(dog(y) & P(pat,x))') e2 = read_expr('chase') e3 = nltk.sem.ApplicationExpression(e1, e2) print(e3.simplify()) # \x.exists y.(dog(y) & chase(pat,x)) e1 = read_expr(r'\P x0 x1.exists y.(present(y) & P(x1,y,x0))') e2 = read_expr('give') e3 = nltk.sem.ApplicationExpression(e1, e2) print(e3.simplify()) # \x0 x1.exists y.(present(y) & give(x1,y,x0))\x0 x1.exists y.(present(y) & give(x1,y,x0))__Exercise 7__e1 = read_expr(r'\P.exists y.(dog(x) & P(x))') e2 = read_expr('bark') e3 = nltk.sem.ApplicationExpression(e1, e2) print(e3.simplify()) # exists y.(dog(x) & bark(x)) e1 = read_expr(r'\P.P(fido)') e2 = read_expr('bark') e3 = nltk.sem.ApplicationExpression(e1, e2) print(e3.simplify()) # bark(fido) e1 = read_expr(r'\P. all x.(dog(x) -> bark(x))') e2 = read_expr('\\P. all x.(dog(x) -> P(x))') e3 = nltk.sem.ApplicationExpression(e1, e2) print(e3.simplify()) # all x.(dog(x) -> bark(x))all x.(dog(x) -> bark(x))__Exercise 11__ (unfinished)# Sentences: # 1) Once upon a time there was a little boy, and he wanted to be a cock-a-doo-dle-doo. # 2) So he was a cock-a-doo-dle-doo. # 3) And he wanted to fly up into the sky. # 4) So he did fly up into the sky. 
# Should translate to: # 1) be(boy) & want(boy, be(boy,cock-a-doo-dle-doo)) # 2) be(boy,cock-a-doo-dle-doo) # 3) want(boy, fly(boy)) # 4) fly(boy) from nltk import load_parser cp = load_parser('file:chapter_10_ex11.fcfg') # 2) query = 'So he was a cock-a-doo-dle-doo' trees = list(cp.parse(query.split())) answer = trees[0].label()['SEM'] answer = [s for s in answer if s] q = ' '.join(answer) print(q) # 3) query = 'And he wanted to fly up into the sky' trees = list(cp.parse(query.split())) answer = trees[0].label()['SEM'] answer = [s for s in answer if s] q = ' '.join(answer) print(q) # 1a) query = 'Once upon a time there was a little boy' cp = load_parser('file:chapter_10_ex11.fcfg') trees = list(cp.parse(query.split())) answer = trees[0].label()['SEM'] answer = [s for s in answer if s] q = ' '.join(answer) print(q) # 1b) query = 'he wanted to be a cock-a-doo-dle-doo' cp = load_parser('file:chapter_10_ex11.fcfg') trees = list(cp.parse(query.split())) answer = trees[0].label()['SEM'] answer = [s for s in answer if s] q = ' '.join(answer) print(q) # 1) query = 'Once upon a time there was a little boy and he wanted to be a cock-a-doo-dle-doo' cp = load_parser('file:chapter_10_ex11.fcfg') trees = list(cp.parse(query.split())) answer = trees[0].label()['SEM'] answer = [s for s in answer if s] q = ' '.join(answer) print(q) # 4) query = 'So he did fly up into the sky' cp = load_parser('file:chapter_10_ex11.fcfg') trees = list(cp.parse(query.split())) answer = trees[0].label()['SEM'] answer = [s for s in answer if s] q = ' '.join(answer) print(q) dom = {'b', 'c'} v = """ boy => b cockadoodledoo => c fly => {b} be => {(b)}, (b, c)} """ # don't know how to add the want with the nested phrase :/ val = nltk.Valuation.fromstring(v) print(val) m = nltk.Model(dom, val) g = nltk.Assignment(dom) m.evaluate('fly ( boy )', g) m.evaluate('fly ( cockadoodledoo )', g) m.evaluate('-fly ( boy )', g) m.evaluate('be ( boy , cockadoodledoo )', g) m.evaluate('be ( boy )', g) m.evaluate('be ( cockadoodledoo , boy )', g)Scikit-Learn Hyperparameter Tuning Using local data (data was created from preprocessor script) Install fedml_gcp packagepip install fedml_gcp-1.0.0-py3-none-any.whl --force-reinstallProcessing ./fedml_gcp-1.0.0-py3-none-any.whl Collecting google Using cached google-3.0.0-py2.py3-none-any.whl (45 kB) Collecting hdbcli Using cached hdbcli-2.10.13-cp34-abi3-manylinux1_x86_64.whl (11.7 MB) Collecting beautifulsoup4 Using cached beautifulsoup4-4.10.0-py3-none-any.whl (97 kB) Collecting soupsieve>1.2 Using cached soupsieve-2.2.1-py3-none-any.whl (33 kB) Installing collected packages: soupsieve, beautifulsoup4, hdbcli, google, fedml-gcp Attempting uninstall: soupsieve Found existing installation: soupsieve 2.2.1 Uninstalling soupsieve-2.2.1: Successfully uninstalled soupsieve-2.2.1 Attempting uninstall: beautifulsoup4 Found existing installation: beautifulsoup4 4.10.0 Uninstalling beautifulsoup4-4.10.0: Successfully uninstalled beautifulsoup4-4.10.0 Attempting uninstall: hdbcli Found existing installation: hdbcli 2.10.13 Uninstalling hdbcli-2.10.13: Successfully uninstalled hdbcli-2.10.13 Attempting uninstall: goo[...]Import Librariesfrom fedml_gcp import DwcGCP import numpy as np import pandas as pd import jsonCreate DwcGCP Instance to access class methods and train model It is expected that the bucket name passed here already exists in Cloud Storage.dwc = DwcGCP(project_name='fed-ml', bucket_name='fedml-bucket')Data setupIn this example, we are using local data for training.Before running this cell, please 
make sure to have run the Data Preprocessor model example. That model will write to an output directory for the bucket specified in that models arguments. The output directory will contain the preprocessed_data.csv and labels.csv files used for this model. Download those files for use and write them to the Hyperparameter Tuning Script package folder.dwc.download_blob('fedml-bucket', 'datapreprocessor/output/preprocessed_data.csv', 'HyperparameterTuning/trainer/preprocessed_data.csv') dwc.download_blob('fedml-bucket', 'datapreprocessor/output/y_train.csv', 'HyperparameterTuning/trainer/labels.csv')Downloaded storage object datapreprocessor/output/preprocessed_data.csv from bucket fedml-bucket to local file HyperparameterTuning/trainer/preprocessed_data.csv. Downloaded storage object datapreprocessor/output/y_train.csv from bucket fedml-bucket to local file HyperparameterTuning/trainer/labels.csv.Create tar bundle of script folder so GCP can use it for trainingBefore running this cell, please ensure that the script package has all the necessary files for a training job.dwc.make_tar_bundle('HyperparameterTuning.tar.gz', 'HyperparameterTuning', 'h_tuning/train/HyperparameterTuning.tar.gz')File HyperparameterTuning.tar.gz uploaded to h_tuning/train/HyperparameterTuning.tar.gz.Train Model GCP takes in training inputs that are specific to the training job and the environment needed.In the training inputs, we are the python module. This is the module that your script package is named, and it references the task.py file inside the script package.hyperparameters = { 'max_depth': [2, 4, 6], 'n_estimators': [100, 250, 300], 'max_features': [4, 5, 6, 'sqrt'], 'min_samples_leaf': [25, 30] } hyperparameters = json.dumps(hyperparameters) training_inputs = { 'scaleTier': 'BASIC', 'packageUris': ['gs://fedml-bucket/h_tuning/train/HyperparameterTuning.tar.gz', "gs://fedml-bucket/fedml_gcp-1.0.0-py3-none-any.whl"], 'pythonModule': 'trainer.task', 'args': ['--preprocessed_file_name', 'preprocessed_data.csv', '--labels_file_name', 'labels.csv', '--hyperparameters', hyperparameters, '--n_jobs', '24', '--bucket_name', 'fedml-bucket'], 'region': 'us-east1', 'jobDir': 'gs://fedml-bucket', 'runtimeVersion': '2.5', 'pythonVersion': '3.7', 'scheduling': {'maxWaitTime': '3600s', 'maxRunningTime': '7200s'} } dwc.train_model('h_tuning_final_train1', training_inputs)Training Job Submitted Succesfully Job status for fed-ml.h_tuning_final_train1: state : QUEUEDDeploy modeldwc.deploy(model_name='h_tuning_final_deploy1', model_location='/h_tuning/model', version='v1', region='us-east1'){'name': 'projects/fed-ml/models/h_tuning_final_deploy1', 'regions': ['us-east1'], 'etag': '070g99Zt5C8='} {'name': 'projects/fed-ml/operations/create_h_tuning_final_deploy1_version-1633569633431', 'metadata': {'@type': 'type.googleapis.com/google.cloud.ml.v1.OperationMetadata', 'createTime': '2021-10-07T01:20:33Z', 'operationType': 'CREATE_VERSION', 'modelName': 'projects/fed-ml/models/h_tuning_final_deploy1', 'version': {'name': 'projects/fed-ml/models/h_tuning_final_deploy1/versions/version', 'deploymentUri': 'gs://fedml-bucket/h_tuning/model', 'createTime': '2021-10-07T01:20:33Z', 'runtimeVersion': '2.5', 'etag': 'tJSihQU6IYA=', 'framework': 'SCIKIT_LEARN', 'machineType': 'mls1-c1-m2', 'pythonVersion': '3.7'}}}Download Images and Preprocessfrom google.colab import drive drive.mount('/content/gdrive')Raw Image Data Download https://github.com/VisionLearningGroup/taskcv-2017-public/tree/master/classification#!wget 
http://csr.bu.edu/ftp/visda17/clf/validation.tar !tar xvf /content/gdrive/MyDrive/Projects/syn_real_gan/data/01_raw/validation.tar ''' !wget http://csr.bu.edu/ftp/visda17/clf/train.tar !tar xvf train.tar !wget http://csr.bu.edu/ftp/visda17/clf/test.tar !tar xvf test.tar !wget https://raw.githubusercontent.com/VisionLearningGroup/taskcv-2017-public/master/classification/data/image_list.txt ''' drive.flush_and_unmount()Data Statpwd lsShow number of files in each class.import os, os.path # simple version for working with CWD #dir = '/content/gdrive/MyDrive/Projects/syn_real_gan/data/01_raw' dir = '/content/gdrive/MyDrive/Projects/syn_real_gan/data/02_intermediate' def print_num_files(dir): file_count = 0 for name in os.listdir(dir): if os.path.isfile(dir+'/'+name): file_count += 1 elif os.path.isdir(dir+'/'+name): print_num_files(dir+'/'+name) print(f'{dir}:{file_count}') print_num_files(dir) import os,shutil import random from tqdm import tqdm import cv2 def build_cycle_gan_dir(cls_name,target_dir): os.makedirs(target_dir,exist_ok=True) os.makedirs(os.path.join(target_dir,'trainA' ) ,exist_ok=True) os.makedirs(os.path.join(target_dir, 'trainB' ) ,exist_ok=True) os.makedirs(os.path.join(target_dir,'testA' ) ,exist_ok=True) os.makedirs(os.path.join(target_dir,'testB' ) ,exist_ok=True) raw_dir = '/content/gdrive/MyDrive/Projects/syn_real_gan/data/01_raw' full_dir = '/content/gdrive/MyDrive/Projects/syn_real_gan/data/02_intermediate/5_classes' B_dir = os.path.join(raw_dir,'validation',cls_name) A_dir = os.path.join(raw_dir,'train',cls_name) print(f'{cls_name} list dir ...') A_dir_files = os.listdir(A_dir) B_dir_files = os.listdir(B_dir) print(f'{cls_name} shuffling ...') random.shuffle(A_dir_files) random.shuffle(B_dir_files) def process_file(file_path,target_path,full_path): im = cv2.imread(file_path) sp = im.shape if not (sp[0]/sp[1] < 0.4 or sp[0]/sp[1] > 2.5): if not (sp[0] < 200 or sp[1] < 200): shutil.copy(file_path,target_path) shutil.copy(file_path,full_path) print(f'{file_path}:{sp} moved') return True return False count_train = 0 for f in tqdm(B_dir_files): if process_file(os.path.join(B_dir,f),os.path.join(target_dir,'trainB'),os.path.join(full_dir,'trainB')): count_train += 1 for f in tqdm(A_dir_files[:count_train]): shutil.copy(os.path.join(A_dir,f),os.path.join(target_dir,'trainA')) shutil.copy(os.path.join(A_dir,f),os.path.join(full_dir,'trainA')) ''' for f in tqdm(A_dir_files[:200]): shutil.copy(os.path.join(A_dir,f),os.path.join(target_dir,'trainA')) for f in tqdm(B_dir_files[:200]): shutil.copy(os.path.join(B_dir,f),os.path.join(target_dir,'trainB')) for f in tqdm(A_dir_files[200:220]): shutil.copy(os.path.join(A_dir,f),os.path.join(target_dir,'testA')) for f in tqdm(B_dir_files[200:220]): shutil.copy(os.path.join(B_dir,f),os.path.join(target_dir,'testB')) ''' def move_train_to_test(cls_name,target_dir): full_dir = '/content/gdrive/MyDrive/Projects/syn_real_gan/data/02_intermediate/5_classes' A_dir = os.path.join(target_dir,'trainA') B_dir = os.path.join(target_dir,'trainB') A_test_dir = os.path.join(target_dir,'testA') B_test_dir = os.path.join(target_dir,'testB') full_A_dir = os.path.join(full_dir,'trainA') full_B_dir = os.path.join(full_dir,'trainB') full_A_test_dir = os.path.join(full_dir,'testA') full_B_test_dir = os.path.join(full_dir,'testB') print(f'{cls_name} list dir ...') A_dir_files = os.listdir(A_dir) B_dir_files = os.listdir(B_dir) print(f'{cls_name} shuffling ...') random.shuffle(A_dir_files) random.shuffle(B_dir_files) for f in tqdm(A_dir_files[:50]): 
shutil.move(os.path.join(A_dir,f),A_test_dir) shutil.move(os.path.join(full_A_dir,f),full_A_test_dir) for f in tqdm(B_dir_files[:50]): shutil.move(os.path.join(B_dir,f),B_test_dir) shutil.move(os.path.join(full_B_dir,f),full_B_test_dir) build_cycle_gan_dir('car','/content/gdrive/MyDrive/Projects/syn_real_gan/data/02_intermediate/car') build_cycle_gan_dir('motorcycle','/content/gdrive/MyDrive/Projects/syn_real_gan/data/02_intermediate/motorcycle') build_cycle_gan_dir('horse','/content/gdrive/MyDrive/Projects/syn_real_gan/data/02_intermediate/horse') build_cycle_gan_dir('aeroplane','/content/gdrive/MyDrive/Projects/syn_real_gan/data/02_intermediate/aeroplane') build_cycle_gan_dir('plant','/content/gdrive/MyDrive/Projects/syn_real_gan/data/02_intermediate/plant') move_train_to_test('car','/content/gdrive/MyDrive/Projects/syn_real_gan/data/02_intermediate/car') move_train_to_test('motorcycle','/content/gdrive/MyDrive/Projects/syn_real_gan/data/02_intermediate/motorcycle') move_train_to_test('horse','/content/gdrive/MyDrive/Projects/syn_real_gan/data/02_intermediate/horse') move_train_to_test('aeroplane','/content/gdrive/MyDrive/Projects/syn_real_gan/data/02_intermediate/aeroplane') move_train_to_test('plant','/content/gdrive/MyDrive/Projects/syn_real_gan/data/02_intermediate/plant') rm -r /content/gdrive/MyDrive/Projects/syn_real_gan/data/02_intermediate/car/testA/*Forecasting in Hindsight: Assessing if Baltimore's 8 Day Forecasted Temperatures are in Line with Historical Norms Fall has arrived, and with it, cooler temperatures are to be expected. But, knowing temperatures can fluctuate from day to day, are the forecasted temperatures typical for this time of year?This notebook seeks to answer this question. To resolve it, we will be taking the eight day forecast from [OpenWeather API](https://openweathermap.org/api) and comparing it to historical data for the same dates between 1950 and 2018. The API data was pulled on October 3, 2020 for dates including October 3, 2020 to October 10, 2020. The historical weather data was taken from Carnegie Mellon University's compiled United States Weather Station data taken from the weather station at Baltimore-Washington Thurgood Marshall International Airport (BWI). These datasets can be found [here](https://kilthub.cmu.edu/articles/dataset/Compiled_daily_temperature_and_precipitation_data_for_the_U_S_cities/7890488?file=20881932). The datasets were cleaned and merged into a single csv file, which will be used here.We will use this information to review whether the forecast is typical and within norms for this time historically.If you wish to review how this data was retrieved and cleaned, including how to utilize the API, please refer to {NOTEBOOK LINK}. TerminologyFor reference, here are some terms from the table:- Date: This is the date for the given temperature readings or forecast. - Decade: This is the decade that the corresponding date belongs to. - Year: This is the year of the corresponding date. - Month: This is the month of the corresponding date. - Day: This is the day of the corresponding date.- min: This is the minimum or low temperature of the corresponding date.- max: This is the maximum or high temperature of the corresponding date.- mean: This is the mean or average temperature of the corresponding date. SummaryThe forecasted temperatures for Baltimore, MD, USA for the week dating from October 3, 2020 to October 10, 2020 are common for the time period historically. 
The average forecasted high temperature is approximately 1.5 degrees Fahrenheit below the average high from 1950 to 2018. However, the average forecasted low temperature is more than 6 degrees Fahrenheit above the average low from 1950 to 2018. Most notable is how stable the temperature is: the difference between the forecasted high and low temperatures is only 63.68% of the average historic difference between high and low temperatures. This means we can expect less temperature change during the upcoming week than is typical. No heavy coats needed yet.
Limitations
One of the main limitations of this project concerns the dataset: it has only 568 rows. This is due mainly to the small window of time we are exploring (an eight-day period). Additionally, while the original dataset from Carnegie Mellon University contains data beginning in 1871, we decided to remove all data from before 1950, because the original US Weather Station for Baltimore was located in downtown Baltimore until 1950, when it was moved to its current location at the airport now known as BWI. For consistency, we chose to shrink the dataset; further exploration comparing the forecast to dates back to the 1870s could still be done, however.
I. Importing Needed Tools and Dataset
For this project, we will need pandas, numpy, matplotlib, and seaborn. We will also pull our dataset from the Data folder.
# import tools / libraries / csv
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import seaborn as sns
df = pd.read_csv(r'~/Github/DATA601_Project2/Data/CombinedWeather.csv')
II. Create Necessary Objects
We will also need to create objects that include subsets of data, as well as lists of historic maximum, minimum, and mean temperatures (a more compact way to build these lists is sketched just below).
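For reference, here is that more compact alternative: a purely illustrative pandas sketch that builds the same five per-day lists with one groupby instead of forty hand-written selections. It assumes the historic/forecast split created in the next cell and reuses the same variable names.
by_day = historic[historic['Day'].between(3, 10)].groupby('Day')
max_temps = by_day['max'].max().tolist()        # record high per calendar day
min_temps = by_day['min'].min().tolist()        # record low per calendar day
mean_temps = by_day['mean'].mean().tolist()     # average of the daily means
avghigh_temps = by_day['max'].mean().tolist()   # average high per calendar day
avglow_temps = by_day['min'].mean().tolist()    # average low per calendar day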
These objects will be used in graphing.# Create objects historic = df[df['Year'] < 2020] forecast = df[df['Year'] == 2020] # Historic max temperatures max_Oct3 = historic[historic['Day'] == 3]['max'].max() max_Oct4 = historic[historic['Day'] == 4]['max'].max() max_Oct5 = historic[historic['Day'] == 5]['max'].max() max_Oct6 = historic[historic['Day'] == 6]['max'].max() max_Oct7 = historic[historic['Day'] == 7]['max'].max() max_Oct8 = historic[historic['Day'] == 8]['max'].max() max_Oct9 = historic[historic['Day'] == 9]['max'].max() max_Oct10 = historic[historic['Day'] == 10]['max'].max() max_temps = [max_Oct3, max_Oct4, max_Oct5, max_Oct6, max_Oct7, max_Oct8, max_Oct9, max_Oct10] # Historic min temperatures min_Oct3 = historic[historic['Day'] == 3]['min'].min() min_Oct4 = historic[historic['Day'] == 4]['min'].min() min_Oct5 = historic[historic['Day'] == 5]['min'].min() min_Oct6 = historic[historic['Day'] == 6]['min'].min() min_Oct7 = historic[historic['Day'] == 7]['min'].min() min_Oct8 = historic[historic['Day'] == 8]['min'].min() min_Oct9 = historic[historic['Day'] == 9]['min'].min() min_Oct10 = historic[historic['Day'] == 10]['min'].min() min_temps = [min_Oct3, min_Oct4, min_Oct5, min_Oct6, min_Oct7, min_Oct8, min_Oct9, min_Oct10] # Historic mean temperatures mean_Oct3 = historic[historic['Day'] == 3]['mean'].mean() mean_Oct4 = historic[historic['Day'] == 4]['mean'].mean() mean_Oct5 = historic[historic['Day'] == 5]['mean'].mean() mean_Oct6 = historic[historic['Day'] == 6]['mean'].mean() mean_Oct7 = historic[historic['Day'] == 7]['mean'].mean() mean_Oct8 = historic[historic['Day'] == 8]['mean'].mean() mean_Oct9 = historic[historic['Day'] == 9]['mean'].mean() mean_Oct10 = historic[historic['Day'] == 10]['mean'].mean() mean_temps = [mean_Oct3, mean_Oct4, mean_Oct5, mean_Oct6, mean_Oct7, mean_Oct8, mean_Oct9, mean_Oct10] # Historic average high temperatures avghigh_Oct3 = historic[historic['Day'] == 3]['max'].mean() avghigh_Oct4 = historic[historic['Day'] == 4]['max'].mean() avghigh_Oct5 = historic[historic['Day'] == 5]['max'].mean() avghigh_Oct6 = historic[historic['Day'] == 6]['max'].mean() avghigh_Oct7 = historic[historic['Day'] == 7]['max'].mean() avghigh_Oct8 = historic[historic['Day'] == 8]['max'].mean() avghigh_Oct9 = historic[historic['Day'] == 9]['max'].mean() avghigh_Oct10 = historic[historic['Day'] == 10]['max'].mean() avghigh_temps = [avghigh_Oct3, avghigh_Oct4, avghigh_Oct5, avghigh_Oct6, avghigh_Oct7, avghigh_Oct8, avghigh_Oct9, avghigh_Oct10] # Historic average low temperatures avglow_Oct3 = historic[historic['Day'] == 3]['min'].mean() avglow_Oct4 = historic[historic['Day'] == 4]['min'].mean() avglow_Oct5 = historic[historic['Day'] == 5]['min'].mean() avglow_Oct6 = historic[historic['Day'] == 6]['min'].mean() avglow_Oct7 = historic[historic['Day'] == 7]['min'].mean() avglow_Oct8 = historic[historic['Day'] == 8]['min'].mean() avglow_Oct9 = historic[historic['Day'] == 9]['min'].mean() avglow_Oct10 = historic[historic['Day'] == 10]['min'].mean() avglow_temps = [avglow_Oct3, avglow_Oct4, avglow_Oct5, avglow_Oct6, avglow_Oct7, avglow_Oct8, avglow_Oct9, avglow_Oct10] days = ['Oct 3', 'Oct 4', 'Oct 5', 'Oct 6', 'Oct 7', 'Oct 8', 'Oct 9', 'Oct 10']III. Check for OutliersTo determine if mean or median is preferrable, we need to verify that there are no to few outliers. 
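As a quick numeric companion to the plots below, one could also count how many daily highs fall outside the usual 1.5 × IQR whiskers (an illustrative sketch, assuming the historic frame defined above; the same check works for the lows):
q1, q3 = historic['max'].quantile([0.25, 0.75])
iqr = q3 - q1
high_outliers = historic[(historic['max'] < q1 - 1.5 * iqr) | (historic['max'] > q3 + 1.5 * iqr)]
print('Daily high temperature outliers:', len(high_outliers))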
If there are multiple outliers, we will need to use median.# Box plot of high temperatures aplot = sns.boxplot(x = historic['Day'], y = historic['max'], data = historic) aplot.axes.set_title('Box Plot of Baltimore Daily High Temp: Oct 3-10, 1950-2018', fontsize=16) aplot.set_xlabel('October', fontsize=14) aplot.set_ylabel('Temperature (F)', fontsize=14) aplot.figure.savefig('BoxPlotHigh.png', bbox_inches = 'tight') # Histogram of high temperatures x = historic['max'] num_bins = 9 n, bins, patches = plt.hist(x, num_bins, facecolor='blue', alpha=0.5) # Histogram settings plt.xlabel('Temperature (F)') plt.ylabel('Frequency') plt.title('Histogram of Baltimore Daily High Temp: Oct 3-10, 1950-2018') plt.savefig('HighTempHistogram.png', bbox_inches = 'tight') plt.show() # Box plot of low temperatures bplot = sns.boxplot(x = historic['Day'], y = historic['min'], data = historic) bplot.axes.set_title('Box Plot of Baltimore Daily Low Temp: Oct 3-10, 1950-2018', fontsize=16) bplot.set_xlabel('October', fontsize=14) bplot.set_ylabel('Temperature (F)', fontsize=14) bplot.figure.savefig('BoxPlotLow.png', bbox_inches = 'tight') # Histogram of low temperatures x = historic['min'] num_bins = 9 n, bins, patches = plt.hist(x, num_bins, facecolor='blue', alpha=0.5) # Histogram settings plt.xlabel('Temperature (F)') plt.ylabel('Frequency') plt.title('Histogram of Baltimore Daily Low Temp: Oct 3-10, 1950-2018') plt.savefig('LowTempHistogram.png', bbox_inches = 'tight') plt.show()Given that there is only a single outlier in the box plots above, the relative stability of temperatures, and that the temperatures are distributed fairly normally, we can use mean. IV. Comparing Forecast to Past DataNow that we have seen the distribution of the historical data, we should compare the forecast to it. In the first chart, we will compare the forecasted temperature to extremes and the mean. The shaded region is the temperature range of the forecast.# Line 1 - Historical max x1 = days y1 = max_temps plt.plot(x1, y1, label = "Historic Max") # Line 2 - Historical min x2 = days y2 = min_temps plt.plot(x2, y2, label = "Historic Min") # Line 3 - Forecast max x3 = days y3 = forecast['max'] plt.plot(x3, y3, label = "Forecast High") # Line 4 - Forecast min x4 = days y4 = forecast['min'] plt.plot(x4, y4, label = "Forecast Low") # Line 5 - Historical mean x5 = days y5 = mean_temps plt.plot(x5, y5, label = "Historic Mean") # Graph settings plt.grid(True) plt.xlabel('Day', fontsize=15) plt.ylabel('Temperature (F)', fontsize=15) plt.title('Comparison of Forecasted Temperatures to Historic Extreme Temperatures',fontsize=20) plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left') plt.fill_between(days, y3, y4) plt.savefig('ForecastComparison1.png', bbox_inches = 'tight') plt.show() print('Forecasted temperature range is shaded area')We see that the forecasted weather is well within the extremes and only strays from the mean temperature slightly on October 7. 
So the forecast is not one of extremes.Let's compare the forecasted high and low temperatures to the historic averages.# Compare average highs over this range # Line 1 - Average Historical High Temp x1 = days y1 = avghigh_temps plt.plot(x1, y1, label = "Historic Avg High") # Line 2 - Forecasted High Temp x2 = days y2 = forecast['max'] plt.plot(x2, y2, label = "Forecast High") # Graph settings plt.grid(True) plt.xlabel('Day', fontsize=15) plt.ylabel('Temperature (F)', fontsize=15) plt.title('Comparison of Forecasted High Temperatures to Historic Average High Temperatures',fontsize=20) plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left') plt.savefig('ForecastComparison2.png', bbox_inches = 'tight') plt.show() # Calculate average distance between historical average high temp and forecasted high temp avghigh_diff = forecast['max'].mean() - historic['max'].mean() limavghigh_diff = round(avghigh_diff, 3) print('Average temperature difference between forecasted high temperatures and historical average high temperatures:', limavghigh_diff, 'degrees Fahrenheit') # Graph the difference between temperatures # Create variables objects = ('Forecast Avg High','Historic Avg High') y_pos = np.arange(len(objects)) TempsH = [forecast['max'].mean(), historic['max'].mean()] # Create graph plt.bar(y_pos, TempsH, align='center', color = ['orange', 'blue'], alpha=0.5) plt.xticks(y_pos, objects) plt.ylabel('Temperature (F)') plt.title('Comparison of Average High Temp') plt.savefig('HighTempBar.png', bbox_inches = 'tight') plt.show()We see that there is very little difference in the average high temperatures between the forecast and historic values. In fact, the difference is only -1.503 degrees Fahrenheit, meaning the forecast is just slightly lower than the average. Let's take a look at the low temperatures.# Compare average lows over this range # Line 1 - Average Historical High Temp x1 = days y1 = avglow_temps plt.plot(x1, y1, label = "Historic Avg Low") # Line 2 - Forecasted High Temp x2 = days y2 = forecast['min'] plt.plot(x2, y2, label = "Forecast Low") # Graph settings plt.grid(True) plt.xlabel('Day', fontsize=15) plt.ylabel('Temperature (F)', fontsize=15) plt.title('Comparison of Forecasted Low Temperatures to Historic Average Low Temperatures',fontsize=20) plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left') plt.savefig('ForecastComparison3.png', bbox_inches = 'tight') plt.show() # Calculate average distance between forecasted low temp and historic average low temp avglow_diff = forecast['min'].mean() - historic['min'].mean() limavglow_diff = round(avglow_diff, 3) print('Avg difference between forecasted high and historical avg high temperatures:', limavglow_diff, 'degrees Fahrenheit') # Graph the difference between temperatures # Create variables objects1 = ('Forecast Avg Low','Historic Avg Low') y_pos1 = np.arange(len(objects1)) TempsL = [forecast['min'].mean(), historic['min'].mean()] # Create graph plt.bar(y_pos1, TempsL, align='center', color = ['orange', 'blue'], alpha=0.5) plt.xticks(y_pos1, objects1) plt.ylabel('Temperature (F)') plt.title('Comparison of Average Low Temp') plt.savefig('LowTempBar.png', bbox_inches = 'tight') plt.show()There is a greater difference between the average low temperatures of the forecasted and historic values. The forecast calls for low temperatures that are, on average, 6.214 degrees Fahrenheit above the historic mean. 
We can therefore expect nighttime temperatures to be slightly warmer than usual.Let's explore these futher to get a better view of how the forecasted temperature compares to the historic averages.# Line 1 - Historical avg high x1 = days y1 = avghigh_temps plt.plot(x1, y1, label = "Hist Avg High") # Line 2 - Historical min x2 = days y2 = avglow_temps plt.plot(x2, y2, label = "Hist Avg Low") # Line 3 - Forecast max x3 = days y3 = forecast['max'] plt.plot(x3, y3, label = "Forecast High") # Line 4 - Forecast min x4 = days y4 = forecast['min'] plt.plot(x4, y4, label = "Forecast Low") # Line 5 - Historical mean x5 = days y5 = mean_temps plt.plot(x5, y5, label = "Hist Avg Mean") # Graph settings plt.grid(True) plt.xlabel('Day', fontsize=15) plt.ylabel('Temperature (F)', fontsize=15) plt.title('Comparison of Forecast to Hist Avg Temperatures',fontsize=20) plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left') plt.fill_between(days, y3, y4) plt.savefig('ForecastComparison4.png', bbox_inches = 'tight') plt.show() print('Forecasted temperature range is shaded area')What we see is that the forecast calls for temperatures that are mostly within the historic averages. What stands out is that the difference between the forecast's daily high and low temperatures is smaller than the gap between the historic average high and low temperatures. This would suggest that the temperature is more stable than usual, with less temperature change over the course of the given days than is to be expected. Let's calculate this.# use Numpy to calculate average difference between high and low temps forecast_diff = np.average(np.average([forecast['max'] - forecast['min']])) forecast_diff = round(forecast_diff, 3) historic_diff = np.average(np.average([historic['max'] - historic['min']])) historic_diff = round(historic_diff, 3) # Print print('The difference between the forecasted daily high and low temperatures is:', forecast_diff, 'degrees Fahrenheit') print('The difference between the historic avg daily high and low temperatures is:', historic_diff, 'degrees Fahrenheit') print('The forecast daily temperature range is smaller than the historic average by', historic_diff - forecast_diff, 'degrees Fahrenheit, or', round(100-(forecast_diff/historic_diff)*100, 3), '%')The difference between the forecasted daily high and low temperatures is: 13.532 degrees Fahrenheit The difference between the historic avg daily high and low temperatures is: 21.25 degrees Fahrenheit The forecast daily temperature range is smaller than the historic average by 7.718 degrees Fahrenheit, or 36.32 %fr_lake and fr_land in NARVAL and QUBICCimport os import sys import xarray as xr import numpy as np import pandas as pd import importlib import matplotlib.pyplot as plt # For psyplot import psyplot.project as psy import matplotlib as mpl # %matplotlib inline # %config InlineBackend.close_figures = False psy.rcParams['plotter.maps.xgrid'] = False psy.rcParams['plotter.maps.ygrid'] = False mpl.rcParams['figure.figsize'] = [10., 8.] 
# path = '/pf/b/b309170/my_work/QUBICC/data_var_vertinterp/cl/' # file = 'int_var_hc2_02_p1m_cl_ml_20041110T010000Z.nc' fr_land_qubicc = '/pf/b/b309170/my_work/QUBICC/data_var_vertinterp_R02B05/fr_land/fr_land_R02B05.nc' fr_lake_qubicc = '/pf/b/b309170/my_work/QUBICC/data_var_vertinterp_R02B05/fr_lake/fr_lake_R02B05.nc' fr_lake_narval = '/pf/b/b309170/my_work/NARVAL/grid_extpar/fr_lake_R02B05_NARVAL_fg_DOM01.nc' fr_land_narval = '/pf/b/b309170/my_work/NARVAL/grid_extpar/fr_land_R02B05_NARVAL_fg_DOM01.nc' # path = '/pf/b/b309170/my_work/QUBICC/' # file_cg = 'data_hor_interp/hc2_02_p1m_cl_ml_20041105T150000Z.nc' # file_orig = 'some_orig_data/hc2_02_p1m_cl_ml_20041105T150000Z.nc'Fraction of landimport xarray as xr da = xr.open_dataset(fr_land_qubicc) np.mean(da.land.values)Of horizontal coarse-graining (psyplot): If you get the error 'ValueError: Can only plot 2-dimensional data!', then you need to use cdo setgrid on the file first.psy.plot.mapplot() # Note that the cloud cover scheme used was a 0-1 cloud cover scheme. maps = psy.plot.mapplot(fr_land_qubicc, dims = {'name': 'land'}, projection='robin', cmap='RdBu_r', title='QUBICC: Fraction of land') maps = psy.plot.mapplot(fr_land_narval, dims = {'name': 'fr_land'}, projection='robin', cmap='RdBu_r', title='Horizontally interpolated cloud cover on 20041105 at 15:00 (on layer 40)') # plt.savefig('horizontally_coarse_grained_cloud_cover.pdf')Fraction of lakes# Note that the cloud cover scheme used was a 0-1 cloud cover scheme. maps = psy.plot.mapplot(fr_lake_qubicc, dims = {'name': 'lake'}, projection='robin', cmap='RdBu_r', title='Cloud cover on 20041105 at 15:00 (on layer 40)') # Note that the cloud cover scheme used was a 0-1 cloud cover scheme. maps = psy.plot.mapplot(fr_lake_narval, dims = {'name': 'FR_LAKE'}, projection='robin', cmap='RdBu_r', title='Cloud cover on 20041105 at 15:00 (on layer 40)')range()In this short lecture we will be discussing the range function. We haven't developed a very deep level of knowledge of functions yet, but we can understand the basics of this simple (but extremely useful!) function.range() allows us to create a list of numbers ranging from a starting point *up to* an ending point. We can also specify step size. Lets walk through a few examples:range(0,10) x =range(0,10) type(x) start = 0 #Default stop = 20 x = range(start,stop) xGreat! Notice how it went *up to* 20, but doesn't actually produce 20. Just like in indexing. What about step size? We can specify that as a third argument:x = range(start,stop,2) #Show xAwesome! Well thats it...or is it? Python 3 Alert!You might have been wondering, what happens if I want to use a huge range of numbers? Can my computer store that all in memory?Great thinking! This is a dilemma that can be solve with the use of a generator. For a simplified explanation: A generator allows the generation of generated objects that are provided at that instance but does not store every instance generated into memory.This means a generator would not create a list to generate like range() does, but instead provide a one time generation of the numbers in that range. Python 2 has a built-in range generator called xrange(). It is recommended to use xrange() for **for** loops in Python 2. The good news is in Python 3, range() behaves as a generator and you don't need to worry about it. 
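A quick Python 3 illustration (the variable name r is just for this example):
r = range(0, 1000000000)       # no billion-element list is ever built
print(type(r))                 # <class 'range'>
print(r[5], len(r))            # lazy, yet still supports indexing and len()
print(list(range(0, 20, 5)))   # materialize only when a real list is needed: [0, 5, 10, 15]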
Let's see a quick example with xrange()for num in range(10): print num for num in xrange(10): print num0 1 2 3 4 5 6 7 8 9First, create your HamiltonianH = p[1]*p[1]/4 + o*o/4/q[1]/q[1] - 1/a/2/q[1] - 2/sympy.sqrt(q[1]*q[1]+q[2]*q[2]) + (a+2)*p[2]*p[2]/a/4; HSubstitute Hamiltonian to a class `Hamiltonian`ham = Hamilton.Hamiltonian(H, [p[1],p[2]], [q[1],q[2]])Expand Hamiltonian around an equilibrium pointequilibrium_points = [0, 0, o*o*a/(4*a+1), 0] ham.expand_around_equilibrium(equilibrium_points, max_degree=4)`rescale()` will rescale the coefficient of $p_i$ and $q_i$ham.rescale() ham.coeff_subs([(a, (8-l*l)/(4*l*l-4))]) ham.rotate45()Now we are able to calculate Birkhoff normal form.Substitute the above `Hamiltonian` to a class `LieTransform`birkhoff = Birkhoff.LieTransform.fromHamiltonian(ham) birkhoff.exec()Initialized do exec() and calculate normal form Lie transform completed!! do normalform() to print normal form of input HamiltonianRun `normalform()` and you will see Birkhoff normal form!!birkhoff.normalform()Марковские цепиАлексей Артемов $e^0$. Что такое марковская цепь? Пусть $E$ - некоторое дискретное (конечное или счётное) множество, которое называют _пространством состояний_. **Примеры:** * $E_1 = \{\text{солнечно}, \text{пасмурно}, \text{дождь}, \text{снег}\}$ - пространство погодных условий * $E_2 = \{\text{а}, \text{б}, \ldots, \text{я}\}$ - пространство кириллических букв * $E_3 = \mathbb{N} = \{0, 1, \ldots, \}$ - пространство целых чисел (число студентов в классе) Если система находится в состоянии $i \in E$ в момент времени $n$, то в момент времени $n + 1$ она может перейти в состояние $j \in E$ с _переходной вероятностью_$p_{ij}$. **Примеры:** * Для кириллицы $p_{\text{п},\text{р}} = 0.278, \quad p_{\text{п},\text{ы}} = 0.009$ * Число студентов в классе может изменяться лишь на один: $p_{k,k + 1} = p, \quad p_{k,k + 1} = q, \quad p_{k,k} = 1 - p - q$ Свойства переходной вероятности:$$ \forall i, j \in E \quad p_{ij} \geq 0 \quad\text{ и } \quad\forall i \in E \quad \sum_{j \in E} p_{ij} = 1.$$Переходные вероятности образуют _матрицу переходных вероятностей_ $P = (p_{ij})_{i, j \in E}$. **Марковская цепь** с пространством состояний $E$ и матрицей переходных вероятностей $P$ - это случайный процесс с дискретным временем $X = (X_{n})_{n \in \mathbb{N}}$, $X_{n} \in E$, для которого * известны начальные распределения $\alpha_{i} \equiv \Pr({X_{0} = i})$, * верно _марковское свойство_: для любого натурального $n$ и любых $i_{0}, i_{1}, \ldots, i_{n - 1}, i, j$ $$ \Pr({X_{n + 1} = j | X_{n} = i}) = \\ \Pr({X_{n + 1} = j | X_{0} = i_{0}, \ldots, X_{n - 1} = i_{n - 1}, X_{n} = i}) = p_{ij}, $$ если условные вероятности хорошо определены, то есть $\Pr({X_{0} = i_{0}, \dots, X_{n} = i}) > 0$.Неформально говоря, марковское свойство означает, что то, как система будет развиваться в текущий момент, не зависит от того, что было в прошлом и зависит только от настоящего. $\log_4 16$. Оценка матрицы переходных вероятностей Дана последовательность наблюдений$$X_1, X_2, \ldots, X_N, \qquad X_i \in E.$$Как подсчитать матрицу переходных вероятностей $p_{ij} = \Pr({X_{n + 1} = j | X_{n} = i})$? 
**Закон больших чисел:** частота некоторого события в серии независимых испытаний приближается (и остается близкой) к его вероятности:$$\nu_n(A) \to \Pr(A) (n \to \infty), \quad \nu_n(A) = \frac {n_A} {n}$$ $$p_{ij} = \Pr({X_{n + 1} = j | X_{n} = i}) \approx \frac 1 N \sum\limits_{n = 1} I(X_{n + 1} = j | X_{n} = i)$$**Пример:** оценка марковской цепи, управляющей буквами русского алфавитаwith open('data/book1.txt') as book: data = book.read() data = data.decode('utf-8') import re from itertools import izip text = ''.join(re.findall(U'[А-Яа-яё]+', data)).lower() RUSSIAN = u'абвгдеёжзийклмнопрстуфхцчшщьыъэюя' # Создадим массив размера n x n, где n = 33 - число символов русского алфавита mc = dict.fromkeys(RUSSIAN, None) for c in RUSSIAN: mc[c] = dict.fromkeys(RUSSIAN, 0) # Подсчитаем число вхождений каждого символа после каждого for cp, cn in zip(text[:-1], text[1:]): mc[cp][cn] += 1 # Отнормируем на единицу for cp, count_by_cn in mc.iteritems(): norm = sum(count_by_cn.values()) mc[cp] = {cn: float(count) / max(1, float(norm)) for cn, count in count_by_cn.iteritems()}**Практика:** оценивание вероятностей с помощью марковской цепи. $\frac {27} {9}$. Траектории марковской цепи Теперь вопрос: допустим, что у нас есть какая-то траектория (последовательность состояний). Какова её вероятность? Ответ на этот вопрос даст одна простая теорема.**Теорема о состояниях марковской цепи**Для любого натурального $n$ и любых $i_{0}, i_{1}, \dots, i_{n - 1}, i, j$$$ \Pr({X_{0} = i_{0}, X_{1} = i_{1}, \dots, X_{n} = i_{n}}) = \alpha_{i_{0}} p_{i_{0}i_{1}} \ldots p_{i_{n - 1}i_{n}}.$$ **Пример:** Пусть вероятность начального состояния цепи для русских букв равна $\frac {1} {33}$.Чему равна вероятность наблюдать строку "мама"? А "константинопольский"? А "мамамамамамамамамама"?s = u'константинопольский' proba = 1. / 3. for cp, cn in zip(s[:-1], s[1:]): proba *= mc[cp][cn] print proba1.22856902902e-07**Следствие.** Для любого натурального $n$ и любого $i_{n} \in E$$$ \Pr({X_{n} = i_{n}}) = \sum_{i_{0}, \ldots, i_{n - 1} \in E} \alpha_{i_{0}} p_{i_{0}i_{1}} \ldots p_{i_{n - 1}i_{n}}.$$**Вопрос:** как подсчитать вероятность встретить в 10-буквенном слове букву "й"? Но обычно нас не интересует полный путь, а лишь начало и конец. Поэтому вводят вероятность перейти из состояния $i$ в состояние $j$ за $n$ шагов:$$ p_{ij}^{(n)} = \Pr({X_{n} = j | X_{0} = i})$$Чему равна эта вероятность? Воспользуемся теоремой о состояниях:$$ \Pr({X_{n} = j | X_{0} = i}) = \frac{\Pr({X_{n} = j, X_{0} = i})} {\Pr({X_{0} = i})} = \sum_{i_{1}, \ldots, i_{n - 1} \in E} \frac{\Pr({X_{0} = i, X_{1} = i_{1}, \dots, X_{n - 1} = i_{n - 1}, X_{n} - j})} {\Pr({X_{0} = i})} = \sum_{i_{1}, \ldots, i_{n - 1} \in E} p_{ii_{1}} \ldots p_{i_{n - 1}j}.$$ Если мы посмотрим на случай $n = 2$, то полученное выражение очень похоже на скалярное произведение строк матрицы переходной вероятности. Оказывается, что это не так уж и далеко от истины.**Теорема.** Пусть $P^{(n)} = (p_{ij}^{(n)})_{i,j \in E}$. Тогда $P^{(n)} = P \cdot P \cdot \ldots \cdot P = P^{n}$. **Вопрос:** как подсчитать вероятность, что слово из трех букв начинается на букву "х" и заканчивается на букву "й"?start = u'х' end = u'й' prior = 1. 
/ len(RUSSIAN) for c in RUSSIAN: proba = prior * mc[start][c] * mc[c][end] if proba > 0: print ''.join([start, c, end]), probaхай 4.38012629344e-05 хвй 3.23286414874e-08 хей 1.32206754118e-05 хзй 1.39947943493e-08 хий 3.92959699592e-05 хйй 2.40654985476e-10 хкй 2.20844273506e-08 хлй 3.10674088228e-08 хмй 2.17792583355e-08 хой 0.000395888720768 хрй 1.45905654848e-08 хсй 2.74295252423e-08 хтй 1.48074842563e-08 хуй 2.94834300926e-06 ххй 6.76195608092e-09 хэй 5.89171184121e-07 хяй 1.58134181297e-07Это работает не всегда. Почему же? Потому что никто не обещал, что переходная вероятность не зависит от шага. Если она действительно не зависит, то говорят, что марковская цепь _однородна_. $2^2$. Генерирование выборок из марковской цепи Как создать реализацию длины $N$ из марковской цепи? 1. Сгенерировать начальное состояние согласно распределению $\alpha_{i} \equiv \Pr({X_{0} = i})$, положить $n \leftarrow 0$. 2. Пока $n < N$, повторять: * Имея контекст $X_{n}$, сгенерировать состояние $X_{n+1}$ из распределения $\Pr({X_{n+1} | X_{n}})$ * Положить $n \leftarrow n + 1$import numpy as np mc_matrix = np.zeros((33, 33)) for cp in RUSSIAN: for cn in RUSSIAN: mc_matrix[RUSSIAN.index(cp)][RUSSIAN.index(cn)] = mc[cp][cn] s = [] start = np.random.choice(list(RUSSIAN)) s.append(start) length = 10 for i in xrange(length): index = RUSSIAN.index(s[i]) next_char = np.random.choice(list(RUSSIAN), p=mc_matrix[index]) s.append(next_char) print ''.join(s)шехашиожикоPSNR# for key in df_1.keys(): key = "TEST02 (2)" print(key) sheet18 = df_18[key] sheet28 = df_28[key] sheet38 = df_38[key] sheet116 = df_116[key] sheet216 = df_216[key] sheet316 = df_316[key] sheet132 = df_132[key] sheet232 = df_232[key] sheet332 = df_332[key] bpp = sheet18["bpp"] p18 = sheet18["PSNR"] p28 = sheet28["PSNR"] p38 = sheet38["PSNR"] p116 = sheet116["PSNR"] p216 = sheet216["PSNR"] p316 = sheet316["PSNR"] p132 = sheet132["PSNR"] p232 = sheet232["PSNR"] p332 = sheet332["PSNR"] plt.figure(figsize=(5.5,5)) # DCTとの差を表示する # plt.scatter(bpp,p28-p18,label="8x8 TPHCLT3",marker=",") # plt.plot(bpp,p28-p18) # plt.scatter(bpp,p38-p18,label="8x8 DMLCT",marker="^") # plt.plot(bpp,p38-p18) # plt.scatter(bpp,(p216/p116-1)*100,label="16x16 TPHCLT3",marker="+") # plt.plot(bpp,(p216/p116-1)*100) # plt.scatter(bpp,(p316/p116-1)*100,label="16x16 DMLCT",marker="x") # plt.plot(bpp,(p316/p116-1)*100) plt.scatter(bpp,(p232/p132-1)*100,label="32x32 TPHCLT3",marker="1") plt.plot(bpp,(p232/p132-1)*100) plt.scatter(bpp,(p332/p132-1)*100,label="32x32 DMLCT",marker="*") plt.plot(bpp,(p332/p132-1)*100) plt.xlabel("Bit Rate[bits/pixel]") plt.ylabel("Improvement ratio[%]") # plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left') plt.legend() # plt.tight_layout()#グラフが重ならず,設定した図のサイズ内に収まる。 # plt.show() plt.savefig("output/psnr_diff" + key + ".png",dpi=300,bbox_inches='tight') plt.show()TEST02 (2)MSSIMkey = "N8RGB_2" print(key) sheet18 = df_18[key] sheet28 = df_28[key] sheet38 = df_38[key] sheet116 = df_116[key] sheet216 = df_216[key] sheet316 = df_316[key] sheet132 = df_132[key] sheet232 = df_232[key] sheet332 = df_332[key] bpp = sheet18["bpp"] p18 = sheet18["MSSIM"] p28 = sheet28["MSSIM"] p38 = sheet38["MSSIM"] p116 = sheet116["MSSIM"] p216 = sheet216["MSSIM"] p316 = sheet316["MSSIM"] p132 = sheet132["MSSIM"] p232 = sheet232["MSSIM"] p332 = sheet332["MSSIM"] fig = plt.figure(figsize=(5.5,5)) # fig = plt.figure(figsize=(5.5,5)) ax = fig.add_subplot(1,1,1) # Axesを作成 # DCTとの差を表示する # plt.scatter(bpp,p28-p18,label="8x8 TPHCLT3",marker=",") # plt.plot(bpp,p28-p18) # 
plt.scatter(bpp,p38-p18,label="8x8 DMLCT",marker="^") # plt.plot(bpp,p38-p18) plt.scatter(bpp,(p216/p116-1)*100,label="16x16 TPHCLT3",marker="+") plt.plot(bpp,(p216/p116-1)*100) plt.scatter(bpp,(p316/p116-1)*100,label="16x16 DMLCT",marker="x") plt.plot(bpp,(p316/p116-1)*100) # plt.scatter(bpp,(p232/p132-1)*100,label="32x32 TPHCLT3",marker="1") # plt.plot(bpp,(p232/p132-1)*100) # plt.scatter(bpp,(p332/p132-1)*100,label="32x32 DMLCT",marker="*") # plt.plot(bpp,(p332/p132-1)*100) # ax.set_ylim(-0.00,0.003) # ax.yaxis.set_major_formatter(FixedOrderFormatter(-2 ,useMathText=True)) # ax.ticklabel_format(style="sci", axis="y",scilimits=(0,0)) ax.set_xlabel("Bit Rate[bits/pixel]") ax.set_ylabel("Improvement ratio[%]") # ax.legend(bbox_to_anchor=(-1.05, 1), loc='upper left') ax.legend() plt.tight_layout()#グラフが重ならず,設定した図のサイズ内に収まる。 # plt.savefig("output/mssim" + key + "_" + option + ".png",dpi=300) plt.savefig("output/mssim_diff" + key + ".png",dpi=300) plt.show()N8RGB_2MSDS# https://mkacky.wordpress.com/2014/06/24/matplotlib%E3%81%A7%E6%A3%92%E3%82%B0%E3%83%A9%E3%83%95%EF%BC%88%E6%A8%AA%E3%81%AB%E4%B8%A6%E3%81%B9%E3%82%8B%EF%BC%89/ for key in df_1.keys(): # key = "Airplane" bpp_index = 0 print(key) orisheet = df_4[key] sheet1 = df_1[key] sheet2 = df_2[key] sheet3 = df_3[key] p1 = np.array([sheet1["MSDS1"][bpp_index], sheet1["MSDS2"][bpp_index]]) p2 = np.array([sheet2["MSDS1"][bpp_index], sheet2["MSDS2"][bpp_index]]) p3 = np.array([sheet3["MSDS1"][bpp_index], sheet3["MSDS2"][bpp_index]]) orip = np.array([orisheet["MSDS1"][0], orisheet["MSDS2"][0]]) data = np.array([p1,p2,p3,orip]) # 軸の目盛り # plt.gca().yaxis.set_tick_params(which='both', direction='in',bottom=True, top=False, left=True, right=False) fig = plt.figure(figsize=(5.5,5)) ax = fig.add_subplot(1,1,1) # Axesを作成 # 種類 num_sample = 4 # MSDS1, MSDS2の2つ num_item_per_sample = 2 ## 凡例用のラベル(サンプル数だけ必要) legend_labels = ["DCT", "TPHLCT3", "DMLCT","Original"] ## 棒グラフの幅 width = 0.25 ## 余白 margin = 0.2 ## 1尺度あたりのデータを並べるのに必要な幅。 block = width * num_sample + margin ## 棒グラフ(長方形)の左下の位置の基準にするポイント ind = np.arange(num_item_per_sample) * block ## 各サンプルについて、棒グラフを描画する for i in range(num_sample): ax.bar( ind + width*i, ## 棒グラフの左下の点の座標。データ毎に少しずつズラす data[i], ## 各始点にプロットされる1次元配列 width, ## 棒の幅 label=legend_labels[i] ## 棒の凡例名 ) ## x軸に表示するラベルを設定する xlabels = np.array(["MSDS1", "MSDS2"]) ##x軸にラベルを表示する位置を設定する。 xlocs = ind + width * num_sample / 2 - width/2 ## xtics(labelの位置, label), labelは1次元配列 ax.set_xticks(xlocs) ax.set_xticklabels(xlabels) ## 余白を加味したx軸方向の変域 # ax.set_xlim(-margin, ind[-1]+width*num_sample+margin) ax.yaxis.set_major_formatter(FixedOrderFormatter(3 ,useMathText=True)) ax.ticklabel_format(style="sci", axis="y",scilimits=(0,0)) ax.legend() plt.savefig("output/msds" + key + "_" + option + ".png",dpi=300) plt.show() # https://mkacky.wordpress.com/2014/06/24/matplotlib%E3%81%A7%E6%A3%92%E3%82%B0%E3%83%A9%E3%83%95%EF%BC%88%E6%A8%AA%E3%81%AB%E4%B8%A6%E3%81%B9%E3%82%8B%EF%BC%89/ for key in df_1.keys(): # key = "Airplane" bpp_index = 3 print(key) orisheet = df_4[key] sheet1 = df_1[key] sheet2 = df_2[key] sheet3 = df_3[key] p1 = np.array([sheet1["MSDS1"][bpp_index], sheet1["MSDS2"][bpp_index]]) p2 = np.array([sheet2["MSDS1"][bpp_index], sheet2["MSDS2"][bpp_index]]) p3 = np.array([sheet3["MSDS1"][bpp_index], sheet3["MSDS2"][bpp_index]]) orip = np.array([orisheet["MSDS1"][0], orisheet["MSDS2"][0]]) data = np.array([p1,p2,p3,orip]) # 軸の目盛り # plt.gca().yaxis.set_tick_params(which='both', direction='in',bottom=True, top=False, left=True, right=False) fig = 
plt.figure(figsize=(5.5,5)) ax = fig.add_subplot(1,2,1) # Axesを作成 # 種類 num_sample = 4 # MSDS1, MSDS2の2つ num_item_per_sample = 1 ## 凡例用のラベル(サンプル数だけ必要) legend_labels = ["DCT", "TPHLCT3", "DMLCT","Original"] ## 棒グラフの幅 width = 0.25 ## 余白 margin = 0.2 ## 1尺度あたりのデータを並べるのに必要な幅。 block = width * num_sample + margin ## 棒グラフ(長方形)の左下の位置の基準にするポイント ind = np.arange(num_item_per_sample) * block ## 各サンプルについて、棒グラフを描画する for i in range(num_sample): ax.bar( ind + width*i, ## 棒グラフの左下の点の座標。データ毎に少しずつズラす data[i], ## 各始点にプロットされる1次元配列 width, ## 棒の幅 label=legend_labels[i] ## 棒の凡例名 ) ## x軸に表示するラベルを設定する xlabels = np.array(["MSDS1", "MSDS2"]) ##x軸にラベルを表示する位置を設定する。 xlocs = ind + width * num_sample / 2 - width/2 ## xtics(labelの位置, label), labelは1次元配列 ax.set_xticks(xlocs) ax.set_xticklabels(xlabels) ## 余白を加味したx軸方向の変域 # ax.set_xlim(-margin, ind[-1]+width*num_sample+margin) ax.yaxis.set_major_formatter(FixedOrderFormatter(3 ,useMathText=True)) ax.ticklabel_format(style="sci", axis="y",scilimits=(0,0)) # ax.legend() ############################################################################################################### p1 = np.array([sheet1["MSDS2"][bpp_index]]) p2 = np.array([sheet2["MSDS2"][bpp_index]]) p3 = np.array([sheet3["MSDS2"][bpp_index]]) orip = np.array([orisheet["MSDS2"][0]]) data = np.array([p1,p2,p3,orip]) # 軸の目盛り # plt.gca().yaxis.set_tick_params(which='both', direction='in',bottom=True, top=False, left=True, right=False) # fig = plt.figure(figsize=(5.5,5)) ax = fig.add_subplot(1,2,2) # Axesを作成 # 種類 num_sample = 4 # MSDS1, MSDS2の2つ num_item_per_sample = 1 ## 凡例用のラベル(サンプル数だけ必要) legend_labels = ["DCT", "TPHLCT3", "DMLCT","Original"] ## 棒グラフの幅 width = 0.25 ## 余白 margin = 0.2 ## 1尺度あたりのデータを並べるのに必要な幅。 block = width * num_sample + margin ## 棒グラフ(長方形)の左下の位置の基準にするポイント ind = np.arange(num_item_per_sample) * block ## 各サンプルについて、棒グラフを描画する for i in range(num_sample): ax.bar( ind + width*i, ## 棒グラフの左下の点の座標。データ毎に少しずつズラす data[i], ## 各始点にプロットされる1次元配列 width, ## 棒の幅 label=legend_labels[i] ## 棒の凡例名 ) ## x軸に表示するラベルを設定する xlabels = np.array(["MSDS2"]) ##x軸にラベルを表示する位置を設定する。 xlocs = ind + width * num_sample / 2 - width/2 ## xtics(labelの位置, label), labelは1次元配列 ax.set_xticks(xlocs) ax.set_xticklabels(xlabels) ## 余白を加味したx軸方向の変域 # ax.set_xlim(-margin, ind[-1]+width*num_sample+margin) ax.yaxis.set_major_formatter(FixedOrderFormatter(3 ,useMathText=True)) ax.ticklabel_format(style="sci", axis="y",scilimits=(0,0)) ax.legend(bbox_to_anchor=(1, 0), loc='lower right', borderaxespad=1) # ax.legend(bbox_to_anchor=(1, 0.5), loc='center right', borderaxespad=1) plt.savefig("output/msds" + key + "_" + option + ".png",dpi=300) plt.show()GridWorld Reinforcement LearningReinforcement Learning implementation of GridWorld in [Reinforcement Learning: An Introduction, p72](https://web.stanford.edu/class/psych209/Readings/SuttonBartoIPRLBook2ndEd.pdf) and [Markov Decision Process and Exact Solution Methods](https://people.eecs.berkeley.edu/~pabbeel/cs287-fa19/slides/Lec2-mdps-exact-methods.pdf). 
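For orientation, both exact methods implemented below repeatedly apply the Bellman optimality backup. Written to match the code (which credits the reward of the successor state), one sweep of value iteration is roughly
$$V_{k+1}(s) = \max_{a} \sum_{s'} P(s' \mid s, a)\,\bigl[R(s') + \gamma\, V_k(s')\bigr],$$
where, with the default noise of 0.2, $P(s' \mid s, a)$ is 0.8 for the intended move and 0.1 for each perpendicular move.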
The following descibes the environment and the agent:- The agent (robot) lives in a grid- Walls block the agent’s path- The agent’s actions do not always go as planned: - 80% of the time, the action North takes the agent North (if there is no wall there) - 10% of the time, North takes the agent West; 10% East - If there is a wall in the direction the agent would have been taken, the agent stays put `GridWorld` ClassThe `GridWorld` class solves both the model-based and model-free version of the problem.- Normal states are represented as coordinates *i.e.* $(x,y,0)$.- Goal states are flagged by its third coordinate *i.e.* $(x,y,1)$.- `GridWorld` has a `step` method to let the agent perform an action. - `GridWorld` has a `render` method to render the environment and agent.- For the model-based version of the problem, it can perform `ValueIteration` and `PolicyIteration` to determine the optimal policy.- For the model-free version of the problem, it uses `keras-rl`, `tensorflow`, and `gym` to determine the optimal policy.from gym import Env from gym.spaces import Discrete, Box import numpy as np import cv2 class GridWorld(Env): # Action space actions = [0, 1, 2, 3] # up, right, down, left convert = {0: "↑", 1: "→", 2: "↓", 3: "←", ".": "."} # Agent rendering scale = 100 agent = cv2.resize(cv2.imread("assets/Robot.jpg")/255, (scale, scale)) def __init__(self, size, start = [0,0,0], noise = 0.2, reward = 0, gamma = 0.9): # Open AI Gym parameters self.action_space = Discrete(4) self.observation_space = Box(low = 0, high = max(size), shape=(1, 3)) self.length = 1000 # Environment and State parameters self.blocked = [] self.nterminal = [] self.size = size self.N = self.size[0] * self.size[1] self.state = start self.start = start # Other parameters self.noise = noise self.reward_map = {i:reward for i in range(self.N)} self.gamma = gamma # MDP initialization self.value = [0 for _ in range(2*self.N)] self.policy = [['.'] for _ in range(self.N)] self.Q = [[0 for _ in range(len(self.actions))] for _ in range(self.N)] # Canvas initialization self.canvas = np.ones((self.size[1] * self.scale, self.size[0] * self.scale, 3)) * 1 self.bumped = -1 self.oops = "" # Converting from integers to coordinate systems def to_state(self, coord): return self.size[0] * coord[1] + coord[0] + self.N * coord[2] def to_coord(self, state): return [(state%self.N)%self.size[0], (state%self.N)//self.size[0], state//self.N] # Add a reward for a specific state def add_reward(self, coord, val): self.reward_map[self.to_state(coord)] = val # Add blocked states def add_blocked(self, coord): self.blocked.append(self.to_state(coord)) x = coord[0]; y = self.size[1] - coord[1] self.canvas[y*self.scale-self.scale : y*self.scale, x*self.scale : x*self.scale+self.scale] = 0 # Add near terminal states (penultimate states from goal states) def add_nterminal(self, coord, val): self.nterminal.append(self.to_state(coord)) goal_coord = coord.copy() goal_coord[2] = 1 self.add_reward(goal_coord, val) x = coord[0]; y = self.size[1] - coord[1] adj = 1 - abs(val) / 10 self.canvas[y*self.scale-self.scale : y*self.scale, x*self.scale : x*self.scale+self.scale] = [adj,1,adj] if val > 0 else [adj,adj,1] # Method to let the agent perform an action def step(self, action, noise = None, state = None): curr_state = self.state if state is None else state.copy() curr_noise = self.noise if noise is None else noise self.length -= 0 if state is None else 1 # Check terminal states if curr_state[2] == 1: return curr_state, self.reward_map[self.to_state(curr_state)], 
True, {} # Check near terminal states if self.to_state(curr_state) in self.nterminal: curr_state[2] = 1 return curr_state, self.reward_map[self.to_state(curr_state)], True, {} # Add noise p = np.random.uniform() if p < curr_noise / 2: action = (action + 1 + 4) % 4 self.oops = "Oops..." elif p < curr_noise: action = (action - 1 + 4) % 4 self.oops = "Oops..." else: self.oops = "" # Move to next state if action == 0: # up curr_state = [curr_state[0], curr_state[1] + 1, curr_state[2]] elif action == 1: # right curr_state = [curr_state[0] + 1, curr_state[1], curr_state[2]] elif action == 2: # down curr_state = [curr_state[0], curr_state[1] - 1, curr_state[2]] elif action == 3: # left curr_state = [curr_state[0] - 1, curr_state[1], curr_state[2]] # Check walls and blocked if self.to_state(curr_state) not in self.blocked: walled_state = [min(self.size[0]-1,max(curr_state[0],0)), min(self.size[1]-1,max(curr_state[1],0)), curr_state[2]] if curr_state != walled_state: self.bumped = action curr_state = walled_state else: self.bumped = -1 else: curr_state = self.state if state is None else state self.bumped = action # Calculate reward reward = self.reward_map[self.to_state(curr_state)] # Check if is done done = self.length == 0 # Set placeholder for info info = {} # Update self.state if state is None: self.state = curr_state # Return step information return curr_state, reward, done, info # Value Iteration to determine the optimal policy def ValueIteration(self, max_iter = 1000, tolerance = 1e-5, show_iter = False): for it in range(max_iter): new_value = self.value.copy() # Create new copy delta = 0 # Max across all states # Check all states for state in range(self.N): if state in self.blocked: # Skip blocked states continue # Current coordinate curr_coord = self.to_coord(state) max_value = -1e9 # Run through all actions for action in self.actions: self.Q[state][action] = 0 # Check all possible next states for action_next in self.actions: # (1 - noise) if correct next state if action_next == action: prob = 1 - self.noise # noise / 2 if adjacent next state elif action_next != (action + 2) % 4: prob = self.noise / 2 # 0 if opposite next state else: prob = 0 state_next, reward, done, info = self.step(action_next, noise=0, state=curr_coord) state_next = self.to_state(state_next) # Get sum self.Q[state][action] += prob * (reward + self.gamma * self.value[state_next]) # Get max across all actions max_value = max(max_value, self.Q[state][action]) # Update max value across all states delta = max(delta, abs(max_value - new_value[state])) # Update current state new_value[state] = max_value # Replace variable self.value = new_value # Break if threshold is reached if delta < tolerance or it == max_iter - 1: print("Final") self.OptimalPolicy() self.show_current(policy=True) break # Display iteration if show_iter: print(f"Iteration: {it+1}") self.OptimalPolicy() self.show_current() # Find Optimal Policy def OptimalPolicy(self): for state in range(self.N): if state in self.nterminal + self.blocked: # Skip nterminal and blocked states continue self.policy[state] = list(np.flatnonzero(self.Q[state] == np.max(self.Q[state]))) # Policy Evaluation def PolicyEvaluation(self, max_iter = 100, tolerance = 1e-5): for it in range(max_iter): new_value = self.value.copy() # Create new copy delta = 0 # Max across all states # Check all states for state in range(self.N): curr_value = 0 if state in self.blocked: # Skip blocked states continue # Current coordinate curr_coord = self.to_coord(state) # Action from policy action = 
np.random.choice(self.policy[state], size=1)[0] action = np.random.randint(0,4) if isinstance(action, str) else action # Check all possible next states for action_next in self.actions: # (1 - noise) if correct next state if action_next == action: prob = 1 - self.noise # noise / 2 if adjacent next state elif action_next != (action + 2) % 4: prob = self.noise / 2 # 0 if opposite next state else: prob = 0 state_next, reward, done, info = self.step(action_next, noise=0, state=curr_coord) state_next = self.to_state(state_next) # Get sum curr_value += prob * (reward + self.gamma * self.value[state_next]) # Update max value across all states delta = max(delta, abs(curr_value - new_value[state])) # Update current state new_value[state] = curr_value # Replace variable self.value = new_value # Return if threshold is reached if delta < tolerance or it == max_iter - 1: break # Policy Improvement def PolicyImprovement(self): self.policy = [['.'] for _ in range(self.N)] self.Q = [[0 for _ in range(len(self.actions))] for _ in range(self.N)] # Check all states for state in range(self.N): if state in self.blocked + self.nterminal: # Skip blocked states continue # Current coordinate curr_coord = self.to_coord(state) # Run through all actions for action in self.actions: # Check all possible next states for action_next in self.actions: # (1 - noise) if correct next state if action_next == action: prob = 1 - self.noise # noise / 2 if adjacent next state elif action_next != (action + 2) % 4: prob = self.noise / 2 # 0 if opposite next state else: prob = 0 state_next, reward, done, info = self.step(action_next, noise=0, state=curr_coord) state_next = self.to_state(state_next) # Get sum self.Q[state][action] += prob * (reward + self.gamma * self.value[state_next]) # Get optimal policy self.policy[state] = list(np.flatnonzero(self.Q[state] == np.max(self.Q[state]))) # Policy Iteration to determine optimal policy def PolicyIteration(self, max_iter = 100, tolerance = 1e-5, show_iter = False): self.value = [0 for _ in range(2*self.N)] self.policy = [['.'] for _ in range(self.N)] for it in range(max_iter): old_policy = self.policy.copy() self.PolicyEvaluation(max_iter, tolerance) self.PolicyImprovement() # Break if policy did not change if self.policy == old_policy or it == max_iter - 1: print("Final") self.show_current(policy=True) break # Display iteration if show_iter: print(f"Iteration: {it+1}") self.show_current() # Convert action to arrows def to_arrow(self, action): if action in self.convert: return self.convert[action] return "." 
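# The planning methods above use self.step(action_next, noise=0, state=curr_coord) as a
# deterministic one-step model: it returns the successor state and its reward without
# touching self.state. The transition probabilities are supplied explicitly instead:
# the intended action with probability 1 - noise, each perpendicular action with
# probability noise / 2, and the opposite action with probability 0 (the 80% / 10% / 10%
# split described earlier for the default noise of 0.2).
#
# ValueIteration therefore performs the Bellman optimality backup
#     V(s) <- max_a sum_{s'} P(s' | s, a) * (R(s') + gamma * V(s'))
# while PolicyEvaluation performs the same backup for the current policy's action, and
# PolicyImprovement recomputes Q(s, a) and keeps the argmax actions as the new policy.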
# Print the current value function and/or policy def show_current(self, value=True, policy=False): if value: print("Value Function") for y in range(self.size[1]-1,-1,-1): out = "" for x in range(self.size[0]): out += str(round(self.value[self.to_state([x,y,0])],2)) + "\t" print(out) if policy: print("\nCurrent Policy") for y in range(self.size[1]-1,-1,-1): out = "" for x in range(self.size[0]): out += self.to_arrow(self.policy[self.to_state([x,y,0])][0]) + "\t" print(out) print('- '*20 + '\n') # Add grids to render def add_grids(self, canvas): for x in range(0, self.size[0]): for y in range(0, self.size[1]): canvas[y*self.scale, 0:self.size[0]*self.scale] = 0 canvas[0:self.size[1]*self.scale, x*self.scale] = 0 return canvas # Overlay agent def overlay_agent(self): x = self.state[0]; y = self.size[1] - self.state[1] new_canvas = self.canvas.copy() if self.state[2] == 1: self.oops = "Goal State reached" for nx in range(x*self.scale, x*self.scale+self.scale): for ny in range(y*self.scale-self.scale, y*self.scale): if np.sum(self.agent[ny - y*self.scale, nx - x*self.scale]) > 2.9: new_canvas[ny, nx] = self.canvas[ny, nx] else: new_canvas[ny, nx] = self.agent[ny - y*self.scale, nx - x*self.scale] return new_canvas # Other errors def overlay_errors(self, canvas): x = self.state[0]; y = self.size[1] - self.state[1] # Add bumped area x_range = range(0); y_range = range(0) if self.bumped == 0: x_range = range(x*self.scale, x*self.scale+self.scale) y_range = range(y*self.scale-self.scale, int((y-0.95)*self.scale)) if self.bumped == 1: x_range = range(int((x+0.95)*self.scale), x*self.scale+self.scale) y_range = range(y*self.scale-self.scale, y*self.scale) if self.bumped == 2: x_range = range(x*self.scale, x*self.scale+self.scale) y_range = range(int((y+0.95)*self.scale)-self.scale, y*self.scale) if self.bumped == 3: x_range = range(x*self.scale, int((x-0.95)*self.scale+self.scale)) y_range = range(y*self.scale-self.scale, y*self.scale) for nx in x_range: for ny in y_range: canvas[ny, nx] = [226/255,43/255,138/255] # Add noise pos = (0, int(self.scale*0.2)) cv2.putText(canvas, self.oops, pos, cv2.FONT_HERSHEY_DUPLEX, fontScale=0.5, color=0) return canvas # Render the environment and agent def render(self, mode = "human", wait=800): new_canvas = self.overlay_agent() new_canvas = self.add_grids(new_canvas) new_canvas = self.overlay_errors(new_canvas) if mode == "human": cv2.imshow("GridWorld", new_canvas) cv2.waitKey(wait) else: return new_canvas # Reset the environment and agent def reset(self): self.state = self.start self.bumped = -1 self.oops = "" self.value = [0 for _ in range(2*self.N)] self.policy = [['.'] for _ in range(self.N)] return self.state # Close all render windows def close(self): cv2.destroyAllWindows()Model-Free Reinforcement LearningThe functions below trains an agent in the environment. 
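Before building the model, it can help to sanity-check the `gym.Env` interface that `keras-rl` will drive (`reset`, `step`, and the `(state, reward, done, info)` tuple). The snippet below is only a minimal sketch of a random rollout; it assumes an environment instance `env` like the one constructed in the Example section further down.

```python
import numpy as np

# Random rollout: a quick smoke test of the reset/step interface used by keras-rl.
state = env.reset()                                # back to the start state, e.g. [0, 1, 0]
total_reward = 0
for _ in range(50):
    action = np.random.choice(env.actions)         # up / right / down / left at random
    state, reward, done, info = env.step(action)   # noisy transition as described above
    total_reward += reward
    if done:                                       # a goal state was reached
        break
print(f"Random-rollout return: {total_reward}")
```

`keras-rl`'s `fit` and `test` run essentially this loop internally, so once the rollout works, the training below only adds the neural network model and the action-selection policy on top of it.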
Create Reinforcement Learning Modelimport numpy as np from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Flatten from tensorflow.keras.optimizers import Adam def build_model(states, actions): model = Sequential() model.add(Flatten(input_shape=states)) model.add(Dense(100, activation='relu')) model.add(Dense(80, activation='relu')) model.add(Dense(60, activation='relu')) model.add(Dense(40, activation='relu')) model.add(Dense(20, activation='relu')) model.add(Dense(10, activation='relu')) model.add(Dense(actions, activation='linear')) return modelBuild Agentfrom rl.agents import DQNAgent, SARSAAgent from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy from rl.memory import SequentialMemory def build_agent(model, actions): memory = SequentialMemory(limit=50000, window_length=1) policy = BoltzmannQPolicy() agent = DQNAgent(model=model, memory=memory, policy=policy, nb_actions=actions, nb_steps_warmup=100, target_model_update=1e-2) # policy = EpsGreedyQPolicy() # agent = SARSAAgent(model=model, policy=policy, nb_actions=actions, nb_steps_warmup=10) return agentTrain Agentdef train_agent(env): states = env.observation_space.shape actions = env.action_space.n model = build_model(states, actions) agent = build_agent(model, actions) agent.compile(Adam(learning_rate=1e-2), metrics=['mae']) agent.fit(env, nb_steps=10000, visualize=False, verbose=1) return agentExampleThe example below is based on the following environment.env = GridWorld([5,5], start=[0,1,0], reward=0) env.add_blocked([1,2,0]) env.add_blocked([1,3,0]) env.add_blocked([3,2,0]) env.add_nterminal([0,0,0], -10) env.add_nterminal([1,0,0], -10) env.add_nterminal([2,0,0], -10) env.add_nterminal([3,0,0], -10) env.add_nterminal([4,0,0], -10) env.add_nterminal([2,2,0], 1) env.add_nterminal([4,2,0], 10)Sample SimulationThe cell below accepts a string containing the direction of the agent and renders a sample simulation.to_int = {'u': 0, 'r': 1, 'd': 2, 'l': 3} n_state = env.reset() done = False score = 0 actions = input('Input Move List: ') # Example: uuurrrrddd env.render() for action in actions: if action in to_int: action = to_int[action] else: print('Please input a valid action') env.close() break n_state, reward, done, info = env.step(action) score += reward env.render() if done: break print(f'Score: {score}') env.close()Score: 1Model-Based CaseThis subsection presents the optimal policy using `ValueIteration` and `PolicyIteration`.env.ValueIteration(show_iter=False, max_iter=100) env.PolicyIteration(show_iter=False, max_iter=100)Final Value Function 4.48 5.17 5.88 6.68 7.51 3.93 0 6.03 7.51 8.65 3.45 0 1.0 0 10.0 2.93 2.0 3.31 5.72 8.48 -10.0 -10.0 -10.0 -10.0 -10.0 Current Policy → → → → ↓ ↑ . → → ↓ ↑ . . . . ↑ ↑ → → ↑ . . . . . - - - - - - - - - - - - - - - - - - - -Model-Free CaseThis subsection determines the optimal policy using the previously defined functions.Note: Weird behavior can be observed in the model-free case. This might be due to lack of iterations and/or the type of policy/agent used.agent = train_agent(env) scores = agent.test(env, nb_episodes=1, visualize=True) env.close()Testing for 1 episodes ... 
Episode 1: reward: 10.000, steps: 46Defining the different data path# Data path containing all of the raw and processed data data = '../Data/' # Result path containing all the results from the analysisi resultpath = '../Results/' # ID for the PPI which year it's from PPI_ID = "2018_08"Loading all the tables Loading the raw counts# Load the primary raw screen counts raw_hipo_fec = pd.read_csv(os.path.join(data,'Screen', 'Raw', 'Raw_EggLaying_HpoRNAi.csv')) raw_hipo_ova = pd.read_csv(os.path.join(data,'Screen', 'Raw', 'Raw_Ova_HpoRNAi.csv')) raw_xRNAi_fec = pd.read_csv(os.path.join(data,'Screen', 'Raw', 'Raw_EggLaying.csv')) # Load the prediction results raw_hipo_fec_pred = pd.read_csv(os.path.join(data,'Screen', 'Raw', 'Raw_EggLaying_HpoRNAi_Pred.csv')) raw_hipo_ova_pred = pd.read_csv(os.path.join(data,'Screen', 'Raw', 'Raw_Ova_HpoRNAi_Pred.csv')) raw_xRNAi_fec_pred = pd.read_csv(os.path.join(data,'Screen', 'Raw', 'Raw_EggLaying_Pred.csv')) # Remove Control from dataset raw_hipo_fec = raw_hipo_fec[raw_hipo_fec['FbID'] != 'Control'] raw_hipo_ova = raw_hipo_ova[raw_hipo_ova['FbID'] != 'Control'] raw_xRNAi_fec = raw_xRNAi_fec[raw_xRNAi_fec['FbID'] != 'Control'] # Same on the prediction dataset raw_hipo_fec_pred = raw_hipo_fec_pred[raw_hipo_fec_pred['FbID'] != 'Control'] raw_hipo_ova_pred = raw_hipo_ova_pred[raw_hipo_ova_pred['FbID'] != 'Control'] raw_xRNAi_fec_pred = raw_xRNAi_fec_pred[raw_xRNAi_fec_pred['FbID'] != 'Control'] # # Append the prediction count to the primary screen results # raw_hipo_fec = raw_hipo_fec.append(raw_hipo_fec_pred) # raw_hipo_ova = raw_hipo_ova.append(raw_hipo_ova_pred) # raw_xRNAi_fec = raw_xRNAi_fec.append(raw_xRNAi_fec_pred) # # Remove any duplicate # raw_hipo_fec.drop_duplicates(inplace=True) # raw_hipo_ova.drop_duplicates(inplace=True) # raw_xRNAi_fec.drop_duplicates(inplace=True) hipo_ova = pd.read_csv(os.path.join(data,'Screen', 'hipo_ova_clean.csv')) xRNAi_fec = pd.read_csv(os.path.join(data,'Screen', 'xRNAi_fec_clean.csv')) hipo_fec = pd.read_csv(os.path.join(data,'Screen', 'hipo_fec_clean.csv')) hipo_ova_pred = pd.read_csv(os.path.join(data,'Screen', 'hipo_ova_clean_pred.csv')) xRNAi_fec_pred = pd.read_csv(os.path.join(data,'Screen', 'xRNAi_fec_clean_pred.csv')) hipo_fec_pred = pd.read_csv(os.path.join(data,'Screen', 'hipo_fec_clean_pred.csv')) hipo_ova = hipo_ova[hipo_ova['FbID'] != 'Control'] xRNAi_fec = xRNAi_fec[xRNAi_fec['FbID'] != 'Control'] hipo_fec = hipo_fec[hipo_fec['FbID'] != 'Control'] hipo_ova_pred = hipo_ova_pred[hipo_ova_pred['FbID'] != 'Control'] xRNAi_fec_pred = xRNAi_fec_pred[xRNAi_fec_pred['FbID'] != 'Control'] hipo_fec_pred = hipo_fec_pred[hipo_fec_pred['FbID'] != 'Control'] # Calculate the mean for all datasets mean_ova_gene = hipo_ova.groupby(['FbID'], as_index=False).mean() mean_fec_gene = hipo_fec.groupby(['FbID', 'Condition'], as_index=False).mean() mean_xRNAi_gene = xRNAi_fec.groupby(['FbID', 'Condition'], as_index=False).mean() mean_ova_gene_pred = hipo_ova_pred.groupby(['FbID'], as_index=False).mean() mean_fec_gene_pred = hipo_fec_pred.groupby(['FbID', 'Condition'], as_index=False).mean() mean_xRNAi_gene_pred = xRNAi_fec_pred.groupby(['FbID', 'Condition'], as_index=False).mean() # Calculate the std for ovariole number (only because the other datasets have only 1 measurement) std_ova_gene = hipo_ova.groupby(['FbID']).std().reset_index() std_ova_gene_pred = hipo_ova_pred.groupby(['FbID']).std().reset_index() # mean_ova_gene = mean_ova_gene.append(mean_ova_gene_pred) # mean_fec_gene = mean_fec_gene.append(mean_fec_gene_pred) # 
mean_xRNAi_gene = mean_xRNAi_gene.append(mean_xRNAi_gene_pred) # Here we select all the genes that were tested in the screen, # because the first screen was Hipo RNAi EggLaying measurement, this dataset contains all the tested genes screen_genes = mean_fec_gene['FbID'].unique() screen_genes_pred = list(mean_fec_gene_pred['FbID'].unique()) modules = pd.read_csv(os.path.join(resultpath, "Modules_Table_{}.csv".format(PPI_ID))) for fbid in modules[modules['SeedStatus'].str.contains('Connector')]['FbID'].values: if fbid not in screen_genes_pred: screen_genes_pred.append(fbid) assert(len(mean_fec_gene['FbID'].unique()) == 463) assert(len(mean_fec_gene_pred['FbID'].unique()) == 42)Loading gene namesnames = pd.read_table(os.path.join(data,'GeneName.csv'))Loading the signaling pathway metadatasignaling = pd.read_csv(os.path.join(data,'signaling.csv'))Loading the PPI networkG = nx.read_graphml(os.path.join(data, 'PPIs', 'PPI_{}.graphml'.format(PPI_ID)))Loading the networks modules# Modules computed in the notebook file: Seed-Connector ova_module_G = nx.read_graphml(os.path.join(resultpath,'Ova_module_{}.graphml'.format(PPI_ID))) fec_module_G = nx.read_graphml(os.path.join(resultpath,'Hpo_EggL_module_{}.graphml'.format(PPI_ID))) xRNAi_module_G = nx.read_graphml(os.path.join(resultpath,'EggL_module_{}.graphml'.format(PPI_ID))) core_module_G = nx.read_graphml(os.path.join(resultpath,'Core_module_{}.graphml'.format(PPI_ID))) # The list of connector genes connectors= pd.read_csv(os.path.join(resultpath,"ConnectorGeneList_{}.csv".format(PPI_ID))) # Grab the list of genes for each modules ova_module = ova_module_G.nodes() fec_module = fec_module_G.nodes() xRNAi_module = xRNAi_module_G.nodes() core_module = core_module_G.nodes()Loading the network metricsbetweenness = pd.read_csv(os.path.join(data, "ScreenPPI_Betweenness.csv")) closeness = pd.read_csv(os.path.join(data, "ScreenPPI_Closeness.csv")) eigenvector = pd.read_csv(os.path.join(data, "ScreenPPI_Eigenvector.csv")) degrees_cen = pd.read_csv(os.path.join(data, "ScreenPPI_DegreeCentrality.csv"))Creating the table Step 1: Make the list of genestable = pd.DataFrame(screen_genes, columns=['FbID']) table_pred = pd.DataFrame(screen_genes_pred, columns=['FbID']) table = table.merge(raw_hipo_fec[['FbID', 'Condition']], how='left', on='FbID') table = table.rename(columns={'Condition':'CG number'}) table_pred = table_pred.merge(raw_hipo_fec_pred[['FbID', 'Condition']], how='left', on='FbID') table_pred = table_pred.rename(columns={'Condition':'CG number'}) table = table.merge(names, how='left', on='FbID') table_pred = table_pred.merge(names, how='left', on='FbID')Step 2: Add the screen data for each genes Hippo RNAi Egg Laying# Hippo RNAi Egg Laying screen egg counts # First we merge the existing table using the FbID column for 1 to 1 matching. 
# Then we rename that collumn for a unique name in the global database table # Rinse and repeat for all values table = table.merge(mean_fec_gene[mean_fec_gene['Condition'] == 'Day 1'][['FbID', 'Count', 'Z']], how='left', on='FbID') table = table.rename(columns={'Z':'HippoRNAi_EggL_Day_1_Egg_Zscore', 'Count':'HippoRNAi_EggL_Day_1_Egg_Count'}) table = table.merge(mean_fec_gene[mean_fec_gene['Condition'] == 'Day 2 '][['FbID', 'Count', 'Z']], how='left', on='FbID') table = table.rename(columns={'Z':'HippoRNAi_EggL_Day_2_Egg_Zscore', 'Count':'HippoRNAi_EggL_Day_2_Egg_Count'}) table = table.merge(mean_fec_gene[mean_fec_gene['Condition'] == 'Day 3'][['FbID', 'Count', 'Z']], how='left', on='FbID') table = table.rename(columns={'Z':'HippoRNAi_EggL_Day_3_Egg_Zscore', 'Count':'HippoRNAi_EggL_Day_3_Egg_Count'}) table = table.merge(mean_fec_gene[mean_fec_gene['Condition'] == 'Day 4 '][['FbID', 'Count', 'Z']], how='left', on='FbID') table = table.rename(columns={'Z':'HippoRNAi_EggL_Day_4_Egg_Zscore', 'Count':'HippoRNAi_EggL_Day_4_Egg_Count'}) table = table.merge(mean_fec_gene[mean_fec_gene['Condition'] == 'Day 5'][['FbID', 'Count', 'Z']], how='left', on='FbID') table = table.rename(columns={'Z':'HippoRNAi_EggL_Day_5_Egg_Zscore', 'Count':'HippoRNAi_EggL_Day_5_Egg_Count'}) table = table.merge(mean_fec_gene[mean_fec_gene['Condition'] == 'Sum'][['FbID', 'Count', 'Z']], how='left', on='FbID') table = table.rename(columns={'Z':'HippoRNAi_EggL_All_Days_Egg_Sum_Zscore', 'Count':'HippoRNAi_EggL_All_Days_Egg_Sum_Count'}) # table = table.merge(mean_fec_gene[mean_fec_gene['Condition'] == 'Sum'][['FbID', 'Batch']], how='left', on='FbID') # table = table.rename(columns={'Batch':'HippoRNAi_EggL_Batch'}) table_pred = table_pred.merge(mean_fec_gene_pred[mean_fec_gene_pred['Condition'] == 'Day 1'][['FbID', 'Count', 'Z']], how='left', on='FbID') table_pred = table_pred.rename(columns={'Z':'HippoRNAi_EggL_Day_1_Egg_Zscore', 'Count':'HippoRNAi_EggL_Day_1_Egg_Count'}) table_pred = table_pred.merge(mean_fec_gene_pred[mean_fec_gene_pred['Condition'] == 'Day 2 '][['FbID', 'Count', 'Z']], how='left', on='FbID') table_pred = table_pred.rename(columns={'Z':'HippoRNAi_EggL_Day_2_Egg_Zscore', 'Count':'HippoRNAi_EggL_Day_2_Egg_Count'}) table_pred = table_pred.merge(mean_fec_gene_pred[mean_fec_gene_pred['Condition'] == 'Day 3'][['FbID', 'Count', 'Z']], how='left', on='FbID') table_pred = table_pred.rename(columns={'Z':'HippoRNAi_EggL_Day_3_Egg_Zscore', 'Count':'HippoRNAi_EggL_Day_3_Egg_Count'}) table_pred = table_pred.merge(mean_fec_gene_pred[mean_fec_gene_pred['Condition'] == 'Day 4 '][['FbID', 'Count', 'Z']], how='left', on='FbID') table_pred = table_pred.rename(columns={'Z':'HippoRNAi_EggL_Day_4_Egg_Zscore', 'Count':'HippoRNAi_EggL_Day_4_Egg_Count'}) table_pred = table_pred.merge(mean_fec_gene_pred[mean_fec_gene_pred['Condition'] == 'Day 5'][['FbID', 'Count', 'Z']], how='left', on='FbID') table_pred = table_pred.rename(columns={'Z':'HippoRNAi_EggL_Day_5_Egg_Zscore', 'Count':'HippoRNAi_EggL_Day_5_Egg_Count'}) table_pred = table_pred.merge(mean_fec_gene_pred[mean_fec_gene_pred['Condition'] == 'Sum'][['FbID', 'Count', 'Z']], how='left', on='FbID') table_pred = table_pred.rename(columns={'Z':'HippoRNAi_EggL_All_Days_Egg_Sum_Zscore', 'Count':'HippoRNAi_EggL_All_Days_Egg_Sum_Count'}) # table_pred = table_pred.merge(mean_fec_gene_pred[mean_fec_gene_pred['Condition'] == 'Sum'][['FbID', 'Batch']], how='left', on='FbID') # table_pred = table_pred.rename(columns={'Batch':'HippoRNAi_EggL_Batch'})Egg Laying# Egg Laying screen egg counts # We 
use the same technic as above table = table.merge(mean_xRNAi_gene[mean_xRNAi_gene['Condition'] == 'Day 1'][['FbID', 'Count', 'Z']], how='left', on='FbID') table = table.rename(columns={'Z':'EggL_Day_1_Egg_Zscore', 'Count':'EggL_Day_1_Egg_Count'}) table = table.merge(mean_xRNAi_gene[mean_xRNAi_gene['Condition'] == 'Day 2 '][['FbID', 'Count', 'Z']], how='left', on='FbID') table = table.rename(columns={'Z':'EggL_Day_2_Egg_Zscore', 'Count':'EggL_Day_2_Egg_Count'}) table = table.merge(mean_xRNAi_gene[mean_xRNAi_gene['Condition'] == 'Day 3'][['FbID', 'Count', 'Z']], how='left', on='FbID') table = table.rename(columns={'Z':'EggL_Day_3_Egg_Zscore', 'Count':'EggL_Day_3_Egg_Count'}) table = table.merge(mean_xRNAi_gene[mean_xRNAi_gene['Condition'] == 'Day 4 '][['FbID', 'Count', 'Z']], how='left', on='FbID') table = table.rename(columns={'Z':'EggL_Day_4_Egg_Zscore', 'Count':'EggL_Day_4_Egg_Count'}) table = table.merge(mean_xRNAi_gene[mean_xRNAi_gene['Condition'] == 'Day 5'][['FbID', 'Count', 'Z']], how='left', on='FbID') table = table.rename(columns={'Z':'EggL_Day_5_Egg_Zscore', 'Count':'EggL_Day_5_Egg_Count'}) table = table.merge(mean_xRNAi_gene[mean_xRNAi_gene['Condition'] == 'Sum'][['FbID', 'Count', 'Z']], how='left', on='FbID') table = table.rename(columns={'Z':'EggL_All_Days_Egg_Sum_Zscore', 'Count':'EggL_All_Days_Egg_Sum_Count'}) # table = table.merge(mean_xRNAi_gene[mean_xRNAi_gene['Condition'] == 'Sum'][['FbID','Batch']], how='left', on='FbID') # table = table.rename(columns={'Batch':'EggL_Batch'}) # Egg Laying screen egg counts # We use the same technic as above table_pred = table_pred.merge(mean_xRNAi_gene_pred[mean_xRNAi_gene_pred['Condition'] == 'Day 1'][['FbID', 'Count', 'Z']], how='left', on='FbID') table_pred = table_pred.rename(columns={'Z':'EggL_Day_1_Egg_Zscore', 'Count':'EggL_Day_1_Egg_Count'}) table_pred = table_pred.merge(mean_xRNAi_gene_pred[mean_xRNAi_gene_pred['Condition'] == 'Day 2 '][['FbID', 'Count', 'Z']], how='left', on='FbID') table_pred = table_pred.rename(columns={'Z':'EggL_Day_2_Egg_Zscore', 'Count':'EggL_Day_2_Egg_Count'}) table_pred = table_pred.merge(mean_xRNAi_gene_pred[mean_xRNAi_gene_pred['Condition'] == 'Day 3'][['FbID', 'Count', 'Z']], how='left', on='FbID') table_pred = table_pred.rename(columns={'Z':'EggL_Day_3_Egg_Zscore', 'Count':'EggL_Day_3_Egg_Count'}) table_pred = table_pred.merge(mean_xRNAi_gene_pred[mean_xRNAi_gene_pred['Condition'] == 'Day 4 '][['FbID', 'Count', 'Z']], how='left', on='FbID') table_pred = table_pred.rename(columns={'Z':'EggL_Day_4_Egg_Zscore', 'Count':'EggL_Day_4_Egg_Count'}) table_pred = table_pred.merge(mean_xRNAi_gene_pred[mean_xRNAi_gene_pred['Condition'] == 'Day 5'][['FbID', 'Count', 'Z']], how='left', on='FbID') table_pred = table_pred.rename(columns={'Z':'EggL_Day_5_Egg_Zscore', 'Count':'EggL_Day_5_Egg_Count'}) table_pred = table_pred.merge(mean_xRNAi_gene_pred[mean_xRNAi_gene_pred['Condition'] == 'Sum'][['FbID', 'Count', 'Z']], how='left', on='FbID') table_pred = table_pred.rename(columns={'Z':'EggL_All_Days_Egg_Sum_Zscore', 'Count':'EggL_All_Days_Egg_Sum_Count'}) # table_pred = table_pred.merge(mean_xRNAi_gene_pred[mean_xRNAi_gene_pred['Condition'] == 'Sum'][['FbID','Batch']], how='left', on='FbID') # table_pred = table_pred.rename(columns={'Batch':'EggL_Batch'})Ovariole Number# Hippo RNAi Ovariole number counts screen # We can merge all the data directly because we do not have many conditions in this table table = table.merge(mean_ova_gene[['FbID', 'OvarioleNb', 'Z']], how='left', on='FbID') table = 
table.rename(columns={'Batch':'HippoRNAi_Ova_Batch', 'OvarioleNb':'HippoRNAi_Ova_OvarioleNb_Mean_Count', 'Z':'HippoRNAi_Ova_OvarioleNb_Mean_Zscore'}) table = table.merge(std_ova_gene[['FbID', 'OvarioleNb']], how='left', on='FbID') table = table.rename(columns={'OvarioleNb':'HippoRNAi_Ova_OvarioleNb_Std_Count'}) table_pred = table_pred.merge(mean_ova_gene_pred[['FbID', 'OvarioleNb', 'Z']], how='left', on='FbID') table_pred = table_pred.rename(columns={'Batch':'HippoRNAi_Ova_Batch', 'OvarioleNb':'HippoRNAi_Ova_OvarioleNb_Mean_Count', 'Z':'HippoRNAi_Ova_OvarioleNb_Mean_Zscore'}) table_pred = table_pred.merge(std_ova_gene_pred[['FbID', 'OvarioleNb']], how='left', on='FbID') table_pred = table_pred.rename(columns={'OvarioleNb':'HippoRNAi_Ova_OvarioleNb_Std_Count'}) # But we want to add the raw data to the table, so we extract the 20 columns from the raw counts raw = raw_hipo_ova[raw_hipo_ova['FbID'].notnull()][['Fly 1', 'Fly 1.1', 'Fly 2', 'Fly 2.1', 'Fly 3', 'Fly 3.1', 'Fly 4', 'Fly 4.1', 'Fly 5', 'Fly 5.1', 'Fly 6', 'Fly 6.1', 'Fly 7', 'Fly 7.1', 'Fly 8', 'Fly 8.1', 'Fly 9','Fly 9.1', 'Fly 10', 'Fly 10.1', 'FbID']].groupby(['FbID']).mean().reset_index() raw_pred = raw_hipo_ova_pred[raw_hipo_ova_pred['FbID'].notnull()][['Fly 1', 'Fly 1.1', 'Fly 2', 'Fly 2.1', 'Fly 3', 'Fly 3.1', 'Fly 4', 'Fly 4.1', 'Fly 5', 'Fly 5.1', 'Fly 6', 'Fly 6.1', 'Fly 7', 'Fly 7.1', 'Fly 8', 'Fly 8.1', 'Fly 9','Fly 9.1', 'Fly 10', 'Fly 10.1', 'FbID']].groupby(['FbID']).mean().reset_index() # We merge them table = table.merge(raw, how='left', on='FbID') table_pred = table_pred.merge(raw_pred, how='left', on='FbID') # And then we rename them to follow the naming scheme table = table.rename(columns={'Fly 1' : "HippoRNAi_Ova_OvarioleNb_Fly_1.1_Count", 'Fly 1.1' : "HippoRNAi_Ova_OvarioleNb_Fly_1.2_Count", 'Fly 2' : "HippoRNAi_Ova_OvarioleNb_Fly_2.1_Count", 'Fly 2.1' : "HippoRNAi_Ova_OvarioleNb_Fly_2.2_Count", 'Fly 3' : "HippoRNAi_Ova_OvarioleNb_Fly_3.1_Count", 'Fly 3.1' : "HippoRNAi_Ova_OvarioleNb_Fly_3.2_Count", 'Fly 4' : "HippoRNAi_Ova_OvarioleNb_Fly_4.1_Count", 'Fly 4.1' : "HippoRNAi_Ova_OvarioleNb_Fly_4.2_Count", 'Fly 5' : "HippoRNAi_Ova_OvarioleNb_Fly_5.1_Count", 'Fly 5.1' : "HippoRNAi_Ova_OvarioleNb_Fly_5.2_Count", 'Fly 6' : "HippoRNAi_Ova_OvarioleNb_Fly_6.1_Count", 'Fly 6.1' : "HippoRNAi_Ova_OvarioleNb_Fly_6.2_Count", 'Fly 7' : "HippoRNAi_Ova_OvarioleNb_Fly_7.1_Count", 'Fly 7.1' : "HippoRNAi_Ova_OvarioleNb_Fly_7.2_Count", 'Fly 8' : "HippoRNAi_Ova_OvarioleNb_Fly_8.1_Count", 'Fly 8.1' : "HippoRNAi_Ova_OvarioleNb_Fly_8.2_Count", 'Fly 9' : "HippoRNAi_Ova_OvarioleNb_Fly_9.1_Count", 'Fly 9.1' : "HippoRNAi_Ova_OvarioleNb_Fly_9.2_Count", 'Fly 10' : "HippoRNAi_Ova_OvarioleNb_Fly_10.1_Count", 'Fly 10.1': "HippoRNAi_Ova_OvarioleNb_Fly_10.2_Count" }) # And then we rename them to follow the naming scheme table_pred = table_pred.rename(columns={'Fly 1' : "HippoRNAi_Ova_OvarioleNb_Fly_1.1_Count", 'Fly 1.1' : "HippoRNAi_Ova_OvarioleNb_Fly_1.2_Count", 'Fly 2' : "HippoRNAi_Ova_OvarioleNb_Fly_2.1_Count", 'Fly 2.1' : "HippoRNAi_Ova_OvarioleNb_Fly_2.2_Count", 'Fly 3' : "HippoRNAi_Ova_OvarioleNb_Fly_3.1_Count", 'Fly 3.1' : "HippoRNAi_Ova_OvarioleNb_Fly_3.2_Count", 'Fly 4' : "HippoRNAi_Ova_OvarioleNb_Fly_4.1_Count", 'Fly 4.1' : "HippoRNAi_Ova_OvarioleNb_Fly_4.2_Count", 'Fly 5' : "HippoRNAi_Ova_OvarioleNb_Fly_5.1_Count", 'Fly 5.1' : "HippoRNAi_Ova_OvarioleNb_Fly_5.2_Count", 'Fly 6' : "HippoRNAi_Ova_OvarioleNb_Fly_6.1_Count", 'Fly 6.1' : "HippoRNAi_Ova_OvarioleNb_Fly_6.2_Count", 'Fly 7' : "HippoRNAi_Ova_OvarioleNb_Fly_7.1_Count", 'Fly 7.1' : 
"HippoRNAi_Ova_OvarioleNb_Fly_7.2_Count", 'Fly 8' : "HippoRNAi_Ova_OvarioleNb_Fly_8.1_Count", 'Fly 8.1' : "HippoRNAi_Ova_OvarioleNb_Fly_8.2_Count", 'Fly 9' : "HippoRNAi_Ova_OvarioleNb_Fly_9.1_Count", 'Fly 9.1' : "HippoRNAi_Ova_OvarioleNb_Fly_9.2_Count", 'Fly 10' : "HippoRNAi_Ova_OvarioleNb_Fly_10.1_Count", 'Fly 10.1': "HippoRNAi_Ova_OvarioleNb_Fly_10.2_Count" }) # Finally we finished entering the raw data into the table so we append both tables together prior to adding the metadata. table = table.append(table_pred)Step 3: Adding the network metrics# Merge and rename as above table = table.merge(betweenness, how='left', on='FbID') table = table.rename(columns={'Betweeness':'PIN_betweenness_centrality'}) table = table.merge(closeness, how='left', on='FbID') table = table.rename(columns={'Closeness':'PIN_closeness_centrality'}) table = table.merge(eigenvector, how='left', on='FbID') table = table.rename(columns={'EigenVector':'PIN_eigenvector_centrality'}) table = table.merge(degrees_cen, how='left', on='FbID') table = table.rename(columns={'DegreeC':'PIN_degree_centrality'})Step 4: Adding the newtork modules# We create 4 columns, where if a gene is found in a module it is 1 if not a 0 # np.where is key here, if condition == true, then X, else Y table['HippoRNAi_Ova_Module'] = np.where(table['FbID'].isin(ova_module), 1, 0) table['HippoRNAi_EggL_Module'] = np.where(table['FbID'].isin(fec_module), 1, 0) table['EggL_Module'] = np.where(table['FbID'].isin(xRNAi_module), 1, 0) table['Core_Module'] = np.where(table['FbID'].isin(core_module), 1, 0) meta_modules = np.array([np.where(table['FbID'].isin(ova_module), 1, 0), np.where(table['FbID'].isin(fec_module), 1, 0), np.where(table['FbID'].isin(xRNAi_module), 1, 0)]) modules = ["001","100","010","111","011","101","110","000"] modules_leg = ["I","II","III","IV","V","VI","VII",""] res = [] for el in meta_modules.T: i = modules.index(''.join([str(s) for s in el])) res.append(modules_leg[i]) table['Meta_Module'] = res table.columnsStep 5: Adding Signaling pathways# We need to make this tidy data, so we need to add one column per signaling patway, with a 0 or a 1 # We iterate over all signaling pathway and add a column for each with 1 and 0s using the same np.where technic as above # But we first need to make the list of FbID that have this signaling pathway # that is: pathway_genes = signaling[signaling['Sig'] == pathway]['FbID'] for pathway in signaling['Sig'].unique(): pathway_genes = signaling[signaling['Sig'] == pathway]['FbID'] table['{}_pathway'.format(pathway)] = np.where(table['FbID'].isin(pathway_genes), 1, 0)Step 6: Adding Connector genestable['HpoOva_Connector'] = table['FbID'].isin(connectors[connectors['Module'] == 'Ova']['FbID'].values) table['HpoEggL_Connector'] = table['FbID'].isin(connectors[connectors['Module'] == 'HpoFec']['FbID'].values) table['EggL_Connector'] = table['FbID'].isin(connectors[connectors['Module'] == 'xRNAiFec']['FbID'].values) table['Core_Connector'] = table['FbID'].isin(connectors[connectors['Module'] == 'Core']['FbID'].values)Step 7: Cleaning upfinal_order = ['FbID', 'CG number', 'NAME', 'SYMBOL', 'HippoRNAi_EggL_Day_1_Egg_Count', 'HippoRNAi_EggL_Day_1_Egg_Zscore', 'HippoRNAi_EggL_Day_2_Egg_Count', 'HippoRNAi_EggL_Day_2_Egg_Zscore', 'HippoRNAi_EggL_Day_3_Egg_Count', 'HippoRNAi_EggL_Day_3_Egg_Zscore', 'HippoRNAi_EggL_Day_4_Egg_Count', 'HippoRNAi_EggL_Day_4_Egg_Zscore', 'HippoRNAi_EggL_Day_5_Egg_Count', 'HippoRNAi_EggL_Day_5_Egg_Zscore', 'HippoRNAi_EggL_All_Days_Egg_Sum_Count', 
'HippoRNAi_EggL_All_Days_Egg_Sum_Zscore', 'EggL_Day_1_Egg_Count', 'EggL_Day_1_Egg_Zscore', 'EggL_Day_2_Egg_Count', 'EggL_Day_2_Egg_Zscore', 'EggL_Day_3_Egg_Count', 'EggL_Day_3_Egg_Zscore', 'EggL_Day_4_Egg_Count', 'EggL_Day_4_Egg_Zscore', 'EggL_Day_5_Egg_Count', 'EggL_Day_5_Egg_Zscore', 'EggL_All_Days_Egg_Sum_Count', 'EggL_All_Days_Egg_Sum_Zscore', 'HippoRNAi_Ova_OvarioleNb_Fly_1.1_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_1.2_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_2.1_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_2.2_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_3.1_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_3.2_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_4.1_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_4.2_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_5.1_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_5.2_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_6.1_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_6.2_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_7.1_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_7.2_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_8.1_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_8.2_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_9.1_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_9.2_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_10.1_Count', 'HippoRNAi_Ova_OvarioleNb_Fly_10.2_Count', 'HippoRNAi_Ova_OvarioleNb_Mean_Count', 'HippoRNAi_Ova_OvarioleNb_Std_Count', 'HippoRNAi_Ova_OvarioleNb_Mean_Zscore', 'PIN_betweenness_centrality', 'PIN_closeness_centrality', 'PIN_eigenvector_centrality', 'PIN_degree_centrality', 'HippoRNAi_Ova_Module', 'HippoRNAi_EggL_Module', 'EggL_Module', 'Core_Module', 'Meta_Module', 'HpoOva_Connector', 'HpoEggL_Connector', 'EggL_Connector', 'Core_Connector', 'FGF_pathway', 'VEGF_pathway', 'Toll_pathway', 'JNK_pathway', 'EGF_pathway', 'mTOR_pathway', 'FOXO_pathway', 'SHH_pathway', 'Hippo_pathway', 'JAK/STAT_pathway', 'Wnt_pathway', 'Notch_pathway', 'TGF B_pathway', 'MAPK_pathway'] table = table[final_order] def round_centrality(x): return round(x, 10) for col in [c for c in table.columns if 'PPI_' in c]: table[col] = table[col].apply(round_centrality) for col in [c for c in table.columns if 'PPI_' in c]: table[col] = table[col].fillna('NotInPPI') for col in [c for c in table.columns if '_Module' in c]: if not 'Meta' in col: PPIcheck = table['PIN_closeness_centrality'].values tmp = table[col].values.astype(str) for i in range(len(tmp)): if PPIcheck[i] == 'NotInPPI': tmp[i] = 'NotInPPI' else: if tmp[i] == '0': tmp[i] = 'False' else: tmp[i] = 'True' table[col] = tmp for col in [c for c in table.columns if '_Connector' in c]: PPIcheck = table['PIN_closeness_centrality'].values tmp = table[col].values.astype(object) for i in range(len(tmp)): if PPIcheck[i] == 'NotInPPI': tmp[i] = "NotInPPI" table[col] = tmp def round_zscore(x): return round(x, 4) for col in [c for c in table.columns if '_Zscore' in c]: table[col] = table[col].apply(round_zscore) # Adding the CG number for the missing connector table.at[table[table['FbID'] == 'FBgn0027619'].index, 'CG number'] = "CG12131"Asserting that the data is correctly entered# Test that the number of genes in the database is equal to the number of gene screened in the primary screen assert(len(table) == len(screen_genes) + len(screen_genes_pred)) # -1 to get rid of the control ID total_datapoints = len(screen_genes) + len(screen_genes_pred) # Test that the number of values in each screen correspond to the raw table for each collumn # We iterate over all the collumns # For each collumns we define a test # try the assertion # if wrong then print an error message for column in table.columns: if 'Module' in column: try: 
assert(len(table[table[column].notna()]) == total_datapoints) except: print("Discrepancy in column: {}".format(column)) elif '_pathway' in column: try: assert(len(table[table[column].notna()]) == total_datapoints) except: print("Discrepancy in column: {}".format(column)) elif 'HippoRNAi_EggL' in column: try: assert(len(table[table[column].notna()]) == total_datapoints - 1) except: print("Discrepancy in column: {}".format(column)) elif 'EggL' in column and not "Connector" in column: try: assert(len(table[table[column].notna()]) == len(mean_xRNAi_gene['FbID'].unique()) + len(screen_genes_pred) - 1) except: print("Discrepancy in column: {}".format(column)) elif 'HippoRNAi_Ova' in column: try: assert(len(table[table[column].notna()]) == len(mean_ova_gene['FbID'].unique()) + len(screen_genes_pred) - 1) except: print("Discrepancy in column: {}".format(column)) elif 'Connector_Gene' == column: try: assert(len(table[table[column] == 1]) == len(connectors['FbID'].unique())) except: print("Discrepancy in column: {}".format(column)) elif 'PIN_' in column: try: assert(len(table[table[column] != 'NotInPPI']) == len(table[table['FbID'].isin(G.nodes())])) except: print("Discrepancy in column: {}".format(column)) elif "CG" in column: try: assert(len(table[table[column].notna()]) == total_datapoints) except: print("Discrepancy in column: {}".format(column))Discrepancy in column: CG number Discrepancy in column: HippoRNAi_EggL_Day_1_Egg_Count Discrepancy in column: HippoRNAi_EggL_Day_1_Egg_Zscore Discrepancy in column: HippoRNAi_EggL_Day_2_Egg_Count Discrepancy in column: HippoRNAi_EggL_Day_2_Egg_Zscore Discrepancy in column: HippoRNAi_EggL_Day_3_Egg_Count Discrepancy in column: HippoRNAi_EggL_Day_3_Egg_Zscore Discrepancy in column: HippoRNAi_EggL_Day_4_Egg_Count Discrepancy in column: HippoRNAi_EggL_Day_4_Egg_Zscore Discrepancy in column: HippoRNAi_EggL_Day_5_Egg_Count Discrepancy in column: HippoRNAi_EggL_Day_5_Egg_Zscore Discrepancy in column: HippoRNAi_EggL_All_Days_Egg_Sum_Count Discrepancy in column: HippoRNAi_EggL_All_Days_Egg_Sum_Zscore Discrepancy in column: EggL_Day_1_Egg_Count Discrepancy in column: EggL_Day_1_Egg_Zscore Discrepancy in column: EggL_Day_2_Egg_Count Discrepancy in column: EggL_Day_2_Egg_Zscore Discrepancy in column: EggL_Day_3_Egg_Count Discrepancy in column: EggL_Day_3_Egg_Zscore Discrepancy in column: EggL_Day_[...]Saving the tabletable.to_csv(os.path.join(resultpath, "MasterTable.csv"), index=False)**Segmentation with SkyDL****Setup software libraries**from DeepSky import deepsky import env**Training of Deep Learning models****Create `Trainer` object**SkyTrainer = deepsky.Trainer(privatekey_path = env.privatekey_path)**Get token**SkyTrainer.get_token(email='')Skydipper login password: ·········Image compositesSkyTrainer.composite(slugs=['Sentinel-2-Top-of-Atmosphere-Reflectance', 'USDA-NASS-Cropland-Data-Layers'],\ init_date = '2016-01-01', end_date = '2016-12-31', lat=37.9563, lon=-121.2929, zoom=9)Creation of GeostoreWe select the areas from which we will export the training, validation and testing data.train_atts = {"type": "FeatureCollection", "features": [{"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-122.882080078125, 40.50126945841645], [-122.1240234375, 40.50126945841645], [-122.1240234375, 41.008920735004885], [-122.882080078125, 41.008920735004885], [-122.882080078125, 40.50126945841645]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-122.2283935546875, 39.00637903337455], 
[-121.607666015625, 39.00637903337455], [-121.607666015625, 39.46588451142044], [-122.2283935546875, 39.46588451142044], [-122.2283935546875, 39.00637903337455]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-120.355224609375, 38.77978137804918], [-119.608154296875, 38.77978137804918], [-119.608154296875, 39.342794408952365], [-120.355224609375, 39.342794408952365], [-120.355224609375, 38.77978137804918]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-121.90979003906249, 37.70555348721583], [-120.9814453125, 37.70555348721583], [-120.9814453125, 38.39764411353178], [-121.90979003906249, 38.39764411353178], [-121.90979003906249, 37.70555348721583]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-120.03662109374999, 37.45741810262938], [-119.1851806640625, 37.45741810262938], [-119.1851806640625, 38.08268954483802], [-120.03662109374999, 38.08268954483802], [-120.03662109374999, 37.45741810262938]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-120.03662109374999, 37.45741810262938], [-119.1851806640625, 37.45741810262938], [-119.1851806640625, 38.08268954483802], [-120.03662109374999, 38.08268954483802], [-120.03662109374999, 37.45741810262938]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-120.03662109374999, 37.45741810262938], [-119.1851806640625, 37.45741810262938], [-119.1851806640625, 38.08268954483802], [-120.03662109374999, 38.08268954483802], [-120.03662109374999, 37.45741810262938]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-112.554931640625, 33.0178760185549], [-111.588134765625, 33.0178760185549], [-111.588134765625, 33.78827853625996], [-112.554931640625, 33.78827853625996], [-112.554931640625, 33.0178760185549]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-112.87353515625, 40.51379915504413], [-111.829833984375, 40.51379915504413], [-111.829833984375, 41.28606238749825], [-112.87353515625, 41.28606238749825], [-112.87353515625, 40.51379915504413]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-108.19335937499999, 39.095962936305476], [-107.1826171875, 39.095962936305476], [-107.1826171875, 39.85915479295669], [-108.19335937499999, 39.85915479295669], [-108.19335937499999, 39.095962936305476]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-106.875, 37.142803443716836], [-105.49072265625, 37.142803443716836], [-105.49072265625, 38.18638677411551], [-106.875, 38.18638677411551], [-106.875, 37.142803443716836]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-117.31201171875001, 43.27720532212024], [-116.01562499999999, 43.27720532212024], [-116.01562499999999, 44.134913443750726], [-117.31201171875001, 44.134913443750726], [-117.31201171875001, 43.27720532212024]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-115.7080078125, 44.69989765840318], [-114.7412109375, 44.69989765840318], [-114.7412109375, 45.36758436884978], [-115.7080078125, 45.36758436884978], [-115.7080078125, 44.69989765840318]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-120.65185546875, 47.517200697839414], [-119.33349609375, 47.517200697839414], 
[-119.33349609375, 48.32703913063476], [-120.65185546875, 48.32703913063476], [-120.65185546875, 47.517200697839414]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-119.83886718750001, 45.69083283645816], [-118.38867187500001, 45.69083283645816], [-118.38867187500001, 46.694667307773116], [-119.83886718750001, 46.694667307773116], [-119.83886718750001, 45.69083283645816]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-107.09472656249999, 47.45780853075031], [-105.84228515625, 47.45780853075031], [-105.84228515625, 48.31242790407178], [-107.09472656249999, 48.31242790407178], [-107.09472656249999, 47.45780853075031]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-101.57958984375, 46.93526088057719], [-100.107421875, 46.93526088057719], [-100.107421875, 47.945786463687185], [-101.57958984375, 47.945786463687185], [-101.57958984375, 46.93526088057719]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-101.162109375, 44.32384807250689], [-99.7119140625, 44.32384807250689], [-99.7119140625, 45.22848059584359], [-101.162109375, 45.22848059584359], [-101.162109375, 44.32384807250689]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-100.5908203125, 41.261291493919884], [-99.25048828124999, 41.261291493919884], [-99.25048828124999, 42.114523952464246], [-100.5908203125, 42.114523952464246], [-100.5908203125, 41.261291493919884]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-97.9541015625, 37.142803443716836], [-96.65771484375, 37.142803443716836], [-96.65771484375, 38.13455657705411], [-97.9541015625, 38.13455657705411], [-97.9541015625, 37.142803443716836]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-112.78564453124999, 32.91648534731439], [-111.357421875, 32.91648534731439], [-111.357421875, 33.925129700072], [-112.78564453124999, 33.925129700072], [-112.78564453124999, 32.91648534731439]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-106.435546875, 35.15584570226544], [-105.22705078125, 35.15584570226544], [-105.22705078125, 36.13787471840729], [-106.435546875, 36.13787471840729], [-106.435546875, 35.15584570226544]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-97.3828125, 32.45415593941475], [-96.2841796875, 32.45415593941475], [-96.2841796875, 33.22949814144951], [-97.3828125, 33.22949814144951], [-97.3828125, 32.45415593941475]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-97.97607421875, 35.04798673426734], [-97.00927734375, 35.04798673426734], [-97.00927734375, 35.764343479667176], [-97.97607421875, 35.764343479667176], [-97.97607421875, 35.04798673426734]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-97.97607421875, 35.04798673426734], [-97.00927734375, 35.04798673426734], [-97.00927734375, 35.764343479667176], [-97.97607421875, 35.764343479667176], [-97.97607421875, 35.04798673426734]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-95.4052734375, 47.62097541515849], [-94.24072265625, 47.62097541515849], [-94.24072265625, 48.28319289548349], [-95.4052734375, 48.28319289548349], [-95.4052734375, 47.62097541515849]]]}}, {"type": "Feature", 
"properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-94.19677734375, 41.27780646738183], [-93.09814453125, 41.27780646738183], [-93.09814453125, 42.13082130188811], [-94.19677734375, 42.13082130188811], [-94.19677734375, 41.27780646738183]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-93.71337890625, 37.75334401310656], [-92.6806640625, 37.75334401310656], [-92.6806640625, 38.51378825951165], [-93.71337890625, 38.51378825951165], [-93.71337890625, 37.75334401310656]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-90.63720703125, 34.615126683462194], [-89.47265625, 34.615126683462194], [-89.47265625, 35.69299463209881], [-90.63720703125, 35.69299463209881], [-90.63720703125, 34.615126683462194]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-93.05419921875, 30.44867367928756], [-91.77978515625, 30.44867367928756], [-91.77978515625, 31.57853542647338], [-93.05419921875, 31.57853542647338], [-93.05419921875, 30.44867367928756]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-90.02197265625, 44.276671273775186], [-88.59374999999999, 44.276671273775186], [-88.59374999999999, 44.98034238084973], [-90.02197265625, 44.98034238084973], [-90.02197265625, 44.276671273775186]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-90.63720703125, 38.41055825094609], [-89.49462890625, 38.41055825094609], [-89.49462890625, 39.18117526158749], [-90.63720703125, 39.18117526158749], [-90.63720703125, 38.41055825094609]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-87.56103515625, 35.62158189955968], [-86.28662109375, 35.62158189955968], [-86.28662109375, 36.4566360115962], [-87.56103515625, 36.4566360115962], [-87.56103515625, 35.62158189955968]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-90.63720703125, 31.93351676190369], [-89.49462890625, 31.93351676190369], [-89.49462890625, 32.731840896865684], [-90.63720703125, 32.731840896865684], [-90.63720703125, 31.93351676190369]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-69.54345703125, 44.68427737181225], [-68.5107421875, 44.68427737181225], [-68.5107421875, 45.336701909968134], [-69.54345703125, 45.336701909968134], [-69.54345703125, 44.68427737181225]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-73.212890625, 41.49212083968776], [-72.35595703125, 41.49212083968776], [-72.35595703125, 42.032974332441405], [-73.212890625, 42.032974332441405], [-73.212890625, 41.49212083968776]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-77.93701171875, 38.70265930723801], [-76.97021484375, 38.70265930723801], [-76.97021484375, 39.26628442213066], [-77.93701171875, 39.26628442213066], [-77.93701171875, 38.70265930723801]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-79.25537109375, 35.44277092585766], [-78.15673828125, 35.44277092585766], [-78.15673828125, 36.13787471840729], [-79.25537109375, 36.13787471840729], [-79.25537109375, 35.44277092585766]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-81.4306640625, 33.55970664841198], [-80.44189453125, 33.55970664841198], [-80.44189453125, 34.288991865037524], 
[-81.4306640625, 34.288991865037524], [-81.4306640625, 33.55970664841198]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-84.90234375, 33.394759218577995], [-83.91357421875, 33.394759218577995], [-83.91357421875, 34.19817309627726], [-84.90234375, 34.19817309627726], [-84.90234375, 33.394759218577995]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-82.28759765625, 28.246327971048842], [-81.2548828125, 28.246327971048842], [-81.2548828125, 29.209713225868185], [-82.28759765625, 29.209713225868185], [-82.28759765625, 28.246327971048842]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-109.88525390624999, 42.65012181368022], [-108.56689453125, 42.65012181368022], [-108.56689453125, 43.50075243569041], [-109.88525390624999, 43.50075243569041], [-109.88525390624999, 42.65012181368022]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-117.61962890624999, 39.04478604850143], [-116.65283203124999, 39.04478604850143], [-116.65283203124999, 39.740986355883564], [-117.61962890624999, 39.740986355883564], [-117.61962890624999, 39.04478604850143]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-102.67822265625, 31.42866311735861], [-101.71142578125, 31.42866311735861], [-101.71142578125, 32.26855544621476], [-102.67822265625, 32.26855544621476], [-102.67822265625, 31.42866311735861]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-119.47631835937499, 36.03133177633187], [-118.58642578124999, 36.03133177633187], [-118.58642578124999, 36.55377524336089], [-119.47631835937499, 36.55377524336089], [-119.47631835937499, 36.03133177633187]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-116.224365234375, 33.091541548655215], [-115.56518554687499, 33.091541548655215], [-115.56518554687499, 33.568861182555565], [-116.224365234375, 33.568861182555565], [-116.224365234375, 33.091541548655215]]]}}]} valid_atts = {"type": "FeatureCollection", "features": [{"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-122.13208008, 41.25126946], [-121.37402344, 41.25126946], [-121.37402344, 41.75892074], [-122.13208008, 41.75892074], [-122.13208008, 41.25126946]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-121.15979004, 38.45555349], [-120.23144531, 38.45555349], [-120.23144531, 39.14764411], [-121.15979004, 39.14764411], [-121.15979004, 38.45555349]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-111.80493164, 33.76787602], [-110.83813477, 33.76787602], [-110.83813477, 34.53827854], [-111.80493164, 34.53827854], [-111.80493164, 33.76787602]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-106.125, 37.89280344], [-104.74072266, 37.89280344], [-104.74072266, 38.93638677], [-106.125, 38.93638677], [-106.125, 37.89280344]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-119.08886719, 46.44083284], [-117.63867188, 46.44083284], [-117.63867188, 47.44466731], [-119.08886719, 47.44466731], [-119.08886719, 46.44083284]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-99.84082031, 42.01129149], [-98.50048828, 42.01129149], [-98.50048828, 42.86452395], 
[-99.84082031, 42.86452395], [-99.84082031, 42.01129149]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-96.6328125, 33.20415594], [-95.53417969, 33.20415594], [-95.53417969, 33.97949814], [-96.6328125, 33.97949814], [-96.6328125, 33.20415594]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-93.44677734, 42.02780647], [-92.34814453, 42.02780647], [-92.34814453, 42.8808213], [-93.44677734, 42.8808213], [-93.44677734, 42.02780647]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-89.27197266, 45.02667127], [-87.84375, 45.02667127], [-87.84375, 45.73034238], [-89.27197266, 45.73034238], [-89.27197266, 45.02667127]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-68.79345703, 45.43427737], [-67.76074219, 45.43427737], [-67.76074219, 46.08670191], [-68.79345703, 46.08670191], [-68.79345703, 45.43427737]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-80.68066406, 34.30970665], [-79.69189453, 34.30970665], [-79.69189453, 35.03899187], [-80.68066406, 35.03899187], [-80.68066406, 34.30970665]]]}}, {"type": "Feature", "properties": {}, "geometry": {"type": "Polygon", "coordinates": [[[-116.86962891, 39.79478605], [-115.90283203, 39.79478605], [-115.90283203, 40.49098636], [-116.86962891, 40.49098636], [-116.86962891, 39.79478605]]]}}]} test_atts = {"type":"FeatureCollection","features":[{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[[[-93.8836669921875,44.47299117260252],[-92.581787109375,44.47299117260252],[-92.581787109375,45.463983441272724],[-93.8836669921875,45.463983441272724],[-93.8836669921875,44.47299117260252]]]}},{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[[[-86.72607421875,39.38526381099774],[-85.69335937499999,39.38526381099774],[-85.69335937499999,40.153686857794035],[-86.72607421875,40.153686857794035],[-86.72607421875,39.38526381099774]]]}},{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[[[-81.683349609375,26.066652138577403],[-80.37597656249999,26.066652138577403],[-80.37597656249999,27.576460076262716],[-81.683349609375,27.576460076262716],[-81.683349609375,26.066652138577403]]]}},{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[[[-115.09277343749999,47.16730970131578],[-113.57666015625,47.16730970131578],[-113.57666015625,48.50932644976633],[-115.09277343749999,48.50932644976633],[-115.09277343749999,47.16730970131578]]]}},{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[[[-120.59692382812499,36.43012234551576],[-119.28955078124999,36.43012234551576],[-119.28955078124999,37.125286284966805],[-120.59692382812499,37.125286284966805],[-120.59692382812499,36.43012234551576]]]}},{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[[[-123.85986328124999,42.21224516288584],[-121.81640624999999,42.21224516288584],[-121.81640624999999,43.929549935614595],[-123.85986328124999,43.929549935614595],[-123.85986328124999,42.21224516288584]]]}},{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[[[-81.298828125,37.87485339352928],[-79.9365234375,37.87485339352928],[-79.9365234375,38.85682013474361],[-81.298828125,38.85682013474361],[-81.298828125,37.87485339352928]]]}},{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[[[-89.1650390625,40.111688665
595956],[-87.5390625,40.111688665595956],[-87.5390625,41.31082388091818],[-89.1650390625,41.31082388091818],[-89.1650390625,40.111688665595956]]]}},{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[[[-119.794921875,39.757879992021756],[-119.3115234375,39.757879992021756],[-119.3115234375,40.26276066437183],[-119.794921875,40.26276066437183],[-119.794921875,39.757879992021756]]]}},{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[[[-120.65185546875,41.590796851056005],[-120.0146484375,41.590796851056005],[-120.0146484375,42.22851735620852],[-120.65185546875,42.22851735620852],[-120.65185546875,41.590796851056005]]]}},{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[[[-119.25659179687499,43.1090040242731],[-118.531494140625,43.1090040242731],[-118.531494140625,43.476840397778936],[-119.25659179687499,43.476840397778936],[-119.25659179687499,43.1090040242731]]]}},{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[[[-111.654052734375,36.89719446989036],[-110.90698242187499,36.89719446989036],[-110.90698242187499,37.22158045838649],[-111.654052734375,37.22158045838649],[-111.654052734375,36.89719446989036]]]}}]} SkyTrainer.create_geostore_from_geojson(attributes=[train_atts, valid_atts, test_atts], zoom=4)Number of training polygons: 46 Number of validation polygons: 12 Number of test polygons: 12Data pre-processingWe normalize the composite images to have values from 0 to 1.SkyTrainer.normalize_images(scale=30, norm_type='global')Select input/output bandsSkyTrainer.select_bands(input_bands = ['B1','B2','B3','B4','B5','B6','B7','B8A','B8','B11','B12','ndvi','ndwi'],\ output_bands = ['cropland', 'land', 'water', 'urban']) SkyTrainer.imagesCreate TFRecords for trainingSkyTrainer.export_TFRecords(sample_size = 1000, kernel_size = 256) SkyTrainer.versions SkyTrainer.versions['training_params'].iloc[3]Training the model in AI PlatformSkyTrainer.train_model_ai_platform(model_type='CNN', model_output='segmentation', model_architecture='segnet',\ model_name='land_use_4', model_description='This model segmentates the image into 4 different categories: land, cropland, urban, and water', batch_size=8, epochs=25) SkyTrainer.training_params SkyTrainer.models SkyTrainer.versionsDeployed the model to AI PlatformBefore it's possible to get predictions from the trained model, it needs to be deployed on AI Platform. The first step is to create the model. The second step is to create a version. See [this guide](https://cloud.google.com/ml-engine/docs/tensorflow/deploying-models) for details. 
Note that models and versions can be monitored from the [AI Platform models page](http://console.cloud.google.com/ai-platform/models) of the Cloud Console.SkyTrainer.deploy_model_ai_platform() SkyTrainer.versions*** **Prediction of Deep Learning models****Create `Predictor` object**SkyPredictor = deepsky.Predictor()**Get token**SkyPredictor.get_token(email='')Select modelSkyPredictor.models versions = SkyPredictor.select_model(model_name='land_use_4')Select versionSkyPredictor.select_version(version=versions[1])Creation of GeostoreWe select the areas in which we will perform the prediction.atts={'geojson': {'type': 'FeatureCollection', 'features': [{'type': 'Feature', 'properties': {}, 'geometry': {'type': 'Polygon', 'coordinates': [[[-105.32695770263672,39.97922477476731], [-105.14671325683594,39.97922477476731], [-105.14671325683594,40.06309590736528], [-105.32695770263672,40.06309590736528], [-105.32695770263672,39.97922477476731]]]}}]}} SkyPredictor.create_geostore_from_geojson(atts)Predicting in AI PlatformSkyPredictor.predict_ai_platform()____________________def display_change(a, b): a = a[0] b = b[0] print("Original consumption: {} BTU".format(round(a, 2))) print("Modified consumption: {} BTU".format(round(b, 2))) print("Absolute savings: {} BTU".format(round((a - b), 2))) print("Percentage savings: {} %".format(round((1 - (b / a)) * 100, 2))) house = X_test.iloc[3:4] house['total square footage'] original = model.predict(house) house['main water heater age'] house_modified = house.copy() house_modified.loc[:, 'level of insulation'].replace('Poorly insulated', 'Well insulated', inplace = True) display_change(original, model.predict(house_modified)) house_modified = house.copy() house_modified.loc[:, 'energy star qualified windows'].replace('No', 'Yes', inplace = True) display_change(original, model.predict(house_modified)) house_modified = house.copy() house_modified.loc[:, "main space heating equipment type"].replace( "Central furnace", "Heat pump", inplace=True ) display_change(original, model.predict(house_modified)) house_modified = house.copy() house_modified.loc[:, "age of main space heating equipment"].replace( "5 to 9 years old", "Less than 2 years old", inplace=True ) display_change(original, model.predict(house_modified)) house_modified = house.copy() house_modified.loc[:, 'smart thermostat'].replace('No', 'Yes', inplace = True) display_change(original, model.predict(house_modified)) top_features = pd.DataFrame( {"feature_name": model.booster_.feature_name(), "importance": model.booster_.feature_importance()} ) top_features.sort_values(by = 'importance', ascending = False, inplace = True) top_features.drop([250, 249, 25], inplace = True) for i in [20, 30, 40, 45, 50, 100, 300]: model = LGBMRegressor( boosting_type="gbdt", n_estimators=40000, reg_lambda=2, num_leaves=20, learning_rate=0.001, subsample=0.2, colsample_bytree=0.2, n_jobs=8, ) X_train_top = X_train.iloc[:, top_features.iloc[:i].index] X_test_top = X_test.iloc[:, top_features.iloc[:i].index] model.fit(X=X_train_top, y=y_train) y_pred = model.predict(X_test_top) r2 = r2_score(y_test, y_pred) print("n_featires: {0}, test: {1}".format(i, r2)) print(top_features.iloc[:40].feature_name)242 total_square_footage 241 total_heated_square_footage 253 dry_bulb_design_temperature_(f)_expected_to_be... 251 annual_average_ground_water_temperature_(f)_fo... 248 heating_degree_days,_30-year_average_1981-2010... 245 cooling_degree_days,_30-year_average_1981-2010... 
243 total_uncooled_square_footage 179 respondent_age 240 total_cooled_square_footage 252 dry_bulb_design_temperature_(f)_expected_to_be... 61 frequency_of_microwave_use 150 n._of_inside_light_bulbs_turned_on_at_least_4_... 114 winter_temperature_when_no_one_is_home_during_... 115 winter_temperature_at_night 132 summer_temperature_when_no_one_is_home_during_... 85 frequency_of_clothes_dryer_use 52 frequency_of_use_of_cooktop_part_of_stove 184 [...]US Vacancy Rate National Econcometric Data CollectionGoal: Organize your data to streamline the next steps of yourcapstone#Import pandas, matplotlib.pyplot, and seaborn import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import os import numpy as np #change directory to get FRED data path= '/Users/josephfrasca/Coding_Stuff/Springboard/Capstone_2/data/raw/fredgraph' os.chdir(path) os.getcwd() os.listdir()Data Loading# load FRED data - current US national data df_yr = pd.read_csv('Annual.csv') df_m = pd.read_csv('Monthly.csv') df_q = pd.read_csv('Quarterly.csv')Data Definition - Before Merge○ Goal: Gain an understanding of your data features to inform thenext steps of your project.○ Time estimate: 1-2 hours■ Column names■ Data types■ Description of the columns■ Counts and percents unique values■ Ranges of values- Hint: here are some useful questions to ask yourself during thisprocess:- Do your column names correspond to what those columnsstore?- Check the data types of your columns. Are they sensible?- Calculate summary statistics for each of your columns, suchas mean, median, mode, standard deviation, range, andnumber of unique values. What does this tell you about yourdata? What do you now need to investigate?#see info/describe/unique value counts to get a summary of the data df_yr.info() df_yr.describe() df_yr.nunique() df_yr['MEHOINUSA672N'].value_counts(normalize=True)*100 df_m.info() df_m.describe() df_m['TLRESCONS'].value_counts(normalize=True)*100 df_q.info() df_q['RRVRUSQ156N'].value_counts(normalize=True)*100 df_q.describe() #Call the head/tail method to explore FRED data #SPPOPGROWUSA has data from 1961-01-01 to 2019-01-01 #MEHOINUSA672N has data from 1984-01-01 to 2018-01-01 df_yr ''' UNRATE data from 1948-01-01 to 2020-07-01 INTDSRUSM193N from 1950-01-01 to 2020-07-01 CUUR0000SEHA from 1914-12-01 to 2020-07-01 CSUSHPINSA from 1987-01-01 to 2020-06-01 HOUST from 1959-01-01 to 2020-07-01 WPUIP2311001 from 1986-06-01 to 2020-07-01 TLRESCONS from 2002-01-01 to 2020-06-01 ''' df_m.tail() #RRVRUSQ156N is from 1956-01-01 to 2020-04-01 df_q.tail()Data Cleaning○ Goal: Clean up the data in order to prepare it for the next steps ofyour project.○ Time estimate: 1-2 hours ■ NA or missing values■ Duplicates- Hint: don’t forget about the following awesome Python functions for datacleaning, which make life a whole lot easier:- loc[] - filter your data by label- iloc[] - filter your data by indexes- apply() - execute a function across an axis of a DataFrame- drop() - drop columns from a DataFrame- is_unique() - check if a column is a unique identifier- Series methods, such as str.contains(), which can be used to check ifa certain substring occurs in a string of a Series, and str.extract(),which can be used to extract capture groups with a certain regex (orregular expression ) pattern- numPy methods like .where(), to clean columns. 
Recall that suchmethods have the structure: np.where(condition, then, else)- DataFrame methods to check for null values, such asdf.isnull().values.any()#convert DATE columns to datetime object df_q['DATE']= pd.to_datetime(df_q['DATE']) df_q.dtypes df_m['DATE']= pd.to_datetime(df_m['DATE']) df_m.dtypes df_yr['DATE']= pd.to_datetime(df_yr['DATE']) df_yr.dtypesData Joining#merge yearly df with quarterly: df2 df2 = pd.merge_ordered(df_yr, df_q, fill_method="ffill") df2 #merge df2 with monthly df: df3 df3 = pd.merge_ordered(df2, df_m, fill_method="ffill") df3.iloc[853:] df3.dtypes #replace '.'s with NaN df3 = df3.replace(".", np.nan) #convert dtype objects to floats as needed df3 = df3.astype({"MEHOINUSA672N": float, "UNRATE": float, "INTDSRUSM193N": float, "CUUR0000SEHA": float, "CSUSHPINSA": float, "HOUST": float, "WPUIP2311001": float, "TLRESCONS": float}) df3.dtypes #explore combined datas in df3 - stored in data definition table df3.info() df3.describe().T df3.nunique() df3['RRVRUSQ156N'].value_counts(normalize=True)*100 #change column headers to make more user friendly df3.rename(columns = {'SPPOPGROWUSA':'uspop_growth', 'MEHOINUSA672N': 'med_hIncome', 'UNRATE': 'unemplt_rate', 'INTDSRUSM193N': 'int_rate', 'CUUR0000SEHA': 'cpi_rent', 'CSUSHPINSA': 'homePrice_index', 'HOUST': 'newHouse_starts', 'WPUIP2311001': 'ppi_resConstruct', 'RRVRUSQ156N': 'rentl_vacnyRate', 'TLRESCONS': 'resConstruct_spending'}, inplace = True) df3.T #deal with NAN #determine the outlier for NaNs nonnull_counts = df3.apply(lambda x: x.count(), axis=1) nonnull_counts = pd.DataFrame(nonnull_counts) nonnull_counts nonnull_counts.describe() #plot nonnull counts, seems like index 400-1100 is ballpark 25-75% nonnull_counts.plot() plt.xlabel('index number') plt.ylabel('non-null counts') plt.show() nonnull_counts.plot.hist() plt.xlabel('non-null counts') #to get within 1 standard deviation of non null data, need to use rows 421-1266 inclusive nonnull_counts.iloc[420:] #the years outside 1 standard deviation of NaN values are pre 1950 and post 6/2020 df3.iloc[421:1267].T #vacany rate data starts at year 1956, so we can confidently drop rows without rental vacancy rate and get the majority of the data #create another data frame that drops rows with no vacany rate data missing_vacancy = df3['rentl_vacnyRate'].isnull() df3_1956 = df3[missing_vacancy == False] df3_1956.T #check if there are any any NaN values left in Rental vacany rate. df3_1956['rentl_vacnyRate'].isnull().values.any() #calculate percent NaN for df3_1956 sum(df3_1956.isna().sum())/sum(df3_1956.count())*100 #calculate percent NaN per column df3_1956.isna().sum()/len(df3_1956)*100 #calculate percent not NaN per column df3_1956.count()/len(df3_1956)*100 df3_1956.describe().T #compare df3 to df3_1956 df3.describe().T #examine distribution of values, don't seem to be any extreme outliers df3_1956.hist(figsize=(15,10)) plt.subplots_adjust(hspace=0.5);Save Datadf3_1956.to_csv(r'/Users/josephfrasca/Coding_Stuff/Springboard/Capstone_2/data/interim/df3_1956', index=False)MetaProgramming In Python Classes in Python - What is a class in Python?class Test: pass a = Test() a type(a) type(Test) type(type)Classes - Nothing but instances of types. Class technically is a sugar over the native 'type' What is type in Python?type? TestWithType = type('TestWithType', (object,), {}) type(TestWithType) ins1 = TestWithType() type(ins1) type('TestWithType', (object,), {})()'type' is an important native structure used for creating classes. 
Life Cycle involved in a class - Vanillaclass TestClass: def __new__(cls, *args, **kwargs): print('new method called') instance = super(TestClass, cls).__new__(cls, *args, **kwargs) return instance def __call__(self, a, b, c): self.call_count += 1 print('call method called') return a * b * c def __init__(self): self.call_count = 0 super(TestClass, self).__init__() print('init method called') def get_call_count(self): return self.call_count a = TestClass() a(1,2,3) a.get_call_count()What is type? 'type' defines how a class behaves in Python. Got it. Well then - Can I change 'how' a class behaves in Python? - MetaClasses Metaclassesclass MySingletonMeta(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(MySingletonMeta, cls).__call__(*args) return cls._instances[cls] class MySingletonClass(metaclass=MySingletonMeta): def __init__(self): self.i = 1 a = MySingletonClass() b = MySingletonClass() type(a), id(a) , type(b), id(b)LifeCycle with Metaclassesclass MyMetaClass(type): _test_attribute = 1 def __new__(cls, *args, **kwargs): print("metaclass new method called") return super(MyMetaClass, cls).__new__(cls, *args, **kwargs) def __call__(cls, *args, **kwargs): print("metaclass call method called") return super(MyMetaClass, cls).__call__(*args, **kwargs) def __init__(self, *args, **kwargs): print("metaclass init method called") return super(MyMetaClass, self).__init__(*args, **kwargs) def test_method_1(self): print("MyMetaClass - Test method 1 called") class MyClass(metaclass=MyMetaClass): def __new__(cls, *args, **kwargs): print("instance new method called") return super(MyClass, cls).__new__(cls, *args, **kwargs) def __init__(self, *args, **kwargs): print("instance init method called") return super(MyClass, self).__init__(*args, **kwargs) ins2 = MyClass() MyClass._test_attribute MyClass.__mro__ MyMetaClass.__mro__Pattern 1 : Abstract Classesfrom abc import ABCMeta, ABC, abstractmethod ABCMeta? 
class MyAbstractClass(metaclass=ABCMeta): def __init__(self): pass @abstractmethod def my_abstract_method(self): pass MyAbstractClass() class MyChildClass(MyAbstractClass): def __init__(self): pass def my_abstract_method(self): pass mcc = MyChildClass() mccPattern 2 : Abstract family of singleton classes - Combine two metaclassesclass MySingletonABCMeta(ABCMeta): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(MySingletonABCMeta, cls).__call__(*args) return cls._instances[cls] class MyAbstractSingletonClass(metaclass=MySingletonABCMeta): def __init__(self): pass @abstractmethod def my_abstract_method(self): pass MyAbstractSingletonClass() class MyAbstractSingletonChild(MyAbstractSingletonClass): def __init__(self): pass def my_abstract_method(self): pass a1 = MyAbstractSingletonChild() b1 = MyAbstractSingletonChild() type(a1), id(a1), type(b1), id(b1)Pattern 3 : Pooled Objectsclass MyBeanMeta(type): _instances = {} def __call__(cls, *args): print(args) key = tuple((cls, args)) if key not in cls._instances: cls._instances[key] = super(MyBeanMeta, cls).__call__(*args) return cls._instances[key] class MyBeanClass(metaclass=MyBeanMeta): def __init__(self, a ): self.a = a bn1 = MyBeanClass(1) bn2 = MyBeanClass(2) bn3 = MyBeanClass(3) bn4 = MyBeanClass(1) id(bn1), id(bn2), id(bn3), id(bn4)Pattern 4 : Logging using Metaclassesimport logging logging.basicConfig(filename='example.log', level=logging.INFO) logging.debug('This message should go to the log file') logging.info('So should this') logging.warning('And this, too') class MyLogSingletonMeta(type): logger = logging.getLogger('abc') _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: instance = super(MyLogSingletonMeta, cls).__call__(*args) cls._instances[cls] = instance instance.__dict__['logger'] = logging.getLogger('abc') return cls._instances[cls] class MyLogEnabledClass(metaclass=MyLogSingletonMeta): def test_function(self): self.logger.info('Inside test_function method of Log Enabled class') pass lec_instance1 = MyLogEnabledClass() lec_instance2 = MyLogEnabledClass() lec_instance1.test_function() print(id(lec_instance1), id(lec_instance2)) !cat example.log class MyLogger: def __init__(self, logger=None): self.logger = logger def __call__(self, func): def wrapper(*args, **kwargs): if self.logger is None: print(str(func) + " is called") else: self.logger.info(str(func) + " is called") return func(*args, **kwargs) return wrapper class MyLoggingMeta(type): def __new__(cls, name, bases, attrs): for item, value in attrs.items(): if callable(value): print("Function item :" + str(item), str(value), type(value)) attrs[item] = MyLogger()(value) else: print(str(item), str(value), type(value)) return super(MyLoggingMeta, cls).__new__(cls, name, bases, attrs) class MyClass1(metaclass=MyLoggingMeta): def test_m1(self): pass def test_m2(self): pass a= MyClass1() a.test_m2() a.test_m1() is calledPattern 5 : Sealed classesclass MySealedMeta(type): def __new__(cls, name, bases, attrs): all_metaclasses = [type(x) for x in bases] if MySealedMeta in all_metaclasses: raise TypeError("Sealed class cannot be sublcassed") return super(MySealedMeta, cls).__new__(cls, name, bases, attrs) class MySealedClass(metaclass=MySealedMeta): pass class MyChildOfSealed(MySealedClass): passLoad datacorpus_dir = '../../data/corpus/' model_ngrams_dir = '../../data/ngrams/' model_gpt2_dir = '../../data/gpt2/' test = pd.read_csv(f'{corpus_dir}test.csv')Load resultstestL = 
pd.read_csv(f'{model_ngrams_dir}testL.csv') testKNI = pd.read_csv(f'{model_ngrams_dir}testKNI.csv') testGPT2 = pd.read_csv(f'{model_gpt2_dir}testGPT2.csv') for i in range(10): testGPT2[f'{i}'] = testGPT2[f'{i}'].str.lower()Helper functiongen_wer_table = lambda df: pd.DataFrame({f'{col}': [wer(correct, changed) for correct, changed in zip(test.cor, df[f'{col}'])] for col in range(10)}) wercor = pd.DataFrame({'0': [wer(correct, changed) for correct, changed in zip(test.cor, test.err)]}) wercor.mean()Lidstone (add-a smoothing) trigramwerL = gen_wer_table(testL) werL.mean() werL.min(axis=1).mean() for i in range(10): print(i, sum(werL[f'{i}'] == werL.min(axis=1)))0 611 1 489 2 428 3 465 4 463 5 483 6 493 7 556 8 617 9 776Trigram with Kneser-Ney smoothing with InterpolationwerKNI = gen_wer_table(testKNI) werKNI.mean() werKNI.min(axis=1).mean() for i in range(10): print(i, sum(werKNI[f'{i}'] == werKNI.min(axis=1)))0 1553 1 527 2 395 3 358 4 350 5 319 6 339 7 350 8 357 9 519GPT-2werGPT2 = gen_wer_table(testGPT2) werGPT2.mean() werGPT2.min(axis=1).mean() for i in range(10): print(i, sum(werGPT2[f'{i}'] == werGPT2.min(axis=1)))0 1906 1 1082 2 732 3 591 4 519 5 496 6 463 7 431 8 415 9 410AllwerMIN = np.array([werL.min(axis=1), werKNI.min(axis=1), werGPT2.min(axis=1)]).min(axis=0) for i in range(10): print(i, sum(werL[f'{i}'] == werMIN)) for i in range(10): print(i, sum(werKNI[f'{i}'] == werMIN)) for i in range(10): print(i, sum(werGPT2[f'{i}'] == werMIN))0 1372 1 680 2 401 3 306 4 253 5 237 6 223 7 198 8 198 9 207Support for alternating automata The following automata are what we will use as examples.aut1, aut2, aut3, aut4, aut5 = spot.automata(''' HOA: v1 tool: "ltl3ba" "1.1.3" name: "VWAA for FGa && GFb" States: 6 Start: 0 acc-name: co-Buchi Acceptance: 1 Fin(0) AP: 2 "a" "b" properties: trans-labels explicit-labels state-acc univ-branch very-weak --BODY-- State: 0 "(FG(a) && GF(b))" [t] 3&1 State: 1 "GF(b)" [(1)] 1 [(!1)] 2&1 State: 2 "F(b)" {0} [(1)] 5 [(!1)] 2 State: 3 "FG(a)" {0} [(0)] 4 [t] 3 State: 4 "G(a)" [(0)] 4 State: 5 "t" [t] 5 --END-- /* Example from ADL's PSL2TGBA talk. */ HOA: v1 States: 3 Start: 0 acc-name: co-Buchi Acceptance: 1 Fin(0) AP: 3 "a" "b" "p" --BODY-- State: 0 "(a;a*;b)*" {0} [0] 1 [!0] 2 State: 1 "a*;b;(a;a*;b)*" {0} [0&1&2] 0&1 [!1&2] 1 [!0&!1] 2 [!0&1&2] 0 State: 2 [t] 2 --END-- HOA: v1 States: 5 Start: 3 acc-name: co-Buchi Acceptance: 1 Fin(0) AP: 3 "a" "b" "p" --BODY-- State: 0 "(a;a*;b)*" {0} [0] 1 [!0] 2 State: 1 "a*;b;(a;a*;b)*" {0} [0&1&2] 0&1 [!1&2] 1 [!0&!1] 2 [!0&1&2] 0 State: 2 [t] 2 State: 3 [0] 4&0 State: 4 [t] 3 --END-- HOA: v1 States: 3 Start: 0 acc-name: co-Buchi Acceptance: 1 Fin(0) AP: 3 "a" "b" "p" --BODY-- State: 0 "(a;a*;b)*" {0} [0] 1 [!0] 2 State: 1 "a*;b;(a;a*;b)*" {0} [0&1&2] 0&1 [!1&2] 1 [!0&!1] 2 [!0&1&2] 0 State: 2 [t] 2 --END-- HOA: v1 tool: "ltl3dra" "0.2.2" name: "VWAA for GFa" States: 3 Start: 0 acc-name: co-Buchi Acceptance: 1 Fin(0) AP: 1 "a" properties: trans-labels explicit-labels state-acc univ-branch very-weak --BODY-- State: 0 "GF(a)" [t] 1&0 State: 1 "F(a)" {0} [(0)] 2 [t] 1 State: 2 "t" [t] 2 --END-- ''')Various display options Here is the default output, using the `bav` options as set by default in the first cell.display_inline(aut1, aut2, aut3, aut4, aut5)If the state labels take too much space, you can reduce the size of the automaton by forcing states to be numbered with option `1`. 
The original label is still displayed as a tooltip when the mouse is over the state.Note that passing option `show=...` to `display_inline` is similar to calling `aut.show(...)` on each argument.display_inline(aut1, aut2, aut3, aut4, aut5, show='.bav1')When working with alternating automata, it is quite common to hide "true states", and display "exiting transitions instead". You can do that with option `u`.display_inline(aut1, aut2, aut3, aut4, aut5, show='.bav1u')Let's make sure that option `u` and `s` (to display SCCs) work well together:display_inline(aut1, aut2, aut3, aut4, aut5, show='.bav1us')Alternation removal The `remove_alternation()` function works on any alternating automaton that is weak (not necessarily very weak), i.e., in each SCC all transition should belong to the same accepting sets.The second argument of `remove_alternation()`, set to `True` below, simply asks for states to be labeled to help debugging. As the function builds Transition-based Generalized Büchi acceptance, it can be worthwhile to apply `scc_filter()` in an attempt to reduce the number of acceptance sets.The next cell shows this two-step process on our first example automaton.nba1t = spot.remove_alternation(aut1, True) nba1 = spot.scc_filter(nba1t, True) display_inline(aut1.show('.bav1u'), nba1t, nba1)Let's apply this process to the other 4 automata (which are not very-weak, unlike `aut1`). The states marked with `~` are part of a break-point construction.nba2, nba3, nba4, nba5 = [spot.scc_filter(spot.remove_alternation(a, True), True) for a in (aut2, aut3, aut4, aut5)] display_inline(nba2, nba3, nba4, nba5)The following demonstrate that very weak (non-alternating) Büchi automata can be complemented via alternation removal.pos = spot.automaton("""HOA: v1 name: "(a & (Fa R XFb)) | (!a & (G!a U XG!b))" States: 6 Start: 0 AP: 2 "a" "b" acc-name: Buchi Acceptance: 1 Inf(0) properties: trans-labels explicit-labels state-acc semi-deterministic --BODY-- State: 0 [0] 1 [!0] 2 [!0] 3 State: 1 [!1] 1 [1] 4 State: 2 {0} [!1] 2 State: 3 [!0] 3 [!0] 5 State: 4 {0} [t] 4 State: 5 {0} [!0&!1] 5 --END--""") altneg = spot.dualize(pos) neg = spot.remove_alternation(altneg) display_inline(pos, altneg.show('.bvu'), neg) # Issue #382. w = spot.parse_word('cycle{!a&b}').as_automaton() assert pos.intersects(w) != neg.intersects(w)2020년 5월 29일 금요일 BaekJoon - 2225번 : 합분해 문제 : https://www.acmicpc.net/problem/2225 블로그 : https://somjang.tistory.com/entry/BaeKJoon-2225%EB%B2%88-%ED%95%A9%EB%B6%84%ED%95%B4-Python 첫번째 시도inputNums = input() inputNums = inputNums.split() N = int(inputNums[0]) K = int(inputNums[1]) nc = [[0]*(N+1) for _ in range(K+1)] nc[0][0] = 1 # nc[0][0] = 1 for i in range(1, K+1): for j in range(0, N+1): nc[i][j] = nc[i-1][j] + nc[i][j-1] nc[i][j] = nc[i][j] % 1000000000 # print(nc) print(nc[K][N])2.1. Learning the basics of the Unix shellpwd ls ls -l cd images pwd ls cd .. pwd ls -la ~/.ipython mkdir md_files mv md_files markdown_files ls rmdir markdown_filesMulticlass logistic regression from scratchIf you've made it through our tutorials on linear regression from scratch, then you're past the hardest part. You already know how to load and manipulate data, build computation graphs on the fly, and take derivatives. You also know how to define a loss function, construct a model, and write your own optimizer. Nearly all neural networks that we'll build in the real world consist of these same fundamental parts. The main differences will be the type and scale of the data and the complexity of the models. 
And every year or two, a new hipster optimizer comes around, but at their core they're all subtle variations of stochastic gradient descent.In [the previous chapter](logistic-regressio-gluon.ipynb), we introduced logistic regression, a classic algorithm for performing binary classification.We implemented a model $$\hat{y} = \sigma( \boldsymbol{x} \boldsymbol{w}^T + b)$$where $\sigma$ is the sigmoid squashing function.This activation function on the final layer was crucial because it forced our outputs to take values in the range [0,1]. That allowed us to interpret these outputs as probabilties.We then updated our parameters to give the true labels (which take values either 1 or 0)the highest probability.In that tutorial, we looked at predicting whether or not an individual's income exceeded $50k based on features available in 1994 census data. Binary classification is quite useful. We can use it to predict spam vs. not spamor cancer vs not cancer. But not every problem fits the mold of binary classification. Sometimes we encounter a problem where each example could belong to one of $k$ classes.For example, a photograph might depict a cat or a dog or a zebra or ... (you get the point).Given $k$ classes, the most naive way to solve a *multiclass classification* problem is to train $k$ different binary classifiers $f_i(\boldsymbol{x})$. We could then predict that an example $\boldsymbol{x}$ belongs to the class $i$ for which the probability that the label applies is highest:$$\max_i {f_i(\boldsymbol{x})}$$There's a smarter way to go about this. We could force the output layer to be a discrete probability distribution over the $k$ classes.To be a valid probability distribution, we'll want the output $\hat{y}$ to (i) contain only non-negative values, and (ii) sum to 1. We accomplish this by using the *softmax* function.Given an input vector $z$, softmax does two things. First, it exponentiates (elementwise) $e^{z}$, forcing all values to be strictly positive.Then it normalizes so that all values sum to $1$.Following the softmax operation computes the following$$\text{softmax}(\boldsymbol{z}) = \frac{e^{\boldsymbol{z}} }{\sum_{i=1}^k e^{z_i}}$$Because now we have $k$ outputs and not $1$ we'll need weights connecting each of our inputs to each of our outputs. Graphically, the network looks something like this:![](https://github.com/zackchase/mxnet-the-straight-dope/blob/master/img/simple-softmax-net.png?raw=true)We can represent these weights one for each input node, output node pair in a matrix $W$.We generate the linear mapping from inputs to outputs via a matrix-vector product $\boldsymbol{x} W + \boldsymbol{b}$. Note that the bias term is now a vector, with one component for each output node.The whole model, including the activation function can be written:$$\hat{y} = \text{softmax}(\boldsymbol{x} W + \boldsymbol{b})$$This model is sometimes called *multiclass logistic regression*. Other common names for it include *softmax regression* and *multinomial regression*.For these concepts to sink in, let's actually implement softmax regression,and pick a slightly more interesting dataset this time. We're going to classify images of handwritten digits like these:![png](https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/example/mnist.png) About batch trainingIn the above, we used plain lowercase letters for scalar variables, bolded lowercase letters for **row** vectors, and uppercase letters for matrices. Assume we have $d$ inputs and $k$ outputs. 
Let's note the shapes of the various variables explicitly as follows:$$\underset{1 \times k}{\boldsymbol z} = \underset{1 \times d}{\boldsymbol{x}}\ \underset{d \times k}{W} + \underset{1 \times k}{\boldsymbol{b}}$$Often we would one-hot encode the output label, for example $\hat y = 5$ would be $\boldsymbol {\hat y}_{one-hot} = [0, 0, 0, 0, 1, 0, 0, 0, 0, 0]$ when one-hot encoded for a 10-class classfication problem. So $\hat{y} = \text{softmax}(\boldsymbol z)$ becomes$$\underset{1 \times k}{\boldsymbol{\hat{y}}_{one-hot}} = \text{softmax}_{one-hot}(\underset{1 \times k}{\boldsymbol z})$$When we input a batch of $m$ training examples, we would have matrix $\underset{m \times d}{X}$ that is the vertical stacking of individual training examples $\boldsymbol x_i$, due to the choice of using row vectors.$$X=\begin{bmatrix} \boldsymbol x_1 \\ \boldsymbol x_2 \\ \vdots \\ \boldsymbol x_m\end{bmatrix}=\begin{bmatrix} x_{11} & x_{12} & x_{13} & \dots & x_{1d} \\ x_{21} & x_{22} & x_{23} & \dots & x_{2d} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ x_{m1} & x_{m2} & x_{m3} & \dots & x_{md}\end{bmatrix}$$Under this batch training situation, ${\boldsymbol{\hat{y}}_{one-hot}} = \text{softmax}({\boldsymbol z})$ turns into$$Y = \text{softmax}(Z) = \text{softmax}(XW + B)$$where matrix $\underset{m \times k}{B}$ is formed by having $m$ copies of $\boldsymbol b$ as follows$$ B = \begin{bmatrix} \boldsymbol b \\ \boldsymbol b \\ \vdots \\ \boldsymbol b\end{bmatrix}=\begin{bmatrix} b_{1} & b_{2} & b_{3} & \dots & b_{k} \\ b_{1} & b_{2} & b_{3} & \dots & b_{k} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ b_{1} & b_{2} & b_{3} & \dots & b_{k}\end{bmatrix}$$In actual implementation we can often get away with using $\boldsymbol b$ directly instead of $B$ in the equation for $Z$ above, due to [broadcasting](https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html).Each row of matrix $\underset{m \times k}{Z}$ corresponds to one training example. The softmax function operates on each row of matrix $Z$ and returns a matrix $\underset{m \times k}Y$, each row of which corresponds to the one-hot encoded prediction of one training example. ImportsTo start, let's import the usual libraries.from __future__ import print_function import numpy as np import mxnet as mx from mxnet import nd, autograd, gluon mx.random.seed(1)Set ContextWe'll also want to set the compute context where our data will typically live and where we'll be doing our modeling. Feel free to go ahead and change `model_ctx` to `mx.gpu(0)` if you're running on an appropriately endowed machine.data_ctx = mx.cpu() model_ctx = mx.cpu() # model_ctx = mx.gpu()The MNIST datasetThis time we're going to work with real data, each a 28 by 28 centrally cropped black & white photograph of a handwritten digit. Our task will be come up with a model that can associate each image with the digit (0-9) that it depicts.To start, we'll use MXNet's utility for grabbing a copy of this dataset. The datasets accept a transform callback that can preprocess each item. Here we cast data and label to floats and normalize data to range [0, 1]:def transform(data, label): return data.astype(np.float32)/255, label.astype(np.float32) mnist_train = gluon.data.vision.MNIST(train=True, transform=transform) mnist_test = gluon.data.vision.MNIST(train=False, transform=transform)There are two parts of the dataset for training and testing. 
Each part has N items and each item is a tuple of an image and a label:image, label = mnist_train[0] print(image.shape, label)Note that each image has been formatted as a 3-tuple (height, width, channel). For color images, the channel would have 3 dimensions (red, green and blue). Record the data and label shapesGenerally, we don't want our model code to care too much about the exact shape of our input data. This way we could switch in a different dataset without changing the code that follows. Let's define variables to hold the number of inputs and outputs.num_inputs = 784 num_outputs = 10 num_examples = 60000Machine learning libraries generally expect to find images in (batch, channel, height, width) format. However, most libraries for visualization prefer (height, width, channel). Let's transpose our image into the expected shape. In this case, matplotlib expects either (height, width) or (height, width, channel) with RGB channels, so let's broadcast our single channel to 3.im = mx.nd.tile(image, (1,1,3)) print(im.shape)Now we can visualize our image and make sure that our data and labels line up.import matplotlib.pyplot as plt plt.imshow(im.asnumpy()) plt.show()Ok, that's a beautiful five. Load the data iteratorNow let's load these images into a data iterator so we don't have to do the heavy lifting.batch_size = 64 train_data = mx.gluon.data.DataLoader(mnist_train, batch_size, shuffle=True)We're also going to want to load up an iterator with *test* data. After we train on the training dataset we're going to want to test our model on the test data. Otherwise, for all we know, our model could be doing something stupid (or treacherous?) like memorizing the training examples and regurgitating the labels on command.test_data = mx.gluon.data.DataLoader(mnist_test, batch_size, shuffle=False)Allocate model parametersNow we're going to define our model. For this example, we're going to ignore the multimodal structure of our data and just flatten each image into a single 1D vector with 28x28 = 784 components. Because our task is multiclass classification, we want to assign a probability to each of the classes $P(Y = c \mid X)$ given the input $X$. In order to do this we're going to need one vector of 784 weights for each class, connecting each feature to the corresponding output. Because there are 10 classes, we can collect these weights together in a 784 by 10 matrix.We'll also want to allocate one offset for each of the outputs. We call these offsets the *bias term* and collect them in the 10-dimensional array ``b``.W = nd.random_normal(shape=(num_inputs, num_outputs),ctx=model_ctx) b = nd.random_normal(shape=num_outputs,ctx=model_ctx) params = [W, b]As before, we need to let MXNet know that we'll be expecting gradients corresponding to each of these parameters during training.for param in params: param.attach_grad()Multiclass logistic regressionIn the linear regression tutorial, we performed regression, so we had just one output $\hat{y}$ and tried to push this value as close as possible to the true target $y$. Here, instead of regression, we are performing *classification*, where we want to assign each input $X$ to one of $L$ classes. The basic modeling idea is that we're going to linearly map our input $X$ onto 10 different real valued outputs ``y_linear``. Then, before outputting these values, we'll want to normalize them so that they are non-negative and sum to 1. 
This normalization allows us to interpret the output $\hat{y}$ as a valid probability distribution.def softmax(y_linear): exp = nd.exp(y_linear-nd.max(y_linear, axis=1).reshape((-1,1))) norms = nd.sum(exp, axis=1).reshape((-1,1)) return exp / norms sample_y_linear = nd.random_normal(shape=(2,10)) sample_yhat = softmax(sample_y_linear) print(sample_yhat)Let's confirm that indeed all of our rows sum to 1.print(nd.sum(sample_yhat, axis=1))But for small rounding errors, the function works as expected. Define the modelNow we're ready to define our modeldef net(X): y_linear = nd.dot(X, W) + b yhat = softmax(y_linear) return yhatThe cross-entropy loss functionBefore we can start training, we're going to need to define a loss function that makes sense when our prediction is a probability distribution. The relevant loss function here is called cross-entropy and it may be the most common loss function you'll find in all of deep learning. That's because at the moment, classification problems tend to be far more abundant than regression problems. The basic idea is that we're going to take a target Y that has been formatted as a one-hot vector, meaning one value corresponding to the correct label is set to 1 and the others are set to 0, e.g. ``[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]``. The basic idea of cross-entropy loss is that we only care about how much probability the prediction assigned to the correct label. In other words, for true label 2, we only care about the component of yhat corresponding to 2. Cross-entropy attempts to maximize the log-likelihood given to the correct labels.def cross_entropy(yhat, y): return - nd.sum(y * nd.log(yhat+1e-6))OptimizerFor this example we'll be using the same stochastic gradient descent (SGD) optimizer as last time.def SGD(params, lr): for param in params: param[:] = param - lr * param.gradWrite evaluation loop to calculate accuracyWhile cross-entropy is nice, differentiable loss function, it's not the way humans usually evaluate performance on multiple choice tasks. More commonly we look at accuracy, the number of correct answers divided by the total number of questions. Let's write an evaluation loop that will take a data iterator and a network, returning the model's accuracy averaged over the entire dataset.def evaluate_accuracy(data_iterator, net): numerator = 0. denominator = 0. for i, (data, label) in enumerate(data_iterator): data = data.as_in_context(model_ctx).reshape((-1,784)) label = label.as_in_context(model_ctx) label_one_hot = nd.one_hot(label, 10) output = net(data) predictions = nd.argmax(output, axis=1) numerator += nd.sum(predictions == label) denominator += data.shape[0] return (numerator / denominator).asscalar()Because we initialized our model randomly, and because roughly one tenth of all examples belong to each of the ten classes, we should have an accuracy in the ball park of .10.evaluate_accuracy(test_data, net)Execute training loopepochs = 5 learning_rate = .005 for e in range(epochs): cumulative_loss = 0 for i, (data, label) in enumerate(train_data): data = data.as_in_context(model_ctx).reshape((-1,784)) label = label.as_in_context(model_ctx) label_one_hot = nd.one_hot(label, 10) with autograd.record(): output = net(data) loss = cross_entropy(output, label_one_hot) loss.backward() SGD(params, learning_rate) cumulative_loss += nd.sum(loss).asscalar() test_accuracy = evaluate_accuracy(test_data, net) train_accuracy = evaluate_accuracy(train_data, net) print("Epoch %s. 
Loss: %s, Train_acc %s, Test_acc %s" % (e, cumulative_loss/num_examples, train_accuracy, test_accuracy))Using the model for predictionLet's make it more intuitive by picking 10 random data points from the test set and use the trained model for predictions.# Define the function to do prediction def model_predict(net,data): output = net(data) return nd.argmax(output, axis=1) # let's sample 10 random data points from the test set sample_data = mx.gluon.data.DataLoader(mnist_test, 10, shuffle=True) for i, (data, label) in enumerate(sample_data): data = data.as_in_context(model_ctx) print(data.shape) im = nd.transpose(data,(1,0,2,3)) im = nd.reshape(im,(28,10*28,1)) imtiles = nd.tile(im, (1,1,3)) plt.imshow(imtiles.asnumpy()) plt.show() pred=model_predict(net,data.reshape((-1,784))) print('model predictions are:', pred) breakcreation d'une matrice tf-idf sur des espèces collectés dans la base gbifA) tf-idf- load and clean gbif dataset- create a Tf matrix- create a tf-idf matrixB) find a documents similarity with a query search, - recherche du mode de calcul de distance le plus approprié Term Frequency (tf): gives us the frequency of the word in each document in the corpus. It is the ratio of number of times the word appears in a document compared to the total number of words in that document. It increases as the number of occurrences of that word within the document increases. Each document has its own tf- tf_i = nombre d'occurence du terme i / nombre total dans le document Inverse Data Frequency (idf): used to calculate the weight of rare words across all documents in the corpus. The words that occur rarely in the corpus have a high IDF score.- idf_i = log (Nombre de documents) / nombre de documents dans lequel le terme i apparait - Combining these two tf and idf, we come up with the TF-IDF score (w) for a word in a document in the corpustf-idf = tfi * idf Le TF-IDF (term frequency-inverse document frequency) - Méthode de pondration souvent utilisée en recherche d'information et en particulier dans la fouille de textes.Elle permet d'évaluer l'importance d'un terme contenu dans un document, relativement à une collection ou un corpus. Le poids augmente proportionnellement au nombre d'occurrences du mot dans le document. Il varie également en fonction de la fréquence du mot dans le corpus. 
Des variantes de la formule originale sont souvent utilisées dans des moteurs de recherche pour apprécier la pertinence d'un document en fonction des critères de recherche de l'utilisateur.import sys sys.path.append('../lib') from search_gbif import load_clean_and_generate_tf_idf, transform_query, read_clean_dataset, load_tfidf from scipy.spatial import distance fname = "../data/tfidf/data_gbif.json"A) tf-idf- load and clean gbif dataset- create a Tf matrix- create a tf-idf matrixload_clean_and_generate_tf_idf(fname)load dataset, and clean - We suppress key fields, and convert to minus First doc, before { 'canonicalName': '', 'higherClassificationMap': {}, 'key': 0, 'kingdom': 'incertae sedis', 'kingdomKey': 0, 'rank': 'KINGDOM', 'scientificName': '', 'status': 'DOUBTFUL', 'synonym': False} then {'terms': ['incertae sedis', 'incertae sedis', 'incertae sedis', 'kingdom', 'doubtful']} - Verification:B) find a documents similarity to a query- load tdif, gbif dataset# load models data_names, X, vectorizer_model = load_tfidf() print(" load gbif dataset, and clean") fname = "../data/tfidf/data_gbif.json" dataset = read_clean_dataset(fname) print('- Verification:') print('head:') display(dataset['terms'].head()) print('tail:') display(dataset['terms'].tail())c:\users\christian\appdata\local\programs\python\python36\lib\site-packages\sklearn\base.py:311: UserWarning: Trying to unpickle estimator TfidfTransformer from version 0.20.1 when using version 0.19.1. This might lead to breaking code or invalid results. Use at your own risk. UserWarning) c:\users\christian\appdata\local\programs\python\python36\lib\site-packages\sklearn\base.py:311: UserWarning: Trying to unpickle estimator TfidfVectorizer from version 0.20.1 when using version 0.19.1. This might lead to breaking code or invalid results. Use at your own risk. UserWarning)calculate different distance, with one queryif True: query = ['zygnematophyceae zygomycota'] print('Query:', query[0]) x0 = transform_query(vectorizer_model, query) kind = ['braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', \ 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',\ 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] for metric in kind: d = distance.cdist(x0, X.toarray(), metric) print('metric', metric) print(d)Query: zygnematophyceae zygomycota metric braycurtis [[1. 1. 1. ... 1. 1. 1.]] metric canberra [[6. 5. 5. ... 8. 8. 8.]] metric chebyshev [[0.71861851 0.77977446 0.86693916 ... 0.90983784 0.9142572 0.91997606]] metric cityblock [[3.15855387 2.91872833 2.84320793 ... 3.05431457 3.04143086 3.02398492]] metric correlation [[1.00636631 1.0054856 1.00520879 ... 1.00598318 1.00593587 1.0058718 ]] metric cosine [[1. 1. 1. ... 1. 1. 1.]] metric dice [[1. 1. 1. ... 1. 1. 1.]] metric euclidean [[1.41421356 1.41421356 1.41421356 ... 1.41421356 1.41421356 1.41421356]] metric hamming [[0.01538462 0.01282051 0.01282051 ... 0.02051282 0.02051282 0.02051282]] metric jaccard [[1. 1. 1. ... 1. 1. 1.]] metric kulsinski [[1. 1. 1. ... 1. 1. 1.]] metric mahalanobis [[ nan 60119386.88941303 19749607.17384794 ... nan nan nan]] metric matching [[0.01538462 0.01282051 0.01282051 ... 
0.02051282 0.02051282 0.02051282]] metric minkowski [[1.41421356 1.414[...]- les distances "chebyshev" et "seuclidean" apparaissent les plus intéressantes Definition- The Chebyshev distance between two n-vectors u and v is the maximum norm-1 distance between their respective elements.- seuclidean = Computes the standardized Euclidean distance Distance chebyshev Versus distance standard euclidianif True: metric_lst = ['chebyshev','seuclidean'] test_lst = ['anthocerotophyta', 'archaea kingdom accepted', 'chromista ochrophyta thalassiosirales'] for i, test in enumerate(test_lst): x0 = transform_query(vectorizer_model, [test]) for metric in metric_lst: d = distance.cdist(x0, X.toarray(), metric)[0] print('query:',i+1, '"', test, '"', ', metric:', metric) index_lst = sorted(range(len(d)), key=lambda k: d[k]) dataset['d'] = d print(dataset['terms'][index_lst[0:10]]) print()query: 1 " anthocerotophyta " , metric: chebyshev 12 [plantae, anthocerotophyta, plantae, anthocero... 111 [plantae, anthocerotophyta, plantae, anthocero... 210 [plantae, anthocerotophyta, plantae, anthocero... 309 [plantae, anthocerotophyta, plantae, anthocero... 408 [plantae, anthocerotophyta, plantae, anthocero... 507 [plantae, anthocerotophyta, plantae, anthocero... 606 [plantae, anthocerotophyta, plantae, anthocero... 705 [plantae, anthocerotophyta, plantae, anthocero... 804 [plantae, anthocerotophyta, plantae, anthocero... 903 [plantae, anthocerotophyta, plantae, anthocero... Name: terms, dtype: object query: 1 " anthocerotophyta " , metric: seuclidean 12 [plantae, anthocerotophyta, plantae, anthocero... 111 [plantae, anthocerotophyta, plantae, anthocero... 210 [plantae, anthocerotophyta, plantae, anthocero... 309 [plantae, anthocerotophyta, plantae, anthocero... 408 [plantae, anthocerotophyta, plantae, anthocero... 
507 [plantae,[...]Image Classification Using SVMimport numpy as np import cv2 from matplotlib import pyplot as plt from skimage import io import glob import re import pandas as pd from sklearn.utils import Bunch from sklearn.model_selection import train_test_split, cross_val_score from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report from sklearn.svm import SVCLoad data and return a structured datasettrain_csv = pd.read_csv("./data/TrainAnnotations.csv") def load_images(path): files = glob.glob(path + "*.jpg") images = [] hsv_data = [] file_name = [] annotations =[] for file in files: name = re.sub("./data/TrainData/", "", file) image = cv2.imread(file) hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)[:,:,0] for rows in train_csv.iterrows(): if rows[1].file_name == name: annotation = rows[1].annotation annotations.append(annotation) file_name.append(name) images.append(image) hsv_data.append(hsv.flatten()) file_name = np.array(file_name) images = np.array(images) hsv_data = np.array(hsv_data) annotations = np.array(annotations) return Bunch(file_name = file_name, images = images, hsv_data = hsv_data, annotations = annotations) image_dataset = load_images("./data/TrainData/") image_dataset # imgs =['000006.jpg', '000016.jpg', '000032.jpg', '000097.jpg', '000104.jpg', '000122.jpg', '000237.jpg', '000253.jpg', # '000265.jpg', '000300.jpg'] # annotations = [] # for file in imgs: # for rows in train_csv.iterrows(): # # print(rows) # if rows[1].file_name == file: # annotation = rows[1].annotation # annotations.append(annotation) # imgs = np.array(imgs) # annotations = np.array(annotations) # x = Bunch(imgs = imgs, # annotations = annotations) # print(x){'imgs': array(['000006.jpg', '000016.jpg', '000032.jpg', '000097.jpg', '000104.jpg', '000122.jpg', '000237.jpg', '000253.jpg', '000265.jpg', '000300.jpg'], dtype='Split Data into Train and TestX_train, X_test, y_train, y_test = train_test_split( image_dataset.hsv_data, image_dataset.annotations, test_size=0.2,random_state=109) X_train.shapeTrain Dataclf = SVC(kernel='linear') clf.fit(X_train, y_train)Predictiony_pred = clf.predict(X_test)Get Accuracyprint(accuracy_score(y_test,y_pred)) print(classification_report(y_test, y_pred))precision recall f1-score support 0 0.82 0.93 0.87 90 1 0.75 0.60 0.67 35 2 0.77 0.80 0.78 25 3 0.96 0.77 0.86 31 4 1.00 0.96 0.98 24 micro avg 0.84 0.84 0.84 205 macro avg 0.86 0.81 0.83 205 weighted avg 0.84 0.84 0.84 205Perform cross-validation* Using the entire datasetscores = cross_val_score(clf, image_dataset.hsv_data, image_dataset.annotations, cv=5) scoresLoad test data# load all test data; convert them into HSV space and collect hue channel testfiles = glob.glob("./data/TestData/*.jpg") testfiles.sort() Test = [cv2.imread(f) for f in testfiles] Test = np.array([cv2.cvtColor(img, cv2.COLOR_BGR2HSV)[:,:,0].flatten() for img in Test]) # making prediction on the test datab yhat_test = clf.predict(Test) # function to turn prediction into one hot coding format def vectorize_result(nclass, j): """ Return a nclass-dimensional unit vector with 1.0 in the j-th position and zero elsewhere """ e = np.zeros((nclass,1)) e[j] = 1.0 return e # convert test prediction into one hot-coding format encode = [vectorize_result(5, yhat_test[i]) for i in range(yhat_test.shape[0])] pred_df = pd.DataFrame(np.array(encode).reshape((yhat_test.shape[0], 5)).astype(np.uint8)) # output prediction pred_df.to_csv("prediction.csv", header=False, index=False)Scatter Plotwith PrintManager(): scenario = 
Scenario("823") _, data_avg = plot_scatter_capacity_vs_cost_curve_slope(scenario, "all", "coal") data_avg _, data_avg = plot_scatter_capacity_vs_cost_curve_slope(scenario, "Eastern", ["coal", "ng"]) data_avgExponential Moving Averageimport matplotlib.pyplot as plt import numpy as np import pandas as pd from scipy import stats import seaborn as sns import scipy.stats as ss # Make plots larger plt.rcParams['figure.figsize'] = (15, 9) AAPL = pd.read_csv('AAPL_New.csv') AAPL.set_index('Date') AAPL['OpenTmr'] = AAPL['Open'].shift(-1) AAPL['OpenClose'] = (AAPL['Open']+ AAPL['Close'])/2 AAPL['HighLow'] = (AAPL['High']+ AAPL['Low'])/2 AAPL['OCHL'] = (AAPL['Open']+ AAPL['Close']+AAPL['High']+ AAPL['Low'])/4 # AAPL = AAPL.fillna(method='ffill') AAPL.tail(10) # AAPL = AAPL.dropna(0) N = AAPL.shape[0] # total num days num_boot = 300# total num bootstrap T= 250 # start day window = 200 # training period window df = AAPL[['Close']] df.reset_index(level=0, inplace=True) df.columns=['ds','y'] # exponential weighted functions X = df.y.ewm(span=5, adjust=False).mean() Y =AAPL['OpenTmr'][-(N-(T+1)):].values #X = np.column_stack([np.ones((T,1)),X]) print(X.shape) print(Y.shape) #movAverage1= (movAvg) def EMA(X, Y): T = X.shape[0] #print(T) #mu = (AAPL['Open'].mean(),AAPL['Open '].mean(),AAPL['Open '].mean()) #cov = [[1,0.75,-0.35],[0.75,1,0.9],[-0.35,0.9,1]] #F = np.random.multivariate_normal(mu,cov,T) #Sample for Y,X X = np.column_stack([np.ones((T,1)),X]) #T = X.shape[0] N = X.shape #beta = np.array([0.56,2.53,2.05,1.78]) #beta.shape=(N[1],1) #Y =X@beta+np.random.normal(0,1,(T,1)) #Y=AAPL['Open '].values #print(T) invXX = np.linalg.inv(X.transpose()@X) beta_hat = invXX@X.transpose()@Y y_hat = X@beta_hat residuals = Y-y_hat sigma2 = (1/T)*residuals.transpose()@residuals sigma = np.sqrt(sigma2) #variance - covariance of beta_hat varcov_beta_hat = (sigma2)*invXX std_beta_hat = np.sqrt(T*np.diag(varcov_beta_hat)) R_square = 1-(residuals.transpose()@residuals)/(T*np.var(Y)) adj_R_square = 1-(1-R_square)*(T-1)/(T - N[1]) #Testing Coefficents:beta_i #Null Hypotesis t_stat = (beta_hat.transpose()-0)/std_beta_hat p_val = 1-ss.norm.cdf(t_stat) #Test of joint significance F_stat= (beta_hat.transpose()@np.linalg.inv(varcov_beta_hat)@beta_hat/N[1])/(residuals.transpose()@residuals/(T-N[1])) p_val_F= 1 - ss.f.cdf(F_stat,N[1]-1, T-N[1]) return beta_hat,y_hat def ema_bootstrap(): T = 250 #print(T) N = X.shape[0] #print(N) yhat_ema = np.zeros(N-(T+1)) window = 200 num_boost = 300 # increase for t in range(T+1,N): X_train = df.y.ewm(span=5, adjust=False).mean()[t-window:t-1] #X_train = np.column_stack([np.ones((len(X_train),1)),X_train]) Y_train = AAPL[['OpenTmr']][t-window:t-1].values #print(X_train.shape) #print(Y_train.shape) X_pred = df.y.ewm(span=5, adjust=False).mean()[t-1:t] X_pred = np.column_stack([np.ones((len(X_pred),1)),X_pred]) yhat_train = EMA(X_train , Y_train)[1] res_train = Y_train - yhat_train y_pred_all = np.zeros(num_boost) for i in range (0,num_boost): #err = np.random.choice(res_train,(window-1, ),replace = True) err = res_train y_bstr = yhat_train + err beta_bstr = EMA(X_train,y_bstr)[0] #print(X_pred.shape) #print(beta_bstr.shape) y_pred_bstr = X_pred@beta_bstr y_pred_all[i] = y_pred_bstr y_pred_ema = y_pred_all.mean() yhat_ema[t-(T+1)] = y_pred_ema rmse_ema = np.sqrt(np.mean((Y[:-1] - yhat_ema[:-1])**2)) return yhat_ema,rmse_ema y, rmse = ema_bootstrap() rmse len(y) AAPL.Date.iloc[-200:] y[-200:] EMA_predict = pd.DataFrame() EMA_predict['Date'] = AAPL.Date.iloc[-200:] EMA_predict['Predict_EMA'] = 
y[-200:] EMA_predict EMA_predict.to_csv('EMA_prediction.csv')download the pre-processed data and trained modelimport subprocess import os download_savepath = '../../data/download' os.makedirs(download_savepath, exist_ok=True) if not os.path.exists('%s/pbmc_multiome_ad.h5ad'%download_savepath): subprocess.run('wget -P %s https://storage.googleapis.com/scbasset_tutorial_data/pbmc_multiome_ad.h5ad'%download_savepath, shell=True) if not os.path.exists('%s/pbmc_multiome_best_model.h5'%download_savepath): subprocess.run('wget -P %s https://storage.googleapis.com/scbasset_tutorial_data/pbmc_multiome_best_model.h5'%download_savepath, shell=True) if not os.path.exists('%s/pbmc_multiome_train_test_val.h5'%download_savepath): subprocess.run('wget -P %s https://storage.googleapis.com/scbasset_tutorial_data/pbmc_multiome_train_test_val.h5'%download_savepath, shell=True)load the dataad_file = '../../data/download/pbmc_multiome_ad.h5ad' trained_model = '../../data/download/pbmc_multiome_best_model.h5' h5_file = '../../data/download/pbmc_multiome_train_test_val.h5' os.makedirs("results", exist_ok=True) # read h5ad file ad = anndata.read_h5ad(ad_file) # load model model = make_model(32, ad.shape[0], show_summary=False) model.load_weights(trained_model)intercept vs. depthintercept = get_intercept(model) # get_intercept function sc.pp.filter_cells(ad, min_counts=0) f, ax = plt.subplots(figsize=(4,4)) r = scipy.stats.pearsonr(intercept, np.log10(ad.obs['n_genes']))[0] sns.scatterplot(intercept, np.log10(ad.obs['n_genes']), ax=ax) ax.set_xlabel('intercept') ax.set_ylabel('log10(n_peaks)') ax.set_title('Pearson R: %.3f'%r) f.savefig('results/intercept.pdf')/home/yuanh/programs/anaconda3/envs/scbasset/lib/python3.7/site-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation. FutureWarningcell embeddingsproj = get_cell_embedding(model) # get_cell_embedding function pd.DataFrame(proj).to_csv('results/projection_atac.csv') ad.obsm['projection'] = pd.read_csv('results/projection_atac.csv', index_col=0).values sc.pp.neighbors(ad, use_rep='projection') sc.tl.umap(ad) sc.tl.leiden(ad) sc.pl.umap(ad, color='leiden')/home/yuanh/programs/anaconda3/envs/scbasset/lib/python3.7/site-packages/numba/np/ufunc/parallel.py:355: NumbaWarning: The TBB threading layer requires TBB version 2019.5 or later i.e., TBB_INTERFACE_VERSION >= 11005. Found TBB_INTERFACE_VERSION = 9107. The TBB threading layer is disabled. warnings.warn(problem)imputationf = h5py.File(h5_file, 'r') X = f['X'][:].astype('float32') Y = f['Y'][:].astype('float32') Y_norm = imputation_Y_normalize(X, model)What is machine learning, and how does it work? ![Machine learning](images/01_robot.png) Agenda- What is machine learning?- What are the two main categories of machine learning?- What are some examples of machine learning?- How does machine learning "work"? 
What is machine learning?One definition: "Machine learning is the semi-automated extraction of knowledge from data"- **Knowledge from data**: Starts with a question that might be answerable using data- **Automated extraction**: A computer provides the insight- **Semi-automated**: Requires many smart decisions by a human What are the two main categories of machine learning?**Supervised learning**: Making predictions using data - Example: Is a given email "spam" or "ham"?- There is an outcome we are trying to predict ![Spam filter](images/01_spam_filter.png) **Unsupervised learning**: Extracting structure from data- Example: Segment grocery store shoppers into clusters that exhibit similar behaviors- There is no "right answer" ![Clustering](images/01_clustering.png) How does machine learning "work"?High-level steps of supervised learning:1. First, train a **machine learning model** using **labeled data** - "Labeled data" has been labeled with the outcome - "Machine learning model" learns the relationship between the attributes of the data and its outcome2. Then, make **predictions** on **new data** for which the label is unknown ![Supervised learning](images/01_supervised_learning.png) The primary goal of supervised learning is to build a model that "generalizes": It accurately predicts the **future** rather than the **past**! Questions about machine learning- How do I choose **which attributes** of my data to include in the model?- How do I choose **which model** to use?- How do I **optimize** this model for best performance?- How do I ensure that I'm building a model that will **generalize** to unseen data?- Can I **estimate** how well my model is likely to perform on unseen data?from IPython.core.display import HTML def css_styling(): styles = open("styles/custom.css", "r").read() return HTML(styles) css_styling()Introduction to Python Accessing Relational Databasesimport os import sys import time import datetime import numpy as np import pandas as pd import sqlite3Acessing [SQLite](https://docs.python.org/3/library/sqlite3.html)conn = sqlite3.connect(os.path.join("..","Data",'sqlite_example.db')) cur = conn.cursor() cur.execute("SELECT name FROM sqlite_master WHERE type='table';") print(cur.fetchall()) # Create table cur.execute('''CREATE TABLE IF NOT EXISTS stocks (date text, trans text, symbol text, qty real, price real)''') # Insert a row of data cur.execute("INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14)") # Save (commit) the changes conn.commit() # We can also close the connection if we are done with it. # Just be sure any changes have been committed or they will be lost. 
conn.close() conn = sqlite3.connect(os.path.join("..","Data",'sqlite_example.db')) cur = conn.cursor() cur.execute("SELECT name FROM sqlite_master WHERE type='table';") print(cur.fetchall()) t = ('RHAT',) #tuple with just one element cur.execute('SELECT * FROM stocks WHERE symbol=?', t) print(cur.fetchone()) # Larger example that inserts many records at a time purchases = [('2006-03-28', 'BUY', 'IBM', 1000, 45.00), ('2006-04-05', 'BUY', 'MSFT', 1000, 72.00), ('2006-04-06', 'SELL', 'IBM', 500, 53.00), ] cur.executemany('INSERT INTO stocks VALUES (?,?,?,?,?)', purchases) for row in cur.execute('SELECT * FROM stocks ORDER BY price'): print(row)('2006-01-05', 'BUY', 'RHAT', 100.0, 35.14) ('2006-03-28', 'BUY', 'IBM', 1000.0, 45.0) ('2006-04-06', 'SELL', 'IBM', 500.0, 53.0) ('2006-04-05', 'BUY', 'MSFT', 1000.0, 72.0)Using Pandasconn = sqlite3.connect(os.path.join("..","Data",'sqlite_example.db')) df = pd.read_sql_query("SELECT * from stocks ORDER BY price", conn) # verify that result of SQL query is stored in the dataframe print(df.head()) conn.close()date trans symbol qty price 0 2006-01-05 BUY RHAT 100.0 35.14Deleting the database file:os.remove(os.path.join("..","Data",'sqlite_example.db'))Accessing existing [Database](https://www.sqlitetutorial.net/sqlite-sample-database/):![Chinook Schema](../Data/sqlite-sample-database-color.jpg)conn = sqlite3.connect(os.path.join("..","Data",'chinook.sqlite')) cur = conn.cursor() query = ''' SELECT name FROM sqlite_master WHERE type ='table' AND name NOT LIKE 'sqlite_%'; ''' cur.execute(query) for c in cur.fetchall(): print(c) query = ''' SELECT sql FROM sqlite_master WHERE name = 'albums'; ''' cur.execute(query) for c in cur.fetchall(): print(c[0]) query = ''' SELECT albumid, title FROM albums ORDER BY title ''' cur.execute(query) for c in cur.fetchmany(10): print(c) query = ''' SELECT trackid, name, composer, unitprice FROM tracks LIMIT 10 OFFSET 10; ''' cur.execute(query) for c in cur.fetchall(): print(c) query = ''' SELECT name, milliseconds, albumid FROM tracks ORDER BY milliseconds DESC, albumid ASC; ''' cur.execute(query) for c in cur.fetchmany(10): print(c) query = ''' SELECT DISTINCT city, country FROM customers ORDER BY country; ''' cur.execute(query) for c in cur.fetchmany(10): print(c) query = ''' SELECT name, milliseconds, bytes, albumid FROM tracks WHERE albumid = 1 AND milliseconds > 250000; ''' cur.execute(query) for c in cur.fetchmany(10): print(c) query = ''' SELECT name, albumid, composer FROM tracks WHERE composer LIKE '%Smith%' ORDER BY albumid; ''' cur.execute(query) for c in cur.fetchmany(10): print(c) query = ''' SELECT name, albumid, mediatypeid FROM tracks WHERE mediatypeid IN (2, 3); ''' cur.execute(query) for c in cur.fetchmany(10): print(c) query = ''' SELECT InvoiceId, BillingAddress, Total FROM invoices WHERE Total BETWEEN 14.91 and 18.86 ORDER BY Total; ''' cur.execute(query) for c in cur.fetchmany(10): print(c) query = ''' SELECT InvoiceId, BillingAddress, Total FROM invoices WHERE Total NOT BETWEEN 1 and 20 ORDER BY Total; ''' cur.execute(query) for c in cur.fetchmany(10): print(c) query = ''' SELECT Title, Name FROM albums INNER JOIN artists ON artists.ArtistId = albums.ArtistId; ''' cur.execute(query) for c in cur.fetchmany(10): print(c) query = ''' SELECT Name, Title FROM artists LEFT JOIN albums ON artists.ArtistId = albums.ArtistId ORDER BY Name; ''' cur.execute(query) for c in cur.fetchmany(10): print(c) query = ''' SELECT tracks.trackid, tracks.name AS track, albums.title AS album, artists.name AS artist FROM 
tracks INNER JOIN albums ON albums.albumid = tracks.albumid INNER JOIN artists ON artists.artistid = albums.artistid WHERE artists.artistid = 10; ''' cur.execute(query) for c in cur.fetchmany(10): print(c) #https://www.sqlitetutorial.net/Customer Id is not importantx_train, x_test = train_test_split(data, test_size=0.1419, random_state=42) x_train.shape, x_test.shapeTotal Charges has some empty valuesidx = x_train[x_train.TotalCharges == ' '].index x_train = x_train.drop(idx) idx = x_test[x_test.TotalCharges == ' '].index x_test = x_test.drop(idx) x_train.TotalCharges = pd.to_numeric(x_train.TotalCharges) x_test.TotalCharges = pd.to_numeric(x_test.TotalCharges) x_train = x_train.reset_index(drop= True) x_test = x_test.reset_index(drop= True) x_test internet = pd.get_dummies(x_train.InternetService, prefix= 'Net', prefix_sep='_') payment = pd.get_dummies(x_train.PaymentMethod) contract = pd.get_dummies(x_train.Contract) x_train = x_train.drop(['InternetService','PaymentMethod', 'Contract'], axis=1) x_train = pd.concat([x_train, internet, payment, contract], axis=1) internet = pd.get_dummies(x_test.InternetService, prefix= 'Net', prefix_sep='_') payment = pd.get_dummies(x_test.PaymentMethod) contract = pd.get_dummies(x_test.Contract) x_test = x_test.drop(['InternetService','PaymentMethod', 'Contract'], axis=1) x_test = pd.concat([x_test, internet, payment, contract], axis=1) x_test.shape x_train.shape a = pd.get_dummies(x_train.DeviceProtection, prefix= 'DeviceProtection', prefix_sep='_') b = pd.get_dummies(x_train.OnlineBackup, prefix= 'OnlineBackup', prefix_sep='_') c = pd.get_dummies(x_train.OnlineSecurity, prefix= 'OnlineSecurity', prefix_sep='_') d =pd.get_dummies(x_train.TechSupport, prefix= 'TechSupport', prefix_sep='_') x_train = x_train.drop(['DeviceProtection', 'OnlineBackup', 'OnlineSecurity', 'TechSupport'], axis=1) x_train = pd.concat([x_train, a, b ,c, d], axis=1) a = pd.get_dummies(x_test.DeviceProtection, prefix= 'DeviceProtection', prefix_sep='_') b = pd.get_dummies(x_test.OnlineBackup, prefix= 'OnlineBackup', prefix_sep='_') c = pd.get_dummies(x_test.OnlineSecurity, prefix= 'OnlineSecurity', prefix_sep='_') d =pd.get_dummies(x_test.TechSupport, prefix= 'TechSupport', prefix_sep='_') x_test = x_test.drop(['DeviceProtection', 'OnlineBackup', 'OnlineSecurity', 'TechSupport'], axis=1) x_test = pd.concat([x_test, a, b ,c, d], axis=1)Encoding the categorical variablesencoders = {} for col in x_train: if x_train[col].dtype == object: encoders[col] = LabelEncoder() x_train[col] = encoders[col].fit_transform(x_train[col]) for col in x_test: if x_test[col].dtype == object: x_test[col] = encoders[col].transform(x_test[col]) x_testCategorizing the numerical valueplt.hist(x_train.tenure, bins= 4, histtype = 'bar') plt.show() tenure_cat= pd.cut(x_train['tenure'], bins=4, labels=["ten1-18", "ten19-36", "ten37-54", "ten55-72"]) tenure_cat.value_counts() tenure_cat = pd.get_dummies(tenure_cat) x_train = pd.concat([x_train, tenure_cat], axis =1) tenure_cat= pd.cut(x_test['tenure'], bins=4, labels=["ten1-18", "ten19-36", "ten37-54", "ten55-72"]) tenure_cat.value_counts() tenure_cat = pd.get_dummies(tenure_cat) x_test = pd.concat([x_test, tenure_cat], axis =1) x_train = x_train.drop('tenure', axis= 1) x_test = x_test.drop('tenure', axis= 1) x_train.to_csv('feature_x_train.csv', index= False) x_test.to_csv('feature_x_test.csv', index= False) x_train.shape x_test.shape
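Because pd.get_dummies is applied to the train and test splits separately above, a category that shows up in only one split leaves the two frames with different columns. A minimal sketch of one way to guard against that, reindexing the test dummies onto the training columns; the toy frames and the 'PaymentMethod' values here are illustrative, not taken from the dataset above:
import pandas as pd

# Toy example: 'Credit card' appears only in the training split.
train_raw = pd.DataFrame({'PaymentMethod': ['Mailed check', 'Electronic check', 'Credit card']})
test_raw = pd.DataFrame({'PaymentMethod': ['Electronic check', 'Mailed check']})

train_dummies = pd.get_dummies(train_raw)
test_dummies = pd.get_dummies(test_raw)

# Align the test frame to the training columns: categories missing from the test split
# become all-zero columns, and any test-only categories are dropped.
test_dummies = test_dummies.reindex(columns=train_dummies.columns, fill_value=0)
print(train_dummies.columns.tolist())
print(test_dummies.columns.tolist())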
First Assignment: Data Plotting I. No Preprocessing & Gradient Ascent 1) Load Datasetpre_or_not = 0 ascent_way = 0 filename='./data.txt' # data file path dataMat = [] labelMat = [] fr = open(filename) for line in fr.readlines(): # lineArr = line.strip().split() lineArr = line.split(",") # print(lineArr[0], lineArr[1], lineArr[2]) lineArr_0 = float(lineArr[0]) if pre_or_not == 0 else float(lineArr[0]) / 10 lineArr_1 = float(lineArr[1]) if pre_or_not == 0 else float(lineArr[1]) / 10 dataMat.append([1.0, lineArr_0, lineArr_1]) # the leading 1.0 is the constant term of the model: with two features X1 and X2, three parameters are needed, W1 + W2*X1 + W3*X2 labelMat.append(int(lineArr[2]))2) Logistic Regression Figureweights = np.load('./data/no_pre_Grad_weights.npy') font1 = {'family' : 'Times New Roman', 'weight' : 'normal', 'size' : 15, } dataArr = np.array(dataMat) n = np.shape(dataArr)[0] xcord1 = []; ycord1 = [] xcord2 = []; ycord2 = [] for i in range(n): if int(labelMat[i]) == 1: xcord1.append(dataArr[i,1]) ycord1.append(dataArr[i,2]) else: xcord2.append(dataArr[i,1]) ycord2.append(dataArr[i,2]) fig = plt.figure(dpi=600) ax = fig.add_subplot(111) label1 = ax.scatter(xcord1, ycord1, s=30, c='red', marker='v') label2 = ax.scatter(xcord2, ycord2, s=50, c='green', marker='*') # x = arange(-3.0, 3.0, 0.1) x_min = np.min(dataArr[:,1]) x_max = np.max(dataArr[:,1]) # weights = weights.getA() x = np.arange(x_min, x_max, 1) y = (-weights[0] - weights[1] * x) / weights[2] print("Drawing") lr_line = ax.plot(x, y, '-k', linewidth=4) ax.legend((label1, label2), ('Passing', 'Failing'), prop=font1) plt.xlabel('X1', fontdict=font1) plt.ylabel('X2', fontdict=font1) plt.savefig('./figure/no_pre_Grad_lrline.pdf') plt.show()Drawing3) Weights save txtweight_record = np.load('./data/no_pre_Grad_weight_record.npy') cycles, w_num, one = np.shape(weight_record) weight_0 = np.zeros(cycles) weight_1 = np.zeros(cycles) weight_2 = np.zeros(cycles) for i in range(cycles): weight_0[i] = weight_record[i][0][0] weight_1[i] = weight_record[i][1][0] weight_2[i] = weight_record[i][2][0] weight_0.reshape((cycles,)) weight_1.reshape((cycles,)) weight_2.reshape((cycles,)) np.savetxt('./data/no_pre_Grad_weight_0.txt', weight_0) np.savetxt('./data/no_pre_Grad_weight_1.txt', weight_1) np.savetxt('./data/no_pre_Grad_weight_2.txt', weight_2)4) Loss save txtloss_record = np.load('./data/no_pre_Grad_loss_record.npy') np.savetxt('./data/no_pre_Grad_loss.txt', loss_record)5) Error Rate save txterrRate_record = np.load('./data/no_pre_Grad_errRate_record.npy') np.savetxt('./data/no_pre_Grad_errRate.txt', errRate_record)
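The same load-split-save pattern recurs for every run in the next section; a small helper along these lines (the function name and the assumption that each *_weight_record.npy array has shape cycles x 3 x 1 are mine) writes each weight component to its own text file and avoids copy-paste slips in the file names:
import numpy as np

def dump_weight_record(prefix):
    # Hypothetical helper: load './data/<prefix>_weight_record.npy' (shape: cycles x 3 x 1)
    # and save each weight trajectory to './data/<prefix>_weight_<k>.txt'.
    record = np.load('./data/%s_weight_record.npy' % prefix)
    for k in range(record.shape[1]):
        np.savetxt('./data/%s_weight_%d.txt' % (prefix, k), record[:, k, 0])

for prefix in ['no_pre_Grad', 'no_pre_stocGrad', 'pre_Grad', 'pre_stocGrad']:
    dump_weight_record(prefix)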
Save all the npy as txt I. Save the weightsweight_record = np.load('./data/no_pre_stocGrad_weight_record.npy') cycles, w_num, one = np.shape(weight_record) weight_0 = np.zeros(cycles) weight_1 = np.zeros(cycles) weight_2 = np.zeros(cycles) for i in range(cycles): weight_0[i] = weight_record[i][0][0] weight_1[i] = weight_record[i][1][0] weight_2[i] = weight_record[i][2][0] weight_0.reshape((cycles,)) weight_1.reshape((cycles,)) weight_2.reshape((cycles,)) np.savetxt('./data/no_pre_stocGrad_weight_0.txt', weight_0) np.savetxt('./data/no_pre_stocGrad_weight_1.txt', weight_1) np.savetxt('./data/no_pre_stocGrad_weight_2.txt', weight_2) weight_record = np.load('./data/pre_Grad_weight_record.npy') cycles, w_num, one = np.shape(weight_record) weight_0 = np.zeros(cycles) weight_1 = np.zeros(cycles) weight_2 = np.zeros(cycles) for i in range(cycles): weight_0[i] = weight_record[i][0][0] weight_1[i] = weight_record[i][1][0] weight_2[i] = weight_record[i][2][0] weight_0.reshape((cycles,)) weight_1.reshape((cycles,)) weight_2.reshape((cycles,)) np.savetxt('./data/pre_Grad_weight_0.txt', weight_0) np.savetxt('./data/pre_Grad_weight_1.txt', weight_1) np.savetxt('./data/pre_Grad_weight_2.txt', weight_2) weight_record = np.load('./data/pre_stocGrad_weight_record.npy') cycles, w_num, one = np.shape(weight_record) weight_0 = np.zeros(cycles) weight_1 = np.zeros(cycles) weight_2 = np.zeros(cycles) for i in range(cycles): weight_0[i] = weight_record[i][0][0] weight_1[i] = weight_record[i][1][0] weight_2[i] = weight_record[i][2][0] weight_0.reshape((cycles,)) weight_1.reshape((cycles,)) weight_2.reshape((cycles,)) np.savetxt('./data/pre_stocGrad_weight_0.txt', weight_0) np.savetxt('./data/pre_stocGrad_weight_1.txt', weight_1) np.savetxt('./data/pre_stocGrad_weight_2.txt', weight_2)II. Save the lossloss_record = np.load('./data/no_pre_stocGrad_loss_record.npy') np.savetxt('./data/no_pre_stocGrad_loss.txt', loss_record) loss_record = np.load('./data/pre_Grad_loss_record.npy') np.savetxt('./data/pre_Grad_loss.txt', loss_record) loss_record = np.load('./data/pre_stocGrad_loss_record.npy') np.savetxt('./data/pre_stocGrad_loss.txt', loss_record)III. Save the errRateerrRate_record = np.load('./data/no_pre_stocGrad_errRate_record.npy') np.savetxt('./data/no_pre_stocGrad_errRate.txt', errRate_record) errRate_record = np.load('./data/pre_Grad_errRate_record.npy') np.savetxt('./data/pre_Grad_errRate.txt', errRate_record) errRate_record = np.load('./data/pre_stocGrad_errRate_record.npy') np.savetxt('./data/pre_stocGrad_errRate.txt', errRate_record)IV.
Save the weightsweights = np.load('./data/no_pre_Grad_weights.npy') np.savetxt('./data/no_pre_Grad_weights.txt', weights) weights = np.load('./data/no_pre_stocGrad_weights.npy') np.savetxt('./data/no_pre_stocGrad_weights.txt', weights) weights = np.load('./data/pre_Grad_weights.npy') np.savetxt('./data/pre_Grad_weights.txt', weights) weights = np.load('./data/pre_stocGrad_weights.npy') np.savetxt('./data/pre_stocGrad_weights.txt', weights)Contextualizing iCdR703 Annotation conversionfrom riptide import * iCdR703 = cobra.io.load_json_model('/home/mjenior/Desktop/repos/Jenior_Cdifficile_2019/data/reconstructions/iCdR703.json') iter1 = ['rxn02269_c', 'rxn09142_c', 'rxn09145_c', 'rxn00620_c', 'rxn01105_c', 'rxn05293_c', 'rxn02286_c', 'rxn09147_c', 'rxn01953_c', 'rxn02011_c', 'rxn07466_c', 'rxn09429_c', 'rxn03852_c', 'rxn13140_c', 'rxn09065_c', 'rxn01520_c', 'rxn13914_c', 'rxn09063_c', 'rxn09348_c', 'rxn05744_c', 'rxn08040_c'] iter2 = ['rxn02269_c', 'rxn13139_c', 'rxn00620_c', 'rxn05293_c', 'rxn09341_c', 'rxn02286_c', 'rxn01953_c', 'rxn02011_c', 'rxn07466_c', 'rxn09429_c', 'rxn09126_c', 'rxn09124_c', 'rxn13140_c', 'rxn09065_c', 'rxn01520_c', 'rxn00553_c', 'rxn03852_c', 'rxn13914_c', 'rxn09063_c', 'rxn05744_c', 'rxn08040_c', 'rxn09163_c'] iter3 = ['rxn02269_c', 'rxn09145_c', 'rxn13139_c', 'rxn00620_c', 'rxn05293_c', 'rxn02286_c', 'rxn09147_c', 'rxn01953_c', 'rxn00605_c', 'rxn02011_c', 'rxn00579_c', 'rxn07466_c', 'rxn09429_c', 'rxn03852_c', 'rxn13140_c', 'rxn09065_c', 'rxn03164_c', 'rxn01520_c', 'rxn09063_c', 'rxn09348_c', 'rxn05744_c', 'rxn08040_c', 'rxn09163_c'] iter4 = ['rxn02269_c', 'rxn13139_c', 'rxn00620_c', 'rxn05293_c', 'rxn02286_c', 'rxn01953_c', 'rxn00605_c', 'rxn02011_c', 'rxn00579_c', 'rxn07466_c', 'rxn09429_c', 'rxn09126_c', 'rxn09124_c', 'rxn13140_c', 'rxn09065_c', 'rxn01520_c', 'rxn03852_c', 'rxn13914_c', 'rxn09063_c', 'rxn09348_c', 'rxn05744_c', 'rxn08040_c', 'rxn09163_c'] new_rxn_ids = set(iter1 + iter2 + iter3 + iter4) iter1_mdm = ['rxn01014_c', 'rxn10175_c', 'rxn02476_c', 'rxn00777_c', 'rxn01359_c', 'rxn12676_c', 'rxn02928_c', 'rxn01538_c', 'rxn01182_c', 'rxn12239_c', 'rxn03167_c', 'rxn05039_c', 'rxn10484_c', 'rxn05291_c', 'rxn10030_c', 'rxn01256_c', 'rxn01644_c', 'rxn08131_c', 'rxn00530_c', 'rxn00010_c', 'rxn10053_c', 'rxn01669_c'] iter2_mdm = ['rxn01014_c', 'rxn10175_c', 'rxn02476_c', 'rxn00777_c', 'rxn00970_c', 'rxn13251_c', 'rxn02928_c', 'rxn01538_c', 'rxn12239_c', 'rxn03167_c', 'rxn05039_c', 'rxn10484_c', 'rxn05291_c', 'rxn01256_c', 'rxn01644_c', 'rxn08131_c', 'rxn00530_c', 'rxn00958_c', 'rxn00010_c', 'rxn10053_c', 'rxn01669_c','rxn01539_c'] iter3_mdm = ['rxn01014_c', 'rxn10175_c', 'rxn02476_c', 'rxn00777_c', 'rxn12676_c', 'rxn13251_c', 'rxn02928_c', 'rxn01538_c', 'rxn01182_c', 'rxn12239_c', 'rxn03167_c', 'rxn05039_c', 'rxn10484_c', 'rxn10030_c', 'rxn01256_c', 'rxn01644_c', 'rxn08131_c', 'rxn00530_c', 'rxn00010_c', 'rxn10053_c', 'rxn01669_c'] mdm_rxn_ids = set(iter1_mdm + iter2_mdm + iter3_mdm) iter1_ncmm = ['rxn00285_c', 'rxn05227_c', 'rxn10036_c', 'rxn10840_c', 'rxn00341_c', 'rxn13012_c', 'rxn09119_c', 'rxn10663_c'] iter2_ncmm = ['rxn00285_c', 'rxn05227_c', 'rxn10036_c', 'rxn10840_c', 'rxn00341_c', 'rxn13012_c', 'rxn09119_c', 'rxn10663_c'] ncmm_rxn_ids= set(iter1_ncmm + iter2_ncmm) final_gapfilled = new_rxn_ids.union(mdm_rxn_ids) final_gapfilled = final_gapfilled.union(ncmm_rxn_ids) for x in final_gapfilled: print(x + '\t' + iCdR703.reactions.get_by_id(x).name) iCdR703 # Read in gene list from iCdR703 genre_genes = set() for gene in iCdR703.genes: genre_genes |= 
set([gene.id]) # Parse PATRIC lookup table refseq_dict = {} gene_dict = {} with open('/home/mjenior/Desktop/repos/Jenior_Cdifficile_2019/data/transcript/tamayo_etal/phase_variation/CdR20291_genes.tsv', 'r') as gene_catalog: header = gene_catalog.readline() for line in gene_catalog: line = line.split() if not line[0] in genre_genes: continue else: refseq_dict[line[1]] = line[0] gene_dict[line[2]] = line[0] mapping_genes = ['rsbW','sigB','16S rRNA_1','23S rRNA_1','5S rRNA_1','CDR20291_0001','CDR20291_0002','serS1','CDR20291_0004','dnaH','CDR20291_0006','recD','CDR20291_0008','16S rRNA_2','23S rRNA_2','5S rRNA_2','CDR20291_0009','pyc','CDR20291_0011','ctsR','CDR20291_0013','CDR20291_0014','clpC','radA','CDR20291_0017','CDR20291_0018','CDR20291_0019','CDR20291_0020','CDR20291_0021','CDR20291_0022','CDR20291_0023','CDR20291_0024','acoA','acoB','acoC','acoL','CDR20291_0029','CDR20291_0030','CDR20291_0031','CDR20291_0032','CDR20291_0033','CDR20291_0034','CDR20291_0035','ispD_1','ispF','proS','CDR20291_0039','gltX','cysS','CDR20291_0042','CDR20291_0043','CDR20291_0044','CDR20291_0045','CDR20291_0046','CDR20291_0047','CDR20291_0048','CDR20291_0049','sigH','CDR20291_0051','rpmG','secE','nusG','rplK','rplA','rplJ','rplL','CDR20291_0059','rpoB','rpoC','rpsL','rpsG','fusA','CDR20291_0065','rpsJ','rplC','rplD','rplW','rplB','rpsS','rplV','rpsC','rplP','rpmC','rpsQ','rplN','rplX','rplE','rpsN','rpsH','rplF','rplR','rpsE','rpmD','rplO','prlA','adk','map1','CDR20291_0090','infA','rpmJ','rpsM','rpsK','rpsD','rpoA','rplQ','CDR20291_0098','CDR20291_0099','CDR20291_0100','truA1','CDR20291_0102','rpsI','cwlD','16S rRNA_3','23S rRNA_3','16S rRNA_4','23S rRNA_4','5S rRNA_3','aspC','nrdD','nrdG','23S rRNA_5','5S rRNA_4','CDR20291_0108','CDR20291_0109','CDR20291_0110','ptb','buk','CDR20291_0113','CDR20291_0114','CDR20291_0115','CDR20291_0116','CDR20291_0117','CDR20291_0118','glmS','CDR20291_0120','CDR20291_0121','murA','spoIIC','CDR20291_0124','spoIIID','mreB1','fabZ','CDR20291_0128','metE','CDR20291_0130','CDR20291_0131','CDR20291_0132','CDR20291_0133','CDR20291_0134','CDR20291_0135','CDR20291_0136','CDR20291_0137','CDR20291_0138','CDR20291_0139','CDR20291_0140','CDR20291_0141','secA1','prfB','CDR20291_0144','CDR20291_0145','CDR20291_0146','CDR20291_0147','CDR20291_0148','CDR20291_0149','rimI','gcp_1','hpdB','hpdC','hpdA','CDR20291_0155','CDR20291_0156','CDR20291_0157','CDR20291_0158','CDR20291_0159','CDR20291_0160','CDR20291_0161','CDR20291_0162','CDR20291_0163','CDR20291_0164','CDR20291_0165','CDR20291_0166','CDR20291_0167','CDR20291_0168','CDR20291_0169','CDR20291_0170','CDR20291_0171','CDR20291_0172','fdxA','CDR20291_0174','CDR20291_0175','CDR20291_0176','CDR20291_0177','CDR20291_0178','CDR20291_0179','gluD','CDR20291_0181','CDR20291_0182','CDR20291_0183','CDR20291_0184','pyrB','pyrK','pyrD_1','pyrE','CDR20291_0189','CDR20291_0190','CDR20291_0191','CDR20291_0192','cls_1','groES','groEL','CDR20291_0196','CDR20291_0197','guaA','CDR20291_0199','CDR20291_0200','CDR20291_0201','CDR20291_0202','CDR20291_0203','CDR20291_0204','CDR20291_0205','CDR20291_0206','CDR20291_0207','CDR20291_0208','CDR20291_0209','CDR20291_0210','CDR20291_0211','CDR20291_0212','CDR20291_0213','CDR20291_0214','purE','purC','purF','purG','purN','purH','purD','purL','CDR20291_0223','CDR20291_0224','CDR20291_0225','CDR20291_0226','CDR20291_0227','CDR20291_0228','fliN_1','flgM','CDR20291_0231','flgK','flgL','CDR20291_0234','csrA','fliS1','fliS2','fliD','CDR20291_0239','fliC','CDR20291_0241','CDR20291_0242','CDR20291_0243','CDR20291_0244','CDR20291_
0245','CDR20291_0246','CDR20291_0247','flgB','flgC','fliE','fliF','fliG','fliH','fliI','fliJ','fliK','flgD','flgE','CDR20291_0259','motA','motB','fliL','CDR20291_0263','fliP','fliQ','flhB','flhA','flhF','fleN','fliA','CDR20291_0271','flgG','CDR20291_0273','fliM','fliN_2','CDR20291_0276','htpG','dhaT','spl','CDR20291_0280','CDR20291_0281','CDR20291_0282','CDR20291_0283','CDR20291_0284','CDR20291_0285','CDR20291_0286','CDR20291_0287','CDR20291_0288','CDR20291_0289','CDR20291_0290','CDR20291_0291','CDR20291_0292','CDR20291_0293','CDR20291_0294','CDR20291_0295','CDR20291_0296','CDR20291_0297','CDR20291_0298','CDR20291_0299','CDR20291_0300','rbsR','rbsK','rbsB','rbsA','rbsC','argE','CDR20291_0307','abgB1','CDR20291_0309','CDR20291_0310','CDR20291_0311','CDR20291_0312','CDR20291_0313','CDR20291_0314','CDR20291_0315','CDR20291_0316','CDR20291_0317','CDR20291_0318','16S rRNA_5','23S rRNA_6','5S rRNA_5','CDR20291_0319','CDR20291_0320','CDR20291_0321','CDR20291_0322','CDR20291_0323','CDR20291_0324','CDR20291_0325','CDR20291_0326','CDR20291_0327','CDR20291_0328','cbiM','cbiN','cbiQ','cbiO','pcrA','CDR20291_0334','CDR20291_0335','ppiB','CDR20291_0337','ppaC','adhE_1','CDR20291_0340','CDR20291_0341','CDR20291_0342','CDR20291_0343','CDR20291_0344','CDR20291_0345','CDR20291_0346','CDR20291_0347','CDR20291_0348','CDR20291_0349','CDR20291_0350','CDR20291_0351','CDR20291_0352','CDR20291_0353','CDR20291_0354','CDR20291_0355','CDR20291_0356','CDR20291_0357','CDR20291_0358','bglF_1','bglA','bglG_1','CDR20291_0362','CDR20291_0363','CDR20291_0364','ldhA','hadA','hadI','hadB','hadC','acdB','etfB1','etfA1','CDR20291_0373','fba','CDR20291_0375','CDR20291_0376','CDR20291_0377','CDR20291_0378','CDR20291_0379','CDR20291_0380','CDR20291_0381','CDR20291_0382','CDR20291_0383','rocR','CDR20291_0385','CDR20291_0386','CDR20291_0387','oraS','oraE','CDR20291_0390','CDR20291_0391','CDR20291_0392','CDR20291_0393','CDR20291_0394','CDR20291_0395','CDR20291_0396','CDR20291_0397','CDR20291_0398','CDR20291_0399','CDR20291_0400','CDR20291_0401','CDR20291_0402','CDR20291_0403','CDR20291_0404','CDR20291_0405','CDR20291_0406','CDR20291_0407','CDR20291_0408','CDR20291_0409','malL','CDR20291_0411','blaR','blaI','CDR20291_0414','CDR20291_0415','CDR20291_0416','CDR20291_0417','CDR20291_0418','CDR20291_0419','spaF','spaE','spaG','spaR','spaK','CDR20291_0425','CDR20291_0426','CDR20291_0427','CDR20291_0428','CDR20291_0429','CDR20291_0430','sugE','CDR20291_0432','CDR20291_0433','CDR20291_0434','CDR20291_0435','CDR20291_0436','CDR20291_0437','CDR20291_0438','CDR20291_0439','CDR20291_0440','CDR20291_0441','CDR20291_0442','CDR20291_0443','CDR20291_0444','CDR20291_0445','CDR20291_0446','CDR20291_0447','CDR20291_0448','CDR20291_0449','CDR20291_0450','CDR20291_0451','CDR20291_0452','CDR20291_0453','CDR20291_0454','CDR20291_0455','CDR20291_0456','CDR20291_0457','cheB','cheC','cheD','cheW','CDR20291_0462','CDR20291_0463','cheA','CDR20291_0465','cheR','CDR20291_0467','CDR20291_0468','CDR20291_0469','CDR20291_0470','CDR20291_0471','CDR20291_0472','CDR20291_0473','CDR20291_0474','CDR20291_0475','sleC','sleB','CDR20291_0478','sip2_1','sip2_2','CDR20291_0481','CDR20291_0482','glsA','CDR20291_0484','nfo','CDR20291_0486','CDR20291_0487','CDR20291_0488','CDR20291_0489','nth','CDR20291_0491','CDR20291_0492','CDR20291_0493','CDR20291_0494','CDR20291_0495','CDR20291_0496','CDR20291_0497','thrS','CDR20291_0499','CDR20291_0500','CDR20291_0501','CDR20291_0502','CDR20291_0503','CDR20291_0504','CDR20291_0505','CDR20291_0506','gapN','CDR20291_0508','CDR20291_0509','CDR2
0291_0510','CDR20291_0511','CDR20291_0512','CDR20291_0513','CDR20291_0514','CDR20291_0515','CDR20291_0516','CDR20291_0517','CDR20291_0518','CDR20291_0519','CDR20291_0520','CDR20291_0521','cotJB1','cotJC1','ogt1','CDR20291_0525','CDR20291_0526','CDR20291_0527','CDR20291_0528','CDR20291_0529','CDR20291_0530','CDR20291_0531','CDR20291_0532','CDR20291_0533','CDR20291_0534','CDR20291_0535','CDR20291_0536','CDR20291_0537','CDR20291_0538','CDR20291_0539','CDR20291_0540','CDR20291_0541','CDR20291_0542','CDR20291_0543','CDR20291_0544','CDR20291_0545','CDR20291_0546','CDR20291_0547','CDR20291_0548','CDR20291_0549','CDR20291_0550','CDR20291_0551','CDR20291_0552','CDR20291_0553','CDR20291_0554','CDR20291_0555','CDR20291_0556','CDR20291_0557','CDR20291_0558','CDR20291_0559','CDR20291_0560','CDR20291_0561','CDR20291_0562','CDR20291_0563','CDR20291_0564','CDR20291_0565','CDR20291_0566','CDR20291_0567','CDR20291_0568','CDR20291_0569','CDR20291_0570','CDR20291_0571','CDR20291_0572','CDR20291_0573','CDR20291_0574','CDR20291_0575','CDR20291_0576','CDR20291_0577','CDR20291_0578','u2','u1','tcdD','tcdB','tcdE','tcdA','dtxA','d1','CDR20291_0587','d2','d3','d4','CDR20291_0591','CDR20291_0592','CDR20291_0593','CDR20291_0594','CDR20291_0595','CDR20291_0596','CDR20291_0597','CDR20291_0598','CDR20291_0599','CDR20291_0600','CDR20291_0601','CDR20291_0602','CDR20291_0603','CDR20291_0604','CDR20291_0605','CDR20291_0606','CDR20291_0607','CDR20291_0608','CDR20291_0609','CDR20291_0610','infC','rpmI','rplT','CDR20291_0614','CDR20291_0615','CDR20291_0616','CDR20291_0617','CDR20291_0618','CDR20291_0619','CDR20291_0620','CDR20291_0621','CDR20291_0622','CDR20291_0623','CDR20291_0624','pheS','pheT','CDR20291_0627','CDR20291_0628','CDR20291_0629','CDR20291_0630','CDR20291_0631','CDR20291_0632','CDR20291_0633','pepD','CDR20291_0635','CDR20291_0636','argS','CDR20291_0638','CDR20291_0639','CDR20291_0640','CDR20291_0641','CDR20291_0642','cooS','cooC','fhs','fchA','folD','CDR20291_0648','CDR20291_0649','CDR20291_0650','CDR20291_0651','CDR20291_0652','CDR20291_0653','CDR20291_0654','CDR20291_0655','gcvH','CDR20291_0657','CDR20291_0658','CDR20291_0659','CDR20291_0660','CDR20291_0661','CDR20291_0662','CDR20291_0663','hisK','CDR20291_0665','CDR20291_0666','CDR20291_0667','CDR20291_0668','glpK1','CDR20291_0670','CDR20291_0671','CDR20291_0672','CDR20291_0673','CDR20291_0674','CDR20291_0675','CDR20291_0676','CDR20291_0677','CDR20291_0678','CDR20291_0679','CDR20291_0680','iscS1','thiI','CDR20291_0683','CDR20291_0684','CDR20291_0685','plfA','plfB','CDR20291_0688','CDR20291_0689','CDR20291_0690','gutM','gutA','srlE_1','srlE', 
'srlB_1','gutD','CDR20291_0697','CDR20291_0698','spoIIAA','spoIIAB','sigF','spoVAC','spoVAD','spoVAE','CDR20291_0705','CDR20291_0706','CDR20291_0707','CDR20291_0708','CDR20291_0709','CDR20291_0710','CDR20291_0711','CDR20291_0712','CDR20291_0713','CDR20291_0714','CDR20291_0715','CDR20291_0716','CDR20291_0717','CDR20291_0718','CDR20291_0719','CDR20291_0720','CDR20291_0721','CDR20291_0722','CDR20291_0723','CDR20291_0724','nadE','CDR20291_0726','CDR20291_0727','CDR20291_0728','CDR20291_0729','act','crt1','CDR20291_0732','CDR20291_0733','CDR20291_0734','CDR20291_0735','CDR20291_0736','CDR20291_0737','CDR20291_0738','CDR20291_0739','CDR20291_0740','floX','CDR20291_0742','CDR20291_0743','CDR20291_0744','CDR20291_0745','CDR20291_0746','CDR20291_0747','CDR20291_0748','CDR20291_0749','CDR20291_0750','CDR20291_0751','CDR20291_0752','CDR20291_0753','CDR20291_0754','rbr_1','CDR20291_0756','rbo','CDR20291_0758','CDR20291_0759','CDR20291_0760','CDR20291_0761','CDR20291_0762','CDR20291_0763','CDR20291_0764','CDR20291_0765','CDR20291_0766','CDR20291_0767','CDR20291_0768','CDR20291_0769','CDR20291_0770','CDR20291_0771','CDR20291_0772','CDR20291_0773','CDR20291_0774','CDR20291_0775','CDR20291_0776','CDR20291_0777','CDR20291_0778','abgB2','CDR20291_0780','CDR20291_0781','CDR20291_0782','oppB','oppC','oppA','oppD','oppF','CDR20291_0788','CDR20291_0789','CDR20291_0790','CDR20291_0791','CDR20291_0792','CDR20291_0793','CDR20291_0794','CDR20291_0795','CDR20291_0796','CDR20291_0797','modA','modB_1','modC','maa','CDR20291_0802','CDR20291_0803','CDR20291_0804','CDR20291_0805','CDR20291_0806','CDR20291_0807','CDR20291_0808','tlpB_1','CDR20291_0810','CDR20291_0811','glgC','glgD','glgA','glgP','CDR20291_0816','CDR20291_0817','speA_1','speD','speE','speB','cspA','CDR20291_0823','CDR20291_0824','CDR20291_0825','tlpB_2','CDR20291_0827','CDR20291_0828','dinB','opuCA','opuCC','CDR20291_0832','CDR20291_0833','CDR20291_0834','CDR20291_0835','CDR20291_0836','CDR20291_0837','CDR20291_0838','CDR20291_0839','CDR20291_0840','CDR20291_0841','leuA','leuC','leuD','leuB','CDR20291_0846','CDR20291_0847','serA','CDR20291_0849','CDR20291_0850','CDR20291_0851','CDR20291_0852','CDR20291_0853','CDR20291_0854','CDR20291_0855','fumA','fumB','CDR20291_0858','CDR20291_0859','CDR20291_0860','CDR20291_0861','CDR20291_0862','CDR20291_0863','CDR20291_0864','CDR20291_0865','nagA','glmD','CDR20291_0868','CDR20291_0869','CDR20291_0870','CDR20291_0871','CDR20291_0872','CDR20291_0873','CDR20291_0874','CDR20291_0875','CDR20291_0876','CDR20291_0877','CDR20291_0878','CDR20291_0879','potA','potB','potC','potD','CDR20291_0884','CDR20291_0885','CDR20291_0886','CDR20291_0887','CDR20291_0888','mnaA','CDR20291_0890','16S rRNA_6','23S rRNA_7','5S 
rRNA_6','CDR20291_0891','CDR20291_0892','CDR20291_0893','CDR20291_0894','CDR20291_0895','addB','addA','sbcD','sbcC','CDR20291_0900','CDR20291_0901','pepT','CDR20291_0903','CDR20291_0904','CDR20291_0905','CDR20291_0906','CDR20291_0907','CDR20291_0908','CDR20291_0909','bcd2','etfB2','etfA2','crt2','hbd','thlA1','CDR20291_0916','CDR20291_0917','acpP_1','CDR20291_0919','CDR20291_0920','CDR20291_0921','CDR20291_0922','ccpA','CDR20291_0924','CDR20291_0925','CDR20291_0926','CDR20291_0927','CDR20291_0928','CDR20291_0929','CDR20291_0930','CDR20291_0931','CDR20291_0932','CDR20291_0933','CDR20291_0934','CDR20291_0935','CDR20291_0936','CDR20291_0937','CDR20291_0938','CDR20291_0939','moeA','mobB','CDR20291_0942','CDR20291_0943','CDR20291_0944','CDR20291_0945','zupT','CDR20291_0947','CDR20291_0948','CDR20291_0949','CDR20291_0950','dhaB1','dhaB2','CDR20291_0953','CDR20291_0954','CDR20291_0955','CDR20291_0956','CDR20291_0957','CDR20291_0958','CDR20291_0959','CDR20291_0960','CDR20291_0961','CDR20291_0962','CDR20291_0963','polA','coaE','CDR20291_0966','CDR20291_0967','CDR20291_0968','CDR20291_0969','gloA','CDR20291_0971','CDR20291_0972','rnfC','rnfD','rnfG','rnfE','rnfA','rnfB','CDR20291_0979','maf','radC','mreB2','mreC','CDR20291_0984','CDR20291_0985','minC','divIVB','minE','mrdB','mgsA','CDR20291_0991','CDR20291_0992','CDR20291_0993','norV','CDR20291_0995','CDR20291_0996','cafA','rplU','CDR20291_0999','rpmA','obg','CDR20291_1002','hom1','CDR20291_1004','CDR20291_1005','CDR20291_1006','CDR20291_1007','etfB3','etfA3','CDR20291_1010','CDR20291_1011','ackA','CDR20291_1013','rpmF','fapR','plsX','fabH','fabK','fabD','fabG','acpP_2','fabF','CDR20291_1023','CDR20291_1024','CDR20291_1025','CDR20291_1026','CDR20291_1027','CDR20291_1028','fbp','spoIIIAA','spoIIIAB','spoIIIAC','spoIIIAD','spoIIIAE','spoiIIIAF','spoIIIAG','spoIIIAH','CDR20291_1038','nusB','gcp_2','xseA','xseB','ispA','CDR20291_1044','dxs','CDR20291_1046','recN','CDR20291_1048','CDR20291_1049','bltD','spoIVB','spo0A','CDR20291_1053','CDR20291_1054','CDR20291_1055','CDR20291_1056','CDR20291_1057','nudF','CDR20291_1059','xerD1','deoB','deoD','deoA','CDR20291_1064','CDR20291_1065','CDR20291_1066','CDR20291_1067','CDR20291_1068','CDR20291_1069','CDR20291_1070','CDR20291_1071','CDR20291_1072','CDR20291_1073','CDR20291_1074','CDR20291_1075','CDR20291_1076','CDR20291_1077','CDR20291_1078','vanZ','CDR20291_1080','CDR20291_1081','CDR20291_1082','CDR20291_1083','CDR20291_1084','efp','CDR20291_1086','rnc','CDR20291_1088','smc','ftsY','CDR20291_1091','ffh','rpsP','CDR20291_1094','rimM','trmD','rplS','CDR20291_1098','CDR20291_1099','CDR20291_1100','CDR20291_1101','rnhB','CDR20291_1103','CDR20291_1104','CDR20291_1105','CDR20291_1106','CDR20291_1107','CDR20291_1108','CDR20291_1109','CDR20291_1110','CDR20291_1111','CDR20291_1112','CDR20291_1113','topA','codY','CDR20291_1116','CDR20291_1117','CDR20291_1118','iscS2','CDR20291_1120','trmU','alaS','CDR20291_1123','CDR20291_1124','CDR20291_1125','CDR20291_1126','fur','CDR20291_1128','CDR20291_1129','CDR20291_1130','dacF','CDR20291_1132','CDR20291_1133','CDR20291_1134','scpA','scpB','CDR20291_1137','CDR20291_1138','CDR20291_1139','pepA','CDR20291_1141','CDR20291_1142','CDR20291_1143','CDR20291_1144','acd','dnaF','CDR20291_1147','nusA','CDR20291_1149','CDR20291_1150','infB','rbfA','CDR20291_1153','truB','ribC','CDR20291_1156','rpsO','CDR20291_1158','comR','CDR20291_1160','CDR20291_1161','CDR20291_1162','dapG','tepA','ftsK','CDR20291_1166','CDR20291_1167','pgsA','recA','CDR20291_1170','CDR20291_1171','sip3','bacA1','xerD2','CD
R20291_1175','CDR20291_1176','malX_1','CDR20291_1178','glvG','CDR20291_1180','CDR20291_1181','exoA','CDR20291_1183','CDR20291_1184','glnA','CDR20291_1186','CDR20291_1187','CDR20291_1188','CDR20291_1189','CDR20291_1190','CDR20291_1191','CDR20291_1192','CDR20291_1193','CDR20291_1194','CDR20291_1195','CDR20291_1196','cspB','CDR20291_1198','CDR20291_1199','CDR20291_1200','CDR20291_1201','CDR20291_1202','CDR20291_1203','CDR20291_1204','CDR20291_1205','CDR20291_1206','CDR20291_1207','CDR20291_1208','CDR20291_1209','CDR20291_1210','CDR20291_1211','CDR20291_1212','CDR20291_1213','CDR20291_1214','CDR20291_1215','CDR20291_1216','CDR20291_1217','CDR20291_1218','CDR20291_1219','CDR20291_1220','CDR20291_1221','CDR20291_1222','CDR20291_1223','CDR20291_1224','CDR20291_1225','CDR20291_1226','CDR20291_1227','glpK2','CDR20291_1229','CDR20291_1230','CDR20291_1231','CDR20291_1232','CDR20291_1233','CDR20291_1234','CDR20291_1235','CDR20291_1236','CDR20291_1237','CDR20291_1238','CDR20291_1239','CDR20291_1240','CDR20291_1241','CDR20291_1242','CDR20291_1243','CDR20291_1244','CDR20291_1245','CDR20291_1246','CDR20291_1247','glpQ','CDR20291_1249','CDR20291_1250','CDR20291_1251','CDR20291_1252','CDR20291_1253','CDR20291_1254','ddl','CDR20291_1256','CDR20291_1257','CDR20291_1258','CDR20291_1259','CDR20291_1260','CDR20291_1261','CDR20291_1262','CDR20291_1263','CDR20291_1264','CDR20291_1265','CDR20291_1266','CDR20291_1267','CDR20291_1268','CDR20291_1269','CDR20291_1270','CDR20291_1271','CDR20291_1272','CDR20291_1273','CDR20291_1274','CDR20291_1275','CDR20291_1276','CDR20291_1277','CDR20291_1278','CDR20291_1279','CDR20291_1280','CDR20291_1281','CDR20291_1282','CDR20291_1283','CDR20291_1284','CDR20291_1285','CDR20291_1286','CDR20291_1287','CDR20291_1288','CDR20291_1289','CDR20291_1290','CDR20291_1291','CDR20291_1292','CDR20291_1293','pabA','pabB','pabC','CDR20291_1297','folE','folP','folB','folK','CDR20291_1302','dnaG','rpoD1','CDR20291_1305','CDR20291_1306','wrbA','CDR20291_1308','CDR20291_1309','CDR20291_1310','CDR20291_1311','CDR20291_1312','CDR20291_1313','CDR20291_1314','CDR20291_1315','CDR20291_1316','CDR20291_1317','CDR20291_1318','CDR20291_1319','CDR20291_1320','CDR20291_1321','CDR20291_1322','CDR20291_1323','CDR20291_1324','CDR20291_1325','CDR20291_1326','feoA1','feoB1','CDR20291_1329','CDR20291_1330','ssuC','ssuB','ssuA','CDR20291_1334','CDR20291_1335','CDR20291_1336','CDR20291_1337','CDR20291_1338','metN','metI','metQ','CDR20291_1342','CDR20291_1343','CDR20291_1344','proC1','CDR20291_1346','CDR20291_1347','rpoD2','CDR20291_1349','deoC','CDR20291_1351','CDR20291_1352','CDR20291_1353','CDR20291_1354','CDR20291_1355','CDR20291_1356','CDR20291_1357','CDR20291_1358','CDR20291_1359','CDR20291_1360','panC','panB','CDR20291_1363','CDR20291_1364','CDR20291_1365','feoB2','feoA2','CDR20291_1368','CDR20291_1369','tyrR','CDR20291_1371','CDR20291_1372','CDR20291_1373','CDR20291_1374','pyrC','CDR20291_1376','CDR20291_1377','CDR20291_1378','CDR20291_1379','CDR20291_1380','CDR20291_1381','CDR20291_1382','CDR20291_1383','sseA','CDR20291_1385','aspB','CDR20291_1387','CDR20291_1388','CDR20291_1389','CDR20291_1390','CDR20291_1391','CDR20291_1392','CDR20291_1393','CDR20291_1394','CDR20291_1395','hisZ','hisG','hisC','hisB','hisH','hisA','hisF','hisI','CDR20291_1404','CDR20291_1405','CDR20291_1406','sigV','CDR20291_1408','CDR20291_1409','CDR20291_1410','CDR20291_1411','CDR20291_1412','ilvC','ilvB','CDR20291_1415','CDR20291_1416','CDR20291_1417','CDR20291_1418','CDR20291_1419','CDR20291_1420','CDR20291_1421','CDR20291_1422','CDR20291_142
3','CDR20291_1424','CDR20291_1425','CDR20291_1426','CDR20291_1427','CDR20291_1428','CDR20291_1429','CDR20291_1430','CDR20291_1431','CDR20291_1432','CDR20291_1433','CDR20291_1434', 'CDR20291_1435','CDR20291_1436','CDR20291_1437','CDR20291_1438','CDR20291_1439','CDR20291_1440','CDR20291_1441','CDR20291_1442','CDR20291_1443','CDR20291_1444','CDR20291_1445','CDR20291_1446','CDR20291_1447','CDR20291_1448','CDR20291_1449','CDR20291_1450','CDR20291_1451','CDR20291_1452','CDR20291_1453','CDR20291_1454','CDR20291_1455','CDR20291_1456','CDR20291_1457','CDR20291_1458','CDR20291_1459','CDR20291_1460','CDR20291_1461','CDR20291_1462','CDR20291_1463','CDR20291_1464','CDR20291_1465','CDR20291_1466','CDR20291_1467','CDR20291_1468','CDR20291_1469','CDR20291_1470','CDR20291_1471','CDR20291_1472','CDR20291_1473','pgm1','CDR20291_1475','CDR20291_1476','hom2','CDR20291_1478','hisD','CDR20291_1480','CDR20291_1481','CDR20291_1482','CDR20291_1483','CDR20291_1484','CDR20291_1485','CDR20291_1486','CDR20291_1487','CDR20291_1488','kdpA','kdpB','kdpC','cysM','cysA','map2','CDR20291_1495','CDR20291_1496','thiD','thiK','thiE1','CDR20291_1500','CDR20291_1501','CDR20291_1502','CDR20291_1503','CDR20291_1504','CDR20291_1505','CDR20291_1506','CDR20291_1507','CDR20291_1508','CDR20291_1509','CDR20291_1510','CDR20291_1511','CDR20291_1512','CDR20291_1513','CDR20291_1514','CDR20291_1515','CDR20291_1516','CDR20291_1517','CDR20291_1518','CDR20291_1519','CDR20291_1520','CDR20291_1521','vanR','vanS','vanG','CDR20291_1525','vanTG','CDR20291_1527','CDR20291_1528','sodA','asnB','CDR20291_1531','CDR20291_1532','CDR20291_1533','CDR20291_1534','CDR20291_1535','CDR20291_1536','CDR20291_1537','CDR20291_1538','CDR20291_1539','CDR20291_1540','CDR20291_1541','CDR20291_1542','CDR20291_1543','CDR20291_1544','CDR20291_1545','CDR20291_1546','CDR20291_1547','CDR20291_1548','CDR20291_1549','CDR20291_1550','CDR20291_1551','lplA','CDR20291_1553','CDR20291_1554','CDR20291_1555','gcvPB','CDR20291_1557','CDR20291_1558','CDR20291_1559','guaD','CDR20291_1561','cysK','CDR20291_1563','CDR20291_1564','CDR20291_1565','CDR20291_1566','CDR20291_1567','CDR20291_1568','CDR20291_1569','CDR20291_1570','CDR20291_1571','CDR20291_1572','pcp','CDR20291_1574','CDR20291_1575','CDR20291_1576','CDR20291_1577','CDR20291_1578','CDR20291_1579','iunH','CDR20291_1581','CDR20291_1582','CDR20291_1583','CDR20291_1584','CDR20291_1585','CDR20291_1586','CDR20291_1587','trxA1','trxB1','CDR20291_1590','CDR20291_1591','CDR20291_1592','CDR20291_1593','CDR20291_1594','ribH','ribA','ribB','ribD','recQ','thiC','thiS','thiF','thiG','thiH','thiE2','CDR20291_1606','CDR20291_1607','CDR20291_1608','CDR20291_1609','CDR20291_1610','moaB','moaA','moaC','CDR20291_1614','CDR20291_1615','CDR20291_1616','CDR20291_1617','CDR20291_1618','CDR20291_1619','CDR20291_1620','CDR20291_1621','CDR20291_1622','CDR20291_1623','CDR20291_1624','CDR20291_1625','CDR20291_1626','CDR20291_1627','CDR20291_1628','CDR20291_1629','CDR20291_1630','CDR20291_1631','CDR20291_1632','CDR20291_1633','CDR20291_1634','CDR20291_1635','CDR20291_1636','grdG','grdF','CDR20291_1639','gltC','add','CDR20291_1642','CDR20291_1643','hgdC','CDR20291_1645','CDR20291_1646','CDR20291_1647','CDR20291_1648','CDR20291_1649','CDR20291_1650','def1','CDR20291_1652','CDR20291_1653','CDR20291_1654','CDR20291_1655','CDR20291_1656','CDR20291_1657','CDR20291_1658','CDR20291_1659','CDR20291_1660','CDR20291_1661','gapA','CDR20291_1663','CDR20291_1664','ogt2','CDR20291_1666','CDR20291_1667','CDR20291_1668','CDR20291_1669','CDR20291_1670','CDR20291_1671','CDR20291_1
672','CDR20291_1673','CDR20291_1674','CDR20291_1675','CDR20291_1676','CDR20291_1677','CDR20291_1678','truA2','argG','CDR20291_1681','CDR20291_1682','CDR20291_1683','CDR20291_1684','CDR20291_1685','CDR20291_1686','CDR20291_1687','CDR20291_1688','CDR20291_1689','CDR20291_1690','CDR20291_1691','CDR20291_1692','CDR20291_1693','CDR20291_1694','CDR20291_1695','CDR20291_1696','CDR20291_1697','CDR20291_1698','scrR','sacA','scrK','CDR20291_1702','CDR20291_1703','CDR20291_1704','CDR20291_1705','CDR20291_1706','CDR20291_1707','CDR20291_1708','CDR20291_1709','CDR20291_1710','cmk','CDR20291_1712','ispH','CDR20291_1714','ade','bcp','CDR20291_1717','CDR20291_1718','cysD','metA','CDR20291_1721','ftsH1','kdpD','kdpE','CDR20291_1725','CDR20291_1726','CDR20291_1727','aroB','aroA','aroC','pheA','aroE','aroK','tyrC','CDR20291_1735','CDR20291_1736','CDR20291_1737','CDR20291_1738','CDR20291_1739','CDR20291_1740','CDR20291_1741','CDR20291_1742','CDR20291_1743','CDR20291_1744','CDR20291_1745','CDR20291_1746','CDR20291_1747','CDR20291_1748','CDR20291_1749','CDR20291_1750','CDR20291_1751','CDR20291_1752','CDR20291_1753','CDR20291_1754','CDR20291_1755','CDR20291_1756','CDR20291_1757','CDR20291_1758','CDR20291_1759','CDR20291_1760','CDR20291_1761','CDR20291_1762','CDR20291_1763','CDR20291_1764','CDR20291_1765','yobD','CDR20291_1767','CDR20291_1768','CDR20291_1769','CDR20291_1770','CDR20291_1771','CDR20291_1772','CDR20291_1773','CDR20291_1774','CDR20291_1775','CDR20291_1776','CDR20291_1777','CDR20291_1778','CDR20291_1779','CDR20291_1780','CDR20291_1781','CDR20291_1782','CDR20291_1783','CDR20291_1784','CDR20291_1785','CDR20291_1786','CDR20291_1787','CDR20291_1788','CDR20291_1789','CDR20291_1790','CDR20291_1791','CDR20291_1792','CDR20291_1793','CDR20291_1794','CDR20291_1795','CDR20291_1796','CDR20291_1797','CDR20291_1798','CDR20291_1799','CDR20291_1800','CDR20291_1801','CDR20291_1802','CDR20291_1803','CDR20291_1804','CDR20291_1805','vncR','vncS','CDR20291_1808','CDR20291_1809','CDR20291_1810','CDR20291_1811','CDR20291_1812','CDR20291_1813','CDR20291_1814','CDR20291_1815','CDR20291_1816','CDR20291_1817','CDR20291_1818','CDR20291_1819','CDR20291_1820','CDR20291_1821','CDR20291_1822','CDR20291_1823','CDR20291_1824','CDR20291_1825','CDR20291_1826','CDR20291_1827','pduQ','pduU','pduV','CDR20291_1831','CDR20291_1832','eutA','eutB','eutC','eutL','CDR20291_1837','CDR20291_1838','eutM','eutT','CDR20291_1841','CDR20291_1842','eutN','CDR20291_1844','eutH','eutQ','CDR20291_1847','CDR20291_1848','CDR20291_1849','CDR20291_1850','CDR20291_1851','CDR20291_1852','CDR20291_1853','dinR','CDR20291_1855','CDR20291_1856','CDR20291_1857','spoVS','accA','accD','accC','accB','CDR20291_1863','CDR20291_1864','CDR20291_1865','CDR20291_1866','CDR20291_1867','CDR20291_1868','CDR20291_1869','CDR20291_1870','CDR20291_1871','CDR20291_1872','CDR20291_1873','CDR20291_1874','CDR20291_1875','CDR20291_1876','CDR20291_1877','CDR20291_1878','CDR20291_1879','CDR20291_1880','CDR20291_1881','CDR20291_1882','CDR20291_1883','CDR20291_1884','CDR20291_1885','CDR20291_1886','CDR20291_1887','CDR20291_1888','CDR20291_1889','CDR20291_1890','CDR20291_1891','CDR20291_1892','CDR20291_1893','CDR20291_1894','CDR20291_1895','CDR20291_1896','hfQ','miaA','mutL','mutS','CDR20291_1901','CDR20291_1902','CDR20291_1903','CDR20291_1904','CDR20291_1905','CDR20291_1906','CDR20291_1907','CDR20291_1908','CDR20291_1909','CDR20291_1910','CDR20291_1911','trpP','CDR20291_1913','CDR20291_1914','cspC_1','CDR20291_1916','CDR20291_1917','CDR20291_1918','CDR20291_1919','CDR20291_1920','CDR20291_192
1','CDR20291_1922','CDR20291_1923','CDR20291_1924','fldX','ispD_2','ilvD','CDR20291_1928','CDR20291_1929','CDR20291_1930','CDR20291_1931','CDR20291_1932','clpB','CDR20291_1934','CDR20291_1935','CDR20291_1936','CDR20291_1937','CDR20291_1938','CDR20291_1939','CDR20291_1940','racX','CDR20291_1942','argF','argM','argB','argJ','argC','CDR20291_1948','CDR20291_1949','CDR20291_1950','CDR20291_1951','CDR20291_1952','CDR20291_1953','CDR20291_1954','CDR20291_1955','CDR20291_1956','rluB','CDR20291_1958','CDR20291_1959','lysA','lysC','CDR20291_1962','CDR20291_1963','CDR20291_1964','CDR20291_1965','glnS','CDR20291_1967','CDR20291_1968','CDR20291_1969','CDR20291_1970','CDR20291_1971','CDR20291_1972','CDR20291_1973','CDR20291_1974','CDR20291_1975','CDR20291_1976','CDR20291_1977','CDR20291_1978','CDR20291_1979','xdhA1','pucC1','pbuX_1','CDR20291_1983','pyrD_2','CDR20291_1985','xdhA2','pucC2','xdhC','CDR20291_1989','CDR20291_1990','CDR20291_1991','dpaL1','CDR20291_1993','CDR20291_1994','cutS','CDR20291_1996','CDR20291_1997','CDR20291_1998','CDR20291_1999','CDR20291_2000','CDR20291_2001','CDR20291_2002','CDR20291_2003','CDR20291_2004','CDR20291_2005','CDR20291_2006','CDR20291_2007','CDR20291_2008','CDR20291_2009','CDR20291_2010','CDR20291_2011','CDR20291_2012','CDR20291_2013','CDR20291_2014','CDR20291_2015','CDR20291_2016','fsaB','CDR20291_2018','CDR20291_2019','CDR20291_2020','CDR20291_2021','CDR20291_2022','bipA','CDR20291_2024','thrC','thrB','CDR20291_2027','CDR20291_2028','CDR20291_2029','CDR20291_2030','CDR20291_2031','CDR20291_2032','CDR20291_2033','CDR20291_2034','ispG','CDR20291_2036','dxr','CDR20291_2038','CDR20291_2039','CDR20291_2040','sA','uppS_1','CDR20291_2043','rrf','pyrH','tsf','rpsB','CDR20291_2048','CDR20291_2049','CDR20291_2050','CDR20291_2051','CDR20291_2052','CDR20291_2053','CDR20291_2054','CDR20291_2055','CDR20291_2056','CDR20291_2057','CDR20291_2058','CDR20291_2059','CDR20291_2060','CDR20291_2061','CDR20291_2062','CDR20291_2063','gabT','CDR20291_2065','CDR20291_2066','CDR20291_2067','CDR20291_2068','CDR20291_2069','ldh','CDR20291_2071','msrAB','CDR20291_2073','hcp','CDR20291_2075','CDR20291_2076','CDR20291_2077','CDR20291_2078','CDR20291_2079','CDR20291_2080','CDR20291_2081','CDR20291_2082','CDR20291_2083','CDR20291_2084','CDR20291_2085','CDR20291_2086','CDR20291_2087','csdA','CDR20291_2089','CDR20291_2090','CDR20291_2091','CDR20291_2092','CDR20291_2093','mapA','pgmB_1','CDR20291_2096','CDR20291_2097','CDR20291_2098','CDR20291_2099','CDR20291_2100','CDR20291_2101','CDR20291_2102','CDR20291_2103','CDR20291_2104','CDR20291_2105','CDR20291_2106','CDR20291_2107','CDR20291_2108','CDR20291_2109','CDR20291_2110','CDR20291_2111','CDR20291_2112','aldH','CDR20291_2114','CDR20291_2115','CDR20291_2116','CDR20291_2117','CDR20291_2118','CDR20291_2119','CDR20291_2120','CDR20291_2121','CDR20291_2122','CDR20291_2123','aroD','sat','CDR20291_2126','CDR20291_2127','CDR20291_2128','nirC','asrC','asrB','asrA','CDR20291_2133','CDR20291_2134','CDR20291_2135','CDR20291_2136','CDR20291_2137','CDR20291_2138','nanA','nanE','CDR20291_2141', 
'CDR20291_2142','CDR20291_2143','asnS','CDR20291_2145','cspC_2','cspBA','CDR20291_2148','CDR20291_2149','CDR20291_2150','CDR20291_2151','kamA','CDR20291_2153','CDR20291_2154','CDR20291_2155','CDR20291_2156','CDR20291_2157','CDR20291_2158','CDR20291_2159','CDR20291_2160','CDR20291_2161','CDR20291_2162','CDR20291_2163','CDR20291_2164','CDR20291_2165','CDR20291_2166','CDR20291_2167','fruK','CDR20291_2169','CDR20291_2170','CDR20291_2171','CDR20291_2172','CDR20291_2173','CDR20291_2174','CDR20291_2175','araD','CDR20291_2177','CDR20291_2178','CDR20291_2179','CDR20291_2180','CDR20291_2181','CDR20291_2182','CDR20291_2183','CDR20291_2184','CDR20291_2185','CDR20291_2186','CDR20291_2187','CDR20291_2188','CDR20291_2189','CDR20291_2190','CDR20291_2191','CDR20291_2192','CDR20291_2193','CDR20291_2194','CDR20291_2195','CDR20291_2196','CDR20291_2197','cspD','CDR20291_2200','modB_2','CDR20291_2202','CDR20291_2203','CDR20291_2204','CDR20291_2205','CDR20291_2206','CDR20291_2207','rpe','rpiB1','tkt\'_1','tkt_1','CDR20291_2212','gatD','gatC','gatB','gatA','CDR20291_2217','tal1','xpt','mtlD','mtlF','mtlR','mtlA','guaB','CDR20291_2225','CDR20291_2226','abfH','abfT','CDR20291_2229','abfD','sucD','cat1','CDR20291_2233','CDR20291_2234','CDR20291_2235','CDR20291_2236','grdD','grdC','grdB','grdA','grdE','trxA2','trxB3','grdX','CDR20291_2245','CDR20291_2246','CDR20291_2247','CDR20291_2248','CDR20291_2249','CDR20291_2250','CDR20291_2251','CDR20291_2252','CDR20291_2253','CDR20291_2254','CDR20291_2255','CDR20291_2256','nadC','nadB','nadA','CDR20291_2260','CDR20291_2261','CDR20291_2262','CDR20291_2263','CDR20291_2264','CDR20291_2265','CDR20291_2266','iorB','iorA','CDR20291_2269','CDR20291_2270','CDR20291_2271','CDR20291_2272','CDR20291_2273','CDR20291_2274','CDR20291_2275','CDR20291_2276','CDR20291_2277','CDR20291_2278','CDR20291_2279','CDR20291_2280','CDR20291_2281','CDR20291_2282','CDR20291_2283','CDR20291_2284','CDR20291_2285','CDR20291_2286','CDR20291_2287','CDR20291_2288','CDR20291_2289','cotJB2','cotJC2','CDR20291_2292','dut','CDR20291_2294','CDR20291_2295','CDR20291_2296','CDR20291_2297','CDR20291_2298','CDR20291_2299','CDR20291_2300','ppdK','CDR20291_2302','CDR20291_2303','CDR20291_2304','srlB_2','CDR20291_2306','CDR20291_2307','srlE_2','srlA','CDR20291_2310','CDR20291_2311','CDR20291_2312','acp','CDR20291_2314','CDR20291_2315','CDR20291_2316','CDR20291_2317','CDR20291_2318','CDR20291_2319','CDR20291_2320','CDR20291_2321','CDR20291_2322','CDR20291_2323','glyS','glyQ','CDR20291_2326','recO','CDR20291_2328','era','d','CDR20291_2331','CDR20291_2332','CDR20291_2333','spoIV','CDR20291_2335','CDR20291_2336','CDR20291_2337','rpsU_1','rpsU_2','CDR20291_2340','CDR20291_2341','CDR20291_2342','prmA','CDR20291_2344','CDR20291_2345','CDR20291_2346','CDR20291_2347','CDR20291_2348','CDR20291_2349','CDR20291_2350','CDR20291_2351','glcK','dnaJ','dnaK','grpE','hrcA','hemN','CDR20291_2358','CDR20291_2359','lepA','CDR20291_2361','spoIIP','gpr','CDR20291_2364','CDR20291_2365','rpsT','CDR20291_2367','CDR20291_2368','CDR20291_2369','CDR20291_2370','CDR20291_2371','CDR20291_2372','CDR20291_2373','ung','CDR20291_2375','CDR20291_2376','CDR20291_2377','CDR20291_2378','CDR20291_2379','CDR20291_2380','CDR20291_2381','CDR20291_2382','CDR20291_2383','pmi','CDR20291_2385','selB','fdhA','selD','comE','CDR20291_2390','CDR20291_2391','argH','CDR20291_2393','CDR20291_2394','CDR20291_2395','CDR20291_2396','CDR20291_2397','CDR20291_2398','CDR20291_2399','CDR20291_2400','CDR20291_2401','CDR20291_2402','CDR20291_2403','CDR20291_2404','CDR20291_2405','tdcB'
,'aspD','CDR20291_2408','CDR20291_2409','leuS','CDR20291_2411','CDR20291_2412','nadD','CDR20291_2414','gbeA','CDR20291_2416','CDR20291_2417','CDR20291_2418','CDR20291_2419','CDR20291_2420','CDR20291_2421','CDR20291_2422','CDR20291_2423','CDR20291_2424','CDR20291_2425','elaC','nox','CDR20291_2428','CDR20291_2429','CDR20291_2430','CDR20291_2431','CDR20291_2432','CDR20291_2433','CDR20291_2434','CDR20291_2435','CDR20291_2436','CDR20291_2437','CDR20291_2438','tlpB_3','CDR20291_2440','CDR20291_2441','CDR20291_2442','coaD','CDR20291_2444','recG','CDR20291_2446','CDR20291_2447','rpmB','CDR20291_2449','CDR20291_2450','CDR20291_2451','CDR20291_2452','CDR20291_2453','CDR20291_2454','CDR20291_2455','CDR20291_2456','CDR20291_2457','CDR20291_2458','CDR20291_2459','CDR20291_2460','CDR20291_2461','CDR20291_2462','CDR20291_2463','stk','stp','CDR20291_2466','rsmB','CDR20291_2468','CDR20291_2469','fmt','def2','priA','coaBC','rpoZ','gmk','CDR20291_2476','dapF','ltaE','fbpA','CDR20291_2480','uraA','pyrR','CDR20291_2483','lspA','CDR20291_2485','CDR20291_2486','CDR20291_2487','CDR20291_2488','CDR20291_2489','CDR20291_2490','CDR20291_2491','CDR20291_2492','trpS','mtnN','CDR20291_2495','CDR20291_2496','CDR20291_2497','CDR20291_2498','CDR20291_2499','CDR20291_2500','CDR20291_2501','CDR20291_2502','CDR20291_2503','CDR20291_2504','CDR20291_2505','CDR20291_2506','CDR20291_2507','CDR20291_2508','CDR20291_2509','CDR20291_2510','CDR20291_2511','CDR20291_2512','spoIVA','CDR20291_2514','CDR20291_2515','CDR20291_2516','CDR20291_2517','glyC','CDR20291_2519','CDR20291_2520','CDR20291_2521','CDR20291_2522','CDR20291_2523','CDR20291_2524','CDR20291_2525','CDR20291_2526','CDR20291_2527','CDR20291_2528','CDR20291_2529','sigG','sigE','spoIIGA','CDR20291_2533','ftsZ','sbp','CDR20291_2536','CDR20291_2537','CDR20291_2538','murG','ftsW','murD','mraY','murF','spoVD','CDR20291_2545','mraW','lgt','CDR20291_2548','CDR20291_2549','CDR20291_2550','CDR20291_2551','murE','CDR20291_2553','crr_1','ptsG','licT','CDR20291_2557','appF','appD','appA','appB','appC','CDR20291_2563','thlA2','ctfA','ctfB','bdhA','CDR20291_2568','CDR20291_2569','nifJ','CDR20291_2571','CDR20291_2572','CDR20291_2573','CDR20291_2574','CDR20291_2575','sspA','CDR20291_2577','CDR20291_2578','hpt_1','CDR20291_2580','CDR20291_2581','CDR20291_2582','asnA','CDR20291_2584','CDR20291_2585','CDR20291_2586','CDR20291_2587','CDR20291_2588','CDR20291_2589','brnQ','CDR20291_2591','CDR20291_2592','CDR20291_2593','CDR20291_2594','CDR20291_2595','ydiB','CDR20291_2597','CDR20291_2598','CDR20291_2599','CDR20291_2600','CDR20291_2601','galE','gtaB_1','CDR20291_2604','CDR20291_2605','CDR20291_2606','CDR20291_2607','23S rRNA_8','16S 
rRNA_7','CDR20291_2608','CDR20291_2609','CDR20291_2610','CDR20291_2611','CDR20291_2612','CDR20291_2613','CDR20291_2614','glyA_1','CDR20291_2616','CDR20291_2617','CDR20291_2618','CDR20291_2619','CDR20291_2620','CDR20291_2621','CDR20291_2622','mleN','CDR20291_2624','CDR20291_2625','CDR20291_2626','CDR20291_2627','aspS','hisS','CDR20291_2630','CDR20291_2631','dtd','relA','apt','recJ','ubiB','CDR20291_2637','CDR20291_2638','agrD','agrB','CDR20291_2641','CDR20291_2642','ptsI','ptsH','CDR20291_2645','CDR20291_2646','CDR20291_2647','CDR20291_2648','CDR20291_2649','uppS_2','CDR20291_2651','CDR20291_2652','CDR20291_2653','CDR20291_2654','CDR20291_2655','CDR20291_2656','CDR20291_2657','CDR20291_2658','rkpK','tuaG','CDR20291_2661','CDR20291_2662','CDR20291_2663','CDR20291_2664','CDR20291_2665','CDR20291_2666','manC','pgm2','mviN','CDR20291_2670','CDR20291_2671','CDR20291_2672','CDR20291_2673','CDR20291_2674','CDR20291_2675','cwp84','CDR20291_2677','cwp66','CDR20291_2679','CDR20291_2680','secA2','slpA','CDR20291_2683','CDR20291_2684','CDR20291_2685','CDR20291_2686','CDR20291_2687','CDR20291_2688','CDR20291_2689','CDR20291_2690','tgt','CDR20291_2692','queA','ruvB','ruvA','ruvC','CDR20291_2697','CDR20291_2698','CDR20291_2699','CDR20291_2700','CDR20291_2701','garR','CDR20291_2703','CDR20291_2704','CDR20291_2705','CDR20291_2706','CDR20291_2707','CDR20291_2708','CDR20291_2709','CDR20291_2710','CDR20291_2711','CDR20291_2712','CDR20291_2713','CDR20291_2714','CDR20291_2715','tlpB_4','CDR20291_2717','CDR20291_2718','CDR20291_2719','CDR20291_2720','CDR20291_2721','CDR20291_2722','CDR20291_2723','CDR20291_2724','glyA_2','abgT','CDR20291_2727','CDR20291_2728','CDR20291_2729','CDR20291_2730','serS2','CDR20291_2732','CDR20291_2733','CDR20291_2734','CDR20291_2735','rbr_2','CDR20291_2737','CDR20291_2738','CDR20291_2739','CDR20291_2740','CDR20291_2741','dltC','dltB','dltA','dltD','CDR20291_2746','CDR20291_2747','CDR20291_2748','CDR20291_2749','CDR20291_2750','CDR20291_2751','CDR20291_2752','CDR20291_2753','CDR20291_2754','CDR20291_2755','CDR20291_2756','CDR20291_2757','CDR20291_2758','CDR20291_2759','CDR20291_2760','CDR20291_2761','CDR20291_2762','tldD','CDR20291_2764','CDR20291_2765','kdgT','uxaA', 
'uxaA','CDR20291_2769','CDR20291_2770','fhuC','fhuG','fhuB','fhuD','CDR20291_2775','celC','CDR20291_2777','celF','celB','licB','CDR20291_2781','CDR20291_2782','CDR20291_2783','CDR20291_2784','CDR20291_2785','CDR20291_2786','ntpD','ntpB','ntpA','ntpG','ntpC','ntpE','ntpK','ntpI','CDR20291_2795','CDR20291_2796','CDR20291_2797','CDR20291_2798','CDR20291_2799','adhE_2','CDR20291_2801','dpaB','dpaA','CDR20291_2804','CDR20291_2805','bioY','CDR20291_2807','CDR20291_2808','CDR20291_2809','CDR20291_2810','CDR20291_2811','CDR20291_2812','CDR20291_2813','CDR20291_2814','CDR20291_2815','CDR20291_2816','CDR20291_2817','CDR20291_2818','CDR20291_2819','CDR20291_2820','bacA2','CDR20291_2822','CDR20291_2823','CDR20291_2824','CDR20291_2825','CDR20291_2826','CDR20291_2827','CDR20291_2828','nrdF','nrdE','CDR20291_2831','CDR20291_2832','CDR20291_2833','CDR20291_2834','CDR20291_2835','CDR20291_2836','CDR20291_2837','CDR20291_2838','CDR20291_2839','CDR20291_2840','CDR20291_2841','CDR20291_2842','CDR20291_2843','CDR20291_2844','CDR20291_2845','CDR20291_2846','CDR20291_2847','CDR20291_2848','CDR20291_2849','CDR20291_2850','CDR20291_2851','CDR20291_2852','CDR20291_2853','CDR20291_2854','CDR20291_2855','CDR20291_2856','CDR20291_2857','CDR20291_2858','CDR20291_2859','CDR20291_2860','CDR20291_2861','crr_2','CDR20291_2863','malY','malX_2','CDR20291_2866','CDR20291_2867','CDR20291_2868','CDR20291_2869','CDR20291_2870','CDR20291_2871','CDR20291_2872','CDR20291_2873','CDR20291_2874','CDR20291_2875','pepI','CDR20291_2877','CDR20291_2878','CDR20291_2879','CDR20291_2880','CDR20291_2881','CDR20291_2882','CDR20291_2883','CDR20291_2884','CDR20291_2885','CDR20291_2886','CDR20291_2887','CDR20291_2888','CDR20291_2889','CDR20291_2890','CDR20291_2891','CDR20291_2892','CDR20291_2893','glvA','glvC','glvR','CDR20291_2897','xylA','xylB','xylR','CDR20291_2901','CDR20291_2902','CDR20291_2903','CDR20291_2904','CDR20291_2905','CDR20291_2906','CDR20291_2907','CDR20291_2908','CDR20291_2909','CDR20291_2910','CDR20291_2911','CDR20291_2912','agaY_1','tagT','tagK','CDR20291_2916','CDR20291_2917','ascB','CDR20291_2919','CDR20291_2920','CDR20291_2921','CDR20291_2922','garK','CDR20291_2924','hrsA','CDR20291_2926','CDR20291_2927','CDR20291_2928','treR','treA','CDR20291_2931','CDR20291_2932','CDR20291_2933','bglA1','CDR20291_2935','bglG1','CDR20291_2937','CDR20291_2938','CDR20291_2939','CDR20291_2940','CDR20291_2941','CDR20291_2942','CDR20291_2943','CDR20291_2944','CDR20291_2945','CDR20291_2946','CDR20291_2947','CDR20291_2948','CDR20291_2949','CDR20291_2950','CDR20291_2951','CDR20291_2952','CDR20291_2953','bglA2','bglF_2','bglG_2','CDR20291_2957','CDR20291_2958','tndX','CDR20291_2960','CDR20291_2961','CDR20291_2962','CDR20291_2963','CDR20291_2964','CDR20291_2965','CDR20291_2966','CDR20291_2967','ascB2','CDR20291_2969','bglG2','CDR20291_2971','pgmB_2','CDR20291_2973','acsB3','CDR20291_2975','CDR20291_2976','CDR20291_2977','CDR20291_2978','CDR20291_2979','CDR20291_2980','CDR20291_2981','CDR20291_2982','CDR20291_2983','CDR20291_2984','CDR20291_2985','CDR20291_2986','CDR20291_2987','CDR20291_2988','CDR20291_2989','CDR20291_2990','CDR20291_2991','CDR20291_2992','CDR20291_2993','CDR20291_2994','CDR20291_2995','CDR20291_2996','CDR20291_2997','CDR20291_2998','CDR20291_2999','CDR20291_3000','CDR20291_3001','CDR20291_3002','CDR20291_3003','CDR20291_3004','CDR20291_3005','CDR20291_3006','CDR20291_3007','CDR20291_3008','CDR20291_3009','CDR20291_3010','smpB','CDR20291_3012','CDR20291_3013','CDR20291_3014','CDR20291_3015','CDR20291_3016','CDR20291_3017','rnr','CDR20
291_3019','CDR20291_3020','CDR20291_3021','CDR20291_3022','CDR20291_3023','secG','CDR20291_3025','eno','gpmI','tpi','pgK','gapB','cggR','glnF','xdhA3','CDR20291_3034','CDR20291_3035','pbuX_2','CDR20291_3037','CDR20291_3038','CDR20291_3039','dpaL2','CDR20291_3041','CDR20291_3042','tdcF','CDR20291_3044','CDR20291_3045','CDR20291_3046','CDR20291_3047','CDR20291_3048','CDR20291_3049','CDR20291_3050','CDR20291_3051','CDR20291_3052','CDR20291_3053','CDR20291_3054','CDR20291_3055','CDR20291_3056','CDR20291_3057','cme','CDR20291_3059','CDR20291_3060','CDR20291_3061','CDR20291_3062','CDR20291_3063','CDR20291_3064','CDR20291_3065','CDR20291_3066','CDR20291_3067','CDR20291_3068','CDR20291_3069','CDR20291_3070','CDR20291_3071','CDR20291_3072','CDR20291_3073','CDR20291_3074','CDR20291_3075','snorO','CDR20291_3077','hslO','CDR20291_3079','CDR20291_3080','CDR20291_3081','sdaB','dapA1','asd','dapA2','dapB1','dapD','CDR20291_3088','dapB2','bclA2','hpt_2','CDR20291_3092','CDR20291_3093','CDR20291_3094','CDR20291_3095','CDR20291_3096','prdF','CDR20291_3098','CDR20291_3099','CDR20291_3100','prdB','CDR20291_3102','prdA','prdR','prdC','CDR20291_3106','sspB','CDR20291_3108','CDR20291_3109','CDR20291_3110','folC','CDR20291_3112','CDR20291_3113','valS','CDR20291_3115','CDR20291_3116','CDR20291_3117','phoU','phoT','CDR20291_3120','CDR20291_3121','CDR20291_3122','CDR20291_3123','CDR20291_3124','CDR20291_3125','CDR20291_3126','CDR20291_3127','CDR20291_3128','CDR20291_3129','pepF','CDR20291_3131','CDR20291_3132','CDR20291_3133','feoA3','feoB3','CDR20291_3136','CDR20291_3137','CDR20291_3138','CDR20291_3139','CDR20291_3140','CDR20291_3141','proC2','pflD','pflE','CDR20291_3145','pgi','CDR20291_3147','CDR20291_3148','CDR20291_3149','CDR20291_3150','CDR20291_3151','CDR20291_3152','CDR20291_3153','CDR20291_3154','CDR20291_3155','CDR20291_3156','CDR20291_3157','CDR20291_3158','CDR20291_3159','CDR20291_3160','engB','lon','chrA','chrA', 'clpX','clpP1','tig','CDR20291_3168','rph','dnaL','CDR20291_3171','CDR20291_3172','CDR20291_3173','hydN1','hydA','hydN2','CDR20291_3177','fdhD','fdhF','CDR20291_3180','CDR20291_3181','CDR20291_3182','CDR20291_3183','CDR20291_3184','CDR20291_3185','CDR20291_3186','CDR20291_3187','CDR20291_3187A','CDR20291_3188','CDR20291_3189','CDR20291_3190','CDR20291_3191','CDR20291_3192','bclA3','CDR20291_3194','clpP2','CDR20291_3196','CDR20291_3197','CDR20291_3198','CDR20291_3199','CDR20291_3200','CDR20291_3201','CDR20291_3202','CDR20291_3203','CDR20291_3204','CDR20291_3205','CDR20291_3206','CDR20291_3207','CDR20291_3208','CDR20291_3209','CDR20291_3210','CDR20291_3211','CDR20291_3212','CDR20291_3213','CDR20291_3214','CDR20291_3215','pykF','pfkA','dnaE','CDR20291_3219','CDR20291_3220','CDR20291_3221','CDR20291_3222','CDR20291_3223','murB','CDR20291_3225','cls_2','hymA','hymB','hymC','CDR20291_3230','hprK','uvrC','uvrA','uvrB','CDR20291_3235','CDR20291_3236','CDR20291_3237','CDR20291_3238','CDR20291_3239','5S rRNA_7','23S rRNA_9','16S 
rRNA_8','CDR20291_3240','hemB','hemD','hemC','cbiK','cbiJ','cbiH','cbiG','cbiF','cbiT','cbiE','cbiD','cbiC','CDR20291_3253','cobD','cbiB','cbiA','cbiP','CDR20291_3258','cobS','cobU','cobT','CDR20291_3262','CDR20291_3263','CDR20291_3264','CDR20291_3265','CDR20291_3266','CDR20291_3267','celM','CDR20291_3269','agaY_2','agaS','CDR20291_3272','lacC','CDR20291_3274','agaA','CDR20291_3276','CDR20291_3277','CDR20291_3278','CDR20291_3279','CDR20291_3280','CDR20291_3281','CDR20291_3282','CDR20291_3283','CDR20291_3284','CDR20291_3285','CDR20291_3286','CDR20291_3287','CDR20291_3288','CDR20291_3289','CDR20291_3290','CDR20291_3291','CDR20291_3292','CDR20291_3293','CDR20291_3294','tkt\'_2','tkt_2','CDR20291_3297','CDR20291_3298','alr','CDR20291_3300','CDR20291_3301','acpS','atpC','atpD','atpG','atpA','atpH','atpF','atpE','atpB','atpI','atpZ','CDR20291_3313','CDR20291_3314','upp','rpiB2','CDR20291_3317','CDR20291_3318','CDR20291_3319','prfA','hemK','CDR20291_3322','rpmE','rho','gtaB_2','CDR20291_3326','spoIIE','CDR20291_3328','CDR20291_3329','CDR20291_3330','CDR20291_3331','CDR20291_3332','hupA','CDR20291_3334','spoIIIF','spoVT','prsA','mfd','pth','CDR20291_3340','CDR20291_3341','CDR20291_3342','CDR20291_3343','CDR20291_3344','CDR20291_3345','CDR20291_3346','CDR20291_3347','CDR20291_3348','CDR20291_3349','CDR20291_3350','prs','gcaD','spoVG','purR','murC','CDR20291_3356','CDR20291_3357','CDR20291_3358','CDR20291_3359','ksgA','CDR20291_3361','CDR20291_3362','CDR20291_3363','CDR20291_3364','CDR20291_3365','CDR20291_3366','phnM','phnL','phnK','phnJ','phnI','phnH','phnG','CDR20291_3374','metG','spmB','spmA','CDR20291_3378','CDR20291_3379','CDR20291_3380','CDR20291_3381','CDR20291_3382','CDR20291_3383','CDR20291_3384','CDR20291_3385','speA_2','CDR20291_3387','5S rRNA_8','23S rRNA_10','16S 
rRNA_9','CDR20291_3388','lysS','greA','dusB','CDR20291_3392','CDR20291_3393','CDR20291_3394','birA','ftsH2','CDR20291_3397','CDR20291_3398','murI','CDR20291_3400','spoIIR','CDR20291_3402','ipk','CDR20291_3404','veg','CDR20291_3406','CDR20291_3407','CDR20291_3408','CDR20291_3409','CDR20291_3410','CDR20291_3411','CDR20291_3412','CDR20291_3413','CDR20291_3414','mdeA','CDR20291_3416','CDR20291_3417','CDR20291_3418','CDR20291_3419','CDR20291_3420','CDR20291_3421','CDR20291_3422','CDR20291_3423','CDR20291_3424','CDR20291_3425','pyrAB1','pyrAA1','pyrAB2','pyrAA2','pyrF','CDR20291_3431','CDR20291_3432','CDR20291_3433','CDR20291_3434','CDR20291_3435','luxS','CDR20291_3437','CDR20291_3438','CDR20291_3439','CDR20291_3440','CDR20291_3441','CDR20291_3442','CDR20291_3443','CDR20291_3444','CDR20291_3445','CDR20291_3446','CDR20291_3447','CDR20291_3448','CDR20291_3449','CDR20291_3450','CDR20291_3451','CDR20291_3452','CDR20291_3453','CDR20291_3454','CDR20291_3455','CDR20291_3456','CDR20291_3457','CDR20291_3458','CDR20291_3459','CDR20291_3460','CDR20291_3461','CDR20291_3462','CDR20291_3463','CDR20291_3464','CDR20291_3465','CDR20291_3466','CDR20291_3467','CDR20291_3468','CDR20291_3469','CDR20291_3470','CDR20291_3471','CDR20291_3472','CDR20291_3473','CDR20291_3474','int-Tn','CDR20291_3476','CDR20291_3477','CDR20291_3478','CDR20291_3479','CDR20291_3480','CDR20291_3481','CDR20291_3482','CDR20291_3483','phnA','CDR20291_3485','CDR20291_3486','kdgA','CDR20291_3488','CDR20291_3489','CDR20291_3490','CDR20291_3491','CDR20291_3492','CDR20291_3493','CDR20291_3494','CDR20291_3495','CDR20291_3496','CDR20291_3497','CDR20291_3498','CDR20291_3499','CDR20291_3500','CDR20291_3501','CDR20291_3502','CDR20291_3503','CDR20291_3504','CDR20291_3505','CDR20291_3506','CDR20291_3507','CDR20291_3508','CDR20291_3509','CDR20291_3510','CDR20291_3511','CDR20291_3512','CDR20291_3513','purA','CDR20291_3515','dnaB','rplI','CDR20291_3518','CDR20291_3519','CDR20291_3520','rpsR','ssb','rpsF','CDR20291_3524','CDR20291_3525','CDR20291_3526','CDR20291_3527','CDR20291_3528','CDR20291_3529','CDR20291_3530','spo0J','soj','CDR20291_3533','CDR20291_3534','gidA','trmE','jag','oxaA1','CDR20291_3539','rnpA','rpmH'] # Parse RNASeq results rough = {} smooth = {} with open('/home/mjenior/Desktop/repos/Jenior_Cdifficile_2019/data/transcript/tamayo_etal/phase_variation/tamayo_rnaseq.tsv', 'r') as transcription: header = transcription.readline() for line in transcription: line = line.split() if len(line) == 0: continue try: gene = refseq_dict[line[0]] except: try: gene = gene_dict[line[0]] except: continue rough[gene] = (float(line[1]) + float(line[2])) / 2.0 smooth[gene] = (float(line[3]) + float(line[4])) / 2.0Phase Variation# BHI bhi = ['cpd00001_e', # water 'cpd00035_e', # alanine 'cpd00041_e', # aspartic acid 'cpd00023_e', # glutamic acid 'cpd00119_e', # histidine 'cpd00107_e', # leucine 'cpd00060_e', # methionine 'cpd00161_e', # threonine 'cpd00069_e', # tyrosine 'cpd00084_e', # cysteine 'cpd00033_e', # glycine 'cpd00322_e', # isoleucine 'cpd00066_e', # phenylalanine 'cpd00054_e', # serine 'cpd00065_e', # tryptophan 'cpd00156_e', # valine 'cpd00220_e', # riboflavin 'cpd00644_e', # pantothentate(B5) 'cpd00393_e', # folate(B9) 'cpd00133_e', # niacin(PP) 'cpd00263_e', # Pyridoxine 'cpd00104_e', # Biotin 'cpd00149_e', # Cobalt 'cpd00971_e', # sodium 'cpd00099_e', # Chloride 'cpd00205_e', # potassium 'cpd00009_e', # phosphate 'cpd00063_e', # calcium 'cpd00254_e', # magnesium 'cpd10515_e', # Fe2+ 'cpd00030_e', # Mn2+ 'cpd00242_e', # Carbonate 'cpd00226_e', # 
hypoxanthine 'cpd01242_e', # thyminose 'cpd00307_e', # cytosine 'cpd00092_e', # uracil 'cpd00117_e', # D-Alanine 'cpd00067_e', # H+ 'cpd00567_e', # D-Proline 'cpd00132_e', # L-Asparagine 'cpd00210_e', # Taurine 'cpd00320_e', # D-Aspartate 'cpd03279_e', # Deoxyinosine 'cpd00246_e', # Inosine 'cpd00311_e', # Guanosine 'cpd00367_e', # Cytidine 'cpd00277_e', # Deoxyguanosine 'cpd00182_e', # Adenosine 'cpd00654_e', # Deoxycytidine 'cpd00412_e', # Deoxyuridine 'cpd00438_e', # Deoxyadenosine 'cpd00274_e', # Citrulline 'cpd00186_e', # D-Glutamate 'cpd00637_e', # D-Methionine 'cpd00105_e', # D-Ribose 'cpd00305_e', # Thiamin 'cpd00309_e', # Xanthine 'cpd00098_e', # Choline 'cpd00053_e', # L-Glutamine 'cpd00039_e', # Lysine 'cpd00027_e', # D-glucose 'cpd00079_e', # D-glucose-6-phosphate 'cpd00064_e', # Ornithine 'cpd00051_e', # D-arginine 'cpd00207_e'] # Guanine bhi_exch = set(['EX_' + x for x in bhi]) for rxn in iCdR703.exchanges: rxn.lower_bound = 0. if rxn.id in bhi_exch: rxn.lower_bound = -1000. iCdR703_rough = riptide.maxfit_contextualize(model=iCdR703, transcriptome=rough) iCdR703_smooth = riptide.maxfit_contextualize(model=iCdR703, transcriptome=smooth) riptide.save_output(riptide_obj=iCdR703_rough, path='/home/mjenior/Desktop/repos/Jenior_Cdifficile_2019/data/transcript/tamayo_etal/phase_variation/riptide_rough_maxfit') riptide.save_output(riptide_obj=iCdR703_smooth, path='/home/mjenior/Desktop/repos/Jenior_Cdifficile_2019/data/transcript/tamayo_etal/phase_variation/riptide_smooth_maxfit') iCdR703_smooth = riptide.contextualize(model=iCdR703, transcriptome=smooth, fraction=0.325) iCdR703_rough = riptide.contextualize(model=iCdR703, transcriptome=rough, fraction=0.375)Initializing model and integrating transcriptomic data... Pruning zero flux subnetworks... Analyzing context-specific flux distributions... Reactions pruned to 284 from 1313 (78.37% change) Metabolites pruned to 283 from 1243 (77.23% change) Flux through the objective DECREASED to ~46.05 from ~125.62 (63.34% change) Context-specific metabolism correlates with transcriptome (r=0.149, p=0.012 *) RIPTiDe completed in 18 seconds Initializing model and integrating transcriptomic data... Pruning zero flux subnetworks... Analyzing context-specific flux distributions... Reactions pruned to 284 from 1313 (78.37% change) Metabolites pruned to 283 from 1243 (77.23% change) Flux through the objective DECREASED to ~53.07 from ~125.62 (57.75% change) Context-specific metabolism does not correlate with transcriptome (r=0.106, n.s.) RIPTiDe completed in 18 secondsAnalysis Growth ratefrom scipy import stats from random import sample rough_growth = [(1. / numpy.median(x)) * 3600. for x in list(set(iCdR703_rough.flux_samples['biomass']))] print('Rough doubling time: ' + str(round(numpy.median(rough_growth), 2)) + ' minutes') smooth_growth = [(1. / numpy.median(x)) * 3600. 
for x in list(set(iCdR703_smooth.flux_samples['biomass']))] print('Smooth doubling time: ' + str(round(numpy.median(smooth_growth), 2)) + ' minutes') pvals = [] maxRange = len(list(iCdR703_rough.flux_samples['biomass'])) * 10 for x in range(1, maxRange): rough_sample = sample(list(iCdR703_rough.flux_samples['biomass']), 10) smooth_sample = sample(list(iCdR703_smooth.flux_samples['biomass']), 10) pvals.append(stats.wilcoxon(x=rough_sample, y=smooth_sample)[1]) print('p-value', numpy.median(pvals)) with open('/home/mjenior/Desktop/repos/Jenior_Cdifficile_2019/data/transcript/tamayo_etal/phase_variation/doubling_times.tsv', 'w') as outFile: rough_growth = 'rough\t' + '\t'.join([str(round(x,2)) for x in rough_growth]) + '\n' outFile.write(rough_growth) smooth_growth = 'smooth\t' + '\t'.join([str(round(x,2)) for x in smooth_growth]) + '\n' outFile.write(smooth_growth)Rough doubling time: 140.71 minutes Smooth doubling time: 94.05 minutes p-value 0.05933611988090862Subrate utilization / Secretion# Inferring media condition def find_growth_substrates(riptide): substrates = [] exchanges = list(set([x.id for x in riptide.model.reactions if 'EX_' in x.id])) for rxn in exchanges: if numpy.median(riptide.flux_samples[rxn]) < 0.0: substrate_id = riptide.model.reactions.get_by_id(rxn).reactants[0].id substrates.append(substrate_id) substrates = set(substrates) print(str(len(substrates)) + ' growth substrates found') return substrates def find_byproducts(riptide): byproducts = [] exchanges = list(set([x.id for x in riptide.model.reactions if 'EX_' in x.id])) for rxn in exchanges: if numpy.median(riptide.flux_samples[rxn]) > 0.0: byproduct_id = riptide.model.reactions.get_by_id(rxn).reactants[0].id byproducts.append(byproduct_id) byproducts = set(byproducts) print(str(len(byproducts)) + ' secreted byproducts found') return byproducts def find_element_sources(riptide): # Isolate exchange reactions exchanges = [] for rxn in riptide.model.reactions: if len(rxn.reactants) == 0 or len(rxn.products) == 0: exchanges.append(rxn.id) sources = {} c_source = ['cpd_id', 0.0] n_source = ['cpd_id', 0.0] # PArse exchange flux samples for imported metabolites for rxn in exchanges: flux = abs(numpy.median(riptide.flux_samples[rxn])) if flux > 1e-6: metabolite = riptide.model.reactions.get_by_id(rxn).reactants[0] sources[metabolite.id] = {} # Multiply elemental components by median flux absolute value for element in metabolite.elements.keys(): element_supply = round(float(metabolite.elements[element]) * flux, 3) sources[metabolite.id][element] = element_supply # Identify largest sources of carbon and nitrogen if element == 'C' and element_supply > c_source[1]: c_source = [metabolite.id, element_supply] elif element == 'N' and element_supply > n_source[1]: n_source = [metabolite.id, element_supply] print('Primary carbon source: ' + riptide.model.metabolites.get_by_id(c_source[0]).name + ' (' + str(c_source[1]) + ')') print('Primary nitrogen source: ' + riptide.model.metabolites.get_by_id(n_source[0]).name + ' (' + str(n_source[1]) + ')') return sources rough_substrates = find_growth_substrates(iCdR703_rough) rough_sources = find_element_sources(iCdR703_rough) rough_byproducts = find_byproducts(iCdR703_rough) smooth_substrates = find_growth_substrates(iCdR703_smooth) smooth_sources = find_element_sources(iCdR703_smooth) smooth_byproducts = find_byproducts(iCdR703_smooth) rough_only_substrates = rough_substrates.difference(smooth_substrates) smooth_only_substrates = smooth_substrates.difference(rough_substrates) print('Rough 
only substrates:') for x in rough_only_substrates: print(x + '\t' + iCdR703.metabolites.get_by_id(x).name) print('\nSmooth only substrates:') for x in smooth_only_substrates: print(x + '\t' + iCdR703.metabolites.get_by_id(x).name) rough_only_byproducts = rough_byproducts.difference(smooth_byproducts) smooth_only_byproducts = smooth_byproducts.difference(rough_byproducts) print('Rough only byproducts:') for x in rough_only_byproducts: print(x + '\t' + iCdR703.metabolites.get_by_id(x).name) print('\nSmooth only byproducts:') for x in smooth_only_byproducts: print(x + '\t' + iCdR703.metabolites.get_by_id(x).name) a = [2345,1,342,2,4,34,234,234,5,65,4,7,54,4,8,8,56,354,6,4,7,865,84,3,45] list(numpy.quantile(a, [0.25, 0.5, 0.75])) # Save median flux sampled values to supplementary table with open('/home/mjenior/Desktop/repos/Jenior_Cdifficile_2019/results/tables/rough_exchange_flux.tsv', 'w') as outFile: outFile.write('id\tname\tq1\tmedian\tq3\n') exchanges = [x.id for x in iCdR703_rough.model.boundary] for rxn in exchanges: curr_name = iCdR703_rough.model.reactions.get_by_id(rxn).name curr_fluxes = list(iCdR703_rough.flux_samples[rxn]) flux_summary = list(numpy.quantile(curr_fluxes, [0.25, 0.5, 0.75])) flux_summary = [str(round(x,3)) for x in flux_summary] entry = rxn + '\t' + curr_name + '\t' + flux_summary[0] + '\t' + flux_summary[1] + '\t' + flux_summary[2] + '\n' outFile.write(entry) with open('/home/mjenior/Desktop/repos/Jenior_Cdifficile_2019/results/tables/smooth_exchange_flux.tsv', 'w') as outFile: outFile.write('id\tname\tq1\tmedian\tq3\n') exchanges = [x.id for x in iCdR703_smooth.model.boundary] for rxn in exchanges: curr_name = iCdR703_smooth.model.reactions.get_by_id(rxn).name curr_fluxes = list(iCdR703_smooth.flux_samples[rxn]) flux_summary = list(numpy.quantile(curr_fluxes, [0.25, 0.5, 0.75])) flux_summary = [str(round(x,3)) for x in flux_summary] entry = rxn + '\t' + curr_name + '\t' + flux_summary[0] + '\t' + flux_summary[1] + '\t' + flux_summary[2] + '\n' outFile.write(entry)Gene Essentialityimport cobra import cobra.flux_analysis minGrowth = iCdR703.slim_optimize() * 0.01 base_essential_genes = cobra.flux_analysis.find_essential_genes(iCdR703, threshold=minGrowth) base_essential_genes = set([x.id for x in base_essential_genes]) print(str(len(base_essential_genes)) + ' core essential genes found') minGrowth = iCdR703_rough.model.slim_optimize() * 0.01 rough_essential_genes = cobra.flux_analysis.find_essential_genes(iCdR703_rough.model, threshold=minGrowth) rough_essential_genes = set([x.id for x in rough_essential_genes]) #rough_essential_genes = rough_essential_genes.difference(base_essential_genes) print(str(len(rough_essential_genes)) + ' rough-specific essential genes') minGrowth = iCdR703_smooth.model.slim_optimize() * 0.01 smooth_essential_genes = cobra.flux_analysis.find_essential_genes(iCdR703_smooth.model, threshold=minGrowth) smooth_essential_genes = set([x.id for x in smooth_essential_genes]) #smooth_essential_genes = smooth_essential_genes.difference(base_essential_genes) print(str(len(smooth_essential_genes)) + ' smooth-specific essential genes') # Contrast groups overlap_essential = smooth_essential_genes.intersection(rough_essential_genes) smooth_only_essential = smooth_essential_genes.difference(rough_essential_genes) rough_only_essential = rough_essential_genes.difference(smooth_essential_genes) # Display results print('Rough-only essential genes:') for x in rough_only_essential: print(x + '\t' + iCdR703.genes.get_by_id(x).name) print('\nSmooth-only essential 
genes:') for x in smooth_only_essential: print(x + '\t' + iCdR703.genes.get_by_id(x).name) print('Overlap essential genes, total:', len(overlap_essential)) for x in overlap_essential: print(x + '\t' + iCdR703.genes.get_by_id(x).name) # Save results to a table with open('/home/mjenior/Desktop/repos/Jenior_Cdifficile_2019/results/tables/genes_table_S4.tsv','w') as outfile: outfile.write('condition\tgene_id\tgene_name\n') for w in base_essential_genes: w = iCdR703.genes.get_by_id(w) entry = 'uncontextualized\t' + w.id + '\t' + w.name.replace(' ', '_') + '\n' outfile.write(entry) for x in overlap_essential: x = iCdR703.genes.get_by_id(x) entry = 'both_contexts\t' + x.id + '\t' + x.name.replace(' ', '_') + '\n' outfile.write(entry) for y in smooth_only_essential: y = iCdR703.genes.get_by_id(y) entry = 'smooth_only\t' + y.id + '\t' + y.name.replace(' ', '_') + '\n' outfile.write(entry) for z in rough_only_essential: z = iCdR703.genes.get_by_id(z) entry = 'rough_sporulation_only\t' + z.id + '\t' + z.name.replace(' ', '_') + '\n' outfile.write(entry)Reaction EssentialityminGrowth = iCdR703.slim_optimize() * 0.01 base_essential_reactions = cobra.flux_analysis.find_essential_reactions(iCdR703, threshold=minGrowth) base_essential_reactions = set([x.id for x in base_essential_reactions]) print(str(len(base_essential_reactions)) + ' core essential reactions found') minGrowth = iCdR703_rough.model.slim_optimize() * 0.01 rough_essential_reactions = cobra.flux_analysis.find_essential_reactions(iCdR703_rough.model, threshold=minGrowth) rough_essential_reactions = set([x.id for x in rough_essential_reactions]) print(str(len(rough_essential_reactions)) + ' rough-specific essential reactions') minGrowth = iCdR703_smooth.model.slim_optimize() * 0.01 smooth_essential_reactions = cobra.flux_analysis.find_essential_reactions(iCdR703_smooth.model, threshold=minGrowth) smooth_essential_reactions = set([x.id for x in smooth_essential_reactions]) print(str(len(smooth_essential_reactions)) + ' smooth-specific essential reactions') # Contrast groups overlap_essential_rxns = smooth_essential_reactions.intersection(rough_essential_reactions) smooth_only_essential_rxns = smooth_essential_reactions.difference(rough_essential_reactions) rough_only_essential_rxns = rough_essential_reactions.difference(smooth_essential_reactions) # Display results print('Rough-only essential reactions:') for x in rough_only_essential_rxns: print(x + '\t' + iCdR703.reactions.get_by_id(x).name) print('\nSmooth-only essential reactions:') for x in smooth_only_essential_rxns: print(x + '\t' + iCdR703.reactions.get_by_id(x).name) # Save results to a table with open('/home/mjenior/Desktop/repos/Jenior_Cdifficile_2019/results/tables/reactions_table_S4.tsv','w') as outfile: outfile.write('condition\treaction_id\treaction_name\n') for w in base_essential_reactions: w = iCdR703.reactions.get_by_id(w) entry = 'uncontextualized\t' + w.id + '\t' + w.name.replace(' ', '_') + '\n' outfile.write(entry) for x in overlap_essential_rxns: x = iCdR703.reactions.get_by_id(x) entry = 'both_contexts\t' + x.id + '\t' + x.name.replace(' ', '_') + '\n' outfile.write(entry) for y in smooth_only_essential_rxns: y = iCdR703.reactions.get_by_id(y) entry = 'smooth_only\t' + y.id + '\t' + y.name.replace(' ', '_') + '\n' outfile.write(entry) for z in rough_only_essential_rxns: z = iCdR703.reactions.get_by_id(z) entry = 'rough_sporulation_only\t' + z.id + '\t' + z.name.replace(' ', '_') + '\n' outfile.write(entry)Topology# Genes r_genes = set([x.id for x in 
iCdR703_rough.model.genes]) s_genes = set([x.id for x in iCdR703_smooth.model.genes]) print('Rough-only genes:', len(r_genes.difference(s_genes))) print('Smooth-only genes:', len(s_genes.difference(r_genes))) # Reactions r_reactions = set([x.id for x in iCdR703_rough.model.reactions]) s_reactions = set([x.id for x in iCdR703_smooth.model.reactions]) print('Rough-only reactions:', len(r_reactions.difference(s_reactions))) print('Smooth-only reactions:', len(s_reactions.difference(r_reactions))) # Metabolites r_metabolites = set([x.id for x in iCdR703_rough.model.metabolites]) s_metabolites = set([x.id for x in iCdR703_smooth.model.metabolites]) print('Rough-only metabolites:', len(r_metabolites.difference(s_metabolites))) print('Smooth-only metabolites:', len(s_metabolites.difference(r_metabolites))) # Compare gene pruning between groups rough_pruned = iCdR703_rough.pruned['genes'] smooth_pruned = iCdR703_smooth.pruned['genes'] rough_only_genes = smooth_pruned.difference(rough_pruned) smooth_only_genes = rough_pruned.difference(smooth_pruned) print('Rough only genes:') for x in rough_only_genes: print(x + '\t' + iCdR703.genes.get_by_id(x).name) print('\nSmooth only genes:') for x in smooth_only_genes: print(x + '\t' + iCdR703.genes.get_by_id(x).name) # Reactions rough_pruned = iCdR703_rough.pruned['reactions'] smooth_pruned = iCdR703_smooth.pruned['reactions'] rough_only_reactions = smooth_pruned.difference(rough_pruned) smooth_only_reactions = rough_pruned.difference(smooth_pruned) print('Rough only reactions:') for x in rough_only_reactions: print(x + '\t' + iCdR703.reactions.get_by_id(x).name) print('\nSmooth only reactions:') for x in smooth_only_reactions: print(x + '\t' + iCdR703.reactions.get_by_id(x).name) # Metabolites rough_pruned = iCdR703_rough.pruned['metabolites'] smooth_pruned = iCdR703_smooth.pruned['metabolites'] rough_only_metabolites = smooth_pruned.difference(rough_pruned) smooth_only_metabolites = rough_pruned.difference(smooth_pruned) print('Rough only metabolites:') for x in rough_only_metabolites: print(x + '\t' + iCdR703.metabolites.get_by_id(x).name) print('\nSmooth only metabolites:') for x in smooth_only_metabolites: print(x + '\t' + iCdR703.metabolites.get_by_id(x).name) import numpy import cobra import cobra.flux_analysis def _getKey(item): return item[1] # Scale each active exchange back and examine its influence on objective flux def find_primary_sources(model, flux_samples=False, fraction=0.01, cutoff=0.8, pfba_fraction=0.8): # Requires a model # Optional: flux_samples = flux samples pandas dataframe associated with model # fraction = percent of median flux to limit exchange reactions by # cutoff = quantile of top C and N sources to report # pfba_fraction = fraction of optimal objective value for pfba solution sources = {} c_sources = [] c_influence = [] n_sources = [] n_influence = [] p_source = ['p_source', -1.0] s_source = ['s_source', -1.0] objVal = model.slim_optimize() pfba_solution = cobra.flux_analysis.pfba(model, fraction_of_optimum=pfba_fraction) # Parse exchange flux samples for imported metabolites exchanges = [rxn.id for rxn in model.boundary] for rxn in exchanges: if isinstance(flux_samples, pandas.DataFrame): current_samples = list(set(flux_samples[rxn])) flux = numpy.mean(current_samples) else: flux = pfba_solution.fluxes[rxn] if flux >= -1e-6: continue # Skip exported byproducts or unused reactions bound = flux * fraction # Test for disproportionate effect on objective old_bounds = model.reactions.get_by_id(rxn).bounds 
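# (added note) The statements just below temporarily clamp this exchange reaction's bounds to
# `fraction` of its observed import flux, re-optimize the model, and then restore the saved
# bounds; the resulting drop in objective value is what produces the `adjustment` factor that
# scales each imported metabolite's elemental (C/N/P/S) contribution later in this function.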
model.reactions.get_by_id(rxn).bounds = (bound, bound) new_objVal = model.slim_optimize() model.reactions.get_by_id(rxn).bounds = old_bounds # Reset bounds if str(new_objVal) == 'nan': new_objVal = objVal * fraction # Correct for nan # Calculate the degree of change to objective value if new_objVal != objVal: flux_ratio = objVal / new_objVal adjustment = abs(flux) * flux_ratio else: adjustment = 1. # Normalize elemental component contributions metabolite = model.reactions.get_by_id(rxn).reactants[0] sources[metabolite.id] = {} for element in metabolite.elements.keys(): element_supply = float(metabolite.elements[element]) * adjustment if element_supply > 0.: element_supply = numpy.log(element_supply) sources[metabolite.id][element] = element_supply # Identify largest sources of main elements if element == 'C' and element_supply > 0.0: c_sources.append([metabolite.id, element_supply]) c_influence.append(element_supply) elif element == 'N' and element_supply > 0.0: n_sources.append([metabolite.id, element_supply]) n_influence.append(element_supply) elif element == 'P' and element_supply > 0.0: p_source = [metabolite.id, element_supply] elif element == 'S' and element_supply > 0.0: s_source = [metabolite.id, element_supply] # Rank by largest contributions c_sources = sorted(c_sources, reverse=True, key=_getKey) n_sources = sorted(n_sources, reverse=True, key=_getKey) print('Top carbon sources:') current = max(c_influence) x = 0 while current >= numpy.quantile(c_influence, cutoff): print(model.metabolites.get_by_id(c_sources[x][0]).name + ' (' + str(round(c_sources[x][1],3)) + ')') current = c_sources[x][1] x += 1 print('\nTop nitrogen sources:') current = max(n_influence) x = 0 while current >= numpy.quantile(n_influence, cutoff): print(model.metabolites.get_by_id(n_sources[x][0]).name + ' (' + str(round(n_sources[x][1],3)) + ')') current = n_sources[x][1] x += 1 print('\nPrimary phosphorous source:') print(model.metabolites.get_by_id(p_source[0]).name + ' (' + str(round(p_source[1],3)) + ')') print('\nPrimary sulfur source:') print(model.metabolites.get_by_id(s_source[0]).name + ' (' + str(round(s_source[1],3)) + ')') return sources rough_sources = find_primary_sources(iCdR703_rough.model, flux_samples=iCdR703_rough.flux_samples) smooth_sources = find_primary_sources(iCdR703_smooth.model, flux_samples=iCdR703_smooth.flux_samples) smooth_ex = set([x.id for x in iCdR703_smooth.model.exchanges]) rough_ex = set([x.id for x in iCdR703_rough.model.exchanges]) smooth_only = smooth_ex.difference(rough_ex) rough_only = rough_ex.difference(smooth_ex) print('Smooth') for rxn in smooth_only: flux = numpy.mean(iCdR703_smooth.flux_samples[rxn]) print(rxn, iCdR703_smooth.model.reactions.get_by_id(rxn).name, flux) print('\nRough') for rxn in rough_only: flux = numpy.mean(iCdR703_rough.flux_samples[rxn]) print(rxn, iCdR703_rough.model.reactions.get_by_id(rxn).name, flux) for rxn_a in iCdR703_smooth.model.boundary: try: fluxes_a = numpy.mean(iCdR703_smooth.flux_samples[rxn_a.id]) fluxes_b = numpy.mean(iCdR703_rough.flux_samples[rxn_a.id]) except: continue if fluxes_a < 0 or fluxes_b < 0: print(rxn_a.id, iCdR703.reactions.get_by_id(rxn_a.id).name, round(fluxes_a,3), round(fluxes_b,3)) for rxn_a in iCdR703_smooth.model.boundary: try: fluxes_a = numpy.mean(iCdR703_smooth.flux_samples[rxn_a.id]) fluxes_b = numpy.mean(iCdR703_rough.flux_samples[rxn_a.id]) except: continue if fluxes_a > 0 or fluxes_b > 0: print(rxn_a.id, iCdR703.reactions.get_by_id(rxn_a.id).name, round(fluxes_a,3), round(fluxes_b,3)) with 
open('/home/mjenior/Desktop/repos/Jenior_Cdifficile_2019/data/transcript/tamayo_etal/phase_variation/exch_fluxes_maxfit.tsv', 'w') as outFile: outFile.write('reaction_id\tsubstrate\tsmooth\trough\n') for exch in iCdR703.exchanges: try: flux_smooth = str(round(numpy.mean(iCdR703_smooth.flux_samples[exch.id]), 3)) except: flux_smooth = 'NA' try: flux_rough = str(round(numpy.mean(iCdR703_rough.flux_samples[exch.id]), 3)) except: flux_rough = 'NA' line = exch.id + '\t' + list(exch.reactants)[0].name + '\t' + flux_smooth + '\t' + flux_rough + '\n' outFile.write(line)Contribution to Biomassimport cobra import symengine import pandas from scipy import stats from random import sample import cobra.flux_analysis import numpy import warnings # Contribution to biomass def find_contribution(genre): warnings.filterwarnings('ignore') # Test each exchange metabolite's contribute to the objective biomass = genre.slim_optimize() consume = {} produce = {} with genre as g: for rxn in g.exchanges: if rxn.lower_bound < 0.0: substrate = list(rxn.metabolites)[0].name old_bound = rxn.lower_bound rxn.lower_bound = old_bound * 0.01 new_biomass = g.slim_optimize() if str(new_biomass) == 'nan': rxn.lower_bound = old_bound continue else: effect = (g.slim_optimize() / biomass) * 100.0 effect = round(100.0 - effect, 3) if effect > 1.0: consume[substrate] = effect rxn.lower_bound = old_bound elif rxn.upper_bound > 0.0: substrate = list(rxn.metabolites)[0].name old_bound = rxn.upper_bound rxn.upper_bound = old_bound * 0.01 new_biomass = g.slim_optimize() if str(new_biomass) == 'nan': rxn.upper_bound = old_bound continue else: effect = (g.slim_optimize() / biomass) * 100.0 effect = round(100.0 - effect, 3) if effect > 1.0: produce[substrate] = effect rxn.upper_bound = old_bound warnings.filterwarnings('default') return {'consumes' : consume, 'produces' : produce} # Find respective contributions to optimal biomass smooth_important = find_contribution(iCdR703_smooth.model) rough_important = find_contribution(iCdR703_rough.model) smooth_important rough_important1. Fido Unified Downloader Fido (Federated Internet Data Obtainer) is a unified interface for the search and retrival of solar physics data regardless of the underlying client or webservice through which the data is obtained, e.g. VSO, JSOC, etc. In SunPy 1.0 Fido now makes use of an asynchronous download stream through the module `parfive` Lets look at how we can search and download some solar physics data#lets search for some SDO/AIA data over some time results = Fido.search(a.Time('2014/05/15 08:00', '2014/05/15 08:10'), a.Instrument('AIA')) results results.file_numQueries to Fido can be make more specific by using other attributes. Lets narrow the search to only look at one wavelength, with a cadence of 1 minute. This makes use of Astropy unitsresults = Fido.search(a.Time('2014/05/15 08:00', '2014/05/15 08:10'), a.Instrument('AIA'), a.Wavelength(171*u.angstrom), a.vso.Sample(1*u.minute)) resultsThe search results can then be downloaded via the fetch functionaia_files = Fido.fetch(results) aia_files[0:10]More detailed searchs with FidoFido searches can include searchs from multiple instruments, wavelengths, times etc. This is achieved through the | opertaior which acts as a logical OR operator.results = Fido.search(a.Time('2014/05/15 08:00', '2014/05/15 08:10'), a.Instrument('AIA')|a.Instrument('SWAP')) # the results can be indexed to access a subset of the search results results[:,0] files = Fido.fetch(results[:,0])2. 
Handeling DataTypes (Map and Timeseries)SunPy provides core data type classes that are designed to provide a consistent interface acress data types (lightcurves and images) as well as data sources from numerous instruments and observations. They handle all of the manipulation necessary to read data in from mission-specific files. The two main datatypes in SunPy are the TimeSeries and Map classes. 2.1 TimeSeriesThe structure of a TimeSeries consists of times and measurements and the underlying structure is that of a `pandas.DataFrame`. SunPy TimeSeries supports time-series data from a wide range of solar-focussed instruments.Lets look at an example of using TimeSeries to deal with GOES/XRS observationssearch_results = Fido.search(a.Time('2013-10-28 00:00','2013-10-28 12:00'),a.Instrument('XRS')) goes_files = Fido.fetch(search_results[0]) goes_lc = ts.TimeSeries(goes_files) goes_lc.peek()We can inspect the meta information in the TimeSeries object.goes_lc.metaAnd inspect the associated unitsgoes_lc.unitsWe can also inspect and manipulate the datagoes_lc.data[0:10] goes_lc.data['xrsb'][0:5] = 10 goes_lc.data['xrsb'][0:10]We can also truncate the data to specific times of interest#new_goes_lc = goes_lc.truncate('2013-10-28 01:00', '2013-10-28 02:30') #new_goes_lc.peek()2.2 Map The SunPy Map class provides the data type structure to store 2-dimensional data associated with a coordinate system. This allows users to store and manipulate images of the Sun and the heliosphere. Like TimeSeries, Map explicitly supports observations from mulitple instruments, as well as 2D data with associated WCS coordinate information.Lets look at an example of image analysis with Mapaia_map = sunpy.map.Map(aia_files[0]) aia_map.peek()/Users/laurahayes/anaconda3/lib/python3.7/site-packages/matplotlib/figure.py:445: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure. % get_backend())We can inspect the map, the meta data etcaia_map aia_map.metaOne of the key features is the coordinate frame underlying Map through the defined WCS and utilizes the SunPy coordinate subpackageaia_map.coordinate_frame3. CoordinatesSunPy coordinates allows us to deal with points in physical space, and the SunPy coordinates subpackage provides definitions of and transformations between several reference frames commonly used in solar physics. This allows us to manipulate Maps and take advantage of WCS Axes for plottingThese reference frames and their associated transformations are implemented using the `astropy.coordinates` subpackage and extend Astropy’s coordinate frame transformation graph to include solar coordinates# # Helioviewer Client # from sunpy.net.helioviewer import HelioviewerClient # hv = HelioviewerClient() # file_stereo = hv.download_jp2('2014/05/15 08:00', observatory="STEREO_A", instrument="SECCHI",detector='COR2' ) # file_stereo3.1 Using WCS Axesfile_stereo = '2014_05_15__07_54_00_005__STEREO-A_SECCHI_COR2_white-light.jp2' map_stereo = sunpy.map.Map(file_stereo) map_stereo #print the coordinate frame map_stereo.coordinate_frameWe can now take advantage of WCS Axes to plot the STEREO map in its coordinate framefig = plt.figure(figsize=(15, 15)) ax1 = fig.add_subplot(1, 2, 1, projection=map_stereo) map_stereo.plot(axes=ax1, vmax=800) map_stereo.draw_limb()This plot is in the coordinate frame with respect to the observer location of the STEREO spacecraft. A number of bright object are also seen from this field of view. 
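As an aside on how Map, WCS, and SkyCoord work together, the sketch below converts an arbitrary point between world and pixel coordinates of this STEREO map. It assumes the map_stereo object from the cell above and a sunpy release that provides Map.world_to_pixel and Map.pixel_to_world; the coordinate values themselves are arbitrary.
```python
from astropy.coordinates import SkyCoord
import astropy.units as u

# An arbitrary point expressed in the map's own observer-dependent helioprojective frame
point = SkyCoord(500 * u.arcsec, -300 * u.arcsec, frame=map_stereo.coordinate_frame)

# Convert it to pixel coordinates of this image, then back to world coordinates
pixel_pos = map_stereo.world_to_pixel(point)
roundtrip = map_stereo.pixel_to_world(pixel_pos.x, pixel_pos.y)
print(pixel_pos)
print(roundtrip)
```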
We can search for local astronomical bodies, get their coordinates at the time of the observation and in the coordinate frame of the observer (i.e. as seen from STEREO)# get the location of mars mars = get_body_heliographic_stonyhurst('mars', map_stereo.date, observer=map_stereo.observer_coordinate) mars_hpc = mars.transform_to(frames.Helioprojective(observer=map_stereo.observer_coordinate))INFO: Apparent body location accounts for 1269.96 seconds of light travel time [sunpy.coordinates.ephemeris]We can now again make use of WCS axes to plot this coordinate of mars on the same plot as STEREO mapfig = plt.figure(figsize=(15, 15)) ax1 = fig.add_subplot(1, 2, 1, projection=map_stereo) map_stereo.plot(axes=ax1, vmax=800) ax1.plot_coord(mars_hpc, 's', color='white', fillstyle='none', markersize=12, label='Mars') plt.legend()3.1.1 Positions of planets with respect to observer positionBuilding upon this, we can search for the other planets with respect to the STEREO spacecraft (i.e. with respect to the observer of this Map)#print the coordinate of the observer location map_stereo.observer_coordinateWe can now get coordinates of other planets in Heliographic stoneyhurst to compare positions to the observer locationplanet_list = ['earth', 'venus', 'mars', 'mercury'] planet_coord = [get_body_heliographic_stonyhurst(this_planet, time=map_stereo.date) for this_planet in planet_list] #Plotting the locations of the STEREO instrument and planets all with respect to the Sun fig = plt.figure(figsize=(10, 10)) ax1 = plt.subplot(2, 1, 1, projection='polar') plt.polar(np.deg2rad(map_stereo.observer_coordinate.lon), map_stereo.observer_coordinate.radius.to('au'), 'x',label='STEREO') for this_planet, this_coord in zip(planet_list, planet_coord): plt.polar(np.deg2rad(this_coord.lon), this_coord.radius, 'o', label=this_planet) plt.legend() ax2 = plt.subplot(2, 1, 2) ax2.plot(map_stereo.observer_coordinate.radius.to('au'), map_stereo.observer_coordinate.lat, 'x', label='STEREO') for this_planet, this_coord in zip(planet_list, planet_coord): ax2.plot(this_coord.radius, this_coord.lat, 'o', label=this_planet) ax2.set_ylabel('Heliographic Latitude [deg]') plt.legend()3.1.2 Venus TransitLets look at another example to making use of SunPy coordinates and WCS axes to look at the Venus transit observation from SDO/AIAfile_venus = '20120606_040731_UTC.0041.fits' map_venus = sunpy.map.Map(file_venus) map_venus.peek() from astropy.coordinates import solar_system_ephemeris solar_system_ephemeris.set('de432s')In this plot we can clearly see an object transiting the Sun. 
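As a rough sanity check before querying an ephemeris, the position of the dark disk can be read straight off the image. The pixel location below is picked by eye and purely illustrative (it is not taken from the original notebook), and map_venus is assumed from the cell above.
```python
import astropy.units as u

# Hypothetical pixel position of the dark disk visible in the plot above;
# pixel_to_world returns a SkyCoord in the map's helioprojective frame (as seen from SDO)
candidate = map_venus.pixel_to_world(200 * u.pix, 700 * u.pix)
print(candidate.Tx, candidate.Ty)
```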
We can now get the position of Venus in the coordinate frame of the observer (SDO) and plot the location of Venus on this same plot# now get venus position venus = get_body_heliographic_stonyhurst('venus', map_venus.date, observer=map_venus.observer_coordinate) venus_hpc = venus.transform_to(frames.Helioprojective(observer=map_venus.observer_coordinate)) fov = 100 * u.arcsec top_right = SkyCoord(venus_hpc.Tx + fov, venus_hpc.Ty + fov, frame=map_venus.coordinate_frame) bottom_left = SkyCoord(venus_hpc.Tx - fov, venus_hpc.Ty - fov, frame=map_venus.coordinate_frame) smap = map_venus.submap(top_right, bottom_left) fig = plt.figure() ax = fig.add_subplot(1, 1, 1, projection=smap) # now get venus position smap.plot(axes=ax) smap.draw_limb() ax.plot_coord(venus_hpc, 'x', color='white') log.info('Test')INFO: Test [unknown]Tetrahedra in a gridIn this example, we are using clique packing to find the maximal number of vertex disjoint tetrahedra that canbe packed in a grid graph.import networkx as nx from itertools import product, combinations # GraphILP API: import networkx graphs and use clique packing from graphilp.imports import networkx as impnx from graphilp.packing import clique_packing as cp # Use Matplotlib for plotting our 3d grid graph and the packed tetrahedra %matplotlib inline import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from mpl_toolkits.mplot3d.art3d import Poly3DCollection # Allow some interaction in the 3d visualisation from ipywidgets import interact, IntSliderSet up the grid graph# choose edge length of grid graph n = 4 # start with a standard grid graph G = nx.grid_graph(dim=(n, n, n)) # allow different colours for different types of edges edge_colors = {} for e in G.edges(): edge_colors[e] = 'k' # extend the grid graph to allow for a nice collection of tetrahedra: # create new vertices at the cube centres, connect them to the cube vertices, and add diagonals to the cubes new_edges = [] for node in G.nodes(): if node[0] < n-1 and node[1] < n-1 and node[2] < n-1: # centre points new_edges.append(((node[0]+0.5, node[1]+0.5, node[2]+0.5), (node[0], node[1], node[2]))) new_edges.append(((node[0]+0.5, node[1]+0.5, node[2]+0.5), (node[0], node[1]+1, node[2]))) new_edges.append(((node[0]+0.5, node[1]+0.5, node[2]+0.5), (node[0], node[1], node[2]+1))) new_edges.append(((node[0]+0.5, node[1]+0.5, node[2]+0.5), (node[0], node[1]+1, node[2]+1))) new_edges.append(((node[0]+0.5, node[1]+0.5, node[2]+0.5), (node[0]+1, node[1], node[2]))) new_edges.append(((node[0]+0.5, node[1]+0.5, node[2]+0.5), (node[0]+1, node[1]+1, node[2]))) new_edges.append(((node[0]+0.5, node[1]+0.5, node[2]+0.5), (node[0]+1, node[1], node[2]+1))) new_edges.append(((node[0]+0.5, node[1]+0.5, node[2]+0.5), (node[0]+1, node[1]+1, node[2]+1))) # cube diagonals new_edges.append(((node[0], node[1], node[2]), (node[0]+1, node[1], node[2]+1))) new_edges.append(((node[0], node[1], node[2]+1), (node[0]+1, node[1], node[2]))) new_edges.append(((node[0], node[1]+1, node[2]), (node[0]+1, node[1]+1, node[2]+1))) new_edges.append(((node[0], node[1]+1, node[2]+1), (node[0]+1, node[1]+1, node[2]))) new_edges.append(((node[0], node[1], node[2]), (node[0], node[1]+1, node[2]+1))) new_edges.append(((node[0], node[1]+1, node[2]), (node[0], node[1], node[2]+1))) new_edges.append(((node[0]+1, node[1], node[2]), (node[0]+1, node[1]+1, node[2]+1))) new_edges.append(((node[0]+1, node[1]+1, node[2]), (node[0]+1, node[1], node[2]+1))) new_edges.append(((node[0], node[1], node[2]), (node[0]+1, node[1]+1, node[2]))) 
new_edges.append(((node[0]+1, node[1], node[2]), (node[0], node[1]+1, node[2]))) new_edges.append(((node[0], node[1], node[2]+1), (node[0]+1, node[1]+1, node[2]+1))) new_edges.append(((node[0]+1, node[1], node[2]+1), (node[0], node[1]+1, node[2]+1))) G.add_edges_from(new_edges) # show additional edges in light grey for e in G.edges(): if e not in edge_colors: edge_colors[e] = '#AAAAAA'Plot the grid graphX = [node[0] for node in G.nodes()] Y = [node[1] for node in G.nodes()] Z = [node[2] for node in G.nodes()] fig = plt.figure(figsize=(10,10)) ax = fig.add_subplot(111, projection='3d') ax.scatter(X, Y, Z) for edge in G.edges(): ax.plot([edge[0][0], edge[1][0]], [edge[0][1], edge[1][1]], [edge[0][2], edge[1][2]], c=edge_colors[edge])Set up optimisation problemoptG = impnx.read(G) m = cp.create_model(optG, 4)Solve optimisation problem and extract solutionm.optimize() cliques = cp.extract_solution(optG, m) # get edges per clique clique_dict = {} for edge, clique_no in cliques.items(): if clique_no > 0: if clique_no not in clique_dict: clique_dict[clique_no] = [] clique_dict[clique_no].append(edge)Visualise solution# each 4-clique can be interpreted as a tetrahedron with four faces triangles = [] for clique_name, clique in clique_dict.items(): for triple in combinations(clique, 3): triangles.append(triple) def update(h = 10.0, w = 390.0): fig = plt.figure(figsize=(10,10)) ax = fig.add_subplot(111, projection='3d') ax.view_init(h, w) ax.scatter(X, Y, Z) for edge in G.edges(): ax.plot([edge[0][0], edge[1][0]], [edge[0][1], edge[1][1]], [edge[0][2], edge[1][2]], c=edge_colors[edge]) tri = Poly3DCollection(triangles) tri.set_alpha(0.5) tri.set_edgecolor('#FF0000') ax.add_collection3d(tri) interact(update, w=IntSlider(min=0, max=360, step=5, value=250), h=IntSlider(min=0, max=90, step=5, value=25));King County Housing Data - Linear Regression AssignmentData for this assignment was obtained from Kaggle: Complete the following challenges below to improve iteratively your home price estimation and practice implementing predictive linear regression models. Bivariate RegressionPick the X variable that you think will be the most correlated with Y. Split your dataset into a 80-20 train-test-split (80% of data for training, and 20% for testing).Train a regression model using this single X and single Y variable. 
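One quick way to pick that most-correlated X variable is to rank the columns by their correlation with price; a small sketch, assuming the df DataFrame loaded in the following code cell (older pandas versions may require dropping non-numeric columns such as date before calling corr()):
```python
# Rank features by the absolute strength of their linear correlation with price
corr_with_price = df.corr()['price'].drop('price')
print(corr_with_price.abs().sort_values(ascending=False).head(10))
```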
Once you have trained the model and obtained its coefficients, plot the points on a graph and fit your line of best fit to the graph.Report your Root Mean Squared Error and R-Squared for this model.import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error, r2_score df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred' '/datasets/master/kc_house_data.csv') pd.set_option('display.max_columns', 100) print(df.shape) df.head() sns.set(style="ticks", color_codes=True) sns.pairplot(data=df, y_vars=['price'], x_vars=df.columns.drop('price'), plot_kws={'alpha': 0.1, 'linewidth':0}); targets = ['price'] predictors = ['sqft_living'] y = df[targets].values X = df[predictors].values X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) model = LinearRegression() model.fit(X_train, y_train) beta_0 = model.intercept_ beta_i = model.coef_[0] plt.scatter(X_train, y_train, alpha=0.1) plt.plot(X_train, [beta_i * _ + beta_0 for _ in X_train]); plt.scatter(X_test, y_test, alpha=0.1) plt.plot(X_test, [beta_i * _ + beta_0 for _ in X_test]);Two-variable Multiple RegressionTo ramp up slowly, pick a second X variable that you think will be the most correlated with Y. Split your dataset into a 50-50 test-train-split (50% of data for training, and 50% for testing).Train a regression model using these two X variables. Once you have trained the model and obtained its coefficients, plot the points on a graph and fit your **plane** of best fit to the graph.Report your Root Mean Squared Error and R-squared for this model.predictors = ['sqft_living', 'grade'] y = df[targets].values X = df[predictors].values X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) model = LinearRegression() model.fit(X_train, y_train) beta_0 = model.intercept_ beta_i = model.coef_[0] from mpl_toolkits.mplot3d import Axes3D fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(df['sqft_living'], df['grade'], df[targets], c='red', alpha=0.25) # Set axes to size of scatter data x1 = np.array(ax.get_xlim()) y1 = np.array(ax.get_ylim()) xx, yy = np.meshgrid(x1,y1) zz = beta_i[0]*xx + beta_i[1]*yy + beta_0[0] # Plot Plane plt3d = plt.gca(projection='3d') # Add Opacity to Plane plt3d.plot_surface(xx, yy, zz, alpha=0.5) plt.title("Regression Plane") plt.show() # from sklearn.metrics import mean_squared_error, r2_score y_test_predict = model.predict(X_test) RMSE = np.sqrt(mean_squared_error(y_test, y_test_predict)) R2 = r2_score(y_test, y_test_predict) print("RMSE:", RMSE) print("R^2:", R2)RMSE: 243902.4135296513 R^2: 0.5419797450455692Multiple RegressionNow using all available X variables, split your data into test and training datasets, train your model, obtain its coefficients, and report the Root Mean Squared Error and R-squared values.df[targets + predictors].cov() predictors = [ 'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'waterfront', 'view', 'condition', 'grade', 'sqft_above', 'yr_built', 'lat', 'long', 'sqft_living15', 'sqft_lot15' ] y = df[targets].values X = df[predictors].values X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) model = LinearRegression() model.fit(X_train, y_train) beta_0 = model.intercept_ beta_i = model.coef_[0] y_test_predict = model.predict(X_test) RMSE = np.sqrt(mean_squared_error(y_test, y_test_predict)) R2 = r2_score(y_test, 
y_test_predict) print("RMSE:", RMSE) print("R^2:", R2)RMSE: 199136.76998962593 R^2: 0.7009570190793255Demonstration of python function pointersThere is no good reason to write this paticular code. If you want a matrix add pyJvsip allready knows how to do that.In the madd function we demonstrate creating a function pointer using colview and then use it to do and elementwise add of a matrix a column at a time.import pyJvsip as pjv def madd(A,B,C): assert '__View' in repr(A) and 'pyJvsip' in repr(A),'Input parameters must be views of type pyJvsip.__View.' assert type(A) == type(B) and type(A) == type(C),'Input paramteters must be the same type' assert 'mview' in A.type,'Only matrix views are supported for madd.' L = A.rowlength a = A.colview b = B.colview c = C.colview for i in range(L): pjv.add(a(i),b(i),c(i)) return CBelow we create vector views and then recover the block to bind a matrix view on the data space. We then create some data using the ramp method on the vector views and excercise the madd function above.a = pjv.create('vview_d',50) b = pjv.create('vview_d',50) c = pjv.create('vview_d',50) A=a.block.bind(0,4,3,1,4) B=b.block.bind(0,4,3,1,4) C=c.block.bind(0,4,3,1,4) a.ramp(0.0,.01) b.ramp(0.0,1.0) madd(A,B,C); print('A = '); A.mprint('%.2f') print('B= '); B.mprint('%.2f') print("A + B = C = ");C.mprint('%.2f')A = [ 0.00 0.01 0.02 0.03; 0.04 0.05 0.06 0.07; 0.08 0.09 0.10 0.11] B= [ 0.00 1.00 2.00 3.00; 4.00 5.00 6.00 7.00; 8.00 9.00 10.00 11.00] A + B = C = [ 0.00 1.01 2.02 3.03; 4.04 5.05 6.06 7.07; 8.08 9.09 10.10 11.11]Clutch problem Defining problemimport numpy as np from desdeo_problem import ScalarConstraint from desdeo_problem.Objective import _ScalarObjective from desdeo_problem.Variable import variable_builder from desdeo_problem.Problem import MOProblem from desdeo_emo.EAs import RVEA # constants and variables mu = 0.5 rho = 0.0000078 Mf = 3 n = 250 Jz = 55 w = np.pi * n / 30 def Mh(x): x = np.atleast_2d(x) return (2 / 3) * mu * x[:, 5 - 2] * x[:, 6 - 2] * ((x[:, 3 - 2] ** 3 - x[:, 2 - 2] ** 3) / (x[:, 3 - 2] ** 2 - x[:, 2 - 2] ** 2)) * 0.001 def th(x): x = np.atleast_2d(x) return Jz * w / (Mh(x) + Mf) # objectives def f_1(x): x = np.atleast_2d(x) return np.pi * (x[:, 3 - 2]**2 - x[:, 2 - 2]**2) * x[:, 4 - 2] * (x[:, 6 - 2] + 1) * rho def f_2(x): return th(x) def f_3(x): x = np.atleast_2d(x) return x[:, 6 - 2] def f_4(x): x = np.atleast_2d(x) return x[:, 3 - 2] def f_5(x): x = np.atleast_2d(x) return x[:, 5 - 2] obj1 = _ScalarObjective("obj1", f_1, maximize=True) obj2 = _ScalarObjective("obj2", f_2, maximize=True) obj3 = _ScalarObjective("obj3", f_3) obj4 = _ScalarObjective("obj4", f_4, maximize=True) obj5 = _ScalarObjective("obj5", f_5, maximize=True) # decision variables var_names = ["x1", "x2", "x3", "x4", "x5"] # Make sure that the variable names are meaningful to you. 
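# (added note, an interpretation not stated in the original) Judging from the Mh/th formulas
# above and the bounds that follow, x1..x5 appear to play the roles of inner radius, outer
# radius, disc thickness, actuating force, and number of friction surfaces in the
# multiple-disc clutch formulation, in that order.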
initial_values = np.array([70, 100, 2, 800, 5]) lower_bounds = [60, 90, 1, 600, 1] upper_bounds = [80, 110, 3, 1000, 10] bounds = np.stack((lower_bounds, upper_bounds)) variables = variable_builder(var_names, initial_values, upper_bounds=upper_bounds, lower_bounds=lower_bounds) # constraints Rimin = 60 Romax = 110 Amin = 1.5 deltaR = 20 Amax = 3 delta = 0.5 Lmax = 30 Zmax = 10 Vsrmax = 10000 # mu = 0.5 # rho = 0.0000078 s = 1.5 Ms = 40 # Mf = 3 # n = 250 Pmax = 1 # 10 # Jz = 55 tmax = 15 Fmax = 1000 def S(x): x = np.atleast_2d(x) return np.pi * (x[:, 3 - 2]**2 - x[:, 2 - 2]**2) def Prz(x): x = np.atleast_2d(x) return x[:, 5 - 2] / S(x) def Rsr(x): x = np.atleast_2d(x) return (2 / 3) * ((x[:, 3 - 2]**3 - x[:, 2 - 2]**3) / (x[:, 3 - 2]**2 - x[:, 2 - 2]**2)) def Vsr(x): x = np.atleast_2d(x) return (np.pi * Rsr(x) * n) / 30 # w = pi * n / 30 # Mh = (2 / 3) * mu * x(5 - 1) * x(6 - 1) * ((x(3 - 1) ** 3 - x(2 - 1) ** 3) / (x(3 - 1) ** 2 - x(2 - 1) ** 2)) * 0.001 # c(1) = -x(2 - 1) + Rimin # c(2) = -Romax + x(3 - 1) def c_1(x, _): x = np.atleast_2d(x) return -(x[:, 3 - 2] - x[:, 2 - 2]) + deltaR # c(4) = -x(4 - 1) + Amin # c(5) = -Amax + x(4 - 1) def c_2(x, _): x = np.atleast_2d(x) return -Lmax + (x[:, 6 - 2] + 1) * (x[:, 4 - 2] + delta) # c(3) = -Zmax + (x(6 - 1) + 1) # c(8) = -x(6 - 1) + 1 def c_3(x, _): return -Pmax + Prz(x) def c_4(x, _): return -Pmax * Vsrmax + Prz(x) * Vsr(x) def c_5(x, _): return -Vsrmax + Vsr(x) def c_6(x, _): return -tmax + th(x) def c_7(x, _): return -Mh(x) + s * Ms def c_8(x, _): return -th(x) # c(15) = -x(5 - 1) # c(16) = -Fmax + x(5 - 1) cons1 = ScalarConstraint("c_1", 5, 5, c_1) cons2 = ScalarConstraint("c_2", 5, 5, c_2) cons3 = ScalarConstraint("c_3", 5, 5, c_3) cons4 = ScalarConstraint("c_4", 5, 5, c_4) cons5 = ScalarConstraint("c_5", 5, 5, c_5) cons6 = ScalarConstraint("c_6", 5, 5, c_6) cons7 = ScalarConstraint("c_7", 5, 5, c_7) cons8 = ScalarConstraint("c_8", 5, 5, c_8) # problem problem = MOProblem(objectives=[obj1, obj2, obj3, obj4, obj5], variables=variables, constraints=[cons1, cons2, cons3, cons4, cons5, cons6, cons7, cons8]) evolver = RVEA(problem, interact=True, n_iterations=5, n_gen_per_iter=100) plot, pref = evolver.requests() print(plot.content["dimensions_data"]) print(pref[0].content['message'])obj1 obj2 obj3 obj4 obj5 minimize -1 -1 1 -1 -1 ideal 5.40695 44.9687 1.02766 109.937 998.679 nadir None None None None None Please provide preferences. There is four ways to do this. You can either: 1: Select preferred solution(s) 2: Select non-preferred solution(s) 3: Specify a reference point worse than or equal to the ideal point 4: Specify desired ranges for objectives. In case you choose 1, please specify index/indices of preferred solutions in a numpy array (indexing starts from 0). For example: numpy.array([1]), for choosing the solutions with index 1. numpy.array([2, 4, 5, 16]), for choosing the solutions with indices 2, 4, 5, and 16. 2, please specify index/indices of non-preferred solutions in a numpy array (indexing starts from 0). For example: numpy.array([3]), for choosing the solutions with index 3. 
numpy.array([1, 2]), for choosing the solutions with indices 1 and 2[...]Confidence interval
mu = 10 sigma = 5 means = [] for _ in range(1000): dd = np.random.normal(mu,sigma,100) means.append(np.mean(dd)) dd = np.random.normal(mu,sigma,10000) 100*np.mean(dd>15) gm = np.mean(means) ss = np.std(means) gm,ss plt.hist(means,25,normed=1); plt.plot(2*[gm-ss],[0,1],'k') plt.plot(2*[gm+ss],[0,1],'k') means = np.array(means) print(100*np.mean((means>gm-ss) & (means<gm+ss))) print(100*np.mean((means>gm-2*ss) & (means<gm+2*ss))) print(100*np.mean((means>gm-3*ss) & (means<gm+3*ss)))
Notes: I thought recursion would be simple, but it turned out otherwise. 1. Tricky problems: there seems to be a knack to using recursion to solve AtCoder problems 1. It quickly hits a stack overflow, so once a recursive version works, rewrite it as tail recursion right away. recursion pdf* thinking recursively by * Introduction to Recursive Programming
# apg4b a programmer's guide for beginners # sum with recursion # compute the sum from 0 up to the given number %%writefile temp.cpp #include <iostream> using namespace std; int sum (int n){ if (n == 1) return 1; else return (n + sum(n-1)); } int main() { int n; cin >> n; cout << sum(n) << endl; } !g++ temp.cpp; echo 10 |./a.out !g++ temp.cpp; echo 1000 |./a.out #=> gives an error for slightly larger numbers
# factorial with recursion # compute the factorial of the given number %%writefile temp.cpp #include <iostream> using namespace std; unsigned long long factorial (int n){ if (n == 1) return 1; else return (n * factorial(n-1)); } int main() { int n; cin >> n; cout << factorial(n) << endl; } !g++ temp.cpp; echo 5 |./a.out !g++ temp.cpp; echo 10 |./a.out !g++ temp.cpp; echo 50 |./a.out
# an example found online for handling large numbers # it used 250 as the input # the output is nearly 500 digits # it did not run as-is, so I rewrote it %%writefile temp.cpp #include <iostream> using namespace std; int main() { long k = 1; cin >> k; if (k <= 33) { unsigned long long fact = 1; fact = 1; for (int b = k; b >= 1; b--) { fact = fact * b; } cout << "\nThe factorial of " << k << " is " << fact << endl; } else { int numArr[10000]; int total, rem = 0, count; register int i; //int i; for (i = 0; i < 10000; i++) numArr[i] = 0; numArr[10000] = 1; for (count = 2; count <= k; count++) { while (i > 0) { total = numArr[i] * count + rem; rem = 0; if (total > 9) { numArr[i] = total % 10; rem = total / 10; } else { numArr[i] = total; } i--; } rem = 0; total = 0; i = 10000; } cout << "The factorial of " << k << " is " << endl; for (i = 0;i < 10000;i++) { if (numArr[i] != 0 || count == 1) { cout << numArr[i]; count = 1; } } cout << endl; } cout << endl; } # now it works !g++ temp.cpp; echo 10 |./a.out !g++ temp.cpp; echo 50 |./a.out !g++ temp.cpp; echo 250 |./a.out
# results computed with Haskell for 10 50 250 # they match # 3628800 # 30414093201713378043612608166064768844377641568960512000000000000 # 32328562609091077323208145520243684709948437176737806667479424271128237475551112094888179153710281994509285073531894329267309317128089908227910302790712819216765272401892647332180411862610068329253651336789390895699357135301750405131787600772479330654023390061648255522488194365725860573992226412548329822048491377217766506412768588071531289787776729519139908443774787025891729732551502832417873206581884820624785826598088488255488800000000000000000000000000000000000000000000000000000000000000
# compute Fibonacci numbers %%writefile temp.cpp #include <iostream> using namespace std; unsigned long long fibonacci (int n){ if (n == 0) return 0; else if (n == 1) return 1; else return (fibonacci(n-1) + fibonacci(n-2)); } int main() { int n; cin >> n; cout << fibonacci(n) << endl; } !g++ temp.cpp; echo 1 |./a.out !g++ temp.cpp; echo 2 |./a.out !g++ temp.cpp; echo 3 |./a.out !g++ temp.cpp; echo 4 |./a.out !g++ temp.cpp; echo 5 |./a.out !g++ temp.cpp; echo 6 |./a.out
# Write the binary representation %%writefile temp.cpp #include <iostream> using namespace std; void base2(int n){ if (n==1) cout
<< n; else { base2(n/2); cout << n%2; } } int main() { int n; cin >> n; base2(n); cout << endl; } !g++ temp.cpp; echo 1 |./a.out !g++ temp.cpp; echo 2 |./a.out !g++ temp.cpp; echo 3 |./a.out !g++ temp.cpp; echo 4 |./a.out !g++ temp.cpp; echo 5 |./a.out !g++ temp.cpp; echo 6 |./a.outHome Assignment 1 **Maximum score** - 10 points. **Deadline: 23:59, February 7, 2021.****Submission:** via email . Topic: "HA1 Name Surname Group" Reminder on variable assignment:We can store a value (of different data types: integer and float numbers, strings, data structures) in a variable. We use a variable assignment for it. Let's type something in this format: ```variable_name = value```, where ```=``` is called as assignment operator. If we run this code, Python creates an object for storing this value in memory, it automatically identifies its data type, then it creates a pointer (link) and we can address it by calling the variable name. We come up with the most appropriate variable name ourselves. When we call a variable's name, we can check what is the value. Task 1. Bank depositYou have decided to take your savings to the bank and open a deposit.The interest rate on the deposit is 5% per annum, which are added to the deposit amount after a year. You are ready to deposit 1000 dollars 60 cents for 3 years. Calculate how much money you can get 3 years after the deposit is closed.Hints: Please note that in order to calculate the interest for the year, it is better to convert the entire deposit amount into cents. Use the standard devision operator (not integer devision) in all calculations. In your final answer, round the cents to two digits after the floating point.*Given:*The variables `interest_rate`, `dollars`, `cents`, `years`.*Output:*Using the function print() display the message in the output: "In 3 years you will get X dollars Y cents", where X and Y are replaced by the results of your calculations accordingly.interest_rate = 5 dollars = 1000 cents = 60 years = 3 # YOUR CODE HERETask 2. Bank deposit versus individual investment accountYour friend told you, that you can open an individual investment account (IIA) and get a refund of tax deduction of 13% from the amount of the deposit.1. Calculate which method will bring you more money after 3 years: a bank deposit or an individual investment account? Print the difference in money. 2. Calculate what should be a minimum refund of tax deduction, so that it would be better to use it instead of a bank deposit? The answer should be rounded till 3 digits after the floating point.# 1. YOUR CODE HERE # 2. YOUR CODE HERETask 3. User info in a new social networkLet's say, you want to create a social network for specialists and fans of international relations. You need to register each user by asking and storing the information about his/her name, surname, age, country, political preference, favorite dictator, favorite diplomat.Write a program using input() and prompts with relevant questions for a new user. After running your code, the user should answer each of your questions. As a result, you should obtain 7 variables (e.g. 'name', 'surname', etc. -- name the variables as you like) with the answers entered by a user. Convert the value in a variable with age into an integer number. Others should be in string format, check that this condition is satisfied.Run your code and answer to your questions by yourself to check that everything works fine and as expected. 
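For orientation only, here is a hedged sketch of how such a registration flow could look; the prompts and variable names are illustrative choices of mine, not part of the assignment, and you should still write and test your own version:

```python
# Illustrative sketch only -- prompts and variable names are my own choices.
name = input("What is your name? ")
surname = input("What is your surname? ")
age = int(input("How old are you? "))          # converted to an integer, as required
country = input("Which country are you from? ")
political_preference = input("What is your political preference? ")
favorite_dictator = input("Who is your favorite dictator? ")
favorite_diplomat = input("Who is your favorite diplomat? ")

# Check the required types: age is an int, everything else stays a string.
assert isinstance(age, int)
for answer in (name, surname, country, political_preference,
               favorite_dictator, favorite_diplomat):
    assert isinstance(answer, str)
```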
Reminder on input() funciton```pythonmy_name = input() read a single line and store it in the variable "my_name"print('Hi ' + my_name + '!')```or we can add a prompt for a user:```pythonmy_name = input("What's your name?") do the same as above + show the message to a userprint('Hi ' + my_name + '!')```Try to run these lines of code in a cells (with code format) and see the difference.# YOUR CODE HEREVertex Pipelines: AutoML Tabular pipelines using google-cloud-pipeline-components Run in Colab View on GitHub Open in Google Cloud Notebooks OverviewThis notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML tabular classification workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines).You'll build a pipeline that looks like this: DatasetThe dataset used for this tutorial is the UCI Machine Learning ['Dry beans dataset'](https://archive.ics.uci.edu/ml/datasets/Dry+Bean+Dataset), from: . and ., (2020), "Multiclass Classification of Dry Beans Using Computer Vision and Machine Learning Techniques."In Computers and Electronics in Agriculture, 174, 105507. [DOI](https://doi.org/10.1016/j.compag.2020.105507). ObjectiveIn this tutorial, you create an AutoML tabular classification using a pipeline with components from `google_cloud_pipeline_components`.The steps performed include:- Create a `Dataset` resource.- Train an AutoML `Model` resource.- Creates an `Endpoint` resource.- Deploys the `Model` resource to the `Endpoint` resource.The components are [documented here](https://google-cloud-pipeline-components.readthedocs.io/en/latest/google_cloud_pipeline_components.aiplatform.htmlmodule-google_cloud_pipeline_components.aiplatform). CostsThis tutorial uses billable components of Google Cloud:* Vertex AI* Cloud StorageLearn about [Vertex AIpricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. Set up your local development environmentIf you are using Colab or Google Cloud Notebook, your environment already meets all the requirements to run this notebook. You can skip this step.Otherwise, make sure your environment meets this notebook's requirements. You need the following:- The Cloud Storage SDK- Git- Python 3- virtualenv- Jupyter notebook running in a virtual environment with Python 3The Cloud Storage guide to [Setting up a Python development environment](https://cloud.google.com/python/setup) and the [Jupyter installation guide](https://jupyter.org/install) provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions:1. [Install and initialize the SDK](https://cloud.google.com/sdk/docs/).2. [Install Python 3](https://cloud.google.com/python/setupinstalling_python).3. [Install virtualenv](Ihttps://cloud.google.com/python/setupinstalling_and_using_virtualenv) and create a virtual environment that uses Python 3.4. Activate that environment and run `pip3 install Jupyter` in a terminal shell to install Jupyter.5. Run `jupyter notebook` on the command line in a terminal shell to launch Jupyter.6. Open this notebook in the Jupyter Notebook Dashboard. 
InstallationInstall the latest version of Vertex SDK for Python.import os # Google Cloud Notebook if os.path.exists("/opt/deeplearning/metadata/env_version"): USER_FLAG = "--user" else: USER_FLAG = "" ! pip3 install --upgrade google-cloud-aiplatform $USER_FLAGInstall the latest GA version of *google-cloud-storage* library as well.! pip3 install -U google-cloud-storage $USER_FLAGInstall the latest GA version of *google-cloud-pipeline-components* library as well.! pip3 install $USER kfp google-cloud-pipeline-components --upgradeRestart the kernelOnce you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True)Check the versions of the packages you installed. The KFP SDK version should be >=1.6.! python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))" ! python3 -c "import google_cloud_pipeline_components; print('google_cloud_pipeline_components version: {}'.format(google_cloud_pipeline_components.__version__))"Before you begin GPU runtimeThis tutorial does not require a GPU runtime. Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)3. [Enable the Vertex AI APIs, Compute Engine APIs, and Cloud Storage.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component,storage-component.googleapis.com)4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook.5. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$`.PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_IDRegionYou can also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.- Americas: `us-central1`- Europe: `europe-west4`- Asia Pacific: `asia-east1`You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations)REGION = "us-central1" # @param {type: "string"}TimestampIf you are in a live tutorial session, you might be using a shared test account or project. 
To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")Authenticate your Google Cloud account**If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step.**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.**Click Create service account**.In the **Service account name** field, enter a name, and click **Create**.In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.Click Create. A JSON file that contains your key downloads to your local environment.Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.# If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. import os import sys # If on Google Cloud Notebook, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS ''Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.! gsutil mb -l $REGION $BUCKET_NAMEFinally, validate access to your Cloud Storage bucket by examining its contents:! 
gsutil ls -al $BUCKET_NAMEService Account**If you don't know your service account**, try to get your service account using `gcloud` command by executing the second cell below.SERVICE_ACCOUNT = "[your-service-account]" # @param {type:"string"} if ( SERVICE_ACCOUNT == "" or SERVICE_ACCOUNT is None or SERVICE_ACCOUNT == "[your-service-account]" ): # Get your GCP project id from gcloud shell_output = !gcloud auth list 2>/dev/null SERVICE_ACCOUNT = shell_output[2].strip() print("Service Account:", SERVICE_ACCOUNT)Set service account access for Vertex PipelinesRun the following commands to grant your service account access to read and write pipeline artifacts in the bucket that you created in the previous step -- you only need to run these once per service account.! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_NAME ! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_NAMESet up variablesNext, set up some variables used throughout the tutorial. Import libraries and define constantsimport google.cloud.aiplatform as aipVertex AI constantsSetup up the following constants for Vertex AI:- `API_ENDPOINT`: The Vertex AI API service endpoint for `Dataset`, `Model`, `Job`, `Pipeline` and `Endpoint` services.# API service endpoint API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)Vertex Pipelines constantsSetup up the following constants for Vertex Pipelines:PIPELINE_ROOT = "{}/pipeline_root/beans".format(BUCKET_NAME)Additional imports.from typing import NamedTuple import kfp from google_cloud_pipeline_components import aiplatform as gcc_aip from kfp.v2 import dsl from kfp.v2.dsl import (ClassificationMetrics, Input, Metrics, Model, Output, component)Initialize Vertex SDK for PythonInitialize the Vertex SDK for Python for your project and corresponding bucket.aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)Define a metrics evaluation custom componentIn this tutorial, you define one custom pipeline component. The remaining components are pre-builtcomponents for Vertex AI services.The custom pipeline component you define is a Python-function-based component.Python function-based components make it easier to iterate quickly by letting you build your component code as a Python function and generating the component specification for you.Note the `@component` decorator. When you evaluate the `classification_model_eval` function, the component is compiled to what is essentially a task factory function, that can be used in the the pipeline definition.In addition, a `tabular_eval_component.yaml` component definition file will be generated. The component `yaml` file can be shared & placed under version control, and used later to define a pipeline step.The component definition specifies a base image for the component to use, and specifies that the `google-cloud-aiplatform` package should be installed. When not specified, the base image defaults to Python 3.7The custom pipeline component retrieves the classification model evaluation generated by the AutoML tabular training process, parses the evaluation data, and renders the ROC curve and confusion matrix for the model. 
It also uses given metrics threshold information and compares that to the evaluation results to determine whether the model is sufficiently accurate to deploy.*Note:* This custom component is specific to an AutoML tabular classification.@component( base_image="gcr.io/deeplearning-platform-release/tf2-cpu.2-3:latest", output_component_file="tabular_eval_component.yaml", packages_to_install=["google-cloud-aiplatform"], ) def classification_model_eval_metrics( project: str, location: str, # "us-central1", api_endpoint: str, # "us-central1-aiplatform.googleapis.com", thresholds_dict_str: str, model: Input[Model], metrics: Output[Metrics], metricsc: Output[ClassificationMetrics], ) -> NamedTuple("Outputs", [("dep_decision", str)]): # Return parameter. import json import logging from google.cloud import aiplatform as aip # Fetch model eval info def get_eval_info(client, model_name): from google.protobuf.json_format import MessageToDict response = client.list_model_evaluations(parent=model_name) metrics_list = [] metrics_string_list = [] for evaluation in response: print("model_evaluation") print(" name:", evaluation.name) print(" metrics_schema_uri:", evaluation.metrics_schema_uri) metrics = MessageToDict(evaluation._pb.metrics) for metric in metrics.keys(): logging.info("metric: %s, value: %s", metric, metrics[metric]) metrics_str = json.dumps(metrics) metrics_list.append(metrics) metrics_string_list.append(metrics_str) return ( evaluation.name, metrics_list, metrics_string_list, ) # Use the given metrics threshold(s) to determine whether the model is # accurate enough to deploy. def classification_thresholds_check(metrics_dict, thresholds_dict): for k, v in thresholds_dict.items(): logging.info("k {}, v {}".format(k, v)) if k in ["auRoc", "auPrc"]: # higher is better if metrics_dict[k] < v: # if under threshold, don't deploy logging.info("{} < {}; returning False".format(metrics_dict[k], v)) return False logging.info("threshold checks passed.") return True def log_metrics(metrics_list, metricsc): test_confusion_matrix = metrics_list[0]["confusionMatrix"] logging.info("rows: %s", test_confusion_matrix["rows"]) # log the ROC curve fpr = [] tpr = [] thresholds = [] for item in metrics_list[0]["confidenceMetrics"]: fpr.append(item.get("falsePositiveRate", 0.0)) tpr.append(item.get("recall", 0.0)) thresholds.append(item.get("confidenceThreshold", 0.0)) print(f"fpr: {fpr}") print(f"tpr: {tpr}") print(f"thresholds: {thresholds}") metricsc.log_roc_curve(fpr, tpr, thresholds) # log the confusion matrix annotations = [] for item in test_confusion_matrix["annotationSpecs"]: annotations.append(item["displayName"]) logging.info("confusion matrix annotations: %s", annotations) metricsc.log_confusion_matrix( annotations, test_confusion_matrix["rows"], ) # log textual metrics info as well for metric in metrics_list[0].keys(): if metric != "confidenceMetrics": val_string = json.dumps(metrics_list[0][metric]) metrics.log_metric(metric, val_string) # metrics.metadata["model_type"] = "AutoML Tabular classification" logging.getLogger().setLevel(logging.INFO) aip.init(project=project) # extract the model resource name from the input Model Artifact model_resource_path = model.uri.replace("aiplatform://v1/", "") logging.info("model path: %s", model_resource_path) client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. 
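# (Added note, not part of the original notebook): api_endpoint is expected to be the
# regional Vertex AI endpoint built earlier, e.g. "us-central1-aiplatform.googleapis.com".
# Model evaluations are attached to the regional model resource, so a client pointed at
# a different region will generally fail to list them.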
client = aip.gapic.ModelServiceClient(client_options=client_options) eval_name, metrics_list, metrics_str_list = get_eval_info( client, model_resource_path ) logging.info("got evaluation name: %s", eval_name) logging.info("got metrics list: %s", metrics_list) log_metrics(metrics_list, metricsc) thresholds_dict = json.loads(thresholds_dict_str) deploy = classification_thresholds_check(metrics_list[0], thresholds_dict) if deploy: dep_decision = "true" else: dep_decision = "false" logging.info("deployment decision is %s", dep_decision) return (dep_decision,)Define an AutoML tabular classification pipeline that uses components from `google_cloud_pipeline_components`DISPLAY_NAME = "automl-beans{}".format(TIMESTAMP) PIPELINE_NAME = "automl-tabular-beans-training-v2" MACHINE_TYPE = "n1-standard-4" @kfp.dsl.pipeline(name=PIPELINE_NAME, pipeline_root=PIPELINE_ROOT) def pipeline( bq_source: str = "bq://aju-dev-demos.beans.beans1", display_name: str = DISPLAY_NAME, project: str = PROJECT_ID, gcp_region: str = REGION, api_endpoint: str = API_ENDPOINT, thresholds_dict_str: str = '{"auRoc": 0.95}', ): dataset_create_op = gcc_aip.TabularDatasetCreateOp( project=project, display_name=display_name, bq_source=bq_source ) training_op = gcc_aip.AutoMLTabularTrainingJobRunOp( project=project, display_name=display_name, optimization_prediction_type="classification", optimization_objective="minimize-log-loss", budget_milli_node_hours=1000, column_transformations=[ {"numeric": {"column_name": "Area"}}, {"numeric": {"column_name": "Perimeter"}}, {"numeric": {"column_name": "MajorAxisLength"}}, {"numeric": {"column_name": "MinorAxisLength"}}, {"numeric": {"column_name": "AspectRation"}}, {"numeric": {"column_name": "Eccentricity"}}, {"numeric": {"column_name": "ConvexArea"}}, {"numeric": {"column_name": "EquivDiameter"}}, {"numeric": {"column_name": "Extent"}}, {"numeric": {"column_name": "Solidity"}}, {"numeric": {"column_name": "roundness"}}, {"numeric": {"column_name": "Compactness"}}, {"numeric": {"column_name": "ShapeFactor1"}}, {"numeric": {"column_name": "ShapeFactor2"}}, {"numeric": {"column_name": "ShapeFactor3"}}, {"numeric": {"column_name": "ShapeFactor4"}}, {"categorical": {"column_name": "Class"}}, ], dataset=dataset_create_op.outputs["dataset"], target_column="Class", ) model_eval_task = classification_model_eval_metrics( project, gcp_region, api_endpoint, thresholds_dict_str, training_op.outputs["model"], ) with dsl.Condition( model_eval_task.outputs["dep_decision"] == "true", name="deploy_decision", ): deploy_op = gcc_aip.ModelDeployOp( # noqa: F841 model=training_op.outputs["model"], project=project, machine_type=MACHINE_TYPE, )Compile the pipelineNext, compile the pipeline.from kfp.v2 import compiler # noqa: F811 compiler.Compiler().compile( pipeline_func=pipeline, package_path="tabular classification_pipeline.json".replace(" ", "_"), )Run the pipelineNext, run the pipeline.DISPLAY_NAME = "beans_" + TIMESTAMP job = aip.PipelineJob( display_name=DISPLAY_NAME, template_path="tabular classification_pipeline.json".replace(" ", "_"), pipeline_root=PIPELINE_ROOT, parameter_values={"project": PROJECT_ID, "display_name": DISPLAY_NAME}, ) job.run()Click on the generated link to see your run in the Cloud Console. Compare the parameters and metrics of the pipelines run from their tracked metadataNext, you use the Vertex SDK for Python to compare the parameters and metrics of the pipeline runs. 
Wait until the pipeline runs have finished to run the next cell.pipeline_df = aip.get_pipeline_df(pipeline=PIPELINE_NAME) print(pipeline_df.head(2))Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloudproject](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial -- *Note:* this is auto-generated and not all resources may be applicable for this tutorial:- Dataset- Pipeline- Model- Endpoint- Batch Job- Custom Job- Hyperparameter Tuning Job- Cloud Storage Bucketdelete_dataset = True delete_pipeline = True delete_model = True delete_endpoint = True delete_batchjob = True delete_customjob = True delete_hptjob = True delete_bucket = True try: if delete_model and "DISPLAY_NAME" in globals(): models = aip.Model.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) model = models[0] aip.Model.delete(model) print("Deleted model:", model) except Exception as e: print(e) try: if delete_endpoint and "DISPLAY_NAME" in globals(): endpoints = aip.Endpoint.list( filter=f"display_name={DISPLAY_NAME}_endpoint", order_by="create_time" ) endpoint = endpoints[0] endpoint.undeploy_all() aip.Endpoint.delete(endpoint.resource_name) print("Deleted endpoint:", endpoint) except Exception as e: print(e) if delete_dataset and "DISPLAY_NAME" in globals(): if "tabular" == "tabular": try: datasets = aip.TabularDataset.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) dataset = datasets[0] aip.TabularDataset.delete(dataset.resource_name) print("Deleted dataset:", dataset) except Exception as e: print(e) if "tabular" == "image": try: datasets = aip.ImageDataset.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) dataset = datasets[0] aip.ImageDataset.delete(dataset.resource_name) print("Deleted dataset:", dataset) except Exception as e: print(e) if "tabular" == "text": try: datasets = aip.TextDataset.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) dataset = datasets[0] aip.TextDataset.delete(dataset.resource_name) print("Deleted dataset:", dataset) except Exception as e: print(e) if "tabular" == "video": try: datasets = aip.VideoDataset.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) dataset = datasets[0] aip.VideoDataset.delete(dataset.resource_name) print("Deleted dataset:", dataset) except Exception as e: print(e) try: if delete_pipeline and "DISPLAY_NAME" in globals(): pipelines = aip.PipelineJob.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) pipeline = pipelines[0] aip.PipelineJob.delete(pipeline.resource_name) print("Deleted pipeline:", pipeline) except Exception as e: print(e) if delete_bucket and "BUCKET_NAME" in globals(): ! 
gsutil rm -r $BUCKET_NAMEModelinput_image = Input(shape=(28,28,3), batch_shape=(None,28,28,3), dtype=float,name='Input_Image') conv = Conv2D(filters=10, kernel_size=5, padding='same', activation='relu')(input_image) conv = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid')(conv) conv = Conv2D(filters=8, kernel_size=3, padding='valid', activation='relu')(conv) conv = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid')(conv) fmap = Flatten()(conv) # Classification Head 1 - Length Classification x = Dense(128, activation='relu', kernel_initializer=RandomNormal(stddev=0.01), bias_initializer=RandomNormal(stddev=0.01))(fmap) x = Dense(32, activation='relu', kernel_initializer=RandomNormal(stddev=0.01), bias_initializer=RandomNormal(stddev=0.01))(x) length = Dense(1, activation='sigmoid', name='length', kernel_initializer=RandomNormal(stddev=0.01), bias_initializer=RandomNormal(stddev=0.01))(x) # Classification Head 2 - Width Classification x = Dense(64, activation='relu', kernel_initializer=RandomNormal(stddev=0.01), bias_initializer=RandomNormal(stddev=0.01))(fmap) x = Dense(8, activation='relu', kernel_initializer=RandomNormal(stddev=0.01), bias_initializer=RandomNormal(stddev=0.01))(x) width = Dense(1, activation='sigmoid', name='width', kernel_initializer=RandomNormal(stddev=0.01), bias_initializer=RandomNormal(stddev=0.01))(x) # Classification Head 3 - Colour Classification x = Dense(64, activation='relu', kernel_initializer=RandomNormal(stddev=0.01), bias_initializer=RandomNormal(stddev=0.01))(fmap) x = Dense(8, activation='relu', kernel_initializer=RandomNormal(stddev=0.01), bias_initializer=RandomNormal(stddev=0.01))(x) colour = Dense(1, activation='sigmoid', name='colour', kernel_initializer=RandomNormal(stddev=0.01), bias_initializer=RandomNormal(stddev=0.01))(x) # Classification Head 4 - Angle Classification x = Dense(128, activation='relu', kernel_initializer=RandomNormal(stddev=0.01), bias_initializer=RandomNormal(stddev=0.01))(fmap) x = Dense(64, activation='relu', kernel_initializer=RandomNormal(stddev=0.01), bias_initializer=RandomNormal(stddev=0.01))(x) angle = Dense(12, activation='softmax', name='angle', kernel_initializer=RandomNormal(stddev=0.01), bias_initializer=RandomNormal(stddev=0.01))(x) model = Model(inputs=input_image, outputs=[length, width, colour, angle]) model.compile(optimizer='adam', loss={'length':'binary_crossentropy', 'width':'binary_crossentropy', 'colour':'binary_crossentropy', 'angle':'categorical_crossentropy'}, metrics={'length':'accuracy', 'width':'accuracy', 'colour':'accuracy', 'angle':'accuracy'}, loss_weights=[5,2,1,50]) print(model.summary()) history1 = model.fit(x=x_train, y={'length':y_length_train, 'width':y_width_train, 'colour':y_colour_train, 'angle':y_angle_train}, batch_size=3200, epochs=15, verbose=1, validation_data=(x_validate, {'length':y_length_validate, 'width':y_width_validate, 'colour':y_colour_validate, 'angle':y_angle_validate}), shuffle=False # data is pre-shuffled, don't waste computation time ) model.save('Q2-v3.h5') metrics = model.evaluate(x=x_test, y={'length':y_length_test, 'width':y_width_test, 'colour':y_colour_test, 'angle':y_angle_test}, verbose=1 ) print(model.metrics_names, metrics) print(history1.history.keys()) plt.plot(history1.history['loss'], label='total loss') plt.plot(history1.history['length_loss'], label='length loss') plt.plot(history1.history['width_loss'], label='width loss') plt.plot(history1.history['colour_loss'], label='colour loss') plt.plot(history1.history['angle_loss'], 
label='angle loss') plt.xlabel('Epoch') plt.ylabel('Loss') plt.legend() plt.show() plt.plot(np.array(history1.history['loss'])/58, label='Normalized total loss') plt.plot(history1.history['length_loss'], label='length loss') plt.plot(history1.history['width_loss'], label='width loss') plt.plot(history1.history['colour_loss'], label='colour loss') plt.plot(history1.history['angle_loss'], label='angle loss') plt.xlabel('Epoch') plt.ylabel('Loss') plt.legend() plt.show() plt.plot(history1.history['length_acc'], label='Tr: Length') plt.plot(history1.history['width_acc'], label='Tr: Width') plt.plot(history1.history['colour_acc'], label='Tr: Colour') plt.plot(history1.history['angle_acc'], label='Tr: Angle') plt.xlabel('Epoch') plt.ylabel('Training Accuracy') plt.legend() plt.show() plt.plot(history1.history['val_length_acc'], label='Val: Length') plt.plot(history1.history['val_width_acc'], label='Val: Width') plt.plot(history1.history['val_colour_acc'], label='Val: Colour') plt.plot(history1.history['val_angle_acc'], label='Val: Angle') plt.xlabel('Epoch') plt.ylabel('Validation Accuracy') plt.legend() plt.show() length_p, width_p, colour_p, angle_p = model.predict(x=x_test, verbose=1) # Convert_to_class_labels length_p = length_p.round().flatten() width_p = width_p.round().flatten() colour_p = colour_p.round().flatten() angle_p = angle_p.argmax(axis=1) # Get absolute class label def get_absolute_class_label(l,w,c,a): return 42*l + 24*w + 12*c + a y = get_absolute_class_label(y_length_test, y_width_test, y_colour_test, y_angle_test.argmax(axis=1)) y_hat = get_absolute_class_label(length_p, width_p, colour_p, angle_p) print('Accuracies:') acc_length = accuracy_score(y_length_test, length_p.round()) print('Length:', acc_length) acc_width = accuracy_score(y_width_test, width_p.round()) print('Width:', acc_width) acc_colour = accuracy_score(y_colour_test, colour_p.round()) print('Colour:', acc_colour) acc_angle = accuracy_score(y_angle_test.argmax(axis=1), angle_p) print('Angle:', acc_angle) print('Average Accuracy:', np.average([acc_length, acc_width, acc_colour, acc_angle])) conmat = confusion_matrix(y, y_hat) np.savez_compressed('v3_conmat.npz', conmat=conmat) fig, ax = plt.subplots(1,1,figsize=(25,22)) im = ax.imshow(conmat) plt.colorbar(im) plt.show() recall = recall_score(y, y_hat, average=None) precision = precision_score(y, y_hat, average=None) fscores = f1_score(y, y_hat, average=None) np.savez_compressed('v3fscores.npz', fscores=fscores, recall=recall, precision=precision) plt.plot(recall, label='Precision') plt.plot(recall, label='Recall') plt.plot(fscores, label='F Score') plt.xlabel('Class') plt.ylabel('Score') plt.legend() plt.show() print('------Length-----') length_conmat = confusion_matrix(y_length_test, length_p) print(length_conmat) length_fscore = f1_score(y_length_test, length_p) length_precision = precision_score(y_length_test, length_p) length_recall = recall_score(y_length_test, length_p) print('Precision:', length_precision) print('Recall:', length_recall) print('F score:', length_fscore) print('------Width-----') width_conmat = confusion_matrix(y_width_test, width_p) print(width_conmat) width_fscore = f1_score(y_width_test, width_p) width_precision = precision_score(y_width_test, width_p) width_recall = recall_score(y_width_test, width_p) print('Precision:', width_precision) print('Recall:', width_recall) print('F score:', width_fscore) print('------Colour-----') colour_conmat = confusion_matrix(y_colour_test, colour_p) print(colour_conmat) colour_fscore = 
f1_score(y_colour_test, colour_p) colour_precision = precision_score(y_colour_test, colour_p) colour_recall = recall_score(y_colour_test, colour_p) print('Precision:', colour_precision) print('Recall:', colour_recall) print('F score:', colour_fscore) print('------Angle-----') angle_conmat = confusion_matrix(y_angle_test.argmax(axis=1), angle_p) print(angle_conmat) angle_fscore = f1_score(y_angle_test.argmax(axis=1), angle_p, average=None) angle_precision = precision_score(y_angle_test.argmax(axis=1), angle_p, average=None) angle_recall = recall_score(y_angle_test.argmax(axis=1), angle_p, average=None) plt.plot(angle_precision, label='Precision') plt.plot(angle_recall, label='Recall') plt.plot(angle_fscore, label='F Score') plt.xlabel('Angles') plt.ylabel('Score') plt.legend() plt.show() np.savetxt('angle_conmat_v3.txt', angle_conmat, delimiter=' & ', fmt='%3d', newline='\\\\\n')Phase 3 Weighted Baggingimport pandas as pd import matplotlib.pyplot as plt from os import listdir from os.path import isfile, join import os import re import csv import codecs import gensim import itertools import numpy as np import pandas as pd import operator import sys from nltk import ngrams from collections import Counter from string import punctuation from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from iwillwin.trainer.supervised_trainer import KerasModelTrainer from iwillwin.data_utils.data_helpers import DataTransformer, DataLoader from iwillwin.config import dataset_config from iwillwin.data_utils.feature_engineering import FeatureCreator from fuzzywuzzy import fuzz from nltk.corpus import stopwords from tqdm import tqdm from scipy.stats import skew, kurtosis from scipy.spatial.distance import cosine, cityblock, jaccard, canberra, euclidean, minkowski, braycurtis from nltk import word_tokenize import seaborn as sns %matplotlib inline import lightgbm as lgb from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt from sklearn.model_selection import KFold import os import re import csv import codecs import numpy as np import pandas as pd import operator from os import listdir from os.path import isfile, join ######################################## ## import packages ######################################## import os import re import csv import codecs import numpy as np np.random.seed(1337) import pandas as pd import operator import sys from string import punctuation from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from iwillwin.trainer.supervised_trainer import KerasModelTrainer from iwillwin.data_utils.data_helpers import DataTransformer, DataLoader from iwillwin.config import dataset_config from keras.utils import to_categorical NB_WORDS, MAX_SEQUENCE_LENGTH = 50000, 50 data_transformer = DataTransformer(max_num_words=NB_WORDS, max_sequence_length=MAX_SEQUENCE_LENGTH, char_level=False, normalization=True, features_processed=True) trains_nns, tests_nns, labels = data_transformer.prepare_data(dual=False) print("Number of unique words", len(data_transformer.tokenizer.index_docs)) trains_meta = trains_nns[2] tests_meta = tests_nns[2] train_df = pd.read_csv('../data/dataset/train.csv') test_df = pd.read_csv('../data/dataset/test.csv') rumor_words = ['辟谣', '谣言', '勿传', '假的'] def is_rumor(text): if type(text) != str: print(text, type(text)) return 0 for rumor_word in rumor_words: if rumor_word in text: return 1 return 0 def has_split_symbol(text): if type(text) != str: return 0 
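# (Added comment, not in the original): non-string titles (e.g. NaN) were handled above by
# returning 0; the check below flags whether title2_zh contains a '|' separator, which is
# later exposed as the binary has_| feature alongside has_rumor_words.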
if '|' in text: return 1 return 0 for df in [train_df, test_df]: df['has_|'] = df['title2_zh'].apply(has_split_symbol) df['has_rumor_words'] = df['title2_zh'].apply(is_rumor) train_has_rumor = train_df.has_rumor_words.values test_has_rumor = test_df.has_rumor_words.values trick_trains_features = np.concatenate((trains_nns[2], train_has_rumor.reshape((-1, 1))), axis=1) trick_tests_features = np.concatenate((tests_nns[2], test_has_rumor.reshape((-1, 1))), axis=1) oof_file_names = sorted([f for f in listdir('../data/oofs/') if isfile(join('../data/oofs/', f)) and f != '.gitkeep']) preds_file_names = [name.replace('-Train', '') for name in oof_file_names] oofs = [] preds = [] for name in oof_file_names: oofs.append(pd.read_csv('../data/oofs/' + name)) for name in preds_file_names: preds.append(pd.read_csv('../data/output/' + name)) for i, name in enumerate(oof_file_names): print(i, name) trains = pd.DataFrame() tests = pd.DataFrame() for i in range(len(oof_file_names)): for label_type in ['agreed', 'disagreed', 'unrelated']: trains['oofs_{}_{}'.format(i, label_type)] = oofs[i][label_type].values tests['oofs_pred{}_{}'.format(i, label_type)] = preds[i][label_type].values oof_file_names unrelated = pd.DataFrame() agreeds = pd.DataFrame() disagreeds = pd.DataFrame() #check_oofs = True check_oofs = False if check_oofs: for i, oof in enumerate(oofs): agreeds['oofs_agreed_{}'.format(i)] = oofs[i]['agreed'].values unrelated['oofs_unrelated_{}'.format(i)] = oofs[i]['unrelated'].values disagreeds['oofs_disagreeds_{}'.format(i)] = oofs[i]['disagreed'].values else: for i, oof in enumerate(oofs): agreeds['oofs_agreed_{}'.format(i)] = preds[i]['agreed'].values unrelated['oofs_unrelated_{}'.format(i)] = preds[i]['unrelated'].values disagreeds['oofs_disagreeds_{}'.format(i)] = preds[i]['disagreed'].values agreeds.corr() disagreeds.corr() unrelated.corr()Prepare Different Inputs# Only use oofs ensemble_trains = trains.values ensemble_tests = tests.values # Use oof and meta features #ensemble_trains = np.concatenate((trains.values, trains_meta), axis=1) #ensemble_tests = np.concatenate((tests.values, tests_meta), axis=1) # use oofs and meta-features #ensemble_trains = np.concatenate((trains.values, trick_trains_features), axis=1) #ensemble_tests = np.concatenate((tests.values, trick_tests_features), axis=1) #ensemble_trains = trick_trains_features #ensemble_tests = trick_tests_featuresLGBMfrom sklearn import metrics def fit_every_feature_model(feature_data, label, feature_test_data, fold_count=3, predict=True): predictions = np.zeros(shape=[len(feature_test_data), 3]) fold_size = len(feature_data) // fold_count oofs = [] log_loss = 0 for fold_id in range(fold_count): print("Fold : ", fold_id) fold_start = fold_size * fold_id fold_end = fold_start + fold_size if fold_id == fold_count - 1: fold_end = len(feature_data) train_x = np.concatenate([feature_data[:fold_start], feature_data[fold_end:]]) train_y = np.concatenate([label[:fold_start], label[fold_end:]]) val_x = feature_data[fold_start:fold_end] val_y = label[fold_start:fold_end] lgb_train = lgb.Dataset(train_x, train_y) lgb_val = lgb.Dataset(val_x, val_y) lgb_params = { 'boosting_type' : 'gbdt', 'objective' : 'multiclass', 'num_class':3, 'metric' : {'multi_logloss',}, 'learning_rate' : 0.01, 'feature_fraction' : 0.8, 'bagging_fraction': 0.9, 'bagging_freq': 1, 'num_leaves' : 4, 'max_depth': 16, 'random_state': 42, 'nthread': 8, } lgbm_model = lgb.train(lgb_params, lgb_train, num_boost_round=100000, valid_sets=[lgb_train, lgb_val], 
early_stopping_rounds=1000, verbose_eval=100) lgb.plot_importance(lgbm_model) plt.show() if predict: prediction = lgbm_model.predict(feature_test_data, num_iteration=lgbm_model.best_iteration) oof_prediction = lgbm_model.predict(val_x, num_iteration=lgbm_model.best_iteration) score = metrics.log_loss(val_y, oof_prediction) print("Fold", fold_id, "log loss", score, "in", lgbm_model.best_iteration) log_loss += score oofs.append(oof_prediction) predictions += prediction del lgbm_model predictions /= fold_count print("Training Finish") return predictions, log_loss / fold_count, oofs pred, log_loss, oofs = fit_every_feature_model(ensemble_trains, labels, ensemble_tests, fold_count=10) log_loss oofs = np.concatenate(oofs) oofs.argmax(axis=1) def np_weighted_accuracy(y_true, y_pred): weight = np.array([[1/16, 1/15, 1/5]]) norm = [(1/16) + (1/15) + (1/5)] weight_mask = weight * y_true weight_mask = np.max(weight_mask, axis=-1) norms = np.sum(weight_mask) y_true = np.argmax(y_true, axis=-1) y_pred = np.argmax(y_pred, axis=-1) res = ((y_true == y_pred) * weight_mask).sum() / norms return res score = np_weighted_accuracy(to_categorical(labels), oofs) print("score", score) oofs_dir = "../data/ensemble/oofs/" output_dir = "../data/ensemble/pred/" onehot_pred_dir = "../data/ensemble/nn_one_hot/" model_submit_prefix = "LightGBM-Ensemble" oofs_path = oofs_dir + model_submit_prefix output_path = output_dir + model_submit_prefix one_hot_pred_path = onehot_pred_dir + "One-Hot" + model_submit_prefix print("Predicting training results...") oofs = pd.DataFrame({"unrelated": oofs[:, 0], "agreed": oofs[:, 1], "disagreed": oofs[:, 2]}) submit_path = oofs_path + "-Train-L{:4f}-NB{:d}.csv".format(score, NB_WORDS) oofs.to_csv(submit_path, index=False) test_predicts = pd.DataFrame({"unrelated": pred[:, 0], "agreed": pred[:, 1], "disagreed": pred[:, 2]}) submit_path = output_path + "-L{:4f}-NB{:d}.csv".format(score, NB_WORDS) test_predicts.to_csv(submit_path, index=False) # 0.3343 print("Predicting labeled testing results...") ids = pd.read_csv("../data/dataset/test.csv") pred_labels = test_predicts.idxmax(axis=1) sub = pd.DataFrame({"Id": ids['id'].values, "Category": pred_labels}) submit_path = one_hot_pred_path + "-L{:4f}-NB{:d}.csv".format(score, NB_WORDS) sub.to_csv(submit_path, index=False)score 0.8601171890929914 Predicting training results...循环- 循环是一种控制语句块重复执行的结构- while 适用于广度遍历- for 开发中经常使用while 1: print('12345') #死循环 while False: print('1') #不输出while 循环- 当一个条件保持真的时候while循环重复执行语句- while 循环一定要有结束条件,否则很容易进入死循环- while 循环的语法是: while loop-contunuation-conndition: Statementi = 0 while i< 4: i +=1 print('12345') for i in 'abcd': print(i) i = 'abcd' a[0] i = 'abcd' i=0 while i<4: print(a[i]) i +=1示例:sum = 0i = 1while i <10: sum = sum + i i = i + 1 错误示例:sum = 0i = 1while i <10: sum = sum + ii = i + 1- 一旦进入死循环可按 Ctrl + c 停止 EP:![](../Photo/143.png)![](../Photo/144.png)number = eval(input('a')) max = number while number !=0: number = eval(input('a')) if number > max: max = number print(max) print(number) i = 1 while i!=0: i -=0.8 print('i'),,,,,,,验证码- 随机产生四个字母的验证码,如果正确,输出验证码正确。如果错误,产生新的验证码,用户重新输入。- 验证码只能输入三次,如果三次都错,返回“别爬了,我们小网站没什么好爬的”- 密码登录,如果三次错误,账号被锁定import random i = random.randrange(65,91) print(i) temp = "" for i in range(6): num = random.randrange(0, 4) if num == 3 or num == 1: rad2 = random.randrange(0, 10) temp = temp + str(rad2) else: rad = random.randrange(65, 91) c = chr(rad) temp = temp + c import random b = '' j=1 while j<4: for i in range(4): a = chr(random.randint(65,90)) b +=a print(b) c = 
eval(input('输入验证码')) if c==b: print('输入正确') elif c!=b: print('error') if j==3: print('nonono') import matplotlib x_list =尝试死循环 实例研究:猜数字- 你将要编写一个能够随机生成一个0到10之间的且包括两者的数字程序,这个程序- 提示用户连续地输入数字直到正确,且提示用户输入的数字是过高还是过低 使用哨兵值来控制循环- 哨兵值来表明输入的结束- ![](../Photo/54.png) 警告![](../Photo/55.png) for 循环- Python的for 循环通过一个序列中的每个值来进行迭代- range(a,b,k), a,b,k 必须为整数- a: start- b: end- k: step- 注意for 是循环一切可迭代对象,而不是只能使用rangefor i in range(1,5,2): print(i) for i in range(1,5,): print(i) a = 10 dir(a)在Python里面一切皆对象 EP:- ![](../Photo/145.png)i = 1 sum = 0 while sum < 10000: sum = sum + i i += 1 print(i,sum)142 10011嵌套循环- 一个循环可以嵌套另一个循环- 每次循环外层时,内层循环都会被刷新重新完成循环- 也就是说,大循环执行一次,小循环会全部执行一次- 注意:> - 多层循环非常耗时 - 最多使用3层循环for i in range(3): for j in range(3): for k in range(3): print(i,j,k)0 0 0 0 0 1 0 0 2 0 1 0 0 1 1 0 1 2 0 2 0 0 2 1 0 2 2 1 0 0 1 0 1 1 0 2 1 1 0 1 1 1 1 1 2 1 2 0 1 2 1 1 2 2 2 0 0 2 0 1 2 0 2 2 1 0 2 1 1 2 1 2 2 2 0 2 2 1 2 2 2EP:- 使用多层循环完成9X9乘法表- 显示50以内所有的素数for i in range (1,10): for j in range(1,10): print(j,"x",i,"=",i*j,"\t",end="") if i==j: print("") break for i in range (1,10): for j in range (1,i+1): print(i,'x',j,'=',i*j,' ',end='') print() try: xxxx except: xxxx else: xxxx for i in range(2,50): for j in range(2,i):关键字 break 和 continue- break 跳出循环,终止循环- continue 跳出此次循环,继续执行for i in range(10): print(i) if i%2==0: break0注意![](../Photo/56.png)![](../Photo/57.png) Homework- 1 ![](../Photo/58.png)date =eval(input("Enter an integer(the input ends" +" if it is 0):")) sum =0 z=0 f=0 while date !=0: if date>0: z+=1 else: f+=1 sum +=date date=eval(input("Enter an integer (the input ends " + "if it is 0):")) v=sum/(z+f) print('和为:',sum) print('平均值为:',v) print('正数个数:',z) print('负数个数:',f)Enter an integer(the input ends if it is 0):1 Enter an integer (the input ends if it is 0):2 Enter an integer (the input ends if it is 0):-1 Enter an integer (the input ends if it is 0):3 Enter an integer (the input ends if it is 0):0 和为: 5 平均值为: 1.25 正数个数: 3 负数个数: 1- 2![](../Photo/59.png)num=10000 s=0 for i in range (1,15): num=num*1.05 if i>=10: s+=num print(str(s))90006.7105267852- 3![](../Photo/58.png) - 4![](../Photo/60.png)a=10 for i in range(100,1000): if i%5==0 and i%6==0: print(i,end=' ') a=a-1 if a==0: print('\n') a=10120 150 180 210 240 270 300 330 360 390 420 450 480 510 540 570 600 630 660 690 720 750 780 810 840 870 900 930 960 990- 5![](../Photo/61.png)n=12000 while n*n>12000: n-=1 print('最小的满足条件的n为:' +str(n+1)) n=12000 while n*n*n>12000: n-=1 print('最大的满足条件的n为:' +str(n))最小的满足条件的n为:110 最大的满足条件的n为:22- 6![](../Photo/62.png)a=eval(input('贷款数:')) b=eval(input('贷款周期:')) for i in range(5): r=0.05+i*0.00125贷款数:10000 贷款周期:5- 7![](../Photo/63.png)num1=0 num2=0 n=50000 for i in range(1,n+1): num1=num1+1/n n-=1 print('从右到左合计:'+str(num1)) n=50000 for i in range(1,n+1): num2=num2+1/i print('从右到左合计:'+str(num2))从右到左合计:11.397003949278519 从右到左合计:11.397003949278504- 8![](../Photo/64.png)a=1 b=3 num5=0 while a<99: num5 +=a/b a=b b=b+2 print(num5)45.124450303050196- 9![](../Photo/65.png)s=0 i=10000 while(i<=100000): for j in range(1,i+1): s=s+(-1)**(j+1)/(2*j-1) p=4*s print('当i='+ str(i) +'时,PI='+str(p)) i=i+10000 s=0当i=10000时,PI=3.1414926535900345 当i=20000时,PI=3.1415426535898248 当i=30000时,PI=3.141559320256462 当i=40000时,PI=3.1415676535897985 当i=50000时,PI=3.1415726535897814 当i=60000时,PI=3.141575986923102 当i=70000时,PI=3.141578367875482 当i=80000时,PI=3.1415801535897496 当i=90000时,PI=3.1415815424786238 当i=100000时,PI=3.1415826535897198- 10 ![](../Photo/66.png)def approximateNumber(num:int): result = [] for divisor in range(1,num): temp = [] 
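# (Added comment, not in the original): the inner loop below collects the proper divisors of
# `divisor`; whenever their sum equals the number itself (6, 28, 496, ...) that perfect number
# is appended to `result`, so approximateNumber(1000) returns the perfect numbers below 1000.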
for dividend in range(1,divisor): if divisor%dividend==0: temp.append(dividend) tempSum = sum(temp) if tempSum == divisor: result.append(tempSum) return result print(approximateNumber(1000))- 11![](../Photo/67.png)s = 0 for i in range(1,8): for j in range(1,8): print(i,j) s=s+1 print('the total number of all combinations is ',s)1 1 1 2 1 3 1 4 1 5 1 6 1 7 2 1 2 2 2 3 2 4 2 5 2 6 2 7 3 1 3 2 3 3 3 4 3 5 3 6 3 7 4 1 4 2 4 3 4 4 4 5 4 6 4 7 5 1 5 2 5 3 5 4 5 5 5 6 5 7 6 1 6 2 6 3 6 4 6 5 6 6 6 7 7 1 7 2 7 3 7 4 7 5 7 6 7 7 the total number of all combinations is 49- 12![](../Photo/68.png)1 **2 + 2**2 + 3**2 + 5.5 *2 + 5.6 **2 + 6 **2 + 7**2 + 8**2 + 9 **2 + 10 **2 1 + 2 + 3 + 5.5 + 5.6 + 6 + 7 + 8+ 9 + 10_*Option Pricing with qGANs*_ IntroductionIn this notebook, we discuss how a Quantum Machine Learning Algorithm, namely a quantum Generative Adversarial Network (qGAN), can facilitate the pricing of a European call option. More specifically, a qGAN can be trained such that a quantum circuit models the spot price of an asset underlying a European call option. The resulting model can then be integrated into a Quantum Amplitude Estimation based algorithm to evaluate the expected payoff - see [European Call Option Pricing](3_european_call_option_pricing.ipynb). For further details on learning and loading random distributions by training a qGAN please refer to Quantum Generative Adversarial Networks for Learning and Loading Random Distributions. Zoufal, Lucchi, Woerner. 2019.import matplotlib.pyplot as plt import numpy as np from qiskit.circuit import ParameterVector from qiskit.aqua.algorithms import IterativeAmplitudeEstimation from qiskit.finance.components.uncertainty_problems import EuropeanCallExpectedValue from qiskit.aqua.components.uncertainty_models import UnivariateVariationalDistribution, NormalDistribution from qiskit.circuit.library import RealAmplitudes from qiskit import QuantumRegister, QuantumCircuit from qiskit.aqua.components.initial_states import Custom from qiskit.aqua import aqua_globals, QuantumInstance from qiskit import AerUncertainty ModelThe Black-Scholes model assumes that the spot price at maturity $S_T$ for a European call option is log-normally distributed. Thus, we can train a qGAN on samples from a log-normal distribution and use the result as an uncertainty model underlying the option.In the following, we construct a quantum circuit that loads the uncertainty model. 
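As a hedged aside (this simply mirrors the target-distribution sampling done further down, with the same mean, sigma, and [0, 7] grid; it is not an extra step of the tutorial), the data such a qGAN is trained on can be produced like this:

```python
import numpy as np

# Sketch only: draw log-normal spot prices and histogram them onto the
# 8-point grid {0, ..., 7} used by the 3-qubit uncertainty model below.
np.random.seed(0)
samples = np.round(np.random.lognormal(mean=1.0, sigma=1.0, size=100_000))
samples = samples[samples <= 7]
probs = np.array([np.sum(samples == i) for i in range(8)], dtype=float)
probs /= probs.sum()
print(probs)  # empirical target probabilities the generator is trained to reproduce
```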
The circuit output reads $$ \big| g_{\theta}\rangle = \sum_{j=0}^{2^n-1}\sqrt{p_{\theta}^{j}} \big| j \rangle , $$where the probabilities $p_{\theta}^{j}$, for $j\in \left\{0, \ldots, {2^n-1} \right\}$, represent a model of the target distribution.# Set upper and lower data values bounds = np.array([0.,7.]) # Set number of qubits used in the uncertainty model num_qubits = [3] # Set entangler map entangler_map = [] for i in range(sum(num_qubits)): entangler_map.append([i, int(np.mod(i+1, sum(num_qubits)))]) # Load the trained circuit parameters g_params = [0.29399714, 0.38853322, 0.9557694, 0.07245791, 6.02626428, 0.13537225] # Set an initial state for the generator circuit init_dist = NormalDistribution(sum(num_qubits), mu=1., sigma=1., low=bounds[0], high=bounds[1]) init_distribution = np.sqrt(init_dist.probabilities) init_distribution = Custom(num_qubits=sum(num_qubits), state_vector=init_distribution) # construct the variational form var_form = RealAmplitudes(sum(num_qubits), entanglement=entangler_map, reps=1, initial_state=init_distribution) var_form.entanglement_blocks = 'cz' theta = ParameterVector('θ', var_form.num_parameters) var_form = var_form.assign_parameters(theta) # Set generator circuit g_circuit = UnivariateVariationalDistribution(sum(num_qubits), var_form, g_params, low=bounds[0], high=bounds[1]) g_circuit._var_form_params = theta # construct circuit factory for uncertainty model uncertainty_model = g_circuitEvaluate Expected PayoffNow, the trained uncertainty model can be used to evaluate the expectation value of the option's payoff function with Quantum Amplitude Estimation.# set the strike price (should be within the low and the high value of the uncertainty) strike_price = 2 # set the approximation scaling for the payoff function c_approx = 0.25 # construct circuit factory for payoff function european_call = EuropeanCallExpectedValue( uncertainty_model, strike_price=strike_price, c_approx=c_approx )Plot the probability distributionNext, we plot the trained probability distribution and, for reasons of comparison, also the target probability distribution.# Evaluate trained probability distribution uncertainty_model.set_probabilities(QuantumInstance(Aer.get_backend('statevector_simulator'))) x = uncertainty_model.values y = uncertainty_model.probabilities # Sample from target probability distribution N = 100000 log_normal = np.random.lognormal(mean=1, sigma=1, size=N) log_normal = np.round(log_normal) log_normal = log_normal[log_normal <= 7] log_normal_samples = [] for i in range(8): log_normal_samples += [np.sum(log_normal==i)] log_normal_samples = np.array(log_normal_samples / sum(log_normal_samples)) # Plot distributions plt.bar(x, y, width=0.2, label='trained distribution', color='royalblue') plt.xticks(x, size=15, rotation=90) plt.yticks(size=15) plt.grid() plt.xlabel('Spot Price at Maturity $S_T$ (\$)', size=15) plt.ylabel('Probability ($\%$)', size=15) plt.plot(log_normal_samples,'-o', color ='deepskyblue', label='target distribution', linewidth=4, markersize=12) plt.legend(loc='best') plt.show()Evaluate Expected PayoffNow, the trained uncertainty model can be used to evaluate the expectation value of the option's payoff function analytically and with Quantum Amplitude Estimation.# Evaluate payoff for different distributions payoff = np.array([0,0,0,1,2,3,4,5]) ep = np.dot(log_normal_samples, payoff) print("Analytically calculated expected payoff w.r.t. 
the target distribution: %.4f" % ep) ep_trained = np.dot(y, payoff) print("Analytically calculated expected payoff w.r.t. the trained distribution: %.4f" % ep_trained) # Plot exact payoff function (evaluated on the grid of the trained uncertainty model) x = uncertainty_model.values y_strike = np.maximum(0, x - strike_price) plt.plot(x, y_strike, 'ro-') plt.grid() plt.title('Payoff Function', size=15) plt.xlabel('Spot Price', size=15) plt.ylabel('Payoff', size=15) plt.xticks(x, size=15, rotation=90) plt.yticks(size=15) plt.show() # set target precision and confidence level epsilon = 0.01 alpha = 0.05 # construct amplitude estimation ae = IterativeAmplitudeEstimation(epsilon=epsilon, alpha=alpha, a_factory=european_call) result = ae.run(quantum_instance=Aer.get_backend('qasm_simulator'), shots=100) conf_int = np.array(result['confidence_interval']) print('Exact value: \t%.4f' % ep_trained) print('Estimated value: \t%.4f' % (result['estimation'])) print('Confidence interval:\t[%.4f, %.4f]' % tuple(conf_int)) import qiskit.tools.jupyter %qiskit_version_table %qiskit_copyrightLinear regression with Professor Mittens, a.k.a. recipe for linear regression. OverviewIn this notebook we will learn how to use regression to study the factors that affect the number of pats cats will recieve. This will start with a visual inspection of the data, followed by the development of a linear model to explain the data. Along the way we will answer a few questions such as: does coat colour influence the number of pats, is a long coat better than a short coat, and how important is the volume of a meow. Specifying regression modelsA very popular way to describe regression models is with "formulae" as popularised by R. The [R documentation on formulae](https://cran.r-project.org/doc/manuals/R-intro.htmlFormulae-for-statistical-models) is a good place to learn how to use these properly. For example, here is the syntax we will use today,- `y ~ x1 + x2` will make a linear model with the predictors $x_1$ and $x_2$.- `y ~ x1 * x2` includes the terms $x_1 + x_2 + x_1x_2$- `y ~ x1 : x2` includes *just* the interaction term $x_1x_2$- `y ~ C(x)` specifies that $x$ is a catagorical variable **NOTE** this is not necessary in R.%matplotlib inline import pandas as pd import numpy as np import statsmodels.api as sm import statsmodels.formula.api as smf import altair as alt from functools import reduceHelping cats get more patsProfessor Mittens in interested in helping cats optimise the number of pats they can get. To learn more about this, he has interviewed 1000 cats and taken measurements of their behaviour and appearance. The data in `cat-pats.csv` contains measurments of the following:- `time_outdoors` is the number of hours that the cat is out of their primary dwelling,- `coat_colour` is either tortoiseshell, white, or "other" encoded as integers 1, 2, and 3 respectively,- `weight` is the weight of the cat in kilograms,- `height` is their height in centimeters,- `loudness` is a measure of how loud their meow is, the units are not known,- `whisker_length` is the length of their whiskers in centimeters,- `is_longhaired` is a Boolean variable equal to 1 if the cat is of a longhaired breed and 0 if it is of a shorthaired breed,- `coat_length` is the length of their fur in centimeters,- and `num_pats` is the number of pats they received on the day they were interviewed.The variable we are interested in explaining is `num_pats`. 
Although this is a discrete variable, we will ignore this aspect of the data and consider it as a continuous value. This is a useful simplifying assumption, as you learn more about regression, in particular generalized linear models, you will see additional ways to handle this. For this example, you can consider it a continuous variable though.The types of questions that Professor Mittens is interested in answering are as follows:1. Do any of the variables correlate with the number of pats that the cats recieve?2. Under a naive model, how much of the variability in pats can they explain? Do all the variables need to be included?3. Does the coat colour matter?4. Among short-haired breeds they say longer hair is better, among long-haired breeds they say short hair is better, who is correct?5. **If a cat can choose to spend more time outdoors, or practise meowing louder, which will get them more pats?** Read in the data and generate some scatter plots to see if there are any good predictors of the number of patsThe data is in the file `cat-pats.csv` so read this into a data frame using `pd.read_csv` and go from there. I have used altair to generate my scatter plots based on [this example](https://altair-viz.github.io/gallery/scatter_matrix.html) but you can use whatever you feel most comfortable with. It might be useful to use colour to see if `coat_colour` and `is_longhaired` are important. QuestionBased on these figures, what variables appear to relate to the number of pats? What do you notice about the catagorical variables `coat_colour` and `is_longhaired`?cat_df = pd.read_csv("cat-pats.csv") cat_df col_names = cat_df.columns.tolist() predictor_names = col_names.copy() predictor_names.remove("num_pats") alt.Chart(cat_df).mark_circle().encode( alt.X(alt.repeat("column"), type='quantitative', scale=alt.Scale(zero=False)), alt.Y(alt.repeat("row"), type='quantitative', scale=alt.Scale(zero=False)), color = "is_longhaired:O" #O here stands for Ordinal since longhaired is ordinal; N would be Nominal ).properties( width=100, height=100 ).repeat( row=["num_pats"], column=predictor_names ) # When you have continuous variables, plot here like this, but with categorical, good to use colour, which is why he did that # import matplotlib.pyplot as plt # import seaborn as sns # sns.FacetGrid(cat_df,col=predictor_names,row='num_pats')Compute the correlation between each variable and the number of pats, what looks important QuestionDoes the the correlation matrix raise any further questions? Does it handle the catagorical variables correctly?cat_df.corr(method='pearson') # just focus on num_pats; good way to pre-select variables since it's a pain to make a million plots to see # Yennie: Also, can do heat map of this to visualize this! sns.heatmap(cat_df.corr()) import seaborn as sns #Hannah: If anyone’s interested, seaboarn has a nice easy function for plotting correlation maps # I posted some code above with some additional arguments, copied from this source (https://seaborn.pydata.org/examples/many_pairwise_correlations.html). 
Has some nice additional functionality if you need it in other work :) # sns.heatmap(corr, vmax=.3, center=0, # square=True, linewidths=.5, # cbar_kws={"shrink": .5}) corr = cat_df.corr() sns.heatmap(corr, annot = True, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5}) #Cameron: Pairplot in seaborn can be done with sns.pairplot(cat_df,hue="is_longhaired",y_vars='num_pats',x_vars=predictor_names)What is $R^2$?Sometimes called the *coefficient of determination*, this statistic measures the proportion of the variance in the response variable that is explained by the regression model. In the case of simple linear regression it is just the correlation squared; it can also be calculated as the ratio of the regression sum of squares and the total sum of squares. $$R^2 = \frac{\text{RegSS}}{\text{TSS}}$$It can be thought of as the proportion of the total variance that is explained by the regression model. What is an *adjusted* $R^2$?For a fixed number of observations, as the number of covariates increases you can explain as much of the variability as you want! The adjusted $R^2$ is a way to penalise using too many covariates. The adjusted $R^2$ for a model with $n$ observations and $p$ coefficients is given by the following:$$\tilde{R}^2 = 1 - \frac{n - 1}{n - p}\left(1 - R^2\right)$$ Under a naive model, how much of the variability in pats can they explain?Run an ordinary linear regression with all of the variables and see what percentage of the variability in the number of pats is explained. Make sure that you have used the categorical variables correctly. Can we be confident in rejecting the null hypothesis that none of these variables is associated with the number of pats received?cat_df lm_xw = smf.ols("num_pats ~ time_outdoors + weight + height + loudness + C(coat_colour) + coat_length + whisker_length + C(is_longhaired)",cat_df).fit() print(lm_xw.summary()) lm_1 = smf.ols("num_pats ~ time_outdoors + C(coat_colour) + weight + height + loudness + whisker_length + C(is_longhaired) + coat_length", cat_df).fit() print(lm_1.summary())OLS Regression Results ============================================================================== Dep. Variable: num_pats R-squared: 0.571 Model: OLS Adj. R-squared: 0.567 Method: Least Squares F-statistic: 146.4 Date: Thu, 12 Nov 2020 Prob (F-statistic): 4.17e-175 Time: 14:12:13 Log-Likelihood: -3240.1 No. Observations: 1000 AIC: 6500. Df Residuals: 990 BIC: 6549. Df Model: 9 Covariance Type: nonrobust ========================================================================================= coef std e[...]**Note:** the F-statistic is used to test the null hypothesis that none of the variables are related to num_pats, i.e. that all coefficients are zero. Prob (F-statistic) is the p-value for that null model of no relationship between the variables and the response. So in this case we reject the null hypothesis that none of the variables are related to the response.--If you have coat color 2 you get on avg 2+ more pats than coat color 1, and if you have coat color 3 you get on avg 8 more than coat color 1. Coat color 1 becomes the default (reference) level, so the coefficient for coat color 1 falls into the intercept. Keeping all other things constant, if you sampled two cats that were exactly the same except for coat colour, the difference would be about 2 more pats on avg...@Yennie - I think one can differentiate between impact and significance with the latter being the first sanity check to do. 
Once you have 2 variables that are significant, you can see which one is bigger to assess their respective contributions to the y variablecoef of coat color is gamma in the lecture.. two different intercepts with the same gradientPATRICK:WE should look at both the correlation and regression output to see if there is an issue of colinearity... don't expect 1 test to reveal all problemsIf you reindexed the coat color, the intercept might change but the coef will notbut if you had a tiny interaction variable, it will changeVIF is a good default minimum test but it is not necessarily going to catch all concerning relationshipsPaulius:By the way, if you would like to get a separate dummy variable in your pandas dataframe (and just plug in that to the regression equation), such as: coat_color1, coat_color2, coat_color3, you can use this code:cat_df2 = pd.concat([cat_df, pd.get_dummies(cat_df['coat_colour'], prefix = 'coat_color')], axis = 1) Principle of Marginality -when you interpret these variables you have to consider all other things held constantsm.graphics.influence_plot(lm_1, criterion="cooks") sm.graphics.plot_fit(lm_1, "coat_length") cat_df[543:546] cat_df.describe()The variance (of the coefficients) is inflated when there is colinearity... Question: Is colinearity an issue in this model? Do all of the variables need to be included?Compute the VIF to see if there is a concerning amount of colinearity between any of the covariates.col_names = cat_df.copy().columns.tolist() col_names.remove("num_pats") # we don't use the response variable in VIF calc def join_strings(xs, sep): return reduce(lambda a, b: a + sep + b, xs) for v in col_names: cns = col_names.copy() cns.remove(v) formula = v + " ~ " + join_strings(cns, " + ") coef_det = smf.ols(formula, data = cat_df).fit() vif = 1 / (1 - coef_det.rsquared) if vif > 3: print("\n" + 80 * "=") print(v) print(vif) # Yacopo for VIF from statsmodels.stats.outliers_influence import variance_inflation_factor from statsmodels.tools.tools import add_constant X = add_constant(df.iloc[:, :-1]) pd.Series([variance_inflation_factor(X.values, i) for i in range(X.shape[1])], index=X.columns) #Yen: Jacopo, what is add_constant? #Yacopo: apparently you need a column of 1 at the beginning of your dataframe. It's the intercept pretty much # Goda for VIF # variables = fitted_naive.model.exog # names = fitted_naive.model.exog_names # vif = [variance_inflation_factor(variables, i) for i in range(variables.shape[1])] # pd.DataFrame(zip(names, vif), columns=["names", 'vif']) lm_3 = smf.ols("num_pats ~ time_outdoors + C(coat_colour) + weight + height + loudness + C(is_longhaired) + coat_length", cat_df).fit() print(lm_3.summary())OLS Regression Results ============================================================================== Dep. Variable: num_pats R-squared: 0.571 Model: OLS Adj. R-squared: 0.567 Method: Least Squares F-statistic: 164.7 Date: Sun, 08 Nov 2020 Prob (F-statistic): 3.82e-176 Time: 18:33:20 Log-Likelihood: -3240.3 No. Observations: 1000 AIC: 6499. Df Residuals: 991 BIC: 6543. Df Model: 8 Covariance Type: nonrobust ========================================================================================= coef std e[...]pruned away a variable that is not informative because it's been informed by other variablescan see p-value of loudness and coat_length have decreased.... Does coat colour matter?1. Make a box plot of the number of pats by coat colour to see this pattern.2. 
Fit an additional linear model without the coat colour as a covariate to see how much of the explained variability comes from the inclusion of coat colour in the model.coat_df = cat_df.loc[:,["coat_colour", "num_pats"]].copy() alt.Chart(coat_df).mark_boxplot().encode( x='coat_colour:O', y='num_pats:Q' ).properties( width=600, height=300 ) lm_with_colour = smf.ols("num_pats ~ time_outdoors + C(coat_colour) + weight + height + loudness + C(is_longhaired) + coat_length", cat_df).fit() lm_without_colour = smf.ols("num_pats ~ time_outdoors + weight + height + loudness + C(is_longhaired) + coat_length", cat_df).fit() # print(lm_with_colour.summary()) # print(lm_without_colour.summary()) a = (lm_with_colour.resid ** 2).sum() b = (lm_without_colour.resid ** 2).sum() print('a ' + str(a)) print('b ' + str(b)) (b - a) / b df = sm.datasets.get_rdataset("Guerry", "HistData").data df = df[['Lottery', 'Literacy', 'Wealth', 'Region']].dropna() df['Region'].unique()Among short-haired breeds they say longer hair is better, among long-haired breeds they say short hair is better, who is correct?Since in the figures above we saw that the breed longhaired/shorthaired appears to separate the data, it may be useful to consider different models on each subset. Fit a linear model to each subset of the data and see that the effect of the coat length is in each case.lm_4a = smf.ols("num_pats ~ time_outdoors + C(coat_colour) + weight + height + loudness + coat_length", cat_df[cat_df["is_longhaired"] == 1]).fit() lm_4b = smf.ols("num_pats ~ time_outdoors + C(coat_colour) + weight + height + loudness + coat_length", cat_df[cat_df["is_longhaired"] == 0]).fit() print(lm_4a.summary()) print(lm_4b.summary())OLS Regression Results ============================================================================== Dep. Variable: num_pats R-squared: 0.444 Model: OLS Adj. R-squared: 0.436 Method: Least Squares F-statistic: 55.53 Date: Sun, 08 Nov 2020 Prob (F-statistic): 3.57e-58 Time: 18:52:21 Log-Likelihood: -1563.8 No. Observations: 494 AIC: 3144. Df Residuals: 486 BIC: 3177. Df Model: 7 Covariance Type: nonrobust ======================================================================================= coef std err [...]The out-of-sample fragility problem would be better because the 5 commodity futures are less correlated.# 3 Hedging & Replication (20pts) # Continue to use the same data file from the previous problem.2 # Suppose we want to invest in EEM, but hedge out SPY. Do this by estimating a regression of EEM # on SPY. # • Do NOT include an intercept. # • Use the full sample of data. # 1. (5pts) What is the optimal hedge ratio over the full sample of data? That is, for every dollar # invested in EEM, what would you invest in SPY? # 2. (5pts) What is the mean, volatility, and Sharpe ratio of the hedged position, had we applied # that hedge throughout the full sample? Annualize the statistics. # 3. (5pts) Does it have the same mean as EEM? Why or why not? # 4. (5pts) Suppose we estimated a multifactor regression where in addition to SPY, we had IWM # as a regressor. Why might this regression be difficult to use for attribution or even hedging? 
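As a minimal sketch of part 1 only (not the graded solution): the no-intercept regression of EEM on SPY and the resulting hedged position could be computed as below. The DataFrame name `factor_data` and the column names 'EEM US Equity' and 'SPY US Equity' are taken from the code that follows; everything else (the helper name, the monthly annualisation factor) is an assumption for illustration.

```python
# Sketch: optimal hedge ratio with no intercept, assuming monthly returns in a
# DataFrame `factor_data` with 'EEM US Equity' and 'SPY US Equity' columns.
import numpy as np
import pandas as pd
import statsmodels.api as sm

def no_intercept_hedge(returns: pd.DataFrame,
                       asset: str = 'EEM US Equity',
                       hedge: str = 'SPY US Equity',
                       annual_fac: int = 12):
    data = returns[[asset, hedge]].dropna()
    y, x = data[asset], data[hedge]
    beta = sm.OLS(y, x).fit().params[hedge]   # no constant added -> no intercept
    hedged = y - beta * x                     # long 1 EEM, short beta SPY
    mean = hedged.mean() * annual_fac
    vol = hedged.std() * np.sqrt(annual_fac)
    return beta, mean, vol, mean / vol        # hedge ratio, ann. mean, ann. vol, Sharpe

# hypothetical usage: beta, mu, sigma, sharpe = no_intercept_hedge(factor_data)
```

Note that the mean of the hedged position is mean(EEM) minus beta times mean(SPY), so it generally differs from EEM's own mean.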
#1 def reg_stats(df, annual_fac): reg_stats = pd.DataFrame(data = None, index = df.columns, columns = ['beta', 'Treynor Ratio', 'Information Ratio']) for col in df.columns: # Drop the NAs in y y = df[col].dropna() # Align the X with y X = sm.add_constant(factor_data['SPY US Equity'].loc[y.index]) reg = sm.OLS(y, X).fit() reg_stats.loc[col, 'beta'] = reg.params[1] reg_stats.loc[col, 'Treynor Ratio'] = (df[col].mean() * annual_fac) / reg.params[1] reg_stats.loc[col, 'Information Ratio'] = (reg.params[0] / reg.resid.std()) * np.sqrt(annual_fac) return reg_stats.astype(float).round(4) factor_data_subset = pd.DataFrame() factor_data_subset['SPY US Equity'] = factor_data['SPY US Equity'] factor_data_subset['EEM US Equity'] = factor_data['EEM US Equity'] table1 = reg_stats(factor_data_subset,12) table1 # optimal hedge would be .9863 table1['beta']['EEM US Equity'] #2 factor_data_subset['EEM new'] = factor_data_subset['EEM US Equity']*table1['beta']['EEM US Equity'] factor_new_hedge = factor_data factor_new_hedge['EEM US Equity'] = factor_data_subset['EEM new'] corr_matrix_new_hedge = corr(factor_new_hedge) w_summary(w_eem, covariance_matrix_eem, mean, 12) # 4 Modeling Risk (20pts) # Continue to use the same data file used in the previous problem. But for this problem use the total # returns of SPY and EFA. That is, use the returns as given in the spreadsheet–without subtracting # USGG3M Index. # 1. (10pts) SPY and EFA are highly correlated, yet SPY has had a much higher return. How # confident are we that SPY will overperform EFA over the next 10 years? # To answer the question, # • use statistical estimates of the total returns of SPY and EFA over the full sample. # • Assume that log returns for both assets are normally distributed. # 2. (10pts) Calculate the 60-month rolling volatility of EFA. # Use the latest estimate of the volatility (Sep 2021), along with the normality formula, to calculate # a Sep 2021 estimate of the 1-month, 1% VaR. In using the VaR formula, assume that the mean # is zero. # 1. 
factor_data_full = pd.read_excel('proshares_analysis_data.xlsx', sheet_name = 'merrill_factors') def summary_stats(df, annual_fac): report = pd.DataFrame() report['Mean'] = df.mean() * annual_fac report['Vol'] = df.std() * np.sqrt(annual_fac) report['Sharpe'] = report['Mean'] / report['Vol'] return round(report, 4) summary_stats(factor_data_full,12) def tail_risk_report(data, q): df = data.copy() df.index = data.index.date report = pd.DataFrame(columns = df.columns) report.loc['Skewness'] = df.skew() report.loc['Excess Kurtosis'] = df.kurtosis() report.loc['VaR'] = df.quantile(q) report.loc['Expected Shortfall'] = df[df < df.quantile(q)].mean() cum_ret = (1 + df).cumprod() rolling_max = cum_ret.cummax() drawdown = (cum_ret - rolling_max) / rolling_max report.loc['Max Drawdown'] = drawdown.min() report.loc['MDD Start'] = None report.loc['MDD End'] = drawdown.idxmin() report.loc['Recovery Date'] = None for col in df.columns: report.loc['MDD Start', col] = (rolling_max.loc[:report.loc['MDD End', col]])[col].idxmax() recovery_df = (drawdown.loc[report.loc['MDD End', col]:])[col] # modify the threshold for recovery from 0 to 0.001 try: report.loc['Recovery Date', col] = recovery_df[recovery_df >= 0].index[0] report.loc['Recovery period (days)'] = (report.loc['Recovery Date'] - report.loc['MDD Start']).dt.days except: report.loc['Recovery Date', col] = None report.loc['Recovery period (days)'] = None return round(report,4) tail_risk_report() #2 from arch import arch_model from arch import arch_model #expanding series sigma_expanding = factor_data_full['EFA US Equity'].shift(1).dropna().expanding(60).apply(lambda x: ((x**2).sum()/len(x))**(0.5)) #rolling windows sigma_rolling = factor_data_full['EFA US Equity'].shift(1).dropna().rolling(60).apply(lambda x: ((x**2).sum()/len(x))**(0.5)) #EWMA theta = 0.97 sigma_EWMA = pd.Series(None,dtype='float64',index=sigma_expanding.index) sigma_EWMA.iloc[0] = 0.15 for i in range(1,len(sigma_EWMA)): sigma_EWMA.iloc[i] = (theta * sigma_EWMA.iloc[i-1]**2 + (1 - theta) * (factor_data_full['EFA US Equity'].dropna().iloc[i-1]**2))**0.5 #GARCH GARCH = arch_model(factor_data_full['EFA US Equity'].dropna(), vol='Garch', p=1, o=0, q=1, dist='Normal',rescale=False) GARCH_model = GARCH.fit() sigma_garch = pd.Series(None,dtype='float64',index=sigma_expanding.index) sigma_garch.iloc[0] = 0.15 for i in range(1,len(sigma_garch)): sigma_garch.iloc[i] = (GARCH_model.params['omega'] + sigma_garch.iloc[i-1]**2 * GARCH_model.params['beta[1]'] + GARCH_model.params['alpha[1]']*(factor_data_full['EFA US Equity'].dropna().iloc[i-1]**2))**0.5Iteration: 1, Func. Count: 6, Neg. LLF: 7783286.747106821 Iteration: 2, Func. Count: 16, Neg. LLF: 3263.677234457262 Iteration: 3, Func. Count: 22, Neg. LLF: -176.00358443756494 Iteration: 4, Func. Count: 29, Neg. LLF: -219.74932142550813 Iteration: 5, Func. Count: 34, Neg. LLF: -219.47488156458598 Iteration: 6, Func. Count: 40, Neg. LLF: -214.49058809234523 Iteration: 7, Func. Count: 47, Neg. LLF: -219.74707091676112 Iteration: 8, Func. Count: 53, Neg. LLF: -219.92127126568946 Iteration: 9, Func. Count: 58, Neg. LLF: -219.92048006765228 Iteration: 10, Func. Count: 64, Neg. LLF: -219.92199285228276 Iteration: 11, Func. Count: 69, Neg. LLF: -219.92199438738953 Iteration: 12, Func. Count: 73, Neg. LLF: -219.92199438144254 Optimization terminated successfully (Exit mode 0) Current function value: -219.92199438738953 [...]Stock Markets Internal LibraryUsing [Alpha Vantage](https://www.alphavantage.co/). 
Free : 5 API requests per minute; 500 API requests per day. Use library function to use the cache/pickle as much as possible, to avoid exceeding free quote quota - https://www.alphavantage.co/documentation/ - https://en.wikipedia.org/wiki/List_of_S%26P_500_companies- https://github.com/twopirllc/pandas-ta/blob/master/examples/AIExample.ipynbimport matplotlib.pyplot as plt import pandas as pd import numpy as np plt.rcParams['figure.figsize'] = [20, 10] pd.read_csv('https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=IBM&apikey=demo&datatype=csv',index_col=0)WGETInstead of hitting alphavantage everytime, we cache it in **wdata.AVGet()**import wdata abt = wdata.AVGet('ABT') ibm = wdata.AVGet('IBM') abt.iloc[::-1]['adjusted_close'].plot() # Time Series is reversed, [::-1] to reverse it df1 = abt[['adjusted_close']].rename(columns={'adjusted_close': 'ABT'}) df1['IBM'] = ibm['adjusted_close'] df1['FB'] = wdata.AVGet('FB')['adjusted_close'] df1[::-1].plot() #import talib #abt['MA'] = talib.SMA(abt['adjusted_close'],100) abt[::-1][['adjusted_close','MA']].plot()Pandas-TA- https://github.com/twopirllc/pandas-ta- https://github.com/twopirllc/pandas-ta/blob/master/examples/AIExample.ipynb- pip install alphaVantage-api- pip install pandas-taimport pandas_ta as ta asset = abt[:400][::-1].copy() asset.ta.adjusted = "adjusted_close" asset.ta.ema(length=8, append=True) asset.ta.ema(length=21, append=True) asset.ta.ema(length=50, append=True) asset[asset.columns[5:]].tail() print(asset.ta.ema(length=2, append=False).head(5)) print(asset.close.head(5)) asset[["close", "EMA_8", "EMA_21", "EMA_50"]].plot()Trend Returns and Cumulative Trend Returnslong = ta.ema(asset.close, 8) > ta.ema(asset.close, 21) trendy = asset.ta.trend_return(trend=long, cumulative=True, trade_offset=-1, append=True) trendy.tail() # Third Column is the long trend; binary sequencesJoinhttps://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.join.htmlcltr = trendy.iloc[:,0] tr = trendy.iloc[:,1] trendy.iloc[:,:2].plot(figsize=(16, 3)) cltr.plot(figsize=(16, 3), kind="area", stacked=False, alpha=0.25, grid=True) capital = 10000 total_return = cltr.cumsum() * capital positive_return = total_return[total_return > 0] negative_return = total_return[total_return <= 0] trdf = pd.DataFrame({"tr+": positive_return, "tr-": negative_return}) trdf.plot(figsize=(16, 5), kind="area", stacked=False, alpha=0.25, grid=True) long_trend = (trendy.iloc[:,-2] > 0).astype(int) short_trend = (1 - long_trend).astype(int) long_trend.plot(figsize=(16, 0.85), kind="area", stacked=True, alpha=0.25) short_trend.plot(figsize=(16, 0.85), kind="area", stacked=True, alpha=0.25) entries = (trendy.iloc[:,-1] > 0).astype(int) * asset.close entries[entries < 0.0001] = np.nan entries.name = "Entry" exits = (trendy.iloc[:,-1] < 0).astype(int) * asset.close exits[exits < 0.0001] = np.nan exits.name = "Exit" total_trades = trendy.iloc[:,-1].abs().sum() print(f"Total Trades: {total_trades}") all_trades = trendy.iloc[:,-1].copy().fillna(0) all_trades = all_trades[all_trades != 0] trades = pd.DataFrame({"Signal": all_trades, entries.name: entries.dropna(), exits.name: exits.dropna()}) trades['PnL'] = (trades.Exit - trades.Entry.shift(1)) / trades.Entry.shift(1) (1 + trades.PnL).prod() trades asset.close - asset.close.shift(1) # chart = asset["close"] #asset[["close", "SMA_10", "SMA_20", "SMA_50", "SMA_200"]] # chart = asset[["close", "SMA_10", "SMA_20"]] chart = asset[["close", "EMA_8", "EMA_21", "EMA_50"]] chart.plot(figsize=(16, 10), 
grid=True) entries.plot(figsize=(16, 10), marker="^", color='green', markersize=12, alpha=0.8) exits.plot(figsize=(16, 10), marker="v", color='#FF0000', markersize=12, alpha=0.8, grid=True)import numpy as np import pandas as pd from keras.optimizers import SGD from keras.preprocessing import image from os.path import join from PIL import Image from scipy import misc from keras.models import Sequential import tensorflow as tf import matplotlib.pyplot as plt from keras.preprocessing.image import ImageDataGenerator from keras.preprocessing import image from sklearn.model_selection import train_test_split from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, UpSampling2D, Softmax, ZeroPadding2D from tensorflow.keras import datasets, layers, models from tensorflow import keras from tensorflow.keras import layers from keras.layers.core import Flatten, Dense, Dropout from keras.layers import Dense, Conv2D, MaxPool2D , Flatten from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras.preprocessing import image from tensorflow.keras.applications.vgg16 import preprocess_input from tensorflow.keras.utils import plot_model from keras.preprocessing.image import load_img from keras.preprocessing.image import img_to_array from keras.applications.vgg16 import preprocess_input from keras.applications.vgg16 import decode_predictions from keras.applications.vgg16 import VGG16 from keras.optimizers import Adam train_datagen = ImageDataGenerator( rescale=1./255, shear_range=0.2, zoom_range=0.2, vertical_flip=True, horizontal_flip=True, rotation_range=90, width_shift_range=0.1, height_shift_range=0.1, validation_split=0.3) test_datagen = ImageDataGenerator(rescale=1./255) train_generator = train_datagen.flow_from_directory( '/content/drive/MyDrive/Colab Notebooks/train', target_size=(224, 224), batch_size=32, class_mode='categorical') validation_generator = test_datagen.flow_from_directory( '/content/drive/MyDrive/Colab Notebooks/train', target_size=(224, 224), batch_size=32, class_mode='categorical') #print(train_generator) #print(validation_generator) model = Sequential() model.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding="same", activation="relu")) model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu")) model.add(MaxPool2D(pool_size=(2,2),strides=(2,2))) model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu")) model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu")) model.add(MaxPool2D(pool_size=(2,2),strides=(2,2))) model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu")) model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu")) model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu")) model.add(MaxPool2D(pool_size=(2,2),strides=(2,2))) model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")) model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")) model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")) model.add(MaxPool2D(pool_size=(2,2),strides=(2,2))) model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")) model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")) model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")) model.add(MaxPool2D(pool_size=(2,2),strides=(2,2))) model.add(Flatten()) 
model.add(Dense(units=4096,activation="relu")) model.add(Dense(units=4096,activation="relu")) model.add(Dense(units=2, activation="softmax")) opt = Adam(lr=0.001) model.summary() model.compile(optimizer=opt, loss=keras.losses.categorical_crossentropy, metrics=['accuracy']) #model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) #model.fit(train_generator,steps_per_epoch=32,epochs=10,validation_data=validation_generator,validation_steps=800) ## Sonra drop out ve batch normalization ekleyerek modeli optimize edebilirsinizFound 2026 images belonging to 2 classes. Found 2026 images belonging to 2 classes.第8回目の講義中の練習問題回答 `for` ループ1. `for`ループを使って、1 から 5 まで表示しなさい。for N in [1, 2, 3, 4, 5]: print(N)1 2 3 4 52. 1番目で書いたコードを一行で表示するように修正しなさい。for N in [1,2,3,4,5]: print(N, end=' ')1 2 3 4 53. `range`を使って、1から5まで一行に表示しなさい。for N in range(1,6): print(N, end=" ")1 2 3 4 5`range`は普通0から始まる。`in range(start,end+1,step)`という意味。 4. 1 から 5 までの一つのリストを作りなさい。list(range(1,6))5. 3の倍数のNループを作りなさい。但し、Nは30より小さいまたは等しいとする。for N in range(3,31,3): print(N)3 6 9 12 15 18 21 24 27 306. 1から5までの合計を計算しなさい。#totalを初期化する total = 0 for N in range(1,6): total += N #totalを毎回Nと加算させる print(total)157. `places`リストを作り、`for`ループを使って、スペースを”-”と代入して、最初の文字が大文字にした`new_places`リストを作りなさい。`places = ["kamigyo ku","nakagyo ku", "sakyo ku"]`を`new_places=['Kamigyo-ku', 'Nakagyo-ku', 'Sakyo-ku']`にしなさい。places = ["kamigyo ku","nakagyo ku", "sakyo ku"] new_places = [] for place in places: new_places.append(place.capitalize().replace(" ","-")) print(new_places) new_places = ["kamigyo ku","nakagyo ku", "sakyo ku"] for i in range(len(new_places)): new_places[i] = new_places[i].capitalize().replace(" ", "-") print(new_places)['Kamigyo-ku', 'Nakagyo-ku', 'Sakyo-ku']8. かごの中にある果物の数を数えましょう。かごの辞書は下記通り。`basket_items = {'apples': 4, 'oranges': 19, 'kites': 3, 'sandwiches': 8}`果物のリストは下記通り。`fruits = ['apples', 'oranges', 'pears', 'peaches', 'grapes', 'bananas']`total = 0 basket_items = {'apples': 4, 'oranges': 19, 'kites': 3, 'sandwiches': 8} fruits = ['apples', 'oranges', 'pears', 'peaches', 'grapes', 'bananas'] for object, calculate in basket_items.items(): if object in fruits: total += calculate print("かごの中には{}個の果物が入っている.".format(total))かごの中には23個の果物が入っている.9. かごの中には果物ではないものを数えなさい。total = 0 for object, calculate in basket_items.items(): if object not in fruits: total += calculate print("かごの中には{}個のものが果物ではない.".format(total))かごの中には11個のものが果物ではない.`while` ループ1. 1から10まで`while`ループを使って、表示しなさい。同じく、`for`ループを使って表示しなさい。比較してみましょう。i = 1 #初期化 while i < 11: #最後の値 print(i) i += 1 #ステップ for i in range(1,11): print(i)1 2 3 4 5 6 7 8 9 102. 1から10までの合計を`while`と`for`ループを使って、計算しなさい。i = 1 total = 0 while i <= 10: total += i i += 1 print(total) total = 0 for i in range(1,11): total += i print(total)553. 50の一番近い2乗を`while`ループを使って、表示しなさい。答えは`49`。limit = 50 num = 0 while (num+1)**2 < limit: num += 1 nearest_square = num ** 2 print(nearest_square)49`break`と`continue`1. 1から20の奇数を`continue`を使って、表示しなさい。for n in range(20): #もし計算のあまりが0の場合、ループをスキップ if n % 2 == 0: continue print(n, end=' ')1 3 5 7 9 11 13 15 17 192. 
`break`を使って、下記の文章を100文字まで表示しなさい。`機械学習では、センサやデータベースなどに由来するサンプルデータを入力して解析を行い、そのデータから有用な規則、ルール、知識表現、判断基準などを抽出し、アルゴリズムを発展させる。そのアルゴリズムは、まずそのデータ(訓練例と呼ぶ)を生成した潜在的機構の特徴(確率分布)を捉え、複雑な関係を識別・定量化する。次に学習・識別したパターンを用いて新たなデータについて予測・決定を行う。データは、観測された変数群のとる関係の具体例と見ることができる`paragraph = "機械学習では、センサやデータベースなどに由来するサンプルデータを入力して解析を行い、そのデータから有用な規則、ルール、知識表現、判断基準などを抽出し、アルゴリズムを発展させる。そのアルゴリズムは、まずそのデータ(訓練例と呼ぶ)を生成した潜在的機構の特徴(確率分布)を捉え、複雑な関係を識別・定量化する。次に学習・識別したパターンを用いて新たなデータについて予測・決定を行う。データは、観測された変数群のとる関係の具体例と見ることができる" for p in paragraph: if len(paragraph) >= 100: sentence = paragraph[:100] break print(sentence)機械学習では、センサやデータベースなどに由来するサンプルデータを入力して解析を行い、そのデータから有用な規則、ルール、知識表現、判断基準などを抽出し、アルゴリズムを発展させる。そのアルゴリズムは、まず生入力スクリプト1. ユーザーを自分の名前を入力させてください。name = input("名前: ") print(name)名前: Fairoza Fairoza2.ユーザーに`こんにちは`とあいさつしましょう。print("こんにちは {}!".format(name))こんにちは Fairoza!3. ユーザーの年齢を聞きましょう。age = input("年齢: ") print(age)年齢: 28 284. ユーザーの5年後の年齢を表示しなさい。age = int(age) age_5_years_later = age + 5 print(age_5_years_later)335. 下記のように出力しましょう。`こんにちは さん! さんは今歳ですが、5年後は歳になりますよ。`print("こんにちは{}さん! {}さんは今{}歳ですが、5年後は{}歳になりますよ".format(name, name, age, age_5_years_later))こんにちはFairozaさん! Fairozaさんは今28歳ですが、5年後は33歳になりますよ6. もしたくさんのユーザーがいて、`for`ループを使って、どうやって簡単に表示できますか?names = input("コマで区切って名前を入力して: ").title().split(",") ages = input("コマで区切って年齢を入力して: ").split(",") message = "こんにちは{}さん! {}さんは今{}歳ですが、5年後は{}歳になりますよ" for name, age in zip(names, ages): print(message.format(name, name, age, int(age)+5))コマで区切って名前を入力して: Fairoza, Fairuz コマで区切って年齢を入力して: 28, 21 こんにちはFairozaさん! Fairozaさんは今28歳ですが、5年後は33歳になりますよ こんにちは Fairuzさん! Fairuzさんは今 21歳ですが、5年後は26歳になりますよエラーと例外1. 変数を設定せずに、表示しようとしたらどうなりますか?print(x)2. 文字列と整数を算術しようとしたら、どうなりますか?1 + 'abc'3. 存在しないリストのインデックスをアクセスしようとしたら、どうなりますか?L = ["a", "b", "c"] L[1000] L = ["a", "b", "c"] L["d"]4. `try`と`except`の関数を使って、エラーを見つかりましょう。下記の文を実行してみてください。```try: print("this gets executed first")except: print("this gets executed only if there is an error")```try: print("this gets executed first") except: print("this gets executed only if there is an error")this gets executed first5. `x=1/0`を`try`のところに足してみたら?try: print("let's try something:") x = 1 / 0 # ZeroDivisionError except: print("something bad happened!")let's try something: something bad happened!6. 例外を`raise`しましょう。raise RuntimeError("my error message")7. `x=10`とすると、`x`は5以上の場合、例外を`raise`してください。x = 10 if x > 5: raise Exception('x should not exceed 5. The value of x was {}'. format(x))8. 
`try`と`except`の他に、`else`と`finally`は例外を実行させる。`finally`なら、どんなことがあっても、実行させる。try: print("try something here") except: print("this happens only if it fails") else: print("this happens only if it succeeds") finally: print("this happens no matter what")try something here this happens only if it succeeds this happens no matter what[作業目標]- 仿造範例的 One Hot Encoding, 將指定的資料進行編碼 [作業重點]- 將 sub_train 進行 One Hot Encoding 編碼 (In[4], Out[4])import os import numpy as np import pandas as pd # 設定 data_path, 並讀取 app_train dir_data = './data/' f_app_train = os.path.join(dir_data, 'application_train.csv') app_train = pd.read_csv(f_app_train)作業將下列部分資料片段 sub_train 使用 One Hot encoding, 並觀察轉換前後的欄位數量 (使用 shape) 與欄位名稱 (使用 head) 變化sub_train = pd.DataFrame(app_train['WEEKDAY_APPR_PROCESS_START']) print(sub_train.shape) sub_train.head() """ Your Code Here """download the data as csv file#imports import requests from requests import session import os from dotenv import load_dotenv, find_dotenv #payload for post request payload = { 'action' : 'login', 'username' : os.environ.get("kaggle_username"), 'password' : ("kaggle_password") } def extract_data(url, file_path) : ''' extract data from kaggle ''' #set up a session with session() as c : c.post('https://www.kaggle.com/account/login',data=payload) #open a file to write to with open(file_path, 'w') as handle : response = c.get(url, stream=True) for block in response.iter_content(1024) : handle.write(block) train_url = 'https://www.kaggle.com/c/titanic/train.csv' test_url = 'https://www.kaggle.com/c/titanic/test.csv' #paths raw_data_path = os.path.join(os.path.pardir, 'data', 'raw') train_data_path = os.path.join(raw_data_path, 'train.csv') test_data_path = os.path.join(raw_data_path, 'test.csv') #extract data extract_data(train_url, train_data_path) extract_data(test_url, test_data_path) !ls -l ../data/rawtotal 24 -rw-r--r-- 1 home 197121 8793 Jul 27 23:10 test.csv -rw-r--r-- 1 home 197121 8793 Jul 27 23:10 train.csvwrite a script to to download dataget_raw_data_script_file = os.path.join(os.path.pardir, 'src', 'data', 'get_raw_data.py') %%writefile $get_raw_data_script_file # -*- coding: utf-8 -*- #imports import requests from requests import session import os from dotenv import load_dotenv, find_dotenv import logging #payload for post request payload = { 'action' : 'login', 'username' : os.environ.get("kaggle_username"), 'password' : ("kaggle_password") } def extract_data(url, file_path) : ''' extract data from kaggle ''' #set up a session with session() as c : c.post('https://www.kaggle.com/account/login',data=payload) #open a file to write to with open(file_path, 'w') as handle : response = c.get(url, stream=True) for block in response.iter_content(1024) : handle.write(block) def main(project_dir) : ''' main method ''' logger = logging.getLogger(__name__) logger.info('getting raw data') #urls train_url = 'https://www.kaggle.com/c/titanic/train.csv' test_url = 'https://www.kaggle.com/c/titanic/test.csv' # file paths raw_data_path = os.path.join(os.path.pardir, 'data', 'raw') train_data_path = os.path.join(raw_data_path, 'train.csv') test_data_path = os.path.join(raw_data_path, 'test.csv') #extract data extract_data(train_url, train_data_path) extract_data(test_url, test_data_path) logger.info('downloaded the raw training and test data') if __name__ == '__main__': project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir) #logger log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' logging.basicConfig(level=logging.INFO, format=log_fmt) #find 
the .env dotenv_path = find_dotenv() #load the variables load_dotenv(dotenv_path) #call main main(project_dir) !python $get_raw_data_script_file2018-07-27 00:17:23,663 - __main__ - INFO - getting raw data 2018-07-27 00:17:24,467 - __main__ - INFO - downloaded the raw training and test dataA notebook to test the developed `cmdperm` package.%matplotlib inline %load_ext autoreload %autoreload 2 #%config InlineBackend.figure_format = 'svg' #%config InlineBackend.figure_format = 'pdf' import cmdprod as cp import cmdprod.main as main import numpy as np # pk = cp.Param('k', ['gauss', 'imq'], '--k') # pkparams = cp.Param('kparams', [1, 2], '--kparams') kgroup = cp.ParamGroup( ['k', 'kparams'], [('gauss', 1.0), ('imq', [-0.5, 1.0]), ('imq', [-0.5, 10])], ['--k', '--kparams'] ) a = cp.Param('A', ['a0', 'a1']) b = cp.Param('B', np.linspace(0, 1, 2)) args = cp.Args([kgroup, a, b])Write as lines# formatter = cp.IAFArgparse(pv_sep=' ') args_processor = cp.APPrint(prefix='script.py ', suffix=' &\n') args_processor.iaf.value_formatter.list_value_sep = ', ' args_processor(args)Write as filesiaf = cp.IAFArgparse(pv_sep=' \\\n') args_file_proc = cp.APPerBashFile( dest_path='gen_files/', token_path='gen_files/', iaf=iaf, file_begin='#!/bin/bash', file_end='# end of script', line_begin='echo "test" ', line_end=' # line ends', ) args_file_proc(args)TESTINGtranslator(prefix+'Who is ?', max_length=100) translate('Who is ?') translate('Who is ?') translate('What is the time zone of Salt Lake City?') pretty_translate('select distinct ?sbj where { ?sbj wdt:P35 wd:Q127998 . ?sbj wdt:P31 wd:Q6256 }','Who is the country for head of state of Mahmoud Abbas?') pretty_translate('select distinct ?sbj where { ?sbj wdt:P35 wd:Q127998 . ?sbj wdt:P31 wd:Q6256 }',"What country is Mahmoud Abbas the head of state of?") pretty_translate('select distinct ?sbj where { ?sbj wdt:P35 wd:Q127998 . ?sbj wdt:P31 wd:Q6256 }','Who is the country for head of state of Mahmoud Abbas?') pretty_translate("SELECT ?answer WHERE { wd:Q16538 wdt:P725 ?answer . ?answer wdt:P106 wd:Q177220}","Which female actress is the voice over on South Park and is employed as a singer?".lower()) pretty_translate("SELECT ?answer WHERE { wd:Q16538 wdt:P725 ?answer . ?answer wdt:P106 wd:Q177220}","Which female actress on South Park is the voice over and is used as a singer?") # Paul Erdős (Q173746) pretty_translate("select distinct ?answer where { wd:Q173746 wdt:P3973 ?answer}","Which is the PIM authority ID of Paul Erd?") pretty_translate("SELECT ?obj WHERE { wd:Q1045 p:P1082 ?s . ?s ps:P1082 ?obj . ?s pq:P585 ?x filter(contains(YEAR(?x),'2009')) }", "What was the population of Somalia in 2009-0-0?") translate('Humans born in New York City') #random query - answer seems correct # From QALD target = "ASK WHERE { a }" q = "Are Taiko some kind of Japanese musical instrument?" pretty_translate(target, q) # From QALD target = "PREFIX dct: PREFIX dbc: SELECT DISTINCT ?uri WHERE { ?uri dct:subject dbc:Assassins_of_Julius_Caesar }" q = "Who killed Caesar?" pretty_translate(target, q) # From QALD q = 'What is the highest mountain in Germany?' target = "PREFIX rdfs: PREFIX rdf: PREFIX onto: \nSELECT ?uri WHERE { ?uri rdf:type onto:Mountain ; onto:elevation ?elevation ; onto:locatedInArea } ORDER BY DESC(?elevation) LIMIT 1" pretty_translate(target, q) # From QALD q = 'Which American presidents were in office during the Vietnam War?' target = "PREFIX dbo: PREFIX res: PREFIX dct: PREFIX dbc: SELECT ?uri WHERE { ?uri dct:subject dbc:Presidents_of_the_United_States . 
res:Vietnam_War dbo:commander ?uri }" pretty_translate(target, q) # From QALD q = 'How many gold medals did win at the 2008 Olympics?' target = "PREFIX dbo: PREFIX dbr: PREFIX rdf: PREFIX rdfs: SELECT Count(?sub) as ?c WHERE { ?sub dbo:goldMedalist dbr:Michael_Phelps . filter (contains (str(?sub), \"2008\") && contains (str(?sub), \"Olympics\")) }" pretty_translate(target, q) # From QALD q = 'What is the profession of ?' target = "PREFIX dbpedia2: PREFIX res: SELECT DISTINCT ?string WHERE { res:Frank_Herbert dbpedia2:occupation ?string }" pretty_translate(target, q) # From QALD q = 'How many seats does the home stadium of FC Porto have?' target = "PREFIX dbo: PREFIX dbp: PREFIX dbr: PREFIX rdf: PREFIX rdfs: PREFIX db: SELECT ?capacity WHERE { { dbr:FC_Porto dbo:ground ?ground . ?ground dbo:capacity ?capacity } UNION { dbr:FC_Porto dbo:ground ?ground . ?ground dbp:capacity ?capacity } }" pretty_translate(target, q) # From QALD q = 'Which frequent flyer program has the most airlines?' target = "SELECT ?uri WHERE { ?airline . ?airline ?uri. } GROUP BY ?uri ORDER BY DESC(COUNT(DISTINCT ?airline)) OFFSET 0 LIMIT 1" pretty_translate(target, q) # From QALD q = 'Which European countries have a constitutional monarchy?' target = "PREFIX dbo: PREFIX dct: PREFIX dbc: PREFIX dbr: SELECT ?uri WHERE { ?uri dct:subject dbc:Countries_in_Europe ; dbo:governmentType dbr:Constitutional_monarchy }" pretty_translate(target, q) # From QALD q = 'Which countries have places with more than two caves?' target = "PREFIX dbo: PREFIX rdf: SELECT DISTINCT ?uri WHERE { ?cave rdf:type dbo:Cave ; dbo:location ?uri . ?uri rdf:type dbo:Country } GROUP BY ?uri HAVING ( COUNT(?cave) > 2 )" pretty_translate(target, q) # From QALD q = 'Which airports are located in California, USA?' target = "SELECT DISTINCT ?uri WHERE { ?uri a { ?uri } UNION { ?uri } UNION { ?uri ?city . ?city } UNION { ?uri } }" pretty_translate(target, q) # From QALD q = "What are the nicknames of San Francisco?" target = "SELECT DISTINCT ?string WHERE { res:San_Francisco foaf:nick ?string }" pretty_translate(target, q) # From QALD q = "What is ’s birth name?" target = "SELECT DISTINCT ?string WHERE { res:Angela_Merkel dbp:birthName ?string }" pretty_translate(target, q) # From QALD q = "Who is the mayor of Berlin?" target = "SELECT DISTINCT ?uri WHERE { res:Berlin dbp:leader ?uri }" pretty_translate(target, q) # From QALD q = "Which software has been published by Mean Hamster Software?" target = "SELECT DISTINCT ?uri WHERE { ?uri rdf:type onto:Software { ?uri prop:publisher \"Mean Hamster Software\"@en } UNION { ?uri onto:publisher res:Mean_Hamster_Software } }" pretty_translate(target, q) # From QALD q = "Which country was born in?" target = "SELECT DISTINCT ?country WHERE { { dbr:Bill_Gates dbo:birthPlace ?birthPlace . ?birthPlace dbo:country ?country } UNION { dbr:Bill_Gates dbo:birthPlace ?birthPlace . ?birthPlace dbo:isPartOf ?place . ?place dbo:country ?country } }" pretty_translate(target, q) # From QALD q = "How many grand-children did have?" target = "SELECT COUNT(DISTINCT ?y AS ?y) WHERE { ?x . ?x ?y . }" pretty_translate(target, q) # From QALD q = "Give me all professional skateboarders from Sweden." target = "SELECT DISTINCT ?uri WHERE { ?uri dbo:occupation dbr:Skateboarder { ?uri dbo:birthPlace dbr:Sweden } UNION { ?uri dbo:birthPlace ?place . ?place dbo:country dbr:Sweden } }" pretty_translate(target, q) # From QALD q = "Which monarchs of the United Kingdom were married to a German?" 
target = "SELECT DISTINCT ?uri WHERE { ?uri rdf:type yago:WikicatMonarchsOfTheUnitedKingdom ; dbo:spouse ?spouse . ?spouse dbo:birthPlace res:Germany }" pretty_translate(target, q) # From QALD q = "Give me all Argentine films." target = "SELECT DISTINCT ?uri WHERE { { ?uri rdf:type yago:ArgentineFilms } UNION { ?uri rdf:type dbo:Film { ?uri dbo:country res:Argentina } UNION { ?uri dbp:country \"Argentina\"@en } } }" pretty_translate(target, q) # From QALD q = "How did die?" target = "SELECT DISTINCT ?s WHERE { ?s }" pretty_translate(target, q) # From QALD q = "Where did died?" target = "." pretty_translate(target, q) # From QALD q = "Which classes does the Millepede belong to?" target = "SELECT DISTINCT ?String WHERE { res:Millipede dbp:taxon ?String }" pretty_translate(target, q) # From QALD q = "Which classes does the Millepede belong to?" target = "SELECT DISTINCT ?String WHERE { res:Millipede dbp:taxon ?String }" pretty_translate(target, q)Google Translate API with Python requestPython library is cool, as many know that we can simply use https://pypi.org/project/googletrans/ which is free and unlimited.But how about we do it ourselves for fun-sake! With basic POST request you can directly make a call to google translate directly!Google Translate API documentation (that I mimick the "curl" example)https://cloud.google.com/translate/docs/basic/setup-basic Getting start1. Please follow the [document](https://cloud.google.com/translate/docs/basic/setup-basic) on the official website2. You should now have... - created a project for google translation api - download private json key - set GOOGLE_APPLICATION_CREDENTIALS env variable to the full PATH to your json file - install **gcloud sdk**To check if you're ready for curl request, try running this command on your console:`gcloud auth application-default print-access-token`This should reture access token for google translation APINote: `json` file contains key and links need for you to refresh you token after it expires. Running the `gcloud` commands does all the work for requesting for a token or refreshing the tokenimport requests import json import subprocess token = subprocess.check_output([ r'C:\Users\Acer\AppData\Local\Google\Cloud SDK\google-cloud-sdk\bin\gcloud.cmd', 'auth', 'application-default', 'print-access-token' ]).strip().decode("utf-8") headers = { 'Authorization': 'Bearer {token}'.format(token = token) } data = { 'q' : 'Xing★內刷毛孕婦打底褲側邊條紋春裝外穿托腹長褲顯瘦打底加大尺碼孕婦褲', 'source': 'zh', 'target': 'en', 'format': 'text' } response = requests.post( url = 'https://translation.googleapis.com/language/translate/v2', headers = headers, data = data ) json.loads(response.text)Naive Approach For Full Text Search- Serial Search- Analogy: Flipping through a dictionary from beginning to end to find a word.import re def simple_search(word, corpus:list): list_of_docs = [] for i, doc in enumerate(corpus): for w in re.findall(f'[a-z]+', doc.lower()): if w == word: list_of_docs.append(i) return list_of_docs %time result = simple_search('chocolate', corpus)CPU times: user 299 ms, sys: 6.45 ms, total: 306 ms Wall time: 319 msSmarter Way: Using an Index- Inverted Index- {'word' : [doc1, doc2, doc3]}def create_index(corpus): index = {} for i, doc in enumerate(corpus): for w in re.findall(f'[a-z]+', doc.lower()): if w in index: #if we've already seen the word before index[w].append(i) else: index[w] = [i] return index %time index = create_index(corpus) %timeit result = index['chocolate']73.8 ns ± 2.4 ns per loop (mean ± std. dev. 
of 7 runs, 10000000 loops each)- Naive Search: ~300ms- Indexed Search: ~70 ns Limitations:- The index has to be rebuilt whenever the documents change, so it may not be a good fit for very dynamic data.- No concept of natural language (e.g. lemmatization, stop words)- We can't do phrase search / multiple words. - Typos (Fuzzy searching)- Ranking?! --- Full Text Search in PostGres--- FTS representation in PostGres:- We have a datatype called `tsvector`, which basically is pre-processed and stripped down to a basic form (e.g. lemmatized)- We then have `tsquery`, which is a search query that is normalized into lexemes / lemmas.- In order to match a query to a vector, we have to use the strange `@@` syntax. Example Queries, Part 1 (on individual strings first, not the data set yet):**(1) Basic Matching**: `SELECT to_tsvector('this dish was horrible') @@ to_tsquery('horrible');`- What's nice is that it's case insensitive! So it automatically does lowercase.**(2) Multiple words**: `SELECT to_tsvector('this dish was horrible') @@ plainto_tsquery('horrible dish');`- *plainto_tsquery* transforms unformatted query text to a tsquery. The text is parsed and normalized much as for to_tsvector, then the & (AND) Boolean operator is inserted between the surviving words.**(3) Under the Hood**: `SELECT to_tsvector('this dish was horrible');`- Note that it returns the tokenized sentence! It even does lemmatization for us (e.g. horrible -> horribl)**(4) Interesting case**: `SELECT to_tsvector('A nice day for a car ride') @@ plainto_tsquery('I am riding a bike'); `- Returns False! Interesting, because the query contains extra information ("bike") that the document does not, and therefore it does not match. "Bike" isn't included, therefore the document isn't considered relevant. But **I am riding** DOES work! --- Example Queries, Part 2 (on the food review data set!):**(5) Searching a column**: `SELECT id, text FROM food_reviews WHERE to_tsvector(text) @@ to_tsquery('horrible');`- Works, but it's slow (took ~2.5 seconds to return all results)**(6) Another**: `SELECT id,text FROM food_reviews WHERE to_tsvector(text) @@ plainto_tsquery('dry pasta');`**(7) Concatenating Multiple Cols**: `SELECT summary,text FROM food_reviews WHERE to_tsvector(summary || ' ' || text) @@ plainto_tsquery('addictive flavor');`**(8) Enforce Word Order**: `SELECT text FROM food_reviews WHERE to_tsvector(text) @@ to_tsquery('addictive <-> flavor');`- Only return results where "addictive" is immediately followed by "flavor" --- Example Queries, Part 3 (on the food review data set!):- **Indexing and Ranking!** **(9) Create an Inverted Index**:CREATE INDEX document_idx ON food_reviews USING GIN (to_tsvector('english', summary || ' ' || coalesce(text, '')));- The coalesce function substitutes `''` (an empty string) if the text column contains NA values.- The creation of the index took about 6 seconds!! **(10) Query on the Index**:SELECT * FROM food_reviews WHERE to_tsvector('english', summary || ' ' || coalesce(text, '')) @@ to_tsquery('horrible');- I'm sure there's a better way to give that whole index expression a nickname but I didn't have time to figure it out.- The query took about **0.7 milliseconds!!** (the timing you see if you run `EXPLAIN ANALYZE` in front of the query). 
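If you want to run the indexed query from Python rather than from psql, a minimal sketch with the `psycopg2` driver might look like this. Only the `food_reviews` table, its columns, and the index expression come from the examples above; the connection details and the driver choice are assumptions.

```python
# Sketch only: assumes a local PostgreSQL database containing the
# food_reviews table indexed above, and that psycopg2 is installed.
import psycopg2

conn = psycopg2.connect(dbname="food", user="postgres")  # hypothetical connection details
cur = conn.cursor()

# The WHERE clause repeats the GIN index expression so the planner can use the index.
cur.execute(
    """
    SELECT id, summary
    FROM food_reviews
    WHERE to_tsvector('english', summary || ' ' || coalesce(text, ''))
          @@ plainto_tsquery('english', %s)
    LIMIT 5;
    """,
    ("addictive flavor",),  # passed as a bound parameter, not string-formatted
)
for row in cur.fetchall():
    print(row)

cur.close()
conn.close()
```
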
**(11) Ranking the results**:- Disclaimer: I don't exactly know the mechanics of the algorithm behind how it's ranking the results, but from skimming the PostGres documention, it looks like it's fairly well-explained.SELECT summary, text, ts_rank(to_tsvector(summary || ' ' || coalesce(text, '')), plainto_tsquery('addicting flavor')) AS rank FROM food_reviews ORDER BY rank DESC LIMIT 5;my1stNN.ipynb (or MNIST digits classification with TensorFlow) This task will be submitted for peer review, so make sure it contains all the necessary outputs!import numpy as np from sklearn.metrics import accuracy_score from matplotlib import pyplot as plt %matplotlib inline import tensorflow as tf print("We're using TF", tf.__version__) import sys sys.path.append("..") import grading import matplotlib_utils from importlib import reload reload(matplotlib_utils) import keras_utils from keras_utils import reset_tf_sessionLook at the dataIn this task we have 50000 28x28 images of digits from 0 to 9.We will train a classifier on this data.import preprocessed_mnist X_train, y_train, X_val, y_val, X_test, y_test = preprocessed_mnist.load_dataset() # X contains rgb values divided by 255 print("X_train [shape %s] sample patch:\n" % (str(X_train.shape)), X_train[1, 15:20, 5:10]) print("A closeup of a sample patch:") plt.imshow(X_train[1, 15:20, 5:10], cmap="Greys") plt.show() print("And the whole sample:") plt.imshow(X_train[1], cmap="Greys") plt.show() print("y_train [shape %s] 10 samples:\n" % (str(y_train.shape)), y_train[:10])Linear modelYour task is to train a linear classifier $\vec{x} \rightarrow y$ with SGD using TensorFlow.You will need to calculate a logit (a linear transformation) $z_k$ for each class: $$z_k = \vec{x} \cdot \vec{w_k} + b_k \quad k = 0..9$$And transform logits $z_k$ to valid probabilities $p_k$ with softmax: $$p_k = \frac{e^{z_k}}{\sum_{i=0}^{9}{e^{z_i}}} \quad k = 0..9$$We will use a cross-entropy loss to train our multi-class classifier:$$\text{cross-entropy}(y, p) = -\sum_{k=0}^{9}{\log(p_k)[y = k]}$$ where $$[x]=\begin{cases} 1, \quad \text{if $x$ is true} \\ 0, \quad \text{otherwise} \end{cases}$$Cross-entropy minimization pushes $p_k$ close to 1 when $y = k$, which is what we want.Here's the plan:* Flatten the images (28x28 -> 784) with `X_train.reshape((X_train.shape[0], -1))` to simplify our linear model implementation* Use a matrix placeholder for flattened `X_train`* Convert `y_train` to one-hot encoded vectors that are needed for cross-entropy* Use a shared variable `W` for all weights (a column $\vec{w_k}$ per class) and `b` for all biases.* Aim for ~0.93 validation accuracyX_train_flat = X_train.reshape((X_train.shape[0], -1)) print(X_train_flat.shape) X_val_flat = X_val.reshape((X_val.shape[0], -1)) print(X_val_flat.shape) import keras # we use keras only for keras.utils.to_categorical y_train_oh = keras.utils.to_categorical(y_train, 10) y_val_oh = keras.utils.to_categorical(y_val, 10) print(y_train_oh.shape) print(y_train_oh[:3], y_train[:3]) # run this again if you remake your graph s = reset_tf_session() # Model parameters: W and b W = ### YOUR CODE HERE ### tf.get_variable(...) with shape[0] = 784 b = ### YOUR CODE HERE ### tf.get_variable(...) # Placeholders for the input data input_X = ### YOUR CODE HERE ### tf.placeholder(...) for flat X with shape[0] = None for any batch size input_y = ### YOUR CODE HERE ### tf.placeholder(...) 
for one-hot encoded true labels # Compute predictions logits = ### YOUR CODE HERE ### logits for input_X, resulting shape should be [input_X.shape[0], 10] probas = ### YOUR CODE HERE ### apply tf.nn.softmax to logits classes = ### YOUR CODE HERE ### apply tf.argmax to find a class index with highest probability # Loss should be a scalar number: average loss over all the objects with tf.reduce_mean(). # Use tf.nn.softmax_cross_entropy_with_logits on top of one-hot encoded input_y and logits. # It is identical to calculating cross-entropy on top of probas, but is more numerically friendly (read the docs). loss = ### YOUR CODE HERE ### cross-entropy loss # Use a default tf.train.AdamOptimizer to get an SGD step step = ### YOUR CODE HERE ### optimizer step that minimizes the loss s.run(tf.global_variables_initializer()) BATCH_SIZE = 512 EPOCHS = 40 # for logging the progress right here in Jupyter (for those who don't have TensorBoard) simpleTrainingCurves = matplotlib_utils.SimpleTrainingCurves("cross-entropy", "accuracy") for epoch in range(EPOCHS): # we finish an epoch when we've looked at all training samples batch_losses = [] for batch_start in range(0, X_train_flat.shape[0], BATCH_SIZE): # data is already shuffled _, batch_loss = s.run([step, loss], {input_X: X_train_flat[batch_start:batch_start+BATCH_SIZE], input_y: y_train_oh[batch_start:batch_start+BATCH_SIZE]}) # collect batch losses, this is almost free as we need a forward pass for backprop anyway batch_losses.append(batch_loss) train_loss = np.mean(batch_losses) val_loss = s.run(loss, {input_X: X_val_flat, input_y: y_val_oh}) # this part is usually small train_accuracy = accuracy_score(y_train, s.run(classes, {input_X: X_train_flat})) # this is slow and usually skipped valid_accuracy = accuracy_score(y_val, s.run(classes, {input_X: X_val_flat})) simpleTrainingCurves.add(train_loss, val_loss, train_accuracy, valid_accuracy)Naive Bayes - Trabalho Questão 1Implemente um classifacor Naive Bayes para o problema de predizer a qualidade de um carro. Para este fim, utilizaremos um conjunto de dados referente a qualidade de carros, disponível no [UCI](https://archive.ics.uci.edu/ml/datasets/car+evaluation). Este dataset de carros possui as seguintes features e classe:** Attributos **1. buying: vhigh, high, med, low2. maint: vhigh, high, med, low3. doors: 2, 3, 4, 5, more4. persons: 2, 4, more5. lug_boot: small, med, big6. safety: low, med, high** Classes **1. 
unacc, acc, good, vgoodimport csv import pandas as pd import random def loadCsv(filename): lines = csv.reader(open(filename, "r")) dataset = list(lines) for i in range(len(dataset)): dataset[i] = [x for x in dataset[i]] return dataset filename = 'carData.csv' dataset = loadCsv(filename) print(('O arquivo {0} foi carregado com {1} linhas').format(filename, len(dataset))) def splitDataset(dataset, splitRatio): trainSize = int(len(dataset) * splitRatio) trainSet = [] testSet = list(dataset) while len(trainSet) < trainSize: index = random.randrange(len(testSet)) trainSet.append(testSet.pop(index)) return [trainSet, testSet] splitRatio = 0.7 newDF = df.values.tolist() train, teste = splitDataset(newDF, splitRatio) def separateFeaturesByClass(dataset): separated = {} for i in range(len(dataset)): row = dataset.iloc[i] if (row[-1] not in separated): separated[row[-1]] = [] separated[row[-1]].append(row) return separated def tableFrequency(dataset): dic = {} attributes = dataset.columns for attribute in attributes: for value in dataset[attribute].unique(): if value not in dic.keys(): dic[value] = [] dic[value].append((dataset.groupby(attribute)[attribute].count()[value], attribute)) for value in dataset[dataset.columns[-1]].unique(): # Delete class (the last attribute) del dic[value] return dic def frequencyByClass(dataset): separated = separateFeaturesByClass(dataset) frequency = {} for classValue, instances in separated.items(): frequency[classValue] = tableFrequency(pd.DataFrame(instances)) for classValue in dataset.classe.unique(): #index = 0 for attr in dataset.columns[:-1]: for value in dataset[attr]: if value not in frequency[classValue]: frequency[classValue][value] = [(0, attr)] #index += 1 return frequency def calculateProbByAttribute(dataset, attr): frequencyClass = frequencyByClass(dataset) frequencyClassValue = {key: [] for key in frequencyClass.keys()} totalValuesByClass = {key: 0 for key in frequencyClass.keys()} freqByValue = {key: 0 for key in dataset[:][attr]} frequenciesClassKeys, frequenciesClassValues = zip(*frequencyByClass(dataset).items()) for classIndex in range(len(frequenciesClassKeys)): keys = frequenciesClassValues[classIndex].keys() for key in keys: for freq in frequenciesClassValues[classIndex][key]: if freq[1] == attr: frequencyClassValue[frequenciesClassKeys[classIndex]].append((key, freq[0])) freqByValue[key] += freq[0] totalValuesByClass[frequenciesClassKeys[classIndex]] += freq[0] totalValues = sum(totalValuesByClass.values()) probByClass = {key: totalValuesByClass[key]/totalValues for key in totalValuesByClass} probByValue = {key: value/totalValues for key, value in freqByValue.items()} probByClassValue = {key: [] for key in totalValuesByClass} for key in frequenciesClassKeys: for valueTuple, freqTuple in frequencyClassValue[key]: probByClassValue[key].append((valueTuple, freqTuple/totalValuesByClass[key])) return [probByClass, probByValue, probByClassValue] def tableProbability(dataset): tableProb = {} for attr in dataset.columns[:-1]: tableProb[attr] = calculateProbByAttribute(dataset, attr) return tableProb def predict(summaries, inputVector): higherProbability = -1 classHigherProbability = None attributes = list(summaries.keys()) randomAttribute = attributes[0] classes = list(summaries[randomAttribute][0].keys()) for classValue in classes: prob = 1 for index in range(len(attributes)): probClass = summaries[attributes[index]][0][classValue] probValue = summaries[attributes[index]][1][inputVector[index]] valueInClass = 
summaries[attributes[index]][2][classValue] for probValueClass in valueInClass: if probValueClass[0] == inputVector[index]: probIntersValueClass = probValueClass[1] prob = prob * (probIntersValueClass / probValue) prob = prob * probClass if prob > higherProbability: higherProbability = prob classHigherProbability = classValue return classHigherProbability def getPredictions(summaries, testSet): predictions = [] for i in range(len(testSet)): result = predict(summaries, testSet.iloc[i]) predictions.append(result) return predictions def getAccuracy(testSet, predictions): correct = 0 for i in range(len(testSet)): if testSet.iloc[i][-1] == predictions[i]: correct += 1 return (correct/float(len(testSet))) * 100.0 def main(): filename = 'carData.csv' datasetColumns = ['buying', 'maint', 'doors', 'persons', 'lug_boot', 'safety', 'classe'] df = pd.read_csv(filename, sep=',', names=datasetColumns) dataset = df.values.tolist() splitRatio = 0.75 trainSet, testSet = splitDataset(dataset, splitRatio) print(('Dataset {0}, Training {1}, Test {2}').format(len(dataset), len(trainSet), len(testSet))) trainingSet = pd.DataFrame(trainSet, columns=datasetColumns) testingSet = pd.DataFrame(testSet, columns=datasetColumns) summary = tableProbability(trainingSet) predictions = getPredictions(summary, testingSet) accuracy = getAccuracy(testingSet, predictions) print(('Acurácia de {:.3f}%').format(accuracy)) main()Dataset 1728, Training 1296, Test 432 Acurácia de 74.769%Questão 2Crie uma versão de sua implementação usando as funções disponíveis na biblioteca SciKitLearn para o Naive Bayes ([veja aqui](http://scikit-learn.org/stable/modules/naive_bayes.html))import pandas as pd import numpy as np import sklearn df = pd.read_csv("carData.csv",names=["buying","maint","doors","persons","lug_boot","safety","Classes"]) #Transform data into numerical df = df.replace('vhigh', 4) df = df.replace('high', 3) df = df.replace('med', 2) df = df.replace('low', 1) df = df.replace('more', 6) df = df.replace('big', 3) df = df.replace('small', 1) df = df.replace('unacc', 1) df = df.replace('acc', 2) df = df.replace('good', 3) df = df.replace('vgood', 4) df = df.replace('5more', 6) df = df.replace('2', 2) df = df.replace('3', 3) df = df.replace('4', 4) from sklearn.model_selection import train_test_split x = df.drop(["Classes"],axis=1) y = df["Classes"] x_train, x_test, y_train,y_test = train_test_split(x,y,test_size=0.3,random_state=101) from sklearn.naive_bayes import GaussianNB gnb = GaussianNB() y_pred = gnb.fit(x_train, y_train).predict(x_test) from sklearn.metrics import accuracy_score final = accuracy_score(y_pred, y_test) finalDo some basic plots of article stats only Words per paragraph# Words per paragraph a = art[0] pwords = [len(p['context'].split()) for p in a['paragraphs']] figure(num=None, figsize=(8, 4), dpi=80, facecolor='w', edgecolor='k') plt.bar(range(len(pwords)), pwords, align='center', alpha=0.5) plt.xlabel('Paragraph #') plt.ylabel('# Words') plt.show() plt.hist(pwords, bins=30) # arguments are passed to np.histogram plt.title("Hist of words per paragraph") plt.ylabel('# Paragraphs') plt.show()Words per articleawords = [] for a in art: pwords = [len(p['context'].split()) for p in a['paragraphs']] awords.append(sum(pwords)) myvar = awords varname = '# words' # Plot bargraph plotbar_train_dev(myvar,Ntrain,Ndev,varname,xlabel='Article #') # Plot histogram plothist_train_dev(myvar,Ntrain,Ndev,varname,ylabel='N Articles')Print words per articleawords = [] for a in art: pwords = [len(p['context'].split()) for p in 
a['paragraphs']] awords.append(sum(pwords)) for i,awords in enumerate(awords): print("Article # " + str(i) + ": " + art[i]['title'] + ', ' + str(awords) + " words.")Article # 0: Beyoncé, 9099 words. Article # 1: Frédéric_Chopin, 9050 words. Article # 2: Sino-Tibetan_relations_during_the_Ming_dynasty, 9591 words. Article # 3: IPod, 5236 words. Article # 4: The_Legend_of_Zelda:_Twilight_Princess, 3966 words. Article # 5: Spectre_(2015_film), 5113 words. Article # 6: 2008_Sichuan_earthquake, 7037 words. Article # 7: New_York_City, 14934 words. Article # 8: To_Kill_a_Mockingbird, 8908 words. Article # 9: Solar_energy, 4938 words. Article # 10: Kanye_West, 10883 words. Article # 11: Buddhism, 11834 words. Article # 12: American_Idol, 11605 words. Article # 13: Dog, 6557 words. Article # 14: 2008_Summer_Olympics_torch_relay, 9754 words. Article # 15: Genome, 1771 words. Article # 16: Comprehensive_school, 2247 words. Article # 17: Republic_of_the_Congo, 2879 words. Article # 18: Prime_minister, 2578 words. Article # 19: Institute_of_technology, 5388 words. Article # 20: Wayback_Machine, 1908 words. Article # 21: Dutch_Republic, 1565 words. Article # 22:[...]Sentences per article# Number of sentences per article from nltk.tokenize import sent_tokenize asentences = [] for a in art: psentences = [len(sent_tokenize(p['context'])) for p in a['paragraphs']] asentences.append(sum(psentences)) myvar = asentences varname = '# sentences' # Plot bargraph plotbar_train_dev(myvar,Ntrain,Ndev,varname,xlabel='Article #') # Plot histogram plothist_train_dev(myvar,Ntrain,Ndev,varname,ylabel='N Articles') # Total number of sentences in dataset import statistics Nsent_train = statistics.mean(myvar[0:Ntrain-1])* Ntrain Nsent_dev = statistics.mean(myvar[Ntrain:]) *Ndev print("Nsent_train={}, Nsent_dev={},Nsent_tot={}".format(Nsent_train,Nsent_dev,Nsent_train+Nsent_dev))Nsent_train=94291.32879818595, Nsent_dev=6392.0,Nsent_tot=100683.32879818595Questions per articlearts[0]['paragraphs'][0]['qas'][0]['answers'][0] aquestions = [] for a in art: pquestions = [len(p['qas']) for p in a['paragraphs']] aquestions.append(sum(pquestions)) myvar = aquestions varname = '# questions' # Plot bargraph plotbar_train_dev(myvar,Ntrain,Ndev,varname,xlabel='Article #') # Plot histogram plothist_train_dev(myvar,Ntrain,Ndev,varname,ylabel='N Articles')Answers per article# import mpld3 # mpld3.enable_notebook() aanswers = [] for a in art: qanswers = [len(q['answers']) for p in a['paragraphs'] for q in p['qas']] aanswers.append(sum(qanswers)) myvar = aanswers varname = '# answers' # Plot bargraph plotbar_train_dev(myvar,Ntrain,Ndev,varname,xlabel='Article #') # Plot histogram plothist_train_dev(myvar,Ntrain,Ndev,varname,ylabel='N Articles')Words per answer# import mpld3 # mpld3.enable_notebook() art[0]['paragraphs'][0]['qas'][0]['answers'][0] art[0]['paragraphs'][0]['qas'][0]['is_impossible'] from utils_NLP import extract_no_stopwords a_wordsperanswer = [] for a in art: awords = [len(extract_no_stopwords(ans['text'].strip().split())) for p in a['paragraphs'] for q in p['qas'] for ans in q['answers'] if not q['is_impossible']] if len(awords) > 0: a_wordsperanswer.append(sum(awords)/len(awords)) else: a_wordsperanswer.append(0) myvar = a_wordsperanswer varname = '# words per answer' # Plot bargraph plotbar_train_dev(myvar,Ntrain,Ndev,varname,xlabel='Article #') # Plot histogram plothist_train_dev(myvar,Ntrain,Ndev,varname,ylabel='N Articles')Investigate articles with zero answers From figure above, looks like there are some articles near the 
end that have zero associated answers. See what's going on there# a = art[Ntrain-4] # pquestions = [q['question'] for p in a['paragraphs'] for q in p['qas']] # qanswers = [a['text'] for p in a['paragraphs'] for q in p['qas'] for a in q['answers']] # qisimpossible = [q['is_impossible'] for p in a['paragraphs'] for q in p['qas']] # qanswers = [a['text'] for p in a['paragraphs'] for q in p['qas'] for a in q['answers']] # print(qisimpossible) # a = art[Ntrain-3] # pquestions = [q['question'] for p in a['paragraphs'] for q in p['qas']] # qanswers = [a['text'] for p in a['paragraphs'] for q in p['qas'] for a in q['answers']] # qisimpossible = [q['is_impossible'] for p in a['paragraphs'] for q in p['qas']] # qanswers = [a['text'] for p in a['paragraphs'] for q in p['qas'] for a in q['answers']] # print(qisimpossible) # a = art[Ntrain-2] # qisimpossible = [q['is_impossible'] for p in a['paragraphs'] for q in p['qas']] # print(qisimpossible) # a = art[Ntrain-1] # qisimpossible = [q['is_impossible'] for p in a['paragraphs'] for q in p['qas']] # print(qisimpossible) # a = art[Ntrain-0] # qisimpossible = [q['is_impossible'] for p in a['paragraphs'] for q in p['qas']] # print(qisimpossible) arts[0]['paragraphs'][0]['qas'][0]['is_impossible']Fraction of questions that are impossible# [q['is_impossible'] for a in art for p in a['paragraphs']] # arts[0]['paragraphs'][0]['qas'][0]['is_impossible'] aquestions = [] aisimpossible = [] aimpquestionsratio = [] for a in art: pquestions = [len(p['qas']) for p in a['paragraphs']] pisimpossible = [q['is_impossible'] for p in a['paragraphs'] for q in p['qas']] aquestions.append(sum(pquestions)) aisimpossible.append(sum(pisimpossible)) aimpquestionsratio.append(sum(pisimpossible)/sum(pquestions)*100) figure(num=None, figsize=(15, 4),facecolor='w', edgecolor='k') barlist = plt.bar(range(len(aimpquestionsratio)), aimpquestionsratio, align='center', alpha=0.5) plt.xlabel('Article #') plt.ylabel('# Questions') for i in range(Ntrain,Ntrain+Ndev): barlist[i].set_color('r') plt.show() # figure(num=None, figsize=(15, 4), dpi=80, facecolor='w', edgecolor='k') f, (ax1, ax2) = plt.subplots(1, 2, sharey=False,figsize=(15, 4)); ax1.hist(aimpquestionsratio[0:Ntrain-1], bins=30); # arguments are passed to np.histogram ax1.set_title("Train data: Narticles=" + str(Ntrain)); ax1.set_ylabel('N Articles'); ax1.set_xlabel('# Words'); ax2.hist(aimpquestionsratio[Ntrain:], bins=30); # arguments are passed to np.histogram ax2.set_title("Dev data: Narticles=" + str(Ndev)); ax2.set_xlabel('# Words');Seems like a few articles near the end contain 100% unanswerable questions. Also, a large number of articles contain 100% answerable questions. 
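To quantify this observation, a quick count of how many articles are 100% unanswerable, 100% answerable, or mixed can be made directly from the `is_impossible` flags. A minimal sketch, assuming `art` is the list of SQuAD-style articles loaded earlier in this notebook (each with 'paragraphs' -> 'qas' -> 'is_impossible'):

```python
# Count articles whose questions are all impossible, all answerable, or mixed.
fully_impossible = fully_answerable = mixed = 0
for a in art:
    flags = [q['is_impossible'] for p in a['paragraphs'] for q in p['qas']]
    if not flags:
        continue  # skip articles without any questions
    ratio = sum(flags) / len(flags)
    if ratio == 1.0:
        fully_impossible += 1
    elif ratio == 0.0:
        fully_answerable += 1
    else:
        mixed += 1
print(f"100% impossible: {fully_impossible}, "
      f"100% answerable: {fully_answerable}, mixed: {mixed}")
```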
The rest are a mix of about 50/50 Answers verbatim in text Run the analysis Test that blanks are properly indip = art2[0]['paragraphs'][0] c = p['context'] cs = c.split() bc = p['blank_classification'] for i in range(len(bc)): if bc[i]: print('Blank at word #' + str(i) + ' ' + cs[i]) print( p['context']) print( p['context_blanked']) print(context_split) temp = 'ASDFASfdsf' temp.lower() a = ['lets','walk','the','dog'] b = ['dog'] b in a a = 'lets walk the dog' b = 'dog' b in a # Gather all questions together answers = [] all_questions = [qa['question'] for a in art for p in a['paragraphs'] for qa in p['qas'] for a in qa['answers']] all_answers = [a['text'] for a in art for p in a['paragraphs'] for qa in p['qas'] for a in qa['answers']] answer_is_verbatim_in_context = [a['text'] in p['context'] for a in art for p in a['paragraphs'] for qa in p['qas'] for a in qa['answers']] answers_per_article[for a in art for p in a['paragraphs'] for ] print('Num answers: ' + str(len(all_answers))) print('Num true bools: ' + str(sum(answer_is_verbatim_in_context))) # Find all answers less than 3 words in duration. These are ideal candidates for fill in the blank questions a=all_answers[0] Nmax=3 answer_is_short = [len(a.split()) <= Nmax for a in all_answers] # Count number of trues print('Num answers: ' + str(len(all_answers))) print('Num true bools: ' + str(sum(answer_is_short))) print('Percent: ' + str(sum(answer_is_short)/len(all_answers)*100)) # Display all all answers and their validity for q,a, context_bool in zip(all_questions,all_answers,answer_is_short): print(f"{a}\t\t{context_bool}") # Display all question-answer pairs and their validity for q,a, context_bool in zip(all_questions,all_answers,answer_is_short): print(f"{q}\t{a}\t{context_bool}") unique_answers = set(all_answers) print('All answers: ' + str(len(all_answers))) print('Unique answers: ' + str(len(unique_answers))) unique_answers text = [p['context'] for a in art for p in a['paragraphs']] text = ' '.join(text[:]) textAnalyze poorly performing NER# This article on music, NER performs poorly; fails to capture key concepts art = arts[105:107] # A few short articles # Might need to use word2vec or other embedding as additional features # List all answers associatd with a specific question ind_art = Ntrain-0 Nq = len(art[ind_art]['paragraphs'][0]['qas']) for i in range(Nq): print(art[ind_art]['paragraphs'][0]['qas'][i]['answers']) i=0 art[ind_art]['paragraphs'][0]['qas'][i]['answers'][0]ONNX# clone YOLOv5 and reset to a specific git checkpoint that has been verified working !git clone https://github.com/ultralytics/yolov5 # clone repo %cd yolov5 !git reset --hard 68211f72c99915a15855f7b99bf5d93f5631330f !cp /content/drive/MyDrive/20210420YOLOV5ID/best.pt . 
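Before relying on the repo's `models/export.py` script (called further below), it can help to see the bare `torch.onnx.export` call that such exporters build on. The following is a minimal, self-contained sketch with a tiny placeholder model and placeholder file name, not the YOLOv5 exporter itself:

```python
import torch
import torch.nn as nn

# Placeholder model purely for illustration; YOLOv5's export.py wraps
# essentially the same torch.onnx.export call around the real detector.
model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())
model.eval()

dummy_input = torch.zeros(1, 3, 640, 640)  # batch, channels, height, width
torch.onnx.export(
    model,                   # module to export
    dummy_input,             # example input used to trace the graph
    "tiny_model.onnx",       # output file (placeholder name)
    opset_version=12,
    input_names=["images"],  # optional friendly tensor names
    output_names=["output"],
)
```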
%matplotlib inline # Some standard imports import io import numpy as np from torch import nn import torch.utils.model_zoo as model_zoo import torch.onnx import torch.nn as nn import torch.nn.init as init class SuperResolutionNet(nn.Module): def __init__(self, upscale_factor, inplace=False): super(SuperResolutionNet, self).__init__() self.relu = nn.ReLU(inplace=inplace) self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2)) self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)) self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1)) self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1)) self.pixel_shuffle = nn.PixelShuffle(upscale_factor) self._initialize_weights() def forward(self, x): x = self.relu(self.conv1(x)) x = self.relu(self.conv2(x)) x = self.relu(self.conv3(x)) x = self.pixel_shuffle(self.conv4(x)) return x def _initialize_weights(self): init.orthogonal_(self.conv1.weight, init.calculate_gain('relu')) init.orthogonal_(self.conv2.weight, init.calculate_gain('relu')) init.orthogonal_(self.conv3.weight, init.calculate_gain('relu')) init.orthogonal_(self.conv4.weight) # Create the super-resolution model by using the above model definition. torch_model = SuperResolutionNet(upscale_factor=3) # Load pretrained model weights model_url = '/content/drive/MyDrive/20210420YOLOV5ID/best.pt' batch_size = 1 # just a random number # Initialize model with the pretrained weights map_location = lambda storage, loc: storage if torch.cuda.is_available(): map_location = None torch_model.load_state_dict(model_zoo.load_url(model_url, map_location=map_location)) # set the model to inference mode torch_model.eval() pip uninstall utils pip install onnx !python /content/yolov5/models/export.py !cp /content/yolov5/best.onnx /content/drive/MyDrive/20210420YOLOV5ID/Predefined Metrics in Symbolic Module Importing some of the predefined tensors. 
All the metrics are comprehensively listed in EinsteinPy documentation.import sympy from sympy import simplify from einsteinpy.symbolic import RicciScalar from einsteinpy.symbolic.predefined import Schwarzschild, DeSitter, AntiDeSitter, Minkowski, find sympy.init_printing() # for pretty printingPrinting the metrics for visualizationAll the functions return instances of :py:class:`~einsteinpy.symbolic.metric.MetricTensor`sch = Schwarzschild() sch.tensor() Minkowski(c=1).tensor() DeSitter().tensor() AntiDeSitter().tensor()Calculating the scalar (Ricci) curavturesThey should be constant for De-Sitter and Anti-De-Sitter spacetimes.scalar_curvature_de_sitter = RicciScalar.from_metric(DeSitter()) scalar_curvature_anti_de_sitter = RicciScalar.from_metric(AntiDeSitter()) scalar_curvature_de_sitter.expr scalar_curvature_anti_de_sitter.exprOn simplifying the expression we got above, we indeed obtain a constantsimplify(scalar_curvature_anti_de_sitter.expr)Searching for a predefined metricfind function returns a list of available functionsfind("sitter")M5 Bottom Salinity- from erddap gridded productimport datetime from erddapy import ERDDAP import pandas as pd import numpy as np %matplotlib inline import matplotlib.pyplot as plt import matplotlib.dates as mdates import cmocean #using xarray for bathymetry data read import xarray as xa #search and boundary parameters server_url = 'http://akutan.pmel.noaa.gov:8080/erddap/' e = ERDDAP(server=server_url) df = pd.read_csv(e.get_search_url(response='csv', search_for='Mooring bs5 gridded')) print(sorted(df['Dataset ID'].values)) from requests.exceptions import HTTPError dfs = {} for dataset_id in sorted(df['Dataset ID'].values): print(dataset_id) try: d = ERDDAP(server=server_url, protocol='griddap', response='nc', ) d.dataset_id = dataset_id dfs.update({dataset_id: d.to_xarray()}) except: print("error") fig, ax = plt.subplots(figsize=(12,3)) for dn,ds in dfs.items(): ax.pcolormesh(pd.to_datetime(ds.time),ds.depth,ds.salinity, vmin=31,vmax=32, cmap=cmocean.cm.haline) ax.invert_yaxis() #get values greater than 50 for dn,ds in dfs.items(): ds.where(ds.depth>50)[['temperature','salinity']].dropna(dim='depth',how='all').dropna(dim='time',how='all').to_dataframe().dropna(how='any').to_csv(f'{dn}.csv') #now some where empty from final gridded data... so pull up preliminary data e = ERDDAP(server=server_url) df = pd.read_csv(e.get_search_url(response='csv', search_for='Mooring bs5 preliminary')) print(sorted(df['Dataset ID'].values)) from requests.exceptions import HTTPError dfs = {} for dataset_id in sorted(df['Dataset ID'].values): print(dataset_id) try: d = ERDDAP(server=server_url, protocol='tabledap', response='csv', ) d.dataset_id = dataset_id df_m = d.to_pandas( index_col='time (UTC)', parse_dates=True, skiprows=(1,) # units information can be dropped. 
) df_m.sort_index(inplace=True) df_m.columns = [x[1].split()[0] for x in enumerate(df_m.columns)] dfs.update({dataset_id:df_m}) except: print("error") for dn,ds in dfs.items(): ds[ds.depth > 50][['depth','salinity','temperature']].dropna().to_csv(f'{dn}.csv')Scrape by classfrom bs4 import BeautifulSoup import pandas as pd from requests import get from pathlib import Path # Define urls to scrape url_list = ['https://www.rijksoverheid.nl/ministeries/ministerie-van-buitenlandse-zaken/het-werk-van-bz-in-de-praktijk/weblogs', 'https://www.rijksoverheid.nl/ministeries/ministerie-van-buitenlandse-zaken/het-werk-van-bz-in-de-praktijk/weblogs?pagina=2' ] def get_information(url_list): weblog_urls = [] dates = [] for url in url_list: # Get response from GET request response = get(url) # Capture HTML from URL html_soup = BeautifulSoup(response.text, 'html.parser') # Capture all href's with weblog class for a in html_soup.find_all('a', {'class': 'weblog'}, href=True): weblog_urls.append(a['href']) # Capture the datetime values per weblog for m in html_soup.find_all('p', {'class': 'meta'}): for i in m.find_all('time'): if i.has_attr('datetime'): dates.append(i['datetime']) # Create dataframe weblog_df = pd.DataFrame({'url': weblog_urls, 'date': pd.to_datetime(dates) } ) # Return blogs from october and november return weblog_df[(weblog_df['date'] >= '2020-10-01') & (weblog_df['date'] <= '2020-11-30')] weblogs_df = get_information(url_list) weblogs_df # Join to use in regex filter '|'.join(weblogs_df['url'])1er modelo -> C=1e-6model_1 = LogisticRegression(C=1e-6, max_iter=1000) model_1.fit(train_img, train_lbl) model_1.coef_.shape model_1.intercept_.shape test_img[0], test_lbl[0] test_img[:10].shape score = model_1.score(test_img, test_lbl) score lbl_pred = model_1.predict(test_img) cm = confusion_matrix(test_lbl, lbl_pred) plt.figure(figsize=(8,8)) sns.heatmap(cm, annot=True, linewidths=.5, square = True, cmap = 'Greens_r'); plt.ylabel('Actual label', size = 15) plt.xlabel('Predicted label', size = 15) plt.title(f'Accuracy Score: {np.round(100*score,2)}%', size = 20) plt.show()2do modelo -> C=1e-*7*model_2 = LogisticRegression(C=1e-7, max_iter=1000) model_2.fit(train_img, train_lbl) model_2.n_iter_ score_2 = model_2.score(test_img, test_lbl) score_2 lbl_pred_2 = model_2.predict(test_img) cm_2 = confusion_matrix(test_lbl, lbl_pred) plt.figure(figsize=(8,8)) sns.heatmap(cm_2, annot=True, linewidths=.5, square = True, cmap = 'Purples_r'); plt.ylabel('Actual label', size = 15) plt.xlabel('Predicted label', size = 15) plt.title(f'Accuracy Score: {np.round(100*score_2,2)}%', size = 20) plt.show()3er modelo -> C=1e-4model_3 = LogisticRegression(C=1e-4, max_iter=30000) model_3.fit(train_img, train_lbl) model_3.n_iter_ score_3 = model_3.score(test_img, test_lbl) score_3 lbl_pred_3 = model_3.predict(test_img) cm_3 = confusion_matrix(test_lbl, lbl_pred) plt.figure(figsize=(8,8)) sns.heatmap(cm_3, annot=True, linewidths=.5, square = True, cmap = 'Spectral_r'); plt.ylabel('Actual label', size = 15) plt.xlabel('Predicted label', size = 15) plt.title(f'Accuracy Score: {np.round(100*score_3,2)}%', size = 20) plt.show()4er modelo -> C=1e-5model_4 = LogisticRegression(C=1e-5, max_iter=10000) model_4.fit(train_img, train_lbl) model_4.n_iter_ score_4 = model_4.score(test_img, test_lbl) score_4 lbl_pred_4 = model_4.predict(test_img) cm_4 = confusion_matrix(test_lbl, lbl_pred) plt.figure(figsize=(8,8)) sns.heatmap(cm_4, annot=True, linewidths=.5, square = True, cmap = 'icefire'); plt.ylabel('Actual label', size = 15) 
plt.xlabel('Predicted label', size = 15) plt.title(f'Accuracy Score: {np.round(100*score_4,2)}%', size = 20) plt.show()PIC-SURE API use-case: quick analysis on BioDataCatalyst data This is a tutorial notebook aimed at getting the user quickly up and running with the R PIC-SURE API. It covers the main functionalities of the API. PIC-SURE R API What is PIC-SURE? As part of the BioData Catalyst initiative, the Patient Information Commons Standard Unification of Research Elements (PIC-SURE) platform has been integrating clinical and genomic datasets from multiple TOPMed and TOPMed-related studies funded by the National Heart Lung and Blood Institute (NHLBI). The original data exposed through the PIC-SURE API is organized in very heterogeneous ways across studies. PIC-SURE hides this complexity and exposes the different study datasets in a single tabular format. By simplifying the process of data extraction, it allows investigators to focus on downstream analyses and facilitates reproducible science. More about PIC-SURE The API is available in two programming languages, Python and R, enabling investigators to query the databases the same way using either language. PIC-SURE is a larger project of which the R/Python PIC-SURE API is only one component. Among other things, PIC-SURE also offers a graphical user interface that allows researchers to explore variables across multiple studies, filter patients that match criteria, and create cohorts from this interactive exploration. The R API is actively developed by the Avillach Lab at Harvard Medical School. PIC-SURE API GitHub repos:* https://github.com/hms-dbmi/pic-sure-r-adapter-hpds* https://github.com/hms-dbmi/pic-sure-r-client* https://github.com/hms-dbmi/pic-sure-biodatacatalyst-r-adapter-hpds ------- Getting your own user-specific security token **Before running this notebook, please be sure to review the "Get your security token" documentation, which exists in the NHLBI_BioData_Catalyst [README.md file](https://github.com/hms-dbmi/Access-to-Data-using-PIC-SURE-API/tree/master/NHLBI_BioData_Catalystget-your-security-token). 
It explains about how to get a security token, which is mandatory to access the databases.** Environment set-up Pre-requisites- R 3.4 or later Install packages Install the following:- packages listed in the `requirements.R` file- PIC-SURE API components (from Github) - PIC-SURE Adapter - PIC-SURE Clientsource("R_lib/requirements.R") Sys.setenv(TAR = "/bin/tar") options(unzip = "internal") install.packages("https://cran.r-project.org/src/contrib/Archive/devtools/devtools_1.13.6.tar.gz", repos=NULL, type="source") install.packages("https://cran.r-project.org/src/contrib/R6_2.5.1.tar.gz", repos=NULL, type="source") install.packages("https://cran.r-project.org/src/contrib/hash_2.2.6.1.tar.gz", repos=NULL, type="source") install.packages(c("urltools"),repos = "http://cran.us.r-project.org") devtools::install_github("hms-dbmi/pic-sure-r-client", force=T) devtools::install_github("hms-dbmi/pic-sure-r-adapter-hpds", force=T) devtools::install_github("hms-dbmi/pic-sure-biodatacatalyst-r-adapter-hpds", force=T)Loading user-defined functionssource("R_lib/utils.R")Connecting to a PIC-SURE resource The following is required to get access to data through the PIC-SURE API: - Network URL- Resource id- User-specific security token If you have not already retrieved your user-specific token, please refer to the "Get your security token" section of the [README.md](https://github.com/hms-dbmi/Access-to-Data-using-PIC-SURE-API/tree/master/NHLBI_BioData_Catalystget-your-security-token) file.# Set required information as variables PICSURE_network_URL <- "https://picsure.biodatacatalyst.nhlbi.nih.gov/picsure" resource_id <- "02e23f52-f354-4e8b-992c-d37c8b9ba140" token_file <- "token.txt" token <- scan(token_file, what = "character") # Establish connection to PIC-SURE connection <- picsure::connect(url = PICSURE_network_URL, token = token) # it may take several minutes to connect and download the initialization data resource <- bdc::get.resource(connection, resourceUUID = resource_id)Two objects are created here: a `connection` and a `resource` object.Since we will only be using a single resource, **the `resource` object is actually the only one we will need to proceed with data analysis hereafter**.It is connected to the specific data source ID we specified and enables us to query and retrieve data from this database. Getting help with the PIC-SURE API You can get help with PIC-SURE library functions by using the `?` operator?bdc::get.resource()Using the *variables dictionary* Once a connection to the desired resource has been established, we first need to understand which variables are available in the database. To this end, we will use the `find.in.dictionary` function. For instance, looking for variables containing the term `COPD` is done this way:dictionary_search <- bdc::find.in.dictionary(resource, "COPD")Four different functions can be used to retrieve results from a dictionary search: `extract.count()`, `extract.keys()`, `extract.entries()`, and `extract.dataframe()`.print(list("Count" = bdc::extract.count(dictionary_search), # How many dictionary entries contained "COPD"? 
"Keys" = bdc::extract.keys(dictionary_search)[1:3], # Show the first three unique dictionary keys that contain "COPD" "Entries" = bdc::extract.entries(dictionary_search)[1:3,])) # Show the first three entries that contain "COPD" # Save the entries from the "COPD" search to 'df_dictionary_copd' df_dictionary_copd <- bdc::extract.entries(dictionary_search)**`bdc::extract.dataframe()` retrieves the result of the dictionary search in a data.frame format. This way, it enables us to:*** Use the various information exposed in the dictionary (patientCount, variable type ...) as criteria for variable selection.* Use the row names of the DataFrame to get the actual variables names, to be used in the query, as shown below.Variable names, as currently implemented in the API, aren't very practical to use right away for two reasons:1. Very long2. Presence of backslashes that requires modification right after copy-pasting.However, using the dictionary to select variables can help access the variable names. Let's say we want to retrieve every variable from the COPDGene study. One way to proceed is to retrieve the whole dictionary for those variables in the form of a data.frame, as below:plain_variablesDict <- bdc::find.in.dictionary(resource, "COPDGene") %>% # Search for "COPDGene" bdc::extract.entries() # Retrieve unique entries from the search plain_variablesDict[10:20,] # Display entries 10 through 20The dictionary currently returned by the API provides information about the variables, such as:- observationCount: number of entries with non-null value- categorical: type of the variables, True if strings, False if numerical- min/max: only provided for numerical variables- HpdsDataType: 'phenotypes' or 'genotypes'. Currently, the API only exposes 'phenotypes' variables Extract full data dictionary to CSV Using the `bdc::find.in.dictionary` we can extact the entire data dictionary by performing an empty search and saving it to `fullVariablesDict`:fullVariablesDict <- bdc::find.in.dictionary(resource, "") %>% # Search for '', or get entire dictionary bdc::extract.entries() # Extract unique entries dim(fullVariablesDict) # Print the dimensions of fullVariablesDict (rows, columns)Check that the `fullVariablesDict` dataframe contains some values.fullVariablesDict[0:5,]We can than write the data frame that contains the full data dictionary to a CSV file.dataDictFile <- "data_dictionary.csv" # Name of output file saveDictFrame <- fullVariablesDict[ , c("name", "patientCount", "min", "categorical", "observationCount", "max", "HpdsDataType", "description")] write.csv(saveDictFrame, dataDictFile, row.names = FALSE)You should now see a data_dictionary.csv in the Jupyter Hub file explorer. Parsing variable names We can use a simple function, `get_multiIndex_variablesDict`, defined in `R_lib/utils.R` to add a little more information to the variable dictionary and to simplify working with variables names.Although not an official feature of the API, such functionality illustrates how to quickly select groups of related variables.Printing part of the parsed names dictionary allows us to quickly see the tree-like organization of the variable names. 
Moreover, original and simplified variable names are now stored respectively in the `varName` and `simplified_varName` columns (simplified variable names is simply the last component of the variable name, that is usually the most informative to know what each variable is about).# Display the variables tree hierarchy from the variables name variablesDict <- get_multiIndex_variablesDict(plain_variablesDict) head(variablesDict) # Show first few rows of variablesDictBelow is a simple example to illustrate the simplicity of using a multiIndex dictionary. Let's say we are interested in every variable pertaining to the terms "asthma" and "smoking".asthma <- str_detect(variablesDict$level_2, 'asthma') # Does the level_2 variable name contain "asthma"? mask_asthma <- variablesDict$level_2[!is.na(asthma) & asthma] # All level_2 variable names not NA and containing "asthma" smoking <- str_detect(variablesDict$level_2, 'smoking') # Does the level_2 variable name contain "smoking"? mask_smoking <- variablesDict$level_2[!is.na(smoking) & smoking] # All level_2 variable names not NA and containing "smoking" # Subsetting variablesDict to only level_2 variable names not NA and containing "asthma" or "smoking" asthma_and_smoking_variables <- variablesDict[!is.na(variablesDict$level_2) & variablesDict$level_2 %in% c(mask_asthma, mask_smoking), ] # View the new subsetted dataframe asthma_and_smoking_variablesAlthough pretty simple, it can be easily combined with other filters to quickly select one or more desired groups of variables. Querying and retrieving data The second cornerstone of the API are the `query` functions (`bdc::query.anyof`, `bdc::query.select`, `bdc::query.filter`, `bdc::query.require`). They are the entering point to retrieve data from the resource. First, we need to create a query object.my_query <- bdc::new.query(resource = resource)The query object has several methods that enable to build a query. | Method | Arguments / Input | Output||--------|-------------------|-------|| bdc::query.select.add() | variable names (string) or list of strings | all variables included in the list (no record subsetting)|| bdc::query.require.add() | variable names (string) or list of strings | all variables; only records that do not contain null values for input variables || bdc::query.anyof.add() | variable names (string) or list of strings | all variables; only records that contain at least one non-null value for input variables || bdc::query.filter.add() | variable name and additional filtering values | input variable; only records that match filter criteria | All 4 methods can be combined when building a query. The record eventually returned by the query have to meet all the different specified filters. Building the query *In the following example, we are going to answer the following research question:**What is the age distribution of patients that stopped smoking between 20 and 70 years in the COPDGene study?**To answer this, we will first build a query to return data associated with patients in the COPDGene study who completely stopped smoking between the ages of 20 and 70 years. For these entries, we will pull the age that they stopped smoking along with any other categorical variables which have more than 4000 entries.* First, we create a mask `yo_stop_smoking_varnme` to isolate the variable pertaining to the following text:`How old were you when you completely stopped smoking? 
[Years old]`# Peek at the filtered dataframe fullVariablesDict[str_detect(fullVariablesDict$name, "How old were you when you completely stopped smoking"), ] # Create 'mask' where simplified_name is variable of interest mask <- variablesDict["simplified_name"] == "How old were you when you completely stopped smoking? [Years old]" # Apply mask to variablesDict and retrieve "name" info yo_stop_smoking_varname <- variablesDict[mask, "name"] %>% unlist() %>% unname() yo_stop_smoking_varname <- as.character(yo_stop_smoking_varname) mask_cat <- plain_variablesDict["categorical"] == TRUE # Get all categorical variables mask_count <- plain_variablesDict["observationCount"] > 4000 # Get all variables with 4000+ entries selected_vars <- plain_variablesDict[mask_cat & mask_count, "name"] %>% as.list() selected_vars <- lapply(selected_vars, as.character) bdc::query.filter.add(query = my_query, keys = yo_stop_smoking_varname, min=20, max=70) bdc::query.select.add(query = my_query, keys = selected_vars[1:50])Selecting consent groups PIC-SURE will limit results based on which study and/or patient consent groups for which the researcher has been individually authorized to use. However, sometimes you might need to limit your results further to only contain a subset of the groups.To view the available consent groups, you can use the `query.show()` function. Look for the list of values under `query > categoryFilters > \\_consents\\`.bdc::query.show(bdc::new.query(resource = resource))In order to update the values, the existing list needs to be cleared first, then replaced. (phs000179.c2 is one consent code used in the COPDGene study.) It is safe to ignore the warning about "the condition has length > 1 ..." because we use a single vector as an argument.# Delete current consents bdc::query.filter.delete(query = my_query, keys = "\\_consents\\") bdc::query.filter.add(query = my_query, keys = "\\_consents\\", as.list(c("phs000179.c2")))*Note that trying to manually add a consent group which you are not authorized to access will result in errors downstream.* Retrieving the data Once our query object is finally built, we use the `query.run` function to retrieve the data corresponding to our query.my_df <- bdc::query.run(my_query, result.type = "dataframe") dim(my_df) # Dimensions of the new dataframe head(my_df) # Show first few rowsFrom this point, we can proceed with the data management and analysis using any other R function or libraries. Remember our original question: what is the distribution of the age that patients stopped smoking between 20 and 70 years old in the COPDGene study?To investigate this, we can narrow the new dataframe to the column saved before in `yo_stop_smoking_varname`.parsed_data <- my_df[yo_stop_smoking_varname] # Select only data from column saved before names(parsed_data)[1] <- 'age_stopped_smoking' # Rename long column to age_stopped_smokingNow we can visualize our results with `ggplot` or other plotting tools in R.ggplot(data = parsed_data) + geom_histogram(mapping = aes(x=age_stopped_smoking), bins=15) + labs(x = "Age stopped smoking, years old", y = "Count") + theme_bw()Retrieving data from query run through PIC-SURE UIIt is possible for you to retrieve the results of a query that you have previously run using the PIC-SURE UI. To do this you must "select data for export", then select the information that you want the query to return and then click "prepare data export". Once the query is finished executing, a group of buttons will be presented. 
Click the "copy query ID to clipboard" button to copy your unique query identifier so you can paste it into your notebook.Paste your query's ID into your notebook and assign it to a variable. You then use the `bdc::query.getResults(yourResource, yourQueryUUID)` function using an initialized resource object to retrieve the data from your query as shown below. The screenshot below shows the button of interest in the PIC-SURE UI. It shows that the previously run query has a Query ID of `bf3ddba5-de5f-460b-bcbc-ff56410d3075`. At this point a copy-paste process is used to provide the Query ID to the API, as shown in the example code below. To run this code you must replace the example query ID with a query ID from a query that you have run in the PIC-SURE API. Note that query IDs are not permanent and will expire.PICSURE_network_URL <- "https://picsure.biodatacatalyst.nhlbi.nih.gov/picsure" resource_id <- "02e23f52-f354-4e8b-992c-d37c8b9ba140" token_file <- "token.txt" token <- scan(token_file, what = "character") connection <- picsure::connect(url = PICSURE_network_URL, token = token) resource <- bdc::get.resource(connection, resourceUUID = resource_id) # To run this using your notebook you must replace it with the ID value of a query that you have run. DataSetID <- '02e23f52-f354-4e8b-992c-d37c8b9ba140' my_csv_str <- bdc::query.getResults(resource, DataSetID) my_df <- read.table(textConnection(my_csv_str), sep = ",") dim(my_df) head(my_df)Investigate development of MC errorfrom hypnettorch.utils import misc import matplotlib.pyplot as plt import numpy as np import os from scipy.spatial.distance import cdist import sys from time import time import torch # Pretend that notebook is located in base directory of this repo. curr_dir = os.path.basename(os.path.abspath(os.curdir)) base_dir = os.path.abspath('../..') if curr_dir == 'nngp' and base_dir != sys.path[0]: sys.path.insert(0, base_dir) from data.gmm_utils import get_circle_gmm_instance from nngp import MLPKernel %matplotlib inline %load_ext autoreload %autoreload 2 device = 'cpu' def compute_kernels(data, config_dict): # Compute kernel matrices. for k in config_dict: start = time() if k['name'] == 'rbf': rbf_kernel = RBF() K = rbf_kernel(data.detach().cpu().numpy()) else: mlp_kernel = MLPKernel(sigma2_w=1., sigma2_b=1., **k['params']) if k['name'].startswith('analytic'): K = mlp_kernel.kernel_analytic(data, **k['kernel_params']).numpy() elif k['name'].startswith('efficient'): K = mlp_kernel.kernel_efficient(data, **k['kernel_params']).numpy() else: K = mlp_kernel.kernel_mc(data, **k['kernel_params']).numpy() k['kernel'] = K print('Kernel "%s" computation took %f seconds.' % (k['name'], time()-start)) def kernel_estimation_error(title, kernel_dicts, show_plots=True): # Note, `mc_size == -1` means the efficient kernel was used. K_dict = {} for d in kernel_dicts: if d['name'].startswith('analytic'): assert 0 not in K_dict.keys() K_dict[0] = d['kernel'] nan_mask = np.isnan(d['kernel']) if np.any(nan_mask): print('%s has %d nan entries!' 
% (d['name'], nan_mask.sum())) elif d['name'].startswith('efficient'): assert -1 not in K_dict.keys() K_dict[-1] = d['kernel'] else: assert 'num_samples' in d['kernel_params'] num_mc = d['kernel_params']['num_samples'] assert num_mc not in K_dict.keys() K_dict[num_mc] = d['kernel'] err_dict = {} for k, v in K_dict.items(): if k == 0: continue # Compute relative error err_dict[k] = np.abs(K_dict[k] - K_dict[0]) / K_dict[0] mc_sizes = list(err_dict.keys()) mc_sizes.sort() err_means = [err_dict[n][~nan_mask].mean() for n in mc_sizes] err_stds = [err_dict[n][~nan_mask].std() for n in mc_sizes] if show_plots: plt.title(title) plt.errorbar(mc_sizes, err_means, yerr=err_stds, fmt='.k') plt.xscale('log') plt.show() for n in mc_sizes: plt.title(title + ' MC-%d' % n) plt.hist(K_dict[n].flatten()) plt.show() return (mc_sizes, err_means, err_stds) if False: means = [np.array([-2, -2]), np.array([2, 2])] covs = [1 * np.eye(len(mean)) for mean in means] modes = get_gmm_tasks(means=means, covs=covs, num_train=10, num_test=100) data = GMMData(modes, classification=True, use_one_hot=False) else: data = get_circle_gmm_instance(sigmas=[.5]*2, num_train=20, num_test=100, use_one_hot=False, radius=4, offset=-np.pi/4, rseed=1) X = data.get_train_inputs() Y = data.get_train_outputs().squeeze() colors = misc.get_colorbrewer2_colors(family='Dark2') plt.title('GMM') plt.scatter(X[Y==0,0], X[Y==0,1], c=colors[4]) plt.scatter(X[Y==1,0], X[Y==1,1], c=colors[5]) plt.ylim(-10, 10) plt.xlim(-10, 10) plt.show() # We use symmetrical values -1 / 1 for classification Y[Y==0] = -1 X_torch = data.input_to_torch_tensor(X, device=device) kernels = [ ### Relu ### {'name': 'analytic_relu_1l', 'params': {'n_layer': 1, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {}}, {'name': 'relu_1l_mc100', 'params': {'n_layer': 1, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {'num_samples': 100}}, {'name': 'relu_1l', 'params': {'n_layer': 1, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {'num_samples': 1000}}, {'name': 'relu_1l_mc10k', 'params': {'n_layer': 1, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {'num_samples': 10000}}, {'name': 'relu_1l_mc100k', 'params': {'n_layer': 1, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {'num_samples': 100000}}, {'name': 'analytic_relu_2l', 'params': {'n_layer': 2, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {}}, {'name': 'relu_2l_mc100', 'params': {'n_layer': 2, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {'num_samples': 100}}, {'name': 'relu_2l', 'params': {'n_layer': 2, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {'num_samples': 1000}}, {'name': 'relu_2l_mc10k', 'params': {'n_layer': 2, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {'num_samples': 10000}}, {'name': 'relu_2l_mc100k', 'params': {'n_layer': 2, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {'num_samples': 100000}}, {'name': 'analytic_relu_4l', 'params': {'n_layer': 4, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {}}, {'name': 'relu_4l_mc100', 'params': {'n_layer': 4, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {'num_samples': 100}}, {'name': 'relu_4l', 'params': {'n_layer': 4, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {'num_samples': 1000}}, {'name': 'relu_4l_mc10k', 'params': {'n_layer': 4, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {'num_samples': 10000}}, {'name': 'relu_4l_mc100k', 'params': {'n_layer': 4, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {'num_samples': 100000}}, {'name': 'analytic_relu_8l', 'params': {'n_layer': 8, 'nonlinearity': 
torch.nn.ReLU()}, 'kernel_params': {}}, {'name': 'relu_8l_mc100', 'params': {'n_layer': 8, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {'num_samples': 100}}, {'name': 'relu_8l', 'params': {'n_layer': 8, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {'num_samples': 1000}}, {'name': 'relu_8l_mc10k', 'params': {'n_layer': 8, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {'num_samples': 10000}}, {'name': 'relu_8l_mc100k', 'params': {'n_layer': 8, 'nonlinearity': torch.nn.ReLU()}, 'kernel_params': {'num_samples': 100000}}, ### Error Function ### {'name': 'analytic_erf_1l', 'params': {'n_layer': 1, 'nonlinearity': torch.erf}, 'kernel_params': {}}, {'name': 'erf_1l_mc100', 'params': {'n_layer': 1, 'nonlinearity': torch.erf}, 'kernel_params': {'num_samples': 100}}, {'name': 'erf_1l', 'params': {'n_layer': 1, 'nonlinearity': torch.erf}, 'kernel_params': {'num_samples': 1000}}, {'name': 'erf_1l_mc10k', 'params': {'n_layer': 1, 'nonlinearity': torch.erf}, 'kernel_params': {'num_samples': 10000}}, {'name': 'erf_1l_mc100k', 'params': {'n_layer': 1, 'nonlinearity': torch.erf}, 'kernel_params': {'num_samples': 100000}}, {'name': 'analytic_erf_2l', 'params': {'n_layer': 2, 'nonlinearity': torch.erf}, 'kernel_params': {}}, {'name': 'erf_2l_mc100', 'params': {'n_layer': 2, 'nonlinearity': torch.erf}, 'kernel_params': {'num_samples': 100}}, {'name': 'erf_2l', 'params': {'n_layer': 2, 'nonlinearity': torch.erf}, 'kernel_params': {'num_samples': 1000}}, {'name': 'erf_2l_mc10k', 'params': {'n_layer': 2, 'nonlinearity': torch.erf}, 'kernel_params': {'num_samples': 10000}}, {'name': 'erf_2l_mc100k', 'params': {'n_layer': 2, 'nonlinearity': torch.erf}, 'kernel_params': {'num_samples': 100000}}, {'name': 'analytic_erf_4l', 'params': {'n_layer': 4, 'nonlinearity': torch.erf}, 'kernel_params': {}}, {'name': 'erf_4l_mc100', 'params': {'n_layer': 4, 'nonlinearity': torch.erf}, 'kernel_params': {'num_samples': 100}}, {'name': 'erf_4l', 'params': {'n_layer': 4, 'nonlinearity': torch.erf}, 'kernel_params': {'num_samples': 1000}}, {'name': 'erf_4l_mc10k', 'params': {'n_layer': 4, 'nonlinearity': torch.erf}, 'kernel_params': {'num_samples': 10000}}, {'name': 'erf_4l_mc100k', 'params': {'n_layer': 4, 'nonlinearity': torch.erf}, 'kernel_params': {'num_samples': 100000}}, {'name': 'analytic_erf_8l', 'params': {'n_layer': 8, 'nonlinearity': torch.erf}, 'kernel_params': {}}, {'name': 'erf_8l_mc100', 'params': {'n_layer': 8, 'nonlinearity': torch.erf}, 'kernel_params': {'num_samples': 100}}, {'name': 'erf_8l', 'params': {'n_layer': 8, 'nonlinearity': torch.erf}, 'kernel_params': {'num_samples': 1000}}, {'name': 'erf_8l_mc10k', 'params': {'n_layer': 8, 'nonlinearity': torch.erf}, 'kernel_params': {'num_samples': 10000}}, {'name': 'erf_8l_mc100k', 'params': {'n_layer': 8, 'nonlinearity': torch.erf}, 'kernel_params': {'num_samples': 100000}}, ### Cosine ### {'name': 'analytic_cos_1l', 'params': {'n_layer': 1, 'nonlinearity': torch.cos}, 'kernel_params': {}}, {'name': 'cos_1l_mc100', 'params': {'n_layer': 1, 'nonlinearity': torch.cos}, 'kernel_params': {'num_samples': 100}}, {'name': 'cos_1l', 'params': {'n_layer': 1, 'nonlinearity': torch.cos}, 'kernel_params': {'num_samples': 1000}}, {'name': 'cos_1l_mc10k', 'params': {'n_layer': 1, 'nonlinearity': torch.cos}, 'kernel_params': {'num_samples': 10000}}, {'name': 'cos_1l_mc100k', 'params': {'n_layer': 1, 'nonlinearity': torch.cos}, 'kernel_params': {'num_samples': 100000}}, ] compute_kernels(X_torch, kernels) from matplotlib.font_manager import FontProperties from 
matplotlib.ticker import FuncFormatter, MaxNLocator arch_names = ['relu_1l', 'relu_2l', 'relu_4l', 'relu_8l', 'erf_1l', 'erf_2l', 'erf_4l', 'erf_8l'] arch_names = ['relu_2l', 'relu_8l', 'erf_2l', 'erf_8l'] arch_labels = ['2-layer ReLU', '8-layer ReLU', '2-layer erf', '8-layer erf'] #arch_names = ['cos_1l'] #arch_labels = ['1-layer Cosine'] errs = {} errs_lbls = {} for i, name in enumerate(arch_names): curr_kernels = [k for k in kernels if name in k['name']] curr_errs = kernel_estimation_error(name, curr_kernels, show_plots=False) errs[name] = curr_errs errs_lbls[name] = arch_labels[i] colors = misc.get_colorbrewer2_colors(family='Dark2') small_font = FontProperties() small_font.set_size(16) ts, lw, ms = 15, 8, 140 # text fontsize, line width, marker size figsize = (12, 6) fig, axes = plt.subplots(figsize=figsize) #plt.title('MC estimation error') for i, aname in enumerate(errs.keys()): sizes = [s + 2*(i-1.5)*s/10 for s in errs[aname][0]] plt.scatter(sizes, errs[aname][1], c=colors[i], s=lw*6, label=errs_lbls[aname]) plt.errorbar(sizes, errs[aname][1], yerr=errs[aname][2], c=colors[i], fmt='.') plt.xscale('log') plt.legend(prop=small_font, loc='upper center', bbox_to_anchor=(0.47, -0.2), ncol=5) plt.ylabel('relative MC error', fontsize=ts) plt.xlabel('No. of MC samples', fontsize=ts) plt.axhline(y=0, color='k', linestyle=(0, (5, 10))) axes.grid(False) axes.set_facecolor('w') axes.axhline(y=axes.get_ylim()[0], color='k', lw=lw) axes.axvline(x=axes.get_xlim()[0], color='k', lw=lw) #plt.xticks([0, 1, 2, 3], fontsize=ts) plt.xticks(fontsize=ts) plt.yticks([0, .05, .1, .15], fontsize=ts) axes.tick_params(axis='both', length=lw, direction='out', width=lw/2.) filename = 'mc_error' if filename is not None: fpath = filename plt.savefig(fpath + '.pdf', bbox_inches='tight') plt.savefig(fpath + '.png', bbox_inches='tight') plt.show() ######################### plt.title('MC estimation error') for i, aname in enumerate(errs.keys()): sizes = [s + i*s/10 for s in errs[aname][0]] plt.scatter(sizes, errs[aname][1], label=aname) plt.xscale('log') plt.xlabel('num samples') plt.ylabel('error') plt.legend() plt.show()Scalingfrom sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaled_data = scaler.fit_transform(data1) print(scaled_data)[[ 1.29153238 -1.13827979 0.27908825 ... -1.61909203 1.90288227 -0.67917961] [-0.5389489 -0.47965843 -0.09701618 ... 0.64786643 -0.85997281 -0.48562324] [-0.27283273 -0.09912164 -0.96607302 ... 0.67042323 -0.0384044 -0.46537561] ... [-0.37231541 1.13030491 0.0088773 ... 0.28695762 -0.66120626 -0.63775406] [ 0.44841668 -0.40647827 -0.59727159 ... -0.34463279 1.14094382 -0.63775406] [ 1.11495062 -0.15034774 -0.33801514 ... -2.09278484 1.6246091 -0.62954556]]K Means Clusteringfrom sklearn.cluster import KMeans cost_values=[] for i in range(1,15): kmeans = KMeans(n_clusters=i) kmeans.fit(scaled_data) cost_values.append(kmeans.inertia_) plt.plot(cost_values) #inertia is calculated as the sum of squared distance for each point to it's closest centroid, i.e., its assigned cluster. 
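The elbow plot above is one way to pick the number of clusters; as a cross-check, silhouette scores can be computed for a few candidate values of k. A small sketch, assuming `scaled_data` is the standardized array produced above (the 2–10 range is an arbitrary choice for illustration):

```python
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

# Cross-check the elbow heuristic: a higher silhouette score indicates
# better-separated, more cohesive clusters.
for k in range(2, 11):
    labels = KMeans(n_clusters=k, random_state=42).fit_predict(scaled_data)
    print(f"k={k}: silhouette={silhouette_score(scaled_data, labels):.3f}")
```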
kmeans = KMeans(n_clusters = 3) clusters = kmeans.fit_predict(scaled_data) # countries are divided into 3 categories namely underdeveloped represented by 0, developing represented by 1 and developed represented by 2 print(clusters) data['clusters'] = clusters data data.describe()Test if Europeana metadata* [This Jupyter Notebook](https://github.com/salgo60/open-data-examples/blob/master/Europeana%20SPARQL%20->%20Pandas.ipynb)* [EDMObjectTemplatesProviders](https://github.com/europeana/corelib/wiki/EDMObjectTemplatesProviders)import json,sys import pandas as pd import sys from SPARQLWrapper import SPARQLWrapper, JSON def get_sparql_dataframe(endpoint_url, query): """ Helper function to convert SPARQL results into a Pandas data frame. """ user_agent = "salgo60/%s.%s" % (sys.version_info[0], sys.version_info[1]) sparql = SPARQLWrapper(endpoint_url, agent=user_agent) sparql.setQuery(query) sparql.setReturnFormat(JSON) result = sparql.query() print("nu",result) processed_results = json.load(result.response) cols = processed_results['head']['vars'] out = [] for row in processed_results['results']['bindings']: item = [] for c in cols: item.append(row.get(c, {}).get('value')) out.append(item) return pd.DataFrame(out, columns=cols) endpoint_url = "http://data.europeana.eu/" queryEuropeana = """PREFIX dc: PREFIX dc: PREFIX edm: SELECT ?o ?title ?creator ?sameas ?aggregatedCHO WHERE { ?o dc:title ?title . OPTIONAL {?o dc:creator ?creator} OPTIONAL {?o owl:sameAs ?sameas} OPTIONAL {?o edm:aggregatedCHO ?aggregatedCHO} OPTIONAL {?o edm:isRelatedTo ?isRelatedTo} } limit 10 """ #Take down all in a pandas dataset results = get_sparql_dataframe(endpoint_url, queryEuropeana) endpoint_url = "http://data.europeana.eu/" queryEuropeana = """PREFIX dc: PREFIX dc: PREFIX edm: SELECT ?o ?title ?creator ?sameas ?aggregatedCHO WHERE { ?o dc:title ?title . 
OPTIONAL {?o dc:creator ?creator} OPTIONAL {?o owl:sameAs ?sameas} OPTIONAL {?o edm:aggregatedCHO ?aggregatedCHO} OPTIONAL {?o edm:isRelatedTo ?isRelatedTo} } limit 10 """ #Take down all in a pandas dataset results = get_sparql_dataframe(endpoint_url, queryEuropeana)JupyterDash Demo - JupyterCon 2020 **Follow me:**- LinkedIn: https://www.linkedin.com/in/beatrizmaiads/- GitHub: https://github.com/beatrizmaiads Introducing JupyterDash JupyterDash is the new **plotly library** that facilitates the creation of Dash applications from Jupyter environments (for example, Classic Notebook, JupyterLab, Visual Studio Code notebooks, PyCharm notebooks, etc.).**References:**- Python Implementation: https://dash.plotly.com/- R Implementation: https://dashr.plotly.com/ **Dash Installation**To get started right away, install the ```jupyter-dash``` package using pip:```$ pip install jupyter-dash``` Import Librariesimport pandas as pd import dash import dash_core_components as dcc import dash_html_components as html import plotly.express as px from jupyter_dash import JupyterDash from dash.dependencies import Input, OutputLoad Data - CO2 Emissiondf = pd.read_csv('FuelConsumptionCo2.csv') df.head() # dimensionality of the DataFrame print(f'Number of Lines: {df.shape[0]}') print(f'Number of Columns: {df.shape[1]}')Number of Lines: 1067 Number of Columns: 13Construct the app and callbacks# Build App app = JupyterDash(__name__) app.layout = html.Div([ html.H1('JupyterDash Demo'), dcc.Graph(id='graph'), html.Label([ 'colorscale', dcc.Dropdown( id='colorscale-dropdown', clearable=False, value='plasma', options=[ {'label': c, 'value': c} for c in px.colors.named_colorscales() ]) ]), ]) # Define callback to update graph @app.callback( Output('graph', 'figure'), [Input('colorscale-dropdown', 'value')] ) def update_figure(colorscale): return px.scatter( df, x='ENGINESIZE', y='CO2EMISSIONS', color='CYLINDERS', color_continuous_scale=colorscale, render_mode="webgl", title='Scatter Plot - CO2 Emission (ENGINESIZE x CO2EMISSIONS)' ) # Run app and display result inline in the notebook app.run_server(mode='inline')Numpy (Часть 2) > 🚀 В этой практике нам понадобятся: `numpy==1.21.2` > 🚀 Установить вы их можете с помощью команды: `!pip install numpy==1.21.2` Содержание * [Булевы операции над массивами](Bulevy_operatsii_nad_massivami) * [Задание](Zadanie)* [Больше индексации](Bol_she_indeksatsii)* [Операции с массивами](Operatsii_s_massivami)* [Операции по осям](Operatsii_po_osjam) * [Задание](Zadanie_1)# Как всегда, в начале ноутбука делаем все необходимые импорты import numpy as npБулевы операции над массивами Массивы numpy поддерживают операции сравнения, которые генерируют так называемые маски.arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) print(arr) # Произведём сравнение с числом result = arr > 6 print(result) print(result.dtype)Как видно, в результате операции создается такой же массив, но с булевым типом. Так мы получаем маску элементов, которые соответсвуют условию булевого выражения.Как и с простым булевым типом, маски можно совмещать булевыми операциями:result_1 = arr > 6 result_2 = arr < 10 print(result_1) print(result_2) print('----------') # Следующие две операции идентичны, но для второй скобки обязательны! 
print(result_1 & result_2) print((arr > 6) & (arr < 10))При работе с масками полезно помнить про функции `np.all()` и `np.any()`, которые имеют следующие описания:- `all` - проверка на то, что все элементы в массиве имеют значение `True` (аналог операции И);- `any` - проверка, что хотя бы один элемент в массиве имеет значение `True` (аналог операции ИЛИ). ЗаданиеОпределите, имеется ли хотя бы одно значение больше пяти и меньше восьми в массиве:arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) print(arr) # TODO - определить, имеется ли в массиве хотя бы один элемент, # отвечающий условию больше пяти и меньше восьмиБольше индексации Помимо индексации с помощью диапазонов и списков индексов numpy поддерживает индексацию с помощью булевых массивов.arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) # Получим маску и воспользуемся ею для индексации mask = arr > 6 print(mask) print(arr[mask])Обратите внимание, такая индексация по всему массиву приводит к созданию 1D массива из-за того, что элементы имеют неравномерное расположение, так что они представляются в выпрямленном виде (flattened).> Выпрямленное представление - приведение массива любой размерности к 1D представлению. Для этого есть метод `ndarray.flatten()`. Происходит это путём разворачивания массива в одномерный, проходом по индексам, начиная с последней размерности: в 2D случае мы берем элемент 1-го рядя, 1-й колонки, затем 1-го ряда, 2-й колонки, как закончим со всем рядом, то переходим на следующий и снова по колонкам. Для 3D массивов - сначала полностью разворачивается глубина, затем колонки, затем ряды.arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) print(arr) print(arr.flatten())Операции с массивами Мало смысла в создании массивов без возможности сделать с ними что-либо.x = np.array([[1,2],[3,4]], dtype=np.float64) y = np.array([[5,6],[7,8]], dtype=np.float64) # Сложение print(x + y) print(np.add(x, y)) # Вычитание print(x - y) print(np.subtract(x, y)) # Поэлементное умножение print(x * y) print(np.multiply(x, y)) # Поэлементное деление print(x / y) print(np.divide(x, y)) # Вычисление корня каждого элемента print(np.sqrt(x))Отлично, мы рассмотрели основные операции над массивами. Операции поэлементные, результаты такие же по размеру, как и операнды. Помимо рассмотренных существуют и множество других, например, экспонента, логарифм и т.д.Помимо поэлементных операций, которые не составляют сложностей в представлении, существует операция матричного умножения. В данном случае мы имеем варианты перемножения матриц, векторов и скаляров.# Умножение матрицы на скаляр - релизуется как поэлементное перемножение k = 1.5 print(k*x) v = np.array([1, 2]) # Матричное умножение реализуется через оператор @ или функцию np.dot() # или метод ndarray.dot() # Для умножения матрица-вектор print(x @ v) print(x.dot(v)) print(np.dot(x, v)) # Двух векторов w = np.array([3, 4]) print(w @ v) print(w.dot(v)) print(np.dot(w, v)) # Так и для перемножения матриц print(x @ y) print(x.dot(y)) print(np.dot(x, y))Правила умножения матриц и векторов здесь идентичны математическим, то есть соседние размерности должны соотноситься: $(m, n)*(n, k)=(m, k)$.Прекрасно! 
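A small sketch illustrating the shape rule stated above; the array shapes below are chosen purely for illustration:

```python
import numpy as np

# (m, n) @ (n, k) -> (m, k): the inner dimensions must match
A = np.ones((2, 3))   # m=2, n=3
B = np.ones((3, 4))   # n=3, k=4
print((A @ B).shape)  # (2, 4)

# Mismatched inner dimensions raise a ValueError
try:
    A @ np.ones((4, 2))
except ValueError as e:
    print("shape mismatch:", e)
```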
Раз мы разобрались с тем, как делать базовые операции, то осталось лишь последняя базовая операция - транспонирование.# Для транспонирования можно воспользоваться атрибутом ndarray.T # или функцией np.transpose() x = np.array([[1, 2, 3], [4, 5, 6]]) print(x) print(x.shape) print('--------') print(np.transpose(x)) print(x.T) print(x.T.shape)Операции по осям Если ранее операции позволяли модифицировать массив и получать новые массивы с помощью базовых операций, то сейчас мы рассмотрим более "аггрегуирующие" операции, например, получение среднего значения, суммы и т.д.При разборе обратим внимание на один важный аргумент в операциях `axis`.x = np.array([[1, 2, 3], [5, 4, 3]]) print(x) # Возьмем среднее значение всего массива print(np.mean(x)) print(np.mean(x, axis=None)) # Получим сумму значений всего массива print(np.sum(x)) print(np.sum(x, axis=None))Отлично, работает как надо, но зачем этот аргумент `axis`? Все очень просто, он управляет тем, по какой оси (размерности) делается операция. Нужно это, чтобы получить, например, среднее по каждому столбцу или строке.# Получим среднее по каждому столбцу print(np.mean(x, axis=0)) # Получим среднее по каждой строке print(np.mean(x, axis=1))Так как же это воспринимать? Помните индексацию слайсами (диапазонами)? Чтобы получить весь второй столбец, мы пишем $[:, 2]$, что означает "все строки стобца под индексом 2". Так и тут, если мы хотим, чтобы операция выполнилась по всем строкам (например, для результата по столбцам), то пишем `axis=0`. Если хотим, чтобы операция проходила по колонкам (для каждой строки), то пишем `axis=1`. То есть, в `axis` задается тот индекс, вдоль которого выполняется операция.Можно подходить к вопросу более формально. Например, мы имеем матрицу размера $(10, 13)$. При указании `axis=0` мы получим результат вычисления операции по каждому столбцу или массив размером $(13, )$. Через аргумент `axis` мы указываем размерность, которую схлопнем до единицы. Для случая многомерных массивов, например с размером $(8, 3, 40, 20)$, указывая `axis=1`, мы получаем результат с размером $(8, 40, 20)$.> Такие возможности задания осей хороши тем, что не нужно писать циклы прохода по всем строкам, чтобы получить результат - это делает код более читаемым и производительным. 
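A short sketch of the shape bookkeeping described above: reducing along `axis` removes that dimension from the result (the array contents are random and purely illustrative):

```python
import numpy as np

x = np.random.rand(8, 3, 40, 20)
print(np.mean(x, axis=1).shape)      # (8, 40, 20) - axis 1 is collapsed
print(np.mean(x, axis=0).shape)      # (3, 40, 20)
print(np.sum(x, axis=(2, 3)).shape)  # (8, 3) - several axes can be reduced at once
```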
ЗаданиеНапишите операцию получения суммы элементов по всем стобцам:data = np.random.randint(low=0, high=5, size=(3, 6)) print(data) # TODO - вектор сумм элементов по каждому столбцуTumor Response to Treatment# Store the Mean Tumor Volume Data Grouped by Drug and Timepoint Mean_tumor_vol = clinical_data_complete.groupby(["Drug","Timepoint"]).mean()["Tumor Volume (mm3)"] Mean_tumor_vol # Convert to DataFrame Mean_tumor_vol = pd.DataFrame(Mean_tumor_vol) Mean_tumor_vol = Mean_tumor_vol.reset_index() # Preview DataFrame Mean_tumor_vol.head() # Store the Standard Error of Tumor Volumes Grouped by Drug and Timepoint Tumor_vol_SE = clinical_data_complete.groupby(["Drug", "Timepoint"]).sem()["Tumor Volume (mm3)"] # Convert to DataFrame Tumor_vol_SE = pd.DataFrame(Tumor_vol_SE) # Preview DataFrame Tumor_vol_SE.head().reset_index() # Minor Data Munging to Re-Format the Data Frames Mean_tumor_vol = Mean_tumor_vol.reset_index() Mean_tumor_vol_pivot_mean = Mean_tumor_vol.pivot(index="Timepoint", columns="Drug")["Tumor Volume (mm3)"] # Preview that Reformatting worked Mean_tumor_vol_pivot_mean.head() # Minor Data Munging to Re-Format the Data Frames Tumor_vol_SE = Tumor_vol_SE.reset_index() Tumor_vol_pivot_SE = Tumor_vol_SE.pivot(index="Timepoint", columns="Drug")["Tumor Volume (mm3)"] # Preview that Reformatting worked Tumor_vol_pivot_SE.head() # Generate the Plot (with Error Bars) plt.errorbar(Mean_tumor_vol_pivot_mean.index, Mean_tumor_vol_pivot_mean["Capomulin"], yerr=Tumor_vol_pivot_SE["Capomulin"], color="r", marker="o", markersize=5, linestyle="dashed", linewidth=0.50) plt.errorbar(Mean_tumor_vol_pivot_mean.index, Mean_tumor_vol_pivot_mean["Infubinol"], yerr=Tumor_vol_pivot_SE["Infubinol"], color="b", marker="^", markersize=5, linestyle="dashed", linewidth=0.50) plt.errorbar(Mean_tumor_vol_pivot_mean.index, Mean_tumor_vol_pivot_mean["Ketapril"], yerr=Tumor_vol_pivot_SE["Ketapril"], color="g", marker="s", markersize=5, linestyle="dashed", linewidth=0.50) plt.errorbar(Mean_tumor_vol_pivot_mean.index, Mean_tumor_vol_pivot_mean["Placebo"], yerr=Tumor_vol_pivot_SE["Placebo"], color="k", marker="d", markersize=5, linestyle="dashed", linewidth=0.50) plt.title("Tumor Response to Treatment") plt.ylabel("Tumor Volume (mm3)") plt.xlabel("Time (Days)") plt.grid(True) plt.legend(loc="best", fontsize="small", fancybox=True) # Save the Figure plt.savefig("C:/Users/17703/Shreya/Pymaceuticals/Fig1.png") # Show the Figure plt.show() # Save the Figure # Show the Figure plt.show()![Tumor Response to Treatment](../Images/treatment.png) Metastatic Response to Treatment# Store the Mean Met. Site Data Grouped by Drug and Timepoint # Convert to DataFrame # Preview DataFrame # Store the Standard Error associated with Met. 
Sites Grouped by Drug and Timepoint # Convert to DataFrame # Preview DataFrame # Minor Data Munging to Re-Format the Data Frames # Preview that Reformatting worked # Generate the Plot (with Error Bars) # Save the Figure # Show the Figure![Metastatic Spread During Treatment](../Images/spread.png) Survival Rates# Store the Count of Mice Grouped by Drug and Timepoint (W can pass any metric) # Convert to DataFrame # Preview DataFrame # Minor Data Munging to Re-Format the Data Frames # Preview the Data Frame # Generate the Plot (Accounting for percentages) # Save the Figure # Show the Figure plt.show()![Metastatic Spread During Treatment](../Images/survival.png) Summary Bar Graph# Calculate the percent changes for each drug # Display the data to confirm # Store all Relevant Percent Changes into a Tuple # Splice the data between passing and failing drugs # Orient widths. Add labels, tick marks, etc. # Use functions to label the percentages of changes # Call functions to implement the function calls # Save the Figure # Show the Figure fig.show()USPTO data Exploration> Looking into the USPTOyield data# hide import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline # hide default_plot_kwargs = { 'color': '#541FA3', } title_fontsize = 18 label_fontsize = 16Getting the data filesThe USPTO yields data files are too large to be uploaded to this GitHub repo and have to be downloaded from this [Box folder](https://ibm.ent.box.com/v/uspto-yields-data) and placed into `../data/uspto`. The data was extracted from the data set published by Lowe [Chemical reactions from US patents (1976-Sep2016)](https://figshare.com/articles/Chemical_reactions_from_US_patents_1976-Sep2016_/5104873), which has catalized research on ML for chemical reactions in recent years. USPTO data - mass scalesWe observed that `milligram scale` and `gram scale` reactions had different yield distributions. Gram scale reaction are often optimized, whereas milligram scale are not optimized and yields might be often lower. In academia predicting the non-optimized yield is more relevant. 
Therefore, we splitted the USPTO reactions, which contained yield and product mass information into two categories (gram, milligram) at a threshold of 1 gram.# data gram_df = pd.read_csv('../data/uspto/yields_above_200622.tsv', sep='\t', index_col=0) gram_df['calc_yield'] = gram_df.calc_yield_above milligram_df = pd.read_csv('../data/uspto/yields_below_200622.tsv', sep='\t', index_col=0) milligram_df['calc_yield'] = milligram_df.calc_yield_below # data fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12,6)) fig.suptitle('Yield distributions - USPTO', fontsize=title_fontsize) ax1.set_ylabel('Count', fontsize=label_fontsize) ax1.set_xlabel('Yields [%] - gram scale', fontsize=label_fontsize) gram_df.calc_yield.hist(bins=20, ax=ax1, **default_plot_kwargs) ax2.set_xlabel('Yields [%] - milligram scale', fontsize=label_fontsize) milligram_df.calc_yield.hist(bins=20, ax=ax2, **default_plot_kwargs) # hide # data # fig.tight_layout() fig.savefig('images/uspto_yield_distributions.pdf') fig.savefig('images/uspto_yield_distributions.png') # data pd.concat([gram_df['calc_yield'].describe(),milligram_df['calc_yield'].describe()], axis=1)Prepare the data# data milligram_df['rxn'] = milligram_df.tokenized_reactants_inputs.str.replace(' ', '') + '>>' + milligram_df.tokenized_products_inputs.str.replace(' ', '') milligram_df['scaled_yield'] = milligram_df.calc_yield / 100 milligram_train_random_split_df = milligram_df[milligram_df.random_split=='train'][['rxn', 'scaled_yield']].sample(frac=1., random_state=42) milligram_test_random_split_df = milligram_df[milligram_df.random_split=='test'][['rxn', 'scaled_yield']] milligram_train_time_split_df = milligram_df[milligram_df.time_split=='train'][['rxn', 'scaled_yield']].sample(frac=1., random_state=42) milligram_test_time_split_df = milligram_df[milligram_df.time_split=='test'][['rxn', 'scaled_yield']] #hide # data milligram_train_random_split_df.to_csv('../data/uspto/milligram_train_random_split.tsv', sep='\t') milligram_test_random_split_df.to_csv('../data/uspto/milligram_test_random_split.tsv', sep='\t') milligram_train_time_split_df.to_csv('../data/uspto/milligram_train_time_split.tsv', sep='\t') milligram_test_time_split_df.to_csv('../data/uspto/milligram_test_time_split.tsv', sep='\t')Gram scale# data gram_df['rxn'] = gram_df.tokenized_reactants_inputs.str.replace(' ', '') + '>>' + gram_df.tokenized_products_inputs.str.replace(' ', '') gram_df['scaled_yield'] = gram_df.calc_yield / 100 gram_train_random_split_df = gram_df[gram_df.random_split=='train'][['rxn', 'scaled_yield']].sample(frac=1., random_state=42) gram_test_random_split_df = gram_df[gram_df.random_split=='test'][['rxn', 'scaled_yield']] gram_train_time_split_df = gram_df[gram_df.time_split=='train'][['rxn', 'scaled_yield']].sample(frac=1., random_state=42) gram_test_time_split_df = gram_df[gram_df.time_split=='test'][['rxn', 'scaled_yield']] #hide # data gram_train_random_split_df.to_csv('../data/uspto/gram_train_random_split.tsv', sep='\t') gram_test_random_split_df.to_csv('../data/uspto/gram_test_random_split.tsv', sep='\t') gram_train_time_split_df.to_csv('../data/uspto/gram_train_time_split.tsv', sep='\t') gram_test_time_split_df.to_csv('../data/uspto/gram_test_time_split.tsv', sep='\t')USPTO - smoothed# data gram_df = pd.read_csv('../data/uspto/yields_above_200622.tsv', sep='\t', index_col=0) gram_smoothed_df = pd.read_csv('../data/uspto/yields_above_smooth_200622.tsv', sep='\t', index_col=0) gram_df['calc_yield'] = gram_smoothed_df['4NN-2'].values milligram_df = 
pd.read_csv('../data/uspto/yields_below_200622.tsv', sep='\t', index_col=0) milligram_smoothed_df = pd.read_csv('../data/uspto/yields_below_smooth_200622.tsv', sep='\t', index_col=0) milligram_df['calc_yield'] = milligram_smoothed_df['4NN-2'].values # data fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12,6)) fig.suptitle('Yield distributions - USPTO smoothed 4NN-2', fontsize=title_fontsize) ax1.set_ylabel('Count', fontsize=label_fontsize) ax1.set_xlabel('Yields [%] - gram scale', fontsize=label_fontsize) gram_df.calc_yield.hist(bins=20, ax=ax1, **default_plot_kwargs) ax2.set_xlabel('Yields [%] - milligram scale', fontsize=label_fontsize) milligram_df.calc_yield.hist(bins=20, ax=ax2, **default_plot_kwargs) # hide # data # fig.tight_layout() fig.savefig('images/uspto_yield_distributions_4nn-2.pdf') fig.savefig('images/uspto_yield_distributions_4nn-2.png') #hide # data milligram_df['rxn'] = milligram_df.tokenized_reactants_inputs.str.replace(' ', '') + '>>' + milligram_df.tokenized_products_inputs.str.replace(' ', '') milligram_df['scaled_yield'] = milligram_df.calc_yield / 100 milligram_train_random_split_df = milligram_df[milligram_df.random_split=='train'][['rxn', 'scaled_yield']].sample(frac=1., random_state=42) milligram_test_random_split_df = milligram_df[milligram_df.random_split=='test'][['rxn', 'scaled_yield']] milligram_train_time_split_df = milligram_df[milligram_df.time_split=='train'][['rxn', 'scaled_yield']].sample(frac=1., random_state=42) milligram_test_time_split_df = milligram_df[milligram_df.time_split=='test'][['rxn', 'scaled_yield']] milligram_train_random_split_df.to_csv('../data/uspto/milligram_smooth_train_random_split.tsv', sep='\t') milligram_test_random_split_df.to_csv('../data/uspto/milligram_smooth_test_random_split.tsv', sep='\t') milligram_train_time_split_df.to_csv('../data/uspto/milligram_smooth_train_time_split.tsv', sep='\t') milligram_test_time_split_df.to_csv('../data/uspto/milligram_smooth_test_time_split.tsv', sep='\t') #hide # data gram_df['rxn'] = gram_df.tokenized_reactants_inputs.str.replace(' ', '') + '>>' + gram_df.tokenized_products_inputs.str.replace(' ', '') gram_df['scaled_yield'] = gram_df.calc_yield / 100 gram_train_random_split_df = gram_df[gram_df.random_split=='train'][['rxn', 'scaled_yield']].sample(frac=1., random_state=42) gram_test_random_split_df = gram_df[gram_df.random_split=='test'][['rxn', 'scaled_yield']] gram_train_time_split_df = gram_df[gram_df.time_split=='train'][['rxn', 'scaled_yield']].sample(frac=1., random_state=42) gram_test_time_split_df = gram_df[gram_df.time_split=='test'][['rxn', 'scaled_yield']] gram_train_random_split_df.to_csv('../data/uspto/gram_smooth_train_random_split.tsv', sep='\t') gram_test_random_split_df.to_csv('../data/uspto/gram_smooth_test_random_split.tsv', sep='\t') gram_train_time_split_df.to_csv('../data/uspto/gram_smooth_train_time_split.tsv', sep='\t') gram_test_time_split_df.to_csv('../data/uspto/gram_smooth_test_time_split.tsv', sep='\t')Prototype stopped: bad performance Sentiment Analysis PrototypingPrototype code for rule-based sentiment analysis.import os, sys, re, string sys.path.append("..") from config import credentials import dropbox import numpy as np import pandas as pd import nltk nltk.data.path.append("../data/external/nltk_data") from nltk import word_tokenize from sklearn.metrics import confusion_matrix, classification_report, accuracy_score import matplotlib.pyplot as plt %matplotlib inlineLoadingteam_dbx = dropbox.DropboxTeam(credentials.dropbox_team_access_token) team_root = 
team_dbx.with_path_root(dropbox.common.PathRoot.namespace_id( credentials.dropbox_team_namespace_id)) user_dbx = team_root.as_user(credentials.dropbox_team_member_id) data_path = "/Data/CSVData" test_fpath = os.path.join(data_path, "TestData", "forSentAnalysis.csv") _, res = user_dbx.files_download(fpath) test_data = pd.read_csv(res.raw) test_data.shape sentiws_path = "../data/external/SentiWS_v2.0" positive_fpath = os.path.join(sentiws_path, "SentiWS_v2.0_Positive.txt") negative_fpath = os.path.join(sentiws_path, "SentiWS_v2.0_Negative.txt") # POSITIVE words positive = pd.read_csv(positive_fpath, sep="\t", names=["word_pos", "polarity", "inflections"]) positive[["word", "pos"]] = positive.word_pos.str.split("|", expand=True) positive = positive[["word", "polarity", "pos", "inflections"]] pos_total = positive.word.count() + positive.inflections.str.split(",").dropna().apply(lambda x: len(x)).sum() print("POS total (incl. inflections):", pos_total) # NEGATIVE words negative = pd.read_csv(negative_fpath, sep="\t", names=["word_pos", "polarity", "inflections"]) negative[["word", "pos"]] = negative.word_pos.str.split("|", expand=True) negative = negative[["word", "polarity", "pos", "inflections"]] neg_total = negative.word.count() + negative.inflections.str.split(",").dropna().apply(lambda x: len(x)).sum() print("NEG total (incl. inflections):", neg_total)POS total (incl. inflections): 16716 NEG total (incl. inflections): 18217Preprocessing SentiWS lexicondef make_lexicon(polarity_df, lexicon): """ Makes lexicon of pos/neg words with corresponding polarity score. Util func: Appends words and inflections with corresponding polarity value to lexicon dict. """ for _, row in polarity_df.iterrows(): lexicon[row["word"].lower()] = row["polarity"] if row["inflections"] is not np.nan: words = row["inflections"].split(",") for word in words: lexicon[word.lower()] = row["polarity"] return lexicon lexicon = {} lexicon = make_lexicon(positive, make_lexicon(negative, lexicon))Test cleansing and tokenizationdef clean_text(text): """ Util: Cleans text string. > Lowercase string > Replace game scores with "GAME_SCORE" placeholder > Punctuation removal > Replace numbers with "NUM" placeholder """ lowercased = text.lower() scores_removed = re.sub(r"(\d+) ?(-|:) ?(\d+)", "GAME_SCORE ", lowercased) punctuations = string.punctuation + "„" + "”" punct_removed = scores_removed.translate(str.maketrans("", "", punctuations)) num_replaced = re.sub(r"\b\d+\b", "NUM", punct_removed) return num_replaced test_data["cleaned_txt"] = test_data.text.apply(clean_text) test_data["tokens"] = (test_data.cleaned_txt.apply(word_tokenize))Filter samples with pos/neu/neg sentiment onlyratings_dict = {0: "positive", 10: "neutral", 20: "negative", 30: "offensive", -2: "notAssessable"} test_data["rating"] = test_data.replace({"Rating": ratings_dict}).Rating.astype(str) test_subset = test_data.loc[(test_data.rating == "positive") | (test_data.rating == "neutral") | (test_data.rating == "negative")] test_subset = test_subset.copy() test_subset.shapeSentiment AssignmentRule-based approach, using SentiWS v2.0. 
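Before looking at the scoring policies, here is a small illustrative sketch of how a cleaned, tokenized sentence would be scored against a lexicon. It assumes the `clean_text` helper and `word_tokenize` import defined above have been run; the sample sentence and the toy polarity values are invented for illustration and are not taken from the SentiWS data:

```python
# Toy lexicon with invented polarity values (the real one is built by make_lexicon above)
toy_lexicon = {"super": 0.5, "verloren": -0.4}

sample = "Das Spiel war super, 3:1 gewonnen!"
cleaned = clean_text(sample)       # lowercases, masks the score "3:1", strips punctuation
tokens = word_tokenize(cleaned)

# Sum the polarity of every token found in the lexicon
score = sum(toy_lexicon.get(tok, 0.0) for tok in tokens)
print(cleaned)
print(tokens)
print(score)
```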
Policy: Summing polarity scoresdef assign_sentiment_polar(tokens, lexicon): """ Computes sentiment score by summing polarity scores """ score = 0 for word in tokens: if word in lexicon.keys(): score += lexicon[word] return score test_subset["sentiment_score_polar"] = test_subset.tokens.apply(lambda x: assign_sentiment_polar(x, lexicon))ClassificationPolicy: 0 -> neutral (& kW); negative; > 0 -> positivetest_subset["sentiment_polar"] = test_subset.sentiment_score_polar.apply(lambda x: "negative" if x < 0 else "positive" if x > 0 else "neutral")Policy: Frequency of pos/neg wordsdef assign_sentiment_freq(tokens, lexicon): """ Computes sentiment score by summing-up count of pos/neg words """ neg, pos = 0, 0 for word in tokens: if word in lexicon.keys(): if lexicon[word] > 0: pos += 1 else: neg += 1 if pos > neg: return "positive" elif neg > pos: return "negative" else: return "neutral"ClassificationPolicy: same count or 0 -> neutral; more pos -> positive; more neg -> negativetest_subset["sentiment_freq"] = test_subset.tokens.apply(lambda x: assign_sentiment_freq(x, lexicon))Evaluationlabels = ratings_dict = ["positive", "neutral", "negative"]Confusion Matrix Policy: Summing polarity scorescm_polar = confusion_matrix(test_subset.rating, test_subset.sentiment_polar, labels=labels) cm_df_polar = pd.DataFrame(cm_polar, columns=labels, index=labels) cm_df_polar.index.name = "True" cm_df_polarPolicy: Frequency of pos/neg wordscm_freq = confusion_matrix(test_subset.rating, test_subset.sentiment_freq, labels=labels) cm_df_freq = pd.DataFrame(cm_freq, columns=labels, index=labels) cm_df_freq.index.name = "True" cm_df_freqClassification Report Policy: Summing polarity scoresprint(classification_report(test_subset.rating, test_subset.sentiment_polar, labels=labels))precision recall f1-score support positive 0.29 0.62 0.40 138 neutral 0.36 0.09 0.14 311 negative 0.58 0.70 0.63 396 accuracy 0.46 845 macro avg 0.41 0.47 0.39 845 weighted avg 0.45 0.46 0.41 845Policy: Frequency of pos/neg wordsprint(classification_report(test_subset.rating, test_subset.sentiment_freq, labels=labels))precision recall f1-score support positive 0.26 0.70 0.38 138 neutral 0.37 0.22 0.28 311 negative 0.61 0.45 0.52 396 accuracy 0.41 845 macro avg 0.41 0.46 0.39 845 weighted avg 0.46 0.41 0.41 845IntroductionSometimes it doesn't matter whether your query is efficient or not. For example, you might write a query you expect to run only once, and it might be working on a small dataset. In this case, anything that gives you the answer you need will do.But what about queries that will be run many times, like a query that feeds data to a website? Those need to be efficient so you don't leave users waiting for your website to load.Or what about queries on huge datasets? These can be slow and cost a business a lot of money if they are written poorly.Most database systems have a **query optimizer** that attempts to interpret/execute your query in the most effective way possible. But several strategies can still yield huge savings in many cases. 
Some useful functionsWe will use two functions to compare the efficiency of different queries:- `show_amount_of_data_scanned()` shows the amount of data the query uses.- `show_time_to_run()` prints how long it takes for the query to execute.#$HIDE_INPUT$ from google.cloud import bigquery from time import time client = bigquery.Client() def show_amount_of_data_scanned(query): # dry_run lets us see how much data the query uses without running it dry_run_config = bigquery.QueryJobConfig(dry_run=True) query_job = client.query(query, job_config=dry_run_config) print('Data processed: {} GB'.format(round(query_job.total_bytes_processed / 10**9, 3))) def show_time_to_run(query): time_config = bigquery.QueryJobConfig(use_query_cache=False) start = time() query_result = client.query(query, job_config=time_config).result() end = time() print('Time to run: {} seconds'.format(round(end-start, 3)))Strategies 1) Only select the columns you want. It is tempting to start queries with **SELECT * FROM ...**. It's convenient because you don't need to think about which columns you need. But it can be very inefficient.This is especially important if there are text fields that you don't need, because text fields tend to be larger than other fields.star_query = "SELECT * FROM `bigquery-public-data.github_repos.contents`" show_amount_of_data_scanned(star_query) basic_query = "SELECT size, binary FROM `bigquery-public-data.github_repos.contents`" show_amount_of_data_scanned(basic_query)In this case, we see a 1000X reduction in data being scanned to complete the query, because the raw data contained a text field that was 1000X larger than the fields we might need. 2) Read less data.Both queries below calculate the average duration (in seconds) of one-way bike trips in the city of San Francisco.more_data_query = """ SELECT MIN(start_station_name) AS start_station_name, MIN(end_station_name) AS end_station_name, AVG(duration_sec) AS avg_duration_sec FROM `bigquery-public-data.san_francisco.bikeshare_trips` WHERE start_station_id != end_station_id GROUP BY start_station_id, end_station_id LIMIT 10 """ show_amount_of_data_scanned(more_data_query) less_data_query = """ SELECT start_station_name, end_station_name, AVG(duration_sec) AS avg_duration_sec FROM `bigquery-public-data.san_francisco.bikeshare_trips` WHERE start_station_name != end_station_name GROUP BY start_station_name, end_station_name LIMIT 10 """ show_amount_of_data_scanned(less_data_query)Since there is a 1:1 relationship between the station ID and the station name, we don't need to use the `start_station_id` and `end_station_id` columns in the query. By using only the columns with the station IDs, we scan less data. 3) Avoid N:N JOINs.Most of the JOINs that you have executed in this course have been **1:1 JOINs**. In this case, each row in each table has at most one match in the other table.![JOIN](https://i.imgur.com/fp7oMLq.png)Another type of JOIN is an **N:1 JOIN**. Here, each row in one table matches potentially many rows in the other table. ![JOIN](https://i.imgur.com/7PxE0Mr.png)Finally, an **N:N JOIN** is one where a group of rows in one table can match a group of rows in the other table. 
Note that in general, all other things equal, this type of JOIN produces a table with many more rows than either of the two (original) tables that are being JOINed.![JOIN](https://i.imgur.com/UsNZZoz.png)big_join_query = """ SELECT repo, COUNT(DISTINCT c.committer.name) as num_committers, COUNT(DISTINCT f.id) AS num_files FROM `bigquery-public-data.github_repos.commits` AS c, UNNEST(c.repo_name) AS repo INNER JOIN `bigquery-public-data.github_repos.files` AS f ON f.repo_name = repo WHERE f.repo_name IN ( 'tensorflow/tensorflow', 'facebook/react', 'twbs/bootstrap', 'apple/swift', 'Microsoft/vscode', 'torvalds/linux') GROUP BY repo ORDER BY repo """ show_time_to_run(big_join_query) small_join_query = """ WITH commits AS ( SELECT COUNT(DISTINCT committer.name) AS num_committers, repo FROM `bigquery-public-data.github_repos.commits`, UNNEST(repo_name) as repo WHERE repo IN ( 'tensorflow/tensorflow', 'facebook/react', 'twbs/bootstrap', 'apple/swift', 'Microsoft/vscode', 'torvalds/linux') GROUP BY repo ), files AS ( SELECT COUNT(DISTINCT id) AS num_files, repo_name as repo FROM `bigquery-public-data.github_repos.files` WHERE repo_name IN ( 'tensorflow/tensorflow', 'facebook/react', 'twbs/bootstrap', 'apple/swift', 'Microsoft/vscode', 'torvalds/linux') GROUP BY repo ) SELECT commits.repo, commits.num_committers, files.num_files FROM commits INNER JOIN files ON commits.repo = files.repo ORDER BY repo """ show_time_to_run(small_join_query)A Supervised approach to rating predction. In this notebook, we feed the LDA and word2vec predictions into a supervised algorithm in order to predict the rating differential. First, let's get the rating differential...import pandas as pd from itertools import chain import cPickle as pickle reviews = pd.read_pickle('../output/bar_reviews_cleaned_and_tokenized.pickle') training_users = pickle.load(open('../output/training_users.pickle', 'rb')) test_users = pickle.load(open('../output/test_users.pickle', 'rb')) # Make the active review set training only review_train = reviews[reviews.user_id.isin(training_users)] review_test = reviews[reviews.user_id.isin(test_users)]Load the LDA Modelsimport sys sys.path.append('../vectorsearch/') import LDA # Load the LDA models for businesses and companies review_lda = LDA.LoadLDAModel('../output/LDA_model_reviews.pickle') bus_lda = LDA.LoadLDAModel('../output/LDA_model_bus.pickle')Vectorize the docs_reviews for use as featuresdef GenerateInputOutput(review_set, lda_model): ''' Given a list of reviews... Returns docs_reviews : list document string for each review bus_ids : list ids for each business rev_diff: list difference between the user rating and average rating ''' # For each business, generate list of average reviews... avg_reviews = review_set.groupby('business_id').mean()['stars'] # Get the differential for each review rev_diff = map(lambda (bus_id, stars): stars - avg_reviews[bus_id], zip(review_set.business_id.values, review_set.stars.values) ) # Get the review text docs_reviews = [" ".join(list(chain.from_iterable(rev))) for rev in review_set.cleaned_tokenized.values] # Convert the documents into vectorized form as input to LDA. # *These are the LDA features* doc_LDA_topic_vectors = lda_model.get_doc_topics(docs_reviews) # List of business ids for each review bus_ids = review_set.business_id.values return doc_LDA_topic_vectors, bus_ids, rev_diff # Generate for test and training data. 
doc_LDA_topic_vectors_train, bus_ids_train, rev_diff_train = GenerateInputOutput(review_train, review_lda) doc_LDA_topic_vectors_test, bus_ids_test, rev_diff_test = GenerateInputOutput(review_test, review_lda) # Normalize the topic vectors... doc_LDA_topic_vectors_train = [top/np.sqrt(np.dot(top,top)) for top in doc_LDA_topic_vectors_train] doc_LDA_topic_vectors_test = [top/np.sqrt(np.dot(top,top)) for top in doc_LDA_topic_vectors_test]Get the business topic reviews# This is business ids corresponding to the business LDA vectors bus_lda_ids = pickle.load(open('../output/bus_ids_bars_LDA.pickle', 'rb')) # pd.dataframe('bus_id', 'topic_vector') # # for each review, lookup the corresponding business topic vector # for bus_id in bus_ids_train[:2]: # The topic vector for a given business is given by this dataframe. bus_lda_ids = pickle.load(open('../output/bus_ids_bars_LDA.pickle', 'rb')) bus_vectors = pd.DataFrame() bus_vectors['business_id'] = bus_lda_ids transformed = bus_lda.lda.fit_transform(bus_lda.tf) bus_vectors['topic_vector'] = [bus_topic_vec for bus_topic_vec in transformed] normed_topic_vecs = map(lambda topic_vec: topic_vec/sqrt(np.dot(topic_vec, topic_vec)), bus_vectors.topic_vector) bus_vectors.topic_vector = normed_topic_vecs bus_vectors.to_pickle('../output/business_LDA_vectors.pickle') #print bus_vectors.shape # Find business topic vector each review review_bus_vectors_train = pd.DataFrame({'business_id':bus_ids_train}) review_bus_vectors_train = pd.merge(review_bus_vectors_train, bus_vectors, how='left', on='business_id') # Same for test set. review_bus_vectors_test = pd.DataFrame({'business_id':bus_ids_test}) review_bus_vectors_test = pd.merge(review_bus_vectors_test, bus_vectors, how='left', on='business_id') # Some businesses don't have topic vectors... drop those. # Need to also drop them from the relative reviews blacklist_train = [] for i, rev in enumerate(review_bus_vectors_train.topic_vector.values): if np.isnan(rev).any(): blacklist_train.append(i) blacklist_test = [] for i, rev in enumerate(review_bus_vectors_test.topic_vector.values): if np.isnan(rev).any(): blacklist_test.append(i) review_bus_vectors_train['review_diff'] = rev_diff_train review_bus_vectors_test['review_diff'] = rev_diff_test review_bus_vectors_train['review_topic_vector'] = [doc for doc in doc_LDA_topic_vectors_train] review_bus_vectors_test['review_topic_vector'] = [doc for doc in doc_LDA_topic_vectors_test] # Drop the blacklisted businesses.... 
review_bus_vectors_train = review_bus_vectors_train.drop(review_bus_vectors_train.index[blacklist_train]) review_bus_vectors_test = review_bus_vectors_test.drop(review_bus_vectors_test.index[blacklist_test])Stack the input vectorsX_TRAIN = np.append(np.vstack(review_bus_vectors_train.review_topic_vector.values), np.vstack(review_bus_vectors_train.topic_vector.values), axis=1) Y_TRAIN = review_bus_vectors_train.review_diff.values X_TEST = np.append(np.vstack(review_bus_vectors_test.review_topic_vector.values), np.vstack(review_bus_vectors_test.topic_vector.values), axis=1) Y_TEST = review_bus_vectors_test.review_diff.values np.save('../output/bar_X_TRAIN.npy', X_TRAIN) np.save('../output/bar_Y_TRAIN.npy', Y_TRAIN) np.save('../output/bar_X_TEST.npy', X_TEST) np.save('../output/bar_Y_TEST.npy', Y_TEST) print X_TRAIN.shape print Y_TRAIN.shape(186752, 40) (186752,)Get all businesses that were reviewed by a user The objective function we want to optimize is the L2 loss on the difference between the users actual rating minus the average (this is $f$) and the predicted rating differential for the business $J = (f-\hat{f})^2$. In contrast to preducting the rating directly, this will allow the supervised alogrithm to try and predict deviations from the average behavior. Hence we can try to find underdogs, or places that may not be rated well, but have a high probability of being liked by the user.from sklearn.ensemble import RandomForestRegressor def RunRFClassifier(n_samples, X, Y, **kwargs): RF = RandomForestRegressor(**kwargs) RF.fit(X[:n_samples], Y[:n_samples]) return RF def getRMS_error(RF, X, Y): Y_predict = RF.predict(X) MSE = (Y-Y_predict)**2 RMS_errors = np.sqrt(np.average(MSE)) return RMS_errors, MSE RF_settings = { 'n_estimators':500, 'max_depth':10, 'min_samples_split':2, 'min_samples_leaf':5, 'min_weight_fraction_leaf':0.0, 'max_features':'auto', 'max_leaf_nodes':None, 'bootstrap':True, 'oob_score':True, 'n_jobs':12, 'random_state':0} RF = RunRFClassifier(20000, X_TRAIN, Y_TRAIN, **RF_settings) RMS_train, MSE_train = getRMS_error(RF, X_TRAIN[:n_samples], Y_TRAIN[:n_samples]) RMS_test, MSE_test = getRMS_error(RF, X_TEST[:n_samples], Y_TEST[:n_samples]) print 'RMS Training Error', RMS_train print 'RMS Test Error', RMS_test bins = np.linspace(0,4,41) plt.hist(Y_TRAIN[:n_samples]**2, bins, histtype='step', label='Random') plt.hist(MSE_train, bins, histtype='step', label='RF Train') plt.hist(MSE_test, bins, histtype='step', label='RF Test',) plt.yscale('log') plt.legend(frameon=False) from sklearn.neural_network import MLPRegressor MLP = MLPRegressor(hidden_layer_sizes=(50, ), activation='relu', algorithm='adam', alpha=0.0001, batch_size='auto', learning_rate='constant', learning_rate_init=0.001, power_t=0.5, max_iter=200, shuffle=True, random_state=None, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True, early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08) print np.dot(X_TRAIN[:2000],X_TRAIN[:2000].T).shape for i in range(100): print np.sqrt(np.dot(X_TEST[i],X_TEST[i]))1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 
1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.41421356237 1.4142[...]Train a Whitehouse press briefings ModelThis is based off a notebook created by [](http://minimaxir.com) originally (so most of the credits go there!) and updated by [](https://abdulhannan.in). The license is [here](scrollTo=wmTXWNUygS5E).*Last updated: May 3, 2020*Retrain an advanced text generating neural network on any text dataset **for free on a GPU using Collaboratory** using Max Woolf's `gpt-2-simple` library!For more about `gpt-2-simple`, you can visit [this GitHub repository](https://github.com/minimaxir/gpt-2-simple). You can also read this [blog post](https://minimaxir.com/2019/09/howto-gpt2/) for more information how to use this notebook!To get started:1. Copy this notebook to your Google Drive to keep it and save your changes. (File -> Save a Copy in Drive)2. Make sure you're running the notebook in Google Chrome.3. Run the cells below:%tensorflow_version 1.x !pip install -q gpt-2-simple import gpt_2_simple as gpt2 from datetime import datetime from google.colab import files import tensorflow as tfTensorFlow 1.x selected. Building wheel for gpt-2-simple (setup.py) ... [?25l[?25hdone WARNING:tensorflow: The TensorFlow contrib module will not be included in TensorFlow 2.0. For more information, please see: * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md * https://github.com/tensorflow/addons * https://github.com/tensorflow/io (for I/O related ops) If you depend on functionality not listed there, please file an issue.GPUOne of the main advantages of Google Colaboratory is that you can GPU access for free! Yes, for FREE! Hence, this is the perfect places to train your model from scratch.Run this command to see if there is a GPU connected to this instance.print('GPU connected?\n The answer is: ', tf.test.is_gpu_available())If the answer is `False`, then go to Runtime -> Change Runtime Type -> Under 'Hardware Accelerator', choose 'GPU'. Then re-run this notebook from the beginning. Downloading GPT-2If you're retraining a model on new text, you need to download the GPT-2 model first. There are three released sizes of GPT-2:* `124M` (default): the "small" model, 500MB on disk.* `355M`: the "medium" model, 1.5GB on disk.* `774M`: the "large" model, cannot currently be finetuned with Colaboratory but can be used to generate text from the pretrained model (see later in Notebook)* `1558M`: the "extra large", true model. Will not work if a K80 GPU is attached to the notebook. (like `774M`, it cannot be finetuned).Larger models have more knowledge, but take longer to finetune and longer to generate text. You can specify which base model to use by selecting the ceel from the dropdown form in the next cell.The next cell downloads it from Google Cloud Storage and saves it in the Colaboratory VM at `/models/`.This model isn't permanently saved in the Colaboratory VM; you'll have to redownload it if you want to retrain it at a later time.#@title Choose how big a model you want to train. 
#@markdown Bigger model takes more GPU space and time to train! model_name = "355M" #@param ["124M", "355M", "774M", "1558M"] gpt2.download_gpt2(model_name=model_name)Fetching checkpoint: 1.05Mit [00:00, 361Mit/s] Fetching encoder.json: 1.05Mit [00:00, 73.1Mit/s] Fetching hparams.json: 1.05Mit [00:00, 223Mit/s] Fetching model.ckpt.data-00000-of-00001: 498Mit [00:04, 110Mit/s] Fetching model.ckpt.index: 1.05Mit [00:00, 254Mit/s] Fetching model.ckpt.meta: 1.05Mit [00:00, 127Mit/s] Fetching vocab.bpe: 1.05Mit [00:00, 149Mit/s]Mounting Google DriveThe best way to get input text to-be-trained into the Colaboratory VM, and to get the trained model *out* of Colaboratory, is to route it through Google Drive *first*.Running this cell (which will only work in Colaboratory) will mount your personal Google Drive in the VM, which later cells can use to get data in/out. (it will ask for an auth code; that auth is not saved anywhere)gpt2.mount_gdrive()Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly Enter your authorization code: ·········· Mounted at /content/driveUploading a Text File to be Trained to ColaboratoryIn the Colaboratory Notebook sidebar on the left of the screen, select *Files*. From there you can upload files:![alt text](https://i.imgur.com/TGcZT4h.png)Upload **any smaller text file** (<10 MB) and update the file name in the cell below, then run the cell.# import some packages which will be used import pandas as pd from bs4 import BeautifulSoup import urllib.request # define some methods which will be used to extract data from the HTML websites def get_html_text(url_p): print('Reading URL: %s'%(url_p)) html_t = urllib.request.urlopen(url_p).read() soup_t = BeautifulSoup(html_t) # kill all script and style elements for script in soup_t(["script", "style", 'aside', 'head']): script.extract() # rip it out return soup_t.body.main def get_soup(url_p): html_t = urllib.request.urlopen(url_p).read() soup_t = BeautifulSoup(html_t) # kill all script and style elements for script in soup_t(["script", "style", 'aside', 'head']): script.extract() # rip it out return soup_t url_list = 'https://www.whitehouse.gov/search/?s=Coronavirus+Task+Force&wpsolr_fq%5B0%5D=issue_str%3AHealthcare' base_url = 'https://www.whitehouse.gov' soup_l = get_soup(url_list) urls = [] c_url = 'https://www.whitehouse.gov/search/?s=Coronavirus+Task+Force&wpsolr_fq%5B0%5D=issue_str%3AHealthcare' page_num = 0 while True: page_num+=1 soup_l = get_soup(c_url) for s in soup_l.find_all('article'): link = s.find("h2", {'class': 'briefing-statement__title'}) if link is not None: urls.append(link.a['href']) # print(link.a['href']) next = soup_l.find("a", {'class':'pagination__next'}) # print(next) if next is None: break; c_url = base_url + next.get('href') print('Number of pages parsed: %d'%(page_num)) # print(urls[1]) samples = list(map(lambda x: get_html_text(x).get_text().strip(), urls)) #@markdown ### Enter a name for the file that would be used to save the extracted text to Google Drive: op_file = 'input2.txt' #@param {type: "string"} def save_text_gdrive(filename, samples): with open('/content/drive/My Drive/%s' % 
(filename), 'w') as f: for inp in samples: f.write(inp + '\n\n')If your text file is larger than 10MB, it is recommended to upload that file to Google Drive first, then copy that file from Google Drive to the Colaboratory VM. Now that you have saved the file to Google Drive, you can now use this file whenever you want for any other tasks too!#@markdown Enter the name of the file in google Drive for the text on which you want to train. file_name = 'input2.txt' #@param {type: "string"} gpt2.copy_file_from_gdrive(file_name)Finetune GPT-2The next cell will start the actual finetuning of GPT-2. It creates a persistent TensorFlow session which stores the training config, then runs the training for the specified number of `steps`. (to have the finetuning run indefinitely, set `steps = -1`)The model checkpoints will be saved in `/checkpoint/run1` by default. The checkpoints are saved every 500 steps (can be changed) and when the cell is stopped.The training might time out after 4ish hours; make sure you end training and save the results so you don't lose them!**IMPORTANT NOTE:** If you want to rerun this cell, **restart the VM first** (Runtime -> Restart Runtime). You will need to rerun imports but not recopy files.Other optional-but-helpful parameters for `gpt2.finetune`:* **`restore_from`**: Set to `fresh` to start training from the base GPT-2, or set to `latest` to restart training from an existing checkpoint.* **`sample_every`**: Number of steps to print example output* **`print_every`**: Number of steps to print training progress.* **`learning_rate`**: Learning rate for the training. (default `1e-4`, can lower to `1e-5` if you have <1MB input data)* **`run_name`**: subfolder within `checkpoint` to save the model. This is useful if you want to work with multiple models (will also need to specify `run_name` when loading the model)* **`overwrite`**: Set to `True` if you want to continue finetuning an existing model (w/ `restore_from='latest'`) without creating duplicate copies.#@markdown Give a name for your run. This will be used as a name to store all the intermediate text, mdoels and checkpoints. run_name = 'run1' #@param {type:"string"} sess = gpt2.start_tf_sess() gpt2.finetune(sess, dataset=file_name, model_name=model_name, steps=1000, restore_from='fresh', run_name=run_name, print_every=10, sample_every=200, save_every=500 )WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/gpt_2_simple/src/sample.py:17: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.where in 2.0, which has the same broadcast rule as np.where WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/gpt_2_simple/src/memory_saving_gradients.py:62: get_backward_walk_ops (from tensorflow.contrib.graph_editor.select) is deprecated and will be removed after 2019-06-06. Instructions for updating: Please use tensorflow.python.ops.op_selector.get_backward_walk_ops. Loading checkpoint models/355M/model.ckpt INFO:tensorflow:Restoring parameters from models/355M/model.ckptAfter the model is trained, you can copy the checkpoint folder to your own Google Drive.If you want to download it to your personal computer, it's strongly recommended you copy it there first, then download from Google Drive. The checkpoint folder is copied as a `.rar` compressed file; you can download it and uncompress it locally.gpt2.copy_checkpoint_to_gdrive(run_name=run_name)You're done! 
Feel free to go to the **Generate Text From The Trained Model** section to generate text based on your retrained model. Load a Trained Model CheckpointRunning the next cell will copy the `.rar` checkpoint file from your Google Drive into the Colaboratory VM.#@markdown Add the name of the run which you stored on to your Google Drive after running the above set of cells. #@markdown **PS** if you have run the training part of the section above, you do not have to run this cell! run_name = run_name #@param {type: "string"} gpt2.copy_checkpoint_from_gdrive(run_name=run_name)The next cell will allow you to load the retrained model checkpoint + metadata necessary to generate text.**IMPORTANT NOTE:** If you want to rerun this cell, **restart the VM first** (Runtime -> Restart Runtime). You will need to rerun imports but not recopy files.sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name=run_name)Loading checkpoint checkpoint/run1/model-1000 INFO:tensorflow:Restoring parameters from checkpoint/run1/model-1000Generate Text From The Trained ModelAfter you've trained the model or loaded a retrained model from checkpoint, you can now generate text. `generate` generates a single text from the loaded model.gpt2.generate(sess, run_name=run_name)The United Nations has been working very hard.  They’ve been working for a long time, as you know.  We’re now at a point where we have to call off the assembly, which unfortunately took place this morning.  We’ll be doing it in the future. The United Nations has been doing excellent work.  They’ve been talking about everything. In the House, they’ve had a lot to say.  They’ve had a lot of discussion.  There’s a lot of people talking to each other.  There’s a lot of people that are really getting together, and they’re talking about things that are very important. We have a great relationship with China.  They had a very short period of time ago.  We’ve had a very good relationship.  We’re making a lot of progress there.  And they’ve been — in exchange for allowing us to do this, we’re going to be bringing our ships back to Guam and our air bridge back into the United States. I just spoke to President Xi.  I had a very good talk with the President of China.  It was a long talk.  And I’m [...]If you're creating an API based on your model and need to pass the generated text elsewhere, you can do `text = gpt2.generate(sess, return_as_list=True)[0]`You can also pass in a `prefix` to the generate function to force the text to start with a given character sequence and generate text from there (good if you add an indicator when the text starts).You can also generate multiple texts at a time by specifing `nsamples`. Unique to GPT-2, you can pass a `batch_size` to generate multiple samples in parallel, giving a massive speedup (in Colaboratory, set a maximum of 20 for `batch_size`).Other optional-but-helpful parameters for `gpt2.generate` and friends:* **`length`**: Number of tokens to generate (default 1023, the maximum)* **`temperature`**: The higher the temperature, the crazier the text (default 0.7, recommended to keep between 0.7 and 1.0)* **`top_k`**: Limits the generated guesses to the top *k* guesses (default 0 which disables the behavior; if the generated output is super crazy, you may want to set `top_k=40`)* **`top_p`**: Nucleus sampling: limits the generated guesses to a cumulative probability. (gets good results on a dataset with `top_p=0.9`)* **`truncate`**: Truncates the input text until a given sequence, excluding that sequence (e.g. 
if `truncate=''`, the returned text will include everything before the first ``). It may be useful to combine this with a smaller `length` if the input texts are short.* **`include_prefix`**: If using `truncate` and `include_prefix=False`, the specified `prefix` will not be included in the returned text.#@markdown Here, change the prefic to whatever you like, I have kept this so that it generates something like the press house briefings! It is a good experiment to try removing it completely and seeing what you get 😊 gpt2.generate(sess, length=1023, temperature=0.7, prefix="Q Mr. President,", nsamples=5 # batch_size=2 )Q Mr. President, 24 states — 24 states — in addition to New York and 34 before — reported 100 or more test positive for coronavirus.  Around 1,200 of those — 34 — are already home.  Can you reassure Americans that those are clean settings and that the coronavirus is going to be contained? THE PRESIDENT:  So, when Governor Cuomo was saying in the last time we were here — and I can assure you, Chris, governors across the country, including governor Chin, saying, �We’ve had 100 or more tests.  We’ve had no cases.  This is like — you know, we hope it’s going to go away.  But it’s going to come back because we had a great testing system.  We had more than enough testing to go — as many as we could.  And then we moved over to more — we moved over to — Q    And when did you make the decision to go on a nationwide exclusion? THE PRESIDENT:  — statewide exclusion? Q    No, no, not nationwide exclusion.  I mean, the states didn’t know the virus existed.  And the CDC — and I’m sure you know, Bre[...]For bulk generation, you can generate a large amount of text to a file and sort out the samples locally on your computer. The next cell will generate a generated text file with a unique timestamp.You can rerun the cells as many times as you want for even more generated texts!gen_file = 'gpt2_gentext_{:%Y%m%d_%H%M%S}.txt'.format(datetime.utcnow()) gpt2.generate_to_file(sess, destination_path=gen_file, length=500, temperature=0.7, nsamples=100, batch_size=20 ) # may have to run twice to get file to download files.download(gen_file)Generate Text From The Pretrained ModelIf you want to generate text from the pretrained model, not a finetuned model, pass `model_name` to `gpt2.load_gpt2()` and `gpt2.generate()`.This is currently the only way to generate text from the 774M or 1558M models with this notebook.model_name = "774M" gpt2.download_gpt2(model_name=model_name) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, model_name=model_name) gpt2.generate(sess, model_name=model_name, prefix="The secret of life is", length=100, temperature=0.7, top_p=0.9, nsamples=5, batch_size=5 )The secret of life is that it's really easy to make it complicated," said , the host of the popular science show "ye the Science Guy." "And this is one of the reasons why we all need to be smarter about science, because we can't keep up with the amazing things that are going on all the time." While Nye is correct that "everything that's going on all the time" is making the world a better place, he misses the point. This is not ==================== The secret of life is in the rhythm of the universe. It's not a mystery. It's not a mystery to me. It's the nature of the universe. It's the beauty of the universe. It's the way the universe works. It's the way the universe is. It's the way the universe is going to work. It's the way the universe is. It's the way the universe is. It's the way the universe is. 
It's the way the universe is. It's the way ==================== The secret of life is in the universe. - The Red Devil It's the end of the world as we know it, and the[...]EtceteraIf the notebook has errors (e.g. GPU Sync Fail), force-kill the Colaboratory virtual machine and restart it with the command below:!kill -9 -1Attribution scores Statistics In this notebook we retrieve attribution scores statistics and analyze our hypothesis using significance testings.%load_ext autoreload %autoreload 2 import os import sys module_path = os.path.dirname(os.path.dirname(os.path.abspath(os.path.join('.')))) if module_path not in sys.path: print('Add root path to system path: ', module_path) sys.path.append(module_path) module_path += '/' import re import gc import tqdm import argparse import numpy as np import datetime import time import spacy import pandas as pd from sklearn.metrics import f1_score from torch import optim import torch.nn as nn from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence from src.utils.preprocess_utils import * from src.training.train_utils import train_model, test_model from src.evaluation.test_save_stats import * from src.utils.utils import * from src.evaluation.xai_utils import * from src.evaluation.xai_bert_utils import BertModelWrapper import captum from captum.attr import LayerIntegratedGradients, TokenReferenceBase, visualization, IntegratedGradients from typing import Any, Iterable, List, Tuple, Union from IPython.core.display import HTML, display[nltk_data] Downloading package stopwords to [nltk_data] C:\Users\Richard\AppData\Roaming\nltk_data... [nltk_data] Package stopwords is already up-to-date!Initialization ⚠️ Before running the cells below, make sure to run :- test_save_stats.py --model=MODEL_NAME--saved_model_path=PATH_TO_MODEL (see source code for more details) + any model parameters neededThe code saves the samples for which the model is sure of its prediction (ie. when it the probability is either really close to 1 (Hate) or close to 0 (Neutral)). We are now going to visualize the explainability of the model (ie. the importance of words in the model's decision) respectively for True Positives (TP), False Positives (FP), True Negatives (TN) and False Negatives(FN). 
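As a rough sketch of the filtering step described above (not the actual `test_save_stats.py` code), confident predictions could be bucketed into TP/FP/TN/FN like this; the column names `label` and `pred` mirror the ones used later in this notebook, and the 0.9 threshold is an arbitrary illustrative choice:

```python
import pandas as pd

def bucket_confident(df: pd.DataFrame, threshold: float = 0.9) -> dict:
    """Keep only samples the model is sure about and split them by outcome."""
    # "Sure" = predicted probability close to 1 (Hate) or close to 0 (Neutral)
    confident = df[(df['pred'] >= threshold) | (df['pred'] <= 1 - threshold)]
    pred_hate = confident['pred'] >= 0.5
    true_hate = confident['label'] == 1
    return {
        'TP': confident[pred_hate & true_hate],
        'FP': confident[pred_hate & ~true_hate],
        'TN': confident[~pred_hate & ~true_hate],
        'FN': confident[~pred_hate & true_hate],
    }
```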
**Choose your configuration here for the attribution statistics retrieval**```python Your model type (Currently only BERT is supported)model_type = 'DistillBert' Dataset to retrieve stats from OffensEvaltest_dataset_name = 'offenseval' Implicit Hatetest_dataset_name = 'implicithate' Covid Hatetest_dataset_name = 'covidhate' Models weights and test inference stats Trained on OffensEvalsaved_model_path = module_path + SAVED_MODELS_PATH + 'DistillBert_2022-04-15_02-48-34_trained_testAcc=0.8026.pth'stats_path = module_path + STATS_CSV + "stats_DistillBert_2022-04-15_02-48-34_test_crossentropy_offenseval.csv" Trained on Implicit Hatesaved_model_path = module_path + SAVED_MODELS_PATH + 'DistillBert_2022-04-18_02-48-16_trained_testAcc=0.7585.pth'stats_path = module_path + STATS_CSV + "stats_DistillBert_2022-04-18_02-48-16_test_crossentropy_implicithate.csv" Trained on Covid Hatesaved_model_path = module_path + SAVED_MODELS_PATH + 'DistillBert_2022-04-18_02-24-40_trained_testAcc=0.8397.pth'stats_path = module_path + STATS_CSV + "stats_DistillBert_2022-04-18_02-24-40_test_crossentropy_covidhate.csv"```# Your model type model_type = 'DistillBert' # Dataset to retrieve stats from test_dataset_name = 'offenseval' # Models weights and test inference stats saved_model_path = module_path + SAVED_MODELS_PATH + 'DistillBert_2022-04-18_02-24-40_trained_testAcc=0.8397.pth' stats_path = module_path + STATS_CSV + "stats_DistillBert_2022-04-18_02-24-40_test_crossentropy_covidhate.csv"⚠️ When retrieving attribution scores stats, the saved pkl file is quite big (~313 Mo for offenseval, ~880 Mo for implicithate and ~170 Mo for covidhate), please use `IN_PLACE = True` to not save the pkl file and do all the operations in-place ⚠️# Stats retrieval parameters IN_PLACE = False # Does not save pkl file # Path to attrib stats attrib_stats_pkl_path = '_'.join(re.split('_', stats_path)[:-1]) + '_' + test_dataset_name + '.pkl' stats_exists = os.path.exists(attrib_stats_pkl_path) print(attrib_stats_pkl_path) print('Stats already exists', stats_exists) print('IN_PLACE', IN_PLACE) # Skip attribution stats retrieval process if .pkl exists SKIP_RETRIEVAL = stats_exists and not IN_PLACE print('Skipping the stats retrieval:', SKIP_RETRIEVAL) # Specific other parameters fix_length = NoneD:\Projets\Georgia Tech\Comp Social Science\cs6471-project/stats-results/stats_DistillBert_2022-04-18_02-24-40_test_crossentropy_offenseval.pkl Stats already exists True IN_PLACE False Skipping the stats retrieval: TrueInitialization for stats retrievaldict_model_id_training_set = {'2022-04-15_02-48-34': 'offenseval', '2022-04-18_02-48-16': 'implicithate', '2022-04-18_02-24-40': 'covidhate'} # Get model_id model_id = get_model_id_from_path(saved_model_path) print('Model trained on', dict_model_id_training_set[model_id]) if not SKIP_RETRIEVAL: device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') print("Device:", device) field, tokenizer, _, _, _ = get_datasets(test_dataset_name, test_dataset_name, model_type, fix_length, module_path=module_path) print("Loading vocabulary...") vocab_stoi, vocab_itos = get_vocab_stoi_itos(field) print("Vocabulary Loaded") if not SKIP_RETRIEVAL: print("Loading Model...") model = load_model(model_type, field, device) model = load_trained_model(model, saved_model_path, device) print("Model Loaded.") print("Loading Stats Data..") df = pd.read_csv(stats_path) df = df.drop(columns=["Unnamed: 0"]) print(df.shape) df.head()Loading Stats Data.. 
(458, 6)Initialization of Integrated Gradientsif not SKIP_RETRIEVAL: bert_model_wrapper = BertModelWrapper(model).to(device) ig = IntegratedGradients(bert_model_wrapper)Statistics retrieval of the Attribution scoresimport pickle from scipy import stats as scipy_stats from matplotlib import cm import matplotlib.colors as mcolors from matplotlib.colors import ListedColormap import seaborn as sns from xai_bert_utils import model_explainability_bert_with_stats, process_attributions_stats %%time if not SKIP_RETRIEVAL: max_samples = len(df) stats = model_explainability_bert_with_stats(bert_model_wrapper, tokenizer, ig, df,\ max_samples, device) if not IN_PLACE: with open(attrib_stats_pkl_path, 'wb') as f: pickle.dump(stats, f) else: with open(attrib_stats_pkl_path, 'rb') as f: stats = pickle.load(f) print('length stats', len(stats)) %%time dict_word_attributions = {} dict_attrib_stats = {'original_idx': [], 'label': [], 'pred': [], 'pred_ind': [], 'attributions': [],} for stat in stats: attributions_ig, delta, tokens, input_ids, input_embedding, pred, pred_ind, label, original_idx = stat attributions = process_attributions_stats(attributions_ig) for idx in range(len(tokens)): word = tokens[idx] attribution = attributions[idx] if word not in dict_word_attributions: dict_word_attributions[word] = [attribution] else: dict_word_attributions[word] += [attribution] dict_attrib_stats['original_idx'].append(original_idx) dict_attrib_stats['label'].append(label) dict_attrib_stats['pred'].append(pred) dict_attrib_stats['pred_ind'].append(pred_ind) dict_attrib_stats['attributions'].append(np.mean(attributions))Wall time: 1.93 sWord attributions score with Word Cloudfrom wordcloud import WordCloud, STOPWORDS def plot_wordcloud_attribution(attrib_words, invert=False, width=800, height=800, min_font_size=8, save_fig_path=''): comment_words = '' stopwords = set(STOPWORDS) inverted = -1 if invert else 1 for word, val in attrib_words: new_word = (word + ' ') * int(val*100 * inverted) comment_words += new_word wordcloud = WordCloud(width=width, height=height, background_color='white', stopwords=stopwords, min_font_size=min_font_size, collocations=False).generate(comment_words) # plot the WordCloud image plt.figure(figsize = (8, 8), facecolor = None) plt.imshow(wordcloud) plt.axis("off") plt.tight_layout(pad = 0) if len(save_fig_path) > 0: plt.savefig(save_fig_path) plt.show() dict_word_attributions_mean = {} for word in dict_word_attributions: dict_word_attributions_mean[word] = np.mean(dict_word_attributions[word]) attrib_words = dict_word_attributions_mean.items() # Highest attribution scores high_attrib_words = sorted(attrib_words, reverse=True, key=lambda x: x[1])[:10] print(high_attrib_words) # Plot wordcloud save_fig_path = module_path + FIGURES_PATH + 'high_attrib_wordcloud_{}_{}_{}.png'.format(model_id, dict_model_id_training_set[model_id], test_dataset_name) plot_wordcloud_attribution(attrib_words, save_fig_path=save_fig_path) # Lowest attribution scores low_attrib_words = sorted(attrib_words, reverse=False, key=lambda x: x[1])[:10] print(low_attrib_words) # Plot wordcloud save_fig_path = module_path + FIGURES_PATH + 'low_attrib_wordcloud_{}_{}_{}.png'.format(model_id, dict_model_id_training_set[model_id], test_dataset_name) plot_wordcloud_attribution(attrib_words, invert=True, save_fig_path=save_fig_path)[('accomplishment', -0.7368402759100321), ('[SEP]', -0.6462047877900636), ('##ads', -0.6360545245529382), ('##ticus', -0.5430265275160613), ('awe', -0.5354271857939892), ('nueva', -0.5130254478353787), 
('follows', -0.48293290956732454), ('referendum', -0.466608899820457), ('pants', -0.4136565144131343), ('rb', -0.41212050259862776)]Instance attribution score Distributionsdf_attrib_stats = pd.DataFrame.from_dict(dict_attrib_stats) df_attrib_stats non_hate_attrib_stats = df_attrib_stats.loc[df_attrib_stats['label'] == 0]['attributions'] hate_attrib_stats = df_attrib_stats.loc[df_attrib_stats['label'] == 1]['attributions'] save_fig_path = module_path + FIGURES_PATH + 'attrib_pred_plot_{}_{}.png'.format(model_id, test_dataset_name) sns.lmplot(x='pred', y='attributions', data=df_attrib_stats, hue='label', fit_reg=False) plt.title('Attribution scores by prediction score for {}'.format(test_dataset_name)) plt.savefig(save_fig_path) plt.show() r, pvalue = scipy_stats.spearmanr(df_attrib_stats['pred'], df_attrib_stats['attributions']) print('r, pvalue between pred and attributions', round(r, 4), pvalue) sns.lmplot(x='label', y='attributions', data=df_attrib_stats, fit_reg=False) plt.show() r, pvalue = scipy_stats.spearmanr(df_attrib_stats['label'], df_attrib_stats['attributions']) print('r, pvalue between label and attributions', round(r, 4), pvalue) x_min, x_max = -0.2, 0.2 save_fig_path = module_path + FIGURES_PATH + 'attrib_distribution_{}_{}_{}.png'.format(model_id, dict_model_id_training_set[model_id], test_dataset_name) sns.histplot(x='attributions', data=df_attrib_stats, hue='label') # Vertical lines are the mean of the distributions plt.axvline(x=non_hate_attrib_stats.mean(), color='steelblue', ls='--', lw=2.5, label='Mean Non-Hate') plt.axvline(x=hate_attrib_stats.mean(), color='orange', ls='--', lw=2.5, label='Mean Non-Hate') plt.xlim(x_min, x_max) plt.title('Attribution distribution for {} trained on {}'.format(test_dataset_name, dict_model_id_training_set[model_id])) plt.savefig(save_fig_path) plt.show()Significance testingspvalue_threshold = 0.001Normality testsfrom scipy.stats import normaltest print('Non hate attrib stats') value, pvalue = normaltest(non_hate_attrib_stats) print('value:', value, 'pvalue:', pvalue) if pvalue >= pvalue_threshold: print('It is likely that non_hate_attrib_stats is normal') else: print('It is unlikely that non_hate_attrib_stats is normal') print('Hate attrib stats') value, pvalue = normaltest(hate_attrib_stats) print('value:', value, 'pvalue:', pvalue) if pvalue >= pvalue_threshold: print('It is likely that hate_attrib_stats is normal') else: print('It is unlikely that hate_attrib_stats is normal')Hate attrib stats value: 162.06084748270735 pvalue: 6.440715306795656e-36 It is unlikely that hate_attrib_stats is normalNon-parametric tests for distribution similarityfrom scipy.stats import ks_2samp from scipy.stats import mannwhitneyu value, pvalue = ks_2samp(non_hate_attrib_stats, hate_attrib_stats) print('value:', value, 'pvalue:', pvalue) if pvalue > pvalue_threshold: print('Samples are likely drawn from the same distributions (fail to reject H0)') else: print('Samples are likely drawn from different distributions (reject H0)') value, pvalue = mannwhitneyu(non_hate_attrib_stats, hate_attrib_stats) print('value:', value, 'pvalue:', pvalue) if pvalue > pvalue_threshold: print('Samples are likely drawn from the same distributions (fail to reject H0)') else: print('Samples are likely drawn from different distributions (reject H0)')value: 67728.5 pvalue: 0.04118753659824401 Samples are likely drawn from the same distributions (fail to reject H0)Effect size using the non parametric Spearman Correlation Testp-value too high, we leave effect size for future 
studies and interpretation.We obtain different results for each run of the cell below with our uniform sampling method.# Sample non_hate_attrib_stats to have the same number of samples as hate_attrib_stats r, pvalue = scipy_stats.spearmanr(non_hate_attrib_stats.sample(n=len(hate_attrib_stats)), hate_attrib_stats) print('r, pvalue between pred and attributions', round(r, 4), pvalue)r, pvalue between pred and attributions 0.1486 0.021313876077016063Data Mining and Machine Learning Edgar Acuna Error Estimation (Prediction Estimation) October 2018 Error Estimation using Cross validation. Dataset Diabetes#Example of estimating prediction accuracy by cross-validation #using the LDA classifier and the Diabetes dataset import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.model_selection import ShuffleSplit from sklearn.metrics import recall_score, f1_score from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.model_selection import LeaveOneOut names= ['v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7','V8','class'] diab=pd.read_table("http://academic.uprm.edu/eacuna/diabetes.dat",header=None,names=names) #Extracting the class vector and the predictor matrix y=diab["class"] X=diab.iloc[:,0:8] y1=y.to_numpy() X1=X.to_numpy()Estimating the error by resubstitution#Fitting the discriminant analysis and computing the accuracy ldadis = LinearDiscriminantAnalysis().fit(X1,y1) #Accuracy rate ldadis.score(X1, y1) #Computing the recall pred=ldadis.predict(X1) recall_score(y1,pred) #Computing the f1 score f1_score(y1,pred) #Estimating the accuracy by cross-validation from sklearn.model_selection import cross_val_score clf = LinearDiscriminantAnalysis() scores = cross_val_score(clf, X1, y1, cv=10) scores #Computing the mean accuracy and a confidence interval print("CV Accuracy: %0.3f (+/- %0.3f)" % (scores.mean(), scores.std() * 2)) #Accuracy rate using cross-validation with 10 repetitions of 10 folds each clf = LinearDiscriminantAnalysis() pred1=[] for i in range(0, 11): cv = ShuffleSplit() scores = cross_val_score(clf, X1, y1, cv=cv) pred1.append(scores) print("Accuracy by cross validacion=", np.mean(pred1), "+/-", np.std(pred1))Accuracy by cross validacion= 0.775088547816 +/- 0.0495902905372Error Estimation by the holdout method#Estimating the error by the holdout method X_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size=0.3,random_state=0) X_train, y_train X_test, y_test ldadiab = LinearDiscriminantAnalysis().fit(X_train, y_train) ldadiab.score(X_test, y_test) #Estimating the accuracy by the holdout method with 50 samples pred=[] for i in range(0,51): X_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size=0.33) ldadiab = LinearDiscriminantAnalysis().fit(X_train, y_train) pred.append(ldadiab.score(X_test, y_test)) print("Accuracy by holdout: %0.3f (+/- %0.3f)" % (np.mean(pred), np.std(pred)))Accuracy by holdout: 0.770 (+/- 0.022)Error Estimation for the Vehicle dataset#Example 2.
Reading the vehicle data, which has 4 classes and 18 predictors df1=pd.read_csv("c://PW-PR/vehicle.csv") df1.info() #Converting the predictor table and the class column into matrices y=df1['Class'] X=df1.iloc[:,0:18] y1=y.to_numpy() X1=X.to_numpy() #Fitting the discriminant analysis and computing the accuracy ldadis = LinearDiscriminantAnalysis().fit(X1,y1) #Accuracy rate ldadis.score(X1, y1) #Estimating the accuracy by cross-validation from sklearn.model_selection import cross_val_score clf = LinearDiscriminantAnalysis() scores = cross_val_score(clf, X1, y1, cv=10) scores #Computing the mean accuracy and a confidence interval print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2)) #Estimating the error by the holdout method X_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size=0.3, random_state=0) X_train, y_train X_test, y_test ldaveh = LinearDiscriminantAnalysis().fit(X_train, y_train) ldaveh.score(X_test, y_test)Developing an AI applicationGoing forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications. In this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories; you can see a few examples below. The project is broken down into multiple steps:* Load and preprocess the image dataset* Train the image classifier on your dataset* Use the trained classifier to predict image contentWe'll lead you through each part which you'll implement in Python.When you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and will end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new.First up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. As you work through this notebook and find you need to import a package, make sure to add the import up here.# Imports here import numpy as np import matplotlib.pyplot as plt import torch import torch.optim as optim from torch import nn from torch.utils.data.sampler import SubsetRandomSampler from torch.utils.data.dataloader import DataLoader import torchvision.transforms as transforms import torchvision.datasets import torchvision.models as models from PIL import ImageLoad the dataHere you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). You can [download the data here](https://s3.amazonaws.com/content.udacity-data.com/courses/nd188/flower_data.zip). The dataset is split into two parts, training and validation.
For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. If you use a pre-trained network, you'll also need to make sure the input data is resized to 224x224 pixels as required by the networks.The validation set is used to measure the model's performance on data it hasn't seen yet. For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size.The pre-trained networks available from `torchvision` were trained on the ImageNet dataset where each color channel was normalized separately. For both sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1.data_dir = './flower_data' train_dir = data_dir + '/train' valid_dir = data_dir + '/valid' # TODO: Define your transforms for the training and validation sets train_transforms = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.RandomRotation(degrees=45), transforms.RandomResizedCrop(size=224), transforms.ToTensor(), transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]) valid_transforms = transforms.Compose([transforms.Resize(size=256), transforms.CenterCrop(size=224), transforms.ToTensor(), transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]) # TODO: Load the datasets with ImageFolder train_image_dataset = torchvision.datasets.ImageFolder(root=train_dir, transform=train_transforms) valid_image_dataset = torchvision.datasets.ImageFolder(root=valid_dir, transform=valid_transforms) # TODO: Using the image datasets and the trainforms, define the dataloaders train_dataloader = DataLoader(dataset=train_image_dataset, batch_size=32, shuffle=True, num_workers=0) valid_dataloader = DataLoader(dataset=valid_image_dataset, batch_size=32, shuffle=True, num_workers=0) class_to_idx = train_image_dataset.class_to_idx # Obtain one batch of training images dataiter = iter(train_dataloader) images, labels = dataiter.next() images = images.numpy() # convert images to numpy for display images.shape img = images[0, :, :, :] plt.imshow(np.transpose(a=img, axes=(1, 2, 0)))Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).Label mappingYou'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers.import json with open('cat_to_name.json', 'r') as f: cat_to_name = json.load(f) cat_to_nameBuilding and training the classifierNow that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features.We're going to leave this part up to you. If you want to talk through it with someone, chat with your fellow students! 
You can also ask questions on the forums or join the instructors in office hours.Refer to [the rubric](https://review.udacity.com/!/rubrics/1663/view) for guidance on successfully completing this section. Things you'll need to do:* Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use)* Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout* Train the classifier layers using backpropagation using the pre-trained network to get the features* Track the loss and accuracy on the validation set to determine the best hyperparametersWe've left a cell open for you below, but use as many as you need. Our advice is to break the problem up into smaller parts you can run separately. Check that each part is doing what you expect, then move on to the next. You'll likely find that as you work through each part, you'll need to go back and modify your previous code. This is totally normal!When training make sure you're updating only the weights of the feed-forward network. You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. Save those hyperparameters to use as default values in the next part of the project.# TODO: Build and train your network def build_model(class_to_idx): model = models.resnet18(pretrained=True) # Freezing the features for param in model.parameters(): param.requires_grad_(False) from collections import OrderedDict model.fc = nn.Sequential(OrderedDict([ ('fc1', nn.Linear(512, 512)), ('relu', nn.ReLU()), ('fc2', nn.Linear(512, 102)), ('output', nn.LogSoftmax(dim=1)) ])) # Setting trainable parameters for trainable_params in model.fc.parameters(): trainable_params.requires_grad_(True) model.class_to_idx = class_to_idx print('Model created!') return model model = build_model(class_to_idx=class_to_idx) # Check if CUDA is available train_on_gpu = torch.cuda.is_available() if train_on_gpu: model.cuda() train_on_gpu # Specify loss function (categorical cross-entropy) criterion = nn.NLLLoss() # Specify optimizer learning_rate = 0.0001 optimizer = optim.Adam(params=model.fc.parameters(), lr=learning_rate) from tqdm import tqdm_notebook as tqdm best_weights_path = './models/lab_best_model_state_dict.pth' # Number of epochs to train the model n_epochs = 1000 valid_loss_min = np.Inf # track change in validation loss for epoch in range(1, n_epochs + 1): # Keep track of training and validation loss train_loss = 0.0 valid_loss = 0.0 # train the model model.train() for data, target in tqdm(train_dataloader): # Move tensors to GPU if CUDA is available if train_on_gpu: data, target = data.cuda(), target.cuda() # Clear the gradients of all optimized variables optimizer.zero_grad() # Forward pass: compute predicted outputs by passing inputs to the model output = model(data) # Calculate the batch loss loss = criterion(output, target) # Backward pass: compute gradient of the loss with respect to model parameters loss.backward() # Perform a single optimization step (parameter update) optimizer.step() # Update training loss train_loss += loss.item() * data.size(0) # Validate the model model.eval() for data, target in valid_dataloader: # Move tensors to GPU if CUDA is available if train_on_gpu: data, target = data.cuda(), target.cuda() # Forward pass: compute predicted outputs by passing inputs to 
the model output = model(data) # Calculate the batch loss loss = criterion(output, target) # Update average validation loss valid_loss += loss.item() * data.size(0) # Calculate average losses train_loss = train_loss / len(train_dataloader.dataset) valid_loss = valid_loss / len(valid_dataloader.dataset) # Print training / validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss)) # Save model if validation loss has decreased if valid_loss <= valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict(), best_weights_path) valid_loss_min = valid_lossSave the checkpointNow that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on.```model.class_to_idx = image_datasets['train'].class_to_idx```Remember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now.# TODO: Save the checkpoint # Load the best weights full_model_path = './models/lab_best_model_full_model.pth' state_dict = torch.load(best_weights_path) full_model_dict = {'model_name': 'WojciechNetBasedOnResNet', 'lr': learning_rate, 'epochs': n_epochs, 'state_dict': model.state_dict(), 'optimizer' : optimizer.state_dict(), 'class_to_idx' : model.class_to_idx } torch.save(full_model_dict, full_model_path)Loading the checkpointAt this point it's good to write a function that can load a checkpoint and rebuild the model. That way you can come back to this project and keep working on it without having to retrain the network.# TODO: Write a function that loads a checkpoint and rebuilds the model full_model_dict_loaded = torch.load(full_model_path) lr_loaded = full_model_dict_loaded['lr'] class_to_idx_loaded = full_model_dict_loaded['class_to_idx'] model = build_model(class_to_idx=class_to_idx_loaded) model.load_state_dict(full_model_dict_loaded['state_dict']) optimizer = optim.Adam(params=model.parameters(), lr=lr_loaded)Inference for classificationNow you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. It should look like ```pythonprobs, classes = predict(image_path, model)print(probs)print(classes)> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]> ['70', '3', '45', '62', '55']```First you'll need to handle processing the input image such that it can be used in your network. Image PreprocessingYou'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training. 
First, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.resize) methods. Then you'll need to crop out the center 224x224 portion of the image.Color channels of images are typically encoded as integers 0-255, but the model expects floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`.As before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation. And finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions.def process_image(image): ''' Scales, crops, and normalizes a PIL image for a PyTorch model, returns a Numpy array ''' size = 224 # TODO: Process a PIL image for use in a PyTorch model width, height = image.size if height > width: height = int(max(height * size / width, 1)) width = int(size) else: width = int(max(width * size / height, 1)) height = int(size) resized_image = image.resize((width, height)) # Calculating points for cropping the image, left bottom and top right x_left = (width - size) / 2 y_bottom = (height - size) / 2 x_right = x_left + size y_top = y_bottom + size # Cropping the resized image cropped_image = resized_image.crop((x_left, y_bottom, x_right, y_top)) np_image = np.array(cropped_image) / 255. mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) np_image_array = (np_image - mean) / std np_image_array = np_image_array.transpose((2, 0, 1)) return np_image_array process_image(image = Image.open('./flower_data/valid/10/image_07094.jpg'))To check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).def imshow(image, ax=None, title=None): """Imshow for Tensor.""" if ax is None: fig, ax = plt.subplots() # PyTorch tensors assume the color channel is the first dimension # but matplotlib assumes it is the third dimension image = image.numpy().transpose((1, 2, 0)) # Undo preprocessing mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) image = std * image + mean # Image needs to be clipped between 0 and 1 or it looks like noise when displayed image = np.clip(image, 0, 1) ax.imshow(image) return axClass PredictionOnce you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values.To get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk).
This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](Save-the-checkpoint)). Make sure to invert the dictionary so you get a mapping from index to class as well.Again, this method should take a path to an image and a model checkpoint, then return the probabilities and classes.```pythonprobs, classes = predict(image_path, model)print(probs)print(classes)> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]> ['70', '3', '45', '62', '55']```def predict(image_path, model, top_num=5): # Process image image = process_image(image=Image.open(image_path)) # Numpy -> Tensor image_tensor = torch.from_numpy(image).type(torch.FloatTensor) # Adding additional dimension to the it model_input = image_tensor.unsqueeze(0) # Calculating the probabilities probabilities = torch.exp(model.forward(model_input)) # Top probabilities top_probabilities, top_labels = probabilities.topk(top_num) top_probabilities = top_probabilities.detach().numpy().tolist()[0] top_labels = top_labels.detach().numpy().tolist()[0] # Convert indices to classes idx_to_class = {val: key for key, val in model.class_to_idx.items()} top_k_labels = [idx_to_class[lab] for lab in top_labels] top_flowers = [cat_to_name[idx_to_class[lab]] for lab in top_labels] return top_probabilities, top_k_labels, top_flowers def predict(image_path, model, topk=5, use_gpu=True): ''' Predict the class (or classes) of an image using a trained deep learning model. ''' model.eval() train_on_gpu = torch.cuda.is_available() model = model.cuda() if train_on_gpu else model.cpu() image = Image.open(image_path) np_array = process_image(image) tensor = torch.from_numpy(np_array) tensor_on_device = tensor.float().cuda() if train_on_gpu else tensor with torch.no_grad(): var_inputs = torch.autograd.Variable(tensor_on_device) var_inputs = var_inputs.unsqueeze(0) output = model.forward(var_inputs) predictions = torch.exp(output).data.topk(topk) probabilities = predictions[0].cpu() if use_gpu else predictions[0] classes = predictions[1].cpu() if use_gpu else predictions[1] class_to_idx_inverted = {model.class_to_idx[k]: k for k in model.class_to_idx} mapped_classes = [] for label in classes.numpy()[0]: mapped_classes.append(class_to_idx_inverted[label]) return probabilities.numpy()[0], mapped_classes image_path = './flower_data/valid/10/image_07094.jpg' probabilities, classes = predict(image_path, model) print(probabilities) print(classes)[0.5137469 0.14028846 0.0900024 0.0895931 0.03145264] ['31', '22', '70', '10', '14']Sanity CheckingNow that you can use a trained model for predictions, check to make sure it makes sense. Even if the validation accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this:You can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). 
To show a PyTorch tensor as an image, use the `imshow` function defined above.# TODO: Display an image along with the top 5 classes image_path = './flower_data/valid/10/image_07094.jpg' probabilities, classes = predict(image_path, model) max_index = np.argmax(probabilities) max_probability = probabilities[max_index] label = classes[max_index] fig = plt.figure(figsize=(10,10)) ax1 = plt.subplot2grid((15,9), (0,0), colspan=9, rowspan=9) ax2 = plt.subplot2grid((15,9), (9,2), colspan=5, rowspan=5) image = Image.open(image_path) ax1.axis('off') ax1.set_title(cat_to_name[label]) ax1.imshow(image) labels = [] for i_class in classes: labels.append(cat_to_name[i_class]) y_pos = np.arange(5) ax2.set_yticks(y_pos) ax2.set_yticklabels(labels) ax2.invert_yaxis() ax2.set_xlabel('Probability') ax2.barh(y_pos, probabilities, xerr=0, align='center') plt.show()Attribution multiple images at onceThis notebook computes masks for the 0.99 quantile and computes the attribution of many images%load_ext autoreload %autoreload 2 # imports import glob import logging import os import sys sys.path.insert(0, os.path.abspath('..')) sys.path.insert(0, os.path.abspath('../..')) import cv2 import numpy as np import tensorflow as tf from PIL import Image from matplotlib import pyplot as plt from tqdm.notebook import tqdm from deepexplain.tf.v1_x import DeepExplain from plot_utils import plot, plt from tf_pose import common from tf_pose.common import CocoPart from tf_pose.estimator import BodyPart, TfPoseEstimator from tf_pose.networks import get_graph_path os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" logging.getLogger("tensorflow").setLevel(logging.CRITICAL) logging.getLogger('TfPoseEstimatorRun').setLevel(logging.ERROR) logging.getLogger('DeepExplain').setLevel(logging.ERROR) logging.getLogger('TfPoseEstimator').setLevel(logging.ERROR) # params 432, 368 w, h = 432, 368 model = 'cmu' resize_out_ratio = 2.0 e = TfPoseEstimator(get_graph_path(model), target_size=(w, h), trt_bool=False) image_path = '../data/images/highFrequencyData/*.jpg' test_data = [f for f in glob.glob(image_path)] NORMALIZATION_FLAG = True def return_uncertain_part(humans): part = None cur_min = 1.0 if len(humans) == 0: return BodyPart(0, CocoPart.RShoulder.value, 0, 0, 0.0) for body_part in humans[0].body_parts.keys(): if humans[0].body_parts[body_part].score < cur_min: part = humans[0].body_parts[body_part] cur_min = humans[0].body_parts[body_part].score return part results = [] # compute attribution only for 99% quantile = 0.995 for image_path in tqdm(test_data): current_stats = {} # read file current_stats["image"] = common.read_imgfile(image_path, w, h) # compute humans and draw them onto the image humans = e.inference(current_stats["image"], resize_to_default=( w > 0 and h > 0), upsample_size=resize_out_ratio) current_stats["image_result"] = TfPoseEstimator.draw_humans( current_stats["image"], humans, imgcopy=True) # compute the worst attribution part current_stats["part"] = return_uncertain_part(humans) # get the resulting confidence map current_stats["heatmap"] = e.heatMat[:, :, current_stats["part"].part_idx] # normalize mask if NORMALIZATION_FLAG: total_confidence_value = np.sum(current_stats["heatmap"]) current_stats["heatmap"] /= total_confidence_value # compute a mask quant = np.quantile(current_stats["heatmap"], quantile) current_stats["mask"] = current_stats["heatmap"] > quant current_stats["mask"] = Image.fromarray( np.uint8(current_stats["mask"]*255)) current_stats["mask"] = np.array( current_stats["mask"].resize((54, 46), Image.ANTIALIAS)) # 
use peaks as the mask # peak = e.peaks[:, :, current_stats["part"].part_idx] # mask = e.peaks[:, :, current_stats["part"].part_idx] >= np.max(e.peaks[:, :, current_stats["part"].part_idx]) # peak[mask] = 1.0 # peak = Image.fromarray(peak) # current_stats["mask"] = np.array(peak.resize((54, 46))) # get the current session sess = e.persistent_sess # Since we will explain it, the model has to be wrapped in a DeepExplain context with DeepExplain(session=sess, graph=e.graph) as de: input_tensor = e.tensor_image output_tensor = e.tensor_heatMat[:, :, :, current_stats["part"].part_idx] xs = tf.expand_dims(current_stats["image"], 0).eval(session=sess) xs = xs.astype('float64') Y_shape = [None] + [1, 46, 54] # size of heatmaps ys = np.expand_dims(np.expand_dims( current_stats["mask"], axis=0), axis=0) baseline = np.zeros(list(xs.shape)[1:]) # baseline to compare against current_stats['Saliency maps'] = de.explain( 'saliency', T=output_tensor, X=input_tensor, xs=xs, ys=ys, Y_shape=Y_shape) current_stats['Gradient * Input'] = de.explain( 'grad*input', T=output_tensor, X=input_tensor, xs=xs, ys=ys, Y_shape=Y_shape) results.append(current_stats) # Plot attributions %matplotlib inline n_cols = int(len(results[0])) n_rows = int(len(results)) fig_scale = 3 fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=( 3*n_cols*fig_scale, 3*n_rows*fig_scale)) for i, result in enumerate(results): ax = axes.flatten()[i*n_cols] ax.imshow(result['image']) ax.set_title('Original') ax.axis('off') for j, (key, value) in enumerate(result.items()): axj = axes.flatten()[i*n_cols + j] if key == 'image' or key == 'image_result': axj.imshow(cv2.cvtColor(value, cv2.COLOR_BGR2RGB)) axj.set_title(f'{key} {result["part"].get_part_name()}', fontdict={ 'fontsize': 20}) axj.axis('off') elif key == 'mask': axj.imshow(cv2.cvtColor(value, cv2.COLOR_BGR2RGB)) axj.set_title(key, fontdict={'fontsize': 20}) elif key == 'heatmap': heat_image = axj.imshow(value, cmap=plt.cm.hot, alpha=1.0) axj.set_title(key, fontdict={'fontsize': 20}) fig.colorbar(heat_image, ax=axj, shrink=0.63) elif key == 'humans' or key == 'part': continue else: xi = (result['image'] - np.min(result['image'])).astype('float64') xi /= np.max(xi) plot(value[0], xi=xi, axis=axj, dilation=.5, percentile=99, alpha=.2).set_title(key, fontdict={'fontsize': 20})In this code, I built my own Black Jack games. It was one of my first hobby projects with Python when I started learning in 2017. 
I believe the code is still functional and if you'd like you can play a game of blackjack.import numpy as np import random suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs') ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace') values = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8, 'Nine':9, 'Ten':10, 'Jack':10, 'Queen':10, 'King':10, 'Ace':11} playing = True class Card: def __init__(self,suit,rank): self.suit=suit self.rank=rank def __str__(self): return str(self.rank)+ ' of ' +str(self.suit) class Deck: def __init__(self): self.deck=[] for suit in suits: for rank in ranks: self.deck.append(Card(suit,rank)) def __str__(self): deck_comp='' for card in self.deck: deck_comp+='\n'+card.__str__() return deck_comp def shuffle(self): return random.shuffle(self.deck) def deal(self): return self.deck.pop() class Hand: def __init__(self): self.aces=0 self.values=0 self.card=[] def addcard(self,Card): self.card.append(Card) self.values+=values[Card.rank] if Card.rank=='Ace': self.aces+=1 def adjustace(self): if self.values>21 and self.aces: self.values-=10 self.aces-=1 class Chips: def __init__(self): self.total=100 self.bet=0 def win_bet(self): self.total+=self.bet def lose_bet(self): self.total-=self.bet def hit(hand,deck): hand.addcard(deck.deal()) hand.adjustace() def hit_stand(hand,deck): global playing while True: x=input("Do you want to hit or stay? H or S: ") if x[0].lower()=='h': hit(hand,deck) elif x[0].lower()=='s': print("Player Stays, Dealers Turn") playing =False else: print("Try again, incorrect input") continue break def take_bet(chips): while True: try: chips.bet=int(input("Enter amount you want to bet: ")) except ValueError: print("Enter integer values") else: if chips.bet>chips.total: print("Enter value less than " +str(chips.total)) else: break def show_some(player,dealer): print("Dealers Hand \n") print("") print('',dealer.card[1]) print("\nPlayer's Hand:", *player.card, sep='\n ') def show_all(player,dealer): print("\nDealer's Hand:", *dealer.card, sep='\n ') print("Dealer's Hand =",dealer.values) print("\nPlayer's Hand:", *player.card, sep='\n ') print("Player's Hand =",player.values) def player_busts(chips): print("Player busts!") chips.lose_bet() def player_wins(chips): print("Player wins!") chips.win_bet() def dealer_busts(chips): print("Dealer busts!") chips.win_bet() def dealer_wins(chips): print("Dealer wins!") chips.lose_bet() def push(): print("Dealer and Player tie! It's a push.") player_chips=Chips() while True: deck=Deck() deck.shuffle() player_hand=Hand() player_hand.addcard(deck.deal()) player_hand.addcard(deck.deal()) dealer_hand=Hand() dealer_hand.addcard(deck.deal()) dealer_hand.addcard(deck.deal()) take_bet(player_chips) show_some(player_hand,dealer_hand) while playing: hit_stand(player_hand,deck) show_some(player_hand,dealer_hand) if player_hand.values>21: player_busts(player_chips) break if player_hand.values<=21: while dealer_hand.values<17: hit(dealer_hand,deck) show_all(player_hand,dealer_hand) if dealer_hand.values>21: dealer_busts(player_chips) elif player_hand.values>dealer_hand.values: player_wins(player_chips) elif player_hand.valuesEnter amount you want to bet: 10 Dealers Hand King of Hearts Player's Hand: Nine of Clubs Eight of Clubs Do you want to hit or stay? H or S: h Dealers Hand King of Hearts Player's Hand: Nine of Clubs Eight of Clubs Six of Hearts Player busts! players winnings stand at 90 Do you want to play again ? 
Y or Ny Enter amount you want to bet: 10 Dealers Hand Eight of Hearts Player's Hand: Four of Spades Two of Hearts Do you want to hit or stay? H or S: h Dealers Hand Eight of Hearts Player's Hand: Four of Spades Two of Hearts Five of Clubs Do you want to hit or stay? H or S: h Dealers Hand Eight of Hearts Player's Hand: Four of Spades Two of Hearts Five of Clubs Four of Clubs Do you want to hit or stay? H or S: h Dealers Hand Eight of Hearts Player's Hand: Four of Spades Two of Hearts Five of Clubs Four of Clubs Ace of Diamonds Do you want to hit or stay? H or S: s P[...]Choosing the right data file format for numerical computingThis notebook will go over the pros and cons of various data file formats common in numerical python workflows. It'll cover various concerns when storing data on-disk and how popular formats address these challenges; the what and why of these formats. Who am I?* pronounced like Haze* Software Developer (Sr. Instrumentation Technologist) Space Science and Engineering Center (SSEC) University of Wisconsin - Madison* Satpy and Vispy developer* @djhoese on Twitter and GitHubThis notebook:https://github.com/djhoese/data-file-formats Introduction Why write data to disk?1. State or Cache * a long running application needs to start where it left off * reuse calculation in future executions * user settings/preferences are saved for later use2. Data Archival/Distribution/Sharing * results are shared with other people * results are shared with other software What kind of files are we working with?* Plain text or binary file formats* Primarily numeric data* Optionally add custom metadata What are we **NOT** talking about?* Databases* Python's pickle format* Storing user application settings* **Custom** binary formats (no community/organization support) Why does file format matter?There are many different formats that we can use in Python and we havedifferent ways to access them. Each one comes with various advantages anddisadvantages. When it comes to choosing a file format for a task we shouldbe concerned with a few key things:* **Write speeds** How long does it take to get data from it's in-memory format to disk?* **Read speeds** How long does it take to get data from disk to a usable in-memory format?* **On-disk size** How big are the files?* **In-memory size** How much memory does it take to open the file and get data out? Are there multiple copies of the data in memory?* **Flexibility/Usefulness** Can I do anything else with the format? Store metadata? Can I use it for this other use case? Can I lazily load data? Can I access data remotely? Can I compress the data? How much data can I store in one file?* **Format Availability** Is the format only readable by Python software? Do my users need to learn something new to read it? If the format isn't "built in" to a programming environment, can I easily install the necessary libraries? Warmup - Plain TextLet's start with an example that uses plain text to write the last time thatthe function was called. We first need to import a few modules and thencreate our function to append to our data file called `last_time.txt`.import os import sys from datetime import datetime, timedelta DATA_FILE = 'last_time.txt' def write_time(): with open(DATA_FILE, 'a') as time_file: now = datetime.utcnow() data_line = "{:f}\n".format(now.timestamp()) time_file.write(data_line)In our function we get the current time in[UTC](https://en.wikipedia.org/wiki/Coordinated_Universal_Time). We convertit to epoch time (a.k.a. 
POSIX time), seconds since `1970-01-01 00:00:00`, andwrite the string representation to the file as a single line.Let's add a couple times to the file by running the `write_time` a coupletimes below.write_time()And we can use the command `head` to print out the first couple lines of the file's contents:!head $DATA_FILEWe've written data to the file, now let's read it back out. The below functionwill read the content's of the file in to a list.def read_times(): exec_times = [] with open(DATA_FILE, 'r') as time_file: for line in time_file: time_val = float(line.strip()) exec_times.append(time_val) return exec_times read_times()[:10]The code above gives us a simple example of saving data from software to afile on disk. We wrote a single value at a time and accumulate moreinformation as time went on. We were able to read these data backin to python at a later time. Could we have done anything differently? Plain Text - Pros/ConsLet's take the above concerns and look at our text file from before and thecode we used to access it.Pros * Human readable* Simple code (no external libraries)* Easily usable by other languages/tools* Could read one value at a time (but we didn't) Cons* Have to convert data to/from string and float (slow)* Representing each 8 byte float (64-bit) as ~17 ASCII bytes* Unknown precision of data values, how many decimal points?* Don't know how many elements until it is fully read* Can't easily seek to a specific index/element* Code: Read as a list instead of a numpy array and used a python for loop (potentially slow) And here's what a single value of our text file looks like on disk (8-bit ASCII character): Byte Offset Value 01 15 26 38 46 56 67 78 83 92 10. 118 122 137 144 156 168 17\n We can perform a quick timing to see how long it takes to read the file:txt_time_write = %timeit -o -n 10000 -r 1 write_time() txt_time_read = %timeit -o -n 100 -r 1 read_times()The NumPy library also provides a function for loading data from text files.Let's try it and see how it compares.import numpy as np txt_time_read_np = %timeit -o -n 100 -r 1 np.loadtxt(DATA_FILE)It seems that in this specific case and with all of the extra checks numpyperforms, it actually takes longer to read the data with numpy. When it comesto simple human-readable formats, we couldn't have gone much simpler.The remainder of this document will go through different use cases andthe file formats that we have as options. We'll apply this type ofperformance analysis to our format choices. Flat BinaryWhen human-readability isn't necessary, another option for storing simpledata structures is a flat binary file. A flat binary file consists of theraw data values stored contiguously as a flat array. Let's rewrite our codefrom above to write to a flat binary file using the numpy library.import numpy as np BIN_DATA_FILE = 'last_time.dat' def write_time_binary(): with open(BIN_DATA_FILE, 'a') as time_file: now = datetime.utcnow() np.array([now.timestamp()], dtype=np.float64).tofile(time_file) def read_times_binary(): return np.fromfile(BIN_DATA_FILE, dtype=np.float64) bin_time_write = %timeit -o -n 100000 -r 1 write_time_binary() bin_time_read = %timeit -o -n 100 -r 7 read_times_binary()Perform Tip: Memory MapsBy default, when reading a file from disk we have to transfer data fromthe hard disk to system memory. That means that when creating somethinglike a numpy array from a binary data file we are transferring **all** of thefile's contents from disk in to memory when we might not use it right away; a very slow operation. 
There is alazier, generally more efficient, method called memory mapping (a.k.a. mmap inC, memmap by numpy). By creating a memory map, we allocate the virtual memoryspace for our data, but the operating system won't load the data from diskuntil we need it. Memory mapping avoids extra copies of file data in memory, works very well with random accesses to data in a file, can be cached more efficiently, shared between processes more effectively, and is generally your best option for reading large files that are difficult to hold in memory at a single time.Further reading:* [Stackoverflow answer discussing memory maps](https://stackoverflow.com/a/6383253/433202)* [Memory-mapped File - Wikipedia](https://en.wikipedia.org/wiki/Memory-mapped_file)Going back to the above binary file usage...def read_times_binary_memmap(): return np.memmap(BIN_DATA_FILE, mode='r', dtype=np.float64) bin_time_read_mmap = %timeit -o -n 100 -r 7 read_times_binary_memmap() bin_time_read.average / bin_time_read_mmap.averageKeep in mind that memory mapping isn't **loading** the data in to memory so this isn't technically a fair comparison. However, as we use the memory mapped array object we should see better performance than a traditional read of the file.Note that we could also use memory maps to write data. This is most beneficial if we are writing to random locations in the file. Flat Binary - Pros/Cons Pros * Simple code* Readable by any programming language (*see Cons)* Minimum on-disk size without compression* Fast reading and memory mappable* Supports N-dimensional data* Subsettable Cons * Not human readable* No shape, data type, or byte order information stored* Platform dependent (not shareable)Note from numpy docs: > Do not rely on the combination of tofile and fromfile for data storage, as the binary files generated are are not platform independent. In particular, no byte-order or data-type information is saved. Data can be stored in the platform independent .npy format using save and load instead. Storage layout (64-bit float): Byte Offset Value 01568667832.827468 81568667832.827796 161568667832.827835 Bonus - Numpy's .npy formatAs mention in the cons above, a flat binary file *only* has the data and noother information. This means we have to keep track of this informationourselves. To help with this numpy provides a `.npy` format which stores thisinformation inside the file alongside the binary data. Here's a quick exampleto create a `.npy` file using[`np.save`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.save.htmlnumpy.save).np.save('last_time.npy', np.array([1., 2., 3.]))When we are ready to read the data back we can use[`np.load`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.load.htmlnumpy.load).This method gives us the option to open the data as a memory map, giving usthe space and speed advantages of a flat binary while avoiding the formatmetadata issues. Keep in mind that this format is only readable by numpy andwould require an additional library in any other language.np.load('last_time.npy', mmap_mode='r')Comma-Separated Values (CSV)So far we've dealt with a single stream (a.k.a. field or variable) of data,but what if we need to store more? When it comes to multiple 1-dimensionalvariables one of the more common solutions is a Comma-Separate Values (CSV)file. 
We could use numpy again, but instead we'll use the `pandas` libraryfor its more powerful handling of tabular data like we would store in a CSV.We'll start by loading some example data used by the seaborn python package.Their example data is stored as a[CSV file on GitHub](https://raw.githubusercontent.com/mwaskom/seaborn-data/master/iris.csv).We use the pandas[`read_csv`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html)function. This function has a lot of options, but we'll use the defaults for now.import pandas as pd import numpy as np seaborn_iris_url = 'https://raw.githubusercontent.com/mwaskom/seaborn-data/master/iris.csv' data = pd.read_csv(seaborn_iris_url) print(data.shape) data.head()If we were making our CSV file from a pandas dataframe we can use `to_csv`:my_df = pd.DataFrame(np.random.random((150, 5)), columns=['A', 'B', 'C', 'D', 'E']) my_df.head() my_df.to_csv('randoms.csv', index=False)Pandas also provides options for memory mapping the text file to reduce I/Ooverhead.my_df = pd.read_csv('randoms.csv', memory_map=True) my_df.head()Performance Tip: Chunking and IteratingSo far we've been loading all data at once or depending on memory mapping toreduce the amount of data that was loaded at any one time. Another possibleimprovement can come from loading chunks of data at a time. This is similarto what we did with the original plain text file iterating over the lines ofthe file one at a time. By chunking and iterating over the data we can process files that would notfit in memory otherwise. For more info on chunking, see the[pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/io.htmliterating-through-files-chunk-by-chunk).reader = pd.read_csv('randoms.csv', iterator=True, chunksize=4) for idx, chunk_df in enumerate(reader): print(chunk_df) if idx >= 2: breakThis reduces the amount of memory being used at any one time while we work with a single chunk.We can change how big of a chunk we get by calling `get_chunk` with the number ofrows to put in the DataFrame returned. Note how we are continuing our reading from the above cells since the reader is an iterator.reader.get_chunk(6)CSV - Pros/Cons Pros* Human readable* Can be read in Microsoft Excel (non-programmer collaboration)* Lazy/iterable loading possible* Row-based operations are relatively fast Cons* Slow to read/write* Wasted disk space* Require reading all columns to get value for a single column* Column-based operations are slow (see below)An unfortunate storage layout for a 3 column CSV (8-bit ASCII characters): Byte Offset Value Column 00.4433,1 70.623,2 13setosa\n3 200.8866,1 270.31,2 32virginica\n3 420.6644,1 The above table shows how a CSV file may be stored on disk. If we don't force all floating point numbers to have the same number of digits or string fields are not all the same size then we can't be sure how to quickly get all values for a single column (by calculating offsets) without reading every value for every column. This makes doing column-based calculations, like the average of an entire field, very slow and wasteful. If we are doing a calculation that requires all values in a single row, then this structure is fairly convenient; we can parse one row at a time efficiently. 
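To make that column-access cost concrete, here is a small, hedged sketch (not from the original notebook) using the `randoms.csv` file written above: even when we ask pandas to keep only one column in memory with `usecols`, the text parser still has to scan every row of the file to find that column's values, so a column-wise statistic cannot skip any of the file's bytes.

```python
import pandas as pd

# Keep only column 'C' in memory; the parser still reads and tokenizes
# every line of the file to locate the 'C' field in each row.
col_c = pd.read_csv('randoms.csv', usecols=['C'])
print(col_c['C'].mean())
```

The memory savings are real, but the read time stays roughly proportional to the whole file, which is exactly the limitation that column-oriented formats address.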
Further Reading* [Dask DataFrames](https://docs.dask.org/en/latest/dataframe.html) for distributed and lazy pandas dataframes Review 1* Store as text: Human-readable but slow* Store as binary: Easily indexable and fast to read/write* Memory maps: Better I/O in most cases* Chunking: Good when working on one chunk at a time* Flat binary: Simple, quick solution, multiple dimensions, no structure ParquetParquet is another tabular data format and can be thought of as the highperformance binary version of the CSV file.Analytics workflows typically don't need to read every column from a tabularformat and storing data in something like a CSV file can be very wasteful.The first difference in what Parquet brings to the table (pun intended) is storing data bycolumn (image right) instead of by row (image left).Credit: https://www.kdnuggets.com/2017/02/apache-arrow-parquet-columnar-data.htmlIf we keep the columns together it is easier to access one column more easilyand more quickly than data stored row by row. Performance Tip: Spatial and Temporal LocalityModern CPUs have multiple levels of caching. The closer a cache level is to the CPU (the computation) the smaller it is and less it can store at any one time. These closer cache levels are also much faster to get data from. Conversely, caches that are further from the CPU are larger and slower to access. The diagram shows the various caching levels common in modern computers where L1 caches are the smallest and fastest, then L2, L3, main RAM memory, and finally the hard disk; the largest and slowest storage on the local machine. Credit: https://software.rajivprab.com/2018/04/29/myths-programmers-believe-about-cpu-caches/If we want the best performance out of the file format we are using then we want to do as many operations as possible with what is in the L1 cache before replacing it with new data. Otherwise, we could suffer from reloading the same data from slower caches. **Temporal locality** is the idea that if we access a particular memory location, we are very likely to access that same location again in the near future. **Spatial locality** is the idea that if we access a particular memory location, an operation in the near future is probably going to involve the data right next to it. Modern computers will assume this to be true and will predictively cache things to get the best performance. That means our best option is to use the memory the way the computer thinks we are going to use it.Further Reading: [Wikipedia](https://en.wikipedia.org/wiki/Locality_of_reference) Performance Tip: Single Instruction, Multiple Data (SIMD)In addition to the multiple cache levels, modern CPUs can also operate on multiple pieces of data at the same time with a single CPU instruction. These vectorized instructions are referred to as SIMD. The CPU is told to do one operation on many values (ex. add 5 to every value) and can perform it on multiple values at a time, in parallel. Even though SIMD instructions are low-level, NumPy does a lot of the work for us by telling the CPU to use SIMD instructions when possible. However, this usually depends on taking advantage of locality (see above) so that the CPU has all the values it is going to operate on.Further Reading: [Wikipedia](https://en.wikipedia.org/wiki/SIMD)Parquet's design for how data is stored on disk and operated on in-memory tries to take advantage of these concepts. 
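As a rough, added illustration of the locality idea (not part of the original notebook), consider how a row-major NumPy array lays out a "table" in memory: a single row is one contiguous block, while a single column is a strided view that touches one value from every row's block, much like pulling one field out of every record of a row-oriented file.

```python
import numpy as np

# Row-major (C-ordered) storage: each 5-element row is contiguous,
# analogous to one record per row in a CSV file.
table = np.random.random((1_000_000, 5))

row = table[0]      # one contiguous block of 5 values
col = table[:, 0]   # strided view: skips 4 values between each element

print(row.flags['C_CONTIGUOUS'])  # True
print(col.flags['C_CONTIGUOUS'])  # False
```

Column-oriented formats like Parquet arrange the on-disk bytes so that the values you scan together are also stored together, which is what lets the caches and SIMD instructions described above do their job.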
Let's run through a basic parquet example to see how we can read and write a parquet file and how we can control some aspects of these complex topics. We'll start by generating some random data to play with. We'll re-assign the `'B'` column so not all of our data is random (to show the results of compression better).import numpy as np import pandas as pd data = pd.DataFrame(np.random.random((100_000, 5)), columns=['A', 'B', 'C', 'D', 'E']) data['B'] = np.repeat(np.arange(100), 1000) print(data.shape) data.head()Let's write this data to a local parquet file using the `fastparquet` library (there are others):import fastparquet as fp fp.write('randoms_default.parq', data) !ls -lh randoms_default.parq parq_file = fp.ParquetFile('randoms_default.parq') parq_file.to_pandas().head()By default fastparquet makes a single file with no compression and only stores entire columns together (no row-groups). Although these defaults may not provide the best performance for a particular use case, the format itself provides us some nice conviences. For example, since the data is stored by column we can quickly and efficiently load a limited set of the columns:data2 = parq_file.to_pandas(['C', 'D']) data2.head()Let's take advantage of some of the other features of parquet by writing a new file from our original iris data:fp.write('randoms_gzip.parq', data, compression='GZIP') !ls -lh randoms*.parqWe can see that using compression can save us a lot of space in our simple data file.So far we've been storing entire columns contiguously, one large block of data. It is also possible to define "row groups" to better control how our data is organized on disk. Row groups are grouping a certain number of rows together, but still by column. Let's look at what a parquet file with row groups might look like:Credit: https://www.dremio.com/tuning-parquet/By defining row groups (the orange groups in the above image), we allow ourselves better performance when working on row-based operations while still getting the advantages of storing data by column. We can define our row group sizes using `row_group_offsets` keyword argument in `fastparquet`:fp.write('randoms_row_group.parq', data, compression='GZIP', row_group_offsets=1000) !ls -lh randoms*.parqPandas has its own methods for reading parquet files (using the PyArrow library underneath):import pandas as pd %timeit pd.read_parquet('randoms_row_group.parq').head()And Dask can also load parquet files (using fastparquet underneath). By using Dask we can load the data lazily and in parallel with multiple worker threads or processes:import dask.dataframe as dd %timeit dd.read_parquet('randoms_row_group.parq').head()Performance Tip: Block sizesEach storage device (cache, RAM, disk) will typically have a default size for the single "chunks" or "units" of data that they will operate on or store in one contiguous location or provide to another storage element. This is typically referred to as the "block size" and can have different names depending on what is being talked about and by whom. To get the best performance we want to try to operate on and store our data in a way that is compatible with the block size of our storage device. If we want to work with 5KB chunks, but our storage device operates on 4KB blocks, we will need to load two 4KB blocks for every 5KB chunk we want to work with. By aligning your workflow with the block size of your storage you can avoid unnecessary I/O operations and delays. 
However, this isn't always something you have control over or knowlege of.For example, with parquet we want to store our row groups with sizes that take advantage of the block size of the storage while getting the most out of the parquet format. See [Tuning Parquet](https://www.dremio.com/tuning-parquet/) for more details.Credit: https://www.dremio.com/tuning-parquet/The last important parquet feature we should talk about is the ability to store a single parquet dataset as a directory of files instead of one single file. This structure can allow better performance when storing data on cloud file systems or other high performance storage solutions (ex. Apache Hive). We can control this in `fastparquet` by using the `file_scheme` keyword argument:fp.write('randoms_row_group_hive.parq', data, compression='GZIP', row_group_offsets=1000, file_scheme='hive') !ls -lh randoms_row_group_hive.parq/ | headParquet - Pros/Cons Pros* Column-based selection very fast* Multiple compression algorithms available* Row groups allow for more control over storage* Cloud storage friendly* Can be stored as single file or directory of files Cons* Complex (lots of options and choices)* Limited to tabular data (mostly)Further Reading:* [Tuning Parquet](https://www.dremio.com/tuning-parquet/)* [fastparquet Documentation](https://fastparquet.readthedocs.io/en/latest/index.html)* [fastparquet Usage Notes](https://fastparquet.readthedocs.io/en/latest/details.html)* [Apache Arrow](https://arrow.apache.org/)* [Pandas - Scaling to large datasets](https://dev.pandas.io/docs/user_guide/scale.html) HDF5So far we've been dealing with tabular data where our entire dataset can be represented by a 2D array (rows, columns). Sometimes your data may require a more complex hierarchical structure. One good solution for these more complex structures is the Hierarchical Data Format (HDF); specifically the HDF5 file format. HDF5 is a self-describing format which means that in addition to the raw data you can store all related metadata (when it was recorded, where it was recorded, etc).Pandas and other python libraries do provide utility functions for writing and reading HDF5 files (.h5 extension), but we will be using the `h5py` library here to step through the structure of the files. The HDF5 data model is made up of a few key components or ideas:* File: The single object stored on disk* Datasets: Multidimensional data array with attributes and other metadata* Groups: A collection of objects (datasets or groups)* Attributes: Key-value store of metadata on groups or datasetsCredit: https://www.neonscience.org/about-hdf5import h5py h = h5py.File('test.h5', 'w') hHDF5 files have an implicit "root" group where groups or datasets can be added or attributes can be set.h['/'] h.attrs dict(h.attrs) from datetime import datetime h.attrs['start_time'] = datetime.utcnow().isoformat(" ") h.attrs['source'] = "Dave's Notebook" h.attrs['revision'] = 1 h.attrs['inputs'] = ['a.txt', 'b.txt', 'c.text'] dict(h.attrs)We can store most builtin Python data types in our attributes to store various pieces of metadata about our file. These are typically referred to as "global" attributes.We can also add data to our file as Datasets. A Dataset can be created with the data specified:ds1 = h.create_dataset('dataset1', data=np.random.random((10, 100, 100))) ds1 ds1.attrs['valid_range'] = [0, 1] ds1.attrs['missing_value'] = np.nan dict(ds1.attrs)A dataset can also be created and have data filled in later. 
There are a lot of options available when creating a dataset:h.create_dataset? ds2 = h.create_dataset('dataset2', shape=(5, 100), dtype=np.uint8, compression='gzip', chunks=(2, 100), fillvalue=255) ds2[2:4] = np.arange(100) ds2[0] = np.arange(100, 200) ds2[-1] = np.arange(150, 250)Whether we are reading or writing we can use slicing syntax to access parts of the data array (only loading/writing what's needed) or the entire thing.If we look at the data we wrote to the `"dataset2"` dataset, notice how the second row of data uses the `fillvalue` we set above.ds2[:]We can create groups to describe more complex structures of our data (groups with groups with datasets, etc):g1 = h.create_group('geolocation') g1.attrs['my_key'] = 'my_val' # sub-group of g1 g12 = g1.create_group('geocentric') # another group off of the root g2 = h.create_group('observed_data') # dataset as part of a group obs_a = g2.create_dataset('observation_a', data=np.arange(10.)) h.close()The HDF5 C library comes with the `h5dump` command line tool for investigating an HDF5 file in different ways. Here we use the `-n` flag to list the contents of the file without their attributes.!h5dump -n test.h5HDF5 - Pros/Cons Pros* Complex structures* Per-dataset compression* Multidimensional arrays* Metadata Cons* Complex (lots of options and choices)* Limited parallel/thread-safe operations* Not cloud storage friendlyHDF5 supports a lot of complex features including references HDF5 objects from external files or using references to point to other locations in the same file. These features are left as an exercise for the reader to explore.Further Reading:* [HDF5 Data Model](https://support.hdfgroup.org/HDF5/doc1.6/UG/03_Model.html)* [h5py Documentation](http://docs.h5py.org/en/stable/index.html)* [HDF5 Reference Objects](http://docs.h5py.org/en/stable/refs.html) NetCDF4NetCDF4 is another format that supports storing multidimensional arrays like HDF5, but provides a simpler/flatter structure to ease use of the files. Under the hood NetCDF4 is actually built on top of the HDF5 format. As such, almost all of the features provided by HDF5 are also available in NetCDF4 (chunking, per-dataset compression, etc). NetCDF4 is a very popular and common format for storing scientific data. When mixed with metadata standards like those defined by the [Climate and Forecast (CF) group](http://cfconventions.org/) files become much easier to distribute and use between different applications.The NetCDF4 C library not only allows you to read and write NetCDF4 files, but can act as a generic reading library for formats that fulfill the NetCDF4 Data Model. This means that the NetCDF4 C library can read NetCDF4, NetCDF3, and HDF4 files. In future versions it will also be able to read the Zarr format (see next section). The NetCDF4 data model is very similar to HDF5, but with a few key differences. The first major difference is that HDF5 Datasets are called "Variables". 
The second major addition is the more formal definition of labeled Dimensions.Credit: https://www.unidata.ucar.edu/software/netcdf/docs/netcdf_data_model.htmlWe'll start exploring the NetCDF4 data model by creating a simple NetCDF4 file using the `netcdf4-python` library.import numpy as np import netCDF4 nc = netCDF4.Dataset('test.nc', 'w') ncNow we'll add some data, but before we do that we need to define the dimensions for our variable.y_dim = nc.createDimension('y') x_dim = nc.createDimension('x', 100) var1 = nc.createVariable('var1', np.uint16, dimensions=('y', 'x')) var1[:] = np.random.randint(1, 65535, size=(200, 100))We can also define variables with the same name as a dimension. These are known as "coordinate" variables.x_var = nc.createVariable('x', np.float32, dimensions=('x',)) x_var[:] = np.linspace(1000.0, 1500.0, 100)Just like HDF5 we can assign metadata attributes to our variables or to the global attributes of the file:nc.created = 'now' var1.valid_range = [0, 65535] var1.standard_name = 'toa_brightness_temperature'When we defined the `y` dimension we didn't specify a size. In NetCDF4 this means the dimension is 'unlimited' and can grow as needed. We can check the size of a dimension by looking at `len(dim)`. Let's add more data to the `var1` variable and see how the dimensions change.print(len(y_dim)) var1[205] = np.arange(100) print(len(y_dim)) nc nc['var1'] nc.close()One of the easiest ways to work with NetCDF files in python is through the Xarray library by using it's `open_dataset` and `to_netcdf` functions. Xarray can take advantage of the dask library for lazy loading our data and computing things in parallel. By specifying the `chunks` keyword argument we can tell Xarray to use dask.Credit: http://xarray.pydata.org/en/stable/data-structures.htmlimport xarray as xr xnc = xr.open_dataset('test.nc', chunks={'y': 50, 'x': 100}) xnc print(xnc.dims) print(xnc.attrs) print(xnc['var1'].dims) print(xnc['var1'].coords) xnc['var1']Xarray helps us when slicing data by applying the same slices to our coordinates.xnc['var1'][:50, 50:75] xnc.close()Similar to `h5dump` the NetCDF4 library comes with a `ncdump` utility:!ncdump -h test.ncNetCDF4 - Pros/Cons Pros* Simpler HDF5 structures* Per-variable compression and chunking* Multidimensional arrays* Named dimensions and coordinate variables* Data model compatible with Xarray (limited support for groups)* Metadata Cons* Limited parallel/thread-safe operations* Not cloud storage friendlyFurther Reading:* [xarray.open_mfdatasets](http://xarray.pydata.org/en/stable/generated/xarray.open_mfdataset.html) - Load multiple files as one Dataset* [OpenDAP](https://en.wikipedia.org/wiki/OPeNDAP) - Read remote NetCDF files without loading the entire file ZarrOne of the major downsides to HDF5 and NetCDF4 is that they do not allow for parallel processing or do not work well with cloud storage solutions. To address these limitations and others the Zarr format was developed. Zarr is developed heavily by the scientific Python community including the developers of dask and xarray. In short, Zarr is like a NetCDF4 file if it was stored as a directory of files instead of one single file.Since Zarr uses the same data model as NetCDF/Xarray, we can skip directly to what the format looks like and how it is used. 
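As a quick taste of the library itself, here is a minimal sketch (assuming the `zarr` package is installed and using a hypothetical `example.zarr` path) of creating a chunked array with `zarr` directly; below we will instead go through Xarray.

```python
import numpy as np
import zarr

# A chunked 2D array backed by a directory store; each chunk is written
# as its own file, compressed with the library's default compressor.
z = zarr.open('example.zarr', mode='w', shape=(200, 100),
              chunks=(50, 100), dtype='uint16')
z[:] = np.random.randint(1, 65535, size=(200, 100))
print(z.info)  # chunk layout, compressor and storage sizes
```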
We'll start by using the NetCDF file we made before.import xarray as xr xnc = xr.open_dataset('test.nc', chunks={'y': 50, 'x': 100}) xnc xnc.to_zarr('test.zarr') !tree -a test.zarr/Above we can see that `to_zarr` defaulted to using the chunking of our dask arrays when writing to zarr storage. Each chunk shows up as its own file with metadata (both user metadata and array information) as separate files.!cat test.zarr/var1/.zarray !cat test.zarr/var1/.zattrsTo read zarr files we can either use the `zarr` python library or use Xarray again:zds = xr.open_zarr('test.zarr') zdsZarr - Pros/Cons Pros* Designed for efficient storage and use* Per-chunk compression* Multiple storage options (zip file, database, etc)* Multidimensional arrays* Named dimensions and coordinate variables* Data model compatible with Xarray (limited support for groups)* Metadata* Future support in NetCDF4 C library Cons* Relatively new* Library not available in every popular programming languageFurther Reading:* [zarr Documentation](https://zarr.readthedocs.io/en/stable/)* [Builtin zarr Storages](https://zarr.readthedocs.io/en/stable/api/storage.html) Conclusion* Text is slow, but might be needed if humans need to read it* Text is the easiest format to share with other languages/programs (excel)* Compression is good for space saving but not read/write speeds* Chunking data is good* Parquet and Zarr are cloud friendly* File formats are complex* There are so many file formatsThis Notebook:https://github.com/djhoese/data-file-formats Honarable MentionsThese files are no less useful than the above formats, but we have to draw the line somewhere.* Cloud Optimized GeoTIFFs (COG)* Apache Avro (row-based)* Apache Arrow (in-memory)* ORC* JSON* XML!rm -r *.zarr *.parq *.csv *.dat *.txt *.nc *.h5 *.npyBasic Tools: PythonPython is interpreted language used by many people.List = [1, 2, 3, 4, 5] print(f'List {List} size is {len(List)}')List [1, 2, 3, 4, 5] size is 5vis_using_node_titles_htmlThe vis.js website provides a 'getting started' example for its Network tool: http://visjs.org/docs/network/This notebook looks at recreating this in Jupyter notebooks, and adding node titles which display when the nodes is hovered over. Using an external html file with local linksfrom IPython.display import IFrame IFrame('vis_using_node_titles_local_links.html',650,450)Using an external file with web linksIFrame('vis_using_node_titles_web_links.html',650,450)PensionPension-related calculations.# when do you drop out of job life? # take number from config but make it changeable # what's money worth then compared to now? ~60% ==> inflation # take money from config but make it changeable # how many years do you plan to receive the pension money # take number from config but make it changeable # how much do you spend atm ~ per day, per week, per month, per year? # calc but adjustable # how much do you receive then from the govt then per month/year? # from cfg but adjustable # how much did you already put aside extra? 
# from cfg but adjustable # calculate how much you require per day/week/month/year: require = current_spend/inflation # calculate how much you need extra per day/week/month/year: require - receive # calculate how much you should put aside per day/week/month/year # addable options: interactively adjust how much you you should put aside - would receive - whats the difference from required would be %load_ext autoreload %autoreload 2 %matplotlib widget import wealth.pension wealth.pension.pension_info()Exercise 1Write a function `create_n_random_particles` that takes the arguments `n` (number of particles), `m` (mass of every particle) and a domain within to generate a random number (as in the class above).It should create an array with `n` elements and `dtype=particle_dtype` and then return that array.For each particle, the mass should be initialized to the value of `m` and the potential `phi` initialized to zero.For the `x` component of a given particle `p`, you might do something like```pythonp['x'] = domain * numpy.random.random()```@njit def create_n_random_particles(n, m, domain=1): ''' Creates `n` particles with mass `m` with random coordinates between 0 and `domain` ''' parts = numpy.zeros((n), dtype=particle_dtype) for p in parts: p.m = m p.phi = 0 p.x = domain*numpy.random.random() p.y = domain*numpy.random.random() p.y = domain*numpy.random.random() #your code here return partsTest it out!time_taken = %timeit -o parts = create_n_random_particles(1000, .001, 1)10000 loops, best of 3: 63.5 µs per loopExercise 2 Write a JITted function `distance` to calculate the distance between two particles of dtype `particle_dtype` Here's the `distance` method from the `Particle` class as a reference:```pythondef distance(self, other): return ((self.x - other.x)**2 + (self.y - other.y)**2 + (self.z - other.z)**2)**.5```@njit def distance(part1, part2): '''calculate the distance between two particles''' return ((part1['x'] - part2['x'])**2 + (part1['x'] - part2['x'])**2 + (part1['x'] - part2['x'])**2 )**.5 # your code hereTry it out!distance(parts[0], parts[1])Exercise 3 Modify the original `direct_sum` function (copied below for reference) to instead work a NumPy array of particles. Loop over each element in the array and calculate its total potential.```pythondef direct_sum(particles): """ Calculate the potential at each particle using direct summation method. Arguments: particles: the list of particles """ for i, target in enumerate(particles): for source in (particles[:i] + particles[i+1:]): r = target.distance(source) target.phi += source.m / r```@njit def direct_sum(particles): # take it away for i,target in enumerate(particles): for j,source in enumerate(particles): if i!=j: r = distance(target,source) target['phi'] += source['m']/r numba_time = %timeit -o direct_sum(parts)The slowest run took 10.02 times longer than the fastest. This could mean that an intermediate result is being cached. 1 loop, best of 3: 33.1 ms per loopМодель определения токсичных комментариев**Проект №12 Яндекс.Практикум - Data Science** Описание проекта**Исходные данные:**Интернет-магазин «Викишоп» запускает новый сервис. Теперь пользователи могут редактировать и дополнять описания товаров, как в вики-сообществах. То есть клиенты предлагают свои правки и комментируют изменения других. 
Магазину нужен инструмент, который будет искать токсичные комментарии и отправлять их на модерацию.В вашем распоряжении набор данных с разметкой о токсичности правок.**Цель проекта:**Разработать модель классификации комментариев на позитивные и негативные.**Условия задачи:**Значение метрики качества **F1** не меньше **0.75**. Структура проекта* [1. Загрузка и анализ данных](start)* [2. Подготовка данных](preparation)* [3. Обучение модели](model)* [4. Тестирование](testing)* [5. Выводы](conclusion) 1. Загрузка и анализ данных Импортируем необходимые библиотекиimport re import nltk import string import spacy import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection import GridSearchCV from sklearn.metrics import f1_score from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from nltk.corpus import stopwords from textblob import TextBlob, Word nltk.download('stopwords') nltk.download('wordnet') nltk.download('punkt') nltk.download('averaged_perceptron_tagger')[nltk_data] Downloading package stopwords to [nltk_data] C:\Users\Sergio\AppData\Roaming\nltk_data... [nltk_data] Package stopwords is already up-to-date! [nltk_data] Downloading package wordnet to [nltk_data] C:\Users\Sergio\AppData\Roaming\nltk_data... [nltk_data] Package wordnet is already up-to-date! [nltk_data] Downloading package punkt to [nltk_data] C:\Users\Sergio\AppData\Roaming\nltk_data... [nltk_data] Package punkt is already up-to-date! [nltk_data] Downloading package averaged_perceptron_tagger to [nltk_data] C:\Users\Sergio\AppData\Roaming\nltk_data... [nltk_data] Package averaged_perceptron_tagger is already up-to- [nltk_data] date!Загрузим данные в DataFramedataset = 'toxic_comments.csv' try: data = pd.read_csv(f'../datasets/{dataset}', sep=',') print(f'Прочитан файл с данными: "./datasets/{dataset}"') except: try: data = pd.read_csv(f'/datasets/{dataset}', sep=',') # yandex.praktikum print(f'Прочитан файл с данными: "/datasets/{dataset}"') except Exception as err: print(repr(err)) data.head(5) data.info() RangeIndex: 159571 entries, 0 to 159570 Data columns (total 2 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 text 159571 non-null object 1 toxic 159571 non-null int64 dtypes: int64(1), object(1) memory usage: 2.4+ MBОписание данных:* text - текст комментария* toxic — целевой признакprint(f"Кол-во текстов класса 0: {data['toxic'].value_counts()[0]} ({(data['toxic'].value_counts()[0]/data.shape[0])*100:.2f}%)") print(f"Кол-во текстов класса 1: {data['toxic'].value_counts()[1]} ({(data['toxic'].value_counts()[1]/data.shape[0])*100:.2f}%)")Кол-во текстов класса 0: 143346 (89.83%) Кол-во текстов класса 1: 16225 (10.17%)Выводы:* отсутствуют пропуски в данных* классы несбалансированы, учтём это при обучении моделей 2. 
Подготовка данных Лемматиизируем текст с помощью библиотек **spacy** и **TextBlob** Реализуем функции лемматизации# TextBlob lemmatizer def lemmatize_tblob(text): sentence = TextBlob(text) tag_dict = {"J": 'a', "N": 'n', "V": 'v', "R": 'r'} words_and_tags = [(w, tag_dict.get(pos[0], 'n')) for w, pos in sentence.tags] lemmatized_list = [wd.lemmatize(tag) for wd, tag in words_and_tags] return " ".join(lemmatized_list) # Spacy lemmatizer def lemmatize_spacy(text, lemmatizer): doc = lemmatizer(text) lemm_text = " ".join([token.lemma_ for token in doc]) return lemm_textФункция очистки текста (оставляет только буквы, кавычки и пробелы)def clear(text): cleaned = re.sub(r"[^a-zA-Z\' ]", ' ', text) return " ".join(cleaned.split())Лемматизируем текст%%time data['lemm_tblob'] = data['text'].apply(lemmatize_tblob) %%time sp = spacy.load('en_core_web_sm', disable=['parser', 'ner']) data['lemm_spacy'] = data['text'].apply(lemmatize_spacy, lemmatizer=sp)Wall time: 18min 30sОчистим лемматизированных текст от лишних символов%%time data['lemm_tblob'] = data['lemm_tblob'].apply(clear) data['lemm_spacy'] = data['lemm_spacy'].apply(clear) data print(data['lemm_spacy'][4]) print(data['lemm_tblob'][4]) print(data['lemm_spacy'][5]) print(data['lemm_tblob'][5]) print(data['lemm_spacy'][6]) print(data['lemm_tblob'][6]) print(data['lemm_spacy'][9]) print(data['lemm_tblob'][9]) print(data['lemm_spacy'][11]) print(data['lemm_tblob'][11])you sir be my hero any chance you remember what page that be on You sir be my hero Any chance you remember what page that 's on congratulation from I as well use the tool well talk Congratulations from me a well use the tool well talk COCKSUCKER before you pis around on my work COCKSUCKER BEFORE YOU PISS AROUND ON MY WORK alignment on this subject and which be contrary to those of DuLithgow alignment on this subject and which be contrary to those of DuLithgow bbq be a man and let discuss it maybe over the phone bbq be a man and let discuss it maybe over the phoneВыводы:* лемматизировали тексты используя библиотеки **spacy** и **TextBlob*** для **TextBlob** применили технику POS-tag (part-of-speech), в **spacy** она используется по умолчанию* значимых отличий в лемматизированных текстах разными библиотеками не замечено* лемматизация с **spacy** работает незначительно быстрее, чем с **TextBlob*** сравним на обученных моделях есть ли различия в качестве при использования лемматизированных текстов используя **spacy** и **TextBlob** 3. 
Обучение модели Разделим данные на тестовую и тренировочную выборку в пропорции 4:1target = data['toxic'] # Do not use extra space corpus_train, corpus_test, target_train, target_test = train_test_split(data['lemm_spacy'].values.astype('U'), target, test_size=0.2, stratify=target, random_state=42)Создадим матрицу оценки важности слов, рассчитав для каждого слова TF-IDFstop_words = set(stopwords.words('english')) count_vect = TfidfVectorizer(stop_words=stop_words) tf_idf = count_vect.fit_transform(corpus_train) print("Размер массива TF-IDF:", tf_idf.shape) target = data['toxic'] # Do not use extra space corpus_train_tblob, corpus_test_tblob, target_train_tblob, target_test_tblob = train_test_split(data['lemm_tblob'].values.astype('U'), target, test_size=0.2, stratify=target, random_state=42) stop_words = set(stopwords.words('english')) count_vect_tblob = TfidfVectorizer(stop_words=stop_words) tf_idf_tblob = count_vect_tblob.fit_transform(corpus_train) print("Размер массива TF-IDF:", tf_idf_tblob.shape)Размер массива TF-IDF: (127656, 138588)Random Forrest%%time # Parameters selected on smaller amount of data model = RandomForestClassifier(ccp_alpha=0.005, n_jobs=-1) parameters = {'max_depth' : [None, 30], 'class_weight' : [None, {0: 1, 1: 5}, {0: 1, 1: 8}]} clf = GridSearchCV(model, parameters) clf.fit(tf_idf, target_train) model_rfc = clf.best_estimator_ model_rfcWall time: 1h 14minПроверим F1 и баланс классов на обучающей выборкеpredicted = model_rfc.predict(tf_idf) f1 = f1_score(target_train, predicted) print(f1) unique, counts = np.unique(predicted, return_counts=True) print(dict(zip(unique, counts)))0.08057362507392077 {0: 127108, 1: 548}Logistic Regression%%time model = LogisticRegression(solver='lbfgs', n_jobs=-1, random_state=91) parameters = {'class_weight': [{0: 1, 1: 5}, {0: 1, 1: 7}]} clf = GridSearchCV(model, parameters) clf.fit(tf_idf, target_train) model_logreg = clf.best_estimator_ model_logregWall time: 49.8 sПроверим F1 и баланс классов на обучающей выборкеpredicted = model_logreg.predict(tf_idf) f1 = f1_score(target_train, predicted) print(f1) unique, counts = np.unique(predicted, return_counts=True) print(dict(zip(unique, counts)))0.8742113422504237 {0: 112899, 1: 14757}Выводы:* подобраны наилучшие гиперпараметры для **Random Forrest** и для **Logistic Regression*** на Random Forrest не удалось получить приемлимое значение F1 даже на обучающих данных, при этом при таком большом количестве параметро обучается очень долго* Logistic Regression немного переобучается 4. 
Тестирование Посчитаем TF-IDF для тестовой выборкиtf_idf_test = count_vect.transform(corpus_test) print("Размер массива TF-IDF:", tf_idf_test.shape) predicted_test = model_rfc.predict(tf_idf_test) print(f"F1 score Random Forrest: {f1_score(target_test, predicted_test)}") predicted_test = model_logreg.predict(tf_idf_test) print(f"F1 score Logistic Regression: {f1_score(target_test, predicted_test)}")F1 score Logistic Regression: 0.7809917355371901Сравним есть ли разница по сравнению с лемматизированных текстом с помощью TextBLobtf_idf_test_tblob = count_vect_tblob.transform(corpus_test_tblob) print("Размер массива TF-IDF:", tf_idf_test_tblob.shape) predicted_test_tblob = model_logreg.predict(tf_idf_test_tblob) print(f"F1 score Logistic Regression (TextBlob): {f1_score(target_test, predicted_test)}")F1 score Logistic Regression (TextBlob): 0.7809917355371901Scraper für Krypto-Kurse Wir interessieren uns in diesem Notebook für Krypto-Coins.Die Webseite https://coinmarketcap.com/ führt Marktdaten zu den hundert wichtigsten Coins auf.Mit einem einfachen Scraper werden wir diese Daten beschaffen und rudimentär analysieren. Vorbereitungimport requests from bs4 import BeautifulSoup import pandas as pd import timeScraperpath = ''Liste von allen Kryptowährungen Zuerst kucken wir auf der Seite, welches die 100 grössten Kryptowährungen sind, und laden uns Namen und Links derselbigen.base_url = 'https://coinmarketcap.com/' response = requests.get(base_url) doc = BeautifulSoup(response.text, "html.parser") currencies = doc.find_all('a', class_='currency-name-container link-secondary') currencies[0] len(currencies) currency_list = [] for currency in currencies: this_currency = {} this_currency['name'] = currency.text this_currency['link'] = currency['href'] currency_list.append(this_currency) df_currencies = pd.DataFrame(currency_list) df_currencies.head(2) df_currencies['link'] = df_currencies['link'].str.extract('/currencies/(.+)/') df_currencies.head(2) df_currencies.to_csv(path + 'currencies.csv', index=False)Daten von den einzelnen Währungen Zuerst testen wir mit einer Probewährung aus, wie wir an die Informationen kommen.base_url = 'https://coinmarketcap.com/currencies/bitcoin/historical-data/?start=20181012&end=20191012' response = requests.get(base_url) doc = BeautifulSoup(response.text, "html.parser") days = doc.find_all('tr', class_='text-right') days_list = [] cells = days[0].find_all('td') cells this_day = {} this_day['date'] = cells[0].text this_day['open'] = cells[1].text this_day['high'] = cells[2].text this_day['low'] = cells[3].text this_day['close'] = cells[4].text this_day['volume'] = cells[5].text this_day['marketcap'] = cells[6].text this_day for day in days: this_day = {} cells = day.find_all('td') this_day['date'] = cells[0].text this_day['open'] = cells[1].text this_day['high'] = cells[2].text this_day['low'] = cells[3].text this_day['close'] = cells[4].text this_day['volume'] = cells[5].text this_day['marketcap'] = cells[6].text days_list.append(this_day) df = pd.DataFrame(days_list) df.head(2)Nun wenden wir den Scraper auf alle Währungen andf_currencies = pd.read_csv(path + 'currencies.csv') df_currencies.head(2) len(df_currencies) currencies = df_currencies.to_dict(orient='records') url_start = 'https://coinmarketcap.com/currencies/' url_end = '/historical-data/?start=20181012&end=20191012' for currency in currencies: print ('working on: ' + currency['name']) url = url_start + currency['link'] + url_end response = requests.get(url) doc = BeautifulSoup(response.text, 
"html.parser") # print (doc) days = doc.find_all('tr', class_='text-right') days_list = [] this_day = {} for day in days: this_day = {} cells = day.find_all('td') this_day['date'] = cells[0].text this_day['open'] = cells[1].text this_day['high'] = cells[2].text this_day['low'] = cells[3].text this_day['close'] = cells[4].text this_day['volume'] = cells[5].text this_day['marketcap'] = cells[6].text days_list.append(this_day) df = pd.DataFrame(days_list) filename = currency['name'] + '.csv' df.to_csv(path + 'data/' + filename, index=False) time.sleep(10) print('Done')working on: Bitcoin working on: Ethereum working on: XRP working on: Tether working on: Bitcoin Cash working on: Litecoin working on: EOS working on: Binance Coin working on: Bitcoin SV working on: Stellar working on: TRON working on: Cardano working on: UNUS SED LEO working on: Monero working on: Chainlink working on: Huobi Token working on: IOTA working on: Dash working on: Tezos working on: Ethereum Classic working on: Cosmos working on: NEO working on: Maker working on: USD Coin working on: Crypto.com Coin working on: NEM working on: Ontology working on: Dogecoin working on: Zcash working on: Basic Attenti... working on: Paxos Standard working on: HedgeTrade working on: VeChain working on: TrueUSD working on: Qtum working on: Decred working on: Ravencoin working on: 0x working on: V Systems working on: ZB working on: Bitcoin Gold working on: Holo working on: ABBC Coin working on: EDUCare working on: OmiseGO working on: Swipe working on: DigiByte working on: Centrality working on: A[...]2D array- mscdef kadane(arr, start, finish, n): # initialize sum, maxSum and Sum = 0 maxSum = -999999999999 i = None # Just some initial value to check # for all negative values case finish[0] = -1 # local variable local_start = 0 for i in range(n): Sum += arr[i] if Sum < 0: Sum = 0 local_start = i + 1 elif Sum > maxSum: maxSum = Sum start[0] = local_start finish[0] = i # There is at-least one # non-negative number if finish[0] != -1: return maxSum # Special Case: When all numbers # in arr[] are negative maxSum = arr[0] start[0] = finish[0] = 0 # Find the maximum element in array for i in range(1, n): if arr[i] > maxSum: maxSum = arr[i] start[0] = finish[0] = i return maxSum # The main function that finds maximum # sum rectangle in M[][] def findMaxSum(M): global ROW, COL # Variables to store the final output maxSum, finalLeft = -999999999999, None finalRight, finalTop, finalBottom = None, None, None left, right, i = None, None, None temp = [None] * ROW Sum = 0 start = [0] finish = [0] # Set the left column for left in range(COL): # Initialize all elements of temp as 0 temp = [0] * ROW # Set the right column for the left # column set by outer loop for right in range(left, COL): # Calculate sum between current left # and right for every row 'i' for i in range(ROW): temp[i] += M[i][right] # Find the maximum sum subarray in # temp[]. The kadane() function also # sets values of start and finish. # So 'sum' is sum of rectangle between # (start, left) and (finish, right) which # is the maximum sum with boundary columns # strictly as left and right. Sum = kadane(temp, start, finish, ROW) # Compare sum with maximum sum so far. 
# If sum is more, then update maxSum # and other output values if Sum > maxSum: maxSum = Sum finalLeft = left finalRight = right finalTop = start[0] finalBottom = finish[0] # Prfinal values print("(Top, Left)", "(", finalTop, finalLeft, ")") print("(Bottom, Right)", "(", finalBottom, finalRight, ")") print("Max sum is:", maxSum) # Driver Code ROW = 4 COL = 5 M = [[1, 2, -1, -4, -20], [-8, -3, 4, 2, 1], [3, 8, 10, 1, 3], [-4, -1, 1, 7, -6]] # Function call findMaxSum(M)(Top, Left) ( 1 1 ) (Bottom, Right) ( 3 3 ) Max sum is: 29MACHINE LEARNING AND STATISTICS PROJECT 2020In this notebook I am creating my project for the Machine Learning and Statistics 2020 module.the following sections: References used for this project completion, project instructions as specified by the lecturer, the purpose of the project, and the different sections explaining the processes with markdown cells along with further comments inside the code cells. ***References- [Fundamentals DA project's repository ](https://github.com/Ainara12/Fundamentals-Project/blob/master/Fundamentals%20DA-Project.ipynb)- [Introduction to Linear and Polynomial Regression](https://towardsdatascience.com/introduction-to-linear-regression-and-polynomial-regression-f8adc96f31cb)- [Numpy polyfit function documentation](https://numpy.org/doc/stable/reference/generated/numpy.polyfit.html)- [Numpy poly1d function documentation](https://numpy.org/doc/stable/reference/generated/numpy.poly1d.html)- [Lecturer's simple linear regression Jupyter notebook](https://github.com/ianmcloughlin/jupyter-teaching-notebooks/blob/master/simple-linear-regression.ipynb)- [Machine Learning Polynomial Regression](https://www.w3schools.com/python/python_ml_polynomial_regression.asp)- [Introduction to Keras](https://keras.io/getting_started/intro_to_keras_for_engineers/)- [Lecturer's linear regression in Keras Jupyter notebook](https://github.com/ianmcloughlin/jupyter-teaching-notebooks/blob/master/keras-linear.ipynb)- [Neural network with Keras tutorial](https://machinelearningmastery.com/tutorial-first-neural-network-python-keras/)- [Classification and regression using Keras tutorial](https://stackabuse.com/tensorflow-2-0-solving-classification-and-regression-problems/) - [Linear regression using Python Sklearn video tutorial](https://www.youtube.com/watch?v=b0L47BeklTE&ab_channel=RylanFowers) - [How to deploy a machine learning module using flask](https://towardsdatascience.com/deploy-a-machine-learning-model-using-flask-da580f84e60c) - [Saving a machine learning model](https://www.geeksforgeeks.org/saving-a-machine-learning-model/)- [How to save ML models](https://www.kaggle.com/prmohanty/python-how-to-save-and-load-ml-models) *** Project instructions![Project%20Instructions.jpg](attachment:Project%20Instructions.jpg) Overall purpose of this projectMy goal with this project is to use machine learning to make predictions using the 'powerproduction' dataset. I have some works done for this in my project for the module Fundamentals of Data Analysis that I will use as reference to create a model that predicts output wind turbine power from wind speed values [Link to Fundamentals DA project in github](https://github.com/Ainara12/Fundamentals-Project/blob/master/Fundamentals%20DA--Project.ipynb).Once this model is complete I will create a script that runs a web service based on this model and a Dockerfile to build and run the web service in a container. 
To achieve this goal I am using the information and knowledge gathered during the course of this module along with the references consulted and detailed in this document. Dataset analysis Loading and observing raw datasetBefore we get into create a prediction model, I am looking into the raw dataset to get a feel into how it looks and see some descriptive statistics. See works in code cells below:#Importing the modules I am going to use import pandas as pd #to load and organize dataset import matplotlib.pyplot as plt #for visualisation import numpy as np #to work with arrays and apply regression functions #loading dataset data=pd.read_csv(r'Powerproduction dataset.csv', header=0) #converting this dataset into dataframe with pandas powerdata=pd.DataFrame (data, columns = ['speed','power']) print(powerdata) #initially describing dataset using pandas functionalities powerdata.describe()The code cells above show us the structure of this dataset. This dataset is based on a real life scenario in which we have 499 rows (initially 500 but I dropped first row as had no information on it) and two columns for wind speed and power generated. The dataset shows the relation between the wind speed and the power output derived from this speed.Using command describe we get some general information such as the mean or average for 'speed' being 12.62 while power has 48.11, the std and minimum and max values. Now on the cells below I am plotting this data so we can see a clearer picture of the correlation of these 2 variables.#Plotting raw dataset %matplotlib inline plt.rcParams['figure.figsize']=([15, 10]) #plotting, speed is shown blue dots while power is the green section plt.plot( 'speed', 'y1', data=powerdata, marker='o', markerfacecolor='blue', markersize=3, color='skyblue', linewidth=4) plt.plot( 'power', 'y2', data=powerdata, marker='2', color='olive', linewidth=4) plt.legend()#adding legend plt.show()As seen above and considering my analysis in [Fundamentals-DA project's repository](https://github.com/Ainara12/Fundamentals-Project/blob/master/Fundamentals%20DA--Project.ipynb), I have concluded that this dataset seems to fit better in a Polynomial Linear Regression. A Polynomial Linear Regression, uses the relationship between the variables to find the best way to draw a line between data points, but this line does not need to be straight.This type of linear regression has some advantages that might be useful for this dataset analysis and prediction, such as :- It has a broad range of functions that can be a fit under it.- It fits a wide range of curvature source.Some inconvenientes might also arise:- The presence of outliers can affect very hard the results- There are in general fewer model validation tools for the detection of outliers in this type of regression than for simple linear.See below an example on how I created the curve that fits this data and found the **R-Squared value** to be very high ( closer to 1). 
In order to apply the polynomial regression to our plot I am creating a variable that will use **numpy poly1d** and **numpy polyfit** functions to generate this curve to fit the data.#Applying Polynomial regression to our plot: #first I separate the 2 variables to represent them power=powerdata['speed'] speed=powerdata['power'] Polynom=np.poly1d(np.polyfit(speed, power,deg=3)) xline=np.linspace(0.0, 120, 200) # creatingand even spaced axis #plotting these elements with a scatter plot plt.rcParams['figure.figsize']=([15, 10]) plt.scatter( speed, power)# Using scatterplot to represent the two variables plt.plot(xline, Polynom(xline), 'r*')#using function to add our xline along wit the result of the poly1d and polyfit functions plt.show()Now we calculate **R-squared value** also called **Coefficient of determination** , this value measures how well a regression model fits to the data. We can calculate using the **numpy polyfit function** and then proceed to square this value. This coefficient can be positive or negative. **Numpy polyfit function** uses Pearson's correlation coefficient , which differs from the **coefficient of determination** above described as it measures the strength of the linear relationship between 2 sets of observations, in this case how much the output power depends on the wind speed;power output is the dependent variable (*y* value) . It also tells whether the relationship is positive or negative. In this case since we have a Polynomial linear regression , I am using **Sklearn** to obtain the **coefficient of determination** .I followed steps found on this [guide](https://www.w3schools.com/python/python_ml_polynomial_regression.asp)I am describing the process on cell below.#calculating R-squared value with sklearn #first, I import sklearn and specific module from sklearn.metrics import r2_score #Using r2_score functionality with my previous created variable 'Polynom' r2=r2_score(power,Polynom(speed) ) print('The R-squared value is: ',r2)The R-squared value is: 0.7321827537382541How we interpret the R-squared value? The closest a value is to 1 the better fit, in this case with 0.7321827537382541 we have a moderate to very good fit. This implies that most of the changes in the dependent variable or *y* (power) are explained by the corresponding changes in *x* or independent variable (speed). Presenting and training *Model*In this section I am going to create a model to predict values, based on what we have learn in previous sections about this dataset. Once I created the model I will analyse its accuracy. Presenting modelMy model is based on this [guide/tutorial](https://www.w3schools.com/python/python_ml_polynomial_regression.asp) in which after confirming that Polynomial Regression fits the data very well, I use module **r2_score** from **sklearn** in order to predict how much power will be generated from a specific wind speed value. 
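For comparison, the same cubic fit can also be expressed as a scikit-learn pipeline. This is only a sketch (the names `x`, `y` and `cubic` are hypothetical, taken from the `powerdata` DataFrame loaded above) and not the model used in the steps below:

```python
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression

# Cubic polynomial regression, equivalent in spirit to np.polyfit(deg=3).
x = powerdata['speed'].values.reshape(-1, 1)
y = powerdata['power'].values
cubic = make_pipeline(PolynomialFeatures(degree=3), LinearRegression())
cubic.fit(x, y)
print('R-squared:', cubic.score(x, y))
print('Predicted power at 10 m/s wind:', cubic.predict([[10.0]])[0])
```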
In this example we are going to test the model considering that wind speed is :2.179.Let's see the steps I have taken below and what output power result we obtain:#Using my variable Polynom created in previous steps Polynom=np.poly1d(np.polyfit(speed, power,deg=3)) #I enter the value that I want to find out with this model Power_generated = Polynom(2.179) print('This is the amount of power generated considering win speed is 2.179 mph:') print(Power_generated)This is the amount of power generated considering win speed is 2.179 mph: 5.912036065122074Experimenting with KerasIn this section of the project I am experimenting with sklearn neural networks, following [lecture's tutorials](https://web.microsoftstream.com/video/b3c0a6ba-86b6-4f4a-bc1d-48d26c868bea). My focus here is to have a portion of the data that will be part of the train model and reach then more accurate results. See my attemp below: What is Keras for Machine learning?Keras for machine learning is Keras for machine learning is a Python library that can be used on top of TensorFlow to make deep learning models implementation faster and easier.I am using this [tutorial](https://stackabuse.com/tensorflow-2-0-solving-classification-and-regression-problems/) as reference.See steps below.- First I import the necessary 'Sklearn' modules to split the data into training and test sets.- I import 'Tensorflow keras layers' and 'Tensorflow keras models' to create and train the model.#First I import the needed modules to divide dataset into training and #test sets from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20, random_state=0) from tensorflow.keras.layers import Input, Dense, Activation,Dropout from tensorflow.keras.models import Model- Next step is to create the model using 'Sequential'. There are two ways to build Keras models ( Functional and Sequential) and in this case I am using same way as the tutorial as this is a simplest type of model, with a linear stock of layers. 
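For contrast, the same small network could be written with the Functional API. This is a sketch only (the names `inputs`, `hidden`, `outputs` and `functional_model` are mine); the Sequential version below is the one actually trained:

```python
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

# One scalar input (wind speed), a 2-unit sigmoid hidden layer,
# and a single linear output (predicted power).
inputs = Input(shape=(1,))
hidden = Dense(2, activation='sigmoid')(inputs)
outputs = Dense(1, activation='linear')(hidden)
functional_model = Model(inputs=inputs, outputs=outputs)
functional_model.compile(optimizer='adam', loss='mean_squared_error')
```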
For this dataset I think is enough to use Sequential.#Creating model model = kr.models.Sequential() model.add(kr.layers.Dense(2, input_shape=(1,), activation='sigmoid', kernel_initializer="glorot_uniform", bias_initializer="glorot_uniform")) model.add(kr.layers.Dense(1, activation='linear', kernel_initializer="glorot_uniform", bias_initializer="glorot_uniform")) model.compile(kr.optimizers.Adam(lr=0.001), loss='mean_squared_error')#Adjusting learn rate to 0.001 for more accuracyIn the next step we are training the model, I selected batch size 10 which means the model will pass 10 values every time and an epoch of 500.#training model history= power_model = model.fit(x_train, y_train, batch_size=10, epochs=1000, verbose=1, validation_split=0.2)Epoch 1/1000 32/32 [==============================] - 0s 4ms/step - loss: 242.0819 - val_loss: 243.7709 Epoch 2/1000 32/32 [==============================] - 0s 2ms/step - loss: 242.0652 - val_loss: 243.7523 Epoch 3/1000 32/32 [==============================] - 0s 2ms/step - loss: 242.0866 - val_loss: 243.6749 Epoch 4/1000 32/32 [==============================] - 0s 2ms/step - loss: 242.0461 - val_loss: 243.7570 Epoch 5/1000 32/32 [==============================] - 0s 3ms/step - loss: 242.1178 - val_loss: 243.7638 Epoch 6/1000 32/32 [==============================] - 0s 2ms/step - loss: 242.0587 - val_loss: 243.7550 Epoch 7/1000 32/32 [==============================] - 0s 2ms/step - loss: 242.0300 - val_loss: 243.6990 Epoch 8/1000 32/32 [==============================] - 0s 3ms/step - loss: 242.0920 - val_loss: 243.7663 Epoch 9/1000 32/32 [==============================] - 0s 3ms/step - loss: 242.0689 - val_loss: 243.6947 Epoch 10/1000 32/32 [==============================] - 0s 3ms/st[...]Now we plot the result. And can see that the result changes as the model learn everytime.#plotting results #resizing plt.rcParams['figure.figsize']=([15, 10]) plt.plot(poly['x'], poly['y'], label='actual') plt.plot(poly['x'], model.predict(poly['x']), label='prediction') plt.legend() #checking History object to see record of the loss values #of the loss values and metric values during training '''Source:https://www.tensorflow.org/guide/keras/train_and_evaluate ''' history.history # Making predictions with this model: prediction=model.predict([4.0,0.234]) print('This is the amount of power generated for the wind speed you have provided:') print(prediction)This is the amount of power generated for the wind speed(s) you have provided: [[4.3476424] [4.3300843]]To evaluate this model, I used the method shown in this tutorial, which is the root mean squared error method. This method consist of he square root of the mean of the square of all of the error.#Evaluating the performance of a regression model on test set using #root mean squared error method from sklearn.metrics import mean_squared_error from math import sqrt pred_train = model.predict(x_train) print(np.sqrt(mean_squared_error(y_train,pred_train))) pred = model.predict(x_test) print(np.sqrt(mean_squared_error(y_test,pred)))15.996012304201834 17.039363794649415Conclusion about this modelLooking at the shape that the prediction is creating it seems that this prediction model is more accurate.Cosidering the evaluation method we have performed in the markdown cell above using root mean squared error, we can see that both the train and the set model are performing good as they both give similar values. In cases where for example, the train model is performing better than the test model we talk about 'overfitting'. 
'Overfitting' means that the model has an excessively complex structure and learns both the existing relations among data and noise. On the other hand, 'Underfitting'is often the consequence of model being unable to encapsulate the relations among data.[source](https://realpython.com/train-test-split-python-data/underfitting-and-overfitting). Creating *Model* using SklearnAfter my approaches made in previous sections, I have decided to use one last approach with module 'Sklearn'.Using Keras and Tensorflow to create ad train models using this dataset was good practice and for sure I will keep learning about it. I would like now to use this model for linear regression using [this tutorial](https://www.youtube.com/watch?v=b0L47BeklTE&ab_channel=RylanFowers) which I think it might be easier and more accurate to make predictions as my model to be included on the second part of this project.See details of the process in the markdown and code cells below:#First we import the modules we are going to use from sklearn.linear_model import LinearRegression #import linear regression model from sklearn.model_selection import train_test_split #to divide data import pandas as pd import numpy as np import matplotlib.pyplot as plt #loading data in this section for better understanding Powerdataset=pd.read_csv('Powerproduction dataset.csv', delimiter=',') x=Powerdataset['speed'] y=Powerdataset['power'] #plotting dataset to have this included on this section: Powerdataset.plot(kind='scatter', x= 'speed', y='power') plt.show() #Let's create our linear regression model #doing test train split X_train, X_test, y_train, y_test= train_test_split(Powerdataset.speed, Powerdataset.power) #Let's see how this split looks plt.scatter(X_train, y_train, label='Training Data', color='g', alpha=.7) plt.scatter(X_test, y_test, label='Test Data', color= 'r', alpha=.7) plt.legend() plt.title('Test train split') plt.show()Once we have our data split so one part is used for training and the other part is used for testing, as we can see in the visualization above this cell, we move onto the Model creation section.#Model creation #naming model LR as Linear Regression as in the tutorial for #easier understanding LR=LinearRegression() LR.fit(X_train.values.reshape(-1,1), y_train.values)#Adding x_train and y_train values and reshaping X_train values as they need to be in #a 1d shape for this to workThe next step is to use the model to predict in out test data. See below:# Predicting prediction=LR.predict(X_test.values.reshape(-1,1)) #Plotting X_test against prediction results in same plot plt.plot(X_test, prediction, label='Linear Regression', color='r') plt.scatter(X_test, y_test, label='Actual Test Data', color='b', alpha=.7) plt.legend() plt.show()As seen in previous sections where we found the **R squared value** for Simple Linear regression model used on this dataset is 0.531347729791333, which is a low to moderate fit. Let's try this model to predict the power output based on the wind speed values we enter as input on our model.#Making predictions for specific values #using command predict we enter a sample wind speed: print('This is the power generated considering your input: ') LR.predict(np.array([[25.00]]))[0]This is the power generated considering your input:We can see that the power generated result we received is the same as the power result we can see in our plot above. 
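As one more sanity check (a sketch reusing the `LR` model and the `X_test`/`y_test` split from above; `test_pred` is a name I introduce here), we can also look at the root mean squared error on the held-out test data:

```python
import numpy as np
from sklearn.metrics import mean_squared_error

# RMSE of the simple linear model on the test set, in the same units as power.
test_pred = LR.predict(X_test.values.reshape(-1, 1))
print('Test RMSE:', np.sqrt(mean_squared_error(y_test, test_pred)))
```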
The model seems to be accurate enough.Finally we will 'Sklearn' score function to evaluate this model's accuracy.#score function LR.score(X_test.values.reshape(-1,1),y_test.values)Considering the maximum score is 1.0, this does not seem a bad model to fit in this dataset. Saving my model The last action I am going to do with this model is to save it using 'Joblib' module so it can be used by our server to predict the power values depending on the wind speed. I have used this [source](https://www.kaggle.com/prmohanty/python-how-to-save-and-load-ml-models) and this [source](https://towardsdatascience.com/deploy-a-machine-learning-model-using-flask-da580f84e60c) to find out the best approach.#First we import the module joblib import joblib joblib_file = "joblib_LR_Model.pkl" joblib.dump(LR, "joblib_LR_Model.pkl" ) #saving model as a pickle file. # Load from file joblib_LR_model = joblib.load(joblib_file) joblib_LR_model #Calculate test score score = joblib_LR_model.score(X_test.values.reshape(-1,1),y_test.values) print(score) print("Test score: {0:.2f} %".format(100 * score)) # Predict the Labels using the reloaded Model Ypredict = joblib_LR_model.predict(np.array([[25.00]]))[0] print('This is the power generated considering your input:',Ypredict)This is the power generated considering your input: 108.19835082522115This was my first approach to save the model. I ended using 'Pickle' instead as it seemed to work better and more simply. For this I used a Linear regression model following this [tutorial](https://towardsdatascience.com/deploy-a-machine-learning-model-using-flask-da580f84e60c)See process below.#Importing needed modules from sklearn.model_selection import train_test_split #sklearn to select model and LR from sklearn.linear_model import LinearRegression import pickle #importing to save model in disk #loading dataset dataset = pd.read_csv('Powerproduction dataset.csv') X = dataset.iloc[:, :-1].values y = dataset.iloc[:, 1].values #train data X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.33, random_state = 0) #creating model regressor = LinearRegression() regressor.fit(X_train, y_train) y_pred = regressor.predict(X_test) #Saving model with picker pickle.dump(regressor, open('model.pkl','wb')) #Loading and printing model to make predictions model = pickle.load(open('model.pkl','rb')) print(model.predict([[7.20]]))#this command predicts for example how much power generates with 7.20 mps of wind[20.97181645]Web service developmentIn this section I am going to go through the steps I followed to complete the last part of the tasks which is to develop a web service that will work in conjunction with my Machine Learning model. 
This web server will respond with predicted power values based on speed values sent as HTTP requests.You can find my references for this section below: ***References: - [Lecturer's example app repository](https://github.com/ianmcloughlin/random-app)- [Creating a virtual environment ](https://realpython.com/python-virtual-environments-a-primer/)- [Flask Quick start documentation](https://flask.palletsprojects.com/en/1.1.x/quickstart/)- [Bootstrap introduction](https://getbootstrap.com/docs/5.0/getting-started/introduction/)- [Stackoverflow post ](https://stackoverflow.com/questions/65515800/the-method-is-not-allowed-for-the-requested-url-using-flask-post-method)- [Deploy machine learning model using flask](https://towardsdatascience.com/how-to-easily-deploy-machine-learning-models-using-flask-b95af8fe34d4)- [Send data from textbox into flask](https://stackoverflow.com/questions/12277933/send-data-from-a-textbox-into-flask)- [Python Flask tutorial](https://www.youtube.com/watch?v=MwZwr5Tvyxo&list=RDCMUCCezIgC97PvUuR4_gbFUs5g&index=2&ab_channel=CoreySchafer)- [Deploy machine learning models using flask](https://www.kdnuggets.com/2019/10/easily-deploy-machine-learning-models-using-flask.html)*** App creation with Flask first attemptMy first intention was to create an app that loaded the model saved using 'Pickle' or 'Joblib' modules as displayed above and then calculate the predictions to be shown in 'results' tab for the users. Finally, this approach did not work as I wanted and it shows a graphic with the prediction values extracted from the linear regression model using Sklearn Linear Regression as shown in previous sections of this notebook. Please see below documentation on how I tried to create this version , the works are available in this repository on the drafts folder in a subfolder called *initial_app_trial*.Please see my code for this initial app below for your reference. For this I used this [guide](https://towardsdatascience.com/how-to-easily-deploy-machine-learning-models-using-flask-b95af8fe34d4) as reference: -The structure I followed was to create an app called 'app.py' as a server , a separate file to respond to the http requests and added the model file . Then I created separate folders *Static*, for images and *templates* for my html index file which I intended to contain a button where user could select a value for wind speed and then obtaining a prediction of the power generated based on the model.#initial app trial app.py code. 
from flask import Flask, render_template, jsonify,request,redirect # here I import flask, render_template to load my html document, #jsonify module which serializes data to JSON format, request to manage object requests import pickle import numpy as np app = Flask(__name__) #creating instance of an app with Flask model = pickle.load(open('model.pkl','rb')) #Loading my model with pickle @app.route('/') #defining root route def home(): return render_template('index.html') @app.route('/predict',methods=['POST']) #defining predict route with method post , here I wanted to get values from the index # form def predict(): int_features = [int(x) for x in request.form.values()] final_features = [np.array(int_features)] prediction = model.predict(final_features) output = round(prediction[0], 2) return render_template('index.html', prediction_text='Power generated would be{}'.format(output)) @app.route('/results',methods=['POST']) def results(): data = request.get_json(force=True) prediction = model.predict([np.array(list(data.values()))]) output = prediction[0] return jsonify(output) if __name__=='__main__': #function to add debugging options app.run(debug=True, port=5000) #request. py file code import requests url = 'http://127.0.0.1:5000/results' #selecting url and the data to be returned as wind. r = requests.post(url,json={'wind':12}) print(r.json()) #Finally this is the index html document that I wanted to used as form to introduce the inputs Deployment Tutorial 1 Since I was getting repeatedly some error messages when trying this approach. I posted a question in Stackoverflow that gave me some insights. I would like to add this here as reference that I ask for help and I was advised some things that I tried. See [post here](https://stackoverflow.com/questions/65515800/the-method-is-not-allowed-for-the-requested-url-using-flask-post-method). App creation with Flask last attemptAfter many trials ( I have some of them included on my drafts folder), I finally created an app that initially I wanted to get the model saved and use to make predictions. I am going to keep working on this, then I am leaving some of the parts of this code on it , although they might not be functional at the moment. The result is in the folder named **lr_app**[direct link here](https://github.com/Ainara12/Machine-Learning-Statistics-Project2020/tree/master/my_project/lr_app). In order to create this app ( and previous attempts) I created a virtual environment following this [guide](https://realpython.com/python-virtual-environments-a-primer/) since I was having issues to use Flask with my Python configuration. Once I created this environment I was able to create my folder including this environment and work with Flask. See here below a copy of the code I used to create this app and some pictures of what you can see when running it. The lr_app includes the following files which are all included on the app folder: - Static folder: Includes images used. - templates: Includes the html files. 'About.html'for instructions in how to get to the different parts of the web app and 'myform.html' which is the form to enter the values. - .dockerignore file.- Dockefile file as requested contains the commands that user need to call on command line to assemble the image. - model.pkl : Model saved with Pickle to be loaded in the app. - README : This readme file details instructions to run app in different OS and also instructions in how to build and run Docker image- Requirements.txt: Includes the packages needed to run this app. 
- server.py : This is the app code#code for server.py #importing necessary modules from flask import Flask, render_template, request import pickle app = Flask(__name__) model = pickle.load(open('model.pkl','rb')) #Loading my model with pickle @app.route('/') def index(): return render_template('about.html') return app.send_static_file('wind-turbines.jpg') @app.route('/submit_form',methods=['POST','GET']) #Creating a submit form for user to enter the wind speed necessary values def submit_form(): return render_template('myform.html')# myform file is in 'templates' folder return using render_template method if request.method == 'POST': # Get the data from the POST request. data = request.form('text', type=int) return redirect(url_for('predict', pred=data)) #returning data entered through form as redirect url. else: print('Something went wrong here') @app.route('/') #accessing to this adding the entered value into the route returns the plot with our model #prediction values in relation to the actual values def predict(pred): return app.send_static_file('Prediction plot LR.png') if __name__=='__main__': app.run(debug=True, port=5000) #myform.html document code Power output calculator
Please enter wind speed to calculate power output
#about.html document text: an "About page" title, an "Instructions for this web app" heading, and the note below
'With this app you can enter wind speed values to get a prediction graphic based on my model based on the Powerproduction dataset. Please navigate to the next page to access to submit form using the route /submit_form' Collect the weights in the first layer of all the models in different epochsepoch_list = [i * 10 for i in range(1, 31)] weights_mse_list = [] with tf.Session() as sess: for epoch in epoch_list: model_path = f'./experiments_mse/trained_model_mse/epochs_{epoch}/model_after_epochs-{epoch}.meta' ckp_dir = f'./experiments_mse/trained_model_mse/epochs_{epoch}/' saver = tf.train.import_meta_graph(model_path) saver.restore(sess, tf.train.latest_checkpoint(ckp_dir)) graph = tf.get_default_graph() trainable_variables = tf.trainable_variables() weight_values = sess.run(trainable_variables[0]) weight_values = weight_values.flatten() weights_mse_list.append(weight_values) plt.figure(figsize=(8, 6)) sns.histplot(data=weights_mse_list[0], stat='density') plt.xlabel('w1', fontsize=16) plt.ylabel('Density', fontsize=16) plt.title('Epoch_10', fontsize=18) plt.figure(figsize=(8, 6)) sns.histplot(data=weights_mse_list[1], stat='density') plt.xlabel('w1', fontsize=16) plt.ylabel('Density', fontsize=16) plt.title('Epoch_20', fontsize=18) plt.figure(figsize=(8, 6)) sns.histplot(data=weights_mse_list[20], stat='density') plt.xlabel('w1', fontsize=16) plt.ylabel('Density', fontsize=16) plt.title('Epoch_210', fontsize=18) plt.figure(figsize=(20, 14)) for i in range(30): plt.subplot(6, 5, i+1) sns.histplot(data=weights_mse_list[i], stat='density') plt.title(f'Epoch_{(i+1)*10}') #plt.axis('off') plt.tight_layout() plt.savefig('./weights_vis/weights_hist_all_mse.png') plt.figure(figsize=(8, 6)) sns.histplot(data={"Epoch_1": weights_mse_list[0], "Epoch_30": weights_mse_list[-1]}, stat='density', alpha=0.6) #sns.histplot(daga=weights_1_list[-1], state='density', label='epoch_310') plt.xlabel('w1', fontsize=16) plt.ylabel('Density', fontsize=16) plt.savefig("./weights_vis/epoch_1_vs_epoch_30_hist_mse.png") #plt.title('Epoch_10', fontsize=18)histogram plots with CE objectiveepoch_list = [i * 10 for i in range(1, 31)] weights_ce_list = [] with tf.Session() as sess: for epoch in epoch_list: model_path = f'./experiments_ce/trained_model/epochs_{epoch}/model_after_epochs-{epoch}.meta' ckp_dir = f'./experiments_ce/trained_model/epochs_{epoch}/' saver = tf.train.import_meta_graph(model_path) saver.restore(sess, tf.train.latest_checkpoint(ckp_dir)) graph = tf.get_default_graph() keys = graph.get_all_collection_keys() trainable_variables = tf.trainable_variables() weight_values = sess.run(trainable_variables[0]) weight_values = weight_values.flatten() weights_ce_list.append(weight_values) plt.figure(figsize=(20, 14)) for i in range(30): plt.subplot(6, 5, i+1) sns.histplot(data=weights_ce_list[i], stat='density') plt.title(f'Epoch_{(i+1)*10}') #plt.axis('off') plt.tight_layout() plt.savefig('./weights_vis/weights_hist_all_ce.png') plt.figure(figsize=(8, 6)) sns.histplot(data=weights_ce_list[0], stat='density') plt.xlabel('w1', fontsize=16) plt.ylabel('Density', fontsize=16) plt.title('Epoch_10', fontsize=18) plt.figure(figsize=(8, 6)) sns.histplot(data=weights_ce_list[-1], stat='density') plt.xlabel('w1', fontsize=16) plt.ylabel('Density', fontsize=16) plt.title('Epoch_300', fontsize=18) plt.figure(figsize=(12, 6)) plt.subplot(1, 2, 1) sns.histplot(data=weights_ce_list[0], stat='density') plt.title(f'Epoch_10') plt.subplot(1, 2, 2) sns.histplot(data=weights_ce_list[-1], stat='density', color='red') 
plt.title(f'Epoch_300') plt.savefig("./weights_vis/epoch_1_vs_epoch_30_hist_ce.png") #plt.axis('off')Ques1. Read the data into Jupyter Environmentdf = pd.read_csv('Cricket.csv',encoding='cp1252') df.head() df.shape df.info() RangeIndex: 79 entries, 0 to 78 Data columns (total 13 columns): Player 79 non-null object Span 79 non-null object Mat 79 non-null int64 Inns 79 non-null int64 NO 79 non-null int64 Runs 79 non-null int64 HS 79 non-null object Ave 79 non-null float64 BF 79 non-null int64 SR 79 non-null float64 100 79 non-null int64 50 79 non-null int64 0 79 non-null int64 dtypes: float64(2), int64(8), object(3) memory usage: 8.1+ KBQues2. Standardize the variablesfrom sklearn.preprocessing import StandardScaler sc = StandardScaler() df_new = df.drop("HS",axis = 1) df_new = df_new.drop("Player",axis = 1) df_new = df_new.drop("Span",axis = 1) df_new.head() #Standardizing the scale df_std = sc.fit_transform(df_new) col = df_new.columns df_new = pd.DataFrame(df_std, columns=col) df_new.head()Ques3. Find out the optimal number of clustersfrom sklearn.cluster import KMeans #Elbow Method - Find the optimal Value of Clusters ks = range(1,11) inertias = [] for k in ks: #Define the model model = KMeans(n_clusters=k) #Fit the model model.fit(df_new) #Append the Inertias inertias.append(model.inertia_) plt.plot(ks,inertias,"-o") plt.xlabel("No. of Clusters") plt.ylabel("Inertia Values") plt.xticks(ks) plt.show()The optimal Number of cluster is 6 Ques4. Perform K-Means clustering#Model the kmeans with ncluster Model = KMeans(n_clusters=6,random_state=42) Model.fit(df_new) Labels=Model.predict(df_new) Labels Model.cluster_centers_ len(Labels)Ques5. Attach clustered to each rowdf_new['Labels'] = Labels df_new.head() df_new['Labels'].value_counts()Ques6. Find out which cluster Belongs to?df['Labels'] = Labels df.head() df.ix[17]C:\Users\aujas\AppData\Local\Programs\Python\Python37\lib\site-packages\ipykernel_launcher.py:1: DeprecationWarning: .ix is deprecated. Please use .loc for label based indexing or .iloc for positional indexing See the documentation here: http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated """Entry point for launching an IPython kernel. Belongs to the 5th Cluster which is Label 5 Ques7. 
Find out the number of players who are in Gayle Clusterdf['Labels'].value_counts()7_Visualisation_using_bokeh IntroThis notebook uses the 'bokeh' python package to plot the EnergyPlus results.The process is:- loads the EnergyPlus csv file into a pandas DataFrame- create plots Setupimport os import pandas as pd import bokeh.plotting as bplt from bokeh.io import output_notebook output_notebook()Read csv data into a pandas DataFramefp='detached_house_0out.csv' df=pd.read_csv(fp) df df.columnsRead the Date/Time column as pandas TimeStampsdef parse_datetime_string(st): st=st.strip() year=2001 month=int(st[0:2]) day=int(st[3:5]) hour=int(st[7:9]) minute=int(st[10:12]) second=(st[13:15]) if not hour==24: dt=pd.Timestamp(year,month,day,hour,minute) else: hour=0 dt=pd.Timestamp(year,month,day,hour,minute) dt+=pd.Timedelta('1 day') return dt l=df['Date/Time'] l=[parse_datetime_string(i) for i in l] datetime=pd.Series(data=l).values datetimePlot the external air temperatureext_temp=df['Environment:Site Outdoor Air Drybulb Temperature [C](Hourly)'].values ext_temp'ext_temp' is a pandas Series instance and can be plotted using the plot method...p = bplt.figure(title='my_title', x_axis_label='date', y_axis_label='hourly external air temperature', plot_width=900) p.line(x=datetime, y=ext_temp, legend='external_air_temperature', line_width=2) bplt.show(p)A timer for ML functions > "A timer for ML functions"- toc: true- branch: master- badges: true- comments: true- author: - categories: [timer, jupyter]- description: A timer for ML functions- title: A timer for ML functions#collapse-hide from functools import wraps import time def timer(func): """[This decorator is a timer for functions] Args: func ([function]): [This decorator takes a function as argument] Returns: [string]: [states the duration of time between the function begining and ending] """ @wraps(func) def wrapper(*args, **kwargs): print(f"{func.__name__!r} begins") start_time = time.time() result = func(*args, **kwargs) print(f"{func.__name__!r} ends in {time.time()-start_time} secs") return result return wrapper @timer def model_metrics(*args, **kwargs): """[This is a function to print model metrics of interest] """ print("Model ID Number:", args) print("Metric of Interest:", kwargs) model_metrics(1, 2, 10, key="word", key2="word2", numtrees="200") from collections import Counter import math, random # # data splitting # def split_data(data, prob): """split data into fractions [prob, 1 - prob]""" results = [], [] for row in data: results[0 if random.random() < prob else 1].append(row) return results def train_test_split(x, y, test_pct): data = list(zip(x, y)) # pair corresponding values train, test = split_data(data, 1 - test_pct) # split the dataset of pairs x_train, y_train = list(zip(*train)) # magical un-zip trick x_test, y_test = list(zip(*test)) return x_train, x_test, y_train, y_test # # correctness # def accuracy(tp, fp, fn, tn): correct = tp + tn total = tp + fp + fn + tn return correct / total def precision(tp, fp, fn, tn): return tp / (tp + fp) def recall(tp, fp, fn, tn): return tp / (tp + fn) def f1_score(tp, fp, fn, tn): p = precision(tp, fp, fn, tn) r = recall(tp, fp, fn, tn) return 2 * p * r / (p + r) if __name__ == "__main__": print("accuracy(70, 4930, 13930, 981070)", accuracy(70, 4930, 13930, 981070)) print("precision(70, 4930, 13930, 981070)", precision(70, 4930, 13930, 981070)) print("recall(70, 4930, 13930, 981070)", recall(70, 4930, 13930, 981070)) print("f1_score(70, 4930, 13930, 981070)", f1_score(70, 4930, 13930, 
981070)) favorite_number = 7 def add(a, b): return a + b def sub(a, b): return a - b def multiply(a, b): return a * b def divide(a, b): return a / b def count_vowels(word): count = 0 for letter in word.lower(): count += letter in 'aeiou' return count # import example_module as sm # print(sm.favorite_number) # # add two numbers together # print(sm.add(3, 8)) # # count the number of vowels in a string # print(sm.count_vowels('Testing')) import pandas as pd from alive_progress import alive_bar, showtime, show_bars, show_spinners, config_handler config_handler.set_global(theme='ascii', spinner='notes', bar='solid') with alive_bar(3) as bar: df = pd.read_csv('https://gist.githubusercontent.com/davidrkearney/bb461ba351da484336a19bd00a2612e2/raw/18dd90b57fec46a247248d161ffd8085de2a00db/china_province_economicdata_1996_2007.csv') bar('file read, printing file') print(df.head()) bar('data printed ok, printing methods of data') print(dir(df)) bar('process complete') from functools import wraps import time def timer(func): """[This decorator is a timer for functions] Args: func ([function]): [This decorator takes a function as argument] Returns: [string]: [states the duration of time between the function beginning and ending] """ @wraps(func) def wrapper(*args, **kwargs): print(f"{func.__name__!r} begins") start_time = time.time() result = func(*args, **kwargs) print(f"{func.__name__!r} ends in {time.time()-start_time} secs") return result return wrapper @timer def model_metrics(*args, **kwargs): """[This is a function to print model metrics of interest] """ print("Model ID Number:", args) print("Metric of Interest:", kwargs) model_metrics(1, 2, 10, key="word", key2="word2", numtrees="200")08 Aggregation operations in ``Numpy`` sumimport numpy as np L = np.random.random(100) sum(L) np.sum(L) np.random.rand(10) big_array = np.random.rand(1000000) %timeit sum(big_array) %timeit np.sum(big_array)61.5 ms ± 409 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) 309 µs ± 4.48 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)min, maxnp.min(big_array) np.max(big_array) big_array.min() big_array.max() big_array.sum()Aggregation over multiple dimensionsX = np.arange(16).reshape(4,-1) X np.sum(X) np.sum(X, axis=0)Summing along the first dimension (down the rows) gives the column sums.np.sum(X, axis=1)Note: axis specifies the dimension that will be collapsed. Other aggregation operationsnp.prod(X)gives the product of all elementsnp.prod(X + 1) np.mean(X) np.median(X)the medianv = np.array([1, 1, 2, 2, 10]) np.mean(v) np.median(v) np.percentile(big_array, q=50)the 50th percentile from statisticsnp.median(big_array) np.percentile(big_array, q=100) np.max(big_array) for percent in [0, 25, 50, 75, 100]: print(np.percentile(big_array, q=percent)) np.var(big_array)var computes the variancenp.std(big_array)std computes the standard deviationx = np.random.normal(0, 1, 1000000)mean 0, standard deviation 1np.mean(x) np.std(x)Performance Profile DecoderThe default Python and Cython decoder can be profiled with Python's standard `cProfile`. The output can be a sorted table and a flame graph.
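As a side note, and only as a minimal sketch (this is not part of the openpifpaf command line itself), once `--profile-decoder` has written `profile_decoder.prof`, the sorted table can also be read back with the standard-library `pstats` module:

# Inspect the decoder profile written by the command below (a sketch).
import pstats

stats = pstats.Stats("profile_decoder.prof")
stats.sort_stats("cumulative").print_stats(20)  # top 20 entries by cumulative time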
Both are generated below:%%bash python -m openpifpaf.predict coco/000000081988.jpg --no-download-progress --debug --profile-decoder !flameprof profile_decoder.prof > profile_decoder_flame.svgRead in parquet files from pre-processing# do the reading templates = pd.read_parquet('data/processed_dfs/templates.parquet' ) sentences = pd.read_parquet('data/processed_dfs/sentences.parquet') mentions = pd.read_parquet('data/processed_dfs/mentions.parquet') umls = pd.read_parquet('data/processed_dfs/umls.parquet') sentences.head() mentions.head() templates.head()To make templates:1 Make an empty data frame with the fields to hold template info2 For each sentence: * Get the predicates for that sentence * trim the frameset after the '.' * Get the mentions * Get mention type * Append umls cui to end of mention (just take the first one) * Order the predicates and mentions by begin offset * Combine into a string separated by spaces * Write the template and semantic template to the dataframeprint(len(templates)) # templates = templates.drop_duplicates('sem_template') # print(len(templates)) def get_vectors(df): tf = TfidfVectorizer() return tf.fit_transform(df['sem_template']) # Only use unique templates vectors = get_vectors(templates) vecd = vectors.todense() print(vectors.shape) cluster_sizes = [70, 80, 90, 100, 110, 120, 125, 130, 140, 150, 200] for n_cluster in cluster_sizes: km = KMeans( init='k-means++', max_iter=100, n_init=1, n_clusters=n_cluster, verbose=False) km.fit(vectors) predictions = km.predict(vectors) sil_score = silhouette_score(vectors, predictions, metric='euclidean') print(f"Silhouette score for n_clusters={n_cluster}:") print(sil_score) km = KMeans( init='k-means++', max_iter=100, n_init=1, n_clusters=120, verbose=False) km.fit(vectors) predictions = km.predict(vectors) sil_score = silhouette_score(vectors, predictions, metric='euclidean') # print(km.cluster_centers_.shape) # order_centroids = km.cluster_centers_.argsort()[:, ::-1] # terms = tf.get_feature_names() # for i in range(50): # print("Cluster %d:" % i, end='') # for ind in order_centroids[i, :15]: # print(' %s' % terms[ind], end='') # print() predictions = km.predict(vectors) silhouette_score(vectors, predictions, metric='euclidean') templates['cluster'] = predictions templates.head() sentences.shapeAdd cluster labels to sentences and mentions (entities)sentences = sentences.merge(templates[['sent_id', 'cluster']], on='sent_id') mentions = mentions.merge(templates[['sent_id', 'cluster']], on='sent_id') sentences.head() mentions.head()Get the size of each clusterpdf = pd.DataFrame(predictions, columns=['cluster']) cluster_counts = pdf.groupby('cluster').size().reset_index(name='count') cluster_counts['count'].plot(kind='bar') cluster_counts['frequency'] = cluster_counts['count'] / cluster_counts['count'].sum() cluster_counts.head()Get the distribution of CUIs in each cluster How many clusters on average does a CUI appear incui_clust_freq = mentions.groupby(['cui', 'cluster']).size().reset_index(name='cluster_count') cui_clust_freq.sort_values('cluster_count', ascending=False).head(10) num_clusters_per_cui = cui_clust_freq.groupby('cui').size().reset_index(name='num_clusters') # avg_num_clusters = .agg({'num_clusters': 'mean'}) num_clusters_per_cui.sort_values('num_clusters', ascending=False).head(10)Max and average number of clusters that CUIs appear inprint("Max number of clusters that a cui appears in") print(num_clusters_per_cui.agg({'num_clusters': 'max'})) print('Average number of clusters that cuis appear in:') 
print(num_clusters_per_cui.agg({'num_clusters': 'mean'})) max_clusters = num_clusters_per_cui[num_clusters_per_cui['num_clusters'] == 23] max_clustersThe preferred text of cuis that occur in the most number of clustersmentions[mentions['cui'].isin(max_clusters['cui'])]['preferred_text'].unique()Average number of unique CUIs in a clusternum_cuis_in_cluster_freq = cui_clust_freq[['cui', 'cluster']] \ .groupby('cluster') \ .size() \ .reset_index(name="num_cuis_in_cluster") num_cuis_in_cluster_freq.sort_values('num_cuis_in_cluster', ascending=False) num_cuis_in_cluster_freq.agg({'num_cuis_in_cluster': 'mean'})Get the cluster label frequency by sentence positioncluster_label_by_sentence_pos = pd.crosstab(templates['cluster'] ,templates['sentence_number'] ).apply(lambda x: x / x.sum(), axis=0) cluster_label_by_sentence_posGet the number of documents in each clustermentions[mentions['cluster'] == 1] umls[umls['xmi_id'].isin([17309, 11768, 11337, 4456, 15539, 16616, 10061, 13422]) ] sentences[sentences['sent_id'] == 'f918cc4a-2f8b-4c5e-a904-3de84efe714b'] notes = pd.read_parquet('data/note-events.parquet', engine='fastparquet') notes[notes['ROW_ID'] == 333908]['TEXT'].iloc[0][1368:1372]Generating Notes Get all the entities for the documentdoc_ids = templates['doc_id'].unique() notes = notes[notes['ROW_ID'].isin(doc_ids)] notes = notes.reset_index(drop=True) # notes = notes.drop(['CHARTDATE','CHARTTIME','STORETIME','CGID','ISERROR'],axis=1) doc = notes.sample(n=1) doc_id = doc['ROW_ID'].iloc[0] doc_idDrop templates that contain entities not in the documentents_in_doc = mentions[mentions['doc_id'] == doc['ROW_ID'].iloc[0]] ments_in_doc = ents_in_doc.mention_type.unique() # print(ments_in_doc) ents_in_doc.head() # get metions where mention_type is in doc entities types print(len(mentions)) doc_ments = mentions[mentions.cui.isin(ents_in_doc.cui.unique())] # print(len(doc_ments)) doc_ments.head() # get templates that have the corresponding sentence ids from doc_ments template_candidates = templates[templates.sent_id.isin(doc_ments.sent_id)] template_candidates.head()Choose a cluster based on cluster frequency for that sentence positioncandidate_cluster_labels = template_candidates.cluster.sort_values().unique() candidate_clusters = cluster_label_by_sentence_pos.iloc[candidate_cluster_labels] sent_pos = 0 # remove cluster labels not present in template candidates selected_cluster = candidate_clusters.sample( n=1, weights=candidate_clusters.loc[:,sent_pos] ).iloc[0].name selected_cluster # templates_in_cluster = template_candidates[template_candidates['cluster'] == selected_cluster.iloc[0].index] cluster_templates = template_candidates[template_candidates.cluster == selected_cluster] cluster_templates.head()Choose a template from the cluster base on frequency for that sentence position# templates_at_pos = cluster_templates[cluster_templates.sentence_number == sent_pos] template = cluster_templates.sample(n=1) template # sentences[sentences.sent_id == 'deef8a81-b222-4d1f-aa3f-7dfc160cb428'].iloc[0].textFill template blank Choosing textSelect text to fill the template blank based on the frequency of strings for the CUI associated with the mention# get mentions in this template template_id = template.iloc[0]['sent_id'] ments_in_temp = mentions[mentions.sent_id == template_id] ments_in_temp # Get the sentence for that template raw_sentence = sentences[sentences.sent_id == template_id] raw_sentence.iloc[0].text # Select entities from entities in the document that match that entity type # ments_in_temp # 
ments_in_temp.drop(ments_in_temp.loc[482].name, axis=0) concepts = umls[umls.cui == ments_in_temp.iloc[0].cui] concepts.head() # ents_in_doc # txt_counts.sample(n=1, weights=txt_counts.cnt).iloc[0].text def template_filler(template, sentences, entities, all_mentions): # print(template.sem_template) num_start = len(entities) template_id = template.iloc[0]['sent_id'] ments_in_temp = all_mentions[all_mentions.sent_id == template_id] raw_sentence = sentences[sentences.sent_id == template_id] # print(f'raw sent df size: {len(raw_sentence)}') # print(template_id) sent_begin = raw_sentence.iloc[0].begin sent_end = raw_sentence.iloc[0].end raw_text = raw_sentence.iloc[0].text replacements = [] # rows_to_drop = [] # print('Mention types in template') # print(ments_in_temp.mention_type.unique()) # print('types in entities') # print(entities.mention_type.unique()) for i, row in ments_in_temp.iterrows(): ents_subset = entities[entities.mention_type == row.mention_type] if len(ents_subset) == 0: print('Empty list of doc entities') print(entities.mention_type) print(row.mention_type) break rand_ent = ents_subset.sample(n=1) entities = entities[entities['id'] != rand_ent.iloc[0]['id']] # rows_to_drop.append(rand_ent.iloc[0].name) ent_cui = rand_ent.iloc[0].cui # print(ent_cui) span_text = get_text_for_mention(ent_cui, all_mentions) replacements.append({ 'text' : span_text, 'begin' : row.begin - sent_begin, 'end' : row.end - sent_begin, }) new_sentence = '' for i, r in enumerate(replacements): if i == 0: new_sentence += raw_text[0 : r['begin'] ] else: new_sentence += raw_text[replacements[i-1]['end'] : r['begin']] new_sentence += r['text'] if(len(replacements) > 1): new_sentence += raw_text[replacements[-1]['end'] : ] # clean up num_end = len(entities) # print(f"Dropped {num_start - num_end} rows") return new_sentence, entities # Find all the text associated with the cui of the mention in the template # choose a text span based on frequency def get_text_for_mention(cui, mentions): txt_counts = mentions[mentions.cui == cui].groupby('text').size().reset_index(name='cnt') return txt_counts.sample(n=1, weights=txt_counts.cnt).iloc[0].text*** Write a full note ***# Select document to write note for # doc = notes.sample(n=1) # doc_id = doc['ROW_ID'].iloc[0] doc_id = 374185 # Get all the entities in the chosen document ents_in_doc = mentions[mentions['doc_id'] == doc_id] new_doc_sentences = [] sent_pos = 0 while len(ents_in_doc) > 0: # print(f"Sentence position: {sent_pos}") # print(f"Length of remaining entities: {len(ents_in_doc)}") # Get list of possible mentions based on CUIs found in the document mentions_pool = mentions[(mentions.cui.isin(ents_in_doc.cui.unique())) & (mentions.mention_type.isin(ents_in_doc.mention_type.unique()))] # Get template pool based on mentions pool # TODO: Need to only choose templates where all the mentions are in `ents_in_doc` template_candidates = templates[templates.sent_id.isin(mentions_pool.sent_id)] # ts = len(template_candidates.sent_id.unique()) # ms = len(mentions_pool.sent_id.unique()) # print(ts, ms) def all_ents_present(row, doc_ents, ments_pool): # Get mentions in this template all_temp_ments = ments_pool[ments_pool['sent_id'] == row['sent_id']] available_mentions = all_temp_ments[all_temp_ments['mention_type'].isin(doc_ents['mention_type'])] return (len(available_mentions) > 0) mask = template_candidates.apply(all_ents_present, args=(ents_in_doc, mentions_pool), axis=1) template_candidates = template_candidates[mask] # print(f'num templates: 
{len(template_candidates)}') #If there are no more possible templates then break if len(template_candidates) == 0: break # Get candidate clusters based on template pool # Remove the cluster labels that aren't present in template bank candidate_cluster_labels = template_candidates.cluster.sort_values().unique() candidate_clusters = cluster_label_by_sentence_pos.iloc[candidate_cluster_labels] # print(f"Num clusters: {len(candidate_clusters)}") # Select cluster based on frequency at sentence position selected_cluster = None try: selected_cluster = candidate_clusters.sample( n=1, weights=candidate_clusters.loc[:,sent_pos] ).iloc[0].name except: # It's possible the clusters we chose don't appear at that position # so we can choose randomly # print('choosing random cluster') selected_cluster = candidate_clusters.sample(n=1).iloc[0].name # print('selected cluster:') # print(selected_cluster) cluster_templates = template_candidates[template_candidates.cluster == selected_cluster] # Choose template from cluster at random template = cluster_templates.sample(n=1) template_id = template.iloc[0]['sent_id'] # Get mentions in the template ments_in_temp = mentions[mentions.sent_id == template_id] # Write the sentence and update entities found in the document !!! t, ents_in_doc = template_filler(template, sentences, ents_in_doc, mentions_pool) new_doc_sentences.append(t) sent_pos += 1 '\n'.join(new_doc_sentences) notes[notes.ROW_ID == 374185].iloc[0].TEXTWrite until all mentions have been usedmentions.groupby('doc_id').size().reset_index(name='cnt').sort_values('cnt').head(10) mentions[mentions.doc_id == 476781]Introduction to Machine Learning with scikit-learn Lab 5: Model evaluation and selectionIn this lab, we will apply a few model evaluation metrics we've seen in the lecture.%matplotlib inline import warnings warnings.filterwarnings("ignore", category=DeprecationWarning)As always, we need to start with some data.Let's first generate a set of outputs $y$ and predicted outputs $\hat{y}$ to illustrate a few typical cases.from sklearn.metrics import confusion_matrix y_true = [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0] y_pred = [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0] cnf_matrix = confusion_matrix(y_true, y_pred) print(cnf_matrix)[[12 1] [ 0 2]]Now let's define a function that will display the confusion matrix. The following is inspired from [this example]( http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html).import itertools import numpy as np import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix def plot_confusion_matrix(cm, classes, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. 
""" plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes) plt.yticks(tick_marks, classes) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') plt.figure() plot_confusion_matrix(cnf_matrix, ['0', '1'])Import libraries%load_ext autoreload %autoreload 2 import numpy as np import pandas as pd import matplotlib.pyplot as plt import os from utils import *Parametersfig_dir = 'data/figure' exp_name = 'compare_methods' time_path = 'data/time/compare_methods_2020_12_14_1_4_52.npy' cost_path = 'data/cost/compare_methods_2020_12_14_1_4_52.npy' methods = ['greedy', 'optimal', 'approx', 'approx_woc'] sizes = np.arange(1, 11) repeats = 10Load data%%time run_time = np.load(time_path) travel_cost = np.load(cost_path) print(run_time.shape, travel_cost.shape)(40,) (40,) Wall time: 2 msConvert to DataFramedf_time = pd.DataFrame(run_time.reshape(repeats,-1)/np.power(10, 6), columns=methods) print(df_time.shape) print(df_time.info()) display(df_time) df_cost = pd.DataFrame(travel_cost.reshape(repeats,-1), columns=methods) print(df_cost.shape) print(df_cost.info()) display(df_cost)(10, 4) RangeIndex: 10 entries, 0 to 9 Data columns (total 4 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 greedy 10 non-null float64 1 optimal 10 non-null float64 2 approx 10 non-null float64 3 approx_woc 10 non-null float64 dtypes: float64(4) memory usage: 448.0 bytes NoneCalculate metrics Processing timedf_time.mean()Approximation ratiodf_cost.loc[:, 'approx_ratio'] = df_cost['approx']/df_cost['optimal'] df_cost.loc[:, 'approx_woc_ratio'] = df_cost['approx_woc']/df_cost['optimal'] df_cost.loc[:, 'greedy_ratio'] = df_cost['greedy']/df_cost['optimal'] display(df_cost) df_cost[['approx_ratio', 'approx_woc_ratio', 'greedy_ratio']].mean()Ride-sharing efficiencydf_cost.loc[:, 'approx_gain'] = df_cost['greedy']/df_cost['approx'] df_cost.loc[:, 'approx_woc_gain'] = df_cost['greedy']/df_cost['approx_woc'] df_cost.loc[:, 'optimal_gain'] = df_cost['greedy']/df_cost['optimal'] display(df_cost) df_cost[['approx_gain', 'approx_woc_gain', 'optimal_gain']].mean() df_cost[['greedy', 'optimal', 'approx', 'approx_woc']].mean()Plots Processing timeplot_proc_time( df_time, sizes, exp_name, ylim=(10**-2, 10**2), plot_optimal=True )Travel costplot_travel_cost( df_cost, sizes, exp_name, ylim=(7000,14000), plot_optimal=True )Approximation ratioplot_approx_ratio( df_cost, sizes, exp_name, ylim=(0.9,1.5), legend_loc=(0.3, 0.65) )Ride-sharing efficiencyplot_rs_efficiency( df_cost, sizes, exp_name, ylim=(0.9,1.5), plot_optimal=True, legend_loc=(0.3, 0.65) )[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/pronobis/libspn-keras/blob/master/examples/notebooks/Sampling%20with%20conv%20SPNs.ipynb) **Image Sampling**: Sampling MNIST imagesIn this notebook, we'll set up an SPN to generate new MNIST images by sampling from an SPN.First let's set up the dependencies:!pip install libspn-keras matplotlibConvolutional SPNA convolutional SPN consists of convolutional product and convolutional sum nodes. For the sake of demonstration, we'll use a structure that trains relatively quickly, without worrying too much about the final performance of the model.import libspn_keras as spnkSetting the Default Sum Accumulator InitializerIn `libspn-keras`, we refer to the unnormalized weights as _accumulators_. These can be represented in linear space or logspace. 
Setting the ``SumOp`` also configures the default choice of representation space for these accumulators. For example, gradients should be used in the case of _discriminative_ learning and accumulators are then preferrably represented in logspace. This overcomes the need to project the accumulators to $\mathbb R^+$ after gradient updates, since for log accumulators can take any value in $\mathbb R$ (whereas linear accumulators are limited to $\mathbb R^+$).In this case however, we'll do generative learning so we can set our `SumOp` to `SumOpEMBackprop`.To set the default initial value (which will be transformed to logspace internally if needed), one can use `spnk.set_default_accumulator_initializer`:from tensorflow import keras spnk.set_default_accumulator_initializer( spnk.initializers.Dirichlet() ) import numpy as np import tensorflow_datasets as tfds from libspn_keras.layers import NormalizeAxes import tensorflow as tf def take_first(a, b): return tf.reshape(tf.cast(a, tf.float32), (-1, 28, 28, 1)) normalize = spnk.layers.NormalizeStandardScore( input_shape=(28, 28, 1), axes=NormalizeAxes.GLOBAL, normalization_epsilon=1e-3 ) mnist_images = tfds.load(name="mnist", batch_size=32, split="train", as_supervised=True).map(take_first) normalize.adapt(mnist_images) mnist_normalized = mnist_images.map(normalize) location_initializer = spnk.initializers.PoonDomingosMeanOfQuantileSplit( mnist_normalized )Defining the ArchitectureWe'll go for a relatively simple convolutional SPN architecture. We use solely non-overlapping patches. After 5 convolutions, the nodes' scopes cover all variables. We then add a layer with 10 mixtures, one for each class. We can do this to optimize the joint probability of $P(X,Y)$ instead of just $P(X)$.def build_spn(sum_op, return_logits, infer_no_evidence=False): spnk.set_default_sum_op(sum_op) return spnk.models.SequentialSumProductNetwork([ normalize, spnk.layers.NormalLeaf( num_components=4, location_trainable=True, location_initializer=location_initializer, scale_trainable=True ), spnk.layers.Conv2DProduct( depthwise=False, strides=[2, 2], dilations=[1, 1], kernel_size=[2, 2], padding='valid' ), spnk.layers.Local2DSum(num_sums=256), spnk.layers.Conv2DProduct( depthwise=True, strides=[2, 2], dilations=[1, 1], kernel_size=[2, 2], padding='valid' ), spnk.layers.Local2DSum(num_sums=512), # Pad to go from 7x7 to 8x8, so that we can apply 3 more Conv2DProducts tf.keras.layers.ZeroPadding2D(((0, 1), (0, 1))), spnk.layers.Conv2DProduct( depthwise=True, strides=[2, 2], dilations=[1, 1], kernel_size=[2, 2], padding='valid' ), spnk.layers.Local2DSum(num_sums=512), spnk.layers.Conv2DProduct( depthwise=True, strides=[2, 2], dilations=[1, 1], kernel_size=[2, 2], padding='valid' ), spnk.layers.Local2DSum(num_sums=1024), spnk.layers.Conv2DProduct( depthwise=True, strides=[2, 2], dilations=[1, 1], kernel_size=[2, 2], padding='valid' ), spnk.layers.LogDropout(rate=0.5), spnk.layers.DenseSum(num_sums=10), spnk.layers.RootSum(return_weighted_child_logits=return_logits) ], infer_no_evidence=infer_no_evidence, unsupervised=False) sum_product_network = build_spn(spnk.SumOpEMBackprop(), return_logits=True) sum_product_network.summary()Setting up a `tf.Dataset` with `tensorflow_datasets`Then, we'll configure a train set and a test set using `tensorflow_datasets`.import tensorflow_datasets as tfds batch_size = 128 mnist_train = ( tfds.load(name="mnist", split="train", as_supervised=True) .shuffle(1024) .batch(batch_size) ) mnist_test = ( tfds.load(name="mnist", split="test", 
as_supervised=True) .batch(100) )Configuring the remaining training componentsNote that our SPN spits out the joint probabities for each $y\in\{Y_i\}_{i=1}^{10}$, so there are 10 outputs per sample. We can optimize the probability of $P(X,Y)$ by using `spnk.metrics.NegativeLogJoint` as the loss.optimizer = spnk.optimizers.OnlineExpectationMaximization(learning_rate=0.05, accumulate_batches=1) metrics = [] loss = spnk.losses.NegativeLogJoint() sum_product_network.compile(loss=loss, metrics=metrics, optimizer=optimizer)Training the SPNWe can simply use the `.fit` function that comes with Keras and pass our `tf.data.Dataset` to it to train!import tensorflow as tf sum_product_network.fit(mnist_train, epochs=20, callbacks=[tf.keras.callbacks.ReduceLROnPlateau(monitor="loss", min_delta=0.1, patience=2, factor=0.5)]) sum_product_network.evaluate(mnist_test)Building an SPN to sampleFor sampling, we require our sum nodes to backpropagate discrete signals that correspond to the sampled paths. Eachpath originates at the root and eventually ends up at the leaves. We can set the backprop op to`spnk.SumOpSampleBackprop` to ensure all sum layers propagate the discrete sample signal.We build using the same function as before and copy the weights from the already trained SPN.sum_product_network_sample = build_spn(spnk.SumOpSampleBackprop(), return_logits=False, infer_no_evidence=True) sum_product_network_sample.set_weights(sum_product_network.get_weights())Drawing samplesSampling from SPNs comes down to determining values for variables that are outside of the evidence. When images aresampled as a whole, all variables are omitted from the evidence. For this special case of inference,the `SequentialSumProductNetwork` class defines a `zero_evidence_inference` method that takes a size parameter.Below, we sample 64 images and voilá!import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import ImageGrid fig = plt.figure(figsize=(12., 12.)) grid = ImageGrid( fig, 111, nrows_ncols=(10, 10), axes_pad=0.1, ) sample = sum_product_network_sample.zero_evidence_inference(100) print("Sampling done... 
Now ploting results") for ax, im in zip(grid, sample): ax.imshow(np.squeeze(im), cmap="gray") plt.show()Sentence Classification with Convolutional Neural NetworkKeras 2.0 implementation based on:* http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/* https://github.com/dennybritz/cnn-text-classification-tf* https://github.com/yoonkim/CNN_sentence * http://www.people.fas.harvard.edu/~yoonkim/data/sent-cnn.pdf (The original paper)import numpy as np import keras from keras.preprocessing.text import TokenizerSet up data source and model hyperparameterspositive_data_file = "data/rt-polarity.pos" negative_data_file = "data/rt-polarity.neg" # Percentage of the training data to use for validation dev_sample_percentage = 0.1 # Dimensionality of character embedding (default: 128) embedding_dim = 128 # Filter sizes (default: [3, 4, 5]) filter_sizes = [3, 4, 5] # Number of filters per filter size (default: 128) num_filters = 128 # L2 regularization lambda (default: 0.0) (unused) l2_reg_lambda = 0.0 # Dropout keep probability (default: 0.5) dropout_keep_prob = 0.5 # Batch Size (default: 64) batch_size = 64 # Number of training epochs (default: 200) num_epochs = 200Load the data# Load data from files positive_examples = list(open(positive_data_file, "r", encoding='utf-8').readlines()) negative_examples = list(open(negative_data_file, "r", encoding='utf-8').readlines()) # Concatenate x_text = positive_examples + negative_examples # Generate labels positive_labels = [[0, 1] for _ in positive_examples] negative_labels = [[1, 0] for _ in negative_examples] y = np.concatenate([positive_labels, negative_labels], 0)Prepare the dataTokenize text into number sequences.Pad sequences to the same length (of the longest sequence).Shuffle the dataset.# Build vocabulary max_document_length = max([len(x.split(" ")) for x in x_text]) # Tokenizer default filtering seems to be identical to original clean_str tokenizer = Tokenizer() tokenizer.fit_on_texts(x_text) x = np.array(tokenizer.texts_to_sequences(x_text)) # Pad examples to the same length x = keras.preprocessing.sequence.pad_sequences(x, value=0, padding='post', maxlen=max_document_length) # Randomly shuffle data np.random.seed(10) shuffle_indices = np.random.permutation(np.arange(len(y))) x_train = x[shuffle_indices] y_train = y[shuffle_indices] del x, yBuild the model# Build the model sequence_length = x_train.shape[1] num_classes = y_train.shape[1] vocab_size = len(tokenizer.word_index) + 1 embedding_size = embedding_dim print("Sequence Length: {}".format(sequence_length)) print("Number of Classes: {}".format(num_classes)) print("Vocabulary Size: {}".format(vocab_size)) print("Embedding Size: {}".format(embedding_size)) inputs = keras.layers.Input(shape=(sequence_length,)) x = keras.layers.Embedding(vocab_size, embedding_size, input_length=sequence_length, embeddings_initializer='random_uniform')(inputs) x = keras.layers.Reshape((sequence_length, embedding_size, 1))(x) filter_layers = [] for s in filter_sizes: f = keras.layers.Conv2D( num_filters, kernel_size=(s, embedding_size), strides=(1, 1), padding='valid', data_format='channels_last', activation='relu', use_bias=True, kernel_initializer=keras.initializers.TruncatedNormal(stddev=0.01), bias_initializer=keras.initializers.Constant(value=0.1))(x) f = keras.layers.MaxPooling2D(pool_size=(sequence_length - s + 1, 1), strides=(1, 1), padding='valid', data_format='channels_last')(f) filter_layers.append(f) # Combine all the pooled features x = 
keras.layers.concatenate(filter_layers, axis=1) x = keras.layers.Reshape((num_filters * len(filter_sizes), ))(x) # Add dropout x = keras.layers.Dropout(dropout_keep_prob)(x) # Final (unnormalized) scores and predictions predictions = keras.layers.Dense(num_classes, use_bias=True, bias_initializer=keras.initializers.Constant(value=0.1), kernel_initializer=keras.initializers.glorot_uniform(seed=None), kernel_regularizer=keras.regularizers.l2(l2_reg_lambda), bias_regularizer=keras.regularizers.l2(l2_reg_lambda), activation='softmax')(x) model = keras.Model(inputs=inputs, outputs=predictions) model.summary()Sequence Length: 61 Number of Classes: 2 Vocabulary Size: 19484 Embedding Size: 128 __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input_1 (InputLayer) (None, 61) 0 __________________________________________________________________________________________________ embedding_1 (Embedding) (None, 61, 128) 2493952 input_1[0][0] __________________________________________________________________________________________________ reshape_1 (Reshape) (None, 61, 128, 1) 0 embedding_1[0][0] __________________________________________________________________________________________________ conv2d_1 (Conv2D) [...]Visualize the modelfrom IPython.display import SVG from keras.utils.vis_utils import model_to_dot SVG(model_to_dot(model).create(prog='dot', format='svg'))Prepare the trainingSpecify loss function and optimizer.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])Train the modelhistory = model.fit(x_train, y_train, epochs=num_epochs, batch_size=batch_size, validation_split=dev_sample_percentage, verbose=1)Train on 9595 samples, validate on 1067 samples Epoch 1/200 9595/9595 [==============================] - 2s 202us/step - loss: 0.6243 - acc: 0.6286 - val_loss: 0.5260 - val_acc: 0.7320 Epoch 2/200 9595/9595 [==============================] - 1s 134us/step - loss: 0.3519 - acc: 0.8470 - val_loss: 0.5053 - val_acc: 0.7638 Epoch 3/200 9595/9595 [==============================] - 1s 134us/step - loss: 0.1463 - acc: 0.9497 - val_loss: 0.6359 - val_acc: 0.7591 Epoch 4/200 9595/9595 [==============================] - 1s 133us/step - loss: 0.0534 - acc: 0.9833 - val_loss: 0.8648 - val_acc: 0.7301 Epoch 5/200 9595/9595 [==============================] - 1s 135us/step - loss: 0.0206 - acc: 0.9948 - val_loss: 0.9689 - val_acc: 0.7451 Epoch 6/200 9595/9595 [==============================] - 1s 135us/step - loss: 0.0092 - acc: 0.9981 - val_loss: 1.1073 - val_acc: 0.7357 Epoch 7/200 9595/9595 [==============================] - 1s 134us/step - loss: 0.0039 - acc: 0.9995 - val_loss: 1.1911 - val_acc: [...]Plot training historyimport matplotlib.pyplot as plt history_dict = history.history acc = history_dict['acc'] val_acc = history_dict['val_acc'] loss = history_dict['loss'] val_loss = history_dict['val_loss'] epochs = range(1, len(acc) + 1) plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() plt.clf() # clear figure plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend() 
plt.show()Evaluate the modelTest the model on data that the model wasn't trained on.x_raw = np.array(["a masterpiece four years in the making", "everything is off."]) y_test = np.array([[1, 0], [0, 1]]) x_prep = keras.preprocessing.sequence.pad_sequences(tokenizer.texts_to_sequences(x_raw), value=0, padding='post', maxlen=max_document_length) results = model.evaluate(x_prep, y_test) print(model.predict(x_prep))2/2 [==============================] - 0s 1ms/step [[0.0000000e+00 1.0000000e+00] [9.9995232e-01 4.7675734e-05]]IMPORTING LIBRARIES AND DATAFRAME# importing libraries import pandas as pd import numpy as np from collections import defaultdict # load and view first 5 columns transfer = pd.read_csv("transfer0019.csv") transfer.head(5)CLEANING DATAFRAME# shape of dataframe transfer.shape # look for missing values transfer.isna().sum() # look at columns with a market value transfer.loc[transfer["Market_value"] > 0].head(5) # percentage of missing values on Market_value percent = (1260 / 4700) * 100 print("Percentage of missing values on Market_value column is {:.3f} %".format(percent))Percentage of missing values on Market_value column is 26.809 %ANALYSIS OF DATAFRAME# count position column pos = transfer.groupby("Position")["Name"].count().sort_values(ascending= False) pos # view column with position Forward, Midfielder, Defender and sweeper transfer.loc[ (transfer["Position"] == "Forward")| (transfer["Position"] == "Midfielder")| (transfer["Position"] == "Defender")| (transfer["Position"] == "Sweeper") ] # replace values transfer["Position"].loc[transfer["Position"] == "Forward"] = "Centre-Forward" transfer["Position"].loc[transfer["Position"] == "Midfielder"] = "Central Midfield" transfer["Position"].loc[transfer["Position"] == "Sweeper"] = "Goalkeeper" transfer["Position"].loc[transfer["Position"] == "Defender"] = "Centre-Back" pos = transfer.groupby("Position")["Name"].count().sort_values(ascending= False) pos # count age transfer.groupby("Age")["Name"].count().sort_values(ascending= False) # looking at anomalous columns transfer.loc[(transfer["Age"] == 0) | (transfer["Age"] == 15)] # replace 0 with mode value transfer["Age"].loc[transfer["Age"] == 0] = 24 transfer.loc[transfer["Age"] == 0] # count League_from transfer.groupby("League_from")["Name"].count().sort_values(ascending= False).head(10) # placing league from column into a list LT = list(transfer["League_from"]) # removing repetitiveness in list res = defaultdict(list) for l in LT: res[l].append(l) print(res) # list league = list(res.keys()) print(league) print(league[82]) # list and index ind_league = [] for i in range(len(league)): ligue_un = "Index: {}, League Name:{}".format(i, league[i]) ind_league.append(ligue_un) print(ind_league) print(league[108]) transfer.loc[transfer["League_from"] == league[23]] word = input("Enter League Name: ") for i in range(len(away_league)): na = away_league[i] if na == word: print("This is the index of league: {}.".format(str(i))) break print("COMPLETED!!!") # create dictionary old name: new name change_nom = { league[5] : league[69], league[6] : "Arg Primera Division", league[106] : "Arg Primera Division", league[12] : league[78], league[75] : league[9], league[37] : league[10], league[36] : league[68], league[15] : league[54], league[18] : league[80], league[57] : league[80], league[20] : "Liga MX", league[114] : "Liga MX", league[83] : "Liga MX", league[21] : league[93], league[24] : league[113], league[25] : league[56], league[104] : league[56], league[27] : league[77], league[28] : 
league[62], league[30] : league[71], league[32] : league[70], league[112] : league[70], league[34] : league[100], league[110] : league[84], league[49] : league[19], league[47] : league[85], league[52] : league[58], league[60] : league[102], league[63] : "Serie C", league[88] : "Serie C", league[65] : league[98], league[66] : league[108], league[79] : league[101], league[82] : "Esp otra ligas", league[96] : "Esp otra ligas", league[107] : "Esp otra ligas", league[92] : league[105], league[109] : league[3], league[48] : league[103], league[23]: " Uru Primera Division", league[99]: "Uru Primera Division", away_league[-3]: "Esp otra ligas", " Israel": league[55], league[91]: league[71], league[14]: league[68], league[39]: league[84], " Belgium": league[33], league[76]: "Arg Primera Division" } print(change_nom) # place change_nom keys into list nom_keys = list(change_nom.keys()) # replace old values with new values tranfer["League_from"] for nom in nom_keys: transfer["League_from"].loc[transfer["League_from"] == nom] = change_nom[nom] # verification transfer.loc[(transfer["League_from"] == league[75]) | (transfer["League_from"] == league[48])] # replace old values with new values transfer["league_from"] for nom in nom_keys: transfer["League_to"].loc[transfer["League_to"] == nom] = change_nom[nom] # placing league from column into a list LT = list(transfer["League_to"]) # removing repetitiveness in list res = defaultdict(list) for l in LT: res[l].append(l) print(res) # list away_league = list(res.keys()) print(away_league) # count season saison = transfer.groupby("Season")["Name"].count() saison # describe market value transfer["Market_value"].describe() # describe Transfer Fee transfer["Transfer_fee"].describe() # save clean dataframe to csv transfer.to_csv("clean_transfer.csv", index= False)Using MLRUN with Dask Distributed Jobsfrom mlrun import new_function, mlconf mlconf.remote_host = '192.168.127.12' # remote cluster IP/DNS for link to dask dashboard mlconf.dbpath = 'http://mlrun-api:8080'Writing a function code# function that will be distributed def inc(x): return x+2The MLRun context in the case of Dask will have an extra param `dask_client`which is initialized based on the function spec (below), and can be used to submit Dask commands.def hndlr(context, x=1,y=2): x = context.dask_client.submit(inc, x) print(x) print(x.result()) context.log_result('y', x.result())Define the functiondask functions can be local (local workers), or remote (use containers in the cluster),in the case of `remote` users can specify the number of replica (optional) or leave blank for auto-scale.dsf = new_function('dask-tst', kind='dask') dsf.spec.remote = True dsf.spec.replicas = 1 dsf.spec.service_type = 'NodePort' dsf.spec.image_pull_policy = 'Always'Build the function with extra packagesWe can skip the build section if we dont add packages (instead need to specify the image e.g. 
`dsf.spec.image='daskdev/dask:2.9.1'`)dsf.build_config(base_image='daskdev/dask:2.9.1', commands=['pip install pandas']) dsf.deploy()[mlrun] 2020-01-10 00:41:53,710 starting remote build, image: .mlrun/func-default-dask-tst-latest INFO[0000] Resolved base name daskdev/dask:latest to daskdev/dask:latest INFO[0000] Resolved base name daskdev/dask:latest to daskdev/dask:latest INFO[0000] Downloading base image daskdev/dask:latest INFO[0000] Error while retrieving image from cache: getting file info: stat /cache/sha256:2ac5385ebc20fe2982a22f8fcf3cf765e7a01dc5e5003b42aa44493af0a06438: no such file or directory INFO[0000] Downloading base image daskdev/dask:latest INFO[0000] Built cross stage deps: map[] INFO[0000] Downloading base image daskdev/dask:latest INFO[0000] Error while retrieving image from cache: getting file info: stat /cache/sha256:2ac5385ebc20fe2982a22f8fcf3cf765e7a01dc5e5003b42aa44493af0a06438: no such file or directory INFO[0000] Downloading base image daskdev/dask:latest INFO[0000] Un[...]Run a task using our distributed dask function (cluster)myrun = dsf.run(handler=hndlr, params={'x': 12})[mlrun] 2020-01-10 00:42:16,351 starting run hndlr uid=877916f9d4a74ab39fb140b8bc0540b9 -> http://mlrun-api:8080 [mlrun] 2020-01-10 00:42:16,883 saving function: dask-tst, tag: latest [mlrun] 2020-01-10 00:42:22,229 using remote dask scheduler (mlrun-dask-tst-4c2600bc-d) at: 192.168.127.12:30064 [mlrun] 2020-01-10 00:42:22,229 remote dashboard (node) port: 192.168.127.12:31613 14 [mlrun] 2020-01-10 00:42:26,135 run ended with stateFigures# plot the confirmed cases def figure_conf(df, figsize = (8,5), fs = 15, logy = False, title = None, rank = 0): sns.set_style("whitegrid") palette = plt.get_cmap('magma') fig = plt.figure() plot_df = df.groupby('update_date').agg('sum') ax1 = fig.add_subplot(111) plot_df.plot(y = ['cum_confirmed'], style = '-', marker = 'o', ax = ax1, figsize = figsize, logy = logy, color = palette(rank/50 + 0.2)) # grid = False, ax1.set_ylabel("Number of people (cum)", fontsize = fs - 2) ax1.set_xticks([]) # hide the axes ticks plt.legend(loc = 'upper left', fancybox = True, fontsize = fs - 2) ax11 = ax1.twinx() ax11.bar(x = plot_df.index, height = plot_df['new_confirmed'], color = palette(rank/50 + 0.2), alpha = 0.8) ax11.set_ylabel('Number of people (daily)', fontsize = fs - 2) ax11.grid(False) # hide grid lines ax1.set_xlabel("Date", fontsize = fs - 2) daily_patch = mpatches.Patch(color = palette(rank/50 + 0.2), label='daily_confirmed') plt.legend(handles=[daily_patch], loc='upper left', bbox_to_anchor=(0, 0.9), fancybox=True, fontsize = fs - 2) if title is not None: fig.suptitle(title, fontsize = fs, y = 1) return fig # plot the confirmed, the dead, and the cured cases def figure_conf_dead_cured(df, fsize=(10,8), fs=18, title=None): sns.set_style("whitegrid") palette = ["#50a3ba", "#eac763", "#d94e5d"] # from pyecharts #palette = [ '#79a7ac', '#bd925a', '#d98994'] # earth and tealrose fig = plt.figure(figsize=fsize) plot_df = df.groupby('update_date').agg('sum') plot_df.reset_index(level=0, inplace=True) ax1 = fig.add_subplot(211) ax1.plot(plot_df['update_date'], plot_df['cum_confirmed'], marker = 'o', color = palette[2], label = 'cumulative infected') # grid = False, ax1.set_xticks([]) # hide the axes ticks ax1.set_ylabel("Number of people (cum)", fontsize = fs - 2) ax1.legend(loc = 'upper left', fancybox = True, fontsize = fs - 2) ax11 = ax1.twinx() ax11.bar(x = plot_df['update_date'], height = plot_df['new_confirmed'], color = palette[2], alpha = 0.8) 
ax11.set_ylabel('Number of people (new)', fontsize = fs - 2) ax11.grid(False) # hide grid lines daily_patch = mpatches.Patch(color = palette[2], label='new infected') ax11.legend(handles = [daily_patch], loc='upper right', fancybox = True, fontsize = fs - 2) ax2 = fig.add_subplot(212) ax2.plot(plot_df['update_date'], plot_df['cum_cured'], marker = 'o', color = palette[0], label = 'recovered') # cured plt.legend(loc = 'upper left', fancybox = True, fontsize = fs - 2) ax22 = ax2.twinx() ax22.plot(plot_df['update_date'], plot_df['cum_dead'], marker = 'o', color = palette[1], label = 'dead') ax22.set_ylabel('Number of people dead', fontsize = fs - 2) ax22.grid(False) # hide grid lines ax2.set_xlabel("Date", fontsize = fs - 2) ax2.set_ylabel("Number of people recovered", fontsize = fs - 2) ax1.set_xlim(min(df.update_date), max(df.update_date)) ax1.xaxis.set_major_locator(mdates.WeekdayLocator()) ax1.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax2.set_xlim(min(df.update_date), max(df.update_date)) ax2.xaxis.set_major_locator(mdates.WeekdayLocator()) ax2.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) plt.legend(loc = 'upper right', fancybox = True, fontsize = fs - 2) # align labels ax1.get_yaxis().set_label_coords(-0.1,0.5) ax2.get_yaxis().set_label_coords(-0.1,0.5) ax11.get_yaxis().set_label_coords(1.1,0.5) ax22.get_yaxis().set_label_coords(1.1,0.5) if title is not None: fig.suptitle(title, fontsize = fs, y = 1.01) fig.savefig(_Figure_PATH_ + 'figures_china/' + 'China_' + 'summary.png', dpi = 400, bbox_inches='tight') return fig # plot the confirmed cases for every province def figure_conf_all(df, names_province, fsize = (5, 3), ncol = 3, ms = 2, fs = 10, logy = False, title = None, country = 'China'): sns.set_style("ticks") palette = plt.get_cmap('Reds') m = len(names_province) fig, axes = plt.subplots(int(np.ceil(m/ncol)), ncol, figsize = (ncol*fsize[0], int(np.ceil(m/ncol))*fsize[1]), sharey = False) fig.subplots_adjust(hspace = 0.2, wspace = 0.1) if m%ncol != 0: for j in range(m, int(np.ceil(m/ncol)*ncol)): fig.delaxes(axes.flatten()[j]) df_rank = df[df['update_date'] == max(df['update_date'])].copy() df_rank = df_rank.sort_values(by = 'province_name_en') # same order as the list names_province df_rank = df_rank.reset_index(drop=True) df_rank = df_rank.sort_values(by = 'cum_confirmed') rank_list = df_rank.index.tolist() for i, province in enumerate(names_province): ix = np.unravel_index(i, axes.shape) c = palette(rank_list.index(i)/2/m + 0.3) plot_df = df[df['province_name_en'] == province] axes[ix].plot(plot_df['update_date'], plot_df['cum_confirmed'], linewidth = 2, marker = 'o', ms = ms, color = c, label = (lambda x: None if x > 0 else 'cum')(i)) # if i >= (np.ceil(m/ncol) - 1)*ncol: axes[ix].set_xlabel('Date', fontsize = fs - 2) if i % ncol == 0: axes[ix].set_ylabel('Number (cum)', fontsize = fs - 2) axes[ix].get_yaxis().set_label_coords(-0.15,0.5) if i == 0: axes[ix].legend(loc = 'upper left', fancybox = True, fontsize = fs - 2) axes[ix].set_title(province, fontsize = fs) fig.align_ylabels(axes[:, 0]) for i, province in enumerate(names_province): ix = np.unravel_index(i, axes.shape) c = palette(rank_list.index(i)/2/m + 0.3) plot_df = df[df['province_name_en'] == province] ax11 = axes[ix].twinx() ax11.grid(False) # hide grid lines ax11.bar(plot_df['update_date'], height = plot_df['new_confirmed'], color = c, alpha = 0.8, label = (lambda x: None if x > 0 else 'new_confirmed')(i)) daily_patch = mpatches.Patch(color = c, label='new') if i == 0: 
ax11.legend(handles=[daily_patch], loc='upper right', fancybox=True, fontsize = fs - 2) ax11.tick_params(axis = 'both', which = 'major', labelsize = fs - 4) ax11.tick_params(axis = 'both', which = 'minor', labelsize = fs - 4) if i % ncol == ncol - 1: ax11.set_ylabel('Number (new)', fontsize = fs - 2) ax11.get_yaxis().set_label_coords(1.1, 0.5) axes[ix].set_xlim(min(df.update_date), max(df.update_date)) axes[ix].xaxis.set_major_locator(mdates.WeekdayLocator()) axes[ix].xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) axes[ix].tick_params(axis = 'both', which = 'major', labelsize = fs - 4) axes[ix].tick_params(axis = 'both', which = 'minor', labelsize = fs - 4) fig.suptitle(title, fontsize = fs + 2, y = 1.01) fig.savefig(_Figure_PATH_ + 'figures_china/' + country + '_infection.png', dpi = 400, bbox_inches='tight')🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄🎄# functions imported from utils_common data_city, data_province, data_province_domestic = load_DXY_raw() # the date is truncated to March 10, 2020 date_tr = datetime.date(int(2020),int(3),int(10)) data_province_domestic = data_province_domestic[data_province_domestic.update_date <= date_tr] min(data_province_domestic['update_date']), max(data_province_domestic['update_date']) data_slice = data_province_domestic[data_province_domestic['update_date'] == datetime.date(int(2020),int(1),int(31))] data_slice = data_slice.sort_values(by = 'province_name_en') data_slice = data_province_domestic[data_province_domestic['province_name_en'] == 'Hubei'] data_slice = data_slice.reset_index(drop = True) data_slice figure_conf_all(df = data_province_domestic, names_province = names_province, fsize = (5, 1.5), ncol = 3, ms = 3, fs = 12, logy = False, title = 'China: infection', country = 'China') fig = figure_conf_dead_cured(data_province_domestic, fsize = (10, 8), fs = 16, title = 'China: infected, recovered, and dead') # confirmed, cured, and dead #province = 'Hubei' # target province #fig = figure_conf_dead_cured(data_province[data_province['province_name_en'] == province], title = province + ': confirmed, cured, and dead') #plt.show() #city = 'Wuhan' # target city #fig = figure_conf_dead_cured(data_city[data_city['city_name_en'] == city], logy = False, title = city + ': confirmed, cured, and dead') #plt.show() # plot a specific column: confirmed, cured or dead def figure_bar(df, date_str, col, largestN = 0, log = True, figsize = (12, 8), fs = 18, title = None): sns.set_style("whitegrid") year, month, day = date_str.split('-') date = datetime.date(int(year),int(month),int(day)) if date_str is not None: df_single = df[df['update_date'] == date] else: df_single = df df_single = df_single[df_single['province_name_en'].isin(names_province)] df_single = df_single.sort_values(by = col) df_single = df_single.reset_index(drop = True) if largestN > 0: df_single = df_single[-largestN:] # only plot the first N bars else: largestN = df_single.shape[0] cmap_dict = {'cum_confirmed': tealrose, 'new_confirmed': tealrose, 'cum_cured': armyrose, 'new_cured': armyrose, 'cum_dead': earth, 'new_dead': earth, 'infection_rate': tealrose} palette = cmap_dict[col] palette = list(itertools.chain.from_iterable(itertools.repeat(x, int(np.ceil(largestN/len(palette)))) for x in palette)) palette = palette[:largestN] fig = plt.figure(figsize = figsize) ax = fig.add_subplot(111) bars = df_single[col].tolist() # in case zero appears bars = [temp if temp > 0 else 0.1 for temp in bars] names = df_single['province_name_en'] ax.barh(names, bars, color = palette, height = 0.4, alpha 
= 0.8) # palette(np.linspace(0.2, 0.8, largestN)) for j, v in enumerate(bars): # add text ax.text(v*1.1, j - 0.2, str(int(v)), color = 'black', fontsize = fs - 6) ax.set_xlabel("Number of people", fontsize = fs - 2) ax.set_ylabel('Province', fontsize = fs - 2) if log == True: ax.set_xscale('log') xmin = np.power(10, np.floor(np.log10(min(bars)))) xmax = np.power(10, np.ceil(np.log10(max(bars)))) ax.set_xlim(xmin, xmax) dict_title = {'cum_confirmed': 'infected', 'cum_cured': 'recovered', 'cum_dead': 'dead'} if title is not None: # col[:3] + ' ' + col[4:] #fig.suptitle(title + ' ' + dict_title[col], fontsize = fs, y = (lambda x: 1.06 if x != len(names_province) else 1.035)(largestN)) fig.suptitle(title + ' ' + dict_title[col], fontsize = fs, y = (lambda x: 1.05 if x != len(names_province) else 1.03)(largestN)) plt.figtext(0.85, 1, 'by ' + date.strftime("%d %B, %Y"), ha = "center", va = "top", fontsize = fs - 4) if largestN != len(names_province): fig.savefig(_Figure_PATH_ + 'figures_china/' + 'China_bar_' + dict_title[col] + '_top.png', dpi = 400, bbox_inches='tight') else: fig.savefig(_Figure_PATH_ + 'figures_china_be/' 'China_bar_' + dict_title[col] + '.png', dpi = 400, bbox_inches='tight') return fig fig = figure_bar(df = data_province_domestic, date_str = '2020-03-10', col = 'cum_confirmed', largestN = 10, log = True, figsize = (6, 4), fs = 15, title = 'China: provincial') # province level fig = figure_bar(df = data_province_domestic, date_str = '2020-03-10', col = 'cum_confirmed', largestN = 0, log = True, figsize = (6, 8), fs = 15, title = 'China: provincial') fig = figure_bar(df = data_province_domestic, date_str = '2020-03-10', col = 'cum_cured', largestN = 0, log = True, figsize = (6, 8), fs = 15, title = 'China: provincial') fig = figure_bar(df = data_province_domestic, date_str = '2020-03-10', col = 'cum_dead', largestN = 0, log = True, figsize = (6, 8), fs = 15, title = 'China: provincial') # city level #fig = figure_bar(data_city, '2020-03-10', col = 'cum_confirmed', groupby = 'province_name_en',title = 'National') #fig = figure_bar(data_city, '2020-03-10', col = 'new_confirmed', groupby = 'city_name_en', largestN = 10, title='Top 10 City') # city level in Hubei province #fig = figure_bar(data_city[data_city['province_name_en'] == 'Hubei'], '2020-02-01', col = 'cum_dead', groupby = 'city_name_en', title = 'Hubei Province') # Compare the population size with the infection size # infection ratio (per mille) infection_ratio = [(name, round(data_province_domestic[(data_province_domestic.province_name_en == name) & (data_province_domestic.update_date <= date_tr)].cum_confirmed.max()/provincial_population_dict.get(name)*1e6, 3)) for name in names_province] # conclusion: in the SEIR model, S + E is approximately N # min(infection_ratio, key = lambda t: t[1]), max(infection_ratio, key = lambda t: t[1]) infection_ratio = pd.DataFrame.from_records(infection_ratio, columns =['province_name_en', 'ratio']) infection_ratio # ir stands for infection ratio def figure_ir_bar(df, update_date_tr, largestN = 0, log = True, figsize = (12, 8), fs = 18, title = None): df_single = df[df['province_name_en'].isin(names_province)] df_single = df_single.sort_values(by = 'ratio') df_single = df_single.reset_index(drop = True) if largestN > 0: df_single = df_single[-largestN:] # only plot the first N bars else: largestN = df_single.shape[0] palette = plt.get_cmap('pink_r') palette = tealrose palette = list(itertools.chain.from_iterable(itertools.repeat(x, int(np.ceil(largestN/len(palette)))) for x in 
palette)) palette = palette[:largestN] sns.set_style("whitegrid") fig = plt.figure(figsize = figsize) ax = fig.add_subplot(111) bars = df_single['ratio'].tolist() names = df_single['province_name_en'] ax.barh(names, bars, color = palette, height = 0.4, alpha = 0.8) # palette(np.linspace(0.2, 0.8, largestN)) for j, v in enumerate(bars): # add text ax.text(v*1.1, j - 0.2, str(round(v)), color = 'black', fontsize = fs - 6) ax.set_xlabel("Infection rate (per million)", fontsize = fs - 2) ax.set_ylabel('Province', fontsize = fs - 2) if log == True: ax.set_xscale('log') xmin = 1 xmax = 1e4 ax.set_xlim(xmin, xmax) if title is not None: fig.suptitle(title, fontsize = fs, y = 1.03) #plt.figtext(0.5, 1, update_date_tr.strftime("%d %B, %Y"), ha = "center", va = "top", fontsize = fs - 4) plt.figtext(0.85, 1, 'by ' + update_date_tr.strftime("%d %B, %Y"), ha = "center", va = "top", fontsize = fs - 4) fig.savefig(_Figure_PATH_ + 'figures_china_be/' + 'China_infection_rate.png', dpi = 400, bbox_inches='tight') return fig fig = figure_ir_bar(df = infection_ratio, update_date_tr = date_tr, largestN = 0, log = True, figsize = (6, 8), fs = 15, title = 'China: provincial infection rate') # a dictionary for the pyecharts geo maps d = {'湖北': 'Hubei', '广东': 'Guangdong', '河南': 'Henan', '浙江': 'Zhejiang', '湖南': 'Hunan', '安徽': 'Anhui', '江西': 'Jiangxi', '山东': 'Shandong', '江苏': 'Jiangsu', '重庆': 'Chongqing', '四川': 'Sichuan', '黑龙江': 'Heilongjiang', '北京': 'Beijing', '上海': 'Shanghai', '河北': 'Hebei', '福建': 'Fujian', '广西': 'Guangxi', '陕西': 'Shaanxi', '云南': 'Yunnan', '海南': 'Hainan', '贵州': 'Guizhou', '天津': 'Tianjin', '山西': 'Shanxi', '辽宁': 'Liaoning', '吉林': 'Jilin', '甘肃': 'Gansu', '新疆': 'Xinjiang', '宁夏': 'Ningxia', '内蒙古': 'Inner Mongolia', '青海': 'Qinghai'} #for k, v in d.items(): #print ('if(params.name == \'' + k + '\')' + '\n' + ' ' + #'return \'' + v + '\' + \' : \' + ' + 'params.value[2];') # geo maps def figure_map_png(df, end_date, pieces, vr_upper = 0, fs = 20, subject = 'scatter'): data_single = df[df.update_date == end_date] data_single = data_single.sort_values(by='cum_confirmed', ascending=False) data_single = data_single.reset_index(drop=True) # for the scatter plot or heatmap provinces = data_single.province_name provinces = [pyecharts_province_dict[temp] for temp in provinces] values = data_single.cum_confirmed fn = """ function(params) { return params.name + ' : ' + params.value[2] } """ # show province names and values fn = """ function(params) { if(params.name == '湖北') return 'Hubei' + ' : ' + params.value[2]; if(params.name == '广东') return 'Guangdong' + ' : ' + params.value[2]; if(params.name == '河南') return 'Henan' + ' : ' + params.value[2]; if(params.name == '浙江') return 'Zhejiang' + ' : ' + params.value[2]; if(params.name == '湖南') return 'Hunan' + ' : ' + params.value[2]; if(params.name == '安徽') return 'Anhui' + ' : ' + params.value[2]; if(params.name == '江西') return 'Jiangxi' + ' : ' + params.value[2]; if(params.name == '山东') return 'Shandong' + ' : ' + params.value[2]; if(params.name == '江苏') return 'Jiangsu' + ' : ' + params.value[2]; if(params.name == '重庆') return 'Chongqing' + ' : ' + params.value[2]; if(params.name == '四川') return 'Sichuan' + ' : ' + params.value[2]; if(params.name == '黑龙江') return 'Heilongjiang' + ' : ' + params.value[2]; if(params.name == '北京') return 'Beijing' + ' : ' + params.value[2]; if(params.name == '上海') return 'Shanghai' + ' : ' + params.value[2]; if(params.name == '河北') return 'Hebei' + ' : ' + params.value[2]; if(params.name == '福建') return 'Fujian' + ' : ' + 
params.value[2]; if(params.name == '广西') return 'Guangxi' + ' : ' + params.value[2]; if(params.name == '陕西') return 'Shaanxi' + ' : ' + params.value[2]; if(params.name == '云南') return 'Yunnan' + ' : ' + params.value[2]; if(params.name == '海南') return 'Hainan' + ' : ' + params.value[2]; if(params.name == '贵州') return 'Guizhou' + ' : ' + params.value[2]; if(params.name == '天津') return 'Tianjin' + ' : ' + params.value[2]; if(params.name == '山西') return 'Shanxi' + ' : ' + params.value[2]; if(params.name == '辽宁') return 'Liaoning' + ' : ' + params.value[2]; if(params.name == '吉林') return 'Jilin' + ' : ' + params.value[2]; if(params.name == '甘肃') return 'Gansu' + ' : ' + params.value[2]; if(params.name == '新疆') return 'Xinjiang' + ' : ' + params.value[2]; if(params.name == '宁夏') return 'Ningxia' + ' : ' + params.value[2]; if(params.name == '内蒙古') return 'Inner Mongolia' + ' : ' + params.value[2]; if(params.name == '青海') return 'Qinghai' + ' : ' + params.value[2]; } """ # show only province names gn = """ function(params) { if(params.name == '湖北') return 'Hubei'; if(params.name == '广东') return 'Guangdong'; if(params.name == '河南') return 'Henan'; if(params.name == '浙江') return 'Zhejiang'; if(params.name == '湖南') return 'Hunan'; if(params.name == '安徽') return 'Anhui'; if(params.name == '江西') return 'Jiangxi'; if(params.name == '山东') return 'Shandong'; if(params.name == '江苏') return 'Jiangsu'; if(params.name == '重庆') return 'Chongqing'; if(params.name == '四川') return 'Sichuan'; if(params.name == '黑龙江') return 'Heilongjiang'; if(params.name == '北京') return 'Beijing'; if(params.name == '上海') return 'Shanghai'; if(params.name == '河北') return 'Hebei'; if(params.name == '福建') return 'Fujian'; if(params.name == '广西') return 'Guangxi'; if(params.name == '陕西') return 'Shaanxi'; if(params.name == '云南') return 'Yunnan'; if(params.name == '海南') return 'Hainan'; if(params.name == '贵州') return 'Guizhou'; if(params.name == '天津') return 'Tianjin'; if(params.name == '山西') return 'Shanxi'; if(params.name == '辽宁') return 'Liaoning'; if(params.name == '吉林') return 'Jilin'; if(params.name == '甘肃') return 'Gansu'; if(params.name == '新疆') return 'Xinjiang'; if(params.name == '宁夏') return 'Ningxia'; if(params.name == '内蒙古') return 'Inner Mongolia'; if(params.name == '青海') return 'Qinghai'; } """ # show only 5 province names and values hn = """ function(params) { if(params.name == '湖北') return 'Hubei' + ' : ' + params.value[2]; if(params.name == '广东') return 'Guangdong' + ' : ' + params.value[2]; if(params.name == '河南') return 'Henan' + ' : ' + params.value[2]; if(params.name == '浙江') return 'Zhejiang' + ' : ' + params.value[2]; if(params.name == '湖南') return 'Hunan' + ' : ' + params.value[2]; return ''; } """ # show only 5 province names ln = """ function(params) { if(params.name == '湖北') return 'Hubei'; if(params.name == '广东') return 'Guangdong'; if(params.name == '河南') return 'Henan'; if(params.name == '浙江') return 'Zhejiang'; if(params.name == '湖南') return 'Hunan'; return ''; } """ # scatter plot if subject == 'scatter': # show both province name and number of infected c = ( Geo(init_opts = opts.InitOpts(width = '900px', height = '600px', bg_color = "#FFFFFF"),) .add_schema(maptype = "china", itemstyle_opts = opts.ItemStyleOpts(color = "white", border_color = "black"),) .add( "number of infected", [list(z) for z in zip(provinces, values)], #type_= ChartType.EFFECT_SCATTER, color = '#d94e5d', symbol_size = 10, point_size = 5, ) .set_series_opts( label_opts = opts.LabelOpts( formatter=JsCode(ln), font_size = fs - 3, 
color = 'black')) #.set_series_opts(label_opts = opts.LabelOpts(is_show = True)) .set_global_opts( legend_opts = opts.LegendOpts(textstyle_opts = opts.TextStyleOpts(font_size = fs + 5)), visualmap_opts = opts.VisualMapOpts( is_show = True, split_number = 6, is_piecewise = True, pos_top = 'center', pieces = pieces, textstyle_opts = opts.TextStyleOpts(font_size = fs)), title_opts = opts.TitleOpts(title = "China", subtitle = end_date.strftime("%d %B, %Y"), title_textstyle_opts = opts.TextStyleOpts(font_size = fs + 10), subtitle_textstyle_opts = opts.TextStyleOpts(font_size = fs))) ) make_snapshot(snapshot, c.render(), # _Figure_PATH_ + 'figures_china/' + "China_smap.html" _Figure_PATH_ + 'figures_china/' + "China_smap.png") return c # heatmap plot else: c = ( Geo(init_opts = opts.InitOpts(width = '900px', height = '600px', bg_color = "#FFFFFF")) .add_schema(maptype="china", itemstyle_opts = opts.ItemStyleOpts(color = "white", border_color = "black"),) .add( "number of infected", [list(z) for z in zip(provinces, values)], color = '#d94e5d', #type_= ChartType.EFFECT_SCATTER, symbol_size = 8, point_size = 5, ) .add( "number of infected", [list(z) for z in zip(provinces, values)], type_ = ChartType.HEATMAP, symbol_size = 15, ) .set_series_opts(label_opts = opts.LabelOpts( formatter = JsCode(ln), font_size = fs - 3, color = 'black')) .set_global_opts( legend_opts = opts.LegendOpts(textstyle_opts = opts.TextStyleOpts(font_size = fs + 5)), visualmap_opts = opts.VisualMapOpts(max_ = vr_upper, pos_top = 'center', textstyle_opts = opts.TextStyleOpts(font_size = fs)), title_opts = opts.TitleOpts(title = "China",subtitle = end_date.strftime("%d %B, %Y"), title_textstyle_opts = opts.TextStyleOpts(font_size = fs + 10), subtitle_textstyle_opts = opts.TextStyleOpts(font_size = fs + 5))) ) c.render_notebook() make_snapshot(snapshot, c.render(), # _Figure_PATH_ + 'figures_china/' + "China_hmap.html" _Figure_PATH_ + 'figures_china/' + "China_hmap.png") return c end_date = datetime.date(int(2020),int(3),int(10)) pieces = [{'min': 1001}, # no max for this {'min': 501, 'max': 1000}, {'min': 201, 'max': 500}, {'min': 101, 'max': 200}, {'min': 51, 'max': 100}, {'min': 0, 'max': 50}] c = figure_map_png(data_province_domestic, end_date, pieces, fs = 18, subject = 'scatter') c.render_notebook() vr_upper = 1000 c = figure_map_png(data_province_domestic, end_date, pieces, vr_upper, fs = 16, subject = 'heatmap') c.render_notebook() # geo maps def figure_map_html(df, end_date, pieces, vr_upper = 0, fs = 20, subject = 'scatter'): data_single = df[df.update_date == end_date] data_single = data_single.sort_values(by='cum_confirmed', ascending=False) data_single = data_single.reset_index(drop=True) # for the scatter plot or heatmap provinces = data_single.province_name provinces = [pyecharts_province_dict[temp] for temp in provinces] values = data_single.cum_confirmed fn = """ function(params) { return params.name + ' : ' + params.value[2] } """ # show province names and values fn = """ function(params) { if(params.name == '湖北') return 'Hubei' + ' : ' + params.value[2]; if(params.name == '广东') return 'Guangdong' + ' : ' + params.value[2]; if(params.name == '河南') return 'Henan' + ' : ' + params.value[2]; if(params.name == '浙江') return 'Zhejiang' + ' : ' + params.value[2]; if(params.name == '湖南') return 'Hunan' + ' : ' + params.value[2]; if(params.name == '安徽') return 'Anhui' + ' : ' + params.value[2]; if(params.name == '江西') return 'Jiangxi' + ' : ' + params.value[2]; if(params.name == '山东') return 'Shandong' + ' : ' + 
params.value[2]; if(params.name == '江苏') return 'Jiangsu' + ' : ' + params.value[2]; if(params.name == '重庆') return 'Chongqing' + ' : ' + params.value[2]; if(params.name == '四川') return 'Sichuan' + ' : ' + params.value[2]; if(params.name == '黑龙江') return 'Heilongjiang' + ' : ' + params.value[2]; if(params.name == '北京') return 'Beijing' + ' : ' + params.value[2]; if(params.name == '上海') return 'Shanghai' + ' : ' + params.value[2]; if(params.name == '河北') return 'Hebei' + ' : ' + params.value[2]; if(params.name == '福建') return 'Fujian' + ' : ' + params.value[2]; if(params.name == '广西') return 'Guangxi' + ' : ' + params.value[2]; if(params.name == '陕西') return 'Shaanxi' + ' : ' + params.value[2]; if(params.name == '云南') return 'Yunnan' + ' : ' + params.value[2]; if(params.name == '海南') return 'Hainan' + ' : ' + params.value[2]; if(params.name == '贵州') return 'Guizhou' + ' : ' + params.value[2]; if(params.name == '天津') return 'Tianjin' + ' : ' + params.value[2]; if(params.name == '山西') return 'Shanxi' + ' : ' + params.value[2]; if(params.name == '辽宁') return 'Liaoning' + ' : ' + params.value[2]; if(params.name == '吉林') return 'Jilin' + ' : ' + params.value[2]; if(params.name == '甘肃') return 'Gansu' + ' : ' + params.value[2]; if(params.name == '新疆') return 'Xinjiang' + ' : ' + params.value[2]; if(params.name == '宁夏') return 'Ningxia' + ' : ' + params.value[2]; if(params.name == '内蒙古') return 'Inner Mongolia' + ' : ' + params.value[2]; if(params.name == '青海') return 'Qinghai' + ' : ' + params.value[2]; } """ # show only province names gn = """ function(params) { if(params.name == '湖北') return 'Hubei'; if(params.name == '广东') return 'Guangdong'; if(params.name == '河南') return 'Henan'; if(params.name == '浙江') return 'Zhejiang'; if(params.name == '湖南') return 'Hunan'; if(params.name == '安徽') return 'Anhui'; if(params.name == '江西') return 'Jiangxi'; if(params.name == '山东') return 'Shandong'; if(params.name == '江苏') return 'Jiangsu'; if(params.name == '重庆') return 'Chongqing'; if(params.name == '四川') return 'Sichuan'; if(params.name == '黑龙江') return 'Heilongjiang'; if(params.name == '北京') return 'Beijing'; if(params.name == '上海') return 'Shanghai'; if(params.name == '河北') return 'Hebei'; if(params.name == '福建') return 'Fujian'; if(params.name == '广西') return 'Guangxi'; if(params.name == '陕西') return 'Shaanxi'; if(params.name == '云南') return 'Yunnan'; if(params.name == '海南') return 'Hainan'; if(params.name == '贵州') return 'Guizhou'; if(params.name == '天津') return 'Tianjin'; if(params.name == '山西') return 'Shanxi'; if(params.name == '辽宁') return 'Liaoning'; if(params.name == '吉林') return 'Jilin'; if(params.name == '甘肃') return 'Gansu'; if(params.name == '新疆') return 'Xinjiang'; if(params.name == '宁夏') return 'Ningxia'; if(params.name == '内蒙古') return 'Inner Mongolia'; if(params.name == '青海') return 'Qinghai'; } """ # show only 5 province names and values hn = """ function(params) { if(params.name == '湖北') return 'Hubei' + ' : ' + params.value[2]; if(params.name == '广东') return 'Guangdong' + ' : ' + params.value[2]; if(params.name == '河南') return 'Henan' + ' : ' + params.value[2]; if(params.name == '浙江') return 'Zhejiang' + ' : ' + params.value[2]; if(params.name == '湖南') return 'Hunan' + ' : ' + params.value[2]; return ''; } """ # show only 5 province names ln = """ function(params) { if(params.name == '湖北') return 'Hubei'; if(params.name == '广东') return 'Guangdong'; if(params.name == '河南') return 'Henan'; if(params.name == '浙江') return 'Zhejiang'; if(params.name == '湖南') return 'Hunan'; return ''; } 
""" # scatter plot if subject == 'scatter': # show both province name and number of infected c = ( Geo(init_opts = opts.InitOpts(width = '750px', height = '500px', bg_color = "#FFFFFF"),) .add_schema(maptype = "china", itemstyle_opts = opts.ItemStyleOpts(color = "white", border_color = "black"),) .add( "number of infected", [list(z) for z in zip(provinces, values)], #type_= ChartType.EFFECT_SCATTER, color = '#d94e5d', symbol_size = 10, point_size = 5, ) .set_series_opts( label_opts = opts.LabelOpts( formatter=JsCode(ln), font_size = fs - 3, color = 'black')) #.set_series_opts(label_opts = opts.LabelOpts(is_show = True)) .set_global_opts( legend_opts = opts.LegendOpts(textstyle_opts = opts.TextStyleOpts(font_size = fs), item_width = 20, item_height = 10,), visualmap_opts = opts.VisualMapOpts( is_show = True, split_number = 6, is_piecewise = True, pos_top = 'center', pieces = pieces, textstyle_opts = opts.TextStyleOpts(font_size = fs), item_width = 20, item_height = 10), title_opts = opts.TitleOpts(title = "China", subtitle = end_date.strftime("%d %B, %Y"), title_textstyle_opts = opts.TextStyleOpts(font_size = fs + 5), subtitle_textstyle_opts = opts.TextStyleOpts(font_size = fs + 2))) ) c.render(_Figure_PATH_ + 'figures_china/' + "China_smap.html") return c # heatmap plot else: c = ( Geo(init_opts = opts.InitOpts(width = '750px', height = '500px', bg_color = "#FFFFFF")) .add_schema(maptype="china", itemstyle_opts = opts.ItemStyleOpts(color = "white", border_color = "black"),) .add( "number of infected", [list(z) for z in zip(provinces, values)], color = '#d94e5d', #type_= ChartType.EFFECT_SCATTER, symbol_size = 6, point_size = 5, ) .add( "number of infected", [list(z) for z in zip(provinces, values)], type_ = ChartType.HEATMAP, symbol_size = 15, ) .set_series_opts(label_opts = opts.LabelOpts( formatter=JsCode(ln), font_size = fs - 3, color = 'black')) .set_global_opts( legend_opts = opts.LegendOpts(textstyle_opts = opts.TextStyleOpts(font_size = fs), item_width = 20, item_height = 10,), visualmap_opts = opts.VisualMapOpts(max_ = vr_upper, pos_top = 'center', textstyle_opts = opts.TextStyleOpts(font_size = fs), item_width = 12, item_height = 100), title_opts = opts.TitleOpts(title="China",subtitle = end_date.strftime("%d %B, %Y"), title_textstyle_opts = opts.TextStyleOpts(font_size = fs + 5), subtitle_textstyle_opts = opts.TextStyleOpts(font_size = fs + 2))) ) c.render(_Figure_PATH_ + 'figures_china/' + "China_hmap.html") return c c = figure_map_html(data_province_domestic, end_date, pieces, fs = 15, subject = 'scatter') c.render_notebook() vr_upper = 1000 c = figure_map_html(data_province_domestic, end_date, pieces, vr_upper, fs = 15, subject = 'heatmap') c.render_notebook()How to use of module `fire`Trying to run the course notebook on 2019-03-27 fails because the script cannot import the module `fire`.Let's figure that and out and fix it.Q: Is the module `fire` available to the notebook code (i.e., the Jupyter kernel)?import fireAnswer: NoWhat environment is the notebook code running in? 
A conda env?The wisdom on these issues: https://jakevdp.github.io/blog/2017/12/05/installing-python-packages-from-jupyter/# Could run this to see # !conda infoSo the kernel's shell sees a conda base env.import sys sys.prefixSo it seems like the kernel code is running in the same conda base environment that we get when we don't specify a specific env.Let us try installing `fire` into this env to be able to run this script# Could run this to install fire # !conda install --yes fire -c conda-forgeHaving installed `fire` from the `conda-forge` channel, let us now try to import it:import fireOkay, now let's see if the script that needs `fire` works. Export!python notebook2script.py 00_exports.ipynbQ: But where is this exported file? We can see that it is placed in the `exp/` directory:!ls exp/ !cat exp/nb_00.pyHow it works:import json d = json.load(open('00_exports.ipynb','r'))['cells'] d[0]Text classificationimport pandas as pd import matplotlib.pyplot as plt import random random.seed(1228) from sklearn.feature_extraction.text import * from sklearn.metrics import * from sklearn.pipeline import Pipeline import re from pymystem3 import Mystem import numpy as np import itertools %matplotlib inline import warnings warnings.filterwarnings('ignore') from nltk.stem.snowball import RussianStemmer import seaborn as sns import joblibSplitting into training and test setsm = Mystem() regex = re.compile("[А-Яа-я]+") def words_only(text, regex=regex): try: return " ".join(regex.findall(text)) except: return "" def lemmatize(text, mystem=m): try: return "".join(m.lemmatize(text)).strip() except: return " " def stemming(text, stemmer = RussianStemmer()): try: return " ".join([stemmer.stem(w) for w in text.split()]) except: return " " from nltk.corpus import stopwords # print(stopwords.words('russian')) with open("/Users/romakindmitriy/PycharmProjects/TelegramParser/docs/stopwords/fullstopwords.txt", 'r') as f: stopw = f.readlines() # print(stopw) v_stopwords = list(set([x[:-1] for x in stopw])) print(len(v_stopwords)) mystopwords = stopwords.words('russian') + v_stopwords mystopwords = list(set(mystopwords)) def remove_stopwords(text, mystopwords = mystopwords): try: return " ".join([token for token in text.split() if not token in mystopwords]) except: return "" d = pd.read_csv('./dataset_classes.csv', usecols=['message','class']) d.head() filter_1 = d['class'] == 1. filter_2 = d['class'] == 2. filter_3 = d['class'] == 3. filter_4 = d['class'] == 4. filter_5 = d['class'] == 5.
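# Note: the five boolean masks above can be combined more compactly with .isin;
# an equivalent one-liner (not used below) would be:
# data = d[d['class'].isin([1., 2., 3., 4., 5.])]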
# filter_6 = d['class'] == 5 data = d.loc[filter_1 | filter_2 | filter_3 | filter_4 | filter_5] # ch = 150 # pdlist = [d[filter_1][:ch], d[filter_2][:ch], d[filter_3][:ch], d[filter_4][:ch], d[filter_5][:ch]] # data = pd.concat(pdlist) data['class'].value_counts() data.head() # corpus_sample_train = pd.DataFrame(columns=data.columns) # corpus_sample_test = pd.DataFrame(columns=data.columns) # for cl in data['class'].unique(): # corpus_sample = data[data['class']==cl] # if len(corpus_sample) > 200: # corpus_sample_train = corpus_sample_train.append(corpus_sample.iloc[:60,:]) # corpus_sample_test = corpus_sample_test.append(corpus_sample.iloc[60:,:]) # elif len(corpus_sample) > 60: # corpus_sample_train = corpus_sample_train.append(corpus_sample.iloc[:60,:]) # corpus_sample_test = corpus_sample_test.append(corpus_sample.iloc[60:,:]) corpus_sample_train = pd.DataFrame(columns=data.columns) corpus_sample_test = pd.DataFrame(columns=data.columns) for cl in data['class'].unique(): corpus_sample = data[data['class']==cl][20:120] corpus_sample_train = corpus_sample_train.append(corpus_sample) corpus_sample_test = corpus_sample_test.append(data[data['class']==cl][:20]) corpus_sample_test = corpus_sample_test.append(data[data['class']==cl][120:]) len(corpus_sample_train) len(corpus_sample_test) corpus_sample_train.message = corpus_sample_train.message.apply(lemmatize) corpus_sample_test.message = corpus_sample_test.message.apply(lemmatize) corpus_sample_train.message = corpus_sample_train.message.apply(remove_stopwords) corpus_sample_test.message = corpus_sample_test.message.apply(remove_stopwords) corpus_sample_test.head() from sklearn.naive_bayes import MultinomialNB from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier, ExtraTreesClassifier, GradientBoostingClassifier clf = Pipeline([ ('vect', CountVectorizer()), ('clf', BaggingClassifier()), # RandomForestClassifier # ('clf', AdaBoostClassifier()), ]) clf.fit(corpus_sample_train['message'], corpus_sample_train['class']) true = corpus_sample_test['class'] predictions = clf.predict(corpus_sample_test['message']) filename = 'clf_all_stopwords.sav' joblib.dump(clf, filename) print("Precision: {0:6.2f}".format(precision_score(true, predictions, average='macro'))) print("Recall: {0:6.2f}".format(recall_score(true, predictions, average='macro'))) print("F1-measure: {0:6.2f}".format(f1_score(true, predictions, average='macro'))) print("Accuracy: {0:6.2f}".format(accuracy_score(true, predictions))) print(classification_report(true, predictions)) labels = clf.classes_ labels = clf.classes_ sns.heatmap(data=confusion_matrix(true, predictions), annot=True, fmt="d", cbar=False, xticklabels=labels, yticklabels=labels) plt.title("Confusion matrix") plt.show() # print(type(corpus_sample_test['message'][1])) clf.predict(corpus_sample_test['message'][1:2]) data = [[corpus_sample_test['message'][1]]] # Create the pandas DataFrame pdd = pd.DataFrame(data, columns = ['text']) print(pdd['text'][:0]) print(type(pdd['text'][:1])) clf.predict(pdd['text'][:1])Series([], Name: text, dtype: object) Классификация текстов По мотивам http://nadbordrozd.github.io/blog/2016/05/20/text-classification-with-word2vec/import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import random import numpy as np from sklearn.metrics import * from sklearn.feature_extraction.text import * from sklearn.model_selection import train_test_split from collections import Counter, defaultdict 
random.seed(1228) %matplotlib inline from pymystem3 import Mystem import re m = Mystem() regex = re.compile("[А-Яа-я:=!\)\()A-z\_\%/|]+") def words_only(text, regex=regex): try: return " ".join(regex.findall(text)) except: return "" def lemmatize(text, mystem=m): try: return "".join(m.lemmatize(text)).strip() except: return " " d = pd.read_csv('./dataset_classes.csv') d.head() filter_1 = d['class'] == 1. filter_2 = d['class'] == 2. filter_3 = d['class'] == 3. filter_4 = d['class'] == 4. filter_5 = d['class'] == 5. filter_6 = d['class'] == 5 # df = d.loc[filter_1 | filter_2 | filter_3 | filter_4 | filter_5 | filter_6] ch = 150 pdlist = [d[filter_1][:ch], d[filter_2][:ch], d[filter_3][:ch], d[filter_4][:ch], d[filter_5][:ch]] df = pd.concat(pdlist) # df.message.dropna(inplace = True) # len_data = df.message.apply(len) # len_data.describe() df.message = df.message.apply(words_only) df.message = df.message.apply(lemmatize) # df.message = df.message.apply(remove_stopwords) df.head() texts = [df.message.iloc[i].split() for i in range(len(df))] from gensim.models import Word2Vec model = Word2Vec(texts, size=100, window=5, min_count=5, workers=6) model.save("sent_w2v.model") model = Word2Vec.load("sent_w2v.model") cl = len(df.message.tolist()) X = df.message.tolist() y = list(df['class'])[:cl] X, y = np.array(X), np.array(y) X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.33) print ("total train examples %s" % len(y_train)) print ("total test examples %s" % len(y_test)) class MeanEmbeddingVectorizer(object): def __init__(self, word2vec): self.word2vec = word2vec # if a text is empty we should return a vector of zeros # with the same dimensionality as all the other vectors self.dim = len(w2v.popitem()[1]) def fit(self, X, y): return self def transform(self, X): return np.array([ np.mean([self.word2vec[w] for w in words if w in self.word2vec] or [np.zeros(self.dim)], axis=0) for words in X ]) class TfidfEmbeddingVectorizer(object): def __init__(self, word2vec): self.word2vec = word2vec self.word2weight = None self.dim = len(w2v.popitem()[1]) def fit(self, X, y): tfidf = TfidfVectorizer(analyzer=lambda x: x) tfidf.fit(X) max_idf = max(tfidf.idf_) self.word2weight = defaultdict( lambda: max_idf, [(w, tfidf.idf_[i]) for w, i in tfidf.vocabulary_.items()]) return self def transform(self, X): return np.array([ np.mean([self.word2vec[w] * self.word2weight[w] for w in words if w in self.word2vec] or [np.zeros(self.dim)], axis=0) for words in X ]) w2v = dict(zip(model.wv.index2word, model.wv.syn0)) from sklearn.pipeline import Pipeline from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier, ExtraTreesClassifier, GradientBoostingClassifier rfc_w2v = Pipeline([ ("word2vec vectorizer", MeanEmbeddingVectorizer(w2v)), ("extra trees", RandomForestClassifier(n_estimators=20))]) rfc_w2v_tfidf = Pipeline([ ("word2vec vectorizer", TfidfEmbeddingVectorizer(w2v)), ("extra trees", RandomForestClassifier(n_estimators=20))]) rfc_w2v_tfidf_v1 = Pipeline([ ("word2vec vectorizer", TfidfEmbeddingVectorizer(w2v)), ("extra trees", BaggingClassifier())]) rfc_w2v.fit(X_train,y_train) pred = rfc_w2v.predict(X_test) filename = 'rfc_w2v.sav' joblib.dump(rfc_w2v, filename) print("Precision: {0:6.2f}".format(precision_score(y_test, pred, average='macro'))) print("Recall: {0:6.2f}".format(recall_score(y_test, pred, average='macro'))) print("F1-measure: {0:6.2f}".format(f1_score(y_test, pred, 
average='macro'))) print("Accuracy: {0:6.2f}".format(accuracy_score(y_test, pred))) print(classification_report(y_test, pred)) labels = rfc_w2v.classes_ sns.heatmap(data=confusion_matrix(y_test, pred), annot=True, fmt="d", cbar=False, xticklabels=labels, yticklabels=labels) plt.title("Confusion matrix") plt.show() rfc_w2v_tfidf.fit(X_train,y_train) pred = rfc_w2v_tfidf.predict(X_test) print("Precision: {0:6.2f}".format(precision_score(y_test, pred, average='macro'))) print("Recall: {0:6.2f}".format(recall_score(y_test, pred, average='macro'))) print("F1-measure: {0:6.2f}".format(f1_score(y_test, pred, average='macro'))) print("Accuracy: {0:6.2f}".format(accuracy_score(y_test, pred))) print(classification_report(y_test, pred)) labels = rfc_w2v.classes_ sns.heatmap(data=confusion_matrix(y_test, pred), annot=True, fmt="d", cbar=False, xticklabels=labels, yticklabels=labels) plt.title("Confusion matrix") plt.show() rfc_w2v_tfidf_v1.fit(X_train,y_train) pred = rfc_w2v_tfidf_v1.predict(X_test) print("Precision: {0:6.2f}".format(precision_score(y_test, pred, average='macro'))) print("Recall: {0:6.2f}".format(recall_score(y_test, pred, average='macro'))) print("F1-measure: {0:6.2f}".format(f1_score(y_test, pred, average='macro'))) print("Accuracy: {0:6.2f}".format(accuracy_score(y_test, pred))) print(classification_report(y_test, pred)) labels = rfc_w2v.classes_ sns.heatmap(data=confusion_matrix(y_test, pred), annot=True, fmt="d", cbar=False, xticklabels=labels, yticklabels=labels) plt.title("Confusion matrix") plt.show()Longest common substring problem[reference](http://www.bogotobogo.com/python/python_longest_common_substring_lcs_algorithm_generalized_suffix_tree.php) In computer science, the longest common substring problem is to find the longest string (or strings) that is a substring (or are substrings) of two or more strings.For example, given two strings: 'academy' and 'abracadabra', the common and the longest is 'acad'.Another example: ''ababc', 'abcdaba'. For this one, we have two substrings with length of 3: 'abc' and 'aba'.There are several algorithms to solve this problem such as **Generalized suffix tree**. Generalized suffix treeSuffix tree for the strings ABAB and BABA. Suffix links not shown.In computer science, a generalized suffix tree is a suffix tree for a set of strings. Given the set of strings ***D=S1,S2,...,Sd*** of total length ***n***, it is a Patricia tree containing all ***n*** suffixes of the strings.def lcs(S,T): m = len(S) n = len(T) counter = [[0]*(n+1) for x in range(m+1)] longest = 0 lcs_set = set() for i in range(m): for j in range(n): if S[i] == T[j]: c = counter[i][j] + 1 counter[i+1][j+1] = c if c > longest: lcs_set = set() longest = c lcs_set.add(S[i-c+1:i+1]) elif c == longest: lcs_set.add(S[i-c+1:i+1]) return lcs_set # test 1 ret = lcs('academy', 'abracadabra') for s in ret: print(s) # test 2 ret = lcs('ababc', 'abcdaba') for s in ret: print(s)acad abc abaNLP for Feature Extraction and Machine Learning Today's workshop will address various concepts in the Natural Language Processing pipeline aimed at feature and information extraction for subsequent use in machine learning tasks. A fundmental understanding of Python is necessary. We will cover:1. Pre-processing2. Preparing and declaring your own corpus3. POS-Tagging4. Dependency Parsing5. NER6. Sentiment Analysis7. 
ClassificationYou will need:* NLTK ( `$ pip install nltk`)* the parser wrapper requires the [Stanford Parser](http://nlp.stanford.edu/software/lex-parser.shtmlDownload) (in Java)* the NER wrapper requires the [Stanford NER](http://nlp.stanford.edu/software/CRF-NER.shtmlDownload) (in Java)* scikit-learn ( `$ pip install scikit-learn`)* keras ( `$ pip install keras`)* gensim ( `$ pip install gensim`) 1) Pre-processing This won't be covered much today, but regex and basic python string methods are most important in preprocessing tasks. NLTK does, however, offer an array of tokenizers and stemmers for various languages. The term-document modelThis is also sometimes referred to as "bag-of-words" by those who don't think very highly of it. The term document model looks at language as individual communicative efforts that contain one or more tokens. The kind and number of the tokens in a document tells you something about what is attempting to be communicated, and the order of those tokens is ignored.This is the primary method still used for most text analysis, although models utilizing word embeddings are beginning to take hold. We will discuss word embeddings briefly at the end.To start with, let's import NLTK and load a document from their toy corpus. Python Regex Basicsimport nltk nltk.download('webtext') document = nltk.corpus.webtext.open('grail.txt').read()Let's see what's in this documentprint(document[:1000]) import re snippet = document.split("\n")[8] print(snippet) re.search(r'coconuts', snippet)Just like with `str.find`, we can search for plain text. But `re` also gives us the option for searching for patterns of bytes - like only alphabetic characters.re.search(r'[a-z]', snippet)In this case, we've told re to search for the first sequence of bytes that is only composed of lowercase letters between `a` and `z`. We could get the letters at the end of each sentence by including a bang at the end of the pattern.re.search(r'[a-z]!', snippet)There are two things happening here:1. `[` and `]` do not mean 'bracket'; they are special characters which mean 'anything of this class'2. we've only matched one letter eachRe is flexible about how you specify numbers - you can match none, some, a range, or all repetitions of a sequence or character class.character | meaning----------|--------`{x}` | exactly x repetitions`{x,y}` | between x and y repetitions`?` | 0 or 1 repetition`*` | 0 or many repetitions`+` | 1 or many repetitions Part of the power of regular expressions are their special characters. Common ones that you'll see are:character | meaning----------|--------`.` | match anything except a newline`^` | match the start of a line`$` | match the end of a line`\s` | matches any whitespace or newline What if we wanted to grab all of Arthur's speech without grabbing the name `ARTHUR` itself?If we wanted to do this using base string manipulation, we would need to do something like:```split the document into linescreate a new list of just lines that start with ARTHURcreate a newer list with ARTHUR removed from the front of each element```Regex gives us a way of doing this in one line, by using something called groups. 
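Before moving on to groups, here is a short demonstration of the quantifiers and special characters tabulated above, reusing the `snippet` variable defined earlier (a sketch only; the exact matches depend on the content of that line):

```
import re

# Quantifiers and special characters from the tables above, applied to `snippet`.
print(re.search(r'o{2}', snippet))       # exactly two 'o's in a row (None if the line has no 'oo')
print(re.search(r'[a-z]+', snippet))     # one or more consecutive lowercase letters
print(re.search(r'^[A-Z]+', snippet))    # a run of capitals anchored to the start of the line
print(re.findall(r'\s', snippet)[:5])    # the first few whitespace characters
print(re.search(r'co.onuts', snippet))   # '.' matches any single character except a newline
```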
Groups are pieces of a pattern that can be ignored, negated, or given names for later retrieval.character | meaning----------|--------`(x)` | match x`(?:x)` | match x but don't capture it`(?P)` | match something and give it name x`(?=x)` | match only if string is followed by x`(?!x)` | match only if string is not followed by xre.findall(r'(?:ARTHUR: )(.+)', document)[0:10]Because we are using `findall`, the regex engine is capturing and returning the normal groups, but not the non-capturing group. For complicated, multi-piece regular expressions, you may need to pull groups out separately. You can do this with names.p = re.compile(r'(?P[A-Z ]+)(?::)(?P.+)') match = re.search(p, document) print(match) print(match.group('name')) print(match.group('line'))Using the regex patter `p` above to print the `set` of unique characters in *Monty Python*:matches = re.findall(p, document) chars = set([x[0] for x in matches]) print(chars, len(chars))You should have 84 different characters.Now use the `set` you made above to gather all dialogue into a character `dictionary`, with the keys being the character name and the value being a list of dialogues.:char_dict = {} for n in chars: char_dict[n] = re.findall(re.compile(r'(?:' + n + ': )(.+)'), document) char_dict["PATSY"]Tokenizingtext = '''Hello, my name is Chris. I'll be talking about the python library NLTK today. NLTK is a popular tool to conduct text processing tasks in NLP.''' from nltk.tokenize import word_tokenize print("Notice the difference!") print() print(word_tokenize(text)) print() print("vs.") print() print(text.split())You can also tokenize sentences.from nltk.tokenize import sent_tokenize print(sent_tokenize(text)) tokenized_text = [word_tokenize(sent) for sent in sent_tokenize(text)] print(tokenized_text)A list of sentences with a list of tokenized words is generally the accepted format for most libraries for analysis. Stemming/Lemmatizingfrom nltk import SnowballStemmer snowball = SnowballStemmer('english') print(snowball.stem('running')) print(snowball.stem('eats')) print(snowball.stem('embarassed'))But watch out for errors:print(snowball.stem('cylinder')) print(snowball.stem('cylindrical'))Or collision:print(snowball.stem('vacation')) print(snowball.stem('vacate'))This is why lemmatizing, if the computing power and time is sufficient, is always preferable:from nltk import WordNetLemmatizer wordnet = WordNetLemmatizer() print(wordnet.lemmatize('vacation')) print(wordnet.lemmatize('vacate'))2) Declaring a corpus in NLTK While you can use NLTK on strings and lists of sentences, it's better to formally declare your corpus, as this will take care of the above for you and provide methods to access them. For our purposes today, we'll use a corpus of [book summaries](http://www.cs.cmu.edu/~dbamman/booksummaries.html). I've changed them into a folder of .txt files for demonstration. The file below will convert the .tsv file.! ls texts from nltk.corpus import PlaintextCorpusReader corpus_root = "texts/" # relative path to texts. my_texts = PlaintextCorpusReader(corpus_root, '.*txt')We now have a text corpus, on which we can run all the basic preprocessing methods. 
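As a small bridge between sections 1 and 2, the stemmer and lemmatizer defined above can be applied directly to words drawn from the new corpus reader. The sketch below assumes the `my_texts`, `snowball`, and `wordnet` objects created earlier and that the WordNet data has been downloaded (`nltk.download('wordnet')`):

```
# Compare stemming and lemmatizing on a handful of words from one corpus file.
sample = [w.lower() for w in my_texts.words(my_texts.fileids()[0]) if w.isalpha()]
for w in sorted(set(sample))[:10]:
    print(w, '| stem:', snowball.stem(w), '| lemma:', wordnet.lemmatize(w))
```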
To list all the files in our corpus:my_texts.fileids()[:10] my_texts.words('To Kill A Mockingbird.txt') # uses punkt tokenizer like above my_texts.sents('To Kill A Mockingbird.txt')It also add as paragraph method:my_texts.paras('To Kill A Mockingbird.txt')[0]Let's save these to a variable to look at the next step on a low level:m_sents = my_texts.sents('To Kill A Mockingbird.txt') print (m_sents)We now have a corpus, or text, from which we can get any of the statistics you learned in Day 3 of the Python workshop. We will review some of these functions once we get some more information 3) POS-Tagging There are many situations, in which "tagging" words (or really anything) may be useful in order to determine or calculate trends, or for further text analysis to extract meaning. NLTK contains several methods to achieve this, from simple regex to more advanced machine learning models models.It is important to note that in Natural Language Processing (NLP), POS (Part of Speech) tagging is the most common use for tagging, but the actual tag can be anything. Other applications include sentiment analysis and NER (Named Entity Recognition). Tagging is simply labeling a word to a specific category via a tuple.Nevertheless, for training more advanced tagging models, POS tagging is nearly essential. If you are defining a machine learning model to predict patterns in your text, these patterns will most likley rely on, among other things, POS features. You will therefore first tag POS and then use the POS as a feature in your model. On a low-level Tagging is creating a tuple of (word, tag) for every word in a text or corpus. For example: "My name is Chris" may be tagged for POS as: My/PossessivePronoun name/Noun is/Verb Chris/ProperNoun ./Period*NB: type 'nltk.data.path' to find the path on your computer to your downloaded nltk corpora. You can explore these files to see how large corpora are formatted.* You'll notice how the text is annotated, using a forward slash to match the word to its tag. So how can we get this to a useful form for Python?from nltk.tag import str2tuple line = "My/Possessive_Pronoun name/Noun is/Verb Chris/Proper_Noun ./Period" tagged_sent = [str2tuple(t) for t in line.split()] print (tagged_sent)Further analysis of tags with NLTK requires a *list* of sentences, otherwise you will get an index error on higher level methods. 
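As a minimal sketch of that list-of-sentences format, the tagged sentence from above can be wrapped in a list and converted back and forth with the helpers that live alongside `str2tuple` in `nltk.tag`:

```
from nltk.tag import tuple2str, untag

# Higher-level NLTK methods expect a list of tagged sentences,
# i.e. a list of lists of (word, tag) tuples.
tagged_corpus = [tagged_sent]                            # a one-sentence "corpus"
print(untag(tagged_corpus[0]))                           # back to plain tokens
print(' '.join(tuple2str(t) for t in tagged_corpus[0]))  # back to the word/TAG string form
```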
Naturally, these tags are a bit verbose, the standard tagging conventions follow the Penn Treebank (more in a second): https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html Automatic Tagging NLTK's stock English `pos_tag` tagger is a perceptron tagger:from nltk import pos_tag m_tagged_sent = pos_tag(m_sents[0]) print (m_tagged_sent)What do these tags mean?from nltk import help help.upenn_tagset() m_tagged_all = [pos_tag(sent) for sent in m_sents] print(m_tagged_all[:3])We can find and aggregate certain parts of speech too:from nltk import ConditionalFreqDist def find_tags(tag_prefix, tagged_text): cfd = ConditionalFreqDist((tag, word) for (word, tag) in tagged_text if tag.startswith(tag_prefix)) return dict((tag, cfd[tag].most_common(5)) for tag in cfd.conditions()) #cfd.conditions() yields all tags possibilites m_tagged_words = [item for sublist in m_tagged_all for item in sublist] tagdict = find_tags('JJ', m_tagged_words) for tag in sorted(tagdict): print(tag, tagdict[tag])We can begin to quantify syntax by look at environments of words, so what commonly follows a verb?import nltk tags = [b[1] for (a, b) in nltk.bigrams(m_tagged_words) if a[1].startswith('VB')] fd1 = nltk.FreqDist(tags) print ("To Kill A Mockingbird") fd1.tabulate(10)Creating a tagged corpus Now that we know how tagging works, we can quickly tag all of our documents, but we'll only do a few hundred from the much larger corpus.tagged_sents = {} for fid in my_texts.fileids()[::10]: tagged_sents[fid.split(".")[0]] = [pos_tag(sent) for sent in my_texts.sents(fid)] tagged_sents.keys() tagged_sents[" and the Prisoner of Azkaban"]Absolute frequencies are available through NLTK's `FreqDist` method:all_tags = [] all_tups = [] for k in tagged_sents.keys(): for s in tagged_sents[k]: for t in s: all_tags.append(t[1]) all_tups.append(t) nltk.FreqDist(all_tags).tabulate(10) tags = ['NN', 'VB', 'JJ'] for t in tags: tagdict = find_tags(t, all_tups) for tag in sorted(tagdict): print(tag, tagdict[tag])We can compare this to other genres:from nltk.corpus import brown for c in brown.categories(): tagged_words = brown.tagged_words(categories=c) # not universal tagset tag_fd = nltk.FreqDist(tag for (word, tag) in tagged_words) print(c.upper()) tag_fd.tabulate(10) print() tags = ['NN', 'VB', 'JJ'] for t in tags: tagdict = find_tags(t, tagged_words) for tag in sorted(tagdict): print(tag, tagdict[tag]) print() print()We can also look at what linguistic environment words are in on a low level, below lists all the words preceding "love" in the romance category:brown_news_text = brown.words(categories='romance') sorted(set(a for (a, b) in nltk.bigrams(brown_news_text) if b == 'love'))4) Dependency Parsing While tagging parts of speech can be helpful for certain NLP tasks, dependency parsing is better at extracting real relationships within a sentence.from nltk.parse.stanford import StanfordDependencyParser dependency_parser = StanfordDependencyParser(path_to_jar = "/Users/chench/Documents/stanford-parser-full-2015-12-09/stanford-parser.jar", path_to_models_jar = "/Users/chench/Documents/stanford-parser-full-2015-12-09/stanford-parser-3.6.0-models.jar") result = dependency_parser.raw_parse_sents(['I shot an elephant in my sleep.', 'It was great.'])As the program takes longer to run, I will not run it on the entire corpus, but an example is below:for r in result: for o in r: trips = list(o.triples()) # ((head word, head tag), rel, (dep word, dep tag)) for t in trips: print(t) print()5) Named Entity Recognition After tokening, 
tagging, and parser, one of the last steps in the pipeline is NER. Identifying named entities can be useful in determing many different relationships, and often serves as a prerequisite to mapping textual relationships within a set of documents.from nltk.tag.stanford import StanfordNERTagger ner_tag = StanfordNERTagger( '/Users/chench/Documents/stanford-ner-2015-12-09/classifiers/english.all.3class.distsim.crf.ser.gz', '/Users/chench/Documents/stanford-ner-2015-12-09/stanford-ner.jar') import pyprind ner_sents = {} books = ["To Kill A Mockingbird.txt", "Hter and the Prisoner of Azkaban.txt"] for fid in books: bar = pyprind.ProgBar(len(my_texts.sents(fid)), monitor=True, bar_char="#") tagged_sents = [] for sent in my_texts.sents(fid): tagged_sents.append(ner_tag.tag(sent)) bar.update() ner_sents[fid.split(".")[0]] = tagged_sents print()We can look on the low level at a single summary:print(ner_sents["To Kill A Mockingbird"]) print(ner_sents["H and the Prisoner of Azkaban"]) from itertools import groupby from nltk import FreqDist NER = {"LOCATION": [], "PERSON": [], "ORGANIZATION": [], } for sentence in ner_sents["To Kill A Mockingbird"]: for tag, chunk in groupby(sentence, lambda x: x[1]): if tag != "O": NER[tag].append(" ".join(w for w, t in chunk)) if NER["LOCATION"]: print("Locations:") FreqDist(NER["LOCATION"]).tabulate() print() if NER["PERSON"]: print("Persons:") FreqDist(NER["PERSON"]).tabulate() print() if NER["ORGANIZATION"]: print("Organizations") FreqDist(NER["ORGANIZATION"]).tabulate()Or between the two:NER = {"LOCATION": [], "PERSON": [], "ORGANIZATION": [], } for k in ner_sents.keys(): for sentence in ner_sents[k]: for tag, chunk in groupby(sentence, lambda x: x[1]): if tag != "O": NER[tag].append(" ".join(w for w, t in chunk)) if NER["LOCATION"]: print("Locations:") FreqDist(NER["LOCATION"]).tabulate() print() if NER["PERSON"]: print("Persons:") FreqDist(NER["PERSON"]).tabulate() print() if NER["ORGANIZATION"]: FreqDist(NER["ORGANIZATION"]).tabulate()6) Sentiment Analysis While earlier sentiment analysis was based on simple dictionary look-up methods denoting words as positive or negative, or assigning numerical values to words, newer methods are better able to take a word's or sentence's environment into account. 
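To make the contrast concrete, here is a toy dictionary look-up scorer of the kind just described; the word lists are invented purely for illustration:

```
# A minimal look-up scorer: +1 per "positive" token, -1 per "negative" token.
# The word sets below are toy examples, not a real sentiment lexicon.
positive = {'good', 'great', 'love', 'wonderful'}
negative = {'bad', 'terrible', 'hate', 'awful'}

def naive_sentiment(sentence):
    tokens = [t.lower() for t in word_tokenize(sentence)]
    return sum(t in positive for t in tokens) - sum(t in negative for t in tokens)

print(naive_sentiment("I love this book."))        # +1
print(naive_sentiment("I don't love this book."))  # also +1: negation is invisible to a look-up
```

A look-up cannot tell these two sentences apart, which is exactly the limitation newer methods address.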
VADER (Valence Aware Dictionary and sEntiment Reasoner) is one such example.from nltk.sentiment.vader import SentimentIntensityAnalyzer import numpy as np sid = SentimentIntensityAnalyzer() print(sid.polarity_scores("I really don't like that book.")["compound"]) for fid in books: print(fid.upper()) sent_pols = [sid.polarity_scores(s)["compound"] for s in sent_tokenize(my_texts.raw(fid))] for i, s in enumerate(my_texts.sents(fid)): print(s, sent_pols[i]) print() print() print("Mean: ", np.mean(sent_pols)) print() print("="*100) print()7) Classification We'll use the IMDB [movie review database](http://ai.stanford.edu/~amaas/data/sentiment/).import numpy as np from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier from sklearn.svm import LinearSVC from sklearn import metrics, tree, cross_validation from sklearn.naive_bayes import MultinomialNB from sklearn.linear_model import RandomizedLogisticRegression import glob data = {"train": {"pos": [], "neg": []}, "test": {"pos": [], "neg": []}} txt_types = [("train", "neg"), ("train", "pos"), ("test", "neg"), ("test", "pos")] for t in txt_types: for txt_file in glob.glob("data/" + t[0] + "/" + t[1] + "/*.txt"): with open(txt_file, "r") as f: text = f.read() data[t[0]][t[1]].append(text) list(data["train"]["pos"])[0] list(data["train"]["neg"])[0] # get training + test data import numpy as np X_train = data["train"]["pos"] + data["train"]["neg"] y_train = np.append(np.ones(len(data["train"]["pos"])), np.zeros(len(data["train"]["neg"]))) X_test = data["test"]["pos"] + data["test"]["neg"] y_test = np.append(np.ones(len(data["test"]["pos"])), np.zeros(len(data["test"]["neg"]))) print(len(X_train), len(y_train)) print(len(X_test), len(y_test))*tfidf*tfidf = TfidfVectorizer() tfidf.fit_transform(X_train)Pipeline# build a pipeline - SVC from sklearn.pipeline import Pipeline text_clf = Pipeline([('vect', CountVectorizer(ngram_range=(1, 2))), ('tfidf', TfidfTransformer()), ('clf', OneVsRestClassifier(LinearSVC(random_state=0))) ]) # fit using pipeline clf = text_clf.fit(X_train, y_train) # predict predicted = clf.predict(X_test) clf.score(X_test, y_test) # print metrics print(metrics.classification_report(y_test, predicted)) scores = cross_validation.cross_val_score(text_clf, X_train + X_test, np.append(y_train, y_test), cv=5)TPOTfrom tpot import TPOTClassifier from sklearn.datasets import load_digits from sklearn.cross_validation import train_test_split tpot = TPOTClassifier(generations=5, population_size=20, verbosity=2) tpot.fit(X_train, y_train) print(tpot.score(X_test, y_test)) tpot.export('tpot_imdb_pipeline.py')Neural Networks and word2vec Word embeddings are the first successful attempt to move away from the "bag of words" model of language. Instead of looking at word frequencies, and vocabulary usage, word embeddings aim to retain syntactic information. 
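As a quick preview of what an embedding model provides, a small word2vec model can be trained with gensim on the book-summary corpus declared earlier. This is only a sketch: it reuses `my_texts` from above, the `size=` keyword matches the pre-4.0 gensim API used elsewhere in this document, and the query word must survive the `min_count` cut-off:

```
from gensim.models import Word2Vec

# Train a small embedding model on the book-summary corpus declared earlier.
sentences = [[w.lower() for w in sent] for fid in my_texts.fileids() for sent in my_texts.sents(fid)]
emb = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)

query = 'school'  # any word frequent enough to be in the vocabulary will do
if query in emb.wv.vocab:
    print(emb.wv.most_similar(query, topn=5))
```

Words that appear in similar contexts end up close together in the embedding space.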
Generally, a neural network model *will not* remove stopwords or punctuation, because they are vital to the model itself.Embedding first changes a tokenized sentence into a vector of numbers, with each unique token being its own number.e.g.:~~~[["I", "like", "coffee", "."], ["I", "like", "my", "coffee", "without", "sugar", "."]]~~~is tranformed to:~~~[[43, 75, 435, 98], [43, 75, 10, 435, 31, 217, 98]]~~~Notice, the "I"s, the "likes", the "coffees", and the "."s, all have the same assignment.The model is created by taking these numbers, and creating a high dimensional vector by mapping every word to its surrounding, creating a sort of "cloud" of words, where words used in a similar syntactic, and often semantic, fashion, will cluster closer together.One of the drawbacks of word2vec is the volume of data necessary for a decent analysis.Let's see how to code this into a classifier. One-hot encode text First we have to one-hot encode the text, but let's limit the features to the most common 20,000 words.from collections import Counter max_features = 20000 all_words = [] for text in X_train + X_test: all_words.extend(text.split()) unique_words_ordered = [x[0] for x in Counter(all_words).most_common()] word_ids = {} rev_word_ids = {} for i, x in enumerate(unique_words_ordered[:max_features-1]): word_ids[x] = i + 1 # so we can pad with 0s rev_word_ids[i + 1] = x X_train_one_hot = [] for text in X_train: t_ids = [word_ids[x] for x in text.split() if x in word_ids] X_train_one_hot.append(t_ids) X_test_one_hot = [] for text in X_test: t_ids = [word_ids[x] for x in text.split() if x in word_ids] X_test_one_hot.append(t_ids)NN Classification Now we can use Keras, a popular Theano wrapper, to quickly build an NN classifier.from __future__ import print_function import numpy as np np.random.seed(1337) # for reproducibility from keras.preprocessing import sequence from keras.utils import np_utils from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Embedding from keras.layers import LSTM, SimpleRNN, GRU maxlen = 80 # cut texts after this number of words (among top max_features most common words) batch_size = 32 print('Pad sequences (samples x time)') X_train = sequence.pad_sequences(X_train_one_hot, maxlen=maxlen) # so arrays are uniform X_test = sequence.pad_sequences(X_test_one_hot, maxlen=maxlen) print('X_train shape:', X_train.shape) print('X_test shape:', X_test.shape) print('Build model...') model = Sequential() model.add(Embedding(max_features, 128, dropout=0.2)) model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2)) # try using a GRU instead, for fun model.add(Dense(1)) model.add(Activation('sigmoid')) # try using different optimizers and different optimizer configs model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) print('Train...') model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15, validation_data=(X_test, y_test)) score, acc = model.evaluate(X_test, y_test, batch_size=batch_size) print('Test score:', score) print('Test accuracy:', acc)EquationsHere we are solving a set of coupled differential equations gives (from Chapman supplement and Arne notebook)$$\begin{align} i \dot{\zeta}_1&= (p_1 B + q_1 B^2 + q\mu_1(t))\zeta_1+c\left[(\zeta_1^*\zeta_1+\zeta_0^*\zeta_0-\zeta_{-1}^*\zeta_{-1})\zeta_1+\zeta_0\zeta_0\zeta_{-1}^*)\right]\\ i\dot{\zeta}_0&=(p_0 B + q_0 B^2 + q\mu_0(t))\zeta_0+c\left[(\zeta_1^*\zeta_1 + \zeta_{-1}^*\zeta_{-1})\zeta_0 + 2\zeta_1\zeta_{-1}\zeta_0^*\right]\\ i \dot{\zeta}_{-1}&= (p_{-1} B + q_{-1} B^2 + 
q\mu_{-1}(t))\zeta_{-1}+c\left[(\zeta_{-1}^*\zeta_{-1}+\zeta_0^*\zeta_0-\zeta_{1}^*\zeta_{1})\zeta_{-1}+\zeta_0\zeta_0\zeta_{1}^*)\right]\\\end{align}$$ Expectation ValuesFirst we note that $$ S_x = \frac{1}{\sqrt{2}}\begin{pmatrix}0&1&0\\1&0&1\\0&1&0\\\end{pmatrix}$$So to calculate its expectation value with a given wavefunction, we have$$\psi^*S_x \psi = (\psi_1^*,\psi_0^*,\psi_{-1}^*)\frac{1}{\sqrt{2}}\begin{pmatrix}0&1&0\\1&0&1\\0&1&0\\\end{pmatrix}\begin{pmatrix}\psi_1\\\psi_0\\\psi_{-1}\end{pmatrix} = \frac{(\psi_1^*+\psi_{-1}^*)\psi_0 + \psi_0^*(\psi_{1}+\psi_{-1})}{\sqrt{2}}$$import numpy as np import matplotlib.pyplot as plt from scipy.integrate import ode from numpy.lib import scimath %matplotlib inline #first define the system dy/dt = f(y,t) def msqr(x): return np.conj(x) * x def f(t,y,*args): """system of ode we want to solve""" z1i = y[0] z0i = y[1] zmi = y[2] #now define equations f0 = ((p1*B+q1*B**2+qu1)*z1i + c*((msqr(z1i) + msqr(z0i) - msqr(zmi))*z1i + z0i*z0i*np.conj(zmi)))*np.complex(0,-1) f1 = ((p0*B+q0*B**2+qu0)*z0i + c*((msqr(z1i) + msqr(zmi))*z0i + 2*z1i*zmi*np.conj(z0i)))*np.complex(0,-1) f2 = ((pm1*B+qm1*B**2+qum1)*zmi + c*((msqr(zmi) + msqr(z0i) - msqr(z1i))*zmi + z0i*z0i*np.conj(z1i)))*np.complex(0,-1) return [f0,f1,f2] class Operator(object): """class of operator for 3x3 matrices in this problem""" def __init__(self, mat,rep): self.rep = rep self.mat = mat def apply(self,ele): return np.dot(np.conj(ele), np.dot(self.mat,ele.T)).real S_x = Operator(np.array([[0,1,0],[1,0,1],[0,1,0]])*1/np.sqrt(2),r'$S_x$') N_yz = Operator(1j/np.sqrt(2)* np.array([[0,-1,0],[1,0,1],[0,-1,0]]),r'$N_{yz}#') rho_0 = Operator(np.array([[0,0,0],[0,1,0],[0,0,0]]),r'$\rho_0$') #define problem parameters dt = .1e-4 tfinal = 2 #allot arrays t = np.linspace(0,tfinal,int(tfinal/dt)) def validate(par,t): """function to validate arrays""" lt = len(t) if isinstance(par,float) or isinstance(par,int) or isinstance(par,np.complex): return np.asarray([par for i in t]) elif len(par)==lt: return par else: print('check array dimensions') #define parameters B = 260/700 p1 = 0 p0 = 0 pm1 = 0 qu1 =0 qu0 = 0 qum1= 0 q1 = 0 q0 = -5 qm1= 0 c = 30 B_arr = validate(B,t) p1_arr = validate(p1,t) p0_arr = validate(p0,t) pm1_arr = validate(pm1,t) qu1_arr = validate(qu1,t) qu0_arr = validate(qu0,t) qum1_arr = validate(qum1,t) q1_arr = validate(q1,t) q0_arr = validate(q0,t) qm1_arr = validate(qm1,t) c_arr = validate(c,t) #solve the sucker and plot def solve_system(y0): r = ode(f).set_integrator('zvode') b = ode(f).set_integrator('zvode') r.set_initial_value(y0,0) b.set_initial_value(y0,0) ans = np.zeros((len(t),3),dtype = complex) step = 0 while r.successful() and r.t < tfinal-dt: #update the parameters B = B_arr[step] p1 = p1_arr[step] p0 = p0_arr[step] pm1 = pm1_arr[step] qu1 = qu1_arr[step] qu0 = qu0_arr[step] qum1 = qum1_arr[step] q1 = q1_arr[step] q0 = q0_arr[step] qm1 = qm1_arr[step] c = c_arr[step] ans[step] = np.asarray(r.integrate(r.t + dt)) step += 1 sol = [] while b.successful() and b.t < tfinal: b.integrate(tfinal,step = True) sol.append([b.t,(np.conj(b.y[1])*b.y[1]).real]) return [ans, np.array(sol)] def get_exp_values(soll,step_size): sol = soll[0] ans = soll[1] """function to compute expectation values""" r_0 = np.asarray([rho_0.apply(i) for i in sol[::step_size]]) sx_calc = np.asarray([S_x.apply(i) for i in sol[::step_size]]) nyz_calc = np.asarray([N_yz.apply(i) for i in sol[::step_size]]) return np.asarray([r_0, sx_calc, nyz_calc]),ans step_size = 1 #do calculation N = 10000 m = 0 theta = 0 rho = 
.99 state = [scimath.sqrt((1-rho+m)/2)*np.exp(theta/2*1j),scimath.sqrt(rho),scimath.sqrt((1-rho-m)/2) * np.exp(theta/2*1j)] ans, ans1 = get_exp_values(solve_system(state),step_size) plt.plot(t[::step_size],ans[0],'b.') plt.plot(ans1[:,0],ans1[:,1],'g.-') from lmfit import Model def sinfunc(x,A,f,off,phi): return A * np.sin(2*np.pi*f*x+phi)+off sinmod = Model(sinfunc) pars = sinmod.make_params(A =1.5e3,f = 3, off = 39.5e3,phi=0) pars['phi'].min = 0 pars['phi'].max = 2*np.pi pars['A'].min = 0 fit = sinmod.fit(x = t[::step_size],data = ans[0]*N,params = pars) fit.plot(fitfmt='-o'); print(fit.fit_report()) #help(fit) fig, ax = plt.subplots(3,1) for i in range(3): m = ans[i] s = ans[i] ax[i].plot(t[::step_size],m) #ax[i].fill_between(t[::step_size,],m-s,m+s,facecolor='green',alpha=0.3) plt.tight_layout()Blackman pulse shape This is a pulse shape which is roughly like a square pulse but with smooth edges, which will make the implementation much simpler in code.def black_left(t,ts,dur,fv): """calc blackman pulse""" ans = fv*(0.42 +0.5*np.cos(np.pi*(t-ts-dur)/dur)+0.08*np.cos(2*np.pi*(1-ts-dur))) ans[t<ts]=0 ans[t>(ts+dur)]=0 return ans def black_right(t,ts,dur,iv): """calc blackman pulse first we will calculate whole pulse and then set elements outside the pulse window to zero""" ans = iv*(0.42 +0.5*np.cos(np.pi*(t-ts)/dur)+0.08*np.cos(2*np.pi*(1-ts))) ans[t<ts]=0 ans[t>(ts+dur)]=0 return ans def const_pulse(t,ts,dur,val): ans = np.asarray([val for i in t]) ans[t<ts]=0 ans[t>(ts+dur)]=0 return ans def micro_pulse(t,ts,dur,ew,qm): ans = black_left(t,ts,ew,qm)+black_right(t,ts+ew+dur,ew,qm)+const_pulse(t,ts+ew,dur,qm) return ans t = np.linspace(0,3e-3,1000) ans = micro_pulse(t,0,2e-3,25e-6,800) plt.plot(t,ans) %timeit micro_pulse(t,0,2e-3,25e-6,800)10000 loops, best of 3: 187 µs per loopPAN-STARRS Mbol Interpolation Using Color Author: Institution: The University of Texas at AustinThis is a general-purpose notebook to interpolate bolometric magnitudes using PAN-STARRS photometry in g, r, i, z bands as the input. The interpolation uses Table B1 from ***., ., ., et al. 2016, MNRAS, 455, 4212***Follow the USER comments for instructions on utilizing this notebook. Users will commonly have to modify only cell 2. You will need to clone and download the entire repository so that the relevant text file (containing the grid) can be utilized for interpolation. User Inputs: ACTION REQUIRED# USER: Enter your system path to the FITS file fits_filepath = 'your_path/your_file.fits' # USER: Enter the names of the columns storing the respective data in your FITS File # If there are multiple columns for the same photometric band (arising from cross-matches etc.), # specify names of additional columns in '' separated by commas # The code will utilize the column that has photometry values and ignore ones that do not. # NOTE: If you are specifying a photometric band column but do not have an error column, # add '' to the error column name. (You do not need to have multiple columns for all bands but you need to # have a corresponding error column for each specification even if you leave it as '') # Example is provided below for multiple column use and no corresponding error column. # PAN-STARRS Photometry gmag_col = ['gmag', 'g_P1'] gmag_err_col = ['e_gmag', ''] rmag_col = ['rmag', 'r_P1'] rmag_err_col = ['e_rmag', 'r_P1_err'] imag_col = ['imag'] imag_err_col = ['e_imag'] zmag_col = ['zmag'] zmag_err_col = ['e_zmag'] # USER: If you have defined a subset for your data that is a boolean column and you wish to use those datapoints only.
# Enter the name of the column and set flag to 1 # Subset Column bool_subset_col = 'your_subset' bool_subset_flag = 0Photometry Addition Function# USER: Do not modify contents from astropy.io import fits from astropy.table import Table import numpy as np from scipy.interpolate import griddata def photometry(photometry_cols, photometry_dict): keys = list(photometry_dict.keys()) for counter in range(0, len(photometry_cols), 2): photo_flag = 0 for ctr in range(0, len(photometry_cols[counter])): if(photo_flag == 0): if(photometry_cols[counter][ctr] != '' and ~np.isnan(data[index][photometry_cols[counter][ctr]])): photometry_dict[keys[counter]] = data[index][photometry_cols[counter][ctr]] if(photometry_cols[counter+1][ctr] != ''): photometry_dict[keys[counter+1]] = data[index][photometry_cols[counter+1][ctr]] photo_flag = 1 else: break return photometry_dictAccess Data# Open FITS file hdu_list = fits.open(fits_filepath, memmap=True) data = hdu_list[1].data hdu_list.close() if(bool_subset_flag): data = data[data[bool_subset_col] == True]Interpolationmbol_interp = [] # Color calculations for model g_model, r_model, i_model, z_model, y_model, mbol_model = np.loadtxt('text_files/PAN_STARRS_color_teff.txt', unpack=True, usecols=(1,2,3,4,5,9)) g_r_model = np.subtract(g_model, r_model) i_z_model = np.subtract(i_model, z_model) for index in range(0, len(data)): # List of photometry column names photometry_cols = [gmag_col, gmag_err_col, rmag_col, rmag_err_col, imag_col, imag_err_col, zmag_col, zmag_err_col] # Initialized dictionary of photometry variables photometry_dict = {'gmag': np.nan, 'gmage': np.nan, 'rmag': np.nan, 'rmage': np.nan, 'imag': np.nan, 'image': np.nan, 'zmag': np.nan, 'zmage': np.nan} # Color calculations for data photometry_dict = photometry(photometry_cols, photometry_dict) g_r = np.subtract(photometry_dict['gmag'], photometry_dict['rmag']) i_z = np.subtract(photometry_dict['imag'], photometry_dict['zmag']) # Interpolate and append mbol = griddata((g_r_model, i_z_model), mbol_model, (g_r, i_z), method='linear') mbol_interp.append(mbol)Print Interpolated Data# Print interpolated data mbol_interp = np.array(mbol_interp) print(mbol_interp)Add Filter DMV Versus Other Statesdf['dmv_area'] = (df.rp_plate_state=='DC') | (df.rp_plate_state=='MD') | (df.rp_plate_state=='VA')Compare Total Tickets and Fines Between Statesdmv_vs_other = df.groupby(df.dmv_area).counter.sum()#.reset_index() # dmv_vs_other# .name('Parking Tickets Jan 2009 - May 2016') dmv_vs_other = pd.Series(dmv_vs_other, name='Parking Tickets Volume % ~ Jan 2009 - May 2016') dmv_vs_other.plot.pie(labels=['Not DMV', 'DMV'], colors=['r', 'y'], autopct='%.2f', figsize=(4, 4)) dmv_vs_other = df.groupby(df.dmv_area).fine.sum()#.reset_index() # dmv_vs_other# .name('Parking Tickets Jan 2009 - May 2016') dmv_vs_other = pd.Series(dmv_vs_other, name='Parking Ticket Fine Percent ~ Jan 2009 - May 2016') dmv_vs_other.plot.pie(labels=['Not DMV', 'DMV'], colors=['r', 'y'], autopct='%.2f', figsize=(4, 4))TimeSeriesdates = pd.date_range(df.ticket_issue_datetime.min(), df.ticket_issue_datetime.max(), freq='Q') dmv_datetime = df[["ticket_issue_datetime", 'dmv_area', 'fine', 'counter']] dmv_datetime.set_index("ticket_issue_datetime", inplace=True) three_month_groups = dmv_datetime.groupby(pd.TimeGrouper('3M')).apply(lambda x: x.groupby('dmv_area').sum()) total_by_6month = dmv_datetime.groupby(pd.TimeGrouper('6M')).sum() total_by_6month total_by_6month.counter.plot(kind='bar', title="Volume of Tickets for All DMV States in DC") plt.xlabel('6 Month 
Period') plt.ylabel('Total Number Traffic Tickets') year_period = dmv_datetime.groupby(pd.TimeGrouper('12M')).apply(lambda x: x.groupby('dmv_area').sum()) year_period#.plot(kind='bar', title="Volume of Tickets for All DMV States in DC")Reboot: Box-Plots for Education Competition My resolution for the project based on the "Machine Learning with Experts: School Budgets" course by DataCamp 1. Import Libraries#ignore warnings import warnings warnings.filterwarnings("ignore") import matplotlib.pyplot as plt import numpy as np import pandas as pd from warnings import warn2. Load Data The data has been loaded from the DrivenData Competition.Firstly, we will use the training set of data.df = pd.read_csv('TrainingData.csv', index_col=0) df.head() df.shape3. Implement Essential Functions The multi-class log-loss, multi-class train-test split functions were taken from the https://github.com/datacamp/course-resources-ml-with-experts-budget repository by DataCamp and represented in the aforesaid course.BOX_PLOTS_COLUMN_INDICES = [list(range(37)), list(range(37, 48)), list(range(48, 51)), list(range(51, 76)), list(range(76, 79)), list(range(79, 82)), list(range(82, 87)), list(range(87, 96)), list(range(96, 104))] def multi_multi_log_loss(predicted, actual, class_column_indices=BOX_PLOTS_COLUMN_INDICES, eps=1e-15): """ Multi class version of Logarithmic Loss metric as implemented on DrivenData.org """ class_scores = np.ones(len(class_column_indices), dtype=np.float64) # calculate log loss for each set of columns that belong to a class: for k, this_class_indices in enumerate(class_column_indices): # get just the columns for this class preds_k = predicted[:, this_class_indices].astype(np.float64) # normalize so probabilities sum to one (unless sum is zero, then we clip) preds_k /= np.clip(preds_k.sum(axis=1).reshape(-1, 1), eps, np.inf) actual_k = actual[:, this_class_indices] # shrink predictions so y_hats = np.clip(preds_k, eps, 1 - eps) sum_logs = np.sum(actual_k * np.log(y_hats)) class_scores[k] = (-1.0 / actual.shape[0]) * sum_logs return np.average(class_scores) def multilabel_sample(y, size=1000, min_count=5, seed=None): """ Takes a matrix of binary labels `y` and returns the indices for a sample of size `size` if `size` > 1 or `size` * len(y) if size =< 1. The sample is guaranteed to have > `min_count` of each label. """ try: if (np.unique(y).astype(int) != np.array([0, 1])).all(): raise ValueError() except (TypeError, ValueError): raise ValueError('multilabel_sample only works with binary indicator matrices') if (y.sum(axis=0) < min_count).any(): raise ValueError('Some classes do not have enough examples. Change min_count if necessary.') if size <= 1: size = np.floor(y.shape[0] * size) if y.shape[1] * min_count > size: msg = "Size less than number of columns * min_count, returning {} items instead of {}." 
warn(msg.format(y.shape[1] * min_count, size)) size = y.shape[1] * min_count rng = np.random.RandomState(seed if seed is not None else np.random.randint(1)) if isinstance(y, pd.DataFrame): choices = y.index y = y.values else: choices = np.arange(y.shape[0]) sample_idxs = np.array([], dtype=choices.dtype) # first, guarantee > min_count of each label for j in range(y.shape[1]): label_choices = choices[y[:, j] == 1] label_idxs_sampled = rng.choice(label_choices, size=min_count, replace=False) sample_idxs = np.concatenate([label_idxs_sampled, sample_idxs]) sample_idxs = np.unique(sample_idxs) # now that we have at least min_count of each, we can just random sample sample_count = int(size - sample_idxs.shape[0]) # get sample_count indices from remaining choices remaining_choices = np.setdiff1d(choices, sample_idxs) remaining_sampled = rng.choice(remaining_choices, size=sample_count, replace=False) return np.concatenate([sample_idxs, remaining_sampled]) def multilabel_train_test_split(X, Y, size, min_count=5, seed=None): """ Takes a features matrix `X` and a label matrix `Y` and returns (X_train, X_test, Y_train, Y_test) where all classes in Y are represented at least `min_count` times. """ index = Y.index if isinstance(Y, pd.DataFrame) else np.arange(Y.shape[0]) test_set_idxs = multilabel_sample(Y, size=size, min_count=min_count, seed=seed) train_set_idxs = np.setdiff1d(index, test_set_idxs) test_set_mask = index.isin(test_set_idxs) train_set_mask = ~test_set_mask return (X[train_set_mask], X[test_set_mask], Y[train_set_mask], Y[test_set_mask])4. Reshape and Split the Data I will create encode the given labels and split the training data to evaluate my algorithm.#labels need to be predicted by the description of the contest LABELS = ['Function', 'Use', 'Sharing', 'Reporting', 'Student_Type', 'Position_Type', 'Object_Type', 'Pre_K', 'Operating_Status'] #other labels NON_LABELS = [c for c in df.columns if c not in LABELS] dummy_labels = pd.get_dummies(df[LABELS]) X_train, X_test, y_train, y_test = multilabel_train_test_split(df[NON_LABELS], dummy_labels, 0.2, seed=43)5. Preprocess the Data Hereby I will define functions retrieving and transforming both text and numeric data and concatenating them into one set. Also for convenience I will convert my logloss function into sklearn metric.NUMERIC_COLUMNS = df.columns[df.dtypes.values == 'float'].values.tolist() # function merging all text values in a row def combine_text_columns(data_frame, to_drop=NUMERIC_COLUMNS + LABELS): """ Takes the dataset as read in, drops the non-feature, non-text columns and then combines all of the text columns into a single vector that has all of the text for a row. :param data_frame: The data as read in with read_csv (no preprocessing necessary) :param to_drop (optional): Removes the numeric and label columns by default. """ # drop non-text columns that are in the df to_drop = set(to_drop) & set(data_frame.columns.tolist()) text_data = data_frame.drop(to_drop, axis=1) # replace nans with blanks text_data.fillna("", inplace=True) # joins all of the text items in a row (axis=1) # with a space in between return text_data.apply(lambda x: " ".join(x), axis=1) from sklearn.preprocessing import FunctionTransformer get_text_data = FunctionTransformer(combine_text_columns, validate=False) get_numeric_data = FunctionTransformer(lambda x: x[NUMERIC_COLUMNS], validate=False) from sklearn.metrics.scorer import make_scorer log_loss_scorer = make_scorer(multi_multi_log_loss)6. 
Make Pipeline In this step I will train my pipeline model, which will use logistic regression.from sklearn.feature_selection import chi2, SelectKBest from sklearn.pipeline import Pipeline, FeatureUnion from sklearn.impute import SimpleImputer from sklearn.preprocessing import PolynomialFeatures from sklearn.feature_extraction.text import HashingVectorizer from sklearn.multiclass import OneVsRestClassifier from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import MaxAbsScaler TOKENS_ALPHANUMERIC = '[A-Za-z0-9]+(?=\\s+)' %%time # set a reasonable number of features before adding interactions chi_k = 1000 # create the pipeline object pl = Pipeline([ ('union', FeatureUnion( transformer_list = [ ('numeric_features', Pipeline([ ('selector', get_numeric_data), ('imputer', SimpleImputer()) # get rid of missing values ])), ('text_features', Pipeline([ ('selector', get_text_data), ('vectorizer', HashingVectorizer(token_pattern=TOKENS_ALPHANUMERIC, norm=None, binary=False, alternate_sign=False, ngram_range=(1, 2))), #transform text data in numerical ('dim_red', SelectKBest(chi2, chi_k)) #select 1000 best features ])) ] )), ('int', PolynomialFeatures(degree=2)), #take into account combination of features ('scale', MaxAbsScaler()), #scale all the features ('clf', OneVsRestClassifier(LogisticRegression())) ]) # fit the pipeline to our training data pl.fit(X_train, y_train.values) # print the score of our trained pipeline on our test set print("Logloss score of trained pipeline: ", log_loss_scorer(pl, X_test, y_test.values))Logloss score of trained pipeline: 1.3378237078762474 Wall time: 13h 33min 53s7. Make Predictions Right here I will use my model on the testing set and create predction file.# Load holdout data holdout = pd.read_csv('TestData.csv', index_col=0) # Make predictions predictions = pl.predict_proba(holdout) # Format correctly in new DataFrame: prediction_df prediction_df = pd.DataFrame(columns=pd.get_dummies(df[LABELS], prefix_sep='__').columns, index=holdout.index, data=predictions) prediction_df = prediction_df[prediction_df.columns.sort_values()] # Save prediction_df to csv called "predictions.csv" prediction_df.to_csv("predictions.csv")Linking and brushing with bokeh Linking and brushing is a powerful method for exploratory data analysis. One way to create linked plots in the notebook is to use Bokeh.import bokeh import numpy as np from astropy.table import Table sdss = Table.read('data/sdss_galaxies_qsos_50k.fits') sdss from bokeh.models import ColumnDataSource from bokeh.plotting import figure, gridplot, output_notebook, output_file, show umg = sdss['u'] - sdss['g'] gmr = sdss['g'] - sdss['r'] rmi = sdss['r'] - sdss['i'] imz = sdss['i'] - sdss['z'] # create a column data source for the plots to share source = ColumnDataSource(data=dict(umg=umg, gmr=gmr, rmi=rmi,imz=imz))We will output to a static html file. 
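The linked brushing itself comes entirely from the shared ColumnDataSource: every renderer built from the same source object shares its selection, so a box_select in one panel highlights the matching points in the other. A minimal stand-alone sketch of that pattern (toy data, figure names, and output file are ours, not part of the SDSS example):
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure, gridplot, output_file, show
# two scatter plots drawing from one shared source -> selections stay in sync
src = ColumnDataSource(data=dict(a=[1, 2, 3, 4], b=[4, 2, 3, 1], c=[2, 4, 1, 3]))
p1 = figure(tools='box_select', width=300, height=300)
p1.circle('a', 'b', source=src)   # selection state lives in `src` ...
p2 = figure(tools='box_select', width=300, height=300)
p2.circle('a', 'c', source=src)   # ... so it is mirrored here automatically
output_file('linked_toy.html')
show(gridplot([[p1, p2]]))
The SDSS figures below follow exactly this pattern, just with the real color columns and a richer tool set.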
The output_notebook() function can output to the notebook, but with 50,000 points it really slows down.output_file('sdss_color_color.html') TOOLS = "pan,wheel_zoom,reset,box_select,poly_select,help" # create a new plot and add a renderer left = figure(tools=TOOLS, width=400, height=400, title='SDSS g-r vs u-g', webgl=True) left.x('umg', 'gmr', source=source) # create another new plot and add a renderer right = figure(tools=TOOLS, width=400, height=400, title='SDSS i-z vs r-i') right.x('rmi', 'imz', source=source) p = gridplot([[left, right]]) show(p)See many examples of configuring plot tools at http://bokeh.pydata.org/en/latest/docs/user_guide/tools.html Interacting with Glue#import glue # Quick way to launch Glue #from glue import qglue #qglue()Here we'll interact with Glue from the notebook.import astropy.io.fits as fits hdu = fits.open('data/w5.fits') hdu[0].header from astropy.table import Table w5catalog = Table.read('data/w5_psc.vot') wisecat = Table.read('data/w5_wise.tbl', format='ipac') %gui qt #qglue(catalog=catalog, image=hdu, wisecat=wisecat) from glue.core.data_factories import load_data from glue.core import DataCollection from glue.core.link_helpers import LinkSame from glue.app.qt.application import GlueApplication #load 2 datasets from files image = load_data('data/w5.fits') catalog = load_data('data/w5_psc.vot') dc = DataCollection([image, catalog]) # link positional information dc.add_link(LinkSame(image.id['Right Ascension'], catalog.id['RAJ2000'])) dc.add_link(LinkSame(image.id['Declination'], catalog.id['DEJ2000'])) #start Glue app = GlueApplication(dc) app.start()Now we have access to the data collection in our notebookdc dc[0].components dc[0].id['Right Ascension']Now go select the "Western arm" of the star-forming region (in Glue) and make a subset of itcatalog = dc[1] j_minus_h = catalog['Jmag'] - catalog['Hmag']We can add something to our catalog and it shows up in Glue.catalog['jmh'] = j_minus_h hmag = catalog.id['Hmag'] jmag = catalog.id['Jmag']We can define a new subset group here or in Gluejmhred = (jmag - hmag) > 1.5 dc.new_subset_group('j - h > 1.5', jmhred) dc.subset_groups dc.subset_groups[2].label catalog.subsets catalog.subsets[0]['Jmag'] mask = catalog.subsets[0].to_mask() new_catalog = w5catalog[mask]pip install matplotlib ipympl%matplotlib widget import matplotlib.pyplot as plt import numpy as np X = np.linspace(0, 2*np.pi) Y = np.sin(X) fig, ax = plt.subplots() ax.plot(X, Y)知识点 1. 准备数据* 选取埋点:videoFinished / videoQuitted / problemAnswered* 清洁 - 移除缺少eventTime / device字段的埋点 - 移除problemAnswered中,duration小于1或大于等于2000的埋点 - 移除video埋点中,duration小于5或大于等于2000的埋点 2. 
计算视频比值* VR(U, V) = T(U) / T(V)from pymongo import MongoClient conn = MongoClient('10.8.8.111:27017') mdp = conn['ronfedb']['mdp'] pipeline = [ {"$match": {"action": "q", "topicId": "54cc72dfabc5bbb971f99bb3", "level": 1, "correct": True}}, {"$group": {"_id": None, "times": {"$push": "$duration"}}} ] x = list(mdp.aggregate(pipeline)) x = x[0]['times'] %matplotlib inline import warnings import numpy as np import pandas as pd import scipy.stats as st import statsmodels as sm import matplotlib import matplotlib.pyplot as plt matplotlib.rcParams['figure.figsize'] = (16.0, 12.0) matplotlib.style.use('ggplot') # remove extreme values def reject_outliers(data, m=2): return data[abs(data - np.mean(data)) < m * np.std(data)] # Create models from data def best_fit_distribution(data, bins=200, ax=None): """Model data by finding best fit distribution to data""" # Get histogram of original data y, x = np.histogram(data, bins=bins, normed=True) x = (x + np.roll(x, -1))[:-1] / 2.0 # Distributions to check DISTRIBUTIONS = [ st.alpha,st.anglit,st.arcsine,st.beta,st.betaprime,st.bradford,st.burr,st.cauchy,st.chi,st.chi2,st.cosine, st.dgamma,st.dweibull,st.erlang,st.expon,st.exponnorm,st.exponweib,st.exponpow,st.f,st.fatiguelife,st.fisk, st.foldcauchy,st.foldnorm,st.frechet_r,st.frechet_l,st.genlogistic,st.genpareto,st.gennorm,st.genexpon, st.genextreme,st.gausshyper,st.gamma,st.gengamma,st.genhalflogistic,st.gilbrat,st.gompertz,st.gumbel_r, st.gumbel_l,st.halfcauchy,st.halflogistic,st.halfnorm,st.halfgennorm,st.hypsecant,st.invgamma,st.invgauss, st.invweibull,st.johnsonsb,st.johnsonsu,st.ksone,st.kstwobign,st.laplace,st.levy,st.levy_l,st.levy_stable, st.logistic,st.loggamma,st.loglaplace,st.lognorm,st.lomax,st.maxwell,st.mielke,st.nakagami,st.ncx2,st.ncf, st.nct,st.norm,st.pareto,st.pearson3,st.powerlaw,st.powerlognorm,st.powernorm,st.rdist,st.reciprocal, st.rayleigh,st.rice,st.recipinvgauss,st.semicircular,st.t,st.triang,st.truncexpon,st.truncnorm,st.tukeylambda, st.uniform,st.vonmises,st.vonmises_line,st.wald,st.weibull_min,st.weibull_max,st.wrapcauchy ] # Best holders best_distribution = st.norm best_params = (0.0, 1.0) best_sse = np.inf # Estimate distribution parameters from data for distribution in DISTRIBUTIONS: # Try to fit the distribution try: # Ignore warnings from data that can't be fit with warnings.catch_warnings(): warnings.filterwarnings('ignore') # fit dist to data params = distribution.fit(data) # Separate parts of parameters arg = params[:-2] loc = params[-2] scale = params[-1] # Calculate fitted PDF and error with fit in distribution pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) sse = np.sum(np.power(y - pdf, 2.0)) # if axis pass in add to plot try: if ax: pd.Series(pdf, x).plot(ax=ax) end except Exception: pass # identify if this distribution is better if best_sse > sse > 0: best_distribution = distribution best_params = params best_sse = sse except Exception: pass return (best_distribution.name, best_params) def make_pdf(dist, params, size=10000): """Generate distributions's Propbability Distribution Function """ # Separate parts of parameters arg = params[:-2] loc = params[-2] scale = params[-1] # Get sane start and end points of distribution start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale) end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale) # Build PDF and turn into pandas Series x = np.linspace(start, end, size) y = dist.pdf(x, loc=loc, scale=scale, *arg) pdf = pd.Series(y, x) 
return pdf # Load data from statsmodels datasets xx = reject_outliers(np.array(x), m=1.64) data = pd.Series(xx) # Plot for comparison plt.figure(figsize=(12,8)) ax = data.plot(kind='hist', bins=50, normed=True, alpha=0.5, color=plt.rcParams['axes.color_cycle'][1]) # Save plot limits dataYLim = ax.get_ylim() # Find best fit distribution best_fit_name, best_fir_paramms = best_fit_distribution(data, 200, ax) best_dist = getattr(st, best_fit_name) # Update plots ax.set_ylim(dataYLim) ax.set_title(u'Correct Answer Duration Dist') ax.set_xlabel(u'Time/s') ax.set_ylabel('Frequency') # Make PDF pdf = make_pdf(best_dist, best_fir_paramms) # Display plt.figure(figsize=(12,8)) ax = pdf.plot(lw=2, label='PDF', legend=True) data.plot(kind='hist', bins=50, normed=True, alpha=0.5, label='Data', legend=True, ax=ax) param_names = (best_dist.shapes + ', loc, scale').split(', ') if best_dist.shapes else ['loc', 'scale'] param_str = ', '.join(['{}={:0.2f}'.format(k,v) for k,v in zip(param_names, best_fir_paramms)]) dist_str = '{}({})'.format(best_fit_name, param_str) ax.set_title(u'Correct Answer Duration Dist \n' + dist_str) ax.set_xlabel(u'Time/s') ax.set_ylabel('Frequency') print(best_fir_paramms) print(param_names) from scipy.stats import nct df = best_fir_paramms[0] nc = best_fir_paramms[1] loc = best_fir_paramms[2] scale = best_fir_paramms[3] xo = np.linspace(nct.ppf(0.01, df, nc, loc, scale), nct.ppf(0.99, df, nc, loc, scale), 100) xoo = nct.cdf(xo, df, nc, loc, scale) plt.plot(xoo) plt.show() pipeline = [ {"$match": {"action": "q", "topicId": "54cc72dfabc5bbb971f99bb3", "level": 4, "correct": True}}, {"$group": {"_id": None, "times": {"$push": "$duration"}}} ] x = list(mdp.aggregate(pipeline)) x = x[0]['times'] # Load data from statsmodels datasets xx = reject_outliers(np.array(x), m=1.64) data = pd.Series(xx) params = nct.fit(data) pdf = make_pdf(nct, params) plt.figure(figsize=(12,8)) ax = pdf.plot(lw=2, label='PDF', legend=True) data.plot(kind='hist', bins=50, normed=True, alpha=0.5, label='Data', legend=True, ax=ax) param_names = (best_dist.shapes + ', loc, scale').split(', ') if best_dist.shapes else ['loc', 'scale'] param_str = ', '.join(['{}={:0.2f}'.format(k,v) for k,v in zip(param_names, params)]) dist_str = '{}({})'.format(best_fit_name, param_str) ax.set_title(u'Correct Answer Duration Dist \n' + dist_str) ax.set_xlabel(u'Time/s') ax.set_ylabel('Frequency') from pymongo import MongoClient conn = MongoClient('10.8.8.111:27017') mdp = conn['ronfedb']['mdp'] pipeline = [ {"$match": {"action": "q", "topicId": "54cc72dfabc5bbb971f99bb3", "level": 1, "correct": False}}, {"$group": {"_id": None, "times": {"$push": "$duration"}}} ] x = list(mdp.aggregate(pipeline)) x = x[0]['times'] %matplotlib inline import warnings import numpy as np import pandas as pd import scipy.stats as st import statsmodels as sm import matplotlib import matplotlib.pyplot as plt matplotlib.rcParams['figure.figsize'] = (16.0, 12.0) matplotlib.style.use('ggplot') # remove extreme values def reject_outliers(data, m=2): return data[abs(data - np.mean(data)) < m * np.std(data)] # Create models from data def best_fit_distribution(data, bins=200, ax=None): """Model data by finding best fit distribution to data""" # Get histogram of original data y, x = np.histogram(data, bins=bins, normed=True) x = (x + np.roll(x, -1))[:-1] / 2.0 # Distributions to check DISTRIBUTIONS = [ st.alpha,st.anglit,st.arcsine,st.beta,st.betaprime,st.bradford,st.burr,st.cauchy,st.chi,st.chi2,st.cosine, 
st.dgamma,st.dweibull,st.erlang,st.expon,st.exponnorm,st.exponweib,st.exponpow,st.f,st.fatiguelife,st.fisk, st.foldcauchy,st.foldnorm,st.frechet_r,st.frechet_l,st.genlogistic,st.genpareto,st.gennorm,st.genexpon, st.genextreme,st.gausshyper,st.gamma,st.gengamma,st.genhalflogistic,st.gilbrat,st.gompertz,st.gumbel_r, st.gumbel_l,st.halfcauchy,st.halflogistic,st.halfnorm,st.halfgennorm,st.hypsecant,st.invgamma,st.invgauss, st.invweibull,st.johnsonsb,st.johnsonsu,st.ksone,st.kstwobign,st.laplace,st.levy,st.levy_l,st.levy_stable, st.logistic,st.loggamma,st.loglaplace,st.lognorm,st.lomax,st.maxwell,st.mielke,st.nakagami,st.ncx2,st.ncf, st.nct,st.norm,st.pareto,st.pearson3,st.powerlaw,st.powerlognorm,st.powernorm,st.rdist,st.reciprocal, st.rayleigh,st.rice,st.recipinvgauss,st.semicircular,st.t,st.triang,st.truncexpon,st.truncnorm,st.tukeylambda, st.uniform,st.vonmises,st.vonmises_line,st.wald,st.weibull_min,st.weibull_max,st.wrapcauchy ] # Best holders best_distribution = st.norm best_params = (0.0, 1.0) best_sse = np.inf # Estimate distribution parameters from data for distribution in DISTRIBUTIONS: # Try to fit the distribution try: # Ignore warnings from data that can't be fit with warnings.catch_warnings(): warnings.filterwarnings('ignore') # fit dist to data params = distribution.fit(data) # Separate parts of parameters arg = params[:-2] loc = params[-2] scale = params[-1] # Calculate fitted PDF and error with fit in distribution pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) sse = np.sum(np.power(y - pdf, 2.0)) # if axis pass in add to plot # try: # if ax: # pd.Series(pdf, x).plot(ax=ax) # end # except Exception: # pass # identify if this distribution is better if best_sse > sse > 0: best_distribution = distribution best_params = params best_sse = sse except Exception: pass return (best_distribution.name, best_params) def make_pdf(dist, params, size=10000): """Generate distributions's Propbability Distribution Function """ # Separate parts of parameters arg = params[:-2] loc = params[-2] scale = params[-1] # Get sane start and end points of distribution start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale) end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale) # Build PDF and turn into pandas Series x = np.linspace(start, end, size) y = dist.pdf(x, loc=loc, scale=scale, *arg) pdf = pd.Series(y, x) return pdf pipeline = [ {"$match": {"action": "q", "topicId": "54cc72dfabc5bbb971f99bb3", "level": 1, "correct": True}}, {"$group": {"_id": None, "times": {"$push": "$duration"}}} ] x = list(mdp.aggregate(pipeline)) x = x[0]['times'] # Load data from statsmodels datasets xx = reject_outliers(np.array(x), m=1.64) data = pd.Series(xx) # Plot for comparison # plt.figure(figsize=(12,8)) # ax = data.plot(kind='hist', bins=50, normed=True, alpha=0.5, color=plt.rcParams['axes.color_cycle'][1]) # Save plot limits # dataYLim = ax.get_ylim() # Find best fit distribution best_fit_name, best_fir_paramms = best_fit_distribution(data, 200) # best_dist = getattr(st, best_fit_name) print("Level 1 Correct") print(best_fit_name) pipeline = [ {"$match": {"action": "q", "topicId": "54cc72dfabc5bbb971f99bb3", "level": 1, "correct": False}}, {"$group": {"_id": None, "times": {"$push": "$duration"}}} ] x = list(mdp.aggregate(pipeline)) x = x[0]['times'] # Load data from statsmodels datasets xx = reject_outliers(np.array(x), m=1.64) data = pd.Series(xx) # Find best fit distribution best_fit_name, best_fir_paramms = 
best_fit_distribution(data, 200) print("Level 1 InCorrect") print(best_fit_name) pipeline = [ {"$match": {"action": "q", "topicId": "54cc72dfabc5bbb971f99bb3", "level": 2, "correct": True}}, {"$group": {"_id": None, "times": {"$push": "$duration"}}} ] x = list(mdp.aggregate(pipeline)) x = x[0]['times'] # Load data from statsmodels datasets xx = reject_outliers(np.array(x), m=1.64) data = pd.Series(xx) # Find best fit distribution best_fit_name, best_fir_paramms = best_fit_distribution(data, 200) print("Level 2 Correct") print(best_fit_name) pipeline = [ {"$match": {"action": "q", "topicId": "54cc72dfabc5bbb971f99bb3", "level": 2, "correct": False}}, {"$group": {"_id": None, "times": {"$push": "$duration"}}} ] x = list(mdp.aggregate(pipeline)) x = x[0]['times'] # Load data from statsmodels datasets xx = reject_outliers(np.array(x), m=1.64) data = pd.Series(xx) # Find best fit distribution best_fit_name, best_fir_paramms = best_fit_distribution(data, 200) print("Level 2 InCorrect") print(best_fit_name) pipeline = [ {"$match": {"action": "q", "topicId": "54cc72dfabc5bbb971f99bb3", "level": 3, "correct": True}}, {"$group": {"_id": None, "times": {"$push": "$duration"}}} ] x = list(mdp.aggregate(pipeline)) x = x[0]['times'] # Load data from statsmodels datasets xx = reject_outliers(np.array(x), m=1.64) data = pd.Series(xx) # Find best fit distribution best_fit_name, best_fir_paramms = best_fit_distribution(data, 200) print("Level 3 Correct") print(best_fit_name) pipeline = [ {"$match": {"action": "q", "topicId": "54cc72dfabc5bbb971f99bb3", "level": 3, "correct": False}}, {"$group": {"_id": None, "times": {"$push": "$duration"}}} ] x = list(mdp.aggregate(pipeline)) x = x[0]['times'] # Load data from statsmodels datasets xx = reject_outliers(np.array(x), m=1.64) data = pd.Series(xx) # Find best fit distribution best_fit_name, best_fir_paramms = best_fit_distribution(data, 200) print("Level 3 InCorrect") print(best_fit_name) pipeline = [ {"$match": {"action": "q", "topicId": "54cc72dfabc5bbb971f99bb3", "level": 4, "correct": True}}, {"$group": {"_id": None, "times": {"$push": "$duration"}}} ] x = list(mdp.aggregate(pipeline)) x = x[0]['times'] # Load data from statsmodels datasets xx = reject_outliers(np.array(x), m=1.64) data = pd.Series(xx) # Find best fit distribution best_fit_name, best_fir_paramms = best_fit_distribution(data, 200) print("Level 4 Correct") print(best_fit_name) pipeline = [ {"$match": {"action": "q", "topicId": "54cc72dfabc5bbb971f99bb3", "level": 4, "correct": False}}, {"$group": {"_id": None, "times": {"$push": "$duration"}}} ] x = list(mdp.aggregate(pipeline)) x = x[0]['times'] # Load data from statsmodels datasets xx = reject_outliers(np.array(x), m=1.64) data = pd.Series(xx) # Find best fit distribution best_fit_name, best_fir_paramms = best_fit_distribution(data, 200) print("Level 4 InCorrect") print(best_fit_name) %matplotlib inline import warnings import numpy as np import pandas as pd import scipy.stats as st import statsmodels as sm import matplotlib import matplotlib.pyplot as plt from pymongo import MongoClient matplotlib.rcParams['figure.figsize'] = (16.0, 12.0) matplotlib.style.use('ggplot') mdp = MongoClient("10.8.8.111:27017")['ronfedb']['mdp'] # remove extreme values def reject_outliers(data, m=2): return data[abs(data - np.mean(data)) < m * np.std(data)] # Create models from data def best_fit_distribution(data, bins=200, ax=None): """Model data by finding best fit distribution to data""" # Get histogram of original data y, x = np.histogram(data, bins=bins, 
normed=True) x = (x + np.roll(x, -1))[:-1] / 2.0 # Distributions to check DISTRIBUTIONS = [ st.alpha,st.anglit,st.arcsine,st.beta,st.betaprime,st.bradford,st.burr,st.cauchy,st.chi,st.chi2,st.cosine, st.dgamma,st.dweibull,st.erlang,st.expon,st.exponnorm,st.exponweib,st.exponpow,st.f,st.fatiguelife,st.fisk, st.foldcauchy,st.foldnorm,st.frechet_r,st.frechet_l,st.genlogistic,st.genpareto,st.gennorm,st.genexpon, st.genextreme,st.gausshyper,st.gamma,st.gengamma,st.genhalflogistic,st.gilbrat,st.gompertz,st.gumbel_r, st.gumbel_l,st.halfcauchy,st.halflogistic,st.halfnorm,st.halfgennorm,st.hypsecant,st.invgamma,st.invgauss, st.invweibull,st.johnsonsb,st.johnsonsu,st.ksone,st.kstwobign,st.laplace,st.levy,st.levy_l,st.levy_stable, st.logistic,st.loggamma,st.loglaplace,st.lognorm,st.lomax,st.maxwell,st.mielke,st.nakagami,st.ncx2,st.ncf, st.nct,st.norm,st.pareto,st.pearson3,st.powerlaw,st.powerlognorm,st.powernorm,st.rdist,st.reciprocal, st.rayleigh,st.rice,st.recipinvgauss,st.semicircular,st.t,st.triang,st.truncexpon,st.truncnorm,st.tukeylambda, st.uniform,st.vonmises,st.vonmises_line,st.wald,st.weibull_min,st.weibull_max,st.wrapcauchy ] # Best holders best_distribution = st.norm best_params = (0.0, 1.0) best_sse = np.inf # Estimate distribution parameters from data for distribution in DISTRIBUTIONS: # Try to fit the distribution try: # Ignore warnings from data that can't be fit with warnings.catch_warnings(): warnings.filterwarnings('ignore') # fit dist to data params = distribution.fit(data) # Separate parts of parameters arg = params[:-2] loc = params[-2] scale = params[-1] # Calculate fitted PDF and error with fit in distribution pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) sse = np.sum(np.power(y - pdf, 2.0)) # if axis pass in add to plot # try: # if ax: # pd.Series(pdf, x).plot(ax=ax) # end # except Exception: # pass # identify if this distribution is better if best_sse > sse > 0: best_distribution = distribution best_params = params best_sse = sse except Exception: pass return (best_distribution.name, best_params) def make_pdf(dist, params, size=10000): """Generate distributions's Propbability Distribution Function """ # Separate parts of parameters arg = params[:-2] loc = params[-2] scale = params[-1] # Get sane start and end points of distribution start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale) end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale) # Build PDF and turn into pandas Series x = np.linspace(start, end, size) y = dist.pdf(x, loc=loc, scale=scale, *arg) pdf = pd.Series(y, x) return pdf pipeline = [ {"$match": {"action": "q", "topicId": "54cc72dfabc5bbb971f99bb3", "level": 2}}, {"$group": {"_id": None, "times": {"$push": "$duration"}}} ] x = list(mdp.aggregate(pipeline)) x = x[0]['times'] # Load data from statsmodels datasets xx = reject_outliers(np.array(x), m=1.64) data = pd.Series(xx) # Find best fit distribution best_fit_name, best_fir_paramms = best_fit_distribution(data, 200) print("Level 2") print(best_fit_name) pipeline = [ {"$match": {"action": "q", "topicId": "54cc72dfabc5bbb971f99bb3", "level": 3}}, {"$group": {"_id": None, "times": {"$push": "$duration"}}} ] x = list(mdp.aggregate(pipeline)) x = x[0]['times'] # Load data from statsmodels datasets xx = reject_outliers(np.array(x), m=1.64) data = pd.Series(xx) # Find best fit distribution best_fit_name, best_fir_paramms = best_fit_distribution(data, 200) print("Level 3") print(best_fit_name) pipeline = [ {"$match": 
{"action": "q", "topicId": "54cc72dfabc5bbb971f99bb3", "level": 4}}, {"$group": {"_id": None, "times": {"$push": "$duration"}}} ] x = list(mdp.aggregate(pipeline)) x = x[0]['times'] # Load data from statsmodels datasets xx = reject_outliers(np.array(x), m=1.64) data = pd.Series(xx) # Find best fit distribution best_fit_name, best_fir_paramms = best_fit_distribution(data, 200) print("Level 4") print(best_fit_name)Visualtization: Matrix Plot# imports import numpy as np import pandas as pd import scanpy as sc from anndata import read_h5ad from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split from sklearn.feature_selection import SelectFromModel from sklearn.metrics import accuracy_score from sklearn.feature_selection import RFE from matplotlib import pyplot as pltold analysis# this is for comparing results from 3m and 24m data # imports import numpy as np import pandas as pd import scanpy as sc from anndata import read_h5ad from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split from sklearn.feature_selection import SelectFromModel from sklearn.metrics import accuracy_score from sklearn.feature_selection import RFE from matplotlib import pyplot as plt # read in raw data adata = read_h5ad('/Users/madelinepark/Downloads/Limb_Muscle_facs.h5ad') # read in results and sort by gini results_24 = pd.read_csv('/Users/madelinepark/src2/maca-data-analysis/results_age_first_24m.csv') results_sorted_24 = results_24.sort_values(by='24m_gini',ascending=False) results_3 = pd.read_csv('/Users/madelinepark/src2/maca-data-analysis/results_age_first_3m.csv') results_sorted_3 = results_3.sort_values(by='3m_gini',ascending=False) # take top genes and ginis, here we chose 10 results_top_24_gene = results_sorted_24['24m'][0:10] results_top_24_gini = results_sorted_24['24m_gini'][0:10] results_top_3_gene = results_sorted_3['3m'][0:10] results_top_3_gini = results_sorted_3['3m_gini'][0:10] results_top_genes = list(set(results_top_gene_list) & set(adata.var_names.values)) results_top_gene_list = [] results_top_gene_list.extend(results_top_24_gene) results_top_gene_list.extend(results_top_3_gene) adatasubset = adata[adata.obs['age'].isin(['3m','24m'])] # Need to change the order of the ages adatasubset.obs['age_num'] = adatasubset.obs['age'] adatasubset.obs['age_num'] = [an.split('m')[0] for an in adatasubset.obs['age_num']] sc.pl.matrixplot(adatasubset, results_top_genes, groupby='age_num', dendrogram=False,log=True,cmap='Blues',save = '_top_30_droplet_test_8.pdf')... storing 'age_num' as categoricalAssignment 4 Understaning scaling of linear algebra operations on Apache Spark using Apache SystemMLIn this assignment we want you to understand how to scale linear algebra operations from a single machine to multiple machines, memory and CPU cores using Apache SystemML. Therefore we want you to understand how to migrate from a numpy program to a SystemML DML program. Don't worry. We will give you a lot of hints. Finally, you won't need this knowledge anyways if you are sticking to Keras only, but once you go beyond that point you'll be happy to see what's going on behind the scenes. 
Please make sure you run this notebook from an Apache Spark 2.3 notebook.So the first thing we need to ensure is that we are on the latest version of SystemML, which is 1.2.0:The steps are:- pip install- link the jars to the correct location- restart the kernel- start execution at the cell with the version - check!pip install systemmlWaiting for a Spark session to start... Spark Initialization Done! ApplicationId = app-20200302065315-0000 KERNEL_ID = 38399d8f-fa5e-4e4a-a295-f8d159f2e33e Collecting systemml [?25l Downloading https://files.pythonhosted.org/packages/b1/94/62104cb8c526b462cd501c7319926fb81ac9a5668574a0b3407658a506ab/systemml-1.2.0.tar.gz (9.7MB)  100% |################################| 9.7MB 1.6MB/s eta 0:00:01 [?25hCollecting numpy>=1.8.2 (from systemml) [?25l Downloading https://files.pythonhosted.org/packages/62/20/4d43e141b5bc426ba38274933ef8e76e85c7adea2c321ecf9ebf7421cedf/numpy-1.18.1-cp36-cp36m-manylinux1_x86_64.whl (20.1MB)  100% |################################| 20.2MB 1.0MB/s eta 0:00:01 [?25hCollecting scipy>=0.15.1 (from systemml) [?25l Downloading https://files.pythonhosted.org/packages/dc/29/162476fd44203116e7980cfbd9352eef9db37c49445d1fec35509022f6aa/scipy-1.4.1-cp36-cp36m-manylinux1_x86_64.whl (26.1MB)  100% |################################| 26.1MB 885kB/s et[...]Now we need to create two sym links that the newest version is picket up - this is a workaround and will be removed soon!ln -s -f ~/user-libs/python3.6/systemml/systemml-java/systemml-1.2.0-extra.jar ~/user-libs/spark2/systemml-1.2.0-extra.jar !ln -s -f ~/user-libs/python3.6/systemml/systemml-java/systemml-1.2.0.jar ~/user-libs/spark2/systemml-1.2.0.jarNow please restart the kernel and make sure the version is correctfrom systemml import MLContext ml = MLContext(spark) print(ml.version()) if not ml.version() == '1.2.0': raise ValueError('please upgrade to SystemML 1.2.0, or restart your Kernel (Kernel->Restart & Clear Output)')Waiting for a Spark session to start... Spark Initialization Done! ApplicationId = app-20200302065724-0002 KERNEL_ID = 38399d8f-fa5e-4e4a-a295-f8d159f2e33e 1.2.0Congratulations, if you see version 1.2.0, please continue with the notebook...from systemml import MLContext, dml import numpy as np import timeThen we create an MLContext to interface with Apache SystemML. Note that we pass a SparkSession object as parameter so SystemML now knows how to talk to the Apache Spark clusterml = MLContext(spark)Now we create some large random matrices to have numpy and SystemML crunch on itu = np.random.rand(1000,10000) s = np.random.rand(10000,1000) w = np.random.rand(1000,1000)Now we implement a short one-liner to define a very simple linear algebra operationIn case you are unfamiliar with matrxi-matrix multiplication: https://en.wikipedia.org/wiki/Matrix_multiplicationsum(U' * (W . (U * S)))| Legend | | | ------------- |-------------| | ' | transpose of a matrix | | * | matrix-matrix multiplication | | . | scalar multiplication |start = time.time() res = np.sum(u.T.dot(w * u.dot(s))) print (time.time()-start)2.0879814624786377As you can see this executes perfectly fine. Note that this is even a very efficient execution because numpy uses a C/C++ backend which is known for it's performance. But what happens if U, S or W get such big that the available main memory cannot cope with it? Let's give it a try:# u = np.random.rand(10000,100000) # s = np.random.rand(100000,10000) # w = np.random.rand(10000,10000)After a short while you should see a memory error. 
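A rough size estimate (ours, not part of the assignment) makes the failure unsurprising: a float64 entry takes 8 bytes, so a single 10000 x 100000 matrix already needs about 8 GB, and the expression keeps several such arrays plus intermediates alive at once.
# back-of-the-envelope memory estimate for one of the blown-up matrices
rows, cols, bytes_per_float64 = 10_000, 100_000, 8
print(rows * cols * bytes_per_float64 / 1e9, 'GB')   # -> 8.0 GB, before any intermediate results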
This is because the operating system process was not able to allocate enough memory for storing the numpy array on the heap. Now it's time to re-implement the very same operations as DML in SystemML, and this is your task. Just replace all your_code_goes_here sections with proper code, please consider the following table which contains all DML syntax you need:| Syntax | | | ------------- |-------------| | t(M) | transpose of a matrix, where M is the matrix | | %*% | matrix-matrix multiplication | | * | scalar multiplication | Task In order to show you the advantage of SystemML over numpy we've blown up the sizes of the matrices. Unfortunately, on a 1-2 worker Spark cluster it takes quite some time to complete. Therefore we've stripped down the example to smaller matrices below, but we've kept the code, just in case you are curious to check it out. But you might want to use some more workers which you easily can configure in the environment settings of the project within Watson Studio. Just be aware that you're currently limited to free 50 capacity unit hours per month wich are consumed by the additional workers.script = """ U = rand(rows=1000,cols=10000) S = rand(rows=10000,cols=1000) W = rand(rows=1000,cols=1000) res = sum(t(U) %*% (W * (U %*% S))) """ # res = sum(###your_code_goes_here(U) %*% (W * (U ###your_code_goes_here S)))To get consistent results we switch from a random matrix initialization to something deterministic# u = np.arange(100000).reshape((100, 1000)) # s = np.arange(100000).reshape((1000, 100)) # w = np.arange(10000).reshape((100, 100)) # prog = dml(script).output('res') prog = dml(script).input('U', u).input('S', s).input('W', w).output('res') # res = ml.execute(prog).get('res') res = ml.execute(prog).get('res') print(res)SystemML Statistics: Total execution time: 4.174 sec. Number of executed Spark inst: 0. 6249067735291.72If everything runs fine you should get *6252492444241.075* as result (or something in that bullpark). Feel free to submit your DML script to the grader now! Submission!rm -f rklib.py !wget https://raw.githubusercontent.com/romeokienzler/developerWorks/master/coursera/ai/rklib.py from rklib import submit key = "" email = "" part = "fUxc8" token = "" #you can obtain it from the grader page on Coursera (have a look here if you need more information on how to obtain the token https://youtu.be/GcDo0Rwe06U?t=276) submit(email, token, key, part, [part], script)import pandas as pd import plotly.graph_objs as go import plotly.offline as py from fbprophet import Prophet from fbprophet.plot import plot_plotly, add_changepoints_to_plot import numpy as npLoad Dataset# Confirmation, recovery, and death data sets by region worldwide # 전세계 지역별 확진자, 회복자, 사망자 Data Set url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv' data = pd.read_csv(url, error_bad_lines=False) # Understanding the structure of the data set # Data Set의 구조 파악 data.head()Make it Simple# Make Korea's confirmed cases timeseries dataframe # 한국의 확진자 시계열 데이터프레임를 확인합니다 df_korea = data[data['Country/Region'] == 'Korea, South'] df_korea = df_korea.T[4:] df_korea = df_korea.reset_index().rename(columns={'index': 'date', 160: 'confirmed'}) df_korea.tail()Plot# Plot Korean COVID19 confirmed cases. # 한국 코로나19 확진자 트렌드를 그래프로 만듭니다. 
fig = go.Figure() fig.add_trace( go.Scatter( x=df_korea.date, y=df_korea.confirmed, name='Confirmed in Korea' ) ) figFacebook Prophet# Make dataframe for Facebook Prophet prediction model. # Facebook Prophet 예측 모델에 넣을 데이터프레임을 만들어줍니다. df_prophet = df_korea.rename(columns={ 'date': 'ds', 'confirmed': 'y' }) df_prophet.tail()Create a Model Add changepoints to Make More Flexible- https://facebook.github.io/prophet/docs/trend_changepoints.html# Make Prophet model including daily seasonality # Prophet에서 감안할 계절성을 선택해서 모델을 만듭니다 m = Prophet( changepoint_prior_scale=0.5, # increasing it will make the trend more flexible changepoint_range=0.95, # place potential changepoints in the first 98% of the time series yearly_seasonality=False, weekly_seasonality=True, daily_seasonality=True, seasonality_mode='additive' ) m.fit(df_prophet) future = m.make_future_dataframe(periods=7) forecast = m.predict(future) fig = plot_plotly(m, forecast) py.iplot(fig)INFO:numexpr.utils:NumExpr defaulting to 2 threads.Plot changepoints# display changepoints as red dotted line on the plot. # changepoint를 그래프에 반영해봅시다. fig = m.plot(forecast) a = add_changepoints_to_plot(fig.gca(), m, forecast) day_confirmed = pd.DataFrame(np.array(df_korea.confirmed) - np.array(pd.concat([pd.DataFrame([0]), df_korea.confirmed[:-1]])).reshape(-1)) day_confirmed.columns = ["y"] ds = df_prophet['ds'] day_confirmed = pd.concat([ds, day_confirmed], axis=1) day_confirmed # day_confirmed = pd.DataFrame(np.array(df_korea.confirmed) - np.array(pd.concat([pd.DataFrame([0]), df_korea.confirmed[:-1]])).reshape(-1)) m = Prophet( changepoint_prior_scale=0.6, # increasing it will make the trend more flexible changepoint_range=0.97, # place potential changepoints in the first 98% of the time series yearly_seasonality=False, weekly_seasonality=True, daily_seasonality=True, seasonality_mode='additive' ) m.fit(day_confirmed) future = m.make_future_dataframe(periods=31) forecast = m.predict(future) fig = plot_plotly(m, forecast) py.iplot(fig) fig = m.plot(forecast) a = add_changepoints_to_plot(fig.gca(), m, forecast)Demo: Neural network training for joint denoising and surface projection of *Drosophila melanogaster* wingThis notebook demonstrates training a CARE model for a 3D → 2D denoising+projection task, assuming that training data was already generated via [1_datagen.ipynb](1_datagen.ipynb) and has been saved to disk to the file ``data/my_training_data.npz``.Note that training a neural network for actual use should be done on more (representative) data and with more training time.More documentation is available at http://csbdeep.bioimagecomputing.com/doc/.from __future__ import print_function, unicode_literals, absolute_import, division import numpy as np import matplotlib.pyplot as plt %matplotlib inline %config InlineBackend.figure_format = 'retina' from tifffile import imread from csbdeep.utils import axes_dict, plot_some, plot_history from csbdeep.utils.tf import limit_gpu_memory from csbdeep.io import load_training_data from csbdeep.models import Config, ProjectionCAREThe TensorFlow backend uses all available GPU memory by default, hence it can be useful to limit it:# limit_gpu_memory(fraction=1/2)Training dataLoad training data generated via [1_datagen.ipynb](1_datagen.ipynb), use 10% as validation data.(X,Y), (X_val,Y_val), axes = load_training_data('data/my_training_data.npz', validation_split=0.1, verbose=True) c = axes_dict(axes)['C'] n_channel_in, n_channel_out = X.shape[c], Y.shape[c] plt.figure(figsize=(12,5)) plot_some(X_val[:5],Y_val[:5]) 
plt.suptitle('5 example validation patches (top row: source, bottom row: target)');CARE modelBefore we construct the actual CARE model, we have to define its configuration via a `Config` object, which includes * parameters of the underlying neural network,* the learning rate,* the number of parameter updates per epoch,* the loss function, and* whether the model is probabilistic or not.The defaults should be sensible in many cases, so a change should only be necessary if the training process fails. ---Important: Note that for this notebook we use a very small number of update steps per epoch for immediate feedback, whereas this number should be increased considerably (e.g. `train_steps_per_epoch=400`) to obtain a well-trained model.config = Config(axes, n_channel_in, n_channel_out, unet_n_depth=3, train_batch_size=8, train_steps_per_epoch=20) print(config) vars(config)We now create a CARE model with the chosen configuration:model = ProjectionCARE(config, 'my_model', basedir='models')Note that there are additional parameters for the projection part of the CARE model. If you need to change them, you can do so by specifying them with the prefix `proj_` when creating the `Config` above. For example, use `proj_n_filt = 16` to change the parameter `n_filt` of the `ProjectionParameters` shown below.model.proj_paramsTrainingTraining the model will likely take some time. We recommend to monitor the progress with [TensorBoard](https://www.tensorflow.org/programmers_guide/summaries_and_tensorboard) (example below), which allows you to inspect the losses during training.Furthermore, you can look at the predictions for some of the validation images, which can be helpful to recognize problems early on.You can start TensorBoard from the current working directory with `tensorboard --logdir=.`Then connect to [http://localhost:6006/](http://localhost:6006/) with your browser.![](http://csbdeep.bioimagecomputing.com/img/tensorboard_projection.png)history = model.train(X,Y, validation_data=(X_val,Y_val))Plot final training history (available in TensorBoard during training):print(sorted(list(history.history.keys()))) plt.figure(figsize=(16,5)) plot_history(history,['loss','val_loss'],['mse','val_mse','mae','val_mae']);EvaluationExample results for validation images.plt.figure(figsize=(12,7)) _P = model.keras_model.predict(X_val[:5]) if config.probabilistic: _P = _P[...,:(_P.shape[-1]//2)] plot_some(X_val[:5],Y_val[:5],_P,pmax=99.5) plt.suptitle('5 example validation patches\n' 'top row: input (source), ' 'middle row: target (ground truth), ' 'bottom row: predicted from source');Export model to be used with CSBDeep **Fiji** plugins and **KNIME** workflowsSee https://github.com/CSBDeep/CSBDeep_website/wiki/Your-Model-in-Fiji for details.model.export_TF()data generatorfrom dataset import CPSC2021 from cfg import TrainCfg ds_train = CPSC2021(TrainCfg, task="rr_lstm", training=True) ds_val = CPSC2021(TrainCfg, task="rr_lstm", training=False) len(ds_train) err_list = [] for idx, seg in enumerate(ds_train.segments): sig, lb = ds_train[idx] if sig.shape != (2,6000) or lb.shape != (750, 1): print("\n"+f"segment {seg} has sig.shape = {sig.shape}, lb.shape = {lb.shape}"+"\n") err_list.append(seg) print(f"{idx+1}/{len(ds_train)}", end="\r") for idx, seg in enumerate(ds_val.segments): sig, lb = ds_val[idx] if sig.shape != (2,6000) or lb.shape != (750, 1): print("\n"+f"segment {seg} has sig.shape = {sig.shape}, lb.shape = {lb.shape}"+"\n") err_list.append(seg) print(f"{idx+1}/{len(ds_val)}", end="\r") len(err_list) 
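# Note added for clarity: the next two cells call loadmat and os.remove, but their
# imports are not visible in this excerpt; presumably the notebook ran something
# like `from scipy.io import loadmat` and `import os` earlier.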
loadmat(ds_train._get_seg_data_path(err_list[-1]))["ecg"].shape for idx, seg in enumerate(err_list): path = ds_train._get_seg_data_path(seg) os.remove(path) path = ds_train._get_seg_ann_path(seg) os.remove(path) print(f"{idx+1}/{len(err_list)}", end="\r")Plan R peak detection rr-lstm U-net sequence labelling R peak detectionfrom model import ( ECG_SEQ_LAB_NET_CPSC2021, ECG_UNET_CPSC2021, ECG_SUBTRACT_UNET_CPSC2021, RR_LSTM_CPSC2021, _qrs_detection_post_process, ) from trainer import train from utils.misc import init_logger, dict_to_str from cfg import ModelCfg, TrainCfg from copy import deepcopy from torch.nn.parallel import DistributedDataParallel as DDP, DataParallel as DP from cfg import ModelCfg task = "qrs_detection" # or "main" model_cfg = deepcopy(ModelCfg[task]) model_cfg.model_name = "seq_lab" model = ECG_SEQ_LAB_NET_CPSC2021(model_cfg) model = DP(model) model.to(torch.device("cuda")) train_config = deepcopy(TrainCfg) # train_config.task = "qrs_detection" _set_task("qrs_detection", train_config) device = torch.device("cuda") train_config.main.reduction logger = init_logger(log_dir=train_config.log_dir, verbose=2) logger.info(f"\n{'*'*20} Start Training {'*'*20}\n") logger.info(f"Using device {device}") logger.info(f"Using torch of version {torch.__version__}") logger.info(f"with configuration\n{dict_to_str(train_config)}") train( model=model, model_config=model_cfg, config=train_config, device=device, logger=logger, debug=train_config.debug, )rr-lstmfrom model import ( ECG_SEQ_LAB_NET_CPSC2021, ECG_UNET_CPSC2021, ECG_SUBTRACT_UNET_CPSC2021, RR_LSTM_CPSC2021, _qrs_detection_post_process, ) from trainer import train, evaluate from utils.misc import init_logger, dict_to_str from cfg import ModelCfg, TrainCfg from copy import deepcopy from torch.nn.parallel import DistributedDataParallel as DDP, DataParallel as DP task = "rr_lstm" # or "main" model_cfg = deepcopy(ModelCfg[task]) model = RR_LSTM_CPSC2021(model_cfg) model_cfg train_config = deepcopy(TrainCfg) _set_task("rr_lstm", train_config) device = torch.device("cuda") # model = DP(model) model.to(device) logger = init_logger(log_dir=train_config.log_dir, verbose=2) logger.info(f"\n{'*'*20} Start Training {'*'*20}\n") logger.info(f"Using device {device}") logger.info(f"Using torch of version {torch.__version__}") logger.info(f"with configuration\n{dict_to_str(train_config)}") best_model = train( model=model, model_config=model_cfg, config=train_config, device=device, logger=logger, debug=True, )main_taskfrom model import ( ECG_SEQ_LAB_NET_CPSC2021, ECG_UNET_CPSC2021, ECG_SUBTRACT_UNET_CPSC2021, RR_LSTM_CPSC2021, _qrs_detection_post_process, _main_task_post_process ) from trainer import train from utils.misc import init_logger, dict_to_str from cfg import ModelCfg, TrainCfg from copy import deepcopy from torch.nn.parallel import DistributedDataParallel as DDP, DataParallel as DP from cfg import ModelCfg task = "main" # or "main" model_cfg = deepcopy(ModelCfg[task]) # model_cfg.model_name = "seq_lab" # model = ECG_SEQ_LAB_NET_CPSC2021(model_cfg) model_cfg.model_name = "unet" model = ECG_UNET_CPSC2021(model_cfg) model_cfg model = DP(model) model.to(torch.device("cuda")) train_config = deepcopy(TrainCfg) # train_config.task = "qrs_detection" _set_task("main", train_config) device = torch.device("cuda") train_config.main.model_name = "unet" train_config.main.reduction = 1 train_config.main.cnn_name = None train_config.main.rnn_name = None train_config.main.attn_name = None logger = init_logger(log_dir=train_config.log_dir, verbose=2) 
logger.info(f"\n{'*'*20} Start Training {'*'*20}\n") logger.info(f"Using device {device}") logger.info(f"Using torch of version {torch.__version__}") logger.info(f"with configuration\n{dict_to_str(train_config)}") best_model = train( model=model, model_config=model_cfg, config=train_config, device=device, logger=logger, debug=True, )Miscfrom entry_2021 import * from test_entry import run_test sample_path = "./working_dir/sample_data/data_98_1" out = challenge_entry(sample_path) out type(out['predict_endpoints'][0][0]) ds_val.reader.load_data("data_98_1").shape ds_val.reader.load_af_episodes("data_98_1")from sklearn.datasets import load_breast_cancer from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import classification_report from sklearn import svm from sklearn.linear_model import SGDClassifier from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier breast_cancer = load_breast_cancer() breast_cancers_data = breast_cancer.data breast_cancers_label = breast_cancer.target X_train, X_test, y_train, y_test = train_test_split(breast_cancers_data, breast_cancers_label, test_size=0.2, random_state=7) print(breast_cancers_data.shape) decision_tree = DecisionTreeClassifier(random_state=11) decision_tree.fit(X_train, y_train) y_pred = decision_tree.predict(X_test) print(classification_report(y_test, y_pred)) Random_Forest = RandomForestClassifier(random_state=135) Random_Forest.fit(X_train, y_train) y_pred_rf = Random_Forest.predict(X_test) print(classification_report(y_test, y_pred_rf)) svm_model = svm.SVC() svm_model.fit(X_train, y_train) y_pred_svm = svm_model.predict(X_test) print(classification_report(y_test, y_pred_svm)) sgd_model = SGDClassifier() sgd_model.fit(X_train, y_train) y_pred_sgd = sgd_model.predict(X_test) print(classification_report(y_test, y_pred_sgd)) logistic_model = LogisticRegression(max_iter=10000) logistic_model.fit(X_train, y_train) y_pred_log = logistic_model.predict(X_test) print(classification_report(y_test, y_pred_log))precision recall f1-score support 0 1.00 0.85 0.92 40 1 0.93 1.00 0.96 74 accuracy 0.95 114 macro avg 0.96 0.93 0.94 114 weighted avg 0.95 0.95 0.95 114Project 3# Configure Jupyter so figures appear in the notebook %matplotlib inline # Configure Jupyter to display the assigned value after an assignment %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' # import functions from the modsim.py module from modsim import * from numpy import * m = UNITS.meter s = UNITS.second kg = UNITS.kilogram degree = UNITS.degree params = Params(x = 0 * m, y = 5000 * m, g = 3.7 * m/s**2, mass = 900 * kg, diameter = 1.5 * m, rho = 1.2 * 0.006 * kg/m**3, C_d = 0.3, angle = 45 * degree, velocity = 50 * m / s, t_end = 20 * s) def make_system(params): """Make a system object. params: Params object with angle, velocity, x, y, diameter, duration, g, mass, rho, and C_d returns: System object """ unpack(params) # convert angle to degrees theta = np.deg2rad(angle) # compute x and y components of velocity vx, vy = pol2cart(theta, velocity) # make the initial state init = State(x=x, y=y, vx=vx, vy=vy) # compute area from diameter area = np.pi * (diameter/2)**2 return System(params, init=init, area=area) def drag_force(V, system): """Computes drag force in the opposite direction of `V`. 
V: velocity system: System object with rho, C_d, area returns: Vector drag force """ unpack(system) mag = -rho * V.mag**2 * C_d * area / 2 direction = V.hat() f_drag = mag * direction return f_drag def slope_func(state, t, system): """Computes derivatives of the state variables. state: State (x, y, x velocity, y velocity) t: time system: System object with g, rho, C_d, area, mass returns: sequence (vx, vy, ax, ay) """ x, y, vx, vy = state unpack(system) V = Vector(vx, vy) a_drag = drag_force(V, system) / mass a_grav = Vector(0, -g) a = a_grav + a_drag return vx, vy, a.x, a.y def event_func(state, t, system): """Stop when the y coordinate is 0. state: State object t: time system: System object returns: y coordinate """ x, y, vx, vy = state return y make_system(params) drag_force(velocity, system)U-Net modelNetwork architecture called "U-Net". The name of this network architecture comes from it's U-like shape. U-nets are commonly used for image segmentation, this architecture features a series of down-convolutions connected by max-pooling operations, followed by a series of up-convolutions connected by upsampling and concatenation operations. Each of the down-convolutions is also connected directly to the concatenation operations in the upsampling portion of the network. For more detail on the U-Net architecture, have a look at the original U-Net paper by Ronneberger et al. 2015.# Import the elements we'll need to build your U-Net import keras from keras import backend as K from keras.engine import Input, Model from keras.layers import Conv3D, MaxPooling3D, UpSampling3D, Activation, BatchNormalization, PReLU, Deconvolution3D from keras.optimizers import Adam from keras.layers.merge import concatenate # Set the image shape to have the channels in the first dimension K.set_image_data_format("channels_first")Using TensorFlow backend.The "depth" of U-NetThe "depth" of our U-Net is equal to the number of down-convolutions we will use. U-Net depth of 2, meaning we'll have 2 down-convolutions in your network. Input layer and its "depth"we will be doing 3D image segmentation, which is to say that, in addition to "height" and "width", our input layer will also have a "length".The shape of the input layer is (num_channels, height, width, length), where num_channels you can think of like color channels in an image, height, width and length are just the size of the input.here, the values will be:num_channels: 4height: 160width: 160length: 16# input layer tensor of the shape you'll use in the input_layer = Input(shape=(4, 160, 160, 16)) input_layerNotice that the tensor shape has a '?' as the very first dimension. This will be the batch size. So the dimensions of the tensor are: (batch_size, num_channels, height, width, length) Contracting (downward) pathHere we'll start by constructing the downward path in your network (the left side of the U-Net). 
The (height,width, length) of the input gets smaller as you move down this path, and the number of channels increases.Depth 0By "depth 0" here, we're referring to the depth of the first down-convolution in the U-net.The number of filters is specified for each depth and for each layer within that depth.The formula to use for calculating the number of filters is:filtersi=32×(2i)Where i is the current depth.So at depth i=0:filters0=32×(20)=32 Layer 0There are two convolutional layers for each depth# Conv3D tensor with 32 filters down_depth_0_layer_0 = Conv3D(filters=32, kernel_size=(3,3,3), padding='same', strides=(1,1,1) )(input_layer) down_depth_0_layer_0Notice that with 32 filters, the result you get above is a tensor with 32 channels.#a relu activation to layer 0 of depth 0 down_depth_0_layer_0 = Activation('relu')(down_depth_0_layer_0) down_depth_0_layer_0Depth 0, Layer 1For layer 1 of depth 0, the formula for calculating the number of filters is:filters[i]=32×(2pow(i))×2 Where i is the current depth.Notice that the ' × 2 ' at the end of this expression isn't there for layer 0.So at depth i=0 for layer 1:filters[0]=32×(20)×2=64# Conv3D layer with 64 filters and add relu activation down_depth_0_layer_1 = Conv3D(filters=64, kernel_size=(3,3,3), padding='same', strides=(1,1,1) )(down_depth_0_layer_0) down_depth_0_layer_1 = Activation('relu')(down_depth_0_layer_1) down_depth_0_layer_1Max poolingWithin the U-Net architecture, there is a max pooling operation after each of the down-convolutions (not including the last down-convolution at the bottom of the U). In general, this means we'll add max pooling after each down-convolution up to (but not including) the depth - 1 down-convolution (since we started counting at 0).Here:The overall depth of the U-Net you're constructing is 2So the bottom of U is at a depth index of: 2−1=1 .So far we've only defined the depth=0 down-convolutions, so the next thing to do is add max pooling# max pooling layer down_depth_0_layer_pool = MaxPooling3D(pool_size=(2,2,2))(down_depth_0_layer_1) down_depth_0_layer_poolDepth 1, Layer 0At depth 1, layer 0, the formula for calculating the number of filters is:filtersi=32×(2i) Where i is the current depth.So at depth i=1 :filters[1]=32×(21)=64# Conv3D layer to your network with relu activation down_depth_1_layer_0 = Conv3D(filters=64, kernel_size=(3,3,3), padding='same', strides=(1,1,1) )(down_depth_0_layer_pool) down_depth_1_layer_0 = Activation('relu')(down_depth_1_layer_0) down_depth_1_layer_0Depth 1, Layer 1For layer 1 of depth 1 the formula we'll use for number of filters is:filters[i]=32×(2i)×2 Where i is the current depth.Notice that the ' ×2 ' at the end of this expression isn't there for layer 0.So at depth i=1 :filters[0]=32×(21)×2=128# another Conv3D with 128 filters to your network. down_depth_1_layer_1 = Conv3D(filters=128, kernel_size=(3,3,3), padding='same', strides=(1,1,1) )(down_depth_1_layer_0) down_depth_1_layer_1 = Activation('relu')(down_depth_1_layer_1) down_depth_1_layer_1No max pooling at depth 1 (the bottom of the U)When we get to the "bottom" of the U-net, we don't need to apply max pooling after the convolutions. Expanding (upward) PathNow we'll work on the expanding path of the U-Net, (going up on the right side ). The image's (height, width, length) all get larger in the expanding path. 
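The filter counts quoted above (and reused on the way back up) all follow the same rule, so it can help to see it written out as a tiny helper. This is only a sketch; the name `n_filters` is ours and does not appear in the notebook, it simply encodes filters_i = 32 x 2^i, doubled for the second convolution at each depth.
```Python
# Sketch only: `n_filters` is our name, encoding the filter-count rule stated above.
def n_filters(depth, layer):
    """Conv3D filter count at a given U-Net depth (0-based) and layer (0 or 1)."""
    base = 32 * (2 ** depth)                 # filters_i = 32 * 2^i
    return base * 2 if layer == 1 else base  # the second layer at each depth doubles it

# Reproduces the numbers used in the cells above and below:
assert n_filters(0, 0) == 32
assert n_filters(0, 1) == 64
assert n_filters(1, 0) == 64
assert n_filters(1, 1) == 128
```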
Depth 0, Up sampling layer 0We'll use a pool size of (2,2,2) for upsampling.# an upsampling operation to your network up_depth_0_layer_0 = UpSampling3D(size=(2,2,2))(down_depth_1_layer_1) up_depth_0_layer_0Concatenate upsampled depth 0 with downsampled depth 0Now we'll apply a concatenation operation using the layers that are both at the same depth of 0.up_depth_0_layer_0: shape is (?, 128, 160, 160, 16)depth_0_layer_1: shape is (?, 64, 160, 160, 16)If they're the same, then they can be concatenated along axis 1 (the channel axis).The (height, width, length) is (160, 160, 16) for both.# Print the shape of layers to concatenate print(up_depth_0_layer_0) print() print(down_depth_0_layer_1) # concatenation along axis 1 up_depth_1_concat = concatenate([up_depth_0_layer_0, down_depth_0_layer_1], axis=1) up_depth_1_concatNotice that the upsampling layer had 128 channels, and the down-convolution layer had 64 channels so that when concatenated, the result has 128 + 64 = 192 channels. Up-convolution layer 1The number of filters for this layer will be set to the number of channels in the down-convolution's layer 1 at the same depth of 0 (down_depth_0_layer_1).down_depth_0_layer_1 print(f"number of filters: {down_depth_0_layer_1._keras_shape[1]}") # Conv3D up-convolution with 64 filters to network up_depth_1_layer_1 = Conv3D(filters=64, kernel_size=(3,3,3), padding='same', strides=(1,1,1) )(up_depth_1_concat) up_depth_1_layer_1 = Activation('relu')(up_depth_1_layer_1) up_depth_1_layer_1Up-convolution depth 0, layer 2At layer 2 of depth 0 in the up-convolution the next step will be to add another up-convolution. The number of filters we'll want to use for this next up-convolution will need to be equal to the number of filters in the down-convolution depth 0 layer 1.print(down_depth_0_layer_1) print(f"number of filters: {down_depth_0_layer_1._keras_shape[1]}")Tensor("activation_2/Relu:0", shape=(None, 64, 160, 160, 16), dtype=float32) number of filters: 64As you can see, the number of channels / filters in down_depth_0_layer_1 is 64.# Conv3D up-convolution with 64 filters to your network up_depth_1_layer_2 = Conv3D(filters=64, kernel_size=(3,3,3), padding='same', strides=(1,1,1) )(up_depth_1_layer_1) up_depth_1_layer_2 = Activation('relu')(up_depth_1_layer_2) up_depth_1_layer_2Final ConvolutionFor the final convolution, we will set the number of filters to be equal to the number of classes in our input data.we will be using data with 3 classes, namely:1: edema2: non-enhancing tumor3: enhancing tumor# final Conv3D with 3 filters to your network. final_conv = Conv3D(filters=3, #3 categories kernel_size=(1,1,1), padding='valid', strides=(1,1,1) )(up_depth_1_layer_2) final_convActivation for final convolution# sigmoid activation to your final convolution. 
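# (Added note, not from the original notebook: the sigmoid below treats each of the 3 output
# channels as an independent per-voxel probability; if the classes were mutually exclusive,
# a softmax over the channel axis would be the more usual choice.)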
final_activation = Activation('sigmoid')(final_conv) final_activationCreate and compile the model# Define and compile the model model = Model(inputs=input_layer, outputs=final_activation) model.compile(optimizer=Adam(lr=0.00001), loss='categorical_crossentropy', metrics=['categorical_accuracy'] ) # Print out a summary of the model created model.summary()Model: "model_1" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input_1 (InputLayer) (None, 4, 160, 160, 0 __________________________________________________________________________________________________ conv3d_1 (Conv3D) (None, 32, 160, 160, 3488 input_1[0][0] __________________________________________________________________________________________________ activation_1 (Activation) (None, 32, 160, 160, 0 conv3d_1[0][0] __________________________________________________________________________________________________ conv3d_2 (Conv3D) (None, 64, 160, 160, 55360 activation_1[0][0] [...]Elevator problemThis is a design problem that will ask for some pseudo-code to illustrate your ideas. The thought process is more important than syntax or programming details, so don't worry too much about those. In this problem, we will start with a very simple set of requirements and add some more later.Please **feel free to ask clarifying questions at any point**. Understanding the requirements, here as inreal life, is part of the challenge.The challenge is about designing the controller for an elevator. It is broken down into progressively more realistic requirements for the behavior, in three stages. Part 1This is a very crude model of an elevator. The only thing the elevator can do is go to a different floor. Any other features of a real elevator, i.e. people pressing buttons, are not included for this part.The elevator could print out its current state like this:The elevator could print out its current state like this:```Floor 1Floor 3Floor 1Floor 7..etc..```# Add your code/pseudo-code class Elevator: passPart 2Add the idea of people pressing buttons, both to call the elevator and choose their destination. People are on a given floor and can press the up or down button. Once the elevator arrives at their floor, they get in and choose another floor to go to (when they arrive at thay floor, they can be assumed to get out).The elevator could print out its current state like this:```Elevator on Floor 1Person on Floor 3 presses Down buttonElevator on Floor 31 Person gets in elevatorPerson in elevator presses Floor 1Elevator on Floor 11 Person gets out of elevator..etc..```# Add your code, pseudo-code (you can start by copying from above)Part 3This is a more realistic elevator. Add the concept of time for the elevator to get from one floor to another. Also try to act like a real elevator and avoid stranding people while still keeping travel time down. 
- You can assume it takes 3 units of time for the elevator to travel a distance of one floor.- The elevator pauses for 1 unit of time whenever it stops to give people a chance to enter or leave the elevator.- Pushing a button to call the elevator, or pushing a button inside the elevator, happens "instantly" in whatever is the current unit of time.This is a lot to deal with as pseudo-code, so here we're asking more for an API skeleton and some discussion:- methods for the Elevator class- description (and name) of variables used as data structures- descriptions of the main algorithm(s) used to manipulate the data structuresYour methods and data structures should be able to keep track of:- what floor the elevator is on now- which floor(s) will it visit next- how many people are inside the elevatorConsider the following example scenario:- The elevator is empty at the first floor (floors are numbered from 1)- Someone on the 3rd floor presses the down button- One time unit later, someone on the 2nd floor presses the down button- The elevator goes up to the 3rd floor, lets in the person- The person (who just got in) presses the 1st floor button- The elevator goes down to the 2nd floor, lets in the person- The 2nd person is also going to the 1st floor, so does nothing- The elevator goes to the 1st floor and lets both people outThe output from the scenario above could look like this:``` Time Action/Status0 Stopped at Floor 11 Person on Floor 3 presses Down button1 Going to Floor 32 Going to Floor 32 Person on Floor 2 presses Down button3 Passing Floor 24 Going to Floor 35 Going to Floor 36 Arrive at Floor 37 Pause at floor 37 Person enters the elevator7 Person inside presses Floor 1 button8 Going to Floor 29 Going to Floor 210 Arrive at Floor 211 Pause at Floor 211 Person enters the elevator12 Going to Floor 113 Going to Floor 114 Arrive at Floor 115 Pause at Floor 115 2 people exit the elevator16 ......etc...```# Add your API here class Elevator: def __init__(self, num_floors_in_building): self.num_floors = num_floors_in_building # other variables.. # Methods in the API..Auto-Batched Joint Distributions: A Gentle Tutorial Copyright 2020 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" } # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Introduction TensorFlow Probability (TFP) offers a number of `JointDistribution` abstractions that make probabilistic inference easier by allowing a user to easily express a probabilistic graphical model in a near-mathematical form; the abstraction generates methods for sampling from the model and evaluating the log probability of samples from the model. In this tutorial, we review "autobatched" variants, which were developed after the original `JointDistribution` abstractions. 
Relative to the original, non-autobatched abstractions, the autobatched versions are simpler to use and more ergonomic, allowing many models to be expressed with less boilerplate. In this colab, we explore a simple model in (perhaps tedious) detail, making clear the problems autobatching solves, and (hopefully) teaching the reader more about TFP shape concepts along the way.Prior to the introduction of autobatching, there were a few different variants of `JointDistribution`, corresponding to different syntactic styles for expressing probabilistic models: `JointDistributionSequential`, `JointDistributionNamed`, and`JointDistributionCoroutine`. Auobatching exists as a mixin, so we now have `AutoBatched` variants of all of these. In this tutorial, we explore the differences between `JointDistributionSequential` and `JointDistributionSequentialAutoBatched`; however, everything we do here is applicable to the other variants with essentially no changes. Dependencies & Prerequisites#@title Import and set ups{ display-mode: "form" } import functools import numpy as np import tensorflow.compat.v2 as tf tf.enable_v2_behavior() import tensorflow_probability as tfp tfd = tfp.distributionsPrerequisite: A Bayesian Regression Problem We'll consider a very simple Bayesian regression scenario:$$\begin{align*}m & \sim \text{Normal}(0, 1) \\b & \sim \text{Normal}(0, 1) \\Y & \sim \text{Normal}(mX + b, 1)\end{align*}$$In this model, `m` and `b` are drawn from standard normals, and the observations `Y` are drawn from a normal distribution whose mean depends on the random variables `m` and `b`, and some (nonrandom, known) covariates `X`. (For simplicity, in this example, we assume the scale of all random variables is known.)To perform inference in this model, we'd need to know both the covariates `X` and the observations `Y`, but for the purposes of this tutorial, we'll only need `X`, so we define a simple dummy `X`:X = np.arange(7) XDesiderata In probabilistic inference, we often want to perform two basic operations:- `sample`: Drawing samples from the model.- `log_prob`: Computing the log probability of a sample from the model.The key contribution of TFP's `JointDistribution` abstractions (as well as of many other approaches to probabilistic programming) is to allow users to write a model *once* and have access to both `sample` and `log_prob` computations.Noting that we have 7 points in our data set (`X.shape = (7,)`), we can now state the desiderata for an excellent `JointDistribution`:* `sample()` should produce a list of `Tensors` having shape `[(), (), (7,)`], corresponding to the scalar slope, scalar bias, and vector observations, respectively.* `log_prob(sample())` should produce a scalar: the log probability of a particular slope, bias, and observations.* `sample([5, 3])` should produce a list of `Tensors` having shape `[(5, 3), (5, 3), (5, 3, 7)]`, representing a `(5, 3)`-*batch* of samples from the model.* `log_prob(sample([5, 3]))` should produce a `Tensor` with shape (5, 3).We'll now look at a succession of `JointDistribution` models, see how to achieve the above desiderata, and hopefully learn a little more about TFP shapes along the way. Spoiler alert: The approach that satisfies the above desiderata without added boilerplate is [autobatching](scrollTo=_h7sJ2bkfOS7). First Attempt; `JointDistributionSequential`jds = tfd.JointDistributionSequential([ tfd.Normal(loc=0., scale=1.), # m tfd.Normal(loc=0., scale=1.), # b lambda b, m: tfd.Normal(loc=m*X + b, scale=1.) 
# Y ])This is more or less a direct translation of the model into code. The slope `m` and bias `b` are straightforward. `Y` is defined using a `lambda`-function: the general pattern is that a `lambda`-function of $k$ arguments in a `JointDistributionSequential` (JDS) uses the previous $k$ distributions in the model. Note the "reverse" order. We'll call `sample_distributions`, which returns both a sample *and* the underlying "sub-distributions" that were used to generate the sample. (We could have produced just the sample by calling `sample`; later in the tutorial it will be convenient to have the distributions as well.) The sample we produce is fine:dists, sample = jds.sample_distributions() sampleBut `log_prob` produces a result with an undesired shape:jds.log_prob(sample)And multiple sampling doesn't work:try: jds.sample([5, 3]) except tf.errors.InvalidArgumentError as e: print(e)Incompatible shapes: [5,3] vs. [7] [Op:Mul]Let's try to understand what's going wrong. A Brief Review: Batch and Event Shape In TFP, an ordinary (not a `JointDistribution`) probability distribution has an *event shape* and a *batch shape*, and understanding the difference is crucial to effective use of TFP:* Event shape describes the shape of a single draw from the distribution; the draw may be dependent across dimensions. For scalar distributions, the event shape is []. For a 5-dimensional MultivariateNormal, the event shape is [5].* Batch shape describes independent, not identically distributed draws, aka a "batch" of distributions. Representing a batch of distributions in a single Python object is one of the key ways TFP achieves efficiency at scale.For our purposes, a critical fact to keep in mind is that if we call `log_prob` on a single sample from a distribution, the result will always have a shape that matches (i.e., has as rightmost dimensions) the *batch* shape.For a more in-depth discussion of shapes, see [the "Understanding TensorFlow Distributions Shapes" tutorial](https://www.tensorflow.org/probability/examples/Understanding_TensorFlow_Distributions_Shapes). Why Doesn't `log_prob(sample())` Produce a Scalar? Let's use our knowledge of batch and event shape to explore what's happening with `log_prob(sample())`. Here's our sample again:sampleAnd here are our distributions:distsThe log probability is computed by summing the log probabilities of the sub-distributions at the (matched) elements of the parts:log_prob_parts = [dist.log_prob(s) for (dist, s) in zip(dists, sample)] log_prob_parts np.sum(log_prob_parts) - jds.log_prob(sample)So, one level of explanation is that the log probability calculation is returning a 7-Tensor because the third subcomponent of `log_prob_parts` is a 7-Tensor. But why? Well, we see that the last element of `dists`, which corresponds to our distribution over `Y` in the mathematial formulation, has a `batch_shape` of `[7]`. In other words, our distribution over `Y` is a batch of 7 independent normals (with different means and, in this case, the same scale). We now understand what's wrong: in JDS, the distribution over `Y` has `batch_shape=[7]`, a sample from the JDS represents scalars for `m` and `b` and a "batch" of 7 independent normals. and `log_prob` computes 7 separate log-probabilities, each of which represents the log probability of drawing `m` and `b` and a single observation `Y[i]` at some `X[i]`. 
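To make that concrete, here is a small self-contained check (only the `tfd` alias and `np` from the imports above are assumed; the name `y_dist` is ours): a scalar `Normal` whose `loc` is a length-7 vector is a *batch* of 7 distributions, so its `log_prob` of a 7-vector sample is itself length 7.
```Python
# Sketch: a scalar Normal with a length-7 loc has batch_shape [7] and event_shape [],
# so log_prob of a sample returns one value per batch member.
loc = np.zeros(7, dtype=np.float32)     # any length-7 loc; only the shape matters here
y_dist = tfd.Normal(loc=loc, scale=1.)
print(y_dist.batch_shape)               # -> (7,)
print(y_dist.event_shape)               # -> ()
print(y_dist.log_prob(y_dist.sample()).shape)   # -> (7,)
```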
Fixing `log_prob(sample())` with `Independent` Recall that `dists[2]` has `event_shape=[]` and `batch_shape=[7]`:dists[2]By using TFP's `Independent` metadistribution, which converts batch dimensions to event dimensions, we can convert this into a distribution with `event_shape=[7]` and `batch_shape=[]` (we'll rename it `y_dist_i` because it's a distribution on `Y`, with the `_i` standing in for our `Independent` wrapping):y_dist_i = tfd.Independent(dists[2], reinterpreted_batch_ndims=1) y_dist_iNow, the `log_prob` of a 7-vector is a scalar:y_dist_i.log_prob(sample[2])Under the covers, `Independent` sums over the batch:y_dist_i.log_prob(sample[2]) - tf.reduce_sum(dists[2].log_prob(sample[2]))And indeed, we can use this to construct a new `jds_i` (the `i` again stands for `Independent`) where `log_prob` returns a scalar:jds_i = tfd.JointDistributionSequential([ tfd.Normal(loc=0., scale=1.), # m tfd.Normal(loc=0., scale=1.), # b lambda b, m: tfd.Independent( # Y tfd.Normal(loc=m*X + b, scale=1.), reinterpreted_batch_ndims=1) ]) jds_i.log_prob(sample)A couple notes:- `jds_i.log_prob(s)` is *not* the same as `tf.reduce_sum(jds.log_prob(s))`. The former produces the "correct" log probability of the joint distribution. The latter sums over a 7-Tensor, each element of which is the sum of the log probability of `m`, `b`, and a single element of the log probability of `Y`, so it overcounts `m` and `b`. (`log_prob(m) + log_prob(b) + log_prob(Y)` returns a result rather than throwing an exception because TFP follows TF and NumPy's broadcasting rules; adding a scalar to a vector produces a vector-sized result.)- In this particular case, we could have solved the problem and achieved the same result using `MultivariateNormalDiag` instead of `Independent(Normal(...))`. `MultivariateNormalDiag` is a vector-valued distribution (i.e., it already has vector event-shape). Indeeed `MultivariateNormalDiag` could be (but isn't) implemented as a composition of `Independent` and `Normal`. It's worthwhile to remember that given a vector `V`, samples from `n1 = Normal(loc=V)`, and `n2 = MultivariateNormalDiag(loc=V)` are indistinguishable; the difference beween these distributions is that `n1.log_prob(n1.sample())` is a vector and `n2.log_prob(n2.sample())` is a scalar. Multiple Samples? Drawing multiple samples still doesn't work:try: jds_i.sample([5, 3]) except tf.errors.InvalidArgumentError as e: print(e)Incompatible shapes: [5,3] vs. [7] [Op:Mul]Let's think about why. When we call `jds_i.sample([5, 3])`, we'll first draw samples for `m` and `b`, each with shape `(5, 3)`. Next, we're going to try to construct a `Normal` distribution via:```tfd.Normal(loc=m*X + b, scale=1.)```But if `m` has shape `(5, 3)` and `X` has shape `7`, we can't multiply them together, and indeed this is the error we're hitting:m = tfd.Normal(0., 1.).sample([5, 3]) try: m * X except tf.errors.InvalidArgumentError as e: print(e)Incompatible shapes: [5,3] vs. [7] [Op:Mul]To resolve this issue, let's think about what properties the distribution over `Y` has to have. If we've called `jds_i.sample([5, 3])`, then we know `m` and `b` will both have shape `(5, 3)`. What shape should a call to `sample` on the `Y` distribution produce? The obvious answer is `(5, 3, 7)`: for each batch point, we want a sample with the same size as `X`. 
We can achieve this by using TensorFlow's broadcasting capabilities, adding extra dimensions:m[..., tf.newaxis].shape (m[..., tf.newaxis] * X).shapeAdding an axis to both `m` and `b`, we can define a new JDS that supports multiple samples:jds_ia = tfd.JointDistributionSequential([ tfd.Normal(loc=0., scale=1.), # m tfd.Normal(loc=0., scale=1.), # b lambda b, m: tfd.Independent( # Y tfd.Normal(loc=m[..., tf.newaxis]*X + b[..., tf.newaxis], scale=1.), reinterpreted_batch_ndims=1) ]) shaped_sample = jds_ia.sample([5, 3]) shaped_sample jds_ia.log_prob(shaped_sample)As an extra check, we'll verify that the log probability for a single batch point matches what we had before:(jds_ia.log_prob(shaped_sample)[3, 1] - jds_i.log_prob([shaped_sample[0][3, 1], shaped_sample[1][3, 1], shaped_sample[2][3, 1, :]]))AutoBatching For The Win Excellent! We now have a version of JointDistribution that handles all our desiderata: `log_prob` returns a scalar thanks to the use of `tfd.Independent`, and multiple samples work now that we fixed broadcasting by adding extra axes.What if I told you there was an easier, better way? There is, and it's called `JointDistributionSequentialAutoBatched` (JDSAB):jds_ab = tfd.JointDistributionSequentialAutoBatched([ tfd.Normal(loc=0., scale=1.), # m tfd.Normal(loc=0., scale=1.), # b lambda b, m: tfd.Normal(loc=m*X + b, scale=1.) # Y ]) jds_ab.log_prob(jds.sample()) shaped_sample = jds_ab.sample([5, 3]) jds_ab.log_prob(shaped_sample) jds_ab.log_prob(shaped_sample) - jds_ia.log_prob(shaped_sample)How does this work? While you could attempt to [read the code](https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/distributions/joint_distribution_auto_batched.pyL426) for a deep understanding, we'll give a brief overview which is sufficient for most use cases:- Recall that our first problem was that our distribution for `Y` had `batch_shape=[7]` and `event_shape=[]`, and we used `Independent` to convert the batch dimension to an event dimension. JDSAB ignores the batch shapes of component distributions; instead it treats batch shape as an overall property of the model, which is assumed to be `[]` (unless specified otherwise by setting `batch_ndims > 0`). The effect is equivalent to using tfd.Independent to convert *all* batch dimensions of component distributions into event dimensions, as we did manually above.- Our second problem was a need to massage the shapes of `m` and `b` so that they could broadcast appropriately with `X` when creating multiple samples. With JDSAB, you write a model to generate a single sample, and we "lift" the entire model to generate multiple samples using TensorFlow's [vectorized_map](https://www.tensorflow.org/api_docs/python/tf/vectorized_map). (This feature is analagous to JAX's [vmap](https://jax.readthedocs.io/en/latest/notebooks/quickstart.htmlAuto-vectorization-with-vmap).) Exploring the batch shape issue in more detail, we can compare the batch shapes of our original "bad" joint distribution `jds`, our batch-fixed distributions `jds_i` and `jds_ia`, and our autobatched `jds_ab`:jds.batch_shape jds_i.batch_shape jds_ia.batch_shape jds_ab.batch_shapeWe see that the original `jds` has subdistributions with different batch shapes. `jds_i` and `jds_ia` fix this by creating subdistributions with the same (empty) batch shape. `jds_ab` has only a single (empty) batch shape. It's worth noting that `JointDistributionSequentialAutoBatched` offers some additional generality for free. 
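As a rough, self-contained illustration of that "lifting" step (this is only a sketch of what `tf.vectorized_map` does, not TFP's internal code; the names `single_sample_loc`, `X_f`, `ms`, `bs` are ours):
```Python
# Sketch: a function written for a single (m, b) pair is mapped over a flat batch,
# with no manual tf.newaxis bookkeeping.
X_f = np.arange(7).astype(np.float32)   # local float copy of the covariates

def single_sample_loc(mb):
    m, b = mb
    return m * X_f + b                  # written as if m and b were scalars

ms = tf.random.normal([15])             # a flat batch of 15 slopes
bs = tf.random.normal([15])             # and 15 intercepts
locs = tf.vectorized_map(single_sample_loc, (ms, bs))
print(locs.shape)                       # (15, 7)
```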
Suppose we make the covariates `X` (and, implicitly, the observations `Y`) two-dimensional:X = np.arange(14).reshape((2, 7)) XOur `JointDistributionSequentialAutoBatched` works with no changes (we need to redefine the model because the shape of `X` is cached by `jds_ab.log_prob`):jds_ab = tfd.JointDistributionSequentialAutoBatched([ tfd.Normal(loc=0., scale=1.), # m tfd.Normal(loc=0., scale=1.), # b lambda b, m: tfd.Normal(loc=m*X + b, scale=1.) # Y ]) shaped_sample = jds_ab.sample([5, 3]) shaped_sample jds_ab.log_prob(shaped_sample)On the other hand, our carefully crafted `JointDistributionSequential` no longer works:jds_ia = tfd.JointDistributionSequential([ tfd.Normal(loc=0., scale=1.), # m tfd.Normal(loc=0., scale=1.), # b lambda b, m: tfd.Independent( # Y tfd.Normal(loc=m[..., tf.newaxis]*X + b[..., tf.newaxis], scale=1.), reinterpreted_batch_ndims=1) ]) try: jds_ia.sample([5, 3]) except tf.errors.InvalidArgumentError as e: print(e)Incompatible shapes: [5,3,1] vs. [2,7] [Op:Mul]Lab 4: Functional Programming OverviewIn this lab, you will explore functional programming's place in the Python landscape, and gain practice with powerful tools like `map`, `filter`, iterators, generators, and decorators.*Surprisingly, a few people have asked for longer labs - we think we've delivered! We've added lots of challenge problems to this lab, many of which are domain-specific, but we don't expect you to complete them all. If you're short on time, or don't know exactly what a challenge problem is asking, skip it! Challenge problems are intended to be challenging, and should be reserved only for when you've finished the rest of the lab.* Functional Tools LambdasRecall that lambda functions are anonymous, unnamed function objects created on the fly, usually to accomplish a small transformation. For example,```Python(lambda val: val ** 2)(5) => 25(lambda x, y: x * y)(3, 8) => 24(lambda s: s.strip().lower()[:2])(' PyTHon') => 'py'```On their own, `lambda`s aren't particularly useful, as demonstrated above, and are almost never created and invoked directly as shown. Usually, `lambda`s are used to avoid creating a formal function definiton for small throwaway functions, not only because they involves less typing (no `def` or `return` statement needed) but also, and perhaps more importantly, because these small functions won't pollute the enclosing namespace and provide the function implementation inline.Lambdas are also frequently used as arguments to or return values from higher-order functions, such as `map` and `filter`. MapRecall from class that `map(func, iterable)` applies a function over elements of an iterable.For each of the following rows, write a single statement using `map` that converts the left column into the right column:| From | To| | --- | --- | | `['12', '-2', '0']` | `[12, -2, 0]` || `['hello', 'world']` | `[5, 5]` || `['hello', 'world']`|`['olleh', 'dlrow']` || `range(2, 6)`|`[(2, 4, 8), (3, 9, 27), (4, 16, 64), (5, 25, 125)]` || `zip(range(2, 5), range(3, 9, 2))`|`[6, 15, 28]` |*Hint: you may need to wrap the output in a `list()` constructor to see it printed to console - that is, `list(map(..., ...))`*# Write `map` expressions to convert the following inputs into the indicated outputs. 
# ['12', '-2', '0'] --> [12, -2, 0] # ['hello', 'world'] --> [5, 5] # ['hello', 'world']` --> ['olleh', 'dlrow'] # range(2, 6) --> [(2, 4, 8), (3, 9, 27), (4, 16, 64), (5, 25, 125)] # zip(range(2, 5), range(3, 9, 2)) --> [6, 15, 28]Using Multiple IterablesThe `map` function can accept a variable number of iterables as arguments. Thus, `map(func, iterA, iterB, iterC)` is equivalent to `map(func, zip(iterA, iterB, iterC))`. This can be used as follows:```Pythonmap(int, ('101001', '0xCAFE', '42'), (2, 16, 10)) generates 41, 51966, 42```To generate each of these elements, Python will evaluate: `int('10110', 2)`, then `int('0xCAFE', 16)`, and finally `int('42', 10)`.*This works because* `int` *takes an optional second argument specifying the conversion base* FilterRecall from class that `filter(pred, iterable)` keeps only those elements from an iterable that satisfy a predicate function.Write statements using `filter` that convert the following sequences from the left column to the right column:From | To--- | ---`['12', '-2', '0']` | `['12', '0']``['hello', 'world']` | `['world']``['Stanford', 'Cal', 'UCLA']`|`['Stanford']``range(20)`|`[0, 3, 5, 6, 9, 10, 12, 15, 18]`As before, you may have to wrap the result in a call to `list(...)` to produce the filtered output.# Write `filter` expressions to convert the following inputs into the indicated outputs. # ['12', '-2', '0'] --> ['12', '0'] # ['hello', 'world'] --> ['world'] # ['Stanford', 'Cal', 'UCLA'] --> ['Stanford'] # range(20) --> [0, 3, 5, 6, 9, 10, 12, 15, 18]Useful Tools from the Standard Library (optional) Module: `functools`The `functools` module is a module in the standard library "for higher order functions; functions that act on or return other functions."There is a utility in the `functools` module called `reduce`, which in Python 2.x was a builtin language feature but has since been relegated to this module. The `reduce` function is explained best by the [official documentation](https://docs.python.org/3/library/functools.htmlfunctools.reduce): `functools.reduce(function, iterable[, initializer])`> Apply `function` of two arguments cumulatively to the items of `iterable`, from left to right, so as to reduce the iterable to a single value. For example, `functools.reduce(lambda x, y: x + y, [1, 2, 3, 4, 5])` calculates `((((1 + 2) + 3) + 4) + 5)`. The left argument, `x`, is the accumulated value and the right argument, `y`, is the update value from the sequence. If the optional `initializer` is present, it is placed before the items of the sequence in the calculation, and serves as a default when the iterable is empty. If `initializer` is not given and `iterable` contains only one item, the first item is returned.Use the `reduce` function to find the least common multiple (LCM) of an arbitrary amount of positive integer arguments. This can be accomplished in one line of Python. If no numbers are supplied to the function, you can return the value 1.Hint: Recall that, mathematically, the LCM of two numbers `x` and `y` can be expressed as `(x*y) // gcd(x, y)`, and that the LCM of a list of numbers `[x, y, z, ...]` is the same as the `LCM(...(LCM(LCM(x, y), z), ...)`.from functools import reduce from math import gcd def lcm(*nums): """Return the least common multiple of an arbitrary collection of numbers.""" pass # Your implementation here. Use `reduce`. This function can be implemented in only one line! 
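# One possible one-liner (a sketch, not the official solution), following the hint above:
# def lcm(*nums):
#     return reduce(lambda x, y: (x * y) // gcd(x, y), nums, 1)
# The initializer 1 makes lcm() return 1 and lcm(n) return n, as required.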
print(lcm(3, 5)) print(lcm(41, 106, 12)) print(lcm(1, 2, 6, 24, 120, 720)) print(lcm(3)) print(lcm())Module: `operator`Frequently, you might find yourself writing anonymous functions similar to `lambda x, y: x + y`. This feels a little redundant, since Python already knows how to add two values together. Unfortunately, we can't just refer to `+` as a function - it's a builtin syntax element. To solve this problem, The `operator` module exports callable functions for each builtin operation. These operators can simplify some common uses of lambdas, and should be used wherever possible, since in almost all cases they are faster than constructing and repeatedly invoking a lambda function.```Pythonimport operatoroperator.add(1, 2) => 3operator.mul(3, 10) => 30operator.pow(2, 3) => 8operator.itemgetter(1)([1, 2, 3]) => 2```Take a moment to skim over the [official documentation for the `operator` module](https://docs.python.org/3/library/operator.html).Next, use `reduce` in conjunction with a function from the `operator` module to compute factorials in one line of Python. For example, to compute `5!`, try computing `(((1 * 2) * 3) * 4) * 5` using `reduce` and the `operator` module!import operator from functools import reduce def fact(n): """Return the factorial of a positive number.""" # Your implementation here: Use reduce, an operator, and only one line! fact(3) # => 6 fact(7) # => 5040Custom comparison for `sort`, `max`, and `min`When ordering sequences, or finding the largest or smallest element of a sequence, Python defaults to a standard ordering for sequence elements of certain types. For instance, a collection of strings will be sorted alphabetically (by ASCII value), and a collection of tuples will sort lexicographically. Sometimes, however, we need to sort based on a custom key value. In Python, we can supply an optional `key` argument to `sorted(seq)`, `max(seq)`, `min(seq)`, or `seq.sort()` to determine the values used for ordering elements in a sequence. In Python, both `sorted(seq)` and `seq.sort()` are stable.Read the following code examples and see if you can justify to your neighbor why Python produces the answers it does in these cases.```Pythonwords = ['pear', 'cabbage', 'apple', 'bananas']min(words) => 'apple'words.sort(key=lambda s: s[-1]) Alternatively, key=operator.itemgetter(-1)words => ['cabbage', 'apple', 'pear', 'bananas'] ... Why 'cabbage' > 'apple'?max(words, key=len) 'cabbage' ... Why not 'bananas'?min(words, key=lambda s: s[1::2]) What will this value be?```Next, write a function to return the two words with the highest alphanumeric score of uppercase letters. We've provided a function that computes the alphanumeric score of supplied letters, which must be a string containing only uppercase letters. You may want to use `filter` in conjunction with any other functions we've seen.def alpha_score(upper_letters): """Return the alphanumeric sum of letters in a string, where A == 1 and Z == 26. The argument upper_letters must be composed entirely of capital letters. """ return sum(map(lambda l: 1 + ord(l) - ord('A'), upper_letters)) print(alpha_score('ABC')) # => 6 = 1 ('A') + 2 ('B') + 3 ('C') def two_best(words): """Return the two words whose alphanumeric score of uppercase letters is the highest.""" pass # Your implementation here two_best(['hEllO', 'wOrLD', 'i', 'aM', 'PyThOn'])Purely Functional Programming (optional)As an academic thought exercise, let's investigate how we would use Python in a purely functional programming paradigm. 
Ultimately, we will try to remove statements and replace them with expressions. Replacing Control FlowThe first thing that needs to go are control flow statements - `if/elif/else`. Luckily, Python, like many other languages, short circuits boolean expressions. This means that we can rewrite```Pythonif : func1()elif : func2()else: func3()```as the equivalent expression```Python( and func1()) or ( and func2()) or (func3())```Recalling Python's rules for short-circuiting boolean expressions, why does the above expression (usually) result in the same output as the procedural control flow case?Note: The above works if and only if all of the functions return truthy values. In order to guarantee that these expressions are actually the same, you might have to write something like the following, because all two-element tuples are truthy regardless of their content.```Python(( and (func1(), 0)) or ( and (func1(), 0)) or ((func1(), 0)))[0]```Rewrite the following code block without using `if/elif/else`:```Pythonif score == 1: return "Winner"elif score == -1: return "Loser"else: return "Tied"```# Purely-functional control flow. def result(score): return ... # Your implementation here.Replacing ReturnsHowever, in the above function, we still need return values to do anything useful. Since lambdas implicitly return their expression, we will use lambdas to eliminate return statements. We can bind our temporary conditional conjunctive normal form expressions (which replace if/elif/else statements) to a lambda function.```Pythonecho = lambda arg: arg In practice, you should never bind lambdas to local namescond_fn = lambda score: (x==1 and echo("one")) \ or (x==2 and echo("two")) \ or (echo("other"))```Now, we've gotten rid of ever having to use the `return` keyword. Replacing LoopsGetting rid of loops is easy! We can `map` over a sequence instead of looping over the sequence. For example:```Pythonfor e in lst: func(e)```becomes```Pythonmap(func, lst)```This is exactly the sort of high-level abstraction we discussed in class this week, where we describe the operation of a function over a collection instead of over elements. Replacing Action SequenceMost programs take the form a sequence of steps, written out line by line. By using a `just_do_it` function and `map`, we can replicate a sequence of function calls.```Pythonjust_do_it = lambda f: f() Suppose f1, f2, f3 are actionsmap(just_do_it, [f1, f2, f3])```Our main program execution can then be a single call to such a map expression. NoteIn fact, Python has `eval` and `exec` functions builtin, which behave somewhat like our `just_do_it` function. Don't use them! They are dangerous. 
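Before putting everything together, here is one possible sketch of the `result` rewrite asked for above (one of several acceptable answers; it relies on the returned strings being truthy, so the two-element-tuple trick is not needed):
```Python
# Sketch: purely-functional control flow via short-circuiting boolean expressions.
result = lambda score: ((score == 1 and "Winner")
                        or (score == -1 and "Loser")
                        or "Tied")

print(result(1))    # => Winner
print(result(-1))   # => Loser
print(result(0))    # => Tied
```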
Putting It All TogetherConsider the following program, which computes the number of ways to make change for some number of cents using an infinite supply of coins of specified denominations.```Pythondef make_change(target, coins): if target == 0: return 1 if not coins: return 0 increment = coins[0] If increment is 25c, try using 0c, 25c, 50c, ..., up to (and possibly including target) total = 0 for choice in range(0, target + 1, increment): total += make_change(target - choice, coins[1:]) return totalmake_change(9, [5, 1]) 2 ways: 1 five and 4 ones, or 9 ones.make_change(31, [25, 10, 5, 1]) 18 waysmake_change(31, [25, 5, 1]) 9 waysmake_change(100, [100, 50, 25, 10, 5, 1]) 293 ways```Rewriting this "purely functionally", we have```Pythonmake_change = lambda target, coins: ((target == 0 and (1, 0)) or (not coins and (0, 0)) or (sum(map(lambda choice: make_change(target - choice, coins[1:]), range(0, target + 1, coins[0]))), 0))[0] Suppose that the only top-level function we are "allowed" to call is apply_to.apply_to = lambda f, args, kwargs: f(*args, **kwargs)map(apply_to, (make_change, make_change, make_change, make_change), zip((9, 31, 31, 100), ((5, 1), (25, 10, 5, 1), (25, 5, 1), (100, 50, 25, 10, 5, 1))), ({}, {}, {}, {}))```There's a lot going on in this functional version of the same code!Fun fact: In a formal functional language that does not support recursion, we can introduce recursion by adding a special higher-order function called the Y-combinator. SummaryPython supports functional programming paradigms, but as you can see, in some cases FP introduces unnecessary complexity.If you really enjoyed this section, read [Part 1](http://www.ibm.com/developerworks/linux/library/l-prog/index.html), [Part 2](http://www.ibm.com/developerworks/linux/library/l-prog2/index.html), and [Part 3](http://www.ibm.com/developerworks/linux/library/l-prog3/index.html) of IBM's articles on FP in Python. IteratorsRecall from class than an iterator is an object that represents a stream of data delivered one value at a time. Iterator ConsumptionSuppose the following two lines of code have been run:```Pythonit = iter(range(100))67 in it => True```What is the result of running each of the following lines of code?```Pythonnext(it) => ??37 in it => ??next(it) => ??```With a partner, discuss why we see these results.it = iter(range(100)) 67 in it # => True print(next(it)) # => ?? print(37 in it) # => ?? print(next(it)) # => ??Module: `itertools`Python ships with a spectacular module for manipulating iterators called `itertools`. Take a moment to read through the [documentation page for itertools](https://docs.python.org/3/library/itertools.html).Predict the output of the following pieces of code:```Pythonimport itertoolsimport operatorfor el in itertools.permutations('XKCD', 2): print(el, end=', ')for el in itertools.cycle('LO'): print(el, end='') Don't run this one. Why not?itertools.starmap(operator.mul, itertools.zip_longest([3,5,7],[2,3], fillvalue=1))```import itertools import operator for el in itertools.permutations('XKCD', 2): print(el, end=', ') print() # for el in itertools.cycle('LO'): # print(el, end='') # Don't run this one. Why not? print(list(itertools.starmap(operator.mul, itertools.zip_longest([3,5,7],[2,3], fillvalue=1))))Linear Algebra (Challenge)These challenge problems test your ability to write compact Python functions using the tools of functional programming and some good old-fashioned cleverness. 
As always, these challenge problems are optional, and are much harder than the rest of the lab. These challenge problems also focus heavily on linear algebra, so if you are less familiar with linear algebra concepts, we recommend that you skip over this portion.Also, Python has incredible library support for working with these mathematical concepts through a package named `numpy`, so we will almost never write linear algebra code from scratch. Dot ProductWrite a one-liner in Python that takes the dot product of two lists `u` and `v`. You can assume that the lists are the same size, and are standard Python lists (not anything special, like `numpy.ndarray`s). For example, `dot_product([1, 3, 5], [2, 4, 6])` should return `44` (since `1 * 2 + 3 * 4 + 5 * 6 = 44`).def dot_product(u, v): """Return the dot product of two equal-length lists of numbers.""" passMatrix TranspositionWrite a one-liner in Python to transpose a matrix. Assume that the input matrix is a tuple-of-tuples that represents a valid matrix, not necessarily square. Again, do not use `numpy` or any other libraries - just raw data structure manipulation and our functional tools.Not only can you do this in one line - you can even do it in 14 characters!For example,```Pythonmatrix = ( (1, 2, 3, 4), (5, 6, 7, 8), (9,10,11,12))transpose(matrix) returns ( (1, 5, 9), (2, 6, 10), (3, 7, 11), (4, 8, 12) )```def transpose(m): """Return the transpose of a matrix represented as a rectangular tuple-of-tuples.""" passMatrix MultiplicationWrite another one-liner in Python to take the product of two matrices `m1` and `m2`. You can use the `dot_product` and `transpose` functions you already wrote.def matmul(m1, m2): """Return the matrix multiplication of two matrices as rectangular 2D tuples.""" passLazy GenerationRewrite your `transpose` and `matmul` functions above so that they are lazily evaluated. That is, rows (or columns) of the output matrix shouldn't be computed when the function is called. Generator ExpressionsRecall that generator expressions are a way to lazily compute values on the fly, without buffering the entire contents of the list in place.For each of the following scenarios, discuss with a partner whether it would be more appropriate to use a generator expression or a list comprehension:1. Searching for a given entity in the transformed entries of a 1TB (large!) database.2. Calculate cheap airfare using lots of journey-to-destination flight information.3. Finding the first palindromic Fibonacci number greater than 1,000,000.4. Determine all multi-word anagrams of user-supplied 1000-character-or-more strings (very expensive to do).5. Generate a list of names of Stanford students whose SUNet ID numbers are less than 5000000.6. Return a list of all startups within 50 miles of Stanford.The main takeaway is: if you only ever need to look at one element of the data stream at a time, generator expressions are probably the way to go. Generators Triangle GeneratorWrite a infinite generator that successively yields the triangle numbers `0, 1, 3, 6, 10, ...` which are formed by successively adding sequential positive integers (`3 = 1 + 2`, `6 = 1 + 2 + 3`, `10 = 1 + 2 + 3 + 4`, ...).def generate_triangles(): """Generate an infinite stream of triangle numbers.""" pass # Your implementation here g = generate_triangles() # Print the first 5 generated triangle numbers. 
Should be 0, 1, 3, 6, 10 for _ in range(5): print(next(g))Use your generator to write a function `triangles_under(n)` that prints out all triangle numbers strictly less than the parameter `n`.def triangles_under(n): """Print all triangle numbers less than an upper bound.""" pass triangles_under(1000)Functions in Data Structures (optional)Functions, as Python objects, can be stored inside data structures. As a consequence, we can do kooky things like implement a prime-generating function using an ever-growing collection of divisibility tests, with the `make_divisibility_test` function from lecture.```Pythondef make_divisibility_test(n): def divisible_by_n(m): return m % n == 0 return divisible_by_ndef primes_under(n): tests = [] for i in range(2, n): if not any(map(lambda test: test(i), tests)): tests.append(make_divisibility_test(i)) yield i```Take a moment to talk with a partner about what is even happening in this code block. How would you modify the code above to yield all composite numbers, rather than all prime numbers? Test your solution. What is the 1000th composite number?def make_divisibility_test(n): """Return a divisibility test for n.""" def divisible_by_n(m): return m % n == 0 return divisible_by_n def primes_under(n): """Generate all primes under an upper bound.""" tests = [] for i in range(2, n): if not any(map(lambda test: test(i), tests)): tests.append(make_divisibility_test(i)) yield i def primes_under(n): """Generate all composites under an upper bound.""" passNested Functions and ClosuresIn class, we saw that a function can be defined within the scope of another function. Recall from Week 3 that functions introduce new scopes via a new local symbol table. An inner function is only in scope inside of the outer function, so this type of function definition is usually only used when the inner function is being returned to the outside world.```Pythondef outer(): def inner(a): return a return innerf = outer()print(f) .inner at 0x1044b61e0>print(f(10)) => 10f2 = outer()print(f2) .inner at 0x1044b6268> (Different from above!)print(f2(11)) => 11```Why are the memory addresses different for `f` and `f2`? Discuss with a partner.def outer(): def inner(a): return a return inner f = outer() print(f) # .inner at 0x1044b61e0> print(f(10)) # => 10 f2 = outer() print(f2) # .inner at 0x1044b6268> (Different from above!) print(f2(11)) # => 11ClosureAs we saw above, the definition of the inner function occurs during the execution of the outer function. This implies that a nested function has access to the environment in which it was defined. Therefore, it is possible to return an inner function that remembers contents of the outer function, even after the outer function has completed execution. This model is referred to as a closure.```Pythondef make_adder(n): def add_n(m): Captures the outer variable `n` in a closure return m + n return add_nadd1 = make_adder(1)print(add1) .add_n at 0x103edf8c8>add1(4) => 5add1(5) => 6add2 = make_adder(2)print(add2) .add_n at 0x103ecbf28>add2(4) => 6add2(5) => 7```The information in a closure is available in the function's `__closure__` attribute. For example:```Pythonclosure = add1.__closure__cell0 = closure[0]cell0.cell_contents => 1 (this is the n = 1 passed into make_adder)``` As another example, consider the function:```Pythondef foo(a, b, c=-1, *d, e=-2, f=-3, **g): def wraps(): print(a, c, e, g) return wraps``` The `print` call induces a closure of `wraps` over `a`, `c`, `e`, `g` from the enclosing scope of `foo`. 
Or, you can imagine that wraps "knows" that it will need `a`, `c`, `e`, and `g` from the enclosing scope, so at the time `wraps` is defined, Python takes a "screenshot" of these variables from the enclosing scope and stores references to the underlying objects in the `__closure__` attribute of the `wraps` function.```Pythonw = foo(1, 2, 3, 4, 5, e=6, f=7, y=2, z=3)list(map(lambda cell: cell.cell_contents, w.__closure__)) => [1, 3, 6, {'y': 2, 'z': 3}]```What happens in the following situation? Why?```Pythondef outer(l): def inner(n): return l * n return inner l = [1, 2, 3]f = outer(l)print(f(3)) => ??l.append(4)print(f(3)) => ??```def outer(l): def inner(n): return l * n return inner l = [1, 2, 3] f = outer(l) print(f(3)) # => ?? l.append(4) print(f(3)) # => ??Working with the FMNIST dataset# Setting seeds to try and ensure we have the same results - this is not guaranteed across PyTorch releases. import torch torch.manual_seed(0) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False import numpy as np np.random.seed(0) from torchvision import datasets, transforms import torch.nn.functional as F from torch import nn mean, std = (0.5,), (0.5,) # Create a transform and normalise data transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std) ]) # Download FMNIST training dataset and load training data trainset = datasets.FashionMNIST('~/.pytorch/FMNIST/', download=True, train=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) # Download FMNIST test dataset and load test data testset = datasets.FashionMNIST('~/.pytorch/FMNIST/', download=True, train=False, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False) class FMNIST(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(784, 128) self.fc2 = nn.Linear(128,64) self.fc3 = nn.Linear(64,10) def forward(self, x): x = x.view(x.shape[0], -1) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) x = F.log_softmax(x, dim=1) return x model = FMNIST() from torch import optim criterion = nn.NLLLoss() optimizer = optim.SGD(model.parameters(), lr=0.01) num_epochs = 3 for i in range(num_epochs): cum_loss = 0 for images, labels in trainloader: optimizer.zero_grad() output = model(images) loss = criterion(output, labels) loss.backward() optimizer.step() cum_loss += loss.item() print(f"Training loss: {cum_loss/len(trainloader)}") %matplotlib inline import matplotlib.pyplot as plt images, labels = next(iter(testloader)) test_image_id = 52 img = images[test_image_id].view(1, 784) with torch.no_grad(): logps = model(img) ps = torch.exp(logps) nps = ps.numpy()[0] FMNIST_labels = ['T-shirt/top','Trouser','Pullover','Dress','Coat','Sandal','Shirt','Sport Shoes','Bag','Ankle Boot'] plt.xticks(np.arange(10),labels=FMNIST_labels,rotation='vertical') plt.bar(np.arange(10), nps) def denormalize(tensor): tensor = tensor*0.5 + 0.5 return tensor img = img.view(28,-1) img = denormalize(img) plt.imshow(img,cmap='gray')Exercise 4: Creating Line Graphs Using Different Libraries%matplotlib inline import numpy as np X = np.arange(0,100) Y = np.random.randint(0,200, size=X.shape[0]) import matplotlib.pyplot as plt plt.plot(X, Y) import pandas as pd df = pd.DataFrame({'x':X, 'y_col':Y}) plt.plot('x', 'y_col', data=df) df.plot('x', 'y_col') import seaborn as sns sns.lineplot(X, Y) sns.lineplot('x', 'y_col', data=df)from google.colab import files uploaded = files.upload() import pandas as pd 
import matplotlib.pyplot as plt import numpy as np %matplotlib inline headers_list= ["Date","Rate"] data = pd.read_csv('dataproj.csv', delimiter = ';' , index_col= 0, decimal=",", parse_dates=["Date"], names = headers_list) data.head() data = data.astype("float") data.plot() plt.title("United States Unemployment Rate [2000 - 2020]") plt.xlabel("Date") plt.ylabel("Unemployment Rate [%]") import pandas as pd import matplotlib.pyplot as plt import numpy as np %matplotlib inline headers_list = ["Date", "Unemployment Rate"] data_two = pd.read_csv('data20192020proj.csv', delimiter = ';' , index_col= 0, names= headers_list, parse_dates=["Date"], decimal=",") data_two.head() data_two.plot() plt.title("United States Unemployment Rate [2019 - 2020]") plt.xlabel("Date") plt.ylabel("Unemployment Rate [%]") from google.colab import files uploaded = files.upload() data_three = pd.read_csv('UE_19:20.csv', delimiter = ';' , index_col= 0, decimal=",") print(data_three) import numpy as np N = 4 year_one = (3.4, 5.6, 4.2, 2.8) year_two = (7.9, 13.2,11.2,10.6) ind = np.arange(N) width = 0.35 plt.bar(ind, year_one, width, label='2019') plt.bar(ind + width, year_two, width,label='2020') plt.ylabel('Unemployment Rate [%]') plt.title('US Unemployment Rate by race or ethnicity [3rd Quarter 2019 vs. 3rd Quarter 2020]') plt.xticks(ind + width / 2, ('White', 'African-American', 'Hispanic', 'Asian')) plt.show()FunctionDefines a function called myDate.The program must prompt the user for the input to the function, namely is three integers (nDay, nMonth, nYear). The output from the function must be a string message describing your birthday. The function call theMessage = myDate (nDay, nMonth, nYear), must be following by a print(theMessage).def myDate(nDay:int, nMonth:int, nYear:int): sMonth = 'XXX' months = [ 'January','February','March','April','May','June','July', 'August','September','October','November','December' ] if 1 <= nMonth <= 12: print ("The month is", months[nMonth-1]) sMonth = months[nMonth-1] else: print ("Value is out of the range") return ('You were born on %d %s %d' %(nDay, sMonth, nYear)) nDay = int(input("On which day were you born? ")) nMonth = int(input("In which month were you born? ")) nYear = int(input("In which year were you born? 
")) theMessage = myDate(nDay, nMonth, nYear) print(theMessage)Test statistic: Roundobserved_statistic = abs ( 100 * (423 / 556) - 75) observed_statistic sample_size = 556 mendel_proportions = make_array(0.75, 0.25) # From Mendel's law mendel_proportion_round = mendel_proportions.item(0) def one_simulated_distance(): sample_proportion_round = sample_proportions(556, mendel_proportions).item(0) return 100 * abs(sample_proportion_round - mendel_proportion_round) repetitions = 10000 distances = make_array() for i in np.arange(repetitions): distances = np.append(distances, one_simulated_distance())Null hypothesis is that Mendel's model explains observed dataTable().with_column( 'Distance between Sample % and 75%', distances ).hist() plots.title('Prediction Made by the Null Hypothesis'); Table().with_column( 'Distance between Sample % and 75%', distances ).hist() plots.ylim(-0.02, 0.4) plots.title('Prediction Made by the Null Hypothesis') plots.scatter(observed_statistic, 0, color='red', s=60) different_observed_statistic = 3.2 plots.scatter(different_observed_statistic, 0, color='green', s=60); np.count_nonzero(distances >= observed_statistic) / repetitions np.count_nonzero(distances >= different_observed_statistic) / repetitionsexample with mask# # more advanced with mask # mask = cv2.imread('mask.png') # mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB) # augmented = transform(image=image, mask=mask) # augmented_image = augmented('image') # augmented_mask = augmented('mask')example with bbox augmentationtransform = A.Compose([ A.RandomCrop(1024, 1024), A.HueSaturationValue(p= 0.3) ], bbox_params={'format': 'coco'}) image = cv2.imread('./images/test/A/A0.jpeg') image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) bboxes = read_bboxes_from_file(...) # [x, y, width, height, label] for coco augmented = transform(image=image, bboxes=bboxes) augmented_image = augmented('image') augmented_bboxes = augmented('bboxes')example with keypoints augmentation# transform = A.Compose([ # A.RandomCrop(1024, 1024), # A.HueSaturationValue(p= 0.3) # ], keypoint_params={'format': 'xy'}) # image = cv2.imread('./images/test/A/A0.jpeg') # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # keypoints = read_bboxes_from_file(...) 
# Supported augmentations: # Size augmentations (Resize, SmallestMaxSize, PadIfNeeded) # Crop (RandomCrop, CenterCrop, RandomSizedCrop) # Color augmentations (RGBShift, HueSaturationValue, RandomRotate90, Flip, IAAPerspective) # Blur augmentations (Blur, MotionBlur, MedianBlur, GlassBlur, ISONoise, MultiplicativeNoise) # Distortion (OpticalDistortion, GridDistortion, ElasticTransform) # Weather effects (RandomFog, RandomRain, RandomSunFlare) # Other (Normalize, ToTensor, Lambda)Creating a custom augmentation pipelinetransform = A.Compose( [ A.RandomBrightnessContrast(brightness_limit=0.1, contrast_limit=0.1), A.RandomGamma(gamma_limit=(90, 110)), A.ShiftScaleRotate(scale_limit=0.1, rotate_limit=10), A.Transpose(), A.RandomRotate90(), A.MaskDropout(max_objects=10), A.OneOf([A.NoOp(), A.MultiplicativeNoise(), A.GaussNoise(), A.ISONoise()]), A.OneOf( [ A.NoOp(p=0.8), A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=10), A.RGBShift(r_shift_limit=10, g_shift_limit=10, b_shift_limit=10), ], p=0.2, ), A.OneOf([A.ElasticTransform(), A.GridDistortion(), A.NoOp()]) ])Augmentation exampleimage = cv2.imread('./images/test/A/A0.jpeg') mask = cv2.imread('anotherfile.tif') transform = A.Compose([ A.RandomSizedCrop((400, 600), 512, 512), A.ShiftScaleRotate(), A.RGBShift(), A.Blur(), A.GaussNoise(), A.ElasticTransform(), A.MaskDropout((10,15)), A.CoarseDropout() ]) data = transform(image=image, mask=mask)Example kaggle competition winner pipeline# image = ... #[H,W,3] # masks = ... # [H,W] # bboxes = ... # [N, 4] (x,y,w,h) # labels = ... # [N] # transform = A.Compose([ # A.RGBShift(), # A.InvertImg(), # A.Blur(), # A.GaussNoise(), # A.Flip(), # A.RandomRotate90(), # A.RandomSizedCrop((400, 600), 512, 512) # Min-max height, width # ], # bbox_params={ # 'format':'coco', # 'label_fields': ['nuclei_type'] # }) # data = transform(image=image, mask=mask, # bboxes=bboxes, nuclei_type=labels)Very heavy augmentation examplefrom matplotlib import pyplot as plt import pandas as pd import numpy as np image = cv2.imread('./images/test/A/A0.jpeg') image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image.shape[1] plt.imshow(image) image.shape bboxes = np.genfromtxt('./labels/A0.txt') bboxes = list(bboxes) bboxes = bboxes[1:] # drop the leading class id, keep [x_center, y_center, width, height] (normalized) bboxes bb_width = bboxes[2] * image.shape[1] bb_width bb_height = bboxes[3] * image.shape[0] bb_height img_ht = image.shape[0] img_ht img_wd = image.shape[1] img_wd x_min = (img_wd * bboxes[0]) - (bb_width/2) x_min x_max = (img_wd * bboxes[0]) + (bb_width/2) x_max y_min = img_ht * bboxes[1] - (bb_height/2) y_min y_max = img_ht * bboxes[1] + (bb_height/2) y_max bboxes = [[458, 2308, 1104, 2954]] # [x_min, x_max, y_min, y_max] in pixels bboxes[0][0] BOX_COLOR = (255, 0, 0) # Red TEXT_COLOR = (255, 255, 255) # White
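Before the visualization helpers below, it may help to see the box-format arithmetic above gathered into one place. The sketch below is a hypothetical helper (not part of the original notebook) that converts a YOLO-style normalized box into the pixel corner coordinates used for drawing:

```python
# Hypothetical helper: YOLO (x_center, y_center, w, h), normalized to [0, 1],
# converted to pixel corner coordinates (x_min, y_min, x_max, y_max).
def yolo_to_corners(box, img_w, img_h):
    x_c, y_c, w, h = box
    bb_w, bb_h = w * img_w, h * img_h          # box size in pixels
    x_min = x_c * img_w - bb_w / 2             # center minus half the width
    y_min = y_c * img_h - bb_h / 2             # center minus half the height
    return int(x_min), int(y_min), int(x_min + bb_w), int(y_min + bb_h)

# Example with made-up numbers:
# yolo_to_corners((0.31, 0.45, 0.25, 0.40), img_w=4000, img_h=4500)
```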
def visualize_bbox(img, bbox, class_name, color=BOX_COLOR, thickness=20): """Visualizes a single bounding box on the image""" x_min, y_min, w, h = bbox x_min, x_max, y_min, y_max = bboxes[0] # NOTE: overrides the bbox argument with the module-level bboxes[0], which is already in corner format # x_min, x_max, y_min, y_max = int(x_min), int(x_min + w), int(y_min), int(y_min + h) cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color=color, thickness=thickness) ((text_width, text_height), _) = cv2.getTextSize(class_name, cv2.FONT_HERSHEY_SIMPLEX, 10.35, 10) cv2.rectangle(img, (x_min, y_min - int(1.3 * text_height)), (x_min + text_width, y_min), BOX_COLOR, -1) cv2.putText( img, text=class_name, org=(x_min, y_min - int(0.3 * text_height)), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=10.35, color=TEXT_COLOR, thickness=10, lineType=cv2.LINE_AA, ) return img def visualize(image, bboxes, category_ids, category_id_to_name): img = image.copy() for bbox, category_id in zip(bboxes, category_ids): class_name = category_id_to_name[category_id] img = visualize_bbox(img, bbox, class_name) plt.figure(figsize=(12, 12)) plt.axis('off') plt.imshow(img) category_ids = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] # We will use the mapping from category_id to the class name # to visualize the class label for the bounding box on the image category_id_to_name = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H', 8: 'I', 9: 'J' } visualize(image, bboxes, category_ids, category_id_to_name) transform = A.Compose([ A.LongestMaxSize(max_size=1024), A.PadIfNeeded(min_height=1024, min_width=1024, p=1.0), A.ShiftScaleRotate(shift_limit=.15, scale_limit=0.2, p=0.3), A.RandomSizedCrop((900, 1000), 1024, 1024, p=.2), A.HorizontalFlip(p=.5), A.Rotate(limit=30,p=.3), A.MultiplicativeNoise(p=.2), A.OneOf([ A.OpticalDistortion(p=0.2), A.GridDistortion(distort_limit=.1,p=0.15), A.IAAPiecewiseAffine(p=0.3), ], p=0.2), A.RGBShift(r_shift_limit=30, g_shift_limit=30, b_shift_limit=30), A.Blur(blur_limit=20, p=.25), A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3), A.HueSaturationValue(p=.2), A.ElasticTransform(alpha=.8, sigma=20,p=.3), A.CoarseDropout(max_holes=9,min_width=30, max_width=250, min_height=30, max_height=250,p=.2), A.OneOf([A.IAAAdditiveGaussianNoise(), A.GaussNoise()], p=0.2), A.OneOf([ A.MotionBlur(p=0.2), A.MedianBlur(blur_limit=3, p=0.1), A.Blur(blur_limit=3, p=0.1)], p=0.2), A.OneOf([ A.IAASharpen(), A.IAAEmboss(), A.RandomBrightnessContrast(), A.RandomGamma(), A.ToGray()], p=0.3), ], bbox_params=A.BboxParams(format='yolo', label_fields=['category_ids']) ) image = cv2.imread('./images/test/A/A0.jpeg') image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) bboxes = np.genfromtxt('./labels/A0.txt') # YOLO label file: [class, x_center, y_center, width, height], normalized augmented = transform(image=image, bboxes=bboxes, category_ids=category_ids) augmented_image = augmented['image'] augmented_bboxes = augmented['bboxes'] bboxes for i in range(20): transformed = transform(image=image, bboxes=bboxes, category_ids=category_ids) visualize(transformed['image'], transformed['bboxes'], transformed['category_ids'], category_id_to_name) transformed = transform(image=image, bboxes=bboxes, category_ids=category_ids) visualize( transformed['image'], transformed['bboxes'], transformed['category_ids'], category_id_to_name, )1. Load model%run -i convergence_analysis.py -a sparsescatnet --scattering-J 4 --scattering-order2 --scattering-wph \ --L-kernel-size 3 --dictionary-size 2048 --L-proj-size 256 --nb-images 5120 -b 128 --n-iterations 12 \ --model-checkpoint path/to/checkpoint -j 10 path/to/ImageNet2. 
Compute loss and convergence curvesfrom convergence_analysis import compute_conv_model, compute_conv_FISTA, compute_conv_ISTA n_iterations = model.module.istc.n_iterations loss_curve_FISTA, conv_curve_FISTA = compute_conv_FISTA(model, val_loader, dictionary, 50) loss_curve_ISTA, conv_curve_ISTA = compute_conv_ISTA(model, val_loader, dictionary, 50) loss_curve_model, conv_curve_model, support_incl, support_diff, support_size_x_star, support_size_x_star_curve, \ support_size_model = compute_conv_model(model, val_loader, dictionary, w_matrix)Relative lossesloss_ISTC = loss_curve_model[n_iterations-1]/loss_curve_FISTA[-1] loss_FISTA = loss_curve_FISTA[n_iterations-1]/loss_curve_FISTA[-1] loss_ISTA = loss_curve_ISTA[n_iterations-1]/loss_curve_FISTA[-1] print(loss_ISTC) print(loss_ISTA) print(loss_FISTA)1.0111119050543114 1.1192425504531223 1.0478478228577026Loss curvesimport numpy as np import matplotlib.pyplot as plt plt.figure(1, figsize=(5,5)) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.plot(np.arange(30), loss_curve_FISTA[:30], label='FISTA') plt.plot(np.arange(30), loss_curve_ISTA[:30], label='ISTA') plt.plot(np.arange(12), loss_curve_model, label='ISTC') plt.ylim((0.32, loss_curve_ISTA.max()+0.04)) plt.yticks(np.arange(0.35, 0.54, step=0.05)) plt.legend(fontsize=12) plt.show()Support and MSE curvesplt.figure(1) plt.plot(np.arange(n_iterations), support_incl, label='Support incl.') plt.plot(np.arange(n_iterations), support_diff, label='Support diff.') plt.legend() plt.figure(2) plt.plot(np.arange(n_iterations), support_size_x_star, label='Support FISTA') plt.plot(np.arange(n_iterations), support_size_model, label='Support ISTC') plt.legend() plt.figure(3) plt.plot(np.arange(50), support_size_x_star_curve[:50], label='Support size FISTA') plt.legend() plt.figure(4) plt.plot(np.arange(n_iterations), conv_curve_model, label='MSE ISTC') plt.legend() plt.show()Print MSEprint(conv_curve_model[n_iterations-1]) print(conv_curve_ISTA[n_iterations-1]) print(conv_curve_FISTA[n_iterations-1])0.016979753105918167 0.447909765866239 0.234507540252732323. Coherencegram_W_T_D = torch.matmul(model.module.istc.w_weight.data[..., 0, 0].t(), model.module.istc.dictionary_weight.data[..., 0, 0]).cpu() gram_W_T_D = torch.abs(gram_W_T_D) shape = gram_W_T_D.shape[0] for i in range(shape): gram_W_T_D[i, i] = 0 max_coherence = torch.max(gram_W_T_D) support_size = support_size_model[n_iterations-1] print(support_size) print(max_coherence) # \mu s print(max_coherence*support_size)79.10463966836734 tensor(0.7915) tensor(62.6137)Running Sum and First Difference Algorithms Calculus-like OperationsConvolution can change discrete signals in ways that resemble integration and differentiation. Since the terms "derivative" and "integral" specifically refer to operations on continuous signals, other names are given to their discrete counterparts. The discrete operation that mimics the first derivative is called the first difference. Likewise, the discrete form of the integral is called the running sum. 
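Concretely, both definitions translate into a few lines of NumPy. The sketch below is one possible implementation of the exercise stubs that appear further down; the boundary choice y[0] = x[0] (i.e. assuming x[-1] = 0) is our assumption, not something fixed by the notebook:

```python
import numpy as np

# One possible implementation of the two operations defined above.
def first_difference(x):
    """y[n] = x[n] - x[n-1], with y[0] = x[0] (assuming x[-1] = 0)."""
    x = np.asarray(x, dtype=float).ravel()
    y = np.empty_like(x)
    y[0] = x[0]
    y[1:] = x[1:] - x[:-1]
    return y.reshape(-1, 1)

def running_sum(x):
    """y[n] = x[n] + y[n-1], i.e. a cumulative sum."""
    x = np.asarray(x, dtype=float).ravel()
    return np.cumsum(x).reshape(-1, 1)
```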
It is also common to hear these operations called the discrete derivative and the discrete integral, although mathematicians frown when they hear these informal terms used.import sys sys.path.insert(0, '../../../') import numpy as np import matplotlib.pyplot as plt from Common import common_plots cplots = common_plots.Plot() file = {'x':'Signals/InputSignal_f32_1kHz_15kHz.dat'} x = np.loadtxt(file['x']) N,M = x.shape x = x.reshape(N*M, 1) cplots.plot_single(x.T, title='x[n]', style='line')First DifferenceThis is the discrete version of the first derivative. Each sample in the output signal is equal to the difference between adjacent samples in the input signal. In other words, the output signal is the slope of the input signal.$$ y[n] = x[n] - x[n-1]$$def first_difference(x): """ Function that calculates the first difference of an input signal x using the recursive equation y[n]=x[n]-x[n-1]. Parameters: x (numpy array): Array of numbers representing the input signal. Returns: numpy array: Returns first difference of input signal x. """ pass x_diff = first_difference(x) plt.rcParams["figure.figsize"] = (15,5) plt.subplot(1,2,1) cplots.plot_single(x.T, title='Input Signal', style='line') plt.subplot(1,2,2) cplots.plot_single(x_diff.T, title='First Difference', style='line')Running SumThis is the discrete version of the integral. Each sample in the output signal is equal to the sum of all samples in the input signal to the left.$$ y[n] = x[n] + y[n-1]$$def running_sum(x): """ Function that calculates the running sum of an input signal x using the recursive equation y[n]=x[n]+y[n-1]. Parameters: x (numpy array): Array of numbers representing the input signal. Returns: numpy array: Returns running sum of input signal x. """ pass x_sum = running_sum(x) plt.rcParams["figure.figsize"] = (15,5) plt.subplot(1,2,1) cplots.plot_single(x.T, title='Input Signal', style='line') plt.subplot(1,2,2) cplots.plot_single(x_sum.T, title='Running Sum', style='line')Exercise: Add your functions to your Convolve classAs an exercise you will add your `first_difference` and `running_sum` functions to the class `Convolve`.from Common import convolution convolve = convolution.Convolve() cplots.plot_single(convolve.first_difference(x).T, title='First Difference', style='line') cplots.plot_single(convolve.running_sum(x).T, title='Running Sum', style='line')Pytorch + HuggingFace KoElectra ModelUses Jangwon Park's KoELECTRA-small: https://monologg.kr/2020/05/02/koelectra-part1/ https://github.com/monologg/KoELECTRA DatasetNaver movie review dataset (NSMC): https://github.com/e9t/nsmc References- https://huggingface.co/transformers/training.html- https://tutorials.pytorch.kr/beginner/data_loading_tutorial.html- https://tutorials.pytorch.kr/beginner/blitz/cifar10_tutorial.html- https://wikidocs.net/44249 Note: be sure to run this on a GPU - one epoch takes roughly 20 minutes.# Install HuggingFace transformers and download the NSMC dataset !pip install transformers !wget https://raw.githubusercontent.com/e9t/nsmc/master/ratings_test.txt !wget https://raw.githubusercontent.com/e9t/nsmc/master/ratings_train.txt !head ratings_train.txt !head ratings_test.txt import pandas as pd import torch from torch.nn import functional as F from torch.utils.data import DataLoader, Dataset from transformers import AutoTokenizer, ElectraForSequenceClassification, AdamW from tqdm.notebook import tqdm # Use the GPU device = torch.device("cuda")Create and load the Dataset
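Before the full `Dataset` class below, here is a small standalone look at what the tokenizer call inside `__getitem__` produces. The example sentence is made up, and `padding='max_length'` is the newer spelling of the `pad_to_max_length=True` flag used in the class; treat this as an illustrative sketch rather than part of the original notebook:

```python
# Standalone illustration of the tokenizer output (made-up review text).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("monologg/koelectra-small-v2-discriminator")
enc = tokenizer(
    "이 영화 정말 재미있어요",   # "This movie is really fun" (made-up example)
    return_tensors='pt',
    truncation=True,
    max_length=256,
    padding='max_length',
)
print(enc['input_ids'].shape)       # torch.Size([1, 256])
print(enc['attention_mask'].shape)  # torch.Size([1, 256])
```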
class NSMCDataset(Dataset): def __init__(self, csv_file): # Some rows contain NaN values... self.dataset = pd.read_csv(csv_file, sep='\t').dropna(axis=0) # Remove duplicate reviews self.dataset.drop_duplicates(subset=['document'], inplace=True) self.tokenizer = AutoTokenizer.from_pretrained("monologg/koelectra-small-v2-discriminator") print(self.dataset.describe()) def __len__(self): return len(self.dataset) def __getitem__(self, idx): row = self.dataset.iloc[idx, 1:3].values text = row[0] y = row[1] inputs = self.tokenizer( text, return_tensors='pt', truncation=True, max_length=256, pad_to_max_length=True, add_special_tokens=True ) input_ids = inputs['input_ids'][0] attention_mask = inputs['attention_mask'][0] return input_ids, attention_mask, y train_dataset = NSMCDataset("ratings_train.txt") test_dataset = NSMCDataset("ratings_test.txt")id label count 1.461820e+05 146182.000000 mean 6.779186e+06 0.498283 std 2.919223e+06 0.499999 min 3.300000e+01 0.000000 25% 4.814832e+06 0.000000 50% 7.581160e+06 0.000000 75% 9.274760e+06 1.000000 max 1.027815e+07 1.000000 id label count 4.915700e+04 49157.000000 mean 6.752945e+06 0.502695 std 2.937158e+06 0.499998 min 6.010000e+02 0.000000 25% 4.777143e+06 0.000000 50% 7.565415e+06 1.000000 75% 9.260204e+06 1.000000 max 1.027809e+07 1.000000Create Modelmodel = ElectraForSequenceClassification.from_pretrained("monologg/koelectra-small-v2-discriminator").to(device) # Try a single forward pass # text, attention_mask, y = train_dataset[0] # model(text.unsqueeze(0).to(device), attention_mask=attention_mask.unsqueeze(0).to(device)) model.load_state_dict(torch.load("model.pt")) # Load a previously saved checkpoint (skip on a first run) # Inspect the model layers modelLearnepochs = 3 batch_size = 128 optimizer = AdamW(model.parameters(), lr=1e-5) train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) test_loader = DataLoader(test_dataset, batch_size=16, shuffle=True) losses = [] accuracies = [] for i in range(epochs): total_loss = 0.0 correct = 0 total = 0 batches = 0 model.train() for input_ids_batch, attention_masks_batch, y_batch in tqdm(train_loader): optimizer.zero_grad() y_batch = y_batch.to(device) y_pred = model(input_ids_batch.to(device), attention_mask=attention_masks_batch.to(device))[0] loss = F.cross_entropy(y_pred, y_batch) loss.backward() optimizer.step() total_loss += loss.item() _, predicted = torch.max(y_pred, 1) correct += (predicted == y_batch).sum() total += len(y_batch) batches += 1 if batches % 100 == 0: print("Batch Loss:", total_loss, "Accuracy:", correct.float() / total) losses.append(total_loss) accuracies.append(correct.float() / total) print("Train Loss:", total_loss, "Accuracy:", correct.float() / total) losses, accuraciesCheck accuracy on the test datasetmodel.eval() test_correct = 0 test_total = 0 for input_ids_batch, attention_masks_batch, y_batch in tqdm(test_loader): y_batch = y_batch.to(device) y_pred = model(input_ids_batch.to(device), attention_mask=attention_masks_batch.to(device))[0] _, predicted = torch.max(y_pred, 1) test_correct += (predicted == y_batch).sum() test_total += len(y_batch) print("Accuracy:", test_correct.float() / test_total) # Save the model torch.save(model.state_dict(), "model.pt")Importação de bibliotecasimport pandas as pd import statistics as stats import seaborn as sns import numpy as np import matplotlib.pyplot as plt %matplotlib inlineRecebendo arquivos csvaux = pd.read_csv('athlete_events.csv') regions = pd.read_csv('datasets_31029_40943_noc_regions.csv')Verificando o arquivo auxaux.head(3)Verificando o arquivo regionsregions.head(3) aux.describe()Adicionando os dois arquivos em um unico dataframedata = pd.merge(aux, regions, how='left') data.head()Verificando as colunas do arquivo e traduzindo 
para pt_BRdata.columns data.columns = ['ID','Nome','Sexo','Idade','Altura','Peso','Time','Sigla','Jogos','Ano','Temporada','Cidade','Esporte','Evento','Medalha','Região','Notas'] data.head(3) data['Temporada'].replace({'Summer':'Verão','Winter':'Inverno'},inplace=True) data.tail(2)Verificando a quantidade de dados faltando em cada classedata.isnull().sum() plt.hist(data['Idade'].dropna(),bins=20) # Plot grafico: idades x n_atletas , excluindo idades faltando plt.title('Distribuição das Idades') plt.ylabel('Número de pessoas') plt.xlabel('Idades') plt.show() plt.hist(data['Sexo'].dropna()) plt.title('Distribuição de sexo') plt.show() plt.subplot(1,2,1) plt.hist(data['Idade'].dropna()) plt.title('Distribuição de idades') plt.subplot(1,2,2) plt.hist(data['Sexo'].dropna()) plt.title('Distribuição de sexo') plt.tight_layout() plt.show() plt.figure(figsize=(5,3),dpi=100) sns.boxplot(data=data,x='Sexo',y='Idade',hue='Temporada') # Pontos pretos são os outliersMulheres que participaram x anomulheres = data[(data.Sexo == 'F')] # Mulheres é um dataframe só com sexo feminino mulheres.head(3) sns.set(style='whitegrid') plt.figure(figsize=(20,10),dpi=100) sns.countplot(x='Ano',data=mulheres) plt.title('Mulheres na olimpiada') plt.show() mulheres['ID'].loc[mulheres['Ano']==2016].count() # Mostra que em 2016 ,6223 mulheres competiram nas olimpiadasTotal de Medalhas de Ouro por paísgold = data[(data.Medalha=='Gold')] gold.head(3) gold.Região.value_counts().reset_index(name='Medalha').head(10) total_gold = gold.Região.value_counts()\ .reset_index(name='Medalha').head(6) g = sns.catplot(x='index',y='Medalha', data=total_gold,height=6,kind='bar',palette='magma') g.set_xlabels('Top 6 Paises') g.set_ylabels('Número de medalhas') plt.title('Medalhas por país') plt.show()Distribuição de participantes(Sexo) por ano Kernel density estimation (kde plot)plt.figure(figsize=(8,5),dpi=80) sns.kdeplot(data['Ano'].loc[data['Sexo']=='F'],shade=True, color='deeppink',label='Mulheres',alpha=0.5) sns.kdeplot(data['Ano'].loc[data['Sexo']=='M'],shade=True, color='darkblue',label='Homens',alpha=0.7) plt.show()UnitsThis package provides unit conversion toolsfrom pmd_beamphysics import particle_paths from pmd_beamphysics.units import dimension_name from h5py import File import numpy as np # Open a file, find the particle paths from the root attributes # Pick one: #H5FILE = 'data/bmad_particles.h5' H5FILE = 'data/distgen_particles.h5' #H5FILE = 'data/astra_particles.h5' h5 = File(H5FILE, 'r') ppaths = particle_paths(h5) print(ppaths) # This points to a partigle group ph5 = h5[ppaths[0]] list(ph5) # Each component should have a dimension and a conversion factor to SI: d = dict(ph5['momentum/x'].attrs) d tuple(d['unitDimension']) # This will extract the name of this dimension dimension_name(d['unitDimension'])Nice arraysfrom pmd_beamphysics.units import nice_array # This will scale the array, and return the appropriate SI prefix x = 20e-9 unit = 'C' nice_array(x) nice_array(0)Tip. Click here to view this notebook on nbviewer.jupyter.org. These notebooks are better read there, as Github default viewer ignores some of the formatting and interactive content. Hands-on Computer Vision with TensorFlow 2by & (Packt Pub.) > Chapter 9: Performance and running on mobile Notebook 2:Comparing two Non-Maximum Supression (NMS) implementations In this chapter, we highlighted the importance of optimizing pre-processing and post-processing. This notebook compares two post-processing implementations. Tip. 
The notebooks shared on this git repository illustrate some of notions from the book "Hands-on Computer Vision with TensorFlow 2" written by and and published by Packt. If you enjoyed the insights shared here, please consider acquiring the book!The book provides further guidance for those eager to learn about computer vision and to harness the power of TensorFlow 2 and Keras to build performant recognition systems for object detection, segmentation, video processing, smartphone applications, and more. Leverage deep learning to create powerful image processing apps with TensorFlow 2 and Keras. Get the book for more insights! Source 1: https://gist.github.com/PREM1980/93ec1298bea0495feaae77c798a345f0Source 2: https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/import numpy as np import cv2 import datetime import numpy as np boxes = np.array([ (12, 84, 140, 236), (24, 85, 152, 213), (36, 86, 164, 211), (36, 86, 164, 211), (35, 86, 162, 210), (31, 86, 161, 211), (35, 86, 162, 210), (31, 87, 165, 210), (35, 86, 162, 211), (12, 96, 140, 224), (24, 96, 152, 225), (24, 96, 152, 223), (24, 96, 152, 227), (24, 96, 152, 221), (24, 96, 152, 229), (25, 96, 153, 224), (27, 96, 154, 224), (28, 96, 155, 224), (26, 96, 152, 224), (25, 108, 152, 212)]) # Felzenszwalb et al. def non_max_suppression_slow(boxes, overlapThresh): # print boxes # if there are no boxes, return an empty list if len(boxes) == 0: return [] # initialize the list of picked indexes pick = [] # grab the coordinates of the bounding boxes # print boxes x1 = boxes[:,0] y1 = boxes[:,1] x2 = boxes[:,2] y2 = boxes[:,3] # compute the area of the bounding boxes and sort the bounding # boxes by the bottom-right y-coordinate of the bounding box area = (x2 - x1 + 1) * (y2 - y1 + 1) # print 'area == ', area # print 'y2 == ', y2 idxs = np.argsort(y2) # print 'idxs == ', idxs # keep looping while some indexes still remain in the indexes # list while len(idxs) > 0: # grab the last index in the indexes list, add the index # value to the list of picked indexes, then initialize # the suppression list (i.e. indexes that will be deleted) # using the last index last = len(idxs) - 1 # print 'last == ', last i = idxs[last] pick.append(i) # print 'pick == ', pick suppress = [last] # print 'suppress == ', suppress # loop over all indexes in the indexes list for pos in range(0, last): # grab the current index j = idxs[pos] # print 'i === ', i # print 'j = ', j # find the largest (x, y) coordinates for the start of # the bounding box and the smallest (x, y) coordinates # for the end of the bounding box xx1 = max(x1[i], x1[j]) yy1 = max(y1[i], y1[j]) xx2 = min(x2[i], x2[j]) yy2 = min(y2[i], y2[j]) # compute the width and height of the bounding box w = max(0, xx2 - xx1 + 1) h = max(0, yy2 - yy1 + 1) # compute the ratio of overlap between the computed # bounding box and the bounding box in the area list overlap = float(w * h) / area[j] # if there is sufficient overlap, suppress the # current bounding box # print 'overlap == ', overlap if overlap > overlapThresh: suppress.append(pos) # print 'suppress /== ', suppress # delete all indexes from the index list that are in the # suppression list idxs = np.delete(idxs, suppress) # print 'final idxs == ', idxs # return only the bounding boxes that were picked # print 'final pick == ', pick return boxes[pick] # Malisiewicz et al. 
def non_max_suppression_fast(boxes, overlapThresh): # if there are no boxes, return an empty list if len(boxes) == 0: return [] # if the bounding boxes integers, convert them to floats -- # this is important since we'll be doing a bunch of divisions # print boxes.dtype.kind if boxes.dtype.kind == "i": boxes = boxes.astype("float") # initialize the list of picked indexes pick = [] # grab the coordinates of the bounding boxes x1 = boxes[:,0] y1 = boxes[:,1] x2 = boxes[:,2] y2 = boxes[:,3] # compute the area of the bounding boxes and sort the bounding # boxes by the bottom-right y-coordinate of the bounding box area = (x2 - x1 + 1) * (y2 - y1 + 1) # print area idxs = np.argsort(area) # print idxs # keep looping while some indexes still remain in the indexes # list while len(idxs) > 0: # grab the last index in the indexes list and add the # index value to the list of picked indexes # print 'hello' last = len(idxs) - 1 i = idxs[last] pick.append(i) # find the largest (x, y) coordinates for the start of # the bounding box and the smallest (x, y) coordinates # for the end of the bounding box xx1 = np.maximum(x1[i], x1[idxs[:last]]) yy1 = np.maximum(y1[i], y1[idxs[:last]]) xx2 = np.minimum(x2[i], x2[idxs[:last]]) yy2 = np.minimum(y2[i], y2[idxs[:last]]) # compute the width and height of the bounding box w = np.maximum(0, xx2 - xx1 + 1) h = np.maximum(0, yy2 - yy1 + 1) # compute the ratio of overlap overlap = (w * h) / area[idxs[:last]] # delete all indexes from the index list that have idxs = np.delete(idxs, np.concatenate( ( [last], np.where(overlap > overlapThresh)[0] ) ) ) # return only the bounding boxes that were picked using the # integer data type return boxes[pick].astype("int") x = [] slow_y, fast_y = [], [] all_boxes = np.array(boxes) for i in range(50): all_boxes = np.concatenate((all_boxes, boxes)) x.append(len(all_boxes)) start = datetime.datetime.now() for _ in range(1000): pick = non_max_suppression_slow(all_boxes, 0.8) end = datetime.datetime.now() delta = end - start slow_y.append(delta.total_seconds()) start = datetime.datetime.now() for _ in range(1000): pick = non_max_suppression_fast(all_boxes, 0.8) end = datetime.datetime.now() delta = end - start fast_y.append(delta.total_seconds()) from matplotlib import pyplot as plt fig, ax = plt.subplots() slow, = ax.plot(x, slow_y, label='Slow implementation') fast, = ax.plot(x, fast_y, label='Fast implementation') plt.legend(handles=[slow, fast]) ax.set(xlabel='Number of boxes', ylabel='Computation time in ms', title='Evolution of NMS computing time with the number of boxes') ax.grid() plt.show()One step univariate model - ARIMAIn this notebook, we demonstrate how to:- prepare time series data for training an ARIMA times series forecasting model- implement a simple ARIMA model the next HORIZON steps ahead (time *t+1* through *t+HORIZON*) in the time series- evaluate the model on a test datasetThe data in this example is taken from the GEFCom2014 forecasting competition1. It consists of 3 years of hourly electricity load and temperature values between 2012 and 2014. The task is to forecast future values of electricity load. 
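For orientation, the fit-then-forecast pattern used throughout this notebook boils down to the following schematic sketch. It uses toy data and the same (older) `statsmodels.tsa.arima_model.ARIMA` API that is imported below; that module has since been removed from newer statsmodels releases, so treat this as illustrative only:

```python
# Schematic sketch of the fit/forecast pattern used later in the notebook (toy data).
import numpy as np
from statsmodels.tsa.arima_model import ARIMA

rng = np.random.RandomState(0)
history = list(np.sin(np.linspace(0, 20, 200)) + 0.1 * rng.randn(200))  # stand-in for the load series
HORIZON = 3

model = ARIMA(history, order=(5, 1, 0))              # (p, d, q)
model_fit = model.fit(disp=0)
forecast, stderr, conf_int = model_fit.forecast(steps=HORIZON)
print(forecast)                                      # the next HORIZON values
```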
In this example, we show how to forecast one time step ahead, using historical load data only.1, , , , and , "Probabilistic energy forecasting: Global Energy Forecasting Competition 2014 and beyond", International Journal of Forecasting, vol.32, no.3, pp 896-913, July-September, 2016.import os import warnings import matplotlib.pyplot as plt import numpy as np import pandas as pd import datetime as dt from common.utils import load_data, mape from pandas.tools.plotting import autocorrelation_plot from statsmodels.tsa.arima_model import ARIMA from sklearn.preprocessing import MinMaxScaler %matplotlib inline pd.options.display.float_format = '{:,.2f}'.format np.set_printoptions(precision=2) warnings.filterwarnings("ignore") # specify to ignore warning messages demo = True if not os.path.exists(os.path.join('data', 'energy.csv')): # Download and move the zip file !wget https://www.dropbox.com/s/pqenrr2mcvl0hk9/GEFCom2014.zip !mv GEFCom2014.zip ./data # If not done already, extract zipped data and save as csv %run common/extract_data.pyLoad the data from csv into a Pandas dataframeenergy = load_data()[['load']] energy.head()Plot all available load data (January 2012 to Dec 2014)energy.plot(y='load', subplots=True, figsize=(15, 8), fontsize=12) plt.xlabel('timestamp', fontsize=12) plt.ylabel('load', fontsize=12) plt.show()Plot first week of July 2014energy['2014-07-01':'2014-07-07'].plot(y='load', subplots=True, figsize=(15, 8), fontsize=12) plt.xlabel('timestamp', fontsize=12) plt.ylabel('load', fontsize=12) plt.show()Create train, validation and test setsWe separate our dataset into train, validation and test sets. We train the model on the train set. The validation set is used to evaluate the model after each training epoch and ensure that the model is not overfitting the training data. After the model has finished training, we evaluate the model on the test set. We must ensure that the validation set and test set cover a later period in time from the training set, to ensure that the model does not gain from information from future time periods.We will allocate the period 1st November 2014 to 31st December 2014 to the test set. The period 1st September 2014 to 31st October is allocated to validation set. All other time periods are available for the training set.valid_start_dt = '2014-09-01 00:00:00' if(not demo): test_start_dt = '2014-11-01 00:00:00' else: test_start_dt = '2014-12-30 00:00:00' energy[energy.index < valid_start_dt][['load']].rename(columns={'load':'train'}) \ .join(energy[(energy.index >=valid_start_dt) & (energy.index < test_start_dt)][['load']] \ .rename(columns={'load':'validation'}), how='outer') \ .join(energy[test_start_dt:][['load']].rename(columns={'load':'test'}), how='outer') \ .plot(y=['train', 'validation', 'test'], figsize=(15, 8), fontsize=12) plt.xlabel('timestamp', fontsize=12) plt.ylabel('load', fontsize=12) plt.show()Data preparation Our data preparation for the training set will involve the following steps:1. Filter the original dataset to include only that time period reserved for the training set2. Scale the time series such that the values fall within the interval (0, 1) Create training set containing only the model featurestrain = energy.copy()[energy.index < test_start_dt][['load']] test = energy.copy()[energy.index >= test_start_dt][['load']] print(train.shape) print(test.shape)(26256, 1) (48, 1)Scale data to be in range (0, 1). This transformation should be calibrated on the training set only. 
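As a tiny illustration of what "calibrated on the training set only" means in practice (toy numbers, not the energy data):

```python
# Fit the scaler on training data only; apply the same scaling to everything else.
import numpy as np
from sklearn.preprocessing import MinMaxScaler

train_vals = np.array([[2.0], [4.0], [6.0]])
test_vals = np.array([[8.0]])

scaler = MinMaxScaler()
scaler.fit(train_vals)                        # min/max come from the training data only
print(scaler.transform(train_vals).ravel())   # [0.  0.5 1. ]
print(scaler.transform(test_vals).ravel())    # [1.5] -- test values can fall outside (0, 1)
```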
This is to prevent information from the validation or test sets leaking into the training data.scaler = MinMaxScaler() train['load'] = scaler.fit_transform(train) train.head(10)Original vs scaled data:energy[energy.index < valid_start_dt][['load']].rename(columns={'load':'original load'}).plot.hist(bins=100, fontsize=12) train.rename(columns={'load':'scaled load'}).plot.hist(bins=100, fontsize=12) plt.show()Implement ARIMA method An ARIMA model can be created using the statsmodels library. In the next section, we perform the following steps:1. Define the model by calling ARIMA() and passing in the p, d, and q parameters.2. The model is prepared on the training data by calling the fit() function.3. Predictions can be made by calling the forecast() function and specifying the number of steps (horizon) which to forecast# Specify the number os steps to forecast ahead HORIZON = 3Let’s look at an autocorrelation plot of the time series. The example below plots the autocorrelation for 48 lags in the time series.autocorrelation_plot(train[1:48]) plt.show()We can see that there is a significant positive correlation with the first 5 lags or so. That may be a good starting point for the AR parameter (p) of the model. The plot of energy load over time (see above) shows that the time series is not stationary due to its seasonality (daily peaks and also peaks in August and February due to the increased energy usage). This suggests the modeling might benefit from differencing the data, that is, making the data stationary. We show below what the data looks like when we difference the data by 24 hours.energy.copy()['load'].diff(periods=24).plot(y='load', figsize=(15, 8), fontsize=12) plt.xlabel('timestamp', fontsize=12) plt.ylabel('load', fontsize=12) plt.show()Selecting the best parameters for an ARIMA model can be somewhat subjective and time intesive, so we'll leave it as an exercise to the user. We'll use a set of parameters we found work reasonably well through trial and error.# fit model model_params = (5,1,0) model = ARIMA(train, order=model_params) model_fit = model.fit(disp=0) print(model_fit.summary().tables[1])================================================================================ coef std err z P>|z| [0.025 0.975] -------------------------------------------------------------------------------- const 6.335e-06 0.000 0.014 0.989 -0.001 0.001 ar.L1.D.load 1.2915 0.006 210.427 0.000 1.279 1.304 ar.L2.D.load -0.7687 0.010 -76.591 0.000 -0.788 -0.749 ar.L3.D.load 0.3532 0.011 32.445 0.000 0.332 0.374 ar.L4.D.load -0.1118 0.010 -11.137 0.000 -0.131 -0.092 ar.L5.D.load -0.1047 0.006 -17.058 0.000 -0.117 -0.093 ================================================================================Next we display the distribution of residuals. 
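(Relating back to the earlier note that order selection is left as an exercise: one lightweight, hypothetical way to compare a few candidate orders is by AIC, using `train` as defined above. This is a sketch, not the procedure used to pick (5, 1, 0).)

```python
# Hypothetical order search by AIC over a few candidate (p, d, q) values.
import itertools
from statsmodels.tsa.arima_model import ARIMA

results = {}
for order in itertools.product([1, 3, 5], [0, 1], [0, 1]):
    try:
        fit = ARIMA(train, order=order).fit(disp=0)   # 'train' as defined above
        results[order] = fit.aic
    except Exception:
        continue  # some orders fail to converge; skip them

if results:
    best_order = min(results, key=results.get)
    print(best_order, results[best_order])
```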
A zero mean in the residuals show that there is no bias in the prediction.# plot residual errors residuals = pd.DataFrame(model_fit.resid) residuals.plot() plt.show() residuals.plot(kind='kde') plt.show() print(residuals.describe())Evaluate the model Scale the test datatest['load'] = scaler.transform(test) test.head()Create a test data point for each HORIZON step.test_shifted = test.copy() for t in range(1, HORIZON): test_shifted['load+'+str(t)] = test_shifted['load'].shift(-t, freq='H') test_shifted = test_shifted.dropna(how='any') test_shifted.head(5)Make predictions on the test data%%time train_ts = train['load'] test_ts = test_shifted history = [x for x in train_ts] predictions = list() for t in range(test_ts.shape[0]): model = ARIMA(history, order=model_params) model_fit = model.fit(disp=0) output = model_fit.forecast(steps = HORIZON) yhat = output[0] predictions.append(yhat) obs = list(test_ts.iloc[t]) history.append(obs[0]) print(test_ts.index[t]) print(t+1, ': predicted =', yhat, 'expected =', obs)2014-12-30 00:00:00 1 : predicted = [0.33 0.31 0.32] expected = [0.32172573189522335, 0.29460708782742684, 0.2835130970724191] 2014-12-30 01:00:00 2 : predicted = [0.29 0.28 0.3 ] expected = [0.29460708782742684, 0.2835130970724191, 0.27950693374422186] 2014-12-30 02:00:00 3 : predicted = [0.29 0.31 0.34] expected = [0.2835130970724191, 0.27950693374422186, 0.30323574730354386] 2014-12-30 03:00:00 4 : predicted = [0.29 0.31 0.34] expected = [0.27950693374422186, 0.30323574730354386, 0.3759630200308166] 2014-12-30 04:00:00 5 : predicted = [0.29 0.31 0.33] expected = [0.30323574730354386, 0.3759630200308166, 0.4865947611710324] 2014-12-30 05:00:00 6 : predicted = [0.34 0.38 0.4 ] expected = [0.3759630200308166, 0.4865947611710324, 0.5630200308166409] 2014-12-30 06:00:00 7 : predicted = [0.45 0.51 0.54] expected = [0.4865947611710324, 0.5630200308166409, 0.598151001540832] 2014-12-30 07:00:00 8 : predicted = [0.58 0.65 0.68] expected = [0.5630200308166409, 0.598151001540832, 0.61201848998[...]Compare predictions to actual loadeval_df = pd.DataFrame(predictions, columns=['t+'+str(t) for t in range(1, HORIZON+1)]) eval_df['timestamp'] = test.index[0:len(test.index)-HORIZON+1] eval_df = pd.melt(eval_df, id_vars='timestamp', value_name='prediction', var_name='h') eval_df['actual'] = np.array(np.transpose(test_ts)).ravel() eval_df[['prediction', 'actual']] = scaler.inverse_transform(eval_df[['prediction', 'actual']]) eval_df.head()Compute the mean absolute percentage error over all predictionsif(HORIZON > 1): eval_df['APE'] = (eval_df['prediction'] - eval_df['actual']).abs() / eval_df['actual'] print(eval_df.groupby('h')['APE'].mean()) mape(eval_df['prediction'], eval_df['actual'])Plot the predictions vs the actuals for the first week of the test setif(HORIZON == 1): ## Plotting single step forecast eval_df.plot(x='timestamp', y=['actual', 'prediction'], style=['r', 'b'], figsize=(15, 8)) else: ## Plotting multi step forecast plot_df = eval_df[(eval_df.h=='t+1')][['timestamp', 'actual']] for t in range(1, HORIZON+1): plot_df['t+'+str(t)] = eval_df[(eval_df.h=='t+'+str(t))]['prediction'].values fig = plt.figure(figsize=(15, 8)) ax = plt.plot(plot_df['timestamp'], plot_df['actual'], color='red', linewidth=4.0) ax = fig.add_subplot(111) ax.plot(plot_df['timestamp'], plot_df['t+1'], color='blue', linewidth=4.0, alpha=0.75) ax.plot(plot_df['timestamp'], plot_df['t+2'], color='blue', linewidth=3.0, alpha=0.5) ax.plot(plot_df['timestamp'], plot_df['t+3'], color='blue', linewidth=2.0, alpha=0.25) 
ax.legend(loc='best') plt.xlabel('timestamp', fontsize=12) plt.ylabel('load', fontsize=12) plt.show()Bölüm 00: Python'a Giriş ında Çalışma Defteri HakkındaBu çalışma defteri Google'ın Jupyter Notebook platformuna benzer özellikler taşıyan Google Colab üzerinde oluşturulmuştur. Google Colab, herhangi bir altyapı düzenlemesine ihtiyaç duymadan Web tabanlı olarak Python kodları yazmanıza ve çalıştırmanıza imkan veren ücretsiz bir platformdur. Platform ile ilgili detaylı bilgiye [https://colab.research.google.com/notebooks/intro.ipynb](https://colab.research.google.com/notebooks/intro.ipynb) adresinden ulaşabilirsiniz.Python'a giriş seviyesinde 10 dersten oluşan bu çalışma defteri daha önce kodlama deneyimi olmayan öğrenenler için hazırlanmıştır. Etkileşimli yapısından dolayı hem konu anlatımlarının hem de çalıştırılabilir örneklerin bir arada olduğu bu yapı, sürekli olarak güncellenebilecek bir altyapıya sahiptir. Bu açıdan çalışma defterinin güncel sürümünü aşağıdaki adresten kontrol etmenizi tavsiye ederim.Sürüm 1.0: [Python 101](https://github.com/orcunmadran/Python101/blob/main/Python_101.ipynb)İyi çalışmalar ve başarılar :) Kullanım ŞartlarıBu çalışma defteri aşağıda belirtilen şartlar altında, katkıda bulunanlara Atıf vermek ve aynı lisansla paylaşmak kaydıyla ticari amaç dahil olmak üzere her şekilde dağıtabilir, paylaşabilir, üzerinde değişiklik yapılarak yeniden kullanılabilir.---![Atıf-AynıLisanslaPaylaş 4.0 Uluslararası Lisansı](https://i.creativecommons.org/l/by-sa/4.0/88x31.png)Bu çalışma defteri Jetbrains'in "Introduction to Python" dersi temel alınarak hazırlanmış ve Creative Commons [Atıf-AynıLisanslaPaylaş 4.0 Uluslararası Lisansı](http://creativecommons.org/licenses/by-sa/4.0/) ile lisanslanmıştır.--- Bölüm 01: GirişBu bölümde:* İlk bilgisayar programımız,* Yorumlar yer almaktadır. İlk Bilgisayar ProgramımızGeleneksel olarak herhangi bir programlama dilinde yazılan ilk program "Mer!"'dır. **Örnek Uygulama:**```print("Mer!")```# Örnek uygulamayı çalıştır print("Mer!")Merhaba Dünya!**Görev:** Kendinizi dünyaya tanıtacak ilk bilgisayar programını yazın!print("Merhaba Python")Merhaba PythonYorumlarPython'daki yorumlar "hash" karakteriyle başlar ve fiziksel çizginin sonuna kadar uzanır. Yorum yapmak için kullanılan "hash" karakteri kod satırlarını geçici olarak devre dışı bırakmak amacıyla da kullanılabilir. **Örnek Uygulama:**``` Bu ilk bilgisayar programım için ilk yorumumprint(" bu bir yorum değildir")print("Merhaba!") yorumlar kod satırının devamında da yapılabilir.print("Bu kod geçici olarak devre dışı bırakılmıştır.")```# Örnek uygulamayı çalıştır # Bu ilk bilgisayar programım için ilk yorumum print("# bu bir yorum değildir") print("Merhaba!") # yorumlar kod satırının devamında da yapılabilir. # print("Bu kod geçici olarak devre dışı bırakılmıştır.") #Python öğreniyorum print("#Python öğreniyorum")# bu bir yorum değildir Merhaba! #Python öğreniyorum**Görev:** Python kodunuza yeni bir yorum ekleyin, mevcut satıra yorum ekleyin, yazılmış olan bir kod satırını geçici olarak devre dışı bırakın!print("Bu satırın devamına bir yorum ekleyin") #Python öğreniyorum #print("Bu satırı devre dışı bırakın!")Bu satırın devamına bir yorum ekleyinBölüm 02: DeğişkenlerBu bölümde:* Değişken nedir?,* Değişken tanımlama,* Değişken türleri,* Değişken türü dönüştürme,* Aritmetik operatörler,* Artıtılmış atama operatörleri,* Boolean operatörleri,* Karşılaştırma operatörleri yer almaktadır. Değişken Nedir?Değişkenler değerleri depolamak için kullanılır. 
Böylece daha sonra bu değişkenler program içinden çağırılarak atanan değer tekrar ve tekrar kullanılabilir. Değişkenlere metinler ve / veya sayılar atanabilir. Sayı atamaları direkt rakamların yazılması ile gerçekleştirilirken, metin atamalarında metin tek tırnak içinde ( 'abc' ) ya da çift tırnak ( "abc" ) içinde atanır.Değişkenler etiketlere benzer ve atama operatörü olarak adlandırılan eşittir ( = ) operatörü ile bir değişkene bir değer atanabilir. Bir değer ataması zincirleme şeklinde gerçekleştirilebilir. Örneğin: a = b = 2 **Örnek Uygulama 1**Aşağıda bir "zincir atama" örneği yer almaktadır. Değer olarak atanan 2 hem "a" değişkenine, hem de "b" değişkenine atanmaktadır.```a = b = 2print("a = " + str(a))print("b = " + str(b))```"a" ve "b" değişkenleri başka metinler ile birlikte ekrana yazdırılmak istendiğinde metin formatına çevrilmesi gerekmektedir. Bu bağlamda kullanılan "str(a)" ve "str(b)" ifadeleri eğitimin ilerleyen bölümlerinde anlatılacaktır.# Örnek uygulamayı çalıştır a = b = 2 print("a = " + str(a)) print("b = " + str(b)) a = b = 5 print("a = " + str(a)) print("b = " + str(b))a = 2 b = 2 a = 5 b = 5**Örnek Uygulama 2**```adSoyad = ""print("Adı Soyadı: " + adSoyad)```# Örnek uygulamayı çalıştır adSoyad = "" print("Adı Soyadı: " + adSoyad) AdSoyad = "" print("Adı Soyadı: " + AdSoyad)Adı Soyadı: Orçun Madran Adı Soyadı: **Görev:** "eposta" adlı bir değişken oluşturun. Oluşturduğunuz bu değişkene bir e-posta adresi atayın. Daha sonra atadığınız bu değeri ekrana yazdırın. Örneğin: "E-posta: orcun[at]madran.net"# Ekrana e-posta yazdır Eposta = "" print("E-Posta Adresi: " + Eposta)E-Posta Adresi: Değişken TanımlamaDeğişken isimlerinde uyulması gereken bir takım kurallar vardır:* Rakam ile başlayamaz.* Boşluk kullanılamaz.* Alt tire ( _ ) haricinde bir noktalama işareti kullanılamaz.* Python içinde yerleşik olarak tanımlanmış anahtar kelimeler kullanılamaz (ör: print).* Python 3. sürümden itibaren latin dışı karakter desteği olan "Unicode" desteği gelmiştir. Türkçe karakterler değişken isimlerinde kullanılabilir. **Dikkat:** Değişken isimleri büyük-küçük harfe duyarlıdır. Büyük harfle başlanan isimlendirmeler genelde *sınıflar* için kullanılır. Değişken isimlerinin daha anlaşılır olması için deve notasyonu (camelCase) ya da alt tire kullanımı tavsiye edilir. **Örnek Uygulama:**```degisken = 1kullaniciAdi = "orcunmadran"kul_ad = "rafet"``` Henüz tanımlanmamış bir değişken kullanıldığında derleyicinin döndürdüğü hatayı kodu çalıştırarak gözlemleyin!degisken1 = "Veri" print(degisken2)**Görev:** Tanımladığınız değişkeni ekrana yazdırın!degisken3 = 'Yeni veri' print("Değişkeni yaz: " + degisken3)Değişken TürleriPython'da iki ana sayı türü vardır; tam sayılar ve ondalık sayılar.**Dikkat:** Ondalık sayıların yazımında Türkçe'de *virgül* (,) kullanılmasına rağmen, programlama dillerinin evrensel yazım kuralları içerisinde ondalık sayılar *nokta* (.) ile ifade edilir. **Örnek Uygulama:**```tamSayi = 5print(type(tamSayi)) tamSayi değişkeninin türünü yazdırırondalikSayi = 7.4print(type(ondalikSayi) ondalikSayi değişkeninin türünü yazdırır```# Örnek uygulamayı çalıştır tamSayi = 5 print(type(tamSayi)) ondalikSayi = 7.4 print(type(ondalikSayi))**Görev:** "sayi" değişkeninin türünü belirleyerek ekrana yazdırın!sayi = 9.0 print(type(sayi))Değişken Türü DönüştürmeBir veri türünü diğerine dönüştürmenize izin veren birkaç yerleşik fonksiyon (built-in function) vardır. Bu fonksiyonlar ("int()", "str()", "float()") uygulandıkları değişkeni dönüştürerek yeni bir nesne döndürürler. 
**Örnek Uygulama**```sayi = 6.5print(type(sayi)) "sayi" değişkeninin türünü ondalık olarak yazdırırprint(sayi)sayi = int(sayi) Ondalık sayı olan "sayi" değişkenini tam sayıya dönüştürürprint(type(sayi))print(sayi)sayi = float(sayi) Tam sayı olan "sayi" değişkenini ondalık sayıya dönüştürürprint(type(sayi))print(sayi)sayi = str(sayi) "sayi" değişkeni artık düz metin halini almıştırprint(type(sayi))print(sayi)```# Örnek uygulamayı çalıştır sayi = 6.5 print(type(sayi)) print(sayi) sayi = int(sayi) print(type(sayi)) print(sayi) sayi = float(sayi) print(type(sayi)) print(sayi) sayi = str(sayi) print(type(sayi)) print(sayi)**Görev:** Ondalık sayıyı tam sayıya dönüştürün ve ekrana değişken türünü ve değeri yazdırın!sayi = 3.14 print(type(sayi)) print(sayi) sayi = int(sayi) print(type(sayi)) print(sayi) sayi = float(sayi) print(type(sayi)) print(sayi) sayi= str(sayi) print(type(sayi)) print(sayi) Değer = input("Yaşınızı giriniz") print(Değer) print(type(Değer)) print(2022 - int(Değer)) #Doğum Yılı Yazdırma Programı bulunduğumuzyıl = input("Bulunduğunuz yılı giriniz") yaş = input("Yaşınızı giriniz") print(int(bulunduğumuzyıl)- int(yaş)) #Doğum Yılı Yazdırma Programı #Şimdiki yılı al syil = input("İçinde bulunduğunuz yılı giriniz") #Doğum tarihini al dtarih = input("Doğum tarihinizi giriniz") #Dönüştürme işlemleri syil = int(syil) dtarih = int(dtarih) #Yaşı hesapla yas = syil - dtarih #Yaşı ekrana yazdır print("Yaşınız: " + str(yas))Aritmetik OperatörlerDiğer tüm programlama dillerinde olduğu gibi, toplama (+), çıkarma (-), çarpma (yıldız) ve bölme (/) operatörleri sayılarla kullanılabilir. Bunlarla birlikte Python'un üs (çift yıldız) ve mod (%) operatörleri vardır.**Dikkat:** Matematik işlemlerinde geçerli olan aritmetik operatörlerin öncelik sıralamaları (çarpma, bölme, toplama, çıkarma) ve parantezlerin önceliği kuralları Python içindeki matematiksel işlemler için de geçerlidir. **Örnek Uygulama:**``` Toplama işlemisayi = 7.0sonuc = sayi + 3.5print(sonuc) Çıkarma işlemisayi = 200sonuc = sayi - 35print(sonuc) Çarpma işlemisayi = 44sonuc = sayi * 10print(sonuc) Bölme işlemisayi = 30sonuc = sayi / 3print(sonuc) Üs alma işlemisayi = 30sonuc = sayi ** 3print(sonuc) Mod alma işlemi sayi = 35sonuc = sayi % 4print(sonuc)```# Örnek uygulamayı çalıştır # Toplama işlemi sayi = 7.0 sonuc = sayi + 3.5 print(sonuc) # Çıkarma işlemi sayi = 200 sonuc = sayi - 35 print(sonuc) # Çarpma işlemi sayi = 44 sonuc = sayi * 10 print(sonuc) # Bölme işlemi sayi = 30 sonuc = sayi / 3 print(sonuc) # Üs alma işlemi sayi = 30 sonuc = sayi ** 3 print(sonuc) # Mod alma işlemi sayi = 35 sonuc = sayi % 4 print(sonuc)**Görev:** Aşağıda değer atamaları tamamlanmış olan değişkenleri kullanarak ürünlerin peşin satın alınma bedelini TL olarak hesaplayınız ve ürün adı ile birlikte ekrana yazdırınız! İpucu: Ürün adını ve ürün bedelini tek bir satırda yazdırmak isterseniz ürün bedelini str() fonksiyonu ile düz metin değişken türüne çevirmeniz gerekir.urunAdi = "Bisiklet" urunBedeliAvro = 850 kurAvro = 10 urunAdet = input("Ürün adetini giriniz: ") pesinAdetIndirimTL = 500 butce = 15000 hesapla = ((urunBedeliAvro* int(urunAdet)) * kurAvro) - pesinAdetIndirimTL butceTamam = butce > hesapla print(hesapla) print("Alışveriş bütçeme uygun mu?" + str(butceTamam)) #Ürünlerin peşin satın alma bedelini TL olarak hesapla, ürün adı ile ekrana yazdır! 
urunAdı = "Telefon" urunBedeliAvro = 2000 kurAvro = 15 urunAdet = input("Ürün adetini giriniz: ") pesinAdetindirimTL = 500 butce = 30000 hesapla = ((urunBedeliAvro * int(urunAdet)) * kurAvro) - pesinAdetindirimTL butceTamam = butce > hesapla print(hesapla) print("Alışveriş bütçeme uygun mu?" + str(butceTamam))Artırılmış Atama OperatörleriArtırılmış atama, bir değişkenin mevcut değerine belirlenen değerin eklenerek ( += ) ya da çıkartılarak ( -= ) atanması işlemidir. **Örnek Uygulama**```sayi = 8sayi += 4 Mevcut değer olan 8'e 4 daha ekler.print(sayi) sayi -= 6 Mevcut değer olan 12'den 6 eksiltir.print("Sayı = " + str(sayi))```# Örnek uygulama çalıştır sayi = 8 sayi += 4 print(sayi) sayi -= 6 print("Sayı = " + str(sayi))**Görev:** Artıtılmış atama operatörleri kullanarak "sayi" değişkenine 20 ekleyip, 10 çıkartarak değişkenin güncel değerini ekrana yazdırın!sayi = 55 sayi += 20 print(sayi) sayi -= 10 print("Sayı = " + str(sayi))Boolean OperatörleriBoolean, yalnızca **Doğru (True)** veya **Yanlış (False)** olabilen bir değer türüdür. Eşitlik (==) operatörleri karşılaştırılan iki değişkenin eşit olup olmadığını kontrol eder ve *True* ya da *False* değeri döndürür. **Örnek Uygulama:**```deger1 = 10deger2 = 10esitMi = (deger1 == deger2) Eşit olup olmadıkları kontrol ediliyorprint(esitMi) Değişken "True" olarak dönüyordeger1 = "Python"deger2 = "Piton"esitMi = (deger1 == deger2) Eşit olup olmadıkları kontrol ediliyorprint(esitMi) Değişken "False" olarak dönüyor```# Örnek uygulama çalıştır deger1 = 10 deger2 = 10 esitMi = (deger1 == deger2) print(esitMi) deger1 = "Python" deger2 = "Piton" esitMi = (deger1 == deger2) print(esitMi)**Görev:** Atamaları yapılmış olan değişkenler arasındaki eşitliği kontrol edin ve sonucu ekrana yazıdırın!sifre = "Python2020" sifreTekrar = "Piton2020" sifrek = input("Şifrenizi giriniz: ") print(sifrek==sifre) #Kullanıcı adı ve şifre gir Kullanıcıadı = "yarengozutok" Sıfre = "tavsan23" Kullanıcıadık = input("Kullanıcı adınızı giriniz: ") Sıfrek = input("Şifrenizi giriniz: ") print(Kullanıcıadık==Kullanıcıadı) print(Sıfrek==Sıfre)Karşılaştırma OperatörleriPython'da, >=, , < vb. dahil olmak üzere birçok operatör bulunmaktadır. Python'daki tüm karşılaştırma operatörleri aynı önceliğe sahiptir. Karşılaştırma sonucunda boole değerleri (*True* ya da *False*) döner. Karşılaştırma operatörleri isteğe bağlı olarak arka arkaya da (zincirlenerek) kullanılabilir. **Örnek Uygulama:**```deger1 = 5deger2 = 7deger3 = 9print(deger1 < deger2 < deger3) Sonuç "True" olarak dönecektir```# Örnek uygulama çalıştır deger1 = 5 deger2 = 7 deger3 = 9 print(deger1 < deger2 < deger3)**Görev:** Aşağıda değer atamaları tamamlanmış olan değişkenleri kullanarak ürünlerin peşin satın alınma bedelini TL olarak hesaplayın. Toplam satın alma bedeli ile bütçenizi karşılaştırın. Satın alma bedelini ve bütçenizi ekrana yazdırın. Ödeme bütçenizi aşıyorsa ekrana "False", aşmıyorsa "True" yazdırın.urunAdi = "Bisiklet" urunBedeliAvro = 850 kurAvro = 10 urunAdet = 3 pesinAdetIndirimTL = 500 butce = 20000 hesapla= ((urunBedeliAvro*urunAdet)*kurAvro)- pesinAdetIndirimTL butceTamam = butce > hesapla print(hesapla) print("Alışveriş bütçeme uygun mu? 
" + str(butceTamam)) yasLimiti = 13 yas = int(input( "Yaşınızı giriniz: ")) kontrol = yas >= yasLimiti print("Youtube yayınlarını izleyebilir: " + str(kontrol))Bölüm 03: Metin KatarlarıBu bölümde:* Birbirine bağlama,* Metin katarı çarpımı,* Metin katarı dizinleme,* Metin katarı negatif dizinleme,* Metin katarı dilimleme,* In operatörü,* Metin katarının uzunluğu,* Özel karakterlerden kaçma,* Basit metin katarı metodları,* Metin katarı biçimlendirme yer almaktadır. Birbirine BağlamaBirbirine bağlama artı (+) işlemini kullanarak iki metin katarının birleştirilmesi işlemine denir. **Örnek Uygulama**```deger1 = "Merhaba"deger2 = "Dünya"selamlama = deger1 + " " + deger2print(selamlama) Çıktı: ```# Örnek uygulamayı çalışıtır deger1 = "Merhaba" deger2 = "Dünya" selamlama = deger1 + " " + deger2 print(selamlama)**Görev:** *ad*, *soyad* ve *hitap* değişkenlerini tek bir çıktıda birleştirecek kodu yazın!hitap = "Öğr. Gör." ad = "Orçun" soyad = "Madran" çıktı = hitap + ad + soyad print(çıktı) # Çıktı: Öğr. Gör. Metin Katarı ÇarpımıPython, metin katarlarının çarpım sayısı kadar tekrar ettirilmesini desteklemektedir. **Örnek Uygulama**```metin = "Hadi! "metniCarp = metin * 4print(metniCarp) Çıktı: Hadi! Hadi! Hadi! Hadi! ```# Örnek uygulamayı çalıştır metin = "Hadi! " metniCarp = metin * 4 print(metniCarp)**Görev:** Sizi sürekli bekleten arkadaşınızı uyarabilmek için istediğiniz sayıda "Hadi!" kelimesini ekrana yazdırın!metin = "Hadi! " metniCarp = metin*4 print(metniCarp) # Çıktı: Hadi! Hadi! Hadi! Hadi! ... Hadi!Metin Katarı DizinlemeKonumu biliniyorsa, bir metin katarındaki ilgili karaktere erişilebilir. Örneğin; str[index] metin katarındaki indeks numarasının karşılık geldiği karakteri geri döndürecektir. İndekslerin her zaman 0'dan başladığı unutulmamalıdır. İndeksler, sağdan saymaya başlamak için negatif sayılar da olabilir. -0, 0 ile aynı olduğundan, negatif indeksler -1 ile başlar. **Örnek Uygulama**```metin = "Python Programlama Dili"print("'h' harfini yakala: " + metin[3]) Çıktı: 'h' harfini yakala: h"```# örnek uygulama çalıştır metin = "Python Programlama Dili" print("'h'harfini yakala: " + metin[3])'h'harfini yakala: h**Görev:** İndeks numarasını kullanarak metin katarındaki ikinci "P" harfini ekrana yazdırın!#Çıktı = P metin ="Python Programlama Dili" print(metin[0])PMetin Katarı Negatif DizinlemeMetin katarının sonlarında yer alan bir karaktere daha rahat erişebilmek için indeks numarası negatif bir değer olarak belirlenebilir. **Örnek Uygulama**```metin = "Python Programlama Dili"dHarfi = metin[-4]print(dHarfi) Çıktı: D```# Örnek uygulama çalıştır metin = "Python Programlama Dili" dHarfi = metin[-4] print(dHarfi)D**Görev:** Metin katarının sonunda yer alan "i" harfini ekrana yazdırın!metin = "Python Programlama Dili" print(metin[-1]) #Çıktı: iiMetin Katarı DilimlemeDilimleme, bir metin katarından birden çok karakter (bir alt katar oluşturmak) almak için kullanılır. Söz dizimi indeks numarası ile bir karaktere erişmeye benzer, ancak iki nokta üst üste işaretiyle ayrılmış iki indeks numarası kullanılır. Ör: str[ind1:ind2].Noktalı virgülün solundaki indeks numarası belirlenmezse ilk karakterden itibaren (ilk karakter dahil) seçimin yapılacağı anlamına gelir. Ör: str[:ind2]Noktalı virgülün sağındaki indeks numarası belirlenmezse son karaktere kadar (son karakter dahil) seçimin yapılacağı anlamına gelir. 
Ör: str[ind1:] **Örnek Uygulama**```metin = "Python Programlama Dili"dilimle = metin[:6] print(dilimle) Çıktı: Pythonmetin = "Python Programlama Dili" print(metin[7:]) Çıktı: Programlama Dili```# Örnek uygulama çalıştır metin = "Python Programlama Dili" dilimle = metin[:6] print(dilimle) metin = "Python Programlama Dili" print(metin[7:])Python Programlama Dili**Görev:** Metin katarını dilemleyerek katarda yer alan üç kelimeyi de ayrı ayrı (alt alta) ekrana yazdırın!.# Çıktı: # Python # Programlama # Dili metin = "Python Programlama Dili" dilimle = metin[:6] print(dilimle) metin = "Python Programlama Dili" print(metin[7:]) metin2 = "Python Programlama Dili" dilimle2 = metin2[7:18] print(dilimle2) haber= "But I must explain to you how all this mistaken idea of denouncing pleasure and praising pain was born and I will give you a complete account of the system" print(haber) ozet = haber[:40] + " devamı için tıklayınız..." print(ozet) haber = "But I must explain to you how all this mistaken idea of denouncing pleasure praising pain was born" baslangic = haber[:20] bitis = haber[-20:] print(baslangic + "......." + bitis)In OperatörüBir metin katarının belirli bir harf ya da bir alt katar içerip içermediğini kontrol etmek için, in anahtar sözcüğü kullanılır. **Örnek Uygulama**```metin = "Python Programlama Dili"print("Programlama" in metin) Çıktı: True``` **Görev:** Metin katarında "Python" kelimesinin geçip geçmediğini kontrol ederek ekrana yazdırın!metin = "Python Programlama Dili" arama = input("Arama yapılacak kelimeyi giriniz: ") sonuç = arama in metin print("Aradığınız kelime var: " + str(sonuç))Arama yapılacak kelimeyi giriniz: p Aradığınız kelime var: FalseMetin Katarının UzunluğuBir metin katarının kaç karakter içerdiğini saymak için len() yerleşik fonksiyonu kullanılır. **Örnek Uygulama**```metin = "Python programlama dili"print(len(metin)) Çıktı: 23```# Örnek uygulamayı çalıştır metin = "Python programlama dili" print(len(metin)) # 1-Bir girdiye > klavyeden # 2-Klavyeden girilen bilginin uzunluğunu hesapla # 3-Uzunluğu limit ile karşılaştır # 4-Sonucu ekrana yaz #Klavyeden girilen metnin 20 karakterden küçük ise false mesajı veren kod. girilen = input("Metin giriniz: ") print(girilen) girilenKarakter = len(girilen) print(girilenKarakter) kontrol = girilenKarakter > 10 print(kontrol)**Görev:** Metin katarındaki cümlenin ilk yarısını ekrana yazdırın! Yazılan kod cümlenin uzunluğundan bağımsız olarak cümleyi ikiye bölmelidir.metin = "Python programlama dili, dünyada eğitim amacıyla en çok kullanılan programlama dillerinin başında gelir." print(metin[:52]) #yarısı = len(metin)/2 #print(yarısı) # Çıktı: Python programlama dili, dünyada eğitim amacıyla enPython programlama dili, dünyada eğitim amacıyla enÖzel Karakterlerden KaçmaMetin katarları içerisinde tek ve çift tırnak kullanımı kimi zaman sorunlara yol açmaktadır. Bu karakterin metin katarları içerisinde kullanılabilmesi için "Ters Eğik Çizgi" ile birlikte kullanılırlar. Örneğin: 'Önümüzdeki ay "Ankara'da Python Eğitimi" gerçekleştirilecek' cümlesindeki tek tırnak kullanımı soruna yol açacağından 'Önümüzdeki ay "Ankara\'da Python Eğitimi" gerçekleştirilecek' şeklinde kullanılmalıdır.**İpucu:** Tek tırnaklı metin katarlarından kaçmak için çift tırnak ya da tam tersi kullanılabilir. 
**Örnek Uygulama**```metin = 'Önümüzdeki ay "Ankara\'da Python Eğitimi" gerçekleştirilecektir.'print(metin) Çıktı: Önümüzdeki ay "Ankara'da Python Eğitimi" gerçekleştirilecektir.metin = 'Önümüzdeki ay "Ankara'da Python Eğitimi" gerçekleştirilecektir.'print(metin) Çıktı: Geçersiz söz dizimi hatası dönecektir. ```# Örnek uygulamayı çalıştır metin = 'Önümüzdeki ay "Ankara\'da Python Eğitimi" gerçekleştirilecektir.' print(metin) # Örnek uygulamadaki hatayı gözlemle metin = 'Önümüzdeki ay "Ankara'da Python Eğitimi" gerçekleştirilecektir.' print(metin)**Görev:** Metin katarındaki cümlede yer alan noktalama işaretlerinden uygun şekilde kaçarak cümleyi ekrana yazdırın!metin = "Bilimsel çalışmalarda 'Python' kullanımı Türkiye'de çok yaygınlaştı!" print(metin)Bilimsel çalışmalarda 'Python' kullanımı Türkiye'de çok yaygınlaştı!Basit Metin Katarı MetodlarıPython içinde birçok yerleşik metin katarı fonksiyonu vardır. En çok kullanılan fonksiyonlardan bazıları olarak;* tüm harfleri büyük harfe dönüştüren *upper()*,* tüm harfleri küçük harfe dönüştüren *lower()*,* sadece cümlenin ilk harfini büyük hale getiren *capitalize()* sayılabilir.**İpucu:** Python'daki yerleşik fonksiyonların bir listesini görüntüleyebilmek için metin katarından sonra bir nokta (.) koyulur ve uygun olan fonksiyonlar arayüz tarafından otomatik olarak listelenir. Bu yardımcı işlevi tetiklemek için CTRL + Bolşuk tuş kombinasyonu da kullanılabilir. **Örnek Uygulama**```metin = "Python Programlama Dili"print(metin.lower()) Çıktı: python programlama diliprint(metin.upper()) Çıktı: PYTHON PROGRAMLAMA DILIprint(metin.capitalize()) Çıktı: Python programlama dili```# Örnek uygulamayı çalıştır metin = "Python Programlama Dili" print(metin.lower()) print(metin.upper()) print(metin.capitalize())python programlama dili PYTHON PROGRAMLAMA DILI Python programlama dili**Görev:** *anahtarKelime* ve *arananKelime* değişkenlerinde yer alan metinler karşılaştırıldığında birbirlerine eşit (==) olmalarını sağlayın ve dönen değerin "True" olmasını sağlayın!anahtarKelime = "Makine Öğrenmesi" arananKelime = "makine öğrenmesi" print(anahtarKelime.lower() == arananKelime) # Çıktı: True print(anahtarKelime.lower())True makine öğrenmesiMetin Katarı BiçimlendirmeBir metin katarından sonraki % operatörü, bir metin katarını değişkenlerle birleştirmek için kullanılır. % operatörü, bir metin katarıdanki % s öğesini, arkasından gelen değişkenle değiştirir. % d sembolü ise, sayısal veya ondalık değerler için yer tutucu olarak kullanılır. **Örnek Uygulama**```adsoyad = ""dogumTarihi = 1976print("Merhaba, ben %s!" % adsoyad) Çıktı: Merhaba, ben Orçun Madran!print("Ben %d doğumluyum" % dogumTarihi) Ben 1976 doğumluyum.ad = "Orçun"soyad = "Madran"print("Merhaba, ben %s %s!" % (ad, soyad)) Çıktı: Merhaba, ben Orçun Madran!```# Örnek uygulamayı çalıştır adsoyad = "" dogumTarihi = 1976 print("Merhaba, ben %s!" % adsoyad) print("Ben %d doğumluyum" % dogumTarihi) # Örnek uygulamayı çalıştır ad = "Orçun" soyad = "Madran" print("Merhaba, ben %s %s!" % (ad, soyad))Merhaba, ben Orçun Madran!**Görev:** ", bu dönemki dersiniz 'Programlama Dilleri'. Başarılar!" cümlesini ekrana biçimlendirmeyi kullanarak (artı işaretini kullanmadan) yazdırın!ad = "Orçun" soyad = "Madran" ders = "Programlama Dilleri" print("Merhaba ben %s %s, bu dönemki dersiniz '%s'.Başarılar!" % (ad, soyad, ders)) # Çıktı: , bu dönemki dersiniz "Programlama Dilleri". Başarılar!Merhaba ben Or, bu dönemki dersiniz 'Programlama Dilleri'.Başarılar!21 ŞUBAT 2022 PAZARTESİ (Buraya kadar geldik.) 
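Bölümde yalnızca % operatörü anlatılmaktadır; aşağıdaki taslak, kaynakta yer almayan ek bir örnek olarak aynı çıktının güncel Python sürümlerinde (3.6+) f-string ile de üretilebildiğini göstermektedir.

```python
# Kaynağa ait olmayan, karşılaştırma amaçlı bir taslak:
ad = "Orçun"
soyad = "Madran"
ders = "Programlama Dilleri"

# % operatörü ile (bölümde anlatılan yöntem)
print("Merhaba ben %s %s, bu dönemki dersiniz '%s'. Başarılar!" % (ad, soyad, ders))

# f-string ile aynı sonuç (Python 3.6 ve üzeri)
print(f"Merhaba ben {ad} {soyad}, bu dönemki dersiniz '{ders}'. Başarılar!")
```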
---ad = "Yaren" soyad = "Gozutok" ders = "Python" print("Merhaba ben %s %s, bu dönemki dersim '%s' . Başarılı olacağım!" % (ad, soyad, ders))Merhaba ben , bu dönemki dersim 'Python' . Başarılı olacağım!Bölüm 04: Veri YapılarBu bölümde:* Listeler,* Liste işlemleri,* Liste öğeleri,* Demetler (Tuples),* Sözlükler,* Sözlük değerleri ve anahtarları,* In anahtar kelimesinin kullanımı yer almaktadır. ListelerListe, birden fazla değeri tek bir değişken adı altında saklamak için kullanabileceğiniz bir veri yapısıdır. Bir liste köşeli parantez arasında virgülle ayrılmış değerler dizisi olarak yazılır. Ör: liste = [deger1, deger2].Listeler farklı türden öğeler içerebilir, ancak genellikle listedeki tüm öğeler aynı türdedir. Metin katarları gibi listeler de dizine eklenebilir ve dilimlenebilir. (Bkz. Bölüm 3). **Örnek Uygulama**```acikListe = ["Açık Bilim", "Açık Erişim", "Açık Veri", "Açık Eğitim", "Açık Kaynak"] acikListe adında yeni bir liste oluştururprint(acikListe) Çıktı: ['Açık Bilim', 'Açık Erişim', 'Açık Veri', 'Açık Eğitim', 'Açık Kaynak']```# Örnek uygulamayı çalıştır acikListe = ["Açık Bilim", "Açık Erişim", "Açık Veri", "Açık Eğitim", "Açık Kaynak"] print(acikListe)**Görev 1:** acikListe içinde yer alan 3. liste öğesini ekrana yazıdırın!acikListe = ["Açık Bilim", "Açık Erişim", "Açık Veri", "Açık Eğitim", "Açık Kaynak"] print(acikListe[2])Açık Veri**Görev 2:** acikListe içinde yer alan 4. ve 5. liste öğesini ekrana yazıdırın!acikListe = ["A", "Açık Erişim", "Açık Veri", "Açık Eğitim", "Açık Kaynak"] print(acikListe[3:5])['Açık Eğitim', 'Açık Kaynak']Liste İşlemleriappend() fonksiyonunu kullanarak ya da artırılmış atama operatörü ( += ) yardımıyla listenin sonuna yeni öğeler (değerler) eklenebilir. Listelerin içindeki öğeler güncellenebilir, yani liste[indeksNo] = yeni_deger kullanarak içeriklerini değiştirmek mümkündür. **Örnek Uygulama**```acikListe = ["A", "Açık Erişim", "Açık Veri", "Açık Eğitim", "Açık Kaynak"] acikListe adında yeni bir liste oluştururprint(acikListe) Çıktı: ['Açık Bilim', 'Açık Erişim', 'Açık Veri', 'Açık Eğitim', 'Açık Kaynak']acikListe += ["Açık Donanım", "Açık İnovasyon"] listeye iki yeni öğe eklerprint(acikListe) Çıktı: ['Açık Bilim', 'Açık Erişim', 'Açık Veri', 'Açık Eğitim', 'Açık Kaynak', 'Açık Donanım', 'Açık İnovasyon']acikListe.append("Açık Veri Gazeteciliği") listeye yeni bir öğe eklerprint(acikListe) Çıktı: ['Açık Bilim', 'Açık Erişim', 'Açık Veri', 'Açık Eğitim', 'Açık Kaynak', 'Açık Donanım', 'Açık İnovasyon', 'Açık Veri Gazeteciliği']acikListe[4] = "Açık Kaynak Kod" listenin 5. 
öğesini değiştirirprint(acikListe) Çıktı: ['Açık Bilim', 'Açık Erişim', 'Açık Veri', 'Açık Eğitim', 'Açık Kaynak Kod', 'Açık Donanım', 'Açık İnovasyon', 'Açık Veri Gazeteciliği']```# Örnek uygulamayı çalıştır acikListe = ["Açık Bilim", "Açık Erişim", "Açık Veri", "Açık Eğitim", "Açık Kaynak"] print(acikListe) acikListe += ["Açık Donanım", "Açık İnovasyon"] print(acikListe) acikListe.append("Açık Veri Gazeteciliği") print(acikListe) acikListe[4] = "Açık Kaynak Kod" print(acikListe) #Arkadaş Listesi Liste = ["Yaren"] print(Liste) yeni = input("Arkadaşının adı: ") Liste.append(yeni) yeni = input("Arkadaşının adı: ") Liste.append(yeni) print(Liste) #Arkadaş Listesi liste = [""] ad = input("Adınızı giriniz: ") soyad = input("Soyadınızı giriniz: ") adsoyad = ad + " " + soyad liste.append(adsoyad) print(liste) liste[0]= "Yeni Arkadaş" print(liste)Adınızı giriniz: Bayezıt Soyadınızı giriniz: Uyanır ['', 'Bayyanır'] ['Yeni Arkadaş', 'Bayezıt Uyanır']**Görev:** bilgiBilim adlı bir liste oluşturun. Bu listeye bilgi bilim disiplini ile ilgili 3 adet anahtar kelime ya da kavram ekleyin. Bu listeyi ekrana yazdırın. Listeye istediğiniz bir yöntem ile (append(), +=) 2 yeni öğe ekleyin. Ekrana listenin son durumunu yazdırın. Listenizdeki son öğeyi değiştirin. Listenin son halini ekrana yazıdırn.#bilgiBilim liste = ["Açık Erişim", "Açık Kaynak", "Açık Veri"] print(liste) yeni = input("Eklemek istediğiniz kelimeyi girin: ") liste.append(yeni) yeni = input("Eklemek istediğiniz kelimeyi girin: ") liste.append(yeni) print(liste)['Açık Erişim', 'Açık Kaynak', 'Açık Veri'] Eklemek istediğiniz kelimeyi girin: Kamu malı Eklemek istediğiniz kelimeyi girin: açık lisans ['Açık Erişim', 'Açık Kaynak', 'Açık Veri', 'Kamu malı', 'açık lisans']Liste Öğeleri Liste öğelerini dilimleme (slice) yaparak da atamak mümkündür. Bu bir listenin boyutunu değiştirebilir veya listeyi tamamen temizleyebilir. **Örnek Uygulama**```acikListe = ["A", "Açık Erişim", "Açık Veri", "Açık Eğitim", "Açık Kaynak"] acikListe adında yeni bir liste oluştururprint(acikListe) Çıktı: ['Açık Bilim', 'Açık Erişim', 'Açık Veri', 'Açık Eğitim', 'Açık Kaynak']acikListe[2:4] = ["Açık İnovasyon"] "Açık Veri" ve "Açık Eğitim" öğelerinin yerine tek bir öğe eklerprint(acikListe) Çıktı: ["Aim", "Açık Erişim", "Açık İnovasyon", "Açık Kaynak"]acikListe[:2] = [] listenin ilk iki öğesini silerprint(acikListe) Çıktı: ["Açık İnovasyon", "Açık Kaynak"]acikListe[:] = [] listeyi temizler print(acikListe) Çıktı: []```# Örnek uygulamayı çalıştır acikListe = ["A", "Açık Erişim", "Açık Veri", "Açık Eğitim", "Açık Kaynak"] print(acikListe) acikListe[2:4] = ["Açık İnovasyon"] print(acikListe) acikListe[:2] = [] print(acikListe) acikListe[:] = [] print(acikListe)['Açık Bilim', 'Açık Erişim', 'Açık Veri', 'Açık Eğitim', 'Açık Kaynak'] ['Açık Bilim', 'Açık Erişim', 'Açık İnovasyon', 'Açık Kaynak'] ['Açık İnovasyon', 'Açık Kaynak'] []**Görev:** Önceki görevde oluşturulan "bilgiBilim" adlı listenin istediğiniz öğesini silerek listenin güncel halini ekrana yazdırın. Listeyi tamamen temizleyerek listenin güncel halini ekrana yazdırın.#bilgiBilim liste = ["Açık Erişim", "Açık Kaynak", "Açık Veri", "Açık Lisans", "Kamu Malı"] print(liste) liste [2:4] = ["Açık İnovasyon"] print(liste) liste [:2] = [] print(liste) liste[:] = [] print(liste)['Açık Erişim', 'Açık Kaynak', 'Açık Veri', 'Açık Lisans', 'Kamu Malı'] ['Açık Erişim', 'Açık Kaynak', 'Açık İnovasyon', 'Kamu Malı'] ['Açık İnovasyon', 'Kamu Malı'] []Demetler (Tuples) Demetler neredeyse listelerle aynı. 
Demetler ve listeler arasındaki tek önemli fark, demetlerin değiştirilememesidir. Demetlere öğe eklenmez, öğe değiştirilmez veya demetlerden öğe silinemez. Demetler, parantez içine alınmış bir virgül operatörü tarafından oluşturulur. Ör: demet = ("deger1", "deger2", "deger3"). Tek bir öğe demetinde ("d",) gibi bir virgül olmalıdır. **Örnek Uygulama**```ulkeKodlari = ("TR", "US", "EN", "JP")print(ulkeKodlari) Çıktı: ('TR', 'US', 'EN', 'JP')```# Örnek uygulamayı çalıştır ulkeKodlari = ("TR", "US", "EN", "JP") print(ulkeKodlari)**Görev:** Kongre Kütüphanesi konu başlıkları listesinin kodlarından oluşan bir demet oluşturun ve ekrana yazdırın! Oluşturulan demet içindeki tek bir öğeyi ekrana yazdırın!#konuBasliklari baslıkkodları = ("CB", "CC", "CT") print(baslıkkodları) print(baslıkkodları[2])('CB', 'CC', 'CT') CTSözlüklerSözlük, listeye benzer, ancak sözlük içindeki değerlere indeks numarası yerine bir anahtara ile erişilebilir. Bir anahtar herhangi bir metin katarı veya rakam olabilir. Sözlükler ayraç içine alınır. Ör: sozluk = {'anahtar1': "değer1", 'anahtar2': "değer2"}. **Örnek Uygulama**```adresDefteri = {"Hacettepe Üniversitesi": "hacettepe.edu.tr", "ODTÜ": "odtu.edu.tr", "Bilkent Üniversitesi": "bilkent.edu.tr"} yeni bir sözlük oluştururprint(adresDefteri) Çıktı: {'Hacettepe Üniversitesi': 'hacettepe.edu.tr', 'ODTÜ': 'odtu.edu.tr', 'Bilkent Üniversitesi': 'bilkent.edu.tr'}adresDefteri["Ankara Üniversitesi"] = "ankara.edu.tr" sözlüğe yeni bir öğe eklerprint(adresDefteri) Çıktı: {'Hacettepe Üniversitesi': 'hacettepe.edu.tr', 'ODTÜ': 'odtu.edu.tr', 'Bilkent Üniversitesi': 'bilkent.edu.tr', 'Ankara Üniversitesi': 'ankara.edu.tr'}del adresDefteri ["Ankara Üniversitesi"] sözlükten belirtilen öğeyi silerprint(adresDefteri) Çıktı: {'Hacettepe Üniversitesi': 'hacettepe.edu.tr', 'ODTÜ': 'odtu.edu.tr', 'Bilkent Üniversitesi': 'bilkent.edu.tr'}```# Örnek uygulamayı çalıştır adresDefteri = {"Hacettepe Üniversitesi": "hacettepe.edu.tr", "ODTÜ": "odtu.edu.tr", "Bilkent Üniversitesi": "bilkent.edu.tr"} print(adresDefteri) adresDefteri["Ankara Üniversitesi"] = "ankara.edu.tr" print(adresDefteri) del adresDefteri ["Ankara Üniversitesi"] print(adresDefteri){'Hacettepe Üniversitesi': 'hacettepe.edu.tr', 'ODTÜ': 'odtu.edu.tr', 'Bilkent Üniversitesi': 'bilkent.edu.tr'} {'Hacettepe Üniversitesi': 'hacettepe.edu.tr', 'ODTÜ': 'odtu.edu.tr', 'Bilkent Üniversitesi': 'bilkent.edu.tr', 'Ankara Üniversitesi': 'ankara.edu.tr'} {'Hacettepe Üniversitesi': 'hacettepe.edu.tr', 'ODTÜ': 'odtu.edu.tr', 'Bilkent Üniversitesi': 'bilkent.'}**Görev:** İstediğin herhangi bir konuda 5 öğeye sahip bir sözlük oluştur. Sözlüğü ekrana yazdır. Sözlükteki belirli bir öğeyi ekrana yazdır. Sözlükteki belirli bir öğeyi silerek sözlüğün güncel halini ekrana yazdır!#Bilim Sözlüğü sozluk = {"Açık Erişim": "Kamu Kaynakları..." , "Açık Veri": "Açık olarak..."} print(sozluk) sozluk["Açık İnovasyon"] = "Aİ......." print(sozluk) del sozluk["Açık Erişim"] print(sozluk) sozluk["Açık İnovasyon"] = "Aİ22......." print(sozluk) print(sozluk["Açık Veri"]){'Açık Erişim': 'Kamu Kaynakları...', 'Açık Veri': 'Açık olarak...'} {'Açık Erişim': 'Kamu Kaynakları...', 'Açık Veri': 'Açık olarak...', 'Açık İnovasyon': 'Aİ.......'} {'Açık Veri': 'Açık olarak...', 'Açık İnovasyon': 'Aİ.......'} {'Açık Veri': 'Açık olarak...', 'Açık İnovasyon': 'Aİ22.......'} Açık olarak...Sözlük Değerleri ve AnahtarlarıSözlüklerde values() ve keys() gibi birçok yararlı fonksiyon vardır. 
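values() ve keys() dışındaki yararlı sözlük metodlarına örnek olması için kaynakta yer almayan küçük bir taslak aşağıda verilmiştir; items() anahtar-değer çiftlerini birlikte döndürür, get() ise anahtar bulunamadığında hata yerine varsayılan bir değer verir.

```python
# Kaynağa ait olmayan ek bir taslak: items() ve get() kullanımı
adresDefteri = {"Hacettepe Üniversitesi": "hacettepe.edu.tr", "ODTÜ": "odtu.edu.tr"}

# items(): anahtar-değer çiftleri üzerinde birlikte dolaşır
for kurum, adres in adresDefteri.items():
    print(kurum + " -> " + adres)

# get(): anahtar yoksa KeyError yerine varsayılan değeri döndürür
print(adresDefteri.get("Bilkent Üniversitesi", "kayıt bulunamadı"))
```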
Bir sozlük adı ve ardından noktadan sonra çıkan listeyi kullanarak geri kalan fonksiyolar incelenebilir. **Örnek Uygulama**```adresDefteri = {"Hacettepe Üniversitesi": "hacettepe.edu.tr", "ODTÜ": "odtu.edu.tr", "Bilkent Üniversitesi": "bilkent.edu.tr"} yeni bir sözlük oluştururprint(adresDefteri) Çıktı: {'Hacettepe Üniversitesi': 'hacettepe.edu.tr', 'ODTÜ': 'odtu.edu.tr', 'Bilkent Üniversitesi': 'bilkent.edu.tr'}print(adresDefteri.values()) Çıktı: dict_values(['hacettepe.edu.tr', 'odtu.edu.tr', 'bilkent.edu.tr'])print(adresDefteri.keys()) Çıktı: dict_keys(['Hacettepe Üniversitesi', 'ODTÜ', 'Bilkent Üniversitesi'])```# Örnek uygulamayı çalıştır adresDefteri = {"Hacettepe Üniversitesi": "hacettepe.edu.tr", "ODTÜ": "odtu.edu.tr", "Bilkent Üniversitesi": "bilkent.edu.tr"} print(adresDefteri) print(adresDefteri.values()) print(adresDefteri.keys()){'Hacettepe Üniversitesi': 'hacettepe.edu.tr', 'ODTÜ': 'odtu.edu.tr', 'Bilkent Üniversitesi': 'bilkent.edu.tr'} dict_values(['hacettepe.edu.tr', 'odtu.edu.tr', 'bilkent.edu.tr']) dict_keys(['Hacettepe Üniversitesi', 'ODTÜ', 'Bilkent Üniversitesi'])**Görev:** İstediğin bir konuda istediğin öğe saysına sahip bir sözlük oluştur. Sözlükler ile ilgili farklı fonksiyoları dene. Sonuçları ekrana yazdır!#yeniSozluk sozluk = {"Açık Erişim" : "Kamu kaynakları...", "Açık Veri": "Açık verilere erişim..."} print(sozluk) print(sozluk.values()) print(sozluk.keys()){'Açık Erişim': 'Kamu kaynakları...', 'Açık Veri': 'Açık verilere erişim...'} dict_values(['Kamu kaynakları...', 'Açık verilere erişim...']) dict_keys(['Açık Erişim', 'Açık Veri'])In Anahtar Kelimesi"In" anahtar sözcüğü, bir listenin veya sözlüğün belirli bir öğe içerip içermediğini kontrol etmek için kullanılır. Daha önce metin katarlarındaki kullanıma benzer bir kullanımı vardır. "In" anahtar sözcüğü ile öğe kontrolü yapıldıktan sonra sonuç, öğe listede ya da sözlükte yer alıyorsa *True* yer almıyorsa *False* olarak geri döner.**Dikkat**: Aranan öğe ile liste ya da sözlük içinde yer alan öğelerin karşılaştırılması sırasında büyük-küçük harf duyarlılığı bulunmaktadır. Ör: "Bilgi" ve "bilgi" iki farklı öğe olarak değerlendirilir. **Örnek Uygulama**```bilgiKavramları = ["indeks", "erişim", "koleksiyon"] yeni bir liste oluştururprint("Erişim" in bilgiKavramları) Çıktı: FalsebilgiSozlugu = {"indeks": "index", "erişim": "access", "koleksiyon": "collection"} yeni bir sozluk oluştururprint("koleksiyon" in bilgiSozlugu.keys()) çıktı: True```# Örnek uygulamayı çalıştır bilgiKavramları = ["indeks", "erişim", "koleksiyon"] print("Erişim" in bilgiKavramları) bilgiSozlugu = {"indeks": "index", "erişim": "access", "koleksiyon": "collection"} print("koleksiyon" in bilgiSozlugu.keys())False True**Görev:** Bir liste ve bir sözlük oluşturun. Liste içinde istediğiniz kelimeyi aratın ve sonucunu ekrana yazdırın! Oluşturduğunuz sözlüğün içinde hem anahtar kelime (keys()) hem de değer (values()) kontrolü yaptırın ve sonucunu ekrana yazdırın!#yeniListe dersler = ["Bilgi Erişim", "Bilgi Hizmetleri", "Bilginin Düzenlenmesi"] print("Bilgi Hizmetleri" in dersler) #yeniSozluk derssozlugu = {"Bilgi Erişim":"Bilgiye kolay eriştirme...", "Bilginin Düzenlenmesi": "AACR ve Marc..."} print(derssozlugu.values()) print(derssozlugu.keys())True dict_values(['Bilgiye kolay eriştirme...', 'AACR ve Marc...']) dict_keys(['', ''])Bölüm 05: Koşullu İfadelerBu bölümde:* Mantıksal operatörler,* If cümleciği,* Else ve elif kullanımı yer almatadır. 
Mantıksal OperatörlerMantıksal operatörler ifadeleri karşılaştırır ve sonuçları *True* ya da *False* değerleriyle döndürür. Python'da üç tane mantıksal operatör bulunur:1. "and" operatörü: Her iki yanındaki ifadeler doğru olduğunda *True* değerini döndürür.2. "or" operatörü: Her iki tarafındaki ifadelerden en az bir ifade doğru olduğunda "True" değerini döndürür.3. "not" operatörü: İfadenin tam tersi olarak değerlendirilmesini sağlar. **Örnek Uygulama**```kullaniciAdi = "orcunmadran"sifre = 123456print(kullaniciAdi == "orcunmadran" and sifre == 123456) Çıktı: TruekullaniciAdi = "orcunmadran"sifre = 123456print(kullaniciAdi == "orcunmadran" and not sifre == 123456) Çıktı: FalsecepTel = "05321234567"ePosta = ""print(cepTel == "" or ePosta == "" ) Çıktı: True```# Örnek uygulamayı çalıştır kullaniciAdi = "orcunmadran" sifre = 123456 print(kullaniciAdi == "orcunmadran" and sifre == 123456) kullaniciAdi = "orcunmadran" sifre = 123456 print(kullaniciAdi == "orcunmadran" and not sifre == 123456) cepTel = "05321234567" ePosta = "" print(cepTel == "" or ePosta == "" )True False True**Görev:** Klavyeden girilen kullanıcı adı ve şifrenin kayıtlı bulunan kullanıcı adı ve şifre ile uyuşup uyuşmadığını kontrol edin ve sonucu ekrana yazdırın!#Sistemde yer alan bilgiler: sisKulAdi = "yonetici" sisKulSifre = "bby162" #Klavyeden girilen bilgiler: girKulAdi = input("Kullanıcı Adı: ") girKulSifre = input("Şifre: ") #Kontrol sonuc = sisKulAdi == girKulAdi and sisKulSifre == girKulSifre #Sonuç print(sonuc) kuladı = "yaren" kulsifre = "12345" girkuladı = input("Kullanıcı Adı: ") girkulsifre = input("Şifre: ") sonuc = kuladı == girkuladı and kulsifre == girkulsifre print(sonuc)Kullanıcı Adı: yaren Şifre: 12345 TrueBirden fazla koşulu and ile birleştirebiliyoruz 28 ŞUBAT 2022 PAZARTESİ (Buraya kadar geldik.) If Cümleciği"If" anahtar sözcüğü, verilen ifadenin doğru olup olmadığını kontrol ettikten sonra belirtilen kodu çalıştıran bir koşullu ifade oluşturmak için kullanılır. Python'da kod bloklarının tanımlanması için girinti kullanır. and ve or ile farklı kurgulanabiliyor. **Örnek Uygulama**```acikKavramlar = ["bilim", "erişim", "veri", "eğitim"]kavram = input("Bir açık kavramı yazın: ")if kavram in acikKavramlar: print(kavram + " açık kavramlar listesinde yer alıyor!")``` : ile if cümleciği kapatılıyor.kendine ait olan alt satırların devreye girip girmemesini kontrol ediyor.#Örnek derste deger= 1 deger2= 2 if deger == deger2: print("birbirine eşit") if deger != deger2: print("birbirine eşit değil") #kendi örneğim deger = 1453 deger2 = 1071 if deger == deger2: print("birbirine eşit") if deger != deger2: print("birbirine eşit değil") # Örnek uygulamayı çalıştır acikKavramlar = ["bilim", "erişim", "veri", "eğitim"] kavram = input("Bir açık kavramı yazın: ") if kavram in acikKavramlar: print(kavram + " açık kavramlar listesinde yer alıyor!")Bir açık kavramı yazın: bilim bilim açık kavramlar listesinde yer alıyor!**Görev:** "acikSozluk" içinde yer alan anahtarları (keys) kullanarak eğer klavyeden girilen anahtar kelime sözlükte varsa açıklamasını ekrana yazdırın!acikSozluk = { "" : "B kamu malıdır. Bilimsel yayınlara ve verilere açık erişim bir haktır." 
, "Açık Erişim" : "Kamu kaynakları ile yapılan araştırmalar sonucunda üretilen yayınlara ücretsiz erişim" , "Açık Veri" : "Kamu kaynakları ile yapılan araştırma sonucunda üretilen verilere ücretsiz ve yeniden kullanılabilir biçimde erişim" } anahtar = input("Anahtar Kelime: ") if anahtar in acikSozluk: print(anahtar + " Açık sözlükte yer alıyor!") #IfAnahtar Kelime: Açık Veri Açık Veri Açık sözlükte yer alıyor!Else ve Elif Kullanımı"If" cümleciği içinde ikinci bir ifadenin doğruluğunun kontrolü için "Elif" ifadesi kullanılır. Doğruluğu sorgulanan ifadelerden hiçbiri *True* döndürmediği zaman çalışacak olan kod bloğu "Else" altında yer alan kod bloğudur. **Örnek Uygulama**```gunler = ["Pazartesi", "Çarşamba", "Cuma"]girilen = input("Gün giriniz: ")if girilen == gunler[0]: print("Programlama Dilleri")elif girilen == gunler[1]: print("Kataloglama")elif girilen == gunler[2]: print("Bilimsel İletişim")else : print("Kayıtlı bir gün bilgisi girmediniz!")```# Örnek uygulamayı çalıştır gunler = ["Pazartesi", "Çarşamba", "Cuma"] girilen = input("Gün giriniz: ") if girilen == gunler[0]: print("Programlama Dilleri") elif girilen == gunler[1]: print("Kataloglama") elif girilen == gunler[2]: print("Bilimsel İletişim") else : print("Kayıtlı bir gün bilgisi girmediniz!") gunler = ["Pazartesi", "Salı", "Çarşamba", "Perşembe"] girilen = input("Gün giriniz: ") if girilen == gunler[0]: print("Programlama Dilleri") elif girilen == gunler[1]: print("Türk Dili") elif girilen == gunler[2]: print("Bilimsel İletişim ve Bilgi Erişim") elif girilen == gunler[3]: print("Bilginin Düzenlenmesi") else : print("Kayıtlı bir gün girmediniz! ")Gün giriniz: Salı Elif birden fazla durum kontrol etmek için kullanılıyor. **Görev:** Klavyeden girilen yaş bilgisini kullanarak ekrana aşağıdaki mesajları yazdır:* 21 yaş altı ve 64 yaş üstü kişilere: "Sokağa çıkma yasağı bulunmaktadır!"* Diğer tüm kişilere: "Sokağa çıkma yasağı yoktur!"* Klavyeden yaş harici bir bilgi girişi yapıldığında: "Yaşınızı rakam olarak giriniz!"yas = int(input("Yaşınızı giriniz: ")) if yas < 21: print("Sokağa çıkma yasağı bulunmaktadır!") elif yas > 64: print("Sokağa çıkma yasağı bulunmaktadır!") else: print("Sokağa çıkma yasağı yoktur!")7 MART PAZARTESİ (Buraya kadar geldik.) Bölüm 06: DöngülerBu bölümde:* for döngüsü,* Metin katarlarında for döngüsü kullanımı,* while döngüsü,* break anahtar kelimesi,* continue anahtar kelimesi yer almaktadır. for Döngüsüfor döngüleri belirli komut satırını ya da satırlarını yinelemek (tekrar etmek) için kullanılır. Her yinelemede, for döngüsünde tanımlanan değişken listedeki bir sonraki değere otomatik olarak atanacaktır. **Örnek Uygulama**```for i in range(5): i değerine 0-4 arası indeks değerleri otomatik olarak atanır print(i) Çıktı: Bu komut satırı toplam 5 kere tekrarlanır ve her satırda yeni i değeri yazdırılırkonular = ["", "Açık Erişim", "Açık Veri"] yeni bir liste oluştururfor konu in konular: print(konu) Çıktı: Her bir liste öğesi alt alta satırlara yazdırılır```# Örnek uygulmayı çalıştır for i in range(5): print(i+1) #Sıfırı ekranda görmemek için +1 ekledik. #Ders örneği liste = [] for i in range(3): veri = input("Giriş yap: ") liste.append(veri) print(liste) !range for sonraası indeks numarası atıyor range kullanmadığımızda listenin elemanlarını ekliyor # Örnek uygulmayı çalıştır konular = ["", "Açık Erişim", "Açık Veri", "Açık Donanım"] for konu in konular: print(konu) Açık Erişim Açık Veri Açık Donanım! Liste içindeki eleman sayısı kadar otomatik for döngüsü yapabilir. **Görev:** Bir liste oluşturun. 
Liste öğelerini "for" döngüsü kullanarak ekrana yazdırın!#liste liste = ["elma", "armut", "kivi", "muz"] for yazdır in liste: print(yazdır)elma armut kivi muzMetin Katarlarında for Döngüsü KullanımıMetin Katarları üzerinde gerçekleştirilebilecek işlemler Python'daki listelerle büyük benzerlik taşırlar. Metin Katarını oluşturan öğeler (harfler) liste elemanları gibi "for" döngüsü yardımıyla ekrana yazdırılabilir. **Örnek Uygulama**```cumle = "Bisiklet hem zihni hem bedeni dinç tutar!"for harf in cumle: Cümledeki her bir harfi ekrana satır satır yazdırır print(harf)```# Örnek uygulamayı çalıştır cumle = "Bisiklet hem zihni, hem bedeni dinç tutar!" for harf in cumle: print(harf)B i s i k l e t h e m z i h n i , h e m b e d e n i d i n ç t u t a r !**Görev:** İçinde metin katarı bulunan bir değişken oluşturun. Bu değişkende yer alan her bir harfi bir satıra gelecek şekilde "for" döngüsü ile ekrana yazdırın!#degisken cumle = "" for harf in cumle: print(harf)B e n i m a d ı m Y a r e nwhile Döngüsü"While" döngüsü "if" cümleciğinin ifade şekline benzer. Koşul doğruysa döngüye bağlı kod satırı ya da satırları yürütülür (çalıştırılır). Temel fark, koşul doğru (True) olduğu olduğu sürece bağlı kod satırı ya da satırları çalışmaya devam eder. **Örnek Uygulama**```deger = 1while deger <= 10: print(deger) Bu satır 10 kez tekrarlanacak deger += 1 Bu satır da 10 kez tekrarlanacakprint("Program bitti") Bu satır sadece bir kez çalıştırılacak```# Örnek uygulamayı çalıştır deger = 1 while deger <= 10: print(deger) deger += 1 print("Program bitti")1 2 3 4 5 6 7 8 9 10 Program bittibreak Anahtar KelimesiAsla bitmeyen döngüye sonsuz döngü adı verilir. Döngü koşulu daima doğru (True) olursa, böyle bir döngü sonsuz olur. "Break" anahtar kelimesi geçerli döngüden çıkmak için kullanılır. **Örnek Uygulama**```sayi = 0while True: bu döngü sonsuz bir döngüdür print(sayi) sayi += 1 if sayi >= 5: break sayı değeri 5 olduğunda döngü otomatik olarak sonlanır```# Örnek Uygulamayı çalıştır sayi = 0 while True: print(sayi) sayi += 1 if sayi >= 5: break0 1 2 3 4continue Anahtar Kelimesi"continue" anahtar kelimesi, o anda yürütülen döngü için döngü içindeki kodun geri kalanını atlamak ve "for" veya "while" deyimine geri dönmek için kullanılır. ```for i in range(5): if i == 3: continue i değeri 3 olduğu anda altta yer alan "print" komutu atlanıyor. print(i)```# Örnek Uygulamayı çalıştır for i in range(5): if i == 3: continue print(i)0 1 2 4Belirli bir kısmı atlamak için de kullanıyorduk. **Görev: Tahmin Oyunu**"while" döngüsü kullanarak bir tahmin oyunu tasarla. Bu tahmin oyununda, önceden belirlenmiş olan kelime ile klavyeden girilen kelime karşılaştırılmalı, tahmin doğru ise oyun "Bildiniz..!" mesajı ile sonlanmalı, yanlış ise tahmin hakkı bir daha verilmeli.#Tahmin Oyunu kelime = "bilgi" tahmin = "" print("Kelime tahmin oyununa hoş geldiniz! ") oyuncuismi = input("İsminizi giriniz: ") kelime = "erişim" tahmin = input("Tahmininizi giriniz: " ) while tahmin == kelime: print("Bildiniz") break else: print("Bilemediniz")Kelime tahmin oyununa hoş geldiniz! İsminizi giriniz: Yaren Tahmininizi giriniz: kkk BilemedinizBölüm 07: Fonksiyonlar Fonksiyon Tanımlama (Definition)Fonksiyonlar, yazılan kodu faydalı bloklara bölmenin, daha okunabilir hale getirmenin ve tekrar kullanmaya yardımcı olmanın kullanışlı bir yoludur. Fonksiyonlar "def" anahtar sözcüğü ve ardından fonksiyonun adı kullanılarak tanımlanır. 
**Örnek Uygulama**```def merhaba_dunya(): fonksiyon tanımlama, isimlendirme print("Merhaba Dünya!") fonksiyona dahil kod satırlarıfor i in range(5): merhaba_dunya() fonksiyon 5 kere çağırılacak```# Örnek uygulamayı çalıştır def merhaba_dunya(): # fonksiyon tanımlama, isimlendirme print("Merhaba Dünya!") #fonksiyona dahil kod satırları for i in range(5): merhaba_dunya() # fonksiyon 5 kere çağırılacakFonksiyolarda Parametre KullanımıFonksiyon parametreleri, fonksiyon adından sonra parantez () içinde tanımlanır. Parametre, iletilen bağımsız değişken için değişken adı görevi görür. **Örnek Uygulama**```def foo(x): x bir fonksiyon parametresidir print("x = " + str(x))foo(5) 5 değeri fonksiyona iletilir ve değer olarak kullanılır.```# Örnek uygulamayı çalıştır def foo(x): print("x = " + str(x)) foo(5)x = 5**Görev:** *karsila* fonksiyonunun tetiklenmesi için gerekli kod ve parametleri ekle!def karsila(kAd, kSoyad): print("Hoşgeldin, %s %s" % (kAd, kSoyad))Return DeğeriFonksiyonlar, "return" anahtar sözcüğünü kullanarak fonksiyon sonucunda bir değer döndürebilir. Döndürülen değer bir değişkene atanabilir veya sadece örneğin değeri yazdırmak için kullanılabilir. **Örnek Uygulama**```def iki_sayi_topla(a, b): return a + b hesaplama işleminin sonucu değer olarak döndürülüyorprint(iki_sayi_topla(3, 12)) ekrana işlem sonucu yazdırılacak```# Örnek uygulamayı çalıştır def iki_sayi_topla(a, b): return a + b print(iki_sayi_topla(3, 12))15Varsayılan ParametrelerBazen bir veya daha fazla fonksiyon parametresi için varsayılan bir değer belirtmek yararlı olabilir. Bu, ihtiyaç duyulan parametrelerden daha az argümanla çağrılabilen bir fonksiyon oluşturur. **Örnek Uygulama**```def iki_sayi_carp(a, b=2): return a * bprint(iki_sayi_carp(3, 47)) verilen iki degeri de kullanır print(iki_sayi_carp(3)) verilmeyen 2. değer yerine varsayılanı kullanır```# Örnek uygulamayı çalıştır def iki_sayi_carp(a, b=2): return a * b print(iki_sayi_carp(3, 47)) print(iki_sayi_carp(3))141 6**Örnek Uygulama: Sayısal Loto**Aşağıda temel yapısı aynı olan iki *sayısal loto* uygulaması bulunmaktadır: Fonksiyonsuz ve fonksiyonlu.İlk sayısal loto uygulamasında herhangi bir fonksiyon kullanımı yoktur. Her satırda 1-49 arası 6 adet sayının yer aldığı 6 satır oluşturur.İkinci sayısal loto uygulamsında ise *tahminEt* isimli bir fonksiyon yer almaktadır. Bu fonksiyon varsayılan parametrelere sahiptir ve bu parametreler fonksiyon çağırılırken değiştirilebilir. 
Böylece ilk uygulamadan çok daha geniş seçenekler sunabilir bir hale gelmiştir.#Sayısal Loto örnek uygulama (fonksiyonsuz) from random import randint i = 0 secilenler = [0,0,0,0,0,0] for rastgele in secilenler: while i < len(secilenler): secilen = randint(1, 49) if secilen not in secilenler: secilenler[i] = secilen i+=1 print(sorted(secilenler)) i=0 #Sayısal Loto örnek uygulama (fonksiyonlu) from random import randint def tahminEt(rakam=6, satir=6, baslangic=1, bitis=49): i = 0 secilenler = [] for liste in range(rakam): secilenler.append(0) for olustur in range(satir): while i < len(secilenler): secilen = randint(baslangic, bitis) if secilen not in secilenler: secilenler[i] = secilen i+=1 print(sorted(secilenler)) i=0 tahminEt(10,6,1,60)**Görev:** Bu görev genel olarak fonksiyon bölümünü kapsamaktadır.Daha önce yapmış olduğunuz "" projesini (ya da aşağıda yer alan örneği) fonksiyonlar kullanarak oyun bittiğinde tekrar başlatmaya gerek duyulmadan yeniden oynanabilmesine imkan sağlayacak şekilde yeniden kurgulayın.Oyunun farklı sekansları için farklı fonksiyonlar tanımlayarak oyunu daha optimize hale getirmeye çalışın.Aşağıda bir adam asmaca oyununun temel özellikerine sahip bir örnek yer almaktadır.#Fonksiyonsuz from random import choice adamCan = 3 kelimeler = ["bisiklet", "triatlon", "yüzme", "koşu"] secilenKelime = choice(kelimeler) print(secilenKelime) dizilenKelime = [] for diz in secilenKelime: dizilenKelime.append("_") print(dizilenKelime) while adamCan > 0: girilenHarf = input("Bir harf giriniz: ") canKontrol = girilenHarf in secilenKelime if canKontrol == False: adamCan-=1 i = 0 for kontrol in secilenKelime: if secilenKelime[i] == girilenHarf: dizilenKelime[i] = girilenHarf i+=1 print(dizilenKelime) print("Kalan can: "+ str(adamCan)) #Fonksiyonlu Bölüm 08: Sınıflar ve NesnelerBu bölümde:* Sınıf ve nesne tanımlama,* Değişkenlere erişim,* self parametresi,* init metodu yer almaktadır. Sınıf ve Nesne TanımlamaBir nesne değişkenleri ve fonksiyonları tek bir varlıkta birleştirir. Nesneler değişkenlerini ve fonksiyonlarını sınıflardan alır. Sınıflar bir anlamda nesnelerinizi oluşturmak için kullanılan şablonlardır. Bir nesneyi, fonksiyonların yanı sıra veri içeren tek bir veri yapısı olarak düşünebilirsiniz. Nesnelerin fonksiyonlarına yöntem (metod) denir.**İpucu:** Sınıf isimlerinin baş harfi büyük yazılarak Python içindeki diğer öğelerden (değişken, fonksiyon vb.) daha rahat ayırt edilmeleri sağlanır. **Örnek Uygulama**```class BenimSinifim: yeni bir sınıfın tanımlanması bsDegisken = 4 sınıf içinde yer alan bir değişken def bsFonksiyon(self): sınıf içinde yer alan bir fonksiyon print("Benim sınıfımın fonksiyonundan Merhaba!")benimNesnem = BenimSinifim()``` Değişkenlere ve Fonksiyonlara ErişimSınıftan örneklenen bir nesnenin içindeki bir değişkene ya da fonksiyona erişmek için öncelikle nesnenin adı daha sonra ise değişkenin ya da fonkiyonun adı çağırılmalıdır (Ör: nesneAdi.degiskenAdi). Bir sınıfın farklı örnekleri (nesneleri) içinde tanımlanan değişkenlerin değerleri değiştirebilir. 
**Örnek Uygulama 1**```class BenimSinifim: yeni bir sınıf oluşturur bsDegisken = 3 sınıfın içinde bir değişken tanımlar def bsFonksiyon(self): sınıfın içinde bir fonksiyon tanımlar print("Benim sınıfımın fonksiyonundan Merhaba!")benimNesnem = BenimSinifim() sınıftan yeni bir nesne oluştururfor i in range(benimNesnem.bsDegisken): oluşturulan nesne üzerinden değişkene ve fonksiyona ulaşılır benimNesnem.bsFonksiyon()benimNesnem.bsDegisken = 5 sınıfın içinde tanımlanan değişkene yeni değer atanmasıfor i in range(benimNesnem.bsDegisken): benimNesnem.bsFonksiyon()```# Örnek uygulama 1'i gözlemleyelim class BenimSinifim: bsDegisken = 3 def bsFonksiyon(self): print("Benim sınıfımın fonksiyonundan Merhaba!") benimNesnem = BenimSinifim() for i in range(benimNesnem.bsDegisken): benimNesnem.bsFonksiyon() benimNesnem.bsDegisken = 5 for i in range(benimNesnem.bsDegisken): benimNesnem.bsFonksiyon()Benim sınıfımın fonksiyonundan Merhaba! Benim sınıfımın fonksiyonundan Merhaba! Benim sınıfımın fonksiyonundan Merhaba! Benim sınıfımın fonksiyonundan Merhaba! Benim sınıfımın fonksiyonundan Merhaba! Benim sınıfımın fonksiyonundan Merhaba! Benim sınıfımın fonksiyonundan Merhaba! Benim sınıfımın fonksiyonundan Merhaba!Programı yaz belirli bölümlerini tekrar lkullanma ihtiyacı sınıf. (Büyük parça) **Örnek Uygulama 2**```class Bisiklet: renk = "Kırmızı" vites = 1 def ozellikler(self): ozellikDetay = "Bu bisiklet %s renkli ve %d viteslidir." % (self.renk, self.vites) return ozellikDetaybisiklet1 = Bisiklet()bisiklet2 = Bisiklet()print("Bisiklet 1: " + bisiklet1.ozellikler())bisiklet2.renk = "Sarı"bisiklet2.vites = 22print("Bisiklet 2: " + bisiklet2.ozellikler())```# Örnek uygulama 2'i gözlemleyelim class Bisiklet: renk = "Kırmızı" vites = 1 def ozellikler(self): ozellikDetay = "Bu bisiklet %s renkli ve %d viteslidir." % (self.renk, self.vites) return ozellikDetay bisiklet1 = Bisiklet() bisiklet2 = Bisiklet() print("Bisiklet 1: " + bisiklet1.ozellikler()) bisiklet2.renk = "Sarı" bisiklet2.vites = 22 print("Bisiklet 2: " + bisiklet2.ozellikler())self Parametresi"self" parametresi bir Python kuralıdır. "self", herhangi bir sınıf yöntemine iletilen ilk parametredir. Python, oluşturulan nesneyi belirtmek için self parametresini kullanır. **Örnek Uygulama**Aşağıdaki örnek uygulamada **Bisiklet** sınıfının değişkenleri olan *renk* ve *bisiklet*, sınıf içindeki fonksiyonda **self** parametresi ile birlikte kullanılmaktadır. Bu kullanım şekli sınıftan oluşturulan nesnelerin tanımlanmış değişkenlere ulaşabilmeleri için gereklidir.```class Bisiklet: renk = "Kırmızı" vites = 1 def ozellikler(self): ozellikDetay = "Bu bisiklet %s renkli ve %d viteslidir." % (self.renk, self.vites) return ozellikDetay```# Örnek uygulamada "self" tanımlaması yapılmadığı zaman döndürülen hata kodunu inceleyin class Bisiklet: renk = "Kırmızı" vites = 1 def ozellikler(self): ozellikDetay = "Bu bisiklet %s renkli ve %d viteslidir." % (renk, vites) #tanımlama eksik return ozellikDetay bisiklet1 = Bisiklet() bisiklet2 = Bisiklet() print("Bisiklet 1: " + bisiklet1.ozellikler()) bisiklet2.renk = "Sarı" bisiklet2.vites = 22 print("Bisiklet 2: " + bisiklet2.ozellikler())__init__ Metodu__init__ fonksiyonu, oluşturduğu nesneleri başlatmak için kullanılır. init "başlat" ın kısaltmasıdır. __init__() her zaman yaratılan nesneye atıfta bulunan en az bir argüman alır: "self". **Örnek Uygulama**Aşağıdaki örnek uygulamada *sporDali* sınıfının içinde tanımlanan **init** fonksiyonu, sınıf oluşturulduğu anda çalışmaya başlamaktadır. 
Fonksiyonun ayrıca çağırılmasına gerek kalmamıştır.```class sporDali: sporlar = ["Yüzme", "Bisiklet", "Koşu"] def __init__(self): for spor in self.sporlar: print(spor + " bir triatlon branşıdır.")triatlon = sporDali()```# Örnek uygulamayı çalıştır class sporDali: sporlar = ["Yüzme", "Bisiklet", "Koşu"] def __init__(self): for spor in self.sporlar: print(spor + " bir triatlon branşıdır.") triatlon = sporDali() # Örnek uygulamayı > Duatlon class sporDali: sporlar = ["Yüzme", "Bisiklet", "Koşu"] def __init__(self): for spor in self.sporlar: print(spor + " bir triatlon branşıdır.") triatlon = sporDali()Yüzme bir triatlon branşıdır. Bisiklet bir triatlon branşıdır. Koşu bir triatlon branşıdır.Bölüm 09: Modüller ve Paketler Modülün İçe AktarılmasıPython'daki modüller, Python tanımlarını (sınıflar, fonksiyonlar vb.) ve ifadelerini (değişkenler, listeler, sözlükler vb.) içeren .py uzantısına sahip Python dosyalarıdır.Modüller, *import* anahtar sözcüğü ve uzantı olmadan dosya adı kullanılarak içe aktarılır. Bir modül, çalışan bir Python betiğine ilk kez yüklendiğinde, modüldeki kodun bir kez çalıştırılmasıyla başlatılır. **Örnek Uygulama**```bisiklet.py adlı modülün içeriği"""Bu modül içinde Bisiklet sınıfı yer almaktadır."""class Bisiklet: renk = "Kırmızı" vites = 1 def ozellikler(self): ozellikDetay = "Bu bisiklet %s renkli ve %d viteslidir." % (self.renk, self.vites) return ozellikDetay``````bisikletler.py adlı Python dosyasının içeriğiimport bisikletbisiklet1 = bisiklet.Bisiklet()print("Bisiklet 1: " + bisiklet1.ozellikler())``` **PyCharm Örneği**![bisiklet.py](http://www.madran.net/wp-content/uploads/2020/05/bisikletPY.png) bisiklet.py---![alt text](http://www.madran.net/wp-content/uploads/2020/05/bisikletlerPY.png)bisikletler.py Colab'de Modülün İçe AktarılmasıBir önceki bölümde (Modülün İçe Aktarılması) herhangi bir kişisel bilgisayarın sabit diski üzerinde çalışırken yerleşik olmayan (kendi yazdığımız) modülün içe aktarılması yer aldı.Bu bölümde ise Colab üzerinde çalışırken yerleşik olmayan bir modülü nasıl içe aktarılacağı yer almakta. **Örnek Uygulama**Aşağıda içeriği görüntülenen *bisiklet.py* adlı Python dosyası Google Drive içerisinde "BBY162_Python_a_Giris.ipynb" dosyasının ile aynı klasör içinde bulunmaktadır.```bisiklet.py adlı modülün içeriği"""Bu modül içinde Bisiklet sınıfı yer almaktadır."""class Bisiklet: renk = "Kırmızı" vites = 1 def ozellikler(self): ozellikDetay = "Bu bisiklet %s renkli ve %d viteslidir." % (self.renk, self.vites) return ozellikDetay```# Google Drive'ın bir disk olarak görülmesi from google.colab import drive drive.mount('gdrive') # bağlanan diskin 'gdrive' adı ile tanımlanması. import sys # bağlanan diskin fiziksel yolunun tespit edilmesi ve bağlantı yoluna eklenmesi sys.path.append('/content/gdrive/My Drive/Colab Notebooks/BBY162 - Programlama ve Algoritmalar/') import bisiklet # bisiklet.py içerisindeki 'bisiklet' modülünün içe aktarılması bisiklet1 = bisiklet.Bisiklet() print("Bisiklet 1: " + bisiklet1.ozellikler())Yerleşik Modüller (built-in)Python aşağıdaki bağlantıda yer alan standart modüllerle birlikte gelir. Bu modüllerin *import* anahtar kelimesi ile çağrılması yeterlidir. 
Ayrıca bu modüllerin yüklenmesine gerek yoktur.[Python Standart Modülleri](https://docs.python.org/3/library/) **Örnek Uygulama**```import datetimeprint(datetime.datetime.today())```# Örnek uygulamayı çalıştır import datetime print(datetime.datetime.today())2022-05-03 20:28:29.347449from import Kullanımıİçe aktarma ifadesinin bir başka kullanım şekli *from* anahtar kelimesinin kullanılmasıdır. *from* ifadesi ile modül adları paketin içinde alınarak direkt kullanıma hazır hale getirilir. Bu şekilde, içe aktarılan modül, modül_adı öneki olmadan doğrudan kullanılır. **Örnek Uygulama**```bisiklet.py adlı modülün içeriği"""Bu modül içinde Bisiklet sınıfı yer almaktadır."""class Bisiklet: renk = "Kırmızı" vites = 1 def ozellikler(self): ozellikDetay = "Bu bisiklet %s renkli ve %d viteslidir." % (self.renk, self.vites) return ozellikDetay```# Google Drive'ın bir disk olarak görülmesi from google.colab import drive drive.mount('gdrive') # bağlanan diskin 'gdrive' adı ile tanımlanması. import sys # bağlanan diskin fiziksel yolunun tespit edilmesi ve bağlantı yoluna eklenmesi sys.path.append('/content/gdrive/My Drive/Colab Notebooks/BBY162 - Programlama ve Algoritmalar/') from bisiklet import Bisiklet # bisiklet.py içerisindeki 'bisiklet' sınıfının içe aktarılması bisiklet1 = Bisiklet() # bisiklet ön tanımlamasına gerek kalmadı print("Bisiklet 1: " + bisiklet1.ozellikler())Bölüm 10: Dosya İşlemleri Dosya OkumaPython, bilgisayarınızdaki bir dosyadan bilgi okumak ve yazmak için bir dizi yerleşik fonksiyona sahiptir. **open** fonksiyonu bir dosyayı açmak için kullanılır. Dosya, okuma modunda (ikinci argüman olarak "r" kullanılarak) veya yazma modunda (ikinci argüman olarak "w" kullanılarak) açılabilir. **open** fonksiyonu dosya nesnesini döndürür. Dosyanın saklanması için kapatılması gerekir. **Örnek Uygulama**```Google Drive Bağlantısıfrom google.colab import drivedrive.mount('/gdrive')dosya = "/gdrive/My Drive/Colab Notebooks/BBY162 - Programlama ve Algoritmalar/metin.txt"f = open(dosya, "r") for line in f.readlines(): print(line)f.close()```Dosyanın sağlıklı şekilde okunabilmesi için Google Drive ile bağlantının kurulmuş olması ve okunacak dosyanın yolunun tam olarak belirtilmesi gerekmektedir.![Google Drive Colab Klasörü](http://www.madran.net/wp-content/uploads/2020/05/driveMetin.png)#Google Drive Bağlantısı from google.colab import drive drive.mount('/gdrive') dosya = "/gdrive/My Drive/Colab Notebooks/BBY162 - Programlama ve Algoritmalar/metin.txt" f = open(dosya, "r") for line in f.readlines(): print(line) f.close() from google.colab import drive drive.mount('/content/drive')Mounted at /content/driveDosya YazmaBir dosyayı ikinci argüman olarak "w" (yazma) kullanarak açarsanız, yeni bir boş dosya oluşturulur. Aynı ada sahip başka bir dosya varsa silineceğini unutmayın. Mevcut bir dosyaya içerik eklemek istiyorsanız "a" (ekleme) değiştiricisini kullanmalısınız. **Örnek Uygulama**Aşağıdaki örnekte dosya 'w' parametresi ile açıldığı için var olan dosyanın içindekiler silinir ve yeni veriler dosyaya yazılır. 
Dosyanın içindeki verilerin kalması ve yeni verilerin eklenmesi isteniyorsa dosya 'a' parametresi ile açılmalıdır.```Google Drive Bağlantısıfrom google.colab import drivedrive.mount('/gdrive') dosya = "/gdrive/My Drive/Colab Notebooks/BBY162 - Programlama ve Algoritmalar/cikti.txt"f = open(dosya, 'w') Mevcut veriye ek veri yazılması için parametre: 'a'f.write("test") Her yeni verinin bir alt satıra yazdırılması "test\n"f.close()```Kod çalıştırıldıktan sonra eğer *cikti.txt* adında bir dosya yoksa otomatik olarak oluşturulur ve istenilen içerik yazılır.![Google Drive Colab Klasörü](http://www.madran.net/wp-content/uploads/2020/05/driveColab.png)#Google Drive Bağlantısı from google.colab import drive drive.mount('/gdrive') dosya = "/gdrive/My Drive/Colab Notebooks/BBY162 - Programlama ve Algoritmalar/cikti.txt" f = open(dosya, 'w') # Mevcut veriye ek veri yazılması için parametre: 'a' f.write("test") # Her yeni verinin bir alt satıra yazdırılması "test\n" f.close()PacWave Resource AssessmentThis example notebook provides an example using MHKiT to perform a resource assessment similar to Dunkel et. al at the PACWAVE site following the IEC 62600-101 where applicable. PacWave is an open ocean, grid-connected, full-scale test facility consisting of two sites (PacWave-North & PacWave-South) for wave energy conversion technology testing located just a few miles from the deep-water port of Newport, Oregon. This example notebook performs a resource analysis using omni-directional wave data from a nearby NDBC buoy and replicates plots created by Dunkel et. al and prescribed by IEC TS 62600-101 using these data.Note: this example notebook requires the Python package folium which is not a requirement of MHKiT and may need to be pip installed seperately.Dunkle, Gabrielle, et al. "PacWave Wave Resource Assessment." (2020).import mhkit from mhkit.wave import resource, performance, graphics from sklearn.mixture import GaussianMixture from mhkit.wave.io import ndbc import matplotlib.pyplot as plt from matplotlib import colors from scipy import stats import pandas as pd import numpy as np import folium import os import matplotlib.pylab as pylab params = {'legend.fontsize': 'x-large', 'figure.figsize': (15, 5), 'axes.labelsize': 'x-large', 'axes.titlesize':'x-large', 'xtick.labelsize':'x-large', 'ytick.labelsize':'x-large'} pylab.rcParams.update(params)Buoy LocationThe closest NDBC buoy to PacWave is located further from shore than the PacWave sites, as can be seen in the map below. The buoy used in this analysis is NDBC buoy 46050 (blue marker). The analysis here will focus on PacWave-South (red marker) which is approximately 40 km closer to shore than the NDBC buoy. PacWave-North is shown by the green marker.m = folium.Map(location=[44.613600975457715, -123.74317583354498], zoom_start=9, tiles="Stamen Terrain", control_scale = True) tooltip = "NDBC 46050" folium.Marker([44.669, -124.546], popup=" Water depth: 160 m", tooltip=tooltip).add_to(m) tooltip = "PACWAVE North" folium.Marker([44.69, -124.13472222222222], tooltip=tooltip, icon=folium.Icon(color='green',icon="th-large")).add_to(m) tooltip = "PACWAVE South" folium.Marker([44.58444444444444, -124.2125], tooltip=tooltip, icon=folium.Icon(color='red', icon="th")).add_to(m) m.save("index.png") mImport Data from NDBCNDBC buoy data can be imported using MHKiT's NDBC submodule. At the top of this script, we have already imported this module using the call `from mhkit.wave.io import ndbc`. 
Here, we will use the `available_data` function to ask the NDBC servers what data is available for buoy 46050. We will also need to specify what parameter we are interested in. In this case, we are requesting spectral wave density data for which NDBC uses the keyword 'swden'. We then pass the data of interest using the filenames of the returned data into the `request_data` function to retrieve the requested data from the NDBC servers.# Spectral wave density for buoy 46050 parameter = 'swden' buoy_number = '46050' # Request list of available files ndbc_available_data= ndbc.available_data(parameter, buoy_number) # Pass file names to NDBC and request the data filenames = ndbc_available_data['filename'] ndbc_requested_data = ndbc.request_data(parameter, filenames) ndbc_requested_data['2020']Create DateTime indexThe data returned from NDBC include separate columns for year, month, day, etc. as shown above. MHKiT has a built-in function to convert these separate columns into a DateTime index for the DataFrame and remove these date and time columns from the data, leaving only the frequency data. The resultant DataFrame is shown below.ndbc_data={} # Create a Datetime Index and remove NOAA date columns for each year for year in ndbc_requested_data: year_data = ndbc_requested_data[year] ndbc_data[year] = ndbc.to_datetime_index(parameter, year_data) ndbc_data['2020']Calculate QoIs from Spectral DataHere we will calculate quantities of interest (QoIs) using the spectral data by applying the appropriate MHKiT function, appending the results to a list, then combining all the lists into a single DataFrame.# Initialize empty lists to store the results from each year Hm0_list=[] Te_list=[] J_list=[] Tp_list=[] Tz_list=[] # Iterate over each year and save the results in the initialized lists for year in ndbc_data: data_raw = ndbc_data[year] year_data = data_raw[data_raw != 999.0].dropna() Hm0_list.append(resource.significant_wave_height(year_data.T)) Te_list.append(resource.energy_period(year_data.T)) J_list.append(resource.energy_flux(year_data.T, h=399.)) Tp_list.append(resource.peak_period(year_data.T)) Tz_list.append(resource.average_zero_crossing_period(year_data.T)) # Concatenate list of Series into a single DataFrame Te = pd.concat(Te_list ,axis=0) Tp = pd.concat(Tp_list ,axis=0) Hm0 = pd.concat(Hm0_list ,axis=0) J = pd.concat(J_list ,axis=0) Tz = pd.concat(Tz_list ,axis=0) data = pd.concat([Hm0, Te, Tp, J, Tz],axis=1) # Calculate wave steepness data['Sm'] = data.Hm0 / (9.81/(2*np.pi) * data.Tz**2) # Drop any NaNs created from the calculation of Hm0 or Te data.dropna(inplace=True) # Sort the DateTime index data.sort_index(inplace=True) #data/Users/rpauly/Library/Python/3.7/lib/python/site-packages/pandas/core/series.py:679: RuntimeWarning: overflow encountered in sinhAverage Annual Energy TableA comparison of a resource's most energetic sea states with how frequently they occur can be performed using the `plot_avg_annual_energy_matrix` function. For a given set of data, the function will bin the data by Hm0 and Te. Within each bin, the average annual power and the average number of hours are calculated. A contour of the average annual power is plotted in each bin, and the average number of hours that sea state occurs is plotted as text in the box.The figure below shows that the most frequent sea state occurs on average 527 hours per year at an energy period of 7.5 s and a significant wave height of 1.25 m. Dunkle et al.
reported a similar most frequent sea state with a slightly longer energy period at 8.5 s and having a 1.75-m wave height for 528 hours per year. The highest average annual energy sea state at buoy 46050 occurs at an energy period of 9.5 s and a significant wave height of 2.75 m and occurs on average for 168 hours per year. Further, Dunkle et al. reported the most energetic sea state on average to occur at 2.75 m and 10.5 s for 231 hours per year.# Start by cleaning the data of outliers data_clean = data[data.Hm0 < 20] sigma = data_clean.J.std() data_clean = data_clean[data_clean.J > (data_clean.J.mean() - 0.9* sigma)] # Organizing the cleaned data Hm0=data_clean.Hm0 Te=data_clean.Te J=data_clean.J # Setting the bins for the resource frequency and power distribution Hm0_bin_size = 0.5 Hm0_edges = np.arange(0,15+Hm0_bin_size,Hm0_bin_size) Te_bin_size = 1 Te_edges = np.arange(0, 20+Te_bin_size,Te_bin_size) fig = mhkit.wave.graphics.plot_avg_annual_energy_matrix(Hm0, Te, J, Hm0_edges=Hm0_edges, Te_edges=Te_edges)Wave Power by MonthWe can create a plot of monthly statistics for a quantity of interest as shown in the code and plots below. These plots show the median value of a month over the dataset timeframe and bound the value by its 25th and 75th percentile by a shaded region.Comparing the plots below to the analysis performed by Dunkle et. al we can see that in the top subplot the significant wave height has a maximum mean value in December at 3.11 m, which corresponds well with Fig. 5 in Dunkel et. al. The higher significant wave height also brings higher variability in the winter months than in the summer months, which shows a minimum value around 1.44 m in August. The second and third subplots below show an energy period and peak period each having a maximum value in January at 10.3 s and 12.5 s, respectively. The minimums also correspond to the same month of July with values of 7.12 s, and 8.33 s for the energy period and peak period, respectively. Dunkle et al. report a minimum energy period value of 8.5 s in July and a maximum energy period of 11.3 s in February and do not report peak period monthly statistics.The maximum energy flux occurs in December at 48889 kW/m while the minimum occurs in August at 7212 kW/m. These values come in lower than the results from Dunkel et. al., which report values ranging between 70 and 80 kW/m in the winter months and a mean around 20 kW/m in the summer months. The average monthly steepness stays relatively constant throughout the year, ranging between 0.0265 and 0.0313. A discussion of monthly wave steepness was not held in Dunkel et. 
al but would be interesting to compare for the PACWAVE-South site.months=data_clean.index.month data_group=data_clean.groupby(months) QoIs = data_clean.keys() fig, axs = plt.subplots(len(QoIs),1, figsize=(8, 12), sharex=True) #shade between 25% and 75% QoIs = data_clean.keys() for i in range(len(QoIs)): QoI = QoIs[i] axs[i].plot(data_group.median()[QoI], marker='.') axs[i].fill_between(months.unique(), data_group.describe()[QoI, '25%'], data_group.describe()[QoI, '75%'], alpha=0.2) axs[i].grid() mx = data_group.median()[QoI].max() mx_month= data_group.median()[QoI].argmax()+1 mn = data_group.median()[QoI].min() mn_month= data_group.median()[QoI].argmin()+1 print('--------------------------------------------') print(f'{QoI} max:{np.round(mx,4)}, month: {mx_month}') print(f'{QoI} min:{np.round(mn,4)}, month: {mn_month}') plt.setp(axs[5], xlabel='Month') plt.setp(axs[0], ylabel=f'{QoIs[0]} [m]') plt.setp(axs[1], ylabel=f'{QoIs[1]} [s]') plt.setp(axs[2], ylabel=f'{QoIs[2]} [s]') plt.setp(axs[3], ylabel=f'{QoIs[3]} [kW/M]') plt.setp(axs[4], ylabel=f'{QoIs[4]} [s]') plt.setp(axs[5], ylabel=f'{QoIs[5]} [ ]') plt.tight_layout() plt.savefig('40650QoIs.png')-------------------------------------------- Hm0 max:3.1134, month: 12 Hm0 min:1.4445, month: 8 -------------------------------------------- Te max:10.305, month: 1 Te min:7.1154, month: 7 -------------------------------------------- Tp max:12.5, month: 1 Tp min:8.3333, month: 7 -------------------------------------------- J max:48889.5105, month: 12 J min:7212.2974, month: 8 -------------------------------------------- Tz max:7.9858, month: 1 Tz min:5.7369, month: 7 -------------------------------------------- Sm max:0.0313, month: 12 Sm min:0.0265, month: 9Monthly Cumulative DistributionA cumulative distribution of the energy flux, as described in the IEC TS 62600-101 is created using MHKiT as shown below. The summer months have a lower maximum energy flux and are found left of the black data line representing the cumulative distribution of all collected data. April and October most closely follow the overall energy flux distribution while the winter months show less variation than the summer months in their distribution.ax = graphics.monthly_cumulative_distribution(data_clean.J) plt.xlim([1000, 1E6])Extreme Sea StatesFifty and one-hundred year contours were created using the environmental contours functions. The environmental contours function in MHKiT was adapted from the Wave Energy Converter Design Response Toolbox (WDRT) [Coe et. al, 2016]. The methodologies for calculating environmental contours are an active area of research and differences in methodology can be seen when comparing to the results. Dunkle et al. present 16.68 s and 12.49 m as the peak energy period and significant wave height for the 50-year contour, whereas the methodology applied in MHKiT returns a 50-year peak at 15.71 m and 16.24 s. Dunkle et al. present a peak for the 100-year contour at 13.19 m and 16.85 s, whereas the MHKiT functionality returns 16.62 m and 16.43 s. Coe, , and . (2016, March 30). WEC Design Response Toolbox v. 1.0 (Version 00) [Computer software]. 
https://www.osti.gov//servlets/purl/1312743.# Delta time of sea-states dt = (data_clean.index[2]-data_clean.index[1]).seconds # Return period (years) of interest period = 100 Hm0_contour_100, Te_contour_100, PCA = resource.environmental_contour(data.Hm0, data.Te, dt, period, return_PCA=True) period = 50 Hm0_contour_50, Te_contour_50 = resource.environmental_contour(data.Hm0, data.Te, dt, period, PCA=PCA) fig, ax = plt.subplots(figsize=(9,4)) ax = graphics.plot_environmental_contour(np.array(data_clean.Te), np.array(data_clean.Hm0), np.array([Te_contour_50, Te_contour_100]).T, np.array([Hm0_contour_50,Hm0_contour_100]).T , data_label='NDBC 46050', contour_label=['50 Year Contour','100 Year Contour'], x_label = 'Energy Period, $Te$ [s]', y_label = 'Sig. wave height, $Hm0$ [m]', ax=ax) plt.legend(loc='upper left') plt.tight_layout() print(Hm0_contour_50.max(), Te_contour_50[Hm0_contour_50.argmax()]) print(Hm0_contour_100.max(), Te_contour_100[Hm0_contour_100.argmax()])15.709501488494444 16.235216168537534 16.627857736073373 16.435667814334238Calculate Sea State Representative ClustersThe above methodology discusses ways to apply MHKiT and industry standards to characterize a wave energy resource. When optimizing a WEC design through simulation it is customary to consider a limited number of representative sea states as the full set of sea states is intractable to simulate during design optimization. Down-selecting a limited number of sea states results in loss of information. The following analysis will compare a full 2D histogram result to several representative sea state clusters typically referred to as K-means clusters.We first calculate the total energy flux for the provided data.nHours = (data_clean.index[1] - data_clean.index[0]).seconds/3600 Total = data_clean.J.sum() * nHours print(f'{Total} (W*hr)/m')6729597955.922245 (W*hr)/mDown Select by 2D Histogram Knowing the total energy flux we may calculate the 2D histogram result and compare it to the total result. We expect this value to near unity. While we have down-selected the number of sea states the histogram is still 100 bins and this is still not generally tractable for WEC optimization. We could continue to reduce the number of histogram bins, however, in sticking with this technique we are generally constrained to a cartesian set of bins.Jsum, xe, ye, bn = stats.binned_statistic_2d(data_clean.Hm0, data_clean.Te, data_clean.J, statistic='sum')#,bins=[Te_bins, Hm0_bins]) hist_result = np.round(Jsum.sum().sum()/Total,4) print(f'{hist_result} = (2D Histogram J) / (1-year total J) ')1.0 = (2D Histogram J) / (1-year total J)Down Select by K-means clustersBy choosing a limited number of data clusters we should be able to choose some minimum representative number for our sea state which still captures the resource well. To calculate these we will use a Gaussian-mixture model (a more general K-means method) which allows us to assign data points to an individual cluster and assign weights based on the density of points in that cluster. We will consider varying numbers of clusters (`N=[4, 8, 16, 32, 64]`) and then calculate the representative energy in each cluster by representing each sea state as a Bretchnider spectrum. 
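For a single cluster, the per-sea-state calculation described above might look like the following sketch; it reuses the same MHKiT calls that appear in the cells below, but the frequency grid and the (Tp, Hm0) pair are placeholders chosen for illustration, not values from this analysis.

```python
# Minimal sketch (illustrative values only): represent one sea state as a
# Bretschneider spectrum and compute its omni-directional energy flux.
import numpy as np
from mhkit.wave import resource

f = np.linspace(0.0325, 0.485, 47)   # placeholder frequency grid [Hz]
Tp, Hm0 = 10.0, 2.5                  # hypothetical representative sea state
S = resource.bretschneider_spectrum(f, Tp, Hm0)
J = resource.energy_flux(S, h=399.)  # same water depth used elsewhere in this notebook
print(float(J.values[0][0]), 'W/m')
```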
In the following code, we will create a plot for each of our N clusters and show where the provided data points fall within each cluster.# Compute Gaussian Mixture Model for each number of clusters Ns= [4, 8, 16, 32, 64] X = np.vstack((data_clean.Te.values, data_clean.Hm0.values)).T fig, axs = plt.subplots(len(Ns),1, figsize=(8, 24), sharex=True) results={} for N in Ns: gmm = GaussianMixture(n_components=N).fit(X) # Save centers and weights result = pd.DataFrame(gmm.means_, columns=['Te','Hm0']) result['weights'] = gmm.weights_ result['Tp'] = result.Te / 0.858 results[N] = result labels = gmm.predict(X) i = Ns.index(N) axs[i].scatter(data_clean.Te.values, data_clean.Hm0.values, c=labels, s=40) axs[i].plot(result.Te, result.Hm0, 'm+') axs[i].title.set_text(f'{N} Clusters') plt.setp(axs[i], ylabel='Energy Period, $T_e$ [s]') plt.setp(axs[len(Ns)-1], xlabel='Sig. wave height, $Hm0$ [m')Compare Representative Sea State Energy to TotalLastly, we will compare each sea state's representative energy to the original total energy of the dataset. As expected we observe increasing agreement with the original total energy as the number of clusters increases.w = ndbc_data[year].columns.values f = w / 2*np.pi for N in results: result = results[N] J=[] for i in range(len(result)): b = resource.bretschneider_spectrum(f, result.Tp[i], result.Hm0[i]) J.extend([resource.energy_flux(b, h=399.).values[0][0]]) result['J'] = J results[N] = result ratios={} for N in results: J_hr = results[N].J*len(data_clean) total_weighted_J= (J_hr * results[N].weights).sum() normalized_weighted_J = total_weighted_J / Total ratios[N] = np.round(normalized_weighted_J, 4) pd.Series(ratios)/Users/rpauly/Library/Python/3.7/lib/python/site-packages/pandas/core/series.py:679: RuntimeWarning: overflow encountered in sinhManaging flowsheets Retrieve any Unit, Stream or System object by ID `find` has access to Flowsheet objects where all BioSTEAM objects are registered. The main flowsheet defaults to the 'Default' flowsheet:import biosteam as bst bst.findFind a Unit object:bst.Stream.species = bst.Species('Water', 'Ethanol') unit = bst.units.Mixer('M1') bst.find('M1')Find a Stream object:bst.find('d1')Stream: d1 from Mixer-M1 phase: 'l', T: 298.15 K, P: 101325 Pa flow: 0All Unit objects can be viewed as a diagram:bst.units.Mixer('M2') bst.find.diagram()All Stream, Unit, and System objects are stored as Register objects in `find`:bst.find.stream bst.find.unit bst.find.systemAccess items in a register:bst.find.unit.M1Switch between flowsheets A new flowsheet may be created and set as the main flowsheet:bst.find.set_flowsheet(bst.Flowsheet('new_flowsheet')) bst.findNow all new objects will be registered in the new flowsheet:unit = bst.units.Mixer('M3') bst.find.diagram()Note that objects in the original flowsheet are not defined anymore and searching them would raise an error:bst.find('M1')All Flowsheet objects are added to the `flowsheet` registry. 
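Since the flowsheet registry ties these pieces together, here is a consolidated sketch of the round trip under the same legacy `bst.find` API shown in this notebook: create a scratch flowsheet, register a unit in it, then switch back through the registry. The flowsheet and unit names used here are made up for illustration.

```python
# Sketch only: mirrors the calls shown in this notebook; names are illustrative.
import biosteam as bst

bst.Stream.species = bst.Species('Water', 'Ethanol')  # species must be set before creating units

bst.find.set_flowsheet(bst.Flowsheet('scratch'))      # new flowsheet becomes the main one
bst.units.Mixer('M10')                                # registered under 'scratch'
bst.find.diagram()                                    # shows only the objects in 'scratch'

# All Flowsheet objects sit in the `flowsheet` registry, so switching back
# only requires the key of the flowsheet we want.
bst.find.set_flowsheet(bst.find.flowsheet['default'])
bst.find                                              # main flowsheet is 'default' again
```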
Switching between flowsheets is easy:bst.find.set_flowsheet(bst.find.flowsheet['default']) # Switch back to default flowsheet bst.findAs an example, the `lipidcane` biorefinery defines its own flowsheet and leaves it as the main flowsheet when you import it:from biosteam.biorefineries.lipidcane import system bst.find bst.find.diagram()Deep Learning with PyTorchAuthor: [](http://teleported.in/) 1.7 Tensors on GPUimport torchCheck if your machine has GPU supportif torch.cuda.is_available(): print("GPU Supported") else: print("GPU Not Supported")Check the number of GPUs attached to this machinetorch.cuda.device_count()Get device nametorch.cuda.get_device_name(0)Moving a Tensor to GPUt = torch.FloatTensor([2, 3]) print(t) t = t.cuda(0)Creating a Tensor on GPU, directlyt = torch.cuda.FloatTensor([2, 3]) print(t)Bring it back to CPUt = t.cpu() print(t)Use device contextwith torch.cuda.device(0): t = torch.cuda.FloatTensor([2, 3]) print(t)deseq2 pipeline in rpy2%load_ext autoreload %autoreload 2 import os import uuid import pylab as plt import matplotlib from IPython.display import display, HTML, Image import numpy as np import pandas as pd %matplotlib inlineLoad test datacount_data = pd.read_pickle('../static/data/debugging/data_df.p') count_data = count_data.drop('gene_id', axis=1) count_data.head() col_data = pd.read_pickle('../static/data/debugging/design_df.p') col_dataRun DESeq2from rpy2.robjects.packages import importr from rpy2 import robjects from rpy2.robjects import Formula from rpy2.robjects import pandas2ri pandas2ri.activate() deseq = importr('DESeq2') grdevices = importr('grDevices') def to_pd_df(r_df): pd_df = pandas2ri.ri2py_dataframe(r_df) pd_df.index = r_df.rownames return pd_df def run_deseq(count_data, col_data, keep_threshold, case, control): design = Formula("~ group") dds = deseq.DESeqDataSetFromMatrix(countData=count_data, colData=col_data, design=design) sv = robjects.StrVector(col_data['group'].values) condition = robjects.FactorVector(sv) runs = col_data.index rstring = """ function(dds, condition, runs, keepThreshold, case, control) { # collapse technical replicates dds$condition <- condition dds$condition <- relevel(dds$condition, ref=control) # set control dds$sample <- runs dds$run <- runs ddsColl <- collapseReplicates(dds, dds$sample, dds$run) # count filter keep <- rowSums(counts(ddsColl)) >= keepThreshold ddsColl <- ddsColl[keep,] # run DESeq2 analysis ddsAnalysis <- DESeq(dds) res <- results(ddsAnalysis, contrast=c("group", control, case)) resOrdered <- res[order(res$padj),] # sort by p-adjusted values df = as.data.frame(resOrdered) rld <- as.data.frame(assay(rlog(dds, blind=FALSE))) list(df, rld, resOrdered) } """ rfunc = robjects.r(rstring) results = rfunc(dds, condition, runs, keep_threshold, case, control) pd_df = to_pd_df(results[0]) rld_df = to_pd_df(results[1]) res_ordered = results[2] return pd_df, rld_df, res_ordered pd_df, rld_df, res_ordered = run_deseq(count_data, col_data, 10, 'HK', 'UN') pd_df.head(20)Make some plotsdef plot_notebook(rfunc, res): fn = '{uuid}.png'.format(uuid = uuid.uuid4()) grdevices.png(fn) rfunc(res) grdevices.dev_off() return Image(filename=fn) rfunc = robjects.r('function(res) { plotMA(res, ylim=c(-2,2)) }') plot_notebook(rfunc, res_ordered) import pandas as pd import numpy as np from sklearn.decomposition import PCA df = rld_df.transpose() n_components = 10 pca = PCA(n_components=n_components) X = pca.fit_transform(df) np.cumsum(pca.explained_variance_ratio_) X.shape fig, ax = plt.subplots() ax.scatter(X[:, 0], X[:, 1]) for 
i, txt in enumerate(df.index): ax.annotate(txt, (X[i, 0], X[i, 1])) plt.tight_layout()Name: VaishaliStudent Number: 200417634Email ID: Encoding a message signal into the carrier signal to generate the original message by decoding function. In this interactive python notebook, we will examine the process of encoding a text message into binary form and transmitting it with a carrier signal. Then using the decoding function get the original text message.import math import struct import numpy as np from scipy import signal as sg import matplotlib import matplotlib.pyplot as plt import binasciiPart 1 : Creating a Carrier Signal Here, we are converting the ASCII message into a binary. To do so, we have to generate a string and continue to append each element of that string into our list.In the binary characters, we get unnecessary spaces. So, we will get rid of them by checking and passing it over every time we encounter them. Converting ascii message to binary form.asciiMessage = "good" binMessage = ''.join(format(ord(x),'b')for x in asciiMessage) binMessage message=[] for bit in binMessage: message.append(int(bit))Let us begin with creating a carrier signal. It will help in encoding the original text message into a transmissible signal. The carrier signal is usually a sinusoidal wave.To do so, we will generate the sampling points for the carrier signal.Now we have to figure out the period of the wave, which is done by dividing the number of samples by the number of cycles per bit.Then, we get the total number of samples from the 'message' list.spb = 50 #samples per bit cpb = 2 #cycles per bit T = spb/cpb #period(in samples) samples = spb*len(binMessage) #Total Samples samplesNow we will generate the carrier signal as we have determined the number of samples required to encode the original message. The amplitude of our carrier signal is 1, and the frequency is the number of samples per bit.x = np.arange(samples) carrier = np.sin(2*np.pi*(1/T)*x) %matplotlib inline plt.stem(x,carrier, 'r') plt.plot(x,carrier)As we can see in the above graph, the carrier signal generates in the form of a simplistic sine wave. Here, the signal generated has an equal number of sample points as our message signal. Hence by multiplying both of them together, we will be able to produce the message signal. Part 2: Generating a Message Signal Here, we will first generate the message signals. These signals are the amplitudes required to be multiplied by the carrier signal. In this encoding scheme, we have used '0' to be represented by f samples and amplitude be as the original amplitude of the carrier signal. And then to represent '1', taking threefold the amplitude of the original one.We are using for loop and if-else to create a new message list.messageList=[] for bit in message: if bit is 0: for i in range(0,spb): messageList.append(1) else: for i in range(0,spb): messageList.append(3) message = messageList % matplotlib inline # showing the exact location of the smaples plt.stem(x,message, 'r' ) plt.plot(x,message)The above graph is of a square wave as we have only two values present of message signal to represent i.e. 
0 and 1.len(messageList)Part 3: Creating a Transmission Signal To generate a transmission signal, we will multiply the message and carrier signal.import matplotlib.pyplot as plt signal = carrier * messageList %matplotlib inline plt.stem(x,signal, 'r') plt.plot(x,signal) sigList=[] sigList=[signal,x] sigListWe store the time and amplitude values in the variable form for comparing them in the X-Y graph.xvalue=sigList[1] ReceivedSignal=sigList[0]Here for decoding the message text, we determine the number of bits to send across the channel to compare across the total number of samples.bitssent=len(ReceivedSignal)/spb bitssentPart 2: Generating a Decode Function Now for selecting a decoding scheme, we will take a threshold value 1.5 as the amplitude of our transmitting wave. And compare it with the signal wave across x-axis of the transmitted wave. If these crossing received are higher than the threshold value then we append a '1' in the list otherwise a '0' will be placed.decmess = [] j=0 for v in range(0,int(bitssent)): co=0 for value in ReceivedSignal[(v*spb)+0:(v+1)*spb]: if value>1.5: co=co+1 if co>2: decmess.append(1) else: decmess.append(0) decmessConverting binary message to ascii.binMessage = bin(int.from_bytes(asciiMessage.encode(),'big')) n = int(binMessage,2) n.to_bytes((n.bit_length()+7) // 8, 'big').decode()CMSIS-DSP Python package example Installing and importing the needed packages The following command may take some time to execute : the full cmsisdsp library is built.!pip install cmsisdsp import numpy as np import cmsisdsp as dsp import matplotlib.pyplot as pltCreating the signalnb = 512 f=100 signal = np.sin(2 * np.pi * np.arange(nb)*f / nb) + 0.1*np.random.randn(nb) plt.plot(signal) plt.show() # Array of complex numbers as an array of real numbers def imToReal1D(a): ar=np.zeros(np.array(a.shape) * 2) ar[0::2]=a.real ar[1::2]=a.imag return(ar) # Array of real numbers as an array of complex numbers def realToIm1D(ar): return(ar[0::2] + 1j * ar[1::2])Using the F32 CMSIS-DSP FFT# CMSIS-DSP FFT F32 initialization cfftf32=dsp.arm_cfft_instance_f32() status=dsp.arm_cfft_init_f32(cfftf32,nb) print(status) # Re-evaluate this each time you change the signal signalR = imToReal1D(signal) resultR = dsp.arm_cfft_f32(cfftf32,signalR,0,1) resultI = realToIm1D(resultR) mag=20 * np.log10(np.abs(resultI)) plt.plot(mag[1:nb//2]) plt.show()Using the Q15 CMSIS-DSP FFT# Convert the signal to Q15 and viewed as a real array import cmsisdsp.fixedpoint as f signalR = imToReal1D(signal) signalRQ15 = f.toQ15(signalR) # Initialize the Q15 CFFT cfftq15 = dsp.arm_cfft_instance_q15() status = dsp.arm_cfft_init_q15(cfftq15,nb) print(status) # Compute the Q15 CFFT and convert back to float and complex array resultR = dsp.arm_cfft_q15(cfftq15,signalRQ15,0,1) resultR = f.Q15toF32(resultR) resultI = realToIm1D(resultR)*nb mag = 20 * np.log10(np.abs(resultI)) plt.plot(mag[1:nb//2]) plt.show()Selenium---Biblioteca do Python (também de outras linguagens como C) para *web scrapping* Instalação1. `pip install selenium`2. Baixar o [Chrome Driver](https://sites.google.com/a/chromium.org/chromedriver/home) (versão Windows 32-bits serve para 64-bits também) Como usar- **[Navigating - Selenium Python](https://selenium-python.readthedocs.io/navigating.html)**- **[Locating elements - Selenium Python](https://selenium-python.readthedocs.io/locating-elements.html)** Exemplo Acessando a página e buscando SelectorGadgetVamos acessar a página do G1 e buscar por um conteúdo. 
É importante inspecionar a página para saber os nomes de classes ou elementos de interesse. Isso pode ser feito com a extensão [SelectorGadget](https://chrome.google.com/webstore/detail/selectorgadget/mhjhnkcfbdhnjickkkdbjoemdmbfginb). - Uma vez instalada, basta buscar a extensão no canto superior direito e irá aparecer o menu destacado em branco.- Clique na tela no local que você deseja e selecione "XPath" para descobrir o caminho daquele elemento.- Copie esse caminho para a função `find_element_by_xpath` que será utilizada no código ![Selector - G1](./figures/g1_selector.png) Inspecionando a página- Outra opção é mais "manual", buscando no código fonte da página: Na figura abaixo está destacada (código em azul) a área referente à busca, mais especificamente (código em cinza) a classe do `input` ![Inspecionar - G1](./figures/g1_inspecionar.png)* Aqui podemos observar que a classe do `input` tem um id chamado `busca_campo`, então vamos localizá-lo (para saber mais como localizar elementos, ver link em [Como usar](Como-usar)). Aqui basta clicar com o botão direito no element o `input` e em seguida `Copiar > Copiar XPath`![Localizar elemento - G1](./figures/g1_xpath.png)from selenium import webdriver webpage = r'https://g1.globo.com/' # edit me # mudar para o local onde você extraiu o chromedriver driver = webdriver.Chrome('C:\\Users\\Elogroup\\Documents\\chromedriver_win32\\chromedriver.exe') driver.maximize_window() # é bom colocar o tempo (segundos) de espera caso vá fazer muitas requisições driver.implicitly_wait(5) # capturando o conteúdo da página driver.get(webpage) # identificando o campo de busca com o XPath copiado sbox = driver.find_element_by_xpath("//*[@id=\"busca-campo\"]") # preenchendo com o termo a ser buscado e enter! from selenium.webdriver.common.keys import Keys sbox.send_keys("", Keys.ENTER) content = driver.page_source* No caso da página ter um botão para submeter o conteúdo, podemos usar o código abaixo# identificando o campo de busca com o XPath copiado sbox = driver.find_element_by_xpath("//*[@id=\"busca-campo\"]") # preenchendo o termo a ser buscado sbox.send_keys("") # submetendo a busca submit = driver.find_element_by_name("[nome do elemento submit]") submit.click()* No caso do conteúdo estar dentro de um `frame` (classe chamada `iframe`), deve-se mudar o driver para esse local como abaixo antes de fazer a buscadriver.switch_to_frame(driver.find_element_by_name("iframe")) # todo o resto igual sbox = driver.find_element_by ...* No caso do conteúdo ser gerado numa nova janela, devemos mudar o `driver` para a nova janela também# lista de janelas (tanto a primária quanto as demais abertas no processo) windows = driver.window_handle # selecione a janela pelo índice (0 é a janela da url, 1 é a janela aberta após a principal, e assim sucessivamente) driver.switch_to_window(windows[1]) # todo o resto igual sbox = driver.find_element_by ...Lendo o conteúdo com `Beautiful Soup`from bs4 import BeautifulSoup soup = BeautifulSoup(content, 'html.parser') soup.body.contents #[i for i in soup.body.children if type(i) == bs4.]Anomaly detectionimport numpy as np import pandas as pd import matplotlib.pyplot as plt from scipy.io import loadmat from scipy import stats data = loadmat('ex8data1.mat') X = data['X'] X.shape plt.scatter(X.T[0], X.T[1]) plt.xlabel('Latency (ms)') plt.ylabel('Throughput (mb/s)') plt.show() plt.hist(X) plt.show()Gaussian distributiondef estimate_gaussian(X): mu = [] sigma = [] for i in X.T: mu.append(i.mean()) sigma.append(i.var()) return mu, sigma mu, sigma 
= estimate_gaussian(X)Calculate probabilityXval = data['Xval'] yval = data['yval'] Xval.shape dist = stats.norm(mu[0], sigma[0]) dist.pdf(X[:,0])[0:50] p = np.zeros((X.shape[0], X.shape[1])) p[:,0] = stats.norm(mu[0], sigma[0]).pdf(X[:,0]) p[:,1] = stats.norm(mu[1], sigma[1]).pdf(X[:,1]) pval = np.zeros((Xval.shape[0], Xval.shape[1])) pval[:,0] = stats.norm(mu[0], sigma[0]).pdf(Xval[:,0]) pval[:,1] = stats.norm(mu[1], sigma[1]).pdf(Xval[:,1])Selecting the thresholddef select_threshold(pval, yval): best_epsilon = 0 best_f1 = 0 step = (pval.max() - pval.min()) / 10000 for epsilon in np.arange(pval.min(), pval.max(), step): preds = pval < epsilon tp = np.sum(np.logical_and(preds == 1, yval == 1)).astype(float) fp = np.sum(np.logical_and(preds == 1, yval == 0)).astype(float) fn = np.sum(np.logical_and(preds == 0, yval == 1)).astype(float) precision = tp / (tp + fp) recall = tp / (tp + fn) f1 = (2 * precision * recall) / (precision + recall) if f1 > best_f1: best_f1 = f1 best_epsilon = epsilon return best_epsilon, best_f1 epsilon, f1 = select_threshold(pval, yval) epsilon, f1/tmp/ipykernel_47349/484194870.py:14: RuntimeWarning: invalid value encountered in double_scalars precision = tp / (tp + fp)Anomaly detection resultoutliers = np.where(p < epsilon) fig, ax = plt.subplots(figsize=(12,8)) ax.scatter(X[:,0], X[:,1]) ax.scatter(X[outliers[0],0], X[outliers[0],1], s=50, color='r', marker='o') plt.show()Estimating the fraction of archaea out of the total terrestrial deep subsurface prokaryote populationIn order to estimate the fraction of archaea out of the total population of terrestrial deep subsurface bacteria and archaea, we rely of three sources of data. Two of those sources are measurements made in the terrestrial deep subsurface of the fraction of archaea using two independent methods: 16S rDNA sequencing (FISH) and quantitative PCR (qPCR). For each method we collect several studies which used the method to measure the fraction of archaea out of the total population of bacteria and archaea in the terrestrial deep subsurface. We calculate the geometric means of samples within each study. We then calculate the geometric mean of the average estimates from each study using the same method to generate a characteristic estimate for the fraction of archaea out of the total population of bacteria and archaea in the terrestrial deep subsurface for each method. 16S rDNA sequencing-based estimateFor our 16S rDNA sequencing-based estimate we rely on data from [Rempfert et al.](http://dx.doi.org/10.3389/fmicb.2017.00056), [Lau et al.](http://dx.doi.org/10.1073/pnas.1612244113), [Osburn et al.](http://dx.doi.org/10.3389/fmicb.2014.00610), and [Simkus et al.](http://dx.doi.org/10.1016/j.gca.2015.10.003). 
Here is a sample of the data:# Define a function that will calculate the geometric mean of fractions for each bin of a groupby def frac_geo_mean_groupby(input): return frac_mean(input['Archaea fraction']) # Define a function that will calculate the CI of geometric mean of fractions for each bin of a groupby def frac_CI_groupby(input): return frac_CI(input['Archaea fraction']) seq_data = pd.read_excel('terrestrial_deep_subsurface_arch_frac_data.xlsx','16S rDNA sequencing') seq_data.head()We calculate the geometric mean of the fraction of archaea out of the total population of bacteria and archaea for each study:seq_bin = seq_data.groupby('Study') seq_study_mean = seq_bin.apply(frac_geo_mean_groupby) seq_study_meanWe calculate the geometric mean of the average fractions from each study:seq_mean = frac_mean(seq_study_mean) print('The characteristic 16S rDNA sequencing-based fraction of archaea out of the total population of bacteria and archaea in the terrestrial deep subsurface is ' + '{:,.1f}%'.format(seq_mean*100))The characteristic 16S rDNA sequencing-based fraction of archaea out of the total population of bacteria and archaea in the terrestrial deep subsurface is 2.6%qPCR-based estimateFor our qPCR-based estimate we rely on data from [Purkamo et al.](https://helda.helsinki.fi/handle/10138/165462), [Takai et al.](http://dx.doi.org/10.1128/AEM.67.21.5750-5760.2001), and [Bomberg et al.](http://dx.doi.org/10.5194/bg-13-6031-2016). Here is a sample of the data:qpcr_data = pd.read_excel('terrestrial_deep_subsurface_arch_frac_data.xlsx','qPCR') qpcr_data.head()We calculate the geometric mean of the fraction of archaea out of the total population of bacteria and archaea for each study:qpcr_bin = qpcr_data.groupby('Study') qpcr_study_mean = qpcr_bin.apply(frac_geo_mean_groupby) qpcr_study_meanWe calculate the geometric mean of the average fractions from each study:qpcr_mean = frac_mean(qpcr_study_mean) print('The characteristic qPCR-based fraction of archaea out of the total population of bacteria and archaea in the terrestrial deep subsurface is ' + '{:,.1f}%'.format(qpcr_mean*100))The characteristic qPCR-based fraction of archaea out of the total population of bacteria and archaea in the terrestrial deep subsurface is 1.5%Due to the scarcity of data in the terrestrial deep subsurface, we use as a third source of data our estimate for the fraction of archaea out of the total population of bacteria and archaea in subseafloor sediments.Our best estimate for the fraction of archaea out of the total population of bacteria and archaea is the geometric mean of these three sources of data:# As a third data source we use our estimate for the fraction of archaea out of the total population of bacteria # and archaea in subseafloor sediments. subseafloor_sed_arch_frac = 0.35 # Calculate the geometric mean of the three data sources best_estimate = frac_mean(np.array([qpcr_mean, seq_mean, subseafloor_sed_arch_frac])) print('Our best estimate for the fraction of archaea out of the total population of bacteria and archaea in the terrestrial deep subsurface is ' + '{:,.0f}%'.format(best_estimate*100))Our best estimate for the fraction of archaea out of the total population of bacteria and archaea in the terrestrial deep subsurface is 6%Uncertainty analysisIn order to assess the uncertainty associated with our estimate for the fraction of terrestrial deep subsurface archaea out of the total population of bacteria and archaea in the terrestrial deep subsurface, we gather all possible indices of uncertainty.
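The cells above and below lean on two helpers, `frac_mean` and `frac_CI`, that are defined earlier in the source notebook and not shown here. As a purely hypothetical stand-in for readability, they might look roughly like the following (a geometric mean of the fractions and a multiplicative 95% confidence factor); the actual definitions may differ.

```python
# Hypothetical stand-ins for the helpers used in this notebook; the real
# implementations live in an earlier cell and may not match these exactly.
import numpy as np
from scipy import stats

def frac_mean(fractions):
    """Geometric mean of a set of fractions."""
    return stats.gmean(np.asarray(fractions, dtype=float))

def frac_CI(fractions, alpha=0.05):
    """Multiplicative (fold-change) 95% confidence factor of the geometric mean."""
    log_vals = np.log(np.asarray(fractions, dtype=float))
    t_crit = stats.t.ppf(1 - alpha / 2, len(log_vals) - 1)
    return float(np.exp(t_crit * stats.sem(log_vals)))
```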
We compare the uncertainty of values within each one of the methods and the uncertainty stemming from the variability of the values provided by the two methods. Intra-study uncertainty 16S rDNA sequencing-based methodWe calculate the intra-study 95% confidence interval for the geometric mean of the values for the fraction of archaea out of the total population of bacteria and archaea measured using 16S rDNA sequencing.seq_arc_CI = seq_bin.apply(frac_CI_groupby) seq_data_bac = seq_data.copy() seq_data_bac['Archaea fraction'] = 1.- seq_data_bac['Archaea fraction'] seq_bin_bac = seq_data_bac.groupby('Study') seq_bac_CI = seq_bin_bac.apply(frac_CI_groupby) print('The intra-study uncertainty of the 16S rDNA sequencing-based estimate of the fraction of archaea out of the population of bacteria and archaea are:') print(seq_arc_CI) print('The intra-study uncertainty of the 16S rDNA sequencing-based estimate of the fraction of bacteria out of the population of bacteria and archaea are:') print(seq_bac_CI)The intra-study uncertainty of the 16S rDNA sequencing-based estimate of the fraction of archaea out of the population of bacteria and archaea are: Study Lau et al. nan Osburn et al. 2.0e+00 Rempfert et al. 2.4e+00 Simkus et al. 3.0e+00 dtype: float64 The intra-study uncertainty of the 16S rDNA sequencing-based estimate of the fraction of bacteria out of the population of bacteria and archaea are: Study Lau et al. nan Osburn et al. 1.1e+00 Rempfert et al. 1.0e+00 Simkus et al. 1.0e+00 dtype: float64qPCR-based methodWe calculate the intra-study 95% confidence interval for the geometric mean of the values for the fraction of archaea out of the total population of bacteria and archaea measured using qPCR.qpcr_arc_CI = qpcr_bin.apply(frac_CI_groupby) qpcr_data_bac = qpcr_data.copy() qpcr_data_bac['Archaea fraction'] = 1.- qpcr_data_bac['Archaea fraction'] qpcr_bin_bac = qpcr_data_bac.groupby('Study') qpcr_bac_CI = qpcr_bin_bac.apply(frac_CI_groupby) print('The intra-study uncertainty of the qPCR-based estimate of the fraction of archaea out of the population of bacteria and archaea are:') print(qpcr_arc_CI) print('The intra-study uncertainty of the qPCR-based estimate of the fraction of bacteria out of the population of bacteria and archaea are:') print(qpcr_bac_CI)The intra-study uncertainty of the qPCR-based estimate of the fraction of archaea out of the population of bacteria and archaea are: Study Bomberg et al. 3.7e+00 Purkamo et al. 1.3e+01 Takai et al. 1.7e+01 dtype: float64 The intra-study uncertainty of the qPCR-based estimate of the fraction of bacteria out of the population of bacteria and archaea are: Study Bomberg et al. 1.1e+00 Purkamo et al. 1.0e+00 Takai et al.
1.5e+00 dtype: float64Interstudy uncertainty 16S rDNA sequencing-based methodWe calculate the interstudy 95% confidence interval for the geometric mean of the average values from each study for the fraction of archaea out of the total population of bacteria and archaea measured using 16S rDNA sequencing.inter_seq_arc_CI = frac_CI(seq_study_mean) inter_seq_bac_CI = frac_CI(1-seq_study_mean) print('The interstudy uncertainty of the 16S rDNA sequencing-based estimate of the fraction of archaea out of the population of bacteria and archaea is ≈%.1f-fold' % inter_seq_arc_CI) print('The interstudy uncertainty of the 16S rDNA sequencing-based estimate of the fraction of bacteria out of the population of bacteria and archaea is ≈%.1f-fold' % inter_seq_bac_CI)The interstudy uncertainty of the 16S rDNA sequencing-based estimate of the fraction of archaea out of the population of bacteria and archaea is ≈1.9-fold The interstudy uncertainty of the 16S rDNA sequencing-based estimate of the fraction of bacteria out of the population of bacteria and archaea is ≈1.0-foldqPCR-based methodWe calculate the interstudy 95% confidence interval for the geometric mean of the average values from each study for the fraction of archaea out of the total population of bacteria and archaea measured using qPCR.inter_qpcr_arc_CI = frac_CI(qpcr_study_mean) inter_qpcr_bac_CI = frac_CI(1-qpcr_study_mean) print('The interstudy uncertainty of the qPCR-based estimate of the fraction of archaea out of the population of bacteria and archaea is ≈%.1f-fold' % inter_qpcr_arc_CI) print('The interstudy uncertainty of the qPCR-based estimate of the fraction of bacteria out of the population of bacteria and archaea is ≈%.1f-fold' % inter_qpcr_bac_CI)The interstudy uncertainty of the qPCR-based estimate of the fraction of archaea out of the population of bacteria and archaea is ≈8.7-fold The interstudy uncertainty of the qPCR-based estimate of the fraction of bacteria out of the population of bacteria and archaea is ≈1.1-foldInter-method uncertaintyWe calculate the interstudy 95% confidence interval for the geometric mean of the estimates from the three different sources - the 16S rDNA sequencing-based estimate, the qPCR-based estimate and the estimate for the fraction of archaea out of the total population of bacteria and archaea in subseafloor sediments.inter_method_arc_CI = frac_CI(np.array([seq_mean,qpcr_mean,subseafloor_sed_arch_frac])) inter_method_bac_CI = frac_CI(1-np.array([seq_mean,qpcr_mean,subseafloor_sed_arch_frac])) print('The inter-method uncertainty of the estimate of the fraction of archaea out of the population of bacteria and archaea is ≈%.1f-fold' % inter_method_arc_CI) print('The inter-method uncertainty of the estimate of the fraction of bacteria out of the population of bacteria and archaea is ≈%.1f-fold' % inter_method_bac_CI)The inter-method uncertainty of the estimate of the fraction of archaea out of the population of bacteria and archaea is ≈7.3-fold The inter-method uncertainty of the estimate of the fraction of bacteria out of the population of bacteria and archaea is ≈1.2-foldAs our best estimates for the uncertainty associated with the fraction of archaea and bacteria out of the total population of terrestrial deep subsurface bacteria and archaea, we use the highest uncertainty out of the available set of uncertainties we collected.The highest uncertainty for the fraction of archaea is the intra-study uncertainty of the Takai et al. study, which is ≈20-fold.
Similarly, the highest uncertainty for the fraction of bacteria is intra-study uncertainty of the Takai et al. study, which is ≈1.5-fold.Our final parameters are:# Take the maximum uncertainty as our best projection of uncertainty arc_mul_CI = np.max([seq_arc_CI.max(),qpcr_arc_CI.max(),inter_seq_arc_CI,inter_method_arc_CI]) bac_mul_CI = np.max([seq_bac_CI.max(),qpcr_bac_CI.max(),inter_seq_bac_CI,inter_qpcr_bac_CI,inter_method_bac_CI]) print('Fraction of archaea out of the total population of terrestrial deep subsurface bacteria and archaea: %.0f percent' %(best_estimate*100)) print('Fraction of bacteria out of the total population of terrestrial deep subsurface bacteria and archaea: %.0f percent' %(100.-best_estimate*100)) print('Uncertainty associated with the fraction of terrestrial deep subsurface archaea: %.1f-fold' % arc_mul_CI) print('Uncertainty associated with the fraction of terrestrial deep subsurface bacteria: %.1f-fold' % bac_mul_CI) old_results = pd.read_excel('../terrestrial_deep_subsurface_prok_biomass_estimate.xlsx') result = old_results.copy() if (result.shape[0]==0): result = pd.DataFrame(index= range(1), columns=['Parameter','Value','Units','Uncertainty']) result.loc[1] = pd.Series({ 'Parameter': 'Fraction of archaea', 'Value': "{0:.2f}".format(best_estimate), 'Units': 'Unitless', 'Uncertainty': "{0:.1f}".format(arc_mul_CI) }) result.loc[2] = pd.Series({ 'Parameter': 'Fraction of bacteria', 'Value': "{0:.2f}".format(1.0 - best_estimate), 'Units': 'Unitless', 'Uncertainty': "{0:.1f}".format(bac_mul_CI) }) result.to_excel('../terrestrial_deep_subsurface_prok_biomass_estimate.xlsx',index=False)Fraction of archaea out of the total population of terrestrial deep subsurface bacteria and archaea: 6 percent Fraction of bacteria out of the total population of terrestrial deep subsurface bacteria and archaea: 94 percent Uncertainty associated with the fraction of terrestrial deep subsurface archaea: 17.4-fold Uncertainty associated with the fraction of terrestrial deep subsurface bacteria: 1.5-foldSCORE CALCULATIONdf = pd.read_excel("~/Desktop/rankings.xlsx", header=None) df df = df.drop(0, axis = 0) df = df.drop(0, axis = 1) df query = 9 schemas = 5 df_list = ['ST', 'VT', 'PT', 'ExtVP', 'WPT'] print("scores\n") for index, row in df.iterrows(): s = 0 for r in range(schemas): s = s + ( row[r+1]*(schemas-(r+1)) / (query*(schemas-1)) ) print(df_list[index-1], "\t", s) for i,row in df.iterrows(): print(i,row[1], row[2]) rowPA003: Churn Predict 0.0 Importimport pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import inflection import math import pickle import inflection from IPython.core.display import HTML from scipy.stats import shapiro, chi2_contingency from sklearn import preprocessing as pp from scikitplot.metrics import plot_cumulative_gain, plot_lift_curve from BorutaShap import BorutaShap from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.pipeline import make_pipeline from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score, GridSearchCV from sklearn.metrics import confusion_matrix, f1_score, recall_score, precision_score, accuracy_score, classification_report from catboost import CatBoostClassifier, metrics, Pool from yellowbrick.classifier import ClassPredictionError, 
ConfusionMatrix, ClassificationReport, ROCAUC, ClassPredictionError, DiscriminationThreshold from xgboost import XGBClassifier from imblearn.combine import SMOTETomek # from sklearn.preprocessing import StandardScaler, MinMaxScaler , RobustScaler import warnings warnings.filterwarnings("ignore")0.1.Helper functiondef my_settings(): %matplotlib inline # plotly settings plt.style.use( 'ggplot' ) plt.rcParams['figure.figsize'] = [25, 12] plt.rcParams['font.size'] = 8 # notebook settings display(HTML('')) np.set_printoptions(suppress=True) pd.set_option('display.float_format', '{:.3f}'.format) # seaborn settings sns.set(rc={'figure.figsize':(25,12)}) sns.set_theme(style = 'darkgrid', font_scale = 1) my_settings() def numerical_descriptive_statistical(num_attributes): """ Shows the main values for descriptive statistics in numerical variables. Args: data ([float64 and int64]): Insert all numerical attributes in the dataset Returns: [dataframe]: A dataframe with mean, median, std deviation, skewness, kurtosis, min, max and range """ # Central Tendency - Mean, Median ct1 = pd.DataFrame(num_attributes.apply(np.mean)).T ct2 = pd.DataFrame(num_attributes.apply(np.median)).T # Dispersion - std, min, max, range, skew, kurtosis, Shapiro-Wilk Test d1 = pd.DataFrame(num_attributes.apply(np.std)).T d2 = pd.DataFrame(num_attributes.apply(min)).T d3 = pd.DataFrame(num_attributes.apply(max)).T d4 = pd.DataFrame(num_attributes.apply(lambda x: x.max() - x.min())).T d5 = pd.DataFrame(num_attributes.apply(lambda x: x.skew())).T d6 = pd.DataFrame(num_attributes.apply(lambda x: x.kurtosis())).T d7 = pd.DataFrame(num_attributes.apply(lambda x: 'not normal' if shapiro(x.sample(5000))[1] < 0.05 else 'normal')).T # concatenate m = pd.concat([d2, d3, d4, ct1, ct2, d1, d5, d6, d7]).T.reset_index() m.columns = ['attributes', 'min', 'max', 'range', 'mean', 'median', 'std', 'skew', 'kurtosis', 'shapiro'] return m def categorical_descriptive_statstical(data , col): """ Shows the the absolute and percent values in categorical variables. 
Args: data ([object]): Insert all categorical attributes in the dataset Returns: [dataframe]: A dataframe with absolute and percent values """ return pd.DataFrame({'absolute' : data[col].value_counts() , 'percent %': data[col].value_counts(normalize = True) * 100}) def correlation_matrix(data , method): """Generates a correlation matrix of numerical variables Args:correlation_matrix data ([DataFrame]): [The dataframe of the EDA] method ([string]): [The method used, it can be ‘pearson’, ‘kendall’ or ‘spearman’] Returns: [Image]: [The correlation matrix plot made with seaborn] """ # correlation num_attributes = data.select_dtypes( include = ['int64' , 'float64']) correlation = num_attributes.corr( method = method) # correlation.append('exited') # df_corr = data[correlation].reset_index(drop=True) # df_corr['exited'] = df_corr['exited'].astype('int') # mask mask = np.zeros_like(correlation) mask = np.triu(np.ones_like(correlation , dtype = np.bool)) # plot - mask = mask , ax = sns.heatmap(correlation , fmt = '.2f' , vmin = -1 , vmax = 1, annot = True, cmap = 'YlGnBu' , square = True) return ax def without_hue(plot, feature): total = len(feature) for p in plot.patches: percentage = '{:.1f}%'.format(100 * p.get_height()/total) x = p.get_x() + p.get_width() / 2 - 0.05 y = p.get_y() + p.get_height() plot.annotate(percentage, (x, y), size = 12) def plot_cat_overview(df, cat_attributes, target): cat_attributes.remove(target) plots_lin = math.ceil(len(cat_attributes)/2) fig, axs = plt.subplots(plots_lin,2, figsize=(25, 10), facecolor='w', edgecolor='k') fig.subplots_adjust(hspace = .5, wspace=.20) axs = axs.ravel() for c in range(len(cat_attributes)): ax1 = sns.countplot(ax=axs[c], x=cat_attributes[c],hue=target, data=df) without_hue(ax1,df1.exited) def sum_of_na (data): return pd.DataFrame({'Sum of NA' : data.isna().sum(), '% NA': data.isna().sum()/data.shape[0]}) def lift_score(y, y_pred, **kwargs): df = pd.DataFrame() df['true'] = y df['pred'] = y_pred df.sort_values('pred', ascending=False, inplace=True) N = len(df) churn_total = df['true'].sum() / N n = int(np.ceil(.1 * N)) data_here = df.iloc[:n, :] churn_here = data_here['true'].sum() / n lift = churn_here / churn_total return lift def knapsack(W, wt, val): n = len(val) K = [[0 for x in range(W + 1)] for x in range(n + 1)] for i in range(n + 1): for w in range(W + 1): if i == 0 or w == 0: K[i][w] = 0 elif wt[i-1] <= w: K[i][w] = max(val[i-1] + K[i-1][w-wt[i-1]], K[i-1][w]) else: K[i][w] = K[i-1][w] max_val = K[n][W] keep = [False] * n res = max_val w = W for i in range(n, 0, -1): if res <= 0: break if res == K[i - 1][w]: continue else: keep[i - 1] = True res = res - val[i - 1] w = w - wt[i - 1] del K return max_val, keep0.2. Loading Datadf_raw = pd.read_csv(r'~/repositorio/churn_predict/data/raw/churn.csv') df_raw.head()1.0. Data Description - **RowNumber** : O número da coluna. - **CustomerID** : Identificador único do cliente. - **Surname** : Sobrenome do cliente. - **CreditScore** : A pontuação de Crédito do cliente para o mercado de consumo. - **Geography** : O país onde o cliente reside. - **Gender** : O gênero do cliente. - **Age** : A idade do cliente. - **Tenure** : Número de anos que o cliente permaneceu ativo. - **Balance** : Valor monetário que o cliente tem em sua conta bancária. - **NumOfProducts** : O número de produtos comprado pelo cliente no banco. - **HasCrCard** : Indica se o cliente possui ou não cartão de crédito. 
- **IsActiveMember** : Indica se o cliente fez pelo menos uma movimentação na conta bancário dentro de 12 meses. - **EstimateSalary** : Estimativa do salário mensal do cliente. - **Exited** : Indica se o cliente está ou não em Churn.df1 = df_raw.copy() df1.columns df1.duplicated('CustomerId').sum() df1.info() RangeIndex: 10000 entries, 0 to 9999 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 RowNumber 10000 non-null int64 1 CustomerId 10000 non-null int64 2 Surname 10000 non-null object 3 CreditScore 10000 non-null int64 4 Geography 10000 non-null object 5 Gender 10000 non-null object 6 Age 10000 non-null int64 7 Tenure 10000 non-null int64 8 Balance 10000 non-null float64 9 NumOfProducts 10000 non-null int64 10 HasCrCard 10000 non-null int64 11 IsActiveMember 10000 non-null int64 12 EstimatedSalary 10000 non-null float64 13 Exited 10000 non-null int64 dtypes: float64(2), int64(9), object(3) memory usage: 1.1+ MB1.1 Rename Columnsold_columns=list(df1.columns) snakecase = lambda x : inflection.underscore(x) new_columns = map(snakecase , old_columns) # rename columns df1.columns = new_columns1.2. Data Dimensionsprint('Numbers of rows: {}'.format(df1.shape[0])) print('Numbers of cols: {}'.format(df1.shape[1]))Numbers of rows: 10000 Numbers of cols: 141.3. Data Typesdf1.head() df1.dtypes1.3.1. Change Data Typesdf1.exited = df1.exited.astype('bool') df1.has_cr_card = df1.has_cr_card.astype('bool') df1.is_active_member= df1.is_active_member.astype('bool')1.3.2. Check unique valuesdf1.nunique()1.3.3. Remove Variablescols_drop = ['row_number', 'surname', 'customer_id'] df1 = df1.drop(cols_drop , axis = 1)1.4. Check NAdf1.isna().sum()1.5. Data Descriptivenum_attributes = df1.select_dtypes(include=['int64', 'float64']) cat_attributes = df1.select_dtypes(exclude=['int64', 'float64'])1.5.1. Numerical Attributesm = numerical_descriptive_statistical(num_attributes) m1.5.2. Categorical Attributescat_attributes.columns x = df1[['geography' , 'exited']].groupby('geography').count().reset_index() x plot_cat_overview(cat_attributes, list(cat_attributes.columns), 'exited') categorical_descriptive_statstical(cat_attributes , 'geography') categorical_descriptive_statstical(cat_attributes , 'gender') categorical_descriptive_statstical(cat_attributes , 'has_cr_card') categorical_descriptive_statstical(cat_attributes , 'is_active_member') categorical_descriptive_statstical(cat_attributes , 'exited')1.5.3. Multivariate Analysiscorrelation_matrix(df1 , 'spearman')1.5.4. Outliers Numerical Attributesnum_cols = num_attributes.columns.tolist() i = 1 for col in df1[num_cols]: plt.subplot(2,3,i) ax = sns.boxplot( data = df1 , x = col) i += 1**Important informations:** - There are outliers in **credit_score, num_of_products and age**- The **churn ratio is 20.37%**- **70.6%** of the members **has credit card**- More than **50% of the clients** are **from France** 2.0. Feature Engineeringdf2 = df1.copy() df2.head()2.1. Balance_age# balance_per_age balance_age = df2[['balance', 'age']].groupby('age').mean().reset_index() balance_age.columns = ['age' , 'balance_age'] # merge df2 = pd.merge(df2, balance_age, on = 'age' , how = 'left')2.2. Balance_countrybalance_country = df2.loc[:, ['geography', 'balance']].groupby('geography').mean().reset_index() balance_country.columns = ['geography', 'balance_per_country'] # merge df2 = pd.merge(df2, balance_country, on = 'geography', how = 'left')2.3. 
Balance_tenurebalance_tenure = df2.loc[:, ['tenure', 'balance']].groupby('tenure').mean().reset_index() balance_tenure.columns = ['tenure', 'LTV'] # merge df2 = pd.merge(df2, balance_tenure, on = 'tenure', how = 'left')2.3. Salary_genderestimated_salary_gender = df2.loc[:, ['gender', 'estimated_salary']].groupby('gender').mean().reset_index() estimated_salary_gender.columns = ['gender', 'estimated_salary_per_gender'] # merge df2 = pd.merge(df2, estimated_salary_gender, on = 'gender', how = 'left') correlation_matrix(df2, 'pearson')3.0. Data Filteringdf3 = df2.copy()4.0. Exploratoria Data Analysis (EDA)df4 = df3.copy()5.0. Data Preparationdf5 = df4.copy() df5.columns df5.head() df5.exited = df1.exited.astype('int64') df5.has_cr_card = df1.has_cr_card.astype('int64') df5.is_active_member= df1.is_active_member.astype('int64')5.1. Rescalingmms = pp.MinMaxScaler() rbs = pp.RobustScaler() cols_rob =['age'] cols_mms = ['credit_score', 'tenure', 'salary'] df5['age'] = rbs.fit_transform(df5[['age']].values) #Balance df5['balance'] = mms.fit_transform(df5[['balance']].values) df5['tenure'] = mms.fit_transform(df5[['tenure']].values) df5['balance_age'] = mms.fit_transform(df5[['balance_age']].values) df5['balance_per_country'] = mms.fit_transform(df5[['balance_per_country']].values) df5['LTV'] = mms.fit_transform(df5[['LTV']].values) df5['estimated_salary_per_gender'] = mms.fit_transform(df5[['estimated_salary_per_gender']].values) #EstimatedSalary df5['estimated_salary'] = mms.fit_transform(df5[['estimated_salary']].values) #LTV df5['LTV'] = mms.fit_transform(df5[['LTV']].values)5.2. Encoding# #gender - label encoding # gender_dict = { 'Male':0 , 'Female':1 } # df5['gender'] = df5['gender'].map( gender_dict ) # #Geography - One Hot Encoding # # one hot encoding encoding df5 = pd.get_dummies(df5, prefix=['country'], columns=['geography']) df5 = pd.get_dummies(df5, prefix=['gender'], columns=['gender']) # questions_encoding = {'False': 0,'True': 1} # df5['is_active_member'] = df5['is_active_member'].map(questions_encoding ) # df5['has_cr_card'] = df5['has_cr_card'].map(questions_encoding) # df5['exited'] = df5['exited'].map(questions_encoding)5.3. Balanicing Data Setx = df5.drop('exited', axis = 1) y = df5.exited x_train, x_test,y_train, y_test = train_test_split(x , y , test_size=0.33 , random_state = 42, stratify = y) # balance dataset up_sampler = SMOTETomek(random_state=42 , n_jobs = -1) x_train_res , y_train_res = up_sampler.fit_resample(x_train , y_train) fig , axes = plt.subplots(1,2, figsize = (25,5)) fig.suptitle('Comparation before x After Smote Tomek') axes[0].set_title('Before Up sample') ax1 = sns.countplot(ax = axes[0] , x=y_train) axes[1].set_title('After Up sample') ax1 = sns.countplot(ax = axes[1] , x=y_train_res)6.0. Feature Selectiondf6 = df5.copy() df6.head() # no model selected default is Random Forest, we will use a CatBoostClassifier. If classification is True it is a Classification problem. 
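# Note (added for clarity): despite the comment above mentioning CatBoost, the live code in
# this cell fits BorutaShap with an XGBClassifier as its base model; the CatBoost and
# RandomForest variants below are left commented out.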
#cat_boruta = CatBoostClassifier(random_state=42) # rf = RandomForestClassifier(n_jobs=-1 ) # Feature_Selector_rf = BorutaShap(model = rf, # importance_measure='shap', # classification=True) # Feature_Selector_rf.fit(X=x_train_res, y=y_train_res, n_trials=100, verbose=True, train_or_test = 'train', random_state=42) model = XGBClassifier(n_jobs=-1 ) Feature_Selector_rf = BorutaShap(model = model, importance_measure='shap', classification=True) Feature_Selector_rf.fit(X=x_train_res, y=y_train_res, n_trials=100, verbose=True, train_or_test = 'train', random_state=42) # Returns Boxplot of features Feature_Selector_rf.plot(X_size=12, figsize=(12,8), y_scale='log', which_features='all') subset = Feature_Selector_rf.Subset() subset.head()7.0. Machine Learning Modellingdf7 = df6.copy()7.1. Logistic Regressionlr = LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=42, solver='lbfgs', max_iter=100, multi_class='auto', verbose=0, warm_start=False, n_jobs=-1, l1_ratio=None) lr.fit(x_train_res, y_train_res)7.2. Random Forestrf = RandomForestClassifier(n_estimators=100, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, bootstrap=True, oob_score=False, n_jobs=-1, random_state=42, verbose=0, warm_start=False, class_weight=None, ccp_alpha=0.0, max_samples=None) rf.fit(x_train_res, y_train_res)7.3. Support Vector Machine - SVMsvm = SVC(random_state=42, probability=True) svm.fit(x_train_res, y_train_res)7.4. XGBoostxgb = XGBClassifier(random_state=42) xgb.fit(x_train_res,y_train_res, eval_metric='logloss', early_stopping_rounds=10, eval_set=[(x_train_res, y_train_res)], ) models = [lr,rf,svm,xgb] models_names = ['logistic Regression',' Random Forest', 'SVM' , 'XGBoost'] scoring = 'recall' means=[] stds=[] for model, name in zip(models, models_name): pipe = make_pipeline()8.0. Performance Metricsdf8 = df7.copy()8.1. Confusion Matrixfig, axes = plt.subplots(2, 2, figsize=(15,10)) fig.suptitle('Model Comparasion') cm_viz = ConfusionMatrix(lr, classes=['not churn', 'churn'], support=True, cmap='YlGnBu', ax=axes[0][0]) cm_viz.fit(x_train_res, x_train_res) cm_viz.score(x_test, y_test) cm_viz.finalize() cm_viz = ConfusionMatrix(svm, classes=['not churn', 'churn'], support=True, cmap='YlGnBu', ax=axes[0][1]) cm_viz.fit(x_train_res, x_train_res) cm_viz.score(x_test, y_test) cm_viz.finalize() cm_viz = ConfusionMatrix(rf, classes=['not churn', 'churn'], support=True, cmap='YlGnBu', ax=axes[1][0]) cm_viz.fit(x_train_res, x_train_res) cm_viz.score(x_test, y_test) cm_viz.finalize() cm_viz = ConfusionMatrix(xgb, classes=['not churn', 'churn'], support=True, cmap='YlGnBu', ax=axes[1][1]) cm_viz.fit(x_train_res, x_train_res) cm_viz.score(x_test, y_test) cm_viz.finalize()Accounting for substrateOften in heat diffusion settings, the substrate is disregarded and only the target layer is considered. We suggest to also account for the substate since it is an important channel for heat diffusion and therefore influences the dynamics of the target layer to a large extent. However, we want to note, that if the parameters for the substrate layer are not available, then one can think of changing the boundary condition on the right-hand side to *Neumann*- type. 
This can be done via: `sim.changeBC_Type(system1,"right","neumann")sim.changeBC_Type(system2,"right","neumann")sim.changeBC_Value(system1,"right",1)sim.changeBC_Value(system2,"right",1)`The advantage of this method is that we can create a heat flux directed outward of the target material by imposing a certain flux on the boundary. The disadvantage is, that we have no precise knowledge about the value of the imposed flux, hence setting it here to `1` is just an arbitrary choice. This is why we **recommend the following**:* Set up a 2TM simulation for two layers* Account for the substrate with a high spatial resolution at the target- substrate edge and then decrease the number of interpolation points with the distance to the target layer. Aim * Perform simulations and analyze the effect of the substrate* Suggest one way to "correctly" implement the substrateAs always, the workflow is **Source** $\rightarrow$ **Simulation** $\rightarrow$ **Visualization**from NTMpy import NTMpy as ntm import numpy as np from matplotlib import pyplot as plt import numericalunits as u u.reset_units('SI')Define a Sources = ntm.source() s.spaceprofile = "TMM" s.timeprofile = "Gaussian" # Set the Full Width at Half Maximum (width of the Gaussian) s.FWHM = 0.1*u.ps # Set the fluence of the laser pulse (area under the Gaussian) s.fluence = 6*u.mJ/u.cm**2 # Set the time of the Gaussian peak s.t0 = 1*u.ps # Set the wavelength in vacuum in nm s.lambda_vac = 400 #Set the incident angle in rad (0 is perpendicular to the surface) s.theta_in = np.pi/4 s.polarization = 'p'Parameters Pt and Si- materials# Platinum length_Pt = 10*u.nm n_Pt = 1.7176+2.844j # at 400nm k_el_Pt = 72*u.W/(u.m*u.K)# Same for electrons and lattice rho_Pt = 1e3*21*u.kg/(u.m**3) C_el_Pt = lambda Te: (740*u.J/(u.m**3*u.K**2))/(1e3*21*u.kg/(u.m**3)) *Te C_lat_Pt = 2.78e6*u.J/(u.m**3*u.K**2)/rho_Pt G_Pt = 2.5e17*u.W/(u.m**3*u.K) # Silicon n_Si = 5.5674+0.38612j #at 400nm k_el_Si = 130#W/(m*K); k_lat_Si = lambda T: np.piecewise(T,[T<=120.7,T>120.7],\ [lambda T: 100*(0.09*T**3*(0.016*np.exp(-0.05*T)+np.exp(-0.14*T))), lambda T: 100*(13*1e3*T**(-1.6))]) rho_Si = 2.32e3#kg/(m**3) C_el_Si = lambda Te: 150/rho_Si *Te C_lat_Si = 1.6e6/rho_Si G_Si = 1e17*18Set up a simulation for a 2- System caseNote that here the Platinum target layer and the Silicon substrate are very different in length. Still, the way the algorithm is designed, every layer will get a certain number of *collocation* points. Those can be uniformly changed by `sim.temp_data.collocpts = int_number`.However, the spacial resolution of Platinum is very high, with respect to the one of Silicon, simply because their length is so different.sim = ntm.simulation(2,s) #lengt,refractive index,conductivity[el,la],heatCapacity [el,lat],density,linearCoupling sim.addLayer(length_Pt,n_Pt,[k_el_Pt,k_el_Pt],[C_el_Pt,C_lat_Pt],rho_Pt,[G_Pt]) sim.addLayer(5000*u.nm,n_Si,[k_el_Si,k_lat_Si],[C_el_Si,C_lat_Si],rho_Si,[G_Si]) sim.final_time = 7*u.ps [x,t,Tmap] = sim.run() T_e = Tmap[0]; T_l = Tmap[1]----------------------------------------------------------- No specific time constant has been indicated. The stability region has been calculated and an appropriate timestep has been chosen. Timestep = 5.37e-16 s ----------------------------------------------------------- ----------------------------------------------------------- Transfer matrix absorption profile and a Gaussian time profile is taken into account for the source. Length of every layer has to be given in units of meter. 
-----------------------------------------------------------Now let us inspect the result, by exponentially giving weight to the averaged temperature and subtracting the initial temperature $T_{e,l}(x,t=0) = 300$, since we are only interested int he change $\Delta T_e$ and $\Delta T_l$.pen_depth = 10*u.nm #Optical penetration depth of probe laser exp_weights = np.exp(-x/pen_depth) avT_E_w = np.average(T_e,axis = 1, weights = exp_weights) avT_L_w = np.average(T_l,axis = 1, weights = exp_weights) avT_tot = (avT_E_w + avT_L_w - 600) plt.figure() plt.suptitle("Temperature dynamics averaged in space",fontsize = 16) plt.title("Stack [10nm Pt|5000 nm Si]") plt.xlabel("Time in ps",fontsize = 16); plt.ylabel(r"Weighted Temp. in K",fontsize = 16) plt.plot(t*(1/u.ps),avT_tot,label = f"Simulation") plt.tick_params(axis='x', labelsize=14) plt.tick_params(axis='y', labelsize=14) plt.legend(loc = "upper right",fontsize = 16) plt.grid()Now let us consider the same simulation again **but** instead of only stacking up one "long" layer of the substrate, we **put multiple layers**, with increasing length after the Pt- target. This will increase the spatial resolution close to the Pt|Si- edge, where modeling the diffusion accurately is important since it is an important channel for the heat dynamics.sim = ntm.simulation(2,s) sim.addLayer(length_Pt,n_Pt,[k_el_Pt,k_el_Pt],[C_el_Pt,C_lat_Pt],rho_Pt,[G_Pt]) sim.addLayer(10*u.nm,n_Si,[k_el_Si,k_lat_Si],[C_el_Si,C_lat_Si],rho_Si,[G_Si]) sim.addLayer(20*u.nm,n_Si,[k_el_Si,k_lat_Si],[C_el_Si,C_lat_Si],rho_Si,[G_Si]) sim.addLayer(100*u.nm,n_Si,[k_el_Si,k_lat_Si],[C_el_Si,C_lat_Si],rho_Si,[G_Si]) sim.addLayer(4870*u.nm,n_Si,[k_el_Si,k_lat_Si],[C_el_Si,C_lat_Si],rho_Si,[G_Si]) sim.final_time = 7*u.ps [x,t,Tmap] = sim.run() T_e = Tmap[0]; T_l = Tmap[1] pen_depth = 10*u.nm exp_weights = np.exp(-x/pen_depth) avT_E_w = np.average(T_e,axis = 1, weights = exp_weights) avT_L_w = np.average(T_l,axis = 1, weights = exp_weights) avT_tot = (avT_E_w + avT_L_w - 600) plt.figure() plt.suptitle("Temperature dynamics averaged in space",fontsize = 16) plt.title("Stack [10nm Pt|10 nm Si|20 nm Si|100 nm Si|4870 nm Si]") plt.xlabel("Time in ps",fontsize = 16); plt.ylabel(r"Weighted Temp. in K",fontsize = 16) plt.plot(t*(1/u.ps),avT_tot,label = f"Simulation") plt.tick_params(axis='x', labelsize=14) plt.tick_params(axis='y', labelsize=14) plt.legend(loc = "upper right",fontsize = 16) plt.grid()Evaluation# !pip install --force-reinstall /content/drive/MyDrive/instance_segmentation/Mask_RCNN weights_path = '/content/drive/MyDrive/instance_segmentation/Mask_RCNN/logs/plain_new_eval/mask_rcnn_sun_0005.h5' %cd /content/drive/MyDrive/instance_segmentation/Mask_RCNN !python -m samples.sunrgbd.sun evaluate\ --dataset '/content/SUNRGBD'/content/drive/MyDrive/instance_segmentation/Mask_RCNN Try to set gpu ressources ... invalid literal for int() with base 10: '00000000:00:04.0 Off' Using TensorFlow backend. 
Arguments: Namespace(augm_num=0, augm_strength=0, command='evaluate', cvhci_mode=False, dataset='/content/SUNRGBD', depth_mode=False, epochs=4, logs='/content/drive/MyDrive/instance_segmentation/Mask_RCNN/logs', lr=None, snapshot_mode='None', weights='/content/drive/MyDrive/instance_segmentation/Mask_RCNN/snapshots/mask_rcnn_coco.h5') Depth mode False Following classes are used: bed chair table sofa bookcase Configurations: BACKBONE resnet101 BACKBONE_STRIDES [4, 8, 16, 32, 64] BATCH_SIZE 3 BBOX_STD_DEV [0.1 0.1 0.2 0.2] COMPUTE_BACKBONE_SHAPE None DETECTION_MAX_INSTANCES 100 DETECTION_MIN_CONFIDENCE 0.9 DETECTION_NMS_THRESHOLD 0.3 FPN_CLASSIF_FC_LAYERS_SIZE 1024 GPU_COUNT 1 GRADIENT_C[...]Training%cd /content/drive/MyDrive/instance_segmentation/Mask_RCNN weights_path = '/content/drive/MyDrive/instance_segmentation/Mask_RCNN/logs/snapshots/mask_rcnn_sun_0005.h5' import sys !python -m samples.sunrgbd.sun train\ --dataset '/content/SUNRGBD'\ --epochs 25\ --lr 0.001\ --augm-num 2\ --augm-strength 7\ --depth-mode True\ --weights {weights_path} import multiprocessing import os multiprocessing.cpu_count() os.name is 'nt'function ConnectButton(){ console.log("Connect pushed"); document.querySelector("#top-toolbar > colab-connect-button").shadowRoot.querySelector("#connect").click() } setInterval(ConnectButton,300000);Intrduction to NumPy 1. Import NumPy under the name np.# your code here import numpy as np2. Print your NumPy version.# your code here print(np.__version__)1.16.53. Generate a 2x3x5 3-dimensional array with random values. Assign the array to variable *a*.**Challenge**: there are at least three easy ways that use numpy to generate random arrays. How many ways can you find?# Method 1 a = np.random.random((2,3,5)) print(a) # Method 2 a = np.random.rand(2,3,5) print(a) # Method 3 a = np.random.randn(2,3,5)4. Print *a*.# your code here print(a)[[[ 0.15029962 0.16709118 -1.81006912 -1.07990635 0.26090639] [-0.39069937 -0.22648089 -0.05553226 0.07993998 0.26112052] [-1.09173696 0.10820165 -1.44217709 -0.59204743 0.05324294]] [[ 0.59013886 0.53156329 0.83228162 1.71264247 -1.3698096 ] [-0.72437669 0.37578938 0.59568654 -0.43701517 2.5370072 ] [ 0.56318257 0.7255606 -0.71467409 -0.15693535 -0.92157976]]]5. Create a 5x2x3 3-dimensional array with all values equaling 1. Assign the array to variable *b*.# your code here b = np.ones((5,2,3))6. Print *b*.# your code here print(b)[[[1. 1. 1.] [1. 1. 1.]] [[1. 1. 1.] [1. 1. 1.]] [[1. 1. 1.] [1. 1. 1.]] [[1. 1. 1.] [1. 1. 1.]] [[1. 1. 1.] [1. 1. 1.]]]7. Do *a* and *b* have the same size? How do you prove that in Python code?# your code here print(a.shape) print(a.size) print(b.shape) print(b.size) #It seems like they are the same size print(a.size == b.size)(2, 3, 5) 30 (5, 2, 3) 30 True8. Are you able to add *a* and *b*? Why or why not?# your answer here # No, because they are of different dimensions print(a.shape == b.shape)False9. Transpose *b* so that it has the same structure of *a* (i.e. become a 2x3x5 array). Assign the transposed array to variable *c*.# your code here c = np.reshape(b, (2,3,5)) print(c.shape) print(c)(2, 3, 5) [[[1. 1. 1. 1. 1.] [1. 1. 1. 1. 1.] [1. 1. 1. 1. 1.]] [[1. 1. 1. 1. 1.] [1. 1. 1. 1. 1.] [1. 1. 1. 1. 1.]]]10. Try to add *a* and *c*. Now it should work. Assign the sum to variable *d*. 
But why does it work now?# your code/answer here # The arrays need to be the same dimensions d = np.add(a,c) print(d)[[[ 1.15029962 1.16709118 -0.81006912 -0.07990635 1.26090639] [ 0.60930063 0.77351911 0.94446774 1.07993998 1.26112052] [-0.09173696 1.10820165 -0.44217709 0.40795257 1.05324294]] [[ 1.59013886 1.53156329 1.83228162 2.71264247 -0.3698096 ] [ 0.27562331 1.37578938 1.59568654 0.56298483 3.5370072 ] [ 1.56318257 1.7255606 0.28532591 0.84306465 0.07842024]]]11. Print *a* and *d*. Notice the difference and relation of the two array in terms of the values? Explain.# your code/answer here print(a) print(d) # for some/most values, you are adding by 1[[[ 0.15029962 0.16709118 -1.81006912 -1.07990635 0.26090639] [-0.39069937 -0.22648089 -0.05553226 0.07993998 0.26112052] [-1.09173696 0.10820165 -1.44217709 -0.59204743 0.05324294]] [[ 0.59013886 0.53156329 0.83228162 1.71264247 -1.3698096 ] [-0.72437669 0.37578938 0.59568654 -0.43701517 2.5370072 ] [ 0.56318257 0.7255606 -0.71467409 -0.15693535 -0.92157976]]] [[[ 1.15029962 1.16709118 -0.81006912 -0.07990635 1.26090639] [ 0.60930063 0.77351911 0.94446774 1.07993998 1.26112052] [-0.09173696 1.10820165 -0.44217709 0.40795257 1.05324294]] [[ 1.59013886 1.53156329 1.83228162 2.71264247 -0.3698096 ] [ 0.27562331 1.37578938 1.59568654 0.56298483 3.5370072 ] [ 1.56318257 1.7255606 0.28532591 0.84306465 0.07842024]]]12. Multiply *a* and *c*. Assign the result to *e*.# your code here e = np.multiply(a, c) print(e)[[[ 0.15029962 0.16709118 -1.81006912 -1.07990635 0.26090639] [-0.39069937 -0.22648089 -0.05553226 0.07993998 0.26112052] [-1.09173696 0.10820165 -1.44217709 -0.59204743 0.05324294]] [[ 0.59013886 0.53156329 0.83228162 1.71264247 -1.3698096 ] [-0.72437669 0.37578938 0.59568654 -0.43701517 2.5370072 ] [ 0.56318257 0.7255606 -0.71467409 -0.15693535 -0.92157976]]]13. Does *e* equal to *a*? Why or why not?# your code/answer here #Equal in what sense? Size? Shape? # They appear to equal in shape size and values. print(a==e) print(a.shape) print(a.size) print(e.shape) print(e.size)[[[ True True True True True] [ True True True True True] [ True True True True True]] [[ True True True True True] [ True True True True True] [ True True True True True]]] (2, 3, 5) 30 (2, 3, 5) 3014. Identify the max, min, and mean values in *d*. Assign those values to variables *d_max*, *d_min* and *d_mean*.# your code here d_max = d.max() print(d_max) d_min = d.min() print(d_min) d_mean = np.mean(d) print(d_mean)3.5370072026778927 -0.8100691223476884 0.95105382271501715. Now we want to label the values in *d*. First create an empty array *f* with the same shape (i.e. 2x3x5) as *d* using `np.empty`.# your code here f = np.empty([2,3,5]) print(f)[[[0.15029962 0.16709118 1.81006912 1.07990635 0.26090639] [0.39069937 0.22648089 0.05553226 0.07993998 0.26112052] [1.09173696 0.10820165 1.44217709 0.59204743 0.05324294]] [[0.59013886 0.53156329 0.83228162 1.71264247 1.3698096 ] [0.72437669 0.37578938 0.59568654 0.43701517 2.5370072 ] [0.56318257 0.7255606 0.71467409 0.15693535 0.92157976]]]16. Populate the values in *f*. For each value in *d*, if it's larger than *d_min* but smaller than *d_mean*, assign 25 to the corresponding value in *f*. If a value in *d* is larger than *d_mean* but smaller than *d_max*, assign 75 to the corresponding value in *f*. If a value equals to *d_mean*, assign 50 to the corresponding value in *f*. Assign 0 to the corresponding value(s) in *f* for *d_min* in *d*. Assign 100 to the corresponding value(s) in *f* for *d_max* in *d*. 
In the end, f should have only the following values: 0, 25, 50, 75, and 100.**Note**: you don't have to use Numpy in this question.# your code here ind1=0 ind2=0 ind3=0 for i in d: for j in i: for k in j: if k == d_mean: f[ind1][ind2][ind3] = 50 elif k == d_min: f[ind1][ind2][ind3] = 0 elif k == d_max: f[ind1][ind2][ind3] = 100 elif (k > d_min) and (k < d_mean): f[ind1][ind2][ind3] = 25 elif (k > d_mean) and (k < d_max): f[ind1][ind2][ind3] = 75 ind3 += 1 ind3=0 ind2+=1 ind2=0 ind1+=1 print(f)[[[ 75. 75. 0. 25. 75.] [ 25. 25. 25. 75. 75.] [ 25. 75. 25. 25. 75.]] [[ 75. 75. 75. 75. 25.] [ 25. 75. 75. 25. 100.] [ 75. 75. 25. 25. 25.]]]17. Print *d* and *f*. Do you have your expected *f*?For instance, if your *d* is:```python[[[1.85836099, 1.67064465, 1.62576044, 1.40243961, 1.88454931],[1.75354326, 1.69403643, 1.36729252, 1.61415071, 1.12104981],[1.72201435, 1.1862918 , 1.87078449, 1.7726778 , 1.88180042]],[[1.44747908, 1.31673383, 1.02000951, 1.52218947, 1.97066381],[1.79129243, 1.74983003, 1.96028037, 1.85166831, 1.65450881],[1.18068344, 1.9587381 , 1.00656599, 1.93402165, 1.73514584]]]```Your *f* should be:```python[[[ 75., 75., 75., 25., 75.],[ 75., 75., 25., 25., 25.],[ 75., 25., 75., 75., 75.]],[[ 25., 25., 25., 25., 100.],[ 75., 75., 75., 75., 75.],[ 25., 75., 0., 75., 75.]]]```# your code here print(d) print(f)[[[ 1.15029962 1.16709118 -0.81006912 -0.07990635 1.26090639] [ 0.60930063 0.77351911 0.94446774 1.07993998 1.26112052] [-0.09173696 1.10820165 -0.44217709 0.40795257 1.05324294]] [[ 1.59013886 1.53156329 1.83228162 2.71264247 -0.3698096 ] [ 0.27562331 1.37578938 1.59568654 0.56298483 3.5370072 ] [ 1.56318257 1.7255606 0.28532591 0.84306465 0.07842024]]] [[[ 75. 75. 0. 25. 75.] [ 25. 25. 25. 75. 75.] [ 25. 75. 25. 25. 75.]] [[ 75. 75. 75. 75. 25.] [ 25. 75. 75. 25. 100.] [ 75. 75. 25. 25. 25.]]]18. Bonus question: instead of using numbers (i.e. 0, 25, 50, 75, and 100), use string values ("A", "B", "C", "D", and "E") to label the array elements. 
For the example above, the expected result is:```python[[[ 'D', 'D', 'D', 'B', 'D'],[ 'D', 'D', 'B', 'B', 'B'],[ 'D', 'B', 'D', 'D', 'D']],[[ 'B', 'B', 'B', 'B', 'E'],[ 'D', 'D', 'D', 'D', 'D'],[ 'B', 'D', 'A', 'D', 'D']]]```**Note**: you don't have to use Numpy in this question.# your code here ind1=0 ind2=0 ind3=0 for i in d: for j in i: for k in j: if k == d_mean: f[ind1][ind2][ind3] = str('C') elif k == d_min: f[ind1][ind2][ind3] = str('A') elif k == d_max: f[ind1][ind2][ind3] = str('E') elif (k > d_min) and (k < d_mean): f[ind1][ind2][ind3] = str('B') elif (k > d_mean) and (k < d_max): f[ind1][ind2][ind3] = str('D') ind3 += 1 ind3=0 ind2+=1 ind2=0 ind1+=1 print(f)Prepared by Data Visualization with MatplotlibConnect with me on - LinkedInimport numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt %matplotlib inlineHistogramx = np.random.normal(size = 2000) plt.hist(x, bins=40, color='yellowgreen') plt.gca().set(title='Histogram', ylabel='Frequency') plt.show() x = np.random.rand(2000) plt.hist(x, bins=30 ,color='#D4AC0D') plt.gca().set(title='Histogram', ylabel='Frequency') plt.show() # Using Edge Color for readability plt.figure(figsize=(10,8)) x = np.random.normal(size = 2000) plt.hist(x, bins=40, color='yellowgreen' , edgecolor="#6A9662") plt.gca().set(title='Histogram', ylabel='Frequency') plt.show()Binning# Binning plt.figure(figsize=(10,8)) x = np.random.normal(size = 2000) plt.hist(x, bins=30, color='yellowgreen' , edgecolor="#6A9662") plt.gca().set(title='Histogram', ylabel='Frequency') plt.show() plt.figure(figsize=(10,8)) plt.hist(x, bins=20, color='yellowgreen' , edgecolor="#6A9662") plt.gca().set(title='Histogram', ylabel='Frequency') plt.show() plt.figure(figsize=(10,8)) plt.hist(x, bins=10, color='yellowgreen' , edgecolor="#6A9662") plt.gca().set(title='Histogram', ylabel='Frequency') plt.show()Plotting Multiple Histogramsplt.figure(figsize=(8,11)) x = np.random.normal(-4,1,size = 800) y = np.random.normal(0,1.5,size = 800) z = np.random.normal(3.5,1,size = 800) plt.hist(x, bins=30, color='yellowgreen' , alpha=0.6) plt.hist(y, bins=30, color='#FF8F00' , alpha=0.6) plt.hist(z, bins=30, color='blue' , alpha=0.6) plt.gca().set(title='Histogram', ylabel='Frequency') plt.show() # Using Histogram to plot a cumulative distribution function plt.figure(figsize=(10,8)) x = np.random.rand(2000) plt.hist(x, bins=30 ,color='#ffa41b' , edgecolor="#639a67",cumulative=True) plt.gca().set(title='Histogram', ylabel='Frequency') plt.show()Plots for RoFL paper Single-shot FEMNIST# TODO: Single-shot FEMNIST, CIFAR10 import plots.single_shot as single_shot df = single_shot.load_data_femnist() df = df[df['round'] <= 10] display(df.columns) fig, df = single_shot.build_plot_tight("femnist_single_shot_static_tight", df, 'FEMNIST', True) display(fig) display(df[df["round"] >= 0])CIFAR# TODO: Single-shot FEMNIST, CIFAR10 import plots.single_shot as single_shot df = single_shot.load_data_cifar() df = df[df['round'] <= 10] display(df.columns) fig, df = single_shot.build_plot_tight("cifar_single_shot_static_tight", df, 'CIFAR10', False) display(fig) # df = single_shot.load_data_cifar() # fig, _ = single_shot.build_plot("cifar_single_shot_static", df, 'CIFAR10_defenses') # display(fig) # display(df[df["round"] <= 10])Continuous bounds* __TODO:__ Do we add a baseline (no attack) in the experiments as well? 
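Stepping back to exercises 16 and 18 of the NumPy section above: the array `f` was allocated with `np.empty`, which gives a float array, so assigning the letter labels of the bonus exercise into it would raise a `ValueError`. As an optional alternative to the nested loops (a sketch only, not the exercise's required solution), `np.select` can produce both the numeric and the string labels at once, building a string array of its own for the letters:

```python
import numpy as np

d = 1.0 + np.random.random((2, 3, 5))          # stand-in for the d defined above
d_min, d_max, d_mean = d.min(), d.max(), d.mean()

conditions = [
    d == d_min,
    d == d_max,
    d == d_mean,
    (d > d_min) & (d < d_mean),
    (d > d_mean) & (d < d_max),
]

f_numeric = np.select(conditions, [0, 100, 50, 25, 75], default=50)        # exercise 16
f_letters = np.select(conditions, ['A', 'E', 'C', 'B', 'D'], default='C')  # exercise 18
print(f_numeric)
print(f_letters)
```

The `default` values never fire here, because every element of `d` falls into one of the five conditions; they only spare `np.select` from having to guess a fill value.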
Static bounds FEMNISTimport plots.continuous as continuous df = continuous.load_continuous_bound_data('FEMNIST', None) display(df.columns) fig, df = continuous.build_continuous_static_plot("femnist_continuous_bound_static", df, 'FEMNIST') display(fig) display(df)CIFARimport plots.continuous as continuous df = continuous.load_continuous_bound_data('CIFAR10', 'static') display(df.columns) # df = df[df["round"] <= 800] fig, df = continuous.build_continuous_static_plot("cifar10_continuous_bound_static", df, 'CIFAR10', leftmost=False) display(fig) display(df)Median* For FEMNIST, we seem to be able to increase the median bound by quite a lot. FEMNISTimport plots.continuous as continuous df = continuous.load_continuous_bound_data('FEMNIST', None) fig, df = continuous.build_continuous_median_plot("femnist_continuous_bound_median_tight", df, 'FEMNIST', leftmost=True) display(fig) display(df)CIFARimport plots.continuous as continuous df = continuous.load_continuous_bound_data('CIFAR10', 'median') display(df.columns) fig, df = continuous.build_continuous_median_plot("cifar10_continuous_bound_median_tight", df, 'CIFAR10') display(fig) display(df)Increase attacks FEMNIST# TODO: Increase attackers CIFAR10&FEMNIST import plots.increase_attackers as increase_attackers df = increase_attackers.load_data('FEMNIST') display(df.columns) fig, df = increase_attackers.build_plot("femnist_increase_attackers_tight", df, 'FEMNIST', leftmost=True) display(fig) display(df)However, what does 27% in a data poisoning setting look like?import plots.blackbox_increase_attackers as bb_inc df = bb_inc.load_data_femnist() display(df.columns) fig, df = bb_inc.build_plot("femnist_increase_attackers_bb", df, 'FEMNIST', True) display(fig) display(df)CIFAR* TODO: Also run larger bounds.import plots.increase_attackers as increase_attackers df = increase_attackers.load_data('CIFAR10') # df = df[df['round'] <= 800] display(df.columns) fig, df = increase_attackers.build_plot("cifar_increase_attackers_tight", df, 'CIFAR10') display(fig) display(df) import plots.blackbox_increase_attackers as bb_inc df = bb_inc.load_data_cifar() display(df.columns) fig, df = bb_inc.build_plot("cifar_increase_attackers_bb", df, 'CIFAR10', False) display(fig) display(df)Comparison with blackbox Single-shot Outlierimport plots.single_shot_objective as single_shot_objective df = single_shot_objective.load_data_femnist() display(df.columns) fig, df = single_shot_objective.build_plot("femnist_single_shot_outlier", df, 'FEMNIST', True) display(fig) import plots.single_shot_objective as single_shot_objective df = single_shot_objective.load_data_cifar() display(df.columns) fig, df = single_shot_objective.build_plot("cifar10_single_shot_outlier", df, 'CIFAR10', False) display(fig)FEMNISTimport plots.attack_objective as att_obj # Capped at 500 rounds df = att_obj.load_data_femnist() fig, df = att_obj.build_plot("femnist_comparison_blackbox_edge", df, 'FEMNIST_edge') display(fig) fig, df = att_obj.build_plot("femnist_comparison_blackbox_tasks", df, 'FEMNIST_tasks') display(fig) # display(df[df["round"] >= 40]) import plots.attack_objective as att_obj # Capped at 500 rounds df = att_obj.load_data_femnist() display(df.columns) fig, df = att_obj.build_attack_plot("femnist_comparison_edge_tasks_inone_l2", df, 'FEMNIST_l2', leftmost=True) display(fig) # median df = att_obj.load_data_femnist("median") display(df.columns) fig, df = att_obj.build_attack_plot("femnist_comparison_edge_tasks_inone_median", df, 'FEMNIST_median', leftmost=True) display(fig) import 
plots.attack_objective as att_obj # Capped at 500 rounds df = att_obj.load_data_cifar() display(df.columns) fig, df = att_obj.build_attack_plot("cifar_comparison_edge_tasks_inone", df, 'CIFAR10', leftmost=False) display(fig)Appendiximport plots.continuous as continuous df = continuous.load_continuous_bound_data('FEMNIST_linf', None) df = df.sort_values('round') display(df.columns) fig, df = continuous.build_continuous_static_plot("femnist_continuous_bound_static_linf", df, 'FEMNIST_linf') display(fig) display(df) import plots.weight_distribution as weights df = weights.load_params_data() fig, df = weights.build_continuous_static_plot("femnist_weight_distribution_overtime", df) display(fig) display(df) import plots.weight_distribution as weights df = weights.load_weight_distribution_single_round_data() fig, df = weights.build_single_round("femnist_weight_distribution_single", df) display(fig) display(df) for label in ["mal", "ben"]: weights_truncated = df[(df[f"{label}_weights"] > 0.4) | (df[f"{label}_weights"] < -0.4)].count() print(f"Truncated {weights_truncated} {label} weights!") for label in ["mal", "ben"]: weights_truncated = df[(df[f"{label}_weights"] > 0.01) | (df[f"{label}_weights"] < -0.01)].count() print(f"Truncated {weights_truncated} {label} weights!") import plots.weight_distribution as weights df = weights.load_weight_distribution_single_round_data() fig, df = weights.build_single_round_broken("femnist_weight_distribution_single", df) display(fig) display(df)Microbenchmarksfrom plots.microbenchmarks import build_df_mbench_computation, build_fig_mbench_computation_server_dlog, \ build_fig_mbench_computation_server_perclient_zkp, build_fig_mbench_computation_client_zkp df = build_df_mbench_computation() df.to_csv("full_including_smallclient.csv") display(df) display(df['server_log2reconstruct_ms']) fig_mbench_computation_server_dlog = build_fig_mbench_computation_server_dlog(df) print("Server Overall - Discrete Log Reconstruction:") display(fig_mbench_computation_server_dlog) fig_mbench_computation_server_perclient_zkp = build_fig_mbench_computation_server_perclient_zkp(df) print("\nServer per Client - Zero Knowledge Proofs + Aggregation:") display(fig_mbench_computation_server_perclient_zkp) fig_mbench_computation_clientlarge_zkp = build_fig_mbench_computation_client_zkp(df, "large") print("\nClient Large - Zero Knowledge Proofs:") display(fig_mbench_computation_clientlarge_zkp) print("\nClient Small - Zero Knowledge Proofs:") fig_mbench_computation_clientsmall_zkp = build_fig_mbench_computation_client_zkp(df, "small") display(fig_mbench_computation_clientsmall_zkp) # Numbers in text n_weights = 262144 line = df[df["n_weights"] == n_weights] for machine in ["clientlarge", "clientsmall"]: for norm in ["l2", "l8", "l8p"]: print(f'{machine} {norm}: {(line[f"{norm}_{machine}_range_ms"] + line[f"{norm}_{machine}_wellformed_ms"]) / 1000 }') for norm in ["l2", "l8", "l8p"]: print(f'server {norm}: {(line[f"{norm}_server_range_ms"] + line[f"{norm}_server_wellformed_ms"] + line[f"{norm}_server_caggregation_ms"]) / 1000 }') for machine in ["clientlarge", "clientsmall"]: base = (line[f"l8_{machine}_range_ms"] + line[f"l8_{machine}_wellformed_ms"]) / 1000 opt = (line[f"l8p_{machine}_range_ms"] + line[f"l8p_{machine}_wellformed_ms"]) / 1000 print(f'{machine}: { base } { opt }, {round(base / opt, 2)}') display(df['l8p_server_range_ms']) # Bandwidth import math import pandas as pd import numpy as np from matplotlib import pyplot as plt from matplotlib.backends.backend_pdf import PdfPages from 
common import get_colorful_styles, output_dir def setup_plt(): fig_width_pt = 220 # Get this from LaTeX using \showthe inches_per_pt = 1.0 / 72.27 * 2 # Convert pt to inches golden_mean = ((np.math.sqrt(5) - 1.0) / 2.0) * .8 # Aesthetic ratio fig_width = fig_width_pt * inches_per_pt # width in inches fig_height = (fig_width * golden_mean) # height in inches fig_size = [fig_width, fig_height] plt_params = { 'backend': 'ps', 'axes.labelsize': 20, 'legend.fontsize': 16, 'xtick.labelsize': 18, 'ytick.labelsize': 18, 'font.size': 18, 'figure.figsize': fig_size, 'font.family': 'Times New Roman' } plt.rcParams.update(plt_params) plt.rc('pdf', fonttype=42) # IMPORTANT to get rid of Type 3 def build_df_bandwidth(max_n_weights=10**6): n_steps = 1000 step_size = int(max_n_weights / 1000) message_sizes = [] for n_weights in range(1, max_n_weights, step_size): d = get_message_size(n_weights) message_sizes.append(d) df = pd.DataFrame(message_sizes) df["l2_total_mb"] = (df["l2_commitment_bytes"] + df["l2_wellformedness_bytes"] + df["l2_range_proof_bytes"]) * 1e-6 df["l8_total_mb"] = (df["l8_commitment_bytes"] + df["l8_wellformedness_bytes"] + df["l8_range_proof_bytes"]) * 1e-6 return df def get_message_size(n_weights, proving_bit_range=8, n_vector_segments=4, group_element_bytes=32, scalar_bytes=32, plaintext_weight_bytes=4): # n_weights: D # proving_bit_range: n # n_vector_segments: p d = { "n_weights": n_weights, "proving_bit_range": proving_bit_range, "n_vector_segments": n_vector_segments, "group_element_bytes": group_element_bytes, "scalar_bytes": scalar_bytes } d["plaintext_bytes"] = plaintext_weight_bytes * n_weights ############################################################### ## Commitments ## ############################################################### # L8 / L8p d["l8_commitment_bytes"] = n_weights * 2 * group_element_bytes # L2: requires additional commitment to the squared parameters # (pederson commitment => 1 group element per weight) d["l2_commitment_bytes"] = d["l8_commitment_bytes"] + n_weights * group_element_bytes ############################################################### ## Well-Formedness Proof ## ############################################################### # L8 / L8p d["l8_wellformedness_bytes"] = (2 * scalar_bytes + 2 * group_element_bytes) * n_weights # L2: requires one additional scalar and group element for the square relation proof d["l2_wellformedness_bytes"] = d["l8_wellformedness_bytes"] + (scalar_bytes + group_element_bytes) * n_weights ############################################################### ## Range Proofs ## ############################################################### def next_pow(x): return pow(2, math.ceil(math.log(x, 2))) # L8 / L8p n_group_elements = 2 * (math.log(proving_bit_range, 2) + math.log(next_pow(n_weights / n_vector_segments), 2)) + 4 n_scalars = 5 d["l8_range_proof_bytes"] = n_group_elements * group_element_bytes + n_scalars * scalar_bytes # L2: requires additional group elements and scalars to proof that l2 norm is in range n_group_elements_additional = math.log(proving_bit_range, 2) + 4 n_scalars_additional = 5 d["l2_range_proof_bytes"] = d["l8_range_proof_bytes"] + n_group_elements_additional * group_element_bytes + n_scalars_additional * scalar_bytes return d def build_fig_mbench_bandwidth_perclient(df, norm): name = f"mbench_bandwidth_perclient_{norm}" if norm not in ["l2", "l8"]: raise ValueError("unknown norm") if norm == "l2": label_prefix = "$L_2$" else: label_prefix = "$L_{\infty}$" setup_plt() colors, _ = 
get_colorful_styles() colors = [(0.66, 0.66, 0.66, 1), colors[1], colors[2]] with PdfPages(f"{output_dir}/{name}.pdf") as pdf: fig, ax = plt.subplots(figsize=(3,2.2)) ########################## # Draw all the lines ########################## ax.plot(df["n_weights"], df["plaintext_bytes"]*1e-6, label="plaintext", color="0.0", linestyle="--", linewidth=2) ax.stackplot(df["n_weights"], df[norm + "_commitment_bytes"]*1e-6, df[norm + "_wellformedness_bytes"]*1e-6, df[norm + "_range_proof_bytes"]*1e-6, colors=colors, labels=[label_prefix + " commitment", label_prefix + " well-formedness", label_prefix + " range proof"], zorder=2) ########################## # General Format ########################## #ax.set_title("Hello World") handles, labels = plt.gca().get_legend_handles_labels() order = [2,1,3,0] #ax.legend([handles[idx] for idx in order], [labels[idx] for idx in order], loc="upper left", ncol=2, handletextpad=0.3, columnspacing=0.5) # 'best', 'upper right', 'upper left', 'lower left', # 'lower right', 'right', 'center left', 'center right', # 'lower center', 'upper center', 'center' ax.grid(True, axis="y", linestyle=':', color='0.6', zorder=0, linewidth=1.2) ########################## # Y - Axis Format ########################## ax.set_ylim(ymin=0, ymax=300) ax.set_ylabel("Bandwidth [MB]") ax.set_yticks(np.arange(0, 400, 100)) #ax.set_yticklabels(labels, fontsize=16, rotation=345) ########################## # X - Axis Format ########################## ax.set_xlim(xmin=0, xmax=1000000) ax.set_xlabel("Number of Parameters") ax.set_xticks(np.arange(0, 1000001, 250*1000)) xlabels = [f"{round(x)}k" if x < 1000 else f"{round(x/1000)}M" for x in ax.get_xticks()/1000] ax.set_xticklabels(xlabels) pdf.savefig(bbox_inches='tight', pad_inches=0) plt.close() return fig df = build_df_bandwidth(max_n_weights=1000000) fig_mbench_bandwidth_perclient_l8 = build_fig_mbench_bandwidth_perclient(df, norm="l8") print("L8 Bandwidth (infinity norm):") display(fig_mbench_bandwidth_perclient_l8) fig_mbench_bandwidth_perclient_l2 = build_fig_mbench_bandwidth_perclient(df, norm="l2") print("\nL2 Bandwidth:") display(fig_mbench_bandwidth_perclient_l2) #plt_params_l = plt_params #plt_params_l['figure.figsize'] = [fig_size[0], 2/3 * fig_size[1]] #plt.rcParams.update(plt_params_l) colors, _ = get_colorful_styles() colors = [(0.66, 0.66, 0.66, 1), colors[1], colors[2]] bar_labels=["commitment", "well-formedness", "range proof"] plt.plot([1], [1], label="plaintext", color="0.0", linestyle="--", linewidth=2) for color, label in zip(colors, bar_labels): plt.bar([1],[1], color=color, label=label) legend = plt.legend(bbox_to_anchor=(0, 1.02,1, 0.2), loc="lower left", mode="expand", ncol=2) def export_legend(legend, filename): pdf_pages = PdfPages(filename) fig = legend.figure fig.canvas.draw() bbox = legend.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) F = plt.gcf() pdf_pages.savefig(F, bbox_inches=bbox, pad_inches=0) plt.clf() pdf_pages.close() export_legend(legend, filename=f"{output_dir}/mbench_bandwidth_perclient_legend.pdf") display(df[df.index % 8 == 0])L8 Bandwidth (infinity norm):This notebook looks at the signs of biosphere flows and characterization factors.First, let's do the usual setup.from brightway2 import * projects.set_current("biosphere notebook") bw2setup()Creating default biosphereWe will take the ReCiPe method as an example.is_recipe = lambda x: x[0] == 'ReCiPe Endpoint (H,A)'Make a function to print any negative CFs in a given method.def print_negative_cfs(method): cfs = 
Method(method).load() for key, value in cfs: if value < 0: print(get_activity(key), value)Now we loop through the ReCiPe endpoint methods, and print any negative CFs we find.for method in filter(is_recipe, methods): print(method) print_negative_cfs(method) print()('ReCiPe Endpoint (H,A)', 'ecosystem quality', 'agricultural land occupation') ('ReCiPe Endpoint (H,A)', 'ecosystem quality', 'climate change, ecosystems') 'Carbon dioxide, to soil or biomass stock' (kilogram, None, ('soil',)) -0.175224928177 ('ReCiPe Endpoint (H,A)', 'ecosystem quality', 'freshwater ecotoxicity') ('ReCiPe Endpoint (H,A)', 'ecosystem quality', 'freshwater eutrophication') ('ReCiPe Endpoint (H,A)', 'ecosystem quality', 'marine ecotoxicity') ('ReCiPe Endpoint (H,A)', 'ecosystem quality', 'natural land transformation') 'Transformation, to forest, extensive' (square meter, None, ('natural resource', 'land')) -4.25108560478 'Transformation, to forest, intensive' (square meter, None, ('natural resource', 'land')) -4.25108560478 'Transformation, to forest, secondary (non-use)' (square meter, None, ('natural resource', 'land')) -4.25185647787 'Transformation, to forest, unspecified' (square meter, None, ('natural resource', 'land')) -4.25108560478 'Transformation, to unsp[...]Lexicon Generator This tutorial is available as an IPython notebook at [Malaya/example/lexicon](https://github.com/huseinzol05/Malaya/tree/master/example/lexicon).%%time import malaya import numpy as npCPU times: user 4.47 s, sys: 1.01 s, total: 5.48 s Wall time: 5.37 sWhy lexiconLexicon is populated words related to certain domains, like, words for negative and positive sentiments.Example, word `suka` can represent as positive sentiment. If `suka` exists in a sentence, we can say that sentence is positive sentiment.Lexicon based is common way people use to classify a text and very fast. Again, it is pretty naive because a word can be semantically ambiguous. sentiment lexiconMalaya provided a small sample for sentiment lexicon, simply,sentiment_lexicon = malaya.lexicon.sentiment sentiment_lexicon.keys()emotion lexiconMalaya provided a small sample for emotion lexicon, simply,emotion_lexicon = malaya.lexicon.emotion emotion_lexicon.keys()Lexicon generatorTo build a lexicon is time consuming, because required expert domains to populate related words to the domains. With the help of word vector, we can induce sample words to specific domains given some annotated lexicon. Why we induced lexicon from word vector? Even for a word `suka` commonly represent positive sentiment, but if the word vector learnt the context of `suka` different polarity and based nearest words also represent different polarity, so `suka` got tendency to become negative sentiment.Malaya provided inducing lexicon interface, build on top of [Inducing Domain-Specific Sentiment Lexicons from Unlabeled Corpora](https://arxiv.org/pdf/1606.02820.pdf).Let say you have a lexicon based on standard language or `bahasa baku`, then you want to find similar lexicon on social media context. So you can use this `malaya.lexicon` interface. 
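As a concrete picture of the naive lexicon-based classification described above, the toy sketch below simply counts how many words of each label occur in a sentence (made-up word lists; this is not Malaya's implementation or API, just the idea):

```python
# Toy lexicon-based classifier: whichever label's words occur most often wins.
toy_lexicon = {
    'positive': {'suka', 'gembira', 'bagus'},
    'negative': {'benci', 'sedih', 'teruk'},
}

def classify(sentence, lexicon):
    words = sentence.lower().split()
    scores = {label: sum(word in vocab for word in words)
              for label, vocab in lexicon.items()}
    return max(scores, key=scores.get), scores

print(classify('saya suka makanan ini', toy_lexicon))
```

Fast, but exactly as naive as described: a single ambiguous word decides the label, which is why inducing domain-specific lexicons from word vectors is useful.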
To use this interface, we must initiate `malaya.wordvector.load` first.And, at least small lexicon sample like this,```python{'label1': ['word1', 'word2'], 'label2': ['word3', 'word4']}````label` can be more than 2, example like `malaya.lexicon.emotion`, up to 6 different labels.vocab, embedded = malaya.wordvector.load_social_media() wordvector = malaya.wordvector.load(embedded, vocab)random walkRandom walk technique is main technique use by the paper, can read more at [3.2 Propagating polarities from a seed set](https://arxiv.org/abs/1606.02820)```pythondef random_walk( lexicon, wordvector, pool_size = 10, top_n = 20, similarity_power = 100.0, beta = 0.9, arccos = True, normalization = True, soft = False, silent = False,): """ Induce lexicon by using random walk technique, use in paper, https://arxiv.org/pdf/1606.02820.pdf Parameters ---------- lexicon: dict curated lexicon from expert domain, {'label1': [str], 'label2': [str]}. wordvector: object wordvector interface object. pool_size: int, optional (default=10) pick top-pool size from each lexicons. top_n: int, optional (default=20) top_n for each vectors will multiple with `similarity_power`. similarity_power: float, optional (default=100.0) extra score for `top_n`, less will generate less bias induced but high chance unbalanced outcome. beta: float, optional (default=0.9) penalty score, towards to 1.0 means less penalty. 0 < beta < 1. arccos: bool, optional (default=True) covariance distribution for embedded.dot(embedded.T). If false, covariance + 1. normalization: bool, optional (default=True) normalize word vectors using L2 norm. L2 is good to penalize skewed vectors. soft: bool, optional (default=False) if True, a word not in the dictionary will be replaced with nearest jarowrinkler ratio. if False, it will throw an exception if a word not in the dictionary. silent: bool, optional (default=False) if True, will not print any logs. Returns ------- tuple: (labels[argmax(scores), axis = 1], scores, labels) """```%%time results, scores, labels = malaya.lexicon.random_walk(sentiment_lexicon, wordvector, pool_size = 5) np.unique(list(results.values()), return_counts = True) results %%time results_emotion, scores_emotion, labels_emotion = malaya.lexicon.random_walk(emotion_lexicon, wordvector, pool_size = 10) np.unique(list(results_emotion.values()), return_counts = True) results_emotionpropagate probabilistic```pythondef propagate_probabilistic( lexicon, wordvector, pool_size = 10, top_n = 20, similarity_power = 10.0, arccos = True, normalization = True, soft = False, silent = False,): """ Learns polarity scores via standard label propagation from lexicon sets. Parameters ---------- lexicon: dict curated lexicon from expert domain, {'label1': [str], 'label2': [str]}. wordvector: object wordvector interface object. pool_size: int, optional (default=10) pick top-pool size from each lexicons. top_n: int, optional (default=20) top_n for each vectors will multiple with `similarity_power`. similarity_power: float, optional (default=10.0) extra score for `top_n`, less will generate less bias induced but high chance unbalanced outcome. arccos: bool, optional (default=True) covariance distribution for embedded.dot(embedded.T). If false, covariance + 1. normalization: bool, optional (default=True) normalize word vectors using L2 norm. L2 is good to penalize skewed vectors. soft: bool, optional (default=False) if True, a word not in the dictionary will be replaced with nearest jarowrinkler ratio. 
if False, it will throw an exception if a word not in the dictionary. silent: bool, optional (default=False) if True, will not print any logs. Returns ------- tuple: (labels[argmax(scores), axis = 1], scores, labels) """```%%time results_emotion, scores_emotion, labels_emotion = malaya.lexicon.propagate_probabilistic(emotion_lexicon, wordvector, pool_size = 10) np.unique(list(results_emotion.values()), return_counts = True) results_emotionpropagate graph```pythondef propagate_graph( lexicon, wordvector, pool_size = 10, top_n = 20, similarity_power = 10.0, normalization = True, soft = False, silent = False,): """ Graph propagation method dapted from Velikovich, Leonid, et al. "The viability of web-derived polarity lexicons." http://www.aclweb.org/anthology/N10-1119 Parameters ---------- lexicon: dict curated lexicon from expert domain, {'label1': [str], 'label2': [str]}. wordvector: object wordvector interface object. pool_size: int, optional (default=10) pick top-pool size from each lexicons. top_n: int, optional (default=20) top_n for each vectors will multiple with `similarity_power`. similarity_power: float, optional (default=10.0) extra score for `top_n`, less will generate less bias induced but high chance unbalanced outcome. normalization: bool, optional (default=True) normalize word vectors using L2 norm. L2 is good to penalize skewed vectors. soft: bool, optional (default=False) if True, a word not in the dictionary will be replaced with nearest jarowrinkler ratio. if False, it will throw an exception if a word not in the dictionary. silent: bool, optional (default=False) if True, will not print any logs. Returns ------- tuple: (labels[argmax(scores), axis = 1], scores, labels) """```%%time results_emotion, scores_emotion, labels_emotion = malaya.lexicon.propagate_graph(emotion_lexicon, wordvector, pool_size = 10) np.unique(list(results_emotion.values()), return_counts = True) results_emotionEmotion Detection from Facial Photos Dataset URL: https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge/overview Google Drive Connectionfrom google.colab import drive drive.mount('/gdrive') %cd /gdrive import os os.chdir('/gdrive/My Drive/Colab Notebooks') !lsaraba.jpg kepce.jpg ddef.jpg MNIST_Ornek.ipynb dog1.jpg models Emotion_Detection.ipynb Pomeranian_01.jpeg Emotion_Detection_My_CNN.ipynb regularization_CNN.ipynb Evrisim.ipynb test_image01.jpg f35.jpg tree.jpg Fashion_MNIST.ipynb Untitled0.ipynb fer2013.csv 'VGG16 in Keras.ipynb' Img_Class_w_VGG16.ipynbLibrariesimport numpy as np import pandas as pd from matplotlib import pyplot as plt %matplotlib inline import keras from keras.models import Sequential, Model, model_from_json from keras.layers import Dense, Conv2D, Activation, MaxPool2D, Flatten, Dropout, BatchNormalization from keras.utils import np_utils from keras.preprocessing import image from keras.callbacks import ModelCheckpoint from keras.preprocessing.image import ImageDataGeneratorDataset# path_of_csv = '/gdrive/My Drive/Colab Notebooks/' data = pd.read_csv('fer2013.csv') # Check referances x_column = data['pixels'].tolist() y_column = data['emotion'].tolist() data.head(11) data.shape35887 data with 3 columnsdata["Usage"].value_counts()28709 data for Training 3589 data for PublicTest 3589 data for PrivateTest Training Data# (x_train, y_train), (x_test, y_test) data_train = data[data.Usage == "Training"] data_test = data[data.Usage == "PublicTest"] train_pixel = data_train.pixels.str.split(" ").tolist() pixel_images = 
pd.DataFrame(train_pixel, dtype=int) pixel_images = pixel_images.values train_images = pixel_images.astype(np.float) train_images.shapeTraining Imagesdef show(img): image_show = img.reshape(48,48) plt.axis('off') plt.imshow(image_show, cmap='gray') which_img = 28708 show(train_images[which_img]) print(data.loc[which_img, 'emotion']) print("(0=Angry, 1=Disgust, 2=Fear, 3=Happy, 4=Sad, 5=Surprise, 6=Neutral)")4 (0=Angry, 1=Disgust, 2=Fear, 3=Happy, 4=Sad, 5=Surprise, 6=Neutral)Test Data Test Imagesdata_test = data[data.Usage == "PublicTest"] test_pixel = data_test.pixels.str.split(" ").tolist() pixel_images2 = pd.DataFrame(test_pixel, dtype=int) pixel_images2 = pixel_images2.values test_images = pixel_images2.astype(np.float) test_images.shape which_img2 = 3588 show(test_images[which_img2]) img_label2 = 28708 + which_img2 print(data.loc[img_label2, 'emotion']) print("(0=Angry, 1=Disgust, 2=Fear, 3=Happy, 4=Sad, 5=Surprise, 6=Neutral)")4 (0=Angry, 1=Disgust, 2=Fear, 3=Happy, 4=Sad, 5=Surprise, 6=Neutral)Review Dataset Imagesplt.figure(0, figsize=(12,6)) for i in range(1,13): plt.subplot(3,4, i) plt.axis('off') image = test_images[i].reshape(48,48) plt.imshow(image, cmap='gray') plt.suptitle('Some Images from the Dataset', fontsize=15) plt.show()Creating Model & Layersmodel = Sequential()Adding Layers Layer 1: 1st Convolutionmodel.add(Conv2D(128, 3, data_format='channels_last', kernel_initializer='random_normal',input_shape=(48,48,1))) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(MaxPool2D(pool_size=(2,2), strides=1))Layer 2: 2nd Convolution & Max Pooling & 0.25 DropOutmodel.add(Conv2D(64, 3)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(MaxPool2D(pool_size=(2,2), strides=1)) model.add(Dropout(0.25))Layer 3: 2 Convolutions, Max Pooling & 0.25 Dropoutmodel.add(Conv2D(64, 3)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Conv2D(64, 3)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(MaxPool2D(pool_size=(2,2), strides=1)) model.add(Dropout(0.25))Layer 4: Convolutions, Padding, Max Pooling & 0.35 DropOutmodel.add(Conv2D(32, 3, padding='same')) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Conv2D(32, 3, padding='same')) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(MaxPool2D(pool_size=(2,2), strides=2, padding='same')) model.add(Dropout(0.35))Layer 5: Convolutions, Padding, Max Pooling & 0.4 DropOutmodel.add(Conv2D(16, 3, padding='same')) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Conv2D(16, 3, padding='same')) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Conv2D(16, 3, padding='same')) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(MaxPool2D(pool_size=(2,2), strides=2, padding='same')) model.add(Dropout(0.4))Layer 6: Fully Connection & 0.5 Dropoutmodel.add(Flatten()) model.add(Dense(256)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Flatten()) model.add(Dense(128)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Flatten()) model.add(Dense(64)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dropout(0.5))Layer 7: Output Layermodel.add(Dense(7)) model.add(Activation('softmax'))Model Compilemodel.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])Model Summarymodel.summary()Model: "sequential" _________________________________________________________________ Layer 
(type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 46, 46, 128) 1280 _________________________________________________________________ batch_normalization (BatchNo (None, 46, 46, 128) 512 _________________________________________________________________ activation (Activation) (None, 46, 46, 128) 0 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 45, 45, 128) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 43, 43, 64) 73792 _________________________________________________________________ batch_normalization_1 (Batch (None, 43, 43, 64) 256 ________________________________________________________[...]Defining (x_train, y_train), (x_test, y_test)x_train = train_images.reshape(-1, 48, 48, 1) x_test = test_images.reshape(-1, 48, 48, 1) print(x_train.shape) print(x_test.shape) def one_hot(labels_dense, num_classes): num_labels = labels_dense.shape[0] index_offset = np.arange(num_labels) * num_classes labels_one_hot = np.zeros((num_labels, num_classes)) labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 return labels_one_hot train_labels_flat = data_train["emotion"].values.ravel() train_labels_count = np.unique(train_labels_flat).shape[0] y_train = one_hot(train_labels_flat, train_labels_count) y_train = y_train.astype(np.uint8) test_labels_flat = data_test["emotion"].values.ravel() test_labels_count = np.unique(train_labels_flat).shape[0] y_test = one_hot(test_labels_flat, test_labels_count) y_test = y_test.astype(np.uint8) print(y_train.shape) print(y_test.shape)(28709, 7) (3589, 7)Model Training Model Progress Model CheckPoint Model will be saved as models/emotion.h5 (with best epoch)checkpoint_callback = ModelCheckpoint(filepath='models/emotion.h5', verbose=1, save_best_only=True) epochs = 10 batchsize = 128Data Augmentation We can improve the model with data augmentation. 
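For reference, this is roughly how the augmented batches would be fed into training if the augmentation were wired in (a hypothetical sketch using `tf.keras` and a tiny throw-away model, since the notebook ultimately fits on the raw arrays; older standalone Keras would use `fit_generator` instead of `fit`):

```python
import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, Flatten, Dense
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Stand-ins for x_train / y_train: 48x48 grayscale images, 7 one-hot classes
x_demo = np.random.rand(32, 48, 48, 1).astype('float32')
y_demo = np.eye(7)[np.random.randint(0, 7, 32)]

demo_gen = ImageDataGenerator(rotation_range=20,
                              width_shift_range=0.2,
                              height_shift_range=0.2,
                              horizontal_flip=True)
# featurewise_center / featurewise_std_normalization would additionally
# require demo_gen.fit(x_demo) before training, as done for datagen below.

demo_model = Sequential([Conv2D(8, 3, activation='relu', input_shape=(48, 48, 1)),
                         Flatten(),
                         Dense(7, activation='softmax')])
demo_model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

# The generator replaces the raw arrays in fit(); each epoch then sees
# freshly augmented copies of the training images.
demo_model.fit(demo_gen.flow(x_demo, y_demo, batch_size=8),
               steps_per_epoch=len(x_demo) // 8,
               epochs=1,
               verbose=0)
```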
(was not implemented this main model)datagen = ImageDataGenerator( featurewise_center=True, featurewise_std_normalization=True, rotation_range=20, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True)20° Rotation & 0.2 Shiftdatagen.fit(x_train)Model Fithist = model.fit(x_train, y_train, batch_size=batchsize, epochs=epochs, shuffle=True, validation_data=(x_test, y_test), callbacks=[checkpoint_callback], verbose=2 )Epoch 1/10 225/225 - 61s - loss: 1.8484 - accuracy: 0.2838 - val_loss: 1.8017 - val_accuracy: 0.2739 Epoch 00001: val_loss improved from inf to 1.80173, saving model to models/emotion.h5 Epoch 2/10 225/225 - 24s - loss: 1.4969 - accuracy: 0.4218 - val_loss: 1.5092 - val_accuracy: 0.4257 Epoch 00002: val_loss improved from 1.80173 to 1.50924, saving model to models/emotion.h5 Epoch 3/10 225/225 - 24s - loss: 1.3742 - accuracy: 0.4748 - val_loss: 1.5002 - val_accuracy: 0.4408 Epoch 00003: val_loss improved from 1.50924 to 1.50017, saving model to models/emotion.h5 Epoch 4/10 225/225 - 24s - loss: 1.3097 - accuracy: 0.5002 - val_loss: 1.3325 - val_accuracy: 0.4773 Epoch 00004: val_loss improved from 1.50017 to 1.33249, saving model to models/emotion.h5 Epoch 5/10 225/225 - 24s - loss: 1.2650 - accuracy: 0.5192 - val_loss: 1.3417 - val_accuracy: 0.4840 Epoch 00005: val_loss did not improve from 1.33249 Epoch 6/10 225/225 - 24s - loss: 1.2280 - accuracy: 0.5351 - val_loss: 1.3385 - val[...]Save model as JSONjson = model.to_json() with open('models/emotion.json', 'w') as json_w: json_w.write(json)Plotting the resultsplt.figure(figsize=(20,5)) plt.subplot(1,2,1) plt.suptitle('Training Results', fontsize=18) plt.ylabel('Loss', fontsize=15) plt.plot(hist.history['loss'], color='r', label='Training Loss') plt.plot(hist.history['val_loss'], color='g', label='Validation Loss') plt.legend(loc='upper right') plt.subplot(1,2,2) plt.ylabel('Accuracy', fontsize=15) plt.plot(hist.history['accuracy'], color='b', label='Training Accuracy') plt.plot(hist.history['val_accuracy'], color='black', label='Validation Accuracy') plt.legend(loc='lower right') plt.show()Test Your Own Image Input image URL#@title Paste Image URL below: url = "https://thumbs.dreamstime.com/b/portrait-older-adult-senior-man-pain-sad-exhausted-face-human-emotions-facial-expression-retirement-128516153.jpg" #@param {type:"string"} from PIL import Image import requests # response = requests.get(url) # img = Image.open(BytesIO(response.content)) img = Image.open(requests.get(url, stream=True).raw) plt.imshow(img, cmap='gray') plt.axis('off')Save image to drivefrom keras.preprocessing.image import load_img, img_to_array, save_img input_arr = img_to_array(img) save = save_img('test_image02.jpg', input_arr) path = "test_image02.jpg" os.mkdir("aug_img") !ls os.chdir("/gdrive/My Drive/Colab Notebooks/aug_img") !ls path = "/gdrive/My Drive/Colab Notebooks/aug_img" from keras.preprocessing.image import save_img from keras.preprocessing.image import img_to_array, array_to_img train_images = img_to_array(train_images[100]) save_img(path + '.jpg', train_images) from PIL import Image from keras.preprocessing.image import save_img from keras.preprocessing.image import img_to_array, array_to_img, ImageDataGenerator import glob, os for i in range(1,10): # img = test_images[i].reshape(48,48) # img = train_images # getdata = img.getdata() # img_array = np.array(getdata) train_images = img_to_array(train_images) save_img(path + str(i) + '.jpg', train_images) # input_array = img_to_array(image) # path = '/aug_img/' # save = 
save_img(path + str(i) + '.jpg', input_array) def img_process(img, target): img = load_img(path,target_size=target, color_mode="grayscale") input_arr = img_to_array(img) input_arr = np.array([input_arr]) # Convert single image to a batch. return input_arr labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']Print prediction resulttest_data = img_process(img, (48,48)) result = model.predict(test_data) # print(result) plt.imshow(img, cmap='gray') plt.axis('off') print(labels[np.argmax(result)]) print(int(np.max(result)*100),"%")Sad 28 %Create your own pipeline Initial phase Import necessary libraries# Standard python libraries import logging import os import time logging.basicConfig(format='[%(asctime)s] (%(levelname)s): %(message)s', level=logging.INFO) # Installed libraries import numpy as np import pandas as pd from sklearn.metrics import roc_auc_score from sklearn.model_selection import train_test_split import torch # Imports from our package from lightautoml.automl.base import AutoML from lightautoml.ml_algo.boost_lgbm import BoostLGBM from lightautoml.ml_algo.tuning.optuna import OptunaTuner from lightautoml.pipelines.features.lgb_pipeline import LGBSimpleFeatures from lightautoml.pipelines.ml.base import MLPipeline from lightautoml.pipelines.selection.importance_based import ImportanceCutoffSelector, ModelBasedImportanceEstimator from lightautoml.reader.base import PandasToPandasReader from lightautoml.tasks import Task from lightautoml.automl.blend import WeightedBlenderGeneral parameters setupN_THREADS = 8 # threads cnt for lgbm and linear models N_FOLDS = 5 # folds cnt for AutoML RANDOM_STATE = 42 # fixed random state for various reasons TEST_SIZE = 0.2 # Test size for metric check TARGET_NAME = 'TARGET' # Target column nameFix torch number of threads and numpy seednp.random.seed(RANDOM_STATE) torch.set_num_threads(N_THREADS)Example data load%%time data = pd.read_csv('../LightAutoML/examples/data/sampled_app_train.csv') data.head()CPU times: user 97.6 ms, sys: 28.7 ms, total: 126 ms Wall time: 125 ms(Optional) Some user feature preparation Cell below shows some user feature preparations to create task more difficult (this block can be omitted if you don't want to change the initial data):%%time data['BIRTH_DATE'] = (np.datetime64('2018-01-01') + data['DAYS_BIRTH'].astype(np.dtype('timedelta64[D]'))).astype(str) data['EMP_DATE'] = (np.datetime64('2018-01-01') + np.clip(data['DAYS_EMPLOYED'], None, 0).astype(np.dtype('timedelta64[D]')) ).astype(str) data['constant'] = 1 data['allnan'] = np.nan data['report_dt'] = np.datetime64('2018-01-01') data.drop(['DAYS_BIRTH', 'DAYS_EMPLOYED'], axis=1, inplace=True)CPU times: user 101 ms, sys: 4.41 ms, total: 105 ms Wall time: 104 ms(Optional) Data splitting for train-test Block below can be omitted if you are going to train model only or you have specific train and test files:%%time train_data, test_data = train_test_split(data, test_size=TEST_SIZE, stratify=data[TARGET_NAME], random_state=RANDOM_STATE) logging.info('Data splitted. 
Parts sizes: train_data = {}, test_data = {}' .format(train_data.shape, test_data.shape)) train_data.head()AutoML Modules Setup![AutoML pipeline for this task](imgs/tutorial_1_pipeline.png) Create Task and PandasReader%%time task = Task('binary') reader = PandasToPandasReader(task, cv=N_FOLDS, random_state=RANDOM_STATE)CPU times: user 5 ms, sys: 1.69 ms, total: 6.69 ms Wall time: 5.19 msCreate feature selector (if necessary)%%time model0 = BoostLGBM( default_params={'learning_rate': 0.05, 'num_leaves': 64, 'seed': 42, 'num_threads': N_THREADS} ) pipe0 = LGBSimpleFeatures() mbie = ModelBasedImportanceEstimator() selector = ImportanceCutoffSelector(pipe0, model0, mbie, cutoff=0)Copying TaskTimer may affect the parent PipelineTimer, so copy will create new unlimited TaskTimerCreate 1st level ML pipeline for AutoML Our first level ML pipeline:- Simple features for gradient boosting built on selected features (using step 2) - 2 different models: * LightGBM with params tuning (using OptunaTuner) * LightGBM with heuristic params%%time pipe = LGBSimpleFeatures() params_tuner1 = OptunaTuner(n_trials=20, timeout=30) # stop after 20 iterations or after 30 seconds model1 = BoostLGBM( default_params={'learning_rate': 0.05, 'num_leaves': 128, 'seed': 1, 'num_threads': N_THREADS} ) model2 = BoostLGBM( default_params={'learning_rate': 0.025, 'num_leaves': 64, 'seed': 2, 'num_threads': N_THREADS} ) pipeline_lvl1 = MLPipeline([ (model1, params_tuner1), model2 ], pre_selection=selector, features_pipeline=pipe, post_selection=None)CPU times: user 922 µs, sys: 414 µs, total: 1.34 ms Wall time: 1.34 msCreate 2nd level ML pipeline for AutoML Our second level ML pipeline:- Using simple features as well, but now it will be Out-Of-Fold (OOF) predictions of algos from 1st level- Only one LGBM model without params tuning- Without feature selection on this stage because we want to use all OOFs here%%time pipe1 = LGBSimpleFeatures() model = BoostLGBM( default_params={'learning_rate': 0.05, 'num_leaves': 64, 'max_bin': 1024, 'seed': 3, 'num_threads': N_THREADS}, freeze_defaults=True ) pipeline_lvl2 = MLPipeline([model], pre_selection=None, features_pipeline=pipe1, post_selection=None)CPU times: user 861 µs, sys: 162 µs, total: 1.02 ms Wall time: 1.03 msCreate AutoML pipeline AutoML pipeline consist of:- Reader for data preparation- First level ML pipeline (as built in step 3.1)- Second level ML pipeline (as built in step 3.2)- `Skip_conn = False` equals here "not to use initial features on the second level pipeline"%%time automl = AutoML(reader, [ [pipeline_lvl1], [pipeline_lvl2], ], skip_conn=False, verbose=0)CPU times: user 735 µs, sys: 0 ns, total: 735 µs Wall time: 741 µsTrain AutoML on loaded data In cell below we train AutoML with target column `TARGET` to receive fitted model and OOF predictions:%%time oof_pred = automl.fit_predict(train_data, roles={'target': TARGET_NAME}) logging.info('oof_pred:\n{}\nShape = {}'.format(oof_pred, oof_pred.shape))Train data shape: (8000, 125) Feats was rejected during automatic roles guess: [] Start fitting LightGBM ... 
Training until validation scores don't improve for 100 rounds [100] valid's auc: 0.716183 Early stopping, best iteration is: [16] valid's auc: 0.720694 LightGBM fitting and predicting completed Optuna may run 6299999991.996463 secsAnalyze fitted model Below we analyze feature importances of different algos:logging.info('Feature importances of selector:\n{}' .format(selector.get_features_score())) logging.info('=' * 70) logging.info('Feature importances of top level algorithm:\n{}' .format(automl.levels[-1][0].ml_algos[0].get_features_score())) logging.info('=' * 70) logging.info('Feature importances of lowest level algorithm - model 0:\n{}' .format(automl.levels[0][0].ml_algos[0].get_features_score())) logging.info('=' * 70) logging.info('Feature importances of lowest level algorithm - model 1:\n{}' .format(automl.levels[0][0].ml_algos[1].get_features_score())) logging.info('=' * 70)[2020-12-03 18:14:52,366] (INFO): Feature importances of selector: EXT_SOURCE_3 1029.681686 EXT_SOURCE_2 894.265428 BIRTH_DATE 537.081401 EXT_SOURCE_1 424.764621 DAYS_LAST_PHONE_CHANGE 262.583100 ... FLAG_DOCUMENT_16 0.000000 FLAG_DOCUMENT_14 0.000000 FLAG_DOCUMENT_13 0.000000 FLAG_DOCUMENT_11 0.000000 FLAG_PHONE 0.000000 Length: 110, dtype: float64 [2020-12-03 18:14:52,368] (INFO): ====================================================================== [2020-12-03 18:14:52,370] (INFO): Feature importances of top level algorithm: Lvl_0_Pipe_0_Mod_0_LightGBM_prediction_0 2861.708537 Lvl_0_Pipe_0_Mod_1_LightGBM_prediction_0 2043.129412 dtype: float64 [2020-12-03 18:14:52,371] (INFO): ====================================================================== [2020-12-03 18:14:52,374] (INFO): Feature importances of lowest level al[...]Predict to test data and check scores%%time test_pred = automl.predict(test_data) logging.info('Prediction for test data:\n{}\nShape = {}' .format(test_pred, test_pred.shape)) logging.info('Check scores...') logging.info('OOF score: {}'.format(roc_auc_score(train_data[TARGET_NAME].values, oof_pred.data[:, 0]))) logging.info('TEST score: {}'.format(roc_auc_score(test_data[TARGET_NAME].values, test_pred.data[:, 0])))[2020-12-03 18:14:52,997] (INFO): Prediction for test data: array([[0.04925682], [0.05368711], [0.0534688 ], ..., [0.04521164], [0.04988748], [0.16970594]], dtype=float32) Shape = (2000, 1) [2020-12-03 18:14:52,998] (INFO): Check scores... [2020-12-03 18:14:53,002] (INFO): OOF score: 0.706138322789459 [2020-12-03 18:14:53,005] (INFO): TEST score: 0.7235682744565217--- Day 6: Lanternfish --- [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/oddrationale/AdventOfCode2021FSharp/main?urlpath=lab%2Ftree%2FDay06.ipynb) The sea floor is getting steeper. Maybe the sleigh keys got carried this way?A massive school of glowing lanternfish swims past. They must spawn quickly to reach such large numbers - maybe exponentially quickly? You should model their growth rate to be sure.Although you know nothing about this specific species of lanternfish, you make some guesses about their attributes. Surely, each lanternfish creates a new lanternfish once every 7 days.However, this process isn't necessarily synchronized between every lanternfish - one lanternfish might have 2 days left until it creates another lanternfish, while another might have 4. 
So, you can model each fish as a single number that represents the number of days until it creates a new lanternfish.Furthermore, you reason, a new lanternfish would surely need slightly longer before it's capable of producing more lanternfish: two more days for its first cycle.So, suppose you have a lanternfish with an internal timer value of 3:After one day, its internal timer would become 2.After another day, its internal timer would become 1.After another day, its internal timer would become 0.After another day, its internal timer would reset to 6, and it would create a new lanternfish with an internal timer of 8.After another day, the first lanternfish would have an internal timer of 5, and the second lanternfish would have an internal timer of 7.A lanternfish that creates a new fish resets its timer to 6, not 7 (because 0 is included as a valid timer value). The new lanternfish starts with an internal timer of 8 and does not start counting down until the next day.Realizing what you're trying to do, the submarine automatically produces a list of the ages of several hundred nearby lanternfish (your puzzle input). For example, suppose you were given the following list:3,4,3,1,2This list means that the first fish has an internal timer of 3, the second fish has an internal timer of 4, and so on until the fifth fish, which has an internal timer of 2. Simulating these fish over several days would proceed as follows:Initial state: 3,4,3,1,2After 1 day: 2,3,2,0,1After 2 days: 1,2,1,6,0,8After 3 days: 0,1,0,5,6,7,8After 4 days: 6,0,6,4,5,6,7,8,8After 5 days: 5,6,5,3,4,5,6,7,7,8After 6 days: 4,5,4,2,3,4,5,6,6,7After 7 days: 3,4,3,1,2,3,4,5,5,6After 8 days: 2,3,2,0,1,2,3,4,4,5After 9 days: 1,2,1,6,0,1,2,3,3,4,8After 10 days: 0,1,0,5,6,0,1,2,2,3,7,8After 11 days: 6,0,6,4,5,6,0,1,1,2,6,7,8,8,8After 12 days: 5,6,5,3,4,5,6,0,0,1,5,6,7,7,7,8,8After 13 days: 4,5,4,2,3,4,5,6,6,0,4,5,6,6,6,7,7,8,8After 14 days: 3,4,3,1,2,3,4,5,5,6,3,4,5,5,5,6,6,7,7,8After 15 days: 2,3,2,0,1,2,3,4,4,5,2,3,4,4,4,5,5,6,6,7After 16 days: 1,2,1,6,0,1,2,3,3,4,1,2,3,3,3,4,4,5,5,6,8After 17 days: 0,1,0,5,6,0,1,2,2,3,0,1,2,2,2,3,3,4,4,5,7,8After 18 days: 6,0,6,4,5,6,0,1,1,2,6,0,1,1,1,2,2,3,3,4,6,7,8,8,8,8Each day, a 0 becomes a 6 and adds a new 8 to the end of the list, while each other number decreases by 1 if it was present at the start of the day.In this example, after 18 days, there are a total of 26 fish. After 80 days, there would be a total of 5934.Find a way to simulate lanternfish. How many lanternfish would there be after 80 days?let input = File.ReadAllText(@"input/06.txt").Split(",") |> Array.map int let step (fishCountList: list) = fishCountList |> List.mapi (fun i count -> match i with | 6 -> fishCountList.[7] + fishCountList.[0] | 8 -> fishCountList.[0] | _ -> fishCountList.[i + 1]) let rec days (fishCountList: list) = seq { yield fishCountList yield! fishCountList |> step |> days } #!time [0..8] |> List.map (fun i -> input |> Array.filter (fun t -> t = i) |> Array.length |> bigint) |> days |> Seq.item 80 |> Seq.sum--- Part Two --- Suppose the lanternfish live forever and have unlimited food and space. 
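The F# solution above never stores individual fish; it only keeps nine counters, one per timer value, and rotates them once per day. Since the rest of this collection is mostly Python, here is the same bucket-counting idea as a small, self-contained Python sketch (using the example timers from the puzzle text):

```python
def simulate(timers, days):
    # One counter per timer value 0..8; the list of individual fish grows
    # exponentially, these nine counts do not.
    counts = [0] * 9
    for t in timers:
        counts[t] += 1
    for _ in range(days):
        spawning = counts[0]
        counts = counts[1:] + [spawning]   # every timer goes down by one day; newborns start at 8
        counts[6] += spawning              # parents that just spawned reset to 6
    return sum(counts)

example = [3, 4, 3, 1, 2]
print(simulate(example, 18))   # 26, matching the worked example
print(simulate(example, 80))   # 5934
```

Because only the nine counts are tracked, longer horizons cost essentially nothing extra.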
Would they take over the entire ocean?After 256 days in the example above, there would be a total of 26984457539 lanternfish!How many lanternfish would there be after 256 days?#!time [0..8] |> List.map (fun i -> input |> Array.filter (fun t -> t = i) |> Array.length |> bigint) |> days |> Seq.item 256 |> Seq.sumLoad datanetwork_dim = int(256/2) # ensure that this is always greater than max node number that occurs in your data # in addition it needs to fit to the unet layer for concatenation #graphs # #masks = glob.glob("../input/graph_images/train/label/*.png") # masks = np.load('../input/graph_images/train/label/adjcouput_matrix.npy',allow_pickle='TRUE').item() # orgs = glob.glob("../input/graph_images/train/image/*.png") #training images #masks = np.load('S:/06_Studienarbeit/03_CNN/generate_data/data/train/label/adjcouput_matrix.npy',allow_pickle='TRUE').item() masks = glob.glob('S:/studenten/Rausch/06_Studienarbeit/03_CNN/generate_data/data/train_less128_2000imgs/label/*') masks = masks[0:803] orgs = glob.glob("S:/studenten/Rausch/06_Studienarbeit/03_CNN/generate_data/data/train_less128_2000imgs/image/*.png") orgs = orgs[0:803] #every training image has less than 128 nodes #training images imgs_list = [] masks_list = [] for image, mask in zip(orgs, masks): I = cv2.imread(image) key = image graph_label = np.load(masks[int(key[-14:-9])], allow_pickle=True) graph_label_norm = graph_label.copy() positions = graph_label[:, :2, 0] #normalize positions pos_norm = np.zeros(positions.shape) for i in range(len(positions)): pos_norm[i][0] = np.round((positions[i][0]/I.shape[1])*512, 0) pos_norm[i][1] = np.round((positions[i][1] /I.shape[0])*512, 0) graph_label_norm[:, :2, 0] = pos_norm #pad the label to obtain uniform array sizes #graph_label_padded = np.pad(graph_label_norm, ((0, network_dim-graph_label.shape[0]), (0, network_dim-graph_label.shape[1]), (-9.9, -9.9))) #graph_label_padded = np.pad(graph_label_norm, ((0, 0), (0, 0), (0, 0))) #graph_label_padded = graph_label_norm imgs_list.append(np.array(Image.open(image).convert('L').resize((512,512)))) #print(graph_label_padded) #masks_list_position.append(np.array(graph_label_norm[:, 0:2, 0])) #masks_list_adjacency.append(np.array(graph_label_norm[:, 2:, 0])) masks_list.append(graph_label_norm) imgs_np = np.asarray(imgs_list)Plot images + masks + overlay (mask over original)from keras_unet.utils_regine import plot_graph_on_img, plot_nodes_on_img node_thick = 6 index = 3 save = True masks_np = np.asarray(masks_list[index]) #uniform array sizes are necessary y_positions_label = masks_np[:,0:2, 0] y_adjacency_label = masks_np[:,2:, 0] node_img = plot_nodes_on_img(imgs_np[index,:,:], y_positions_label, node_thick) fig = plot_graph_on_img(imgs_np[index,:,:], y_positions_label, y_adjacency_label) # tailor the data to the specific pre defined network dimension a = np.full((len(masks_list),network_dim *2), -9.9) adj_flatten_dim = int((network_dim*network_dim-network_dim)/2) b = np.zeros((len(masks_list),adj_flatten_dim)) for index in range(len(masks_list)): masks_np = np.asarray(masks_list[index]) y_label_positions = masks_np[:,0:2, 0] # last zero --> without attributes y_label_adjacency = masks_np[:,2:, 0] # last zero --> without attributes # form position matrix and adjacency in a one dimensional vector information y_label_positions = y_label_positions.reshape((y_label_positions.shape[0]*2)) adjacency_label_indices = np.triu_indices(y_label_adjacency.shape[1], k = 1) y_label_adjacency = 
y_label_adjacency[adjacency_label_indices[0],adjacency_label_indices[1]] if y_label_positions.shape[0] >= network_dim *2: print('the number of labeld nodes/frame is too high for network dimension - decrease nodes in training data or consider to adapt the network size') a[index,0:network_dim *2] = y_label_positions[0:network_dim *2] b[index,0:adj_flatten_dim] = y_label_adjacency[0:adj_flatten_dim] else: a[index,0:y_label_positions.shape[0]] = y_label_positions b[index,0:y_label_adjacency.shape[0]] = y_label_adjacency y_label = [a, b] print('total number of positions: ', y_label[0].shape) print('total number of relevant adjacency etries: ', y_label[1].shape)total number of positions: (803, 256) total number of relevant adjacency etries: (803, 8128)Get data into correct shape, dtype and range (0.0-1.0)print(imgs_np.max(), masks_np.max()) x = np.asarray(imgs_np, dtype=np.float32)/255 x = x.reshape(x.shape[0], x.shape[1], x.shape[2], 1)255 1150.2470671014507Train/val splitfrom sklearn.model_selection import train_test_split # split data x_train, x_val, y_train_positions,y_val_positions, y_train_adjacency, y_val_adjacency = train_test_split(x, y_label[0], y_label[1],shuffle=False, test_size=0.1, random_state=0) from models_graph.prepare_functions import convert_to_tensor # convert from numpy to tensorflow object x_train = convert_to_tensor(x_train) x_val = convert_to_tensor(x_val) y_train_positions = convert_to_tensor(y_train_positions) y_train_adjacency = convert_to_tensor(y_train_adjacency) y_val_positions = convert_to_tensor(y_val_positions) y_val_adjacency = convert_to_tensor(y_val_adjacency)Initialize networkfrom models_graph.custom_graph_head import custom_graph_head, custom_adj_unet print(x_train.shape) input_shape = (512, 512, 1) model = custom_adj_unet(input_size = input_shape, pretrained_weights =None, network_dim = network_dim)(722, 512, 512, 1)Network shapeprint('Input Shape: ',model.input_shape) print('Output Shape: ',model.output_shape) print('with position vector: ',model.output_shape[0], ' and adjacency vector: ', model.output_shape[1]) model.summary() import os os.environ["PATH"] += os.pathsep + "C:\\Program Files\\Graphviz\\bin\\" import sys print(sys.path) sys.path.append("C:\\Program Files\\Graphviz\\bin\\") import tensorflow.python.keras as keras from tensorflow.keras.callbacks import ModelCheckpoint model_filename = 'graph_extract_model_v1.h5' callback_checkpoint = ModelCheckpoint(model_filename)Compile + train#import keras as keras import tensorflow as tf import tensorflow.python.keras as keras from tensorflow.python.keras.optimizer_v2.adam import Adam from tensorflow.python.keras.optimizer_v2.gradient_descent import SGD #from keras.optimizers import Adam, SGD from keras_unet.metrics import iou, iou_thresholded from models_graph.losses import loss_node_positions from models_graph.losses import loss_adjacency # Load the TensorBoard notebook extension %load_ext tensorboard import datetime #opt = keras.optimizers.Adam(learning_rate=0.005) model.compile( optimizer= 'adam', loss={ "pixel_position_of_nodes": keras.losses.MeanSquaredError(), "adjacency_matrix": keras.losses.BinaryCrossentropy(), }, loss_weights=[1.1, 1.0], ) #print('done') model.output from models_graph.losses import loss_node_positions from models_graph.losses import loss_adjacency print("x_train: ", x_train.shape) print("y_train_positions: ", y_train_positions.shape) print(" y_train_adjacency: ", y_train_adjacency.shape) print("x_val: ", x_val.shape) print("y_val_positions: ", y_val_positions.shape) 
print("y_val_adjacency: ", y_val_adjacency.shape) # model.fit({'input_image': x_train},{'pixel_position_of_nodes': y_train_positions, 'adjacency_matrix': y_train_adjacency}, # epochs = 1, batch_size=2 , # validation_data=({'input_image': x_val},{'pixel_position_of_nodes': y_val_positions, 'adjacency_matrix': y_val_adjacency}), # ) #tensorboard --logdir logs/fit #to use in command line log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1) print(no) history = model.fit(x_train, [y_train_positions, y_train_adjacency], epochs = 60, batch_size=3 , validation_data=(x_val,{'pixel_position_of_nodes': y_val_positions, 'adjacency_matrix': y_val_adjacency}), callbacks=[tensorboard_callback],) # Save the entire model to a HDF5 file #model.save(model_filename)Plot original + ground truth + pred + overlay (pred on top of original)model_filename = 'graph_extract_model_v0.h5' #compute prediction #model = create_model() model.load_weights(model_filename) #first three elements in validation set print(x_val.shape) x_val012 = x_val[:3, :, :, :] y_pred = model.predict(x_val012) predicted_adjacency_matrices = y_pred[1] predicted_positions = y_pred[0] predicted_adjacency0 = predicted_adjacency_matrices[0, :] # predicted_adjacency1 = predicted_adjacency_matrices[1, :] # predicted_adjacency2 = predicted_adjacency_matrices[2, :] predicted_positions0 = predicted_positions[0, :] # predicted_positions1 = predicted_positions[1, :] # predicted_positions2 = predicted_positions[2, :] #reshape from keras_unet.utils_regine import plot_graph_on_img, plot_nodes_on_img from models_graph.prepare_functions import create_adj_matrix, create_position_matrix adj_matrix0 = create_adj_matrix(predicted_adjacency0,17) position_matrix0 = create_position_matrix(predicted_positions0,17) from keras_unet.utils_regine import plot_graph_on_img, plot_nodes_on_img #plot nodes on img node_thick = 6 index = 0 node_img = plot_nodes_on_img(x_val.numpy()[index, :, :, 0], position_matrix0, node_thick) #plot graph on img # index = 0 # fig = plot_graph_on_img(x_val.numpy[index, :, :, 0], position_matrix0, adj_matrix0) #Overfitting and test with label! 
x_test012 = x_train[ 700:701, :, :, :] y_pred = model.predict(x_test012) print(y_pred[1].shape) print(y_pred[0].shape) predicted_adjacency0 = y_pred[1][0,:] predicted_positions0 = y_pred[0][0,:] adj_matrix0 = create_adj_matrix(predicted_adjacency0,35) position_matrix0 = create_position_matrix(predicted_positions0,35) #node_img = plot_nodes_on_img(x_train.numpy()[index, :, :, 0],predicted_positions0 , node_thick) #fig = plot_graph_on_img(x_train.numpy()[index, :, :, 0], predicted_positions0 , predicted_adjacency0) print("x_train: ", x_train.shape) print("y_train_positions: ", y_train_positions.shape) print(" y_train_adjacency: ", y_train_adjacency.shape) print("x_val: ", x_val.shape) print("y_val_positions: ", y_val_positions.shape) print("y_val_adjacency: ", y_val_adjacency.shape) import numpy as np index = 58 masks_np = np.asarray(masks_list[index]) #uniform array sizes are necessary y_positions_label = masks_np[:,0:2, 0] y_adjacency_label = masks_np[:,2:, 0] position_label = y_train_positions.numpy()[index,:] adjacency_label = y_train_adjacency.numpy()[index,:] adjacency_label_matrix = create_adj_matrix(adjacency_label,60) position_label_matrix = create_position_matrix(position_label,60) node_thick = 6 node_img = plot_nodes_on_img(x_train.numpy()[index, :, :, 0], position_label_matrix, node_thick) fig = plot_graph_on_img(x_train.numpy()[index, :, :, 0],position_label_matrix,adjacency_label_matrix ) node_img = plot_nodes_on_img(imgs_np[index,:,:], y_positions_label, node_thick) fig = plot_graph_on_img(imgs_np[index,:,:], y_positions_label, y_adjacency_label) # print('orginal label: ',y_positions_label[1:5,:],'label reconstruct: ',position_label_matrix[1:5,:]) print('orginal label: ',y_adjacency_label[0:5,0:5],'label reconstruct: ',adjacency_label_matrix[0:5,0:5]) def create_adj_matrix2(adj_vector, size = 35): adj_matrix = np.zeros((size, size)) print('adj vecot',adj_vector) print(size) print('indices', np.triu_indices(size, k = 1)) adj_matrix[np.triu_indices(size, k = 1)] = adj_vector[0:np.shape(np.triu_indices(size, k = 1))[1]] print('adj vecot',adj_vector[0:np.shape(np.triu_indices(size, k = 1))[1]]) print('adj matrix',adj_matrix ) adj_matrix = adj_matrix+np.transpose(adj_matrix) print('adj matrix2',adj_matrix ) return(adj_matrix) adjacency_label_matrix2 = create_adj_matrix2(adjacency_label,5) print('adj matrix 4: ',adjacency_label_matrix2) print('label vector',adjacency_label) #print('orginal label: ',y_positions_label[1:5,:],'label reconstruct: ',position_label_matrix[1:5,:]) print('orginal label: ',y_adjacency_label[1:5,1:5],'label reconstruct: ',adjacency_label_matrix2) adj_flatten_dim = int((network_dim*network_dim-network_dim)/2) y_adjacency_label_org_test =y_adjacency_label adjacency_label_indices = np.triu_indices(y_adjacency_label_org_test.shape[1], k = 1) c = y_adjacency_label_org_test[adjacency_label_indices[0],adjacency_label_indices[1]] y_adjacency_label_org_test = y_adjacency_label_org_test[adjacency_label_indices[0],adjacency_label_indices[1]] print(c.shape) print(adj_flatten_dim) index=0 b = np.zeros((1,adj_flatten_dim)) if c.shape[0] >= adj_flatten_dim: print('the number of labeld nodes/frame is too high for network dimension - decrease nodes in training data or consider to adapt the network size') b[index,0:adj_flatten_dim] =c[0:adj_flatten_dim] else: b[index,0:c.shape[0]] = c y_label = [a, b] print(b.shape) adjacency_label_matrix3 = create_adj_matrix2(b[0,:],68) print(np.shape(adjacency_label_matrix3)) if y_adjacency_label.all() == adjacency_label_matrix3.all(): 
print(True) else: print(None) print('orginal label: ',y_adjacency_label[1:8,1:8],'label reconstruct: ',adjacency_label_matrix3[1:8,1:8]) # one d vecotr is unsorted somewhere, maybebe the split is the issue!!!!(2278,) 8128 (1, 8128) adj vecot [1. 0. 0. ... 0. 0. 0.] 68 indices (array([ 0, 0, 0, ..., 65, 65, 66], dtype=int64), array([ 1, 2, 3, ..., 66, 67, 67], dtype=int64)) adj vecot [1. 0. 0. ... 0. 0. 0.] adj matrix [[0. 1. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] ... [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.]] adj matrix2 [[0. 1. 0. ... 0. 0. 0.] [1. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] ... [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.]] (68, 68) TruePlan for rough estimate: 1. perennials, and grasses and non irrigated (Why google slide doess not say annuals) Function definitions Directoriesdata_dir = "/Users/hn/Documents/01_research_data/Ag_check_point/remote_sensing/01_NDVI_TS/Grant/"Data Readingfile_names = ["Grant_2018_TS.csv"] file_N = file_names[0] a_df = pd.read_csv(data_dir + file_N) a_df = remote_core.initial_clean(a_df) a_df.head(2) # Count distict values, use nunique: pprint (a_df['geo'].nunique()) # Count only non-null values, use count: print (a_df['geo'].count()) # Count total values including null values, use size attribute: print (a_df['geo'].size)7 378 378Iterate through polygons and count peakspeak_dt = remote_core.generate_peak_df(a_df) polygon_list = a_df['geo'].unique() for a_poly in polygon_list: curr_field = a_df[a_df['geo']==a_poly] plot_TS(an_EE_TS_df, xp_axis='doy', yp_axis='NDVI') year = int(an_EE_TS_df['year'].unique()) plant = an_EE_TS_df['CropTyp'].unique()[0] county = an_EE_TS_df['county'].unique()[0] curr_field.columns an_EE_TS_df['county'].unique()[0] xp_axis='doy' yp_axis='NDVI' sb.set(); TS_plot = plt.plot(an_EE_TS_df[xp_axis], an_EE_TS_df[yp_axis]); an_EE_TS_df.head(5) plot_title = county + ", " + plant + ", " + str(year) + ", (" + TRS + ")"LastFm Data Loading%%time df = pd.read_csv(r"F:\Data_Repository\lastfm\userid-timestamp-artid-artname-traid-traname.tsv", sep="\t", error_bad_lines=False, header = None) df.columns = ['userid', 'timestamp', 'artistid', 'artist', '1', 'song'] df.head() df['time'] = pd.to_datetime(df['timestamp']) df.drop(['timestamp'], axis=1, inplace=True) df.head()Removing artist name and song id (possibly) from the user_songs_df dataframeuser_songs_df = df.drop(['artist'], axis=1, inplace=False) user_songs_df.columns = ['userid', 'artistid', 'songid', 'song', 'time'] user_songs_df = user_songs_df.drop(['songid'], axis=1, inplace=False) user_songs_df.head() print('Number of songs : ' + str(user_songs_df['song'].nunique())) def get_unique_count(column): return len(set(column))Filtering songs heard by at least 100 usersdf_songs_heard_100_users = user_songs_df.groupby("song").filter(lambda x: get_unique_count(x['userid'])>100) df_songs_heard_100_users.head() print('Number of songs heard by at least 100 users : ' + str(df_songs_heard_100_users['song'].nunique())) print('Number of users in this dataframe : ' + str(df_songs_heard_100_users['userid'].nunique()))Number of songs heard by at least 100 users : 4034 Number of users in this dataframe : 990Creation of timeslotsdf_songs_heard_100_users['hour'] = df_songs_heard_100_users['time'].dt.hour df_songs_heard_100_users.head() #function to return slot number def slot(hour): if 0 <= hour and hour <= 5: return 1 elif 6 <= hour and hour <= 11: return 2 elif 12 <= hour and hour <= 17: return 3 else: return 4 
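Since the four slots are exact 6-hour buckets, the same assignment can also be written without a per-row apply; a minimal sketch (assuming the 'hour' column holds integers 0-23, as produced by dt.hour above):
# hour // 6 maps 0-5 -> 0, 6-11 -> 1, 12-17 -> 2 and 18-23 -> 3, so adding 1 reproduces slot()
df_songs_heard_100_users['slot'] = df_songs_heard_100_users['hour'] // 6 + 1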
%%time df_songs_heard_100_users['slot'] = df_songs_heard_100_users['hour'].apply( lambda x : slot(x) ) df_songs_heard_100_users.head() df_songs_heard_100_users.shapeCreating song idsdf_songs_heard_100_users['songid'] = df_songs_heard_100_users.groupby(['song']).ngroup().add(1) # Removing artistid, time and hour columns #input_dataset = df_songs_heard_100_users.drop(['artistid', 'song', 'time','hour'], axis=1, inplace=False) input_dataset = df_songs_heard_100_users.drop(['artistid', 'time','hour'], axis=1, inplace=False) input_dataset = input_dataset[['userid', 'song', 'slot']] input_dataset.head() train_set = input_dataset.copy() #test_set = input_dataset.groupby("userid").filter(lambda x: x['userid'].iloc[0] in test_set_keys) print("Number of users in train set : "+str(train_set['userid'].nunique())) # print("Number of users in test set : "+str(test_set['userid'].nunique())) train_set.head()Creating train dataframes based on slotsgrouped_train_set = train_set.groupby('slot') # grouped_test_set = test_set.groupby('slot') train_first_slot_df = grouped_train_set.get_group(1) train_second_slot_df = grouped_train_set.get_group(2) train_third_slot_df = grouped_train_set.get_group(3) train_fourth_slot_df = grouped_train_set.get_group(4) # test_first_slot_df = grouped_test_set.get_group(1) # test_second_slot_df = grouped_test_set.get_group(2) # test_third_slot_df = grouped_test_set.get_group(3) # test_fourth_slot_df = grouped_test_set.get_group(4) # train_first_slot_df # print('First train slot shape : ' + str(train_first_slot_df.shape)) # print('Second train slot shape : ' + str(train_second_slot_df.shape)) # print('Third train slot shape : ' + str(train_third_slot_df.shape)) # print('Fourth train slot shape : ' + str(train_fourth_slot_df.shape)) # print('') # print("Number of users in train_first_slot_df : " + str(train_first_slot_df['userid'].nunique())) # print("Number of users in train_second_slot_df : " + str(train_second_slot_df['userid'].nunique())) # print("Number of users in train_third_slot_df : " + str(train_third_slot_df['userid'].nunique())) # print("Number of users in train_fourth_slot_df : " + str(train_fourth_slot_df['userid'].nunique())) # print('') # print("Number of songs in train_first_slot_df : " + str(train_first_slot_df['song'].nunique())) # print("Number of songs in train_second_slot_df : " + str(train_second_slot_df['song'].nunique())) # print("Number of songs in train_third_slot_df : " + str(train_third_slot_df['song'].nunique())) # print("Number of songs in train_fourth_slot_df : " + str(train_fourth_slot_df['song'].nunique())) # # print('') # # print('First test slot shape : ' + str(test_first_slot_df.shape)) # # print('Second test slot shape : ' + str(test_second_slot_df.shape)) # # print('Third test slot shape : ' + str(test_third_slot_df.shape)) # # print('Fourth test slot shape : ' + str(test_fourth_slot_df.shape)) # # print('') # # print("Number of users in test_first_slot_df : " + str(test_first_slot_df['userid'].nunique())) # # print("Number of users in test_second_slot_df : " + str(test_second_slot_df['userid'].nunique())) # # print("Number of users in test_third_slot_df : " + str(test_third_slot_df['userid'].nunique())) # # print("Number of users in test_fourth_slot_df : " + str(test_fourth_slot_df['userid'].nunique())) # # print('') # # print("Number of songs in test_first_slot_df : " + str(test_first_slot_df['songid'].nunique())) # # print("Number of songs in test_second_slot_df : " + str(test_second_slot_df['songid'].nunique())) # # print("Number of songs 
in test_third_slot_df : " + str(test_third_slot_df['songid'].nunique())) # # print("Number of songs in test_fourth_slot_df : " + str(test_fourth_slot_df['songid'].nunique())) # Get user-song-count dataframe for each slot train_user_song_count_df_first = train_first_slot_df.groupby(["userid","song"]).size().reset_index(name="count") train_user_song_count_df_second = train_second_slot_df.groupby(["userid","song"]).size().reset_index(name="count") train_user_song_count_df_third = train_third_slot_df.groupby(["userid","song"]).size().reset_index(name="count") train_user_song_count_df_fourth = train_fourth_slot_df.groupby(["userid","song"]).size().reset_index(name="count") # test_user_song_count_df_first = test_first_slot_df.groupby(["userid","songid"]).size().reset_index(name="count") # test_user_song_count_df_second = test_second_slot_df.groupby(["userid","songid"]).size().reset_index(name="count") # test_user_song_count_df_third = test_third_slot_df.groupby(["userid","songid"]).size().reset_index(name="count") # test_user_song_count_df_fourth = test_fourth_slot_df.groupby(["userid","songid"]).size().reset_index(name="count") train_user_song_count_df_first.head() (train_user_song_count_df_first.shape, train_user_song_count_df_second.shape, train_user_song_count_df_third.shape, train_user_song_count_df_fourth.shape)*** Use these dataframes *** to export as tsvtrain_user_song_count_df_first train_user_song_count_df_second train_user_song_count_df_third train_user_song_count_df_fourth # test_user_song_count_df_first # test_user_song_count_df_second # test_user_song_count_df_third # test_user_song_count_df_fourth train_user_song_count_df_first.to_csv (r"F:\Data_Repository\lastfm\df_slot1.tsv",sep='\t',index=False,header=False) train_user_song_count_df_second.to_csv(r"F:\Data_Repository\lastfm\df_slot2.tsv",sep='\t',index=False,header=False) train_user_song_count_df_third.to_csv (r"F:\Data_Repository\lastfm\df_slot3.tsv",sep='\t',index=False,header=False) train_user_song_count_df_fourth.to_csv(r"F:\Data_Repository\lastfm\df_slot4.tsv",sep='\t',index=False,header=False)Py-DDA example notebookThis notebooks shows how you can get multiple Doppler winds from different radars. All that is needed are Cf-Compliant grids that share the same grid specifications. Tools such as Python ARM Radar Toolkit (Py-ART) (https://github.com/ARM-DOE/pyart) and LROSE (https://www.eol.ucar.edu/content/lidar-radar-open-software-environment) can create these grids for you. The PyDDA package is built on top of Py-ART. Therefore, Py-ART is needed for PyDDA to run.import pyart import pydda %pylab inline import warnings warnings.filterwarnings("ignore")## You are using the Python ARM Radar Toolkit (Py-ART), an open source ## library for working with weather radar data. Py-ART is partly ## supported by the U.S. Department of Energy as part of the Atmospheric ## Radiation Measurement (ARM) Climate Research Facility, an Office of ## Science user facility. ## ## If you use this software to prepare a publication, please cite: ## ## and , JORS 2016, doi: 10.5334/jors.119First, we load our Cf-Compliant grids using Py-ART's read_grid function.berr_grid = pyart.io.read_grid("berrwinds.20060120.005000.nc") cpol_grid = pyart.io.read_grid("cpolwinds.20060120.005000.nc") print(cpol_grid.fields.keys())dict_keys(['reflectivity', 'region_dealias_velocity', 'ROI', 'corrected_velocity', 'AZ', 'EL', 'u', 'v', 'w', 'divergence', 'shear', 'shear_dir', 'vorticity'])Next, we must specify the initial conditions. 
PyDDA currently has two options for the initial state: * pydda.retrieval.make_wind_field_from_profile - Make the initialization wind field be that from a sounding * pydda.retrieval.make_constant_wind_field - Specify a constant wind field More options will be added in the future! In this example, we will load data from a sounding and use that as our initial state with no vertical motion.sounding = pyart.io.read_arm_sonde( "twpsondewnpnC3.b1.20060119.231600.custom.cdf") print(sounding) u_init, v_init, w_init = pydda.initialization.make_wind_field_from_profile( cpol_grid, sounding[1], vel_field='region_dealias_velocity')(datetime.datetime(2006, 1, 19, 23, 16), )Next, we will retrieve the wind field! The output of this call is a list of Py-ART Grids which correspond to the inputs with the wind fields added in.Grids = pydda.retrieval.get_dd_wind_field([berr_grid, cpol_grid], u_init, v_init, w_init, Co=10.0, Cm=1500.0, Cz=0, vel_name='corrected_velocity', refl_field='reflectivity', frz=5000.0, filt_iterations=0, mask_outside_opt=False, upper_bc=1)Calculating weights for radars 0 and 1 Calculating weights for models... Starting solver rmsVR = 6.761960801583306 Total points:57975.0 | Jvel | Jmass | Jsmooth | Jbg | Jvort | Jmodel | Max w | 6.7675| 175.7040| 0.0000| 0.0000| 0.0000| 0.0000| 10.6044 Norm of gradient: 0.06291713275707776 Iterations before filter: 10 | Jvel | Jmass | Jsmooth | Jbg | Jvort | Jmodel | Max w | 10.0505| 70.2571| 0.0000| 0.0000| 0.0000| 0.0000| 14.8231 Norm of gradient: 0.11379195538780112 Iterations before filter: 20 | Jvel | Jmass | Jsmooth | Jbg | Jvort | Jmodel | Max w | 13.2959| 48.9155| 0.0000| 0.0000| 0.0000| 0.0000| 15.6091 Norm of gradient: 0.1253346105690195 Iterations before filter: 30 | Jvel | Jmass | Jsmooth | Jbg | Jvort | Jmodel | Max w | 2.1407| 43.5154| 0.0000| 0.0000| 0.0000| 0.0000| 17.2214 Norm of gradient: 0.06611851866791223 Iterations before filter: 40 | Jvel | Jmass | Jsmooth | Jb[...]Let's save our grids so we don't have to regenerate them later!pyart.io.write_grid('example_grid_radar0.nc', Grids[0]) pyart.io.write_grid('example_grid_radar1.nc', Grids[1]) Grids = [pyart.io.read_grid('example_grid_radar0.nc'), pyart.io.read_grid('example_grid_radar1.nc')]Finally, we will visualize the wind field. This is easy with the visualization module of PyDDA! PyDDA currently has support to plot wind barb plots through xy, xz, and yz cross sections. 
More types of plots (streamline plots, plots on geospatial grids) are planned as future features.plt.figure(figsize=(7,7)) pydda.vis.plot_horiz_xsection_barbs(Grids, None, 'reflectivity', level=3, w_vel_contours=None, cmap='pyart_HomeyerRainbow', barb_spacing_x_km=7.0, barb_spacing_y_km=7.0) plt.savefig('barb_figure.png', dpi=300) plt.figure(figsize=(8,4)) pydda.vis.plot_xz_xsection_barbs(Grids, None, 'reflectivity', level=40, w_vel_contours=[3, 6, 9], cmap='pyart_HomeyerRainbow', barb_spacing_x_km=10.0, barb_spacing_z_km=2.0) plt.figure(figsize=(8,4)) pydda.vis.plot_yz_xsection_barbs(Grids, None,'DT', level=40, w_vel_contours=[1, 3, 5, 7], barb_spacing_y_km=10.0, barb_spacing_z_km=2.0) plt.figure(figsize=(6,6)) pydda.vis.plot_horiz_xsection_streamlines(Grids, None, 'reflectivity', level=6, w_vel_contours=[3, 6, 9], ) plt.figure(figsize=(8,3)) pydda.vis.plot_xz_xsection_streamlines(Grids, None, 'DT', level=40, w_vel_contours=[3, 6, 9], thickness_divisor=5.0) plt.ylim([0,15]) plt.figure(figsize=(8,4)) pydda.vis.plot_yz_xsection_streamlines(Grids, None,'DT', level=40, w_vel_contours=[1, 3, 5, 7], )Doing a retrieval on a clusterfrom distributed import Client, LocalClusterStart a dask distributed cluster. If you are running a retrieval on more than one machine, we highly recommend the use of dask-jobqueue for setting up your cluster. Dask-jobqueue is available here:https://jobqueue.dask.org/en/latest/# Can also use something like dask-jobqueue to set up your cluster - this is for one machine cluster = LocalCluster(n_workers=2, processes=True) client = Client(cluster) clientget_dd_wind_field_nested takes in the same parameters as get_dd_wind_field, except for a distributed client after w_init as a required entry.Grids = pydda.retrieval.get_dd_wind_field_nested([berr_grid, cpol_grid], u_init, v_init, w_init, client, Co=100.0, Cm=1500.0, Cz=0, vel_name='VT', refl_field='DT', frz=5000.0, filt_iterations=0, mask_outside_opt=True, upper_bc=1) pyart.io.write_grid('example_grid_radar0.nc', Grids[0]) pyart.io.write_grid('example_grid_radar1.nc', Grids[1]) Grids = [pyart.io.read_grid('example_grid_radar0.nc'), pyart.io.read_grid('example_grid_radar1.nc')] plt.figure(figsize=(6,6)) pydda.vis.plot_horiz_xsection_barbs(Grids, None, 'DT', level=6, w_vel_contours=[3, 6, 9], barb_spacing_x_km=5.0, barb_spacing_y_km=15.0) plt.figure(figsize=(6,6)) pydda.vis.plot_horiz_xsection_streamlines(Grids, None, 'DT', level=6, w_vel_contours=[3, 6, 9], )import numpy as np import pandas as pd # data handling from tqdm.notebook import tqdm # measuring for loops runtime import matplotlib.pyplot as plt # plot import seaborn as sns import warnings warnings.filterwarnings('ignore') #Import data from GDrive - if you run this notebook in your local machine mute this cell and change path below from google.colab import drive drive.mount('/content/drive', force_remount= True) path = "drive/MyDrive/MAS/Data/Output_GAMA/Data/People/Restrictions_Beta0.05_1k/" # path to folder & files num_people = 1020 curfew_time = [18,19,20] # hour when the curfew starts curfew_delay = [5, 10] #days after the start of the simulation for curfew people_files = [] for i in tqdm(range(0,num_people)): people_files.append(path + "people" + str(i) + ".txt") def preprocess_people(people_file): DF_people = pd.read_csv(people_file, sep = ",", names = ["cycle","beta","curfew_time", "curfew_delay", "working_place","living_place","is_infected","is_immune","is_dead"]) DF_people = DF_people[DF_people["cycle"] != "cycle"] DF_people = DF_people.reset_index(drop = 
True) DF_people["cycle"] = DF_people["cycle"].astype(np.int16) # dtype conversion DF_people["beta"] = DF_people["beta"].astype(np.float32) DF_people["curfew_time"] = DF_people["curfew_time"].astype(np.int16) DF_people["curfew_delay"] = DF_people["curfew_delay"].astype(np.int16) DF_people = DF_people.replace({"is_infected":{"true":1, "false":0}}) DF_people = DF_people.replace({"is_immune":{"true":1, "false":0}}) DF_people = DF_people.replace({"is_dead":{"true":1, "false":0}}) DF_people["is_susceptible"] = 1 - (DF_people.is_infected + DF_people.is_immune + DF_people.is_dead) mean_ = DF_people.groupby(['curfew_time', 'curfew_delay', 'cycle'])['is_susceptible', 'is_immune', 'is_infected', 'is_dead'].mean() var_ = DF_people.groupby(['curfew_time', 'curfew_delay', 'cycle'])['is_susceptible', 'is_immune', 'is_infected', 'is_dead'].var() return mean_, var_ mean_0, var_0 = preprocess_people(people_files[0]) mean = mean_0 var = var_0 for file in tqdm(people_files[1:], total = len(people_files)-1): pivot_mean, pivot_var = preprocess_people(file) mean += pivot_mean var += pivot_var n_batch = 30 aggr_people = mean.reset_index() aggr_people["days"] = aggr_people["cycle"]/24 aggr_people_var = var.reset_index() aggr_people_var["days"] = aggr_people_var["cycle"]/24 aggr_people_var["is_susceptible"] = aggr_people_var["is_susceptible"] /n_batch aggr_people_var["is_immune"] = aggr_people_var["is_immune"]/n_batch aggr_people_var["is_infected"] = aggr_people_var["is_infected"]/n_batch aggr_people_var["is_dead"] = aggr_people_var["is_dead"]/n_batch aggr_people["std_infected"] = np.sqrt(aggr_people_var["is_infected"]) aggr_people["lower_bound"] = aggr_people["is_infected"] - 3*aggr_people["std_infected"] # 3 sigma aggr_people["upper_bound"] = aggr_people["is_infected"] + 3*aggr_people["std_infected"] curfew_0 = aggr_people[(aggr_people.curfew_time == curfew_time[0]) & (aggr_people.curfew_delay == curfew_delay[0]) ] # corresponding to beta = 0.075...... curfew_1 = aggr_people[(aggr_people.curfew_time == curfew_time[1]) & (aggr_people.curfew_delay == curfew_delay[0]) ] curfew_2 = aggr_people[(aggr_people.curfew_time == curfew_time[2]) & (aggr_people.curfew_delay == curfew_delay[0]) ] curfew_3 = aggr_people[(aggr_people.curfew_time == curfew_time[0]) & (aggr_people.curfew_delay == curfew_delay[1]) ] # corresponding to beta = 0.075...... 
curfew_4 = aggr_people[(aggr_people.curfew_time == curfew_time[1]) & (aggr_people.curfew_delay == curfew_delay[1]) ] curfew_5 = aggr_people[(aggr_people.curfew_time == curfew_time[2]) & (aggr_people.curfew_delay == curfew_delay[1]) ] # Beta = .05 curfew_time = 18, curfew_delay = 5 sns.set_theme() plt.figure(figsize = (15,8)) sns.lineplot( data= curfew_0, x="days", y="is_infected" , markers=True, dashes=False ) plt.fill_between(curfew_0.days, curfew_0.lower_bound, curfew_0.upper_bound, alpha=.3) plt.show() # Beta = .05 curfew_time = 19, curfew_delay = 5 sns.set_theme() plt.figure(figsize = (15,8)) sns.lineplot( data= curfew_1, x="days", y="is_infected" , markers=True, dashes=False ) plt.fill_between(curfew_1.days, curfew_1.lower_bound, curfew_1.upper_bound, alpha=.3) plt.show() # Beta = .05 curfew_time = 20, curfew_delay = 5 sns.set_theme() plt.figure(figsize = (15,8)) sns.lineplot( data= curfew_2, x="days", y="is_infected" , markers=True, dashes=False ) plt.fill_between(curfew_2.days, curfew_2.lower_bound, curfew_2.upper_bound, alpha=.3) plt.show() # Beta = .05 curfew_time = 18, curfew_delay = 10 sns.set_theme() plt.figure(figsize = (15,8)) sns.lineplot( data= curfew_3, x="days", y="is_infected" , markers=True, dashes=False ) plt.fill_between(curfew_3.days, curfew_3.lower_bound, curfew_3.upper_bound, alpha=.3) plt.show() # Beta = .05 curfew_time = 19, curfew_delay = 10 sns.set_theme() plt.figure(figsize = (15,8)) sns.lineplot( data= curfew_4, x="days", y="is_infected" , markers=True, dashes=False ) plt.fill_between(curfew_4.days, curfew_4.lower_bound, curfew_4.upper_bound, alpha=.3) plt.show() # Beta = .05 curfew_time = 20, curfew_delay = 10 sns.set_theme() plt.figure(figsize = (15,8)) sns.lineplot( data= curfew_5, x="days", y="is_infected" , markers=True, dashes=False ) plt.fill_between(curfew_5.days, curfew_5.lower_bound, curfew_5.upper_bound, alpha=.3) plt.show() # Beta = .05 curfew_time = 18, curfew_delay = 5 sns.set_theme() fig, axes = plt.subplots(2,3,figsize = (16,9)) sns.lineplot(ax = axes[0,0], data= curfew_0, x="days", y="is_infected" , markers=True, dashes=False ) axes[0,0].fill_between( curfew_0.days, curfew_0.lower_bound, curfew_0.upper_bound, alpha=.3) axes[0,0].legend(["CT = 6p.m. CD = 5"]) axes[0,0].set_xlabel("Days") axes[0,0].set_ylabel("Infected Citizens") # Beta = .05 curfew_time = 19, curfew_delay = 5 sns.lineplot(ax = axes[0,1], data= curfew_1, x="days", y="is_infected" , markers=True, dashes=False ) axes[0,1].fill_between(curfew_1.days, curfew_1.lower_bound, curfew_1.upper_bound, alpha=.3) axes[0,1].legend(["CT = 7p.m., CD = 5"]) axes[0,1].set_xlabel("Days") axes[0,1].set_ylabel("Infected Citizens") # Beta = .05 curfew_time = 20, curfew_delay = 5 sns.lineplot(ax = axes[0,2], data= curfew_2, x="days", y="is_infected" , markers=True, dashes=False ) axes[0,2].fill_between( curfew_2.days, curfew_2.lower_bound, curfew_2.upper_bound, alpha=.3) axes[0,2].legend(["CT = 8p.m. CD = 5"]) axes[0,2].set_xlabel("Days") axes[0,2].set_ylabel("Infected Citizens") ####------------------------------------------------------------------------------------------------------------#### # Beta = .05 curfew_time = 18, curfew_delay = 10 sns.lineplot(ax = axes[1,0], data= curfew_3, x="days", y="is_infected" , markers=True, dashes=False ) axes[1,0].fill_between(curfew_3.days, curfew_3.lower_bound, curfew_3.upper_bound, alpha=.3) axes[1,0].legend(["CT = 6p.m. 
CD = 10"]) axes[1,0].set_xlabel("Days") axes[1,0].set_ylabel("Infected Citizens") # Beta = .05 curfew_time = 19, curfew_delay = 10 sns.lineplot(ax = axes[1,1], data= curfew_4, x="days", y="is_infected" , markers=True, dashes=False ) axes[1,1].fill_between(curfew_4.days, curfew_4.lower_bound, curfew_4.upper_bound, alpha=.3) axes[1,1].legend(["CT = 7p.m. CD = 10"]) axes[1,1].set_xlabel("Days") axes[1,1].set_ylabel("Infected Citizens") # Beta = .05 curfew_time = 20, curfew_delay = 10 sns.lineplot(ax = axes[1,2], data= curfew_5, x="days", y="is_infected" , markers=True, dashes=False ) axes[1,2].fill_between(curfew_5.days, curfew_5.lower_bound, curfew_5.upper_bound, alpha=.3) axes[1,2].legend(["CT = 8p.m. CD = 10"]) axes[1,2].set_xlabel("Days") axes[1,2].set_ylabel("Infected Citizens") fig.suptitle("MAS Epidemics - Different Curfew_Time & Curfew_Delay") #plt.savefig("Curfew_Beta0.05_1k.png", dpi = 600) plt.show()Maximum Percentage of Infectedc0_max_val = curfew_0.is_infected.max() c1_max_val = curfew_1.is_infected.max() c2_max_val = curfew_2.is_infected.max() c3_max_val = curfew_3.is_infected.max() c4_max_val = curfew_4.is_infected.max() c5_max_val = curfew_5.is_infected.max() curfew_0[curfew_0.is_infected == c0_max_val] curfew_1[curfew_1.is_infected == c1_max_val] curfew_2[curfew_2.is_infected == c2_max_val] curfew_3[curfew_3.is_infected == c3_max_val] curfew_4[curfew_4.is_infected == c4_max_val] curfew_5[curfew_5.is_infected == c5_max_val]`df = web.DataReader('AAPL',data_source='yahoo',start='2012-01-01',end='2019-12-17') df df.shape # visualize the closing price history plt.figure(figsize=(16,8)) plt.title('Close price history') plt.plot(df['Close']) plt.xlabel('Date',fontsize=18) plt.ylabel('Close price for USD',fontsize=18) plt.show() # creating a new dataframe with only the close column data = df.filter(['Close']) # convert the data frame to a numpy array dataset = data.values # Get the number of rows to train the model on training_data_len = math.ceil(len(dataset) * .8) training_data_len # scale the data scaler = MinMaxScaler(feature_range=(0,1)) scaled_data = scaler.fit_transform(dataset) scaled_data #Create the training dataset #Create the scaled training dataset train_data = scaled_data[0:training_data_len,:] #Split the data into x_train and y_train data sets x_train = [] y_train = [] for i in range(60,len(train_data)): x_train.append(train_data[i-60:i,0]) y_train.append(train_data[i,0]) if i<=61: print(x_train) print(y_train) print() #Convert the x_train and y_train to numpy arrays x_train, y_train = np.array(x_train),np.array(y_train) #Reshape the data x_train = np.reshape(x_train,(x_train.shape[0],x_train.shape[1],1)) x_train.shape #Build the lstm model model = Sequential() model.add(LSTM(50, return_sequences=True, input_shape=(x_train.shape[1],1))) model.add(LSTM(50, return_sequences=False)) model.add(Dense(25)) model.add(Dense(1)) #Compile the model model.compile(optimizer='adam',loss='mean_squared_error') #Train the model # fit is another name for train model.fit(x_train, y_train, batch_size=1, epochs=1) #Create the testing dataset #Create a new array containing scaled values from index 1543 to 2003 test_data = scaled_data[training_data_len-60:, : ] #Create the datasets x_test and y_test x_test = [] y_test = dataset[training_data_len:, :] for i in range(60,len(test_data)): x_test.append(test_data[i-60:i, 0]) #Convert the data to a numpy array x_test = np.array(x_test) #Reshape the data x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1)) #Get the model predicted 
price values predictions = model.predict(x_test) predictions = scaler.inverse_transform(predictions) #Get the root mean square error (RMSE) rmse = np.sqrt( np.mean( (predictions - y_test)**2 )) rmse #Plot the data train = data[:training_data_len] valid = data[training_data_len:] valid['Predictions'] = predictions #Visualize the data plt.figure(figsize=(16,8)) plt.title('Model') plt.xlabel('Date', fontsize=18) plt.ylabel('Close price USD', fontsize =18) plt.plot(train['Close']) plt.plot(valid[['Close','Predictions']]) plt.legend(['Train','Val','Predictions'],loc='lower right') plt.show() #Show the actual and predicted price valid #Get the quote apple_quote = web.DataReader('AAPL',data_source='yahoo', start='2012-01-01', end='2019-12-17') #Create a new dataframe new_df = apple_quote.filter(['Close']) #Get the last 60 days closing price value and convert the dataframe to an array last_60_days = new_df[-60:].values #Scale the data to be values between 0 and 1 last_60_days_scaled = scaler.transform(last_60_days) #Create an empty list X_test = [] #Append the past 60 days X_test.append(last_60_days_scaled) #Convert the X_test dataset to a numpy array X_test = np.array(X_test) #Reshape the data to be 3D X_test = np.reshape(X_test, (X_test.shape[0],X_test.shape[1],1)) #Get the predicted scaled price pred_price = model.predict(X_test) #undo the scaling pred_price = scaler.inverse_transform(pred_price) print(pred_price) apple_quote2 = web.DataReader('AAPL',data_source='yahoo', start='2019-12-18', end='2019-12-18') print(apple_quote2['Close']) apple_quote3 = web.DataReader('AAPL',data_source='yahoo', start='2021-05-13', end='2021-05-13') print(apple_quote3['Close'])[![Binder](https://mybinder.org/badge.svg)](https://mybinder.org/v2/gh/bwrsandman/learningpython/master?filepath=05-Modules.ipynb) Modules Review[Modules](https://docs.python.org/3/tutorial/modules.html), sometimes called libraries, are collections of definitions which can be reused for different purposes.They are bits of reusable code and are at the core of any software writen to run.There is no technical difference between writing a module and writing a script. Instead of running a libary like you would with a script (e.g. `python script_name.py`), you use the `import` keyword inside of python (e.g. `import module_name.py`).Modules are made by creating a script as a text file ending in ".py" and inserting definitions and in them. The name of file is the name of the module. Be careful not to use the `-` character in the script filename as this will confuse python which interprets `-` as a minus operator. In general `_` is preferable.Python (as with most languages) has a [Standard Library](https://docs.python.org/3/library/index.html). This library is a collection of modules which are available by default in any python environment. This means that you don't have to write your own implementation of the `print` function or create a module for random functions, you can just `import random`. This allows for a much quicker start to programming. Python has an extensive standard libary compared to many other languages and refers this as their ["Batteries included"](https://www.python.org/dev/peps/pep-0206/batteries-included-philosophy) philosophy. Reading ExerciseIn your own words explain what is happening in the following code snippets.For each line, what does python do.For each module imported, what do you think is the name of the `.py` file and try to find it in the Home of these notebooks and take a look at the contents. 1. 
Simple import module and run functionPrediction:(Double click here and enter your prediction before running the next cell)import module_1 module_1.hello()2. Import module which imports another modulePrediction:(Double click here and enter your prediction before running the next cell)import module_2 module_2.hello() module_2.module_1.hello()3. Importing specific declarations from a modulePrediction:(Double click here and enter your prediction before running the next cell)from module_3 import z from module_3 import hello hello() print(z)4. Importing specific declarations from a module with aliasesPrediction:(Double click here and enter your prediction before running the next cell)from module_1 import hello as hello_1 from module_2 import hello as hello_2 from module_3 import hello as hello_3 hello_1() hello_2() hello_3()Writing Exercise 1.Play around with the python standard library.1. Try importing the `sys` library and printing the `copyright` variable it contains.2. Try importing the `math` library and printing the `pi` variable it contains.3. Try importing the `math` library and calling the `sin` and `cos` functions it contains with `math.pi` as the parameter.4. Try importing the `os` library and printing the current working directory by calling the `getcwd` function it contains.5. Try importing the `os` library and printing the properties of the operating system you are running by calling the `uname` function it contains.6. Try importing the `random` library and printing a random float by calling the `random` function it contains.7. Try importing the `datetime` library and running `datetime.datetime.now()`8. Read about the [sqlite3 module](https://docs.python.org/3/library/sqlite3.html)9. Read about the [csv module](https://docs.python.org/3/library/csv.html)10. Play around with [others](https://docs.python.org/3/library/index.html) 2.Create a module for the functions created in the Functions notebook.To do this, create a `.py` file in Home by creating a new text file and renaming it to `functions.py`.Insert your function declarations into this new text file.Test the module by importing it and calling every one of its functions in the following cell. Extra The Zen of PythonThe python standard library includes the `this` library. This library is one of [many](https://hackernoon.com/pythons-easter-eggs-and-hidden-jokes-d7368c7fe2c2) python [easter-eggs](https://en.wikipedia.org/wiki/Easter_egg_(media%29).The library is implemented with a simple `print()` call which is not contained within a function. 
For this reason, the `print()` call is executed at import time.It is a good exercise to memorize each line of the Zen of Python as they are goodprinciples to use in developement (and not just in python).An explanation for each line of The Zen of Python can be found [here](https://www.quora.com/What-do-different-aphorisms-in-The-Zen-of-Python-mean).# Run me import thisLoading Raw Data(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() x_train = x_train[:, 0:27, 0:27] x_test = x_test[:, 0:27, 0:27] x_train_flatten = x_train.reshape(x_train.shape[0], x_train.shape[1]*x_train.shape[2])/255.0 x_test_flatten = x_test.reshape(x_test.shape[0], x_test.shape[1]*x_test.shape[2])/255.0 print(x_train_flatten.shape, y_train.shape) print(x_test_flatten.shape, y_test.shape) x_train_0 = x_train_flatten[y_train == 0] x_train_1 = x_train_flatten[y_train == 1] x_train_2 = x_train_flatten[y_train == 2] x_train_3 = x_train_flatten[y_train == 3] x_train_4 = x_train_flatten[y_train == 4] x_train_5 = x_train_flatten[y_train == 5] x_train_6 = x_train_flatten[y_train == 6] x_train_7 = x_train_flatten[y_train == 7] x_train_8 = x_train_flatten[y_train == 8] x_train_9 = x_train_flatten[y_train == 9] x_train_list = [x_train_0, x_train_1, x_train_2, x_train_3, x_train_4, x_train_5, x_train_6, x_train_7, x_train_8, x_train_9] print(x_train_0.shape) print(x_train_1.shape) print(x_train_2.shape) print(x_train_3.shape) print(x_train_4.shape) print(x_train_5.shape) print(x_train_6.shape) print(x_train_7.shape) print(x_train_8.shape) print(x_train_9.shape) x_test_0 = x_test_flatten[y_test == 0] x_test_1 = x_test_flatten[y_test == 1] x_test_2 = x_test_flatten[y_test == 2] x_test_3 = x_test_flatten[y_test == 3] x_test_4 = x_test_flatten[y_test == 4] x_test_5 = x_test_flatten[y_test == 5] x_test_6 = x_test_flatten[y_test == 6] x_test_7 = x_test_flatten[y_test == 7] x_test_8 = x_test_flatten[y_test == 8] x_test_9 = x_test_flatten[y_test == 9] x_test_list = [x_test_0, x_test_1, x_test_2, x_test_3, x_test_4, x_test_5, x_test_6, x_test_7, x_test_8, x_test_9] print(x_test_0.shape) print(x_test_1.shape) print(x_test_2.shape) print(x_test_3.shape) print(x_test_4.shape) print(x_test_5.shape) print(x_test_6.shape) print(x_test_7.shape) print(x_test_8.shape) print(x_test_9.shape)(980, 729) (1135, 729) (1032, 729) (1010, 729) (982, 729) (892, 729) (958, 729) (1028, 729) (974, 729) (1009, 729)Selecting the datasetOutput: X_train, Y_train, X_test, Y_testrand_conv = './preprocessed_RandQConv4class_10class_' trained_conv = './preprocessed_QConv4class_10class_' X_train = np.loadtxt(trained_conv + 'train.txt') X_test = np.loadtxt(trained_conv + 'test.txt') #X_train = X_train[0:800, :] #X_test = X_test[0:200, :] X_train.shape, X_test.shape # 10 class directly Y_train = np.zeros((len(X_train),)) Y_test = np.zeros((len(X_test),)) for i in range(10): Y_train[i*200:(i+1)*200] += i Y_test[i*50:(i+1)*50] += i Y_train = to_categorical(Y_train) Y_test = to_categorical(Y_test) Y_train.shape, Y_test.shape Y_train # one vs all scheme selected_class = 3 temp1 = X_train[200*selected_class:200*(selected_class+1)] # positive class = 1 ind_delete = np.linspace(200*selected_class, 200*(selected_class+1)-1, 200, dtype=int) temp2 = np.delete(X_train, ind_delete, axis=0) # negative class = 0 number_of_rows = temp2.shape[0] random_indices = np.random.choice(number_of_rows, size=200, replace=False) temp2 = temp2[random_indices, :] X_train = np.concatenate((temp1, temp2), axis=0) Y_train = np.zeros((len(X_train),)) Y_train[0:int(len(Y_train)/2)] 
+= 0 Y_train[int(len(Y_train)/2):] += 1 Y_train = to_categorical(Y_train) temp1 = X_test[50*selected_class:50*(selected_class+1)] # positive class = 0 ind_delete = np.linspace(50*selected_class, 50*(selected_class+1)-1, 50, dtype=int) temp2 = np.delete(X_test, ind_delete, axis=0) # negative class = 1 number_of_rows = temp2.shape[0] random_indices = np.random.choice(number_of_rows, size=50, replace=False) temp2 = temp2[random_indices, :] X_test = np.concatenate((temp1, temp2), axis=0) Y_test = np.zeros((len(X_test),)) Y_test[0:int(len(Y_test)/2)] += 0 Y_test[int(len(Y_test)/2):] += 1 Y_test = to_categorical(Y_test) print(X_train.shape, Y_train.shape) print(X_test.shape, Y_test.shape)(400, 9) (400, 2) (100, 9) (100, 2)Dataset Preprocessing Quantumimport pennylane as qml from pennylane import numpy as np from pennylane.optimize import AdamOptimizer, GradientDescentOptimizer qml.enable_tape() from tensorflow.keras.utils import to_categorical # Set a random seed np.random.seed(2020) # Define output labels as quantum state vectors def density_matrix(state): """Calculates the density matrix representation of a state. Args: state (array[complex]): array representing a quantum state vector Returns: dm: (array[complex]): array representing the density matrix """ return state * np.conj(state).T label_0 = [[1], [0]] label_1 = [[0], [1]] state_labels = [label_0, label_1] # def density_matrix(state): # """Calculates the density matrix representation of a state. # Args: # state (array[complex]): array representing a quantum state vector # Returns: # dm: (array[complex]): array representing the density matrix # """ # return np.outer(state, np.conj(state)) #state_labels = np.loadtxt('./ESB_states.txt', dtype=np.complex_) n_qubits = 10 dev_fc = qml.device("default.qubit", wires=n_qubits) @qml.qnode(dev_fc) def q_fc(params, inputs): """A variational quantum circuit representing the DRC. 
Args: params (array[float]): array of parameters inputs = [x, y] x (array[float]): 1-d input vector y (array[float]): single output state density matrix Returns: float: fidelity between output state and input """ # layer iteration for l in range(len(params[0])): # qubit iteration for q in range(n_qubits): # gate iteration for g in range(int(len(inputs)/3)): qml.Rot(*(params[0][l][q][3*g:3*(g+1)] * inputs[3*g:3*(g+1)] + params[1][l][q][3*g:3*(g+1)]), wires=q) return [qml.expval(qml.Hermitian(density_matrix(state_labels[0]), wires=[i])) for i in range(n_qubits)] a = np.zeros((2, 1, 10, 9)) q_fc(a, X_train[0]) class class_weights(tf.keras.layers.Layer): def __init__(self): super(class_weights, self).__init__() w_init = tf.random_normal_initializer() self.w = tf.Variable( initial_value=w_init(shape=(1, 10), dtype="float32"), trainable=True, ) def call(self, inputs): return (inputs * self.w) # Input image, size = 27 x 27 X = tf.keras.Input(shape=(9,), name='Input_Layer') # Quantum FC Layer, trainable params = 18*L*n_class + 2, output size = 2 num_fc_layer = 2 q_fc_layer_0 = qml.qnn.KerasLayer(q_fc, {"params": (2, num_fc_layer, 10, 9)}, output_dim=10)(X) # q_fc_layer_1 = qml.qnn.KerasLayer(q_fc, {"params": (2, num_fc_layer, 9)}, output_dim=2)(X) # q_fc_layer_2 = qml.qnn.KerasLayer(q_fc, {"params": (2, num_fc_layer, 9)}, output_dim=2)(X) # q_fc_layer_3 = qml.qnn.KerasLayer(q_fc, {"params": (2, num_fc_layer, 9)}, output_dim=2)(X) # Alpha Layer q_fc_layer_0 = class_weights()(q_fc_layer_0) # q_fc_layer_1 = class_weights()(q_fc_layer_1) # q_fc_layer_2 = class_weights()(q_fc_layer_2) # q_fc_layer_3 = class_weights()(q_fc_layer_3) model = tf.keras.Model(inputs=X, outputs=q_fc_layer_0) model(X_train[0:5]) model.summary() import keras.backend as K # def custom_loss(y_true, y_pred): # return K.sum(((y_true.shape[1]-2)*y_true+1)*K.square(y_true-y_pred))/len(y_true) def custom_loss(y_true, y_pred): loss = K.square(y_true-y_pred) #class_weights = y_true*(weight_for_1-weight_for_0) + weight_for_0 #loss = loss * class_weights return K.sum(loss)/len(y_true) lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=0.1, decay_steps=int(len(X_train)/32), decay_rate=0.85, staircase=True) opt = tf.keras.optimizers.Adam(learning_rate=0.1) model.compile(opt, loss=custom_loss, metrics=["accuracy"]) cp_val_acc = tf.keras.callbacks.ModelCheckpoint(filepath="./Model/Testing/10class_TrainedQConv_All_valacc.hdf5", monitor='val_accuracy', verbose=1, save_weights_only=True, save_best_only=True, mode='max') cp_val_loss = tf.keras.callbacks.ModelCheckpoint(filepath="./Model/Testing/10class_TrainedQConv_All_valloss.hdf5", monitor='val_loss', verbose=1, save_weights_only=True, save_best_only=True, mode='min') H = model.fit(X_train, Y_train, epochs=10, batch_size=128, validation_data=(X_test, Y_test), verbose=1, initial_epoch=0, callbacks=[cp_val_acc, cp_val_loss])Epoch 1/10 16/16 [==============================] - 10440s 650s/step - loss: 0.9019 - accuracy: 0.2249 - val_loss: 0.7377 - val_accuracy: 0.4660 Epoch 00001: val_accuracy improved from -inf to 0.46600, saving model to ./Model/Testing/10class_TrainedQConv_All_valacc.hdf5 Epoch 00001: val_loss improved from inf to 0.73771, saving model to ./Model/Testing/10class_TrainedQConv_All_valloss.hdf5 Epoch 2/10 16/16 [==============================] - 10429s 651s/step - loss: 0.6886 - accuracy: 0.5088 - val_loss: 0.6257 - val_accuracy: 0.5480 Epoch 00002: val_accuracy improved from 0.46600 to 0.54800, saving model to 
./Model/Testing/10class_TrainedQConv_All_valacc.hdf5 Epoch 00002: val_loss improved from 0.73771 to 0.62568, saving model to ./Model/Testing/10class_TrainedQConv_All_valloss.hdf5 Epoch 3/10 16/16 [==============================] - 10416s 650s/step - loss: 0.5977 - accuracy: 0.5982 - val_loss: 0.5956 - val_accuracy: 0.5720 Epoch 00003: val_accuracy improved from 0.54800 to 0.5720[...]Map actives to drugsactives_to_drug = [] for li, drug in zip(df.actives.apply(lambda x: x.split(',')), df.drug_name): for act in li: actives_to_drug.append([act, drug]) final_df = pd.DataFrame.from_records(actives_to_drug, columns=['actives', 'drugs']) final_df.dropna(inplace=True) len(final_df)We don't want actives that appear more than 4 timescounts = final_df.groupby('actives').count() < 5 counts.head() final_df = final_df.loc[counts.values.flatten()] # Get rid of the "'" from actives final_df['actives'] = final_df.actives.apply(lambda x: x.split("'")[1]) final_df.head() final_df.to_csv("actives_to_drug.csv", index=False)No Null values so no need for manipulating nanQuite surprised to see this tbh, but easier for me!df.isnull().any()Split DateTime as Date and Time in seperate columns for easier manipulationdt = [] tm = [] for x in df.DateTime: dt.append(x.split(" ")[0]) tm.append(x.split(" ")[1]) df['dt'] = dt df['tm']= tm df.head()Dropped DateTimeDateTime column split hencewe don't need the combined column anymore.df = df.drop('DateTime', axis=1) # df = df.drop('ID', axis=1) df.isnull().any() df.head() df.to_csv('data/processed.csv', index=False) df = pd.read_csv('data/processed.csv') x = df.drop('Vehicles', axis=1) y = df.Vehicles.to_frame() x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42) df.head() for x in df.tm.unique(): df.loc[df.tm == x, 'tm'] = x[0:2] # df.ix[df.tm == 20000, 'my_channel'] = 0 # print(x[0:2]) df.head() len(df.dt.unique()) df['dt']= df['dt'].astype(str) df['Junction']= df['Junction'].astype(str) a = (df['dt'] == '2015-11-01') & (df['Junction'] == '1') b = (df['dt'] == '2015-11-01') df[a].head() sns.stripplot(x="tm", y="Vehicles", data=df, jitter=True, hue="Junction"); sns.swarmplot(x="tm", y="Vehicles", data=df[b], hue="Junction"); df.head() sns.factorplot(x="tm", y="Vehicles",hue="Junction", col="Junction", data=df); sns.pairplot(x_train); sns.pairplot(y_train); x_train.head() def for x in df.tm.unique(): df.loc[df.tm == x, 'tm'] = x[0:2] # df.ix[df.tm == 20000, 'my_channel'] = 0 # print(x[0:2])模型建立#切割資料,train:test=0.85:0.15 train_data,test_data,train_labels,test_labels=train_test_split(data1.iloc[:,0:6],data1.iloc[:,-1],test_size=0.15, random_state=69) print('Size of training dataset:', train_data.shape) print('Size of testing dataset:', test_data.shape) train_data #from xgboost.sklearn import XGBClassifier # Create the model XGB = XGBRegressor(n_estimators=18,min_child_weight=5,subsample=0.88) # Learn the digits on the train subset XGB.fit(train_data, train_labels) # Predict the value of the digit on the testing subset predicted = XGB.predict(test_data) # Calculate MSE/MAE from sklearn.metrics import mean_squared_error from sklearn.metrics import mean_absolute_error MSE= mean_squared_error(test_labels, predicted) print('Mean_squared_error of testing dataset: ', MSE) MAE= mean_absolute_error(test_labels, predicted) print('Mean_absolute_error of testing dataset: ', MAE) from xgboost import plot_importance plot_importance(XGB) plt.show() compare=[predicted,test_labels] plt.boxplot(compare, labels= ['預測', '實際']) plt.title("預測與實際差異",{"fontsize" : 
20} ) plt.show() plt.plot([x for x in range(0,len(predicted-test_labels))],predicted-test_labels) plt.ylabel("預測誤差") plt.title("殘差圖",{"fontsize" : 20} ) plt.show() data1_test=pd.read_csv(".\\hw2_data\\data7\\test_mid.csv",header=None) #imputed_data1_test = imputer.fit_transform(data1_test) #data1_test = pd.DataFrame(imputed_data1_test) #data1_test = data1_test.drop(data1_test.columns[[11]], axis=1) data1_test.head() predicted = XGB.predict(data1_test) one=pd.concat([data1_test,pd.DataFrame(predicted)],axis=1) one.head() plt.hist(one.iloc[:,-1],bins=25,color="#A2D2FF") plt.show() one.to_csv(".\\xgboost\\mid_xgb_data7.csv",header=False, index=False)[NTDS'18] milestone 2: network models[ntds'18]: https://github.com/mdeff/ntds_2018[](https://people.epfl.ch/hermina.petricmaretic), [EPFL LTS4](https://lts4.epfl.ch) Students* Team: `32`* Students: `, , , `* Dataset: `FMA` Rules* Milestones have to be completed by teams. No collaboration between teams is allowed.* Textual answers shall be short. Typically one to two sentences.* Code has to be clean.* In the first part, you cannot import any other library than we imported. In the second part, you are allowed to import any library you want.* When submitting, the notebook is executed and the results are stored. I.e., if you open the notebook again it should show numerical results and plots. We won't be able to execute your notebooks.* The notebook is re-executed from a blank state before submission. That is to be sure it is reproducible. You can click "Kernel" then "Restart & Run All" in Jupyter. ObjectiveThe purpose of this milestone is to explore various random network models, analyse their properties and compare them to your network. In the first part of the milestone you will implement two random graph models and try to fit them to your network. In this part you are not allowed to use any additional package. In the second part of the milestone you will choose a third random graph model that you think shares some properties with your network. You will be allowed to use additional packages to construct this network, but you must explain your network choice. Finally, make your code as clean as possible, and keep your textual answers short. Part 0Import the adjacency matrix of your graph that you constructed in milestone 1, as well as the number of nodes and edges of your network. Part 1**For the computation of this part of the milestone you are only allowed to use the packages that have been imported in the cell below.**%matplotlib inline import random import numpy as np import pandas as pd import matplotlib.pyplot as plt import scipy from scipy.spatial.distance import squareform adjacency = np.load('adjacency.npy')# the adjacency matrix n_nodes = adjacency.shape[0]# the number of nodes in the network n_edges = np.count_nonzero(adjacency)//2# the number of edges in the network print('Number of nodes: ', n_nodes) print('Number of edges: ', n_edges) adjacency_bin = np.zeros(adjacency.shape) adjacency_bin[adjacency > 0] = 1 plt.spy(adjacency_bin) plt.title('Our graph')Number of nodes: 2000 Number of edges: 32646Question 1Create a function that constructs an Erdős–Rényi graph.def erdos_renyi(n, p, seed=None): """Create an instance from the Erdos-Renyi graph model. Parameters ---------- n: int Size of the graph. p: float Edge probability. A number between 0 and 1. seed: int (optional) Seed for the random number generator. To get reproducible results. Returns ------- adjacency The adjacency matrix of a graph. 
""" if seed is not None: np.random.seed(seed) flipped_coin = np.random.binomial(1, p, (n*(n-1)//2)) adjacency = squareform(flipped_coin) return adjacency er = erdos_renyi(5, 0.6, 9765) plt.spy(er) plt.title('Erdos-Renyi (5, 0.6)') er = erdos_renyi(10, 0.4, 7648) plt.spy(er) plt.title('Erdos-Renyi (10, 0.4)')Question 2Use the function to create a random Erdos-Renyi graph. Choose the parameters such that number of nodes is the same as in your graph, and the number of edges similar. You don't need to set the random seed. Comment on your choice of parameters.erdos_renyi_graph = erdos_renyi(n_nodes, 0.0165, 41) n_edges_erdos_renyi = np.sum(erdos_renyi_graph)/2 plt.spy(erdos_renyi_graph) plt.title('Erdos-Renyi (' + str(n_edges_erdos_renyi)+ ', 0.0165)') print(n_edges_erdos_renyi)33118.0**Our answer:**We had to choose a very low probability (0.0165) for the Erdos-Renyi graph to have a graph as sparse as our own graph. We chose this number by trial and error. Question 3Create a function that constructs a Barabási-Albert graph.def barabasi_albert(n, m, m0=2, seed=None): """Create an instance from the Barabasi-Albert graph model. Parameters ---------- n: int Size of the graph. m: int Number of edges to attach from a new node to existing nodes. m0: int (optional) Number of nodes for the inital connected network. seed: int (optional) Seed for the random number generator. To get reproducible results. Returns ------- adjacency The adjacency matrix of a graph. """ if seed is not None: np.random.seed(seed) adjacency = np.zeros((n,n)) #initial graph with m0 nodes. initial_connection_list = np.zeros((m0*(m0-1)//2,)) while(not np.all(np.sum(adjacency[:m0,:m0], 0))): #make sure each node has at least one link #create random edge between 2 nodes new_connection_ind = np.random.randint(0, high=(m0*(m0-1)//2), size=1) if initial_connection_list[new_connection_ind] != 1: initial_connection_list[new_connection_ind]= 1 #update adjacency matrix adjacency[:m0,:m0] = squareform(initial_connection_list) for new_node_ind in range(m0,n): connected_count = 0 #in the beginning the node is unconnected sum_of_degree = np.sum(np.sum(adjacency[:new_node_ind, :new_node_ind], axis=0)) while (connected_count < m): #until i connect new nodes to m nodes, draw a new node and try to connect random_node_ind = np.random.randint(0, high=new_node_ind, size=1) #draw a random existing node if (adjacency[new_node_ind, random_node_ind] != 1): #if node is not already connected #find degree of this new node node_degree = np.sum(adjacency[random_node_ind, :new_node_ind]) #find sum of degree of all nodes p = node_degree/sum_of_degree p_drawn = np.random.rand(1,1) if p_drawn < p: #connect node adjacency[random_node_ind, new_node_ind] =1 adjacency[new_node_ind, random_node_ind] =1 connected_count += 1 return adjacency ba = barabasi_albert(5, 1, 2, 9087) plt.spy(ba) plt.title('Barabasi-Albert (5, 1)') ba = barabasi_albert(10, 2, 3, 8708) plt.spy(ba) plt.title('Barabasi-Albert (10, 2)')Question 4Use the function to create a random Barabási-Albert graph. Choose the parameters such that number of nodes is the same as in your graph, and the number of edges similar. You don't need to set the random seed. Comment on your choice of parameters.barabasi_albert_graph = barabasi_albert(2000, 17, 20, 41) plt.spy(barabasi_albert_graph) plt.title('Barabasi-Albert (2000, 10)') n_edges_barabasi_albert = np.sum(barabasi_albert_graph)/2 print(n_edges_barabasi_albert)33689.0**Your answer here**We start with around 20 initial nodes. 
We expect that this initial network to have few edges (negligable compared to our edge count). Therefore the only way the BA graph can have 32646 edges is for each new edge (there will be 1980 of them) to make 32600/1980 ~= 17 new connections. Question 5Compare the number of edges in all three networks (your real network, the Erdős–Rényi network, and the Barabási-Albert netowk).# Your code here. n_edges_ours = np.count_nonzero(adjacency)//2# the number of edges in the network n_edges_erdos_renyi = np.sum(erdos_renyi_graph)/2 n_edges_barabasi_albert = np.sum(barabasi_albert_graph)/2 print("Our edges:", n_edges_ours) print("Erdos renyi Edges:", n_edges_erdos_renyi) print("Barabasi-Albert Edges:", n_edges_barabasi_albert) print("They are all similar!")Our edges: 32646 Erdos renyi Edges: 33118.0 Barabasi-Albert Edges: 33689.0 They are all similar!Question 6Implement a function that computes the [Kullback–Leibler (KL) divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence) between two probability distributions.We'll use it to compare the degree distributions of networks.def kl_divergence(p, q): """Compute the KL divergence between probability distributions of degrees of two networks. Parameters ---------- p: np.array Probability distribution of degrees of the 1st graph. q: np.array Probability distribution of degrees of the 2nd graph. Returns ------- kl The KL divergence between the two distributions. """ # Your code here. kl= -np.sum(np.multiply(p,np.log(q/p))) return kl def get_KLDivergence(X): epsilon = np.min(np.min(X)) epsilon = 0 if epsilon >0 else -epsilon+1 ##make sure all values are positive distmat = [] ## Iterate to get the JS divergence between each sample ### ## Will get an upper triangle of the matrix and then just straighten it### for i,sample1 in enumerate(X.values[:-1]): for j,sample2 in enumerate(X.values[(i+1):]): distmat.append((scipy.stats.entropy(sample1+epsilon, sample2+epsilon) + scipy.stats.entropy(sample2+epsilon, sample1+epsilon))/2) return np.array(distmat) p_test = np.array([0.2, 0.2, 0.2, 0.4]) q_test = np.array([0.3, 0.3, 0.1, 0.3]) print(kl_divergence(p_test, q_test))0.0915162218494358Question 7: Compare the degree distribution of your network to each of the two synthetic ones, in terms of KL divergence.**Hint:** Make sure you normalise your degree distributions to make them valid probability distributions.**Hint:** Make sure none of the graphs have disconnected nodes, as KL divergence will not be defined in that case. If that happens with one of the randomly generated networks, you can regenerate it and keep the seed that gives you no disconnected nodes.def has_disconnected_nodes(adjacency): return not np.all(np.sum(adjacency, axis=0)) def remove_disconnected_nodes(adjacency): disconnected_nodes_ind = np.where((np.sum(adjacency, axis=0) == 0)) temp = np.delete(adjacency, disconnected_nodes_ind, axis = 0) output = np.delete(temp, disconnected_nodes_ind, axis = 1) return output #Does our graph have disconnected nodes? print("Our graph has disconnected nodes:", has_disconnected_nodes(adjacency_bin)) #We will remove these disconnected nodes. our_graph = remove_disconnected_nodes(adjacency_bin) n_nodes = our_graph.shape[0] print("Removed disconnected nodes. 
Our graph now has", n_nodes, "nodes") #genereate new ER and BA graphs with same number of nodes er_graph = erdos_renyi(n_nodes, 0.02, 41) ba_graph = barabasi_albert(n_nodes, 17, 20, 41) print("Edge count: ours:", np.sum(our_graph)//2, "er:", np.sum(er_graph)//2, "ba:", np.sum(ba_graph)//2) #Find degree distributions degree_sum_ours = np.sum(our_graph) degree_dist_ours = np.sort(np.sum(our_graph, axis=0)/degree_sum_ours) degree_sum_er = np.sum(er_graph) degree_dist_er = np.sort(np.sum(er_graph, axis=0)/degree_sum_er) degree_sum_ba = np.sum(ba_graph) degree_dist_ba = np.sort(np.sum(ba_graph, axis=0 )/degree_sum_ba) #find the KL divergences print("KL Divergence between ours and ER:", kl_divergence(degree_dist_ours, degree_dist_er)) print("KL Divergence between ours and BA:", kl_divergence(degree_dist_ours, degree_dist_ba))KL Divergence between ours and ER: 0.3124131448112719 KL Divergence between ours and BA: 0.12130305411164323Question 8Plot the degree distribution historgrams for all three networks. Are they consistent with the KL divergence results? Explain.# Your code here. weights_ours = np.ones_like(degree_dist_ours) / float(n_nodes) weights_er = np.ones_like(degree_dist_er) / float(n_nodes) weights_ba = np.ones_like(degree_dist_ba) / float(n_nodes) fig = plt.figure() plt.suptitle("degree distributions") plt.subplot(131) plt.hist(degree_dist_ours, weights=weights_ours) plt.title("ours") plt.subplot(132) plt.hist(degree_dist_er, weights=weights_er) plt.title("ER") plt.subplot(133) plt.hist(degree_dist_ba, weights=weights_ba) plt.title("BA") plt.show() plt.close(fig)**Our answer:**Judging by the degree distributions, our network is closer to a Barabasi-Albert network than Erdos-Renyi. We expect the KL divergence to be **higher** between our graph and the ER graph, and **lower** between our graph and BA graph and we can observe this (KL divergence between ours and ER: 0.3124, between ours and BA: 0.1213) Question 9Imagine you got equal degree distributions. Would that guarantee you got the same graph? Explain. **Your answer here.**Equal degree distributions do not mean that the graphs are the same. Degree distributions show, from highest to lowest, the degree of each node. If we have two equal degree distributions, this does not mean that the nodes have to be connected in the same way. 
We show an example below to reinforce our point.#Form two graphs graph1 = np.zeros([8,8]) graph1[0,1]=graph1[1,0]=1 graph1[1,2]=graph1[2,1]=1 graph1[1,3]=graph1[3,1]=1 graph1[2,4]=graph1[4,2]=1 graph1[3,5]=graph1[5,3]=1 graph1[4,6]=graph1[6,4]=1 graph1[5,7]=graph1[7,5]=1 graph2 = np.zeros([8,8]) graph2[0,2]=graph2[2,0]=1 graph2[1,2]=graph2[2,1]=1 graph2[2,3]=graph2[3,2]=1 graph2[3,4]=graph2[4,3]=1 graph2[4,5]=graph2[5,4]=1 graph2[5,6]=graph2[6,5]=1 graph2[6,7]=graph2[7,6]=1 #Plot them fig = plt.figure() plt.subplot(121) plt.spy(graph1) plt.title('Graph1') plt.subplot(122) plt.spy(graph2) plt.title('Graph2') plt.show() plt.close(fig) print("They are different graphs!\n") degree_g1 = np.sum(graph1) degree_dist_g1 = np.sort(np.sum(graph1, axis=0)/degree_g1) degree_g2 = np.sum(graph2) degree_dist_g2 = np.sort(np.sum(graph2, axis=0)/degree_g2) print("Degree distribution of graph 1:", degree_dist_g1) print("Degree distribution of graph 2:", degree_dist_g2) print("They have the same degree distribution!")Part 2**You are allowed to use any additional library here (e.g., NetworkX, PyGSP, etc.).** Be careful not to include something here and use it in part 1!import networkx as nxQuestion 10Choose a random network model that fits you network well. Explain your choice. **Hint:** Check lecture notes for different network models and their properties. Your choice should be made based on at least one property you'd expect to be similar. **Our answer:**We choose a **Power-Law Cluster** network. Our reasons are the following:* We found that the degree distribution of our graph resembles a power-law degree distribution (albeit with a very high gamma value). Therefore, we wanted to generate a graph with a power-law degree distribution. * We compared the histogram of our degree distribution to those of Erdos-Renyi and Watts-Strogatz, and found that they look very different from ours. Therefore we did not want to choose these distributions.* When we check the size of the largest connected component, we find that it is 1769. Considering the fact that our graph is made up of 2000 nodes, we can say that we have a giant component in our graph. The power-law cluster algorithm implemented by the NetworkX library also leads to a giant component forming, because every new node that is added is more likely to attach to a node with a higher degree (preferential attachment), similar to Barabasi-Albert.* Our model has a high clustering coefficent. The power-law cluster network is also likely to have a higher clustering coefficent than similar algorithms such as Barabasi-Albert. This is because, in this algorithm, after the preferential attachment step, the new node also has a chance of making an edge to the neighbors of the node it has attached to (forming a triangle). Question 11Explain (in short) how the chosen model works. **Our answer:**The power-law cluster network model incorperates the ideas of growth and preferential attachment. It also leads to high clustering coefficents. It is formed in the following way:1. We start with an initial number of connected nodes (m_0).2. We add a new node to the graph. 3. We randomly choose an existing node in the graph. The probability of making a new link between the new node and this existing node is determined by the existing node's degree divided by the sum of all degrees in the graph. This is called the preferential attachment step.4. If we have made the link in the preferential attachment step, we also consider a triad formation step with probably p. 
We decide whether to make another connection between the new node and a neighbor of the node it has attached to. 4. We repeat steps 3-4 (preferential attachment and triad formation) until the new node has m links.5. We keep adding nodes according to steps 2-3-4 until we have the number of nodes we need. Question 12Create a random graph from that model, such that the number of nodes is the same as in your graph.our_graph_nx= nx.from_numpy_matrix(our_graph) print("Number of edges is: ours:", our_graph_nx.number_of_edges()) #We form n_nodes = nx.number_of_nodes(our_graph_nx) pl_graph_nx = nx.powerlaw_cluster_graph(n=n_nodes, m=18, p=0.8, seed=41) print("Number of edges for power-law graph:", pl_graph_nx.number_of_edges())Number of edges is: ours: 32646 Number of edges for power-law graph: 31947Question 13Check the properties you expected to be similar, and compare to your network.our_connected_comp_list = list(nx.connected_component_subgraphs(our_graph_nx)) pl_connected_comp_list = list(nx.connected_component_subgraphs(pl_graph_nx)) print("Number of connected components: ours:", nx.number_connected_components(our_graph_nx), "pl:", nx.number_connected_components(pl_graph_nx)) print("Size of largest connected component: ours:", nx.number_of_nodes(our_connected_comp_list[0]), "pl:", nx.number_of_nodes(pl_connected_comp_list[0])) degree_sequence_pl = np.array(sorted([d for n, d in pl_graph_nx.degree()], reverse=False)) # degree sequence normalized_degree_sequence_pl = degree_sequence_pl/np.sum(degree_sequence_pl) degree_sequence_ours = np.array(sorted([d for n, d in our_graph_nx.degree()], reverse=False)) # degree sequence normalized_degree_sequence_ours = degree_sequence_ours/np.sum(degree_sequence_ours) print("Average degree is: ours:", np.mean(degree_sequence_ours), "pl:", np.mean(degree_sequence_pl)) print("Std of degree is: ours:", np.std(degree_sequence_ours), "pl:", np.std(degree_sequence_pl)) fig = plt.figure() weights = np.ones_like(normalized_degree_sequence_ours) / float(n_nodes) n, bins, patches = plt.hist(normalized_degree_sequence_ours, weights=weights, bins = 100) y_ax = bins[1:] weights_pl = np.ones_like(normalized_degree_sequence_pl) / float(n_nodes) n_pl, bins, patches = plt.hist(normalized_degree_sequence_pl, weights=weights_pl, bins = 100) y_ax_pl = bins[1:] plt.close(fig) fig = plt.figure() plt.suptitle("degree distribution") plt.subplot(131) plt.title("log plot of ours") plt.semilogy(y_ax, n, "bo") plt.xlabel("k") plt.subplot(132) plt.title("log plot of Power_Law") plt.semilogy(y_ax_pl, n_pl, "ro") plt.xlabel("k") plt.subplot(133) plt.title("ours and pl") plt.semilogy(y_ax_pl, n_pl, "ro", label='pl') plt.semilogy(y_ax, n, "bo", label='ours') plt.legend() plt.xlabel("k") plt.show() plt.close(fig) print("KL divergence between the two graphs is:", kl_divergence(normalized_degree_sequence_ours, normalized_degree_sequence_pl)) print("Average clustering coefficent: ours:", nx.average_clustering(our_graph_nx), "pl:", nx.average_clustering(pl_graph_nx)) print("Average shortest path length: ours:", nx.average_shortest_path_length(our_connected_comp_list[0]), "pl:", nx.average_shortest_path_length(pl_graph_nx))Number of connected components: ours: 15 pl: 1 Size of largest connected component: ours: 1769 pl: 1799 Average degree is: ours: 36.2934963868816 pl: 35.51639799888827 Std of degree is: ours: 35.5476571335769 pl: 33.648257933451845Chi-SquareIn this Statistics Appendix Lecture, we'll go over the Chi-Square Distribution and the Chi-Square Test. 
Let's start by introducing the general idea of observed and theoretical frequencies, then later we'll approach the idea of the Chi-Square Distribution and its definition. After that we'll do a quick example with Scipy on using the Chi-Square Test. Suppose that you tossed a coin 100 times. Theoretically you would expect 50 tails and 50 heads, however it is pretty unlikely you get that result exactly. Then a question arises... how far off from your expected/theoretical frequency would you have to be in order to conclude that the observed result is statistically significant and is not just due to random variations? We can begin to think about this question by defining an example set of possible events. We'll call them Events 1 through *k*. Each of these events has an expected (theoretical) frequency and an observed frequency. We can display this as a table:

| Event | Event 1 | Event 2 | Event 3 | ... | Event k |
|---|---|---|---|---|---|
| Observed Frequency | $$o_1$$ | $$o_2$$ | $$o_3$$ | ... | $$o_k$$ |
| Expected Frequency | $$e_1$$ | $$e_2$$ | $$e_3$$ | ... | $$e_k$$ |

Since we want to know whether the observed frequencies differ significantly from the expected frequencies, we'll have to define a term for a measure of discrepancy. We'll define this measure as Chi-Square, which will be the sum, over all events, of the squared difference between the observed and expected frequency divided by the expected frequency. To show this more clearly, this is mathematically written as:$$ \chi ^2 = \frac{(o_1 - e_1)^2}{e_1}+\frac{(o_2 - e_2)^2}{e_2}+...+\frac{(o_k - e_k)^2}{e_k} $$Which is the same as:$$\chi ^2 = \sum^{k}_{j=1} \frac{(o_j - e_j)^2}{e_j} $$ If the total frequency is N, $$ \sum o_j = \sum e_j = N $$ then we can rewrite the Chi-Square formula as:$$ \chi ^2 = \sum \frac{o_j ^2}{e_j} - N$$ We can now see that if the Chi-Square value is equal to zero, the observed and theoretical frequencies agree exactly, while if the Chi-Square value is greater than zero, they do not agree. The sampling distribution of Chi-Square is approximated very closely by the *Chi-Square distribution*. The Chi-Square Test for Goodness of Fit We can now use the [Chi-Square test](http://stattrek.com/chi-square-test/goodness-of-fit.aspx?Tutorial=AP) to determine how well a theoretical distribution fits an observed empirical distribution. Scipy will basically be constructing and looking up this table for us: ![](http://upload.wikimedia.org/wikipedia/commons/thumb/8/8e/Chi-square_distributionCDF-English.png/300px-Chi-square_distributionCDF-English.png) Let's go ahead and do an example problem. Say you are at a casino and are in charge of monitoring a [craps](http://en.wikipedia.org/wiki/Craps) table (a dice game where two dice are rolled). You are suspicious that a player may have switched out the casino's dice for their own. How do we use the Chi-Square test to check whether or not this player is cheating? You will need some observations in order to begin. You begin to keep track of this player's roll outcomes. You record the next 500 rolls, taking note of the sum of each dice roll and the number of times it occurs. You record the following:

| Sum of Dice Roll | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 |
|---|---|---|---|---|---|---|---|---|---|---|---|
| Number of Times Observed | 8 | 32 | 48 | 59 | 67 | 84 | 76 | 57 | 34 | 28 | 7 |

Now we also know the expected frequency of these sums for a fair pair of dice. 
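These expected frequencies can be derived by enumerating the 36 equally likely outcomes of two fair dice; a minimal illustrative sketch:

```python
from collections import Counter

# Enumerate every (die1, die2) outcome of two fair dice and count each sum.
sum_counts = Counter(d1 + d2 for d1 in range(1, 7) for d2 in range(1, 7))
for s in range(2, 13):
    print(s, f"{sum_counts[s]}/36")
```
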
That frequency distribution looks like this:

| Sum of Dice Roll | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 |
|---|---|---|---|---|---|---|---|---|---|---|---|
| Expected Frequency | 1/36 | 2/36 | 3/36 | 4/36 | 5/36 | 6/36 | 5/36 | 4/36 | 3/36 | 2/36 | 1/36 |

Now we can calculate the expected number of rolls by multiplying the expected frequency by the total number of rolls (500 rolls).# Check sum of the rolls observed = [8,32,48,59,67,84,76,57,34,28,7] roll_sum = sum(observed) roll_sum # The expected frequency freq = [1,2,3,4,5,6,5,4,3,2,1] # Note use of float for python 2.7 possible_rolls = 1.0/36 freq = [possible_rolls*dice for dice in freq] #Check freq Excellent, now let's multiply our frequency by the sum to get the expected number of rolls for each frequency. expected = [roll_sum*f for f in freq] expected We can now use Scipy to perform the [Chi Square Test](http://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.stats.chisquare.html) by using chisquare.from scipy import stats chisq,p = stats.chisquare(observed,expected) chisq # chi-squared value p # p-value Modelling This notebook exists in script format as well, ``modelling/train_model.py``. This notebook is for illustration in an interactive way. Loading Data Processed Disruption Data: This is the disruption data joined with RSSI data to get location information. RSSI Combined data: This is the processed RSSI data averaged over a day and over the position of the train.disruption_df = pd.read_csv( DataProcessor.gen_proc_file_name("disruption.csv"), infer_datetime_format=True, parse_dates=True, ) # ideally this should be from a database, as loading entirety of this data is super slow. print("Following step takes a lot of time, around 30-35 min, go get a coffee!") print("14 models into the future are trained") print("Collecting RSSI historical Data") rssi_comb_df = DataProcessor.combine_events(save=False) print("Data collection Done!")Following step takes a lot of time, around 30-35 min, go get a coffee! 14 models into the future are trained Collecting RSSI historical Data Data preparation for training and evaluation The disruptions from disruption_df are used to look for the instances in rssi_comb where the signal values observed `h` days in the past are available. These values are saved and used to train for the disruptions. (date, PositionNoLeap) -> DataGenerator -> historical values of signals Data generation for training For a position, the signal values in a neighbourhood of 20 m are combined to remove the effects of measurement noise. **Samples for disruption:** If the event happens `today` and we want to train a model that predicts disruption 7 days into the future, then we train the model on a window of [`today`-7-h, `today`-7] with the training label `disruption`. **Normal samples:** Since we also need to train the model on the normal behaviour of the signal, we also sample random windows from the data and label them as `NO disruption`. During training we sample equal proportions of disruptive and non-disruptive samples. Feature Engineering We investigated A2_RSSI (averaged over a day and over a positional window of +- 20 m). Then we calculated the mean and standard deviation of this value for the chosen historical window. 
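A minimal sketch of this kind of windowed feature extraction, assuming an illustrative DataFrame (the column names, window length, and reference date below are made up and only stand in for the project's actual `DataGenerator`):

```python
import numpy as np
import pandas as pd

# Toy frame of daily-averaged RSSI per position (values and column names are illustrative).
rng = np.random.default_rng(0)
rssi = pd.DataFrame({
    "date": pd.date_range("2021-01-01", periods=60).repeat(3),
    "position": np.tile([100, 120, 140], 60),
    "A2_RSSI": rng.normal(-80, 3, size=180),
})

# Mean and standard deviation of A2_RSSI over a 20-day historical window ending at a
# reference date, computed separately for each position.
ref_date, history = pd.Timestamp("2021-02-15"), 20
window = rssi[(rssi["date"] > ref_date - pd.Timedelta(days=history)) & (rssi["date"] <= ref_date)]
window_features = window.groupby("position")["A2_RSSI"].agg(["mean", "std"])
print(window_features)
```
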
The reason for this choice is the assumption that, at a given location, the RSSI value follows a normal distribution with a fixed mean and standard deviation. We calculated similar values for the quality of signal 1 and signal 2, which are the proportions of valid telegrams received. We also used the position itself as a feature, because each position can have different characteristics.train_disruptions_df, test_disruptions_df = train_test_split( disruption_df, test_size=0.2 ) features = ["A2_RSSI"] train_disruptions_df, test_disruptions_df = train_test_split( disruption_df, test_size=0.2 ) Model approach We explored linear ridge classification for predicting disruption, for its simplicity, and GradientBoosting models for their fast inference and superior performance. In our experiments GradientBoosting performed better than the linear model. The features with the most predictive power were the mean and standard deviation of A2_RSSI; other features did not improve the validation performance. We train 7 models for predicting disruption into the future, one model for each day into the future starting from 2. models = [] scores = [] # in training script the future horizon is 1 to 13 days. for d in range(3,4): train_data = DataGenerator(train_disruptions_df, rssi_comb_df, features) train_x, train_y = train_data.generate_samples( num_samples=300, prediction_days=d, history=20 ) test_data = DataGenerator(test_disruptions_df, rssi_comb_df, features) test_x, test_y = test_data.generate_samples( num_samples=50, prediction_days=d, history=20 ) model = lgb.LGBMClassifier() model.fit(X=train_x, y=train_y) print( f"Train accuracy score for {d} days into future {model.score(train_x,train_y)}" ) print( f"Test accuracy score for {d} days into future {model.score(test_x,test_y)}" ) scores.append([model.score(test_x, test_y), model.score(train_x, train_y)]) ## optionally one can save the model as here # model.booster_.save_model( # os.path.join(PATHS.data, "model", f"lgb_model_d{d}.txt") # ) models.append(model) print(scores)/home/aneesh/Documents/HackZurich2021/team_repo/HackZurich2021/data_processing/src/feature_extractor.py:14: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. 
Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy self.disruptions_df["Date"] = pd.to_datetime(disruptions_df["DateTime"]).dt.date 100%|██████████| 150/150 [01:07<00:00, 2.21it/s] 100%|██████████| 150/150 [00:59<00:00, 2.54it/s] 100%|██████████| 25/25 [00:10<00:00, 2.37it/s] 100%|██████████| 25/25 [00:10<00:00, 2.32it/s]fastp# export from pybiotools4p.softwares.base import Base,modify_cmd # export class Fastp(Base): def __init__(self, software, fd): super(Fastp, self).__init__(software) self._default = fd def cmd_version(self): ''' :return: ''' return 'echo {repr} ;{software} --version'.format( repr=self.__repr__(), software=self._software ) @modify_cmd def cmd_clean_data(self, fq1, cfq1, fq2, cfq2, report_prefix): ''' :param fq1: :param cfq1: :param fq2: :param cfq2: :param report_prefix: :return: ''' if fq2 in ['',None]: return r''' {software} {se} \ -i {fq1} \ -o {cfq1} \ --html {report_prefix}.fastp.html \ --json {report_prefix}.fastp.json '''.format( se=self._default['se'], software=self._software, **locals() ) else: return r''' {software} {pe} \ -i {fq1} \ -I {fq2} \ -o {cfq1} \ -O {cfq2} \ --html {report_prefix}.fastp.html \ --json {report_prefix}.fastp.json '''.format( pe=self._default['pe'], software=self._software, **locals()) def __repr__(self): return 'fastp:' + self._software def __str__(self): return 'A tool designed to provide fast all-in-one preprocessing for FastQ files. This tool is developed ' \ 'in C++ with multithreading supported to afford high performance.' import configparser config=configparser.ConfigParser() config.read('pybiotools4p/default.ini') fastp=Fastp('fastp',config['fastp']) fastp print(fastp) print(fastp.cmd_version()) fq1='./biology-test-data/fastq/HS.22.r1.fq.gz' fq2='./biology-test-data/fastq/HS.22.r2.fq.gz' cfq1='./pybiotools/HS_22_clean_r1.fq.gz' cfq2='./pybiotools/HS_22_clean_r2.fq.gz' report_prefix='./pybiotools/HS_22' print(fastp.cmd_clean_data(fq1,cfq1,fq2,cfq2,report_prefix)) fq2='' cfq1='./pybiotools/HS_22_se_clean_r1.fq.gz' print(fastp.cmd_clean_data(fq1,cfq1,fq2,cfq2,report_prefix+'se'))fastp \ -i ./biology-test-data/fastq/HS.22.r1.fq.gz \ -o ./pybiotools/HS_22_se_clean_r1.fq.gz \ --html ./pybiotools/HS_22se.fastp.html \ --json ./pybiotools/HS_22se.fastp.jsonBuild a test databaseDATABASE_PATH = "../data/database_8.h5" with h5py.File(DATABASE_PATH, mode="r") as f5: tr_noise = f5["training_noise"] tr_eq = f5["training_events"] test_noise = f5["test_noise"] test_eq = f5["test_events"] X_train = np.vstack([tr_noise[:, -1], tr_eq[:, -1]]) y_train = np.repeat([0, 1], [tr_noise.shape[0], tr_eq.shape[0]]) # X_test = X_train.copy() # y_test = y_train.copy() X_test = np.vstack([test_noise[:, -1], test_eq[:, -1]]) y_test = np.repeat([0, 1], [test_noise.shape[0], test_eq.shape[0]]) PHI = (1 + np.sqrt(5)) / 2 def plot_data(X_train, y_train): w = 342 / 72.27 h = 260 / 72.27 fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(w, h)) for j in range(2): for i, data in enumerate(X_train[np.argwhere(y_train == j).flatten()]): axes[j].plot(data / np.max(np.abs(data)) + i, color="k", linewidth=0.5) axes[j].set_xlim(0, X_train.shape[-1]) axes[j].set_xlabel("Sample index") axes[j].set_yticklabels([]) axes[0].set_ylabel("Trace index") return (fig, axes) plt.close("all") fig, axes = plot_data(X_train, y_train) fig.suptitle("Training Data") axes[0].set_title("Noise") axes[1].set_title("Earthquakes") plt.savefig( 
"/home/malcolmw/Google Drive/mal/meetings/2021-08-18_nakata_group_project_meeting/src/figures/training_waveforms.png", dpi=360, bbox_inches="tight" ) fig, axes = plot_data(X_test, y_test) fig.suptitle("Test Data") axes[0].set_title("Noise") axes[1].set_title("Earthquakes") plt.savefig( "/home/malcolmw/Google Drive//meetings/2021-08-18_nakata_group_project_meeting/src/figures/test_waveforms.png", dpi=360, bbox_inches="tight" )Embed the test databasedef make_meshgrid(W_max, ndim, iax, n=256): coords = np.meshgrid( *[ np.linspace(-W_max, W_max, n) if i in iax else [0] for i in range(ndim) ], indexing="ij" ) return (coords) def plot_clf(ax, clf, W_max, ndim, iax, **params): """Plot the decision boundaries for a classifier. Parameters ---------- ax: matplotlib axes object clf: a classifier xx: meshgrid ndarray yy: meshgrid ndarray params: dictionary of params to pass to contourf, optional """ coords = make_meshgrid(W_max, ndim, iax) Z = clf.decision_function(np.column_stack([xx.ravel() for xx in coords])) Z = Z.reshape(coords[iax[0]].shape) slices = tuple([slice(None) if i in iax else 0 for i in range(ndim)]) Z = Z[slices] xx = coords[iax[0]][slices] yy = coords[iax[1]][slices] out = ax.contourf(xx, yy, Z, cmap=plt.get_cmap("coolwarm_r"), zorder=100, alpha=0.8) return (out) def plot_images(W, y, iax=slice(None), marker="o", clf=None, W_max=None, labels=["Noise", "Earthquake"]): w = 0.9 * 342 / 72.27 h = 0.9 * 260 / 72.27 W_max = 1.08*np.max(np.abs(W)) if W_max is None else W_max W = W[:, iax] ndim = W.shape[-1] fig, axes = plt.subplots(nrows=ndim-1, ncols=ndim-1, figsize=(w, h)) for icol in range(ndim-1): for irow in range(icol, ndim-1): axes[irow, icol].scatter( W[:, icol], W[:, irow+1], marker=marker, c=y, cmap=CMAP, edgecolor="k", s=16, alpha=0.8, zorder=200 ) if clf is not None: plot_clf(axes[irow, icol], clf, W_max, ndim, [icol, irow+1]) axes[irow, icol].set_xlim(-W_max, W_max) axes[irow, icol].set_ylim(-W_max, W_max) axes[irow, icol].xaxis.set_major_locator(mpl.ticker.MaxNLocator(3)) axes[irow, icol].yaxis.set_major_locator(mpl.ticker.MaxNLocator(3)) # Label x-axis on bottom row. for irow in range(ndim-1): label = irow+1 if iax == slice(None) else iax[irow+1] axes[irow, 0].set_ylabel(f"$x_{label}$") # Label y-axis on left column. for icol in range(ndim-1): label = icol if iax == slice(None) else iax[icol] axes[-1, icol].set_xlabel(f"$x_{label}$") # Turn off x-axis tick labels on all but last row. for irow in range(ndim-2): for icol in range(irow+1): axes[irow, icol].set_xticklabels([]) # Turn off y-axis tick labels on all but left column. for irow in range(ndim-1): for icol in range(1, ndim-1): axes[irow, icol].set_yticklabels([]) # Turn off upper triangle. 
for irow in range(ndim-1): for icol in range(irow+1, ndim-1): axes[irow, icol].set_frame_on(False) axes[irow, icol].set_xticks([]) axes[irow, icol].set_yticks([]) # Add a legend kwargs = dict(marker=marker, linewidth=0, markeredgecolor="k", ) legend_elements = [ mpl.lines.Line2D([0], [0], color=CMAP(0), label="Noise", **kwargs), mpl.lines.Line2D([0], [0], color=CMAP(1), label="Earthquake", **kwargs) ] axes[0, 2].legend(handles=legend_elements, loc="center") return (fig, axes) path = pathlib.Path("test.hdf5") path.unlink(missing_ok=True) fastmap = fm.FastMap(X_train, fm.distance, 4, "test.hdf5") fastmap.embed_database(); CMAP = mpl.colors.ListedColormap(['#FF0000', '#0000FF']) W_train = fastmap.image[:] W_test = np.vstack([fastmap.embed(X) for X in X_test]) scaler = sklearn.preprocessing.StandardScaler() scaler.fit(W_train) W_train = scaler.transform(W_train) W_test = scaler.transform(W_test) classifier = sklearn.svm.SVC(gamma=1/4, C=8) classifier.fit(W_train, y_train) plt.close("all") fig, axes = plot_images(W_train, y_train) fig.suptitle(f"{W_train.shape[-1]}-D FastMap images of Training Data") plt.savefig( "/home/malcolmw/Google Drive//meetings/2021-08-18_nakata_group_project_meeting/src/figures/fastmap01.png", dpi=360, bbox_inches="tight" ) fig, axes = plot_images(W_train, y_train, clf=classifier) fig.suptitle(f"{W_train.shape[-1]}-D FastMap images of Training Data \n with SVM Decision Function") plt.savefig( "/home/malcolmw/Google Drive//meetings/2021-08-18_nakata_group_project_meeting/src/figures/fastmap02.png", dpi=360, bbox_inches="tight" ) fig, axes = plot_images(W_test, y_test, marker="s", clf=classifier, W_max=1.08*np.max(np.abs(W_train))) fig.suptitle(f"{W_train.shape[-1]}-D FastMap images of Test Data with \n SVM Decision Function") score = classifier.score(W_test, y_test)*100 axes[1, 2].text( 0.5, 0.5, f"Score: {score:.2f}%", ha="center", va="center", transform=axes[1, 2].transAxes ) plt.savefig( "/home/malcolmw/Google Drive//meetings/2021-08-18_nakata_group_project_meeting/src/figures/fastmap03.png", dpi=360, bbox_inches="tight" ) ax.get_legend_handles_labels(CMAP(0)) CMAP(1)predictive uncertainty analysis# import packages import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt import seaborn as sns import math from matplotlib.ticker import FuncFormatter import matplotlib.ticker as mtick mpl.rcParams['font.size'] = 16 mpl.rcParams['savefig.bbox'] = 'tight' mpl.rcParams['savefig.format'] = 'pdf' mpl.rcParams['axes.labelsize'] = 20 mpl.rcParams['xtick.labelsize'] = 20 mpl.rcParams['ytick.labelsize'] = 20 mpl.rcParams['legend.fontsize'] = 20 # import the annual loads file_date = '20220116' fpath = f'../../output/work_run_{file_date}/' fn = '126001A.3.obs.csv' fn_meas = '126001A.base.obs.csv' log_load = True df = pd.read_csv(fpath + fn, index_col = 'real_name') # select results of which the pbias is with 15% # df = df[(df.din_pbias < 15) & (df.din_pbias > -15)] df_meas = pd.read_csv(fpath + fn_meas, index_col = 'real_name') if log_load: df_meas.loc[:, 'din_2009':] = 10**(df_meas.loc[:, 'din_2009':]) df.loc[:, 'din_2009':] = 10**(df.loc[:, 'din_2009':]) df['average'] = df.loc[:, 'din_2009':'din_2017'].mean(axis=1).values df_meas['average'] = df_meas.loc[:, 'din_2009':'din_2017'].mean(axis=1).values # obs data obs_annual = [52.093, 99.478, 44.064, 57.936, 53.449, 21.858, 38.561, 51.843, 14.176] obs_annual.append(np.round(np.mean(obs_annual), 2)) obs_df = pd.DataFrame(data=obs_annual, index = [*np.arange(2009, 2018), 'average'], 
columns=['Annual loads']) # reorganize the dataframe for plotting df_plot = pd.DataFrame(data = df.values[:, 1:].T.flatten(), columns=['Annual loads']) year_col = np.repeat(np.arange(2009, 2019), df.shape[0], 0).T df_plot['year'] = year_col df_plot['type'] = 'Estimate' df_meas_plot = pd.DataFrame(data = df_meas.values[:, 1:].T.flatten(), columns=['Annual loads']) year_col = np.repeat(np.arange(2009, 2019), df_meas.shape[0], 0).T df_meas_plot['year'] = year_col df_meas_plot['type'] = 'Measurement realization' df_plot = pd.concat([df_meas_plot, df_plot]) df_plot.reset_index().tail() # Plot the uncertainty of annual loads sns.set_style('whitegrid') fig = plt.figure(figsize=(10, 6)) ax = sns.violinplot(x='year', y='Annual loads', data=df_plot, hue='type', split=True); ax.legend(handles=ax.legend_.legendHandles, labels=['Measurement', 'Estimate']) # obs_df.plot(y=[0], linewidth=0, ax=ax, marker='d', markersize=5, color='r', legend=['Obs']); ax.set_xticklabels([*np.arange(2009, 2018), 'average'], rotation=90); plt.gca().yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e')) ax.set_xlabel('Year') ax.set_ylabel('Annual loads(KG)') # plt.savefig(f'../../output/figs/predict_uncertainty_{file_date}.png', dpi=300, format='png') sns.set_style('darkgrid', {"axes.facecolor": ".9"}) df_temp = df - df_meas ax = df_temp.loc[:, 'din_2009':].T.plot(legend=False, figsize=(20, 6), use_index=True, xlabel='Year', ylabel='Annual loads(KG)') ax.set_xticks(range(len(df.columns[1:]))) obs_df.plot(y=[0], linewidth=0, ax=ax, marker='d', markersize=5, color='r', legend=['Obs']); ax.set_xticklabels([*np.arange(2009, 2018), 'average']); # plt.savefig(f'../../output/figs/predict_uncertainty_lineplot_{file_date}.png', dpi=300, format='png')Import necessary packagesimport xlrd import pandas as pd import numpy as np import seaborn as sns import hypertools as hyp import matplotlib.pyplot as plt %matplotlib inline sns.set_context('poster')Read in datadata = pd.read_excel('GSS 3.xlsx', sheetname='Data') data.head()Create new data frames only looking at happiness, work, and demographic factors#list of happiness factors happiness = ['Standard of living of r will improve', 'How much time felt sad in past wk', 'How much time felt happy in past wk', 'How much time felt depressed in past wk', 'I am meeting my current goals', 'I see myself as successful', 'I expect more good things to happen to me than bad', 'I\'m always optimistic about my future', 'Happiness of marriage', 'General happiness', 'Happiness of relt with partner', 'Condition of health', 'Satisfaction with financial situation', 'Rs self ranking of social position', 'Is life exciting or dull'] #list of work factors work = ['Rs income in constant $', 'Rs job is useful to society', 'R has opportunity to advance', 'Rs income is high', 'Rs job is secure', 'The highest degree r have earned', 'Respondents income', 'Rs living standard compared to parents', 'Respondents income', 'Living with parents when 16 yrs old', 'Marital status', 'Number of hours usually work a week', 'Job or housework', 'Is r likely to lose job'] #list of demographic factors demo = ['Ballot used for interview', 'Gss year for this respondent ', 'Year of birth', 'Region of interview', 'Was r born in this country'] demo_df = data[demo] happiness_df = data[happiness] work_df = data[work]Is happiness data clustered in any way?hyp.plot(happiness_df, '.', group = happiness_df['General happiness'], legend = list(happiness_df['General happiness'].unique())) for factor in happiness: print(factor) 
hyp.plot(happiness_df, '.', group=happiness_df[factor], legend = list(happiness_df[factor].unique()))Overall, how many respondents consider themselves happy?sns.countplot('General happiness', data = data, order = ['Very happy', 'Pretty happy', 'Not too happy', "Don't know", 'No answer'])What does work look like for different levels of happiness?sns.swarmplot(data = data, x='General happiness', y='Rs income in constant $') sns.violinplot(data = data, x='General happiness', y='Rs income in constant $', order = ['Very happy', 'Pretty happy', 'Not too happy', "Don't know"]) sns.boxplot(data = data, x='General happiness', y='Rs income in constant $', order = ['Very happy', 'Pretty happy', 'Not too happy', "Don't know"]) #create a data frame for people who answered "pretty happy" pretty_happy = data.loc[data['General happiness'] == 'Pretty happy'] #create a data frame for people who answered "very happy" very_happy = data.loc[data['General happiness'] == 'Very happy'] #create a data frame for people who answered "not too happy" not_too_happy = data.loc[data['General happiness'] == 'Not too happy']Incomesns.countplot('Rs income in constant $', data = very_happy) plt.title('Very happy') plt.xticks(rotation=90) plt.ylim(0, 60) plt.show() sns.countplot('Rs income in constant $', data = pretty_happy) plt.title('Pretty happy') plt.xticks(rotation=90) plt.ylim(0, 100) plt.show() sns.countplot('Rs income in constant $', data = not_too_happy) plt.title('Not too happy') plt.xticks(rotation=90) plt.ylim(0, 20) plt.show()Job satisfactionsns.countplot('Job or housework', data = very_happy, order = ['Very satisfied', 'Mod. satisfied', 'A little dissat', 'Very dissatisfied', "Don't know", 'Not applicable', 'No answer']) plt.title('Very happy') plt.xticks(rotation=90) #plt.ylim(0, 100) plt.show() sns.countplot('Job or housework', data = pretty_happy, order = ['Very satisfied', 'Mod. satisfied', 'A little dissat', 'Very dissatisfied', "Don't know", 'Not applicable', 'No answer']) plt.title('Pretty happy') plt.xticks(rotation=90) #plt.ylim(0, 60) plt.show() sns.countplot('Job or housework', data = not_too_happy, order = ['Very satisfied', 'Mod. 
satisfied', 'A little dissat', 'Very dissatisfied', "Don't know", 'Not applicable', 'No answer']) plt.title('Not too happy') plt.xticks(rotation=90) #plt.ylim(0, 20) plt.show()Job securitysns.countplot('Rs job is secure', data = very_happy, order = ['Strongly agree', 'Agree', 'Neither', 'Disagree', 'Strongly disagree', 'No issp']) plt.title('Very happy') plt.xticks(rotation=90) plt.ylim(0, 150) plt.show() sns.countplot('Rs job is secure', data = pretty_happy, order = ['Strongly agree', 'Agree', 'Neither', 'Disagree', 'Strongly disagree', 'No issp']) plt.xticks(rotation=90) plt.ylim(0, 250) plt.show() sns.countplot('Rs job is secure', data = not_too_happy, order = ['Strongly agree', 'Agree', 'Neither', 'Disagree', 'Strongly disagree', 'No issp']) plt.title('Not too happy') plt.xticks(rotation=90) plt.ylim(0, 50) plt.show()Graph with percentages instead of counts#write function to get percentages by dividing value count of each by total count def answer(df, factor): #make list for all unique answers answers = [] for answer in range(0, len(df[factor].value_counts())): answers.append(df[factor].value_counts().index[answer]) return answers def percentage(df, factor): #make list for all percentages percentage_list = [] #get percentages for answer in range(0, len(df[factor].value_counts())): percentage_list.append(df[factor].value_counts()[answer]/sum(df[factor].value_counts())) return percentage_list #wrong bc value_counts and unique() are not in same order x = answer(very_happy, 'Job or housework') print(x) y = percentage(very_happy, 'Job or housework') print(y) sns.barplot(x = x, y = y, order = ['Very satisfied', 'Mod. satisfied', 'A little dissat', 'Very dissatisfied', "Don't know", 'Not applicable', 'No answer'])Clusteringbased on scipy2018-geospatial goals of the tutorial- cluster of point- cluster of polygons requirements- python knowledge- geopandas status *"Teamwork divides the tasks and multiplies the success"*---import os import numpy as np import libpysal as lp import geopandas as gpd import pandas as pd import shapely.geometry as shp import matplotlib.pyplot as plt import sklearn.metrics as skm import seaborn as sns %matplotlib inlinePoint Clusteringlibraries_turin = gpd.read_file('data' + os.sep + 'libraries_turin.geojson') libraries_turin = libraries_turin.to_crs(epsg=32632) libraries_turin.plot('cap',cmap='plasma') import sklearn.cluster as skc coordinates = libraries_turin['geometry'].apply(lambda p: np.hstack(p.xy)).values coordinates = np.vstack(coordinates)Like other scikit estimators, they are split into an initialization/configuration call and a fit call.Here, we'll use a local density-based scan clusterer, called DBSCAN. 
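A minimal self-contained sketch of DBSCAN on synthetic 2-D points (the coordinates and parameter values are illustrative only, not the library data used next):

```python
import numpy as np
import sklearn.cluster as skc

# Two tight groups of points plus one isolated point.
pts = np.array([[0, 0], [0, 1], [1, 0],        # group A
                [10, 10], [10, 11], [11, 10],  # group B
                [50, 50]])                     # far from everything else

labels = skc.DBSCAN(eps=2, min_samples=2).fit(pts).labels_
print(labels)  # -> two clusters (0 and 1), and the isolated point is labelled as noise (-1)
```
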
It works by fitting clusters of nearby points using the eps distance, which is the furthest distance at which points are considered to be in each other's clusters.clusterer = skc.DBSCAN(eps=1000).fit(coordinates) clusterer.components_ clusterer.core_sample_indices_ clusterer.labels_ nclusters = clusterer.p libraries_turin.assign(labels=clusterer.labels_).plot('labels', k=nclusters, cmap='plasma')Area Clusterspopulation_italian_regions = gpd.read_file('data'+ os.sep + 'geo_population_italian_regions.shp').to_crs(epsg=32632) population_italian_regions.plot('Value', cmap='plasma') population_cluster = skc.AgglomerativeClustering(n_clusters=3).fit(population_italian_regions[['Value']]) population_italian_regions.assign(labels=population_cluster.labels_).plot('labels', cmap='plasma')Contiguous clustersrook_graph = lp.weights.Rook.from_dataframe(population_italian_regions) %time regions_populationclusters_sp = skc.AgglomerativeClustering(n_clusters=3, connectivity=rook_graph.sparse).fit(population_italian_regions[['Value']]) population_italian_regions.assign(labels=regions_populationclusters_sp.labels_).plot('labels', cmap='plasma')> For the above dataset y-label will be the `score` and rest of the features will be used score prediction## list of categorical variables which need encoding categorical = list(data.select_dtypes(include=['object']).columns.values) categorical from sklearn import preprocessing le = preprocessing.LabelEncoder() # seasons in place of months ['Dec-Feb' 'Mar-May' 'Jun-Aug' 'Sep-Nov'] data['Period of stay'] = data['Period of stay'].map({'Dec-Feb':'winter', 'Mar-May':'spring', 'Jun-Aug' :'summer','Sep-Nov':'autumn'}) for i in range(0, len(categorical)): # data[categorical[i]] = le.fit_transform(data[categorical[i]]) print(data[categorical[i]].unique()) #Encoding categorical features with numbers for i in range(0, len(categorical)): data[categorical[i]] = le.fit_transform(data[categorical[i]]) data.head()**Prepare train and test sets**## prepare train and test labels from sklearn.model_selection import train_test_split X= data.drop(['Score'], axis=1) ## remove score label from data y = data['Score'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)**Recursive feature elimination (RFE) to select features by recursively considering smaller and smaller sets of features**##Applying random forest classifier on features to class ## Using feature selection pipeline and classification from sklearn.ensemble import RandomForestClassifier from sklearn.svm import LinearSVC from sklearn.pipeline import Pipeline from sklearn.feature_selection import SelectFromModel from sklearn.feature_selection import RFE from sklearn.linear_model import Ridge rfe = RFE(estimator = Ridge(), n_features_to_select = 12) rfe.fit(X_train, y_train) feature_list = pd.DataFrame({'col':list(X_train.columns.values),'sel':list(rfe.support_ *1)}) print("*Most contributing features in Score*") print(feature_list[feature_list.sel==1].col.values) ## Subset train data based on selected features X_sel = pd.DataFrame(X_train, columns=(feature_list[feature_list.sel==1].col.values)) X_sel_t = pd.DataFrame(X_test, columns=(feature_list[feature_list.sel==1].col.values))**Using selected features to fit the model for Score prediction**> Using random forest classifier and kNN to predict score **Using Random forest**clf = RandomForestClassifier(max_depth=5, random_state=0) clf.fit(X_sel, y_train)**Using KNN for score prediction**from sklearn.neighbors import KNeighborsClassifier knn = 
KNeighborsClassifier(n_neighbors=10) knn.fit(X_sel, y_train) knn.score(X_sel_t, y_test),clf.score(X_sel_t, y_test)**Random Forest gave better score than KNN on test data*** Using Random forest for `Score` Predictionp = (list(clf.predict(X_sel_t))) len(p) Predictions = X_sel_t Predictions['Original_Score']= y_test Predictions['pred_score'] = p Predictions.head() plt.figure(figsize=(8, 5)) ax = plt.subplot() d = list(range(0,len(Predictions))) p1 = plt.plot(d,Predictions['Original_Score'],'r-', label="Original Score", linewidth= 1 )#,data['gamma'],data['test_err'],'b-') p2 = plt.plot(d,Predictions['pred_score'],'b-', label="Predicted Score", linewidth= 1) ax.set_title('Original Score vs Prediction score on test reviews data\n Using Random Forest') handles, labels = ax.get_legend_handles_labels() ax.legend(handles, labels) ax.set_xlabel('review Id') ax.set_ylabel('Score') plt.show()Build a network with at least 3 hidden layers that achieves better than 92% accuracy on validation and test data. You may need to train for more than 10 epochs to achieve this result.* Compare your best results to the result you got in part one: * Which network achieved the best accuracy on test data after training? * Did the networks train for a similar number of epochs?# For drawing the MNIST digits as well as plots to help us evaluate performance we # will make extensive use of matplotlib from matplotlib import pyplot as plt # All of the Keras datasets are in keras.datasets from tensorflow.keras.datasets import mnist #Allows us to flatten 2d given data from tensorflow.keras.utils import to_categorical # Keras has already split the data into training and test data (training_images, training_labels), (test_images, test_labels) = mnist.load_data() # 28 x 28 = 784, because that's the dimensions of the MNIST data. image_size = 784 # Reshaping the training_images and test_images to lists of vectors with length 784 # instead of lists of 2D arrays. Same for the test_images training_data = training_images.reshape(training_images.shape[0], image_size) test_data = test_images.reshape(test_images.shape[0], image_size) # Create 1-hot encoded vectors using to_categorical num_classes = 10 # Because it's how many digits we have (0-9) # to_categorical takes a list of integers (our labels) and makes them into 1-hot vectors training_labels = to_categorical(training_labels, num_classes) test_labels = to_categorical(test_labels, num_classes) print("training data: ", training_images.shape, " ==> ", training_data.shape) print("test data: ", test_images.shape, " ==> ", test_data.shape) #I need to be able to create the necessary Params using the code below from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense # Sequential models are a series of layers applied linearly. model = Sequential() #This defines the layer itself. 400x784 (size of data) model.add(Dense(units=800, activation='sigmoid', input_shape=(image_size,))) model.add(Dense(units=600, activation='sigmoid', input_shape=(image_size,))) model.add(Dense(units=400, activation='sigmoid', input_shape=(image_size,))) # This is how the output layer gets added, the 'softmax' activation function ensures # that the sum of the values in the output nodes is 1. Softmax is very # common in classification networks. model.add(Dense(units=num_classes, activation='softmax', input_shape=(image_size,))) # This function provides useful text data for our network model.summary() # sgd stands for stochastic gradient descent. 
# categorical_crossentropy is a common loss function used for categorical classification. # accuracy is the percent of predictions that were correct. model.compile(optimizer="sgd", loss='categorical_crossentropy', metrics=['accuracy']) # The network will make predictions for 128 flattened images per correction. # It will make a prediction on each item in the training set 5 times (5 epochs) # And 10% of the data will be used as validation data. history = model.fit(training_data, training_labels, batch_size=128, epochs=20, verbose=True, validation_split=.1) loss, accuracy = model.evaluate(test_data, test_labels, verbose=True) plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['training', 'validation'], loc='best') plt.show() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['training', 'validation'], loc='best') plt.show() print(f'Test loss: {loss:.3}') print(f'Test accuracy: {accuracy:.3}')10000/10000 [==============================] - 7s 677us/sample - loss: 0.2498 - accuracy: 0.9296Cosmic-ray flux vs. sky position%load_ext watermark %watermark -u -d -v -p numpy,scipy,pandas,sklearn,mlxtend %load_ext autoreload %autoreload 2 from __future__ import division, print_function import os import sys from numbers import Number import numpy as np import pandas as pd import healpy as hp import matplotlib.pyplot as plt from matplotlib.ticker import FormatStrFormatter import seaborn as sns import dask from dask import delayed, compute from dask.diagnostics import ProgressBar import dask.array as da import pyunfold import comptools as comp import sky_anisotropy as sa color_dict = comp.color_dict sns.set_context(context='paper', font_scale=1.5) %matplotlib inline config = 'IC86.2012' num_groups = 2 comp_list = comp.get_comp_list(num_groups=num_groups) analysis_bins = comp.get_bins(config=config, num_groups=num_groups) energybins = comp.get_energybins(config) num_ebins = len(energybins.log_energy_midpoints) unfolding_dir = os.path.join(comp.paths.comp_data_dir, config, 'unfolding') feature_list, feature_labels = comp.get_training_features() energy_pipeline_name = 'linearregression' # energy_pipeline_name = 'RF' energy_pipeline = comp.load_trained_model('{}_energy_{}'.format(energy_pipeline_name, config)) # pipeline_str = 'SGD_comp_{}_{}-groups'.format(config, num_groups) pipeline_str = 'xgboost_comp_{}_{}-groups'.format(config, num_groups) comp_pipeline = comp.load_trained_model(pipeline_str) # print('Loading data into memory...') # df_data = comp.load_data(config=config, # energy_reco=True, # log_energy_min=6.1, # log_energy_max=8.0, # columns=feature_list + ['lap_ra', 'lap_dec'], # n_jobs=20, # verbose=True) # df_data.to_hdf('data_dataframe.hdf', 'dataframe', format='table') df_data = pd.read_hdf('data_dataframe.hdf', 'dataframe', mode='r') print('Running energy reconstruction...') df_data['reco_log_energy'] = energy_pipeline.predict(df_data[feature_list].values) df_data['reco_energy'] = 10**df_data['reco_log_energy'] import dask.array as da X_data = da.from_array(df_data[feature_list].values, chunks=int(1e4)) X_data from dask_ml.wrappers import ParallelPostFit from dask.diagnostics import ProgressBar pred_comp_target = ParallelPostFit(comp_pipeline).predict(X_data) reco_log_energy = ParallelPostFit(energy_pipeline).predict(X_data) import warnings with ProgressBar() as _, warnings.catch_warnings() 
as _: warnings.simplefilter("ignore") # Wan to ignore xgboost DeprecationWarning print('Running composition classifications...') # df_data['pred_comp_target'] = pred_comp_target.compute(scheduler='threads', # num_workers=20) df_data['pred_comp_target'] = pred_comp_target.compute(scheduler='sync', num_workers=1)Running composition classifications... [########################################] | 100% Completed | 28.3s [########################################] | 100% Completed | 0.1sCosmic-ray flux vs. sky position Sample on/off regionsimport sky_anisotropy as sa from scipy import stats from scipy.special import erfcinv nside = 64 npix = hp.nside2npix(nside)Spectrum anisotropy skymapdef unfolding_func(counts, composition='total'): original_shape = counts.shape counts_err = np.sqrt(counts) counts_total = counts.sum(axis=1) counts_err_total = np.sqrt(np.sum(counts_err**2, axis=1)) unfolding_energy_range_mask = np.logical_and(energybins.log_energy_midpoints >= 6.4, energybins.log_energy_midpoints <= 7.8) return counts_total[unfolding_energy_range_mask], counts_err_total[unfolding_energy_range_mask] import pyprind sig_max = [] n_samples = 20 for idx in pyprind.prog_bar(range(n_samples)): random_state = idx ra = df_data.loc[:, 'lap_ra'].sample(frac=1.0, random_state=random_state).values dec = df_data.loc[:, 'lap_dec'].values theta, phi = comp.equatorial_to_healpy(ra, dec) pix_array = hp.ang2pix(nside, theta, phi) df_data['pix'] = pix_array theta, phi = hp.pix2ang(nside, list(range(npix))) ra, dec = sa.healpy_to_equatorial(theta, phi) dec_max_deg = -65 size = np.deg2rad(5) on_region = 'disc' off_region = 'theta_band' with_unfolding = False has_data = dec < np.deg2rad(dec_max_deg) if off_region == 'theta_band': has_data = has_data & (dec > np.deg2rad(-90) + size) pix_disc = np.arange(npix)[has_data] data = df_data.loc[:, ['reco_log_energy', 'pred_comp_target']].values pix = df_data.loc[:, 'pix'].values binned_skymaps = sa.binned_skymaps(data=data, pix=pix, bins=analysis_bins, nside=nside) with dask.config.set(scheduler='sync', num_workers=1): results = sa.on_off_chi_squared(binned_maps=binned_skymaps, pix_center=pix_disc, on_region=on_region, size=size, off_region=off_region, nside=nside, hist_func=unfolding_func, ) dof = 13 pval = stats.chi2.sf(results['chi2'], dof) sig = erfcinv(2 * pval) * np.sqrt(2) sig_max.append(sig.max()) sig_max sig_max = np.array(sig_max) sig_max outdir = os.path.join(os.getcwd(), 'results', 'unfolded' if with_unfolding else 'pre-unfolding') print('outdir = {}'.format(outdir)) def calculate_local_sigma(df, nside=64, bins=None, random_state=None): if bins is None: raise ValueError('bins cannot be None') if random_state is None: ra = df.loc[:, 'lap_ra'].values else: ra = df.loc[:, 'lap_ra'].sample(frac=1.0, random_state=random_state).values dec = df.loc[:, 'lap_dec'].values theta, phi = comp.equatorial_to_healpy(ra, dec) pix_array = hp.ang2pix(nside, theta, phi) df['pix'] = pix_array npix = hp.nside2npix(nside) map_pix = np.arange(npix) theta, phi = hp.pix2ang(nside, map_pix) ra, dec = sa.healpy_to_equatorial(theta, phi) dec_max_deg = -65 size = np.deg2rad(5) on_region = 'disc' off_region = 'theta_band' with_unfolding = False has_data = dec < np.deg2rad(dec_max_deg) if off_region == 'theta_band': has_data = has_data & (dec > np.deg2rad(-90) + size) pix_disc = map_pix[has_data] data = df.loc[:, ['reco_log_energy', 'pred_comp_target']].values pix = df.loc[:, 'pix'].values binned_skymaps = sa.binned_skymaps(data=data, pix=pix, bins=bins, nside=nside) with 
dask.config.set(scheduler='sync', num_workers=1): results = sa.on_off_chi_squared(binned_maps=binned_skymaps, pix_center=pix_disc, on_region=on_region, size=size, off_region=off_region, nside=nside, hist_func=unfolding_func, ) dof = 13 pval = stats.chi2.sf(results['chi2'], dof) sig = erfcinv(2 * pval) * np.sqrt(2) return sig.max() calculate_local_sigma(df=df_data, nside=nside, bins=analysis_bins, random_state=3) sig_max = np.array([calculate_local_sigma(df=df_data, nside=nside, bins=analysis_bins, random_state=i) for i in range(2)]) sig_max*The Price Is Right* Problem This notebook is part of [Bite Size Bayes](https://allendowney.github.io/BiteSizeBayes/), an introduction to probability and Bayesian statistics using Python.Copyright 2020 License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/) The following cell downloads `utils.py`, which contains some utility function we'll need.from os.path import basename, exists def download(url): filename = basename(url) if not exists(filename): from urllib.request import urlretrieve local, _ = urlretrieve(url, filename) print('Downloaded ' + local) download('https://github.com/AllenDowney/BiteSizeBayes/raw/master/utils.py')If everything we need is installed, the following cell should run with no error messages.import numpy as np import pandas as pd import matplotlib.pyplot as pltReview[In a previous notebook](https://colab.research.google.com/github/AllenDowney/BiteSizeBayes/blob/master/09_predict.ipynb) we used the time between goals to update our estimate of the goal-scoring rate of a soccer team.Under the assumption that goal-scoring is well-modeled by a Poisson process, the time between goals follows an [exponential distribution](https://en.wikipedia.org/wiki/Exponential_distribution).In other words, if the goal-scoring rate is λ, the probability of seeing an interval between goals of $t$ is proportional to the PDF of the exponential distribution:$f(t; λ) = λ~\exp(-λ t)$Because $t$ is a continuous quantity, the value of this expression is not really a probability; technically it is a [probability density](https://en.wikipedia.org/wiki/Probability_density_function). However, it is proportional to the probability of the data, so we can use it as a likelihood in a Bayesian update.In this notebook, we'll use the PDF of a normal distribution the same way, in order to estimate the value of prizes on a game show.Once we compute a posterior distribution, we'll use it to optimize a decision-making process.This example demonstrates the real power of Bayesian methods, not just computing posterior distributions, but using them to make better decisions. The Price is Right problemOn November 1, 2007, contestants named Letia and Nathaniel appeared on *The Price is Right*, an American game show. They competed in a game called "The Showcase", where the objective is to guess the price of a collection of prizes. The contestant who comes closest to the actual price, without going over, wins the prizes.Nathaniel went first. His showcase included a dishwasher, a wine cabinet, a laptop computer, and a car. He bid $26,000.Letia’s showcase included a pinball machine, a video arcade game, a pool table, and a cruise of the Bahamas. She bid $21,500.The actual price of Nathaniel’s showcase was $25,347. His bid was too high, so he lost.The actual price of Letia’s showcase was $21,578. 
She was only off by $78, so she won her showcase and, because her bid was off by less than 250, she also won Nathaniel’s showcase. For a Bayesian thinker, this scenario suggests several questions:1. Before seeing the prizes, what prior beliefs should the contestant have about the price of the showcase?2. After seeing the prizes, how should the contestant update those beliefs?3. Based on the posterior distribution, what should the contestant bid?The third question demonstrates a common use of Bayesian methods: [decision analysis](https://en.wikipedia.org/wiki/Decision_analysis).This problem is inspired by [this example](https://nbviewer.jupyter.org/github/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/blob/master/Chapter5_LossFunctions/Ch5_LossFunctions_PyMC3.ipynb) in Cameron Davidson-Pilon’s book, [Probablistic Programming and Bayesian Methods for Hackers](http://camdavidsonpilon.github.io/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers). The priorTo choose a prior distribution of prices, we can take advantage of data from previous episodes. Fortunately, [fans of the show keep detailed records](https://web.archive.org/web/20121107204942/http://www.tpirsummaries.8m.com/). For this example, I downloaded files containing the price of each showcase from the 2011 and 2012 seasons and the bids offered by the contestants.The following cells load the data files.# Load the data files import os if not os.path.exists('showcases.2011.csv'): !wget https://github.com/AllenDowney/BiteSizeBayes/raw/master/showcases.2011.csv if not os.path.exists('showcases.2012.csv'): !wget http://github.com/AllenDowney/BiteSizeBayes/raw/master/showcases.2012.csvThe following function reads the data and cleans it up a little.def read_data(filename): """Read the showcase price data. filename: string returns: DataFrame """ df = pd.read_csv(filename, index_col=0, skiprows=[1]) return df.dropna().transpose()I'll read both files and concatenate them.df2011 = read_data('showcases.2011.csv') df2011.shape df2012 = read_data('showcases.2012.csv') df2012.shape df = pd.concat([df2011, df2012], ignore_index=True) df.shapeHere's what the dataset looks like:df.head()Kernel density estimationThis dataset contains the prices for 313 previous showcases, which we can think of as a sample from the population of possible prices.We can use this sample to estimate the prior distribution of showcase prices. One way to do that is kernel density estimation (KDE), which uses the sample to estimate a smooth distribution.SciPy provides `gaussian_kde`, which takes a sample and returns an object that represents the estimated distribution.from scipy.stats import gaussian_kde kde = gaussian_kde(df['Showcase 1']) kdeWe can use `kde` to evaluate the estimated distribution for a sequence of values:xs = np.linspace(0, 80000, 81) ps = kde(xs)And put the results into a normalized Series that represents the prior distribution for Showcase 1.def make_pmf(xs, ps, **options): """Make a Series that represents a PMF. 
xs: sequence of values ps: sequence of probabilities options: keyword arguments passed to Series constructor returns: Pandas Series """ pmf = pd.Series(ps, index=xs, **options) return pmf prior1 = make_pmf(xs, ps) prior1 /= prior1.sum()Here's what it looks like:prior1.plot(label='Prior 1') plt.xlabel('Showcase value ($)') plt.ylabel('Probability') plt.title('Prior distribution of showcase value') plt.legend();The following function takes a sample, makes a KDE, evaluates it at a given sequence of `xs`, and returns the result as a normalized PMF.def make_kde(xs, sample): """Make a PMF based on KDE: xs: places where we should evaluate the KDE sample: sequence of values returns: Series that represents a normalized PMF """ kde = gaussian_kde(sample) ps = kde(xs) pmf = make_pmf(xs, ps) pmf /= pmf.sum() return pmf**Exercise:** Use this function to make a Pmf that represents the prior distribution for Showcase 2, and plot it.# Solution goes here # Solution goes hereDistribution of errorTo update these priors, we have to answer these questions:* What data should we consider and how should we quantify it?* Can we compute a likelihood function; that is, for each hypothetical price, can we compute the conditional likelihood of the data?To answer these questions, I will model the contestant as a price-guessing instrument with known error characteristics. In other words, when the contestant sees the prizes, they guess the price of each prize --- ideally without taking into consideration the fact that the prize is part of a showcase --- and add up the prices. Let’s call this total `guess`.Under this model, the question we have to answer is, “If the actual price is `price`, what is the likelihood that the contestant’s estimate would be `guess`?”Equivalently, if we define `error = guess - price`, we can ask, “What is the likelihood that the contestant’s estimate is off by `error`?”To answer this question, I'll use the historical data again. For each showcase in the dataset, let's look at the difference between the contestant's bid and the actual price:sample_diff1 = df['Bid 1'] - df['Showcase 1'] sample_diff2 = df['Bid 2'] - df['Showcase 2']To visualize the distribution of these differences, we can use KDE again.xs = np.linspace(-40000, 20000, 61) kde_diff1 = make_kde(xs, sample_diff1) kde_diff2 = make_kde(xs, sample_diff2) kde_diff1.plot(label='Diff 1') kde_diff2.plot(label='Diff 2') plt.xlabel('Difference in value ($)') plt.ylabel('Probability') plt.title('Difference between bid and actual value') plt.legend();It looks like the bids are too low more often than too high, which makes sense. Remember that under the rules of the game, you lose if you overbid, so contestants probably underbid to some degree deliberately.Here is the mean and standard deviation of `Diff` for Player 1.mean_diff1 = sample_diff1.mean() std_diff1 = sample_diff1.std() mean_diff1, std_diff1We can use the observed distribution of differences to model the contestant's distribution of errors.This step is a little tricky because we don’t actually know the contestant’s guesses; we only know what they bid.So we have to make some assumptions:* I'll assume that contestants underbid because they are being strategic, and that on average their guesses are accurate. In other word, the mean of their errors is 0.* But I'll assume that the spread of the differences reflects the actual spread of their errors. 
So, I'll use the standard deviation of the differences as the standard deviation of their errors.Based on these assumptions, I'll make a normal distribution with parameters 0 and `std_diff1`.from scipy.stats import norm error_dist1 = norm(0, std_diff1) error_dist1We'll use this distribution to do the update. UpdateSuppose you are Player 1. You see the prizes in your showcase and your estimate of the total price is $23,000.For each hypothetical price in the prior distribution, I'll subtract away your guess.The result is your error under each hypothesis.guess1 = 23000 xs = prior1.index error1 = guess1 - xsNow suppose you know based on past performance that your estimation error is well modeled by `error_dist1`.Under that assumption we can compute the likelihood of your estimate under each hypothesis.likelihood1 = error_dist1.pdf(error1)And we can use that likelihood to update the prior.posterior1 = prior1 * likelihood1 posterior1 /= posterior1.sum()Here's what the posterior distribution looks like:prior1.plot(color='gray', label='Prior 1') posterior1.plot(color='C0', label='Posterior 1') plt.xlabel('Showcase value ($)') plt.ylabel('Probability') plt.title('Prior and posterior distribution of showcase value') plt.legend();Because your estimate is in the lower end of the range, the posterior distribution has shifted to the left. We can use the posterior mean to see by how much.def pmf_mean(pmf): """Compute the mean of a PMF. pmf: Series representing a PMF return: float """ return np.sum(pmf.index * pmf) pmf_mean(prior1), pmf_mean(posterior1)Before you saw the prizes, you expected to see a showcase with a value close to $30,000.After making an estimate of $23,000, you updated the prior distribution.Based on the combination of the prior and your estimate, you now expect the actual price to be about $26,000. **Exercise:** Now suppose you are Player 2. When you see your showcase, you estimate that the total price is $38,000.Use `diff2` to construct a normal distribution that represents the distribution of your estimation errors.Compute the likelihood of your estimate for each actual price and use it to update `prior2`.Plot the posterior distribution and compute the posterior mean. Based on your estimate, what do you expect the actual price of the showcase to be?# Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes hereProbability of winningNow that we have a posterior distribution for each player, let's think about strategy.First, from the point of view of Player 1, let's compute the probability that Player 2 overbids. To keep it simple, I'll use only the performance of past players, ignoring the estimated value of the showcase. The following function takes a sequence of past differences (bid minus actual price) and returns the fraction that overbid.def prob_overbid(sample_diff): """Returns the probability this player overbids. sample_diff: sequence of differences """ return np.mean(sample_diff > 0)Here's an estimate for the probability that Player 2 overbids.prob_overbid(sample_diff2)Now suppose Player 1 underbids by $5000.What is the probability that Player 2 underbids by more?The following function uses past performance to estimate the probability that a player underbids by more than a given amount, `diff`:def prob_worse_than(diff, sample_diff): """Probability the opponent's diff is worse than the given diff.
diff: how much the opponent is off by (always negative) sample_diff: sequence of differences for the opponent """ return np.mean(sample_diff < diff)Here's the probability that Player 2 underbids by more than $5000.prob_worse_than(-5000, sample_diff2)And here's the probability they are off by more than $10,000.prob_worse_than(-10000, sample_diff2)We can combine these functions to compute the probability that Player 1 wins, given the difference between their bid and the actual price:def compute_prob_win(diff, sample_diff): """Computes the probability of winning for a given diff. diff: how much your bid was off by sample_diff: sequence of differences for the opponent """ # if you overbid you lose if diff > 0: return 0 # if the opponent overbids, you win p1 = prob_overbid(sample_diff) # or if their bid is worse than yours, you win p2 = prob_worse_than(diff, sample_diff) return p1 + p2Here's the probability that you win, given that you underbid by $5000.compute_prob_win(-5000, sample_diff2)Now let's look at the probability of winning for a range of possible differences.xs = np.linspace(-30000, 5000, 121) ys = [compute_prob_win(x, sample_diff2) for x in xs] plt.plot(xs, ys) plt.xlabel('Difference between guess and actual price ($)') plt.ylabel('Probability of winning') plt.title('Player 1');If you underbid by $30,000, the chance of winning is about 30%, which is mostly the chance your opponent overbids.As your bid gets closer to the actual price, your chance of winning approaches 1.And, of course, if you overbid, you lose (even if your opponent also overbids). **Exercise:** Run the same analysis from the point of view of Player 2. Using the sample of differences from Player 1, compute:1. The probability that Player 1 overbids.2. The probability that Player 1 underbids by more than $5000.3. The probability that Player 2 wins, given that they underbid by $5000.Then plot the probability that Player 2 wins for a range of possible differences between their bid and the actual price.prob_overbid(sample_diff1) prob_worse_than(-5000, sample_diff1) compute_prob_win(-5000, sample_diff1) xs = np.linspace(-30000, 5000, 121) ys = [compute_prob_win(x, sample_diff1) for x in xs] plt.plot(xs, ys) plt.xlabel('Difference between guess and actual price ($)') plt.ylabel('Probability of winning') plt.title('Player 2');Decision analysisIn the previous section we computed the probability of winning, given that we have underbid by a particular amount.In reality the contestants don't know how much they have underbid by, because they don't know the actual price.But they do have a posterior distribution that represents their beliefs about the actual price, and they can use that to estimate their probability of winning with a given bid.The following function takes a possible bid, a posterior distribution of actual prices, and a sample of differences for the opponent.It loops through the hypothetical prices in the posterior distribution and for each price:1. Computes the difference between the bid and the hypothetical price.2. Computes the probability that the player wins, given that difference.3. Adds up the weighted sum of the probabilities, where the weights are the probabilities in the posterior distribution.def total_prob_win(bid, posterior, sample_diff): """Computes the total probability of winning with a given bid.
bid: your bid posterior: Pmf of showcase value sample_diff: sequence of differences for the opponent returns: probability of winning """ total = 0 for price, prob in posterior.items(): diff = bid - price total += prob * compute_prob_win(diff, sample_diff) return totalThis loop implements the law of total probability:$P(win) = \sum_{price} P(price) ~ P(win ~|~ price)$total_prob_win(25000, posterior1, sample_diff2) bids = posterior1.index probs = [total_prob_win(bid, posterior1, sample_diff2) for bid in bids] prob_win_series = pd.Series(probs, index=bids) prob_win_series.plot(color='C1') plt.xlabel('Bid ($)') plt.ylabel('Probability of winning') plt.title('Player 1');And here's the bid that maximizes your chance of winning.prob_win_series.idxmax()Recall that your estimate was $23,000.After using your estimate to compute the posterior distribution, the posterior mean is about $26,000.But the bid that maximizes your chance of winning is $21,000. **Exercise:** Do the same analysis for Player 2.# Solution goes here # Solution goes here # Solution goes hereMaximizing expected gainIn the previous section we computed the bid that maximizes your chance of winning.And if that's your goal, the bid we computed is optimal.But winning isn't everything.Remember that if your bid is off by $250 or less, you win both showcases.So it might be a good idea to increase your bid a little: it increases the chance you overbid and lose, but it also increases the chance of winning both showcases.Let's see how that works out.The following function computes how much you will win, on average, given your bid, the actual price, and a sample of errors for your opponent.def compute_gain(bid, price, sample_diff): """Computes expected gain given a bid and actual price. bid: number price: actual price sample_diff: sequence of differences for the opponent """ diff = bid - price prob = compute_prob_win(diff, sample_diff) # if you are within 250 dollars, you win both showcases if -250 <= diff <= 0: return 2 * price * prob else: return price * probFor example, if the actual price is $35000 and you bid $30000, you will win about $23,600 worth of prizes on average.compute_gain(30000, 35000, sample_diff2)In reality we don't know the actual price, but we have a posterior distribution that represents what we know about it.By averaging over the prices and probabilities in the posterior distribution, we can compute the "expected gain" for a particular bid.def expected_gain(bid, posterior, sample_diff): """Computes the expected return of a given bid. bid: your bid posterior: distribution of showcase values sample_diff: distribution of differences for the opponent """ total = 0 for price, prob in posterior.items(): total += prob * compute_gain(bid, price, sample_diff) return totalFor the posterior we computed earlier, based on an estimate of $23,000, the expected gain for a bid of $21,000 is about $16,900.expected_gain(21000, posterior1, sample_diff2)But can we do any better?
To find out, we can loop through a range of bids and find the one that maximizes expected gain.bids = posterior1.index gains = [expected_gain(bid, posterior1, sample_diff2) for bid in bids] expected_gain_series = pd.Series(gains, index=bids)Here are the results.expected_gain_series.plot(color='C1') plt.xlabel('Bid ($)') plt.ylabel('Expected gain ($)') plt.title('Player 1');And here is the optimal bid.expected_gain_series.idxmax()With that bid, the expected gain is about $17,400.expected_gain_series.max()Recall that the estimated value of the prizes was $23,000.The bid that maximizes the chance of winning is $21,000.And the bid that maximizes the expected gain is $22,000. **Exercise:** Do the same analysis for Player 2.# Solution goes here # Solution goes here # Solution goes hereClassification with FACET: Prediabetes Study***FACET is composed of the following key components:- **Model Inspection** FACET introduces a new algorithm to quantify dependencies and interactions between features in ML models. This new tool for human-explainable AI adds a new, global perspective to the observation-level explanations provided by the popular [SHAP](https://shap.readthedocs.io/en/latest/) approach. To learn more about FACET's model inspection capabilities, see the getting started example below.- **Model Simulation** FACET's model simulation algorithms use ML models for *virtual experiments* to help identify scenarios that optimise predicted outcomes. To quantify the uncertainty in simulations, FACET utilises a range of bootstrapping algorithms including stationary and stratified bootstraps. For an example of FACET’s bootstrap simulations, see the getting started example below. - **Enhanced Machine Learning Workflow** FACET offers an efficient and transparent machine learning workflow, enhancing [scikit-learn]( https://scikit-learn.org/stable/index.html)'s tried and tested pipelining paradigm with new capabilities for model selection, inspection, and simulation. FACET also introduces [sklearndf](https://github.com/BCG-Gamma/sklearndf), an augmented version of *scikit-learn* with enhanced support for *pandas* dataframes that ensures end-to-end traceability of features. *****Context**Prediabetes is a treatable condition that leads to many health complications and eventually type 2 diabetes. Identification of individuals at risk of prediabetes can improve early intervention and provide insights into those interventions that work best.Using a cohort of healthy (*n*=2847) and prediabetic (*n*=1509) patients derived from the [NHANES 2013-14 U.S. cross-sectional survey](https://wwwn.cdc.gov/nchs/nhanes/Search/DataPage.aspx?Component=Examination&CycleBeginYear=2013) we aim to create a classifier for prediabetes. For further details on data sources, definitions and the study cohort please see the Appendix ([Data source and study cohort](Data-source-and-study-cohort)).Utilizing FACET, we will do the following:1. create a pipeline to identify a well-performing classifier.2. perform model inspection and simulation to gain understanding and insight into key factors predictive of prediabetes.*****Tutorial outline**1. [Required imports](Required-imports)2. [Preprocessing and initial feature selection](Preprocessing-and-initial-feature-selection)3. [Selecting a learner using FACET ranker](Selecting-a-learner-using-FACET-ranker)4. [Using FACET for advanced model inspection](Using-FACET-for-advanced-model-inspection)5.
[FACET univariate simulator: the impact of waist to height ratio](FACET-univariate-simulator:-the-impact-of-waist-to-height-ratio)6. [Summary](Summary)7. [What can you do next?](What-can-you-do-next?)8. [Appendix](Appendix)# this cell's metadata contains # "nbsphinx": "hidden" so it is hidden by nbsphinx # ignore irrelevant warnings that would affect the output of this tutorial notebook import warnings import tableone # need to import this to suppress an IPython warning triggered by tableone warnings.filterwarnings("ignore", category=UserWarning, message=r".*Xcode_8\.3\.3") warnings.filterwarnings("ignore", message=r".*`should_run_async` will not call `transform_cell`") warnings.filterwarnings("ignore", message=r".*`np\..*` is a deprecated alias") # set global options for matplotlib import matplotlib import matplotlib.pyplot as plt matplotlib.rcParams["figure.figsize"] = (16.0, 8.0) matplotlib.rcParams["figure.dpi"] = 72Required imports In order to run this notebook, we will import not only the FACET package, but also other packages useful to solve this task. Overall, we can break down the imports into three categories: 1. Common packages (pandas, matplotlib, etc.)2. Required FACET classes (inspection, selection, validation, simulation, etc.)3. Other BCG GAMMA packages which simplify pipelining (sklearndf, see on [GitHub](https://github.com/orgs/BCG-Gamma/sklearndf/)) and support visualization (pytools, see on [GitHub](https://github.com/BCG-Gamma/pytools)) when using FACET **Common package imports**import numpy as np import pandas as pd import matplotlib.pyplot as plt import shap import seaborn as sns import tableone from sklearn.compose import make_column_selector from sklearn.model_selection import RepeatedKFold**FACET imports**from facet.data import Sample from facet.inspection import LearnerInspector from facet.selection import LearnerRanker, LearnerGrid from facet.validation import BootstrapCV from facet.data.partition import ContinuousRangePartitioner from facet.simulation import UnivariateProbabilitySimulator from facet.simulation.viz import SimulationDrawer from facet.crossfit import LearnerCrossfit**sklearndf imports**Instead of using the "regular" scikit-learn package, we are going to use sklearndf (see on [GitHub](https://github.com/orgs/BCG-Gamma/sklearndf/)). sklearndf is an open source library designed to address a common issue with scikit-learn: the outputs of transformers are numpy arrays, even when the input is a data frame. However, to inspect a model it is essential to keep track of the feature names. sklearndf retains all the functionality available through scikit-learn plus the feature traceability and usability associated with Pandas data frames. Additionally, the names of all your favourite scikit-learn functions are the same except for `DF` on the end. 
For example, the standard scikit-learn import:`from sklearn.pipeline import Pipeline`becomes:`from sklearndf.pipeline import PipelineDF`from sklearndf.pipeline import PipelineDF, ClassifierPipelineDF from sklearndf.classification import RandomForestClassifierDF from sklearndf.classification.extra import LGBMClassifierDF from sklearndf.transformation import ( ColumnTransformerDF, OneHotEncoderDF, SimpleImputerDF, ) from sklearndf.transformation.extra import BorutaDF**pytools imports**pytools (see on [GitHub](https://github.com/BCG-Gamma/pytools)) is an open source library containing general machine learning and visualization utilities, some of which are useful for visualising the advanced model inspection capabilities of FACET.from pytools.viz.dendrogram import DendrogramDrawer, LinkageTree from pytools.viz.matrix import MatrixDrawerPreprocessing and initial feature selection First we need to load our prediabetes data and create a simple preprocessing pipeline. For those interested some initial EDA can be found in the Appendix ([Exploratory Data Analysis](Exploratory-Data-Analysis-(EDA))).# load the prepared data frame prediab_df = pd.read_csv("pre_diab_nhanes.csv") # create a couple of new interesting features prediab_df["SBP_to_DBP"] = prediab_df["Average_SBP"] / prediab_df["Average_DBP"] prediab_df["Waist_to_hgt"] = ( prediab_df["Waist_Circumference"] / prediab_df["Standing_Height"] ) # make clear based on dtypes these two features are categorical prediab_df["General_health"] = prediab_df["General_health"].astype("object") prediab_df["Healthy_diet"] = prediab_df["Healthy_diet"].astype("object") # have a look prediab_df.head() # to ensure a quick run we will use a random sample of 1000 observations prediab_df = prediab_df.sample(n=1000, random_state=42)For easier data management we will create a sample object using FACET's `Sample` class, which allows us to: - Quickly access the target vs. features- Pass our data into sklearndf pipelines- Pass information to other FACET functions# create a FACET sample object prediab = Sample( observations=prediab_df, feature_names=prediab_df.drop(columns=["Pre_diab"]).columns, target_name="Pre_diab", )Next we create a minimum preprocessing pipeline which based on our initial EDA ([Exploratory Data Analysis](Exploratory-Data-Analysis-(EDA))) needs to address the following:1. Simple imputation for missing values in both continuous and categorical features2. One-hot encoding for categorical featuresWe will use the sklearndf wrappers for scikit-learn functions such as `SimpleImputerDF` in place of `SimpleImputer`, `OneHotEncoderDF` in place of `OneHotEncoder`, and so on.# for categorical features we will use the mode as the imputation value and also one-hot encode preprocessing_categorical = PipelineDF( steps=[ ("imputer", SimpleImputerDF(strategy="most_frequent", fill_value="")), ("one-hot", OneHotEncoderDF(sparse=False, handle_unknown="ignore")), ] ) # for numeric features we will impute using the median preprocessing_numerical = SimpleImputerDF(strategy="median") # put the pipeline together preprocessing_features = ColumnTransformerDF( transformers=[ ( "categorical", preprocessing_categorical, make_column_selector(dtype_include=object), ), ( "numerical", preprocessing_numerical, make_column_selector(dtype_include=np.number), ), ] )Next, we perform some initial feature selection using Boruta, a recent approach shown to have quite good performance. The Boruta algorithm removes features that are no more predictive than random noise. 
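As a rough sketch of the shadow-feature idea (illustrative only, not the `BorutaDF`/boruta implementation used below), each real feature is compared against shuffled "shadow" copies of the features, and only features whose importance beats the strongest shadow survive. The toy data in this example is synthetic:

import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(42)
X = rng.normal(size=(200, 5))  # synthetic feature matrix: only the first two columns matter
y = (X[:, 0] + 0.5 * X[:, 1] + rng.normal(scale=0.5, size=200) > 0).astype(int)

# append a shuffled "shadow" copy of every column
X_shadow = np.apply_along_axis(rng.permutation, 0, X)
X_aug = np.hstack([X, X_shadow])

forest = RandomForestClassifier(max_depth=5, random_state=42).fit(X_aug, y)
real_importance = forest.feature_importances_[:X.shape[1]]
shadow_max = forest.feature_importances_[X.shape[1]:].max()

# keep features that beat the strongest shadow; Boruta repeats this over many
# iterations with fresh shuffles and applies a statistical test rather than a
# single comparison
print(real_importance > shadow_max)

In practice the `BorutaDF` step in the pipeline below handles the iteration, the shadow regeneration, and the accept/reject decisions for us.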
If you are interested further, please see this [article](https://www.jstatsoft.org/article/view/v036i11).The `BorutaDF` transformer in our sklearndf package provides easy access to this powerful method. The approach relies on a tree-based learner, usually a random forest. For settings, a `max_depth` of between 3 and 7 is typically recommended, and here we rely on the default setting of 5. However, as this depends on the number of features and the complexity of interactions one could also explore the sensitivity of feature selection to this parameter. The number of trees is automatically managed by the Boruta feature selector argument `n_estimators="auto"`.We also use parallelization for the random forest using `n_jobs` to accelerate the Boruta iterations.# create the pipeline for Boruta boruta_feature_selection = PipelineDF( steps=[ ("preprocessing", preprocessing_features), ( "boruta", BorutaDF( estimator=RandomForestClassifierDF( max_depth=5, n_jobs=-3, random_state=42 ), n_estimators="auto", random_state=42, verbose=False, ), ), ] ) # run feature selection using Boruta and report those selected boruta_feature_selection.fit(X=prediab.features, y=prediab.target) selected = boruta_feature_selection.feature_names_original_.unique() selectedBoruta identified 10 features (out of a potential 47) that we will retain in our FACET sample object for classification. Note that this feature selection process could be included in a general preprocessing pipeline, however due to the computation involved, we have utilized Boruta here as an initial one-off processing step to narrow down the features for our classifier development.# update FACET sample object to only those features Boruta identified as useful prediab_initial_features = prediab.keep(feature_names=selected)Selecting a learner using FACET rankerFACET implements several additional useful wrappers which further simplify comparing and tuning a larger number of models and configurations: - `LearnerGrid`: allows you to pass a learner pipeline (i.e., classifier + any preprocessing) and a set of hyperparameters- `LearnerRanker`: multiple LearnerGrids can be passed into this class as a list - this allows tuning hyperparameters both across different types of learners in a single step and ranks the resulting models accordinglyThe following learners and hyperparameter ranges will be assessed using 10 repeated 5-fold cross-validation:1. **Random forest**: with hyperparameters - max_depth: [4, 5, 6] - min_samples_leaf: [8, 11, 15] 2. **Light gradient boosting**: with hyperparameters - max_depth: [4, 5, 6] - min_samples_leaf: [8, 11, 15] Note if you want to see a list of hyperparameters you can use `classifier_name().get_params().keys()` where `classifier_name` could be for example `RandomForestClassifierDF` and if you want to see the default values, just use `classifier_name().get_params()`.Finally, for this exercise we will use AUC as the performance metric for scoring and ranking our classifiers (the default is accuracy). Note that ranking uses the average performance minus two times the standard deviation, so that we consider both the average performance and variability when selecting a classifier. First, we specify the classifiers we want to train using `ClassifierPipelineDF` from sklearndf. 
Note here we also include the feature preprocessing steps we created earlier.# random forest learner rforest_clf = ClassifierPipelineDF( preprocessing=preprocessing_features, classifier=RandomForestClassifierDF(random_state=42), ) # light gradient boosting learner lgbm_clf = ClassifierPipelineDF( preprocessing=preprocessing_features, classifier=LGBMClassifierDF(random_state=42) )Then we create a list of learner grids where each learner grid is created using `LearnerGrid` and allows us to associate a `ClassifierPipelineDF` with a specified set of hyperparameters via the `learner_parameters` argument. Note this structure allows us to easily include additional classifiers and hyperparameters.classifier_grid = [ LearnerGrid( pipeline=rforest_clf, learner_parameters={ "max_depth": [4, 5, 6], "min_samples_leaf": [8, 11, 15], }, ), LearnerGrid( pipeline=lgbm_clf, learner_parameters={ "max_depth": [4, 5, 6], "min_samples_leaf": [8, 11, 15], }, ), ]We now fit the grid defined above using the `LearnerRanker`, which will run a grid search (or random search if defined) using 10 repeated 5-fold cross-validation on our selected set of features from Boruta.clf_ranker = LearnerRanker( grids=classifier_grid, cv=RepeatedKFold(n_splits=5, n_repeats=10, random_state=42), n_jobs=-3, scoring="roc_auc", ).fit(prediab_initial_features)[LightGBM] [Warning] Unknown parameter: min_samples_leafWe can see how each model scored using the `summary_report()` method of the `LearnerRanker`.# let's look at performance for the top ranked classifiers clf_ranker.summary_report()We can see, based on our learner ranker, that we have selected a Random Forest algorithm that achieved a mean ROC AUC of 0.72 with a SD of 0.03. Using FACET for advanced model inspectionThe [SHAP approach](http://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions) has become the standard method for model inspection. SHAP values are used to explain the additive contribution of each feature to the prediction for each observation (i.e., explain **individual** predictions).The FACET `LearnerInspector` computes SHAP values for each crossfit (i.e., a CV fold or bootstrap resample) using the best model identified by the `LearnerRanker`. The FACET `LearnerInspector` then provides advanced model inspection through new SHAP-based summary metrics for understanding pairwise feature redundancy and synergy. Redundancy and synergy are calculated using a new algorithm to understand model predictions from a **global perspective** to complement local SHAP.The definitions of synergy and redundancy are as follows:- **Synergy** The degree to which the model combines information from one feature with another to predict the target. For example, let's assume we are predicting cardiovascular health using age and gender and the fitted model includes a complex interaction between them. This means these two features are synergistic for predicting cardiovascular health. Further, both features are important to the model and removing either one would significantly impact performance. Let's assume age brings more information to the joint contribution than gender. This asymmetric contribution means the synergy for (age, gender) is less than the synergy for (gender, age). To think about it another way, imagine the prediction is a coordinate you are trying to reach. From your starting point, age gets you much closer to this point than gender, however, you need both to get there.
Synergy reflects the fact that gender gets more help from age (higher synergy from the perspective of gender) than age does from gender (lower synergy from the perspective of age) to reach the prediction. *This leads to an important point: synergy is a naturally asymmetric property of the global information two interacting features contribute to the model predictions.* Synergy is expressed as a percentage ranging from 0% (full autonomy) to 100% (full synergy).- **Redundancy** The degree to which a feature in a model duplicates the information of a second feature to predict the target. For example, let's assume we had house size and number of bedrooms for predicting house price. These features capture similar information as the more bedrooms the larger the house and likely a higher price on average. The redundancy for (number of bedrooms, house size) will be greater than the redundancy for (house size, number of bedrooms). This is because house size "knows" more of what number of bedrooms does for predicting house price than vice-versa. Hence, there is greater redundancy from the perspective of number of bedrooms. Another way to think about it is removing house size will be more detrimental to model performance than removing number of bedrooms, as house size can better compensate for the absence of number of bedrooms. This also implies that house size would be a more important feature than number of bedrooms in the model. *The important point here is that like synergy, redundancy is a naturally asymmetric property of the global information feature pairs have for predicting an outcome.* Redundancy is expressed as a percentage ranging from 0% (full uniqueness) to 100% (full redundancy).Note that cases can apply at the same time so a feature pair can use some information synergistically and some information redundantly.The FACET `LearnerInspector` can calculate all of this with a single method call, but also offers methods to access the intermediate results of each step. A lightweight visualization framework is available to render the results in different styles.SHAP values from the `LearnerInspector` can also be used with the SHAP package plotting functions for sample and observation level SHAP visualizations, such as SHAP distribution plots, dependency plots, force plots and waterfall plots.# run inspector clf_inspector = LearnerInspector( n_jobs=-3, verbose=False, ).fit(crossfit=clf_ranker.best_model_crossfit_)Feature importanceFeature importance has many ways of being measured. Here we utilize the FACET implementation based on the `LearnerInspector`. Each feature is ranked according to the mean SHAP value for that feature. This plot is paired with a standard SHAP distribution plot for features to see if there is any directional tendency for the associations.# FACET feature importance f_importance = clf_inspector.feature_importance() plt.subplot(1, 2, 1) f_importance.sort_values().plot.barh() # get some info for standard SHAP model inspection shap_data = clf_inspector.shap_plot_data() # standard SHAP summary plot using the shap package plt.subplot(1, 2, 2) shap.summary_plot(shap_values=shap_data.shap_values, features=shap_data.features, show=False, plot_size=(16.0, 8.0)) plt.tight_layout()Based on the feature importance's we can see the top five features are age, RBC count, waist to height ratio, average systolic blood pressure and waist circumference. Inspection of the SHAP value distributions does not provide any indication of a general direction of association for any features. 
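If you want to cross-check the FACET ranking against the raw SHAP output, a mean-absolute-SHAP ranking can be computed directly from the `shap_plot_data()` result. This is a minimal sketch that assumes `shap_data.features` is a pandas DataFrame (as used in the summary plot above) and that `shap_values` comes back as a single 2-D array for this binary classifier; if your version returns a per-class list, take the array for the positive class first:

import numpy as np
import pandas as pd

shap_values = shap_data.shap_values
if isinstance(shap_values, list):
    # some versions return one array per class; assume index 1 is the positive class
    shap_values = shap_values[1]

# mean |SHAP| per feature, largest first, as a rough importance ranking
manual_importance = pd.Series(
    np.abs(shap_values).mean(axis=0),
    index=list(shap_data.features.columns),
).sort_values(ascending=False)
manual_importance.head()

The exact numbers need not match FACET's `feature_importance()` output, but the ordering of the leading features should typically look similar.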
Synergy# synergy heatmap synergy_matrix = clf_inspector.feature_synergy_matrix() MatrixDrawer(style="matplot%").draw(synergy_matrix, title="Feature synergies")To interpret the synergy matrix, the first feature in a pair is the row ("perspective from"), and the second feature the column. For example, for (`SBP_to_DBP`, `RBC_count`) from the perspective of `SBP_to_DBP` we find that 4.4% of the information is combined with `RBC_count` to predict prediabetes risk. This represents an example of little synergy between the feature pair from the perspective of `SBP_to_DBP`.One interesting observation is that looking at the column for `Age`, we see from the perspective of other features such as `BMI`, `RBC count` and `SBP_to_DBP`, we see values ranging up to 8.4% suggesting a small amount of synergy. This is consistent with the idea that `Age` is a strong independent feature (it has the highest feature importance) and that contributions of `BMI`, `RBC count` and `SBP/DBP` to predicting prediabetes are partly enabled by `Age`.We can inspect synergistic pairs of features more deeply using the *SHAP dependence plots* offered by the SHAP package. The `LearnerInspector` provides all data required for creating these and other plots through method`shap_plot_data()`:spd = clf_inspector.shap_plot_data() shap.dependence_plot("SBP_to_DBP", spd.shap_values, spd.features, interaction_index="Age") shap.dependence_plot("BMI", spd.shap_values, spd.features, interaction_index="Age")Redundancy# redundancy heatmap redundancy_matrix = clf_inspector.feature_redundancy_matrix() MatrixDrawer(style="matplot%").draw(redundancy_matrix, title="Feature redundancies")As with synergy, the matrix row is the "perspective from" feature in the row-column feature pair. Let's take `Hematocrit` and `RBC count` as our features of interest. We can see that from the perspective of `Hematocrit` 35% of the information is duplicated with `RBC count` to predict prediabetes, and vice versa.A second interesting and perhaps expected attribute of the heatmap is the apparent clustering of `BMI`, `Waist Circumference` and `Waist/Height`. Intuitively it makes sense that these features would have varying degrees of redundancy among them given they are physically related and the relationships with prediabetes risk. Feature clusteringAs detailed above redundancy and synergy for a feature pair is from the "perspective" of one of the features in the pair, and so yields two distinct values. However, a symmetric version can also be computed that provides not only a simplified perspective but allows the use of (1 - metric) as a feature distance. With this distance hierarchical, single linkage clustering is applied to create a dendrogram visualization. This helps to identify groups of low distance, features which activate "in tandem" to predict the outcome. Such information can then be used to either reduce clusters of highly redundant features to a subset or highlight clusters of highly synergistic features that should always be considered together.For this example, let's apply clustering to redundancy to see how the apparent grouping observed in the heatmap appears in the dendrogram. Ideally, we want to see features only start to cluster as close to the right-hand side of the dendrogram as possible. 
This implies all features in the model are contributing uniquely to our predictions.# redundancy dendrogram dd_redundancy = clf_inspector.feature_redundancy_linkage() DendrogramDrawer().draw(title="Redundancy linkage", data=dd_redundancy)The dendrogram shows that `Waist Circumference` and `Waist/Height` cluster together first, which then cluster with `BMI` as well. This is a much clearer representation of the cluster of redundant features we observed in the heatmap. Considering `BMI`, `Waist Circumference` and `Waist/Height`:**What might we infer from synergy, redundancy and redundancy feature clustering?**1. `BMI`, `Waist Circumference` and `Waist/Height` form a small cluster of redundant features. This seems reasonable: * `Waist Circumference` is included in the calculation of `Waist/Height` * we might expect `BMI` to capture similar information about excess body mass as higher `Waist Circumference` and `Waist/Height`2. We saw little synergy between features. We might have expected apriori to find some interesting synergies between diet, exercise, sleep and body composition. Of course, the model needs to identify these relationships from them to be reflected in the synergy metric(s).**What action(s) might we take?**Given the redundancy that appears between `BMI`, `Waist Circumference` and `Waist/Height`, we could look to eliminate one or two of these features from the model. For convenience when working in a non-notebook environment, all of the `Drawer`'s provided by the [pytools](https://github.com/BCG-Gamma/pytools) package also support a `style='text'` flag.DendrogramDrawer(style="text").draw(title="Redundancy linkage", data=dd_redundancy)****************************** Redundancy linkage ****************************** Age 31% -------------------------------------------------------\ Average_SBP 10% -----------------------------------------------------\_/-\ SBP_to_DBP 6% -----------------------------------------------------/ | RBC_count 12% --------------------------------------\_______________ | Hematocrit 5% --------------------------------------/ \\ |_ Uric_acid 8% ------------------------------------------------------/|\| Gamma_glutamyl_ 6% -------------------------------------------------------/|/ Waist_to_hgt 11% --------------------\____ | Waist_Circumfer 8% --------------------/ \------------------------------/ BMI 4% -------------------------/Removing redundant featuresRecall the redundancy dendrogram above where we saw a clear cluster of features with redundancy; `Waist/Height`, `BMI`, and `Waist Circumference`.- assess if the features of the model are unique, i.e. 
not redundant with other features- decide which features to discard, combine, or modify to increase the uniqueness of important features in the modelBefore we proceed to looking at SHAP values for individual predictions and perform a univariate simulation, let's eliminate two partially redundant features - we will choose to keep `Waist/Height` ratio and drop `BMI` and `Waist Circumference`.# drop redundant features from our FACET sample object prediab_no_redundant_feat = prediab_initial_features.drop( feature_names=["BMI", "Waist_Circumference"] ) # re-run ranker without redundant features clf_ranker = LearnerRanker( grids=classifier_grid, cv=RepeatedKFold(n_splits=5, n_repeats=10, random_state=42), n_jobs=-3, scoring="roc_auc", ).fit(prediab_no_redundant_feat) clf_ranker.summary_report() # run inspector inspector_no_redun = LearnerInspector( n_jobs=-3, verbose=False, ).fit(crossfit=clf_ranker.best_model_crossfit_) # redundancy dendrogram dd_redundancy = inspector_no_redun.feature_redundancy_linkage() DendrogramDrawer().draw(title="HD set features", data=dd_redundancy)Now with the removal of `BMI` and `Waist Circumference` we can see the feature clustering starts much further to the right. We can also check the best ranked model after removing redundant features.clf_ranker.best_model_.classifierFACET univariate simulator: the impact of waist to height ratioAnother advantage of FACET is the ability to quickly instigate and run univariate simulation.Simulation enables us to gain insight into what value(s) of this ratio might minimize the likelihood of prediabetes.As the basis for the simulation, we divide the feature into relevant partitions: - We use FACET's `ContinuousRangePartitioner` to split the range of observed values of waist to height ratio into intervals of equal size. Each partition is represented by the central value of that partition. - For each partition, the simulator creates an artificial copy of the original sample assuming the variable to be simulated has the same value across all observations - which is the value representing the partition. Using the best `LearnerCrossfit` acquired from the ranker, the simulator now re-predicts all targets using the models trained for all folds and determines the average value of the target variable resulting from this.- The FACET `SimulationDrawer` allows us to visualise the result; both in a matplotlib and a plain-text styleFinally, because FACET can use bootstrap cross validation, we can create a crossfit from our previous `LearnerRanker` best model to perform the simulation so we can quantify the uncertainty by using bootstrap confidence intervals.# create a bootstrap CV crossfit for simulation using best model boot_crossfit = LearnerCrossfit( pipeline=clf_ranker.best_model_, cv=BootstrapCV(n_splits=1000, random_state=42), n_jobs=-3, verbose=False, ).fit(sample=prediab_no_redundant_feat) # set up and run a simulation sim_feature = "Waist_to_hgt" waist_to_hgt_simulator = UnivariateProbabilitySimulator( crossfit=boot_crossfit, n_jobs=-1 ) waist_to_hgt_partitions = ContinuousRangePartitioner() waist_to_hgt_simulation = waist_to_hgt_simulator.simulate_feature( feature_name=sim_feature, partitioner=waist_to_hgt_partitions ) # visualize the results SimulationDrawer().draw(data=waist_to_hgt_simulation, title=sim_feature)As we can see the simulation shows that higher waist to height ratios are associated with an increased risk of prediabetes. 
We could also suggest that keeping a person's waist to height ratio below 0.52 may reduce the likelihood of prediabetes from around 34% to 30%.# can also get a print out of simulation results SimulationDrawer("text").draw(data=waist_to_hgt_simulation, title=sim_feature)********************************* Waist_to_hgt ********************************* probability(1): Baseline = 0.342 Partition 2.5th percentile Median 97.5th percentile ========= ================ ========= ================= 0.42 0.261 0.302 0.34 0.44 0.259 0.298 0.335 0.46 0.26 0.297 0.333 0.48 0.261 0.297 0.332 0.5 0.26 0.297 0.332 0.52 0.261 0.296 0.33 0.54 0.27 0.307 0.343 0.56 0.27 0.308 0.346 0.58 0.32 0.362 0.408 0.6 0.317 0.356 0.396 0.62 0.323 0.363 0.402 0.64 0.322 0.363 0.405 0.66 0.334 0.373 0.417 0.68 [...]SummaryWith the capabilities offered by FACET we were able to:1. Identify a learner with performance comparable to models in the literature.2. Utilize advanced the SHAP value capabilities (synergy and redundancy) to identify additional features that could be removed (i.e., BMI and waist circumference removed in favour of waist to height ratio) and whether any features had strong synergistic effects - which they did not.3. Simulate the effect of changes in waist to height ratio on the likelihood of being prediabetic. What can you do next?There are several next/alternative steps that could be taken:1. Utilize methods to deal with class imbalance and see if it improves the model.2. Adding more features! The NHANES data is a treasure trove of information.3. Retain diabetic patients and convert it into a multi-class learning problem.4. What would happen if we applied survey weights when constructing a learner?5. Further investigation of feature engineering. One could also look at different sets of measurements such as the bio-profile and perform dimension reduction first via PCA or some other method.6. Other learners such as SVC, LDA, Elastic-Net, CNN.7. More sophisticated imputation for missing values: the assumption of MAR might not hold, as those with worse health and thus more at risk of prediabetes may be more likely not to disclose poor health characteristics. Methods enabled by IterativeImputer could be used or even KNNImputer. Also feature engineering could be done post imputation in the pipeline, so values such as ratios are consistent. Appendix Data source and study cohort **Introduction** Prediabetes is a treatable condition that leads to many health complications, including eventually type 2 diabetes. Prediabetes has become an epidemic worldwide and is increasing in prevalence. As a largely asymptomatic condition, screening for prediabetes can be extremely challenging. However, early intervention, especially with lifestyle changes has been shown as effective in treating prediabetes. Accurate prediction/identification of those individuals at risk of prediabetes can improve early intervention and may provide insights into those interventions that work best. The current standard of care is a CDC prediabetes risk [screening tool](https://www.cdc.gov/diabetes/prevention/pdf/Prediabetes-Risk-Test-Final.pdf). **Data source** The dataset used in this tutorial is derived from the [National Health and Nutrition Examination Survey (NHANES) 2013-14 cross-sectional survey](https://wwwn.cdc.gov/nchs/nhanes/Search/DataPage.aspx?Component=Examination&CycleBeginYear=2013). 
In brief, NHANES collects demographic, socioeconomic, dietary, health, medical, dental, physiological and laboratory data on a nationally representative sample of noninstitutionalized, civilian United States residents. Please note the set-up for this data loosely follows the approach in [De Silva et al](https://pubmed.ncbi.nlm.nih.gov/31889178/).**Patient cohort** In the NHANES data sub-sets of those surveyed may undergo a plasma glucose (FPG) test, oral glucose tolerancetest (OGTT), or have glycated haemoglobin (HbA1c) measured. Diabetic patients were defined as those with any of the following: FPG >= 126 mg/dl, OGTT > 200 mg/dl, HbA1c > 6.4% or a Doctor diagnosed diabetes. The created dataset contains selected information for 4356 patients aged 20 years or over who were not considered diabetic or who were not pregnant or suspected to be pregnant at the time of the survey.**Learning target: prediabetes status** Using any of the available FPG, OGTT and HbA1c tests we defined patients as prediabetic where any of the following was satisfied: FPG 100–125 mg/dl, OGTT 140–200 mg/dl, or HbA1c 5.7–6.4%. Among this cohort 35% were prediabetic (n=1509).**Initial features** The following tables provides an overview of the 37 features included in the example dataset.|Instrument |Data File Name (File) | NHANES Field | Description | Dataset name | Type || :-- | :-- | :-- | :-- | :-- | :-- ||Demographics|Demographic Variables, Sample Weights (DEMO_H)|RIDAGEYR|Age in years at screening|Age|Numeric||Demographics|Demographic Variables, Sample Weights (DEMO_H)|RIAGENDR|Gender|Gender| Categorical||Examination|Body Measures (BMX_H)|BMXWT|Weight (kg)|Weight|Numeric||Examination|Body Measures (BMX_H)|BMXHT|Standing Height (cm)|Standing_Height|Numeric||Examination|Body Measures (BMX_H)|BMXWAIST|Waist Circumference (cm)|Waist_Circumference|Numeric||Examination|Body Measures (BMX_H)|BMXBMI|Body Mass Index (kg/m^2)|BMI|Numeric||Examination|Blood Pressure (BPX_H)|BPXSY1 to 4|Systolic: Blood pres mm Hg|Average_SBP| Numeric||Examination|Blood Pressure (BPX_H)|BPXDI1 to 4|Diastolic: Blood pres mm Hg|Average_DBP| Numeric||Questionnaire|Blood Pressure & Cholesterol (BPQ_H)|BPQ020|Ever told you had high blood pressure|High_BP| Categorical||Questionnaire|Diet Behavior & Nutrition (DBQ_H)|DBQ700|How healthy is the diet|Healthy_diet| Categorical||Questionnaire|Diabetes (DIQ_H)|DIQ175A|Family history|Family_hist_diab| Categorical||Questionnaire|Diabetes (DIQ_H)|DIQ172|Feel could be at risk for diabetes|Feel_at_risk_diab| Categorical||Questionnaire|Current Health Status (HSQ_H)|HSD010|General health condition|General_health| Categorical||Questionnaire|Medical Conditions (MCQ_H)|MCQ080|Doctor ever said you were overweight|Told_overweight| Categorical||Questionnaire|Physical Activity (PAQ_H)|PAQ605|Vigorous work activity|Vigorous_work_activity| Categorical||Questionnaire|Physical Activity (PAQ_H)|PAQ620|Moderate work activity|Moderate_work_activity| Categorical||Questionnaire|Physical Activity (PAQ_H)|PAQ635|Walk or bicycle|Walk_or_bicycle| Categorical||Questionnaire|Physical Activity (PAQ_H)|PAQ650|Vigorous recreational activities|Vigorous_rec_activity| Categorical||Questionnaire|Physical Activity (PAQ_H)|PAQ665|Moderate recreational activities|Moderate_rec_activity| Categorical||Questionnaire|Sleep Disorders (SLQ_H)|SLD010H|How much sleep do you get (hours)?|Sleep_hours| Numeric||Questionnaire|Sleep Disorders (SLQ_H)|SLQ050|Ever told doctor had trouble sleeping?|Trouble_sleeping| Categorical||Questionnaire|Sleep Disorders 
(SLQ_H)|SLQ060|Ever told by doctor have sleep disorder?|Sleep_disorder| Categorical||Questionnaire|Weight History (WHQ_H)|WHQ070|Tried to lose weight in past year|Tried_weight_loss_past_year| Categorical||Laboratory|Cholesterol HDL (HDL_H)|LBDHDD|Direct HDL-Cholesterol (mg/dL)|HDL_Cholesterol| Numeric||Laboratory|Cholesterol Total (TCHOL_H)|LBXTC|Total Cholesterol(mg/dL)|Total_Cholesterol| Numeric||Laboratory|Complete Blood Count (CBC_H)|LBXWBCSI|White blood cell count (1000 cells/uL)|WBC_count| Numeric||Laboratory|Complete Blood Count (CBC_H)|LBXRBCSI|Red blood cell count (million cells/uL)|RBC_count| Numeric||Laboratory|Complete Blood Count (CBC_H)|LBXHCT|Haematocrit (%)|Haematocrit| Numeric||Laboratory|Biochemistry Profile (BIOPRO_H)|LBXSTR|Triglycerides (mg/dL)|Triglycerides| Numeric||Laboratory|Biochemistry Profile (BIOPRO_H)|LBXSUA|Uric acid (mg/dL)|Uric_acid| Numeric||Laboratory|Biochemistry Profile (BIOPRO_H)|LBXSOSSI|Osmolality (mmol/Kg)|Osmolality| Numeric||Laboratory|Biochemistry Profile (BIOPRO_H)|LBXSNASI|Sodium (mmol/L)|Sodium| Numeric||Laboratory|Biochemistry Profile (BIOPRO_H)|LBXSKSI|Potassium (mmol/L)|Potassium| Numeric||Laboratory|Biochemistry Profile (BIOPRO_H)|LBXSGTSI|Gamma glutamyl transferase (U/L)|Gamma_glutamyl_transferase| Numeric||Laboratory|Biochemistry Profile (BIOPRO_H)|LBXSCA|Total calcium (mg/dL)|Calcium| Numeric||Laboratory|Biochemistry Profile (BIOPRO_H)|LBXSATSI|Alanine aminotransferase ALT (IU/L)|Alanine_aminotransferase| Numeric||Laboratory|Biochemistry Profile (BIOPRO_H)|LBXSASSI|Aspartate aminotransferase AST (IU/L)|Aspartate_aminotransferase| Numeric| Exploratory Data Analysis (EDA) Let's begin by doing some brief exploratory data analysis to assess the impact features might have on the likelihood someone is prediabetic and to also determine what will need to be addressed in a preprocessing pipeline.# load the prepared data frame prediab_eda = pd.read_csv("pre_diab_nhanes.csv") prediab_eda.head()We might also consider some rudimentary feature engineering as well, such as the ratio of waist circumference to height or the ratio of systolic to diastolic blood pressure. 
Let's create these two features as well.prediab_eda["SBP_to_DBP"] = prediab_eda["Average_SBP"] / prediab_eda["Average_DBP"] prediab_eda["Waist_to_hgt"] = ( prediab_eda["Waist_Circumference"] / prediab_eda["Standing_Height"] ) prediab_eda.head() # first a quick look at features overall prediab_eda.describe().T # missingness miss_count = prediab_eda.isna().sum() miss_pct = miss_count[miss_count > 0] / len(prediab_eda) miss_pct.sort_values().plot.barh() # those variables that are complete miss_count[miss_count == 0] / len(prediab_eda) # view correlations with a heatmap df_cor = prediab_eda.corr() sns.heatmap(df_cor, xticklabels=df_cor.columns, yticklabels=df_cor.columns) # let's do a table comparing features by the target categorical = [ "Gender", "High_BP", "Trouble_sleeping", "Sleep_disorder", "Told_overweight", "General_health", "Family_hist_diab", "Feel_at_risk_diab", "Vigorous_work_activity", "Moderate_work_activity", "Walk_or_bicycle", "Vigorous_rec_activity", "Moderate_rec_activity", "Tried_weight_loss_past_year", "Healthy_diet", ] mytable = TableOne( prediab_eda, columns=prediab_eda.columns.drop("Pre_diab").to_list(), categorical=categorical, groupby="Pre_diab", pval=True, remarks=False, overall=False, ) print(mytable) # KDE plots by prediabetes status as well for those continuous features distn_vars = [ "Age", "Waist_Circumference", "Weight", "Standing_Height", "BMI", "Average_SBP", "Average_DBP", "HDL_Cholesterol", "Total_Cholesterol", "Sleep_hours", "WBC_count", "RBC_count", "Hematocrit", "Triglycerides", "Uric_acid", "Osmolality", "Sodium", "Potassium", "Gamma_glutamyl_transferase", "Calcium", "Alanine_aminotransferase", "Aspartate_aminotransferase", "SBP_to_DBP", "Waist_to_hgt", ] df_kde = pd.melt(prediab_eda[distn_vars + ["Pre_diab"]], "Pre_diab", distn_vars) g = sns.FacetGrid( df_kde, col="variable", hue="Pre_diab", col_wrap=5, sharex=False, sharey=False ) g.map(sns.kdeplot, "value", shade=True) plt.show()**THE CHALLENGE**Your task is to build an image classifier with Keras and Convolutional Neural Networks for the Fashion MNIST dataset. This data set includes 10 labels of different clothing types with 28 by 28 grayscale images. There is a training set of 60,000 images and 10,000 test images.Label Description0 T-shirt/top1 Trouser2 Pullover3 Dress4 Coat5 Sandal6 Shirt7 Sneaker8 Bag9 Ankle boot Importing Datasets and librariesimport numpy as np import matplotlib.pyplot as plt %matplotlib inline from tensorflow.keras.datasets import fashion_mnist (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz 32768/29515 [=================================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz 26427392/26421880 [==============================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz 8192/5148 [===============================================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz 4423680/4422102 [==============================] - 0s 0us/step**VISUALIZING DATA**X_train[0] plt.imshow(X_train[0]) y_train[0]Processing the dataX_train.max() X_train = X_train/255 X_test = X_test/255Reshape the X arrays to include a 4 dimension of the single channel. 
Similar to what we did for the numbers MNIST data set.X_train.shape X_train = X_train.reshape(60000, 28, 28, 1) X_test = X_test.reshape(10000, 28, 28, 1)Convert the y_train and y_test values to be one-hot encoded for categorical analysis by kerasfrom tensorflow.keras.utils import to_categorical y_train y_cat_train = to_categorical(y_train) y_cat_test = to_categorical(y_test)**Building the Model**TASK 5: Use Keras to create a model consisting of at least the following layers (but feel free to experiment):2D Convolutional Layer, filters=32 and kernel_size=(4,4)Pooling Layer where pool_size = (2,2)Flatten LayerDense Layer (128 Neurons, but feel free to play around with this value), RELU activationFinal Dense Layer of 10 Neurons with a softmax activationThen compile the model with these parameters: loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten model = Sequential() #Convolutional Layer model.add(Conv2D(filters=32, kernel_size=(4,4), input_shape=(28,28,1), activation='relu',)) #POOLING LAYER model.add(MaxPool2D(pool_size=(2,2))) #FLATTEN IMAGES FROM 28 BY 28 TO 764 BEFORE FINAL LAYER model.add(Flatten()) #128 NEURONS IN DENSE HIDDEN LAYER model.add(Dense(128, activation='relu')) #LAST LAYER IS THE CLASSIFIER, THUS 10 POSSIBLE CLASSES model.add(Dense(10, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) model.summary()Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 25, 25, 32) 544 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 12, 12, 32) 0 _________________________________________________________________ flatten (Flatten) (None, 4608) 0 _________________________________________________________________ dense (Dense) (None, 128) 589952 _________________________________________________________________ dense_1 (Dense) (None, 10) 1290 ================================================================= Total params: 591,786 Trainable params: 591,786 Non-trainable params: 0 __________________________________________________[...]TRAINING MODELmodel.fit(X_train, y_cat_train, epochs=20)Epoch 1/20 1875/1875 [==============================] - 11s 2ms/step - loss: 0.5419 - accuracy: 0.8115 Epoch 2/20 1875/1875 [==============================] - 5s 3ms/step - loss: 0.2882 - accuracy: 0.8970 Epoch 3/20 1875/1875 [==============================] - 5s 2ms/step - loss: 0.2344 - accuracy: 0.9153 Epoch 4/20 1875/1875 [==============================] - 4s 2ms/step - loss: 0.2124 - accuracy: 0.9229 Epoch 5/20 1875/1875 [==============================] - 4s 2ms/step - loss: 0.1929 - accuracy: 0.9324 Epoch 6/20 1875/1875 [==============================] - 5s 2ms/step - loss: 0.1815 - accuracy: 0.9359 Epoch 7/20 1875/1875 [==============================] - 4s 2ms/step - loss: 0.1664 - accuracy: 0.9416 Epoch 8/20 1875/1875 [==============================] - 5s 2ms/step - loss: 0.1590 - accuracy: 0.9438 Epoch 9/20 1875/1875 [==============================] - 5s 3ms/step - loss: 0.1470 - accuracy: 0.9480 Epoch 10/20 1875/1875 [==============================] - 4s 2ms/step - loss: 0.13[...]EVALUATING THE MODELmodel.metrics_names model.evaluate(X_test, y_cat_test) from sklearn.metrics import 
classification_report pred = model.predict(X_test) predictions = np.argmax(pred, axis=1) y_cat_test.shape y_cat_test[0] predictions[0] y_test print(classification_report(y_test, predictions))precision recall f1-score support 0 0.79 0.90 0.84 1000 1 0.99 0.97 0.98 1000 2 0.85 0.83 0.84 1000 3 0.92 0.89 0.91 1000 4 0.80 0.89 0.84 1000 5 0.98 0.98 0.98 1000 6 0.78 0.65 0.71 1000 7 0.93 0.98 0.96 1000 8 0.99 0.97 0.98 1000 9 0.99 0.94 0.97 1000 accuracy 0.90 10000 macro avg 0.90 0.90 0.90 10000 weighted avg 0.90 0.90 0.90 10000Predicting Credit Card Fraud - A Tour of Basic Classification Techniques In this project, we will explore data relating to fraudulent credit card transactions. We will run through basic techniques of data exploration and pre-processing, as well as attempt to build a simple model to predict fraudulent transactions using basic regression techniques. The open dataset used for this notebook was obtained from Kaggle (https://www.kaggle.com/dalpozz/creditcardfraud). Below is a breakdown of the processes that will be outlined in this notebook: Contents 1. Dataset Exploration 2. Visualization 3. Data Pre-Processing 4. Classification Models 1. Logistic RegressionThe objective for the models we will build is as follows: Problem DescriptionPredict fraudulent credit card transactions using anonymized transaction data.Let's begin! 1. Data Exploration We first begin by loading in the necessary packages for the data analysis# for general handling and manipulation of table data import pandas as pd import numpy as np # for generation of interactive data visualization import matplotlib.pyplot as plt from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot import plotly.graph_objs as go init_notebook_mode(connected=True) # for building classifier model predictions and result analysis from sklearn.feature_selection import SelectKBest from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.metrics import f1_score, confusion_matrix, precision_recall_curve import itertools # set a random seed for reproducibility of results np.random.seed(1)Next we will load the data and get a sense of the general structure of the information availabledata = pd.read_csv('creditcard.csv') print('data({0[0]},{0[1]})'.format(data.shape)) print('Number of training examples: {0}'.format(data.shape[0])) print('Number of features for each example: {0}'.format(data.shape[1]))data(284807,31) Number of training examples: 284807 Number of features for each example: 31Below is a list of the labels for each of the 31 data featurespd.DataFrame(data = {'Feature Label': data.columns})The data has the following feature structure within its 31 features: Time - Time that the transaction occurred in seconds (the time span for this dataset is 48 hours total: 172,800 seconds) V1-V28 - Anonymized credit card transaction data Amount - Amount of the transaction Class - Label describing whether the transaction was fraudulent or genuine Some public datasets flag missing data with a sentinel value such as '-1' rather than leaving entries blank. Below we first check for standard missing (NaN) entries; any columns with missing data would be listed along with the number of missing entries for each of these columns.pd.DataFrame(data = {'# of Missing Data Entries': data.isnull().sum()})Lucky! So far we don't have any evidence of missing data entries, but sometimes datasets encode missing entries differently, such as with '-1' entries, dashes '-', blank spaces ' ', etc. 
so we will be on the lookout for these types of outliers as we explore the features. 2. VisualizationLet's now begin to visualize different aspects of the data to see what we can learn about the information contained in the features we've been given.Starting off we will look at the class variable, which is the main feature of interest we are asked to predict. The pie graph below shows the relative distribution of the target variable.labels = ["Genuine", "Fraudulent"] values = data["Class"].value_counts().values trace = go.Pie(labels = labels, values = values) layout = go.Layout(title = 'Distribution of Class Feature') fig = go.Figure(data = [trace], layout = layout) iplot(fig)As we can see, the class variable is highly skewed, with only 0.173% of transactions being fraudulent. In order to properly evaluate the performance of our model, we will need to use a specialized metric that accounts for false positive and false negative predictions since a model that simply predicts all transactions as genuine would (on the surface) be over 99.8% accurate! Next we will take a look at the distribution of the Time feature in our data:# Distribution of time feature, divided by 3600 seconds to view data in hours (data['Time']/3600.0).describe() # Initialize figure plt.figure(figsize = (12,9)) # Add histogram data plt.hist(data['Time']/3600, bins = 48, facecolor='c', edgecolor = 'k', alpha=0.75, ) # Figure formatting plt.xlabel('Time (in Hours)') plt.ylabel('# of Credit Card Transactions') plt.title('Time Distribution of Credit Card Transactions') plt.rcParams.update({'font.size': 18}) # plot! plt.show() # Initialize figure plt.figure(figsize = (12,9)) # Add histogram data plt.hist(data['Time'][data['Class'] == 1]/3600, bins = 48, facecolor='m', edgecolor = 'k', alpha=0.75, ) # Figure formatting plt.xlabel('Time (in Hours)') plt.ylabel('# of Fraudulent Credit Card Transactions') plt.title('Time Distribution of Fraudulent Transactions') plt.rcParams.update({'font.size': 18}) # plot! plt.show()Although there are some peaks in the time data relating to fraudulent transactions, it's difficult to tell at a glance if there is a trend that will help us with our predictions. Next we will take a look at the Amount feature.# Initialize figure plt.figure(figsize = (12,9)) # Add histogram data plt.hist(data['Amount'], bins = 50, facecolor='c', edgecolor = 'k', alpha=0.75, log = True ) # Figure formatting plt.xlabel('Transaction Amount') plt.ylabel('# of Credit Card Transactions (log scale)') plt.title('Distribution of Credit Card Transaction Amounts') plt.rcParams.update({'font.size': 18}) # plot! plt.show() # Initialize figure plt.figure(figsize = (12,9)) # Add histogram data plt.hist(data['Amount'][data['Class'] == 1], bins = 50, facecolor='m', edgecolor = 'k', alpha=0.75, log = True ) # Figure formatting plt.xlabel('Transaction Amount') plt.ylabel('# of Fraudulent Credit Card Transactions') plt.title('Distribution of Fraudulent Transaction Amounts') plt.rcParams.update({'font.size': 18}) # plot! plt.show()Here we can see that while the fradulent transaction trend is similar to the overall trend, it drops off much more rapidly, and we typically don't see any fraudulent transactions above ~$1,000 with the exception of a few outliers. 3. Data Pre-processing Let's begin preparing our data to build our predictive models. To start, we will split the training data into training ("train") and cross-validation ("CV") sets. 
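Since fraudulent transactions make up only about 0.17% of the data, a purely random split can, by chance, leave one subset with disproportionately few positives. A common safeguard is to stratify the split on the class label; the snippet below is an optional, hedged variant of the split performed next (the `stratify` argument is the only addition), not what this notebook actually runs.

```python
from sklearn.model_selection import train_test_split

# 60/20/20 split that preserves the fraud rate in each subset via stratification
X_temp, X_test, y_temp, y_test = train_test_split(
    data.drop(['Class'], axis=1), data['Class'],
    test_size=0.2, random_state=42, stratify=data['Class'])
X_train, X_CV, y_train, y_CV = train_test_split(
    X_temp, y_temp, test_size=0.25, random_state=42, stratify=y_temp)
```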
The data will actually be split three ways, with the following distribution: 60% train, 20% CV, 20% test (the held-out test set is reserved for final evaluation).# Split test set from data X_temp, X_test, y_temp, y_test = train_test_split(data.drop(['Class'], axis = 1), data['Class'], test_size=0.2, random_state=42) # Split remaining data into train and CV sets X_train, X_CV, y_train, y_CV = train_test_split(X_temp, y_temp, test_size=0.25, random_state=42) # Verify train, CV and test sets print('Number of training entries: {0} -> {1:.0f}% of data'.format(X_train.shape[0], 100*X_train.shape[0]/data.shape[0])) print('Number of CV entries: {0} -> {1:.0f}% of data'.format(X_CV.shape[0], 100*X_CV.shape[0]/data.shape[0])) print('Number of test entries: {0} -> {1:.0f}% of data'.format(X_test.shape[0], 100*X_test.shape[0]/data.shape[0]))Number of training entries: 170883 -> 60% of data Number of CV entries: 56962 -> 20% of data Number of test entries: 56962 -> 20% of dataBefore we get started, we want to make sure that our training, cross-validation and test sets each contain a mix of genuine and fraudulent transactions.# Create labels and counts of genuine and fraudulent transactions for all data subsets labels = ['Genuine', 'Fraudulent'] train_values = y_train.value_counts().values CV_values = y_CV.value_counts().values test_values = y_test.value_counts().values # Create interactive pie charts for each subset trace1 = go.Pie(labels = labels, values = train_values, domain= {"x": [0, 0.3]}, hole = 0.5 ) trace2 = go.Pie(labels = labels, values = CV_values, domain= {"x": [0.35, 0.65]}, hole = 0.5 ) trace3 = go.Pie(labels = labels, values = test_values, domain= {"x": [0.70, 1]}, hole = 0.5 ) # Plot formatting layout = go.Layout(title = 'Distribution of Class Feature', annotations = [{"text": "Train", "font": {"size": 20}, "showarrow": False, "x": 0.11, "y": 0.5 }, {"text": "CV", "font": {"size": 20}, "showarrow": False, "x": 0.5, "y": 0.5 }, {"text": "Test", "font": {"size": 20}, "showarrow": False, "x": 0.88, "y": 0.5 }, ] ) # Plot! fig = go.Figure(data = [trace1, trace2, trace3], layout = layout) iplot(fig)4. Classification Models 4.1 Logistic RegressionNow that we have separated our data, we can begin building our logistic regression model to predict fraudulent transactions given our data. In order to avoid overfitting and choose the best features for our model we will use best subset selection, which was covered in the earlier Regression Fundamentals notebook. 
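Before running the selection loop, it helps to know what SelectKBest is actually ranking: by default it scores each feature with the ANOVA F-statistic (`f_classif`) between that feature and the class label, and keeps the `k` highest-scoring columns. A quick way to inspect those scores on the training set (a small sketch, not part of the original notebook) is:

```python
from sklearn.feature_selection import SelectKBest, f_classif

# Fit the scorer once and look at the per-feature F-statistics
selector = SelectKBest(score_func=f_classif, k='all').fit(X_train, y_train)
feature_scores = pd.Series(selector.scores_, index=X_train.columns).sort_values(ascending=False)
print(feature_scores.head(10))
```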
Below is an example of best subset selection for the top 5 ranked features for our data prediction.# Select number of features for subset K = 5 subset = SelectKBest(k = K) subset.fit(X_train, y_train) # Display selected subset features pd.DataFrame(data = {'Subset Features': X_train.columns[subset.get_support()]})In order to choose the appropriate number of features we will combine the best subset selection method with cross-validation and a threshold of 50%, which means any predicted probability of fraud above 50% will result in a Fradulent transaction prediction.# Initialize list for CV_errors and logistic regression model (with normalization) CV_scores = [] lm = LogisticRegression() for K in range(1, len(X_train.columns) + 1): # Create best subset of K features subset = SelectKBest(k = K) subset.fit(X_train, y_train) X_train_subset = X_train[X_train.columns[subset.get_support()].values.tolist()] # Perform logistic regression on selected features fit = lm.fit(X_train_subset, y_train) predict = lm.predict(X_CV[X_train.columns[subset.get_support()].values.tolist()]) # Calculate classification error score = f1_score(y_CV, predict*1) CV_scores.append(score)for our model evaluation metric we will be using F1 score, which is calculated as follows: F1 = 2 * (precision * recall) / (precision + recall)where **precision** is the fraction of true positives divided by the total number of true positives and false positive fradulent values and **recall** represents the number of true positives divided by the number of true positives and false negative values.# Plot CV error vs. number of selected features plt.figure(figsize = (12,9)) plt.plot(range(1, len(X_train.columns)+1), CV_scores, linewidth = 4, c = 'r' ) # Plot formatting plt.xlabel('# of Subset Features') plt.ylabel('F1 score') plt.title('F1 Score vs. Number of Features Selected for Linear Regression') plt.rcParams.update({'font.size': 18}) # Plot! plt.show()By looking at our F1 scores we can determine which subset to use, but since it looks like using all of our available features has yielded the highest F1 score so far we will proceed with the full set of features. We can visualize the accuracy of our model by looking directly at a confusion matrix, which will show us the rate of False positives or false negatives that our model has returned.# Perform logistic regression on full feature set lm = LogisticRegression() fit = lm.fit(X_train, y_train) predict = lm.predict(X_CV) cnf_matrix = confusion_matrix(y_CV, predict)The following function will plot visualizations for our confusion matrices. This was sourced from sci-kit learn documentation for confusion_matrix. Please see: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html for more detailsdef plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. NOTE: This code was sourced from Sci-kit learn documentation, see: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html for more details """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. 
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') # Plot non-normalized confusion matrix plt.rcParams.update({'font.size': 16}) plt.figure(figsize = (8,6)) plot_confusion_matrix(cnf_matrix, classes=['Genuine', 'Fraudulent'], title='Confusion matrix, without normalization') # Plot normalized confusion matrix plt.figure(figsize = (8,6)) plot_confusion_matrix(cnf_matrix, classes=['Genuine', 'Fraudulent'], normalize=True, title='Normalized confusion matrix') plt.show()Although we can see that our model performs quite well on predicting genuine transactions, as expected we still have room to improve with respect to our Fraudulent transaction predictions, since only ~62% of Fraudulent transactions in our CV were predicted by the model.One approach we can take is to vary the threshold at which we will predict Fraud. With a lower prediction threshold, we may catch more fraudulent transactions at the expense of misclassifying some genuine transactions. The balance between these two is generally dependent on the task we are trying to achieve, but it seems sensible to try and catch more fraudulent transactions, even if that means catching some genuine transactions by our model.Below is a precision-recall curve, which shows the balance between both precision and recall with the variation of our threshold.# Perform logistic regression on full feature set lm = LogisticRegression() fit = lm.fit(X_train, y_train) predict = lm.predict_proba(X_CV) # Generate precision-recall curve precision, recall, thresholds = precision_recall_curve(y_CV, predict[:,1]) plt.rcParams.update({'font.size': 18}) plt.figure(figsize = (12,9)) plt.step(recall, precision, color='k', alpha=0.5, where='post') plt.fill_between(recall, precision, step='post', alpha=0.5, color='g') plt.xlabel('Recall') plt.ylabel('Precision') plt.ylim([0, 1]) plt.xlim([0, 1]) plt.title('Precision-Recall Curve') plt.show()Looking at the curve above, we should be able to achieve a higher combination of precision and recall accuracy, which would make an improvement to our model. We can choose the threshold which achieves the highest F1 score using the following:F1_scores = [] for threshold in np.arange(0, 1, 0.01): CV_class = 1*(predict[:,1] >= threshold) F1_scores.append(f1_score(y_CV, CV_class)) # Plot CV error vs. number of selected features plt.figure(figsize = (12,9)) plt.plot(np.arange(0,1,0.01), F1_scores, linewidth = 4, c = 'r' ) # Plot formatting plt.xlabel('Probability Threshold for Prediction of Fraudulent Transaction') plt.ylabel('F1 score') plt.title('F1 Score vs. Threshold for Logistic Regression Classification') plt.rcParams.update({'font.size': 18}) # Plot! plt.show()Now we can use the threshold with the maximum score to re-frame our predictions.CV_class = 1*(predict[:,1] >= (np.argmax(F1_scores)*.01)) cnf_matrix = confusion_matrix(y_CV, CV_class) # Plot normalized confusion matrix plt.figure(figsize = (8,6)) plot_confusion_matrix(cnf_matrix, classes=['Genuine', 'Fraudulent'], normalize=True, title='Normalized confusion matrix') plt.show()We can see from the confusion matrix that we were able increase our True positive rate of fraud detection in our CV dataset to about ~73%. 
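Before moving on to the held-out test set, it can be useful to put exact numbers on the confusion matrix above by evaluating precision, recall and F1 at the chosen threshold. This is a small sketch using the variables already defined in this notebook (`predict` holds the CV class probabilities and `F1_scores` the grid of scores computed above).

```python
from sklearn.metrics import precision_score, recall_score, f1_score

best_threshold = np.argmax(F1_scores) * 0.01   # the 0.01 grid step used above
CV_class = 1 * (predict[:, 1] >= best_threshold)

print('Chosen threshold: {:.2f}'.format(best_threshold))
print('Precision: {:.3f}'.format(precision_score(y_CV, CV_class)))
print('Recall:    {:.3f}'.format(recall_score(y_CV, CV_class)))
print('F1 score:  {:.3f}'.format(f1_score(y_CV, CV_class)))
```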
Now that we've performed some optimization, we can test the accuracy on our test dataset.# Use the model to make predictions onthe test set predict = lm.predict_proba(X_test) # Generate Confusion Matrix test_class = 1*(predict[:,1] >= (np.argmax(F1_scores)*.01)) cnf_matrix = confusion_matrix(y_test, test_class) # Plot normalized confusion matrix plt.figure(figsize = (8,6)) plot_confusion_matrix(cnf_matrix, classes=['Genuine', 'Fraudulent'], normalize=True, title='Normalized confusion matrix') # Plot! plt.show()US Fed Docs Registry (OCLC numbers)The US Fed Docs Registry: https://github.com/HTGovdocs/feddoc_oclc_numsimport datetime import json import os import shutil import gitClone data from Github repository (frequently updated)# clone data fresh, remove existing repository if needed. if os.path.exists("feddoc_oclc_nums"): shutil.rmtree("feddoc_oclc_nums") print("Cloning data from Github...") repo = git.Repo.clone_from("https://github.com/HTGovdocs/feddoc_oclc_nums", "feddoc_oclc_nums")Cloning data from Github...Copy file to data directory with a manifestdataset_name = "feddoc_oclc_nums" dataset_file = "data/{}.txt".format(dataset_name) if not os.path.exists("data"): os.makedirs("data") # copy file to data folder shutil.copyfile("feddoc_oclc_nums/feddoc_oclc_nums.txt", dataset_file) # create manifest file manifest = {} manifest["name"] = "feddoc_oclc_nums" manifest["description"] = "A daily updated list of OCLC numbers determined to be Federal Documents." # use the latest commit as a proxy for datetime commit = repo.head.commit file_datetime_proxy = datetime.datetime.utcfromtimestamp(commit.committed_date).isoformat() manifest["datetime"] = str(file_datetime_proxy) manifest["schema"] = { "oclc": "object" } manifest["format"] = { "type": "text", "extension": "txt", "header": False, } manifest["data-origins"] = [{ "origin": "https://github.com/HTGovdocs/feddoc_oclc_nums", "datetime": str(file_datetime_proxy) }] # create manifest to accompany data manifest_file = "data/{}.manifest.json".format(manifest["name"]) with open(manifest_file, 'w') as outfile: json.dump(manifest, outfile, indent=4, sort_keys=True)Finishing up!print("Completed notebook ({}).".format(datetime.datetime.utcnow().isoformat())) print("Output created:") print(dataset_file) print(manifest_file)Completed notebook (2019-09-03T19:06:11.379850). 
Output created: data/feddoc_oclc_nums.txt data/feddoc_oclc_nums.manifest.jsonconsumer_key = '' consumer_secret = '' access_token = '' access_token_secret = '' import tweepy import pandas as pd auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) twitter_users = [] tweet_time = [] tweet_string = [] for tweet in tweepy.Cursor(api.search, q='black friday', count= 1000).items(1000): if (not tweet.retweeted) and ('RT @' not in tweet.text): if tweet.lang == 'es': twitter_users.append(tweet.user.name) tweet_time.append(tweet.created_at) tweet_string.append(tweet.text)Example with different parameters:twitter_users_2 = [] tweet_time_2 = [] tweet_string_2 = [] for tweet in tweepy.Cursor(api.search, q='@Fridaruh', count= 1000).items(1000): if (not tweet.retweeted) and ('RT @' not in tweet.text): if tweet.lang == 'en': twitter_users_2.append(tweet.user.name) tweet_time_2.append(tweet.created_at) tweet_string_2.append(tweet.text) df = pd.DataFrame({'name':twitter_users, 'time':tweet_time, 'tweet':tweet_string })Export the dataframe to a CSV filedf.to_csv('tweets_black_friday.csv')If you do not have API access, run this cell instead to load the CSV filedf = pd.read_csv('/content/tweets_black_friday.csv') df.head()Convert the tweets to a listdata = df['tweet'].to_list() data pattern = r'''(?x) # Flag that enables verbose mode (?:[A-Z]\.)+ # Matches abbreviations such as U.S.A. | \w+(?:-\w+)* # Matches words that may contain an internal hyphen | \$?\d+(?:\.\d+)?%? # Matches money amounts or percentages such as $15.5 or 100% | \.\.\. # Matches ellipses | [][.,;"'?():-_`] # Matches punctuation marks ''' import nltk nltk.download('punkt') from nltk import word_tokenize texto = [] for x in range(0, len(data)): token_1 = data[x].lower() token_2 = nltk.regexp_tokenize(token_1, pattern) texto.append(token_2) # texto is a list of lists #texto flatten = [w for l in texto for w in l] import string puntuacion = list(string.punctuation) puntuacion puntuacion.append('https') puntuacion.append('co') puntuacion.append('t') puntuacion nltk.download('stopwords') stop_words_n = nltk.corpus.stopwords.words('spanish') df_2 = [w for w in flatten if w not in stop_words_n] df_3 = [w for w in df_2 if w not in puntuacion] freq_words = nltk.FreqDist(df_3) freq_words.most_common(15) omitir_palabras = ['black','friday','2021','ofertas','semana'] df_4 = [w for w in df_3 if w not in omitir_palabras] freq_words = nltk.FreqDist(df_4) freq_words.most_common(15) from wordcloud import WordCloud import matplotlib.pyplot as plt wordcloud = WordCloud(background_color='white', collocations=False, max_words=30).fit_words(freq_words) plt.imshow(wordcloud, interpolation='bilinear') plt.axis('off') plt.show()Reference: https://elitedatascience.com/imbalanced-classes Balance Scale Dataset[Download the synthetic dataset Balance Scale Data from the UCI Machine Learning Repository](http://archive.ics.uci.edu/ml/datasets/balance+scale).This dataset was originally generated to model psychological experiment results.import pandas as pd import numpy as np from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score # Read dataset df = pd.read_csv('balance-scale.data', names=['balance', 'var1', 'var2', 'var3', 'var4']) # Display example observations df.head()The dataset contains information about whether a scale is balanced or not, based on weights and distances of the 
two arms.- 1 target variable, `balance`- 4 input features, `var1` through `var4` ![balance-scale-data.png](attachment:balance-scale-data.png) The target variable has 3 classes:- `R` for right-heavy, i.e. when `var3 * var4 > var1 * var2`- `L` for left-heavy, i.e. when `var3 * var4 < var1 * var2`- `B` for balanced, i.e. when `var3 * var4 = var1 * var2`df['balance'].value_counts() # R 288 # L 288 # B 49 # Name: balance, dtype: int64Convert into a binary classification problem.Label each observation as `1` (positive class) if scale is balanced or `0` (negative class) if scale is not balanced:# Transform into binary classification df['balance'] = [1 if b=='B' else 0 for b in df.balance] df['balance'].value_counts() # 0 576 # 1 49 # Name: balance, dtype: int64 # About 8% were balancedOnly about 8% of the observations were balanced. Therefore, if we were to always predict 0, we'd achieve an accuracy of 92%. Base Model# Separate input features (X) and target variable (y) y = df.balance X = df.drop('balance', axis=1) # Train model clf_0 = LogisticRegression().fit(X, y) # Predict on training set pred_y_0 = clf_0.predict(X) print( accuracy_score(pred_y_0, y) )0.921692% overall accuracy, but is it because it's predicting only 1 class?print( np.unique( pred_y_0 ) )[0]This model is only predicting 0, which means it's completely ignoring the minority class in favor of the majority class. 1. Up-sample Minority ClassUp-sampling is the process of randomly duplicating observations from the minority class in order to reinforce its signal.Most common way is to simply resample with replacement.from sklearn.utils import resampleCreate a new DataFrame with an up-sampled minority class.1. Separate observations from each class into different DataFrames2. Resample minority class with replacement, set number of samples to match that of the majority class3. Combine up-sampled minority class DataFrame with original majority class DataFrame# Separate majority and minority classes df_majority = df[df.balance==0] df_minority = df[df.balance==1] # Upsample minority class df_minority_upsampled = resample(df_minority, replace=True, # sample with replacement n_samples=576, # to match majority class random_state=123) # reproducible results # Combine majority class with upsampled minority class df_upsampled = pd.concat([df_majority, df_minority_upsampled]) # Display new class counts df_upsampled.balance.value_counts() # 1 576 # 0 576 # Name: balance, dtype: int64 # Separate input features (X) and target variable (y) y = df_upsampled.balance X = df_upsampled.drop('balance', axis=1) # Train model clf_1 = LogisticRegression().fit(X, y) # Predict on training set pred_y_1 = clf_1.predict(X) # Is our model still predicting just one class? print( np.unique( pred_y_1 ) ) # [0 1] # How's our accuracy? print( accuracy_score(y, pred_y_1) )[0 1] 0.51475694444444442. Down-sample Majority ClassDown-sampling involves randomly removing observations from the majority class to prevent its signal from dominating the learning algorithm.Most common heuristic is resampling without replacement.1. Separate observations from each class into different DataFrames2. Resample majority class without replacement, setting number of samples to match that of minority class3. 
Combine down-sampled majority class DataFrame with original minority class DataFrame# Separate majority and minority classes df_majority = df[df.balance==0] df_minority = df[df.balance==1] # Downsample majority class df_majority_downsampled = resample(df_majority, replace=False, # sample without replacement n_samples=49, # to match minority class random_state=123) # reproducible results # Combine minority class with downsampled majority class df_downsampled = pd.concat([df_majority_downsampled, df_minority]) # Display new class counts df_downsampled.balance.value_counts() # 1 49 # 0 49 # Name: balance, dtype: int64 # Separate input features (X) and target variable (y) y = df_downsampled.balance X = df_downsampled.drop('balance', axis=1) # Train model clf_2 = LogisticRegression().fit(X, y) # Predict on training set pred_y_2 = clf_2.predict(X) # Is our model still predicting just one class? print( np.unique( pred_y_2 ) ) # [0 1] # How's our accuracy? print( accuracy_score(y, pred_y_2) )[0 1] 0.56122448979591833. Change Your Performance MetricFor a general-purpose metric for classification - Area Under ROC Curve (AUROC).- AUROC represents likelihood of model distinguishing observations from two classes- If you randomly select one observation from each class, what's the probability that your model will be able to "rank" them correctly?If AUROC is AUROC should be >= 0.5from sklearn.metrics import roc_auc_scoreTo calculate AUROC, need predicted class probabilities instead of just the predicted classes.# Predict class probabilities prob_y_2 = clf_2.predict_proba(X) # Keep only probabilities of the positive class # [proba(negative), proba(positive)] prob_y_2 = [p[1] for p in prob_y_2] prob_y_2[:5] # trained on the down-sampled dataset print( roc_auc_score(y, prob_y_2) ) prob_y_0 = clf_0.predict_proba(X) prob_y_0 = [p[1] for p in prob_y_0] print( roc_auc_score(y, prob_y_0) )0.53068310657596364. Penalize Algorithms (Cost-Sensitive Training)Use penalized learning algorithms that increase the cost of classification mistakes on the minority class.A popular algorithm is Penalized-SVM:from sklearn.svm import SVCDuring training, use the argument `class_weight='balanced'` to penalize mistakes on the minority class by an amount proportional to how under-represented it is.Include the argument `probability=True` if we want to enable probability estimates for SVM algorithms.Train a model using Penalized-SVM on the original imbalanced dataset:# Separate input features (X) and target variable (y) y = df.balance X = df.drop('balance', axis=1) # Train model clf_3 = SVC(kernel='linear', class_weight='balanced', # penalize probability=True) clf_3.fit(X, y) # Predict on training set pred_y_3 = clf_3.predict(X) # Is our model still predicting just one class? print( np.unique( pred_y_3 ) ) # [0 1] # How's our accuracy? print( accuracy_score(y, pred_y_3) ) # 0.688 # What about AUROC? prob_y_3 = clf_3.predict_proba(X) prob_y_3 = [p[1] for p in prob_y_3] print( roc_auc_score(y, prob_y_3) )[0 1] 0.688 0.469476332199546435. Use Tree-Based AlgorithmsDecision trees often perform well on imbalanced datasets because their hierarchical structure allows them to learn signals from both classes.Tree ensembles (Random Forests, Gradient Boosted Trees, etc.) 
almost always outperform singular decision trees.from sklearn.ensemble import RandomForestClassifier # Separate input features (X) and target variable (y) y = df.balance X = df.drop('balance', axis=1) # Train model clf_4 = RandomForestClassifier() clf_4.fit(X, y) # Predict on training set pred_y_4 = clf_4.predict(X) # Is our model still predicting just one class? print( np.unique( pred_y_4 ) ) # [0 1] # How's our accuracy? print( accuracy_score(y, pred_y_4) ) # 0.9744 # What about AUROC? prob_y_4 = clf_4.predict_proba(X) prob_y_4 = [p[1] for p in prob_y_4] print( roc_auc_score(y, prob_y_4) )[0 1] 0.9984 1.0core> Core utilities for working with DICOM files. Utilizes the `pydicom` and `fastcore` packages. Some ideas borrowed from [fastai.medical.image](https://github.com/fastai/fastai/blob/master/fastai/medical/imaging.py).#hide from nbdev.showdoc import * #export from dicomtools.imports import * import pydicom from pydicom.dataset import Dataset as DcmDataset from pydicom.tag import BaseTag as DcmTag from pydicom.multival import MultiValue as DcmMultiValue #export def get_dicoms(path): "Walk `path` to get DICOM file names, then read one file from each series into a `pandas.DataFrame`." fns = L() print("Finding DICOM files. This may take a few minutes.") for r, d, f in os.walk(path): if f: if Path(f[0]).suffix.lower() == '.dcm': fns.append(Path(f'{r}/{f[0]}')) print("Reading DICOMs. This may take a few minutes, depending on the number of files to read...") df = pd.DataFrame.from_dicoms(fns) return fns, df #export @patch def dcmread(fn: Path, no_pixels=True, force=True): "Reads a DICOM file and returns the corresponding pydicom.Dataset" return pydicom.dcmread(str(fn), stop_before_pixels=no_pixels, force=force) #export def _cast_dicom_special(x): cls = type(x) if not cls.__module__.startswith('pydicom'): return x if cls.__base__ == object: return x return cls.__base__(x) def _split_elem(res, k, v): if not isinstance(v, DcmMultiValue): return for i, o in enumerate(v): res[f'{k}{"" if i == 0 else i}'] = o #export _cols = [ 'PatientID', # Study info 'StudyInstanceUID', 'StudyID', 'StudyDescription', # to filter on "MRI BRAIN WITH AND WITHOUT CONTRAST" in some cases # Series info 'SeriesInstanceUID', 'SeriesNumber', 'SeriesDescription', # needed for labeling series 'SequenceName', # may be used for labeling series 'BodyPartExamined', # to filter on "HEAD" or "BRAIN" 'AcquisitionNumber', # Image info and features 'InstanceNumber', # i.e. image number 'SOPClassUID', # to filter on "MR Image Storage" 'ImageOrientationPatient', # to calculate slice orientation (e.g. 
axial, coronal, sagittal) 'EchoTime', 'InversionTime', 'EchoTrainLength', 'RepetitionTime', 'TriggerTime', 'SequenceVariant', 'ScanOptions', 'ScanningSequence', 'MRAcquisitionType', 'ImageType', 'PixelSpacing', 'SliceThickness', 'PhotometricInterpretation', 'ContrastBolusAgent', 'AngioFlag', # addition to list from paper 'DiffusionBValue' # addition to list from paper ] #export @patch def as_dict(self: DcmDataset, filt=True, split_multi=False): if filt: vals = [self[o] for o in self.keys() if self[o].keyword in _cols] else: vals = [self[o] for o in self.keys()] items = [(v.keyword, v.value.name) if v.keyword == 'SOPClassUID' else (v.keyword, v.value) for v in vals] res = dict(items) res['fname'] = self.filename if split_multi: for k, v in items: _split_elem(res, k, v) for k in res: res[k] = _cast_dicom_special(res[k]) return res #export def _dcm2dict(fn, excl_private=False, **kwargs): ds = fn.dcmread(**kwargs) if excl_private: ds.remove_private_tags() return ds.as_dict(**kwargs) #export @delegates(parallel) def _from_dicoms(cls, fns, n_workers=0, **kwargs): return pd.DataFrame(parallel(_dcm2dict, fns, n_workers=n_workers, **kwargs)) pd.DataFrame.from_dicoms = classmethod(_from_dicoms)**Random Forest**Random forest is a Supervised Machine Learning Algorithm that is used widely in Classification and Regression problems. It builds decision trees on different samples and takes their majority vote for classification and average in case of regression.One of the most important features of the Random Forest Algorithm is that it can handle the data set containing continuous variables as in the case of regression and categorical variables as in the case of classification. It performs better results for classification problems.![rf1.png](data:image/png;base64,EQgygQkyqNsHZVNBERABERABERABESgKAhIlBeFmVVJERABERABERABERCBKBP4fxyaEj3CcyzRAAAAAElFTkSuQmCC)**Working of Random Forest Algorithm**Before understanding the working of the random forest we must look into the ensemble technique. Ensemble simply means combining multiple models. Thus a collection of models is used to make predictions rather than an individual model.**Ensemble uses two types of methods**1. Bagging– It creates a different training subset from sample training data with replacement & the final output is based on majority voting. For example, Random Forest.2. Boosting– It combines weak learners into strong learners by creating sequential models such that the final model has the highest accuracy. 
For example, ADA BOOST, XG BOOST![rf2.png](attachment:rf2.png)**Steps involved in random forest algorithm*** Step 1: In Random forest n number of random records are taken from the data set having k number of records.* Step 2: Individual decision trees are constructed for each sample.* Step 3: Each decision tree will generate an output.* Step 4: Final output is considered based on Majority Voting or Averaging for Classification and regression respectively.**Important Features of Random Forest**1. Diversity- Not all attributes/variables/features are considered while making an individual tree, each tree is different.2. Immune to the curse of dimensionality- Since each tree does not consider all the features, the feature space is reduced.3. Parallelization-Each tree is created independently out of different data and attributes. This means that we can make full use of the CPU to build random forests.4. Train-Test split- In a random forest we don’t have to segregate the data for train and test as there will always be 30% of the data which is not seen by the decision tree.5. 
Stability- Stability arises because the result is based on majority voting/ averaging.**Important Hyperparameters**Hyperparameters are used in random forests to either enhance the performance and predictive power of models or to make the model faster.***Following hyperparameters increases the predictive power:***1. ***n_estimators***– number of trees the algorithm builds before averaging the predictions.2. ***max_features***– maximum number of features random forest considers splitting a node.3. ***mini_sample_leaf***– determines the minimum number of leaves required to split an internal node.***Following hyperparameters increases the speed:***1. ***n_jobs***– it tells the engine how many processors it is allowed to use. If the value is 1, it can use only one processor but if the value is -1 there is no limit.2. ***random_state***– controls randomness of the sample. The model will always produce the same results if it has a definite value of random state and if it has been given the same hyperparameters and the same training data.3. ***oob_score***– OOB means out of the bag. It is a random forest cross-validation method. In this one-third of the sample is not used to train the data instead used to evaluate its performance. These samples are called out of bag samples **Using Sklearn**import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline from google.colab import drive drive.mount('/content/gdrive', force_remount=True) import os os.environ['KAGGLE_CONFIG_DIR'] = "/content/gdrive/My Drive/Kaggle" # /content/gdrive/My Drive/Kaggle is the path where kaggle.json is present in the Google Drive #changing the working directory %cd /content/gdrive/My Drive/Kaggle !kaggle datasets download -d volodymyrgavrysh/fraud-detection-bank-dataset-20k-records-binary #unzipping the zip files and deleting the zip files !unzip \*.zip && rm *.zip data=pd.read_csv('fraud_detection_bank_dataset.csv') data # Countplot sns.countplot(x='targets',data=data,orient="h") # Boxplot plt.boxplot(data.iloc[0:10,0:10], vert=True, patch_artist=True) data.info() data.describe() # Segeggrating X & Y y=data[['targets']] x=data.drop(['Unnamed: 0','targets'],axis=1) x # Train Test Split from sklearn.model_selection import train_test_split x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.25) x_train.shape from sklearn.ensemble import RandomForestClassifier rfc=RandomForestClassifier(max_depth=10,oob_score=True) rfc.fit(x_train,y_train) y_pred=rfc.predict(x_test) y_pred # Calculating the F1 Score from sklearn.metrics import f1_score s=f1_score(y_pred,y_test) s # Confusion Matrix from sklearn.metrics import confusion_matrix,ConfusionMatrixDisplay cm = confusion_matrix(y_test, y_pred, labels=rfc.classes_) disp = ConfusionMatrixDisplay(confusion_matrix=cm,display_labels= rfc.classes_) disp.plot()11 Text Preprocessing and AugmentationIn this notebook, we will work with text data. Firstly, we will learn how to perform preprocessing and visualization on text data. Then, we will try to adopt data augmentation on text data. The Enron email datasets will be used here to demonstrate how text mining/NLP techniques could be used for fraud analysis. Through the whole example, we will mainly use the following two NLP libraries:1. [Texthero](https://pypi.org/project/texthero/)2. [nlpaug](https://nlpaug.readthedocs.io/en/latest/) 1. Data BackgroundIn 2000, [Enron](https://en.wikipedia.org/wiki/Enron) was one of the largest companies in the United States. 
By 2002, it had collapsed into bankruptcy due to widespread corporate fraud. In the resulting Federal investigation, a significant amount of typically confidential information entered into the public record, including tens of thousands of emails and detailed financial data for top executives. The Enron fraud is the largest case of corporate fraud in American history. Founded in 1985, Enron Corporation went bankrupt by end of 2001 due to widespread corporate fraud and corruption. Before its fall, Fortune magazine had named Enron "America's most innovative company" for six consecutive years. So what happened? Who were the culprits?In this notebook, we are going to work with emails corpus from Enron employees. We will learn how to analyze text data for fraud analysis.basefn = "data//" import pandas as pd df_corpus = pd.read_csv(basefn + "enron_emails_clean.csv") df_corpus.head()Exact Word MatchOne simple approach to analyze text data is keyword based query. For example, look for any emails mentioning 'money'. Here, the query word could be any informative words.# Select data that matches df_corpus.loc[df_corpus['content'].str.contains('money', na=False)].head(3)Usually you want to search more than one term. For example, in fraud analysis, you may prepare a full **fraud word lists** including terms that could potentially flag fraudulent clients and/or transactions. Here, we create a list containing the following words/terms:* 'enron stock'* 'sell stock'* 'stock bonus'* 'sell enron stock'.# Create a list of terms to search for searchfor = ['enron stock', 'sell stock', 'stock bonus', 'sell enron stock'] filtered_emails = df_corpus.loc[df_corpus['content'].str.contains('|'.join(searchfor), na=False)] filtered_emails.head(2) print("Number of returned fraud emails is {}".format(filtered_emails.shape[0]))Number of returned fraud emails is 13The recall rate is quite low because the search keyword has to be exactly identical to the words in the emails to be found. For example, the email containing "SELL stock" will not be counted. In the following, we will use text preprocessing techniques from **texthero** to improve the recall rate. 2. TextheroTexthero is a simple Python toolkit that helps you work with a text-based dataset. It provides quick and easy functionalities that let you preprocess, represent, map into vectors and visualize text data in just a couple of lines of code.Texthero is designed to be used on top of **pandas**, so it makes it easier to preprocess and analyze text-based Pandas Series or Dataframes.If you are working on an NLP project, Texthero can help you get things done faster than before and gives you more time to focus on important tasks.NOTE: The Texthero library is still in the beta version. You might face some bugs and pipelines might change. A faster and better version will be released and it will bring some major changes. Install Package```pip install texthero``` Texthero has four useful modules that handle different functionalities that you can apply in your text-based dataset.- Preprocessing This module allows for the efficient pre-processing of text-based Pandas Series or DataFrames. 
It has different methods to clean your text dataset such as lowercase(), remove_html_tags() and remove_urls().- NLP This module has a few NLP tasks such as named_entities, noun_chunks, and so on.- Representation This module has different algorithms to map words into vectors such as TF-IDF, GloVe, Principal Component Analysis(PCA), and term_frequency.- Visualization The last module has three different methods to visualize the insights and statistics of a text-based Pandas DataFrame. It can plot a scatter plot and word cloud.In this section, we will focus on two parts: **preprocessing** and **Visualization**import texthero as heroText PreprocessingTexthero provides useful text preprocessing methods. For example, * Remove digits* Remove stopwords* Remove URLs* Tokenize* Remove HTML tagsAll the required inputs should be **Pandas series** or Pandas dataframetext = pd.Series("Hi my phone number is +86 12394 call me at 09:00 am") clean_text = hero.preprocessing.remove_digits(text) print(clean_text) text = pd.Series("you need to know machine learning") clean_text = hero.remove_stopwords(text) print(clean_text) text = pd.Series("Go to https://spacy.io/ to read more articles you like") clean_text = hero.remove_urls(text) print(clean_text) text = pd.Series(["You can think of Texthero as a tool to help you understand and work with text-based dataset. "]) clean_text = hero.tokenize(text) print(clean_text) text = pd.Series("

<h1>hello world</h1>
") clean_text = hero.remove_html_tags(text) print(clean_text)0 hello world dtype: objectTexthero provide a simple interface named **clean()**.The clean() method runs seven functions when you pass a pandas series. These seven functions are:* lowercase(s): Lowercases all text.* remove_diacritics(): Removes all accents from strings.* remove_stopwords(): Removes all stop words.* remove_digits(): Removes all blocks of digits.* remove_punctuation(): Removes all string.punctuation (!"$%&'()*+,-./:;?@[]^_`{|}~).* fillna(s): Replaces unassigned values with empty spaces.* remove_whitespace(): Removes all white space between wordsNow we can see the cleaned news content.# clean the news content by using clean method from hero package df_corpus['clean_content'] = hero.clean(df_corpus['content']) #show unclean and clean news content df_corpus[['content','clean_content']].head(2)We can also modify the **clean()** function. Then, we can call the customized text clean functions.#create custom pipeline custom_pipeline = [hero.preprocessing.fillna, hero.preprocessing.lowercase, hero.preprocessing.remove_whitespace, hero.preprocessing.remove_urls ] df_corpus['clean_custom_content'] = df_corpus['content'].pipe(hero.clean, custom_pipeline)Search the email corpus againHere, we search the cleaned email corpus# Create a list of terms to search for searchfor = ['enron stock', 'sell stock', 'stock bonus', 'sell enron stock'] filtered_emails = df_corpus.loc[df_corpus['clean_content'].str.contains('|'.join(searchfor), na=False)] print("Number of returned fraud emails after text preprocessing is {}".format(filtered_emails.shape[0]))Number of returned fraud emails after text preprocessing is 314After text preprocessing, the recall rate is improved Visualization Then, let us explore some text visualization methods in texthero. Texthero contains different method to visualize insights and statistics of a text-based Pandas DataFrame.* Top words If you want to know the top words in your text-based dataset, you can use the top_words() method from the visualization module. This method is useful if you want see additional words that you can add to the stop words lists.* Wordclouds The wordcloud() method from the visualization module plots an image using WordCloud from the word_cloud package.import matplotlib.pyplot as plt NUM_TOP_WORDS = 20 top_20 = hero.visualization.top_words(df_corpus.loc[:, 'clean_content']).head(NUM_TOP_WORDS) top_20.plot.bar(rot=90, title="Top 20 words in corpus"); plt.show(block=True);WordcloudsThe wordcloud() method from the visualization module plots an image using WordCloud from the word_cloud package.#Plot wordcloud image using WordCloud method hero.wordcloud(df_corpus.loc[:, 'clean_content'], max_words=100)3. NLPAUG![https://github.com/makcedward/nlpaug/blob/master/res/logo_small.png?raw=true](https://github.com/makcedward/nlpaug/blob/master/res/logo_small.png?raw=true)More data we have, better performance we can achieve. What is more, sample more data from minority class is one approach to address the imbalanced problem. However, it is very costy to annotate large amount of training data. And in some applications includign fraud detection, it is impossible to obtain lots of data labeled as fraud one. Therefore, proper data augmentation is useful to boost up your model performance.Due to high complexity of language, it is more challenging to augment text compared to images which can simply cropping out portion of images. Here, we will explore the library named nlpaug. 
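Before moving on to augmentation, one gap worth noting: the Representation module described above (TF-IDF, PCA, and so on) is not demonstrated in this notebook. A minimal sketch, following the pipeline shown in texthero's own README and assuming the `df_corpus['clean_content']` column created above, would look like this:

```python
# Sketch only: map the cleaned e-mails to TF-IDF vectors and project them to 2D
# with PCA, using texthero's representation module.
df_corpus['pca'] = (
    df_corpus['clean_content']
    .pipe(hero.tfidf)
    .pipe(hero.pca)
)
# The 2D 'pca' column can then be inspected visually, e.g.:
# hero.scatterplot(df_corpus, col='pca', title="Enron e-mails, TF-IDF + PCA")
```

With representation noted, we return to nlpaug.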
This python library helps you with augmenting nlp for your machine learning projects. Provided Features listed as:1. Generate synthetic data for improving model performance without manual effort2. Simple, easy-to-use and lightweight library. Augment data in 3 lines of code3. Plug and play to any neural network frameworks (e.g. PyTorch, TensorFlow)4. Support textual and audio input Install Package! pip install nlpaug df_corpus['Tag'] = 0 df_corpus.loc[df_corpus['clean_content'].str.contains('|'.join(searchfor), na=False), 'Tag'] = 1 df_corpus['Tag'].value_counts()This library nlpaug provides various textual augmenter functions including character augmenter, word augmenter and sentence augmenter. In this section, we will only explore word-level augmentation based on [WordNet](https://wordnet.princeton.edu/): substitute word by WordNet's synonym.You may find other frameworks [here](https://github.com/makcedward/nlpaug/blob/master/example/textual_augmenter.ipynb)texts = df_corpus.loc[df_corpus.Tag==1, 'clean_content'].tolist() short_email = min(texts, key=lambda word: len(word)) #for better visualization, find the shortest email short_email import nlpaug.augmenter.word as nawInstall WordNetimport nltk nltk.download('averaged_perceptron_tagger') aug = naw.SynonymAug(aug_src='wordnet') augmented_texts = aug.augment(short_email, 5) # 5 is the number of generated text print("Original:") print(short_email) print("Augmented Texts:") for idx in range(len(augmented_texts)): print(augmented_texts[idx])Original: taxes owe based gain whatever sell much less instance bought enron stock noe worth pay tax gain look tonight Augmented Texts: taxes owe ground gain whatever sell much less instance bought enron stock noe worth pay taxation gain spirit tonight taxes owe based amplification whatever sell much less example bought enron farm animal noe worth yield revenue enhancement gain look tonight taxes owe based addition whatever sell often less instance buy enron stock noe worth pay tax gain looking tonight taxes owe based addition whatever sell much less instance bought enron stock noe deserving bear tax gain look tonight taxes owe based gain whatever sell much less instance purchase enron pedigree noe deserving pay tax gain look tonight**python语言及其在桥梁工程中的应用**               **郭军军(博士研究生)**             [github个人主页](https://github.com/Junjun1guo)   [ORCID个人主页](https://orcid.org/0000-0001-9867-414X)              **Markdown语言** [Markdown](https://daringfireball.net/projects/markdown/)是一种轻量级 的标记语言,可与HTML语言兼容,可以看做是[HTML语言](https://www.runoob.com/html/html-editors.html)的简易版本,2004年由创建。Markdown语言主要用于编写文档及论坛发表消息,如[github](https://github.com/)的read.me文档,[jupyterlab](https://jupyterlab.readthedocs.io/en/latest/)文本的编写。 **多级标题**# 一级标题 ## 二级标题 ... ###### 六级标题**段落样式**   **空行:** 通过文本间空行实现   这是第一段   这是第二段 **空格:**   中文输入 状态下,按shift+空格切换到全角状态,然后按空格 **文字样式:**斜体采用 *斜体* 粗体采用 **粗体** 粗斜体采用 ***粗斜体****斜体* **粗体** ***粗斜体*** **分割线:**可以采用三个以上的下划线________ **下划线**采用HTML语言的underline标签实现 通过HTML的标签实现 **字体颜色**采用HTML font标签实现 内容字体颜色通过HTML字体颜色标签实现 **有序与无序列表*** 无序列表 * 无序列表 1. 有序列表 2. 有序列表* 无序列表* 无序列表1. 有序列表2. 
有序列表 **区块引用**> 区块引用1 >> 区块引用2> 区块引用1 >> 区块引用2>>> 区块引用3 代码块``` Python print ("Hello world!") `````` Pythonprint ("Hello world!")``` 地址与图片链接地址链接[python官网](https://www.python.org/)   图片链接[python官网](https://www.python.org/) 表格采用|分割单元格,采用----分割标题栏与单元格。单元格的对齐采用:,如左对齐(:----) | 左对齐 | 右对齐 | 居中对齐 | | :------|------:| :-----: | | 单元格 | 单元格 | 单元格 | | 单元格 | 单元格 | 单元格 || 左对齐 | 右对齐 | 居中对齐 || :-----|----:| :----: || 单元格 | 单元格 | 单元格 || 单元格 | 单元格 | 单元格 | 公式编辑 [markdown公式编辑教程](https://www.jianshu.com/p/25f0139637b7)行内公式$ \Gamma(z) = \int_0^\infty t^{z-1}e^{-t}dt\,. $ 行间公式$$\Gamma(z) = \int_0^\infty t^{z-1}e^{-t}dt\,.$$$ \Gamma(z) = \int_0^\infty t^{z-1}e^{-t}dt\,. $$$\Gamma(z) = \int_0^\infty t^{z-1}e^{-t}dt\,.$$ Python语言 python语言及其特点 python语言的发展   1989年开发了python编译器。python语言介于C语言与脚本语言之间,是一种“胶水”语言。python语言的宗旨就是“能用胶水解决的问题,坚决不造轮子”。发展的版本有2.x和3.x,[python官网](https://www.python.org/) python语言下载及安装1. 在下载页面选择相应系统对应的版本进行下载2. 安装python,可以选择相应的安装目录,添加python到系统路径 3. 进入python安装目录Scripts进行安装需要的模块,安装好的模块位于site-pakages中 python常用编辑器介绍   [editPlus](https://www.editplus.com/), [pyCharm](https://www.jetbrains.com/pycharm/),[anaConda](https://www.anaconda.com/)等 **pyCharm启动缓慢的解决办法:** 打开pycharm的安装目录 bin文件夹下的pycharm.exe.vmoptions文件,将Xms128m改为Xms256m,Xmx512m改为Xmx1024m python语言的特点 **优点*** 简洁优雅,可读性强  * 可移植性强,接口强大* 优秀强大的第三方库**不足*** 速度中等* 不同版本及包的兼容性 python的应用场景 * 数值计算,依赖于众多的第三方包* 机器学习及深度学习* 网络爬虫及大数据分析* 网页制作,知乎,facebook等* 桌面程序的开发,依赖于pyQt,wxPython等第三方GUI库* 与其他软件的交互,如Opensees,Abaqus,SAP2000等... python与matlab对比 |matlab|python||:------:|:------:||商业软件|免费开源||软件冗杂|即用即装||面向过程|面向对象||速度慢|速度快||发展缓慢|发展极快||...|...| python语言基础语言 数据类型 数字:整型(int) 浮点型 (float)print (type(2)) #type()函数返回输入数据的类型 print (type(2.0)) #print ()函数用于文本输出 a=float(2) #整型转换为浮点型 b=int(2.0) #浮点型转换为整型 print (a) print (b)2.0 2字符串:'' ""a="hello world!" b="我是python" print (type(a)) print (a) print (a+b) hello world! 
hello world!我是pythonMath模块from math import * #导入math模块中所有函数 a=sqrt(2) print (a)1.4142135623730951布尔类型:真(True),假(False)a=True b=False print(int(a)) print(int(b))1 0数据结构 列表(list)a=[] #空列表 print (a) b=[1,8,3,6] print (b[0]) b.append(7) #从列表尾部不断添加新对象 print (b[-1]) b.reverse() #反转列表中元素 print (b) b.sort() #列表排序 print (b) f=len(b) #列表的长度 print (f)[] 1 7 [7, 6, 3, 8, 1] [1, 3, 6, 7, 8] 5可以通过help函数与dir函数查看模块内所有的特性print (dir(list)) #特殊方法与公共方法列表 print (help(list)) #返回各个方法的具体定义元组(tuple) 元组与列表类似,区别在于一旦创建不能对其进行修改 元组可应用于不可变量的存放,既快又安全,需要修改时可以先转化为列表a=(2,3,2,5,1) print(a.append(6)) b=list(a) b.append(8) print (b)字典(dict) 字典(dict)是键与值一一映射的一种数据结构 {key:value}a={"one":21,"two":43,"three":54} #三个元素的字典 print (a) print (list(a.keys())) #返回字典a的所有键 print (list(a.values())) #f返回字典a的所有值 print (a["one"]) #返回特定键对应的值{'one': 21, 'two': 43, 'three': 54} ['one', 'two', 'three'] [21, 43, 54] 21列表解析:目的是简化代码a=[1,2,3,5] b=[each**2 for each in a] #列表解析 print (b) c={str(key1):value1 for key1,value1 in zip(a,b)} #列表解析 print (c) d=(each1*2 for each1 in a) print (d) #返回一个生成器 print (list(d))[1, 4, 9, 25] {'1': 1, '2': 4, '3': 9, '5': 25} at 0x00000225B3EBA780> [2, 4, 6, 10]运算符算术运算符: 加(+),减(-),乘(*),除(/),乘方(**),求余(%) 比较运算符:等于(==),不等于(!=),大于(>),小于(<),大于等于(>=)等 赋值运算符:=,+=,*=等 逻辑运算符:与(and), 或(or),非(not) 成员运算符:in, not in条件语句if (1>2) and (3>2) : print ("代码块1") else: print ("3") score=70 if 0<=score<60: print ("不及格") elif 60<=score<80: print ("良好") elif 80<=score<=100: print ("优秀") else: print ("输入错误,请输入0到100之间的数字!")良好循环语句for i1 in range(1,8,2): print (i1) flag=True while flag: #第二种循环结构 print ("It's true!") flag=False print ("It's done!") a=[1,3,4,5,86,34] for each in a: if each==max(a): print (each) break #跳出最近一层的循环 else: print (each) continue #终止本轮循环,并进入下轮循环 print ("我不是最大值!")1 3 4 5 86函数(面向过程编程)def listSquare(inputList): #形参 """ 描述:对列表中数字平方并返回列表 输入:列表 输出:平方后的列表 例子:listSquare([1,2,3]) """ b=[each**2 for each in inputList] return b #返回值,可返回多个类型数据 print (help(listSquare)) #查看帮助文档 print (listSquare([1,2,3])) print (listSquare(["a",2,3])) #引发程序异常异常处理def listSquare(inputList): #形参 """ 描述:对列表中数字平方并返回列表 输入:列表 输出:平方后的列表 例子:listSquare([1,2,3]) """ try: b=[each**2 for each in inputList] #可能出现异常的代码块 except TypeError: 处理,可以指出异常的类型,如TypeError print ("请输入数字型列表!") return #无正确返回,则返回None else: #没有异常,则执行如下代码 return b #返回值,可返回多个类型数据 print (listSquare(["a",1,2])) print (listSquare([1,2,3]))类(面向对象编程的核心) **类**=属性(数据)+方法(函数) **OOP特征**:封装(只提供借口),继承(避免重复造轮子),多态(方法名相   同,具体实现不同)等 **以做菜为例:** **属性:**各种原材料与工具 **方法:**切菜,炒菜等具体每一项工作 **封装:**炒菜机(输入原材料,调用每一个方法,出菜,不需要知道细节 **继承:**多功能炒菜机(炒青菜,土豆炖牛肉等菜) **多态:**子类炒青菜与子类土豆炖牛肉都有切菜这个共同方法,但具体操作不同import numpy as np import math class IMs():#没有继承自任何类,默认继承自基类object """ 地震动强度指标计算类 初始化参数:单列加速度时程acc(g),采样间隔t(s) 方法: PGA()--返回峰值加速度值(g) PGV()--返回峰值速度值(cm/s) """ def __init__(self,acc,t): #类的初始化方法,内置特殊方法 #acc单列加速度时程(g),t时间间隔(s) self.acc=acc #self是类的实例化铭牌,在类内起到通信的作用 self.t=t self.num=len(self.acc) def PGA(self): #返回acc的最大峰值加速度(PGA)(g) pga=b=np.fabs(self.acc).max() return pga def __AcctoVelocity (self): #类的私有化方法(前置两下划线) #将加速度(g)转换为速度(cm/s) vel=[0] acc=self.acc for i in range(self.num-1): velocity=(acc[i]+acc[i+1])*self.t/2*981+vel[-1] vel.append(velocity) return vel def PGV (self): #返回Velocity的最大峰值速度(PGV)(cm/s) veloc=self.__AcctoVelocity() pgv=b=np.fabs(veloc).max() return pgv if __name__=='__main__':#测试用 acc=np.loadtxt("acceleration.txt")#txt数据的加载 imInstance=IMs(acc,0.01)#强度指标类的实例化 pga=imInstance.PGA()#调用类中的方法计算PGA pgv=imInstance.PGV() print ("PGA=",round(pga,3),"g") #保留3位小数 print(pgv) print ("PGV=",round(pgv,3),"cm/s")python学习资料 **常用网站:** 
[matplotlib](https://matplotlib.org/)(第三方绘图库) [Numpy](https://numpy.org/)(科学计算基本库) [Scipy](https://www.scipy.org/)(集成了数学,科学及工程常用库) [SymPy](https://www.sympy.org/en/index.html)(符号运算库) [Python教程](https://docs.python.org/zh-cn/3/tutorial/index.html)(官方教程中文版) [Python标准库](https://docs.python.org/zh-cn/3/library/index.html)(Python内置标准库) [wxPython](https://www.wxpython.org/)(图形用户界面库) [pyQt](https://wiki.python.org/moin/PyQt)(图形用户界面库) [pypi](https://pypi.org/search/?q=&o=&c=Operating+System+%3A%3A+Microsoft+%3A%3A+Windows)(大量第三方包库) [Scikit-learn](https://scikit-learn.org/stable/)(python机器学习库) [PyOpenGL](http://pyopengl.sourceforge.net/)(基于Python的图形开发库) [stackoverflow](https://stackoverflow.com/)(程序员交流答疑网站) [台大李宏毅机器学习](https://study.163.com/course/introduction/1208946807.htm)(中文机器学习经典课程) [斯坦福吴恩达机器学习](https://study.163.com/course/courseMain.htm?courseId=1004570029)(英文机器学习经典课程) [斯坦福吴恩达深度学习](https://mooc.study.163.com/smartSpec/detail/1001319001.htm)(英文深度学习经典课程) [github上python资源](https://github.com/Junjun1guo?tab=repositories) (收集了一些不错的python教程) [github](https://github.com/)(代码管理及发布网站,可以免费搭建个人博客) [MCMC](https://twiecki.io/blog/2015/11/10/mcmc-sampling/)(基于马尔科夫蒙特卡洛的贝叶斯模型) [异步社区](https://www.epubit.com/user/)(编程电子书) [图灵社区](https://www.ituring.com.cn/)(编程电子书) **推荐书籍:**                           python语言在桥梁工程中的应用 基于python语言的PEER网站地震波处理   主要涉及到python文件的操作,列表,字典及列表解析等   完整的项目见[github-PEERMotionFormatProcess-with-Python](https://github.com/Junjun1guo/PEERMotionFormatProcess-with-Python)########################################################################## import os import numpy as np import shutil ########################################################################## def peerMotionProcess (fileName): """ Processing each file and return the processed resutls(percolumn data list, timestep and pointers number) """ accList=[] fopen=open(fileName) #打开文件 saveList1=[] saveList2=[] lines=fopen.readlines() #读取所有的行并返回列表 for line_counter,line in enumerate(lines): #遍历行号及每行内容 curLine=line.strip().split(" ") #去除首尾空格,并按行内空格划分 removeSpace=[x for x in curLine if x!=""] #去除行内空格 if line_counter<=3: #从前四行提取地震波点数与采样间隔 [saveList1.append(x) for x in removeSpace] #前四行内容列表 else: [saveList2.append(x) for x in removeSpace] #地震波时程点 fopen.close() #关闭文件 indexNumber=saveList1.index("NPTS=") #索引位置 indexDt=saveList1.index("DT=") pointNumber=saveList1[indexNumber+1].split(",")[0] #提取数字 deltaT=saveList1[indexDt+1].split(",")[0] saveList=[ float(x) for x in saveList2] return pointNumber,deltaT,saveList #返回地震波点数,采样间隔及时程列表 ########################################################################## if __name__=='__main__': direction=["E","N","V"] postFixList=[".AT2",".VT2",".DT2"] timeFileDict={".AT2":"Acceleration",".VT2":"Velocity",".DT2":"Displacement"} #Generating all saving file paths midirList=[(topFile,secFile) for topFile in timeFileDict.values() \ for secFile in direction] #Clearing existing files [shutil.rmtree(x) for x in timeFileDict.values()] for toplevel,seclevel in midirList: os.makedirs(toplevel+"/"+seclevel)#generate save files fileListE=[] fileListN=[] fileListV=[] # text file read and process fileNameOpen=open("FileName.txt") for line in fileNameOpen.readlines(): curLine=line.strip().split("\t") #分割同一行各个分量 fileListE.append(curLine[0].split(".AT2")[0])#水平E向分量名称 fileListN.append(curLine[1].split(".AT2")[0])#水平N向分量名称 fileListV.append(curLine[2].split(".AT2")[0])#竖向V分量名称 fileNameOpen.close() finalLengthList=[] finaltimeList=[] finalFileNameList=[] for i1 in range(len(fileListE)): #遍历每条地震波 
fileNameENV=[{fileListE[i1]:direction[0]},{fileListN[i1]:direction[1]},\ {fileListV[i1]:direction[2]}]#将各个分量地震文件名与方向标签对应 caseList=[(xx,yy) for xx in fileNameENV for yy in postFixList]#双循环实现每个 #工况,总共9个,加速度,速度及位移文件夹下建E,N,V三个分量文件夹 lengthList=[] #每条波点数列表 timeList=[]#每条波采样间隔 for eachCase in caseList: fileDirection=eachCase[0].values()[0]# eachCase[0]是字典, #选取的是方向E,N或者V filePrefix=eachCase[0].keys()[0].strip()#移除分量名首尾空格 loadFilePath=os.path.join("downLoadPeerMotion/",filePrefix+eachCase[1]) #地震动分量的相对路径 if filePrefix=="NoFile":#判断地震动分量文件是否存在 accPointNum=1e8 #对于不存在的文件设其点数为一大数 accDeltaT=1e8 accTimeHistory=[0.0] else: accResult=peerMotionProcess (loadFilePath) #调用地震波处理函数 accPointNum=int(accResult[0]) accDeltaT=float(accResult[1]) accTimeHistory=accResult[2] lengthList.append(accPointNum) timeList.append(accDeltaT) cwd=os.getcwd() #返回当前工作目录 savePathName=os.path.join(cwd,timeFileDict[eachCase[1]]+"/"+\ fileDirection+"/",filePrefix+".txt") #文件保存路径 np.savetxt(savePathName,accTimeHistory,fmt="%f") #保存处理后的地震波 finalLengthList.append(min(lengthList)) #选取3个分量中最小值 finaltimeList.append(min(timeList)) np.savetxt("MotonLength.txt",finalLengthList,fmt="%d") np.savetxt("deltaT.txt",finaltimeList,fmt="%f")基于模拟退火与三次B样条插值曲线的斜拉桥索力优化   主要涉及python与Opensees的交互,异常的处理等   完整的项目见[github-Cable-force-optimization-of-a-curved-cable-stayed-bridge](https://github.com/Junjun1guo/Cable-force-optimization-of-a-curved-cable-stayed-bridge) 基于wxPython的地震波处理GUI开发   主要涉及python桌面程序的开发等   完整的项目见[github-seismicWaveAnalysis](https://github.com/Junjun1guo/seismicWaveAnalysis) 基于wxPython与PyOpenGL的有限元软件开发   主要涉及wxPython,PyOpenGL,类及设计模式等   完整的项目见[git-2DTrussStructureAnalysisWithPython](https://github.com/Junjun1guo/2DTrussStructureAnalysisWithPython) 附录 Jupyterlab 安装 1. 进入python安装目录Scripts文件夹,shift+ctrl+鼠标右键打开powershell窗口,或者在当前文件栏输入cmd进入命令行窗口       2. 在命令行输入:pip3 install jupyterlab开始自动安装 3. 在jupyter文件所在目录栏输入cmd进入命令行,输入jupyter lab打开,进入jupyterlab界面。关于jupyterlab的更多知识可参考[官方帮助文档](https://jupyterlab.readthedocs.io/en/latest/)。    python包的开发及发布 1. 登录到[pypi](https://pypi.org/)注册账户,记住自己的用户名与密码,上传包的时候需要用到。 2. 准备待发布的包* 包的格式 包括setup.py文件及包文件(包含__init__.py的文件,可以什么也不写) setup.py文件#setup.py from distutils.core import setup from setuptools import setup, find_packages #导入发布模块 setup( name = 'testPackage', #包的名称 version = '0.1.2', #版本号 keywords = ('seismic', 'signal'), #包的关键字 description = 'seismic wave analysis', #对包的描述 license = 'MIT License', #许可证 author = '',#作者信息 author_email = '', #邮箱 url='https://github.com/Junjun1guo/seismicWaveAnalysis',#包的链接 packages = find_packages(),#需要具有__init__.py文件的文件夹 install_requires=['wxPython','numpy'],#列出包依赖的包 platforms = 'any', #跨平台 )1.3 Some Hidden Messages are More Surprising than OthersSee https://stepik.org/lesson/3/step/1?course=Stepic-Interactive-Text-for-Week-1&unit=8232 for context Description of the problemsThere are two problem prompts in this session:1. reverse complement2. list the index locations of a sub-sequence in a genomeFor 1 above I choose here to implement a BioPython class, Bio.Seq -- in the end not really worth the trouble of reviewing all the subtle tics of how the specific Class data structure works.# reverse complement function. 
first solution -- cheat # use the biopython built in from Bio.Seq import Seq from Bio.Alphabet import IUPAC help(Seq) allowed = "ATCG" test_unallowed = "QPOCJ" allowed_test = Seq(allowed, alphabet=IUPAC.IUPACUnambiguousDNA()) unallowed_test_seq = Seq(test_unallowed, alphabet=IUPAC.IUPACUnambiguousDNA) unallowed_test_seq unallowed_test_seq.reverse_complement() allowed_test.reverse_complement() allowed_testThe first exercise is to make a reverse complement generatorGoing to use the Seq object methods already available in BioPythondef reverse_complement_w_biop(input_string): """Convert IUPAC unambiguous DNA string input to reverse complement """ alphabet = set(IUPAC.unambiguous_dna.letters) input_upper = input_string.upper() if not set(input_upper).issubset(alphabet): raise ValueError("input text must be IUPAC unambiguous DNA sequence") seq_obj = Seq(input_string, alphabet=IUPAC.IUPACUnambiguousDNA) ouput_seq = str(seq_obj.reverse_complement()) return output_seq reverse_complement_w_biop('ATCG') def reverse_complement_w_biop(input_string): """Convert IUPAC unambiguous DNA string input to reverse complement """ alphabet = set(IUPAC.unambiguous_dna.letters) input_upper = input_string.upper() if not set(input_upper).issubset(alphabet): raise ValueError("input text must be IUPAC unambiguous DNA sequence") seq_obj = Seq(input_string, alphabet=IUPAC.IUPACUnambiguousDNA()) ouput_seq = str(seq_obj.reverse_complement()) return output_seq reverse_complement_w_biop('ATCG') def reverse_complement_w_biop(input_string): """Convert IUPAC unambiguous DNA string input to reverse complement """ alphabet = set(IUPAC.unambiguous_dna.letters) input_upper = input_string.upper() if not set(input_upper).issubset(alphabet): raise ValueError("input text must be IUPAC unambiguous DNA sequence") seq_obj = Seq(input_string, alphabet=IUPAC.IUPACUnambiguousDNA()) seq_obj.reverse_complement() return str(seq_obj) reverse_complement_w_biop('ATCG') def reverse_complement_w_biop(input_string): """Convert IUPAC unambiguous DNA string input to reverse complement """ alphabet = set(IUPAC.unambiguous_dna.letters) input_upper = input_string.upper() if not set(input_upper).issubset(alphabet): raise ValueError("input text must be IUPAC unambiguous DNA sequence") seq_obj = Seq(input_string, alphabet=IUPAC.IUPACUnambiguousDNA()) new_obj = seq_obj.reverse_complement() return str(new_obj) reverse_complement_w_biop('ATCG') def reverse_complement_w_biop(input_string): """Convert IUPAC unambiguous DNA string input to reverse complement """ alphabet = set(IUPAC.unambiguous_dna.letters) input_upper = input_string.upper() if not set(input_upper).issubset(alphabet): raise ValueError("input text must be IUPAC unambiguous DNA sequence") seq_obj = Seq(input_string, alphabet=IUPAC.IUPACUnambiguousDNA()) return str(seq_obj.reverse_complement()) reverse_complement_w_biop('GCTAGCT') reverse_complement_w_biop('ATCG') with open('dataset_3_2.txt', 'r') as f: input_test_one = f.read() input_args_list = input_test_one.splitlines() len(input_args_list) type(input_args_list[0]) len(input_args_list[0]) # this is using the above code on the test sequence for the exercise reverse_complement_w_biop(input_args_list[0])Exercise 2: return index locations of a pattern in a larger sequenceHave to think a bit more about how I want to do this# do it with the brute force O(N) method def locate_pattern_in_genome(pattern, genome): """see https://stepik.org/lesson/3/step/5?course=Stepic-Interactive-Text-for-Week-1&unit=8232 Using a brute force loop """ genome_length = 
len(genome) pattern_length = len(pattern) index_list = [] for i in range(genome_length - pattern_length): if genome[i: i + pattern_length] == pattern: index_list.append(i) return index_list locate_pattern_in_genome('ATA', 'GACGATATACGACGATA') locate_pattern_in_genome('ATAT', 'GATATATGCATATACTT') with open('dataset_3_5.txt', 'r') as f: test_input_two = f.read() test_input_args = test_input_two.splitlines() len(test_input_args) len(test_input_args[0]) len(test_input_args[1]) output_list = locate_pattern_in_genome( test_input_args[0], test_input_args[1] ) ' '.join([str(item) for item in output_list]) with open('Vibrio_cholerae.txt', 'r') as f: vibrio_genome_string = f.read() len(vibrio_genome_string) # are there line returns? print len(vibrio_genome_string.splitlines()) print vibrio_genome_string[-8:] genome_output_list = locate_pattern_in_genome('CTTGATCAT', vibrio_genome_string) len(genome_output_list) ' '.join([str(item) for item in genome_output_list]) # this is the end of the 1.3 section of the course materaldefdef fn(arg1: float, arg2: int, *args, arg3=None **kwargs) -> str: "Summary" val = arg1 + arg2 for arg in args: val += arg out_type = kwargs["output_type"] if out_type == "float": retval = float(val) else: retval = int(val) return retvalData-driven Approach to CFP - Mining PyData Conferences Data-driven approach is good in the most cases. Most of us have seen people use this approach for some of the business decisions, or something you will care much more than a minor daily decision, but what if your toolkit is so fast and powerful that you can use it easily even for daily tasks.In this article, I'm going to explain one of such use-cases, and introduce you one of the tools I use for some of my tasks. So starting with the problem: I was interested to visit a conference, in my case it was one of the local PyData conferences. If you visit a conference you most likely focus on content, so I wanted to have an analysis of the content of the conference. 
From another side, I was also interested to see, how the focus of the conference was changing over the time and for the very last point, try to find out would it be the kind of conference where I can share my knowledge and experience of using python for data-related tasks.from IPython.display import HTML HTML('') from bs4 import BeautifulSoup import urllib2 # past events # TODO: fetch this data from past events page conferences = ['nyc2014', 'berlin2014', 'sv2014', 'ldn2014', 'nyc2013'] abstract_url = "http://pydata.org/%s/abstracts/" conf_data = {} # Collecting data about abstracts for conference in conferences: print "loading data for %s conference" % conference raw = urllib2.urlopen(abstract_url % conference).read() soup = BeautifulSoup(raw) abstracts = [abstract.get_text().strip() for abstract in soup.find_all(class_="accordion-inner")] titles = [title.get_text().strip() for title in soup.find_all(class_="accordion-toggle")] # speakers = [speaker.get_text().strip() for speaker in soup.select(".accordion-heading h5 a")] conf_data[conference] = {} conf_data[conference]['abstracts'] = abstracts conf_data[conference]['titles'] = titles # conf_data[conference]['speakers'] = speakers conf_data['nyc2014']['titles'][:20]I use Python Pandas to structure all parsed data into dataframesimport pandas as pd pydata = pd.DataFrame() for conf in conf_data: conf_dataframe = pd.DataFrame.from_dict(conf_data[conf]) conf_dataframe['conference'] = conf conf_dataframe['city'] = conf[:-4] conf_dataframe['year'] = int(conf[-4:]) print pd.DataFrame.head(conf_dataframe) pydata = pydata.append(conf_dataframe)abstracts \ 0 1 The The Greater Plains Collaborative (GPC) is ... 2 To a lot of people, Facebook is a website for ... 3 The ad targeting team at Yelp is tasked with p... 4 titles conference city year 0 sv2014 sv 2014 1 Using Python and Paver to Control a Large Medi... sv2014 sv 2014 2 A Full Stack Approach to Data Visualization: T... sv2014 sv 2014 3 Ad Targeting at Yelp sv2014 sv 2014 4 Analyzing Satellite Images With Python Scienti... sv2014 sv 2014 abstracts \ 0 The Python data ecosystem has grown beyond the... 1 In this talk I will give an overview of Random... 2 Clustering data is a fundament[...]Interesting to see how many talks we had from year to year, also that's another point to check that data looks close to what we expectprint 'records in dataframe %i' % len(pydata) pydata.groupby(['conference']).count(1).sort('year', ascending=False)records in dataframe 233**Seems like number of talks is slowly growing from 40 during nyc2013 and up to 50 during the last pydata of 2014** Now we have all the data. 
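A side note on the code above: it is written for Python 2 (`print` statements, `urllib2`) and uses `DataFrame.append`, which has since been removed from pandas. A rough modern equivalent of the dataframe-building step, assuming the same `conf_data` dictionary, is:

```python
import pandas as pd

# Build one DataFrame per conference, then concatenate them
# (pandas >= 2.0 no longer has DataFrame.append, so pd.concat is used).
frames = []
for conf, payload in conf_data.items():
    frame = pd.DataFrame.from_dict(payload)
    frame["conference"] = conf
    frame["city"] = conf[:-4]
    frame["year"] = int(conf[-4:])
    frames.append(frame)

pydata = pd.concat(frames, ignore_index=True)
print(f"records in dataframe {len(pydata)}")
```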
Let's try to analyse it So what size of proposal do they usually have?abstract_lens = [len(abst) for abst in pydata['abstracts'] if len(abst) > 44] print abstract_lens print %matplotlib inline import matplotlib.pyplot as plt plt.hist(abstract_lens) pd.DataFrame(abstract_lens).describe()What about word corpus from different yearsimport nltk stop = nltk.corpus.stopwords.words('english') text = {} words = {} stop_list = ["ll", "II", "ll", "http", "://", "e", "g", "2", "0"] for conference in conf_data: raw = " ".join(conf_data[conference]['abstracts']) tokens = nltk.WordPunctTokenizer().tokenize(raw) text[conference] = nltk.Text(tokens) words[conference] = [w.lower() for w in text[conference] if w.lower() not in stop_list] words[conference] = [w for w in words[conference] if w not in stop] words[conference] = filter(lambda word: word not in u'%,-:()$\/;?.’–“”*\'[]', words[conference]) words[conference] = [w for w in words[conference] if w not in stop_list]Let's check collocations in the abstracts. Collocations are expressions of multiple words which commonly co-occur.for conference in text: print conference print text[conference].collocations() printsv2014 http ://; nearest neighbor; machine learning; Reference Model; neighbor algorithm; IPython Notebook; big data; open source; make predictions; data analysis; Big Data; github repository; current state; means clustering; visualization libraries; https ://; compiler optimizations; accepting payments; block fraud; concise construction None ldn2014 http ://; machine learning; :// www; data processing; open source; Matrix Factorisation; certain types; public clouds; rent ratios; financial industry; PyData Boston; blocking technique; cloud computing; exact solution; includes two; presentation focuses; drug development; graphical plotting; quantum chemistry; wide range None berlin2014 http ://; machine learning; Big Data; Quantified Self; self tracking; Semantic Web; Coming Soon; among others; open source; data analysis; case study; Hadoop jobs; :// www; working knowledge; predictive model; time permits; Add tranformations; Machine Learning; Operating System; Pythonista interested Non[...]Words used in abstractsnumwords = {} uniwords = {} for conference in text: numwords[conference] = len(text[conference]) uniwords[conference] = len(set(text[conference])) for conference in reversed(conferences): print "%s: \tnumwords - %i, unique - %i" % \ (conference, numwords[conference], uniwords[conference]) plt.bar(range(len(uniwords)), [uniwords[conference] for conference in reversed(conferences)], align='center', ) plt.xticks(range(len(uniwords)), [conference for conference in reversed(conferences)]) plt.show()Seems like the number of unique words had its peak during the sv2014 and right now is pretty stable and even slowly decreasing Bigramsfrom nltk.collocations import * bigram_measures = nltk.collocations.BigramAssocMeasures() for conference in reversed(conferences): print "Bigrams " + str(conference) finder = BigramCollocationFinder.from_words(words[conference]) scored = finder.score_ngrams(bigram_measures.raw_freq) print pd.DataFrame(scored[:25]) print "\n\n"Bigrams nyc2013 0 1 0 (machine, learning) 0.008158 1 (scikit, learn) 0.005152 2 (coming, soon) 0.003435 3 (data, analysis) 0.002147 4 (chip, design) 0.001717 5 (data, science) 0.001717 6 (image, features) 0.001717 7 (ipython, notebook) 0.001717 8 (open, source) 0.001717 9 (scidb, py) 0.001717 10 (data, scientists) 0.001288 11 (learning, algorithms) 0.001288 12 (learning, tasks) 0.001288 13 (models, 
like) 0.001288 14 (python, data) 0.001288 15 (talk, covers) 0.001288 16 (across, organization) 0.000859 17 (analytics, capabilities) 0.000859 18 (array, based) 0.000859 19 (array, oriented) 0.000859 20 (asynchronous, o) 0.000859 21 (asynchronous, programming) 0.000859 22 ([...]Year over Yearresult = pd.DataFrame() for conference in reversed(conferences): finder = BigramCollocationFinder.from_words(words[conference], window_size = 2) ignored_words = nltk.corpus.stopwords.words('english') finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words) scores = finder.score_ngrams(bigram_measures.raw_freq) if len(result) == 0: result = pd.DataFrame(scores, columns=["ngram", str(conference)]) else: result = result.merge(pd.DataFrame(scores, columns=["ngram", str(conference)])) print result[:15] transposed = result[:10].transpose() headers = transposed[0:1:].values print headers %matplotlib inline new_transposed = transposed[1::] new_transposed.columns = headers[0] new_transposed.plot(figsize=(16,12)) print result[:15].sort(['nyc2014'], ascending=[0])ngram nyc2013 ldn2014 sv2014 berlin2014 nyc2014 0 (machine, learning) 0.008158 0.001969 0.002550 0.002329 0.004188 1 (scikit, learn) 0.005152 0.001312 0.002295 0.001294 0.003290 3 (data, science) 0.001717 0.001312 0.001530 0.001035 0.003290 5 (open, source) 0.001717 0.001312 0.002295 0.001294 0.002692 8 (big, data) 0.000429 0.000656 0.002805 0.001811 0.001496 10 (data, processing) 0.000429 0.001969 0.001020 0.001811 0.001197 11 (data, sets) 0.000429 0.000656 0.000765 0.000259 0.001197 2 (data, analysis) 0.002147 0.000656 0.001530 0.002070 0.000897 4 (ipython, notebook) 0.001717 0.000984 0.002040 0.001294 0.000598 9 (data, driven) 0.000429 0.000656 0.000510 0.001035 0.000598 13 (high, performance) 0.000429 0.000984 0.001785 0.000259 0.000598 6 (python, data) 0.001288 0.000328 0.000765 0.000259 0.000299 7 (user,[...]Solving Knapsack Problem with Amazon SageMaker RL Knapsack is a canonical operations research problem. We start with a bag and a set of items. We choose which items to put in the bag. Our objective is to maximize the value of the items in the bag; but we cannot put all the items in as the bag capacity is limited. The problem is hard because the items have different values and weights, and there are many combinations to consider.In the classic version of the problem, we pick the items in one shot. But in this baseline, we instead consider the items one at a time over a fixed time horizon. Problem StatementWe start with an empty bag and an item. We need to either put the item in the bag or throw it away. If we put it in the bag, we get a reward equal to the value of the item. If we throw the item away, we get a fixed penalty. In case the bag is too full to accommodate the item, we are forced to throw it away.In the next step, another item appears and we need to decide again if we want to put it in the bag or throw it away. This process repeats for a fixed number of steps.Since we do not know the value and weight of items that will come in the future, and the bag can only hold so many items, it is not obvious what is the right thing to do. 
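To make this sequential decision process concrete, here is a rough, self-contained sketch in the Gym `reset()`/`step()`/`render()` style. It is only an illustration: the item distributions, the penalty value and the observation layout are made up here (volume is ignored for brevity), and the actual environment used in this example is the `KnapSackMediumEnv` class in `knapsack_env.py`.

```python
import random

class ToyKnapsackEnv:
    """Illustrative Gym-style knapsack environment (not KnapSackMediumEnv)."""

    def __init__(self, weight_capacity=50, episode_len=20, penalty=-1.0, seed=None):
        self.weight_capacity = weight_capacity
        self.episode_len = episode_len  # the example above uses a 20-step horizon
        self.penalty = penalty
        self.rng = random.Random(seed)

    def _new_item(self):
        # A new random item appears each step; its value/weight are unknown in advance.
        return {"value": self.rng.uniform(1, 10), "weight": self.rng.uniform(1, 10)}

    def reset(self):
        self.t = 0
        self.bag_weight = 0.0
        self.bag_value = 0.0
        self.item = self._new_item()
        return self._obs()

    def _obs(self):
        return [self.weight_capacity, self.bag_weight, self.bag_value,
                self.item["value"], self.item["weight"], self.episode_len - self.t]

    def step(self, action):
        # action 0: throw the item away, action 1: try to put it in the bag
        fits = self.bag_weight + self.item["weight"] <= self.weight_capacity
        if action == 1 and fits:
            reward = self.item["value"]
            self.bag_weight += self.item["weight"]
            self.bag_value += self.item["value"]
        else:
            reward = self.penalty  # thrown away, or forced out because it did not fit
        self.t += 1
        done = self.t >= self.episode_len
        self.item = self._new_item()
        return self._obs(), reward, done, {}

    def render(self):
        print(f"t={self.t} bag value={self.bag_value:.1f} bag weight={self.bag_weight:.1f}")

# Random-policy rollout, just to show the interface:
env = ToyKnapsackEnv(seed=0)
obs, done, total = env.reset(), False, 0.0
while not done:
    obs, reward, done, _ = env.step(random.choice([0, 1]))
    total += reward
print("episode return:", round(total, 2))
```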
At each time step, our agent is aware of the following information:- Weight capacity of the bag- Volume capacity of the bag- Sum of item weight in the bag- Sum of item volume in the bag- Sum of item value in the bag- Current item weight- Current item volume- Current item value- Time remainingAt each time step, our agent can take one of the following actions:- Put the item in the bag- Throw the item awayAt each time step, our agent gets the following reward depending on their action:- Item value if you put it in the bag and bag does not overflow- A penalty if you throw the item away or if the item does not fit in the bagThe time horizon is 20 steps. You can see the specifics in the `KnapSackMediumEnv` class in `knapsack_env.py`. There are a couple of other classes that provide an easier (`KnapSackEnv`) and a more difficult version (`KnapSackHardEnv`) of this problem. Using Amazon SageMaker RLAmazon SageMaker RL allows you to train your RL agents in cloud machines using docker containers. You do not have to worry about setting up your machines with the RL toolkits and deep learning frameworks. You can easily switch between many different machines setup for you, including powerful GPU machines that give a big speedup. You can also choose to use multiple machines in a cluster to further speedup training, often necessary for production level loads. Pre-requsites ImportsTo get started, we'll import the Python libraries we need, set up the environment with a few prerequisites for permissions and configurations.import sagemaker import boto3 import sys import os import glob import re import subprocess from IPython.display import HTML import time from time import gmtime, strftime sys.path.append("common") from misc import get_execution_role, wait_for_s3_object from sagemaker.rl import RLEstimator, RLToolkit, RLFrameworkSettingsYou can run this notebook from your local host or from a SageMaker notebook instance. In both of these scenarios, you can run the following in either `local` or `SageMaker` modes. The `local` mode uses the SageMaker Python SDK to run your code in a local container before deploying to SageMaker. This can speed up iterative testing and debugging while using the same familiar Python SDK interface. You just need to set `local_mode = True`.# run in local mode? local_mode = False # create unique job name job_name_prefix = "rl-knapsack" # S3 bucket sage_session = sagemaker.session.Session() s3_bucket = sage_session.default_bucket() print("Using s3 bucket %s" % s3_bucket) # create this bucket if it doesn't exist s3_output_path = "s3://{}/".format(s3_bucket) # SDK appends the job name and output folderInstall docker for `local` modeIn order to work in `local` mode, you need to have docker installed. When running from you local instance, please make sure that you have docker or docker-compose (for local CPU machines) and nvidia-docker (for local GPU machines) installed. 
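When running in `local` mode, a small pre-flight check such as the following can catch a missing Docker installation before a training job fails. This is a hypothetical helper, not part of the SageMaker example code, and it assumes the `local_mode` flag set in the cell above.

```python
import subprocess

def docker_available():
    """Hypothetical helper: True if the `docker` CLI responds to `docker info`."""
    try:
        subprocess.run(["docker", "info"], check=True, capture_output=True)
        return True
    except (FileNotFoundError, subprocess.CalledProcessError):
        return False

if local_mode and not docker_available():
    print("Docker does not appear to be available; local mode will not work.")
```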
Alternatively, when running from a SageMaker notebook instance, you can simply run the following script Note, you can only run a single local notebook at one time.if local_mode: !/bin/bash ./common/setup.shCreate an IAM roleEither get the execution role when running from a SageMaker notebook `role = sagemaker.get_execution_role()` or, when running locally, set it to an IAM role with `AmazonSageMakerFullAccess` and `CloudWatchFullAccess permissions`.try: role = sagemaker.get_execution_role() except: role = get_execution_role() print("Using IAM role arn: {}".format(role))Setup the environmentThe environment is defined in a Python file called `knapsack_env.py` in the `./src` directory. It implements the init(), step(), reset() and render() functions that describe how the environment behaves. This is consistent with Open AI Gym interfaces for defining an environment.- Init() - initialize the environment in a pre-defined state- Step() - take an action on the environment- reset()- restart the environment on a new episode- render() - get a rendered image of the environment in its current state Configure the presets for RL algorithmThe presets that configure the RL training jobs are defined in the `preset-knapsack-clippedppo.py` in the `./src` directory. Using the preset file, you can define agent parameters to select the specific agent algorithm. You can also set the environment parameters, define the schedule and visualization parameters, and define the graph manager. The schedule presets will define the number of heat up steps, periodic evaluation steps, training steps between evaluations.These can be overridden at runtime by specifying the RLCOACH_PRESET hyperparameter. Additionally, it can be used to define custom hyperparameters.!pygmentize src/preset-knapsack-clippedppo.pyWrite the Training CodeThe training code is in the file `train-coach.py` which is also the `./src` directory.!pygmentize src/train-coach.pyTrain the model using Python SDK/ script modeIf you are using local mode, the training will run on the notebook instance. When using SageMaker for training, you can select a GPU or CPU instance. The RLEstimator is used for training RL jobs.- Specify the source directory where the environment, presets and training code is uploaded.- Specify the entry point as the training code- Specify the choice of RL toolkit and framework. This automatically resolves to the ECR path for the RL Container.- Define the training parameters such as the instance count, job name, S3 path for output and job name.- Specify the hyperparameters for the RL agent algorithm. The RLCOACH_PRESET can be used to specify the RL agent algorithm you want to use.- Define the metrics definitions that you are interested in capturing in your logs. These can also be visualized in CloudWatch and SageMaker Notebooks.if local_mode: instance_type = "local" else: instance_type = "ml.m4.4xlarge" estimator = RLEstimator( entry_point="train-coach.py", source_dir="src", dependencies=["common/sagemaker_rl"], toolkit=RLToolkit.COACH, toolkit_version="1.0.0", framework=RLFramework.TENSORFLOW, role=role, instance_type=instance_type, instance_count=1, output_path=s3_output_path, base_job_name=job_name_prefix, hyperparameters={ "RLCOACH_PRESET": "preset-knapsack-clippedppo", "rl.agent_params.algorithm.discount": 0.9, "rl.evaluation_steps:EnvironmentEpisodes": 8, }, ) estimator.fit(wait=local_mode)Store intermediate training output and model checkpointsThe output from the training job above is stored on S3. 
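The cells that follow use the `wait_for_s3_object` helper to pull artifacts down; if you only want to see which keys a job wrote, a minimal boto3 sketch like the one below also works. This is a hypothetical helper, and it assumes the `s3_bucket` and `job_name` variables defined in the neighbouring cells.

```python
import boto3

def list_job_outputs(bucket, prefix):
    """Hypothetical helper: list the S3 keys written under a training job's prefix."""
    s3 = boto3.client("s3")
    response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)
    return [obj["Key"] for obj in response.get("Contents", [])]

# Example (assuming s3_bucket and job_name as in the surrounding cells):
# for key in list_job_outputs(s3_bucket, job_name):
#     print(key)
```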
The intermediate folder contains gifs and metadata of the trainingjob_name = estimator._current_job_name print("Job name: {}".format(job_name)) s3_url = "s3://{}/{}".format(s3_bucket, job_name) if local_mode: output_tar_key = "{}/output.tar.gz".format(job_name) else: output_tar_key = "{}/output/output.tar.gz".format(job_name) intermediate_folder_key = "{}/output/intermediate".format(job_name) output_url = "s3://{}/{}".format(s3_bucket, output_tar_key) intermediate_url = "s3://{}/{}".format(s3_bucket, intermediate_folder_key) print("S3 job path: {}".format(s3_url)) print("Output.tar.gz location: {}".format(output_url)) print("Intermediate folder path: {}".format(intermediate_url)) tmp_dir = "/tmp/{}".format(job_name) os.system("mkdir {}".format(tmp_dir)) print("Create local folder {}".format(tmp_dir))Visualization Plot metrics for training jobWe can pull the reward metric of the training and plot it to see the performance of the model over time.%matplotlib inline import pandas as pd csv_file_name = "worker_0.simple_rl_graph.main_level.main_level.agent_0.csv" key = intermediate_folder_key + "/" + csv_file_name wait_for_s3_object(s3_bucket, key, tmp_dir) csv_file = "{}/{}".format(tmp_dir, csv_file_name) df = pd.read_csv(csv_file) df = df.dropna(subset=["Training Reward"]) x_axis = "Episode #" y_axis = "Training Reward" plt = df.plot(x=x_axis, y=y_axis, figsize=(12, 5), legend=True, style="b-") plt.set_ylabel(y_axis) plt.set_xlabel(x_axis);Visualize the rendered gifsThe latest gif file found in the gifs directory is displayed. You can replace the tmp.gif file below to visualize other files generated.key = intermediate_folder_key + "/gifs" wait_for_s3_object(s3_bucket, key, tmp_dir) print("Copied gifs files to {}".format(tmp_dir)) glob_pattern = os.path.join("{}/*.gif".format(tmp_dir)) gifs = [file for file in glob.iglob(glob_pattern, recursive=True)] extract_episode = lambda string: int( re.search(".*episode-(\d*)_.*", string, re.IGNORECASE).group(1) ) gifs.sort(key=extract_episode) print("GIFs found:\n{}".format("\n".join([os.path.basename(gif) for gif in gifs]))) # visualize a specific episode gif_index = -1 # since we want last gif gif_filepath = gifs[gif_index] gif_filename = os.path.basename(gif_filepath) print("Selected GIF: {}".format(gif_filename)) os.system( "mkdir -p ./src/tmp_render/ && cp {} ./src/tmp_render/{}.gif".format(gif_filepath, gif_filename) ) HTML(''.format(gif_filename))Evaluation of RL modelsWe use the last checkpointed model to run evaluation for the RL Agent. Load checkpointed modelCheckpointed data from the previously trained models will be passed on for evaluation / inference in the checkpoint channel. 
In local mode, we can simply use the local directory, whereas in the SageMaker mode, it needs to be moved to S3 first.wait_for_s3_object(s3_bucket, output_tar_key, tmp_dir, timeout=1800) if not os.path.isfile("{}/output.tar.gz".format(tmp_dir)): raise FileNotFoundError("File output.tar.gz not found") os.system("tar -xvzf {}/output.tar.gz -C {}".format(tmp_dir, tmp_dir)) if local_mode: checkpoint_dir = "{}/data/checkpoint".format(tmp_dir) else: checkpoint_dir = "{}/checkpoint".format(tmp_dir) print("Checkpoint directory {}".format(checkpoint_dir)) if local_mode: checkpoint_path = "file://{}".format(checkpoint_dir) print("Local checkpoint file path: {}".format(checkpoint_path)) else: checkpoint_path = "s3://{}/{}/checkpoint/".format(s3_bucket, job_name) if not os.listdir(checkpoint_dir): raise FileNotFoundError("Checkpoint files not found under the path") os.system("aws s3 cp --recursive {} {}".format(checkpoint_dir, checkpoint_path)) print("S3 checkpoint file path: {}".format(checkpoint_path))Run the evaluation stepUse the checkpointed model to run the evaluation step.estimator_eval = RLEstimator( role=role, source_dir="src/", dependencies=["common/sagemaker_rl"], toolkit=RLToolkit.COACH, toolkit_version="1.0.0", framework=RLFramework.TENSORFLOW, entry_point="evaluate-coach.py", instance_count=1, instance_type=instance_type, base_job_name=job_name_prefix + "-evaluation", hyperparameters={ "RLCOACH_PRESET": "preset-knapsack-clippedppo", "evaluate_steps": 250, # 5 episodes }, ) estimator_eval.fit({"checkpoint": checkpoint_path})Homework 1 1.4 aDenote $r_{11}=|x_1-3|, r_{12}=|x_1|, r_{13}=|x_1+2|, r_{14}=|x_1-1|$, $r_{21}=|x_2-1|, r_{22}=|x_2+3|, r_{23}=|x_2-2|, r_{24}=|x_2-4|$.Note that $|r|=r^+ + r^-$, where $r^+ =\max\{x, 0\}, r^-=\max\{-x, 0\}$.Then the problem can be formulated as$$\begin{align*} & \min \sum_{i=1}^2\sum_{j=1}^4 r_{ij}^{+} + r^{-}_{ij} \\ & s.t. \\ & r_{ij}^{+}, r^{-}_{ij}\geq 0, \quad i=1,2; j=1,2,3,4 \end{align*}$$ b$$\begin{align*} \min\quad & 6(r_{11}^{+} + r^{-}_{11} + r_{21}^{+} + r^{-}_{21}) + 4(r_{12}^{+} + r^{-}_{12} + r_{22}^{+} + r^{-}_{22})\quad + \\ & 7(r_{13}^{+} + r^{-}_{13} + r_{23}^{+} + r^{-}_{23}) + 2(r_{14}^{+} + r^{-}_{14} + r_{24}^{+} + r^{-}_{24})\quad \\ & s.t. \\ & r_{ij}^{+}, r^{-}_{ij}\geq 0, \quad i=1,2; j=1,2,3,4 \end{align*}$$ cNote that $r = r^+ - r^-$, then the problem can be formulated as$$\begin{align*} & \min\sum_{i=1}^2\sum_{j=1}^4 r_{ij}^{+} + r^{-}_{ij}\\ & s.t. \\ & -1 \leq r_{1j}^{+}+ r^{-}_{1j}\leq 2,\quad j=1,2,3,4 \\ & 0 \leq r_{2j}^{+}+ r^{-}_{2j}\leq 1,\quad j=1,2,3,4\\ & r_{ij}^{+}, r^{-}_{ij}\geq 0, \quad i=1,2; j=1,2,3,4 \end{align*}$$ d$$\begin{align*} & \min \sum_{i=1}^2\sum_{j=1}^4 r_{ij}^{+} + r^{-}_{ij} \\ & s.t. \\ & r_{11}^{+} + r^{-}_{11} + r_{21}^{+} + r^{-}_{21} \leq 2\\ & r_{11}^{+} - r^{-}_{11} + 3 = r_{12}^{+} - r^{-}_{12} = r_{13}^{+} + r^{-}_{13} - 2 = r_{14}^{+} + r^{-}_{14} + 1\\ & r_{21}^{+} - r^{-}_{21} + 1 = r_{22}^{+} - r^{-}_{22} - 3 = r_{23}^{+} - r^{-}_{23} + 2 = r_{24}^{+} - r^{-}_{24} + 4\\ & r_{ij}^{+}, r^{-}_{ij}\geq 0, \quad i=1,2; j=1,2,3,4 \end{align*}$$ 1.6 **Assumptions*** The surplus of week $i$ only accounts for newly produced cheese. * The cheese produced at the beginning of the week $i$ is regarded as equivalent to those produced at the end of the week $i$ in terms of the time, which means they can be used to fulfill the following one week's demand. 
**Model**| Notation | Explaination || :--- | :--- ||$i$|week index; $i=1,..,8$||$j$|job type; $j=1,2,3$ represents producing Swiss cheese, sharp cheese and training new employees respectively.||$k$|type of cheese; $k=1,2$ represents Swiss cheese and sharp cheese respectively.|| $x_{ij}$ | the number of people doing task $j$ on the week $i$ ||$y_{i}$| the number of people trained on the week $i$||$r_{ik}$|the surplus of type $k$ cheese on the week $i$|**Problem Formulation:**$$\begin{align*} & \min 40 ( \sum_{i=1}^8 \sum_{j=1}^3 x_{ij} + \sum_{i=1}^6 y_{i}) \\ & s.t. \\ & x_{11} + x_{12} + x_{13} = 65 \\ & 400x_{11} - r_{11} = 11000 ; 240x_{12} - r_{12} = 8000 \\ & x_{21} + x_{22} + x_{23} = 65\\ & 400x_{21} + r_{11} - r_{21} = 12000 ; 240x_{22} + r_{12} - r_{22} = 8000 \\ & x_{31} + x_{32} + x_{33} = 65 + \sum_{i=1}^1y_{i} ; y_{1} \leq 3x_{13} \\ & 400x_{31} + r_{21} - r_{31} = 13000 ; 240x_{32} + r_{22} - r_{32}= 10000 \\ & x_{41} + x_{42} + x_{43} = 65 + \sum_{i=1}^2y_{i} ; y_{2} \leq 3x_{23}\\ & 400x_{41} + r_{31} - r_{41}= 18000 ; 240x_{42} + r_{32} - r_{42}= 8000 \\ & x_{51} + x_{52} + x_{53} = 65 + \sum_{i=1}^3y_{i} ; y_{3} \leq 3x_{33}\\ & 400x_{51} + r_{41} - r_{51}= 14000 ; 240x_{52} + r_{42} - r_{52}= 12000 \\& x_{61} + x_{62} + x_{63} = 65 + \sum_{i=1}^4y_{i} ; y_{4} \leq 3x_{43} \\& 400x_{61} + r_{51} - r_{61}= 18000 ; 240x_{62} + r_{52}- r_{62} = 13000 \\& x_{71} + x_{72} + x_{73} = 65 + ; y_{5} \leq 3x_{53}\\& 400x_{71} + r_{61}- r_{71} = 20000 ; 240x_{72} + r_{62}- r_{72} = 12000 \\ & x_{81} + x_{82} + x_{83} = 65 + \sum_{i=1}^6y_{i}; y_{6} \leq 3x_{63}\\ & 400x_{81} + r_{71} \geq 20000 ; 240x_{82} + r_{72} \geq 12000 \\ & \sum_{i=1}^6y_{i} = 35\\ & x_{ij}, y_i \in Z_{+} \end{align*}$$ Based on the solution given by the following code, there's no viable solution.from pulp import * x_1 = ["x_{},1".format(i) for i in range(1,9)] x_2 = ["x_{},2".format(i) for i in range(1,9)] x_3 = ["x_{},3".format(i) for i in range(1,9)] y = ["y_{}".format(i) for i in range(1,7)] r_1 = ["r_{},1".format(i) for i in range(1,9)] r_2 = ["r_{},2".format(i) for i in range(1,9)] var = [x_1, x_2, x_3, y, r_1, r_2]; var = [item for sublist in var for item in sublist] prob = LpProblem("The Cheese and Worker Problem",LpMinimize) Lp_vars = LpVariable.dicts("Lp",var,0, upBound=None, cat="Integer") prob += lpSum([40 * Lp_vars[i] for i in var if "r" not in i]), "Total Cost" counter = 0 for i in range(len(x_1)): counter += 1 if counter <= 2: prob += lpSum([Lp_vars[name] for name in [x_1[i], x_2[i], x_3[i]]]) == int(65) else: prob += lpSum([Lp_vars[name] for name in [x_1[i], x_2[i], x_3[i]]]) == int(65) + lpSum([Lp_vars["y_{}".format(j)] for j in range(1,i)]) for i in range(1,7): prob += Lp_vars["y_{}".format(i)] <= 3 * Lp_vars["x_{},3".format(i)] prob += 400 * Lp_vars["x_1,1"] - Lp_vars["r_1,1"] == 11000 prob += 400 * Lp_vars["x_2,1"] + Lp_vars["r_1,1"] - Lp_vars["r_2,1"]== 12000 prob += 400 * Lp_vars["x_3,1"] + Lp_vars["r_2,1"] - Lp_vars["r_3,1"]== 13000 prob += 400 * Lp_vars["x_4,1"] + Lp_vars["r_3,1"] - Lp_vars["r_4,1"]== 18000 prob += 400 * Lp_vars["x_5,1"] + Lp_vars["r_4,1"] - Lp_vars["r_5,1"]== 14000 prob += 400 * Lp_vars["x_6,1"] + Lp_vars["r_5,1"] - Lp_vars["r_6,1"]== 18000 prob += 400 * Lp_vars["x_7,1"] + Lp_vars["r_6,1"] - Lp_vars["r_7,1"]== 20000 prob += 240 * Lp_vars["x_1,2"] - Lp_vars["r_1,2"] == 8000 prob += 240 * Lp_vars["x_2,2"] + Lp_vars["r_1,2"] - Lp_vars["r_2,2"]== 8000 prob += 240 * Lp_vars["x_3,2"] + Lp_vars["r_2,2"] - Lp_vars["r_3,2"]== 10000 prob += 240 * Lp_vars["x_4,2"] + 
Lp_vars["r_3,2"] - Lp_vars["r_4,2"]== 8000 prob += 240 * Lp_vars["x_5,2"] + Lp_vars["r_4,2"] - Lp_vars["r_5,2"]== 12000 prob += 240 * Lp_vars["x_6,2"] + Lp_vars["r_5,2"] - Lp_vars["r_6,2"]== 13000 prob += 240 * Lp_vars["x_7,2"] + Lp_vars["r_6,2"] - Lp_vars["r_7,2"]== 12000 prob += 400 * Lp_vars["x_8,1"] + Lp_vars["r_7,1"] >= 20000 prob += 240 * Lp_vars["x_8,2"] + Lp_vars["r_7,2"] >= 12000 prob += lpSum([Lp_vars["y_{}".format(i)] for i in range(1,7)]) == 35 prob prob.writeLP("Production_Inventory_Problem.lp") prob.solve() print("Status:", LpStatus[prob.status]) import re r1 = {}; r2 = {}; x1 = {}; x2 = {}; x3 = {} for v in prob.variables(): if not (re.match(re.compile('Lp_r_.,1'), v.name) is None) > 0: r1[v.name] = v.varValue if not (re.match(re.compile('Lp_r_.,2'), v.name) is None) > 0: r2[v.name] = v.varValue if not (re.match(re.compile('Lp_x_.,1'), v.name) is None) > 0: x1[v.name] = v.varValue if not (re.match(re.compile('Lp_x_.,2'), v.name) is None) > 0: x2[v.name] = v.varValue if not (re.match(re.compile('Lp_x_.,3'), v.name) is None) > 0: x3[v.name] = v.varValue x1, x2, x3 r1, r21.7**Model**| Notation | Explaination || :--- | :--- ||$i$|month index; $i=1,..,12$||$j$|item type; $j=1,2$ || $x_{ij}$ | the number of item $j$ produced on the month $i$ ||$y_{ij}$| the number of item $j$ stored on the month $i$|**Problem Formulation:**$$\begin{align*} & \min \sum_{i=1}^6 (5x_{i1} +8.5 x_{i2}) + \sum_{i=7}^{12} (4.5x_{ij} + 7x_{i2}) + \sum_{i=1}^{12} (0.4 y_{i1} + 0.8y_{i2})\\ & s.t. \\ & x_{i1}- y_{i1}= 10^4, i=1,..,4 \\ & x_{i1}- y_{i1} = 3*10^4, i=5,..,9 \\ & x_{i1}- y_{i1}= 10*10^4, i=10,..,12 \\ & x_{i2}- y_{i2} = 5*10^4, i=1,2,10,11,12\\ & x_{i2}- y_{i2} = 1.5*10^4, i=3,...,9 \\ & x_{i1} + x_{i2} \leq 12*10^4, i=1,..,9 \\ & x_{i1} + x_{i2} \leq 15*10^4, i=10,..,12 \\ & 2 * y_{i1} + 4 * y_{i2} \leq 15 * 10^4, i=1,...,12\\ & x_{ij},y_{ij}\geq 0 \end{align*}$$from pulp import * x_1 = ["x_{},1".format(i) for i in range(1,13)] x_2 = ["x_{},2".format(i) for i in range(1,13)] y_1 = ["y_{},1".format(i) for i in range(1,13)] y_2 = ["y_{},2".format(i) for i in range(1,13)] var = [x_1, x_2, y_1, y_2]; var = [item for sublist in var for item in sublist] costs = {} counter = 0 for i in x_1: counter += 1 if counter <= 5: costs[i] = 5 else: costs[i] = 4.5 counter = 0 for i in x_2: counter += 1 if counter <= 5: costs[i] = 8.5 else: costs[i] = 7 for i in y_1: costs[i] = 0.4 for i in y_2: costs[i] = 0.8 prob = LpProblem("The Production and Inventory Problem",LpMinimize) Lp_vars = LpVariable.dicts("Lp",var,0, upBound=None, cat="Integer") prob += lpSum([costs[i]*Lp_vars[i] for i in var]), "Total Cost" counter = 0 for i, j in zip(x_1, y_1): counter += 1 if counter <= 4: prob += Lp_vars[i] - Lp_vars[j] == int(1e4) elif counter <= 9: prob += Lp_vars[i] - Lp_vars[j] == 3 * int(1e4) else: prob += Lp_vars[i] - Lp_vars[j] == 10 * int(1e4) counter = 0 for i, j in zip(x_2, y_2): counter += 1 if counter in [1,2,10,11,12]: prob += Lp_vars[i] - Lp_vars[j] == 5 * int(1e4) else: prob += Lp_vars[i] - Lp_vars[j] == 1.5 * int(1e4) for i in range(1,13): if i <= 9: prob += Lp_vars["x_{},1".format(i)] + Lp_vars["x_{},2".format(i)] <= 12 * int(1e4) else: prob += Lp_vars["x_{},1".format(i)] + Lp_vars["x_{},2".format(i)] <= 15 * int(1e4) for i in range(1,13): prob += 2 * Lp_vars["y_{},1".format(i)] + 4 * Lp_vars["y_{},2".format(i)] <= 15 * int(1e4) prob.writeLP("Production_Inventory_Problem.lp") prob.solve() print("Status:", LpStatus[prob.status]) import re x1= {}; x2={}; y1 = {}; y2 = {} for v in prob.variables(): if not 
(re.match(re.compile('Lp_x_.,1'), v.name) is None) > 0: x1[v.name] = v.varValue if not (re.match(re.compile('Lp_x_.,2'), v.name) is None) > 0: x2[v.name] = v.varValue if not (re.match(re.compile('Lp_y_.,1'), v.name) is None) > 0: y1[v.name] = v.varValue if not (re.match(re.compile('Lp_y_.,2'), v.name) is None) > 0: y2[v.name] = v.varValue x1,x2 y1, y21.26 a**Model**| Notation | Explaination || :--- | :--- ||$i$|bidder; $i=1,..,m$||$j$|depot type; $j=1,2,...,n$ ||$x_{ij}$ | the gallon of fuels transport from the bidder $i$ to the depot $j$ ||$c_{ij}$| the cost per gallon of from the bidder $i$ to the depot $j$|**Problem Formulation:**$$\begin{align*} & \min \sum_{i=1}^n\sum_{j=1}^m c_{ij}x_{ij} \\ & s.t. \\ & \sum_{j=1}^m x_{ij}\leq a_i, i=1,..,m\\ & \sum_{i=1}^n x_{ij}\geq b_j, i=1,..,m\\ & x_{ij} \geq 0 \end{align*}$$ bActually, there are two different ways to interpret the so-called discount.**(I)** Once the $x_{ij}\geq \alpha_i$, then unit price discount applies to all $x_{ij}$.**(II)** If the $x_{ij}\geq \alpha_i$, then unit price discount applies only to $x_{ij} - \alpha_i$ parts.According to the TA, the interpretation **(II)** is chosen here.we need introduce a sets of binary variables $z_{ij}$, where$$z_{ij} = \begin{cases}1, & x_{ij} \geq \alpha_i \\0, & o.w.\end{cases}.$$Also we introduce another sets of variable $d_{ij}$, the discounted unit price for bidder $i$ if the demand exceeds $\alpha_i$. Then the problem is formulated as$$\begin{align*} & \min \sum_{i=1}^n\sum_{j=1}^m c_{ij}x_{ij}- d_jz_{ij}(x_{ij} - \alpha_i) \\ & s.t. \\ & \sum_{j=1}^m x_{ij}\leq a_i, i=1,..,m\\ & \sum_{i=1}^n x_{ij}\geq b_j, i=1,..,m\\ & x_{ij}/\alpha_i \geq z_{ij} \\ & z_{ij} \in \{0,1\} \\ & x_{ij} \geq 0 \end{align*}$$Of course this is not a linear programming problem. 1.28Note that$$P(Q) = \int_0^Q (\sum_{t=1}^s f_t(x)) dx,$$where $f_t(x) = H_t I(W_{t-1}\leq x < W_t)$ and $W_0 = 0$. Also, denote $g_t(Q)=H_tQ, Q\geq 0$.Then $$P(Q)=\min_{t=1,...,s} \{g_t(Q)\}, 0 \leq Q \leq \sum_{t=1}^s w_t.$$Then the problem can be formulated as $$\begin{align*} & \max_{Q, y} P(Q) + (-cy) \\ & s.t. \\ & P(Q) \leq g_t(Q),\quad 0 \leq Q \leq \sum_{t=1}^s w_t \\ & Ay = b\\ & \alpha y = Q \\ & y \geq 0 \end{align*}$$ 1.30 aThe problem is equivalent to$$\begin{align*} & \min x_1 -2x_2 - 3x_3\\ & s.t. \\ & -x_1 + 3x_2 + x_3 + s_1 = 13 \\ & x_1 + 2x_2 + 3x_3 - s_2 = 12 \\ & 2x_1 - x_2 + x_3 = 4 \\ & x_1 = y_1 - y'_1\\ & x_2 = y_2 - y'_2 \\ & x_3 = -3-s_3 \\ & s_1,s_2,s_3 \geq 0 \\ & y_i, y'_i \geq 0, \quad i=1,2 \end{align*}$$And we plug the expressions of $x_1,x_2,x_3$ into the problem above, then the standard form is given below.$$\begin{align*} & \min (y_1 - y'_1) -2(y_2 - y'_2) - 3(-3-s_3)\\ & s.t. \\ & -(y_1 - y'_1) + 3(y_2 - y'_2) + (-3-s_3) + s_1 = 13 \\ & (y_1 - y'_1) + 2(y_2 - y'_2) + 3(-3-s_3) - s_2 = 12 \\ & 2(y_1 - y'_1) - (y_2 - y'_2) + (-3-s_3) = 4 \\ & s_1,s_2,s_3 \geq 0 \\ & y_i, y'_i \geq 0, \quad i=1,2 \end{align*}$$ bThe problem is equivalent to$$\begin{align*} & \min x_1 -2x_2 - 3x_3\\ & s.t. \\ & x_1 - 3x_2 - x_3 \geq 13 \\ & x_1 + 2x_2 + 3x_3 \geq = 12 \\ & 2x_1 - x_2 + x_3 \geq 4 \\ & -(2x_1 - x_2 + x_3) \geq -4 \\ & x_1 \geq y_1 - y'_1\\ & -x_1 \geq -(y_1 - y'_1)\\ & x_2 \geq y_2 - y'_2 \\ & -x_2 \geq -(y_2 - y'_2) \\ & -x_3 \geq 3 \\ & s_1,s_2,s_3 \geq 0 \\ & y_i, y'_i \geq 0, \quad i=1,2 \end{align*}$$And we plug the expressions of $x_1,x_2,x_3$ into the problem above, then the standard form is given below.$$\begin{align*} & \min (y_1 - y'_1) -2(y_2 - y'_2) - 3(-3-s_3)\\ & s.t. 
\\ & (y_1 - y'_1) - 3(y_2 - y'_2) - (-3-s_3) \geq 13 \\ & x_1 + 2x_2 + 3x_3 \geq 12 \\ & 2(y_1 - y'_1) - (y_2 - y'_2) + (-3-s_3) \geq 4 \\ & -(2(y_1 - y'_1) - (y_2 - y'_2) + (-3-s_3)) \geq -4 \\ & -x_3 \geq 3 \\ & s_1,s_2,s_3 \geq 0 \\ & y_i, y'_i \geq 0, \quad i=1,2 \end{align*}$$ cThe problem is equivalent to$$\begin{align*} & \max -x_1 + 2x_2 + 3x_3\\ & s.t. \\ & -x_1 + 3x_2 + x_3 \leq 13 \\ & x_1 + 2x_2 + 3x_3 \geq 12 \\ & 2x_1 - x_2 + x_3 = 4 \\ & x_1 = y_1 - y'_1\\ & x_2 = y_2 - y'_2 \\ & x_3 \leq -3 \\ & y_i, y'_i \geq 0, \quad i=1,2 \end{align*}$$And we plug the expressions of $x_1,x_2,x_3$ into the problem above, then the maximization form is given below.$$\begin{align*} & \max -(y_1 - y'_1) + 2(y_2 - y'_2) + 3x_3\\ & s.t. \\ & -(y_1 - y'_1) + 3(y_2 - y'_2) + x_3 \leq 13 \\ & (y_1 - y'_1) + 2(y_2 - y'_2) + 3x_3 \geq 12 \\ & 2(y_1 - y'_1) - (y_2 - y'_2) + x_3 = 4 \\ & x_3 \leq -3 \\ & y_i, y'_i \geq 0, \quad i=1,2 \end{align*}$$ 1.37 aAs shown in the graph below, the red line indicates the optimal solution set with the natural $x_1, x_2 \geq 0$ restrictions added. Mathematically, the optimal solution set is:$$S=\{(x_1, x_2); x_1+2x_2=4, x_1, x_2 \geq 0\}$$import numpy as np import matplotlib.pyplot as plt from sympy.solvers import solve from sympy import Symbol def f1(x): return 1 + 0.5 * x def f2(x): return 2 * x def f3(x): return -0.5 * x + 2 x = Symbol('x') x1, = solve(f1(x)-f2(x)) x2, = solve(f1(x)-f3(x)) x3 = 4 x4 =0 y1 = f1(x1) y2 = f1(x2) y3 = 0 y4 =0 plt.plot(x1,y1,'go',markersize=10) plt.plot(x2,y2,'go',markersize=10) plt.plot(x3,y3,'go',markersize=10) plt.plot(x4,y4,'go',markersize=10) plt.fill([x1,x2,x3,x4],[y1,y2,y3,y4],'green',alpha=0.5) xr = np.linspace(0,4,100) y1r = f1(xr) y2r = f2(xr) y3r = f3(xr) y4r = 0 * xr plt.plot(xr,y1r,'k-',label=r"$x_2=1+0.5x_1$") plt.plot(xr,y2r,'b--', label=r"$x_2=2x_1$") plt.plot(xr,y4r,'k-') plt.plot(xr,y3r,'r*-', label=r"$x_2=2-0.5x_1$") plt.legend() #pplt.plot(xr,-0.5*xr + 0.8,'c--') plt.show()bIt's straightforward to see from the following graph that, the maximum is attained at $(0,2)$.plt.figure() xr = np.linspace(0,4,100) y3r = f3(xr) plt.plot(xr,y3r,'k-',label=r"$x_2=2-0.5x_1$") plt.plot(xr, 3*xr+2,'r-') #plt.plot(xr, 3*xr,'c--') plt.plot(0,2,'ko',markersize=10) plt.plot(4,0,'ko',markersize=10) plt.xlim(-1,5) plt.legend() plt.show()cFrom the graph below, we know the optimal solution for problem$$\max -3x_1 + x_2$$over the same feasible set, is unique. That is,$(0, 0) = \arg\max_{x_1,x_2} -3x_1 + x_2$Since the feasible set for problem $$\max 3x_1 + 6x_2 \quad (**)$$only contains 1 element. So $\bf (0, 0)$ is also the solution for (**)plt.figure() plt.fill([x1,x2,x3,x4],[y1,y2,y3,y4],'green',alpha=0.5) xr = np.linspace(0,4,100) y1r = f1(xr) y2r = f2(xr) y3r = f3(xr) y4r = 0 * xr plt.plot(xr,y1r,'k-',label=r"$x_2=1+0.5x_1$") plt.plot(xr,y2r,'b--', label=r"$x_2=2x_1$") plt.plot(xr,y4r,'k-') plt.plot(xr,y3r,'m-.', label=r"$x_2=2-0.5x_1$") plt.legend() #plt.plot(xr, 3*xr - 5,'c--') plt.plot(xr, 3*xr,'r-') plt.show()(BT) 1.5 a **Formulation 1:**$$\begin{align*} & \min \quad c^Tx + d^Ty \\ & s.t. \\ & Ax + By \leq b\\ & x_i \leq y_i \\ & - x_i \leq y_i \end{align*}$$ **Formulation 2:**Note that $|x| = x^{+} + x^{-}$ and $x = x^{+} - x^{-}$, where the operation is element-wise, we have$$\begin{align*} & \min \quad c^T(x^{+} - x^{-}) + d^T(x^{+} + x^{-}) \\ & s.t. 
\\ & A(x^{+} - x^{-}) + B(x^{+} + x^{-}) \leq b\\ & x^{+} \geq 0 \\ & x^{-} \geq 0 \end{align*}$$ bSince both **Formulation 1** and **Formulation 2** are one-to-one transformations, so if we can obtain a solution from any of three formulation, the rest two can be uniquely determined. If there is no feasible solution in any of the three formulation, the rest two are not solvable. Therefore, three formulations are equivalent.I use two simple example to illustrate the statement above.1. Suppose for the **Formulation 2**, the optimal solution is$(x^{+}, x^{-}) = (a,b)$, then * For the original formulation, $x = a - b$; * For the **Formulation 2**, $x=a - b, y = |a-b|$, where $|.|$ is a element-wise operation.2. Suppose for the **Formulation 1**, the optimal solution is$(x, y) = (a,|a|)$, then * For the original formulation, $x = a$; * For the **Formulation 2**, the optimal solution is $(x^{+}, x^{-}) = \big(0.5(|a| + a),0.5(|a| - a)\big)$. cConsider the following example,$$\begin{align*} & \min x \\ & s.t. \\ & -0.5x - y\leq -3\\ & y = |x| \end{align*}$$The feasible set for this problem is $\{x| x\leq -1.5, x\geq 2\}$. The local minimal attains at $x=2$, while the global minimal attains at $x=-\infty$. (BT) 1.11 **Assumptions*** All the currency conversions can be done at given time point, say $k$. Throughout the day, conversions only take place $K$ times in total.**Model**| Notation | Explaination || :--- | :--- ||$i,j$|currency index; $i,j = 1,2....n$||$k$|time point; $k=0,1,...,K$||$x_{ik}$|the amount of currency $i$ held at time point $k$||$t_{ijk}$|the amount of currency $i$ converted to currency $j$ at time point $k$|**Note:** By definition, $t_{jik}$ represents the amount of currency $j$ converted to currency $i$ at time point $k$**Problem Formulation:**$$\begin{align*} & \max \quad x_{NK} \\ & s.t. \\ & x_{j(k+1)} = \sum_{i=1}^Nr_{ij}t_{ijk} + x_{jk}\\ & x_{j(k+1)} = x_{jk} - \sum_{i=1}^Nr_{ij}t_{jik} \\ & t_{ijk} \geq 0, \quad k=1,2,...,K-1\\ & t_{ijk} \leq x_{ik}\\ & \sum_{j}^N\sum_{k=1}^K t_{jik} \leq u_j\\ & x_{10} = B\\ & x_{j0} = 0, \quad j=2,...,N \end{align*}$$ Assortment OptimizationDefine the $w_i$ as the probability of buying product $i$. Especially, $w_0$ denotes buying nothing. Also denote $c$ as the number of items in the assortment. Then the object function is $$\begin{align*} & \max\sum_{i=1}^4r_iw_i \\ & s.t. 
\\ &w_0 + \sum_{i=1}^4w_i = 1\\ &0 \leq w_i \leq \frac{v_i}{v_0}w_0, \quad i=1,..,4\\ & \sum_{i=1}^4 \frac{w_i}{v_i} \leq c \frac{w_0}{v_0} \end{align*}$$ From the code given below, the best assortment for $c=1$ is **(item4)**, and for $c=2$ is **(item1, item2)**, and for $c=3, 4$ are **(item1, item2, item3)**.from pulp import * w = ["w_{}".format(i) for i in range(5)] var = w revenue = {"w_0":0, "w_1":9.5, "w_2":9.0, "w_3":7.0, "w_4":4.5} prob = LpProblem("The Assortment Problem",LpMaximize) Lp_vars = LpVariable.dicts("Lp",var,0, upBound=None) prob += lpSum([revenue[i]*Lp_vars[i] for i in var]) prob += lpSum(Lp_vars[i] for i in var) == 1 prob += Lp_vars["w_1"] <= 0.3 * Lp_vars["w_0"] prob += Lp_vars["w_2"] <= 0.6 * Lp_vars["w_0"] prob += Lp_vars["w_3"] <= 0.3 * Lp_vars["w_0"] prob += Lp_vars["w_4"] <= 5.2 * Lp_vars["w_0"] prob += (1 / 0.3) * Lp_vars["w_1"] + (1 / 0.6) * Lp_vars["w_2"] + (1 / 0.3) * Lp_vars["w_3"] + (1/ 5.2) * Lp_vars["w_4"] <= 1 * Lp_vars["w_0"] prob.writeLP("The_Assortment_Problem.lp") prob.solve() print("Status:", LpStatus[prob.status]) for v in prob.variables(): print(v.name, "=", v.varValue) from pulp import * w = ["w_{}".format(i) for i in range(5)] var = w revenue = {"w_0":0, "w_1":9.5, "w_2":9.0, "w_3":7.0, "w_4":4.5} counter = 0 prob = LpProblem("The Assortment Problem",LpMaximize) Lp_vars = LpVariable.dicts("Lp",var,0, upBound=None) prob += lpSum([revenue[i]*Lp_vars[i] for i in var]) prob += lpSum(Lp_vars[i] for i in var) == 1 prob += Lp_vars["w_1"] <= 0.3 * Lp_vars["w_0"] prob += Lp_vars["w_2"] <= 0.6 * Lp_vars["w_0"] prob += Lp_vars["w_3"] <= 0.3 * Lp_vars["w_0"] prob += Lp_vars["w_4"] <= 5.2 * Lp_vars["w_0"] prob += (1 / 0.3) * Lp_vars["w_1"] + (1 / 0.6) * Lp_vars["w_2"] + (1 / 0.3) * Lp_vars["w_3"] + (1/ 5.2) * Lp_vars["w_4"] <= 2 * Lp_vars["w_0"] prob.writeLP("The_Assortment_Problem.lp") prob.solve() print("Status:", LpStatus[prob.status]) for v in prob.variables(): print(v.name, "=", v.varValue) from pulp import * w = ["w_{}".format(i) for i in range(5)] var = w revenue = {"w_0":0, "w_1":9.5, "w_2":9.0, "w_3":7.0, "w_4":4.5} counter = 0 prob = LpProblem("The Assortment Problem",LpMaximize) Lp_vars = LpVariable.dicts("Lp",var,0, upBound=None) prob += lpSum([revenue[i]*Lp_vars[i] for i in var]) prob += lpSum(Lp_vars[i] for i in var) == 1 prob += Lp_vars["w_1"] <= 0.3 * Lp_vars["w_0"] prob += Lp_vars["w_2"] <= 0.6 * Lp_vars["w_0"] prob += Lp_vars["w_3"] <= 0.3 * Lp_vars["w_0"] prob += Lp_vars["w_4"] <= 5.2 * Lp_vars["w_0"] prob += (1 / 0.3) * Lp_vars["w_1"] + (1 / 0.6) * Lp_vars["w_2"] + (1 / 0.3) * Lp_vars["w_3"] + (1/ 5.2) * Lp_vars["w_4"] <= 3 * Lp_vars["w_0"] prob.writeLP("The_Assortment_Problem.lp") prob.solve() print("Status:", LpStatus[prob.status]) for v in prob.variables(): print(v.name, "=", v.varValue) from pulp import * w = ["w_{}".format(i) for i in range(5)] var = w revenue = {"w_0":0, "w_1":9.5, "w_2":9.0, "w_3":7.0, "w_4":4.5} counter = 0 prob = LpProblem("The Assortment Problem",LpMaximize) Lp_vars = LpVariable.dicts("Lp",var,0, upBound=None) prob += lpSum([revenue[i]*Lp_vars[i] for i in var]) prob += lpSum(Lp_vars[i] for i in var) == 1 prob += Lp_vars["w_1"] <= 0.3 * Lp_vars["w_0"] prob += Lp_vars["w_2"] <= 0.6 * Lp_vars["w_0"] prob += Lp_vars["w_3"] <= 0.3 * Lp_vars["w_0"] prob += Lp_vars["w_4"] <= 5.2 * Lp_vars["w_0"] prob += (1 / 0.3) * Lp_vars["w_1"] + (1 / 0.6) * Lp_vars["w_2"] + (1 / 0.3) * Lp_vars["w_3"] + (1/ 5.2) * Lp_vars["w_4"] <= 4 * Lp_vars["w_0"] prob.writeLP("The_Assortment_Problem.lp") prob.solve() print("Status:", 
LpStatus[prob.status]) for v in prob.variables(): print(v.name, "=", v.varValue)Status: Optimal Lp_w_0 = 0.45454545 Lp_w_1 = 0.13636364 Lp_w_2 = 0.27272727 Lp_w_3 = 0.13636364 Lp_w_4 = 0.0Sparse matrix-vector multiplication in Spark Sparse matricesSparse matrices are defined as matrices in which most elements are zero. Specifically, the sparsity of a matrix is defined as$$\frac{\text{number of zero-valued elements}}{\text{total number of elements}}.$$Sparse matrices describe loosely coupled linear systems. It is often convenient to store sparse matrices in [COO (coordinate list)](https://en.wikipedia.org/wiki/Sparse_matrix#Storing_a_sparse_matrix) format. This allows us to define only the non-zero elements of the matrix as a list of 3-tuples: $(i, j, v)$, such that $M_{ij}=v$. As an example, here's some Python code that uses NumPy to generate a random, sparse matrix in $\mathbf{R}^{10,000\times 10,000}$ with 20,000 non-zero entries between 0 and 1. We'll also make use of the `coo_matrix` class from `scipy.sparse`, which allows us to quickly convert to a dense format for testing.import numpy as np from scipy.sparse import coo_matrix from pyspark import SparkConf, SparkContext n = 10000 indices = np.random.randint(0, n, size=(2*n, 2)) values = np.random.random(size=2*n) sparse_representation = np.c_[indices, values[:, None]] sparse_representation[:5]We'll save this to disk for future use.np.savetxt('sparse_matrix.txt', sparse_representation, delimiter=' ')The `coo_matrix` class constructs a sparse matrix using the form `(data, (i, j))`, where `data`, `i`, and `j` are arrays:1. `data[:]`, the entries of the matrix, in any order2. `i[:]`, the row indices of the matrix entries3. `j[:]`, the column indices of the matrix entriesThe SciPy [sparse matrix formats](https://docs.scipy.org/doc/scipy/reference/sparse.html) are super useful and are compatible with [sklearn algorithms](http://scikit-learn.org/stable/auto_examples/text/document_classification_20newsgroups.html). Here, we'll just use it to convert our sparse representation to a dense array for comparison and testing.M_sparse = coo_matrix((values, (indices.T[0], indices.T[1])), shape=(n, n)) M_sparse M = M_sparse.toarray() M.shape type(M)Spark RDDs and TransformationsThe fundamental data structure of Spark is the [resilient distributed dataset (RDD)](https://spark.apache.org/docs/2.2.0/rdd-programming-guide.html#resilient-distributed-datasets-rdds), which is a fault-tolerant collection of elements that can be operated on in parallel via Spark. The standard method for instantiating an RDD is by referencing a dataset in an external storage system, such as a shared filesystem, HDFS, HBase, or any data source offering a Hadoop InputFormat. Below, we instantiate an RDD using the built-in `textFile` from PySpark. This interprets a text file as a sequence of strings, with each line of the file represented as a single string.conf = SparkConf() sc = SparkContext(conf=conf) lines = sc.textFile('sparse_matrix.txt') lines.take(10)We used the `take(10)` method to view the first 10 items in the RDD, which correspond to the first 10 lines in the file we wrote to disk earlier. We want to convert the lines from strings to 3-tuples. We do this via a transformation on this RDD. The most basic transformation is `map`, which applies a function to every element in the RDD.M_rdd = lines.map(lambda l: map(float, l.strip().split(' '))) M_rdd.take(10)So, we successfully created an RDD containing a COO representation of the matrix.
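One detail worth flagging: under Python 3 the inner `map(float, ...)` is evaluated lazily, so each RDD element would come back as a map object rather than an indexable list. A minimal variant (a sketch, reusing the `lines` RDD from above; `M_rdd_tuples` is a hypothetical name) that materializes each parsed line as a plain tuple of floats looks like this:

```Python
# Sketch: parse each line eagerly into a (row, col, value) tuple of floats.
# Assumes the `lines` RDD created above.
M_rdd_tuples = lines.map(lambda l: tuple(float(t) for t in l.strip().split(' ')))
M_rdd_tuples.take(5)  # e.g. [(row, col, value), ...]
```

Either representation works for what follows, as long as each element supports indexing by position.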
Matrix-vector multiplication on Spark RDDsThe basic transformations on RDDs are `map` and `reduceByKey`, which are exact parallels of the older [MapReduce](https://en.wikipedia.org/wiki/MapReduce) paradigm. Briefly, a MapReduce operation does the following:1. _Map:_ Apply a function to each element of the input dataset, resulting in a sequence of key-value pairs: $[(k_1, v_1), (k_2, v_2), (k_1, v_3), \ldots]$2. _Group:_ The key-value pairs are sorted and organized by key, so that each unique key is associated with a list of values: $[(k_1, [v_1, v_3, \ldots]), (k_2, [v_2, \ldots]), \ldots]$3. _Reduce:_ Combine the values in each key's list according to some function. The function is defined on two values at a time and must be associative and commutative.For example, the following would be the reduce function used to take the sum over all elements associated with a key:```Pythondef summation(v1, v2): return v1 + v2```which can be written more compactly using `lambda` form:```Pythonlambda v1, v2: v1 + v2```As it turns out, the MapReduce paradigm is particularly well-suited to multiplying a sparse matrix and a vector. Let's explore why that is, and then go through an example.Given the matrix equation$$y=Ax$$with $A\in\mathbf{R}^{m\times n}$, each element of $y$ is defined as$$y_i = \sum_{j=1}^n A_{ij} x_j.$$So, if we have an RDD representing the matrix, and the vector $x$ fits in memory, then we carry out the multiplication as follows:1. _Map:_ Take in tuples `(i, j, Aij)` and return tuples `(i, Aij * x[j])`2. _Group:_ Group all entries by row index3. _Reduce:_ Sum values for each row indexSpark's `reduceByKey` performs steps 2 and 3 together. All that's left is to correctly organize the results. We must sort the results by key and then handle missing keys, which would occur if a row of our matrix does not contain any non-zero entries. Let's try it out.First, we create a random vector to multiply against our matrix.v_in = np.random.random(size=n)Next, we perform the MapReduce operation, using Spark. Note how transformations can be chained together. This is not necessary, but is often a cleaner way to represent a multi-step operation. In the last step, we use `collect`, which converts the resulting RDD to a Python list. This should be done with care! If the resulting list is too large, this could cause real problems. In this case, we know the resulting vector is the same size as the input vector, so we can safely collect the RDD to active memory.v_out_spark_raw = np.array( M_rdd\ .map(lambda x: (x[0], v_in[int(x[1])] * x[2]))\ .reduceByKey(lambda v1, v2: v1 + v2)\ .sortByKey()\ .collect() ) len(v_out_spark_raw)Uh-oh, we were expecting a vector in $\mathbf{R}^{\text{10,000}}$! As mentioned above, this happens when the sparse matrix has no non-zero entries in some rows.
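To double-check that empty rows really account for the shortfall, a quick sanity check (a sketch, assuming the `indices` array and `n` from the matrix-generation cell are still in scope) is to count how many row indices never received a non-zero entry:

```Python
import numpy as np

# Rows that got at least one non-zero entry when the matrix was generated.
occupied_rows = np.unique(indices[:, 0])
empty_rows = n - len(occupied_rows)

# If empty rows explain the gap, len(v_out_spark_raw) + empty_rows equals n.
print(len(occupied_rows), empty_rows)
```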
We can easily handle this case by using some NumPy indexing tricks, as follows:v_out_spark = np.zeros(n) v_out_spark[map(int, v_out_spark_raw.T[0])] = v_out_spark_raw.T[1]Finally, we will compare what we just calculated to what we get with Numpy, using the dense array from earlier.v_out_numpy = M.dot(v_in) np.allclose(v_out_spark, v_out_numpy) v_out_numpy[:20] v_out_spark[:20]Step 8: Overall Recommendationsimport pandas as pd import sys import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap import numpy as np %matplotlib inline cmap_bold = ListedColormap(['#00FF00','#FF0000']) sys.path.append('../utils') import DataAggregation as da import AlgoUtils as au algos_dd = { "LogisticRegression": {"C": 1e9}, "LogisticRegressionB": {"C": 1e9, "class_weight":'balanced'}, "KNeighborsClassifier": {"n_neighbors": 7}, "LinearDiscriminantAnalysis": {}, "QuadraticDiscriminantAnalysis": {}, "SVC": {} } fcols = ["d_mean:d_std:d_max:l_range", "d_mean:d_std:l_range", "d_std:l_range", "l_range", "d_std", "d_max"] algos_str = ["LogisticRegression", "LogisticRegressionB", "KNeighborsClassifier", "LinearDiscriminantAnalysis", "QuadraticDiscriminantAnalysis"] a2 = da.GetFrames("../data/device_failure.csv", "a2") a7 = da.GetFrames("../data/device_failure.csv", "a7") a4 = da.GetFrames("../data/device_failure.csv", "a4", ldays=-30, lday_strict=False) tdf = a2.df_sfeature.drop("failure", axis=1).join(a7.df_sfeature.drop("failure", axis=1)).join(a4.df_sfeature)All models and recommendations need further validation at scale! Model 1: Definite Action Model If this model detects failure, take action Ofcourse still need to do validation at scale See analysis in Step3_a2_analysis.ipynb, Step4_a7_analysis.ipynb for more info on why we are confident about this recommendationalgo_str = "QuadraticDiscriminantAnalysis" scols = ["a2l_range", "a2d_std", "a2d_mean", "a2d_max", "a7l_range", "a7d_std", "a7d_mean", "a7d_max"] analysisdf = au.do_clf_validate_new(tdf, algo_str,algos_dd[algo_str], scols, "failure")Cross-val-score(roc_auc) = 0.74 Cross-val-score(accuracy) = 0.94 Cross-val-score(recall) = 0.46 Cross-val-score(precision)= 0.76 Cross-val-score(f1) = 0.76Model 2: If Model 1 does not detect failure, but this model detects failure Recommend Inspection of device Do Hypothesis testing from field: How many days to actual failure once this model detected fail Refer to analysis in Step5_a4_analysis.ipynb for explanationalgo_str = "QuadraticDiscriminantAnalysis" scols = tdf.columns[:-1] analysisdf = au.do_clf_validate_new(tdf, algo_str,algos_dd[algo_str], scols, "failure")Cross-val-score(roc_auc) = 0.78 Cross-val-score(accuracy) = 0.93 Cross-val-score(recall) = 0.56 Cross-val-score(precision)= 0.65 Cross-val-score(f1) = 0.65Data Quality Improvement Recommendationsdf = pd.read_csv("../data/device_failure.csv") df.loc[:,'date'] = pd.to_datetime(df['date']) df.groupby(["date"]).count()["device"].plot.line() plt.ylabel("#devices")IntroductionData preparation is very important for all machine learning problems. Most of the real world datasets have dirty or unclean data, data may also be unsuitable for machine learning techniques that you want to apply. Data may suffer from many defects, including :- a). Missing Data b). Duplications c). Incorrect data types including categorical data d). Outliers e). Inconsistencies in data representation e.g. Rating was “1,2,3”, now rating “A, B, C” The purpose of data preparation is to transform data sets so that their information content is best exposed to solve given problem. 
Important advantages of preprocessing include :- a). Error prediction rate should be lower (or the same) after the preparation as before it b). Less computation and higher performance is achieved because of preprocessingPreprocessing Techniques covered in this tutorial are :-1). Label Encoding2). Handling Missing Features with Imputation3). Scaling and Normalization4). Dummy Variables for dealing with Categorical Values# This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory import os print(os.listdir("../input")) # Any results you write to the current directory are saved as output. plt.style.use('fivethirtyeight')['pimaindian', 'gm2008', 'housingvotes', 'gapminder']Exploring Categorical FeaturesThe Gapminder dataset also contains a categorical 'Region' feature. In this exercise we first explore this feature. Boxplots are particularly useful for visualizing categorical features such as this.# Import pandas #import pandas as pd #import matplotlib.pyplot as plt fig, ax = plt.subplots(figsize=(15,7)) # Read 'gapminder.csv' into a DataFrame: df df = pd.read_csv('../input/gm2008/gm_2008_region.csv') # Print the columns of df print(df.columns) # Create a boxplot of life expectancy per region df.boxplot('life', 'Region', rot=60,ax=ax) # Show the plot plt.show()Index(['population', 'fertility', 'HIV', 'CO2', 'BMI_male', 'GDP', 'BMI_female', 'life', 'child_mortality', 'Region'], dtype='object')Dummy VariablesAs we know, scikit-learn does not accept non-numerical features. You saw in the graph above that the 'Region' feature contains very useful information that can predict life expectancy. For example, Sub-Saharan Africa has a lower life expectancy compared to Europe and Central Asia. Therefore, if you are trying to predict life expectancy, it would be preferable to retain the 'Region' feature. 
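Before applying this to the Gapminder column, here is a tiny self-contained illustration (toy data, not part of the exercise) of what such a binarization produces:

```Python
import pandas as pd

# Toy categorical column, just to show the shape of the dummy encoding.
toy = pd.DataFrame({'Region': ['America', 'Europe & Central Asia', 'America', 'South Asia']})
print(pd.get_dummies(toy))                   # one 0/1 column per category
print(pd.get_dummies(toy, drop_first=True))  # first category dropped to avoid redundancy
```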
To do this, you need to binarize it by creating dummy variables, which is what you will do in this exercise.# Print the columns of df_region print("Dataframe with Region:\n ",df.info()) print("Dataframe head containing Region column :\n", df['Region'].head(10)) # Create dummy variables: df_region df_region1 = pd.get_dummies(df) print("Dataframe after creating dummy columns without dropping region :\n ",df_region1.info()) # Create dummy variables with drop_first=True: df_region df_region2 = pd.get_dummies(df,drop_first=True) # Print the new columns of df_region print("Dataframe Region with Dummy Columns but dropping first dummy column : \n",df_region2.info()) print("Dataframe Region columns after dummification step :\n ", df_region2.iloc[:10,9:11]) RangeIndex: 139 entries, 0 to 138 Data columns (total 10 columns): population 139 non-null float64 fertility 139 non-null float64 HIV 139 non-null float64 CO2 139 non-null float64 BMI_male 139 non-null float64 GDP 139 non-null float64 BMI_female 139 non-null float64 life 139 non-null float64 child_mortality 139 non-null float64 Region 139 non-null object dtypes: float64(9), object(1) memory usage: 10.9+ KB Dataframe with Region: None Dataframe head containing Region column : 0 Middle East & North Africa 1 Sub-Saharan Africa 2 America 3 Europe & Central Asia 4 East Asia & Pacific 5 Europe & Central Asia 6 Europe & Central Asia 7 America 8 South Asia 9 America Name: Region, dtype: object Setting Up for Label EncodingAnother approach is to encode categorical values with a technique called "label encoding", which allows you to convert each value in a column to a number. Numerical labels are always between 0 and n_categories-1.Sometimes, you might just want to encode a bunch of categories within a feature to some numeric value and encode all the other categories to some other numeric value.from sklearn.datasets import load_boston boston = load_boston() X = boston.data Y = boston.target print("X Shape : ",X.shape) print("Y Shape : ",Y.shape) from sklearn.model_selection import train_test_split X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=1000) import numpy as np X = np.random.uniform(0.0, 1.0, size=(10, 2)) Y = np.random.choice(('Male','Female'), size=(10)) print("X : ",X) print("Y : ",Y)X : [[0.36600382 0.72741013] [0.80504008 0.70697672] [0.74623249 0.11963019] [0.2280842 0.89134984] [0.54977715 0.98907535] [0.83314783 0.38211753] [0.13077088 0.31467905] [0.09498454 0.9982746 ] [0.08416287 0.2955988 ] [0.48809661 0.77914031]] Y : ['Male' 'Male' 'Female' 'Female' 'Female' 'Male' 'Male' 'Female' 'Male' 'Male']Label EncodingThe first option is to use the LabelEncoder class, which adopts a dictionary-oriented approach, associating to each category label a progressive integer number, that is an index of an instance array called classes_:from sklearn.preprocessing import LabelEncoder le = LabelEncoder() yt = le.fit_transform(Y) print(yt) from sklearn.preprocessing import LabelBinarizer lb = LabelBinarizer() Yb = lb.fit_transform(Y) print("Yb : ",Yb) print("Inverse Transformation : ",lb.inverse_transform(Yb))Yb : [[1] [1] [0] [0] [0] [1] [1] [0] [1] [1]] Inverse Transformation : ['Male' 'Male' 'Female' 'Female' 'Female' 'Male' 'Male' 'Female' 'Male' 'Male']Handling Missing FeaturesSometimes a dataset can contain missing features, so there are a few options that can be taken into account:1). Removing the whole line2). Creating sub-model to predict those features3). 
Using an automatic strategy to input them according to the other known valuesfrom sklearn.preprocessing import Imputer data = np.array([[1, np.nan, 2], [2, 3, np.nan], [-1, 4, 2]]) print("data : ",data) # Mean Strategy imp = Imputer(strategy='mean') trans_data = imp.fit_transform(data) print("Transformed Data using mean strategy : \n",trans_data) # Median Strategy imp = Imputer(strategy='median') trans_data = imp.fit_transform(data) print("Transformed Data using median strategy : \n",trans_data) # Most Frequent imp = Imputer(strategy='most_frequent') trans_data = imp.fit_transform(data) print("Transformed Data using most frequent strategy : \n",trans_data)data : [[ 1. nan 2.] [ 2. 3. nan] [-1. 4. 2.]] Transformed Data using mean strategy : [[ 1. 3.5 2. ] [ 2. 3. 2. ] [-1. 4. 2. ]] Transformed Data using median strategy : [[ 1. 3.5 2. ] [ 2. 3. 2. ] [-1. 4. 2. ]] Transformed Data using most frequent strategy : [[ 1. 3. 2.] [ 2. 3. 2.] [-1. 4. 2.]]Dropping Missing DataThe voting dataset contained a bunch of missing values. Now, it's time for you to take care of these !The unprocessed dataset has been loaded into a DataFrame df. Explore it in the IPython Shell with the .head() method. You will see that there are certain data points labeled with a '?'. These denote missing values. As you saw in the video, different datasets encode missing values in different ways. Sometimes it may be a '9999', other times a 0 - real-world data can be very messy! If you're lucky, the missing values will already be encoded as NaN. We use NaN because it is an efficient and simplified way of internally representing missing data, and it lets us take advantage of pandas methods such as .dropna() and .fillna(), as well as scikit-learn's Imputation transformer Imputer().In this exercise, we convert the '?'s to NaNs, and then drop the rows that contain them from the DataFrame.# Import pandas import pandas as pd import matplotlib.pyplot as plt import numpy as np # Read 'gapminder.csv' into a DataFrame: df df = pd.read_csv('../input/housingvotes/house-votes-84.csv',header=None) # Convert '?' to NaN df[df == '?'] = np.nan # Print the number of NaNs print("The number of NaNs :\n",df.isnull().sum()) # Print shape of original DataFrame print("Shape of Original DataFrame: {}".format(df.shape)) # Drop missing values and print shape of new DataFrame df = df.dropna() # Print shape of new DataFrame print("Shape of DataFrame After Dropping All Rows with Missing Values: {}".format(df.shape))The number of NaNs : 0 0 1 12 2 48 3 11 4 11 5 15 6 11 7 14 8 15 9 22 10 7 11 21 12 31 13 25 14 17 15 28 16 104 dtype: int64 Shape of Original DataFrame: (435, 17) Shape of DataFrame After Dropping All Rows with Missing Values: (232, 17)Imputing Missing DataReal-world data often has missing values.Data can have missing values for a number of reasons such as observations that were not recorded and data corruption.Handling missing data is important as many machine learning algorithms do not support data with missing values.As you've come to appreciate, there are many steps to building a model, from creating training and test sets, to fitting a classifier or regressor, to tuning its parameters, to evaluating its performance on new data. Imputation allows you to specify the value to replace (it can be something other than NaN) and the technique used to replace it (such as mean, median, or mode). 
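For instance, a minimal sketch of replacing a marker other than NaN, using the same (now deprecated) `sklearn.preprocessing.Imputer` class as the surrounding cells (newer scikit-learn versions provide `sklearn.impute.SimpleImputer` instead):

```Python
import numpy as np
from sklearn.preprocessing import Imputer

data = np.array([[1., 0., 2.],
                 [2., 3., 0.],
                 [4., 5., 6.]])

# Treat 0 (rather than NaN) as the missing-value marker and fill with the column mean.
imp = Imputer(missing_values=0, strategy='mean')
print(imp.fit_transform(data))
```

The same pattern works with `strategy='median'` or `strategy='most_frequent'`.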
The Imputer class operates directly on the NumPy array instead of the DataFrame.Imputation can be seen as the first step of this machine learning process, the entirety of which can be viewed within the context of a pipeline. Scikit-learn provides a pipeline constructor that allows you to piece together these steps into one process and thereby simplify your workflow.from pandas import read_csv from sklearn.preprocessing import Imputer #from sklearn.impute import SimpleImputer import numpy as np dataset = read_csv('../input/pimaindian/pima-indians-diabetes.data.csv', header=None) # mark zero values as missing or NaN dataset[[1,2,3,4,5]] = dataset[[1,2,3,4,5]].replace(0, np.NaN) # fill missing values with mean column values values = dataset.values imputer = Imputer() transformed_values = imputer.fit_transform(values) # count the number of NaN values in each column print("NaN values count :- ",np.isnan(transformed_values).sum())NaN values count :- 0Data Scaling and NormalizationA generic dataset (we assume here that it is always numerical) is made up of different values which can be drawn from different distributions, having different scales and, sometimes, there are also outliers. A machine learning algorithm isn't naturally able todistinguish among these various situations, and therefore, it's always preferable to standardize datasets before processing them.from sklearn.preprocessing import StandardScaler from sklearn.datasets import load_wine ss = StandardScaler() features, target = load_wine(return_X_y=True) scaled_data = ss.fit_transform(features) print('Unscaled Data:\n',features) print("Scaled Data :\n",scaled_data) from sklearn.preprocessing import Normalizer import numpy as np data = np.array([1.0, 2.0]) n_max = Normalizer(norm='max') norm_data = n_max.fit_transform(data.reshape(1, -1)) print("Norm Data(max) :\n ",norm_data) n_l1 = Normalizer(norm='l1') norm_data = n_l1.fit_transform(data.reshape(1, -1)) print("Norm Data(l1) :\n ",norm_data) n_l2 = Normalizer(norm='l2') n_l2.fit_transform(data.reshape(1, -1)) print("Norm Data(l2) :\n ",norm_data)Norm Data(max) : [[0.5 1. ]] Norm Data(l1) : [[0.33333333 0.66666667]] Norm Data(l2) : [[0.33333333 0.66666667]]MinMax ScalerThe MinMaxScaler is the probably the most famous scaling algorithm. It essentially shrinks the range such that the range is now between 0 and 1 (or -1 to 1 if there are negative values).This scaler works better for cases in which the standard scaler might not work so well. 
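Concretely, min-max scaling maps each feature value x to (x - min) / (max - min); a tiny numeric sketch of that behaviour:

```Python
import numpy as np
from sklearn.preprocessing import MinMaxScaler

x = np.array([[1.], [2.], [4.], [9.]])
# Each value becomes (x - 1) / (9 - 1): 0.0, 0.125, 0.375, 1.0
print(MinMaxScaler().fit_transform(x))
```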
If the distribution is not Gaussian or the standard deviation is very small, the min-max scaler works better.from sklearn.preprocessing import MinMaxScaler import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns df = pd.DataFrame({ # positive skew 'x1': np.random.chisquare(8, 1000), # negative skew 'x2': np.random.beta(8, 2, 1000) * 40, # no skew 'x3': np.random.normal(50, 3, 1000) }) scaler = MinMaxScaler() scaled_df = scaler.fit_transform(df) scaled_df = pd.DataFrame(scaled_df, columns=['x1', 'x2', 'x3']) fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(10, 5)) ax1.set_title('Before Scaling') sns.kdeplot(df['x1'], ax=ax1) sns.kdeplot(df['x2'], ax=ax1) sns.kdeplot(df['x3'], ax=ax1) ax2.set_title('After Min-Max Scaling') sns.kdeplot(scaled_df['x1'], ax=ax2) sns.kdeplot(scaled_df['x2'], ax=ax2) sns.kdeplot(scaled_df['x3'], ax=ax2) plt.show()/opt/conda/lib/python3.6/site-packages/scipy/stats/stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result. return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumvalRobust ScalerThe RobustScaler uses a similar method to the Min-Max scaler but it instead uses the interquartile range, rathar than the min-max, so that it is robust to outliers.from sklearn.preprocessing import RobustScaler from sklearn.preprocessing import MinMaxScaler import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns x = pd.DataFrame({ # Distribution with lower outliers 'x1': np.concatenate([np.random.normal(20, 1, 1000), np.random.normal(1, 1, 25)]), # Distribution with higher outliers 'x2': np.concatenate([np.random.normal(30, 1, 1000), np.random.normal(50, 1, 25)]), }) scaler = RobustScaler() robust_scaled_df = scaler.fit_transform(x) robust_scaled_df = pd.DataFrame(robust_scaled_df, columns=['x1', 'x2']) scaler = MinMaxScaler() minmax_scaled_df = scaler.fit_transform(x) minmax_scaled_df = pd.DataFrame(minmax_scaled_df, columns=['x1', 'x2']) fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(12, 5)) ax1.set_title('Before Scaling') sns.kdeplot(x['x1'], ax=ax1) sns.kdeplot(x['x2'], ax=ax1) ax2.set_title('After Robust Scaling') sns.kdeplot(robust_scaled_df['x1'], ax=ax2) sns.kdeplot(robust_scaled_df['x2'], ax=ax2) ax3.set_title('After Min-Max Scaling') sns.kdeplot(minmax_scaled_df['x1'], ax=ax3) sns.kdeplot(minmax_scaled_df['x2'], ax=ax3) plt.show()/opt/conda/lib/python3.6/site-packages/scipy/stats/stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result. 
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumvalNormalizerThe normalizer scales each value by dividing each value by its magnitude in n-dimensional space for n number of features.from mpl_toolkits.mplot3d import Axes3D from sklearn.preprocessing import RobustScaler from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import Normalizer import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns df = pd.DataFrame({ 'x1': np.random.randint(-100, 100, 1000).astype(float), 'y1': np.random.randint(-80, 80, 1000).astype(float), 'z1': np.random.randint(-150, 150, 1000).astype(float), }) scaler = Normalizer() scaled_df = scaler.fit_transform(df) scaled_df = pd.DataFrame(scaled_df, columns=df.columns) fig = plt.figure(figsize=(9, 5)) ax1 = fig.add_subplot(121, projection='3d') ax2 = fig.add_subplot(122, projection='3d') ax1.scatter(df['x1'], df['y1'], df['z1']) ax2.scatter(scaled_df['x1'], scaled_df['y1'], scaled_df['z1']) plt.show()Feature SelectionTwo examples of feature selection that use the classes SelectKBest (which selects the best K high-score features) and SelectPercentile (which selects only a subset of featuresbelonging to a certain percentile) are shown next. It's possible to apply them both to regression and classification datasets, being careful to select appropriate score functions.#SelectKBest features from sklearn.datasets import load_boston, load_iris from sklearn.feature_selection import SelectKBest, SelectPercentile, chi2, f_regression regr_data = load_boston() print(regr_data.data.shape) kb_regr = SelectKBest(f_regression) X_b = kb_regr.fit_transform(regr_data.data, regr_data.target) print(X_b.shape) print(kb_regr.scores_) class_data = load_iris() print(class_data.data.shape) perc_class = SelectPercentile(chi2, percentile=15) X_p = perc_class.fit_transform(class_data.data, class_data.target) print(X_p.shape) print(perc_class.scores_)(150, 4) (150, 1) [ 10.81782088 3.7107283 116.31261309 67.0483602 ]Principal Component AnalysisIn many cases, the dimensionality of the input dataset X is high and so is the complexity of every related machine learning algorithm. Moreover, the information is seldom spreaduniformly across all the features and, as discussed in the previous chapter, there will be high entropy features together with low entropy ones, which, of course, don't contributedramatically to the final outcome. It's possible to project the original feature vectors into this new (sub-)space, where each component carries a portion of total variance and where the new covariance matrix isdecorrelated to reduce useless information sharing (in terms of correlation) among different features. In scikit-learn, there's the PCA class which can do all this in a very smooth way.from sklearn.datasets import load_digits from sklearn.decomposition import PCA import matplotlib.pyplot as plt digits = load_digits() # Show some random digits selection = np.random.randint(0, 1797, size=100) fig, ax = plt.subplots(10, 10, figsize=(10, 10)) samples = [digits.data[x].reshape((8, 8)) for x in selection] for i in range(10): for j in range(10): ax[i, j].set_axis_off() ax[i, j].imshow(samples[(i * 8) + j], cmap='gray') plt.show()Each image is a vector of 64 unsigned int (8 bit) numbers (0, 255), so the initial number of components is indeed 64. 
However, the total amount of black pixels is often predominant and the basic signs needed to write 10 digits are similar, so it's reasonable to assume both high cross-correlation and a low variance on several components. Trying with 36 principal components, we get:pca = PCA(n_components=36, whiten=True) X_pca = pca.fit_transform(digits.data / 255) print(pca.explained_variance_ratio_) # Plot the explained variance ratio fig, ax = plt.subplots(1, 2, figsize=(16, 6)) ax[0].set_xlabel('Component') ax[0].set_ylabel('Variance ratio (%)') ax[0].bar(np.arange(36), pca.explained_variance_ratio_ * 100.0) ax[1].set_xlabel('Component') ax[1].set_ylabel('Cumulative variance (%)') ax[1].bar(np.arange(36), np.cumsum(pca.explained_variance_)[::-1]) plt.show()[0.14890594 0.13618771 0.11794594 0.08409979 0.05782415 0.0491691 0.04315987 0.03661373 0.03353248 0.03078806 0.02372341 0.02272697 0.01821863 0.01773855 0.01467101 0.01409716 0.01318589 0.01248138 0.01017718 0.00905617 0.00889538 0.00797123 0.00767493 0.00722904 0.00695889 0.00596081 0.00575615 0.00515157 0.0048954 0.00428888 0.00373606 0.00353272 0.00336684 0.00328029 0.00308318 0.00293778]As expected, the contribution to the total variance decreases dramatically starting from the fifth component, so it's possible to reduce the original dimensionality without anunacceptable loss of information, which could drive an algorithm to learn wrong classes. In the preceding graph, there are the same handwritten digits rebuilt using the first 36components with whitening and normalization between 0 and 1.X_rebuilt = pca.inverse_transform(X_pca) # Rebuild from PCA and show the result fig, ax = plt.subplots(10, 10, figsize=(10, 10)) samples = [pca.inverse_transform(X_pca[x]).reshape((8, 8)) for x in selection] for i in range(10): for j in range(10): ax[i, j].set_axis_off() ax[i, j].imshow(samples[(i * 8) + j], cmap='gray') plt.show()Messtechnik HS2021 - Tutorial 2 Aufgabe 1: Linearität in der Spektroskopie --------- Die Impulseantwort eines Spinsystems in der NMR/EPR kann durch die Blochgleichungen beschrieben werden. In der Annahme des Steady-States (wie z.B. bei continous wave EPR) ist das resultierende Spektrum durch die folgenden Parameter charakterisiert: Frequenz $\nu$ , longitudinale Relaxationszeit $T_1$, transversale Relaxationszeit $T_2$ und die Anregungsstärke $\nu_1$. Nehmen Sie für diese Aufgabe an, dass das System sich linear verhält (d.h. die Anregungsstärke klein ist). --------- 1a) Erstellen sie ein Spektrum (Absorptions- und Dispersionsspektrum) im Frequenzbreich $[-10,10]$, welches zwei unterschiedliche Frequenzkomponenten beinhaltet. Die beiden Komponenten sind charakterisiert durch: Komponente A - $\nu_A = 4$ - $T_{1,A} = 10$ - $T_{2,A} = 1$ Komponente B - $\nu_B = -2$ - $T_{1,B} = 20$ - $T_{2,B} = 1.5$ 70% der angeregten Spins tragen zu Komponente A bei und 30% zu B. Importieren Sie zu Beginn die gebrauchten Python-libraries und definieren Sie anschliessend alle oben genannten Parameter.import numpy as np import matplotlib.pyplot as plt from Bloch import Bloch_stationary # Komponente A frq_A = 4 T1_A = 10 T2_A = 1 frac_A = 0.7 # Komponente B frq_B = -2 T1_B = 20 T2_B = 1.5 frac_B = 0.31b) Verwenden Sie die Funktion `Bloch_stationary()` aus den Modul `Bloch.py` um das Spektrum zu berechnen. 
Um die Inputparameter und die Eingabereihenfolge für die Funktion `Bloch_stationary()` herauszufinden, können Sie die Helpfunktion `help(function)` von Python verwenden.help(Bloch_stationary) # Konstruiere Frequenzachse frq_axis = np.linspace(-10,10,1024) # Berechnen von Spektren mit Bloch_stationary spc_A = Bloch_stationary(frq_axis,frq_A,T1_A,T2_A) spc_B = Bloch_stationary(frq_axis,frq_B,T1_B,T2_B) spc = frac_A*spc_A + frac_B*spc_B # Annahme der Linearität, Einzelspektren können aufsummiert werden1c) Berechnen Sie das Amplitudenverhältnis der beiden Spektralkomponenten und kommentieren Sie das Ergebnis. Weshalb entspricht es nicht dem Verhältnis der angeregten Spins?maxA = np.amax(np.real(frac_A*spc_A)) maxB = np.amax(np.real(frac_B*spc_B)) ratio = maxA/maxB ratioDas Verhältnis der unterschiedlichen Komponenten widerspiegelt sich nicht in den Amplituden, sondern in den integralen Intensitäten ($7/3 \approx 2.33$). Hier ist die Linie mit dem grösseren Anteil breiter als diejenige mit dem kleineren Anteil. Daher ist das Amplitudenverhältnis kleiner ($\approx 1.56$). 1d) Plotten Sie das berechnete Spektrum, beschriften Sie die Achsen korrekt und erstellen Sie eine Legende, um den Real- und Imaginärteil zu beschriften. (Infos zu allen Funktionen in matlibplot.pyplot finden Sie [hier](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.htmlmodule-matplotlib.pyplot)).# Plot plt.figure(figsize=(8,6)) plt.plot(frq_axis,np.real(spc),'k') plt.plot(frq_axis,np.imag(spc),'r') plt.xlabel('$\Delta\\nu$ (MHz)',fontsize=13) plt.ylabel('intensity (a.u.)',fontsize=13) plt.legend(['Absorpion (real)','Dispersion (imaginär)'],fontsize=13) plt.title('Solution 1a)',fontsize=13);1e) Schreiben Sie eine Funktion `Bloch_stationary_multiline()`, die als Input Listen von Parametern akzeptiert und so ein Mehrlinienspektrum berechnet. Testen Sie Ihre geschriebene Funktion mit den unten angegebenen Parametern. Um zu sehen wie eine Funktion aufgebaut ist können Sie die `Bloch.py`-Datei öffnen und schauen, wie die `Bloch_stationary`-Funktion geschrieben wurde. Wenn Sie noch Informationen zum `numpy`-package brauchen, finden sie diese [hier](https://numpy.org/doc/stable/reference/routines.html). *Hinweis:* Ein `for`-loop in der Funktion kann sehr hilfreich sein.import numpy as np import matplotlib.pyplot as plt def Bloch_stationary_multiline(frq_axis,frq,T1,T2,fraction): N = len(frq) spectrum = np.zeros_like(frq_axis) fraction = fraction/np.sum(fraction) # normalization for i in range(N): currspc = Bloch_stationary(frq_axis,frq[i],T1[i],T2[i]) spectrum = spectrum + fraction[i]*currspc return spectrum # specify spectrum as line list frq = [-9, -7.5, -3,2, 5, 7.75, 8.25] T1 = [10, 12, 15, 10, 10, 20, 20] T2 = [3, 1.5, 1.5, 1, 1, 1.5, 1.5] fraction = [4, 5, 3, 3, 7, 2, 2] # define the frequency axis frq_axis = np.linspace(-15,15,1024) # spectrum calculation spc = Bloch_stationary_multiline(frq_axis,frq,T1,T2,fraction) plt.figure(figsize=(8,6)) plt.plot(frq_axis,np.real(spc),'k') plt.plot(frq_axis,np.imag(spc),'r') plt.xlabel('$\Delta\\nu$ (MHz)',fontsize=13) plt.ylabel('intensity (a.u.)',fontsize=13) plt.legend(['Absorpion (real)','Dispersion (imaginär)'],fontsize=13) plt.title('Solution 1c)',fontsize=13);1f) Das Spektrum kann mathematisch beschrieben werden als $$ S(\nu) = ( A(\nu) + \mathrm{i} D(\nu) ) \cdot \exp{(\mathrm{i}\phi)} $$ wobei $A(\nu)$ das Absorptionssignal, $D(\nu)$ das Dispersionssignal und $\phi$ eine Phasen sind. 
Die Phasenverschiebung durch $\phi$ muss korrigiert werden, dass nur das reine Absorptions- und Dispersionsspektrum analysiert werden kann. Diese Phasenkorrektur kann vorgenommen werden, indem man den oberen Ausdruck umformt zu: $$ S(\nu) \cdot \exp{(-\mathrm{i}\phi)} = A(\nu) + \mathrm{i} D(\nu) $$ Probieren Sie die Phase des gegebenen Signals (`spc_uncorr`) so zu korrigieren, dass das reine Absorptions- und Dispersionsspektrum entsteht.import math as m data = np.load('signal_uncorrected.npz') frq_axis = data['frq_axis'] spc_uncorr = data['spc'] phase = m.pi/5 spc_corr = spc_uncorr*np.exp(-1j*phase) plt.figure(figsize=(16,6)) plt.subplot(121) plt.plot(frq_axis,np.real(spc_uncorr),'k') plt.plot(frq_axis,np.imag(spc_uncorr),'r') plt.xlabel('$\Delta\\nu$ (MHz)',fontsize=13) plt.ylabel('intensity (a.u.)',fontsize=13) plt.legend(['Absorpion (unkorrigiert)','Dispersion (unkorrigiert)'],fontsize=13,loc=2) plt.subplot(122) plt.plot(frq_axis,np.real(spc_corr),'k') plt.plot(frq_axis,np.imag(spc_corr),'r') plt.xlabel('$\Delta\\nu$ (MHz)',fontsize=13) plt.ylabel('intensity (a.u.)',fontsize=13) plt.legend(['Absorption (korrigiert)','Dispersion (korrigiert)'],fontsize=13) plt.suptitle('Solution 1f)',fontsize=13);Aufgabe 2: Lineares Verhalten vs. Nicht-lineares Verhalten / Sättigung (optional)---------Der Sättigungsparameter $S = (2\pi \nu_1)^2 T_1 T_2 $ beschreibt, ob ein Spinübergang als lineares System betrachtet werden kann. Um eine Abweichung vom linearen Verhalten zu erkennen muss die Abhängigkeit der Amplitude, integralen Intensität und der Linienform einer Spektralkomponente von der Anregungsstärke $\nu_1$ betrachet werden.--------- 2a) Schreiben Sie eine Funktion, welche die maximale Amplitude, die integrale Intensität und die Linienform anhand der Full-Width-Half-Maximum (FWHM) für unterschiedliche Anregungsstärken $\nu_1$ berechnet. Teste deine Funktion mit den unten angegebenen Spinsystem-Parametern ($\nu,T_1,T_2$) für Anregungstärken $\nu_1 = [0.0001,0.1]$. *Hinweis:* Um ein Integral zu berechnen können Sie die Numpy-Funktion `np.trapz()` verwenden.import numpy as np import math as m import matplotlib.pyplot as plt from Bloch import Bloch_stationary def Bloch_saturation(frq_axis,frq,T1,T2,nu1): amplitude = np.zeros_like(nu1) integral = np.zeros_like(nu1) fwhm = np.zeros_like(nu1) for i in range(len(nu1)): currnu = nu1[i] currspc = Bloch_stationary(frq_axis,frq,T1,T2,currnu) amplitude[i] = np.amax(np.real(currspc)) integral[i] = np.trapz(np.real(currspc)) halfmax = np.amax(np.real(currspc))/2 idx_max = np.nonzero(np.real(currspc) == np.amax(np.real(currspc)))[0][0] spc_low = np.real(currspc[0:idx_max]) spc_high = np.real(currspc[idx_max+1:len(currspc)]) fwhm_low = np.abs(spc_low - halfmax).argmin() fwhm_high = np.abs(spc_high - halfmax).argmin() fwhm[i] = frq_axis[fwhm_high+idx_max] - frq_axis[fwhm_low] return amplitude,integral,fwhm # Spinsystem Parameter frq_axis = np.linspace(-5,5,16384) frq = 0 T1 = 10 T2 = 1 # Teste Bloch_saturation Funktion nu1 = np.linspace(0.0001,0.1,2048) amplitude,integral,fwhm = Bloch_saturation(frq_axis,frq,T1,T2,nu1)2b) Plotten Sie die maximale Amplitude, die integrale Intensität und die Full-Width-Half-Maximum (FWHM) in Abhänigkeit von der Anregungsstärke $\nu_1$ und dem Sättigungsparameter $S$. Überlegen Sie sich anhand dieser Plots in welchem Bereich von $\nu_1$ und $S$ das Spinsystem als linear betrachtet werden kann. 
Wie verhalten sich die maximale Amplitude, die integrale Intensität und die FWHM unterschiedlich im linearen und nicht-linearen Bereich?S = ((2*m.pi*nu1)**2)*T1*T2 plt.figure(figsize=(16,10)) # Plot maximale Amplitude plt.subplot(231) plt.plot(nu1,amplitude,'k') plt.xlabel('$\\nu_1$',fontsize=13) plt.ylabel('intensity (a.u.)',fontsize=13) plt.title('Amplitude') plt.subplot(234) plt.plot(S,amplitude,'k') plt.xlabel('$S$',fontsize=13) plt.ylabel('intensity (a.u.)',fontsize=13) # Plot integral Intensität plt.subplot(232) plt.plot(nu1,integral,'k') plt.xlabel('$\\nu_1$',fontsize=13) plt.ylabel('intensity (a.u.)',fontsize=13) plt.title('Integral') plt.subplot(235) plt.plot(S,integral,'k') plt.xlabel('$S$',fontsize=13) plt.ylabel('intensity (a.u.)',fontsize=13) # Plot FWHM plt.subplot(233) plt.plot(nu1,fwhm,'k') plt.xlabel('$\\nu_1$',fontsize=13) plt.ylabel('intensity (a.u.)',fontsize=13) plt.title('FWHM') plt.subplot(236) plt.plot(S,fwhm,'k') plt.xlabel('$S$',fontsize=13) plt.ylabel('intensity (a.u.)',fontsize=13);2c) Bestimmen Sie die Steigungen der Amplitude und der integralen Intensität im linearen Bereich als Funktion von $\nu_1$: - $ A_{\text{max}}(\nu_1) = c_1 \nu_1 $ - $ I(\nu_1) \; \; \; \; \; \;= c_2 \nu_1 $ Plotten Sie die Abhängigkeit der Amplitude und integralen Intensität von der Anregungsstärke $\nu_1$ zusammen mit der linearen Approximation. Berechnen und plotten Sie das Verhältnis zwischen der tatsächlichen und linear approximierten Amplitude / integralen Intensität ($A_{\text{real}}/A_{\text{lin}}$ und $I_{\text{real}}/I_{\text{lin}}$) in Abhänigkeit von $\nu_1$ und $S$.linlim = 0.01 idx_lim = np.abs(nu1-linlim).argmin() c1 = amplitude[idx_lim]/nu1[idx_lim] c2 = integral[idx_lim]/nu1[idx_lim] amplitude_lin = c1*nu1 integral_lin = c2*nu1 Aratio = amplitude/amplitude_lin Iratio = integral/integral_lin plt.figure(figsize=(10,16)) # Plot maximale Amplitude plt.subplot(321) plt.plot(nu1,amplitude,'k') plt.plot(nu1,amplitude_lin,'r') plt.xlabel('$\\nu_1$',fontsize=13) plt.ylabel('intensity (a.u.)',fontsize=13) plt.legend(['tatsächliche Amplitude','lineare Approximation'],fontsize=13) plt.title('Amplitude',fontsize=13) plt.subplot(323) plt.plot(nu1,Aratio,'k') plt.xlabel('$\\nu_1$',fontsize=13) plt.ylabel('$A_{lin}/A_{real}$',fontsize=13) plt.subplot(325) plt.plot(S,Aratio,'k') plt.xlabel('$S$',fontsize=13) plt.ylabel('$A_{lin}/A_{real}$',fontsize=13) # Plot integral Intensität plt.subplot(322) plt.plot(nu1,integral,'k') plt.plot(nu1,integral_lin,'r') plt.xlabel('$\\nu_1$',fontsize=13) plt.ylabel('intensity (a.u.)',fontsize=13) plt.legend(['tatsächliches Integral','lineare Approximation'],fontsize=13) plt.title('Integral',fontsize=13) plt.subplot(324) plt.plot(nu1,Iratio,'k') plt.xlabel('$\\nu_1$',fontsize=13) plt.ylabel('$I_{lin}/I_{real}$',fontsize=13) plt.subplot(326) plt.plot(S,Iratio,'k') plt.xlabel('$S$',fontsize=13) plt.ylabel('$I_{lin}/I_{real}$',fontsize=13);Example 2: Pairwise offset from Reference to Target starThis example shows how to use lowfssim to model the sensing offsets (errors) caused by moving between the reference and target stars, which have different spectral types, brightnesses, and for which we use different EM gains. 
It showcases:- using the integrated detector model- interweaving of multiple model "states" simultaneouslyNote that the EM gains in this example are notional, and should not be taken as a form of gospel for CGI.%load_ext autoreload %autoreload 2 # this import block isn't any of the good stuff from pathlib import Path from lowfsc import props from lowfsc.data import DesignData from lowfsc.spectral import StellarDatabase, ThroughputDatabase, LOWFS_BANDPASS from lowfsc.reconstruction import Reconstructor, synthesize_pupil_shear, prepare_Zmm, vmag_normalize from lowfsc.emccd import EMCCD from lowfsc.automate import chop_bipolar, plot_modes import numpy as truenp from tqdm import tqdm from prysm.conf import config from prysm.fttools import mdft import cupy as cp from cupyx.scipy import fft as cpfft from prysm.mathops import np, fft from matplotlib import pyplot as plt mdft.clear() fft._srcmodule = cpfft np._srcmodule = cp config.precision = 32 plt.style.use('bmh') %matplotlib inline # note that this is a little bit different root = Path('~/src/lowfssim-public/data').expanduser() star_type1 = 'g0v' star_type2 = 'b3v' wvl = LOWFS_BANDPASS mode='hlc' # or spec or wfov dd = DesignData.hlc_design(root) sd = StellarDatabase.bijan_data(root) td = ThroughputDatabase.bijan_data(root) throughput = td(mode, wvl) weightsRef = sd(star_type1, wvl) weightsTarg = sd(star_type2, wvl) fudgeRef = sd.sparsity_fudge_factor(star_type1, wvl) fudgeTarg = sd.sparsity_fudge_factor(star_type2, wvl) # the reference star is magnitude 2.25, the target magnitude 5 vRef = 10 ** (-2.25/2.5) vTarg = 10 ** (-2.25/2.5) weightsRef = weightsRef * (fudgeRef*vRef*throughput) weightsTarg = weightsTarg * (fudgeTarg*vTarg*throughput) GAINREF = 20 GAINTARG = 150 cam = EMCCD.cgi_camera() cam.em_gain = GAINREF dd.seed_zernikes(range(2,12)) wt = np.zeros(10) # in the cgi conops, we do chopping on the brighter reference star and adjust the estimator for the target star CHOPSIZE = 5 # this is a "more clever" way of doing the same thing as the lowfssim manual chops = np.diag(np.ones_like(wt)*CHOPSIZE) ref_z = np.zeros_like(wt) ref_z[0] = 3.14 # assume we didn't do an ideal (perfect) job aligning the occulter; ref_z[1] = 2.87 average_frame_10s = lambda img: cam.expose(img, 10_000).mean(axis=0, dtype=cp.float32) cam.em_gain = GAINREF diffs, ups, downs = chop_bipolar(wvl, weightsRef, dd, ref_z, chops, cam=cam) # note difference from ex1 for diff in diffs: diff /= CHOPSIZE ref = props.polychromatic(wvl, weightsRef, dd, ref_z) dark = cp.zeros_like(ref) # dark field to expose refRef = average_frame_10s(ref) darkRef = average_frame_10s(dark) cam.em_gain = GAINTARG # here we assume there is no change in alignment between ref and target for simplicity ref2 = props.polychromatic(wvl, weightsTarg, dd, ref_z) refTarg = average_frame_10s(ref2) darkTarg = average_frame_10s(dark) darksubRef = refRef - darkRef darksubTarg = refTarg - darkTarg chop_shear_px = 0.038 sy = synthesize_pupil_shear(darksubRef, chop_shear_px, 0) sx = synthesize_pupil_shear(darksubRef, chop_shear_px, 1) sy /= chop_shear_px sx /= chop_shear_px sy2 = synthesize_pupil_shear(darksubTarg, chop_shear_px, 0) sx2 = synthesize_pupil_shear(darksubTarg, chop_shear_px, 1) sy2 /= chop_shear_px sx2 /= chop_shear_px mask = np.ones_like(ref) mask[0,:] = 0 mask[-1,:] = 0 mask[:,0] = 0 mask[:,-1] = 0 zmm = prepare_Zmm(diffs, darksubRef, (sx,sy), mask) zmm2 = vmag_normalize(zmm, darksubRef, darksubTarg, sx2, sy2, mask) RRef = Reconstructor(zmm, refRef, darkRef) RTarg = Reconstructor(zmm2, refTarg, 
darkTarg) plot_modes(diffs, sx, sy, clim=5); # check that the reference star reconstructor recognizes the chops RRef.estimate(ups[0])[1] # now we poke each mode on the target star and see the difference in response wt[:] = 0 estimates = [] cam.em_gain = GAINTARG for i, chop in enumerate(chops): wt[i] = CHOPSIZE im = props.polychromatic(wvl, weightsTarg, dd, ref_z+wt) im = average_frame_10s(im) est = RTarg.estimate(im) estimates.append(est) # crop out the Zernike estimates and take the diagonals raw_estimates = cp.array(estimates) estimates = cp.diag(raw_estimates[:,1:11]) truenp.set_printoptions(suppress=True, precision=3) print(cp.diag(chops)) print(estimates)[5. 5. 5. 5. 5. 5. 5. 5. 5. 5.] [5.044 5.02 5.022 4.893 5.005 5.084 4.927 5.113 4.823 6.924]Checking the ANNimport os cwd = os.getcwd() cwd from keras.models import load_model import dnns import numpy as np from dnns.noisy_sgd import NoisySGD # load model model = load_model('../trained_model_of_lenet_300_100_relu_crossent_noisy_sgd_retest_final_dense.h5', custom_objects={'NoisySGD':NoisySGD}) # summarize model. model.summary() # load dataset from dnns.load_dataset import load_and_preprocess_dataset is_output_categorical = True dataset_info = load_and_preprocess_dataset( 'mnist', categorical_output=is_output_categorical) x_train, y_train = dataset_info['train'] x_test, y_test = dataset_info['test'] img_rows, img_cols = dataset_info['img_dims'] input_shape = dataset_info['input_shape'] num_classes = dataset_info['num_classes'] # reshape input to flatten data x_train = x_train.reshape(x_train.shape[0], 1, np.prod(x_train.shape[1:])) np.savez('x_train', x_train) x_test = x_test.reshape(x_test.shape[0], 1, np.prod(x_test.shape[1:])) np.savez('x_test', x_test) x_norm = x_train[:10000].reshape(10000, 1, np.prod(x_train.shape[1:])) np.savez('x_norm', x_norm) np.savez('y_train', y_train) np.savez('y_test', y_test) score = model.evaluate(x_test, y_test, verbose=1, batch_size=10) print('Test Loss:', score[0]) print('Test Accuracy:', score[1])Running the Toolboxfrom snntoolbox.bin.run import run_toolbox t_stim = 1000 testing_examples = 10 config = """[paths] dataset_path = %(path_wd)s filename_ann = trained_model_of_lenet_300_100_relu_crossent_noisy_sgd_retest_final_dense runlabel = lenet_dense_dt_0.1_normalised_99 [tools] evaluate_ann = False normalize = False only_serialise = True [normalization] percentile = 99 [simulation] simulator = spiNNaker duration = 1000 num_to_test = 5 batch_size = 5 keras_backend = tensorflow dt = 0.1 [cell] tau_syn_E = 0.2 tau_syn_I = 0.2 v_thresh = 1 [input] poisson_input = True [output] log_vars = {'all'} plot_vars = {'all'}""" file='config' with open(file, 'w') as filetowrite: filetowrite.write(config) f = open("config","r") #necessary to fully reset simulator from spinn_front_end_common.utilities import globals_variables globals_variables.unset_simulator() run_toolbox(config_filepath='config',terminal=True) #complains because it tries to do data analysis #in future it would be better to put serialisation inside the toolboxRunning the serialised model#necessary to fully reset simulator from spinn_front_end_common.utilities import globals_variables globals_variables.unset_simulator() extra_args = ['lenet_dense_dt_1_not_normalised_serialised', '--t_stim', str(t_stim), '--testing_examples',\ str(testing_examples), '--result_filename', 'output_data', '--result_dir', '.'] import pynn_object_serialisation.experiments.mnist_testing.mnist_testing as mnist_testing from 
pynn_object_serialisation.experiments.mnist_testing.mnist_argparser import parser new_args = parser.parse_args(extra_args) mnist_testing.run(new_args)Looking at the outputfrom pynn_object_serialisation.OutputDataProcessor import OutputDataProcessor np.set_printoptions(suppress=True) output = OutputDataProcessor('/mnt/snntoolbox/snn_toolbox_private/examples/models/test_examples/lenet_dense/results/output_data_900.npz') spikes = spikes['03Dense_10'] spikes = spikes + [0,1] spikes + [0,1] output.t_stim output.y_test.shape output.accuracy output.y_pred == output.y_test output.plot_bin(0, output.layer_names[0], shape = (28,28)) output.layer_names output.get_bounds(1)Multiple Evaluations This is used to make graphs#Being run separately import multiprocessing import sys import os from multiprocessing import Pool from time import time import numpy as np def EvaluateModel(t_stim, testing_examples): current = multiprocessing.current_process() print('Started {}'.format(current)) f_name = "errorlog/" + current.name +"_stdout.txt" g_name = "errorlog/" + current.name + "_stderror.txt" f = open(f_name, 'w') g = open(g_name, 'w') old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = f sys.stderr = g from spinn_front_end_common.utilities import globals_variables globals_variables.unset_simulator() extra_args = ['lenet_dense_dt_0.1_not_normalised_serialised', '--t_stim', str(t_stim), '--testing_examples',\ str(testing_examples), '--result_filename', 'output_data_'+str(t_stim), '--result_dir', 'results',\ '--chunk_size', '20'] import pynn_object_serialisation.experiments.mnist_testing.mnist_testing as mnist_testing from pynn_object_serialisation.experiments.mnist_testing.mnist_argparser import parser new_args = parser.parse_args(extra_args) mnist_testing.run(new_args) sys.stdout = old_stdout sys.stderr = old_stderr print("Run for {} completed".format(t_stim)) return po = Pool(15) range_input = np.array(range(1000,1600,100)) input_data = [(i,100) for i in range_input] #output = po.starmap(EvaluateModel, input_data) print('Done!') test = OutputDataProcessor('/mnt/snntoolbox/snn_toolbox_private/examples/models/test_examples/lenet_dense/results/output_data_1000.npz') test.y_test test.y_pred import glob, os from pynn_object_serialisation.OutputDataProcessor import OutputDataProcessor dt_01_accuracies = [] os.chdir("/mnt/snntoolbox/snn_toolbox_private/examples/models/test_examples/lenet_dense/results/") for file in glob.glob("output_data_[0-9]*.npz"): output = OutputDataProcessor('/mnt/snntoolbox/snn_toolbox_private/examples/models/test_examples/lenet_dense/results/'+file) dt_01_accuracies.append([output.t_stim,output.accuracy]) print(file, output.t_stim,output.accuracy) dt_01_accuracies = np.array(dt_01_accuracies).astype(float) import glob, os from pynn_object_serialisation.OutputDataProcessor import OutputDataProcessor dt_1_accuracies = [] os.chdir("/mnt/snntoolbox/snn_toolbox_private/examples/models/test_examples/lenet_dense/results/") for file in glob.glob("output_data_dt_1_[0-9]*.npz"): output = OutputDataProcessor('/mnt/snntoolbox/snn_toolbox_private/examples/models/test_examples/lenet_dense/results/'+file) dt_1_accuracies.append([output.t_stim,output.accuracy]) print(file, output.t_stim,output.accuracy) dt_1_accuracies = np.array(dt_1_accuracies).astype(float) dt_01_accuracies import matplotlib.pyplot as plt import numpy as np accuracies = np.array(accuracies) #dt_1_accuracies = dt_1_accuracies[:-1,:] fig = plt.figure(dpi=300) ax = plt.subplot(111) ax.scatter(dt_01_accuracies[:,0], 
100*dt_01_accuracies[:,1], label='SNN 0.1ms timestep') ax.scatter(dt_1_accuracies[:,0], 100*dt_1_accuracies[:,1], label='SNN 1ms timestep') box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) ax.set_xlabel('Stimulation time (ms)') ax.set_ylabel('Accuracy (%)') ax.axhline(y=97.9, color='r', linestyle='-', label='ANN (97.9%)') ax.set_ylim([0,100]) ax.set_xlim([0,10500]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) range_input = np.array([50,60,70,80,90,95,99]) #range_input = np.array([90,95]) from pynn_object_serialisation.OutputDataProcessor import OutputDataProcessor dt_01_norm_accuracies = [] for norm in range_input: accuracy = OutputDataProcessor('/mnt/snntoolbox/snn_toolbox_private/examples/models/test_examples/lenet_dense/results/output_data_dt_01_normalised_'+str(norm)+'.npz').accuracy dt_01_norm_accuracies.append([norm,accuracy]) dt_01_norm_accuracies = np.array(dt_01_norm_accuracies) range_input = np.array([50,60,70,80,90,95,99]) from pynn_object_serialisation.OutputDataProcessor import OutputDataProcessor dt_1_norm_accuracies = [] for norm in range_input: accuracy = OutputDataProcessor('/mnt/snntoolbox/snn_toolbox_private/examples/models/test_examples/lenet_dense/results/output_data_dt_1_normalised_'+str(norm)+'.npz').accuracy dt_1_norm_accuracies.append([norm,accuracy]) dt_1_norm_accuracies = np.array(dt_1_norm_accuracies) out = OutputDataProcessor('/mnt/snntoolbox/snn_toolbox_private/examples/models/test_examples/lenet_dense/results/output_data_dt_01_normalised_50.npz') out.y_pred out.y_test import matplotlib.pyplot as plt import numpy as np fig = plt.figure(dpi=300) ax = plt.subplot(111) ax.scatter(dt_1_norm_accuracies[:,0], 100*dt_1_norm_accuracies[:,1], label='SNN 1ms timestep') #ax.axhline(y=dt_1_accuracies[]) ax.scatter(dt_01_norm_accuracies[:,0], 100*dt_01_norm_accuracies[:,1], label='SNN 0.1ms timestep') box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) ax.set_xlabel('Normalisation percentile') ax.set_ylabel('Accuracy (%)') ax.axhline(y=97.9, color='r', linestyle='-', label='ANN (97.9%)') ax.set_ylim([0,100]) ax.set_xlim([40, 100]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) dt_01_norm_accuracies[:,0] dt_1_accuracies[:,0] from pynn_object_serialisation.OutputDataProcessor import OutputDataProcessor sparse_test = OutputDataProcessor('/mnt/snntoolbox/snn_toolbox_private/examples/models/test_examples/lenet_sparse/results/test_1000ms_10_examples_unnormalised_2.npz') sparse_test.accuracy sparse_test.y_pred sparse_test.y_test sparse_test_long = OutputDataProcessor('/mnt/snntoolbox/snn_toolbox_private/examples/models/test_examples/lenet_sparse/results/test_10000ms_10_examples.npz') sparse_test_long.accuracy sparse_test.y_pred sparse_test.dt sparse_test_long.get_counts(5, sparse_test.layer_names[3], 10) sparse_test_long.plot_bin(5, sparse_test_long.layer_names[-1], shape=(1,10)) sparse_test.y_testInput ANN check activation valuesfrom keras.models import load_model import dnns import numpy as np from dnns.noisy_sgd import NoisySGD from dnns.sparse_layer import Sparse custom_objects = {'NoisySGD':NoisySGD, 'Sparse':Sparse} # load model dense_model = load_model('/mnt/snntoolbox/snn_toolbox_private/examples/models/test_examples/lenet_dense/trained_model_of_lenet_300_100_relu_crossent_noisy_sgd_retest_final_dense.h5', custom_objects=custom_objects) sparse_model = 
load_model('/mnt/snntoolbox/snn_toolbox_private/examples/models/test_examples/lenet_sparse/trained_model_of_lenet_300_100_relu_crossent_noisy_sgd_retest.h5', custom_objects=custom_objects) dense_model.summary() sparse_model.summary() print(sparse_model.layers[-1].activation) plt.hist(dense_model.layers[3].get_weights()[0].flatten()) some_weights = sparse_model.layers[-1].get_weights()[2] * sparse_model.layers[-1].get_weights()[0] plt.hist(some_weights[(abs(some_weights) > 0)].flatten(), bins=100) np.max(some_weights[some_weights<0]) from keras.datasets import mnist import matplotlib.pyplot as plt (x_train, y_train), (x_test, y_test) = mnist.load_data() from keras.models import Model from keras.activations import relu, linear, tanh, softsign, exponential,sigmoid from vis.utils.utils import apply_modifications model = sparse_model print([layer.name for layer in model.layers]) def update_layer_activation(model, activation, index=-1, custom_objects=None): model.layers[index].activation = activation return apply_modifications(model, custom_objects=custom_objects) layer_name = 'sparse_3' intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output) intermediate_layer_model = update_layer_activation(intermediate_layer_model, linear, custom_objects=custom_objects) print(intermediate_layer_model.layers[-1].activation) intermediate_layer_model.summary() intermediate_output = intermediate_layer_model.predict(x_test[:10].reshape((10,1,784))) model_output = model.predict(x_test[:10].reshape((10,1,784))) intermediate_output[3] intermediate_output = intermediate_layer_model.predict(x_test[:10].reshape((10,1,784))) plt.imshow(intermediate_output[9].reshape(10,-1)) plt.colorbar() plt.imshow(x_test[0].reshape((28,28))) x_test[0].flatten().shape plt.imshow(sparse_test.get_counts(9, sparse_test.layer_names[2], 100).reshape((10,-1))) plt.colorbar()Converting the paired data from "Winning Arguments: Interaction Dynamics and Persuasion Strategies in Good-faith Online Discussions" into ConvoKit format (the data used in section 4 of their paper). Note: we are only converting the subset data used to measure successful vs. unsuccessful arguments. All data provided by -------------------- Winning Arguments: Interaction Dynamics and Persuasion Strategies in Good-faith Online Discussions, , , . In Proceedings of the 25th International World Wide Web Conference (WWW'2016).The paper, data, and associated materials can be found at:http://chenhaot.com/pages/changemyview.htmlIf you use this data, please cite:@inproceedings{tan+etal:16a, author = { and and and }, title = {Winning Arguments: Interaction Dynamics and Persuasion Strategies in Good-faith Online Discussions}, year = {2016}, booktitle = {Proceedings of WWW} } Note at the blog in the hyperlink above, the data we used is the original data (linked with corresponding README, PDF and Slides). We did *not* use the updated data provided on 11/11/2016 Before starting the data conversion, you need to download the data, linked above, and extract the data from the tar archive. 
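As a minimal sketch of that extraction step (the archive name and destination directory below are placeholders, not the exact filenames from the download page), Python's standard `tarfile` module is enough:

```python
import tarfile

# Placeholder paths -- substitute the archive you actually downloaded
# and whatever working directory you prefer.
archive_path = 'cmv_pair_data.tar.bz2'
dest_dir = 'cmv_data'

# Extract every member of the archive into dest_dir; tarfile.open()
# transparently detects the compression from the file itself.
with tarfile.open(archive_path) as tar:
    tar.extractall(path=dest_dir)
```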
------------------------------------import os #here I set the working directory to where I store the convokit package os.chdir('C:\\Users\\Andrew\\Desktop\\Cornell-Conversational-Analysis-Toolkit') from convokit import Corpus, User, Utterance, meta_index import pandas as pdLoad the original pair data:pairDFtrain=pd.read_json('C:\\Users\\Andrew\\Documents\\pair_task\\train_pair_data.jsonlist',lines=True) print(len(pairDFtrain)) pairDFtrain['train']=1 pairDFtrain.tail() pairDFhold=pd.read_json('C:\\Users\\Andrew\\Documents\\pair_task\\heldout_pair_data.jsonlist',lines=True) print(len(pairDFhold)) pairDFhold['train']=0 pairDFhold.head() pairDF=pd.concat([pairDFtrain,pairDFhold]) len(pairDF)Note: Each observation has the reply comments in a conversation that changes the OP's (OP: original poster) mind (positive column) and a conversation that does not change the OP's mind (negative column). Unfortunately, this does not include the comments that OP made after their original post: the comments made by the OP in response to the second conversant's arguments. To find the comments made by OP (i.e. the other half of the conversation), we need to retrieve them from the 'all' dataset. First: collect the unique identifiers for each original post in our datasetnyms = list(set(pairDF.op_name)) len(nyms)Collect each post from the full dataset (this has the full comment threads, whereas the pair data above only has the first response): Note: if you have not run this notebook before, then you will need to uncomment the following seven code cells. It will load the full dataset into your working memory and save only the observations that match with the posts in the pair_data above.# #note: this is over 2 GB of data, uncomment the following two lines to read in the data # dataT = pd.read_json('C:\\Users\\Andrew\\Documents\\all\\train_period_data.jsonlist', lines=True) # # len(dataT)Keep only the posts that are identified in our original dataset:# #note: this reduces the 2 GB dataset to a similar size as our original dataset # dataT=dataT[dataT.name.isin(nyms)] # len(dataT) # # do the same for the holdout data # dataH = pd.read_json('C:\\Users\\Andrew\\Documents\\all\\heldout_period_data.jsonlist', lines=True) # len(dataH) # dataH=dataH[dataH.name.isin(nyms)] # len(dataH) # #combine holdout and train datasets # data = pd.concat([dataT,dataH]) # len(data)Saving the posts from the full dataset that are the same as posts in our pair data.# #note: I save the data as a pickle file so I don't have to reload the 2 GB dataset in my working memory # data.to_pickle('C:\\Users\\Andrew\\Downloads\\pairAll.pkl')Here, I have already run this notebook, so I can just load this dataset back into working memory.data = pd.read_pickle('C:\\Users\\Andrew\\Downloads\\pairAll.pkl') data.tail() len(data) len(pairDF) data.columnsonly keep the comments and the identifier for merging with the original dataset:data=data[['comments','name']] pairDF.columnsThis joins the comments in the 'all' data, with the posts we are interested in studying:pairDF=pairDF.join(data.set_index('name'), on='op_name') len(pairDF) pairDF.tail()Now that we have all comments made within every CMV post in our dataset, we need to extract only the comments that correspond to a positive argument and negative argument (i.e. the ones recorded as either changing OP's mind or not). 
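Before the helper functions below, here is a minimal sketch of the fields a single pair observation is assumed to carry (all IDs and usernames are hypothetical, not taken from the corpus): the challenger's comments sit in the 'positive'/'negative' columns, while 'comments' holds every comment made under the post.

```python
example_row = {
    'op_name': 't3_aaaaaa',                    # identifier of the original post
    'op_author': 'example_op',                 # the original poster
    'positive': {'comments': [{'id': 'c1'}]},  # challenger thread that changed OP's mind
    'negative': {'comments': [{'id': 'c2'}]},  # challenger thread that did not
    'comments': [                              # full thread; real comments carry more fields
        {'id': 'c1', 'name': 't1_c1', 'parent_id': 't3_aaaaaa', 'author': 'challenger_1'},
        {'id': 'c3', 'name': 't1_c3', 'parent_id': 't1_c1', 'author': 'example_op'},
        {'id': 'c2', 'name': 't1_c2', 'parent_id': 't3_aaaaaa', 'author': 'challenger_2'},
    ],
}
```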
First, collect the identifiers for each comment made by the respondent attempting to change the OP's mind (there is a respondent in both the positive and negative columns).def collectResponses(responseList): iDs=[] if len(responseList['comments'])>0: for each in responseList['comments']: iDs.append(each['id']) return iDs pairDF['negIDs']=pairDF.negative.apply(lambda x: collectResponses(x)) pairDF['posIDs']=pairDF.positive.apply(lambda x: collectResponses(x))Now collect each of the comment identifiers that signify a response to the challenger by OPdef collectOPcommentIDs(op_auth, allComments, replyIDs): opIds =[] for comment in allComments: if comment['parent_id'].split('_')[1] in replyIDs: if 'author' in comment.keys(): if comment['author'] == op_auth: opIds.append(comment['id']) return opIds pairDF['opRepliesPos'] = pairDF[['op_author','comments','posIDs']].apply(lambda x: collectOPcommentIDs(x['op_author'],x['comments'],x['posIDs']),axis=1) pairDF['opRepliesNeg'] = pairDF[['op_author','comments','negIDs']].apply(lambda x: collectOPcommentIDs(x['op_author'],x['comments'],x['negIDs']),axis=1)Here I collect and properly order each of the comment IDs made in the thread _only_ by either OP or the 2nd conversant studied for both succesful and unsuccesful arguments:def orderThreadids(comments, replyIDs, opCommentIDs): threadIDs=list(replyIDs) for comment in comments: if comment['id'] in opCommentIDs: pID= comment['parent_id'].split('_')[1] if pID in replyIDs: threadIDs.insert(threadIDs.index(pID)+1,comment['id']) return threadIDs pairDF['posOrder']= pairDF[['comments','posIDs','opRepliesPos']].apply(lambda x: orderThreadids(x['comments'],x['posIDs'],x['opRepliesPos']) ,axis = 1) pairDF['negOrder']= pairDF[['comments','negIDs','opRepliesNeg']].apply(lambda x: orderThreadids(x['comments'],x['negIDs'],x['opRepliesNeg']) ,axis = 1)This function takes the ordered thread IDs for only the successful and unsuccesful arguments measured in the original paper (although, note: I have also collected the OP replies from the 'all' data, which wasn't included in the smaller pair_data).Note: I don't convert this section into convokit format, but instead I convert the full comment threads later in this notebook. If you are interested in looking at the successful and unsuccessful arguments in the convokit format, see the 'success' attribute in each utterance's metadatadef collectThread(comments, orderedThreadids): threadComments=[] for iD in orderedThreadids: for comment in comments: if iD==comment['id']: threadComments.append(comment) return threadComments pairDF['positiveThread'] = pairDF[['comments','posOrder']].apply(lambda x: collectThread(x['comments'],x['posOrder']),axis=1) pairDF['negativeThread'] = pairDF[['comments','negOrder']].apply(lambda x: collectThread(x['comments'],x['negOrder']),axis=1)Note above: I have just collected each individual thread (with OP comments). However, when studying this data, we may be interested in looking at the entire conversation. Therefore, instead of only converting the positive threads and negative threads into convokit format, here I simply add an attribute to the comments if they are part of either the positive or negative thread. 
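As a quick sanity check of the ordering logic above (all IDs here are hypothetical), a toy call shows how an OP reply is slotted in directly after the challenger comment it answers:

```python
toy_comments = [
    {'id': 'r1', 'parent_id': 't3_post'},   # challenger's first comment
    {'id': 'op1', 'parent_id': 't1_r1'},    # OP's reply to r1
    {'id': 'r2', 'parent_id': 't1_op1'},    # challenger's follow-up
]
# replyIDs are the challenger's comments, opCommentIDs are the OP's replies
orderThreadids(toy_comments, ['r1', 'r2'], ['op1'])
# expected result: ['r1', 'op1', 'r2']
```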
Here I add the success attribute and the pair identification (see my readme file for a more detailed explanation of 'success' and 'pair_ids'): # Create an identification # for the paired unsuccessful/successful arguments, # Note: the pair # will be the same for successful-unsuccessful matched pairs with the prefix 'p_' for pair # if there is no paired argument for the comment (i.e. it was either the original post by OP or an uncategorized comment), # then pair_id = None c=0 pairIDS={} for i, r in pairDF.iterrows(): c=c+1 for comment in r.comments: if comment['id'] in r.posOrder: comment['success']=1 if comment['name'] in pairIDS.keys(): pairIDS[comment['name']].append('p_'+str(c)) pairIDS[comment['name']]=list(set(pairIDS[comment['name']])) else: pairIDS[comment['name']]=['p_'+str(c)] pairIDS[comment['name']]=list(set(pairIDS[comment['name']])) elif comment['id'] in r.negOrder: comment['success']=0 if comment['name'] in pairIDS.keys(): pairIDS[comment['name']].append('p_'+str(c)) pairIDS[comment['name']]=list(set(pairIDS[comment['name']])) else: pairIDS[comment['name']]=['p_'+str(c)] pairIDS[comment['name']]=list(set(pairIDS[comment['name']])) if comment['name'] not in pairIDS.keys(): pairIDS[comment['name']]=[] if 'success' not in comment.keys(): comment['success']=None #make a column for pair_ids collected at the op post level, note: this won't be unique at the observation level in our pairDF dataframe, but I'm just doing this for quick conversion and after converting it into convokit, I add the list in at the conversation-level metadata and it is unique per conversation threads = list(set(pairDF.op_name)) pids =[] for thread in threads: pid=[] for i,r in pairDF[pairDF.op_name==thread].iterrows(): for comment in r.comments: if len(pairIDS[comment['name']])>0: for p in pairIDS[comment['name']]: pid.append(p) pid=list(set(pid)) pids.append(pid) pairDF['pIDs']=pairDF.op_name.apply(lambda x: pids[threads.index(x)])Now the data is collected in a pandas dataframe with each thread's comments fully accounted for. Convert it into convokit format: The first step is to create a list of all Redditors, or 'users' in convokit parlance:users = list(set(pairDF.op_author)) for i,r in pairDF.iterrows(): for comment in r.comments: if 'author' in comment.keys(): if comment['author'] not in users: users.append(comment['author']) else: continue len(users)Note: I don't have metadata on individual users. I briefly considered creating a unique identifier for each user and including the 'username' as metadata, but since each Reddit username is unique, it would be superfluous. I believe other relevant information (such as whether a Redditor is the original poster) is specific to individual conversations and utterances. Two metadata points of note: 'author_flair_css_class' and 'author_flair_text' both describe flags that appear next to an author in a subreddit. In the changemyview subreddit the moderators use this to illustrate whether the author has changed someone's mind and it can be seen as both an award and evidence of credibility in the subreddit. While I would include this as author metadata, I believe, instead, that it is actually 'conversation' metadata because this flag would be updated over time if the author changes multiple people's minds over the course of many conversations. 
Since this data was collected overtime, the flag is likely to change per user across multiple conversations, possibly across utterances.I will include the user_meta dictionary, just in case, so data can be added to it later.user_meta={} for user in users: user_meta[user]={} corpus_users = {k: User(name = k, meta = v) for k,v in user_meta.items()} print("number of users in the data = {0}".format(len(corpus_users)))number of users in the data = 34910Next: create utterancesc=0 count=0 errors=[] utterance_corpus = {} for i , r in pairDF.iterrows(): #this creates an Utterance using the metadata provided in the original file. Note: this is for the original post in each observation within the pandas dataframe utterance_corpus[r.op_name]=Utterance(id=r.op_name , user=corpus_users[r.op_author], root=r.op_name , reply_to=None, timestamp=None, text=r.op_text, meta= {'pair_ids':[], 'success':None, 'approved_by': None, 'author_flair_css_class': None, 'author_flair_text': None, 'banned_by': None, 'controversiality': None, 'distinguished': None, 'downs': None, 'edited': None, 'gilded': None, 'likes': None, 'mod_reports':None, 'num_reports': None, 'replies': [com['id'] for com in r.comments if com['parent_id']==r.op_name], 'report_reasons': None, 'saved': None, 'score': None, 'score_hidden': None, 'subreddit': None, 'subreddit_id': None, 'ups': None, 'user_reports': None}) #note: now for every comment in the original thread, make an utterance for comment in r.comments: try: utterance_corpus[comment['name']]=Utterance(id=comment['name'], user=corpus_users[comment['author']], root=r.op_name, reply_to=comment['parent_id'], timestamp=int(comment['created']), text=comment['body'] , meta={ 'pair_ids':pairIDS[comment['name']], 'success':comment['success'], 'approved_by': comment['approved_by'], 'author_flair_css_class': comment['author_flair_css_class'], 'author_flair_text': comment['author_flair_text'], 'banned_by': comment['banned_by'], 'controversiality': comment['controversiality'], 'distinguished': comment['distinguished'], 'downs': comment['downs'], 'edited': comment['edited'], 'gilded': comment['gilded'], 'likes': comment['likes'], 'mod_reports':comment['mod_reports'], 'num_reports': comment['num_reports'], 'replies':comment['replies'], 'report_reasons': comment['report_reasons'], 'saved': comment['saved'], 'score': comment['score'], 'score_hidden': comment['score_hidden'], 'subreddit': comment['subreddit'], 'subreddit_id': comment['subreddit_id'], 'ups': comment['ups'], 'user_reports': comment['user_reports'] }) #this except catches multiple comments that have no text body, see errors examples below except: c=c+1 errors.append(comment) utterance_corpus[comment['name']]=Utterance(id=comment['name'], user=User(name='[missing]'), root=r.op_name, reply_to=comment['parent_id'], timestamp=None, text=None , meta={ 'pair_ids':pairIDS[comment['name']], 'success':comment['success'], 'approved_by': None, 'author_flair_css_class': None, 'author_flair_text': None, 'banned_by': None, 'controversiality': None, 'distinguished': None, 'downs': None, 'edited': None, 'gilded': None, 'likes': None, 'mod_reports': None, 'num_reports': None, 'replies': None, 'report_reasons': None, 'saved': None, 'score': None, 'score_hidden': None, 'subreddit': None, 'subreddit_id': None, 'ups': None, 'user_reports': None }) print('there were '+str(c)+' comments that were missing common attributes')there were 530 comments that were missing common attributesThe 530 comments missing common attributes (note that none of them have a text body or 
author) have been included in the corpus for completeness (note: each were caught by the exception in the above code, but still included), here are some examples of these comments:errors[22] errors[99] errors[395] len(utterance_corpus)Note above: the of individual posts is less than each recorded comment in our dataset. This stands scrutiny when reviewing the dataset for two reasons: 1. each positive and negative thread correspond to the same original post. 2. original posts were re-used to compare different successful/non-successful arguments. Creating a corpus from a list of utterances:utterance_list = [utterance for k,utterance in utterance_corpus.items()] change_my_view_corpus = Corpus(utterances=utterance_list, version=1) print("number of conversations in the dataset = {}".format(len(change_my_view_corpus.get_conversation_ids())))number of conversations in the dataset = 3051Note: 3051 is the number of original posts recorded in the dataset (both train and hold out data)convo_ids = change_my_view_corpus.get_conversation_ids() for i, convo_idx in enumerate(convo_ids[0:2]): print("sample conversation {}:".format(i)) print(change_my_view_corpus.get_conversation(convo_idx).get_utterance_ids())sample conversation 0: ['t3_2ro9ux', 't1_cnhplrm', 't1_cnhrvq7', 't1_cnhz66d', 't1_cniauhy', 't1_cnibfev', 't1_cnic0gj', 't1_cnhpsmr', 't1_cnhpvqs', 't1_cnhq7iw', 't1_cnhqrw1', 't1_cnhqzsf', 't1_cni8tcx', 't1_cnhpp4o', 't1_cnhqouu', 't1_cnhrd8u', 't1_cnhrwsq', 't1_cnhs6sc', 't1_cnhtr4t', 't1_cnhuopi', 't1_cnio1bg', 't1_cnhq330', 't1_cnhs7xb', 't1_cnhpnmr', 't1_cnhqhxa', 't1_cnhrkoc', 't1_cnhq7nv', 't1_cnhqcwz', 't1_cnhsyft', 't1_cnhww76', 't1_cnhz5wq', 't1_cni80dr', 't1_cni8e2y'] sample conversation 1: ['t3_2ro0ti', 't1_cnhpddf', 't1_cnhpqan', 't1_cnhuxye', 't1_cni1m79', 't1_cni24ug', 't1_cnhrcu4', 't1_cni06fr', 't1_cnhp0bu', 't1_cnhppsw', 't1_cnhwhma', 't1_cnho6mi', 't1_cnhot32', 't1_cnhp1pb', 't1_cnho7iy', 't1_cnhoqp4', 't1_cnhobzs', 't1_cnhop4t', 't1_cnhp1nq', 't1_cnhpgyd', 't1_cnhp5lp', 't1_cnhplmn', 't1_cni3tyd', 't1_cnhqck4', 't1_cnhpee3', 't1_cnhregg', 't1_cniogf7', 't1_cnhowj2', 't1_cnhxuu1', 't1_cniedbg', 't1_cnixgm0']Add conversation-level metadata:convos = change_my_view_corpus.iter_conversations() for convo in convos: convo.add_meta('op-userID',pairDF[pairDF.op_name==convo._id].op_author[pairDF[pairDF.op_name==convo._id].index[0]]) convo.add_meta('op-text-body',pairDF[pairDF.op_name==convo._id].op_text[pairDF[pairDF.op_name==convo._id].index[0]]) convo.add_meta('op-title',pairDF[pairDF.op_name==convo._id].op_title[pairDF[pairDF.op_name==convo._id].index[0]]) convo.add_meta('pair_ids',pairDF[pairDF.op_name==convo._id].pIDs[pairDF[pairDF.op_name==convo._id].index[0]]) convo_ids= change_my_view_corpus.get_conversation_ids() for cv in convo_ids: change_my_view_corpus.get_conversation(cv).add_meta('train',int(pairDF[pairDF.op_name==cv].train[pairDF[pairDF.op_name==cv].index[0]]))Add corpus title:change_my_view_corpus.meta['name'] = "Change My View Corpus" change_my_view_corpus.print_summary_stats() change_my_view_corpus.dump('change-my-view-corpus', base_path='C:\\Users\\Andrew\\Desktop\\CMV data')Most statements (logical lines) that you write will contain expressions. A simple example of an expression is 2 + 3 . Anexpression can be broken down into operators and operands.Operators are functionality that do something and can be represented by symbols such as + or by special keywords. Operatorsrequire some data to operate on and such data is called operands. 
In this case, 2 and 3 are the operands.eval("4+7")Unpacking the paper - CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX* https://arxiv.org/pdf/1611.01144.pdf Introduction* Not much to get here THE GUMBEL-SOFTMAX Distribution * Gumbel-Softmax distribution, a continuous distribution over the simplex that can approximate samples from a categorical distribution: Simplex means that it consists of variables that are all between 0 .. 1, and the sum of all these variables is 1. Example: [0.2,0.2,0.2,0.4] * z is a categorical variable with class probabilities π1, π2, ...πk* k is the number of classes* samples (e.g., z's) are encoded as k-dimensional 1-hot vectors. So if you have five classes, an example is: [0,0,0,1,0] you can draw samples z efficiently by: * drawing k samples from a gumbel distribution $g_1...g_k$. The samples are independent and identically distributed, drawn from a Gumbel distribution $(\mu=0,\beta=1)^1$* calculating $argmax(g_i + log(\pi_i))$ for all $k$ samples, with $\pi_i$ being the class probability. * creating a one-hot encoding of that argmax. If you use softmax as an approximation of argmax, you'll get Gumbel-Softmax* additionally, they add $\tau$ as a temperature parameter to their softmax* $y_i = \exp(x_i) \, / \sum_{n=1}^{k} \exp(x_n)$ https://en.wikipedia.org/wiki/Gumbel_distribution $$c = \sqrt{a^2 + b^2}$$ Sampling from a gumbel distribution* GIST: https://gist.github.com/ericjang/1001afd374c2c3b7752545ce6d9ed349Footnote 1 on page 2def sample_gumbel(shape, eps=1e-20): U = tf.random_uniform(shape,minval=0,maxval=1) return -tf.log(-tf.log(U + eps) + eps) def gumbel_softmax_sample(logits, temperature): y = logits + sample_gumbel(tf.shape(logits)) return tf.nn.softmax( y / temperature) def gumbel_softmax(logits, temperature, hard=False): """Sample from the Gumbel-Softmax distribution and optionally discretize. Args: logits: [batch_size, n_class] unnormalized log-probs temperature: non-negative scalar hard: if True, take argmax, but differentiate w.r.t. soft sample y Returns: [batch_size, n_class] sample from the Gumbel-Softmax distribution. 
If hard=True, then the returned sample will be one-hot, otherwise it will be a probabilitiy distribution that sums to 1 across classes """ y = gumbel_softmax_sample(logits, temperature) if hard: k = tf.shape(logits)[-1] #y_hard = tf.cast(tf.one_hot(tf.argmax(y,1),k), y.dtype) y_hard = tf.cast(tf.equal(y,tf.reduce_max(y,1,keep_dims=True)),y.dtype) y = tf.stop_gradient(y_hard - y) + y return yAnalysis of dyad symmetries in centromeric and noncentromeric regionsCompare dyads detected using EMBOSS palindromefrom __future__ import division %pylab inline import glob import seaborn as sns import matplotlib.ticker as ticker import re sns.set_style('ticks') sns.set_context('paper') from scipy.stats import ks_2samp def iter_palindrome(fn): """Generator -- parse EMBOSS palindrome output""" with open(fn,'r') as f: coords = [] seqs = [] name = None out = False for line in f: line = line.rstrip() if 'Palindromes of' in line: name = line.split()[2] elif ':' in line or len(line) == 0: continue else: if '|' in line: nm = line.count('|') out = True continue else: line = line.split() s,e = int(line[0]),int(line[-1]) if s > e: s,e = e,s s -= 1 coords.append((s,e)) seqs.append(line[1].upper()) if out: G = coords[1][0]-coords[0][1] for (a,b),c in zip(coords,seqs): yield name,a,b,c,nm/len(c),G out = False coords = [] seqs = [] def pal2mat(fn,minstem=5,maxstem=20,mingap=0,maxgap=20,pct_id=0.8,pidscale=False,norm=True): """Create a matrix of stem length x gap size from an EMBOSS palindrome file""" mat = np.zeros((maxstem-minstem+1,maxgap-mingap+1)) nreads = 0 for _,s,e,seq,pi,G in iter_palindrome(fn): nreads+=1 L = len(seq) if (L < minstem or L > maxstem): continue if (G < mingap or G > maxgap): continue if pi < pct_id: continue if not pidscale: mat[L-minstem,G-mingap] += 1 else: mat[L-minstem,G-mingap] += pct_id return mat, nreads def parse_data(fns): """Parse EMBOSS palindorme data from a list of files; note that the length of the region must be encoded in the filename, for example: gi_30988105_gb_AACF01000002.1__43510_43710.emboss.txt""" data = [] for fn in fns: sp = fn.split("_") L = int(sp[-1].split('.')[0]) - int(sp[-2]) x = 0 for name,s,e,seq,cvg,gap in iter_palindrome(fn): x += len(seq) data.append(x/L) return data def format_horiz_bar(ax,data,labels,yticklabels=[0,1],xlabel=None,xlim=None,xticks=None, colors=['black','white'],iqrscale=1.75): ml = ticker.MaxNLocator() ml._min_n_ticks=2 ml._nbins=2 ax.xaxis.set_major_locator(ml) mf = ticker.ScalarFormatter(useMathText=True) mf.set_powerlimits((-2,2)) ax.xaxis.set_major_formatter(mf) bp = sns.boxplot(x=data,y=labels,ax=ax,palette=colors,fliersize=0,width=0.75,orient='h') if xlim is not None: ax.set_xlim(xlim) else: lnames = set(labels) upper = -np.inf for ln in lnames: subs = data[labels == ln] if len(subs) > 3: ql,qu = np.percentile(subs,q=[25,75]) iqr = qu-ql lu = qu+iqrscale*iqr else: lu = np.max(data) if lu > upper: upper = lu ax.set_xlim(np.min(data),upper) plt.draw() if xlabel is not None: old_label = 'SIST score ()' try: units = old_label[old_label.index("(") + 1:old_label.rindex(")")] except: units = "" label = old_label.replace("({})".format(units), "") exponent_text = ax.xaxis.get_offset_text().get_text().encode('utf-8') if len(exponent_text) > 1: exponent_text = re.sub("\xE2\x88\x92", "-", exponent_text) exponent_text.encode('ascii') exponent_text = exponent_text.replace("\\times", "") newlab = "{} ({} {})".format(label, exponent_text, units) else: newlab = xlabel ax.xaxis.set_label_text(newlab,size=10) ax.xaxis.offsetText.set_visible(False) 
ax.set_yticklabels(yticklabels,size=12) sns.despine(left=True,offset=5) ax.tick_params('y',length=0) ax.xaxis.set_tick_params(labelsize=10,length=4) def format_vert_bar(ax,data,labels,xticklabels=[0,1],ylabel=None,ylim=None,yticks=None, colors=['black','white'],iqrscale=1.75): ml = ticker.MaxNLocator() ml._min_n_ticks=2 ml._nbins=2 ax.yaxis.set_major_locator(ml) mf = ticker.ScalarFormatter(useMathText=True) mf.set_powerlimits((-2,2)) ax.yaxis.set_major_formatter(mf) bp = sns.boxplot(y=data,x=labels,ax=ax,palette=colors,fliersize=0,width=0.4,orient='v') if ylim is not None: ax.set_ylim(ylim) else: lnames = set(labels) upper = -np.inf for ln in lnames: subs = data[labels == ln] if len(subs) > 3: ql,qu = np.percentile(subs,q=[25,75]) iqr = qu-ql lu = qu+iqrscale*iqr else: lu = np.max(data) if lu > upper: upper = lu ax.set_ylim(np.min(data),upper) plt.draw() if yticks is not None: ax.set_yticks(yticks) if ylabel is not None: old_label = 'Score ()' try: units = old_label[old_label.index("(") + 1:old_label.rindex(")")] except: units = "" label = old_label.replace("({})".format(units), "") exponent_text = ax.yaxis.get_offset_text().get_text().encode('utf-8') if len(exponent_text) > 1: exponent_text = re.sub("\xE2\x88\x92", "-", exponent_text) exponent_text.encode('ascii') exponent_text = exponent_text.replace("\\times", "") newlab = "{} ({} {})".format(label, exponent_text, units) else: newlab = ylabel ax.yaxis.set_label_text(newlab,size=9) ax.yaxis.offsetText.set_visible(False) ax.set_xticklabels(xticklabels,size=10) sns.despine(bottom=True,offset=5) ax.tick_params('x',length=0) ax.yaxis.set_tick_params(labelsize=9,length=4) # Define colors: blue = '#1f78b4' brown = '#b15928' gray = '#d9d9d9' dark_gray = '#252525' # dark_gray = '#d9d9d9' # gray = 'white'Populating the interactive namespace from numpy and matplotlibPlot human cen vs. randomhue = [] data = [] names = [] minl = 0 fns = glob.glob('../data/palindrome/human/bacs/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([1]*len(sdata)) names.extend(['Cen']*len(sdata)) fns = glob.glob('../data/palindrome/human/matched_random/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([0]*len(sdata)) names.extend(['Control']*len(sdata)) plt.figure(figsize=(1,1.25)) ax = plt.subplot(111) data = np.array(data) names = np.array(names) colors = [dark_gray,gray] # format_horiz_bar(ax,data,names,yticklabels=['Cen','Control'],colors=colors,xlabel='Dyad density',xlim=(0.15,0.35)) format_vert_bar(ax,data,names,xticklabels=['Cen','Control'],colors=colors,ylabel='Density', ylim=(0.15,0.35),yticks=[0.15,0.35]) ax.set_title('Dyad\nsymmetry',size=12) plt.savefig('../figures/human_cen_dyad_symmetry.svg')Plot human neocen vs. 
randomhue = [] data = [] names = [] minl = 0 fns = glob.glob('../data/palindrome/human/neocen/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([1]*len(sdata)) names.extend(['Neo']*len(sdata)) fns = glob.glob('../data/palindrome/human/matched_random/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([0]*len(sdata)) names.extend(['Control']*len(sdata)) plt.figure(figsize=(1,1.25)) ax = plt.subplot(111) data = np.array(data) names = np.array(names) colors = [dark_gray,gray] format_vert_bar(ax,data,names,xticklabels=['Neo','Control'],colors=colors,ylabel='Density', ylim=(0.15,0.35),yticks=(0.15,0.35)) ax.set_title('Dyad symmetry',size=12) plt.savefig('../figures/human_neocen_dyad_density.svg') names = [] hue = [] data = [] fns = glob.glob('../data/palindrome/mouse/misat/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([1]*len(sdata)) names.extend(['Cen']*len(sdata)) fns = glob.glob('../data/palindrome/mouse/misat_matched_random/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([0]*len(sdata)) names.extend(['Control']*len(sdata)) plt.figure(figsize=(1,1.25)) ax = plt.subplot(111) data = np.array(data) names = np.array(names) colors = [dark_gray,gray] format_vert_bar(ax,data,names,xticklabels=['Cen','Control'], colors=colors,ylabel='Density',ylim=(-0.025,0.55),yticks=(0,0.55)) ax.set_title('Dyad symmetry',size=12) plt.savefig('../figures/mouse_dyad_symmetry.svg')Plot chicken cen vs. randomnames = [] hue = [] data = [] fns = glob.glob('../data/palindrome/chicken/unique_cen/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([1]*len(sdata)) names.extend(['Cen']*len(sdata)) fns = glob.glob('../data/palindrome/chicken/matched_random/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([0]*len(sdata)) names.extend(['Control']*len(sdata)) plt.figure(figsize=(1,1.25)) ax = plt.subplot(111) data = np.array(data) names = np.array(names) colors = [dark_gray,gray] format_vert_bar(ax,data,names,xticklabels=['Cen','Control'], colors=colors,ylabel='Density',ylim=(0.15,0.35),yticks=(0.15,0.35)) ax.set_title('Dyad symmetry',size=12) plt.savefig('../figures/chicken_dyad_symmetry.svg') names = [] hue = [] data = [] fns = glob.glob('../data/palindrome/agm/cen/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([1]*len(sdata)) names.extend(['Cen']*len(sdata)) fns = glob.glob('../data/palindrome/agm/random/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([0]*len(sdata)) names.extend(['Control']*len(sdata)) plt.figure(figsize=(1,1.25)) ax = plt.subplot(111) data = np.array(data) names = np.array(names) colors = [dark_gray,gray] format_vert_bar(ax,data,names,xticklabels=['Cen','Control'], colors=colors,ylabel='Density',ylim=(-0.025,0.5),yticks=(0,0.5)) ax.set_title('Dyad symmetry',size=12) plt.savefig('../figures/agm_dyad_symmetry.svg') data = np.array(data) names = np.array(names) ks_2samp(data[names=='Cen'],data[names=='Control']) names = [] hue = [] data = [] fns = glob.glob('../data/palindrome/chicken/neocen/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([1]*len(sdata)) names.extend(['Neo']*len(sdata)) fns = glob.glob('../data/palindrome/chicken/matched_random/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([0]*len(sdata)) names.extend(['Control']*len(sdata)) plt.figure(figsize=(1,1.25)) ax = plt.subplot(111) data = np.array(data) names = np.array(names) colors = [dark_gray,gray] format_vert_bar(ax,data,names,xticklabels=['Neo','Control'],colors=colors, 
ylabel='Density',ylim=[0.18,0.36],yticks=[0.18,0.36]) ax.set_title('Dyad symmetry',size=12) plt.savefig('../figures/chicken_neocen_dyad_symmetry.svg')Cerevisiaenames = [] hue = [] data = [] fns = glob.glob('../data/palindrome/sacCer2/sacCer2'+'_cen/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([1]*len(sdata)) names.extend(['Cen']*len(sdata)) fns = glob.glob('../data/palindrome/sacCer2/sacCer2'+'_random/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([0]*len(sdata)) names.extend(['Control']*len(sdata)) plt.figure(figsize=(1,1.25)) ax = plt.subplot(111) data = np.array(data) names = np.array(names) colors = [dark_gray,gray] # format_horiz_bar(ax,data,names,yticklabels=['Cen','Control'],colors=colors,xlabel='Dyad density',xlim=[0,2]) format_vert_bar(ax,data,names,xticklabels=['Cen','Control'],colors=colors, ylabel='Density',ylim=[-0.1,2],yticks=[0,2]) ax.set_title('Dyad symmetry',size=12) plt.savefig('../figures/sc2_dyad_density.svg') names = [] hue = [] data = [] species = ['sacCer2','sacMik','sacKud','sacCas','sacDar'] for spec in species: fns = glob.glob('../data/palindrome/'+spec+ '/' + spec + '_random/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([0]*len(sdata)) names.extend([spec]*len(sdata)) fns = glob.glob('../data/palindrome/'+spec+ '/' + spec + '_cen/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([1]*len(sdata)) names.extend([spec]*len(sdata)) names = [] hue = [] data = [] species=['sacCer2','sacMik','sacKud','sacCas','sacDar'] for spec in species: # if spec == 'None': # names.append(spec) # hue.append('cen') # data.append(None) # continue fns = glob.glob('../data/palindrome/'+spec+ '/' + spec + '_random/*.txt') for fn in fns: sp = fn.split("_") L = int(sp[-1].split('.')[0]) - int(sp[-2]) x = 0 for name,s,e,seq,cvg,gap in iter_palindrome(fn): x += len(seq) data.append(x/L) hue.append(0) names.append(spec) fns = glob.glob('../data/palindrome/'+spec+ '/' + spec + '_cen/*.txt') for fn in fns: sp = fn.split("_") L = int(sp[-1].split('.')[0]) - int(sp[-2]) x = 0 for name,s,e,seq,cvg,gap in iter_palindrome(fn): x += len(seq) data.append(x/L) hue.append(1) names.append(spec) fig = plt.figure(figsize=(4,3)) ax = fig.add_subplot(111) colors = ['#d9d9d9','#e31a1c'] sns.boxplot(x=data,y=names,hue=hue,palette=colors,fliersize=0) # ax.set_xlim(-0.00001,0.0002) ax.ticklabel_format(axis='x', style='sci', scilimits=(-2,2),size=10) ax.set_yticklabels(['$\it{S. cerevisiae}$', '$\it{S. mikatae}$', '$\it{S. kudriavzevii}$', '$\it{S. dairenensis}$', '$\it{S. 
castellii}$'],size=14) setp(ax.get_xticklabels(),size=12) ax.axes.yaxis.set_tick_params(length=0) ax.xaxis.get_offset_text().set_fontsize(12) ax.xaxis.set_ticklabels([0,'',0.5,'',1,'',1.5,'']) ax.legend_.remove() ax.set_xlabel('Dyad density (length-normalized)',size=14) ax.hlines([0.5,1.5,2.5,3.5,4.5],0,1.75,linestyles='dotted',colors='grey') sns.despine(left=True,trim=True) # # plt.figure(figsize=(2.5,4)) # plt.figure(figsize=(3,1.5)) # # color=['#e31a1c','black'] # sns.boxplot(x=data,y=names,hue=hue, # palette=['#e31a1c','grey'],fliersize=0,width=0.6) # # setp(plt.legend(),visible=False) # # plt.xticks([0.250,0.3,0.350],size=14) # plt.xticks([0,0.75,1.5],size=14) # plt.xlabel('Dyad density (length-normalized)',size=14) # sns.despine(bottom=False,trim=True,left=True) # plt.yticks([]) # # plt.savefig('figures/yeast_cen_dyad_density.svg') fig = plt.figure(figsize=(4,3)) ax = fig.add_subplot(111) colors = ['#d9d9d9','#e31a1c'] sns.boxplot(x=data,y=names,hue=hue,palette=colors,fliersize=0) # ax.set_xlim(-0.00001,0.0002) ax.ticklabel_format(axis='x', style='sci', scilimits=(-2,2),size=10) ax.set_yticklabels(['$\it{S. cerevisiae}$', '$\it{S. mikatae}$', '$\it{S. kudriavzevii}$', '$\it{S. dairenensis}$', '$\it{S. castellii}$'],size=14) setp(ax.get_xticklabels(),size=12) ax.axes.yaxis.set_tick_params(length=0) ax.xaxis.get_offset_text().set_fontsize(12) ax.xaxis.set_ticklabels([0,'',0.5,'',1,'',1.5,'']) ax.legend_.remove() ax.set_xlabel('Dyad density (length-normalized)',size=14) ax.hlines([0.5,1.5,2.5,3.5,4.5],0,1.75,linestyles='dotted',colors='grey') sns.despine(left=True,trim=True) plt.savefig('../figures/yeasts_dyad_density.svg') # plt.figure(figsize=(2.5,4)) plt.figure(figsize=(3,1.5)) # color=['#e31a1c','black'] sns.stripplot(x=data[::-1],y=hue[::-1],s=6,jitter=0.2, palette=['black','lightgrey']) sns.boxplot(x=data[::-1],y=hue[::-1], palette=['#e31a1c','grey'],fliersize=0,width=0.6) # setp(plt.legend(),visible=False) # plt.xticks([0.250,0.3,0.350],size=14) plt.xticks([0,0.75,1.5],size=14) plt.xlabel('Dyad density (length-normalized)',size=14) sns.despine(bottom=False,trim=True,left=True) plt.yticks([]) # plt.savefig('figures/yeast_cen_dyad_density.svg') names = [] hue = [] data = [] minl = 0 random = [] bac = [] neo = [] fns = glob.glob('../data/palindrome/human/matched_random/*.txt') for fn in fns: # print fn x = 0 sp = fn.split("_") L = int(sp[-1].split('.')[0]) - int(sp[-2]) for name,s,e,seq,cvg,gap in iter_palindrome(fn): if len(seq) > minl: x += len(seq) data.append(x/L) hue.append('Random') names.append('human') random.append(x/L) # if x > 2500: # print fn fns = glob.glob('../data/palindrome/human/bacs/*.txt') for fn in fns: sp = fn.split("_") L = int(sp[-1].split('.')[0]) - int(sp[-2]) x = 0 for name,s,e,seq,cvg,gap in iter_palindrome(fn): if len(seq) > minl: x += len(seq) data.append(x/L) hue.append(r'$\alpha$'+'-satellite') names.append('human') bac.append(x/L) fns = glob.glob('../data/palindrome/human/neocen/*.txt') for fn in fns: sp = fn.split("_") L = int(sp[-1].split('.')[0]) - int(sp[-2]) x = 0 for name,s,e,seq,cvg,gap in iter_palindrome(fn): if len(seq) > minl: x += len(seq) data.append(x/L) hue.append('Neocentromere') names.append('human') neo.append(x/L) # plt.figure(figsize=(2.5,4)) plt.figure(figsize=(3,1.5)) # color=['#e31a1c','black'] # sns.stripplot(x=data[::-1],y=hue[::-1],s=6,jitter=0.2, # palette=['black','lightgrey']) sns.boxplot(x=data[::-1],y=hue[::-1], palette=['#1f78b4','#e31a1c','grey'],fliersize=0,width=0.6,whis=1) # setp(plt.legend(),visible=False) # 
plt.xticks([0.250,0.3,0.350],size=14) # plt.xticks([0,0.75,1.5],size=14) plt.xlim(0.18,0.36) plt.xticks(size=14) plt.xlabel('Dyad density (length-normalized)',size=14) sns.despine(bottom=False,trim=True,left=True) plt.yticks([]) plt.savefig('../figures/human_neocen_cen_dyad_density.svg')Pombenames = [] hue = [] data = [] fns = glob.glob('../data/palindrome/pombe/pombe_cen/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([1]*len(sdata)) names.extend(['Cen']*len(sdata)) fns = glob.glob('../data/palindrome/pombe/matched_random/*.txt') sdata = parse_data(fns) data.extend(sdata) hue.extend([0]*len(sdata)) names.extend(['Control']*len(sdata)) plt.figure(figsize=(1,1.25)) ax = plt.subplot(111) data = np.array(data) names = np.array(names) colors = [dark_gray,gray] # format_horiz_bar(ax,data,names,yticklabels=['Cen','Control'],colors=colors,xlabel='Dyad density',xlim=[0.24,0.36]) format_vert_bar(ax,data,names,xticklabels=['Cen','Control'],colors=colors,ylabel='Density', ylim=(0.24,0.36),yticks=[0.24,0.36]) ax.set_title('Dyad symmetry',size=12) plt.savefig('../figures/pombe_cen_dyad_density.svg') names = [] hue = [] data = [] random = [] cen = [] fns = glob.glob('../data/palindrome/pombe/matched_random/*.txt') for fn in fns: # print fn x = 0 sp = fn.split("_") L = int(sp[-1].split('.')[0]) - int(sp[-2]) for name,s,e,seq,cvg,gap in iter_palindrome(fn): x += len(seq) data.append(x/L) hue.append('Random') names.append('pombe') random.append(x/L) # if x > 2500: # print fn fns = glob.glob('../data/palindrome/pombe/pombe_cen/*.txt') for fn in fns: sp = fn.split("_") L = int(sp[-1].split('.')[0]) - int(sp[-2]) x = 0 for name,s,e,seq,cvg,gap in iter_palindrome(fn): x += len(seq) data.append(x/L) hue.append('cen') names.append('pombe') cen.append(x/L) ks_2samp(random,cen) # plt.figure(figsize=(2.5,4)) plt.figure(figsize=(3,1.5)) # color=['#e31a1c','black'] sns.stripplot(x=data[::-1],y=hue[::-1],s=6,jitter=0.2, palette=['black','lightgrey']) sns.boxplot(x=data[::-1],y=hue[::-1], palette=['#e31a1c','grey'],fliersize=0,width=0.6) # setp(plt.legend(),visible=False) plt.xticks([0.250,0.3,0.350],size=14) plt.xlabel('Dyad density (length-normalized)',size=14) sns.despine(bottom=False,trim=True,left=True) plt.yticks([]) plt.savefig('../figures/pombe_cen_dyad_density.svg') names = [] hue = [] data = [] random = [] unique = [] neo = [] fns = glob.glob('../data/palindrome/chicken/matched_random/*.txt') for fn in fns: # print fn x = 0 sp = fn.split("_") L = int(sp[-1].split('.')[0]) - int(sp[-2]) for name,s,e,seq,cvg,gap in iter_palindrome(fn): x += len(seq) data.append(x/L) hue.append('Random') names.append('chicken') random.append(x/L) # if x > 2500: # print fn fns = glob.glob('../data/palindrome/chicken/unique_cen/*.txt') for fn in fns: sp = fn.split("_") L = int(sp[-1].split('.')[0]) - int(sp[-2]) x = 0 for name,s,e,seq,cvg,gap in iter_palindrome(fn): x += len(seq) data.append(x/L) hue.append(r'$\alpha$'+'-satellite') names.append('chicken') unique.append(x/L) fns = glob.glob('../data/palindrome/chicken/neocen/*.txt') for fn in fns: sp = fn.split("_") L = int(sp[-1].split('.')[0]) - int(sp[-2]) x = 0 for name,s,e,seq,cvg,gap in iter_palindrome(fn): x += len(seq) data.append(x/L) hue.append('Neocentromere') names.append('chicken') neo.append(x/L) plt.figure(figsize=(3,1.5)) sns.boxplot(x=data[::-1],y=hue[::-1], palette=['#1f78b4','#e31a1c','grey'],fliersize=0,width=0.6,whis=1) plt.xlim(0.18,0.36) plt.xticks(size=14) plt.xlabel('Dyad density (length-normalized)',size=14) 
sns.despine(bottom=False,trim=True,left=True) plt.yticks([]) plt.savefig('../figures/chicken_neocen_cen_dyad_density.svg')Cloud AI Platform + What-if Tool: Playground XGBoost ExampleThis notebook shows how to use the [What-if Tool](https://pair-code.github.io/what-if-tool/) on a deployed [Cloud AI Platform](https://cloud.google.com/ai-platform/) model. *You don't need your own cloud project* to run this notebook. For instructions on creating a Cloud project, see the documentation [here](https://cloud.google.com/resource-manager/docs/creating-managing-projects).import sys python_version = sys.version_info[0] # If you're running on Colab, you'll need to install the What-if Tool package and authenticate on the TF instance def pip_install(module): if python_version == '2': !pip install {module} --quiet else: !pip3 install {module} --quiet try: import google.colab IN_COLAB = True except: IN_COLAB = False if IN_COLAB: pip_install('witwidget') from google.colab import auth auth.authenticate_user() import pandas as pd import numpy as np import witwidget from witwidget.notebook.visualization import WitWidget, WitConfigBuilderLoading the test datasetThe model we'll be exploring here is a binary classification model built with XGBoost and trained on a [mortgage dataset](https://www.ffiec.gov/hmda/hmdaflat.htm). It predicts whether or not a mortgage application will be approved. In this section we'll:* Download some test data from Cloud Storage and load it into a numpy array + Pandas DataFrame* Preview the features for our model in Pandas# Download our Pandas dataframe and our test features and labels !gsutil cp gs://mortgage_dataset_files/data.pkl . !gsutil cp gs://mortgage_dataset_files/x_test.npy . !gsutil cp gs://mortgage_dataset_files/y_test.npy . # Preview the features from our model as a pandas DataFrame features = pd.read_pickle('data.pkl') features.head() # Load the test features and labels into numpy ararys x_test = np.load('x_test.npy') y_test = np.load('y_test.npy') # Combine the features and labels into one array for the What-if Tool test_examples = np.hstack((x_test,y_test.reshape(-1,1)))Using the What-if Tool to interpret our modelWith our test examples ready, we can now connect our model to the What-if Tool using the `WitWidget`. To use the What-if Tool with Cloud AI Platform, we need to send it:* A Python list of our test features + ground truth labels* Optionally, the names of our columns* Our Cloud project, model, and version name (we've created a public one for you to play around with)See the next cell for some exploration ideas in the What-if Tool.# Create a What-if Tool visualization, it may take a minute to load # See the cell below this for exploration ideas # This prediction adjustment function is needed as this xgboost model's # prediction returns just a score for the positive class of the binary # classification, whereas the What-If Tool expects a list of scores for each # class (in this case, both the negative class and the positive class). 
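# For example, a raw positive-class score of 0.8 from the model becomes [0.2, 0.8] after adjustment.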
def adjust_prediction(pred): return [1 - pred, pred] config_builder = (WitConfigBuilder(test_examples.tolist(), features.columns.tolist() + ['mortgage_status']) .set_ai_platform_model('wit-caip-demos', 'xgb_mortgage', 'v1', adjust_prediction=adjust_prediction) .set_target_feature('mortgage_status') .set_label_vocab(['denied', 'approved'])) WitWidget(config_builder, height=800)Week 3 Project - Pittsburgh Capital Projects AnalysisPrompt: Explore the city of Pittsburgh's capital projects on the [WPRDC](https://data.wprdc.org/dataset/capital-projects) and answer a question by processing the dataset as a CSV Step 1: Examine and Clean Datasets# import CSVs as a dataframe df = pd.read_csv('../data/pgh_capital_projects.csv') df_dict = pd.read_csv('../data/capital-projects-data-dictionary.csv')Step 1.1: Data DictionaryThe imported data dictionary is hard to read.First task is to transform the data dictionary into a more readable format.ToDo List:1. Separate the first two rows ('Description' and 'Required') into its own table, and switch the row and column.2. From the original `df_dict` table, separate out 'fieldName' and 'fieldDescription' into its own table.# Examine data dictionary. Note how the table is not immediately clear as to how it should be read df_dict.head() # ToDo 1: # Separate the first two rows ('Description' and 'Required') into its own table, and switch the row and column. # Transpose the first two rows dict_descriptions = df_dict.head(2).transpose().reset_index() # Make the first row as the header dict_descriptions.columns = dict_descriptions.iloc[0] # Remove the first row now that it is the header dict_descriptions = dict_descriptions.drop(index=0) # Check output dict_descriptions # ToDo 2: # From the original df_dict table, separate out 'fieldName' and 'fieldDescription' into its own table. dict_field_info = df_dict[['fieldName','fieldDescription']][2:].copy() dict_field_infoStep 1.2: Capital Projects DataNow we'll take a look at the actual dataset.df.head() # Check the data type of the columns. # Notice how date columns are stored as a string object. df.dtypes # Convert date columns to date types df['fiscal_year'] = pd.to_datetime(df['fiscal_year'], format='%Y') # astype() will return 1970-01-01 df['start_date'] = df['start_date'].astype('datetime64[ns]') df.head()Step 2: Analyze Dataset Budget Data# Get summarized statistics for 'budgeted_amount' column print('Budget Stats for Capital Projects' + '\n') print(df['budgeted_amount'].describe() .apply(lambda x: format(round(x,0), ','))) # Check which project had the maximum budget alloted # original method # df.loc[df['budgeted_amount'] == df['budgeted_amount'].max()] # Faster method is to use idxmax to get row number df.loc[df['budgeted_amount'].idxmax()] # Check which project had the smallest budget alloted df.loc[df['budgeted_amount'].idxmin()] # Check the highest budgeted projects df.sort_values(by='budgeted_amount', ascending=False).head() # Check the lowest budgeted projects df.sort_values(by='budgeted_amount').head() # Amongst the top 25% percentile, what were its project area and neighborhoods? 
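# (Here 'top 25%' means rows whose budgeted_amount is at or above the 75th percentile of that column.)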
top_25_percent = df['budgeted_amount'] >= df['budgeted_amount'].describe()['75%'] top_25_percent_area_agg = df[top_25_percent].groupby(['area'])['id'] \ .agg('count') \ .sort_values(ascending=False) top_25_percent_neighborhood_agg = df[top_25_percent]['neighborhood'].value_counts().head(10) print('Top 25% Project Areas:') print(top_25_percent_area_agg) print('\nTop 25% Project Neighborhoods:') print(top_25_percent_neighborhood_agg) # Amongst the bottom 25% percentile, what were its project area and neighborhood? bottom_25_percent = df['budgeted_amount'] <= df['budgeted_amount'].describe()['25%'] bottom_25_percent_area_agg = df[bottom_25_percent].groupby(['area'])['id'] \ .agg('count') \ .sort_values(ascending=False) bottom_25_percent_neighborhood_agg = df[bottom_25_percent]['neighborhood'].value_counts().head(10) print('Bottom 25% Project Areas:') print(bottom_25_percent_area_agg) print('\nBottom 25% Project Neighborhoods') print(bottom_25_percent_neighborhood_agg) # Find the sum of budget amount received by each neighborhood budget_agg = df.groupby(['neighborhood'])['budgeted_amount'].agg('sum') budget_agg_top10 = budget_agg.sort_values(ascending=False).head(10) budget_agg_bottom10 = budget_agg.sort_values().head(10) print('Sum of Budgeted Amount Allotted per Neighborhood\n') print('Top 10 Budget Sums Grouped by Neighborhood:') print(budget_agg_top10) print('\nBottom 10 Budget Sums Grouped by Neighborhood:') print(budget_agg_bottom10) # Search for some neighborhood of interest to me: # Create function to get the budget_sum for neighborhoods def neighborhood_match(neighborhood): n_stats = budget_agg[budget_agg.index .str.lower() .str.contains(neighborhood)] if len(n_stats) == 0: return 'N/A' return n_stats print('Squirrel Hill', neighborhood_match('squirrel hill')) print('\nEast Liberty', neighborhood_match('east liberty')) print('\nRegent Square', neighborhood_match('regent square')) print('\nPoint Breeze', neighborhood_match('point breeze')) # For named projects, which ones were allotted the largest budget? df.groupby(['name'])['budgeted_amount'] \ .agg('sum') \ .sort_values(ascending=False).head(10) # For each asset, what was the total budget allotted? df.groupby(['asset_id','neighborhood'])['budgeted_amount'] \ .agg('sum') \ .sort_values(ascending=False).head(10) # Follow up on analysis above: Group and rank named projects by neighborhood and budget sum df.groupby(['name','asset_id','neighborhood'])['budgeted_amount'] \ .agg('sum') \ .sort_values(ascending=False).head(10) # For categorical variables, loop through each one with a count and sort cols = ['name','area','status','asset_id','neighborhood'] for col in cols: print(col.upper()) print(df[col].value_counts().head(10), '\n')NAME CITY COUNCIL'S UNSPECIFIED LOCAL OPTION 96 CAPITAL EQUIPMENT ACQUISITION 62 SPORT FACILITY IMPROVEMENTS 56 PARK RECONSTRUCTION 50 PLAY AREA IMPROVEMENTS 48 COMPLETE STREETS 38 FACILITY IMPROVEMENTS - PUBLIC SAFETY FACILITIES 31 FACILITY IMPROVEMENTS - RECREATION AND SENIOR CENTERS 19 ECONOMIC DEVELOPMENT AND HOUSING 17 STREET RESURFACING 16 Name: name, dtype: int64 AREA Facility Improvement 295 Engineering and Construction 218 Administration/Sub-Award 177 Vehicles and Equipment 62 Neighborhood and Community Development 45 Public Safety 11 Name: area, dtype: int64 STATUS Planned 579 Completed 122 In Progress [...]Parallel Collections---------------------Systems like Spark and Dask include "big data" collections with a small set of high-level primitives like `map`, `filter`, `groupby`, and `join`. 
With these common patterns we can often handle computations that are more complex than map, but are still structured.In this section we repeat the submit example using the PySpark and the Dask.Bag APIs, which both provide parallel operations on linear collections of arbitrary objects. Objectives* Use high-level `pyspark` and `dask.bag` to parallelize common non-map patterns Requirements* Dask.bag*Note: the following exercises were designed to work with `dask 0.10.1`, you can check your installed version of `dask` with the following code*:```import daskprint(dask.__version__)``` ApplicationWe again start with the following sequential code```pythonseries = {}for fn in filenames: Simple map over filenames series[fn] = pd.read_hdf(fn)['x']results = {}for a in filenames: Doubly nested loop over the same collection for b in filenames: if a != b: Filter out bad elements results[a, b] = series[a].corr(series[b]) Apply function((a, b), corr) = max(results.items(), key=lambda kv: kv[1]) Reduction``` Spark/Dask.bag methodsWe can construct most of the above computation with the following Spark/Dask.bag methods:* `collection.map(function)`: apply function to each element in collection* `collection.product(collection)`: Create new collection with every pair of inputs* `collection.filter(predicate)`: Keep only elements of colleciton that match the predicate function* `collection.max()`: Compute maximum elementWe use these briefly in isolated exercises and then combine them to rewrite the previous computation from the `submit` section. Dask.bag: Example APIimport dask.bag as db b = db.from_sequence(range(5)) b b.compute() # Gather results back to local process`map`# Square each element b.map(lambda x: x ** 2).compute() # Square each element and collect results b.map(lambda x: x ** 2).compute() # Select only the even elements b.filter(lambda x: x % 2 == 0).compute() # Cartesian product of each pair of elements in two sequences (or the same sequence in this case) b.product(b).compute() # Chain operations to construct more complex computations (b.map(lambda x: x ** 2) .product(b) .filter(lambda tup: tup[0] % 2 == 0) .compute())Exercise: Parallelize pairwise correlations with Dask.bagTo make this a bit easier we're just going to compute the maximum correlation and not try to keep track of the stocks that yielded this maximal result.from glob import glob import os import pandas as pd filenames = sorted(glob(os.path.join('..', 'data', 'json', '*.h5'))) # ../data/json/*.json filenames[:5] %%time ### Sequential Code series = [] for fn in filenames: # Simple map over filenames series.append(pd.read_hdf(fn)['close']) results = [] for a in series: # Doubly nested loop over the same collection for b in series: if not (a == b).all(): # Filter out comparisons of the same series results.append(a.corr(b)) # Apply function result = max(results) %%time ### Parallel code b = db.from_sequence(filenames) # TODO result %load solutions/collections-2.py result %%time import dask result = corr.compute(get=dask.local.get_sync)PySpark solution for comparison%load solutions/collections-1.pyimport pandas as pd import numpy as npFoodmart product salessales = pd.read_csv('foodmart.sales.tsv', sep = '\t', header = 0, parse_dates = [2]) sales.head() products = pd.read_csv('foodmart.products.tsv', sep = '\t', header = 0) products.head() sales = sales.merge(products[['product_id', 'product_name']], on = ['product_id'], how = 'inner') sales.head()Взаимное влияние продуктов друг на другаsparse_sales = pd.pivot_table(sales, values='sales', 
index=['date', 'store_id'], columns=['product_name'], fill_value = 0) sparse_sales.head() sales_correlation = sparse_sales.corr() sales_correlation.head() product_name = 'American Chicken Hot Dogs' sales_correlation[[product_name]].sort_values(product_name, ascending = True).head() min_corr = pd.DataFrame(sales_correlation.min()) min_corr.columns = ['min'] min_corr.sort_values(by = 'min').head() max_corr = pd.DataFrame(sales_correlation.apply(lambda x : np.max(list(filter(lambda x : x != 1., x))), axis = 1)) max_corr.columns = ['max'] max_corr.sort_values(by = 'max', ascending = False).head() product_name = 'Plato French Roast Coffee' sales_correlation[[product_name]].sort_values(product_name, ascending = False).head()Inter Process Communications (IPC) Спасибо Сове Глебу и Голяр Димитрису за участие в написании текста Сегодня в программе:* `mmap` для IPC Используем примитивы межпоточной синхронизации для межпроцессной. Через разделяемую память создаем правильные мьютексы. [Ссылка про правильные мьютексы](https://linux.die.net/man/3/pthread_mutexattr_init)* Объекты разделяемой памяти POSIX Это почти то же самое, что и обычные файлы, но у них ортогональное пространство имен и они не сохраняются на диск. Вызовы `shm_open` (открывает/создает объект разделяемой памяти, аналогично `open`) и `shm_unlink` (удаляет ссылку на объект, аналогично `unlink`) [Документашка](https://www.opennet.ru/man.shtml?topic=shm_open&category=3&russian=0). [Отличия от `open`](https://stackoverflow.com/questions/24875257/why-use-shm-open) * Семафоры Неименованные Именованные* Сочетаемость семафоров и сигналов Комментарии к ДЗ[Ридинг Яковлева](https://github.com/victor-yacovlev/mipt-diht-caos/tree/master/practice/posix_ipc) `mmap`Разделяемая память - это когда два региона виртуальной памяти (один в одном процессе, другой в другом) ссылаются на одну и ту же физическую память. То есть могут обмениваться информацией через нее.Межпроцессное взаимодействие через разделяемую память нужно, когда у нас есть две различные программы (могут быть написаны на разных языках)и когда нам не подходит взаимодействие через сокеты (такое взаимодействие не очень эффективно).%%cpp mmap.c %run gcc -Wall -fsanitize=thread mmap.c -lpthread -o mmap.exe %run ./mmap.exe #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include const char* log_prefix(const char* file, int line) { struct timeval tp; gettimeofday(&tp, NULL); struct tm ltime; localtime_r(&tp.tv_sec, <ime); static __thread char prefix[100]; size_t time_len = strftime(prefix, sizeof(prefix), "%H:%M:%S", <ime); sprintf(prefix + time_len, ".%03ld %s:%3d [pid=%d]", tp.tv_usec / 1000, file, line, getpid()); return prefix; } #define log_printf_impl(fmt, ...) { dprintf(2, "%s: " fmt "%s", log_prefix(__FILE__, __LINE__), __VA_ARGS__); } #define log_printf(...) 
log_printf_impl(__VA_ARGS__, "") // process-aware assert #define pa_assert(stmt) if (stmt) {} else { log_printf("'" #stmt "' failed\n"); exit(EXIT_FAILURE); } typedef enum { VALID_STATE = 0, INVALID_STATE = 1 } state_t; typedef struct { pthread_mutex_t mutex; state_t current_state; // protected by mutex } shared_state_t; shared_state_t* state; // interprocess state // process_safe_func и process_func - функции-примеры с прошлого семинара (с точностью до замены thread/process) void process_safe_func() { // all function is critical section, protected by mutex pthread_mutex_lock(&state->mutex); // try comment lock&unlock out and look at result pa_assert(state->current_state == VALID_STATE); state->current_state = INVALID_STATE; // do some work with state. sched_yield(); state->current_state = VALID_STATE; pthread_mutex_unlock(&state->mutex); } void process_func(int process_num) { log_printf(" Process %d started\n", process_num); for (int j = 0; j < 10000; ++j) { process_safe_func(); } log_printf(" Process %d finished\n", process_num); } shared_state_t* create_state() { // Создаем кусок разделяемой памяти. Он будет общим для данного процесса и его дочерних shared_state_t* state = mmap( /* desired addr, addr = */ NULL, /* length = */ sizeof(shared_state_t), // Размер разделяемого фрагмента памяти /* access attributes, prot = */ PROT_READ | PROT_WRITE, /* flags = */ MAP_SHARED | MAP_ANONYMOUS, /* fd = */ -1, /* offset in file, offset = */ 0 ); pa_assert(state != MAP_FAILED); // create and initialize interprocess mutex pthread_mutexattr_t mutex_attrs; pa_assert(pthread_mutexattr_init(&mutex_attrs) == 0); // Важно! Без этого атрибута один из процессов навсегда зависнет в lock мьютекса // Вероятно этот атрибут влияет на отсутствие флага FUTEX_PRIVATE_FLAG в операциях с futex // Если он стоит, то ядро может делать некоторые оптимизации в предположении, что futex используется одним процессом pa_assert(pthread_mutexattr_setpshared(&mutex_attrs, PTHREAD_PROCESS_SHARED) == 0); pa_assert(pthread_mutex_init(&state->mutex, &mutex_attrs) == 0); pa_assert(pthread_mutexattr_destroy(&mutex_attrs) == 0); state->current_state = VALID_STATE; // Инициализирем защищаемое состояние return state; } void delete_state(shared_state_t* state) { pa_assert(pthread_mutex_destroy(&state->mutex) == 0); pa_assert(munmap(state, sizeof(shared_state_t)) == 0); } int main() { log_printf("Main process started\n"); state = create_state(); // Создаем разделяемое состояние const int process_count = 2; pid_t processes[process_count]; // Создаем дочерние процессы for (int i = 0; i < process_count; ++i) { log_printf("Creating process %d\n", i); // дочерние процессы унаследуют разделяемое состояние (оно не скопируется, а будет общим) pa_assert((processes[i] = fork()) >= 0); if (processes[i] == 0) { process_func(i); // Имитируем работу из разных процессов exit(0); } } for (int i = 0; i < process_count; ++i) { int status; pa_assert(waitpid(processes[i], &status, 0) != -1) pa_assert(WIFEXITED(status) && WEXITSTATUS(status) == 0); log_printf("Process %d 'joined'\n", i); } delete_state(state); log_printf("Main process finished\n"); return 0; }Ну и spinlock давайте. 
А почему бы и нет?Отличие только в замене инициализации и в взятии/снятии локов.%%cpp mmap.c %run gcc -Wall -fsanitize=thread mmap.c -lpthread -o mmap.exe %run ./mmap.exe #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include const char* log_prefix(const char* file, int line) { struct timeval tp; gettimeofday(&tp, NULL); struct tm ltime; localtime_r(&tp.tv_sec, <ime); static __thread char prefix[100]; size_t time_len = strftime(prefix, sizeof(prefix), "%H:%M:%S", <ime); sprintf(prefix + time_len, ".%03ld %s:%3d [pid=%d]", tp.tv_usec / 1000, file, line, getpid()); return prefix; } #define log_printf_impl(fmt, ...) { dprintf(2, "%s: " fmt "%s", log_prefix(__FILE__, __LINE__), __VA_ARGS__); } #define log_printf(...) log_printf_impl(__VA_ARGS__, "") // process-aware assert #define pa_assert(stmt) if (stmt) {} else { log_printf("'" #stmt "' failed\n"); exit(EXIT_FAILURE); } typedef enum { VALID_STATE = 0, INVALID_STATE = 1 } state_t; typedef struct { _Atomic int lock; state_t current_state; // protected by mutex } shared_state_t; shared_state_t* state; // interprocess state void sl_lock(_Atomic int* lock) { int expected = 0; while (!atomic_compare_exchange_weak(lock, &expected, 1)) { expected = 0; } } void sl_unlock(_Atomic int* lock) { atomic_fetch_sub(lock, 1); } void process_safe_func() { // all function is critical section, protected by spinlock sl_lock(&state->lock); pa_assert(state->current_state == VALID_STATE); state->current_state = INVALID_STATE; // do some work with state. sched_yield(); state->current_state = VALID_STATE; sl_unlock(&state->lock); } void process_func(int process_num) { log_printf(" Process %d started\n", process_num); for (int j = 0; j < 10000; ++j) { process_safe_func(); } log_printf(" Process %d finished\n", process_num); } shared_state_t* create_state() { shared_state_t* state = mmap( /* desired addr, addr = */ NULL, /* length = */ sizeof(shared_state_t), /* access attributes, prot = */ PROT_READ | PROT_WRITE, /* flags = */ MAP_SHARED | MAP_ANONYMOUS, /* fd = */ -1, /* offset in file, offset = */ 0 ); pa_assert(state != MAP_FAILED); state->lock = 0; state->current_state = VALID_STATE; return state; } void delete_state(shared_state_t* state) { pa_assert(munmap(state, sizeof(shared_state_t)) == 0); } int main() { log_printf("Main process started\n"); state = create_state(); const int process_count = 2; pid_t processes[process_count]; for (int i = 0; i < process_count; ++i) { log_printf("Creating process %d\n", i); pa_assert((processes[i] = fork()) >= 0); if (processes[i] == 0) { process_func(i); exit(0); } } for (int i = 0; i < process_count; ++i) { int status; pa_assert(waitpid(processes[i], &status, 0) != -1) pa_assert(WIFEXITED(status) && WEXITSTATUS(status) == 0); log_printf("Process %d 'joined'\n", i); } delete_state(state); log_printf("Main process finished\n"); return 0; }`shm_open`Сделаем то же самое, что и в предыдущем примере, но на этот раз не из родственных процессов. Воспользуемся именноваными объектами разделяемой памяти.%%cpp shm.c %# Обратите внимание: -lrt. Здесь нужна новая разделяемая библиотека %run gcc -Wall -fsanitize=thread shm.c -lrt -lpthread -o s.exe %run ./s.exe create_shm /my_shm %run ./s.exe work 1 /my_shm & PID=$! 
; ./s.exe work 2 /my_shm ; wait $PID %run ./s.exe remove_shm /my_shm #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include #include #include #include const char* log_prefix(const char* file, int line) { struct timeval tp; gettimeofday(&tp, NULL); struct tm ltime; localtime_r(&tp.tv_sec, <ime); static __thread char prefix[100]; size_t time_len = strftime(prefix, sizeof(prefix), "%H:%M:%S", <ime); sprintf(prefix + time_len, ".%03ld %s:%3d [pid=%d]", tp.tv_usec / 1000, file, line, getpid()); return prefix; } #define log_printf_impl(fmt, ...) { dprintf(2, "%s: " fmt "%s", log_prefix(__FILE__, __LINE__), __VA_ARGS__); } #define log_printf(...) log_printf_impl(__VA_ARGS__, "") // process-aware assert #define pa_assert(stmt) if (stmt) {} else { log_printf("'" #stmt "' failed\n"); exit(EXIT_FAILURE); } typedef enum { VALID_STATE = 0, INVALID_STATE = 1 } state_t; typedef struct { pthread_mutex_t mutex; state_t current_state; // protected by mutex } shared_state_t; void process_safe_func(shared_state_t* state) { // all function is critical section, protected by mutex pthread_mutex_lock(&state->mutex); // try comment lock&unlock out and look at result pa_assert(state->current_state == VALID_STATE); state->current_state = INVALID_STATE; // do some work with state. sched_yield(); state->current_state = VALID_STATE; pthread_mutex_unlock(&state->mutex); } shared_state_t* load_state(const char* shm_name, bool do_create) { // открываем / создаем объект разделяемой памяти // по сути это просто open, только для виртуального файла (без сохранения данных на диск + ортогональное пространство имен) int fd = shm_open(shm_name, O_RDWR | (do_create ? O_CREAT : 0), 0644); pa_assert(fd >= 0); pa_assert(ftruncate(fd, sizeof(shared_state_t)) == 0); shared_state_t* state = mmap( /* desired addr, addr = */ NULL, /* length = */ sizeof(shared_state_t), /* access attributes, prot = */ PROT_READ | PROT_WRITE, /* flags = */ MAP_SHARED, /* fd = */ fd, /* offset in file, offset = */ 0 ); pa_assert(state != MAP_FAILED); if (!do_create) { return state; } // create interprocess mutex pthread_mutexattr_t mutex_attrs; pa_assert(pthread_mutexattr_init(&mutex_attrs) == 0); // Важно! pa_assert(pthread_mutexattr_setpshared(&mutex_attrs, PTHREAD_PROCESS_SHARED) == 0); pa_assert(pthread_mutex_init(&state->mutex, &mutex_attrs) == 0); pa_assert(pthread_mutexattr_destroy(&mutex_attrs) == 0); state->current_state = VALID_STATE; return state; } void unload_state(shared_state_t* state) { pa_assert(munmap(state, sizeof(shared_state_t)) == 0); } int main(int argc, char** argv) { pa_assert(argc >= 2); if (strcmp("create_shm", argv[1]) == 0) { log_printf(" Creating state: %s\n", argv[2]); unload_state(load_state(argv[2], /*do_create=*/ 1)); log_printf(" State created\n"); } else if (strcmp("remove_shm", argv[1]) == 0) { log_printf(" Removing state: %s\n", argv[2]); // Файлы shm существуют пока не будет вызвана unlink. 
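// shm_unlink removes only the name from the shared-memory namespace; the object itself
// is destroyed once the last process that still has it open or mapped releases it,
// analogous to unlink() on a regular file.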
pa_assert(shm_unlink(argv[2]) == 0) log_printf(" State removed\n"); } else if (strcmp("work", argv[1]) == 0) { pa_assert(argc >= 3); int worker = strtol(argv[2], 0, 10); log_printf(" Worker %d started\n", worker); shared_state_t* state = load_state(argv[3], /*do_create=*/ 0); for (int j = 0; j < 10000; ++j) { process_safe_func(state); } unload_state(state); log_printf(" Worker %d finished\n", worker); } else { pa_assert(0 && "unknown command") } return 0; }Проблема: как решить, кто из независимых процессов будет создавать участок?Способ разрешить конфликт создания участка разделяемой памяти: * Все процессы создают файлы с флагом O_EXCL | O_CREAT * Из-за О_EXCL выкинет ошибку для всех процессов кроме одного * Этот один процесс создаст файл, выделит память, создаст спинлок на инициализацию и начнёт инициализировать * Другие, которые получили ошибку попытаются открыть файл ещё раз, без этих флагов уже. * Далее они будут ждать (регулярно проверять) пока не изменится размер файла, потом откроют его, и дальше будут ждать инициализации на спинлоке. Анонимные семафорыИгровое сравнение: семафор это ящик с шариками.| || || ||_*_| ^ |Это семафор со значением 1 (ящик с одним шариком)У семафора такая семантика:* Операция post() кладет шарик в ящик. Работает мгновенно.* Операция wait() извлекает шарик из ящика. Если шариков нет, то блокируется пока не появится шарик и затем его извлекает.* Еще есть try_wait(), timed_wait() - они соответствуют названиям.Шарики можно так же рассматривать как свободные ресурсы.Семафор с одним шариком можно использовать как мьютекс. В данном случае шарик - это ресурс на право входить в критическую секцию. Соответственно lock - это wait. А unlock это post. Пример использования с многими шариками: Построение очереди.Создаём 2 семафора, semFree и semElementsInside.При добавлении берём ресурс (~шарик) из semFree, под lock добавляем элемент, кладём ресурс в semElementsInsideПри удалении берём ресурс из semElementsInside, под локом удаляем элемент, кладём ресурс в semFree%%cpp sem_anon.c %run gcc -Wall -fsanitize=thread -lrt sem_anon.c -o sem_anon.exe %run ./sem_anon.exe #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include const char* log_prefix(const char* file, int line) { struct timeval tp; gettimeofday(&tp, NULL); struct tm ltime; localtime_r(&tp.tv_sec, <ime); static __thread char prefix[100]; size_t time_len = strftime(prefix, sizeof(prefix), "%H:%M:%S", <ime); sprintf(prefix + time_len, ".%03ld %s:%3d [pid=%d]", tp.tv_usec / 1000, file, line, getpid()); return prefix; } #define log_printf_impl(fmt, ...) { dprintf(2, "%s: " fmt "%s", log_prefix(__FILE__, __LINE__), __VA_ARGS__); } #define log_printf(...) log_printf_impl(__VA_ARGS__, "") // process-aware assert #define pa_assert(stmt) if (stmt) {} else { log_printf("'" #stmt "' failed\n"); exit(EXIT_FAILURE); } typedef enum { VALID_STATE = 0, INVALID_STATE = 1 } state_t; typedef struct { sem_t semaphore; state_t current_state; // protected by semaphore } shared_state_t; shared_state_t* state; // interprocess state void process_safe_func() { // all function is critical section, protected by mutex sem_wait(&state->semaphore); // ~ lock pa_assert(state->current_state == VALID_STATE); state->current_state = INVALID_STATE; // do some work with state. 
sched_yield(); state->current_state = VALID_STATE; sem_post(&state->semaphore); // ~ unlock } void process_func(int process_num) { log_printf(" Process %d started\n", process_num); for (int j = 0; j < 10000; ++j) { process_safe_func(); } log_printf(" Process %d finished\n", process_num); } shared_state_t* create_state() { shared_state_t* state = mmap( /* desired addr, addr = */ NULL, /* length = */ sizeof(shared_state_t), /* access attributes, prot = */ PROT_READ | PROT_WRITE, /* flags = */ MAP_SHARED | MAP_ANONYMOUS, /* fd = */ -1, /* offset in file, offset = */ 0 ); pa_assert(state != MAP_FAILED); // create interprocess semaphore pa_assert(sem_init( &state->semaphore, 1, // interprocess? (0 if will be used in one process) 1 // initial value ) == 0); state->current_state = VALID_STATE; return state; } void delete_state(shared_state_t* state) { pa_assert(sem_destroy(&state->semaphore) == 0); pa_assert(munmap(state, sizeof(shared_state_t)) == 0); } int main() { log_printf("Main process started\n"); state = create_state(); const int process_count = 2; pid_t processes[process_count]; for (int i = 0; i < process_count; ++i) { log_printf("Creating process %d\n", i); pa_assert((processes[i] = fork()) >= 0); if (processes[i] == 0) { process_func(i); exit(0); } } for (int i = 0; i < process_count; ++i) { int status; pa_assert(waitpid(processes[i], &status, 0) != -1) pa_assert(WIFEXITED(status) && WEXITSTATUS(status) == 0); log_printf("Process %d 'joined'\n", i); } delete_state(state); log_printf("Main process finished\n"); return 0; }Именнованные семафорыВ примере про именованные объекты разделяемой памяти мы явно запускали процесс для инициализации состояния до процессов-воркеров, чтобы избежать гонки инициализации состояния.В этом примере предлагается способ избежать гонки используя именованный семафор.В примере используется одно и то же имя для объекта разделяемой памяти и семафора. Это безопасно, так как имя семафора автоматически расширяется префиксом или суффиксом `sem`. То есть в результате имена разные.%%cpp sem_named.c %# Обратите внимание: -lrt. Здесь нужна новая разделяемая библиотека %run gcc -Wall -fsanitize=thread sem_named.c -lrt -lpthread -o s.exe %run ./s.exe work 1 /s42 & PID=$! ; ./s.exe work 2 /s42 ; wait $PID %run ./s.exe cleanup /s42 # необязательная команда. Будет работать и без нее #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include #include #include #include #include const char* log_prefix(const char* file, int line) { struct timeval tp; gettimeofday(&tp, NULL); struct tm ltime; localtime_r(&tp.tv_sec, <ime); static __thread char prefix[100]; size_t time_len = strftime(prefix, sizeof(prefix), "%H:%M:%S", <ime); sprintf(prefix + time_len, ".%03ld %s:%3d [pid=%d]", tp.tv_usec / 1000, file, line, getpid()); return prefix; } #define log_printf_impl(fmt, ...) { dprintf(2, "%s: " fmt "%s", log_prefix(__FILE__, __LINE__), __VA_ARGS__); } #define log_printf(...) 
log_printf_impl(__VA_ARGS__, "") // process-aware assert #define pa_warn_if_not(stmt) if (stmt) {} else { log_printf("WARNING: '" #stmt "' failed\n"); } #define pa_assert(stmt) if (stmt) {} else { log_printf("'" #stmt "' failed\n"); exit(EXIT_FAILURE); } typedef enum { VALID_STATE = 0, INVALID_STATE = 1 } state_t; typedef struct { pthread_mutex_t mutex; state_t current_state; // protected by mutex } shared_state_t; void process_safe_func(shared_state_t* state) { // all function is critical section, protected by mutex pthread_mutex_lock(&state->mutex); // try comment lock&unlock out and look at result pa_assert(state->current_state == VALID_STATE); state->current_state = INVALID_STATE; // do some work with state. sched_yield(); state->current_state = VALID_STATE; pthread_mutex_unlock(&state->mutex); } shared_state_t* load_state(const char* shm_name, bool do_create) { // открываем / создаем объект разделяемой памяти int fd = shm_open(shm_name, O_RDWR | (do_create ? O_CREAT : 0), 0644); if (do_create) { pa_assert(ftruncate(fd, sizeof(shared_state_t)) == 0); } shared_state_t* state = mmap( /* desired addr, addr = */ NULL, /* length = */ sizeof(shared_state_t), /* access attributes, prot = */ PROT_READ | PROT_WRITE, /* flags = */ MAP_SHARED, /* fd = */ fd, /* offset in file, offset = */ 0 ); pa_assert(state != MAP_FAILED); if (do_create) { // create interprocess mutex pthread_mutexattr_t mutex_attrs; pa_assert(pthread_mutexattr_init(&mutex_attrs) == 0); // Важно! pa_assert(pthread_mutexattr_setpshared(&mutex_attrs, PTHREAD_PROCESS_SHARED) == 0); pa_assert(pthread_mutex_init(&state->mutex, &mutex_attrs) == 0); pa_assert(pthread_mutexattr_destroy(&mutex_attrs) == 0); state->current_state = VALID_STATE; } return state; } void unload_state(shared_state_t* state) { pa_assert(munmap(state, sizeof(shared_state_t)) == 0); } shared_state_t* process_safe_init_and_load(const char* name) { // succeeded only for first process. This process will initalize state sem_t* init_semaphore = sem_open( name, O_CREAT | O_EXCL, 0644, 0); // Создаем семафор с изначальным значением 0. 
Если семафор уже есть, то команда пофейлится if (init_semaphore != SEM_FAILED) { // Если смогли сделать семафор, то мы - главный процесс, ответственный за инициализацию // initializing branch for initializing process shared_state_t* state = load_state(name, /*do_create=*/ 1); sem_post(init_semaphore); // Кладем в "ящик" весточку, что стейт проинициализирован sem_close(init_semaphore); return state; } else { // Если мы не главные процесс, то подождем инициализацию // branch for processes waiting initialisation init_semaphore = sem_open(name, 0); pa_assert(init_semaphore != SEM_FAILED); sem_wait(init_semaphore); // ждем весточку, что стейт готов sem_post(init_semaphore); // возвращаем весточку на место, чтобы другим процессам тоже досталось sem_close(init_semaphore); return load_state(name, /*do_create=*/ 0); } } int main(int argc, char** argv) { pa_assert(argc >= 2); if (strcmp("cleanup", argv[1]) == 0) { log_printf(" Cleanup sem and shm: %s\n", argv[2]); pa_warn_if_not(shm_unlink(argv[2]) == 0); pa_warn_if_not(sem_unlink(argv[2]) == 0); log_printf(" State created\n"); } else if (strcmp("work", argv[1]) == 0) { pa_assert(argc >= 3); int worker = strtol(argv[2], 0, 10); log_printf(" Worker %d started\n", worker); shared_state_t* state = process_safe_init_and_load(argv[3]); for (int j = 0; j < 10000; ++j) { process_safe_func(state); } unload_state(state); log_printf(" Worker %d finished\n", worker); } else { pa_assert(0 && "unknown command") } return 0; }Важное замечание про именованные и неименованные семафорыДля открытия/закрытия именованных семафоров используются `sem_open` и `sem_close`.А для неименованных `sem_init` и `sem_destroy`. Смешивать эти операции определенно не стоит, если конечно, вы где-нибудь не найдете документацию, подтверждающую обратное. Делать `sem_open`, а затем `sem_destroy`, это как создавать объект конструктором одного класса, а уничтожать деструктором другого (для родственных классов, без виртуального деструктора). Сочетаемость семафоров и сигналовНет этой сочетаемости.```process 1> send signal to process 2> sem_postprocess 2> sem_wait> check variable that is set in signal handler```Не гарантируется, что сигнал будет доставлен и обработан до того, как отработает sem_wait.%%cpp sem_and_signal.c %run gcc -Wall -fsanitize=thread -lrt sem_and_signal.c -o sem_and_signal.exe %run ./sem_and_signal.exe #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include const char* log_prefix(const char* file, int line) { struct timeval tp; gettimeofday(&tp, NULL); struct tm ltime; localtime_r(&tp.tv_sec, <ime); static __thread char prefix[100]; size_t time_len = strftime(prefix, sizeof(prefix), "%H:%M:%S", <ime); sprintf(prefix + time_len, ".%03ld %s:%3d [pid=%d]", tp.tv_usec / 1000, file, line, getpid()); return prefix; } #define log_printf_impl(fmt, ...) { dprintf(2, "%s: " fmt "%s", log_prefix(__FILE__, __LINE__), __VA_ARGS__); } #define log_printf(...) 
log_printf_impl(__VA_ARGS__, "") // process-aware assert #define pa_assert(stmt) if (stmt) {} else { log_printf("'" #stmt "' failed\n"); exit(EXIT_FAILURE); } volatile sig_atomic_t signal_count = 0; static void handler(int signum) { signal_count += 1; } typedef struct { sem_t semaphore_1; sem_t semaphore_2; } shared_state_t; shared_state_t* state; shared_state_t* create_state() { shared_state_t* state = mmap( /* desired addr, addr = */ NULL, /* length = */ sizeof(shared_state_t), /* access attributes, prot = */ PROT_READ | PROT_WRITE, /* flags = */ MAP_SHARED | MAP_ANONYMOUS, /* fd = */ -1, /* offset in file, offset = */ 0 ); pa_assert(state != MAP_FAILED); pa_assert(sem_init(&state->semaphore_1, 1, 0) == 0); pa_assert(sem_init(&state->semaphore_2, 1, 0) == 0); return state; } void delete_state(shared_state_t* state) { pa_assert(sem_destroy(&state->semaphore_1) == 0); pa_assert(sem_destroy(&state->semaphore_2) == 0); pa_assert(munmap(state, sizeof(shared_state_t)) == 0); } int main() { log_printf("Main process started\n"); state = create_state(); pid_t process = fork(); if (process == 0) { sigaction(SIGUSR1, &(struct sigaction){.sa_handler = handler, .sa_flags = SA_RESTART}, NULL); sleep(1); // imitate synchronous start for (int i = 0; ; ++i) { sem_wait(&state->semaphore_1); int cnt = signal_count; if (cnt != i + 1) { fprintf(stderr, "Signals and semaphors are not ordered... i = %d, signals_count = %d\n", i, cnt); exit(-1); } if (i % 100000 == 0) { fprintf(stderr, "i = %d\n", i); } sem_post(&state->semaphore_2); } } else { sleep(1); // imitate synchronous start int status; int ret; while ((ret = waitpid(process, &status, WNOHANG)) == 0) { kill(process, SIGUSR1); sem_post(&state->semaphore_1); while (sem_timedwait(&state->semaphore_2, &(struct timespec){.tv_nsec = 500000000}) == -1 && (ret = waitpid(process, &status, WNOHANG)) == 0) { } } pa_assert(ret != -1) pa_assert(WIFEXITED(status) && WEXITSTATUS(status) == 0); } delete_state(state); log_printf("Main process finished\n"); return 0; }Importing libraries and loading the datasetimport sklearn.datasets as datasets import pandas as pd import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline iris = datasets.load_iris() X = iris.data df=pd.DataFrame(iris.data, columns=iris.feature_names) y = iris.target df.head() df.shapeFeature engineeringdf.info() df.describe()Correlation matrixcorr_matrix = df.corr() corr_matrixVisualizationssns.pairplot(df) corrmat=df.corr() top_corr_features=corrmat.index plt.figure(figsize=(13,10)) # plot heat map g=sns.heatmap(df[top_corr_features].corr(), annot=True, cmap="RdYlGn")Train-Test SplittingX.shape y.shape from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(X,y,test_size=0.20, random_state=10)Model Evaluationfrom sklearn.tree import DecisionTreeClassifier model = DecisionTreeClassifier(max_depth=3) model.fit(x_train, y_train)Predictionpredict = model.predict(x_test) predictTest Accuracymodel.score(x_test, y_test)Visualize Decision Tree with Plot Treefrom sklearn import tree fig = plt.figure(figsize=(25,20)) _ = tree.plot_tree(model, feature_names=iris.feature_names, class_names=iris.target_names, filled=True) fig.savefig("decistion_tree.png")Visualize Decision Tree with graphvizimport graphviz dot_data = tree.export_graphviz(model, out_file=None) graph = graphviz.Source(dot_data) graph.render("iris") dot_data = tree.export_graphviz(model, out_file=None, feature_names=iris.feature_names, class_names=iris.target_names, filled=True, 
rounded=True, special_characters=True) graph = graphviz.Source(dot_data, format="png") graph graph.render("decision_tree_graphivz")Question 1. Create a Python code that displays a square matrix whose length is 5import numpy as np #import Library #square matrix has same length and width thus it is 5x5 matrix SquareMatrix = np.array ([[6,0,0,0,6],[7,0,0,0,7],[8,0,0,0,8],[9,0,0,0,9],[4,0,0,0,4]]) #5x5 matrix print("SQUARE MATRIX WHOSE LENGTH IS 5:") print() print(SquareMatrix) #displays the final outputSQUARE MATRIX WHOSE LENGTH IS 5: [[6 0 0 0 6] [7 0 0 0 7] [8 0 0 0 8] [9 0 0 0 9] [4 0 0 0 4]]Question 2. Create a Python code that displays a square matrix whose elements below the principal diagonal are zeroA = np.triu([[1,2,3,4,5],[1,2,3,4,5],[1,2,3,4,5],[1,2,3,4,5],[1,2,3,4,5]]) #square matrix print(A) #displays the final output[[1 2 3 4 5] [0 2 3 4 5] [0 0 3 4 5] [0 0 0 4 5] [0 0 0 0 5]]Question 3. Create a Python code that displays a square matrix which is symmetricalC = np.array([[1,2,3,4,5],[2,1,2,3,4],[3,2,1,2,3],[4,3,2,1,2],[5,4,3,2,1]]) #square matrix print("SQUARE MATRIX WHICH IS SYMMETRICAL:") print(C) #displays the final outputSQUARE MATRIX WHICH IS SYMMETRICAL: [[1 2 3 4 5] [2 1 2 3 4] [3 2 1 2 3] [4 3 2 1 2] [5 4 3 2 1]]Question 4. What is the inverse of matrix C? Show your solution by python coding.C = np.array ([[1,2,3], [2,3,3], [3,4,-2]]) #Given matrix invC = np.linalg.inv(C) #Compute the (multiplicative) inverse of a matrix print("INVERSE OF MATRIX C:") print(invC) #displays the final outputINVERSE OF MATRIX C: [[-3.6 3.2 -0.6] [ 2.6 -2.2 0.6] [-0.2 0.4 -0.2]]Question 5. What is the determinant of the given matrix in Question 4? Show your solution by python coding.C = np.array ([[1,2,3], [2,3,3], [3,4,-2]]) #Given matrix detOfC = np.linalg.det(C) #Compute the determinant of an array print("DETERMINANT OF THE GIVEN MATRIX IN QUESTION 4:") print(int(detOfC)) #displays the final outputDETERMINANT OF THE GIVEN MATRIX IN QUESTION 4: 5Question 6. 
Find the roots of the linear equations by showing its python codeseq= np.array([[5,4,1],[10,9,4], [10,13,15]]) #Given matrix const = np.array([[[3.4],[8.8],[19.2]]]) roots = np.linalg.inv(eq) @ const print("ROOTS OF THE GIVEN LINEAR EQUATIONS:") print(roots) #displays the final outputROOTS OF THE GIVEN LINEAR EQUATIONS: [[[0.2] [0.4] [0.8]]]Show the convergence map with its peaks highlightedconv_map = ConvergenceMap.load(os.path.join(dataExtern(),"conv1.fit")) #Plot setup fig,ax = plt.subplots(figsize=(8,8)) #Smooth on 1 arcmin conv_map.smooth(1.0*u.arcmin,kind="gaussianFFT",inplace=True) #Find the peak locations and height sigma_peaks = np.linspace(-2.,11.,101) height,positions = conv_map.locatePeaks(sigma_peaks,norm=True) #Show the map and the peaks on it (left panel) conv_map.visualize(fig=fig,ax=ax,colorbar=True,cbar_label=r"$\kappa$") ax.scatter(*positions[height>2.].to(u.deg).value.T,color="black",marker="x") ax.set_xlim(0,conv.side_angle.to(u.deg).value) ax.set_ylim(0,conv.side_angle.to(u.deg).value) fig.tight_layout()Show the peak histogram and the PDF of the convergence map#Plot setup fig,ax = plt.subplots(figsize=(8,8)) #Smooth on 1 arcmin conv_map.smooth(1.0*u.arcmin,kind="gaussianFFT",inplace=True) #Show the peak histogram and the PDF at these convergence values sigma = np.linspace(-2.,11.,101) #This method is wrapped around conv.countPeaks(sigma) conv.peakHistogram(sigma,norm=True,fig=fig,ax=ax) ax_right = ax.twinx() #This method is wrapped around conv.pdf(sigma) conv.plotPDF(sigma,norm=True,fig=fig,ax=ax_right,color="red") #All PDF quantities are shown in red ax_right.spines["right"].set_color("red") ax_right.tick_params(axis="y",colors="red") ax_right.yaxis.label.set_color("red")Plot the E and B modes power spectrumshear_map = ShearMap.load(os.path.join(dataExtern(),"WLshear_z2.00_0001r.fits")) #Plot setup fig,ax = plt.subplots(figsize=(8,8)) #Load in the shear map, compute E and B modes power spectrum l_edges = np.linspace(200.,50000.,50) l,ee,bb,eb = shear_map.eb_power_spectrum(l_edges) ################################################### #Plot the power spectra and prediction from NICAEA# ################################################### #Plot measured E and B modes ax.plot(l,l*(l+1)*ee/(2.*np.pi),label=r"$P^{EE}$",color="red") ax.plot(l,l*(l+1)*bb/(2.*np.pi),label=r"$P^{BB}$",color="blue") #Use the python bindings of NICAEA to get the E mode prediction cosmo = Nicaea(Om0=0.26,Ode0=0.74,w0=-1,sigma8=0.8) ax.plot(l,l*(l+1)*cosmo.convergencePowerSpectrum(l,z=2.0)/(2.*np.pi),label=r"$P^{\kappa\kappa}{\rm (NICAEA)}$",linestyle="--",color="red") #Labels ax.set_xscale("log") ax.set_yscale("log") ax.set_xlabel(r"$\ell$",fontsize=22) ax.set_ylabel(r"$\ell(\ell+1)P_\ell/2\pi$",fontsize=22) ax.legend(loc="upper left")Drought in Europe Summer 2018 Drought Analysis with ICOS Ecosystem DataOne of the consequences of climate change are more frequent occurrences of drought. Dry spells are expected to happen more often and last for a longer time. This has been observed in a number of areas in Southern and Central Europe. During the summer of 2018, even parts of Northern Europe (incl. Sweden) were affected by drought.The figure bellow shows the results of a drought-index called [SPEI06](http://spei.csic.es/home.html) applied over Europe for the month of August for different years: 2003, 2010, 2015 and 2018 [[1](references)]. The dark red colors represent areas affected by drought whilst the darker blue colors mark areas with high levels of precipitation. 
But what is drought and how can we define it? I. Drought - DefinitionDrought can be defined as a period that is characterized by water scarcity caused by lower than average levels of precipitation [[2](references)]. Periods of drought can occur during different seasons. However, in most cases, droughts occur during periods with unusually high temperatures and insufficient rainfall.Droughts can be divided into different categories [[3](references)]:- **Meteorological droughts:** occur when the levels of precipitation are lower than average for an extended period of time.- **Agricultural droughts:** occur when the soil water content goes below a certain degree and the crops can no longer absorb the water that is contained in the soil. Such events may stress the crops and lead to lost harvests.- **Hydrological droughts:** occur when the water level in water reserves like aquifers, lakes and reservoirs fall bellow a certain threshold. - **Socioeconomic droughts:** happen when dry spells affect the society. For instance, when the demand for an economic good exceeds supply due to a weather-related shortage in water availability. II. Drought Impact on TreesDroughts have a significant impact on plants. Here the focus will be set on the impact of droughts on trees. During periods characterized by uncommonly high temperatures and low water content in the soil, trees tend to cease to grow and their resistance towards illnesses, like e.g. fungal diseases or pest infestations, weakens [[4](references)]. The first sign of drought affliction on trees is that their leaves start to close even during day and will eventually fall off, if the tree does not get access to water in time [[4](references)]. Other trees might behave differently. For instance, their leaves might start to crumble instead of closing. When trees sense that water supplies are scarce, they react by closing their [stomata](https://en.wikipedia.org/wiki/Stoma), which ultimately brings their photosynthetic activity to a halt [[4](references)]. It is in this state that the tree might lose its leaves driven by the same mechanisms that cause this effect during autumn. Trees that lose their leaves while they are still green, lose a lot of nutrients. Additionally, water-stressed trees have a limited capacity to transfer nutrients to all parts of the tree, which, in some cases, may lead to tree malnutrition.Trees lose green leaves during severe drought events. Under other circumstances, the leaves have time to turn yellow before they fall or turn brown before they dry out close to the tree trunk. Depending on the type of tree, leaf shedding may occur during the drought or just after rehydration [[4](references)]. In Nordic forests [birch](https://en.wikipedia.org/wiki/Birch)- and the [European spruce](https://en.wikipedia.org/wiki/Picea_abies)-trees are more sensitive to drought [[5](references)]. The needle-like leaves of the European spruce turn brown and fall off while birch-leaves also fall off before they have had the chance to obtain their autumn coloration [[5](references)]. In general, young or fast-growing trees tend to be more severely affected compared to older or more slow-growing trees [[5](references)]. When plants cease to photosynthesize, their leaves are afflicted by ([chlorosis](https://en.wikipedia.org/wiki/Chlorosis)), lose their green color ([chlorophyll](https://en.wikipedia.org/wiki/Chlorophyll)) and begin to show signs of autumn coloration. 
When plants reach this state, due to the occurrence of a drought, it is an indication that even their roots have been damaged [[4](references)]. Entire branches may dry out and die. Plants that have survived a severe drought event once, are more likely to survive a new event. This is one of the reasons why young trees are usually more severely affected [[4](references)]. III. Drought Impact on Decomposers/Detritivores and their Activity Decomposers and detritivores are organisms that break down organic matter (e.g. dead twigs and leaves, decaying organisms or excrements, etc.) to carbon dioxide, methane, carboxylic acid, water and heat [[6](references)]. Typical examples of decomposers are fungi and bacteria. Typical examples of detritivores are earthworms, woodlice and sea cucumbers. The difference between detritivores and decomposers is that detritivores have to ingest nutrients in order to break them down to organic matter. Through their activity, decomposers and detritivores release carbon to the atmosphere. If the occurrence of a drought causes the soil water content to drop below a certain threshold, the environment can become too dry for decomposers and detritivores. To survive they will limit their level of activity and consequently reduce the amount of carbon they release to the atmosphere. IV. Drought Impact on the Carbon Ballance of EcosystemsThe amount of carbon dioxide in the atmosphere may increase during an extended period of drought. This can be attributed to the change in the behaviour of plants once the climate gets drier and the temperature too high. Droughts make the soil become drier, the air less moist and thus plants are forced to save water in order to preserve their existing tissue and survive. This is achieved by limiting or totally ceasing their photosyntetic activity, which in turn means that they limit or totally stop their intake of carbon dioxide [[7](references)]. Subsequently, plants absorb less carbon dioxide from their environment during drought periods compared to other times. An increase in the frequence of drought events can therefore contribute to the global warming effect and, thus, create a vicious circle of extreme temperatures [[7](references)]. V. Notebook - Table of ContentsThis notebook is dedicated to using ICOS Ecosystem Data from Hyltemossa Research Station in Southern Sweden, to study how the drought during the summer of 2018 affected the vegetation and the carbon balance in the surrounding area. The temporal resolution of the data extends from January 1st 2015 to December 31st 2018. Another objective of this notebook is to introduce basic principles of Python Programming. More in particular, users are going to learn how to:- Read in csv files to Python structures (Pandas DataFrames)- Clean and Harmonize data- Process data and Compute Basic Statistics- Plot Data by Creating Static and Interactive PlotsThe notebook is divided in the following main parts:- [Introduction](intro)- [Instructions on How to Use the Notebook](instructions_how_to_use_nb)- [Data from ICOS Hyltemossa Station](data_HTM_station)- [Python Programming](py_programming) - [References](references) VI. Instructions on How to Use the Notebook Run the NotebookIn order to run this Jupyter Notebook, go to the menu at the top of the page and click on **Kernel** and then **Restart & Run All**. Use the links in the **Table of Contents** above to navigate to different parts of the notebook. 
Parts of the notebook that are long include an additional table of contents with links to their corresponding subparts. Use the links to quickly navigate from one subpart to another. It is also possible to scroll. Once you have clicked on **Restart & Run All**, it will be possible to navigate to the plots of the different programming parts and interact with them using widgets. Widget is the Python name for an interactive element (e.g. dropdown lists, radio buttons, execution buttons, etc.). A more detailed description of how to interact with the widgets and the interactive plots of every part of the analysis is presented at the beginning of that part. Run a Single Code-CellA Jupyter Notebook consists of code-cells. It is possible to write Python code in a code-cell and then run it by clicking on **Run** in the menu at the top of the page.Observe that only one code-cell will be executed, and this is the code-cell that was active when you clicked on **Run**. You can activate a code-cell just by clicking on it. An active code-cell is highlighted in blue or green color (see image below).It is also possible to write Markdown in a Jupyter Notebook cell. For instance, the instructions you are reading here are written in a Markdown cell that includes Markdown text and HTML code. When you are writing Python code, make sure that the cell is a code-cell. The type of the currently active cell is shown in the dropdown list on the menu bar at the top of the page (see figure). A cell that includes Python code should be marked as **Code**. Add a Code-CellClick on **"+"** in the menu to add a new code-cell under the currently active code-cell. Delete a Code-CellIf you wish to delete a code-cell, select the code-cell by clicking on it and then go to the menu at the top of the page and click on **Edit** --- > **Delete Cells**. Stop ExecutionIf an execution is taking too long, you can stop your notebook from running by clicking on **Interrupt kernel** in the menu.Alternatively, go to **Kernel** and click on **Interrupt**. Save NotebookClick on **Save** frequently to save your work. Download NotebookIf you wish to download the notebook as a Jupyter Notebook, go to the menu at the top of the page, click on **File** --- > **Save As...** --- > **Notebook(.ipynb)**.If you wish to save your work as pure Python code, go to the menu at the top of the page, click on **File** --- > **Save As...** --- > **Python(.py)**. VII. Data from ICOS Hyltemossa Research StationAreas in southern Sweden showed clear signs of drought damage during the summer of 2018. The analysis in this module is conducted using data from [Hyltemossa Research Station](https://www.icos-sweden.se/station_hyltemossa.html).The station is located near a 30-year-old managed spruce forest south of Perstorp, in northwestern Scania, Sweden. The station collects atmospheric and ecosystem measurements and is part of the [ICOS Sweden](https://www.icos-sweden.se/) research infrastructure. [ICOS](https://www.icos-ri.eu/) is an acronym for Integrated Carbon Observation System, a European Research Infrastructure that has implemented a European measurement system for high-quality, high-precision greenhouse gas observations. The objective of ICOS is to create an extended network of measuring stations producing time series of high-quality data that will ultimately help to map the carbon balance of Europe.
The [ICOS Carbon Portal](https://www.icos-cp.eu/) provides free and open access to all ICOS data.%%javascript IPython.OutputArea.prototype._should_scroll = function(lines) { return false; } #Import modules: import folium #Create map object: m = folium.Map(location=[56.097991, 13.420181], zoom_start=7) #Add marker: folium.Marker(location=[56.097991, 13.420181], popup='Hyltemossa research station', icon=folium.Icon(color='darkred', icon='cloud')).add_to(m) #Show map mMeasured Variables from Hyltemossa StationFor the purpose of this analysis, we will use a subset of the available variable measurements from Hyltemossa station. A list of the code (column name in the file), the title and the unit of these variables is presented below:- **TA_1_1_1** ---> Air Temperature ($^oC$) - **P_1_1_1** ---> Precipitation (mm) - **SWC_4_4_1** ---> Soil Water Content (%) - **GPP_PI_1_1_1** ---> Gross Primary Production ($\mu mol\: m^{-2}\:s^{-1}$) - **RECO_PI_1_1_1** ---> Respiration ($\mu mol\: m^{-2}\:s^{-1}$) - **FC_PI_1_1_1** ---> Carbon Flux ($\mu mol\: m^{-2}\:s^{-1}$) - **SW_IN_1_1_1** ---> Incoming Shortwave Infrared (SW-IR) Solar Radiation - Light (W $m^{-2}$)Here is a brief explanation of what every variable stands for: What is Soil Water Content (SWC)?Soil Water Content measures the proportion of water in the soil. In this case it is expressed as a percentage.$$SWC = \frac{100 M_w}{M_s}$$where:SWC = Soil Water Content (%), $M_w$ = mass of water in the soil (kg), $M_s$ = mass of dry soil (kg) What is Photosynthesis?Photosynthesis is described as the process by which a plant uses light energy to transform carbon dioxide, water and minerals into oxygen and energy-rich organic compounds. The chemical formula of photosynthesis is:$$6 H_2O + 6 CO_2 \rightarrow{} C_6H_{12}O_6 + 6O_2 $$ What is Gross Primary Production (GPP)?The Gross Primary Production (GPP) of an ecosystem can be described as the amount of carbon that has been taken from the atmosphere by plants because of their photosynthetic activity. What is Respiration?Respiration can be described as the amount of carbon that is emitted from an ecosystem because of animal and plant respiration. What is Net Ecosystem Exchange (NEE)?Net Ecosystem Exchange (NEE) is a measure of the net exchange of carbon between an ecosystem and the atmosphere per unit ground area. In simpler words, it is the balance between the total amount of carbon emitted and the total amount of carbon absorbed by an ecosystem per unit ground area. What is Shortwave Infrared Incoming Solar Radiation (SW-IR)?Shortwave Infrared Incoming Solar Radiation can be described as the amount of incoming solar radiation in the shortwave infrared wavelengths (1.4 - 3 μm) in a given area, during a given time period. Hyltemossa station uses instruments that measure the amount of incoming shortwave infrared solar radiation to estimate the available amount of solar energy the vegetation can interact with. The availability of solar energy is essential for the plants to photosynthesize. Back to top VIII. Python ProgrammingThis part presents basic principles of Python programming. The focus is set on reading csv files into Python-specific structures such as Pandas DataFrames (matrix-like structures) and processing the data using built-in methods. The built-in methods are used to filter data and produce basic statistics.
The results are then visualized as interactive plots utilizing the [Bokeh](https://bokeh.pydata.org/en/latest/index.html) interactive visualization library.This part is divided into the following subparts:1. [Import Python Modules](import_py_modules)2. [Prerequisites - Basic Programming Principles in Python](python_intro)3. [Define Global Variables](python_global_var)4. [Read csv-files into Python Pandas DataFrames](csv2pandasdf) 5. [Update values in a Pandas DataFrame Column](updatePandasDfCol)6. [Handling Date and Time in Python - Python DateTime Objects](CreateDatetimeObj)7. [Add a column with DateTime-Objects in every Pandas DataFrame](addDatetimeCol2pdDf)8. [Indexing a Pandas DataFrame](pandasDfSetIndex) 1. [Set a column of a Pandas DataFrame as index](pandasDfSetIndex) 2. [Extract all rows from a Pandas DataFrame index-column](pandasDfSearchWithIndex) 3. [How to index a Pandas DataFrame with DateTime Objects](pandasDfSearchWithDateTimeIndex) 4. [How to filter a Pandas DataFrame using an index of DateTime Objects](pandasDfSliceWithDateTimeIndex)9. [Compute Statistics over a Pandas DataFrame Column](pandasDfCalcStat) 1. [Compute the min, max, mean and standard deviation over all rows of a Pandas DataFrame column](pandasDfCalcStatMinMaxMeanStDev) 2. [Compute the min, max, mean and standard deviation over a selection of rows of a Pandas DataFrame column](pandasDfCalcStatMinMaxMeanStDevFiltered)10. [Plot Data from a Pandas Dataframe with Bokeh](bokeh_plot_df) 1. [Create an Interactive Plot from 2 Pandas DataFrame columns](bokeh_plot_2_cols_from_df) 2. [Plot Statistics with Bokeh Visualization Library ](bokeh_plot_stat_barplot) 3. [Create Plots with Cumulative Sums of Daily Totals and Daily Means per Year](bokeh_plot_iterative_sums_of_daily_totals_or_means_per_year_intro) 4. [Barplot with Incoming Shortwave-Infrared Solar Radiation (Daily Total) & GPP (Daily Total)](bokeh_plot_daily_total_GPP_SWIR_per_year) 5. [Barplot with GPP (Daily Total) and Soil Water Content (Daily Mean)](bokeh_plot_daily_total_GPP_daily_mean_SWC_per_year) 6. [Plot Daily Mean Soil Water Content with Daily Total Respiration and Daily Mean Air Temperature](bokeh_plot_daily_total_RECO_daily_mean_SWC_and_TA_per_year) 7. [Plot Daily Mean Soil Water Content with Daily Total GPP and Daily Total Precipitation](bokeh_plot_daily_mean_SWC_and_daily_total_GPP_and_Precip_per_year) 8. [Plot Daily Mean Soil Water Content and Air temperature with Daily Total GPP and Light (SW-IR)](bokeh_plot_daily_mean_SWC_and_TA_and_daily_total_GPP_and_SWIR_per_year) Back to top 1. Import Python ModulesPython is a programming language that includes built-in methods. A module can be described as set of functions. To use these functions, you need to first import the module they belong to. Usually, modules are imported in the beginning of a Python-program.The next code-cell shows the syntax of how to import Python modules. It is possible to import a module using the syntax import math. To import all functions from a module type from math import *. However, this is considered bad practice, so it is best to avoid that. For importing a single function from a module type from datetime import datetime. Some large modules may include more than one different packages of functions. For example, the bokeh module includes a package of functions called plotting, which in turn includes a function called figure.When you import a module, it is possible to change its name after the keyword as. 
Usually, the name provided after as is an abbreviation of the modules official name. The following piece of code import pandas as pd, will import a module called _pandas_ and change its name to _pd_. This way, you do not have to type the full name of the module when you call it in your code. Ultimately, by following this practice, your code will be easier to read.#Import modules: import numpy as np import pandas as pd import itertools from datetime import datetime import math from bokeh.plotting import figure, show from bokeh.models import ColumnDataSource, HoverTool, Label, Legend, SingleIntervalTicker, LinearAxis, Range1d from bokeh.io import reset_output, output_notebook reset_output() output_notebook()Back to TOC 2. Prerequisites - Basic Programming Principles in PythonTo understand the Python code in this notebook, you are expected to know the basic principle of the following concepts:- Global and Local Variables- Python Dictionaries- Python Lists- Python Tuples- Control Statements in Python - If-Statements - For-Loops- List Comprehensions- String Manipulation in Python- FunctionsIf you are not familiar with the previous concepts or you want to brush-up your memory, you can read through the corresponding part in the **Quickstart to Python**-notebook included in the "introduction" folder. Back to TOC 3. Define Global VariablesThe next code-cell includes 5 global variables. All 5 global variables are Python dictionaries. Global variables should be handled carefully and if possible avoided, if no specific reason exists. In this implementation, we will make regular use of these variables and, thus we define them as global. The global variables here handle the format of numbers (superscript/subscript) and the association between the name, code and unit of ecosystem variables.#Create a dictionary to transform numbers to their equivalent subscript or superscript representation: SUB = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉") SUP = str.maketrans("0123456789", "⁰¹²³⁴⁵⁶⁷⁸⁹") #Create a dictionary to store the variable names and their corresponding codes: measurement_dict_eng = {'TA_1_1_1':'Air Temperature', 'FC_PI_1_1_1':'Carbon Flux (NEE)', 'GPP_PI_1_1_1':'Gross Primary Production', 'P_1_1_1':'Precipitation', 'RECO_PI_1_1_1':'Respiration', 'SW_IN_1_1_1':'SW-IR Incoming Solar Radiation', 'SWC_4_4_1':'Soil Water Content'} #Create a dictionary to store the units related to every variable-code: unit_dict = {'TA_1_1_1':'\u00b0C', 'FC_PI_1_1_1':'umol m-2 s-1'.replace('u', '\u03BC').translate(SUP), 'P_1_1_1':'mm', 'RECO_PI_1_1_1':'umol m-2 s-1'.replace('u', '\u03BC').translate(SUP), 'GPP_PI_1_1_1':'umol m-2 s-1'.replace('u', '\u03BC').translate(SUP), 'SWC_4_4_1':'%', 'SW_IN_1_1_1':'W/m2'.translate(SUP)} #Create a dictionary to store the units related to every variable-code for daily aggregated computations: unit_dict_daily = {'TA_1_1_1':'C\u00b0', 'FC_PI_1_1_1':'umol m-2'.replace('u', '\u03BC').translate(SUP), 'P_1_1_1':'mm', 'RECO_PI_1_1_1':'umol m-2'.replace('u', '\u03BC').translate(SUP), 'GPP_PI_1_1_1':'umol m-2'.replace('u', '\u03BC').translate(SUP), 'SWC_4_4_1':'%', 'SW_IN_1_1_1':'MJoules/m2'.translate(SUP)}Back to TOC 4. Read csv-files into Python Pandas Dataframes What is a Pandas DataFrame ?Python has a data structure called [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html). It consists of a 2-dimensional matrix which can vary in size (in terms of number of columns or number of rows). 
A Pandas DataFrame has the ability to store data belonging to different data types. It is, for example, permitted to store a column with strings, a column with integers and a column with floats in the same dataframe (see Figure). A Pandas DataFrame has indexed columns and rows. Columns can be indexed based on their name whilst rows can be indexed based on their row number or a specific index-value. A more detailed explanation of what a Pandas DataFrame index is, can be found in the **Quickstart to Python**-notebook. For now, it is enough to envision an index as one of the columns in the dataframe, that include unique values for every row. For example, the column *Student ID* in the figure, could be used as an index. This is attributed to the fact that it is not possible for two students to have the same *Student ID*. In other words, the values in the aforementioned column, uniquely identify every row in the dataframe.The Pandas module (which was renamed to ```pd``` at import) includes a built-in method ```read_csv()``` to read a csv-file to a Pandas DataFrame. The next code-cell shows the Python syntax fo reading in data from a csv-file to a Pandas DataFrame.**Syntax:**pandas.read_csv(path_to_file,              row_with_column_names,              num_of_rows_to_skip,              delimiter,              coding_system\*)\* By defining the coding system, it is possible to read special characters such as, for example, national characters like (å, ä, ö) or symbols like ($C^o$). **Read Air Temperature, Respiration, GPP, NEE and SW-IR Data for 2015**#Read ecosystem-data for 2015: htm_eko_2015 = pd.read_csv('data/SE-Htm_2015_vs20190510.csv', header=0, skiprows=range(1,2), sep=',', encoding='windows-1252') #Show the five first rows of the dataframe: htm_eko_2015.head(5)**Read Air Temperature, Respiration, GPP, NEE and SW-IR Data for 2016**#Read ecosystem-data for 2016: htm_eko_2016 = pd.read_csv('data/SE-Htm_2016.csv', header=0, skiprows=range(1,2), sep=',', encoding='windows-1252') #Show the 2 first rows of the dataframe: htm_eko_2016.head(2)**Read Air Temperature, Respiration, GPP, NEE and SW-IR Data for 2017**#Read ecosystem-data for 2017: htm_eko_2017 = pd.read_csv('data/SE-Htm_2017.csv', header=0, skiprows=range(1,2), sep=',', encoding='windows-1252') #Show the 2 first rows of the dataframe: htm_eko_2017.head(2)**Read Air Temperature, Respiration, GPP, NEE and SW-IR Data for 2018**#Read ecosystem-data for 2018: htm_eko_2018 = pd.read_csv('data/SE-Htm_2018_vs20190510.csv', header=0, skiprows=range(1,2), sep=',', encoding='windows-1252') #Show the 2 first rows of the dataframe: htm_eko_2018.head(2)**Read Soil Water Content, Precipitation and Soil Temperature Data for 2015**#Read ecosystem soil water content, soil temperature and precipitation-data for 2015: #(Note that precipitation is measured in mm) htm_eko_precip_2015 = pd.read_csv('data/SE-Htm_2015_TS_SWC_Prec.csv', header=0, skiprows=range(1,2), sep=',', encoding='windows-1252') #Show the 2 first rows of the dataframe: htm_eko_precip_2015.head(2)**Read Soil Water Content, Precipitation and Soil Temperature Data for 2016**#Read ecosystem soil water content, soil temperature and precipitation-data for 2016: #(Note that precipitation is measured in mm) htm_eko_precip_2016 = pd.read_csv('data/SE-Htm_2016_TS_SWC_Prec.csv', header=0, skiprows=range(1,2), sep=',', encoding='windows-1252') #Show the 2 first rows of the dataframe: htm_eko_precip_2016.head(2)**Read Soil Water Content, Precipitation and Soil Temperature Data for 2017**#Read ecosystem 
soil water content, soil temperature and precipitation-data for 2017: #(Note that precipitation is measured in mm) htm_eko_precip_2017 = pd.read_csv('data/SE-Htm_2017_TS_SWC_Prec.csv', header=0, skiprows=range(1,2), sep=',', encoding='windows-1252') #Show the 2 first rows of the dataframe: htm_eko_precip_2017.head(2)**Read Soil Water Content, Precipitation and Soil Temperature Data for 2018**#Read ecosystem soil water content, soil temperature and precipitation-data for 2018: #(Note that precipitation is measured in mm) htm_eko_precip_2018 = pd.read_csv('data/SE-Htm_2018_TS_SWC_Prec.csv', header=0, skiprows=range(1,2), sep=',', encoding='windows-1252') #Show the 2 first rows of the dataframe: htm_eko_precip_2018.head(2)Back to TOC 5. Update values in a Pandas DataFrame ColumnOne of the most important and time-consuming tasks in data analysis is cleaning and harmonizing data. In this respect, it is important to be able to access data and alter values in a structured and automated way. Pandas DataFrames include such methods. This part presents how to update existing values in a Pandas DataFrame. Missing values in ICOS data have a fixed value equal to ```-9999.0```. Missing values can be the result of something going wrong with a measurement. Often when you create a plot, you do not wish to include the missing values. In these cases, you have to convert the *missing values* to ```NaN```. ```NaN``` is a numeric datatype that stands for *Not a Number* and does not represent a value. When the value of a field is set to ```NaN```, the field is treated as an empty field. **Function**def ecoToNan(df_data, variable, threshold): #Import modules: import pandas as pd import numpy as np from numpy import nan #Set values under the threshold equal to NaN: df_data.loc[df_data[variable] < threshold, [variable]] = np.nan #Return dataframe: return df_data **Call Function**#Convert all "missing values" (i.e.
values=-9999.0) and negative GPP- or RECO-values to NaN: #Air-Temperature (TA): htm_eko_2015.loc[htm_eko_2015['TA_1_1_1'] <-9990.0, ['TA_1_1_1']] = np.nan htm_eko_2016.loc[htm_eko_2016['TA_1_1_1'] <-9990.0, ['TA_1_1_1']] = np.nan htm_eko_2017.loc[htm_eko_2017['TA_1_1_1'] <-9990.0, ['TA_1_1_1']] = np.nan htm_eko_2018.loc[htm_eko_2018['TA_1_1_1'] <-9990.0, ['TA_1_1_1']] = np.nan #Carbon Fluxes (NEE): htm_eko_2015.loc[htm_eko_2015['FC_PI_1_1_1'] <-9990.0, ['FC_PI_1_1_1']] = np.nan htm_eko_2016.loc[htm_eko_2016['FC_PI_1_1_1'] <-9990.0, ['FC_PI_1_1_1']] = np.nan htm_eko_2017.loc[htm_eko_2017['FC_PI_1_1_1'] <-9990.0, ['FC_PI_1_1_1']] = np.nan htm_eko_2018.loc[htm_eko_2018['FC_PI_1_1_1'] <-9990.0, ['FC_PI_1_1_1']] = np.nan #Respiration (RECO): htm_eko_2015.loc[htm_eko_2015['RECO_PI_1_1_1'] <0, ['RECO_PI_1_1_1']] = np.nan htm_eko_2016.loc[htm_eko_2016['RECO_PI_1_1_1'] <0, ['RECO_PI_1_1_1']] = np.nan htm_eko_2017.loc[htm_eko_2017['RECO_PI_1_1_1'] <0, ['RECO_PI_1_1_1']] = np.nan htm_eko_2018.loc[htm_eko_2018['RECO_PI_1_1_1'] <0, ['RECO_PI_1_1_1']] = np.nan #Gross Primary Production (GPP): htm_eko_2015.loc[htm_eko_2015['GPP_PI_1_1_1'] <0, ['GPP_PI_1_1_1']] = np.nan htm_eko_2016.loc[htm_eko_2016['GPP_PI_1_1_1'] <0, ['GPP_PI_1_1_1']] = np.nan htm_eko_2017.loc[htm_eko_2017['GPP_PI_1_1_1'] <0, ['GPP_PI_1_1_1']] = np.nan htm_eko_2018.loc[htm_eko_2018['GPP_PI_1_1_1'] <0, ['GPP_PI_1_1_1']] = np.nan #Light (Short-Wave Infrared Incoming Solar Radiation): htm_eko_2015.loc[htm_eko_2015['SW_IN_1_1_1'] <-9990.0, ['SW_IN_1_1_1']] = np.nan htm_eko_2016.loc[htm_eko_2016['SW_IN_1_1_1'] <-9990.0, ['SW_IN_1_1_1']] = np.nan htm_eko_2017.loc[htm_eko_2017['SW_IN_1_1_1'] <-9990.0, ['SW_IN_1_1_1']] = np.nan htm_eko_2018.loc[htm_eko_2018['SW_IN_1_1_1'] <-9990.0, ['SW_IN_1_1_1']] = np.nan #Precipitation: htm_eko_precip_2015.loc[htm_eko_precip_2015['P_1_1_1'] <-9990.0, ['P_1_1_1']] = np.nan htm_eko_precip_2016.loc[htm_eko_precip_2016['P_1_1_1'] <-9990.0, ['P_1_1_1']] = np.nan htm_eko_precip_2017.loc[htm_eko_precip_2017['P_1_1_1'] <-9990.0, ['P_1_1_1']] = np.nan htm_eko_precip_2018.loc[htm_eko_precip_2018['P_1_1_1'] <-9990.0, ['P_1_1_1']] = np.nan #Soil Water Content: htm_eko_precip_2015.loc[htm_eko_precip_2015['SWC_4_4_1'] <-9990.0, ['SWC_4_4_1']] = np.nan htm_eko_precip_2016.loc[htm_eko_precip_2016['SWC_4_4_1'] <-9990.0, ['SWC_4_4_1']] = np.nan htm_eko_precip_2017.loc[htm_eko_precip_2017['SWC_4_4_1'] <-9990.0, ['SWC_4_4_1']] = np.nan htm_eko_precip_2018.loc[htm_eko_precip_2018['SWC_4_4_1'] <-9990.0, ['SWC_4_4_1']] = np.nan6. Handling Date and Time in Python - Python DateTime ObjectsPython has a built-in module (i.e. set of functions) called ```datetime``` that includes functions for handling dates and time. The module provides multiple options on how to process date and time. In this example, we will focus on how to create a DateTime object based on existing information of date and time. The existing information, in this case, is in String format.Click on the [link](https://docs.python.org/3/library/datetime.html) to get more detailed information regarding how you can work with Python DatTime objects. 
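To make the parsing step concrete, here is a minimal, self-contained sketch (not part of the original notebook) of how ```strptime``` turns a string into a DateTime object and how ```strftime``` formats it back; the example values are purely illustrative.

```python
#Minimal illustration of parsing and formatting dates (values are illustrative):
from datetime import datetime, timedelta

#Parse a combined date-and-time string into a DateTime object:
dt = datetime.strptime('01/01/2015 00:30', '%d/%m/%Y %H:%M')

#Format the DateTime object back into a string:
print(dt.strftime('%Y-%m-%d %H:%M'))    #prints: 2015-01-01 00:30

#DateTime objects support arithmetic, e.g. adding half an hour:
print(dt + timedelta(minutes=30))       #prints: 2015-01-01 01:00:00
```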
**Variable storing Date-information in a String format**#Variable containing date information as a String (text): date = '01/01/2015'**Variable storing Time-information in a String format**#Variable containing time information as a String (text): time = '00:30'**Create a Datetime Object**#Create a DateTime Object: datetime_variable = datetime.strptime(date + ' ' + time, '%d/%m/%Y %H:%M') #Show result: datetime_variableBack to TOC 7. Add a column with DateTime-Objects in every Pandas DataFrameWhen you plot data and want to visualize time on the x-axis, many visualization libraries require time to be represented as a DateTime Object. A DateTime Object is a data structure that represents date and time in a specific format (e.g. "Y-m-d H:M:S" may stand for: "2019-01-01 08:05:00"). The following functions create DateTime objects by combining existing information on date and time. Date and time, in this case, are stored as String variables. The functions below create a new column of DateTime objects in a Pandas DataFrame, based on the content of two existing columns, which, in turn, include information about the date and time of a measurement. **Function**def icosEcoAddDatetimeObjYMDHM(df_data): #To be used when time is expressed as: HH:MM #Import modules: from datetime import datetime #Add a column with datetime obj: df_data['DateTime'] = [datetime.strptime((df_data.date.iloc[i]+ ' ' + df_data.time.iloc[i]),'%d/%m/%Y %H:%M') for i in range(len(df_data))] #Return dataframe: return df_data**Function**def icosEcoAddDatetimeObjYMDHMS(df_data): #To be used when time is expressed as: HH:MM:SS #Import modules: from datetime import datetime #Add a column with datetime obj: df_data['DateTime'] = [datetime.strptime((df_data.date.iloc[i]+ ' ' + df_data.time.iloc[i]),'%d/%m/%Y %H:%M:%S') for i in range(len(df_data))] #Return dataframe: return df_data**Call Function(s)**#Add a column with datetime objects to every dataframe: HTM_eko_2015 = icosEcoAddDatetimeObjYMDHM(htm_eko_2015) HTM_eko_2016 = icosEcoAddDatetimeObjYMDHM(htm_eko_2016) HTM_eko_2017 = icosEcoAddDatetimeObjYMDHMS(htm_eko_2017) HTM_eko_2018 = icosEcoAddDatetimeObjYMDHM(htm_eko_2018) HTM_eko_precip_2015 = icosEcoAddDatetimeObjYMDHM(htm_eko_precip_2015) HTM_eko_precip_2016 = icosEcoAddDatetimeObjYMDHM(htm_eko_precip_2016) HTM_eko_precip_2017 = icosEcoAddDatetimeObjYMDHM(htm_eko_precip_2017) HTM_eko_precip_2018 = icosEcoAddDatetimeObjYMDHM(htm_eko_precip_2018) #Show results: HTM_eko_2015.head(5) (A vectorized alternative to these functions, using pandas' own ```to_datetime```, is sketched just below.)Back to TOC 8. Indexing Pandas DataFramesIn this part we will learn how to index Pandas DataFrames. Indexing is a way to extract values on demand based on certain criteria. Here you will learn how to define an index and how to use it in order to extract values from a Pandas DataFrame.
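As a side note on the DateTime functions defined above (this sketch is not part of the original notebook): pandas also provides ```pd.to_datetime```, which performs the same string-to-DateTime conversion in a vectorized way and is usually faster than a list comprehension. A minimal sketch, assuming the same 'date' and 'time' string columns and day-first formatting; the function name is illustrative.

```python
#Hedged, illustrative alternative: vectorized DateTime parsing with pandas.
import pandas as pd

def icosEcoAddDatetimeObjVectorized(df_data, time_format='%d/%m/%Y %H:%M'):
    #Combine the 'date' and 'time' string columns and parse them in one call:
    df_data['DateTime'] = pd.to_datetime(df_data['date'] + ' ' + df_data['time'],
                                         format=time_format)
    #Return dataframe:
    return df_data

#Example call (would be equivalent to icosEcoAddDatetimeObjYMDHM above):
#htm_eko_2015 = icosEcoAddDatetimeObjVectorized(htm_eko_2015)
```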
8.1. Set a Column of a Pandas DataFrame as IndexPandas includes a built-in method to set a column as an index:$$ dataframe\_name.set\_index(column\_name) $$#Set the "DateTime"-column as index in all pandas dataframes: HTM_eko_2015_indexed = HTM_eko_2015.set_index('DateTime') HTM_eko_2016_indexed = HTM_eko_2016.set_index('DateTime') HTM_eko_2017_indexed = HTM_eko_2017.set_index('DateTime') HTM_eko_2018_indexed = HTM_eko_2018.set_index('DateTime') HTM_eko_precip_2015_indexed = HTM_eko_precip_2015.set_index('DateTime') HTM_eko_precip_2016_indexed = HTM_eko_precip_2016.set_index('DateTime') HTM_eko_precip_2017_indexed = HTM_eko_precip_2017.set_index('DateTime') HTM_eko_precip_2018_indexed = HTM_eko_precip_2018.set_index('DateTime') #Show example: HTM_eko_2015_indexed.head(4)8.2. Extract all rows from a Pandas DataFrame index-columnType the following code to retrieve all values from a Pandas DataFrame index:$$dataframe\_name.index.values$$#Get index values: HTM_eko_2015_indexed.index.values8.3. How to index a Pandas DataFrame with DateTime ObjectsUse the following syntax to extract all data for a selected date and time:$$dataframe\_name[dataframe\_name.index==datetime(year, month, day, hour, minute, second)]$$Observe that for this piece of code to work, your Pandas DataFrame must have a column of DateTime Objects as index.#Show all rows that include values for the given date and time: HTM_eko_2015_indexed[HTM_eko_2015_indexed.index==datetime(2015, 6, 1)]8.4. How to filter a Pandas DataFrame using an index of DateTime ObjectsIt is possible to filter a Pandas DataFrame either using its index or based on the values in its columns. The following piece of code shows how to extract data for a given time period. The syntax is:$$dataframe\_name[datetime(year_{start}, month_{start}, day_{start}):datetime(year_{end}, month_{end}, day_{end}, hour_{end}, minute_{end})]$$#Filter all pandas dataframes to extract data for the summer months (June-August): HTM_summermonths_2015 = HTM_eko_2015_indexed[datetime(2015, 6, 1):datetime(2015, 8, 31, 23, 30)] HTM_summermonths_2016 = HTM_eko_2016_indexed[datetime(2016, 6, 1):datetime(2016, 8, 31, 23, 30)] HTM_summermonths_2017 = HTM_eko_2017_indexed[datetime(2017, 6, 1):datetime(2017, 8, 31, 23, 30)] HTM_summermonths_2018 = HTM_eko_2018_indexed[datetime(2018, 6, 1):datetime(2018, 8, 31, 23, 30)] #Filter all pandas dataframes (precipitation) to extract data for the summer months (June-August): HTM_P_summermonths_2015 = HTM_eko_precip_2015_indexed[datetime(2015, 6, 1):datetime(2015, 8, 31, 23, 30)] HTM_P_summermonths_2016 = HTM_eko_precip_2016_indexed[datetime(2016, 6, 1):datetime(2016, 8, 31, 23, 30)] HTM_P_summermonths_2017 = HTM_eko_precip_2017_indexed[datetime(2017, 6, 1):datetime(2017, 8, 31, 23, 30)] HTM_P_summermonths_2018 = HTM_eko_precip_2018_indexed[datetime(2018, 6, 1):datetime(2018, 8, 31, 23, 30)] #Show results: HTM_summermonths_2015.head(5)Back to TOC 9. Compute Statistics over a Pandas DataFrame ColumnIt is possible to calculate the minimum ```min()```, maximum ```max()```, mean ```mean()``` and standard deviation ```std()``` of all values in a column of a Pandas DataFrame. The Pandas syntax for that is:$$dataframe\_name.column\_name.function()$$The Pandas built-in method for computing e.g.
the minimum value of a column, performs the exact same process as the Python code in the following code-cell.#Assume that the following list is a Pandas DataFrame column: pandas_kolumn = [1, 4, -1, 10, 37] #Define and initialize help variable: total_min = 20 #Loop through all the values of the list: for i in pandas_kolumn: #Compare the current value from the list to the value in the help variable: if(i < total_min): #Store the current value as the new minimum: total_min = i #Print the minimum value: print(total_min)9.1. Compute the min, max, mean and standard deviation over all rows of a Pandas DataFrame column **Min Function**#Compute the lowest air temperature of 2015: HTM_eko_2015_indexed.TA_1_1_1.min()**Max Function**#Compute the highest air temperature of 2015: HTM_eko_2015_indexed.TA_1_1_1.max()**Mean Function**#Compute the average air temperature of 2015: HTM_eko_2015_indexed.TA_1_1_1.mean()**Standard Deviation Function**#Compute the standard deviation of air temperatures for 2015: HTM_eko_2015_indexed.TA_1_1_1.std()9.2. Compute the min, max, mean and standard deviation over a selection of rows of a Pandas DataFrame column **Min Function**#Compute the lowest air temperature for the summer months of 2015: HTM_eko_2015_indexed[datetime(2015, 6, 1):datetime(2015, 8, 31, 23, 30)].TA_1_1_1.min()**Max Function**#Compute the highest air temperature for the summer months of 2015: HTM_summermonths_2015.TA_1_1_1.max()**Mean Function**#Compute the average air temperature for the summer months of 2015: HTM_summermonths_2015.TA_1_1_1.mean()**Standard Deviation Function**#Compute the standard deviation of air temperatures for the summer months of 2015: HTM_summermonths_2015.TA_1_1_1.std()Back to TOC 10. Plot Data from a Pandas DataFrame with BokehBokeh is a Python library for creating data visualizations. It offers a large variety of modules for many different types of graphs. If you are interested in discovering more about Bokeh, click on the [link](https://bokeh.pydata.org/en/latest/index.html).In this part we will present a set of different Bokeh plots, depending on our purpose. We will start by creating an interactive plot that shows how the values of a variable change in time. Then we are going to display some basic statistics using barplots. Finally, we are going to create different combinations of barplots and line-graphs to show how ecosystem variable values from different years differ compared to the corresponding values for the year the drought occurred. The latter plots will also include an interactive legend that allows the user to switch layers on and off, to enhance the readability of the plot.The user will be able to control the content of the plots by using widgets. Widgets are interface controls such as dropdown lists, radio-buttons etc.
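As a stand-alone illustration of that widget mechanism (this sketch is not part of the original notebook), the ```interact_manual``` pattern used by the widget functions in this part looks roughly as follows; the function name and the option values are illustrative.

```python
#Minimal illustration of the ipywidgets pattern used in this part (names are illustrative):
from ipywidgets import interact_manual, Dropdown

def show_selection(Year, Variable):
    #In the real widget functions, this is where the plot is (re)drawn:
    print('Selected year:', Year, '- selected variable:', Variable)

#interact_manual() renders the dropdowns plus an execution button;
#show_selection() runs only when that button is clicked:
interact_manual(show_selection,
                Year=Dropdown(options=[2015, 2016, 2017, 2018]),
                Variable=Dropdown(options=['TA_1_1_1', 'GPP_PI_1_1_1']))
```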
This part is divided into the following subparts:- [Create an Interactive Plot from 2 Pandas DataFrame columns](bokeh_plot_2_cols_from_df)- [Plot Statistics with Bokeh Visualization Library](bokeh_plot_stat_barplot)- [Create Plots with Cumulative Sums of Daily Totals and Daily Means per Year](bokeh_plot_iterative_sums_of_daily_totals_or_means_per_year_intro)- [Barplot with Incoming Shortwave-Infrared Solar Radiation (Daily Total) & GPP (Daily Total)](bokeh_plot_daily_total_GPP_SWIR_per_year)- [Barplot with GPP (Daily Total) and Soil Water Content (Daily Mean)](bokeh_plot_daily_total_GPP_daily_mean_SWC_per_year)- [Plot Daily Mean Soil Water Content with Daily Total Respiration and Daily Mean Air Temperature](bokeh_plot_daily_total_RECO_daily_mean_SWC_and_TA_per_year)- [Plot Daily Mean Soil Water Content with Daily Total GPP and Daily Total Precipitation](bokeh_plot_daily_mean_SWC_and_daily_total_GPP_and_Precip_per_year)- [Plot Daily Mean Soil Water Content and Air temperature with Daily Total GPP and Light (SW-IR)](bokeh_plot_daily_mean_SWC_and_TA_and_daily_total_GPP_and_SWIR_per_year) Back to TOC 10.1. Create an Interactive Plot from 2 Pandas DataFrame columnThis part presents how to plot data from a Pandas DataFrame using Bokeh. The plot presents how different types of ecosystem variables change during the period of one year. The user is able to change the content of the plot using a set of widgets (see Figure below). There are two dropdown widgets that control the selection of year and ecosystem variable, a color-picker that controls the color of the line in the plot and, finally, an execution-button used to update the content of the plot with the user's choice in all of the aforementioned widgets. It is also possible to interact with the content of a plot using the Bokeh Plot Toolbox, located in the right part of the plot.In order to use a tool in the Bokeh Plot ToolBox, you have to activate it. You can activate a tool just by clicking on it. An active tool is always highlighted with a blue line next to its symbol. For instance, in the figure above, the Pan-tool is the only active tool.Use the ```Pan-tool``` to move the content of the plot up or down, right or left.Use the ```Box Zoom-tool``` to zoom-in on a rectangular selected area. Use the ```Wheel Zoom-tool``` to zoom-in over an area in the plot just by scrolling.Press the ```Reset``` button to restore the plot to its initial state.Press the ```Save``` button to save a copy of the plot to your computer.Press the ```Hover``` button and hover with your mouse over the plot to see annotations. 
[Go to Plot] **Plotting Function**def plotIcosEcoIndexedDF(df, variable, color): #Create a figure object: p = figure(plot_width=900, plot_height=500, x_axis_label='Time (UTC)', y_axis_label= measurement_dict_eng[variable].replace('(NEE)','') + ' ('+unit_dict[variable]+')', x_axis_type='datetime', title = measurement_dict_eng[variable] +' - Hyltemossa, Sverige (' + str(df.index[1].year)+')', tools='pan,box_zoom,wheel_zoom,reset,save') #Extract time and tracer values for every data level: x1 = df.index.values y1 = df[variable].values #Create a circle and line glyph for the values of every emission category: r0 = p.circle(x1, y1, radius=.12, color=color) r1 = p.line(x1, y1, line_width=1, color=color) #Add tooltip on hover: p.add_tools(HoverTool(tooltips=[ ('Time (UTC)','@x{%Y-%m-%d %H:%M:%S}'), (measurement_dict_eng[variable] + ' ('+unit_dict[variable]+')','@y{0.f}'), ], formatters={ '@x' : 'datetime', # use 'datetime' formatter for 'date' field }, # display a tooltip whenever the cursor is vertically in line with a glyph mode='vline' )) #Set title attributes: p.title.align = 'center' p.title.text_font_size = '13pt' p.title.offset = 15 #Set axis label font style: p.xaxis.axis_label_text_font_style = 'normal' p.yaxis.axis_label_text_font_style = 'normal' p.xaxis.axis_label_standoff = 15 #Sets the distance of the label from the x-axis in screen units p.yaxis.axis_label_standoff = 15 #Sets the distance of the label from the y-axis in screen units #Set the copyright label position: label_opts = dict(x=0, y=10, x_units='screen', y_units='screen') #Create a label object and format it: caption1 = Label(text="© ICOS ERIC", **label_opts) caption1.text_font_size = '8pt' #Deactivate hover-tool, which is by default active: p.toolbar.active_inspect = None #Add label to plot: p.add_layout(caption1, 'below') #Set the output location: output_notebook() #Show plot: show(p)**Widget Function**#Function that create widgets to update plot with icos ecosystem data: def create_widgets_icos_eco_htm(): #Import modules: from ipywidgets import interact_manual, ColorPicker, Dropdown #Create a list with the years for which data exist: year_ls = [2015, 2016, 2017, 2018] #Create a list to store the different ecosystem variables: eco_var_ls = [tuple(reversed(tupl)) for tupl in tuple(measurement_dict_eng.items())] #Create dropdown-widgets: years = Dropdown(options = year_ls) eco_vars = Dropdown(options = eco_var_ls) #Function that updates the plot based on the user's selection: def update_eco_line_plot(Year, Variable, color): #Create a dictionary for every type of dataframe with ICOS ecosystem data: icos_eco_df_dict ={'2015': HTM_eko_2015_indexed, '2016': HTM_eko_2016_indexed, '2017': HTM_eko_2017_indexed, '2018': HTM_eko_2018_indexed} icos_eco_precip_df_dict ={'2015': HTM_eko_precip_2015_indexed, '2016': HTM_eko_precip_2016_indexed, '2017': HTM_eko_precip_2017_indexed, '2018': HTM_eko_precip_2018_indexed} #Check selected variable and get the name of its corresponding pandas dataframe: if(Variable in HTM_eko_2015_indexed): dataFrame = icos_eco_df_dict[str(Year)] else: dataFrame = icos_eco_precip_df_dict[str(Year)] #Call function to show plot: plotIcosEcoIndexedDF(dataFrame, Variable, color) #Create function that contains a box of widgets: interact = interact_manual(update_eco_line_plot, Year = years, Variable = eco_vars, color = ColorPicker(concise=False, description='Pick a color', value='#3973ac', disabled=False)) #Set the font of the widgets included in interact_manual: interact.widget.children[0].layout.width = '460px' 
interact.widget.children[0].layout.margin = '40px 2px 2px 2px' interact.widget.children[1].layout.width = '460px' interact.widget.children[2].layout.width = '460px' interact.widget.children[3].description = 'Update Plot' interact.widget.children[3].button_style = 'danger' interact.widget.children[3].style.button_color = '#3973ac' interact.widget.children[3].layout.margin = '20px 10px 40px 200px' # top/right/bottom/leftBokeh Interactive Plot - Displaying Values from 2 Pandas DataFrame columns**Call Function**#Call function to display widgets: create_widgets_icos_eco_htm()Back to TOC 10.2. Plot Statistics with Bokeh Visualization LibraryIn this part, you will learn how to create Barplots with Bokeh Visualization Library over statistics that have been calculated over all values of a year or over a selection of values of a certain year. The statistics are calculated using the code that was presented in the corresponding previous part. This part is divided into two subparts:- [Plot Statistics with Bokeh Visualization Library (Annual Statistics-Complete Year)](bokeh_plot_stat_barplot_annual_total)- [Plot Statistics with Bokeh Visualization Library (Annual Statistics-Part of Year)](bokeh_plot_stat_barplot_annual_filtered)Every subpart includes three code-cells:- The first code-cell includes code for the function that handles the format of the barplot. - The second code-cell includes a function that creates and formats the widgets (e.g. dropdown lists, color-pickers, execution button, etc.). The second code-cell also includes a nested function that calculates the statistics and updates the content of the barplot based on the users selection.- The third code-cell includes a call to the function that creates and displays the widgets (i.e. function included in the 2nd code-cell). 10.2.1. Plot Statistics with Bokeh Visualization Library (Annual Statistics-Complete Year)This subpart is dedicated to calculating statistics over all values of a year and displaying the results in the form of a barplot. Every bar represents the statistic value for one year. The user is able to interact and the change the content of a plot using a set of widgets. The available widgets are two dropdown lists that control the type of statistic and the variable over which the statistic should be calculated over, two color-pickers that allow the user to set the color for the bars and the text on the bars in the barplot and, finally, an execution button.The user is free to change the values in the widgets, but the content of the plot will change to show the results of the new selection of widget-values only once the execution-button is clicked. The barplot includes an interactive toolbox menu (see Figure below). From here it is possible to pan, zoom-in and out, reset the plot to its initial state and save a copy of the plot to your computer. 
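The widget function further below builds its statistics list with one ```append``` per year and statistic. Purely as an illustration (not part of the original notebook), the same list could be produced with a small helper that loops over a dictionary of yearly dataframes; the helper name and the dictionary layout are assumptions.

```python
#Hypothetical helper: collect one rounded statistic per year instead of repeating appends.
def annual_stat_list(df_dict, variable, stat, ndigits=1):
    #Map the dropdown labels to the corresponding pandas method names:
    stat_funcs = {'Min': 'min', 'Max': 'max', 'Mean': 'mean', 'St dev': 'std'}
    #Compute the chosen statistic for every year (sorted by year) and round the result:
    return [round(df[variable].agg(stat_funcs[stat]), ndigits)
            for year, df in sorted(df_dict.items())]

#Example (assuming the dataframe dictionaries defined inside the widget function):
#stat_list = annual_stat_list(icos_eco_df_dict, 'TA_1_1_1', 'Mean')
```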
[Go to Plot] **Plotting Function**def plotIcosEcoBarPlotAnnualStat(Variable, Stat, stat_list, year_ls, bar_color, txt_color): #Import modules: from bokeh.models import ColumnDataSource, LabelSet, Label, FixedTicker #Define y-position of statistics-label (in the middle of the bar-glyph): y_label_pos_ls = [(stat/2)-0.5 if((stat<=-1) or (stat>=1)) else stat/2 for stat in stat_list] #Create ColumnDataSource Object: source = ColumnDataSource(data=dict(years=year_ls, stats=stat_list, y_label_pos = y_label_pos_ls)) #Create figure object: p = figure(plot_width=600, plot_height=450, title = 'Hyltemossa: '+Stat+' '+measurement_dict_eng[Variable]+' per Year', x_axis_label = 'Year', y_axis_label = Stat+' '+measurement_dict_eng[Variable]+ ' ('+unit_dict[Variable]+')') #Add bar glyphs: p.vbar(x='years', width=0.5, bottom=0, top='stats', source=source, color=bar_color) #orange #Set title attributes: p.title.align = 'center' p.title.text_font_size = '12pt' p.title.offset = 15 #Set axis label font style: p.xaxis.axis_label_text_font_style = 'normal' p.yaxis.axis_label_text_font_style = 'normal' p.xaxis.axis_label_standoff = 15 #Sets the distance of the label from the x-axis in screen units p.yaxis.axis_label_standoff = 15 #Sets the distance of the label from the y-axis in screen units #Add labels to the bar glyphs: labels = LabelSet(x='years', y='y_label_pos', text='stats', level='glyph', text_color=txt_color, x_offset=0, y_offset=0, source=source, render_mode='css', text_align='center') #Set the copyright-label position: label_opts = dict(x=0, y=5, x_units='screen', y_units='screen') #Create a label object and format it: caption1 = Label(text="© ICOS ERIC", **label_opts) caption1.text_font_size = '8pt' #Add the bar-glyph lables to the plot: p.add_layout(labels) #Add label to plot: p.add_layout(caption1, 'below') #Set x-axis tickers: p.xaxis.ticker = FixedTicker(ticks=year_ls) #Define output location: output_notebook() #Show plot: show(p)**Widget Function**def create_widgets_icos_eco_htm_stat_annual(): #Import modules: from ipywidgets import interact_manual, ColorPicker, Dropdown #Create a list with the years for which data exist: year_ls = [2015, 2016, 2017, 2018] #Create a list containing the names of the statistical operations: stat_ls = ['Min', 'Max', 'Mean', 'St dev'] #Create a list to store the different ecosystem variables: eco_var_ls = [tuple(reversed(tupl)) for tupl in tuple(measurement_dict_eng.items())] #Create dropdown-widgets: eco_vars = Dropdown(options = eco_var_ls) stats = Dropdown(options = stat_ls) #Function that updates the plot based on the user's selection: def update_eco_bar_plot(Stat, Variable, bar_color, txt_color): #Create a dictionary for every type of dataframe with ICOS ecosystem data: icos_eco_df_dict ={'2015': HTM_eko_2015_indexed, '2016': HTM_eko_2016_indexed, '2017': HTM_eko_2017_indexed, '2018': HTM_eko_2018_indexed} icos_eco_precip_df_dict ={'2015': HTM_eko_precip_2015_indexed, '2016': HTM_eko_precip_2016_indexed, '2017': HTM_eko_precip_2017_indexed, '2018': HTM_eko_precip_2018_indexed} #Declare and initialize list to store the stats: stat_list = [] #Check if the selected variable is included in the Temp, GPP, NEE, RECO & SW-IR pandas dataframe: if(Variable in HTM_eko_2015_indexed): #Check the type of the selected statistic and store the stat-value #of every year in a list: if(Stat=='Min'): stat_list.append(round(HTM_eko_2015_indexed[Variable].min(),1)) stat_list.append(round(HTM_eko_2016_indexed[Variable].min(),1)) 
stat_list.append(round(HTM_eko_2017_indexed[Variable].min(),1)) stat_list.append(round(HTM_eko_2018_indexed[Variable].min(),1)) elif(Stat=='Max'): stat_list.append(round(HTM_eko_2015_indexed[Variable].max(),1)) stat_list.append(round(HTM_eko_2016_indexed[Variable].max(),1)) stat_list.append(round(HTM_eko_2017_indexed[Variable].max(),1)) stat_list.append(round(HTM_eko_2018_indexed[Variable].max(),1)) elif(Stat=='Mean'): stat_list.append(round(HTM_eko_2015_indexed[Variable].mean(),1)) stat_list.append(round(HTM_eko_2016_indexed[Variable].mean(),1)) stat_list.append(round(HTM_eko_2017_indexed[Variable].mean(),1)) stat_list.append(round(HTM_eko_2018_indexed[Variable].mean(),1)) elif(Stat=='St dev'): stat_list.append(round(HTM_eko_2015_indexed[Variable].std(),1)) stat_list.append(round(HTM_eko_2016_indexed[Variable].std(),1)) stat_list.append(round(HTM_eko_2017_indexed[Variable].std(),1)) stat_list.append(round(HTM_eko_2018_indexed[Variable].std(),1)) else: print('Statistic does not exist!') #If the selected variable is in the precipitation and soil-water-content dataframe: else: #Check the type of the selected statistic and store the stat-value #of every year in a list: if(Stat=='Min'): stat_list.append(round(HTM_eko_precip_2015_indexed[Variable].min(),1)) stat_list.append(round(HTM_eko_precip_2016_indexed[Variable].min(),1)) stat_list.append(round(HTM_eko_precip_2017_indexed[Variable].min(),1)) stat_list.append(round(HTM_eko_precip_2018_indexed[Variable].min(),1)) elif(Stat=='Max'): stat_list.append(round(HTM_eko_precip_2015_indexed[Variable].max(),1)) stat_list.append(round(HTM_eko_precip_2016_indexed[Variable].max(),1)) stat_list.append(round(HTM_eko_precip_2017_indexed[Variable].max(),1)) stat_list.append(round(HTM_eko_precip_2018_indexed[Variable].max(),1)) elif((Stat=='Mean') and (Variable=='P_1_1_1')): stat_list.append(round(HTM_eko_precip_2015_indexed[Variable].mean(),2)) stat_list.append(round(HTM_eko_precip_2016_indexed[Variable].mean(),2)) stat_list.append(round(HTM_eko_precip_2017_indexed[Variable].mean(),2)) stat_list.append(round(HTM_eko_precip_2018_indexed[Variable].mean(),2)) elif((Stat=='Mean') and (Variable!='P_1_1_1')): stat_list.append(round(HTM_eko_precip_2015_indexed[Variable].mean(),1)) stat_list.append(round(HTM_eko_precip_2016_indexed[Variable].mean(),1)) stat_list.append(round(HTM_eko_precip_2017_indexed[Variable].mean(),1)) stat_list.append(round(HTM_eko_precip_2018_indexed[Variable].mean(),1)) elif(Stat=='St dev'): stat_list.append(round(HTM_eko_precip_2015_indexed[Variable].std(),1)) stat_list.append(round(HTM_eko_precip_2016_indexed[Variable].std(),1)) stat_list.append(round(HTM_eko_precip_2017_indexed[Variable].std(),1)) stat_list.append(round(HTM_eko_precip_2018_indexed[Variable].std(),1)) else: print('Statistic does not exist!') #Call function to show plot: plotIcosEcoBarPlotAnnualStat(Variable, Stat, stat_list, year_ls, bar_color, txt_color) #Create function that contains a box of widgets: interact = interact_manual(update_eco_bar_plot, Variable = eco_vars, Stat = stats, bar_color = ColorPicker(concise=False, description='Bar color', value='#3973ac', disabled=False), txt_color = ColorPicker(concise=False, description='Text color', value='orange', disabled=False)) #Set the font of the widgets included in interact_manual: interact.widget.children[0].layout.width = '460px' interact.widget.children[0].layout.margin = '40px 2px 2px 2px' interact.widget.children[1].layout.width = '460px' interact.widget.children[2].layout.width = '460px' 
interact.widget.children[3].layout.width = '460px' interact.widget.children[4].description = 'Update Plot' interact.widget.children[4].button_style = 'danger' interact.widget.children[4].style.button_color = '#3973ac' interact.widget.children[4].layout.margin = '20px 10px 40px 200px' # top/right/bottom/leftDisplay Widgets for Barplots - Annual Statistics (All values)**Call Function**#Call function to display widgets: create_widgets_icos_eco_htm_stat_annual()Back to TOC 10.2.2. Plot Statistics with Bokeh Visualization Library (Annual Statistics-Part of Year)This subpart is dedicated to calculating statistics over a selection of values of a year and displaying the results in the form of a barplot. Every bar represents the statistic value for one year. Here the results only present statistics calculated over values belonging to the time period June-August.The user is able to interact and the change the content of a plot using a set of widgets. The available widgets are two dropdown lists that control the type of statistic and the variable over which the statistic should be calculated over, two color-pickers that allow the user to set the color for the bars and the text on the bars in the barplot and, finally, an execution button.The user is free to change the values in the widgets, but the content of the plot will change to show the results of the new selection of widget-values only once the execution-button is clicked. The barplot includes an interactive toolbox menu (see Figure below). From here it is possible to pan, zoom-in and out, reset the plot to its initial state and save a copy of the plot to your computer. [Go to Plot] **Plotting Function**def plotIcosEcoBarPlotAnnualStatJunAug(Variable, Stat, stat_list, year_ls, bar_color, txt_color): #Import modules: from bokeh.models import ColumnDataSource, LabelSet, Label, FixedTicker #Define y-position of statistics-label (in the middle of the bar-glyph): y_label_pos_ls = [(stat/2)-0.5 if((stat<=-1) or (stat>=1)) else stat/2 for stat in stat_list] #Create ColumnDataSource Object: source = ColumnDataSource(data=dict(years=year_ls, stats=stat_list, y_label_pos = y_label_pos_ls)) #Create figure object: p = figure(plot_width=600, plot_height=450, title = 'Hyltemossa: '+Stat+' '+measurement_dict_eng[Variable]+' (Jun-Aug) per Year', x_axis_label = 'Year', y_axis_label = Stat+' '+measurement_dict_eng[Variable]+ ' ('+unit_dict[Variable]+')') #Add bar glyphs: p.vbar(x='years', width=0.5, bottom=0, top='stats', source=source, color=bar_color) #orange #Set title attributes: p.title.align = 'center' p.title.text_font_size = '12pt' p.title.offset = 15 #Set axis label font style: p.xaxis.axis_label_text_font_style = 'normal' p.yaxis.axis_label_text_font_style = 'normal' p.xaxis.axis_label_standoff = 15 #Sets the distance of the label from the x-axis in screen units p.yaxis.axis_label_standoff = 15 #Sets the distance of the label from the y-axis in screen units #Add labels to the bar glyphs: labels = LabelSet(x='years', y='y_label_pos', text='stats', level='glyph', text_color=txt_color, x_offset=0, y_offset=0, source=source, render_mode='css', text_align='center') #Set the copyright-label position: label_opts = dict(x=0, y=5, x_units='screen', y_units='screen') #Create a label object and format it: caption1 = Label(text="© ICOS ERIC", **label_opts) caption1.text_font_size = '8pt' #Add the bar-glyph lables to the plot: p.add_layout(labels) #Add label to plot: p.add_layout(caption1, 'below') #Set x-axis tickers: p.xaxis.ticker = FixedTicker(ticks=year_ls) 
#Define output location: output_notebook() #Show plot: show(p)**Widget Function**def create_widgets_icos_eco_htm_stat_annual_jun_aug(): #Import modules: from ipywidgets import interact_manual, ColorPicker, Dropdown #Create a list with the years for which data exist: year_ls = [2015, 2016, 2017, 2018] #Create a list containing the names of the statistical operations: stat_ls = ['Min', 'Max', 'Mean', 'St dev'] #Create a list to store the different ecosystem variables: eco_var_ls = [tuple(reversed(tupl)) for tupl in tuple(measurement_dict_eng.items())] #Create dropdown-widgets: eco_vars = Dropdown(options = eco_var_ls) stats = Dropdown(options = stat_ls) #Function that updates the plot based on the user's selection: def update_eco_bar_plot(Stat, Variable, bar_color, txt_color): #Create a dictionary for every type of dataframe with ICOS ecosystem data: icos_eco_df_dict ={'2015': HTM_summermonths_2015, '2016': HTM_summermonths_2016, '2017': HTM_summermonths_2017, '2018': HTM_summermonths_2018} icos_eco_precip_df_dict ={'2015': HTM_P_summermonths_2015, '2016': HTM_P_summermonths_2016, '2017': HTM_P_summermonths_2017, '2018': HTM_P_summermonths_2018} #Declare and initialize list to store the stats: stat_list = [] #Check if the selected variable is included in the Temp, GPP, NEE, RECO & SW-IR pandas dataframe: if(Variable in HTM_summermonths_2015): #Check the type of the selected statistic and store the stat-value #of every year in a list: if(Stat=='Min'): stat_list.append(round(HTM_summermonths_2015[Variable].min(),1)) stat_list.append(round(HTM_summermonths_2016[Variable].min(),1)) stat_list.append(round(HTM_summermonths_2017[Variable].min(),1)) stat_list.append(round(HTM_summermonths_2018[Variable].min(),1)) elif(Stat=='Max'): stat_list.append(round(HTM_summermonths_2015[Variable].max(),1)) stat_list.append(round(HTM_summermonths_2016[Variable].max(),1)) stat_list.append(round(HTM_summermonths_2017[Variable].max(),1)) stat_list.append(round(HTM_summermonths_2018[Variable].max(),1)) elif(Stat=='Mean'): stat_list.append(round(HTM_summermonths_2015[Variable].mean(),1)) stat_list.append(round(HTM_summermonths_2016[Variable].mean(),1)) stat_list.append(round(HTM_summermonths_2017[Variable].mean(),1)) stat_list.append(round(HTM_summermonths_2018[Variable].mean(),1)) elif(Stat=='St dev'): stat_list.append(round(HTM_summermonths_2015[Variable].std(),1)) stat_list.append(round(HTM_summermonths_2016[Variable].std(),1)) stat_list.append(round(HTM_summermonths_2017[Variable].std(),1)) stat_list.append(round(HTM_summermonths_2018[Variable].std(),1)) else: print('Statistic does not exist!') #If the selected variable is in the precipitation and soil-water-content dataframe: else: #Check the type of the selected statistic and store the stat-value #of every year in a list: if(Stat=='Min'): stat_list.append(round(HTM_P_summermonths_2015[Variable].min(),1)) stat_list.append(round(HTM_P_summermonths_2016[Variable].min(),1)) stat_list.append(round(HTM_P_summermonths_2017[Variable].min(),1)) stat_list.append(round(HTM_P_summermonths_2018[Variable].min(),1)) elif(Stat=='Max'): stat_list.append(round(HTM_P_summermonths_2015[Variable].max(),1)) stat_list.append(round(HTM_P_summermonths_2016[Variable].max(),1)) stat_list.append(round(HTM_P_summermonths_2017[Variable].max(),1)) stat_list.append(round(HTM_P_summermonths_2018[Variable].max(),1)) elif((Stat=='Mean') and (Variable=='P_1_1_1')): stat_list.append(round(HTM_P_summermonths_2015[Variable].mean(),2)) 
stat_list.append(round(HTM_P_summermonths_2016[Variable].mean(),2)) stat_list.append(round(HTM_P_summermonths_2017[Variable].mean(),2)) stat_list.append(round(HTM_P_summermonths_2018[Variable].mean(),2)) elif((Stat=='Mean') and (Variable!='P_1_1_1')): stat_list.append(round(HTM_P_summermonths_2015[Variable].mean(),1)) stat_list.append(round(HTM_P_summermonths_2016[Variable].mean(),1)) stat_list.append(round(HTM_P_summermonths_2017[Variable].mean(),1)) stat_list.append(round(HTM_P_summermonths_2018[Variable].mean(),1)) elif(Stat=='St dev'): stat_list.append(round(HTM_P_summermonths_2015[Variable].std(),1)) stat_list.append(round(HTM_P_summermonths_2016[Variable].std(),1)) stat_list.append(round(HTM_P_summermonths_2017[Variable].std(),1)) stat_list.append(round(HTM_P_summermonths_2018[Variable].std(),1)) else: print('Statistic does not exist!') #Call function to show plot: plotIcosEcoBarPlotAnnualStatJunAug(Variable, Stat, stat_list, year_ls, bar_color, txt_color) #Create function that contains a box of widgets: interact = interact_manual(update_eco_bar_plot, Variable = eco_vars, Stat = stats, bar_color = ColorPicker(concise=False, description='Bar color', value='#3973ac', disabled=False), txt_color = ColorPicker(concise=False, description='Text color', value='orange', disabled=False)) #Set the font of the widgets included in interact_manual: interact.widget.children[0].layout.width = '460px' interact.widget.children[0].layout.margin = '40px 2px 2px 2px' interact.widget.children[1].layout.width = '460px' interact.widget.children[2].layout.width = '460px' interact.widget.children[3].layout.width = '460px' interact.widget.children[4].description = 'Update Plot' interact.widget.children[4].button_style = 'danger' interact.widget.children[4].style.button_color = '#3973ac' interact.widget.children[4].layout.margin = '20px 10px 40px 200px' # top/right/bottom/leftDisplay Widgets for Barplots - Annual Statistics (Selection of values)**Call Function**#Call function to show widgets: create_widgets_icos_eco_htm_stat_annual_jun_aug()Back to TOC 10.3. Create Plots with Cumulative Sums of Daily Totals and Daily Means per YearIn this part we will create plots of cumulative sums of daily totals or daily means per year for a selection of ecosystem variables. These plots provide a general overview of how the values of a selected variable might vary between different years. The first step is to produce daily totals or daily means for every variable. It might also be necessary to perform some unit conversions. A step by step explanation of the processes follows.This part is subdivided into the following parts:- [Compute Daily Totals and Daily Means per Year](compute_daily_totals_and_daily_means_per_year)- [Conversion of Units for the Computation of Daily Totals](conversion_of_units_for_daily_totals)- [Compute Cumulative Sums of Daily Totals per Year](compute_iterative_sums_of_daily_totals_per_year)- [Convert the Units of the Cumulative Sum Values](convert_units_iterative_sums)- [Create Interactive Plots to Display the Cumulative Sums for every Variable](bokeh_plot_stat_summed_values_per_year) [Go to plot]   Back to TOC 10.3.1. Compute Daily Totals and Daily Means per YearWe will compute the daily sum of the following variables:- Air Temperature- Precipitation- Respiration- Gross Primary Production (GPP)- Carbon Flux - Net Ecosystem Exchange (NEE)- Incoming Shortwave Infrared (SW-IR) Solar RadiationTo compute the daily sum of each variable, we will use some of Python's built-in methods for Pandas DataFrames. 
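As a minimal, self-contained illustration of the resampling pattern used throughout this part (this sketch is not from the original notebook and uses toy values, not ICOS data):

```python
#Toy example of daily aggregation on a DateTime-indexed pandas Series:
import pandas as pd

toy = pd.Series([1.0, 2.0, 3.0, 4.0],
                index=pd.to_datetime(['2015-01-01 00:30', '2015-01-01 01:00',
                                      '2015-01-02 00:30', '2015-01-02 01:00']))

print(toy.resample('D').sum())    #daily totals: 3.0 for 2015-01-01 and 7.0 for 2015-01-02
print(toy.resample('D').mean())   #daily means:  1.5 for 2015-01-01 and 3.5 for 2015-01-02
```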
The code cells below show how to compute the daily sums, using Air Temperature values for 2015 as an example. Observe that the Python built-in methods, used in this case, will only work if you have set a column containing Python DateTime objects as the index of your Pandas DataFrame. We will compute the daily average of the following variables:- Air Temperature- Soil Water ContentSimilarly to the previous example, to compute the daily mean of each variable we will again use some of Python's built-in methods for a Pandas DataFrame. The code below computes the daily averages of Air Temperature values for 2015. Again, this code will only work if you have set a column containing Python DateTime objects as the index of your Pandas DataFrame. [Back to Create Plots of Cumulative Sums - Intro] 10.3.2. Conversion of Units for the Computation of Daily TotalsIt is necessary to convert the units of some of the ecosystem variables before computing their daily sums or daily means.For example, there is a new Respiration value every 30 min. However, the Respiration unit is **μmol m$^{-2}$ s$^{-1}$**. In order to compute the daily sum of Respiration, it is necessary to first compute the Respiration for every 30 min and then sum the computed values. To get the Respiration in **μmol m$^{-2}$ day$^{-1}$**, multiply every value by 60 (to get the respiration per minute - there are 60 sec in one minute) and then by 30 (to get the respiration per 30 min) and, finally, sum up all the values.The same conversion should be applied to the Gross Primary Production (GPP) and Net Ecosystem Exchange (NEE) values, as they have the same unit.Incoming Shortwave Infrared (SW-IR) Solar Radiation is given as Watts per square meter (W/m$^2$). In order to get the total sum of Incoming SW-IR Solar Radiation per day, it is necessary to convert Watts to Joules. Because **1 Watt = 1 Joule/sec**, we will compute Incoming SW-IR Radiation as Joules per square meter per 30 min by multiplying the current values first by 60 (to get the SW-IR Radiation per minute) and then by 30 (to get SW-IR Radiation per 30 minutes). [Back to Create Plots of Cumulative Sums - Intro] **Air Temperature - Daily Totals**#Get daily sum of Air Temperature for every dataframe (i.e. year): HTM_eko_TA_2015_daily_sum = HTM_eko_2015_indexed.TA_1_1_1.resample('D').sum().dropna() #disregard NaN-values HTM_eko_TA_2016_daily_sum = HTM_eko_2016_indexed.TA_1_1_1.resample('D').sum().dropna() #disregard NaN-values HTM_eko_TA_2017_daily_sum = HTM_eko_2017_indexed.TA_1_1_1.resample('D').sum().dropna() #disregard NaN-values HTM_eko_TA_2018_daily_sum = HTM_eko_2018_indexed.TA_1_1_1.resample('D').sum().dropna() #disregard NaN-values #View the 5 first rows of the result: #HTM_eko_TA_2018_daily_sum.head(5)**Air Temperature - Daily Average**#Get daily average of Air Temperature for every dataframe (i.e. year): HTM_eko_TA_2015_daily_mean = HTM_eko_2015_indexed.TA_1_1_1.resample('D').mean().dropna() #disregard NaN-values HTM_eko_TA_2016_daily_mean = HTM_eko_2016_indexed.TA_1_1_1.resample('D').mean().dropna() #disregard NaN-values HTM_eko_TA_2017_daily_mean = HTM_eko_2017_indexed.TA_1_1_1.resample('D').mean().dropna() #disregard NaN-values HTM_eko_TA_2018_daily_mean = HTM_eko_2018_indexed.TA_1_1_1.resample('D').mean().dropna() #disregard NaN-values #View the 5 first rows of the result: #HTM_eko_TA_2015_daily_mean.head(5)**Precipitation - Daily Totals**#Get daily sum of Precipitation for every dataframe (i.e.
year): HTM_eko_P_2015_daily_sum = HTM_eko_precip_2015_indexed.P_1_1_1.resample('D').sum().dropna() #disregard NaN-values HTM_eko_P_2016_daily_sum = HTM_eko_precip_2016_indexed.P_1_1_1.resample('D').sum().dropna() #disregard NaN-values HTM_eko_P_2017_daily_sum = HTM_eko_precip_2017_indexed.P_1_1_1.resample('D').sum().dropna() #disregard NaN-values HTM_eko_P_2018_daily_sum = HTM_eko_precip_2018_indexed.P_1_1_1.resample('D').sum().dropna() #disregard NaN-values**Respiration - Daily Totals**#RESPIRATION HALF-HOURLY: #Add column with RECO computed as micromoles per square meter per 30 min (conv. sec to 30min): HTM_eko_2015_indexed['RECO_PI_1_1_1_30min'] = [HTM_eko_2015_indexed.RECO_PI_1_1_1[ind]*60*30 for ind in range(len(HTM_eko_2015_indexed))] HTM_eko_2016_indexed['RECO_PI_1_1_1_30min'] = [HTM_eko_2016_indexed.RECO_PI_1_1_1[ind]*60*30 for ind in range(len(HTM_eko_2016_indexed))] HTM_eko_2017_indexed['RECO_PI_1_1_1_30min'] = [HTM_eko_2017_indexed.RECO_PI_1_1_1[ind]*60*30 for ind in range(len(HTM_eko_2017_indexed))] HTM_eko_2018_indexed['RECO_PI_1_1_1_30min'] = [HTM_eko_2018_indexed.RECO_PI_1_1_1[ind]*60*30 for ind in range(len(HTM_eko_2018_indexed))] #Get daily sum of Respiration for every dataframe (i.e. year): HTM_eko_RECO_2015_daily_sum = HTM_eko_2015_indexed.RECO_PI_1_1_1_30min.resample('D').sum().dropna() #disregard NaN-values HTM_eko_RECO_2016_daily_sum = HTM_eko_2016_indexed.RECO_PI_1_1_1_30min.resample('D').sum().dropna() #disregard NaN-values HTM_eko_RECO_2017_daily_sum = HTM_eko_2017_indexed.RECO_PI_1_1_1_30min.resample('D').sum().dropna() #disregard NaN-values HTM_eko_RECO_2018_daily_sum = HTM_eko_2018_indexed.RECO_PI_1_1_1_30min.resample('D').sum().dropna() #disregard NaN-values #View the 5 first rows of the result: #HTM_eko_RECO_2015_daily_sum.head(5)**GPP - Daily Totals**#GPP HALF-HOURLY: #Add column with GPP computed as micromoles per square meter per 30 min (conv. sec to 30min): HTM_eko_2015_indexed['GPP_PI_1_1_1_30min'] = [HTM_eko_2015_indexed.GPP_PI_1_1_1[ind]*60*30 for ind in range(len(HTM_eko_2015_indexed))] HTM_eko_2016_indexed['GPP_PI_1_1_1_30min'] = [HTM_eko_2016_indexed.GPP_PI_1_1_1[ind]*60*30 for ind in range(len(HTM_eko_2016_indexed))] HTM_eko_2017_indexed['GPP_PI_1_1_1_30min'] = [HTM_eko_2017_indexed.GPP_PI_1_1_1[ind]*60*30 for ind in range(len(HTM_eko_2017_indexed))] HTM_eko_2018_indexed['GPP_PI_1_1_1_30min'] = [HTM_eko_2018_indexed.GPP_PI_1_1_1[ind]*60*30 for ind in range(len(HTM_eko_2018_indexed))] #Get daily sum of GPP for every dataframe (i.e. year): HTM_eko_GPP_2015_daily_sum = HTM_eko_2015_indexed.GPP_PI_1_1_1_30min.resample('D').sum().dropna() #disregard NaN-values HTM_eko_GPP_2016_daily_sum = HTM_eko_2016_indexed.GPP_PI_1_1_1_30min.resample('D').sum().dropna() #disregard NaN-values HTM_eko_GPP_2017_daily_sum = HTM_eko_2017_indexed.GPP_PI_1_1_1_30min.resample('D').sum().dropna() #disregard NaN-values HTM_eko_GPP_2018_daily_sum = HTM_eko_2018_indexed.GPP_PI_1_1_1_30min.resample('D').sum().dropna() #disregard NaN-values**NEE - Daily Totals**#CARBON FLUX (NEE) HALF-HOURLY: #Add column with Carbon Flux computed as micromoles per square meter per 30 min (conv. 
sec to 30min): HTM_eko_2015_indexed['FC_PI_1_1_1_30min'] = [HTM_eko_2015_indexed.FC_PI_1_1_1[ind]*60*30 for ind in range(len(HTM_eko_2015_indexed))] HTM_eko_2016_indexed['FC_PI_1_1_1_30min'] = [HTM_eko_2016_indexed.FC_PI_1_1_1[ind]*60*30 for ind in range(len(HTM_eko_2016_indexed))] HTM_eko_2017_indexed['FC_PI_1_1_1_30min'] = [HTM_eko_2017_indexed.FC_PI_1_1_1[ind]*60*30 for ind in range(len(HTM_eko_2017_indexed))] HTM_eko_2018_indexed['FC_PI_1_1_1_30min'] = [HTM_eko_2018_indexed.FC_PI_1_1_1[ind]*60*30 for ind in range(len(HTM_eko_2018_indexed))] #Get daily sum of Carbon Flux for every dataframe (i.e. year): HTM_eko_NEE_2015_daily_sum = HTM_eko_2015_indexed.FC_PI_1_1_1_30min.resample('D').sum().dropna() #disregard NaN-values HTM_eko_NEE_2016_daily_sum = HTM_eko_2016_indexed.FC_PI_1_1_1_30min.resample('D').sum().dropna() #disregard NaN-values HTM_eko_NEE_2017_daily_sum = HTM_eko_2017_indexed.FC_PI_1_1_1_30min.resample('D').sum().dropna() #disregard NaN-values HTM_eko_NEE_2018_daily_sum = HTM_eko_2018_indexed.FC_PI_1_1_1_30min.resample('D').sum().dropna() #disregard NaN-values**Shortwave Infrared Incoming Solar Radiation - Daily Totals**#Add column with Light computed as Joules per square meter for 30 min (i.e. 1800 sec): HTM_eko_2015_indexed['SW_IN_1_1_1_30min'] = [HTM_eko_2015_indexed.SW_IN_1_1_1[ind]*60*30 for ind in range(len(HTM_eko_2015_indexed))] HTM_eko_2016_indexed['SW_IN_1_1_1_30min'] = [HTM_eko_2016_indexed.SW_IN_1_1_1[ind]*60*30 for ind in range(len(HTM_eko_2016_indexed))] HTM_eko_2017_indexed['SW_IN_1_1_1_30min'] = [HTM_eko_2017_indexed.SW_IN_1_1_1[ind]*60*30 for ind in range(len(HTM_eko_2017_indexed))] HTM_eko_2018_indexed['SW_IN_1_1_1_30min'] = [HTM_eko_2018_indexed.SW_IN_1_1_1[ind]*60*30 for ind in range(len(HTM_eko_2018_indexed))] #Get daily sum of "Light" for every dataframe (i.e. year): HTM_eko_LIGHT_2015_daily_sum = HTM_eko_2015_indexed.SW_IN_1_1_1_30min.resample('D').sum().dropna() #disregard NaN-values HTM_eko_LIGHT_2016_daily_sum = HTM_eko_2016_indexed.SW_IN_1_1_1_30min.resample('D').sum().dropna() #disregard NaN-values HTM_eko_LIGHT_2017_daily_sum = HTM_eko_2017_indexed.SW_IN_1_1_1_30min.resample('D').sum().dropna() #disregard NaN-values HTM_eko_LIGHT_2018_daily_sum = HTM_eko_2018_indexed.SW_IN_1_1_1_30min.resample('D').sum().dropna() #disregard NaN-values**Soil Water Content - Daily Average**#Get daily mean of "Soil Water Content" for every dataframe (i.e. year): HTM_eko_SWC_2015_daily_mean = HTM_eko_precip_2015_indexed.SWC_4_4_1.resample('D').mean().dropna() #disregard NaN-values HTM_eko_SWC_2016_daily_mean = HTM_eko_precip_2016_indexed.SWC_4_4_1.resample('D').mean().dropna() #disregard NaN-values HTM_eko_SWC_2017_daily_mean = HTM_eko_precip_2017_indexed.SWC_4_4_1.resample('D').mean().dropna() #disregard NaN-values HTM_eko_SWC_2018_daily_mean = HTM_eko_precip_2018_indexed.SWC_4_4_1.resample('D').mean().dropna() #disregard NaN-values10.3.3. Compute Cumulative Sums of Daily Totals per YearNow we will compute the cumulative sums of the daily totals of the following variables:- Air Temperature- Precipitation- Respiration- Gross Primary Production (GPP)- Carbon Flux - Net Ecosystem Exchange (NEE)A cumulative sum of daily sums is computed as following:The next code cell includes a function in Python code that computes the cumulative sum of the elements of a list or pandas series. It returns a list with the result. Python has built-in methods to perform the same computation. In this case, we present both options for explanatory purposes. 
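For reference (not part of the original notebook): one such built-in option is the ```cumsum()``` method of a pandas Series, which gives the same result as ```itertools.accumulate``` for these series. A minimal sketch with toy values:

```python
#Toy example: pandas' built-in cumulative sum
import pandas as pd

toy = pd.Series([1.0, 2.0, 3.0])
print(list(toy.cumsum()))   #[1.0, 3.0, 6.0] - same as list(itertools.accumulate(toy))
```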
Generally, Python's built-in methods are faster and should therefore be preferred over any piece of self-produced code. [Back to Create Plots of Cumulative Sums - Intro] **Cumulative Sum Function (without the use of built-in methods)**#This function does the same as the following python command: #list(itertools.accumulate(HTM_eko_2015_daily_mean)) def cumulative_sum(ls): """ Function that produces a list of the iterative sums of the elements of a list or series. """ #Create and initialize help variables: sum_temp=0 #variable to store intermediate sums sum_ls = [] #list to store iterative sums #Loop through every element in list: for i in range(len(ls)): #Add current daily-average air temperature to sum: sum_temp = sum_temp + ls[i] #Add intermediate sum to list: sum_ls.append(sum_temp) #Return list: return sum_ls**Cumulative Sums of Daily Averaged Air Temperature**#Compute cumulative sums of averaged daily Air Temperature values per year: #Create a pandas series with the iterative daily sums of averaged daily Air Temperature values as data #and their corresponding date as index. HTM_eko_TA_2015_itersum = pd.Series(data = itertools.accumulate(HTM_eko_TA_2015_daily_mean), index = HTM_eko_TA_2015_daily_mean.index) HTM_eko_TA_2016_itersum = pd.Series(data = itertools.accumulate(HTM_eko_TA_2016_daily_mean), index = HTM_eko_TA_2016_daily_mean.index) HTM_eko_TA_2017_itersum = pd.Series(data = itertools.accumulate(HTM_eko_TA_2017_daily_mean), index = HTM_eko_TA_2017_daily_mean.index) HTM_eko_TA_2018_itersum = pd.Series(data = itertools.accumulate(HTM_eko_TA_2018_daily_mean), index = HTM_eko_TA_2018_daily_mean.index)**Cumulative Sums of Daily Summed Precipitation**#Compute cumulative sums of summed daily Precipitation values per year: #Create a pandas series with the iterative daily sums of summed daily Precipitation values as data #and their corresponding date as index. HTM_eko_P_2015_itersum = pd.Series(data = itertools.accumulate(HTM_eko_P_2015_daily_sum), index = HTM_eko_P_2015_daily_sum.index) HTM_eko_P_2016_itersum = pd.Series(data = itertools.accumulate(HTM_eko_P_2016_daily_sum), index = HTM_eko_P_2016_daily_sum.index) HTM_eko_P_2017_itersum = pd.Series(data = itertools.accumulate(HTM_eko_P_2017_daily_sum), index = HTM_eko_P_2017_daily_sum.index) HTM_eko_P_2018_itersum = pd.Series(data = itertools.accumulate(HTM_eko_P_2018_daily_sum), index = HTM_eko_P_2018_daily_sum.index)**Cumulative Sums of Daily Summed Respiration**#Compute cumulative sums of summed daily Respiration values per year: #Create a pandas series with the iterative daily sums of summed daily Respiration values as data #and their corresponding date as index. HTM_eko_RECO_2015_itersum = pd.Series(data = itertools.accumulate(HTM_eko_RECO_2015_daily_sum), index = HTM_eko_RECO_2015_daily_sum.index) HTM_eko_RECO_2016_itersum = pd.Series(data = itertools.accumulate(HTM_eko_RECO_2016_daily_sum), index = HTM_eko_RECO_2016_daily_sum.index) HTM_eko_RECO_2017_itersum = pd.Series(data = itertools.accumulate(HTM_eko_RECO_2017_daily_sum), index = HTM_eko_RECO_2017_daily_sum.index) HTM_eko_RECO_2018_itersum = pd.Series(data = itertools.accumulate(HTM_eko_RECO_2018_daily_sum), index = HTM_eko_RECO_2018_daily_sum.index)**Cumulative Sums of Daily Summed GPP**#Compute cumulative sums of summed daily GPP values per year: #Create a pandas series with the iterative daily sums of summed daily GPP values as data #and their corresponding date as index. 
HTM_eko_GPP_2015_itersum = pd.Series(data = itertools.accumulate(HTM_eko_GPP_2015_daily_sum), index = HTM_eko_GPP_2015_daily_sum.index) HTM_eko_GPP_2016_itersum = pd.Series(data = itertools.accumulate(HTM_eko_GPP_2016_daily_sum), index = HTM_eko_GPP_2016_daily_sum.index) HTM_eko_GPP_2017_itersum = pd.Series(data = itertools.accumulate(HTM_eko_GPP_2017_daily_sum), index = HTM_eko_GPP_2017_daily_sum.index) HTM_eko_GPP_2018_itersum = pd.Series(data = itertools.accumulate(HTM_eko_GPP_2018_daily_sum), index = HTM_eko_GPP_2018_daily_sum.index)**Cumulative Sums of Daily Summed NEE**#Compute cumulative sums of summed daily Carbon Flux (NEE) values per year: #Create a pandas series with the iterative daily sums of summed daily Carbon Flux (NEE) values as data #and their corresponding date as index. HTM_eko_NEE_2015_itersum = pd.Series(data = itertools.accumulate(HTM_eko_NEE_2015_daily_sum), index = HTM_eko_NEE_2015_daily_sum.index) HTM_eko_NEE_2016_itersum = pd.Series(data = itertools.accumulate(HTM_eko_NEE_2016_daily_sum), index = HTM_eko_NEE_2016_daily_sum.index) HTM_eko_NEE_2017_itersum = pd.Series(data = itertools.accumulate(HTM_eko_NEE_2017_daily_sum), index = HTM_eko_NEE_2017_daily_sum.index) HTM_eko_NEE_2018_itersum = pd.Series(data = itertools.accumulate(HTM_eko_NEE_2018_daily_sum), index = HTM_eko_NEE_2018_daily_sum.index)**Cumulative Sums of Daily Summed Incoming Shortwave Infrared Solar Radiation**#Compute cumulative sums of summed daily Shortwave Infrared Incoming Solar Radiation values per year: #Create a pandas series with the iterative daily sums of summed daily Shortwave Infrared Incoming #Solar Radiation values as data and their corresponding date as index. HTM_eko_SWIR_2015_itersum = pd.Series(data = itertools.accumulate(HTM_eko_LIGHT_2015_daily_sum), index = HTM_eko_LIGHT_2015_daily_sum.index) HTM_eko_SWIR_2016_itersum = pd.Series(data = itertools.accumulate(HTM_eko_LIGHT_2016_daily_sum), index = HTM_eko_LIGHT_2016_daily_sum.index) HTM_eko_SWIR_2017_itersum = pd.Series(data = itertools.accumulate(HTM_eko_LIGHT_2017_daily_sum), index = HTM_eko_LIGHT_2017_daily_sum.index) HTM_eko_SWIR_2018_itersum = pd.Series(data = itertools.accumulate(HTM_eko_LIGHT_2018_daily_sum), index = HTM_eko_LIGHT_2018_daily_sum.index)10.3.4. Convert the Units of the Cumulative Sum ValuesSometimes the output values can be quite large. Before visualizing the output, it is considered good practice to test if it possible to change the units so that the visualized/displayed values are lower. This increases the readability of the plot and thus helps the viewer to better comprehend its content.$$1~mole ~=~ 1,000,000~micromoles$$In this case, we will change the unit of the GPP, NEE and Respiration variables from micromoles/m$^2$ day to moles/m$^2$ day. We will also change the unit of the Shortwave Infrared Incoming Solar Radiation from Joules/m$^2$ day to MegaJoules/m$^2$ day.$$1~Megajoule ~=~ 1,000,000~Joules$$The functions bellow will take a Pandas Series with the computed iterative sums for a given variable for a particular year as input parameter and return a Pandas Series whose values have been divided by 1,000,000 to produce the output in a different unit (as described above). The function divides each value in the Pandas Series by 1,000,000. 
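Since both conversions amount to dividing by 1,000,000, the same result can also be obtained directly with pandas arithmetic: scalar division is broadcast over all elements and the DatetimeIndex is preserved. The helper functions below are kept for readability; a minimal equivalent sketch using the series computed above:

```python
#Equivalent one-liners (sketch only, assuming the cumulative-sum series from the previous step):
nee_moles = HTM_eko_NEE_2015_itersum / 1_000_000    #micromoles m-2 -> moles m-2
swir_mj   = HTM_eko_SWIR_2015_itersum / 1_000_000   #Joules m-2     -> MegaJoules m-2
```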
[Back to Create Plots of Cumulative Sums - Intro] **Function - Convert Micromoles to Moles**#Function that transforms the values in the column of a Pandas Series from micromoles to moles: def micromoles2moles(pandasSeries): #Import modules: import pandas as pd #Convert a pandas series column containing values in micromoles to moles: ds = pd.Series(data = pandasSeries.values/1000000, index = pandasSeries.index) #Return Pandas Series: return ds**Function - Convert Joules to MegaJoules**#Function that transforms the values in the column of a Pandas Series from Joules to MegaJoules: def Joules2MegaJoules(pandasSeries): #Import modules: import pandas as pd #Convert a pandas series column containing values in Joules to MegaJoules: ds = pd.Series(data = pandasSeries.values/1000000, index = pandasSeries.index) #Return Pandas Series: return ds10.3.5. Create Interactive Plots to Display the Cumulative Sums of every VariableNow that we are done with our computations, we are going to visualize the results in the form of an interactive plot (see figure). For the purpose of this visualization, we are again going to use the [Bokeh visualization library](https://bokeh.pydata.org/en/latest/docs/user_guide/interaction/legends.html).In order to use a tool in the Bokeh Plot ToolBox, you have to activate it. You can activate a tool just by clicking on it. An active tool is always highlighted with a blue line next to its symbol. For instance, in the figure above, the Pan-tool is the only active tool.Use the ```Pan-tool``` to move the content of the plot up or down, right or left.Use the ```Box Zoom-tool``` to zoom-in on a rectangular selected area. Use the ```Wheel Zoom-tool``` to zoom-in over an area in the plot just by scrolling.Press the ```Save``` button to save a copy of the plot to your computer.Press the ```Reset``` button to restore the plot to its initial state.Press the ```Hover``` button and hover with your mouse over the plot to see annotations.Click on an item in the ```interactive legend``` to make the line of that item disappear.Two functions are used to produce the previously described output. One function creates the plots and another function creates the widgets and handles the updating of the plot based on the user's selection. [Go to plot]   [Back to Create Plots of Cumulative Sums - Intro] **Plotting Function**#Function that creates plots of iterative sums: def plotIterSum(itersum_ls, year_ls, aggregation_type, variable, unit, colors): #Import modules: from bokeh.models import Legend, HoverTool #Dictionary for subscript/superscript transformations of numbers: SUB = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉") SUP = str.maketrans("0123456789", "⁰¹²³⁴⁵⁶⁷⁸⁹") #Create plot p = figure(plot_width=900, plot_height=450, title = 'Hyltemossa Station: Cumulative Sums of '+variable+ ' Daily '+aggregation_type+' per Year', x_axis_label = 'Day of the Year (DOY)', y_axis_label = variable+' ('+unit+')') #Create an empty list that will store the legend info: legend_it = [] for num in range(len(itersum_ls)): #Add Line-glyph: gL = p.line(list(range(1,len(itersum_ls[num])+1)), itersum_ls[num], color=colors[num], line_width=1.5, name=str(year_ls[num])) #Add Circle-glyph: gC = p.circle(list(range(1,len(itersum_ls[num])+1)), itersum_ls[num], radius=.12, color=colors[num], name=str(year_ls[num])) #Add the name and glyph info (i.e. 
colour and marker type) to the legend: legend_it.append((gL.name, [gL,gC])) #Add tooltip on hover: p.add_tools(HoverTool(tooltips=[ ('Year','$name'), ('Day of Year','@x'), (variable+' ('+unit+')','@y{0.f}'), ], formatters={ 'x' : 'datetime', # use 'datetime' formatter for 'date' field }, # display a tooltip whenever the cursor is vertically in line with a glyph mode='vline' )) #Create legend: legend = Legend(items=legend_it, location= 'bottom_center') legend.orientation = 'horizontal' legend.click_policy='hide' legend.spacing = 10 #sets the distance between legend entries #Set title attributes: p.title.align = 'center' p.title.text_font_size = '13pt' p.title.offset = 15 #Set axis label font style: p.xaxis.axis_label_text_font_style = 'normal' p.yaxis.axis_label_text_font_style = 'normal' p.xaxis.axis_label_standoff = 15 #Sets the distance of the label from the x-axis in screen units p.yaxis.axis_label_standoff = 15 #Sets the distance of the label from the y-axis in screen units #Set the copyright-label position: label_opts = dict(x=0, y=72, x_units='screen', y_units='screen') #Create a label object and format it: caption1 = Label(text="© ICOS ERIC", **label_opts) caption1.text_font_size = '8pt' #Add legend to figure: p.add_layout(legend, 'below') #Add label to plot: p.add_layout(caption1, 'below') #Disable the scientific output of numbers on y-axis: p.left[0].formatter.use_scientific = False #Inactivate hover-tool, which is by default active: p.toolbar.active_inspect = None #Set the output location: output_notebook() #Show plot: show(p)**Widget Function**#Function that creates widgets and updates the plot based on the user's selection: def create_widgets_icos_eco_htm_iterative_sums(): #Import modules: from ipywidgets import interact_manual, Dropdown #Create a list with the years for which data exist: year_ls = [2015, 2016, 2017, 2018] #Create a list containing the color that corresponds to every year: colors = ['blue','#abd9e9', 'orange', 'red'] #Create a list to store the different ecosystem variables: eco_var_ls = [tuple(reversed(tupl)) for tupl in tuple(measurement_dict_eng.items())][:len(measurement_dict_eng)-1] #Create dropdown-widgets: eco_vars = Dropdown(options = eco_var_ls) #Function that updates the plot based on the user's selection: def update_iter_sums_plot(Variable): #Check selected variable: if(Variable == 'TA_1_1_1'): #Get list of lists of iterative sums for every year for the selected variable: iter_sum_ls = [HTM_eko_TA_2015_itersum, HTM_eko_TA_2016_itersum, HTM_eko_TA_2017_itersum, HTM_eko_TA_2018_itersum] #Get variable unit: var_unit = unit_dict[Variable] #Define daily aggregation type: daily_aggr_type = 'Means' #If the selected variable is Carbon Flux (NEE): elif(Variable == 'FC_PI_1_1_1'): #Get list of lists of iterative sums for every year for the selected variable: #Convert micromoles to moles. iter_sum_ls = [micromoles2moles(HTM_eko_NEE_2015_itersum), micromoles2moles(HTM_eko_NEE_2016_itersum), micromoles2moles(HTM_eko_NEE_2017_itersum), micromoles2moles(HTM_eko_NEE_2018_itersum)] #Get variable unit: var_unit = 'molm-2'.translate(SUP) #Define daily aggregation type: daily_aggr_type = 'Totals' #If the selected variable is Gross Primary Production (GPP): elif(Variable == 'GPP_PI_1_1_1'): #Get list of lists of iterative sums for every year for the selected variable: #Convert micromoles to moles. 
iter_sum_ls = [micromoles2moles(HTM_eko_GPP_2015_itersum), micromoles2moles(HTM_eko_GPP_2016_itersum), micromoles2moles(HTM_eko_GPP_2017_itersum), micromoles2moles(HTM_eko_GPP_2018_itersum)] #Get variable unit: var_unit = 'molm-2'.translate(SUP) #Define daily aggregation type: daily_aggr_type = 'Totals' #If the selected variable is Respiration (RECO): elif(Variable == 'RECO_PI_1_1_1'): #Get list of lists of iterative sums for every year for the selected variable: #Convert micromoles to moles. iter_sum_ls = [micromoles2moles(HTM_eko_RECO_2015_itersum), micromoles2moles(HTM_eko_RECO_2016_itersum), micromoles2moles(HTM_eko_RECO_2017_itersum), micromoles2moles(HTM_eko_RECO_2018_itersum)] #Get variable unit: var_unit = 'molm-2'.translate(SUP) #Define daily aggregation type: daily_aggr_type = 'Totals' #If the selected variable is Precipitation (P): elif(Variable == 'P_1_1_1'): #Get list of lists of iterative sums for every year for the selected variable: #No unit conversion is needed. iter_sum_ls = [HTM_eko_P_2015_itersum, HTM_eko_P_2016_itersum, HTM_eko_P_2017_itersum, HTM_eko_P_2018_itersum] #Get variable unit: var_unit = unit_dict[Variable] #Define daily aggregation type: daily_aggr_type = 'Totals' #If the selected variable is Shortwave Infrared Incoming Solar Radiation (SW_IN): elif(Variable == 'SW_IN_1_1_1'): #Get list of lists of iterative sums for every year for the selected variable: #Convert Joules to MegaJoules. iter_sum_ls = [Joules2MegaJoules(HTM_eko_SWIR_2015_itersum), Joules2MegaJoules(HTM_eko_SWIR_2016_itersum), Joules2MegaJoules(HTM_eko_SWIR_2017_itersum), Joules2MegaJoules(HTM_eko_SWIR_2018_itersum)] #Get variable unit: var_unit = 'MJm-2'.translate(SUP) #Define daily aggregation type: daily_aggr_type = 'Totals' #If the selected variable is none of the above, print an error message: else: print("Variable doesn't exist!") #Call function to show plot: plotIterSum(iter_sum_ls, year_ls, daily_aggr_type, measurement_dict_eng[Variable], unit_dict_daily[Variable], colors) #Create function that contains a box of widgets: interact = interact_manual(update_iter_sums_plot, Variable = eco_vars) #Set the font of the widgets included in interact_manual: interact.widget.children[0].layout.width = '460px' interact.widget.children[0].layout.margin = '40px 2px 2px 2px' interact.widget.children[1].description = 'Update Plot' interact.widget.children[1].button_style = 'danger' interact.widget.children[1].style.button_color = '#3973ac' interact.widget.children[1].layout.margin = '20px 10px 40px 200px' # top/right/bottom/leftPlot with Cumulative Sums of Daily Totals or Daily Means per Year **Call Function**#Call function to display widgets: create_widgets_icos_eco_htm_iterative_sums()[Back to Create Plots of Iterative Sums - Intro] 10.4. Barplot with Incoming Shortwave-Infrared Solar Radiation (Daily Total) & GPP (Daily Total)The existence of incoming solar radiation is essential for plants to photosynthesize. Gross Primary Production (GPP) can be used to provide a measure of the magnitude of photosynthetic activity. In this part, it is possible to view an interactive plot of daily totals of incoming shortwave infrared solar radiation and daily totals of GPP per year. The objective is to observe how the availability of shortwave infrared incoming solar radiation influences GPP across different years.A dropdown widget is provided for the user to select a year between 2015 and 2017. Two plots will be displayed once the user presses the | Update Plot | button. 
The first plot corresponds to the selected year. The second plot displays the variable values for 2018, the year when the drought occured. [Go to plot] **Rounding Functions**def roundup10(x): """ Project: 'ICOS Carbon Portal' Created: Tue May 07 10:30:00 2018 Last Changed: Tue May 07 10:30:00 2019 Version: 1.0.0 Author(s): Karolina Description: Function that takes a number as input and rounds it up to the closest "10". Input parameters: Number (var_name: 'x', var_type: Integer or Float) Output: Float """ #import module: import math import numbers #Check if input parameter is numeric: if(isinstance(x, numbers.Number)==True): #Return rounded value: return int(math.ceil(x / 10.0)) * 10 #If input parameter is NOT numeric, prompt an error message: else: print("Input parameter is not numeric!") def rounddown10(x): """ Project: 'ICOS Carbon Portal' Created: Tue May 07 10:30:00 2018 Last Changed: Tue May 07 10:30:00 2019 Version: 1.0.0 Author(s): Karolina Description: Function that takes a number as input and floors it down to the closest "10". Input parameters: Number (var_name: 'x', var_type: Integer or Float) Output: Float """ #import module: import math import numbers #Check if input parameter is numeric: if(isinstance(x, numbers.Number)==True): #Return rounded value: return int(math.ceil(x / 10.0)) * 10 -10 #If input parameter is NOT numeric, prompt an error message: else: print("Input parameter is not numeric!") def rounddown20(x): """ Project: 'ICOS Carbon Portal' Created: Tue May 07 09:00:00 2018 Last Changed: Tue May 07 09:00:00 2019 Version: 1.0.0 Author(s): Karolina Description: Function that takes a number as input and floors it to the nearest "20". Input parameters: Number (var_name: 'x', var_type: Integer or Float) Output: Float """ #Import module: import math import numbers #Check if input parameter is numeric: if(isinstance(x, numbers.Number)==True): #If the 2nd digit from the decimal point is an even number: if(int(x/10.0)%2==0): return(int(x / 10.0) * 10) - 20 #If the 2nd digit from the decimal point is an odd number: else: return(int(x / 10.0) * 10) - 10 #If input parameter is not numeric, prompt an error message: else: print("Input parameter is not numeric!") def roundup20(x): """ Project: 'ICOS Carbon Portal' Created: Tue May 07 09:00:00 2018 Last Changed: Tue May 07 09:00:00 2019 Version: 1.0.0 Author(s): Karolina Description: Function that takes a number as input and rounds it up to the closest "20". 
Input parameters: Number (var_name: 'x', var_type: Integer or Float) Output: Float """ #Import module: import math import numbers #Check if input parameter is numeric: if(isinstance(x, numbers.Number)==True): #for positive numbers, multiples of 20.0: if((x>=0)&(((x/10.0)%20)%2 == 0)): return int(math.ceil(x / 10.0)) * 10 +20 #for positive numbers with an even number as 2nd digit: elif((x>0)&(int(x/10.0)%2==0)): return int(math.ceil(x / 10.0)) * 10 +10 #for positive and negative numbers, whose 2nd digit is an odd number (except for i in [-1,-9]): elif(int(x/10.0)%2!=0): return int((x / 10.0)) * 10 +10 #for negative numbers, whose 1st or 2nd digit is an even number: elif((x<-10) & (int(x)%2==0)): return int((x / 10.0)) * 10 +20 else: return 0 #If input parameter is NOT numeric, prompt an error message: else: print("Input parameter is not numeric!")**Function - Define the range of the y-axes (2 y-axes)**def set_yranges_2y_barplot(y1_min, y1_max, y2_min, y2_max, y1_step, y2_step ,new_yrange_name): """ Project: 'ICOS Carbon Portal' Created: Tue May 07 10:30:00 2018 Last Changed: Tue May 07 10:30:00 2019 Version: 1.0.0 Author(s): Karolina Description: Function that takes the primary and secondary y-axis min/max values as well as the step values for every y-axis and the secondary y-axis new range name as input parameters, performs computations so that the two axes are alligned and returns their corresponding RangeId objects. Works only for Bokeh plots. Input parameters: 1. Min value of primary y-axis (var_name: 'y1_min', var_type: Integer or Float) 2. Max value of primary y-axis (var_name: 'y1_max', var_type: Integer or Float) 3. Min value of secondary y-axis (var_name: 'y2_min', var_type: Integer or Float) 4. Max value of secondary y-axis (var_name: 'y2_max', var_type: Integer or Float) 5. Step of primary y-axis (var_name: 'y1_step', var_type: Integer or Float) 6. Step of secondary y-axis (var_name: 'y2_step', var_type: Integer or Float) 7. Name of new yrange object for secondary y-axis (var_name: "new_yrange_name", var_type: Bokeh Plot yrange object) Output: Bokeh Plot yrange objects for primary and secondary y-axes. 
""" #import modules: import numpy as np from bokeh.models import Range1d #yrange and tick function for plot with primary and secondary y-axis: yticks1 = np.arange(y1_min, y1_max + y1_step, y1_step) yticks2 = np.arange(y2_min, y2_max + y2_step, y2_step) #Get difference in total number of ticks between primary and secondary y-axis: diff = abs(len(yticks2)-len(yticks1)) #Get how many times the step needs to be added to start and end: num_of_steps = int(diff/2) #If the primary and the secondary y-axis have the same number of ticks: if(diff==0): #Set the range of the 1st y-axis: y_range = Range1d(start=y1_min, end=y1_max) #Set the 2nd y-axis, range-name, range: extra_y_ranges = {new_yrange_name: Range1d(start=y2_min, end=y2_max)} #print('diff==0') #If the primary y-axis has fewer ticks than the secondary y-axis: elif(len(yticks2)>len(yticks1)): #If the difference in ticks between the two axes is an odd number: if(diff%2==1): #Set the range of the 1st y-axis: y_range = Range1d(start=y1_min-(y1_step*(num_of_steps+1)), end=y1_max+(y1_step*num_of_steps)) #Set the 2nd y-axis, range-name, range: extra_y_ranges = {new_yrange_name: Range1d(start=y2_min, end=y2_max)} #print('len(yticks2)>len(yticks1) --> diff==odd') #If the difference in ticks between the two axes is an even number: else: #Set the range of the 1st y-axis: y_range = Range1d(start=y1_min-(y1_step*num_of_steps), end=y1_max+(y1_step*num_of_steps)) #Set the 2nd y-axis, range-name, range: extra_y_ranges = {new_yrange_name: Range1d(start=y2_min, end=y2_max)} #print('len(yticks2)>len(yticks1) --> diff==even') #If the primary y-axis has more ticks than the secondary y-axis, e.g. len(yticks1)>len(yticks2_test): else: #If the difference in ticks between the two axes is an odd number: if(diff%2==1): #Set the range of the 1st y-axis: y_range = Range1d(start=y1_min, end=y1_max) #Set the 2nd y-axis, range-name, range: extra_y_ranges = {new_yrange_name: Range1d(start=y2_min + (y2_step*(num_of_steps)), end=y2_max + (y2_step*(num_of_steps+1)))} #print('len(yticks2) diff==odd') #If the difference in ticks between the two axes is an even number: else: #Set the range of the 1st y-axis: y_range = Range1d(start=y1_min, end=y1_max) #Set the 2nd y-axis, range-name, range: #extra_y_ranges = {new_yrange_name: Range1d(start=y2_min - (y2_step*num_of_steps), end=y2_max + (y2_step*num_of_steps))} extra_y_ranges = {new_yrange_name: Range1d(start=y2_min, end=y2_max + (y2_step*(num_of_steps+1)))} #print('len(yticks2) diff==even') #Return y-range for primary and secondary y-axes: return y_range, extra_y_ranges**Plotting Function**#Plot daily totals per year for a given variable: def plot_barplot_2axes(df1, df2, variable_ls, unit_ls, dailyType_ls, color_ls): #Import modules: from bokeh.models import Legend p = figure(plot_width=600, plot_height=450, title = 'HTM: '+variable_ls[0]+' Daily '+dailyType_ls[0]+', '+ variable_ls[1]+' Daily '+dailyType_ls[1]+' for '+str(df1.index[0].year), x_axis_label = 'Time', y_axis_label = variable_ls[0] + ' ('+unit_ls[0].translate(SUP)+') daily '+dailyType_ls[0], x_axis_type='datetime') # Setting the second y axis range name and range p.y_range, p.extra_y_ranges = set_yranges_2y_barplot(0,#rounddown10(df1.values.min()), roundup20(df1.values.max()), 0,#math.floor(df2.values.min()), math.ceil(df2.values.max()), 10.0, 0.5, 'y2') #Set primary y-axis ticker: ticker_1 = SingleIntervalTicker(interval= 10.0) #Add primary y-axis ticker to plot: p.yaxis.ticker = ticker_1 #Set secondary y-axis ticker: ticker_2 = SingleIntervalTicker(interval=0.5) # 
Adding the second axis to the plot. p.add_layout(LinearAxis(y_range_name="y2", axis_label=variable_ls[1] + ' ('+unit_ls[1].translate(SUP)+')', ticker=ticker_2, axis_label_standoff = 15, axis_label_text_color = color_ls[1]), 'right') #Create an empty list that will store the legend info: legend_it = [] #Add 1st barplot: bp1 = p.vbar(x=list(df1.index.values), width=2.5, bottom=0, top=list(df1.values), color=color_ls[0], name=variable_ls[0]) #Add 2nd barplot: bp2 = p.vbar(x=list(df2.index.values), width=2.5, bottom=0, alpha=0.4, top=list(df2.values), color=color_ls[1], y_range_name="y2", name=variable_ls[1]) #Add the name and glyph info (i.e. colour and marker type) to the legend: legend_it.append((bp1.name, [bp1])) legend_it.append((bp2.name, [bp2])) #Create legend: legend = Legend(items=legend_it, location= 'bottom_center') legend.orientation = 'horizontal' legend.click_policy='hide' legend.spacing = 10 #sets the distance between legend entries #Add legend to figure: p.add_layout(legend, 'below') #Set title attributes: p.title.align = 'center' p.title.text_font_size = '12pt' p.title.vertical_align = 'top' #Create a distance between the title and the plot #Set axis label font style: p.xaxis.axis_label_text_font_style = 'normal' p.yaxis.axis_label_text_font_style = 'normal' p.xaxis.axis_label_standoff = 15 #Sets the distance of the label from the x-axis in screen units p.yaxis.axis_label_standoff = 15 #Sets the distance of the label from the y-axis in screen units p.yaxis[0].axis_label_text_color = color_ls[0] #Set the copyright-label position: label_opts = dict(x=0, y=5, x_units='screen', y_units='screen') #Create a label object and format it: caption1 = Label(text="© ICOS ERIC", **label_opts) caption1.text_font_size = '8pt' #Add label to plot: p.add_layout(caption1, 'below') #Format plot borders: p.min_border_top = 54 #Return plot: return p**Widget Function**def widget_SWIR_GPP(): #Import Python modules: from ipywidgets import interact_manual, Dropdown from bokeh.layouts import column from bokeh.io import show, output_notebook #Create a dictionary to store the filenames associated with each year: labels = {"2015":[Joules2MegaJoules(HTM_eko_LIGHT_2015_daily_sum), micromoles2moles(HTM_eko_GPP_2015_daily_sum)], "2016":[Joules2MegaJoules(HTM_eko_LIGHT_2016_daily_sum), micromoles2moles(HTM_eko_GPP_2016_daily_sum)], "2017":[Joules2MegaJoules(HTM_eko_LIGHT_2017_daily_sum), micromoles2moles(HTM_eko_GPP_2017_daily_sum)]} #Create Dropdown-List widget: years = Dropdown(options=labels.keys(), value='2015', description='Year:', disabled=False) #Function that calls functions to update the plot #based on the selected year: def update_plot_func(Year): #Call function to plot data for the selected year: p1 = plot_barplot_2axes(labels[Year][0], labels[Year][1], ['SW-IR','GPP'], ['MJoules/m2','moles m-2'], ['Total', 'Total'], ['orange','green']) #Show Plot for 2018: p2 = plot_barplot_2axes(Joules2MegaJoules(HTM_eko_LIGHT_2018_daily_sum), micromoles2moles(HTM_eko_GPP_2018_daily_sum), ['SW-IR','GPP'], ['MJoules/m2','moles m-2'], ['Total', 'Total'], ['orange','green']) #Define output location: output_notebook() #Show plots: show(column(p1, p2)) #Create function that contains a box of widgets: interact_c = interact_manual(update_plot_func, Year=years) #Set the font of the widgets included in interact_manual: interact_c.widget.children[0].layout.width = '430px' interact_c.widget.children[0].layout.margin = '40px 2px 2px 2px' interact_c.widget.children[1].description = 'Update Plot' 
interact_c.widget.children[1].button_style = 'danger' interact_c.widget.children[1].style.button_color = '#3973ac' interact_c.widget.children[1].layout.margin = '10px 10px 40px 180px' # top/right/bottom/leftPlot Daily Total Shortwave Infrared Incoming Solar Radiation with Daily Total GPP**Call Function**#Call function to display widgets: widget_SWIR_GPP()Back to TOC 10.5. Plot GPP Daily Totals with Soil Water Content Daily MeanThe amount of water available to the plants can affect their rate of photosynthesis. If the amount of water in the soil drops below 10%, the plants can no longer absorb it with their roots. Plants that do not have enough water close their stomata and lose their capacity to take in CO$_2$. This means that they cease to photosynthesize.In this part, it is possible to view an interactive plot with daily mean Soil Water Content values and daily total GPP values for the duration of one year. A dropdown widget allows the user to select a year between 2015 and 2017. Once the user clicks on the | Update Plot | button, two plots will appear. The first plot depicts the values of the aforementioned variables for the selected year, whilst the second plot shows the variable values for 2018, when the drought occurred. The interactive legend allows the user to switch layers on and off. The purpose of these visualizations is to examine if changes in Soil Water Content correlate with changes in GPP. [Go to plot] **Widget Function**def widget_SWC_GPP(): #Import Python modules: from ipywidgets import interact_manual, Dropdown from bokeh.layouts import column from bokeh.io import show, output_notebook #Create a dictionary to store the data series associated with each year: labels = {"2015":[HTM_eko_SWC_2015_daily_mean, micromoles2moles(HTM_eko_GPP_2015_daily_sum)], "2016":[HTM_eko_SWC_2016_daily_mean, micromoles2moles(HTM_eko_GPP_2016_daily_sum)], "2017":[HTM_eko_SWC_2017_daily_mean, micromoles2moles(HTM_eko_GPP_2017_daily_sum)]} #Create Dropdown-List widget: years = Dropdown( options=labels.keys(), value='2015', description='Year:', disabled=False ) #Function that calls functions to update the plot #based on the selected year: def update_plot_func(Year): #Call function to plot data for the selected year: p1 = plot_barplot_2axes(labels[Year][0], labels[Year][1], ['Soil Water Content','GPP'], ['%','moles m-2'], ['Mean', 'Total'], ['lightblue','green']) #Plot Figure for 2018: p2 = plot_barplot_2axes(HTM_eko_SWC_2018_daily_mean, micromoles2moles(HTM_eko_GPP_2018_daily_sum), ['Soil Water Content','GPP'], ['%','moles m-2'], ['Mean', 'Total'], ['lightblue','green']) #Define output location: output_notebook() #Show plots: show(column(p1, p2)) #Create function that contains a box of widgets: interact_c = interact_manual(update_plot_func, Year=years) #Set the font of the widgets included in interact_manual: interact_c.widget.children[0].layout.width = '430px' interact_c.widget.children[0].layout.margin = '40px 2px 2px 2px' interact_c.widget.children[1].description = 'Update Plot' interact_c.widget.children[1].button_style = 'danger' interact_c.widget.children[1].style.button_color = '#3973ac' interact_c.widget.children[1].layout.margin = '10px 10px 40px 180px' # top/right/bottom/leftPlot Daily Mean Soil Water Content with Daily Total GPP**Call Function**#Call function to plot widgets: widget_SWC_GPP()Back to TOC 10.6. Plot Daily Mean Soil Water Content with Daily Total Respiration and Daily Mean Air TemperatureDecomposers and detritivores thrive in warm and moist environments. 
They emit carbon to the atmosphere as a result of their activity. As mentioned before, decomposers and detritivores would limit their activity if the conditions of their environment became too dry. The following plots will present how the values of Air Temperature and Soil Water Content correlate with the values of Respiration.In this part, it is possible to view an interactive plot with daily mean Soil Water Content and Air Temperature values and daily total Respiration values for the duration of one year. A dropdown widget allows the user to select a year between 2015 and 2017. Once the user clicks on the | Update Plot | button, two plots will appear. The first plot depicts the values of the aforementioned variables for the selected year, whilst the second plot shows the variable values for 2018, when the drought occurred. The interactive legend allows the user to switch layers on and off. [Go to plot] **Function - Define the range of the y-axes (3 y-axes)**def set_yranges_3y_ymin0(y1_min, y1_max, y2_min, y2_max, y3_min, y3_max, y1_step, y2_step, y3_step): """ Project: 'ICOS Carbon Portal' Created: Tue May 07 10:30:00 2018 Last Changed: Tue May 07 10:30:00 2019 Version: 1.0.0 Author(s): Karolina Description: Function that takes the primary, secondary and third y-axis min/max values as well as the step values for every y-axis as input parameters, performs computations so that the three axes are aligned and returns their corresponding Range1d objects. Works only for Bokeh plots. Input parameters: 1. Min value of primary y-axis (var_name: 'y1_min', var_type: Integer or Float) 2. Max value of primary y-axis (var_name: 'y1_max', var_type: Integer or Float) 3. Min value of secondary y-axis (var_name: 'y2_min', var_type: Integer or Float) 4. Max value of secondary y-axis (var_name: 'y2_max', var_type: Integer or Float) 5. Min value of third y-axis (var_name: 'y3_min', var_type: Integer or Float) 6. Max value of third y-axis (var_name: 'y3_max', var_type: Integer or Float) 7. Step of primary y-axis (var_name: 'y1_step', var_type: Integer or Float) 8. Step of secondary y-axis (var_name: 'y2_step', var_type: Integer or Float) 9. Step of third y-axis (var_name: 'y3_step', var_type: Integer or Float) Output: Bokeh Plot yrange objects for the primary, secondary and third y-axes. 
""" #import modules: import numpy as np from bokeh.models import Range1d #yrange and tick function for plot with primary and secondary y-axis: yticks1 = np.arange(y1_min, y1_max + y1_step, y1_step) yticks2 = np.arange(y2_min, y2_max + y2_step, y2_step) yticks3 = np.arange(y3_min, y3_max + y3_step, y3_step) #Get the number of ticks per y-axis: y1_num_of_ticks = len(yticks1) y2_num_of_ticks = len(yticks2) y3_num_of_ticks = len(yticks3) #Get difference in total number of ticks between primary and secondary y-axis: diff_12 = abs(len(yticks2)-len(yticks1)) diff_13 = abs(len(yticks3)-len(yticks1)) diff_23 = abs(len(yticks3)-len(yticks2)) #If the primary, secondary and 3rd y-axis have the same number of ticks: if((diff_12==0) and (diff_13==0) and (diff_23==0)): #Set the range of the 1st y-axis: y_range = Range1d(start=y1_min, end=y1_max) #Set the 2nd y-axis, range-name, range: extra_y_ranges_1 = Range1d(start=y2_min, end=y2_max) #Set the 3rd y-axis, range-name, range: extra_y_ranges_2 = Range1d(start=y3_min, end=y3_max) #print('All y-axes have the same num of ticks') #if y-axis 1 is the axis with the highest number of ticks: elif(max(y1_num_of_ticks, y2_num_of_ticks, y3_num_of_ticks)==y1_num_of_ticks): #Set the range of the 1st y-axis: y_range = Range1d(start=y1_min, end=y1_max) #Set the 2nd y-axis, range-name, range: extra_y_ranges_1 = Range1d(start=y2_min, end=y2_max + (y2_step*diff_12)) #Set the 3rd y-axis, range-name, range: extra_y_ranges_2 = Range1d(start=y3_min, end=y3_max + (y3_step*diff_13)) #print('y1axis highest num of ticks') #if y-axis 2 is the axis with the highest number of ticks: elif(max(y1_num_of_ticks, y2_num_of_ticks, y3_num_of_ticks)==y2_num_of_ticks): #Set the range of the 1st y-axis: y_range = Range1d(start=y1_min, end=y1_max+(y1_step*diff_12)) #Set the 2nd y-axis, range-name, range: extra_y_ranges_1 = Range1d(start=y2_min, end=y2_max) #Set the 3rd y-axis, range-name, range: extra_y_ranges_2 = Range1d(start=y3_min, end=y3_max+(y3_step*diff_23)) #print('y2axis highest num of ticks') #if y-axis 3 is the axis with the highest number of ticks: elif(max(y1_num_of_ticks, y2_num_of_ticks, y3_num_of_ticks)==y3_num_of_ticks): #Set the range of the 1st y-axis: y_range = Range1d(start=y1_min, end=y1_max+(y1_step*diff_13)) #Set the 2nd y-axis, range-name, range: extra_y_ranges_1 = Range1d(start=y2_min, end=y2_max+(y2_step*diff_23)) #Set the 3rd y-axis, range-name, range: extra_y_ranges_2 = Range1d(start=y3_min, end=y3_max) #print('y3axis highest num of ticks') else: y_range = None extra_y_ranges_1 = None extra_y_ranges_2 = None #Return y-range for primary and secondary y-axes: return y_range, extra_y_ranges_1, extra_y_ranges_2**Plotting Function**#Plot daily totals per year for a given variable: def plot_2barplots_line_glyph_3axes(df1, df2, df3, variable_ls, unit_ls, dailyType_ls, color_ls, step_y1, step_y2, step_y3): #Import modules: from bokeh.models import Legend #Create Fogure Object: p = figure(plot_width=600, plot_height=450, title = 'HTM: '+variable_ls[1]+', '+variable_ls[0]+' & '+variable_ls[2]+ ' for '+str(df1.index[0].year), x_axis_label = 'Time', y_axis_label = variable_ls[0] + ' ('+unit_ls[0].translate(SUP)+') Daily '+dailyType_ls[0], x_axis_type='datetime') #Add the ranges for every y-axis: p.y_range, p.extra_y_ranges['Yaxis2'], p.extra_y_ranges['Yaxis3']= set_yranges_3y_ymin0(0, roundup10(df1.values.max()), 0, math.ceil(df2.values.max()), 0, roundup10(df3.values.max()), step_y1, step_y2, step_y3) #Set primary y-axis ticker: ticker_1 = SingleIntervalTicker(interval= 
step_y1) #Add primary y-axis ticker to plot: p.yaxis.ticker = ticker_1 #Set secondary y-axis ticker: ticker_2 = SingleIntervalTicker(interval=step_y2) #Set secondary y-axis ticker: ticker_3 = SingleIntervalTicker(interval=step_y3) # Adding the second axis to the plot. yaxis2 = LinearAxis(y_range_name="Yaxis2", axis_label=variable_ls[1] + ' ('+unit_ls[1].translate(SUP)+') Daily '+dailyType_ls[1], ticker=ticker_2, axis_label_standoff = 15, axis_label_text_color = color_ls[1]) # Adding the third axis to the plot. yaxis3 = LinearAxis(y_range_name='Yaxis3', axis_label=variable_ls[2] + ' ('+unit_ls[2].translate(SUP)+') Daily '+dailyType_ls[2], ticker=ticker_3, axis_label_standoff = 15, axis_label_text_color = color_ls[2]) #Define at which part of the plot the additional y-axes will be located: p.add_layout(yaxis2,'right') p.add_layout(yaxis3,'right') #Create an empty list that will store the legend info: legend_it = [] #Create 1st barplot: bp1 = p.vbar(x=list(df1.index.values), width=0.5, bottom=0, top=list(df1.values), color=color_ls[0], name=variable_ls[0]) #Create 2nd barplot: bp2 = p.vbar(x=list(df2.index.values), width=0.5, bottom=0, alpha=0.5, top=list(df2.values), color=color_ls[1], y_range_name="Yaxis2", name=variable_ls[1]) #Add line-glyph: g1 = p.line(df3.index.values, df3.values, line_width=2.0, color=color_ls[2], y_range_name="Yaxis3", name=variable_ls[2]) #Add the name and glyph info (i.e. colour and marker type) to the legend: legend_it.append((bp1.name, [bp1])) legend_it.append((bp2.name, [bp2])) legend_it.append((g1.name, [g1])) #Create legend: legend = Legend(items=legend_it, location= 'bottom_center') legend.orientation = 'horizontal' legend.click_policy='hide' legend.spacing = 10 #sets the distance between legend entries #Add legend to figure: p.add_layout(legend, 'below') #Set title attributes: p.title.align = 'center' p.title.text_font_size = '10pt' p.title.vertical_align = 'top' #Create a distance between the title and the plot #Set axis label font style: p.xaxis.axis_label_text_font_style = 'normal' p.yaxis.axis_label_text_font_style = 'normal' p.xaxis.axis_label_standoff = 15 #Sets the distance of the label from the x-axis in screen units p.yaxis.axis_label_standoff = 15 #Sets the distance of the label from the y-axis in screen units p.yaxis[0].axis_label_text_color = color_ls[0] #Set title attributes: p.title.align = 'center' p.title.text_font_size = '12pt' p.title.offset = 15 #Set the copyright-label position: label_opts = dict(x=0, y=5, x_units='screen', y_units='screen') #Create a label object and format it: caption1 = Label(text="© ICOS ERIC", **label_opts) caption1.text_font_size = '8pt' #Add label to plot: p.add_layout(caption1, 'below') #Format plot borders: p.min_border_top = 54 #Return Figure Object: return p**Widget Function**def widgetAirTempRECO(): #Import Python modules: from ipywidgets import interact_manual, Dropdown from bokeh.layouts import column from bokeh.io import show, output_notebook #Create a dictionary to store the filenames associated with each year: labels = {"2015":[HTM_eko_SWC_2015_daily_mean, micromoles2moles(HTM_eko_RECO_2015_daily_sum), HTM_eko_TA_2015_daily_mean], "2016":[HTM_eko_SWC_2016_daily_mean, micromoles2moles(HTM_eko_RECO_2016_daily_sum), HTM_eko_TA_2016_daily_mean], "2017":[HTM_eko_SWC_2017_daily_mean, micromoles2moles(HTM_eko_RECO_2017_daily_sum), HTM_eko_TA_2017_daily_mean]} #Create Dropdown-List widget: years = Dropdown( options=labels.keys(), value='2015', description='Year:', disabled=False ) #Function that calls functions 
to update the plot #based on the selected year: def update_plot_func(Year): #Call function to plot data for the selected year: p1 = plot_2barplots_line_glyph_3axes(labels[Year][0], labels[Year][1], labels[Year][2], ['Soil Water Content','Respiration','Air Temperature'], ['%','moles m-2','C\u00b0'], ['Mean', 'Total','Mean'], ['lightblue', '#9e9ac8','firebrick'], 10.0, 1.0, 10.0) #Call function to plot data for 2018 (drought year): p2 = plot_2barplots_line_glyph_3axes(HTM_eko_SWC_2018_daily_mean, micromoles2moles(HTM_eko_RECO_2018_daily_sum), HTM_eko_TA_2018_daily_mean, ['Soil Water Content','Respiration','Air Temperature'], ['%','moles m-2','C\u00b0'], ['Mean', 'Total','Mean'], ['lightblue', '#9e9ac8','firebrick'], 10.0, 1.0, 10.0) #Define output location: output_notebook() #Show plots: show(column(p1, p2)) #Create function that contains a box of widgets: interact_c = interact_manual(update_plot_func, Year=years) #Set the font of the widgets included in interact_manual: interact_c.widget.children[0].layout.width = '430px' interact_c.widget.children[0].layout.margin = '40px 2px 2px 2px' interact_c.widget.children[1].description = 'Update Plot' interact_c.widget.children[1].button_style = 'danger' interact_c.widget.children[1].style.button_color = '#3973ac' interact_c.widget.children[1].layout.margin = '10px 10px 40px 180px' # top/right/bottom/leftPlot Daily Mean Soil Water Content and Air Temperature with Daily Total Respiration**Call Function**#Call function to display widgets: widgetAirTempRECO()Back to TOC 10.7. Plot Daily Mean Soil Water Content with Daily Total GPP and Daily Total PrecipitationPlants need access to water in order to photosynthesize. Water-stressed plants close their stomata and limit their photosynthetic activity to preserve water and survive. If the level of soil water content drops below a threshold of 10%, then the plants are no longer able to absorb water from the soil using their roots. In this part, it is possible to view an interactive plot with daily mean Soil Water Content values and daily total GPP and Precipitation values for the duration of one year. A dropdown widget allows the user to select a year between 2015 and 2017. Once the user clicks on the | Update Plot | button, two plots will appear. The first plot depicts the values of the aforementioned variables for the selected year, whilst the second plot shows the variable values for 2018, when the drought occured. The interactive legend allows the user to switch layers on and off. The objective here is to examine how soil water content is affected by precipitation and how soil water content affects GPP. 
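Before inspecting the plots, the relationships can also be summarized numerically. The sketch below (not part of the original notebook) combines the daily series computed earlier into one DataFrame and prints their pairwise Pearson correlations for 2018, the drought year:

```python
#Complementary numeric check (sketch, assuming the daily series computed earlier):
import pandas as pd

daily_2018 = pd.concat({'SWC_daily_mean': HTM_eko_SWC_2018_daily_mean,
                        'GPP_daily_total': micromoles2moles(HTM_eko_GPP_2018_daily_sum),
                        'P_daily_total': HTM_eko_P_2018_daily_sum},
                       axis=1).dropna()

#Pairwise Pearson correlations between Soil Water Content, GPP and Precipitation:
print(daily_2018.corr())
```

Note that a plain correlation ignores the time lag between a rain event and the soil-water response, so it only complements the visual comparison in the plots.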
[Go to plot] **Widget Function**def widgetSWCGPPPrecip(): #Import Python modules: from ipywidgets import interact_manual, Dropdown from bokeh.layouts import column from bokeh.io import show, output_notebook #Create a dictionary to store the filenames associated with each year: labels = {"2015":[HTM_eko_SWC_2015_daily_mean, micromoles2moles(HTM_eko_GPP_2015_daily_sum), HTM_eko_P_2015_daily_sum], "2016":[HTM_eko_SWC_2016_daily_mean, micromoles2moles(HTM_eko_GPP_2016_daily_sum), HTM_eko_P_2016_daily_sum], "2017":[HTM_eko_SWC_2017_daily_mean, micromoles2moles(HTM_eko_GPP_2017_daily_sum), HTM_eko_P_2017_daily_sum]} #Create Dropdown-List widget: years = Dropdown(options=labels.keys(), value='2015', description='Year:', disabled=False) #Function that calls functions to update the plot #based on the selected year: def update_plot_func(Year): #Call function to plot data for the selected year: p1 = plot_2barplots_line_glyph_3axes(labels[Year][0], labels[Year][1], labels[Year][2], ['Soil Water Content','GPP', 'Precipitation'], ['%','moles m-2', 'mm'], ['Mean', 'Total', 'Total'], ['lightblue', 'green', 'navy'], 10.0, 0.5, 10.0) #Call function to plot data for 2018 (drought year): p2 = plot_2barplots_line_glyph_3axes(HTM_eko_SWC_2018_daily_mean, micromoles2moles(HTM_eko_GPP_2018_daily_sum), HTM_eko_P_2018_daily_sum, ['Soil Water Content','GPP', 'Precipitation'], ['%','moles m-2', 'mm'], ['Mean', 'Total', 'Total'], ['lightblue', 'green', 'navy'], 10, 0.5, 10.0) #Define output location: output_notebook() #Show plots: show(column(p1, p2)) #Create function that contains a box of widgets: interact_c = interact_manual(update_plot_func, Year=years) #Set the font of the widgets included in interact_manual: interact_c.widget.children[0].layout.width = '430px' interact_c.widget.children[0].layout.margin = '40px 2px 2px 2px' interact_c.widget.children[1].description = 'Update Plot' interact_c.widget.children[1].button_style = 'danger' interact_c.widget.children[1].style.button_color = '#3973ac' interact_c.widget.children[1].layout.margin = '10px 10px 40px 180px' # top/right/bottom/leftPlot Daily Mean Soil Water Content with Daily Total GPP and Precipitation**Call Function**#Call function to display widgets: widgetSWCGPPPrecip()Back to TOC 10.8. Plot Daily Mean Soil Water Content and Air temperature with Daily Total GPP and Light (SW-IR)Plants need access to water and sunlight in order to photosynthesize. Water-stressed plants close their stomata and limit their photosynthetic activity to preserve water and survive. If the level of soil water content drops below a threshold of 10%, then the plants are no longer able to absorb water from the soil using their roots. In this part, it is possible to view an interactive plot with daily mean Soil Water Content and Air Temperature values and daily total GPP and Light values for the duration of one year. A dropdown widget allows the user to select a year between 2015 and 2017. Once the user clicks on the | Update Plot | button, two plots will appear. The first plot depicts the values of the aforementioned variables for the selected year, whilst the second plot shows the variable values for 2018, when the drought occured. The interactive legend allows the user to switch layers on and off. The objective here is to examine how the values of the main factors affecting the photosynthetic ability of plants correlate with GPP. 
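The multi-axis plots in this and the previous sections rely on helper functions that align the y-axes by giving every axis the same number of ticks, so that the gridlines of all axes coincide. The sketch below (with made-up numbers, not station data) illustrates the padding idea before the full 4-axis version is defined:

```python
#Illustration of the y-axis alignment idea used by the set_yranges_* helpers (hypothetical numbers):
import numpy as np

def n_ticks(y_min, y_max, step):
    #Number of ticks an axis would get for the given range and step:
    return len(np.arange(y_min, y_max + step, step))

ticks_1 = n_ticks(0, 50, 10)     #primary axis:   0-50, step 10  -> 6 ticks
ticks_2 = n_ticks(0, 3, 0.5)     #secondary axis: 0-3,  step 0.5 -> 7 ticks

#The axis with fewer ticks is padded at the top by its own step times the tick difference,
#so both axes end up with the same number of ticks:
y1_max_aligned = 50 + 10 * (ticks_2 - ticks_1)   #60 -> the primary axis now also has 7 ticks

print(ticks_1, ticks_2, y1_max_aligned)
```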
[Go to plot] **Function - Define the range of the y-axes (4 y-axes)**def set_yranges_4y_ymin0(y1_min, y1_max, y2_min, y2_max, y3_min, y3_max, y4_min, y4_max, y1_step, y2_step, y3_step, y4_step): """ Project: 'ICOS Carbon Portal' Created: Tue May 07 10:30:00 2018 Last Changed: Tue May 07 10:30:00 2019 Version: 1.0.0 Author(s): Karolina Description: Function that takes the primary, secondary, third and fourth y-axis min/max values as well as the step values for every y-axis as input parameters, performs computations so that the three axes are alligned and returns their corresponding RangeId objects. Works only for Bokeh plots. Input parameters: 1. Min value of primary y-axis (var_name: 'y1_min', var_type: Integer or Float) 2. Max value of primary y-axis (var_name: 'y1_max', var_type: Integer or Float) 3. Min value of secondary y-axis (var_name: 'y2_min', var_type: Integer or Float) 4. Max value of secondary y-axis (var_name: 'y2_max', var_type: Integer or Float) 5. Min value of third y-axis (var_name: 'y3_min', var_type: Integer or Float) 6. Max value of third y-axis (var_name: 'y3_max', var_type: Integer or Float) 7. Min value of fourth y-axis (var_name: 'y4_min', var_type: Integer or Float) 8. Max value of fourth y-axis (var_name: 'y4_max', var_type: Integer or Float) 9. Step of primary y-axis (var_name: 'y1_step', var_type: Integer or Float) 10. Step of secondary y-axis (var_name: 'y2_step', var_type: Integer or Float) 11. Step of third y-axis (var_name: 'y3_step', var_type: Integer or Float) 12. Step of fourth y-axis (var_name: 'y4_step', var_type: Integer or Float) Output: Bokeh Plot yrange objects for primary and secondary y-axes. """ #import modules: import numpy as np from bokeh.models import Range1d #yrange and tick function for plot with primary, secondary, third and fourth y-axis: yticks1 = np.arange(y1_min, y1_max + y1_step, y1_step) yticks2 = np.arange(y2_min, y2_max + y2_step, y2_step) yticks3 = np.arange(y3_min, y3_max + y3_step, y3_step) yticks4 = np.arange(y4_min, y4_max + y4_step, y4_step) #Get the number of ticks per y-axis: y1_num_of_ticks = len(yticks1) y2_num_of_ticks = len(yticks2) y3_num_of_ticks = len(yticks3) y4_num_of_ticks = len(yticks4) #Get difference in total number of ticks between primary and secondary y-axis: diff_12 = abs(len(yticks2)-len(yticks1)) diff_13 = abs(len(yticks3)-len(yticks1)) diff_23 = abs(len(yticks3)-len(yticks2)) diff_14 = abs(len(yticks4)-len(yticks1)) diff_24 = abs(len(yticks4)-len(yticks2)) diff_34 = abs(len(yticks4)-len(yticks3)) #If the primary, secondary and 3rd y-axis have the same number of ticks: if((diff_12==0) and (diff_13==0) and (diff_23==0) and (diff_14==0) and (diff_24==0) and (diff_34==0)): #Set the range of the 1st y-axis: y_range = Range1d(start=y1_min, end=y1_max) #Set the 2nd y-axis, range-name, range: extra_y_ranges_1 = Range1d(start=y2_min, end=y2_max) #Set the 3rd y-axis, range-name, range: extra_y_ranges_2 = Range1d(start=y3_min, end=y3_max) #Set the 4th y-axis, range-name, range: extra_y_ranges_3 = Range1d(start=y4_min, end=y4_max) #print('All y-axes have the same length') #if y-axis 1 is the axis with the highest number of ticks: elif(max(y1_num_of_ticks, y2_num_of_ticks, y3_num_of_ticks, y4_num_of_ticks)==y1_num_of_ticks): #Set the range of the 1st y-axis: y_range = Range1d(start=y1_min, end=y1_max) #Set the 2nd y-axis, range-name, range: extra_y_ranges_1 = Range1d(start=y2_min, end=y2_max + (y2_step*diff_12)) #Set the 3rd y-axis, range-name, range: extra_y_ranges_2 = Range1d(start=y3_min, end=y3_max + 
(y3_step*diff_13)) #Set the 4th y-axis, range-name, range: extra_y_ranges_3 = Range1d(start=y4_min, end=y4_max + (y4_step*diff_14)) #print('y1-axis --> highest num of ticks') #if y-axis 2 is the axis with the highest number of ticks: elif(max(y1_num_of_ticks, y2_num_of_ticks, y3_num_of_ticks, y4_num_of_ticks)==y2_num_of_ticks): #Set the range of the 1st y-axis: y_range = Range1d(start=y1_min, end=y1_max+(y1_step*diff_12)) #Set the 2nd y-axis, range-name, range: extra_y_ranges_1 = Range1d(start=y2_min, end=y2_max) #Set the 3rd y-axis, range-name, range: extra_y_ranges_2 = Range1d(start=y3_min, end=y3_max+(y3_step*diff_23)) #Set the 4th y-axis, range-name, range: extra_y_ranges_3 = Range1d(start=y4_min, end=y4_max+(y4_step*diff_24)) #print('y2-axis --> highest num of ticks') #if y-axis 3 is the axis with the highest number of ticks: elif(max(y1_num_of_ticks, y2_num_of_ticks, y3_num_of_ticks, y4_num_of_ticks)==y3_num_of_ticks): #Set the range of the 1st y-axis: y_range = Range1d(start=y1_min, end=y1_max+(y1_step*diff_13)) #Set the 2nd y-axis, range-name, range: extra_y_ranges_1 = Range1d(start=y2_min, end=y2_max+(y2_step*diff_23)) #Set the 3rd y-axis, range-name, range: extra_y_ranges_2 = Range1d(start=y3_min, end=y3_max) #Set the 4th y-axis, range-name, range: extra_y_ranges_3 = Range1d(start=y4_min, end=y4_max+(y4_step*diff_34)) #print('y3-axis --> highest num of ticks') #if y-axis 4 is the axis with the highest number of ticks: elif(max(y1_num_of_ticks, y2_num_of_ticks, y3_num_of_ticks, y4_num_of_ticks)==y4_num_of_ticks): #Set the range of the 1st y-axis: y_range = Range1d(start=y1_min, end=y1_max+(y1_step*diff_14)) #Set the 2nd y-axis, range-name, range: extra_y_ranges_1 = Range1d(start=y2_min, end=y2_max+(y2_step*diff_24)) #Set the 3rd y-axis, range-name, range: extra_y_ranges_2 = Range1d(start=y3_min, end=y3_max+(y3_step*diff_34)) #Set the 4th y-axis, range-name, range: extra_y_ranges_3 = Range1d(start=y4_min, end=y4_max) #print('y4-axis --> highest num of ticks') else: y_range = None extra_y_ranges_1 = None extra_y_ranges_2 = None #Return y-range for all y-axes: return y_range, extra_y_ranges_1, extra_y_ranges_2, extra_y_ranges_3**Plotting unction**#Plot daily totals per year for a given variable: def plotGPPLightSWCTempYr_4axes(df1, df2, df3, df4, variable_ls, unit_ls, dailyType_ls, color_ls, step_y1, step_y2, step_y3, step_y4): p = figure(plot_width=600, plot_height=450, title = 'Hyltemossa: '+variable_ls[0]+', '+variable_ls[1]+', '+ variable_ls[2]+' & '+variable_ls[3]+' for '+str(df1.index[0].year), x_axis_label = 'Time', y_axis_label = variable_ls[0] + ' ('+unit_ls[0].translate(SUP)+') daily '+dailyType_ls[0], x_axis_type='datetime') #Add the ranges for every y-axis: p.y_range,p.extra_y_ranges['Yaxis2'],p.extra_y_ranges['Yaxis3'],p.extra_y_ranges['Yaxis4'] = set_yranges_4y_ymin0(0,roundup10(df1.values.max()), 0,math.ceil(df2.values.max()), 0,roundup10(df3.values.max()), 0,roundup10(df4.values.max()), step_y1, step_y2, step_y3, step_y4) #Set primary y-axis ticker: ticker_1 = SingleIntervalTicker(interval=step_y1) #Add primary y-axis ticker to plot: p.yaxis.ticker = ticker_1 #Set secondary y-axis ticker: ticker_2 = SingleIntervalTicker(interval=step_y2) #Set 3rd y-axis ticker: ticker_3 = SingleIntervalTicker(interval=step_y3) #Set 4th y-axis ticker: ticker_4 = SingleIntervalTicker(interval=step_y4) # Adding the second axis to the plot. 
yaxis2 = LinearAxis(y_range_name="Yaxis2", axis_label=variable_ls[1] + ' ('+unit_ls[1].translate(SUP)+') daily '+dailyType_ls[1], ticker=ticker_2, axis_label_standoff = 15, axis_label_text_color = color_ls[1]) # Adding the 3rd axis to the plot. yaxis3 = LinearAxis(y_range_name='Yaxis3', axis_label=variable_ls[2] + ' ('+unit_ls[2].translate(SUP)+') daily '+dailyType_ls[2], ticker=ticker_3, axis_label_standoff = 15, axis_label_text_color = color_ls[2]) # Adding the 4th axis to the plot. yaxis4 = LinearAxis(y_range_name='Yaxis4', axis_label=variable_ls[3] + ' ('+unit_ls[3].translate(SUP)+') daily '+dailyType_ls[3], ticker=ticker_4, axis_label_standoff = 15, axis_label_text_color = color_ls[3]) p.add_layout(yaxis2,'right') p.add_layout(yaxis3,'right') p.add_layout(yaxis4,'left') #Create an empty list that will store the legend info: legend_it = [] #Add SWC barplot: bp1 = p.vbar(x=list(df1.index.values), width=0.5, bottom=0, top=list(df1.values), color=color_ls[0], name=variable_ls[0]) #Add GPP barplot: bp2 = p.vbar(x=list(df2.index.values), width=0.5, bottom=0, alpha=0.5, top=list(df2.values), color=color_ls[1], y_range_name="Yaxis2", name=variable_ls[1]) #Add Air-Temp line-glyph: l1 = p.line(df3.index.values, df3.values, line_width=2.0, color=color_ls[2], y_range_name="Yaxis3", alpha=0.7, name=variable_ls[2]) #Add Light line-glyph: l2 = p.line(list(df4.index.values), list(df4.values), line_width=2.0, color=color_ls[3], y_range_name="Yaxis4", alpha=0.7, name=variable_ls[3]) #Add the name and glyph info (i.e. colour and marker type) to the legend: legend_it.append((bp1.name, [bp1])) legend_it.append((bp2.name, [bp2])) legend_it.append((l1.name, [l1])) legend_it.append((l2.name, [l2])) #Create legend: legend = Legend(items=legend_it, location= 'bottom_center') legend.orientation = 'horizontal' legend.click_policy='hide' legend.spacing = 10 #sets the distance between legend entries #Add legend to figure: p.add_layout(legend, 'below') #Set title attributes: p.title.align = 'center' p.title.text_font_size = '12pt' p.title.vertical_align = 'top' #Create a distance between the title and the plot #Set axis label font style: p.xaxis.axis_label_text_font_style = 'normal' p.yaxis.axis_label_text_font_style = 'normal' p.xaxis.axis_label_standoff = 15 #Sets the distance of the label from the x-axis in screen units p.yaxis.axis_label_standoff = 15 #Sets the distance of the label from the y-axis in screen units p.yaxis[0].axis_label_text_color = color_ls[0] #Set the copyright-label position: label_opts = dict(x=0, y=5, x_units='screen', y_units='screen') #Create a label object and format it: caption1 = Label(text="© ICOS ERIC", **label_opts) caption1.text_font_size = '8pt' #Add label to plot: p.add_layout(caption1, 'below') #Format plot borders: p.min_border_top = 54 #Return Figure Object: return p**Widget Function**def widgetSWCGPPTempSWIR(): #Import Python modules: from ipywidgets import interact_manual, Dropdown from bokeh.layouts import column from bokeh.io import show, output_notebook #Create a dictionary to store the filenames associated with each year: labels = {"2015":[HTM_eko_SWC_2015_daily_mean, micromoles2moles(HTM_eko_GPP_2015_daily_sum), HTM_eko_TA_2015_daily_mean, Joules2MegaJoules(HTM_eko_LIGHT_2015_daily_sum)], "2016":[HTM_eko_SWC_2016_daily_mean, micromoles2moles(HTM_eko_GPP_2016_daily_sum), HTM_eko_TA_2016_daily_mean, Joules2MegaJoules(HTM_eko_LIGHT_2016_daily_sum)], "2017":[HTM_eko_SWC_2017_daily_mean, micromoles2moles(HTM_eko_GPP_2017_daily_sum), HTM_eko_TA_2017_daily_mean, 
Joules2MegaJoules(HTM_eko_LIGHT_2017_daily_sum)]} #Create Dropdown-List widget: years = Dropdown( options=labels.keys(), value='2015', description='Year:', disabled=False) #Function that calls functions to update the plot #based on the selected year: def update_plot_func(Year): #Call function to plot data for the selected year: p1 = plotGPPLightSWCTempYr_4axes(labels[Year][0], labels[Year][1], labels[Year][2], labels[Year][3], ['Soil Water Content','GPP', 'Temperature', 'SW-IR'], ['%', 'moles m-2', 'C', 'MJoules m-2'], ['Mean', 'Total', 'Mean', 'Total'], ['lightblue', 'green', 'firebrick', 'gold'], 10.0, 0.5, 10.0, 10.0) #Call function to plot data for 2018 (drought year): p2 = plotGPPLightSWCTempYr_4axes(HTM_eko_SWC_2018_daily_mean, micromoles2moles(HTM_eko_GPP_2018_daily_sum), HTM_eko_TA_2018_daily_mean, Joules2MegaJoules(HTM_eko_LIGHT_2018_daily_sum), ['Soil Water Content','GPP', 'Temperature', 'SW-IR'], ['%','moles m-2', 'C', 'MJoules m-2'], ['Mean', 'Total', 'Mean', 'Total'], ['lightblue', 'green', 'firebrick', 'gold'], 10.0, 0.5, 10.0, 10.0) #Define output location: output_notebook() #Show plots: show(column(p1, p2)) #Create function that contains a box of widgets: interact_c = interact_manual(update_plot_func, Year=years) #Set the font of the widgets included in interact_manual: interact_c.widget.children[0].layout.width = '430px' interact_c.widget.children[0].layout.margin = '40px 2px 2px 2px' interact_c.widget.children[1].description = 'Update Plot' interact_c.widget.children[1].button_style = 'danger' interact_c.widget.children[1].style.button_color = '#3973ac' interact_c.widget.children[1].layout.margin = '10px 10px 40px 180px' # top/right/bottom/leftPlot Daily Mean Soil Water Content and Air temperature with Daily Total GPP and Light (SW-IR)**Call Function**widgetSWCGPPTempSWIR()1 Short Answer (25pts)1. (5pts) True or False. (And explain your reason.)Mean-variance optimization goes long the highest Sharpe-Ratio assets and shorts the lowest Sharpe-ratio assets.False. This may not work, it should be decided also considering the covariance matrix. There are other possibilities such as short the second highest Sharpe-Ratio assets. 2. (5pts) True or False. (And explain your reason.)Investing in an LETF makes more sense for a long-term horizon than a short-term horizon.True.Assume LETF's return increases in a long tem, it makes sense to invest in an LETF long term. 3. (5pts) This week ProShares launches BITO on the NYSE. The ETF holds Bitcoin futures contracts. Suppose in a year from now, we want to try to replicate BITO using SPY and IEF asregressors in a LFD. Because BITO will only have a year of data, we do not trust that we will have a good estimate of the mean return.Do you suggest that we (in a year) estimate the regression with an intercept or without an intercept? Why?I suggest we estimate the regression with an intercept because we don't trust its mean. The mean of BITO may grow and is not stable, so it's more insurance to use an intercept. 4. (5pts) Is HDG effective at tracking HFRI in-sample? And out of sample?HDG is effective at tracking HFRI in-sample. HDG is effective at tracking HFRI out of sample. Based on the last lecture, the parameters of HDG both in-sample and out of sample are similar. 5. (5pts) A hedge fund claims to beat the market by having a very high alpha. 
After regressing the hedge fund returns on the 6 Merrill-Lynch style factors, you find the alpha to be negative.Explain why this discrepancy can happen.Its regressors may have a high return such as SPY, and is a rocket to boost. 2 Allocation (25 pts)Consider the Merrill-Lynch Style Factors found in “proshares analysis data.xlsx”, sheet “merrill factors”.We will use “USGG3M Index” as the risk-free rate. Subtract it from the other 5 columns, and proceedwith those 5 risky assets.11. (5pts) What are the weights of the tangency portfolio, wtan?import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import statsmodels.api as sm from sklearn.linear_model import LinearRegression import sys rets = pd.read_excel('proshares_analysis_data.xlsx',sheet_name='merrill_factors') rets rets.set_index('date',inplace=True) rets retsx = rets.subtract(rets['USGG3M Index'],axis=0) retsx retsx=retsx.drop(['USGG3M Index'],axis=1) retsx def compute_tangency(df, diagonalize_Sigma=False): """Compute tangency portfolio given a set of excess returns. Also, for convenience, this returns the associated vector of average returns and the variance-covariance matrix. Parameters ---------- diagonalize_Sigma: bool When `True`, set the off diagonal elements of the variance-covariance matrix to zero. """ Sigma = df.cov() # N is the number of assets N = Sigma.shape[0] Sigma_adj = Sigma.copy() if diagonalize_Sigma: Sigma_adj.loc[:,:] = np.diag(np.diag(Sigma_adj)) mu_tilde = df.mean() Sigma_inv = np.linalg.inv(Sigma_adj) weights = Sigma_inv @ mu_tilde / (np.ones(N) @ Sigma_inv @ mu_tilde) # For convenience, I'll wrap the solution back into a pandas.Series object. omega_tangency = pd.Series(weights, index=mu_tilde.index) return omega_tangency, mu_tilde, Sigma retsx.cov() omega_tangency, mu_tilde, Sigma = compute_tangency(retsx,diagonalize_Sigma=False) omega_tangency2. (5pts) What are the weights of the optimal portfolio, w∗, with a targeted excess mean return of .02 per month?Is the optimal portfolio, w∗, invested in the risk-free rate?def target_mv_portfolio(df, target_return, diagonalize_Sigma=False): """Compute MV optimal portfolio, given target return and set of excess returns. Parameters ---------- diagonalize_Sigma: bool When `True`, set the off diagonal elements of the variance-covariance matrix to zero. """ omega_tangency, mu_tilde, Sigma = compute_tangency(df, diagonalize_Sigma=diagonalize_Sigma) Sigma_adj = Sigma.copy() if diagonalize_Sigma: Sigma_adj.loc[:,:] = np.diag(np.diag(Sigma_adj)) Sigma_inv = np.linalg.inv(Sigma_adj) N = Sigma_adj.shape[0] delta_tilde = ((np.ones(N) @ Sigma_inv @ mu_tilde)/(mu_tilde @ Sigma_inv @ mu_tilde)) * target_return omega_star = delta_tilde * omega_tangency return omega_star omega_star = target_mv_portfolio(retsx*12, target_return=0.02 * 12) omega_star3. (5pts) Report the mean, volatility, and Sharpe ratio of the optimized portfolio. Annualize all three statistics.# Mean mean = mu_tilde @ omega_star # Volatlity vol = np.sqrt(omega_star @ Sigma @ omega_star)/np.sqrt(12) # Sharpe ratio sharpe_ratio = mean/vol print("Mean: ",mean,", vol: ",vol,", sharpe_ratio: ",sharpe_ratio)Mean: 0.020000000000000007 , vol: 0.01321312826168756 , sharpe_ratio: 1.51364609529989074. (5pts) Re-calculate the optimal portfolio, w∗ with target excess mean of .02 per month. Butthis time only use data through 2018 in doing the calculation. 
Calculate the return in 2019-2021based on those optimal weights.Report the mean, volatility, and Sharpe ratio of the 2019-2021 performance.omega_star1 = target_mv_portfolio(retsx.loc[: '2018']*12, target_return=0.02 * 12) omega_star1 Sigma = retsx.loc[: '2018'].cov() mu_tilde =retsx.loc[: '2018'].mean() mean = omega_star1 @ mu_tilde vol = np.sqrt(omega_star1 @ Sigma @ omega_star1)/np.sqrt(12) sharpe_ratio_in_sample = mean/vol sharpe_ratio_in_sample5. (5pts) Suppose that instead of optimizing these 5 risky assets, we optimized 5 commodity futures:oil, coffee, cocoa, lumber, cattle, and gold.Do you think the out-of-sample fragility problem would be better or worse than what we haveseen optimizing equities?No calculation is needed for this question–we just want a conceptual (though specific) answer. The commodity futures are less corrrelated with each other, and their volatility is higher. The out-of-sample fragility problem would be better because their correlation is less than equities. 3 Hedging & Replication (20pts)Continue to use the same data file from the previous problem.2Suppose we want to invest in EEM, but hedge out SPY. Do this by estimating a regression of EEMon SPY.• Do NOT include an intercept.• Use the full sample of data.1. (5pts) What is the optimal hedge ratio over the full sample of data? That is, for every dollarinvested in EEM, what would you invest in SPY?y = retsx['EEM US Equity'] X = retsx['SPY US Equity'] static_model = sm.OLS(y,X).fit() static_model.summary()for every dollar invested in EEM, I would invest 0.9257 in SPY2. (5pts) What is the mean, volatility, and Sharpe ratio of the hedged position, had we applied that hedge throughout the full sample? Annualize the statistics. def performanceMetrics(returns,annualization=1): metrics = pd.DataFrame(index=returns.columns) metrics['Mean'] = returns.mean() * annualization metrics['Vol'] = returns.std() * np.sqrt(annualization) metrics['Sharpe'] = (returns.mean() / returns.std()) * np.sqrt(annualization) metrics['Min'] = returns.min() metrics['Max'] = returns.max() return metrics rep_spy = retsx['SPY US Equity'].copy() rep_spy rep_spy['Static-IS-NoInt'] = static_model.fittedvalues rep_spy['Static-IS-NoInt'] rep_spy performanceMetrics(rep_spy['Static-IS-NoInt'] ,annualization=12) 3. (5pts) Does it have the same mean as EEM? Why or why not?Yes. EEM and SPY is highly correlated.4. (5pts) Suppose we estimated a multifactor regression where in addition to SPY, we had IWM as a regressor. Why might this regression be difficult to use for attribution or even hedging? ISHARES RUSSELL 2000 ETF is negatively correlated with EEM, thus it's more difficult to use IWM to replicate EEM, thus making it difficult for attribution or even hedgingAdaptive A.B Testing> Once you have more information, shouldn't you adapt?Recently at Ellevest, we launched one of our largest, and I'd venture our most 1-sided A/B test. In full support of confirming assumptions with data, it was easy to agree to A/B test but shortly after launch (in the magnitude of hours) I suggested that we start adjusting our 50/50 split in favor of the new B. Note this was shifting and not full on decision making. It was far from statistically significant but we were starting to see B outperform. I was surprised when this was met with resistance even as I tried to explain what Adaptive A/B Testing can achieve. I temporarily surrendered and wrapped up my day. 
I knew I had read about it in the context of clinical trials and thus brought it up with my wife who happens to have a penchant for remembering things, especially as they relate to medicine and statistics. She started bringing me study after study that described 'Christmas tree testing' and 'adaptive clinical trials'. Admittedly, I didn't read them, as the jargon was too much. But I felt I wanted the models to go a little more head to head and here we are.In this little experiment, we are looking at testing which model will get us a higher return* for our average participant. From a business perspective, there are a couple of goals we are striving for.First, we want to be confident in our decision in picking a final variant. This is already done with ind. t-tests and thus we will continue to use that.Second, we don't want to be 'wasting' resources and time to send a person to A if B has a higher return. Thus we will not use binary success/failure but look to see if the mean returns of using even split, vs adaptive trials. This was the crux of why I requested we start shifting our weightings.Third, we need to ensure the trial lengths are not statistically different. The logic here is that if the trial goes longer, even with a higher average return, the indecision is costing us by continuing to put cases in the less effective variant. This is the weakest of our three requirements and could be revisited.So without further ado, let's get to some exploring.*1 'higher return' can mean many things, higher revenue, higher mortaility, etc.# Yup. Always need some imports. import pandas as pd import numpy as np import scipy as sp import math ARB_CUTOFF = 6000 # to ensure we don't loop forever, allow 6k trials max """ Let's define our A and B variant outcomes. The assumptions we are making about the underlying population and how they react to the variants could have an impact on our test. Something to keep in mind before generalizing this too much. """ variant_a_mean_payout = 5 variant_b_mean_payout = 6 variant_a_stdev = 1 variant_b_stdev = 1 """ We will use these methods as our 'random draw'. This is equivlant to someone landing on variant A or variant B and realizing the outcome. """ ## Setup Variant Responses def get_variant_A(): return np.random.normal(loc=variant_a_mean_payout, scale=variant_a_stdev) def get_variant_B(): return np.random.normal(loc=variant_b_mean_payout, scale=variant_b_stdev) """ A/B test with 50/50 split. This is what most people are familiar with. """ def even_split_weight_pick(_a, _b, weighting): return np.random.rand() < weighting """ A/B test with Adaptive split. This is the adaptive split. 1. Start with random 50/50 until we have a valid pvalue. 2. If we have a pvalue start 'leaning' towards the variant that has the larger average return with the weight being decided by the pvalue/confidence. 3. Repeat. """ def adaptive_weight_pick(a, b, weighting): pvalue = sp.stats.ttest_ind(a, b).pvalue rand = np.random.rand() # if not enough information, be random 50/50 A/B if math.isnan(pvalue): return rand > weighting # reshape pvalue # p-value e [0, 1] # prob e [1, 0.5] conf = ((pvalue / 2 ) + 1) - pvalue # lean towards 'a' w/confidence if np.mean(a) > np.mean(b): return rand < conf else: return rand > conf """ Our basic trial runs. Here we are realizing invidual outcomes sequentially and seeing if we have statistical significance. If not, we keep the trial running. This achieves our 1st requirement for the trials in that we are confident in picking our variant. 
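Note on the adaptive picker above: it turns the running p-value p into a confidence conf = (p/2 + 1) - p = 1 - p/2, so an inconclusive comparison (p near 1) keeps roughly a 50/50 split, while a near-significant one (p near 0) sends almost all new participants to the variant with the higher observed mean.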
""" def run_trial(decision_function, weighting=0.5, stat_sig=0.05): p_value = 1 var_A = [] var_B = [] trial_counts = 0 while (p_value > stat_sig) and (trial_counts < ARB_CUTOFF): # implementation of decision_function if decision_function(var_A, var_B, weighting): var_A.append(get_variant_A()) else: var_B.append(get_variant_B()) p_value = sp.stats.ttest_ind(var_A, var_B).pvalue trial_counts = len(var_A) + len(var_B) if math.isnan(p_value): p_value = 1 # if not enough data, we have no significance # this is just me being bad at python... Taking the weighted average. average_A = np.mean(var_A) * len(var_A) average_B = np.mean(var_B) * len(var_B) average = (average_A + average_B) / trial_counts return [average, trial_counts] """ Run N number """ N = 1000 adaptive = [run_trial(adaptive_weight_pick) for _x in xrange(N)] even = [run_trial(even_split_weight_pick) for _x in xrange(N)] # pull out means to compare adaptive_x = [item[0] for item in adaptive] adaptive_trial = [item[1] for item in adaptive] # pull out trial counts to compare even_x = [item[0] for item in even] even_trial = [item[1] for item in even] # Conclusion x_pvalue = sp.stats.ttest_ind(adaptive_x, even_x).pvalue trial_pvalue = sp.stats.ttest_ind(adaptive_trial, even_trial).pvalue print("\n") print('----------') print("Requirement 2: Exploring trial mean return.") adaptive_sigificant = x_pvalue < 0.05 print("Is adaptive statistically significant? {r} (pvalue: {p})".format(r=(adaptive_sigificant), p=x_pvalue)) adaptive_greater = np.mean(adaptive_x) > np.mean(even_x) print("Is adaptive returning higher? {r} (A: {a} to E: {b})".format(r=(adaptive_greater), a=np.mean(adaptive_x), b=np.mean(even_x))) print('----------') print("\n") print('----------') print("Requirement 3: Exploring trial length.") adaptive_sigificant = trial_pvalue < 0.05 print("Is adaptive statistically significant? {r} (pvalue: {p})".format(r=(adaptive_sigificant), p=trial_pvalue)) adaptive_greater = np.mean(adaptive_trial) > np.mean(even_trial) print("Is adaptive longer? {r} (A: {a} to E: {b})".format(r=(adaptive_greater), a=np.mean(adaptive_trial), b=np.mean(even_trial))) print('----------') print("\n\n") # Take a look at mean return pd.DataFrame({ 'adaptive': adaptive_x, 'even': even_x }).hist(bins=50, sharex=True, figsize=(20,8)); # Take a look at trial length pd.DataFrame({ 'adaptive': adaptive_trial, 'even': even_trial }).hist(bins=N/30, sharex=True, figsize=(20,8));ConclusionCertainly not as cut and dry as I would have expected, but there is value in being adaptive! Feel free to copy and play with your own assumptions!Given that the trial is statistically longer, the actual comparisons in means requires the 50/50 split to run an extra 10 (difference_in_trial_length) trials but with a strict mean assuming the 'B' choice. This means the weighted sum is (as an example):```(5.48*18 + 6*10) / 28 = 5.6657 (50/50 with same number of samples)```This is still lower! But is it statistically significant?''' Generic adjustment for difference in trial length ''' difference_in_trial_length = int(np.mean(adaptive_trial) - np.mean(even_trial)) even_x_with_extension = even_x + [get_variant_B() for _x in xrange(difference_in_trial_length)] x_pvalue = sp.stats.ttest_ind(adaptive_x, even_x_with_extension).pvalue print('\n----------') print("Conclusion: Exploring trial mean return.") adaptive_sigificant = x_pvalue < 0.05 print("Is adaptive statistically significant? 
{r} (pvalue: {p})".format(r=(adaptive_sigificant), p=x_pvalue)) adaptive_greater = np.mean(adaptive_x) > np.mean(even_x) print("Is adaptive returning higher? {r} (A: {a} to E: {b})".format(r=(adaptive_greater), a=np.mean(adaptive_x), b=np.mean(even_x_with_extension))) print("\n")---------- Conclusion: Exploring trial mean return. Is adaptive statistically significant? True (pvalue: 5.24475430653e-27) Is adaptive returning higher? True (A: 5.68671806174 to E: 5.52396608122)![title](img/cover4.png) Copyright! This material is protected, please do not copy or distribute. by: ***Udemy course : Python Bootcamp for Data Science 2021 Numpy Pandas & Seaborn *** The Exercises for Module 12 Instruction: Please write your code after the phrase " Your codes begin here:" Q1. Your first task is to read a dataframe from a csv file form the path: ('data/ex5.csv') with the name df1? Then display this dataframe:Hint1: To read a csv file you will need to use the function **pd.read_csv()**.Hint2: To display the head of a dataframe you will need to use the function head().import pandas as pd ## Your codes begin here:Q2. Your second task is to make this dataframe ('df1') a multi-indexed dataframe by setiing its index to two columns which are: ('year' and 'quarters')? Then display the dataframe?**Hint1:** To set index you will need the function **set_index()**. and to pass the two columns in a list.**Hint2:** To save changes to the dataframe use the argument: **inplace = True**.## Your codes begin here:Q3. In this exercise, for the dataframe 'df1', you are required to move the 'quarters' from the index into the columns (which means to unstack this dataframe) and to save the modified dataframe to a new dataframe called 'df2'? Then display the new dataframe 'df2'?**Hint:** To unstack a dataframe you will need the function **unstack()**.## Your codes begin here:Q4. For the dataframe 'df2', reverse what you have done in Q3, i.e. move the quarters columns into the index again (moving columns to index is called stacking)? Then display the new dataframe 'df2'?**Hint:** To stack a dataframe you will need the function **stack()**.## Your codes begin here:Q5. In this dataframe named 'income' (which is created in the first code cell below): Make the categorical column with the label 'gender' two separate columns (This is called pivoting: transform one column into several columns)?**Hint1:** Do not forget to run the first cell below to create the dataframe first.**Hint2:** you will need the function **pivot()**. This function requires three arguments (**index**, **columns** and **values**).# Just run this cell to create the dataframe 'income': # Here you need to pivot the dataframe for the column 'gender' ## Your codes begin here:*** Solutions*** Q1. Your first task is to read a dataframe from a csv file form the path: ('data/ex5.csv') with the name df1? Then display this dataframe:import pandas as pd ## Your codes begin here: df1 = pd.read_csv('data/ex5.csv') df1Q2. Your second task is to make this dataframe ('df1') a multi-indexed dataframe by setiing its index to two columns which are: ('year' and 'quarters')? Then display the dataframe?## Your codes begin here: df1.set_index(['year', 'quarters'], inplace = True) df1Q3. In this exercise, for the dataframe 'df1', you are required to move the 'quarters' from the index into the columns (which means to unstack this dataframe) and to save the modified dataframe to a new dataframe called 'df2'? 
Then display the new dataframe 'df2'?## Your codes begin here: df2 = df1.unstack() df2Q4. For the dataframe 'df2', reverse what you have done in Q3, i.e. move the quarters columns into the index again (moving columns to index is called stacking)? Then display the new dataframe 'df2'?## Your codes begin here: df2.stack()Q5. In this dataframe named 'income' (which is created in the first code cell below): Make the categorical column with the label 'gender' two separate columns (This is called pivoting: transform one column into several columns)?# Just run this cell to create the dataframe 'income': income = pd.read_csv('data/ex6.csv') income # Here you need to pivot the dataframe for the column 'gender' ## Your codes begin here: income.pivot(index = 'year', columns = 'gender', values = 'income')No pandas podemos trabalhar com dois tipos de objetos principais:* Series()* DataFrame()* DatetimeIndex()Ambos os objetos possuem indexadores que definem a escala (data ou números naturais positivos)series = pd.Series([7,4,5,3], name = "nome") print(series) series_2 = 2 * series # operações aritméticas funcionam com series print(series_2) type(series)0 7 1 4 2 5 3 3 Name: nome, dtype: int64 0 14 1 8 2 10 3 6 Name: nome, dtype: int64Podemos trabalhar com datas no pandas, e criar uma lista (range) com base em critériosdatas = pd.date_range("20181231", periods = 10, freq = "M") print(datas) type(datas) df = pd.DataFrame(np.random.randn(10,3), # gera uma matriz de números aleatórios que seguem uma distribuição normal padrão index = datas, columns = ["data", "frame", "novo"]) print(df) type(df)data frame novo 2018-12-31 0.083490 1.330079 0.289239 2019-01-31 -0.393984 0.213833 -1.034380 2019-02-28 -0.998533 0.263467 1.182122 2019-03-31 0.351980 -0.675092 0.067555 2019-04-30 -0.925267 -0.114448 -1.857578 2019-05-31 -0.236701 0.803052 0.195022 2019-06-30 -0.191708 -0.140139 -0.384675 2019-07-31 0.379359 -0.164327 0.670691 2019-08-31 0.104657 0.309200 0.390564 2019-09-30 1.259590 -0.077988 0.650917Managing the grid from python As the grid is completely bi-directional, you can act on the grid from python. Here are some interesting features that have been implemented.%load_ext autoreload %autoreload 2 import os import json import pandas as pd import numpy as np import urllib.request as ur from copy import deepcopy as copy from ipyaggrid import Grid url = 'https://raw.githubusercontent.com/bahamas10/css-color-names/master/css-color-names.json' with ur.urlopen(url) as res: cnames = json.loads(res.read().decode('utf-8')) colors = [] for k in cnames.keys(): colors.append({'color':k, 'value':cnames[k]}) colors_ref = colors[:]Exporting data via pythoncss_rules=""" .color-box{ float: left; width: 10px; height: 10px; margin: 5px; border: 1px solid rgba(0, 0, 0, .2); } """ columnDefs = [ {'headerName': 'Color', 'field':'color', 'pinned': True, 'editable': True}, {'headerName': 'Code', 'field':'value', 'editable': False, 'cellRenderer': """ function(params){ return `
<div class="color-box" style="background-color:${params.value}"></div>${params.value}
` }"""} ] gridOptions = {'columnDefs':columnDefs, 'enableFilter':'true', 'enableSorting':'true', 'rowSelection':'multiple', } color_grid = Grid(width=400, height=250, css_rules=css_rules, grid_data=colors, grid_options=gridOptions, sync_on_edit=True, sync_grid=True, #default ) display(color_grid)You can use the `get_grid`, `get_selected_rows`, `get_selected_columns` (available only in rangeSelection mode) to get the data out of the grid even when deactivating the export mode.color_grid.get_grid() color_grid.get_selected_rows() color_grid.grid_data_out.get('grid')If you set `sync_grid=True` (default) then the grid data is automatically synced with `grid_data_out['grid']` as a dataframe.color_grid.grid_data_out.get('grid')Updating DatagridOptions = {'columnDefs':columnDefs, 'enableFilter':'true', 'enableColumnResize':'true', 'enableSorting':'true', } color_grid2 = Grid(width=500, height=250, css_rules=css_rules, quick_filter=True, show_toggle_edit=True, grid_data=colors_ref, grid_options=gridOptions) color_grid2 colors = colors_ref[:] colors.append({'color':'jupyterorange', 'value':'#f37626'}) color_grid2.update_grid_data(copy(colors)) # New data set corresponding to the good columns color_grid2.delete_selected_rows() color_grid2.grid_data_out['grid']Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/using-mlflow/train-deploy-pytorch/train-deploy-pytorch.png) Use MLflow with Azure Machine Learning to Train and Deploy PyTorch Image ClassifierThis example shows you how to use MLflow together with Azure Machine Learning services for tracking the metrics and artifacts while training a PyTorch model to classify MNIST digit images, and then deploy the model as a web service. You'll learn how to: 1. Set up MLflow tracking URI so as to use Azure ML 2. Create experiment 3. Instrument your model with MLflow tracking 4. Train a PyTorch model locally 5. Train a model on GPU compute on Azure 6. View your experiment within your Azure ML Workspace in Azure Portal 7. Create a Docker image from the trained model 8. Deploy the model as a web service on Azure Container Instance 9. Call the model to make predictions Pre-requisites Make sure you have completed the [Configuration](../../../configuration.ipnyb) notebook to set up your Azure Machine Learning workspace and ensure other common prerequisites are met.Also, install mlflow-azureml package using ```pip install mlflow-azureml```. Note that mlflow-azureml installs mlflow package itself as a dependency, if you haven't done so previously. Set-upImport packages and check versions of Azure ML SDK and MLflow installed on your computer. Then connect to your Workspace.import sys, os import mlflow import mlflow.azureml import mlflow.sklearn import azureml.core from azureml.core import Workspace print("SDK version:", azureml.core.VERSION) print("MLflow version:", mlflow.version.VERSION) ws = Workspace.from_config() ws.get_details()Set tracking URISet the MLFlow tracking URI to point to your Azure ML Workspace. The subsequent logging calls from MLFlow APIs will go to Azure ML services and will be tracked under your Workspace.mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())Create ExperimentIn both MLflow and Azure ML, training runs are grouped into experiments. 
Let's create one for our experimentation.experiment_name = "pytorch-with-mlflow" mlflow.set_experiment(experiment_name)Train model locally while logging metrics and artifactsThe ```scripts/train.py``` program contains the code to load the image dataset, and train and test the model. Within this program, the train.driver function wraps the end-to-end workflow.Within the driver, the ```mlflow.start_run``` starts MLflow tracking. Then, ```mlflow.log_metric``` functions are used to track the convergence of the neural network training iterations. Finally ```mlflow.pytorch.save_model``` is used to save the trained model in framework-aware manner.Let's add the program to search path, import it as a module, and then invoke the driver function. Note that the training can take few minutes.lib_path = os.path.abspath("scripts") sys.path.append(lib_path) import train run = train.driver()You can view the metrics of the run at Azure Portalprint(azureml.mlflow.get_portal_url(run))Train model on GPU compute on AzureNext, let's run the same script on GPU-enabled compute for faster training. If you've completed the the [Configuration](../../../configuration.ipnyb) notebook, you should have a GPU cluster named "gpu-cluster" available in your workspace. Otherwise, follow the instructions in the notebook to create one. For simplicity, this example uses single process on single VM to train the model.Create a PyTorch estimator to specify the training configuration: script, compute as well as additional packages needed. To enable MLflow tracking, include ```azureml-mlflow``` as pip package. The low-level specifications for the training run are encapsulated in the estimator instance.from azureml.train.dnn import PyTorch pt = PyTorch(source_directory="./scripts", entry_script = "train.py", compute_target = "gpu-cluster", node_count = 1, process_count_per_node = 1, use_gpu=True, pip_packages = ["azureml-mlflow", "Pillow==6.0.0"])Get a reference to the experiment you created previously, but this time, as Azure Machine Learning experiment object.Then, use ```Experiment.submit``` method to start the remote training run. Note that the first training run often takes longer as Azure Machine Learning service builds the Docker image for executing the script. Subsequent runs will be faster as cached image is used.from azureml.core import Experiment exp = Experiment(ws, experiment_name) run = exp.submit(pt)You can monitor the run and its metrics on Azure Portal.runAlso, you can wait for run to complete.run.wait_for_completion(show_output=True)Deploy model as web serviceTo deploy a web service, first create a Docker image, and then deploy that Docker image on inferencing compute.The ```mlflow.azureml.build_image``` function builds a Docker image from saved PyTorch model in a framework-aware manner. It automatically creates the PyTorch-specific inferencing wrapper code and specififies package dependencies for you.run.get_file_names()Then build a docker image using *runs:/<run.id>/model* as the model_uri path.Note that the image building can take several minutes.model_path = "model" azure_image, azure_model = mlflow.azureml.build_image(model_uri='runs:/{}/{}'.format(run.id, model_path), workspace=ws, model_name='pytorch_mnist', image_name='pytorch-mnist-img', synchronous=True)Then, deploy the Docker image to Azure Container Instance: a serverless compute capable of running a single container. You can tag and add descriptions to help keep track of your web service. 
[Other inferencing compute choices](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where) include Azure Kubernetes Service which provides scalable endpoint suitable for production use.Note that the service deployment can take several minutes.from azureml.core.webservice import AciWebservice, Webservice aci_config = AciWebservice.deploy_configuration(cpu_cores=2, memory_gb=5, tags={"data": "MNIST", "method" : "pytorch"}, description="Predict using webservice") # Deploy the image to Azure Container Instances (ACI) for real-time serving webservice = Webservice.deploy_from_image( image=azure_image, workspace=ws, name="pytorch-mnist-1", deployment_config=aci_config) webservice.wait_for_deployment()Once the deployment has completed you can check the scoring URI of the web service.print("Scoring URI is: {}".format(webservice.scoring_uri))In case of a service creation issue, you can use ```webservice.get_logs()``` to get logs to debug. Make predictions using web serviceTo make the web service, create a test data set as normalized PyTorch tensors. Then, let's define a utility function that takes a random image and converts it into format and shape suitable for as input to PyTorch inferencing end-point. The conversion is done by: 1. Select a random (image, label) tuple 2. Take the image and converting the tensor to NumPy array 3. Reshape array into 1 x 1 x N array * 1 image in batch, 1 color channel, N = 784 pixels for MNIST images * Note also ```x = x.view(-1, 1, 28, 28)``` in net definition in ```train.py``` program to shape incoming scoring requests. 4. Convert the NumPy array to list to make it into a built-in type. 5. Create a dictionary {"data", <list>} that can be converted to JSON string for web service requests.from torchvision import datasets, transforms import random import numpy as np test_data = datasets.MNIST('../data', train=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])) def get_random_image(): image_idx = random.randint(0,len(test_data)) image_as_tensor = test_data[image_idx][0] return {"data": elem for elem in image_as_tensor.numpy().reshape(1,1,-1).tolist()}Then, invoke the web service using a random test image. Convert the dictionary containing the image to JSON string before passing it to web service.The response contains the raw scores for each label, with greater value indicating higher probability. Sort the labels and select the one with greatest score to get the prediction. Let's also plot the image sent to web service for comparison purposes.%matplotlib inline import json import matplotlib.pyplot as plt test_image = get_random_image() response = webservice.run(json.dumps(test_image)) response = sorted(response[0].items(), key = lambda x: x[1], reverse = True) print("Predicted label:", response[0][0]) plt.imshow(np.array(test_image["data"]).reshape(28,28), cmap = "gray")You can also call the web service using a raw POST method against the web serviceimport requests response = requests.post(url=webservice.scoring_uri, data=json.dumps(test_image),headers={"Content-type": "application/json"}) print(response.text)Merge the controlled values This notebook provides the following functionality:* The controlled data should be added in existing empty columns, with the same name as the controlled vocabulary (e.g. 
invasionStage)* The mapping should be done on dataset= datasetName, verbatimValue = e.g verbatimInvasionStage* It would be nice to get errors if a mapping cannot be found for non-blank values...import numpy as np import pandas as pd concatenated = pd.read_csv('../data/processed/checklist.tsv', delimiter='\t', dtype=object)Single column mapping Writing a function for the execution of the single column mappingdef merge_control_vocabulary(concatenated, mapping, mappingname): """combine the controlled mapping with the names provided in the current version of the checklist """ merge_column = ''.join(['verbatim', ''.join([mappingname[0].upper(), mappingname[1:]])]) merged = pd.merge(concatenated[["datasetName", merge_column]], mapping, how='left', left_on=["datasetName", merge_column], right_on=['dataset', 'verbatimValue']) concatenated[mappingname] = merged["controlledValue"] return concatenatedList with the mappings that need to be done(cfr. https://github.com/inbo/alien-species-checklist/issues/64):mappings = ['invasionStage', 'introductionPathway', 'habitat', 'nativeRange']Loop over the mapping and updating the concatenated file:for term in mappings: print(term) mapping = pd.read_csv(''.join(["../data/vocabularies/", term, "-mapping.tsv"]), delimiter='\t') concatenated = merge_control_vocabulary(concatenated, mapping, term)invasionStage introductionPathway habitat nativeRangeMulti-column mapping When N (> 1) columns need to be mapped together, the join should take into account the combined set of names:term = 'presence' mapping = pd.read_csv(''.join(["../data/vocabularies/", term, "-mapping.tsv"]), delimiter='\t') merge_columns = [term for term in mapping.columns if 'verbatim' in term] merge_columns_mapping = ["dataset"] + merge_columns merge_columns_concat = ["datasetName"] + merge_columnsMerging for the entire set of columns available in the mapping:merged = pd.merge(concatenated[merge_columns_concat], mapping, how='left', left_on=merge_columns_concat, right_on=merge_columns_mapping) merge_columns_final = [''.join([term[8].lower(), term[9:]]) for term in merge_columns] concatenated[merge_columns_final] = merged[merge_columns_final]Saving the concatenated#concatenated.to_csv('../data/processed/checklist.tsv', sep='\t', dtype=object)Checkup Check if a mapping cannot be found for non-blank values: Single column mappings:for term in mappings: number_of_unmapped = sum(concatenated['verbatimNativeRange'].notnull() & concatenated['nativeRange'].isnull()) if number_of_unmapped == 0: message = 'Great!' else: message = 'Bummer' print(term, "has", str(number_of_unmapped), " badly mapped values. ", message)invasionStage has 0 badly mapped values. Great! introductionPathway has 0 badly mapped values. Great! habitat has 0 badly mapped values. Great! nativeRange has 0 badly mapped values. Great!Multi column mappingmerge_columns_mapping sum(concatenated[merge_columns].notnull().all(axis=1) & concatenated[merge_columns_final].isnull().all(axis=1))Measuring Similarity Between DocumentsWe are going to measure similarity between documents by representing them as a vector of their most significant words and then measuring the distance beween those vectors.So, we are going to represent our documents in a format called [TF-IDF using a library called Scikit Learn](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html?highlight=tfidfsklearn.feature_extraction.text.TfidfVectorizer). 
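As a quick sketch of what the code below computes (assuming scikit-learn's default, smoothed IDF weighting): each document $d$ becomes a vector whose entry for term $t$ is roughly

$$w_{t,d} = \mathrm{tf}_{t,d} \cdot \left(\ln\frac{1+N}{1+\mathrm{df}_t} + 1\right),$$

where $N$ is the number of documents and $\mathrm{df}_t$ is the number of documents containing $t$. The similarity between two such vectors $\mathbf{a}$ and $\mathbf{b}$ is their cosine,

$$\mathrm{sim}(\mathbf{a}, \mathbf{b}) = \frac{\mathbf{a} \cdot \mathbf{b}}{\lVert\mathbf{a}\rVert \, \lVert\mathbf{b}\rVert},$$

which is 1 for documents pointing in the same direction and close to 0 for documents that share few significant words.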
Code examples from - https://goodboychan.github.io/python/datacamp/natural_language_processing/2020/07/17/04-TF-IDF-and-similarity-scores.html Install to the REL 560 Environment- Pandas- Numpy- SciKit Learn - Matplotlibimport os import pandas as pd import numpy as np from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity # Load texts into a dataframe source_dir = "../data/Example_texts/history/NYT-Obituaries/" texts = [] for filename in os.listdir(source_dir): with open(os.path.join(source_dir, filename), 'r') as obit: content = obit.read() texts.append( { "doc_id": filename, "text": content } ) texts_df = pd.DataFrame(texts) texts_df.head() texts_df.reset_index(inplace=True) texts_df # Create Tf-IDF Vector Representation vectorizer = TfidfVectorizer() # Generate matrix of word vectors tfidf_matrix = vectorizer.fit_transform(texts_df['text']) print(tfidf_matrix.shape) cosine_sim = cosine_similarity(tfidf_matrix, tfidf_matrix) print(cosine_sim) corr_df = pd.DataFrame(cosine_sim) corr_df # import seaborn as sns import matplotlib.pyplot as plt corr_df.style.background_gradient(cmap ='viridis')\ .set_properties(**{'font-size': '8px'})Now that we have the correspondence, we can do a couple of things:- Get most similar documents- Get least similar documents- Get documents most similar to a particular documentpairs = corr_df.unstack().reset_index() # pairs_df.columns = ['doc_A', 'doc_B', 'similarity_measure'] pairs_df = pd.DataFrame(pairs) pairs_df.columns = ['Doc_A', 'Doc_B', 'Similarity_Score'] pairs_df # Clean out rows where matching self pairs_df = pairs_df[pairs_df['Doc_A'] != pairs_df['Doc_B']] pairs_df # https://stackoverflow.com/questions/48549637/pandas-removing-mirror-pairs-from-dataframe # df.loc[pd.DataFrame(np.sort(df[['A','B']],1),index=df.index).drop_duplicates(keep='first').index] unique_pairs = pairs_df.loc[pd.DataFrame(np.sort(pairs_df[['Doc_A', 'Doc_B']], 1), index=pairs_df.index).drop_duplicates(keep='first').index] unique_pairs # Get most similar documents def get_top_docs(sim_df, metadata, num_docs=10, rank='top'): sorted = sim_df.sort_values('Similarity_Score', ascending=True) if rank == 'top': sliced = sorted.tail(num_docs) elif rank == 'bottom': sliced = sorted.head(num_docs) else: return "Please use 'top' or 'bottom' for rank variable" sliced_named = sliced.merge(metadata, how="left", left_on = "Doc_A", right_on="index").merge(metadata, how="left", left_on="Doc_B", right_on="index") sliced_named.columns = ['Doc_A', 'Doc_B', 'Similarity_Score', 'Index', 'Doc_A_ID', 'IndexB', 'Doc_B_ID'] # print(sliced_named) top_docs_df = sliced_named[['Similarity_Score', 'Doc_A_ID', 'Doc_B_ID']] return top_docs_df get_top_docs(unique_pairs, texts_df[['index', 'doc_id']], rank='top') def get_similar_docs(title, sim_mx, metadata): idx = metadata.index[metadata['doc_id'] == title].tolist() # print(idx) # Get similarity scores sim_scores = list(enumerate(sim_mx[idx[0]])) # sort them sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True) # Get the top 10 (ignorning the 1 for self-matching) sim_scores = sim_scores[1:11] scores = pd.DataFrame(sim_scores) scores.columns = ['index', 'similarity_score'] # print(scores) title_index = [i[0] for i in sim_scores] matches = pd.DataFrame(metadata['doc_id'].iloc[title_index]).reset_index() # print(matches) matches = matches.merge(scores, how="left", on="index") return matches get_similar_docs('1870-Robert-E-Lee.txt', corr_df, texts_df)Day 12: Inheritance*Author: * ObjectiveToday, 
we're delving into Inheritance. Check out the attached tutorial for learning materials and an instructional video. TaskYou are given two classes, Person and Student, where Person is the base class and Student is the derived class. Completed code for Person and a declaration for Student are provided for you in the editor. Observe that Student inherits all the properties of Person.Complete the Student class by writing the following:- A Student class constructor, which has **$4$** parameters: 1. A string, **$firstName$** 2. A string **$lastName$** 3. An integer **$idNumber$** 4. An integer array (or vector) of test scores, **$scores$**- A char calculate() method that calculates a Student object's average and returns the grade character representative of their calculated average:\begin{bmatrix} Letter & Average(a)\\O & 90 \leq a \leq 100\\E & 80 \leq a < 90\\A & 70 \leq a < 80\\P & 55 \leq a < 70\\D & 40 \leq a < 55\\T & a < 40\end{bmatrix} Sample Input``` 81356272100 80``` Sample Output```Name: ID: 8135627Grade: O```class Person: def __init__(self, firstName, lastName, idNumber): self.firstName = firstName self.lastName = lastName self.idNumber = idNumber def printPerson(self): print("Name:", self.lastName + ",", self.firstName) print("ID:", self.idNumber) class Student(Person): # Class Constructor # # Parameters: # firstName - A string denoting the Person's first name. # lastName - A string denoting the Person's last name. # id - An integer denoting the Person's ID number. # scores - An array of integers denoting the Person's test scores. # # Write your constructor here def __init__(self,firstName, lastName, idNumber,testScores): super().__init__(firstName,lastName,idNumber) self.testScores = testScores # Function Name: calculate # Return: A character denoting the grade. 
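# Grade boundaries, per the table above: O for 90 <= a <= 100, E for 80 <= a < 90, A for 70 <= a < 80,
# P for 55 <= a < 70, D for 40 <= a < 55, and T below 40.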
# # Write your function here def calculate(self): total = 0 for testScore in self.testScores: total += testScore average = total / len(self.testScores) if 90 <= average <= 100: return "O" if 80 <= average < 90: return "E" if 70 <= average < 80: return "A" if 55 <= average < 70: return "P" if 40 <= average < 55: return "D" return "T" line = input().split() firstName = line[0] lastName = line[1] idNum = line[2] numScores = int(input()) # not needed for Python scores = list( map(int, input().split()) ) s = Student(firstName, lastName, idNum, scores) s.printPerson() print("Grade:", s.calculate())Name: ID: 8135627 Grade: O[DeepSphere]: a spherical convolutional neural network[DeepSphere]: https://github.com/SwissDataScienceCenter/DeepSphere[](https://perraudin.info), [](http://deff.ch), , Figures for the paper%load_ext autoreload %autoreload 2 %matplotlib inline import os import numpy as np from scipy.interpolate import interp1d from scipy import sparse import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import matplotlib import healpy as hp import pygsp import svgutils.compose as sc import IPython.display as ipd import hyperparameters from deepsphere import utils, plot, models import experimental.cnn os.environ["CUDA_VISIBLE_DEVICES"] = "" plt.rcParams['figure.figsize'] = (17, 5) # (9, 4) for matplotlib notebook matplotlib.rcParams.update({'font.size': 10}) pathfig = './figures/' os.makedirs(pathfig, exist_ok=True)1 Graph 1.1 Full spherefig = plt.figure(figsize=[8,6]) ax = fig.add_subplot(111, projection='3d') G = utils.healpix_graph(nside=8, nest=True) G.plotting.update(vertex_size=10) G.plot(ax=ax,edges=False) # Get rid of the ticks ax.set_xticks([]) ax.set_yticks([]) ax.set_zticks([]) # Get rid of the panes ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) # Get rid of the spines ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) ax.set_title('Healpix sampling, Nside=8') plt.savefig(os.path.join(pathfig, "healpix_sampling_8.pdf"), bbox_inches='tight') fig = plt.figure(figsize=[8,6]) ax = fig.add_subplot(111, projection='3d') G = utils.healpix_graph(nside=4, nest=True) G.plotting.update(vertex_size=20) G.plot(ax=ax) # Get rid of the ticks ax.set_xticks([]) ax.set_yticks([]) ax.set_zticks([]) # Get rid of the panes ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) # Get rid of the spines ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) ax.set_title('Graph, full sphere, Nside=4') plt.savefig(os.path.join(pathfig, "healpix_graph_4.pdf"), bbox_inches='tight')1.2 Half the spherenside = 4 npoints = hp.nside2npix(nside) indexes = hp.reorder(np.array(list(range(npoints))),n2r=True)[:npoints//2] G = utils.healpix_graph(nside=nside, nest=True, indexes=indexes) G.plotting['elevation']=90 G.plotting['azimuth']=0 G.plotting.update(vertex_size=50) fig = plt.figure(figsize=[8,8]) ax = fig.add_subplot(111, projection='3d') # plt.cm.Blues_r # Highlight the node with a degree of 7 on the full sphere G2 = utils.healpix_graph(nside=nside, nest=True) snode = np.arange(0,G2.N)[G2.d==7] sindex = set(indexes) snode2 = [el for el in snode if el in sindex] hl_index = [np.argmin(np.abs(indexes-el)) for el in snode2] sig = 
np.zeros([G.N]) sig[hl_index]=1 G.plot_signal(1-sig, ax=ax,colorbar=False) # G.plot(ax=ax) # Get rid of the ticks ax.set_xticks([]) ax.set_yticks([]) ax.set_zticks([]) # Get rid of the panes ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) # Get rid of the spines ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) # Remove the title # ax.set_title('Graph, half sphere, Nside=4') ax.set_title('') # Zoom in c = 0.6 plt.axis([c*min(G.coords[:,0]), c*max(G.coords[:,0]), c*min(G.coords[:,1]), c*max(G.coords[:,1]) ]) fig.savefig(os.path.join(pathfig, "half_graph_{}.pdf").format(nside), bbox_inches='tight')2 PoolingA better figure is made in the `figure_pooling` notebook.order = 4 index = np.arange(hp.nside2npix(order)) + 1 mask = np.zeros_like(index, dtype=np.bool) mask[:order**2] = 1 index *= mask index = index.astype(np.float) index[index==0] = hp.UNSEEN hp.mollview(index, title='', nest=True, cbar=False,cmap=None, xsize=1600) plt.savefig(os.path.join(pathfig, "pooling-order4.pdf"), bbox_inches='tight') order = 2 index = np.arange(hp.nside2npix(order)) + 1 mask = np.zeros_like(index, dtype=np.bool) mask[:order**2] = 1 index *= mask index = index.astype(np.float) index[index==0] = hp.UNSEEN hp.mollview(index, title='', nest=True, cbar=False,cmap=None, xsize=1600) plt.savefig(os.path.join(pathfig, "pooling-order2.pdf"), bbox_inches='tight') order = 1 index = np.arange(hp.nside2npix(order)) + 1 mask = np.zeros_like(index, dtype=np.bool) mask[:order**2] = 1 index *= mask index = index.astype(np.float) index[index==0] = hp.UNSEEN hp.mollview(index, title='', nest=True, cbar=False,cmap=None, xsize=1600) plt.savefig(os.path.join(pathfig, "pooling-order1.pdf"), bbox_inches='tight') index = np.array(list(range(12))) hp.mollview(index, title='', nest=True, cbar=False,cmap=None, xsize=1600) plt.savefig(os.path.join(pathfig, "12parts.pdf"), bbox_inches='tight') orders = [1,2,4] order_max = max(orders) npix = hp.nside2npix(order_max) index = np.zeros([npix]) for order in orders: index[:order**2] = index[:order**2]+1 index.astype(np.float) index[index==0] = hp.UNSEEN hp.mollview(index, title='', nest=True, cbar=False,cmap=None, xsize=1600) plt.savefig(os.path.join(pathfig, "part_sphere.pdf"), bbox_inches='tight') def make_ball(map_test1, cmap=plt.cm.gray_r, sub=None, vmin =-0.5, vmax=1.5): cmap.set_under('w') cmap.set_bad('lightgray') dot_size=10 rot = (0,30,345) hp.visufunc.orthview(map=map_test1, half_sky=True, title='', rot=rot, cmap=cmap, cbar=False, hold=True, nest=True, min=vmin, max=vmax, notext=True, sub=sub); theta, phi = hp.pix2ang(hp.npix2nside(len(map_test1)), range(len(map_test1)), nest=True); hp.projscatter(theta, phi, c='k', s=dot_size); hp.graticule(); hp.graticule(dmer=360,dpar=360,alpha=1, rot=(0,0,15), local=True); hp.graticule(dmer=360,dpar=360,alpha=1, rot=(0,0,195), local=True); orders = [1,2,4] order_max = max(orders) npix = hp.nside2npix(order_max) index = np.zeros([npix]) for order in orders: index[:order**2] = index[:order**2]+1 index.astype(np.float) index[index==0] = hp.UNSEEN make_ball(index, cmap=plt.cm.RdBu_r, vmin=0, vmax=np.max(index)) plt.savefig(os.path.join(pathfig, "part_sphere2.pdf"), bbox_inches='tight')3 Fourier basisLet us display a few Fourier modes on the healpix map.n_eigenvectors = 16 G = utils.healpix_graph(nside=16, lap_type='normalized', nest=True, dtype=np.float64) 
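# Only the first n_eigenvectors = 16 eigenvectors of the normalized graph Laplacian are computed here,
# which keeps the partial eigendecomposition cheap; each mode is plotted below together with the degree
# l and order |m| of the spherical harmonic it corresponds to.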
G.compute_fourier_basis(n_eigenvectors=n_eigenvectors) fig = plt.figure(figsize=(8, 5)) cm = plt.cm.RdBu_r cm.set_under('w') l, m = 0, 0 lm = [] for idx in range(n_eigenvectors): lm.append([l,m]) m += 1 if m > l: l += 1 m = -l ind = np.array([ 0, 1, 3, 2, 4, 5, 7, 6, 8, 10, 12, 9, 15, 14, 11, 13]) for idx in range(n_eigenvectors): l,m = lm[ind[idx]] hp.mollview(G.U[:, idx], title='Mode {}: $\ell$={}, $|m|$={}'.format(idx, l, np.abs(m)), nest=True, sub=(np.sqrt(n_eigenvectors), np.sqrt(n_eigenvectors), idx+1), max=np.max(np.abs(G.U[:, :n_eigenvectors])), min=-np.max(np.abs(G.U[:, :n_eigenvectors])), cbar=False, cmap=cm) hp.graticule(verbose=False) plt.savefig(os.path.join(pathfig, "eigenvectors.pdf"), bbox_inches='tight')4 Convolution on graphs: show filters in spectral and spatial domains# taus = [5, 10, 20, 50] taus = [5, 20, 50] matplotlib.rcParams.update({'font.size': 14}) # fig, ax = plt.subplots(1,len(taus), figsize=(17, 4)) fig, ax = plt.subplots(1,len(taus), figsize=(12, 4)) for i,tau in enumerate(taus): hf = pygsp.filters.Heat(G, tau=tau) hf.plot(eigenvalues=False, sum=False, ax=ax[i]) ax[i].set_xlabel('Graph eigenvalues', fontsize=18) if i is not 0: ax[i].set_ylabel('') else: ax[i].set_ylabel('Spectral response', fontsize=18) ax[i].set_title('$t={}$'.format(tau), fontsize=22) fig.tight_layout(rect=[0, 0.05, 1, 0.92]) # plt.suptitle('Filter response in the graph spectral domain', fontsize=24) plt.savefig(os.path.join(pathfig, "gaussian_filters_spectral.pdf"), bbox_inches='tight') matplotlib.rcParams.update({'font.size': 10}) hf = pygsp.filters.Heat(G,tau=taus) def arcmin2rad(x): return x / 60 / 360 * 2 * np.pi def gaussian_smoothing(sig, sigma, nest=True): if nest: sig = hp.reorder(sig, n2r=True) smooth = hp.sphtfunc.smoothing(sig, sigma=arcmin2rad(sigma)) if nest: smooth = hp.reorder(smooth, r2n=True) return smooth _, center = plot.get_index_equator(hp.npix2nside(G.N), radius=20) ind0 = center sig = np.zeros(G.N) sig[ind0] = 1 conv = hf.analyze(sig) fig = plt.figure(figsize=(12, 5)) rel_diff = [] matplotlib.rcParams.update({'font.size': 18}) cm = plt.cm.seismic # cm = plt.cm.jet cm.set_under('w') m = 0 #[315, 465, 670, 1080] for i, (tau, sigma) in enumerate(zip(taus, [315, 670, 1080])): with utils.HiddenPrints(): smooth = gaussian_smoothing(sig, sigma, nest=True) m = max(m, max(smooth)) hp.mollview(conv[:, i], title='$t={}$'.format(tau), nest=True, min=-m, max=m, cbar=False, rot=(180,0,180), sub=(2, len(taus), i+1), cmap=cm) hp.mollview(smooth, title='$\sigma={}$'.format(sigma), nest=True, min=-m, max=m, cbar=False, rot=(180,0,180), sub=(2,len(taus),i+len(taus)+1), cmap=cm) diff = (conv[:, i]-smooth) rel_diff.append(np.linalg.norm(diff)/np.linalg.norm(smooth)) # hp.mollview(diff, # title='', # nest=True, # cbar=False, # sub=(3, len(taus), i+2*len(taus)+1)) with utils.HiddenPrints(): hp.graticule(); print(rel_diff) plt.savefig(os.path.join(pathfig, "gaussian_filters_sphere.pdf"), bbox_inches='tight') matplotlib.rcParams.update({'font.size': 10}) hf = pygsp.filters.Heat(G,tau=taus) order = 20 matplotlib.rcParams.update({'font.size': 20}) fig = plt.figure( figsize=(12, 5.5)) plot.plot_filters_gnomonic(filters=hf,order=order, title='', graticule=True) # plt.suptitle('Gnomonic projection of a convoluted delta', fontsize=27) plt.savefig(os.path.join(pathfig, "gaussian_filters_gnomonic.pdf"), bbox_inches='tight') matplotlib.rcParams.update({'font.size': 10}) matplotlib.rcParams.update({'font.size': 14}) fig = plt.figure( figsize=(12, 4)) plot.plot_filters_section(hf, order=order, 
xlabel='', ylabel='', title='', marker='o') # plt.suptitle('Section of a convoluted delta', fontsize=22) plt.savefig(os.path.join(pathfig, "gaussian_filters_section.pdf"), bbox_inches='tight') matplotlib.rcParams.update({'font.size': 10}) plot.plot_index_filters_section(hf,order=order) plt.savefig(os.path.join(pathfig, "index_plotting_order{}_nside16.pdf".format(order)), bbox_inches='tight')5 Experimental results: classification of convergence maps in two model classessigma=3 deepsphere_result_fcn = np.load('results/deepsphere/deepsphere_results_list_sigma{}_FCN.npz'.format(sigma))['data'][-15:] deepsphere_result_cnn = np.load('results/deepsphere/deepsphere_results_list_sigma{}_CNN.npz'.format(sigma))['data'][-15:] hist_result = np.load('results/histogram/histogram_results_list_sigma{}.npz'.format(sigma))['data'][-15:] psd_result = np.load('results/psd/psd_results_list_sigma{}.npz'.format(sigma))['data'] cnn2d_result_fcn_reg = np.load('results/2dcnn/reg3_deepsphere_results_list_sigma{}_FCN-2d.npz'.format(sigma))['data'][:15] cnn2d_result_cnn_reg = np.load('results/2dcnn/reg3_deepsphere_results_list_sigma{}_CNN-2d.npz'.format(sigma))['data'][:15] cnn2d_result_fcn_big_reg = np.load('results/2dcnn/reg3_deepsphere_results_list_sigma{}_FCN-2d-big.npz'.format(sigma))['data'][:15] cnn2d_result_cnn_big_reg = np.load('results/2dcnn/reg3_deepsphere_results_list_sigma{}_CNN-2d-big.npz'.format(sigma))['data'][:15] def get_xy(result, order): x = [] y = [] for d in result: if d[0]==order: x.append(d[1]) y.append(d[2]) x = np.array(x) y = np.array(y) a = np.argsort(x) x = x[a] y = y[a] return x, y for order in[1, 2, 4]: x_hist, y_hist = get_xy(hist_result, order) x_deepsphere_fcn, y_deepsphere_fcn = get_xy(deepsphere_result_fcn, order) x_deepsphere_cnn, y_deepsphere_cnn = get_xy(deepsphere_result_cnn, order) x_psd, y_psd = get_xy(psd_result, order) x_cnn2d_fcn_reg, y_cnn2d_fcn_reg = get_xy(cnn2d_result_fcn_reg, order) x_cnn2d_cnn_reg, y_cnn2d_cnn_reg = get_xy(cnn2d_result_cnn_reg, order) x_cnn2d_fcn_big_reg, y_cnn2d_fcn_big_reg = get_xy(cnn2d_result_fcn_big_reg, order) x_cnn2d_cnn_big_reg, y_cnn2d_cnn_big_reg = get_xy(cnn2d_result_cnn_big_reg, order) acc_hist = (1-y_hist)*100 acc_deepsphere_fcn = (1-y_deepsphere_fcn)*100 acc_deepsphere_cnn = (1-y_deepsphere_cnn)*100 acc_psd = (1-y_psd)*100 acc_cnn2d_fcn_reg = (1-y_cnn2d_fcn_reg)*100 acc_cnn2d_cnn_reg = (1-y_cnn2d_cnn_reg)*100 acc_cnn2d_fcn_big_reg = (1-y_cnn2d_fcn_big_reg)*100 acc_cnn2d_cnn_big_reg = (1-y_cnn2d_cnn_big_reg)*100 plt.figure(figsize=[6, 4]) plt.plot(x_deepsphere_fcn, acc_deepsphere_fcn,'g.-', label='DeepSphere (FCN variant)') plt.plot(x_deepsphere_cnn, acc_deepsphere_cnn,'g.--', label='DeepSphere (CNN variant)') plt.plot(x_cnn2d_fcn_reg, acc_cnn2d_fcn_reg,'c.-', label='2D ConvNet (FCN variant)') plt.plot(x_cnn2d_cnn_reg, acc_cnn2d_cnn_reg,'c.--', label='2D ConvNet (CNN variant)') # plt.plot(x_cnn2d_fcn_big_reg, acc_cnn2d_fcn_big_reg,'m.-', label='2DCNN (FCN variant)') # plt.plot(x_cnn2d_cnn_big_reg, acc_cnn2d_cnn_big_reg,'m.--', label='2DCNN (CNN variant)') plt.plot(x_hist, acc_hist,'r.-', label='linear SVM on histogram') plt.plot(x_psd, acc_psd,'b.-', label='linear SVM on PSD') plt.legend(loc=3, prop={'size': 12}) plt.xlabel('Relative noise level') plt.ylabel('Accuracy in %') npix = (1024//order)**2 part = 12 * order**2 plt.title('Order $o={}$: {:,} pixels per samples (1/{} sphere)'.format(order, npix, part)) if order==1: plt.ylim( (77, 101)) plt.savefig(os.path.join(pathfig, "result_order{}.pdf".format(order)), bbox_inches='tight') # 
deepsphere_result_params = np.load('results/deepsphere/deepsphere_results_list_sigma{}_params.npz'.format(sigma))['data'] # def make_tab(order, results): # print('-'*48) # print('| {} | {} |'.format('Network'.ljust(30),'Accuracy % ')) # print('-'*48) # for result in results: # if int(result[0])==int(order): # print('| {} | {:0.2f} |'.format(result[3].ljust(30), 100*(1-float(result[2])))) # print('-'*48) # make_tab(4, deepsphere_result_params) # make_tab(2, deepsphere_result_params)6 Experimental data: show two convergence maps, one per modelimg1 = hp.read_map('data/same_psd/kappa_omega_m_0p31_s_2.fits') img2 = hp.read_map('data/same_psd/kappa_omega_m_0p26_s_2.fits') img1 = hp.reorder(img1, r2n=True) img2 = hp.reorder(img2, r2n=True) Nside = 1024 img1 = hp.ud_grade(img1, nside_out=Nside, order_in='NESTED') img2 = hp.ud_grade(img2, nside_out=Nside, order_in='NESTED') cmin = min(np.min(img1), np.min(img2)) cmax = max(np.max(img1), np.max(img2)) cmax = -2*cmin # _ = plt.hist(img1,bins=100) # hp.mollview(img1, title='Map 1, omega_m=0.31, pk_norm=0.82, h=0.7', nest=True, min=cmin, max=cmax) # hp.mollview(img2, title='Map 2, omega_m=0.26, sigma_8=0.91, h=0.7', nest=True, min=cmin, max=cmax) def arcmin2rad(x): return x / 60 / 360 * 2 * np.pi def gaussian_smoothing(sig, sigma, nest=True): if nest: sig = hp.reorder(sig, n2r=True) smooth = hp.sphtfunc.smoothing(sig, sigma=arcmin2rad(sigma)) if nest: smooth = hp.reorder(smooth, r2n=True) return smooth sigma=3 fig = plot.zoom_mollview(img1, cmin=cmin, cmax=cmax) plt.suptitle('Sample from class 1, $\Omega_m=0.31$, , $\sigma_8=0.82$',y=0.78, fontsize=18); # omega_m=0.31, pk_norm=0.82, h=0.7 fig = plot.zoom_mollview(gaussian_smoothing(img1,sigma), cmin=cmin, cmax=cmax) plt.suptitle('Smoothed map from class 1, $\Omega_m=0.31$, $\sigma_8=0.82$',y=0.78, fontsize=18) plt.savefig(os.path.join(pathfig, "smooth_map_class_1.pdf"), bbox_inches='tight') # omega_m=0.31, pk_norm=0.82, h=0.7 fig = plot.zoom_mollview(img2, cmin=cmin, cmax=cmax) _ = plt.suptitle('Sample from class 2, $\Omega_m=0.26$, $\sigma_8=0.91$',y=0.78, fontsize=18) # omega_m=0.26, sigma_8=0.91, h=0.7 fig = plot.zoom_mollview(gaussian_smoothing(img2, sigma), cmin=cmin, cmax=cmax) _ = plt.suptitle('Smoothed map from class 2, $\Omega_m=0.26$, $\sigma_8=0.91$',y=0.78, fontsize=18) plt.savefig(os.path.join(pathfig, "smooth_map_class_2.pdf"), bbox_inches='tight') # omega_m=0.26, sigma_8=0.91, h=0.77 Power spectral densities (PSD) of the convergence mapssigma = 3 compute = False if compute: def psd(x): '''Spherical Power Spectral Densities''' hatx = hp.map2alm(hp.reorder(x, n2r=True)) return hp.alm2cl(hatx) data_path = 'data/same_psd/' ds1 = np.load(data_path+'smoothed_class1_sigma{}.npz'.format(sigma))['arr_0'] ds2 = np.load(data_path+'smoothed_class2_sigma{}.npz'.format(sigma))['arr_0'] psds_img1 = [psd(img) for img in ds1] psds_img2 = [psd(img) for img in ds2] np.savez('results/psd_data_sigma{}'.format(sigma), psd_class1=psds_img1, psd_class2=psds_img2) else: psds_img1 = np.load('results/psd_data_sigma{}.npz'.format(sigma))['psd_class1'] psds_img2 = np.load('results/psd_data_sigma{}.npz'.format(sigma))['psd_class2'] matplotlib.rcParams.update({'font.size': 14}) l = np.array(range(len(psds_img1[0]))) plot.plot_with_std(l,np.stack(psds_img1)*l*(l+1), label='class 1, $\Omega_m=0.31$, $\sigma_8=0.82$, $h=0.7$', color='r') plot.plot_with_std(l,np.stack(psds_img2)*l*(l+1), label='class 2, $\Omega_m=0.26$, $\sigma_8=0.91$, $h=0.7$', color='b') plt.legend(fontsize=16); plt.xlim([11, np.max(l)]) 
plt.ylim([1e-6, 5e-4]) plt.yscale('log') plt.xscale('log') plt.xlabel('$\ell$: spherical harmonic index', fontsize=18) plt.ylabel('$C_\ell \cdot \ell \cdot (\ell+1)$', fontsize=18) plt.title('Power Spectrum Density, 3-arcmin smoothing, noiseless, Nside=1024', fontsize=18); plt.savefig(os.path.join(pathfig, "psd_sigma{}.pdf".format(sigma)), bbox_inches='tight') matplotlib.rcParams.update({'font.size': 10})8 Checking convergence of SVM (w.r.t. the number of augmented training samples)sigma = 3 order = 2 sigma_noise = 1.5 # path = 'results/psd/' # name = '40sim_1024sides_{0}arcmin_{2:.1f}noise_{1}order.npz'.format(sigma, order, sigma_noise) path = 'results/histogram/' name = '40sim_1024sides_{2}noise_{1}order_{0}sigma.npz'.format(sigma, order, sigma_noise) filepath = os.path.join(path,name) data = np.load(filepath)['arr_0'] matplotlib.rcParams.update({'font.size': 24}) plt.plot(data[0], data[1], linewidth=4) plt.plot(data[0], data[2], linewidth=4) plt.plot(data[0][-1], data[3],'x', markersize=10) plt.legend(['Training','Validation', 'Testing']) plt.xlabel('Number of training samples') plt.ylabel('Error rate in %') # plt.title('Error for the histogram + SVM, order: {}, noise level: {}'.format(order, sigma_noise)) plt.savefig(os.path.join(pathfig, "hist_error_order{}_noise{}.pdf".format(order, sigma_noise)), bbox_inches='tight') matplotlib.rcParams.update({'font.size': 10})9 Plotting learned graph convolutional filtersNside = 1024 order = 2 # 1,2,4,8 correspond to 12,48,192,768 parts of the sphere. sigma_noise = 2 sigma = 3 ntype = 'FCN' EXP_NAME = '40sim_{}sides_{:0.1f}noise_{}order_{}sigma_{}'.format(Nside, sigma_noise, order, sigma, ntype) params = hyperparameters.get_params(12*40*0.8*order*order, EXP_NAME, order, Nside, ntype) model = models.deepsphere(**params) folder = 'figures/filters/{}/'.format(EXP_NAME) os.makedirs(folder, exist_ok=True) layer = 5 try: model.plot_chebyshev_coeffs(layer, ind_in=range(5), ind_out=range(10)) plt.savefig('{}/layer{}_coefficients.png'.format(folder, layer), dpi=100) except ValueError: raise ValueError('If checkpoints/{} is empty, run the demo_part_sphere notebook first.'.format(EXP_NAME)) model.plot_filters_spectral(layer, ind_in=range(5), ind_out=range(10)); plt.savefig('{}/layer{}_spectral.png'.format(folder, layer), dpi=100) matplotlib.rcParams.update({'font.size': 16}) model.plot_filters_section(layer, ind_in=range(6), ind_out=range(4), title=''); plt.savefig(os.path.join(pathfig, "section_filter_last.pdf".format(order)), bbox_inches='tight') matplotlib.rcParams.update({'font.size': 10}) plt.rcParams['figure.figsize'] = (8, 12) model.plot_filters_gnomonic(layer, ind_in=range(6), ind_out=range(4), title=''); plt.savefig(os.path.join(pathfig, "gnonomic_filter_last.pdf".format(order)), bbox_inches='tight', dpi=100) plt.rcParams['figure.figsize'] = (17, 5) # (9, 4) for matplotlib notebook matplotlib.rcParams.update({'font.size': 16}) model.plot_filters_section(1, ind_out=range(4), title=''); fig.savefig('{}/layer{}_section.png'.format(folder, layer), dpi=100) plt.savefig(os.path.join(pathfig, "section_filter_first.pdf".format(order)), bbox_inches='tight') matplotlib.rcParams.update({'font.size': 10})10 Border effect of the graph convolution (part of the sphere)matplotlib.rcParams['image.cmap'] = 'RdBu_r' nside = 16 indexes = range(nside**2) G = utils.healpix_graph(nside=nside, indexes=indexes) G.estimate_lmax() tau = 30 hf = pygsp.filters.Heat(G, tau=tau) index1 = 170 index2 = 64+2*16+2*4+2 sig1 = np.zeros([nside**2]) sig2 = np.zeros([nside**2]) 
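# Two Kronecker deltas are placed at pixels index1 and index2 of the partial HEALPix graph and diffused
# with the heat kernel hf defined above; plotting the two responses side by side visualizes how the graph
# convolution behaves near the border of the patch (the "border effect" this section illustrates).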
sig1[index1] = 1 sig2[index2] = 1 sig1 = hf.filter(sig1) sig2 = hf.filter(sig2) m = max(np.max(sig1), np.max(sig2)) limits = [-m, m] # sig = np.arange(nside**2) fig = plt.figure(figsize=[12,6]) ax1 = fig.add_subplot(121, projection='3d') G.plot_signal(sig1, ax=ax1, colorbar=False,limits=limits) # Get rid of the ticks ax1.set_xticks([]) ax1.set_yticks([]) ax1.set_zticks([]) # Get rid of the panes ax1.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax1.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax1.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) # Get rid of the spines ax1.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) ax1.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) ax1.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) # Zoom a = 0.35 ax1.set_xlim(-a,a) ax1.set_ylim(-a,a) # Remove the title # ax.set_title('Graph, half sphere, Nside=4') ax1.set_title('', FontSize=16) ax1.view_init(elev=10, azim=45) ax2 = fig.add_subplot(122, projection='3d') G.plot_signal(sig2, ax=ax2, limits=limits, colorbar=False) # Get rid of the ticks ax2.set_xticks([]) ax2.set_yticks([]) ax2.set_zticks([]) # Get rid of the panes ax2.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax2.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax2.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) # Get rid of the spines ax2.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) ax2.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) ax2.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) # Zoom a = 0.35 ax2.set_xlim(-a,a) ax2.set_ylim(-a,a) # Remove the title # ax.set_title('Graph, half sphere, Nside=4') ax2.set_title('', FontSize=16) ax2.view_init(elev=10, azim=45) plt.tight_layout(pad=0) plt.savefig(os.path.join(pathfig, "border_effects.pdf"), bbox_inches='tight') # for nside in [16, 32, 64, 128, 256, 512, 1024, 2048]: # print('Time for nside: {}'.format(nside)) # %timeit G = utils.healpix_graph(nside=nside)11 Natural projection of Healpix to the planenside = 8 indexes = range(nside**2) G = utils.healpix_graph(nside=nside, indexes=indexes) sig1 = np.arange(nside**2) fig = plt.figure(figsize=[12,6]) ax1 = fig.add_subplot(121, projection='3d') ax1.axis('off') G.plot_signal(sig1, ax=ax1, colorbar=False, edges=True) # Zoom a = 0.3 ax1.set_xlim(-a, a) ax1.set_ylim(-a, a) ax1.view_init(elev=10, azim=45) ax1.set_title('Pixels on HEALPix (1/12 of the sphere)') ax2 = fig.add_subplot(122) ind = experimental.cnn.build_index(3).flatten().astype(np.int) ax2.imshow(np.flipud(sig1[ind].reshape([nside, nside]))) ax2.set_title('Pixels on the plane') ax2.get_xaxis().set_visible(False) ax2.get_yaxis().set_visible(False) fig.savefig(os.path.join(pathfig, 'projection_healpix_plane.pdf'))12 Filtering speedNumbers measured in the `spherical_vs_graph` notebook.results = np.load('results/filtering_speed.npz') fig, ax = plt.subplots(figsize=(5.9, 3.8)) npix = [hp.nside2npix(nside) for nside in results['nsides']] ax.loglog(npix, results['times_sphere'][:, ::-1], '--', marker='.') ax.loglog(npix, results['times_graph'][:, ::-1], '-', marker='.') ax.loglog(npix, results['times_part'][:, 1, 0], '-.', marker='.') ax.loglog(npix, results['times_part'][:, 1, 2], '-.', marker='.') #ax.loglog(npix, np.array(npix)/1e6, ':', color='#808080') #ax.loglog(npix, (np.array(npix)/1e6)**1.5, ':', color='#808080') labels = ['Sph. harm., $\ell_{{max}}$ = {}$N_{{side}}$'.format(lm) for lm in results['lmax'][::-1]] labels += ['Graph, poly. 
order K={}'.format(order) for order in results['orders'][::-1]] labels += ['Partial graph 1/{}, K={}'.format(v, 15) for v in [12, 192]] #labels += [r'Asymptotic $\mathcal{O}(N_{side})$'] #labels += [r'Asymptotic $\mathcal{O}(N_{side}^{3/2})$'] ax.legend(labels, loc='upper left') for i, nside in enumerate(results['nsides']): x = npix[i] y = results['times_sphere'][i, -1] * 2 ax.text(x, y, '$N_{{side}}$\n{}'.format(nside), horizontalalignment='center') ax.set_ylim(0.6 * results['times_part'][:,1,:].min(), 8e4)#6e2 * results['times_sphere'].max()) #ax.set_xlim(0.5 * min(npix), 2 * max(npix)) ax.set_xlabel('Number of pixels') ax.set_ylabel('Processing time [s]') fig.tight_layout() fig.savefig(os.path.join(pathfig, 'filtering_speed.pdf'))13 Eigenvalues of graph Laplacian are grouped (like the degree of the spherical harmonics)n_eigenvalues = 50 nside = 16 graph = utils.healpix_graph(nside=nside, lap_type='normalized', nest=True, dtype=np.float64) graph.compute_fourier_basis(n_eigenvectors=n_eigenvalues) fig, ax = plt.subplots(figsize=(6, 2.5)) ax.plot(graph.e, '.-') idx = 1 xticks = [idx] for l in range(1, 7): ax.text(idx + l - 2.3, graph.e[idx + l] + 0.005, '$\ell$ = {}'.format(l)) idx += 2*l + 1 xticks.append(idx) ax.set_xlabel('Eigenvalue $\lambda$') ax.set_ylabel('Value') ax.set_xticks(xticks) fig.tight_layout() fig.savefig(os.path.join(pathfig, 'graph_eigenvalues.pdf'))14 Correspondance of subspaces 14.1 Graph eigenvectors vs sampled spherical harmonicsnside = 16 lmax = 8 def compute_orhtogonality(nside, lmax, lap_type='normalized'): n_harmonics = np.cumsum(np.arange(1, 2*lmax+2, 2)) harmonics = utils.compute_spherical_harmonics(nside, lmax=lmax) graph = utils.healpix_graph(nside, lap_type=lap_type, nest=True, dtype=np.float64) n_eigenvectors = min(n_harmonics[-1], graph.N) graph.compute_fourier_basis(n_eigenvectors) C = harmonics.T @ graph.U return C, n_harmonics C, n_harmonics = compute_orhtogonality(nside, lmax) fig, ax = plt.subplots(figsize=(5, 4)) im = ax.imshow(np.abs(C), cmap=plt.cm.gist_heat_r, aspect='equal') ax.set_xlabel('Graph Fourier modes') ax.set_ylabel('Spherical harmonics') ax.set_xticks(n_harmonics - 0.5) ax.set_yticks(n_harmonics - 0.5) ax.set_xticklabels(n_harmonics) ax.set_yticklabels(n_harmonics) for l in range(4, lmax+1): ax.text(n_harmonics[l-1] + l - 3.9, n_harmonics[l-1] - 1, '$\ell={}$'.format(l)) ax.grid(True) fig.colorbar(im) fig.tight_layout() fig.savefig(os.path.join(pathfig, 'subspace_harmonics_eigenvectors.pdf'));14.2 SHT of the graph eigenvectorsnsides = [4, 8, 16] spectral_content = dict() for nside in nsides: lmax = 3 * nside - 1 n_harmonics = np.cumsum(np.arange(1, 2*lmax+2, 2))[-1] graph = utils.healpix_graph(nside, lap_type='normalized', nest=True, dtype=np.float64) graph.compute_fourier_basis(n_eigenvectors=n_harmonics) cl = np.empty((n_harmonics, lmax+1)) for i in range(n_harmonics): eigenvector = hp.reorder(graph.U[:, i], n2r=True) cl[i] = hp.sphtfunc.anafast(eigenvector, lmax=lmax) spectral_content[nside] = np.empty((lmax+1, lmax+1)) start = 0 for ell in range(lmax+1): end = start + (2 * ell + 1) spectral_content[nside][ell] = np.sum(cl[start:end,:], axis=0) start = end fontsize = matplotlib.rcParams['font.size'] matplotlib.rcParams['font.size'] = 16 #fig, axes = plt.subplots(1, len(nsides), figsize=(6, 3)) matplotlib.rcParams.update({'font.size': 16}) fig, axes = plt.subplots(1, len(nsides), figsize=(12, 6)) for ax, (nside, sc) in zip(axes.flatten(), spectral_content.items()): lmax = 3*nside-1 sc = sc / sc[0, 0] im = ax.imshow(sc, 
cmap=plt.cm.gist_heat_r) #ax.set_title(rf'$N_{{side}}={nside}$') ax.text(0.35, 0.8, rf'$N_{{side}}={nside}$', fontsize=20, transform=ax.transAxes) #ax.set_xlabel('SH degree $\ell$') ax.text(0.5, -0.1, 'SH degree $\ell$', horizontalalignment='center', transform=ax.transAxes) #ax.yaxis.set_visible(False) #ax.set_yticklabels([]) ax.set_yticks([]) #ax.set_yticks([0, lmax]) ax.set_xticks([0, lmax]) axes[0].set_yticks([0]) #axes[0].set_ylabel('subspaces of graph eigenvectors') axes[0].set_ylabel('graph eigenvectors', fontsize=20) fig.subplots_adjust(wspace=0.1, right=0.9) ax_cbar = fig.add_axes([0.92, 0.25, 0.02, 0.5]) fig.colorbar(im, ax=ax, cax=ax_cbar, ticks=[0, 1]) #fig.tight_layout() # Doesn't work with color bar. fig.savefig(os.path.join(pathfig, 'subspace_harmonics_eigenvectors_v2.pdf')); matplotlib.rcParams['font.size'] = fontsize;15 Convolution basis: Chebyshev polynomials vs monomials1. Orthogonality of the basis in the spectral domain.1. Orthogonality of the basis in the vertex domain.1. Expected shape of the filters given a distribution over the coefficients.Todo:* compute the expectation analyticallymatplotlib.rcParams.update({'font.size': 10}) # Order of Chebyshev polynomials. Degree of monomials degree = 7 n_points = 1000 graph = pygsp.graphs.Path(64) # Irregular graph. Otherwise the Chebyshev polynomials are exactly orthogonal. graph.W.data = 0.5 + 0.1 * np.random.uniform(size=graph.W.data.shape) graph = pygsp.graphs.Graph(pygsp.utils.symmetrize(graph.W)) #plt.imshow(graph.W.toarray()) graph.estimate_lmax() graph.set_coordinates('line1D') fig = plt.figure(figsize=(8, 5)) # Chebyshev #x = np.linspace(0, 1.05*graph.lmax, 1000) x = np.linspace(0, graph.lmax, n_points) coefficients = np.identity(degree) f = pygsp.filters.Chebyshev(graph, coefficients) Y = f.evaluate(x) ax = plt.subplot2grid((2, 3), (0, 0), colspan=2) ax.plot(x / graph.lmax * 2 - 1, Y.T) ax.legend(['k={}'.format(k) for k in range(degree)]) ax.set_xlabel('Eigenvalue $\lambda$') ax.set_ylabel('Polynomial $T_k(\lambda)$') ax.set_title('Chebyshev basis (spectral domain)') ax.set_xticks([-1, 0, 1]) ax.set_yticks([-1, 0, 1]) ax.grid() C = Y @ Y.T ax = plt.subplot2grid((2, 3), (0, 2)) im = ax.imshow(np.abs(C), cmap=plt.cm.gist_heat_r) fig.colorbar(im, ax=ax) ax.set_title('Cross-correlation') # Monomials x = np.linspace(-1, 1, n_points) Y = np.empty((degree, len(x))) for k in range(degree): Y[k] = x**k ax = plt.subplot2grid((2, 3), (1, 0), colspan=2) plt.plot(x, Y.T) ax.legend(['k={}'.format(k) for k in range(degree)]) ax.set_xlabel('Eigenvalue $\lambda$') ax.set_ylabel('Monomial $\lambda^k$') ax.set_title('Monomial basis (spectral domain)') ax.set_xticks([-1, 0, 1]) ax.set_yticks([-1, 0, 1]) ax.grid() C = Y @ Y.T ax = plt.subplot2grid((2, 3), (1, 2)) im = ax.imshow(np.abs(C), cmap=plt.cm.gist_heat_r) fig.colorbar(im, ax=ax) ax.set_title('Cross-correlation') fig.tight_layout() fig.savefig(os.path.join(pathfig, 'polynomial_bases_spectrum.pdf')) fig = plt.figure(figsize=(8, 5)) # Chebyshev Y = f.localize(graph.N // 2) ax = plt.subplot2grid((2, 3), (0, 0), colspan=2, fig=fig) for k in range(degree): graph.plot_signal(Y[k], ax=ax) ax.legend(['k={}'.format(k) for k in range(degree)]) ax.set_ylim(1.1*Y.min(), 1.1*Y.max()) ax.set_xlim(graph.N // 2 - degree, graph.N // 2 + degree) ax.set_xticks(np.arange(graph.N // 2 - degree + 1, graph.N // 2 + degree, 2)) ax.set_xticklabels('$v_{{{}}}$'.format(i) for i in range(- degree + 1, degree, 2)) ax.set_title('Chebyshev basis (localized on vertex $v_0$)') ax.set_ylabel('($T_k(L) 
\delta_0)_j$') C = Y @ Y.T ax = plt.subplot2grid((2, 3), (0, 2), fig=fig) im = ax.imshow(np.abs(C), cmap=plt.cm.gist_heat_r) fig.colorbar(im, ax=ax) ax.set_title('Cross-correlation') # Monomials Y = np.empty((degree, graph.N)) s = np.zeros(graph.N) s[graph.N // 2] = 1 L = graph.L / graph.lmax * 2 - sparse.identity(graph.N) for k in range(degree): Y[k] = L**k @ s ax = plt.subplot2grid((2, 3), (1, 0), colspan=2, fig=fig) for k in range(degree): graph.plot_signal(Y[k], ax=ax) ax.legend(['k={}'.format(k) for k in range(degree)]) ax.set_ylim(1.1*Y.min(), 1.1*Y.max()) ax.set_xlim(graph.N // 2 - degree, graph.N // 2 + degree) ax.set_xticks(np.arange(graph.N // 2 - degree + 1, graph.N // 2 + degree, 2)) ax.set_xticklabels('$v_{{{}}}$'.format(i) for i in range(- degree + 1, degree, 2)) ax.set_title('Monomial basis (localized on vertex $v_0$)') ax.set_ylabel('($L^k \delta_0)_j$') C = Y @ Y.T ax = plt.subplot2grid((2, 3), (1, 2), fig=fig) im = ax.imshow(np.abs(C), cmap=plt.cm.gist_heat_r) fig.colorbar(im, ax=ax) ax.set_title('Cross-correlation') fig.tight_layout() fig.savefig(os.path.join(pathfig, 'polynomial_bases_vertex.pdf')) degrees = [5, 20, 100] n_realizations = int(1e4) n_points = 100 x = np.linspace(-1, 1, n_points) fig, axes = plt.subplots(1, 2, sharey=True, figsize=(8.5, 3)) for degree in degrees: coefficients = np.random.normal(0, 1, size=(degree, n_realizations)) #coefficients = np.random.uniform(-1, 1, size=(order, n_realizations)) # Monomials. y = np.zeros((n_realizations, n_points)) for k, c in enumerate(coefficients): y += np.outer(c, x**k) plot.plot_with_std(x, y, ax=axes[0]) # Chebyshev polynomials. graph = pygsp.graphs.Path(n_points) graph.estimate_lmax() filters = pygsp.filters.Chebyshev(graph, coefficients) y = filters.evaluate((x + 1) / 2 * graph.lmax) plot.plot_with_std(x, y, ax=axes[1]) legend = ['degree $K={}$'.format(degree) for degree in degrees] axes[0].legend(legend, loc='upper center') axes[1].legend(legend, loc='upper center') axes[0].set_xlabel('Scaled eigenvalue $x$') axes[1].set_xlabel('Scaled eigenvalue $x$') axes[0].set_ylabel(r'Expected filter value $\mathbf{E}_\theta[ g_\theta(x) ]$') axes[0].set_title('Expected sum of monomials') axes[1].set_title('Expected sum of Chebyshev polynomials') axes[0].text(0, -7, r'$g_\theta(x) = \sum_{k=0}^K \theta_k x^k$', horizontalalignment='center') axes[1].text(0, -6, r'$g_\theta(x) = \sum_{k=0}^K T_k(x)$', horizontalalignment='center') axes[1].text(0, -9.5, r'$T_k(x) = 2xT_{k-1}(x) - T_{k-2}(x), T_1(x) = x, T_0(x) = 1$', horizontalalignment='center') fig.tight_layout() fig.savefig(os.path.join(pathfig, 'expected_filters.pdf')) # x = np.arange(-1,1,0.001) # order = 20 # c = np.random.randn(order,100) # f = [] # for coeffs in c.T: # s = 0*x # for o, coeff in enumerate(coeffs): # s += coeff*(x**o) # f.append(s) # f = np.array(f) # ax = plot.plot_with_std(x, f) # ax.set_title('Monomial - order {}'.format(order)); # x = np.arange(-1,1,0.001) # order = 20 # c = np.random.randn(order,100) # f = [] # p = [] # p.append(x**0) # p.append(x**1) # for o in range(2, order): # p.append(2*x*p[o-1]-p[o-2]) # for coeffs in c.T: # s = x**0 # for o, coeff in enumerate(coeffs): # s += coeff*p[o] # f.append(s) # f = np.array(f) # ax = plot.plot_with_std(x, f) # ax.set_title('Chebyshev - order {}'.format(order)); x = np.arange(-1,1,0.001) order =20 p = [] p.append(x**0) p.append(x**1) for o in range(2,order): p.append(2*x*p[o-1]-p[o-2]) for o in range(order): plt.plot(x, p[o]) for o in range(5,12): plt.plot(x, 
np.sum(np.array(p[0:o])**2/(o+0.5)*2,axis=0)) plt.plot(x, x**0) o = 10 plt.plot(x, np.sum(np.array(p[0:o])**2,axis=0)) plt.plot(x, (o+0.5)/2*(x**0))05_07: Temperature Anomaly Solutionimport math import collections import urllib import numpy as np import pandas as pd import matplotlib.pyplot as pp %matplotlib inline import getweather def smooth(array, window=10, mode='valid'): return np.correlate(array, np.ones(window)/window, mode) # get all historical data for New York, stacked into array station = 'NEW YORK' allyears = np.arange(1880, 2020) alldata = np.vstack([getweather.getyear(station, ['TMIN','TMAX'], year) for year in allyears]) # compute (TMIN + TMAX)/2, averaged over days in every year allavg = np.nanmean(0.5 * (alldata['TMIN'] + alldata['TMAX']), axis=1) allavg.shape allyears.index(1945) # find the index of values 1945 and 1955 in allyears list(allyears).index(1945), list(allyears).index(1955) midcentury = np.nanmean(allavg[65:75]) midcentury pp.plot(allyears, allavg - midcentury) pp.plot(allyears[4:-4], smooth(allavg - midcentury, 9, 'valid')) # compute and plot the temperature anomaly time series for any station allyears = np.arange(1880, 2020) def plotanomaly(station): # grab the data alldata = np.vstack([getweather.getyear(station, ['TMIN','TMAX'], year) for year in allyears]) # make yearly averages, and then the midcentury average allavg = np.nanmean(0.5 * (alldata[:,:]['TMIN'] + alldata[:,:]['TMAX']), axis=1) midcentury = np.nanmean(allavg[65:75]) # plot with smoothing, adding a label that we can show in a legend pp.plot(allyears[4:-4], smooth(allavg - midcentury, 9, 'valid'), label=station) # set a reasonable range pp.axis(ymin=-3,ymax=3) plotanomaly('NEW YORK') plotanomaly('PASADENA') plotanomaly('MINNEAPOLIS') pp.legend()**Installing the Libraries**import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from pandas import plotting**Reading the Dataset**dataset = pd.read_csv('https://raw.githubusercontent.com/KeshavAman/file/main/trends.csv') dataset.head(7) dataset.describe().T dataset.info() print('Rows and columns of dataset are ',dataset.shape)Rows and columns of dataset are (26955, 5)**Checking if there is any NULL data**dataset.isnull().any().any()**Pairplot of Dataset**plt.figure(figsize=(12,8)) sns.pairplot(dataset) plt.title('Pairplot of Dataset', fontsize = 15) plt.show()**Heatmap of Dataset**sns.heatmap(dataset.corr(), annot = True) plt.title('Heatmap of Dataset', fontsize = 15) plt.show()**Preprocessing of Dataset**from sklearn.preprocessing import LabelEncoder dataset['location'] = LabelEncoder().fit_transform(dataset['location']) dataset['category'] = LabelEncoder().fit_transform(dataset['category']) dataset['query'] = LabelEncoder().fit_transform(dataset['query']) dataset.head()**Splitting of Dataset**x = dataset.iloc[:,[1,-1]].values x = StandardScaler().fit_transform(x) x.shape**Using K-Means Clustering*****The Elbow Method***from sklearn.cluster import KMeans wcss = [] for i in range (1,5): km = KMeans(n_clusters = i, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0) km.fit(x) wcss.append(km.inertia_) plt.figure(figsize = (12,8)) plt.plot(range(1,5), wcss) plt.title('The Elbow Method', fontsize = 15) plt.xlabel('No of clusters') plt.ylabel('wcss') plt.grid() plt.show() km = KMeans(n_clusters = 3, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0) y_means = km.fit_predict(x) plt.scatter(x[y_means == 0,0], x[y_means == 0,1], s = 100, c = 'pink', label = 'Label 1') plt.scatter(x[y_means 
== 1,0], x[y_means == 1,1], s = 100, c = 'orange', label = 'Label 2') plt.scatter(x[y_means == 2,0], x[y_means == 2,1], s = 100, c = 'green', label = 'Label 3') plt.scatter(km.cluster_centers_[:,0], km.cluster_centers_[:,1], s = 50, c = 'black', label = 'Centroid') plt.title('K-Means Clustering', fontsize = 20) plt.xlabel('location') plt.ylabel('query') plt.legend() plt.show()**Using Hierarchical Clustering*****Using Dendrogram***from scipy.cluster import hierarchy as sch dendrogram = sch.dendrogram(sch.linkage(x, method = 'ward')) plt.title('Dendrogram', fontsize = 20) plt.xlabel('location') plt.ylabel('query') plt.show() from sklearn.cluster import AgglomerativeClustering hc = AgglomerativeClustering(n_clusters = 3, affinity = 'euclidean', linkage = 'ward') y_hc = hc.fit_predict(x) plt.scatter(x[y_hc == 0,0], x[y_hc == 0,1], s = 100, c = 'pink', label = 'Label 1') plt.scatter(x[y_hc == 1,0], x[y_hc == 1,1], s = 100, c = 'orange', label = 'Label 2') plt.scatter(x[y_hc == 2,0], x[y_hc == 2,1], s = 100, c = 'yellow', label = 'Label 3') plt.scatter(km.cluster_centers_[:,0], km.cluster_centers_[:,1], s = 50, c = 'black', label = 'Centroid') plt.title('Hierarchical Clustering', fontsize = 20) plt.xlabel('location') plt.ylabel('query') plt.legend() plt.show()Parallel Coordinates with PythonThe aim of this notebook is to explain how the program pyParallelCoordinate works and to show some examples. Parallel-coordinate plots are a common way of visualizing high-dimensional geometry and analyzing multivariate data.![Example of PC](images/examplePC.png) To build this kind of plot for a large amount of data, I use the modules `pandas` and `bokeh`: `pandas` provides the DataFrame, while `bokeh` renders the graph. CodeNow I briefly explain the necessary functions. First of all, we require the following modules, which are described in the file *requirements.txt*:- unittest- bokeh- bokeh selenium- phantomjs- pandas- mathThen we only need to import the file that contains the class `ParallelCoordinates`:from ParallelCoordinates import ParallelCoordinatesand create a `ParallelCoordinates` object with the following arguments:- `file_name`: the path to the file for which we want to draw the parallel coordinates.- `header`: `None` by default; set it to `'infer'` if there is a header at the beginning of the file.- `my_delimiter`: the delimiter of the data, ',' by default.For example:fun_pc = ParallelCoordinates('data/FUN.BB20002.tsv', my_delimiter='\t') iris_pc = ParallelCoordinates('data/iris.csv', header='infer')The attributes of this class are:- my_df: the data frame obtained from the file- dict_categorical_var: an initially empty dictionary that will hold the categorical variables and their levels- my_df_normalize: the normalized version of my_df, computed when required- parallel_plot: the final plotThe constructor mainly calls two functions:- `read_file`- `convert_categorical_to_number`Let us look at how these functions are implemented. **`read_file`**import pandas as pd def read_file(self, file_name: str, header, my_delimiter: str)-> pd.DataFrame: """ This function reads the file and builds the data frame. It also uses 'dropna' to discard rows whose values are missing in all of the columns. 
Moreover, if there are no header, it creates some names for the variables with the following structure --> 'Var-N' :param file_name: the corresponding name (with the path, if it isn´t at the source root) :param header: it can be 'None' (as default) or 'infer' if the file contain the header in the firs line :param my_delimiter: the type of delimiter that split the data in the file. As default is ',' :return: The corresponding data frame """ if header is not None and not 'infer': raise Exception("The header value must be 'None' or 'infer'") with open(file_name) as csv_file: df = pd.read_csv(csv_file, sep=my_delimiter, header=header) df.dropna(how='all', inplace=True) if header is None: my_header = list() for i in range(0, len(df.columns)): my_header.append('Var-'+str(i)) df.columns = my_header return dfAs we can see, the flexibility of the function is huge, since we can use different type of file, as csv, tsv... because we can edit the delimiter of the data, as well as if there is the header at the beggining of the file.Now, we can show some examples of these function:df_iris = iris_pc.read_file(file_name='data/iris.csv', header='infer', my_delimiter=',') print(df_iris[1:10])sepal_length sepal_width petal_length petal_width species 1 4.9 3.0 1.4 0.2 setosa 2 4.7 3.2 1.3 0.2 setosa 3 4.6 3.1 1.5 0.2 setosa 4 5.0 3.6 1.4 0.2 setosa 5 5.4 3.9 1.7 0.4 setosa 6 4.6 3.4 1.4 0.3 setosa 7 5.0 3.4 1.5 0.2 setosa 8 4.4 2.9 1.4 0.2 setosa 9 4.9 3.1 1.5 0.1 setosaAnd, in the case that there is no header:df_fun = fun_pc.read_file(file_name='data/FUN.BB20002.tsv', header=None, my_delimiter='\t') print(df_fun[1:10])Var-0 Var-1 Var-2 1 1.316355 0.039683 23.878968 2 0.940633 0.000000 37.283147 3 0.940633 0.000000 37.283147 4 1.351411 0.037327 22.461739 5 1.283916 0.000000 23.888448 6 1.119206 0.000000 30.811572 7 1.265007 0.000000 28.819444 8 1.266593 0.000000 23.983659 9 1.079389 0.000000 35.775862**`convert_categorical_to_number`**The following step is process this data and convert the data type string to categorical; and then codify the categorical value in numeric. Since, the constructor contain this two functions, we can access directly to the data frame that is stored in the object `df_iris`.def convert_categorical_to_number(self)->None: """ It converts the columns which the type of data are string to categorical. Also, save the variable and the levels in a dictionary (dict_categorical_var) and then transform the categorical data to numeric code :return: """ for col in self.my_df.columns: if type(self.my_df[col].values.tolist()[0]) is str: self.my_df[col] = pd.Categorical(self.my_df[col]) values_of_cat = dict(enumerate(self.my_df[col].cat.categories)) self.dict_categorical_var[col] = values_of_cat self.my_df[col] = self.my_df[col].cat.codes print(iris_pc.my_df[1:10])sepal_length sepal_width petal_length petal_width species 1 4.9 3.0 1.4 0.2 0 2 4.7 3.2 1.3 0.2 0 3 4.6 3.1 1.5 0.2 0 4 5.0 3.6 1.4 0.2 0 5 5.4 3.9 1.7 0.4 0 6 4.6 3.4 1.4 0.3 0 7 5.0 3.4 1.5 0.2 0 8 4.4 2.9 1.4 0.2 0 9 4.9 3.1 1.5 0.1 0Also, the variable and the different values of the categorical data is stored in the attribute `dict_categorical_var`:print(iris_pc.dict_categorical_var){'species': {0: 'setosa', 1: 'versicolor', 2: 'virginica'}}**`plot`**After we read and process the data, the next step is to plot. For doing that we have the function `plot`:def plot(self, normalize=False, width=500, height=500, title='Parallel-Coordinates', show=True, notebook=False)->None: """ The function plot the data frame as Parallel Coordinates graph. 
For that, it requires the function get_multi_line_plot. In the case, that we want the normalize data, it compute both and show both thanks to a tab. Also, it verifies if we are working or notebook or not. :param normalize: Boolean that indicates if the data must be normalize or not. In the case of yes, the function show both :param width: The width measure for the figure. As default is 500 :param height: The height measure for the figure. As default is 500 :param title: The tittle for the figure. As default is 'Parallel-Coordinates' :param show: Boolean that indicate if we want to show the plot in a browser :param notebook: Boolean that indicate if we are using a notebook, in order to show the plot :return: """ if notebook: output_notebook() else: output_file(title + ".html") if normalize: self.my_df_normalize = self.normalize_data_frame(self.my_df) p1 = self.get_multi_line_plot(self.my_df_normalize, width, height, title) tab1 = Panel(child=p1, title="Normalize") p2 = self.get_multi_line_plot(self.my_df, width, height, title) tab2 = Panel(child=p2, title="No normalize") self.parallel_plot = Tabs(tabs=[tab1, tab2]) else: self.parallel_plot = self.get_multi_line_plot(self.my_df, width, height, title) if show: bk.show(self.parallel_plot)This function permit to plot the data, normalize or not, and show it.It requires another two functions:- `my_df_normalize`- `get_multi_line_plot` **`my_df_normalize`**def normalize_data_frame(self, df: pd.DataFrame) -> pd.DataFrame: """ Normalize a data frame. It requires that the data in the input data frame must be numeric :param df: The data frame that we want to normalize :return: Corresponding normalize data frame """ df_norm = df.copy() for col in df_norm: total = sum(df_norm[col]) df_norm[col] = df_norm[col]/total return df_normMainly, this function compute the normalize data frame.df_iris_norm= iris_pc.normalize_data_frame(iris_pc.my_df) print(df_iris_norm[1:10])sepal_length sepal_width petal_length petal_width species 1 0.005590 0.006549 0.002483 0.001112 0.0 2 0.005362 0.006985 0.002306 0.001112 0.0 3 0.005248 0.006767 0.002661 0.001112 0.0 4 0.005705 0.007859 0.002483 0.001112 0.0 5 0.006161 0.008513 0.003015 0.002225 0.0 6 0.005248 0.007422 0.002483 0.001669 0.0 7 0.005705 0.007422 0.002661 0.001112 0.0 8 0.005020 0.006330 0.002483 0.001112 0.0 9 0.005590 0.006767 0.002661 0.000556 0.0As we see at the beginning, there is also and attribute compute in the case that we order normalize plot --> `my_df_normalize` **`get_multi_line_plot`**The last function that use `plot` is `get_multi_line_plot`. It create a figure in order to seem as possible to a Parallel Coordinates, and add the necessary information.def plot(self, normalize=False, width=500, height=500, title='Parallel-Coordinates', show=True, notebook = False)->None: """ The function plot the data frame as Parallel Coordinates graph. For that, it requires the function get_multi_line_plot. In the case, that we want the normalize data, it compute both and show both thanks to a tab. Also, it verifies if we are working or notebook or not. :param normalize: Boolean that indicates if the data must be normalize or not. In the case of yes, the function show both :param width: The width measure for the figure. As default is 500 :param height: The height measure for the figure. As default is 500 :param title: The tittle for the figure. 
As default is 'Parallel-Coordinates' :param show: Boolean that indicate if we want to show the plot in a browser :param notebook: Boolean that indicate if we are using a notebook, in order to show the plot :return: """ if notebook: output_notebook() else: output_file() if normalize: bk.output_file("slider.html") self.my_df_normalize = self.normalize_data_frame(self.my_df) p1 = self.get_multi_line_plot(self.my_df_normalize, width, height, title) tab1 = Panel(child=p1, title="Normalize") p2 = self.get_multi_line_plot(self.my_df, width, height, title) tab2 = Panel(child=p2, title="No normalize") self.parallel_plot = Tabs(tabs=[tab1, tab2]) else: self.parallel_plot = self.get_multi_line_plot(self.my_df, width, height, title) if show: bk.show(self.parallel_plot) iris_pc.plot(notebook=True) # Move the cursor to the bottom to see the labelMoreover, there is an option to plot normalize data:iris_pc.plot(notebook=True, normalize = True)**`save`**Finally, I am going to explain the functions that are related to save the plot:- `save`- `file_name_with_ext_and_path`- `my_path`def save(self, format='html', file_name='Parallel-Coordinates', path=None)->None: """ This function allows to save the plot in a specific format. :param format: The format that we want for our file :param file_name: The corresponding name of the file :param path: The path where we want to store the plot, as default we assign the actual directory and create new one in there; 'results' :return: """ if path is None: path = self.my_path() valid_format = ['html', 'png', 'svg', 'all'] if format not in valid_format: raise Exception('The format is incorrect') if format == 'html' or format == 'all': file_name = self.file_name_with_ext_and_path(file_name, 'html', path) save(self.parallel_plot, filename=file_name, title=title) if format == 'png' or format == 'all': file_name = self.file_name_with_ext_and_path(file_name, 'png', path) be.export_png(self.parallel_plot, filename=file_name) if format == 'svg' or format == 'all': file_name = self.file_name_with_ext_and_path(file_name, 'svg', path) self.parallel_plot.output_backend = "svg" be.export_svgs(self.parallel_plot, filename=file_name)We can save the plot in different format:- html- png- svgDepends of the format this function use differente methods. Moreover, at the beginning, the method set as default the current path and create a directory callled 'results'. For that, there is a methods called `my_path()` **`my_path`**def my_path(self)->str: """ Obtain the path where, as default, we want to store the data :return: The path correspond to the source root plus another directory; 'results' """ path = os.getcwd() + '/results' if not os.path.exists(path): os.makedirs(path) return pathAn example of the use:print(iris_pc.my_path())/Users/juanlu/Documents/Universidad/PAB/ParallelCoordinates/notebook/results**`file_name_with_ext_and_path`**After, it checks if the `file_name` contains the corresponding extension with the function `file_name_with_ext_and_path`def file_name_with_ext_and_path(self, file_name: str, format: str, path: str)->str: """ The objective of this function is to add the corresponding path and extension to the name which we want to store the plot. In case that the extension is added, it checks if both correspond. :param file_name: the file name which we want to store the plot. 
:param format: The corresponding format {.html, .png, .svg} :param path: The path where we want to store the data :return: """ list_fn = file_name.split('.') if len(list_fn) == 1: file_name_plus_extension = list_fn[0] + '.' + format elif list_fn[1]!=format: file_name_plus_extension = list_fn[0] + '.' + format else: file_name_plus_extension = file_name return path + '/' + file_name_plus_extensionSome examples are:print(iris_pc.file_name_with_ext_and_path('iris', 'svg', 'path/')) print(iris_pc.file_name_with_ext_and_path('iris.html', 'html', 'path/')) print(iris_pc.file_name_with_ext_and_path('iris.html', 'png', 'path/'))path//iris.svg path//iris.html path//iris.pngNow, we can store the data and check if we do correctly:iris_pc.save(file_name='iris.png', format='png')![Resulst of Iris](results/iris.png)Since we have seen how is implemented this class, now we can plot three different data:- iris.csv- FUN.BB20002.tsv- MaF14PF_M10.txt Irisiris_pc.plot(notebook = True)Funfun_pc.plot(notebook = True, normalize = True)MaF14PF_M10This file contain large amount of data. Because these reason, we can see that the range of colous of `viridis` is not enough, so there is going plot only in one colour.ma_pc = ParallelCoordinates(file_name='data/MaF14PF_M10.txt', my_delimiter=' ') ma_pc.plot(notebook=True)Generation of speckle pattern using Zemax's Grid sag surface *Please feel free to [e-mail](mailto:) any corrections, comments and suggestions to the author ([](http://indranilsinharoy.com/))* Last updated: 12/27/2015License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0/) References1. Statistical properties of Laser Speckle, 2. Laser Doppler and time-varying speckle: a reconciliation, from __future__ import division, print_function import os as os import collections as co import numpy as np import math as math import scipy.stats as sps import scipy.optimize as opt import matplotlib.pyplot as plt from IPython.display import Image as ipImage import pyzdde.zdde as pyz import pyzdde.zfileutils as zfu # The following python modules are available at # 1. https://github.com/indranilsinharoy/iutils/blob/master/optics/fourier.py # 2. https://github.com/indranilsinharoy/iutils/blob/master/optics/beam.py import iutils.optics.fourier as fou import iutils.optics.beam as bou %matplotlib inline zmxfile = 'SpeckleUsingPOP_GridSagSurf.zmx' lensPath = os.path.join(os.getcwd().split('Examples')[0], 'ZMXFILES') lensFile = os.path.join(lensPath, zmxfile) ln = pyz.createLink() ln.zLoadFile(lensFile) # Surfaces in the LDE @ Zemax. 
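# ipzGetLDE() below lists the surfaces currently in the Lens Data Editor of the loaded file;
# the surface-number constants defined right after can be checked against this listing.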
ln.ipzGetLDE() # Define surface number constants to remember SURF_BEAMSTART = 1 SURF_DIFFSMOOTHFACE = 2 # Smooth face of the diffuser SURF_GRIDSAG = 3 # Rough face of the diffuser SURF_IMA = 4 # Get wavelength (Zemax returns in units of microns) wavelength = ln.zGetWave(ln.zGetPrimaryWave()).wavelength/1000.0 print(u'Wavelength, \u03BB = {:.3e} mm'.format(wavelength)) # Set sigma, sampling, and semi-diameter of the grid sag surface # the semi-diameter must match that of the grid sag surface in LDE #sigma = 5*wavelength # set sigma later nx, ny = 401, 401 semidia = 5.0 # Start out with a zero height profile surface comment = 'zero height profile sag' filename = os.path.join(os.path.expandvars("%userprofile%"), 'Documents', 'Zemax\\Objects\\Grid Files', 'gridsag_zeroheight.DAT') # the function randomGridSagFile() in pyzdde/zfileutils generates grid sag ASCII # file with Gaussian distributed sag profile z, sagfile = zfu.randomGridSagFile(mu=0, sigma=np.inf, semidia=semidia, nx=nx, ny=ny, fname=filename, comment=comment) # load the zero height grid sag surface file in to the extra data editor ln.zImportExtraData(surfNum=SURF_GRIDSAG, fileName=sagfile)Set up POP analysis# Function to set the POP analysis parameters def set_POP(ln, data='irr', wide=50.0, waist=1.0, start=1, end=None): """helper function to set POP Parameters ---------- ln : object data : string the display data type. 'irr' or 'phase' wide : float initial width and height of the region to display. See Note 2. waist : float beam radius at the waist (in mm) start : integer start surface end : integer end surface Return ------ settinsfilename : string CFG settings file name Note ---- 1. Use the same name for the CFG settings file. This helps POPD to return the correct values of parameters (mostly) 2. The ``auto`` parameter in the function ``zSetPOPSettings()`` does not seem to work as expected. Hence, we need to provide the ``widex`` and ``widey`` parameters explicitly. 
In order to get the appropriate values for these parameters, use the "Automatic" button in Zemax POP analysis window for the particular design file """ setfile = ln.zGetFile().lower().replace('.zmx', '.CFG') datatype = 1 if data == 'phase' else 0 GAUSS_WAIST, WAIST_X, WAIST_Y = 0, 1, 2 S_1024, S_2048 = 6, 7 cfgfile = ln.zSetPOPSettings(data=datatype, settingsFile=setfile, startSurf=start, endSurf=end, field=1, wave=1, beamType=GAUSS_WAIST, paramN=((WAIST_X, WAIST_Y), (waist, waist)), sampx=S_2048, sampy=S_2048, widex=wide, widey=wide) return cfgfile # Helper functions to display POP display data def plot_pop_display_data(popdata, height, width, title): """plot pop display data retrieved from Zemax application using `zGetPOP()` function Parameters ---------- popdata : list list of speckle patterns or pop display data arrays height : list list of height of the speckle patterns width : list list of width of the speckle patterns Returns ------- None Notes ----- The labels of the plot extents are not guaranteed to be exact """ numPatts = len(popdata) figHeight = 5 figWidth = 1.3*figHeight*numPatts if numPatts > 1 else figHeight fig = plt.figure(figsize=(figWidth, figHeight)) for i, (pat, h, w, t) in enumerate(zip(popdata, height, width, title), 1): ax = fig.add_subplot(1, numPatts, i) ax.imshow(pat, cmap=plt.cm.plasma, extent=(-w/2, w/2, h/2, -h/2)) ax.set_xlabel('mm'); ax.set_ylabel('mm') ax.set_title(t, y=1.02) fig.tight_layout() plt.show() # helper function to zoom def zoom(img, amount=2): """simple function for cropping the image data for display Parameters ---------- img : ndarray 2-dim ndarray amount : float amount of zooming """ r, c = img.shape newR = r//amount newC = c//amount startR, startC = (r - newR)//2, (c - newC)//2 return img[startR:startR+newR, startC:startC+newC] # the `wide` value was determined from the Zemax main applicaiton's POP analysis # setting by clicking on "Automatic" beamRadius = 1.0 cfgfile = set_POP(ln, data='irr', wide=80.0, waist=beamRadius, start=SURF_BEAMSTART, end=SURF_IMA)Rayleigh range and Fraunhofer distancesdef rayleigh_fraunhofer(beamRadius, wavelength): """print the rayleigh range and Fraunhofer distance in mm """ beamDia = 2.0*beamRadius rr = bou.GaussianBeam(waistDiameter=beamDia, wavelength=wavelength).rayleigh fd = fou.fraunhofer_distance(d=beamDia, wavelen=wavelength) print('Rayleigh range = {:2.2f} mm'.format(rr)) print('Fraunhofer distance (far-field) = {:2.2f} mm'.format(fd)) rayleigh_fraunhofer(beamRadius, wavelength) # view the set parameters for POP analysis. 
Note that this analysis takes little more than a min # because of large number of samples popinfo, irrdata = ln.zGetPOP(settingsFile=cfgfile, displayData=True, timeout=3*60) popinfo mmPerPxY, mmPerPxX = popinfo.widthY/popinfo.gridY, popinfo.widthX/popinfo.gridX irradiance = zoom(np.array(irrdata), amount=5.5) pxY, pxX = irradiance.shape h, w = mmPerPxY*pxY, mmPerPxX*pxX plot_pop_display_data([irradiance,], [h,], [w,], ['Initial irradiance',])C:\Anaconda2\lib\site-packages\ipykernel\__main__.py:16: DeprecationWarning: using a non-integer number instead of an integer will result in an error in the futureCreate and import rough sag surface into Zemax# Helper function to see the surface statistics def sag_statistics(sag, sigma=1, wave=1, nbins=100): """dispaly basic statistics of the sag profile """ h, w = sag.shape absMax, meanSag = np.max(np.abs(sag)), np.mean(sag) varSag, stdSag = np.var(sag), np.std(sag) print(u'sag absolute max: {:.4f} mm ({:.4f}\u03BB)' .format(absMax, absMax/wave)) print(u'sag mean value: {:.4e} mm ({:.4f}\u03BB)' .format(meanSag, meanSag/wave)) print(u'sag std deviation: {:.4e} ({:.4f}\u03BB)' .format(stdSag, stdSag/wave)) hist, binEdges = np.histogram(sag, bins=nbins, range=(-5*sigma, 5*sigma), density=True) binCenters = (binEdges[:-1] + binEdges[1:])/2 # def gauss(x, mu, sigma): """gaussian distribution """ a = 1.0/(sigma*np.sqrt(2.0*np.pi)) return a*np.exp(-(x - mu)**2/(2.0*sigma**2)) # figures fig = plt.figure(figsize=(8, 4)) ax0 = fig.add_axes([0.00, 0.00, 0.40, 0.95]) ax1 = fig.add_axes([0.49, 0.00, 0.46, 1.00]) ax2 = fig.add_axes([0.98, 0.05, 0.02, 0.89]) gaussDist = gauss(binCenters, mu=0, sigma=sigma) ax0.plot(binCenters/wave, gaussDist, lw=6, alpha=0.4, label='Gaussian dist') ax0.plot(binCenters/wave, hist, label='Fluctuation hist') ax0.set_xlim(-5*sigma/wave, 5*sigma/wave) ax0.yaxis.set_ticks([]) ax0.legend(fontsize=8) ax0.set_xlabel(r'$\lambda$', fontsize=15) ax0.set_title('Sag fluctuation histogram', y=1.01) im = ax1.imshow(sag, cmap=plt.cm.jet, vmin=-absMax, vmax=absMax, interpolation='none') ax1.set_title('Sag surface profile', y=1.01) plt.colorbar(im, ax2) plt.show() # Create a rough surface and display the surface roughness statistics sigma = 5.0*wavelength # surface roughness comment = 'gauss random dist of grid sag for speckle generation' print('Diffuser semi-diameter = {:2.3f} mm'.format(semidia)) print('Nx = {:d}, Ny = {:d}'.format(nx, ny)) print('delx = {:.5f} mm' .format(2.0*semidia/(nx-1))) print('dely = {:.5f} mm' .format(2.0*semidia/(ny-1))) z, sagfile = zfu.randomGridSagFile(mu=0, sigma=sigma, semidia=semidia, nx=nx, ny=ny) sag_statistics(z.reshape(ny, nx), sigma, wavelength) # load the Grid sag surface file in to the extra data editor ln.zImportExtraData(surfNum=SURF_GRIDSAG, fileName=sagfile)Retrieve and plot the speckle data generated in Zemaxpopinfo, irrdata = ln.zGetPOP(settingsFile=cfgfile, displayData=True, timeout=3*60) popinfoNOTE: If the Zemax Error Message "*The reference rays cannot be traced or are too close together*", please check the maximum aboslute height of the grid sag surface. 
It is probably much larger (more than an order of magnitude) than the wavelength of the source.mmPerPxY, mmPerPxX = popinfo.widthY/popinfo.gridY, popinfo.widthX/popinfo.gridX speckle = zoom(np.array(irrdata), amount=5.5) pxY, pxX = irradiance.shape h, w = mmPerPxY*pxY, mmPerPxX*pxX plot_pop_display_data([speckle,], [h,], [w,], ['speckle pattern',])C:\Anaconda2\lib\site-packages\ipykernel\__main__.py:16: DeprecationWarning: using a non-integer number instead of an integer will result in an error in the futureFirst order speckle statistics **Distribution of speckle intensity**The speckle intensity of the ideal Gaussian speckle pattern follows a negative exponential probability-density function [1]. The (negative) exponential probability density function is given as:$$P(x | \lambda) = \left \{ \begin{array}{cc} \lambda e^{-\lambda x} & \hspace{4pt} & x \geq 0, \\0 & \hspace{4pt} & x < 0, \end{array}\right .\ $$# histogram of speckle numBins = 100 hist, binEdges = np.histogram(speckle.flatten(), bins=numBins, density=True) binCenters = (binEdges[:-1] + binEdges[1:])/2 fig, ax = plt.subplots(1, 1) ax.plot(binCenters, hist, label='speckle data') # fit an exponential curve. Since the data is noise-free, we will use # the method provided by Scipy loc, scale = sps.expon.fit(speckle.flatten(), floc=0) y = sps.expon.pdf(binCenters, loc=loc, scale=scale) ax.plot(binCenters, y, linestyle='dashed', label='fitted expo curve') ax.set_xlim(0, np.max(binCenters)) ax.legend(fontsize=12) ax.set_xlabel('(scaled) intensity') print(u'Rate parameter, \u03BB = {:.3f}'.format(1.0/scale)) plt.show()Rate parameter, λ = 158.707Speckle contrast For ideal Gaussian speckle pattern, the standard deviation, $\sigma$ of the intensity is equal to the mean intensity, $\langle I \rangle$, where strictly, $\langle \, \rangle$ stands for ensemble average. Here, we will assume that the ensemble average equals the sample average.speckleContrast = np.std(speckle.flatten())/ np.mean(speckle.flatten()) print('Speckle contrast = {:2.5f}'.format(speckleContrast))Speckle contrast = 1.09703Second order speckle statistics The width of the autocorrelation of the intensity of the speckle distribution gives a reasonable measure of the "average" width of a speckle in the pattern [1].# Expected speckle size def set_small_values_to_zero(tol, *values): """helper function to set infinitesimally small values to zero Parameters ---------- tol : float threshold. All numerical values below abs(tol) is set to zero *values : unflattened sequence of values Returns ------- """ return [0.0 if abs(value) < tol else value for value in values] #TODO!! Move this function to PyZDDE!! (could rename to specify it's a POP analysis helper function) def get_beam_centroid_radius(ln, surf, update=True): """returns the beam width and position at surface ``surf`` using POP analysis Parameters ---------- surf : integer surface number. 0 implies last surface udpate : bool if `True`, then Zemx will recompute all pupil positions and solves, etc and the data in the LDE will be updated before retrieving the POPD values. Returns ------- para : namedtuple beam parameters (cx, cy, rx, ry) where cx, cy are the coordinates of the centroid of the beam w.r.t. 
the chief ray """ CENTROID_X, CENTROID_Y, BEAM_RADIUS_X, BEAM_RADIUS_Y = 21, 22, 23, 24 wave, field, xtr1, xtr2 = 0, 0, 0, 0 if update: ln.zGetUpdate() cx = ln.zOperandValue('POPD', surf, wave, field, CENTROID_X, xtr1, xtr2) cy = ln.zOperandValue('POPD', surf, wave, field, CENTROID_Y, xtr1, xtr2) rx = ln.zOperandValue('POPD', surf, wave, field, BEAM_RADIUS_X, xtr1, xtr2) ry = ln.zOperandValue('POPD', surf, wave, field, BEAM_RADIUS_Y, xtr1, xtr2) cx, cy, rx, ry = set_small_values_to_zero(1e-12, cx, cy, rx, ry) beam = co.namedtuple('beam', ['cx', 'cy', 'rx', 'ry']) return beam(cx, cy, rx, ry) beamDiameterAtDiff = 2.0*get_beam_centroid_radius(ln, SURF_DIFFSMOOTHFACE).rx THICKNESS = 3 diffScDist = ln.zGetSurfaceData(surfNum=SURF_IMA - 1, code=THICKNESS) # note this is not general theorySpeckleWidth = wavelength*diffScDist/beamDiameterAtDiff print('Beam diameter @ diffuser = {} mm'.format(beamDiameterAtDiff)) print('Distance between diffuser and obs. screen = {} mm'.format(diffScDist)) print(u'Theoretical speckle width = {:.5f} mm ({:.3E} \u03BCm)' .format(theorySpeckleWidth, theorySpeckleWidth/wavelength))Beam diameter @ diffuser = 2.00000018 mm Distance between diffuser and obs. screen = 1000.0 mm Theoretical speckle width = 0.26600 mm (5.000E+02 μm)After fitting a Gaussian distribution the $\text{FWHM}$ and $1/e^2$ widths may be estimated as follows:If $F(x)$ is the Gaussian curve, and $F(x) \Big|_{x=x^+} = \frac{1}{2}$ and $F(x) \Big|_{x=x^-} = \frac{1}{2}$ on either side of the mean, then the $\text{FWHM}$ width is given by $x^+ - x^-$. Similarly, the $1/e^2$ width may be estimated by taking the difference between the abscissae where $F(x)=1/e^2=0.135335$$$\begin{array}{cl}F_x & = & a e^{- \frac{(x - \mu)^2}{2\sigma^2} } \\\ln(F_x) & = & ln(a) - \frac{(x - \mu)^2}{2\sigma^2} \\\\frac{(x - \mu)^2}{2\sigma^2} & = & ln \left( \frac{a}{F_x} \right)\\\x & = & \mu + \sigma \sqrt{ \left[ 2 \, ln \left( \frac{a}{F_x} \right) \right]}\end{array}$$If we represent$$x^{\pm} = \mu \pm \sigma \sqrt{ \left[ 2 \, ln \left( \frac{a}{F_x} \right) \right]}$$then,$$\Delta x = 2 \, \sigma \sqrt{ \left[ 2 \, ln \left( \frac{a}{F_x} \right) \right]}$$# Helper functions for estimating the speckle size # Most of the ideas for determining the speckle size is from # "Speckle Size via Autocorrelation", by Joel # mathworks.com/matlabcentral/fileexchange/ # 25046-speckle-size-via-autocorrelation/content//SpeckleSize.m def xcov(x, y=None, scale='none'): """returns the cross-covariance of two discrete-time sequences, `x` and `y`. Parameters ---------- x : ndarray 1-dim ndarray y : ndarray, optional 1-dim ndarray. If y is 'None', the autocovariance of the sequence `x`is returned scale : string, optional specifies a normalization option for the cross- covariance Returns ------- crosscov : ndarray 1-dim ndarray of the cross-covariance sequence. Length of `crosscov` is `2*m - 1`, where `m` is the length of `x` (and `y` is passed) Notes ----- `xcov` emulates Matlab's `xcov()` function in a limited way. For details see _[1] References ---------- .. [1] http://www.mathworks.com/help/signal/ref/xcov.html """ m = len(x) y = x if y is None else y assert m == len(y), \ 'Sequences x and y must be of same length.' 
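# Cross-covariance: take the full cross-correlation of the mean-subtracted sequences; the raw result
# has length 2*m - 1 and is rescaled below according to the requested 'scale' option.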
raw = np.correlate(x - np.mean(x), y - np.mean(y), 'full') if scale == 'coeff': crosscov = raw/np.max(raw) elif scale == 'biased': crosscov = raw/m elif scale == 'unbiased': maxlag = m - 1 k = np.arange(-maxlag, maxlag + 1) crosscov = raw/(m - np.abs(k)) else: crosscov = raw return crosscov def avg_autocov(x, axis=0, scale='coeff'): """returns the "average" autocovariance of x along the `axis` specified Parameters ---------- x : ndarray 2-dim ndarray axis : integer 0 = average auto-covariance along the first dimension; 1 = average auto-covariance along the second dimension scale : string, optional specifies a normalization option for the cross- covariance Returns ------- aCorr : ndarray 1-dim ndarray of average auto-covariance along the `axis`, normalized such that the maximum is 1. """ x = x if axis else x.T r, c = x.shape avgAcov = np.zeros(2*c - 1) for row in x: avgAcov = avgAcov + xcov(row, scale=scale) return avgAcov/np.max(avgAcov) def gauss(x, a, mu, sigma): """gaussian model function for curve fitting """ return a*np.exp((-(x - mu)**2)/(2.0*sigma**2)) def gauss_fit(data, expectedSize=10): """helper function for curve gaussian curve fitting Parameters ---------- data : ndarray 1-dim ndarray consisting of the data expectedSize : ndarray expected size of the speckle???? (in pixels) Returns ------- a : float mu : float mean of the gaussian curve sigma : float standard deviation of the gaussian curve TO DO!!! What is a good `expectedSize`? probably should use some standard deviation of speckle estimate, and pixel size ... based on the beam width and wavelength """ # clean the data by simple thresholding y = data.copy() upper, lower = 1.0, 0.005 y[y > upper] = 1.0 y[y < lower] = 0.0 m = len(y) x = np.arange(0, m) # index p0 = [1.0, m/2, expectedSize] # initial guess pEst, pCov = opt.curve_fit(gauss, x, y, p0=p0) stdErr = np.sqrt(np.diag(pCov)) return pEst[0], pEst[1], pEst[2], stdErr def width_FWHM(a, sigma): return 2.0*sigma*np.sqrt(2.0*np.log(a/0.5)) def width_oneOESqu(a, sigma): return 2.0*sigma*np.sqrt(2.0*np.log(a/.1353353)) def plot_avg_intensity_autocovariance(x, acR, acRFit, acC, acCFit): """helper function to plot the average intensity autocovariances and the fit curves for visual inspection Parameters ---------- x : ndarray indices acR, acC : ndarray 1-dim ndarray of the average autocovariance along the rows/columns acRFit, acRFit : ndarray 1-dim ndarray of the fitted gaussian curve along the rows/columns """ fig, (ax0, ax1) = plt.subplots(2, 1, figsize=(12, 7)) ax0.plot(x, acR, label='avg acov horizontal') ax0.plot(x, acRFit, '--', label='fitted gauss') ax0.legend() ax0.autoscale(tight=True) ax1.plot(x, acC, label='avg acov vertical') ax1.plot(x, acCFit, '--', label='fitted gauss') ax1.legend() ax1.autoscale(tight=True) plt.show() def estimate_mean_speckle_size(intPat, mmPerPxY, mmPerPxX): """function to estimate the mean speckle intensity Parameters ---------- intPat : ndarray 2-dim ndarray of the intensity pattern of the speckle mmPerPxY : float millimeter per pixel in y direction (in the POP display) mmPerPxX : float millimeter per pixel in x direction (in the POP display) Returns ------- None """ r, c = intPat.shape # average auto-covariance along the rows acR = avg_autocov(intPat, axis=1, scale='coeff') # average auto-covariance along the columns acC = avg_autocov(intPat, axis=0, scale='coeff') # fit a Gaussian curve to acR and acC x = np.arange(0, len(acR)) aR, muR, stdR, _ = gauss_fit(acR) acRFit = gauss(x, aR, muR, stdR) aC, muC, stdC, _ = gauss_fit(acC) acCFit = 
gauss(x, aC, muC, stdC) print('Gaussian fit parameters:') print('aR = {:2.4f}, muR = {:2.2f}, stdR = {:2.4f}'.format(aR, muR, stdR)) print('aC = {:2.4f}, muC = {:2.2f}, stdC = {:2.4f}'.format(aC, muC, stdC)) print('\nPlot of the average autocovariances and fitted Gaussian curve:') plot_avg_intensity_autocovariance(x, acR, acRFit, acC, acCFit) # Estimate the FWHM and 1/e^2 widths fwhm_x = width_FWHM(aR, stdR) fwhm_y = width_FWHM(aC, stdC) oneOESqu_x = width_oneOESqu(aR, stdR) oneOESqu_y = width_oneOESqu(aC, stdC) print('\nSpeckle size estimates:') print('----------------------') print('FWHM: Wx = {:2.4f} pixles ({:2.4f} mm), Wy = {:2.4f} pixles ({:2.4f} mm)' .format(fwhm_x, fwhm_x*mmPerPxX, fwhm_y, fwhm_y*mmPerPxY)) print(u'1/e\u00B2: Wx = {:2.4f} pixles ({:2.4f} mm), Wy = {:2.4f} pixles ({:2.4f} mm)' .format(oneOESqu_x, oneOESqu_x*mmPerPxX, oneOESqu_y, oneOESqu_y*mmPerPxY)) estimate_mean_speckle_size(speckle, mmPerPxY, mmPerPxX) #print(np.mean(speckle), np.std(speckle)) print(u'Theoretical speckle width = {:.5f} mm ({:.3E} \u03BCm)' .format(theorySpeckleWidth, theorySpeckleWidth/wavelength)) ln.close()run till here# test with btc delete BTC_adr2 = coins ['btc-test'][1]['address'] # def create_tx(coin, account, recipient, amount): BTC_tx = create_tx ('btc-test', BTC_act1, BTC_adr2, 0.000001) BTC_tx # delete test code eth_adr2 = coins['eth'][1]['address'] # eth_ret = create_tx(ETH,Eth_act1, eth_adr2, 0.01 ) # w3 = Web3(Web3.HTTPProvider('http://localhost:8545')) # HTTP://127.0.0.1:8545 w3 = Web3(Web3.HTTPProvider('http://127.0.0.1:8545')) account = Eth_act1 recipient = eth_adr2 amount = 2.0 # w3.middleware_onion.inject(geth_poa_middleware, layer=0) # w3.eth.setGasPriceStrategy(medium_gas_price_strategy) # delete test code # recipient # delete test code # gasEstimate = w3.eth.estimateGas( # {"from": account.address, "to": recipient, "value": int(amount)} # ) # Create a function called `send_tx` that calls `create_tx`, signs and sends the transaction. 
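# As implemented below: the transaction returned by create_tx is signed with the local account object,
# then broadcast with w3.eth.sendRawTransaction for ETH or with bit's NetworkAPI.broadcast_tx_testnet
# for BTC testnet; any other coin value returns 'error'.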
def send_tx(coin, account, recipient, amount): # YOUR CODE HERE tx = create_tx(coin, account, recipient, amount) signed_tx = account.sign_transaction(tx) if coin == ETH: result = w3.eth.sendRawTransaction(signed_tx.rawTransaction) return result elif coin == BTCTEST: return NetworkAPI.broadcast_tx_testnet(signed_tx) else: return ('error') w3.middleware_onion.inject(geth_poa_middleware, layer=0) Eth_act1 # delete test code eth_send = send_tx(ETH, Eth_act1, eth_adr2, 1) eth_send # delete test code eth_send = send_tx(ETH, Eth_act1, eth_adr2, 1) eth_send btc_send = send_tx('btc-test', BTC_act1, BTC_adr2, 0.0001) btc_send BTC_act1 BTC_adr2 loc_eth_adr = '0x20AB545aFF9eDA6841C885D0027b978cd18d9bd5' net_name = 'puppethonastring' w3.middleware_onion.inject(geth_poa_middleware, layer=0) # http://127.0.0.1:8545/ puppethonastring.json Batch Prediction import math batch_size = 2 def predict_batch(df, batch_size, threshold): p_nums = df.shape[0] print(p_nums) steps = math.ceil(p_nums / batch_size) print(steps) predictions = [] for i in range(steps): start = i * batch_size end = (i+1) * batch_size p_df = df[start:end] result = predictor.predict(p_df.to_csv(index=False)) predictions.append(result) print(start, end, p_df.shape) if i == 1: break return predictions predictions = predict_batch(X_test, batch_size=batch_size, threshold=0.8) predictions a.split('\n') Convert int to float def change_type(raw_df): ''' test2_data_nolab = change_type(test_data_nolab) ''' df = raw_df.copy() int_columns = df.columns[df.dtypes == 'int64'].tolist() for e in int_columns: print(e) df[e] = df[e].astype('float64') # int_index = [num -1 for num in int_index] return df test2_data_nolab = change_type(test_data_nolab) test2_data_nolab.info() Label Encoding with Train and Test # from sklearn import preprocessing import numpy as np from sklearn.preprocessing import LabelEncoder class LabelEncoderExt(object): ''' Source: # https://stackoverflow.com/questions/21057621/sklearn-labelencoder-with-never-seen-before-values ''' def __init__(self): """ It differs from LabelEncoder by handling new classes and providing a value for it [Unknown] Unknown will be added in fit and transform will take care of new items.
It gives unknown class id """ self.label_encoder = LabelEncoder() # self.classes_ = self.label_encoder.classes_ def fit(self, data_list): """ This will fit the encoder for all the unique values and introduce unknown value :param data_list: A list of string :return: self """ self.label_encoder = self.label_encoder.fit(list(data_list) + ['Unknown']) self.classes_ = self.label_encoder.classes_ return self def transform(self, data_list): """ This will transform the data_list to id list where the new values get assigned to Unknown class :param data_list: :return: """ new_data_list = list(data_list) for unique_item in np.unique(data_list): if unique_item not in self.label_encoder.classes_: new_data_list = ['Unknown' if x==unique_item else x for x in new_data_list] print("new_data_list: ", new_data_list) return self.label_encoder.transform(new_data_list) def make_test_label_encoding(raw_train_df, raw_test_df,cols): train_df = raw_train_df.copy() test_df = raw_test_df.copy() for lb_col in cols: print(lb_col) le = LabelEncoderExt() le = le.fit(train_df[lb_col]) train_en = le.transform(train_df[lb_col]) test_en = le.transform(test_df[lb_col]) lb_col_name = 'lb_' + lb_col print("new col name: ", lb_col_name) train_df[lb_col_name] = train_en test_df[lb_col_name] = test_en return train_df, test_df import pandas as pd train_pd = pd.DataFrame({"col1": ['a','b','c']}) test_pd = pd.DataFrame({"col1": ['a','a','b','d','e']}) train_pd label_cols = ['col1'] train_pd_lb, test_pd_lb = make_test_label_encoding(train_pd, test_pd, label_cols) train_pd_lb, test_pd_lb # Unknown == 0Install PrettyTablepip install PrettyTable from prettytable import PrettyTable from timeit import default_timer as timerBYOC Functionimport autogluon as ag import pandas as pd import os # import autogluon.core as ag # from autogluon.tabular import TabularPrediction as task import autogluon from autogluon.tabular import TabularPrediction as task from autogluon.tabular import TabularDataset %store -r no_auto_train_file def __load_input_data(path: str): """ Load training data as dataframe :param path: :return: DataFrame """ input_data_files = os.listdir(path) try: input_dfs = [pd.read_csv(f'{path}/{data_file}') for data_file in input_data_files] return task.Dataset(df=pd.concat(input_dfs)) except: print(f'No csv data in {path}!') return None folder_name = '/'.join(no_auto_train_file.split('/')[0:-1]) train_data = __load_input_data(folder_name) import pickle columns = train_data.columns.tolist() column_dict = {"columns":columns} with open('columns.pkl', 'wb') as f: pickle.dump(column_dict, f) print(column_dict){'columns': ['classes', 'customer_zip_code_prefix', 'customer_city', 'customer_state', 'price', 'freight_value', 'product_weight_g', 'product_category_name_english', 'seller_zip_code_prefix', 'seller_city', 'seller_state', 'order_weekday', 'order_day', 'order_month', 'customer_seller_state', 'customer_seller_city', 'customer_seller_zip_code_prefix', 'product_volume']}Parser Testfit_args = { 'label': 'y', # Adding 'best_quality' to presets list will result in better performance (but longer runtime) 'presets': ['optimize_for_deployment'], } fit_args="test" ! python working/z_parse.py --fit_args 'label' --feature_importance True # ! python working/z_parse.py --fit_args label=classes --feature_importance True # ! 
python working/z_parse.py --feature_importance True feature_importance: True fit_args: label docker command docker stop `docker ps -q` MXNet Model Creation ## Save the model output file model_artifact='s3://{}/{}/output/model.tar.gz'.format(bucket, train_byos_job_name) print('model_artifact: ', model_artifact) # training_image and inference_image are identical inference_image = '763104351884.dkr.ecr.{}.amazonaws.com/mxnet-training:1.6.0-cpu-py3'.format(region) mxnet_model = MXNetModel( # entry_point = 'src/autogluon_inference.py', entry_point = 'src/autogluon_train.py', framework_version='1.6.0', py_version='py3', model_data = model_artifact, role = role, ) OS command in Python import os, subprocess # subprocess.run(['ls','-l']) subprocess.check_output(["ls", "-l", "/dev/null"]) import os cmd = 'ls -l' r = os.system(cmd) r Download the Gluon model results import tarfile import sagemaker def download_extract_infer_file(s3_output_path, output_infer_folder, zip_file_name='model.tar.gz'): sagemaker.s3.S3Downloader.download(s3_output_path, output_infer_folder) output_infer_path = os.path.join(output_infer_folder, zip_file_name) tf = tarfile.open(output_infer_path) print("Infer file {} is downloaded".format(output_infer_path)) tf.extractall(path = output_infer_folder) print("Infer file {} is extracted".format(output_infer_path)) import os output_folder = 'ZTEST/model_artifact' os.makedirs(output_folder, exist_ok=True) s3_model_output = 's3://code-free-automl-gsmoon/results/auto-autogluon-2020-11-13-07-48-33-054/output/model.tar.gz' download_extract_infer_file(s3_model_output, output_folder) Infer file ZTEST/model_artifact/model.tar.gz is downloaded Infer file ZTEST/model_artifact/model.tar.gz is extracted Target encoding, the right way - https://www.kaggle.com/c/ieee-fraud-detection/discussion/108311 Target Encoding with Smoothing - Python target encoding for categorical features - https://www.kaggle.com/ogrellier/python-target-encoding-for-categorical-features - Data - https://www.kaggle.com/c/porto-seguro-safe-driver-prediction import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. 
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory from subprocess import check_output print(check_output(["ls", "ZTEST"]).decode("utf8")) # reading data trn_df = pd.read_csv("ZTEST/train.csv", index_col=0) sub_df = pd.read_csv("ZTEST/test.csv", index_col=0) from IPython.display import display display(trn_df.head()) display(sub_df.head()) trn_df.nunique()[['target','ps_car_11_cat']] from IPython.display import display def add_noise(series, noise_level): return series * (1 + noise_level * np.random.randn(len(series))) def target_encode(trn_series=None, tst_series=None, target=None, min_samples_leaf=1, smoothing=1, noise_level=0): """ Smoothing is computed like in the following paper by https://kaggle2.blob.core.windows.net/forum-message-attachments/225952/7441/high%20cardinality%20categoricals.pdf trn_series : training categorical feature as a pd.Series tst_series : test categorical feature as a pd.Series target : target data as a pd.Series min_samples_leaf (int) : minimum samples to take category average into account smoothing (int) : smoothing effect to balance categorical average vs prior """ assert len(trn_series) == len(target) assert trn_series.name == tst_series.name temp = pd.concat([trn_series, target], axis=1) # Compute target mean averages = temp.groupby(by=trn_series.name)[target.name].agg(["mean", "count"]) # display(averages) # Compute smoothing smoothing = 1 / (1 + np.exp(-(averages["count"] - min_samples_leaf) / smoothing)) # display(smoothing) # Apply average function to all target data prior = target.mean() # The bigger the count the less full_avg is taken into account averages[target.name] = prior * (1 - smoothing) + averages["mean"] * smoothing averages.drop(["mean", "count"], axis=1, inplace=True) display(averages) # Apply averages to trn and tst series ft_trn_series = pd.merge( trn_series.to_frame(trn_series.name), averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}), on=trn_series.name, how='left')['average'].rename(trn_series.name + '_mean').fillna(prior) # pd.merge does not keep the index so restore it ft_trn_series.index = trn_series.index ft_tst_series = pd.merge( tst_series.to_frame(tst_series.name), averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}), on=tst_series.name, how='left')['average'].rename(trn_series.name + '_mean').fillna(prior) # pd.merge does not keep the index so restore it ft_tst_series.index = tst_series.index return add_noise(ft_trn_series, noise_level), add_noise(ft_tst_series, noise_level) # Target encode ps_car_11_cat trn, sub = target_encode(trn_df["ps_car_11_cat"], sub_df["ps_car_11_cat"], target=trn_df.target, min_samples_leaf=100, smoothing=10, noise_level=0.01) trn.head(10) sub.head(10) import matplotlib.pyplot as plt %matplotlib inline plt.scatter(trn_df["ps_car_11_cat"], trn) plt.xlabel("ps_car_11_cat category values") plt.ylabel("Noisy target encoding")K-fold target encoding- https://github.com/pourya-ir/Medium/blob/master/K-fold-target-enc/K-fold-Target-Encoding.ipynbimport pandas as pd import numpy as np from sklearn import base from sklearn.model_selection import KFold def getRandomDataFrame(data, numCol): if data== 'train': key = ["A" if x ==0 else 'B' for x in np.random.randint(2, size=(numCol,))] value = np.random.randint(2, size=(numCol,)) df = pd.DataFrame({'Feature':key, 'Target':value}) return df elif data=='test': key = ["A" if x ==0 else 'B' for x in np.random.randint(2, size=(numCol,))] df = 
pd.DataFrame({'Feature':key}) return df else: print(';)') train = pd.read_csv('ZTEST/train.csv') test = pd.read_csv('ZTEST/test.csv') train = getRandomDataFrame('train',20) test = getRandomDataFrame('test',5) train train.groupby('Feature').mean() test class KFoldTargetEncoderTrain(base.BaseEstimator, base.TransformerMixin): def __init__(self, colnames,targetName,n_fold=5,verbosity=True,discardOriginal_col=False): self.colnames = colnames self.targetName = targetName self.n_fold = n_fold self.verbosity = verbosity self.discardOriginal_col = discardOriginal_col def fit(self, X, y=None): return self def transform(self,X): assert(type(self.targetName) == str) assert(type(self.colnames) == str) assert(self.colnames in X.columns) assert(self.targetName in X.columns) mean_of_target = X[self.targetName].mean() kf = KFold(n_splits = self.n_fold, shuffle = False, random_state=2019) col_mean_name = self.colnames + '_' + 'Kfold_Target_Enc' X[col_mean_name] = np.nan for tr_ind, val_ind in kf.split(X): X_tr, X_val = X.iloc[tr_ind], X.iloc[val_ind] # print(tr_ind,val_ind) X.loc[X.index[val_ind], col_mean_name] = X_val[self.colnames].map(X_tr.groupby(self.colnames)[self.targetName].mean()) X[col_mean_name].fillna(mean_of_target, inplace = True) if self.verbosity: encoded_feature = X[col_mean_name].values print('Correlation between the new feature, {} and, {} is {}.'.format(col_mean_name, self.targetName, np.corrcoef(X[self.targetName].values, encoded_feature)[0][1])) if self.discardOriginal_col: X = X.drop(self.targetName, axis=1) return X targetc = KFoldTargetEncoderTrain('Feature','Target',n_fold=5) new_train = targetc.fit_transform(train) new_train train[['Feature','Target']].iloc[4:20,:].groupby('Feature').mean() train[['Feature','Target']].groupby('Feature').mean() class KFoldTargetEncoderTest(base.BaseEstimator, base.TransformerMixin): def __init__(self,train,colNames,encodedName): self.train = train self.colNames = colNames self.encodedName = encodedName def fit(self, X, y=None): return self def transform(self,X): mean = self.train[[self.colNames,self.encodedName]].groupby(self.colNames).mean().reset_index() dd = {} for index, row in mean.iterrows(): dd[row[self.colNames]] = row[self.encodedName] X[self.encodedName] = X[self.colNames] X = X.replace({self.encodedName: dd}) return X test_targetc = KFoldTargetEncoderTest(new_train,'Feature','Feature_Kfold_Target_Enc') test_targetc.fit_transform(test)Usage:1- goto the dataset repository:https://drive.google.com/drive/u/2/folders/1mRefmN4Yzy60Uh7z3B6cllyyOXaxQrgg and select one of the datasets e.g. 1_nice_60000_rows2- download the related files, for example:a- 1_nice_60000_rows.binandb- 1_nice_60000_rows.txt2- upload the dataset files you downloaded from the dataset repository into your Google Colab drive.if you uploaded the files into Colab drive, they will be deleted once the session is over. Hence, you can upload the downloaded dataset files into your own google drive and mount your google drive in Colab.4- Set the path for the dataset files in config.py below i.e. whether in Colab drive or your own google drive e.g. 
/content/drive/MyDrive/5- initially, we need to train the model, so we set the **OPERATION_TYPE = OperationType.Training** value in config.py below to Training.6- If your are using the dataset for the first time, make sure to empty your output folder from any previously generated files (dataset split files and model files...etc).7- Set the experiment name via the experiment_name constant below (if required)8- Run the training process, the training process will stop after 5 epoches with no improvements, you can change this value in config.py below.9- After finishing the training process, you can test the model on the testing dataset by setting the **OPERATION_TYPE = OperationType.Testing**10- Finally, you can make inference on single images by setting **OPERATION_TYPE = OperationType.Infer**To make inference, you need to place the required word image file in the designated folder defined by the INDIVIDUAL_TEST_IMAGE_PATH constant value below.You also need to set the filename for the image you are trying to infer in the fnInfer variable below e.g. 0.png.You can find sample files in the dataset repository in Google drive as well as the dataset folder in GitHub (under the sample_dataset_imgages folder) Config.py%cd /content/drive/MyDrive/ArabicMultiFontsDataset/ %ls # The location/path of the uploaded dataset files (after downloading them from the dataset repository) # Make sure to mount your Google drive in Colab BASE_PATH = "/content/drive/MyDrive/ArabicMultiFontsDataset/" # The name of the dataset files i.e. the binary file and the labels file. BASE_FILENAME = "1_nice_60000_rows" %tensorflow_version 1.x from __future__ import division from __future__ import print_function import os from enum import Enum import numpy as np import argparse from datetime import datetime import time import cv2 import random import sys from shutil import Error import tensorflow as tf import editdistance class OperationType(Enum): Training = 1 Validation = 2 Testing = 3 Infer = 4 class DecoderType: BestPath = 0 BeamSearch = 1 WordBeamSearch = 2 # Experiment name, to be saved in the audit log. EXPERIMENT_NAME = "Test Drive Training Process" # The type of the run session, depending on this type, designated datasets will be loaded. # Set this value to training to run a training process using the testing dataset. # Set this value to testing to run a testing process using the testing dataset. # Set this value to infer, to make an inference for a single word image file, make sure # to place the required word image file in the directory defined by the INDIVIDUAL_TEST_IMAGE_PATH below, and # make sure to set the fnInfer to the name of the image file your are trying to infer OPERATION_TYPE = OperationType.Training DECODER_TYPE = DecoderType.BestPath # Use this value to regenerate the training/validation/test datasets, as well as # the other support files. Usually this is needed when we start the training process # It is not needed during the Testing process so we set it to true # in order to regenerate all the required files, we have to delete to old ones train/validate/test and delete # and then its value to true. After running the app, the files are generated and we can set it back to false unless # we need to generate a new set of data i.e. 
train/validate/test REGENERATE_CHARLIST_AND_CORPUS = True #You can modify these folder settings according to your preference #Set the path where the train, validate,test datasets are saved DATA_PATH = BASE_PATH #set the path where to save the generated tensorflow model MODEL_PATH = BASE_PATH #set the path for the autogenerated files OUTPUT_PATH = BASE_PATH #Set the path of the single image files that you want to recognize INDIVIDUAL_TEST_IMAGE_PATH = DATA_PATH BASE_IMAGES_FILE = DATA_PATH + BASE_FILENAME + ".bin" BASE_LABELS_FILE = DATA_PATH + BASE_FILENAME + ".txt" TRAINING_LABELS_FILE = DATA_PATH + "TRAINING_DATA_" + BASE_FILENAME + ".txt" VALIDATION_LABELS_FILE = DATA_PATH + \ "VALIDATION_DATA_" + BASE_FILENAME + ".txt" TESTING_LABELS_FILE = DATA_PATH + "TESTING_DATA_" + BASE_FILENAME + ".txt" fnCharList = OUTPUT_PATH + 'charList.txt' fnResult = OUTPUT_PATH + 'result.txt' # define the name of the word image file you want to test/infer # for example, you can select 0.png file from as a sample #https://github.com/msfasha/Arabic-Deep-Learning-OCR/tree/master/dataset/sample_files fnInfer = INDIVIDUAL_TEST_IMAGE_PATH + "sample_files/" + "0.png" fnCorpus = OUTPUT_PATH + 'corpus.txt' fnwordCharList = OUTPUT_PATH + 'wordCharList.txt' # Number of batches for each epoch = SAMPLES_PER_EPOCH / BATCH_SIZE TRAINING_SAMPLES_PER_EPOCH = 5000 BATCH_SIZE = 100 VALIDATIOIN_SAMPLES_PER_STEP = (int)(TRAINING_SAMPLES_PER_EPOCH * .2) TRAINING_DATASET_SIZE = .9 # .5 of the remaining ==> (Total - TRAINING_DATASET_SIZE) / 2 VALIDATION_DATASET_SPLIT_SIZE = .5 # stop after no improvements for this number of epochs MAXIMUM_NONIMPROVED_EPOCHS = 5 MAXIMUM_MODELS_TO_KEEP = 3 # usually only 1, the last one #IMAGE_SIZE = (128, 32) IMAGE_WIDTH = 128 IMAGE_HEIGHT = 32 MAX_TEXT_LENGTH = 32 RESIZE_IMAGE = True CONVERT_IMAGE_TO_MONOCHROME = False MONOCHROME_BINARY_THRESHOLD = 127 AUGMENT_IMAGE = False def auditLog(logStr): open(fnResult, 'a').write(logStr)Model.pyclass Model: "minimalistic TF model for HTR" def __init__(self, decoderType = DecoderType.BestPath, mustRestore=False, dump=False): "init model: add CNN, RNN and CTC and initialize TF" self.dump = dump self.charList = open(fnCharList, encoding="utf-8").read() self.decoderType = decoderType self.mustRestore = mustRestore self.snapID = 0 # Whether to use normalization over a batch or a population self.is_train = tf.placeholder(tf.bool, name='is_train') # input image batch self.inputImgs = tf.placeholder(tf.float32, shape=( None, IMAGE_WIDTH, IMAGE_HEIGHT)) # setup CNN, RNN and CTC self.setup5LayersCNN() self.setupRNN() self.setupCTC() # setup optimizer to train NN self.batchesTrained = 0 self.learningRate = tf.placeholder(tf.float32, shape=[]) self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(self.update_ops): self.optimizer = tf.train.RMSPropOptimizer( self.learningRate).minimize(self.loss) self.auditModelDetails() # initialize TF (self.sess, self.saver) = self.setupTF() def auditModelDetails(self): total_parameters = 0 saveString = "Model Details" + "\n" for variable in tf.trainable_variables(): # shape is an array of tf.Dimension shape = variable.get_shape() saveString = saveString + "Shape:" + \ str(shape) + " ,shape length:" + str(len(shape)) variable_parameters = 1 for dim in shape: variable_parameters *= dim.value saveString = saveString + " , parameters: " + \ str(variable_parameters) + "\n" total_parameters += variable_parameters saveString = saveString + "Total Parameters: " + \ str(total_parameters) + "\n\n" 
print(saveString) auditLog(saveString) def setup5LayersCNN(self): "create CNN layers and return output of these layers" cnnIn4d = tf.expand_dims(input=self.inputImgs, axis=3) pool = cnnIn4d # input to first CNN layer self.kernel1 = tf.Variable( tf.truncated_normal([5, 5, 1, 32], stddev=0.1)) self.conv1 = tf.nn.conv2d( pool, self.kernel1, padding='SAME', strides=(1, 1, 1, 1)) conv_norm = tf.layers.batch_normalization( self.conv1, training=self.is_train) self.relu1 = tf.nn.relu(conv_norm) self.pool1 = tf.nn.max_pool( self.relu1, (1, 2, 2, 1), (1, 2, 2, 1), 'VALID') kernel = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1)) self.conv2 = tf.nn.conv2d( self.pool1, kernel, padding='SAME', strides=(1, 1, 1, 1)) conv_norm = tf.layers.batch_normalization( self.conv2, training=self.is_train) relu = tf.nn.relu(conv_norm) pool = tf.nn.max_pool(relu, (1, 2, 2, 1), (1, 2, 2, 1), 'VALID') kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], stddev=0.1)) self.conv3 = tf.nn.conv2d( pool, kernel, padding='SAME', strides=(1, 1, 1, 1)) conv_norm = tf.layers.batch_normalization( self.conv3, training=self.is_train) relu = tf.nn.relu(conv_norm) pool = tf.nn.max_pool(relu, (1, 1, 2, 1), (1, 1, 2, 1), 'VALID') kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 128], stddev=0.1)) self.conv4 = tf.nn.conv2d( pool, kernel, padding='SAME', strides=(1, 1, 1, 1)) conv_norm = tf.layers.batch_normalization( self.conv4, training=self.is_train) relu = tf.nn.relu(conv_norm) pool = tf.nn.max_pool(relu, (1, 1, 2, 1), (1, 1, 2, 1), 'VALID') kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 256], stddev=0.1)) self.conv5 = tf.nn.conv2d( pool, kernel, padding='SAME', strides=(1, 1, 1, 1)) conv_norm = tf.layers.batch_normalization( self.conv5, training=self.is_train) relu = tf.nn.relu(conv_norm) pool = tf.nn.max_pool(relu, (1, 1, 2, 1), (1, 1, 2, 1), 'VALID') self.cnnOut4d = pool def setupCNN7Layers(self): "create CNN layers and return output of these layers" cnnIn4d = tf.expand_dims(input=self.inputImgs, axis=3) kernel1 = tf.Variable(tf.truncated_normal([3, 3, 1, 64], stddev=0.1)) conv1 = tf.nn.conv2d( cnnIn4d, kernel1, padding='SAME', strides=(1, 1, 1, 1)) pool1 = tf.nn.max_pool(conv1, (1, 2, 2, 1), (1, 2, 2, 1), 'VALID') kernel2 = tf.Variable(tf.truncated_normal([3, 3, 64, 128], stddev=0.1)) conv2 = tf.nn.conv2d( pool1, kernel2, padding='SAME', strides=(1, 1, 1, 1)) pool2 = tf.nn.max_pool(conv2, (1, 2, 2, 1), (1, 2, 2, 1), 'VALID') kernel3 = tf.Variable(tf.truncated_normal( [3, 3, 128, 256], stddev=0.1)) conv3 = tf.nn.conv2d( pool2, kernel3, padding='SAME', strides=(1, 1, 1, 1)) kernel4 = tf.Variable(tf.truncated_normal( [3, 3, 256, 256], stddev=0.1)) conv4 = tf.nn.conv2d( conv3, kernel4, padding='SAME', strides=(1, 1, 1, 1)) pool3 = tf.nn.max_pool(conv4, (1, 1, 2, 1), (1, 1, 2, 1), 'VALID') kernel5 = tf.Variable(tf.truncated_normal( [3, 3, 256, 512], stddev=0.1)) conv5 = tf.nn.conv2d( pool3, kernel5, padding='SAME', strides=(1, 1, 1, 1)) batch_norm1 = tf.layers.batch_normalization( conv4, training=self.is_train) kernel6 = tf.Variable(tf.truncated_normal( [3, 3, 512, 512], stddev=0.1)) conv6 = tf.nn.conv2d(batch_norm1, kernel6, padding='SAME', strides=(1, 1, 1, 1)) batch_norm2 = tf.layers.batch_normalization( conv6, training=self.is_train) pool4 = tf.nn.max_pool(batch_norm2, (1, 1, 2, 1), (1, 1, 2, 1), 'VALID') kernel7 = tf.Variable(tf.truncated_normal( [2, 2, 512, 512], stddev=0.1)) conv7 = tf.nn.conv2d(batch_norm1, kernel7, padding='SAME', strides=(1, 1, 1, 1)) self.cnnOut4d = conv7 def setupRNN(self): 
"create RNN layers and return output of these layers" rnnIn3d = tf.squeeze(self.cnnOut4d, axis=[2]) # basic cells which is used to build RNN numHidden = 256 cells = [tf.contrib.rnn.LSTMCell( num_units=numHidden, state_is_tuple=True) for _ in range(2)] # 2 layers # stack basic cells stacked = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True) # bidirectional RNN # BxTxF -> BxTx2H ((fw, bw), _) = tf.nn.bidirectional_dynamic_rnn(cell_fw=stacked, cell_bw=stacked, inputs=rnnIn3d, dtype=rnnIn3d.dtype) # BxTxH + BxTxH -> BxTx2H -> BxTx1X2H concat = tf.expand_dims(tf.concat([fw, bw], 2), 2) # project output to chars (including blank): BxTx1x2H -> BxTx1xC -> BxTxC kernel = tf.Variable(tf.truncated_normal( [1, 1, numHidden * 2, len(self.charList) + 1], stddev=0.1)) self.rnnOut3d = tf.squeeze(tf.nn.atrous_conv2d( value=concat, filters=kernel, rate=1, padding='SAME'), axis=[2]) def setupCTC(self): "create CTC loss and decoder and return them" # BxTxC -> TxBxC self.ctcIn3dTBC = tf.transpose(self.rnnOut3d, [1, 0, 2]) # ground truth text as sparse tensor self.gtTexts = tf.SparseTensor(tf.placeholder(tf.int64, shape=[None, 2]), tf.placeholder(tf.int32, [None]), tf.placeholder(tf.int64, [2])) # calc loss for batch self.seqLen = tf.placeholder(tf.int32, [None]) self.loss = tf.reduce_mean( tf.nn.ctc_loss(labels=self.gtTexts, inputs=self.ctcIn3dTBC, sequence_length=self.seqLen, ctc_merge_repeated=True)) # calc loss for each element to compute label probability self.savedCtcInput = tf.placeholder( tf.float32, shape=[MAX_TEXT_LENGTH, None, len(self.charList) + 1]) self.lossPerElement = tf.nn.ctc_loss(labels=self.gtTexts, inputs=self.savedCtcInput, sequence_length=self.seqLen, ctc_merge_repeated=True) # decoder: either best path decoding or beam search decoding if self.decoderType == DecoderType.BestPath: self.decoder = tf.nn.ctc_greedy_decoder( inputs=self.ctcIn3dTBC, sequence_length=self.seqLen) elif self.decoderType == DecoderType.BeamSearch: self.decoder = tf.nn.ctc_beam_search_decoder(inputs=self.ctcIn3dTBC, sequence_length=self.seqLen, beam_width=50, merge_repeated=False) elif self.decoderType == DecoderType.WordBeamSearch: # import compiled word beam search operation (see https://github.com/githubharald/CTCWordBeamSearch) word_beam_search_module = tf.load_op_library('TFWordBeamSearch.so') # prepare information about language (dictionary, characters in dataset, characters forming words) chars = str().join(self.charList) wordChars = open(fnwordCharList).read().splitlines()[0] corpus = open(fnCorpus).read() # decode using the "Words" mode of word beam search self.decoder = word_beam_search_module.word_beam_search(tf.nn.softmax(self.ctcIn3dTBC, dim=2), 50, 'Words', 0.0, corpus.encode( 'utf8'), chars.encode('utf8'), wordChars.encode('utf8')) def setupTF(self): "initialize TF" print('Python: ' + sys.version) print('Tensorflow: ' + tf.__version__) sess = tf.Session() # TF session # saver saves model to file saver = tf.train.Saver(max_to_keep=MAXIMUM_MODELS_TO_KEEP) modelDir = MODEL_PATH latestSnapshot = tf.train.latest_checkpoint( modelDir) # is there a saved model? 
# if model must be restored (for inference), there must be a snapshot if self.mustRestore and not latestSnapshot: raise Exception('No saved model found in: ' + modelDir) # load saved model if available if latestSnapshot: print('Init with stored values from ' + latestSnapshot) saver.restore(sess, latestSnapshot) else: print('Init with new values') sess.run(tf.global_variables_initializer()) return (sess, saver) def toSparse(self, texts): "put ground truth texts into sparse tensor for ctc_loss" indices = [] values = [] shape = [len(texts), 0] # last entry must be max(labelList[i]) # go over all texts for (batchElement, text) in enumerate(texts): # convert to string of label (i.e. class-ids) CharactersIndexesOflabels = [self.charList.index(c) for c in text] # sparse tensor must have size of max. label-string if len(CharactersIndexesOflabels) > shape[1]: shape[1] = len(CharactersIndexesOflabels) # put each label into sparse tensor for (i, label) in enumerate(CharactersIndexesOflabels): indices.append([batchElement, i]) values.append(label) return (indices, values, shape) def decoderOutputToText(self, ctcOutput, batchSize): "extract texts from output of CTC decoder" # contains string of labels for each batch element encodedLabelStrs = [[] for i in range(batchSize)] # word beam search: label strings terminated by blank if self.decoderType == DecoderType.WordBeamSearch: blank = len(self.charList) for b in range(batchSize): for label in ctcOutput[b]: if label == blank: break encodedLabelStrs[b].append(label) # TF decoders: label strings are contained in sparse tensor else: # ctc returns tuple, first element is SparseTensor decoded = ctcOutput[0][0] # go over all indices and save mapping: batch -> values idxDict = {b: [] for b in range(batchSize)} for (idx, idx2d) in enumerate(decoded.indices): label = decoded.values[idx] batchElement = idx2d[0] # index according to [b,t] encodedLabelStrs[batchElement].append(label) # map labels to chars for all batch elements return [str().join([self.charList[c] for c in labelStr]) for labelStr in encodedLabelStrs] def trainBatch(self, batch): "feed a batch into the NN to train it" numBatchElements = len(batch.imgs) sparse = self.toSparse(batch.gtTexts) rate = 0.01 if self.batchesTrained < 10 else ( 0.001 if self.batchesTrained < 10000 else 0.0001) # decay learning rate evalList = [self.optimizer, self.loss] feedDict = {self.inputImgs: batch.imgs, self.gtTexts: sparse, self.seqLen: [MAX_TEXT_LENGTH] * numBatchElements, self.learningRate: rate, self.is_train: True} (_, lossVal) = self.sess.run(evalList, feedDict) self.batchesTrained += 1 return lossVal def dumpNNOutput(self, rnnOutput): "dump the output of the NN to CSV file(s)" dumpDir = '../dump/' if not os.path.isdir(dumpDir): os.mkdir(dumpDir) # iterate over all batch elements and create a CSV file for each one maxT, maxB, maxC = rnnOutput.shape for b in range(maxB): csv = '' for t in range(maxT): for c in range(maxC): csv += str(rnnOutput[t, b, c]) + ';' csv += '\n' fn = dumpDir + 'rnnOutput_' + str(b) + '.csv' print('Write dump of NN to file: ' + fn) with open(fn, 'w') as f: f.write(csv) def inferBatch(self, batch, calcProbability=False, probabilityOfGT=False): "feed a batch into the NN to recognize the texts" # decode, optionally save RNN output numBatchElements = len(batch.imgs) evalRnnOutput = self.dump or calcProbability evalList = [self.decoder] + \ ([self.ctcIn3dTBC] if evalRnnOutput else []) feedDict = {self.inputImgs: batch.imgs, self.seqLen: [MAX_TEXT_LENGTH] * numBatchElements, self.is_train: False} 
evalRes = self.sess.run(evalList, feedDict) decoded = evalRes[0] texts = self.decoderOutputToText(decoded, numBatchElements) # feed RNN output and recognized text into CTC loss to compute labeling probability probs = None if calcProbability: sparse = self.toSparse( batch.gtTexts) if probabilityOfGT else self.toSparse(texts) ctcInput = evalRes[1] evalList = self.lossPerElement feedDict = {self.savedCtcInput: ctcInput, self.gtTexts: sparse, self.seqLen: [MAX_TEXT_LENGTH] * numBatchElements, self.is_train: False} lossVals = self.sess.run(evalList, feedDict) probs = np.exp(-lossVals) # dump the output of the NN to CSV file(s) if self.dump: self.dumpNNOutput(evalRes[1]) return (texts, probs) def save(self): "save model to file" self.snapID += 1 self.saver.save(self.sess, MODEL_PATH + EXPERIMENT_NAME, global_step=self.snapID)SamplePreprosessor.pydef preprocess(img): "scale image into the desired imgSize, transpose it for TF and normalize gray-values" # increase dataset size by applying random stretches to the images if AUGMENT_IMAGE: stretch = (random.random() - 0.5) # -0.5 .. +0.5 # random width, but at least 1 wStretched = max(int(img.shape[1] * (1 + stretch)), 1) # stretch horizontally by factor 0.5 .. 1.5 img = cv2.resize(img, (wStretched, img.shape[0])) # create target image and copy sample image into it (h, w) = img.shape fx = w / IMAGE_WIDTH fy = h / IMAGE_HEIGHT f = max(fx, fy) # scale according to f (result at least 1 and at most wt or ht) newSize = (max(min(IMAGE_WIDTH, int(w / f)), 1), max(min(IMAGE_HEIGHT, int(h / f)), 1)) img = cv2.resize(img, newSize) target = np.ones([IMAGE_HEIGHT, IMAGE_WIDTH]) * 255 target[0:newSize[1], 0:newSize[0]] = img # transpose for TF img = cv2.transpose(target) # normalize (m, s) = cv2.meanStdDev(img) m = m[0][0] s = s[0][0] img = img - m img = img / s if s > 0 else img return imgDataGenerator_BinaryFile.pyclass Sample: "a single sample from the dataset" def __init__(self, gtText, imageIdx, imageHeight, imageWidth, imageSize, imageStartPosition): self.gtText = gtText self.imageIdx = imageIdx self.imageHeight = imageHeight self.imageWidth = imageWidth self.imageSize = imageSize self.imageStartPosition = imageStartPosition class Batch: "batch containing images and ground truth texts" def __init__(self, gtTexts, imgs): self.gtTexts = gtTexts self.imgs = np.stack(imgs, axis=0) class DataGenerator: def __init__(self): self.binaryImageFile = open(BASE_IMAGES_FILE, "rb") self.currIdx = 0 self.samples = [] self.trainSamples = [] self.validationSamples = [] self.testSamples = [] def LoadData(self, operationType): if not os.path.isfile(TRAINING_LABELS_FILE) \ or not os.path.isfile(VALIDATION_LABELS_FILE) \ or not os.path.isfile(TESTING_LABELS_FILE): self.createDataFiles() if operationType == OperationType.Training: self.loadDataFile(OperationType.Training) self.loadDataFile(OperationType.Validation) elif operationType == OperationType.Validation: self.loadDataFile(OperationType.Validation) elif operationType == OperationType.Testing: self.loadDataFile(OperationType.Testing) def createDataFiles(self): charsSet = set() wordsSet = set() f = open(BASE_LABELS_FILE, encoding="utf-8") for line in f: # read all samples ==> append line as is self.samples.append(line) if REGENERATE_CHARLIST_AND_CORPUS: # extract unique characters from text lineSplit = line.split(';') gtText = lineSplit[8] gtText = gtText[5:] wordsSet.add(gtText) charsSet = charsSet.union(set(list(gtText))) f.close() # create a text file that contains all the characters in the dataset # this list shall used 
to create the CTC model # There might be a problem if a previously saved model used larger data, consequently, not all # the characters in the previous model will be generated and therefore RNN creation will fail # note that a problem might arise when we try to open a saved model that was saved on a larger dataset # conseuqnelty some represented characters might be abscent and the new model will fail to load previous one # a solution for this problem is to use a static character set for the used dataset # also create the corpus data file for BeamSearch (if required) # DONT CREATE THEM UNLESS U R USING LARGER DATASET, ALREADY CREATED IN DIRECTORY if REGENERATE_CHARLIST_AND_CORPUS: localCharList = sorted(list(charsSet)) open(fnCharList, 'w', encoding="utf-8").write(str().join(localCharList)) open(fnCorpus, 'w', encoding="utf-8").write(str().join(sorted(list(wordsSet)))) # first of all, make sure to randomly shuffle the main lables file # random.shuffle(self.samples) # split into training, validation, testing lenOfAllSamples = len(self.samples) lenOfTrainSamples = int(TRAINING_DATASET_SIZE * lenOfAllSamples) lenOfTrainingAndValidationSamples = lenOfAllSamples - lenOfTrainSamples lenOfValidationSamples = int( VALIDATION_DATASET_SPLIT_SIZE * lenOfTrainingAndValidationSamples) with open(TRAINING_LABELS_FILE, 'w', encoding="utf-8") as f: for item in self.samples[:lenOfTrainSamples]: f.write(item) with open(VALIDATION_LABELS_FILE, 'w', encoding="utf-8") as f: for item in self.samples[lenOfTrainSamples:lenOfTrainSamples + lenOfValidationSamples]: f.write(item) with open(TESTING_LABELS_FILE, 'w', encoding="utf-8") as f: for item in self.samples[lenOfTrainSamples + lenOfValidationSamples:]: f.write(item) self.samples = [] def loadDataFile(self, operationType): if operationType == OperationType.Training: fileName = TRAINING_LABELS_FILE elif operationType == OperationType.Validation: fileName = VALIDATION_LABELS_FILE elif operationType == OperationType.Testing: fileName = TESTING_LABELS_FILE f = open(fileName, encoding="utf-8") for line in f: lineSplit = line.split(';') imgIdx = lineSplit[0] imgIdx = imgIdx[10:] imgStartPosition = lineSplit[1] imgStartPosition = int(imgStartPosition[15:]) imgHeight = lineSplit[2] imgHeight = int(imgHeight[13:]) imgWidth = lineSplit[3] imgWidth = int(imgWidth[12:]) imgSize = imgHeight * imgWidth gtText = lineSplit[8] gtText = gtText[5:] #gtText = self.truncateLabel(' '.join(gtText), MAX_TEXT_LENGTH) # put sample into list if operationType == OperationType.Training: self.trainSamples.append( Sample(gtText, imgIdx, imgHeight, imgWidth, imgSize, imgStartPosition)) elif operationType == OperationType.Validation: self.validationSamples.append( Sample(gtText, imgIdx, imgHeight, imgWidth, imgSize, imgStartPosition)) elif operationType == OperationType.Testing: self.testSamples.append( Sample(gtText, imgIdx, imgHeight, imgWidth, imgSize, imgStartPosition)) def truncateLabel(self, text, maxTextLen): # ctc_loss can't compute loss if it cannot find a mapping between text label and input # labels. Repeat letters cost double because of the blank symbol needing to be inserted. 
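# Illustrative example (added for clarity, not in the original): for the label "book" the
# per-character costs below work out to 1 (b) + 1 (o) + 2 (repeated o) + 1 (k) = 5, so
# "book" is kept intact only when maxTextLen is at least 5; otherwise it is cut at the
# index where the running cost first exceeds maxTextLen (e.g. maxTextLen=4 yields "boo").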
# If a too-long label is provided, ctc_loss returns an infinite gradient cost = 0 for i in range(len(text)): if i != 0 and text[i] == text[i - 1]: cost += 2 else: cost += 1 if cost > maxTextLen: return text[:i] return text def selectTrainingSet(self): "switch to randomly chosen subset of training set" self.currIdx = 0 random.shuffle(self.trainSamples) self.samples = self.trainSamples[:TRAINING_SAMPLES_PER_EPOCH] def selectValidationSet(self): "switch to validation set" self.currIdx = 0 random.shuffle(self.validationSamples) self.samples = self.validationSamples[: VALIDATIOIN_SAMPLES_PER_STEP] def selectTestSet(self): "switch to validation set" self.currIdx = 0 random.shuffle(self.testSamples) self.samples = self.testSamples[:VALIDATIOIN_SAMPLES_PER_STEP] def getIteratorInfo(self): "current batch index and overall number of batches" return (self.currIdx // BATCH_SIZE + 1, len(self.samples) // BATCH_SIZE) def hasNext(self): "iterator" return self.currIdx + BATCH_SIZE <= len(self.samples) def getNext(self): "iterator" batchRange = range(self.currIdx, self.currIdx + BATCH_SIZE) gtTexts = [self.samples[i].gtText for i in batchRange] imgs = [] for i in batchRange: try: self.binaryImageFile.seek(self.samples[i].imageStartPosition) img = np.frombuffer(self.binaryImageFile.read( self.samples[i].imageSize), np.dtype('B')) img = img.reshape( self.samples[i].imageHeight, self.samples[i].imageWidth) img = preprocess(img) # img = preprocess(img, IMAGE_WIDTH, IMAGE_HEIGHT, RESIZE_IMAGE, # CONVERT_IMAGE_TO_MONOCHROME, AUGMENT_IMAGE) imgs.append(img) except IOError as e: print("I/O error({0}): {1}".format(e.errno, e.strerror)) pass except ValueError as e: print(e) pass except Error as e: print("Unexpected error:", sys.exc_info()[0]) print("Value error({0}): {1}".format(e.errno, e.strerror)) pass self.currIdx += BATCH_SIZE return Batch(gtTexts, imgs)main.pystartTime = datetime.now() totalProcessingTime = 0 # we only need DataGenerator in training, validation, testing inorder to access the related datasets if OPERATION_TYPE != OperationType.Infer: dataGenerator = DataGenerator() def accumulateProcessingTime(paraTimeSnapshot): totalProcessingTime = time.time() #totalProcessingTime = totalProcessingTime + (time.time() - paraTimeSnapshot) def train(paraModel): "train NN" epoch = 0 # number of training epochs since start bestCharErrorRate = float('inf') # best valdiation character error rate noImprovementSince = 0 # number of epochs no improvement of character error rate occured auditString = get_initial_status_log() print(auditString) auditLog(auditString) continueLooping = True while continueLooping: print("Current Time =", datetime.now()) epoch += 1 print('Epoch:', epoch) dataGenerator.selectTrainingSet() while dataGenerator.hasNext(): timeSnapshot = time.time() iterInfo = dataGenerator.getIteratorInfo() batch = dataGenerator.getNext() loss = paraModel.trainBatch(batch) # #stop execution after reaching a certain threashold # if (int(loss) == 1): # noImprovementSince = MAXIMUM_NONIMPROVED_EPOCHS; print('Training Batch:', iterInfo[0], '/', iterInfo[1], 'Loss:', loss) accumulateProcessingTime(timeSnapshot) # validate charErrorRate, charSuccessRate, wordsSuccessRate = validate( paraModel, OperationType.Validation) auditString = "Epoch Number %d." 
% epoch + "\n" # if best validation accuracy so far, save model parameters if charErrorRate < bestCharErrorRate: auditString = auditString + 'Character error rate improved, saving model' paraModel.save() bestCharErrorRate = charErrorRate noImprovementSince = 0 else: auditString = auditString + "Character error rate not improved\n" noImprovementSince += 1 # stop training if no more improvement in the last x epochs if noImprovementSince >= MAXIMUM_NONIMPROVED_EPOCHS: auditString = auditString + \ "No more improvement since %d epochs." % MAXIMUM_NONIMPROVED_EPOCHS + "\n" # gracefull termination continueLooping = False # Model did not finish, print log and save it auditString = auditString + \ get_execution_log(charSuccessRate, wordsSuccessRate) print(auditString) auditLog(auditString) def validate(paraModel, paraOperationType): if paraOperationType == OperationType.Validation: dataGenerator.selectValidationSet() elif paraOperationType == OperationType.Testing: dataGenerator.selectTestSet() numCharErr = 0 numCharTotal = 0 numWordOK = 0 numWordTotal = 0 timeSnapshot = 0.0 while dataGenerator.hasNext(): timeSnapshot = time.time() iterInfo = dataGenerator.getIteratorInfo() print('Validating Batch:', iterInfo[0], '/', iterInfo[1]) batch = dataGenerator.getNext() (recognized, _) = paraModel.inferBatch(batch) accumulateProcessingTime(timeSnapshot) # print('Ground truth -> Recognized') for i in range(len(recognized)): numWordTotal += 1 numCharTotal += len(batch.gtTexts[i]) numWordOK += 1 if batch.gtTexts[i] == recognized[i] else 0 dist = editdistance.eval(recognized[i], batch.gtTexts[i]) numCharErr += dist # remove remark to see each success and error values #print('[OK]' if dist==0 else '[ERR:%d]' % dist,'"' + batch.gtTexts[i] + '"', '->', '"' + recognized[i] + '"') # print validation result charErrorRate = numCharErr / numCharTotal charSuccessRate = 1 - (numCharErr / numCharTotal) wordsSuccessRate = numWordOK / numWordTotal # print and save validation result, this includes post epoch operation as well as when # running standalone testing or validation processes return charErrorRate, charSuccessRate, wordsSuccessRate def inferSingleImage(paraModel, paraFnImg): "recognize text in image provided by file path" img = cv2.imread(paraFnImg, cv2.IMREAD_GRAYSCALE) img = preprocess(img) # img = preprocess(img, IMAGE_WIDTH, # IMAGE_HEIGHT, True, False, False) batch = Batch(None, [img]) #(recognized, probability) = model.inferBatch(batch) (recognized, probability) = paraModel.inferBatch(batch, True) print('Recognized:', '"' + recognized[0] + '"') print('Probability:', probability[0]) def get_initial_status_log(): auditString = "____________________________________________________________" + "\n" auditString = auditString + "Experiment Name: " + EXPERIMENT_NAME + "\n" auditString = auditString + "Base File Name: " + BASE_FILENAME + "\n" auditString = auditString + 'Start Execution Time :' + \ startTime.strftime("%m/%d/%Y, %H:%M:%S") + "\n" auditString = auditString + "Training set size: " + \ str(len(dataGenerator.trainSamples)) + "\n" auditString = auditString + "Validation set size: " + \ str(len(dataGenerator.validationSamples)) + "\n" auditString = auditString + "Training Samples per epoch: " + \ str(TRAINING_SAMPLES_PER_EPOCH) + "\n" auditString = auditString + "Validation Samples per step: " + \ str(VALIDATIOIN_SAMPLES_PER_STEP) + "\n" auditString = auditString + "Batch size: " + str(BATCH_SIZE) + "\n" auditString = auditString + "TRAINING_SAMPLES_PER_EPOCH: " + \ str(TRAINING_SAMPLES_PER_EPOCH) + "\n" 
auditString = auditString + "BATCH_SIZE: " + str(BATCH_SIZE) + "\n" auditString = auditString + "VALIDATIOIN_SAMPLES_PER_STEP: " + \ str(VALIDATIOIN_SAMPLES_PER_STEP) + "\n" auditString = auditString + "TRAINING_DATASET_SIZE: " + \ str(TRAINING_DATASET_SIZE) + "\n" auditString = auditString + "VALIDATION_DATASET_SPLIT_SIZE: " + \ str(VALIDATION_DATASET_SPLIT_SIZE) + "\n" auditString = auditString + "IMAGE_WIDTH: " + \ str(IMAGE_WIDTH) + "\n" auditString = auditString + "IMAGE_HEIGHT: " + \ str(IMAGE_HEIGHT) + "\n" auditString = auditString + "MAX_TEXT_LENGTH: " + \ str(MAX_TEXT_LENGTH) + "\n" auditString = auditString + "RESIZE_IMAGE: " + \ str(RESIZE_IMAGE) + "\n" auditString = auditString + "CONVERT_IMAGE_TO_MONOCHROME: " + \ str(CONVERT_IMAGE_TO_MONOCHROME) + "\n" auditString = auditString + "MONOCHROME_BINARY_THRESHOLD: " + \ str(MONOCHROME_BINARY_THRESHOLD) + "\n" auditString = auditString + "AUGMENT_IMAGE: " + \ str(AUGMENT_IMAGE) + "\n\n" return auditString def get_execution_log(paraCharSuccessRate, paraWordsSuccessRate): auditString = "Start Execution Time : " + \ startTime.strftime("%m/%d/%Y, %H:%M:%S") + "\n" auditString = auditString + "End Execution Time :" + \ datetime.now().strftime("%m/%d/%Y, %H:%M:%S") + "\n" auditString = auditString + "Accumulated Processing Time : " + \ str(totalProcessingTime / 60) + " minutes" + "\n" auditString = auditString + "Characters Success Rate: " + \ str(paraCharSuccessRate * 100.0) + "%\n" auditString = auditString + "Words Success Rate: " + \ str(paraWordsSuccessRate * 100.0) + "%\n\n" return auditString def main(): if OPERATION_TYPE != OperationType.Infer: dataGenerator.LoadData(OPERATION_TYPE) if OPERATION_TYPE == OperationType.Training: auditString = "EXPERIMENT_NAME: " + EXPERIMENT_NAME + "\n" auditString = auditString + "Training Using Dataset: " + \ str(OPERATION_TYPE) + "\n" print(auditString) auditLog(auditString) model = Model(DECODER_TYPE, mustRestore=False, dump=False) train(model) elif OPERATION_TYPE == OperationType.Validation or OPERATION_TYPE == OperationType.Testing: auditString = "EXPERIMENT_NAME: " + EXPERIMENT_NAME + "\n" auditString = auditString + "Validation/Tesing Using Dataset: " + \ str(OPERATION_TYPE) + "\n" print(auditString) model = Model(DECODER_TYPE, mustRestore=True, dump=False) charErrorRate, charSuccessRate, wordsSuccessRate = validate( model, OPERATION_TYPE) auditString = auditString + \ get_execution_log(charSuccessRate, wordsSuccessRate) + "\n" print(auditString) auditLog(auditString) elif OPERATION_TYPE == OperationType.Infer: # infer text on test image print(open(fnResult).read()) tf.reset_default_graph() #model = Model(open(fnCharList, encoding="utf-8").read(), decoderType, mustRestore=True, dump=args.dump) model = Model(DECODER_TYPE, mustRestore=True, dump=False) inferSingleImage(model, fnInfer) if __name__ == '__main__': main()Importing the required librariesimport pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import plotly.express as px df = pd.read_csv('data/owid-covid-latest.csv') df.info() df.head(10)Continent specific visualisationscontinent_obj = df.groupby('continent') asia_df = continent_obj.get_group('Asia') na_df = continent_obj.get_group('North America') sa_df = continent_obj.get_group('South America')ASIAasia_df.head() 
asia_df.drop(['last_updated_date','new_cases_smoothed','new_deaths_smoothed','new_cases_smoothed_per_million','new_deaths_smoothed_per_million','icu_patients','hosp_patients','weekly_icu_admissions','weekly_hosp_admissions','new_tests','new_tests_per_thousand','new_tests_smoothed','new_tests_smoothed_per_thousand','new_vaccinations','new_vaccinations_smoothed','total_vaccinations_per_hundred','people_vaccinated_per_hundred','people_fully_vaccinated_per_hundred','new_vaccinations_smoothed_per_million'],axis=1,inplace=True) /Users/thegeorgejoseph/opt/anaconda3/envs/proton/lib/python3.8/site-packages/pandas/core/frame.py:4167: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy return super().drop( Dealing with Null Data asia_df.shape # logic asia_df.dropna(axis=0,subset=['total_cases','total_deaths'],inplace=True,how='any') df.dropna(axis=0,subset=['total_cases','total_deaths'],inplace=True,how='any') print(asia_df.columns) Index(['iso_code', 'continent', 'location', 'total_cases', 'new_cases', 'total_deaths', 'new_deaths', 'total_cases_per_million', 'new_cases_per_million', 'total_deaths_per_million', 'new_deaths_per_million', 'reproduction_rate', 'icu_patients_per_million', 'hosp_patients_per_million', 'weekly_icu_admissions_per_million', 'weekly_hosp_admissions_per_million', 'total_tests', 'total_tests_per_thousand', 'positive_rate', 'tests_per_case', 'tests_units', 'total_vaccinations', 'people_vaccinated', 'people_fully_vaccinated', 'stringency_index', 'population', 'population_density', 'median_age', 'aged_65_older', 'aged_70_older', 'gdp_per_capita', 'extreme_poverty', 'cardiovasc_death_rate', 'diabetes_prevalence', 'female_smokers', 'male_smokers', 'handwashing_facilities', 'hospital_beds_per_thousand', 'life_expectancy', 'human_development_index'], dtype='object') Bubble Maps fig = px.scatter_geo(df, locations="iso_code", size="total_cases", # size of markers, "pop" is one of the columns of gapminder ) fig.show() Exploring Memory-Optimized TempDB Metadata TempDB metadata contention has historically been a bottleneck to scalability for many workloads running on SQL Server. SQL Server 2019 introduces a new feature that is part of the [In-Memory Database](https://docs.microsoft.com/sql/relational-databases/in-memory-database) feature family, memory-optimized tempdb metadata, which effectively removes this bottleneck and unlocks a new level of scalability for tempdb-heavy workloads. In SQL Server 2019, the system tables involved in managing temp table metadata can be moved into latch-free non-durable memory-optimized tables. To learn more about tempdb metadata contention, along with other types of tempdb contention, check out the blog article [TEMPDB - Files and Trace Flags and Updates, Oh My!](https://techcommunity.microsoft.com/t5/SQL-Server/TEMPDB-Files-and-Trace-Flags-and-Updates-Oh-My/ba-p/385937). Keep reading to explore the new memory-optimized tempdb metadata feature. Configure your environment Contention in tempdb happens when a large number of concurrent threads are attempting to create, modify or drop temp tables. In order to simulate this situation, you'll need to have a SQL Server instance that has multiple cores (4 or more is recommended), and a way to simulate multiple concurrent threads. For this example, we'll be using the ostress.exe tool to generate multiple concurrent threads. 
If you have an existing multi-core SQL Server instance, you can follow the T-SQL instructions to set up the demo, otherwise you can try the docker container steps instead. First, download the demo files to your local computer. Docker Container Setup Note that the docker commands may take some time to execute, but you will not see progress here until they are complete. 1. Make sure you have your docker environment configured, more information [here](https://docs.docker.com/get-started/). > NOTE > If you are using Docker Desktop for Windows or Mac, the default configuration will limit your containers to 2 cores, regardless of the number of cores on your computer. Be sure to configure docker to allow at least 4 cores and 4GB of RAM for this demo to run properly. To do this, right click on the Docker Desktop icon in the status bar and choose Settings -> Advanced. 2. Pull the demo container with the following command:! docker pull bluefooted/sql2019tempdbdemoUsing default tag: latest latest: Pulling from bluefooted/sql2019tempdbdemo Digest: sha256:035a1bda5539bfe68ad1b2f032a6e389cea91a0cd880e75b83ef186c46b2e34f Status: Image is up to date for bluefooted/sql2019tempdbdemo:latest docker.io/bluefooted/sql2019tempdbdemo:latest3. Start the demo container with the following command:! docker run -e "ACCEPT_EULA=Y" -e "SA_PASSWORD=!" -p 1455:1433 --name sql2019tempdbdemo -d bluefooted/sql2019tempdbdemo2a3dbfff94ce2bd277778e36ac268b4495afb77f1b6c5417478b10a9c545cca6> NOTE: > If you see the following error, you may already have run the docker run command with this image: *docker: Error response from daemon: Conflict. The container name "/sql2019tempdbdemo" is already in use by container "3f662e0fd9b8cbdc1013e874722e066aa8e81ec3a07423fc3ab95cb75e640af9". You have to remove (or rename) that container to be able to reuse that name. See 'docker run --help'.* If you see this message, you can start the container instead with the following command:! docker start sql2019tempdbdemo4. Connect to the demo SQL Server instance using Azure Data Studio or SQL Server Management Studio using the following information: **Server Name**: localhost,1455 **Username**: sa **Password**: ! Existing SQL Server Instance Setup (skip if you are using the demo container) If you already have a SQL Server instance with a minimum of 4 cores, you can download and restore the AdventureWorks database and use the scripts in this repo to configure the database. Follow the steps in the T-SQL notebook to complete this setup. Detecting TempDB Metadata Contention The first thing to figure out before you turn on this new feature is whether or not you are experiencing TempDB metadata contention. The main symptom of this contention is a number of sessions in `Suspended` state with a wait type of `PAGELATCH_xx` and a wait resource of a page that hosts a TempDB system table, such as `2:1:118`. In order to know whether or not the page is part of a TempDB system table, you can use the new dynamic management function `sys.dm_db_page_info()` in SQL Server 2019 or later, or the older `DBCC PAGE` command in older versions. For this demo, let's focus on `sys.dm_db_page_info()`. Note that this command will take some time to complete, you'll want to proceed to the next step before it completes. First, start the workload using the `ostress.exe` tool that is included in the downloads. Note that if you are not using the demo container, you will need to change the server name and login information.! ostress.exe -Slocalhost,1455 -Usa -PP@ssw0rd! 
-dAdventureWorks -Q"EXEC dbo.usp_EmployeeBirthdayList 4" -mstress -quiet -n16 -r120 | FINDSTR "QEXEC Starting Creating elapsed"10/08/19 15:39:54.739 [0x00007A74] -QEXEC dbo.usp_EmployeeBirthdayList 4 10/08/19 15:39:54.779 [0x00007A74] Starting query execution... 10/08/19 15:39:54.783 [0x00007A74] Creating 16 thread(s) to process queries 10/08/19 15:40:30.158 [0x00007A74] OSTRESS exiting normally, elapsed time: 00:00:35.419While the above script is running, switch over to the T-SQL notebook and run the script to monitor your workload for page contention. You should see several sessions with a `wait_type` of `PAGELATCH_EX` or `PAGELATCH_SH`, often with an `object_name` of `sysschobjs`. > NOTE > If this query does not return any results, make sure the command above is still running. If it is not running, start it and try the query again. If you still do not see any sessions waiting, you may need to increase the number of CPUs available to your server, and/or increase the number of concurrent threads by increasing the `-n` parameter in the command. This demo was tested with 4 cores and 16 concurrent sessions, which should yield the expected results. If you would like more time to examine the contention, you can increase the `-r` parameter, which will increase the number of iterations. Improve performance with Memory-Optimized TempDB Metadata Now that you have observed TempDB metadata contention, let's see how SQL Server 2019 addresses this contention. Switch over to the T-SQL notebook to review and run the script to enable Memory-Optimized TempDB Metadata. Once you have run the T-SQL command, you will need to restart the service. If you are using the demo container, you can do so with the following command:! docker restart sql2019tempdbdemosql2019tempdbdemoOnce the server is restarted, you can use queries in the T-SQL notebook to verify that the feature has been enabled. > NOTE > It's a good idea to run a few T-SQL queries after the restart to make sure the server is up and running before you attempt the scenario again. Now that we have enabled Memory-Optimized TempDB Metadata, let's try running the workload again:! ostress.exe -Slocalhost,1455 -Usa -PP@ssw0rd! -dAdventureWorks -Q"EXEC dbo.usp_EmployeeBirthdayList 4" -mstress -quiet -n16 -r120 | FINDSTR "QEXEC Starting Creating elapsed"10/08/19 15:38:27.261 [0x00009AA0] -QEXEC dbo.usp_EmployeeBirthdayList 4 10/08/19 15:38:27.316 [0x00009AA0] Starting query execution... 
10/08/19 15:38:27.321 [0x00009AA0] Creating 16 thread(s) to process queries 10/08/19 15:38:54.045 [0x00009AA0] OSTRESS exiting normally, elapsed time: 00:00:26.784TO DO: Set seed for generator of initial conditions.import pickle import numpy as np import pandas as pd import math import yamlWe import the model specification parameters and externally defined constants here.# Import specified definitions only from given notebook import ipynb.fs from .defs.shared_constants import MISSING_INT, MISSING_FLOAT from .defs.shared_auxiliary import draw_disturbances from .defs.read import read_init_file from .defs.read import init_dict_to_attr_dict from .defs.shared_auxiliary import calculate_wage_systematic from .defs.shared_auxiliary import calculate_period_wages from .defs.shared_auxiliary import calculate_consumption_utilities from .defs.shared_auxiliary import calculate_total_utilities from .defs.shared_auxiliary import calculate_utilities from .defs.shared_auxiliary import calculate_continuation_values # Read in initialization file as attr_dict attr_dict = read_init_file('toy_model_init_file.yml') # Import the final output of pyth_create_state_space, args # In the modular implementation pyth_create_state_space will be called by by pyth_solve # pyth_solve is executed before pyth_simulate file_name = "args_file.pkl" # Open the file for reading file_object = open(file_name,'rb') # load the object from the file into var args state_space_args = pickle.load(file_object) # Import the final output of pyth_backward_induction, periods_emax # In the modular implementation pyth_create_state_space will be called by by pyth_solve # pyth_solve is executed before pyth_simulate file_name = "periods_emax_file.pkl" # Open the file for reading file_object = open(file_name,'rb') # load the object from the file into var args periods_emax = pickle.load(file_object)Then, we need to define additional function called in the loop to determine agents choices.def extract_individual_covariates (educ_years, educ_min, i): """Constructs additional covariates given agent indicator.""" # Determine education level given number of years of education # Would it be more efficient to do this somewhere else? 
# Unpack state space components educ_years_i = educ_years[i] # Extract education information if (educ_years_i <= 10): educ_level = [1,0,0] elif (educ_years_i > 10) and (educ_years_i <= 12): educ_level = [0,1,0] else: educ_level = [0,0,1] educ_years_idx = educ_years_i - educ_min # Return function output return educ_years_i, educ_level, educ_years_idx # Test ensure that simulated values of initial conditions are reproducible educ_years_test = list(range(10, 15)) np.random.seed(123) educ_years_test = np.random.choice(educ_years_test, 10) educ_years_test def pyth_simulate(attr_dict, state_space_args, periods_emax): """Simulate agent experiences.""" # Unpack objects from agrs states_all, states_number_period, mapping_states_index, max_states_period = state_space_args[0], state_space_args[1], state_space_args[2], state_space_args[3] # Unpack parameter from the model specification educ_min = attr_dict['INITIAL_CONDITIONS']['educ_min'] educ_max = attr_dict['INITIAL_CONDITIONS']['educ_max'] num_periods = attr_dict['GENERAL']['num_periods'] num_agents_sim = attr_dict['SIMULATION']['num_agents_sim'] seed_sim = attr_dict['SIMULATION']['seed_sim'] shocks_cov = attr_dict['DERIVED_ATTR']['shocks_cov'] optim_paras = attr_dict['PARAMETERS']['optim_paras'] delta = attr_dict['CONSTANTS']['delta'] educ_years = list(range(educ_min, educ_max + 1)) educ_years = np.random.choice(educ_years, num_agents_sim) # Create draws for simulated sample draws_sim = draw_disturbances((num_periods, num_agents_sim), shocks_cov, seed_sim) # Start count over all simulations/row (number of agents times number of periods) count = 0 # Initialize container for the final output num_columns = 14 # count of the information units we wish to record dataset = np.tile(MISSING_FLOAT, (num_agents_sim*num_periods, num_columns)) # Loop over all agents for i in range(num_agents_sim): # Construct additional education information educ_years_i, educ_level, educ_years_idx = extract_individual_covariates (educ_years, educ_min, i) # Extract the indicator of the initial state for the individual # depending on the individuals initial condition initial_state_index = mapping_states_index[educ_years_idx, educ_years_idx, 0, 0, 0] # Assign the initial state as current state current_state = states_all[educ_years_idx, initial_state_index, :].copy() # Loop over all remaining for period in range(num_periods): # Record agent identifier, period number, and level of education dataset[count, :2] = i, period, dataset[count, 2:3] = educ_years_i # Ensure that the simulation starts only in the period # in which the individual enters the model after having compldeted education if period < educ_years_idx: count += 1 continue # Extract state space components choice_lagged, exp_p, exp_f = current_state[1], current_state[2], current_state[3] # Look up the indicator for the current state k = mapping_states_index[period, educ_years_i - educ_min, choice_lagged, exp_p, exp_f] # Calculate choice specific value functions # for individual, period and state space point # Extract the error term draws corresponding to # period number and individual corresponding_draws = draws_sim[period, i, :] # Calculate correspongind flow utilities flow_utilities, consumption_utilities, period_wages, wage_systematic = calculate_utilities(attr_dict, educ_level, exp_p, exp_f, optim_paras, corresponding_draws) # Obtain continuation values for all choices continuation_values = calculate_continuation_values(attr_dict, mapping_states_index, periods_emax, period, educ_years_idx, exp_p, exp_f) # Calculate 
total values for all choices value_functions = flow_utilities + delta * continuation_values # Determine choice as option with highest choice specific value function max_idx = np.argmax(value_functions) # Record period experiences dataset[count, 3:4] = max_idx dataset[count, 4:5] = wage_systematic dataset[count, 5:8] = period_wages[:] dataset[count, 8:11] = consumption_utilities[:] dataset[count, 11:14] = flow_utilities[:] # Update state space component experience current_state[max_idx + 1] += 1 # Update state space component choice_lagged current_state[1] = max_idx # Update simulation/row count count += 1 # Return function output return dataset dataset = pyth_simulate(attr_dict, state_space_args, periods_emax)Finally, we want to record the dataset as a Pandas Dataframe.def replace_missing_values (arguments): """Replace MISSING_FLOAT with NAN.""" # Antibugging assert isinstance(arguments, tuple) or isinstance(arguments, np.ndarray) if isinstance(arguments, np.ndarray): arguments = (arguments,) rslt = tuple() for argument in arguments: # Transform to float array to evaluate missing values argument_internal = np.asfarray(argument) # Determine missing values is_missing = argument_internal == MISSING_FLOAT if np.any(is_missing): # Replace missing values argument = np.asfarray(argument) argument[is_missing] = np.nan rslt += (argument,) # Align interface if len(rslt) == 1: rslt = rslt[0] # Function output return rslt # Create fixed objects needed to record simulated dataset to Pandas Dataframe # Define column lables DATA_LABLES_SIM = [] DATA_LABLES_SIM += ["Identifier", "Period"] DATA_LABLES_SIM += ["Years of Education"] DATA_LABLES_SIM += ["Choice"] DATA_LABLES_SIM += ["Systematic Wage"] DATA_LABLES_SIM += ["Period Wage N", "Period Wage P", "Period Wage F"] DATA_LABLES_SIM += ["Consumption Utility N", "Consumption Utility P", "Consumption Utility F"] DATA_LABLES_SIM += ["Flow Utility N", "Flow Utility P", "Flow Utility F"] # Define data types for data set columns DATA_FORMATS_SIM = dict() for key_ in DATA_LABLES_SIM: DATA_FORMATS_SIM[key_] = np.int if key_ in ["Choice", "Systematic Wage", "Period Wage N", "Period Wage P", "Period Wage F", "Consumption Utility N", "Consumption Utility P", "Consumption Utility F", "Flow Utility N", "Flow Utility P", "Flow Utility F"]: DATA_FORMATS_SIM[key_] = np.float # Create data frame from simulated dataset data_frame = pd.DataFrame( data = replace_missing_values(dataset), columns = DATA_LABLES_SIM ) # Set specific columns to desired data types data_frame = data_frame.astype(DATA_FORMATS_SIM) # Define identifier for unique observation in the data frame data_frame.set_index(["Identifier", "Period"], drop=False, inplace=True) data_frameTo run this colab, press the "Runtime" button in the menu tab and then press the "Run all" button. 
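Because the frame is indexed by ("Identifier", "Period"), individual simulated histories can be pulled out directly. The snippet below is a small illustrative sketch added for clarity (it is not part of the original notebook); it assumes the `data_frame` built above, and the agent id `0` is just an example value.

```python
# Illustrative only: slice the simulated panel for a single agent.
# Assumes the MultiIndex ("Identifier", "Period") set on data_frame above.
agent_0_history = data_frame.loc[0]          # all periods recorded for agent 0
chosen_options = agent_0_history["Choice"]   # option with the highest value function in each period
print(chosen_options.value_counts(dropna=False))
```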
Recognize Flowers using Transfer Learning Import necessary libraries.from __future__ import absolute_import, division, print_function, unicode_literals !pip install 'tensorflow>=1.3.0,<2.0.0' --force-reinstall !pip install tf-nightly-gpu import tensorflow as tf import os import numpy as np import matplotlib.pyplot as pltCheck the version.tf.__version__Setup Input Pipeline Download the faces dataset._URL = "https://doc-04-7s-docs.googleusercontent.com/docs/securesc/ha0ro937gcuc7l7deffksulhg5h7mbp1/rcrb3putiqv50p1quji2jae0a4u096ie/1559995200000/03029111978704805167/*/1aWB7gcnxoqHF0iuHBqF_GMuKsFz9f_Us?e=download" zip_file = tf.keras.utils.get_file(origin=_URL, fname="faces_photos_kaggle.zip", extract=True) base_dir = os.path.join(os.path.dirname(zip_file), 'faces_photos')Use `ImageDataGenerator` to rescale the images.Create the train generator, specifying the train dataset directory, image size, and batch size.Create the validation generator with a similar approach to the train generator, using the flow_from_directory() method.# IMAGE_SIZE = 224 IMAGE_SIZE = 48 BATCH_SIZE = 64 datagen = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1./255, validation_split=0.15) train_generator = datagen.flow_from_directory( base_dir, target_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE, subset='training') val_generator = datagen.flow_from_directory( base_dir, target_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE, subset='validation') for image_batch, label_batch in train_generator: break image_batch.shape, label_batch.shapeSave the labels in a file which will be downloaded later.print(train_generator.class_indices) labels = '\n'.join(sorted(train_generator.class_indices.keys())) open('labels.txt', 'w').write(labels) !cat labels.txtCreate the base model from the pre-trained convnetsCreate the base model from the **MobileNet V2** model developed at Google, and pre-trained on the ImageNet dataset, a large dataset of 1.4M images and 1000 classes of web images.First, pick which intermediate layer of MobileNet V2 will be used for feature extraction. A common practice is to use the output of the very last layer before the flatten operation, the so-called "bottleneck layer". The reasoning here is that the following fully-connected layers will be too specialized to the task the network was trained on, and thus the features learned by these layers won't be very useful for a new task. The bottleneck features, however, retain much generality.Let's instantiate a MobileNet V2 model pre-loaded with weights trained on ImageNet. By specifying the `include_top=False` argument, we load a network that doesn't include the classification layers at the top, which is ideal for feature extraction.IMG_SHAPE = (IMAGE_SIZE, IMAGE_SIZE, 3) # Create the base model from the pre-trained model MobileNet V2 base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')Feature extractionYou will freeze the convolutional base created from the previous step and use it as a feature extractor, add a classifier on top of it, and train the top-level classifier.base_model.trainable = FalseAdd a classification headmodel = tf.keras.Sequential([ base_model, tf.keras.layers.Conv2D(32, 1, activation='relu'), tf.keras.layers.Dropout(0.2), tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dense(7, activation='softmax') ])Compile the modelYou must compile the model before training it.
Since the classification head ends in 7 softmax units (one per class), use a categorical cross-entropy loss.model.compile(optimizer=tf.keras.optimizers.Adam(), loss='categorical_crossentropy', metrics=['accuracy']) model.summary() print('Number of trainable variables = {}'.format(len(model.trainable_variables)))Train the modelepochs = 150 history = model.fit(train_generator, epochs=epochs, validation_data=val_generator)Learning curvesLet's take a look at the learning curves of the training and validation accuracy/loss when using the MobileNet V2 base model as a fixed feature extractor. Fine tuningIn our feature extraction experiment, you were only training a few layers on top of a MobileNet V2 base model. The weights of the pre-trained network were **not** updated during training.One way to increase performance even further is to train (or "fine-tune") the weights of the top layers of the pre-trained model alongside the training of the classifier you added. The training process will force the weights to be tuned from generic feature maps to features associated specifically with our dataset. Un-freeze the top layers of the model All you need to do is unfreeze the `base_model` and set the bottom layers to be un-trainable. Then, recompile the model (necessary for these changes to take effect), and resume training.base_model.trainable = True # Let's take a look to see how many layers are in the base model print("Number of layers in the base model: ", len(base_model.layers)) # Fine tune from this layer onwards fine_tune_at = 100 # Freeze all the layers before the `fine_tune_at` layer for layer in base_model.layers[:fine_tune_at]: layer.trainable = FalseCompile the modelCompile the model using a much lower learning rate.model.compile(loss='categorical_crossentropy', optimizer = tf.keras.optimizers.Adam(1e-5), metrics=['accuracy']) model.summary() print('Number of trainable variables = {}'.format(len(model.trainable_variables)))Continue training the modelhistory_fine = model.fit(train_generator, epochs=60, validation_data=val_generator)Convert to TFLite Save the model as a Keras `.h5` file and then convert the saved model to a TensorFlow Lite compatible format.file = "new.h5" model.save(file) converter = tf.lite.TFLiteConverter.from_keras_model_file(file) tflite_model = converter.convert() open('model.tflite', 'wb').write(tflite_model)Download the converted model and labelsfrom google.colab import files files.download('model.tflite') files.download('labels.txt')Prepare Data path and load cfgBy setting the `L5KIT_DATA_FOLDER` variable, we can point the script to the folder where the data lies.Then, we load our config file with relative paths and other configurations (rasteriser, training params...).# set env variable for data os.environ["L5KIT_DATA_FOLDER"] = "PATH_TO_DATA" dm = LocalDataManager(None) # get config cfg = load_config_data("./agent_motion_config.yaml") print(cfg)ModelOur baseline is a simple `resnet50` pretrained on `imagenet`.
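Before swapping anything, it can help to see the two layers that are about to be replaced. The check below is only an illustrative sketch (it assumes `torchvision` is installed and is not part of the training pipeline itself):

```python
from torchvision.models import resnet50

# The stock network expects 3-channel RGB input and emits 1000 ImageNet logits,
# which is why both ends have to be replaced for the rasterised multi-frame input
# and the (X, Y) future-position targets used here.
stock = resnet50(pretrained=True)
print(stock.conv1)  # Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), ...)
print(stock.fc)     # Linear(in_features=2048, out_features=1000)
```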
We must replace the input and the final layer to address our requirements.def build_model(cfg: Dict) -> torch.nn.Module: # load pre-trained Conv2D model model = resnet50(pretrained=True) # change input channels number to match the rasterizer's output num_history_channels = (cfg["model_params"]["history_num_frames"] + 1) * 2 num_in_channels = 3 + num_history_channels model.conv1 = nn.Conv2d( num_in_channels, model.conv1.out_channels, kernel_size=model.conv1.kernel_size, stride=model.conv1.stride, padding=model.conv1.padding, bias=False, ) # change output size to (X, Y) * number of future states num_targets = 2 * cfg["model_params"]["future_num_frames"] model.fc = nn.Linear(in_features=2048, out_features=num_targets) return model def forward(data, model, device, criterion): inputs = data["image"].to(device) target_availabilities = data["target_availabilities"].unsqueeze(-1).to(device) targets = data["target_positions"].to(device) # Forward pass outputs = model(inputs).reshape(targets.shape) loss = criterion(outputs, targets) # not all the output steps are valid, but we can filter them out from the loss using availabilities loss = loss * target_availabilities loss = loss.mean() return loss, outputsLoad the Train DataOur data pipeline map a raw `.zarr` folder into a multi-processing instance ready for training by:- loading the `zarr` into a `ChunkedDataset` object. This object has a reference to the different arrays into the zarr (e.g. agents and traffic lights);- wrapping the `ChunkedDataset` into an `AgentDataset`, which inherits from torch `Dataset` class;- passing the `AgentDataset` into a torch `DataLoader`# ===== INIT DATASET train_cfg = cfg["train_data_loader"] rasterizer = build_rasterizer(cfg, dm) train_zarr = ChunkedDataset(dm.require(train_cfg["key"])).open() train_dataset = AgentDataset(cfg, train_zarr, rasterizer) train_dataloader = DataLoader(train_dataset, shuffle=train_cfg["shuffle"], batch_size=train_cfg["batch_size"], num_workers=train_cfg["num_workers"]) print(train_dataset) # ==== INIT MODEL device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model = build_model(cfg).to(device) optimizer = optim.Adam(model.parameters(), lr=1e-3) criterion = nn.MSELoss(reduction="none")Trainingnote: if you're on MacOS and using `py_satellite` rasterizer, you may need to disable opencv multiprocessing by adding:`cv2.setNumThreads(0)` before the following cell. This seems to only affect running in python notebook and it's caused by the `cv2.warpaffine` function# ==== TRAIN LOOP tr_it = iter(train_dataloader) progress_bar = tqdm(range(cfg["train_params"]["max_num_steps"])) losses_train = [] for _ in progress_bar: try: data = next(tr_it) except StopIteration: tr_it = iter(train_dataloader) data = next(tr_it) model.train() torch.set_grad_enabled(True) loss, _ = forward(data, model, device, criterion) # Backward pass optimizer.zero_grad() loss.backward() optimizer.step() losses_train.append(loss.item()) progress_bar.set_description(f"loss: {loss.item()} loss(avg): {np.mean(losses_train)}")Plot Loss CurveWe can plot the train loss against the iterations (batch-wise)plt.plot(np.arange(len(losses_train)), losses_train, label="train loss") plt.legend() plt.show()EvaluationEvaluation follows a slightly different protocol than training. 
When working with time series, we must be absolutely sure to avoid leaking the future in the data.If we followed the same protocol as training, one could just read ahead in the `.zarr` and forge a perfect solution at run-time, even for a private test set.As such, **the private test set for the competition has been "chopped" using the `chop_dataset` function**.# ===== GENERATE AND LOAD CHOPPED DATASET num_frames_to_chop = 100 eval_cfg = cfg["val_data_loader"] eval_base_path = create_chopped_dataset(dm.require(eval_cfg["key"]), cfg["raster_params"]["filter_agents_threshold"], num_frames_to_chop, cfg["model_params"]["future_num_frames"], MIN_FUTURE_STEPS)The result is that **each scene has been reduced to only 100 frames**, and **only valid agents in the 100th frame will be used to compute the metrics**. Because following frames in the scene have been chopped off, we can't just look ahead to get the future of those agents.In this example, we simulate this pipeline by running `chop_dataset` on the validation set. The function stores:- a new chopped `.zarr` dataset, in which each scene has only the first 100 frames;- a numpy mask array where only valid agents in the 100th frame are True;- a ground-truth file with the future coordinates of those agents;Please note how the total number of frames is now equal to the number of scenes multiplied by `num_frames_to_chop`. The remaining frames in each scene have been successfully chopped off from the data.eval_zarr_path = str(Path(eval_base_path) / Path(dm.require(eval_cfg["key"])).name) eval_mask_path = str(Path(eval_base_path) / "mask.npz") eval_gt_path = str(Path(eval_base_path) / "gt.csv") eval_zarr = ChunkedDataset(eval_zarr_path).open() eval_mask = np.load(eval_mask_path)["arr_0"] # ===== INIT DATASET AND LOAD MASK eval_dataset = AgentDataset(cfg, eval_zarr, rasterizer, agents_mask=eval_mask) eval_dataloader = DataLoader(eval_dataset, shuffle=eval_cfg["shuffle"], batch_size=eval_cfg["batch_size"], num_workers=eval_cfg["num_workers"]) print(eval_dataset)Storing PredictionsThere is a small catch to be aware of when saving the model predictions.
The outputs of the model are coordinates in `agent` space and we need to convert them into displacements in `world` space.To do so, we first convert them back into the `world` space and we then subtract the centroid coordinates.# ==== EVAL LOOP model.eval() torch.set_grad_enabled(False) # store information for evaluation future_coords_offsets_pd = [] timestamps = [] agent_ids = [] progress_bar = tqdm(eval_dataloader) for data in progress_bar: _, outputs = forward(data, model, device, criterion) # convert agent coordinates into world offsets agents_coords = outputs.cpu().numpy() world_from_agents = data["world_from_agent"].numpy() centroids = data["centroid"].numpy() coords_offset = [] for agent_coords, world_from_agent, centroid in zip(agents_coords, world_from_agents, centroids): coords_offset.append(transform_points(agent_coords, world_from_agent) - centroid[:2]) future_coords_offsets_pd.append(np.stack(coords_offset)) timestamps.append(data["timestamp"].numpy().copy()) agent_ids.append(data["track_id"].numpy().copy())Save resultsAfter the model has predicted trajectories for our evaluation set, we can save them in a `csv` file.During the competition, only the `.zarr` and the mask will be provided for the private test set evaluation.Your solution is expected to generate a csv file which will be compared to the ground truth one on a separate server.pred_path = f"{gettempdir()}/pred.csv" write_pred_csv(pred_path, timestamps=np.concatenate(timestamps), track_ids=np.concatenate(agent_ids), coords=np.concatenate(future_coords_offsets_pd), )Perform EvaluationPlease note that our metric supports multi-modal predictions (i.e. multiple predictions for a single GT trajectory). In that case, you will need to provide a confidence for each prediction (confidences must all be between 0 and 1 and sum to 1).In this simple example we don't generate multiple trajectories, so we won't pass any confidences vector.
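For reference, a hypothetical multi-modal submission would attach one confidence per predicted mode, with each row of confidences summing to 1; a common way to obtain such values is a softmax over per-mode scores. The snippet below is only an illustrative sketch of the expected shape (the mode count and scores are made up, and the exact keyword used to pass confidences to `write_pred_csv` should be checked against the l5kit version you use):

```python
import numpy as np

# Hypothetical example: 4 agents, 3 predicted modes each.
num_agents, num_modes = 4, 3
raw_scores = np.random.randn(num_agents, num_modes)

# Softmax per agent so confidences lie in (0, 1) and sum to 1 row-wise.
confidences = np.exp(raw_scores) / np.exp(raw_scores).sum(axis=1, keepdims=True)
assert np.allclose(confidences.sum(axis=1), 1.0)
```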
Internally, the metric computation will assume a single trajectory with confidence equal to 1metrics = compute_metrics_csv(eval_gt_path, pred_path, [neg_multi_log_likelihood, time_displace]) for metric_name, metric_mean in metrics.items(): print(metric_name, metric_mean)Visualise ResultsWe can also visualise some results from the ego (AV) point of view for those frames of interest (the 100th of each scene).However, as we chopped off the future from the dataset **we must use the GT csv if we want to plot the future trajectories of the agents**model.eval() torch.set_grad_enabled(False) # build a dict to retrieve future trajectories from GT gt_rows = {} for row in read_gt_csv(eval_gt_path): gt_rows[row["track_id"] + row["timestamp"]] = row["coord"] eval_ego_dataset = EgoDataset(cfg, eval_dataset.dataset, rasterizer) for frame_number in range(99, len(eval_zarr.frames), 100): # start from last frame of scene_0 and increase by 100 agent_indices = eval_dataset.get_frame_indices(frame_number) if not len(agent_indices): continue # get AV point-of-view frame data_ego = eval_ego_dataset[frame_number] im_ego = rasterizer.to_rgb(data_ego["image"].transpose(1, 2, 0)) center = np.asarray(cfg["raster_params"]["ego_center"]) * cfg["raster_params"]["raster_size"] predicted_positions = [] target_positions = [] for v_index in agent_indices: data_agent = eval_dataset[v_index] out_net = model(torch.from_numpy(data_agent["image"]).unsqueeze(0).to(device)) out_pos = out_net[0].reshape(-1, 2).detach().cpu().numpy() # store absolute world coordinates predicted_positions.append(transform_points(out_pos, data_agent["world_from_agent"])) # retrieve target positions from the GT and store as absolute coordinates track_id, timestamp = data_agent["track_id"], data_agent["timestamp"] target_positions.append(gt_rows[str(track_id) + str(timestamp)] + data_agent["centroid"][:2]) # convert coordinates to AV point-of-view so we can draw them predicted_positions = transform_points(np.concatenate(predicted_positions), data_ego["raster_from_world"]) target_positions = transform_points(np.concatenate(target_positions), data_ego["raster_from_world"]) draw_trajectory(im_ego, predicted_positions, PREDICTED_POINTS_COLOR) draw_trajectory(im_ego, target_positions, TARGET_POINTS_COLOR) plt.imshow(im_ego[::-1]) plt.show()Using plugins in PerspectiveWidget`PerspectiveWidget` comes with a set of plugins that are able to visualize your data.from perspective import PerspectiveWidget data = { "int": [i for i in range(4)], "float": [i * 1.25 for i in range(4)], "str": ["a", "b", "c", "d"], "bool": [True, False, True, False], "date": [date.today() for i in range(4)], "datetime": [datetime.now() for i in range(4)] }Pass the plugin name into the kwargs of `PerspectiveWidget` or `start`. Once the viewer has been initialized, you can select plugins at the top left corner (and see the parsable names for each one).widget = PerspectiveWidget(data, plugin="d3_x_bar") widget widget2 = PerspectiveWidget(data, plugin="d3_heatmap") widget2Plugins can be set from the notebook:widget.plugin = "datagrid" widget.plugin = "d3_y_line"Randomization Testing Upstream Regulator Analysis----------------------Author: ()Date: 1st March, 2018---------------------- Table of Contents1. [Background](background)2. [Import packages](import)3. [Randomization Support Functions](support)6. [Arthritis](arthritis)8. [Breast Cancer](boobs) Background This notebook aims to show that our results are more significant than the null model. 
We do this by producing a number of graphs that contain the same edge connections as our real graph, but with randomized adjusted p-value and fold change information. This notebook shows that all of our predicted upstream regulators are far from the center of our randomized distribution, which is centered on zero for each gene. No gene's distribution is biased, and none of our top genes seem to be significant do to random chance.Below, we demonstrate two test cases: our DEG set from our URA_Arthritis notebook and the breast cancer DEG set from our URA_HUVEC_BRCA notebook. Import packagesimport pandas as pd from random import shuffle import networkx as nx import seaborn as sns import matplotlib.pyplot as plt # uncomment if you have not pip-installed network_bio_toolkit #import sys #code_path = '../../network_bio_toolkit' #sys.path.append(code_path) #import Upstream #reload(Upstream) from network_bio_toolkit import Upstream %matplotlib inlineRandomization Support Functionsdef shuffleACopy(x): b = x[:] # make a copy of the keys shuffle(b) # shuffle the copy return b def returnShuffle(its, to_shuffle): a = [shuffleACopy(to_shuffle) for x in range(its)] return a[its-1] # modifying our usual function to support shuffled genes def create_DEG_list(filename, G, # specify in order to add up-down info to graph its = 1, p_value_filter=0.05, p_value_or_adj='adj', # filtering by p-value ('p') or adjusted p-value ('adj') fold_change_filter=None, # specify a number to filter by absolute (log) fold change gene_type='symbol', # 'symbol' or 'entrez' gene_column_header='gene_symbol', p_value_column_header='adj_p_value', fold_change_column_header='fold_change' ): df = pd.DataFrame.from_csv(filename, sep='\t') # shuffle just the genes in this DEG file genes_to_shuffle = list(df['gene_symbol']) shuffled_genes = returnShuffle(its, genes_to_shuffle) shuffle_mapping = dict(zip(df['gene_symbol'], shuffled_genes)) df['gene_symbol'] = shuffled_genes # remove duplicate lines for same gene symbol, just use first occurance df.drop_duplicates(subset=[gene_column_header], keep='first', inplace=True) # map from gene to p-value and fold change DEG_full_list = list(df[gene_column_header]) DEG_to_pvalue = dict(zip(shuffled_genes, list(df[p_value_column_header]))) DEG_to_updown = dict(zip(shuffled_genes, list(df[fold_change_column_header]))) # create a graph based on the full DEG file DEG_full_graph = nx.DiGraph() DEG_full_graph.add_nodes_from(DEG_full_list) nx.set_node_attributes(DEG_full_graph, values = DEG_to_pvalue, name = 'adj_p_value') nx.set_node_attributes(DEG_full_graph, values = DEG_to_updown, name = 'updown') # filter by p-value cut-off df = df.loc[df[p_value_column_header] < p_value_filter] # filter by (log) fold change cut off if applicable if fold_change_filter != None: df = df.loc[abs(df[fold_change_column_header]) > fold_change_filter] # save just significant values DEG_list = list(df[gene_column_header]) # add up-down info to DG_TF small_updown_dict = {n:DEG_to_updown[n] if n in DEG_to_updown else 0 for n in G.nodes()} nx.set_node_attributes(G, values = small_updown_dict, name = 'updown') return DEG_list, DEG_to_pvalue, DEG_to_updown, DEG_full_graph, G def create_mega_list(ura, filename, iters, p_value_filter): # init mega list ura.DEG_list, ura.DEG_to_pvalue, ura.DEG_to_updown, ura.DEG_full_graph, DG_TF = create_DEG_list(filename, ura.DG_TF, p_value_filter=p_value_filter) z_scores_mega_list = pd.DataFrame(index=ura.z_scores.index) ura.tf_zscore(bias_filter=0.25) z_scores_mega_list[str(0)] = 
pd.Series(ura.z_scores) # generate random z-score shuffles for i in range(iters): ura.DEG_list, ura.DEG_to_pvalue, ura.DEG_to_updown, ura.DEG_full_graph, DG_TF = create_DEG_list(filename, ura.DG_TF, p_value_filter=p_value_filter) ura.tf_zscore(bias_filter=0.25) z_scores_mega_list[str(i+1)] = pd.Series(ura.z_scores) return z_scores_mega_listArthritis# User preferences symbol = 'symbol' entrez = 'entrez' human = 'human' mouse = 'mouse' ura = Upstream.Upstream(gene_type = symbol, species = human)Predict activation state normally (calculate z-scores as usual)First, let's look at which transcription factors are most significant according to our URA package.# transcription factors ura.easy_load_TF_list('../../TF_databases/TF_database_URA.csv') # background network filename = "../../background_networks/9606.protein.actions.v10.5.txt" confidence_filter = 400 ura.load_STRING_to_digraph(filename, confidence_filter); # DEG list DEG_filename = "../../DEG_databases/DE_Coeff_OAvsNormal_OAvsNormal_20171215.csv" ura.create_DEG_list(DEG_filename, p_value_filter = 0.05, fold_change_filter = 1) ura.tf_zscore(bias_filter = 0.25) top_values = ura.top_values(act = True, abs_value = True, top = 5) display(top_values)Calculate randomized z-scoresNow let's see how significant these same TF's are in a randomized graph.Takes about 45 seconds per iteration, so you may need to be patient!filename = "../../DEG_databases/DE_Coeff_OAvsNormal_OAvsNormal_20171215.csv" iters = 10 p_value_filter = 0.05 mega_list = create_mega_list(ura, filename, iters, p_value_filter) # z-scores for each iteration, using a different shuffled DEG list each time mega_list.loc[top_values.index] for i in range(len(top_values)): TF = mega_list.loc[top_values.index].index[i] TF_normal_z = top_values["z-score"][TF] plt.figure(figsize=(10, 5)) plt.xlim((-10,10)) # adjust the max leaving min unchanged ax = sns.distplot(list(mega_list.loc[top_values.index[i]]), kde=True) plt.scatter([TF_normal_z], [0], marker='^', s=200, c='r') ax.annotate(TF, xy=(TF_normal_z, 0.025), rotation=90, horizontalalignment='center', verticalalignment='bottom', fontsize=14)Breast Cancer# User preferences symbol = 'symbol' entrez = 'entrez' human = 'human' mouse = 'mouse' urb = Upstream.Upstream(gene_type = symbol, species = human) # transcription factors urb.easy_load_TF_list('../../TF_databases/TF_database_URA.csv') TF_list = urb.get('TF_list') TF_list = TF_list + ['TNF', 'IFNG', 'LBP'] # known regulators of interest missing from our TF databases urb.set('TF_list', TF_list) # background network filename = "../../background_networks/9606.protein.actions.v10.5.txt" confidence_filter = 400 urb.load_STRING_to_digraph(filename, confidence_filter); # add DEG information to STRING background network filename_brca = '../../DEG_databases/geo2r_GSE11352_brca_48hours.txt' urb.create_DEG_list(filename_brca, p_value_filter = 0.05) urb.tf_zscore(bias_filter=0.25) top_values = urb.top_values(act=True, abs_value=True, top=5) top_values #iters = 1000 iters = 10 p_value_filter = 0.05 mega_list = create_mega_list(urb, filename_brca, iters, p_value_filter) # z-scores for each iteration, using a different shuffled DEG list each time mega_list.loc[top_values.index] for i in range(len(top_values)): TF = mega_list.loc[top_values.index].index[i] TF_normal_z = top_values["z-score"][TF] plt.figure(figsize=(10, 5)) plt.xlim((-10,10)) # adjust the max leaving min unchanged ax = sns.distplot(list(mega_list.loc[top_values.index[i]]), kde=True) plt.scatter([TF_normal_z], [0], marker='^', s=200, c='r') 
ax.annotate(TF, xy=(TF_normal_z, 0.025), rotation=90, horizontalalignment='center', verticalalignment='bottom', fontsize=14) plt.savefig('brca_'+ str(TF)+'.png') plt.savefig('brca_'+ str(TF)+'.pdf')Models of Higher Brain Functions - Computer Practical 'Learning Dynamics in Deep Linear Networks' - November 2019 - TU Berlin Skeleton for Exercise Two: Deeper (Non-)Linear Networks with AutoDiff Install PyTorch, for example with conda:```conda install pytorch```or with pip:```pip install torch```# Import the required packages import numpy as np from numpy.linalg import svd as svd import torch import torch.nn as nn import torch.optim as optim from collections import OrderedDict np.random.seed(1)A. Branching Diffusion Process for Data GenerationThe `DiffuseTreeSampler`Class implements a hierarchical data-generation process which we use throughout this exercise (**no need to implement anything from scratch**). A target is generated for all datapoints at a time in a tree-based sequential fashion. A first +/-1 coin flip determines the initial sign of the node at the top of the tree. Afterwards, the tree branches and changes the sign at each stage with a small probability. We repeat this branching process for each layer of the tree. The bottom of the tree corresponds to the value of one feature across the different datapoints. In order to generate multiple target dimensions, the process is repeated for each dimension independently. Finally, we stack them into the overall targets. The input again corresponds to the identity matrix. For more information you can have a look here: https://arxiv.org/pdf/1810.10531.pdf (p. 14).class DiffuseTreeSampler(): def __init__(self, target_dim, tree_depth, branching_factor, sample_epsilon): self.target_dim = target_dim self.num_examples = branching_factor**tree_depth self.tree_depth = tree_depth self.branching_factor = branching_factor self.sample_epsilon = sample_epsilon def sample_target(self): samples_per_tree_layer = [self.branching_factor**i for i in range(1, self.tree_depth+1)] target_tree = [np.random.choice([-1, 1], p=[0.5, 0.5], size=1)] for l in range(self.tree_depth): switch = np.random.choice([-1, 1], p=[self.sample_epsilon, 1-self.sample_epsilon], size=samples_per_tree_layer[l]) next_layer = np.repeat(target_tree[-1], self.branching_factor) target_tree.append(next_layer*switch) return target_tree[-1] def sample_data(self): """ Each target dimension diffuses independently of the others! """ targets = [] for tar in range(self.target_dim): target_temp = self.sample_target() targets.append(target_temp) targets_out = np.array(targets).T features_out = np.diag(np.ones(self.num_examples)) return targets_out, features_out # Create the dataset by instantiating and sampling hierarchical_tree = DiffuseTreeSampler(target_dim=100, tree_depth=3, branching_factor=4, sample_epsilon=0.5) targets, features = hierarchical_tree.sample_data() print("Output Dimensions: {}".format(targets.shape)) print("Input Dimensions: {}".format(features.shape)) # Calculate the SVD of the covariance matrix SIGMA_YX = targets.T @ features U, s, V = svd(SIGMA_YX, full_matrices=True)B. 
PyTorch Example - Feedforward Networks* Define Neural Network Architecture (single hidden layer ReLU activation)* Create a Network Instance, Optimizer (stochastic gradient descent) & MSE (mean squared error) Loss* Perform a forward pass to get predictions, calculate the loss* Reset the gradients to 0 and perform a backward pass to calculate the gradients + (SGD) update# Example Feedforward PyTorch Network class DeepNet(nn.Module): """ Deep Network in PyTorch - Single Hidden Layer with ReLU activation Inputs: Input Array Dimensions, Output Array Dimensions """ def __init__(self, input_dim, output_dim): super(DeepNet, self).__init__() # Define a dictionary that collects the different layers # Afterwards, this dictionary provides the input to the nn.Sequential model layers = OrderedDict() layers["in_hidden"] = nn.Linear(input_dim, 64, bias=False) layers["in_hidden-activation"] = nn.ReLU() layers["hidden_out"] = nn.Linear(64, output_dim, bias=False) self.model = nn.Sequential(layers) def forward(self, input_array): # Propagate the input through the linear network return self.model(input_array.float()) # Create the network instance, define the learning rate, optimizer & loss input_dim, output_dim = features.shape[1], targets.shape[1] l_rate = 0.5 relu_net = DeepNet(input_dim, output_dim) relu_optimizer = optim.SGD(relu_net.parameters(), lr=l_rate) mse_loss = nn.MSELoss() print("The Network Architecture") print(relu_net) # Perform a forward pass through the network and calculate the loss input_tensor = torch.tensor(features[0]) y_true = torch.tensor(targets[0]).float() y_hat = relu_net(input_tensor) loss = mse_loss(y_hat, y_true) print("The MSE Loss for the 1st datapoint:{:.3f}".format(loss)) # Perform a backward pass and update the weights using the SGD optimizer relu_net.zero_grad() print("Gradient Sum Input-Hidden Weights after reset: {}".format(relu_net.model.in_hidden.weight.grad)) loss.backward() print("Gradient Sum Input-Hidden Weights after backward pass: {}".format(relu_net.model.in_hidden.weight.grad.sum())) relu_optimizer.step()Gradient Sum Input-Hidden Weights after reset: None Gradient Sum Input-Hidden Weights after backward pass: -0.096031248569488531. Code a Variable Depth Linear Network# TODO: Generalize to variable depth class DeepLinearNet(nn.Module): """ Deep Network in PyTorch - Single Hidden Layer with ReLU activation Inputs: Input Array Dimensions, Output Array Dimensions """ def __init__(self, input_dim=64, output_dim=100, hidden_units=[64]): super(DeepLinearNet, self).__init__() # Define a dictionary that collects the different layers # Afterwards, this dictionary provides the input to the nn.Sequential model layers = OrderedDict() layers["in-hidden"] = nn.Linear(input_dim, hidden_units[0], bias=False) # TODO: Loop over the hidden layers in the middle to define the full network architecture! layers["hidden-out"] = nn.Linear(hidden_units[-1], output_dim, bias=False) self.model = nn.Sequential(layers) def forward(self, input_array): # Propagate the input through the linear network return self.model(input_array.float())2. Code the Learning Loop for the Linear Network# TODO: Define the Online Gradient Descent Training Loop def linear_net_learning(deep_net, mse_loss, optimizer, num_epochs, features, targets): """ Inputs: 'deep_net' - Instantiated PyTorch Network 'mse_loss' - Loss Criterion, i.e. Mean Squared Error 'optimizer' - PyTorch Optimizer Object, i.e. 
Stochastic Gradient Descent 'num_epochs' - Number of Training Loops over the entire dataset 'features' - Training features generated from the sampler 'targets' - Training targets generated from the sampler Function: Runs the learning loop for the linear network """ loss_log = [] log_singular_vals = [] num_points = targets.shape[0] for epoch in range(num_epochs): epoch_loss = 0 # TODO: Reshuffle the order of the dataset # Loop over all examples in an Online SGD Loop for t in range(num_points): # Extract the current training datapoint and transform it into a Torch Tensor input_tensor = torch.tensor(train_items[t]) y_true = torch.tensor(train_features[t]).float() # TODO: Compute the prediction for the single datapoint y^hat & the corresponding loss # TODO: Clear the gradients, Perform the backward pass, and SGD update # Update the epoch loss tracker epoch_loss += loss.item() # Log the mean epoch loss & calculate the SVD loss_log.append(epoch_loss/num_points) y_hat_full = deep_net(torch.tensor(features)).detach().numpy() U, s, V = svd(y_hat_full.T, full_matrices=True) log_singular_vals.append(s) return loss_log, np.array(log_singular_vals)3. Run the Loop & Plot the singular values# TODO: Instantiate the network, define the loss & optimizer and run the learning loop # TODO: Plot the singular values4. Add ReLU Non-Linearities to the Architecture & Repeat# TODO: Define a new network class that adds a ReLU activation function after each layer. # TODO: Instantiate the network, define the loss & optimizer and run the learning loop # TODO: Plot the singular valuesClassifying the Complete Dataset* The tuned logistic regression baseline classifier will be used in this notebook to score tweet sentiment.* VADER compund scores will also generated and compared to the baseline model.* By comparing sentiment scores of the covid and non covid DataFrames, we will begin to assess the impact the covid has on tweet sentiment.import sys sys.path.insert(0, '/Users/lclark/data_bootcamp/data-science-final-project/scripts/') # Import custom functions from functions import * pd.set_option('display.max_colwidth', None)Importing Filtered Tweets# Loading filtered tweets from pickle file df_full = pd.read_pickle('~/data_bootcamp/data-science-final-project/data/df_filtered_tweets_master.pkl') # All the files below are a subset of df_filtered_tweets_master #df_no_retweets = pd.read_pickle('~/data_bootcamp/data-science-final-project/data/df_original_tweets.pkl') #df_no_rt_covid = pd.read_pickle('~/data_bootcamp/data-science-final-project/data/df_original_tweets_covid_mention.pkl') #df_no_rt_no_covid = pd.read_pickle('~/data_bootcamp/data-science-final-project/data/df_original_tweets_no_covid.pkl')Load Modellr_model = pickle.load(open('/Users/lclark/data_bootcamp/data-science-final-project/models/LogReg_GridCV_3C_87p_40kfeats.sav', 'rb')) lr_model.best_params_Classifying Tweets Logisitic Regression Classification* Given that the full dataset is roughly 25% original tweets versus retweets, analyzing the full dataset may provide us with an indication of whether people tend to retweet positive or negative tweets more frequentlydf_full['full_text_clean'] = df_full['full_clean'].apply(joiner) vectorizer = TfidfVectorizer(use_idf=True, lowercase=True, ngram_range=(1,2), max_features=40000) X = vectorizer.fit_transform(df_full.full_text_clean) df_full['lr_labels'] = lr_model.predict(X) df_full.info() Int64Index: 420634 entries, 1294232573636304896 to 1333143090723319808 Data columns (total 11 columns): # Column Non-Null Count Dtype 
--- ------ -------------- ----- 0 created_at 420634 non-null datetime64[ns, UTC] 1 full_text 420634 non-null object 2 vader_text 420634 non-null object 3 no_hashtags 420634 non-null object 4 full_clean 420634 non-null object 5 covid_mention 420634 non-null int64 6 retweet_count 420634 non-null int64 7 user_name 420634 non-null object 8 is_retweet 420634 non-null int64 9 full_text_clean 420634 non-null object 10 lr_labels 420634 non-null int64 dtypes: datetime64[ns, UTC](1), int64(4), object(6) memory usage: 38.5+ MBVADER%%time # Analyze tweets, extract scores from dictionary result, drop dictionary result, categorize df_full['vader_text'] = df_full['full_text'].apply(vader_preprocess) df_full = vader_score_to_series(df_full) # Testing wider thresholds than default +-0.05 of 0 # df_full['vader_label_wider_neu'] = df_full['compound'].apply(lambda x: categorize(x, upper = 0.1,lower = -0.1)) df_full['vader_label_wider_neu'].value_counts().sort_index() df_full.compound.describe() df_full[(df_full['vader_label'] == 4)][['created_at','vader_text','lr_labels','compound','vader_label']].sample(n=10)Comparing Logisitic Regression Classification with VADER# Logisitic Regression Value Counts df_full['lr_labels'].value_counts().sort_index() # VADER Value Counts with extracted full_text from retweet_status df_full.vader_label.value_counts().sort_index()VADER Value Counts before extracting the full_text from the retweet_statusIf a tweet is a retweet, it will be truncated in the full_text column. You need to extract the full_text from the dictionary in retweet_status.Note: This comparison had a different number of tweets (more tweets in more recent tests), though the positive tweet count is less. This gives us some indiction that negative sentiment is more strongly dictated by the end of a tweet than the beginning.0 - 1068592 - 1045464 - 175328# Create distributable labelled bcpoli dataset. 
#df_full_distribute = df_full[['covid_mention','neg','neu','pos','compound','vader_label']].reset_index() #df_full_distribute.to_pickle('/Users/lclark/data_bootcamp/data-science-final-project/data/bcpoli_vader_labelled_tweets.sav') # Export labelled df_full #df_full.to_pickle('/Users/lclark/data_bootcamp/data-science-final-project/data/bcpoli_labelled_tweets.pkl')**Check Status Of Services And Store Into Csv File**%%bash systemctl --type=service | sed -n '/service/p' | \ sed -rn 's/(^.*)loaded.*(running|exited|failed)(.*)/\1, \2/p'| \ sort -rk3 > ~/Work/status.csv %%python3 from subprocess import check_output from re import search import csv cmd = 'systemctl --type=service' services = check_output(cmd.split()).decode().splitlines()[1:] status = [] for i in services: match = search(r'(^.*)loaded.*(running|exited|failed)',i) if match: status.append(list(match.groups())) status = sorted(status, key = lambda x: x[1], reverse = True) with open('/home/mana/Work/status.csv','w') as file: writer = csv.writer(file) writer.writerows(status) !cat /home/mana/Work/status.csvaccounts-daemon.service ,running acpid.service ,running avahi-daemon.service ,running clean-mount-point@media-mana-DATA1.service ,running colord.service ,running containerd.service ,running cron.service ,running cups-browsed.service ,running cups.service ,running dbus.service ,running docker.service ,running getty@tty1.service ,running irqbalance.service ,running kerneloops.service ,running lightdm.service ,running ModemManager.service ,running networkd-dispatcher.service ,running NetworkManager.service ,ru[...]Classification [Run in Google Colab](https://colab.research.google.com/drive/1ANQUix9Y6V4RXu-vAaCFGmU979d5m4bO?usp=sharing) [View on GitHub](https://github.com/adapt-python/notebooks/blob/d0364973c642ea4880756cef4e9f2ee8bb5e8495/Classification.ipynb) You will find here the application of DA methods from the ADAPT package on a simple two dimensional DA classification problem.First we import packages needed in the following. We will use ``matplotlib Animation`` tools in order toget a visual understanding of the mselected methods:import numpy as np import matplotlib.pyplot as plt import matplotlib import matplotlib.animation as animation from sklearn.metrics import accuracy_score from matplotlib import rc rc('animation', html='jshtml')Experimental Setup We now set the synthetic classification DA problem using the [make_classification_da](https://adapt-python.github.io/adapt/generated/adapt.utils.make_classification_da.html) function from ``adapt.utils``.from adapt.utils import make_classification_da Xs, ys, Xt, yt = make_classification_da() x_grid, y_grid = np.meshgrid(np.linspace(-0.1, 1.1, 100), np.linspace(-0.1, 1.1, 100)) X_grid = np.stack([x_grid.ravel(), y_grid.ravel()], -1)We define here ``show`` function which we will use in the following to visualize the algorithms performanceson the toy problem.def show(ax, yp_grid=None, yp_t=None, x_grid=x_grid, y_grid=y_grid, Xs=Xs, Xt=Xt, weights_src=50*np.ones(100), disc_grid=None): cm = matplotlib.colors.ListedColormap(['w', 'r', 'w']) # ax = plt.gca() if yp_grid is not None: ax.contourf(x_grid, y_grid, yp_grid, cmap=cm, alpha=1.) 
ax.plot([Xs[0, 0]], [Xs[0, 1]], c="red", label="class separation") if disc_grid is not None: cm_disc = matplotlib.colors.ListedColormap([(1,1,1,0), 'g', (1,1,1,0)]) ax.contourf(x_grid, y_grid, disc_grid, cmap=cm_disc, alpha=0.5) ax.plot([Xs[0, 0]], [Xs[0, 1]], c="green", label="disc separation") if yp_t is not None: score = accuracy_score(yt.ravel(), yp_t.ravel()) score = " - Acc=%.2f"%score else: score = "" ax.scatter(Xs[ys==0, 0], Xs[ys==0, 1], label="source", edgecolors='k', c="C0", s=weights_src[ys==0], marker="o", alpha=0.9) ax.scatter(Xs[ys==1, 0], Xs[ys==1, 1], edgecolors='k', c="C0", s=2*weights_src[ys==1], marker="*", alpha=0.9) ax.scatter(Xt[yt==0, 0], Xt[yt==0, 1], label="target"+score, edgecolors='k', c="C1", s=50, marker="o", alpha=0.9) ax.scatter(Xt[yt==1, 0], Xt[yt==1, 1], edgecolors='k', c="C1", s=100, marker="*", alpha=0.9) ax.legend(fontsize=14, loc="upper left") ax.set_xlabel("X0", fontsize=16) ax.set_ylabel("X1", fontsize=16) fig, ax = plt.subplots(1, 1, figsize=(8, 6)) show(ax) plt.show()As we can see in the figure above (plotting the two dimensions of the input data),source and target data define two distinct domains. We have modeled here a classical unsupervised DA issue where the goal is to build a good model on orange data knowing only the labels ("o" or "*" given by ``y``) of the bluepoints.We now define the base model used to learn the task. We use here a neural network with two hidden layer.We also define a ``SavePrediction`` callback in order to save the prediction of the neural network ateach epoch.import tensorflow as tf from tensorflow.keras import Sequential from tensorflow.keras.layers import Input, Dense, Reshape from tensorflow.keras.optimizers import Adam def get_model(input_shape=(2,)): model = Sequential() model.add(Dense(100, activation='elu', input_shape=input_shape)) model.add(Dense(100, activation='relu')) model.add(Dense(1, activation="sigmoid")) model.compile(optimizer=Adam(0.01), loss='binary_crossentropy') return model from tensorflow.keras.callbacks import Callback class SavePrediction(Callback): """ Callbacks which stores predicted labels in history at each epoch. """ def __init__(self, X_grid_=X_grid, Xt_=Xt): self.X_grid = X_grid_ self.Xt = Xt_ self.custom_history_grid_ = [] self.custom_history_ = [] super().__init__() def on_epoch_end(self, batch, logs={}): """Applied at the end of each epoch""" predictions = self.model.predict_on_batch(self.X_grid).reshape(100, 100) self.custom_history_grid_.append(predictions) predictions = self.model.predict_on_batch(self.Xt).ravel() self.custom_history_.append(predictions)Src Only First, let's fit a network on source data without any adaptation. As we can observe,the "o" labels from the target domain are missclassified.Because of the "*" blue points close to the "o" domain, the network learns a classborder not regularized enough and then misclassifies the target "*" data.np.random.seed(0) tf.random.set_seed(0) model = get_model() save_preds = SavePrediction() model.fit(Xs, ys, callbacks=[save_preds], epochs=100, batch_size=100, verbose=0); def animate(i): ax.clear() yp_grid = (save_preds.custom_history_grid_[i]>0.5).astype(int) yp_t = save_preds.custom_history_[i]>0.5 show(ax, yp_grid, yp_t) fig, ax = plt.subplots(1, 1, figsize=(8, 6)); ani = animation.FuncAnimation(fig, animate, frames=100, blit=False, repeat=True) ani![src_only](../images/srcOnlyCla.gif) mSDA Let's now consider the domain adaptation method [mSDA](https://adapt-python.github.io/adapt/generated/adapt.feature_based.mSDA.html). 
This "two-stage" method first performs a feature encoding on source data and then fits an estimator using the new feature space.The encoded features are learned with a stacked denoising autoencoder. Here we choose to reduce the feature space to one feature with the encoder.def get_encoder(): model = Sequential() model.add(Dense(100, activation='elu', input_shape=(2,))) model.add(Dense(1, activation=None)) model.compile(optimizer=Adam(0.01), loss='mse') return model def get_decoder(): model = Sequential() model.add(Dense(100, activation='elu', input_shape=(1,))) model.add(Dense(2, activation="sigmoid")) model.compile(optimizer=Adam(0.01), loss='mse') return model from adapt.feature_based import mSDA model = mSDA(get_encoder(), get_decoder(), get_model((1,)), noise_lvl=0.1, random_state=0) model.fit_embeddings(Xs, Xt, epochs=100, batch_size=200, verbose=0); save_preds = SavePrediction(model.predict_features(X_grid), model.predict_features(Xt)) model.fit_estimator(model.predict_features(Xs), ys, callbacks=[save_preds], epochs=100, batch_size=100, verbose=0); np.random.seed(0) noise = np.random.randn(100, 1) * 0.1 noise_grid = np.random.randn(len(X_grid), 1)* 0.1 Xs_enc = model.predict_features(Xs) Xs_enc = np.concatenate((noise, Xs_enc), 1) Xt_enc = model.predict_features(Xt) Xt_enc = np.concatenate((noise, Xt_enc), 1) X_grid_enc = model.predict_features(X_grid) X_grid_enc = np.concatenate((noise_grid, X_grid_enc), 1) x_grid_enc = X_grid_enc[:, 0].reshape(100,100) y_grid_enc = X_grid_enc[:, 1].reshape(100,100) def animate_msda(i): yp_grid = (save_preds.custom_history_grid_[i]>0.5).astype(int) yp_t = save_preds.custom_history_[i]>0.5 ax1.clear() ax2.clear() ax1.set_title("Input Space", fontsize=16) show(ax1, yp_grid, yp_t) ax2.set_title("Encoded Space", fontsize=16) show(ax2, yp_grid, yp_t, x_grid=x_grid_enc, y_grid=y_grid_enc, Xs=Xs_enc, Xt=Xt_enc) ax2.set_xlabel("U0", fontsize=16) ax2.set_ylabel("U1", fontsize=16) fig, (ax1 , ax2) = plt.subplots(1, 2, figsize=(16, 6)) ani = animation.FuncAnimation(fig, animate_msda, frames=100, blit=False, repeat=True) ani![msda](../images/msda.gif) On the left, we plot the evolution of the delimiting line through epochs. On the right we represent the one-dimensional encoded space (on the y axis); we give a random x coordinate to the inputs in order to get a better visualization.As we can see, in the encoded feature space the blue and orange "*" labels go on one side and the "o" labels on the other. So when fitting the classifier on the encoded space using blue data, the network learns a good delimitation line for both domains. Thus [mSDA](https://adapt-python.github.io/adapt/generated/adapt.feature_based.mSDA.html) performs an efficient adaptation between domains for this toy DA issue.
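As an optional sanity check (not part of the original notebook), we can compare how far apart the two domains sit before and after encoding, reusing only the `predict_features` call already used above; a smaller gap in the encoded space is consistent with, though does not by itself prove, the adaptation visible in the animation:

```python
import numpy as np

# Rough domain-gap check: distance between the source and target means,
# measured in the raw input space and in the mSDA-encoded space.
gap_input = np.linalg.norm(Xs.mean(axis=0) - Xt.mean(axis=0))
gap_encoded = np.linalg.norm(model.predict_features(Xs).mean(axis=0)
                             - model.predict_features(Xt).mean(axis=0))
print(f"input-space gap: {gap_input:.3f}, encoded-space gap: {gap_encoded:.3f}")
```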
DANN We now consider the [DANN](https://adapt-python.github.io/adapt/generated/adapt.feature_based.DANN.html) method.This method consists in learning a new feature representation on which no ``discriminator`` network can be able to classify between source and target data.This is done with adversarial techniques following the principle of GANs.def get_encoder(input_shape=(2,)): model = Sequential() model.add(Dense(100, activation='elu', input_shape=input_shape)) model.add(Dense(2, activation="sigmoid")) model.compile(optimizer=Adam(0.01), loss='mse') return model def get_task(input_shape=(2,)): model = Sequential() model.add(Dense(10, activation='elu')) model.add(Dense(1, activation="sigmoid")) model.compile(optimizer=Adam(0.01), loss='mse') return model def get_discriminator(input_shape=(2,)): model = Sequential() model.add(Dense(10, activation='elu')) model.add(Dense(1, activation="sigmoid")) model.compile(optimizer=Adam(0.01), loss='mse') return model from tensorflow.keras.callbacks import Callback class SavePredictionDann(Callback): """ Callbacks which stores predicted labels in history at each epoch. """ def __init__(self, X_grid_=X_grid, Xt_=Xt, Xs_=Xs): self.X_grid = X_grid_ self.Xt = Xt_ self.Xs = Xs_ self.custom_history_grid_ = [] self.custom_history_ = [] self.custom_history_enc_s = [] self.custom_history_enc_t = [] self.custom_history_enc_grid = [] self.custom_history_disc = [] super().__init__() def on_epoch_end(self, batch, logs={}): """Applied at the end of each epoch""" predictions = model.task_.predict_on_batch( model.encoder_.predict_on_batch(self.X_grid)).reshape(100, 100) self.custom_history_grid_.append(predictions) predictions = model.task_.predict_on_batch( model.encoder_.predict_on_batch(self.Xt)).ravel() self.custom_history_.append(predictions) predictions = model.encoder_.predict_on_batch(self.Xs) self.custom_history_enc_s.append(predictions) predictions = model.encoder_.predict_on_batch(self.Xt) self.custom_history_enc_t.append(predictions) predictions = model.encoder_.predict_on_batch(self.X_grid) self.custom_history_enc_grid.append(predictions) predictions = model.discriminator_.predict_on_batch( model.encoder_.predict_on_batch(self.X_grid)).reshape(100, 100) self.custom_history_disc.append(predictions) from adapt.feature_based import DANN save_preds = SavePredictionDann() model = DANN(get_encoder(), get_task(), get_discriminator(), lambda_=1.0, optimizer=Adam(0.001), random_state=0) model.fit(Xs, ys, Xt, callbacks=[save_preds], epochs=300, batch_size=100, verbose=0); enc_s = np.concatenate(save_preds.custom_history_enc_s) enc_t = np.concatenate(save_preds.custom_history_enc_t) enc = np.concatenate((enc_s, enc_t)) x_min, y_min = enc.min(0) x_max, y_max = enc.max(0) x_min, y_min = (0., 0.) x_max, y_max = (1., 1.) 
def animate_dann(i): i *= 2 yp_grid = (save_preds.custom_history_grid_[i]>0.5).astype(int) yp_t = save_preds.custom_history_[i]>0.5 ax1.clear() ax2.clear() ax1.set_title("Input Space", fontsize=16) show(ax1, yp_grid, yp_t) ax2.set_title("Encoded Space", fontsize=16) Xs_enc = save_preds.custom_history_enc_s[i] Xt_enc = save_preds.custom_history_enc_t[i] X_grid_enc = save_preds.custom_history_enc_grid[i] x_grid_enc = X_grid_enc[:, 0].reshape(100,100) y_grid_enc = X_grid_enc[:, 1].reshape(100,100) disc_grid = (save_preds.custom_history_disc[i]>0.5).astype(int) show(ax2, yp_grid, yp_t, x_grid=x_grid_enc, y_grid=y_grid_enc, Xs=Xs_enc, Xt=Xt_enc, disc_grid=disc_grid) ax2.set_xlabel("U0", fontsize=16) ax2.set_ylabel("U1", fontsize=16) ax2.set_xlim(x_min, x_max) ax2.set_ylim(y_min, y_max) fig, (ax1 , ax2) = plt.subplots(1, 2, figsize=(16, 6)) ani = animation.FuncAnimation(fig, animate_dann, frames=150, blit=False, repeat=True) ani![dann](../images/dann.gif) As we can see on the figure above, when applying [DANN](https://adapt-python.github.io/adapt/generated/adapt.feature_based.DANN.html)algorithm, source data are projected on target data in the encoded space. Thus a ``task`` networktrained in parallel to the ``encoder`` and the ``discriminator`` is able to well classify "o" from "*" in the target domain. Instance Based Finally, we consider here the instance-based method [KMM](https://adapt-python.github.io/adapt/generated/adapt.instance_based.KMM.html).This method consists in reweighting source instances in order to minimize the MMD distance betweensource and target domain. Then the algorithm trains a classifier using the reweighted source data.from adapt.instance_based import KMM save_preds = SavePrediction() model = KMM(get_model(), kernel_params=dict(gamma=1), random_state=0) model.fit(Xs, ys, Xt, callbacks=[save_preds], epochs=100, batch_size=100, verbose=0); def animate_kmm(i): ax.clear() yp_grid = (save_preds.custom_history_grid_[i]>0.5).astype(int) yp_t = save_preds.custom_history_[i]>0.5 weights_src = model.predict_weights().ravel() * 50 show(ax, yp_grid, yp_t, weights_src=weights_src) fig, ax = plt.subplots(1, 1, figsize=(8, 6)) ani = animation.FuncAnimation(fig, animate_kmm, frames=100, blit=False, repeat=True) ani![title](img/cover4.png) Copyright! This material is protected, please do not copy or distribute. 
by: ***Udemy course : Python Bootcamp for Data Science 2021 Numpy Pandas & Seaborn *** 4.1 Tuples To create a simple tuple:tupl = (2,4,6,8)To display the tuple named 'tupl'tuplTo check the type of the variable 'tupl'type(tupl)To create a nested tuple:nest_tub = (1,2,3,('yes', 'no'), 6)To display the nested tuple:nest_tubTo check immutability, lets create this example tuple:tub = (1,2,3,4,5)To display the second element:tub[1]If you try to change the value of the second element from 2 to 9, we get an error:tub[1] =9To join two tuples, we use the plus sign:tup1 = (1,2,3) tup2 = (4,5,6) tub3 = tup1+tup2 tub3Multiplying a tuple will concatenate mu;tiple copies of that tuple:tub3 *4Tupples unpacking:tub1=(11,22,33) a,b,c = tub1To display the unpacked elements:a b cLets create this example tupletup1 = (1,1,2,2,3,3,3,3,4,5,5,6)Using th function count() returns how many elements:tup1.count(3)from google.colab import drive drive.mount('/content/gdrive') !pip install lightkurve import lightkurve as lk import pandas as pd import matplotlib.pyplot as plt import numpy as np import astropy.units as u from astropy.timeseries import BoxLeastSquares np.random.seed(42) t = np.random.uniform(0, 20, 2000) y = np.ones_like(t) - 0.1*((t%3)<0.2) + 0.01*np.random.randn(len(t)) model = BoxLeastSquares(t * u.day, y, dy=0.01) periodogram = model.autopower(0.2) plt.plot(periodogram.period, periodogram.power) model = BoxLeastSquares(t * u.day, y, dy=0.01) periodogram = model.autopower(0.2, objective="snr") model = BoxLeastSquares(t * u.day, y, dy=0.01) periods = np.linspace(2.5, 3.5, 1000) * u.day periodogram = model.power(periods, 0.2) plt.plot(periodogram.period, periodogram.power) model = BoxLeastSquares(t * u.day, y, dy=0.01) periods = np.linspace(0.5, 10.5, 15) * u.day periodogram = model.power(periods, 0.2) plt.plot(periodogram.period, periodogram.power) model = BoxLeastSquares(t * u.day, y, dy=0.01) periodogram = model.autopower(0.2) max_power = np.argmax(periodogram.power) stats = model.compute_stats(periodogram.period[max_power], periodogram.duration[max_power], periodogram.transit_time[max_power]) plt.plot(periodogram.period, periodogram.power) from sklearn import preprocessing import numpy as np a = np.random.random((1, 4)) a = a*20 print("Data = ", a) # normalize the data attributes normalized = preprocessing.normalize(a) print("Normalized Data = ", normalized) import lightkurve as lk lc = lk.LightCurve(time=[1, 2, 3, 4], flux=[0.97, 1.01, 1.03, 0.99]) lc.time #array([1, 2, 3, 4]) lc.flux #array([0.97, 1.01, 1.03, 0.99]) lc.bin(binsize=2).flux #array([0.99, 1.01])Question Generator example First we need to install HuggingFace's transformers library.!pip install transformersCollecting transformers [?25l Downloading https://files.pythonhosted.org/packages/27/3c/91ed8f5c4e7ef3227b4119200fc0ed4b4fd965b1f0172021c25701087825/transformers-3.0.2-py3-none-any.whl (769kB)  |████████████████████████████████| 778kB 2.8MB/s [?25hRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.6/dist-packages (from transformers) (2019.12.20) Collecting sacremoses [?25l Downloading https://files.pythonhosted.org/packages/7d/34/09d19aff26edcc8eb2a01bed8e98f13a1537005d31e95233fd48216eed10/sacremoses-0.0.43.tar.gz (883kB)  |████████████████████████████████| 890kB 16.2MB/s [?25hRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from transformers) (1.18.5) Requirement already satisfied: packaging in /usr/local/lib/python3.6/dist-packages (from transformers) (20.4) Collecting 
tokenizers==0.8.1.rc1 [?25l Downloading https://files.pythonhosted.org/packages/40/d0/30d5f8d221a0ed981a186c8eb986ce1c94e3a6e87f994eae9f4aa[...]Next we have to clone the github repo and import `questiongenerator`:!git clone https://github.com/amontgomerie/question_generator/ !pip install spacy # %load questiongenerator.py import os import sys import math import numpy as np import torch import spacy import re import random import json import en_core_web_sm from transformers import ( AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, ) class QuestionGenerator: def __init__(self, model_dir=None): QG_PRETRAINED = "iarfmoose/t5-base-question-generator" self.ANSWER_TOKEN = "" self.CONTEXT_TOKEN = "" self.SEQ_LENGTH = 512 self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.qg_tokenizer = AutoTokenizer.from_pretrained(QG_PRETRAINED, use_fast=False) self.qg_model = AutoModelForSeq2SeqLM.from_pretrained(QG_PRETRAINED) self.qg_model.to(self.device) self.qa_evaluator = QAEvaluator(model_dir) def generate( self, article, use_evaluator=True, num_questions=None, answer_style="all" ): print("Generating questions...\n") qg_inputs, qg_answers = self.generate_qg_inputs(article, answer_style) generated_questions = self.generate_questions_from_inputs(qg_inputs) message = "{} questions doesn't match {} answers".format( len(generated_questions), len(qg_answers) ) assert len(generated_questions) == len(qg_answers), message if use_evaluator: print("Evaluating QA pairs...\n") encoded_qa_pairs = self.qa_evaluator.encode_qa_pairs( generated_questions, qg_answers ) scores = self.qa_evaluator.get_scores(encoded_qa_pairs) if num_questions: qa_list = self._get_ranked_qa_pairs( generated_questions, qg_answers, scores, num_questions ) else: qa_list = self._get_ranked_qa_pairs( generated_questions, qg_answers, scores ) else: print("Skipping evaluation step.\n") qa_list = self._get_all_qa_pairs(generated_questions, qg_answers) return qa_list def generate_qg_inputs(self, text, answer_style): VALID_ANSWER_STYLES = ["all", "sentences", "multiple_choice"] if answer_style not in VALID_ANSWER_STYLES: raise ValueError( "Invalid answer style {}. 
Please choose from {}".format( answer_style, VALID_ANSWER_STYLES ) ) inputs = [] answers = [] if answer_style == "sentences" or answer_style == "all": segments = self._split_into_segments(text) for segment in segments: sentences = self._split_text(segment) prepped_inputs, prepped_answers = self._prepare_qg_inputs( sentences, segment ) inputs.extend(prepped_inputs) answers.extend(prepped_answers) if answer_style == "multiple_choice" or answer_style == "all": sentences = self._split_text(text) prepped_inputs, prepped_answers = self._prepare_qg_inputs_MC(sentences) inputs.extend(prepped_inputs) answers.extend(prepped_answers) return inputs, answers def generate_questions_from_inputs(self, qg_inputs): generated_questions = [] for qg_input in qg_inputs: question = self._generate_question(qg_input) generated_questions.append(question) return generated_questions def _split_text(self, text): MAX_SENTENCE_LEN = 128 sentences = re.findall(".*?[.!\?]", text) cut_sentences = [] for sentence in sentences: if len(sentence) > MAX_SENTENCE_LEN: cut_sentences.extend(re.split("[,;:)]", sentence)) # temporary solution to remove useless post-quote sentence fragments cut_sentences = [s for s in sentences if len(s.split(" ")) > 5] sentences = sentences + cut_sentences return list(set([s.strip(" ") for s in sentences])) def _split_into_segments(self, text): MAX_TOKENS = 490 paragraphs = text.split("\n") tokenized_paragraphs = [ self.qg_tokenizer(p)["input_ids"] for p in paragraphs if len(p) > 0 ] segments = [] while len(tokenized_paragraphs) > 0: segment = [] while len(segment) < MAX_TOKENS and len(tokenized_paragraphs) > 0: paragraph = tokenized_paragraphs.pop(0) segment.extend(paragraph) segments.append(segment) return [self.qg_tokenizer.decode(s) for s in segments] def _prepare_qg_inputs(self, sentences, text): inputs = [] answers = [] for sentence in sentences: qg_input = "{} {} {} {}".format( self.ANSWER_TOKEN, sentence, self.CONTEXT_TOKEN, text ) inputs.append(qg_input) answers.append(sentence) return inputs, answers def _prepare_qg_inputs_MC(self, sentences): spacy_nlp = en_core_web_sm.load() docs = list(spacy_nlp.pipe(sentences, disable=["parser"])) inputs_from_text = [] answers_from_text = [] for i in range(len(sentences)): entities = docs[i].ents if entities: for entity in entities: qg_input = "{} {} {} {}".format( self.ANSWER_TOKEN, entity, self.CONTEXT_TOKEN, sentences[i] ) answers = self._get_MC_answers(entity, docs) inputs_from_text.append(qg_input) answers_from_text.append(answers) return inputs_from_text, answers_from_text def _get_MC_answers(self, correct_answer, docs): entities = [] for doc in docs: entities.extend([{"text": e.text, "label_": e.label_} for e in doc.ents]) # remove duplicate elements entities_json = [json.dumps(kv) for kv in entities] pool = set(entities_json) num_choices = ( min(4, len(pool)) - 1 ) # -1 because we already have the correct answer # add the correct answer final_choices = [] correct_label = correct_answer.label_ final_choices.append({"answer": correct_answer.text, "correct": True}) pool.remove( json.dumps({"text": correct_answer.text, "label_": correct_answer.label_}) ) # find answers with the same NER label matches = [e for e in pool if correct_label in e] # if we don't have enough then add some other random answers if len(matches) < num_choices: choices = matches pool = pool.difference(set(choices)) choices.extend(random.sample(pool, num_choices - len(choices))) else: choices = random.sample(matches, num_choices) choices = [json.loads(s) for s in choices] for 
choice in choices: final_choices.append({"answer": choice["text"], "correct": False}) random.shuffle(final_choices) return final_choices def _generate_question(self, qg_input): self.qg_model.eval() encoded_input = self._encode_qg_input(qg_input) with torch.no_grad(): output = self.qg_model.generate(input_ids=encoded_input["input_ids"]) question = self.qg_tokenizer.decode(output[0], skip_special_tokens=True) return question def _encode_qg_input(self, qg_input): return self.qg_tokenizer( qg_input, padding='max_length', max_length=self.SEQ_LENGTH, truncation=True, return_tensors="pt", ).to(self.device) def _get_ranked_qa_pairs( self, generated_questions, qg_answers, scores, num_questions=10 ): if num_questions > len(scores): num_questions = len(scores) print( "\nWas only able to generate {} questions. For more questions, please input a longer text.".format( num_questions ) ) qa_list = [] for i in range(num_questions): index = scores[i] qa = self._make_dict( generated_questions[index].split("?")[0] + "?", qg_answers[index] ) qa_list.append(qa) return qa_list def _get_all_qa_pairs(self, generated_questions, qg_answers): qa_list = [] for i in range(len(generated_questions)): qa = self._make_dict( generated_questions[i].split("?")[0] + "?", qg_answers[i] ) qa_list.append(qa) return qa_list def _make_dict(self, question, answer): qa = {} qa["question"] = question qa["answer"] = answer return qa class QAEvaluator: def __init__(self, model_dir=None): QAE_PRETRAINED = "iarfmoose/bert-base-cased-qa-evaluator" self.SEQ_LENGTH = 512 self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.qae_tokenizer = AutoTokenizer.from_pretrained(QAE_PRETRAINED) self.qae_model = AutoModelForSequenceClassification.from_pretrained( QAE_PRETRAINED ) self.qae_model.to(self.device) def encode_qa_pairs(self, questions, answers): encoded_pairs = [] for i in range(len(questions)): encoded_qa = self._encode_qa(questions[i], answers[i]) encoded_pairs.append(encoded_qa.to(self.device)) return encoded_pairs def get_scores(self, encoded_qa_pairs): scores = {} self.qae_model.eval() with torch.no_grad(): for i in range(len(encoded_qa_pairs)): scores[i] = self._evaluate_qa(encoded_qa_pairs[i]) return [ k for k, v in sorted(scores.items(), key=lambda item: item[1], reverse=True) ] def _encode_qa(self, question, answer): if type(answer) is list: for a in answer: if a["correct"]: correct_answer = a["answer"] else: correct_answer = answer return self.qae_tokenizer( text=question, text_pair=correct_answer, padding="max_length", max_length=self.SEQ_LENGTH, truncation=True, return_tensors="pt", ) def _evaluate_qa(self, encoded_qa_pair): output = self.qae_model(**encoded_qa_pair) return output[0][0][1] def print_qa(qa_list, show_answers=True): for i in range(len(qa_list)): space = " " * int(np.where(i < 9, 3, 4)) # wider space for 2 digit q nums print("{}) Q: {}".format(i + 1, qa_list[i]["question"])) answer = qa_list[i]["answer"] # print a list of multiple choice answers if type(answer) is list: if show_answers: print( "{}A: 1.".format(space), answer[0]["answer"], np.where(answer[0]["correct"], "(correct)", ""), ) for j in range(1, len(answer)): print( "{}{}.".format(space + " ", j + 1), answer[j]["answer"], np.where(answer[j]["correct"] == True, "(correct)", ""), ) else: print("{}A: 1.".format(space), answer[0]["answer"]) for j in range(1, len(answer)): print("{}{}.".format(space + " ", j + 1), answer[j]["answer"]) print("") # print full sentence answers else: if show_answers: print("{}A:".format(space), answer, 
"\n")Make sure that we're using the GPU:import torch device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') assert device == torch.device('cuda'), "Not using CUDA. Set: Runtime > Change runtime type > Hardware Accelerator: GPU"Now we can create a `QuestionGenerator` and feed it some text. We are going to use a BBC article about Twitter getting hacked.The models should be automatically loaded when instantiating the `QuestionGenerator` class, but if you have them saved somewhere else you can pass the path to the folder containing them as an argument like `QuestionGenerator(MODEL_DIR)`.qg = QuestionGenerator() with open('articles/owl_rescue.txt', 'r') as a: article = a.read()Now We can call `QuestionGenerator`'s `generate()` method. We can choose an answer style from `['all', 'sentences', 'multiple_choice']`. You can choose how many questions you want to generate by setting `num_questions`. Note that the quality of questions may decrease if `num_questions` is high.If you just want to print the questions without showing the answers, you can optionally set `show_answers=False` when calling `print_qa()`.qa_list = qg.generate( article, num_questions=1, answer_style='sentences' ) print_qa(qa_list)Generating questions... Evaluating QA pairs... 1) Q: How many people were involved in the rescue? A:
The eagle owl rescue involved a team of 12 firefighters, plus a six-member volunteer technical team and two staff from the nearby bat centre, the fire service reported (in German).Consumption analysis Author : (https://github.com/NicolasLacroix)Licence : [Apache License 2.0]Data provided by [DataSud]Source file link (csv) : https://trouver.datasud.fr/dataset/8bfa93b0-ac2f-4148-b550-0ec5c917bb28/resource/52a8f5dd-758d-4e54-a837-8fc7ad57d378/download/eco2mix-regional-tr.csv[DataSud]: https://www.datasud.fr/[Apache License 2.0]: https://github.com/NicolasLacroix/data-representation/blob/master/LICENSEimport pandas as pd import matplotlib.pyplot as plt import numpy as np import dateutil.parser from datetime import datetime, date data_link = 'https://trouver.datasud.fr/dataset/8bfa93b0-ac2f-4148-b550-0ec5c917bb28/resource/52a8f5dd-758d-4e54-a837-8fc7ad57d378/download/eco2mix-regional-tr.csv' data = pd.read_csv(data_link, delimiter=';', encoding='utf_8', parse_dates=True) data.head() # TODO: use parse_date=True in pd.read_csv method instead data['Date'] = pd.to_datetime(data['Date']) data['Heure'] = pd.to_datetime(data['Heure'], format='%H:%M', utc=True).dt.time data['Date - Heure'] = pd.to_datetime(data['Date - Heure'], format='%Y-%m-%dT%H:%M:%S', utc=True) data['Date - Heure'] = data['Date - Heure'].dt.tz_convert('Europe/Paris') data.dtypes volumeLabels = list(data.columns.values)[6:15] percentLabels = list(data.columns.values)[15:-1] # Sorting data by ['Date - Heure'] data = data.sort_values(by=['Date - Heure']) data.head() # Today values today = datetime.combine(date.today(), datetime.min.time()) dailyData = data.loc[data['Date'] == today][['Date - Heure']+volumeLabels] dailyData.plot(x='Date - Heure', title="Today values") plt.show() def getExtremums(data, key): dailyData = getDailyData(data, 'Date - Heure', volumeLabels) # get dataframes per day min_serie = data.loc[data[key] == min(data[key])]['Date'] # Date column of data's serie where data[key] is min min_df = dailyData[pd.to_datetime(min_serie.values[0]).strftime('%Y-%m-%d')] # cell's value (date) to string max_serie = data.loc[data[key] == max(data[key])]['Date'] # Date column of data's serie where data[key] is max max_df = dailyData[pd.to_datetime(max_serie.values[0]).strftime('%Y-%m-%d')] # cell's value (date) to string return min_df, max_df min_serie = data.min() max_serie = data.max() min_df = data.loc[data['Date'] == min_serie['Date']][['Date - Heure']+volumeLabels] max_df = data.loc[data['Date'] == max_serie['Date']][['Date - Heure']+volumeLabels] max_value = max_serie['Consommation (MW)'] min_value = min_serie['Consommation (MW)'] min_df.plot(x='Date - Heure', title="Minimum") max_df.plot(x='Date - Heure', title="Maximum") plt.show() avg_df = data.mean() avg_df # average values per day avg_day = data[['Date'] + volumeLabels].groupby(['Date']).agg(np.mean) avg_day.plot(title="average values per day") plt.show() # average values per hour avg_hr = data[['Heure'] + volumeLabels].groupby(['Heure']).agg(np.mean) ax = avg_hr.plot(title="average values per hour") plt.show() # average percentage per day percent_df = data[percentLabels].mean() ax = percent_df.plot(autopct='%.2f', kind='pie', title='average percentage per day') ax.set_ylabel('') plt.show()Find Shortest Path Using https://www.geeksforgeeks.org/building-an-undirected-graph-and-finding-shortest-path-using-dictionaries-in-python/graph = { 'A' : ['B','C'], 'B' : ['D', 'E'], 'C' : ['F'], 'D' : [], 'E' : ['F'], 'F' : [] } def shortest_path(graph, start, end): explored = [] queue 
= [[start]] if start == end: print("Start is End") return while queue: path = queue.pop() node = path[-1] if node not in explored: for neighbour in graph[node]: new_path = list(path) new_path.append(neighbour) queue.append(new_path) if neighbour == end: print("Shortest Path - ", new_path) return explored.append(node) print("No Path Available") return print(shortest_path(graph, "A", "F"))Shortest Path - ['A', 'C', 'F'] Noneml-mipt course Seminar 2 Linear Regression & other stuffBased on [](https://github.com/esokolov) open materials. Let's take a look at Linear Regression and its implementations in this notebook.__Contents__:* Linear Regression analytical solution* Gradient descent approach* Stochastic gradient* Linear Regression out of the box (sklearn, vw, etc.)* MSE and MAE in Linear RegressionSee `week02_extra*` notebooks for extra (more complex or just additional) materials.import numpy as np import matplotlib import matplotlib.pyplot as plt import pandas as pd np.random.seed(16) matplotlib.rcParams.update({'font.size': 16})Time to generate features matrix $X$ and correct weights vector $w_{true}$. Targer vector $Y$ is computed as $Xw_{true}$ with gaussian noise:n_features = 2 n_objects = 300 batch_size = 10 num_steps = 43 w_true = np.random.normal(size=(n_features, )) X = np.random.uniform(-5, 5, (n_objects, n_features)) X *= (np.arange(n_features) * 2 + 1)[np.newaxis, :] # for different scales Y = X.dot(w_true) + np.random.normal(0, 1, n_objects)*Recap:*Analytical solution formula is simple:$$w = (X^TX)^{-1}X^Ty.$$Let's check how it works:w_star = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(Y) w_star w_trueAs we can see, the analytical solution is quite close to the original one. Градиентный спускЗдесь присутствует обращение матрицы $X^TX$ — очень трудоёмкая операция при большом количестве признаков. Нетрудно подсчитать, что сложность вычислений $O(d^3 + d^2l)$. При решении задач такая трудоёмкость часто оказывается непозволительной, поэтому параметры ищут итерационными методами, стоимость которых меньше. Один из них — градиентный спуск.Напомним, что в градиентном спуске значения параметров на следующем шаге получаются из значений параметров на текущем шаге смещением в сторону антиградиента функционала: $$w^{(t+1)} = w^{(t)} - \eta_t \nabla Q(w^{(t)}),$$где $\eta_t$ — длина шага градиентного спуска.Формула градиента функции ошибки в случае MSE выглядит следующим образом:$$\nabla Q(w) = -2X^Ty + 2X^TXw = 2X^T(Xw - y).$$ Сложность вычислений в данном случае $O(dl)$. Стохастический градиентный спуск отличается от обычного заменой градиента на несмещённую оценку по одному или нескольким объектам. В этом случае сложность становится $O(kd)$, где $k$ — количество объектов, по которым оценивается градиент, $k << l$. Это отчасти объясняет популярность стохастических методов оптимизации. Визуализация траекторий GD и SGDНа простом примере разберём основные тонкости, связанные со стохастической оптимизацией. Обучим на сгенерированных данных линейную регрессию для MSE при помощи полного градиентного спуска — тем самым получим вектор параметров. Покажем последовательность оценок параметров $w^{(t)}$, получаемых в ходе итераций. 
Красная точка — $w_{true}$.w_0 = np.random.uniform(-2, 2, n_features) w = w_0.copy() w_list = [w.copy()] step_size = 1e-2 for i in range(num_steps): w -= 2 * step_size * np.dot(X.T, np.dot(X, w) - Y) / Y.size w_list.append(w.copy()) w_list = np.array(w_list) # compute level set A, B = np.meshgrid(np.linspace(-2, 2, 100), np.linspace(-2, 2, 100)) levels = np.empty_like(A) for i in range(A.shape[0]): for j in range(A.shape[1]): w_tmp = np.array([A[i, j], B[i, j]]) levels[i, j] = np.mean(np.power(np.dot(X, w_tmp) - Y, 2)) plt.figure(figsize=(13, 9)) plt.title('GD trajectory') plt.xlabel('$w_1$') plt.ylabel('$w_2$') plt.xlim((w_list[:, 0].min() - 0.1, w_list[:, 0].max() + 0.1)) plt.ylim((w_list[:, 1].min() - 0.1, w_list[:, 1].max() + 0.1)) plt.gca().set_aspect('equal') # visualize the level set CS = plt.contour(A, B, levels, levels=np.logspace(0, 1, num=20), cmap=plt.cm.rainbow_r) CB = plt.colorbar(CS, shrink=0.8, extend='both') # visualize trajectory plt.scatter(w_true[0], w_true[1], c='r') plt.scatter(w_list[:, 0], w_list[:, 1]) plt.plot(w_list[:, 0], w_list[:, 1]) plt.show()Градиент перпендикулярен линиям уровня. Это объясняет такие зигзагообразные траектории градиентного спуска. Для большей наглядности в каждой точке пространства посчитаем градиент функционала и покажем его направление.# compute level set A, B = np.meshgrid(np.linspace(-3, 3, 100), np.linspace(-3, 3, 100)) A_mini, B_mini = np.meshgrid(np.linspace(-2, 2, 20), np.linspace(-2, 2, 27)) levels = np.empty_like(A) for i in range(A.shape[0]): for j in range(A.shape[1]): w_tmp = np.array([A[i, j], B[i, j]]) levels[i, j] = np.mean(np.power(np.dot(X, w_tmp) - Y, 2)) # visualize the level set plt.figure(figsize=(13, 9)) CS = plt.contour(A, B, levels, levels=np.logspace(-1, 1.5, num=30), cmap=plt.cm.rainbow_r) CB = plt.colorbar(CS, shrink=0.8, extend='both') # visualize the gradients gradients = np.empty_like(A_mini) for i in range(A_mini.shape[0]): for j in range(A_mini.shape[1]): w_tmp = np.array([A_mini[i, j], B_mini[i, j]]) antigrad = - 2 * 1e-3 * np.dot(X.T, np.dot(X, w_tmp) - Y) / Y.shape[0] plt.arrow(A_mini[i, j], B_mini[i, j], antigrad[0], antigrad[1], head_width=0.02) plt.title('Antigradients demonstration') plt.xlabel(r'$w_1$') plt.ylabel(r'$w_2$') plt.xlim((w_true[0] - 1.5, w_true[0] + 1.5)) plt.ylim((w_true[1] - .5, w_true[1] + .7)) plt.gca().set_aspect('equal') plt.show()Визуализируем теперь траектории стохастического градиентного спуска, повторив те же самые действия, оценивая при этом градиент по подвыборке.w = w_0.copy() w_list = [w.copy()] step_size = 0.2 for i in range(num_steps): sample = np.random.randint(n_objects, size=batch_size) w -= 2 * step_size * np.dot(X[sample].T, np.dot(X[sample], w) - Y[sample]) / batch_size w_list.append(w.copy()) w_list = np.array(w_list) # compute level set A, B = np.meshgrid(np.linspace(-2, 2, 100), np.linspace(-2, 2, 100)) levels = np.empty_like(A) for i in range(A.shape[0]): for j in range(A.shape[1]): w_tmp = np.array([A[i, j], B[i, j]]) levels[i, j] = np.mean(np.power(np.dot(X, w_tmp) - Y, 2)) plt.figure(figsize=(13, 9)) plt.title('SGD trajectory') plt.xlabel(r'$w_1$') plt.ylabel(r'$w_2$') plt.xlim((w_list[:, 0].min() - 0.1, w_list[:, 0].max() + 0.1)) plt.ylim((w_list[:, 1].min() - 0.1, w_list[:, 1].max() + 0.1)) plt.gca().set_aspect('equal') # visualize the level set CS = plt.contour(A, B, levels, levels=np.logspace(0, 1, num=20), cmap=plt.cm.rainbow_r) CB = plt.colorbar(CS, shrink=0.8, extend='both') # visualize trajectory plt.scatter(w_true[0], w_true[1], c='r') 
plt.scatter(w_list[:, 0], w_list[:, 1]) plt.plot(w_list[:, 0], w_list[:, 1]) plt.show()Как видно, метод стохастического градиента «бродит» вокруг оптимума. Это объясняется подбором шага градиентного спуска $\eta_k$. Дело в том, что для сходимости стохастического градиентного спуска для последовательности шагов $\eta_k$ должны выполняться [условия Роббинса-Монро](https://projecteuclid.org/download/pdf_1/euclid.aoms/1177729586):$$\sum_{k = 1}^\infty \eta_k = \infty, \qquad \sum_{k = 1}^\infty \eta_k^2 < \infty.$$Интуитивно это означает следующее: 1. последовательность должна расходиться, чтобы метод оптимизации мог добраться до любой точки пространства, 2. но расходиться не слишком быстро. Попробуем посмотреть на траектории SGD, последовательность шагов которой удовлетворяет условиям Роббинса-Монро:w = w_0.copy() w_list = [w.copy()] step_size_0 = 0.45 for i in range(num_steps): step_size = step_size_0 / ((i+1)**0.51) sample = np.random.randint(n_objects, size=batch_size) w -= 2 * step_size * np.dot(X[sample].T, np.dot(X[sample], w) - Y[sample]) / batch_size w_list.append(w.copy()) w_list = np.array(w_list) # compute level set A, B = np.meshgrid(np.linspace(-2, 2, 100), np.linspace(-2, 2, 100)) levels = np.empty_like(A) for i in range(A.shape[0]): for j in range(A.shape[1]): w_tmp = np.array([A[i, j], B[i, j]]) levels[i, j] = np.mean(np.power(np.dot(X, w_tmp) - Y, 2)) plt.figure(figsize=(13, 9)) plt.title('SGD trajectory') plt.xlabel(r'$w_1$') plt.ylabel(r'$w_2$') plt.xlim((w_list[:, 0].min() - 0.1, w_list[:, 0].max() + 0.1)) plt.ylim((w_list[:, 1].min() - 0.1, w_list[:, 1].max() + 0.1)) #plt.gca().set_aspect('equal') # visualize the level set CS = plt.contour(A, B, levels, levels=np.logspace(0, 1, num=20), cmap=plt.cm.rainbow_r) CB = plt.colorbar(CS, shrink=0.8, extend='both') # visualize trajectory plt.scatter(w_true[0], w_true[1], c='r') plt.scatter(w_list[:, 0], w_list[:, 1]) plt.plot(w_list[:, 0], w_list[:, 1]) plt.show()Сравнение скоростей сходимости Последнее, что хотелось бы продемонстрировать — сравнение, насколько быстро достигают оптимума метод полного и стохастического градиентного спуска. Сгенерируем выборку и построим график зависимости функционала от итерации.# data generation n_features = 50 n_objects = 1000 num_steps = 200 batch_size = 10 w_true = np.random.uniform(-2, 2, n_features) X = np.random.uniform(-10, 10, (n_objects, n_features)) Y = X.dot(w_true) + np.random.normal(0, 5, n_objects) step_size_sgd = 1 step_size_gd = 1e-2 w_sgd = np.random.uniform(-4, 4, n_features) w_gd = w_sgd.copy() residuals_sgd = [np.mean(np.power(np.dot(X, w_sgd) - Y, 2))] residuals_gd = [np.mean(np.power(np.dot(X, w_gd) - Y, 2))] for i in range(num_steps): step_size = step_size_sgd / ((i+1) ** 0.51) sample = np.random.randint(n_objects, size=batch_size) w_sgd -= 2 * step_size * np.dot(X[sample].T, np.dot(X[sample], w_sgd) - Y[sample]) / batch_size residuals_sgd.append(np.mean(np.power(np.dot(X, w_sgd) - Y, 2))) w_gd -= 2 * step_size_gd * np.dot(X.T, np.dot(X, w_gd) - Y) / Y.shape[0] residuals_gd.append(np.mean(np.power(np.dot(X, w_gd) - Y, 2))) plt.figure(figsize=(13, 6)) plt.plot(range(num_steps+1), residuals_gd, label='Full GD') plt.plot(range(num_steps+1), residuals_sgd, label='SGD') plt.title('Empirial risk over iterations') plt.xlim((-1, num_steps+1)) plt.legend() plt.xlabel('Iter num') plt.ylabel(r'Q($w$)') plt.grid() plt.show()Как видно, GD буквально за несколько итераций оказывается вблизи оптимума, в то время как поведение SGD может быть весьма нестабильным. 
Как правило, для более сложных моделей наблюдаются ещё большие флуктуации в зависимости качества функционала от итерации при использовании стохастических градиентных методов. Путём подбора величины шага можно добиться лучшей скорости сходимости, и существуют методы, адаптивно подбирающие величину шага (AdaGrad, Adam, RMSProp). Linear regression out of the boxFinally, let's take a brief look at implemented versions of Linear Regression from sklearn. The main classes are:* [LinearRegression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) — classical linear regression (*actially, it is just `scipy.linalg.lstsq` wrapped with sklearn `Predictor` class) __analytical__ solver.* [Lasso](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lasso.html) — Linear regression with L1 regularization.* [Ridge](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html) — Linear regression with L2 regularization.To minimize any other error function you are free to use [SGDRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html) (or wait a few weeks and we will get the great *PyTorch* automatic differentiation engine).Let's compare the speed of analytical and gradient solutions from the sklearn realizations.IPython magic `%%time` wrapper will be used.To measure the quality $R^2$ score will be used. It compares our model (`a`) with one always predicting mean `y`:$$R^2 = 1 - \frac{\sum_i (y_i - a(x_i))^2}{\sum_i (y_i - \overline{y}_i)^2}$$`LinearRegression` vs. `Ridge`: __Fight!__ ![](img/mortal_combat.jpg)from sklearn.linear_model import LinearRegression, Lasso, Ridge n_features = 700 n_objects = 100000 num_steps = 150 w_true = np.random.uniform(-2, 2, (n_features, 1)) X = np.random.uniform(-100, 100, (n_objects, n_features)) Y = X.dot(w_true) + np.random.normal(0, 10, (n_objects, 1)) %%time lr = LinearRegression() lr.fit(X, Y) print(f'R2: {lr.score(X, Y)}') %%time lr = Ridge(alpha=0.0, solver='sparse_cg') lr.fit(X, Y) print(f'R2: {lr.score(X, Y)}')R2: 0.999968249091112 CPU times: user 3.08 s, sys: 567 ms, total: 3.65 s Wall time: 968 msФункции потерь в регрессии Функционал качества в задачах обучения с учителем обычно задается в виде суммы по объектам выборки:$$Q(a) = \frac 1 \ell \sum_{i=1}^\ell L(y_i, a(x_i)),$$где $L(\cdot, \cdot)$ - функция потерь, задающая штраф за разницу между предсказанием и истинным значением целевого признака. Свойства функции потерь:* $L(y_i, a(x_i)) \geqslant 0$;* $L(y_i, y_i) = 0$. Как отмечалось на первой лекции, функционал качества должен в первую очередь отвечать требованиям заказчика, при этом математические свойства функции потерь могут быть неудобны для оптимизации. __Пример:__ если мы не различаем маленькие ошибки (между 0.01 и 0.1 нет особой разницы), но зато не хотим получать большие ошибки, можно использовать следующую функцию потерь:$$L(y_i, a(x_i)) = [| y_i - a(x_i) | < \varepsilon],$$ $\varepsilon$ - допустимая разница между предсказанием и фактом. Среднеквадратичная и средняя абсолютная ошибкаКроме требований заказчика, функционал качества должен учитывать математические особенности модели, например устойчивость к шумовым объектам. В линейной регрессии Mean Squared Error: $L(y_i, a(x_i)) = (a(x_i) - y_i)^2$ не обладает этим свойством, потому что задает очень большие штрафы за большие отклонения от фактического значения. Рассмотрим это явление на примере. 
Выберем один признак, от которого целевой признак (имеющий индекс 15 в матрице X) зависит практически линейно. Добавим к выборке два объекта-выброса и посмотрим, как изменится оптимизированная на MSE прямая.with open('data/data_preprocessed.json') as file: X = pd.read_json(file) X_subset = X[[7, 15]].values # add two outliers X_subset_modified = np.vstack((X_subset, [[1, 90], [2, 50]])) def scatter_points_and_plot_line_MSE(X_subset): plt.scatter(X_subset[:, 0], X_subset[:, 1]) lr = LinearRegression() lr.fit(X_subset[:, 0][:, None], X_subset[:, 1]) grid = np.linspace(0, 2, 100) line = lr.predict(grid[:, None]) plt.plot(grid, line) plt.figure(figsize=(20, 5)) plt.subplot(1, 2, 1) scatter_points_and_plot_line_MSE(X_subset) plt.ylim(-20, 100) plt.xlabel("x") plt.ylabel("y") plt.subplot(1, 2, 2) scatter_points_and_plot_line_MSE(X_subset_modified) plt.ylim(-20, 100) plt.xlabel("x")Из-за шумовых объектов прямая достаточно сильно изменила наклон. Поэтому вместо MSE часто используют Mean Absoulte Error:$$L(y_i, a(x_i)) = |y_i - a(x_i)|$$Теперь обучим регрессию, оптимизируя MAE. В sklearn такая регрессия не реализована, но можно использовать модуль statsmodelsimport statsmodels.api as sm import statsmodels.formula.api as smf def scatter_points_and_plot_line_MAE(X_subset): mod = smf.quantreg('f15 ~ f7', pd.DataFrame(data=X_subset, columns=["f7", "f15"])) # задаеем зависимость и передаем данные res = mod.fit(q=0.5) plt.scatter(X_subset[:, 0], X_subset[:, 1]) # визуализируем точки grid = np.linspace(0, 2, 100) plt.plot(grid, grid * res.params["f7"] + res.params["Intercept"]) # визуализируем прямую return mod, res plt.figure(figsize=(20, 5)) plt.subplot(1, 2, 1) model, result = scatter_points_and_plot_line_MAE(X_subset) plt.ylim(-20, 100) plt.xlabel("x") plt.ylabel("y") plt.subplot(1, 2, 2) model, result = scatter_points_and_plot_line_MAE(X_subset_modified) plt.ylim(-20, 100) plt.xlabel("x")/installed/anaconda/anaconda3/lib/python3.6/site-packages/statsmodels/regression/quantile_regression.py:207: RuntimeWarning: divide by zero encountered in double_scalars d = np.where(e > 0, (q/fhat0)**2, ((1-q)/fhat0)**2) /installed/anaconda/anaconda3/lib/python3.6/site-packages/statsmodels/regression/quantile_regression.py:209: RuntimeWarning: invalid value encountered in multiply xtdx = np.dot(exog.T * d[np.newaxis, :], exog) /installed/anaconda/anaconda3/lib/python3.6/site-packages/statsmodels/regression/quantile_regression.py:220: RuntimeWarning: divide by zero encountered in double_scalars lfit.sparsity = 1. / fhat0Прямая не изменила направление из-за выбросов. Попробуем добавить больше шумовых объектов:X_subset_modified_twice = np.vstack(( X_subset_modified, np.random.randint(5, size=60).reshape(-1, 2) * [1, 30], )) plt.figure(figsize=(20, 5)) plt.subplot(1, 2, 1) model, result = scatter_points_and_plot_line_MAE(X_subset) plt.ylim(-20, 100) plt.xlabel("x") plt.ylabel("y") plt.subplot(1, 2, 2) model, result = scatter_points_and_plot_line_MAE(X_subset_modified_twice) plt.ylim(-20, 100) plt.xlabel("x")Прямая изменила наклон, когда мы добавили 30 (почти 15%) шумовых точек. 
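To recap the loss functions compared in this section in code form, here is a small illustrative sketch (not part of the seminar materials) of MSE, MAE, and the epsilon-tolerance indicator loss introduced above; the indicator is written so that a perfect prediction gives zero loss, consistent with the stated property L(y, y) = 0:

```python
import numpy as np

def mse_loss(y_true, y_pred):
    # Squared penalty: large residuals dominate, so outliers pull the fit strongly.
    return np.mean((y_pred - y_true) ** 2)

def mae_loss(y_true, y_pred):
    # Absolute penalty: grows linearly, hence more robust to outliers.
    return np.mean(np.abs(y_pred - y_true))

def tolerance_loss(y_true, y_pred, eps=0.1):
    # Indicator loss: only errors larger than the tolerance eps are counted.
    return np.mean(np.abs(y_pred - y_true) >= eps)

y_true = np.array([1.0, 2.0, 3.0, 100.0])   # last point is an outlier
y_pred = np.array([1.05, 2.0, 2.95, 3.0])
print(mse_loss(y_true, y_pred))        # dominated by the single outlier
print(mae_loss(y_true, y_pred))        # much less affected
print(tolerance_loss(y_true, y_pred))  # fraction of predictions off by >= eps
```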
Мультиколлинеарность и регуляризацияimport seaborn as sns from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, r2_scoreРешение задачи МНКdef my_linear_regression(X_train, Y_train): return np.linalg.inv(X_train.T.dot(X_train)).dot(X_train.T).dot(y_train) def predict(X, w): return np.dot(X, w)Загрузим датасет https://habrahabr.ru/post/206306/data = pd.read_csv('data/energy_efficiency.csv') data.head()Посмотрим на скоррелированность данныхdata.corr() f, ax = plt.subplots(figsize=(10, 8)) corr = data.drop(['Y1','Y2'], axis=1).corr() sns.heatmap(corr, square=True, ax=ax, cmap=sns.diverging_palette(220, 10, as_cmap=True)) f, ax = plt.subplots(figsize=(10, 8)) corr = data.corr() sns.heatmap(corr, square=True, ax=ax, cmap=sns.diverging_palette(220, 10, as_cmap=True))Видим, что x1 скоррелирован с x2, а x4 с x5. Из-за этого матрица $X^{T}*X$ необратима. Посмотрим как на таких данных отработает наша линейная регрессия Разобьем выборку на train и testX = data.drop(['Y1','Y2'], axis=1) y = data['Y1'] X.shape, y.shape X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)Обучим регрессию и посмотрим на качествоw = my_linear_regression(X_train, y_train) y_train_pred = predict(X_train, w) print("Train MSE: ", mean_squared_error(y_train, y_train_pred)) print("Train R2: ", r2_score(y_train, y_train_pred)) y_test_pred = predict(X_test, w) print("Test MSE: ", mean_squared_error(y_test, y_test_pred)) print("Test R2: ", r2_score(y_test, y_test_pred))Test MSE: 460128.72662043874 Test R2: -4413.449687872916Как-то не очень Попробуем убрать скоррелированные признакиX = data.drop(['X1','X4', 'Y1','Y2'], axis=1) y = data['Y1'] X.shape, y.shape X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)Обучим регрессию и посмотрим на качествоw = my_linear_regression(X_train, y_train) y_train_pred = predict(X_train, w) print("Train MSE: ", mean_squared_error(y_train, y_train_pred)) print("Train R2: ", r2_score(y_train, y_train_pred)) y_test_pred = predict(X_test, w) print("Test MSE: ", mean_squared_error(y_test, y_test_pred)) print("Test R2: ", r2_score(y_test, y_test_pred))Test MSE: 11.387213360639421 Test R2: 0.8907517015187721Юху! 
Получили алгоритм с хорошим качеством Реализуем линейную регрессию с L2 регуляризациейdef my_linear_regression(X_train, Y_train, l2=0): return np.linalg.inv(X_train.T.dot(X_train) + l2*np.eye(X_train.shape[1])).dot(X_train.T).dot(y_train)Обучим регрессию с регуляризацией и посмотрим на качествоX = data.drop(['Y1','Y2'], axis=1) y = data['Y1'] X.shape, y.shape X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) w = my_linear_regression(X_train, y_train, l2=0.001) y_train_pred = predict(X_train, w) print("Train MSE: ", mean_squared_error(y_train, y_train_pred)) print("Train R2: ", r2_score(y_train, y_train_pred)) y_test_pred = predict(X_test, w) print("Test MSE: ", mean_squared_error(y_test, y_test_pred)) print("Test R2: ", r2_score(y_test, y_test_pred))Test MSE: 9.4639612678421 Test R2: 0.9092032762837478Read data on regolith cover and thickness extracted from facet simulations, and plot the resultsimport csv import numpy as np import matplotlib.pyplot as plt import pandas as pd %matplotlib inline filename = 'regolith_analysis20180910.csv' # Count number of lines in file num_lines = len(open(filename).readlines( )) # Create data arrays dist_param = np.zeros(num_lines - 1) # skip 1 header line weath_param = np.zeros(num_lines - 1) reg_cover_proportion = np.zeros(num_lines - 1) reg_thickness = np.zeros(num_lines - 1) # Read data with open(filename, 'r') as csvfile: myreader = csv.reader(csvfile) i = 0 for row in myreader: print(','.join(row) + '\n') if i >= 1: dist_param[i-1] = row[1] weath_param[i-1] = row[2] reg_cover_proportion[i-1] = row[3] reg_thickness[i-1] = row[9] i += 1 tau = 500.0 # average interval between one-cell slip events (corresponds to numerical model interval of 866 yr) dprime = dist_param * tau wprime = weath_param * tau data = {'d': dist_param, 'dprime': dprime, 'w': weath_param, 'wprime': wprime, 'cover': reg_cover_proportion} df = pd.DataFrame(data) df = df.sort_values(by=['dprime', 'wprime']) df reg_cover_proportion = df['cover'].values.reshape((4, 31)) wprime = df['wprime'].values.reshape((4, 31)) dprime = df['dprime'].values.reshape((4, 31)) wprime psyms = ['k.', 'k+', 'k^', 'k*'] # Plot the weathering + disturbance runs for d in range(4): plt.semilogx(wprime[d,:], reg_cover_proportion[d,:], psyms[d]) # Labels and legend plt.xlabel(r"Dimensionless weathering rate parameter, $w'$", fontsize=14) plt.ylabel('Fractional regolith cover', fontsize=14) plt.legend([r"$d'= 10^{-1}$", r"$d'= 10^0$", r"$d'= 10^1$", r"$d'= 10^2$", r"$d' \rightarrow \infty$", r"$\theta = 60^\circ - 360 w' / \pi$", r"$\theta = 30^\circ$"], fontsize=14) plt.savefig('reg_cover_vs_wprime.pdf')Connect to SnowEx database# This is what you will use for all of hackweek to access the db db_name = 'snow:hackweek@172.16.17.324/snowex' # Using the function get_db, we receive 2 ways to interact with the database engine, session = get_db(db_name)Connect to LayerData table# Use the function to see what columns are available to use. 
db_columns = get_table_attributes(LayerData) # Print out the results nicely #print("These are the available columns in the table:\n \n* {}\n".format('\n* '.join(db_columns)))Make a query of temperature profiles# Pick a dataset dataset = 'temperature' # Pick a date #collection_date = date(2020, 2, 8) # Make the query qry = session.query(LayerData).filter(LayerData.type == 'temperature')#.filter(LayerData.date == collection_date) # Limit it to a couple thousand qry = qry.limit(2000) # Execute the query and convert to geopandas in one handy function df = query_to_geopandas(qry, engine) # how many did we retrieve? print(f'{len(df.index)} records returned!')1675 records returned!close the db session# Close the session to avoid hanging transactions session.close()---Convert geodataframe into an xarray dataset# Create a datetime column combining date and time df['datetime'] = [datetime.datetime.combine(row.date, row.time) for _, row in df.iterrows()]columns: `index, Location, Type, Easting, Northing, Surveyor, Time, Time Type, Air Temp, Ground, Notes,Wx,Hs,Temperature,Notes,geometry` unpack the columns we want from the dataframe# unpack values from dataframe that we want, make sure to set the datatypes we want time = df.datetime.values depth = df.depth.values.astype(np.float64) northing = df.northing.values.astype(np.float64) easting = df.easting.values.astype(np.float64) longitude = df.longitude.values.astype(np.float64) latitude = df.latitude.values.astype(np.float64) temperature = df.value.values.astype(np.float64)Build a dataset from these columns# build a dataset ds = xr.Dataset( data_vars=dict( temperature=(["time"], temperature) ), coords=dict( #lon=(["time"], longitude), #lat=(["time"], latitude), easting=(["time"], easting), northing=(["time"],northing), depth=(["time"], depth), time=(["time"], time), ), attrs=dict(description="snow pit temperature dataset"), )Try making some plotsplt.figure(figsize=(20,5)) plt.scatter(x=ds.time, y=ds.depth, c=ds.temperature, cmap='magma') plt.colorbar(label='Snow layer temperature ($\degree C$)') plt.xlabel('Time') plt.ylabel('Height of snow layer (cm)') plt.title('Snow layer temperature with depth over time') plt.figure(figsize=(15,5)) plt.gca().set_aspect('equal') plt.scatter(x=ds.easting,y=ds.northing, c=ds.temperature, s=ds.depth, alpha=0.5, cmap='magma') plt.colorbar(label='Snow layer temperature ($\degree C$)'); plt.xlabel('Easting (m)') plt.ylabel('Northing (m)') plt.title('Snow layer temperatures\n(point size proportional to depth)')---Open snow pit data from Jewell Lund's fieldwork later in the 2020 snow season:import pandas as pd import numpy as np import matplotlib.pyplot as plt df_jl = pd.read_csv('~/project/hot-pow/contributors/Dillon_Ragar/layerdata_temps_db_JL.csv') # Grab only the later portion of this dataframe df_jl = df_jl[1676:] # Set the index to a datetime df_jl['datetime'] = pd.to_datetime(df_jl.date) df_jl.set_index('datetime', inplace=True); # make another xarray dataset from this dataframe # unpack values from dataframe that we want, make sure to set the datatypes we want time = df_jl.index.values depth = df_jl.depth.values.astype(np.float64) northing = df_jl.northing.values.astype(np.float64) easting = df_jl.easting.values.astype(np.float64) longitude = df_jl.longitude.values.astype(np.float64) latitude = df_jl.latitude.values.astype(np.float64) temperature = df_jl.value.values.astype(np.float64) # build a dataset ds_jl = xr.Dataset( data_vars=dict( temperature=(["time"], temperature) ), coords=dict( #lon=(["time"], 
longitude), #lat=(["time"], latitude), easting=(["time"], easting), northing=(["time"],northing), depth=(["time"], depth), time=(["time"], time), ), attrs=dict(description="snow pit temperature dataset from Jewell Lund"), )---Open temperature timeseries dataframedf = pd.read_csv('~/CR10X_GM1_final_storage_1.csv',index_col=0) df.index = pd.DatetimeIndex(df.index) #plt.figure(figsize=(10,4)) # ## plot radiometer average temperature #df.rad_avg.plot(linestyle='-', marker='', markersize=1, c='k', label='Ground-based $T_s$') ## plot the snow temperature at each depth it was measured #df.temp1_avg.plot(linestyle='-', marker='.', markersize=1, c=[0.8,0.8,1], label='Ts @ -5 cm') #df.temp2_avg.plot(linestyle='-', marker='.', markersize=1, c=[0.6,0.6,1], label='Ts @ -10 cm') #df.temp3_avg.plot(linestyle='-', marker='.', markersize=1, c=[0.4,0.4,1], label='Ts @ -15 cm') #df.temp4_avg.plot(linestyle='-', marker='.', markersize=1, c=[0.2,0.2,1], label='Ts @ -20 cm') #df.temp5_avg.plot(linestyle='-', marker='.', markersize=1, c=[0,0,1], label='Ts @ -30 cm') # ## plot all other snow pits #plt.scatter(x=ds.time.values, y=ds.temperature, c=ds.depth, cmap='bwr', marker='.', vmin=0, vmax=250, label='Snow Pit Temperatures\n(all layers)') # ## set axes limits #plt.ylim((-35,5)) #plt.xlim((pd.Timestamp(2020,2,5,11,0),pd.Timestamp(2020,2,12,16,0))) # ## add a legend to the plot #plt.legend(loc=(1.02, 0.1)) # ## set axes labels #plt.ylabel('Temperature [$C\degree$]') #plt.xlabel('Time') # ## add grid lines to the plot #plt.grid('on') # ## set the plot title #plt.title('Snow Pit Temperature Timeseries'); ## save figure #plt.savefig('snow_pit_temp_timeseries.jpg') plt.figure(figsize=(20,5)) # plot radiometer average temperature plt.scatter(x=df.index, y=np.ones_like(df.rad_avg)*(73), c=df.rad_avg, cmap='magma', marker='o', vmin=-20, vmax=0, label='Snow pit 2S10 timeseries (continuous)') plt.scatter(x=df.index, y=np.ones_like(df.temp1_avg)*(73-5), c=df.temp1_avg, cmap='magma', marker='o', vmin=-20, vmax=0) plt.scatter(x=df.index, y=np.ones_like(df.temp2_avg)*(73-10), c=df.temp2_avg, cmap='magma', vmin=-20, vmax=0) plt.scatter(x=df.index, y=np.ones_like(df.temp3_avg)*(73-15), c=df.temp3_avg, cmap='magma', vmin=-20, vmax=0) plt.scatter(x=df.index, y=np.ones_like(df.temp4_avg)*(73-20), c=df.temp4_avg, cmap='magma', vmin=-20, vmax=0) plt.scatter(x=df.index, y=np.ones_like(df.temp5_avg)*(73-30), c=df.temp5_avg, cmap='magma', vmin=-20, vmax=0) # Plot asll other snowpit temperatures over time plt.scatter(x=ds.time, y=ds.depth, c=ds.temperature, cmap='magma', marker='x', label='Grand Mesa IOP snow pit temperatures (instantaneous)') # plot Jewell Lund's snow pit observations for later in the snow season plt.scatter(x=ds_jl.time, y=ds_jl.depth, c=ds_jl.temperature, cmap='magma', marker='+', label='Jewell Lund\'s snow pit temperatures (instantaneous)') plt.colorbar(label='Snow layer temperature ($\degree C$)') # set axes limits plt.ylim((-5,200)) #plt.xlim((pd.Timestamp(2020,2,5,11,0),pd.Timestamp(2020,2,12,16,0))) # add a legend to the plot plt.legend() # set axes labels plt.xlabel('Time') plt.ylabel('Height of snow layer (cm)') # add grid lines to the plot plt.grid('on') # set the plot title plt.title('Snow Pit Temperature Timeseries with with Height of Snow Layer\n(continuous and instantaneous measurements)'); # save figure plt.savefig('snow_pit_temp_timeseries_by_depth_all_data.jpg')Labeled Faces in the Wildhttp://vis-www.cs.umass.edu/lfw/ 
The purpose of using this database: it is not a database of images taken under controlled capture conditions, but one used for face detection and face matching on image sets captured under a wide variety of conditions. In recent years it has increasingly been used to evaluate image recognition techniques. Notes for evaluation: - There are few East Asian faces. - The face images are already normalized; the positions of the two eyes are already aligned. - Roll should be evaluated separately, by rotating the images and measuring the detection rate. - Because the subjects are celebrities, the same celebrities may already have been used as training subjects for detection and similar tasks. - Since the images appear to come from press photography, a high proportion of them have good focus and brightness. Here we evaluate how robust face detection is to in-plane rotation. Some databases already have the eye positions normalized, so detection performance in real-world conditions can only be assessed by evaluating on data with added in-plane rotation. This script therefore creates images with in-plane rotation applied to the data and evaluates the detection rate.%matplotlib inline import pandas as pd import resnet_ssd_face import glob dataset = "lfw" names = glob.glob("lfw/lfw/*/*.jpg") degs=(-45, -40, -35, -30, -25, -20, -15, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40, 45) #degs=( -20, 0, 20) names.sort() for deg in degs: resnet_ssd_face.processDatabase(dataset, names, deg)/usr/local/lib/python2.7/dist-packages/numpy/core/fromnumeric.py:2957: RuntimeWarning: Mean of empty slice. out=out, **kwargs)Analysis of the data after running detection on the Labeled Faces in the Wild datasetdfs={} for deg in degs: dfs[deg] = pd.read_csv("log_lfw_%d.csv" % deg) print deg, dfs[deg]["truePositives"].mean() rates = [dfs[deg]["truePositives"].mean() for deg in degs] data = {"degs":degs, "rates":rates} df = pd.DataFrame(data, columns=["degs", "rates"]) df.plot(x="degs", y="rates", grid=True)VESIcal Part I: An open-source thermodynamic model engine for mixed volatile (H$_2$O-CO$_2$) solubility in silicate melts$^1$, $^{2,3}$, $^4$, $^1$, $^5$$^1$Jacobs, NASA Johnson Space Center, Houston, TX 77058, USA$^2$Johns Hopkins University, Department of Earth and Planetary Sciences, Baltimore, MD 21218, USA$^3$University of Iceland, Institute of Earth Sciences, Askja, Sturlugata 7, 101 Reykjavik, Iceland$^4$University of Cambridge, Department of Earth Sciences, Downing Street, Cambridge CB2 3EQ, UK$^5$University of Geneva, Department of Earth Sciences, Geneva, Switzerland **Key Points**1. The first comprehensive volatile solubility tool capable of processing large datasets automatically2. Seven built-in solubility models, with automatic calculation and plotting functionality3. Built in python and easily usable by scientists with any level of coding skill **Abstract**Thermodynamics has been fundamental to the interpretation of geologic data and modeling of geologic systems for decades. However, more recent advancements in computational capabilities and a marked increase in researchers' accessibility to computing tools have outpaced the functionality and extensibility of currently available modeling tools. Here we present VESIcal (Volatile Equilibria and Saturation Identification calculator): the first comprehensive modeling tool for H$_2$O, CO$_2$, and mixed (H$_2$O-CO$_2$) solubility in silicate melts that: a) allows users access to seven of the most popular models, plus easy inter-comparison between models; b) provides universal functionality for all models (e.g., functions for calculating saturation pressures, degassing paths, etc.); c) can process large datasets (1,000's of samples) automatically; d) can output computed data into an Excel spreadsheet or CSV file for simple post-modeling analysis; e) integrates plotting capabilities directly within the tool; and f) provides all of these within the framework of a python library, making the tool extensible by the user and allowing any of the model functions to be incorporated into any other code capable of calling python. The tool is presented within this manuscript, which is a Jupyter notebook containing worked examples accessible to python users with a range of skill levels.
The basic functions of VESIcal can also be accessed via a web app ([https://vesical.anvil.app](https://vesical.anvil.app))The VESIcal python library is open-source and available for download at [https://github.com/kaylai/VESIcal](https://github.com/kaylai/VESIcal). **Plain Language Summary**Geologists use numerical models to understand and predict how volcanoes behave during storage (pre-eruption), eruption, and the composition and amount of volcanic gas released into the atmosphere of Earth and other planets. Most models are made by performing experiments on a limited dataset and creating a model that applies to that dataset. Some models combine lots of these individual models to make a generalized model that can apply to lots of different volcanoes. Many of these different models exist, and they all have specific uses, limitations, and pitfalls. Here we present the first tool, VESIcal, which acts as a simple interface to seven of the most commonly used models. VESIcal is written in python, so users can use VESIcal as an application or include it in their own models. VESIcal is the first tool that allows geologists to model thousands of data points automatically and provides a simple platform to compare results from different models in a way never before possible. IntroductionUnderstanding the solubility and degassing of volatiles in silicate melts is a crucial component of modeling volcanic systems. As dissolved components, volatiles (primarily H$_2$O and CO$_2$) affect magma viscosity, rheology, and crystal growth. In addition, due to the strong dependence of volatile solubility on pressure, measured volatile concentrations in preserved high-pressure melts (i.e., melt inclusions: liquid magma trapped within crystals at high pressure, then brough to the surface during an eruption) can be used to determine pre-eruptive magmatic storage pressures, and thus depths. Importantly, volatile exsolution-driven overpressure of a magmatic system is likely the trigger of many explosive volcanic eruptions (Tait et al., 1989; Blake, 1984; Stock et al., 2016). Once triggered, further drops in magmatic pressure caused by ascent of magma within a volcanic conduit result in the continuous exsolution of volatiles from the melt. Volatile elements experience a large positive volume change when moving from a dissolved to exsolved free fluid state. This expansion fuels a dramatic increase in the magma's buoyancy, which can often lead to a runaway effect in which the ascent and degassing of volatile-bearing magma eventually erupts at the surface in an explosive fashion. Working in concert with seismic and gas monitoring data, pre-eruptive magmatic volatile concentrations as well as solubility and degassing modelling can be used in forensic and sometimes in predictive scenarios, helping us to understand and potentially mitigate the effects of explosive eruptions. All of these processes depend directly on the solubility -- or the capacity of a magma to hold in solution -- of volatile elements. Over the last several decades, a veritable explosion of new volatile solubility data has opened the door to a plethora of models to describe the solubility of H$_2$O, CO$_2$, or mixed H$_2$O-CO$_2$ fluid in magmas covering a wide compositional, pressure, and temperature range. Volatile solubility is highly dependent upon the composition of the host magma, making already challenging experiments more onerous to perform to encapsulate the range of magmas seen in nature. 
The most fundamental models (Stolper, 1982; Dixon et al., 1995; Moore et al. 1998) focus on a specific range of magma bulk compositions (e.g., basalt or rhyolite only). Later studies filled in compositional gaps, some with an increased focused on mixed-volatile (H$_2$O-CO$_2$) studies, increasing the natural applicability of our models to more systems (Liu et al., 2005; Iacono-Marziano et al., 2012; Iacovino et al., 2013). To date, there have been only a few significant efforts to create a holistic thermodynamic model calibrated by a wide range of data in the literature. The most popular are MagmaSat (the mixed-volatile solubility model built into the software package MELTS v. 1.2.0; Ghiorso and Gualda, 2015) and the model of Papale et al. (2006). Both of these studies have made their source code available; the Papale et al. (2006) FORTRAN source code (titled Solwcad), web app, and a Linux program can be found at http://www.pi.ingv.it/progetti/eurovolc/, and very recently MagmaSat has been made accessible via the ENKI thermodynamic python framework (http://enki-portal.org/)Despite this communal wealth of solubility models, quantitative calculations of volatile solubility, and by extension saturation pressures, equilibrium fluid compositions, and degassing paths, remains a time-consuming endeavor. Modeling tools that are available are typically unable to process more than one sample at a time, requiring manual entry of the concentrations of 8-10 major oxides, temperature, as well as CO$_2$ and H$_2$O concentrations to calculate saturation pressures, or X$_{H2O}$ to calculate dissolved volatile contents. This is particularly problematic for melt inclusion studies, where saturation pressures are calculated for hundreds of inclusions, each with different entrapment temperatures, CO$_2$, H$_2$O, and major element concentrations. For example, the saturation pressures from 105 Gakkel ridge melt inclusions calculated in MagmaSat by Bennet et al. (2019) required the manual entry of 1,365 values! The potential for user error in this data entry stage should not be overlooked.In many cases, newly published solubility models do not include an accompanying tool, requiring users to correctly combine and interpret the relevant equations (e.g., Dixon et al., 1995; Dixon, 1997; Liu et al., 2005; Shishkina et al., 2014). This is problematic from a perspective of reproducibility of the multitude of studies utilizing these models, especially given that some of the equations in the original manuscripts contain typos or formatting errors. For some models, an Excel spreadsheet was provided, or available at request from the authors. For example, Newman and Lowenstern (2002) included a simplified version of the Dixon (1997) model as part of “VolatileCalc”, which was written in Visual Basic for Excel. Due to its simplicity, allowing users to calculate saturation pressures, degassing paths, isobars and isopleths with a few button clicks and pop-up boxes, this tool has proved extremely popular (with 766 citations at the time of writing). However, to calculate saturation pressures using VolatileCalc, the user must individually enter the SiO$_2$, H$_2$O, CO$_2$ content and temperature of every single sample into pop-up boxes. Similarly, the Excel spreadsheet for the Moore et al. (1998) model calculates dissolved H$_2$O contents based on the concentration of 9 oxides, temperature, and the fraction of X$_{H_{2}O}$ in the vapor, which must be pasted in for every sample. Finally, Allison et al. 
(2019) provide an Excel spreadsheet that allows users to calculate fugacities, partial pressures, isobars, isopleths and saturation pressures. Again, parameters for each sample must be entered individually, with no way to calculate large numbers of samples automatically. Some of these published models and tools are at risk of being lost to time, since spreadsheet tools (particularly earlier studies published before journal-provided hosting of data and electronic supplements was commonplace) must be obtained by request to the author. Even if the files are readily available, programs used to open and operate them may not support depreciated file formats. More recently, authors have provided web-hosted interfaces to calculating saturation pressures and dissolved volatile contents (e.g., Iacono-Marziano et al., 2012; http://calcul-isto.cnrs-orleans.fr/, and Ghiorso and Gualda, 2015; http://melts.ofm-research.org/CORBA_CTserver/GG-H2O-CO2.html). Ghiorso and Gualda (2015) also provide a Mac application. While more accessible in the present time, this does not negate the issue of the longevity of these models. The link provided in the Iacono-Marziano et al. (2012) manuscript returns an error “this site cannot be reached”, although email contact with the author directed us towards the newer link given above. Similarly, the link to the H$_2$O-CO$_2$ equation of state web calculator that Duan and Zhang (2006) provided in their manuscript returns a 404 error. While we certainly advocate for the continued refinement of solubility models, including the completion of new experiments in poorly studied yet critical compositional spaces such as andesites (Wieser et al., in prep), a perhaps more crucial step at this juncture is in the development of a tool that can apply modern computational solutions to making our current knowledge base of volatile solubility in magmas accessible and enduring.Here we present VESIcal (Volatile Equilibria and Saturation Identification calculator): a python-based thermodynamic volatile solubility model engine that incorporates seven popular volatile solubility models under one proverbial roof. The models included in VESIcal are (also see Table 1):1. MagmaSat: VESIcal's default model. The mixed-volatile solubility model within MELTS v. 1.2.0 (Ghiorso and Gualda, 2015)2. Dixon: The simplification of the Dixon (1997) model as implemented in VolatileCalc (Newman and Lowenstern, 2002) 1. DixonWater and DixonCarbon available as pure-fluid models3. MooreWater: (Moore et al. 1998; water only, but H$_2$O fluid concentration can be specified)4. Liu: (Liu et al 2005) 1. LiuCarbon and LiuWater available as pure-fluid models5. IaconoMarziano: (Iacono-Marziano et al. 2012) 1. IaconoMarzianoWater and IaconoMarzianoCarbon available as pure-fluid models6. ShishkinaIdealMixing: (Shishkina et al. 2014) using pure-H$_2$O and pure-CO$_2$ models and assuming ideal mixing. In general, the pure-fluid versions of this model should be used 1. ShishkinaWater and ShishkinaCarbon available as pure-fluid models7. AllisonCarbon: (Allison et al. 2019, carbon only) 1. AllisonCarbon_vesuvius (default; phonotephrite from Vesuvius, Italy) 2. AllisonCarbon_sunset (alkali basalt from Sunset Crater, AZ, USA) 3. AllisonCarbon_sfvf (basaltic andesite from San Francisco Volcanic Field, AZ, USA) 4. AllisonCarbon_erebus (phonotephrite from Erebus, Antarctica) 5. AllisonCarbon_etna (trachybasalt from Etna, Italy) 6. 
AllisonCarbon_stromboli (alkali basalt from Stromboli, Italy) As any individual model is only valid within its calibrated range (see below), and each model is parameterized and expressed differently (e.g., empirical vs. thermodynamic models), it is impractical to simply combine them into one large model. Instead, VESIcal is a single tool that can access and utilize all built-in models, giving VESIcal an extensive pressure-temperature-composition calibration range (Fig. 1). VESIcal is capable of performing a wide array of calculations on large datasets automatically, with built-in functionality for extracting data from an Excel or CSV file. In addition, the code is written such that it is flexible (sample, calculation type, and model type can be chosen discreetly) and extensible (VESIcal code can be imported for use in python scripts, and the code is formatted such that new volatile models can be added).Importantly, VESIcal has been designed for practicality and ease of use. It is designed to be used by anyone, from someone who is completely unfamiliar with coding to an adept programmer. The non-coder user can interact with VESIcal through a webapp (https://vesical.anvil.app) or directly within this manuscript, which utilizes the user-friendly Jupyter Notebook format, allowing them to upload a file with data, execute the various example calculations provided below, and save the results to an Excel or CSV file to work with outside of VESIcal. This notebook also incorporates built-in plotting options for easy visualization of user data and calculated results. More experienced programmers may wish to use the more advanced functionality provided by VESIcal, including the ability to hybridize models (e.g., use one model for H$_2$O and another for CO$_2$) or write their own routines and code calling VESIcal methods. VESIcal is an open source tool and as such is far less prone to the preservation issues discussed above. Because the VESIcal code is hosted on GitHub, every change to the code is tracked publicly (Perkel, 2016). VESIcal’s current release (version 0.9.10) is also archived on Zenodo, which provides a static citable DOI (10.5281/zenodo.4652839) for the current version of the code, along with a snapshot of the GitHub repository at the time of release.A detailed history of volatile solubility modeling and the implications of VESIcal are explored in detail in the companion manuscript to this work, Wieser et al. (submitted). Research MethodologyNavigating the array of models implemented in VESIcal can be challenging. How can a user determine which model best suits their needs? MagmaSat (the default model in VESIcal) is the most widely calibrated in P-T-X space, and so we recommend it for the majority of cases. Where a user wishes to use the other implemented models, we provide some tools to help choose the most appropriate model (see Supplement). These tools are described in more detail in Section 3.2 on comparing user data to model calibrations.**Table 1. Calibration ranges of VESIcal models**![Description](tables/Table1.png)**Figure 1**![Description](figures/Figure1.png)*Figure 1 - Illustrations showing the calibrated ranges of VESIcal models in pressure-temperature space. 
Due to difficulty in differentiating between pure-CO*$_2$ *and mixed fluid experiments in the literature, plots are subdivided into: experiments performed with pure-CO*$_2$ *or mixed (H*$_2$*O-CO*$_2$*) fluid; and pure-H*$_2$*O fluid.*A list of model names recognized by VESIcal can be retreived by executing the command `v.get_model_names()`, assuming VESIcal has been imported as `v` as is demonstrated in worked examples below. Note that the model names as listed in the previous section are given in terms of how to call them within VESIcal (e.g., `model='MooreWater'`). Allison et al. (2019) provides unique model equations for each of the six alkali-rich mafic magmas investigated in their study. The default model in VESIcal is that calibrated for Vesuvius magmas, whose calibration has the widest pressure range of the study (Table 1). Setting a model name of `'AllisonCarbon'` within VESIcal will thus result in calculations using the AllisonCarbon_vesuvius model equations.All of the calculations implemented in VESIcal can be performed using any of the models included. The code is structured by calculation rather than by model, which provides an intuitive way for users to interact with the code and compare outputs from multiple models. A python method defined for each calculation takes the model name and any applicable data as arguments and returns the results of the calculation. Each method performs five key functions: 1) creates the requested model object and performs any necessary pre-processing (e.g., ensuring relevant data are present; normalizing data); 2) takes user input and performs the mathematical calculation; 3) does any necessary processing of the output (e.g., normalizing totals); 4) checks that the model is being used within its calibrated range; and 5) returns calculated outputs in an intuitive and manipulatable format (e.g., a python dictionary, a figure, or a pandas DataFrame). Results of calculations can be saved to one or more Excel or CSV files. To demonstrate that VESIcal returns results which are comparable with pre-existing tools, we have performed a number of tests, which are described in the Supplementary Information (Text S2). Model Calibrations and BenchmarkingThe pressure, temperature, and composition calibration ranges of the seven models implemented in VESIcal are shown in Table 1 and Figure 1. VESIcal abides by statements of caution made by the authors of these models regarding their extrapolation by informing the user if a calculation is being performed outside of a model's calibrated range. In this case, the code returns a warning message, which is as specific as possible, along with the requested output. We also provide several Jupyter notebooks in the supplementary material (Supplementary Text S3-S4 and Supplementary files S1-S7), allowing users to plot their data amongst the calibrations of the different models to assess their suitability for less objective measures. Detailed descriptions of the seven solubility models implemented in VESIcal, including information about their calibration range in terms of melt composition, pressure, and temperature, are given in Wieser et al. (in prep).Testing was undertaken to ensure that VESIcal faithfully reproduces the results of all incorporated models. When possible, all models were benchmarked by testing VESIcal outputs against those of a relevant published calculator (e.g., web apps or Excel macros). The models of Shishkina et al. (2014) and Liu et al. 
(2005) were published with no such tool and so testing instead compares VESIcal outputs to experimental conditions or analyses and, where possible, plots VESIcal results against published figures. All models underwent multiple tests, the results of which are shown in the supplement (Supplementary Text S3-S4 and Supplemental Jupyter Notebooks S1-S7). For all models, VESIcal reproduced the results from previous tools (e.g., web apps, Excel spreadsheets) to within ±1% relative and often on the order of ±0.1% relative.MagmaSat, VESIcal’s default model, underwent three tests, the results of which are shown in Fig. 2: 1. Comparison of saturation pressures from MORB melt inclusions in VESIcal to those published by Bennett et al. (2019), who used the MagmaSat Mac App (R$^2$=0.99998; Fig. 2a); 2. Comparison of fluid composition (X$_{H_{2}O}$) calculated with VESIcal and the web app (R$^2$=0.999, identical considering the web app returns 2dp; Fig. 2b); 3. Comparison of isobars for the Early Bishop Tuff calculated with VESIcal (star symbols) and isobars published in Fig. 14 of Ghiorso and Gualda (Fig. 2c). VESIcal outputs using the model of Dixon (1997) were tested against outputs from the VolatileCalc Excel spreadsheet (Newman and Lowenstern, 2002) and a widely used Excel macro (see, e.g., Tucker et al., 2019).**Figure 2**![Description](figures/Figure2.png)*Figure 2 - Benchmarking of VESIcal against MagmaSat. a. Comparison of saturation pressures calculated with VESIcal against those by Bennett et al. (2019) using the MagmaSat app for Mac. Samples are all MORB melt inclusions, and pressures were calculated at a temperature unique to each sample. b. Equilibrium fluid compositions calculated with VESIcal against those calculated with the MagmaSat web app. c. Individual points along the 1,000, 2,000, and 3,000 bar isobars for the Early Bishop Tuff rhyolite calculated with VESIcal (stars) and plotted atop isobars published in Fig. 14 of Ghiorso and Gualda (2015).* Format of the python libraryIn this section, the basic organization and use cases of VESIcal are discussed. VESIcal relies heavily on python pandas, a python package designed for working with tabulated data. Knowledge of pandas is not required to use VESIcal, and we refer the user to the pandas documentation for an overview of the package (https://pandas.pydata.org/pandas-docs/stable/userguide/index.html).Specific details on how to perform model calculations are discussed in Section 3 and include worked examples. The VESIcal library is written so that users can interact first and foremost with the calculation they want to perform. Five standard calculations can be performed with any model in the library: 1. `calculate_dissolved_volatiles()`; 2. `calculate_equilibrium_fluid_composition()`; 3. `calculate_saturation_pressure()`; 4. `calculate_isobars_and_isopleths()` (plus functionality for plotting; only for mixed volatiles models); and 5. `calculate_degassing_path()` (plus functionality for plotting; only for mixed volatiles models).Fig. 3 illustrates the basic organization of the code. First, the user determines which calculation they wish to perform by accessing one of the five core calculation classes (listed above). In this step, the user specifies any input parameters needed for the calculation (e.g., sample composition in wt% oxides, pressure in bars, temperature in $^{\circ}$C, and fluid composition "X_fluid" in terms of XH$_2$O$^{fluid}$) as well as the model they wish to use. 
The default model is MagmaSat, but the user may specify any model in the library. As an example, the code to calculate the saturation pressure of some sample using the MagmaSat model would be written as:`calculate_saturation_pressure(sample=mysample, temperature=850.0).result`where `mysample` is a variable (python dictionary or pandas Series) containing the composition of the sample in oxide wt%, and the temperature is given in $^{\circ}$C. If a different model is desired, for example Dixon (1997), it can be passed as:`calculate_saturation_pressure(sample=mysample, temperature=850.0, model='Dixon').result`The core calculation classes each perform two functions: 1) a check is performed to ensure that the user input is within the model's recommended calibration range; 2) the `calculate()` method sends the user input to the appropriate model. **Figure 3**![Fig. 3](figures/Figure3.png)*Figure 3 - Flowchart illustrating the basic organization of the python library. First, a user chooses a calculation to perform and calls one of the five core calculation classes. Here, any necessary parameters are passed such as sample composition, pressure, and temperature. A check is run to ensure the calculation is being performed within model-specified limits. The Calculate() class then calls on one of the Model() classes. The default model is MagmaSat, but a user may specify a different model when defining the calculation parameters. Standard pre-processing is then performed on the input data, and this pre-processing step is unique to each model. The processed data are then fed into a model-specific method to perform the desired core calculation.*Users can process individual samples (single-sample calculations) or entire datasets (batch calculations; Fig. 4). If processing more than one sample, the "simplest" way to interact with VESIcal is via batch calculations. Here, the user provides input data in the form of a Microsoft Excel spreadsheet (.xlsx file) or CSV file and instructs the model to perform whatever calculation is desired. The model is run on all samples and returns data formatted like a spreadsheet (using the python pandas package), which contains the user's original input data plus whatever model outputs were calculated. The user can continue to work with returned data by saving the result to a variable (as is shown in all examples in this manuscript). Data can then be exported to an Excel or CSV file with a simple command (see Section 3.10).The syntax for processing a single sample is very similar to that for batch calculations but provides the user direct access to more advanced features that cannot be accessed via batch calculations (e.g., specifying fugacity or activity model, hybridizing models; see Section 3.9). This also gives the user more flexibility in integrating any VESIcal model function into some other python code. **Figure 4**![Fig. 4](figures/Figure4.png)*Figure 4 - Flowchart illustrating the different operational paths. On top, batch calculation is shown, in which an Excel or CSV file with any amount of samples is fed into the model, calculations are performed, and the original user data plus newly calculated values are returned and can be saved as an Excel or CSV file. Below, single-sample calculation is shown. These methods can run calculations on one sample at a time, but multi-sample calculations can be performed iteratively with code written by the user. Calculated values are returned as a variable. 
For single-sample calculations, more advanced modeling options can be set, and hybridization of models can be performed.* Running the codeVESIcal can be used in a number of ways: via this Jupyter notebook, via the VESIcal web app, or by directly importing VESIcal into any python script. VESIcal was born from functionality provided by ENKI and so all the files necessary to use VESIcal are hosted on the ENKI server (http://enki-portal.org/). A unique personal coding environment can be initiated by logging into the ENKI production server using a GitLab username and password (which is free to obtain; see directions on the ENKI website for specifics). The simplest way to use VESIcal while retaining all of its functionality is within this very manuscript, in the form of a Jupyter notebook. Because this manuscript and VESIcal python library files are hosted on the ENKI server, code can be manipulated and executed in the code cells below. Making changes won't affect the public version of this manuscript. Likewise, any user can write their own python code using VESIcal by creating a Jupyter notebook on the ENKI server and importing VESIcal as is demonstrated in the code below. Computation time on the ENKI server is limited by the server itself. VESIcal may run faster if installed locally. Advanced instructions on installing VESIcal on your own computer are provided in the Supplement (Supplementary Text S1). Note that VESIcal requires installation of the ENKI thermoengine library to function properly. Thermoengine is written in python but is based on the original MELTS code (Ghiorso and Sack 1995; Ghiorso and Gualda, 2015), which contains MacOS-specific header files. The result is that thermoengine is most easily installed on MacOS but can be installed on Windows and Linux operating systems via Docker (see thermoengine documentation for installation instructions; https://gitlab.com/ENKI-portal/ThermoEngine).The most limited but simplest method to interacting with VESIcal is through the web app (https://vesical.anvil.app). The web app can currently perform three of the five core calculations in batch process mode (via upload of an Excel or CSV file). Some, but not all, optional parameters can be set.**To run the code in this notebook**, nothing needs to be installed. Simply execute the code cells below, changing parameters as desired. Custom data may be processed by uploading an Excel or CSV file into the same folder containing this notebook and then changing the filename in Section 3.1. DocumentationThis manuscript serves as an introduction to the VESIcal library aimed at python users of all levels. However, the code itself is documented with explanations of each method, its input parameters, and its returned values. This documentation can be accessed at our readthedocs website (https://vesical.readthedocs.io/). The documentation for any function can be viewed in a jupyter notebook by typing the function followed by a question mark and executing the cell (e.g., "`v.calculate_saturation_pressure?`").Video tutorials are also available on the VESIcal YouTube (https://www.youtube.com/channel/UCpvCCs5KMXzOxXWm0seF8Qw). Currently, the first tutorial covers the basics of VESIcal. More videos for specific features and uses are planned. Generic methods for calculating mixed-fluid propertiesVESIcal provides a set of methods for calculating the properties of mixed CO$_2$-H$_2$O fluids, which can be used with any combination of H$_2$O and CO$_2$ solubility model. 
The use of generic methods allows additional models to be added to VESIcal by defining only the (simpler) expressions describing pure fluid solubility. Non-ideality of mixing in the fluid or magma phases can be incorporated by specifying activity and fugacity models. A complete description of these methods, including all relevant equations, can be found in the Supplement (Supplementary Text S2).

Workable example uses

In this section we detail how to use the various functions available in VESIcal through worked examples. The python code presented below may be copied and pasted into a script or can be edited and executed directly within the Jupyter notebook version of this manuscript. For all examples, code in sections 3.0.2 and 3.1 must be executed to initialize the model and import data from the provided companion Excel file. The following sections may then be executed on their own and do not need to be executed in order.

In each example below, a generic "method structure" is given along with definitions of unique, required, and optional user inputs. The method structure is simply for illustrative purposes and gives default values for every argument (input). In some cases, executing the method structure as shown will not produce a sensible result. For example, the default values for the `plot()` function (Section 3.8) contain no data, and so no plot would be produced. Users should replace the default values shown with values corresponding to the samples or conditions of interest.

All examples will use the following sample data by default (but this can be changed by the user):

- Dataset from example_data.xlsx loaded in Section 3.1.1 (variable name `myfile`)
- Single composition defined in Section 3.1.2 (variable name `mysample`)
- Sample 10* extracted from the example_data.xlsx dataset in Section 3.1.3 (variable name `sample_10`)

Calculations performed on single samples or on a dataset imported from an Excel or CSV file containing many samples are executed in two distinct ways. Note that single sample calculations require that the argument `sample` be defined. To return the numerical result of the calculation, `.result` must be added to the end of the method, as shown below. Batch calculations are performed on the dataset itself, after that dataset is imported into VESIcal. Thus, the `sample` argument does not need to be defined discretely, since sample compositional information is stored within the dataset object. The two basic formats for performing calculations are:

*Single sample calculations*

>`myvariable = v.name_of_the_core_calculation(sample=mysample, argument1=value1, argument2=value2).result`

*Batch calculations*

>`myvariable = myfile.name_of_the_core_calculation(argument1=value1, argument2=value2)`

where VESIcal has been imported as `v`, `myvariable` is some arbitrary variable name to which the user wishes to save the calculated output, `name_of_the_core_calculation` is one of the five core calculations, `mysample` is a variable containing compositional information in wt% oxides, `myfile` is a variable containing a BatchFile object created by importing an Excel or CSV file, and `argument1`, `argument2`, `value1`, and `value2` are two required or optional arguments and their user-assigned values, respectively.

Workable examples detailed here are:

1. [Loading, viewing, and preparing user data](loading_viewing_preparing)
  1.1. [Loading a Batch file](batch_processing)
  1.2. [Defining a single sample composition](defining_single_sample)
  1.3. [Plotting user data](plotting_user_data)
  1.4. [Extracting a single sample from a Batch file](extracting_single_melt_composition)
  1.5. [Normalizing and transforming data](normalizing_transforming_data)
2. [Calculating dissolved volatile concentrations](calculating_dissolved_volatile_concentrations)
3. [Calculating equilibrium fluid compositions](calculating_equilibrium_fluid_compositions)
4. [Calculating saturation pressures](calculating_saturation_pressures)
5. [Calculating and plotting isobars and isopleths](calculating_plotting_isobars_isopleths)
6. [Calculating and plotting degassing paths](calculating_plotting_degassing_paths)
7. [Plotting multiple calculations](plotting_multiple_calculations)
8. [Comparing results from multiple models](comparing_results_from_multiple_models)
9. [Code hybridization (Advanced)](code_hybridization)
10. [Exporting data](exporting_data)

Function arguments and their definitions

Each section below details which arguments are required or optional inputs and gives examples of how to perform the calculations. Table 2 lists all arguments, both required and optional, used in the five core calculations. Many of the function arguments have identical form and use across all calculations, and so we list these here. Any special cases are noted in the section describing that calculation.

The most commonly used arguments are:

>`sample`: *Single sample calculations only.* The composition of a sample. A VESIcal Sample object is created to hold compositional information about the sample. A Sample object can be created from a dictionary or pandas Series containing values, with compositions of oxides in wt%, oxides in mol fraction, or cations in mol fraction. This argument is not needed for batch calculations since they are performed on BatchFile objects, which already contain sample information. See examples for details.

>`temperature`, `pressure`, and `X_fluid`: the temperature in $^{\circ}$C, the pressure in bars, and the mole fraction of H$_2$O in the H$_2$O-CO$_2$ fluid, XH$_2$O$^{fluid}$. In all cases, `X_fluid` is optional, with a default value of 1 (pure H$_2$O fluid). Note that the `X_fluid` argument is only used for calculation of dissolved volatile concentrations. *For single sample calculations*, temperature, pressure, and X_fluid should be specified as numerical values. *For batch calculations*, temperature, pressure, and X_fluid can either be specified as numerical values or as strings referring to the names of columns within the file containing temperature, pressure, or X_fluid values for each sample. If a numerical value is passed for temperature, pressure, or X_fluid, that will be the value used for all samples. If, alternatively, the user wishes to use temperature, pressure, and/or X_fluid information in their BatchFile object, the title of the column containing temperature, pressure, or X_fluid data should be passed in quotes (as a string) to `temperature`, `pressure`, and/or `X_fluid`, respectively. Note for batch calculations that if temperature, pressure, or XH$_2$O$^{fluid}$ information exists in the BatchFile but a single numerical value is defined for one or more of these variables, both the original information and the values used for the calculations will be returned.

>`verbose`: *Only for single sample calculations.* Always an optional argument with a default value of False.
If set to True, additional values of interest, which were calculated during the main calculation, are returned in addition to the results of the calculation.>`print_status`: *Only for batch calculations* Always an optional argument, which sometimes defaults to True and other times defaults to False (see specific calculation section for details). If set to True, the progress of the calculation will be printed to the terminal. The user may desire to see the status of the calculation, as some calculations using MagmaSat can be somewhat slow, particularly for large datasets.>`model`: Always an optional argument referring to the name of the desired solubility model to use. The default is always "MagmaSat". **Table 2: Matrix of all arguments used in the five core calculations, the nature of the argument (required or optional) and the input type or default value.**![Description](tables/argument_table.png) Initialize packages For any code using the VESIcal library, the library must be imported for use. Here we import VESIcal as v. Any time we wish to use a function from VESIcal, that function must be preceded by '`v.`' (e.g., v.calculate_saturation_pressure). Specific examples of this usage follow. Here we also import some other python libraries that we will be using in the worked examples below.import sys sys.path.insert(0, '../') import VESIcal as v import pandas as pd #The following are options for formatting this manuscript pd.set_option('display.max_colwidth', 0) from IPython.display import display, HTML %matplotlib inlineLoading, viewing, and preparing user dataAll of the following examples will use data loaded in the code cells in this section. Both batch processing of data loaded from a file and single-sample processing are shown. An example file called 'example_data.xlsx' is included with this manuscript. You can load in your own data by first ensuring that your file is in the same folder as this notebook and then by replacing the filename in the code cell below with the name of your file. The code cell below must be executed for the examples in the rest of this section to function properly. Batch processingBatch calculations are always facilitated via the `BatchFile()` class, which the user uses to specify the filename corresponding to sample data. Loading in data is as simple as calling `BatchFile(filename)`. Optionally, `units` can be used to specify whether the data are in wt% oxides, mol fraction oxides, or mol fraction cations. Calculations will always be performed and returned with melt composition in the default units (wt% oxides unless changed by the user) and fluid composition in mol fraction.**Structure of the input file:** A file containing compositions (and optional pressure, temperature, or XH$_2$O$^{fluid}$ information) on one or multiple samples can be loaded into VESIcal. The loaded file must be a Microsoft Excel file with the extension .xls or .xlsx or CSV file with the extension .csv. The file must be laid out in the same manner as the example file 'example_data.xlsx'. The basic structure is also shown in Table 3. Any extraneous columns that are not labeled as oxides or input parameters will be ignored during calculations. The first column titled 'Label' contains sample names. Note that the default assumption on the part of VESIcal is that this column will be titled 'Label'. If no 'Label' column is found, the first non-oxide column name will be set as the index column, meaning this is how samples can be accessed by name (see Section 3.1.3). 
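As a concrete illustration of this layout, the short sketch below builds a hypothetical two-sample table with pandas and loads it into VESIcal either from disk or directly from the DataFrame. All sample names and values here are invented for illustration only; the `BatchFile_from_DataFrame()` helper used in the second option is described with the `dataframe` argument below.

```python
import pandas as pd
import VESIcal as v

# Hypothetical input table: one row per sample, a 'Label' column, oxide columns
# in wt%, and optional extra columns (e.g., pressure and temperature), which are
# ignored during calculations unless referenced by name.
df = pd.DataFrame({'Label': ['samp-1', 'samp-2'],
                   'SiO2':  [47.9, 77.3],
                   'Al2O3': [17.5, 12.6],
                   'FeO':   [7.2, 0.5],
                   'MgO':   [7.6, 0.03],
                   'CaO':   [12.4, 0.4],
                   'Na2O':  [2.6, 4.0],
                   'K2O':   [0.03, 4.9],
                   'H2O':   [2.0, 6.5],
                   'CO2':   [0.10, 0.05],
                   'Temp':  [1200, 850],
                   'Press': [2000, 1500]})

# Option 1: save to Excel and load the file, as is done with example_data.xlsx below
df.to_excel('my_input_data.xlsx', index=False)
myfile = v.BatchFile('my_input_data.xlsx')

# Option 2: build the BatchFile directly from the DataFrame
myfile = v.BatchFile_from_DataFrame(df, units='wtpt_oxides', label='Label')
```
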
An index column can be specified by the user using the argument `label` (see documentation below). The following columns must contain compositional information as oxides. The only allowable oxides are: SiO$_2$, TiO$_2$, Al$_2$O$_3$, Fe$_2$O$_3$, FeO, Cr$_2$O$_3$, MnO, MgO, CaO, NiO, CoO, Na$_2$O, K$_2$O, P$_2$O$_5$, H$_2$O, and CO$_2$. Currently, VESIcal can only read these oxide names exactly as written (e.g., with no leading or trailing spaces and with correct capitalization), but functionality to interpret variations in how these oxides are entered is planned (e.g., such that "sio2. " would be understood as "SiO2"). All of these oxides need not be included; if for example your samples contain no NiO concentration information, you can omit the NiO column. Omitted oxide data will be set to 0 wt\% concentration. If other oxide columns not listed here are included in your file, they will be ignored during calculations. Notably, the order of the columns does not matter, as they are indexed by name rather than by position. Compositions can be entered either in wt% (the default), mol%, or mole fraction. If mol% or mole fraction data are loaded, this must be specified when importing the tile.Because VESIcal may misread column headings, we highly recommend that users examine their data after loading into VESIcal and before performing calculations. The user data, as it will be used by VESIcal, can be viewed at any time with `myfile.get_data()` (see generation of Table 3 below).Pressure, temperature, or XH$_2$O$^{fluid}$ data may optionally be included, if they are known. Column names for these data do not matter, as they can be specified by the user as will be shown in following examples.The standard units used by VESIcal are always pressure in bars, temperature in $^{\circ}$C, melt composition as oxides in wt%, and fluid composition as mol fraction (typically specified as X_fluid, the mol fraction of H$_2$O in an H$_2$O-CO$_2$ fluid, ranging from 0-1). Sample compositions may be translated between wt%, mol fraction, and mol cations if necessary.**Class structure:** `BatchFile(filename, sheet_name=0, file_type='excel', units='wtpt_oxides', label='Label', default_normalization='none', default_units='wtpt_oxides', dataframe=None)`**Required inputs:**>`filename`: A file name must be passed in quotes. This file must be in the same folder as the notebook or script that is calling it. This imports the data from the file name given and saves it to a variable of your choosing.**Optional inputs:** By default, the BatchFile class assumes that loaded data is in units of wt%; alternatively, data in mole fraction oxides or cations may be loaded.>`sheet_name`: If importing data from an Excel file, this argument is used to specify which sheet to import. Only one sheet can be imported to a single BatchFile object. The default is '0', which imports the first sheet in the file, regardless of its name.>`file_type`: Specifies whether the file being imported is an Excel or CSV file. This argument is never strictly necessary, as `BatchFile()` will automatically detect whether an imported file is Excel or CSV if the file extension is one of .xls or .xslx (Excel) or .csv (CSV).>`units`: The units in which data are input. The default value is `'wtpt_oxides'` for data as wt% oxides. The user can pass `'mol_oxides'` for data in mol fraction oxides or `'mol_cations'` for data in mol fraction cations. >`default_normalization`: The type of normalization to apply to the data by default. 
One of: None, `'standard'`, `'fixedvolatiles'`, or `'additionalvolatiles'`. These normalization types are described in the section on normalization below.

>`default_units`: The type of composition to return by default, one of: `'wtpt_oxides'` (wt% oxides, default), `'mol_oxides'` (mol fraction oxides), or `'mol_cations'` (mol fraction cations).

>`label`: This is optional but can be specified if the column title referring to sample names is anything other than "Label". The default value is "Label". If no "Label" column is present and the label argument is not specified, the first column whose first row is not one of VESIcal's recognized oxides will be set as the index column. The index column will be used to select samples by name.

>`dataframe`: This argument is used for transforming a pandas DataFrame object into a VESIcal BatchFile object. For convenience, this functionality is also defined as a separate function `BatchFile_from_DataFrame(dataframe, units='wtpt_oxides', label='Label')`.

**Outputs:**

>A special type of python object defined in the VESIcal code known as a BatchFile object.

myfile = v.BatchFile('Supplement/Datasets/example_data.xlsx')

Once the BatchFile object is created and assigned to a variable, the user can then access the data loaded from their file as `variable.get_data()`. In this example, the variable corresponding to the `BatchFile` object is named `myfile` and so the data in that file can be accessed with `myfile.get_data()`. Below, `myfile.get_data()` is saved to a variable we name `data`. The variable `data` is a pandas DataFrame object, which makes displaying the data itself quite simple and aesthetically pleasing, since pandas DataFrames mimic spreadsheets.

Usage of `get_data()` allows the user to retrieve the data as originally entered or in any units and with any normalization supported by VESIcal.

**Class structure:** `get_data(self, normalization=None, units=None, asBatchFile=False)`

**Optional inputs:**

>`normalization` or `units` may be passed, with options as defined in the description of BatchFile above.

>`asBatchFile`: Default is False. If True, will return a VESIcal BatchFile object.

**Outputs:**

>A pandas DataFrame or BatchFile object with all user data.

**Table 3. User input data: Compositions, pressures, and temperatures for several silicate melts as supplied in the file 'example\_data.xlsx'**

data = myfile.get_data()
data

For the rest of this manuscript, data will be pulled from the `example_data.xlsx` file (Supplemental Dataset S1), which contains compositional information for basalts (Tucker et al., 2019; Roggensack, 2001), andesites (Moore et al., 1998), rhyolites (Mercer et al., 2015; Myers et al., 2019), and alkaline melts (phonotephrite, basaltic-trachyandesite, and basanite from Iacovino et al., 2016). Several additional example datasets from the literature are available in the Supplement (Supplementary Datasets S2-S5; Table 4). These include experimentally produced alkaline magmas from Iacovino et al. (2016; `alkaline.xlsx`), basaltic melt inclusions from Kilauea (Tucker et al., 2019) and Gakkel Ridge (Bennett et al., 2019; `basalts.xlsx`), basaltic melt inclusions from Cerro Negro volcano, Nicaragua (Roggensack, 2001; `cerro_negro.xlsx`), and rhyolite melt inclusions from the Taupo Volcanic Center, New Zealand (Myers et al., 2019) and a topaz rhyolite from the Rio Grande Rift (Mercer et al., 2015; `rhyolites.xlsx`).
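Any of these supplementary datasets can be imported in exactly the same way as `example_data.xlsx`; the minimal sketch below is an illustration only (the path is an assumption and should be adjusted to wherever the file is stored relative to the notebook or script being run).

```python
# Hypothetical sketch: load one of the supplementary example datasets and
# inspect it before running any calculations. Adjust the path as needed.
rhyolites = v.BatchFile('Supplement/Datasets/rhyolites.xlsx')
rhyolites.get_data()
```
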
Where available, the calibration datasets for VESIcal models are also provided (Supplementary Datasets S6-S7). **Table 4. Example datasets included with VESIcal**pd.read_excel("tables/Table_Example_Data.xlsx", index_col="Filename")Defining a single sampleMore advanced functionality of VESIcal is facilitated directly through the five core calculation classes. Each calculation requires its own unique inputs, but all calculations require that a sample composition be passed. We can pass in a sample either as a python dictionary or pandas Series. Below, we define a sample and name it `mysample`. Oxides are given in wt%. Only the oxides shown here can be used, but not all oxides are required. Any extra oxides (or other information not in the oxide list) the user defines will be ignored during calculations.Much like is done to create a BatchFile object, we can create a VESIcal Sample object to represent our sample composition.**Class structure:** `Sample(composition, units='wtpt_oxides', default_normalization='none', default_units='wtpt_oxides')`**Required inputs:**>`composition`: The composition of the sample in the format specified by the units parameter. The default is oxides in wt%.**Optional inputs:**>`units`, `default_normalization`, and `default_units` have the same meaning here as in the BatchFile class desribed above.**Outputs:**>A special type of python object defined in the VESIcal code known as a Sample object.To manually input a bulk composition, fill in the oxides in wt% below:mysample = v.Sample({'SiO2': 77.3, 'TiO2': 0.08, 'Al2O3': 12.6, 'Fe2O3': 0.207, 'Cr2O3': 0.0, 'FeO': 0.473, 'MnO': 0.0, 'MgO': 0.03, 'NiO': 0.0, 'CoO': 0.0, 'CaO': 0.43, 'Na2O': 3.98, 'K2O': 4.88, 'P2O5': 0.0, 'H2O': 6.5, 'CO2': 0.05})To see the composition of mysample, use the `get_composition(species=None, normalization=None, units=None, exclude_volatiles=False, asSampleClass=False)` method. By default, the composition is returned exactly as input above. `species` can be set as an element or oxide (e.g., "Si" or "SiO$_2$")to return the float value for only that species. The composition can automatically be normalized using any of the standard normalization functions listed above and can be returned in any of the units discussed above. As with the `BatchFile.get_data()` function, a sample composition can be returned as a dictionary (default) or as a VESIcal Sample object (if `asSampleClass` is set to True).mysample.get_composition()The oxides considered by VESIcal are:print(v.oxides)['SiO2', 'TiO2', 'Al2O3', 'Fe2O3', 'Cr2O3', 'FeO', 'MnO', 'MgO', 'NiO', 'CoO', 'CaO', 'Na2O', 'K2O', 'P2O5', 'H2O', 'CO2']Extracting a single sample from a batch fileDefined within the `BatchFile()` class, the method `get_sample_composition()` allows for the extraction of a melt composition from a loaded Excel or CSV file.**Method structure:** `myfile.get_sample_composition(samplename, species=None, normalization=None, units=None, asSampleClass=False)`**Required inputs:**>`samplename`: The name of the sample, as a string, as defined in the 'Label' column of the input file.**Optional inputs:**>`species`: This is used if only the concentration of a single species (either oxide or element) is desired.>`normalization`: This is optional and determines the style of normalization performed on a sample. The default value is `None`, which returns the value-for-value un-normalized composition. Other normalization options are described in the BatchFile class description above.>`units`: The default is wt% oxides. 
Other options are described in the BatchFile class description above.

>`asSampleClass`: Can be `True` or `False` (default). If set to `False`, this will return a dictionary with compositional values. If set to `True`, this will return a Sample object with compositional data stored within.

**Outputs:**

>The bulk composition stored in a dictionary or Sample object.

"""To get composition from a specific sample in the input data:"""
sample_10 = myfile.get_sample_composition('10*', asSampleClass=True)

"""To see the extracted sample composition, uncomment the line below by removing the # and execute this code cell"""
#sample_10.get_composition()

Normalizing and transforming data

Before performing model calculations on your data, it may be desirable to normalize the input composition to a total of 100 wt%. For a user to decide whether normalization is prudent, it is important to understand the influence that any normalization, or lack thereof, will have on modeling results. Electron microprobe analyses of major elements in silicate glasses combined with volatile element analyses by SIMS and FTIR often sum to less than 100 wt%. This deficiency is normally attributed to subsurface charging, matrix corrections, and unknown redox states of Fe and S during analyses by electron microprobe (see Hughes et al., 2019). As an example, when normalized, a volatile-free basalt with a measured SiO$_2$ content of 46 wt% and an analytical total of 97 wt% actually contains 47.4 wt% SiO$_2$ (46/0.97; a 3% relative change in silica content). Many studies report major element data normalized to 100% with volatiles listed separately. The result is that, value for value, literature datasets can have totals several wt% less than 100 (if raw data are reported) or several wt% higher than 100 (if major elements are normalized anhydrous).

To deal with this variation, VESIcal provides users with four options for normalization. Normalization types are:

- None: no normalization.
- 'standard': Normalizes an input composition to 100%.
- 'fixedvolatiles': Normalizes major element oxides to 100 wt%, including volatiles. The volatile wt% will remain fixed, whilst the other major element oxides are reduced proportionally so that the total is 100 wt%.
- 'additionalvolatiles': Normalizes major element oxide wt% to 100%, assuming it is volatile-free. If H$_2$O or CO$_2$ are passed to the function, their un-normalized values will be retained in addition to the normalized non-volatile oxides, summing to >100%.

Normalization can be performed on a Sample object or on all samples within a BatchFile object using the `get_composition()` or `get_data()` methods (e.g., `myfile.get_composition(normalization='standard')` or `mysample.get_composition(normalization='additionalvolatiles')`). Note that, since a BatchFile object may have other data in addition to sample compositions (e.g., information on pressure, temperature, other user notes), `BatchFile.get_composition()` returns only compositional data, whereas `BatchFile.get_data()` returns all data stored in the BatchFile object. The `normalization` argument can be passed to either. In the example below, we obtain the standard normalization of mysample and myfile and save these to new Sample and BatchFile objects called mysample_normalized and myfile_normalized. Note that `asSampleClass` or `asBatchFile` must be set to True in order to return a Sample or BatchFile object.
Without this argument, a dictionary or pandas DataFrame will be returned, and new Sample or BatchFile objects would need to be constructed from those in order to perform calculations on the normalized datasets.

"""Retrieve the standard normalization for one sample"""
mysample_normalized = mysample.get_composition(normalization="standard", asSampleClass=True)
#print(mysample_normalized.get_composition())

"""Retrieve the standard normalization for all samples in a BatchFile"""
myfile_normalized = myfile.get_data(normalization="standard", asBatchFile=True)
#print(myfile_normalized.get_data())

The Liu and all six AllisonCarbon models are not sensitive to normalization because they contain no compositional terms. Similarly, the expressions for Shishkina and MooreWater contain compositional terms expressed solely in terms of anhydrous cation fractions; the `additionalvolatiles` and `fixedvolatiles` normalization routines do not affect the relative abundances of major elements (and therefore anhydrous cation fractions). Thus, Shishkina and MooreWater are only affected by the standard normalization routine. In contrast, the Dixon model is highly sensitive to the choice of normalization because its compositional term for both H$_2$O and CO$_2$ is expressed solely in terms of the absolute melt SiO$_2$ content. The expressions of Iacono-Marziano are parameterized in terms of hydrous cation fractions and NBO/O, and so this model is sensitive to the additionalvolatiles and fixedvolatiles normalization routines, which will change the relative proportions of volatiles to major elements. Even so, the effect of normalization on volatile solubility calculations is relatively small and of similar magnitude to the discrepancy between the hydrous total and 100 wt%. Thus, the choice of normalization is only important when data have hydrous totals that differ significantly from 100%. The Iacono-Marziano web app normalizes input data in the same manner as VESIcal's additionalvolatiles normalization routine. For consistency with the web app, VESIcal automatically uses the additionalvolatiles normalization during calculations with this model.

The implementation of MagmaSat in VESIcal is sensitive to the relative proportions of major and volatile element components rather than the absolute concentrations entered (as with the whole MELTS family of models). Thus, calculations using the raw, fixedvolatiles, and additionalvolatiles routines yield different results. If the hydrous total of an input composition is less than 100%, the fixedvolatiles routine effectively reduces the relative proportion of volatiles to major elements, so calculated saturation pressures go down. Conversely, if inputs have high hydrous totals, the fixedvolatiles routine increases the relative proportion of volatiles in the system, so the saturation pressure goes up. As with Iacono-Marziano, the percent discrepancy between calculations for different normalization routines is similar to the difference between the total and 100%. For saturation pressure calculations, the MagmaSat app automatically normalizes input data in the same manner as VESIcal's fixedvolatiles routine. Thus, we suggest that users normalize their inputs using fixedvolatiles for consistency with previous studies. However, for maximum flexibility, no normalization is forced on inputs, and so it must be set by the user if desired.
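As a minimal sketch of that suggestion (not a prescribed workflow), the input data loaded above can be normalized with the fixedvolatiles routine and then passed to a batch saturation-pressure calculation; the call below follows the generic batch format described earlier, and the 'Temp' column name assumes the example_data.xlsx file.

```python
# Normalize the whole BatchFile with the 'fixedvolatiles' routine, then run a
# batch MagmaSat saturation-pressure calculation using per-sample temperatures
# stored in the 'Temp' column of the example dataset.
myfile_fixed = myfile.get_data(normalization="fixedvolatiles", asBatchFile=True)
satPs_fixed = myfile_fixed.calculate_saturation_pressure(temperature='Temp')
```
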
Further discussion on the effect of normalization in MagmaSat is provided in Supplementary Text S5 (and Supplementary Figs. S22-S26).

For example, consider a basalt with a measured SiO$_2$ content of 47.4 wt%, 1000 ppm dissolved CO$_2$, and an anhydrous (volatile-free) total of 96.77 wt%:

mybasalt = v.Sample({'SiO2': 47, 'TiO2': 1.01, 'Al2O3': 17.46, 'Fe2O3': 0.89, 'FeO': 7.18, 'MgO': 7.63, 'CaO': 12.44, 'Na2O': 2.65, 'K2O': 0.03, 'P2O5': 0.08, 'CO2': 0.1})

We can apply each normalization routine to this sample and examine how this will affect the saturation pressure predicted by each model:

"""Normalize three ways"""
mybasalt_std = mybasalt.get_composition(normalization="standard", asSampleClass=True)
mybasalt_add = mybasalt.get_composition(normalization="additionalvolatiles", asSampleClass=True)
mybasalt_fix = mybasalt.get_composition(normalization="fixedvolatiles", asSampleClass=True)

"""Choose a model to test"""
mymodel = "IaconoMarziano"

for basalt, normtype in zip([mybasalt, mybasalt_std, mybasalt_add, mybasalt_fix], ["Raw", "standard", "additionalvolatiles", "fixedvolatiles"]):
    print(str(normtype) + " Saturation Pressure = " + str(v.calculate_saturation_pressure(sample=basalt, temperature=1200, model=mymodel).result))

Raw Saturation Pressure = 1848.031831425599
standard Saturation Pressure = 1906.545378962788
additionalvolatiles Saturation Pressure = 1848.2673972122493
fixedvolatiles Saturation Pressure = 1848.2611364359402

Because the compositional effect on H$_2$O solubility is smaller, so are the changes in calculated saturation pressures for a pure-H$_2$O system, but they can still be significant for H$_2$O-rich liquids (where high H$_2$O contents can change totals and therefore SiO$_2$ contents more dramatically).

Comparing User Data to Model Calibrations: Which Model Should I Use?

MagmaSat is the most thermodynamically robust model implemented in VESIcal, and thus it is the most generally appropriate model to use (n.b. it is also the most computationally expensive). However, one of the strengths of VESIcal is its ability to utilize up to seven different solubility models. Each of these models is based on its own calibration dataset, meaning the pressure-temperature-composition space over which models are calibrated is quite variable from model to model. The individual model calibrations are discussed in detail in this manuscript's companion paper (VESIcal Part II; Wieser et al., in prep).

For the remainder of this section, all example calculations are carried out with MagmaSat, the default model of VESIcal. To use any other VESIcal model, simply add `model=` and the name of the desired model in quotes to any calculation (e.g., `v.calculate_dissolved_volatiles(temperature=900, pressure=1000, model="Dixon")`). The model names recognized by VESIcal are: MagmaSat, ShishkinaIdealMixing, Dixon, IaconoMarziano, Liu, AllisonCarbon, MooreWater. For more advanced use cases such as hybridizing models (see Section 3.9), pure-H$_2$O and pure-CO$_2$ models from within a mixed-fluid model can be used by adding 'Water' or 'Carbon' to the model name (e.g., DixonCarbon; note that MagmaSat does not have this functionality).

Determination of the appropriate model to use with any sample is crucial to the correct application of these models, and so we stress the importance of understanding how a model's calibration space relates to the sample at hand.
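Before turning to the calibration-comparison tools described next, the sketch below illustrates how the `model` argument switches between models for a single sample, re-using the `mybasalt` composition defined above. The list of models here is an arbitrary selection of CO$_2$-bearing models, chosen only because `mybasalt` contains CO$_2$ but no H$_2$O; any recognized model name could be substituted.

```python
# Hypothetical comparison: saturation pressure of the basalt defined above,
# calculated with several different models by changing only the model argument.
for model_name in ["Dixon", "IaconoMarziano", "ShishkinaIdealMixing", "AllisonCarbon"]:
    satP = v.calculate_saturation_pressure(sample=mybasalt, temperature=1200,
                                           model=model_name).result
    print(model_name + " Saturation Pressure = " + str(satP))
```
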
VESIcal includes some built-in functionality for comparing melt compositions from user-loaded data to those in the datasets upon which each of the VESIcal models is calibrated, using the method `calib_plot`. This can be visualized as a total alkalis vs silica (TAS) diagram (with fields and labels plotted via the python tasplot library; https://bitbucket.org/jsteven5/tasplot/src/master/; Fig. 5a) or as any x-y plot in which x and y are oxides (Fig. 5b).

**Method structure:** `calib_plot(user_data=None, model='all', plot_type='TAS', zoom=None, save_fig=False)`

**Optional inputs:**

>`user_data`: The default value is None, in which case only the model calibration set is plotted. User-provided sample data describing the oxide composition of one or more samples. Multiple samples can be passed as a BatchFile object or pandas DataFrame. A single sample can be passed as a pandas Series.

>`model`: The default value is 'all', in which case all model calibration datasets will be plotted. Otherwise, any model can be plotted by passing the name of the model desired (e.g., 'Liu'). Multiple models can be plotted by passing them as strings within a list (e.g., ['Liu', 'Dixon']).

>`plot_type`: The default value is 'TAS', which returns a total alkalis vs silica (TAS) diagram. Any two oxides can be plotted as an x-y plot by setting plot_type='xy' and specifying x- and y-axis oxides, e.g., x='SiO2', y='Al2O3'.

>`zoom`: The default is None, in which case axes will be set to the default ranges of 35≤x≤100 wt% and 0≤y≤25 wt% for TAS-type plots, or scaled to best show the data for xy-type plots. The user can pass "user_data" to scale the x and y axes down so that the plot zooms in on the region surrounding the user data. Alternatively, a list of tuples may be passed to manually specify x and y limits, as [(x_min, x_max), (y_min, y_max)]. For example, the default limits here would be passed in as [(35,100), (0,25)].

>`save_fig`: The default value is False, in which case the plot will be generated and displayed but not saved. If the user wishes to save the figure, the desired filename (including the file extension, e.g., .png) can be passed here. Note that all plots in this Jupyter notebook can be saved by right clicking the plot and choosing "Save Image As...".

**Outputs:**

>A TAS or x-y plot of user data and model calibration data.

**Figure 5**

Figure 5a.

v.calib_plot(user_data=myfile)

Figure 5b.

v.calib_plot(user_data=myfile, model='IaconoMarziano', plot_type='xy', x='SiO2', y='K2O', save_fig=False)

*Figure 5: Example calibration plots. a. The default plot with user_data defined as myfile and no other options set. This produces a TAS diagram with the user data plotted atop data from calibration datasets for all models. b. A plot with all options specified. This example produces an x-y plot for user_data (myfile) and the Iacono-Marziano calibration dataset where x and y are SiO*$_2$ *and K*$_2$*O concentration in wt%. Symbol shapes correspond to the volatile composition of experiments used to calibrate the model.*

Using the functionality built into python and the matplotlib library, user data can be plotted on its own at any time, including before any calculations are performed. Almost any plot type imaginable can be produced, and users should refer to the matplotlib documentation (https://matplotlib.org/3.2.1/index.html) if more complex plotting is desired.
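For instance, a minimal matplotlib sketch along these lines plots the measured H$_2$O and CO$_2$ contents of the imported samples directly from the pandas DataFrame returned by `get_data()` (the column names assume the example_data.xlsx file loaded above):

```python
import matplotlib.pyplot as plt

# Plot dissolved H2O vs. CO2 for the samples imported earlier in this section.
data = myfile.get_data()
plt.scatter(data['H2O'], data['CO2'])
plt.xlabel('H$_2$O (wt%)')
plt.ylabel('CO$_2$ (wt%)')
plt.show()
```
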
Calculating dissolved volatile concentrationsThe `calculate_dissolved_volatiles()` function calculates the concentration of dissolved H$_2$O and CO$_2$ in the melt at a given pressure-temperature condition and with a given H$_2$O-CO$_2$ fluid composition, defined as the mole fraction of H$_2$O in an H$_2$O-CO$_2$ fluid (XH$_2$O$^{fluid}$). The default MagmaSat model relies on the underlying functionality of MELTS, whose basic function is to calculate the equilibrium phase assemblage given the bulk composition of the system and pressure-temperature conditions. To calculate dissolved volatile concentrations thus requires computing the equilibrium state of a system at fixed pressure and temperature over a range of bulk volatile concentrations until a solution is found that satisfies the user defined fluid composition.First, the function makes an initial guess at the appropriate bulk volatile concentrations by finding the minimum dissolved volatile concentrations in the melt at saturation, while asserting that the weight fraction of H$_2$O/(H$_2$O+CO$_2$) in the system is equal to the user input mole fraction of H$_2$O/(H$_2$O+CO$_2$) in the fluid. This is done by increasing the H$_2$O and CO$_2$ concentrations appropriately until a fluid phase is stable. Once fluid saturation is determined, the code then performs directional, iterative, and progressively more refined searches, increasing the proportion of H$_2$O or CO$_2$ in the system if the mole fraction of H$_2$O calculated in the fluid is greater than or less than that defined by the user, respectively. Four iterative searches are performed; the precision of the match between the calculated and defined XH$_2$O$^{fluid}$ increases from 0.1 in the first iteration to 0.01, 0.001, and finally to 0.0001. Thus, the calculated dissolved volatile concentrations correspond to a system with XH$_2$O$^{fluid}$ within 0.0001 of the user defined value.For non-MagmaSat models, dissolved volatile concentrations are calculated directly from model equations.**Method structure:**> Single sample: `calculate_dissolved_volatiles(sample, temperature, pressure, X_fluid=1, verbose=False, model='MagmaSat').result`>BatchFile batch process: `myfile.calculate_dissolved_volatiles(temperature, pressure, X_fluid=1, print_status=True, model='MagmaSat')`**Standard inputs:**>`sample`, `temperature`, `pressure`, `X_fluid`, `model` (see Section 3.0.1).**Unique optional inputs:**>`verbose`: *Only for single sample calculations.* Default value is False, in which case H$_2$O and CO$_2$ concentrations are returned. If set to True, additional parameters are returned in a dictionary: H$_2$O and CO$_2$ concentrations in the fluid in mole fraction, temperature, pressure, and proportion of the fluid in the system in wt%.> `print_status`: *Only for batch calculations.* The default value is True, in which case the progress of the calculation will be printed to the terminal. The user may desire to see the status of the calculation, as this particular function can be quite slow, averaging between 3-5 seconds per sample.**Calculated outputs:**>If the single-sample method is used, a dictionary with keys 'H2O' and 'CO2' corresponding to the calculated dissolved H$_2$O and CO$_2$ concentrations in the melt is returned (plus additional variables 'temperature' in $^{\circ}$C, 'pressure' in bars, 'XH2O_fl', 'XCO2_fl', and 'FluidProportion_wtper' (the proportion of the fluid in the system in wt%) if `verbose` is set to True). 
>If the BatchFile method is used, a pandas DataFrame is returned with sample information plus calculated dissolved H$_2$O and CO$_2$ concentrations in the melt, the fluid composition in mole fraction, and the proportion of the fluid in the system in wt%. Pressure (in bars) and Temperature (in $^{\circ}$C) columns are always returned."""Calculate dissolved volatiles for sample 10*""" v.calculate_dissolved_volatiles(sample=sample_10, temperature=900.0, pressure=2000.0, X_fluid=0.5, verbose=True).result """Calculate dissolved for all samples in an BatchFile object""" dissolved = myfile.calculate_dissolved_volatiles(temperature=900.0, pressure=2000.0, X_fluid=1, print_status=True)[====================] 100% Working on sample KI-07**Table 5. Modeled dissolved volatile concentrations**dissolvedCalculating equilibrium fluid compositionsThe `calculate_equilibrium_fluid_comp()` function calculates the composition of a fluid phase in equilibrium with a given silicate melt with known pressure, temperature, and dissolved H$_2$O and CO$_2$ concentrations. The calculation is performed simply by calculating the equilibrium state of the given sample at the given conditions and determining if that melt is fluid saturated. If the melt is saturated, fluid composition and mass are reported back. If the calculation finds that the melt is not saturated at the given pressure and temperature, values of 0.0 will be returned for the H$_2$O and CO$_2$ concentrations in the fluid.**Method structure:**>Single sample: `calculate_equilibrium_fluid_comp(sample, temperature, pressure, verbose=False, model='MagmaSat').result`>BatchFile batch process: `myfile.calculate_equilibrium_fluid_comp(temperature, pressure=None, print_status=False, model='MagmaSat')`**Standard inputs:**>`sample`, `temperature`, `pressure`, `model` (see Section 3.0.1).**Unique optional inputs:**>`verbose`: *Only for single sample calculations.* Default value is False, in which case H$_2$O and CO$_2$ concentrations in the fluid in mol fraction are returned. If set to True, additional parameters are returned in a dictionary: H$_2$O and CO$_2$ concentrations in the fluid, mass of the fluid in grams, and proportion of the fluid in the system in wt%. >`print_status`: *Only for batch calculations.* The default value is False. If True is passed, the progress of the calculation will be printed to the terminal.**Calculated outputs:**>If the single-sample method is used, a dictionary with keys 'H2O' and 'CO2' is returned (plus additional variables 'FluidMass_grams' and 'FluidProportion_wtper' if `verbose` is set to True). >If the BatchFile method is used, a pandas DataFrame is returned with sample information plus calculated equilibrium fluid compositions, mass of the fluid in grams, and proportion of the fluid in the system in wt%. Pressure (in bars) and Temperature (in $^{\circ}$C) columns are always returned."""Calculate fluid composition for the extracted sample""" v.calculate_equilibrium_fluid_comp(sample=sample_10, temperature=900.0, pressure=100.0).resultBelow we calculate equilibrium fluid compositions for all samples at a single temperature of 900 $^{\circ}$C and a single pressure of 1,000 bars. Note that some samples in this dataset have quite low volatile concentrations (e.g., the Tucker et al. (2019) basalts from Kilauea), and so are below saturation at this P-T condition. The fluid composition for undersaturated samples is returned as values of 0 for both H$_2$O and CO$_2$. **Table 6. 
Isothermally modeled equilibrium fluid compositions**"""Calculate fluid composition for all samples in an BatchFile object""" eqfluid = myfile.calculate_equilibrium_fluid_comp(temperature=900.0, pressure=1000.0) eqfluidBelow, we calculate equilibrium fluid compositions for the same dataset using temperatures and pressures as defined in the input data (Table 3). Note that Samples "samp. HPR3-1_XL-3" and "samp. HPR3-1_XL-4_INCL-1" have a user-defined value of 0.0 for temperature and pressure, respectively. VESIcal automatically skips the calculation of equilibrium fluids for these samples and returns a warning to the user, which are both printed to the terminal below and appended to the "Warnings" column in the returned data.**Table 7. Modeled equilibrium fluid compositions with unique temperatures.** Warnings “Bad temperature” and “Bad pressure” indicate that no data (or 0.0 value data) was given for the temperature or pressure of that sample, in which case the calculation of that sample is skipped."""Calculate fluid composition for all samples with unique pressure and temperature values for each sample. Pressure and temperature values are taken from columns named "Press" and "Temp" in the example BatchFile""" eqfluid_wtemps = myfile.calculate_equilibrium_fluid_comp(temperature='Temp', pressure='Press') eqfluid_wtemps/opt/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:4: UserWarning: Temperature for sample samp. HPR3-1_XL-3 is <=0. Skipping sample. after removing the cwd from sys.path. /opt/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:4: UserWarning: Pressure for sample samp. HPR3-1_XL-4_INCL-1 is <=0. Skipping sample. after removing the cwd from sys.path.Converting fluid composition unitsThe fluid composition is always returned in units of mol fraction. Two functions exist to transform only the H$_2$O-CO$_2$ fluid composition between mol fraction and wt% and can be applied to returned data sets from calculations. Both functions require that the user provide the dataframe containing fluid composition information plus the names of the columns corresponding to the H$_2$O and CO$_2$ concentrations in the fluid. The default values for column names are set to those that may be returned by VESIcal core calculations, such that they need not be specified unless the user has changed them or is supplying their own data (e.g., imported data not processed through a core calculation).**Method structure:**>Mol fraction to wt%: `fluid_molfrac_to_wt(data, H2O_colname='XH2O_fl_VESIcal', CO2_colname='XCO2_fl_VESIcal')`>Wt% to mol fraction: `fluid_wt_to_molfrac(data, H2O_colname='H2O_fl_wt', CO2_colname='CO2_fl_wt')`**Required inputs:**>`data`: A pandas DataFrame containing columns for H$_2$O and CO$_2$ concentrations in the fluid.**Optional inputs:**>`H2O_colname` and `CO2_colname`: The default values are 'XH2O_fl' and 'XCO2_fl' if input data are in mol fraction or 'H2O_fl_wt' and 'CO2_fl_wt' if the data are in wt%. Strings containing the name of the columns corresponding to the H$_2$O and CO$_2$ concentrations in the fluid.**Calculated outputs:**>The original data passed plus newly calculated values are returned in a DataFrame. **Table 8. Equilibrium fluid compositions converted from mol fraction to wt%**"""Converting from mol fraction to wt%""" eqfluid_wt = v.fluid_molfrac_to_wt(eqfluid) eqfluid_wt**Table 9. 
Equilibrium fluid compositions converted from wt% to mol fraction**"""Converting from wt% to mol fraction""" eqfluid_mol = v.fluid_wt_to_molfrac(eqfluid_wt) eqfluid_molCalculating saturation pressuresThe `calculate_saturation_pressure()` function calculates the minimum pressure at which a given silicate melt with known temperature and H$_2$O and CO$_2$ concentrations would be saturated with fluid. For MagmaSat, this is calculated by finding the pressure at which the smallest amount of vapor is present. This function also calculates the composition of the vapor in equilibrium with the melt at those conditions.The function works by calculating the equilibrium state of the given melt at very high pressure (20,000 bars). If no fluid is present at this pressure, the melt is undersaturated, and pressure is decreased in steps of 1,000 bars until the mass of vapor is >0 grams. If fluid is present, the saturation limit is found by increasing the pressure iteratively until the point at which no fluid is present. At this point, the pressure space is narrowed and searched in steps of 100 bars and then in steps of 10 bars until the saturation pressure is found. Thus, these calculations are accurate to 10 bars.For non-MagmaSat models, we use Brent's minimization method (via scipy's root_scalar optimization function) to find the pressure that satisfies the computational constraints. This is achieved by iterative calculation of the dissolved volatile concentration over a range of pressures and minimizing the difference between computed and given concentrations. This is only practical for non-MagmaSat models, where the dissolved volatiles calculation is extremely fast.**Method structure:**>Single sample: `calculate_saturation_pressure(sample, temperature, verbose=False, model='MagmaSat').result`>BatchFile batch process: `myfile.calculate_saturation_pressure(temperature, print_status=True, model='MagmaSat')`**Standard inputs:**>`sample`, `temperature`, `model` (see Section 3.0.1).**Unique optional inputs:**> `verbose`: *Only for single sample calculations.* Default value is False, in which case the saturation pressure in bars is returned. If set to True, additional parameters are returned in a dictionary: saturation pressure in bars, H$_2$O and CO$_2$ concentrations in the fluid, mass of the fluid in grams, and proportion of the fluid in the system in wt%.> `print_status`: *Only for batch calculations.* The default value is True, in which case the progress of the calculation will be printed to the terminal.**Calculated outputs:**>If the single-sample method is used, the saturation pressure in bars is returned as a numerical value (float) (plus additional variables 'XH2O_fl', 'XCO2_fl', 'FluidMass_grams', and 'FluidProportion_wtper' if `verbose` is set to True). >If the BatchFile method is used, a pandas DataFrame is returned with sample information plus calculated saturation pressures, equilibrium fluid compositions, mass of the fluid in grams, and proportion of the fluid in the system in wt%. Temperature (in $^{\circ}$C) is always returned."""Calculate the saturation pressure of the single sample we defined in Section 3.1.2 at 925 degrees C""" v.calculate_saturation_pressure(sample=mysample, temperature=925.0, verbose=True).result**Table 10. 
Isothermally modeled saturation pressures**"""Calculate the saturation pressure for all samples in a BatchFile object at 925 degrees C""" satPs = myfile.calculate_saturation_pressure(temperature=925.0) satPs[====================] 100% Working on sample KI-07**Table 11. Modeled saturation pressures with unique temperatures** The warning “Bad temperature” indicates that no data (or 0.0 value data) was given for the temperature of that sample, in which case the calculation of that sample is skipped."""Calculate the saturation pressure for all samples in a BatchFile object, taking temperature values from a column named "Temp" in the BatchFile""" satPs_wtemps = myfile.calculate_saturation_pressure(temperature="Temp") satPs_wtemps[================ ] 82% Working on sample samp. HPR3-1_XL-4_INCL-1Calculating isobars and isoplethsIn this example, we demonstrate how isobars (lines of constant pressure) and isopleths (lines of constant fluid composition) can be calculated for any one composition. A single melt composition can be extracted from a loaded batch file, or a composition can be entered by hand and stored within a dictionary. Due to computational intensity, isobars and isopleths can only be computed for one sample composition at a time.Once a single composition is defined, conditions over which to calculate isobars and isopleths must be specified. The generated plot is isothermal, so only one temperature can be chosen. Isobars and isopleths can be calculated for any number of pressures or XH$_2$O$^{fluid}$ values, respectively, passed as lists. The calculation is performed by iterating through possible concentrations of H$_2$O and CO$_2$ and calculating the equilibrium state for the system. The iteration begins at a fixed H$_2$O concentration, increasing the CO$_2$ concentration in steps of 0.1 wt% until a fluid phase is stable. The H$_2$O concentration is then increased by 0.5 wt% and CO$_2$ is again increased from 0 until a fluid phase is stable. This process is repeated for H$_2$O values ranging from 0–15 wt%. The H$_2$O and CO$_2$ concentrations from each system for which a fluid phase was found to be stable are saved and written to a pandas DataFrame, which is returned upon completion of the calculation.Isobars and isopleths are computed at fixed H$_2$O-CO$_2$ points for any given pressure. To generate curves using the MagmaSat model, polynomials are fit to the computed points using numpy's polyfit method. This can optionally be disabled by setting `smooth_isobars` or `smooth_isopleths` to False. The curvature of the isobars depends strongly on the number of points used to fit a polynomial, termed "control points", with curve fits becoming more faithful to the model as the number of control points increases. We found that above five control points, changes to the shape of the curve fits become negligible. Thus, as a compromise between accuracy and computation time, and to maintain consistency, MagmaSat isobars are always computed with 5 control points at XH$_2$O$^{fluid}$ values of 0, 0.25, 0.5, 0.75, and 1; a schematic example of such a polynomial fit is sketched below.
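The following is a schematic illustration of this smoothing step only, not VESIcal's internal implementation; the H$_2$O and CO$_2$ control-point values are invented for a single hypothetical isobar, purely to show how numpy's polyfit is used to turn a handful of control points into a smooth curve:

import numpy as np

# Hypothetical dissolved H2O and CO2 (wt%) at the five control points
# XH2Ofluid = 1, 0.75, 0.5, 0.25, 0 along one isobar (illustrative values only).
h2o_pts = np.array([4.4, 3.8, 3.0, 1.9, 0.0])
co2_pts = np.array([0.00, 0.13, 0.24, 0.33, 0.38])

# Fit a low-order polynomial through the control points and evaluate it
# on a fine H2O grid to obtain a smooth isobar curve.
coeffs = np.polyfit(h2o_pts, co2_pts, deg=2)
h2o_smooth = np.linspace(h2o_pts.min(), h2o_pts.max(), 100)
co2_smooth = np.polyval(coeffs, h2o_smooth)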
Because non-MagmaSat models compute extremely quickly, all non-MagmaSat models use 51 control points per isobar and do not utilize polynomial fits to the data by default.**Method structure:**>*Only single sample calculations.* `calculate_isobars_and_isopleths(sample, temperature, pressure_list, isopleth_list=None, smooth_isobars=True, smooth_isopleths=True, print_status=True, model="MagmaSat").result`**Standard inputs:**>`sample`, `temperature`, `model` (see Section 3.0.1).**Unique required inputs:**>`pressure_list`: A list of all pressures in bars at which to calculate isobars. If only one value is passed, it can be given as a float instead of a list.**Unique optional inputs:**>`isopleth_list`: The default value is None, in which case only isobars will be calculated. A list of all fluid composition values, in mole fraction H$_2$O (XH$_2$O$^{fluid}$), at which to calculate isopleths. Values can range from 0–1. If only one value is passed, it can be given as a float instead of a list. Note that, due to the method of isobar smoothing using control points as outlined above, each isopleth value passed here that is not equal to one of the five standard control point values (0, 0.25, 0.5, 0.75, or 1) will result in an additional control point being used to smooth the isobars. Thus, entering additional isopleth values results not only in more isopleth outputs but also in "smoother" (i.e., better constrained) isobars.>`smooth_isobars` and `smooth_isopleths`: The default value for both of these arguments is True, in which case polynomials will be fit to the computed data points.>`print_status`: The default value is True. If True, the progress of the calculations will be printed to the terminal.**Calculated outputs:** >The function returns two pandas DataFrames: the first has isobar data, and the second has isopleth data. Columns in the isobar dataframe are 'Pressure', 'H2Omelt', and 'CO2melt', corresponding to pressure in bars and dissolved H$_2$O and CO$_2$ in the melt in wt%. Columns in the isopleth dataframe are 'XH2O_fl', 'H2O_liq', and 'CO2_liq', corresponding to XH$_2$O$^{fluid}$ and dissolved H$_2$O and CO$_2$ in the melt in wt%."""Define all variables to be passed to the function for calculating isobars and isopleths""" """Define the temperature in degrees C""" temperature = 1200.0 """Define a list of pressures in bars:""" pressures = [1000.0, 2000.0, 3000.0]Next, the H$_2$O and CO$_2$ dissolved in the melt at saturation are calculated at the specified temperature and over the range of specified pressures. Note that, because this function calculates two things (isobars and isopleths), two variable names must be given (below, "isobars, isopleths"). This calculation can be quite slow, so it is recommended to set print_status to True.isobars, isopleths = v.calculate_isobars_and_isopleths(sample=sample_10, temperature=temperature, pressure_list=pressures, isopleth_list=[0.25,0.5,0.75]).resultCalculating isobar at 1000.0 bars done. Calculating isobar at 2000.0 bars done. Calculating isobar at 3000.0 bars done. Done!Calculating degassing pathsA degassing path is a series of volatile concentrations, both in the melt and fluid, that a magma will follow during decompression. In the calculation, the saturation pressure is computed, and then the system is equilibrated along a trajectory of decreasing pressure values at discrete steps. The default number of steps to calculate is 50, but this can be defined by the user by setting the argument `steps` to any integer value.
A detailed explanation of how non-MagmaSat models handle the calculation of mixed-fluid composition can be found in the supplement (Supplementary Text S2). If so desired, this calculation can be performed for any initial pressure, but the default is the saturation pressure. If a pressure is specified that is above the saturation pressure, the calculation will simply proceed from the saturation pressure, since the magma cannot degas until it reaches saturation. Completely open-system, completely closed-system or partially open-system degassing paths can be calculated by specifying what proportion of the fluid to fractionate. The fluid fractionation value can range between 0 (closed-system: no fluid is removed, all is retained at each pressure step) and 1 (open-system: all fluid is removed, none is retained at each pressure step). Closed and partially open-system runs allow the user to specify the initial presence of exsolved fluid that is in equilibrium with the melt at the starting pressure.**Method structure:**>*Only single-sample calculations.* `calculate_degassing_path(sample, temperature, pressure="saturation", fractionate_vapor=0.0, init_vapor=0.0, steps=50, model="MagmaSat").result`**Standard inputs:**>`sample`, `temperature`, `model` (see Section 3.0.1).**Unique optional inputs:**>`pressure`: The pressure at which to begin the degassing calculations, in bars. Default value is 'saturation', which runs the calculation with the initial pressure at the saturation pressure. If a pressure greater than the saturation pressure is input, the calculation will start at saturation, since this is the first pressure at which any degassing will occur.>`fractionate_vapor`: Proportion of vapor removed at each pressure step. Default value is 0.0 (completely closed-system degassing). Specifies the type of calculation performed, either closed system (0.0) or open system (1.0) degassing. If any value between <1.0 is chosen, user can also specify the 'init_vapor' argument (see below). A value in between 0 and 1 will remove that proportion of vapor at each step. For example, for a value of 0.2, the calculation will remove 20% of the vapor and retain 80% of the vapor at each pressure step.>`init_vapor`: Default value is 0.0. Specifies the amount of vapor (in wt%) coexisting with the melt before degassing.>`steps`: Default value is 50. 
Specifies the number of steps in pressure space at which to calculate dissolved volatile concentrations.**Calculated outputs:** >The function returns a pandas DataFrame with the following columns: 'Pressure_bars', 'H2O_liq' and 'CO2_liq' (the concentration of H$_2$O and CO$_2$ in the melt, in wt%), 'XH2O_fl' and 'XCO2_fl' (the composition of the H$_2$O-CO$_2$ fluid, in mol fraction), and 'FluidProportion_wt' (the proportion of fluid in the fluid-melt system, in wt%).**Note**: The following two cells can take up to a minute to execute.temp = 1200 #temperature in °C """Calculate open, closed, and closed + 2 wt% initial vapor""" closed_df = v.calculate_degassing_path(sample=sample_10, temperature=temp).result open_df = v.calculate_degassing_path(sample=sample_10, temperature=temp, fractionate_vapor=1.0).result half_df = v.calculate_degassing_path(sample=sample_10, temperature=temp, fractionate_vapor=0.5).result exsolved_df = v.calculate_degassing_path(sample=sample_10, temperature=temp, init_vapor=2.0).result """Calculate closed-system degassing starting from a pressure of 2000 bars""" start2000_df = v.calculate_degassing_path(sample=sample_10, temperature=temp, pressure=2000.0).result[====================] 100% Calculating degassing path... [====================] 100% Calculating degassing path... [====================] 100% Calculating degassing path... [====================] 100% Calculating degassing path... [====================] 100% Calculating degassing path...PlottingAfter calculating isobars, isopleths, and degassing paths, any or all of these may be plotted in an H$_2$O versus CO$_2$ plot with one simple function call. The plot will be printed directly in the notebook or, if the code is run as a script from the command line, the plot will appear in its own window, at which point it can be saved as an image file. VESIcal's `plot` function takes in lists of pandas DataFrames with calculated isobar, isopleth, and degassing path information (e.g., output from `calculate_isobars_and_isopleths()` or `calculate_degassing_path()`) and plots data as isobars (lines of constant pressure), isopleths (lines of constant fluid composition), and degassing paths (lines indicating the concentrations of H$_2$O and CO$_2$ in a melt equilibrated along a path of decreasing pressure).Labels can be assigned to isobars, isopleths, and/or degassing paths separately. Any or all of these data can be passed to the `plot` function. Multiple sets of plottable data can be passed. For example, isobars calculated with two different models can be passed to the `isobars` argument as a list.VESIcal's plotting function is entirely based on python's matplotlib library, which comes standard with many installations of python. With matplotlib, users can create a large variety of plots (note that direct matplotlib functionality is used to create custom plots in several of this manuscript's supplementary Jupyter notebooks), and users should refer to the matplotlib documentation (https://matplotlib.org/3.2.1/index.html) if more complex plotting is desired. If preferred, VESIcal outputs can be saved to an Excel or CSV file (see Section 3.10), and plotting can be done in any plotting program desired (e.g., MS Excel).The function returns both fig and axes matplotlib objects, which can be further edited by the user or plotted directly. Following matplotlib convention, the results of `plot()` should be saved to objects such as fig, ax as:`fig, ax = v.plot([options])` where [options] represents any optional inputs as defined here.
Variables fig and ax can then be edited further using matplotlib tools. For example, the user might wish to set the minimum x-axis value to 0.5 as:`ax.set_xlim(left=0.5)`In Jupyter Notebook, a plot is automatically shown, but in the command line, the plot will only display after executing `v.show()`.**Method structure:**>`plot(isobars=None, isopleths=None, degassing_paths=None, custom_H2O=None, custom_CO2=None, isobar_labels=None, isopleth_labels=None, degassing_path_labels=None, custom_labels=None, custom_colors="VESIcal", custom_symbols=None, markersize=10, save_fig=False, extend_isobars_to_zero=True, smooth_isobars=False, smooth_isopleths=False)`**Optional inputs:**>`isobars`: DataFrame object containing isobar information as calculated by `calculate_isobars_and_isopleths()`. Or a listof DataFrame objects.>`isopleths`: DataFrame object containing isopleth information as calculated by `calculate_isobars_and_isopleths()`. Or a list of DataFrame objects.>`degassing_paths`: List of DataFrames with degassing information as generated by `calculate_degassing_path()`.>`custom_H2O`: List of floats or array-like shapes of H$_2$O concentration values to plot as points. For example `myfile.data['H2O']` is one array-like shape (here, pandas.Series) of H$_2$O values. Must be passed with `custom_CO2` and must be same length as `custom_CO2`.>`custom_CO2`: List of floats or array-like shapes of CO$_2$ values to plot as points. For example `myfile.data['CO2']` is one array-like shape of CO$_2$ values. Must be passed with `custom_H2O` and must be same length as `custom_H2O`.>`isobar_labels`: Labels for the plot legend. Default is None, in which case each plotted line will be given the generic legend name of "Isobars n", with n referring to the nth isobars passed. Isobar pressure is given in parentheses. The user can pass their own labels as a list of strings. If more than one set of isobars is passed, the labels should refer to each set of isobars, not each pressure. >`isopleth_labels`: Labels for the plot legend. Default is None, in which case each plotted isopleth will be given the generic legend name of "Isopleth n", with n referring to the nth isopleths passed. Isopleth XH$_2$O values are given in parentheses. The user can pass their own labels as a list of strings. If more than one set of isopleths is passed, the labels should refer to each set of isopleths, not each XH$_2$O value.>`degassing_path_labels`: Labels for the plot legend. Default is None, in which case each plotted line will be given the generic legend name of "Pathn", with n referring to the nth degassing path passed. The user can pass their own labels as a list of strings.>`custom_labels`: Labels for the plot legend. Default is None, in which case each group of custom points will be given the generic legend name of "Customn", with n referring to the nth degassing path passed. The user can pass their own labels as a list of strings.>`custom_colors` and `custom_symbols`: Custom colors and symbol shapes can be specified for (`custom_H2O`, `custom_CO2`) points. A list of color values or symbol types readable by Matplotlib (see Matplotlib documentation) can be entered. The length of this list must be equal to the lengths of `custom_H2O` and `custom_CO2`. If nothing is specified for custom_colors, VESIcal's default colors will be used. If nothing is specified for `custom_symbols`, all points will be plotted as filled circles.>`markersize`: The size of the symbols can be specified here. 
If not specified, the default value is marker size 10.>`save_fig`: Default value is False, in which case the figure will not be saved. If a string is passed, the figure will be saved with the string as the filename. The string must include the file extension.**Advanced inputs:** Most users will not need to use these inputs.>`extend_isobars_to_zero`: If set to True (the default), isobars will be extended to the plot axes, which are at x=0 and y=0, even if there is a finite solubility at zero partial pressure.>`smooth_isobars` and `smooth_isopleths`: If set to True, isobar or isopleth data will be fit to a polynomial and plotted. If set to False (the default), the raw input data will be plotted. Note that MagmaSat `calculate_isobars_and_isopleths()` calculations return already "smoothed" data (that is, the raw data are fit to polynomials before being returned). Raw "unsmoothed" data can be returned by MagmaSat `calculate_isobars_and_isopleths()` (see documentation on this method).**Calculated outputs:** >The function returns fig and axes matploblib objects defining a plot with x-axis as H$_2$O wt% in the melt and y-axis as CO$_2$ wt% in the melt. Isobars, or lines of constant pressure at which the sample magma composition is saturated, and isopleths, or lines of constant fluid composition at which the sample magma composition is saturated, are plotted if passed. Degassing paths, or the concentration of dissolved H$_2$O and CO$_2$ in a melt equilibrated along a path of decreasing pressure, is plotted if passed. A simple example: Isobars and isoplethsHere we plot the isobars at 1,000, 2,000, and 3,000 bars and isopleths at 0.25, 0.5, and 0.75 XH$_2$O$^{fluid}$ calculated for sample '10*' at 1,200 $^{\circ}$C in Section 3.6 onto one plot.**Figure 6**fig, ax = v.plot(isobars=isobars, isopleths=isopleths) v.show()*Figure 6 - Isobars and isopleths calculated for the sample, temperature, pressures, XH$_2$O$^{fluid}$ values, and with the model as defined in Section 3.6. Manuscript default values are sample* `10*` *at a 1,200 $^{\circ}$C with isobars at 1,000, 2,000, and 3,000 bars, isopleths at XH$_2$O$^{fluid}$ = 0, 0.25, 0.5, 0.75, and 1 calculated with MagmaSat*When plotting isobars and isopleths via MagmaSat, the values calculated by `calculate_isobars_and_isopleths()` are used to calculate polynomial fits using Numpy's `polyfit`. These polynomial fits, not the raw calculated data, are what have been plotted above. This method of fitting polynomial curves to these data is common in the literature (e.g., Newman & Lowenstern, 2002; IaconoMarziano et al., 2012; Iacovino et al., 2013) and is likely a very close approximation of the true saturation surface. Non-MagmaSat models do not calculate polynomial fits by default, but this can be done by passing `smooth_isobars=True` and `smooth_isopleths=True` to `plot()`.A user may wish to apply custom formatting to the plot, in which case the polynomial fits can be calculated and returned as a pandas DataFrame, which the user can then plot up manually using Matplotlib, MS Excel, or some other preferred method. To calculate polynomial fits to isobar and isopleth data, isobars and isopleths can be passed to `smooth_isobars_and_isopleths()`. For this advanced case, we refer the reader to the documentation. A simple example: Degassing pathsHere we plot all four degassing paths calculated for sample '10*' at 1,200 $^{\circ}$C in Section 3.7 onto one plot. 
We designate labels of "Open", "Half", "Closed", and "Exsolved" for the legend.**Figure 7**Figure 7afig, ax = v.plot(degassing_paths=[open_df, half_df, closed_df, exsolved_df], degassing_path_labels=["Open", "Half", "Closed", "Exsolved"]) v.show()Figure 7bfig, ax = v.plot(degassing_paths=[exsolved_df, start2000_df], degassing_path_labels=["Exsolved", "2000 bars"]) v.show()*Figure 7 - Degassing paths calculated for the sample, temperature, degassing style, initial exsolved fluid wt%, starting pressure, and model as designated in Section 3.6. Default manuscript values are sample* `10*` *at 1,200 °C. "Open", "Half", and "Closed" curves in (a) represent open-system, partially open-system (50% fractionated fluid), and closed-system degassing paths, respectively, starting at the saturation pressure. The "Exsolved" curve in (b) represents closed-system degassing with an initial exsolved fluid wt% = 2.0. The "2000" curve in (b) represents closed-system degassing calculated starting at a pressure of 2,000 bars.* Plotting multiple calculationsOne of the major advantages to VESIcal over any other modeling tool is the ability to quickly calculate and plot multiple calculations. VESIcal's `plot()` function is built on top of the popular Matplotlib python library and is designed to work with any VESIcal generated data. It can automatically plot and label one or multiple calculations. In addition, it can plot, as a scatter plot, any x-y points. The plot function always generates plots with H$_2$O on the x-axis and CO$_2$ on the y-axis. `scatterplot()` will take in and plot any x-y data with custom x- and y-axis labels. Generating other commonly used petrologic plots (e.g. Harker style diagrams) is already possible with Matplotlib, and so VESIcal does not duplicate this functionality, however this may be added in future updates.It may be tempting to plot multiple calculations on multiple samples and compare them, however we strongly caution against plotting data that do not correspond. For example, isobars and isopleths are calculated isothermally. If degassing paths are also plotted, the user should ensure that the degassing paths were calculated at the same temperature as the isobars and isopleths. Isobars, isopleths, and degassing pathsIn this example we will use data imported in Section 3.1 and calculations performed in Sections 3.5 and 3.6. Of course, all of the data calculated with VESIcal can be exported to an Excel or CSV file for manipulation and plotting as desired. However, some examples of plotting that can be done within this notebook or in a python script are shown below. Here we plot:- Isobars calculated at 1200 $^{\circ}$C and pressures of 1,000, 2,000, and 3,000 bars for sample 10*- Isopleths calculated at 1200 $^{\circ}$C and XH$_2$O$^{fluid}$ values of 0, 0.25, 0.5, 0.75, and 1 for sample 10*- An open-system degassing path for sample 10*- A closed-system degassing path for sample 10***Figure 8**fig, ax = v.plot(isobars=isobars, isopleths=isopleths, degassing_paths=[open_df, closed_df], degassing_path_labels=["Open System", "Closed System"]) v.show()*Figure 8 - Example of plotting multiple calculations on one plot. Isobars and isopleths as defined in Section 3.6 and shown in Section 3.8.1 and degassing curves as defined in Section 3.7 and shown in Section 3.8.2. 
Default manuscript values are for sample* `10*` *at 1,200 °C with isobars at 1,000, 2,000, and 3,000 bars, isopleths at XH$_2$O$^{fluid}$ values of 0, 0.25, 0.5, 0.75, and 1 with an open-system and a closed-system degassing path.* Isobars, isopleths, and degassing paths for multiple samplesFirst, we will calculate some new data for two different samples: a basanite (sample KI-07 from Iacovino et al., 2016) and a rhyolite (sample samp. P1968a from Myers et al., 2019). For both samples we will calculate and then plot:- Isobars and isopleths at 1100 $^{\circ}$C, pressures of 1,000 and 2,000 bars and fluid compositions of XH$_2$O$^{fluid}$ of 0.25, 0.5, and 0.75- Closed-system degassing paths at 1100 $^{\circ}$Cbasanite_sample = myfile.get_sample_composition('KI-07', asSampleClass=True) rhyolite_sample = myfile.get_sample_composition('samp. P1968a', asSampleClass=True) basanite_isobars, basanite_isopleths = v.calculate_isobars_and_isopleths(sample=basanite_sample, temperature=1100, pressure_list=[1000, 2000], isopleth_list=[0.25,0.75]).result rhyolite_isobars, rhyolite_isopleths = v.calculate_isobars_and_isopleths(sample=rhyolite_sample, temperature=1100, pressure_list=[1000, 2000], isopleth_list=[0.25,0.75]).result basanite_degassing_path = v.calculate_degassing_path(sample=basanite_sample, temperature=1100).result rhyolite_degassing_path = v.calculate_degassing_path(sample=rhyolite_sample, temperature=1100).resultCalculating isobar at 1000 bars done. Calculating isobar at 2000 bars done. Done! Calculating isobar at 1000 bars done. Calculating isobar at 2000 bars done. Done! [====================] 100% Calculating degassing path... [====================] 100% Calculating degassing path...**Figure 9**fig, ax = v.plot(isobars=[basanite_isobars, rhyolite_isobars], isopleths=[basanite_isopleths, rhyolite_isopleths], degassing_paths=[basanite_degassing_path, rhyolite_degassing_path], isobar_labels=["Basanite", "Rhyolite"], isopleth_labels=["Basanite", "Rhyolite"], degassing_path_labels=["Basanite", "Rhyolite"]) v.show()*Figure 9 - Example of plotting multiple calculations from multiple samples on the same plot. Note that the colors are automatically set to correspond to each sample for all plotted items (here, isobars, isopleths, and degassing paths). Samples, pressures, temperatures, XH$_2$O$^{fluid}$ values, and degassing path styles are defined above in this section. Manuscript default values are for a basanite (sample* `KI-07` *) and a rhyolite (sample* `samp. P1968a` *) at 1,100 °C, 1,000 and 2,000 bars, and XH$_2$O$^{fluid}$ = 0.25 and 0.75 and closed-system degassing.* Model hybridization (Advanced)One of the advantages of implementing the solubility models in a generic python module is the flexibility this affords the user in changing the way solubility models are defined and used. In particular, the structure allows any combination of pure-fluid models to be used together in modeling mixed-fluids, and fugacity or activity models can be quickly changed without modifying code. This allows advanced users to see how changing a fugacity or activity model implemented in any particular solubility model would affect model results. Instructions for hybridizing models can be found in Supplemental Jupyter notebook S10. Exporting dataOnce batch calculations have been performed, they can be exported to an Excel or CSV file with the `save_excel()` and `save_csv()` commands. 
These operations require that the user define a filename (what to name your new file) and a list of the calculation results to save to this file or files. Note that this requires that calculations have been assigned to variable names, which has been done in all of the given examples. For example, to calculate saturation pressures of an imported file saved to the variable 'myfile' and simply print the output, the user can type `myfile.calculate_saturation_pressures([options])`, where '[options]' are the required and optional inputs. However, to save this result to a variable (e.g., called 'my_satPs') so that it can be accessed later, the correct python syntax would be `my_satPs = myfile.calculate_saturation_pressures([options])`.Multiple calculations can be saved at once. If saving to an Excel file, each calculation is saved as its own sheet within a single file. If desired, the user can define the names of each of these sheets. If not specified, the sheets will be named 'Original_User_Data', which contains the original input data, and then 'CalcN' where N is the nth calculation in a list of calculations. If saving multiple calculations to a CSV file, each calculation will be saved to its own CSV file, and a file name for each of these is required. Advanced users note that the `calculations` argument takes in any pandas DataFrame object, meaning this functionality is not limited to VESIcal's prescribed outputs. The `save_excel()` and `save_csv()` methods use the pandas `to_excel` and `to_csv` methods, however not all options are implemented here. If saving to a CSV file, any arguments that can be passed to pandas `to_csv` method may be passed to VESIcal's `save_csv()`.**Method structures:**>`save_excel(filename, calculations, sheet_name=None)`>`save_csv(filenames, calculations)`**save_excel( ) Required inputs:**>`filename` (Excel): Name of the file to create. The extension (.xlsx) should be included along with the name itself, all in quotes (e.g., `filename='myfile.xlsx'`).>`calculations`: A list of variables containing calculated outputs from any of the core BatchFile functions: `calculate_dissolved_volatiles()`, `calculate_equilibrium_fluid_comp()`, and `calculate_saturation_pressure()`. This must be passed as a list type variable, even if only one calculation is given. This is done by enclosing the variable in square brackets (e.g., `calculations=[my_calculation]`).**save_excel( ) Optional inputs:**>`sheet_name`: The default value is None, in which case sheets will be saved as 'Original_User_data' (the data input by the user) followed by 'CalcN' where N is the nth calculation in `calculations`. Otherwise, a list of names for the sheets can be passed, with the names in quotes (e.g. `sheet_name=['SaturationPressures'])`. 'Original_User_data' will always be saved as the first sheet.**save_csv( ) Required inputs:**>`filenames` (CSV): Name of the file or files to create. The extension (.csv) should be included. If more than one filename is passed, it should be passed as a list. This is done by enclosing the filenames in square brackets (e.g., `filenames=["file1.csv", "file2.csv"]`).>`calculations`: same as for `save_excel()`. Must be same length as `filenames`.**Calculated outputs:** >An Excel or CSV file or files will be saved to the active directory (i.e., the same folder as this manuscript notebook or wherever the code is being used). Here we save five of the calculations performed on an imported data file earlier in this manuscript. 
The original user-input data are stored in the BatchFile object 'myfile'. In the following line we use the method `save_excel()` to save the original data and a list of calculations given by the calculations argument to an Excel file.myfile.save_excel(filename='testsave.xlsx', calculations=[dissolved, eqfluid, eqfluid_wtemps, satPs, satPs_wtemps], sheet_names=['dissolved', 'eqfluid', 'eqfluid_wtemps', 'SaturationPs', 'SatPs_wtemps'])Saved testsave.xlsxSaving data for re-import into VESIcalIn many cases, it may be preferable to compute large amounts of data using VESIcal and then reimport them, either to perform more analysis or to plot the data. Likewise, a user may wish to compute data in VESIcal and then send the results to a colleague, who can then re-import that data into VESIcal directly. For this case, we suggest using python's pickle package (https://wiki.python.org/moin/UsingPickle). Any python object, such as the results of a VESIcal calculation, can be "pickled" or saved as a python-readable file. To use pickle, users must first import the pickle module, then "dump" the desired contents to a pickle file. The pickled data can be accessed by "loading" the pickled file.Below we pickle our computed dissolved volatile concentrations by dumping our variable `dissolved` to a pickle file that we name "dissolved.p".import pickle pickle.dump(dissolved, open("dissolved.p", "wb"))In another python file or terminal session, `dissolved` can be loaded back in via:import pickle dissolved = pickle.load(open("dissolved.p", "rb"))Discussion and Applications Compositional Variation Within Datasets and Best PracticesIt has been clearly shown that the composition of a melt plays a strong role in determining the solubility of H$_2$O and CO$_2$ in magmas (Papale et al., 2006; Moore, 2008; Ghiorso et al., 2015; Wieser et al., 2020). Thus, compositional variance must be accounted for in any study examining solubility in multiple samples. A key use case where VESIcal can facilitate the adoption of this practice is in melt inclusion (MI) studies; specifically, where a single suite of MI with multiple melt compositions is examined using solubility models to interrogate magmatic degassing processes. Prior to the availability of VESIcal, the difficulty associated with performing multiple model calculations on multiple samples resulted in very few studies accounting for any compositional variance within their datasets. Indeed, until now, it has been difficult to even assess whether the potentially minimal compositional variance within a suite of melt inclusions from a single volcanic eruption would have any measurable effect on solubilities calculated for different MI.Using VESIcal, we can address the question: what is the quantitative effect of compositional variation within a single suite of melt inclusions upon calculated melt inclusion saturation pressures? And, how does this affect conclusions that might be drawn regarding volcanic degassing and eruptive processes? To investigate this, we use a dataset of basaltic melt inclusions from Cerro Negro volcano, Nicaragua (Roggensack, 2001). The compositional variation of these MI (Figure 10), while relatively restricted, results in quite variable mixed-fluid solubilities from sample to sample. To determine the end-member compositions within the dataset corresponding to the samples with the maximum and minimum combined H$_2$O-CO$_2$ solubilities, isobars were computed at 1200 $^{\circ}$C and 3,000 bars for all samples using the MagmaSat model in VESIcal. 
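As a rough sketch of how such an integrated value can be obtained from the isobar DataFrame returned by `calculate_isobars_and_isopleths()` (assuming the 'Pressure', 'H2Omelt', and 'CO2melt' columns described in Section 3.6; `numpy.trapz` is used here purely for illustration and is not necessarily how the values reported below were computed):

import numpy as np

# 'isobars' here stands for the isobar DataFrame computed for one Cerro Negro sample.
# Select the 3,000-bar isobar and integrate dissolved CO2 over dissolved H2O.
iso_3000 = isobars[isobars['Pressure'] == 3000.0].sort_values('H2Omelt')
ims = np.trapz(iso_3000['CO2melt'], x=iso_3000['H2Omelt'])  # integrated mixed-volatile solubility, wt%^2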
Maximum and minimum samples were taken as the isobar curves with the smallest and largest integral (area under the curve). We refer to this value as the “integrated mixed-volatile solubility” value, IMS, in units of concentration squared. The samples that produced maximum and minimum integrated solubilities are shown in Figures 10 and 11 in blue and green, respectively (sample 41b*, IMS=0.81 and 36a*, IMS=0.66 wt%$^2$ at 3,000 bars). A composition representing the average of all MI in the dataset is shown in orange (“Average Sample”, IMS=0.70 wt%$^2$ at 3,000 bars). A jupyter notebook to reproduce these calculations is provided in the supplement (Supplementary Jupyter Notebook S8).**Figure 10***Figure 10 - Harker style diagrams illustrating the compositional range of MIs from Cerro Negro volcano from Roggensack (2001). The “Average Sample” plotted as an orange dot represents a fictitious sample, calculated as the average of all MIs in the dataset. Sample 41b* and 36a* are the names of samples that produced isobars with maximum and minimum area under the curve, respectively (see text). Gray diamonds are all other data in the dataset.***Figure 11***Figure 11 - H$_2$O-CO$_2$ diagram with isobars for MI from Cerro Negro volcano \citep{roggensack2001} computed by VESIcal using MagmaSat at 1200 $^{\circ}$C, pressures of 500, 1000, 2000, 3000, and 4,000 bars. Curves shown are polynomials fitted to data computed by VESIcal. Blue and green curves correspond to samples 41b* and 36a*, which produced isobars with maximum and minimum area under the curve, respectively. Orange isobars were those computed for a fictitious sample representing the average composition of the MI dataset. Gray diamonds are all other data in the dataset.*At all pressures, the integrated mixed-volatile solubility across the Cerro Negro dataset varies as much as 10% relative (Figure 11). For these MI, this results in as much as 11.5% relative error in the calculation of saturation pressures (average error for the entire dataset of 6.8% relative). It is noteworthy that this error is not systematic either in terms of absolute value or sign. For example, when calculated using their own compositions, saturation pressures for maximum and minimum samples 41b* and 36a* are 3050 and 3090 bars, respectively. But, saturation pressures calculated for both of these MI using the dataset's average composition are 3020 and 3250 bars, respectively. That is an error of -30 and +160 bars or -1% and +5% respectively. Errors in these calculations, thus, may be quite small. But, in any case, removing this error completely is a simple task using VESIcal, and so we recommend that studies adopt the practice of calculating volatile solubilities (and associated values) in melts using the composition unique to each melt investigated.Even in cases where solubility values (e.g., saturation pressures) are not calculated, the error highlighted above plagues any isobar diagram over which multiple melt compositions are plotted (e.g., Figure 11). Alternative plots to the commonly used H$_2$O-CO$_2$ diagram are shown in Figure 12, in which the same dataset is plotted in terms of computed saturation pressure (at 1200 $^{\circ}$C calculated with VESIcal using MagmaSat) versus dissolved H$_2$O, dissolved CO$_2$, and fluid composition (as XH$_2$O$^{fluid}$ calculated with VESIcal using MagmaSat). These plots avoid the issues discussed above as they are compositionally independent, since the saturation pressure is calculated individually for each sample composition. 
Degassing trends are more accurately represented; H$_2$O and CO$_2$ concentrations lie along expected degassing trends with much less scatter than the H$_2$O-CO$_2$ plot. We can also see from this figure that the fluid composition during this eruption at Cerro Negro remained relatively constant at XH$_2$O$^{fluid}$ $\sim$0.8 from reservoir to surface, suggesting a scenario approaching closed-system degassing (i.e., melt volatile concentrations are buffered by the co-existing fluid composition).**Figure 12***Figure 12 - Saturation pressure at 1200 $^{\circ}$C calculated using VESIcal with MagmaSat versus measured dissolved H$_2$O and CO$_2$ concentrations and calculated fluid composition in Cerro Negro melt inclusions. These plots meaningfully illustrate degassing processes while avoiding issues associated with commonly used H$_2$O-CO$_2$ diagrams, which occur with even minor compositional variation within a given dataset.* Model ComparisonsOne of the possible workflows enabled through VESIcal is the ability to compute and compare (numerically and graphically) results from several models at once. To illustrate this point, we will take two single samples within the calibrated compositional range of several models, calculate isobars at multiple pressures, and plot the results. This is a common way to compare the solubility surface computed by different models for a single melt composition, and it is particularly useful since it quickly highlights the significant variation that exists between published models. The results of this exercise are shown here, and a Jupyter notebook to reproduce the code and calibration checks is available in the Supplement (Supplementary Jupyter Notebook S9).We use a fictitious alkali basalt that we name “alkbasalt” and a fictitious rhyolite whose compositions are given in Table 12. The use of VESIcal’s `calib_plot()` function (see supplement) illustrates that the composition of the alkali basalt is within the compositional calibration ranges of four mixed-fluid solubility models: MagmaSat, Iacono-Marziano, Dixon, and ShishkinaIdealMixing. The rhyolite is within the ranges of MagmaSat and Liu. Isobars were calculated with these models at 1200 °C for alkbasalt and 800 °C for rhyolite and pressures of 500, 1,000, and 2,000 bars, using the below code: **Table 12. 
Melt compositions used for modeling**model_comps = v.BatchFile("tables/Table_Model_Comps.xlsx") model_comps.data alkbasalt = model_comps.get_sample_composition("Alkali Basalt", asSampleClass=True) rhyolite = model_comps.get_sample_composition("Rhyolite", asSampleClass=True) alkbasalt_isobars, alkbasalt_isopleths = v.calculate_isobars_and_isopleths(sample=alkbasalt, temperature=1200, pressure_list=[500, 1000, 2000], isopleth_list=[0.5], print_status=True).result rhyolite_isobars, rhyolite_isopleths = v.calculate_isobars_and_isopleths(sample=rhyolite, temperature=800, pressure_list=[500, 1000, 2000], isopleth_list=[0.5]).result Iac_alkbasalt_isobars, Iac_alkbasalt_isopleths = v.calculate_isobars_and_isopleths(sample=alkbasalt, temperature=1200, pressure_list=[500, 1000, 2000], isopleth_list=[0.5], model="IaconoMarziano").result Dixon_alkbasalt_isobars, Dixon_alkbasalt_isopleths = v.calculate_isobars_and_isopleths(sample=alkbasalt, temperature=1200, pressure_list=[500, 1000, 2000], isopleth_list=[0.5], model="Dixon").result Shish_alkbasalt_isobars, Shish_alkbasalt_isopleths = v.calculate_isobars_and_isopleths(sample=alkbasalt, temperature=1200, pressure_list=[500, 1000, 2000], isopleth_list=[0.5], model="ShishkinaIdealMixing").result Liu_rhyolite_isobars, Liu_rhyolite_isopleths = v.calculate_isobars_and_isopleths(sample=rhyolite, temperature=800, pressure_list=[500, 1000, 2000], isopleth_list=[0.5], model="Liu").resultCalculating isobar at 500 bars done. Calculating isobar at 1000 bars done. Calculating isobar at 2000 bars Calculating isobar control point at XH2Ofluid = 0.75**Figure 13**Figure 13afig, ax = v.plot(isobars=[alkbasalt_isobars, Iac_alkbasalt_isobars, Dixon_alkbasalt_isobars, Shish_alkbasalt_isobars], isobar_labels=["MagmaSat", "Iacono-Marziano", "Dixon", "Shishkina"]) v.show()Figure 13bfig, ax = v.plot(isobars=[rhyolite_isobars, Liu_rhyolite_isobars], isobar_labels=["MagmaSat", "Liu"]) v.show()Connection to DBimport sqlalchemy as sql import pandas as pd try: engine = sql.create_engine('mssql+pymssql://data11:12345*@192.168.3.16:1433/AdventureWorks2019') print('connection established') except: print('connection failed') query ="""SELECT [TransactionID] ,[ProductID] ,[ReferenceOrderID] ,[ReferenceOrderLineID] ,[TransactionDate] ,[TransactionType] ,[Quantity] ,[ActualCost] ,[ModifiedDate] FROM [AdventureWorks2019].[Production].[TransactionHistory]"""Reading a table with pandas#Reading a table with pandas df = pd.read_sql_table('Product', engine,schema='Production') df.head(5)Executing a query with pandas#Executing a query df2 = pd.read_sql_query(query,engine,index_col='TransactionID') df2.tail(5)MLconfigimport os import mlrun mlrun.set_environment(api_path = 'http://mlrun-api:8080', artifact_path = os.path.abspath('./'))> 2020-11-24 15:34:46,837 [warning] warning!, server (0.5.4-rc1) and client (0.5.4) ver dont matchSave# create job function object from notebook code fn = mlrun.code_to_function("describe", handler="summarize", description="describe and visualizes dataset stats", categories=["analysis"], labels = {"author": "yjb"}, code_output='.') fn.export()> 2020-11-24 15:35:06,452 [info] function spec saved to path: function.yamlTestsfn.apply(mlrun.platforms.auto_mount()) DATA_URL = 'https://s3.wasabisys.com/iguazio/data/iris/iris_dataset.csv' task = mlrun.NewTask(name="tasks-describe", handler=summarize, inputs={"table": DATA_URL}, params={'update_dataset': True, 'label_column': 'label'})Run Locallyrun = mlrun.run_local(task)> 2020-11-24 15:35:06,489 [warning] warning!,
server (0.5.4-rc1) and client (0.5.4) ver dont match > 2020-11-24 15:35:06,489 [info] starting run tasks-describe uid=38d5c276628e46ff8634942b3585f636 DB=http://mlrun-api:8080 > 2020-11-24 15:35:06,538 [warning] warning!, server (0.5.4-rc1) and client (0.5.4) ver dont matchRun Remotelyfn.run(task, inputs={"table": DATA_URL})> 2020-11-24 15:35:22,209 [warning] warning!, server (0.5.4-rc1) and client (0.5.4) ver dont match > 2020-11-24 15:35:22,209 [info] starting run tasks-describe uid=2ed254b72b6e4ca3a9485324b4d626c7 DB=http://mlrun-api:8080 > 2020-11-24 15:35:22,403 [info] Job is running in the background, pod: tasks-describe-6lflx > 2020-11-24 15:35:31,679 [info] run executed, status=completed final state: completedLesson 8 Practice: SeabornUse this notebook to follow along with the lesson in the corresponding lesson notebook: [L08-Seaborn-Lesson.ipynb](./L08-Seaborn-Lesson.ipynb). InstructionsFollow along with the teaching material in the lesson. Throughout the tutorial sections labeled as "Tasks" are interspersed and indicated with the icon: ![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/16/Apps-gnome-info-icon.png). You should follow the instructions provided in these sections by performing them in the practice notebook. When the tutorial is completed you can turn in the final practice notebook. For each task, use the cell below it to write and test your code. You may add additional cells for any task as needed or desired. Task 1a SetupImport the following packages:+ seaborn as sns+ pandas as pd+ numpy as np+ matplotlib.pyplot as pltActivate the `%matplotlib inline` magic.import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns %matplotlib inlineTask 2a Load Data+ View available datasets by calling `sns.get_dataset_names`.+ Choose one of those datasets and explore it.sns.get_dataset_names() penguins = sns.load_dataset('penguins')What is the shape?penguins.shapeWhat are the columns?penguins.columnsWhat are the data types?penguins.dtypesAre there missing values?penguins.isna().sum()Are there duplicated rows?penguins.duplicated().sum()For categorical columns find the unique set of categories.cat = ['species', 'island', 'sex'] penguins['species'].unique() penguins['island'].unique() penguins['sex'].unique()Is the data tidy?Yes the data is tidyTask 2b Preview SeabornTake some time to peruse the Seaborn [example gallery](https://seaborn.pydata.org/examples/index.html). Indicate which plot types are most interesting to you. Which do you expect will be most useful with current research projects you may be working on?Heat and cluster maps would be useful for displaying gene expression changes between populations. 
Boxplots would be useful for showing expression of a specific gene of interest across populations.Task 3a Using `relplot`Experiment with the `size`, `hue` and `style` semantics by applying them to another example dataset of your choice.*You should produce three or more plots for this task.*sns.relplot(x="body_mass_g", y="flipper_length_mm", hue="sex", data=penguins) sns.relplot(x="bill_length_mm", y="bill_depth_mm", size = 'body_mass_g', data=penguins) sns.relplot(x="bill_length_mm", y="bill_depth_mm", size = 'species',hue = 'sex', data=penguins)Task 4a: Explore built-in stylesUsing a dataset of your choice, practice creating a plot for each of these different styles:+ darkgrid+ whitegrid+ dark+ white+ tickssns.set_style('whitegrid') sns.relplot(x="bill_length_mm", y="bill_depth_mm", size = 'species',hue = 'sex', data=penguins) sns.set_style('darkgrid') sns.relplot(x="bill_length_mm", y="bill_depth_mm", size = 'species',hue = 'sex', data=penguins) sns.set_style('dark') sns.relplot(x="bill_length_mm", y="bill_depth_mm", size = 'species',hue = 'sex', data=penguins) sns.set_style('white') sns.relplot(x="bill_length_mm", y="bill_depth_mm", size = 'species',hue = 'sex', data=penguins) sns.set_style('ticks') sns.relplot(x="bill_length_mm", y="bill_depth_mm", size = 'species',hue = 'sex', data=penguins)Task 4bExperiment with the style options and palettes introduced above. Create and demonstrate a style of your own using a dataset of your choice.custom_style = {'figure.facecolor': 'black', 'axes.facecolor': 'white'} color = sns.color_palette('colorblind') sns.set_style("darkgrid", custom_style) sns.relplot(x="bill_length_mm", y="bill_depth_mm", hue = 'sex', data=penguins, palette = color)Lec 07. Activation Functionsimport torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torch.autograd import Variable import numpy as np import matplotlib.pyplot as plt %matplotlib inline1. Drawing Activation Functionsx = Variable(torch.linspace(-5, 5, 500)) y_sigmoid = torch.sigmoid(x).data.numpy() y_relu = F.relu(x).data.numpy() y_leaky_relu = F.leaky_relu(x, negative_slope=0.1).data.numpy() y_softplus = F.softplus(x).data.numpy() plt.figure(figsize=(12, 10)) plt.subplot(221) plt.plot(x.data.numpy(), y_sigmoid, c='blue', label='sigmoid') plt.ylim((-0.2,1.2)) plt.legend(loc='best') plt.subplot(222) plt.plot(x.data.numpy(), y_relu, c='blue', label='relu') plt.ylim((-1.2, 5)) plt.legend(loc='best') plt.subplot(223) plt.plot(x.data.numpy(), y_leaky_relu, c='blue', label='leaky_relu') plt.ylim((-1.2, 5)) plt.legend(loc='best') plt.subplot(224) plt.plot(x.data.numpy(), y_softplus, c='blue', label='softplus') plt.ylim((-1.2, 5)) plt.legend(loc='best') plt.show()2. Generating Datax = torch.linspace(-1, 1, 5000) y = x.pow(2) + 0.2*torch.rand(x.size()) x, y = Variable(x), Variable(y) plt.scatter(x.data.numpy(), y.data.numpy(), s=1) plt.show()3. 
Define NonLinear Modelx.shape, y.shape x = torch.unsqueeze(x, dim=1) y = torch.unsqueeze(y, dim=1) x.shape, y.shape # Build neural networks hidden = nn.Linear(1, 20, bias=True) activation = nn.ReLU() output = nn.Linear(20, 1, bias=True) model = nn.Sequential(hidden, activation, output) loss = nn.MSELoss() optimizer = optim.SGD(model.parameters(), lr=0.1) for t in range(500): pred = model(x) cost = loss(pred, y) optimizer.zero_grad() cost.backward() optimizer.step() if t % 100 == 0: plt.scatter(x.data.numpy(), y.data.numpy(), s=1) plt.plot(x.data.numpy(), pred.data.numpy(), 'r--', lw=6) plt.text(0.5, 0, 'Loss=%.4f' % cost.item(), fontdict={'size':14, 'color':'b'}) plt.show()Coding Exercise 0102 1. Number: 1.1. Operations with numbers:1+1 2**3 8%5 8.0//5.02. String: 2.1. Basics:# A quote within another quote. print("He said, 'Python is very easy'") print('He said, "Python is very easy"') # Multi-line string. my_string = """Hello !!! my name is Python.""" print(my_string)2.2. Operations with strings:str1 = 'First string. ' str2 = 'Second string.' print(str1 + str2) str = 'Python. ' print(str*5)2.3. Indexing and slilcing strings:str = 'Life is too short, You need Python!' print(str[0]) print(str[-1]) print(str[0:4]) print(str[5:])2.4. Functions and methods for string objects:x = 'Python' print(len(x)) x.count('h') x.upper() x.lower() x.find('o') x.find('w') x.index('o') # 'w' is not found in x. This will result in an error. x.index('w') x = 'Life is too short, You need Python!' x.split(' ') y = x.split(' ') a = ' ' print(a.join(y))3. Boolean: 3.1. Operations with Booleans: AND operator:False and False False and True True and False True and True (3 > 2) and (5 < 4) (3 > 2) and (5 > 4)OR operator:False or False True or False False or True False or True True or TrueNOT operator:not True not False not (4 > 2) not (4 < 2)3.2. Implicit Boolean:x = [] y = [1,2,3] if x: print('Not empty.') else: print('Empty!') if y: print('Not empty.') else: print('Empty!')Basic operations in Colaboratory notebook Created: Feb 2019Updated: March 2021Authors: & Email: ---[Colaboratory](https://research.google.com/colaboratory/faq.html) (or Colab) is a free research tool from Google for machine learning education and research built on top of [Jupyter Notebook](https://jupyter.org/). From our experience, it is also an ideal tool for scientific collaboration and sharing your results with non-experts (as opposed to, say, collaboration via a GitHub project), especially when there are drastic fluctuations in coding skills across a team/collaboration. This particular notebook starts with a brief overview of Colab's UI and then shows some simple operations in Colab notebooks, including some basic plotting, code parameterization, usage of shell commands and installation of additional Python packages. Notice that many popular Python packages, such as tensorflow, pytorch, scikit, opencv, are already installed in Colaboratory. So we can just import them into our notebook and start having fun.--- Notebook rules:Some basic notebook rules:*General Jupyter notebooks rules:*1. Click inside a code cell and press SHIFT+ENTER to execute it.2. Use a Text cell with Markdown syntax to create rich text.3. Execute cells TOP TO BOTTOM. *Colab-specific rules:*4. Notebooks are saved to your Google Drive unless you're in Playground mode or have opened a notebook hosted on GitHub (like this notebook). In those scenarios you can click on "COPY TO DRIVE" to save a copy to your Drive.5. 
Mount your Google Drive to have a direct access from a notebook to the files stored in the drive (this includes Shared Drives).6. If using Colab's virtual storage only, all the uploaded/stored files will get deleted when a runtime is recycled.7. In addition to SHIFT+ENTER, in Colab you can also execute a cell CTRL+ENTER, COMMAND+ENTER (MacOS), or by clicking the play button which appears after hovering over the left side of the cell. Colab UI Opening notebooksYou can open any notebook hosted on GitHub by going to Colaboratory [start page](https://colab.research.google.com) and selecting GITHUB from a menu dialog. Then search for a specific GitHub repository and choose a notebook that you would like to open from the list of notebooks in that repository. From the same [start page](https://colab.research.google.com) you can view and open notebooks stored in your GDrive, upload a regular Jupyter notebook from your local machine and start an entirely new notebook. Colab currently only supports Python 3 kernels. You can also choose between GPU and [TPU](https://colab.research.google.com/notebooks/tpu.ipynb) hardware accelerator options for your Colab notebook (*Edit --> Notebook settings* or *Runtime --> Change runtime type*). Upload/download filesOnce you open a Google Colab notebook, it creates a virtual machine instance on Google Cloud. To upload files from your local machine to Colab virtual storage, use UPLOAD option from the left sidebar. To download files from Colab's virtual storage to your local machine, right-click on a file and then select ''Download".Mount DriveYou can also mount your google drive: once you click on MOUNT DRIVE in the left sidebar, it will insert a code cell into your notebook that you'll need to run to mount your google drive (it will ask for your authorization). Another way to download files (without mounting a google drive) is to use a `!gdown` or `!wget` commands (more details in the [Shell commands](scrollTo=JrF12-bqPKPm) section)from google.colab import drive drive.mount('/content/drive') ls /content/drive/MyDrive/ Shareddrives/Share your notebooksYou may share your notebooks the same way you share Google Docs and Google Slides (it needs to be stored in your/team Google Drive for this). You can also comment on individual code cells in a notebook and your collaborators will receive your comments (in a form of email notification), as long as the notebook is shared with them. To leave a comment, hover over the right side of any code cell and click *Add a Comment*. Alternatively, you may use a *Ctrl+Alt+M* shortcut. The ability to share comments can be very useful if you make some important additions/changes to the notebook (e.g. when you have an eureca moment) and you want your collaborators to see those additions/changes asap. It is also very useful when you get stuck and need help/advice. 
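Returning to the mounted drive for a moment: once `drive.mount` has run, anything under `MyDrive` can be read like an ordinary local path. A minimal sketch (the CSV filename here is a placeholder, not a file used elsewhere in this notebook):

import pandas as pd

# read a file stored in the mounted Google Drive; substitute any CSV you keep in MyDrive
df = pd.read_csv("/content/drive/MyDrive/example_data.csv")
print(df.shape)
df.head()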
Simple math with numpy and matplotlib We can use "standard" python packages like [numpy](https://www.numpy.org/) and [matplotlib](https://matplotlib.org/) as well any many other popular packages for data manipulation and data vizualization, which are pre-installed in Colab .# Package for manipulation with numerical data import numpy as np # Package for plotting import matplotlib.pyplot as pltAs an example, let's make a plot of *sin* and *cos* waves using numpy and matplotlib.First, define *'x'* and *'y'*:# creates array of data points between 0 and 10, with 0.01 interval x = np.arange(0, 10, 0.01) # calculates sin function at each point y_sin = np.sin(x) # calculates cos function at each point y_cos = np.cos(x)Now plot sine and cosine waves:# creates a figure and a set of subplots (ax1 and ax2) fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(8, 8)) # plot sine wave in the first subplot ax1.plot(x, y_sin) ax1.set_title('Sine wave') ax1.set_ylabel('y', fontsize=14, style='italic') # plot cosine wave in the second subplot ax2.plot(x, y_cos) ax2.set_title('Cosine wave') ax2.set_ylabel('y', fontsize=14, style='italic') # this axis is shared by two subplots ax2.set_xlabel('x (rad)', fontsize=14, style='italic');Forms You can use forms to parameterize your code. To add a form, use *Insert --> Add form field* while your cursor is a Code cell. Everytime you change the value in a form, the corresponding value in the code cell will adjust accordingly. Press SHIFT+ENTER or "PLAY" button on the left side to run the cell. In the example below we can adjust $a$ and $b$ parameters for $y = a sin(bx)$ function, level of noise and a figure size:#@title Using Forms in Colab a = 2 #@param {type:"slider", min:0.5, max:5, step:0.1} b = 2.1 #@param {type:"slider", min:0.5, max:5, step:0.1} c = 0.5 #@param {type: "slider", min:0, max:5, step:0.1} noise = 0.7 #@param {type:"slider", min:0, max:1, step:0.1} fig_size = 7 #@param {type:"slider", min:4, max:10, step:1} x = np.arange(-10, 10, 0.01) y_sin = a*np.sin(b*x) + c + noise*np.random.normal(size=x.shape) plt.figure(figsize=(2*fig_size, fig_size)) plt.plot(x, y_sin) plt.xlabel('x', fontsize=14, style='italic') plt.ylabel('y', fontsize=14, style='italic') plt.title('Sine wave') plt.show()Notice that you can also hide the entire code field (*More cell actions --> Form --> Hide code* or just double-clicking on a form) Shell commands Unlike Jupyter Lab, Colab doesn't have an option to open and work in a terminal/shell unless you are using Colab Pro which is not free. However, you can invoke shell commands by using either `%%shell` or `%%bash` ["cell magics"](https://ipython.readthedocs.io/en/stable/interactive/magics.htmlcell-magics) or by prefixing the command with `!` . Notice that usually it is more practical to use a "scratch cell" for interacting with a shell instead of adding additional cells to your notebook, especially for one-line shell commands (*Insert* --> *Scratch code cell* or Ctrl+Alt+N shortcut) : The following command will list files and sub-directories in the current directory:!lsdrive sample_dataNote that some of the commonly used shell commands can be accessed directly in a cell, such as `ls` and `pwd`:pwdLet's create an empty directory. We first confirm that a directotry with such name doesn't already exist. 
Since this will result in multiple lines of code, we use `%%bash` command at the beginning of the cell, which indicates that all the code lines in this cell should be interpreted as shell script.%%bash newdir="ColabIntro" if [ ! -d $newdir ]; then mkdir $newdir fiView again files and directories in the current folder. You should now be able to see a new folder that we've just created.lsColabIntro/ drive/ sample_data/We can view a content of our new directory, which should be empty:ls ColabIntroLet's create a text file inside our new directory. We will place a "Hello World" greeting on the first line of this text file:!echo "Hello World" > ColabIntro/test.txtView content of our new folder. We should be able to see the text file we just created.ls ColabIntrotest.txtWe can read a text file with `cat` command!cat ColabIntro/test.txtHello WorldWe can also move or copy files between directories directly from a notebook. In the example below we copy the created text file to our root folder:!cp ColabIntro/test.txt ./ !lsColabIntro drive sample_data test.txtWe can also write individual Python files to "disk" directly from a notebook. For example, it is common to store utility ("helper") functions defined in your notebook for some data analysis procedures in a separate python module (called e.g. utils.py) and then import them into the notebook. We can write a file to disk (virtual storage or GDrive) either with standard shell commands or with a notebook "magic" command (preferred). Let's first do it with a standard shell command:**Notice that now Colab allows creating Python files and editing them in a separate "tab".**%%bash echo """# -*- coding: utf-8 -*- '''util functions''' ##### Import necessary modules ##### def pow3(x): '''Returns x cubed''' return x ** 3""" > utils1.pyView the Python file we just created:!cat utils1.py# -*- coding: utf-8 -*- '''util functions''' ##### Import necessary modules ##### def pow3(x): '''Returns x cubed''' return x ** 3Append to the already existing python file (e.g. add another utility function):%%bash echo """ def util2(): '''Brief description''' ##### Function body ##### return""" >> utils1.pyView our Python file again:!cat utils1.py# -*- coding: utf-8 -*- '''util functions''' ##### Import necessary modules ##### def pow3(x): '''Returns x cubed''' return x ** 3 def util2(): '''Brief description''' ##### Function body ##### returnHowever it is much easier to write and edit+overwrite individual files with `%%writefile` "magic" command:%%writefile utils2.py # -*- coding: utf-8 -*- """util functions""" ##### Import necessary modules ##### def pow3(x): '''Returns x cubed''' return x ** 3 print('testing the pow3 function, when x = 3:', pow3(3)) def util2(): """Brief description""" ##### Function body ##### returnWriting utils2.pyView file content:!cat utils2.py# -*- coding: utf-8 -*- """util functions""" ##### Import necessary modules ##### def pow3(x): '''Returns x cubed''' return x ** 3 print('testing the pow3 function, when x = 3:', pow3(3)) def util2(): """Brief description""" ##### Function body ##### returnYou can even execute these python files from the notebook using the `%run` magic function:%run utils2.py # checking the docstring pow3??Finally to download a large file from a google drive into your notebook, you can use a [gdown](https://pypi.org/project/gdown/) command followed by a *download* link. The difference between shared link and download link is that you need to replace 'open' with 'uc' in the link address. 
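Since the rewrite is purely textual, it is easy to script; a small helper sketch (the function name is made up for illustration) that turns a shared link into a direct-download link before handing it to `gdown`:

def to_download_link(shared_link):
    # rewrite .../open?id=<FILE_ID> into .../uc?id=<FILE_ID>, as described above
    return shared_link.replace("/open?id=", "/uc?id=")

# hypothetical link, just to show the substitution
print(to_download_link("https://drive.google.com/open?id=FILE_ID"))
# -> https://drive.google.com/uc?id=FILE_ID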
For example, this won't work:!gdown https://drive.google.com/open?id=1nJZ9OmrbHVLZIRlCylb5cYrOxnqBLCYq/usr/local/lib/python2.7/dist-packages/gdown/parse_url.py:31: UserWarning: You specified Google Drive Link but it is not the correct link to download the file. Maybe you should try: https://drive.google.com/uc?id=1nJZ9OmrbHVLZIRlCylb5cYrOxnqBLCYq .format(url='https://drive.google.com/uc?id={}'.format(file_id)) Downloading... From: https://drive.google.com/open?id=1nJZ9OmrbHVLZIRlCylb5cYrOxnqBLCYq To: /content/open?id=1nJZ9OmrbHVLZIRlCylb5cYrOxnqBLCYq 62.8kB [00:00, 9.03MB/s]But once you change 'open' to 'uc' it starts working:!gdown https://drive.google.com/uc?id=1nJZ9OmrbHVLZIRlCylb5cYrOxnqBLCYqDownloading... From: https://drive.google.com/uc?id=1nJZ9OmrbHVLZIRlCylb5cYrOxnqBLCYq To: /content/atom2.npy 0% 0.00/13.6k [00:00Now let's remove the directory and file(s) we created:!rm -r ColabIntro test.txt *.py *.npy open?* !lsdrive sample_dataDebuggingIn Jupyter notebook, perhaps one of the most convenient interfaces to debugging is the `%debug` magic command. If you call it after hitting an exception, it will automatically open an interactive debugging prompt at the point of the exception. The ipdb prompt lets you explore the current state of the stack, explore the available variables, and even run Python commands!Let's look at the following exception, then do some basic tasks–print the values of a and b, and type quit to quit the debugging session:def func1(a, b): return a / b def func2(x): a = x b = x - 1 return func1(a, b) func2(1) %debug> (2)func1()  1 def func1(a, b): ----> 2  return a / b  3   4 def func2(x):  5  a = x  ipdb> print(b) 0 ipdb> up > (7)func2()  5  a = x  6  b = x - 1 ----> 7  return func1(a, b)[0[...]InstallationsColab has most of popular machine/deep learning packages pre-installed but if a package that you would like to use is not in Colab, you can install it using ```pip``` command. For example:try: import numpyro except ModuleNotFoundError: !pip install numpyro # it is okay to just use this last lineIf a package needs to be installed with *setup.py* , you'll need to clone it first into your notebook's VM storage, cd into the cloned package directory and run ```python setup.py install``` command. 
For example:!git clone https://github.com/ziatdinovmax/GPim.git !cd GPim && python setup.py installimport pandas as pd data = pd.read_csv("drive/MyDrive/creditcard.csv") data.head(10) # amount -> transaction value print(data.isna().sum()) n_transactions = data['Class'].count() n_frauds = data['Class'].sum() # sum of all rows n_no_frauds = n_transactions - n_frauds frauds_percentage = n_frauds / n_transactions no_frauds_percentage = n_no_frauds / n_transactions print("Number of transactions: ", n_transactions) print("Number of frauds: ", n_frauds, "%.2f" %(frauds_percentage * 100)) print("Number of transactions without frouds: ", n_no_frauds, "%.2f" %(no_frauds_percentage * 100)) from sklearn.model_selection import StratifiedShuffleSplit def run_validator(x, y): validator = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=0) for train_id, test_id in validator.split(x, y) : x_train, x_test = x[train_id], x[test_id] y_train, y_test = y[train_id], y[test_id] return x_train, x_test, y_train, y_test %%time from sklearn import tree def run_classifier(classifier, x_train, x_test, y_train): tree = classifier.fit(x_train, y_train) y_pred = tree.predict(x_test) # predict whether transactions are fraud or not return y_pred import matplotlib.pyplot as plt def save_tree(classifier, name): plt.figure(figsize=(200,100)) tree.plot_tree(classifier, filled=True, fontsize=14) plt.savefig(name) plt.close() from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.metrics import precision_score from sklearn.metrics import recall_score def validate_tree(y_test, y_pred): print(accuracy_score(y_test, y_pred)) print(confusion_matrix(y_test, y_pred)) print(precision_score(y_test, y_pred)) # fraud transactions print(recall_score(y_test, y_pred)) # number of fraud hits / number of transactions classified as frauds # validator execution x = data.drop('Class', axis=1).values y = data['Class'].values x_train, x_test, y_train, y_test = run_validator(x, y) # classifier execution of decision_tree_classifier decision_tree_classifier = tree.DecisionTreeClassifier() y_predict_decision_tree = run_classifier(decision_tree_classifier, x_train, x_test, y_train) # creation of the decision tree figure save_tree(decision_tree_classifier, 'tree_decision1.png') # decision tree validation validate_tree(y_test, y_predict_decision_tree) print(decision_tree_classifier) print(decision_tree_classifier.get_depth()) # classifier execution of decision_tree_classifier decision_tree_classifier = tree.DecisionTreeClassifier(max_depth=10, random_state=0) y_predict_decision_tree = run_classifier(decision_tree_classifier, x_train, x_test, y_train) # decision tree validation validate_tree(y_test, y_predict_decision_tree) # classifier execution of decision_tree_classifier decision_tree_classifier = tree.DecisionTreeClassifier(max_depth=10, random_state=0, min_samples_leaf=10) y_predict_decision_tree = run_classifier(decision_tree_classifier, x_train, x_test, y_train) # decision tree validation validate_tree(y_test, y_predict_decision_tree) %%time # classifier execution of decision_tree_classifier decision_tree_classifier = tree.DecisionTreeClassifier(max_depth=5, random_state=0) y_predict_decision_tree = run_classifier(decision_tree_classifier, x_train, x_test, y_train) # decision tree validation validate_tree(y_test, y_predict_decision_tree) %%time from sklearn.ensemble import RandomForestClassifier random_forest_classifier = RandomForestClassifier(n_estimators=50, random_state=0, max_depth=10) 
# n_estimators -> number of trees y_predict_random_forest = run_classifier(random_forest_classifier, x_train, x_test, y_train) save_tree(random_forest_classifier.estimators_[0], "random_forest1") save_tree(random_forest_classifier.estimators_[1], "random_forest2") validate_tree(y_test, y_predict_random_forest) %%time from sklearn.ensemble import AdaBoostClassifier adaboost_classifier = AdaBoostClassifier(random_state=0) y_predict_adaboost = run_classifier(adaboost_classifier, x_train, x_test, y_train) save_tree(adaboost_classifier.estimators_[0], "adaboost1") save_tree(adaboost_classifier.estimators_[1], "adaboost2") validate_tree(y_test, y_predict_adaboost) %%time from sklearn.ensemble import AdaBoostClassifier adaboost_classifier = AdaBoostClassifier(random_state=0, n_estimators=100) y_predict_adaboost = run_classifier(adaboost_classifier, x_train, x_test, y_train) validate_tree(y_test, y_predict_adaboost) %%time from sklearn.ensemble import AdaBoostClassifier adaboost_classifier = AdaBoostClassifier(random_state=0, n_estimators=200) y_predict_adaboost = run_classifier(adaboost_classifier, x_train, x_test, y_train) validate_tree(y_test, y_predict_adaboost)0.9995435553526912 [[28429 3] [ 10 39]] 0.9285714285714286 0.7959183673469388A Pythagorean triplet is a set of three natural numbers, $a < b < c$, for which, \begin{equation}a^2 + b^2 = c^2\end{equation} For example, $3^2 + 4^2 = 9 + 16 = 25 = 5^2$.There exists exactly one Pythagorean triplet for which $a + b + c = 1000$.Find the product $abc$. Remark This is a fairly straighforward constraint satisfaction problem (CSP) and is perhaps most easily solved in a CSP modelling language such as MiniZinc. However, to employ such tools would be to defeat the very purpose of the exercise, which is to give us practice with implementation.from six.moves import range, reduceVersion 1: The Obviouspair_sum_eq = lambda n, start=0: ((i, n-i) for i in range(start, (n>>1)+1)) list(pair_sum_eq(21, 5))Note that $3a < a + b + c = 1000$, so $a < \frac{1000}{3} \Leftrightarrow a \leq \lfloor \frac{1000}{3} \rfloor = 333$ so $1 \leq a \leq 333$. Therefore, we need only iterate up to 333 in the outermost loop. Now, $b + c = 1000 - a$, so $667 \leq b + c \leq 999$, so we look at all pairs $333 \leq b < c$ such that $b + c = 1000 - a$ with the help of the function `pair_sum_eq`. Within the innermost loop, the $a, b, c$ now satisfy the constraints $a < b < c$ and $a + b + c = 1000$ so now we need only check that they indeed form a Pythagorean triplet, i.e. 
$a^2 + b^2 = c^2$, and yield it.def pythagorean_triplet_sum_eq(n): for a in range(1, n//3+1): for b, c in pair_sum_eq(n-a, start=n//3): if a*a + b*b == c*c: yield a, b, c list(pythagorean_triplet_sum_eq(1000)) prod = lambda iterable: reduce(lambda x,y: x*y, iterable) prod(pythagorean_triplet_sum_eq(1000))Version 2: Euclid's Formula# TODOProbabilistic programming with PyMCimport numpy as np import matplotlib.pyplot as plt import pymc as mc %matplotlib inline import matplotlib as mpl import scipy.stats from pydataset import data mpl.rcParams['figure.figsize'] = (6.0, 2.0) mpl.rcParams['figure.dpi'] = 120 ## Utility function to plot the graph of a PyMC model def show_dag(model): dag = mc.graph.dag(model) dag.write("graph.png",format="png") from IPython.display import Image i = Image(filename='graph.png') return i mpl.style.use('ggplot') np.random.seed(2019) # Neat things * Demo of MCMC algorithms and their sampling properties: https://chi-feng.github.io/mcmc-demo/ * Hamiltoninan MCMC visually explained (great animations): http://arogozhnikov.github.io/2016/12/19/markov_chain_monte_carlo.html * [A Conceptual Introduction to Hamiltonian Monte Carlo](https://arxiv.org/abs/1701.02434) An excellent paper on the theory of Hamiltonian Monte Carlo sampling * [Introduction to MCMC](http://www.inference.org.uk/mackay/erice.pdf) by Topic purposeWe will cover probabilistic **inference**. Rather than learning a single set of parameters by optimisation, we can model probability distributions over possible models that might be compatible with our data. We'll use Monte Carlo sampling to make it simple and easy (if not very efficient) to work with probabilistic models. MCMC models:* **Data**, which we observe as a collection of examples.* A **model** which has **structure** (a DAG) and **parameters*** Part of the model is a likelihood function which has "contact" with data; these we will call **observed random variables*** Part of the model specifies distributions over parameters of the **observed variables**. These are **unobserved random variables** PyMC We'll use the excellent PyMC module to do the inference. If you have questions about this module, you can read [this tutorial](http://arxiv.org/abs/1507.08050) or the [API docs](https://pymc-devs.github.io/pymc/). Fitting a normal distribution Bayesian Normal fittingWe use Monte Carlo sampling to estimate the mean and standard deviation of some data.We assume we have data generated by a random process where $x \sim \mathcal{N}(\mu, \sigma^2)$, but we don't know $\mu$ or $\sigma$. We can place priors on $\mu$ and $\sigma$ and try and infer a distribution of plausible values. Test dataWe generate some synthetic data from a known normal distribution. In this case we **know** that our data is in fact normally distributed, so our model assumptions are guaranteed to be correct. This isn't the typical case!!$$x \sim \mathcal{N}(-1, 1.5)$$## generate data with a known distribution ## this will be our "observed" data n_samples = 300 x_data = np.random.normal(-1,1.5, (n_samples,)) fig = plt.figure() ax = fig.add_subplot(1,1,1) ax.hist(x_data, bins=np.linspace(-5,5,15)) ax.set_title("Histogram of data") ax.set_xlabel("x") ax.set_ylabel("Count")We then create a model in PyMC. We have a single output variable `x`, which is **stochastic** and **observed**, and the data we have observed is `x_data`. As it is observed, we will use the likelihood of the data under different model settings to accept/reject samples in the process. 
We use $\tau$ to represent the *reciprocal of variance*, as this is the standard model that PyMC uses. It makes it slightly easier to parameterise in some cases.We have a model:$$\mu \sim \mathcal{N}(0, 10^2)\\\tau \sim \mathcal{\Gamma}(2.0, 20.0)\\x\sim\mathcal{N}(\mu, \frac{1}{\tau})\\$$# latent variables mu = mc.Normal('mu', mu=0, tau=1.0/(10*10)) # wide range for prior on means prec = mc.Gamma('prec', alpha=5.0, beta=20) # wide range for prior on precision import scipy.stats # plot the PDF of our prior xs = np.linspace(0, 100, 100) fig = plt.figure() ax = fig.add_subplot(1,1,1) # alpha = 1.0, beta = 20.0 ax.plot(xs, np.sqrt(1.0/scipy.stats.gamma(a=5.0, scale=8).pdf(xs))) ax.set_xlabel("$\\sigma$") ax.set_ylabel("$p(\\sigma)$") ax.set_title("Std. dev. $\\sigma$ prior (1/sqrt(tau))") # observed variable observed_stochastic = mc.Normal('observed', mu=mu, tau=prec, observed=True, value=x_data) # if we want to sample from unconditioned prior #observed_stochastic = mc.Normal('observed', #mu=mu, tau=prec)We have to set parents for every node. In this case, we have two parameters, $\mu$ and $\tau = \frac{1}{\sigma^2}$ to specify ($\tau$ is used to make it easier to parameterise priors over normals). We want to infer those, so we also make those stochastic variables, but unobserved (hidden). We specify the type of the distribution (here, `Normal` and `Uniform`) and we must then specify *those* parents. In this case, these are just concrete numbers (but we could go further if we wanted).We also add a "false" variable that will be used to make draws from the predictive posterior. It is a variable with the same parents as the observed posterior, but unobserved. Here we generate 20 posterior predictive samples for every accepted MCMC sample.We add a second "false" variable, that just transforms $tau$ back into $sigma$. The only purpose of doing this is so that PyMC captures samples in the right form and stores them in the trace object; we could just compute this after the fact.# generate samples with same distribution # here, we draw 20 samples in each sample pred_posterior = mc.Normal('predictive', mu=mu, tau=prec, size=20) # note: Lambda defines a deterministic transformation of its parents # and the lambda expression must specify the parent variable as a # default parameter (prec=prec) to make the model compile # (we'll see this as a triangular variable in the DAG, as it is not random) std_dev = mc.Lambda('std_dev', lambda prec=prec: np.sqrt(1.0/prec)) # display the graphical model model = mc.Model([mu, prec, std_dev, observed_stochastic, pred_posterior]) show_dag(model)We compile the model and show the graph. We can now draw samples from it, discarding the first portion:# sample from the distribution mcmc = mc.MCMC(model) # throw away first 5000 samples (burn=5000) mcmc.sample(iter=50000, burn=5000)The **trace** is the collection of posterior samples, as a straightforward array. 
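Before plotting, the trace numbers themselves are easy to summarise directly with NumPy; a short sketch using the same `mcmc.trace` accessor that appears below (it assumes the model and sampler defined above are in scope):

import numpy as np

mu_trace = mcmc.trace('mu')[:]          # posterior samples of the mean
sd_trace = mcmc.trace('std_dev')[:]     # posterior samples of the std. dev.
print("posterior mean of mu:   ", mu_trace.mean())
print("posterior median of mu: ", np.median(mu_trace))
print("90% credible interval:  ", np.percentile(mu_trace, [5.0, 95.0]))
print("posterior mean of sigma:", sd_trace.mean())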
We can plot these using the built in visualisation tool:# standard trace plot mc.Matplot.plot(mcmc)We can also access them directly as arrays and plot them more flexibly (including showing draws from the predictive posterior):def trace_hist(trace, name): n, bins, patches = plt.hist(trace, normed=True, bins=50) max_n = np.max(n) plt.title("Estimate of {var_name}".format(var_name=name)) # draw simple statistics ctr_max = 0.5 * (bins[np.argmax(n)] + bins[np.argmax(n)+1]) plt.axvline(ctr_max, ls='-', color='r', lw=2, label='MAP') plt.axvline(np.median(trace), ls='-', color='C1', lw=2, label='Median') plt.axvline(np.mean(trace), ls=':', color='k', label='Expected') # 90% credible interval plt.axvline(np.percentile(trace, 5.0), ls=':', color='C1') plt.axvline(np.percentile(trace, 95.0), ls=':', color='C1') plt.fill_between(x=[np.percentile(trace, 5.0), np.percentile(trace, 95.0)], y1=max_n, color='C1', alpha=0.2, label='90% credible') plt.text(np.mean(trace), 0.5*max_n, 'Mean') plt.legend() plt.gca().set_frame_on(False) def show_trace(mcmc, vars_): ## plot histograms of possible parameter values # from the trace for var,name in vars_.items(): fig = plt.figure() fig.set_facecolor("white") trace = mcmc.trace(var)[:].ravel() trace_hist(trace, name) def correlate_trace(mcmc, var_a, var_b): # plot the correlation between two variables # in the posterior as a scatter plot fig = plt.figure() ax = fig.add_subplot(1,1,1) ax.scatter(mcmc.trace(var_a)[:], mcmc.trace(var_b)[:], s=1, alpha=0.1) ax.set_aspect(1.0) ax.set_xlabel(var_a) ax.set_ylabel(var_b) show_trace(mcmc, {"mu":"mean", "prec":"precision", "std_dev":"std_dev", "predictive":"predictive posterior"})We can see if there are any correlations in the parameters (there probably shouldn't be very strong correlation in this case, though we'd expect the estimated `std_dev` to be higher when the `mean` is further from the true mean).correlate_trace(mcmc, "mu", "std_dev")Changes to try:* Show prior predictive (remove `observed=`)* Show fixing \mu and \tau to known values* Adjust $n$ to show effect of prior/posterior Transformations of variablesFitting a uniform distribution instead, but using transformed variables. We parameterise in terms of centre and width of a uniform distribution, but transform these variables to the (lower, upper) form that the `Uniform` expects. This is a very simple example of transformations for inference.$$c \sim \mathcal{N}(0,10^2)\\w \sim \mathcal{\Gamma}(2.0, 200)\\l = c-w\\u = c+w\\x \sim \mathcal{U}(l,u)$$# latent variables x_data = np.random.uniform(-2, 3, size=80) np.random.seed(21225) # Note: I *force* a good starting point (i.e. 
non-zero probability) # otherwise PyMC will not be able to form the model # this is why value=0 and value=100 are used ctr = mc.Normal('ctr', mu=0, tau=1e-2, ) # wide range for prior on means width = mc.Gamma('width', alpha=2.0, beta=200.0, value=100.0) # wide range for prior on precision # uniform variable, in a unknown range # note use of Lambda to transform parameters lower = mc.Lambda('upper', lambda ctr=ctr, width=width: ctr-width) upper = mc.Lambda('lower', lambda ctr=ctr, width=width: ctr+width) observed_stochastic = mc.Uniform('observed', upper=upper, lower=lower, observed=True, value=x_data) pred_posterior = mc.Uniform('predictive', upper=upper, lower=lower) # display the graphical model model = mc.Model([ctr, width, observed_stochastic, pred_posterior, upper, lower]) show_dag(model) mcmc = mc.MCMC(model) mcmc.sample(iter=50000, burn=1000, thin=8) show_trace(mcmc, {"ctr":"centre", "width":"width", "predictive":"Predictive posterior"}) mc.Matplot.plot(mcmc)--- Linear regression Graphical models Transformations of expressions to graphs is familiar to most computer scientists -- it is an essential part of most optimising compilers. For example, the equation of a straight line might be written as a graph (this is how a compiler would break down the expression): Adding unknownsIf we have multiple dependent random variables whose distribution we want to infer, we can draw a graph of dependencies to form a *graphical model*. This explictly models dependencies between **random variables** (i.e. ones we don't know the value of precisely) and inference can be performed on the entire graph. **In CS terms, we are writing expressions down without fixing the variables, and then allowing the distribution of the values to be inferred when we observe data.** This inference process narrows down the likely range a random variable could take on (hopefully!).In a **probabilistic graphical model**, some nodes in the graph are **observed** -- that is we know their state because we have explicity measured it, and others are **unobserved** -- we know (or have guessed) the form of their distribution but not the parameters of that distribution. Some dependencies are deterministic (i.e. fully defined by the values of their parents), while others are stochastic. We can infer the **posterior** distribution of unobserved nodes by integrating over the possible values that could have occured given the observed values.We can modify our straight line equation to write a model for **linear regression**:All we need to do is specify that we expected the output $y$ to be normally distributed around the equation of a line given by $m$ and $c$; we can now **infer** $\sigma, m, c$ from observed data. Or we can fix any of them, and infer the remainder (if, e.g. we knew in advance that $c=0$). Our assumption here is that we will observe data which has a **latent structure** modelled by a linear dependence on a variable $x$, plus some normally-distributed observation noise.**Note that we must put *some* prior distribution on every stochastic node and we can only observe stochastic nodes.** ---- Let's implement the linear regression model in the intro in practice, using PyMC to build a graphical model and then run MCMC to sample from the posterior (i.e. estimate the distribution of random variables after seeing some evidence).### Bayesian Linear Regression with pymc ### We use Monte Carlo sampling to estimate the distribution of a linear function with a normally ### distributed error, given some observed data. 
### Vaguely based on: http://matpalm.com/blog/2012/12/27/dead_simple_pymc/ and http://sabermetricinsights.blogspot.co.uk/2014/05/bayesian-linear-regression-with-pymc.html ## generate data with a known distribution ## this will be our "observed" data x = np.sort(np.random.uniform(0,20, (50,))) m = 2 c = 15 # Add on some measurement noise, with std. dev. 3.0 epsilon = data = np.random.normal(0, 3, x.shape) y = m * x + c + epsilon plt.plot(x,y, '.', label="Datapoints") plt.plot(x, m*x+c, '--', lw=3, label="True") plt.legend() plt.xlabel("x") plt.xlabel("y") ## Now, set up the PyMC model ## specify the prior distribution of the unknown line function variables ## Here, we assume a normal distribution over m and c m_unknown = mc.Normal('m', 0, 0.01) c_unknown = mc.Normal('c', 0, 0.001) ## specify a prior over the precision (inverse variance) of the error term # precision = 1/variance ## Here we specify a uniform distribution from 0.001 to 10.0 precision = mc.Uniform('precision', lower=0.001, upper=10.0) # this is just a convenience for plotting std_dev = mc.Lambda("std_dev", lambda precision=precision: np.sqrt(1.0/precision)) # specify the observed input variable # we use a normal distribution, but this has no effect -- # the values are fixed and the parameters # never updated; this is just a way of transforming x # into a variable pymc can work with # (it's really a hack) x_obs = mc.Normal("x_obs", 0, 1, value=x, observed=True) @mc.deterministic(plot=False) def line(m=m_unknown, c=c_unknown, x=x_obs): return x*m+c # specify the observed output variable y_obs = mc.Normal('y_obs', mu=line, tau=precision, value=y, observed=True ) model = mc.Model([m_unknown, c_unknown, precision, x_obs, y_obs, std_dev]) # display the graphical model show_dag(model) # sample from the distribution mcmc = mc.MCMC(model) mcmc.sample(iter=50000, burn=1000, thin=1) show_trace(mcmc, {"m":"m", "c":"c", "std_dev":"std_dev"})Draws from the posterior predictive model [](https://xkcd.com/2110)## now plot overlaid samples from the linear function ## Note: this *ignores* the error distribution we've estimated ## If we drew samples from the true posterior predictive, # we'd see much greater spread ## in possible simulations ms = mcmc.trace("m")[:] cs = mcmc.trace("c")[:] plt.title("Sampled fits") plt.plot(x, y, '.', label="Observed") xf = np.linspace(-20,40,200) for m,c in zip(ms[::20], cs[::20]): plt.plot(xf, xf*m+c, 'g-', alpha=0.01) plt.plot(x, x*m+c, '--', label="True", zorder=100) plt.legend() plt.xlim(-20,40) plt.ylim(-40,80)Simple diagnosticsIs our sampler taking uncorrelated samples? We can look at the **autocorrelation** of the samples. If they are perfectly unbiased, then this should be zero everywhere (no correlation between successive samples). We want to draw independent unbiased samples from the posterior, but an MCMC sampling process produces highly correlated samples (each sample depends on the previous). We want to measure and minimise that sample-to-sample correlation, which is captured by the autocorrelation.mc.Matplot.autocorrelation(mcmc)Basic convergence statistics Gelman-RubinMeasures intra-chain versus inter-chain variance (should be similar if mixing is good). 
Measures close to 1.0 indicate good mixing.# need to run chain multiple times to estimate the Gelman-Rubin # metric; as it compares different runs for similarity of variance for i in range(10): mcmc.sample(iter=50000, burn=1000, thin=1) # closer to 1.0 is better (means within chain variance is close to across chain # variance) mc.gelman_rubin(mcmc)Rafferty-LewisEstimates the burn-in and thinning required. See below for an explanation of burn-in and thinning.# this will print results for all 10 chains mc.raftery_lewis(mcmc, q=0.025, r=0.01)Sampling issues Burn-in and thinning The **great thing** about MCMC approaches is that you can basically write down your model and then run inference directly. There is no need to derive complex approximations, or to restrict ourselves to limited models for which we can compute answers analytically. Essentially, no maths by hand; everything is done algorithmically.The **bad thing** about MCMC approaches is that, even though it will do the "right thing" *asymptotically*, the choice of sampling strategy has a very large influence for the kind of sample runs that are practical to execute. Bayesian inference should depend only on the priors and the evidence observed; but MCMC approaches also depend on the sampling strategy used to approximate the posterior. Dealing with biased samplingMCMC tries to draw **independent, unbiased** samples from the posterior, but the sampling process (like Metropolis), is not inherently unbiased. For example, successive samples in a random walk are correlated and obviously not independent. And although the Markov Chain approach (under fairly relaxed assumptions) will asympotically sample from all of the posterior, if the random walk starts off very far from the bulk of the distribution, it will "wander in the wilderness" for some time before reaching significant probability density. This means early samples from the distribution might be unreasonably dense in very low probability regions in the posterior. How "good" the Markov chain is at sampling from the posterior is called **mixing**; some MCMC setups may mix very badly until they get warmed up.To mitigate these two common issues, there are a couple of standard tricks: * **Burn-in**, which ignores the first $n$ samples from an MCMC draw, to make sure the chain is "mixing" well. Typically, several thousand samples might be ignored.* **Thinnning**, which takes one sample from every $k$ consecutive samples from the chain, to reduce correlation. Values of raound 5-50 are common.Tuning these is a matter of art!The code below implements M-H sampling from the lecture notes, and then shows how burn-in and thinning can be applied.def metropolis(fx, q, x_init ,n): # Perform Metropolis MCMC sampling. # p(x): a function that can be evaluated anywhere. 
p(x) returns the value of p at x # q(): a function q that draws a sample from a symmetric distribution and returns it # x: a starting point # n: number of samples x = np.array(x_init) samples = np.zeros((n, x.shape[0])) accepted = np.zeros(n) # precompute random numbers random_uniform = np.random.uniform(0,1,n) for i in range(n): # find a new candidate spot to jump to x_prime = q(x) p_r = fx(x_prime) / fx(x) r = random_uniform[i] samples[i] = x_prime # if it's better, go right away if r0], samples[accepted<1] import scipy.stats # try adjusting the step size # 0.75 mixes well # 0.15 gets stuck in modes # 0.01 nevers goes anywhere # 5.0 is almost always rejected proposal_step_size = 5.0 # test the sampling process # create an interesting distribution p (just a mixture of two gaussians) A = np.array([[0.15, 0.9], [-0.3, 2.5]]) p1 = lambda x:scipy.stats.multivariate_normal(mean=[0,0], cov=A).pdf(x) p2 = lambda x:scipy.stats.multivariate_normal(mean=[3,0], cov=np.eye(2)).pdf(x) p = lambda x:p1(x)*0.5+p2(x)*0.5 # create a proposal distribution, with std. dev. 0.5 q = lambda x: np.random.normal(x,proposal_step_size,(2,)) # make 500 MCMC steps, starting in very bad point (to illustrate # the utility of burn in) accept, reject = metropolis(p,q,[10.5, 30], 5000) # plot a heatmap of the distribution, along with the # accepted and rejected samples from that MCMC chain fig = plt.figure(figsize=(10,10)) ax = fig.add_subplot(1,1,1) ax.plot(accept[:,0], accept[:,1], label="Path", lw=0.4) ax.plot(accept[:,0], accept[:,1], 'b.', label='Accepted', markersize=1) ax.plot(reject[:,0], reject[:,1], 'rx', label='Rejected', markersize=1) ax.legend() x,y = np.meshgrid(np.linspace(-5,5,30), np.linspace(-4,4,30)) img = ax.imshow(p(np.dstack([x,y])), extent=[-4,4,-4,4], cmap='viridis', origin='lower') ax.grid("off") fig.colorbar(img, ax=ax, fraction=0.046, pad=0.04) ax.set_title("MCMC sampling with Metropolis-Hastings") ## Burn-in and thinning plot # introduce correlations y = accept[:,1] x = np.arange(len(y)) # discard 400 samples, keep every 8th sample burn = 400 thin = 8 plt.plot(x[0:burn], y[0:burn], 'r:') plt.plot(x[burn::thin], y[burn::thin], 'go', markersize=1) plt.plot(x[burn:], y[burn:], 'k:', alpha=0.1, markersize=0.2) plt.plot(x[burn:], y[burn:], 'k.', alpha=0.1, markersize=0.2) plt.axvline(burn, c='r') plt.text(15,2.5,"Burn-in period")The red samples are discarded during burn-in, and the green samples (thinned to every 8th sample) are kept during the remainder of the sampling process. This helps to draw unbiased samples from the posterior. [The PyMC manual](https://pymc-devs.github.io/pymc/modelchecking.html) explains a number of other diagnostic statistics and plots. **None of these are definitive**, but can give skilled MCMC practitioners insight into the operation of the sampling process.If you're interested in leaning more about MCMC, David Mackay's [book chapter](http://www.inference.phy.cam.ac.uk/mackay/itprnn/ps/356.384.pdf) is a good reference. 
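To make the accept/reject rule concrete, here is a compact, self-contained Metropolis sketch in the same spirit as the code above (a sketch only, assuming a symmetric proposal and a target density we can evaluate pointwise; it is not the lecture implementation):

import numpy as np
import scipy.stats

def metropolis_sketch(log_p, propose, x0, n_samples, rng=None):
    """Minimal Metropolis sampler with a symmetric proposal (illustrative sketch)."""
    rng = np.random.default_rng() if rng is None else rng
    x = np.asarray(x0, dtype=float)
    samples = np.empty((n_samples, x.size))
    accepted = np.zeros(n_samples, dtype=bool)
    log_px = log_p(x)
    for i in range(n_samples):
        x_prime = propose(x)                       # candidate drawn from a symmetric q
        log_px_prime = log_p(x_prime)
        # accept with probability min(1, p(x')/p(x)); compare in log space for stability
        if np.log(rng.uniform()) < log_px_prime - log_px:
            x, log_px = x_prime, log_px_prime
            accepted[i] = True
        samples[i] = x
    return samples, accepted

# usage: random-walk sampling of a 1D standard normal, then burn-in and thinning
log_p = lambda x: scipy.stats.norm.logpdf(x[0])
propose = lambda x: x + np.random.normal(0, 0.75, size=x.shape)
samples, accepted = metropolis_sketch(log_p, propose, x0=[5.0], n_samples=5000)
kept = samples[500::5]                             # burn = 500, thin = 5
print(kept.mean(), kept.std(), accepted.mean())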
---- Logistic regression example: discrete dependent variableOn ye olde iris dataset, using the four flower measurements to predict whether or notthe species is `setosa` or another type of iris.We estimate a set of coefficients $\beta_0, \beta_1, \dots$ and use the logistic function to transform the a linear model into a probability for a Bernoulli variable.from pydataset import data from sklearn.model_selection import train_test_split iris = data("iris") iris["is_setosa"] = np.where(iris["Species"]=="setosa", 1, 0) iris.head() # split the data into a train and test set iris_train, iris_test = train_test_split(iris) print("Train size", iris_train.shape) print("Test size", iris_test.shape)Model:We have some coefficients $\beta$, which feed into our logistic function to produce $l$, and $y$ is Bernoulli distributed (0 or 1) with probability $l$.$$\beta_i \sim \mathcal{N}(0, 5)\\l = \frac{1}{1+e^{\beta_0 + \sum_i \beta_i x_i}}\\y \sim \mathcal{B}(l)\\$$# binary prediction of "is_setosa", using the four attributes # of the flower configuration # predictors (standardised) xs = np.array(iris_train.iloc[:, 0:4]) x_standardised = (xs - xs.mean()) / xs.std() # observed values ys = np.array(iris_train["is_setosa"]) # PyMC variable for inputs x_std = mc.Normal("x_std", 0, 1, value=x_standardised, observed=True) # 4 regression coefficients betas = mc.Normal("betas", mu=0, tau=1.0/(50*50), size=5, value=[0,0,0,0,0]) # link function @mc.deterministic def logistic(betas=betas, x_std=x_std): return 1.0 / (1 + np.exp(-(betas[0] + np.sum(betas[1:] * x_std)))) # observed output is Bernoulli distributed y = mc.Bernoulli("y", p=logistic, observed=True, value=ys) model = mc.Model([x_std, y,betas, logistic]) show_dag(model) mcmc = mc.MCMC(model) ## Run the sampler with 5 different chains mcmc.sample(iter=150000, burn=10000, thin=10) fig = plt.figure(figsize=(10,12)) for i in range(5): ax = fig.add_subplot(3,2,i+1) trace_hist(mcmc.trace("betas")[:,i], "$\\beta_{i}$".format(i=i)) plt.tight_layout() import sklearn.metrics # write link function for use in prediction def logistic_predict(betas, x_std): return 1.0 / (1 + np.exp(-(betas[0] + np.sum(betas[1:] * x_std, axis=1)))) # standardise predictors in test set test_x = iris_test.iloc[:, 0:4] test_x = (test_x - np.mean(test_x))/np.std(test_x) y_true = iris_test["is_setosa"]PredictionsWe can draw samples from the posterior and then use the regression coefficients to make new predictions. 
Annoyingly, we have to rewrite the logistic function, but this is easy to do.# plot for true versus predicted fig1 = plt.figure() ax1 = fig1.add_subplot(1,1,1) ax1.set_xlabel("True") ax1.set_ylabel("Predicted") ax1.set_title("True versus predicted") # plot for ROC curve fig2 = plt.figure() ax2 = fig2.add_subplot(1,1,1) ax2.set_xlabel("FPR") ax2.set_xlabel("TPR") confusions = [] beta_trace = mcmc.trace("betas")[:] # predict for i in range(6): # choose a random set of betas beta_ix = np.random.randint(0, beta_trace.shape[0]-1) beta_vec = beta_trace[beta_ix, :] y_pred = logistic_predict(beta_vec, test_x) ax1.scatter(y_true+np.random.normal(0,0.01, y_true.shape), y_pred,s=0.2) # bias is due to unbalanced classes (I think) y_class = np.where(y_pred<0.5, 0, 1) confusion = sklearn.metrics.confusion_matrix(y_true, y_class) confusions.append(confusion) fpr, tpr, thresholds = sklearn.metrics.roc_curve(y_true, y_pred) ax2.plot(fpr, tpr)Distribution of confusion matricesWe can show the (samples from) distribution of confusion matrices if we want:fig = plt.figure() ax1 = fig.add_subplot(1,2,1) ax1.imshow(np.mean(confusions, axis=0)) ax2 = fig.add_subplot(1,2,2) ax2.imshow(np.std(confusions, axis=0)) # show samples from the confusion matrices confusions = np.array(confusions) # some tensor reshaping fun... confusion_pad = np.stack([confusions, np.zeros_like(confusions)]).swapaxes(0,1) flat = np.concatenate(np.concatenate(confusion_pad, axis=0), axis=1) plt.imshow(flat, cmap='magma')---- Switchpoint model: more complex logic*[Source: https://ourworldindata.org/extreme-history-methods]*Data not provided, so hand-digitised via https://apps.automeris.io/wpd/import pandas as pd from io import StringIO # load data from a string data = StringIO("""year,poverty_rate 1819.8097502972653, 83.88791593695271 1849.6789536266351, 81.646234676007 1869.655172413793, 75.48161120840629 1889.821640903686, 71.6987740805604 1909.6076099881093, 65.67425569176883 1928.8228299643283, 56.42732049036777 1949.7502972651605, 54.8861646234676 1959.6432818073722, 44.09807355516638 1969.7265160523186, 35.69176882661996 1979.8097502972653, 31.62872154115587 1991.6052318668253, 23.782837127845866 2004.922711058264, 13.695271453590195 2001.8787158145064, 17.19789842381782 1999.0249702734839, 19.159369527145344 1995.9809750297266, 19.299474605954472 1987.0392390011891, 24.483362521891436 1989.8929845422117, 24.483362521891436 1983.9952437574316, 27.98598949211906 1980.9512485136743, 33.450087565674266 1992.936979785969, 22.521891418563897""") poverty_ = pd.read_csv(data) # deleting the dodgy data point # uncomment to experiment # poverty = poverty_.drop(labels=[6]) poverty = poverty_ poverty poverty.plot(x='year', y='poverty_rate', kind='scatter') plt.gca().set_frame_on(False)HypothesisWe model the data with a linear regression, but where there is a switchpoint, where the regression coefficient changes (i.e. piecewise linear with two pieces). 
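To make "piecewise linear with two pieces" concrete, here is a plain-NumPy sketch of one common form of such a mean function (a sketch only; the names mirror the β symbols and switchpoint s in the model below, and the exact functional form used there is an assumption on my part):

import numpy as np

def piecewise_mean(x, s, beta_0, beta_1, beta_2):
    # two-piece linear mean: one slope before the switchpoint s, another after
    return np.where(x < s,
                    beta_0 + beta_1 * (x - s),
                    beta_0 + beta_2 * (x - s))

years = np.array([1850.0, 1950.0, 2000.0])
print(piecewise_mean(years, s=1935.0, beta_0=50.0, beta_1=-0.3, beta_2=-0.8))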
We estimate both the regression coefficients at each position and the location of the switchpoint.$$s \sim \mathcal{N}{(1960, 100)}\\\beta_0 \sim \mathcal{N}(50, 10)\\\beta_1 \sim \mathcal{N}(-1, 2)\\\beta_2 \sim \mathcal{N}(-1, 2)\\$$$$\mu = \begin{cases}xs & \beta_0 + \beta_2 (x-s)\\\end{cases}$$$$\tau \sim \mathcal{\Gamma}(1, 10) \\y \sim \mathcal{N}(\mu, \frac{1}{\tau})$$# PyMC variable for inputs x = mc.Normal("x", mu=0, tau=1, observed=True, value=poverty["year"]) # 3 betas beta_0 = mc.Normal("beta_0", mu=50, tau=1.0/(10.0)) beta_1 = mc.Normal("beta_1", mu=-1, tau=1.0/(2*2)) beta_2 = mc.Normal("beta_2", mu=-1, tau=1.0/(2*2)) precision = mc.Gamma("precision", alpha=0.5, beta=1) # alternatively, could postulate uniform #switch = mc.Uniform("switch", lower=1820, upper=2020) switch = mc.Normal("switch", mu=1935, tau=1.0/(50.0*50.0)) # link function @mc.deterministic def switch_mu(beta_0=beta_0, beta_1=beta_1, beta_2=beta_2, x=x, switch=switch): return np.where(x---- A simple mixture model: discrete + continuous latent variables When things get trickyWe can include both **discrete** and **continuous** variables. A very important case is where we have a **mixture model**. That is, we believe our observations come from one of a number of distributions. For example, in modelling human heights, we might expect height to be normally distributed, but to have two different distributions for men and women.It is very straightforward to add this to a PyMC graphical model; it is just another random variable to infer. However, sampling is another matter.In this case we do full **clustering**. That is, we suppose the data is generated by three different processes, each of which is normal with some unknown mean and variance, and we have to estimate:* The parameters of each of $n$ process/clusters* The index of the cluster/process that each observation belongs to.This means we have one discrete parameter for *every* data point; we need to label each data point during inference. This is very hard to sample from, as it is a high-dimensional discrete space.## Adapted from the example given at ## http://stackoverflow.com/questions/18987697/how-to-model-a-mixture-of-3-normals-in-pymc # if you touch this seed, the fit breaks :) # this is *not* a stable fit with these parameters! np.random.seed(2028) n = 3 ndata = 2000 ## Generate synthetic mixture-of-normals data, # with means at -50,0,+50, and std. dev of 5,10,1 v = np.random.randint( 0, n, ndata) data = ((v==0)*(np.random.normal(50,5,ndata)) + (v==1)*(np.random.normal(-50,10,ndata)) + (v==2)*np.random.normal(0,1,ndata)) ## Plot the original data plt.hist(data, bins=50); ## A Dirichlet distribution specifies the distribution over categories ## All 1 means that every category is equally likely dd = mc.Dirichlet('dd', theta=(1,)*n) ## This variable "selects" the category (i.e. the normal distribution) ## to use. The Dirichlet distribution sets the prior over the categories. category = mc.Categorical('category', p=dd, size=ndata) ## Now we set our priors the precision and mean of each normal distribution ## Note the use of "size" to generate a **vector** of variables # (i.e. one for each category) ## We expect the precision of each normal to be Gamma distributed # (this mainly forces it to be positive!) precs = mc.Gamma('precs', alpha=1, beta=10, size=n) ## And the means of the normal to be normally distributed, with a precision of 0.001 # (i.e. std. 
dev 1000) means = mc.Normal('means', 0, 1.0/(100*100), size=n) ## These deterministic functions link the means of the observed distribution # to the categories ## They just select one of the elements of the mean/precision vector, # given the current value of category ## The input variables must be specified in the parameters, so that # PyMC knows which variables to pass to it @mc.deterministic def mean(category=category, means=means): return means[category] @mc.deterministic def prec(category=category, precs=precs): return precs[category] ## Now we specify the variable we observe -- which is normally distributed, *but* ## we don't know the mean or precision. # Instead, we pass the **functions** mean() and pred() ## which will be used at each sampling step. ## We specify the observed values of this node, and tell PyMC these are observed ## This is all that is needed to specify the model obs = mc.Normal('obs', mean, prec, value=data, observed = True) ## Now we just bundle all the variables together for PyMC model = mc.Model({'dd': dd, 'category': category, 'precs': precs, 'means': means, 'obs': obs}) show_dag(model) mcmc = mc.MCMC(model) ## Now we tell the sampler what method to use ## Metropolis works well, but we must tell PyMC to use a specific ## discrete sampler for the category variable to get good results in a reasonable time mcmc.use_step_method(mc.AdaptiveMetropolis, model.means) mcmc.use_step_method(mc.AdaptiveMetropolis, model.precs) mcmc.use_step_method(mc.DiscreteMetropolis, model.category) ## this step is key! mcmc.use_step_method(mc.AdaptiveMetropolis, model.dd) ## Run the sampler with 5 different chains mcmc.sample(iter=150000, burn=1000) plt.figure() plt.hist(mcmc.trace('means', chain=None).gettrace()[:], normed=True, bins=np.linspace(-100,100,50)) plt.title("Estimated means") plt.legend(['Component 1', 'Component 2', 'Component 3']) plt.figure() ## show the result in terms of std. dev. (i.e sqrt(1.0/precision)) plt.title("Estimated std. dev") plt.hist(np.sqrt(1.0/mcmc.trace('precs', chain=None).gettrace()[:]), normed=True, bins=np.linspace(0,15,50)) plt.legend(['Component 1', 'Component 2', 'Component 3'])Mixture modelling without classificationIf all we wanted to do was to estimate the parameters of the mixture (i.e. the PDF), and *not* perform the clustering process that assigns labels to datapoints, then we can write a simpler model. We write a custom stochastic variable representing a mixture-of-Gaussian likelihood function with vector parameters. This then lets us estimate the distribution but does not identify the classes each data point belongs to. This has no discrete parameters and is easier to fit. We can try and do this class labeling post hoc, assigning each observation to the most probable class, but this loses the uncertainty about class membership that we have in the full model above.In this case we *don't* model the mixture weights and assume they are all equal (this could be done but it makes it harder to sample from).## We expect the precision of each normal to be Gamma distributed # (this mainly forces it to be positive!) 
precs = mc.Gamma('precs', alpha=1, beta=1, size=n) means = mc.Normal('means', 0, 1/1e5, size=n) @mc.stochastic(observed=True) def mog(means=means, precs=precs, value=data): def logp(value, means, precs): ll = [scipy.stats.norm.logpdf(x=value, loc=means[i], scale=np.sqrt(1.0/precs[i])) for i in range(len(means))] # note: we assign data points to the most likely component ll = np.sum(np.max(ll, axis=0)) return ll def random(means, precs): ix = np.random.choice(ixs) return scipy.stats.norm.rvs(loc=means[ix], scale=np.sqrt(1.0/precs[ix]), size=value.shape) obs = mog ## Now we just bundle all the variables together for PyMC model = mc.Model([precs, means, obs]) mcmc = mc.MCMC(model) mcmc.sample(iter=150000, burn=15000) mc.Matplot.trace(mcmc) bins = np.linspace(-100,100,200) plt.hist(mcmc.trace("means")[:,0], bins=bins); plt.hist(mcmc.trace("means")[:,1], bins=bins); plt.hist(mcmc.trace("means")[:,2], bins=bins); bins = np.linspace(0,20,50) to_std = lambda x: np.sqrt(1.0/x) plt.hist(to_std(mcmc.trace("precs")[:,0])) plt.hist(to_std(mcmc.trace("precs")[:,1])) plt.hist(to_std(mcmc.trace("precs")[:,2]))Imputation in quadratic regression In PyMC, variables can be **observed** (fixed) or **unobserved** (random). PyMC cycles through the array of known values for the **observed** variables and updates the rest of the graph.PyMC implements this using **imputation**, where certain missing values in an observed variable can be inferred (*imputed*) from the rest of the model. This creates new random variables and then infers the missing values. **Masked arrays** are used to implement imputation; these allow arrays to have "blank" values, that PyMC can fill in automatically.This approach creates one new random variable per missing data item; this can create very large models if you are not careful!## Example, using very simple quadratic regression model import numpy.ma as ma # masked array support ## generate the data for the regression x = np.sort(np.random.uniform(0, 20, (50,))) m = 2 c = 15 # Add on some measurement noise, with std. dev. 
3.0 epsilon = data = np.random.normal(0, 200, x.shape) y = m * x * x + c + epsilon ## Now the imputation; we will try and infer missing some missing values of y (we still have the corresponding x) ## mark last three values of y invalid y_impute = y[:] n_missing = 6 impute_ixs = np.sort(np.random.randint(0, len(y)-1, size=n_missing)) y_impute[impute_ixs] = 0 y_impute = ma.masked_equal(y_impute,0) print("Y masked for imputation:", y_impute) # we will see the last three entries with -- # create the model (exactly as before, except we switch "y_impute" for "y") m_unknown = mc.Normal('m', 0, 0.01) c_unknown = mc.Normal('c', 0, 0.001) precision = mc.Gamma('precision', alpha=1, beta=5) std = mc.Lambda('std_dev', lambda precision=precision: np.sqrt(1.0/precision)) x_obs = mc.Normal("x_obs", 0, 1, value=x, observed=True) @mc.deterministic(plot=False) def line(m=m_unknown, c=c_unknown, x=x_obs): return x*x*m+c y_obs = mc.Normal('y_obs', mu=line, tau=precision, value=y_impute, observed=True) model = mc.Model([m_unknown, c_unknown, std, precision, x_obs, y_obs]) # sample from the distribution mcmc = mc.MCMC(model) mcmc.sample(iter=100000, burn=5000, thin=10) ## now we will have three entries in the y_obs trace from this run y_trace = mcmc.trace('y_obs')[:] fig = plt.figure() ax = fig.add_subplot(1,1,1) ## the original data ax.plot(x, y, '.', label="Data") ax.plot(x, x*x*m+c, ':', label="True") m_sample = mcmc.trace("m")[:] c_sample = mcmc.trace("c")[:] for i in range(20): m_post = np.random.choice(m_sample) c_post = np.random.choice(c_sample) ax.plot(x, x*x*m_post + c_post, "g", alpha=0.1, label="Posterior" if i==0 else None) # samples from posterior predicted for the missing values of y for i in range(len(impute_ixs)): ax.axvline(x[impute_ixs[i]], c='C1', alpha=0.1, label="Imputed" if i==0 else None) # plot the actual imputed data points ax.scatter(np.tile(x[impute_ixs[i]], (len(y_trace), 1)), y_trace[:,i], s=2, c='C3', marker='_', alpha=0.25) # uncomment to add box plots #ax.boxplot([y_trace[:,i]], positions = # [x[impute_ixs[i]]], widths=2, bootstrap=200, # notch=True, showfliers=False ) ax.set_xlim(-1,25) ax.set_xticks(np.arange(0,25,5)) ax.set_xticklabels(np.arange(0,25,5)) ax.legend() show_trace(mcmc, {"std_dev":"Standard deviation", "m":"m", "c":"c"})The motivation behind this notebook comes from [](https://sebastianraschka.com/) who constantly inspires me through his work in the field of deep learning. Quite a few days, he open-sourced his repo [deeplearning-models](https://github.com/rasbt/deeplearning-models) which contains implementations of a wide variety of deep learning models. 
I started with [this notebook](https://github.com/rasbt/deeplearning-models/blob/master/pytorch_ipynb/autoencoder/ae-basic.ipynb) which shows a very simplistic and minimal implementation of a fully-connected _autoencoder_.!pip install tensorflow-gpu==2.0.0-beta0 # Imports import tensorflow as tf from tensorflow.keras.datasets import mnist import numpy as np np.random.seed(7) print(tf.__version__) # Load the MNIST dataset (X_train, y_train), (X_test, y_test) = mnist.load_data() print(X_train.shape, X_test.shape) # Define the constants NUM_FEATURES = 784 UNITS = 32 # Custom class for a simple FC autoencoder class AutoEncoder(tf.keras.Model): def __init__(self, num_features, units): super(AutoEncoder, self).__init__() self.encoder = tf.keras.layers.Dense(units, activation='linear', input_shape=(num_features,), kernel_initializer='glorot_normal', bias_initializer='zeros') self.decoder = tf.keras.layers.Dense(num_features, activation='linear', input_shape=(units,)) self.leaky_relu = tf.keras.layers.LeakyReLU(0.5) def call(self, x): encoded = self.encoder(x) encoded = self.leaky_relu(encoded) decoded = tf.sigmoid(self.decoder(encoded)) return decoded # Instantiate the autoencoder auto_encoder = AutoEncoder(NUM_FEATURES, UNITS) # Define loss function and optimizer loss_func = tf.keras.losses.BinaryCrossentropy() optimizer = tf.keras.optimizers.Adam(learning_rate=0.001) # Flatten the training images X_train_copy = X_train.copy() X_train_copy = X_train_copy.reshape(60000, 28*28).astype(np.float32) print(X_train_copy.shape) # Batches of 64 train_ds = tf.data.Dataset.from_tensor_slices((X_train_copy, y_train)).batch(64) # Average out the loss after each epoch train_loss = tf.keras.metrics.Mean(name='train_loss') # Train the model @tf.function def model_train(features): # Define the GradientTape context with tf.GradientTape() as tape: # Get the probabilities decoded = auto_encoder(features) # Calculate the loss loss = loss_func(decoded, features) # Get the gradients gradients = tape.gradient(loss, auto_encoder.trainable_variables) # Update the weights optimizer.apply_gradients(zip(gradients, auto_encoder.trainable_variables)) train_loss(loss) return decoded # Begin training decode_list = [] for epoch in range(20): for features, _ in train_ds: features = features decoded = model_train(features) template = 'Epoch {}, loss: {}' print (template.format(epoch+1, train_loss.result())) %matplotlib inline import matplotlib.pyplot as plt ########################## ### VISUALIZATION ########################## n_images = 15 image_width = 28 fig, axes = plt.subplots(nrows=2, ncols=n_images, sharex=True, sharey=True, figsize=(20, 2.5)) orig_images = features[:n_images].numpy() decoded_images = decoded[:n_images].numpy() for i in range(n_images): for ax, img in zip(axes, [orig_images, decoded_images]): curr_img = img[i] ax[i].imshow(curr_img.reshape((image_width, image_width)), cmap='binary')Web Scraping of BlockChain Whitepapers Using the following libraries for data extraction from the webpage https://www.allcryptowhitepapers.com/whitepaper-overview/* Requests* BeautifulSoup4* Selenium (Optional)#Loading Libraries import re import time import urllib import urllib.request from bs4 import BeautifulSoup import pandas as pd url = "https://www.allcryptowhitepapers.com/whitepaper-overview" page = urllib.request.urlopen(url) soup = BeautifulSoup(page,"html.parser") #Extract title of webpage print(soup.title.text) #Extract the table from containing links to the white papers table = soup.find("table") table_rows = 
table.find_all('td') print(table_rows) #Extract links from table links = table.find_all('a') whitepaper_urls = [] for link in links: url = link.get('href') whitepaper_urls.append(url) print(whitepaper_urls) df1 = pd.DataFrame(whitepaper_urls,columns=['Whitepaper_URLs']) print(df1) #Extract titles whitepaper_titles = soup.find_all('td',attrs={'class':'column-1'}) print(whitepaper_titles) type(whitepaper_titles) str_titles = str(whitepaper_titles) crypto_lists = BeautifulSoup(str_titles,"lxml").get_text() print(crypto_lists) type(crypto_lists) whitepaper_titles =crypto_lists.split(",") df = pd.DataFrame(whitepaper_titles,columns=['Cryptocurrency']) print(df) import numpy as np whitepaper_data = pd.concat([df,df1],sort=True,axis=1) whitepaper_data len(whitepaper_data)Binary classifiertrain_data = np.random.rand(10000, 4) train_labels = train_data[:,0] > 0.5 one_hot_train_labels = keras.utils.to_categorical(train_labels, num_classes=2) model = Sequential() model.add(Dense(6, activation='relu', input_dim=4)) model.add(Dense(6, activation='relu')) model.add(Dense(6, activation='relu')) model.add(Dense(2, activation='softmax')) model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy']) model.fit(train_data, one_hot_train_labels, epochs=10, batch_size=32) test_data = np.random.rand(10, 4) print(test_data[:,0]) model.predict(test_data)[0.92801006 0.02156161 0.30338793 0.85019897 0.89210832 0.6237386 0.65671859 0.0762687 0.07459341 0.38272083]Multiclass classfifierbins = np.array([0.0, 0.25, 0.5, 0.75, 1.0]) train_data = np.random.rand(10000, 4) train_labels = np.digitize(train_data[:,0], bins) - 1 one_hot_train_labels = keras.utils.to_categorical(train_labels, num_classes=4) model = Sequential() model.add(Dense(6, activation='relu', input_dim=4)) model.add(Dense(6, activation='relu')) model.add(Dense(6, activation='relu')) model.add(Dense(4, activation='softmax')) model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy']) model.fit(train_data, one_hot_train_labels, epochs=10, batch_size=32) test_data = np.random.rand(10, 4) print(test_data[:,0]) model.predict(test_data)Epoch 1/10 10000/10000 [==============================] - 1s 128us/step - loss: 0.5216 - acc: 0.7538 Epoch 2/10 10000/10000 [==============================] - 1s 61us/step - loss: 0.3711 - acc: 0.8259 Epoch 3/10 10000/10000 [==============================] - 1s 55us/step - loss: 0.2366 - acc: 0.9164 Epoch 4/10 10000/10000 [==============================] - 1s 52us/step - loss: 0.1543 - acc: 0.9700 Epoch 5/10 10000/10000 [==============================] - 1s 52us/step - loss: 0.1116 - acc: 0.9776 Epoch 6/10 10000/10000 [==============================] - 1s 61us/step - loss: 0.0877 - acc: 0.9808 Epoch 7/10 10000/10000 [==============================] - 1s 53us/step - loss: 0.0735 - acc: 0.9833 Epoch 8/10 10000/10000 [==============================] - 1s 53us/step - loss: 0.0634 - acc: 0.9860 Epoch 9/10 10000/10000 [==============================] - 1s 54us/step - loss: 0.0562 - acc: 0.9874 Epoch 10/10 10000/10000 [==============================] - 1s 61us/step - loss: 0.0504 - acc: 0.988[...]This notebook justifies the `dist=arenas` option in the `initial_state` function.In Arenas' paper, the initial condition is justified as follows:- Plants seeds according to the first reported cases- Adds in additional seeds for particular hotspots- This boils down to 47 seeds = 0.2 percent of the total March 20 2020 reported cases- These seeds are added to the 'asymptomatic infectious class', 
which is the first class after 'susceptible'.Our approach is slightly different:- Take a fixed number of people- Distribute these people as 'seeds' according the the number of hospitalisations at March 20th 2020- These are generally _not_ whole numbers, but the model does not see a difference between whole or fractional numbersimport pandas as pd import numpy as np # import glob %matplotlib notebook import matplotlib.pyplot as plt import sys import datetime # sys.path.insert(0, "../tools") from covid19model.data.mobility import * # contains all necessary functions from covid19model.visualization.utils import moving_avg from covid19model.data.sciensano import * # OPTIONAL: Load the "autoreload" extension so that package code can change %load_ext autoreload # OPTIONAL: always reload modules so that as you change code in src, it gets loaded %autoreload 2 max_date = '2020-03-20' values = 'hospitalised_IN' agg = 'arr' df = get_sciensano_COVID19_data_spatial(agg=agg, values=values, moving_avg=True) max_value = df.loc[max_date].sum() df_new = (df.loc[max_date] / max_value * 2) # Now properly scale this to the desired total number of people in the initial condition # Add particular seeds for special events (if needed in Belgium?) df_new.loc[11000]Correlationimport pandas_datareader as pdr import datetime as dt import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib notebook tickers = ['SPY', 'TLT'] start = dt.datetime(2008, 1, 1) end = dt.datetime(2017, 12, 31) data = pdr.get_data_yahoo(tickers, start, end) data = data['Adj Close'] log_returns = np.log(data/data.shift()) log_returns.corr() fig, ax = plt.subplots() (data/data.iloc[0]).plot(ax=ax) data_set = data.loc['2008-05':'2011-04'] fig, ax = plt.subplots() (data_set/data_set.iloc[0]).plot(ax=ax)Project- Calculate the return (CAGR), maximal drawdown and volatility of TLT Step 1- Calculate the return (CAGR) of SPY and TLTcagr_spy = (data['SPY'].iloc[-1]/data['SPY'].iloc[0])**(1/10) - 1 cagr_tlt = (data['TLT'].iloc[-1]/data['TLT'].iloc[0])**(1/10) - 1 cagr_spy, cagr_tltStep 2- Calculate the maximum drawdown of SPY and TLTdef max_drawdown(data): rolling_max = data.cummax() daily_drawdown = data/rolling_max - 1 max_drawdown = daily_drawdown.cummin().iloc[-1] return max_drawdown max_drawdown(data['SPY']), max_drawdown(data['TLT'])step 3- Calculate the volatilitylog_returns.std()*(252**0.5)Regressionimport numpy as np from linearReg import linear_regression from logisticReg import LogisticRegression from accuracy import accuracy import pandas as pd from sklearn import datasets from sklearn.model_selection import train_test_split import matplotlib.pyplot as pltLinear Regression""" Importing Cars Datasets """ # Reading excel file df = pd.read_csv("data/cars.csv") X = np.array(df.iloc[:,:-1]) y = np.array(df.iloc[:,-1]) X_train, X_test, Y_train ,Y_test = train_test_split(X,y , test_size=0.2,shuffle=True) fig = plt.figure(figsize=(25,8)) plt.scatter(X_test[:,0],Y_test,color='b', marker=".",s=30) plt.show() linReg = linear_regression(lr=1e-6,n_iters=1000) Weights_LR, Losses_LR = linReg.fit(X_train,Y_train,verbose=False) plt.plot(Losses_LR) Losses_LR[-1] fig_reg, ax_reg = plt.subplots(1, 2, figsize=(25,10)) ax_reg[0].set_xlim(0,50) ax_reg[0].set_ylim(-150,150) ax_reg[1].set_xlim(0,50) ax_reg[1].set_ylim(-150,150) ax_reg[0].scatter(X[:,0],y,color='r',s=20 ) ax_reg[1].scatter(X[:,0],y,color = 'b',s=20) for i,line in enumerate(Weights_LR[::100]): b = line[0] W = line [1:] x0_min = 0 # The choice of x0(min/max) is based on the 
interval of x where the data is located, in our case the data is distributed in # the interval [-6, 4] over the x_axis x1_min = ((W[0]*x0_min) + b) / W[1] x0_max = 50 x1_max = ((W[0]*x0_max) + b) / W[1] # # # Classifier Equation # # # # # # x0*w0 + x1*w1 + b = 0 # # x1*w1 = -b - x0*w0 # # x1 = -(b + x0*w0) / w1 # # # # # # # # # # # # # # # # # # # # Y_hat = b + W*X_train if i == 10: c , ls, lw = 'k', '-', 2 ax_reg[0].plot( [x0_min,x0_max] ,[x1_min,x1_max], c=c, ls=ls, lw=lw, label="Prediction") # ax_reg[0].plot(X_train[:,0],Y_hat, c=c, ls=ls, lw=lw, label='Prediction') elif i == 0 : c, ls, lw = 'r', '--', 2 ax_reg[0].plot( [x0_min,x0_max] ,[x1_min,x1_max], c=c, ls=ls, lw=lw, label="0'th Iteration") # ax_reg[0].plot(X_train[:,0],Y_hat, c=c, ls=ls, lw=lw, label='0th Iteration') else: c , ls, lw = 'g', '--', 2 ax_reg[1].plot([x0_min,x0_max], [x1_min,x1_max], c = c, ls=ls, lw=lw, label=f"{i*100}'th Iteration") # ax_reg[1].plot(X_train[:,0],Y_hat, c=c, ls=ls, lw=lw, label=f"{i*100}'th Iteration") ax_reg[0].set_title("Plotting the predictor") ax_reg[1].set_title("Evolution of the predictor") ax_reg[0].legend() ax_reg[1].legend() plt.suptitle("Linear Regression",fontsize="x-large") plt.show() Y_predict = linReg.predict(X_test) fig = plt.figure(figsize=(8,6)) m1 = plt.scatter(X_train[:,0],Y_train,color='r',s=20,label='Training Data') m2 = plt.scatter(X_test[:,0],Y_test,color='b',s=20,label='Testing Data') b = linReg.weight[0] W = linReg.weight[1:] x0_min = 0 x1_min = ((W[0]*x0_min) + b) / W[1] x0_max = 50 x1_max = ((W[0]*x0_max) + b) / W[1] # plt.plot(X_test[:,0],Y_predict,color="black",linewidth=2,label='Prediction on the test data') plt.plot([x0_min,x0_max], [x1_min,x1_max],'-',color="black",linewidth=2,label='Prediction on the test data') plt.legend() plt.show() loss_test = np.mean((Y_test-Y_predict)**2)*(1/y.shape[0]) print(f"Predicted MSE : {loss_test}") linReg = linear_regression(lr=1e-5,n_iters=1000) Weights_LR, Losses_LR = linReg.fit(X_train,Y_train,verbose=False)Logistic Regression Approximation$$f(x) = wx + b$$\Sigmoid function $$s(x) = \frac{1}{1+e^{x}}$$Approximation function $$\hat{y} = h_{\theta}(x) = \frac{1}{1+e^{-f(x)}} = \frac{1}{1+e^{-(xw+b)}}$$ Cost Function 1. Cross-Entropy Error $$J(w,b) = J(\theta) = \frac{1}{N} \sum_{i=1}^{n}{[y^i \log(h_{\theta}(x^i)) + (1 - y^i) \log(1 - h_{\theta}(x^i))]}$$ 2. Loss Function - as seen in the lecture\    Since: $$P(y|x) = \frac{1}{1+ e^{-y }}$$\ It is clear that the loss function will increase monotonically if the probability $P(y|x)$ decreases.\ This implies that, it will increse monotonically if $1+ e^{-y }$ increases. Therefore, the loss function :\ $$J(w,b) = log( 1+ e^{-y })$$ The ERM problem associated with logistic regression : $$argmin_{w}(L_s(h_w)) = argmin_{w\in{R^d}}(\frac{1}{m} \sum_{i=1}^{m}\log(1+ e^{-y_i })) $$ To solve using gradient descent $$\nabla_{w}{L_s(h_w)} = \frac{-yx e^{-y }}{1+e^{-y }} $$ where $x = (1, x_1, x_2, ..., x_d)$ Update Rule$$w = w - \alpha . dw$$$$b = b - \alpha . 
db$$Where : $$J^{'}(\theta) = \begin{bmatrix} \frac{dJ}{dw} \\ \frac{dJ}{db} \end{bmatrix} =\begin{bmatrix} {...} \end{bmatrix} = \begin{bmatrix} \frac{1}{N} \sum{2 x_i (\hat{y} - y_i)}\\ \frac{1}{N} \sum{2 (\hat{y} -y_i)} \end{bmatrix}$$# """" bc = datasets.load_breast_cancer() Xl, Yl = bc.data, bc.target Xl_train, Xl_test, Yl_train, Yl_test = train_test_split(Xl, Yl, test_size=0.2, random_state=20) # """" # """" fig = plt.figure(figsize=(10,6)) plt.scatter(Xl[:,0],Yl,c=Yl,cmap='bwr' ,marker=".",s=30) plt.show() # """" def accuracy (y_true, y_pred): return np.sum(y_true == y_pred)/ len(y_true) logReg = LogisticRegression(lr=1e-5, n_iter=1000) # logReg = logisticRegression(lr=0.001) logReg.fit(Xl_train,Yl_train) prediction = logReg.predict(Xl_test) print(f"The accuracy of the Logitic regression model is : {accuracy(Yl_test,prediction)}") fig = plt.figure(figsize=(10,6)) plt.scatter(Xl_test[:,0],prediction,c=Yl_test,cmap='bwr', marker=".",s=30) plt.show()Multidimensional Linear Regression# #Reading excel file # df = pd.read_excel("data/pop.xlsx") # X_pop = np.array(df.iloc[:,:-1]) # Y_pop = np.array(df.iloc[:,-1]) # Xpop_train, Xpop_test, Ypop_train ,Ypop_test = train_test_split(X_pop,Y_pop, test_size=0.2,shuffle=True) Xr, yr = datasets.make_regression(n_samples=1000, n_features=1, noise=20, random_state=10) Xr_train, Xr_test, Yr_train, Yr_test = train_test_split(Xr, yr, test_size=0.25, random_state=20) fig = plt.figure(figsize=(25,8)) plt.scatter(Xr[:,0],yr,cmap='bwr' ,marker=".",s=30) plt.show() linReg = linear_regression(lr=1e-3,n_iters=10) Weights_r, Losses_r = linReg.fit(Xr_train,Yr_train,verbose=False) plt.plot(Losses_r) Losses_r[-1] fig_r, ax_r = plt.subplots(1, 2, figsize=(25,10)) ax_r[0].set_xlim(-5,5) ax_r[0].set_ylim(-150,150) ax_r[1].set_xlim(-5,5) ax_r[1].set_ylim(-150,150) ax_r[0].scatter(Xr[:,0],yr,color='r',s=20 ) ax_r[1].scatter(Xr[:,0],yr,color = 'b',s=20) for i,line in enumerate(Weights_r): b, W = line Yr_hat = b + W*Xr_train if i == 10: c , ls, lw = 'k', '-', 2 # ax_reg[0].plot( [x0_min,x0_max] ,[x1_min,x1_max], c=c, ls=ls, lw=lw, label="Prediction") ax_r[0].plot(Xr_train[:,0],Yr_hat, c=c, ls=ls, lw=lw, label='Prediction') elif i == 0 : c, ls, lw = 'r', '--', 2 # ax_reg[0].plot( [x0_min,x0_max] ,[x1_min,x1_max], c=c, ls=ls, lw=lw, label="0'th Iteration") ax_r[0].plot(Xr_train[:,0],Yr_hat, c=c, ls=ls, lw=lw, label='0th Iteration') else: c , ls, lw = 'g', '--', 1 # ax_reg[1].plot([x0_min,x0_max], [x1_min,x1_max], c = c, ls=ls, lw=lw, label=f"{i*100}'th Iteration") ax_r[1].plot(Xr_train[:,0],Yr_hat, c=c, ls=ls, lw=lw, label=f"{i}'th Iteration") ax_r[0].set_title("Plotting the predictor") ax_r[1].set_title("Evolution of the predictor") ax_r[0].legend() ax_r[1].legend() plt.suptitle("Linear Regression",fontsize="x-large") plt.show()Image Classification Notebook Author: Part 1: Implement a Convolutional Neural Network for the [CIFAR-10](https://en.wikipedia.org/wiki/CIFAR-10) dataset. Goals:- Implement different network architectures (e.g. simple CNN's, ResNets, etc.) and report the results.- Experiment with different optimizers and report the results. - Experiment with various other model parameters like learning rate and regularization. 
Reference to get started: [PyTorch CIFAR10 tutorial](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.htmlsphx-glr-beginner-blitz-cifar10-tutorial-py)# # # # # # # download/load, normalize, and preview CIFAR10 data using PyTorch # # # # # # # import torch import torchvision import torchvision.transforms as transforms import matplotlib.pyplot as plt import numpy as np import time %matplotlib inline # assigns GPU as device print(f'GPUs: {torch.cuda.device_count()}') dev = 'cuda' if torch.cuda.is_available() else 'cpu' print('Using {} device'.format(dev)) # create a transform with mean=(0.5, 0.5, 0.5), and std=(0.5, 0.5, 0.5) to transform images # with range [0, 1] to tensors with range [-1, 1] transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) # choose a batch size batch_size = 4 # load CIFAR10 training batches using torchvision (into ./data directory) and normalize data trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=0) # load normalized CIFAR10 test batches using torchvision testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=0) # explicitly define the classes present in CIFAR10 classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') # preview a batch of the training images with a helper function def imshow(img): # first denormalize img = img / 2 + 0.5 npimg = img.numpy() plt.imshow(np.transpose(npimg, (1, 2, 0))) plt.show() # get a batch of training data to preview dataiter = iter(trainloader) images, labels = dataiter.next() imshow(torchvision.utils.make_grid(images)) # print the batch labels print([classes[labels[index]] for index in range(batch_size)]) # print some useful info print(f"Length of training data: {len(trainset.data)}") print(f"Length of testing data: {len(testset.data)}") print(f"Shape of CIFAR10 data: {images[0].shape}") # create another transform to adapt CIFAR-10 images to ImageNet models, i.e 224 x 224 # this transformation differs from the AlexNet transformation because I am not utilizing the pretrained model # see this link for AlexNet transformation: https://pytorch.org/hub/pytorch_vision_alexnet/ imagenet_transform = transforms.Compose([transforms.Resize(224), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) # load CIFAR10 training batches resized for ImageNet models imagenet_trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=imagenet_transform) imagenet_trainloader = torch.utils.data.DataLoader(imagenet_trainset, batch_size=batch_size, shuffle=True, num_workers=0) # load CIFAR10 test batches resized for ImageNet models imagenet_testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=imagenet_transform) imagenet_testloader = torch.utils.data.DataLoader(imagenet_testset, batch_size=batch_size, shuffle=False, num_workers=0) # get a batch of training data to preview from ImageNet set imagenet_dataiter = iter(imagenet_trainloader) imagenet_images, imagenet_labels = imagenet_dataiter.next() imshow(torchvision.utils.make_grid(imagenet_images)) # print the batch labels print([classes[imagenet_labels[index]] for index in range(batch_size)]) # print some 
useful info print(f"Length of training data transformed for ImageNet models: {len(imagenet_trainset.data)}") print(f"Length of testing data transformed for ImageNet models: {len(imagenet_testset.data)}") print(f"Shape of CIFAR10 data transformed for ImageNet models: {imagenet_images[0].shape}")Files already downloaded and verified Files already downloaded and verifiedFirst architecture: LeNet-5 adapted to 3-dimensional (RGB) data[LeNet-5](https://en.wikipedia.org/wiki/LeNet) is one of the earliest simple convolutional neural networks. It was designed for a 1 x 32 x 32 greyscale input, so here it is adapted for a 3 x 32 x 32 RGB input to accomidate color images in the CIFAR-10 dataset. ![LeNet-5 architecture](https://miro.medium.com/max/4308/1*1TI1aGBZ4dybR6__DI9dzA.png)# # # # # # # build modified LeNet-5 CNN using torch.nn as base # # # # # # # import torch.nn as nn import torch.nn.functional as F # initialize a dict to store different network architectures networks = {} # inherit neural network from PyTorch's torch.nn.Module class LeNet5(nn.Module): def __init__(self): super().__init__() # to inherit init of nn.Module # define the neural net layers: # convolutional layer with 3 input channels (RGB), 6 output channels, and a 5x5 convolution self.conv1 = nn.Conv2d(3, 6, 5) # 2 x 2 max-pooling layer with stride of 2 self.pool = nn.MaxPool2d(2, 2) # convolutional layer with 6 input channels (RGB), 16 output channels, and a 5x5 convolution self.conv2 = nn.Conv2d(6, 16, 5) # linear layers: 120 -> 84 -> 10 self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) # define the forward pass through the network def forward(self, x): # 2 x 2 max-pooling layer with stride of 2 defined above x = self.pool(F.relu(self.conv1(x))) # another 2 x 2 max-pooling layer with stride of 2 x = self.pool(F.relu(self.conv2(x))) # view tensor to reshape to 16 x 5 x 5 x = x.view(-1, 16 * 5 * 5) # pass through linear layers fc1 -> fc2 -> fc3 x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x # initialize the cnn and put it on a TPU lenet5 = LeNet5().to(dev) # print some useful info print(f"Modified LeNet-5 parameters: {len(list(lenet5.parameters()))}") print(f"Modified LeNet-5 architecture: \n{lenet5}") # append network to networks dict to analyze later networks[str(lenet5).split("(")[0]] = lenet5Modified LeNet-5 parameters: 10 Modified LeNet-5 architecture: LeNet5( (conv1): Conv2d(3, 6, kernel_size=(5, 5), stride=(1, 1)) (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1)) (fc1): Linear(in_features=400, out_features=120, bias=True) (fc2): Linear(in_features=120, out_features=84, bias=True) (fc3): Linear(in_features=84, out_features=10, bias=True) )Second architecture: AlexNet adapted to CIFAR-10 data[AlexNet](https://en.wikipedia.org/wiki/AlexNet) is a pivotal deep convolutional neural network. The [paper](https://kr.nvidia.com/content/tesla/pdf/machine-learning/imagenet-classification-with-deep-convolutional-nn.pdf) has been cited 80,436 times as of May 2, 2021, according to Google Scholar. It was designed for the 1000 classes of the [ImageNet Challenge](https://en.wikipedia.org/wiki/ImageNetImageNet_Challenge) with an input size of 3 x 224 x 224, so here it is adapted for 10 classes to accommodate the CIFAR-10 dataset. Additionally, the CIFAR-10 dataset is transformed to 3 x 224 x 224 (as above) to fit the AlexNet architecture. 
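As a quick sanity check (an addition, not part of the original notebook), once the adapted AlexNet below is defined it can be fed a dummy batch shaped like the resized CIFAR-10 images to confirm that it maps (N, 3, 224, 224) inputs to 10 class scores; the helper name check_output_shape is illustrative only.

import torch

def check_output_shape(model, device, batch_size=4, image_size=224, num_classes=10):
    # dummy RGB batch shaped like the resized CIFAR-10 images
    dummy = torch.randn(batch_size, 3, image_size, image_size).to(device)
    with torch.no_grad():
        out = model(dummy)
    # the adapted network should produce one score per CIFAR-10 class
    assert out.shape == (batch_size, num_classes), out.shape
    return out.shape

# e.g. check_output_shape(AlexNet().to(dev), dev) should return torch.Size([4, 10])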
![AlexNet architecture](https://miro.medium.com/max/3688/1*eBDriuBwa5O8HPFUgerklA.png)# AlexNet model adapted from https://pytorch.org/vision/stable/_modules/torchvision/models/alexnet.html # inherit neural network from PyTorch's torch.nn.Module class AlexNet(nn.Module): def __init__(self, num_classes=10): super(AlexNet, self).__init__() self.features = nn.Sequential( nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2), nn.Conv2d(64, 192, kernel_size=5, padding=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2), nn.Conv2d(192, 384, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2), ) self.avgpool = nn.AdaptiveAvgPool2d((6, 6)) self.classifier = nn.Sequential( nn.Dropout(), nn.Linear(256 * 6 * 6, 4096), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(inplace=True), nn.Linear(4096, num_classes), ) def forward(self, x): x = self.features(x) x = self.avgpool(x) # view tensor to reshape to 256 x 6 x 6 x = x.view(x.size(0), 256 * 6 * 6) x = self.classifier(x) return x # initialize the cnn and put it on a TPU alexnet = AlexNet().to(dev) # print some useful info print(f"Modified AlexNet parameters: {len(list(alexnet.parameters()))}") print(f"Modified AlexNet architecture: \n{alexnet}") # append network to networks dict to analyze later networks[str(alexnet).split("(")[0]] = alexnetModified AlexNet parameters: 16 Modified AlexNet architecture: AlexNet( (features): Sequential( (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2)) (1): ReLU(inplace=True) (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False) (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2)) (4): ReLU(inplace=True) (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False) (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (7): ReLU(inplace=True) (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (9): ReLU(inplace=True) (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (11): ReLU(inplace=True) (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False) ) (avgpool): AdaptiveAvgPool2d(output_size=(6, 6)) (classifier): Sequential( (0): Dropout(p=0.5, inplace=False) [...]Third architecture: ResNet 18[ResNet18](https://arxiv.org/abs/1512.03385) is a residual neural network, meaning it utilizes skip connections to skip some layers during training. This concept has the benefit of avoiding vanishing gradients and accuracy saturation. It was designed for the 1000 classes of the [ImageNet Challenge](https://en.wikipedia.org/wiki/ImageNetImageNet_Challenge) with an input size of 3 x 224 x 224, so here it is adapted for 10 classes to accommodate the CIFAR-10 dataset. Additionally, the CIFAR-10 dataset is transformed to 3 x 224 x 224 (as above) to fit this architecture. 
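To make the skip-connection idea concrete, here is a minimal sketch (an illustration, not part of the original notebook) of a basic residual block in the spirit of torchvision's BasicBlock; the class name SimpleResidualBlock is hypothetical.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SimpleResidualBlock(nn.Module):
    """Two 3x3 convolutions whose output is added back to the block's input (the skip connection)."""
    def __init__(self, channels):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # identity shortcut: gradients flow through this addition unchanged,
        # which is what mitigates vanishing gradients in deep stacks of such blocks
        return F.relu(out + x)

# e.g. SimpleResidualBlock(64)(torch.randn(1, 64, 56, 56)).shape -> torch.Size([1, 64, 56, 56])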
![ResNet18 architecture](https://images1.programmersought.com/67/f9/f941fa481a9d8987b5022f1a8bade653.png)# ResNet18 imported from torchvision import torchvision.models as models # import untrained model adapted for 10 classes and send it to the relevant device resnet = models.resnet18(pretrained=False, num_classes=10).to(dev) print(f"ResNet parameters: {len(list(resnet.parameters()))}") print(f"ResNet architecture: \n{resnet}") # append network to networks dict to analyze later networks[str(resnet).split("(")[0]] = resnetResNet parameters: 62 ResNet architecture: ResNet( (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) (layer1): Sequential( (0): BasicBlock( (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) (1): BasicBlock( (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_sta[...]First optimizer: [stochastic gradient descent](https://en.wikipedia.org/wiki/Stochastic_gradient_descent) (SGD) with momentumSGD with momentum is a stochastic approximation of gradient descent optimization from a subset of the data. "Momentum" refers to a method applied to SGD that accelerates gradient vectors in the desired direction.# # # # # # # utilize classification cross-entropy loss and SGD with momentum # # # # # # # import torch.optim as optim import copy # initialize a dict to store different optimizers for each network optimizers = {} # initialize a dict to save all network/optimizer permutations for comparison network_permutations = {} # initialize an SGD optimizer for each network for network_name in networks.keys(): network = networks[network_name] # copy network to dict to later train and compare all network-optimizer permutations network_permutations[network_name] = {"SGD" : copy.deepcopy(network)} network = network_permutations[network_name]["SGD"] optimizer = optim.SGD(network.parameters(), lr=0.001, momentum=0.9) # add list containing optimizer to optimizers dict using network name as key optimizers[network_name] = [optimizer]Second optimizer: ADAMADAM, or adaptive moment estimation, considers running averages of the gradients and the second moments of the gradients to improve performance on sparse gradients and noisy problems.# # # # # # # ADAM # # # # # # # # initialize an ADAM optimizer for each network for network_name in networks.keys(): network = networks[network_name] # copy network to dict to later train and compare all network-optimizer permutations network_permutations[network_name]["Adam"] = copy.deepcopy(network) network = network_permutations[network_name]["Adam"] optimizer = optim.Adam(network.parameters(), lr=0.001) # add list containing optimizer to optimizers dict using network name as key optimizers[network_name].append(optimizer)Third optimizer: AdagradAdagrad is a SGD optimizer that adapts the learning rate to the parameters, which is sometimes beneficial for sparse data.# # # # # # # Adagrad # # # # # # # # 
initialize an Adagrad optimizer for each network for network_name in networks.keys(): network = networks[network_name] # copy network to dict to later train and compare all network-optimizer permutations network_permutations[network_name]["Adagrad"] = copy.deepcopy(network) network = network_permutations[network_name]["Adagrad"] optimizer = optim.Adagrad(network.parameters(), lr=0.001) # add list containing optimizer to optimizers dict using network name as key optimizers[network_name].append(optimizer) print(optimizers){'LeNet5': [SGD ( Parameter Group 0 dampening: 0 lr: 0.001 momentum: 0.9 nesterov: False weight_decay: 0 ), Adam ( Parameter Group 0 amsgrad: False betas: (0.9, 0.999) eps: 1e-08 lr: 0.001 weight_decay: 0 ), Adagrad ( Parameter Group 0 eps: 1e-10 initial_accumulator_value: 0 lr: 0.001 lr_decay: 0 weight_decay: 0 )], 'AlexNet': [SGD ( Parameter Group 0 dampening: 0 lr: 0.001 momentum: 0.9 nesterov: False weight_decay: 0 ), Adam ( Parameter Group 0 amsgrad: False betas: (0.9, 0.999) eps: 1e-08 lr: 0.001 weight_decay: 0 ), Adagrad ( Parameter Group 0 eps: 1e-10 initial_accumulator_value: 0 lr: 0.001 lr_decay: 0 weight_decay: 0 )], 'ResNet': [SGD ( Parameter Group 0 dampening: 0 lr: 0.001 momentum: 0.9 nesterov: False weight_decay: 0 ), Adam ( Parameter Group 0 amsgrad: False betas: (0.9, 0.999) eps: 1e-08 lr: 0.001 weight_decay: 0 [...]Train the different networks with the specified optimizers:# time it start = time.time() # loop through each network architecture, optimizer, and train all combinations to compare performance epochs = 2 # define the loss criterion criterion = nn.CrossEntropyLoss() #.to(dev) # loop through all network architectures for network_name in optimizers.keys(): # loop through all optimizers for optimizer in optimizers[network_name]: # get the relevant network for the current network/optimizer permutation optimizer_name = str(optimizer).split("(")[0].strip() network = network_permutations[network_name][optimizer_name] # loop over the training dataset the specified number of times (epochs) for epoch in range(epochs): # initialize loss to zero for each epoch running_loss = 0.0 # loop over all training data batches # use 32 x 32 if network_name == "LeNet5": for i, data in enumerate(trainloader, 0): # unpack tuple inputs, labels = data # put inputs and labels on GPU inputs = inputs.to(dev) labels = labels.to(dev) # zero out parameter gradients optimizer.zero_grad() # forward pass, backward pass, and optimization outputs = network(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics every 2000 batches running_loss += loss.item() if i % 2000 == 1999: print('Epoch: %d, Batch: %5d, Loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000)) running_loss = 0.0 # use 224 x 224 else: for i, data in enumerate(imagenet_trainloader, 0): # unpack tuple inputs, labels = data # put inputs and labels on GPU inputs = inputs.to(dev) labels = labels.to(dev) # zero out parameter gradients optimizer.zero_grad() # forward pass, backward pass, and optimization outputs = network(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics every 2000 batches running_loss += loss.item() if i % 2000 == 1999: print('Epoch: %d, Batch: %5d, Loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000)) running_loss = 0.0 print(f'Finished training {network_name} : {str(optimizer).split("(")[0]}') end = time.time() hours = int((end - start) / 60 / 60) minutes = int(((end - start) / 60) - (hours * 60)) seconds = int((end - start) 
- (minutes * 60) - (hours * 60 * 60)) print(f"Runtime: {hours} h {minutes} m {seconds} s")Check performance of the networks with the specified optimizers:# loop through all network architectures for network_name in optimizers.keys(): # loop through all optimizers for optimizer in optimizers[network_name]: # get the relevant network for the current network/optimizer permutation optimizer_name = str(optimizer).split("(")[0].strip() network = network_permutations[network_name][optimizer_name] # keep track of total predictions and correct predictions correct = 0 total = 0 # don't calculate gradients for outputs while analyzing overall performance with torch.no_grad(): # loop over all testing data batches # use 32 x 32 if network_name == "LeNet5": for data in testloader: # unpack tuple images, labels = data # put inputs and labels on GPU images = images.to(dev) labels = labels.to(dev) # calculate outputs by running images through the network outputs = network(images) # the class with the highest energy is the prediction _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() # use 224 x 224 else: for data in imagenet_testloader: # unpack tuple images, labels = data # put inputs and labels on GPU images = images.to(dev) labels = labels.to(dev) # calculate outputs by running images through the network outputs = network(images) # the class with the highest energy is the prediction _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print(f'Accuracy of {network_name} : {str(optimizer).split("(")[0].strip()} on test images: {(100 * correct / total)}%')Results: for lr = 0.001, weight_decay=0 (L2) **architecture : optimizer ::: start loss -> end loss**LeNet5 : SGD ::: Loss: 2.173 -> 1.304 LeNet5 : Adam ::: Loss: 1.844 -> 1.266 LeNet5 : Adagrad ::: Loss: 2.145 -> 1.798 AlexNet : SGD ::: Loss: 2.279 -> 1.009 AlexNet : Adam ::: Loss: 2.312 -> 2.303 AlexNet : Adagrad ::: Loss: 1.898 -> 0.960 ResNet : SGD ::: Loss: 2.081 -> 0.928 ResNet : Adam ::: Loss: 2.177 -> 0.853 ResNet : Adagrad ::: Loss: 1.815 -> 1.041 Runtime: 16 h 33 m (CPU) Runtime: 1 h 12 m (GPU) Accuracy of LeNet5 : SGD on test images: 53.11% Accuracy of LeNet5 : Adam on test images: 55.82% Accuracy of LeNet5 : Adagrad on test images: 35.62% Accuracy of AlexNet : SGD on test images: 65.63% Accuracy of AlexNet : Adam on test images: 10.0% Accuracy of AlexNet : Adagrad on test images: 66.87% Accuracy of ResNet : SGD on test images: 68.79% Accuracy of ResNet : Adam on test images: 69.95% Accuracy of ResNet : Adagrad on test images: 64.24% Results: for lr = 0.10, weight_decay=0 (L2)LeNet5 : SGD ::: Loss: 2.362 -> 2.358 LeNet5 : Adam ::: Loss: 3.188 -> 2.341 LeNet5 : Adagrad ::: Loss: 2.565 -> 2.303 AlexNet : SGD ::: Loss: 2.361 -> 2.362 AlexNet : Adam ::: Loss: 8576643381.285 -> 20.033 AlexNet : Adagrad ::: Loss: 17674910743.70 -> 2.309 ResNet : SGD ::: Loss: 2.385 -> 1.168 ResNet : Adam ::: Loss: 2.460 -> 2.342 ResNet : Adagrad ::: Loss: 2.192 -> 1.061 Runtime: 1 h 11 m (GPU) Accuracy of LeNet5 : SGD on test images: 10.0% Accuracy of LeNet5 : Adam on test images: 10.0% Accuracy of LeNet5 : Adagrad on test images: 10.0% Accuracy of AlexNet : SGD on test images: 10.0% Accuracy of AlexNet : Adam on test images: 10.0% Accuracy of AlexNet : Adagrad on test images: 9.7% Accuracy of ResNet : SGD on test images: 63.09% Accuracy of ResNet : Adam on test images: 10.0% Accuracy of ResNet : Adagrad on test images: 62.48% Results: for lr = 0.001, 
weight_decay=0.0001 (weighted L2)LeNet5 : SGD ::: Loss: 2.208 -> 1.281 LeNet5 : Adam ::: Loss: 1.897 -> 1.270 LeNet5 : Adagrad ::: Loss: 2.129 -> 1.798 AlexNet : SGD ::: Loss: 2.272 -> 1.032AlexNet : Adam ::: Loss: 2.307 -> 2.313 AlexNet : Adagrad ::: Loss: 1.866 -> 1.027 ResNet : SGD ::: Loss: 2.096 -> 0.898 ResNet : Adam ::: Loss: 2.151 -> 0.970 ResNet : Adagrad ::: Loss: 1.806 -> 1.010 Runtime: 1 h 17 m (GPU) Accuracy of LeNet5 : SGD on test images: 53.83% Accuracy of LeNet5 : Adam on test images: 53.99% Accuracy of LeNet5 : Adagrad on test images: 35.47% Accuracy of AlexNet : SGD on test images: 64.14% Accuracy of AlexNet : Adam on test images: 9.92% Accuracy of AlexNet : Adagrad on test images: 65.7% Accuracy of ResNet : SGD on test images: 68.5% Accuracy of ResNet : Adam on test images: 67.44% Accuracy of ResNet : Adagrad on test images: 64.76% Part 2: Implement a linear classifier for the [CIFAR-10](https://en.wikipedia.org/wiki/CIFAR-10) dataset. - Compare single linear layer vs. multiple linear layers- Compare inclusion of bias term vs. no bias term# # # # # # # build linear classifier with single layer using torch.nn as base # # # # # # # start = time.time() # time it # inherit neural network from PyTorch's torch.nn.Module class LinearClassifier(nn.Module): def __init__(self,): super().__init__() # to inherit init of nn.Module # input is 32 * 32 * 3 for cifar-10 and output is 10 classes self.layer1 = nn.Linear(32*32*3, 10, bias=True) # map to outputs with softmax self.sm = nn.Softmax(dim=1) # define the forward pass through the network def forward(self, x): x = x.reshape(-1, 3072) x = self.layer1(x) x = self.sm(x) return x # initialize the network network = LinearClassifier() # print some useful info print(f"Linear classifier parameters: {len(list(network.parameters()))}") print(f"Linear classifier architecture: \n{network}") # set criterion, optimizer, and epochs criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(network.parameters(), lr=0.001) epochs = 10 # loop over epochs and train the model for epoch in range(epochs): # initialize loss to zero for each epoch running_loss = 0.0 # loop over all training data batches for i, data in enumerate(trainloader, 0): # unpack tuple inputs, labels = data # zero out parameter gradients optimizer.zero_grad() # forward pass, backward pass, and optimization outputs = network(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics every 2000 batches running_loss += loss.item() if i % 2000 == 1999: print('Epoch: %d, Batch: %5d, Loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000)) running_loss = 0.0 # keep track of total predictions and correct predictions correct = 0 total = 0 # don't calculate gradients for outputs while analyzing overall performance with torch.no_grad(): # loop over all testing data batches and get the accuracy for data in testloader: # unpack tuple images, labels = data # calculate outputs by running images through the network outputs = network(images) # the class with the highest energy is the prediction _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print(f'Accuracy on test images: {(100 * correct / total)}%') end = time.time() hours = int((end - start) / 60 / 60) minutes = int(((end - start) / 60) - (hours * 60)) seconds = int((end - start) - (minutes * 60) - (hours * 60 * 60)) print(f"Runtime: {hours} h {minutes} m {seconds} s")Linear classifier parameters: 2 Linear classifier architecture: LinearClassifier( 
(layer1): Linear(in_features=3072, out_features=10, bias=True) (sm): Softmax(dim=1) ) Epoch: 1, Batch: 2000, Loss: 2.256 Epoch: 1, Batch: 4000, Loss: 2.194 Epoch: 1, Batch: 6000, Loss: 2.170 Epoch: 1, Batch: 8000, Loss: 2.152 Epoch: 1, Batch: 10000, Loss: 2.145 Epoch: 1, Batch: 12000, Loss: 2.134 Epoch: 2, Batch: 2000, Loss: 2.127 Epoch: 2, Batch: 4000, Loss: 2.122 Epoch: 2, Batch: 6000, Loss: 2.117 Epoch: 2, Batch: 8000, Loss: 2.114 Epoch: 2, Batch: 10000, Loss: 2.107 Epoch: 2, Batch: 12000, Loss: 2.106 Epoch: 3, Batch: 2000, Loss: 2.096 Epoch: 3, Batch: 4000, Loss: 2.100 Epoch: 3, Batch: 6000, Loss: 2.103 Epoch: 3, Batch: 8000, Loss: 2.097 Epoch: 3, Batch: 10000, Loss: 2.091 Epoch: 3, Batch: 12000, Loss: 2.100 Epoch: 4, Batch: 2000, Loss: 2.092 Epoch: 4, Batch: 4000, Loss: 2.085 Epoch: 4, Batch: 6000, Loss: 2.085 Epoch: 4, Batch: 8000, Loss: 2.086 Epoch: 4, Batch: 10000, Loss: 2.082[...]Results: for single layer linear classifier, softmax, 10 epochs, bias=FalseLoss: 2.250 -> 2.056 Runtime: 0 h 2 m (CPU) Accuracy on test images: 40.35% Results: for single layer linear classifier, softmax, 10 epochs, bias=TrueLoss: 2.248 -> 2.056 Runtime: 0 h 2 m (CPU) Accuracy on test images: 40.16%# # # # # # # build linear classifier with multiple layers using torch.nn as base # # # # # # # start = time.time() # time it # inherit neural network from PyTorch's torch.nn.Module class LinearClassifier(nn.Module): def __init__(self,): super().__init__() # to inherit init of nn.Module # input is 32 * 32 * 3 for cifar-10 and output is 10 classes self.layer1 = nn.Linear(32*32*3, 1024, bias=False) self.layer2 = nn.Linear(1024, 256, bias=False) self.layer3 = nn.Linear(256, 10, bias=False) # map to outputs with softmax self.sm = nn.Softmax(dim=1) # define the forward pass through the network def forward(self, x): x = x.reshape(-1, 3072) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.sm(x) return x # initialize the network network = LinearClassifier() # print some useful info print(f"Linear classifier parameters: {len(list(network.parameters()))}") print(f"Linear classifier architecture: \n{network}") # set criterion, optimizer, and epochs criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(network.parameters(), lr=0.001) epochs = 10 # loop over epochs and train the model for epoch in range(epochs): # initialize loss to zero for each epoch running_loss = 0.0 # loop over all training data batches for i, data in enumerate(trainloader, 0): # unpack tuple inputs, labels = data # zero out parameter gradients optimizer.zero_grad() # forward pass, backward pass, and optimization outputs = network(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics every 2000 batches running_loss += loss.item() if i % 2000 == 1999: print('Epoch: %d, Batch: %5d, Loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000)) running_loss = 0.0 # keep track of total predictions and correct predictions correct = 0 total = 0 # don't calculate gradients for outputs while analyzing overall performance with torch.no_grad(): # loop over all testing data batches and get the accuracy for data in testloader: # unpack tuple images, labels = data # calculate outputs by running images through the network outputs = network(images) # the class with the highest energy is the prediction _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print(f'Accuracy on test images: {(100 * correct / total)}%') end = time.time() hours = int((end - start) / 
60 / 60) minutes = int(((end - start) / 60) - (hours * 60)) seconds = int((end - start) - (minutes * 60) - (hours * 60 * 60)) print(f"Runtime: {hours} h {minutes} m {seconds} s")Linear classifier parameters: 3 Linear classifier architecture: LinearClassifier( (layer1): Linear(in_features=3072, out_features=1024, bias=False) (layer2): Linear(in_features=1024, out_features=256, bias=False) (layer3): Linear(in_features=256, out_features=10, bias=False) (sm): Softmax(dim=1) ) Epoch: 1, Batch: 2000, Loss: 2.293 Epoch: 1, Batch: 4000, Loss: 2.271 Epoch: 1, Batch: 6000, Loss: 2.244 Epoch: 1, Batch: 8000, Loss: 2.222 Epoch: 1, Batch: 10000, Loss: 2.205 Epoch: 1, Batch: 12000, Loss: 2.191 Epoch: 2, Batch: 2000, Loss: 2.181 Epoch: 2, Batch: 4000, Loss: 2.172 Epoch: 2, Batch: 6000, Loss: 2.162 Epoch: 2, Batch: 8000, Loss: 2.159 Epoch: 2, Batch: 10000, Loss: 2.145 Epoch: 2, Batch: 12000, Loss: 2.142 Epoch: 3, Batch: 2000, Loss: 2.139 Epoch: 3, Batch: 4000, Loss: 2.124 Epoch: 3, Batch: 6000, Loss: 2.122 Epoch: 3, Batch: 8000, Loss: 2.125 Epoch: 3, Batch: 10000, Loss: 2.118 Epoch: 3, Batch: 12000, Loss: 2.117 Epoch: 4, Batch: 2000, Loss: 2.114 Epoch: 4[...]Results: for multiple layer linear classifier, softmax, 10 epochs, bias=FalseLoss: 2.295 -> 2.057 Runtime: 0 h 21 m (CPU) Accuracy on test images: 39.78% Results: for multiple layer linear classifier, softmax, 10 epochs, bias=TrueLoss: 2.293 -> 2.055 Runtime: 0 h 22 m (CPU) Accuracy on test images: 39.49%# # # # # # # build nearest neighbor classifier with L1 distance # # # # # # # # algorithm credit: 's CIS510 lecture slides start = time.time() # time it class NearestNeighbor: def __init__(self): pass def train(self, X, y): # X is N x D, where each row is an example. Y is 1-dimension x N, # where the nearest neighbor classifier simply remembers all the training data self.Xtr = X self.Ytr = y def predict(self, X): num_test = X.shape[0] # make sure output type matches input type Ypred = np.zeros(num_test, dtype = self.Ytr.dtype) # loop over all test rows for i in range(num_test): # find the nearest training image to the i'th test image # using the L1 distance (sum of absolute value differences) distances = np.sum(np.abs(self.Xtr - X[i,:]), axis=1) # get the index with the smallest distance min_index = np.argmin(distances) # predict the label of the nearest example Ypred[i] = self.Ytr[min_index] return Ypred # prepare training data Xtrain = [] ytrain = [] # loop over all training data batches for i, data in enumerate(trainloader, 0): # unpack tuple inputs, labels = data # convert to numpy array inputs = inputs.numpy() labels = labels.numpy() # reshape inputs = inputs.reshape(inputs.shape[0], inputs.shape[1], inputs.shape[2], inputs.shape[3]).transpose(0,2,3,1).astype("float") # append to major lists Xtrain.append(inputs) ytrain.append(labels) # merge arrays Xtrain = np.concatenate(Xtrain) ytrain = np.concatenate(ytrain) # reshape X into rows Xtrain = Xtrain.reshape(Xtrain.shape[0], 32 * 32 * 3) # prepare testing data Xtest = [] ytest = [] # loop over all training data batches for i, data in enumerate(testloader, 0): # unpack tuple inputs, labels = data # convert to numpy array inputs = inputs.numpy() labels = labels.numpy() # reshape inputs = inputs.reshape(inputs.shape[0], inputs.shape[1], inputs.shape[2], inputs.shape[3]).transpose(0,2,3,1).astype("float") # append to major lists Xtest.append(inputs) ytest.append(labels) # merge arrays Xtest = np.concatenate(Xtest) ytest = np.concatenate(ytest) # reshape X into rows Xtest = 
Xtest.reshape(Xtest.shape[0], 32 * 32 * 3) # initialize the model L1NNeighbor = NearestNeighbor() # train the model L1NNeighbor.train(Xtrain, ytrain) # test the model ypredicted = L1NNeighbor.predict(Xtest) # calculate the accuracy print(f'Accuracy on test images: {np.mean(ypredicted == ytest) * 100}%') end = time.time() hours = int((end - start) / 60 / 60) minutes = int(((end - start) / 60) - (hours * 60)) seconds = int((end - start) - (minutes * 60) - (hours * 60 * 60)) print(f"Runtime: {hours} h {minutes} m {seconds} s")Accuracy on test images: 38.59% Runtime: 3 h 30 m 43 sResults: for nearest neighbor classifier using L1 distance Runtime: 3 h 30 m (CPU)Accuracy on test images: 38.59%# # # # # # # build nearest neighbor classifier with L2 distance # # # # # # # start = time.time() # time it class NearestNeighbor: def __init__(self): pass def train(self, X, y): # X is N x D, where each row is an example. Y is 1-dimension x N, # where the nearest neighbor classifier simply remembers all the training data self.Xtr = X self.Ytr = y def predict(self, X): num_test = X.shape[0] # make sure output type matches input type Ypred = np.zeros(num_test, dtype = self.Ytr.dtype) # loop over all test rows for i in range(num_test): # find the nearest training image to the i'th test image # using the L2 distance distances = np.sqrt(np.sum(np.square(self.Xtr - X[i,:]), axis=1)) # get the index with the smallest distance min_index = np.argmin(distances) # predict the label of the nearest example Ypred[i] = self.Ytr[min_index] return Ypred # prepare training data Xtrain = [] ytrain = [] # loop over all training data batches for i, data in enumerate(trainloader, 0): # unpack tuple inputs, labels = data # convert to numpy array inputs = inputs.numpy() labels = labels.numpy() # reshape inputs = inputs.reshape(inputs.shape[0], inputs.shape[1], inputs.shape[2], inputs.shape[3]).transpose(0,2,3,1).astype("float") # append to major lists Xtrain.append(inputs) ytrain.append(labels) # merge arrays Xtrain = np.concatenate(Xtrain) ytrain = np.concatenate(ytrain) # reshape X into rows Xtrain = Xtrain.reshape(Xtrain.shape[0], 32 * 32 * 3) # prepare testing data Xtest = [] ytest = [] # loop over all training data batches for i, data in enumerate(testloader, 0): # unpack tuple inputs, labels = data # convert to numpy array inputs = inputs.numpy() labels = labels.numpy() # reshape inputs = inputs.reshape(inputs.shape[0], inputs.shape[1], inputs.shape[2], inputs.shape[3]).transpose(0,2,3,1).astype("float") # append to major lists Xtest.append(inputs) ytest.append(labels) # merge arrays Xtest = np.concatenate(Xtest) ytest = np.concatenate(ytest) # reshape X into rows Xtest = Xtest.reshape(Xtest.shape[0], 32 * 32 * 3) # initialize the model L2NNeighbor = NearestNeighbor() # train the model L2NNeighbor.train(Xtrain, ytrain) # test the model ypredicted = L2NNeighbor.predict(Xtest) # calculate the accuracy print(f'Accuracy on test images: {np.mean(ypredicted == ytest) * 100}%') end = time.time() hours = int((end - start) / 60 / 60) minutes = int(((end - start) / 60) - (hours * 60)) seconds = int((end - start) - (minutes * 60) - (hours * 60 * 60)) print(f"Runtime: {hours} h {minutes} m {seconds} s")Accuracy on test images: 35.39% Runtime: 3 h 18 m 51 sUsing the Prolog engineProbLog contains a builtin Prolog-like engine that can be also be used as a standalone Prolog.Unlike regular Prolog systems, each grounding of a query will occur only once as a result, even if their are multiple proofs.The engine support cyclic programs.from 
problog.engine import DefaultEngine from problog.logic import *First, we initialize a Prolog model.We can load it from a string.from problog.program import PrologString pl = PrologString(""" mother_child(trude, sally). father_child(tom, sally). father_child(tom, erica). father_child(mike, tom). sibling(X, Y) :- parent_child(Z, X), parent_child(Z, Y). parent_child(X, Y) :- father_child(X, Y). parent_child(X, Y) :- mother_child(X, Y). """)or we can construct it in Pythonfrom problog.program import SimpleProgram # Define the language of terms mother_child = Term('mother_child') father_child = Term('father_child') sibling = Term('sibling') parent_child = Term('parent_child') X, Y, Z = map(Var, 'XYZ') trude, sally, tom, erica, mike = map(Term, ['trude', 'sally', 'tom', 'erica', 'mike']) # Define the program pl = SimpleProgram() pl += mother_child(trude, sally) pl += father_child(tom, sally) pl += father_child(tom, erica) pl += father_child(mike, tom) pl += sibling(X, Y) << (parent_child(Z, X) & parent_child(Z, Y)) pl += parent_child(X, Y) << father_child(X, Y) pl += parent_child(X, Y) << mother_child(X, Y)Next we initialize the engine, and we prepare the model for querying.The second step is optional but recommended when we want to query the same model multiple times.engine = DefaultEngine() db = engine.prepare(pl)We can now query the model.The engine only supports queries that consist of a single Term.The result of ``query`` is a list of tuples representing the arguments of the query term.If the query fails, this list is empty.query_term = sibling(tom, sally) res = engine.query(db, query_term) print ('%s? %s' % (query_term, bool(res))) query_term = sibling(sally, erica) res = engine.query(db, query_term) print ('%s? %s' % (query_term, bool(res)))sibling(tom,sally)? False sibling(sally,erica)? TrueVariables in the query should be replaced by ``None`` or a negative number.query_term = sibling(None, None) res = engine.query(db, query_term) for args in res: print query_term(*args) query_term = Term('sibling', Term('sally'), None) res = engine.query(db, query_term) for args in res: print query_term(*args) query_term = Term('sibling', -1, -1) res = engine.query(db, query_term) for args in res: print query_term(*args) print engine.ground_all(db, queries=[query_term])Queries : * sibling(sally,sally) : 0 * sibling(erica,erica) : 0 * sibling(tom,tom) : 0Easy Leetcode Practice Questions Leetcode 1Variation of Two Sum problemProblem: Return True if two elements in the array sum to val. Otherwise return False Method oneApproach: iterate through list and add to every other element in the list Time complexity: O(N^2) nested for loopSpace complexity: O(1)def two_sum(arr, val): """ Return True if there are two elements in the array that sum to val. Otherwise return False Brute force method: iterate through list and add to every other element in the list Time complexity: O(N^2) nested for loop Space complexity: O(1) """ for idx1, n1 in enumerate(arr): for n2 in arr[idx1+1:]: if n1 + n2 == val: return True return FalseMethod twoApproach: Create lookup table using set. Need to delete the current num from setTime complexity: O(N) for loopSpace complexity: O(N)def two_sum(arr, val): """ Return True if there are two elements in the array that sum to val. 
Otherwise return False Using set, need to delete the current num from set Time complexity: O(N) for loop Space complexity: O(N) """ unique_val = set(arr) for num in arr: val2 = val - num if val2 in (unique_val - {num}): return True return FalseMethod threeApproach: Create lookup table using dictionary.Time complexity: O(N) for loopSpace complexity: O(N)def two_sum(arr, val): """ Return True if there are two elements in the array that sum to val. Otherwise return False Using dict Time complexity: O(N) for loop Space complexity: O(N) """ dictionary = {} for idx, el in enumerate(arr): val2 = val - el if val2 in dictionary: return True else: dictionary[el] = idx return False arr = [1, 4, 6, 9, 2, 11] val = 13 two_sum(arr,val) arr = [1, 4, 6, 9, 2, 11] val = 18 two_sum(arr,val)Leetcode 7 Problem: Given a 32-bit signed integer, reverse digits of an integer.Input: integerOutput: integer Method oneApproach: * Convert numeric to string, reverse string, then convert back to numericTime complexity: O(N)Space complexity: O(1)def reverse(x): """ :type x: int :rtype: int """ if x == 0: return(0) elif x > 0: x = str(x) x = x[::-1] return(int(x) if int(x) < pow(2,31) else 0) else: x = str(x) x = '-' + x[:0:-1] return(int(x) if int(x) > -pow(2,31) else 0) reverse(123) reverse(-123)Leetcode 8 Problem: Convert string to integerInput: stringOutput: IntegerEdge cases: If num > 232, return 231 Method OneApproach: no regexdef myAtoi(str): """ :type str: str :rtype: int """ s = "42" myAtoi(s) s = "-42" strip = s.strip() {c for c in strip if c.isnumeric()} s = "-42" myAtoi(s) s = "4193 with words" myAtoi(s) s = "words and 987" myAtoi(s) s = "-91283472332" myAtoi(s)Leetcode 13 Problem: Convert Roman numerals to integerInput: StringOutput: Integer Method oneApproach:* Convert string character to integer using dictionary* Add integers if larger numeral followed by smaller numerals: VIII would be 10 - 2 = 8* Subtract integers of smaller numeral followed by larger numeral: IV would be 5 - 1 = 4Time complexity: O(N)* for loopSpace complexity: O(1)def romanToInt(s): """ :type s: str :rtype: int """ roman_dict = {'I':1, 'V':5, 'X':10, 'L':50, 'C':100, 'D':500, 'M':1000} prev, total = 0, 0 for c in s: curr = roman_dict[c] if prev < curr: total -= prev total += curr - prev else: total += curr prev = curr return(total) romanToInt('III') romanToInt('IV') romanToInt('LVIII') romanToInt('MCMXCIV')Leetcode 21 Problem: Merge two sorted linked lists and return it as a new sorted list. The new list should be made by splicing together the nodes of the first two lists.Example:Input: * List one: 1->2->4* List two: 1->3->4Output: 1->1->2->3->4->4# Definition for singly-linked list. # class Node(object): # def __init__(self, val=0, next=None): # self.val = val # self.next = next # l1 = Node(1) # l1b = Node(2) # l1c = Node(4) # l1.next = l1b # l1b.next = l1c # l2 = Node(1) # l2b = Node(3) # l2c = Node(4) # l2.next = l1b # l2b.next = l1c # Definition for singly-linked list. 
# class Node(object): # def __init__(self, val=0, next=None): # self.val = val # self.next = next # # Create a Linked List from array # class LinkedList(object): # def __init__(self, sequence): # self.head = Node(sequence[0]) # current = self.head # for item in sequence[1:]: # current.next = Node(item) # current = current.next # l1 = [1,2,4] # l2 = [1,3,4]Method one Approach:* Keep track of output answer separate from the current node being iterated by while loop.* Select list that has node with lesser value as the next value, then point that list to the next node* Iterate through nodesTime Complexity: O(N) - while loopSpace Complexity: O(N)class ListNode(object): def __init__(self, val=0, next=None): self.val = val self.next = next def mergeTwoLists(l1: ListNode, l2: ListNode): """ :type l1: ListNode :type l2: ListNode :rtype: ListNode """ current_node = ListNode(0) answer = current_node while l1 and l2: # while linked lists are not null if l1.val <= l2.val: current_node.next = l1 l1 = l1.next else: current_node.next = l2 l2 = l2.next current_node = current_node.next # Point to whichever node is not empty to get the last element of merged linked list current_node.next = l1 or l2 return answer.next # head of answer = 0 in the first iteration from creating the node l1 = [1,2,4] l2 = [1,3,4] mergeTwoLists(l1,l2)Method two Approach:* Recursion to find next nodeTime complexity: Space complexity:def mergeTwoLists(l1, l2): if not l1: return l2 if not l2: return l1 if l1.val > l2.val: answer = ListNode(l2.val) answer.next = self.mergeTwoLists(l1, l2.next) else: answer = ListNode(l1.val) answer.next = self.mergeTwoLists(l1.next, l2) return answerLeetcode 26 Problem: Return length of unique values. Don't have to remove duplicate elements from nums.Data: List of integersOutput: Number of unique values (integer) Method oneApproach:* If a number (i) if the same as previous number (i-1), remove it. * Return the length of the list, which gives the number of unique valuesExpected output:* One duplicate number, so final length should be len(nums)-1 = 2Time complexity:* O(k*N) due to pop duplicate at k location and for loop of length N.Space complexity: * O(1)def removeDuplicates(nums): """ :type nums: List[int] :rtype: int """ if len(nums) == 0: return 0 for i in reversed(range(1,len(nums))): if nums[i] == nums[i-1]: nums.pop(i) return len(nums) nums = [1, 1, 2] removeDuplicates(nums)Method twoApproach:* Find unique values using `set`, which returns a dictionary of unique values.* Convert dictionary into string to get list of unique values* Return length of list to get number of unique valuesExpected output:* One duplicate number, so final length should be len(nums)-1 = 2Time complexity:* O(N) for set?Space complexity: * O(1)def removeDuplicates(nums): """ :type nums: List[int] :rtype: int """ nums[:] = list(set(nums)) return len(nums) nums = [1, 1, 2] removeDuplicates(nums)This method doesn't work for this example however since it returns the list with the negative as the highest value. Need to sort the output array.Time complexity: * O(N2logN) for sorted (NlogN) and set (N)?def removeDuplicates(nums): """ :type nums: List[int] :rtype: int """ nums[:] = sorted(list(set(nums))) return len(nums) nums = [-1,0,0,0,0,3,3] removeDuplicates(nums)Leetcode 122 Problem: Calculate max profit given price per day. Can only purchase and sell one stock at a time.Input: List of IntegersOutput: Maximum profit (integer) Method oneApproach:* Calculate differences between stock prices each day. 
If prices increase, add the difference between the two prices.* Return the summed profitsExpected output: 7* 1-7 = no profit* 5-1 = 4* 3-5 = no profit* 6-3 = 3* 4-6 = no profitTime complexity: * O(N)Space complexity:* O(1)def maxProfit(prices): """ :type prices: List[int] :rtype: int """ maxprofit = 0 for i in range(1,len(prices)): if prices[i] > prices [i-1]: maxprofit += prices[i] - prices[i-1] return(maxprofit) prices = [7,1,5,3,6,4] maxProfit(prices)Leetcode 125 - Valid Palindrome Problem: Given a string, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases. Empty string counts as a valid palindrome. Method OneApproach: 1. join characters together if character is alphanumeric2. Compare original string and reversed stringTime complexity: O(N) for list comprehensionSpace complexity: O(N) to join all characters in original stringdef isPalindrome(s): """ :type s: str :rtype: bool """ s = ('').join([c.lower() for c in s if c.isalnum()]) r = ('').join(reversed(s)) if (s == r): return True return False s = "A man, a plan, a canal: Panama" isPalindrome(s) s = "race a car" isPalindrome(s)Leetcode 136 Problem: Given a non-empty array of integers, every element appears twice except for one. Find that single one.Input: List of integersOutput: integer Method OneIterate through list and only append integers that do not have a duplicate value.Time complexity: O(N2)* Append/remove within a for loopSpace complexity: O(N)def singleNumber(nums): """ :type nums: List[int] :rtype: int """ not_duplicate = [] for i in nums: if i not in not_duplicate: not_duplicate.append(i) else: not_duplicate.remove(i) return not_duplicate.pop()Method twoUse dictionary instead of list to count number of times an integer appears.Time complexity = O(N)* for loopSpace complexity = O(N)* dictionary is length Ndef singleNumber(nums): """ :type nums: List[int] :rtype: int """ from collections import defaultdict hash_table = defaultdict(int) for i in nums: hash_table[i] += 1 for i in hash_table: if hash_table[i] == 1: return i from collections import defaultdict hash_table = defaultdict(int) hash_table[3] nums = [4,1,2,1,2] singleNumber(nums)Method threeCount number of times an integer appears, return if count == 1.Time complexity = O(N)* for loopSpace complexity = O(N)* Size of numsdef singleNumber(nums): """ :type nums: List[int] :rtype: int """ for i in nums: if nums.count(i)==1: return iMethod four2*unique values - (list of integers) = integer without duplicate valueTime complexity = O(N)* sumSpace complexity = O(N)* Size of numsdef singleNumber(nums): """ :type nums: List[int] :rtype: int """ return(2*sum(set(nums))-sum(nums))Examplesnums = [2,2,1] singleNumber(nums) nums = [4,1,2,1,2] singleNumber(nums)Leetcode 189 Problem: Rotate an array by k steps. For example, [1,2,3,4] rotated by k=1 is [4,1,2,3]Input: List of integersOutput: List of integers Method oneApproach: * Store previous value in temporary variable* Replace i value with previous value, and store value in temporary variable ([7,2,3,4,5,6,7], previous = [1])* Go to the next index and replace the value with the previous value, until entire array has been rotated ([7,1,2,3,4,5,6])* Repeat this k timesExpected Output: * Original list [1,2,3,4,5,6,7]* rotate once: [7,1,2,3,4,5,6]* rotate twice: [6,7,1,2,3,4,5]* rotate three times: [5,6,7,1,2,3,4]Time complexity:* O(k*N)Space complexity:* O(1)def rotate(nums, k): """ :type nums: List[int] :type k: int :rtype: None Do not return anything, modify nums in-place instead. 
""" for i in range(k): previous = nums[-1] for j in range(len(nums)): nums[j], previous = previous, nums[j] return(nums) nums = [1,2,3,4,5,6,7] k = 3 rotate(nums,k)Method twoApproach:* Create a new array of length N* Write each element of new array based on k rotationsTime Complexity:* O(N) to iterate for i in rangeSpace complexity:* O(1)def rotate(nums,k): """ :type nums: List[int] :type k: int :rtype: None Do not return anything, modify nums in-place instead. """ new_array = [0]*len(nums) for i in range(len(nums)): new_array[(i+k) % len(nums)] = nums[i] nums[:] = new_array return(nums) nums = [1,2,3,4,5,6,7] k = 3 rotate(nums,k)Method three *Approach:* Rewrite i value as i+k value* Store previous value in a temporary variable* Replace values k steps away, one at a time, while storing previous value in temporary variable.Time complexity: O(N)Space complexity: O(1)def rotate(nums: List[int], k: int) -> None: n = len(nums) k %= n start = count = 0 while count < n: current, prev = start, nums[start] while True: next_idx = (current + k) % n nums[next_idx], prev = prev, nums[next_idx] current = next_idx count += 1 if start == current: break start += 1Method 4 *Approach:* Reverse entire array* Reverse the first k elements of the arrayTime complexity: O(N)Space complexity: O(1)class Solution: def reverse(self, nums: list, start: int, end: int) -> None: while start < end: nums[start], nums[end] = nums[end], nums[start] start, end = start + 1, end - 1 def rotate(self, nums: List[int], k: int) -> None: n = len(nums) k %= n self.reverse(nums, 0, n - 1) self.reverse(nums, 0, k - 1) self.reverse(nums, k, n - 1)Leetcode 217 Problem: Return True if array contains a duplicate. Return False if array has all unique values.Data: Array of integersOutput: Boolean Method oneApproach:* Remove duplicate elements* If len(final) == len(initial), return FalseExpected output:* There is a duplicate value, so function should return Truedef containsDuplicate(nums): """ :type nums: List[int] :rtype: bool """ sort_nums = sorted(nums) for i in reversed(range(1,len(nums))): if sort_nums[i] == sort_nums[i-1]: sort_nums.pop(i) if len(nums) == len(sort_nums): return (False) else: return(True) nums = [1,2,3,1] containsDuplicate(nums)Method twoApproach:* Use set to find unique values* Return True if len(nums) != len(unique values)Expected output:* There is a duplicate value, so function should return Truedef containsDuplicate(nums): """ :type nums: List[int] :rtype: bool """ return len(nums) != len(set(nums))Leetcode 242 Problem: Given two strings s and t , write a function to determine if t is an anagram of s.Input: stringOutput: stringdef isAnagram(self, s, t): """ :type s: str :type t: str :rtype: bool """ if len(s) != len(t): return False counter = {} for letter in s: if letter not in counter: counter[letter] = 1 else: counter[letter] += 1 for letter in t: if letter not in counter: return False else: counter[letter] -= 1 if counter[letter] < 0: return False return TrueLeetcode 350 Problem: Given two arrays, write a function to compute their intersection.Input: Two List of integersOutput: List of integersdef intersect(nums1, nums2): """ :type nums1: List[int] :type nums2: List[int] :rtype: List[int] """ # Sort lists nums1 = sorted(nums1) nums2 = sorted(nums2) # Order by list size if len(nums2) < len(nums1): nums1, nums2 = nums2, nums1 intersection = [] i = j = 0 # Compute intersection while i < len(nums1) and j < len(nums2): if nums1[i] == nums2[j]: intersection.append(nums1[i]) i += 1 j += 1 elif nums1[i] < nums2[j]: i += 1 
else: # nums1[i] > nums2[j] j += 1 return(intersection) nums1 = [1,2,2,1] nums2 = [2,2] intersect(nums1, nums2) nums1 = [4,9,5] nums2 = [9,4,9,8,4] intersect(nums1, nums2)Leetcode 1207 Problem: Given an array of integers arr, write a function that returns true if and only if the number of occurrences of each value in the array is unique.Data: Array of integersOutput: Boolean Method oneApproach:* Create dictionary of counts for each unique value* Sort values* Remove duplicate elements* If lengths of arrays are the same (i.e., all unique values had a unique number of occurances), return TrueTime Complexity: Total computation time of `unique_num`: 1 + (N\*1) + N\*log(N) + (N\*1\*k) + 2\*N* create empty list = constant time* for loop using set() = N* append() = constant time * Total computational time of for loop = N*1 = N* sorted() = N*log(N)* for loop using len() = N* for loop using conditional statement = constant time* pop() = k * Total computational time of for loop = "k times N"* len() = constant timeThe biggest time constraint is the `sorted` function (NlogN). How would you make this function more efficient?* To improve the function, we can improve sorting using `set` or using `dict` approach instead of `sorted`.def unique_num(nums): counts=[] # constant time (= 1) for i in set(nums): # N, total time for this loop is N * 1 counts.append(nums.count(i)) # constant time (= 1) counts_sorted = sorted(counts) # N log N for i in range(len(counts)-1): # N if counts_sorted[i] == counts_sorted[i+1]: # constant time counts_sorted.pop(i) # k, depends on where the element are "popped" . If it's the last element, then it will be N return(len(counts)==len(counts_sorted)) # 2*constant time arr = [1,2,2,1,1,3] unique_num(arr) arr = [1,2] unique_num(arr) arr = [-3,0,1,-3,1,1,1,-3,10,0] unique_num(arr)Creating your own dataset from Google Images*by: and . Inspired by [](https://www.pyimagesearch.com/2017/12/04/how-to-create-a-deep-learning-dataset-using-google-images/)*!pip install fastai #!pip install -upgrade pip #!pip install -q fastai —upgrade pipRequirement already satisfied: fastai in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (1.0.57) Requirement already satisfied: bottleneck in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from fastai) (1.2.1) Requirement already satisfied: spacy>=2.0.18 in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from fastai) (2.1.8) Requirement already satisfied: numexpr in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from fastai) (2.6.5) Requirement already satisfied: packaging in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from fastai) (17.1) Requirement already satisfied: numpy>=1.15 in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from fastai) (1.17.2) Requirement already satisfied: nvidia-ml-py3 in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from fastai) (7.352.0) Requirement already satisfied: pyyaml in /home/ec2-user/anaconda3/envs/python3/li[...]In this tutorial we will see how to easily create an image dataset through Google Images. **Note**: You will have to repeat these steps for any new category you want to Google (e.g once for dogs and once for cats).from fastai.vision import *Get a list of URLs Search and scroll Go to [Google Images](http://images.google.com) and search for the images you are interested in. 
The more specific you are in your Google Search, the better the results and the less manual pruning you will have to do.Scroll down until you've seen all the images you want to download, or until you see a button that says 'Show more results'. All the images you scrolled past are now available to download. To get more, click on the button, and continue scrolling. The maximum number of images Google Images shows is 700.It is a good idea to put things you want to exclude into the search query, for instance if you are searching for the Eurasian wolf, "canis lupus lupus", it might be a good idea to exclude other variants: "canis lupus lupus" -dog -arctos -familiaris -baileyi -occidentalisYou can also limit your results to show only photos by clicking on Tools and selecting Photos from the Type dropdown. Download into file Now you must run some JavaScript code in your browser which will save the URLs of all the images you want for your dataset.Press Ctrl+Shift+J on Windows/Linux or Cmd+Opt+J on Mac, and a small window, the JavaScript 'Console', will appear. That is where you will paste the JavaScript commands.You will need to get the urls of each of the images. Before running the following commands, you may want to disable ad blocking extensions (uBlock, AdBlockPlus etc.) in Chrome. Otherwise the window.open() command doesn't work. Then you can run the following commands:```javascripturls = Array.from(document.querySelectorAll('.rg_di .rg_meta')).map(el=>JSON.parse(el.textContent).ou);window.open('data:text/csv;charset=utf-8,' + escape(urls.join('\n')));``` Create directory and upload urls file into your server Choose an appropriate name for your labeled images. You can run these steps multiple times to create different labels.folder = 'mountainbikes' file = 'urls_mountainbikes.csv' folder = 'racingcycles' file = 'urls_racingcycles.csv'You will need to run this cell once per category.path = Path('data/bikes') dest = path/folder dest.mkdir(parents=True, exist_ok=True) path.ls()Finally, upload your urls file. You just need to press 'Upload' in your working directory and select your file, then click 'Upload' for each of the displayed files.![uploaded file](images/download_images/upload.png) Download images Now you will need to download your images from their respective urls.fast.ai has a function that allows you to do just that. You just have to specify the urls filename as well as the destination folder and this function will download and save all images that can be opened. If there is a problem opening them, they will not be saved.Let's download our images! Notice you can choose a maximum number of images to be downloaded. 
In this case we will not download all the urls.You will need to run this line once for every category.file path folder classes = ['mountainbikes','racingcycles'] download_images(path/file, dest, max_pics=200) # If you have problems download, try with `max_workers=0` to see exceptions: download_images(path/file, dest, max_pics=20, max_workers=0)Then we can remove any images that can't be opened:for c in classes: print(c) verify_images(path/c, delete=True, max_size=500)View data#np.random.seed(42) #data = ImageDataBunch.from_folder(path, train=".", valid_pct=0.2, # ds_tfms=get_transforms(), size=224, num_workers=4).normalize(imagenet_stats) # If you already cleaned your data, run this cell instead of the one before np.random.seed(42) data = ImageDataBunch.from_csv(path, folder=".", valid_pct=0.2, csv_labels='cleaned.csv', ds_tfms=get_transforms(), size=224, num_workers=4).normalize(imagenet_stats)Good! Let's take a look at some of our pictures then.data.classes data.show_batch(rows=3, figsize=(7,8)) data.classes, data.c, len(data.train_ds), len(data.valid_ds)Train modellearn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(4) learn.save('stage-1') learn.unfreeze() learn.lr_find() # If the plot is not showing try to give a start and end learning rate # learn.lr_find(start_lr=1e-5, end_lr=1e-1) learn.recorder.plot() learn.fit_one_cycle(2, max_lr=slice(3e-5,3e-4)) learn.save('stage-2')Interpretationlearn.load('stage-2'); interp = ClassificationInterpretation.from_learner(learn) interp.plot_confusion_matrix()Cleaning UpSome of our top losses aren't due to bad performance by our model. There are images in our data set that shouldn't be.Using the `ImageCleaner` widget from `fastai.widgets` we can prune our top losses, removing photos that don't belong.from fastai.widgets import *First we need to get the file paths from our top_losses. We can do this with `.from_toplosses`. We then feed the top losses indexes and corresponding dataset to `ImageCleaner`.Notice that the widget will not delete images directly from disk but it will create a new csv file `cleaned.csv` from where you can create a new ImageDataBunch with the corrected labels to continue training your model. In order to clean the entire set of images, we need to create a new dataset without the split. The video lecture demostrated the use of the `ds_type` param which no longer has any effect. See [the thread](https://forums.fast.ai/t/duplicate-widget/30975/10) for more details.db = (ImageList.from_folder(path) .split_none() .label_from_folder() .transform(get_transforms(), size=224) .databunch() ) # If you already cleaned your data using indexes from `from_toplosses`, # run this cell instead of the one before to proceed with removing duplicates. # Otherwise all the results of the previous step would be overwritten by # the new run of `ImageCleaner`. db = (ImageList.from_csv(path, 'cleaned.csv', folder='.') .split_none() .label_from_df() .transform(get_transforms(), size=224) .databunch() )Then we create a new learner to use our new databunch with all the images.learn_cln = cnn_learner(db, models.resnet34, metrics=error_rate) learn_cln.load('stage-2'); ds, idxs = DatasetFormatter().from_toplosses(learn_cln)Make sure you're running this notebook in Jupyter Notebook, not Jupyter Lab. That is accessible via [/tree](/tree), not [/lab](/lab). 
Running the `ImageCleaner` widget in Jupyter Lab is [not currently supported](https://github.com/fastai/fastai/issues/1539).# Don't run this in google colab or any other instances running jupyter lab. # If you do run this on Jupyter Lab, you need to restart your runtime and # runtime state including all local variables will be lost. ImageCleaner(ds, idxs, path)If the code above does not show any GUI (containing images and buttons) rendered by widgets but only text output, that may be caused by a configuration problem with ipywidgets. Try the solution in this [link](https://github.com/fastai/fastai/issues/1539#issuecomment-505999861) to solve it. Flag photos for deletion by clicking 'Delete'. Then click 'Next Batch' to delete flagged photos and keep the rest in that row. `ImageCleaner` will show you a new row of images until there are no more to show. In this case, the widget will show you images until there are none left from `top_losses`.ImageCleaner(ds, idxs) You can also find duplicates in your dataset and delete them! To do this, you need to run `.from_similars` to get the potential duplicates' ids and then run `ImageCleaner` with `duplicates=True`. The API works in a similar way as with misclassified images: just choose the ones you want to delete and click 'Next Batch' until there are no more images left. Make sure to recreate the databunch and `learn_cln` from the `cleaned.csv` file. Otherwise the file would be overwritten from scratch, losing all the results from cleaning the data from toplosses.ds, idxs = DatasetFormatter().from_similars(learn_cln) ImageCleaner(ds, idxs, path, duplicates=True) ??ImageCleanerRemember to recreate your ImageDataBunch from your `cleaned.csv` to include the changes you made in your data! Putting your model in production First things first, let's export the content of our `Learner` object for production:learn.export()This will create a file named 'export.pkl' in the directory where we were working that contains everything we need to deploy our model (the model, the weights, but also some metadata like the classes or the transforms/normalization used). You probably want to use CPU for inference, except at massive scale (and you almost certainly don't need to train in real time). If you don't have a GPU, that happens automatically. You can test your model on CPU like so:defaults.device = torch.device('cpu') img = open_image(path/'mountainbikes'/'00000021.jpg') imgWe create our `Learner` in the production environment like this; just make sure that `path` contains the file 'export.pkl' from before.learn = load_learner(path) pred_class,pred_idx,outputs = learn.predict(img) pred_classSo you might create a route something like this ([thanks](https://github.com/simonw/cougar-or-not) for the structure of this code):```python@app.route("/classify-url", methods=["GET"])async def classify_url(request): bytes = await get_bytes(request.query_params["url"]) img = open_image(BytesIO(bytes)) _,_,losses = learner.predict(img) return JSONResponse({ "predictions": sorted( zip(cat_learner.data.classes, map(float, losses)), key=lambda p: p[1], reverse=True ) })```(This example is for the [Starlette](https://www.starlette.io/) web app toolkit.) 
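The route above relies on a `get_bytes` helper that is not defined in this notebook; all it has to do is fetch the raw bytes of the image at the given URL. A minimal sketch of one possible implementation, assuming the `aiohttp` client library:

```python
import aiohttp

async def get_bytes(url):
    # Fetch the raw image bytes so they can be wrapped in BytesIO and
    # passed to open_image() inside the classify_url route above.
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            return await response.read()
```

Also note that the route mixes `learner` and `cat_learner`; both names should refer to the same `Learner` returned by `load_learner`.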
Things that can go wrong - Most of the time things will train fine with the defaults- There's not much you really need to tune (despite what you've heard!)- Most likely are - Learning rate - Number of epochs Learning rate (LR) too highlearn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(1, max_lr=0.5)Learning rate (LR) too lowlearn = cnn_learner(data, models.resnet34, metrics=error_rate)Previously we had this result:```Total time: 00:57epoch train_loss valid_loss error_rate1 1.030236 0.179226 0.028369 (00:14)2 0.561508 0.055464 0.014184 (00:13)3 0.396103 0.053801 0.014184 (00:13)4 0.316883 0.050197 0.021277 (00:15)```learn.fit_one_cycle(5, max_lr=1e-5) learn.recorder.plot_losses()As well as taking a really long time, it's getting too many looks at each image, so may overfit. Too few epochslearn = cnn_learner(data, models.resnet34, metrics=error_rate, pretrained=False) learn.fit_one_cycle(1)Too many epochsnp.random.seed(42) data = ImageDataBunch.from_folder(path, train=".", valid_pct=0.9, bs=32, ds_tfms=get_transforms(do_flip=False, max_rotate=0, max_zoom=1, max_lighting=0, max_warp=0 ),size=224, num_workers=4).normalize(imagenet_stats) learn = cnn_learner(data, models.resnet50, metrics=error_rate, ps=0, wd=0) learn.unfreeze() learn.fit_one_cycle(40, slice(1e-6,1e-4))Titanic - Machine Learning from DisasterEste notebook busca resolver o problema do [Titanic - Machine Learning from Disaster](https://www.kaggle.com/c/titanic) proposto pela plataforma Kaggle. DadosA plataforma disponibiliza três arquivos:- `train.csv` - Dataframe para treino.- `test.csv` - Dataframe para as predições.- `gender_submission.csv` - Um exemplo de como deve ser o dataframe para submissão.O modelo de classificação utilizado é o `Random Forest`. Para a aplicação foi utilizado a biblioteca sklearn, pra mais informações clique [aqui](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html).import pandas as pd import numpy as np train_df = pd.read_csv('./train.csv')Ao abrir o arquivo `train.csv` nos deparamos com as seguintes colunas:- `PassengerID` - O ID do passageiro a bordo.- `Survived` - Informações se o passageiro sobreviveu `1` ou não `0`.- `Pclass` - Classe do passageiro `1` - primeira `2` - segunda e `3` - terceira.- `Name` - O nome do passageiro.- `Sex` - O sexo do passageiro.- `SibSp` - Número de esposas/irmãos do passageiro a bordo.- `Age` - A idade do passageiro.- `Parch` - Número de pais/filhos do passageiro a bordo.- `Ticket` - Código do Ticket.- `Fare` - Quanto o passageiro pagou na passagem.- `Cabin` - Número da cabine.- `Embarked` - Ponto de embarcação (C - Cherbourg, Q - Queenstown, S - Southampton).train_df.head(100)Temos 12 features e 891 linhas.print(train_df.shape)(891, 12)Vamos analisar as dimensões Sexo e Idade:import matplotlib.pyplot as plt plt.figure(figsize=(9,6)) # GRÁFICO DE DISPERSÃO DOS QUE SOBREVIVERAM people_who_survived = train_df.loc[train_df['Survived'] == 1] people_who_survived_men = people_who_survived[people_who_survived['Sex'] == 'male'] people_who_survived_women = people_who_survived[people_who_survived['Sex'] == 'female'] plt.scatter(people_who_survived_men['PassengerId'], people_who_survived_men['Age'], color="#4444aa") plt.scatter(people_who_survived_women['PassengerId'], people_who_survived_women['Age'], color="cyan") plt.yticks(np.arange(np.floor(train_df['Age'].min()), np.ceil(train_df['Age'].max() + 1), 20)) plt.title("Passageiros Sobreviventes") plt.rcParams["font.family"] = "Verdana" 
plt.rcParams["font.size"] = 12 plt.xlabel("ID do passageiro") plt.legend(["Homem", "Mulher"], loc=2) plt.ylabel("Idade") plt.show() plt.close() # GRÁFICO DE DISPERSÃO DOS QUE NÃO SOBREVIVERAM plt.figure(figsize=(9,6)) # substituindo as idades people_who_not_survived = train_df.loc[train_df['Survived'] == 0] people_who_not_survived_men = people_who_not_survived[people_who_not_survived['Sex'] == 'male'] people_who_not_survived_women = people_who_not_survived[people_who_not_survived['Sex'] == 'female'] plt.scatter(people_who_not_survived_men['PassengerId'], people_who_not_survived_men['Age'], color="#4444aa") plt.scatter(people_who_not_survived_women['PassengerId'], people_who_not_survived_women['Age'], color="cyan") plt.yticks(np.arange(np.floor(train_df['Age'].min()), np.ceil(train_df['Age'].max() + 1), 20)) plt.title("Passageiros que não sobreviveram") plt.rcParams["font.family"] = "Verdana" plt.rcParams["font.size"] = 12 plt.xlabel("ID do passageiro") plt.legend(["Homem", "Mulher"], loc=9) plt.ylabel("Idade") plt.show() plt.close() percentage_who_survived_men = people_who_survived_men['Survived'].shape[0]/train_df[train_df['Sex'] == "male"].shape[0] percentage_who_not_survived_men = people_who_not_survived_men['Survived'].shape[0]/train_df[train_df['Sex'] == "male"].shape[0] percentage_who_survived_women = people_who_survived_women['Survived'].shape[0]/train_df[train_df['Sex'] == "female"].shape[0] percentage_who_not_survived_women = people_who_not_survived_women['Survived'].shape[0]/train_df[train_df['Sex'] == "female"].shape[0] print("Porcentagem de homens que sobreviveram: {:.2f}%".format(percentage_who_survived_men*100)) print("Porcentagem de homens que não sobreviveram: {:.2f}%".format(percentage_who_not_survived_men*100)) print("\n") print("Porcentagem de mulheres que sobreviveram: {:.2f}%".format(percentage_who_survived_women*100)) print("Porcentagem de mulheres que não sobreviveram: {:.2f}%".format(percentage_who_not_survived_women*100))Porcentagem de homens que sobreviveram: 18.89% Porcentagem de homens que não sobreviveram: 81.11% Porcentagem de mulheres que sobreviveram: 74.20% Porcentagem de mulheres que não sobreviveram: 25.80%Com base nos dados acima homens têm uma chance muito maior de morrer que mulheres, faixa etária dos não sobreviventes, no entanto, parece não estar muito bem definida. Conclui-se o sexo pode estar ligado com a sobrevivência do passageiro, porém a idade não. 
Número de esposas/irmãos do passageiro a bordo (SibSp)grouped_by_sibsp_survived = people_who_survived.loc[:, ['Survived', 'SibSp']].groupby('SibSp') grouped_by_sibsp_not_survived = people_who_not_survived.loc[:, ['Survived', 'SibSp']].groupby('SibSp') count_survived_by_sibsp = grouped_by_sibsp_survived.count() count_survived_by_sibsp.columns = ['Count Survived'] count_not_survived_by_sibsp = grouped_by_sibsp_not_survived.count() count_not_survived_by_sibsp.columns = ['Count Not Survived'] count_survived_by_sibsp count_not_survived_by_sibsp check_count = count_not_survived_by_sibsp.sum()[0] + count_survived_by_sibsp.sum()[0] check_count plt.figure(figsize=(9,6)) ax = plt.axes() X = [count_survived_by_sibsp.index, count_not_survived_by_sibsp.index] y = [count_survived_by_sibsp.values, count_not_survived_by_sibsp.values] ax.plot(X[0], y[0],color="#4444aa") ax.plot(X[1], y[1], color="cyan") plt.xticks(np.arange(0,9,1)) plt.xlabel plt.legend(['Sobreviventes', 'Não Sobreviventes']) plt.title("SibSp x Quantidade de pessoas") plt.xlabel("SibSp") plt.ylabel("Qtde de Pessoas") plt.show() plt.close()Parchgrouped_by_parch_survived = people_who_survived.loc[:, ['Survived', 'Parch']].groupby('Parch') grouped_by_parch_not_survived = people_who_not_survived.loc[:, ['Survived', 'Parch']].groupby('Parch') count_survived_by_parch = grouped_by_sibsp_survived.count() count_survived_by_parch.columns = ['Quantidade que sobreviveram'] count_not_survived_by_parch = grouped_by_sibsp_not_survived.count() count_not_survived_by_parch.columns = ['Quantidade que não sobreviveram'] count_survived_by_parch count_not_survived_by_parch check_count = count_not_survived_by_parch.sum()[0] + count_survived_by_parch.sum()[0] check_count plt.figure(figsize=(9,6)) ax = plt.axes() X = [count_survived_by_parch.index, count_not_survived_by_parch.index] y = [count_survived_by_parch.values, count_not_survived_by_parch.values] ax.plot(X[0], y[0],color="#4444aa") ax.plot(X[1], y[1], color="cyan") plt.xticks(np.arange(0,9,1)) plt.xlabel plt.legend(['Sobreviventes', 'Não Sobreviventes']) plt.title("Parch x Quantidade de pessoas") plt.xlabel("SibSp") plt.ylabel("Qtde de Pessoas") plt.show() plt.close()Pclassgrouped_by_pclass_survived = people_who_survived.loc[:, ['Survived', 'Pclass']].groupby('Pclass') grouped_by_pclass_not_survived = people_who_not_survived.loc[:, ['Survived', 'Pclass']].groupby('Pclass') count_survived_by_pclass = grouped_by_pclass_survived.count() count_survived_by_pclass.columns = ['Quantidade que sobreviveram'] count_not_survived_by_pclass = grouped_by_pclass_not_survived.count() count_not_survived_by_pclass.columns = ['Quantidade que não sobreviveram'] count_not_survived_by_pclass count_survived_by_pclass check_count = count_not_survived_by_pclass.sum()[0] + count_survived_by_pclass.sum()[0] check_count N = np.arange(3) labels = ['1ª Classe', '2ª Classe', '3ª Classe'] list_count_survived_by_pclass = [elem[0] for elem in count_survived_by_pclass.values.tolist()] list_count_not_survived_by_pclass = [elem[0] for elem in count_not_survived_by_pclass.values.tolist()] fig, ax = plt.subplots(figsize=(9,6)) rects1 = ax.bar(N, list_count_survived_by_pclass, label='Sobreviveram', width=0.25) rects2 = ax.bar(N+0.25, list_count_not_survived_by_pclass, label='Não sobreviveram', width=0.25) plt.ylabel('Quantidade de pessoas') plt.title('Quantidade de pessoas que sobreviveram e não sobreviveram por classe do passageiro') plt.xticks([0, 1, 2]) ax.set_xticklabels(labels) plt.legend() plt.show() 
plt.close()Embarkedprint(people_who_survived[people_who_survived['Embarked'].notna()]['Embarked'].unique().tolist(), people_who_not_survived[people_who_not_survived['Embarked'].notna()]['Embarked'].unique().tolist()) grouped_by_embarked_survived = people_who_survived.loc[:, ['Survived', 'Embarked']].groupby('Embarked') grouped_by_embarked_not_survived = people_who_not_survived.loc[:, ['Survived', 'Embarked']].groupby('Embarked') count_survived_by_embarked = grouped_by_embarked_survived.count() count_survived_by_embarked.index = people_who_survived[people_who_survived['Embarked'].notna()]['Embarked'].unique() count_survived_by_embarked.columns = ['Quantidade que sobreviveram'] count_not_survived_by_embarked = grouped_by_embarked_not_survived.count() count_not_survived_by_embarked.index = people_who_not_survived[people_who_not_survived['Embarked'].notna()]['Embarked'].unique().tolist() count_not_survived_by_embarked.columns = ['Quantidade que não sobreviveram'] count_survived_by_embarked count_not_survived_by_embarked check_count = count_not_survived_by_embarked.sum()[0] + count_survived_by_embarked.sum()[0] check_count # - 2 na N = np.arange(3) labels = ['C - Cherbourg', 'Q - Queenstown', 'S - Southampton'] list_count_survived_by_embarked = [elem[0] for elem in count_survived_by_embarked.values.tolist()] list_count_not_survived_by_embarked = [elem[0] for elem in count_not_survived_by_embarked.values.tolist()] fig, ax = plt.subplots(figsize=(9,6)) rects1 = ax.bar(N, list_count_survived_by_embarked, label='Sobreviveram', width=0.25) rects2 = ax.bar(N+0.25, list_count_not_survived_by_embarked, label='Não sobreviveram', width=0.25) plt.ylabel('Quantidade de pessoas') plt.title('Quantidade de pessoas que sobreviveram e não sobreviveram por cidade de embarque') plt.xticks([0, 1, 2]) ax.set_xticklabels(labels) plt.legend() plt.show()Fare Histograma dos que sobreviveramplt.figure(figsize=(9,6)) plt.hist(people_who_survived['Fare'], bins=10) plt.ylabel('Quantidade de pessoas') plt.xlabel('Unidades monetárias') plt.title('Quantidade de pessoas sobreviventes vs Unidades monetárias pagas na passagem')Histograma dos que não sobreviveramplt.figure(figsize=(9,6)) plt.hist(people_who_not_survived['Fare'], bins=10) plt.ylabel('Quantidade de pessoas') plt.xlabel('Unidades monetárias') plt.title('Quantidade de pessoas não sobreviventes vs Unidades monetárias pagas na passagem')O ModeloCom base nos parâmetros apresentados, vamos ao modelo considerando as seguintes features para treino [`Pclass`, `Sex`, `SibSp`, `Parch`], que são os features que no parecer estão interligadas com a sobrevivência ou não de um passageiro.from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score train_df = pd.read_csv('./train.csv') train_df = train_df.fillna(train_df.mean()) X_train = train_df.loc[:, ['Pclass', 'Sex', 'SibSp', 'Parch']] X_train = pd.get_dummies(X_train, columns=['Pclass', 'Sex', 'SibSp', 'Parch'], prefix=['Pclass', 'Sex', 'SibSp', 'Parch']) X_train['Parch_9'] = 0 y_train = train_df.loc[:, ['Survived']] clf = RandomForestClassifier(n_estimators=150, max_depth=4) clf.fit(X_train, y_train.values.ravel()) test_df = pd.read_csv('./test.csv') test_df = test_df.fillna(test_df.mean()) X_test = test_df.loc[:, ['Pclass', 'Sex', 'SibSp', 'Parch']] X_test = pd.get_dummies(X_test, columns=['Pclass', 'Sex', 'SibSp', 'Parch'], prefix=['Pclass', 'Sex', 'SibSp', 'Parch']) y_pred = clf.predict(X_test) from sklearn.metrics import accuracy_score 
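# Note: accuracy_score is imported twice but never called below, and result.csv is written
# from the sample submission unchanged. A possible completion (a sketch, not part of the
# original notebook) is to assign the predictions and score them before saving, e.g.
#     submission['Survived'] = y_pred
#     print(accuracy_score(y_test, y_pred))  # y_test here is the gender-based baseline, not ground truth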
submission = pd.read_csv('./gender_submission.csv') y_test = submission['Survived'] submission.to_csv('./result.csv', index=False)Number of Tweets%cypher match (n:tweet) return count(n)1 rows affected.Number of users%cypher match (u:user) return count(u)1 rows affected.Top Tweets%%cypher match (n:tweet)-[r]-() with n, count(r) as deg order by deg desc limit 10 match (n)<-[:TWEETS]-(u:user) return u.screen_name as user, n.tid as tid, substring(n.text, 0, 20) as tweet, deg10 rows affected.Top Users%%cypher match (n:user)-[r]-() return n.screen_name as user, count(r) as deg order by deg desc limit 1010 rows affected.Top Tags%%cypher match (n:hashtag)-[r]-() return n.hashtag as hashtags, count(r) as deg order by deg desc limit 1010 rows affected.Language datalangs = %cypher match (n:tweet) where n.lang is not null return distinct n.lang, count(*) as num_tweets order by num_tweets desc lang_df = langs.get_dataframe() lang_df.set_index("n.lang")[:10].plot(kind="bar")% of tweets with geotags%cypher match (n:tweet) return count(n) %cypher match (n:tweet) where n.coordinates is not null return count(n) 6954 / 608049.0 # 1.1%Tweets by countrycountries = %cypher match (n:tweet) where n.coordinates is not null return distinct n.country, count(*) as num_tweets order by num_tweets desc countries_df = countries.get_dataframe() countries_df.set_index("n.country")[:20].plot(kind="bar")Average sentiment in English%cypher match (n:tweet) where n.lang = "en" return avg(n.polarity) as average_en_polarity %cypher match (n:tweet) where n.lang = "en" return avg(n.subjectivity) as average_en_subjectivity1 rows affected.Average sentiment in French%cypher match (n:tweet) where n.lang = "fr" return avg(n.polarity) as average_fr_polarity %cypher match (n:tweet) where n.lang = "fr" return avg(n.subjectivity) as average_fr_subjectivity1 rows affected.Paris Twitter Stars in Mali dataset Louis_Tomlinson%%cypher match (u:user {uid: '84279963'}) return u.screen_name %%cypher match (u:user {uid: '84279963'})-->(n:tweet) return u.screen_name, n.text %%cypher match (u:user {uid: '84279963'})<--(n:tweet) return u.screen_name, n.text1 rows affected.NiallOfficial%%cypher match (u:user {uid: '105119490'}) return u.screen_name %%cypher match (u:user {uid: '105119490'})-->(n:tweet) return u.screen_name, n.text %%cypher match (u:user {uid: '105119490'})<--(n:tweet) return u.screen_name, n.text2 rows affected.RecherchesP%%cypher match (u:user {uid: '4185722537'}) return u.screen_name0 rows affected.Harry_Styles%%cypher match (u:user {uid: '181561712'}) return u.screen_name %%cypher match (u:user {uid: '181561712'})-->(n:tweet) return u.screen_name, n.text %%cypher match (u:user {uid: '181561712'})<--(n:tweet) return u.screen_name, n.text1 rows affected.infos140%%cypher match (u:user {uid: '1356382759'}) return u.screen_name %%cypher match (u:user {uid: '1356382759'})-->(n:tweet) return u.screen_name, n.text27 rows affected.justinbieber%%cypher match (u:user {uid: '27260086'}) return u.screen_name %%cypher match (u:user {uid: '27260086'})-->(n:tweet) return u.screen_name, n.text %%cypher match (u:user {uid: '27260086'})<--(n:tweet) return u.screen_name, n.text3 rows affected.nytimes%%cypher match (u:user {uid: '807095'}) return u.screen_name %%cypher match (u:user {uid: '807095'})-->(n:tweet) return u.screen_name, n.text16 rows affected.AP%%cypher match (u:user {uid: '51241574'}) return u.screen_name %%cypher match (u:user {uid: '51241574'})-->(n:tweet) return u.screen_name, n.text27 rows affected.jean_jullien 1851229334%%cypher match 
(u:user {uid: '1851229334'}) return u.screen_name0 rows affected.Michael5SOS%%cypher match (u:user {uid: '403246803'}) return u.screen_name %%cypher match (u:user {uid: '403246803'})-->(n:tweet) return u.screen_name, n.text %%cypher match (u:user {uid: '403246803'})<--(n:tweet) return u.screen_name, n.text1 rows affected.taylorswift13%%cypher match (u:user {screen_name: 'taylorswift13'}) return u.screen_name, u.uid %%cypher match (u:user {uid: '17919972'})-->(n:tweet) return u.screen_name, n.text %%cypher match (u:user {uid: '17919972'})<--(n:tweet)<--(u2:user) return u.screen_name, n.text, u2.screen_name2 rows affected.Tutorial 02: Exploratory Model Analysis*Authors: *---This is a tutorial on using grama to do *exploratory model analysis*; to evaluate the model to generate data, then use that data to understand the model.**Learning Goals**: By completing this notebook, you will learn:1. How to use the verbs `gr.eval_monte_carlo` and `gr.eval_sinews`1. How to use `gr.plot_auto`1. Common grama arguments and defaults**Prerequisites**:- Familiarity with the Python programming language- [Tutorial 01: Introduction]()**Table of Contents**:1. [Initialize](s1)2. [Monte Carlo Simulation](s2)3. [Sweeps](s3) Initialize In order to perform model analysis, we first need to construct the model. For this exercise we'll use a pre-built model: the cantilever beam model. __Q1: Initialize grama__Import grama and the cantilever beam model.*Hint*: We initialized grama in the previous notebook; feel free to copy and paste from there.### # TASK: Set up grama # TODO: Import grama, make the cantilever_beam model ### # TODO: Import grama # TODO: Assign the cantilever_beam model to `md` # -- NO NEED TO MODIFY BELOW ---- md.printpretty()Monte Carlo Simulation --- __Q2: Monte Carlo__Perform a Monte Carlo simulation on model `md` with `gr.eval_monte_carlo`. Draw `100` samples, and use the nominal settings for the deterministic variables. Determine which arguments are required, and which are optional.*Hint*: In Jupyter, click-selecting a function and pressing `Shift + Tab` will bring up the documentation. Use this to investigate the arguments.### # TASK: Perform a monte carlo simulation (MCS) # TODO: Use gr.eval_monte_carlo, determine which arguments you need to set ### # TODO: Perform MCS, assign results to `df_mc` # -- NO NEED TO MODIFY BELOW ---- df_mc.describe()__Q3: Random Seeds__Run the code cell above a few times, and note how the results change. Then add the `seed` keyword argument with your favorite integer, and try again.Random seeds are useful when debugging Monte Carlo results, as they ensure the same "random" results on repeated execution. As a rough rule of thumb you should systematically use multiple seeds when testing algorithms, but fix one seed when studying a model. __Q4: Skip evaluation__Modify your code above, and use the `skip` keyword to skip evaluating the functions. Take the results of `gr.eval_monte_carlo` and pass them to `gr.plot_auto`.### # TASK: Skip evaluation # TODO: Use gr.eval_monte_carlo with the skip keyword ### # TODO: Perform MCS with skipped evaluation, assign results to `df_skip` gr.plot_auto(df_skip)Using the autoplotter with skipped evaluation provides a visualization of the *design of experiment* (DOE), or sampling plan. Note that `gr.eval_monte_carlo` also provides an estimate of the runtime of the DOE paired with the chosen model---this is only possible when the model as runtime estimates available. 
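As a concrete reference for the tasks above, here is one possible way to set up the model and both Monte Carlo calls (a sketch, assuming the standard py_grama API and argument names; check the docstrings with `Shift + Tab` as suggested):

```python
import grama as gr
from grama.models import make_cantilever_beam

md = make_cantilever_beam()

# Q2/Q3: draw 100 samples at nominal deterministic settings, with a fixed seed
df_mc = gr.eval_monte_carlo(md, n=100, df_det="nom", seed=101)

# Q4: the same design, but skip the function evaluations to inspect the DOE only
df_skip = gr.eval_monte_carlo(md, n=100, df_det="nom", seed=101, skip=True)
gr.plot_auto(df_skip)
```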
When studying more expensive models, running a `skip` check first to inspect the design is often a good idea: This practice can help you catch errors before using a lot of compute resources. __Q5: Autoplot evaluation__Modify your code above to evaluate the model functions. Take the results of `gr.eval_monte_carlo` and pass them to `gr.plot_auto`. Use the same seed as you used above when setting `skip=True`. Interpret the resulting histograms.### # TASK: Autoplot MCS # TODO: Use gr.eval_monte_carlo with gr.plot_auto ### # TODO: Perform MCS and visualize with gr.plot_autoBased on the MCS output histograms, you should be able to see that `c_area` is unaffected by the random variables, while `g_stress` and `g_disp` have a small faction of cases which lead to negative values. Since we used the same `seed` for the skipped and evaluated cases, we can guarantee the input design above matches the output results here. Sweeps --- Monte Carlo Simulation is very useful for estimating distributions and probabilities. However, sometimes we want a more qualitative understanding of the random variables' impact on model outputs. In this last section we will use *sweeps* to gain some qualitative understanding. __Q6: Sinew Design__Use the verb `gr.eval_sinews` to construct a sinew DOE. Visualize the design without evaluating. Describe the DOE in words.*Hint*: Use the same patterns we used for `gr.eval_monte_carlo` above.### # TASK: Sinew design # TODO: Use gr.eval_sinews to generate a design ### # TODO: Generate a sinew design but do not evaluate the model functions__Q7: Sinew Study__Use the verb `gr.eval_sinews` to evaluate the model. Visualize and interpret the results.*Hint*: Use the same patterns we used for `gr.eval_monte_carlo` above.### # TASK: Sinew evaluation # TODO: Use gr.eval_sinews to evaluate the model ### # TODO: Generate, evaluate, and visualize a sinew design**Note: BayesFast is still under development. 
Currently, this notebook is based on commit 758eb74.**import bayesfast as bf import numpy as np from distributed import Client, LocalCluster cluster = LocalCluster(n_workers=4, threads_per_worker=1) client = Client(cluster) client a = 5 b = 0.5 def f_0(x): return np.linalg.norm(x, 2, -1) def j_0(x): '''not actually used''' foo = x / np.linalg.norm(x, 2, -1) return foo if np.all(np.isfinite(foo)) else np.ones_like(foo) def f_1(x): return -(x - a)**2 / b def j_1(x): return -2 * (x - a) / b m_0 = bf.Module(f_0, input_vars='in', output_vars='model') m_1 = bf.Module(f_1, j_1, input_vars='model', output_vars='logp') d_0 = bf.Density(module_list=[m_0, m_1], var_dims=[2], input_vars='in', density_name='logp') s_0 = bf.modules.PolyModel('linear', 2, 1, input_vars='in', output_vars='model') s_1 = bf.modules.PolyModel('quadratic', 2, 1, input_vars='in', output_vars='model') opt_0 = bf.recipe.OptimizeStep(s_0, hmc_options={'n_iter':1500, 'n_warmup':500}, fit_options={'use_decay': True}) sam_0 = bf.recipe.SampleStep(s_1, alpha_n=5, reuse_steps=0, sample_options={'n_iter':1500, 'n_warmup':500}, fit_options={'use_decay': True}, logp_cutoff=False, adapt_metric=True) sam_1 = bf.recipe.SampleStep(s_1, alpha_n=5, reuse_steps=1, sample_options={'n_iter':1500, 'n_warmup':500}, fit_options={'use_decay': True}, logp_cutoff=False, adapt_metric=True) x_0 = bf.utils.random.multivariate_normal([10, 10], np.eye(2), 20) r_0 = bf.recipe.Recipe(density=d_0, client=client, optimize=opt_0, sample=[sam_0] * 5 + [sam_1] * 5, x_0=x_0, random_state=0) r_0.run() r_0.n_call %matplotlib inline from getdist import plots, MCSamples import matplotlib.pyplot as plt g = plots.get_subplot_plotter(subplot_size=5) sa = MCSamples(samples=np.concatenate([si.samples.reshape((-1, 2)) for si in r_0.result.data.sample], axis=-1), names=['x_{}'.format(i) for i in range(20)], labels=['x', 'y'] * 10) g.plots_2d(sa, param_pairs=[['x_{}'.format(i), 'x_{}'.format(i + 1)] for i in range(0, 20, 2)], nx=5, colors=['tab:blue'], lws=[2]) plt.show()Removed no burn inNotation:For convenience, the name of numpy arrays starts with _ such as _x, _y. You need to install **ffmpeg** beforehand! Basic Settings# audio file path audio_f = librosa.util.example_audio_file() # file path sr = 22050 # sample rate file_format = "ogg" # or wav, ... num_channels = 1 # mono n_fft = 2048 # number of fft length. 2**n win_length = 1000 # window length <= n_fft hop_length = 250 # hopping step n_mels = 80 # number of mels n_mfccs = 40 # number of mfccs preemp = .97 # preemphasis rate n_iter = 50 # Griffin-Lim's lawData load# librosa _y, sr = librosa.load(audio_f, sr=sr, mono=num_channels==1) _y.shape # TF def data_load(audio_f, sr=22050, file_format="wav", num_channels=1): audio_binary = tf.read_file(audio_f) y = tf.contrib.ffmpeg.decode_audio(audio_binary, file_format, sr, num_channels) return tf.squeeze(y, 1), sr y, sr = data_load(audio_f, sr, file_format, num_channels) y.eval().shapePreemphasis For theoretical explanations, check [this](https://www.quora.com/Why-is-pre-emphasis-i-e-passing-the-speech-signal-through-a-first-order-high-pass-filter-required-in-speech-processing-and-how-does-it-work)# Librosa def _preemphasize(y, rate=.97): ''' y: 1-D array. Waveform. rate: A python scalar. ''' y = np.append(y[0], y[1:]-rate*y[:-1]) return y _y = _preemphasize(_y, preemp) # TF def preemphasize(y, rate=.97): ''' y: 1-D tensor. Waveform. rate: A python scalar. 
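Returns: 1-D tensor. The pre-emphasized waveform, i.e. y[t] - rate * y[t-1], with the first sample kept unchanged.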
''' y = tf.concat((y[:1], y[1:]-rate*y[:-1]), -1) return y y = preemphasize(y, preemp)For demonstration, we use the first 100,000 samples._y = _y[:100000] y = y[:100000]Spectrograms Note that the output shape is the opposite in librosa and TF.# librosa def _get_spectrograms(y, sr=22050, n_fft=2048, win_length=2048, hop_length=512, n_mels=None, power=1): linear = librosa.stft(y, n_fft=n_fft, win_length=win_length, hop_length=hop_length) # linear spectrogram mag = np.abs(linear) # magnitude if n_mels is not None: mel_basis = librosa.filters.mel(sr, n_fft, n_mels) # (n_mels, 1+n_fft//2) mel = np.dot(mel_basis, mag**power) # (n_mels, t) # mel spectrogram else: mel = None return linear, mag, mel _linear, _mag, _mel = _get_spectrograms(_y, sr, n_fft, win_length, hop_length, n_mels) print(_linear.shape, _mag.shape, _mel.shape) # TF def get_spectrograms(y, sr=22050, n_fft=2048, win_length=2048, hop_length=512, n_mels=None, power=1): linear = tf.contrib.signal.stft(y, frame_length=win_length, frame_step=hop_length, fft_length=n_fft) # linear spectrogram mag = tf.abs(linear) # magnitude if n_mels is not None: mel_basis = tf.convert_to_tensor(librosa.filters.mel(sr, n_fft, n_mels), tf.float32) mel = tf.matmul(mag**power, mel_basis, transpose_b=True) # (t, n_mels) else: mel = None return linear, mag, mel linear, mag, mel = get_spectrograms(y, sr, n_fft, win_length, hop_length, n_mels) print(linear.eval().shape, mag.eval().shape, mel.eval().shape)(397, 1025) (397, 1025) (397, 80)MFCC# Librosa def _get_mfccs(y, sr=22050, n_fft=2048, win_length=2048, hop_length=512, n_mels=128, n_mfccs=20): _, _, mel = _get_spectrograms(y, sr, n_fft, win_length, hop_length, n_mels, power=2) mel = librosa.power_to_db(mel) mfccs = np.dot(librosa.filters.dct(n_mfccs, mel.shape[0]), mel) return mfccs _mfccs = _get_mfccs(_y, sr, n_fft, win_length, hop_length, n_mels, n_mfccs) # TF # Adapted from https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db def power2db(power, ref_value=1.0, amin=1e-10, top_db=80.0): if amin <= 0: raise ParameterError('amin must be strictly positive') def _log10(x): numerator = tf.log(x) denominator = tf.log(tf.constant(10, dtype=numerator.dtype)) return numerator / denominator log_spec = 10.0 * _log10(tf.maximum(amin, power)) log_spec -= 10.0 * _log10(tf.maximum(amin, ref_value)) if top_db is not None: if top_db < 0: raise ParameterError('top_db must be non-negative') log_spec = tf.maximum(log_spec, tf.reduce_max(log_spec) - top_db) return log_spec def get_mfccs(y, sr=22050, n_fft=2048, win_length=2048, hop_length=512, n_mels=128, n_mfccs=20): _, _, mel = get_spectrograms(y, sr, n_fft, win_length, hop_length, n_mels, power=2) mel = power2db(mel) # get mfccs basis = tf.convert_to_tensor(librosa.filters.dct(n_mfccs, n_mels), tf.float32) # (n_mfccs, n_mels) mfccs = tf.matmul(mel, basis, transpose_b=True) # => (t, n_mfccs) return mfccs mfccs = get_mfccs(y, sr, n_fft, win_length, hop_length, n_mels, n_mfccs)Let's check if those two types of mfccs are close enough.print(_mfccs.sum(), "===", mfccs.eval().sum()) print(_mfccs.max(), "===", mfccs.eval().max()) print(_mfccs.min(), "===", mfccs.eval().min())-121191.609551 === -118951.0 192.720920232 === 190.608 -722.122949129 === -721.386Griffin-Lim's Law# Librosa def _spectrogram2wav(spectrogram, n_iter=50, n_fft=2048, win_length=2048, hop_length=512): '''Converts spectrogram into a waveform using Griffin-lim's raw. 
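spectrogram: [f, t] magnitude spectrogram. The phase is recovered by alternating istft/stft updates for n_iter iterations (the Griffin-Lim algorithm), as implemented below.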
''' def invert_spectrogram(spectrogram): ''' spectrogram: [f, t] ''' return librosa.istft(spectrogram, hop_length, win_length) import copy X_best = copy.deepcopy(spectrogram) # [f, t] for i in range(n_iter): X_t = invert_spectrogram(X_best) est = librosa.stft(X_t, n_fft, hop_length, win_length) # [f, t] phase = est / np.maximum(1e-8, np.abs(est)) # [f, t] X_best = spectrogram * phase # [f, t] X_t = invert_spectrogram(X_best) y = np.real(X_t) return yRecontruct the waveform._wav = _spectrogram2wav(_mag, n_iter, n_fft, win_length, hop_length) plt.figure(figsize=(12, 4)) librosa.display.waveplot(_wav, sr) ipd.Audio(_wav, rate=sr) # TF def spectrogram2wav(spectrogram, n_iter=50, n_fft=2048, win_length=2048, hop_length=512): '''Converts spectrogram into a waveform using Griffin-lim's raw. ''' def invert_spectrogram(spectrogram): ''' spectrogram: [t, f] ''' spectrogram = tf.expand_dims(spectrogram,0) inversed = tf.contrib.signal.inverse_stft(spectrogram, win_length, hop_length, n_fft) squeezed = tf.squeeze(inversed, 0) return squeezed spectrogram = tf.cast(spectrogram, dtype=tf.complex64) # [t, f] X_best = tf.identity(spectrogram) for i in range(n_iter): X_t = invert_spectrogram(X_best) est = tf.contrib.signal.stft(X_t, win_length, hop_length, n_fft, pad_end=False) # (1, T, n_fft/2+1) phase = est / tf.cast(tf.maximum(1e-8, tf.abs(est)), tf.complex64) # [t, f] X_best = spectrogram * phase # [t, t] X_t = invert_spectrogram(X_best) y = tf.real(X_t) return y wav = spectrogram2wav(mag, n_iter, n_fft, win_length, hop_length) plt.figure(figsize=(12, 4)) wav = wav.eval() librosa.display.waveplot(wav, sr) ipd.Audio(wav, rate=sr)06. 结合多保真优化 多保真优化简介 在上一个教程中,我们学习了如何实现一个简单的AutoML系统。但这个AutoML系统的核心:贝叶斯优化算法往往需要大量对采样的评价才能获得比较好的结果。然而,在自动机器学习(Automatic Machine Learning, AutoML)任务中评价往往通过 k 折交叉验证获得,在大数据集的机器学习任务上,`获得一个评价的时间代价巨大`。这也影响了优化算法在自动机器学习问题上的效果。所以一些`减少评价代价`的方法被提出来,其中**多保真度优化**(Multi-Fidelity Optimization)[[1]](refer-anchor-1)就是其中的一种。而**多臂老虎机算法**(Multi-armed Bandit Algorithm, MBA)[[2]](refer-anchor-2)是多保真度算法的一种。在此基础上,有两种主流的`bandit-based`优化策略:- Successive Halving (SH) [[3]](refer-anchor-3)- Hyperband (HB) [[4]](refer-anchor-4) 首先我们介绍连续减半(Successive Halving ,SH)。在连续减半策略中, 我们将`评价代价`参数化为一个变量`budget`,即预算。根据BOHB论文[[5]](refer-anchor-5)的阐述,我们可以根据不同的场景定义不同的budget,举例如下:1. 迭代算法的迭代数(如:神经网络的epoch、随机森林,GBDT的树的个数)2. 机器学习算法所使用的样本数3. 贝叶斯神经网络[[6]](refer-anchor-6)中MCMC链的长度4. 深度强化学习中的尝试数 举例说明,我们定义$budget_{max}=1$, $budget_{min}=\frac{1}{8}$, $\eta=2$ (`eta` = 2) 。在这里`budget`的语义表示使用$100\times budget$%的样本。1. 首先我们从配置空间(或称为超参空间)**随机采样**8个配置,实例化为8个机器学习模型。2. 然后用$\frac{1}{8}$的训练样本训练这8个模型并在验证集得到相应的损失值。3. 保留这8个模型中loss最低的前4个模型,其余的舍弃。4. 
![SH](https://img-blog.csdnimg.cn/20201228104418342.png) The figure above illustrates the iterative process of this example (image from [[1]](refer-anchor-1)). We can instantiate this process with `SuccessiveHalvingIterGenerator` from `ultraopt.multi_fidelity`:from ultraopt.multi_fidelity import SuccessiveHalvingIterGenerator, HyperBandIterGenerator SH = SuccessiveHalvingIterGenerator(min_budget=1/8, max_budget=1, eta=2) SH.get_table()Next we introduce the Hyperband (HB) strategy (a sketch of the bracket layout it generates appears below).HB = HyperBandIterGenerator(min_budget=1/8, max_budget=1, eta=2) HB.get_table()Combining Bayesian optimization with multi-fidelity optimization in UltraOpt Note that both SH and HB, as described above, sample configurations **at random**. `UltraOpt` decouples the **optimizer** from the **multi-fidelity iteration generator**, so you can combine any **Bayesian optimization algorithm** with any **multi-fidelity strategy**; such a combination is essentially the BOHB (Bayesian Optimization Hyperband) algorithm [[5]](refer-anchor-5). UltraOpt borrows from and directly reuses a lot of code from the open-source project HpBandSter [[7]](refer-anchor-7), and we are grateful for their excellent work. If you want to use a multi-fidelity strategy, your evaluation function needs an additional `budget` parameter of type `float`:
```python
def evaluate(config: dict, budget: float) -> float:
    pass
```
For testing we use the evaluation function with a `budget` parameter that ships with `ultraopt.tests.mock`, together with the corresponding configuration space:from ultraopt.tests.mock import evaluate, config_space from ultraopt import fmin from ultraopt.multi_fidelity import HyperBandIterGenerator When calling `ultraopt.fmin` with a multi-fidelity strategy, the following changes are needed:
1. `multi_fidelity_iter_generator` (the multi-fidelity iteration generator) must be specified.
2. Unlike the normal mode, `n_iterations` no longer denotes the number of calls to the evaluation function but the number of iterations of the `iter_generator`, so set it accordingly.
3. `parallel_strategy` must be `AsyncComm`; if you keep the default value you are fine.
First we instantiate an `iter_generator` and set `n_iterations` according to the table visualized by `get_table()`. Since the test function has `max_budget = 100`, we increase the `budget` over `25, 50, 100`:iter_generator = HyperBandIterGenerator(min_budget=25, max_budget=100, eta=2) iter_generator.get_table() result = fmin(evaluate, config_space, n_iterations=50, multi_fidelity_iter_generator=iter_generator, n_jobs=3) result100%|██████████| 218/218 [00:00<00:00, 247.44trial/s, max budget: 100.0, best loss: 0.540]Convergence curves over time, grouped by budget:result.plot_convergence_over_time(yscale="log");Promising configurations recommended at a `low_budget` are carried over to a `high_budget`, so the correlation between different budgets can be computed from the `loss` pairs:result.plot_correlation_across_budgets();
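For intuition about the bracket structure that `HyperBandIterGenerator.get_table()` summarizes, below is a small sketch of the standard Hyperband bracket layout for the generator used above (min_budget=25, max_budget=100, eta=2). It follows the formulas from the Hyperband paper, not UltraOpt's internal code, and UltraOpt's actual table may differ in detail; `hyperband_brackets` is a hypothetical helper written only for this tutorial.

```python
import math

# Standard Hyperband bracket layout (illustrative only, not UltraOpt's API).
def hyperband_brackets(min_budget=25, max_budget=100, eta=2):
    # number of brackets - 1; small epsilon guards against FP rounding in the log
    s_max = int(math.floor(math.log(max_budget / min_budget, eta) + 1e-9))
    brackets = []
    for s in range(s_max, -1, -1):                               # most aggressive bracket first
        n = int(math.ceil((s_max + 1) * eta ** s / (s + 1)))     # configs sampled for this bracket
        rungs = [(int(math.floor(n * eta ** -i)),                # configs kept at rung i
                  max_budget * eta ** (i - s))                   # budget per config at rung i
                 for i in range(s + 1)]
        brackets.append((s, rungs))
    return brackets

for s, rungs in hyperband_brackets():
    print(f"bracket s={s}: " + " -> ".join(f"{n} config(s) @ budget {b:g}" for n, b in rungs))
# bracket s=2: 4 config(s) @ budget 25 -> 2 config(s) @ budget 50 -> 1 config(s) @ budget 100
# bracket s=1: 3 config(s) @ budget 50 -> 1 config(s) @ budget 100
# bracket s=0: 3 config(s) @ budget 100
```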
Multi-fidelity optimization in an AutoML scenario Although the synthetic function provided by `ultraopt.tests.mock` lets us test optimization combined with a multi-fidelity strategy, it is not a realistic scenario. We will now modify the AutoML evaluator from tutorial `05. Implement a Simple AutoML System` into an evaluator that supports multi-fidelity optimization, and test it accordingly.from sklearn.svm import LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_digits import seaborn as sns import numpy as np import warnings from ultraopt.hdl import layering_config from sklearn.model_selection import StratifiedKFold # stratified sampling warnings.filterwarnings("ignore") X, y = load_digits(return_X_y=True) cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=0) def evaluate(config: dict, budget: float) -> float: layered_dict = layering_config(config) AS_HP = layered_dict['classifier'].copy() AS, HP = AS_HP.popitem() ML_model = eval(AS)(**HP) # The original approach of training on all of the data (equivalent to budget=1) is commented out # scores = cross_val_score(ML_model, X, y, cv=cv, scoring=metric) # ------------------------------------------------------------- # Instead, subsample the training folds of the cross-validation split; the sampling ratio is budget sample_ratio = budget scores = [] for i, (train_ix, valid_ix) in enumerate(cv.split(X, y)): rng = np.random.RandomState(i) size = int(train_ix.size * sample_ratio) train_ix = rng.choice(train_ix, size, replace=False) X_train = X[train_ix, :] y_train = y[train_ix] X_valid = X[valid_ix, :] y_valid = y[valid_ix] ML_model.fit(X_train, y_train) scores.append(ML_model.score(X_valid, y_valid)) # ------------------------------------------------------------- score = np.mean(scores) return 1 - score config = {'classifier:__choice__': 'LinearSVC', 'classifier:LinearSVC:C': 1.0, 'classifier:LinearSVC:dual': 'True:bool', 'classifier:LinearSVC:loss': 'squared_hinge', 'classifier:LinearSVC:max_iter': 600, 'classifier:LinearSVC:multi_class': 'ovr', 'classifier:LinearSVC:penalty': 'l2', 'classifier:LinearSVC:random_state': '42:int'} evaluate(config, 0.125) evaluate(config, 0.5) evaluate(config, 1)We have now successfully defined an AutoML evaluator that supports a multi-fidelity strategy, and it follows the usual pattern: the larger the budget, the higher the evaluation cost, but the better the model performs and the lower the loss.
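For readers who want to see the same idea without the UltraOpt/HDL plumbing, here is a stand-alone sketch of a budget-aware cross-validation evaluator. The function name `budgeted_cv_loss` is ours, and the exact losses and timings will vary from run to run; the point is only to illustrate that a larger budget typically costs more time and yields a lower loss.

```python
import time
import numpy as np
from sklearn.datasets import load_digits
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import LinearSVC

# Stand-alone, illustrative version of the budget-aware evaluator above.
def budgeted_cv_loss(model, X, y, budget, n_splits=3, seed=0):
    cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
    scores = []
    for i, (train_ix, valid_ix) in enumerate(cv.split(X, y)):
        rng = np.random.RandomState(i)  # fixed per-fold seed keeps subsamples comparable across budgets
        train_ix = rng.choice(train_ix, int(train_ix.size * budget), replace=False)
        model.fit(X[train_ix], y[train_ix])
        scores.append(model.score(X[valid_ix], y[valid_ix]))
    return 1 - np.mean(scores)

X, y = load_digits(return_X_y=True)
for budget in (0.125, 0.5, 1.0):
    start = time.time()
    loss = budgeted_cv_loss(LinearSVC(max_iter=600, random_state=42), X, y, budget)
    print(f"budget={budget:<5} loss={loss:.4f} time={time.time() - start:.2f}s")
```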
We now integrate the evaluator above into the `05. Implement a Simple AutoML System.py` script to form the `06. Combine Multi-Fidelity Optimization.py` script:#!/usr/bin/env python # -*- coding: utf-8 -*- # @Author : # @Date : 2020-12-28 # @Contact : import warnings from sklearn.svm import LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_digits from sklearn.model_selection import StratifiedKFold # stratified sampling from sklearn.model_selection import cross_val_score import sklearn.metrics import numpy as np from ultraopt import fmin from ultraopt.hdl import hdl2cs, plot_hdl, layering_config from ultraopt.multi_fidelity import HyperBandIterGenerator warnings.filterwarnings("ignore") HDL = { 'classifier(choice)':{ "LinearSVC": { "max_iter": {"_type": "int_quniform","_value": [300, 3000, 100], "_default": 600}, "penalty": {"_type": "choice", "_value": ["l1", "l2"],"_default": "l2"}, "dual": {"_type": "choice", "_value": [True, False],"_default": False}, "loss": {"_type": "choice", "_value": ["hinge", "squared_hinge"],"_default": "squared_hinge"}, "C": {"_type": "loguniform", "_value": [0.01, 10000],"_default": 1.0}, "multi_class": "ovr", "random_state": 42, "__forbidden": [ {"penalty": "l1","loss": "hinge"}, {"penalty": "l2","dual": False,"loss": "hinge"}, {"penalty": "l1","dual": False}, {"penalty": "l1","dual": True,"loss": "squared_hinge"}, ] }, "RandomForestClassifier": { "n_estimators": {"_type": "int_quniform","_value": [10, 200, 10], "_default": 100}, "criterion": {"_type": "choice","_value": ["gini", "entropy"],"_default": "gini"}, "max_features": {"_type": "choice","_value": ["sqrt","log2"],"_default": "sqrt"}, "min_samples_split": {"_type": "int_uniform", "_value": [2, 20],"_default": 2}, "min_samples_leaf": {"_type": "int_uniform", "_value": [1, 20],"_default": 1}, "bootstrap": {"_type": "choice","_value": [True, False],"_default": True}, "random_state": 42 }, "KNeighborsClassifier": { "n_neighbors": {"_type": "int_loguniform", "_value": [1,100],"_default": 3}, "weights" : {"_type": "choice", "_value": ["uniform", "distance"],"_default": "uniform"}, "p": {"_type": "choice", "_value": [1, 2],"_default": 2}, }, } } CS = hdl2cs(HDL) g = plot_hdl(HDL) default_cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=0) X, y = load_digits(return_X_y=True) class Evaluator(): def __init__(self, X, y, metric="accuracy", cv=default_cv): # initialization self.X = X self.y = y self.metric = metric self.cv = cv def __call__(self, config: dict, budget: float) -> float: layered_dict = layering_config(config) AS_HP = layered_dict['classifier'].copy() AS, HP = AS_HP.popitem() ML_model = eval(AS)(**HP) # scores = cross_val_score(ML_model, self.X, self.y, cv=self.cv, scoring=self.metric) # ------------------------------------------------------------- # Subsample the training folds of the cross-validation split; the sampling ratio is budget sample_ratio = budget scores = [] for i, (train_ix, valid_ix) in enumerate(self.cv.split(X, y)): rng = np.random.RandomState(i) size = int(train_ix.size * sample_ratio) train_ix = rng.choice(train_ix, size, replace=False) X_train = X[train_ix, :] y_train = y[train_ix] X_valid = X[valid_ix, :] y_valid = y[valid_ix] ML_model.fit(X_train, y_train) y_pred = ML_model.predict(X_valid) score = eval(f"sklearn.metrics.{self.metric}_score")(y_valid, y_pred) scores.append(score) # ------------------------------------------------------------- score = np.mean(scores) return 1 - score evaluator = Evaluator(X, y) iter_generator = HyperBandIterGenerator(min_budget=1/4, max_budget=1, eta=2) result = fmin(evaluator, HDL, optimizer="ETPE", n_iterations=30, 
multi_fidelity_iter_generator=iter_generator, n_jobs=3) print(result)100%|██████████| 130/130 [00:31<00:00, 4.10trial/s, max budget: 1.0, best loss: 0.012] +--------------------------------------------------------------------------------------------------------------------------+ | HyperParameters | Optimal Value | +-----------------------------------------------------+----------------------+----------------------+----------------------+ | classifier:__choice__ | KNeighborsClassifier | KNeighborsClassifier | KNeighborsClassifier | | classifier:KNeighborsClassifier:n_neighbors | 5 | 4 | 4 | | classifier:KNeighborsClassifier:p | 2:int | 2:int | 2:int | | classifier:KNeighborsClassifier:weights | distance | distance | distance | | classifier:LinearSVC:C [...]我们可以对结合多保真策略得到的优化结果进行数据分析:import pylab as plt plt.rcParams['figure.figsize'] = (16, 12) plt.subplot(2, 2, 1) result.plot_convergence_over_time(); plt.subplot(2, 2, 2) result.plot_concurrent_over_time(num_points=200); plt.subplot(2, 2, 3) result.plot_finished_over_time(); plt.subplot(2, 2, 4) result.plot_correlation_across_budgets();Network Operations Pre-Processing# nuclio: ignore import nuclioDefine the MLRun environment%nuclio config kind = "job" %nuclio config spec.image = "mlrun/ml-models"%nuclio: setting kind to 'job' %nuclio: setting spec.image to 'mlrun/ml-models'Functionimport os import pandas as pd from mlrun.datastore import DataItem def aggregate(context, df_artifact: DataItem, save_to: str = 'aggregated-df.pq', keys: list = None, metrics: list = None, labels: list = None, metric_aggs: list = ['mean'], label_aggs: list = ['max'], suffix: str = '', window: int = 3, center: bool = False, inplace: bool = False): """Time-series aggregation function Will perform a rolling aggregation on {df_artifact}, over {window} by the selected {keys} applying {metric_aggs} on {metrics} and {label_aggs} on {labels}. adding {suffix} to the feature names. if not {inplace}, will return the original {df_artifact}, joined by the aggregated result. :param df_artifact: MLRun input pointing to pandas dataframe (csv/parquet file path) :param save_to: Where to save the result dataframe. * If relative will add to the {artifact_path} :param keys: Subset of indexes from the source dataframe to aggregate by (default=all) :param metrics: Array containing a list of metrics to run the aggregations on. (default=None) :param labels: Array containing a list of labels to run the aggregations on. (default=None) :param metric_aggs: Array containing a list of aggregation function names to run on {metrics}. (Ex: 'mean', 'std') (default='mean') :param label_aggs: Array containing a list of aggregation function names to run on {metrics}. (Ex: 'max', 'min') (default='max') :param suffix: Suffix to add to the feature name, E.g: __ (Ex: 'last_60_mintes') (default='') :param window: Window size to perform the rolling aggregate on. (default=3) :param center: If True, Sets the value for the central sample in the window, If False, will set the value to the last sample. (default=False) :param inplace: If True, will return only the aggregated results. 
If False, will join the aggregated results with the original dataframe """ context.logger.info(f'Aggregating {df_artifact.url}') input_df = df_artifact.as_df() # Verify there is work to be done if not (metrics or labels): raise ValueError('please specify metrics or labels param') # Select the correct indexes if keys: current_index = input_df.index.names indexes_to_drop = [col for col in input_df.index.names if col not in keys] df = input_df.reset_index(level=indexes_to_drop) else: df = input_df # For each metrics if metrics: metrics_df = df.loc[:, metrics].rolling(window=window, center=center).aggregate(metric_aggs) # Flatten all the aggs metrics_df.columns = ['_'.join(col).strip() for col in metrics_df.columns.values] # Add suffix if suffix: metrics_df.columns = [f'{metric}_{suffix}' for metric in metrics_df.columns] if not inplace: final_df = pd.merge(input_df, metrics_df, suffixes=('', suffix), left_index=True, right_index=True) else: final_df = metrics_df # For each label if labels: labels_df = df.loc[:, labels].rolling(window=window, center=center).aggregate(label_aggs) # Flatten all the aggs labels_df.columns = ['_'.join(col).strip() for col in labels_df.columns.values] # Add suffix if suffix: labels_df.columns = [f'{label}_{suffix}' for label in labels_df.columns] if metrics: final_df = pd.merge(final_df, labels_df, suffixes=('', suffix), left_index=True, right_index=True) else: if not inplace: final_df = pd.merge(input_df, labels_df, suffixes=('', suffix), left_index=True, right_index=True) else: final_df = labels_df # Save the result dataframe context.log_dataset(key='aggregate', df=final_df, format='parquet', local_path=save_to) # nuclio: end-codeTest> This test uses the metrics data, created by the [Generator function](https://github.com/mlrun/demo-network-operations/blob/master/notebooks/generator.ipynb) from MLRun's [Network Operations Demo](https://github.com/mlrun/demo-network-operations) To test it yourself, please generate this dataset or use any of your available csv/parquet datasets.from mlrun import code_to_function, mount_v3io, NewTask, mlconf, run_local mlconf.dbpath = mlconf.dbpath or 'http://mlrun-api:8080' metrics_path = '/User/v3io/bigdata/netops_metrics_parquet/20200329T133835-20200329T143835.parquet' metrics = pd.read_parquet('/User/v3io/bigdata/netops_metrics_parquet/20200329T133835-20200329T143835.parquet')Local TestDefine the aggregate test taskaggregate_task = NewTask(name='aggregate', project='network-operations', params={'metrics': ['cpu_utilization'], 'labels': ['is_error'], 'metric_aggs': ['mean', 'sum'], 'label_aggs': ['max'], 'suffix': 'daily', 'inplace': False, 'window': 5, 'center': True, 'save_to': 'aggregate.pq'}, inputs={'df_artifact': metrics_path}, handler=aggregate) aggregate_run = run_local(aggregate_task)[mlrun] 2020-05-04 14:13:43,871 artifact path is not defined or is local, artifacts will not be visible in the UI [mlrun] 2020-05-04 14:13:43,958 starting run aggregate uid=332bd4f750584bc8a5f08f96e8d048b5 -> http://10.194.95.255:8080 [mlrun] 2020-05-04 14:13:44,119 Aggregating /User/v3io/bigdata/netops_metrics_parquet/20200329T133835-20200329T143835.parquet [mlrun] 2020-05-04 14:13:44,633 log artifact aggregate at aggregate.pq, size: 281983, db: YTest on cluster Convert the code to an MLRun functionfn = code_to_function('aggregate', handler='aggregate') fn.spec.description = "Rolling aggregation over Metrics and Lables according to specifications" fn.metadata.categories = ["data-prep"] fn.metadata.labels = {'author': 'orz'} 
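# Roughly, the next statements take the notebook code and run it as an MLRun job on the cluster:
# `code_to_function('aggregate', handler='aggregate')` above packaged the `aggregate` handler into an MLRun function object,
# `fn.export('function.yaml')` saves its spec so the function can be re-imported later,
# `fn.apply(mount_v3io(...))` mounts the v3io data volume so the parquet input path used in the local test is reachable from the job pod,
# and `.run(aggregate_task)` submits the same task that was executed locally with `run_local` above.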
fn.export('function.yaml') aggregate_run = fn.apply(mount_v3io(remote='bigdata', mount_path='/User/v3io/bigdata')).run(aggregate_task)[mlrun] 2020-05-04 14:14:01,425 artifact path is not defined or is local, artifacts will not be visible in the UI [mlrun] 2020-05-04 14:14:01,459 starting run aggregate uid=85dbdb7a453845ad9530b2d3e076229f -> http://10.194.95.255:8080 [mlrun] 2020-05-04 14:14:02,141 Job is running in the background, pod: aggregate-tnhbl [mlrun] 2020-05-04 14:19:06,359 artifact path is not defined or is local, artifacts will not be visible in the UI [mlrun] 2020-05-04 14:19:06,544 Aggregating /User/v3io/bigdata/netops_metrics_parquet/20200329T133835-20200329T143835.parquet [mlrun] 2020-05-04 14:19:07,271 log artifact aggregate at aggregate.pq, size: 281973, db: Y [mlrun] 2020-05-04 14:19:08,625 run executed, status=completed final state: succeededShow resultspd.read_parquet(aggregate_run.artifact('aggregate')['target_path'])Unzip datasets and prepare data:import os import seaborn as sns from imblearn.metrics import geometric_mean_score from sklearn.model_selection import cross_val_score, ShuffleSplit from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from multi_imbalance.datasets import load_datasets from multi_imbalance.ensemble.soup_bagging import SOUPBagging from multi_imbalance.utils.data import load_arff_dataset from multi_imbalance.utils.min_int_maj import maj_int_min %matplotlib inline sns.set_style('darkgrid') dataset = load_datasets()['new_ecoli'] X, y = dataset.data, dataset.target print(X[:5]) print(y[:5]) X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.25) clf = KNeighborsClassifier() vote_classifier = SOUPBagging(clf, n_classifiers=50, maj_int_min=maj_int_min['new_ecoli']) vote_classifier.fit(X_train, y_train) y_pred = vote_classifier.predict(X_test) geometric_mean_score(y_test, y_pred, correction=0.001) X, y = load_arff_dataset(f'{os.getcwd()}/../../data/arff/new_ecoli.arff') clf = make_pipeline(StandardScaler(), SOUPBagging()) cv = ShuffleSplit(n_splits=5, test_size=0.3, random_state=0) print(cross_val_score(clf, X, y, cv=cv))/home/plutasnyy/anaconda3/envs/multi-imbalance/lib/python3.7/site-packages/sklearn/base.py:197: FutureWarning: From version 0.24, get_params will raise an AttributeError if a parameter cannot be retrieved as an instance attribute. Previously it would return None. FutureWarning) /home/plutasnyy/anaconda3/envs/multi-imbalance/lib/python3.7/site-packages/sklearn/base.py:197: FutureWarning: From version 0.24, get_params will raise an AttributeError if a parameter cannot be retrieved as an instance attribute. Previously it would return None. FutureWarning) /home/plutasnyy/anaconda3/envs/multi-imbalance/lib/python3.7/site-packages/sklearn/base.py:197: FutureWarning: From version 0.24, get_params will raise an AttributeError if a parameter cannot be retrieved as an instance attribute. Previously it would return None. 
FutureWarning) /home/plutasnyy/anaconda3/envs/multi-imbalance/lib/python3.7/site-packages/sklearn/base.py:197: FutureWarning: From version 0.24, get_params will raise an At[...]GUI with Classifiers integrated You may need to install the following packages# Code for installing the packages !pip install PySimpleGUI !pip install pyperclip !pip install chessLooking in indexes: https://pypi.douban.com/simple Requirement already satisfied: PySimpleGUI in g:\anaconda3\lib\site-packages (4.30.0) Looking in indexes: https://pypi.douban.com/simple Requirement already satisfied: pyperclip in g:\anaconda3\lib\site-packages (1.8.1) Looking in indexes: https://pypi.douban.com/simple Requirement already satisfied: chess in g:\anaconda3\lib\site-packages (1.3.0)One-Click executable of the GUI Note: in the menubar, select "mode" -> "run" to start play You can change the theme by using the "Settings for Assignment COMP5318"# %load python_easy_chess_gui.py #!/usr/bin/env python3 """ python_easy_chess_gui.py Requirements: Python 3.7.3 and up PySimpleGUI Square Mapping board = [ 56, 57, ... 63 ... 8, 9, ... 0, 1, 2, ... ] row = [ 0, 0, ... 1, 1, ... ... 7, 7 ... ] col = [ 0, 1, 2, ... 7 0, 1, 2, ... ... 0, 1, 2, ... 7 ] Python-Chess Square Mapping board is the same as in PySimpleGUI row is reversed col is the same as in PySimpleGUI """ import PySimpleGUI as sg import os import sys import subprocess import threading from pathlib import Path, PurePath # Python 3.4 and up import queue import copy import time from datetime import datetime import json import pyperclip import chess import chess.pgn import chess.engine import chess.polyglot import logging log_format = '%(asctime)s :: %(funcName)s :: line: %(lineno)d :: %(' \ 'levelname)s :: %(message)s' logging.basicConfig(filename='pecg_log.txt', filemode='w', level=logging.DEBUG, format=log_format) APP_NAME = 'Python Easy Chess GUI' APP_VERSION = 'v1.11' BOX_TITLE = '{} {}'.format(APP_NAME, APP_VERSION) platform = sys.platform ico_path = {'win32': {'pecg': 'Icon/pecg.ico', 'enemy': 'Icon/enemy.ico', 'adviser': 'Icon/adviser.ico'}, 'linux': {'pecg': 'Icon/pecg.png', 'enemy': 'Icon/enemy.png', 'adviser': 'Icon/adviser.png'}, 'darwin': {'pecg': 'Icon/pecg.png', 'enemy': 'Icon/enemy.png', 'adviser': 'Icon/adviser.png'}} MIN_DEPTH = 1 MAX_DEPTH = 1000 MANAGED_UCI_OPTIONS = ['ponder', 'uci_chess960', 'multipv', 'uci_analysemode', 'ownbook'] GUI_THEME = ['Green', 'GreenTan', 'LightGreen', 'BluePurple', 'Purple', 'BlueMono', 'GreenMono', 'BrownBlue', 'BrightColors', 'NeutralBlue', 'Kayak', 'SandyBeach', 'TealMono', 'Topanga', 'Dark', 'Black', 'DarkAmber'] # import our classifiers sys.path.insert(0,'../') try: import Classifiers import BoardHelper from PIL import ImageGrab, ImageOps import numpy as np # this is to fix high-resolution monitor problem so that the # screen capture function could work. # ref:https://github.com/PySimpleGUI/PySimpleGUI/issues/2962 import ctypes ctypes.windll.user32.SetProcessDPIAware() except: print("import failed. 
Check your path.") saved_model_path = "../saved_model/" abc_model_file = saved_model_path + "abc_dump.pkl" svc_model_file = saved_model_path + "svc_dump.pkl" # construct the CNN classifier, and read weights.n cnn = Classifiers.CNNClassifier() cnn.LoadMostRecentModelFromDirectory("../CNN_training_checkpoint/") svc = Classifiers.SVCClassifier() svc.LoadModel(svc_model_file) abc = Classifiers.ABClassifier() abc.LoadModel(abc_model_file) def get_grid(window, elem): widget = elem.Widget box = (widget.winfo_rootx(), widget.winfo_rooty(), widget.winfo_rootx() + widget.winfo_width(), widget.winfo_rooty() + widget.winfo_height()) grab = np.asarray(ImageGrab.grab(bbox=box)) return grab def get_whole_board(window, gray = False): elem = window.FindElement(key=(0, 0)) widget = elem.Widget box = (widget.winfo_rootx(), widget.winfo_rooty(), widget.winfo_rootx() + widget.winfo_width() * 8, widget.winfo_rooty() + widget.winfo_height() * 8) pil_img = ImageGrab.grab(bbox=box) if gray: pil_img = ImageOps.grayscale(pil_img) board_image = np.asarray(pil_img) # ImageGrab.grab(bbox=box).save("board.png") return board_image def save_grid_as_file(window, elem, file_name): os.makedirs(os.path.dirname(file_name), exist_ok = True) grid = get_grid(window, elem) PIL.Image.fromarray(grid).save(file_name) def ClassifyBoard(window, classifier, window_element_label): # use classifier to process the image if isinstance(classifier, Classifiers.SVCClassifier) or isinstance(classifier, Classifiers.ABClassifier): board_image = get_whole_board(window, True) else: board_image = get_whole_board(window, False) predicted = classifier.Predict(board_image) labels = np.array(predicted).reshape(8, 8) # update user interface #print(labels) textbox = window.FindElement(window_element_label) textbox.Update(type(classifier).__name__ + "\n" + np.array2string(labels)) PIECE_THEME = [ str(i + 1) for i in range(32) ] + [ "default" ] IMAGE_PATH = 'Images/60_scaled' # path to the chess pieces PIECE_IMAGE_PATH = "../chess-generator/ChessGenerator/pieces/" BLANK = 0 # piece names PAWNB = 1 KNIGHTB = 2 BISHOPB = 3 ROOKB = 4 KINGB = 5 QUEENB = 6 PAWNW = 7 KNIGHTW = 8 BISHOPW = 9 ROOKW = 10 KINGW = 11 QUEENW = 12 # Absolute rank based on real chess board, white at bottom, black at the top. # This is also the rank mapping used by python-chess modules. RANK_8 = 7 RANK_7 = 6 RANK_6 = 5 RANK_5 = 4 RANK_4 = 3 RANK_3 = 2 RANK_2 = 1 RANK_1 = 0 initial_board = [[ROOKB, KNIGHTB, BISHOPB, QUEENB, KINGB, BISHOPB, KNIGHTB, ROOKB], [PAWNB, ] * 8, [BLANK, ] * 8, [BLANK, ] * 8, [BLANK, ] * 8, [BLANK, ] * 8, [PAWNW, ] * 8, [ROOKW, KNIGHTW, BISHOPW, QUEENW, KINGW, BISHOPW, KNIGHTW, ROOKW]] white_init_promote_board = [[QUEENW, ROOKW, BISHOPW, KNIGHTW]] black_init_promote_board = [[QUEENB, ROOKB, BISHOPB, KNIGHTB]] HELP_MSG = """(A) To play a game You should be in Play mode. 1. Mode->Play 2. Make move on the board (B) To play as black You should be in Neutral mode 1. Board->Flip 2. Mode->Play 3. Engine->Go If you are already in Play mode, go back to Neutral mode via Mode->Neutral (C) To flip board You should be in Neutral mode 1. Board->Flip (D) To paste FEN You should be in Play mode 1. Mode->Play 2. FEN->Paste (E) To show engine search info after the move 1. Right-click on the Opponent Search Info and press Show (F) To Show book 1 and 2 1. 
Right-click on Book 1 or 2 press Show """ # Images/60 blank = os.path.join(IMAGE_PATH, 'blank.png') bishopB = os.path.join(IMAGE_PATH, 'bB.png') bishopW = os.path.join(IMAGE_PATH, 'wB.png') pawnB = os.path.join(IMAGE_PATH, 'bP.png') pawnW = os.path.join(IMAGE_PATH, 'wP.png') knightB = os.path.join(IMAGE_PATH, 'bN.png') knightW = os.path.join(IMAGE_PATH, 'wN.png') rookB = os.path.join(IMAGE_PATH, 'bR.png') rookW = os.path.join(IMAGE_PATH, 'wR.png') queenB = os.path.join(IMAGE_PATH, 'bQ.png') queenW = os.path.join(IMAGE_PATH, 'wQ.png') kingB = os.path.join(IMAGE_PATH, 'bK.png') kingW = os.path.join(IMAGE_PATH, 'wK.png') #images = {BISHOPB: bishopB, BISHOPW: bishopW, PAWNB: pawnB, PAWNW: pawnW, # KNIGHTB: knightB, KNIGHTW: knightW, # ROOKB: rookB, ROOKW: rookW, KINGB: kingB, KINGW: kingW, # QUEENB: queenB, QUEENW: queenW, BLANK: blank} # default theme of EasyChessGui theme_default = {BISHOPB: bishopB, BISHOPW: bishopW, PAWNB: pawnB, PAWNW: pawnW, KNIGHTB: knightB, KNIGHTW: knightW, ROOKB: rookB, ROOKW: rookW, KINGB: kingB, KINGW: kingW, QUEENB: queenB, QUEENW: queenW, BLANK: blank} # themes of our dataset dataset_themes = {} for i in range(32): display_name = str(i+1) dataset_themes.setdefault(display_name, {}) theme_dir = PIECE_IMAGE_PATH + "/" + str(i+1) + "/" dataset_themes[display_name] = { \ BLANK : os.path.join(blank), # use the blank from default BISHOPB : os.path.join(theme_dir, 'b_b.png'), BISHOPW : os.path.join(theme_dir, 'b_w.png'), PAWNB : os.path.join(theme_dir, 'p_b.png'), PAWNW : os.path.join(theme_dir, 'p_w.png'), KNIGHTB : os.path.join(theme_dir, 'n_b.png'), KNIGHTW : os.path.join(theme_dir, 'n_w.png'), ROOKB : os.path.join(theme_dir, 'r_b.png'), ROOKW : os.path.join(theme_dir, 'r_w.png'), QUEENB : os.path.join(theme_dir, 'q_b.png'), QUEENW : os.path.join(theme_dir, 'q_w.png'), KINGB : os.path.join(theme_dir, 'k_b.png'), KINGW : os.path.join(theme_dir, 'k_w.png') } dataset_themes["default"] = theme_default # images = dataset_themes["default"] # Promote piece from psg (pysimplegui) to pyc (python-chess) promote_psg_to_pyc = {KNIGHTB: chess.KNIGHT, BISHOPB: chess.BISHOP, ROOKB: chess.ROOK, QUEENB: chess.QUEEN, KNIGHTW: chess.KNIGHT, BISHOPW: chess.BISHOP, ROOKW: chess.ROOK, QUEENW: chess.QUEEN,} INIT_PGN_TAG = { 'Event': 'Human vs computer', 'White': 'Human', 'Black': 'Computer', } # (1) Mode: Neutral menu_def_neutral = [ ['&Mode', ['Play']], ['Boar&d', [ 'Flip', 'Color', ['Brown::board_color_k', 'Blue::board_color_k', 'Green::board_color_k', 'Gray::board_color_k'], 'Theme', GUI_THEME, ] ], ["Settings for &Assignment COMP5318", [ 'Piece Theme', PIECE_THEME ] ], ['&Engine', ['Set Engine Adviser', 'Set Engine Opponent', 'Set Depth', 'Manage', ['Install', 'Edit', 'Delete']]], ['&Time', ['User::tc_k', 'Engine::tc_k']], ['&Book', ['Set Book::book_set_k']], ['&User', ['Set Name::user_name_k']], ['Tools', ['PGN', ['Delete Player::delete_player_k']]], ['&Settings', ['Game::settings_game_k']], ['&Help', ['About']], ] # (2) Mode: Play, info: hide menu_def_play = [ ['&Mode', ['Neutral']], ["Settings for &Assignment COMP5318", [ 'Piece Theme', PIECE_THEME ] ], ['&Game', ['&New::new_game_k', 'Save to My Games::save_game_k', 'Save to White Repertoire', 'Save to Black Repertoire', 'Resign::resign_game_k', 'User Wins::user_wins_k', 'User Draws::user_draws_k']], ['FEN', ['Paste']], ['&Engine', ['Go', 'Move Now']], ['&Help', ['About']], ] class Timer: def __init__(self, tc_type='fischer', base=300000, inc=10000, period_moves=40): """ :param tc_type: time control type ['fischer, delay, 
classical'] :param base: base time in ms :param inc: increment time in ms can be negative and 0 :param period_moves: number of moves in a period """ self.tc_type = tc_type # ['fischer', 'delay', 'timepermove'] self.base = base self.inc = inc self.period_moves = period_moves self.elapse = 0 self.init_base_time = self.base def update_base(self): """ Update base time after every move :return: """ if self.tc_type == 'delay': self.base += min(0, self.inc - self.elapse) elif self.tc_type == 'fischer': self.base += self.inc - self.elapse elif self.tc_type == 'timepermove': self.base = self.init_base_time else: self.base -= self.elapse self.base = max(0, self.base) self.elapse = 0 class GuiBook: def __init__(self, book_file, board, is_random=True): """ Handle gui polyglot book for engine opponent. :param book_file: polgylot book filename :param board: given board position :param is_random: randomly select move from book """ self.book_file = book_file self.board = board self.is_random = is_random self.__book_move = None def get_book_move(self): """ Returns book move either random or best move """ reader = chess.polyglot.open_reader(self.book_file) try: if self.is_random: entry = reader.weighted_choice(self.board) else: entry = reader.find(self.board) self.__book_move = entry.move except IndexError: logging.warning('No more book move.') except Exception: logging.exception('Failed to get book move.') finally: reader.close() return self.__book_move def get_all_moves(self): """ Read polyglot book and get all legal moves from a given positions. :return: move string """ is_found = False total_score = 0 book_data = {} cnt = 0 if os.path.isfile(self.book_file): moves = '{:4s} {:<5s} {}\n'.format('move', 'score', 'weight') with chess.polyglot.open_reader(self.book_file) as reader: for entry in reader.find_all(self.board): is_found = True san_move = self.board.san(entry.move) score = entry.weight total_score += score bd = {cnt: {'move': san_move, 'score': score}} book_data.update(bd) cnt += 1 else: moves = '{:4s} {:<}\n'.format('move', 'score') # Get weight for each move if is_found: for _, v in book_data.items(): move = v['move'] score = v['score'] weight = score/total_score moves += '{:4s} {:<5d} {:<2.1f}%\n'.format(move, score, 100*weight) return moves, is_found class RunEngine(threading.Thread): pv_length = 9 move_delay_sec = 3.0 def __init__(self, eng_queue, engine_config_file, engine_path_and_file, engine_id_name, max_depth=MAX_DEPTH, base_ms=300000, inc_ms=1000, tc_type='fischer', period_moves=0, is_stream_search_info=True): """ Run engine as opponent or as adviser. 
:param eng_queue: :param engine_config_file: pecg_engines.json :param engine_path_and_file: :param engine_id_name: :param max_depth: """ threading.Thread.__init__(self) self._kill = threading.Event() self.engine_config_file = engine_config_file self.engine_path_and_file = engine_path_and_file self.engine_id_name = engine_id_name self.own_book = False self.bm = None self.pv = None self.score = None self.depth = None self.time = None self.nps = 0 self.max_depth = max_depth self.eng_queue = eng_queue self.engine = None self.board = None self.analysis = is_stream_search_info self.is_nomove_number_in_variation = True self.base_ms = base_ms self.inc_ms = inc_ms self.tc_type = tc_type self.period_moves = period_moves self.is_ownbook = False self.is_move_delay = True def stop(self): """ Interrupt engine search """ self._kill.set() def get_board(self, board): """ Get the current board position """ self.board = board def configure_engine(self): """ Read the engine config file pecg_engines.json and set the engine to use the user_value of the value key. Our option name has 2 values, default_value and user_value. Example for hash option 'name': Hash 'default': default_value 'value': user_value If default_value and user_value are not the same, we will set the engine to use the user_value by the command, setoption name Hash value user_value However if default_value and user_value are the same, we will not send commands to set the option value because the value is default already. """ with open(self.engine_config_file, 'r') as json_file: data = json.load(json_file) for p in data: if p['name'] == self.engine_id_name: for n in p['options']: if n['name'].lower() == 'ownbook': self.is_ownbook = True # Ignore button type for a moment. if n['type'] == 'button': continue if n['type'] == 'spin': user_value = int(n['value']) default_value = int(n['default']) else: user_value = n['value'] default_value = n['default'] if user_value != default_value: try: self.engine.configure({n['name']: user_value}) logging.info('Set {} to {}'.format( n['name'], user_value)) except Exception: logging.exception('{Failed to configure ' 'engine}') def run(self): """ Run engine to get search info and bestmove. If there is error we still send bestmove None. 
:return: bestmove thru que """ folder = Path(self.engine_path_and_file) folder = folder.parents[0] try: if platform == 'win32': self.engine = chess.engine.SimpleEngine.popen_uci( self.engine_path_and_file) else: self.engine = chess.engine.SimpleEngine.popen_uci( self.engine_path_and_file, cwd=folder) except chess.engine.EngineTerminatedError: logging.warning('Failed to start {}.'.format(self.engine_path_and_file)) self.eng_queue.put('bestmove {}'.format(self.bm)) return except Exception: logging.exception('Failed to start {}.'.format( self.engine_path_and_file)) self.eng_queue.put('bestmove {}'.format(self.bm)) return # Set engine option values try: self.configure_engine() except Exception: logging.exception('Failed to configure engine.') # Set search limits if self.tc_type == 'delay': limit = chess.engine.Limit( depth=self.max_depth if self.max_depth != MAX_DEPTH else None, white_clock=self.base_ms/1000, black_clock=self.base_ms/1000, white_inc=self.inc_ms/1000, black_inc=self.inc_ms/1000) elif self.tc_type == 'timepermove': limit = chess.engine.Limit(time=self.base_ms/1000, depth=self.max_depth if self.max_depth != MAX_DEPTH else None) else: limit = chess.engine.Limit( depth=self.max_depth if self.max_depth != MAX_DEPTH else None, white_clock=self.base_ms/1000, black_clock=self.base_ms/1000, white_inc=self.inc_ms/1000, black_inc=self.inc_ms/1000) start_time = time.perf_counter() if self.analysis: is_time_check = False with self.engine.analysis(self.board, limit) as analysis: for info in analysis: if self._kill.wait(0.1): break try: if 'depth' in info: self.depth = int(info['depth']) if 'score' in info: self.score = int(info['score'].relative.score( mate_score=32000))/100 self.time = info['time'] if 'time' in info \ else time.perf_counter() - start_time if 'pv' in info and not ('upperbound' in info or 'lowerbound' in info): self.pv = info['pv'][0:self.pv_length] if self.is_nomove_number_in_variation: spv = self.short_variation_san() self.pv = spv else: self.pv = self.board.variation_san(self.pv) self.eng_queue.put('{} pv'.format(self.pv)) self.bm = info['pv'][0] # score, depth, time, pv if self.score is not None and \ self.pv is not None and self.depth is not None: info_to_send = '{:+5.2f} | {} | {:0.1f}s | {} info_all'.format( self.score, self.depth, self.time, self.pv) self.eng_queue.put('{}'.format(info_to_send)) # Send stop if movetime is exceeded if not is_time_check and self.tc_type != 'fischer' \ and self.tc_type != 'delay' and \ time.perf_counter() - start_time >= \ self.base_ms/1000: logging.info('Max time limit is reached.') is_time_check = True break # Send stop if max depth is exceeded if 'depth' in info: if int(info['depth']) >= self.max_depth \ and self.max_depth != MAX_DEPTH: logging.info('Max depth limit is reached.') break except Exception: logging.exception('Failed to parse search info.') else: result = self.engine.play(self.board, limit,info=chess.engine.INFO_ALL) logging.info('result: {}'.format(result)) try: self.depth = result.info['depth'] except KeyError: self.depth = 1 logging.exception('depth is missing.') try: self.score = int(result.info['score'].relative.score( mate_score=32000)) / 100 except KeyError: self.score = 0 logging.exception('score is missing.') try: self.time = result.info['time'] if 'time' in result.info \ else time.perf_counter() - start_time except KeyError: self.time = 0 logging.exception('time is missing.') try: if 'pv' in result.info: self.pv = result.info['pv'][0:self.pv_length] if self.is_nomove_number_in_variation: spv = 
self.short_variation_san() self.pv = spv else: self.pv = self.board.variation_san(self.pv) except Exception: self.pv = None logging.exception('pv is missing.') if self.pv is not None: info_to_send = '{:+5.2f} | {} | {:0.1f}s | {} info_all'.format( self.score, self.depth, self.time, self.pv) self.eng_queue.put('{}'.format(info_to_send)) self.bm = result.move # Apply engine move delay if movetime is small if self.is_move_delay: while True: if time.perf_counter() - start_time >= self.move_delay_sec: break logging.info('Delay sending of best move {}'.format(self.bm)) time.sleep(1.0) # If bm is None, we will use engine.play() if self.bm is None: logging.info('bm is none, we will try engine,play().') try: result = self.engine.play(self.board, limit) self.bm = result.move except Exception: logging.exception('Failed to get engine bestmove.') self.eng_queue.put('bestmove {}' .format(self.bm)) logging.info('bestmove {}'.format(self.bm)) def quit_engine(self): """ Quit engine """ logging.info('quit engine') try: self.engine.quit() except AttributeError: logging.info('AttributeError, self.engine is already None') except Exception: logging.exception('Failed to quit engine.') def short_variation_san(self): """ Returns variation in san but without move numbers """ if self.pv is None: return None short_san_pv = [] tmp_board = self.board.copy() for pc_move in self.pv: san_move = tmp_board.san(pc_move) short_san_pv.append(san_move) tmp_board.push(pc_move) return ' '.join(short_san_pv) class EasyChessGui: queue = queue.Queue() is_user_white = True # White is at the bottom in board layout def __init__(self, theme, engine_config_file, user_config_file, gui_book_file, computer_book_file, human_book_file, is_use_gui_book, is_random_book, max_book_ply, max_depth=MAX_DEPTH): self.theme = theme self.user_config_file = user_config_file self.engine_config_file = engine_config_file self.gui_book_file = gui_book_file self.computer_book_file = computer_book_file self.human_book_file = human_book_file self.max_depth = max_depth self.is_use_gui_book = is_use_gui_book self.is_random_book = is_random_book self.max_book_ply = max_book_ply self.opp_path_and_file = None self.opp_file = None self.opp_id_name = None self.adviser_file = None self.adviser_path_and_file = None self.adviser_id_name = None self.adviser_hash = 128 self.adviser_threads = 1 self.adviser_movetime_sec = 10 self.pecg_auto_save_game = 'pecg_auto_save_games.pgn' self.my_games = 'pecg_my_games.pgn' self.repertoire_file = {'white': 'pecg_white_repertoire.pgn', 'black': 'pecg_black_repertoire.pgn'} self.init_game() self.fen = None self.psg_board = None self.menu_elem = None self.engine_id_name_list = [] self.engine_file_list = [] self.username = 'Human' self.human_base_time_ms = 5 * 60 * 1000 # 5 minutes self.human_inc_time_ms = 10 * 1000 # 10 seconds self.human_period_moves = 0 self.human_tc_type = 'fischer' self.engine_base_time_ms = 3 * 60 * 1000 # 5 minutes self.engine_inc_time_ms = 2 * 1000 # 10 seconds self.engine_period_moves = 0 self.engine_tc_type = 'fischer' # Default board color is brown self.sq_light_color = '#F0D9B5' self.sq_dark_color = '#B58863' # Move highlight, for brown board self.move_sq_light_color = '#E8E18E' self.move_sq_dark_color = '#B8AF4E' self.gui_theme = 'Reddit' self.images = dataset_themes["default"] self.is_save_time_left = False self.is_save_user_comment = True def update_game(self, mc, user_move, time_left, user_comment): """ Used for saving moves in the game. 
:param mc: move count :param user_move: :param time_left: :param user_comment: Can be a 'book' from the engine :return: """ # Save user comment if self.is_save_user_comment: # If comment is empty if not (user_comment and user_comment.strip()): if mc == 1: self.node = self.game.add_variation(user_move) else: self.node = self.node.add_variation(user_move) # Save clock (time left after a move) as move comment if self.is_save_time_left: rem_time = self.get_time_h_mm_ss(time_left, False) self.node.comment = '[%clk {}]'.format(rem_time) else: if mc == 1: self.node = self.game.add_variation(user_move) else: self.node = self.node.add_variation(user_move) # Save clock, add clock as comment after a move if self.is_save_time_left: rem_time = self.get_time_h_mm_ss(time_left, False) self.node.comment = '[%clk {}] {}'.format(rem_time, user_comment) else: self.node.comment = user_comment # Do not save user comment else: if mc == 1: self.node = self.game.add_variation(user_move) else: self.node = self.node.add_variation(user_move) # Save clock, add clock as comment after a move if self.is_save_time_left: rem_time = self.get_time_h_mm_ss(time_left, False) self.node.comment = '[%clk {}]'.format(rem_time) def create_new_window(self, window, flip=False): """ Close the window param just before turning the new window """ loc = window.CurrentLocation() window.Disable() if flip: self.is_user_white = not self.is_user_white layout = self.build_main_layout(self.is_user_white) w = sg.Window('{} {}'.format(APP_NAME, APP_VERSION), layout, default_button_element_size=(12, 1), auto_size_buttons=False, location=(loc[0], loc[1]), icon=ico_path[platform]['pecg']) # Initialize White and black boxes while True: button, value = w.Read(timeout=50) self.update_labels_and_game_tags(w, human=self.username) break window.Close() return w def delete_player(self, name, pgn, que): """ Delete games of player name in pgn. :param name: :param pgn: :param que: :return: """ logging.info(f'Enters delete_player()') pgn_path = Path(pgn) folder_path = pgn_path.parents[0] file = PurePath(pgn) pgn_file = file.name # Create backup of orig backup = pgn_file + '.backup' backup_path = Path(folder_path, backup) backup_path.touch() origfile_text = Path(pgn).read_text() backup_path.write_text(origfile_text) logging.info(f'backup copy {backup_path} is successfully created.') # Define output file output = 'out_' + pgn_file output_path = Path(folder_path, output) logging.info(f'output {output_path} is successfully created.') logging.info(f'Deleting player {name}.') gcnt = 0 # read pgn and save each game if player name to be deleted is not in # the game, either white or black. 
with open(output_path, 'a') as f: with open(pgn_path) as h: game = chess.pgn.read_game(h) while game: gcnt += 1 que.put('Delete, {}, processing game {}'.format( name, gcnt)) wp = game.headers['White'] bp = game.headers['Black'] # If this game has no player with name to be deleted if wp != name and bp != name: f.write('{}\n\n'.format(game)) game = chess.pgn.read_game(h) if output_path.exists(): logging.info('Deleting player {} is successful.'.format(name)) # Delete the orig file and rename the current output to orig file pgn_path.unlink() logging.info('Delete orig pgn file') output_path.rename(pgn_path) logging.info('Rename output to orig pgn file') que.put('Done') def get_players(self, pgn, q): logging.info(f'Enters get_players()') players = [] games = 0 with open(pgn) as h: while True: headers = chess.pgn.read_headers(h) if headers is None: break wp = headers['White'] bp = headers['Black'] players.append(wp) players.append(bp) games += 1 p = list(set(players)) ret = [p, games] q.put(ret) def get_engine_id_name(self, path_and_file, q): """ Returns id name of uci engine """ id_name = None folder = Path(path_and_file) folder = folder.parents[0] try: if platform == 'win32': engine = chess.engine.SimpleEngine.popen_uci( path_and_file, cwd=folder, creationflags=subprocess.CREATE_NO_WINDOW) else: engine = chess.engine.SimpleEngine.popen_uci( path_and_file, cwd=folder) id_name = engine.id['name'] engine.quit() except Exception: logging.exception('Failed to get id name.') q.put(['Done', id_name]) def get_engine_hash(self, eng_id_name): """ Returns hash value from engine config file """ eng_hash = None with open(self.engine_config_file, 'r') as json_file: data = json.load(json_file) for p in data: if p['name'] == eng_id_name: # There engines without options try: for n in p['options']: if n['name'].lower() == 'hash': return n['value'] except KeyError: logging.info('This engine {} has no options.'.format( eng_id_name)) break except Exception: logging.exception('Failed to get engine hash.') return eng_hash def get_engine_threads(self, eng_id_name): """ Returns number of threads of eng_id_name from pecg_engines.json. :param eng_id_name: the engine id name :return: number of threads """ eng_threads = None with open(self.engine_config_file, 'r') as json_file: data = json.load(json_file) for p in data: if p['name'] == eng_id_name: try: for n in p['options']: if n['name'].lower() == 'threads': return n['value'] except KeyError: logging.info('This engine {} has no options.'.format( eng_id_name)) break except Exception: logging.exception('Failed to get engine threads.') return eng_threads def get_engine_file(self, eng_id_name): """ Returns eng_id_name's filename and path from pecg_engines.json file. :param eng_id_name: engine id name :return: engine file and its path """ eng_file, eng_path_and_file = None, None with open(self.engine_config_file, 'r') as json_file: data = json.load(json_file) for p in data: if p['name'] == eng_id_name: eng_file = p['command'] eng_path_and_file = Path(p['workingDirectory'], eng_file).as_posix() break return eng_file, eng_path_and_file def get_engine_id_name_list(self): """ Read engine config file. :return: list of engine id names """ eng_id_name_list = [] with open(self.engine_config_file, 'r') as json_file: data = json.load(json_file) for p in data: if p['protocol'] == 'uci': eng_id_name_list.append(p['name']) eng_id_name_list = sorted(eng_id_name_list) return eng_id_name_list def update_user_config_file(self, username): """ Update user config file. 
If username does not exist, save it. :param username: :return: """ with open(self.user_config_file, 'r') as json_file: data = json.load(json_file) # Add the new entry if it does not exist is_name = False for i in range(len(data)): if data[i]['username'] == username: is_name = True break if not is_name: data.append({'username': username}) # Save with open(self.user_config_file, 'w') as h: json.dump(data, h, indent=4) def check_user_config_file(self): """ Check presence of pecg_user.json file, if nothing we will create one with ['username': 'Human'] :return: """ user_config_file_path = Path(self.user_config_file) if user_config_file_path.exists(): with open(self.user_config_file, 'r') as json_file: data = json.load(json_file) for p in data: username = p['username'] self.username = username else: # Write a new user config file data = [] data.append({'username': 'Human'}) # Save data to pecg_user.json with open(self.user_config_file, 'w') as h: json.dump(data, h, indent=4) def update_engine_to_config_file(self, eng_path_file, new_name, old_name, user_opt): """ Update engine config file based on params. :param eng_path_file: full path of engine :param new_name: new engine id name :param new_name: old engine id name :param user_opt: a list of dict, i.e d = ['a':a, 'b':b, ...] :return: """ folder = Path(eng_path_file) folder = folder.parents[0] folder = Path(folder) folder = folder.as_posix() file = PurePath(eng_path_file) file = file.name with open(self.engine_config_file, 'r') as json_file: data = json.load(json_file) for p in data: command = p['command'] work_dir = p['workingDirectory'] if file == command and folder == work_dir and old_name == p['name']: p['name'] = new_name for k, v in p.items(): if k == 'options': for d in v: # d = {'name': 'Ponder', 'default': False, # 'value': False, 'type': 'check'} default_type = type(d['default']) opt_name = d['name'] opt_value = d['value'] for u in user_opt: # u = {'name': 'CDrill 1400'} for k1, v1 in u.items(): if k1 == opt_name: v1 = int(v1) if default_type == int else v1 if v1 != opt_value: d['value'] = v1 break # Save data to pecg_engines.json with open(self.engine_config_file, 'w') as h: json.dump(data, h, indent=4) def is_name_exists(self, name): """ :param name: The name to check in pecg.engines.json file. :return: """ with open(self.engine_config_file, 'r') as json_file: data = json.load(json_file) for p in data: jname = p['name'] if jname == name: return True return False def add_engine_to_config_file(self, engine_path_and_file, pname, que): """ Add pname config in pecg_engines.json file. 
:param engine_path_and_file: :param pname: id name of uci engine :return: """ folder = Path(engine_path_and_file).parents[0] file = PurePath(engine_path_and_file) file = file.name option = [] with open(self.engine_config_file, 'r') as json_file: data = json.load(json_file) try: if platform == 'win32': engine = chess.engine.SimpleEngine.popen_uci( engine_path_and_file, cwd=folder, creationflags=subprocess.CREATE_NO_WINDOW) else: engine = chess.engine.SimpleEngine.popen_uci( engine_path_and_file, cwd=folder) except Exception: logging.exception('Failed to add {} in config file.'.format(pname)) que.put('Failure') return try: opt_dict = engine.options.items() except Exception: logging.exception('Failed to get engine options.') que.put('Failure') return engine.quit() for opt in opt_dict: o = opt[1] if o.type == 'spin': # Adjust hash and threads values if o.name.lower() == 'threads': value = 1 logging.info('config {} is set to {}'.format(o.name, value)) elif o.name.lower() == 'hash': value = 32 logging.info('config {} is set to {}'.format(o.name, value)) else: value = o.default option.append({'name': o.name, 'default': o.default, 'value': value, 'type': o.type, 'min': o.min, 'max': o.max}) elif o.type == 'combo': option.append({'name': o.name, 'default': o.default, 'value': o.default, 'type': o.type, 'choices':o.var}) else: option.append({'name': o.name, 'default': o.default, 'value': o.default, 'type': o.type}) # Save engine filename, working dir, name and options wdir = Path(folder).as_posix() protocol = 'uci' # Only uci engine is supported so far self.engine_id_name_list.append(pname) data.append({'command': file, 'workingDirectory': wdir, 'name': pname, 'protocol': protocol, 'options': option}) # Save data to pecg_engines.json with open(self.engine_config_file, 'w') as h: json.dump(data, h, indent=4) que.put('Success') def check_engine_config_file(self): """ Check presence of engine config file pecg_engines.json. If not found we will create it, with entries from engines in Engines folder. 
:return: """ ec = Path(self.engine_config_file) if ec.exists(): return data = [] cwd = Path.cwd() self.engine_file_list = self.get_engines() for fn in self.engine_file_list: # Run engine and get id name and options option = [] # cwd=current working dir, engines=folder, fn=exe file epath = Path(cwd, 'Engines', fn) engine_path_and_file = str(epath) folder = epath.parents[0] try: if platform == 'win32': engine = chess.engine.SimpleEngine.popen_uci( engine_path_and_file, cwd=folder, creationflags=subprocess.CREATE_NO_WINDOW) else: engine = chess.engine.SimpleEngine.popen_uci( engine_path_and_file, cwd=folder) except Exception: logging.exception(f'Failed to start engine {fn}!') continue engine_id_name = engine.id['name'] opt_dict = engine.options.items() engine.quit() for opt in opt_dict: o = opt[1] if o.type == 'spin': # Adjust hash and threads values if o.name.lower() == 'threads': value = 1 elif o.name.lower() == 'hash': value = 32 else: value = o.default option.append({'name': o.name, 'default': o.default, 'value': value, 'type': o.type, 'min': o.min, 'max': o.max}) elif o.type == 'combo': option.append({'name': o.name, 'default': o.default, 'value': o.default, 'type': o.type, 'choices':o.var}) else: option.append({'name': o.name, 'default': o.default, 'value': o.default, 'type': o.type}) # Save engine filename, working dir, name and options wdir = Path(cwd, 'Engines').as_posix() name = engine_id_name protocol = 'uci' self.engine_id_name_list.append(name) data.append({'command': fn, 'workingDirectory': wdir, 'name': name, 'protocol': protocol, 'options': option}) # Save data to pecg_engines.json with open(self.engine_config_file, 'w') as h: json.dump(data, h, indent=4) def get_time_mm_ss_ms(self, time_ms): """ Returns time in min:sec:millisec given time in millisec """ s, ms = divmod(int(time_ms), 1000) m, s = divmod(s, 60) # return '{:02d}m:{:02d}s:{:03d}ms'.format(m, s, ms) return '{:02d}m:{:02d}s'.format(m, s) def get_time_h_mm_ss(self, time_ms, symbol=True): """ Returns time in h:mm:ss format. 
:param time_ms: :param symbol: :return: """ s, ms = divmod(int(time_ms), 1000) m, s = divmod(s, 60) h, m = divmod(m, 60) if not symbol: return '{:01d}:{:02d}:{:02d}'.format(h, m, s) return '{:01d}h:{:02d}m:{:02d}s'.format(h, m, s) def update_text_box(self, window, msg, is_hide): """ Update text elements """ best_move = None msg_str = str(msg) if not 'bestmove ' in msg_str: if 'info_all' in msg_str: info_all = ' '.join(msg_str.split()[0:-1]).strip() msg_line = '{}\n'.format(info_all) window.FindElement('search_info_all_k').Update( '' if is_hide else msg_line) else: # Best move can be None because engine dies try: best_move = chess.Move.from_uci(msg.split()[1]) except Exception: logging.exception('Engine sent {}.'.format(best_move)) sg.Popup('Engine error, it sent a {} bestmove.\n'.format( best_move) + 'Back to Neutral mode, it is better to ' 'change engine {}.'.format( self.opp_id_name), icon=ico_path[platform]['pecg'], title=BOX_TITLE) return best_move def get_tag_date(self): """ Return date in pgn tag date format """ return datetime.today().strftime('%Y.%m.%d') def init_game(self): """ Initialize game with initial pgn tag values """ self.game = chess.pgn.Game() self.node = None self.game.headers['Event'] = INIT_PGN_TAG['Event'] self.game.headers['Date'] = self.get_tag_date() self.game.headers['White'] = INIT_PGN_TAG['White'] self.game.headers['Black'] = INIT_PGN_TAG['Black'] def set_new_game(self): """ Initialize new game but save old pgn tag values""" old_event = self.game.headers['Event'] old_white = self.game.headers['White'] old_black = self.game.headers['Black'] # Define a game object for saving game in pgn format self.game = chess.pgn.Game() self.game.headers['Event'] = old_event self.game.headers['Date'] = self.get_tag_date() self.game.headers['White'] = old_white self.game.headers['Black'] = old_black def clear_elements(self, window): """ Clear movelist, score, pv, time, depth and nps boxes """ window.FindElement('search_info_all_k').Update('') window.FindElement('_movelist_').Update(disabled=False) window.FindElement('_movelist_').Update('', disabled=True) window.FindElement('polyglot_book1_k').Update('') window.FindElement('polyglot_book2_k').Update('') window.FindElement('advise_info_k').Update('') #window.FindElement('comment_k').Update('') window.FindElement('cnn_prediction').Update('') window.FindElement('abc_prediction').Update('') window.FindElement('svc_prediction').Update('') window.Element('w_base_time_k').Update('') window.Element('b_base_time_k').Update('') window.Element('w_elapse_k').Update('') window.Element('b_elapse_k').Update('') def update_labels_and_game_tags(self, window, human='Human'): """ Update player names """ engine_id = self.opp_id_name if self.is_user_white: window.FindElement('_White_').Update(human) window.FindElement('_Black_').Update(engine_id) self.game.headers['White'] = human self.game.headers['Black'] = engine_id else: window.FindElement('_White_').Update(engine_id) window.FindElement('_Black_').Update(human) self.game.headers['White'] = engine_id self.game.headers['Black'] = human def get_fen(self): """ Get fen from clipboard """ self.fen = pyperclip.paste() # Remove empty char at the end of FEN if self.fen.endswith(' '): self.fen = self.fen[:-1] def fen_to_psg_board(self, window): """ Update psg_board based on FEN """ psgboard = [] # Get piece locations only to build psg board pc_locations = self.fen.split()[0] board = chess.BaseBoard(pc_locations) old_r = None for s in chess.SQUARES: r = chess.square_rank(s) if old_r is None: piece_r = [] 
elif old_r != r: psgboard.append(piece_r) piece_r = [] elif s == 63: psgboard.append(piece_r) try: pc = board.piece_at(s^56) except Exception: pc = None logging.exception('Failed to get piece.') if pc is not None: pt = pc.piece_type c = pc.color if c: if pt == chess.PAWN: piece_r.append(PAWNW) elif pt == chess.KNIGHT: piece_r.append(KNIGHTW) elif pt == chess.BISHOP: piece_r.append(BISHOPW) elif pt == chess.ROOK: piece_r.append(ROOKW) elif pt == chess.QUEEN: piece_r.append(QUEENW) elif pt == chess.KING: piece_r.append(KINGW) else: if pt == chess.PAWN: piece_r.append(PAWNB) elif pt == chess.KNIGHT: piece_r.append(KNIGHTB) elif pt == chess.BISHOP: piece_r.append(BISHOPB) elif pt == chess.ROOK: piece_r.append(ROOKB) elif pt == chess.QUEEN: piece_r.append(QUEENB) elif pt == chess.KING: piece_r.append(KINGB) # Else if pc is None or square is empty else: piece_r.append(BLANK) old_r = r self.psg_board = psgboard self.redraw_board(window) def change_square_color(self, window, row, col): """ Change the color of a square based on square row and col. """ btn_sq = window.FindElement(key=(row, col)) is_dark_square = True if (row + col) % 2 else False bd_sq_color = self.move_sq_dark_color if is_dark_square else \ self.move_sq_light_color btn_sq.Update(button_color=('white', bd_sq_color)) def relative_row(self, s, stm): """ The board can be viewed, as white at the bottom and black at the top. If stm is white the row 0 is at the bottom. If stm is black row 0 is at the top. :param s: square :param stm: side to move :return: relative row """ return 7 - self.get_row(s) if stm else self.get_row(s) def get_row(self, s): """ This row is based on PySimpleGUI square mapping that is 0 at the top and 7 at the bottom. In contrast Python-chess square mapping is 0 at the bottom and 7 at the top. chess.square_rank() is a method from Python-chess that returns row given square s. :param s: square :return: row """ return 7 - chess.square_rank(s) def get_col(self, s): """ Returns col given square s """ return chess.square_file(s) def redraw_board(self, window): """ Redraw board at start and afte a move. :param window: :return: """ for i in range(8): for j in range(8): color = self.sq_dark_color if (i + j) % 2 else \ self.sq_light_color piece_image = self.images[self.psg_board[i][j]] elem = window.FindElement(key=(i, j)) elem.Update(button_color=('white', color), image_filename=piece_image, ) #save_grid_as_file(window, elem, "./grids/" + str(i) + "-" + str(j) + ".png") def render_square(self, image, key, location): """ Returns an RButton (Read Button) with image image """ if (location[0] + location[1]) % 2: color = self.sq_dark_color # Dark square else: color = self.sq_light_color return sg.RButton('', image_filename=image, size=(1, 1), border_width=0, button_color=('white', color), pad=(0, 0), key=key) def select_promotion_piece(self, stm): """ Allow user to select a piece type to promote to. :param stm: side to move :return: promoted piece, i.e QUEENW, QUEENB ... 
""" piece = None board_layout, row = [], [] psg_promote_board = copy.deepcopy(white_init_promote_board) if stm \ else copy.deepcopy(black_init_promote_board) # Loop through board and create buttons with images for i in range(1): for j in range(4): piece_image = self.images[psg_promote_board[i][j]] row.append(self.render_square(piece_image, key=(i, j), location=(i, j))) board_layout.append(row) promo_window = sg.Window('{} {}'.format(APP_NAME, APP_VERSION), board_layout, default_button_element_size=(12, 1), auto_size_buttons=False, icon=ico_path[platform]['pecg']) while True: button, value = promo_window.Read(timeout=0) if button is None: break if type(button) is tuple: move_from = button fr_row, fr_col = move_from piece = psg_promote_board[fr_row][fr_col] logging.info('promote piece: {}'.format(piece)) break promo_window.Close() return piece def update_rook(self, window, move): """ Update rook location for castle move. :param window: :param move: uci move format :return: """ if move == 'e1g1': fr = chess.H1 to = chess.F1 pc = ROOKW elif move == 'e1c1': fr = chess.A1 to = chess.D1 pc = ROOKW elif move == 'e8g8': fr = chess.H8 to = chess.F8 pc = ROOKB elif move == 'e8c8': fr = chess.A8 to = chess.D8 pc = ROOKB self.psg_board[self.get_row(fr)][self.get_col(fr)] = BLANK self.psg_board[self.get_row(to)][self.get_col(to)] = pc self.redraw_board(window) def update_ep(self, window, move, stm): """ Update board for e.p move. :param window: :param move: python-chess format :param stm: side to move :return: """ to = move.to_square if stm: capture_sq = to - 8 else: capture_sq = to + 8 self.psg_board[self.get_row(capture_sq)][self.get_col(capture_sq)] = BLANK self.redraw_board(window) def get_promo_piece(self, move, stm, human): """ Returns promotion piece. :param move: python-chess format :param stm: side to move :param human: if side to move is human this is True :return: promoted piece in python-chess and pythonsimplegui formats """ # If this move is from a user, we will show a window with piece images if human: psg_promo = self.select_promotion_piece(stm) # If user pressed x we set the promo to queen if psg_promo is None: logging.info('User did not select a promotion piece, ' 'set this to queen.') psg_promo = QUEENW if stm else QUEENB pyc_promo = promote_psg_to_pyc[psg_promo] # Else if move is from computer else: pyc_promo = move.promotion # This is from python-chess if stm: if pyc_promo == chess.QUEEN: psg_promo = QUEENW elif pyc_promo == chess.ROOK: psg_promo = ROOKW elif pyc_promo == chess.BISHOP: psg_promo = BISHOPW elif pyc_promo == chess.KNIGHT: psg_promo = KNIGHTW else: if pyc_promo == chess.QUEEN: psg_promo = QUEENB elif pyc_promo == chess.ROOK: psg_promo = ROOKB elif pyc_promo == chess.BISHOP: psg_promo = BISHOPB elif pyc_promo == chess.KNIGHT: psg_promo = KNIGHTB return pyc_promo, psg_promo def set_depth_limit(self): """ Returns max depth based from user setting """ user_depth = sg.PopupGetText( 'Current depth is {}\n\nInput depth [{} to {}]'.format( self.max_depth, MIN_DEPTH, MAX_DEPTH), title=BOX_TITLE, icon=ico_path[platform]['pecg']) try: user_depth = int(user_depth) except Exception: user_depth = self.max_depth logging.exception('Failed to get user depth.') self.max_depth = min(MAX_DEPTH, max(MIN_DEPTH, user_depth)) def define_timer(self, window, name='human'): """ Returns Timer object for either human or engine. 
""" if name == 'human': timer = Timer(self.human_tc_type, self.human_base_time_ms, self.human_inc_time_ms, self.human_period_moves) else: timer = Timer(self.engine_tc_type, self.engine_base_time_ms, self.engine_inc_time_ms, self.engine_period_moves) elapse_str = self.get_time_h_mm_ss(timer.base) is_white_base = self.is_user_white and name == 'human' or \ not self.is_user_white and name != 'human' window.Element('w_base_time_k' if is_white_base else 'b_base_time_k').Update( elapse_str) return timer def play_game(self, window, engine_id_name, board): """ User can play a game against and engine. :param window: :param engine_id_name: :param board: current board position :return: """ window.FindElement('_movelist_').Update(disabled=False) window.FindElement('_movelist_').Update('', disabled=True) is_human_stm = True if self.is_user_white else False move_state = 0 move_from, move_to = None, None is_new_game, is_exit_game, is_exit_app = False, False, False # Do not play immediately when stm is computer is_engine_ready = True if is_human_stm else False # For saving game move_cnt = 0 is_user_resigns = False is_user_wins = False is_user_draws = False is_search_stop_for_exit = False is_search_stop_for_new_game = False is_search_stop_for_neutral = False is_search_stop_for_resign = False is_search_stop_for_user_wins = False is_search_stop_for_user_draws = False is_hide_book1 = True is_hide_book2 = True is_hide_search_info = True # Init timer human_timer = self.define_timer(window) engine_timer = self.define_timer(window, 'engine') # Game loop while not board.is_game_over(claim_draw=True): moved_piece = None # Mode: Play, Hide book 1 if is_hide_book1: window.Element('polyglot_book1_k').Update('') else: # Load 2 polyglot book files ref_book1 = GuiBook(self.computer_book_file, board, self.is_random_book) all_moves, is_found = ref_book1.get_all_moves() if is_found: window.Element('polyglot_book1_k').Update(all_moves) else: window.Element('polyglot_book1_k').Update('no book moves') # Mode: Play, Hide book 2 if is_hide_book2: window.Element('polyglot_book2_k').Update('') else: ref_book2 = GuiBook(self.human_book_file, board, self.is_random_book) all_moves, is_found = ref_book2.get_all_moves() if is_found: window.Element('polyglot_book2_k').Update(all_moves) else: window.Element('polyglot_book2_k').Update('no book moves') # Mode: Play, Stm: computer (first move), Allow user to change settings. # User can start the engine by Engine->Go. 
if not is_engine_ready: window.FindElement('_gamestatus_').Update( 'Mode Play, press Engine->Go') while True: button, value = window.Read(timeout=100) # Mode: Play, Stm: computer (first move) if button == 'New::new_game_k': is_new_game = True break # Mode: Play, Stm: Computer first move if button == 'Neutral': is_exit_game = True break if button == 'About': sg.PopupScrolled(HELP_MSG, title=BOX_TITLE) continue if button == 'Paste': try: self.get_fen() self.set_new_game() board = chess.Board(self.fen) except Exception: logging.exception('Error in parsing FEN from clipboard.') continue self.fen_to_psg_board(window) # If user is black and side to move is black if not self.is_user_white and not board.turn: is_human_stm = True window.FindElement('_gamestatus_').Update( 'Mode Play') # Elif user is black and side to move is white elif not self.is_user_white and board.turn: is_human_stm = False window.FindElement('_gamestatus_').Update( 'Mode Play, press Engine->Go') # When computer is to move in the first move, don't # allow the engine to search immediately, wait for the # user to press Engine->Go menu. is_engine_ready = True if is_human_stm else False self.game.headers['FEN'] = self.fen break if button == 'Go': is_engine_ready = True break if button is None: logging.info('Quit app X is pressed.') is_exit_app = True break if is_exit_app or is_exit_game or is_new_game: break # If side to move is human if is_human_stm: move_state = 0 while True: button, value = window.Read(timeout=100) # Update elapse box in m:s format elapse_str = self.get_time_mm_ss_ms(human_timer.elapse) k = 'w_elapse_k' if not self.is_user_white: k = 'b_elapse_k' window.Element(k).Update(elapse_str) human_timer.elapse += 100 if not is_human_stm: break # Mode: Play, Stm: User, Run adviser engine if button == 'Start::right_adviser_k': self.adviser_threads = self.get_engine_threads( self.adviser_id_name) self.adviser_hash = self.get_engine_hash( self.adviser_id_name) adviser_base_ms = self.adviser_movetime_sec * 1000 adviser_inc_ms = 0 search = RunEngine(self.queue, self.engine_config_file, self.adviser_path_and_file, self.adviser_id_name, self.max_depth, adviser_base_ms, adviser_inc_ms, tc_type='timepermove', period_moves=0, is_stream_search_info=True) search.get_board(board) search.daemon = True search.start() while True: button, value = window.Read(timeout=10) if button == 'Stop::right_adviser_k': search.stop() # Exit app while adviser is thinking if button is None: search.stop() is_search_stop_for_exit = True try: msg = self.queue.get_nowait() if 'pv' in msg: # Reformat msg, remove the word pv at the end msg_line = ' '.join(msg.split()[0:-1]) window.Element('advise_info_k').Update(msg_line) except Exception: continue if 'bestmove' in msg: # bestmove can be None so we do try/except try: # Shorten msg line to 3 ply moves msg_line = ' '.join(msg_line.split()[0:3]) msg_line += ' - ' + self.adviser_id_name window.Element('advise_info_k').Update(msg_line) except Exception: logging.exception('Adviser engine error') sg.Popup('Adviser engine {} error.\n'.format( self.adviser_id_name) + \ 'It is better to change this engine.\n' + 'Change to Neutral mode first.', icon=ico_path[platform]['pecg'], title=BOX_TITLE) break search.join() search.quit_engine() break # Mode: Play, Stm: user if button == 'Show::right_search_info_k': is_hide_search_info = False break # Mode: Play, Stm: user if button == 'Hide::right_search_info_k': is_hide_search_info = True window.Element('search_info_all_k').Update('') break # Mode: Play, Stm: user if button == 
'Show::right_book1_k': is_hide_book1 = False break # Mode: Play, Stm: user if button == 'Hide::right_book1_k': is_hide_book1 = True break # Mode: Play, Stm: user if button == 'Show::right_book2_k': is_hide_book2 = False break # Mode: Play, Stm: user if button == 'Hide::right_book2_k': is_hide_book2 = True break if button is None: logging.info('Quit app X is pressed.') is_exit_app = True break # redraw when changed theme if button in PIECE_THEME: print("selected piece theme " + button) self.images = dataset_themes[button] self.redraw_board(window) continue if is_search_stop_for_exit: is_exit_app = True break # Mode: Play, Stm: User if button == 'New::new_game_k' or is_search_stop_for_new_game: is_new_game = True self.clear_elements(window) break if button == 'Save to My Games::save_game_k': logging.info('Saving game manually') with open(self.my_games, mode = 'a+') as f: self.game.headers['Event'] = 'My Games' f.write('{}\n\n'.format(self.game)) break # Mode: Play, Stm: user if button == 'Save to White Repertoire': with open(self.repertoire_file['white'], mode = 'a+') as f: self.game.headers['Event'] = 'White Repertoire' f.write('{}\n\n'.format(self.game)) break # Mode: Play, Stm: user if button == 'Save to Black Repertoire': with open(self.repertoire_file['black'], mode = 'a+') as f: self.game.headers['Event'] = 'Black Repertoire' f.write('{}\n\n'.format(self.game)) break # Mode: Play, stm: User if button == 'Resign::resign_game_k' or is_search_stop_for_resign: logging.info('User resigns') # Verify resign reply = sg.Popup('Do you really want to resign?', button_type=sg.POPUP_BUTTONS_YES_NO, title=BOX_TITLE, icon=ico_path[platform]['pecg']) if reply == 'Yes': is_user_resigns = True break else: if is_search_stop_for_resign: is_search_stop_for_resign = False continue # Mode: Play, stm: User if button == 'User Wins::user_wins_k' or is_search_stop_for_user_wins: logging.info('User wins by adjudication') is_user_wins = True break # Mode: Play, stm: User if button == 'User Draws::user_draws_k' or is_search_stop_for_user_draws: logging.info('User draws by adjudication') is_user_draws = True break # Mode: Play, Stm: User if button == 'Neutral' or is_search_stop_for_neutral: is_exit_game = True self.clear_elements(window) break # Mode: Play, stm: User if button == 'About': sg.PopupScrolled(HELP_MSG, title=BOX_TITLE,) break # Mode: Play, stm: User if button == 'Go': if is_human_stm: is_human_stm = False else: is_human_stm = True is_engine_ready = True window.FindElement('_gamestatus_').Update( 'Mode Play, Engine is thinking ...') break # Mode: Play, stm: User if button == 'Paste': # Pasting fen is only allowed before the game starts. 
if len(self.game.variations): sg.Popup('Press Game->New then paste your fen.', title='Mode Play') continue try: self.get_fen() self.set_new_game() board = chess.Board(self.fen) except Exception: logging.exception('Error in parsing FEN from clipboard.') continue self.fen_to_psg_board(window) is_human_stm = True if board.turn else False is_engine_ready = True if is_human_stm else False window.FindElement('_gamestatus_').Update( 'Mode Play, side: {}'.format( 'white' if board.turn else 'black')) self.game.headers['FEN'] = self.fen break # Mode: Play, stm: User, user starts moving if type(button) is tuple: # If fr_sq button is pressed if move_state == 0: move_from = button fr_row, fr_col = move_from piece = self.psg_board[fr_row][fr_col] # get the move-from piece # Change the color of the "fr" board square self.change_square_color(window, fr_row, fr_col) move_state = 1 moved_piece = board.piece_type_at(chess.square(fr_col, 7-fr_row)) # Pawn=1 # Else if to_sq button is pressed elif move_state == 1: is_promote = False move_to = button to_row, to_col = move_to button_square = window.FindElement(key=(fr_row, fr_col)) # If move is cancelled, pressing same button twice if move_to == move_from: # Restore the color of the pressed board square color = self.sq_dark_color if (to_row + to_col) % 2 else self.sq_light_color # Restore the color of the fr square button_square.Update(button_color=('white', color)) move_state = 0 continue # Create a move in python-chess format based from user input user_move = None # Get the fr_sq and to_sq of the move from user, based from this info # we will create a move based from python-chess format. # Note chess.square() and chess.Move() are from python-chess module fr_row, fr_col = move_from fr_sq = chess.square(fr_col, 7-fr_row) to_sq = chess.square(to_col, 7-to_row) # If user move is a promote if self.relative_row(to_sq, board.turn) == RANK_8 and \ moved_piece == chess.PAWN: is_promote = True pyc_promo, psg_promo = self.get_promo_piece( user_move, board.turn, True) user_move = chess.Move(fr_sq, to_sq, promotion=pyc_promo) else: user_move = chess.Move(fr_sq, to_sq) # Check if user move is legal if user_move in board.legal_moves: # Update rook location if this is a castle move if board.is_castling(user_move): self.update_rook(window, str(user_move)) # Update board if e.p capture elif board.is_en_passant(user_move): self.update_ep(user_move, board.turn) # Empty the board from_square, applied to any types of move self.psg_board[move_from[0]][move_from[1]] = BLANK # Update board to_square if move is a promotion if is_promote: self.psg_board[to_row][to_col] = psg_promo # Update the to_square if not a promote move else: # Place piece in the move to_square self.psg_board[to_row][to_col] = piece self.redraw_board(window) board.push(user_move) move_cnt += 1 # Update clock, reset elapse to zero human_timer.update_base() # Update game, move from human time_left = human_timer.base # we ignored the comments to better format the move list, and our prediction. 
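Condensed sketch of the click-to-move logic above: two GUI grid coordinates become a chess.Move, a promotion piece is attached when a pawn reaches the last rank, and the move is only accepted if it is legal. This simplification always promotes to a queen and skips the promotion dialog.

import chess

def gui_click_to_move(board, fr_rc, to_rc, promotion=chess.QUEEN):
    fr_sq = chess.square(fr_rc[1], 7 - fr_rc[0])   # (row, col) -> square
    to_sq = chess.square(to_rc[1], 7 - to_rc[0])
    move = chess.Move(fr_sq, to_sq)
    if (board.piece_type_at(fr_sq) == chess.PAWN
            and chess.square_rank(to_sq) in (0, 7)):
        move = chess.Move(fr_sq, to_sq, promotion=promotion)
    return move if move in board.legal_moves else None

board = chess.Board()
move = gui_click_to_move(board, (6, 4), (4, 4))   # e2 -> e4 with white at the bottom
if move is not None:
    board.push(move)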
# user_comment = value['comment_k'] user_comment = "\n" self.update_game(move_cnt, user_move, time_left, user_comment) window.FindElement('_movelist_').Update(disabled=False) window.FindElement('_movelist_').Update('') window.FindElement('_movelist_').Update( self.game.variations[0], append=True, disabled=True) # Clear comment and engine search box #window.FindElement('comment_k').Update('') window.Element('search_info_all_k').Update('') # Change the color of the "fr" and "to" board squares self.change_square_color(window, fr_row, fr_col) self.change_square_color(window, to_row, to_col) # call our classifiers window.refresh() ClassifyBoard(window, cnn, "cnn_prediction") ClassifyBoard(window, abc, "abc_prediction") ClassifyBoard(window, svc, "svc_prediction") is_human_stm = not is_human_stm # Human has done its move k1 = 'w_elapse_k' k2 = 'w_base_time_k' if not self.is_user_white: k1 = 'b_elapse_k' k2 = 'b_base_time_k' # Update elapse box elapse_str = self.get_time_mm_ss_ms( human_timer.elapse) window.Element(k1).Update(elapse_str) # Update remaining time box elapse_str = self.get_time_h_mm_ss( human_timer.base) window.Element(k2).Update(elapse_str) window.Element('advise_info_k').Update('') # Else if move is illegal else: move_state = 0 color = self.sq_dark_color \ if (move_from[0] + move_from[1]) % 2 else self.sq_light_color # Restore the color of the fr square button_square.Update(button_color=('white', color)) continue if is_new_game or is_exit_game or is_exit_app or \ is_user_resigns or is_user_wins or is_user_draws: break # Else if side to move is not human elif not is_human_stm and is_engine_ready: is_promote = False best_move = None is_book_from_gui = True # Mode: Play, stm: Computer, If using gui book if self.is_use_gui_book and move_cnt <= self.max_book_ply: # Verify presence of a book file if os.path.isfile(self.gui_book_file): gui_book = GuiBook(self.gui_book_file, board, self.is_random_book) best_move = gui_book.get_book_move() logging.info('Book move is {}.'.format(best_move)) else: logging.warning('GUI book is missing.') # Mode: Play, stm: Computer, If there is no book move, # let the engine search the best move if best_move is None: search = RunEngine(self.queue, self.engine_config_file, self.opp_path_and_file, self.opp_id_name, self.max_depth, engine_timer.base, engine_timer.inc, tc_type=engine_timer.tc_type, period_moves=board.fullmove_number) search.get_board(board) search.daemon = True search.start() window.FindElement('_gamestatus_').Update( 'Mode Play, Engine is thinking ...') while True: button, value = window.Read(timeout=100) # Update elapse box in m:s format elapse_str = self.get_time_mm_ss_ms(engine_timer.elapse) k = 'b_elapse_k' if not self.is_user_white: k = 'w_elapse_k' window.Element(k).Update(elapse_str) engine_timer.elapse += 100 # Hide/Unhide engine searching info while engine is thinking if button == 'Show::right_search_info_k': is_hide_search_info = False if button == 'Hide::right_search_info_k': is_hide_search_info = True window.Element('search_info_all_k').Update('') # Show book 1 while engine is searching if button == 'Show::right_book1_k': is_hide_book1 = False ref_book1 = GuiBook(self.computer_book_file, board, self.is_random_book) all_moves, is_found = ref_book1.get_all_moves() if is_found: window.Element('polyglot_book1_k').Update(all_moves) else: window.Element('polyglot_book1_k').Update('no book moves') # Hide book 1 while engine is searching if button == 'Hide::right_book1_k': is_hide_book1 = True window.Element('polyglot_book1_k').Update('') # 
Show book 2 while engine is searching if button == 'Show::right_book2_k': is_hide_book2 = False ref_book2 = GuiBook(self.human_book_file, board, self.is_random_book) all_moves, is_found = ref_book2.get_all_moves() if is_found: window.Element('polyglot_book2_k').Update(all_moves) else: window.Element('polyglot_book2_k').Update('no book moves') # Hide book 2 while engine is searching if button == 'Hide::right_book2_k': is_hide_book2 = True window.Element('polyglot_book2_k').Update('') # Exit app while engine is thinking if button is None: search.stop() is_search_stop_for_exit = True # Forced engine to move now and create a new game if button == 'New::new_game_k': search.stop() is_search_stop_for_new_game = True # Forced engine to move now if button == 'Move Now': search.stop() # Mode: Play, Computer is thinking if button == 'Neutral': search.stop() is_search_stop_for_neutral = True if button == 'Resign::resign_game_k': search.stop() is_search_stop_for_resign = True if button == 'User Wins::user_wins_k': search.stop() is_search_stop_for_user_wins = True if button == 'User Draws::user_draws_k': search.stop() is_search_stop_for_user_draws = True # Get the engine search info and display it in GUI text boxes try: msg = self.queue.get_nowait() except Exception: continue msg_str = str(msg) best_move = self.update_text_box(window, msg, is_hide_search_info) if 'bestmove' in msg_str: logging.info('engine msg: {}'.format(msg_str)) break search.join() search.quit_engine() is_book_from_gui = False # If engine failed to send a legal move if best_move is None: break # Update board with computer move move_str = str(best_move) fr_col = ord(move_str[0]) - ord('a') fr_row = 8 - int(move_str[1]) to_col = ord(move_str[2]) - ord('a') to_row = 8 - int(move_str[3]) piece = self.psg_board[fr_row][fr_col] self.psg_board[fr_row][fr_col] = BLANK # Update rook location if this is a castle move if board.is_castling(best_move): self.update_rook(window, move_str) # Update board if e.p capture elif board.is_en_passant(best_move): self.update_ep(best_move, board.turn) # Update board if move is a promotion elif best_move.promotion is not None: is_promote = True _, psg_promo = self.get_promo_piece(best_move, board.turn, False) # Update board to_square if move is a promotion if is_promote: self.psg_board[to_row][to_col] = psg_promo # Update the to_square if not a promote move else: # Place piece in the move to_square self.psg_board[to_row][to_col] = piece self.redraw_board(window) board.push(best_move) move_cnt += 1 # Update timer engine_timer.update_base() # Update game, move from engine time_left = engine_timer.base if is_book_from_gui: engine_comment = 'book' else: engine_comment = '' self.update_game(move_cnt, best_move, time_left, engine_comment) window.FindElement('_movelist_').Update(disabled=False) window.FindElement('_movelist_').Update('') window.FindElement('_movelist_').Update( self.game.variations[0], append=True, disabled=True) # Change the color of the "fr" and "to" board squares self.change_square_color(window, fr_row, fr_col) self.change_square_color(window, to_row, to_col) # call our classifiers window.refresh() ClassifyBoard(window, cnn, "cnn_prediction") ClassifyBoard(window, abc, "abc_prediction") ClassifyBoard(window, svc, "svc_prediction") is_human_stm = not is_human_stm # Engine has done its move k1 = 'b_elapse_k' k2 = 'b_base_time_k' if not self.is_user_white: k1 = 'w_elapse_k' k2 = 'w_base_time_k' # Update elapse box elapse_str = self.get_time_mm_ss_ms(engine_timer.elapse) 
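While the engine searches, the GUI never blocks: RunEngine pushes 'info ...' lines and finally a 'bestmove ...' message onto a queue, and the loop drains it with get_nowait(). A generic, self-contained sketch of that pattern; fake_search stands in for RunEngine and is not part of the program.

import queue
import threading
import time

def fake_search(out_q):
    # Stand-in producer: emit a few progress lines, then the final best move.
    for d in range(1, 4):
        out_q.put('info depth {} score cp 25 pv e2e4'.format(d))
        time.sleep(0.1)
    out_q.put('bestmove e2e4')

q = queue.Queue()
worker = threading.Thread(target=fake_search, args=(q,), daemon=True)
worker.start()

while True:
    # ... window.Read(timeout=100) and clock updates would go here ...
    try:
        msg = q.get_nowait()
    except queue.Empty:
        time.sleep(0.05)
        continue
    print(msg)
    if msg.startswith('bestmove'):
        break
worker.join()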
window.Element(k1).Update(elapse_str) # Update remaining time box elapse_str = self.get_time_h_mm_ss(engine_timer.base) window.Element(k2).Update(elapse_str) window.FindElement('_gamestatus_').Update('Mode Play') # Auto-save game logging.info('Saving game automatically') if is_user_resigns: self.game.headers['Result'] = '0-1' if self.is_user_white else '1-0' self.game.headers['Termination'] = '{} resigns'.format( 'white' if self.is_user_white else 'black') elif is_user_wins: self.game.headers['Result'] = '1-0' if self.is_user_white else '0-1' self.game.headers['Termination'] = 'Adjudication' elif is_user_draws: self.game.headers['Result'] = '1/2-1/2' self.game.headers['Termination'] = 'Adjudication' else: self.game.headers['Result'] = board.result(claim_draw = True) base_h = int(self.human_base_time_ms / 1000) inc_h = int(self.human_inc_time_ms / 1000) base_e = int(self.engine_base_time_ms / 1000) inc_e = int(self.engine_inc_time_ms / 1000) if self.is_user_white: if self.human_tc_type == 'fischer': self.game.headers['WhiteTimeControl'] = str(base_h) + '+' + \ str(inc_h) elif self.human_tc_type == 'delay': self.game.headers['WhiteTimeControl'] = str(base_h) + '-' + \ str(inc_h) if self.engine_tc_type == 'fischer': self.game.headers['BlackTimeControl'] = str(base_e) + '+' + \ str(inc_e) elif self.engine_tc_type == 'timepermove': self.game.headers['BlackTimeControl'] = str(1) + '/' + str(base_e) else: if self.human_tc_type == 'fischer': self.game.headers['BlackTimeControl'] = str(base_h) + '+' + \ str(inc_h) elif self.human_tc_type == 'delay': self.game.headers['BlackTimeControl'] = str(base_h) + '-' + \ str(inc_h) if self.engine_tc_type == 'fischer': self.game.headers['WhiteTimeControl'] = str(base_e) + '+' + \ str(inc_e) elif self.engine_tc_type == 'timepermove': self.game.headers['WhiteTimeControl'] = str(1) + '/' + str(base_e) self.save_game() if board.is_game_over(claim_draw=True): sg.Popup('Game is over.', title=BOX_TITLE, icon=ico_path[platform]['pecg']) if is_exit_app: window.Close() sys.exit(0) self.clear_elements(window) return False if is_exit_game else is_new_game def save_game(self): """ Save game in append mode """ with open(self.pecg_auto_save_game, mode = 'a+') as f: f.write('{}\n\n'.format(self.game)) def get_engines(self): """ Get engine filenames [a.exe, b.exe, ...] :return: list of engine filenames """ engine_list = [] engine_path = Path('Engines') files = os.listdir(engine_path) for file in files: if not file.endswith('.gz') and not file.endswith('.dll') \ and not file.endswith('.bin') \ and not file.endswith('.dat'): engine_list.append(file) return engine_list def create_board(self, is_user_white=True): """ Returns board layout based on color of user. If user is white, the white pieces will be at the bottom, otherwise at the top. 
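The auto-save step above boils down to filling the standard PGN headers and appending the game text to a file. A minimal sketch with python-chess; the Event value, player names, and output filename here are placeholders, not the GUI's actual settings.

import chess
import chess.pgn
from datetime import datetime

game = chess.pgn.Game()
game.headers['Event'] = 'Casual game'                      # placeholder value
game.headers['Date'] = datetime.today().strftime('%Y.%m.%d')
game.headers['White'] = 'Human'
game.headers['Black'] = 'Engine'
game.headers['Result'] = '*'

# Record a couple of moves on the main line.
node = game.add_variation(chess.Move.from_uci('e2e4'))
node = node.add_variation(chess.Move.from_uci('e7e5'))

with open('auto_save_games.pgn', mode='a+') as f:          # placeholder filename
    f.write('{}\n\n'.format(game))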
:param is_user_white: user has handling the white pieces :return: board layout """ file_char_name = 'abcdefgh' self.psg_board = copy.deepcopy(initial_board) board_layout = [] if is_user_white: # Save the board with black at the top start = 0 end = 8 step = 1 else: start = 7 end = -1 step = -1 file_char_name = file_char_name[::-1] # Loop through the board and create buttons with images for i in range(start, end, step): # Row numbers at left of board is blank row = [] for j in range(start, end, step): piece_image = self.images[self.psg_board[i][j]] row.append(self.render_square(piece_image, key=(i, j), location=(i, j))) board_layout.append(row) return board_layout def build_main_layout(self, is_user_white=True): """ Creates all elements for the GUI, icluding the board layout. :param is_user_white: if user is white, the white pieces are oriented such that the white pieces are at the bottom. :return: GUI layout """ sg.ChangeLookAndFeel(self.gui_theme) sg.SetOptions(margins=(0, 3), border_width=1) # Define board board_layout = self.create_board(is_user_white) board_controls = [ [sg.Text('Mode Neutral', size=(36, 1), font=('Consolas', 10), key='_gamestatus_')], [sg.Text('White', size=(7, 1), font=('Consolas', 10)), sg.Text('Human', font=('Consolas', 10), key='_White_', size=(24, 1), relief='sunken'), sg.Text('', font=('Consolas', 10), key='w_base_time_k', size=(11, 1), relief='sunken'), sg.Text('', font=('Consolas', 10), key='w_elapse_k', size=(7, 1), relief='sunken') ], [sg.Text('Black', size=(7, 1), font=('Consolas', 10)), sg.Text('Computer', font=('Consolas', 10), key='_Black_', size=(24, 1), relief='sunken'), sg.Text('', font=('Consolas', 10), key='b_base_time_k', size=(11, 1), relief='sunken'), sg.Text('', font=('Consolas', 10), key='b_elapse_k', size=(7, 1), relief='sunken') ], [sg.Text('Adviser', size=(7, 1), font=('Consolas', 10), key='adviser_k', right_click_menu=['Right', ['Start::right_adviser_k', 'Stop::right_adviser_k']]), sg.Text('', font=('Consolas', 10), key='advise_info_k', relief='sunken', size=(46,1))], [sg.Text('Move list', size=(16, 1), font=('Consolas', 10))], [sg.Multiline('', do_not_clear=True, autoscroll=True, size=(52, 8), font=('Consolas', 10), key='_movelist_', disabled=True)], [sg.Text('Comment', size=(7, 1), font=('Consolas', 10))], #[sg.Multiline('', do_not_clear=True, autoscroll=True, size=(52, 8), # font=('Consolas', 10), key='comment_k')], [sg.Multiline('', do_not_clear=True, autoscroll=True, size=(52, 9), font=('Consolas', 10), key='cnn_prediction')], [sg.Multiline('', do_not_clear=True, autoscroll=True, size=(52, 9), font=('Consolas', 10), key='abc_prediction')], [sg.Multiline('', do_not_clear=True, autoscroll=True, size=(52, 9), font=('Consolas', 10), key='svc_prediction')], [sg.Text('BOOK 1, Comp games', size=(26, 1), font=('Consolas', 10), right_click_menu=['Right', ['Show::right_book1_k', 'Hide::right_book1_k']]), sg.Text('BOOK 2, Human games', font=('Consolas', 10), right_click_menu=['Right', ['Show::right_book2_k', 'Hide::right_book2_k']])], [sg.Multiline('', do_not_clear=True, autoscroll=False, size=(23, 4), font=('Consolas', 10), key='polyglot_book1_k', disabled=True), sg.Multiline('', do_not_clear=True, autoscroll=False, size=(25, 4), font=('Consolas', 10), key='polyglot_book2_k', disabled=True)], [sg.Text('Opponent Search Info', font=('Consolas', 10), size=(30, 1), right_click_menu=['Right', ['Show::right_search_info_k', 'Hide::right_search_info_k']])], [sg.Text('', key='search_info_all_k', size=(55, 1), font=('Consolas', 10), relief='sunken')], ] 
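The board half of the window is just an 8x8 grid of image buttons keyed by (row, col), as create_board() and render_square() build it. A condensed sketch, assuming an images dict (piece code -> image file) and an 8x8 psg_board array as used elsewhere in this file; the default colors are the Brown theme values.

import PySimpleGUI as sg

def board_layout_sketch(images, psg_board, light='#F0D9B5', dark='#B58863'):
    layout = []
    for i in range(8):
        row = []
        for j in range(8):
            color = dark if (i + j) % 2 else light
            row.append(sg.RButton('', image_filename=images[psg_board[i][j]],
                                  size=(1, 1), border_width=0, pad=(0, 0),
                                  button_color=('white', color), key=(i, j)))
        layout.append(row)
    return layout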
board_tab = [[sg.Column(board_layout)]] self.menu_elem = sg.Menu(menu_def_neutral, tearoff=False) # White board layout, mode: Neutral layout = [ [self.menu_elem], [sg.Column(board_tab), sg.Column(board_controls)] ] return layout def set_default_adviser_engine(self): try: self.adviser_id_name = self.engine_id_name_list[1] \ if len(self.engine_id_name_list) >= 2 \ else self.engine_id_name_list[0] self.adviser_file, self.adviser_path_and_file = \ self.get_engine_file(self.adviser_id_name) except IndexError as e: logging.warning(e) except Exception: logging.exception('Error in getting adviser engine!') def get_default_engine_opponent(self): engine_id_name = None try: engine_id_name = self.opp_id_name = self.engine_id_name_list[0] self.opp_file, self.opp_path_and_file = self.get_engine_file( engine_id_name) except IndexError as e: logging.warning(e) except Exception: logging.exception('Error in getting opponent engine!') return engine_id_name def main_loop(self): """ Build GUI, read user and engine config files and take user inputs. :return: """ engine_id_name = None layout = self.build_main_layout(True) # Use white layout as default window window = sg.Window('{} {}'.format(APP_NAME, APP_VERSION), layout, default_button_element_size=(12, 1), auto_size_buttons=False, icon=ico_path[platform]['pecg']) # Read user config file, if missing create and new one self.check_user_config_file() # If engine config file (pecg_engines.json) is missing, then create it self.check_engine_config_file() self.engine_id_name_list = self.get_engine_id_name_list() # Define default opponent engine, user can change this later. engine_id_name = self.get_default_engine_opponent() # Define default adviser engine, user can change this later. self.set_default_adviser_engine() self.init_game() # Initialize White and black boxes while True: button, value = window.Read(timeout=50) self.update_labels_and_game_tags(window, human=self.username) break # Mode: Neutral, main loop starts here while True: button, value = window.Read(timeout=50) # Mode: Neutral if button is None: logging.info('Quit app from main loop, X is pressed.') break # Mode: Neutral, Delete player if button == 'Delete Player::delete_player_k': win_title = 'Tools/Delete Player' player_list = [] sum_games = 0 layout = [ [sg.Text('PGN', size=(4, 1)), sg.Input(size=(40,1), key='pgn_k'), sg.FileBrowse()], [sg.Button('Display Players', size=(48,1))], [sg.Text('Status:', size=(48, 1), key='status_k', relief='sunken')], [sg.T('Current players in the pgn', size=(43, 1))], [sg.Listbox([], size=(53, 10), key='player_k')], [sg.Button('Delete Player'), sg.Cancel()] ] window.Disable() w = sg.Window(win_title, layout, icon=ico_path[platform]['pecg']) while True: e, v = w.Read(timeout=10) if e is None or e == 'Cancel': break if e == 'Display Players': pgn = v['pgn_k'] if pgn == '': logging.info('Missing pgn file.') sg.Popup('Please locate your pgn file by pressing ' 'the Browse button followed by Display ' 'Players.', title=win_title, icon=ico_path[platform]['pecg']) break t1 = time.perf_counter() que = queue.Queue() t = threading.Thread(target=self.get_players, args=(pgn, que,),daemon=True) t.start() msg = None while True: e1, v1 = w.Read(timeout=100) w.Element('status_k').Update( 'Display Players: processing ...') try: msg = que.get_nowait() elapse = int(time.perf_counter() - t1) w.Element('status_k').Update( 'Players are displayed. Done! 
in ' + str(elapse) + 's') break except Exception: continue t.join() player_list = msg[0] sum_games = msg[1] w.Element('player_k').Update(sorted(player_list)) if e == 'Delete Player': try: player_name = v['player_k'][0] except IndexError as e: logging.info(e) sg.Popup('Please locate your pgn file by ' 'pressing the Browse button followed by Display Players.', title=win_title, icon=ico_path[platform]['pecg']) break except Exception: logging.exception('Failed to get player.') break t1 = time.perf_counter() que = queue.Queue() t = threading.Thread(target=self.delete_player, args=(player_name, v['pgn_k'], que,), daemon=True) t.start() msg = None while True: e1, v1 = w.Read(timeout=100) w.Element('status_k').Update( 'Status: Delete: processing ...') try: msg = que.get_nowait() if msg == 'Done': elapse = int(time.perf_counter() - t1) w.Element('status_k').Update( player_name + ' was deleted. Done! ' 'in ' + str(elapse) + 's') break else: w.Element('status_k').Update( msg + '/' + str(sum_games)) except Exception: continue t.join() # Update player list in listbox player_list.remove(player_name) w.Element('player_k').Update(sorted(player_list)) w.Close() window.Enable() continue # Mode: Neutral, Set User time control if button == 'User::tc_k': win_title = 'Time/User' layout = [ [sg.T('Base time (minute)', size=(16, 1)), sg.Input(self.human_base_time_ms/60/1000, key='base_time_k', size=(8, 1))], [sg.T('Increment (second)', size=(16, 1)), sg.Input(self.human_inc_time_ms/1000, key='inc_time_k', size=(8, 1))], [sg.T('Period moves', size=(16, 1), visible=False), sg.Input(self.human_period_moves, key='period_moves_k', size=(8, 1), visible=False)], [sg.Radio('Fischer', 'tc_radio', key='fischer_type_k', default=True if self.human_tc_type=='fischer' else False), sg.Radio('Delay', 'tc_radio', key='delay_type_k', default=True if self.human_tc_type == 'delay' else False)], [sg.OK(), sg.Cancel()] ] window.Disable() w = sg.Window(win_title, layout, icon=ico_path[platform]['pecg']) while True: e, v = w.Read(timeout=10) if e is None: break if e == 'Cancel': break if e == 'OK': base_time_ms = int(1000 * 60 * float(v['base_time_k'])) inc_time_ms = int(1000 * float(v['inc_time_k'])) period_moves = int(v['period_moves_k']) tc_type = 'fischer' if v['fischer_type_k']: tc_type = 'fischer' elif v['delay_type_k']: tc_type = 'delay' self.human_base_time_ms = base_time_ms self.human_inc_time_ms = inc_time_ms self.human_period_moves = period_moves self.human_tc_type = tc_type break w.Close() window.Enable() continue # Mode: Neutral, Set engine time control if button == 'Engine::tc_k': win_title = 'Time/Engine' layout = [ [sg.T('Base time (minute)', size=(16, 1)), sg.Input(self.engine_base_time_ms / 60 / 1000, key='base_time_k', size=(8, 1))], [sg.T('Increment (second)', size=(16, 1)), sg.Input(self.engine_inc_time_ms / 1000, key='inc_time_k', size=(8, 1))], [sg.T('Period moves', size=(16, 1), visible=False), sg.Input(self.engine_period_moves, key='period_moves_k', size=(8, 1), visible=False)], [sg.Radio('Fischer', 'tc_radio', key='fischer_type_k', default=True if self.engine_tc_type == 'fischer' else False), sg.Radio('Time Per Move', 'tc_radio', key='timepermove_k', default=True if self.engine_tc_type == 'timepermove' else False, tooltip='Only base time will be used.') ], [sg.OK(), sg.Cancel()] ] window.Disable() w = sg.Window(win_title, layout, icon=ico_path[platform]['pecg']) while True: e, v = w.Read(timeout=10) if e is None: break if e == 'Cancel': break if e == 'OK': base_time_ms = int( 1000 * 60 * 
float(v['base_time_k'])) inc_time_ms = int(1000 * float(v['inc_time_k'])) period_moves = int(v['period_moves_k']) tc_type = 'fischer' if v['fischer_type_k']: tc_type = 'fischer' elif v['timepermove_k']: tc_type = 'timepermove' self.engine_base_time_ms = base_time_ms self.engine_inc_time_ms = inc_time_ms self.engine_period_moves = period_moves self.engine_tc_type = tc_type break w.Close() window.Enable() continue # Mode: Neutral, set username if button == 'Set Name::user_name_k': win_title = 'User/username' layout = [ [sg.Text('Current username: {}'.format( self.username))], [sg.T('Name', size=(4,1)), sg.Input( self.username, key='username_k', size=(32,1))], [sg.OK(), sg.Cancel()] ] window.Disable() w = sg.Window(win_title, layout, icon=ico_path[platform]['pecg']) while True: e, v = w.Read(timeout=10) if e is None: break if e == 'Cancel': break if e == 'OK': backup = self.username username = self.username = v['username_k'] if username == '': username = backup self.update_user_config_file(username) break w.Close() window.Enable() self.update_labels_and_game_tags(window, human=self.username) continue # Mode: Neutral if button == 'Install': button_title = 'Engine/Manage/' + button new_engine_path_file, new_engine_id_name = None, None install_layout = [ [sg.Text('Current configured engine names')], [sg.Listbox(values=self.engine_id_name_list, size=(48,10), disabled=True)], [sg.Button('Add'), sg.Button('Cancel')] ] window.Disable() install_win = sg.Window(title=button_title, layout=install_layout, icon=ico_path[platform]['pecg']) while True: e, v = install_win.Read(timeout=100) if e is None or e == 'Cancel': break if e == 'Add': button_title += '/' + e add_layout = [[sg.Text('Engine', size=(6, 1)), sg.Input(key='engine_path_file_k'), sg.FileBrowse()], [sg.Text('Name', size=(6, 1)), sg.Input(key='engine_id_name_k', tooltip='Input name'), sg.Button('Get Id Name')], [sg.OK(), sg.Cancel()]] install_win.Disable() add_win = sg.Window(button_title, add_layout) is_cancel_add_win = False while True: e1, v1 = add_win.Read(timeout=100) if e1 is None: is_cancel_add_win = True break if e1 == 'Cancel': is_cancel_add_win = True break if e1 == 'Get Id Name': new_engine_path_file = v1['engine_path_file_k'] que = queue.Queue() t = threading.Thread(target=self.get_engine_id_name, args=(new_engine_path_file, que,), daemon=True) t.start() is_update_list = False while True: try: msg = que.get_nowait() break except Exception: pass t.join() if msg[0] == 'Done' and msg[1] is not None: is_update_list = True new_engine_id_name = msg[1] else: is_cancel_add_win = True sg.Popup( 'This engine cannot be ' 'installed. Please select ' 'another engine. It should be uci ' 'engine.', title=button_title + '/Get Id name') if is_update_list: add_win.Element('engine_id_name_k').Update( new_engine_id_name) # If we fail to install the engine, we exit # the install window if is_cancel_add_win: break if e1 == 'OK': try: new_engine_path_file = v1[ 'engine_path_file_k'] new_engine_id_name = v1['engine_id_name_k'] if new_engine_id_name != '': # Check if new_engine_id_name is already existing if self.is_name_exists( new_engine_id_name): sg.Popup( '{} is existing. Please ' 'modify the name! 
You can ' 'modify the config later thru ' 'Engine->Manage->Edit'.format( new_engine_id_name), title=button_title, icon=ico_path[platform]['pecg']) continue break else: sg.Popup('Please input engine id ' 'name, or press Get Id Name ' 'button.', title=button_title, icon=ico_path[platform]['pecg']) except Exception: logging.exception('Failed to get engine ' 'path and file') # Outside add window while loop add_win.Close() install_win.Enable() # Save the new configured engine to pecg_engines.json. if not is_cancel_add_win: que = queue.Queue() t = threading.Thread( target=self.add_engine_to_config_file, args=(new_engine_path_file, new_engine_id_name, que,), daemon=True) t.start() while True: try: msg = que.get_nowait() break except Exception: continue t.join() if msg == 'Failure': sg.Popup('Failed to add {} in config ' 'file!'.format(new_engine_id_name), title=button_title, icon=ico_path[platform]['pecg']) self.engine_id_name_list = \ self.get_engine_id_name_list() break install_win.Close() window.Enable() # Define default engine opponent and adviser if engine_id_name is None: engine_id_name = self.get_default_engine_opponent() if self.adviser_id_name is None: self.set_default_adviser_engine() self.update_labels_and_game_tags(window, human=self.username) continue # Mode: Neutral if button == 'Edit': button_title = 'Engine/Manage/' + button opt_name = [] ret_opt_name = [] engine_path_file, engine_id_name = None, None edit_layout = [ [sg.Text('Current configured engine names')], [sg.Listbox(values=self.engine_id_name_list, size=(48,10), key='engine_id_name_k')], [sg.Button('Modify'), sg.Button('Cancel')] ] window.Disable() edit_win = sg.Window(button_title, layout=edit_layout, icon=ico_path[platform]['pecg']) is_cancel_edit_win = False while True: e, v = edit_win.Read(timeout=100) if e is None or e == 'Cancel': is_cancel_edit_win = True break if e == 'Modify': option_layout, option_layout2 = [], [] button_title += '/' + e try: orig_idname = engine_id_name = v['engine_id_name_k'][0] except Exception: sg.Popup('Please select an engine to modify.', title='/Edit/Modify', icon=ico_path[platform]['pecg']) continue # Read engine config file with open(self.engine_config_file, 'r') as json_file: data = json.load(json_file) # First option that can be set is the config name option_layout.append( [sg.Text('name', size=(4, 1)), sg.Input(engine_id_name, size=(38, 1), key='string_name_k')]) opt_name.append(['name', 'string_name_k']) for p in data: name = p['name'] path = p['workingDirectory'] file = p['command'] engine_path_file = Path(path, file) option = p['options'] if name == engine_id_name: num_opt = len(option) opt_cnt = 0 for o in option: opt_cnt += 1 name = o['name'] value = o['value'] type_ = o['type'] if type_ == 'spin': min_ = o['min'] max_ = o['max'] key_name = type_ + '_' + name.lower() + '_k' opt_name.append([name, key_name]) ttip = 'min {} max {}'.format(min_, max_) spin_layout = \ [sg.Text(name, size=(16, 1)), sg.Input(value, size=(8, 1), key=key_name, tooltip=ttip)] if num_opt > 10 and opt_cnt > num_opt//2: option_layout2.append(spin_layout) else: option_layout.append(spin_layout) elif type_ == 'check': key_name = type_ + '_' + name.lower() + '_k' opt_name.append([name, key_name]) check_layout = \ [sg.Text(name, size=(16, 1)), sg.Checkbox('', key=key_name, default=value)] if num_opt > 10 and opt_cnt > num_opt//2: option_layout2.append(check_layout) else: option_layout.append(check_layout) elif type_ == 'string': key_name = type_ + '_' + name + '_k' opt_name.append([name, key_name]) # Use 
FolderBrowse() if 'syzygypath' in name.lower(): sy_layout = \ [sg.Text(name, size=(16, 1)), sg.Input(value, size=(12, 1), key=key_name), sg.FolderBrowse()] if num_opt > 10 and opt_cnt > num_opt//2: option_layout2.append(sy_layout) else: option_layout.append(sy_layout) # Use FileBrowse() elif 'weightsfile' in name.lower(): weight_layout = \ [sg.Text(name, size=(16, 1)), sg.Input(value, size=(12, 1), key=key_name), sg.FileBrowse()] if num_opt > 10 and opt_cnt > num_opt//2: option_layout2.append( weight_layout) else: option_layout.append( weight_layout) else: str_layout = \ [sg.Text(name, size=(16, 1)), sg.Input(value, size=(16, 1), key=key_name)] if num_opt > 10 and opt_cnt > num_opt//2: option_layout2.append( str_layout) else: option_layout.append( str_layout) elif type_ == 'combo': key_name = type_ + '_' + name + '_k' opt_name.append([name, key_name]) var = o['choices'] combo_layout = [ sg.Text(name, size=(16, 1)), sg.Combo(var, default_value=value, size=(12, 1), key=key_name)] if num_opt > 10 and opt_cnt > num_opt//2: option_layout2.append(combo_layout) else: option_layout.append(combo_layout) break option_layout.append([sg.OK(), sg.Cancel()]) if len(option_layout2) > 1: tab1 = [[sg.Column(option_layout)]] tab2 = [[sg.Column(option_layout2)]] modify_layout = [[sg.Column(tab1), sg.Column(tab2)]] else: modify_layout = option_layout edit_win.Disable() modify_win = sg.Window(button_title, layout=modify_layout, icon=ico_path[platform]['pecg']) is_cancel_modify_win = False while True: e1, v1 = modify_win.Read(timeout=100) if e1 is None or e1 == 'Cancel': is_cancel_modify_win = True break if e1 == 'OK': engine_id_name = v1['string_name_k'] for o in opt_name: d = {o[0]: v1[o[1]]} ret_opt_name.append(d) break edit_win.Enable() modify_win.Close() break # Get out of edit_win loop # Outside edit_win while loop # Save the new configured engine to pecg_engines.json file if not is_cancel_edit_win and not is_cancel_modify_win: self.update_engine_to_config_file( engine_path_file, engine_id_name, orig_idname, ret_opt_name) self.engine_id_name_list = self.get_engine_id_name_list() edit_win.Close() window.Enable() continue # Mode: Neutral if button == 'Delete': button_title = 'Engine/Manage/' + button delete_layout = [ [sg.Text('Current configured engine names')], [sg.Listbox(values=self.engine_id_name_list, size=(48, 10), key='engine_id_name_k')], [sg.Button('Delete'), sg.Cancel()] ] window.Disable() delete_win = sg.Window(button_title, layout=delete_layout, icon=ico_path[platform]['pecg']) is_cancel = False while True: e, v = delete_win.Read(timeout=100) if e is None or e == 'Cancel': is_cancel = True break if e == 'Delete': try: engine_id_name = v['engine_id_name_k'][0] except Exception: sg.Popup('Please select an engine to delete.', title=button_title, icon=ico_path[platform]['pecg']) continue with open(self.engine_config_file, 'r') as json_file: data = json.load(json_file) for i in range(len(data)): if data[i]['name'] == engine_id_name: logging.info('{} is found for deletion.'.format( engine_id_name)) data.pop(i) break # Save data to pecg_engines.json with open(self.engine_config_file, 'w') as h: json.dump(data, h, indent=4) break # Save the new configured engine to pecg_engines.json file if not is_cancel: self.engine_id_name_list = self.get_engine_id_name_list() delete_win.Close() window.Enable() continue # Mode: Neutral, Allow user to change opponent engine settings if button == 'Set Engine Opponent': current_engine_file = self.opp_file current_engine_id_name = self.opp_id_name logging.info('Backup 
current engine list and file.') logging.info('Current engine file: {}'.format( current_engine_file)) layout = [ [sg.T('Current Opponent: {}'.format(self.opp_id_name), size=(40,1))], [sg.Listbox(values=self.engine_id_name_list, size=(48,10), key='engine_id_k')], [sg.OK(), sg.Cancel()] ] # Create new window and disable the main window w = sg.Window(BOX_TITLE + '/Select opponent', layout, icon=ico_path[platform]['enemy']) window.Disable() while True: e, v = w.Read(timeout=10) if e is None or e == 'Cancel': # Restore current engine list and file logging.info('User cancels engine selection. ' + 'We restore the current engine data.') self.opp_file = current_engine_file logging.info('Current engine data were restored.') logging.info('current engine file: {}'.format( self.opp_file)) break if e == 'OK': # We use try/except because user can press OK without # selecting an engine try: engine_id_name = self.opp_id_name = v['engine_id_k'][0] self.opp_file, self.opp_path_and_file = self.get_engine_file( engine_id_name) except IndexError: logging.info('User presses OK but did not select ' 'an engine.') except Exception: logging.exception('Failed to set engine.') finally: if current_engine_id_name != self.opp_id_name: logging.info('User selected a new opponent {' '}.'.format(self.opp_id_name)) break window.Enable() w.Close() # Update the player box in main window self.update_labels_and_game_tags(window, human=self.username) continue # Mode: Neutral, Set Adviser engine if button == 'Set Engine Adviser': current_adviser_engine_file = self.adviser_file current_adviser_path_and_file = self.adviser_path_and_file layout = [ [sg.T('Current Adviser: {}'.format(self.adviser_id_name), size=(40,1))], [sg.Listbox(values=self.engine_id_name_list, size=(48,10), key='adviser_id_name_k')], [sg.T('Movetime (sec)', size=(12, 1)), sg.Spin([t for t in range(1, 3600, 1)], initial_value=self.adviser_movetime_sec, size=(8, 1), key='adviser_movetime_k')], [sg.OK(), sg.Cancel()] ] # Create new window and disable the main window w = sg.Window(BOX_TITLE + '/Select Adviser', layout, icon=ico_path[platform]['adviser']) window.Disable() while True: e, v = w.Read(timeout=10) if e is None or e == 'Cancel': self.adviser_file = current_adviser_engine_file self.adviser_path_and_file = current_adviser_path_and_file break if e == 'OK': movetime_sec = int(v['adviser_movetime_k']) self.adviser_movetime_sec = min(3600, max(1, movetime_sec)) # We use try/except because user can press OK without selecting an engine try: adviser_eng_id_name = self.adviser_id_name = v['adviser_id_name_k'][0] self.adviser_file, self.adviser_path_and_file = self.get_engine_file( adviser_eng_id_name) except IndexError: logging.info('User presses OK but did not select an engine') except Exception: logging.exception('Failed to set engine.') break window.Enable() w.Close() continue # Mode: Neutral if button == 'Set Depth': self.set_depth_limit() continue # Mode: Neutral, Allow user to change book settings if button == 'Set Book::book_set_k': # Backup current values, we will restore these value in case # the user presses cancel or X button current_is_use_gui_book = self.is_use_gui_book current_is_random_book = self.is_random_book current_max_book_ply = self.max_book_ply layout = [ [sg.Text('This is the book used by your ' 'engine opponent.')], [sg.T('Book File', size=(8, 1)), sg.T(self.gui_book_file, size=(36, 1), relief='sunken')], [sg.T('Max Ply', size=(8, 1)), sg.Spin([t for t in range(1, 33, 1)], initial_value=self.max_book_ply, size=(6, 1), key='book_ply_k')], 
[sg.CBox('Use book', key = 'use_gui_book_k', default=self.is_use_gui_book)], [sg.Radio('Best move', 'Book Radio', default = False if self.is_random_book else True), sg.Radio('Random move', 'Book Radio', key='random_move_k', default = True if self.is_random_book else False)], [sg.OK(), sg.Cancel()], ] w = sg.Window(BOX_TITLE + '/Set Book', layout, icon=ico_path[platform]['pecg']) window.Disable() while True: e, v = w.Read(timeout=10) # If user presses X button if e is None: self.is_use_gui_book = current_is_use_gui_book self.is_random_book = current_is_random_book self.max_book_ply = current_max_book_ply logging.info('Book setting is exited.') break if e == 'Cancel': self.is_use_gui_book = current_is_use_gui_book self.is_random_book = current_is_random_book self.max_book_ply = current_max_book_ply logging.info('Book setting is cancelled.') break if e == 'OK': self.max_book_ply = int(v['book_ply_k']) self.is_use_gui_book = v['use_gui_book_k'] self.is_random_book = v['random_move_k'] logging.info('Book setting is OK') break window.Enable() w.Close() continue # Mode: Neutral, Settings menu if button == 'Game::settings_game_k': win_title = 'Settings/Game' layout = [ [sg.CBox('Save time left in game notation', key='save_time_left_k', default=self.is_save_time_left, tooltip='[%clk h:mm:ss] will appear as\n' + 'move comment and is shown in move\n' + 'list and saved in pgn file.')], [sg.OK(), sg.Cancel()], ] w = sg.Window(win_title, layout, icon=ico_path[platform]['pecg']) window.Disable() while True: e, v = w.Read(timeout=10) if e is None or e == 'Cancel': break if e == 'OK': self.is_save_time_left = v['save_time_left_k'] break window.Enable() w.Close() continue # Mode: Neutral, Change theme if button in GUI_THEME: self.gui_theme = button window = self.create_new_window(window) continue if button in PIECE_THEME: print("selected piece theme " + button) self.images = dataset_themes[button] self.redraw_board(window) continue # Mode: Neutral, Change board to gray if button == 'Gray::board_color_k': self.sq_light_color = '#D8D8D8' self.sq_dark_color = '#808080' self.move_sq_light_color = '#e0e0ad' self.move_sq_dark_color = '#999966' self.redraw_board(window) window = self.create_new_window(window) continue # Mode: Neutral, Change board to green if button == 'Green::board_color_k': self.sq_light_color = '#daf1e3' self.sq_dark_color = '#3a7859' self.move_sq_light_color = '#bae58f' self.move_sq_dark_color = '#6fbc55' self.redraw_board(window) window = self.create_new_window(window) continue # Mode: Neutral, Change board to blue if button == 'Blue::board_color_k': self.sq_light_color = '#b9d6e8' self.sq_dark_color = '#4790c0' self.move_sq_light_color = '#d2e4ba' self.move_sq_dark_color = '#91bc9c' self.redraw_board(window) window = self.create_new_window(window) continue # Mode: Neutral, Change board to brown, default if button == 'Brown::board_color_k': self.sq_light_color = '#F0D9B5' self.sq_dark_color = '#B58863' self.move_sq_light_color = '#E8E18E' self.move_sq_dark_color = '#B8AF4E' self.redraw_board(window) window = self.create_new_window(window) continue # Mode: Neutral if button == 'Flip': window.FindElement('_gamestatus_').Update('Mode Neutral') self.clear_elements(window) window = self.create_new_window(window, True) continue # Mode: Neutral if button == 'About': sg.PopupScrolled(HELP_MSG, title='Help/About') continue # Mode: Neutral if button == 'Play': if engine_id_name is None: logging.warning('Install engine first!') sg.Popup('Install engine first! 
in Engine/Manage/Install', icon=ico_path[platform]['pecg'], title='Mode') continue # Change menu from Neutral to Play self.menu_elem.Update(menu_def_play) self.psg_board = copy.deepcopy(initial_board) board = chess.Board() while True: button, value = window.Read(timeout=100) window.FindElement('_gamestatus_').Update('Mode Play') window.FindElement('_movelist_').Update(disabled=False) window.FindElement('_movelist_').Update('', disabled=True) start_new_game = self.play_game(window, engine_id_name, board) window.FindElement('_gamestatus_').Update('Mode Neutral') self.psg_board = copy.deepcopy(initial_board) self.redraw_board(window) board = chess.Board() self.set_new_game() if not start_new_game: break # Restore Neutral menu self.menu_elem.Update(menu_def_neutral) self.psg_board = copy.deepcopy(initial_board) board = chess.Board() self.set_new_game() continue window.Close() def main(): engine_config_file = 'pecg_engines.json' user_config_file = 'pecg_user.json' pecg_book = 'Book/pecg_book.bin' book_from_computer_games = 'Book/computer.bin' book_from_human_games = 'Book/human.bin' is_use_gui_book = True is_random_book = True # If false then use best book move max_book_ply = 8 theme = 'Reddit' pecg = EasyChessGui(theme, engine_config_file, user_config_file, pecg_book, book_from_computer_games, book_from_human_games, is_use_gui_book, is_random_book, max_book_ply) pecg.main_loop() if __name__ == "__main__": main()Classifiers imported Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 23, 23, 32) 896 _________________________________________________________________ activation (Activation) (None, 23, 23, 32) 0 _________________________________________________________________ dropout (Dropout) (None, 23, 23, 32) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 21, 21, 32) 9248 _________________________________________________________________ activation_1 (Activation) (None, 21, 21, 32) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 19, 19, 32) 9248 ___________________________________[...]Imports# pip install yellowbrick import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from scipy import stats from scipy.stats import skew from tqdm import tqdm from sklearn.model_selection import GridSearchCV from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler from sklearn.cluster import KMeans, AffinityPropagation, DBSCAN from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor, BaggingRegressor, StackingRegressor from sklearn.linear_model import LinearRegression, RidgeCV, LassoCV from sklearn.neighbors import KNeighborsRegressor, RadiusNeighborsRegressor from sklearn.svm import SVR # from catboost import CatBoostRegressor from sklearn.tree import DecisionTreeRegressor from xgboost import XGBRegressor # from yellowbrick.cluster import SilhouetteVisualizer # from umap import UMAP from sklearn.manifold import TSNE from sklearn.decomposition import PCA from sklearn import decomposition # from category_encoders import TargetEncoderFirst looktr = pd.read_csv('train.csv') te = pd.read_csv('test_public.csv') train = tr.copy() test = te.copy()общая инф, train, testprint('Train', '\nРазмер 
таблицы:', train.shape, ': индексы строк с 0 по ', train.shape[0]-1, '\nТипы данных:', train.dtypes.unique(), '\nПропуски:', train.isna().sum().to_frame(name="nans").query("nans > 0")["nans"].count()) print('Test', '\nРазмер таблицы:', test.shape, ': индексы строк с 0 по ', test.shape[0]-1, '\nТипы данных:', test.dtypes.unique(), '\nПропуски:', test.isna().sum().to_frame(name="nans").query("nans > 0")["nans"].count()) train.describe() test.describe() train.head(5) # индексы строк с 0 по 860749 test.head(5) # индексы строк с 0 по 261189 train['id2'].unique() test['id2'].unique() corr_table = train.corr().sort_values(['mb'], ascending=True) corr_table columns_without_corr = corr_table[corr_table['mb'].isna() == True].index columns_without_corrграфики# fig, axes = plt.subplots(nrows=1, ncols=2, figsize = (13,8)) # train.pivot_table('Unnamed: 0', 'id2', 'mb', 'count').plot(kind='bar', stacked=True) # train.pivot_table('Unnamed: 0', 'place_id', 'mb', 'count').plot(kind='bar', stacked=True) # plt.figure(figsize=(20,15)) # sns.heatmap(train.corr(), annot = True, fmt='.1g', vmin = -1, vmax = 1, center = 0, cmap = 'seismic', linewidths=2, linecolor='black')df_alldf_concat = pd.concat([train, test], sort=True).reset_index(drop=True) df_concat #всего 1121940, посл.было:261189выборосы удалить?# a = df_concat[(np.abs(stats.zscore(df_concat)) > 3).all(axis=1)] # бОльше 3х сигм = outliardf_all_dum: encoding для категориальной id2df_all_dum = pd.get_dummies(df_concat, columns=['id2']) df_all_dumdf_all_fill: заполняем пропуски Т.к. у нас только int и float значения, заполняя Nan, можем брать сразу всю таблицу, не выделяя колонок, - и заполнть mode, median, mean, mean-target-encoding и т.д.df_all_fill = df_all_dum.copy() df_all_fill = df_all_fill.fillna(df_all_fill.median()) df_all_fill print(df_all_fill.isna().sum().to_frame(name="nans").query("nans > 0")["nans"].count()) # _> значит, незаполненных значений не осталось!0df_all_fill: логарифмируемplt.hist(df_all_fill['mb'], bins=100) skew_col = df_all_fill.apply(lambda x: skew(x)) #смотрим симметричность данных: симметрия = 0 skew_col.sort_values(ascending=False) skew_col = skew_col[abs(skew_col) > 0.5] # чем дальше от 0, тем менне симметричные данные. skew_features = df_all_fill[skew_col.index] # все numeric ячейки с skew_col > 0.5 #skew_features.columns df_all_fill[skew_features.columns] = np.log1p(df_all_fill[skew_features.columns]) # логарифмируем все skew_col, и target тоже # df_all_fill[skew_features.columns] # df_all_fill # df_all_2['SalePrice'] = np.log1p(df_all_2['SalePrice']) # логарифмируем SalePrice df_all_fill df_all_fill.shape plt.hist(df_all_fill['mb'], bins=100) df_all_fill['mb'].mode()StandardScalerfeatures = df_all_fill.drop(['mb'], axis=1).columns.to_list() # берём все колонки с type=int64&float64 -> удаляем target, для scaler-а остальных фичей len(features) ss_scaler = StandardScaler() df_all_fill[features] = ss_scaler.fit_transform(df_all_fill[features]) #итого: scaled_X_tr - фичи, по которым будем учить модель на tr в X_train! 
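Put together, the preprocessing above is a short chain: one-hot encode id2, fill NaNs with the median, log1p-transform the skewed columns (|skew| > 0.5, target included), then standard-scale the features. A compact sketch of that same chain, assuming 'mb' is the target and all other columns are numeric.

import numpy as np
import pandas as pd
from scipy.stats import skew
from sklearn.preprocessing import StandardScaler

def preprocess(df_concat):
    df = pd.get_dummies(df_concat, columns=['id2'])        # encode the categorical id2
    df = df.fillna(df.median())                            # fill gaps with the median
    skewed = df.apply(skew)
    skewed = skewed[skewed.abs() > 0.5].index              # columns with noticeable skew
    df[skewed] = np.log1p(df[skewed])                      # log1p them (target included)
    features = df.drop(['mb'], axis=1).columns
    df[features] = StandardScaler().fit_transform(df[features])
    return df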
X = df_all_fill[features]понижение размерностиfrom sklearn.decomposition import PCA pca = PCA(n_components = 0.95) # we want the explained variance to be between 95–99% pca.fit(X) X_pca = pca.transform(X) # from yellowbrick.cluster import SilhouetteVisualizer # model = KMeans(4, random_state=42) # visualizer = SilhouetteVisualizer(model, colors='yellowbrick') # visualizer.fit(X_pca) # visualizer.show()очень долго.# from yellowbrick.cluster import SilhouetteVisualizer # model = KMeans(10, random_state=42) # visualizer = SilhouetteVisualizer(model, colors='yellowbrick') # visualizer.fit(X_pca) # visualizer.show()кластеризация# km = KMeans(6) # или НЕ 4 - выбрать из Силуета # km.fit(X_pca) # op = km.labels_ # df_all_fill['Class'] = opделим на train и test Train Размер таблицы: (860750, 84) : индексы строк с 0 по 860749 Test Размер таблицы: (261190, 41) : индексы строк с 0 по 261189df = df_all_fill.copy() df_train = df[0:860750] x_df_train = df[0:860750].drop('mb', axis = 1) # индексы строк с 0 по 860749 y_df_train = df['mb'][0:860750] df_test = df[860750:].drop('mb', axis = 1) # индексы строк с- все после 860749, БЕЗ таргета x_train, x_valid, y_train, y_valid = train_test_split(x_df_train, y_df_train.to_numpy(), random_state=42, test_size = 0.2)задаём SMAPEdef smape(y_true, y_pred): return np.mean(np.abs(y_true - y_pred) / np.abs(y_true + y_pred + 1e-15))baseline LinearRegression()lr = LinearRegression() lr.fit(x_train, y_train) train_smape_lr = smape(np.expm1(y_train), np.expm1(lr.predict(x_train))) valid_smape_lr = smape(np.expm1(y_valid), np.expm1(lr.predict (x_valid))) print("train smape:", train_smape_lr) print("test smape:", valid_smape_lr)train smape: 0.3889223505679647 test smape: 0.3741137991730892LassoCV()lasso = LassoCV(alphas=np.arange(0.0001, 3, 0.1)) lasso.fit(x_train, y_train) train_smape_lasso = smape(np.expm1(y_train), np.expm1(lasso.predict(x_train))) valid_smape_lasso = smape(np.expm1(y_valid), np.expm1(lasso.predict (x_valid))) print("train smape:", train_smape_lasso) print("test smape:", valid_smape_lasso)/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_coordinate_descent.py:644: ConvergenceWarning: Objective did not converge. You might want to increase the number of iterations. Duality gap: 16691.33077871883, tolerance: 7.584446413898085 positive, /usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_coordinate_descent.py:644: ConvergenceWarning: Objective did not converge. You might want to increase the number of iterations. Duality gap: 16700.77352438592, tolerance: 7.594667215817495 positive, /usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_coordinate_descent.py:644: ConvergenceWarning: Objective did not converge. You might want to increase the number of iterations. Duality gap: 16690.18465044758, tolerance: 7.594808749424364 positive, /usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_coordinate_descent.py:644: ConvergenceWarning: Objective did not converge. You might want to increase the number of iterations. 
Duality gap: 16644.556533[...]RidgeCV()ridge = RidgeCV(alphas=np.arange(0.001, 3, 0.1)) ridge.fit(x_train, y_train) train_smape_ridge = smape(np.expm1(y_train), np.expm1(ridge.predict(x_train))) valid_smape_ridge = smape(np.expm1(y_valid), np.expm1(ridge.predict (x_valid))) print("train smape:", train_smape_ridge) print("test smape:", valid_smape_ridge)train smape: 8.920089665411664 test smape: 8.508392385287802модели серьёзнее GradientBoostingRegressor()gbr = GradientBoostingRegressor(max_depth=3, learning_rate = 0.05, n_estimators=250) gbr.fit(x_train, y_train) train_smape_gbr = smape(np.expm1(y_train), np.expm1(gbr.predict(x_train))) valid_smape_gbr = smape(np.expm1(y_valid), np.expm1(gbr.predict (x_valid))) print("train smape:", train_smape_gbr) print("test smape:", valid_smape_gbr)train smape: 0.3395922289983112 test smape: 0.3397219473502614XGBRegressor()xgbr = XGBRegressor(max_depth=3, learning_rate=0.1, n_estimators=200) xgbr.fit(x_train, y_train) train_smape_xgbr = smape(np.expm1(y_train), np.expm1(xgbr.predict(x_train))) valid_smape_xgbr = smape(np.expm1(y_valid), np.expm1(xgbr.predict (x_valid))) print("train smape:", train_smape_xgbr) print("test smape:", valid_smape_xgbr)[12:04:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror. train smape: 0.33740671382766385 test smape: 0.33529547225681144попробуем Grid Search? очень долго, больше 3ч...xgbr_example = XGBRegressor() params_xgbr = {'max_depth': [2, 5, 10], #задать сначала [2, 5, 10] -> потом (i for i in range [..., ...]) 'learning_rate': [0.1, 0.05], #задать сначала [0.1, 0.05] -> потом что-то одно 'n_estimators': [100, 200, 300] } #задать сначала [100, 200, 300] -> потом (i for i in range [..., ...]) Grid_Search_xgbr = GridSearchCV(xgbr_example, param_grid=params_xgbr, scoring='neg_mean_squared_error') Grid_Search_xgbr.fit(x_train, y_train) GS_xgbr_best_params = Grid_Search_xgbr.best_params_ GS_xgbr_best_paramsИТОГ меньшую ошибку показал xgbr - его используем для предикта на testresult= np.expm1(xgbr.predict(df_test)) resultTitanic analysisimport pandas as pd # visualization import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline train = pd.read_csv('./data/train.csv') train.head() print('Training set ( Rows , Columns ): ') print(train.shape) train.info() RangeIndex: 891 entries, 0 to 890 Data columns (total 12 columns): PassengerId 891 non-null int64 Survived 891 non-null int64 Pclass 891 non-null int64 Name 891 non-null object Sex 891 non-null object Age 714 non-null float64 SibSp 891 non-null int64 Parch 891 non-null int64 Ticket 891 non-null object Fare 891 non-null float64 Cabin 204 non-null object Embarked 889 non-null object dtypes: float64(2), int64(5), object(5) memory usage: 83.6+ KBFollowing are missing values* Age* Cabin* Embarked Fill nulls in Agetrain['Age'].fillna(train['Age'].mean(), inplace = True)Fill nulls in Embarkedmost_freq_port = train['Embarked'].mode()[0] train['Embarked'].fillna(most_freq_port, inplace = True) train_describe_numeric = train.describe() train_describe_numericCreate Age_Group categorydef getAgeGroup (age) : if age <= 10: return 1 elif age <= 18: return 2 elif age <= 28: return 3 elif age <= 45: return 4 elif age <= 65: return 5 else: return 6 train[ 'Age_Group' ] = train[ 'Age' ].map(getAgeGroup) train[ 'Age_Group' ].head() print('So survival rate is ') print(train_describe_numeric.loc['mean', 'Survived']) train_describe_categorical = train.describe(include=['O']) train_describe_categoricalFind family 
sizetrain["Family_Size"] = train['SibSp'] + train['Parch'] + 1 train["Family_Size"].head()Create family_type categorydef getFamilyType(size): if size == 1: return 1 elif 2 <= size <= 4: return 2 else: return 3 train['Family_Type'] = train['Family_Size'].map(getFamilyType) train['Family_Type'].head()Convert gender to 1 / 0train[ 'Sex' ] = train[ 'Sex' ].map( lambda s : 1 if s == 'male' else 0 ) train[ 'Sex' ].head()Survival rate by Age_Groupsurvival_rate_by_age_group = train[['Age_Group', 'Survived']].groupby(['Age_Group'], as_index = False).mean() survival_rate_by_age_group = survival_rate_by_age_group.sort_values(by = 'Survived', ascending = False) survival_rate_by_age_group.head()Survival rate by classsurvival_rate_by_class = train[['Pclass', 'Survived']].groupby(['Pclass'], as_index = False).mean() survival_rate_by_class = survival_rate_by_class.sort_values(by = 'Survived', ascending = False) survival_rate_by_class g = sns.FacetGrid(train, col = 'Survived') g.map(plt.hist, 'Pclass', bins = 5)The upper class had a much higher chance of surviving the disaster Survival rate by gendersurvival_rate_by_gender = train[["Sex", "Survived"]].groupby(['Sex'], as_index = False).mean() survival_rate_by_gender = survival_rate_by_gender.sort_values(by = 'Survived', ascending = False) survival_rate_by_genderWomen were much more likely to survive Survival rate by family sizesurvival_rate_by_family_size = train[["Family_Size", "Survived"]].groupby(['Family_Size'], as_index = False).mean() survival_rate_by_family_size = survival_rate_by_family_size.sort_values(by = 'Survived', ascending = False) survival_rate_by_family_size g = sns.FacetGrid(train, col = 'Survived') g.map(plt.hist, 'Age', bins = 30)Survival rate by Embarkedsurvival_rate_by_embarked = train[["Embarked", "Survived"]].groupby(['Embarked'], as_index = False).mean() survival_rate_by_embarked = survival_rate_by_embarked.sort_values(by = 'Survived', ascending = False) survival_rate_by_embarkedGet dummies of Age_Group and Family_Typetrain = pd.get_dummies( train, columns = ['Age_Group', 'Family_Type', 'Sex', 'Pclass'], prefix = ['Age_Group', 'Family_Type', 'Sex', 'Pclass'], drop_first = True ) train.info() columns_to_drop = ['PassengerId', 'Name', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Family_Size', 'Embarked', 'Age'] train.drop( columns_to_drop, axis = 1, inplace = True) train.info() RangeIndex: 891 entries, 0 to 890 Data columns (total 11 columns): Survived 891 non-null int64 Age_Group_2 891 non-null uint8 Age_Group_3 891 non-null uint8 Age_Group_4 891 non-null uint8 Age_Group_5 891 non-null uint8 Age_Group_6 891 non-null uint8 Family_Type_2 891 non-null uint8 Family_Type_3 891 non-null uint8 Sex_1 891 non-null uint8 Pclass_2 891 non-null uint8 Pclass_3 891 non-null uint8 dtypes: int64(1), uint8(10) memory usage: 15.7 KBPrepare the test datasettest = pd.read_csv('./data/test.csv') test.head() test.info() RangeIndex: 418 entries, 0 to 417 Data columns (total 11 columns): PassengerId 418 non-null int64 Pclass 418 non-null int64 Name 418 non-null object Sex 418 non-null object Age 332 non-null float64 SibSp 418 non-null int64 Parch 418 non-null int64 Ticket 418 non-null object Fare 417 non-null float64 Cabin 91 non-null object Embarked 418 non-null object dtypes: float64(2), int64(4), object(5) memory usage: 36.0+ KBFill nulls in test agetest['Age'].fillna(test['Age'].mean(), inplace = True)Create Age_Group in testtest[ 'Age_Group' ] = test[ 'Age' ].map(getAgeGroup) test[ 'Age_Group' ].head()Create Family_sizetest["Family_Size"] = 
test['SibSp'] + test['Parch'] + 1 test["Family_Size"].head()Create Family_Typetest['Family_Type'] = test['Family_Size'].map(getFamilyType) test['Family_Type'].head()Convert gender to 1/0test[ 'Sex' ] = test[ 'Sex' ].map( lambda s : 1 if s == 'male' else 0 ) test[ 'Sex' ].head() test = pd.get_dummies( test, columns = ['Age_Group', 'Family_Type', 'Sex', 'Pclass'], prefix = ['Age_Group', 'Family_Type', 'Sex', 'Pclass'], drop_first = True ) test.info() X_test = test.drop( columns_to_drop, axis = 1) X_test.info() RangeIndex: 418 entries, 0 to 417 Data columns (total 10 columns): Age_Group_2 418 non-null uint8 Age_Group_3 418 non-null uint8 Age_Group_4 418 non-null uint8 Age_Group_5 418 non-null uint8 Age_Group_6 418 non-null uint8 Family_Type_2 418 non-null uint8 Family_Type_3 418 non-null uint8 Sex_1 418 non-null uint8 Pclass_2 418 non-null uint8 Pclass_3 418 non-null uint8 dtypes: uint8(10) memory usage: 4.2 KBSplit into X_train and y_trainy_train = train['Survived'] X_train = train.drop( [ 'Survived' ], axis = 1)Random Forest# machine learning from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import Perceptron from sklearn.linear_model import SGDClassifier from sklearn.tree import DecisionTreeClassifier random_forest = RandomForestClassifier(n_estimators = 100) random_forest.fit(X_train, y_train) random_forest.score(X_train, y_train) y_pred = random_forest.predict(X_test) acc_random_forest = round(random_forest.score(X_train, y_train) * 100, 2) acc_random_forest submission = pd.DataFrame({ "PassengerId": test["PassengerId"], "Survived": y_pred }) submission.to_csv('./data/submission.csv', index = False) submission.Survived.duplicated().sum()# Import the dependencies import cv2 import matplotlib.pyplot as plt import numpy as np import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data mnist_data = input_data.read_data_sets("./data", one_hot = True) num_training = mnist_data.train.num_examples num_testing = mnist_data.test.num_examples num_validation = mnist_data.validation.num_examples print("MNIST Datasize: Training samples: {0}, Testing samples:{1}) # Network parameters of Nerural Network n_input = 784 # Input image of size 28 x 28 n_hidden= 512 # First hidden layer n_hidden= 256 # Second hidden layer n_hidden =128 # Third hidden layer n_output = 10 # Output layer having (0-9) digits learning_rate = le-4 epochs = 3000 batch_size = 128 keep_prob = tf.placeholder(tf.float32) # Building tensorflow graph X = tf.placeholder(tf.float32, [None, n_input]) Y = tf.placeholder(tf.float32,[None, n_output]) # Weight defination nn_weight = {"W1": tf.Variable(tf.truncated_normal([n_input, n_hidden_1], stddev= 0.1)), "W2": tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2], stddev=0.1)), "W3": tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3], stddev= 0.1)), "Wout":tf.Variable(tf.truncated_normal([n_hidden_3, n_output], stddev= 0.1)) } nn_bias = { 'B1':tf.Variable(tf.truncated_normal([n_hidden_1])), { 'B2':tf.Variable(tf.truncated_normal([n_hidden_1])), { 'B3':tf.Variable(tf.truncated_normal([n_hidden_1])), { 'B4':tf.Variable(tf.truncated_normal([n_hidden_1])) }Importsimport sys sys.path.append('../ClusterPlot') sys.path.append('./utils') import pandas as pd import numpy as np import seaborn as sns %matplotlib notebook import matplotlib.pyplot as plt from 
mpl_toolkits.mplot3d import axes3d from DataSetFactory import DataSetFactory from ClusterPlot import ClusterPlot RANDOM_STATE = 42 ds = DataSetFactory.get_dataset('hourglass2', random_state=RANDOM_STATE, sample=None, is_subset=False)3D Plotfig = plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111, projection='3d') xs = ds.df[ds.df[ds.label_col] == 0]['X'] ys = ds.df[ds.df[ds.label_col] == 0]['Y'] zs = ds.df[ds.df[ds.label_col] == 0]['Z'] ax.scatter(xs, ys, zs, s=50, alpha=0.6, edgecolors='w') xs = ds.df[ds.df[ds.label_col] == 1]['X'] ys = ds.df[ds.df[ds.label_col] == 1]['Y'] zs = ds.df[ds.df[ds.label_col] == 1]['Z'] ax.scatter(xs, ys, zs, s=50, alpha=0.6, edgecolors='w') # remove ticks ax = plt.gca() ax.xaxis.set_ticklabels([]) ax.yaxis.set_ticklabels([]) ax.zaxis.set_ticklabels([]) for line in ax.xaxis.get_ticklines(): line.set_visible(False) for line in ax.yaxis.get_ticklines(): line.set_visible(False) for line in ax.zaxis.get_ticklines(): line.set_visible(False) ax.view_init(elev=60, azim=60) plt.show() X = ds.df[ds.feature_cols].values y = ds.df[ds.label_col].valuesCluster Plotsblobs_cp = ClusterPlot(reduce_all_points=False, n_iter=1, batch_size=0, learning_rate=0.1, dataset='Hourglass2', class_to_label=ds.class_to_label, show_fig=True, random_state=RANDOM_STATE, show_inner_blobs=True, show_blobs=True, show_label_level_plots=True, show_anchor_level_plot=False, birch_threshold=0.42, alpha=0.8, douglas_peucker_tolerance=0.3, smooth_iter=3, magnitude_step=False, show_loss_plot=False) print(blobs_cp) low_dim_blobs_cp = blobs_cp.fit_transform(X, y)2020-05-16 12:57:14,997 - ClusterPlot-34057 - INFO - finding intra class anchors using birch 2020-05-16 12:57:14,997 - ClusterPlot-34057 - INFO - finding intra class anchors using birch 2020-05-16 12:57:15,264 - ClusterPlot-34057 - INFO - UnSupervised Dim Reduction 2020-05-16 12:57:15,264 - ClusterPlot-34057 - INFO - UnSupervised Dim Reduction 2020-05-16 12:57:15,267 - ClusterPlot-34057 - INFO - Dim Reduction only anchors 2020-05-16 12:57:15,267 - ClusterPlot-34057 - INFO - Dim Reduction only anchors 2020-05-16 12:57:15,344 - ClusterPlot-34057 - INFO - Dim Reduction only anchors - generate random points in low dim per anchor 2020-05-16 12:57:15,344 - ClusterPlot-34057 - INFO - Dim Reduction only anchors - generate random points in low dim per anchor 100%|███████████████████████████████████████████████████████████████████████████████| 966/966 [00:07<00:00, 137.00it/s] 2020-05-16 12:57:22,643 - ClusterPlot-34057 - INFO - Starting iteration 1 loss = 0.54629754974956 2020-05-16 12:57:22,64[...]use Decoratorsfrom inspect import getgeneratorstate from functools import wraps def coroutine(func): @wraps(func) def primer(*args, **kwargs): gen = func(*args, **kwargs) next(gen) return gen return primer coro_avg = averager() getgeneratorstate(coro_avg) @coroutine def averager(): total = 0.0 count = 0 average = None while True: d = yield average total +=d count += 1 average = total / count coro_avg = averager() getgeneratorstate(coro_avg) coro_avg.send(40) coro_avg.send(10) coro_avg.send('spam') coro_avg.send(15) class DemoException(Exception): """""" def demo_exc_handling(): print("-> coroutine started") while True: try: x = yield except DemoException: #<1> print('*** DemoException handled.') else: print("-> coroutine received: {!r}".format(x)) raise RuntimeError('This line should never run.') exc_coro = demo_exc_handling() next(exc_coro) exc_coro.send(11)-> coroutine received: 11throw exceptionsexc_coro.send(11) exc_coro.throw(DemoException) 
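Since the next cells inspect the generator state after throw(), here is a minimal self-contained sketch of the two possible outcomes (the echo coroutine and the ValueError/KeyError pair are illustrative, not from the text): a throw() the coroutine handles leaves it GEN_SUSPENDED, an unhandled one terminates it as GEN_CLOSED.

from inspect import getgeneratorstate

def echo():
    while True:
        try:
            yield
        except ValueError:
            print('ValueError handled, coroutine still alive')

gen = echo()
next(gen)                        # prime it -> GEN_SUSPENDED
gen.throw(ValueError)            # caught inside -> still GEN_SUSPENDED
print(getgeneratorstate(gen))
try:
    gen.throw(KeyError)          # not caught -> propagates to the caller
except KeyError:
    pass
print(getgeneratorstate(gen))    # GEN_CLOSED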
getgeneratorstate(exc_coro) exc_coro = demo_exc_handling() next(exc_coro) exc_coro.send(11) exc_coro.throw(ZeroDivisionError) getgeneratorstate(exc_coro)Handling exceptionclass DemoException(Exception): """""" def demo_finally(): print("-> coroutine started") try: while True: try: x = yield except DemoException: print("*** DemoException handled. Continuing...") else: print("->coroutine received: {!r}".format(x)) finally: print("-> coroutine ending") fin_coro = demo_finally() next(fin_coro) fin_coro.send(11) fin_coro.send(12) fin_coro.send("spam") fin_coro.throw(ZeroDivisionError)-> coroutine endingReturn a value from Coroutinefrom collections import namedtuple Result = namedtuple('Result', 'count average') def averager(): total = 0.0 count = 0 average = None while True: term = yield if term is None: break total += term count += 1 average = total/count return Result(count, average) coro_avg = averager() next(coro_avg) coro_avg.send(10) coro_avg.send(20) coro_avg.send(None)Here, we import our TMPRSS2 QSAR Dataset, Dark Chemical Matter Dataset, and Screening Library# collect dataset assays = pd.read_pickle('../processed_data/combined_dataset.pkl') assays = assays[assays.activity_target.isin(['Active', 'Inactive'])] # get rid of any 'Inconclusive' dcm = pd.read_pickle('../processed_data/DarkChemicalMatter_processed.pkl.gz') # testing data: screening_data = pd.read_pickle('../processed_data/screening_data_processed.pkl') screening_dataHere, we combine our assay data and dark chemical matter data. We next 80%/20% train/test split. This data is split into a training set (80%) and a testing/validation set (20%)X_assays = np.stack(assays.morgan_fingerprint) y_assays = np.ones(len(X_assays)) X_dcm = np.stack(dcm.morgan_fingerprint) y_dcm = np.zeros(len(X_dcm)) X_combined = np.append(X_assays, X_dcm, axis = 0) y_combined = np.append(y_assays, y_dcm) X_train, X_test, y_train, y_test = train_test_split(X_combined, y_combined, test_size=0.2)Here we use SKLearn GridSearch CV function to identify optimal C parameter for our preliminary SVM Classifier (trained on training set only)Cs = np.logspace(-6, 2, 16) clf = GridSearchCV(estimator=LinearSVC(random_state=0, tol=1e-5, max_iter = 10000, dual = False), param_grid=dict(C=Cs), n_jobs=-1) clf.fit(X_train, y_train) c_param_SVC_train = clf.best_estimator_.C c_param_SVC_trainUsing the C parameter calculated above, we determine the Total Accuracy, False Positive Rate, False Negative Rate of our SVM ClassifierSVM_validation = make_pipeline(StandardScaler(), LinearSVC(random_state=0, tol=1e-5, C=c_param_SVC_train, max_iter = 10000, dual = False)) SVM_validation.fit(X_train, y_train) pred = SVM_validation.predict(X_test) accuracy = np.sum(pred == y_test)/y_test.size accuracy i = 0 false_positive = 0 total_positive = 0 false_negative = 0 total_negative = 0 while(i < len(pred)): if(y_test[i] == 0): total_negative += 1 if(pred[i] == 1): false_positive += 1 elif(y_test[i] == 1): total_positive += 1 if(pred[i] == 0): false_negative += 1 i = i + 1 false_positive/total_positive false_negative/total_negativeHere, we use SKLearn GridSearch CV function to identify optimal C parameter for our full SVM Classifier (trained on training set and testing set)Cs = np.logspace(-6, 2, 16) clf = GridSearchCV(estimator=LinearSVC(random_state=0, tol=1e-5, max_iter = 10000, dual = False), param_grid=dict(C=Cs), n_jobs=-1) clf.fit(X_combined, y_combined) c_param_SVC_test = clf.best_estimator_.C c_param_SVC_testHere, we use our full SVM Classifier to identify potentially-active compounds 
from our screening librarySVM_testing = make_pipeline(StandardScaler(), LinearSVC(random_state=0, tol=1e-5, C=c_param_SVC_test, max_iter = 10000, dual = False)) SVM_testing.fit(X_combined, y_combined) screening_compounds = np.stack(screening_data.morgan_fingerprint) pred = SVM_testing.predict(screening_compounds) screening_data['predictions'] = pred inactiveCompounds = screening_data[(screening_data['predictions'] == 0)].index active_screening_compounds = screening_data.drop(inactiveCompounds) len(active_screening_compounds) #split training and testing data for each dataset, fill nan with acvalue_target #y_assays_logKi = np.log10(assays.acvalue_scaled_to_tmprss2.fillna(assays.acvalue_target)) #train_X, test_X, train_y, test_y = train_test_split(X_assays, y_assays_logKi, test_size=0.2)Next, we identify the subset of the training data for which Ki values can be scaled to TMPRSS2 for use in regression analysis. This data is split into a training set (80%) and a testing/validation set (20%)y_assays_logKi_raw = np.log10(assays.acvalue_scaled_to_tmprss2) nan_array = np.isnan(y_assays_logKi_raw) not_nan = ~nan_array y_assays_logKi = y_assays_logKi_raw[not_nan] X_assays = X_assays[not_nan] train_X, test_X, train_y, test_y = train_test_split(X_assays, y_assays_logKi, test_size=0.2)Next, we use SKLearn GridSearch CV function to identify optimal C parameter for our preliminary Support Vector Regressor (trained on training set only)# Use SKLearn GridSearch CV function to identify optimal C parameter for SVM regression (training set) Cs = np.logspace(-6, 2, 16) clf = GridSearchCV(estimator=LinearSVR(random_state=0, tol=1e-5, max_iter = 10000, dual = True), param_grid=dict(C=Cs), n_jobs=-1) clf.fit(train_X, train_y) c_param_SVR_test = clf.best_estimator_.C c_param_SVR_testUsing the C paramater calculated above, we calculate the RMSE of our regressor and the correlation coefficient between our predicted and ground-truth values.#Run SVM regression using SKLearn on test set. 
Linear regression for prediction accuracy svmReg = make_pipeline(StandardScaler(), LinearSVR(random_state=0, tol=1e-5, C=c_param_SVR_test, max_iter = 10000, dual = True)) svmReg.fit(train_X, train_y) pred = svmReg.predict(test_X) MSE = mean_squared_error(test_y, pred) RMSE = np.sqrt(MSE) print("SVR RMSE:{}".format(RMSE)) plt.scatter(test_y, pred) plt.xlabel('log10(Actual Ki), μM') plt.ylabel('log10(Predicted Ki), μM') plt.title('SVM Validation Data') corr = scipy.stats.pearsonr(test_y, pred) print(corr)SVR RMSE:0.7509391213003528 (0.7255503910813106, 4.023811243492027e-15)Next, we use SKLearn GridSearch CV function to identify optimal C parameter for our full Support Vector Regressor (trained on training set and testing set)#SKLearn C parameter optimization Cs = np.logspace(-6, 2, 16) clf_full = GridSearchCV(estimator=LinearSVR(random_state=0, tol=1e-5, max_iter = 10000, dual = True), param_grid=dict(C=Cs), n_jobs=-1) clf_full.fit(X_assays, y_assays_logKi) c_param_full = clf_full.best_estimator_.C c_param_fullFinally, using this C parameter, we screen the active compounds identified by our SVM Classifier to identify the compounds which are predicted to bind most effectively to TMPRSS2#Run regressor (trained on full dataset) test_compounds = np.stack(active_screening_compounds.morgan_fingerprint) svmReg_full = make_pipeline(StandardScaler(), LinearSVR(random_state=0, tol=1e-5, C=c_param_full, max_iter = 10000, dual = True)) svmReg_full.fit(X_assays, y_assays_logKi) pred_values = svmReg_full.predict(test_compounds) #identify top hits active_screening_compounds['pred_value'] = pred_values active_screening_compounds.sort_values(by='pred_value').head(20) plt.hist(active_screening_compounds.pred_value, bins = 20) plt.xlabel('log10(Predicted Ki of test compound), μM') plt.ylabel('Abundance of Compounds in Bin') plt.title('Predicted Ki Values of Potentially-Active Compounds')Here, we save raw results, as well as our results with duplicates removedactive_screening_compounds_sorted = active_screening_compounds.sort_values(by='pred_value') active_screening_compounds_sorted['RMSE'] = RMSE active_screening_compounds_sorted.drop(columns=['morgan_fingerprint', 'predictions'], inplace=True) active_screening_compounds_sorted.to_csv('../results/svm_screening_results_raw.csv') active_screening_compounds_sorted["name"] = active_screening_compounds_sorted["name"].str.lower() active_screening_compounds_sorted.drop_duplicates(subset=['name'], keep='first', inplace=True) active_screening_compounds_sorted.to_csv('../results/svm_screening_results_no_duplicate_names.csv')Environment%matplotlib inline import matplotlib.pyplot as plt import os import time from typing import Iterable, Tuple from dataclasses import dataclass import numpy as np import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torchvision import datasets, transformsArchitecture of LeNet5 
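Before the diagram, a minimal torch.nn sketch of the classic LeNet-5 stack shown in lenet5.jpeg (two 5x5 convolution + pooling stages followed by 120-84-10 fully connected layers), using the torch / nn / F imports from the Environment cell above. The padding=2 on the first convolution is an assumption so that 28x28 MNIST inputs reproduce the 32x32 layout of the figure, and ReLU/max-pooling stand in for the original tanh/average-pooling:

class LeNet5(nn.Module):
    def __init__(self, num_classes: int = 10):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5, padding=2)   # 1x28x28 -> 6x28x28
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)              # 6x14x14 -> 16x10x10
        self.pool = nn.MaxPool2d(2, 2)                             # halves the spatial size
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, num_classes)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))                       # -> 6x14x14
        x = self.pool(F.relu(self.conv2(x)))                       # -> 16x5x5
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

model = LeNet5()
logits = model(torch.randn(1, 1, 28, 28))   # -> shape (1, 10)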
![lenet5.jpeg](lenet5.jpeg)
2Z9gDI3O0qOyptna3k0d3ZbKMvbPqMmty2oY5udWd9QRvvdY1U9+Pk6EeKPEBlG0QttWz6m7h31ZWcUYKO2mdyfUOkkAe/kaGTVoPEKcUABgDgOU6QC/Vmo7eWZUuZvKD2xljXcigFt2fhxUyzvtJIJmh2mMasdf6RqIzXqNXqNXqNXqNXqNXqNXqNXqNXqNXqNXqNXqNXqNXqNSoEIzSd1Q9xopnTgZrpfsrpfsrpfsrpfsrpfsrpfsrpfsrpfsrpfsrpfsrpfsqXHpNpj7K0aMfkrrd1D7xUXxVJ8NL38nRfbXRDxrovtpvQBtXvqSMxbPSuc55TgHhS5GObUbMnOI30ksa4dTkGtLtkY5NlcRiVM5weo9o7K1w6vKVoOMTn0qdx6/pr80f9+vzRv3qP9mu0ZeIEJkx+7msSXqQHsuPRfxYrMM0cw7Y2DckssV0bczRbGUaA2pd/hxq1mSHZvA4fIY87AwM/KKm6hDDiNoKdPPFQmfzgOky6lftFC3trhJjksfSBmY9Z/SPrfl4u+k7qh7jTfB+IrXjVvxiuiPjXRHxroj410R8a6I+NdEfGuiPjXRHxroj410R8a6I+Na9gx3dtLbiBkLZ3k8mXOBXSV0ldJXSV0ldJXU6HtFSEQxg46lqT4TS99GjQO1G8dldKPCulHhUkhO01LjArToI3Z5V3f3dRd1PGpGke6tSFc/DSQzFTGQeC45WurVd3F4x945FmhbS4+33VkbpB68ZrVN5PtZG+c0K5r0XnNv+xupFHhnFei8rXY90mhx/Dn7aMtr5Rim08Yzb4/E0dtOg7NCj+VbrkZ96Cuf5WMUw9ZIYFX7816Xyjfzf8bR/ABXpIXuP94neX+ImjJ5Pt4raQcY4lCh/60QwwRxFK6MVcbwRWzlwtyP8X6P9b8vF30ndUPcab4PxFfWFCMYBPbXSJXSJXSJXSJXSJXSJXSJXSJXSJXSJXrpSXDsrKoO5aVAjAml+KtTtpHaa6Za6Za6Za6Za6Za0vdRqc9ZomJ1mTgcVcYjUejPVS99GjSd1Muh8g4ro3ro3rU0UhGMbsUsCRSox62xyyMqjSeG+pkb1hj7qEjFs+40J4tRcDHO4UqELg9g5WurVebxeMdXvHIs0LaXWtS82UevH2fIa6tV53F4x1+8ciyxNodeBrB5lwvrJ/LlNxbjFwOK/PogjBHEUGUlWHAihDPhbkeDfo31vlPNM4iiTizdVabeyeeP57Pp/CiYCUlX1on9Yf0+XF30ndUPcab4PxFfWFJTO3Ba4P4Vwfwrg/hXB/CuD+FcH8K4P4Vwfwrg/hXB/CuD+FI5WTA7BSRRrJqLe0KPeKLIVwDjfXFPGuKeNcU8a4p41xTxp45t7F882sEZVhwNE+axA/DXCjSd1S/EaV1K4bfxrinjXFPGknlwUAI5vHhQQasnt5Z5oodUbYwcjsoRTLocdVaFVSMddCQRxkioYGiiCu2MjPK11arzfbjHV7+RZYW0OtZHNmHrx9nyGurVedxeMdfvHIskbFHXgRWh+Zcjivb7xyme3GLgcR86iCCCOo0GU4I4Ghb3BxcDgfn/ov1vlJ5MsTm3VtI7GPWxrYG3SdiOfI43tUXlCwz5sTzf1f1TUV3DwbivzT2fKi76TuqHuNN8H4ivrCkqbu/KxfFR7xTfFQD539gri3hXFvCuLeFcW8K4t4UoLPuHzaSVPVcahyGk7ql+I1D8NFGJyPdXFvCuLeFI7FsD9WkiRn1ucDK8pZUJFAONJ0imYuVwcVFOHLFDnFKugbzytdWq8zi8Y6vfyLLE2hxWRzZV9dOz5DXVqu/i8Y+8ciuhKsvAitnLhbgdXzvfymeAYuBxHz6IIww4g1kHBoW9y2JvZY+1+ifW+Tso2xcXPMX3DrNefSr6e49X3JyS2s4ykgx3e+pfJ12cQSNpb3Hqb5UXfSd1Q9xpvg/EV9YUlTd35WL4qPeKb4qi7qOzGce+vU/xV6n+KvU/xV6n+KvU+2reN2w6oARQZeBrok/dpu+pfiNQ/DUta0XK99ep9tep9tW8si4RWyTmgA+88oaKCSRdAGVXNOJo2iJPBhSczVqpW2PA9tKvmuMnHrcrXVqvM4vGOrkWaJtLrXzZl9ZPkNdWq7+Lxj7+QOjFWG8EUIZubc/xcpngGLgcR8+irDBG7BrdS2103pOCOfa/Q/rfItlliaXbZPMPDFRS3Enm9jkJz/ZQUotbiJ1AwFRhuHKL+FfT2452PaT+lbKRs3NvhW946j8mLvpO6oe403wfiK+sKSpu75BZjhRvJrphQI3g/Li+Kj3im+Kou6pvooGRtOa6YV0wrphXSiumFRqblQQPfQlibWh4MK6NT9FXOOG0b76h+GpaHea0yOFNdMK6YVGTMMA0At0pJOByppUtu6hUGoFdx402lguntpG2i7jmujbla6tV9HxdB7PIssTaXWvmTL6yfIa6tF97xj7xyBlOGG8EUILg4uOo/P5TPAMXA4j51FWGkjdg8i2123O9iQ9ff+hfW+QzSwRS3L+jiZlyV7qF1dySxPIfR6CPVrNpfo37RdJr0Lzuo+Y+0HhWm+s0cDrZChqO3mtJY3lOjA5woSICbR9+PnRniPo/CkkjbUjjUp7R8iLvpO6oe403wfiK+sKSpu75Fz8BqRGeJxo4E+6oiNoh0D1JWH41zLq4X6wb7xXNuwfjiz9xFf3Eniv8AOt9sp+CT+eK51rMO7B+6ucsifEhqImZQM+1uo6J433j1XBpviqLuqb6Ki+L5cH0/fyFmtYmY7ySlSIihVB3AVJQ7zQ+AVlELD3V0TV0TVCTG2A4++ukXx5bTSpbc3D6Kl1KV4cRQZgTndur1Xro5fCg3URnka6tV9FxZB7PIssTaHXga+ZMvrJ8hru1X3vGPvHJkbu6lt7kgT9TfP5TPAMXA4j59FWGlhxB5FtbtvgkP3H9B+t8iHydAcxxNsvp9o/8AfZUcMYwka6V5cOocfrCo7gWUSzRtqVlGN9NoGbmHnx+/tH00/k2VvSQ86PPzfkRd9J3VD3Gm+D8RX1hSVN3fJKvGpU+6vVYdzmvWlHdK3866acf8U1uuZx9evzyf/D/Kvz2X6VT+VfnZ+mMVDF5zFzmxl7fV/wBVNMf/ACyTeN4s2Q/x0WjWyGG+fMn3Go83Kjdu03Uh/iU0/mtzD7xI4P8A0VGLiCycZ4qcH7653k1GP6twBRM1v5u3ZrDZ8KZjwUZoE2FzpIzq5uPHNcy3Zv8AiR/5qjV7OYa+GCh/GoIpJthJv5kw0Hj76yjBh2g8hYxgk1cKowN33UO80PgFSfFXOYDvrpF8a6RfGh6RfGh6eP8Ae5YsCo/irQnHGd9ez40oOdw7KBHA8hubZfRe0g9n+nIssTaHXrr5k6+snyGu7Vfe8Y+8cq21y3pfYkPtf15TPAMXA6vn0VYYYcQeRbW6bm+xIfuP6B9blubr2lXmfF1Vc+VJd+PRoT2+0flxeULYYikbaADh+stRXEJ1RSKGU8sXfSd1Q9xpvg/EV9YUlTd35W0/aUYNez3g5xWy2m0zzs4xUXdU30
VF38s3wH7qh+AVz41f3kVaaC8WWb1W/VqLReRSxnPorqDUOPuIrUfJAB+f5MudJ/dbSK/0jPafq+VLU6f39331qNpHex/7XyfOr/Y2PxrVdZt7h/Zug0JPdq4/RWzt90fHcc0PgFSfHUXdW4V6pr1TXq1FvHqjk4Vb4Ht/hR+GtbnAr1/sNIDMcgfMNJKhyjDINY6qNzbD0PtoPZ5FliYq68CK382dfWT8fkNd2q7uLxj7+VbW7bn8EkPX7jymaAYuB/ioqwww3EHq5FtrpvRcFkPs/l/rctr5Lh5x9dlHWx4Cra0X+7XnHtbr+VJD5vdSmPGpoYS4G7NSW7bSK4XnxbVdPO7N9T2U6yOseZU0DOB7VAvtoAf9rC6jxxivQ3cMvuWQGou+k7qh7jTfB+Ir6wpKm7vytp8dE9QqO4GSrLu6qtwnWp41N9FRd/LN8B+6ofgHJZ/E38NQfT9/KXe0i2h/vFXS/iN9FYry5jQ/3bttF/xVriMP/CzB/Du+ygrK55gO8iT7d1MtxGBlvnaD4NioeeYj2TKU+/j9FS47B8mL4RyYkRXH6wzQMcSIdXFVxR7xQVBljXRfaKt4pH0uq4IoOpyprB4U1zbLmH2k+b/TkWWNtDr11g8ydfWT8fkNdWq7uLxj7xyra3be5JD+PKZoQFuR/ioqw0sOIPItvcnMHsv82sg5H5X63ITjOOyp767iZNkdoUcYwfZHy/Kn7VP/AOa1jjTSxxrsZ1LqpG7fxqIxmaLKjorhx9mcVzrmSQdk6RyD7VqPYTRx7/7tXh/gasrcbTHVtg/8afjVuJYkO44wg/A0xayZub7Oa0yQyRHV1/1pDtCB8JqYR3ULnHAOM/lbT46k+E1Z/sxSPKzgoMDSaGxLnXx1Go/i5WU8CMUFS/mUDcOav8q3eVJP+UtI3n5Zk4ZUD8KhNt5QtNnvwJYya3t5Pm7lK/jW/wAl2Ug7Rc4/Cuf5C1/sphXpP/Dt6nvDIfxr0vkrykvw25b7q21zH5Rt206edbMv4Vso7zmNvInQ/wAqQ28lumRv2LbPPhUhtfKnmr9WmQfhjP00mLuC+jzu1bm+yvT2Ug/Wi5wrTttm3zZObWQciuNWf7JeTDKGHvpiqKp1DgKjrUxwBXSrUatcoCKEkTh0PWKwd4o3FsuYPaX5n9ORZYm0uvA1g8ycesn8vkNdWi83i8Y6u7lW2u25vBJD1e41/aLqGH9o4FeiMtx+xiZvtxihLa+TZ45utpdKBhXQY72r1UHe1bG4KPB1c7ev5X635Lyp+1T/APmvILnzkRLbROdGjj1/hUAHAIPu5Iu/kUzwrKV4aqDwQLGxbGRXD2qQS20Ug/WQVKdgUwPYkYDwzXoppofgbFei8otjsdNX2nNevbSr7wQa9JYBh/7cgr0tpcxd6Z+6udJs/wBouK5s8Z+t8q0+OpPhNWf7McgyxGOyoCGJy3y4Pp+/5YZt2BioOYH1A8VqXaWVu/xRA0mmyhTLeyumuajp8Mzj8awZbnHZtSfvrMN1dQn9RwPwrTJO05zuZhvqz/ZLy7OZBInYaaSK3RHHAipvh5Yvib7+TB3jsprm3X0B9Zfm/wBORZY20OvAisHCXC+snJsy+0m/2MQ1P4Cuaq+TIT7TYkm8PVH057q3vOx4ks/H7K9WRu96/N897tQW2s4YcfNTf+hfW/JTzW18LdZiCUaDXvAx2+6v9Kp/9UfzqSJ/KqaJFKH+yjgfrUFHlVNw/wDSj+df6VT/AOqP51HjysgOf/SD/NX+nIv/AKI/zV/pyL/6Q/zVpfy3HjOfzIf5q2j+VklGoc3zQD/qpMXyj/gf1qXN+hGOHm/9a/Ol/wCV/Wvzpf8Alf1r86X/AJX9a/Ol/wCV/Wvzpf8Alf1rfcoe+H+tc94W77cVkTaPgUr/ANVZTynKO9dX3miJ5/OGz62jTu5bT46k+E1Z/sxyxoZDFoOeGa/Oz+5/Wvzs/uf1r87P7n9a/Oz+5/Wvzs/uf1r87P7n9aSANr09fLbpqOg2sh0+/WnyYyZjFoGPVzWRMZdfauKEh4Kc7u6tXmFzp4+z/OlZbOcgjI9T/NX5lP4p/mr8xm/ej/zVageR7lgIxztrFv8A8df6Gn+maL/NXO8lMvxXMY/Gufawp33kdFV8mm5H/sy6/wCFauNH/h2VBp9d5dOP3gKO2thAOr0mrPJFtJUTe3rNjrrT55GzfNQ6j9lehs72futyo8WxREfkkIP/APXcqv8ADqpn/syBjnZxsSF+kiv7s/WpZIgFdeBDiv8A8jcG3ThsLXm6u9+PhitFvCkQ/VHJBlS22lEQ92fk+T4g2I3jlLL2404/QPrfl4u/k8m4JGbkcO48mw2mz5wOcZrzjznaafZ0Y/Gpu78rafHUnwmrP9mPytt/ukv8cfyl52nHurmttCxIAA91FPNrjJTHqe6okFnMSqAbyq/jW6CJPikz+Fb5ol+GP+tWhfyncY2a81FVcfZXpZ7qb452pLXzJGLRmTU/O4H316K3ij+BAORopBlHGCK6Fv3zXRN++awtpE3Xl11ffWEUIOxRijVmzksxTeT8vyb/AL4n4/JtLnICwrICO3Vj+X6BpXGc17PjXs+Nez417PjXs+Nez417PjXs+Nez417PjXs+Nez417PjXs+NI504Hv5PJn+9D7jylAcZq4fWDhfytp8dSfCas/2Y/K23+6S/xx/LijRwhVtW+vzlPCvzlPCvzhPCvzlPCoICcmNAueS3/wB2f+Icl5PEcSRxMyn34qS8e+ubzY2+1eF7Yxj6CRTxQDTKvm76zww8oUir2a5fmw3Ey57FU0txL5OmjhlUNAwIOvPAH5pOaeKawkS8WSNNgHBzrOFIP0Glh8wc3jXBttiJBubTqznsxVlbPEouxrjaOWZY1Gk7+ce+omsLOS8dojKyBgNIBx9JyDV6kkTwQRzW6Id2rn44j6aic2bpZTS7GO51De3Vu7DirQNZvBFdKzQyFwc6eOR1cvk3/fE/HkkZBqYKSB21b+VJIrq7upItvLdwSYeM8TxI3Dhine3bbQxSIjKIDp34zqft39VQtzMt5S81+rqx41cRzKq3GykaGCSPSrEcNMnAijHc5MyzQeimhMTqC2CfePfVuQ22tp5HQMICse4MRpY+twryX51JE8d9A8mhExsyMePGryC0mgt1tI1Y7Zc6yd+/sFTXyvF5pFdi22GnLMMhc5+mrW5d4TBNdSW2yCbwAWAOfq1ZXlyYpYrqCWTZImChVdXGpbme2Zozb7RWeLQquSABx3jf9lW0T38Jc3UW+NcHfncR2ULK1aO1ZprpjIy6tySY4fTSpFJDbstq8zto1ZZZCu73HFQXDAW9o0cbl9ltE3+trI3p+iWJQZEc4du7B+RPAmAzrgZrpI66SPxrpI/Gukj8a6SPxrpI/Gukj8a6SPxr14/Gukj8a9eOukj8agmZ0Ko2TipPhNWf7MflRLpG0A0hvd+Wt/8Adn/iHJcWpbQJUKah1U9g7nQ0Wy1DuxUj3N8ZZHEK5WLSAI31Dr66uoCxkS4kkkbP63VSW8vlKRoYV
CwBUxpxwLfOIxXnF1dme6MkT6xHpGIzkKB9NC72xyLk3GnHbHoxQmhuFWcSSPmWEOuG6sVG9vfmK42WxklMIOsZzw3YNSN52xjkaFyGXLao8b8+/FRRNeFrGGXbRwaN4bqy3YM15M9MW8yDgbvW1cthoXVoukY46hv5XLQNpdtTwrK4iY+9M6fsp5JICdba2QSsELfO05xn315yYTtdpth6RtIf52nOM0/9n1Kysmh3ZkUNxCqTgfRRxEzklTqlldzzfV3k0kiwNlCSgMrlUzx0jOB9FWpSPSbZSkW880Hj91bS4iLNp0nS7KHXsbB5w76862PpNQfGttGocG0Zxn31HHsRojkMqjJ3MSTn7TVsghGm3BWIZPNBGDUgW31h49liV2cBPmjJ3Dupo9nIdRDa2mcvu4c7Od1KphK6XaQNHIytlvW5wOd9DZQCPEWwGPmdlL6AhQoTQsjBWA4ahnnfT+gT3Fhb27WsTsiiVyHm0nBx2Vs5taMoBl5hIiz849Veb621CUQltB0hzwGahfa4WVmUZHWudWe7FXEtvqaVItrGsqFRIO0U0xfCxyCF93Bzjd9tKNIgXbSxnaZ36OsVI5kaNEjMup0K6k+cO2pmLSJswGw8ZBIO4EDrpni1DS2llddJB/IXJtMedaPR6u2m13V7rWF3ktPKC72IGco2Kl2CmKZ7A3auD6vDd9tTLdWOi4RVdQsmpdJOMk43Y66S5ENvqaUx585Gz79WPwq082sVlluFmJVpcKuzbB3466Eh8nCGA2b3UCrJx0YypHVxpopYPMbg7GVCr68oZVUjv309rbWxuZmmmAM0oAXQR7vfSPZ2O1YW3nMyvJp0DeMDtPNNW9xp0bVA+ns+RmvPrS0ge0ydELOdrIueI6h3V5uzMCGCM+g6FY8ATWxDPnaNDq2Z06xxXPbuq3lM3o5kaRD+qBvNTS26sZo9B2cylMqzAZrVr3bfzbh7fZQlfFtHqmDbTOcIeNSyO7wiNdZEkZU6e0CpHJkUxuqGNoyHy3q7vfW1izpzjDLgg8jacasbs1aC8u723vHkxKlygMEvuQgbvdVi4h0m7lki9b1dIY5/w1ZPfWbOZbUyxuj6mlKrnBHVmri42MBMWnGi4Gnf2k8Md1FUtEluPOltsRzZQ6l1A5xUG2sBFE9w1qWEuSHGerHDdQnez81Fzbym3mD6mDKpO8fRUarG91cSC3TDyALlkJzw91LbR2Sm/wBtJE0Rl5g0YJOrHvHVQuGh2DamQx5zjBI+UbX/AMwjsIRAJMtBtNRzUkUrmeSG185aVU0hl39X0UscekQPPAoBXfpeMsajt47eW7uHQybOLG5R176dktbiWOJBJO4XoRx31Igt5nhjlSJ51A0AtjH3itHm06w+cG125A06wcUiYZIpCwjmJGGwM8M5HDrpriDyfOCQrxNKvNdSwGePvqWPZy3E5n2SW6qAw5gY9fv41FHFaXEtxIXXYADUrJxB8ahuoshJBnDDeP064tLA2ptpJGeKaV2Dwajk83HO3k43ir+1gkglgvwNpPOxDodIVjpA53DtFTwq6c+8juFJJ9VSp7OPNryizSgWk8biJF4xu4w5+ykS5NpEYLdoYzE7HaMQBqPN3D3b6ntozbeay3SXW1LnWMFSV04/V45opK0S24kuTqVjqKyrjhjiKEFybSLYWj28RjZjrJAGo7uaN3DfUssfm8itarDsp84OHz/2anWchVeTVHAsrSiIY4am3n8hJby50SDB0nBpXu7y4v8AQjJGJtI0AjB9UDJqRJr27n1W3mgLFebH7sL7uNSs99dSXL6QtwSupAOA3DHjxpZFu51uxKZjcYTJJGDzdOnh7qiKSzuYxMBtGBztDlur3U0NsZHmis5baEMRztQ6/Cv7TdXFzKRGuuQrlArBgBgdorzpGkL6pGwSMc/Ger3UkPk+G7ZfNjBtoJ41J3nc+rq38Rvq1t2OWijVDju+Rg9dHydavbLb5Ijuyx2kaE/Mxgn6aurKN4DYXM4nad2O1XhkacYPDjmok1x5Tyg12d59UsTjhx315URpl2MqGO0A/ulY6jn632CpJbnzWCUQrDGsTsynnhmYnSPm8K2aebeaefi91lzrxn1dOPxp4JHhjQNcFXDEk7RtQyMU0l15rBIsIhjSJ2YHnKzMTp/V4VfyoltOs+xxDOSM6M53j1eO400Vw+SXLKgkMmzX5uo7zyMjDKsMEVAj311PawMHjtpNOkEcN+MnHfUL+d3LRwSPJFASulCwIPVk8aSNry7njiiMMIdh6IHjggDfUjS3lxLcNoxOQgK6TkbguD9NbZrm4mlM63BZ9O91XT1ClUNJzLk3Y3j1zndw4b6gjv7id2SJ4xEWXEOrccYHZ25qKUPLqjMZG8ewpUdXvpZ7OK5lka4eZnt5UWRNQwcatxG7rqKCfpcszZbVjLE8fp+VJfl864RFs9PDBzmpZra8S121ubaUPDtN3aN431HN5znRJC+Nn8xNPb15qO8s7lLa5WMwttI9orKTnhkbxU8UPlLEd3GEuTLDqdt2CVORjI9xq7gSfQs80co5nqhNO7j+pQiM+R56bzOjtbVp/rSxmeFrNNWFFsBKwPUz56vcBTWcvlNWiWNY4dMGODAgvzucd3VijdxXiJe7bbBjDlN6BWXGrhu7ahu5LrbThpZJTowHZ8cN+4DAqK12m10Z52nHX/8ALH//xAAsEAEAAgECBAYCAwEBAQEAAAABABEhMUEQUWHwcYGRobHxINEwweFAUGBw/9oACAEBAAE/If8A8G0lJZKShK85TnLJcslOcpzlOcpKc5Tn/wDDXKSkv+Qev8FAgOzTV7XAmVexkuujnIhjLC+CJlyoWd1RCniwusWU33HF87lj0IgYtQRarS/3AaxGA10zIrNEc2b8hgdV1AqzC+UDoFrCY5BELDe4RsDfmLCnWtm46REk1Y3i83bDcfqivSStgXXWpmEGmGS8uCJcHqis2yjPJyow2fOW5UA1Btyii1AMy9ApfRdWVc6cvea8WYwltxeMOcP/AFy1GvDThp/CSb6Rg3Q0Ahp0j9cwguqhBEdbwa3UUvQI3F2IK21ZniGjwugiz16a4lyGGP3ixozG9fYpgjterOK1YLZY7hUClUWCB7SymAtCtO9rOUZBYJ1VliwctEUYHY01mHOBCqzcct1t2phQhuhW9RqlA3JqaUoyGT+FTahWEygywbNktQlRRNwNENvnF9TMi1dnKCtGMqihPWMl1IWbnztjQ20hIWrdfEe4uYzqM9WjnMr9q3AlllqvI1pGzVheMYLOy+M0iOXnGOvAZLvm671makFyMl56jYZIeWMB5Pn1OsUnd0S0pM6G8I8t1/Td5jDWxlM6KqWaNM2MekzJzGp8mJWLSjVm0eq5W1S+VxH/ABZ5RoA3ZcYgjF0xVcNXHVJG/tGqrbDlmAvmiC3DyVSzqNG1czE9JYCGlYADThNX/wA1qPddq1XlaiYSbwcuTaplUbYSQbdBo7xESoFrQaemNXix5dbtzt/C4SVVkIg5gecr27OMtbMq/BLn/N1LTBxr
vCQDf+FNHTFc4bqqNqCNtjSw2g2VpA6Irs6xxWisGGW7tpSgt1QrA2VaNJapQBublCsFMfFiaqwdszGODftAZuy93MQVPpF0FOmos/jc0aMPjX4+tbxtGqUcAkS7acWjJyvS4kqcQDBqmri70tKrXLKA9d3vygumCItV50PN0gAR9cKtLvydpU1f75aUVpd/NFgITVQ6NIX4NtIwEW+Q1TVqqikYTK2Cyy6XTEqfQtKwbRyatpf1LtCuMrhWN5iEChjaAALlZ5tlREExF0Nm2oauml6x6xhvUlGt0FNFf+YU1rF58rpbvRigWa+jEZ1K8IireE8R0a4gYq1qSli8tu94mgZZVDf+Cpzqx44AXsVC6uVD4Dt9S3kQ6NiLw+qxgpy+B4RxHZTGQzSr3NrEMUnEpAIuwJt2ect87gr2TzDRuBWJkfmgK9w4aYiuVlSpUr8GcE7RpUL8TXUd+L8E2/Z7q/D/AIWKqtxvlGsjaeLgeuLELRc7NhNlAqlWb7x1jgD2Os5nJ1LgvTgRClhQfBhZCpQByqVlAS+C4P8AFcXguXLl/wDSvN1V1gqZos1pwRY1UyUua1J7jWXEOy1RrQyX4jlATrtyiBRxFNt9IkqQ6krH8FwuuuCxh6Ogg1W4YFlaKHbsF1vVwb7MQAG0FVRcK8HgHxYeMMS5cviwKXy5ZM/DXygbDjGw5diYKGu9Sxup4SowgoOaXUVCxpU+eac81ypLXwrKc7WYMjU2qkLi6VVuZpENRMHzRadLwNoYaPZVWDpsMwNbSkVxU9V3cjRnsT4Rh21Fs79n0TPq2EnMwqfCr8HddCB865eiKsSwZoXwBENHSuokwBBtKung+A9Ws2gW3yuSbMVnBgd3UgdA2fCDIPNtICt8bK1YOwBuXcGpk5sMHCNxgIFtbtektle7ec+KarxmKLmlBQQiwFms1BSXLVhQL2FuqjRbiB1e6CuNZrC2Es61L953uaa1+AVCwvNyuca0MsKdJrdRQ2QWs2VrxYFGMZlxM8tVsFZxmUIbymUBshmNBsXeC0YyJ16mLYvf/oS5XgrwU/PLqmlX8spSlKUpSlKTawCrcWlBUFlS32mC88GF4ixerJGGsMm1FwtW6bhPE5IevaZs56tNG+mkL5oHpFpxpIu9G61PLpDxsDFtUUxpeamM1hbu3xSgL5qwJ7E+E9iggrDaoOf1P3Pr37n1z9x09ZOLQsUl3elLy449/HViPdwKTGb86mpv4wvWmAVrme/+f6nIDFYumkOg9IVXmiXl556VuR6Q2xLHdLANVYIbA7Ig30jG+TQ4YNbrR0jGVYUqwhRcFrVZuVAAC9F7kbz0tbjS2AppQUMLoNYc7H1stsozmjmY1ye4oHSPe/iBsAowOYL5GsEw+Nk6uFnkNFbi7uUJTCrIg9WFNkPxqWagYGUxPeFKNlJ89GE6c2xQTrXmLOkdJbPNISymgKUHQmeIEwzxMYqYN1EQ6PHlWwt0slHTWAEUGDOMkN6WninUWFGtDzZZBs+orvLXQ5iategNLP8AxOx6wNJSzd/JmZmZmZmCoY6hBfRh3lwNenyyLgcLMeHaUN+bkWw/b5pmlNJd0BbtCcOTg2gpqoW0DgH8YTd63CLdevDgawHZdUNoOoMLWsynNa9COAAalDWZeL8EDo7PhwL3j4nWhCZ3+ifWk+pIanPOTLl4ynQhKvrxwrINDC2ZzYRmAm75BqJsC0+kQQjfevaEmSlFicooyW7XgjVdcOkTTGdSSePAybCawpXaRoauVWtkI6LQVQ3EdAw5ibWR6AMqUlRh0EDaClLdOXPozMD2QSqtcLenjNEVvd3c5Y2hlBABg5SkdJeahiqL2mTBEM1XoyPO4X5WopWEG9JKLlDAQ/5LjhdUT7Yn2xPtifbE+2J9sT7Yn2xPtifbE+2J9sT7Yn2xFaTq1ne9Z3rpCa3nXUP7nWR10ddHXR10ddHXR10ddHXR1EU/OmKy/cXCGru+FcKJRKlcEDJfjDIAeRwi96+J7BHeNjhwI6aGgxBt3/2DBrVWvPFVUUyHSCyYNZfOtK2amHktMZZTaqBiKU0ciaJqjmZl3yxMeezOWXWfTP1Ok+X9R/WFfNCryhYDaWr0EdG+k9oymhvA0VXqz9YBYO5Fujw9OBXF0jYYpBR9YYX4qW6vm4dljVSnYja1vA/59Hwfz9p04b2rpPcIFZqpRPqU+pT6lPqU+pT6lPqU+pT6lPqU+pS7g2KDD0IqjoXwAZ9U6X0Z0vozpfRnS+jOl9GdJ6MboYRMDBzPFEZreyp7BwPfSt7Q8UpOZANJWZS9HIvB0jgJKaecxaNmnzhGtNX0DKddazC1Y3eBDTh4FxadD5I6RGSd9BzG5MEyUq6/ZOsygHnVw1nkgby/FC90MveQUxJYDrneSWTzIj5EGNTZbL9CLf7JaNufqxJfOEBPlELM08rYISI6k2Y8ZbekYYYbJodZ+of82j4P5+06cN7V0nuENXu1ju7hHYv6nYv6nYv6nYv6nYv6nYv6nYv6nYv6nYv6nYv6nZv6hSkGy8lRuw1bU9olRAw+5n3M+5n3M+5guwTUC/xsyL5R8WMoTaewcL3zPZ/iLllJKncn7ncn7ncrgywx4qYWC9mMTqCWsBskB2ktT0wtVqYK1jVJgO8pmXa7OXjwqWF+yD37qbbiXkTk8yaWJkcr+zrBvgk76GeJHGusQiqxg2knideiXHSAxJgwf6iJlUhTcVeNrpGMUJrp1Tr0l/8ALo+D8gbxbqITCLWu9Qt71NrI2Or16vz7Tpw3tXSe4Q1O7WezfiXxVW1rPp0+nT6dPp0+nT6dPp0+nT6dPp0+nSwM26eXjDzcREPmdu5w6jUM+3T7dPt0+3T7VLSmC5lUTdA7cGFggsaYlurpynvGe3/E7NzlTTUtXPtk+0R5GRa8oMcaVMcGJr0lronPpL56VuOvhAjkG7S8WOG6+ZcNVytPGXKxLPNftDpwX5oHZOTzJjn42XM6QGjgk7LHf4cHzFeyh6kvSMBuMEKToH9xEzUhkiAHWGzCskz4P9dIP/Jo+D8dMuIhypxwPQH9QhQ8Wpv4EYQX7aLfpJp/kaSjzahqvy7Tpw3tXSe4Q1O7Wezfj+bPYp3bnO/6EQ0JsyflGMY5ucwGbXKtmtMMrDSBtrdnt/xOzc57TDfdauIpc2HXVtprfgEL4MQWwyEWrZUxNBiBKYy6KuIQwi74JjM8CpfQ6dODzk67eDzJWytr6ufhBs4Npt39U4Cd2vZQow+dh5P1C0S4O8lhs/cZMytQMJoBsTZ5w68fRH9w/wCNo+D8XfKdGv8Ar159IjxXNM9Lx19OGrJRuth4ShFrumx9Eryfy7Tpw3tXSe4Q1O7Wezfj+bPYp3bnO/6E97+ZR9HvRO2P3O2P3O2P3O2P3O+f3MId+0aiTXmGO56aVBKAek7NzntM9wfEHjbek6P0/udH6f3MBHkGipZOWgpmsqGaVyC5fFqUnSGHRLryr9zOZqak7dSttvy4NieG2HrOk1ie3Yf6ekwf231OnGppi/wjg8g7KkZRwB4HWdekG4lw7ZL
from torch.nn.modules.conv import Conv2d from torch.nn.modules.pooling import MaxPool2d class LeNet5(nn.Module): def __init__(self): super().__init__() # Convolution layers self._body = nn.Sequential( nn.Conv2d(in_channels=1,out_channels=6, kernel_size=5), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2), nn.Conv2d(in_channels=6,out_channels=16,kernel_size=5), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2), ) # Fully connected layers self._head = nn.Sequential(
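# With a 32x32 input, the convolutional body produces 16 feature maps of size 5x5 (32 -> 28 -> 14 -> 10 -> 5),
# so the first fully connected layer below expects 16*5*5 = 400 input features.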
nn.Linear(in_features=16*5*5,out_features=120), nn.ReLU(inplace=True), nn.Linear(in_features=120,out_features=84), nn.ReLU(inplace=True), nn.Linear(in_features=84,out_features=10), ) def forward(self,x): x = self._body(x) x = x.view(x.size()[0],-1) x = self._head(x) return x lenet5_model = LeNet5() print(lenet5_model)LeNet5( (_body): Sequential( (0): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1)) (1): ReLU(inplace=True) (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (3): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1)) (4): ReLU(inplace=True) (5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (_head): Sequential( (0): Linear(in_features=400, out_features=120, bias=True) (1): ReLU(inplace=True) (2): Linear(in_features=120, out_features=84, bias=True) (3): ReLU(inplace=True) (4): Linear(in_features=84, out_features=10, bias=True) ) )Prepare data * dataloader: an iterator that creates batches of data and applies transforms when required. * What we need to do with the image data here: (1) resize each image to 32*32 - transforms.Resize (2) rescale pixel values from 0-255 to 0-1 - transforms.ToTensor (3) normalize the data - transforms.Normalizedef get_data(batch_size, data_root='data', num_workers=1): train_test_transforms = transforms.Compose([ transforms.Resize((32, 32)), transforms.ToTensor(), transforms.Normalize((0.1307, ), (0.3081, )) ]) # train dataloader train_loader = torch.utils.data.DataLoader( datasets.MNIST(root=data_root, train=True, download=True, transform=train_test_transforms), batch_size=batch_size, shuffle=True, num_workers=num_workers ) # test dataloader test_loader = torch.utils.data.DataLoader( datasets.MNIST(root=data_root, train=False, download=True, transform=train_test_transforms), batch_size=batch_size, shuffle=False, num_workers=num_workers ) return train_loader, test_loaderConfiguration & system setup@dataclass class SystemConfiguration: ''' Describes the common system settings needed for reproducible training ''' seed: int = 0 cudnn_benchmark_enabled: bool = True cudnn_deterministic: bool = True @dataclass class TrainingConfiguration: ''' Describes configuration of the training process ''' batch_size: int = 32 epochs_count: int = 20 learning_rate: float = 0.01 log_interval: int = 100 test_interval: int = 1 data_root: str = "data" num_workers: int = 10 device: str = 'cuda' def setup_system(system_config: SystemConfiguration) -> None: torch.manual_seed(system_config.seed) if torch.cuda.is_available(): torch.backends.cudnn.benchmark = system_config.cudnn_benchmark_enabled torch.backends.cudnn.deterministic = system_config.cudnn_deterministic # system & training configuration system_configuration=SystemConfiguration() setup_system(system_configuration) training_configuration=TrainingConfiguration() batch_size_to_set = training_configuration.batch_size num_workers_to_set = training_configuration.num_workers epoch_num_to_set = training_configuration.epochs_count # if GPU is available use training config, else lower batch_size, num_workers and epochs count if torch.cuda.is_available(): device = "cuda" else: device = "cpu" batch_size_to_set = 16 num_workers_to_set = 2 epoch_num_to_set = 5 train_loader, test_loader = get_data( batch_size=batch_size_to_set, data_root=training_configuration.data_root, num_workers=num_workers_to_set )Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz to data/MNIST/raw/train-images-idx3-ubyte.gzTraining
processdef train( train_config: TrainingConfiguration, model: nn.Module, optimizer: torch.optim.Optimizer, train_loader: torch.utils.data.DataLoader, epoch_idx: int ) -> Tuple[float, float]: # change model in training mode model.train() batch_loss = np.array([]) batch_acc = np.array([]) for batch_idx, (data, target) in enumerate(train_loader): indx_target = target.clone() data = data.to(train_config.device) target = target.to(train_config.device) optimizer.zero_grad() output = model(data) loss = F.cross_entropy(output, target) loss.backward() optimizer.step() batch_loss = np.append(batch_loss, [loss.item()]) prob = F.softmax(output, dim=1) pred = prob.data.max(dim=1)[1] # correct prediction correct = pred.cpu().eq(indx_target).sum() # accuracy acc = float(correct) / float(len(data)) batch_acc = np.append(batch_acc, [acc]) if batch_idx % train_config.log_interval == 0 and batch_idx > 0: print( 'Train Epoch: {} [{}/{}] Loss: {:.6f} Acc: {:.4f}'.format( epoch_idx, batch_idx * len(data), len(train_loader.dataset), loss.item(), acc ) ) epoch_loss = batch_loss.mean() epoch_acc = batch_acc.mean() return epoch_loss, epoch_accValidation processdef validate( train_config: TrainingConfiguration, model: nn.Module, test_loader: torch.utils.data.DataLoader, ) -> Tuple[float, float]: model.eval() test_loss = 0 count_corect_predictions = 0 # turn off gradient-computation with torch.no_grad(): for data, target in test_loader: indx_target = target.clone() data = data.to(train_config.device) target = target.to(train_config.device) output = model(data) test_loss += F.cross_entropy(output, target).item() prob = F.softmax(output, dim=1) pred = prob.data.max(dim=1)[1] count_corect_predictions += pred.cpu().eq(indx_target).sum() test_loss = test_loss / len(test_loader) accuracy = 100. 
* count_corect_predictions / len(test_loader.dataset) print( '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( test_loss, count_corect_predictions, len(test_loader.dataset), accuracy ) ) return test_loss, accuracy/100.0Main processordef main(system_configuration=SystemConfiguration(), training_configuration=TrainingConfiguration()): # system configuration setup_system(system_configuration) batch_size_to_set = training_configuration.batch_size num_workers_to_set = training_configuration.num_workers epoch_num_to_set = training_configuration.epochs_count # if GPU is available use training config, else lower batch_size, num_workers and epochs count if torch.cuda.is_available(): device = "cuda" else: device = "cpu" batch_size_to_set = 16 num_workers_to_set = 2 epoch_num_to_set = 5 # data loader train_loader, test_loader = get_data( batch_size=batch_size_to_set, data_root=training_configuration.data_root, num_workers=num_workers_to_set ) # Update training configuration training_configuration = TrainingConfiguration( device=device, epochs_count=epoch_num_to_set, batch_size=batch_size_to_set, num_workers=num_workers_to_set ) # initiate model model = LeNet5() # send model to device (GPU/CPU) model.to(training_configuration.device) # optimizer optimizer = optim.SGD( model.parameters(), lr=training_configuration.learning_rate ) best_loss = torch.tensor(np.inf) # epoch train/test loss epoch_train_loss = np.array([]) epoch_test_loss = np.array([]) # epoch train/test accuracy epoch_train_acc = np.array([]) epoch_test_acc = np.array([]) # training time measurement t_begin = time.time() for epoch in range(training_configuration.epochs_count): train_loss, train_acc = train(training_configuration, model, optimizer, train_loader, epoch) epoch_train_loss = np.append(epoch_train_loss, [train_loss]) epoch_train_acc = np.append(epoch_train_acc, [train_acc]) elapsed_time = time.time() - t_begin speed_epoch = elapsed_time / (epoch + 1) speed_batch = speed_epoch / len(train_loader) eta = speed_epoch * training_configuration.epochs_count - elapsed_time print( "Elapsed {:.2f}s, {:.2f} s/epoch, {:.2f} s/batch, ets {:.2f}s".format( elapsed_time, speed_epoch, speed_batch, eta ) ) if epoch % training_configuration.test_interval == 0: current_loss, current_accuracy = validate(training_configuration, model, test_loader) epoch_test_loss = np.append(epoch_test_loss, [current_loss]) epoch_test_acc = np.append(epoch_test_acc, [current_accuracy]) if current_loss < best_loss: best_loss = current_loss print("Total time: {:.2f}, Best Loss: {:.3f}".format(time.time() - t_begin, best_loss)) return model, epoch_train_loss, epoch_train_acc, epoch_test_loss, epoch_test_acc model, epoch_train_loss, epoch_train_acc, epoch_test_loss, epoch_test_acc = main()/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:481: UserWarning: This DataLoader will create 10 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary. 
cpuset_checked))Show resultplt.rcParams["figure.figsize"] = (10, 6) x = range(len(epoch_train_loss)) plt.figure plt.plot(x, epoch_train_loss, color='r', label="train loss") plt.plot(x, epoch_test_loss, color='b', label="validation loss") plt.xlabel('epoch no.') plt.ylabel('loss') plt.legend(loc='upper right') plt.title('Training and Validation Loss') plt.show() plt.rcParams["figure.figsize"] = (10, 6) x = range(len(epoch_train_loss)) plt.figure plt.plot(x, epoch_train_acc, color='r', label="train accuracy") plt.plot(x, epoch_test_acc, color='b', label="validation accuracy") plt.xlabel('epoch no.') plt.ylabel('accuracy') plt.legend(loc='center right') plt.title('Training and Validation Accuracy') plt.show()Save and predictionmodels = 'models' if not os.path.exists(models): os.makedirs(models) model_file_name = 'lenet5_mnist.pt' model_path = os.path.join(models, model_file_name) # transfer the model to cpu. model.to('cpu') # save the state_dict torch.save(model.state_dict(), model_path) lenet5_mnist = LeNet5() lenet5_mnist.load_state_dict(torch.load(model_path)) def prediction(model, train_config, batch_input): # turn off gradient-computation with torch.no_grad(): model.to(train_config.device) model.eval() data = batch_input.to(train_config.device) output = model(data) prob = F.softmax(output, dim=1) pred_prob = prob.data.max(dim=1)[0] pred_index = prob.data.max(dim=1)[1] return pred_index.cpu().numpy(), pred_prob.cpu().numpy()Examplebatch_size = 5 train_config = TrainingConfiguration() if torch.cuda.is_available(): train_config.device = "cuda" else: train_config.device = "cpu" test = torch.utils.data.DataLoader( datasets.MNIST(root=train_config.data_root, train=False, download=True, transform=transforms.functional.to_tensor), batch_size=batch_size, shuffle=False, num_workers=1 ) image_transforms = transforms.Compose([ transforms.Resize((32, 32)), transforms.ToTensor(), transforms.Normalize((0.1307, ), (0.3081, )) ]) test_trans = torch.utils.data.DataLoader( datasets.MNIST(root=train_config.data_root, train=False, download=True, transform=image_transforms), batch_size=batch_size, shuffle=False, num_workers=1 ) for data, _ in test_trans: # pass the loaded model pred, prob = prediction(lenet5_mnist, train_config, data) break plt.rcParams["figure.figsize"] = (3, 3) for images, _ in test: for i, img in enumerate(images): img = transforms.functional.to_pil_image(img) plt.imshow(img, cmap='gray') plt.gca().set_title('Prediction: {0}, Prob: {1:.2}'.format(pred[i], prob[i])) plt.show() breakMcStas First time setup McStas Scriptfrom mcstasscript.interface import functions # Each time a new conda env is created and used McStas must be configured my_configurator = functions.Configurator() my_configurator.set_mcrun_path("/usr/local/bin/") my_configurator.set_mcstas_path("/usr/local/mcstas/2.5/")McStas instr filefrom mcstasscript.interface import instr, plotter, functions ISIS_SANS2d_Mantid = instr.McStas_instr("ISIS_SANS2d_Mantid_generated") ISIS_SANS2d_Mantid.add_parameter("double", "L1", value=3.926) ISIS_SANS2d_Mantid.add_parameter("double", "A1w", value=0.03) ISIS_SANS2d_Mantid.add_parameter("double", "A1h", value=0.02) ISIS_SANS2d_Mantid.add_parameter("double", "S6", value=0.006) ISIS_SANS2d_Mantid.add_parameter("double", "A2", value=0.006) ISIS_SANS2d_Mantid.add_parameter("double", "Lmin", value=1.0) ISIS_SANS2d_Mantid.add_parameter("double", "Lmax", value=14.0) ISIS_SANS2d_Mantid.add_parameter("double", "model_nr", value=5.0) a1 = ISIS_SANS2d_Mantid.add_component("a1", "Progress_bar") 
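# In McStasScript, add_component returns a component object: its parameters are then set as plain
# attributes, and set_AT / set_ROTATED position it relative to another component (or ABSOLUTE).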
a1.set_AT(['0', '0', '0'], RELATIVE="ABSOLUTE") Origin = ISIS_SANS2d_Mantid.add_component("Origin", "Arm") Origin.set_AT(['0', '0', '0'], RELATIVE="ABSOLUTE") isis_source = ISIS_SANS2d_Mantid.add_component("isis_source", "ISIS_moderator") isis_source.Face = "\"E2\"" isis_source.Emin = "-Lmax" isis_source.Emax = "-Lmin" isis_source.dist = 3.68 isis_source.focus_xw = 0.0365 isis_source.focus_yh = 0.021 isis_source.xwidth = -1 isis_source.yheight = -1 isis_source.CAngle = 0.0 isis_source.SAC = 1 isis_source.set_AT(['0.0', ' 0.0', ' 0.00001'], RELATIVE="Origin") isis_source.set_ROTATED(['0.0', ' 0.0', ' 0.0'], RELATIVE="Origin") lmon1 = ISIS_SANS2d_Mantid.add_component("lmon1", "L_monitor") lmon1.nL = 140 lmon1.filename = "\"lmon1.dat\"" lmon1.xmin = -0.04 lmon1.xmax = 0.04 lmon1.ymin = -0.03 lmon1.ymax = 0.03 lmon1.Lmin = 0.0 lmon1.Lmax = 17.0 lmon1.set_AT(['0.0', ' 0.0', ' 3.698'], RELATIVE="isis_source") psd1 = ISIS_SANS2d_Mantid.add_component("psd1", "PSD_monitor") psd1.nx = 100 psd1.ny = 100 psd1.filename = "\"psd1.dat\"" psd1.xmin = -0.05 psd1.xmax = 0.05 psd1.ymin = -0.05 psd1.ymax = 0.05 psd1.set_AT(['0.0', ' 0.0', ' 3.699'], RELATIVE="isis_source") bender1 = ISIS_SANS2d_Mantid.add_component("bender1", "Guide_gravity") bender1.w1 = .0355 bender1.h1 = .020 bender1.w2 = .0355 bender1.h2 = .020 bender1.l = 0.3245 bender1.nslit = 9 bender1.d = .0005 bender1.mleft = 1 bender1.mright = 3 bender1.mtop = 1 bender1.mbottom = 1 bender1.wavy = 0 bender1.set_AT(['0', ' 0', ' 3.7'], RELATIVE="isis_source") bender1.set_ROTATED(['0.0', ' 0.137099', ' 0.0'], RELATIVE="isis_source") bender2 = ISIS_SANS2d_Mantid.add_component("bender2", "Guide_gravity") bender2.w1 = .0355 bender2.h1 = .020 bender2.w2 = .0355 bender2.h2 = .020 bender2.l = 0.3245 bender2.nslit = 9 bender2.d = .0005 bender2.mleft = 1 bender2.mright = 3 bender2.mtop = 1 bender2.mbottom = 1 bender2.wavy = 0 bender2.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender1") bender2.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender1") bender3 = ISIS_SANS2d_Mantid.add_component("bender3", "Guide_gravity") bender3.w1 = .0355 bender3.h1 = .020 bender3.w2 = .0355 bender3.h2 = .020 bender3.l = 0.3245 bender3.nslit = 9 bender3.d = .0005 bender3.mleft = 1 bender3.mright = 3 bender3.mtop = 1 bender3.mbottom = 1 bender3.wavy = 0 bender3.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender2") bender3.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender2") bender4 = ISIS_SANS2d_Mantid.add_component("bender4", "Guide_gravity") bender4.w1 = .0355 bender4.h1 = .020 bender4.w2 = .0355 bender4.h2 = .020 bender4.l = 0.3245 bender4.nslit = 9 bender4.d = .0005 bender4.mleft = 1 bender4.mright = 3 bender4.mtop = 1 bender4.mbottom = 1 bender4.wavy = 0 bender4.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender3") bender4.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender3") bender5 = ISIS_SANS2d_Mantid.add_component("bender5", "Guide_gravity") bender5.w1 = .0355 bender5.h1 = .020 bender5.w2 = .0355 bender5.h2 = .020 bender5.l = 0.3245 bender5.nslit = 9 bender5.d = .0005 bender5.mleft = 1 bender5.mright = 3 bender5.mtop = 1 bender5.mbottom = 1 bender5.wavy = 0 bender5.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender4") bender5.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender4") bender6 = ISIS_SANS2d_Mantid.add_component("bender6", "Guide_gravity") bender6.w1 = .0355 bender6.h1 = .020 bender6.w2 = .0355 bender6.h2 = .020 bender6.l = 0.3245 bender6.nslit = 9 bender6.d = .0005 bender6.mleft = 1 bender6.mright = 3 bender6.mtop = 1 bender6.mbottom = 1 
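# wavy = 0 models the mirror surfaces as perfectly flat (no waviness); benders 1-10 are identical
# 0.3245 m Guide_gravity segments, each rotated by roughly 0.1375 deg to approximate a curved guide.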
bender6.wavy = 0 bender6.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender5") bender6.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender5") bender7 = ISIS_SANS2d_Mantid.add_component("bender7", "Guide_gravity") bender7.w1 = .0355 bender7.h1 = .020 bender7.w2 = .0355 bender7.h2 = .020 bender7.l = 0.3245 bender7.nslit = 9 bender7.d = .0005 bender7.mleft = 1 bender7.mright = 3 bender7.mtop = 1 bender7.mbottom = 1 bender7.wavy = 0 bender7.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender6") bender7.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender6") bender8 = ISIS_SANS2d_Mantid.add_component("bender8", "Guide_gravity") bender8.w1 = .0355 bender8.h1 = .020 bender8.w2 = .0355 bender8.h2 = .020 bender8.l = 0.3245 bender8.nslit = 9 bender8.d = .0005 bender8.mleft = 1 bender8.mright = 3 bender8.mtop = 1 bender8.mbottom = 1 bender8.wavy = 0 bender8.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender7") bender8.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender7") bender9 = ISIS_SANS2d_Mantid.add_component("bender9", "Guide_gravity") bender9.w1 = .0355 bender9.h1 = .020 bender9.w2 = .0355 bender9.h2 = .020 bender9.l = 0.3245 bender9.nslit = 9 bender9.d = .0005 bender9.mleft = 1 bender9.mright = 3 bender9.mtop = 1 bender9.mbottom = 1 bender9.wavy = 0 bender9.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender8") bender9.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender8") bender10 = ISIS_SANS2d_Mantid.add_component("bender10", "Guide_gravity") bender10.w1 = .0355 bender10.h1 = .020 bender10.w2 = .0355 bender10.h2 = .020 bender10.l = 0.3245 bender10.nslit = 9 bender10.d = .0005 bender10.mleft = 1 bender10.mright = 3 bender10.mtop = 1 bender10.mbottom = 1 bender10.wavy = 0 bender10.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender9") bender10.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender9") lmonb = ISIS_SANS2d_Mantid.add_component("lmonb", "L_monitor") lmonb.nL = 140 lmonb.filename = "\"lmonB.dat\"" lmonb.xmin = -0.018 lmonb.xmax = 0.018 lmonb.ymin = -0.018 lmonb.ymax = 0.018 lmonb.Lmin = 0.0 lmonb.Lmax = 17.0 lmonb.set_AT(['0.0', ' 0.0', ' 0.326'], RELATIVE="bender10") psd2 = ISIS_SANS2d_Mantid.add_component("psd2", "PSD_monitor") psd2.nx = 100 psd2.ny = 100 psd2.filename = "\"psd2.dat\"" psd2.xmin = -0.025 psd2.xmax = 0.025 psd2.ymin = -0.025 psd2.ymax = 0.025 psd2.set_AT(['0.0', ' 0.0', ' 0.001'], RELATIVE="lmonb") guide_in = ISIS_SANS2d_Mantid.add_component("guide_in", "Slit") guide_in.xmin = -0.015 guide_in.xmax = 0.015 guide_in.ymin = -.01 guide_in.ymax = +.01 guide_in.set_AT(['0', ' 0', ' 0.2845'], RELATIVE="psd2") guide_straight1 = ISIS_SANS2d_Mantid.add_component("guide_straight1", "Guide_gravity") guide_straight1.w1 = .030 guide_straight1.h1 = .020 guide_straight1.w2 = .030 guide_straight1.h2 = .020 guide_straight1.l = 1.985 guide_straight1.mleft = 1 guide_straight1.mright = 1 guide_straight1.mtop = 1 guide_straight1.mbottom = 1 guide_straight1.wavy = 0 guide_straight1.set_AT(['0', ' 0', ' 0.0075'], RELATIVE="guide_in") guide_straight2 = ISIS_SANS2d_Mantid.add_component("guide_straight2", "Guide_gravity") guide_straight2.w1 = .030 guide_straight2.h1 = .020 guide_straight2.w2 = .030 guide_straight2.h2 = .020 guide_straight2.l = 1.985 guide_straight2.mleft = 1 guide_straight2.mright = 1 guide_straight2.mtop = 1 guide_straight2.mbottom = 1 guide_straight2.wavy = 0 guide_straight2.set_AT(['0', ' 0', ' 2.000'], RELATIVE="guide_straight1") guide_straight3 = ISIS_SANS2d_Mantid.add_component("guide_straight3", "Guide_gravity") guide_straight3.w1 = .030 
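# For Guide_gravity, w1/h1 and w2/h2 are the entrance and exit width/height in metres, l is the
# segment length, and mleft/mright/mtop/mbottom are the supermirror m-values of the four walls.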
guide_straight3.h1 = .020 guide_straight3.w2 = .030 guide_straight3.h2 = .020 guide_straight3.l = 1.985 guide_straight3.mleft = 1 guide_straight3.mright = 1 guide_straight3.mtop = 1 guide_straight3.mbottom = 1 guide_straight3.wavy = 0 guide_straight3.set_AT(['0', ' 0', ' 2.000'], RELATIVE="guide_straight2") guide_straight4 = ISIS_SANS2d_Mantid.add_component("guide_straight4", "Guide_gravity") guide_straight4.w1 = .030 guide_straight4.h1 = .020 guide_straight4.w2 = .030 guide_straight4.h2 = .020 guide_straight4.l = 1.985 guide_straight4.mleft = 1 guide_straight4.mright = 1 guide_straight4.mtop = 1 guide_straight4.mbottom = 1 guide_straight4.wavy = 0 guide_straight4.set_AT(['0', ' 0', ' 2.000'], RELATIVE="guide_straight3") psd3 = ISIS_SANS2d_Mantid.add_component("psd3", "PSD_monitor") psd3.nx = 100 psd3.ny = 100 psd3.filename = "\"psd3.dat\"" psd3.xmin = -0.030 psd3.xmax = 0.030 psd3.ymin = -0.030 psd3.ymax = 0.030 psd3.set_AT(['0.0', ' 0.0', ' 7.999'], RELATIVE="guide_in") aperture1 = ISIS_SANS2d_Mantid.add_component("aperture1", "Slit") aperture1.xwidth = "A1w" aperture1.yheight = "A1h" aperture1.set_AT(['0', ' 0', ' 8.000'], RELATIVE="guide_in") lmonitor2 = ISIS_SANS2d_Mantid.add_component("lmonitor2", "L_monitor") lmonitor2.nL = 140 lmonitor2.filename = "\"lmonitor2.dat\"" lmonitor2.xmin = -0.0155 lmonitor2.xmax = 0.0155 lmonitor2.ymin = -0.0105 lmonitor2.ymax = 0.0105 lmonitor2.Lmin = 0.0 lmonitor2.Lmax = 17.0 lmonitor2.set_AT(['0.0', ' 0.0', ' 2.651'], RELATIVE="aperture1") S6 = ISIS_SANS2d_Mantid.add_component("S6", "Slit") S6.radius = "S6" S6.set_AT(['0', ' 0', ' 2.800'], RELATIVE="aperture1") sourceMantid = ISIS_SANS2d_Mantid.add_component("sourceMantid", "Arm") sourceMantid.set_AT(['0', ' 0', ' -18.087'], RELATIVE="S6") APERTURE2 = ISIS_SANS2d_Mantid.add_component("APERTURE2", "Slit") APERTURE2.radius = "A2" APERTURE2.set_AT(['0', ' 0', ' L1 '], RELATIVE="aperture1") lmon2 = ISIS_SANS2d_Mantid.add_component("lmon2", "L_monitor") lmon2.nL = 140 lmon2.filename = "\"Edet0.dat\"" lmon2.xmin = -0.0075 lmon2.xmax = 0.0075 lmon2.ymin = -0.0075 lmon2.ymax = 0.0075 lmon2.Lmin = 0.0 lmon2.Lmax = 17.0 lmon2.set_AT(['0.0', ' 0.0', ' 0.285'], RELATIVE="APERTURE2") psd4 = ISIS_SANS2d_Mantid.add_component("psd4", "PSD_monitor") psd4.nx = 100 psd4.ny = 100 psd4.filename = "\"psd4.dat\"" psd4.xmin = -0.0075 psd4.xmax = 0.0075 psd4.ymin = -0.0075 psd4.ymax = 0.0075 psd4.set_AT(['0.0', ' 0.0', ' 0.286'], RELATIVE="APERTURE2") psd5 = ISIS_SANS2d_Mantid.add_component("psd5", "PSD_monitor") psd5.nx = 100 psd5.ny = 100 psd5.filename = "\"psd5.dat\"" psd5.xmin = -0.0075 psd5.xmax = 0.0075 psd5.ymin = -0.0075 psd5.ymax = 0.0075 psd5.restore_neutron = 1 psd5.set_AT(['0.0', ' 0.0', ' 0.18'], RELATIVE="psd4") sampleMantid = ISIS_SANS2d_Mantid.add_component("sampleMantid", "SANS_benchmark2") sampleMantid.xwidth = 0.01 sampleMantid.yheight = 0.01 sampleMantid.zthick = 0.001 sampleMantid.model = "model_nr" sampleMantid.dsdw_inc = 0.0 sampleMantid.sc_aim = 1.0 sampleMantid.sans_aim = 1.00 sampleMantid.singlesp = 1.0 sampleMantid.append_EXTEND("if (!SCATTERED) ABSORB;") sampleMantid.set_SPLIT("") sampleMantid.set_AT(['0', ' 0', ' 0.2'], RELATIVE="psd4") detector = ISIS_SANS2d_Mantid.add_component("detector", "PSD_monitor") detector.nx = 200 detector.ny = 200 detector.filename = "\"PSD.dat\"" detector.xmin = -0.5 detector.xmax = 0.5 detector.ymin = -0.5 detector.ymax = 0.5 detector.restore_neutron = 1 detector.set_AT(['0', ' 0', ' 3.9'], RELATIVE="sampleMantid") nD_Mantid_1 = 
ISIS_SANS2d_Mantid.add_component("nD_Mantid_1", "Monitor_nD") nD_Mantid_1.xmin = -0.5 nD_Mantid_1.xmax = 0.5 nD_Mantid_1.ymin = -0.5 nD_Mantid_1.ymax = 0.5 nD_Mantid_1.restore_neutron = 1 nD_Mantid_1.options = "\"mantid square x limits=[-0.5 0.5] bins=128 y limits=[-0.5 0.5] bins=128,, neutron pixel t,, list all neutrons\"" nD_Mantid_1.filename = "\"bank01_events.dat\"" nD_Mantid_1.set_AT(['0', ' 0', ' 3.9'], RELATIVE="sampleMantid") lmon_post = ISIS_SANS2d_Mantid.add_component("lmon_post", "L_monitor") lmon_post.nL = 140 lmon_post.filename = "\"lmonitor_post.dat\"" lmon_post.xmin = -0.5 lmon_post.xmax = 0.5 lmon_post.ymin = -0.5 lmon_post.ymax = 0.5 lmon_post.Lmin = 0.0 lmon_post.Lmax = 17.0 lmon_post.restore_neutron = 1 lmon_post.set_AT(['0.0', ' 0.0', ' 3.9'], RELATIVE="sampleMantid")McStas Simulation Run McStasdata = ISIS_SANS2d_Mantid.run_full_instrument(foldername="data_1E7_nr15", parameters={"model_nr":15}, ncount=1E7) plot = plotter.make_sub_plot(data)number of elements in data list = 12 Plotting data with name nD_Mantid_1 Plotting data with name lmon1 Plotting data with name psd1 Plotting data with name lmonb Plotting data with name psd2 Plotting data with name psd3 Plotting data with name lmonitor2 Plotting data with name lmon2 Plotting data with name psd4 Plotting data with name psd5 Plotting data with name detector Plotting data with name lmon_postMcStas IDF for Mantidimport subprocess cmd_idf = "mcdisplay.pl ISIS_SANS2d_Mantid_generated.instr --format=Mantid -n0 model_nr=5" subprocess.call([cmd_idf], shell=True)Run McStas with NeXusdata_nexus = ISIS_SANS2d_Mantid.run_full_instrument(foldername="data_1E7_nexus_nr15", parameters={"model_nr":15}, ncount=1E7, custom_flags=" -c --format=NeXus ") print(data_nexus)data_1E7_nexus_nr15/mccode.h5Mantid Setup Mantidfrom mantid.simpleapi import */home/dram_demo/miniconda3/envs/dmsc-demo-panosc/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. 
from ._conv import register_converters as _register_convertersLoad event data# Load McStas event data from section 1 ws = Load(data_nexus)Rebin TOFsample = Rebin(ws[0],'1000', False)Convert to lambdasample = ConvertUnits(sample, 'Wavelength')Rebin lambdabinning = '1.75,0.1,16.5' sample = Rebin(sample, binning)Load McStas normalization spectrummcstasMonitor = CloneWorkspace(mtd['Edet0.dat_ws'])Convert McStas data to Mantid histogrammcstasMonitor = ConvertToHistogram(mcstasMonitor) mcstasMonitor = Rebin(mcstasMonitor, binning) monitor = CreateWorkspace(mcstasMonitor.dataX(0), mcstasMonitor.dataY(0), mcstasMonitor.dataE(0), UnitX='Wavelength')Setup q gridbinningMASK = '0.0035,0.001,0.4'Reduce to I(q) and normalizereduced_norm = Q1D(sample, binningMASK, WavelengthAdj='monitor')Save fileSaveAscii(InputWorkspace='reduced_norm', Filename='Mantid_reduced.dat', Separator='Tab', ScientificFormat=True)Plot I(q)%matplotlib notebook import matplotlib.pyplot as plt import numpy as np # Remove zero and inf data_q = np.column_stack((reduced_norm.readX(0)[:-1], reduced_norm.readY(0))) data_q = data_q[np.logical_not(np.isnan(data_q[:, 1]))] data_q = data_q[np.logical_not(np.isinf(data_q[:, 1]))] data_q = data_q[data_q[:, 1] != 0] plt.plot(data_q[:,0], data_q[:,1], 'k-', lw=2) plt.yscale('log') plt.xlabel('q [ 1/Å ]') plt.ylabel('I(q) [a.u.]') # Save data for SasView np.savetxt("Iq_cleaned.dat", data_q, header=" ", comments='') # Save data for SasView with errors data_q_err = np.column_stack((data_q[:,0], data_q[:,1], 0.2*data_q[:,1])) np.savetxt("Iq_cleaned_err.dat", data_q_err, header=" ", comments='') Iq_file = "Iq_cleaned_err.dat"SasView Setup sasmodels and SasViewfrom sasmodels.core import load_model from sasmodels.bumps_model import Model, Experiment from sasmodels.data import load_data from bumps.names import * from bumps.fitters import fit from bumps.formatnum import format_uncertainty from bumps.formatnum import * from bumps.dream.stats import var_stats, format_varsSetup fit function Without polydispersiondef fit_data(filename, fit_method, bumps_samples, bumps_burn): """ Run SasView SasModels fit :return: """ # Load data to fit test_data = load_data(filename) # Set up fit model kernel = load_model('sphere') pars = dict(radius=150, background=0.0, scale=1E-6, sld=7.0, sld_solvent=1.0) model = Model(kernel, **pars) # SET THE FITTING PARAMETERS model.radius.range(10, 200) model.scale.range(1E-7, 1E0) model.background.range(0, 1000) M = Experiment(data=test_data, model=model) problem = FitProblem(M) print("Initial chisq", problem.chisq_str()) result = fit(problem, method=fit_method, samples=bumps_samples, burn=bumps_burn) draw = result.state.draw(portion=1.0) all_vstats = var_stats(draw) return all_vstatsWith polydispersiondef fit_data_pd(filename, fit_method, bumps_samples, bumps_burn): """ Run SasView SasModels fit :return: """ # Load data to fit test_data = load_data(filename) # Set up fit model kernel = load_model('sphere') pars = dict(radius=150, background=0.0, scale=1E-6, sld=7.0, sld_solvent=1.0, radius_pd=0.03, radius_pd_n=35, radius_pd_nsigma=3) model = Model(kernel, **pars) # SET THE FITTING PARAMETERS model.radius.range(10, 200) model.scale.range(1E-7, 1E1) model.background.range(0, 100) model.radius_pd.range(0.0, 0.1) M = Experiment(data=test_data, model=model) problem = FitProblem(M) print("Initial chisq", problem.chisq_str()) result = fit(problem, method=fit_method, samples=bumps_samples, burn=bumps_burn) draw = result.state.draw(portion=1.0) all_vstats = var_stats(draw) return
all_vstatsSetup print all results functiondef print_all_results(bumps_fit_result): """ Print parameters for fit :param bumps_fit_result: :return: """ for v in bumps_fit_result: print(v.label, v.mean, v.median, v.best, v.p68[0], v.p68[1], v.p95[0], v.p95[1])Setup print parameter result functiondef print_results(bumps_fit_result, parmeter_to_print, conf_interval): """ SasModels "true" parmeter can be [v.mean, v.median, v.best] :param bumps_fit_result: :param parmeter_to_print: :param conf_interval: :return: """ for v in bumps_fit_result: if v.label == parmeter_to_print: if conf_interval: print(parmeter_to_print + ': ' + str(v.best) + ',' + str(0.5*(v.p95[1]-v.p95[0])) + '\n') else: print(parmeter_to_print + ': ' + v.best + ',' + 0.5*(v.p68[1]-v.p68[0]) + '\n')Fit data# Fit data from just made McStas-Mantid workflow. With polydisersion. fit_result = fit_data('Iq_cleaned_err.dat', 'dream', 10000, 100) # Fit data from just made McStas-Mantid workflow. With polydisersion. fit_result_pd = fit_data_pd('Iq_cleaned_err.dat', 'dream', 10000, 100)Initial chisq 24.504(12) # steps: 250, # draws: 10000Print resultsprint_results(fit_result, 'radius', True) print_results(fit_result, 'scale', True) print_results(fit_result, 'background', True) print_results(fit_result_pd, 'radius', True) print_results(fit_result_pd, 'scale', True) print_results(fit_result_pd, 'background', True) print_results(fit_result_pd, 'radius_pd', True)radius: 150.62558534662642,0.9583572344242555 scale: 6.454342453383203e-05,3.1361001278749804e-06 background: 7.573610196899322e-10,8.580608061779222e-08 radius_pd: 0.03672939600521507,0.006375012101543617Plot data and model - Without polydispersion# Plot McStas-Mantid data and SasView model. Without polydispersion. from numpy import logspace, linspace from matplotlib import pyplot as plt from sasmodels.core import load_model from sasmodels.direct_model import call_kernel # Load data q2, Iq2, Iqerr2 = np.loadtxt('Iq_cleaned_err.dat', unpack=True, skiprows=2) model = load_model('sphere') q =linspace(0.001, 0.5, num=200) kernel = model.make_kernel([q]) Iq = call_kernel(kernel, dict(radius=148.18, scale=4.16e-05, background=5.78e-07, sld=7.0, sld_solvent=1.0)) plt.semilogy(q, Iq, label='SasView') plt.semilogy(q2, Iq2, label='McStas_Mantid') plt.xlabel('q (1/A)') plt.ylabel('I(q)') plt.title('McStas-Mantid: Sphere radius 150 Å. Without polydispersion.') plt.legend(loc='upper right') plt.show()Plot data and model - With polydispersion# Plot McStas-Mantid data and SasView model. With polydispersion to mimic instrument resolution effects. # Still same mono-disperse McStas scattering kernel. from numpy import logspace, linspace from matplotlib import pyplot as plt from sasmodels.core import load_model from sasmodels.direct_model import call_kernel # Load data q2, Iq2, Iqerr2 = np.loadtxt('Iq_cleaned_err.dat', unpack=True, skiprows=2) model = load_model('sphere') q =linspace(0.001, 0.5, num=200) kernel = model.make_kernel([q]) Iq = call_kernel(kernel, dict(radius=150.62, radius_pd=0.036,radius_pd_n=35 , scale=6.45e-05, background=2e-08, sld=7.0, sld_solvent=1.0)) plt.semilogy(q, Iq, label='SasView') plt.semilogy(q2, Iq2, label='McStas_Mantid') plt.xlabel('q (1/A)') plt.ylabel('I(q)') plt.title('McStas-Mantid: Sphere radius 150 Å. With ploydispersion.') plt.legend(loc='upper right') plt.show()Dataset Statistics for Compound Disease Sentences This notebook is designed to show statistics on data extracted from pubmed. 
The following cells below here are needed to set up the environment.%load_ext autoreload %autoreload 2 %matplotlib inline from collections import Counter from itertools import product import os import pickle import sys sys.path.append(os.path.abspath('../../../modules')) import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from tqdm import tqdm_notebook sns.set(rc={'figure.figsize':(12,6), "font.size":17}) #Set up the environment username = "danich1" password = "" dbname = "pubmeddb" #Path subject to change for different os database_str = "postgresql+psycopg2://{}:{}@/{}?host=/var/run/postgresql".format(username, password, dbname) os.environ['SNORKELDB'] = database_str from snorkel import SnorkelSession session = SnorkelSession() from snorkel.models import candidate_subclass, Candidate CompoundDisease = candidate_subclass('CompoundDisease', ['Compound', 'Disease']) from utils.notebook_utils.dataframe_helper import write_candidates_to_excel, make_sentence_dfLoad and Merge DataFramesedge_level_df = ( pd.read_table("input/compound_treats_disease.tsv.xz") .drop(["disease", "sources", "resource", "resource_id"], axis=1) ) edge_level_df.head(2) sql=''' select cand_id as candidate_id, doid_id, drugbank_id, sentence_id, text, array_length(words, 1) as sen_length from ( select cand_id, "Disease_cid" as doid_id, "Compound_cid" as drugbank_id, sentence_id from ( select compound_disease.id as "cand_id", compound_disease."Disease_id", compound_disease."Disease_cid", compound_disease."Compound_cid", candidate.split from compound_disease inner join candidate on compound_disease.id=candidate.id ) as candidate_splits inner join span on candidate_splits."Disease_id"=span.id ) as candidate_sen inner join sentence on candidate_sen.sentence_id=sentence.id ''' candidate_sentence_df = pd.read_sql(sql, database_str) candidate_sentence_df.head(2) total_candidates_df= ( edge_level_df .merge(candidate_sentence_df, on=["doid_id", "drugbank_id"]) ) total_candidates_df.head(2) dev_candidates = ( session .query(CompoundDisease) .filter( CompoundDisease.id.in_( total_candidates_df .query("split==10") .sample(10000, random_state=100) .candidate_id .tolist() ) ) .all() ) dev_df = make_sentence_df(dev_candidates) dev_df.head(2) test_candidates = ( session .query(CompoundDisease) .filter( CompoundDisease.id.in_( total_candidates_df .query("split==11") # Blacklist ethanol and alcohol dependence # These entities sample far too many sentences .query("drugbank_id!='DB00898'&doid_id!='DOID:0050741'") .sample(10000, random_state=120) .candidate_id .tolist() ) ) .all() ) test_df = make_sentence_df(test_candidates) test_df.head(2) #write_candidates_to_excel(dev_df, "../data/sentences/sentence_labels_dev.xlsx") #write_candidates_to_excel(test_df, "../data/sentences/sentence_labels_test.xlsx")Distribution of Sentence Lengthsns.distplot(total_candidates_df["sen_length"], rug=False) total_candidates_df["sen_length"].describe().astype(int)Something seems fishy about this distribution: some sentences contain hundreds of words (tokens), which doesn't make sense intuitively, since the average sentence contains only 36 words. A possible reason for this abnormality is a parsing error, so let's take a look at one of these extremely long sentences.total_candidates_df.query("sen_length==798").iloc[0]["text"]The above sentence turns out to be a long list of drugs that a journal issue will cover.
This isn't helpful in our case, because we want to look at sentences that contain compounds and diseases; therefore, we should remove sentences of this nature by defining a cutoff of 83 words or fewer.sns.distplot(total_candidates_df.query("sen_length < 83+1")["sen_length"], rug=False) total_candidates_df.query("sen_length < 83+1")["sen_length"].describe().astype(int)This distribution looks a bit more reasonable compared to the above distribution. After filtering out the outliers, we still have a plethora of sentences, on the order of 1.1 million.total_candidates_df.to_csv("output/all_ctd_candidates.tsv.xz", sep="\t", compression="xz", index=False)making an image by pixelsb=77 g=88 r=126 # here r, g, and b are constant 8-bit channel values; the commented *256 scaling below is only needed when they are 0-1 float arrays rgbArray = np.zeros((28,28,3), 'uint8') rgbArray[..., 0] = r#*256 rgbArray[..., 1] = g#*256 rgbArray[..., 2] = b#*256 img = Image.fromarray(rgbArray) #img.save('myimg.jpeg') imgplot = plt.imshow(img)Loading Datasettest_size_param = 0.2 test_size =int((145185*20)/100) dataset = pd.read_csv('train_all.csv') #, encoding='utf-8' ,index_col=0) dataset.head() # Note the class imbalance: predicting class 2 for every sample would already give more than 70% accuracy without any learning dataset['class'].value_counts(normalize=True) dataset["class"].value_counts(normalize=True).plot(kind="bar", yticks=[0.0, 0.2, 0.4, 0.6, 0.8, 1.0], rot=0) #plt.savefig('unbalanced_data.png', bbox_inches='tight')shuffle the datasetfrom sklearn.utils import shuffle shuffled_dataset = shuffle(dataset, random_state = 0) shuffled_dataset.head(10) training_set = shuffled_dataset[test_size:] testing_set = shuffled_dataset[0:test_size] X_test = testing_set.iloc[:,1:4] Y_test = testing_set.iloc[:,4] X_train = training_set.iloc[:,1:4] Y_train = training_set.iloc[:,4] from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis qda = QuadraticDiscriminantAnalysis(store_covariance=True) qda.fit(X_train, Y_train) ''' X_pred = pd.read_csv('test_all.csv') y_pred = qda.predict(X_pred.iloc[:, 1:4]) my_submission = pd.DataFrame({'Id': X_pred.iloc[:,0], 'class': y_pred}) # you could use any filename. We choose submission here my_submission.to_csv('submission0.csv', index=False) ''' print("QDA Priors : "+str(qda.priors_)) print("QDA Decision Function : "+str(qda.decision_function(X_train))) print("QDA Score : "+str(qda.score(X_test, Y_test)))QDA Priors : [0.28040706 0.71959294] QDA Decision Function : [48.54728706 51.56429404 43.53913177 ...
11.53488716 47.42043019 41.83218136] QDA Score : 0.9843303371560423Building QuadraticDiscriminantAnalysis (QDA)#You have to separate the two classes #Skin class class1 = training_set[training_set['class'] == 1] #non_skin class class2 = training_set[training_set['class'] == 2] print(class1.describe()) print(class2.describe()) #Calculate the mean of each class class1_mu = (class1[['B', 'G', 'R']].mean(axis=0)).values class2_mu = (class2[['B', 'G', 'R']].mean(axis=0)).values #Calculate the covariance matrix of each class class1_sigma = (class1[['B', 'G', 'R']].cov()).values class2_sigma = (class2[['B', 'G', 'R']].cov()).values #Invert the covariance matrix of each class class1_sigmaInv =inv(class1_sigma) class2_sigmaInv =inv(class2_sigma) #Calculate the threshold p1 = class1.shape[0]/training_set.shape[0] p2 = class2.shape[0]/training_set.shape[0] th = math.log(p2/p1) t1=np.dot(np.dot(class2_mu.T, class2_sigmaInv),class2_mu) t2=np.dot(np.dot(class1_mu.T, class1_sigmaInv),class1_mu) term=t1-t2Evaluate the QDAdef score( x ): # quadratic term uses the difference of the two inverse covariance matrices quadratic = np.dot(np.dot(x.T,(class2_sigmaInv - class1_sigmaInv)),x) linear = 2*np.dot(x.T,np.reshape(np.dot(class2_sigmaInv,class2_mu)-np.dot(class1_sigmaInv,class1_mu),(3,1))) return quadratic-linear+term def classify (h_x): if(h_x > th): return 1 else: return 2 y_score = np.apply_along_axis( score, axis=1, arr=X_test.values ) y_pred =np.apply_along_axis(classify, axis=1, arr= y_score) print("Accuracy: "+str(sum(y_pred==Y_test)*100/X_test.shape[0])) from sklearn.metrics import confusion_matrix cm = confusion_matrix(Y_test, y_pred) print("Confusion matrix of the implemented QDA:") print(cm) from sklearn.metrics import confusion_matrix lin_cm = confusion_matrix(Y_test, qda.predict(X_test)) print("Confusion matrix of the built-in QDA:") print(lin_cm)Confusion matrix of the built-in QDA: [[ 7739 379] [ 76 20843]]Building KNNneighbors = 3 knn = KNeighborsClassifier(n_neighbors=neighbors) knn.fit(X_train, Y_train) y_predKnn = knn.predict(X_test) cm_knn = confusion_matrix(Y_test, y_predKnn) print("Confusion matrix of the Knn model:") print(cm_knn)Confusion matrix of the Knn model: [[ 8118 0] [ 9 20910]]Building LogisticRegressionfrom sklearn.linear_model import LogisticRegression logreg = LogisticRegression() logreg.fit(X_train, Y_train) print('Accuracy of Logistic regression classifier on training set: {:.2f}' .format(logreg.score(X_train, Y_train))) print('Accuracy of Logistic regression classifier on test set: {:.2f}' .format(logreg.score(X_test, Y_test)))Accuracy of Logistic regression classifier on training set: 0.93 Accuracy of Logistic regression classifier on test set: 0.93Building SVMfrom sklearn.svm import SVC clf = SVC() clf.fit(X_train, Y_train) print ('SVM classifier score : ', clf.score(X_test, Y_test)) print ('Pred label : ', clf.predict(X_test))SVM classifier score : 0.9954885146537177 Pred label : [1 2 2 ... 2 1 1]Visualizations countplotimport seaborn as sns import pylab as pl from pandas.tools.plotting import scatter_matrix from matplotlib import cm sns.countplot(dataset['class'],label="Count") plt.show()C:\Users\Ali\Anaconda3\lib\site-packages\seaborn\categorical.py:1428: FutureWarning: remove_na is deprecated and is a private function. Do not use.
stat_data = remove_na(group_data)Box plotdataset.drop(['class', 'id'], axis=1).plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False, figsize=(9,9), title='Box Plot for each input variable') plt.savefig('Box_plot.png') plt.show()Histogram plotdataset.drop(['class', 'id'] ,axis=1).hist(bins=30, figsize=(9,9)) pl.suptitle("Histogram for each numeric input variable") plt.savefig('RGB_hist') plt.show()Scatter matrix plotfeature_names = ['R', 'G', 'B'] X = dataset[feature_names] y = dataset['class'] cmap = cm.get_cmap('gnuplot') scatter = pd.scatter_matrix(X, c = y, marker = 'o', s=40, hist_kwds={'bins':15}, figsize=(9,9), cmap = cmap) plt.suptitle('Scatter-matrix for each input variable') plt.savefig('RGB_scatter_matrix') plt.show()C:\Users\Ali\Anaconda3\lib\site-packages\ipykernel_launcher.py:5: FutureWarning: pandas.scatter_matrix is deprecated. Use pandas.plotting.scatter_matrix instead """visualize the relationship between the features and the response using scatterplotssns.pairplot(dataset, x_vars=['R', 'G', 'B'], y_vars='class', size=3)#, aspect=0.7 plt.show()Histogram of predicted probabilities# 8 bins plt.hist(y_pred, bins=8) plt.title('Histogram of predicted probabilities') plt.xlabel('Predicted probability of classes') plt.ylabel('Frequency') plt.show()Compare Algorithms# prepare models models = [] models.append(('LR', LogisticRegression())) models.append(('KNN', KNeighborsClassifier())) models.append(('QDA', QuadraticDiscriminantAnalysis())) models.append(('SVM', SVC())) neighbors = 3 # evaluate each model in turn results = [] names = [] scoring = 'accuracy' for name, model in models: kfold = model_selection.KFold(n_splits=10, random_state= 7) cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring) results.append(cv_results) names.append(name) msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std()) print(msg)LR: 0.933534 (0.002647) KNN: 0.999707 (0.000189) QDA: 0.982867 (0.000995) SVM: 0.994559 (0.000798)Boxplot for algorithm comparisonfig = plt.figure(figsize=(10,10)) fig.suptitle('Algorithm Comparison') ax = fig.add_subplot(111) plt.boxplot(results) ax.set_xticklabels(names) #plt.savefig('algorithms_comparision') plt.show()![](https://delftswa.gitbooks.io/desosa-2017/content/matplotlib/images-matplotlib/matplotlib.png) - [**What is Matplotlib?**](What-is-Matplotlib?)- [**General Matplotlib Tips**](General-Matplotlib-Tips) - [**Importing Matplotlib**](Importing-Matplotlib) - [**Setting Styles**](Setting-Styles) - [**How to Display Your Plots**](How-to-Display-Your-Plots) - [**Saving Figures to File**](Saving-Figures-to-File) - [**Two Interfaces for the Price of One**](Two-Interfaces-for-the-Price-of-One)- [**Multiple Subplots**](Multiple-Subplots) - [**plt.axes: Subplots by Hand**](plt.axes:-Subplots-by-Hand) - [**plt.subplot: Simple Grids of Subplots**](plt.subplot:-Simple-Grid-of-Subplots) - [**plt.subplots: The Whole Grid in One Go**](plt.subplots:-The-Whole-Grid-in-One-Go) - [**Simple Line Plots**](Simple-Line-Plots) - [**Adjusting the Plot: Axes Line Colors and Styles**](Adjusting-the-Plot:-Axes-Line-Colors-and-Styles) - [**Adjusting the Plot: Axes Limits**](Adjusting-the-Plot:-Axes-Limits) - [**Labeling Plots**](Labeling-Plots) - [**Aside: Matplotlib Gotchas**](Aside:-Matplotlib-Gotchas)- [**Simple Scatter Plots**](Simple-Scatter-Plots) - [**Scatter Plots with plt.plot**](Scatter-Plots-with-plt.plot) - [**Scatter Plots with plt.scatter**](Scatter-Plots-with-plt.scatter) - [**Histograms**](Histograms) What 
is Matplotlib? We'll now take an in-depth look at the [Matplotlib](https://matplotlib.org/) **package for visualization in Python**.Matplotlib is a **multi-platform** data visualization library built on **NumPy** arrays, and designed to work with the broader **SciPy** stack.It was conceived by in 2002, originally as a patch to IPython for enabling interactive MATLAB-style plotting via [gnuplot](http://www.gnuplot.info/) from the IPython command line.IPython's creator, , was at the time scrambling to finish his PhD, and let John know he wouldn’t have time to review the patch for several months.John took this as a cue to set out on his own, and the Matplotlib package was born, with version 0.1 released in 2003.It received an early boost when it was adopted as the plotting package of choice of the Space Telescope Science Institute (the folks behind the Hubble Telescope), which financially supported Matplotlib’s development and greatly expanded its capabilities.In recent years, however, the interface and style of Matplotlib have begun to show their age.Still, I'm of the opinion that we cannot ignore Matplotlib's strength as a well-tested, cross-platform graphics engine.Recent Matplotlib versions make it relatively easy to set new global plotting styles (see [Customizing Matplotlib: Configurations and Style Sheets](04.11-Settings-and-Stylesheets.ipynb)), and people have been developing new packages that build on its powerful internals to drive Matplotlib via cleaner, more modern APIs—for example, **Seaborn** (discussed in [Visualization With Seaborn](04.14-Visualization-With-Seaborn.ipynb)), [ggpy](http://yhat.github.io/ggpy/), [HoloViews](http://holoviews.org/), [Altair](http://altair-viz.github.io/), and **even Pandas** itself can be used as wrappers around Matplotlib's API.Even with wrappers like these, **it is still often useful to dive into Matplotlib's syntax to adjust the final plot output.**For this reason, I believe that Matplotlib itself will remain a vital piece of the data visualization stack, even if new tools mean the community gradually moves away from using the Matplotlib API directly. General Matplotlib TipsBefore we dive into the details of creating visualizations with Matplotlib, there are a few useful things you should know about using the package. Importing MatplotlibJust as we use the ``np`` shorthand for NumPy and the ``pd`` shorthand for Pandas, we will use some standard shorthands for Matplotlib imports:import matplotlib as mpl # esto nunca usa import matplotlib.pyplot as plt #%matplotlib inlineThe ``plt`` interface is what we will use most often, as we shall see throughout this chapter. 
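The ``mpl`` alias is flagged in the comment above as never being used directly in this notebook; as a quick illustration of why importing it is still conventional, here is a hedged sketch of the kind of package-level configuration it exposes (the specific rcParams values are arbitrary examples, not settings used later in this chapter):

```python
import matplotlib as mpl
import matplotlib.pyplot as plt

print(mpl.__version__)                     # check which Matplotlib is installed
# Package-wide defaults live in mpl.rcParams; these affect every later figure:
mpl.rcParams['figure.figsize'] = (8, 4)    # default figure size in inches
mpl.rcParams['lines.linewidth'] = 2.0      # default line width for plt.plot
```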
Setting StylesWe will use the ``plt.style`` directive to choose appropriate aesthetic [styles](https://matplotlib.org/3.2.1/gallery/style_sheets/style_sheets_reference.html) for our figures.Here we will set the ``classic`` style, which ensures that the plots we create use the classic Matplotlib style:plt.style.use('classic') # Que estilos hay por defecto print(plt.style.available)['Solarize_Light2', '_classic_test_patch', 'bmh', 'classic', 'dark_background', 'fast', 'fivethirtyeight', 'ggplot', 'grayscale', 'seaborn', 'seaborn-bright', 'seaborn-colorblind', 'seaborn-dark', 'seaborn-dark-palette', 'seaborn-darkgrid', 'seaborn-deep', 'seaborn-muted', 'seaborn-notebook', 'seaborn-paper', 'seaborn-pastel', 'seaborn-poster', 'seaborn-talk', 'seaborn-ticks', 'seaborn-white', 'seaborn-whitegrid', 'tableau-colorblind10']Throughout this section, we will adjust this style as needed.Note that the stylesheets used here are supported as of Matplotlib version 1.5; if you are using an earlier version of Matplotlib, only the default style is available.For more information on stylesheets, see [Customizing Matplotlib: Configurations and Style Sheets](https://matplotlib.org/3.3.1/tutorials/introductory/customizing.html). How to Display Your Plots ``show()`` or No ``show()``? A visualization you can't see won't be of much use, but just how you view your Matplotlib plots depends on the context.The best use of Matplotlib differs depending on how you are using it; roughly, **the three applicable contexts are using Matplotlib in a script, in an IPython terminal, or in an IPython notebook.**# cuando utilicemos .py (script de python) para mostrar los gráficos será necesario añadir al final del código # que está en este notebook: plt.show()Plotting from an IPython notebookThe IPython notebook is a browser-based interactive data analysis tool that can combine narrative, code, graphics, HTML elements, and much more into a single executable document.Plotting interactively within an IPython notebook can be done with the ``%matplotlib`` command, and works in a similar way to the IPython shell.In the IPython notebook, you also have the option of embedding graphics directly in the notebook, with two possible options:- ``%matplotlib notebook`` will lead to *interactive* plots embedded within the notebook- ``%matplotlib inline`` will lead to *static* images of your plot embedded in the notebookFor this book, we will generally opt for ``%matplotlib inline``:%matplotlib inlineAfter running this command (it needs to be done only once per kernel/session), any cell within the notebook that creates a plot will embed a PNG image of the resulting graphic:import numpy as np x = np.linspace(0,10, 100) fig = plt.figure() # crear una figura plt.plot(x, np.sin(x), '-') plt.plot(x, np.cos(x), '--r')Saving Figures to FileOne nice feature of Matplotlib is the ability to save figures in a wide variety of formats.Saving a figure can be done using the ``savefig()`` command.For example, to save the previous figure as a PNG file, you can run this:fig.savefig('my_figure.png') #fig.savefig('img/my_figure.png') esto lo guardaría en la carpeta img que está en el current working directoryWe now have a file called ``my_figure.png`` in the current working directory: To confirm that it contains what we think it contains, let's use the IPython ``Image`` object to display the contents of this file:from IPython.display import Image Image('my_figure.png')In ``savefig()``, the file format is inferred from the extension of the given filename. 
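Since the output format is inferred from the filename extension, it can be useful to list the formats the active backend supports. A short sketch, not part of the original cells; the PDF filename is just an example:

```python
import matplotlib.pyplot as plt

fig = plt.gcf()   # reuse the current figure
# Mapping of file extension -> description for the active backend,
# typically including 'png', 'pdf', 'svg', 'eps', ...
print(fig.canvas.get_supported_filetypes())
fig.savefig('my_figure.pdf')   # same figure, different format, chosen by the extension
```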
Note that when saving your figure, it's not necessary to use ``plt.show()`` or related commands. Two Interfaces for the Price of OneA potentially confusing feature of Matplotlib is its dual interfaces: a convenient MATLAB-style state-based interface, and a more powerful object-oriented interface. We'll quickly highlight the differences between the two here. MATLAB-style Interface**Matplotlib was originally written as a Python alternative for MATLAB users**, and much of its syntax reflects that fact.The MATLAB-style tools are contained in the pyplot (``plt``) interface.For example, the following code will probably look quite familiar to MATLAB users:plt.figure() plt.subplot(2,1,2) #filas, columnas, a cual de los dos lienzos quiero acceder plt.plot(x, np.sin(x)) plt.subplot(2,1,1) #filas, columnas, a cual de los dos lienzos quiero acceder plt.plot(x, np.cos(x))It is important to note that this interface is *stateful*: it keeps track of the **"current" figure and axes, which are where all ``plt`` commands are applied.**You can get a reference to these using the ``plt.gcf()`` (get current figure) and ``plt.gca()`` (get current axes) routines.While this stateful interface is fast and convenient for simple plots, it is easy to run into problems.For example, once the second panel is created, how can we go back and add something to the first?This is possible within the MATLAB-style interface, but a bit clunky.Fortunately, there is a better way. Object-oriented interfaceThe object-oriented interface is available for these more complicated situations, and for when you want more control over your figure.Rather than depending on some notion of an "active" figure or axes, in the object-oriented interface the plotting functions are *methods* of explicit ``Figure`` and ``Axes`` objects.To re-create the previous plot using this style of plotting, you might do the following:# First create a grid of plots # ax will be an array of two Axes objects fig, ax = plt.subplots(2,2) ax[0,1].plot(x, np.sin(x)) ax[1,0].plot(x, np.cos(x))For more simple plots, the choice of which style to use is largely a **matter of preference, but the object-oriented approach can become a necessity as plots become more complicated.**Throughout this chapter, we will switch between the MATLAB-style and object-oriented interfaces, depending on what is most convenient.In most cases, the difference is as small as switching ``plt.plot()`` to ``ax.plot()``, but there are a few gotchas that we will highlight as they come up in the following sections. 
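To make the "possible but a bit clunky" remark concrete, here is a small sketch (not from the original notebook) of returning to the first panel in each interface: in the MATLAB-style interface you re-issue the same ``plt.subplot`` call to make that panel current again, while in the object-oriented interface you simply keep the ``Axes`` handles.

```python
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 10, 100)

# MATLAB-style: re-activate the first panel by repeating the subplot call
plt.figure()
plt.subplot(2, 1, 1); plt.plot(x, np.sin(x))
plt.subplot(2, 1, 2); plt.plot(x, np.cos(x))
plt.subplot(2, 1, 1)                    # back to the top panel...
plt.title('sine, labelled afterwards')  # ...so this title lands there

# Object-oriented: keep explicit handles and address any panel at any time
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.plot(x, np.sin(x))
ax2.plot(x, np.cos(x))
ax1.set_title('sine, labelled afterwards')
```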
Multiple Subplots **Sometimes it is helpful to compare different views of data side by side.**To this end, Matplotlib has the concept of *subplots*: groups of smaller axes that can exist together within a single figure.These subplots might be insets, grids of plots, or other more complicated layouts.#cambiamos el estilo de los gráficos plt.style.use('seaborn-white')``plt.axes``: Subplots by HandThe most basic method of creating an axes is to use the ``plt.axes`` function.As we've seen previously, by default this creates a standard axes object that fills the entire figure.``plt.axes`` also takes an optional argument that is a list of four numbers in the figure coordinate system.These numbers represent ``[left, bottom, width, height]`` in the figure coordinate system, which ranges from 0 at the bottom left of the figure to 1 at the top right of the figure.For example, we might create an inset axes at the top-right corner of another axes by setting the *x* and *y* position to 0.65 (that is, starting at 65% of the width and 65% of the height of the figure) and the *x* and *y* extents to 0.2 (that is, the size of the axes is 20% of the width and 20% of the height of the figure):# standard axes Matlab oriented ax1 = plt.axes() ax2 = plt.axes([0.65, 0.65, 0.2, 0.3]) # primero y segundo el origen de coordenadas, el tercero es el ancho y el cuarto alto plt.figure() plt.axes([0.1, 0.5, 0.8, 0.4], ylim = (-1.2,1.2)) plt.axes([0.1, 0.1, 0.8, 0.4], ylim = (-1.2,1.2)) plt.plot(np.sin(x)) plt.plot(np.cos(x))The equivalent of this command within the object-oriented interface is ``fig.add_axes()``. Let's use this to create two vertically stacked axes:fig1 = plt.figure() ax1 = fig1.add_axes([0.1, 0.5, 0.8, 0.4], ylim = (-1.2,1.2)) ax2 = fig1.add_axes([0.1, 0.1, 0.8, 0.4], ylim = (-1.2,1.2)) ax1.plot(np.sin(x), '--') ax2.plot(np.cos(x))We now have two axes (the top with no tick labels) that are just touching: the bottom of the upper panel (at position 0.5) matches the top of the lower panel (at position 0.1 + 0.4). ``plt.subplot``: Simple Grids of SubplotsAligned columns or rows of subplots are a common-enough need that Matplotlib has several convenience routines that make them easy to create.The lowest level of these is ``plt.subplot()``, which creates a single subplot within a grid.As you can see, this command takes three integer arguments—the number of rows, the number of columns, and the index of the plot to be created in this scheme, which runs from the upper left to the bottom right:plt.subplot(2,3,1) for i in range(1, 7): plt.subplot(2,3,i) plt.text(0.5, 0.5, str((2,3,i)), fontsize=18, ha= 'center') # ha --> horizontalalignmentThe command ``plt.subplots_adjust`` can be used to adjust the spacing between these plots.The following code uses the equivalent object-oriented command, ``fig.add_subplot()``:fig = plt.figure(figsize = (20,5)) # figsize default: [6.4, 4.8] fig.subplots_adjust(hspace=0.4, wspace = 0.4) for i in range(1,7): ax = fig.add_subplot(2,3,i) ax.text(0.5, 0.5, str((2,3,i)), fontsize=18, ha= 'center')We've used the ``hspace`` and ``wspace`` arguments of ``plt.subplots_adjust``, which specify the **spacing along the height and width of the figure**, in units of the subplot size (in this case, the space is 40% of the subplot width and height). 
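Before moving on from ``plt.subplot`` and ``fig.add_subplot``: the section also mentions "other more complicated layouts", and one standard tool for those (not used in this excerpt) is ``plt.GridSpec``, which lets a single subplot span several grid cells. A brief sketch, assuming the same imports as above:

```python
import matplotlib.pyplot as plt

# A 2x3 grid description with custom spacing; GridSpec objects are indexed
# with Python slices, so one subplot can cover several cells of the grid.
grid = plt.GridSpec(2, 3, wspace=0.4, hspace=0.3)
plt.subplot(grid[0, 0])    # top-left cell
plt.subplot(grid[0, 1:])   # the rest of the top row
plt.subplot(grid[1, :2])   # bottom-left two cells
plt.subplot(grid[1, 2])    # bottom-right cell
```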
``plt.subplots``: The Whole Grid in One Go**The approach just described can become quite tedious when creating a large grid of subplots, especially if you'd like to hide the x- and y-axis labels on the inner plots.**For this purpose, ``plt.subplots()`` is the easier tool to use (note the ``s`` at the end of ``subplots``). Rather than creating a single subplot, this function creates a full grid of subplots in a single line, returning them in a NumPy array.The arguments are the number of rows and number of columns, along with optional keywords ``sharex`` and ``sharey``, which allow you to specify the relationships between different axes.Here we'll create a $2 \times 3$ grid of subplots, where all axes in the same row share their y-axis scale, and all axes in the same column share their x-axis scale:fig, ax = plt.subplots(2,3, sharex= 'col', sharey = 'row')Note that by specifying ``sharex`` and ``sharey``, we've automatically removed inner labels on the grid to make the plot cleaner.The resulting grid of axes instances is returned within a NumPy array, allowing for convenient specification of the desired axes using standard array indexing notation:ax[0,1] # axes are in a two-dimensional array, indexed by [row, col] for i in range(2): #dos filas for j in range(3): #tres columnas ax[i, j].text(0.5, 0.5, str((i,j)), fontsize = 18, ha= 'left') figIn comparison to ``plt.subplot()``, ``plt.subplots()`` is more consistent with Python's conventional 0-based indexing. Simple Line Plots Perhaps the simplest of all plots is the visualization of a single function $y = f(x)$.Here we will take a first look at creating a simple plot of this type. For all Matplotlib plots, we start by creating a figure and an axes.In their simplest form, a figure and axes can be created as follows:fig_lin = plt.figure(figsize = (7,7)) ax_lin = plt.axes()In Matplotlib, the *figure* (an instance of the class ``plt.Figure``) can be thought of as a single **container that contains all the objects representing axes, graphics, text, and labels.**The *axes* (an instance of the class ``plt.Axes``) is what we see above: a bounding box with **ticks and labels, which will eventually contain the plot elements that make up our visualization.**Throughout this book, we'll commonly use the variable name ``fig`` to refer to a figure instance, and ``ax`` to refer to an axes instance or group of axes instances.Once we have created an axes, we can use the ``ax.plot`` function to plot some data. Let's start with a simple sinusoid:ax_lin.plot(x,np.sin(x)) ax_lin.plot(x, np.cos(x)) fig_lin # MATLAB ORIENTED plt.figure() plt.clf() #figure plt.cla() #axesAlternatively, we can use the pylab interface and let the figure and axes be created for us in the background:plt.plot(x, np.cos(x))If we want to create a single figure with multiple lines, we can simply call the ``plot`` function multiple times:plt.plot(x, np.cos(x)) plt.plot(x, np.sin(x))That's all there is to plotting simple functions in Matplotlib!We'll now dive into some more details about how to control the appearance of the axes and lines. 
Adjusting the Plot: Line Colors and Styles The first adjustment you might wish to make to a plot is to control the line colors and styles.The ``plt.plot()`` function takes additional arguments that can be used to specify these.To adjust the color, you can use the ``color`` keyword, which accepts a string argument representing virtually any imaginable color.The color can be specified in a variety of ways:plt.plot(x, np.sin(x - 0), color='blue') # specify color by name plt.plot(x, np.sin(x - 1), color='g') # short color code (rgbcmyk) plt.plot(x, np.sin(x - 2), color='0.75') # Grayscale between 0 and 1 plt.plot(x, np.sin(x - 3), color='#FFDD44') # Hex code (RRGGBB from 00 to FF) plt.plot(x, np.sin(x - 4), color=(1.0,0.2,0.3)) # RGB tuple, values 0 to 1 plt.plot(x, np.sin(x - 5), color='chartreuse'); # all HTML color names supportedIf no color is specified, Matplotlib will automatically cycle through a set of default colors for multiple lines.Similarly, the line style can be adjusted using the ``linestyle`` keyword:plt.plot(x, x + 0, linestyle='solid') plt.plot(x, x + 1, linestyle='dashed') plt.plot(x, x + 2, linestyle='dashdot') plt.plot(x, x + 3, linestyle='dotted'); # For short, you can use the following codes: plt.plot(x, x + 4, linestyle='-') # solid plt.plot(x, x + 5, linestyle='--') # dashed plt.plot(x, x + 6, linestyle='-.') # dashdot plt.plot(x, x + 7, linestyle=':'); # dottedIf you would like to be extremely terse, these ``linestyle`` and ``color`` codes can be combined into a single non-keyword argument to the ``plt.plot()`` function:plt.plot(x, x + 0, '-g') # solid green plt.plot(x, x + 1, '--c') # dashed cyan plt.plot(x, x + 2, '-.k') # dashdot black plt.plot(x, x + 3, ':r'); # dotted redThese single-character color codes reflect the standard abbreviations in the RGB (Red/Green/Blue) and CMYK (Cyan/Magenta/Yellow/blacK) color systems, commonly used for digital color graphics.There are many other keyword arguments that can be used to fine-tune the appearance of the plot; for more details, I'd suggest viewing the docstring of the ``plt.plot()`` function using IPython's help tools. Adjusting the Plot: Axes LimitsMatplotlib does a decent job of choosing default axes limits for your plot, but sometimes it's nice to have finer control.The most basic way to adjust axis limits is to use the ``plt.xlim()`` and ``plt.ylim()`` methods:#ajustar los ejes plt.plot(x, np.sin(x)) plt.xlim(-2,11) plt.ylim(-1.5,1.5)If for some reason you'd like either axis to be displayed in reverse, you can simply reverse the order of the arguments:plt.plot(x, np.sin(x)) plt.xlim(11,-2) plt.ylim(1.5,-1.5)A useful related method is ``plt.axis()`` (note here the potential confusion between *axes* with an *e*, and *axis* with an *i*).The ``plt.axis()`` method allows you to set the ``x`` and ``y`` limits with a single call, by passing a list which specifies ``[xmin, xmax, ymin, ymax]``:plt.plot(x, np.sin(x)) plt.axis([-2, 11, -1.5, 1.5])The ``plt.axis()`` method goes even beyond this, allowing you to do things like automatically tighten the bounds around the current plot:plt.plot(x, np.sin(x)) plt.axis('tight')For more information on axis limits and the other capabilities of the ``plt.axis`` method, refer to the ``plt.axis`` docstring. 
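Beyond ``'tight'``, ``plt.axis()`` also accepts a few other convenience strings; two common ones are shown in this short sketch, which is not part of the original cells:

```python
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 10, 100)
plt.plot(x, np.sin(x))
plt.axis('equal')   # one unit in x spans the same screen length as one unit in y
# plt.axis('off')   # would hide the axis lines, ticks and labels entirely
```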
Labeling PlotsAs the last piece of this section, we'll briefly look at the labeling of plots: titles, axis labels, and simple legends.Titles and axis labels are the simplest such labels—there are methods that can be used to quickly set them:plt.plot(x, np.sin(x)) plt.title("La curva del seno") plt.xlabel("x") plt.ylabel("$sin(x)$")**The position, size, and style of these labels can be adjusted using optional arguments to the function.**For more information, see the Matplotlib documentation and the docstrings of each of these functions. When multiple lines are being shown within a single axes, it can be useful to create a **plot legend** that labels each line type.Again, **Matplotlib has a built-in way of quickly creating such a legend.**It is done via the (you guessed it) ``plt.legend()`` method.Though there are several valid ways of using this, I find it easiest to specify the label of each line using the ``label`` keyword of the plot function:plt.plot(x, np.sin(x), '-g', label = 'sin(x)') plt.plot(x, np.cos(x), ':b', label = 'cos(x)') plt.xlabel("x") plt.axis('equal') plt.legend()As you can see, the ``plt.legend()`` function keeps track of the line style and color, and matches these with the correct label.More information on specifying and formatting plot legends can be found in the ``plt.legend`` docstring. Aside: Matplotlib GotchasWhile most ``plt`` functions translate directly to ``ax`` methods (such as ``plt.plot()`` → ``ax.plot()``, ``plt.legend()`` → ``ax.legend()``, etc.), this is not the case for all commands.In particular, functions to set limits, labels, and titles are slightly modified.**For transitioning between MATLAB-style functions and object-oriented methods, make the following changes:**- ``plt.xlabel()`` → ``ax.set_xlabel()``- ``plt.ylabel()`` → ``ax.set_ylabel()``- ``plt.xlim()`` → ``ax.set_xlim()``- ``plt.ylim()`` → ``ax.set_ylim()``- ``plt.title()`` → ``ax.set_title()``In the object-oriented interface to plotting, rather than calling these functions individually, it is often more convenient to use the ``ax.set()`` method to set all these properties at once:ax = plt.axes() ax.plot(x, np.sin(x)) ax.set(xlim=(0,10), ylim=(-2,2), xlabel='x', ylabel = 'sin(x)', title="A simple plot")Simple Scatter Plots Another commonly used plot type is the simple scatter plot, a close cousin of the line plot.Instead of points being joined by line segments, here the points are represented individually with a dot, circle, or other shape. Scatter Plots with ``plt.plot``In the previous section we looked at ``plt.plot``/``ax.plot`` to produce line plots.It turns out that this same function can produce scatter plots as well:plt.plot(x, np.sin(x), '3', color = 'black' )The third argument in the function call is a character that represents the type of symbol used for the plotting. Just as you can specify options such as ``'-'``, ``'--'`` to control the line style, the marker style has its own set of short string codes. The full list of available symbols can be seen in the documentation of ``plt.plot``, or in Matplotlib's online documentation. 
Most of the possibilities are fairly intuitive, and we'll show a number of the more common ones here:rng = np.random.RandomState(0) for marker in ['o', '.', ',', 'x', 'v', '^', '<', '>', 's', 'd']: plt.plot(rng.rand(5), rng.rand(5), marker, label = f"marker = {marker}") plt.legend() plt.xlim(0,1.8)For even more possibilities, these character codes can be used together with line and color codes to plot points along with a line connecting them:x = np.linspace(0,10, 20) plt.plot(x, np.sin(x), '-ok')Additional keyword arguments to ``plt.plot`` specify a wide range of properties of the lines and markers:plt.plot(x, np.sin(x), '-p', color = 'gray', markersize = 15, markerfacecolor = 'white', markeredgecolor = 'green', markeredgewidth = 2, linewidth = 4)This type of flexibility in the ``plt.plot`` function allows for a wide variety of possible visualization options.For a full description of the options available, refer to the ``plt.plot`` documentation. Scatter Plots with ``plt.scatter``A second, more powerful method of creating scatter plots is the ``plt.scatter`` function, which can be used very similarly to the ``plt.plot`` function:plt.scatter(x, np.sin(x), marker = 'o') #esto y utilizar plot es lo mismo**The primary difference of ``plt.scatter`` from ``plt.plot`` is that it can be used to create scatter plots where the properties of each individual point (size, face color, edge color, etc.) can be individually controlled or mapped to data.**Let's show this by creating a random scatter plot with points of many colors and sizes.In order to better see the overlapping results, we'll also use the ``alpha`` keyword to adjust the transparency level:#rng = np.random.RandomState(0) x = rng.randn(100) y = rng.randn(100) plt.scatter(x, y, alpha = 0.3) colors = rng.rand(100) sizes = 1000 * rng.rand(100) plt.scatter(x, y, c = colors, s = sizes, alpha = 0.4, cmap = 'viridis') plt.colorbar() # añade una escala de coloresNotice that the color argument is automatically mapped to a color scale (shown here by the ``colorbar()`` command), and that the size argument is given in pixels.In this way, the color and size of points can be used to convey information in the visualization, in order to visualize multidimensional data.For example, we might use the Iris data from Scikit-Learn, where each sample is one of three types of flowers that has had the size of its petals and sepals carefully measured:from sklearn.datasets import load_iris iris = load_iris() features = iris.data.T plt.scatter(features[0], features[1]) plt.xlabel(iris.feature_names[0]) plt.ylabel(iris.feature_names[1]) iris.target plt.style.use('bmh') plt.scatter(features[0], features[1], alpha = 0.5, s = 100*features[3], c = iris.target, cmap = 'viridis') plt.xlabel(iris.feature_names[0]) plt.ylabel(iris.feature_names[1])We can see that this scatter plot has given us the ability to **simultaneously explore four different dimensions of the data:**the (x, y) location of each point corresponds to the sepal length and width, the size of the point is related to the petal width, and the color is related to the particular species of flower.Multicolor and multifeature scatter plots like this can be useful for both exploration and presentation of data. 
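One practical point worth adding here: for large datasets ``plt.plot`` is generally more efficient than ``plt.scatter``, because ``scatter`` must construct each point individually so that its size and color can vary, whereas ``plot`` shares one style across all points. A rough, machine-dependent way to see this (a sketch, not from the original notebook; only artist creation is timed, not rendering):

```python
import time
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(0)
x, y = rng.randn(100000), rng.randn(100000)

t0 = time.perf_counter()
plt.plot(x, y, 'o', markersize=1)   # one shared marker style for every point
t_plot = time.perf_counter() - t0

plt.figure()
t0 = time.perf_counter()
plt.scatter(x, y, s=1)              # per-point properties possible, at extra cost
t_scatter = time.perf_counter() - t0

print(f"plt.plot: {t_plot:.3f} s   plt.scatter: {t_scatter:.3f} s")
```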
Histograms A simple histogram can be a great first step in understanding a dataset.data = np.random.randn(1000) plt.hist(data)The ``hist()`` function has many options to tune both the calculation and the display; here's an example of a more customized histogram:plt.hist(data, bins = 30, color = 'steelblue', alpha = 0.5)The ``plt.hist`` docstring has more information on other customization options available.Transparency ``alpha`` is very useful when comparing histograms of several distributions:x1 = np.random.normal(0,0.8,1000) x2 = np.random.normal(-2,1,1000) x3 = np.random.normal(3,2,1000) kwargs = dict(histtype = 'bar', alpha = 0.3, bins = 40) plt.hist(x1, **kwargs) plt.hist(x2, **kwargs) plt.hist(x3, **kwargs) kwargsEjemploplt.style.use('seaborn-white') # industria del tomate ventas = [100,500,200,150,800] meses = [1,2,3,4,5] plt.bar(meses,ventas) plt.ylabel('Tomates') plt.xlabel('Mes') plt.xticks(meses, labels = ('Enero', 'Febrero', 'Marzo', 'Abril', 'May'), rotation = 45) plt.legend(['Venta tomates mes'], loc ='best') plt.title("Prevision de venta de tomates") plt.annotate('Vendimos muchos\ntomates maduros', xy = (2,ventas[1]), xytext = (3, ventas[1]*1.25), arrowprops = dict(facecolor = '#00FF00', shrink = 0.05)) plt.figure() with plt.xkcd(): ventas_tomates = np.array([100,500,200,150,800]) ventas_naranjas = np.flip(np.array(ventas_tomates)) meses = np.array([1,2,3,4,5]) plt.bar(meses-0.1, ventas_tomates, 0.2) plt.bar(meses+0.1, ventas_tomates, 0.2) plt.ylabel('Tomates/Naranjas') plt.xlabel('Mes') plt.xticks(meses, labels = ('Enero', 'Febrero', 'Marzo', 'Abril', 'May'), rotation = 45) plt.legend(["Tomates", "Naranjas"], loc = 'best') plt.title("Prevision de venta") plt.annotate('Vendimos muy pocos', xy = (1,ventas_tomates[0]+10), xytext = (3, ventas[1]*1.20), arrowprops = dict(facecolor = '#FFF000', shrink = 0.05))* **What is a phishing attack?*** Phishing is a type of social engineering attack often used to steal user data, including login credentials and credit card numbers. It occurs when an attacker, masquerading as a trusted entity, dupes a victim into opening an email, instant message, or text message. 
* Importing some useful librariesimport pandas as pd # use for data manipulation and analysis import numpy as np # use for multi-dimensional array and matrix import seaborn as sns # use for high-level interface for drawing attractive and informative statistical graphics import matplotlib.pyplot as plt # It provides an object-oriented API for embedding plots into applications %matplotlib inline # It sets the backend of matplotlib to the 'inline' backend: import time # calculate time from sklearn.linear_model import LogisticRegression # algo use to predict good or bad from sklearn.naive_bayes import MultinomialNB # nlp algo use to predict good or bad from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split # spliting the data between feature and target from sklearn.metrics import classification_report # gives whole report about metrics (e.g, recall,precision,f1_score,c_m) from sklearn.metrics import confusion_matrix # gives info about actual and predict from nltk.tokenize import RegexpTokenizer # regexp tokenizers use to split words from text from nltk.stem.snowball import SnowballStemmer # stemmes words from sklearn.feature_extraction.text import CountVectorizer # create sparse matrix of words using regexptokenizes from sklearn.pipeline import make_pipeline # use for combining all prerocessors techniuqes and algos from PIL import Image # getting images in notebook # from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator# creates words colud from bs4 import BeautifulSoup # use for scraping the data from website # from selenium import webdriver # use for automation chrome import networkx as nx # for the creation, manipulation, and study of the structure, dynamics, and functions of complex networks. import pickle# use to dump model import warnings # ignores pink warnings warnings.filterwarnings('ignore') # phishing_data1 = pd.read_csv('phishing_urls.csv',usecols=['domain','label'],encoding='latin1', error_bad_lines=False) # phishing_data1.columns = ['URL','Label'] # phishing_data2 = pd.read_csv('phishing_data.csv') # phishing_data2.columns = ['URL','Label'] # phishing_data3 = pd.read_csv('phishing_data2.csv') # phishing_data3.columns = ['URL','Label'] # for l in range(len(phishing_data1.Label)): # if phishing_data1.Label.loc[l] == '1.0': # phishing_data1.Label.loc[l] = 'bad' # else: # phishing_data1.Label.loc[l] = 'good'* **Concatenate All datasets in one.**# frames = [phishing_data1, phishing_data2, phishing_data3] # phishing_urls = pd.concat(frames) #saving dataset # phishing_urls.to_csv(r'phishing_site_urls.csv', index = False)* **Loading the main dataset.**phish_data = pd.read_csv('phishing_site_urls.csv') phish_data.head() phish_data.tail() phish_data.info() RangeIndex: 549346 entries, 0 to 549345 Data columns (total 2 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 URL 549346 non-null object 1 Label 549346 non-null object dtypes: object(2) memory usage: 8.4+ MB* **About dataset*** Data is containg 5,49,346 unique entries.* There are two columns.* Label column is prediction col which has 2 categories A. Good - which means the urls is not containing malicious stuff and **this site is not a Phishing Site.** B. 
Bad - which means the urls contains malicious stuffs and **this site isa Phishing Site.*** There is no missing value in the dataset.phish_data.isnull().sum() # there is no missing values* **Since it is classification problems so let's see the classes are balanced or imbalances**#create a dataframe of classes counts label_counts = pd.DataFrame(phish_data.Label.value_counts()) #visualizing target_col sns.set_style('darkgrid') sns.barplot(label_counts.index,label_counts.Label)Preprocessing * **Now that we have the data, we have to vectorize our URLs. I used CountVectorizer and gather words using tokenizer, since there are words in urls that are more important than other words e.g ‘virus’, ‘.exe’ ,’.dat’ etc. Lets convert the URLs into a vector form.** RegexpTokenizer* A tokenizer that splits a string using a regular expression, which matches either the tokens or the separators between tokens.tokenizer = RegexpTokenizer(r'[A-Za-z]+') phish_data.URL[0] # this will be pull letter which matches to expression tokenizer.tokenize(phish_data.URL[0]) # using first row print('Getting words tokenized ...') t0= time.perf_counter() phish_data['text_tokenized'] = phish_data.URL.map(lambda t: tokenizer.tokenize(t)) # doing with all rows t1 = time.perf_counter() - t0 print('Time taken',t1 ,'sec') phish_data.sample(5)SnowballStemmer* Snowball is a small string processing language, gives root wordsstemmer = SnowballStemmer("english") # choose a language print('Getting words stemmed ...') t0= time.perf_counter() phish_data['text_stemmed'] = phish_data['text_tokenized'].map(lambda l: [stemmer.stem(word) for word in l]) t1= time.perf_counter() - t0 print('Time taken',t1 ,'sec') phish_data.sample(5) print('Getting joiningwords ...') t0= time.perf_counter() phish_data['text_sent'] = phish_data['text_stemmed'].map(lambda l: ' '.join(l)) t1= time.perf_counter() - t0 print('Time taken',t1 ,'sec') phish_data.sample(5)Visualization **1. Visualize some important keys using word cloud**#sliceing classes bad_sites = phish_data[phish_data.Label == 'bad'] good_sites = phish_data[phish_data.Label == 'good'] bad_sites.head() good_sites.head()* create a function to visualize the important keys from url Creating Model CountVectorizer* CountVectorizer is used to transform a corpora of text to a vector of term / token counts.#create cv object cv = CountVectorizer() #help(CountVectorizer()) feature = cv.fit_transform(phish_data.text_sent) #transform all text which we tokenize and stemed feature[:5].toarray() # convert sparse matrix into array to print transformed features* Spliting the datatrainX, testX, trainY, testY = train_test_split(feature, phish_data.Label) rf = RandomForestClassifier(n_estimators=100) rf.fit(trainX,trainY) rf.score(testX,testY)LogisticRegression* Logistic Regression is a Machine Learning classification algorithm that is used to predict the probability of a categorical dependent variable. In logistic regression, the dependent variable is a binary variable that contains data coded as 1 (yes, success, etc.) or 0 (no, failure, etc.). 
In other words, the logistic regression model predicts P(Y=1) as a function of X.# create lr object lr = LogisticRegression() lr.fit(trainX,trainY) lr.score(testX,testY).*** Logistic Regression is giving 96% accuracy, Now we will store scores in dict to see which model perform best**Scores_ml = {} Scores_ml['Logistic Regression'] = np.round(lr.score(testX,testY),2) print('Training Accuracy :',lr.score(trainX,trainY)) print('Testing Accuracy :',lr.score(testX,testY)) con_mat = pd.DataFrame(confusion_matrix(lr.predict(testX), testY), columns = ['Predicted:Bad', 'Predicted:Good'], index = ['Actual:Bad', 'Actual:Good']) print('\nCLASSIFICATION REPORT\n') print(classification_report(lr.predict(testX), testY, target_names =['Bad','Good'])) print('\nCONFUSION MATRIX') plt.figure(figsize= (6,4)) sns.heatmap(con_mat, annot = True,fmt='d',cmap="YlGnBu")Training Accuracy : 0.9782480479795345 Testing Accuracy : 0.9636514559077306 CLASSIFICATION REPORT precision recall f1-score support Bad 0.90 0.97 0.93 36597 Good 0.99 0.96 0.97 100740 accuracy 0.96 137337 macro avg 0.95 0.96 0.95 137337 weighted avg 0.97 0.96 0.96 137337 CONFUSION MATRIXMultinomialNB* Applying Multinomial Naive Bayes to NLP Problems. Naive Bayes Classifier Algorithm is a family of probabilistic algorithms based on applying Bayes' theorem with the “naive” assumption of conditional independence between every pair of a feature.# create mnb object mnb = MultinomialNB() mnb.fit(trainX,trainY) mnb.score(testX,testY)*** MultinomialNB gives us 95% accuracy**Scores_ml['MultinomialNB'] = np.round(mnb.score(testX,testY),2) print('Training Accuracy :',mnb.score(trainX,trainY)) print('Testing Accuracy :',mnb.score(testX,testY)) con_mat = pd.DataFrame(confusion_matrix(mnb.predict(testX), testY), columns = ['Predicted:Bad', 'Predicted:Good'], index = ['Actual:Bad', 'Actual:Good']) print('\nCLASSIFICATION REPORT\n') print(classification_report(mnb.predict(testX), testY, target_names =['Bad','Good'])) print('\nCONFUSION MATRIX') plt.figure(figsize= (6,4)) sns.heatmap(con_mat, annot = True,fmt='d',cmap="YlGnBu") acc = pd.DataFrame.from_dict(Scores_ml,orient = 'index',columns=['Accuracy']) sns.set_style('darkgrid') sns.barplot(acc.index,acc.Accuracy)*** So, Logistic Regression is the best fit model, Now we make sklearn pipeline using Logistic Regression**pipeline_ls = make_pipeline(CountVectorizer(tokenizer = RegexpTokenizer(r'[A-Za-z]+').tokenize,stop_words='english'), LogisticRegression()) trainX, testX, trainY, testY = train_test_split(phish_data.URL, phish_data.Label) pipeline_ls.fit(trainX,trainY) pipeline_ls.score(testX,testY) print('Training Accuracy :',pipeline_ls.score(trainX,trainY)) print('Testing Accuracy :',pipeline_ls.score(testX,testY)) con_mat = pd.DataFrame(confusion_matrix(pipeline_ls.predict(testX), testY), columns = ['Predicted:Bad', 'Predicted:Good'], index = ['Actual:Bad', 'Actual:Good']) print('\nCLASSIFICATION REPORT\n') print(classification_report(pipeline_ls.predict(testX), testY, target_names =['Bad','Good'])) print('\nCONFUSION MATRIX') plt.figure(figsize= (6,4)) sns.heatmap(con_mat, annot = True,fmt='d',cmap="YlGnBu") pickle.dump(pipeline_ls,open('phishing.pkl','wb')) loaded_model = pickle.load(open('phishing.pkl', 'rb')) result = loaded_model.score(testX,testY) print(result) * Bad links => this are phishing sites yeniik.com.tr/wp-admin/js/login.alibaba.com/login.jsp.php fazan-pacir.rs/temp/libraries/ipad www.tubemoviez.exe svision-online.de/mgfi/administrator/components/com_babackup/classes/fx29id1.txt * Good links => this 
are not phishing sites www.youtube.com/ youtube.com/watch?v=qI0TQJI3vdU www.retailhellunderground.com/ restorevisioncenters.com/html/technology.html predict_bad = ['yeniik.com.tr/wp-admin/js/login.alibaba.com/login.jsp.php','fazan-pacir.rs/temp/libraries/ipad','tubemoviez.exe','svision-online.de/mgfi/administrator/components/com_babackup/classes/fx29id1.txt'] predict_good = ['youtube.com/','youtube.com/watch?v=qI0TQJI3vdU','retailhellunderground.com/','restorevisioncenters.com/html/technology.html'] loaded_model = pickle.load(open('phishing.pkl', 'rb')) #predict_bad = vectorizers.transform(predict_bad) # predict_good = vectorizer.transform(predict_good) result = loaded_model.predict(predict_bad) result2 = loaded_model.predict(predict_good) print(result) print("*"*30) print(result2)['bad' 'bad' 'bad' 'bad'] ****************************** ['good' 'good' 'good' 'good']This code was written by  and forms part of his introductory course for the subject "Laboratorio". You can see the complete exposition in his notes for the course. There it is shown how, starting from code written in pure Python, one can work out which parts of the code need improvement and how to implement those improvements using Cython and numpy.With this program we obtain a representation of the Mandelbrot set, which, by definition, is the set of complex numbers $c$ such that the orbit of $0$ under iteration of the function $f(z)=z^2+c$, that is, the set of points in the complex plane$$\{c,c^2+c,(c^2+c)^2+c,\dots\}$$remains bounded (that is, there is a number $R$, possibly very large and depending on $c$, such that the whole orbit of zero is contained in the circle with center zero and radius $R$). You should analyse the code carefully to work out how it behaves, understanding in particular the role played by the parameters we supply to the function. When we assign a value to a parameter in the definition, for example $int N=5000$, it means that this is the default value and it does not need to be passed when calling the function.The Mandelbrot set is perhaps the simplest example of a fractal set, and it is interesting to vary the iterated function by changing the line $z=z*z+c$ to some other expression in terms of $z$ and $c$.def mandelbrot_sage(x0,x1,y0,y1,N=100,L=200,R=3): '''returns an array NxN to be plotted with matrix_plot ''' m = matrix(ZZ,N,N,[0]*N**2) deltax = (x1-x0)/N deltay = (y1-y0)/N for j in range(N): for k in range(N): c = (x0+j*deltax)+ I*(y0+k*deltay) z=CC(0) h=0 while (hIt looks like the Mandelbrot fractal, but the resolution is very poor.
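Because the body of the ``while`` loop in ``mandelbrot_sage`` is cut off in this copy of the notebook (the text stops at ``while (h``), here is a hedged pure-Python/NumPy reconstruction of the escape-time iteration the description implies: iterate $z \mapsto z^2+c$ at most $L$ times and record how quickly the orbit leaves the circle of radius $R$. It reuses the original parameter names, but the loop body and the ``m[k, j]`` indexing are assumptions, not the author's exact code.

```python
import numpy as np

def mandelbrot_escape(x0, x1, y0, y1, N=100, L=200, R=3):
    """Return an N x N array of escape counts for the rectangle [x0, x1] x [y0, y1]."""
    m = np.zeros((N, N), dtype=int)
    deltax = (x1 - x0) / N
    deltay = (y1 - y0) / N
    for j in range(N):
        for k in range(N):
            c = (x0 + j * deltax) + 1j * (y0 + k * deltay)
            z, h = 0j, 0
            # iterate z -> z^2 + c until the orbit leaves the radius-R circle
            # or the iteration budget L is exhausted
            while h < L and abs(z) < R:
                z = z * z + c
                h += 1
            m[k, j] = h   # assumed orientation: rows follow the imaginary axis
    return m

counts = mandelbrot_escape(-2.0, 1.0, -1.5, 1.5)   # small grid; view e.g. with plt.imshow(counts)
```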
Aumentando $N$ y $L$ se conseguiría mejor resolución, pero tarda demasiado.%%cython import numpy as np cimport numpy as np cimport cython def mandelbrot_cython(float x0,float x1,float y0,float y1, int N=5000, int L=500, float R=3): '''returns an array NxN to be plotted with matrix_plot ''' cdef double complex c, z, I cdef float deltax, deltay, R2 = R*R cdef int h, j, k cdef np.ndarray[np.uint16_t, ndim=2] m m = np.zeros((N,N), dtype=np.uint16) I = complex(0,1) deltax = (x1-x0)/N deltay = (y1-y0)/N for j in range(N): for k in range(N): c = (x0+j*deltax)+ I*(y0+k*deltay) z=0 h=0 while (h"Geo Data Science with Python" Notebook Exercise 6e--- Downloading Science Data with pydapIf you work in teams, please indicate your colaborators below!NAME = "" COLLABORATORS = ""Very slow, and bad implementation, and buggy, but this is an implementation of Haas' reservoir 2004num_neurons = 50 connectivity = 0.2 spectral_radius = 1.0 dt = 0.5 r = Reservoir(num_neurons,connectivity,spectral_radius,init_pattern = 'random',feedback=True,bias=False,fb_scale=1,record_history=True) num_warmup = 100 num_train = 100 # Let the network warm up for step in range(num_warmup): r.forward() # Training dataset for s in range(num_train): targ = torch.sin(torch.tensor(s*dt)) r.y = torch.tensor([targ]) r.forward() # Train it readout_weight = torch.tensor(r.readout_w,requires_grad=True) epochs = 400 lr=0.01 for e in range(epochs): x = torch.tensor(r.history['state_vectors'])[num_warmup:num_warmup+num_train,:] y = torch.tensor(r.history['outputs'])[num_warmup:num_warmup+num_train,:] diff = y - torch.matmul(x,readout_weight) se = diff.pow(2) mse = torch.sum(se) / (num_train-num_warmup) mse.backward() grad = readout_weight.grad readout_weight = readout_weight - grad*lr readout_weight = torch.tensor(readout_weight,requires_grad=True) print(mse) r.readout_w = readout_weight/home/alpinefunker/.local/lib/python3.6/site-packages/ipykernel_launcher.py:27: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor). /home/alpinefunker/.local/lib/python3.6/site-packages/ipykernel_launcher.py:47: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).Distribution graphs (histogram/bar graph) of sampled columns:plotPerColumnDistribution(df1, 10, 5):10: MatplotlibDeprecationWarning: Passing non-integers as three-element position specification is deprecated since 3.3 and will be removed two minor releases later. 
plt.subplot(nGraphRow, nGraphPerRow, i + 1)Correlation matrix:plotCorrelationMatrix(df1, 8)Scatter and density plots:plotScatterMatrix(df1, 12, 10)Benchmark FRESA.CAD BSWIMS final Script This algorithm implementation uses R code and a Python library (rpy2) to connect with it, in order to run the following it is necesary to have installed both on your computer:- R (you can download in https://www.r-project.org/) - install rpy2 by pip install rpy2import numpy as np import pandas as pd import sys from pathlib import Path sys.path.append("../tadpole-algorithms") import tadpole_algorithms from tadpole_algorithms.models import Benchmark_FRESACAD_R from tadpole_algorithms.preprocessing.split import split_test_train_tadpole #rpy2 libs and funcs import rpy2.robjects.packages as rpackages from rpy2.robjects.vectors import StrVector from rpy2.robjects import r, pandas2ri from rpy2 import robjects from rpy2.robjects.conversion import localconverter # Load D1_D2 train and possible test data set data_path_train_test = Path("data/TADPOLE_D1_D2.csv") data_df_train_test = pd.read_csv(data_path_train_test) # Load data Dictionary data_path_Dictionaty = Path("data/TADPOLE_D1_D2_Dict.csv") data_Dictionaty = pd.read_csv(data_path_Dictionaty) # Load D3 possible test set data_path_test = Path("data/TADPOLE_D3.csv") data_D3 = pd.read_csv(data_path_test) # Load D4 evaluation data set data_path_eval = Path("data/TADPOLE_D4_corr.csv") data_df_eval = pd.read_csv(data_path_eval) # Split data in test, train and evaluation data train_df, test_df, eval_df = split_test_train_tadpole(data_df_train_test, data_df_eval) #instanciate the model to get the functions model = Benchmark_FRESACAD_R() #set the flag to true to use a preprocessed data USE_PREPROC = False #preprocess the data D1Train,D2Test,D3Train,D3Test = model.extractTrainTestDataSets_R("data/TADPOLE_D1_D2.csv","data/TADPOLE_D3.csv") # AdjustedTrainFrame,testingFrame,Train_Imputed,Test_Imputed = model.preproc_tadpole_D1_D2(data_df_train_test,USE_PREPROC) AdjustedTrainFrame,testingFrame,Train_Imputed,Test_Imputed = model.preproc_with_R(D1Train,D2Test,data_Dictionaty,usePreProc=USE_PREPROC) #Train Congitive Models modelfilename = model.Train_Congitive(AdjustedTrainFrame,usePreProc=USE_PREPROC) #Train ADAS/Ventricles Models regresionModelfilename = model.Train_Regression(AdjustedTrainFrame,Train_Imputed,usePreProc=USE_PREPROC) print(regresionModelfilename) print(regresionModelfilename) print(type(regresionModelfilename)) #Predict Forecast_D2 = model.Forecast_All(modelfilename, regresionModelfilename, testingFrame, Test_Imputed, submissionTemplateFileName="data/TADPOLE_Simple_Submission_TeamName.xlsx", usePreProc=USE_PREPROC) #data_forecast_test = Path("data/_ForecastFRESACAD.csv") #Forecast_D2 = pd.read_csv(data_forecast_test) from tadpole_algorithms.evaluation import evaluate_forecast from tadpole_algorithms.evaluation import print_metrics # Evaluate the model dictionary = evaluate_forecast(eval_df, Forecast_D2) # Print metrics print_metrics(dictionary) # AdjustedTrainFrame,testingFrame,Train_Imputed,Test_Imputed = model.preproc_tadpole_D1_D2(data_df_train_test,USE_PREPROC) D3AdjustedTrainFrame,D3testingFrame,D3Train_Imputed,D3Test_Imputed = model.preproc_with_R(D3Train, D3Test, data_Dictionaty, MinVisit=18, colImputeThreshold=0.15, rowImputeThreshold=0.10, includeID=False, usePreProc=USE_PREPROC) #Train Congitive Models D3modelfilename = model.Train_Congitive(D3AdjustedTrainFrame,usePreProc=USE_PREPROC) #Train ADAS/Ventricles Models D3regresionModelfilename = 
model.Train_Regression(D3AdjustedTrainFrame,D3Train_Imputed,usePreProc=USE_PREPROC) #Predict Forecast_D3 = model.Forecast_All(D3modelfilename, D3regresionModelfilename, D3testingFrame, D3Test_Imputed, submissionTemplateFileName="data/TADPOLE_Simple_Submission_TeamName.xlsx", usePreProc=USE_PREPROC) from tadpole_algorithms.evaluation import evaluate_forecast from tadpole_algorithms.evaluation import print_metrics # Evaluate the D3 model dictionary = evaluate_forecast(eval_df,Forecast_D3) # Print metrics print_metrics(dictionary)[[74 11 1] [20 62 10] [ 2 11 19]] mAUC (multiclass Area Under Curve): 0.858 bca (balanced classification accuracy): 0.784 adasMAE (ADAS13 Mean Absolute Error): 5.631 ventsMAE (Ventricles Mean Absolute Error), in % ICV: 1.110 adasWES (ADAS13 Weighted Error Score): 5.659 ventsWES (Ventricles Weighted Error Score ), in % ICV: 1.111 adasCPA (ADAS13 Coverage Probability Accuracy for 50% Confidence Interval: 0.162 ventsCPA (Ventricles Coverage Probability Accuracy for 50% Confidence Interval: 0.393M2 and S2 tidal height model Plots an animated tidal height timeseries from a set of components by splitting into clockwise and anticlockwise rotating components.import numpy as np import xarray as xr import matplotlib import matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation from IPython.display import HTML import seaborn as sns matplotlib.rcParams['animation.embed_limit'] = 2**128 sns.set() sns.set_style("dark") # sns.set_palette("dark")Make new TidalHeightComp and ComponetSet classes to hold tidal data.class TidalHeightComp: def __init__(self, name, period, height): self.name = name self.period = period self.height = height # break into symmetrical clockwise and anticlockwise rotating components, each with half amplitude self.ccw_comp = (self.height[0]/2.0,self.height[1]) self.cw_comp = (self.height[0]/2.0,-self.height[1]) self.rotate = ["CCW","CW"] def get_name(self): return self.name def get_period(self): return self.period def get_height(self,rotate): return self.height def timeseries(self,t): return xr.DataArray([self.ccw_comp[0]*np.exp(1j*self.ccw_comp[1])*np.exp(1j*2.0*np.pi*(t/self.period)) ,self.cw_comp[0]*np.exp(1j*self.cw_comp[1])*np.exp(1j*2.0*np.pi*(-t/self.period))] ,coords=[("rotate",self.rotate),("time",t)] ,dims=["rotate","time"]) class ComponentSet: def __init__(self): self.tides = {} def add_tide(self,tide): self.tides[tide.get_name()]=tide def list_tides(self): return self.tides.keys() def tide(self,name): return self.tides[name]build set of tidal componentstides = ComponentSet() tides.add_tide(TidalHeightComp('M2',12.42060120,(1.09,1.0*np.pi))) tides.add_tide(TidalHeightComp('S2',12.00000000,(0.47,(1.0/3.0)*np.pi))) tides.add_tide(TidalHeightComp('N2',12.65834751,(0.23,(1.0/4.0)*np.pi))) tides.add_tide(TidalHeightComp('O1',25.81933871,(0.10,1.66*np.pi))) tides.add_tide(TidalHeightComp('K1',23.93447213,(0.05,0.75*np.pi)))array of times, in hours, to calculate currents at. One day I will make these real dates and use real tidal components.# dimensions, times for calculation t = np.arange(0, 1000.0, 0.5)choose which components to plot - order of selection matters# build xarray DataSet of tidal timeseries to plot - use all or specify a subset td = xr.Dataset() # for comp in tides.list_tides(): for comp in ['M2','S2','N2','K1','O1']: tide = tides.tide(comp) td = td.assign({comp:tide.timeseries(t)})plot and animatethis plots a single frame initially. 
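The splitting performed by ``TidalHeightComp`` rests on the identity $A\cos(\omega t+\varphi)=\tfrac{A}{2}e^{i(\omega t+\varphi)}+\tfrac{A}{2}e^{-i(\omega t+\varphi)}$: a real cosine wave is the sum of two half-amplitude phasors rotating in opposite directions. A quick numerical sanity check of that identity against the class above (assuming the class definition and imports have already been run; the time grid here is arbitrary):

```python
import numpy as np

t_check = np.arange(0, 50.0, 0.5)
m2 = TidalHeightComp('M2', 12.42060120, (1.09, 1.0 * np.pi))

# Sum of the CCW and CW rotating components...
total = m2.timeseries(t_check).sum(dim="rotate").values
# ...should equal the real cosine with the full amplitude and phase, with no imaginary part.
expected = 1.09 * np.cos(2.0 * np.pi * t_check / 12.42060120 + 1.0 * np.pi)
print(np.allclose(total.real, expected), np.allclose(total.imag, 0.0))
```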
Not sure how to stop this happening.fig,ax = plt.subplots(figsize=(4,8),dpi=100) i = 0 scale = 0.1 origin = 0 + 1j*0 new_origin = 0 + 1j*0 patches_c = [] patches_a = [] # component arrows and circles ii=0 for varname, da in td.data_vars.items(): patches_c.append(plt.Circle((new_origin.real,new_origin.imag),np.abs(da.sel(rotate="CCW").data[0]),fill=False,color='C'+str(ii),lw=1.0)) patches_a.append(plt.Arrow(new_origin.real,new_origin.imag,da.sel(rotate="CCW").data[i].real,da.sel(rotate="CCW").data[i].imag, width=1.0*scale,color='C'+str(ii))) new_origin = new_origin + da.sel(rotate="CCW").data[i] ii += 1 patches_c.append(plt.Circle((new_origin.real,new_origin.imag),np.abs(da.sel(rotate="CW").data[0]),fill=False,color='C'+str(ii),lw=1.0)) patches_a.append(plt.Arrow(new_origin.real,new_origin.imag,da.sel(rotate="CW").data[i].real,da.sel(rotate="CW").data[i].imag, width=1.0*scale,color='C'+str(ii))) new_origin = new_origin + da.sel(rotate="CW").data[i] ii += 1 for patch in patches_c: ax.add_patch(patch) for patch in patches_a: ax.add_patch(patch) # total velocity tracking point scat = ax.scatter(new_origin.real,new_origin.imag,color='xkcd:tomato',zorder=10) # total velocity ellipse trace x = np.array([new_origin.real]) y = np.array([new_origin.imag]) line, = ax.plot(x,y,zorder=0,color='xkcd:black',alpha=0.5) ax.set_aspect('equal') ax.set_xlim(-2.0,2.0) ax.set_ylim(-8,2.0) def animate(i): global x,y,patches_a # remove old arrows (arrows cannot be updated) for patch in patches_a: patch.remove() patches_a = [] # loop over all tidal components new_origin = 0 + 1j*0 ii = 0 for varname, da in td.data_vars.items(): patches_c[ii].center = (new_origin.real,new_origin.imag) patches_a.append(plt.Arrow(new_origin.real,new_origin.imag,da.sel(rotate="CCW").data[i].real,da.sel(rotate="CCW").data[i].imag, width=1.0*scale,color='C'+str(ii))) new_origin = new_origin + da.sel(rotate="CCW").data[i] ii += 1 patches_c[ii].center = (new_origin.real,new_origin.imag) patches_a.append(plt.Arrow(new_origin.real,new_origin.imag,da.sel(rotate="CW").data[i].real,da.sel(rotate="CW").data[i].imag, width=1.0*scale,color='C'+str(ii))) new_origin = new_origin + da.sel(rotate="CW").data[i] ii += 1 for patch in patches_a: ax.add_patch(patch) # total velocity tracking point scat.set_offsets(np.c_[new_origin.real,new_origin.imag]) # total velocity ellipse trace x = np.append(x,new_origin.real) y = np.append((y-0.005),new_origin.imag) line.set_xdata(x) line.set_ydata(y) return scat,lineSet up the animationanim = FuncAnimation( fig, animate,frames = 1600,interval = 50,blit=True)Run the animation. This will take some time to build...be patient...HTML(anim.to_jshtml())`customize_charlie`Python equivalent of the `Customize Charlie` program. Play with Charlie and make him discover the world around him. Required robot* Charlie Source codeYou can find the code in the accompanying [`.py` file](https://github.com/arturomoncadatorres/lego-mindstorms/blob/main/base/charlie/programs/customize_charlie.py). To get it running, simply copy and paste it in a new Mindstorms project. 
Importsfrom mindstorms import MSHub, Motor, MotorPair, ColorSensor, DistanceSensor, App from mindstorms.control import wait_for_seconds, wait_until, Timer from mindstorms.operator import greater_than, greater_than_or_equal_to, less_than, less_than_or_equal_to, equal_to, not_equal_to import mathInitializationprint("-"*15 + " Execution started " + "-"*15 + "\n") hub = MSHub() app = App()Turn off center button By setting its color to blackprint("Turning center button off...") hub.status_light.on('black') print("DONE!")Set arm motors to starting positionIn the original Scratch program, there's a `Charlie - Calibrate` block. I don't know exactly how the calibration is done, but in the end I think that it just sets the motor to position 0.Notice that moving motors to a specific position needs to be done individually (and sequentially). In other words, we cannot run a `MotorPair` to a position, only one motor at a time.print("Setting arm motors to position 0...") motor_b = Motor('B') # Left arm motor_f = Motor('F') # Right arm motor_b.run_to_position(0) motor_f.run_to_position(0) print("DONE!")Configure motorsprint("Configuring motors...") motors_wheels = MotorPair('A', 'E') print("DONE!")Rise armsprint("Rising arms...") motor_f.set_default_speed(-75) motors_arms = MotorPair('B', 'F') motors_arms.move(-90, unit='degrees') # Negative angle is clockwise movement motors_arms.move(90, unit='degrees') # Positive angle is counterclockwise movement print("DONE!")Customizing Charlie Defining functionsI think that the main point of the original program is to show how Scratch's `My Block`s work.This is the equivalent of Python functions. Thus, first we will define them.It is important to note how the objects `hue`, `app`, and all the motors are defined globally(and don't need to be passed as arguments to the functions). It is also worth mentioning thatit is good practice that functions always return something, even if it is a `None`.def charlie_happy(): print("Making Charlie happy...") hub.light_matrix.show_image('HAPPY') app.start_sound('Robot 1') # Move forward. motors_wheels.move(10, steering=0) # Move arms. motor_f.run_for_degrees(90) for ii in range(0,3): motor_f.run_for_seconds(0.2, speed=100) motor_f.run_for_seconds(0.2, speed=-100) motor_f.run_to_position(0, direction='shortest path') return None def charlie_silly(): print("Making Charlie silly...") hub.light_matrix.show_image('SILLY') app.start_sound('Robot 2') # Turn. motors_wheels.move(10, unit='cm', steering=100) # Move arms. for ii in range(0, 2): motor_b.run_to_position(90, direction='clockwise') motor_b.run_to_position(270, direction='counterclockwise') motor_b.run_to_position(0, direction='shortest path') return None def charlie_scared(): print("Making Charlie scared...") hub.light_matrix.show_image('CONFUSED') # We have no SCARED image. 
app.start_sound('Robot 3') motors_arms.move(-90, unit='degrees', steering=0) # Raise arms motors_wheels.move(5, unit='cm', steering=-100) # Turn back motors_arms.move(90, unit='degrees', steering=0) # Lower arms return NoneExecution functionsNow we can call the functionsprint("Should we make Charlie happy?") charlie_happy() wait_for_seconds(2) print("DONE!") print("Should we make Charlie silly?") charlie_silly() wait_for_seconds(2) print("DONE!") print("Should we make Charlie scared?") charlie_scared() wait_for_seconds(2) print("DONE!") print("-"*15 + " Execution ended " + "-"*15 + "\n")COVID-19 OPEN RESEARCH DATASETimport os import json import pandas as pd %cd '/home/myilmaz/devel/covid551982_1475446_bundle_archive/'/home/myilmaz/devel/covid551982_1475446_bundle_archivePapers researching chronic kidney disease as a comorbidity riskkag=pd.read_csv('Kaggle/target_tables/8_risk_factors/Chronic kidney disease.csv') kag.head() %ls document_parses keep=['Epidemiology, clinical course, and outcomes of critically ill adults with COVID-19 in New York City: a prospective cohort study'] arts=set(kag['Study'])-set(keep) os.path.getsize('document_parses/pdf_json')/1000000Filter papers that mention "creatinine" but are not listed as comorbidity risk paperscreat=[] alltext=[] for i in os.listdir('document_parses/pdf_json'): save=0 savee=0 with open('document_parses/pdf_json/'+i) as json_file: data = json.load(json_file) if data['metadata']['title'] not in list(arts): doc=[] text='' for c,j in enumerate(data['body_text']): row=[i,data['metadata']['title'],data['body_text'][c]['section'],c,data['body_text'][c]['text']] if data['body_text'][c]['text'].lower().find('creatinine')>-1: save+=1 if data['body_text'][c]['text'].lower().find('covid')>-1: savee+=1 if data['body_text'][c]['text'].lower().find('sars-cov-2')>-1: savee+=1 doc.append(row) text+=data['body_text'][c]['text'] if save>0: if savee>0: creat.append(doc) alltext.append(text) else: pass import numpy as np np.min([len(i) for i in alltext]) np.max([len(i) for i in alltext]) print('Average document length is {} words'.format(np.mean([len(i) for i in alltext]))) jsons=[j[0] for i in creat for j in i] titles=[j[1] for i in creat for j in i] sections=[j[2] for i in creat for j in i] sectionNo=[j[3] for i in creat for j in i] text=[j[4] for i in creat for j in i] creats=pd.DataFrame(None,columns=['jsons','titles','sections','sectionNo','text']) creats=pd.DataFrame(None,columns=['jsons','titles','sections','sectionNo','text']) creats.jsons=jsons creats.titles=titles creats.sections=sections creats.sectionNo=sectionNo creats.text=text creats.head() docs=creats.copy# NUMBER OF UNIQUE DOCUMENTS IN THE DATA SET docs['jsons'].nunique()(deep=True) docs.drop_duplicates(keep='first',inplace=True) # NUMBER OF UNIQUE DOCUMENTS IN THE DATA SET docs['jsons'].nunique() docs.to_csv('covid.csv') #START HERE docs=pd.read_csv('covids.csv')Use pretrained NER model to find Problems, Tests, and Treatmentsimport os import pyspark.sql.functions as F from pyspark.sql.functions import monotonically_increasing_id import json import os from pyspark.ml import Pipeline from pyspark.sql import SparkSession from sparknlp.annotator import * from sparknlp.base import * # Install pyspark ! pip install --ignore-installed -q pyspark==2.4.4 # Install Spark NLP ! 
pip install --ignore-installed -q spark-nlp==2.5 import sparknlp print (sparknlp.version()) import json import os from pyspark.ml import Pipeline from pyspark.sql import SparkSession from sparknlp.annotator import * from sparknlp.base import * from pyspark.sql.functions import monotonically_increasing_id import pyspark.sql.functions as F import pyspark.sql.types as t spark=sparknlp.start() docs.fillna('',inplace=True) docs.head(1) sparkdocs=spark.createDataFrame(docs).toDF('index','docid','title','section','sectionNo','text') document_assembler = DocumentAssembler() \ .setInputCol("text")\ .setOutputCol('document') sentence_detector = SentenceDetector() \ .setInputCols(["document"]) \ .setOutputCol("sentence") tokenizer = Tokenizer() \ .setInputCols(["sentence"]) \ .setOutputCol("token") word_embeddings = WordEmbeddingsModel.load("/home/myilmaz/cache_pretrained/embeddings_clinical_en_2.4.0_2.4_1580237286004")\ .setInputCols(["sentence", "token"])\ .setOutputCol("embeddings") clinical_ner = NerDLModel.load('/home/myilmaz/cache_pretrained/ner_clinical_en_2.4.0_2.4_1580237286004') \ .setInputCols(["sentence", "token", "embeddings"]) \ .setOutputCol("ner") ner_converter = NerConverter() \ .setInputCols(["sentence", "token", "ner"]) \ .setOutputCol("ner_chunk") nlpPipeline = Pipeline(stages=[document_assembler,sentence_detector,tokenizer, word_embeddings, clinical_ner,ner_converter ]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = nlpPipeline.fit(empty_data) results=model.transform(sparkdocs) results.columns exploded = results.select('docid','title','section','sectionNo',F.explode(F.arrays_zip('token.metadata','token.result','ner.result')).alias("cols")) \ .select('docid','title','section','sectionNo',F.expr("cols['0'].sentence").alias("sentid"), F.col('cols.1').alias("token"),F.col('cols.2').alias("label"))Save annotated documents for further analysisexploded.write.option("header", "true").csv("covid2.csv") os.listdir('covid2.csv')[0] import pyspark.sql.types as t myschema = t.StructType( [ t.StructField('docid', t.StringType(), True), t.StructField('title', t.StringType(), True), t.StructField('section', t.StringType(), True), t.StructField('sectionNo', t.StringType(), True), t.StructField('sentid', t.IntegerType(), True), t.StructField('token', t.StringType(), True), t.StructField('label', t.StringType(), True) ] ) csvs=os.listdir('covid2.csv') big=pd.DataFrame(None) for i in csvs: dfs=spark.read.csv('covid2.csv/'+i,sep=',',schema=myschema,header=True) one=dfs.toPandas() big=big.append(one) big.head() import numpy as np tokens=[] savei='' save=0 for i,j in zip(big.token,big.label): if j.split('-')[0]!='I': if save<0: tokens[save]=savei tokens.append(np.nan) savei=i save=0 continue else: tokens.append(savei) savei=i save=0 continue elif j.split('-')[0]=='I': savei+=' '+i save-=1 tokens.append(np.nan) else: tokens.append(np.nan) if save<0: tokens[save]=savei tokens.append(np.nan) else: tokens.append(savei) tokens=tokens[1:] big['chunks']=tokens bigdf=big[big['chunks'].notnull()] bigdf=bigdf[bigdf['label']!='O'] bigdf['chunks'].value_counts()Prep for visualizationsproblems=bigdf[bigdf['label']=='B-PROBLEM'] tests=bigdf[bigdf['label']=='B-TEST'] len(problems[problems['chunks']=='proteinuria']) probhist=pd.DataFrame(problems['chunks'].value_counts()) probhist=probhist.rename(columns={'chunks':'counts'}) probhist2=probhist.iloc[0:100] testhist=pd.DataFrame(tests['chunks'].value_counts()) testhist=testhist.rename(columns={'chunks':'counts'}) testhist2=testhist.iloc[0:100]Look at 
most frequent "Test" entitiestesthist2.head(40) import matplotlib.pyplot as plt import seaborn as sns fig, ax = plt.subplots(figsize =(24,12)) chart=sns.barplot(testhist2.index,testhist2['counts']) chart.set_xticklabels(testhist2.index,rotation=90) plt.title('Test Entities',fontsize=18) plt.show()The pretrained model is returning a lot of false positive for Test entities, but you can still see that kidney related tests such as "creatinine" are well represented in the dataset. Look at most frequent "Problem" entitiesprobhist2.head(40) import seaborn as sns import seaborn as sns fig, ax = plt.subplots(figsize =(24,12)) chart=sns.barplot(probhist2.index,probhist2['counts']) chart.set_xticklabels(probhist2.index,rotation=90) plt.title('Problem Entities',fontsize=18) plt.show()You can see that kidney related problems such as "AKI" are well represented in the dataset. Find 'Test' entities near the most frequent kidney related 'Problem' entityproblems=pd.DataFrame(problems).reset_index(drop=True) problems['sectionid']=problems.docid+'-'+problems.section tests=pd.DataFrame(tests).reset_index(drop=True) tests['sectionid']=tests.docid+'-'+tests.section akis=pd.DataFrame(problems[problems['chunks']=='AKI']).reset_index(drop=True) a=list(set(akis['sectionid'])) akitest=tests[tests['sectionid'].isin(a)] akicount=pd.DataFrame(akitest.groupby(['chunks'])['label'].count()).reset_index() akicount=akicount.sort_values(by='label',ascending=False).reset_index(drop=True) akicount.columns=['chunk','counts'] akicount import seaborn as sns import seaborn as sns fig, ax = plt.subplots(figsize =(24,12)) chart= sns.barplot(akicount['chunk'][0:50],akicount['counts'][0:50]) chart.set_xticklabels(akicount.chunk,rotation=90) plt.title("Clinical Tests Near 'AKI'",fontsize=20) plt.show()Our clinical tests NER is returning a lot of false positives but we still see that creatinine, CRP, and PCR tests are well represented in the dataset, appearing in the same section as "AKI". This tells me the information is probably not historical and I will have measurements that I can use for predictions as well as terms to use for topic modelling and text classification. Find 'Problem' entities near the most frequent kidney related 'Test' entitycreatins=pd.DataFrame(tests[tests['chunks']=='creatinine']).reset_index(drop=True) b=list(set(creatins['sectionid'])) creatprob=problems[problems['sectionid'].isin(b)] creatcount=pd.DataFrame(creatprob.groupby(['chunks'])['label'].count()).reset_index() creatcount=creatcount.sort_values(by='label',ascending=False).reset_index(drop=True) creatcount.columns=['chunk','counts'] creatcount creatcounts=creatcount.iloc[0:50] import seaborn as sns import seaborn as sns fig, ax = plt.subplots(figsize =(24,12)) chart= sns.barplot(creatcounts.chunk,creatcounts['counts']) chart.set_xticklabels(creatcounts.chunk,rotation=90) plt.title("Patient Problems Near 'Creatinine' Test",fontsize=20) plt.show()AKI, hypertension, diabetes, and acute kidney injury are all well represented in the dataset, appearing in the same section as "creatinine" tests. This tells me the information is probably not historical and I will have measurements that I can use for predictions as well as terms to use for topic modelling and text classification. 
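The 'near' relation above is approximated at the section level (a shared docid + section key). Because the exploded annotations also carry a sentence id (`sentid`), the same counting pattern could be tightened to sentence-level co-occurrence if a stricter notion of proximity is wanted. A minimal sketch under that assumption, reusing the `problems` and `tests` frames built above; the `sentkey` column is introduced here purely for illustration.

# Hypothetical sentence-level variant of the section-level co-occurrence counting used above.
# Assumes problems/tests DataFrames with docid, section, sentid, chunks and label columns.
problems['sentkey'] = problems.docid + '-' + problems.section + '-' + problems.sentid.astype(str)
tests['sentkey'] = tests.docid + '-' + tests.section + '-' + tests.sentid.astype(str)
aki_sent_keys = set(problems.loc[problems['chunks'] == 'AKI', 'sentkey'])
aki_tests_by_sentence = (tests[tests['sentkey'].isin(aki_sent_keys)]
                         .groupby('chunks')['label'].count()
                         .sort_values(ascending=False))
aki_tests_by_sentence.head(20)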
Frequency of 'patient' mentions in documentspatient=pd.DataFrame(big[(big['token'].str.lower()=='patient')|(big['token'].str.lower()=='patients')]).reset_index(drop=True) patients=patient.groupby(['docid'])['token'].count() patients=patients.reset_index() patients=patients.rename(columns={'token':'counts'}) len(patients) sns.boxplot(patients['counts']) plt.title('Frequency of Patient Mentions in 1568 Documents',fontsize=14)Frequency of 'case report' mentions in documentscase=pd.DataFrame(big[(big['section'].str.lower()=='case report')|(big['section']=='case study')|(big['chunks'].str.lower()=='case report')|(big['chunks'].str.lower()=='case study')|(big['section'].str.lower()=='case reports')|(big['section']=='case studies')|(big['chunks'].str.lower()=='case reports')|(big['chunks'].str.lower()=='case studies')]).reset_index(drop=True) cases=case.groupby(['docid'])['section'].count() cases=cases.reset_index() cases=cases.rename(columns={'section':'counts'}) len(cases) sns.boxplot(cases['counts']) plt.title('Frequency of Case Report/Study Mentions in 78 Documents',fontsize=14)These 78 documents contain a median of about 550 token rows tied to case report/study sections or chunks (the average document length computed above is about 30,000 characters), so I think I will have enough patient data to attempt some predictions.artlist=kag['Study'] pres=[] for i in os.listdir('document_parses/pdf_json'): with open('document_parses/pdf_json/'+i) as json_file: data = json.load(json_file) if data['metadata']['title'] in list(artlist): doc=[] for c,j in enumerate(data['body_text']): row=[i,data['metadata']['title'],data['body_text'][c]['section'],data['body_text'][c]['text']] doc.append(row) pres.append(doc) jsons=[j[0] for i in pres for j in i] titles=[j[1] for i in pres for j in i] sections=[j[2].lower() for i in pres for j in i] text=[j[1].lower()+'. '+j[2].lower()+'. 
'+j[3].lower() for i in pres for j in i] pres2=pd.DataFrame(None,columns=['jsons','titles','sections','text']) pres2['jsons']=jsons pres2['titles']=titles pres2['section']=sections pres2['text']=text pres2.head(1) case=pd.DataFrame(pres2[(pd.Series(pres2['section']).str.contains('case report'))|(pd.Series(pres2['section']).str.contains('case study'))|(pd.Series(pres2['text']).str.contains('case report'))|(pd.Series(pres2['text']).str.contains('case study'))|(pd.Series(pres2['section']).str.contains('case reports'))|(pd.Series(pres2['section']).str.contains('case studies'))|(pd.Series(pres2['text']).str.contains('case reports'))|(pd.Series(pres2['text']).str.contains('case studies'))]).reset_index(drop=True) case.head() len(case) case['jsons'].nunique() case['titles'].value_counts()Classification Template Installing Pycaret# !pip install pycaret --user # !pip install pycaret-nightly --userImport Librariesimport pandas as pd from pycaret.classification import *Import Dataset# path to your dataset, can be a csv file or xlsx dataset_path = "../Bank_Personal_Loan_Modelling_transformed.xlsx" ## use code as per the type of data source ## use below line to read data from csv file ## df = pd.read_csv(dataset_path) df = pd.read_excel(dataset_path, index_col=0) df.head() target = 'Personal Loan'Data Setup* See [here](https://github.com/pycaret/pycaret/blob/master/tutorials/Binary%20Classification%20Tutorial%20Level%20Beginner%20-%20%20CLF101.ipynb) for notebook example(basic level)* See [here](https://pycaret.org/classification/) for classification documentation.# to-do: separate cat and numbers data=setup(df,target=target, categorical_features=['Family', 'Education'], train_size = 0.8, fold=5)Comparing models and selecting top 3#Selecting top3 models for tuning top3_models=compare_models(n_select=3, fold=5) print(top3_models) # to-do # separate notebooks, have basic ones in first # then tune it using hyperparamaters # then use ensembling, stacking and blending # same for regression, classificationTuning Models* Compare model just evaluates using the default hyperparameters, tune model will use cross validation to tune the models, here we will tune top 3 models selected in compare models. NOTE: hyperparameter tuning is performed in a separate notebook,tune_model(top3_models[0]) # tune_model(top3_models[1]) # tune_model(top3_models[2]) #Tuning the top 3 models #tuned_model_top3=[tune_model(i) for i in top3_models] #print(tuned_model_top3)Ensembling* Create ensemble using the top 3 tuned model NOTE: Notice the output of a cell is for last model executed. Not all three.## Ensembling top 3 tuned models # bagged_tuned_top3=[ensemble_model(i,method='Bagging') for i in tuned_model_top3] # print(bagged_tuned_top3)Blending Models## Blend top3 models # blender=blend_models(estimator_list=top3_models)Stacking models# stacker=stack_models(top3_models)Plot Model resultsplot_model(top3_models[0]) # plot_model(top3_models[1]) # plot_model(top3_models[2])Evaluate Modelsevaluate_model(top3_models[0]) # evaluate_model(top3_models[1]) # evaluate_model(top3_models[2])2. 
Explainability Techniques * Read more on using pycaret models separately with SHAP* Or better how to work with SHAP in pycaret# !pip install shap final_model = top3_models[0] final_model import shap explainer = shap.Explainer(final_model) # data is the information grid returned by the setup method, type is tuple # seems like second index contains the transformed dataframe transformed_df = data[5] transformed_df[target] = data[2] transformed_df.head() # #Error in this cell # shap_values = explainer(transformed_df, check_additivity=False) # shap.plots.waterfall(shap_values[0]) # shap.initjs() ## Visualize first prediction # shap.plots.force(shap_values[2]) # display(shap.plots.force(explainer.expected_value[0], shap_values[0]))SHAP on sample dataset# !pip install xgboost import xgboost import shap # train an XGBoost model X, y = shap.datasets.boston() model = xgboost.XGBRegressor().fit(X, y) # explain the model's predictions using SHAP # (same syntax works for LightGBM, CatBoost, scikit-learn, transformers, Spark, etc.) explainer = shap.Explainer(model) shap_values = explainer(X) shap_values # visualize the first prediction's explanation shap.plots.waterfall(shap_values[0]) #Visualize all predictions shap.plots.scatter(shap_values[:,"AGE"],shap_values) shap.plots.beeswarm(shap_values)Interpretation using Pycaret SHAP implementation* Note that is only supports tree based model, read more [here](https://pycaret.org/classification/).top3_models #[interpret_model(i) for i in best_model] interpret_model(top3_models[1],)Not optimizing in this step# , use_holdout = False # final_model=automl(optimize = 'Accuracy') print(top3_models)[XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1, colsample_bynode=1, colsample_bytree=1, gamma=0, gpu_id=-1, importance_type='gain', interaction_constraints='', learning_rate=0.300000012, max_delta_step=0, max_depth=6, min_child_weight=1, missing=nan, monotone_constraints='()', n_estimators=100, n_jobs=-1, num_parallel_tree=1, objective='binary:logistic', random_state=272, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, subsample=1, tree_method='auto', use_label_encoder=True, validate_parameters=1, verbosity=0), GradientBoostingClassifier(ccp_alpha=0.0, criterion='friedman_mse', init=None, learning_rate=0.1, loss='deviance', max_depth=3, max_features=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, mi[...]Saving final model# from google.colab import drive # drive.mount('/content/gdrive') type(top3_models[0]) for model in top3_models: model_name = model.__class__.__name__ save_model(model,'./saved_models/{0}'.format(model_name)) print('{0} model saved!'.format(model_name)) transformed_df.head() transformed_df.to_excel('../Bank_Personal_Loan_Modelling_transformed.xlsx')Install fastai2 from github!pip install -U pandas --upgrade !pip install -U fastcore --upgrade !pip install -U fastai --upgrade #!pip install -Uqq git+https://github.com/tyoc213/fastai_xla_extensions@fix_prev_lenet VERSION = "20200707" #"20200515" @param ["1.5" , "20200325", "nightly"] !curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py #!TORCH_SHOW_CPP_STACKTRACES=1 python pytorch-xla-env-setup.py --apt-packages libomp5 libopenblas-dev !python pytorch-xla-env-setup.py --version $VERSION --apt-packages libomp5 libopenblas-dev !pip freeze | grep torch !pip freeze | grep fast #import fastai_xla_extensions.core from fastai.vision.all import * default_device() path = 
untar_data(URLs.MNIST_SAMPLE) Path.BASE_PATH = path; path.ls() (path/'train').ls()multi TPU# Configures training (and evaluation) parameters import torchvision from torchvision import datasets import torchvision.transforms as transforms import torch_xla.distributed.parallel_loader as pl import torch_xla.core.xla_model as xm import torch_xla.distributed.xla_multiprocessing as xmp from fastai.vision.all import * import time from fastai.test_utils import * print(f'torch version {torch.__version__}') import pdb path = untar_data(URLs.MNIST_SAMPLE) Path.BASE_PATH = path; path.ls() def debug_on(*exceptions): if not exceptions: exceptions = (AssertionError, ) def decorator(f): @functools.wraps(f) def wrapper(*args, **kwargs): try: return f(*args, **kwargs) except exceptions: pdb.post_mortem(sys.exc_info()[2]) return wrapper return decorator class Lenet2(nn.Module): def __init__(self): super(Lenet2, self).__init__() self.conv1 = nn.Conv2d(3, 6, 3) self.conv2 = nn.Conv2d(6, 16, 3) self.fc1 = nn.Linear(400, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 2) # Only 2 outputs instead of 10 @debug_on(KeyError) def forward(self, x): # Max pooling over a (2, 2) window x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2)) # If the size is a square you can only specify a single number x = F.max_pool2d(F.relu(self.conv2(x)), 2) x = x.view(-1, self.num_flat_features(x)) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x @debug_on(KeyError) def num_flat_features(self, x): size = x.size()[1:] # all dimensions except the batch dimension num_features = 1 for s in size: num_features *= s return num_features # third level patch... alll!!!! class _BaseOptimizer(): "Common functionality between `Optimizer` and `OptimWrapper`" @debug_on(KeyError) def __getattr__(self, name): #print(f"================= = = = = = ORIGINAL BASE OPTIMIZER {name} = = = = = =================") return getattr(self, name) @debug_on(KeyError) def all_params(self, n=slice(None), with_grad=False): #print(f"================= = = = = = ORIGINAL BASE OPTIMIZER def all_params(self, n=slice(None), with_grad=False): = = = = = =================") #print(f"================= = = = = = {type(self)} {dir(self)} = = = = = =================") #print(f"================= = = = = = {type(self.param_groups)} {dir(self.param_groups)} = = = = = =================") ###print(f"================= = = = = = {self.param_groups} = = = = = =================") # TODO: unroll this loop res = L((p,pg,self.state[p],hyper) for pg,hyper in zip(self.param_lists[n],self.hypers[n]) for p in pg) return L(o for o in res if o[0].grad is not None) if with_grad else res @debug_on(KeyError) def _set_require_grad(self, rg, p,pg,state,h): p.requires_grad_(rg or state.get('force_train', False)) @debug_on(KeyError) def freeze_to(self, n): self.frozen_idx = n if n >= 0 else len(self.param_lists) + n if self.frozen_idx >= len(self.param_lists): warn(f"Freezing {self.frozen_idx} groups; model has {len(self.param_lists)}; whole model is frozen.") for o in self.all_params(slice(n, None)): self._set_require_grad(True, *o) for o in self.all_params(slice(None, n)): self._set_require_grad(False, *o) @debug_on(KeyError) def freeze(self): assert(len(self.param_lists)>1) self.freeze_to(-1) @debug_on(KeyError) def set_freeze(self, n, rg, ignore_force_train=False): for p in self.param_lists[n]: p.requires_grad_(rg or (state.get('force_train', False) and not ignore_force_train)) @debug_on(KeyError) def unfreeze(self): self.freeze_to(0) @debug_on(KeyError) def set_hypers(self, 
**kwargs): L(kwargs.items()).starmap(self.set_hyper) @debug_on(KeyError) def _set_hyper(self, k, v): for v_,h in zip(v, self.hypers): h[k] = v_ @debug_on(KeyError) def set_hyper(self, k, v): if isinstance(v, slice): if v.start: v = even_mults(v.start, v.stop, len(self.param_lists)) else: v = [v.stop/10]*(len(self.param_lists)-1) + [v.stop] v = L(v, use_list=None) if len(v)==1: v = v*len(self.param_lists) assert len(v) == len(self.hypers), f"Trying to set {len(v)} values for {k} but there are {len(self.param_lists)} parameter groups." self._set_hyper(k, v) @property def param_groups(self): #print('%%%%%%%%%%%%%%%%%%%% P A R A M G R O U P S %%%%%%%%%%%%%%%%%%%%') return [{**{'params': pg}, **hp} for pg,hp in zip(self.param_lists, self.hypers)] @param_groups.setter def param_groups(self, v): #print('%%%%%%%%%%%%%%%%%%%% P A R A M G R O U P S S E T T E R %%%%%%%%%%%%%%%%%%%%') for pg,v_ in zip(self.param_lists,v): pg = v_['params'] for hyper,v_ in zip(self.hypers,v): for k,t in v_.items(): if k != 'params': hyper[k] = t @debug_on(KeyError) def __setstate__(self, d): #print('%%%%%%%%%%%%%%%%%%%%****** ####### set state baseeeeee') self.__dict__.update(d) # #print('%%%%%%%%%%%%%%%%%%%%****** ####### set state ENDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD') @debug_on(KeyError) def __getstate__(self): #print('%%%%%%%%%%%%%%%%%%%%****** ####### get state baseeeeee') d = dict(self.__dict__) return d # Cell @debug_on(KeyError) def _update(state, new=None): if new is None: return state if isinstance(new, dict): state.update(new) return state # Cell @log_args(but='params,cbs,defaults') class Optimizer(_BaseOptimizer): "Base optimizer class for the fastai library, updating `params` with `cbs`" _keep_on_clear = ['force_train', 'do_wd'] @debug_on(KeyError) def __getattr__(self, name): #print(f"================= = = = = = BASE OPTIMIZER {name} = = = = = =================") return getattr(self, name) def __init__(self, params, cbs, train_bn=True, **defaults): #print(f"================= = = = = = OPTIMIZER(_BaseOptimizer) = = = = = =================\n"*10) params = L(params) self.cbs,self.state,self.train_bn = L(cbs),defaultdict(dict),train_bn defaults = merge(*self.cbs.attrgot('defaults'), defaults) self.param_lists = L(L(p) for p in params) if isinstance(params[0], (L,list)) else L([params]) self.hypers = L({} for _ in range_of(self.param_lists)) self.set_hypers(**defaults) self.frozen_idx = 0 @debug_on(KeyError) def zero_grad(self): for p,*_ in self.all_params(with_grad=True): p.grad.detach_() p.grad.zero_() @debug_on(KeyError) def step(self): #print(f"================= = = = = = BASE OPTIMIZER STEPPPPPPPPPPP P P P P PP P P P P = = = = = =================") for p,pg,state,hyper in self.all_params(with_grad=True): for cb in self.cbs: state = _update(state, cb(p, **{**state, **hyper})) self.state[p] = state @debug_on(KeyError) def clear_state(self): for p,pg,state,hyper in self.all_params(): self.state[p] = {k: state[k] for k in self._keep_on_clear if k in state} @debug_on(KeyError) def state_dict(self): state = [self.state[p] for p,*_ in self.all_params()] return {'state': state, 'hypers': self.hypers} @debug_on(KeyError) def load_state_dict(self, sd): assert len(sd["hypers"]) == len(self.param_lists) assert len(sd["state"]) == sum([len(pg) for pg in self.param_lists]) self.hypers = sd['hypers'] self.state = {p: s for p,s in zip(self.all_params().itemgot(0), sd['state'])} ### second patch #@patch #def param_groups(self:Optimizer): return [{**{'params': pg}, **hp} for pg,hp in zip(self.param_lists, self.hypers)] #@patch 
#def param_groups(self:Optimizer, v): # for pg,v_ in zip(self.param_lists,v): pg = v_['params'] # for hyper,v_ in zip(self.hypers,v): # for k,t in v_.items(): # if k != 'params': hyper[k] = t # created to copy optimizer and have `__get_state__` and `__set_state__` @debug_on(KeyError) @patch def step(self:Optimizer): #print(f'=========== * = ⁰ = * = * = step optimizer 0') for p,pg,state,hyper in self.all_params(with_grad=True): #print(f'=========== * = ⁰ = * = * = step optimizer 1') for cb in self.cbs: #print(f'=========== * = ⁰ = * = * = step optimizer INTERNAL') state = _update(state, cb(p, **{**state, **hyper})) #print(f'=========== * = ⁰ = * = * = step optimizer UPDATE') self.state[p] = state #print(f'=========== * = ⁰ = * = * = step optimizer ENDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD ENDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD ENDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD ENDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD ENDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD ENDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD ENDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD ENDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD ENDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD ENDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD ENDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD') # the new thing!!!! a PiclableOptimizer that has __getstate__() and __setstate__() this last is not needed AFAIK class PickableOpt(Optimizer): def __init__(self, opt): #print('############## %%%%%%%%%%%%%%%%%%%%%%%%%%% deep copy from optimizer!!!!') self.__dict__ = deepcopy(opt.__dict__) self.opt = opt def __getstate__(self): #print('############## %%%%%%%%%%%%%%%%%%%%%%%%%%% PickableOpt#__getstate__!!! 
get state xla opt callback') v = vars(self) # v['param_groups'] = lambda x: return torch.zeros(7) #print(f'vars type are: {type(v)}') #print(f'vars type are: {len(v)}') #print(f'vars type are: {v.keys()}') #print(f'vars are: {v}') #print(f'vars are: {v}') v = self.state_dict() v['param_groups'] = self.param_groups return v #### first patch class XLAOptimProxy2: "Proxy optimizer to override `opt.step` with Pytorch XLA sync method `xm.optimizer_step` " def __init__(self,opt, barrier=True): #print(f"================= = = = = = XLAOptimProxy2 = = = = = =================") self.opt = PickableOpt(opt) self._barrier = barrier @debug_on(KeyError) def xla_step(self): #print('step....') xm.optimizer_step(self.opt,barrier=self._barrier) # sync on gradient update @debug_on(KeyError) def __getattr__(self,name): #print('*** *** ***',name) # print(f'***** ***** {name} {getattr(self.op, name, "*** no lo tiene")}') if name == 'step': # override proxying for step return getattr(self,'xla_step') if name in ('barrier','_barrier'): return getattr(self,name) # proxy everything else return getattr(self.opt,name) @property def barrier(self): return self._barrier @barrier.setter def barrier(self,v): self._barrier = v class XLAOptCallback2(Callback): 'Callback to replace `opt.step` with `xm.optimizer_step(opt)` as required to run on TPU' def __init__(self, barrier=True): self._barrier = barrier @debug_on(KeyError) def before_fit(self): 'replace opt with proxy which calls `xm.optimizer_step` instead of `opt.step` and set `dls.device` and model to `xla_device`' to_device(self.dls, device=xm.xla_device()) self.model.to(self.dls.device) if self.learn.opt is not None: if not isinstance(self.learn.opt,XLAOptimProxy2): opt = self.learn.opt self.learn.opt = XLAOptimProxy2(opt, barrier=self._barrier) @debug_on(KeyError) def after_fit(self): 'restore original opt ' if isinstance(self.learn.opt, XLAOptimProxy2): opt = self.learn.opt.opt self.learn.opt = opt def map_fn(index, flags): # from fastai.callback.all import * dede = xm.xla_device() print(f'index is {index} and flags are {flags}') #xm.rendezvous('init') if not xm.is_master_ordinal(): print(f"this is {dede}:{index} entering download once") xm.rendezvous('download_only_once') dblock = DataBlock( splitter = GrandparentSplitter(), item_tfms = Resize(28), blocks = (ImageBlock, CategoryBlock), get_items = get_image_files, get_y = parent_label, batch_tfms = [] ) if xm.is_master_ordinal(): xm.master_print(f'this is {dede} exiting download once') xm.rendezvous('download_only_once') xm.master_print('creating lenet_tpu') lenet_tpu = Lenet2() xm.master_print('lenet created, goiing for dls_tpu') dls_tpu = dblock.dataloaders(path, device=dede) xm.master_print(f'creating learner!!! 
for {dede}') tpu_learner = Learner(dls_tpu, lenet_tpu, metrics=accuracy, loss_func=F.cross_entropy, cbs=[]) print(f"################ fit for {dede}") xm.master_print(f'***** fit for {dede}') tpu_learner.fit(1, cbs=[XLAOptCallback2()]) xm.master_print(f'***** end fit for {dede}') t = torch.randn((2, 2), device=dede) print("################Process", index ,"is using", xm.xla_real_devices([str(dede)])[0]) # https://stackoverflow.com/a/9929970/682603 # excepthook # import traceback import logging import os, sys def my_excepthook(excType, excValue, traceback, logger): print("=== *** @@@ ### %%% === *** @@@ ### %%% === *** @@@ ### %%% === *** @@@ ### %%% === *** @@@ ### %%% === *** @@@ ### %%% Logging an uncaught exception", exc_info=(excType, excValue, traceback)) sys.excepthook = my_excepthook sys.unraisablehook = my_excepthook ##############threading.excepthook #https://docs.python.org/3/library/sys.html#sys.excepthook print('launching n procs') flags={} flags['batch_size'] = 32 flags['num_workers'] = 8 flags['num_epochs'] = 1 flags['seed'] = 1234 xmp.spawn(map_fn, args=(flags,), nprocs=8, start_method='fork') print('end of launch')torch version 1.7.0a0+12b5bdc launching n procs index is 0 and flags are {'batch_size': 32, 'num_workers': 8, 'num_epochs': 1, 'seed': 1234} this is xla:1 exiting download once index is 5 and flags are {'batch_size': 32, 'num_workers': 8, 'num_epochs': 1, 'seed': 1234} this is xla:0:5 entering download once index is 7 and flags are {'batch_size': 32, 'num_workers': 8, 'num_epochs': 1, 'seed': 1234} this is xla:0:7 entering download once index is 6 and flags are {'batch_size': 32, 'num_workers': 8, 'num_epochs': 1, 'seed': 1234} this is xla:0:6 entering download once index is 1 and flags are {'batch_size': 32, 'num_workers': 8, 'num_epochs': 1, 'seed': 1234} this is xla:0:1 entering download once index is 3 and flags are {'batch_size': 32, 'num_workers': 8, 'num_epochs': 1, 'seed': 1234} this is xla:0:3 entering download once index is 2 and flags are {'batch_size': 32, 'num_workers': 8, 'num_epochs': 1, 'seed': 1234} this is xla:0:2 entering download once index is 4 and flags are {'batc[...]Strings & Numbers Presenting text (strings)print("{:<15} {:<15} {:<15}".format("Change", "Return", "Volatility")) print("-"*42)Change Return Volatility ------------------------------------------Number Classesprint(type(5)) print(type(10.0)) Combining Stings and Numbers in Outputprint("{:<15}{:<15}{:<15}".format("Change", "Return", "Volatility")) print('-'*42) print("{:<15.2f} {:<15.3f} {:<15.4f}".format(2.31,.0145, .2345)) x = 5 y = 10.0Mathematical Operatorsprint(x + y) print(x - y) print(x * y) print(x / y) print(x ** y) print(x // y) print(x % y)15.0 -5.0 50.0 0.5 9765625.0 0.0 5.0Incrementingx = 5 x += x x**Classical K-Means**import matplotlib.pyplot as plot import seaborn as seasns; seasns.set() import numpy as nump from sklearn.cluster import KMeans as skKMeans from sklearn.datasets import make_blobs X, y_true = make_blobs(n_samples=250, centers=4, cluster_std=0.60, random_state=0) plot.scatter(X[:, 0], X[:, 1], s=50); plot.show() kmeans_res = skKMeans(n_clusters=4) kmeans_res.fit(X) y_val_kmeans = kmeans_res.predict(X) plot.scatter(X[:, 0], X[:, 1], c=y_val_kmeans, s=50, cmap='viridis') centers = kmeans_res.cluster_centers_ plot.scatter(centers[:, 0], centers[:, 1], c='grey', s=200, alpha=0.5); plot.show() kmeans_res = skKMeans(n_clusters=4) kmeans_res.fit(X) y_val_kmeans = kmeans_res.predict(X) plot.scatter(X[:, 0], X[:, 1], c=y_val_kmeans, s=50, cmap='viridis') centers = 
kmeans_res.cluster_centers_ plot.scatter(centers[:, 0], centers[:, 1], c='grey', s=200, alpha=0.5); plot.show()Sveučilište u Zagrebu Fakultet elektrotehnike i računarstva Strojno učenje 2018/2019 http://www.fer.unizg.hr/predmet/su ------------------------------ Laboratorijska vježba 3: Stroj potpornih vektora i algoritam k-najbližih susjeda*Verzija: 0.3 Zadnji put ažurirano: 9. studenog 2018.*(c) 2015-2017 , Objavljeno: **9. studenog 2018.** Rok za predaju: **3. prosinca 2018. u 07:00h**------------------------------ UputeTreća laboratorijska vježba sastoji se od sedam zadataka. U nastavku slijedite upute navedene u ćelijama s tekstom. Rješavanje vježbe svodi se na **dopunjavanje ove bilježnice**: umetanja ćelije ili više njih **ispod** teksta zadatka, pisanja odgovarajućeg kôda te evaluiranja ćelija. Osigurajte da u potpunosti **razumijete** kôd koji ste napisali. Kod predaje vježbe, morate biti u stanju na zahtjev asistenta (ili demonstratora) preinačiti i ponovno evaluirati Vaš kôd. Nadalje, morate razumjeti teorijske osnove onoga što radite, u okvirima onoga što smo obradili na predavanju. Ispod nekih zadataka možete naći i pitanja koja služe kao smjernice za bolje razumijevanje gradiva (**nemojte pisati** odgovore na pitanja u bilježnicu). Stoga se nemojte ograničiti samo na to da riješite zadatak, nego slobodno eksperimentirajte. To upravo i jest svrha ovih vježbi.Vježbe trebate raditi **samostalno**. Možete se konzultirati s drugima o načelnom načinu rješavanja, ali u konačnici morate sami odraditi vježbu. U protivnome vježba nema smisla.import numpy as np import scipy as sp import pandas as pd import mlutils import matplotlib.pyplot as plt %pylab inlinePopulating the interactive namespace from numpy and matplotlib1. Klasifikator stroja potpornih vektora (SVM) (a) Upoznajte se s razredom [`svm.SVC`](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.htmlsklearn.svm.SVC), koja ustvari implementira sučelje prema implementaciji [`libsvm`](http://www.csie.ntu.edu.tw/~cjlin/libsvm/). Primijenite model `SVC` s linearnom jezgrenom funkcijom (tj. bez preslikavanja primjera u prostor značajki) na skup podataka `seven` (dan niže) s $N=7$ primjera. Ispišite koeficijente $w_0$ i $\mathbf{w}$. Ispišite dualne koeficijente i potporne vektore. Završno, koristeći funkciju `mlutils.plot_2d_svc_problem` iscrtajte podatke, decizijsku granicu i marginu. Funkcija prima podatke, oznake i klasifikator (objekt klase `SVC`). Izračunajte širinu dobivene margine (prisjetite se geometrije linearnih modela).from sklearn.svm import SVC seven_X = np.array([[2,1], [2,3], [1,2], [3,2], [5,2], [5,4], [6,3]]) seven_y = np.array([1, 1, 1, 1, -1, -1, -1]) seven_classifier = SVC(kernel='linear') seven_classifier.fit(seven_X, seven_y) print(f"w0:\n {seven_classifier.intercept_}") print(f"w:\n {seven_classifier.coef_}") print(f"Dual coefs:\n {seven_classifier.dual_coef_}") print(f"Support vectors:\n{seven_classifier.support_vectors_}") print(f"Support vectors indices:\n{seven_classifier.support_}") print(f"Margin width:\n{2/np.sqrt(np.linalg.norm(seven_classifier.coef_, axis=1))}") mlutils.plot_2d_svc_problem(seven_X, seven_y, seven_classifier)w0: [3.99951172] w: [[-9.99707031e-01 -2.92968750e-04]] Dual coefs: [[-4.99707031e-01 -1.46484375e-04 4.99853516e-01]] Support vectors: [[5. 2.] [5. 4.] [3. 2.]] Support vectors indices: [4 5 3] Margin width: [2.00029299]**Q:** Koliko iznosi širina margine i zašto? **Q:** Koji primjeri su potporni vektori i zašto? 
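A note on the margin computation in the cell above: for a linear SVM the geometric margin width is $2/\|\mathbf{w}\|$ (the distance between the hyperplanes $\mathbf{w}^\intercal\mathbf{x} + w_0 = \pm 1$), while the cell divides by the square root of $\|\mathbf{w}\|$; the two values nearly coincide here only because $\|\mathbf{w}\| \approx 1$. A minimal sketch of the direct computation, reusing the fitted `seven_classifier`:

# Margin width of a linear SVM: 2 / ||w||.
w = seven_classifier.coef_.ravel()
margin_width = 2.0 / np.linalg.norm(w)
print(margin_width)   # ~2.0006 on the seven set, since ||w|| is close to 1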
(b) Definirajte funkciju `hinge(model, x, y)` koja izračunava gubitak zglobnice modela SVM na primjeru `x`. Izračunajte gubitke modela naučenog na skupu `seven` za primjere $\mathbf{x}^{(2)}=(3,2)$ i $\mathbf{x}^{(1)}=(3.5,2)$ koji su označeni pozitivno ($y=1$) te za $\mathbf{x}^{(3)}=(4,2)$ koji je označen negativno ($y=-1$). Također, izračunajte prosječni gubitak SVM-a na skupu `seven`. Uvjerite se da je rezultat identičan onome koji biste dobili primjenom ugrađene funkcije [`metrics.hinge_loss`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.hinge_loss.html).from sklearn.metrics import hinge_loss def hinge(h, y): return np.maximum(0, 1. - h * y) def svm_hinge(model, x, y): h = model.decision_function(x) return np.sum([hinge(h[i], y[i]) for i in range(y.shape[0])])/y.shape[0] def built_in_hinge(model, x, y): h = model.decision_function(x) return hinge_loss(y, h) print('Seven set') print(f"hinge loss: {svm_hinge(seven_classifier, seven_X, seven_y)}") print(f"built-in hinge loss: {built_in_hinge(seven_classifier, seven_X, seven_y)}") test_X = np.array([[3.5, 2], [3, 2], [4, 2]]) test_y = np.array([1, 1, -1]) print('Test seven set') print(f"hinge loss: {svm_hinge(seven_classifier, test_X, test_y)}") print(f"built-in hinge loss: {built_in_hinge(seven_classifier, test_X, test_y)}") mlutils.plot_2d_svc_problem(test_X, test_y, seven_classifier)Test seven set hinge loss: 0.5001139322916662 built-in hinge loss: 0.5001139322916662(c) Vratit ćemo se na skupove podataka `outlier` ($N=8$) i `unsep` ($N=8$) iz prošle laboratorijske vježbe (dani niže) i pogledati kako se model SVM-a nosi s njima. Naučite ugrađeni model SVM-a (s linearnom jezgrom) na ovim podatcima i iscrtajte decizijsku granicu (skupa s marginom). Također ispišite točnost modela korištenjem funkcije [`metrics.accuracy_score`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html).from sklearn.metrics import accuracy_score outlier_X = np.append(seven_X, [[12,8]], axis=0) outlier_y = np.append(seven_y, -1) unsep_X = np.append(seven_X, [[2,2]], axis=0) unsep_y = np.append(seven_y, -1) plt.figure(figsize=(10,8)) plt.subplot(2, 1, 1) outlier_svm = SVC(kernel='linear') outlier_svm.fit(outlier_X, outlier_y) print(f"Outlier set accuracy score: {accuracy_score(outlier_svm.predict(outlier_X), outlier_y)}") mlutils.plot_2d_svc_problem(outlier_X, outlier_y, outlier_svm) plt.subplot(2, 1, 2) unsep_svm = SVC(kernel='linear') unsep_svm.fit(unsep_X, unsep_y) print(f"Unsep set accuracy score: {accuracy_score(unsep_svm.predict(unsep_X), unsep_y)}") mlutils.plot_2d_svc_problem(unsep_X, unsep_y, unsep_svm)Outlier set accuracy score: 1.0 Unsep set accuracy score: 0.875**Q:** Kako stršeća vrijednost utječe na SVM? **Q:** Kako se linearan SVM nosi s linearno neodvojivim skupom podataka? 2. Nelinearan SVM Ovaj zadatak pokazat će kako odabir jezgre utječe na kapacitet SVM-a. Na skupu `unsep` iz prošlog zadatka trenirajte tri modela SVM-a s različitim jezgrenim funkcijama: linearnom, polinomijalnom i radijalnom baznom (RBF) funkcijom. Varirajte hiperparametar $C$ po vrijednostima $C\in\{10^{-2},1,10^2\}$, dok za ostale hiperparametre (stupanj polinoma za polinomijalnu jezgru odnosno hiperparametar $\gamma$ za jezgru RBF) koristite podrazumijevane vrijednosti. 
Prikažite granice između klasa (i margine) na grafikonu organiziranome u polje $3x3$, gdje su stupci različite jezgre, a retci različite vrijednosti parametra $C$.graph_index = 1 plt.figure(figsize=(17, 12)) for kernel in ['linear', 'poly', 'rbf']: for C in [0.01, 1, 100]: plt.subplot(3, 3, graph_index) graph_index += 1 plt.title(f"C={C}, kernel={kernel}") svc = SVC(C=C, kernel=kernel) svc.fit(unsep_X, unsep_y) mlutils.plot_2d_svc_problem(unsep_X, unsep_y, svc) plt.show()/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. "avoid this warning.", FutureWarning) /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. "avoid this warning.", FutureWarning) /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. "avoid this [...]3. Optimizacija hiperparametara SVM-a Pored hiperparametra $C$, model SVM s jezgrenom funkcijom RBF ima i dodatni hiperparametar $\gamma=\frac{1}{2\sigma^2}$ (preciznost). Taj parametar također određuje složenost modela: velika vrijednost za $\gamma$ znači da će RBF biti uska, primjeri će biti preslikani u prostor u kojem su (prema skalarnome produktu) međusobno vrlo različiti, što će rezultirati složenijim modelima. Obrnuto, mala vrijednost za $\gamma$ znači da će RBF biti široka, primjeri će biti međusobno sličniji, što će rezultirati jednostavnijim modelima. To ujedno znači da, ako odabremo veći $\gamma$, trebamo jače regularizirati model, tj. trebamo odabrati manji $C$, kako bismo spriječili prenaučenost. Zbog toga je potrebno zajednički optimirati hiperparametre $C$ i $\gamma$, što se tipično radi iscrpnim pretraživanjem po rešetci (engl. *grid search*). Ovakav pristup primjenjuje se kod svih modela koji sadrže više od jednog hiperparametra. (a) Definirajte funkciju > `grid_search(X_train, X_validate, y_train, y_validate, c_range=(c1,c2), g_range=(g1,g2), error_surface=False)` koja optimizira parametre $C$ i $\gamma$ pretraživanjem po rešetci. Funkcija treba pretražiti hiperparametre $C\in\{2^{c_1},2^{c_1+1},\dots,2^{c_2}\}$ i $\gamma\in\{2^{g_1},2^{g_1+1},\dots,2^{g_2}\}$. Funkcija treba vratiti optimalne hiperparametre $(C^*,\gamma^*)$, tj. one za koje na skupu za provjeru model ostvaruju najmanju pogrešku. Dodatno, ako je `surface=True`, funkcija treba vratiti matrice (tipa `ndarray`) pogreške modela (očekivanje gubitka 0-1) na skupu za učenje i skupu za provjeru. 
Svaka je matrica dimenzija $(c_2-c_1+1)\times(g_2-g_1+1)$ (retci odgovaraju različitim vrijednostima za $C$, a stupci različitim vrijednostima za $\gamma$).from sklearn.metrics import accuracy_score, zero_one_loss def grid_search(X_train, X_validate, y_train, y_validate, c_range=(0,5), g_range=(0,5), error_surface=False): C_star = 0 gamma_star = 0 best_score = 0 surface_dimensions = (c_range[1] - c_range[0] + 1, g_range[1] - g_range[0] + 1) train_surface = np.zeros(surface_dimensions) test_surface = np.zeros(surface_dimensions) for c in range(c_range[0], c_range[1] + 1): for g in range(g_range[0], g_range[1] + 1): C = 2 ** c gamma = 2 ** g model = SVC(C=C, gamma=gamma, kernel='rbf') model.fit(X_train, y_train) h = model.predict(X_validate) score = accuracy_score(h, y_validate) if score > best_score: best_score = score C_star = C gamma_star = gamma if error_surface: i = c - c_range[0] j = g - g_range[0] test_surface[i][j] = zero_one_loss(h, y_validate) train_surface[i][j] = zero_one_loss(model.predict(X_train), y_train) return C_star, gamma_star, train_surface, test_surface(b) Pomoću funkcije [`datasets.make_classification`](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_classification.html) generirajte **dva** skupa podataka od $N=200$ primjera: jedan s $n=2$ dimenzije i drugi s $n=1000$ dimenzija. Primjeri neka dolaze iz dviju klasa, s time da svakoj klasi odgovaraju dvije grupe (`n_clusters_per_class=2`), kako bi problem bio nešto složeniji, tj. nelinearniji. Neka sve značajke budu informativne. Podijelite skup primjera na skup za učenje i skup za ispitivanje u omjeru 1:1.Na oba skupa optimirajte SVM s jezgrenom funkcijom RBF, u rešetci $C\in\{2^{-5},2^{-4},\dots,2^{15}\}$ i $\gamma\in\{2^{-15},2^{-14},\dots,2^{3}\}$. Prikažite površinu pogreške modela na skupu za učenje i skupu za provjeru, i to na oba skupa podataka (ukupno četiri grafikona) te ispišite optimalne kombinacije hiperparametara. 
Za prikaz površine pogreške modela možete koristiti funkciju `mlutils.plot_error_surface`.from sklearn.datasets import make_classification from sklearn.model_selection import train_test_split two_dim_X, two_dim_y = make_classification( n_samples=200, n_features=2, n_informative=2, n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=2) thousand_dim_X, thousand_dim_y = make_classification( n_samples=200, n_features=1000, n_informative=1000, n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=2) X_train, X_test, y_train, y_test = train_test_split(two_dim_X, two_dim_y, test_size=0.5) C_star_2, gamma_star_2, train_surface_2, test_surface_2 = grid_search(X_train, X_test, y_train, y_test, c_range=(-5, 15), g_range=(-15, 3), error_surface=True) plt.figure(figsize=(13, 10)) print(f"Two-dimensional C*: {C_star_2}, gamma*: {gamma_star_2}") plt.subplot(2, 2, 1) plt.title('2-dimensional train surface') mlutils.plot_error_surface(train_surface_2, c_range=(-5, 15), g_range=(-15, 3)) plt.subplot(2, 2, 2) plt.title('2-dimensional test surface') mlutils.plot_error_surface(test_surface_2, c_range=(-5, 15), g_range=(-15, 3)) X_train, X_test, y_train, y_test = train_test_split(thousand_dim_X, thousand_dim_y, test_size=0.5) C_star_1000, gamma_star_1000, train_surface_1000, test_surface_1000 = grid_search(X_train, X_test, y_train, y_test, c_range=(-5, 15), g_range=(-15, 3), error_surface=True) print(f"Thousand-dimensional C*: {C_star_1000}, gamma*: {gamma_star_1000}") plt.subplot(2, 2, 3) plt.title('1000-dimensional train surface') mlutils.plot_error_surface(train_surface_1000, c_range=(-5, 15), g_range=(-15, 3)) plt.subplot(2, 2, 4) plt.title('1000-dimensional test surface') mlutils.plot_error_surface(test_surface_1000, c_range=(-5, 15), g_range=(-15, 3)) plt.show()Two-dimensional C*: 4096, gamma*: 0.5 Thousand-dimensional C*: 0.03125, gamma*: 3.0517578125e-05**Q:** Razlikuje li se površina pogreške na skupu za učenje i skupu za ispitivanje? Zašto? **Q:** U prikazu površine pogreške, koji dio površine odgovara prenaučenosti, a koji podnaučenosti? Zašto? **Q:** Kako broj dimenzija $n$ utječe na površinu pogreške, odnosno na optimalne hiperparametre $(C^*, \gamma^*)$? **Q:** Preporuka je da povećanje vrijednosti za $\gamma$ treba biti popraćeno smanjenjem vrijednosti za $C$. Govore li vaši rezultati u prilog toj preporuci? Obrazložite. 4. Utjecaj standardizacije značajki kod SVM-a U prvoj laboratorijskoj vježbi smo pokazali kako značajke različitih skala mogu onemogućiti interpretaciju naučenog modela linearne regresije. Međutim, ovaj problem javlja se kod mnogih modela pa je tako skoro uvijek bitno prije treniranja skalirati značajke, kako bi se spriječilo da značajke s većim numeričkim rasponima dominiraju nad onima s manjim numeričkim rasponima. To vrijedi i za SVM, kod kojega skaliranje nerijetko može znatno poboljšati rezultate. 
Svrha ovog zadataka jest eksperimentalno utvrditi utjecaj skaliranja značajki na točnost SVM-a.Generirat ćemo dvoklasni skup od $N=500$ primjera s $n=2$ značajke, tako da je dimenzija $x_1$ većeg iznosa i većeg raspona od dimenzije $x_0$, te ćemo dodati jedan primjer koji vrijednošću značajke $x_1$ odskače od ostalih primjera:from sklearn.datasets import make_classification X, y = make_classification(n_samples=500,n_features=2,n_classes=2,n_redundant=0,n_clusters_per_class=1, random_state=69) X[:,1] = X[:,1]*100+1000 X[0,1] = 3000 mlutils.plot_2d_svc_problem(X, y)(a) Proučite funkciju za iscrtavanje histograma [`hist`](http://matplotlib.org/api/pyplot_api.htmlmatplotlib.pyplot.hist). Prikažite histograme vrijednosti značajki $x_0$ i $x_1$ (ovdje i u sljedećim zadatcima koristite `bins=50`).def plot_data_histograms(X): plt.figure(figsize=(15, 5)) plt.subplot(1, 2, 1) plt.title('x0 histogram') plt.hist(X[:,0], bins=50) plt.subplot(1, 2, 2) plt.title('x1 histogram') plt.hist(X[:,1], bins=50) plt.show() plot_data_histograms(X)(b) Proučite razred [`preprocessing.MinMaxScaler`](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html). Prikažite histograme vrijednosti značajki $x_0$ i $x_1$ ako su iste skalirane min-max skaliranjem (ukupno dva histograma).from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() mm_scaled_X = scaler.fit_transform(X) plot_data_histograms(mm_scaled_X)**Q:** Kako radi ovo skaliranje? **Q:** Dobiveni histogrami su vrlo slični. U čemu je razlika? (c) Proučite razred [`preprocessing.StandardScaler`](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html). Prikažite histograme vrijednosti značajki $x_0$ i $x_1$ ako su iste skalirane standardnim skaliranjem (ukupno dva histograma).from sklearn.preprocessing import StandardScaler scaler = StandardScaler() ss_scaled_X = scaler.fit_transform(X) plot_data_histograms(ss_scaled_X)**Q:** Kako radi ovo skaliranje? **Q:** Dobiveni histogrami su vrlo slični. U čemu je razlika? (d) Podijelite skup primjera na skup za učenje i skup za ispitivanje u omjeru 1:1. Trenirajte SVM s jezgrenom funkcijom RBF na skupu za učenje i ispitajte točnost modela na skupu za ispitivanje, koristeći tri varijante gornjeg skupa: neskalirane značajke, standardizirane značajke i min-max skaliranje. Koristite podrazumijevane vrijednosti za $C$ i $\gamma$. Izmjerite točnost svakog od triju modela na skupu za učenje i skupu za ispitivanje. Ponovite postupak više puta (npr. 
30) te uprosječite rezultate (u svakom ponavljanju generirajte podatke kao što je dano na početku ovog zadatka).**NB:** Na skupu za učenje treba najprije izračunati parametre skaliranja te zatim primijeniti skaliranje (funkcija `fit_transform`), dok na skupu za ispitivanje treba samo primijeniti skaliranje s parametrima koji su dobiveni na skupu za učenje (funkcija `transform`).iter = 100 min_max_scaled_score = [] ss_scaled_score = [] non_scaled_score = [] for i in range(iter): X, y = make_classification(n_samples=500,n_features=2,n_classes=2,n_redundant=0,n_clusters_per_class=1, random_state=69) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5) svc = SVC() svc.fit(X_train, y_train) non_scaled_score.append(accuracy_score(svc.predict(X_test), y_test)) non_scaled_score.append(accuracy_score(svc.predict(X_train), y_train)) scaler = MinMaxScaler() mm_scaled_X_train = scaler.fit_transform(X_train) mm_scaled_X_test = scaler.transform(X_test) svc = SVC() svc.fit(mm_scaled_X_train, y_train) min_max_scaled_score.append(accuracy_score(svc.predict(mm_scaled_X_test), y_test)) min_max_scaled_score.append(accuracy_score(svc.predict(mm_scaled_X_train), y_train)) scaler = StandardScaler() ss_scaled_X_train = scaler.fit_transform(X_train) ss_scaled_X_test = scaler.transform(X_test) svc = SVC() svc.fit(ss_scaled_X_train, y_train) ss_scaled_score.append(accuracy_score(svc.predict(ss_scaled_X_test), y_test)) ss_scaled_score.append(accuracy_score(svc.predict(ss_scaled_X_train), y_train)) print(f"Average score on non-scaled data: {np.average(np.array(non_scaled_score).reshape(2, iter), axis=1)}") print(f"Average score on standard-scaled data: {np.average(np.array(ss_scaled_score).reshape(2, iter), axis=1)}") print(f"Average score on min-max scaled data: {np.average(np.array(min_max_scaled_score).reshape(2, iter), axis=1)}")/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. "avoid this warning.", FutureWarning) /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. "avoid this warning.", FutureWarning) /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. "avoid this [...]**Q:** Jesu li rezultati očekivani? Obrazložite. **Q:** Bi li bilo dobro kada bismo funkciju `fit_transform` primijenili na cijelom skupu podataka? Zašto? Bi li bilo dobro kada bismo tu funkciju primijenili zasebno na skupu za učenje i zasebno na skupu za ispitivanje? Zašto? 5. Algoritam k-najbližih susjeda U ovom zadatku promatrat ćemo jednostavan klasifikacijski model imena **algoritam k-najbližih susjeda**. Najprije ćete ga samostalno isprogramirati kako biste se detaljno upoznali s radom ovog modela, a zatim ćete prijeći na analizu njegovih hiperparametara (koristeći ugrađeni razred, radi efikasnosti). (a) Implementirajte klasu `KNN`, koja implementira algoritam $k$ najbližih susjeda. 
Neobavezan parametar konstruktora jest broj susjeda `n_neighbours` ($k$), čija je podrazumijevana vrijednost 3. Definirajte metode `fit(X, y)` i `predict(X)`, koje služe za učenje modela odnosno predikciju. Kao mjeru udaljenosti koristite euklidsku udaljenost ([`numpy.linalg.norm`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.norm.html); pripazite na parametar `axis`). Nije potrebno implementirati nikakvu težinsku funkciju.from numpy.linalg import norm from collections import Counter class KNN(object): def __init__(self, n_neighbors=3): self.n_neighbors = n_neighbors def fit(self, X_train, y_train): self.X = X_train self.y = y_train self.classes = list(set(y_train)) def predict(self, X_test): result = [] for x_t in X_test: norms = enumerate([norm(x_t - x) for x in self.X]) norms = sorted(norms, key=lambda x: x[1]) neighbours = norms[:self.n_neighbors] counter = Counter(self.classes) for n in neighbours: counter[self.y[n[0]]] += 1 result.append(counter.most_common(1)[0][0]) return np.array(result)(b) Kako biste se uvjerili da je Vaša implementacija ispravna, usporedite ju s onom u razredu [`neighbors.KNeighborsClassifier`](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html). Budući da spomenuti razred koristi razne optimizacijske trikove pri pronalasku najboljih susjeda, obavezno postavite parametar `algorithm=brute`, jer bi se u protivnom moglo dogoditi da Vam se predikcije razlikuju. Usporedite modele na danom (umjetnom) skupu podataka (prisjetite se kako se uspoređuju polja; [`numpy.all`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.all.html)).from sklearn.datasets import make_classification X_art, y_art = make_classification(n_samples=100, n_features=2, n_classes=2, n_redundant=0, n_clusters_per_class=2, random_state=69) mlutils.plot_2d_clf_problem(X_art, y_art) from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import accuracy_score knn = KNN() knn.fit(X_art, y_art) print('Custom kNN') print('All examples have correct classification: ' + str(np.all( np.append(y_art.reshape(y_art.shape[0], 1), knn.predict(X_art).reshape(y_art.shape[0], 1), axis=1 ) )) ) print(f"score: {accuracy_score(y_art, knn.predict(X_art))}") knn_native = KNeighborsClassifier(n_neighbors=3, algorithm='brute') knn_native.fit(X_art, y_art) print('Native kNN') print('All examples have correct classification: ' + str(np.all( np.append( y_art.reshape(y_art.shape[0], 1), knn_native.predict(X_art).reshape(y_art.shape[0], 1), axis=1 ) )) ) print(f"score: {accuracy_score(y_art, knn_native.predict(X_art))}")Custom kNN All examples have correct classification: False score: 0.83 Native kNN All examples have correct classification: False score: 0.836. Analiza algoritma k-najbližih susjeda Algoritam k-nn ima hiperparametar $k$ (broj susjeda). Taj hiperparametar izravno utječe na složenost algoritma, pa je stoga izrazito važno dobro odabrati njegovu vrijednost. Kao i kod mnogih drugih algoritama, tako i kod algoritma k-nn optimalna vrijednost hiperametra $k$ ovisi o konkretnom problemu, uključivo broju primjera $N$, broju značajki (dimenzija) $n$ te broju klasa $K$. Kako bismo dobili pouzdanije rezultate, potrebno je neke od eksperimenata ponoviti na različitim skupovima podataka i zatim uprosječiti dobivene vrijednosti pogrešaka. 
Koristite funkciju: `mlutils.knn_eval` koja trenira i ispituje model k-najbližih susjeda na ukupno `n_instances` primjera, i to tako da za svaku vrijednost hiperparametra iz zadanog intervala `k_range` ponovi `n_trials` mjerenja, generirajući za svako od njih nov skup podataka i dijeleći ga na skup za učenje i skup za ispitivanje. Udio skupa za ispitivanje definiran je parametrom `test_size`. Povratna vrijednost funkcije jest četvorka `(ks, best_k, train_errors, test_errors)`. Vrijednost `best_k` je optimalna vrijednost hiperparametra $k$ (vrijednost za koju je pogreška na skupu za ispitivanje najmanja). Vrijednosti `train_errors` i `test_errors` liste su pogrešaka na skupu za učenja odnosno skupu za testiranje za sve razmatrane vrijednosti hiperparametra $k$, dok `ks` upravo pohranjuje sve razmatrane vrijednosti hiperparametra $k$. (a)Na podatcima iz zadatka 5, pomoću funkcije `mlutils.plot_2d_clf_problem` iscrtajte prostor primjera i područja koja odgovaraju prvoj odnosno drugoj klasi. Ponovite ovo za $k\in[1, 5, 20, 100]$. **NB:** Implementacija algoritma `KNeighborsClassifier` iz paketa `scikit-learn` vjerojatno će raditi brže od Vaše implementacije, pa u preostalim eksperimentima koristite nju.plt.figure(figsize=(15, 10)) graph_index = 1 for k in [1, 5, 20, 100]: knn = KNeighborsClassifier(n_neighbors=k) knn.fit(X_art, y_art) print(f"Accuracy score for k={k}: {accuracy_score(y_art, knn.predict(X_art))}") plt.subplot(2, 2, graph_index) graph_index += 1 plt.title(f"k={k}") mlutils.plot_2d_clf_problem(X_art, y_art, knn.predict) plt.show()Accuracy score for k=1: 1.0 Accuracy score for k=5: 0.84 Accuracy score for k=20: 0.82 Accuracy score for k=100: 0.5**Q:** Kako $k$ utječe na izgled granice između klasa? **Q:** Kako se algoritam ponaša u ekstremnim situacijama: $k=1$ i $k=100$? (b) Pomoću funkcije `mlutils.knn_eval`, iscrtajte pogreške učenja i ispitivanja kao funkcije hiperparametra $k\in\{1,\dots,20\}$, za $N=\{100, 500, 1000, 3000\}$ primjera. Načinite 4 zasebna grafikona (generirajte ih u 2x2 polju). U svakoj iteraciji ispišite optimalnu vrijednost hiperparametra $k$ (najlakše kao naslov grafikona; vidi [`plt.title`](https://matplotlib.org/devdocs/api/_as_gen/matplotlib.pyplot.title.html)).plt.figure(figsize=(15, 10)) graph_index = 1 for N in [100, 500, 1000, 3000]: ks, best_k, train_errors, test_errors = mlutils.knn_eval(n_instances=N) plt.subplot(2, 2, graph_index) graph_index += 1 plt.title(f"N={N} best_k={best_k}") plt.plot(range(1, train_errors.shape[0] + 1), train_errors, label="train error") plt.plot(range(1, test_errors.shape[0] + 1), test_errors, label="test error") plt.legend() plt.show()**Q:** Kako se mijenja optimalna vrijednost hiperparametra $k$ s obzirom na broj primjera $N$? Zašto? **Q:** Kojem području odgovara prenaučenost, a kojem podnaučenost modela? Zašto? **Q:** Je li uvijek moguće doseći pogrešku od 0 na skupu za učenje? (c) Kako bismo provjerili u kojoj je mjeri algoritam k-najbližih susjeda osjetljiv na prisustvo nebitnih značajki, možemo iskoristiti funkciju [`datasets.make_classification`](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_classification.html) kako bismo generirali skup primjera kojemu su neke od značajki nebitne. Naime, parametar `n_informative` određuje broj bitnih značajki, dok parametar `n_features` određuje ukupan broj značajki. Ako je `n_features > n_informative`, onda će neke od značajki biti nebitne. 
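For example, a minimal illustration of these two parameters (the concrete values here are arbitrary and chosen only to satisfy `make_classification`'s constraints):

from sklearn.datasets import make_classification

# 10 features in total, but only 5 carry information about the class label;
# the remaining features are noise as far as the target is concerned
X, y = make_classification(n_samples=1000, n_features=10, n_informative=5,
                           n_redundant=0, n_classes=5, n_clusters_per_class=1)
print(X.shape, len(set(y)))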
Umjesto da izravno upotrijebimo funkciju `make_classification`, upotrijebit ćemo funkciju `mlutils.knn_eval`, koja samo preuzime ove parametre, ali nam omogućuje pouzdanije procjene.Koristite funkciju `mlutils.knn_eval` na dva načina. U oba koristite $N=1000$ primjera, $n=10$ značajki i $K=5$ klasa, ali za prvi neka su svih 10 značajki bitne, a za drugi neka je bitno samo 5 od 10 značajki. Ispišite pogreške učenja i ispitivanja za oba modela za optimalnu vrijednost $k$ (vrijednost za koju je ispitna pogreška najmanja).plt.figure(figsize=(15, 10)) ks, best_k, train_errors, test_errors = mlutils.knn_eval( n_instances=1000, n_features=10, n_classes=5, n_informative=10, test_size=0.3, k_range=(1, 20), n_trials=100) plt.subplot(2, 2, 1) plt.title(f"best_k={best_k} test_error={round(test_errors[best_k-1], 4)} n_informative={10}") plt.plot(range(1, train_errors.shape[0] + 1), train_errors, label="train error") plt.plot(range(1, test_errors.shape[0] + 1), test_errors, label="test error") plt.legend() ks, best_k, train_errors, test_errors = mlutils.knn_eval( n_instances=1000, n_features=10, n_classes=5, n_informative=5, test_size=0.3, k_range=(1, 20), n_trials=100) plt.subplot(2, 2, 2) plt.title(f"best_k={best_k} test_error={round(test_errors[best_k-1], 4)} n_informative={5}") plt.plot(range(1, train_errors.shape[0] + 1), train_errors, label="train error") plt.plot(range(1, test_errors.shape[0] + 1), test_errors, label="test error") plt.legend() plt.show()**Q:** Je li algoritam k-najbližih susjeda osjetljiv na nebitne značajke? Zašto? **Q:** Je li ovaj problem izražen i kod ostalih modela koje smo dosad radili (npr. logistička regresija)? **Q:** Kako bi se model k-najbližih susjeda ponašao na skupu podataka sa značajkama različitih skala? Detaljno pojasnite. 7. "Prokletstvo dimenzionalnosti" "Prokletstvo dimenzionalnosti" zbirni je naziv za niz fenomena povezanih s visokodimenzijskim prostorima. Ti fenomeni, koji se uglavnom protive našoj intuiciji, u većini slučajeva dovode do toga da se s porastom broja dimenzija (značajki) smanjenje točnost modela. Općenito, povećanje dimenzija dovodi do toga da sve točke u ulaznome prostoru postaju (u smislu euklidske udaljenosti) sve udaljenije jedne od drugih te se, posljedično, gube razlike u udaljenostima između točaka. Eksperimentalno ćemo provjeriti da je to doista slučaj. Proučite funkciju [`metrics.pairwise.pairwise_distances`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html). Generirajte 100 slučajnih vektora u različitim dimenzijama $n\in[1,2,\ldots,50]$ dimenzija te izračunajte *prosječnu* euklidsku udaljenost između svih parova tih vektora. Za generiranje slučajnih vektora koristite funkciju [`numpy.random.random`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.random.random.html). 
Na istom grafu skicirajte krivulje prosječnih udaljenosti za euklidsku i kosinusnu udaljenost (parametar `metric`).from sklearn.metrics.pairwise import pairwise_distances from numpy.random import random e_distances=[] c_distances=[] for n in range(1, 51): e_distances.append(np.average(pairwise_distances(random(size=(100,n)), metric='euclidean'))) c_distances.append(np.average(pairwise_distances(random(size=(100,n)), metric='cosine'))) plt.figure(figsize=(10,6)) plt.xlabel('Dimensions') plt.ylabel('Average distance') plt.plot(range(1, 51), e_distances, label="euclidean_distance") plt.plot(range(1, 51), c_distances, label="cosine_distance") plt.legend() plt.show()Setup the environment# Set working directory from google.colab import drive, files drive.mount('/content/gdrive') %cd './gdrive/My Drive/Multivariate Discounting/Scripts and Notebooks/' # Connect TPU, if desired %tensorflow_version 2.x import tensorflow as tf print("Tensorflow version " + tf.__version__) try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # TPU detection print('Running on TPU ', tpu.cluster_spec().as_dict()['worker']) except ValueError: raise BaseException('ERROR: Not connected to a TPU runtime') tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)Tensorflow version 2.8.0 Running on TPU ['10.102.111.218:8470'] INFO:tensorflow:Deallocate tpu buffers before initializing tpu system.Import packages we'll need# System import os import sys from IPython.display import clear_output import warnings warnings.filterwarnings("ignore") # Data Manipulation import pandas as pd import numpy as np # Visualizations import seaborn as sns import matplotlib.pyplot as plt # Analytics import statsmodels.api as sm import statsmodels.formula.api as smf from math import log import scipy.stats as stats from sklearn import metrics from scipy.optimize import curve_fit from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor as RFR from sklearn.ensemble import GradientBoostingRegressor as GBR !pip install shap import shap import random # Script preference pd.set_option("display.max_columns", None)Collecting shap Downloading shap-0.40.0-cp37-cp37m-manylinux2010_x86_64.whl (564 kB) [?25l  |▋ | 10 kB 27.9 MB/s eta 0:00:01  |█▏ | 20 kB 10.9 MB/s eta 0:00:01  |█▊ | 30 kB 8.3 MB/s eta 0:00:01  |██▎ | 40 kB 8.0 MB/s eta 0:00:01  |███ | 51 kB 4.3 MB/s eta 0:00:01  |███▌ | 61 kB 5.1 MB/s eta 0:00:01  |████ | 71 kB 5.4 MB/s eta 0:00:01  |████▋ | 81 kB 5.6 MB/s eta 0:00:01  |█████▏ | 92 kB 6.3 MB/s eta 0:00:01  |█████▉ | 102 kB 5.0 MB/s eta 0:00:01  |██████▍ | 112 kB 5.0 MB/s eta 0:00:01  |███████ | 122 kB 5.0 MB/s eta 0:00:01  |███████▌ | 133 kB 5.0 MB/s et[...]Make sure dataframe is ready for fitting# Read in the data df = pd.read_csv('../Data/01_raw/data_long.csv').drop(['Unnamed: 0'], axis=1) #%% One hot encode categorical variables for predictions encode_df = pd.DataFrame(df['indiff_point']) encode_df.columns = ['indiff_point'] encode_df = pd.concat([encode_df, pd.get_dummies(df['Group'], prefix='Group')], axis=1) encode_df = pd.concat([encode_df, pd.get_dummies(df['Commodity'], prefix='Commodity')], axis=1) encode_df = pd.concat([encode_df, pd.get_dummies(df['Gain_Loss'], prefix='Gain_Loss')], axis=1) encode_df = pd.concat([encode_df, pd.get_dummies(df['Delay_Prob'], prefix='Delay_Prob')], axis=1) encode_df['Amount'] = df['Amount'] encode_df['IV'] = df['IV'] encode_df['pid'] = df['pid'] 
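# The dropna()/drop() calls below remove incomplete rows and drop one dummy column per categorical variable ('Group_Control', 'Gain_Loss_Loss', 'Delay_Prob_Probability'), i.e. each categorical keeps a reference level, similar in effect to pd.get_dummies(..., drop_first=True)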
encode_df = encode_df.dropna() encode_df = encode_df.reset_index(drop=True) encode_df = encode_df.drop(['Group_Control', 'Gain_Loss_Loss', 'Delay_Prob_Probability'], axis=1) encode_df[::500]Functionsdef calc_aic(n, loss, num_params): aic = n * log(loss) + 2 * num_params return aic def calc_bic(n, loss, num_params): bic = n * log(loss) + num_params * log(n) return bic def plot_pred(predictors, observed_data, title, save_name=None, rand_state=5687463, model=None, plot_cond=False): # Fit the model if model=='RF': regr = RFR(max_depth=5, random_state=rand_state) elif model=='LR': regr = LinearRegression() elif model=='GBR': regr = GradientBoostingRegressor(random_state=rand_state) elif model=='ABR': regr = AdaBoostRegressor(random_state=rand_state) regr.fit(predictors, observed_data) try: num_params = len(regr.coef_) + 1 except: num_params = np.nan r2 = regr.score(predictors, observed_data) predictions = regr.predict(predictors) mse = metrics.mean_squared_error(observed_data, predictions) rmse = np.sqrt(mse) mae = metrics.mean_absolute_error(observed_data, predictions) try: aic = calc_aic(n=len(observed_data), loss=mse, num_params=num_params) except: aic = np.nan try: bic = calc_bic(n=len(observed_data), loss=mse, num_params=num_params) except: bic= np.nan # Plot if desired if plot_cond==True: # Plot it fig, ax = plt.subplots(figsize=(10, 10)) sns.regplot(x=observed_data, y=regr.predict(predictors), color='k', ) plt.title(title, fontsize=36, pad=16) ax.text(0.7, 0.08, r"$r^2=$"+f'{round(r2, 2)}', fontsize=24) plt.xticks(fontsize=20) plt.xlim(0, 1) plt.yticks(fontsize=20) plt.ylim(0, 1) plt.ylabel("Predicted Indifference Proportion", fontsize=30, labelpad=20) plt.xlabel("Observed Indifference Proportion", fontsize=30, labelpad=20) plt.savefig(f'../Figures/{save_name}.png', bbox_inches="tight") plt.show() return r2, mse, rmse, mae, aic, bic def box_swarm(df, width=15, save_name=None, xlim=None, ylim=None): fig, ax = plt.subplots(figsize=(width, 12)) sns.boxplot(x='variable', y='value', data=df, hue='group', dodge=True, palette='colorblind', showmeans=True, meanprops={"marker":"*", "markerfacecolor":"white", "markeredgecolor":"black", "markersize":"18"}) sns.swarmplot(x='variable', y='value', data=df, hue='group', dodge=True, alpha=0.5, color='k') plt.ylabel('Metric', fontsize=30, labelpad=16) plt.yticks(fontsize=16) plt.ylim(ylim) plt.xticks(fontsize=16) plt.xlim(xlim) plt.xlabel('', fontsize=20) plt.savefig(f'../Figures/{save_name}.png', bbox_inches='tight') plt.show()Random Forest Hyperparameters to tune:* n_estimators: Number of trees in the forest* max_depth: The maximum depth of each tree. 
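The cell below sweeps these values with nested loops; an equivalent way to enumerate the same combinations, shown only as a sketch (the notebook itself does not use `ParameterGrid`), would be:

from sklearn.model_selection import ParameterGrid

# Same (n_estimators, max_depth) combinations as the nested loops below
grid = ParameterGrid({'n_estimators': [10, 50, 100, 250],
                      'max_depth': [10, 50, 100]})
for params in grid:
    # e.g. RFR(**params).fit(predictors, observed_data) per participant
    print(params)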
Modeling# Grid search using RF preds = list(encode_df)[1:-1] # Use all but participant id b/c we're fitting at the PID level fit = [] vac = [] mse = [] mae = [] rmse = [] group = [] depth = [] ests = [] for i in range(0, 10): rand_state = int(round(random.random()*10000, 0)) for n_est in [10, 50, 100, 250]: for max_depth in [10, 50, 100]: for p in encode_df['pid'].unique(): # Isolate participant data temp_df = encode_df[encode_df['pid']==p].dropna() predictors = temp_df[preds].astype(float) observed_data = temp_df['indiff_point'] temp_df = temp_df.reset_index(drop=True) # Fit RF regressor regr = RFR(n_estimators=n_est, max_depth=max_depth) regr.fit(predictors, observed_data) predictions = regr.predict(predictors) # Save loss metrics for this fit fit.append('RF') vac.append(regr.score(predictors, observed_data)) mse.append(metrics.mean_squared_error(observed_data, predictions)) rmse.append(np.sqrt(metrics.mean_squared_error(observed_data, predictions))) mae.append(metrics.mean_absolute_error(observed_data, predictions)) if temp_df['Group_Cocaine'][0]==1: group.append('cocaine') else: group.append('control') depth.append(max_depth) ests.append(n_est) clear_output() print(f'Fitting model:\nIter: {i+1} of 10\nn_est: {n_est}\ndepth: {max_depth}\nPID: {p}') fit_df_rf = pd.DataFrame({'fit':fit, 'vac':vac, 'mse':mse, 'rmse':rmse, 'mae':mae, 'depth':depth, 'ests':ests, 'group':group}) fit_df_rf.to_csv(f'../Data/03_hyperparameter_tuning/param_tuning_RF.csv') # Identify optimal hyperparams, on average testing = fit_df_rf[fit_df_rf['vac']==fit_df_rf['vac'].max()] testing = testing.append(fit_df_rf[fit_df_rf['mse']==fit_df_rf['mse'].min()]) testing = testing.append(fit_df_rf[fit_df_rf['rmse']==fit_df_rf['rmse'].min()]) testing = testing.append(fit_df_rf[fit_df_rf['mae']==fit_df_rf['mae'].min()]) testing = testing.drop_duplicates() print(testing.sort_values(by=['mae'], ascending=True)) testing.describe()fit vac mse rmse mae depth ests group 12 RF 1.0 1.110810e-32 1.053950e-16 5.776629e-17 10 10 cocaine 5088 RF 1.0 1.137704e-32 1.066632e-16 5.925320e-17 10 10 cocaine 1704 RF 1.0 1.198130e-32 1.094591e-16 6.341653e-17 10 10 cocaine 5135 RF 1.0 1.479626e-32 1.216399e-16 6.748074e-17 50 10 cocaine 1751 RF 1.0 1.479746e-32 1.216448e-16 6.782769e-17 50 10 cocaine 4618 RF 1.0 1.479746e-32 1.216448e-16 6.782769e-17 100 10 cocaine 623 RF 1.0 1.479851e-32 1.216491e-16 6.808790e-17 50 10 cocaine 5182 RF 1.0 1.479867e-32 1.216498e-16 6.817463e-17 100 10 cocaine 3443 RF 1.0 1.479867e-32 1.216498e-16 6.817463e-17 50 10 cocaine 3960 RF 1.0 1.479867e-32 1.216498e-16 6.817463e-17 10 10 cocaine 4054 RF 1.0 1.479987e-32 1.216547e-16 6.852158e-17 100 10 cocaine 670 RF 1.0 1.480589e-32 1.216794e-16 6.886852e-17 100 [...]Looks like n_ests of ~10 and depth of ~50 is the best on average. 
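To make the "best on average" reading explicit, the tuning results can be aggregated over participants and repetitions before picking a combination (a sketch over the `fit_df_rf` frame produced above; column names as defined in that cell):

# Mean loss metrics per (n_estimators, max_depth) combination, ranked by MAE
summary = (fit_df_rf
           .groupby(['ests', 'depth'])[['vac', 'mse', 'rmse', 'mae']]
           .mean()
           .sort_values('mae'))
print(summary.head())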
Use those hyperparameter values to fit everyone's model.# Isolate list of features preds = list(encode_df)[1:-1] # All features but PID b/c we're fitting at the PID level # Empty lists to store data in pids = [] fit = [] vac = [] mse = [] mae = [] rmse = [] group = [] shap_vals = pd.DataFrame() rand_state = int(round(random.random()*10000, 0)) for p in encode_df['pid'].unique(): # Isolate participant data temp_df = encode_df[encode_df['pid']==p].dropna() predictors = temp_df[preds].astype(float) observed_data = temp_df['indiff_point'] temp_df = temp_df.reset_index(drop=True) # Fit RF regressor regr = RFR(n_estimators=10, max_depth=50) model = regr.fit(predictors, observed_data) predictions = regr.predict(predictors) # Save loss metrics for this fit pids.append(p) fit.append('RF') vac.append(model.score(predictors, observed_data)) mse.append(metrics.mean_squared_error(observed_data, predictions)) rmse.append(np.sqrt(metrics.mean_squared_error(observed_data, predictions))) mae.append(metrics.mean_absolute_error(observed_data, predictions)) if temp_df['Group_Cocaine'][0]==1: group.append('cocaine') else: group.append('control') # Save shapley values explainer = shap.TreeExplainer(model) shap_values = explainer(predictors) shap_df = pd.DataFrame(shap_values.values) shap_df.columns = shap_values.feature_names shap_df['pids'] = p shap_vals = shap_vals.append(shap_df) clear_output() fit_df_rf = pd.DataFrame({'fit':fit, 'vac':vac, 'mse':mse, 'rmse':rmse, 'mae': mae, 'group': group, 'pids': pids}) fit_df_rf.to_csv(f'../Data/03_hyperparameter_tuning/final_RF_fits.csv') shap_vals.to_csv(f'../Data/04_final_fits/shap_values_RF.csv')Visualizations# Change to long format for easier plotting fit_long_rf = fit_df_rf.melt(id_vars=['fit', 'group', 'pids'], value_vars=['vac', 'mse', 'rmse', 'mae']) # Show every 18th row fit_long_rf[::18]Loss metrics# Box and swarms for loss metrics box_swarm(df=fit_long_rf[fit_long_rf['variable']=='vac'], width=4, ylim=(0, 1)) box_swarm(df=fit_long_rf[fit_long_rf['variable']!='vac'])Feature Importances# CREATE DATAFRAME #Create arrays from feature importance and feature names feature_importance = np.array(model.feature_importances_) feature_names = np.array(predictors.columns) #Create a DataFrame using a Dictionary data={'feature_names':feature_names,'feature_importance':feature_importance} fi_df = pd.DataFrame(data) #Sort the DataFrame in order of decreasing feature importance fi_df.sort_values(by=['feature_importance'], ascending=False,inplace=True) fi_df # Plot it ticks = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7] labels = ['0\nLess', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6', '0.7\nMore'] plt.figure(figsize=(6, 6)) sns.barplot(x=fi_df['feature_importance'], y=fi_df['feature_names'], color='#002d4f') plt.xlabel('Importance', fontsize=30, labelpad=20) plt.xticks(ticks = ticks, labels = labels, fontsize=14) plt.ylabel('', fontsize=30, labelpad=20, rotation=0) plt.yticks(fontsize=14) # plt.ylim(-0.5, 9.5) plt.show()Shapley Valuesshap.summary_plot(shap_values) shap.dependence_plot('IV', shap_values.values, predictors) testing = predictors.copy() testing['IV'] = np.log(testing['IV']) shap.dependence_plot('IV', shap_values.values, testing) shap_dfGradient Boosting Hyperparameters to tune:* learning_rate: Shrinks contribution of each tree* n_est: The number of boosting stages to perform* max_depth: Max depth of the individual regression estimators# Grid search using GBR preds = list(encode_df)[1:-1] fit = [] vac = [] mse = [] mae = []
rmse = [] group = [] depth = [] ests = [] l_rate = [] for i in range(0, 10): rand_state = int(round(random.random()*10000, 0)) for n_est in [10, 50, 100, 250]: for max_depth in [10, 50, 100]: for rate in [0.001, 0.1, 1]: for p in encode_df['pid'].unique(): # Isolate participant data temp_df = encode_df[encode_df['pid']==p].dropna() predictors = temp_df[preds].astype(float) observed_data = temp_df['indiff_point'] temp_df = temp_df.reset_index(drop=True) # Fit GBR regressor regr = GBR(n_estimators=n_est, max_depth=max_depth, learning_rate=rate) regr.fit(predictors, observed_data) predictions = regr.predict(predictors) # Save loss metrics for this fit fit.append('GB') vac.append(regr.score(predictors, observed_data)) mse.append(metrics.mean_squared_error(observed_data, predictions)) rmse.append(np.sqrt(metrics.mean_squared_error(observed_data, predictions))) mae.append(metrics.mean_absolute_error(observed_data, predictions)) if temp_df['Group_Cocaine'][0]==1: group.append('cocaine') else: group.append('control') depth.append(max_depth) ests.append(n_est) l_rate.append(rate) clear_output() print(f'Fitting model:\nIter: {i+1} of 10\nn_est: {n_est}\ndepth: {max_depth}\nPID: {p}') fit_df = pd.DataFrame({'fit':fit, 'vac':vac, 'mse':mse, 'rmse':rmse, 'mae':mae, 'depth':depth, 'ests':ests, 'lrate':l_rate, 'group':group}) fit_df.to_csv(f'../Data/03_hyperparameter_tuning/param_tuning_GB.csv')An Analysis of Anscombe's quartet dataset 1. An explanation of the background of the dataset "Frank" Anscombe (13 May 1918 – 17 October 2001) was an English statistician. Born in Hove in England, Anscombe was educated at Trinity College, Cambridge University. After serving in the Second World War, he joined Rothamsted Experimental Station for two years before returning to Cambridge as a lecturer. He later became interested in statistical computing, and stressed that "a computer should make both calculations and graphs", and illustrated the importance of graphing data with four data sets now known as Anscombe's quartet. Anscombe's quartet comprises four datasets that have nearly identical simple descriptive statistics, yet appear very different when graphed. Each dataset consists of eleven (x,y) points. They were constructed in 1973 by the statistician Francis Anscombe to demonstrate both the importance of graphing data before analyzing it and the effect of outliers on statistical properties. He described the article as being intended to counter the impression among statisticians that "numerical calculations are exact, but graphs are rough."*https://en.wikipedia.org/wiki/Anscombe%27s_quartet Perhaps the most elegant demonstration of the dangers of summary statistics is Anscombe’s Quartet. It’s a group of four datasets that appear to be similar when using typical summary statistics, yet tell four different stories when graphed. Speculation on how Anscombe created the datasetWhile very popular and effective for illustrating the importance of visualizations, it is not known how Anscombe came up with his dataset. Unfortunately, Anscombe does not report how the datasets were created, nor suggest any method to create new ones. Chatterjee and Firat 2007They proposed a genetic algorithm based approach where 1,000 random datasets were created with identical summary statistics, then combined and mutated with an objective function to maximize the “graphical dissimilarity” between the initial and final scatter plots.
While the datasets produced were graphically dissimilar to the input datasets, they did not have any discernable structure in their composition. . and . (2007). Generating Data with Identical Statistics but Dissimilar Graphics. The American Statistician 61, 3, 248–254. 6 Govindaraju and HasletGovindaraju and Haslett developed a method for regressing datasets towards their sample means while maintaining the same linear regression formula [7]. In 2009, the same authors extended their procedure to creating “cloned” datasets [8]. In addition to maintaining the same linear regression as the seed dataset, their cloned datasets also maintained the same means (but not the same standard deviations). and . (2008). Illustration of regression towards the means. International Journal of Mathematical Education in Science and Technology 39, 4, 544–550. While Chatterjee and Firat wanted to create datasets as graphically dissimilar as possible, Govindaraju and Haslett’s cloned datasets were designed to be visually similar, with a proposed application of confidentializing sensitive data for publication purposes Datasets which are identical over a number of statistical properties, yet produce dissimilar graphs, are frequently used to illustrate the importance of graphical representations when exploring dataThe effectiveness of Anscombe’s Quartet is not due to simply having four different data sets which generate same statistical properties, it is that four clearly different and identifiably distinct datasets are producing the same statistical properties. 2. Plot the interesting aspects of the dataset Anscombe’s Quartet consists of four data sets with eleven (x, y) pairs. First three data sets have the same x values. All four data sets have the same standard output when performing a typical regression so it is easy to assume that the data sets are also the same. Here are the values from Anscombe’s Quartet data sets.import matplotlib.pyplot as pl pl.plot(df['x1'], df['y1'],'') pl.plot(df['x2'], df['y2'],'') pl.plot(df['x3'], df['y3'],'') pl.plot(df['x4'], df['y4'],'')Sometimes when analysing data you may find that looking at the calculations may not produce accurate results unless you understand the underlying data. It is important to visualise data to see what you’re dealing with. This can be achieved using graphs, in this case scatter plots, specifically.Scatter plots are useful to perceive the broad features of data and look behind those features to see what is there. A good statistical analysis includes looking at the data from different points of view. To see why you need to visualise data we are going to look at Anscombe’s Quartet.import seaborn as sns sns.set(style="ticks") df=sns.load_dataset("anscombe") sns.lmplot(x="x", y="y", col="dataset", hue="dataset", data=df, col_wrap=2, ci=None, palette="muted", scatter_kws={"s": 50, "alpha": 1}) plt.show()- Dataset I consists of a set of points that appear to follow a rough linear relationship with some variance. - Dataset II fits a neat curve but doesn’t follow a linear relationship . - Dataset III looks like a tight linear relationship between x and y, except for one large outlier. - Dataset IV looks like x remains constant, except for one outlier as well.Computing summary statistics wouldn’t have told us any of these stories. Instead, it’s important to visualize the data to get a clear picture of what’s going on. 
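To see how close the per-dataset statistics really are, they can be computed alongside the plots (a minimal sketch; it assumes the seaborn `anscombe` dataset loaded as `df` with columns `dataset`, `x` and `y`, as above):

import numpy as np
import seaborn as sns

df = sns.load_dataset("anscombe")
for name, g in df.groupby("dataset"):
    # Least-squares line y = slope*x + intercept for this dataset
    slope, intercept = np.polyfit(g["x"], g["y"], 1)
    print(f"{name}: mean_x={g['x'].mean():.2f} mean_y={g['y'].mean():.2f} "
          f"var_x={g['x'].var():.2f} corr={g['x'].corr(g['y']):.3f} "
          f"fit: y = {slope:.2f}x + {intercept:.2f}")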
Additionl plots of the dataset for visualizationpurpose onlysns.pairplot(df) sns.pairplot(df, hue='x') sns.pairplot(df, hue='y')The descriptive statistics of the variable dataset The Anscombe quartet dataset set can be imported into my notebook for analysis, by either importing the file as acsv file or inmporting through seaborn where it is already installed. First of all, let us get the data set, that’s immediate to do as it comes ready loadable within the seaborn library:import seaborn as sns df = sns.load_dataset("anscombe") print(df)dataset x y 0 I 10.0 8.04 1 I 8.0 6.95 2 I 13.0 7.58 3 I 9.0 8.81 4 I 11.0 8.33 5 I 14.0 9.96 6 I 6.0 7.24 7 I 4.0 4.26 8 I 12.0 10.84 9 I 7.0 4.82 10 I 5.0 5.68 11 II 10.0 9.14 12 II 8.0 8.14 13 II 13.0 8.74 14 II 9.0 8.77 15 II 11.0 9.26 16 II 14.0 8.10 17 II 6.0 6.13 18 II 4.0 3.10 19 II 12.0 9.13 20 II 7.0 7.26 21 II 5.0 4.74 22 III 10.0 7.46 23 III 8.0 6.77 24 III 13.0 12.74 25 III 9.0 7.11 26 III 11.0 7.81 27 III 14.0 8.84 28 III 6.0 6.08 29 III 4.0 5.39 30 III 12.0 8.15 31 III 7.0 6.42 32 III 5.0 5.73 33 IV 8.0 6.58 34 IV 8.0 5.76 35 IV 8.0 7.71 36 IV 8.0 8.84 37 IV 8.0 8.47 38 IV 8.0 7.04 39 IV 8.0 5.25 40 IV 19.0[...]First, we can get a glimpse of how many examples (rows) and how many attributes (columns) the Anscombe dataset contains with the shape methoddf.shape df.head() df.tail()We can see from the output above that the Anscombe dataset is comprised of 43 rows and 3 columns. Next we can take a look at a summary of each Anscombe attribute. This includes the count, mean, min and max values as well as some percentiles:df.describe() df.groupby("dataset").describe() # Mean of Data Set I df.iloc[0:11].mean() # Mean of Data Set II df.iloc[11:22].mean() # Mean of Data Set III df.iloc[22:33].mean() # Mean of Data Set IV df.iloc[33:44].mean() import pandas as pd # Data manipulation # Import the dataset as a csv file , which I have save in my repository # Import pandas. 
import pandas as pd df =pd.read_csv("https://raw.githubusercontent.com/MarianneLawless/Fundamentals-of-DA-Pratical-Assignments/master/Anscombe2.csv") df df.describe() import numpy as np np.round(df.corr(),decimals=3)Why this data set is so interesting All the summary statistics you’d think to compute are close to identical:- The average x value is 9 for each dataset- The average y value is 7.50 for each dataset- The variance for x is 11 and the variance for y is 4.12- The correlation between x and y is 0.816 for each dataset- A linear regression (line of best fit) for each dataset follows the equation y = 0.5x + 3So far these four datasets appear to be pretty similar But when we plot these four data sets on an x/y coordinate plane, we get the following results:sns.set(style="ticks") df=sns.load_dataset("anscombe") sns.lmplot(x="x", y="y", col="dataset", hue="dataset", data=df, col_wrap=2, ci=None, palette="muted", scatter_kws={"s": 50, "alpha": 1}) plt.show()seq2seq构建写对联AI 代码参考:[seq2seq-couplet](https://github.com/wb14123/seq2seq-couplet) 问题背景介绍对联又称对子,对仗工整,平仄协调,是一字一音的汉文语言独特的艺术形式,是中国传统文化瑰宝。对联的上下联有着非常工整的对应关系,我们可以尝试使用神经网络学习对应关系,进而完成对对联任务,而之前提到的seq2seq模型,是非常典型的序列映射学习模型,可以在本场景下使用。![](../img/couplet.jpeg) seq2seq对对联 \[稀牛学院 x 网易云课程\]《AI工程师(自然语言处理方向)》课程资料 by [@寒小阳](https://blog.csdn.net/han_xiaoyang) 这里构建的对对联AI应用也是seq2seq模型,使用的就是我们在上一门中讲解到的模型。 ![](../img/[1]_seq2seq_1.gif) ![](../img/[8]_seq2seq_8.gif) ![](../img/attention_tensor_dance.gif) 数据读取from queue import Queue from threading import Thread import random def padding_seq(seq): """padding每个输入sequence为最大的sequence长度 arg:seq of ids return: results, padding到max_len的id list """ results = [] max_len = 0 for s in seq: if max_len < len(s): max_len = len(s) for i in range(0, len(seq)): l = max_len - len(seq[i]) results.append(seq[i] + [0 for j in range(l)]) return results def encode_text(words, vocab_indices): """把文本序列映射为id序列 args: words, 输入对联中每个字组成的list vocab_indices,词到id的dict return:文本序列对应的id序列 """ return [vocab_indices[word] for word in words if word in vocab_indices] def decode_text(labels, vocabs, end_token=''): """把id序列映射为文本序列 args: labels, decoder输出的预测结果list vocab,id到词的dict return:results,' '连接的预测文本 """ results = [] for idx in labels: word = vocabs[idx] if word == end_token: return ' '.join(results) results.append(word) return ' '.join(results) def read_vocab(vocab_file): """读取词表文件 return:vocabs,list包含文件中的所有字及,,',' """ f = open(vocab_file, 'rb') vocabs = [line.decode('utf-8')[:-1] for line in f] f.close() return vocabs class SeqReader(): """输入序列读取类""" def __init__(self, input_file, target_file, vocab_file, batch_size, queue_size=2048, worker_size=2, end_token='
', padding=True, max_len=50): self.input_file = input_file self.target_file = target_file self.vocabs = read_vocab(vocab_file) # 词到id的dict self.vocab_indices = dict((c, i) for i, c in enumerate(self.vocabs)) self.batch_size = batch_size self.padding = padding self.data_queue = Queue(queue_size) self.worker_size = worker_size self.end_token = end_token self.max_len = max_len with open(self.input_file, 'rb') as f: for i, line in enumerate(f): pass f.close() self.single_lines = i + 1 # 输入文件总行数 self.data_size = int(self.single_lines / batch_size) # batch总数 self.data_pos = 0 # 指针,self.data中的某一个索引 self._init_reader() def start(self): """多线程运行_init_reader()""" for i in range(self.worker_size): t = Thread(target=self._init_reader()) t.daemon = True # 守护线程,后台运行 t.start() return def read_single_data(self): """读取一组数据, return:{ 'in_seq': in_seq, 'in_seq_len': len(in_seq), 'target_seq': target_seq, 'target_seq_len': len(target_seq) - 1 } """ if self.data_pos >= len(self.data): random.shuffle(self.data) self.data_pos = 0 result = self.data[self.data_pos] self.data_pos += 1 return result def read(self): """batch生成器 yield:batch,dict类型,{ 'in_seq': [[seq1], [seq2], ...], 'in_seq_len': [int, int, ...], 'target_seq': [[seq1], [seq2], ...], 'target_seq_len': [int, int, ...] } """ while True: batch = { 'in_seq': [], 'in_seq_len': [], 'target_seq': [], 'target_seq_len': [] } for i in range(0, self.batch_size): item = self.read_single_data() batch['in_seq'].append(item['in_seq']) batch['in_seq_len'].append(item['in_seq_len']) batch['target_seq'].append(item['target_seq']) batch['target_seq_len'].append(item['target_seq_len']) if self.padding: batch['in_seq'] = padding_seq(batch['in_seq']) batch['target_seq'] = padding_seq(batch['target_seq']) yield batch def _init_reader(self): """文件读取,预处理数据格式 self.data保存了转化为id的每组input sequence、target sequence的dict,储 存在list中 """ self.data = [] # 初始化输出数据为空list input_f = open(self.input_file, 'rb') target_f = open(self.target_file, 'rb') for input_line in input_f: input_line = input_line.decode('utf-8')[:-1] # target_line按行读取 target_line = target_f.readline().decode('utf-8')[:-1] # 文本以' '为每个字的分隔符 input_words = [x for x in input_line.split(' ') if x != ''] if len(input_words) >= self.max_len: input_words = input_words[:self.max_len - 1] input_words.append(self.end_token) target_words = [x for x in target_line.split(' ') if x != ''] if len(target_words) >= self.max_len: target_words = target_words[:self.max_len - 1] target_words = ['',] + target_words # 加入开始符 target_words.append(self.end_token) # 加入结束符 in_seq = encode_text(input_words, self.vocab_indices) target_seq = encode_text(target_words, self.vocab_indices) self.data.append({ 'in_seq': in_seq, 'in_seq_len': len(in_seq), 'target_seq': target_seq, 'target_seq_len': len(target_seq) - 1 # 不计入 }) input_f.close() target_f.close() self.data_pos = len(self.data)评估函数# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== """Python implementation of BLEU and smooth-BLEU. This module provides a Python implementation of BLEU and smooth-BLEU. Smooth BLEU is computed following the method outlined in the paper: , . ORANGE: a method for evaluating automatic evaluation metrics for machine translation. COLING 2004. """ import collections import math def _get_ngrams(segment, max_order): """Extracts all n-grams upto a given maximum order from an input segment. Args: segment: text segment from which n-grams will be extracted. max_order: maximum length in tokens of the n-grams returned by this methods. Returns: The Counter containing all n-grams upto max_order in segment with a count of how many times each n-gram occurred. (all n-grams upto max_order),keys:n-gram,value:count """ ngram_counts = collections.Counter() for order in range(1, max_order + 1): for i in range(0, len(segment) - order + 1): ngram = tuple(segment[i:i + order]) ngram_counts[ngram] += 1 return ngram_counts def compute_bleu(reference_corpus, translation_corpus, max_order=4, smooth=False): """Computes BLEU score of translated segments against one or more references. Args: reference_corpus: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. translation_corpus: list of translations to score. Each translation should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram precisions and brevity penalty. bleu:float,翻译句子的bleu得分, precisions:list, 包含每种ngram的准确率, bp:brevity penalty, 短句惩罚系数, ratio:translation_length / min(reference_length), translation_length:int,翻译长度, reference_length:int,最短的reference长度 """ matches_by_order = [0] * max_order possible_matches_by_order = [0] * max_order reference_length = 0 translation_length = 0 for (references, translation) in zip(reference_corpus, translation_corpus): reference_length += min(len(r) for r in references) translation_length += len(translation) merged_ref_ngram_counts = collections.Counter() # 同时考虑多个references for reference in references: merged_ref_ngram_counts |= _get_ngrams(reference, max_order)# 位或 translation_ngram_counts = _get_ngrams(translation, max_order) overlap = translation_ngram_counts & merged_ref_ngram_counts # 位与 # matches_by_order:{len(ngram):sum of counts} for ngram in overlap: matches_by_order[len(ngram) - 1] += overlap[ngram] # possible_matches_by_order(可匹配n-gram总数): # {len(ngram):sum of each ngram} for order in range(1, max_order + 1): possible_matches = len(translation) - order + 1 if possible_matches > 0: possible_matches_by_order[order - 1] += possible_matches precisions = [0] * max_order for i in range(0, max_order): if smooth: precisions[i] = ((matches_by_order[i] + 1.) / (possible_matches_by_order[i] + 1.)) else: if possible_matches_by_order[i] > 0: precisions[i] = ( float(matches_by_order[i]) / possible_matches_by_order[i]) else: precisions[i] = 0.0 if min(precisions) > 0: p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions) geo_mean = math.exp(p_log_sum) else: geo_mean = 0 # 翻译长度惩罚(对较短的翻译基于较大的惩罚,以防止短翻译准确率会更高的问题) ratio = float(translation_length) / reference_length if ratio > 1.0: bp = 1. else: bp = math.exp(1 - 1. 
/ ratio) bleu = geo_mean * bp return (bleu, precisions, bp, ratio, translation_length, reference_length)定义seq2seqimport tensorflow as tf from tensorflow.contrib import rnn from tensorflow.python.layers import core as layers_core def getLayeredCell(layer_size, num_units, input_keep_prob, output_keep_prob=1.0): '''多层rnn单元构造''' return rnn.MultiRNNCell([ rnn.DropoutWrapper( tf.nn.rnn_cell.LSTMCell( name='basic_lstm_cell', num_units=num_units), input_keep_prob, output_keep_prob) for i in range(layer_size) ]) def bi_encoder(embed_input, in_seq_len, num_units, layer_size, input_keep_prob): '''双向rnn编码器 embed_input:embeddirding后的输入序列 num_units:隐藏层单元数 layer_size:rnn层数 return: bidirectional_dynamic_rnn不同于bidirectional_rnn,结果级联分层输出,可 concat到一起 encoder_output: 每个timestep输出,每层输出按最后一维concat到一起 encoder_state:每层的final state,(output_state_fw, output_state_bw) ''' # encode input into a vector bi_layer_size = int(layer_size / 2) encode_cell_fw = getLayeredCell(bi_layer_size, num_units, input_keep_prob) encode_cell_bw = getLayeredCell(bi_layer_size, num_units, input_keep_prob) bi_encoder_output, bi_encoder_state = tf.nn.bidirectional_dynamic_rnn( cell_fw=encode_cell_fw, cell_bw=encode_cell_bw, inputs=embed_input, sequence_length=in_seq_len, dtype=embed_input.dtype, time_major=False) # concat encode output and state encoder_output = tf.concat(bi_encoder_output, -1) encoder_state = [] for layer_id in range(bi_layer_size): encoder_state.append(bi_encoder_state[0][layer_id]) encoder_state.append(bi_encoder_state[1][layer_id]) encoder_state = tuple(encoder_state) return encoder_output, encoder_state def attention_decoder_cell(encoder_output, in_seq_len, num_units, layer_size, input_keep_prob): '''attention decoder return: 加入attention_mechanim的decoder cell ''' attention_mechanim = tf.contrib.seq2seq.BahdanauAttention( num_units, encoder_output, in_seq_len, normalize=True) # attention_mechanim = tf.contrib.seq2seq.LuongAttention(num_units, # encoder_output, in_seq_len, scale = True) cell = getLayeredCell(layer_size, num_units, input_keep_prob) cell = tf.contrib.seq2seq.AttentionWrapper( cell, attention_mechanim, attention_layer_size=num_units) return cell def decoder_projection(output, output_size): return tf.layers.dense( output, output_size, activation=None, use_bias=False, name='output_mlp') def train_decoder(encoder_output, in_seq_len, target_seq, target_seq_len, encoder_state, num_units, layers, embedding, output_size, input_keep_prob, projection_layer): '''只进行train过程''' decoder_cell = attention_decoder_cell(encoder_output, in_seq_len, num_units, layers, input_keep_prob) batch_size = tf.shape(in_seq_len)[0] init_state = decoder_cell.zero_state( batch_size, tf.float32).clone(cell_state=encoder_state) helper = tf.contrib.seq2seq.TrainingHelper( target_seq, target_seq_len, time_major=False) decoder = tf.contrib.seq2seq.BasicDecoder( decoder_cell, helper, init_state, output_layer=projection_layer) outputs, _, _ = tf.contrib.seq2seq.dynamic_decode( decoder, maximum_iterations=100) return outputs.rnn_output def infer_decoder(encoder_output, in_seq_len, encoder_state, num_units, layers, embedding, output_size, input_keep_prob, projection_layer): '''seq2seq函数中可以使用beamsearch方法来进行decoder''' decoder_cell = attention_decoder_cell(encoder_output, in_seq_len, num_units, layers, input_keep_prob) batch_size = tf.shape(in_seq_len)[0] init_state = decoder_cell.zero_state( batch_size, tf.float32).clone(cell_state=encoder_state) # TODO: start tokens and end tokens are hard code """ helper = tf.contrib.seq2seq.GreedyEmbeddingHelper( 
embedding, tf.fill([batch_size], 0), 1) decoder = tf.contrib.seq2seq.BasicDecoder(decoder_cell, helper, init_state, output_layer=projection_layer) """ decoder = tf.contrib.seq2seq.BeamSearchDecoder( cell=decoder_cell, embedding=embedding, start_tokens=tf.fill([batch_size], 0), end_token=1, initial_state=init_state, beam_width=10, output_layer=projection_layer, length_penalty_weight=1.0) outputs, _, _ = tf.contrib.seq2seq.dynamic_decode( decoder, maximum_iterations=100) return outputs.sample_id def seq2seq(in_seq, in_seq_len, target_seq, target_seq_len, vocab_size, num_units, layers, dropout): """seq2seq模型建立 return: training -- outputs.rnn_output: rnn输出的概率分布结果 infering -- outputs.sample_id:输出词id """ in_shape = tf.shape(in_seq) batch_size = in_shape[0] # 训练开启dropout,预测不开启 if target_seq != None: input_keep_prob = 1 - dropout else: input_keep_prob = 1 # 全连接层输出预测 projection_layer = layers_core.Dense(vocab_size, use_bias=False) # embedding input and target sequence with tf.device('/cpu:0'): embedding = tf.get_variable( name='embedding', shape=[vocab_size, num_units]) embed_input = tf.nn.embedding_lookup(embedding, in_seq, name='embed_input') # encode and decode encoder_output, encoder_state = bi_encoder( embed_input, in_seq_len, num_units, layers, input_keep_prob) decoder_cell = attention_decoder_cell(encoder_output, in_seq_len, num_units, layers, input_keep_prob) batch_size = tf.shape(in_seq_len)[0] # decoder初始化,权重初始化,并且将cell state初始化为encoder的final state init_state = decoder_cell.zero_state( batch_size, tf.float32).clone(cell_state=encoder_state) if target_seq != None: embed_target = tf.nn.embedding_lookup( embedding, target_seq, name='embed_target') helper = tf.contrib.seq2seq.TrainingHelper( embed_target, target_seq_len, time_major=False) else: # TODO: start tokens and end tokens are hard code # 0,1分别应对应句子的起始符id和终止符id helper = tf.contrib.seq2seq.GreedyEmbeddingHelper( embedding, tf.fill([batch_size], 0), 1) decoder = tf.contrib.seq2seq.BasicDecoder( decoder_cell, helper, init_state, output_layer=projection_layer) outputs, _, _ = tf.contrib.seq2seq.dynamic_decode( decoder, maximum_iterations=100) if target_seq != None: return outputs.rnn_output else: return outputs.sample_id def seq_loss(output, target, seq_len): '''计算损失 target:包括一个起始符 ''' target = target[:, 1:] cost = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=output, labels=target) batch_size = tf.shape(target)[0] # 不每个句子对都达到max_timestep,排除多余字符带来的损失 loss_mask = tf.sequence_mask(seq_len, tf.shape(output)[1]) cost = cost * tf.to_float(loss_mask) return tf.reduce_sum(cost) / tf.to_float(batch_size)模型定义import tensorflow as tf from os import path import random class Model(): """创建模型类""" def __init__(self, train_input_file, train_target_file, test_input_file, test_target_file, vocab_file, num_units, layers, dropout, batch_size, learning_rate, output_dir, save_step=100, eval_step=1000, param_histogram=False, restore_model=False, init_train=True, init_infer=False): self.num_units = num_units # 单个RNN结构中,神经元数目 self.layers = layers self.dropout = dropout self.batch_size = batch_size self.learning_rate = learning_rate self.save_step = save_step self.eval_step = eval_step self.param_histogram = param_histogram self.restore_model = restore_model # boolen self.init_train = init_train self.init_infer = init_infer if init_train: self.train_reader = SeqReader(train_input_file, train_target_file, vocab_file, batch_size) self.train_reader.start() # 多线程 self.train_data = self.train_reader.read() # yield batch self.eval_reader = 
SeqReader(test_input_file, test_target_file, vocab_file, batch_size) self.eval_reader.start() self.eval_data = self.eval_reader.read() self.model_file = path.join(output_dir, 'model.ckpl') self.log_writter = tf.summary.FileWriter(output_dir) if init_train: self._init_train() self._init_eval() if init_infer: self.infer_vocabs = reader.read_vocab(vocab_file) self.infer_vocab_indices = dict( (c, i) for i, c in enumerate(self.infer_vocabs)) self._init_infer() self.reload_infer_model() def gpu_session_config(self): # allow_growth: 刚一开始分配少量的GPU容量,然后按需慢慢的增加 config = tf.ConfigProto() config.gpu_options.allow_growth = True return config def _init_train(self): '''初始化训练会话''' self.train_graph = tf.Graph() with self.train_graph.as_default(): # 输入 self.train_in_seq = tf.placeholder( tf.int32, shape=[self.batch_size, None]) self.train_in_seq_len = tf.placeholder( tf.int32, shape=[self.batch_size]) self.train_target_seq = tf.placeholder( tf.int32, shape=[self.batch_size, None]) self.train_target_seq_len = tf.placeholder( tf.int32, shape=[self.batch_size]) # 输出 output = seq2seq(self.train_in_seq, self.train_in_seq_len, self.train_target_seq, self.train_target_seq_len, len(self.train_reader.vocabs), self.num_units, self.layers, self.dropout) self.train_output = tf.argmax(tf.nn.softmax(output), 2) # 损失 self.loss = seq_loss(output, self.train_target_seq, self.train_target_seq_len) # 梯度截断 params = tf.trainable_variables() gradients = tf.gradients(self.loss, params) clipped_gradients, _ = tf.clip_by_global_norm(gradients, 0.5) self.train_op = tf.train.AdamOptimizer( learning_rate=self.learning_rate).apply_gradients( list(zip(clipped_gradients, params))) # 变量统计输出历史变化情况 if self.param_histogram: for v in tf.trainable_variables(): tf.summary.histogram('train_' + v.name, v) tf.summary.scalar('loss', self.loss) self.train_summary = tf.summary.merge_all() self.train_init = tf.global_variables_initializer() self.train_saver = tf.train.Saver() self.train_session = tf.Session( graph=self.train_graph, config=self.gpu_session_config()) def _init_eval(self): '''初始化测试操作''' self.eval_graph = tf.Graph() with self.eval_graph.as_default(): self.eval_in_seq = tf.placeholder( tf.int32, shape=[self.batch_size, None]) self.eval_in_seq_len = tf.placeholder( tf.int32, shape=[self.batch_size]) self.eval_output = seq2seq( self.eval_in_seq, self.eval_in_seq_len, None, None, len(self.eval_reader.vocabs), self.num_units, self.layers, self.dropout) if self.param_histogram: for v in tf.trainable_variables(): tf.summary.histogram('eval_' + v.name, v) self.eval_summary = tf.summary.merge_all() self.eval_saver = tf.train.Saver() self.eval_session = tf.Session( graph=self.eval_graph, config=self.gpu_session_config()) def _init_infer(self): '''初始化推断''' self.infer_graph = tf.Graph() with self.infer_graph.as_default(): self.infer_in_seq = tf.placeholder(tf.int32, shape=[1, None]) self.infer_in_seq_len = tf.placeholder(tf.int32, shape=[1]) self.infer_output = seq2seq(self.infer_in_seq, self.infer_in_seq_len, None, None, len(self.infer_vocabs), self.num_units, self.layers, self.dropout) self.infer_saver = tf.train.Saver() self.infer_session = tf.Session( graph=self.infer_graph, config=self.gpu_session_config()) def train(self, epochs, start=0): if not self.init_train: raise Exception('Train graph is not inited!') with self.train_graph.as_default(): if path.isfile(self.model_file + '.meta') and self.restore_model: print("Reloading model file before training.") self.train_saver.restore(self.train_session, self.model_file) else: 
self.train_session.run(self.train_init) total_loss = 0 for step in range(start, epochs): data = next(self.train_data) # yeild in_seq = data['in_seq'] in_seq_len = data['in_seq_len'] target_seq = data['target_seq'] target_seq_len = data['target_seq_len'] output, loss, train, summary = self.train_session.run( [ self.train_output, self.loss, self.train_op, self.train_summary ], feed_dict={ self.train_in_seq: in_seq, self.train_in_seq_len: in_seq_len, self.train_target_seq: target_seq, self.train_target_seq_len: target_seq_len }) total_loss += loss self.log_writter.add_summary(summary, step) if step % self.save_step == 0: self.train_saver.save(self.train_session, self.model_file) print(("Saving model. Step: %d, loss: %f" % (step, total_loss / self.save_step))) # print sample output sid = random.randint(0, self.batch_size - 1) input_text = decode_text(in_seq[sid], self.eval_reader.vocabs) output_text = decode_text(output[sid], self.train_reader.vocabs) target_text = decode_text( target_seq[sid], self.train_reader.vocabs).split(' ')[1:] target_text = ' '.join(target_text) print('******************************') print(('src: ' + input_text)) print(('output: ' + output_text)) print(('target: ' + target_text)) if step % self.eval_step == 0: bleu_score = self.eval(step) print(("Evaluate model. Step: %d, score: %f, loss: %f" % (step, bleu_score, total_loss / self.save_step))) eval_summary = tf.Summary(value=[ tf.Summary.Value(tag='bleu', simple_value=bleu_score) ]) self.log_writter.add_summary(eval_summary, step) if step % self.save_step == 0: total_loss = 0 def eval(self, train_step): '''测试函数,bleu_score''' with self.eval_graph.as_default(): self.eval_saver.restore(self.eval_session, self.model_file) bleu_score = 0 target_results = [] output_results = [] for step in range(0, self.eval_reader.data_size): data = next(self.eval_data) in_seq = data['in_seq'] in_seq_len = data['in_seq_len'] target_seq = data['target_seq'] target_seq_len = data['target_seq_len'] outputs = self.eval_session.run( self.eval_output, feed_dict={ self.eval_in_seq: in_seq, self.eval_in_seq_len: in_seq_len }) for i in range(len(outputs)): output = outputs[i] target = target_seq[i] output_text = decode_text( output, self.eval_reader.vocabs).split(' ') target_text = decode_text( target[1:], self.eval_reader.vocabs).split(' ') prob = int( self.eval_reader.data_size * self.batch_size / 10) target_results.append([target_text]) output_results.append(output_text) # 随机输出结果 if random.randint(1, prob) == 1: print('====================') input_text = decode_text( in_seq[i], self.eval_reader.vocabs) print(('src:' + input_text)) print(('output: ' + ' '.join(output_text))) print(('target: ' + ' '.join(target_text))) return compute_bleu(target_results, output_results)[0] * 100 def reload_infer_model(self): # 重新加载推理图 with self.infer_graph.as_default(): self.infer_saver.restore(self.infer_session, self.model_file) def infer(self, text): if not self.init_infer: raise Exception('Infer graph is not inited!') with self.infer_graph.as_default(): in_seq = encode_text( text.split(' ') + [ '', ], self.infer_vocab_indices) in_seq_len = len(in_seq) outputs = self.infer_session.run( self.infer_output, feed_dict={ self.infer_in_seq: [in_seq], self.infer_in_seq_len: [in_seq_len] }) output = outputs[0] output_text = decode_text(output, self.infer_vocabs) return output_text模型训练m = Model( './couplet/train/in.txt', './couplet/train/out.txt', './couplet/test/in.txt', './couplet/test/out.txt', './couplet/vocabs', num_units=256, layers=4, dropout=0.2, batch_size=32, 
learning_rate=0.001, output_dir='./models/output_couplet', restore_model=False ) m.train(5000000)Saving model. Step: 0, loss: 1.017375 ****************************** src: 龙 根 在 抱 , 基 业 于 胸 , 发 展 进 行 时 , 须 知 岁 月 无 期 、 资 源 有 限 output: 牿 帮 帮 酼 酼 酼 酼 酼 邀 酼 灣 灣 灣 灣 灣 灣 灣 灣 灣 灣 灣 灣 灣 灣 帮 帮 帮 帮 target: 红 线 铭 心 , 蓝 图 入 梦 , 和 谐 规 划 处 , 尤 记 尺 疆 易 失 、 寸 土 难 还 INFO:tensorflow:Restoring parameters from ./models/output_couplet\model.ckpl ==================== src:千 里 青 青 草 , 碧 色 连 天 , 无 边 春 色 共 谁 看 output: target: 一 池 淡 淡 荷 , 清 香 扑 面 , 几 度 花 香 伴 月 眠 ==================== src:青 山 随 我 隐 output: target: 碧 水 为 君 酬 ==================== src:治 水 禹 王 功 , 看 龙 伏 波 平 , 赫 赫 千 秋 歌 大 业 output: target: 富 民 政 策 好 , 喜 鱼 香 稻 熟 , 熙 熙 百 姓 乐 丰 年 ==================== src:苦 楚 output: target: 艰 难 ==================== src:雨 前 茶 绿 宜 烹 雪 output: target: 宴 罢 酒 高 莫 驾 车 ==================== src:以 善 结 禅 缘 , 诚 通 佛 性 , 人 和 兼 地 利 output: target: 任 山 扬 龙 首 , 水 唱 凤 歌 , 雨 顺 更 风 调 ==================== src:万 里 香 风 菩 萨 道 output: target: 一 声 梵 唱 华 严 经 ==================== src:释 怀 心 见 佛 output: target: 难 道 不 如 人 =================[...]import ctgan import warnings warnings.filterwarnings('ignore') from datetime import datetime,timedelta,date from ctgan import CTGANSynthesizer import pandas as pd from table_evaluator import load_data, TableEvaluator df = pd.read_csv('/content/drive/MyDrive/BTCUSD_day.csv') ##print the first five rows of your dataset for a little overview print(df.head()) save_df = df # sorted columns for new synthetic data as we will be using this to sort the new columns for our synthetic dataset sorted_columns = [ i for i in df.columns] ##the number of synthetic samples i want to generate fro this data SAMPLES = 365 ##store the last date in a variable, we will use this later for our synthetic dataset last_date_1 = df.Date[0] ##format to a date format and let our loop count equal the number of SAMPLES we wish to generate last_date = tuple(last_date_1.split('-')) last_date_int = tuple([int(i) for i in last_date]) last_date_split = date(*last_date_int) +timedelta(1) date_for_syn_data = [] for i in range(SAMPLES,0,-1): new = last_date_split + timedelta(i) date_for_syn_data.append(datetime.strftime(new,'%Y-%m-%d')) ##we can now drop the dates and symbols column df_syn = df.drop(['Date','Symbol'],1) new_df = df_syn ##instantiate the CTGANSynthesizer class ctgan_syn = CTGANSynthesizer(epochs=15) ctgan_syn.fit(new_df) ##generate 365 samples ct_data = ctgan_syn.sample(SAMPLES) ##check for similarities between the real data and synthetic data table_evaluator = TableEvaluator(new_df, ct_samples) table_evaluator.visual_evaluation() # format all numbers to 2 decimal places def to_two_dp(x): return round(x,2) ct_data.apply(lambda x:to_two_dp(x)) ##create the btc symbol and date column ct_data['Symbol'] = ['BTCUSD' for i in range(SAMPLES)] ct_data['Date'] = date_for_syn_data ##resort the columns of our dataset ct_data_sort = ct_data.reindex(sorted_columns, axis=1) ##print our newly ordered synthetic dataset print(ct_data_sort.head()) ##merge the real data and synthetic data and save to a csv file df_new = pd.concat([ct_data_sort,save_df],ignore_index=True) df_new.to_csv('/content/drive/MyDrive/BTC_USD_version-2.csv',index=False) print(df_new.info()) RangeIndex: 2012 entries, 0 to 2011 Data columns (total 8 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Date 2012 non-null object 1 Symbol 2012 non-null object 2 Open 2012 non-null float64 3 High 2012 non-null float64 4 Low 2012 non-null float64 5 Close 2012 non-null 
float64 6 Volume BTC 2012 non-null float64 7 Volume USD 2012 non-null float64 dtypes: float64(6), object(2) memory usage: 125.9+ KB NoneSeismic RaysWe can use Snell's Law and simple geometry to calculate the path of a ray through a layered earth model, from source to receiver. One approach is to "shoot" rays of one type (direct, multiple, etc.) from the source at different angles and see where they end up reaching the surface. We could guess and check angles manually until we find the right distance to the reciever. The computer does the ray-shooting calculation fast enough that we might as well calculate the distance for a range of angles all at once, then select the angle with the closest result.Once we have the takeoff angle we can calculate the horizontal slowness $p$ of the ray. Then we can also determine the traveltime and solve for the wave amplitude.import numpy as np # import matplotlib import matplotlib.pyplot as pltExercise 1: "Shoot rays"We'll work on modeling the two Upland, California earthquakes shown in lecture. The receiver distance for the records was 44km. We can begin with modeling the events at a depth of 25km. 1.1: Define parameters of the model#Define Parameters b1=2400 #layer 1 velocity m/s rho1=2400 #layer 1 density kg/m^3 z1=4000 #depth of layer 1 m mu1=rho1*(b1**2) b2=3500 #layer 2 velocity m/s rho2=2670 #layer 2 density kg/m^3 mu2=rho2*(b2**2) b3=4500 #layer 2 velocity m/s rho3=3300 #layer 2 density kg/m^3 z3=35000 #depth of layer 3 m mu3=rho3*(b3**2) h=25000 #depth of source(earthquake) m x=44000 #distance of recording station m1.2: Shooting individual raysYou can play with the angle i here to determine the arrival distance, time, and amplitude for the direct wave.#find traveltime: first we need to find the angle i that results in a ray reaching the station at desired distance #distance test equation i=62.9 #test angle of incidence in degrees x1=(h-z1)*np.tan(i*np.pi/180) x2=z1*np.tan(np.arcsin(b1/b2*np.sin(i*np.pi/180))) xtest=x1 + x2 print(f' i={i:f} deg. estimated_X={xtest/1000:.3f} km target_X={x/1000:.3f} km') #next find slownesses p=np.sin(i*np.pi/180)/b2 n1=np.sqrt(1/b1**2 - p**2) n2=np.sqrt(1/b2**2 - p**2) print(f'slownesses p={p:e} n1={n1:e} n2={n2:e} s/m') #and traveltime t=p*x + (h-z1)*n2 + z1*n1 #and amplitude T=(2*mu2*n2)/(mu1*n1 + mu2*n2) amp=1*T print(f'Wave arrival time={t:.3f} s amplitude={amp:.3f}')1.3: Searching for rays by distance (10 pts)Now we'll use the same equations as above over a range of angles to find the angle more efficiently. The equations for the direct wave are included. Fill in the equations for the first multiple--one reflection in the upper layer.#First Direct #automatically find i i=np.arange(0,90.0,0.0001) xdir1=(h-z1)*np.tan(i*np.pi/180) xdir2=z1*np.tan(np.arcsin(b1/b2*np.sin(i*np.pi/180))) xtestdir=xdir1 + xdir2 Idir=np.argmin(np.abs(xtestdir-x)) #find index of minimum difference between xtest and target distance #next find slownesses p=np.sin(i[Idir]*np.pi/180)/b2 n1=np.sqrt(1/b1**2 - p**2) n2=np.sqrt(1/b2**2 - p**2) #and traveltime tdir=p*xtestdir[Idir] + (h-z1)*n2 + z1*n1 #and amplitude T=(2*mu2*n2)/(mu1*n1 + mu2*n2) ampdir=1*T #Next First Multiple xmul1 = ... ... xtestmul = ... 
Imul=np.argmin(np.abs(xtestmul-x)) #find index of minimum difference between xtest and target distance #next find slownesses p=np.sin(i[Imul]*np.pi/180)/b2 n1=np.sqrt(1/b1**2 - p**2) n2=np.sqrt(1/b2**2 - p**2) #and traveltime tmul=p*xtestmul[Imul] + (h-z1)*n2 + 3*z1*n1 #note factor of 3 SOLUTION #and amplitude T=(2*mu2*n2)/(mu1*n1 + mu2*n2) R= ... ampmul= ... print(f'Source depth: {h/1000} km\n') print(f'Direct:\ni={i[Idir]:.4f} degrees\nx={xtestdir[Idir]/1000:.3f} km\nWave arrival time={tdir:.3f} s amplitude={ampdir:.3f}\n') print(f'Multiple:\ni={i[Imul]:.4f} degrees\nx={xtestmul[Imul]/1000:.3f} km\nWave arrival time={tmul:.3f} s amplitude={ampmul:.3f}\n') print(np.abs(ampdir/ampmul)) # note: the tester below expects the original 25km test depth grader.check("q1.3")1.4: Evaluating source depth (10 pts)For the given distance, velocity, and density parameters re-compute the direct and multiple wave characteristics for a source depth of 9km and 18km. Discuss the timing and relative amplitude of the direct wave and first multiple arrivals. How might these arrivals be used to distinguish source depth when reviewing a seismogram? _Type your answer here, replacing this text._ Exercise 2: Compare to RecordThe tangential component records for the two 1988 Upland California earthquakes are included in Fig. 1. **Figure 1**: Broadband, Wood-Anderson long period, and Wood-Anderson short period (WASP) records for the 1988 Upland California earthquakes. 2.1: Measure records (10 pts)Make relative time and relative amplitude measurements for the direct and multiple arrivals. Use the WASP records and measure the amplitudes from peak to peak. _Type your answer here, replacing this text._ 2.2: Estimate Depth (10 pts)Use your relative time and relative amplitude measurements to estimate the depth of the two 1988 Upland California earthquakes. First, find the source depth that gives an amplitude ratio of 1 (e.g. ampdir/ampmul = 1). What does this tell you about the depths of the two Upland earthquakes? Now try refining the depth of each event. How close are you able to fit the calculated relative times and amplitudes to what you measured? Remember that the calculation is set up for a source in the second layer. _Type your answer here, replacing this text._ Exercise 3: Extra CreditThe following exercises are worth 10pts each, which will be added to your overall homework score. They are independent problems, so you can do as many or few as you wish. Any extra credit submissions are due one week beyond the rest of the assignment. 3.1: Improve the Fit (10 pts)Attempt to improve the fit to the earthquakes by adjusting the upper layer shear wave velocity and/or layer thickness to better match relative timing and refine your relative depth estimates. 3.2: Plot the Rays (10 pts)Write code to plot the model and the ray paths for the direct, and first multiple. An example plot with the direct (red) and Moho reflection (green, not calculated or required) is shown below.**Figure 2**: Example ray plot showing direct (red) and Moho reflection (green) rays. 3.3: Moho Reflection (10 pts)Set up the problem and write modified code for the Moho reflection (green ray above). Based on your arrival time and amplitude results, speculate whether or not the Moho reflection is important in the Upland earthquake data. SubmissionYou should see two options for submission: The `eps130_export` and the `grader.export()` cells. Use whichever works for you. 
If all else fails, print to pdf from your browser.Make sure you have run all cells in your notebook in order before running the cell below, so that all images/graphs appear in the output. The cell below will generate a pdf file for you to submit. **Please save before exporting!** The exporter will not see any unsaved changes to your notebook.!../eps130_export eps130_hw6_seismicRays_v1.2.ipynb[Access your pdf here.](./eps130_hw6_seismicRays_v1.2.pdf)Remember to check that your pdf shows your most recent work before submitting. SubmissionMake sure you have run all cells in your notebook in order before running the cell below, so that all images/graphs appear in the output. The cell below will generate a zip file for you to submit. **Please save before exporting!**# Save your notebook first, then run this cell to export your submission. grader.export()Gradients$\pi$$\pi$cs231n.github.io/neural-networks-3/from math import sin, cos import matplotlib.pyplot as plt import numpy as np #from __future__ import print_function from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets @np.vectorize def any_function(x): return (x)**2 + 3*sin(x) - 4*cos((x)**2) x = np.arange(-5, 5, 0.1) y = any_function(x) curveFigure = plt.figure() plt.plot(x,y, figure = curveFigure) plt.show()Next, we need to find the derivative. A very simple and popular method is the [symmetric difference](https://en.wikipedia.org/wiki/Numerical_differentiation).@np.vectorize def numerical_derivative(x, f, h = 0.001): return (f(x+h) - f(x-h))/(2.0*h) ytick = numerical_derivative(x, any_function) plt.plot(x,ytick) plt.show()Now, we look for an $x$ for which $f'(x) = 0$. This seems like a difficult function to optimize since there are many values where this is the case.For starters we can choose one point randomly, say $x = 3$, and start our gradient descent: basically we move $x$ along the negative gradient, scaled by some 'rate' $\mu$, i.e. $x_{\mathtt{new}} = x_{\mathtt{old}} - \mu \cdot f'(x_{\mathtt{old}})$.Furthermore, since $f'(x_1)$ is basically the slope of the tangent at $(x_1,y_1)$, we can simply draw it using the equation $y = f'(x_1)\cdot(x - x_1) + y_1$@np.vectorize def tangent(x, x_p, any_function): y_p = any_function(x_p) m = numerical_derivative(x_p, any_function) y = m*(x - x_p) + y_p return y #tangent([min(x), max(x)], xn, any_function) xn = 3 mu = 0.2 x_range = [min(x), max(x)] y_range = tangent(x_range, xn, any_function) plt.plot(x,y) plt.plot(x_range, y_range, '-g') plt.plot(xn, any_function(xn), '.r') plt.ylim(min(y)-1, max(y)+1) plt.show() #mu = 0.9*mu xnew = xn - mu * numerical_derivative(xn, any_function) xn = xnew print('xn: %f, f(xn) = %f, f\'(xn) = %f' % (xn, any_function(xn), numerical_derivative(xn, any_function))) print('mu = %f' % (mu)) plt.plot(x,y) plt.plot(xn, any_function(xn), '.r') plt.show() # TODO: animate!xn: 0.415843, f(xn) = -2.555534, f'(xn) = 4.148432 mu = 0.200000This example shows very nicely that although we are close to the minimum, we still have trouble converging. This is due to the fixed learning rate. Hence, it is sensible to reduce the learning rate over time, e.g.
by using exponential decay $\mu = \mu_0 e^{-kt}$ ([see the cs231n notes for other schedules](http://cs231n.github.io/neural-networks-3/anneal)).def decay_exp(mu_0, t, k): return mu_0 * np.exp(-k*t) def optimize_simple(f, x, mu, mudecay = decay_exp, k = 1, maxiter = 1000, eps = 0.001): y = f(x) i = 1 yn = np.inf xhist = [x] yhist = [y] gradhist = [np.inf] mu_act = mudecay(mu, 0, k) while (not np.isclose(y, yn)) and (i < maxiter): y = yn ftick_x = numerical_derivative(x, f) x = x - mu_act * ftick_x yn = f(x) xhist.append(x) yhist.append(yn) gradhist.append(ftick_x) mu_act = mudecay(mu, i, k) i += 1 return xhist, yhist, gradhist plt.plot(x,y) xhist, yhist, gradhist = optimize_simple(any_function, 2, 0.2) print(len(xhist)) plt.plot(xhist, yhist, '.r') plt.show() @interact(x_in = (-4, 4, 0.1), mu = (0.01, 1, 0.01), k = (0.01, 5, 0.01)) def interactive_optim(x_in, mu, k): xhist, yhist, gradhist = optimize_simple(any_function, x_in, mu, k=k) xx = np.arange(min(xhist), max(xhist), 0.01) yy = any_function(xx) print(len(xhist)) plt.plot(xx,yy) plt.plot(xhist, yhist, '.r') plt.show()See https://github.com/cmoscardi/embedded_d3_example/blob/master/Embedded_D3.ipynb%%javascript require.config({ paths: { d3: "//d3js.org/d3.v4" } }); %%javascript element.append("
"); %%html %%javascript require(['d3'], function(d3){ //a weird idempotency thing $("#chart1").remove(); //create canvas element.append("
"); $("#chart1").width("960px"); $("#chart1").height("600px"); var margin = {top: 20, right: 20, bottom: 30, left: 40}; var width = 880 - margin.left - margin.right; var height = 500 - margin.top - margin.bottom; var svg = d3.select("#chart1").append("svg") .style("position", "relative") .style("max-width", "960px") .attr("width", width + "px") .attr("height", (height + 50) + "px") .append("g") .attr("transform", "translate(" + margin.left + "," + margin.top + ")"); var width = 960, height = 600; //set data var color = d3.scaleOrdinal(d3.schemeCategory20); var simulation = d3.forceSimulation() .force("link", d3.forceLink().id(function(d) { return d.id; })) .force("charge", d3.forceManyBody()) .force("center", d3.forceCenter(width / 2, height / 2)); d3.json("data.json", function(error, graph) { if (error) throw error; var link = svg.append("g") .attr("class", "links") .selectAll("line") .data(graph.links) .enter().append("line") .attr("stroke-width", function(d) { return Math.sqrt(d.value); }); var node = svg.append("g") .attr("class", "nodes") .selectAll("circle") .data(graph.nodes) .enter().append("circle") .attr("r", 5) .attr("fill", function(d) { return color(d.group); }) .call(d3.drag() .on("start", dragstarted) .on("drag", dragged) .on("end", dragended)); node.append("title") .text(function(d) { return d.id; }); simulation .nodes(graph.nodes) .on("tick", ticked); simulation.force("link") .links(graph.links); function ticked() { link .attr("x1", function(d) { return d.source.x; }) .attr("y1", function(d) { return d.source.y; }) .attr("x2", function(d) { return d.target.x; }) .attr("y2", function(d) { return d.target.y; }); node .attr("cx", function(d) { return d.x; }) .attr("cy", function(d) { return d.y; }); } }); function dragstarted(d) { if (!d3.event.active) simulation.alphaTarget(0.3).restart(); d.fx = d.x; d.fy = d.y; } function dragged(d) { d.fx = d3.event.x; d.fy = d3.event.y; } function dragended(d) { if (!d3.event.active) simulation.alphaTarget(0); d.fx = null; d.fy = null; } }); %%html %%javascript require(['d3'], function(d3){ //a weird idempotency thing $("#chart1").remove(); //create canvas element.append("
"); $("#chart1").width("960px"); $("#chart1").height("600px"); var margin = {top: 20, right: 20, bottom: 30, left: 40}; var width = 880 - margin.left - margin.right; var height = 500 - margin.top - margin.bottom; var svg = d3.select("#chart1").append("svg") .style("position", "relative") .style("max-width", "960px") .attr("width", width + "px") .attr("height", (height + 50) + "px"); var g = svg.append("g") .attr("transform", "translate(" + margin.left + "," + margin.top + ")"); var width = 960, height = 600; //set data var color = d3.scaleOrdinal(d3.schemeCategory20); var simulation = d3.forceSimulation() .force("link", d3.forceLink().distance(10).strength(0.5)) .force("charge", d3.forceManyBody()) .force("center", d3.forceCenter(width / 2, height / 2)); var div = d3.select("body").append("div") .attr("class", "tooltip") .style("opacity", 0); d3.json("data.json", function(error, graph) { if (error) throw error; var nodes = graph.nodes, nodeById = d3.map(nodes, function(d) { return d.id; }), links = graph.links, bilinks = []; links.forEach(function(link) { var s = link.source = nodeById.get(link.source), t = link.target = nodeById.get(link.target), i = {}; // intermediate node nodes.push(i); links.push({source: s, target: i}, {source: i, target: t}); bilinks.push([s, i, t]); }); var link = svg.selectAll(".link") .data(bilinks) .enter().append("path") .attr("class", "link"); var node = svg.selectAll(".node") .data(nodes.filter(function(d) { return d.id; })) .enter().append("circle") .attr("class", "node") .attr("r", 5) .attr("fill", function(d) { return color(d.group); }) .call(d3.drag() .on("start", dragstarted) .on("drag", dragged) .on("end", dragended) ); node.append("title") .text(function(d) { return d.id; }); simulation .nodes(nodes) .on("tick", ticked); simulation.force("link") .links(links); node.on("mouseover", handleMouseOver) .on("mouseout", handleMouseOut); function ticked() { link.attr("d", positionLink); node.attr("transform", positionNode); } }); function positionLink(d) { return "M" + d[0].x + "," + d[0].y + "S" + d[1].x + "," + d[1].y + " " + d[2].x + "," + d[2].y; } function positionNode(d) { return "translate(" + d.x + "," + d.y + ")"; } function dragstarted(d) { if (!d3.event.active) simulation.alphaTarget(0.3).restart(); d.fx = d.x, d.fy = d.y; } function dragged(d) { d.fx = d3.event.x, d.fy = d3.event.y; } function dragended(d) { if (!d3.event.active) simulation.alphaTarget(0); d.fx = null, d.fy = null; } // Create Event Handlers for mouse function handleMouseOver(d, i) { // Add interactivity div.transition() .duration(200) .style("opacity", .9); div.html(d.id) .style("left", (d3.event.pageX) + "px") .style("top", (d3.event.pageY - 28) + "px"); } function handleMouseOut(d, i) { div.transition() .duration(500) .style("opacity", 0); } });Using [Ocean Parcels](https://oceanparcels.org) to track the surface movement of contaminants. 
Original code written by & , CSIRO, and code requires additional python libraries stored in /Parcel_Utils/ within the [SSAM Ocean Parcels Repo](https://bitbucket.csiro.au/users/por07g/repos/ssam_oceanparcels/browse)import sys import os import math from pathlib import Path from pprint import pprint from parcels import AdvectionRK4, VectorField, Variable from parcels import FieldSet, plotTrajectoriesFile, Variable, ScipyParticle, Field import numpy as np from datetime import timedelta sys.path.append('/ocean/rlovindeer/Atlantis/ssam_oceanparcels/Parcels_Utils/particle_tracking/parcels/') from util.seed_particles import get_particles, get_release_times # from util.parse_wildcards import parse_wildcardsINFO: Compiled ParcelsRandom ==> /tmp/parcels-2926/libparcels_random_c0a8b1c7-3a85-485c-a82e-d62c35d63a61.soSelect the location of interest, which is specified by a shapefile.file_id = int(input( )) scenario = {1 : "5b_Turn_Point_Diluted_bitumen", 2 : "6a_VancouverHarbour_BunkerC", 3 : "7a_JohnsonStrait_BunkerC",} print("\nScenario running :", scenario[file_id], sep = " ") #Kernels def WindAdvectionRK4(particle, fieldset, time): """Advection of particles using fourth-order Runge-Kutta integration. Function needs to be converted to Kernel object before execution""" if particle.beached == 0: wp = fieldset.wind_percentage ## this need to be add to the fieldset if wp > 0: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] u1 = u1 * wp v1 = v1 * wp lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt) (u2, v2) = fieldset.UVwind[time + .5 * particle.dt, particle.depth, lat1, lon1] u2 = u2 * wp v2 = v2 * wp lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt) (u3, v3) = fieldset.UVwind[time + .5 * particle.dt, particle.depth, lat2, lon2] u3 = u3 * wp v3 = v3 * wp lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt) (u4, v4) = fieldset.UVwind[time + particle.dt, particle.depth, lat3, lon3] u4 = u4 * wp v4 = v4 * wp u_wind = (u1 + 2*u2 + 2*u3 + u4) / 6. * particle.dt v_wind = (v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt particle.lon += (u1 + 2*u2 + 2*u3 + u4) / 6. * particle.dt particle.lat += (v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt particle.beached = 2 def BeachTesting(particle, fieldset, time): """ Testing if particles are on land. 
if 'yes' particle will be removed""" if particle.beached == 2: (u, v) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] #print(u, v) if u == 0 and v == 0: particle.beached = 1 else: particle.beached = 0 def DeleteParticle(particle, fieldset, time): particle.delete() def DecayParticle(particle, fieldset, time): dt = particle.dt field_decay_value = fieldset.decay decay = math.exp(0 * dt/field_decay_value) #math.exp(-1.0 * dt/field_decay_value) particle.decay_value = particle.decay_value * decay # Data Paths currents = Path('/ocean/rlovindeer/Atlantis/Physics/Raw_Transport_Data/') winds = Path('/ocean/rlovindeer/Atlantis/Physics/Wind/') sea_grid = Path('/ocean/rlovindeer/Atlantis/Physics/Grids/ubcSSnBathymetryV17-02_a29d_efc9_4047.nc') air_grid = Path('/ocean/rlovindeer/Atlantis/Physics/Grids/ubcSSaAtmosphereGridV1_0f03_6268_df4b.nc') # Salish Sea NEMO Model Grid, Geo-location and Bathymetry, v17-02 # Currents # u_data_path = currents + '2018-01*URaw_variables.nc' # v_data_path = currents + '2018-01*VRaw_variables.nc' # u_current = parse_wildcards(u_data_path, 'u') # v_current = parse_wildcards(v_data_path, 'v') u_current = sorted([p for p in currents.glob('2018-01*URaw_variables.nc')]) v_current = sorted([p for p in currents.glob('2018-01*VRaw_variables.nc')]) filenames = { 'U': {'lon': sea_grid,'lat': sea_grid,'data': u_current}, 'V': {'lon': sea_grid,'lat': sea_grid,'data': v_current} } variables = {'U': 'uVelocity','V': 'vVelocity'} dimensions = {'lon': 'longitude', 'lat': 'latitude', 'time': 'time'} print('creating from_nemo') fieldset = FieldSet.from_nemo(filenames, variables, dimensions, allow_time_extrapolation=True) print('creating from_nemo done') fieldset.add_constant('decay', 1.0 * 3600.0) print('add_constant decay') # HRDPS, Salish Sea, Atmospheric Forcing Grid, Geo-location, v1" # wind_data_path = winds + '*_Wind_variables.nc' # wind_paths = parse_wildcards(wind_data_path, 'u') wind_paths = sorted([p for p in winds.glob('*_Wind_variables.nc')]) wind_filenames = {'lon': os.fspath(air_grid),'lat': os.fspath(air_grid),'data': wind_paths} wind_dimensions = {'lon': 'longitude', 'lat': 'latitude', 'time': 'time'} pprint(wind_filenames) Uwind_field = Field.from_netcdf(wind_filenames, ('U_wind', 'u_wind'), wind_dimensions, fieldtype='U', allow_time_extrapolation=True, transpose=False, deferred_load=False) Vwind_field = Field.from_netcdf(wind_filenames, ('V_wind', 'v_wind'), wind_dimensions, fieldtype='V', allow_time_extrapolation=True, transpose=False, deferred_load=False) print('wind data loaded') # change longitude for the wind field Uwind_field.grid.lon = Uwind_field.grid.lon - 360 Vwind_field.grid.lon = Vwind_field.grid.lon - 360 [x_min, x_max, y_min, y_max] = Uwind_field.grid.lonlat_minmax Uwind_field.grid.lonlat_minmax = [x_min - 360, x_max - 360, y_min, y_max] Vwind_field.grid.lonlat_minmax = [x_min - 360, x_max - 360, y_min, y_max] ## adding the wind field to the fieldset object fieldset.add_field(Uwind_field) fieldset.add_field(Vwind_field) wind_field = VectorField('UVwind', Uwind_field, Vwind_field) fieldset.add_vector_field(wind_field) # wind_percentage # We need to do a sensitivity analysis of the percetage of wind to be used here wind_percentage = 1 fieldset.add_constant('wind_percentage', wind_percentage/100.0)Just in case we want to add a maximum age fieldset_sum.add_constant('max_age', dispersal_length)class MyParticle(ScipyParticle): initial_time = -100 decay_value = Variable('decay_value', dtype=np.float32, initial=1.0) beached = Variable('beached', 
dtype=np.int32, initial=0.) age = Variable('age', dtype=np.int32, initial=0.) # Particle Features num_particles_per_day = 100 feature_release_index = 0 input_shapefile_name = "/ocean/rlovindeer/Atlantis/ssam_oceanparcels/SalishSea/Shape_Scenarios/" + scenario[file_id] + ".shp" release_depth = -0.1 release_start_time = '2018-01-01' ## winter starts in December, summer Jul - Aug ## ask Susan about when to do simulation release_end_time = '2018-01-02' release_start_time = np.datetime64(release_start_time) release_end_time = np.datetime64(release_end_time) time_origin = fieldset.U.grid.time_origin.time_origin print('setting up particles') [release_times, p, num_particles] = get_release_times(time_origin, num_particles_per_day, release_start_time, release_end_time) pset = get_particles(fieldset, num_particles, input_shapefile_name, MyParticle, feature_release_index, release_times, release_depth) print(pset) # Building the kernels decay_kernel = pset.Kernel(DecayParticle) beaching_kernel = pset.Kernel(BeachTesting) ForcingWind_kernel = pset.Kernel(WindAdvectionRK4) # Adding to the main kernel my_kernel = AdvectionRK4 + decay_kernel + ForcingWind_kernel + beaching_kernel output_file_name = scenario[file_id] + '_decay_test1.nc' print(output_file_name) try: os.system('rm ' + output_file_name) except: pass print('executing particle kernel') ## Output properties output_file = pset.ParticleFile(name= output_file_name, outputdt = timedelta(minutes = 60)) pset.execute(my_kernel, # the kernel (which defines how particles move) runtime=timedelta(hours = 24*6), # the total length of the run dt = timedelta(minutes = 60), # the timestep of the kernel output_file = output_file) # the file name and the time step of the outputs output_file.close() plotTrajectoriesFile(output_file_name); print('particle trajectories completed')INFO: Temporary output files are stored in out-HZJPOJXC. INFO: You can use "parcels_convert_npydir_to_netcdf out-HZJPOJXC" to convert these to a NetCDF file during the run. 100% (518400.0 of 518400.0) |############| Elapsed Time: 0:15:17 Time: 0:15:17Jupyter Notebook - mode - Command mode (esc): used to operate on cells (ctrl + m) - Edit mode (enter): used to edit the contents of a cell (ctrl + y)- style - Markdown: used to write explanations inside a cell - Code: used to write Python code- shortcuts - run cell: shift + enter or ctrl + enter - delete cell: (command mode) x - undo: (command mode) z - create cell: (command mode) a (above), b (below) Magic Command- commands that behave specially inside a cell - % : runs a one-line magic command - %% : runs a cell-level magic command - key magic commands - pwd : path of the current Jupyter notebook file - ls : list the files in the current directory - whos : print the currently declared variables - reset : delete the currently declared variables%pwd a = 1 %whos %reset %whosInteractive namespace is empty.Shell Command- use commands of the shell environment in which the notebook is running- prefix a command with ! to run it- key commands - ls, cat, echo, ...!echo python # prints the string that follows !ls01_jupyter_notebook.ipynb Info: click the 🐍 symbol for viewing the source code (based on Andrew Ng's Deep Learning courses...) Oops!! Deep Neural Network from Scratch...from dnn.initialization import Initialization as init from dnn.activation import Activation as activate from processing import * np.random.seed(1) %reload_ext autoreload %autoreload 2 Processing...
let's roll 🎶 [🐍](./processing.py) trainF, trainL, testF, testL, classes = repackNgDatasets() ; exploringImages(trainF) trainX, testX = flatteningImages(trainF, testF)trainX and testX shapes >> 12288 209 12288 50 Activation... [🐍](./dnn/activation.py) [See forward](forward) [and backward passes...](backward)%pycat ./dnn/activation.py Initialization...
[🐍](./dnn/initialization.py) [See the model...](model)%pycat ./dnn/initialization.py Forward Propagation...
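In equations, the pass below computes $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ and $A^{[l]} = \mathrm{ReLU}(Z^{[l]})$ for each hidden layer $l$, applies a sigmoid at the output layer, $A^{[L]} = \sigma(Z^{[L]})$, and stores each (linear, activation) cache pair so the backward pass can reuse it.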
def forwardPass(X, parameters): A = X L = len(parameters) // 2 caches = [] # Hidden Layers... for l in range(1, L): AP = A Wl = 'W' + str(l) bl = 'b' + str(l) ############################################################## A = activate('ReLU', False, parameters[Wl].dot(AP) + parameters[bl], None).compute() linearCache = (AP, parameters[Wl], parameters[bl]) activationCache = parameters[Wl].dot(AP) + parameters[bl] caches.append((linearCache, activationCache)) # Output Layer...
WL = 'W' + str(L) bL = 'b' + str(L) ################################################################## AL = activate('Sigmoid', False, parameters[WL].dot(A) + parameters[bL], None).compute() linearCache = (A, parameters[WL], parameters[bL]) activationCache = parameters[WL].dot(A) + parameters[bL] caches.append((linearCache, activationCache)) return AL, caches Loss & Cost Computing... def computeCost(AL, Y): m = Y.shape[1] return np.squeeze( (1.
/ m) * (-np.dot(Y, np.log(AL).T) - np.dot(1 - Y, np.log(1 - AL).T))) Backward Propagation... def backwardPass(AL, Y, caches): grads = {} L = len(caches) Y = Y.reshape(AL.shape) # Initializing the backpropagation dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) linearCache, activationCache = caches[L - 1] AP, W, b = linearCache m = AP.shape[1] ############################################################## dZ = activate('Sigmoid', True, activationCache, dAL).compute() grads["dA" + str(L - 1)] = np.dot(W.T, dZ) grads["dW" + str(L)] = 1. / m * np.dot(dZ, AP.T) grads["db" + str(L)] = 1. / m * np.sum(dZ, axis=1, keepdims=True) for l in reversed(range(L - 1)): linearCache, activationCache = caches[l] AP, W, b = linearCache m = AP.shape[1] ########################################################## dZ = activate('ReLU', True, activationCache, grads["dA" + str(l + 1)]).compute() grads["dA" + str(l)] = np.dot(W.T, dZ) grads["dW" + str(l + 1)] = 1. / m * np.dot(dZ, AP.T) grads["db" + str(l + 1)] = 1.
/ m * np.sum(dZ, axis=1, keepdims=True) return grads Weights Updating... def update_parameters(parameters, grads, learning_rate): L = len(parameters) // 2 for l in range(L): parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - \ learning_rate * grads["dW" + str(l+1)] parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - \ learning_rate * grads["db" + str(l+1)] return parameters Prediction...
def predict(X, y, parameters): m = X.shape[1] n = len(parameters) // 2 p = np.zeros((1, m)) probas, caches = forwardPass(X, parameters) for i in range(0, probas.shape[1]): p[0, i] = 1 if probas[0, i] > 0.5 else 0 print("Accuracy: " + str(np.sum((p == y) / m))) return p Modelization... def L_layer_model(X, Y, layersDims, learning_rate, num_iterations, print_cost=False): np.random.seed(1) costs = [] parameters = init(layersDims).compute() # Loop (gradient descent) for i in range(0, num_iterations): AL, caches = forwardPass(X, parameters) cost = computeCost(AL, Y) grads = backwardPass(AL, Y, caches) parameters = update_parameters(parameters, grads, learning_rate) # Print the cost every 100 iterations and record it for the cost plot if print_cost and i % 100 == 0: print("Cost after iteration %i: %f" % (i, cost)) if print_cost and i % 100 == 0: costs.append(cost)
plt.plot(np.squeeze(costs)) plt.ylabel('cost') plt.xlabel('iterations (per hundreds)') plt.title("Learning rate =" + str(learning_rate)) plt.show() return parameters parameters = L_layer_model(trainX, trainL, learning_rate=0.0075, layersDims=[12288, 20, 7, 5, 1], num_iterations=2500, print_cost=True) pred_train = predict(trainX, trainL, parameters) pred_test = predict(testX, testL, parameters)Accuracy: 0.8It's quite clear that today the biggest companies in the world make most of their profits from harvesting and productising their users' data. With privacy becoming more and more of a concern as we become more connected, having our privacy protected is increasingly treated as a basic right, especially with respect to those who profit from it.Differential privacy in particular is a model that aims to protect data owners against bad actors reversing aggregated data to recover details of individual users.For the example in this post, we will use a dataset that includes each person's name, age, email and annual income. In this example, say we wanted to determine how many people in our dataset make over $50,000 annually. Instead of sharing all of the specific people and their incomes, we would rather share the aggregate data. Someone viewing our results might know that 30% of our set make over that threshold, but not which people.**However**, let's say someone viewing the results wants to know the specific income of one person. To do this, they've gone and collected background information on every person except the person of interest to them. If they know which of the other 4999 people make over the threshold, they can determine whether the person in question makes over or under the threshold.This type of attack is known as a differencing (or differentiated) attack; it is very difficult to protect against, and it is exactly what differential privacy aims to defend against. The primary method of achieving privacy is adding random noise to the aggregate data (loosely analogous to a private key in cryptography). In the example above, our results might say 27% to 32% of people make over the threshold rather than the exact number. This still yields results people can understand, while protecting the privacy of the users in the dataset.Now let's use the following packages:1. `mimesis` to generate the user data2. `pandas` to calculate the exact values (statistical analysis)3.
`pydp` to calculate the same values while maintaining the privacy of the usersimport pydp as dp from pydp.algorithms.laplacian import BoundedSum, BoundedMean, Count, Max from mimesis import Person from mimesis import Address from mimesis.enums import Gender from mimesis import Datetime person = Person('en') import pandas as pd import random address = Address() datetime = Datetime() def create_rows_mimesis(number_of_rows=1): output = [{"name":person.full_name(), "age": person.age(), "email":person.email(), "income": random.randint(10000,100000)} for x in range(number_of_rows)] return output income_data = pd.DataFrame(create_rows_mimesis(5000)) income_data # Calculate count with no differential privacy def typical_count_above(column_name, limit): return income_data[income_data[column_name] > limit].count()[column_name] number_over_threshold = typical_count_above('income', 50000) print(f"Number of users with income over $50,000: {number_over_threshold} or {(number_over_threshold / 5000) * 100:.1f}%")Number of users with income over $50,000: 2810 or 56.2%As we can see from the calculations above, `typical_count_above` counts the number of users over a limit for a specified column with no preservation of privacy whatsoever.# Calculate count with differential privacy def private_count_above(column_name, privacy_budget, limit): x = Count(privacy_budget, dtype='int') return x.quick_result(list(income_data[income_data[column_name] > limit][column_name])) private_number_over_threshold = private_count_above('income', 0.8, 50000) print(f"PRIVATE: Number of users with income over $50,000: {private_number_over_threshold} or {(private_number_over_threshold / 5000) * 100:.1f}%")PRIVATE: Number of users with income over $50,000: 2809 or 56.2%The `private_count_above` function works very similarly to `typical_count_above`, but it uses Google's differential privacy library to count the number of users above the limit, preserving privacy by using the Laplace mechanism to add noise to the aggregate. Also note that we are able to tune the privacy budget for the acceptable loss of privacy, with 0 denoting that no loss whatsoever is acceptable. Mean ExampleNow let's repeat the same example, but this time determining the average income across all users in the dataset.# Calculate mean with no differential privacy def typical_mean(column_name): return income_data[column_name].mean() # Calculate mean with differential privacy def private_mean(column_name, privacy_budget): x = BoundedMean(privacy_budget, income_data[column_name].min(), income_data[column_name].max()) return x.quick_result(list(income_data[column_name])) true_mean_income = typical_mean('income') private_mean_income = private_mean('income', 0.8) print(f"True mean income: {true_mean_income}") print(f"Private mean income: {private_mean_income}, 0.8 privacy budget") print(f"Private mean income: {private_mean('income', 0.1)}, 0.1 privacy budget")True mean income: 55467.1134 Private mean income: 55470.37580603853, 0.8 privacy budget Private mean income: 55589.33064063336, 0.1 privacy budgetADVANCED NUMPYimport numpy as np import pandas as pd import numba as nb # Advanced NumPy # Let's go deeper into the NumPy library for array computing. # This will include more internal detail about the ndarray type # and more advanced array manipulations and algorithms. # Ndarray Object Internals # The NumPy ndarray provides a means to interpret a block of homogeneous data (either contiguous or strided) # as a multidimensional array object. # The data type, or dtype, determines how the data is interpreted as being floating point, integer,
# The data type, or dtype, determines how the data is interpreted as being floating point, integer, # boolean, or any of the other types. # Part of what makes ndarray flexible is that every array object is a strided view on a block of data. # You might wonder, for example, how the array view arr[::2, ::-1] does not copy any data. # The reason is that the ndarray is more than just a chunk of memory and a dtype; # it also has “striding” information that enables the array to move through memory with varying step sizes. # More precisely, the ndarray internally consists of the following: # • A pointer to data—that is, a block of data in RAM or in a memory-mapped file # • The data type or dtype, describing fixed-size value cells in the array # • A tuple indicating the array’s shape # • A tuple of strides, integers indicating the number of bytes to “step” in order to # advance one element along a dimension # For example, a 10 × 5 array would have shape (10, 5): np.ones((10, 5)).shape # A typical (C order) 3 × 4 × 5 array of float64 (8-byte) values has strides (160, 40,8) # (knowing about the strides can be useful because, in general, the larger the strides on a particular axis, # the more costly it is to perform computation along that axis): np.ones((3, 4, 5), dtype=np.float64).strides # While it is rare that a typical NumPy user would be interested in the array strides, # they are the critical ingredient in constructing “zero-copy” array views. Strides can even be negative, # which enables an array to move “backward” through memory # (this would be the case, for example, in a slice like obj[::-1] or obj[:, ::-1]). # NumPy dtype Hierarchy # You may occasionally have code that needs to check whether an array contains integers, # floating-point numbers, strings, or Python objects. # Because there are multiple types of floating-point numbers (float16 through float128), # checking that the dtype is among a list of types would be very verbose. # Fortunately, the dtypes have superclasses such as np.integer and np.floating, # which can be used in conjunction with the np.issubdtype function: ints = np.ones(10, dtype=np.uint16) ints floats = np.ones(10, dtype=np.float32) floats np.issubdtype(ints.dtype, np.integer) np.issubdtype(floats.dtype, np.floating) # You can see all of the parent classes of a specific dtype by calling the type’s mro method: np.float64.mro() # Therefore, we also have: np.issubdtype(ints.dtype, np.number) # Most NumPy users will never have to know about this, but it occasionally comes in handy. # Advanced Array Manipulation # There are many ways to work with arrays beyond fancy indexing, slicing, and boolean subsetting. # While much of the heavy lifting for data analysis applications is handled by higher-level functions # in pandas, you may at some point need to write a data algorithm that is not found # in one of the existing libraries. # Reshaping Arrays # In many cases, you can convert an array from one shape to another without copying any data. # To do this, pass a tuple indicating the new shape to the reshape array instance method. 
# For example, suppose we had a one-dimensional array of values that we wished to rearrange into a matrix: arr = np.arange(8) arr arr.reshape((4,2)) # A multidimensional array can also be reshaped: arr.reshape((4, 2)).reshape((2, 4)) # One of the passed shape dimensions can be –1, in which case the value used for tha dimension # will be inferred from the data: arr = np.arange(15) arr arr.reshape((5, -1)) # Since an array’s shape attribute is a tuple, it can be passed to reshape, too: other_arr = np.ones((3, 5)) other_arr other_arr.shape arr.reshape(other_arr.shape) # The opposite operation of reshape from one-dimensional to a higher dimension # is typically known as flattening or raveling: arr = np.arange(15).reshape((5, 3)) arr arr.ravel() # ravel does not produce a copy of the underlying values if the values in the result # were contiguous in the original array. # The flatten method behaves like ravel except it always returns a copy of the data: arr.flatten() # The data can be reshaped or raveled in different orders. This is a slightly nuanced topic for new NumPy users # and is therefore the next subtopic. # C Versus Fortran Order # NumPy gives you control and flexibility over the layout of your data in memory. # By default, NumPy arrays are created in row major order. # Spatially this means that if you have a two-dimensional array of data, # the items in each row of the array are stored in adjacent memory locations. # The alternative to row major ordering is column major order, # which means that values within each column of data are stored in adjacent memory locations. # For historical reasons, row and column major order are also know as C and Fortran order, respectively. # In the FORTRAN 77 language, matrices are all column major. # Functions like reshape and ravel accept an order argument indicating the order to use the data in the array. # This is usually set to 'C' or 'F' in most cases # (there are also less commonly used options 'A' and 'K'; see the NumPy documentation): arr = np.arange(12).reshape((3, 4)) arr arr.ravel() arr.ravel('C') arr.ravel('F') arr.ravel('A') arr.ravel('K') # Reshaping arrays with more than two dimensions can be a bit mind-bending. # The key difference between C and Fortran order is the way in which the dimensions are walked: # C/row major order # Traverse higher dimensions first (e.g., axis 1 before advancing on axis 0). # Fortran/column major order # Traverse higher dimensions last (e.g., axis 0 before advancing on axis 1). # Concatenating and Splitting Arrays # numpy.concatenate takes a sequence (tuple, list, etc.) of arrays and joins them together # in order along the input axis: arr1 = np.array([[1, 2, 3], [4, 5, 6]]) arr1 arr2 = np.array([[7, 8, 9], [10, 11, 12]]) arr2 np.concatenate([arr1, arr2], axis=0) np.concatenate([arr1, arr2], axis=1) # There are some convenience functions, like vstack and hstack, for common kinds of concatenation. # The preceding operations could have been expressed as: np.vstack((arr1, arr2)) np.hstack((arr1,arr2)) # split, on the other hand, slices apart an array into multiple arrays along an axis: arr = np.random.randn(5, 2) arr first, second, third = np.split(arr, [1, 3]) first second third # The value [1, 3] passed to np.split indicate the indices at which to split the array into pieces. 
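# A minimal extra sketch: split also accepts an axis argument, so the same (5, 2) array
# can be cut column-wise instead of row-wise at the passed index:
left, right = np.split(arr, [1], axis=1)
left   # first column, shape (5, 1)
right  # second column, shape (5, 1)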
# Array concatenation functions # Function Description # concatenate Most general function, concatenates collection of arrays along one axis # vstack, row_stack Stack arrays row-wise (along axis 0) # hstack Stack arrays column-wise (along axis 1) # column_stack Like hstack, but converts 1D arrays to 2D column vectors first # dstack Stack arrays “depth”-wise (along axis 2) # split Split array at passed locations along a particular axis # hsplit/vsplit Convenience functions for splitting on axis 0 and 1, respectively # Stacking helpers: r_ and c_ # There are two special objects in the NumPy namespace, r_ and c_, that make stacking arrays more concise: arr = np.arange(6) arr arr1 = arr.reshape((3, 2)) arr1 arr2 = np.random.randn(3, 2) arr2 np.r_[arr1, arr2] np.c_[arr1, arr2] np.c_[np.r_[arr1, arr2], arr] # These additionally can translate slices to arrays: np.c_[1:6, -10:-5] np.r_[1:6, -10:-5] # See the docstring for more on what you can do with c_ and r_. # Repeating Elements: tile and repeat # Two useful tools for repeating or replicating arrays to produce larger arrays # are the repeat and tile functions. # repeat replicates each element in an array some number of times, producing a larger array: arr = np.arange(3) arr arr.repeat(3) arr.repeat(4) arr.repeat(5) # The need to replicate or repeat arrays can be less common with NumPy than it is with other array programming # frameworks like MATLAB. One reason for this is that broadcasting often fills this need better. # By default, if you pass an integer, each element will be repeated that number of times. # If you pass an array of integers, each element can be repeated a different number of times: arr.repeat([2, 3, 4]) arr.repeat([4, 7, 9]) # Multidimensional arrays can have their elements repeated along a particular axis. arr = np.random.randn(2, 2) arr arr.repeat(2, axis=0) arr.repeat(2, axis=1) arr.repeat(7, axis=0) arr.repeat(7, axis=1) # Note that if no axis is passed, the array will be flattened first, which is likely not what you want. # Similarly, you can pass an array of integers when repeating a multidimensional array # to repeat a given slice a different number of times: arr.repeat([2, 3], axis=0) arr.repeat([2, 3], axis=1) # tile, on the other hand, is a shortcut for stacking copies of an array along an axis. # Visually you can think of it as being akin to “laying down tiles”: arr np.tile(arr, 2) np.tile(arr, 7) # The second argument is the number of tiles; with a scalar, the tiling is made row by row, # rather than column by column. # The second argument to tile can be a tuple indicating the layout of the “tiling”: arr np.tile(arr, (2, 1)) np.tile(arr, (1, 2)) np.tile(arr, (3, 2)) np.tile(arr, (2, 3)) np.tile(arr, (2, 7)) np.tile(arr, (7, 2)) # Fancy Indexing Equivalents: take and put # One way to get and set subsets of arrays is by fancy indexing using integer arrays: arr = np.arange(10) * 100 arr inds = [7, 1, 2, 6] inds arr[inds] # There are alternative ndarray methods that are useful in the special case of only making # a selection on a single axis: arr.take(inds) arr.put(inds, 42) arr arr.put(inds, [40, 41, 42, 43]) arr # To use take along other axes, you can pass the axis keyword: inds = [2, 0, 2, 1] inds arr = np.random.randn(2, 4) arr arr.take(inds, axis=1) # put does not accept an axis argument but rather indexes into the flattened (onedimensional, C order) version # of the array. # Thus, when you need to set elements using an index array on other axes, # it is often easiest to use fancy indexing. 
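# A minimal sketch of that fancy-indexing alternative: overwrite one chosen column in each row,
# something put cannot do directly along an axis because it works on the flattened array:
rows = np.arange(arr.shape[0])   # one index per row: [0, 1]
cols = np.array([3, 0])          # the column to overwrite in each row
arr[rows, cols] = 0              # writes arr[0, 3] and arr[1, 0] in place
arr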
# Broadcasting # Broadcasting describes how arithmetic works between arrays of different shapes. # It can be a powerful feature, but one that can cause confusion, even for experienced users. # The simplest example of broadcasting occurs when combining a scalar value with an array: arr = np.arange(5) arr arr * 4 # Above we say that the scalar value 4 has been broadcast to all of the other elements in # the multiplication operation. # For example, we can demean each column of an array by subtracting the column means. # In this case, it is very simple: arr = np.random.randn(4, 3) arr arr.mean() arr.mean(0) arr.mean(1) demeaned = arr - arr.mean(0) demeaned demeaned.mean() demeaned.mean(0) demeaned.mean(1) # Demeaning the rows as a broadcast operation requires a bit more care. # Fortunately, broadcasting potentially lower dimensional values across any dimension of an array # (like subtracting the row means from each column of a two-dimensional array) # is possible as long as you follow the rules. # This brings us to: # The Broadcasting Rule # Two arrays are compatible for broadcasting if for each trailing dimension (i.e., starting from the end) # the axis lengths match or if either of the lengths is 1. Broadcasting is then performed over the missing # or length 1 dimensions. # Even as an experienced NumPy user, I often find myself having to pause and draw a diagram as I think about # the broadcasting rule. # Consider the last example and suppose we wished instead to subtract the mean value from each row. # Since arr.mean(0) has length 3, it is compatible for broadcasting across axis 0 because the trailing # dimension in arr is 3 and therefore matches. # According to the rules, to subtract over axis 1 (i.e., subtract the row mean from each row), # the smaller array must have shape (4, 1): arr row_means = arr.mean(1) row_means row_means.shape row_means.reshape((4,1)) demeaned = arr - row_means.reshape((4,1)) demeaned demeaned.mean() demeaned.mean(0) demeaned.mean(1) # Broadcasting Over Other Axes # Broadcasting with higher dimensional arrays can seem even more mind-bending, # but it is really a matter of following the rules. If you don’t, you’ll get an error like this: arr - arr.mean(1) # It’s quite common to want to perform an arithmetic operation with a lower dimensional array # across axes other than axis 0. # According to the broadcasting rule, the “broadcast dimensions” must be 1 in the smaller array. # In the example of row demeaning shown here, this meant reshaping the row means to be shape (4, 1) # instead of (4,): arr - arr.mean(1).reshape((4, 1)) # In the three-dimensional case, broadcasting over any of the three dimensions is only a matter of reshaping # the data to be shape-compatible. # A common problem, therefore, is needing to add a new axis with length 1 specifically # for broadcasting purposes. # Using reshape is one option, but inserting an axis requires constructing a tuple # indicating the new shape.This can often be a tedious exercise. # Thus, NumPy arrays offer a special syntax for inserting new axes by indexing. 
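# As an aside of my own (not from the text): np.expand_dims(arr, axis) is another way to insert a
# length-1 axis without spelling out the full shape tuple:
import numpy as np
arr = np.random.randn(4, 3)
np.expand_dims(arr.mean(1), axis=1).shape              # (4, 1), ready to broadcast against (4, 3)
(arr - np.expand_dims(arr.mean(1), axis=1)).mean(1)    # row means are now approximately zero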
# We use the special np.newaxis attribute along with “full” slices to insert the new axis: arr = np.zeros((4, 4)) arr arr_3d = arr[:, np.newaxis, :] arr_3d arr_3d.shape arr_1d = np.random.normal(size=3) arr_1d arr_1d[:, np.newaxis] arr_1d[np.newaxis, :]
# Thus, if we had a three-dimensional array and wanted to demean axis 2, say, we would need to write: arr = np.random.randn(3, 4, 5) arr depth_means = arr.mean(2) depth_means depth_means.shape demeaned = arr - depth_means[:, :, np.newaxis] demeaned demeaned.mean(2)
# You might be wondering if there’s a way to generalize demeaning over an axis without sacrificing performance.
# There is, but it requires some indexing gymnastics: def demean_axis(arr, axis=0): means = arr.mean(axis) # This generalizes things like [:, :, np.newaxis] to N dimensions (the indexer must be a tuple) indexer = [slice(None)] * arr.ndim indexer[axis] = np.newaxis return arr - means[tuple(indexer)]
# Setting Array Values by Broadcasting
# The same broadcasting rule governing arithmetic operations also applies to setting values via array indexing.
# In a simple case, we can do things like: arr = np.zeros((4, 3)) arr arr[:] = 5 arr
# However, if we had a one-dimensional array of values we wanted to set into the columns of the array,
# we can do that as long as the shape is compatible: col = np.array([1.28, -0.42, 0.44, 1.6]) col arr[:] = col[:, np.newaxis] arr arr[:2] = [[-1.37], [0.509]] arr
# Advanced ufunc Usage
# While many NumPy users will only make use of the fast element-wise operations provided
# by the universal functions, there are a number of additional features that occasionally can help you
# write more concise code without loops.
# ufunc Instance Methods
# Each of NumPy’s binary ufuncs has special methods for performing certain kinds of vectorized operations:
# Method Description
# reduce(x) Aggregate values by successive applications of the operation
# accumulate(x) Aggregate values, preserving all partial aggregates
# reduceat(x, bins) “Local” reduce or “group by”; reduce contiguous slices of data
# to produce aggregated array
# outer(x, y) Apply operation to all pairs of elements in x and y;
# the resulting array has shape x.shape + y.shape
# reduce takes a single array and aggregates its values, optionally along an axis,
# by performing a sequence of binary operations.
# For example, an alternative way to sum elements in an array is to use np.add.reduce: arr = np.arange(10) arr np.add.reduce(arr) arr.sum()
# The starting value (0 for add) depends on the ufunc.
# If an axis is passed, the reduction is performed along that axis.
# This allows you to answer certain kinds of questions in a concise way.
# As a less trivial example, we can use np.logical_and to check whether the values
# in each row of an array are sorted: np.random.seed(12346) # for reproducibility arr = np.random.randn(5, 5) arr arr[::2].sort(1) # sort a few rows arr[:, :-1] < arr[:, 1:] np.logical_and.reduce(arr[:, :-1] < arr[:, 1:], axis=1)
# Note that logical_and.reduce is equivalent to the all method.
# accumulate is related to reduce like cumsum is related to sum.
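# A quick one-dimensional sketch of that relationship (mine, not the text’s): np.add.accumulate produces
# exactly the running totals of cumsum, and other ufuncs follow the same pattern:
import numpy as np
arr = np.arange(1, 6)
np.add.accumulate(arr)        # array([ 1,  3,  6, 10, 15]), the same as np.cumsum(arr)
np.multiply.accumulate(arr)   # array([  1,   2,   6,  24, 120]), a running product like np.cumprod(arr)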
# It produces an array of the same size with the intermediate “accumulated” values: arr = np.arange(15).reshape((3, 5)) arr np.add.accumulate(arr, axis=1) np.add.accumulate(arr, axis=0) np.add.accumulate(arr) # outer performs a pairwise cross-product between two arrays: arr = np.arange(3).repeat([1, 2, 2]) arr np.multiply.outer(arr, np.arange(5)) #The output of outer will have a dimension that is the sum of the dimensions of the inputs: x, y = np.random.randn(3, 4), np.random.randn(5) x y x, y result = np.subtract.outer(x, y) result result.shape # The last method, reduceat, performs a “local reduce,” in essence an array groupby operation # in which slices of the array are aggregated together. # It accepts a sequence of “bin edges” that indicate how to split and aggregate the values: arr = np.arange(10) arr np.add.reduceat(arr, [0, 5, 8]) # The results are the reductions (here, sums) performed over arr[0:5], arr[5:8], and arr[8:]. # As with the other methods, you can pass an axis argument: arr = np.multiply.outer(np.arange(4), np.arange(5)) arr np.add.reduceat(arr, [0, 2, 4], axis=1) # Writing New ufuncs in Python # There are a number of facilities for creating your own NumPy ufuncs. # The most general is to use the NumPy C API, but let's look at pure Python ufuncs. # numpy.frompyfunc accepts a Python function along with a specification for the number of inputs and outputs. # For example, a simple function that adds element-wise would be specified as: def add_elements(x, y): return x + y add_them = np.frompyfunc(add_elements, 2, 1) add_them add_them(np.arange(8), np.arange(8)) # Functions created using frompyfunc always return arrays of Python objects, # which can be inconvenient. # Fortunately, there is an alternative (but slightly less featureful) function, numpy.vectorize, # that allows you to specify the output type: add_them = np.vectorize(add_elements, otypes=[np.float64]) add_them add_them(np.arange(8), np.arange(8)) # These functions provide a way to create ufunc-like functions, # but they are very slow because they require a Python function call to compute each element, # which is a lot slower than NumPy’s C-based ufunc loops: arr = np.random.randn(10000) arr %timeit add_them(arr, arr) %timeit np.add(arr, arr) # Structured and Record Arrays # You may have noticed up until now that ndarray is a homogeneous data container; # that is, it represents a block of memory in which each element takes up the same number of bytes, # determined by the dtype. # On the surface, this would appear to not allow you to represent heterogeneous or tabular-like data. # A structured array is an ndarray in which each element can be thought of as representing a struct in C # (hence the “structured” name) or a row in a SQL table with multiple named fields: dtype = [('x', np.float64), ('y', np.int32)] dtype sarr = np.array([(1.5, 6), (np.pi, -2)], dtype=dtype) sarr # There are several ways to specify a structured dtype (see the online NumPy documentation). # One typical way is as a list of tuples with (field_name, field_data_type). # Now, the elements of the array are tuple-like objects whose elements can be accessed like a dictionary: sarr[0] sarr[0]['y'] # The field names are stored in the dtype.names attribute. 
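# A short sketch of my own (repeating the dtype from above so it runs standalone): dtype.names makes it easy
# to walk over the fields programmatically:
import numpy as np
dtype = [('x', np.float64), ('y', np.int32)]
sarr = np.array([(1.5, 6), (np.pi, -2)], dtype=dtype)
sarr.dtype.names                                    # ('x', 'y')
{name: sarr[name] for name in sarr.dtype.names}     # map each field name to its column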
# When you access a field on the structured array, a strided view on the data is returned,
# thus copying nothing: sarr['x']
# Nested dtypes and Multidimensional Fields
# When specifying a structured dtype, you can additionally pass a shape (as an int or tuple): dtype = [('x', np.int64, 3), ('y', np.int32)] dtype arr = np.zeros(4, dtype=dtype) arr
# In this case, the x field now refers to an array of length 3 for each record: arr[0]['x']
# Conveniently, accessing arr['x'] then returns a two-dimensional array
# instead of a one-dimensional array as in prior examples: arr['x']
# This enables you to express more complicated, nested structures as a single block of memory in an array.
# You can also nest dtypes to make more complex structures.
# Here is an example: dtype = [('x', [('a', 'f8'), ('b', 'f4')]), ('y', np.int32)] dtype data = np.array([((1, 2), 5), ((3, 4), 6)], dtype=dtype) data['x'] data['y'] data['x']['a']
# pandas DataFrame does not support this feature directly, though it is similar to hierarchical indexing.
# Why Use Structured Arrays?
# Compared with, say, a pandas DataFrame, NumPy structured arrays are a comparatively low-level tool.
# They provide a means of interpreting a block of memory as a tabular structure
# with arbitrarily complex nested columns.
# Since each element in the array is represented in memory as a fixed number of bytes,
# structured arrays provide a very fast and efficient way of writing data to
# and from disk (including memory maps), transporting it over the network, and other such uses.
# Another common use for structured arrays is writing data files as fixed-length
# record byte streams, a standard way to serialize data in C and C++ code,
# which is commonly found in legacy systems in industry.
# As long as the format of the file is known
# (the size of each record and the order, byte size, and data type of each element),
# the data can be read into memory with np.fromfile.
# More About Sorting
# Like Python’s built-in list, the ndarray sort instance method is an in-place sort,
# meaning that the array contents are rearranged without producing a new array: arr = np.random.randn(6) arr
# When sorting arrays in-place, remember that if the array is a view on a different ndarray,
# the original array will be modified: arr = np.random.randn(3, 5) arr arr[:, 0].sort() # Sort first column values in-place arr
# On the other hand, numpy.sort creates a new, sorted copy of an array.
# Otherwise, it accepts the same arguments (such as kind) as ndarray.sort: arr = np.random.randn(5) arr np.sort(arr) arr
# All of these sort methods take an axis argument for sorting the sections of data
# along the passed axis independently: arr = np.random.randn(3, 5) arr arr.sort(axis=1) arr arr.sort(axis=0) arr
# You may notice that none of the sort methods have an option to sort in descending order.
# This is not a problem in practice, because array slicing produces views,
# thus not producing a copy or requiring any computational work.
# Many Python users are familiar with the “trick” that for a list values,
# values[::-1] returns a list in reverse order.
# The same is true for ndarrays: arr[:, ::-1]
# Indirect Sorts: argsort and lexsort
# In data analysis you may need to reorder datasets by one or more keys.
# For example, a table of data about some students might need to be sorted by last name,
# then by first name.
# This is an example of an indirect sort, and if you have read the pandas-related material,
# you have already seen many higher-level examples.
# A small sketch combining the reversed-view trick with argsort follows, before we look at the details.
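# As just mentioned, a small sketch of my own: a descending sort can be had by reversing the result of an
# ascending sort, either directly or through the indices returned by argsort:
import numpy as np
arr = np.random.randn(5)
np.sort(arr)[::-1]           # descending copy: ascending sort followed by a reversed view
arr[arr.argsort()[::-1]]     # the same values, obtained by reversing an indirect sort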
# Given a key or keys (an array of values or multiple arrays of values), # you wish to obtain an array of integer indices (I refer to them colloquially as indexers) # that tells you how to reorder the data to be in sorted order. # Two methods for this are argsort and numpy.lexsort. # As an example: values = np.array([5, 0, 1, 3, 2]) values indexer = values.argsort() indexer values[indexer] # As a more complicated example, this code reorders a two-dimensional array by its first row: arr = np.random.randn(3, 5) arr arr[0] = values arr arr[:, arr[0].argsort()] # lexsort is similar to argsort, but it performs an indirect lexicographical sort on multiple key arrays. # Suppose we wanted to sort some data identified by first and last names: first_name = np.array(['Bob', 'Jane', 'Steve', 'Bill', 'Barbara']) first_name last_name = np.array(['Jones', 'Arnold', 'Arnold', 'Jones', 'Walters']) last_name sorter = np.lexsort((first_name, last_name)) sorter zip(last_name[sorter], first_name[sorter]) # lexsort can be a bit confusing the first time you use it because the order in which the # keys are used to order the data starts with the last array passed. # Above, last_name was used before first_name. # pandas methods like Series’s and DataFrame’s sort_values method are implemented with variants # of these functions (which also must take into account missing values). # Alternative Sort Algorithms # A stable sorting algorithm preserves the relative position of equal elements. # This can be especially important in indirect sorts where the relative ordering is meaningful: values = np.array(['2:first', '2:second', '1:first', '1:second', '1:third']) values key = np.array([2, 2, 1, 1, 1]) key indexer = key.argsort(kind='mergesort') indexer values.take(indexer) # The only stable sort available is mergesort, which has guaranteed O(n log n) performance # (for complexity buffs), but its performance is on average worse than the default quicksort method. # This is not something that most users will ever have to think about, # but it’s useful to know that it’s there. # A summary of available array sorting methods and their relative performance (and performance guarantees): # Kind Speed Stable Work space Worst case # 'quicksort' 1 No 0 O(n^2) # 'mergesort' 2 Yes n / 2 O(n log n) # 'heapsort' 3 No 0 O(n log n) # Partially Sorting Arrays # One of the goals of sorting can be to determine the largest or smallest elements in an array. # NumPy has optimized methods, numpy.partition and np.argpartition, # for partitioning an array around the k-th smallest element: np.random.seed(12345) arr = np.random.randn(20) arr np.partition(arr, 3) # After you call partition(arr, 3), the first three elements in the result are the smallest three values # in no particular order. # numpy.argpartition, similar to numpy.argsort, returns the indices that rearrange the data # into the equivalent order: indices = np.argpartition(arr, 3) indices arr.take(indices) # numpy.searchsorted: Finding Elements in a Sorted Array # searchsorted is an array method that performs a binary search on a sorted array, # returning the location in the array where the value would need to be inserted to maintain sortedness: arr = np.array([0, 1, 7, 12, 15]) arr arr.searchsorted(9) # You can also pass an array of values to get an array of indices back: arr.searchsorted([0, 8, 11, 16]) # You might have noticed that searchsorted returned 0 for the 0 element. 
# This is because the default behavior is to return the index at the left side of a group of equal values: arr = np.array([0, 0, 0, 1, 1, 1, 1]) arr arr.searchsorted([0, 1]) arr.searchsorted([0, 1], side='right') # As another application of searchsorted, suppose we had an array of values between 0 and 10,000, # and a separate array of “bucket edges” that we wanted to use to bin the data: data = np.floor(np.random.uniform(0, 10000, size=50)) data bins = np.array([0, 100, 1000, 5000, 10000]) bins #To then get a labeling of which interval each data point belongs to (where 1 would mean the bucket [0, 100)), # we can simply use searchsorted: labels = bins.searchsorted(data) labels # This, combined with pandas’s groupby, can be used to bin data: pd.Series(data).groupby(labels).mean() # Writing Fast NumPy Functions with Numba # Numba is an open source project that creates fast functions for NumPy-like data using CPUs, GPUs, # or other hardware. It uses the LLVM Project to translate Python code into compiled machine code. # To introduce Numba, let’s consider a pure Python function that computes # the expression (x - y).mean() using a for loop: def mean_distance(x, y): nx = len(x) result = 0.0 count = 0 for i in range(nx): result += x[i] - y[i] count += 1 return result / count #This function is very slow: x = np.random.randn(10000000) x y = np.random.randn(10000000) y %timeit mean_distance(x, y) %timeit (x - y).mean() # The NumPy version is over 100 times faster. # We can turn this function into a compiled Numba function using the numba.jit function: import numba as nb numba_mean_distance = nb.jit(mean_distance) numba_mean_distance # We could also have written this as a decorator: @nb.jit def mean_distance(x, y): nx = len(x) result = 0.0 count = 0 for i in range(nx): result += x[i] - y[i] count += 1 return result / count # The resulting function is actually faster than the vectorized NumPy version: %timeit numba_mean_distance(x, y) # Numba cannot compile arbitrary Python code, but it supports a significant subset of pure Python # that is most useful for writing numerical algorithms. # Numba is a deep library, supporting different kinds of hardware, modes of compilation, and user extensions. # It is also able to compile a substantial subset of the NumPy Python API without explicit for loops. # Numba is able to recognize constructs that can be compiled to machine code, # while substituting calls to the CPython API for functions that it does not know how to compile. # Numba’s jit function has an option, nopython=True, which restricts allowed code to Python code that can # be compiled to LLVM without any Python C API calls. jit(nopython=True) has a shorter alias numba.njit. # In the previous example, we could have written: from numba import float64, njit @njit(float64(float64[:], float64[:])) def mean_distance(x, y): return (x - y).mean() # I encourage you to learn more by reading the online documentation for Numba. # Creating Custom numpy.ufunc Objects with Numba # The numba.vectorize function creates compiled NumPy ufuncs, which behave like built-in ufuncs. # Let’s consider a Python implementation of numpy.add: from numba import vectorize @vectorize def nb_add(x, y): return x + y # Now we have: x = np.arange(10) x nb_add(x, x) nb_add.accumulate(x, 0) # Advanced Array Input and Output # Ordinarily in numpy, np.save and np.load are for storing arrays in binary format on disk. # There are a number of additional options to consider for more sophisticated use. 
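# A sketch of some of those additional options (my own example; the file names are hypothetical):
# np.savez_compressed stores several arrays in one archive, and np.load can memory-map a plain .npy file:
import numpy as np
a = np.arange(10)
b = np.random.randn(3, 4)
np.savez_compressed('arrays_archive.npz', a=a, b=b)   # hypothetical file name
archive = np.load('arrays_archive.npz')
archive['b']                                          # arrays are retrieved by keyword name
np.save('big_array.npy', np.zeros((1000, 1000)))      # hypothetical file name
arr_mm = np.load('big_array.npy', mmap_mode='r')      # opened lazily as a read-only memory map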
# In particular, memory maps have the additional benefit of enabling you to work with datasets
# that do not fit into RAM.
# Memory-Mapped Files
# A memory-mapped file is a method for interacting with binary data on disk as though it is stored
# in an in-memory array.
# NumPy implements a memmap object that is ndarray-like, enabling small segments of a large file to be read
# and written without reading the whole array into memory.
# Additionally, a memmap has the same methods as an in-memory array and thus can be substituted
# into many algorithms where an ndarray would be expected.
# To create a new memory map, use the function np.memmap and pass a file path, dtype, shape, and file mode: mmap = np.memmap('mymmap', dtype='float64', mode='w+', shape=(10000, 10000)) mmap
# Slicing a memmap returns views on the data on disk: section = mmap[:5] section
# If you assign data to these, it will be buffered in memory (like a Python file object),
# but you can write it to disk by calling flush: section[:] = np.random.randn(5, 10000) section mmap.flush() # writing it to disk mmap del mmap
# Whenever a memory map falls out of scope and is garbage-collected,
# any changes will be flushed to disk also.
# When opening an existing memory map, you still have to specify the dtype and shape,
# as the file is only a block of binary data with no metadata on disk: mmap = np.memmap('mymmap', dtype='float64', shape=(10000, 10000)) mmap
# Memory maps also work with structured or nested dtypes as described in a previous section.
# Performance Tips
# Getting good performance out of code utilizing NumPy is often straightforward,
# as array operations typically replace pure Python loops, which are comparatively very slow.
# The following list briefly summarizes some things to keep in mind:
# • Convert Python loops and conditional logic to array operations and boolean array operations
# • Use broadcasting whenever possible
# • Use array views (slicing) to avoid copying data
# • Utilize ufuncs and ufunc methods
# If you can’t get the performance you require after exhausting the capabilities provided by NumPy alone,
# consider writing code in C, Fortran, or Cython.
# The Importance of Contiguous Memory
# In some applications the memory layout of an array can significantly affect the speed of computations.
# This is based partly on performance differences having to do with the cache hierarchy of the CPU;
# operations accessing contiguous blocks of memory (e.g., summing the rows of a C order array)
# will generally be the fastest because the memory subsystem will buffer the appropriate blocks of memory
# into the ultrafast L1 or L2 CPU cache.
# Also, certain code paths inside NumPy’s C codebase have been optimized for the contiguous case
# in which generic strided memory access can be avoided.
# To say that an array’s memory layout is contiguous means that the elements are stored in memory in the order
# that they appear in the array with respect to Fortran (column major) or C (row major) ordering.
# By default, NumPy arrays are created as C-contiguous, or just simply contiguous.
# A column major array, such as the transpose of a C-contiguous array,
# is thus said to be Fortran-contiguous.
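# A related pair of helpers (my own aside, not from the text): np.ascontiguousarray and np.asfortranarray
# return the array in the requested layout, copying only when the input is not already laid out that way:
import numpy as np
arr_f = np.ones((1000, 1000), order='F')
np.ascontiguousarray(arr_f).flags.c_contiguous    # True; this made a C order copy
np.asfortranarray(arr_f).flags.f_contiguous       # True; no conversion was needed here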
# These properties can be explicitly checked via the flags attribute on the ndarray: arr_c = np.ones((1000, 1000), order='C') arr_c arr_f = np.ones((1000, 1000), order='F') arr_f arr_c.flags arr_f.flags arr_c.flags.c_contiguous arr_c.flags.f_contiguous arr_f.flags.c_contiguous arr_f.flags.f_contiguous
# In this example, summing the rows of these arrays should, in theory, be faster for arr_c than arr_f
# since the rows are contiguous in memory.
# Here I check using %timeit: %timeit arr_c.sum(1) %timeit arr_f.sum(1)
# When you’re looking to squeeze more performance out of NumPy, this is often a place to invest some effort.
# If you have an array that does not have the desired memory order,
# you can use copy and pass either 'C' or 'F': arr_f.copy('C').flags arr_f.copy('F').flags arr_c.copy('F').flags arr_c.copy('C').flags
# When constructing a view on an array, keep in mind that the result is not guaranteed to be contiguous: arr_c[:50].flags.contiguous arr_c[:, :50].flags
Let's have a look at the Allele Frequencies of intraindividual variants. fig, ax = plt.subplots(figsize=(20, 7.5)) interactive(scatterplot_interactive, min_af=widgets.FloatSlider(min=0.0, max=1.0, value=0.05, step=0.01), min_occurence=widgets.IntSlider(min=1, step=1), min_af_delta=widgets.FloatSlider(min=0.0, max=1.0, value=0, step=0.01), )
There are three sliders that control the scatterplot above. One can set the minimum allele frequency for a variant to be plotted, the minimum allele frequency delta (the highest allele frequency minus the lowest allele frequency observed), and the min_occurence slider, which selects only variants that have been observed at least N times. At a minimum allele frequency of 0.05, an allele frequency delta of 0.5, and a minimum occurrence of 3 (meaning that the variant has been sampled 3 times), we see multiple variants. 10779T/A, for instance, can be the major variant as well as the minor variant. DELTA = 0.5 filtered_af_delta = df[df['AF_delta'] > DELTA].reset_index(drop=True).sort_values(['textual_variant', 'AF_delta']) filtered_af_delta.style.bar(subset=['AF'], color='#d65f5f')
Residential Potential Study Figures
1. Import relevant packages
import sys import os import glob #import sqlalchemy import pandas as pd #import numpy as np #import ipywidgets as widgets #import numpy as np #import pandas as pd #import textwrap #import ipywidgets from ipywidgets import interact, interactive, fixed, interact_manual #import IPython.display #from IPython.display import display, clear_output #import plotly.graph_objects as go path1 = os.getcwd() sys.path.append("../../..")
2.
Grab and plot program specific yearly Market Potential savings data from Residential Potential Study Flat files# read each sheet of Resi excel file df_Savings = pd.read_excel('PS_Residential_Results.xlsx', sheet_name = 'Res Energy Savings') #df_TP_Savings = pd.read_excel('PS_Residential_Results.xlsx', sheet_name = 'Res TP') #df_Participants = pd.read_excel('PS_Residential_Results.xlsx', sheet_name = 'Res Participants') #df_Costs = pd.read_excel('PS_Residential_Results.xlsx', sheet_name = 'Res Costs') #Rename column headers Program_Savings = df_Savings.loc[:,['Realistic Achievable Potential By Measure','Unnamed: 3',\ 'Cumulative Annual Energy (MWh) Savings - GROSS','Unnamed: 30','Unnamed: 31',\ 'Unnamed: 32','Unnamed: 33','Unnamed: 34','Unnamed: 35','Unnamed: 36','Unnamed: 37',\ 'Unnamed: 38','Unnamed: 39','Unnamed: 40','Unnamed: 41','Unnamed: 42','Unnamed: 43',\ 'Unnamed: 44','Unnamed: 45','Unnamed: 46','Unnamed: 47','Unnamed: 48']] dict = {'Cumulative Annual Energy (MWh) Savings - GROSS':'2022','Unnamed: 30':'2023','Unnamed: 31':'2024',\ 'Unnamed: 32':'2025','Unnamed: 33':'2026','Unnamed: 34':'2027','Unnamed: 35':'2028','Unnamed: 36':'2029',\ 'Unnamed: 37':'2030','Unnamed: 38':'2031','Unnamed: 39':'2032','Unnamed: 40':'2033','Unnamed: 41':'2034',\ 'Unnamed: 42':'2035','Unnamed: 43':'2036','Unnamed: 44':'2037','Unnamed: 45':'2038','Unnamed: 46':'2039',\ 'Unnamed: 47':'2040','Unnamed: 48':'2041','Realistic Achievable Potential By Measure':'Measure',\ 'Unnamed: 3':'Program'} Program_Savings.rename(columns=dict,inplace=True) #condition for program name Program_name = 'CAMR' # selecting rows based on program_name condition Program_Savings = Program_Savings.loc[Program_Savings['Program'].isin([Program_name])] Program_Savings = Program_Savings.reset_index(drop=True) #display(Program_Savings) Savings_PerYear = Program_Savings.sum(axis = 0, skipna =True) Savings_PerYear = Savings_PerYear.reset_index(drop = False) Savings_PerYear = Savings_PerYear.drop(Savings_PerYear.index[[0,1]]) dict = {'index': 'Year', 0:'Savings'} Savings_PerYear.rename(columns=dict,inplace=True) Savings_PerYear = Savings_PerYear.reset_index(drop = True) #Savings_PerYear = Savings_PerYear.reset_index(drop = False) #display(Savings_PerYear) #display(Program_Savings.head()) print(type(Program_name)) x = Savings_PerYear.plot('Year', 'Savings',xlabel='Year',ylabel='MWh') #, kind = 'scatter') # read each sheet of Resi excel file df_Savings = pd.read_excel('PS_Residential_Results.xlsx', sheet_name = 'Res Energy Savings') #df_TP_Savings = pd.read_excel('PS_Residential_Results.xlsx', sheet_name = 'Res TP') #df_Participants = pd.read_excel('PS_Residential_Results.xlsx', sheet_name = 'Res Participants') #df_Costs = pd.read_excel('PS_Residential_Results.xlsx', sheet_name = 'Res Costs') #Rename column headers Program_Savings = df_Savings.loc[:,['Realistic Achievable Potential By Measure','Unnamed: 3',\ 'Cumulative Annual Energy (MWh) Savings - GROSS','Unnamed: 30','Unnamed: 31',\ 'Unnamed: 32','Unnamed: 33','Unnamed: 34','Unnamed: 35','Unnamed: 36','Unnamed: 37',\ 'Unnamed: 38','Unnamed: 39','Unnamed: 40','Unnamed: 41','Unnamed: 42','Unnamed: 43',\ 'Unnamed: 44','Unnamed: 45','Unnamed: 46','Unnamed: 47','Unnamed: 48']] dict = {'Cumulative Annual Energy (MWh) Savings - GROSS':'2022','Unnamed: 30':'2023','Unnamed: 31':'2024',\ 'Unnamed: 32':'2025','Unnamed: 33':'2026','Unnamed: 34':'2027','Unnamed: 35':'2028','Unnamed: 36':'2029',\ 'Unnamed: 37':'2030','Unnamed: 38':'2031','Unnamed: 39':'2032','Unnamed: 40':'2033','Unnamed: 41':'2034',\ 
'Unnamed: 42':'2035','Unnamed: 43':'2036','Unnamed: 44':'2037','Unnamed: 45':'2038','Unnamed: 46':'2039',\ 'Unnamed: 47':'2040','Unnamed: 48':'2041','Realistic Achievable Potential By Measure':'Measure',\ 'Unnamed: 3':'Program'} Program_Savings.rename(columns=dict,inplace=True) #condition for program name Program_name = 'CAMR' # selecting rows based on program_name condition x = Program_Savings.Program.unique() display(x)Build Pipeline NotebookThis notebook will exercise the drift detection MLOps `build pipeline`%%capture !pip install -U pandas seabornSetup👇 Set the project name for your drift pipeline and store variableproject_name = "<>" # << Update this drift detection project %store project_nameGet back the project id and regionimport sagemaker import json sess = sagemaker.session.Session() region_name = sess._region_name sm_client = sess.sagemaker_client project_id = sm_client.describe_project(ProjectName=project_name)["ProjectId"] artifact_bucket = f"sagemaker-project-{project_id}-{region_name}" print(f"Project: {project_name} ({project_id})")Data PrepLet's copy some trip data and taxi zone files to the input locationfrom sagemaker.s3 import S3Downloader, S3Uploader # Download trip data and taxi zones to input folder download_uri = "s3://nyc-tlc/trip data/green_tripdata_2018-02.csv" S3Downloader().download(download_uri, "input/data") download_uri = "s3://nyc-tlc/misc/taxi_zones.zip" S3Downloader().download(download_uri, "input/zones") # Upload input to the target location input_data_uri = f"s3://{artifact_bucket}/{project_id}/input" S3Uploader().upload("input", input_data_uri) print("Listing input files:") for s3_uri in S3Downloader.list(input_data_uri): print(s3_uri)TrainStart the pipeline now that we have uploaded some datafrom sagemaker.workflow.pipeline import Pipeline pipeline_name = f"{project_name}-build" pipeline = Pipeline(pipeline_name) # Start pipeline execution = pipeline.start() execution_name = execution.arn.split("/")[-1] print(f"Waiting for execution: {execution_name} for pipeline {pipeline_name}...") execution.wait() execution_status = execution.describe()["PipelineExecutionStatus"] print(f"Status: {execution_status}")List the execution steps. Note that we have baseline and training jobs.for step in execution.list_steps(): print("Step: {}, Status: {}".format(step["StepName"], step["StepStatus"]))EvaluateGet the estimator for the training job in the pipeline.from sagemaker.estimator import Estimator def get_execution_step(step_name): return [ step["Metadata"] for step in execution.list_steps() if step["StepName"] == step_name ] training_job_arn = get_execution_step("TrainModel")[0]["TrainingJob"]["Arn"] training_job_name = training_job_arn.split("/")[-1] estimator = Estimator.attach(training_job_name)Download the Debugger XGBoost training reportSageMaker Debugger generates a [XGBoost Training Report](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-training-xgboost-report.html) by a processing jobs that run concurrent to the training job. 
Let's wait for it to complete.# get name of the xgboost training report xgb_report_job_name = [ rule["RuleEvaluationJobArn"].split("/")[-1] for rule in estimator.latest_training_job.rule_job_summary() if "CreateXgboostReport" in rule["RuleConfigurationName"] ][0] print("Waiting for XGBoost training report to complete...") sm_client.get_waiter("processing_job_completed_or_stopped").wait( ProcessingJobName=xgb_report_job_name ) print("Done")ℹ️ The code below will download the output from the Debugger report in the `report` folder. Click the link to open the report.from IPython.display import FileLink from sagemaker.s3 import S3Downloader, S3Uploader # Get the s3 output report_uri = sm_client.describe_processing_job(ProcessingJobName=xgb_report_job_name)[ "ProcessingOutputConfig" ]["Outputs"][0]["S3Output"]["S3Uri"] # Download the notebook from the report S3Downloader().download(f"{report_uri}/xgboost_report.html", "report") FileLink("report/xgboost_report.html", result_html_prefix="Open Report: ")Approve Modelℹ️ Once we are happy with this training job, we can [Update the Approval Status](https://docs.aws.amazon.com/sagemaker/latest/dg/model-registry-approve.html) of a model.model_package_arn = get_execution_step("RegisterModel")[0]["RegisterModel"]["Arn"] model_package_version = model_package_arn.split("/")[-1] print(f"Model version: {model_package_version}")Let's update the status to approvedmodel_package_update_input_dict = { "ModelPackageArn": model_package_arn, "ModelApprovalStatus": "Approved", } model_package_update_response = sm_client.update_model_package( **model_package_update_input_dict )Next Steps✅ Now that our model is approved, head over to the [deployment-pipeline](deployment-pipeline.ipynb) or [batch-pipeline](batch-pipeline.ipynb) notebook to test your model in staging and promote to production. Clean upExecute the following cell to delete any registered models.response = sm_client.list_model_packages(ModelPackageGroupName=project_name) for model_package in response["ModelPackageSummaryList"]: print("Deleting Version {}".format(model_package["ModelPackageArn"].split("/")[-1])) sm_client.delete_model_package(ModelPackageName=model_package["ModelPackageArn"])Execute the following cell to delete cloudformation stacks1. 
SageMaker Pipeline Workflow Model Package Groupimport boto3 cfn = boto3.client("cloudformation") for stack_name in [ f"sagemaker-{project_name}-pipeline", ]: print("Deleting stack: {}".format(stack_name)) cfn.delete_stack(StackName=stack_name) cfn.get_waiter("stack_delete_complete").wait(StackName=stack_name)The following code will clean up all objects in the artifact bucket and delete the SageMaker project.s3_resource = boto3.resource("s3") s3_artifact_bucket = s3_resource.Bucket(artifact_bucket) s3_artifact_bucket.object_versions.delete() print("Artifact bucket objects deleted") sm_client.delete_project(ProjectName=project_name) print("SageMaker Project deleted")**Fields** - C-KALWOCHE-0 - Year / Calendar Week - C-B00-0 - Bundesland (not needed) - C-ALTERGR65-0 (not needed) - ALTERSGR65-1 0 bis 64 Jahre - ALTERSGR65-2 65 Jahre und älter - C-C11-0 (not needed) - C11-1 männlich - C11-2 weiblich - C11-0 Nicht klassifizierbar - F-ANZ-1 - Count of deaths Our goal is to overlay each year to see if 2020 is different in terms of death statistics We are going to reformat the data, we need the following columns: - year - calendar week - count death# Summe der Todesfälle pro Kalenderwoche group = df['F-ANZ-1'].groupby(df['C-KALWOCHE-0']).sum() #group.head() #type(group) frame = {'Anzahl Todesfälle': group } result_df = pd.DataFrame(frame).reset_index() # Spalte für Jahr result_df['Jahr']= result_df['C-KALWOCHE-0'].apply(lambda x: x[5:9]) # Spalte für KW result_df['KW']= result_df['C-KALWOCHE-0'].apply(lambda x: int(x[9:11])) result_df.head(13)**Entangled qubits**import numpy as np # Importing standard Qiskit libraries from qiskit import QuantumCircuit, transpile, Aer, IBMQ, QuantumRegister, ClassicalRegister, execute, BasicAer from qiskit.tools.jupyter import * from qiskit.visualization import * from ibm_quantum_widgets import * from qiskit.providers.aer import QasmSimulator # Loading your IBM Quantum account(s) provider = IBMQ.load_account() import math %matplotlib inline # Set up the program a = QuantumRegister(1, name='a') b = QuantumRegister(1, name='b') a_c = ClassicalRegister(1, name='ac') b_c = ClassicalRegister(1, name='bc') qc = QuantumCircuit(a, b, a_c, b_c) qc.h(a) # put a into a superposition of 0 and 1 qc.cx(a, b) # entangle a and b qc.measure(a, a_c) qc.measure(b, b_c) backend = BasicAer.get_backend('statevector_simulator') job = execute(qc, backend) result = job.result() counts = result.get_counts(qc) print('counts:',counts) outputstate = result.get_statevector(qc, decimals=3) print(outputstate) qc.draw() # draw the circuit[0.+0.j 0.+0.j 0.+0.j 1.+0.j]Dataset Statistics for Disease Gene Sentences This notebook is designed to show statistics on the data extracted from pubmed. 
The following cells below here are needed to set up the environment.%load_ext autoreload %autoreload 2 %matplotlib inline from collections import Counter from itertools import product import os import pickle import sys sys.path.append(os.path.abspath('../../../modules')) import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from tqdm import tqdm_notebook sns.set(rc={'figure.figsize':(12,6), "font.size":17}) #Set up the environment username = "danich1" password = "" dbname = "pubmeddb" #Path subject to change for different os database_str = "postgresql+psycopg2://{}:{}@/{}?host=/var/run/postgresql".format(username, password, dbname) os.environ['SNORKELDB'] = database_str from snorkel import SnorkelSession session = SnorkelSession() from snorkel.models import candidate_subclass, Candidate DiseaseGene = candidate_subclass('DiseaseGene', ['Disease', 'Gene']) from utils.notebook_utils.dataframe_helper import write_candidates_to_excel, make_sentence_dfLoad and Merge DataFramesedge_level_df = pd.read_csv("input/disease_associates_gene.tsv.xz", sep="\t") edge_level_df.head(2) sql=''' select cand_id as candidate_id, doid_id, entrez_gene_id, sentence_id, text, array_length(words, 1) as sen_length from ( select cand_id, "Disease_cid" as doid_id, "Gene_cid" as entrez_gene_id, sentence_id from ( select disease_gene.id as "cand_id", disease_gene."Disease_id", disease_gene."Disease_cid", disease_gene."Gene_cid", candidate.split from disease_gene inner join candidate on disease_gene.id=candidate.id ) as candidate_splits inner join span on candidate_splits."Disease_id"=span.id ) as candidate_sen inner join sentence on candidate_sen.sentence_id=sentence.id ''' candidate_sentence_df = pd.read_sql(sql, database_str).astype({"entrez_gene_id": int}) candidate_sentence_df.head(2) total_candidates_df= ( edge_level_df .merge(candidate_sentence_df, on=["doid_id", "entrez_gene_id"]) ) total_candidates_df.head(2) dev_candidates = ( session .query(DiseaseGene) .filter( DiseaseGene.id.in_( total_candidates_df .query("split==1") .sample(10000, random_state=100) .candidate_id .tolist() ) ) .all() ) dev_df = make_sentence_df(dev_candidates) dev_df.head(2) test_candidates = ( session .query(DiseaseGene) .filter( DiseaseGene.id.in_( total_candidates_df .query("split==2") .sample(10000, random_state=120) .candidate_id .tolist() ) ) .all() ) test_df = make_sentence_df(test_candidates) test_df.head(2) #write_candidates_to_excel(dev_df, "../data/sentences/sentence_labels_dev.xlsx") #write_candidates_to_excel(test_df, "../data/sentences/sentence_labels_test.xlsx")Distribution of Sentence Lengthsns.distplot(total_candidates_df["sen_length"], rug=False) total_candidates_df["sen_length"].describe().astype(int)Something seems fishy about this distribution. The number of words (tokens) for a given sentence is in the thousands range. Intuitively, that doesn't make sense, since the average number of words for a given sentence is 37. Possible reason for this abnormality is a parsing error. Lets take a look at this 1120 word sentence.total_candidates_df.query("sen_length==957").iloc[0]["text"]The above suspicion was correct. This is a parsing error where the list of authors are combined with the title of their work for a winter symposium. The following can be found at this id link: [27090254](https://www.ncbi.nlm.nih.gov/pubmed/27090254). The goal here is to take these parsing errors into account and determine an optimal cutoff point for these sentences. 
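One way to compute such a cutoff from the summary statistics (a minimal sketch, assuming the total_candidates_df and its sen_length column from above are in scope):
sen_len = total_candidates_df["sen_length"]
cutoff = int(sen_len.mean() + 2 * sen_len.std())     # two standard deviations above the mean
total_candidates_df.query("sen_length <= @cutoff").shape   # how many sentences survive the cutoff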
Using common statsitic rules any point that is greater than two standard deviations away from the mean will be removed.sns.distplot(total_candidates_df.query("sen_length < 83+1")["sen_length"], rug=False) total_candidates_df.query("sen_length < 83+1")["sen_length"].describe().astype(int)This distribution looks a bit more reasonable compared to the above distribution. After filtering out the outliers, we still have a pleathora of sentences on the order of 3.6 million. (removed 146841 sentences).total_candidates_df.to_csv("output/all_dag_candidates.tsv.xz", sep="\t", compression="xz", index=False)Offline Plotting TutorialThe dataset comes with a tool for offline (i.e. not live as the data are coming in) plotting. This notebook explains how to use it and what it is capable of plotting. **NOTE**: This notebook only covers the plotting of numerical data. For categorical (string-valued) data, please see `Offline plotting with categorical data.ipynb`.The tool in question is the function `plot_by_id`.%matplotlib notebook import numpy as np import qcodes as qc from typing import List, Dict, Tuple, Any import matplotlib.pyplot as plt import qcodes as qc from qcodes import Parameter, new_experiment, Measurement from qcodes.dataset.plotting import plot_by_id from qcodes.dataset.database import initialise_databaseFirst we make an experimental run, so that we have something to plot.initialise_database() new_experiment('test_plot_by_id', 'nosample')Next we make a handful of parameters to be used in the examples of this notebook.For those curious, setting `set_cmd=None` and `get_cmd=None` makes the `Parameters` settable and gettable without them being hooked up to any external/auxiliary action (in old QCoDeS versions, this was known as a `ManualParameter`).# Make a handful of parameters to be used in the examples x = Parameter(name='x', label='Voltage', unit='V', set_cmd=None, get_cmd=None) t = Parameter(name='t', label='Time', unit='s', set_cmd=None, get_cmd=None) y = Parameter(name='y', label='Voltage', unit='V', set_cmd=None, get_cmd=None) y2 = Parameter(name='y2', label='Current', unit='A', set_cmd=None, get_cmd=None) z = Parameter(name='z', label='Majorana number', unit='Anyonic charge', set_cmd=None, get_cmd=None)A single, simple 1D sweepmeas = Measurement() meas.register_parameter(x) meas.register_parameter(y, setpoints=(x,)) xvals = np.linspace(-3.4, 4.2, 250) # shuffle randomly the values in order to test that plot # that is to be created for this data is a correct line # that does not depend on the order of the data np.random.shuffle(xvals) with meas.run() as datasaver: for xnum in xvals: noise = np.random.randn()*0.1 # multiplicative noise yeah yeah datasaver.add_result((x, xnum), (y, 2*(xnum+noise)**3 - 5*(xnum+noise)**2)) dataid = datasaver.run_idStarting experimental run with id: 443Now let us plot that run. The function `plot_by_id` takes the `run_id` of the run to plot as a positional argument. Furthermore, the user may specify the matplotlib axis object (or list of axis objects) to plot on. If no axes are specified, the function creates new axis object(s). The function returns a tuple of a list of the axes and a list of the colorbar axes (just `None`s if there are no colorbars).axes, cbaxes = plot_by_id(dataid)Using the returned axis, we can e.g. change the plot linewidth and color. 
We refer to the matplotlib documentation for details on matplotlib plot customization.my_ax = axes[0] line = my_ax.lines[0] line.set_color('#223344') line.set_linewidth(3)Rescaling units and ticks`plot_by_id` can conveniently rescale the units and ticks of the plot. For example, if one of the axes is voltage in units of `V`, but the values are in the range of millivolts, then `plot_by_id` will rescale the ticks of the axis to show `5` instead of `0.005`, and the unit in the axis label will be adjusted from `V` to `mV`.This feature works with the relevant SI units, and some others. In case the units of the parameter are not from that list, or are simply not specified, ticks and labels are left intact.The feature can be explicitly turned off by passing `rescale_axes=False` to `plot_by_id`.The following plot demontrates the feature.meas = Measurement() meas.register_parameter(t) meas.register_parameter(y, setpoints=(t,)) with meas.run() as datasaver: for tnum in np.linspace(-3.4, 4.2, 50): noise = np.random.randn()*0.1 datasaver.add_result((t, tnum*1e-6), (y, (2*(tnum+noise)**3 - 5*(tnum+noise)**2)*1e3)) dataid = datasaver.run_id plot_by_id(dataid)Two interleaved 1D sweepsNow we make a run where two parameters are measured as a function of the same parameter.meas = Measurement() meas.register_parameter(x) meas.register_parameter(y, setpoints=[x]) meas.register_parameter(y2, setpoints=[x]) xvals = np.linspace(-5, 5, 250) with meas.run() as datasaver: for xnum in xvals: datasaver.add_result((x, xnum), (y, xnum**2)) datasaver.add_result((x, xnum), (y2, -xnum**2)) dataid = datasaver.run_idStarting experimental run with id: 445In such a situation, `plot_by_id` by default creates a new axis for **each** dependent parameter. Sometimes this is not desirable; we'd rather have both plots on the same axis. In such a case, we might pass the same axis twice to `plot_by_id`.axes, cbaxes = plot_by_id(dataid)Let's do that nowfig, ax = plt.subplots(1) axes, cbaxes = plot_by_id(dataid, axes=[ax, ax])Regular 2D rectangular sweep scanFor 2D plots, a colorbar is usually present. As mentioned above, `plot_by_id` returns this.meas = Measurement() meas.register_parameter(x) meas.register_parameter(t) meas.register_parameter(z, setpoints=(x, t)) xvals = np.linspace(-4, 5, 50) tvals = np.linspace(-500, 1500, 25) with meas.run() as datasaver: for xv in xvals: for tv in tvals: # just some arbitrary semi good looking function zv = np.sin(2*np.pi*xv)*np.cos(2*np.pi*0.001*tv) + 0.001*tv datasaver.add_result((x, xv), (t, tv), (z, zv)) dataid = datasaver.run_id axes, colorbars = plot_by_id(dataid)A fairlt normal situation is that the colorbar was somehow mislabelled. Using the returned colorbar, the label can be overwritten.colorbar = colorbars[0] colorbar.set_label('Correct science label')Warped 2D rectangular sweep scanA nice feature of `plot_by_id` is that the grid may be warped; it makes no difference.Here we warp the x axis of the previous scan to increase the resolution in the right half plane.xvals = np.linspace(-4, 5, 50) + np.cos(-1/6*np.pi*xvals) tvals = np.linspace(-500, 1500, 25) with meas.run() as datasaver: for xv in xvals: for tv in tvals: zv = np.sin(2*np.pi*xv)*np.cos(2*np.pi*0.001*tv) + 0.001*tv datasaver.add_result((x, xv), (t, tv), (z, zv)) dataid = datasaver.run_id axes, cbaxes = plot_by_id(dataid)Interrupted 2D scans (a hole in the cheese)In case a sweep in interrupted, the entire grid will not be filled out. 
This is also supported,in fact, any single rectangular hole is allowedxvals = np.linspace(-4, 5, 50) + np.cos(2/9*np.pi*xvals+np.pi/4) tvals = np.linspace(-500, 1500, 25) # define two small forbidden range functions def no_x(xv): if xv > 0 and xv < 3: return True else: return False def no_t(tv): if tv > 0 and tv < 450: return True else: return False with meas.run() as datasaver: for xv in xvals: for tv in tvals: if no_x(xv) and no_t(tv): continue else: zv = np.sin(2*np.pi*xv)*np.cos(2*np.pi*0.001*tv) + 0.001*tv datasaver.add_result((x, xv), (t, tv), (z, zv)) dataid = datasaver.run_id axes, colorbars = plot_by_id(dataid)Fancy plotting As a final example, let us combine several plots in one window.We first make a little grid of axes.fig, figaxes = plt.subplots(2, 2)Next, we make some runs (shamelessly copy-pasting from above).# First run meas = Measurement() meas.register_parameter(x) meas.register_parameter(y, setpoints=(x,)) xvals = np.linspace(-3.4, 4.2, 250) with meas.run() as datasaver: for xnum in xvals: noise = np.random.randn()*0.1 # multiplicative noise yeah yeah datasaver.add_result((x, xnum), (y, 2*(xnum+noise)**3 - 5*(xnum+noise)**2)) rid1 = datasaver.run_id # Second run meas = Measurement() meas.register_parameter(x) meas.register_parameter(t) meas.register_parameter(z, setpoints=(x, t)) xvals = np.linspace(-4, 5, 50) tvals = np.linspace(-500, 1500, 25) with meas.run() as datasaver: for xv in xvals: for tv in tvals: # just some arbitrary semi good looking function zv = np.sin(2*np.pi*xv)*np.cos(2*np.pi*0.001*tv) + 0.001*tv datasaver.add_result((x, xv), (t, tv), (z, zv)) rid2 = datasaver.run_idStarting experimental run with id: 449 Starting experimental run with id: 450And then we put them just where we please.axes, colorbars = plot_by_id(rid1, figaxes[0, 0]) axes, colorbars = plot_by_id(rid2, figaxes[1, 1], colorbars)Note that if we want to replot on an axis with a colorbar we probably also want to reuse the colorbaraxes, colorbars = plot_by_id(rid2, figaxes[1, 1], colorbars) fig.tight_layout()Rasterizing By default Matplotlib renders each individual data point as a separate square in 2D plots when storing in a vector format (pdf,svg). This is not a problem for small data sets, but the time needed to generate a pdf increases rapidly with the number of data points. Therefore, `plot_by_id` will automatically rasterize the data (lines, ticks and labels are still stored as text) if more than 5000 data points are plotted. The particular value of the rasterization threshold can be set in the `qcodesrc.json` config file.Alternatively the rasterized keyword can be passed to the `plot_by_id_function`meas = Measurement() meas.register_parameter(x) meas.register_parameter(t) meas.register_parameter(z, setpoints=(x, t)) xvals = np.linspace(-4, 5, 100) tvals = np.linspace(-500, 1500, 500) with meas.run() as datasaver: for xv in xvals: for tv in tvals: # just some arbitrary semi good looking function zv = np.sin(2*np.pi*xv)*np.cos(2*np.pi*0.001*tv) + 0.001*tv datasaver.add_result((x, xv), (t, tv), (z, zv)) dataid = datasaver.run_idStarting experimental run with id: 451To get a feeling for the time difference between rasterzing and not, we time the two approaches here.%%time axeslist, _ = plot_by_id(dataid) axeslist[0].figure.savefig(f'test_plot_by_id_{dataid}.pdf') %%time axeslist, _ = plot_by_id(dataid, rasterized=False) axeslist[0].figure.savefig(f'test_plot_by_id_{dataid}.pdf')1. 
Reading Data & Preprocessingimport glob import os import random import pandas as pd import numpy as np %matplotlib inline import matplotlib import matplotlib.pyplot as plt from sklearn.linear_model import LogisticRegressionCV from sklearn.metrics import confusion_matrix import itertools from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier from sklearn.decomposition import PCA import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler import matplotlib import matplotlib.pyplot as plt from sklearn.linear_model import LogisticRegressionCV from sklearn.metrics import confusion_matrix from keras.utils import to_categorical from keras.layers import Dropout from keras.layers import Dense from keras import models from sklearn.ensemble import IsolationForest from keras import optimizers import itertools from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier os.chdir("/scratch/rk9cx/conn_log_labelled_runtime/labelled_2019-01-10//") #random sampling filename = "merged.csv" n = sum(1 for line in open(filename)) - 1 #number of records in file (excludes header) s = 1000000 #desired sample size skip = sorted(random.sample(range(1,n+1),n-s)) #the 0-indexed header will not be included in the skip list #preprocessing data df = pd.read_csv(filename, skiprows=skip) df = df.drop(df.columns[0], axis=1) df = pd.concat([df, df['history'].str.join('|').str.get_dummies()], axis = 1) df = df.drop(["history","honeypot","blacklist","whitelist"], axis=1) df = pd.concat([df, pd.get_dummies(df.conn_state.apply(pd.Series), prefix="", prefix_sep="")], axis = 1) df = df.drop(["conn_state"], axis=1) df = df.replace('-', 0) df.head() df.to_csv("sample.csv", index=False) df = pd.read_csv("sample.csv", index_col= False) df.head() df.T.head(60) #more cleaning sample = df.drop(["ts","src_ip","src_port","dest_ip","dest_port","src_ip_ext"], axis=1) sample["duration"] = sample["duration"].astype(float) sample["duration"] = np.log((sample["duration"].astype('int'))+0.001) sample["src_bytes"] = sample["src_bytes"].astype(float) sample["src_bytes"] = np.log((sample["src_bytes"].astype('int'))+0.001) sample["dest_bytes"] = sample["dest_bytes"].astype(float) sample["dest_bytes"] = np.log((sample["dest_bytes"].astype('int'))+0.001) sample["src_pkts"] = np.log((sample["src_pkts"].astype('int'))+0.001) sample["dest_pkts"] = np.log((sample["dest_pkts"].astype('int'))+0.001)2. Principal Component Analysis#Standard scaling scaler = StandardScaler() pred_variables = sample.loc[:, sample.columns != 'label'] resp_variables = sample.loc[:, sample.columns == 'label'] pred_variables = scaler.fit_transform(pred_variables) pca = PCA(n_components=2) X = pred_variables X_r = pca.fit_transform(X) # Percentage of variance explained for each components print('explained variance ratio (first two components): %s' % str(pca.explained_variance_ratio_)) #combining PCs and response variable X_c = pd.DataFrame(data=X_r) X_c['label'] = resp_variables.iloc[:,0].values X_c.columns = ['PC1', 'PC2',"label"] #Plot fig = plt.figure(figsize = (8,8)) ax = fig.add_subplot(1,1,1) ax.set_xlabel('Principal Component 1', fontsize = 15) ax.set_ylabel('Principal Component 2', fontsize = 15) ax.set_title('2 component PCA', fontsize = 20) targets = [1,0] colors = ['r', 'g'] for target, color in zip(targets,colors): indicesToKeep = X_c.label == target ax.scatter(X_c.loc[indicesToKeep, 'PC1'] , X_c.loc[indicesToKeep, 'PC2'] , c = color , s = 50) ax.legend(targets) ax.grid()3. 
Downsamplingmal = sample.loc[sample['label'] == 1].head(298226) ben = sample.loc[sample['label'] == 0] sampler = pd.concat([mal,ben], axis = 0) sampler.label.value_counts(normalize = True) X = sampler.loc[:, sampler.columns != 'label'] Y = sampler.loc[:, sampler.columns == 'label'] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=42)4. Modelling#function for plotting confusion Matrix def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.ylabel('True label') plt.xlabel('Predicted label') plt.tight_layout()4.1 Logistic RegressionLR = LogisticRegressionCV(cv=5, random_state=0, multi_class='multinomial').fit(X_train, y_train) pred_y=LR.predict(X_test) cm= confusion_matrix(y_test,pred_y) plt.figure() plot_confusion_matrix(cm, classes=[0,1],title='Confusion matrix, without normalization') tn, fp, fn, tp = cm.ravel() precision=tp/(tp+fp) recall=tp/(tp+fn) fpr = fp/(fp+ tn) accuracy = (tp + tn)/(tn + tp + fn + fp) print(precision, recall, fpr*100, accuracy*100)0.8838107266904236 0.9972312139905128 13.04242154192026 93.323857146483234.2 Random Forestm = RandomForestClassifier(n_estimators=200, min_samples_leaf=100, max_features=0.5, n_jobs=-1) m.fit(X_train, y_train) pred_y = m.predict(X_test) cm= confusion_matrix(y_test, pred_y) plt.figure() plot_confusion_matrix(cm, classes=[0,1],title='Confusion matrix, without normalization') tn, fp, fn, tp = cm.ravel() precision=tp/(tp+fp) recall=tp/(tp+fn) fpr = fp/(fp+ tn) accuracy = (tp + tn)/(tn + tp + fn + fp) print(precision, recall, fpr*100, accuracy*100)0.9972312139905128 0.8838107266904236 0.3157650336661249 93.323857146483235. 
's Methodologyfrom fastai.imports import * from fastai.structured import *5.1 Class Distribution - (Malicious/Benign) = (70/30)rf_sample = df.drop(["src_ip_ext","src_ip","dest_ip"], axis=1) rf_sample["duration"] = rf_sample["duration"].astype(float) rf_sample["duration"] = np.log((rf_sample["duration"].astype('int'))+0.001) rf_sample["src_bytes"] = rf_sample["src_bytes"].astype(float) rf_sample["src_bytes"] = np.log((rf_sample["src_bytes"].astype('int'))+0.001) rf_sample["dest_bytes"] = rf_sample["dest_bytes"].astype(float) rf_sample["dest_bytes"] = np.log((rf_sample["dest_bytes"].astype('int'))+0.001) rf_sample["src_pkts"] = np.log((rf_sample["src_pkts"].astype('int'))+0.001) rf_sample["dest_pkts"] = np.log((rf_sample["dest_pkts"].astype('int'))+0.001) rf_sample['ts'] = pd.to_datetime(rf_sample['ts'],unit='s') rf_sample["src_port"] = rf_sample["src_port"].astype('category') rf_sample["dest_port"] = rf_sample["dest_port"].astype('category') rf_sample["src_port"] = rf_sample["src_port"].cat.codes rf_sample["dest_port"] = rf_sample["dest_port"].cat.codes from datetime import datetime, timedelta import numpy as np import math xhr, yhr = [], [] for i in df.ts: temp = (datetime.fromtimestamp(i) - timedelta(hours=-5)).strftime('%Y-%m-%d %H:%M:%S')[11:13] xhr.append(np.sin(2*math.pi*int(temp)/24)) yhr.append(np.cos(2*math.pi*int(temp)/24)) rf_sample['xhr'] = xhr rf_sample['yhr'] = yhr #converting date into different fatures add_datepart(rf_sample, 'ts') X = rf_sample.loc[:, rf_sample.columns != 'label'] Y = rf_sample.loc[:, rf_sample.columns == 'label'] rf_sample.T.head(70) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=42) #subsetting train values for faster model execution def split_vals(a,n): return a[:n].copy(), a[n:].copy() X_train_sub, _ = split_vals(X_train, 50000) y_train_sub, _ = split_vals(y_train, 50000) m = RandomForestClassifier(n_estimators=200, min_samples_leaf=100, max_features=0.5, n_jobs=-1) m.fit(X_train_sub, y_train_sub) pred_y = m.predict(X_test) cm= confusion_matrix(y_test, pred_y) plt.figure() plot_confusion_matrix(cm, classes=[0,1],title='Confusion matrix, without normalization') rf_sample[["label","local"]].corr() tn, fp, fn, tp = cm.ravel() precision=tp/(tp+fp) recall=tp/(tp+fn) fpr = fp/(fp+ tn) accuracy = (tp + tn)/(tn + tp + fn + fp) print(precision, recall, fpr*100, accuracy*100) feat_importances = pd.Series(m.feature_importances_, index=X.columns) feat_importances.nlargest(10).plot(kind='barh') from sklearn.metrics import roc_curve from sklearn.metrics import auc # Compute fpr, tpr, thresholds and roc auc fpr, tpr, thresholds = roc_curve(y_test, pred_y) roc_auc = auc(fpr,tpr) # Plot ROC curve plt.plot(fpr, tpr, label='ROC curve (area = %0.3f)' % roc_auc) plt.plot([0, 1], [0, 1], 'k--') # random predictions curve plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.0]) plt.xlabel('False Positive Rate or (1 - Specifity)') plt.ylabel('True Positive Rate or (Sensitivity)') plt.title('Receiver Operating Characteristic') plt.legend(loc="lower right")5.2 Class Distribution - (Malicious/Benign) - (50/50)mal = rf_sample.loc[rf_sample['label'] == 1].head(298226) ben = rf_sample.loc[rf_sample['label'] == 0] rf_sampler = pd.concat([mal,ben], axis = 0) rf_sampler.label.value_counts(normalize = True) X = rf_sampler.loc[:, rf_sample.columns != 'label'] Y = rf_sampler.loc[:, rf_sample.columns == 'label'] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = 
train_test_split(X, Y, test_size=0.33, random_state=42) X_train_sub, _ = split_vals(X_train, 500000) y_train_sub, _ = split_vals(y_train, 500000) m = RandomForestClassifier(n_estimators=200, min_samples_leaf=100, max_features=0.5, n_jobs=-1) m.fit(X_train_sub, y_train_sub) pred_y = m.predict(X_test) cm= confusion_matrix(y_test, pred_y) plt.figure() plot_confusion_matrix(cm, classes=[0,1],title='Confusion matrix, without normalization') tn, fp, fn, tp = cm.ravel() precision=tp/(tp+fp) recall=tp/(tp+fn) fpr = fp/(fp+ tn) accuracy = (tp + tn)/(tn + tp + fn + fp) print(precision, recall, fpr*100, accuracy*100) feat_importances = pd.Series(m.feature_importances_, index=X.columns) feat_importances.nlargest(10).plot(kind='barh') from sklearn.metrics import roc_curve from sklearn.metrics import auc # Compute fpr, tpr, thresholds and roc auc fpr, tpr, thresholds = roc_curve(y_test, pred_y) roc_auc = auc(fpr,tpr) # Plot ROC curve plt.plot(fpr, tpr, label='ROC curve (area = %0.3f)' % roc_auc) plt.plot([0, 1], [0, 1], 'k--') # random predictions curve plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.0]) plt.xlabel('False Positive Rate or (1 - Specifity)') plt.ylabel('True Positive Rate or (Sensitivity)') plt.title('Receiver Operating Characteristic') plt.legend(loc="lower right")5.3 Class Distribution - (Malicious/Benign) - (10/90)mal = rf_sample.loc[rf_sample['label'] == 1].head(29822) ben = rf_sample.loc[rf_sample['label'] == 0] rf_sampler = pd.concat([mal,ben], axis = 0) rf_sampler.label.value_counts(normalize = True) X = rf_sampler.loc[:, rf_sample.columns != 'label'] Y = rf_sampler.loc[:, rf_sample.columns == 'label'] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=42) X_train_sub, _ = split_vals(X_train, 500000) y_train_sub, _ = split_vals(y_train, 500000) m = RandomForestClassifier(n_estimators=200, min_samples_leaf=100, max_features=0.5, n_jobs=-1) m.fit(X_train_sub, y_train_sub) pred_y = m.predict(X_test) cm= confusion_matrix(y_test, pred_y) plt.figure() plot_confusion_matrix(cm, classes=[0,1],title='Confusion matrix, without normalization') tn, fp, fn, tp = cm.ravel() precision=tp/(tp+fp) recall=tp/(tp+fn) fpr = fp/(fp+ tn) accuracy = (tp + tn)/(tn + tp + fn + fp) print(precision, recall, fpr*100, accuracy*100) feat_importances = pd.Series(m.feature_importances_, index=X.columns) feat_importances.nlargest(10).plot(kind='barh') from sklearn.metrics import roc_curve from sklearn.metrics import auc # Compute fpr, tpr, thresholds and roc auc fpr, tpr, thresholds = roc_curve(y_test, pred_y) roc_auc = auc(fpr,tpr) # Plot ROC curve plt.plot(fpr, tpr, label='ROC curve (area = %0.3f)' % roc_auc) plt.plot([0, 1], [0, 1], 'k--') # random predictions curve plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.0]) plt.xlabel('False Positive Rate or (1 - Specifity)') plt.ylabel('True Positive Rate or (Sensitivity)') plt.title('Receiver Operating Characteristic') plt.legend(loc="lower right")5.4 Class Distribution - (Malicious/Benign) - (1/99)mal = rf_sample.loc[rf_sample['label'] == 1].head(298226) ben = rf_sample.loc[rf_sample['label'] == 0] rf_sampler = pd.concat([mal,ben], axis = 0) rf_sampler.label.value_counts(normalize = True) X = rf_sampler.loc[:, rf_sample.columns != 'label'] Y = rf_sampler.loc[:, rf_sample.columns == 'label'] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=42) X_train_sub, _ = split_vals(X_train, 500000) y_train_sub, _ = 
split_vals(y_train, 500000) m = RandomForestClassifier(n_estimators=200, min_samples_leaf=100, max_features=0.5, n_jobs=-1) m.fit(X_train_sub, y_train_sub) pred_y = m.predict(X_test) cm= confusion_matrix(y_test, pred_y) plt.figure() plot_confusion_matrix(cm, classes=[0,1],title='Confusion matrix, without normalization') tn, fp, fn, tp = cm.ravel() precision=tp/(tp+fp) recall=tp/(tp+fn) fpr = fp/(fp+ tn) accuracy = (tp + tn)/(tn + tp + fn + fp) print(precision, recall, fpr*100, accuracy*100) feat_importances = pd.Series(m.feature_importances_, index=X.columns) feat_importances.nlargest(10).plot(kind='barh') from sklearn.metrics import roc_curve from sklearn.metrics import auc # Compute fpr, tpr, thresholds and roc auc fpr, tpr, thresholds = roc_curve(y_test, pred_y) roc_auc = auc(fpr,tpr) # Plot ROC curve plt.plot(fpr, tpr, label='ROC curve (area = %0.3f)' % roc_auc) plt.plot([0, 1], [0, 1], 'k--') # random predictions curve plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.0]) plt.xlabel('False Positive Rate or (1 - Specifity)') plt.ylabel('True Positive Rate or (Sensitivity)') plt.title('Receiver Operating Characteristic') plt.legend(loc="lower right")5.5 Models without Unix Timerf_sample = df.drop(["src_ip_ext","src_ip"], axis=1) rf_sample["duration"] = rf_sample["duration"].astype(float) rf_sample["duration"] = np.log((rf_sample["duration"].astype('int'))+0.001) rf_sample["src_bytes"] = rf_sample["src_bytes"].astype(float) rf_sample["src_bytes"] = np.log((rf_sample["src_bytes"].astype('int'))+0.001) rf_sample["dest_bytes"] = rf_sample["dest_bytes"].astype(float) rf_sample["dest_bytes"] = np.log((rf_sample["dest_bytes"].astype('int'))+0.001) rf_sample["src_pkts"] = np.log((rf_sample["src_pkts"].astype('int'))+0.001) rf_sample["dest_pkts"] = np.log((rf_sample["dest_pkts"].astype('int'))+0.001) rf_sample['ts'] = pd.to_datetime(rf_sample['ts'],unit='s') rf_sample["src_port"] = rf_sample["src_port"].astype('category') rf_sample["dest_ip"] = rf_sample["dest_ip"].astype('category') rf_sample["dest_port"] = rf_sample["dest_port"].astype('category') rf_sample["src_port"] = rf_sample["src_port"].cat.codes rf_sample["dest_ip"] = rf_sample["dest_ip"].cat.codes rf_sample["dest_port"] = rf_sample["dest_port"].cat.codes #converting date into different fatures add_datepart(rf_sample, 'ts') rf_sample.columns rf_sample.drop(["tsElapsed"], axis= 1, inplace=True) mal = rf_sample.loc[rf_sample['label'] == 1].head(298226) ben = rf_sample.loc[rf_sample['label'] == 0] rf_sampler = pd.concat([mal,ben], axis = 0) rf_sampler.label.value_counts(normalize = True) X = rf_sampler.loc[:, rf_sampler.columns != 'label'] Y = rf_sampler.loc[:, rf_sampler.columns == 'label'] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=42) X_train.label #subsetting train values for faster model execution def split_vals(a,n): return a[:n].copy(), a[n:].copy() X_train_sub, _ = split_vals(X_train, 50000) y_train_sub, _ = split_vals(y_train, 50000) m = RandomForestClassifier(n_estimators=200, min_samples_leaf=100, max_features=0.5, n_jobs=-1) m.fit(X_train_sub, y_train_sub) pred_y = m.predict(X_test) cm= confusion_matrix(y_test, pred_y) plt.figure() plot_confusion_matrix(cm, classes=[0,1],title='Confusion matrix, without normalization') tn, fp, fn, tp = cm.ravel() precision=tp/(tp+fp) recall=tp/(tp+fn) fpr = fp/(fp+ tn) accuracy = (tp + tn)/(tn + tp + fn + fp) print(precision, recall, fpr*100, accuracy*100) feat_importances = pd.Series(m.feature_importances_, 
index=X.columns) feat_importances.nlargest(10).plot(kind='barh') from sklearn.metrics import roc_curve from sklearn.metrics import auc # Compute fpr, tpr, thresholds and roc auc fpr, tpr, thresholds = roc_curve(y_test, pred_y) roc_auc = auc(fpr,tpr) # Plot ROC curve plt.plot(fpr, tpr, label='ROC curve (area = %0.3f)' % roc_auc) plt.plot([0, 1], [0, 1], 'k--') # random predictions curve plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.0]) plt.xlabel('False Positive Rate or (1 - Specifity)') plt.ylabel('True Positive Rate or (Sensitivity)') plt.title('Receiver Operating Characteristic') plt.legend(loc="lower right")5.5 Train-Test Split based on timestamp#converting date into different fatures add_datepart(rf_sample, 'ts') rf_sample = rf_sample.sort_values(by=['tsElapsed'], ascending=True) mal = rf_sample.loc[rf_sample['label'] == 1].head(298226) ben = rf_sample.loc[rf_sample['label'] == 0] rf_sampler = pd.concat([mal,ben], axis = 0) rf_sampler.label.value_counts(normalize = True) rf_sampler = rf_sampler.sort_values(by=['tsElapsed'], ascending=True) #rf_sampler.drop(["tsElapsed"], axis= 1, inplace=True) X = rf_sampler.loc[:, rf_sampler.columns != 'label'] Y = rf_sampler.loc[:, rf_sampler.columns == 'label'] X.shape validation = 200000 def split_vals(a,n): return a[:n].copy(), a[n:].copy() # split point: length of dataset minus validation set size. split_point = len(X)-validation # split X X_train, X_test = split_vals(X, split_point) # split y y_train, y_test = split_vals(Y, split_point) X_train.tail() X_test.head() #subsetting train values for faster model execution X_train_sub, _ = split_vals(X_train, 50000) y_train_sub, _ = split_vals(y_train, 50000) m = RandomForestClassifier(n_estimators=200, min_samples_leaf=100, max_features=0.5, n_jobs=-1) m.fit(X_train, y_train) pred_y = m.predict(X_test) cm= confusion_matrix(y_test, pred_y) plt.figure() plot_confusion_matrix(cm, classes=[0,1],title='Confusion matrix, without normalization') tn, fp, fn, tp = cm.ravel() precision=tp/(tp+fp) recall=tp/(tp+fn) fpr = fp/(fp+ tn) accuracy = (tp + tn)/(tn + tp + fn + fp) print(precision, recall, fpr*100, accuracy*100) feat_importances = pd.Series(m.feature_importances_, index=X.columns) feat_importances.nlargest(10).plot(kind='barh') from sklearn.metrics import roc_curve from sklearn.metrics import auc # Compute fpr, tpr, thresholds and roc auc fpr, tpr, thresholds = roc_curve(y_test, pred_y) roc_auc = auc(fpr,tpr) # Plot ROC curve plt.plot(fpr, tpr, label='ROC curve (area = %0.3f)' % roc_auc) plt.plot([0, 1], [0, 1], 'k--') # random predictions curve plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.0]) plt.xlabel('False Positive Rate or (1 - Specifity)') plt.ylabel('True Positive Rate or (Sensitivity)') plt.title('Receiver Operating Characteristic') plt.legend(loc="lower right") y_train.label.value_counts(normalize = True).plot('bar') y_test.label.value_counts(normalize = True).plot('bar')5.6 Train-Test Split based on timestamp (Without tsElapsed)#converting date into different fatures add_datepart(rf_sample, 'ts') rf_sample = rf_sample.sort_values(by=['tsElapsed'], ascending=True) mal = rf_sample.loc[rf_sample['label'] == 1].head(298226) ben = rf_sample.loc[rf_sample['label'] == 0] rf_sampler = pd.concat([mal,ben], axis = 0) rf_sampler.label.value_counts(normalize = True) rf_sampler = rf_sampler.sort_values(by=['tsElapsed'], ascending=True) rf_sampler.drop(["tsElapsed"], axis= 1, inplace=True) X = rf_sampler.loc[:, rf_sampler.columns != 'label'] Y = rf_sampler.loc[:, rf_sampler.columns == 'label'] Y.label.value_counts() 
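The block that turns a confusion matrix into precision, recall, false-positive rate, and accuracy is repeated verbatim after every model fit in this notebook. A small helper would keep those definitions in one place; the sketch below is illustrative only (the name `summarize_confusion` is not part of the original notebook) and assumes the binary 0/1 labels used throughout.

```python
from sklearn.metrics import confusion_matrix

def summarize_confusion(y_true, y_pred):
    """Return (precision, recall, fpr, accuracy) for a binary classifier.
    Hypothetical helper that mirrors the repeated evaluation cells above."""
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)              # true positive rate
    fpr = fp / (fp + tn)                 # false positive rate
    accuracy = (tp + tn) / (tn + fp + fn + tp)
    return precision, recall, fpr, accuracy

# Usage, matching the print statements used throughout:
# precision, recall, fpr, accuracy = summarize_confusion(y_test, pred_y)
# print(precision, recall, fpr * 100, accuracy * 100)
```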
validation = 200000 def split_vals(a,n): return a[:n].copy(), a[n:].copy() # split point: length of dataset minus validation set size. split_point = len(X)-validation # split X X_train, X_test = split_vals(X, split_point) # split y y_train, y_test = split_vals(Y, split_point) y_test.label.value_counts(normalize=True) #subsetting train values for faster model execution X_train_sub, _ = split_vals(X_train, 50000) y_train_sub, _ = split_vals(y_train, 50000) m = RandomForestClassifier(n_estimators=500, min_samples_leaf=100, max_features=0.5, n_jobs=-1) m.fit(X_train, y_train) pred_y = m.predict(X_test) cm= confusion_matrix(y_test, pred_y) plt.figure() plot_confusion_matrix(cm, classes=[0,1],title='Confusion matrix, without normalization') tn, fp, fn, tp = cm.ravel() precision=tp/(tp+fp) recall=tp/(tp+fn) fpr = fp/(fp+ tn) accuracy = (tp + tn)/(tn + tp + fn + fp) print(precision, recall, fpr*100, accuracy*100) feat_importances = pd.Series(m.feature_importances_, index=X.columns) feat_importances.nlargest(10).plot(kind='barh') from sklearn.metrics import roc_curve from sklearn.metrics import auc # Compute fpr, tpr, thresholds and roc auc fpr, tpr, thresholds = roc_curve(y_test, pred_y) roc_auc = auc(fpr,tpr) # Plot ROC curve plt.plot(fpr, tpr, label='ROC curve (area = %0.3f)' % roc_auc) plt.plot([0, 1], [0, 1], 'k--') # random predictions curve plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.0]) plt.xlabel('False Positive Rate or (1 - Specificity)') plt.ylabel('True Positive Rate or (Sensitivity)') plt.title('Receiver Operating Characteristic') plt.legend(loc="lower right") y_train.label.value_counts(normalize = True).plot('bar') y_test.label.value_counts(normalize = True).plot('bar')5.7 Train-Test Split based on timestamp (Without tsElapsed) - With Better Class Balance in Testtest = pd.concat([X_test, y_test], axis = 1) mal = test.loc[test['label'] == 1] ben = test.loc[test['label'] == 0].head(5315) test_df = pd.concat([mal,ben], axis = 0) test_df.label.value_counts(normalize = True) y_test_new = test_df["label"] X_test_new = test_df.drop(["label"], axis = 1) train = pd.concat([X_train, y_train], axis = 1) mal = train.loc[train['label'] == 1].head(102848) ben = train.loc[train['label'] == 0] train_df = pd.concat([mal,ben], axis = 0) train_df.label.value_counts(normalize = True) y_train_new = train_df["label"] X_train_new = train_df.drop(["label"], axis = 1) m = RandomForestClassifier(n_estimators=500, min_samples_leaf=100, max_features=0.5, n_jobs=-1) m.fit(X_train_new, y_train_new) pred_y = m.predict(X_test_new) cm= confusion_matrix(y_test_new, pred_y) plt.figure() plot_confusion_matrix(cm, classes=[0,1],title='Confusion matrix, without normalization') tn, fp, fn, tp = cm.ravel() precision=tp/(tp+fp) recall=tp/(tp+fn) fpr = fp/(fp+ tn) accuracy = (tp + tn)/(tn + tp + fn + fp) print(precision, recall, fpr*100, accuracy*100) from sklearn.metrics import roc_curve from sklearn.metrics import auc # Compute fpr, tpr, thresholds and roc auc fpr, tpr, thresholds = roc_curve(y_test_new, pred_y) roc_auc = auc(fpr,tpr) # Plot ROC curve plt.plot(fpr, tpr, label='ROC curve (area = %0.3f)' % roc_auc) plt.plot([0, 1], [0, 1], 'k--') # random predictions curve plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.0]) plt.xlabel('False Positive Rate or (1 - Specificity)') plt.ylabel('True Positive Rate or (Sensitivity)') plt.title('Receiver Operating Characteristic') plt.legend(loc="lower right") feat_importances = pd.Series(m.feature_importances_, index=X.columns) feat_importances.nlargest(10).plot(kind='barh') 
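One caveat on the ROC curves above: `roc_curve` is fed the hard 0/1 predictions in `pred_y`, so the "curve" reduces to a single operating point and the resulting AUC typically understates the classifier. Passing the positive-class probability from `predict_proba` produces a proper curve. A minimal sketch, not from the original notebook, assuming the fitted forest `m` and the `X_test_new`/`y_test_new` split defined just above:

```python
from sklearn.metrics import roc_curve, auc

# Probability of the malicious class (column 1) instead of hard labels
pred_proba = m.predict_proba(X_test_new)[:, 1]

fpr, tpr, thresholds = roc_curve(y_test_new, pred_proba)
roc_auc = auc(fpr, tpr)

plt.plot(fpr, tpr, label='ROC curve (area = %0.3f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')  # chance line
plt.xlabel('False Positive Rate (1 - Specificity)')
plt.ylabel('True Positive Rate (Sensitivity)')
plt.legend(loc='lower right')
```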
y_train_new.value_counts().plot('bar') y_test_new.value_counts().plot('bar')Exploratory data analysis of photovoltaic datasetThis notebook is part of a project to predict the production of solar energy of a photovoltaic system on top of a house. In this notebook, we will consider the exploratory data analysis of the photovoltaic dataset that was obtained from the interface of the machine. Table of content [1. Import libraries](section1) [2. Load all the data into one dataframe](section2) [3. Visualize](section3) [4. Compute total energy production for each day](section4) [5. Visualize seasonal energy production](section5) [6. Check for missing data](section6) 1. Import librariesimport pandas as pd import numpy as np import os import json import matplotlib.pyplot as plt import matplotlib.dates as mdates import seaborn as sns from scipy.integrate import simps, quad sns.set()2. Load all the data into one dataframe# Folder where the data is located DATAFOLDER = '../01.Original_data/photovoltaic/' # files in that folder files = [f for f in os.listdir(DATAFOLDER) if f.endswith('.csv')] # take only csv files len(files)We have 1740 csv files with photovoltaic data. Let us import them.df = pd.DataFrame() for f in files: df_temp = pd.read_csv(f'{DATAFOLDER}/{f}') df_temp.columns = ['timestamp', 'production'] # timestamp, production in W # conver the timestamp column df_temp['timestamp'] = pd.to_datetime(df_temp['timestamp'], format="%d.%m.%Y %H:%M") df_temp = df_temp.set_index('timestamp', drop=True) df = df.append(df_temp) # breakLet's save it because this took a while.df = df.sort_index() df.to_csv('../02.Prepared_data/photovoltaic/raw_data.csv', index=True) df.head() # import df = pd.read_csv('../02.Prepared_data/photovoltaic/raw_data.csv', index_col=0, parse_dates=True) df['day'] = df.index.date df['time'] = df.index.time df.info() df.describe() df.head() df.tail()We see that we have data from October 2017 until end of June 2019 and only the production data of the photovoltaic system. The resolution of the data is in 15-minte intervals.Let us see how many days we have and if the overall number of intervals does check out.n_days = len(df['day'].unique()) print(f'We have {n_days} days in the dataset.') df_agg = df.groupby('day').agg('count').loc[:, 'production'] df_agg.value_counts()We have 624 days in the dataset.Interesting, so typically there are 96 entries ($24*4$ for the total of the 15-minute intervals in one day) per day, but there are 6 days where this criteria is not met. Let's see which these are!m = df_agg == 96# mask df_agg[~m] df.loc['2019-03-30'].indexIt is clear now that these values are from duplicates, generated by the mining process. We just have to drop them.df = df.drop_duplicates(keep='first') n_days = len(df['day'].unique()) print(f'We have {n_days} days in the dataset.') df.groupby('day').agg('count').loc[:, 'production'].value_counts()We have 624 days in the dataset.Perfect, let's now visualize the data. 3. 
Visualize# %matplotlib widget %matplotlib inline fig, ax = plt.subplots(figsize=(8,5)) X = df.index.values # apparent col = 'production' Y = df[col].values plt.plot(X, Y, label='Production in W') ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y %b')) plt.xticks(rotation=45) plt.xlabel('Date') plt.ylabel('Production in W') plt.legend() plt.tight_layout() plt.show()/Users/hkromer/anaconda3/envs/solarAnalytics/lib/python3.7/site-packages/pandas/plotting/_matplotlib/converter.py:102: FutureWarning: Using an implicitly registered datetime converter for a matplotlib plotting method. The converter was registered by pandas on import. Future versions of pandas will require you to explicitly register matplotlib converters. To register the converters: >>> from pandas.plotting import register_matplotlib_converters >>> register_matplotlib_converters() warnings.warn(msg, FutureWarning)Here we have the hourly production, which is not what we are looking for, let us look at the total daily production in units of kWh by integrating the curve for each day. 4. Compute total energy production for each dayin kWh!# %matplotlib widget %matplotlib inline fig, ax = plt.subplots(figsize=(8,5)) X = df.loc['2018-07-03'].index.values # apparent col = 'production' Y = df.loc['2018-07-03', col] plt.plot(X, Y, label='Production in W') ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M')) plt.xticks(rotation=45) plt.xlabel('Date') plt.ylabel('Production in W') plt.legend() plt.tight_layout() plt.show() # compute the kWh for one given day def compute_kwh(df): """ Calculate the kWh for a given production output for one day in a dataframe, df. Returns the kWh """ df['seconds_after_midnight'] = df.reset_index().index * 15 * 60 x = df['seconds_after_midnight'].values y = df['production'].values y_int = simps(y=y, x=x) / (3.6e6) # in units of kWh return y_int # groupby day # apply function to compute kWh df_int = pd.DataFrame(df.groupby('day').apply(lambda x: compute_kwh(x))) df_int.columns = ['energy']Let's also visualize that.# %matplotlib widget %matplotlib inline fig, ax = plt.subplots(figsize=(8,5)) X = df_int.index.values Y = df_int['energy'].values plt.plot(X, Y) ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y %b')) plt.xticks(rotation=45) plt.xlabel('Date') plt.ylabel('Daily total energy in kWh') plt.tight_layout() plt.show()Very good! Let's look at these values.df_int.describe()Interesting, we see that the mean of the production is 22.7 kWh for any day, with extremas of 0 and 56 kWh. Most of the data is between 0 and 20.8 kWh (50%). 5. 
Visualize seasonal energy productionLet us also consider the four seasons.In the northern hemisphere, the four seasons are defined as (meteorologically):* Spring: March until May* Summer: June until August* Fall: September until November* Winter: December until Februarydf_int = df_int.set_index(pd.to_datetime(df_int.reset_index().loc[:, 'day'])) d_season = { 'spring': [3, 4, 5], 'summer': [6, 7, 8], 'fall': [9, 10, 11], 'winter': [12, 1, 2] } df_int['month'] = df_int.index.month for k in d_season.keys(): months = d_season[k] for month in months: df_int['month'] = df_int['month'].replace({month: k}) # to check, make it with some integers d_season_int = { 'spring': 1, 'summer': 2, 'fall': 3, 'winter': 0 } s = df_int['month'].map(d_season_int) df_int = df_int.rename(columns={'month': 'season'})Let us check if we encoded the months correctly!# %matplotlib widget %matplotlib inline fig, ax = plt.subplots(figsize=(8,5)) plt.plot(s.index, s.values) ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y %b')) plt.xticks(rotation=45) plt.xlabel('Date') plt.ylabel('Season') plt.tight_layout() plt.show()Seems fine (note that winter is 0, spring 1, summer 2, fall 3, as expected).To get the seasonal production, let us group the data by the season and make a violinplot.# %matplotlib widget %matplotlib inline fig, ax = plt.subplots(figsize=(5,4)) sns.violinplot(x="season", y="energy", data=df_int, ax=ax, inner='quartile', bw=0.1, order=[ "winter", "spring", "summer", "fall"]) plt.xticks(fontsize=8) plt.ylabel('Energy production in kWh') plt.xlabel('Season') plt.tight_layout() # plt.savefig('./seasonal_production.png', dpi=300) plt.show()What we see here is the distribution of daily energy production grouped by the different season. We identify the following key takeaways:* The total productions are distributed to higher values for the summer and spring with medians of around 42 and 28 kWh, respectively, compared to around 8 kWh in the winter and 18 kWh in the fall.* The shape of the distribution is more uniform in the spring and fall compared to the winter and summer. In the winter, the distribution is skewed towards lower values, and in the summer towards higher values. 6. 
Check for missing data# what missing values there are percent_missing = df.isnull().sum() * 100 / len(df) missing_value_df = pd.DataFrame({'column_name': df.columns, 'percent_missing': percent_missing, 'absolute_missing': df.isnull().sum()}) missing_value_df # what missing values there are percent_missing = df_int.isnull().sum() * 100 / len(df_int) missing_value_df = pd.DataFrame({'column_name': df_int.columns, 'percent_missing': percent_missing, 'absolute_missing': df_int.isnull().sum()}) missing_value_df df_int.to_csv('../02.Prepared_data/photovoltaic/integrated_daily.csv', index=True)21.3.1 환경 준비 - Anaconda에서 개발환경 만들기```bashconda env create -f env_CH21.ymlconda activate book-word2vecgit clone https://github.com/machrisaa/tensorflow-vgg tensorflow_vgg```import time import numpy as np import tensorflow as tf from urllib.request import urlretrieve from os.path import isfile, isdir from tqdm import tqdm import zipfile dataset_folder_path = 'data' dataset_filename = 'text8.zip' dataset_name = 'Text8 Dataset' class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile(dataset_filename): with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar: urlretrieve( 'http://mattmahoney.net/dc/text8.zip', dataset_filename, pbar.hook) if not isdir(dataset_folder_path): with zipfile.ZipFile(dataset_filename) as zip_ref: zip_ref.extractall(dataset_folder_path) with open('data/text8') as f: text = f.read() from collections import Counter def preprocess(text): # Replace punctuation with tokens so we can use them in our model text = text.lower() text = text.replace('.', ' ') text = text.replace(',', ' ') text = text.replace('"', ' ') text = text.replace(';', ' ') text = text.replace('!', ' ') text = text.replace('?', ' ') text = text.replace('(', ' ') text = text.replace(')', ' ') text = text.replace('--', ' ') text = text.replace('?', ' ') # text = text.replace('\n', ' ') text = text.replace(':', ' ') words = text.split() # Remove all words with 5 or fewer occurrences word_counts = Counter(words) trimmed_words = [word for word in words if word_counts[word] > 5] return trimmed_words words =preprocess(text) print(words[:30]) print("Total words: {}".format(len(words))) print("Unique words: {}".format(len(set(words)))) def create_lookup_tables(words): """ Create lookup tables for vocabulary :param words: Input list of words :return: A tuple of dicts. The first dict.... """ word_counts = Counter(words) sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True) int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)} vocab_to_int = {word: ii for ii, word in int_to_vocab.items()} return vocab_to_int, int_to_vocab vocab_to_int, int_to_vocab = create_lookup_tables(words) int_words = [vocab_to_int[word] for word in words] import random threshold = 1e-5 word_counts = Counter(int_words) total_count = len(int_words) freqs = {word: count/total_count for word, count in word_counts.items()} p_drop = {word: 1 - np.sqrt(threshold/freqs[word]) for word in word_counts} train_words = [word for word in int_words if random.random() < (1 - p_drop[word])] def get_target(words, idx, window_size=5): ''' Get a list of words in a window around an index. 
''' R = np.random.randint(1, window_size+1) start = idx - R if (idx - R) > 0 else 0 stop = idx + R target_words = set(words[start:idx] + words[idx+1:stop+1]) return list(target_words) def get_batches(words, batch_size, window_size=5): ''' Create a generator of word batches as a tuple (inputs, targets) ''' n_batches = len(words)//batch_size # only full batches words = words[:n_batches*batch_size] for idx in range(0, len(words), batch_size): x, y = [], [] batch = words[idx:idx+batch_size] for ii in range(len(batch)): batch_x = batch[ii] batch_y = get_target(batch, ii, window_size) y.extend(batch_y) x.extend([batch_x]*len(batch_y)) yield x, y train_graph = tf.Graph() with train_graph.as_default(): inputs = tf.placeholder(tf.int32, [None], name='inputs') labels = tf.placeholder(tf.int32, [None, None], name='labels') n_vocab = len(int_to_vocab) n_embedding = 200 # Number of embedding features with train_graph.as_default(): embedding = tf.Variable(tf.random_uniform((n_vocab, n_embedding), -1, 1)) embed = tf.nn.embedding_lookup(embedding, inputs) # Number of negative labels to sample n_sampled = 100 with train_graph.as_default(): softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding), stddev=0.1)) softmax_b = tf.Variable(tf.zeros(n_vocab)) # Calculate the loss using negative sampling loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b, labels, embed, n_sampled, n_vocab) cost = tf.reduce_mean(loss) optimizer = tf.train.AdamOptimizer().minimize(cost) with train_graph.as_default(): valid_size = 16 # Random set of words to evaluate similarity on. valid_window = 100 # pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent valid_examples = np.array(random.sample(range(valid_window), valid_size//2)) valid_examples = np.append(valid_examples, random.sample(range(1000,1000+valid_window), valid_size//2)) valid_dataset = tf.constant(valid_examples, dtype=tf.int32) # We use the cosine distance: norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True)) normalized_embedding = embedding / norm valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset) similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding)) # If the checkpoints directory doesn't exist: !mkdir checkpoints epochs = 10 batch_size = 1000 window_size = 10 with train_graph.as_default(): saver = tf.train.Saver() with tf.Session(graph=train_graph) as sess: iteration = 1 loss = 0 sess.run(tf.global_variables_initializer()) for e in range(1, epochs+1): batches = get_batches(train_words, batch_size, window_size) start = time.time() for x, y in batches: feed = {inputs: x, labels: np.array(y)[:, None]} train_loss, _ = sess.run([cost, optimizer], feed_dict=feed) loss += train_loss if iteration % 100 == 0: end = time.time() print("Epoch {}/{}".format(e, epochs), "Iteration: {}".format(iteration), "Avg. 
Training loss: {:.4f}".format(loss/100), "{:.4f} sec/batch".format((end-start)/100)) loss = 0 start = time.time() if iteration % 1000 == 0: # note that this is expensive (~20% slowdown if computed every 500 steps) sim = similarity.eval() for i in range(valid_size): valid_word = int_to_vocab[valid_examples[i]] top_k = 8 # number of nearest neighbors nearest = (-sim[i, :]).argsort()[1:top_k+1] log = 'Nearest to %s:' % valid_word for k in range(top_k): close_word = int_to_vocab[nearest[k]] log = '%s %s,' % (log, close_word) print(log) iteration += 1 save_path = saver.save(sess, "checkpoints/text8.ckpt") embed_mat = sess.run(normalized_embedding) with train_graph.as_default(): saver = tf.train.Saver() with tf.Session(graph=train_graph) as sess: saver.restore(sess, tf.train.latest_checkpoint('checkpoints')) embed_mat = sess.run(embedding) %matplotlib inline %config InlineBackend.figure_format = 'retina' import matplotlib.pyplot as plt from sklearn.manifold import TSNE viz_words = 500 tsne = TSNE() embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :]) fig, ax = plt.subplots(figsize=(14, 14)) for idx in range(viz_words): plt.scatter(*embed_tsne[idx, :], color='steelblue') plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)Formas de Seleçãoimport pandas as pd # noqa E402 data = [ (1, 2, 3, 4), (5, 6, 7, 8), (8, 10, 11, 12), (13, 14, 15, 16), ] df = pd.DataFrame(data, 'l1 l2 l3 l4'.split(), 'c1 c2 c3 c4'.split()) # A seleção de uma coluna, com um par de colchetes, retorna um objeto Series df['c1'] type(df['c1']) # Já a seleção com 2 pares de colchetes retorna um DataFrame df[['c3', 'c1']] type(df[['c3', 'c1']]) # A seleção de todas as linhas de um DataFrame pode ser feita com [:] df[:] # Para selecionar a partir de uma linha, informamos essa linha como primeiro # parâmetro da seleção df[1:] # Para selecionar uma fatia, podemos usar um fatiamento normal. df[1:3] # Podemos selecionar linhas e colunas df[1:][['c3', 'c1']] df # Podemos fazer seleções a partir dos rótulos das linhas (o index) com .loc[] df.loc['l3'] df.loc[['l3', 'l2']] # podemos selecionar um só elemento df.loc['l1', 'c2'] # Podemos fazer seleção usando índices numéricos. 
Para isso, devemos usar o # .iloc df.iloc[0, 1] df.loc[['l3', 'l1'], ['c4', 'c1']] df.iloc[[2, 0], [3, 0]]ML.Net - Recommender -> Cientista de Dados 👋()[![Linkedin Badge](https://img.shields.io/badge/-LinkedIn-blue?style=flat-square&logo=Linkedin&logoColor=white&link=https://www.linkedin.com/in/davi-ramos/)](https://www.linkedin.com/in/davi-ramos/)[![Twitter Badge](https://img.shields.io/badge/-Twitter-1DA1F2?style=flat-square&logo=Twitter&logoColor=white&link=https://twitter.com/Daviinfo/)](https://twitter.com/Daviinfo/)// ML.NET Nuget packages installation #r "nuget:Microsoft.ML" #r "nuget:Microsoft.ML.Recommender" using System; using System.IO; using Microsoft.ML; using Microsoft.ML.Data; using Microsoft.ML.Trainers; public class MovieRating { [LoadColumn(0)] public float userId; [LoadColumn(1)] public float movieId; [LoadColumn(2)] public float Label; } public class MovieRatingPrediction { public float Label; public float Score; } // Load data public static (IDataView training, IDataView test) LoadData(MLContext mlContext) { // Load training & test datasets using datapaths var trainingDataPath = Path.Combine(Environment.CurrentDirectory, "Datasets\\Movies_Data", "recommendation-ratings-train.csv"); var testDataPath = Path.Combine(Environment.CurrentDirectory, "Datasets\\Movies_Data", "recommendation-ratings-test.csv"); IDataView trainingDataView = mlContext.Data.LoadFromTextFile(trainingDataPath, hasHeader: true, separatorChar: ','); IDataView testDataView = mlContext.Data.LoadFromTextFile(testDataPath, hasHeader: true, separatorChar: ','); return (trainingDataView, testDataView); } // Build and train model public static ITransformer BuildAndTrainModel(MLContext mlContext, IDataView trainingDataView) { // Add data transformations IEstimator estimator = mlContext.Transforms.Conversion.MapValueToKey(outputColumnName: "userIdEncoded", inputColumnName: "userId") .Append(mlContext.Transforms.Conversion.MapValueToKey(outputColumnName: "movieIdEncoded", inputColumnName: "movieId")); // Set algorithm options and append algorithm var options = new MatrixFactorizationTrainer.Options { MatrixColumnIndexColumnName = "userIdEncoded", MatrixRowIndexColumnName = "movieIdEncoded", LabelColumnName = "Label", NumberOfIterations = 20, ApproximationRank = 100 }; var trainerEstimator = estimator.Append(mlContext.Recommendation().Trainers.MatrixFactorization(options)); Console.WriteLine("=============== Training the model ==============="); ITransformer model = trainerEstimator.Fit(trainingDataView); return model; } // Evaluate model public static void EvaluateModel(MLContext mlContext, IDataView testDataView, ITransformer model) { // Evaluate model on test data & print evaluation metrics Console.WriteLine("=============== Evaluating the model ==============="); var prediction = model.Transform(testDataView); var metrics = mlContext.Regression.Evaluate(prediction, labelColumnName: "Label", scoreColumnName: "Score"); Console.WriteLine("Root Mean Squared Error : " + metrics.RootMeanSquaredError.ToString()); Console.WriteLine("RSquared: " + metrics.RSquared.ToString()); } public static void UseModelForSinglePrediction(MLContext mlContext, ITransformer model) { Console.WriteLine("=============== Making a prediction ==============="); var predictionEngine = mlContext.Model.CreatePredictionEngine(model); // Create test input & make single prediction var testInput = new MovieRating { userId = 6, movieId = 10 }; var movieRatingPrediction = predictionEngine.Predict(testInput); if 
(Math.Round(movieRatingPrediction.Score, 1) > 3.5) { Console.WriteLine("Movie " + testInput.movieId + " is recommended for user " + testInput.userId); } else { Console.WriteLine("Movie " + testInput.movieId + " is not recommended for user " + testInput.userId); } } //Save model public static void SaveModel(MLContext mlContext, DataViewSchema trainingDataViewSchema, ITransformer model) { // Save the trained model to .zip file var modelPath = Path.Combine(Environment.CurrentDirectory, "Datasets\\Movies_Data", "MovieRecommenderModel.zip"); Console.WriteLine("=============== Saving the model to a file ==============="); mlContext.Model.Save(model, trainingDataViewSchema, modelPath); } // MLContext to be shared across the model creation workflow objects MLContext mlContext = new MLContext(); // Load data (IDataView trainingDataView, IDataView testDataView) = LoadData(mlContext); // Build & train model ITransformer model = BuildAndTrainModel(mlContext, trainingDataView); // Evaluate quality of model EvaluateModel(mlContext, testDataView, model); // Use model to try a single prediction (one row of data) UseModelForSinglePrediction(mlContext, model); // Save model SaveModel(mlContext, trainingDataView.Schema, model);=============== Training the model =============== =============== Evaluating the model =============== Root Mean Squared Error : 0,9963857075088528 RSquared: 0,4097941818101922 =============== Making a prediction =============== Movie 10 is recommended for user 6 =============== Saving the model to a file ===============Make Input Files For `$CLAW/geoclaw/examples/tsunami/eta_init_force_dry`For this example simple artificial topography is generated in order to illustrate various things.Contents: - [Define ocean topography](topo_ocean) - [Define topo for small coastal region](topo_coast) - [Create dtopo for an earthquake source](dtopo) - [Force Dry array](force_dry) Running this notebook should create a set of files in the directory `input_files`.Alternatively, running make input or equivalently python make_input_files.py will run the python script version of this notebook, which was created with the command jupyter nbconvert --to python --TagRemovePreprocessor.enabled=True \ --TagRemovePreprocessor.remove_cell_tags="['hide-py']" \ make_input_files.ipynbThis will only work if [nbconvert](https://nbconvert.readthedocs.io/en/latest/index.html) is installed.Note that cells in this notebook that create plots are not included in the `.py` version (due to the cell tag `hide-py` that is applied to these cells, visible if you select `View -> Cell Toolbar -> Tags` in the notebook menu).%matplotlib inline from pylab import * from scipy.interpolate import interp1d import os from clawpack.geoclaw import topotools, marching_front, dtopotools from clawpack.visclaw import plottoolsDirectory for input files:inputdir = 'input_files' os.system('mkdir -p %s' % inputdir) print('Input files will be put in directory %s' % inputdir)Define ocean topographyThis simple topography is piecewise linear in $x$ (longitude) with a continental shelf and beach, and constant in the $y$ (latitude) direction. 
It is placed at the equator so distances are roughly equal in $x$ and $y$, and also placed at longitude 0.# Define piecewise linear function (unequally spaced): xocean = array([-2,-1,-0.5,-0.1,0.1]) zocean = array([-3000,-3000,-100,-100,100]) # Interpolate to equally spaced grid for topofile: xo = arange(-2,0.2,0.1) yo = array([-2,2]) zfunc = interp1d(xocean,zocean,fill_value="extrapolate") zo = zfunc(xo) # Convert to 2d arrays: Xo,Yo = meshgrid(xo,yo) Zo = vstack((zo,zo)) figure(figsize=(12,5)) subplot(121) contourf(Xo,Yo,Zo) colorbar() title('Ocean Topography') subplot(122) plot(xo,zo,'k-') fill_between(xo,zo,maximum(zo,0),color=[.5,.5,1]) title('Topography on transect');Save as a topofile:topo = topotools.Topography() topo.set_xyZ(xo,yo,Zo) topofile = '%s/topo_ocean.tt3' % inputdir topo.write(topofile, topo_type=3, Z_format="%11.3e") print('Created ', topofile)Define topo for small coastal regionWe define some more complicated topography on a finer grid over a small coastal region with 1/3 arcsecond resolution, chosen to be aligned with integer multiples of degrees (e.g. a grid point at longitude `x=0` and latitude `y=0`) as typical of real DEMs from NCEI. This is important when aligning computational grids and fgmax grids (if used) in `setrun.py`. We will use a cutoff function so that this fine-scale topo matches the linear beach profile of the ocean topography along the edges of this rectangle. The cutoff is 1 in the center of the rectangle and decays to 0 at the edges:# choose DEM grid points: arcsec13 = 1./(3*3600.) # 1/3 arcsecond print('arcsec13 = %.6f degrees = %.2f meters' % (arcsec13,arcsec13*111e3)) x = arange(-100*arcsec13, 150*arcsec13, arcsec13) y = arange(-55*arcsec13, 55*arcsec13, arcsec13) X,Y = meshgrid(x,y) print('X.shape = ', X.shape) x1,x2 = x.min(), x.max() y1,y2 = y.min(), y.max() print('Extent of coastal topo: (%.6f, %.6f, %.6f, %.6f)' % (x1,x2,y1,y2)) # define the cutoff function: w = 0.001 # width of cutoff layer cutoff = 1. / (1. + exp(1e4*(X-(x2-w))) + exp(1e4*((x1+w)-X)) \ + exp(1e4*(Y-(y2-w))) + exp(1e4*((y1+w)-Y))) figure(figsize=(10,6)) contourf(X,Y,cutoff) colorbar(shrink=0.5) gca().set_aspect(1) title('Cutoff function');The topography in this region is the linearly sloping beach augmented by a Gaussian dip. The beach slope is chosen to agree with the ocean topography offshore (1 km / degree, about 1/100), while onshore there is a smaller slope in this region for illustration.Z0 = 1e3*X # sloping beach matching ocean topography Z1 = where(X<0, 1e3*X, 0.2e3*X) # smaller slope on shore R1 = (X-0.004)**2 + (Y-0.002)**2 Z1 += -4*exp(-500000*R1) # Gaussian dip Z = (1-cutoff)*Z0 + cutoff*Z1Plot the coastal topography:# colors: c = [[.2,.2,1],[.5,.5,1],[.8,.8,1],[.7,1,.7],[.2,.8,0],[.9,.8,.2]] figure(figsize=(12,7)) subplot(211) contourf(X,Y,Z,[-2,-1,0,1,2],colors=c,extend='both') cb = colorbar(shrink=0.9) cb.set_label('meters') contour(X,Y,Z,[-2,-1,0,1,2],colors=['b','b','r','g','g']) gca().set_aspect(1.) xticks(rotation=20) xlabel('Longitude') ylabel('Latitude') subplot(212) contourf(X*111e3,Y*111e3,Z,[-2,-1,0,1,2],colors=c,extend='both') cb = colorbar(shrink=0.9) cb.set_label('meters') contour(X*111e3,Y*111e3,Z,[-2,-1,0,1,2],colors=['b','b','r','g','g']) gca().set_aspect(1.) xticks(rotation=20) xlabel('meters') ylabel('meters') tight_layout();The lower plot in the figure above shows the same topography as on the top, but with x,y units of meters to better show the scale. 
Recall that 1 degree is about 111 km and 1/3 arcsec is about 10 meters.In the plots above, the red contour is at $Z = 0$, and hence is the "shoreline". However, the isolated "lake" with elevation $Z < 0$ could be dry land below sea level. Normally with GeoClaw this region would be filled with water initially up to $Z = 0$ everywhere. Below in [the Force_Dry section](force_dry), we discuss how to force this region to be initialized as dry if it is in fact dry land. Save this as a topofile:topo = topotools.Topography() topo.set_xyZ(x,y,Z) topofile = '%s/topo_shore.tt3' % inputdir topo.write(topofile, topo_type=3, Z_format="%11.3e") print('Created ', topofile)Plot both topo sets togetherThe coastal region above is very small compared to the ocean region defined above. Here we plot both together:def plot_topo(add_colorbar=False): contourf(Xo,Yo,Zo,[-2,-1,0,1,2],colors=c,extend='both') contourf(X,Y,Z,[-2,-1,0,1,2],colors=c,extend='both') if add_colorbar: cb = colorbar() cb.set_label('meters') #contour(X,Y,Z,[-2,-1,0,1,2],colors=['b','b','r','g','g']) plot([x1,x1,x2,x2,x1],[y1,y2,y2,y1,y1],'k') gca().set_aspect(1.) xticks(rotation=20) xlabel('Longitude') ylabel('Latitude') figure(figsize=(12,6)) subplot(121) plot_topo() title('Ocean Topography') subplot(122) plot_topo(add_colorbar=True) axis([-0.005, 0.015, -0.01, 0.01]) title('Zoom around shore') tight_layout()In the plot on the left above, the black rectangle showing the extent of the coastal DEM is barely visible. Zooming in shows that the topography does match up near the edges of this rectangle. In GeoClaw the finest available topography is used when computing cell-averaged topo values, so the coastal DEM will be used for any cell that overlaps this region. Create dtopo for an earthquake source:We define a simple earthquake in which there is uniform slip on a single subfault. The parameters are chosen to be somewhat reasonable for a subduction zone event offshore, but the shape is a bit odd (width 100 km and length 50 km) in order to give a smallish event with the desired onshore subsidence, for illustration purposes.subfault = dtopotools.SubFault() subfault.strike = 0. subfault.length = 50.e3 subfault.width = 100.e3 subfault.depth = 10.e3 subfault.slip = 5. subfault.rake = 90. subfault.dip = 10. subfault.longitude = -1. subfault.latitude = 0. subfault.coordinate_specification = "top center" fault = dtopotools.Fault() fault.subfaults = [subfault] print("Earthquake magnitude: Mw = %.2f" % fault.Mw()) dtopo_fname = '%s/dtopo_test.tt3' % inputdir print("Using Okada model to create dtopo file", dtopo_fname) x_deform = linspace(-2, 1, 100) y_deform = linspace(-1, 1, 100) times = [1.] fault.create_dtopography(x_deform,y_deform,times) dtopo = fault.dtopo dtopo.write(dtopo_fname, dtopo_type=3) figure(figsize=(12,6)) ax = subplot(121) dtopo.plot_dZ_colors(2.,axes=ax,dZ_interval=0.5) contour(Xo,Yo,Zo,[-110,-90,0],colors=['b','b','r'],linestyles='--') ax.set_aspect(1.) axis([-2,0.5,-2,2]) xlabel('Longitude') ylabel('Latitude') ax = subplot(122) ylat = 0. jlat = where(dtopo.y<=ylat)[0].max() plot(dtopo.x, dtopo.dZ[0,jlat,:],'g') plot(dtopo.x, 0*dtopo.x, 'k') xlabel('Longitude') title('Vertical displacement on transect at latitude %.2f' % ylat);The left plot above shows the sea floor deformation as contours and colors, along with the extent of the continental shelf as blue dashed lines and the shoreline as a red dashed line. The plot on the right shows the vertical deformation along a transect at latitude 0 going through the coastal region of interest. 
We can compute the subsidence at the location on the shoreline where our fine scale topography is defined as:xlon = 0. ilon = where(dtopo.x<=xlon)[0].max() ylat = 0. jlat = where(dtopo.y<=ylat)[0].max() #print(ilon,jlat) dz0 = dtopo.dZ[0,jlat,ilon] print('Surface deformation at x=%.2f, y=%.2f is dz = %.2f meters' \ % (xlon,ylat,dz0))This subsidence is enough to significantly change the shoreline location, as seen below:figure(figsize=(12,6)) subplot(211) contourf(X,Y,Z,[-2,-1,0,1,2],colors=c,extend='both') cb = colorbar(shrink=0.9) cb.set_label('meters') contour(X,Y,Z,[-2,-1,0,1,2],colors=['b','b','r','g','g']) gca().set_aspect(1.) xticks(rotation=20) #xlim(-0.002,0.008) xlabel('Longitude') ylabel('Latitude') title('Original topo') subplot(212) Z_postquake = Z + dz0 contourf(X,Y,Z_postquake,[-2,-1,0,1,2],colors=c,extend='both') cb = colorbar(shrink=0.9) cb.set_label('meters') contour(X,Y,Z_postquake,[-2,-1,0,1,2],colors=['b','b','r','g','g']) gca().set_aspect(1.) xticks(rotation=20) #xlim(-0.002,0.008) xlabel('Longitude') ylabel('Latitude') title('Subsided topo, dz = %.2f m' % dz0); tight_layout() savefig('topo_with_dz.png') # save this figure to show in run_geoclaw.ipynbForce Dry arrayNow suppose that the onshore lake shown in the plots above is really a depression that should be dry land in spite of being below sea level. We can use the marching front algorithm from [`clawpack.geoclaw.marching_front`](http://depts.washington.edu/clawpack/sampledocs/dev_v5.7.0/marching_front.html) to identify points that are below sea level but disconnected from the coast. We use the marching front algorithm starting by assuming any point with `Z < Z1 = -5` meters should be wet and marching to find all connected points with elevation up to `Z = Z2 = 0`:wet_points = marching_front.select_by_flooding(topo.Z, Z1=-5., Z2=0., max_iters=None)See the documentation page [Force Cells to be Dry Initially](http://depts.washington.edu/clawpack/sampledocs/dev_v5.7.0/force_dry.html) for more discussion of the cells below...Zdry = ma.masked_array(topo.Z, wet_points) Zwet = ma.masked_array(topo.Z, logical_not(wet_points)) figure(figsize=(12,6)) subplot(211) contourf(X,Y,Zdry,[-2,-1,0,1,2],colors=c,extend='both') cb = colorbar(shrink=0.9) cb.set_label('meters') contour(X,Y,Z,[-2,-1,0,1,2],colors='k',linewidths=0.8) gca().set_aspect(1.) xticks(rotation=20) #xlim(-0.002,0.008) xlabel('Longitude') ylabel('Latitude') title('Colored points are identified as initially dry'); subplot(212) contourf(X,Y,Zwet,[-2,-1,0,1,2],colors=c,extend='both') cb = colorbar(shrink=0.9) cb.set_label('meters') contour(X,Y,Z,[-2,-1,0,1,2],colors='k',linewidths=0.8) gca().set_aspect(1.) 
xticks(rotation=20) #xlim(-0.002,0.008) xlabel('Longitude') ylabel('Latitude') title('Colored points are identified as initially wet'); tight_layout();Create `force_dry_init` array for GeoClawFirst we buffer the points identified above as discussed in the the documentation page [Force Cells to be Dry Initially](http://depts.washington.edu/clawpack/sampledocs/dev_v5.7.0/force_dry.html).dry_points = 1 - wet_points dry_points_sum = dry_points[1:-1,1:-1] + dry_points[0:-2,1:-1] + dry_points[2:,1:-1] + \ dry_points[1:-1,0:-2] + dry_points[0:-2,0:-2] + dry_points[2:,0:-2] + \ dry_points[1:-1,2:] + dry_points[0:-2,2:] + dry_points[2:,2:] # initialize array to 0 everywhere: force_dry_init = zeros(dry_points.shape) # reset in interior to 1 if all points in the 3x3 block around it are dry: force_dry_init[1:-1,1:-1] = where(dry_points_sum == 9, 1, 0)And finally create the input file needed for GeoClaw. Note that this creates a file with the same format as a topofile having `topo_type == 3` as described in [Topography Data documentation](http://www.clawpack.org/topo.html). We specify `Z_format= '%1i'` to print out single-digit integers since this file has values 0 or 1 rather than topography elevations (with 1 indicated points that should be forced to be dry when initializing grid patches in GeoClaw).force_dry_init_topo = topotools.Topography() force_dry_init_topo.set_xyZ(topo.x,topo.y,force_dry_init) fname_force_dry_init = '%s/force_dry_init.tt3' % inputdir force_dry_init_topo.write(fname_force_dry_init, topo_type=3, Z_format='%1i') print('Created %s' % fname_force_dry_init)- Split the data in Cell painting & L1000 into train/test based on their compoundsimport os import requests import pickle import argparse import pandas as pd import numpy as np import re from os import walk from collections import Counter import random import shutil # Run with both "" and "_subsample" for the two Cell Painting input data types file_indicator="" cp_data_path = '../../1.Data-exploration/Profiles_level4/cell_painting/cellpainting_lvl4_cpd_replicate_datasets/' l1000_data_path = "../../1.Data-exploration/Profiles_level4/L1000/L1000_lvl4_cpd_replicate_datasets/" cpd_split_path = '../1.compound_split_train_test/data' df_level4_cp = pd.read_csv( os.path.join(cp_data_path, f'cp_level4_cpd_replicates{file_indicator}.csv.gz'), low_memory = False ) df_level4_L1 = pd.read_csv( os.path.join(l1000_data_path, 'L1000_level4_cpd_replicates.csv.gz'), compression='gzip', low_memory = False ) df_cpds_moas_lincs = pd.read_csv(os.path.join(cpd_split_path, 'split_moas_cpds.csv')) df_cpds_moas_lincs.head() all_cpds = df_cpds_moas_lincs['pert_iname'].unique() df_level4_cp = df_level4_cp.loc[df_level4_cp['pert_iname'].isin(all_cpds)].reset_index(drop=True) df_level4_L1 = df_level4_L1.loc[df_level4_L1['pert_iname'].isin(all_cpds)].reset_index(drop=True) df_level4_cp.shape df_level4_L1.shape df_level4_cp['moa'] = df_level4_cp['moa'].apply(lambda x: x.lower()) df_level4_L1['moa'] = df_level4_L1['moa'].apply(lambda x: x.lower()) df_cpds_moas = df_cpds_moas_lincs.copy() len(df_cpds_moas['pert_iname'].unique()) ##no of compounds in the whole data len(df_cpds_moas['moa'].unique()) ##no of MOA def create_moa_targets(df): """Create the binary multi-label MOA targets for each compound""" df['val'] = 1 df_moas_targets = pd.pivot_table(df, values=['val'], index='pert_iname',columns=['moa'], fill_value=0) df_moas_targets.columns.names = (None,None) df_moas_targets.columns = df_moas_targets.columns.droplevel(0) df_moas_targets = 
df_moas_targets.reset_index().rename({'index':'pert_iname'}, axis = 1) return df_moas_targets df_moa_targets = create_moa_targets(df_cpds_moas) df_moa_targets df_level4_cp = df_level4_cp.merge(df_moa_targets, on='pert_iname') df_level4_L1 = df_level4_L1.merge(df_moa_targets, on='pert_iname') df_level4_cp.shape df_level4_L1.shape- compounds split (80/20) based on MOAs -- based on split_moas_cpdstrain_cpds = df_cpds_moas_lincs[df_cpds_moas_lincs['train']]['pert_iname'].unique() test_cpds = df_cpds_moas_lincs[df_cpds_moas_lincs['test']]['pert_iname'].unique() len(train_cpds) len(test_cpds) def train_test_split(train_cpds, test_cpds, df): df_trn = df.loc[df['pert_iname'].isin(train_cpds)].reset_index(drop=True) df_tst = df.loc[df['pert_iname'].isin(test_cpds)].reset_index(drop=True) return df_trn, df_tst df_level4_cp_trn, df_level4_cp_tst = train_test_split(train_cpds, test_cpds, df_level4_cp) df_level4_L1_trn, df_level4_L1_tst = train_test_split(train_cpds, test_cpds, df_level4_L1) df_level4_cp_trn.shape df_level4_cp_tst.shape df_level4_L1_trn.shape df_level4_L1_tst.shape- Shuffle train data - 2nd train data - Shuffle the target labels in the train data so that replicates of the same compound/MOA have different MOA labelsdef create_shuffle_data(df_trn, target_cols): """Create shuffled train data where the replicates of each compound are given wrong target labels""" df_trn_cpy = df_trn.copy() df_trn_tgts = df_trn_cpy[target_cols].copy() rand_df = pd.DataFrame(np.random.permutation(df_trn_tgts), columns =df_trn_tgts.columns.tolist()) df_trn_cpy.drop(target_cols, axis = 1, inplace = True) df_trn_cpy = pd.concat([df_trn_cpy, rand_df], axis = 1) return df_trn_cpy target_cols = df_moa_targets.columns[1:] df_lvl4_cp_trn_shuf = create_shuffle_data(df_level4_cp_trn, target_cols) df_lvl4_L1_trn_shuf = create_shuffle_data(df_level4_L1_trn, target_cols) df_lvl4_cp_trn_shuf.shape df_lvl4_L1_trn_shuf.shape- Save to CSVdef save_to_csv(df, path, file_name, compress=None): """saves dataframes to csv""" if not os.path.exists(path): os.mkdir(path) df.to_csv(os.path.join(path, file_name), index=False, compression=compress) save_to_csv(df_level4_cp_trn, "model_data/cp/", f'train_lvl4_data{file_indicator}.csv.gz', compress="gzip") save_to_csv(df_level4_cp_tst, "model_data/cp/", f'test_lvl4_data{file_indicator}.csv.gz', compress="gzip") save_to_csv(df_lvl4_cp_trn_shuf, "model_data/cp/", f'train_shuffle_lvl4_data{file_indicator}.csv.gz', compress="gzip") save_to_csv(df_level4_L1_trn, "model_data/L1/", 'train_lvl4_data.csv.gz', compress="gzip") save_to_csv(df_level4_L1_tst, "model_data/L1/", 'test_lvl4_data.csv.gz', compress="gzip") save_to_csv(df_lvl4_L1_trn_shuf, "model_data/L1/", 'train_shuffle_lvl4_data.csv.gz', compress="gzip") save_to_csv(df_moa_targets, "model_data/cp/", f'target_labels{file_indicator}.csv') save_to_csv(df_moa_targets, "model_data/L1/", 'target_labels.csv')submitted by Part 1 - training CBOW and Skipgram models# load library gensim (contains word2vec implementation) import gensim # ignore some warnings (probably caused by gensim version) import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) import multiprocessing cores = multiprocessing.cpu_count() # Count the number of cores from tqdm import tqdm # importing needed libs import os import re import nltk import pickle import scipy import numpy as np from bs4 import BeautifulSoup as bs from sklearn.metrics.pairwise import cosine_similarity from sklearn.feature_extraction.text import TfidfVectorizer import 
matplotlib.pyplot as plt # downloading needed data nltk.download('stopwords') nltk.download('wordnet') nltk.download('punkt') nltk.download('averaged_perceptron_tagger') from google.colab import drive drive.mount('/content/drive') ! mkdir data ! cp 'drive/MyDrive/IRLAB/A3/FIRE_Dataset_EN_2010.rar' './data/FIRE_Dataset_EN_2010.rar' > nul ! unrar x data/FIRE_Dataset_EN_2010.rar data > nul ! tar -xvf './data/FIRE_Dataset_EN_2010/English-Data.tgz' -C './data/FIRE_Dataset_EN_2010/' > nul class DataReader: def read_and_process(self, data_dir): # stopwords stopwords = set(nltk.corpus.stopwords.words('english')) # wordnet lemmatizer stemmer = nltk.stem.PorterStemmer() file_names = [] text_tokens = [] i = 0 # iterating over 2004, 2005, 2006, 2007 etc dirs for dir in tqdm(os.listdir(data_dir)): dir_name = os.path.join(data_dir,dir) # iterating over bengal, business, foreign etc dirs for sub_dir in os.listdir(dir_name): sub_dir_name = os.path.join(dir_name,sub_dir) data_files = os.listdir(sub_dir_name) for f in data_files: f_name = os.path.join(sub_dir_name,f) with open(f_name,'r') as fobj: content = fobj.read() soup = bs(content, "lxml") # find text tag temp_text_data = soup.find('text').text # converting text to lower case temp_text_data = temp_text_data.lower() # removing numbers and special chars temp_text_data = re.sub(r'[^\w\s]', '', temp_text_data) temp_text_data = re.sub(r'\d+', '', temp_text_data) # tokens tokens = nltk.word_tokenize(temp_text_data) # removing stopwords tokens = [token for token in tokens if token not in stopwords] # lemmatizing tokens = list(map(stemmer.stem,tokens)) # removing empty files if len(tokens) > 0: text_tokens.append(tokens) file_names.append(f) if i%5000==0: print(i, ' - ', f) i += 1 # list of tokens, list of file names return text_tokens, file_names data_dir = "./data/FIRE_Dataset_EN_2010/TELEGRAPH_UTF8/" dr = DataReader() text_tokens, file_names = dr.read_and_process(data_dir) for sentence in text_tokens[30:40]: print(sentence)['telegraph', 'calcutta', 'sport', 'citi', 'mohammedan', 'sport', 'defend', 'habibur', 'rahman', 'mondal', 'hospitalis', 'late', 'tuesday', 'owe', 'dehydr', 'habibur', 'collaps', 'soon', 'match', 'eastern', 'railway', 'admit', 'nurs', 'home', 'barasat', 'learnt', 'fast', 'month', 'ramadan', 'condit', 'stabl', 'accord', 'club', 'offici', 'sourav', 'kothari', 'made', 'last', 'four', 'stage', 'west', 'bengal', 'open', 'snooker', 'meet', 'brc', 'tuesday', 'defeat', 'niraj', 'khemka', 'quarter', 'final', 'aditya', 'goenka', 'beat', 'shah', 'baaz', 'khan', 'shyam', 'jagtiani', 'account', 'srivardhan', 'poddar', 'footbal', 'cfl', 'premier', 'divis', 'east', 'bengal', 'vs', 'mohun', 'bagan', 'salt', 'lake', 'stadium', 'pm', 'snooker', 'west', 'bengal', 'state', 'open', 'brc', 'semifin', 'pm'] ['telegraph', 'calcutta', 'sport', 'lack', 'commit', 'show', 'french', 'display', 'talk', 'tactic', 'pk', 'banerje', 'mix', 'feel', 'watch', 'franc', 'held', 'south', 'korea', 'sunday', 'night', 'one', 'h[...]cbow model# CBOW Model w2v_model = gensim.models.Word2Vec(min_count=20, window=5, size=100, sample=6e-5, alpha=0.03, min_alpha=0.0007, negative=20, workers=cores-1, sg=0 ) w2v_model.build_vocab(text_tokens, progress_per=10000) w2v_model.train(text_tokens, total_examples=w2v_model.corpus_count, epochs=5, report_delay=1) w2v_model.init_sims(replace=True) # word vectors are stored in model.wv print("Size of the vocabulary: %d number of unique words have been considered" % len(w2v_model.wv.vocab)) example_word = 'woman' print("\nWord vector of " + 
example_word) print(w2v_model.wv[example_word].size) print(w2v_model.wv[example_word]) print("\nWords with most similar vector representations to " + example_word) print(w2v_model.wv.most_similar(example_word)) # similarity directly: print("\nCosine similarity to other words:") print(w2v_model.similarity('woman','man')) print(w2v_model.similarity('woman','tree')) # words most similar to "man" w2v_model.wv.most_similar("man") # words most similar to "politician" w2v_model.wv.most_similar("politician") w2v_model.wv.most_similar(positive=["king", "girl"], negative=["queen"], topn=10) import numpy as np labels = [] count = 0 max_count = 50 X = np.zeros(shape=(max_count, len(w2v_model['car']))) for term in w2v_model.wv.vocab: X[count] = w2v_model[term] labels.append(term) count+= 1 if count >= max_count: break # It is recommended to use PCA first to reduce to ~50 dimensions from sklearn.decomposition import PCA pca = PCA(n_components=50) X_50 = pca.fit_transform(X) # Using TSNE to further reduce to 2 dimensions from sklearn.manifold import TSNE model_tsne = TSNE(n_components=2, random_state=0) Y = model_tsne.fit_transform(X_50) # Show the scatter plot import matplotlib.pyplot as plt plt.scatter(Y[:,0], Y[:,1], 20) # Add labels for label, x, y in zip(labels, Y[:, 0], Y[:, 1]): plt.annotate(label, xy = (x,y), xytext = (0, 0), textcoords = 'offset points', size = 10) plt.show()/usr/local/lib/python3.7/dist-packages/sklearn/manifold/_t_sne.py:783: FutureWarning: The default initialization in TSNE will change from 'random' to 'pca' in 1.2. FutureWarning, /usr/local/lib/python3.7/dist-packages/sklearn/manifold/_t_sne.py:793: FutureWarning: The default learning rate in TSNE will change from 200.0 to 'auto' in 1.2. FutureWarning,skipgram model# SkipGram Model w2v_model = gensim.models.Word2Vec(min_count=20, window=5, size=100, sample=6e-5, alpha=0.03, min_alpha=0.0007, negative=20, workers=cores-1, sg=1 ) w2v_model.build_vocab(text_tokens, progress_per=10000) w2v_model.train(text_tokens, total_examples=w2v_model.corpus_count, epochs=5, report_delay=1) w2v_model.init_sims(replace=True) # word vectors are stored in model.wv print("Size of the vocabulary: %d number of unique words have been considered" % len(w2v_model.wv.vocab)) example_word = 'woman' print("\nWord vector of " + example_word) print(w2v_model.wv[example_word].size) print(w2v_model.wv[example_word]) print("\nWords with most similar vector representations to " + example_word) print(w2v_model.wv.most_similar(example_word)) # similarity directly: print("\nCosine similarity to other words:") print(w2v_model.similarity('woman','man')) print(w2v_model.similarity('woman','tree')) w2v_model.wv.most_similar("man") w2v_model.wv.most_similar("politician") w2v_model.wv.most_similar(positive=["king", "girl"], negative=["queen"], topn=10) # probably not enough data? 
import numpy as np labels = [] count = 0 max_count = 50 X = np.zeros(shape=(max_count, len(w2v_model['car']))) for term in w2v_model.wv.vocab: X[count] = w2v_model[term] labels.append(term) count+= 1 if count >= max_count: break # It is recommended to use PCA first to reduce to ~50 dimensions from sklearn.decomposition import PCA pca = PCA(n_components=50) X_50 = pca.fit_transform(X) # Using TSNE to further reduce to 2 dimensions from sklearn.manifold import TSNE model_tsne = TSNE(n_components=2, random_state=0) Y = model_tsne.fit_transform(X_50) # Show the scatter plot import matplotlib.pyplot as plt plt.scatter(Y[:,0], Y[:,1], 20) # Add labels for label, x, y in zip(labels, Y[:, 0], Y[:, 1]): plt.annotate(label, xy = (x,y), xytext = (0, 0), textcoords = 'offset points', size = 10) plt.show()/usr/local/lib/python3.7/dist-packages/sklearn/manifold/_t_sne.py:783: FutureWarning: The default initialization in TSNE will change from 'random' to 'pca' in 1.2. FutureWarning, /usr/local/lib/python3.7/dist-packages/sklearn/manifold/_t_sne.py:793: FutureWarning: The default learning rate in TSNE will change from 200.0 to 'auto' in 1.2. FutureWarning,Part 2 - Training token classification modelsimport nltk nltk.download('opinion_lexicon') from nltk.corpus import opinion_lexicon import gensim.downloader[nltk_data] Downloading package opinion_lexicon to /root/nltk_data... [nltk_data] Unzipping corpora/opinion_lexicon.zip.preparing datapositives = list(opinion_lexicon.positive()) negatives = list(opinion_lexicon.negative()) positives = [(tok, 1) for tok in positives ] negatives = [(tok, 0) for tok in negatives ] data = positives + negatives final_dataset = [] categories = [] for word, category in data: try: emb = wv_model.wv[word] final_dataset.append(emb) categories.append(category) except: continue/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:6: DeprecationWarning: Call to deprecated `wv` (Attribute will be removed in 4.0.0, use self instead).SVCfrom sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(final_dataset, categories, test_size=0.25, stratify=categories) wv_model = gensim.downloader.load('glove-twitter-100') from sklearn.svm import SVC svc = SVC() svc.fit(x_train, y_train) print(f'Score: {svc.score(x_test, y_test)}')Score: 0.8982808022922636Feed Forward Neural net for classificationimport numpy as np import tensorflow as tf from tensorflow.keras.models import Model from tensorflow.keras.layers import Input, Dense from tensorflow.keras.metrics import Precision, Recall vector_size = len(x_train[0]) batch_size = 64 epochs = 20 def NN(input_size, activation): inputs = Input(shape=(input_size, )) x = Dense(64, activation=activation)(inputs) x = Dense(32, activation=activation)(x) x = Dense(16, activation=activation)(x) outputs = Dense(1, activation='sigmoid')(x) return Model(inputs = inputs, outputs = outputs, name='token_classification') model = NN(100, 'relu') model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[Precision(), Recall()]) H = model.fit(np.array(x_train), np.array(y_train), batch_size=batch_size, epochs=epochs, validation_split=0.1) l, p, r = model.evaluate(np.array(x_test), np.array(y_test)) print(f'F1: {2 * p * r/ (p+r)}')F1: 0.8050458604580735Initiation à la programmation réseau Les sockets ou «connecteurs» Une **socket** - qu'on peut traduire par «prise» ou «connecteur» - est un objet qui représente un **canal de communication** d'un «point» vers un «autre». 
The standard Python `socket` module makes it possible to create such objects and therefore to write **client-server applications** on top of TCP or UDP.
```python
from socket import socket
```
*Note*: by default, a *socket* uses the *IPv4* and *TCP* protocols.
Overview of a network application
Keep in mind that such an application is made of two programs: the **server** and the **client**. Here is the schematic organisation of a network application, which involves three communication channels, or sockets:
- socket `i`: from the *server* to a *network interface* (used to establish a connection with the client),
- socket `c`: from the *server* to a *client* (once that client has connected),
- socket `s`: from the *client to the server*.
In short, here is the logic of the code that produces these three sockets:
- server side - socket `i`: `i = socket(); i.bind( (ip, port) ); i.listen()`
- server side - socket `c`: `c, adr_c = i.accept()`
- client side - socket `s`: `s = socket(); s.connect( (ip, port) )`
The sockets `c` and `s` are used for the client/server communication:
- to **send** data with `.send()`,
- and to **receive** data with `.recv()`.
The exchanged data - the `bytes` type
Only **sequences of bytes**, which correspond to the Python type `bytes`, can be exchanged over the network.
txt, so1, so2 = 'salut', b'salut', b'\x73\x61\x6c\x75\x74'
print(f"txt == so1? {txt == so1}, so1 == so2? {so1 == so2}")
print(f"type txt? {type(txt)}, type so1? {type(so1)}")
for o in so1: # and for so2?
    print(f"{chr(o)} - {o} - {o:b} - {o:x}")
*Reminder* - ASCII table:

| | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | A | B | C | D | E | F |
|-------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| **0** | `NUL` | `SOH` | `STX` | `ETX` | `EOT` | `ENQ` | `ACK` | `BEL` | `BS` | `HT` | `LF` | `VT` | `FF` | `CR` | `SO` | `SI` |
| **1** | `DLE` | `DC1` | `DC2` | `DC3` | `DC4` | `NAK` | `SYN` | `ETB` | `CAN` | `EM` | `SUB` | `ESC` | `FS` | `GS` | `RS` | `US` |
| **2** | `SP` | ! | " | # | $ | % | & | ' | ( | ) | * | + | , | - | . | / |
| **3** | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | : | ; | < | = | > | ? |
| **4** | @ | A | B | C | D | E | F | G | H | I | J | K | L | M | N | O |
| **5** | P | Q | R | S | T | U | V | W | X | Y | Z | [ | \ | ] | ^ | _ |
| **6** | ` | a | b | c | d | e | f | g | h | i | j | k | l | m | n | o |
| **7** | p | q | r | s | t | u | v | w | x | y | z | { | \| | } | ~ | `DEL` |

In the byte sequence `b'salut'`, each *character* corresponds to one **byte**: its ASCII code point (between 0 and 127).
`str.encode()` and `bytes.decode()`
At this stage we will keep in mind two methods for going from `str` to `bytes` and back:
- `str` to `bytes`: `str.encode()` returns the **sequence of bytes** that corresponds to the string for the given encoding,
ex = "cela coûte 10€"
print("utf8:", ex.encode("utf8"))
print("default:", ex.encode()) # what is the default encoding?
print("latin9:", ex.encode("latin9")) # what happens if you use latin1 or ascii instead? why?
- `bytes` to `str`: `bytes.decode()` returns the string associated with the byte sequence for the given encoding.
octets = b"10\xe2\x82\xac"
chaine = octets.decode() # utf-8 by default!
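# --- Illustrative sketch (not from the original notebook): a minimal echo server
# --- and client that tie together the calls described above (bind/listen/accept
# --- on the server side, connect on the client side, send/recv for the data).
# --- The host and port below are arbitrary assumptions; run the two functions
# --- in two separate processes (or notebooks).
from socket import socket

def echo_server(port=7777):
    i = socket()                        # listening socket
    i.bind(('localhost', port))
    i.listen()
    c, adr_c = i.accept()               # socket towards the connected client
    data = c.recv(1024)                 # bytes coming from the client
    c.send(data.upper())                # reply, still bytes
    c.close()
    i.close()

def echo_client(port=7777):
    s = socket()
    s.connect(('localhost', port))
    s.send('salut'.encode())            # str -> bytes before sending
    print(s.recv(1024).decode())        # bytes -> str after receiving
    s.close()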
print(chaine)
Relu
X = tf.Variable(tf.random.normal((10,1))) with tf.GradientTape() as gt: activations = tf.nn.relu(X) gradients = gt.gradient(activations,X) plt.scatter(X.numpy(),activations.numpy()) plt.scatter(X.numpy(),gradients)
Sigmoid
X = tf.Variable(tf.linspace(-10,10,40)) with tf.GradientTape() as gt: activations = tf.nn.sigmoid(X) gradients = gt.gradient(activations,X) plt.scatter(X.numpy(),activations.numpy()) plt.scatter(X.numpy(),gradients)
MLP from Scratch
from tensorflow.keras import datasets fashion_mnist = datasets.fashion_mnist (train_x,train_y),(test_x,test_y) = fashion_mnist.load_data()
Flatten and normalize train and test
train_x, test_x = [tf.reshape(i,(-1,28*28))/255 for i in [train_x, test_x]] train_dataset = tf.data.Dataset.from_tensor_slices((train_x,train_y)).batch(32)
Create Model From Scratch
hidden_layer_units = 20 w1 = tf.Variable(tf.random.normal((train_x.shape[-1],hidden_layer_units))) b1 = tf.Variable(tf.random.normal((1,))) w2 = tf.Variable(tf.random.normal((hidden_layer_units,10))) b2 = tf.Variable(tf.random.normal((1,))) def relu(x): zeros = tf.zeros_like(x) return tf.math.maximum(x,zeros) def forward_pass(x): y1 = tf.tensordot(x,w1,1)+b1 y1_act = relu(y1) y2 = tf.tensordot(y1_act,w2,1)+b2 return y2 loss = tf.nn.sparse_softmax_cross_entropy_with_logits optimizer = tf.keras.optimizers.Adam(0.01) for i in range(10): for samples, y_labels in train_dataset: y_labels = tf.cast(y_labels,tf.int32) with tf.GradientTape() as gt: op = forward_pass(samples) y_loss = loss(y_labels,op) z_loss = tf.reduce_sum(y_loss) grads = gt.gradient(z_loss,[w1,b1,w2,b2]) optimizer.apply_gradients(zip(grads,[w1,b1,w2,b2])) print(z_loss)
tf.Tensor(28.801332, shape=(), dtype=float32) tf.Tensor(19.41587, shape=(), dtype=float32) tf.Tensor(17.861305, shape=(), dtype=float32) tf.Tensor(15.241432, shape=(), dtype=float32) tf.Tensor(15.054805, shape=(), dtype=float32) tf.Tensor(13.348408, shape=(), dtype=float32) tf.Tensor(12.871298, shape=(), dtype=float32) tf.Tensor(13.447802, shape=(), dtype=float32) tf.Tensor(13.293612, shape=(), dtype=float32) tf.Tensor(13.184313, shape=(), dtype=float32)
Unsupervised Learning for Physical Interaction through Video Prediction
- Authors: , , .
- Year: 2016
Abstract
Based on an initial state (e.g. the position of the robot) and an action to execute (e.g. push a block from x1, y1 to x2, y2), predict the physical interaction before executing the action.
Models
The models learn physics rather than object appearance, which makes them able to generalize to unseen objects. Three models are proposed in this paper:
1. Dynamic Neural Advection (DNA)
 - For pixels that are constrained to a local region;
 - Outputs a distribution over locations in the previous frame for each pixel in the new frame;
 - The predicted pixel value becomes the expectation under that distribution.
2. Convolutional Dynamic Neural Advection (CDNA)
 - A variant of DNA;
 - Outputs multiple normalized convolution kernels to apply to the previous image to compute new pixel values.
3. Spatial Transformer Predictors (STP)
 - Outputs the parameters of multiple affine transformations to apply to the previous image;
 - The predicted transformations handle separate objects.
Initial decisions by the authors
- Why are there skip connections from convLSTM1 to conv2 and from convLSTM3 to convLSTM7 instead of others, such as LSTM4 to LSTM7?
- Why is state_action concatenated with convLSTM4 instead of another layer?
- Why is the CDNA kernel output after convLSTM5 instead of another layer?
* "Those decisions were somewhat arbitrary and we did not see significant changes in performance from making small changes to the architecture." [GitHub Issue 681](https://github.com/tensorflow/models/issues/681) * "Generally, we chose the skip connections and the concatenations based on the dimensionality of the layers. For example, the state and action are concatenated at the lowest dimensional layers, and the skip connections are performed between layers of the same dimension." [GitHub Issue 681](https://github.com/tensorflow/models/issues/681) - Are the hyperparameters used in the paper the same as the default options in prediction_train.py? * "For the paper, I downsampled with PIL's antialiasing method, outside of tensorflow. In this code, the images are downsampled in tensorflow, using bicubic interpolation. This isn't a great option, as it causes the images to be a bit pixelated. A convolution-based downsampling would be a better option." [GitHub Issue 553](https://github.com/tensorflow/models/issues/553) * "I use layer norm after every layer, which I didn't do in the paper. I think this only makes things more stable." [GitHub Issue 553](https://github.com/tensorflow/models/issues/553) * Train/val split is different from what I used." [GitHub Issue 553](https://github.com/tensorflow/models/issues/553) * "The PSNR calculation that is saved in a scalar summary is not quite correct. It is done for an entire batch of images, but should be done for each image independently and then averaged. This is pretty easy to fix." [GitHub Issue 553](https://github.com/tensorflow/models/issues/553) Results No results were provided in the litterature. Only for the exception of model and loss comparison after a prediction. The authors change the "time_step" (number of frame to predict) to obtain different results.But some insight was provided inside the issues of the GitHub repository of the code:* "On a standard NVIDIA Titan X GPU, full training (**100k iterations**) should take a few days, depending on the model options." [GitHub Issue 537](https://github.com/tensorflow/models/issues/537)* That said, you should see **reasonable predictions after only 15k iterations.**" [GitHub Issue 537](https://github.com/tensorflow/models/issues/537) Architecture The core trunk of each models is made of: 1. One 5x5 convolution with a stride of 2; 2. Seven convolutional LSTMS; 3. A full-resolution mask for compositing the various transformed predictions (CDNA and STP only); 4. **** Two skip connections exists in the network to preserve high-resolution informations: - LSTM 1 to convolution 2; - LSTM 3 to LSTM 7. The main differences between the three models are: 1. CDNA: - Ten filters of size 5 x 5 are created in the transormation area, they're normalized to via a spatial softmax; - **** The spatial is used to return the expected pixiel location of the new image based on the distribution of the filters over the previous image - The transformations corresponds to a convolution 2. STP: - Ten filters of size 3 x 2 of affine transformation matrices (with a [spatial transformer](https://arxiv.org/abs/1506.02025)) - The transformations are applied to the preceding image to create 10 separate transformed images - The transformations correspond to an affine transformation 3. 
DNA: - No filters for the transformation - The transformation parameters are outputted at the last layer, in the same place as the mask - The transformation correspond to a 5 x 5 convolutional kernel Non-deterministic behaviour in TensorFlow For some operation, TensorFlow is non-deterministic (the same result is not guaranteed at each iteration) for both the GPU and the CPU. Especialy, in our case, the function "reduce_sum" may have a different behaviour at each executions. To test that, the code bellow was produced.See this [issue](https://github.com/tensorflow/tensorflow/issues/3103) on GitHubdef debug(tensor, session, feed_dict): arr = tensor.eval(session=session, feed_dict=feed_dict) return arr[0][0][0][0][0] # Print the first element stop = 1500 i = 0 while i < stop: norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True) Debug.push(norm_factor, "Norm factor 1", deb) i += 1 # cdna_kerns is an array of shape (32, 5, 5, 1, 10) # The average result at arr[0][0][0][0][0] is "", min is "" and max is "" # The result is supposed to be 19.780447TensorFlow to Chainer and Numpy conversion Bellow are the conversion for TensorFlow's functions to Chainer's and Numpy's functionsimport numpy as np import tensorflow as tf from tensorflow.contrib.layers.python import layers as tf_layers import chainer as chainer def print_tf_shape(tensor): print("[TF] Shape is {}".format(tensor.get_shape())) def print_ch_shape(variable): print("[Chainer] Shape is {}".format(variable.shape)) def print_np_shape(array): print("[Numpy] Shape is {}".format(array)) # Create a Tensor/Variable x = np.arange(9.0) tf_res = tf.constant(x) ch_res = chainer.variable.Variable(x) print_tf_shape(tf_res) print_ch_shape(ch_res) # Split an array into multiple sub-arrays x = np.random.randint(0, 255,(2,6)) tf_res = tf.split(axis=1, num_or_size_splits=2, value=x) ch_res = chainer.functions.split_axis(chainer.variable.Variable(x), indices_or_sections=2, axis=1) print(len(tf_res)) print_tf_shape(tf_res[0]) print_tf_shape(tf_res[1]) print(len(ch_res)) print_ch_shape(ch_res[0]) print_ch_shape(ch_res[1]) # Join a sequence of arrays along an existing axis x = np.random.randint(0,255, (32, 32, 32, 32)) y = np.random.randint(0,255, (32, 32, 32, 32)) tf_res = tf.concat(axis=0, values=[tf.constant(x), tf.constant(y)]) ch_res = chainer.functions.concat((chainer.variable.Variable(x), chainer.variable.Variable(y)), axis=0) print_tf_shape(tf_res) print_ch_shape(ch_res) # Gives a new shape to an array without changing its data x = np.random.randint(0.,255., (32, 32, 32, 32)) tf_res = tf.reshape(tf.constant(x), [x.shape[0], -1]) ch_res = chainer.functions.reshape(chainer.variable.Variable(x), (x.shape[0], -1)) print_tf_shape(tf_res) print_ch_shape(ch_res) # Construct an array by repeating the number of times given by reps x = np.random.randint(0.,255., (1, 1, 1, 1)) tf_res = tf.tile(x, [2,2,2,2]) ch_res = chainer.functions.tile(x, (2,2,2,2)) print_tf_shape(tf_res) print_ch_shape(ch_res) # AdamOptimizer from chainer.links.model.vision import resnet learning_rate = 0.01 tf_res = tf.train.AdamOptimizer(learning_rate).minimize(tf.constant([])) ch_res = chainer.optimizers.Adam(alpha=learning_rate) model = resnet.ResNet50Layers() ch_res.setup(model) # ... 
ch_res.update() # 2D convolution tf_image = np.float32(np.random.randint(0.,255., (32, 64, 64, 3))) chainer_image = np.float32(np.random.randint(0.,255., (32, 3, 64, 64))) tf_res = tf.contrib.slim.layers.conv2d(tf_image, 32, [5, 5], stride=2, normalizer_fn=None) ch_res = chainer.links.Convolution2D(in_channels=3, out_channels=32, ksize=(5, 5), stride=2, pad=5/2)(chainer_image) print_tf_shape(tf_res) print_ch_shape(ch_res) # Layer normalization tf_image = np.float32(np.random.randint(0.,255., (32, 64, 64, 3))) chainer_image = np.float32(np.random.randint(0.,255., (32, 3, 64, 64))) tf_res = tf_layers.layer_norm(tf_image) ch_res = chainer.functions.reshape(chainer_image, (chainer_image.shape[0], -1)) ch_res = chainer.links.LayerNormalization()(ch_res) ch_res = chainer.functions.reshape(ch_res, (chainer_image.shape[0], chainer_image.shape[1], chainer_image.shape[2], chainer_image.shape[3])) print_tf_shape(tf_res) print_ch_shape(ch_res) # 2D Deconvolution tf_image = np.float32(np.random.randint(0.,255., (32, 64, 64, 3))) chainer_image = np.float32(np.random.randint(0.,255., (32, 3, 64, 64))) tf_res = tf.contrib.slim.layers.conv2d_transpose(tf.constant(tf_image), tf_image.shape[3], 3, stride=2) ch_res = chainer.links.Deconvolution2D(in_channels=chainer_image.shape[1], out_channels=chainer_image.shape[1], ksize=(3,3), stride=2, outsize=(chainer_image.shape[2]*2, chainer_image.shape[3]*2), pad=3/2)( chainer.variable.Variable(chainer_image) ) print_tf_shape(tf_res) print_ch_shape(ch_res) # Softmax tf_image = np.float32(np.random.randint(0.,255., (32, 64, 64, 3))) chainer_image = np.float32(np.random.randint(0.,255., (32, 3, 64, 64))) tf_res = tf.nn.softmax(tf.constant(tf_image)) ch_res = chainer.functions.softmax(chainer.variable.Variable(chainer_image)) print_tf_shape(tf_res) print_ch_shape(ch_res) # Relu tf_image = np.float32(np.random.randint(0.,255., (32, 64, 64, 3))) chainer_image = np.float32(np.random.randint(0.,255., (32, 3, 64, 64))) tf_res = tf.nn.relu(tf.constant(tf_image)) ch_res = chainer.functions.relu(chainer_image) print_tf_shape(tf_res) print_ch_shape(ch_res)[TF] Shape is (32, 64, 64, 3) [Chainer] Shape is (32, 3, 64, 64)---import numpy as np ar = np.array(l) ar---arr = np.array([2, 4, 'ankit', 2.5, True]) arr l.sort() l l.pop() l ar.mean()---ar ar[2:5] ar b = ar[2:4] b b[0] b.view---l l[0:1] l c = l[1:] c---ages = np.array([20, 21, 25, 26, 24, 30, 28]) ages ages.sort() ages _ages = ages[2:5] _ages _ages[0] _ages _ages[:] = [38, 39, 40] _ages ages _ages_copy = ages.copy() _ages_copy _ages_copy[0] = 1 _ages_copy ages---l la = l[1:] la la[:] = [11, 12] la limport tensorflow as tf from tensorflow import keras import numpy as np import time import os from datetime import datetime (train_x, train_y), (test_x, test_y) = keras.datasets.mnist.load_data() train_x = train_x / 255.0 test_x = test_x / 255.0 train_x = tf.expand_dims(train_x, 3) test_x = tf.expand_dims(test_x, 3) val_x = train_x[:5000] val_y = train_y[:5000] lenet_5_model = keras.models.Sequential([ keras.layers.Conv2D(6, kernel_size=5, strides=1, activation='tanh', input_shape=train_x[0].shape, padding='same'), keras.layers.AveragePooling2D(), keras.layers.Conv2D(16, kernel_size=5, strides=1, activation='tanh', padding='valid'), keras.layers.AveragePooling2D(), keras.layers.Flatten(), keras.layers.Dense(120, activation='tanh'), keras.layers.Dense(84, activation='tanh'), keras.layers.Dense(10, activation='softmax') ]) lenet_5_model.compile(optimizer='adam', loss=keras.losses.sparse_categorical_crossentropy, metrics=['accuracy']) 
start_time = datetime.timestamp(datetime.now()) lenet_5_model.fit(train_x, train_y, epochs=100, validation_data=(val_x, val_y)) end_time = datetime.timestamp(datetime.now()) elapsed = end_time - start_time elapsed lenet_5_model.evaluate(test_x, test_y) !pip install git+git://github.com/albertbup/deep-belief-network.git@master_gpu import numpy as np np.random.seed(0) from sklearn.datasets import load_digits from sklearn.model_selection import train_test_split from sklearn.metrics.classification import accuracy_score from datetime import datetime start_time = datetime.timestamp(datetime.now()) # from dbn.tensorflow import SupervisedDBNClassification from dbn import SupervisedDBNClassification # use "from dbn import SupervisedDBNClassification" for computations on CPU with numpy # Loading dataset digits = load_digits() X, Y = digits.data, digits.target # Data scaling X = (X / 16).astype(np.float32) # Splitting data X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0) # Training classifier = SupervisedDBNClassification(hidden_layers_structure=[256, 256], learning_rate_rbm=0.05, learning_rate=0.1, n_epochs_rbm=10, n_iter_backprop=100, batch_size=32, activation_function='relu', dropout_p=0.2) classifier.fit(X_train, Y_train) # Save the model classifier.save('model.pkl') # Restore it classifier = SupervisedDBNClassification.load('model.pkl') # Test Y_pred = classifier.predict(X_test) print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred)) end_time = datetime.timestamp(datetime.now()) elapsed = end_time - start_time elapsed
[START] Pre-training step: >> Epoch 1 finished RBM Reconstruction error 2.710084 >> Epoch 2 finished RBM Reconstruction error 1.796745 >> Epoch 3 finished RBM Reconstruction error 1.434709 >> Epoch 4 finished RBM Reconstruction error 1.201816 >> Epoch 5 finished RBM Reconstruction error 1.108185 >> Epoch 6 finished RBM Reconstruction error 0.992179 >> Epoch 7 finished RBM Reconstruction error 0.916095 >> Epoch 8 finished RBM Reconstruction error 0.919944 >> Epoch 9 finished RBM Reconstruction error 0.853218 >> Epoch 10 finished RBM Reconstruction error 0.850179 >> Epoch 1 finished RBM Reconstruction error 2.870218 >> Epoch 2 finished RBM Reconstruction error 1.499493 >> Epoch 3 finished RBM Reconstruction error 1.111499 >> Epoch 4 finished RBM Reconstruction error 0.950356 >> Epoch 5 finished RBM Reconstruction error 0.802263 >> Epoch 6 finished RBM Reconstruction error 0.677447 >> Epoch 7 finished RBM Reconstruction error 0.610802 >> Epoch 8 finished RBM Reconstructi[...]
Analysis of the results
LeNet-5

| Epochs | Training time | Loss | Accuracy | GPU |
|--------|---------------|--------|----------|-----|
| 5 | 206s | 0.0567 | 0.9810 | No |
| 5 | 21s | 0.0392 | 0.9876 | Yes |
| 10 | 416s | 0.0497 | 0.9855 | No |
| 50 | 212s | 0.0626 | 0.9859 | Yes |
| 100 | 426s | 0.0657 | 0.9885 | Yes |

DBN

| RBM epochs | ANN epochs | Training time | Loss | Accuracy |
|------------|------------|---------------|--------|----------|
| 20 | 100 | 103s | 2.2227 | 0.9805 |

Conclusion
The execution times are very close when GPUs are used for the processing, but LeNet-5 produces more consistent results in fewer iterations.
Here we import packages and define functions for pre-processing the data, including converting from netcdf to dataframe, formatting time columns and converting vapor pressure to water vapor concentration in ppm with the Ideal Gas Law.
TO DO:
- Calculate the max correlation and corresponding lag for
a whole year (365 days).- Examine within-day variation in the correlation/optimal lag- Shift WVIA time series and plot isotopesimport pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as cm import seaborn as sns import xarray as xr from sklearn.linear_model import LinearRegression from src.data import make_dataset as mkd # imports source package # OPTIONAL: Load the "autoreload" extension so that code can change %load_ext autoreload # OPTIONAL: always reload modules so that as you change code in src, it gets loaded %autoreload 2 %matplotlib inline def nc_to_df(datafile, variable_list): # open netcdf file ds = xr.open_dataset(datafile) # select 4 components of radiation selected_ds = ds[variable_list] # convert to a pandas.dataframe object selected_df = selected_ds.to_dataframe() selected_df = selected_df.reset_index().set_index(['time']) try: del selected_df['site'] except: print('Nothing to delete, dataframe is single indexed') return selected_df #not used def filter_column(df, column, threshmax=20000, threshmin=0): ''' Replaces erroneous outliers with NaN. Then, fills value with last valid value or next. Created because of huge spikes in H20_ppm. Function only works for H20_ppm at this point. Need a better filtering method. One that only filters spikes. ''' df.loc[(df[column]threshmax), column] = None return df def format_time_columns(df): '''Adds a string date column and datetime colums. Assumes that the index is called time.''' df = df.reset_index() df['strtime'] = [time.strftime('%y-%m-%d-%H-%M-%S-%f') for time in df['time']] df['datetime'] = pd.to_datetime(df['strtime'], format='%y-%m-%d-%H-%M-%S-%f') return df def volconc_to_ppm(volumeconc, P, T, R=8.31441, molecularmass=18.01528): '''Converts a concentration in metric dimensions to dimensionless ppm value using ideal gas law. Need to confirm this conversion is correct''' moleswater = volumeconc/molecularmass volumewater = moleswater*R*(T+273.15)/(P*1000) return volumewater*10**6 def rescale(inputdf, columnlist, index, offset, factor): '''Resamples the top and bottom water vapor ppm columns to given offset. See http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases for possible offsets. Divides by factor to normalize to units of ppm. Index must be included in column list. ''' df = inputdf.loc[:,columnlist] df = df.set_index(index).resample(offset).agg(np.sum)/factor return df.iloc[1:,] def detrend(series): # fit linear model X = [i for i in range(0, len(series))] X = np.reshape(X, (len(X), 1)) y = series.values model = LinearRegression() model.fit(X, y) # calculate trend trend = model.predict(X) # detrend detrended = [y[i]-trend[i] for i in range(0, len(series))] return detrended def get_best_lag(datapath, columns): ''' Gets best lag based on highest cross correlation between top and bottom ppm. 
This "best lag" is for the entire day, the optimal lag varies throughout the day.''' df= nc_to_df(datapath, columns) df['H2Oppm_top']=volconc_to_ppm(np.array(df['h2o']),np.array(df['press']), np.array(df['Ts'])) df = format_time_columns(df) if df.isna().values.sum() > 0: df = df.interpolate(method='linear') df_1sec = rescale(inputdf=df,columnlist=['H2Oppm_top','WVIA_H2Oppm','time'],index='time', offset='1S',factor=10) top_ppm_detrended = detrend(df_1sec['H2Oppm_top']) bottom_ppm_detrended = detrend(df_1sec['WVIA_H2Oppm']) (lags, corrs, line , plot) = plt.xcorr(bottom_ppm_detrended, top_ppm_detrended, maxlags=1000) plt.title(datapath_ts2013_march) maxindex = list(corrs).index(max(corrs)) print("The best lag is", lags[maxindex], "seconds with a correlation of", max(corrs)) return lags[maxindex], df def shift_WVIA(datapath, columns): '''Shifts all WVIA columns by best lag determined by cross correlation. Removes rows with NaN values due to shift.''' (bestlag, df) = get_best_lag(datapath, columns) WVIAcolumns = [x for x in df.columns if 'WVIA' in x] othercolumns = [x for x in df.columns if 'WVIA' not in x] return pd.concat([df.loc[:,othercolumns], df.loc[:,WVIAcolumns].shift(bestlag)], axis=1).dropna() #globals columns = ['WVIA_HDOoverH2O', 'WVIA_DoverH', 'WVIA_O18overO16', 'WVIA_H2O_O18_16', 'WVIA_delD', 'WVIA_del18O', 'WVIA_H2Oppm', 'e_hmp','press', 'h2o', 'Ts'] datapath_ts2013_march = '../data/raw/test/raw_MpalaTower_2013_072.nc' # shifted_df = shift_WVIA(datapath_ts2013_march,columns) def max_xcorr(x, y, normed = True, maxlags=10): '''Returns the maximum cross correlation between two 1D arrays. ''' Nx = len(x) if Nx != len(y): raise ValueError('x and y must be equal length') c = np.correlate(x, y, mode=2) if normed: c /= np.sqrt(np.dot(x, x) * np.dot(y, y)) if maxlags is None: maxlags = Nx - 1 if maxlags >= Nx or maxlags < 1: raise ValueError('maxlags must be None or strictly positive < %d' % Nx) c = c[Nx - 1 - maxlags:Nx + maxlags] return max(c) df= nc_to_df(datapath_ts2013_march, columns) df['H2Oppm_top']=volconc_to_ppm(np.array(df['h2o']),np.array(df['press']), np.array(df['Ts'])) df = format_time_columns(df) if df.isna().values.sum() > 0: df = df.interpolate(method='linear') # df['time']=np.round(df['time'].astype(np.int64), -7).astype('datetime64[ns]') df_1sec = rescale(inputdf=df,columnlist=['H2Oppm_top','WVIA_H2Oppm','time', 'e_hmp', 'press', 'Ts'],index='time', offset='1S',factor=10) df_detrended = df_1sec.apply(detrend) df_detrended df_detrended.values.shape from numpy.lib.stride_tricks import as_strided as strided # https://stackoverflow.com/questions/37447347/dataframe-representation-of-a-rolling-window/41406783#41406783 def get_sliding_window(df, W, return2D=0): a = df.values s0,s1 = a.strides m,n = a.shape #https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.lib.stride_tricks.as_strided.html #testing striding on full array #out = strided(a,shape=(m-W+1,W,n),strides=(s0,s0,s1), writeable=False) out = strided(a,shape=(m,W,n),strides=(s0,s0,s1), writeable=False) if return2D==1: return out.reshape(a.shape[0]-W+1,-1) else: return out # window of 1800 seconds, or 30 minutes rolled_df = get_sliding_window(df_detrended, 1800) top_strided = rolled_df[:,:,0:1] bottom_strided = rolled_df[:,:,1] %%time max_xcorrs = [] for i,chunk in enumerate(top_strided): top_s_reshaped = np.reshape(top_strided[i],len(top_strided[i])) bottom_s_reshaped = np.reshape(bottom_strided[i],len(bottom_strided[i])) max_xcorrs.append(max_xcorr(top_s_reshaped, bottom_s_reshaped, maxlags=100)) corr_df 
= pd.DataFrame(max_xcorrs) corr_df.to_csv('max_lag_corrs.csv', index=False)CPU times: user 54.2 s, sys: 39.9 ms, total: 54.2 s Wall time: 54.2 sTo check if the plot of the rolling max lag correlation is legit, I'm plotting in a zoom in of the lag corrs, ppm, and vapor pressure. I'm also caluclating the max lag correlation manually by subsetting the dataframe with iloc and using bounds by the number of seconds that have passed. It looks like this matches the rolling max correlations I am getting by using the strides function I pulled from stack overflow to apply the rolling max lag correlation. Zooming in closer to the ppm there looks to be some sharp disagreement and a big dip in vapor pressure and temperature.df_1sec = df_1sec.reset_index() df_detrended = df_detrended.reset_index() corr_df_merge = pd.concat([corr_df, df_detrended], axis=1).set_index('time') #subsetting series by the half hour, in units of seconds start_sec = 60*30*12 end_sec = 60*30*13 plt.figure() #this plots the rolling max lag corrs calculated below ax = corr_df_merge.iloc[start_sec:end_sec].plot(kind='line',y=0, label='Rolling Max Lag Corrs') top_prob = corr_df_merge.iloc[start_sec:end_sec]['H2Oppm_top'].values bottom_prob = corr_df_merge.iloc[start_sec:end_sec]['WVIA_H2Oppm'].values # manual calc of max lag corr at same point as start of plot of rolling max lag corrs # corresponds to a max of 500 seconds in either direction for lagging the 30 minute time series true_corr = max_xcorr(top_prob, bottom_prob, normed = True, maxlags=1000) true_time = pd.Timestamp(year=2013, month=3, day =13, hour=6, minute=0) point = pd.DataFrame({'x': [true_time], 'y': [true_corr]}) point.plot(x='x', y='y', ax=ax, style='bx', label='Manual max Lag Correlation, from here to 30 min') plt.title('Test Calculation of Cross Correlation (at lowest Cross Correlations)') plt.figure() corr_df_merge.iloc[start_sec:end_sec].plot(kind='line',y=['H2Oppm_top','WVIA_H2Oppm']) plt.title('Water Vapor (ppm) Detrended') corr_df_merge.iloc[start_sec:end_sec].plot(kind='line',y=['Ts']) plt.title('Temperature Detrended') corr_df_merge.iloc[start_sec:end_sec].plot(kind='line',y=['press']) plt.title('Pressure Detrended') top_prob = corr_df_merge.iloc[start_sec:end_sec]['H2Oppm_top'].values bottom_prob = corr_df_merge.iloc[start_sec:end_sec]['WVIA_H2Oppm'].values max_xcorr(top_prob, bottom_prob, normed = True, maxlags=1000) corr_df_merge.iloc[start_sec:end_sec].plot(kind='line',y=['e_hmp']) plt.title('Vapor Pressure (Kpa) Detrended') df_1sec = df_1sec.reset_index() df_detrended = df_detrended.reset_index() corr_df_merge = pd.concat([corr_df, df_detrended], axis=1).set_index('time') corr_df_merge.plot(kind='line',y=0) plt.ylabel('Half-Hourly Max Lag Correlations') plt.xlabel('Time, UTC') plt.title('WVIA and LiCOR Max Lag Correlations') corr_df_merge.plot(kind='line',y=['e_hmp']) plt.ylabel('ppm') plt.xlabel('Time, UTC') plt.title('relative humidity') corr_df_merge.plot(kind='line',y=['Ts']) plt.ylabel('humidity') plt.xlabel('Time, UTC') plt.title('air temperature') corr_df_merge.plot(kind='line',y=['H2Oppm_top','WVIA_H2Oppm']) plt.ylabel('ppm') plt.xlabel('Time, UTC') plt.title('water vapor ppm') corr_df_merge.columns import plotly.plotly as py import plotly.graph_objs as go corr_df_merge = corr_df_merge.reset_index() tracetop = go.Scattergl( x=corr_df_merge['time'], y=corr_df_merge['H2Oppm_top'], mode = 'markers' ) tracebottom = go.Scattergl( x=corr_df_merge['time'], y=corr_df_merge['WVIA_H2Oppm'], mode = 'markers' ) data=[tracetop, tracebottom] 
py.iplot(data,filename='basic-scatter') df_dt.rolling(window=1800).corr() # can't do lag correlation with pandas methods # see this issue: maybe try rolling apply https://stackoverflow.com/questions/21025821/python-custom-function-using-rolling-apply-for-pandas/21026837#21026837 # and this https://stackoverflow.com/questions/37486502/why-does-pandas-rolling-use-single-dimension-ndarray/37491779#37491779 rollcorrs df_dt.resample(offset='30T').apply() #https://plot.ly/python/getting-started/ #No paid account required, Scattergl is required for big data as opposed to Scatter import plotly.plotly as py import plotly.graph_objs as go trace = go.Scattergl( x=shifted_df['time'], y=shifted_df['WVIA_delD','WVIA_'], mode = 'markers' ) data=[trace] py.iplot(data,filename='basic-scatter')High five! You successfully sent some data to your account on plotly. View your plot in your browser at https://plot.ly/~rbavery/0 or inside your plot.ly account where it is named 'basic-scatter'Old code that might still be useful# unused def check_negatives(df): for val in df.value: if val < 0: print('We got a negative!') print('Done checking for negatives') return def big_changes_filter(): brf['difference'] = brf.diff(periods=1).value # the .diff method gets rate of change for i, diffval in enumerate(brf.difference): if abs(diffval) > 6: brf.value[i] = None brf.difference[i] = None if abs(brf.value[i]) > 40: brf.value[i] = None def std_filter(): stddev= np.sqrt(np.sum((brf.difference-brf.difference.mean())**2)/len(brf.difference)) brf['stddevs']=brf.difference/stddev for i, std in enumerate(brf.stddevs): if abs(std) > 4: brf['value'][i] = None brf['difference'][i] = NoneIdea, use cross spectrum analysis to look at how the top and bottom ppm vary together over large time scales. Cross spectrum will be useful for more interesting questions as well.import numpy from matplotlib import pyplot import pycwt as wavelet from pycwt.helpers import find # Then, we load the dataset and define some data related parameters. In this # case, the first 19 lines of the data file contain meta-data, that we ignore, # since we set them manually (*i.e.* title, units). # url = 'http://paos.colorado.edu/research/wavelets/wave_idl/nino3sst.txt' # dat = numpy.genfromtxt(url, skip_header=19) # title = 'NINO3 Sea Surface Temperature' # label = 'NINO3 SST' # units = 'degC' # t0 = 1871.0 # dt = 0.25 # In years dat = numpy.array(WVIAdf5sec['WVIA_H2Oppm']) # sub in my array title = 'Wavelet Power Spectra of Bottom Tower Water Vapor in ppm' label = 'Bottom Tower WV' units = 'ppm' t0 = 0 dt = 30 # In seconds # We also create a time array in years. N = dat.size t = numpy.arange(0, N) * dt + t0 # We write the following code to detrend and normalize the input data by its # standard deviation. Sometimes detrending is not necessary and simply # removing the mean value is good enough. However, if your dataset has a well # defined trend, such as the Mauna Loa CO\ :sub:`2` dataset available in the # above mentioned website, it is strongly advised to perform detrending. # Here, we fit a one-degree polynomial function and then subtract it from the # original data. p = numpy.polyfit(t - t0, dat, 1) dat_notrend = dat - numpy.polyval(p, t - t0) std = dat_notrend.std() # Standard deviation var = std ** 2 # Variance dat_norm = dat_notrend / std # Normalized dataset # The next step is to define some parameters of our wavelet analysis. We # select the mother wavelet, in this case the Morlet wavelet with # :math:`\omega_0=6`. 
mother = wavelet.Morlet(6) s0 = 2 * dt # Starting scale, in this case 2 * 0.25 years = 6 months dj = 1 / 12 # Twelve sub-octaves per octaves J = 7 / dj # Seven powers of two with dj sub-octaves alpha, _, _ = wavelet.ar1(dat) # Lag-1 autocorrelation for red noise # The following routines perform the wavelet transform and inverse wavelet # transform using the parameters defined above. Since we have normalized our # input time-series, we multiply the inverse transform by the standard # deviation. wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(dat_norm, dt, dj, s0, J, mother) iwave = wavelet.icwt(wave, scales, dt, dj, mother) * std # We calculate the normalized wavelet and Fourier power spectra, as well as # the Fourier equivalent periods for each wavelet scale. power = (numpy.abs(wave)) ** 2 fft_power = numpy.abs(fft) ** 2 period = 1 / freqs # We could stop at this point and plot our results. However we are also # interested in the power spectra significance test. The power is significant # where the ratio ``power / sig95 > 1``. signif, fft_theor = wavelet.significance(1.0, dt, scales, 0, alpha, significance_level=0.95, wavelet=mother) sig95 = numpy.ones([1, N]) * signif[:, None] sig95 = power / sig95 # Then, we calculate the global wavelet spectrum and determine its # significance level. glbl_power = power.mean(axis=1) dof = N - scales # Correction for padding at edges glbl_signif, tmp = wavelet.significance(var, dt, scales, 1, alpha, significance_level=0.95, dof=dof, wavelet=mother) # We also calculate the scale average between 2 years and 8 years, and its # significance level. # sel = find((period >= 2) & (period < 8)) sel = find((period >= 120) & (period < 1800)) #substitute my own periods of interest Cdelta = mother.cdelta scale_avg = (scales * numpy.ones((N, 1))).transpose() scale_avg = power / scale_avg # As in Torrence and Compo (1998) equation 24 scale_avg = var * dj * dt / Cdelta * scale_avg[sel, :].sum(axis=0) scale_avg_signif, tmp = wavelet.significance(var, dt, scales, 2, alpha, significance_level=0.95, dof=[scales[sel[0]], scales[sel[-1]]], wavelet=mother) # Finally, we plot our results in four different subplots containing the # (i) original series anomaly and the inverse wavelet transform; (ii) the # wavelet power spectrum (iii) the global wavelet and Fourier spectra ; and # (iv) the range averaged wavelet spectrum. In all sub-plots the significance # levels are either included as dotted lines or as filled contour lines. # Prepare the figure pyplot.close('all') pyplot.ioff() figprops = dict(figsize=(11, 8), dpi=72) fig = pyplot.figure(**figprops) # First sub-plot, the original time series anomaly and inverse wavelet # transform. ax = pyplot.axes([0.1, 0.75, 0.65, 0.2]) ax.plot(t, iwave, '-', linewidth=1, color=[0.5, 0.5, 0.5]) ax.plot(t, dat, 'k', linewidth=1.5) ax.set_title('a) {}'.format(title)) ax.set_ylabel(r'{} [{}]'.format(label, units)) # Second sub-plot, the normalized wavelet power spectrum and significance # level contour lines and cone of influece hatched area. Note that period # scale is logarithmic. 
bx = pyplot.axes([0.1, 0.37, 0.65, 0.28], sharex=ax) levels = [0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16] bx.contourf(t, numpy.log2(period), numpy.log2(power), numpy.log2(levels), extend='both', cmap=pyplot.cm.viridis) extent = [t.min(), t.max(), 0, max(period)] bx.contour(t, numpy.log2(period), sig95, [-99, 1], colors='k', linewidths=2, extent=extent) bx.fill(numpy.concatenate([t, t[-1:] + dt, t[-1:] + dt, t[:1] - dt, t[:1] - dt]), numpy.concatenate([numpy.log2(coi), [1e-9], numpy.log2(period[-1:]), numpy.log2(period[-1:]), [1e-9]]), 'k', alpha=0.3, hatch='x') bx.set_title('b) {} Wavelet Power Spectrum ({})'.format(label, mother.name)) bx.set_ylabel('Period (seconds)') # Yticks = 2 ** numpy.arange(numpy.ceil(numpy.log2(period.min())), numpy.ceil(numpy.log2(period.max()))) bx.set_yticks(numpy.log2(Yticks)) bx.set_yticklabels(Yticks) # Third sub-plot, the global wavelet and Fourier power spectra and theoretical # noise spectra. Note that period scale is logarithmic. cx = pyplot.axes([0.77, 0.37, 0.2, 0.28], sharey=bx) cx.plot(glbl_signif, numpy.log2(period), 'k--') cx.plot(var * fft_theor, numpy.log2(period), '--', color='#cccccc') cx.plot(var * fft_power, numpy.log2(1./fftfreqs), '-', color='#cccccc', linewidth=1.) cx.plot(var * glbl_power, numpy.log2(period), 'k-', linewidth=1.5) cx.set_title('c) Global Wavelet Spectrum') cx.set_xlabel(r'Power [({})^2]'.format(units)) cx.set_xlim([0, glbl_power.max() + var]) cx.set_ylim(numpy.log2([period.min(), period.max()])) cx.set_yticks(numpy.log2(Yticks)) cx.set_yticklabels(Yticks) pyplot.setp(cx.get_yticklabels(), visible=False) # Fourth sub-plot, the scale averaged wavelet spectrum. dx = pyplot.axes([0.1, 0.07, 0.65, 0.2], sharex=ax) dx.axhline(scale_avg_signif, color='k', linestyle='--', linewidth=1.) dx.plot(t, scale_avg, 'k-', linewidth=1.5) dx.set_title('d) {}--{} minutes scale-averaged power'.format(2, 8)) dx.set_xlabel('Time (seconds)') dx.set_ylabel(r'Average variance [{}]'.format(units)) ax.set_xlim([t.min(), t.max()]) pyplot.show() fig.savefig('30secondstep_wvia_day.png')/usr/local/anaconda3/lib/python3.6/site-packages/numpy/core/numeric.py:492: ComplexWarning: Casting complex values to real discards the imaginary part return array(a, dtype, copy=False, order=order)Write Func to resample Dataframe to 10 minute frequency Resample both top and bottom tower H20ppm measurements and compute correlogram for single day Run wavelet analysis with Molet wavelet to get spectral analysis figure showing how lag correlations varies with period (10, 20 ,30 minute periods during a day) and with time over the course of a year (multiple days). This will let us see if the lag changes significantly throughout the year, hopefully. use df shift method to shift values of WVIA backward to match with corresponding measurements at the top of the towerdatafile_flux2013 = '../data/raw/flux/raw_MpalaTower_2013_071.nc' variable_list_flux2013 = ['wnd_spd','press_mean','batt_volt_Avg','batt_volt_Std'] df = convert_to_df(datafile_flux2013, variable_list_flux2013) df = df.reset_index() df['strtime'] = [time.strftime('%y-%m-%d-%H-%M-%S-%f') for time in df['time']] df['datetime'] = pd.to_datetime(df['strtime'], format='%y-%m-%d-%H-%M-%S-%f') df.plot(x='datetime', y='batt_volt_Avg')Resample DF to given interval and plotresamp_5min_df = df.resample('5T', on='datetime').mean() resamp_5min_df.reset_index().plot(x='datetime', y='WVIA_H2Oppm')why are there two different versions of the WVIA data and why are the .dat files in ts_data folder produced with a V11 program? 
I checked the difference between the files, output is in my tower_programs folder. It seems that there isn't much difference, the variables all look the same, but yet the file size difference between a ts file and a wvia file is an order of mag why is the wvia data renamed ts in each file? Why does the ts data have a shorter time range (2013-2015) than the wvia data (2012-2016) Also, there exists a V12 program, MainTowerCR3000_V12.CR3 yet I can find no outputs that use this program in the raw netcdf folder (haven't checked all the ts_data files)metaheader = Dataset('../data/raw/test/raw_MpalaTower_2012_167_wvia.nc') metaheader metaheader = Dataset('../data/raw/test/raw_MpalaTower_2013_071_ts.nc') metaheader**Linear Algebra for CpE****Laboratory 10 : Linear Transformations** Now that you have a understood the fundamentals of matrices and their operations we can move on to a more conceptual and practical application of linear algebra. **Objectives**At the end of this activity you will be able to:1. Be familiar with the role of matrix operations.2. Visualize matrix operations.3. Justify the precedence of matrix operations through Python. **Discussion**import numpy as np import matplotlib.pyplot as plt import scipy.linalg as la %matplotlib inline**Transformation**You can recall that a vector can be scaled or translated through different vector operations. We'll now dwell more on the translation and transformation of multi-dimensional vectors (i.e. matrices). This is possible using matrix operations. Take note that not all operations to matrices or $\mathbb{R}^2$ vectors are linear. Linear transformations leave the origin fixed and preserve parallelism. Scaling, shearing, rotation and reflexion of a plane are examples of linear transformations. Let's try to revisit them in this notebook.References:[Linear transformations in Numpy](https://mmas.github.io/linear-transformations-numpy) **Geometric Translation**There are two prime requirements for linear geometric translations:1. Vectors remain linear upon applying the linear function2. The origin of the vector does not change.To make representation easier, I have provided a user-defined function for plotting the quivers of the vectors. The function takes in the matrix we wish to transform and a transformation matrix. 
If no transformation matrix is provided, the default is an identity matrix.def plot_quiv(x,t_mat=np.eye(2)): x_prime = x @ t_mat size= (2,2) plt.figure(figsize=(4,4)) plt.xlim(-size[0],size[0]) plt.ylim(-size[1],size[1]) plt.xticks(np.arange((-size[0]), size[0]+1, 1.0)) plt.yticks(np.arange((-size[1]), size[1]+1, 1.0)) plt.quiver([0,0],[0,0], x_prime[:,0], x_prime[:,1], angles='xy', scale_units='xy',scale=1, color=['red','blue'])## use column spaces plt.grid() plt.show() A = np.array([ [1, 0], [0, 1] ]) plot_quiv(A)**Repositioning/Translation**t_mat = np.array([ [-1,0], [0,1] ]) plot_quiv(A, t_mat) t_mat = np.array([ [-1,0], [0,-1] ]) plot_quiv(A, t_mat) t_mat = np.array([ [1,0], [0,-1] ]) plot_quiv(A, t_mat)**Shears**shear = np.array([ [1,1], [0,1] ]) plot_quiv(A, shear) shear = np.array([ [1,0], [1,1] ]) plot_quiv(A, shear) shear = np.array([ [-1,1], [1,1] ]) plot_quiv(A, shear) shear = np.array([ [1.5,0], [1.5,1.5] ]) plot_quiv(A, shear)**Scaling**scale = np.array([ [2,0], [0,2] ]) plot_quiv(A, scale) scale = np.array([ [0.5,0], [0,2] ]) plot_quiv(A, scale)**Rotation**def rot_matrix(theta): theta = np.deg2rad(theta) rot_mat = np.array([ [np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)] ]) return rot_mat r_mat = rot_matrix(75) plot_quiv(A, r_mat)**3D Transformations**def plot_3d_quiv(x, t_mat=np.eye(3)): x_prime = x @ t_mat fig = plt.figure(figsize=(5,5)) ax1 = fig.gca(projection='3d') ax1.set_xlim([-2, 2]) ax1.set_ylim([-2, 2]) ax1.set_zlim([-2, 2]) ax1.set_xlabel("X (roll)") ax1.set_ylabel("Y (pitch)") ax1.set_zlabel("Z (yaw)") origin = (0,0,0) ax1.quiver(origin, origin, origin, x_prime[:,0], x_prime[:,1], x_prime[:,2], arrow_length_ratio=0.1, colors=['red','blue','green']) plt.grid() plt.show() X = np.eye(3) t_mat = np.array([ [0.5,1,1], [0.5,1,2], [0.5,1,0] ]) plot_3d_quiv(X, t_mat) def rot_matrix_3d(theta1, theta2, theta3): alpha = [np.deg2rad(theta1), np.deg2rad(theta2), np.deg2rad(theta3)] roll = np.array([ [1,0,0], [0, np.cos(alpha[0]), -np.sin(alpha[0])], [0, np.sin(alpha[0]), np.cos(alpha[0])] ]) pitch = np.array([ [np.cos(alpha[1]), 0, np.sin(alpha[1])], [0 ,1 ,0], [-np.sin(alpha[1]), 0, np.cos(alpha[1])] ]) yaw = np.array([ [np.cos(alpha[2]), -np.sin(alpha[2]), 0], [np.sin(alpha[2]), np.cos(alpha[2]), 0], [0,0,1] ]) res = roll @ pitch @ yaw return roll, pitch, yaw, res r_x, r_y, r_z, r_mat = rot_matrix_3d(0,0,0) M = np.array([ [2,0,0], [0,1,0], [0,0,0.5] ]) plot_3d_quiv(M, r_mat)**Supplementary Activity** Try to implement the linear transformations using spancs using the scatterplot view. 
Just do at least one example of linear transformation for a 2D space.import matplotlib.pyplot as plt x = [2,4,6,8,10,12,14,16,18,20,22,24] y = [10,20,30,40,50,60,70,80,90,100,110,120] plt.scatter(x, y) plt.show() AA = np.array([ [-1, 0], [1.5, 2] ]) plot_quiv(AA) scale = np.array([ [1,2], [6,5] ]) plt.scatter(AA, scale) plt.show()Show what the current PyCo version is!git rev-parse HEAD import numpy as np import matplotlib.pyplot as plt from PyCo.Topography import read_topography from PyCo.Topography import open_topography from PyCo.Topography import TopographyPyCo conventionsfirst index (from the left) of a numpy array: x- directionsecond index : y-directionheights = np.zeros((4,4)) heights[0 , : ] = 1 # same x all y heights[0, -2] = 0.5 heights[1,0] = 0.25 t = Topography(heights, physical_sizes=(1,1)) def prepare_plot(): fig, ax = plt.subplots() ax.set_aspect(1) ax.set_xlabel("x") ax.set_ylabel("y") return fig, axDifferent ways to plot 2D data Pcolormesh giving the x and y meshgridIn this case nothing can go wrong because for each datapoint the x and y value is given. However `pcolormesh` interpretsthe x and values as the corners of the boxes and the last z values are not displayed because there are less boxes then corners.fig, ax = prepare_plot() x, y, z = t.positions_and_heights() plt.colorbar(ax.pcolormesh(x, y, z, )) # check: transposing everything here doesn't change anything fig, ax = prepare_plot() plt.colorbar(ax.pcolormesh(x.T, y.T, z.T))trick: add a point to the x and y arrays so that all the z values will be displayed. Here we also shift the values so that (x, y) represent the center of the pixel rather then the left cornerfig, ax = prepare_plot() x_ = ((np.arange(z.shape[0]+1) - 0.5) * t.pixel_size[0]).reshape(-1,1) y_ = ((np.arange(z.shape[1]+1) - 0.5) * t.pixel_size[1]).reshape(1,-1) cb = plt.colorbar(ax.pcolormesh(x_*np.ones_like(y_), y_*np.ones_like(x_), z))pcolormesh with height values onlyin this case we need to comply to pcolormesh's conventions, that are different then ours. We need to transpose the data. Note that here all pixels are displayed.fig, ax = prepare_plot() plt.colorbar(ax.pcolormesh(z.T))Matshowfig, ax = prepare_plot() cb = plt.colorbar(ax.matshow(z.T)) ax.invert_yaxis() ax.tick_params(labelbottom=True, bottom=True, labeltop=False, top=False)imshowfig, ax = prepare_plot() cb = plt.colorbar(ax.imshow(z.T)) ax.invert_yaxis()Adjust ticksWith methods that use only the height array like `imshow` the ticks correspond to the index in your array. 
You can display physical values by setting tick formatters:

fig, ax = prepare_plot()
cb = plt.colorbar(ax.imshow(z.T))
ax.invert_yaxis()

import matplotlib.ticker as ticker
format_string = '{0:g}'
ax.xaxis.set_major_formatter(ticker.FuncFormatter(
    lambda x, pos: format_string.format(x * t.pixel_size[0])))
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
    lambda x, pos: format_string.format(x * t.pixel_size[1])))

Or, if you want to choose the tick spacing yourself:

fig, ax = prepare_plot()
cb = plt.colorbar(ax.imshow(z.T))
ax.invert_yaxis()
nx, ny = t.nb_grid_pts
sx, sy = t.physical_sizes
ticksx = np.linspace(0, sx, 3)
ticksy = np.linspace(0, sy, 3)
ax.set_xticks(ticksx / sx * nx - 0.5)  # convert to "index" units
ax.set_xticklabels([f"{v:.2f}" for v in ticksx])
ax.set_yticks(ticksy / sy * ny - 0.5)
ax.set_yticklabels([f"{v:.2f}" for v in ticksy]);

import tensorflow as tf
import numpy as np
from tensorflow import keras

class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs.get('accuracy') >= 0.99:
            print("\nReached 99% accuracy so cancelling training!")
            self.model.stop_training = True

callbacks = myCallback()

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()

import matplotlib.pyplot as plt
plt.imshow(x_train[2])  # To show the image
# print(x_train[0])
print(y_train[2])  ## To see the pixels and colors of the images and labels

x_train = x_train / 255.0
x_test = x_test / 255.0

model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
                                    tf.keras.layers.Dense(128, activation=tf.nn.relu),
                                    tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
model.evaluate(x_test, y_test)

classifications = model.predict(x_test)
plt.imshow(x_test[6])  # To show the image
print(classifications[6])
print(y_test[6])

[3.3832603e-12 2.7064018e-10 4.6898784e-08 6.5356849e-09 9.9987078e-01 2.0895502e-07 2.1394226e-09 2.8383211e-05 9.3328403e-05 7.2237513e-06]
4

Noise model selection on NANOGrav pulsars

%load_ext autoreload
%autoreload 2
%matplotlib inline

import numpy as np
import glob, os, json, string, pickle
import matplotlib.pyplot as plt
import matplotlib as mpl
import logging, inspect, copy
logging.basicConfig(level=logging.WARNING)

import enterprise
from enterprise.pulsar import Pulsar

import enterprise_extensions
from enterprise_extensions import models, model_utils, blocks
from enterprise_extensions.models import model_singlepsr_noise
from enterprise_extensions.chromatic import solar_wind, chromatic
from enterprise_extensions.hypermodel import HyperModel

You'll need sksparse for get_coefficients() with common signals!

Red-noise model selection on 12.5yr Dataset

Get par, tim, and noise files

# psr = Pulsar('./partim_no_noise/J0613-0200_NANOGrav_11yv0.gls.strip.par',
#              './partim_no_noise/J0613-0200_NANOGrav_11yv0.tim',
#              ephem='DE436')
# noisefiles = sorted(glob.glob('../11yr_stochastic_analysis/nano11y_data/noisefiles/*.json'))
# params = {}
# for noisefil in noisefiles:
#     with open(noisefil, 'r') as fp:
#         params.update(json.load(fp))

Load Pickle File

psrname = 'J1911+1347'
filepath = './no_dmx_pickles/'
filepath += '{0}_ng12p5yr_v3_nodmx_ePSR.pkl'.format(psrname)
with open(filepath, 'rb') as fin:
    psr = pickle.load(fin)

Testing models with GP DM variations

__Very Important:__ What follows is an __example__ of noise model selection.
For *most* pulsars the choice of noise models used in any given model selection analysis will be different than the ones chosen here. Those working on pulsars highlighted in the 11-year noise model analysis should include those models in their analyses and also use the best combination of models from that work in any final model selection that is done. Setup GP model selectionred_psd = ['powerlaw'] dm_nondiag_kernel = ['None','sq_exp', 'periodic'] dm_sw_gp = [True, False] white_vary = TrueUse the inspect package to pull the arguments from `model_singlepsr_noise` and make a template for the keyword arguments (kwargs) dictionary we will be using to keep track of these various models.args = inspect.getfullargspec(model_singlepsr_noise) keys = args[0][1:] vals = args[3] model_template = dict(zip(keys,vals)) model_templateHere we show one work flow where we set up a `for` loop to go through the various models. Make sure to save the `model_labels` and `model_kwargs`. The former will be useful for making noise flower plots, while the latter will be the final product for a pulsar in this analysis.# Create list of pta models for our model selection nmodels = len(red_psd) * len(dm_nondiag_kernel) * len(dm_sw_gp) mod_index = np.arange(nmodels) ptas = dict.fromkeys(mod_index) model_dict = {} model_labels = [] ct = 0 for red in red_psd: for dm in dm_nondiag_kernel: for sw_gp in dm_sw_gp: if dm == 'None': dm_var = False else: dm_var = True # Copy template kwargs dict and replace values we are changing. kwargs = copy.deepcopy(model_template) kwargs.update({'dm_var':dm_var, 'dmgp_kernel':'nondiag', 'psd':red, 'white_vary':white_vary, 'dm_nondiag_kernel':dm, 'dm_sw_deter':True, 'dm_sw_gp':sw_gp, 'swgp_basis': 'powerlaw'}) # Instantiate single pulsar noise model ptas[ct] = model_singlepsr_noise(psr, **kwargs) # Add labels and kwargs to save for posterity and plotting. model_labels.append([string.ascii_uppercase[ct],red, dm, sw_gp]) model_dict.update({str(ct):kwargs}) ct += 1 # Instantiate a collection of models super_model = HyperModel(ptas) super_model.params model_labelsSet the out directory for you chains and other sampler setup !!! Important !!! Please set the chain directory outside of the git repository (easier) or at least do not try and commit your chains to the repo.outdir = '/Users/hazboun/nanograv_detection/ent_ext_testing/{}/nondiag_dmgp/'.format(psr.name) emp_distr_path = './wn_emp_dists/{0}_ng12p5yr_v3_std_plaw_emp_dist.pkl'.format(psr.name) sampler = super_model.setup_sampler(resume=True, outdir=outdir, empirical_distr=emp_distr_path) model_params = {} for ky, pta in ptas.items(): model_params.update({str(ky) : pta.param_names}) with open(outdir+'/model_params.json' , 'w') as fout: json.dump(model_params, fout, sort_keys=True, indent=4, separators=(',', ': ')) with open(outdir+'/model_kwargs.json' , 'w') as fout: json.dump(model_dict, fout, sort_keys=True, indent=4, separators=(',', ': ')) with open(outdir+'/model_labels.json' , 'w') as fout: json.dump(model_labels, fout, sort_keys=True, indent=4, separators=(',', ': ')) # sampler for N steps N = int(5e6) x0 = super_model.initial_sample() sampler.sample(x0, N, SCAMweight=30, AMweight=15, DEweight=50, burn=100000)/Users/hazboun/anaconda3/envs/pint/lib/python3.6/site-packages/enterprise/signals/parameter.py:64: RuntimeWarning: divide by zero encountered in log logpdf = np.log(self.prior(value, **kwargs))Example with chromatic models. 
The analysis above seems to indicate that `J1600-3053` wants an annual-DM term whenever a Fourier basis representation of the DM variations is used. We use that informatio in the following. We also fix the red-noise PSD to be a `power-law`.psrname = 'J1600-3053' filepath = './no_dmx_pickles/' filepath += '{0}_ng12p5yr_v3_nodmx_ePSR.pkl'.format(psrname) with open(filepath,'rb') as fin: psr=pickle.load(fin) # Create list of pta models for our model selection ptas = {} model_labels = [] model_dict = {} kwargs = copy.deepcopy(model_template) kwargs.update({'dm_var':True, 'dmgp_kernel':'nondiag', 'white_vary':True, 'dm_nondiag_kernel':'sq_exp_rfband', 'dm_sw_deter':True, }) # Instantiate single pulsar noise model ptas[0] = model_singlepsr_noise(psr, **kwargs) # Add labels and kwargs to save for posterity and plotting. # Model, DM Kernel, Chromatic, Chrom Idx model_labels.append(['A','sq_exp_rfband', False, None]) model_dict.update({str(0):kwargs}) kwargs = copy.deepcopy(model_template) kwargs.update({'dm_var':True, 'dmgp_kernel':'nondiag', 'white_vary':True, 'dm_nondiag_kernel':'periodic_rfband', 'dm_sw_deter':True, }) # Instantiate single pulsar noise model ptas[1] = model_singlepsr_noise(psr, **kwargs) # Add labels and kwargs to save for posterity and plotting. # Model, DM Kernel, Chromatic, Chrom Idx model_labels.append(['B','periodic_rfband', False, None]) model_dict.update({str(1):kwargs}) kwargs = copy.deepcopy(model_template) kwargs.update({'dm_var':True, 'dmgp_kernel':'nondiag', 'white_vary':True, 'dm_nondiag_kernel':'sq_exp_rfband', 'chrom_gp':True, 'chrom_gp_kernel':'nondiag', 'chrom_kernel':'sq_exp', 'dm_sw_deter':True, }) # Instantiate single pulsar noise model ptas[2] = model_singlepsr_noise(psr, **kwargs) # Add labels and kwargs to save for posterity and plotting. # Model, DM Kernel, Chromatic, Chrom Idx model_labels.append(['C','sq_exp_rfband', True, 'sq_exp']) model_dict.update({str(2):kwargs}) kwargs = copy.deepcopy(model_template) kwargs.update({'dm_var':True, 'dmgp_kernel':'nondiag', 'white_vary':True, 'dm_nondiag_kernel':'sq_exp_rfband', 'chrom_gp':True, 'chrom_gp_kernel':'nondiag', 'chrom_kernel':'sq_exp', 'dm_sw_deter':True, }) # Instantiate single pulsar noise model ptas[3] = model_singlepsr_noise(psr, **kwargs) # Add labels and kwargs to save for posterity and plotting. # Model, DM Kernel, Chromatic, Chrom Idx model_labels.append(['D','sq_exp_rfband', True, 'periodic']) model_dict.update({str(3):kwargs}) # Instanciate a collection of models super_model = HyperModel(ptas) outdir = '/Users/hazboun/nanograv_detection/ent_ext_testing/{}/chrom_dmgp/'.format(psr.name) emp_distr_path = './wn_emp_dists/{0}_ng12p5yr_v3_std_plaw_emp_dist.pkl'.format(psr.name) sampler = super_model.setup_sampler(resume=True, outdir=outdir, empirical_distr=emp_distr_path) model_params = {} for ky, pta in ptas.items(): model_params.update({str(ky) : pta.param_names}) with open(outdir+'/model_params.json' , 'w') as fout: json.dump(model_params, fout, sort_keys=True, indent=4, separators=(',', ': ')) with open(outdir+'/model_kwargs.json' , 'w') as fout: json.dump(model_dict, fout, sort_keys=True, indent=4, separators=(',', ': ')) with open(outdir+'/model_labels.json' , 'w') as fout: json.dump(model_labels, fout, sort_keys=True, indent=4, separators=(',', ': ')) # sampler for N steps N = int(5e6) x0 = super_model.initial_sample() sampler.sample(x0, N, SCAMweight=30, AMweight=15, DEweight=50, burn=200000)Finished 0.68 percent in 13527.782160 s Acceptance rate = 0.5012941. 
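After (or while) the sampler runs, the relative support for the models in the hypermodel can be read off from the sampled model-index parameter. A minimal post-processing sketch, assuming the standard outputs `chain_1.txt` and `pars.txt` in `outdir`, that the model index is the parameter named `nmodel`, and an arbitrary 25% burn-in (enterprise_extensions' `model_utils` provides similar plotting/odds utilities):

import numpy as np  # already imported above; repeated here so the cell stands alone

chain = np.loadtxt(os.path.join(outdir, 'chain_1.txt'))
pars = [str(p) for p in np.loadtxt(os.path.join(outdir, 'pars.txt'), dtype=str)]
burn = int(0.25 * chain.shape[0])            # arbitrary burn-in fraction

nmodel = np.rint(chain[burn:, pars.index('nmodel')]).astype(int)
counts = np.array([np.sum(nmodel == i) for i in range(len(ptas))])

# Posterior odds of each model relative to model A (flat prior over models)
for label, n in zip(model_labels, counts):
    print(label, int(n), 'samples; odds vs model A: {:.2f}'.format(n / max(counts[0], 1)))

The ratio of samples spent in two models estimates their posterior odds, which is how the model labels saved above get turned into a model-selection statement.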
Setup Env and Dependencies Run Below command in Terminal/CMD to config with CV2. >conda update anaconda-navigator >conda update navigator-updater >pip install opencv-python# Import Tensorflow 2.0 #%tensorflow_version 2.0 import tensorflow as tf !pip install mitdeeplearning import mitdeeplearning as mdl import numpy as np import pandas as pd import os import time import functools from IPython import display as ipythondisplay from tqdm import tqdm !apt-get intall abcmidi timidity > /dev/null 2>&1 from IPython.display import Image #from IPython.display import gif ### To Play *.mp3 music, import vlc ## pip install python-vlc -> No ## brew install --cask vlc -> Yes import vlc # Check using a GPU, if not switch runtimes # Using Runtime > Change Runtime Type > GPU #assert len(tf.config.list_physical_devices('GPU')) > 02. Dataset 2.1 Import Songs with ABC Notation Format Gathered a dataset of thousands of Irish folk songs, represented in [ABC notation](https://en.wikipedia.org/wiki/ABC_notation)Image(url= "https://www.fg-a.com/st-patricks-day/irish-men-drinking-animated.gif") Image(url="https://upload.wikimedia.org/score/q/1/q1dv95u4m1ib00y0df6n47q5g6fd1g2/q1dv95u4.png") # Play The Legacy jig by vlc MediaPlayer p = vlc.MediaPlayer("https://upload.wikimedia.org/score/q/1/q1dv95u4m1ib00y0df6n47q5g6fd1g2/q1dv95u4.mp3") p.play() p.stop() # Translate Staff to ACB notation # Markdown: use two spaces to break the line (get into a new line)X:1 T:The Legacy Jig M:6/8 L:1/8 R:jig K:G GFG BAB | gfg gab | GFG BAB | d2A AFD | GFG BAB | gfg gab | age edB |1 dBA AFD :|2 dBA ABd |: efe edB | dBA ABd | efe edB | gdB ABd | efe edB | d2d def | gfe edB |1 dBA ABd :|2 dBA AFD |]# Get CWD Current Working Directory cwd = os.getcwd()def extract_song_snippet(): pattern = '(^|\n\n)(.*?)\n\n' search_results = re.findall(pattern, text, overlapped=True, flag=re.DOTALL) songs = [song[1] for song in search_results] print("Found {} songs in text".format(len(songs))) return songssongs = []with open(os.path.join(cwd, 'dataset', 'irish.abc'), 'r') as f: text = f.read() songs =extract_song_snippet(text) example_song = song[0]print("\nExample song: ")print(example_song)from mitdeeplearning import lab1 songs = lab1.load_training_data() example_song1 = songs[0] example_song2 = songs[1] # Songs is a list; Each song is as a single data element with the index from 0-806 songs[0] print(songs[0]) print('\nExample song1: ') print(example_song1) print('\nExample song2: ') print(example_song1) songsExample song1: X:1 T:Alexander's Z: id:dc-hornpipe-1 M:C| L:1/8 K:D Major (3ABc|dAFA DFAd|fdcd FAdf|gfge fefd|(3efe (3dcB A2 (3ABc|! dAFA DFAd|fdcd FAdf|gfge fefd|(3efe dc d2:|! AG|FAdA FAdA|GBdB GBdB|Acec Acec|dfaf gecA|! FAdA FAdA|GBdB GBdB|Aceg fefd|(3efe dc d2:|! Example song2: X:1 T:Alexander's Z: id:dc-hornpipe-1 M:C| L:1/8 K:D Major (3ABc|dAFA DFAd|fdcd FAdf|gfge fefd|(3efe (3dcB A2 (3ABc|! dAFA DFAd|fdcd FAdf|gfge fefd|(3efe dc d2:|! AG|FAdA FAdA|GBdB GBdB|Acec Acec|dfaf gecA|! FAdA FAdA|GBdB GBdB|Aceg fefd|(3efe dc d2:|!2.2 Convert ABC Notation to Audio waveform# Convert the ABC notation to audio file and listen to it. 
lab1.play_song(example_song1)

Data representation: each song carries a title, a key (e.g. C Major | D Major) and a tempo (1/4, 3/4, 1/8).

# Join the list of song strings into a single string containing all songs,
# with a blank line ("\n\n") between songs
songs_joined = "\n\n".join(songs)
songs_joined
len(songs_joined)

### Create a set from songs_joined: deduplicate and sort
# A set keeps only unique elements
vocab = sorted(set(songs_joined))
print("There are", len(vocab), "unique characters in dataset")
# The vocabulary is a sorted list of the unique characters
vocab

2.3 Process the dataset for the learning task

Step 1: Train an RNN model to learn patterns in ABC-notation music.
Step 2: Use the trained model to generate a new piece of music based on the learned music dynamics.

Task: Given a character, or a sequence of characters (represented in ABC notation), what is the most probable next character?

Solution: Input a sequence of characters into the RNN model, and train the model to predict the output, which is the following character at each time step. RNNs maintain an internal state that depends on previously seen elements, so information about all characters seen up to a given moment is taken into account when generating the prediction.

2.3.1 Vectorize the text

Create a numerical representation of the text-based dataset. Generate two lookup tables:
1) a lookup table that maps characters to numbers: ABC notes -> index
2) a lookup table that maps numbers back to characters: index -> ABC notes

### Define a numerical representation of the text ###
# Create a mapping from each character "a" to a unique index "i"
char2idx = {a: i for i, a in enumerate(vocab)}
char2idx

### Create a mapping from indices to characters: idx2char
# Inverse of char2idx: convert a unique index back to the character in the vocabulary
idx2char = np.array(vocab)
idx2char
len(vocab)

a = ("b", "g", "a", "1", "d", "L", "f", "Q", "(", "2", "c", "h", "e", "&")
x = sorted(a)
x  # sort order: symbols > numbers > uppercase letters > lowercase letters

print("{")
for char, _ in zip(char2idx, range(40)):
    print('  {:4s}: {:3d},'.format(repr(char), char2idx[char]))
print('  ...\n')

### Vectorize the songs string
def vectorize_string(string):
    vectorized_list = np.array([char2idx[s] for s in string])
    return vectorized_list

vectorized_songs = vectorize_string(songs_joined)
vectorized_songs
print('{} --- characters mapped to integers ---> {}'.format(repr(songs_joined[:10]), vectorized_songs))
vectorized_songs
vectorized_songs.shape[0]

2.4 Create Training Examples and Targets

Divide the text into example sequences to use during training: [Input] -> RNN -> [Target], where the target is the input shifted one character to the right. For text == "Hello" and seq_length == 4, the input sequence is "Hell" and the output/target is "ello". Break the text into chunks of seq_length + 1.

### Batch definition to create training examples ###
def get_batch(vectorized_songs, seq_length, batch_size):
    # Get the length of the vectorized songs string, n
    n = vectorized_songs.shape[0] - 1
    # Randomly choose the starting indices for the examples in the training batch
    idx = np.random.choice(n - seq_length, batch_size)
    # Construct a list of input sequences for the training batch
    input_batch = [vectorized_songs[i:i + seq_length] for i in idx]
    # Construct a list of output sequences for the training batch
    output_batch = [vectorized_songs[i + 1:i + 1 + seq_length] for i in idx]
    x_batch = np.reshape(input_batch, [batch_size, seq_length])
    y_batch = np.reshape(output_batch, [batch_size, seq_length])
    return x_batch, y_batch

vectorized_songs.shape[0]

test_args =
(vectorized_songs, 10, 2) test_args dict1 = {1: "abcd", "apples": 3, "fruits": ["apples", "mangoes"], } print("Initial Dict = {}".format(dict1)) dict1[2] = 7 print("Initial Dict = {}".format(dict1)) print("Size of the dict = {}\n".format(len(dict1))) print("Key = {} and Val = {}".format("apples", dict1["apples"])) print("Key = {} and Val = {}".format("fruits", dict1.get("fruits", False))) print("Key = {} and Val = {}\n".format("mangoes", dict1.get("mangoes", False))) dict1.get("fruits", False) dict1.values() # creation of tuple tup1 = ('physics', 'chemistry', 1997, 2000) # Convert to tuple tup2 = tuple([2, 3, 4, 1, 2, 6]) # Both tuple initially print("Tuple tup1 : {}".format(tup1)) print("Tuple tup2 : {}\n".format(tup2)) get_batch = (vectorized_songs, 10, 2) if not test_batch_func_types(get_batch, test_args) or \ not test_batch_func_shapes(get_batch, test_args) or \ not test_batch_func_next_step(get_batch, test_args): print("======\n[FAIL] could not pass tests") else: print("======\n[PASS] passed all tests!") x_batch, y_batch = get_batch(vectorized_songs, seq_length=5, batch_size=1) for i, (input_idx, target_idx) in enumerate(zip(np.squeeze(x_batch), np.squeeze(y_batch))): print("Step {:3d}".format(i)) print(" input: {} ({:s})".format(input_idx, repr(idx2char[input_idx]))) print(" expected output: {} ({:s})".format(target_idx, repr(idx2char[target_idx])))Feature Selectionweather_for_selection = weather.drop(['RISK_MM'], axis = 1) weather_for_selection.shape #change date to month weather_for_selection['Date'] = weather_for_selection['Date'].astype('datetime64') weather_for_selection['Date'] = weather_for_selection['Date'].dt.month weather_for_selection.rename(columns = {'Date':'Month'}, inplace = True) remove_all_nan = weather_for_selection[~weather_for_selection.isna()] #split features into two lists based on their datatype def split_col(dataframe): categorical_features = [] numerical_features = [] for i in dataframe.columns.values: if dataframe[i].dtypes != 'object': numerical_features.append(i) else: categorical_features.append(i) return categorical_features, numerical_features display(weather_for_selection.head()) category_f, numerical_f = split_col(remove_all_nan.iloc[:,:-1])Look at the replationship between each feature and the target variabledef selection_catergory(category_f): result = [] for i in np.arange(len(category_f)): x = remove_all_nan[~remove_all_nan[category_f[i]].isna()] feature = LabelEncoder().fit_transform(x[category_f[i]]) label = x['RainTomorrow'] fstat, pval = chi2(feature.reshape(-1,1), label) mi = mutual_info_classif(feature.reshape(-1,1), label) result.append([category_f[i], round(fstat[0],5), round(pval[0],5), round(mi[0],5)]) return pd.DataFrame(result, columns =['Category_f', 'Chi2', 'Pval', 'MI']) def selection_number(numerical_f): result = [] for i in np.arange(len(numerical_f)): x = remove_all_nan[~remove_all_nan[numerical_f[i]].isna()] feature = StandardScaler().fit_transform(x[[numerical_f[i]]]) label = x['RainTomorrow'] fstat, pval = f_classif(feature.reshape(-1,1), label) mi = mutual_info_classif(feature.reshape(-1,1), label) result.append([numerical_f[i], round(fstat[0],5), round(pval[0],5), round(mi[0],5)]) return pd.DataFrame(result, columns =['Number_f', 'Fstat', 'Pval', 'MI']) result1 = selection_catergory(category_f) result2 = selection_number(numerical_f) display(result1) display(result2) fig = plt.figure(figsize = (20,10)) ax = fig.add_subplot(111) ax.plot(result1['Category_f'], result1['Pval'], label = 'Category_Pval', linewidth = 3) 
ax.plot(result1['Category_f'], [0.05]*6, linestyle = '--', label = 'Pval_Benchmark', linewidth = 3) plt.tick_params(axis = 'x',rotation = 30) plt.xlabel('Categorical_features') plt.ylabel('P-value') plt.legend() fig2 = plt.figure(figsize = (20,10)) ax2 = fig2.add_subplot(111) ax2.plot(result2['Number_f'], result2['Pval'],label = 'Number_Pval', linewidth = 3) ax2.plot(result2['Number_f'], [0.05]*len(result2['Number_f']), linestyle = '--', label = 'Pval_Benchmark', linewidth = 3) plt.tick_params(axis = 'x',rotation = 30) plt.xlabel('Numerical_features') plt.ylabel('P-value') plt.legend()Conclusion: Drop 'Month' which has relatively higher value, although its p-value is less than 0.05import sklearn sklearn.metrics.SCORERS.keys()First Trial: Remove all rows with nan valuesdisplay(weather_for_selection.head()) weather_first_trial = weather_for_selection.dropna() weather_first_trial.drop(['Month'], axis = 1, inplace = True) display(weather_first_trial.head()) weather_first_trial.shape result2['Number_f'].values[1:] x_train, x_test, y_train, y_test = train_test_split(weather_first_trial.iloc[:,:-1], weather_first_trial.iloc[:,-1] , test_size = 0.2, random_state = 300) col = ColumnTransformer(transformers=[('standardized', StandardScaler(), result2['Number_f'].values[1:])],remainder = 'passthrough', sparse_threshold = 0) x_test1 = x_test.copy() y_test1 = y_test.copy() #Resampling x_train = pd.get_dummies(x_train, drop_first = True) x_test = pd.get_dummies(x_test, drop_first = True) x_train_columns = x_train.columns.values x_train = col.fit_transform(x_train) x_test = col.transform(x_test) x_train, y_train = SMOTE(sampling_strategy='minority').fit_resample(x_train, y_train) # neg_pos = 35220.0/9916.0 #models logisticR = LogisticRegression(penalty = 'l2', solver = 'newton-cg', multi_class = 'multinomial',random_state = 300) randomF = RandomForestClassifier(random_state=300) adaboost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth = 1),n_estimators=50, random_state=300) xgboost = XGBClassifier(random_state=300) x_train_columns %%time def check_model(models, models_name): result_positive = [] for model in range(len(models)): print(f'Processing: {models_names[model]}') pipeline1 = Pipeline(steps=[('model', models[model])]) pipeline1.fit(x_train, y_train) y_predicted = pipeline1.predict(x_test) f1score = f1_score(y_test, y_predicted,average = None)[1] result_positive.append([models_names[model], round(f1score,4)]) return pd.DataFrame(result_positive, columns = ['Model_Name', 'f1_Score_Positive'])CPU times: user 11 µs, sys: 1e+03 ns, total: 12 µs Wall time: 17.9 µsCompare 4 models' performance%%time models = [logisticR, randomF, adaboost, xgboost] models_names = ['logisticR', 'randomF', 'adaboost', 'xgboost'] result_positive = check_model(models, models_names) result_positive plt.figure(figsize = (20,10)) plt.plot(result_positive['Model_Name'], result_positive['f1_Score_Positive'], label = 'Model_Result', linewidth = 3) plt.legend(loc =5) plt.xlabel('Model_Name') plt.ylabel('f1_Score_Positive') plt.title('Model Selection')Final Modelweather_first_trial['RainTomorrow'].value_counts() # neg_pos = 35220.0/9916.0 xgboost1 = XGBClassifier(n_estimators=300,random_state=300, max_depth = 5, cv = 3) xgboost1.fit(x_train, y_train) y_predicted = xgboost1.predict(x_test) print(classification_report(y_test, y_predicted, target_names=['No', 'Yes'])) print(f1_score(y_test, y_predicted,average = None)[1]) cvs = cross_val_score(estimator=xgboost1, X=x_train, y = y_train,cv = 10, 
verbose=2,scoring='accuracy') print(f'10-fold Cross Validation: {cvs.mean()}') fig = plt.figure(figsize = (10,5)) ax = fig.add_subplot(111) plot_confusion =confusion_matrix(y_test, y_predicted) sns.heatmap(plot_confusion, annot = True, xticklabels = ['No', 'Yes'], yticklabels = ['No', 'Yes']) plt.xlabel('Prediction') plt.ylabel('Actual') fig2 = plt.figure(figsize = (10,15)) ax = fig2.add_subplot(111) plot_importance(xgboost1, max_num_features = 10, ax=ax) features = weather_first_trial.columns[:-1] print(list(features)) def prediction (data): data = pd.get_dummies(data, drop_first = True) data = col.fit_transform(data) print(xgboost1.predict(data)) Location = list(weather_first_trial['Location'].unique()) WindGustDir = list(weather_first_trial['WindGustDir'].unique()) WindDir9am = list(weather_first_trial['WindDir9am'].unique()) WindDir3pm = list(weather_first_trial['WindDir3pm'].unique()) print('------------------------') print(f'Location:\n{Location}') print('------------------------') print(f'WindGustDir:\n{WindGustDir}') print('------------------------') print(f'WindDir9am:\n{WindDir9am}') print('------------------------') print(f'WindDir3pm:\n{WindDir3pm}') # ['Location', 'MinTemp', 'MaxTemp', 'Rainfall', 'Evaporation', 'Sunshine', # 'WindGustDir', 'WindGustSpeed', 'WindDir9am', 'WindDir3pm', 'WindSpeed9am', # 'WindSpeed3pm', 'Humidity9am', 'Humidity3pm', 'Pressure9am', 'Pressure3pm', # 'Cloud9am', 'Cloud3pm', 'Temp9am', 'Temp3pm', 'RainToday', 'RainYesturady'] #input a list of all feature above to below function prediction()['Location', 'MinTemp', 'MaxTemp', 'Rainfall', 'Evaporation', 'Sunshine', 'WindGustDir', 'WindGustSpeed', 'WindDir9am', 'WindDir3pm', 'WindSpeed9am', 'WindSpeed3pm', 'Humidity9am', 'Humidity3pm', 'Pressure9am', 'Pressure3pm', 'Cloud9am', 'Cloud3pm', 'Temp9am', 'Temp3pm', 'RainToday', 'RainYesturady'] ------------------------ Location: ['Cobar', 'CoffsHarbour', 'Moree', 'NorfolkIsland', 'Sydney', 'SydneyAirport', 'WaggaWagga', 'Williamtown', 'Canberra', 'Sale', 'MelbourneAirport', 'Melbourne', 'Mildura', 'Portland', 'Watsonia', 'Brisbane', 'Cairns', 'Townsville', 'MountGambier', 'Nuriootpa', 'Woomera', 'PerthAirport', 'Perth', 'Hobart', 'AliceSprings', 'Darwin'] ------------------------ WindGustDir: ['SSW', 'S', 'NNE', 'WNW', 'N', 'SE', 'ENE', 'NE', 'E', 'SW', 'W', 'WSW', 'NNW', 'ESE', 'SSE', 'NW'] ------------------------ WindDir9am: ['ENE', 'SSE', 'NNE', 'WNW', 'NW', 'N', 'S', 'SE', 'NE', 'W', 'SSW', 'E', 'NNW', 'ESE', 'WSW', 'SW'] ------------------------ WindDir3pm: ['SW', 'SSE', 'N[...]Select the top 6 features (based on the feature importances above)selection = x_train_columns[[9,5,11,13,4,2]] # [9,5,11,13,4,2] selection x_weather_selected = weather_first_trial[selection] y_weather_selected = weather_first_trial.iloc[:,-1] display(x_weather_selected.head()) display(y_weather_selected.head()) x_train_s, x_test_s, y_train_s, y_test_s = train_test_split(x_weather_selected, y_weather_selected, test_size = 0.2, random_state = 200) coltrans = ColumnTransformer(transformers = [('standard', StandardScaler(), selection)]) NegoverPos = y_train_s.value_counts()[0] / y_train_s.value_counts()[1] x_train_s = coltrans.fit_transform(x_train_s) x_test_s = coltrans.transform(x_test_s) def determine_n_estimators(start = 100, end = 110, step = 1, **kwarg): result = [] for i in range(start,end+1,step): xgboost2 = XGBClassifier(random_state=300, n_estimators=i, max_depth = 3, learning_rate=0.1, min_child_weight = 1, gamma = 0.2, subsample = 0.8, colsample_bytree= 0.8, objective= 
'binary:logistic', scale_pos_weight = NegoverPos, n_jobs=-1) xgboost2.fit(x_train_s, y_train_s) y_predicted1 = xgboost2.predict(x_test_s) result.append([i,f1_score(y_test_s, y_predicted1,average = None)[1]]) print(xgboost2.get_params) plotgraph = pd.DataFrame(result, columns = ['n_estimators', 'f1_score for +ve result']) sns.lineplot(data = plotgraph, x = 'n_estimators', y = 'f1_score for +ve result') plt.title(f'n_estimators from {start} to {end}, step = {step}') print(plotgraph.sort_values('f1_score for +ve result', ascending = False).iloc[0]) return result determine_n_estimators(start = 100, end = 1000, step = 100) n_estimators 900.000000 f1_score for +ve result 0.642586 Name: 8, dtype: float64Tuning Hyperparameters (I only kept the last turning and update the hyperparameter to the function above)xgboost2 = XGBClassifier(random_state=300, n_estimators=900, max_depth = 3, learning_rate=0.1, min_child_weight = 1, gamma = 0.2, subsample = 0.8, colsample_bytree= 0.8, objective= 'binary:logistic', scale_pos_weight = NegoverPos, n_jobs=-1) param2 = {'subsample':(0.6,0.7,0.8), 'colsample_bytree':(0.6,0.7,0.8)} gridsearch2 = GridSearchCV(estimator = xgboost2, param_grid=param2, cv = 3, scoring='f1') gridsearch2.fit(x_train_s, y_train_s) y_predicted1 = gridsearch2.predict(x_test_s) print(classification_report(y_test_s, y_predicted1, target_names=['No', 'Yes'])) gridsearch2.best_params_ gridsearch2.best_score_ plot_importance(xgboost2)Second Trialweather2 = pd.read_csv('weatherAUS.csv') weather2.shape weather2.isna().sum() weather2.drop(['RISK_MM', 'Date'],axis =1, inplace = True)Find Features with higher correlationsweather2['RainTomorrow'] = LabelBinarizer().fit_transform(weather2.RainTomorrow) corr = weather2.corr(method = 'pearson') corr new_weather = weather2.drop(['MinTemp', 'MaxTemp','Evaporation', 'WindSpeed9am', 'WindSpeed3pm', 'Temp9am', 'Temp3pm', 'Pressure3pm', 'WindGustSpeed'], axis =1) # display(new_weather[(abs(corr['Sunshine']) >= 0.5) & (abs(corr['Sunshine']) < 1)]) display(new_weather.head()) corr #combine sunshine + humidity9pm + humidity3pm + cloud9am + cloud3pm #pressure9am + pressure 3pm ls = corr.drop(['MinTemp', 'MaxTemp','Evaporation', 'WindSpeed9am', 'WindSpeed3pm', 'Temp9am', 'Temp3pm'], axis =1) weight = corr.loc['RainTomorrow',['Sunshine','Humidity9am','Humidity3pm','Cloud9am','Cloud3pm']].reset_index() weight new_weather.head() minmax = MinMaxScaler(feature_range = (0,1)) columntransformer = ColumnTransformer(transformers = [('ordinal', OrdinalEncoder(),['Location', 'WindGustDir','WindDir9am', 'WindDir3pm', 'RainToday'])],remainder = 'passthrough', sparse_threshold = 0) new_weather.dropna(inplace = True) x_train_2nd, x_test_2nd, y_train_2nd, y_test_2nd = train_test_split(new_weather.iloc[:,:-1], new_weather.iloc[:,-1], test_size = 0.2, random_state = 222) x_train_2nd.head()Combine feartures with higher correlationnumerical_features = ['Rainfall', 'Sunshine','Humidity9am','Humidity3pm','Pressure9am', 'Cloud9am','Cloud3pm'] #minmax the numerical_features minmax = MinMaxScaler(feature_range=(0, 1)) x_train_2nd[numerical_features] = minmax.fit_transform(x_train_2nd[numerical_features]) x_test_2nd[numerical_features] = minmax.transform(x_test_2nd[numerical_features]) display(x_train_2nd.head()) x_train_2nd['Sunshine_Humidity_Cloudy'] = x_train_2nd['Sunshine'] * weight.iloc[0,1] + x_train_2nd['Humidity9am'] * weight.iloc[1,1] + x_train_2nd['Humidity3pm'] * weight.iloc[2,1] + x_train_2nd['Cloud9am'] * weight.iloc[3,1] + x_train_2nd['Cloud3pm'] * weight.iloc[4,1] # 
x_train_2nd['Pressure9am3pm'] = x_train_2nd['Pressure9am'] * weight.iloc[5,1] + x_train_2nd['Pressure3pm'] * weight.iloc[6,1] x_test_2nd['Sunshine_Humidity_Cloudy'] = x_test_2nd['Sunshine'] * weight.iloc[0,1] + x_test_2nd['Humidity9am'] * weight.iloc[1,1] + x_test_2nd['Humidity3pm'] * weight.iloc[2,1] + x_test_2nd['Cloud9am'] * weight.iloc[3,1] + x_test_2nd['Cloud3pm'] * weight.iloc[4,1] # x_test_2nd['Pressure9am3pm'] = x_test_2nd['Pressure9am'] * weight.iloc[5,1] + x_test_2nd['Pressure3pm'] * weight.iloc[6,1] x_train_2nd.drop(weight['index'].values, axis = 1, inplace = True) x_test_2nd.drop(weight['index'].values, axis = 1, inplace = True) display(x_train_2nd.head()) test = x_train_2nd.copy() test['RainT'] = y_train_2nd test.corr() # x_train_2nd['Sunshine_Humidity_Cloudy_WindGustSpeed'] = x_train_2nd['Sunshine_Humidity_Cloudy'] * 0.481937 + x_train_2nd['WindGustSpeed'] * 0.237568 # x_test_2nd['Sunshine_Humidity_Cloudy_WindGustSpeed'] = x_test_2nd['Sunshine_Humidity_Cloudy'] * 0.481937 + x_test_2nd['WindGustSpeed'] * 0.237568 # x_train_2nd.drop(['WindGustSpeed','Sunshine_Humidity_Cloudy'], axis = 1, inplace = True) # x_test_2nd.drop(['WindGustSpeed','Sunshine_Humidity_Cloudy'], axis = 1, inplace = True) # x_train_2nd.head() x_train_2nd = columntransformer.fit_transform(x_train_2nd) x_test_2nd = columntransformer.fit_transform(x_test_2nd) x_train_2nd, y_train_2nd = SMOTE(sampling_strategy='minority').fit_resample(x_train_2nd, y_train_2nd) y_train_2nd.tolist().count(1) x_train_2nd.shape xgboost_2nd = XGBClassifier(n_estimators=350, learning_rate=0.1, max_depth=5, random_state=222 ) xgboost_2nd.fit(x_train_2nd, y_train_2nd) predicted_y_2nd = xgboost_2nd.predict(x_test_2nd) print(classification_report(y_test_2nd,predicted_y_2nd)) plot_importance(xgboost_2nd)Remove the least important featurex_train_2nd = np.delete(x_train_2nd, 4, axis = 1) x_test_2nd = np.delete(x_test_2nd, 4, axis = 1) x_train_2nd.shape xgboost_2nd = XGBClassifier(random_state=300, n_estimators=100, max_depth = 9, learning_rate=0.1, min_child_weight = 2, gamma = 0.1, reg_alpha= 0.01, subsample = 0.9, colsample_bytree= 0.85, objective= 'binary:logistic', n_jobs=-1) param_2nd = {'n_estimators':np.arange(100,1000,100)} gridsearch_2nd = GridSearchCV(estimator = xgboost_2nd, param_grid=param_2nd, cv = 3, scoring='f1') gridsearch_2nd.fit(x_train_2nd, y_train_2nd) predicted_y_2nd = gridsearch_2nd.predict(x_test_2nd) print(classification_report(y_test_2nd, predicted_y_2nd)) # plot_importance(xgboost_2nd) gridsearch_2nd.best_params_ gridsearch_2nd.best_estimator_1st tuningparam_2nd = { 'max_depth':(8,9,10), 'min_child_weight':(2,3,4)} gridsearch_2nd = GridSearchCV(estimator = xgboost_2nd, param_grid=param_2nd, cv = 3, scoring='f1', verbose=2) gridsearch_2nd.fit(x_train_2nd, y_train_2nd) predicted_y_2nd = gridsearch_2nd.predict(x_test_2nd) print(classification_report(y_test_2nd, predicted_y_2nd)) gridsearch_2nd.best_params_ gridsearch_2nd.best_estimator_2rd Tuningparam_2nd = {'gamma':[0.05, 0.1, 0.15]} gridsearch_2nd = GridSearchCV(estimator = xgboost_2nd, param_grid=param_2nd, cv = 3, scoring='f1', verbose=2) gridsearch_2nd.fit(x_train_2nd, y_train_2nd) predicted_y_2nd = gridsearch_2nd.predict(x_test_2nd) print(classification_report(y_test_2nd, predicted_y_2nd)) gridsearch_2nd.best_params_ gridsearch_2nd.best_estimator_3nd Tuningparam_2nd = { 'subsample':[0.80,0.85, 0.9, 0.95], 'colsample_bytree':[0.70, 0.75, 0.80, 0.85, 0.90]} gridsearch_2nd = GridSearchCV(estimator = xgboost_2nd, param_grid=param_2nd, cv = 3, scoring='f1', 
verbose=2) gridsearch_2nd.fit(x_train_2nd, y_train_2nd) predicted_y_2nd = gridsearch_2nd.predict(x_test_2nd) print(classification_report(y_test_2nd, predicted_y_2nd)) gridsearch_2nd.best_params_ gridsearch_2nd.best_estimator_4th Tuningparam_2nd = { 'reg_alpha':[0, 0.001, 0.005, 0.01, 0.05]} gridsearch_2nd = GridSearchCV(estimator = xgboost_2nd, param_grid=param_2nd, cv = 3, scoring='f1', verbose=2) gridsearch_2nd.fit(x_train_2nd, y_train_2nd) predicted_y_2nd = gridsearch_2nd.predict(x_test_2nd) print(classification_report(y_test_2nd, predicted_y_2nd)) print(gridsearch_2nd.best_params_) gridsearch_2nd.best_score_ param_2nd = {'n_estimators':np.arange(100,1000,100)} gridsearch_2nd = GridSearchCV(estimator = xgboost_2nd, param_grid=param_2nd, cv = 3, scoring='f1', verbose=2,n_jobs=-1) gridsearch_2nd.fit(x_train_2nd, y_train_2nd) predicted_y_2nd = gridsearch_2nd.predict(x_test_2nd) print(classification_report(y_test_2nd, predicted_y_2nd)) print(gridsearch_2nd.best_params_) gridsearch_2nd.best_estimator_ xgboost_2nd = XGBClassifier(random_state=300, n_estimators=100, max_depth = 9, learning_rate=0.1, min_child_weight = 2, gamma = 0.1, reg_alpha= 0.01, subsample = 0.9, colsample_bytree= 0.85, objective= 'binary:logistic', n_jobs=-1) xgboost_2nd.fit(x_train_2nd, y_train_2nd) predicted_y_2nd = xgboost_2nd.predict(x_test_2nd) print(classification_report(y_test_2nd, predicted_y_2nd)) plot_importance(xgboost_2nd)Further Study- can investigate more about features which are highly correlated- can try to use prediction models to predict the missing values- can create new features based on domain knowledge!ipython nbconvert --to=python Project_2_basic_ver.ipynb[TerminalIPythonApp] WARNING | Subcommand `ipython nbconvert` is deprecated and will be removed in future versions. [TerminalIPythonApp] WARNING | You likely want to use `jupyter nbconvert` in the future [NbConvertApp] Converting notebook Project_2_basic_ver.ipynb to python [NbConvertApp] Writing 22156 bytes to Project_2_basic_ver.pyT81-577: Applied Data Science for Practioners - Instructor: , School of Engineering and Applied Science, Washington University in St. Louis The Final Project Name: April 30, 2020 References- https://www.kaggle.com/dejavu23/sms-spam-or-ham-beginner- https://www.kaggle.com/muzzzdy/sms-spam-detection-with-various-classifiers- https://www.kaggle.com/jcbrooks/airlines-delay-and-cancellation-analysis As for automation by pipeline In this project, the both of predictors and target variables doesn't have any missing value. Besides, predictors can be converted to sparse matrix called 'features' as shown below. So, the Principle Component Analysis (PCA) method cannot be used, too. Therefore, I couldn't use pipeline in this case. Instead, I used the function I coded as 'hptuning' in the case of hyperparameter tuning. This function plays a role of automation in the hyperparameter tuning by random grid search. Table of Contents1. [Import necessary libraries and packages](import)2. [Pre-processing: Exploratory Data Analysis (EDA)](EDA)3. [Visualization of SMS messages](VisSMS) - [Fig.1 Histogram of ham and spam](fig1) - [Fig.2 Pie graph](fig2) - [Fig.3 Histogram of txt length](fig3) - [Fig.4 Histogram of txt length-2 (The merge of two plots of Fig. 3)](fig4)4. [Visualization by WordCloud](VisWC) - [Fig.5 WordCloud for Ham messages](fig5) - [Fig.6 WordCloud for Spam messages](fig6)5. 
[Data Cleaning for word analysis](Dclean) - Lower Case - Punctuation Removal - Number of stopwords - Removal of stopwords - [Fig.7 The visualization of top 30 frequent word in ham message](fig7) - [Fig.8 The visualization of top 30 frequent word in spam message](fig8)6. [Text vectorization](Tvec)7. [Classifiers and Predictions](Clfp) - [7.1 Support Vector Classifier](SVC) - [7.2 K-Nearest Neighbor](knc) - [7.3 Multinomial NB](mnb) - [7.4 Decision Tree Classifier](dtc) - [7.5 Logistic Regression](lrc) - [7.6 Random Forest Classifier](rf) - [7.7 AdaBoost Classifier](abc) - [7.8 Bagging Classifier](bc) - [7.9 Extra Trees Classifier](etc) - [7.10 Gradient boosting tree](gb) - [7.11 Newral network approach](mlp) - [Case Study 1](cs1) - [Case Study 2 -Stemming-](cs2) - [Case Study 3 -Message Length-](cs3)8. [Results of Case Studies](Rslts) - [Fig.9 Result of the Case Study 1](fig9) - [Fig.10 Results of the Case Studyies 1 and 2](fig10) - [Fig.11 Results of the Case Studyies 1, 2 and 3](fig11)#%load_ext pycodestyle_magic1. Import necessary libraries and packages#%%pycodestyle import string import numpy as np import pandas as pd import seaborn as sns import matplotlib as mpl import matplotlib.pyplot as plt from nltk.corpus import stopwords from nltk.stem import SnowballStemmer from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import TfidfVectorizer %matplotlib inline #%%pycodestyle import string import string from sklearn.svm import SVC from sklearn.metrics import accuracy_score from sklearn.tree import DecisionTreeClassifier from sklearn.naive_bayes import MultinomialNB from sklearn.ensemble import BaggingClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.neural_network import MLPClassifier from sklearn.ensemble import ExtraTreesClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import GradientBoostingClassifier #%%pycodestyle from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import confusion_matrix from sklearn.model_selection import RandomizedSearchCV #%%pycodestyle import wordcloud import matplotlib.pyplot as plt from wordcloud import WordCloud, STOPWORDS #%%pycodestyle import os import sys src_dir = os.path.join(os.getcwd(), '..', 'src') sys.path.append(src_dir)2. Pre-processing: Exploratory Data Analysis (EDA)#%%pycodestyle # import raw data from 'raw' folder sms_proc = pd.read_csv('../data/raw/spam.csv', encoding='latin-1') # take a look at the dataset sms_proc.head() #%%pycodestyle # Drop ''unnamed' columns and Rename v1 and v2 sms_proc = sms_proc.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1) sms_proc = sms_proc.rename(columns={'v1':'label', 'v2':'message'}) #%%pycodestyle sms_proc.head() #%%pycodestyle # check if NA exists in each column sms_proc.isnull().any()We can confirm that there are no NaN value in 'label' and 'message'.#%%pycodestyle # Copy the preporecessed data sms = sms_proc.copy()3. 
Visualization of SMS messages Fig.1 Histogram of ham and spam#%%pycodestyle fig1 = sms plt.style.use('seaborn-bright') plt.rcParams["figure.figsize"] = (7, 5) sns.countplot(x="label", data=fig1, saturation=0.5)Fig.2 Pie graph#%%pycodestyle sms["label"].value_counts().plot(kind='pie', explode=[0, 0.1], figsize=(6, 6), autopct='%1.1f%%', shadow=True) plt.style.use('seaborn-bright') plt.ylabel("Spam vs Ham") plt.legend(["Ham", "Spam"]) plt.show() #%%pycodestyle # new feature 'message length' sms['length'] = sms['message'].apply(len) sms.head()Fig.3 Histogram of txt length#%%pycodestyle sns.set() mpl.rcParams['patch.force_edgecolor'] = True plt.style.use('seaborn-bright') sms.hist(column='length', by='label', bins=50, figsize=(11, 5))The above plot infers that spam message tends to be a lenghthy.The below plot shows the above two plots in one fifgure by label. Fig.4 Histogram of txt length-2 (The merge of two plots of Fig. 3)#%%pycodestyle ax = sns.distplot( sms.query('label=="ham"')['length'], bins=50, color='blue', kde=False, label='ham' ) ax = sns.distplot( sms.query('label=="spam"')['length'], bins=25, color='green', kde=False, label='spam' ) plt.rcParams["figure.figsize"] = (18, 15) ax.set(xlim=(0, 300), ylim=(0, 1500)) plt.legend(fontsize='35') plt.show()4. Visualization by WordCloud WordCloud is a data visualization technique used for representing text data in which the size of each word indicates its frequency or importance. https://www.geeksforgeeks.org/generating-word-cloud-python/#%%pycodestyle sms_ham = sms[sms['label'] == 'ham'].copy() sms_spam = sms[sms['label'] == 'spam'].copy() #%%pycodestyle def show_wordcloud(sms_spam_or_ham, title): """VIsualize by WordCloud.""" text = ' '.join(sms_spam_or_ham['message'].astype(str).tolist()) stopwords = set(wordcloud.STOPWORDS) fig_wordcloud = wordcloud.WordCloud(stopwords=stopwords, background_color='lightgrey', colormap='viridis', width=800, height=600).generate(text) plt.figure(figsize=(10, 7), frameon=True) plt.imshow(fig_wordcloud) plt.axis('off') plt.title(title, fontsize=20) plt.show()Fig.5 WordCloud for Ham messages#%%pycodestyle show_wordcloud(sms_ham, "Ham messages")Fig.6 WordCloud for Spam messages#%%pycodestyle show_wordcloud(sms_spam, "Spam messages")5. Data Cleaning for word analysis#%%pycodestyle word = sms.copy()Lower Case Converting all texts to lower case#%%pycodestyle # lower case word['message'] = word['message'].apply(lambda x: " ".join (x.lower() for x in x.split())) word['message'].head()Punctuation Removal#%%pycodestyle # Remove punctuation word['message'] = word['message'].str.replace(r'[^\w\s]', '') word['message'].head()Number of stopwordsStopwords are the English words such as 'and', 'the', 'you', which does not add much meaning to a sentence. 
Stopwords are sometimes/often removed to avoid them being analyzed in the case of text analysis.#%%pycodestyle # from nltk.corpus import stopwords stop = stopwords.words('english') word['stopwords'] = word['message'].apply(lambda x: len([x for x in x.split() if x in stop])) word[['message', 'stopwords']].head() #%%pycodestyle word = word.drop('stopwords', axis=1)Removal of stopwords#%%pycodestyle # Remove stopwords from nltk.corpus import stopwords stop = stopwords.words('english') word['message'] = word['message'].apply(lambda x: " ".join(x for x in x.split() if x not in stop)) word['message'].head()Top 30 Frequent word#%%pycodestyle # Top 30 Frequent word freq = pd.Series(' '.join(word['message']).split()).value_counts()[:30] freq #%%pycodestyle word_ham = word.loc[word['label'] == 'ham'] word_spam = word.loc[word['label'] == 'spam']Top 30 Frequent word in ham message#%%pycodestyle # Top 30 Frequent word in ham message freq_ham = pd.Series(' '.join(word_ham['message']).split()).value_counts()[:30] freq_ham #%%pycodestyle freq_ham = pd.DataFrame(freq_ham) column = ["total"] freq_ham.columns = column freq_ham.head() #%%pycodestyle x = freq_ham.indexFig.7 The visualization of top 30 frequent word in ham message#%%pycodestyle plt.figure(figsize=(20, 8)) sns.set_context("notebook", 0.9, {"lines.linewidth": 4}) sns.set_style(style='whitegrid') ax = sns.barplot(x, data=freq_ham, y="total") ax.set(ylabel='total') # plt.xticks(rotation=90) plt.show()Top 30 Frequent word in spam message#%%pycodestyle # Top 30 Frequent word in spam message freq_spam = pd.Series(' '.join (word_spam['message']).split()).value_counts()[:30] freq_spam #%%pycodestyle freq_spam = pd.DataFrame(freq_spam) column = ["total"] freq_spam.columns = column freq_spam.head() #%%pycodestyle x = freq_spam.indexFig.8 The visualization of top 30 frequent word in spam message#%%pycodestyle plt.figure(figsize=(20, 8)) sns.set_context("notebook", 0.9, {"lines.linewidth": 4}) ax = sns.barplot(x, data=freq_spam, y="total") ax.set(ylabel='total') # plt.xticks(rotation=90) plt.show()6. Text vectorization#%%pycodestyle text_msg = sms['message'].copy() #%%pycodestyle def text_process(text): """Remove punctuation and stopwords""" text = text.translate(str.maketrans(' ', ' ', string.punctuation)) text = [word for word in text.split() if word.lower() not in stopwords.words('english')] return " ".join(text) #%%pycodestyle text_msg = text_msg.apply(text_process) #%%pycodestyle vectorizer = TfidfVectorizer("english") #%%pycodestyle features = vectorizer.fit_transform(text_msg)7. Classifiers and Predictions#%%pycodestyle # Split our features to test and train set features_train, features_test, labels_train, labels_test = train_test_split( features, sms['label'], test_size=0.3, random_state=111)Automation by the hyperparameter#%%pycodestyle def hptuning(clf, param_grid): """Hyperparameter Tuning.""" random_grid_search = RandomizedSearchCV( estimator=clf, param_distributions=param_grid) random_grid_search.fit(features_train, labels_train) best_grid_randomsearch = random_grid_search.best_estimator_ print('Best hyperparameters ... 
\n', best_grid_randomsearch) #from features import hyperparameter_tuning #from features.hyperparameter_tuning import hptuning7.1 Support Vector Classifier#%%pycodestyle svc = SVC() print(svc)SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, decision_function_shape='ovr', degree=3, gamma='auto_deprecated', kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=0.001, verbose=False)Hyperparameter tuning by random search#%%pycodestyle param_grid = { 'kernel': ['linear', 'poly', 'rbf', 'sigmoid'], 'gamma': [1.0, 3.0, 5.0, 10.0, 20.0] }SVC model run#%%pycodestyle np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}) svc = SVC(kernel='linear', gamma=5.0, random_state=111).fit( features_train, labels_train) labels_pred = svc.predict(features_test) print('Recall: {:.3f}'.format(recall_score(labels_test, labels_pred, average='macro'))) print('Precision: {:.3f}'.format(precision_score(labels_test, labels_pred, average='macro'))) print('Accuracy: {:.3f}'.format(accuracy_score(labels_test, labels_pred))) print('F1: {:.3f}'.format(f1_score(labels_test, labels_pred, average='macro'))) confusion = confusion_matrix(labels_test, labels_pred) print(confusion)Recall: 0.930 Precision: 0.984 Accuracy: 0.980 F1: 0.955 [[1438 2] [ 32 200]]7.2 K-Nearest Neighbor#%%pycodestyle knc = KNeighborsClassifier() print(knc)KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, n_neighbors=5, p=2, weights='uniform')Hyperparameter tuning by random search#%%pycodestyle param_grid = { 'n_neighbors': [1, 10, 25, 50, 75, 100] } #%%pycodestyle # Hyperparameter tuning rgs = hptuning(knc, param_grid)//miniconda3/lib/python3.7/site-packages/sklearn/model_selection/_split.py:1978: FutureWarning: The default value of cv will change from 3 to 5 in version 0.22. Specify it explicitly to silence this warning. warnings.warn(CV_WARNING, FutureWarning) //miniconda3/lib/python3.7/site-packages/sklearn/model_selection/_search.py:266: UserWarning: The total space of parameters 6 is smaller than n_iter=10. Running 6 iterations. For exhaustive searches, use GridSearchCV. % (grid_size, self.n_iter, grid_size), UserWarning)K-Nearest Neighbor model run#%%pycodestyle np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}) knc = KNeighborsClassifier(n_neighbors=1).fit(features_train, labels_train) labels_pred = knc.predict(features_test) print('Recall: {:.3f}'.format(recall_score(labels_test, labels_pred, average='macro'))) print('Precision: {:.3f}'.format(precision_score(labels_test, labels_pred, average='macro'))) print('Accuracy: {:.3f}'.format(accuracy_score(labels_test, labels_pred))) print('F1: {:.3f}'.format(f1_score(labels_test, labels_pred, average='macro'))) confusion = confusion_matrix(labels_test, labels_pred) print(confusion)Recall: 0.797 Precision: 0.969 Accuracy: 0.944 F1: 0.857 [[1440 0] [ 94 138]]7.3 Multinomial NB#%%pycodestyle mnb = MultinomialNB() print(mnb)MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)Hyperparameter tuning by random search#%%pycodestyle param_grid = { 'alpha': [0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 1.0] } #%%pycodestyle # Hyperparameter tuning rgs = hptuning(mnb, param_grid)//miniconda3/lib/python3.7/site-packages/sklearn/model_selection/_split.py:1978: FutureWarning: The default value of cv will change from 3 to 5 in version 0.22. Specify it explicitly to silence this warning. 
warnings.warn(CV_WARNING, FutureWarning) //miniconda3/lib/python3.7/site-packages/sklearn/model_selection/_search.py:266: UserWarning: The total space of parameters 7 is smaller than n_iter=10. Running 7 iterations. For exhaustive searches, use GridSearchCV. % (grid_size, self.n_iter, grid_size), UserWarning)Multinomial NB model run#%%pycodestyle np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}) mnb = MultinomialNB(alpha=0.2).fit(features_train, labels_train) labels_pred = mnb.predict(features_test) print('Recall: {:.3f}'.format(recall_score(labels_test, labels_pred, average='macro'))) print('Precision: {:.3f}'.format(precision_score(labels_test, labels_pred, average='macro'))) print('Accuracy: {:.3f}'.format(accuracy_score(labels_test, labels_pred))) print('F1: {:.3f}'.format(f1_score(labels_test, labels_pred, average='macro'))) confusion = confusion_matrix(labels_test, labels_pred) print(confusion)Recall: 0.966 Precision: 0.969 Accuracy: 0.984 F1: 0.967 [[1428 12] [ 14 218]]7.4 Decision Tree Classifier#%%pycodestyle dtc = DecisionTreeClassifier() print(dtc)DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=None, max_features=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, presort=False, random_state=None, splitter='best')Hyperparameter tuning by random search#%%pycodestyle param_grid = { 'min_samples_split': [2, 5, 7, 10, 30] } #%%pycodestyle # Hyperparameter tuning rgs = hptuning(dtc, param_grid)//miniconda3/lib/python3.7/site-packages/sklearn/model_selection/_split.py:1978: FutureWarning: The default value of cv will change from 3 to 5 in version 0.22. Specify it explicitly to silence this warning. warnings.warn(CV_WARNING, FutureWarning) //miniconda3/lib/python3.7/site-packages/sklearn/model_selection/_search.py:266: UserWarning: The total space of parameters 5 is smaller than n_iter=10. Running 5 iterations. For exhaustive searches, use GridSearchCV. % (grid_size, self.n_iter, grid_size), UserWarning)Decision Tree model run#%%pycodestyle np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}) dtc = DecisionTreeClassifier(min_samples_split=7, random_state=111).fit( features_train, labels_train) labels_pred = dtc.predict(features_test) print('Recall: {:.3f}'.format(recall_score(labels_test, labels_pred, average='macro'))) print('Precision: {:.3f}'.format(precision_score(labels_test, labels_pred, average='macro'))) print('Accuracy: {:.3f}'.format(accuracy_score(labels_test, labels_pred))) print('F1: {:.3f}'.format(f1_score(labels_test, labels_pred, average='macro'))) confusion = confusion_matrix(labels_test, labels_pred) print(confusion)Recall: 0.907 Precision: 0.918 Accuracy: 0.959 F1: 0.913 [[1409 31] [ 38 194]]7.5 Logistic Regression#%%pycodestyle lrc = LogisticRegression() print(lrc)LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, l1_ratio=None, max_iter=100, multi_class='warn', n_jobs=None, penalty='l2', random_state=None, solver='warn', tol=0.0001, verbose=0, warm_start=False)Hyperparameter tuning by random search#%%pycodestyle param_grid = { 'solver': ['liblinear', 'saga'], 'penalty': ['l1', 'l2'] } #%%pycodestyle # Hyperparameter tuning rgs = hptuning(lrc, param_grid)//miniconda3/lib/python3.7/site-packages/sklearn/model_selection/_split.py:1978: FutureWarning: The default value of cv will change from 3 to 5 in version 0.22. Specify it explicitly to silence this warning. 
warnings.warn(CV_WARNING, FutureWarning) //miniconda3/lib/python3.7/site-packages/sklearn/model_selection/_search.py:266: UserWarning: The total space of parameters 4 is smaller than n_iter=10. Running 4 iterations. For exhaustive searches, use GridSearchCV. % (grid_size, self.n_iter, grid_size), UserWarning) //miniconda3/lib/python3.7/site-packages/sklearn/linear_model/sag.py:337: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge "the coef_ did not converge", ConvergenceWarning)Logistic Regression model run#%%pycodestyle np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}) lrc = LogisticRegression(solver='liblinear', penalty='l1').fit( features_train, labels_train) labels_pred = lrc.predict(features_test) print('Recall: {:.3f}'.format(recall_score(labels_test, labels_pred, average='macro'))) print('Precision: {:.3f}'.format(precision_score(labels_test, labels_pred, average='macro'))) print('Accuracy: {:.3f}'.format(accuracy_score(labels_test, labels_pred))) print('F1: {:.3f}'.format(f1_score(labels_test, labels_pred, average='macro'))) confusion = confusion_matrix(labels_test, labels_pred) print(confusion)Recall: 0.822 Precision: 0.929 Accuracy: 0.943 F1: 0.865 [[1425 15] [ 80 152]]7.6 Random Forest Classifier#%%pycodestyle rf = RandomForestClassifier() print(rf)RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=None, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators='warn', n_jobs=None, oob_score=False, random_state=None, verbose=0, warm_start=False)Hyperparameter tuning by random search#%%pycodestyle param_grid = { 'n_estimators': [5, 10, 28, 29, 30, 31, 32, 33, 40, 50], 'random_state': [111] } #%%pycodestyle # Hyperparameter tuning hptuning(rf, param_grid)//miniconda3/lib/python3.7/site-packages/sklearn/model_selection/_split.py:1978: FutureWarning: The default value of cv will change from 3 to 5 in version 0.22. Specify it explicitly to silence this warning. warnings.warn(CV_WARNING, FutureWarning)Random Forest model run#%%pycodestyle np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}) rf = RandomForestClassifier(n_estimators=29, random_state=111).fit( features_train, labels_train) labels_pred = rf.predict(features_test) print('Recall: {:.3f}'.format(recall_score(labels_test, labels_pred, average='macro'))) print('Precision: {:.3f}'.format(precision_score(labels_test, labels_pred, average='macro'))) print('Accuracy: {:.3f}'.format(accuracy_score(labels_test, labels_pred))) print('F1: {:.3f}'.format(f1_score(labels_test, labels_pred, average='macro'))) confusion = confusion_matrix(labels_test, labels_pred) print(confusion)Recall: 0.890 Precision: 0.983 Accuracy: 0.969 F1: 0.930 [[1440 0] [ 51 181]]7.7 AdaBoost Classifier#%%pycodestyle abc = AdaBoostClassifier() print(abc)AdaBoostClassifier(algorithm='SAMME.R', base_estimator=None, learning_rate=1.0, n_estimators=50, random_state=None)Hyperparameter tuning by random search#%%pycodestyle param_grid = { 'n_estimators': [10, 50, 60, 61, 62, 63, 65, 70], 'random_state': [111] } #%%pycodestyle # Hyperparameter tuning hptuning(abc, param_grid)//miniconda3/lib/python3.7/site-packages/sklearn/model_selection/_split.py:1978: FutureWarning: The default value of cv will change from 3 to 5 in version 0.22. Specify it explicitly to silence this warning. 
warnings.warn(CV_WARNING, FutureWarning) //miniconda3/lib/python3.7/site-packages/sklearn/model_selection/_search.py:266: UserWarning: The total space of parameters 8 is smaller than n_iter=10. Running 8 iterations. For exhaustive searches, use GridSearchCV. % (grid_size, self.n_iter, grid_size), UserWarning)AdaBoostClassifier model run#%%pycodestyle np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}) abc = AdaBoostClassifier(n_estimators=70, random_state=111).fit( features_train, labels_train) labels_pred = abc.predict(features_test) print('Recall: {:.3f}'.format(recall_score(labels_test, labels_pred, average='macro'))) print('Precision: {:.3f}'.format(precision_score(labels_test, labels_pred, average='macro'))) print('Accuracy: {:.3f}'.format(accuracy_score(labels_test, labels_pred))) print('F1: {:.3f}'.format(f1_score(labels_test, labels_pred, average='macro'))) confusion = confusion_matrix(labels_test, labels_pred) print(confusion)Recall: 0.911 Precision: 0.966 Accuracy: 0.971 F1: 0.936 [[1432 8] [ 40 192]]7.8 Bagging Classifier#%%pycodestyle bc = BaggingClassifier() print(bc)BaggingClassifier(base_estimator=None, bootstrap=True, bootstrap_features=False, max_features=1.0, max_samples=1.0, n_estimators=10, n_jobs=None, oob_score=False, random_state=None, verbose=0, warm_start=False)Hyperparameter tuning by random search#%%pycodestyle param_grid = { 'n_estimators': [5, 8, 9, 10, 11, 12, 13, 15, 20], 'random_state': [111] } #%%pycodestyle # Hyperparameter tuning hptuning(bc, param_grid)//miniconda3/lib/python3.7/site-packages/sklearn/model_selection/_split.py:1978: FutureWarning: The default value of cv will change from 3 to 5 in version 0.22. Specify it explicitly to silence this warning. warnings.warn(CV_WARNING, FutureWarning) //miniconda3/lib/python3.7/site-packages/sklearn/model_selection/_search.py:266: UserWarning: The total space of parameters 9 is smaller than n_iter=10. Running 9 iterations. For exhaustive searches, use GridSearchCV. 
% (grid_size, self.n_iter, grid_size), UserWarning)Bagging Classifier model run#%%pycodestyle np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}) bc = BaggingClassifier(n_estimators=5, random_state=111).fit( features_train, labels_train) labels_pred = bc.predict(features_test) print('Recall: {:.3f}'.format(recall_score(labels_test, labels_pred, average='macro'))) print('Precision: {:.3f}'.format(precision_score(labels_test, labels_pred, average='macro'))) print('Accuracy: {:.3f}'.format(accuracy_score(labels_test, labels_pred))) print('F1: {:.3f}'.format(f1_score(labels_test, labels_pred, average='macro'))) confusion = confusion_matrix(labels_test, labels_pred) print(confusion)Recall: 0.913 Precision: 0.941 Accuracy: 0.966 F1: 0.926 [[1420 20] [ 37 195]]7.9 Extra Trees Classifier#%%pycodestyle etc = ExtraTreesClassifier() print(etc)ExtraTreesClassifier(bootstrap=False, class_weight=None, criterion='gini', max_depth=None, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators='warn', n_jobs=None, oob_score=False, random_state=None, verbose=0, warm_start=False)Hyperparameter tuning by random search#%%pycodestyle param_grid = { 'n_estimators': [5, 8, 9, 10, 11, 12, 13, 15, 20], 'random_state': [111] } #%%pycodestyle # Hyperparameter tuning hptuning(etc, param_grid)//miniconda3/lib/python3.7/site-packages/sklearn/model_selection/_split.py:1978: FutureWarning: The default value of cv will change from 3 to 5 in version 0.22. Specify it explicitly to silence this warning. warnings.warn(CV_WARNING, FutureWarning) //miniconda3/lib/python3.7/site-packages/sklearn/model_selection/_search.py:266: UserWarning: The total space of parameters 9 is smaller than n_iter=10. Running 9 iterations. For exhaustive searches, use GridSearchCV. 
% (grid_size, self.n_iter, grid_size), UserWarning)Extra Classifier model run#%%pycodestyle np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}) etc = ExtraTreesClassifier(n_estimators=11, random_state=111).fit( features_train, labels_train) labels_pred = etc.predict(features_test) print('Recall: {:.3f}'.format(recall_score(labels_test, labels_pred, average='macro'))) print('Precision: {:.3f}'.format(precision_score(labels_test, labels_pred, average='macro'))) print('Accuracy: {:.3f}'.format(accuracy_score(labels_test, labels_pred))) print('F1: {:.3f}'.format(f1_score(labels_test, labels_pred, average='macro'))) confusion = confusion_matrix(labels_test, labels_pred) print(confusion)Recall: 0.924 Precision: 0.983 Accuracy: 0.978 F1: 0.951 [[1438 2] [ 35 197]]7.10 Gradient boosting tree#%%pycodestyle gb = GradientBoostingClassifier() print(gb)GradientBoostingClassifier(criterion='friedman_mse', init=None, learning_rate=0.1, loss='deviance', max_depth=3, max_features=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=100, n_iter_no_change=None, presort='auto', random_state=None, subsample=1.0, tol=0.0001, validation_fraction=0.1, verbose=0, warm_start=False)Hyperparameter tuning by random search#%%pycodestyle param_grid = { 'learning_rate': [0.01, 0.1, 0.5, 1.0], 'max_depth': [1, 3, 5], 'n_estimators': [10, 100, 200], 'random_state': [111] } #%%pycodestyle # Hyperparameter tuning hptuning(gb, param_grid)//miniconda3/lib/python3.7/site-packages/sklearn/model_selection/_split.py:1978: FutureWarning: The default value of cv will change from 3 to 5 in version 0.22. Specify it explicitly to silence this warning. warnings.warn(CV_WARNING, FutureWarning)Gradient Boosting Tree model run#%%pycodestyle gb = GradientBoostingClassifier(n_estimators=200, learning_rate=0.1, max_depth=5).fit(features_train, labels_train) labels_predicted = gb.predict(features_test) confusion = confusion_matrix(labels_test, labels_predicted) print('Recall: {:.3f}'.format(recall_score(labels_test, labels_predicted, average='macro'))) print('Precision: {:.3f}'.format(precision_score(labels_test, labels_predicted, average='macro'))) print('Accuracy: {:.3f}'.format(accuracy_score(labels_test, labels_predicted))) print('F1: {:.3f}'.format(f1_score(labels_test, labels_predicted, average='macro'))) confusion = confusion_matrix(labels_test, labels_predicted) print(confusion)Recall: 0.912 Precision: 0.960 Accuracy: 0.970 F1: 0.934 [[1429 11] [ 39 193]]7.11 Newral network approach#%%pycodestyle mlp = MLPClassifier() print(mlp)MLPClassifier(activation='relu', alpha=0.0001, batch_size='auto', beta_1=0.9, beta_2=0.999, early_stopping=False, epsilon=1e-08, hidden_layer_sizes=(100,), learning_rate='constant', learning_rate_init=0.001, max_iter=200, momentum=0.9, n_iter_no_change=10, nesterovs_momentum=True, power_t=0.5, random_state=None, shuffle=True, solver='adam', tol=0.0001, validation_fraction=0.1, verbose=False, warm_start=False)Hyperparameter tuning by random search#%%pycodestyle param_grid = { 'alpha': [0.0001, 0.0003, 0.003], 'max_iter': [10, 100, 200] } #%%pycodestyle random_grid_search = RandomizedSearchCV( estimator=mlp, param_distributions=param_grid, cv=3, n_jobs=-1, verbose=2, n_iter=10) random_grid_search.fit(features_train, labels_train) #%%pycodestyle mlp = MLPClassifier(hidden_layer_sizes=[10], alpha=0.0003, max_iter=200, random_state=111).fit( features_train, labels_train) labels_predicted 
= mlp.predict(features_test) confusion = confusion_matrix(labels_test, labels_predicted) print('Recall: {:.3f}'.format(recall_score(labels_test, labels_predicted, average='macro'))) print('Precision: {:.3f}'.format(precision_score(labels_test, labels_predicted, average='macro'))) print('Accuracy: {:.3f}'.format(accuracy_score(labels_test, labels_predicted))) print('F1: {:.3f}'.format(f1_score(labels_test, labels_predicted, average='macro'))) confusion = confusion_matrix(labels_test, labels_predicted) print(confusion)Recall: 0.943 Precision: 0.982 Accuracy: 0.982 F1: 0.961 [[1436 4] [ 26 206]]Introduce various classifiers#%%pycodestyle svc = SVC(kernel='linear', gamma=5.0, random_state=111) knc = KNeighborsClassifier(n_neighbors=1) mnb = MultinomialNB(alpha=0.2) dtc = DecisionTreeClassifier(min_samples_split=7, random_state=111) lrc = LogisticRegression(solver='liblinear', penalty='l1') rf = RandomForestClassifier(n_estimators=29, random_state=111) abc = AdaBoostClassifier(n_estimators=70, random_state=111) bc = BaggingClassifier(n_estimators=5, random_state=111) etc = ExtraTreesClassifier(n_estimators=11, random_state=111) gb = GradientBoostingClassifier(n_estimators=200, learning_rate=0.1, max_depth=5) mlp = MLPClassifier(hidden_layer_sizes=[10], alpha=0.0003, max_iter=200, random_state=111) #%%pycodestyle clfs = {'SVC':svc, 'KN':knc, 'NB': mnb, 'DT': dtc, 'LR': lrc, 'RF': rf, 'AdaBoost': abc, 'BgC': bc, 'ETC': etc, 'GB': gb, 'NN':mlp}Funtions to fit these classifiers and make predictions#%%pycodestyle def train_classifier(clf, feature_train, labels_train): """Training by classifier of clf""" clf.fit(feature_train, labels_train) #%%pycodestyle def predict_labels(clf, features): """Predict by using test predictors""" return (clf.predict(features))Case Study 1 Run all classifiers and save their results#%%pycodestyle pred_scores = [] for k, v in clfs.items(): train_classifier(v, features_train, labels_train) pred = predict_labels(v, features_test) pred_scores.append((k, [accuracy_score(labels_test, pred)])) #%%pycodestyle rlt = pd.DataFrame.from_items(pred_scores, orient='index', columns=['Score']) rlt//miniconda3/lib/python3.7/site-packages/ipykernel_launcher.py:2: FutureWarning: from_items is deprecated. Please use DataFrame.from_dict(dict(items), ...) instead. 
DataFrame.from_dict(OrderedDict(items)) may be used to preserve the key order.Case Study 2 -Stemming-Stemming refers to normalizing words to their base or root form. For example, the words 'waited', 'waiting', and 'waits' all share the root word 'wait'. Here, we stem our text messages and examine the effect of stemming.#%%pycodestyle def stemmer(text): """Stemmer""" text = text.split() words = "" for i in text: stemmer = SnowballStemmer("english") words += (stemmer.stem(i))+" " return words #%%pycodestyle text_msg = text_msg.apply(stemmer) #%%pycodestyle features = vectorizer.fit_transform(text_msg) #%%pycodestyle # Split our features into train and test sets features_train, features_test, labels_train, labels_test = train_test_split( features, sms['label'], test_size=0.3, random_state=111) #%%pycodestyle # Run all classifiers and save the results pred_scores = [] for k, v in clfs.items(): train_classifier(v, features_train, labels_train) pred = predict_labels(v, features_test) pred_scores.append((k, [accuracy_score(labels_test, pred)])) #%%pycodestyle rlt2 = pd.DataFrame.from_items(pred_scores, orient='index', columns=['Score2']) rlt2 = pd.concat([rlt, rlt2], axis=1) rlt2//miniconda3/lib/python3.7/site-packages/ipykernel_launcher.py:2: FutureWarning: from_items is deprecated. Please use DataFrame.from_dict(dict(items), ...) instead. DataFrame.from_dict(OrderedDict(items)) may be used to preserve the key order.Case Study 3 -Message Length-Finally, the message length is taken into account in all models.#%%pycodestyle # message length lf = sms['length'].values newfeat = np.hstack((features.todense(), lf[:, None])) #%%pycodestyle # Split our features into train and test sets features_train, features_test, labels_train, labels_test = train_test_split( newfeat, sms['label'], test_size=0.3, random_state=111) # Run all classifiers and save the results pred_scores = [ ] for k, v in clfs.items(): train_classifier(v, features_train, labels_train) pred = predict_labels(v, features_test) pred_scores.append((k, [accuracy_score(labels_test, pred)])) #%%pycodestyle rlt3 = pd.DataFrame.from_items(pred_scores, orient='index', columns=['Score3']) rlt3 = pd.concat([rlt2, rlt3], axis=1) rlt3//miniconda3/lib/python3.7/site-packages/ipykernel_launcher.py:2: FutureWarning: from_items is deprecated. Please use DataFrame.from_dict(dict(items), ...) instead. DataFrame.from_dict(OrderedDict(items)) may be used to preserve the key order.8. Results of Case Studies Fig.9 Result of the Case Study 1#%%pycodestyle rlt.plot(kind='bar', ylim=(0.9, 1.0), figsize=(11, 6), align='center', colormap="Accent") sns.set(style='darkgrid') plt.xticks(np.arange(11), rlt.index) plt.ylabel('Accuracy Score') plt.title('Distribution by Classifier') plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)The best accuracy score is around 0.984, achieved by the multinomial Naive Bayes classifier (NB). The support vector classifier (SVC) also shows a high accuracy score. On the other hand, the k-nearest neighbors classifier and logistic regression show the lowest accuracy scores of all. Fig.10 Results of the Case Studies 1 and 2#%%pycodestyle rlt2.plot(kind='bar', ylim=(0.85, 1.0), figsize=(11, 6), align='center', colormap="Accent") plt.xticks(np.arange(11), rlt2.index) plt.ylabel('Accuracy Score') plt.title('Distribution by Classifier') plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)The results are almost the same as those of Case Study 1. However, stemming gives a slight improvement in accuracy for many of the methods.
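As the FutureWarning above notes, `pd.DataFrame.from_items` is deprecated. Below is a minimal sketch of the replacement the warning itself suggests, building the same kind of results table with `DataFrame.from_dict`; the two score values are placeholders for illustration, not results from this notebook.

import pandas as pd

# pred_scores is a list of (classifier_name, [accuracy]) tuples, as built above;
# the entries here are placeholder values only.
pred_scores = [('NB', [0.98]), ('SVC', [0.97])]
rlt = pd.DataFrame.from_dict(dict(pred_scores), orient='index', columns=['Score'])
print(rlt)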
Fig.11 Results of the Case Studyies 1, 2 and 3#%%pycodestyle rlt3.plot(kind='bar', ylim=(0.85, 1.0), figsize=(11, 6), align='center', colormap="Accent") plt.xticks(np.arange(11), rlt3.index) plt.ylabel('Accuracy Score') plt.title('Distribution by Classifier') plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)Predicting Oil Prices Using an RNN with LSTMResearchers have found that recurrent neural networks (RNN) with LSTM can outperform traditional forecasting models like ARIMA when forecasting future values of certain time series data. (For an example see [A comparison of artificial neural network and time series models for forecasting commodity prices](https://www.sciencedirect.com/science/article/pii/0925231295000208))This Python 3 notebook will demonstrate how to apply an RNN with LSTM to forecast weekly West Texas crude oil prices. The data used to train the model covers the time period from 01/03/1986 to 3/30/2018. The data was downloaded from the [Federal Reserve Bank of St. Louis](https://fred.stlouisfed.org) Setup1. Download the file with West Texas crude oil prices from [here](https://raw.githubusercontent.com/djccarew/timeseries-rnn-lab-part1/master/data/WCOILWTICO.csv) to your local system. The name of the file is WCOILWTICO.csv.2. Click on the data icon at the top right of the notebook window and then select and upload the WCOILWTCO.csv file.![Data icon](https://github.com/djccarew/timeseries-rnn-lab-part1/raw/master/images/ss6.png) 3. Once the file is uploaded, place your cursor in the code cell below and select Insert to code->Insert pandas Dataframe.![Insert code](https://github.com/djccarew/timeseries-rnn-lab-part1/raw/master/images/ss7.png) This will insert the code to load the file from Object Storage into a DataFrame4. Run each cell in the notebook after reading the description of what is being done with each cell Import Data# With your cursor in this cell, insert the code to read the dataset into a DataFrame as instructed in step 3) # of the setup instructions above # New version of imported DataFrame indexed by the DATE column # Make sure variable name on the right of the assigment statement matches the value inserted # into the code cell above data = df_data_1.set_index('DATE')Build the model# Required imports from math import sqrt from numpy import concatenate from matplotlib import pyplot import pandas as pd from datetime import datetime from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout from keras.layers import LSTM import plotly.offline as py import plotly.graph_objs as go import numpy as np py.init_notebook_mode(connected=True) %matplotlib inline # Plot the data read in cop_trace = go.Scatter(x=data.index, y=data['WCOILWTICO'], name= 'Price') py.iplot([cop_trace]) # Create a scaled version of the data with oil prices normalized between 0 and 1 values = data['WCOILWTICO'].values.reshape(-1,1) values = values.astype('float32') scaler = MinMaxScaler(feature_range=(0, 1)) scaled = scaler.fit_transform(values) # Split the data between training and testing # The first 70% of the data is used for training while the remaining 30% is used for validation train_size = int(len(scaled) * 0.7) test_size = len(scaled) - train_size train, test = scaled[0:train_size,:], scaled[train_size:len(scaled),:] print(len(train), len(test)) # Generates the X and Y data from the downloaded dataset. 
The last n values in the input data are left off # and the Y values are generated by shifting the X values by n # where n is the value of the prev_periods paramater # See the example below , prev_periods is set to 2 # Original X (weeks 1 - 5) = 1.05, 1.15, 1.25, 1.35, 1.45 # New X (weeks 1 - 3) = 1.05, 1.15, 1.25 # Y = 1.25, 1.35, 1.45 # def gen_datasets(dataset, prev_periods=1): dataX, dataY = [], [] for i in range(len(dataset) - prev_periods): a = dataset[i:(i + prev_periods), 0] dataX.append(a) dataY.append(dataset[i + prev_periods, 0]) print(len(dataY)) return np.array(dataX), np.array(dataY) # Generate testing and validation data # We'll use a sliding window size of 1 week to predict the next week's price prev_periods = 1 trainX, trainY = gen_datasets(train, prev_periods) testX, testY = gen_datasets(test, prev_periods) # Reshape into a numpy arraya of shape (m, 1, prev_periods) where m is the number of training or testing values trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1])) testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1])) # Build RNN - this should take a a few minutes model = Sequential() model.add(LSTM(100, input_shape=(trainX.shape[1], trainX.shape[2]))) model.add(Dropout(0.2)) model.add(Dense(1)) model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae']) history = model.fit(trainX, trainY, epochs=50, batch_size=32, validation_data=(testX, testY), verbose=0, shuffle=False) # Check out MSE, RMSE, MAE for training and testing data training_error = model.evaluate(trainX, trainY, verbose=0) print('Training error: %.5f MSE (%.5f RMSE) %.5f MAE' % (training_error[0], sqrt(training_error[0]), training_error[1])) testing_error = model.evaluate(testX, testY, verbose=0) print('Testing error: %.5f MSE (%.5f RMSE) %.5f MAE' % (testing_error[0], sqrt(testing_error[0]), testing_error[1])) # Plot validation loss vs epoch number pyplot.plot(history.history['loss'], label='training loss') pyplot.plot(history.history['val_loss'], label='test loss') pyplot.legend() pyplot.show() # Plot prediction vs actual using scaled values (0, 1) yhat_test = model.predict(testX) print(yhat_test.shape) pyplot.plot(yhat_test, color='red', label='prediction') pyplot.plot(testY, color='blue', label='actual') pyplot.legend() pyplot.show() # Convert scaled prices back to original scale (USD) yhat_test_inverse = scaler.inverse_transform(yhat_test.reshape(-1, 1)) testY_inverse = scaler.inverse_transform(testY.reshape(-1, 1)) # Add dates back dates = data.tail(len(testX)).index testY_reshape = testY_inverse.reshape(len(testY_inverse)) yhat_test_reshape = yhat_test_inverse.reshape(len(yhat_test_inverse)) # Calculate MSE< RMSE based on original USD prices mse = mean_squared_error(testY_inverse, yhat_test_inverse) rmse = sqrt(mse) print('Test MSE(USD): %.3f Test RMSE(USD): %.3f' % (mse, rmse)) # Plot actual vs predicted using actual dates and USD actual = go.Scatter(x=dates, y=testY_reshape, line = dict(color = ('rgb(0, 0, 255)'), width = 4), name= 'Actual Price') predicted = go.Scatter(x=dates, y=yhat_test_reshape, line = dict(color = ('rgb(255, 0, 0)'), width = 4), name= 'Predicted Price') py.iplot([predicted, actual])Run new data through modelAs part of the data prep that last weeks price (3/30/2018) was left off because we had no data for the following week (4/6/2018). 
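One optional sanity check, not part of the original notebook: a naive persistence baseline that predicts each week's price as the previous week's observed price. A useful LSTM should reach a lower RMSE than this baseline on the same test data. This sketch assumes `testY_inverse` from the cells above is available.

from math import sqrt
from sklearn.metrics import mean_squared_error

# Persistence baseline: predict week t with the observed price of week t-1
naive_pred = testY_inverse[:-1]
naive_true = testY_inverse[1:]
naive_rmse = sqrt(mean_squared_error(naive_true, naive_pred))
print('Persistence baseline RMSE(USD): %.3f' % naive_rmse)

With that comparison in hand, we return to the held-out last week's price (3/30/2018).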
Let's use this value to predict the price for the week of 4/6/2018# Grab last week of normalized data and reshape into shape expected by model scaled_last_prices = scaled[len(scaled) - prev_periods:len(scaled),:] scaled_last_prices = np.reshape(scaled_last_prices, (1, 1, prev_periods)) print(scaled_last_prices) # Predict the price for the week of 4/6/2018 using the model # Note this will be on a scale of (0,1) #print(new_scaled_last_prices.shape) next_price_prediction = model.predict(scaled_last_prices) # Transform scaled predicion back to a USD price next_price_inverse = scaler.inverse_transform(next_price_prediction.reshape(-1, 1)) print(next_price_inverse)Regular Expresions: * Regular expressions(regex) are a powerful language for matchig text patterns.* regex is used to check if a sequence of characters(string) contains the specified search pattern. Why to use regex?* For manipulation of text data.* To fetch the required information out of a text. It acts as a powerful search engine.* For validatig text inputs regex are extensively used on websites.In data science regex is helps in preprocessing for text analysis. With respect to Python there is a library by name `re` which is used to perform tasks relaing to regular expressions.What are the advantages of using a `re` library:* It is fast, so using them surely saves time.* Helps us write short codes to carry on the operations.import re #this is how we import the library ## Flow of regular expression will be written in this fashion. text = 'this is to denote the text' #text to validate the working of the syntax. pattern = r'\d' # pattern will have the particular syntax of regular expressions match = re.match(pattern, text) # match will run the regex expressions print(match) #### Why in pattern have I used r outside the string. Here 'r' stands for raw. It helps us to get rid of tabs e.g. text1 = 'a\tb' #string substituted '\t' as a tab. text2 = r'a\tb' # '\t' is preserved as it is. print(text1) print(text2)a b a\tbCommonly used methods to deal with the regular expressions.Finding and replacing matched patterns. 
Use compiled object methods for additional options and fine-tuning parameters Use method :::::: To re.match :::::: Find match at start of string re.search :::::: Find the first matchre.findall :::::: Retrieve all matching strings re.finditer :::::: Retrive all matches re.sub :::::: Replace a matching string re.split :::::: Split text based on match Anchors:Use :::::: To specify position ^ :::::: At start of string or line \A :::::: At start of string \Z :::::: At end of string $ :::::: At end of string or line \b :::::: On word boundary \B :::::: Not on word boundary## Let us look at the example: Does it have str = "The rain in Spain" #Check if the string starts with "The": x = re.findall("\AThe", str) print(x) if (x): print("Yes, there is a match!") else: print("No match") ## \b : Returns a match where the specified characters are at the beginning or at the end of a word: str = "The brain in Spain" #Check if "ain" is present at the beginning of a WORD: x = re.findall(r"\bspa", str) print(x) if (x): print("Yes, there is at least one match!") else: print("No match") import re ## Returns a match where the specified characters are present, but NOT at the beginning (or at the end) of a word str = "The rain in Spain" #Check if "ain" is present, but NOT at the beginning of a word: x = re.findall(r"\Bain", str) print(x) if (x): print("Yes, there is at least one match!") else: print("No match") ## \Z Returns a match if the specified characters are at the end of the string import re str = "The rain in Spain" #Check if the string ends with "Spain": x = re.findall("pain\Z", str) print(x) if (x): print("Yes, there is a match!") else: print("No match") ## $ Ends with str = "hello world" #Check if the string ends with 'world': x = re.findall("world$", str) if (x): print("Yes, the string ends with 'world'") else: print("No match")No matchCHARACTER CLASSES Use :::::: To match character \w :::::: Word character. [0-9_a-zA-Z] and Unicode word characters \W :::::: Non-word character \d :::::: Decimal digit and Unicode digits \D :::::: Not a decimal digit \s :::::: White-space character [ \t\n\r\f\v] and Unicode spaces \S :::::: Non-white-space char## \w : Returns a match where the string contains any word characters (characters from a to Z, digits from 0-9, # and the underscore _ character) str = "The rain in Spain" #Return a match at every word character (characters from a to Z, digits from 0-9, and the underscore _ character): x = re.findall("\w", str) print(x) if (x): print("Yes, there is at least one match!") else: print("No match") ## \W : Returns a match where the string DOES NOT contain any word characters import re str = "The rain in Spain" #Return a match at every NON word character (characters NOT between a and Z. Like "!", "?" 
white-space etc.): x = re.findall("\W", str) print(x) if (x): print("Yes, there is at least one match!") else: print("No match") ## \d : Returns a match where the string contains digits (numbers from 0-9) import re str = "The rain in Spain is 23.4 cms" #Check if the string contains any digits (numbers from 0-9): x = re.findall("\d", str) print(x) if (x): print("Yes, there is at least one match!") else: print("No match") ## \D : Returns a match where the string DOES NOT contain digits import re str = "The rain in Spain" #Return a match at every no-digit character: x = re.findall("\D", str) print(x) if (x): print("Yes, there is at least one match!") else: print("No match") ## \s : Returns a match where the string contains a white space character import re str = "The rain in Spain" #Return a match at every white-space character: x = re.findall("\s", str) print(x) if (x): print("Yes, there is at least one match!") else: print("No match") ## \S : Returns a match where the string DOES NOT contain a white space character import re str = "The rain in Spain" #Return a match at every NON white-space character: x = re.findall("\S", str) print(x) if (x): print("Yes, there is at least one match!") else: print("No match")['T', 'h', 'e', 'r', 'a', 'i', 'n', 'i', 'n', 'S', 'p', 'a', 'i', 'n'] Yes, there is at least one match!QUANTIFIERS Greedy :::::: Lazy :::::: Matches 1 * :::::: *? :::::: 0 or more times 2 + :::::: +? :::::: 1 or more times 3 ? :::::: ?? :::::: 0 or 1 time 4 {n} :::::: {n}? :::::: Exactly n times 5 {n,} :::::: {n,}? ::::: At least n times 6 {n,m} :::::: {n,m}? :::: From n to m times## * - Zero or more occurrences import re str = "The rain in Spain falls mainly in the plain!" #Check if the string contains "ai" followed by 0 or more "x" characters: x = re.findall("ait*", str) print(x) if (x): print("Yes, there is at least one match!") else: print("No match") ## + - One or more occurrences import re str = "The rain in Spain falls aixt mainly in the plain!" #Check if the string contains "ai" followed by 1 or more "x" characters: x = re.findall("aix+", str) print(x) if (x): print("Yes, there is at least one match!") else: print("No match") ## ? - 0 or 1 time. str = "The rain in Spain falls mainly in the plain!" #Check if the string contains "ai" followed by 1 or more "x" characters: x = re.findall("ai?", str) print(x) if (x): print("Yes, there is at least one match!") else: print("No match") ## {n} - Excactly the specified number of occurrences import re str = "The rain in Spain falls mainly in the plain!" 
#Check if the string contains "a" followed by exactly two "l" characters: x = re.findall("al{2}", str) print(x) if (x): print("Yes, there is at least one match!") else: print("No match") ## {n,m} - Excactly the specified number of occurrences import re str = "dhek leta hon mein 61234 aa 324 gaya 21331 hein 121w4" #Check if the string contains "a" followed by exactly two "l" characters: x = re.findall('[0-9]{3,4}', str) print(x) if (x): print("Yes, there is at least one match!") else: print("No match")['6123', '324', '2133', '121'] Yes, there is at least one match!The Engineering WOrld - A Dot To Explore ...___ Treating missing valuesimport numpy as np import pandas as pd from pandas import Series, DataFrameFiguring out what data is missingmissing = np.nan series_obj = Series(['row 1', 'row 2', missing, 'row 4','row 5', 'row 6', missing, 'row 8']) series_obj # object_name.isnull() # ♔┈♔┈♔┈( WHAT THIS DOES )┈♔┈♔┈♔ # The .isnull() method returns a Boolean value that describes (True or False) whether an element in a # Pandas object is a null value. series_obj.isnull()Filling in for missing valuesnp.random.seed(25) DF_obj = DataFrame(np.random.randn(36).reshape(6,6)) DF_obj DF_obj.ix[3:5, 0] = missing DF_obj.ix[1:4, 5] = missing DF_obj # object_name.fillna(numeric value) # ♔┈♔┈♔┈( WHAT THIS DOES )┈♔┈♔┈♔ # The .fillna method() finds each missing value from within a Pandas object and fills it with the # numeric value that you've passed in. filled_DF = DF_obj.fillna(0) filled_DF # object_name.fillna(dict) # ♔┈♔┈♔┈( WHAT THIS DOES )┈♔┈♔┈♔ # You can pass a dictionary into the .fillna() method. The method will then fill in missing values # from each column Series (as designated by the dictionary key) with its own unique value # (as specified in the corresponding dictionary value). filled_DF = DF_obj.fillna({0: 0.1, 5: 1.25}) filled_DF # ♔┈♔┈♔┈( WHAT THIS DOES )┈♔┈♔┈♔ # You can also pass in the method='ffill' arguement, and the .fillna() method will fill-forward any # missing values with values from the last non-null element in the column Series. fill_DF = DF_obj.fillna(method='ffill') fill_DFCounting missing valuesnp.random.seed(25) DF_obj = DataFrame(np.random.randn(36).reshape(6,6)) DF_obj.ix[3:5, 0] = missing DF_obj.ix[1:4, 5] = missing DF_obj # object_name.isnull().sum() # ♔┈♔┈♔┈( WHAT THIS DOES )┈♔┈♔┈♔ # To generate a count of how many missing values a DataFrame has per column, just call the .isnull() # method off of the object, and then call the .sum() method off of the matrix of Boolean values it # returns. DF_obj.isnull().sum()Filtering out missing values# object_name.dropna() # ♔┈♔┈♔┈( WHAT THIS DOES )┈♔┈♔┈♔ # To identify and drop all rows from a DataFrame that contain ANY missing values, simply call the # .dropna() method off of the DataFrame object. NOTE: If you wanted to drop columns that contain # any missing values, you'd just pass in the axis=1 argument to select and search the DataFrame # by columns, instead of by row. DF_no_NaN = DF_obj.dropna(axis=1) DF_no_NaN # object_name.dropna(how='all') # ♔┈♔┈♔┈( WHAT THIS DOES )┈♔┈♔┈♔ # To identify and drop only the rows from a DataFrame that contain ALL missing values, simply # call the .dropna() method off of the DataFrame object, and pass in the how='all' argument. DF_obj.dropna(how='all')Example usageTo use `cipher_ha2573` in a project:import cipher_ha2573 print(cipher_ha2573.__version__)1. Asignación de variables En Python no se necesita especificar explícitamente el tipo de variable. 
En la imagen siguiente se observa que antes del nombre de la variable `age` aparece la palabra `int`. Esto es porque en otros lenguajes de programación sí se requiere especificar el tipo de variable al mismo tiempo de definir su nombre. Afortunadamente, Python, automáticamente, hace la definición del tipo de variables sin necesidad de hacerlo manualmente. ![ffff.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAu4AAAFRCAMAAAAct9jFAAAC9FBMVEUAAAAAgIAWYnsAgIAAgIAAf39AoKADgYG02toTXngAgIAVSVkNenwBgIAGfH4Cf4AKe30UPUsRWHEBgIAAgIAAdXUAgIAAgIAAgIAVUmYAfoAQLzkCg4IQRlATPEgUUGEAgID///8FAAAVTlkFAAAFAAAFAAAAgIAFAADQHx/Z6uzQHx/l8fPL4eQAgIAoboUAgICSyMgGf3+excwAgIBHgpUTTFcAgIAajY05eI3A294AgIBUqqqVx8gFAAAFAAD4/Pzw+Phut7dXjqATPUonj48DgYFpm6sGY2YFAAAHZWYZiop7p7Uok5MFAAAFBQayzdQxlJSr1tYAgIAgkJCIxMQFAAAQSVBZfwSOtcAJYmgYUVsFAAAFAAA8nZ3ZGSRFn59ksbEXSVUEAQHQHx9ZfwYQiIjhFSgXjIzQHx82m5s4m5vQHx/2BzQEgID2CDTQHx/QHx8xmZkDgYECQ0MMhoa43Nz2BzQAgIDQHx8AgIAgkZEbjo4+n5+BwMDYHCd5vb0Qh4cAgIBbra3UHCFns7M) 2. Tipos de variables  2.1 Enterosnumero_entero = 100 print(numero_entero)1002.2 Float (decimales)numero_decimal = 1.1 print(numero_decimal) print(numero_entero + numero_decimal)101.12.3 Strings (texto)texto = "Hola mundo" print(texto)Hola mundo2.4 Listaslista_ejemplo = ["Lunes", "Martes", "Miércoles", "Jueves", "Viernes"] lista_ejemplo[5] lista_numeros = [1, 2, 3, 4, 5] print(lista_numeros) lista_ejemplo2 = ['Lunes', 1, "Martes", 2] print(lista_ejemplo2) print(lista_ejemplo)['Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes']2.5 Tuplastupla_ejemplo = ("Lunes", "Martes", "Miércoles", "Jueves", "Viernes") print(tupla_ejemplo) nueva_variable1 = list(tupla_ejemplo) nueva_variable1 nueva_variable2 = tuple(lista_ejemplo)2.6 Diccionariosdicc = {"Lunes", "Martes", "Miércoles", "Jueves", "Viernes"} print(dicc){'Lunes', 'Martes', 'Miércoles', 'Viernes', 'Jueves'}2.3 Booleans (lógicos)a = True b = False a == b3. Strings Para los lingüistas, los strings son el tipo de dato más importante. El trabajo que hacen mayormente es con letras, palabras y oraciones, las cuales son representadas computacionalmente por medio de strings.string1 = "Texto" print(string1) string2 = '''Esta es una cadena que contiene más de una línea. Por lo tanto se escribe con tres comillas consecutivas.''' print(string2) string2[0]3.2 Comparación'gato' == 'gato' a = 'gato' b = 'perro' # el signo == compara # el signo = asigna valores a variables # no son intercambiables estos signos a == b print(a) a < b a > b3.3 Longitudtexto = '''Esta es una cadena que contiene más de una línea. Por lo tanto se escribe con tres comillas consecutivas.''' len(texto) if (len(texto) > len(a)): print("Verdadero")Verdadero4. Listas  4.1 Longitud# len(frutas) imprime el número de elementos en la lista frutas frutas = ['manzana', 'pera', 'naranja', 'uvas', 'sandía'] len(frutas)4.2 Añadir elementos - append()# nombre de la lista . 
append('elemento a añadir') frutas.append('plátano') print(frutas)['manzana', 'pera', 'naranja', 'uvas', 'sandía', 'plátano']4.3 Eliminar elementos - pop()frutas.pop() print(frutas) frutas.pop(1) print(frutas)['manzana', 'naranja', 'uvas', 'sandía']4.4 Ordenar elementos - sort()frutas.sort() print(frutas)['manzana', 'naranja', 'sandía', 'uvas']4.5 Acceder a elementos individualesfrutas[2]4.6 Slices ![list.jpeg](data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAMCAgICAgMCAgIDAwMDBAYEBAQEBAgGBgUGCQgKCgkICQkKDA8MCgsOCwkJDRENDg8QEBEQCgwSExIQEw8QEBD/2wBDAQMDAwQDBAgEBAgQCwkLEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBD/wAARCAEtAwoDASIAAhEBAxEB/8QAHQABAQACAwEBAQAAAAAAAAAAAAcGCAQFCQMCAf/EA)slicing = "geeksforgeeks" len(slicing) # [inclusivo:no-inclusivo] variable = slicing[1:3] print(slicing) slicing[0:13] slicing[-13:] slicing[-13:-1] slicing[:13] numeros = [1,2,3,4,5,6,7,8,9,10] primera_mitad = numeros[:5] print(primera_mitad) segunda_mitad = numeros[5:] print(segunda_mitad) primera_mitad + segunda_mitad nombre = 'Juan' apellido = ' Vasquez' print(nombre + apellido)5. TuplasSon un tipo de dato fijo. Esto quiere decir que los elementos dentro de las tuplas no pueden ser modificados una vez que son definidos. Su funcionamiento es muy similar al de las listas.frutas = ('manzana', 'pera', 'naranja', 'uvas', 'sandía') frutas[1] len(frutas) frutas.pop() lista_frutas = list(frutas) print(lista_frutas) lista_frutas.pop() print(lista_frutas)['manzana', 'pera', 'naranja', 'uvas']6. DiccionariosEste tipo de datos son conjuntos de pares de elementos. El primer elemento de cada conjunto puede ser utilizado para buscar al segundo elemento. Se refieren a los elementos dentro de un diccionario como `"llaves": valores`.Muchas de las funciones para listas y tuplas también sirven para diccionarios.calificaciones = {'Luis': 10, 'Martha': 9, 'Ana': 10, 'Pedro': 7} print(calificaciones) calificaciones['Pedro'] len(calificaciones) calificaciones['Luis']6.1 Pertenencia - in'Luis' in calificaciones 'Carlos' in calificaciones6.2 Eliminar un elementodel(calificaciones['Ana']) print(calificaciones){'Luis': 10, 'Martha': 9, 'Pedro': 7}6.3 Listar todas las llaves de un diccionariocalificaciones.keys()6.4 Listar todos los valores en un diccionariocalificaciones.values()6.5 Añadir elementos a un diccionariocalificaciones['Carlos'] = 8 print(calificaciones) 8 + 8 calificaciones['Regina'] = 10 print(calificaciones) print(calificaciones){'Luis': 10, 'Martha': 9, 'Pedro': 7, 'Carlos': 8, 'Regina': 10}Ejercicios# Busca si la palabra "es" esta en el texto ejemplo1 = 'Este es un ejemplo' "abc" in ejemplo1 # Imprime todo el texto en mayúsculas mayusculas = ejemplo1.upper() # el ' ' significa espacio en blanco. Se pone pues # Python no separa las palabras si no se le dice explicitamente print(mayusculas + ' ' + ejemplo1) # Escribe código que imprima los elementos de una lista en las posiciones 0, 4 y 5. ejemplo3 = ['Rojo', 'Verde', 'Blanco', 'Negro', 'Rosa', 'Amarillo'] ejemplo3.pop(5)Basic Command Line UsageFor the purpose of this presentation, we focus on some basic descriptive statistic command and some common regression model technique. 
All examples are based on the datasets stored in the data directory.# Importing the dependency import os import pandas as pd import numpy as np %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns from scipy import stats from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, r2_score from sklearn.linear_model import LogisticRegression import statsmodels.api as sm import statsmodels.formula.api as smf from statsmodels.nonparametric.smoothers_lowess import lowess # Viewing the current working directory os.getcwd() # Importing the Boston dataset for linear regression analysis. boston = pd.read_csv('./data/Boston.csv') # Print the first five row of the data. boston.head() # Summarize the data set. boston.describe() # Summarize a subset of columns boston[["tax", "crim"]].describe() # Print the type of the data boston.dtypes # If we are not sure about what the function does, we can use the help() function or "?". help(print) ?print # Using a subset of data for descriptive summary, for instand, only data with 'crim' over 5 subset1 = boston.loc[boston['crim'] > 5, : ] # subset1.head() # Print the statistical summary of the median home value in the subset of the data. subset1['medv'].describe() # Suppose we are interested to create a subset with crim > 5 and chas = 1 subset2 = boston.loc[(boston['crim']>5) & (boston['chas']==1), : ] # subset2.head() # Print the statistical summary of the median home value in the subset of the data. subset2['medv'].describe()Creating Graph with Matplotly* Graphing correlation among variables (Pairs Plot)* Graphing quantitative Data (Histogram)* Graphing Quantitative Data with Categories (Box Plot)* Graphing Quantitative variables against each other (Scatter Plot)# Graphing correlation among variables sns.pairplot(boston) # Our dataset has a variable "medv" which represents the median home's value. # Creating a histogram for it is easy: sns.distplot(boston['medv'], bins=10, kde=False) # Our dataset identifies whether a home is located nearby the river or not "chas" # We can use a boxplot to compare nearby or none nearby river home value: sns.boxplot(x=boston["chas"], y=boston["medv"], data=boston) # Our data contains variables on median home value and per capita crime rate. # A scatter plot for these two variables is easily created with: sns.scatterplot(x=boston["crim"], y=boston["medv"], data=boston)Single Linear Regression ModelThere are two main stream statistics libraries are generally used in Python:* StatsModels: More general statistic library for statisticians and researchers because it provides some of the statictical elements that Scikit-Learn does not offer, such as adjusted R^2, AIC, BIC, etc.* Scikit-Learn: More for industry to use for building machine learning model because it has the capability to implement and create pipline building web application for clients use. The downside is that it will not general a statistical summary like we usually seen in Stata or R.In this demo, we will mainly focus on using StatsModels library. Note that there is no one saying one is better than the other one, but most of the data scientist will use SciKit-Learn library because of the popularity of machine leanring and AI building.# Checking the correlation between variables. boston.corr() # Create the X and y variable to pass into the model. # The reason we need to reshape the data dimension because SciKit-Learn only takes numpy array for modeling. 
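# To be concrete: boston['crim'].values is a 1-D array of shape (n,), and
# reshape(-1, 1) turns it into a single-column 2-D array of shape (n, 1),
# which is the layout scikit-learn estimators expect for a single feature.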
X = boston['crim'].values.reshape(-1,1) y = boston["medv"].values.reshape(-1,1) # Estimate the predicted effect of per capita crime rate on the median home value in Boston: # We will first use SciKit-Learn library for this example. from sklearn.linear_model import LinearRegression model = LinearRegression() model # Once we store the Linear Regression model, we can use the model to fit the data X and y model.fit(X, y) print(model) # The estimated parameters from the model. print('Weight coefficents: ', model.coef_) print('y-axis intercept: ', model.intercept_) # As you can see from this result, there is no statistical summary we can print from the model fit with SciKit-Learn. # Now, let's consider using the StatsModel library for the same model specification. import statsmodels.api as sm import statsmodels.formula.api as smf from statsmodels.nonparametric.smoothers_lowess import lowess model = sm.OLS(y, sm.add_constant(X)) print(model) # Fitting the model and printing the summary fit1 = model.fit() print(fit1.summary()) # Plotting the regression results, residual vs. fitted plot residuals = fit1.resid fitted = fit1.fittedvalues smoothed = lowess(residuals, fitted) plt.rcParams.update({'font.size': 16}) plt.rcParams["figure.figsize"]=(8,7) fig, ax = plt.subplots() ax.scatter(fitted, residuals, edgecolors='k', facecolors='none') ax.plot(smoothed[:, 0], smoothed[:, 1], color='r') ax.set_ylabel('Residuals') ax.set_xlabel('Fitted Values') ax.set_title('Residuals vs. Fitted') ax.plot([min(fitted), max(fitted)], [0,0], color='k', linestyle=':', alpha=0.3) plt.show() # Printing the first 5 fitted values print(fitted[0:5]) # Printing the first 5 residual values print(residuals[0:5]) # Plotting the data with the regression line. plt.scatter(X, y) plt.plot(X, fitted, color='red') plt.show() # Another option for plotting the regression line is to use Seaborn sns.lmplot(x='crim', y='medv', data=boston)Multiple Linear RegressionWe are going to mainly focus on StatsModels from now and on.# Estimate the predicted effect of the per capita crime rate, lower status # of the population, and dummy on median home value in Boston: # First we need to define the X and y variables in our model. X = boston[['crim', 'lstat', 'chas']] y = boston['medv'] # Fitting the model with StatsModels OLS function model2 = sm.OLS(y, sm.add_constant(X)) fit2 = model2.fit() print(fit2.summary()) # Robust Tests # Ramsey RESET Test (Cannot find any useable function from StatsModels) # Breusch-Pagan / Cook-Weisberg test for Heteroskedasticity from statsmodels.stats.diagnostic import het_breuschpagan from statsmodels.stats.diagnostic import het_white white_test = het_white(fit2.resid, fit2.model.exog) print(f"LM Statistic: {white_test[0]}") print(f"p-value: {white_test[1]}") bp_test = het_breuschpagan(fit2.resid, fit2.model.exog) print(f"LM Statistic: {bp_test[0]}") print(f"p-value: {bp_test[1]}") # Variance Inflation Factor (VIF) Test from statsmodels.stats.outliers_influence import variance_inflation_factor # Create a dataframe to save the results X1 = sm.add_constant(X) vif = pd.DataFrame() vif["VIF Factor"] = [variance_inflation_factor(X1.values, i) for i in range(X1.shape[1])] vif["features"] = X1.columns # Inspect VIF factors vif.round(1)Logistic Regression ModelAgain, we are going to focus on using StatsModels for the logistic model because it has more statistic elements reported from the summary tabel.# Importing the Default dataset for linear regression analysis. 
default_data = pd.read_csv('./data/Default.csv') # Print the first five rows of the data default_data.head() # Descriptive Summary of the data set default_data.describe() # Estimate the log odds of Default using the average balance that the customer has remaining on their credit card after making their monthly payment. # This time we are going to use the glm function with statsmodel, which is similar to R. import statsmodels.formula.api as smf model3 = smf.glm(formula='default~balance+income+student', data=default_data, family=sm.families.Binomial(link=sm.genmod.families.links.logit)) # Fitting themodel fit3 = model3.fit() # Printing the summary table print(fit3.summary()) # Store the predicted probability from the model. ypred = fit3.predict(default_data[['student', 'balance', 'income']]) print(ypred[0:10]) y_predicted = [] for i in ypred: if i > 0.5: y_predicted.append("No") if i <= 0.5: y_predicted.append("Yes") print(y_predicted[0:10]) # Robust Tests: # Ramsey RESET test, again not available in the documentation. # #Breusch-Pagan / Cook-Weisberg test for heteroskedasticity from statsmodels.stats.diagnostic import het_breuschpagan from statsmodels.stats.diagnostic import het_white white_test = het_white(fit3.resid_response, fit3.model.exog) print(f"WH: {white_test[0]}") print(f"p-value: {white_test[1]}") bp_test = het_breuschpagan(fit3.resid_response, fit3.model.exog) print(f"BP: {bp_test[0]}") print(f"p-value: {bp_test[1]}") # Variance Inflation Factor (VIF) Test from statsmodels.stats.outliers_influence import variance_inflation_factor # Create a dataframe to save the results df = pd.get_dummies(data=default_data, columns=['student']) X = df[['student_Yes', 'balance', 'income']] X1 = sm.add_constant(X) vif = pd.DataFrame() vif["VIF Factor"] = [variance_inflation_factor(X1.values, i) for i in range(X1.shape[1])] vif["features"] = X1.columns # Inspect VIF factors vif.round(1) # Compute ROC curve and area under the curve from sklearn.metrics import roc_curve, auc y_test = [] for i in default_data['default']: # print(i) if i == "Yes": y_test.append(0) if i == "No": y_test.append(1) print(y_test[100:150]) fpr, tpr, thresholds = roc_curve(y_test, ypred) roc_auc = auc(fpr, tpr) print("Area under the ROC curve: %f" % roc_auc) # Plotting the ROC curve plt.clf() plt.plot(fpr, tpr, label='ROC Curve (Area = %0.2f)' %roc_auc) plt.plot([0,1], [0,1], 'k--') plt.xlim([0.0,1.0]) plt.ylim([0.0,1.0]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver Operating Characteristic Example') plt.legend(loc="lower right") plt.show() # Printing the confusion matrix from sklearn.metrics import confusion_matrix, classification_report con_matrix = confusion_matrix(default_data["default"], y_predicted) print(con_matrix) class_report = classification_report(default_data["default"], y_predicted) print(class_report) # Or we can use the Pandas crosstab() function y_actu = pd.Series(default_data['default'], name='Actual') y_pred = pd.Series(y_predicted, name='Predicted') df_confusion = pd.crosstab(y_actu, y_pred) print(df_confusion)Data Loadingimport os os.chdir('electrochemistry_sean_mcintosh/electrochem') !ls import numpy as np import pandas as pd import matplotlib.pyplot as plt import pickle from pathlib import Path import seaborn as sns from tqdm.notebook import trange, tqdm import wandb sns.set() DATA_DIR = Path('../data') # Do not read in 'Mix 1' sheet, as that has been updated in 'mix_1_updated.xlsx' sheet_names = ['Seawater - No Heavy Metals', 'Copper', 'Cadmium', 'Lead'] xcel 
= pd.read_excel(DATA_DIR / 'main.xlsx', sheet_name=sheet_names) # Read in updated mix sheet mix = pd.read_excel(DATA_DIR / 'mix_1_updated.xlsx') seawater = xcel['Seawater - No Heavy Metals'] copper = xcel['Copper'] cadmium = xcel['Cadmium'] lead = xcel['Lead'] seawater['label'] = 'Sw' seawater = seawater.drop(['Unnamed: 0', 'Unnamed: 1', 'Unnamed: 2'], axis=1) seawater.head(3) copper['label'] = 'Cu' cadmium['label'] = 'Cd' lead['label'] = 'Pb' lead.columns copper = copper.drop(['Unnamed: 0', 'Unnamed: 1', 'Concentration'], axis=1) cadmium = cadmium.drop(['Unnamed: 0', 'Analyte', 'Concentration'], axis=1) lead = lead.drop(['Unnamed: 0', 'Analyte', 'Concentration'], axis=1) dfs = [copper, cadmium, lead, seawater] for df in dfs: print(df.shape) df = pd.concat(dfs, ignore_index=True) df.label.value_counts(normalize=True) df.head() df.to_csv(DATA_DIR / 'four_class_dataset.csv') X = df.iloc[:, :-1].values y = df.iloc[:, -1].values df.shapeMediumGiven a binary tree, flatten it to a linked list in-place.For example, given the following tree: 1 / \ 2 5 / \ \ 3 4 6The flattened tree should look like: 1 \ 2 \ 3 \ 4 \ 5 \ 6 ThoughtUsing stack to stack the highest right node. And make the root to connect to the left node since we wanna make it like preorder traversal. Then disconnect the left pointer between root and left node.# Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution: def flatten(self, root: TreeNode) -> None: """ Do not return anything, modify root in-place instead. """ if not root: return None if not root.left and not root.right: return stack = [root] node = None while stack: if node: root.right = node root = root.right node = stack.pop() if node.right: stack.append(node.right) node.right = None if node.left: node.right = node.left node.left = None stack.append(node.right) if node: root.right = node root = root.rightCA3 810197626In this assignment we must infer if a Digikala review is positive or negative using naive bayes. The iniital information is gathered from a train dataset and then we use it on a test dataset.Naive bayes uses Bayes rule and conditional independence as its base. It's called naive because of its assumption of independence of the features. For example in a spam filter model, it doesn't care about order of the words in a message or sentence and assumes them independent (aka bag of words). This model uses bag of words similar to spam filter.Naive bayes, although simple, still can get an acceptable result therefore it's used mostly for a fast, easy and experimental model.from __future__ import unicode_literals import hazm import numpy as np import pandas as pd import time import collections import math------------ Loaded DataThe comment_train.csv file contains our train data and comment_test.csv contains the test data.train_data = pd.read_csv('comment_train.csv') test_data = pd.read_csv('comment_test.csv')--------- Stop WordsConjuctions, punctuation marks and whitespaces are chosen as stop words. Hazm's stop word list was also tried and the results didn't change much.stop_list = ['و', 'یا', 'را', '!', '؟', '?', '.', ',', '،', '\r', '\n', '\t', 'به', 'از', 'ُ', 'ً', 'ٍ'] # stop_list = hazm.stopwords_list()-------- Title and CommentThe title of reviews were appended to the comment with a weight of 2. 
Meaning they were repated twice in the comment so they can have higher effect on the result.train_data['comment'] = train_data['comment'] + ' ' + 2*(train_data['title']+ ' ') test_data['comment'] = test_data['comment'] + ' ' + 2*(test_data['title']+ ' ')------------ Pre-ProcessDiffrent approaches were tried for the pre-process including removing stop words and Hazm's stemmer, lemmatizer and normalizer. Hazm's stemmer didn't function very well as it removed ending letters of most of the words that it shouldn't have. The final pre-process is removing stop words, then normalizing each word and then lemmatizing that word. Lemmatization vs StemmingBoth lemmatization and stemming try to find the root of a word so different variation of words and verbs become same words. This way we can get more accurate histogram and frequency of those words.Stemming mostly relies on cutting of the suffixes and prefixes of a word to return its root, for example changing **می‌رفتم** to **رفت**. But sometimes it fails and cuts some parts that are in the root, Eg. changing **پایان** to **پا**. An english example would be **clearly** to **clear**.On the other hand, lemmatization tries to find root of the word in the context and takes its meaning into account, it may use a database of words and their meanings to do its job. As an example it can change **می‌روم** to **رفت** but stemming may fail and change it to **می‌رو**. Or an english example is changing **worse** to **bad**.stemmer = hazm.Stemmer() lem = hazm.Lemmatizer() normalizer = hazm.Normalizer() def filter_stop_words(x): return x not in stop_list def lem_n_norm(x): splitted = lem.lemmatize(normalizer.normalize(x)).split('#') return splitted[0] def pre_process(words): result = list(map(lem_n_norm, filter(filter_stop_words, words))) return list(filter(None, result))---------- Naive Bayes ClassifierAs explained in the introduction it uses Bayes rule and assumption of conditional independence as its base. ModelThis model has two classes (**recommended** and **not_recommended**). Features of the classes are the words used in each class, ie. the words appearing in each review of the class so if a word is repeated many times in recommended reviews, a comment with that word gets higher chance of being classified as recommended. Bag of WordsThe bag of words model is used so we treat all the words the same regardless of their position in a sentence. To make the bag of words, we combine all the recommended reviews and tokenize them, then put them in a single list of words. We do the same for not_recommended reviews. We also treat probability of a word independent of another word given its class. ProcessFirst we count occurance of each word in the words list for recommended and not_recommended.def get_freqs(rec_list, not_rec_list): rec_freq = dict(collections.Counter(rec_list)) not_rec_freq = dict(collections.Counter(not_rec_list)) return rec_freq, not_rec_freq, len(rec_list), len(not_rec_list)Prior ProbabilityThe initial probability of a message being recommended or not_recommended without having any evidences. It's calculated by dividing number of each class by the total reviews:$P(recommended) = \dfrac{recommended\_review\_count}{total\_review\_count}$It's 0.5 for both classes:(train_data['recommend'] == 'recommended').sum() / len(train_data)LikelihoodThe probability of appearance of a word in a review given it is labaled recommended or not_recommended. 
It is straightforward to calculate:$P(word\ |\ recommended) = \dfrac{frequency\_in\_recommended\_words(word)}{total\ recommended\ words\ count}$ EvidenceThe appearance of each word in a review is an evidence. PosteriorThe probability of being recommended given that a word is seen. It isn't easy to compute directly, but using Bayes' rule we can calculate it from the other quantities:$ P(recommended\ |\ word) = \dfrac{P(recommended)*P(word\ |\ recommended)}{P(word)} $P(word) is the evidence here. Labeling in this problemWe can use these equations to get the posterior probabilities of each review being recommended or not_recommended, and by comparing them we label the review with whichever class has the higher value. Note that we don't need to calculate the evidence, as it is equal for both classes. We assume each word is conditionally independent given recommended/not_recommended, so we simply multiply the probabilities.As an example, for the sentence **word1 word2**:$P(recommended\ |\ sentence) = P(recommended)*P(word1 | recommended)*P(word2 | recommended)$We calculate this for not_recommended too and then compare them. ------- Additive SmoothingSometimes a word appears in the not_recommended training reviews but not in the recommended ones. In that case the posterior probability of recommended would be equal to zero. For example, if we didn't see **word2** in the recommended reviews of the training data, this would happen:$P(word2\ |\ recommended) = 0$$P(recommended\ |\ sentence) = P(recommended)*P(word1 | recommended)*P(word2 | recommended) = 0$So the review would be classified as not_recommended, no matter what the other words are. To solve this problem we use additive smoothing to eliminate these zero probabilities.We add an Alpha to the count of each word in all classes, and assign Alpha as the count of any word that is missing from a class.
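For reference, the textbook form of this estimate is sketched below; here $|V|$ denotes the vocabulary size, and the code that follows implements the same idea with a slightly different bookkeeping of the counts rather than this exact formula:$P(word\ |\ recommended) = \dfrac{count(word,\ recommended) + \alpha}{total\ recommended\ words\ count + \alpha \times |V|}$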
So in the example, **word2** count would be Alpha for recommended words list, and Alpha + prev_count for the not_recommended words lits.Alpha = 1 was used for this problem.def get_smoothed_freqs(rec_list, not_rec_list): rec_freq = dict(collections.Counter(rec_list)) not_rec_freq = dict(collections.Counter(not_rec_list)) rec_word_count = len(rec_list) not_rec_word_count = len(not_rec_list) for word in rec_list: rec_freq[word] += 1 if word not in not_rec_freq: not_rec_word_count += 1 not_rec_freq[word] = 1 for word in not_rec_list: not_rec_freq[word] += 1 if word not in rec_freq: rec_word_count += 1 rec_freq[word] = 1 return rec_freq, not_rec_freq, rec_word_count, not_rec_word_count---------------------------- Label functionVery small numbers were considered 0 by Python so the Log function was used for probabilites so we can sum them instead of multiplying them and avoid getting very small values.def label(comment, info, pre_proc=None): words = hazm.word_tokenize(comment) if pre_proc: words = pre_proc(words) rec_freq, not_rec_freq, rec_word_count, not_rec_word_count = info rec_score = math.log(0.5) # prior not_rec_score = math.log(0.5) for word in words: if word not in rec_freq and word not in not_rec_freq: #ignore extra words continue if word not in rec_freq: # no smoothing rec_score = float('-inf') break if word not in not_rec_freq: # no smoothing not_rec_score = float('-inf') break rec_score += math.log(rec_freq[word] / rec_word_count) not_rec_score += math.log(not_rec_freq[word] / not_rec_word_count) if rec_score > not_rec_score: return 'recommended' else: return 'not_recommended'-------- Creating Bag of WordsAll the comments are added to a single list for each class.rec_words = [] not_rec_words = [] for i, row in train_data.iterrows(): if row['recommend'] == 'recommended': rec_words += hazm.word_tokenize(row['comment']) else: not_rec_words += hazm.word_tokenize(row['comment'])--------------Here the label function is called with and without pre-processing and smoothing:nothing_info = get_freqs(rec_words, not_rec_words) smoothed_info = get_smoothed_freqs(rec_words, not_rec_words) pre_info = get_freqs(pre_process(rec_words), pre_process(not_rec_words)) pre_smoothed_info = get_smoothed_freqs(pre_process(rec_words), pre_process(not_rec_words)) test_data['pre_smooth'] = test_data['comment'].apply(label, args=[pre_smoothed_info, pre_process]) test_data['smooth'] = test_data['comment'].apply(label, args=[smoothed_info]) test_data['pre'] = test_data['comment'].apply(label, args=[pre_info, pre_process]) test_data['nothing'] = test_data['comment'].apply(label, args=[nothing_info]) def print_results(label): correct_recs_detected = ((test_data['recommend'] == test_data[label]) & (test_data[label] == 'recommended')).sum() all_recs_detected = (test_data[label] == 'recommended').sum() total_recs = (test_data['recommend'] == 'recommended').sum() accuracy = (test_data['recommend'] == test_data[label]).sum() / len(test_data) * 100 precision = correct_recs_detected / all_recs_detected * 100 recall = correct_recs_detected / total_recs * 100 f1 = 2 * (precision * recall) / (precision + recall) print('-------------------------') print(label + ':\n') print(f'{"Accuracy":>10}: \t {accuracy :.2f} %\n') print(f'{"Precision":>10}: \t {precision :.2f} %\n') print(f'{"Recall":>10}: \t {recall :.2f} %\n') print(f'{"F1":>10}: \t {f1 :.2f} %\n')----------- Evaluation PrecisionIf we only use precision, in a case if we detect only on recommended and that is correct, we get 100%, so it can't be used alone. 
Generally, if our model labels only a few comments as recommended, it can achieve a high precision even though the model is not very good.

Recall: if we label many comments as recommended, including many correct and many wrong ones, we get a high recall value, but the model is still not good. For example, if we label every review as recommended, we get 100% recall.

F1: to counter the downsides of recall and precision, we combine the two values into the F1 score. F1 is the **harmonic mean** of recall and precision, $F_1 = \dfrac{2*Precision*Recall}{Precision + Recall}$, so it takes both quantities into account. This value is a better representation of how correct our model is.

print_results('pre_smooth') print_results('smooth') print_results('pre') print_results('nothing')------------------------- nothing: Accuracy: 90.00 % Precision: 89.60 % Recall: 90.50 % F1: 90.05 %

Results: we can see that additive smoothing improves our model by a noticeable margin. The reason is that it prevents the model from deciding a label solely because a word was absent from one class's training data, and lets the model take more words into account. Our pre-processing, however, is not very effective; a likely reason is that it removes some context, for example negative and positive verb forms collapse into the same word, or some words lose their meaning.

-------- When our model makes mistakes

In the example below, both unique words in the comment score higher given recommended even though the true label is not_recommended, so our model labels it as recommended. One reason can be the context the words are used in: our Naive Bayes model ignores context and sentence structure completely, but in reality a positive word can carry a negative meaning depending on the context and the verb used. For example, **ایراد** can appear as **ایراد ندارد** and as **ایراد دارد**; these two sentences have opposite meanings, but our model treats the word **ایراد** identically in both. Also, negative and positive verb forms become the same verb during pre-processing. Another reason can be the small stop-word set.
We haven't considered all neutral words so they give different weights to the classes although they are not really trustable for labeling.rec_freq, not_rec_freq, rec_word_count, not_rec_word_count = pre_smoothed_info wrongs = test_data[test_data['recommend'] != test_data['pre_smooth']].reset_index() print('real label: ', wrongs.iloc[5]['recommend']) print('our label: ', wrongs.iloc[5]['pre_smooth']) print('comment:\n', wrongs.iloc[5]['comment']) print('') print('دستگاه score in recommended words ', rec_freq['دستگاه'] / rec_word_count) print('دستگاه score in not_recommended words ', not_rec_freq['دستگاه'] / not_rec_word_count) print('ایراد score in recommended words ', rec_freq['ایراد'] / rec_word_count) print('ایراد score in not_recommended words ', not_rec_freq['ایراد'] / not_rec_word_count) for i, row in wrongs.tail().iterrows(): print('real label: ', row['recommend']) print('our label: ', row['pre_smooth']) print('comment:\n',row['comment']) print('\n------------------\n')real label: not_recommended our label: recommended comment: باسلام خدمت دوستان من تعجب میکنم از چیه این تعریف میکنن گوشی من سامسونگ اس ۶ هستش ۲۵۵۰ حالا ۳.۵ بار شارژ میکنه کنار بحثم اینجاست ۲ساعت نیم میکشه شارژ کامل که واقعا خوب نیست فاجعه هستش و مورد دیگه اداپتور من فست هستش تازه با فست قشنگ ۷الی۸ ساعت میکشه شارژ بشه چیه این خوبه اخه تعریف میکنید نه شکل ظاهر مناسب نه ابعاد خوب دیر شارژ میشه شارژ کند انجام میده تنها مزیت این گارانتی هستش تموم شد رفت پاور بانک پاور بانک ------------------ real label: recommended our label: not_recommended comment: من دوسه ماهی هست این کفشدازردیجی گرفتم متاسفانه کیفیت چسب کفی خوب نیست و از جلو بلند شده و اینکه بنداش کیفیت لازم رو نداره و پا داخلش بو میگیره قالباشم دقیق نیست بنظرم ارزش این پول نداره ... پیشنهاد نمیدم پیشنهاد نمیدم ------------------ real label: not_recommended our label: recommended comment: این آچار لوله گیر خیلی سنگینه،برای کارمداوم وکسانی که دست وبازوی ضعیفی دارند اصلا مناسب نیست.اگرقبل ازخریدبه دست می گرفتم،ا[...]Example Alternative Growth FitPlease first see the other Jupyter Notebook, explaining the basics of accessing mycelyso's HDF5 files.Furthermore, this file assumes the `output.h5` described in the other notebook to be present in the current directory.Within this notebook, we will fit the mycelium length data using a third-party library, [*croissance*](https://github.com/biosustain/croissance) (DOI: [10.5281/zenodo.229905](https://dx.doi.org/10.5281/zenodo.229905) by (2017)). 
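For orientation, and assuming the usual exponential-phase model that croissance is built around (see its documentation for the exact formulation): during an exponential phase the mycelium length behaves approximately as $L(t) = L_0\,e^{\mu t}$, so $\log L(t)$ is linear in $t$ with slope $\mu$. This is why the slope of the fitted growth phase reported further below is interpreted directly as the growth rate $\mu$.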
Please install the current version off github first: ```pip install https://github.com/biosustain/croissance/archive/master.zip```First, some general setup …%matplotlib inline %config InlineBackend.figure_formats=['svg'] import pandas pandas.options.display.max_columns = None import numpy as np import warnings import croissance from croissance.figures import PDFWriter as CroissancePDFWriter from matplotlib import pyplot class OutputInstead: @classmethod def savefig(cls, fig): pyplot.gcf().set_size_inches(10, 12) pyplot.show() # croissance's PDFWriter is supposed to write to a PDF # but we want an inline figure, so we mock some bits CroissancePDFWriter.doc = OutputInstead CroissancePDFWriter._include_shifted_exponentials = False def display_result(result, name="Mycelium Length"): return CroissancePDFWriter.write(CroissancePDFWriter, name, result) warnings.simplefilter(action='ignore', category=FutureWarning) pyplot.rcParams.update({ 'figure.figsize': (10, 6), 'svg.fonttype': 'none', 'font.sans-serif': 'Arial', 'font.family': 'sans-serif', 'image.cmap': 'gray_r', 'image.interpolation': 'none' })Opening the HDF5 fileWe will load the `output.h5` using `pandas.HDFStore` …store = pandas.HDFStore('output.h5', 'r') root = store.get_node('/') for image_file in root.results: print(image_file) for position in image_file: print(position) break/results/mycelyso_S_lividans_TK24_Complex_Medium_nd046_138_ome_tiff (Group) '' /results/mycelyso_S_lividans_TK24_Complex_Medium_nd046_138_ome_tiff/pos_000000000_t_Collected (Group) ''and load the first growth curveresult_table_collected = store[position.result_table_collected._v_pathname] timepoint = result_table_collected.timepoint / (60*60) length = result_table_collected.graph_edge_length pyplot.title('Length over Time') pyplot.xlabel('Time [h]') pyplot.ylabel('Length [µm]') pyplot.plot(timepoint, length)Here, we will use the third party tool `croissance` to fit the data to an exponential growth model:curve = pandas.Series(data=np.array(length), index=np.array(timepoint)) estimator = croissance.Estimator() result = estimator.growth(curve) # print(result) print(result.growth_phases)[GrowthPhase(start=9.928127173487246, end=22.594538504723342, slope=0.36693539043981077, intercept=3.1588539520729086, n0=-25.525547240977755, attributes={'SNR': 172.5009988033075, 'rank': 100.0})]And furthermore use its plotting functionality to show the results:print("Growth rate as determined by croissance µ=%.2f" % (result.growth_phases[0].slope,)) display_result(result)Growth rate as determined by croissance µ=0.37Reading DBimport pandas as pd from equity_db import MongoAPI, ReadDB from equity_db.query.asset_query import AssetQueryCRSP Data# making our api connection con = MongoAPI('test', 'crsp') # making the reader to construct our queries reader = ReadDB(con)1.0We run a query against our specified mongo collection using the *.get_asset_data()* method.*.get_asset_data* returns a AssetQuery class which is by default a generator which can be turned into a dataframe and cached.We can cache the query's results by calling the *.set_save()* method.We can also specify any trading calendar to filter join our query results onto.This can be done using *.set_calendar('cal_name')*.Valid calendar names are specified in the pandas_market_calendars package.start = pd.Timestamp(year=2010, month=1, day=1) end = pd.Timestamp(year=2020, month=1, day=1) # Here's an example query. 
We get back an AssetQuery object which can be turned into a DataFrame by using AssetQuery.df combo_data: AssetQuery = reader.get_asset_data( assets=['AAPL', 'JNJ', 'MMM', 'MSFT', 'AMD', 'NVDA', 'KL', 'L', 'HD'], # tickers to query # close, high, low, open, company name, and website fields=['prccd', 'prchd', 'prcld', 'prcod', 'conm', 'weburl'], start=start, # start date end=end, # end date search_by='tic' # We are searching by ticker. Will also be set as level 1 in MultiIndex ) # now lets turn the query into a DataFrame df_of_combo_data: pd.DataFrame = combo_data.df df_of_combo_data.sort_index() # all static data is turned into a categorical type df_of_combo_data.info() # However we cant call .df twice because AssetQuery holds mongo queries as a generator like object which be read into memory once. try: display(combo_data.df) except ValueError as e: print('We hit ValueError')We hit ValueErrorNow let's do the same query but let's cache the query along with specifying the calendar.# Same query as above new_combo_data: AssetQuery = reader.get_asset_data( assets=['AAPL', 'JNJ', 'MMM', 'MSFT', 'AMD', 'NVDA', 'KL', 'L', 'HD'], # tickers to query # close, high, low, open, company name, and website fields=['prccd', 'prchd', 'prcld', 'prcod', 'conm', 'weburl'], start=start, # start date end=end, # end date search_by='tic' # in this query we are searching by ticker ) # Now lets call *set_save()* telling the AssetQuery to cache the query # We have also filtered the dates in the dataframe to only valid the NYSE trading dates # we can use any calendar in pandas_market_calenders or along with "365" which includes every single possible day df_of_new_combo_data: pd.DataFrame = new_combo_data.set_save().set_calendar('NYSE').df df_of_new_combo_data.sort_index() # Since we cached the query we can use .df an unlimited amount of times with no problems new_combo_data.df new_combo_data.df new_combo_data.df new_combo_data.df new_combo_data.df.sort_index()Can also pull data for static data only query'sNotice how the returned frame does not hold a timeseriesstatic_data: AssetQuery = reader.get_asset_data( assets=['AAPL', 'JNJ', 'MMM', 'MSFT', 'AMD', 'NVDA', 'KL', 'L', 'HD'], fields=['conm', 'weburl', 'cusip', 'lpermno'], # static fields search_by='tic' # in this query we are using ticker as our asset id ) # now lets turn the searched data into a DataFrame df_of_static_data = static_data.df df_of_static_data.sort_index() # we are searching by permno here (called lpermno in the crsp compustat linked dataset) static_data_lpermno: AssetQuery = reader.get_asset_data( assets=['14593', '61241', '66181', '22111', '16865', '26710', '22592', '10107', '86580'], fields=['tic', 'conm', 'weburl'], # static fields search_by='lpermno' # in this query we are using ticker as our asset id ) # now lets turn the searched data into a DataFrame df_of_static_data_lpermno = static_data_lpermno.df df_of_static_data_lpermno.sort_index()Compustat DataTo query a different collection we have two options1. Create new MongoAPI and ReadDB objects which have the collection as compustat2. We can pass the collection argument to *get_asset_data* with the collection we would like to query.Option one consists of the same steps as shown above just swapping the collection name in MongoAPI with 'compustat'. However, this can create near identical repetitive code.Option two is simply passing the collection name (in this case 'compustat') to the collection argument in *get_asset_data*. 
If the collection argument is passed the reader object will **for this query only** automatically override the collection we specified in the MongoAPI object.**Sparse data will not be resampled**# Same query as above compustat_query: AssetQuery = reader.get_asset_data( collection='compustat', # <--------- <--------- <--------- <--------- <--------- assets=['AAPL', 'JNJ', 'MMM', 'MSFT', 'AMD', 'NVDA', 'KL', 'L', 'HD'], # tickers to query # lpermno, company name, revenue for quarter, net income for quarter, date sec released report fields=['lpermno', 'conm', 'revtq', 'niq', 'rdq'], start=start, end=end, search_by='tic' ) compustat_query.set_save().df.sort_index()Show locations of TAO arrays ingested in ECCOv4r4import numpy as np import matplotlib.pyplot as plt import xarray import cartopy.crs as ccrs import re plt.style.use('thesis')Read from the csv filefilename = 'MRB_CSV/tao_grep.txt' read = {'Latitude': lambda line : float(line.split(',decimal')[0].split(' ')[-1]), 'Longitude': lambda line : float(line.split(',decimal')[0].split(' ')[-1]), 'Year': lambda line : int(line.split(',,,')[0].split(' ')[-1]), 'Month': lambda line : int(line.split(',,,')[0].split(' ')[-1]), 'Day': lambda line : int(line.split(',,,')[0].split(' ')[-1]) } tao = {key:[] for key in read.keys()} with open(filename,'r') as f: for line in f: for key, readme in read.items(): if key in line: tao[key].append(readme(line))Create date and sorttao['date'] = [np.datetime64(f'{Y}-{M:02d}-{D:02d}') for Y,M,D in zip(tao['Year'],tao['Month'],tao['Day'])] tao_sorted = {} for key in tao.keys(): tao_sorted[key] = [x for _,x in sorted(zip(tao['date'],tao[key]))]Plotticks = np.linspace(0,len(tao['date'])-1,5) ticklabels = [tao_sorted['date'][int(tt)] for tt in ticks] fig, ax = plt.subplots(constrained_layout=True, subplot_kw={'projection':ccrs.Robinson(central_longitude=-180)}) ax.set_extent([-240, -60, -30, 30], crs=ccrs.PlateCarree()) mappable = ax.scatter(tao_sorted['Longitude'],tao_sorted['Latitude'], c=range(len(tao_sorted['date'])), marker='.', cmap='plasma', transform=ccrs.PlateCarree()) ax.coastlines() cbar = fig.colorbar(mappable,ticks=ticks, orientation='horizontal') cbar.ax.set_xticklabels(ticklabels) fig.savefig('../figures/tao_locations.jpg',bbox_inches='tight',dpi=300) print(len(tao['date']))257954Constructing Ego Networks from Retweets(using pre-saved files instead of Twitter authentication) University of ArizonaEmail: Web: www.yotamshmargad.com IntroductionTwitter has become a prominent online social network, playing a major role in how people all over the world share and consume information. Moreover, while some social networks have made it difficult for researchers to extract data from their servers, Twitter remains relatively open for now. This tutorial will go through the details of how to construct a Twitter user’s ego network from retweets they have received on their tweets. Instead of focusing on who follows who on Twitter, the method instead conceptualizes edges as existing between users if they have recently retweeted each other.Conceptualizing edges as retweets has two primary benefits. First, it captures recent interactions between users rather than decisions that they may have made long ago (i.e. following each other) that may not translate into meaningful interaction today. Second, users often have many more followers than they do retweeters. The method proposed can thus be used to analyze even relatively popular users. 1. 
Importing libraries# Import the libraries we need import json import time import networkx import matplotlib.pyplot as plt from collections import Counter # Check working directory os.getcwd() # Set working directory os.chdir('FOLDER FOR SAVING FILES') # Check working directory os.getcwd()2. Pulling ego tweets# Read saved ego tweets with open('egotweet.json', 'r') as file: ego = json.load(file) # Looking at a json object ego[0] # Accessing an element of ego tweets ego[0]["id_str"] # Storing one of ego's tweet id egoid = ego[0]["id_str"] # Storing and printing ego tweet ids and retweet counts tweetids = [] retweets = [] if len(ego) != 0: for egotweet in ego: tweetids.append(egotweet["id_str"]) retweets.append(egotweet["retweet_count"]) print(egotweet["id_str"],egotweet["retweet_count"])3. Pulling retweeters# Sleep for 10 seconds time.sleep(10) # Reading saved ego retweeters with open('check.json', 'r') as file: check = json.load(file) with open('self.json', 'r') as file: self = json.load(file) with open('allretweeters.json', 'r') as file: allretweeters = json.load(file) # Printing tweet ids, retweet counts, # retweeters obtained, and whether a self tweet is included for a, b, c, d in zip(tweetids,retweets,check,self): print(a, b, c, d) len(allretweeters) allretweeters4. Visualizing the network of retweeters# Assigning edge weight to be number of tweets retweeted weight = Counter() for (i, j) in allretweeters: weight[(i, j)] +=1 weight # Defining weighted edges weighted_edges = list(weight.items()) weighted_edges # Defining the network object G = networkx.Graph() G.add_edges_from([x[0] for x in weighted_edges]) # Visualizing the network networkx.draw(G, width=[x[1] for x in weighted_edges])5. Pulling retweeter tweets# Defining the set of unique retweeters unique = [x[0][1] for x in weighted_edges] len(unique) unique # Reading saved retweeter tweets with open('alters.json', 'r') as file: alters = json.load(file) len(alters) # Printing the number of tweets pulled for each retweeter for alt in alters: print(len(alt)) # Storing and printing alter ids, tweet ids, and retweet counts altids = [] alttweetids = [] altretweets = [] for alt in alters: for alttweet in alt: altids.append(alttweet["user"]["id_str"]) alttweetids.append(alttweet["id_str"]) altretweets.append(alttweet["retweet_count"]) print(alttweet["user"]["id_str"],alttweet["id_str"],alttweet["retweet_count"])6. Pulling retweeters of retweeters# Reading saved alter retweeters with open('altcheck.json', 'r') as file: altcheck = json.load(file) with open('altself.json', 'r') as file: altself = json.load(file) with open('altretweeters.json', 'r') as file: altretweeters = json.load(file) with open('allalt.json', 'r') as file: allalt = json.load(file) # Printing alter user ids, tweet ids, retweet counts, # retweeters obtained, and whether a self tweet is included for a, b, c, d, e in zip(altids,alttweetids,altretweets,altcheck,altself): print(a, b, c, d, e) len(allalt) allalt7. Visualizing the full network of retweetersweight = Counter() for (i, j) in allalt: weight[(i, j)] +=1 weight all_edges = weighted_edges + list(weight.items()) all_edges # Defining the full network object G = networkx.Graph() G.add_edges_from([x[0] for x in all_edges]) # Visualizing the full network networkx.draw(G, width=[x[1] for x in all_edges])WeatherPy---- Note* Instructions have been included for each segment. 
You do not have to follow them exactly, but they are included to help you think through the steps.# Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import time from scipy.stats import linregress from pprint import pprint from tqdm.notebook import tqdm # Import API key from api_keys import weather_api_key # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy # Output File (CSV) output_data_file = "output_data/cities.csv" # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180)Generate Cities List# List for holding lat_lngs and cities lat_lngs = [] cities = [] # Create a set of random lat and lng combinations lats = np.random.uniform(lat_range[0], lat_range[1], size=1500) lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500) lat_lngs = zip(lats, lngs) # Identify nearest city for each lat, lng combination for lat_lng in lat_lngs: city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name # If the city is unique, then add it to a our cities list if city not in cities: cities.append(city) # Print the city count to confirm sufficient count len(cities)Perform API Calls* Perform a weather check on each city using a series of successive API calls.* Include a print log of each city as it'sbeing processed (with the city number and city name).# Create new dataframe to store the weather data weather_df = pd.DataFrame() print(f'Beginning Data Retrieval') print(f'-----------------------------') # Loop through the cities list and pull the weather data from weather API for i in tqdm(range(len(cities))): city_name = cities[i] weather_url = f'https://api.openweathermap.org/data/2.5/weather?q={city_name}&appid={weather_api_key}' # Request weather data from API weather_api_data = requests.get(weather_url).json() if weather_api_data['cod'] == '404': print(f'Processing Record {(i+1)%50} of Set {int((i+1)/50)+1} | City not found. 
Skipping...') continue else: # Append weather data into weather_df weather_df = weather_df.append([[city_name, weather_api_data['coord']['lat'], weather_api_data['coord']['lon'], weather_api_data['main']['temp_max'], weather_api_data['main']['humidity'], weather_api_data['clouds']['all'], weather_api_data['wind']['speed'], weather_api_data['sys']['country'], weather_api_data['dt'] ]]) print(f'Processing Record {(i+1)%50} of Set {int((i+1)/50)+1} | {city_name}') # Reset the columns name weather_df.columns=['City', 'Lat', 'Lng', 'Max Temp', 'Humidity', 'Cloudiness', 'Wind Speed', 'Country', 'Date'] # Reset the rows index weather_df = weather_df.reset_index(drop=True) # Store the data back to csv weather_df.to_csv(output_data_file, index=False)Convert Raw Data to DataFrame* Export the city data into a .csv.* Display the DataFrame# Print the weather_df information weather_df.info() # Print weather_df weather_df.head()Inspect the data and remove the cities where the humidity > 100%.----Skip this step if there are no cities that have humidity > 100%.# Check if weather_df has any humidity > 100% weather_df.describe() # Create test dataframe for the humidity test since there is no humidity > 100% in the original data weather_test_df = weather_df.copy() # Append the test point into the weather_test_df weather_test_df = weather_test_df.append({'City':'test_for_hum', 'Lat' : 1.0, 'Lng' : 1.0, 'Max Temp' : 273, 'Humidity' : 101, 'Cloudiness' : 0, 'Wind Speed' : 1.5, 'Country' : 'US', 'Date' : 1588986960}, ignore_index=True) weather_test_df # Get the indices of cities that have humidity over 100%. # Find the one that is less and equal to 100% hum_over_bool = (weather_test_df['Humidity'] <= 100) # Make a new DataFrame equal to the city data to drop all humidity outliers by index. # Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data". clean_city_data = weather_test_df.loc[hum_over_bool, :] clean_city_data # Extract relevant fields from the data frame # Export the City_Data into a csvPlotting the Data* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.* Save the plotted figures as .pngs. Latitude vs. Temperature Plot# Plot the scatter plot plt.scatter(weather_df['Lat'], weather_df['Max Temp'], c='steelblue', edgecolors='black') plt.title('City Latitude vs. Max Temperature ') plt.xlabel('Latitude') plt.ylabel('Max Temperature (°F)') plt.grid() plt.show()As the plot show above that at latitude around the center of the latitude has the hotter temperature. Edge of the latitude has colder temperature. Latitude vs. Humidity Plotplt.scatter(weather_df['Lat'], weather_df['Humidity'], c='steelblue', edgecolors='black') plt.title('City Latitude vs. Humidity ') plt.xlabel('Latitude') plt.ylabel('Humidity (%)') plt.grid() plt.show()Humidity plot itself could not tell too much story by latitude itself since it looks like it is affect more by the ocean, so the higher humidity % is probably clost to the ocean itself. Latitude vs. Cloudiness Plotplt.scatter(weather_df['Lat'], weather_df['Cloudiness'], c='steelblue', edgecolors='black') plt.title('City Latitude vs. Cloudiness ') plt.xlabel('Latitude') plt.ylabel('Cloudiness (%)') plt.grid() plt.show()Cloudiness plot looks like cloudiness is the location depended, same latitude can be either 100% cloudiness or 0% cloudines. Latitude vs. Wind Speed Plotplt.scatter(weather_df['Lat'], weather_df['Wind Speed'], c='steelblue', edgecolors='black') plt.title('City Latitude vs. 
Wind Speed ') plt.xlabel('Latitude') plt.ylabel('Wind Speed (mph)') plt.grid() plt.show()Wind speed plot show that most of the places dont have a lot of wind. Usually the areas have the higher wind speed is close to the higher latitude. Linear Regression# OPTIONAL: Create a function to create Linear Regression plots def liner_reg(input_df, x, y) -> tuple: x_values = input_df[x].astype('float') y_values = input_df[y].astype('float') (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values) regress_values = x_values * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) return (line_eq, x_values, regress_values, rvalue) # Create Northern and Southern Hemisphere DataFrames northern_index = (weather_df['Lat'] >= 0) northern_weather_df = weather_df.loc[northern_index,:] southern_index = (weather_df['Lat'] < 0) southern_weather_df = weather_df.loc[southern_index,:] def plot_lin_reg(hemi, ax, ay, py): """ Plot """ if hemi.upper() == 'N': temp_df = northern_weather_df elif hemi.upper() == 'S': temp_df = southern_weather_df else: print(f'Issue with hemisphere') if py == 'Max Temp': py_label = 'Max Temperature (°F)' elif py == 'Humidity': py_label = 'Humidity (%)' elif py == 'Cloudiness': py_label = 'Cloudiness (%)' elif py == 'Wind Speed': py_label = 'Wind Speed (mph)' else: print(f'Issue with py') plt.scatter(temp_df['Lat'], temp_df[py], c='steelblue', edgecolors='black') plt.xlabel('Latitude') plt.ylabel(py_label) # Plot regression line (line_eq, x_values, regress_values, rvalue) = liner_reg(temp_df, 'Lat', py) plt.plot(x_values,regress_values,"r-") plt.annotate(line_eq,(ax,ay),fontsize=15,color="red") print(f"R squared: {rvalue**2}") plt.show() returnNorthern Hemisphere - Max Temp vs. Latitude Linear Regressionplot_lin_reg('N', 10, 260, 'Max Temp')R squared: 0.7475743650033473The linear regression shows that the higher latitude the colder the area will be. Southern Hemisphere - Max Temp vs. Latitude Linear Regressionplot_lin_reg('S', -55, 300, 'Max Temp')R squared: 0.5976057487890921The linear regression shows that closer to the center of the latitude it becomes warmer and warmer. Northern Hemisphere - Humidity (%) vs. Latitude Linear Regressionplot_lin_reg('N', 45, 5, 'Humidity')R squared: 0.01797411985643502Humidity is pretty flat across the northern hemisphere, most of the areas have high humidity level. Southern Hemisphere - Humidity (%) vs. Latitude Linear Regressionplot_lin_reg('S', -55, 24, 'Humidity')R squared: 0.010181747289829181Humidity is pretty flat across the southern hemisphere as well, and closer to the 0 latitude, the humitidty is higher. Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regressionplot_lin_reg('N', 45, 5, 'Cloudiness')R squared: 0.005294785591858632Most of the areas in the northern hemispher, cloudiness level is either 100% or 0%. Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regressionplot_lin_reg('S', -55, 5, 'Cloudiness')R squared: 0.0027461614344849354For southern hemisphere, most of the areas around -30 to -20 latitude have 0% cloudiness. Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regressionplot_lin_reg('N', 10, 14.5, 'Wind Speed')R squared: 0.03125292107585675Northern hemisphere has pretty average wind speed between 2~4 mph. Southern Hemisphere - Wind Speed (mph) vs. 
Latitude Linear Regressionplot_lin_reg('S', -55, 9, 'Wind Speed')R squared: 0.0038568289651671715FastAI DenseNet for Skin Cancer Classification%reload_ext autoreload %autoreload 2 %matplotlib inline from fastai.vision import * from fastai.metrics import error_rate bs = 64 # dependent on GPU in useObtaining the Datasetfrom pathlib import Path path = Path('./data') #! kaggle datasets download -d fanconic/skin-cancer-malignant-vs-benign -p "{path}" #! unzip "{path}/skin-cancer-malignant-vs-benign.zip" -d "{path}"Pre-process/Vizpath_train = path/'train' path_test = path/'test' tfms = get_transforms(flip_vert=True, max_warp=0.1) data = ImageDataBunch.from_folder(path, train="/train", valid_pct=0.2, ds_tfms=tfms, size=224, bs=bs).normalize(imagenet_stats) data.show_batch(rows=3,figsize=(7,6))First DenseNet 121 Attemptlearn = cnn_learner(data, models.densenet121, metrics=error_rate) learn.summary() learn.lr_find() learn.recorder.plot() learn.fit_one_cycle(6, 1e-2) learn.save('stage-1-dn-121') learn.unfreeze() learn.lr_find() learn.recorder.plot() learn.fit_one_cycle(5, slice(1e-4, 1e-3)) learn.save('stage-2-dn121-96')'open-visualizations' for repeated measures in Python - 2** ** *19-03-2020* BackgroundThis tutorial is a follow up on my ['Open-visualizations tutorial for repeated measures in R'](https://github.com/jorvlan/open-visualizations/tree/master/R) and contributes to a GitHub repository called ['open-visualizations'](https://github.com/jorvlan/open-visualizations). Next to this notebook, I have also created another tutorial in Python with a slightly different approach which includes R-like behavior with `plotnine`. See ['open-visualizations'](https://github.com/jorvlan/open-visualizations) to view that tutorial. If you have any questions, or suggestions for improvement, please open an issue in the GitHub repository [open-visualizations](https://github.com/jorvlan/open-visualizations). If you use my repository for your research, please reference it. Load librariesimport numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as pltActivate folder where figures are stored- By default, figures will not be saved. If you want to save figures, set savefig to `True`.savefig = False if savefig: #Load libraries import os from os.path import isdir #Get current working directory, but you can specify your own directory of course. cwd = os.getcwd() if os.path.exists(cwd + "/repmes_tutorial_2_python/figs"): print("Directory already exists") #Assign the existing directory to a variable fig_dir = cwd + "/repmes_tutorial_2_python/figs" elif not os.path.exists(cwd + "/repmes_tutorial_2_python/figs"): print("Directory does not exist and will be created ......") os.makedirs(cwd + "/repmes_tutorial_2_python/figs") if isdir(cwd + "/repmes_tutorial_2_python/figs"): print('Directory was created succesfully') #Assign the created directory to a variable fig_dir = cwd + "/repmes_tutorial_2_python/figs" else: print("Something went wrong")Initialize a dataset# Create a dummy dataset N=30 np.random.seed(3) data = np.random.normal(size=(N,)) #Create the dataframe in a wide format with 'Before' and 'After ' as columns df = pd.DataFrame({'Before': data, 'After': data+1}) #Set the amount of jitter and create a dataframe containing the jittered x-axis values jitter_1 = 0 np.random.seed(3) df_jitter_1 = pd.DataFrame(np.random.normal(loc=0, scale=jitter_1, size=df.values.shape), columns=df.columns) #Update the dataframe with adding a number based on the length on the columns. 
Otherwise all datapoints would be at the same x-axis location. df_jitter_1 += np.arange(len(df.columns)) #Inspect the created dataframe pd.options.display.float_format = '{:.3f}'.format print("The dataframe with 2 variables ") print(df[['Before', 'After']])The dataframe with 2 variables Before After 0 1.789 2.789 1 0.437 1.437 2 0.096 1.096 3 -1.863 -0.863 4 -0.277 0.723 5 -0.355 0.645 6 -0.083 0.917 7 -0.627 0.373 8 -0.044 0.956 9 -0.477 0.523 10 -1.314 -0.314 11 0.885 1.885 12 0.881 1.881 13 1.710 2.710 14 0.050 1.050 15 -0.405 0.595 16 -0.545 0.455 17 -1.546 -0.546 18 0.982 1.982 19 -1.101 -0.101 20 -1.185 -0.185 21 -0.206 0.794 22 1.486 2.486 23 0.237 1.237 24 -1.024 -0.024 25 -0.713 0.287 26 0.625 1.625 27 -0.161 0.839 28 -0.769 0.231 29 -0.230 0.770Figure 1- In Figure 1, we only display the individual datapoints.# Define pre-settings w = 6 h = 6 title_size = 20 xlab_size = 15 ylab_size = 20 labels = ['Before', 'After'] # Create empty figure and plot the individual datapoints fig, ax = plt.subplots(figsize=(15,9)) for col in df: ax.plot(df_jitter_1[col], df[col], 'o', alpha=.6, zorder=2, ms=10, mew=1.5) #Additional settings ax.set_xticks(range(len(df.columns))) ax.set_xticklabels((labels), size= xlab_size) ax.set_xlim(-1, len(df.columns)) ax.set_ylabel('Value', size = ylab_size) ax.set_title('Figure 1: individual datapoints', size = title_size) sns.despine() if savefig: plt.savefig(fig_dir + "/figure1.png", width = w, height = h)Figure 2# Create empty figure and plot the individual datapoints fig, ax = plt.subplots(figsize=(15,9)) for col in df: ax.plot(df_jitter_1[col], df[col], 'o', alpha=.6, zorder=2, ms=10, mew=1.5) for idx in df.index: ax.plot(df_jitter_1.loc[idx,['Before','After']], df.loc[idx,['Before','After']], color = 'gray', linewidth = 2, linestyle = '--',alpha = .3) #Additonal settings ax.set_xticks(range(len(df.columns))) ax.set_xticklabels((labels), size= xlab_size) ax.set_xlim(-1, len(df.columns)) ax.set_ylabel('Value', size = ylab_size) ax.set_title('Figure 2: individual datapoints with lines', size = title_size) sns.despine() if savefig: plt.savefig(fig_dir + "/figure2.png", width = w, height = h)Figure 3#Set the amount of jitter and create a dataframe containing the jittered x-axis values jitter_2 = 0.05 np.random.seed(3) df_jitter_2 = pd.DataFrame(np.random.normal(loc=0, scale=jitter_2, size=df.values.shape), columns=df.columns) #Update the dataframe with adding a number based on the length on the columns. Otherwise all datapoints would be at the same x-axis location. 
df_jitter_2 += np.arange(len(df.columns)) # Create empty figure and plot the individual datapoints fig, ax = plt.subplots(figsize=(15,9)) for col in df: ax.plot(df_jitter_2[col], df[col], 'o', alpha=.6, zorder=2, ms=10, mew=1.5) for idx in df.index: ax.plot(df_jitter_2.loc[idx,['Before','After']], df.loc[idx,['Before','After']], color = 'gray', linewidth = 2, linestyle = '--',alpha = .3) #Additonal settings ax.set_xticks(range(len(df.columns))) ax.set_xticklabels((labels), size= xlab_size) ax.set_xlim(-1, len(df.columns)) ax.set_ylabel('Value', size = ylab_size) ax.set_title('Figure 3: individual datapoints with lines and jitter', size = title_size) sns.despine() if savefig: plt.savefig(fig_dir + "/figure3.png", width = w, height = h)Figure 4#Merge dataframe from wide to long for sns.pointplot df_long = pd.melt(df, value_vars=['Before','After']) # Create empty figure and plot the individual datapoints fig, ax = plt.subplots(figsize=(15,9)) for col in df: ax.plot(df_jitter_2[col], df[col], 'o', alpha=.6, zorder=2, ms=10, mew=1.5) for idx in df.index: ax.plot(df_jitter_2.loc[idx,['Before','After']], df.loc[idx,['Before','After']], color = 'gray', linewidth = 2, linestyle = '--',alpha = .3) sns.pointplot(x='variable', y='value', ci=95, data=df_long, join=False, scale=1.5, color = 'black', capsize = .03) #palette = 'Paired' #Additonal settings ax.set_xticks(range(len(df.columns))) ax.set_xticklabels((labels), size= xlab_size) ax.set_xlim(-1, len(df.columns)) ax.set_ylabel('Value', size = ylab_size) ax.set_title('Figure 4: individual datapoints with lines, jitter and statistics', size = title_size) sns.despine() if savefig: plt.savefig(fig_dir + "/figure4.png", width = w, height = h)Figure 5# Create empty figure and plot the individual datapoints fig, ax = plt.subplots(figsize=(15,9)) for col in df: ax.plot(df_jitter_2[col], df[col], 'o', alpha=1, zorder=2, ms=10, mew=1.5) for idx in df.index: ax.plot(df_jitter_2.loc[idx,['Before','After']], df.loc[idx,['Before','After']], color = 'gray', linewidth = 2, linestyle = '--',alpha = .3) sns.pointplot(x='variable', y='value', ci=95, data=df_long, join=False, scale=0.01, color = 'black', capsize = .03) sns.violinplot(x='variable', y='value', data=df_long, hue = 'variable', split = True, inner = 'quartile', cut=1) #Additonal settings ax.set_xticks(range(len(df.columns))) ax.set_xticklabels((labels), size= xlab_size) ax.set_xlim(-1, len(df.columns)) ax.set_ylabel('Value', size = ylab_size) ax.set_title('Figure 5: individual datapoints, lines, jitter, statistics, violins', size = title_size) ax.legend_.remove() sns.despine() plt.setp(ax.collections, alpha=.02) if savefig: plt.savefig(fig_dir + "/figure5.png", width = w, height = h)Figure 6#Create a dataframe do display 4 conditions df_2 = pd.DataFrame({'Before': data, 'After': data+1, 'Before1': data, 'After1': data-1}) df_jitter_3 = pd.DataFrame(np.random.normal(loc=0, scale=jitter_2, size=df_2.values.shape), columns=df_2.columns) df_jitter_3 #Do an additional step to create a jittered values for the 4 columns.. i.e., jitter values around condition 1 and 2 + jitter values for condition 3 and 4. 
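# (np.arange(len(df_2.columns)) is [0, 1, 2, 3], so the addition below shifts each column's
# jitter values to be centred on x = 0, 1, 2 and 3 -- one x position per condition.)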
df_jitter_3 += np.arange(len(df_2.columns)) df_jitter_3 # Create empty figure and plot the individual datapoints fig, ax = plt.subplots(figsize=(15,9)) for col in df_2: ax.plot(df_jitter_3[col], df_2[col], 'o', alpha=.6, zorder=2, ms=10, mew=1.5) for idx in df_2.index: ax.plot(df_jitter_3.loc[idx,['Before','After']], df_2.loc[idx,['Before','After']], color = 'gray', linewidth = 2, linestyle = '--',alpha = .3) ax.plot(df_jitter_3.loc[idx,['Before1','After1']], df_2.loc[idx,['Before1','After1']], color = 'gray', linewidth = 2, linestyle = '--', alpha =.3) #Additonal settings ax.set_xticks(range(len(df.columns))) ax.set_xticklabels((['Before', 'After', 'Before', 'After']), size= xlab_size) ax.set_xlim(-1, len(df.columns)) ax.set_ylabel('Value', size = ylab_size) ax.set_title('Figure 6: individual datapoints with lines, jitter: 4 conditions', size = title_size) sns.despine() plt.setp(ax.collections, alpha=.02) plt.setp(ax, xticks=[0, 1, 2, 3, 4]) if savefig: plt.savefig(fig_dir + "/figure6.png", width = w, height = h)Figure 7#Merge dataframe from wide to long for sns.pointplot df_long_2 = pd.melt(df_2, value_vars=['Before','After', 'Before1', 'After1']) # Create empty figure and plot the individual datapoints fig, ax = plt.subplots(figsize=(15,9)) for col in df_2: ax.plot(df_jitter_3[col], df_2[col], 'o', alpha=.6, zorder=2, ms=10, mew=1.5) for idx in df_2.index: ax.plot(df_jitter_3.loc[idx,['Before','After']], df_2.loc[idx,['Before','After']], color = 'gray', linewidth = 2, linestyle = '--',alpha = .3) ax.plot(df_jitter_3.loc[idx,['Before1','After1']], df_2.loc[idx,['Before1','After1']], color = 'gray', linewidth = 2, linestyle = '--', alpha = .3) sns.pointplot(x='variable', y='value', ci=95, data=df_long_2, join=False, scale=1.5, color = 'black', capsize = .03) #Additonal settings ax.set_xticks(range(len(df.columns))) ax.set_xticklabels((['Before', 'After', 'Before', 'After']), size= xlab_size) ax.set_xlim(-1, len(df.columns)) ax.set_ylabel('Value', size = ylab_size) ax.set_title('Figure 7: individual datapoints with lines, jitter, statistics: 4 conditions', size = title_size) sns.despine() plt.setp(ax, xticks=[0, 1, 2, 3, 4]) if savefig: plt.savefig(fig_dir + "/figure7.png", width = w, height = h)Figure 8# Create empty figure and plot the individual datapoints fig, ax = plt.subplots(figsize=(15,9)) for col in df: ax.plot(df_jitter_2[col], df[col], 'o', alpha=.8, zorder=2, ms=10, mew=1.5) for idx in df.index: ax.plot(df_jitter_2.loc[idx,['Before','After']], df.loc[idx,['Before','After']], color = 'gray', linewidth = 2, linestyle = '-',alpha = .2) for value in df_long_2: sns.violinplot(x='variable', y='value', data=df_long, hue = 'variable', split = True, inner = 'quartile', cut=1, dodge = True) sns.boxplot(x='variable', y='value', data=df_long, hue = 'variable', dodge = True, width = 0.2, fliersize = 2) #Additonal settings ax.set_xticks(range(len(df.columns))) ax.set_xticklabels((['Before', 'After']), size= xlab_size) ax.set_xlim(-1, len(df.columns)) ax.set_ylabel('Value', size = ylab_size) ax.set_title('Figure 8: individual datapoints with lines, jitter, statistics, box- and violin', size = title_size) sns.despine() ax.legend_.remove() plt.setp(ax.collections, alpha=.1) if savefig: plt.savefig(fig_dir + "/figure8.png", width = w, height = h)Baseline: Linear model logistic regressionJust simple linear model on Logistic regression, used to tune and create ml pipeline with baseline modelhttps://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html%load_ext 
autoreload %autoreload 2 DATASET_PATH = '../data/raw/zalando-fashionmnist/' random_seed = 654321 img_width = 28 img_height = 28 from matplotlib import cm import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split import yaml train_df = pd.read_csv(f'{DATASET_PATH}/fashion-mnist_train.csv', dtype=np.int16) test_df = pd.read_csv(f'{DATASET_PATH}/fashion-mnist_test.csv', dtype=np.int16) train_df.shape, test_df.shape def extract_X_y(df): pixel_features = df.columns[df.columns.str.contains('pixel')] return df[pixel_features], df['label'] X_train, y_train = extract_X_y(train_df) X_test, y_test = extract_X_y(test_df) X_train.shape, y_train.shape, X_test.shape, y_test.shape def load_label_titles(): with open(f'../data/raw/label-titles.yaml', 'r') as stream: return yaml.safe_load(stream) label_titles = load_label_titles() label_titlesTODO: would normalization (scale) helps?solver = 'lbfgs' # for small datasets 'liblinear' # 'sag' and 'saga' for large # 'newton-cg' multi_class='multinomial' clf = LogisticRegression(solver=solver, multi_class=multi_class, random_state=random_seed, n_jobs=-1, max_iter=100) clf.fit(X_train, y_train) train_accuracy = clf.score(X_train, y_train) test_accuracy = clf.score(X_test, y_test) print('train_accuracy', train_accuracy) print('test_accuracy', test_accuracy) def plot_confusion_matrix(confusion_matrix, class_names=None, fontsize=14): """ based on https://gist.github.com/shaypal5/94c53d765083101efc0240d776a23823 """ df_cm = pd.DataFrame( confusion_matrix, index=class_names, columns=class_names, ) fig = plt.figure(figsize=(10,7)) heatmap = sns.heatmap(df_cm, annot=True, fmt="d") heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=fontsize) heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=fontsize) plt.ylabel('True label') plt.xlabel('Predicted label') return figTODO: what is the right way to store metrics and confusion matrix (png image)?y_train_pred = clf.predict(X_train) train_cm = confusion_matrix(y_train, y_train_pred, range(len(label_titles))) plot_confusion_matrix(train_cm, label_titles); y_test_pred = clf.predict(X_test) test_cm = confusion_matrix(y_test, y_test_pred, range(len(label_titles))) plot_confusion_matrix(test_cm, label_titles);Distribution of constructiveness (Check if it's skewed)df['constructive_nominal'] = df['constructive'].apply(nominalize_constructiveness) cdict = df['constructive_nominal'].value_counts().to_dict() # Plot constructiveness distribution in the data # The slices will be ordered and plotted counter-clockwise. 
labels = 'Constructive', 'Non constructive', 'Not sure' items =[cdict['yes'], cdict['no'], cdict['not_sure']] total = sum(cdict.values()) size =[round(item/float(total) * 100) for item in items] print(size) colors = ['xkcd:green', 'xkcd:red', 'xkcd:orange'] plot_donut_chart(size,labels,colors, 'Constructiveness distribution (Total = ' + str(total) + ')')[47.0, 38.0, 15.0]Distribution of toxicity (Check if skewed)df['crowd_toxicity_level_nominal'] = df['crowd_toxicity_level'].apply(nominalize_toxicity) # Plot toxicity distribution with context (avg score) toxicity_counts_dict = {'Very toxic':0, 'Toxic':0, 'Mildly toxic':0, 'Not toxic':0} toxicity_counts_dict.update(df['crowd_toxicity_level_nominal'].value_counts().to_dict()) print(toxicity_counts_dict) total = sum(toxicity_counts_dict.values()) # The slices will be ordered and plotted counter-clockwise. labels = 'Very toxic', 'Toxic', 'Mildly toxic', 'Not toxic' size=[toxicity_counts_dict['Very toxic'],toxicity_counts_dict['Toxic'],toxicity_counts_dict['Mildly toxic'],toxicity_counts_dict['Not toxic']] colors = ['xkcd:red', 'xkcd:orange', 'xkcd:blue', 'xkcd:green'] plot_donut_chart(size,labels,colors, 'Toxicity distribution (avg score) (Total = ' + str(total) + ')'){'Very toxic': 1, 'Toxic': 17, 'Mildly toxic': 254, 'Not toxic': 767}Distribution of toxicity in constructive and non-constructive comments (Check if the dists are very different) Plot toxicity distribution for constructive commentstoxicity_column_name = 'crowd_toxicity_level_nominal' constructive_very_toxic = df[(df['constructive_nominal'] == 'yes') & (df[toxicity_column_name] == 'Very toxic')].shape[0] print('Constructive very toxic: ', constructive_very_toxic) constructive_toxic = df[(df['constructive_nominal'] == 'yes') & (df[toxicity_column_name] == 'Toxic')].shape[0] print('Constructive toxic: ', constructive_toxic) constructive_mildly_toxic = df[(df['constructive_nominal'] == 'yes') & (df[toxicity_column_name] == 'Mildly toxic')].shape[0] print('Constructive mildly toxic: ', constructive_mildly_toxic) constructive_not_toxic = df[(df['constructive_nominal'] == 'yes') & (df[toxicity_column_name] == 'Not toxic')].shape[0] print('Constructive non toxic: ', constructive_not_toxic) labels = 'Very toxic', 'Toxic', 'Mildly toxic', 'Not toxic' size=[constructive_very_toxic, constructive_toxic, constructive_mildly_toxic, constructive_not_toxic] total = sum(size) colors = ['xkcd:red', 'xkcd:orange', 'xkcd:blue', 'xkcd:green'] plot_donut_chart(size,labels,colors, 'Toxicity in constructive comments (Total = ' + str(total) + ')')Plot toxicity distribution for non-constructive comments# Plot toxicity (with context) distribution for non constructive comments nconstructive_very_toxic = df[(df['constructive_nominal'] == 'no') & (df[toxicity_column_name] == 'Very toxic')].shape[0] print('Non constructive very toxic: ', nconstructive_very_toxic) nconstructive_toxic = df[(df['constructive_nominal'] == 'no') & (df[toxicity_column_name] == 'Toxic')].shape[0] print('Non constructive toxic: ', nconstructive_toxic) nconstructive_mildly_toxic = df[(df['constructive_nominal'] == 'no') & (df[toxicity_column_name] == 'Mildly toxic')].shape[0] print('Non constructive mildly toxic: ', nconstructive_mildly_toxic) nconstructive_not_toxic = df[(df['constructive_nominal'] == 'no') & (df[toxicity_column_name] == 'Not toxic')].shape[0] print('Non constructive non toxic: ', nconstructive_not_toxic) labels = 'Very toxic', 'Toxic', 'Mildly toxic', 'Not toxic' size=[nconstructive_very_toxic, nconstructive_toxic, 
nconstructive_mildly_toxic, nconstructive_not_toxic] total = sum(size) colors = ['xkcd:red', 'xkcd:orange', 'xkcd:blue', 'xkcd:green'] plot_donut_chart(size,labels,colors,'Toxicity in non-constructive comments (Total = ' + str(total) + ')')Plot toxicity distribution for ambiguous comments# Plot toxicity (with context) distribution for ambiguous comments ns_very_toxic = df[(df['constructive_nominal'] == 'not_sure') & (df[toxicity_column_name] == 'Very toxic')].shape[0] print('Ambiguous very toxic: ', ns_very_toxic) ns_toxic = df[(df['constructive_nominal'] == 'not_sure') & (df[toxicity_column_name] == 'Toxic')].shape[0] print('Ambiguous toxic: ', ns_toxic) ns_mildly_toxic = df[(df['constructive_nominal'] == 'not_sure') & (df[toxicity_column_name] == 'Mildly toxic')].shape[0] print('Ambiguous mildly toxic: ', ns_mildly_toxic) ns_not_toxic = df[(df['constructive_nominal'] == 'not_sure') & (df[toxicity_column_name] == 'Not toxic')].shape[0] print('Ambiguous non toxic: ', ns_not_toxic) labels = 'Very toxic', 'Toxic', 'Mildly toxic', 'Not toxic' size=[ns_very_toxic, ns_toxic, ns_mildly_toxic, ns_not_toxic] total = sum(size) colors = ['xkcd:red', 'xkcd:orange', 'xkcd:blue', 'xkcd:green'] plot_donut_chart(size,labels,colors, 'Toxicity (no context) in ambiguous comments (Total = ' + str(total) + ')')Check how the annotators did on internal gold questions# Starting from batch three we have included internal gold questions for toxicity and constructiveness (20 each) with # internal_gold_constructiveness flag True. In the code below, we examine to what extent the annotators agreed with # these internal gold questions. # Get a subset dataframe with internal gold questions for constructiveness internal_gold_con_df = df[df['constructive_internal_gold'].notnull()].copy() # Call secret_gold_evaluation_constructiveness function from data_quality_analysis_functions print('Disagreement on constructiveness secret gold questions (%): ', secret_gold_evaluation_constructiveness(internal_gold_con_df)) # Get a subset dataframe with internal gold questions for toxicity internal_gold_tox_df = df[df['crowd_toxicity_level_internal_gold'].notnull()].copy() # Call secret_gold_evaluation_toxicity function from data_quality_analysis_functions print('Disagreement on toxicity secret gold questions (%): ', secret_gold_evaluation_toxicity(internal_gold_tox_df))Disagreement on toxicity secret gold questions (%): 25.0Criando um algoritmo de Trading de criptomoedas em Python :)# Instalando bibliotecas necessárias #!pip install yfinance !pip install pandas !pip install numpy !pip install matplotlib # Importando bibliotecas e apelidando #import yfinance as yf import pandas as pd import numpy as np from matplotlib import pyplot as plt from matplotlib.dates import DateFormatter # Retrieve two weeks of Bitcoin to USD exchange rates with a 1 hour interval and save the dataframe to a variable. 
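# NOTE: the download below actually covers 2020-01-01 to 2020-12-31 at a 1-day interval
# (not two weeks of hourly data), and it relies on `import yfinance as yf`, which is
# commented out in the imports cell above.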
BTC_USD = yf.download("BTC-USD", start='2020-01-01', end='2020-12-31', interval='1d') BTC_USD.head() fig, ax = plt.subplots(dpi=500) # Formatting the date axis date_format = DateFormatter("%h-%d-%y") ax.xaxis.set_major_formatter(date_format) ax.tick_params(axis='x', labelsize=8) fig.autofmt_xdate() # Plotting the closing price against the date (1 day interval) ax.plot(BTC_USD['Close'], lw=0.75) # Adding labels and title to the plot ax.set_ylabel('Price of Bitcoin (USD)') ax.set_title('Bitcoin to USD Exchange Rate') ax.grid() # adding a grid # Displaying the price chart plt.show() # Compute a 9-day Simple Moving Average with pandas BTC_USD['SMA_9'] = BTC_USD['Close'].rolling(window=9, min_periods=1).mean() # Add a column to BTC_USD containing a Simple Moving Average using an interval size of 30 days BTC_USD['SMA_30'] = BTC_USD['Close'].rolling(window=30, min_periods=1).mean() # Display the last 5 entries of the dataframe BTC_USD.tail() fig, ax = plt.subplots(dpi=500) # Formatting the date axis date_format = DateFormatter("%h-%d-%y") ax.xaxis.set_major_formatter(date_format) ax.tick_params(axis='x', labelsize=8) fig.autofmt_xdate() # Plotting the closing price against the date (1 day interval) ax.plot(BTC_USD['Close'], lw=0.75, label='Closing Price') # Added label """ You have already seen the code above earlier - we are simply reusing it. Below we plot the 9 and 30 day Simple Moving Averages and give them the appropriate label """ ax.plot(BTC_USD['SMA_9'], lw=0.75, alpha=0.75, label='9 Day SMA') ax.plot(BTC_USD['SMA_30'], lw=0.75, alpha=0.75, label='30 Day SMA') # Adding labels and title to the plot ax.set_ylabel('Price of Bitcoin (USD)') ax.set_title('Bitcoin to USD Exchange Rate') ax.grid() # adding a grid ax.legend() # adding a legend # Displaying the price chart plt.show() # Create a pandas dataframe that is the same size as the BTC_USD dataframe and covers the same dates trade_signals = pd.DataFrame(index=BTC_USD.index) # Define the intervals for the Fast and Slow Simple Moving Averages (in days) short_interval = 10 long_interval = 40 # Compute the Simple Moving Averages and add it to the dateframe as new columns trade_signals['Short'] = BTC_USD['Close'].rolling(window=short_interval, min_periods=1).mean() trade_signals['Long'] = BTC_USD['Close'].rolling(window=long_interval, min_periods=1).mean() # Create a new column populated with zeros trade_signals['Signal'] = 0.0 # Wherever the Shorter term SMA is above the Longer term SMA, set the Signal column to 1, otherwise 0 trade_signals['Signal'] = np.where(trade_signals['Short'] > trade_signals['Long'], 1.0, 0.0) # Create a new column in the trade_signals Dataframe called Position by calling the diff() method on the Signal column. 
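# Signal is 0/1, so Signal.diff() is +1 on the day the short SMA crosses above the long SMA
# (a buy), -1 when it crosses back below (a sell), 0 otherwise, and NaN for the first row.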
trade_signals['Position'] = trade_signals['Signal'].diff() # Testando a estratégia definida fig, ax = plt.subplots(dpi=500) # Formatting the date axis date_format = DateFormatter("%h-%d-%y") ax.xaxis.set_major_formatter(date_format) ax.tick_params(axis='x', labelsize=8) fig.autofmt_xdate() # Plotting the Bitcoin closing price against the date (1 day interval) ax.plot(BTC_USD['Close'], lw=0.75, label='Closing Price') # Plot the shorter-term moving average ax.plot(trade_signals['Short'], lw=0.75, alpha=0.75, color='orange', label='Short-term SMA') # Plot the longer-term moving average ax.plot(trade_signals['Long'], lw=0.75, alpha=0.75, color='purple', label='Long-term SMA') # Adding green arrows to indicate buy orders ax.plot(trade_signals.loc[trade_signals['Position']==1.0].index, trade_signals.Short[trade_signals['Position'] == 1.0], marker=6, ms=4, linestyle='none', color='green') # Adding red arrows to indicate sell orders ax.plot(trade_signals.loc[trade_signals['Position'] == -1.0].index, trade_signals.Short[trade_signals['Position'] == -1.0], marker=7, ms=4, linestyle='none', color='red') # Adding labels and title to the plot ax.set_ylabel('Price of Bitcoin (USD)') ax.set_title('Bitcoin to USD Exchange Rate') ax.grid() # adding a grid ax.legend() # adding a legend # Displaying the price chart plt.show() # Define how much money you will start with (in USD) initial_balance = 10000.0 # ten thousand USD # Create dataframe containing all the dates considered backtest = pd.DataFrame(index=trade_signals.index) # Add column containing the daily percent returns of Bitcoin backtest['BTC_Return'] = BTC_USD['Close'] / BTC_USD['Close'].shift(1) # Current closing price / yesterday's closing price # Add column containing the daily percent returns of the Moving Average Crossover strategy backtest['Alg_Return'] = np.where(trade_signals.Signal == 1, backtest.BTC_Return, 1.0) # Add column containing the daily value of the portfolio using the Crossover strategy backtest['Balance'] = initial_balance * backtest.Alg_Return.cumprod() # cumulative product fig, ax = plt.subplots(dpi=500) # Formatting the date axis date_format = DateFormatter("%h-%d-%y") ax.xaxis.set_major_formatter(date_format) ax.tick_params(axis='x', labelsize=8) fig.autofmt_xdate() # Plotting the value of Buy and Hold Strategy ax.plot(initial_balance*backtest.BTC_Return.cumprod(), lw=0.75, alpha=0.75, label='Buy and Hold') # Plotting total value of Crossing Averages Strategy ax.plot(backtest['Balance'], lw=0.75, alpha=0.75, label='Crossing Averages') # Adding labels and title to the plot ax.set_ylabel('USD') ax.set_title('Value of Portfolio') ax.grid() # adding a grid ax.legend() # adding a legend # Displaying the price chart plt.show()(source: https://machinelearningmastery.com/)# example of calculation 1d convolutions from numpy import asarray from keras.models import Sequential from keras.layers import Conv1D # define input data data = asarray([0, 0, 0, 1, 1, 0, 0, 0]) data = data.reshape(1, 8, 1) # create model model = Sequential() model.add(Conv1D(1, 3, input_shape=(8, 1))) # define a vertical line detector weights = [asarray([[[0]],[[1]],[[0]]]), asarray([0.0])] # store the weights in the model model.set_weights(weights) # confirm they were stored print(model.get_weights()) # apply filter to input data yhat = model.predict(data) print(yhat) # example of calculation 2d convolutions from numpy import asarray from keras.models import Sequential from keras.layers import Conv2D # define input data data = [[0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 
1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0]] data = asarray(data) data = data.reshape(1, 8, 8, 1) # create model model = Sequential() model.add(Conv2D(1, (3,3), input_shape=(8, 8, 1))) # define a vertical line detector detector = [[[[0]],[[1]],[[0]]], [[[0]],[[1]],[[0]]], [[[0]],[[1]],[[0]]]] weights = [asarray(detector), asarray([0.0])] # store the weights in the model model.set_weights(weights) # confirm they were stored print(model.get_weights()) # apply filter to input data yhat = model.predict(data) for r in range(yhat.shape[1]): # print each column in the row
2. Statistical learning with scikit-learn 2.1 Introduction In this notebook we review statistical learning in scikit-learn, following [A tutorial on statistical-learning for scientific data processing](http://scikit-learn.org/stable/tutorial/statistical_inference/index.html). Statistical learning is the branch of machine learning concerned with [statistical inference](https://kotobank.jp/word/%E7%B5%B1%E8%A8%88%E7%9A%84%E6%8E%A8%E8%AB%96-103447), that is, inferring the statistical properties of a population from a finite sample.from sklearn import datasets iris = datasets.load_iris() data = iris.data data.shape iris.DESCR %matplotlib inline import matplotlib.pyplot as plt import numpy as np from sklearn import svm # Use the Iris dataset iris = datasets.load_iris() features = iris.data target = iris.target target_names = iris.target_names labels = target_names[target] # Extract petal length and petal width as the features setosa_petal_length = features[labels == 'setosa', 2] setosa_petal_width = features[labels == 'setosa', 3] setosa = np.c_[setosa_petal_length, setosa_petal_width] versicolor_petal_length = features[labels == 'versicolor', 2] versicolor_petal_width = features[labels == 'versicolor', 3] versicolor = np.c_[versicolor_petal_length, versicolor_petal_width] virginica_petal_length = features[labels == 'virginica', 2] virginica_petal_width = features[labels == 'virginica', 3] virginica = np.c_[virginica_petal_length, virginica_petal_width] # Build the training data and labels training_data = np.r_[setosa, versicolor, virginica] training_labels = np.r_[ np.zeros(len(setosa)), np.ones(len(versicolor)), np.ones(len(virginica)) * 2, ] # Set the figure size plt.figure(figsize=(12, 8)) for i in np.arange(4): plt.subplot(2, 2, i+1) # Plot the training data plt.scatter(setosa[:, 0], setosa[:, 1], color='red') plt.scatter(versicolor[:, 0], versicolor[:, 1], color='blue') plt.scatter(virginica[:, 0], virginica[:, 1], color='green') # Fit on the training data, then set gamma for prediction gamma1=0.1 gamma2=0.001 if i == 0: plt.title('gamma:1->1') clf = svm.SVC() clf.gamma=gamma1 clf.fit(training_data, training_labels) clf.gamma=gamma1 if i == 1: plt.title('gamma:1->2') clf = svm.SVC() clf.gamma=gamma1 clf.fit(training_data, training_labels) clf.gamma=gamma2 if i == 2: plt.title('gamma:2->1') clf = svm.SVC() clf.gamma=gamma2 clf.fit(training_data, training_labels) clf.gamma=gamma1 if i == 3: plt.title('gamma:2->2') clf = svm.SVC() clf.gamma=gamma2 clf.fit(training_data, training_labels) clf.gamma=gamma2 # Build a mesh of points covering the data range training_x_min = training_data[:, 0].min() - 1 training_x_max = training_data[:, 0].max() + 1 training_y_min = training_data[:, 1].min() - 1 training_y_max = training_data[:, 1].max() + 1 grid_interval = 0.02 xx, yy = np.meshgrid( np.arange(training_x_min,
training_x_max, grid_interval), np.arange(training_y_min, training_y_max, grid_interval), ) # 各点を分類する Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # 分類結果を表示する Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.bone, alpha=0.2) plt.grid() # グラフを表示する plt.autoscale() plt.show()Project Stories Load Dataproject_stories = pd.read_pickle('../map/output/project_stories.pkl.gz') project_stories.head() postcodes = pd.read_pickle('../postcodes/output/postcode_lookup.pkl.gz') postcodes.head()Mergeproject_stories_with_constituency = pd.merge( project_stories, postcodes[['postcode', 'parliamentary_constituency_name']] ) [ project_stories.shape, project_stories_with_constituency.shape ]Fix up the end date so it doesn't have an unnecessary timestamp.end_date_string = project_stories_with_constituency.end_date.dt.strftime('%Y-%m-%d').copy() end_date_string[end_date_string == 'NaT'] = np.nan project_stories_with_constituency.end_date = end_date_string project_stories_with_constituency.head() project_stories_with_constituency.to_csv('output/project_stories_with_constituency.csv', index=False)NHS Data Load Datanhs_stories = pd.read_pickle('../map/output/nhs_stories.pkl.gz') nhs_stories_with_constituency = pd.merge( nhs_stories, postcodes[['postcode', 'parliamentary_constituency_name']], ) [ nhs_stories.shape, nhs_stories_with_constituency.shape ] nhs_stories_with_constituency.head() nhs_stories_with_constituency.to_csv('output/nhs_stories_with_constituency.csv', index=False)BentoML Example: Deploy to AWS Lambda[BentoML](http://bentoml.ai) is an open source framework for building, shipping and running machine learning services. It provides high-level APIs for defining an ML service and packaging its artifacts, source code, dependencies, and configurations into a production-system-friendly format that is ready for deployment.This notebook demonstrates how to use BentoML to deploy a machine learning model as a serverless REST API endpoint to AWS Lambda. For this demo, we are using the [Sentiment Analysis with Scikit-learn](https://github.com/bentoml/BentoML/blob/master/examples/sklearn-sentiment-clf/sklearn-sentiment-clf.ipynb) example, using dataset from [Sentiment140](http://help.sentiment140.com/for-students/).![Impression](https://www.google-analytics.com/collect?v=1&tid=UA-112879361-3&cid=555&t=event&ec=nb&ea=open&el=official-example&dt=deploy-with-serverless)!pip install -I bentoml !pip install sklearn pandas numpy import numpy as np import pandas as pd from sklearn.feature_extraction.text import CountVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report, roc_auc_score, roc_curve from sklearn.pipeline import Pipeline import bentomlPrepare Dataset%%bash if [ ! 
-f ./trainingandtestdata.zip ]; then wget -q http://cs.stanford.edu/people/alecmgo/trainingandtestdata.zip unzip -n trainingandtestdata.zip fi columns = ['polarity', 'tweetid', 'date', 'query_name', 'user', 'text'] dftrain = pd.read_csv('training.1600000.processed.noemoticon.csv', header = None, encoding ='ISO-8859-1') dftest = pd.read_csv('testdata.manual.2009.06.14.csv', header = None, encoding ='ISO-8859-1') dftrain.columns = columns dftest.columns = columnsModel Trainingsentiment_lr = Pipeline([ ('count_vect', CountVectorizer(min_df = 100, ngram_range = (1,1), stop_words = 'english')), ('lr', LogisticRegression())]) sentiment_lr.fit(dftrain.text, dftrain.polarity) Xtest, ytest = dftest.text[dftest.polarity!=2], dftest.polarity[dftest.polarity!=2] print(classification_report(ytest,sentiment_lr.predict(Xtest))) sentiment_lr.predict([Xtest[0]])Define ML Service with BentoML%%writefile sentiment_lr_model.py import pandas as pd import bentoml from bentoml.artifact import PickleArtifact from bentoml.handlers import DataframeHandler @bentoml.artifacts([PickleArtifact('sentiment_lr')]) @bentoml.env(pip_dependencies=['sklearn', 'numpy', 'pandas']) class SentimentLRModel(bentoml.BentoService): @bentoml.api(DataframeHandler, typ='series') def predict(self, series): """ predict expects pandas.Series as input """ return self.artifacts.sentiment_lr.predict(series)Save BentoML service archivefrom sentiment_lr_model import SentimentLRModel # Initialize bentoML model with artifacts bento_model = SentimentLRModel.pack( sentiment_lr=sentiment_lr ) # Save bentoML model to directory saved_path = bento_model.save() # print the directory containing exported model archive (prefixed with model name and version) print(saved_path)Load BentoML Service from archiveimport bentoml # Load exported bentoML model archive from path bento_model = bentoml.load(saved_path) # Call predict on the restored sklearn model bento_model.predict(pd.Series(["hello", "hi"]))Deploy to AWS lambda```bashbentoml deployment create DEPLOYMENT_NAME --bento BENTO_TAG --platform PLATFORM``` Arguments:* deployment name: The file path or s3 that contains BentoML bundles. Options:* bento: REQUIRED. The bento model and version in format of name:version.* platform: REQUIRED. The platform that you want to deploy bentoml bundle to. For serverless, we support aws-lambda, aws-lambda-py2 and gcp-function.* region: OPTIONAL The cloud provider's region you want to deploy in.bento_tag = '{name}:{version}'.format(name=bento_model.name, version=bento_model.version) print(bento_tag) !bentoml deployment create sentiment-serverless --bento {bento_tag} --platform aws-lambda --region us-west-2Make curl request to the Lambda endpointUpdate the URL from previous command's return result and run following command in the terminal```bashcurl -i \--header "Content-Type: application/json" \--data '["good movie", "bad food"]' \--request POST \https://URL``` Check deployment status```bentoml deployment describe DEPLOYMENT_NAME --namespace=NAMESPACE ``` Arguments:* deployment_name Options:* namespace: OPTIONAL. Name space of the deployment!bentoml deployment describe sentiment-serverlessDelete serverless Deployment```bashbentoml deployment delete DEPLOYMENT_NAME --namespace NAMESPACE``` Arguments:* deployment name Options:* namespace: OPTIONAL. 
Name space of the deployment!bentoml deployment delete sentiment-serverlessDataloader# imports import os import numpy as np import pandas as pd import scipy.signal from scipy import signal, fft import numpy as np from scipy import signal, fft # tqdm is a library for showing the progress bar from tqdm.notebook import tqdm # plotting library import matplotlib.pyplot as plt %matplotlib inline import matplotlib.pyplot as plt plt.rc('axes', axisbelow=True) plt.rcParams['figure.figsize'] = [16, 7] def load_data(file, channels): ''' Load data from csv file. Params ------ file: string path and name to the csv file channels: np array channels to be returned Return ------ data: np array (n_ch, n_s) EEG data ''' df= pd.read_csv(file,sep=',')# as df: data = np.asarray(df.values) return np.transpose(data[:,channels]) def norm_freq(f, fs): return f * 2 / fs def bandpass_multiEEG(data,f_low,f_high,fs): ''' Bandpass multi channel EEG Params ------ data: np array (n_ch,n_s) EEG data f_low: float lower corner frequency [Hz] f_high: float upper corner_frequency [Hz] fs: float sampling frequency Return ------ data_filt: np array (n_ch, n_s) filtered EEG data ''' low_freq = norm_freq(f_low, fs) high_freq = norm_freq(f_high, fs) filt_coef = scipy.signal.butter(N=2, Wn=[low_freq, high_freq], btype='bandpass', output='sos') data_filt = np.zeros(data.shape) for chan in range(data.shape[0]): data_filt[chan] = scipy.signal.sosfilt(filt_coef, data[chan]) return data_filtDefinition neu erstellter Funktionen:Anzupassen: - number of channels- number of samples per trial- number of trials- number of runsdef load_labels(file): ''' Load data from txt file. Params ------ file: string path and name to the csv file ''' df= pd.read_csv(file,sep=',',header=None)# as df: data = np.asarray(df.values) return np.transpose(data[:,:]) #definition of trial loader def load_trials(filename): X = load_data(filename,range(17)) #load all 17 channels channels = 17 samples = 2000 number_of_trials =15 number_of_runs =5 #trials[trialnumber, channelnumber, time] trials = np.zeros((number_of_trials, channels, samples)) for k in range(number_of_trials): trials[k] = X[:,500+samples*k:2500+samples*k] return trials #definition of run loader def load_runs(filenames): channels = 17 samples = 2000 number_of_trials =15 number_of_runs =5 runs = np.zeros((number_of_runs, number_of_trials, channels, samples)) for k in range(number_of_runs): runs[k] = load_trials(filenames[k]) return runs #Y[runnumber][trialnumber] def Y_loader(protocols): number_of_trials = 15 number_of_runs = 5 Y=np.zeros((number_of_runs, number_of_trials)) for k in range(number_of_runs): a = load_labels(protocols[k]) Y[k] = a[0][0:number_of_trials] return Y #combination of run loader and Y_loader def dataloader(EEG_filenames,protocols): return load_runs(filenames), Y_loader(protocols)Ausprobieren der Funktion:Das Array Filenames sollte die .csv Dokumente des Biowolf/Unicorn enthalten. 
Das Array Protocols sollte die .txt Dokumente von MATLAB enthaltenX hat folgende Dimensionen:- RunNumber- TrialNumber- ChannelNumber- TimeY hat folgende Dimensionen:- RunNumber- TrialNumber#Example with Beni filenames = ['data/Session_1_Unicorn_Beni/UnicornRecorder_20201127_Beni_MM_trial_1.csv', 'data/Session_1_Unicorn_Beni/UnicornRecorder_20201127_Beni_MM_trial_2.csv', 'data/Session_1_Unicorn_Beni/UnicornRecorder_20201127_Beni_MM_trial_3.csv', 'data/Session_1_Unicorn_Beni/UnicornRecorder_20201127_Beni_MM_trial_4.csv', 'data/Session_1_Unicorn_Beni/UnicornRecorder_20201127_Beni_MM_trial_5.csv' ] protocols = ['data/Protocols/Beni/Unicorn_measurement 27-Nov-2020_trial_1_Beni.txt', 'data/Protocols/Beni/Unicorn_measurement 27-Nov-2020_trial_2_Beni.txt', 'data/Protocols/Beni/Unicorn_measurement 27-Nov-2020_trial_3_Beni.txt', 'data/Protocols/Beni/Unicorn_measurement 27-Nov-2020_trial_4_Beni.txt', 'data/Protocols/Beni/Unicorn_measurement 27-Nov-2020_trial_5_Beni.txt'] #Y = Y_loader(protocols) X,Y = dataloader(filenames,protocols) print(X.shape) plt.plot(X[1][13][2]) print(Y[1][13])1 Introduction 1.0 Package importsimport numpy as np import pandas as pd from googletrans import Translator import time import warnings import tabula2 Definitions 2.0 Parameter definitionsdata_location = '../data/brazil/groundtruth-brazil.csv' data_location_en = '../data/brazil/groundtruth-brazil-en.csv' data_location_pdf = '../data/brazil/groundtruth-brazil.pdf'2.1 Function definitionstranslator = Translator() def translate_worker(string: str, translator: 'Translator' = translator) -> str: if string == 'nan' or pd.isna(string): return string language = translator.detect(string) if language.lang != 'en': time.sleep(3) string_trans = translator.translate(string) if string_trans.src in ['pt', 'es']: #assert (string.src in ['id', 'ms', 'jw', 'su', 'gu']), f'Incorrect input language of {string.src}' string = string_trans.text else: warnings.warn(f'Incorrect language of {string_trans.src}') return string3 Execution 3.0 Convert PDF to CSVdf = tabula.read_pdf(data_location_pdf, pages='all', encoding='utf-8') df_colnames = ['State', 'City', 'Conflict Name', 'Area', 'Date', 'Families Involved', 'Property Type', 'Jurisdiction', 'Families displaced', 'Attempt Threat Expulsion', 'Eviction', 'Eviction threats', 'Houses Destroyed', 'Land destroyed', 'Belongings Destroyed', 'Guns', 'Invasion', 'Result', 'Cause', 'Type of Violence'] cols_to_keep = ['State', 'City', 'Conflict Name', 'Area', 'Date', 'Families Involved', 'Property Type', 'Jurisdiction', 'Result', 'Cause', 'Type of Violence'] for x in range(len(df)): df[x].columns = df_colnames df = pd.concat(df) df = df.drop([x for x in df.columns if x not in cols_to_keep], axis = 1) df = df[df['State'] != 'Estado'] df = df.dropna(thresh = df.shape[1] -3) df = df.reset_index() df = df.drop('index', axis = 1) for column in df.columns: for row in range(0, len(df)): if isinstance(df[column][row], str): df[column][row] = df[column][row].replace('\r', ' ') df[column][row] = df[column][row].replace('/', ' ') df.tail(5) df.to_csv(data_location, index = False)3.1 Translate Portuguese to Englishdata = pd.read_csv(data_location_en) cols_to_translate = ['Property Type', 'Jurisdiction', 'Result', 'Cause', 'Type of Violence'] # 0-1 already done data.tail(5) for i in range(len(data)): data['Jurisdiction'][i] = data['Jurisdiction'][i].replace(" ento", "ento") if data['Jurisdiction'][i].count(" ") == 1: data['Jurisdiction'][i] = data['Jurisdiction'][i].replace(" ", "") data['Jurisdiction'][i] = 
data['Jurisdiction'][i].replace(" ç", "ç") data['Jurisdiction'][i] = data['Jurisdiction'][i].replace("aT", "a T") for col in cols_to_translate[1:2]: print(col) for row in range(0, len(data)): print(row) text = translate_worker(data[col][row]) print(f'Translated {data[col][row]} to {text} for column {col}, row {row}') print('\n') data[col][row] = text data.to_csv(data_location_en, index = False)**Course:** TVM4174 - Hydroinformatics for Smart Water Systems Example 2: Plotting the Hazen-Williams head loss equation for pipes *Developed by *import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns sns.set_style('darkgrid')Hazen-Williams head loss formula$ \Delta h \ = \ \frac{10.67 \cdot Q^{1.852}}{C^{1.852} d^{4.8704}} \cdot L $def hw_headloss(Q=1/1000, C=100, d=0.25, L=50): """calculate the head loss according to Hazen-Williams equation :param Q: volumetric flow rate, m^3/s (cubic meter per second) :param C: Pipe roughness coefficient :param d: inside pipe diameter , m (meters) :param L: length of pipe in meters :return: head loss in meters (water) """ S = 10.67 * (Q ** 1.852) / ((C ** 1.852) * d ** 4.8704) return - S * L # df = pd.read_excel('C_factors.xlsx', index_col=0) df = pd.read_csv('data/C_factors.csv', sep=';', index_col=0) query = [x.startswith('Cast') for x in df.index] data = df[query] data = data.mean(axis=1) d = 0.25 Q = np.linspace(0, 0.1, 100) colors = sns.color_palette('viridis', len(data)) markers = 'ov^sD' styles = ['-', '--'] for ii, (name, c_value) in enumerate(data.iteritems()): h = hw_headloss(Q, C=c_value, d=d) plt.plot(1000 * Q, h, label=name.replace('Cast iron', ''), color=colors[ii], marker=markers[ii], markevery=10, linestyle=styles[ii % 2]) plt.legend(loc=3, ncol=1, fontsize=16, frameon=False, title='Age', title_fontsize=18) plt.title(f'Headloss over a cast iron pipe with D={d*1000:.0f} mm and \n L=50 m as a function of flow rate $Q$ and age', fontsize=18) plt.xlim((0, 100)) plt.ylim((None, 0)) plt.xticks(fontsize=14) plt.yticks(fontsize=14, rotation=45) plt.xlabel(r'$Q \quad (\frac{L}{s}) $', fontsize=18) plt.ylabel(r'$\Delta h \quad (m) $', fontsize=18) plt.show()Curso Macroeconomia Prof. 
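For reference, the derivation cells below work with a Cobb-Douglas production function in which $\alpha$ is the capital share; the marginal product of labour plotted as PmgN follows directly from differentiating it with respect to $N$ (this is just a restatement of what the code computes):

$$ Y = A\,K^{\alpha}N^{1-\alpha}, \qquad \frac{\partial Y}{\partial N} = (1-\alpha)\,A\,K^{\alpha}N^{-\alpha} = (1-\alpha)\,\frac{Y}{N}, $$

which is exactly the expression `PmgN = (1-alpha)*(y/N)` used in the code, with $A = 10$, $\alpha = 0.8$ and $K = 1$.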
# Importing numpy and giving it a short name import numpy as np # imports numpy (together with all of its modules) and names it np # the default modules are: random, fft, lib, linalg, testing and core # Importing all of numpy's public objects into the current namespace from numpy import * # Importing numpy functions from specific modules from numpy.random import rand, randn # imports the rand and randn functions from numpy's random module # rand = returns random numbers in a given shape # randn = returns an array of samples drawn from the standard normal distribution from numpy.fft import fft, ifft # imports the fft and ifft functions from numpy's fft module # fft = computes the one-dimensional discrete Fourier transform # ifft = computes the one-dimensional inverse discrete Fourier transform # Remaining imports from scipy.stats import beta # imports the beta function from scipy's stats module # beta = a beta continuous random variable import matplotlib.pyplot as plt # imports matplotlib's pyplot module (sub-package) and names it plt from sympy import Symbol, solve # Symbol enables operations with symbols # solve allows equations to be solved from pylab import plot, arange, legend # imports plotting functions from the pylab library # plot generates charts # arange defines the ranges of values (numpy also provides this function) # legend adds a legend N = np.arange(1,10000,0.01) # using numpy's arange function # N (hours worked, the domain) runs from 1 to 10000 in steps of 0.01 Derivation of the production function import numpy as np # importing numpy import matplotlib.pyplot as plt # importing matplotlib to generate the charts alpha = 0.8 k = 1 A = 10 y = A*((k)**alpha)*((N)**(1-alpha)) plt.figure(figsize=(6, 4)) plt.subplot(1, 1, 1) plt.plot(N,y, color='blue', linewidth=2.5, linestyle="--", label='$F(K,N)$') #plt.ylabel('Demanda (Z), Produção (Y)') plt.ylabel('Produto') plt.xlabel('Horas Trab. (N)') legend(loc='best') # Adds the 'x' axis line #plt.xticks(()) #plt.yticks(()) #plt.text(0.5, 0.5, 'subplot(2,1,1)', ha='center', va='center', size=24, alpha=.5) # Adds the 'x' axis line #plt.xticks(()) #plt.yticks(()) legend(loc='best') plt.tight_layout() plt.savefig('funcao producao') plt.show() PmgN = (1-alpha)*(y/N) plt.figure(figsize=(6, 4)) plt.subplot(1, 1, 1) plt.plot(N, PmgN, color='blue', linewidth=2.5, linestyle="--", label='$PmgN$') #plt.ylabel('Demanda (Z), Produção (Y)') plt.ylabel('Produtividade') plt.xlabel('Horas Trab. 
(N)') legend(loc='best') # Inclui a linha do eixo 'x' #plt.xticks(()) #plt.yticks(()) #plt.text(0.5, 0.5, 'subplot(2,1,1)', ha='center', va='center', size=24, alpha=.5) # Inclui a linha do eixo 'x' #plt.xticks(()) #plt.yticks(()) legend(loc='best') plt.tight_layout() plt.savefig('produtividade') plt.show()Função Cobb-Douglas com capital e trabalho variáveisfrom sympy import * import numpy as np from matplotlib.pylab import plt %matplotlib inline init_printing(use_latex=True) # Register symbols: var("L K Y A a") # Cobb-Douglass production function: Y = (A*(L**a))*(K**(1-a)) # Assign number to A and a: Ys = Y.subs({A: 10, a:0.6}) # Plot 3D chart in which K and L are changed 0 to 10: plotting.plot3d(Ys, (K,0,10), (L,0,10))--- Compute Best Models for Each Metric Based on Mean VS DTK+Meansimple_ranked_cv = get_ranked_by_means_df(gather_df_cv, metric_names) complex_ranked_cv = get_model_ordering(agg_comp_dict, metric_names) top_n = 1 req_metrics = ['ROC AUC PriA-SSB FP', 'BEDROC AUC PriA-SSB FP','PR auc.integral PriA-SSB FP', 'NEF AUC PriA-SSB FP', 'NEF_1 % PriA-SSB FP', 'EF_1 % PriA-SSB FP'] simple_top_n = simple_ranked_cv[req_metrics].iloc[:top_n,:] complex_top_n = complex_ranked_cv[req_metrics].iloc[:top_n,:]Ranking by Folds Mean Onlydisplay('Ranking by Means. Shows model-mean pairs.', simple_ranked_cv[req_metrics])Ranking by DTK+Meansdisplay('Ranking by DTK+Means. Shows model-rank pairs.', complex_ranked_cv[req_metrics])--- Best Modelfor m in req_metrics: simple_model = simple_top_n[m].iloc[0] simple_model = simple_model[:simple_model.index(',')] simple_top_n[m].iloc[0] = simple_model complex_model = complex_top_n[m].iloc[0] complex_model = complex_model[:complex_model.index(',')] complex_top_n[m].iloc[0] = complex_model final_df = simple_top_n.T final_df = pd.merge(final_df, complex_top_n.T, left_index=True, right_index=True) final_df.columns = ['Best by Mean','Best by DTK+Mean'] final_df.columns.name = 'Metric' final_df.index = [s.replace(' PriA-SSB FP', '') for s in final_df.index] display(final_df)Fastai Chapter 1- toc: true- branch: main- badges: true- comments: true- author: - categories: [fastpages, jupyter] // TODO:- [ ] Fisish filling in the sections- [ ] Answer the questionaire Your Deep Learning JourneyThis is my third time restarting this [course](https://course.fast.ai/). I have been struggling to find a way to force myself to stick to it. I am finally heeding the advice in [Lesson 3](https://course.fast.ai/videos/?lesson=3) and starting to try to write a blog about it. This is a topic that I find deeply fascinating. If you are interested in Machine Learning I highly recommend checking it out for yourself. A note on format for this blog. My plan is to go through each chapter of the [book](https://www.amazon.com/Deep-Learning-Coders-fastai-PyTorch/dp/1492045527) and write a blog post on it using the Jupyter Notebook defined for it. As discussed in the first lesson this book is written with Jupyter Notebooks. All of the chapters can be found in this github [repo](https://github.com/fastai/fastbook). I am using the "clean" version which just leaves the headers within the chapters. I will fill in each one with ideas I find interesting or examples of what I learned from it. Deep Learning Is for Everyone"You don't need lots of math, data or expensive computers." There are a lot of tools that abstact the need to do a lot of complicated statistics that machine learning is built on. 
We can leverage those tools to build models that learn how to solve problems at or even above an average human level. "We've seen record-breaking results with <50 items of data"This isn't sourced in the book, but I think it may be demonstrated later."You don't need expensive computers."I was able to set up an account on [PaperSpace](https://www.paperspace.com/) and run a free GPU to run this notebook.In the book they also list example use cases for all kinds of professions, from medicine to playing games. Neural Networks: A Brief HistoryBriefly touches on the initial conception of a neural net in 1943 by . Also references a very influential book, Perceptrons, written by and . The most pivotal work was by , , and the Parallel Distributed Processing (PDP) Research Group in 1986. Who We AreExplains who and are and what their motivations for creating the book were. How to Learn Deep Learning"The basic idea is to teach the whole game. That means that if you’re teaching baseball, you first take people to a baseball game or get them to play it. You don’t teach them how to wind twine to make a baseball from scratch, the physics of a parabola, or the coefficient of friction of a ball on a bat." - from Making Learning Whole.This is probably my favorite metaphor in the book. It is applicable to so much more than just machine learning.* Teaching the whole game* Always teaching through examples* Simplifying as much as possible* Removing barriers Your Projects and Your MindsetPlayful and curious. The Software: PyTorch, fastai, and Jupyter (And Why It Doesn't Matter)These are just the easiest they have found to use, and they are free. Your First Model"It is crucial that you run your own experiments in parallel with this book in order to learn." - SylvainThey cover basic setup of a GPU Deep Learning Server and why you should avoid buying your own. It is definitely doable, but it is "distracting." To continue the baseball metaphor, I think this would fall under building the baseball rather than playing the game. They also cover what a GPU is and why it is better than a CPU at the math the models will use to do the learning. Getting a GPU Deep Learning ServerThe list of recommended servers is on the [book's website](https://course.fast.ai) under "Notebook Servers." The three they have listed currently are Google's Colab, Gradient PaperSpace, and AWS Sagemaker. Running Your First NotebookBelow is the actual Python code needed to build our first model. I added some comments to disambiguate what each piece is doing.# CLICK ME # Importing the fastai lib, Jeremy covers why they use import * in the lesson from fastai.vision.all import * # This is where all the images of pets are stored path = untar_data(URLs.PETS)/'images' # A simple function to check if an image is a cat. This particular dataset labels an image of a cat by starting its filename with an uppercase letter. def is_cat(x): return x[0].isupper() # This is organizing the data and loading it into memory on the GPU. "valid_pct" tells the function to reserve 20% of the images for validation. "item_tfms" resizes all of the images to the same size to keep the input consistent. dls = ImageDataLoaders.from_name_func( path, get_image_files(path), valid_pct=0.2, seed=42, label_func=is_cat, item_tfms=Resize(224)) # "cnn_learner" is our learning function; it uses a convolutional neural network (CNN) with a resnet34 architecture. The error_rate is how accurate the model is at predicting whether or not the image is a cat. 
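# Two extra notes on the call below: with fastai, resnet34 loads weights pretrained on
# ImageNet by default, so we are fine-tuning an existing model rather than training from
# scratch; and error_rate is simply 1 - accuracy, measured on the 20% validation split
# that valid_pct reserved above.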
learn = cnn_learner(dls, resnet34, metrics=error_rate) # This fine tunes the model # "This is the key to deep learning — determining how to fit the parameters of a model to get it to solve your problem. To fit a model, we have to provide at least one piece of information: how many times to look at each image (known as number of epochs). The number of epochs you select will largely depend on how much time you have available, and how long you find it takes in practice to fit your model. If you select a number that is too small, you can always train for more epochs later." learn.fine_tune(1)Sidebar: This Book Was Written in Jupyter NotebooksBrief sidebar on what Jupyter Notebooks are. It is an interactive python REPL (Read Evaluate Print Loop) with markdown support. End sidebarThe stuff below can only be run on a running notebook.uploader = widgets.FileUpload() uploader #hide # For the book, we can't actually click an upload button, so we fake it uploader = SimpleNamespace(data = ['images/chapter1_cat_example.jpg']) img = PILImage.create(uploader.data[0]) is_cat,_,probs = learn.predict(img) print(f"Is this a cat?: {is_cat}.") print(f"Probability it's a cat: {probs[1].item():.6f}")What Is Machine Learning?" started working on a different way to get computers to complete tasks, which he called machine learning. In his classic 1962 essay “Artificial Intelligence: A Frontier of Automation,” he wrote: Programming a computer for such computations is, at best, a difficult task, not primarily because of any inherent complexity in the computer itself but, rather, because of the need to spell out every minute step of the process in the most exasperating detail. Computers, as any programmer will tell you, are giant morons, not giant brains.Suppose we arrange for some automatic means of testing the effectiveness of any current weight assignment in terms of actual performance and provide a mechanism for altering the weight assignment so as to maximize the performance. We need not go into the details of such a procedure to see that it could be made entirely automatic and to see that a machine so programmed would “learn” from its experience.There are a number of powerful concepts embedded in this short statement:* The idea of a “weight assignment” * The fact that every weight assignment has some “actual performance”* The requirement that there be an “automatic means” of testing that performance* The need for a “mechanism” (i.e., another automatic process) for improving the performance by changing the weight assignments" What Is a Neural Network?"What we would like is some kind of function that is so flexible that it could be used to solve any given problem, just by varying its weights....But what about that process? One could imagine that you might need to find a new “mechanism” for automatically updating weight for every problem. This would be laborious. What we’d like here as well is a completely general way to update the weights of a neural network, to make it improve at any given task. Conveniently, this also exists!This is called stochastic gradient descent (SGD)."This is the mathmatical underpinning of how ML works. A Bit of Deep Learning Jargon* The functional form of the model is called its architecture (but be careful — sometimes people use model as a synonym of architecture, so this can get confusing). 
* The weights are called parameters.* The predictions are calculated from the independent variable, which is the data not including the labels.* The results of the model are called predictions.* The measure of performance is called the loss.* The loss depends not only on the predictions, but also on the correct labels (also known as targets or the dependent variable); e.g., “dog” or “cat.” Limitations Inherent To Machine LearningThis is left in the clean version of the book on the fastbook repo. I think it is important to leave it here as well.From this picture we can now see some fundamental things about training a deep learning model:- A model cannot be created without data.- A model can only learn to operate on the patterns seen in the input data used to train it.- This learning approach only creates *predictions*, not recommended *actions*.- It's not enough to just have examples of input data; we need *labels* for that data too (e.g., pictures of dogs and cats aren't enough to train a model; we need a label for each one, saying which ones are dogs, and which are cats).Generally speaking, we've seen that most organizations that say they don't have enough data, actually mean they don't have enough *labeled* data. If any organization is interested in doing something in practice with a model, then presumably they have some inputs they plan to run their model against. And presumably they've been doing that some other way for a while (e.g., manually, or with some heuristic program), so they have data from those processes! For instance, a radiology practice will almost certainly have an archive of medical scans (since they need to be able to check how their patients are progressing over time), but those scans may not have structured labels containing a list of diagnoses or interventions (since radiologists generally create free-text natural language reports, not structured data). We'll be discussing labeling approaches a lot in this book, because it's such an important issue in practice.Since these kinds of machine learning models can only make *predictions* (i.e., attempt to replicate labels), this can result in a significant gap between organizational goals and model capabilities. For instance, in this book you'll learn how to create a *recommendation system* that can predict what products a user might purchase. This is often used in e-commerce, such as to customize products shown on a home page by showing the highest-ranked items. But such a model is generally created by looking at a user and their buying history (*inputs*) and what they went on to buy or look at (*labels*), which means that the model is likely to tell you about products the user already has or already knows about, rather than new products that they are most likely to be interested in hearing about. That's very different to what, say, an expert at your local bookseller might do, where they ask questions to figure out your taste, and then tell you about authors or series that you've never heard of before. How Our Image Recognizer WorksDoes essentailly what I did with the code comments in the Running Your First Notebook section, but with a bit more detail. They touch on the idea of "Overfitting" a model to the data. If a model is overfit to the data it will give bad predictions on new data. 
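The tell-tale sign of overfitting is a growing gap between performance on the training data and performance on held-out data. Here is a minimal sketch of that idea on synthetic data (plain NumPy, illustrative only and not from the book): the most flexible polynomial fits the training points better and typically does worse on the held-out points.

```python
import numpy as np

rng = np.random.default_rng(42)
x = rng.uniform(0, 1, 40)
y = np.sin(2 * np.pi * x) + rng.normal(0, 0.3, 40)   # noisy target

# Hold out 20% of the points for validation, mirroring valid_pct=0.2 above
idx = rng.permutation(40)
train_idx, valid_idx = idx[:32], idx[32:]

for degree in (1, 3, 12):
    coeffs = np.polyfit(x[train_idx], y[train_idx], degree)
    train_mse = np.mean((np.polyval(coeffs, x[train_idx]) - y[train_idx]) ** 2)
    valid_mse = np.mean((np.polyval(coeffs, x[valid_idx]) - y[valid_idx]) ** 2)
    print(f"degree {degree:2d}: train MSE = {train_mse:.3f}, validation MSE = {valid_mse:.3f}")
```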
What Our Image Recognizer Learned Image Recognizers Can Tackle Non-Image Tasks Jargon Recap Deep Learning Is Not Just for Image Classificationpath = untar_data(URLs.CAMVID_TINY) dls = SegmentationDataLoaders.from_label_func( path, bs=8, fnames = get_image_files(path/"images"), label_func = lambda o: path/'labels'/f'{o.stem}_P{o.suffix}', codes = np.loadtxt(path/'codes.txt', dtype=str) ) learn = unet_learner(dls, resnet34) learn.fine_tune(8) learn.show_results(max_n=6, figsize=(7,8)) from fastai.text.all import * dls = TextDataLoaders.from_folder(untar_data(URLs.IMDB), valid='test') learn = text_classifier_learner(dls, AWD_LSTM, drop_mult=0.5, metrics=accuracy) learn.fine_tune(4, 1e-2)If you hit a "CUDA out of memory error" after running this cell, click on the menu Kernel, then restart. Instead of executing the cell above, copy and paste the following code in it:```from fastai.text.all import *dls = TextDataLoaders.from_folder(untar_data(URLs.IMDB), valid='test', bs=32)learn = text_classifier_learner(dls, AWD_LSTM, drop_mult=0.5, metrics=accuracy)learn.fine_tune(4, 1e-2)```This reduces the batch size to 32 (we will explain this later). If you keep hitting the same error, change 32 to 16.learn.predict("I really liked that movie!")Sidebar: The Order Matters End sidebarfrom fastai.tabular.all import * path = untar_data(URLs.ADULT_SAMPLE) dls = TabularDataLoaders.from_csv(path/'adult.csv', path=path, y_names="salary", cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race'], cont_names = ['age', 'fnlwgt', 'education-num'], procs = [Categorify, FillMissing, Normalize]) learn = tabular_learner(dls, metrics=accuracy) learn.fit_one_cycle(3) from fastai.collab import * path = untar_data(URLs.ML_SAMPLE) dls = CollabDataLoaders.from_csv(path/'ratings.csv') learn = collab_learner(dls, y_range=(0.5,5.5)) learn.fine_tune(10) learn.show_results()MASH 2-2 cascade Introduction We will simulate here a 2-2 MASH cascade.The example is taken from . The package used here -- `python-deltasigma` -- is a port of 's MATLAB Delta-Sigma toolbox, available at: http://www.mathworks.com/matlabcentral/fileexchange. The credit goes to him for all algorithms employed. Modulator description Each modulator in the cascade is described by the ABCD matrix:ABCD1 = [[1., 0., 1., -1.], [1., 1., 0., -2.], [0., 1., 0., 0.]] ABCD1 = np.array(ABCD1, dtype=np.float32)Each quantizer has 9 levels.We need to describe the modulator in terms of its ABCD matrix:ABCD = [[1, 0, 0, 0, 1, -1, 0], [1, 1, 0, 0, 0, -2, 0], [0, 1, 1, 0, 0, 0, -1], [0, 0, 1, 1, 0, 0, -2], [0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0]] ABCD = np.array(ABCD, dtype=np.float_)The modulator will have two quantizer, each of them having 9 levels, or slightly more than 3 bit. For this reason `nlev` is set to an array.nlev = [9, 9]Transfer functionsWe can now calculate the transfer functions associated with the modulator.Notice there will be 6 of them, **4 NTFs**:1. $NTF_{0,0}$: from the quantization noise injected by the 1st quantizer, to the output of the 1st DSM.2. $NTF_{1,0}$: from the quantization noise injected by the 1st quantizer, to the output of the 2nd DSM.3. $NTF_{1,1}$: from the quantization noise injected by the 2nd quantizer, to the output of the 2nd DSM.4. $NTF_{0,1}$: Theoretically it also exists a transfer function from the quantization noise injected by the 2nd quantizer, to the output of the 1st DSM. 
Since the signal connections between the blocks are unidirectional, the noise added downstream cannot affect the signals upstream, and this transfer function will be null.And **2 STFs**:1. $STF_0$: From the signal input to the output of the 1st DSM.2. $STF_1$: From the signal input to the output of the 2nd DSM.k = [1., 1.] ntfs, stfs = ds.calculateTF(ABCD, k)//anaconda/lib/python2.7/site-packages/scipy/signal/filter_design.py:400: BadCoefficients: Badly conditioned filter coefficients (numerator): the results may be meaningless "results may be meaningless", BadCoefficients)Noise transfer to the first outputprint "NTF_00:\n" print ds.pretty_lti(ntfs[0, 0]) print "NTF_01:\n" print ds.pretty_lti(ntfs[0, 1])NTF_01: 0Noise transfer to the second outputprint "NTF_10:\n" print ds.pretty_lti(ntfs[1, 0]) print "NTF_11:\n" print ds.pretty_lti(ntfs[1, 1])NTF_11: (z - 1)^2 ----------- z^2NTF pole-zero plotsfigure(figsize=(20, 6)) subplot(131) title("$NTF_{0,0}$") ds.plotPZ(ntfs[0, 0], showlist=True) subplot(132) title("$NTF_{1,0}$") ds.plotPZ(ntfs[1, 0], showlist=True) subplot(133) title("$NTF_{1,1}$") ds.plotPZ(ntfs[1, 1], showlist=True)Signal transfer functionsprint "STF_0:\n" print ds.pretty_lti(stfs[0]) print "\n\nSTF_1:\n" print ds.pretty_lti(stfs[1])STF_0: 1 ----- z^2 STF_1: 1 ----- z^4STF pole-zero plotsfigure(figsize=(13, 4)) subplot(121) title("$STF_{0}$") ds.plotPZ(stfs[0], showlist=True) subplot(122) title("$STF_{1}$") ds.plotPZ(stfs[1], showlist=True)Compensation of the quantization noiseOverall, the outputs $V_1$ and $V_2$ are given by:$$V_1 = u\,z^{-2}+(1 - z^{-1})^2\,e_1$$ $$V_2 = u\, z^{-4} -2 (1 - 0.5z^{-1})\,z^{-3}\,e_1 +(1 - z^{-1})^2\,e_2 $$It can be shown that, combining $V_1$ and $V_2$, multiplying each of them respectively by:$$M_1 = 2z^{-3} - z^{-4}$$and$$M_2 = (1 - z^{-1})^2 $$and then summing the result, gives an overall output $V_{TOT}$ with expression:$$V_{TOT} = M_1V_1 + M_2V_2 = u\,z^{-4} + (1 - z^{-1})^4e_2.$$The terms in $e_1$ do not appear in the above equation as they cancel out: the second modulator allows for the compensation of the quantization noise introduced by the first. 
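Before the numerical check that follows, the same cancellation can also be confirmed symbolically. This is a small added sketch using sympy (not otherwise used in this notebook), written in terms of $z_1 = z^{-1}$:

```python
# Symbolic cross-check that M1*V1 + M2*V2 removes e1 and leaves u*z^-4 + (1 - z^-1)^4 * e2
import sympy as sp

z1, u, e1, e2 = sp.symbols('z1 u e1 e2')   # z1 stands for z**-1

V1 = u*z1**2 + (1 - z1)**2*e1
V2 = u*z1**4 - 2*(1 - z1/2)*z1**3*e1 + (1 - z1)**2*e2
M1 = 2*z1**3 - z1**4
M2 = (1 - z1)**2

V_TOT = sp.expand(M1*V1 + M2*V2)
print(V_TOT)                                                # no e1 term survives
print(sp.simplify(V_TOT - (u*z1**4 + (1 - z1)**4*e2)))      # prints 0
```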
Overall, as it can be seen by the above equation, the system provides fourth order noise shaping by employing two second order DS loops.We briefly verify that numerically:def zpk_multiply(a, b): za, pa, ka = ds._utils._get_zpk(a) zb, pb, kb = ds._utils._get_zpk(b) pa = pa.tolist() if hasattr(pa, 'tolist') else pa pb = pb.tolist() if hasattr(pb, 'tolist') else pb za = za.tolist() if hasattr(za, 'tolist') else za zb = zb.tolist() if hasattr(zb, 'tolist') else zb return ds.cancelPZ((za+zb, pa+pb, ka*kb)) v1n = zpk_multiply(ntfs[0, 0], ([2, -1], [1, 0, 0, 0, 0])) v2n = zpk_multiply(ntfs[1, 0], ([1, 1], [0, 0], 1)) ntf_eq = zpk_multiply(ntfs[1, 1], ntfs[1, 1]) # compute v1n/v2n and check that it is equal to -1 res = zpk_multiply(v1n, (ds._utils._get_zpk(v2n)[1], ds._utils._get_zpk(v2n)[0], 1./ds._utils._get_zpk(v2n)[2])) print "The quantization noise cancels out: %s" % (int(ds.pretty_lti(res)) == -1)The quantization noise cancels out: TrueThe improvement in the NTF of the cascaded system may be better visualized plotting the spectras:figure(figsize=(16, 6)) subplot(121) ds.figureMagic(name='$NTF_{0,0} = NTF_{1,1}$') ds.PlotExampleSpectrum(ntfs[1, 1], M=31) ylabel('dBFS/NBW') subplot(122) ds.figureMagic(name='$M_1NTF_{0,0}+M_2\left(NTF_{1,0} + NTF_{1,1}\\right) = NTF_{0,0}^2$') ds.PlotExampleSpectrum(ntf_eq, M=31) #ds.PlotExampleSpectrum(ntfs[0, 0], M=31) tight_layout()Numerical simulation of the 2-2 cascade and SNR improvement Previously we simulated the NTF of a single modulator and the *expected* equivalent NTF when the two outputs are filtered and combined. Here we simulate the cascade of modulators with the ABCD matrix, computing their outputs $v_1$ and $v_2$, which are then numerically filtered and combined. Lastly, we check that the SNR improvement is as expected.Notice we needed to scale down the amplitude of the input sine since a sine wave at -3dBFS was pushing the modulator to instability. The filtering transfer functions $M_1$ and $M_2$ need to be expressed in terms of coefficients of $z^{-1}$ to be passed to `scipy`'s `lfilter`.The coefficients are:filtM1 = [0., 0., 0., 2., -1.] filtM2 = [1., -2., 1.] figure(figsize=(16, 6)) M = nlev[0] - 1 osr = 64 f0 = 0. f1, f2 = ds.ds_f1f2(OSR=64, f0=0., complex_flag=False) delta = 2 Amp = ds.undbv(-3) # Test tone amplitude, relative to full-scale. f = 0.3 # will be adjusted to a bin N = 2**12 f1_bin = np.round(f1*N) f2_bin = np.round(f2*N) fin = np.round(((1 - f)/2*f1 + (f + 1)/2*f2) * N) # input sine t = np.arange(0, N).reshape((1, -1)) u = Amp*M*np.cos((2*np.pi/N)*fin*t) # simulate! 
don't forget to pass a list (or tuple or ndarray) # as nlev value or the simulation will not be aware of the # multiple quantizers vx, _, xmax, y = ds.simulateDSM(u, ABCD, nlev=nlev) # separate output #1 and output #2 v1 = vx[0, :] v2 = vx[1, :] # filter and combine vf = lfilter(filtM1, [1.], v1) + lfilter(filtM2, [1.], v2) # compute the spectra window = ds.ds_hann(N) NBW = 1.5/N spec0 = np.fft.fft(vf*window)/(M*N/2)/ds.undbv(-6) spec1 = np.fft.fft(v1*window)/(M*N/2)/ds.undbv(-6) spec2 = np.fft.fft(v1*window)/(M*N/2)/ds.undbv(-6) freq = np.linspace(0, 0.5, N/2 + 1) plt.hold(True) plt.plot(freq, ds.dbv(spec0[:N/2 + 1]), 'c', linewidth=1, label='V1') plt.plot(freq, ds.dbv(spec2[:N/2 + 1]), '#fb8b00', linewidth=1, label='VF') # smooth, calculate the theorethical response and the SNR for VF spec0_smoothed = ds.circ_smooth(np.abs(spec0)**2., 16) plt.plot(freq, ds.dbp(spec0_smoothed[:N/2 + 1]), 'b', linewidth=3) Snn0 = np.abs(ds.evalTF(ntf_eq, np.exp(2j*np.pi*freq)))**2 * 2/12*(delta/M)**2 plt.plot(freq, ds.dbp(Snn0*NBW), 'm', linewidth=1) snr0 = ds.calculateSNR(spec0[f1_bin:f2_bin + 1], fin - f1_bin) msg = 'VF:\nSQNR = %.1fdB\n @ A = %.1fdBFS & osr = %.0f\n' % \ (snr0, ds.dbv(spec0[fin]), osr) plt.text(f0 + 1 / osr, - 15, msg, horizontalalignment='left', verticalalignment='center') # smooth, calculate the theorethical response and the SNR for V1 spec1_smoothed = ds.circ_smooth(np.abs(spec1)**2., 16) plt.plot(freq, ds.dbp(spec1_smoothed[:N/2 + 1]), '#d40000', linewidth=3) Snn1 = np.abs(ds.evalTF(ntfs[0, 0], np.exp(2j*np.pi*freq)))**2 * 2/12*(delta/M)**2 plt.plot(freq, ds.dbp(Snn1*NBW), 'm', linewidth=1) snr1 = ds.calculateSNR(spec1[f1_bin:f2_bin + 1], fin - f1_bin) msg = 'V1:\nSQNR = %.1fdB\n @ A = %.1fdBFS & osr = %.0f\n' % \ (snr1, ds.dbv(spec1[fin]), osr) plt.text(f0 + 1/osr, - 15-30, msg, horizontalalignment='left', verticalalignment='center') plt.text(0.5, - 135, 'NBW = %.1e ' % NBW, horizontalalignment='right', verticalalignment='bottom') ds.figureMagic((0, 0.5), 1./16, None, (-160, 0), 10, None) legend() title("Spectra"); xlabel("Normalized frequency $f \\rightarrow 1$");ylabel("dBFS/NBW"); print "Overall the SNR improved by %g (!) at OSR=%d." % (snr0-snr1, osr)Overall the SNR improved by 48.0339 (!) at OSR=64.Notice that, as it often happen, it is not immediate to see by eye that the composed signal $v_f$ has better SNR than $v_1$ (or $v_2$).In fact, consider the following plot of the signals from which the above spectra and SNRs were calculated:figure(figsize=(14, 6)) plot(vf[100:800], label='$v_f$') plot(v1[100:800], label='$v_1$') plot(u[:, 100:800].T, 'r', label='$u$') xlabel('sample #'); legend();ConclusionsThis notebook showed how it is possible, in the case of a Noise Shaping Multi-stage (MASH) cascade, to: * calculate the signal and noise transfer functions, * simulate the topology,* filter and combine the outputs and* evaluate the SNR improvement,with `python-deltasigma`.#%install_ext http://raw.github.com/jrjohansson/version_information/master/version_information.py %load_ext version_information %reload_ext version_information %version_information numpy, scipy, matplotlib, deltasigma//anaconda/lib/python2.7/site-packages/IPython/core/formatters.py:827: FormatterWarning: JSON expects JSONable list/dict containers, not JSON strings FormatterWarning)Fig. 
1weight_df = pd.read_csv('../ch2_weighing/results/2020_S/weight_ct.csv', index_col='Unnamed: 0') weight_df.index = pd.DatetimeIndex(weight_df.index) weight_df.index = np.append(weight_df.index[:-20], pd.DatetimeIndex(['2020-07-03']*20)) harvest_df = pd.read_excel('./data/harvest_FWnDW_2020S.xlsx', index_col='date', sheet_name='A2') # fruit_df = weight_df[['Idv fruit FW', 'Fruit DW']].dropna() # fruit_df.columns = harvest_df.columns # harvest_df = pd.concat([harvest_df, fruit_df], axis=0) harvest_df = harvest_df.sort_index() # harvest_df = harvest_df.iloc[3:] roots_DW = np.array([301.25, 293.55, 315.15, 356.45, 341.55, 267.65, 303.55, 231.05, 253.05, 272.35, 334.35])/1000 roots_DW_mean = roots_DW.mean() rs_ratio_df = pd.DataFrame([0.23255813953488372, 0.14789272030651343, 0.11954022988505748, 0.13678160919540233, 0.2835814088817321], index=weight_df.index.unique(), columns=['RS_ratio']) DW_sum_df = weight_df[[col for col in weight_df.columns if col.endswith('DW')]].sum(axis=1).groupby(weight_df.index).mean() roots_df = (DW_sum_df.T * rs_ratio_df.T).T roots_df.columns = ['root DW'] roots_df['root FW'] = roots_df['root DW']/0.1325 roots_df.index = pd.DatetimeIndex(roots_df.index) weight_df = pd.concat([weight_df, roots_df], axis=1) weight_df.columns fig = plt.figure(figsize=((8/2.54*2.2), (6/2.54*2.2))) grid = plt.GridSpec(2, 2) ax0 = plt.subplot(grid[0]) ax1 = plt.subplot(grid[1]) ax2 = plt.subplot(grid[2]) ax3 = plt.subplot(grid[3]) ax0.spines['right'].set_visible(False) ax0.spines['left'].set_position(('outward', 10)) ax0.spines['bottom'].set_position(('outward', 5)) ax1.spines['right'].set_visible(False) ax1.spines['left'].set_position(('outward', 10)) ax1.spines['bottom'].set_position(('outward', 5)) ax2.spines['right'].set_visible(False) ax2.spines['left'].set_position(('outward', 10)) ax2.spines['bottom'].set_position(('outward', 5)) ax3.spines['right'].set_visible(False) ax3.spines['left'].set_position(('outward', 10)) ax3.spines['bottom'].set_position(('outward', 5)) ax0.plot(weight_df.index, weight_df['Stem FW'], 'o', mec='k', mew=0.5, c=cmap[3]) ax1.plot(weight_df.index, weight_df['Leaf FW'], 'o', mec='k', mew=0.5, c=cmap[3]) ax2.plot(weight_df.index, weight_df['petiole FW'], 'o', mec='k', mew=0.5, c=cmap[3]) ax3.plot(weight_df.index, weight_df['Idv fruit FW'], 'o', mec='k', mew=0.5, c=cmap[3]) ax0.set_xbound(weight_df.index.min(), weight_df.index.max()) ax0.xaxis.set_major_locator(LinearLocator(5)) ax0.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax1.set_xbound(weight_df.index.min(), weight_df.index.max()) ax1.xaxis.set_major_locator(LinearLocator(5)) ax1.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax2.set_xbound(weight_df.index.min(), weight_df.index.max()) ax2.xaxis.set_major_locator(LinearLocator(5)) ax2.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax3.set_xbound(weight_df.index.min(), weight_df.index.max()) ax3.xaxis.set_major_locator(LinearLocator(5)) ax3.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax0.yaxis.set_major_locator(LinearLocator(9)) ax0.yaxis.set_minor_locator(LinearLocator(17)) ax0.set_ybound(0, 800) ax1.yaxis.set_major_locator(LinearLocator(8)) ax1.yaxis.set_minor_locator(LinearLocator(15)) ax1.set_ybound(0, 700) ax2.yaxis.set_major_locator(LinearLocator(7)) ax2.yaxis.set_minor_locator(LinearLocator(13)) ax2.set_ybound(0, 180) ax3.yaxis.set_major_locator(LinearLocator(8)) ax3.yaxis.set_minor_locator(LinearLocator(15)) ax3.set_ybound(0, 1400) ax0.set_ylabel('Stem fresh weight (g)') ax1.set_ylabel('Leaf fresh weight 
(g)') ax2.set_xlabel('Date') ax2.set_ylabel('Petiole fresh weight (g)') ax3.set_ylabel('Fruit fresh weight (g)') ax0.axes.xaxis.set_ticklabels([]) ax1.axes.xaxis.set_ticklabels([]) fig.tight_layout() fig.savefig('./figures/Fig1-1.svg', transparent=True, format='svg') plt.show()파프리카는 무한생장하니까 리니어에 가까울 것이다?fig = plt.figure(figsize=((8/2.54*2.2), (6/2.54*2.2))) grid = plt.GridSpec(2, 2) ax0 = plt.subplot(grid[0]) ax1 = plt.subplot(grid[1]) ax2 = plt.subplot(grid[2]) ax3 = plt.subplot(grid[3]) ax0.spines['right'].set_visible(False) ax0.spines['left'].set_position(('outward', 10)) ax0.spines['bottom'].set_position(('outward', 5)) ax1.spines['right'].set_visible(False) ax1.spines['left'].set_position(('outward', 10)) ax1.spines['bottom'].set_position(('outward', 5)) ax2.spines['right'].set_visible(False) ax2.spines['left'].set_position(('outward', 10)) ax2.spines['bottom'].set_position(('outward', 5)) ax3.spines['right'].set_visible(False) ax3.spines['left'].set_position(('outward', 10)) ax3.spines['bottom'].set_position(('outward', 5)) ax0.plot(weight_df.index, weight_df['Stem DW'], 'o', mec='k', mew=0.5, c=cmap[2]) ax1.plot(weight_df.index, weight_df['Leaf DW'], 'o', mec='k', mew=0.5, c=cmap[2]) ax2.plot(weight_df.index, weight_df['petiole DW'], 'o', mec='k', mew=0.5, c=cmap[2]) ax3.plot(weight_df.index, weight_df['Fruit DW'], 'o', mec='k', mew=0.5, c=cmap[2]) ax0.set_xbound(weight_df.index.min(), weight_df.index.max()) ax0.xaxis.set_major_locator(LinearLocator(5)) ax0.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax1.set_xbound(weight_df.index.min(), weight_df.index.max()) ax1.xaxis.set_major_locator(LinearLocator(5)) ax1.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax2.set_xbound(weight_df.index.min(), weight_df.index.max()) ax2.xaxis.set_major_locator(LinearLocator(5)) ax2.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax3.set_xbound(weight_df.index.min(), weight_df.index.max()) ax3.xaxis.set_major_locator(LinearLocator(5)) ax3.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax0.yaxis.set_major_locator(LinearLocator(5)) ax0.yaxis.set_minor_locator(LinearLocator(21)) ax0.set_ybound(0, 200) ax1.yaxis.set_major_locator(LinearLocator(7)) ax1.yaxis.set_minor_locator(LinearLocator(13)) ax1.set_ybound(0, 120) ax2.yaxis.set_major_locator(LinearLocator(5)) ax2.yaxis.set_minor_locator(LinearLocator(21)) ax2.set_ybound(0, 20) ax3.yaxis.set_major_locator(LinearLocator(7)) ax3.yaxis.set_minor_locator(LinearLocator(13)) ax3.set_ybound(0, 120) ax0.set_ylabel('Stem dry weight (g)') ax1.set_ylabel('Leaf dry weight (g)') ax2.set_xlabel('Date') ax2.set_ylabel('Petiole dry weight (g)') ax3.set_ylabel('Fruit dry weight (g)') ax0.axes.xaxis.set_ticklabels([]) ax1.axes.xaxis.set_ticklabels([]) fig.tight_layout() fig.savefig('./figures/Fig1-2.svg', transparent=True, format='svg') plt.show() fig = plt.figure(figsize=((8/2.54*2.2), (6/2.54*1.2))) grid = plt.GridSpec(1, 2) ax0 = plt.subplot(grid[0]) ax1 = plt.subplot(grid[1]) ax0.spines['right'].set_visible(False) ax0.spines['left'].set_position(('outward', 10)) ax0.spines['bottom'].set_position(('outward', 5)) ax1.spines['right'].set_visible(False) ax1.spines['left'].set_position(('outward', 10)) ax1.spines['bottom'].set_position(('outward', 5)) ax0.plot(harvest_df.index, harvest_df['fw'], 'o', mec='k', mew=0.5, c=cmap[0]) ax1.plot(harvest_df.index, harvest_df['dw'], 'o', mec='k', mew=0.5, c=cmap_m[0]) ax0.set_xbound(harvest_df.index.min(), harvest_df.index.max()) ax0.xaxis.set_major_locator(LinearLocator(5)) 
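# LinearLocator(5) places five evenly spaced major ticks on the shared date axis; the DateFormatter('%m-%d') set just below renders them as month-day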
ax0.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax1.set_xbound(harvest_df.index.min(), harvest_df.index.max()) ax1.xaxis.set_major_locator(LinearLocator(5)) ax1.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax0.yaxis.set_major_locator(LinearLocator(7)) ax0.yaxis.set_minor_locator(LinearLocator(13)) ax0.set_ybound(0, 300) ax1.yaxis.set_major_locator(LinearLocator(7)) ax1.yaxis.set_minor_locator(LinearLocator(13)) ax1.set_ybound(0, 30) ax0.set_xlabel('Date') ax0.set_ylabel('Harvested fruit FW (g)') ax1.set_ylabel('Harvested fruit DW (g)') fig.tight_layout() fig.savefig('./figures/Fig1-3.svg', transparent=True, format='svg') plt.show() fig = plt.figure(figsize=((8/2.54*2.2), (6/2.54*1.2))) grid = plt.GridSpec(1, 2) ax0 = plt.subplot(grid[0]) ax1 = plt.subplot(grid[1]) ax0.spines['right'].set_visible(False) ax0.spines['left'].set_position(('outward', 10)) ax0.spines['bottom'].set_position(('outward', 5)) ax1.spines['right'].set_visible(False) ax1.spines['left'].set_position(('outward', 10)) ax1.spines['bottom'].set_position(('outward', 5)) ax0.set_xbound(weight_df.index.min(), weight_df.index.max()) ax0.xaxis.set_major_locator(LinearLocator(5)) ax0.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax1.set_xbound(weight_df.index.min(), weight_df.index.max()) ax1.xaxis.set_major_locator(LinearLocator(5)) ax1.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax0.yaxis.set_major_locator(LinearLocator(6)) ax0.yaxis.set_minor_locator(LinearLocator(11)) ax0.set_ybound(0, 100) ax1.yaxis.set_major_locator(LinearLocator(7)) ax1.yaxis.set_minor_locator(LinearLocator(13)) ax1.set_ybound(4, 16) ax0.set_xlabel('Date') ax0.set_ylabel('Root DW (g)') ax1.set_ylabel('Root/Shoot DW ratio') fig.tight_layout() # fig.savefig('./figures/Fig1-4.svg', transparent=True, format='svg') fig.savefig('./figures/Fig1-4.png', transparent=True, dpi=600, format='png') plt.show()Fig. 
1-planBweight_er_df = weight_df.groupby(weight_df.index).std() weight_avg_df = weight_df.groupby(weight_df.index).mean() weight_avg_df.columns fig = plt.figure(figsize=((8/2.54*2.2), (6/2.54*2.2))) grid = plt.GridSpec(2, 2) ax0 = plt.subplot(grid[0]) ax1 = plt.subplot(grid[1]) ax2 = plt.subplot(grid[2]) ax3 = plt.subplot(grid[3]) ax0.spines['right'].set_visible(False) ax0.spines['left'].set_position(('outward', 10)) ax0.spines['bottom'].set_position(('outward', 5)) ax1.spines['right'].set_visible(False) ax1.spines['left'].set_position(('outward', 10)) ax1.spines['bottom'].set_position(('outward', 5)) ax2.spines['right'].set_visible(False) ax2.spines['left'].set_position(('outward', 10)) ax2.spines['bottom'].set_position(('outward', 5)) ax3.spines['right'].set_visible(False) ax3.spines['left'].set_position(('outward', 10)) ax3.spines['bottom'].set_position(('outward', 5)) ax0.errorbar(weight_avg_df.index, weight_avg_df['Stem FW'], yerr=weight_er_df['Stem FW'], marker='o', capsize=5, ms=5, c=cmap[3], mec='k', mew=0.5) ax1.errorbar(weight_avg_df.index, weight_avg_df['Leaf FW'], yerr=weight_er_df['Leaf FW'], marker='o', capsize=5, ms=5, c=cmap[3], mec='k', mew=0.5) ax2.errorbar(weight_avg_df.index, weight_avg_df['petiole FW'], yerr=weight_er_df['petiole FW'], marker='o', capsize=5, ms=5, c=cmap[3], mec='k', mew=0.5) ax3.errorbar(weight_avg_df.index, weight_avg_df['Idv fruit FW'], yerr=weight_er_df['Idv fruit FW'], marker='o', capsize=5, ms=5, c=cmap[3], mec='k', mew=0.5) ax0.set_xbound(weight_avg_df.index.min(), weight_avg_df.index.max()) ax0.xaxis.set_major_locator(LinearLocator(5)) ax0.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax1.set_xbound(weight_avg_df.index.min(), weight_avg_df.index.max()) ax1.xaxis.set_major_locator(LinearLocator(5)) ax1.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax2.set_xbound(weight_avg_df.index.min(), weight_avg_df.index.max()) ax2.xaxis.set_major_locator(LinearLocator(5)) ax2.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax3.set_xbound(weight_avg_df.index.min(), weight_avg_df.index.max()) ax3.xaxis.set_major_locator(LinearLocator(5)) ax3.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax0.yaxis.set_major_locator(LinearLocator(9)) ax0.yaxis.set_minor_locator(LinearLocator(17)) ax0.set_ybound(0, 800) ax1.yaxis.set_major_locator(LinearLocator(8)) ax1.yaxis.set_minor_locator(LinearLocator(15)) ax1.set_ybound(0, 700) ax2.yaxis.set_major_locator(LinearLocator(7)) ax2.yaxis.set_minor_locator(LinearLocator(13)) ax2.set_ybound(0, 180) ax3.yaxis.set_major_locator(LinearLocator(8)) ax3.yaxis.set_minor_locator(LinearLocator(15)) ax3.set_ybound(0, 1400) ax0.set_ylabel('Stem fresh weight (g)') ax1.set_ylabel('Leaf fresh weight (g)') ax2.set_xlabel('Date') ax2.set_ylabel('Petiole fresh weight (g)') ax3.set_ylabel('Fruit fresh weight (g)') ax0.axes.xaxis.set_ticklabels([]) ax1.axes.xaxis.set_ticklabels([]) fig.tight_layout() fig.savefig('./figures/Fig1-1.svg', transparent=True, format='svg') plt.show() fig = plt.figure(figsize=((8/2.54*2.2), (6/2.54*2.2))) grid = plt.GridSpec(2, 2) ax0 = plt.subplot(grid[0]) ax1 = plt.subplot(grid[1]) ax2 = plt.subplot(grid[2]) ax3 = plt.subplot(grid[3]) ax0.spines['right'].set_visible(False) ax0.spines['left'].set_position(('outward', 10)) ax0.spines['bottom'].set_position(('outward', 5)) ax1.spines['right'].set_visible(False) ax1.spines['left'].set_position(('outward', 10)) ax1.spines['bottom'].set_position(('outward', 5)) ax2.spines['right'].set_visible(False) 
ax2.spines['left'].set_position(('outward', 10)) ax2.spines['bottom'].set_position(('outward', 5)) ax3.spines['right'].set_visible(False) ax3.spines['left'].set_position(('outward', 10)) ax3.spines['bottom'].set_position(('outward', 5)) ax0.errorbar(weight_avg_df.index, weight_avg_df['Stem DW'], yerr=weight_er_df['Stem DW'], marker='o', capsize=5, ms=5, c=cmap[2], mec='k', mew=0.5) ax1.errorbar(weight_avg_df.index, weight_avg_df['Leaf DW'], yerr=weight_er_df['Leaf DW'], marker='o', capsize=5, ms=5, c=cmap[2], mec='k', mew=0.5) ax2.errorbar(weight_avg_df.index, weight_avg_df['petiole DW'], yerr=weight_er_df['petiole DW'], marker='o', capsize=5, ms=5, c=cmap[2], mec='k', mew=0.5) ax3.errorbar(weight_avg_df.index, weight_avg_df['Fruit DW'], yerr=weight_er_df['Fruit DW'], marker='o', capsize=5, ms=5, c=cmap[2], mec='k', mew=0.5) ax0.set_xbound(weight_avg_df.index.min(), weight_avg_df.index.max()) ax0.xaxis.set_major_locator(LinearLocator(5)) ax0.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax1.set_xbound(weight_avg_df.index.min(), weight_avg_df.index.max()) ax1.xaxis.set_major_locator(LinearLocator(5)) ax1.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax2.set_xbound(weight_avg_df.index.min(), weight_avg_df.index.max()) ax2.xaxis.set_major_locator(LinearLocator(5)) ax2.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax3.set_xbound(weight_avg_df.index.min(), weight_avg_df.index.max()) ax3.xaxis.set_major_locator(LinearLocator(5)) ax3.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax0.yaxis.set_major_locator(LinearLocator(5)) ax0.yaxis.set_minor_locator(LinearLocator(21)) ax0.set_ybound(0, 200) ax1.yaxis.set_major_locator(LinearLocator(7)) ax1.yaxis.set_minor_locator(LinearLocator(13)) ax1.set_ybound(0, 120) ax2.yaxis.set_major_locator(LinearLocator(5)) ax2.yaxis.set_minor_locator(LinearLocator(21)) ax2.set_ybound(0, 20) ax3.yaxis.set_major_locator(LinearLocator(7)) ax3.yaxis.set_minor_locator(LinearLocator(13)) ax3.set_ybound(0, 120) ax0.set_ylabel('Stem dry weight (g)') ax1.set_ylabel('Leaf dry weight (g)') ax2.set_xlabel('Date') ax2.set_ylabel('Petiole dry weight (g)') ax3.set_ylabel('Fruit dry weight (g)') ax0.axes.xaxis.set_ticklabels([]) ax1.axes.xaxis.set_ticklabels([]) fig.tight_layout() fig.savefig('./figures/Fig1-2.svg', transparent=True, format='svg') plt.show() hv_avg_df = harvest_df.groupby(harvest_df.index).sum() fig = plt.figure(figsize=((8/2.54*2.2), (6/2.54*1.2))) grid = plt.GridSpec(1, 2) ax0 = plt.subplot(grid[0]) ax1 = plt.subplot(grid[1]) ax0.spines['right'].set_visible(False) ax0.spines['left'].set_position(('outward', 10)) ax0.spines['bottom'].set_position(('outward', 5)) ax1.spines['right'].set_visible(False) ax1.spines['left'].set_position(('outward', 10)) ax1.spines['bottom'].set_position(('outward', 5)) ax0.errorbar(hv_avg_df.index, hv_avg_df['fw']/1000, marker='o', capsize=5, ms=5, c=cmap[0], mec='k', mew=0.5) ax1.errorbar(hv_avg_df.index, hv_avg_df['dw'], marker='o', capsize=5, ms=5, c=cmap_m[0], mec='k', mew=0.5) ax0.set_xbound(harvest_df.index.min(), harvest_df.index.max()) ax0.xaxis.set_major_locator(LinearLocator(5)) ax0.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax1.set_xbound(harvest_df.index.min(), harvest_df.index.max()) ax1.xaxis.set_major_locator(LinearLocator(5)) ax1.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax0.yaxis.set_major_locator(LinearLocator(6)) ax0.yaxis.set_minor_locator(LinearLocator(11)) ax0.set_ybound(0, 5) ax1.yaxis.set_major_locator(LinearLocator(5)) 
ax1.yaxis.set_minor_locator(LinearLocator(17)) ax1.set_ybound(0, 400) ax0.set_xlabel('Date') ax0.set_ylabel('Harvested fruit FW (kg)') ax1.set_ylabel('Harvested fruit DW (g)') fig.tight_layout() fig.savefig('./figures/Fig1-3.svg', transparent=True, format='svg') plt.show()Fig. 11?SW2_df = pd.read_csv('../ch2_weighing/results/2020_S/SW2_greenhouse.csv', index_col='Unnamed: 0') SW2_df.index = pd.DatetimeIndex(SW2_df.index) SW2_df = SW2_df.loc['2020-03-05 00:00:00': '2020-07-03 23:59:00'] SW2_df = SW2_df.interpolate() rockwool_slab = np.array([626, 708, 650, 642]) rockwool_cube = np.array([48, 46, 50, 48, 46]) rockwool = (rockwool_slab + rockwool_cube.mean()*4)/1000 rockwool_mean = rockwool.mean() substrate_volume = (120*12*7.5 + 10*10*6.5*4)/1000 water_w_df = substrate_volume*SW2_df['subs_VWC']/100 SW2_df['water'] = water_w_df SW2_df.loc[:, 'loadcell_1'] = SW2_df.loc[:, 'loadcell_1'] - rockwool_mean SW2_df.loc[:, 'loadcell_2'] = SW2_df.loc[:, 'loadcell_2'] - rockwool_mean SW2_df.loc[:, 'loadcell_3'] = SW2_df.loc[:, 'loadcell_3'] - rockwool_mean weight_df = pd.read_csv('../ch2_weighing/results/2020_S/weight_ct.csv', index_col='Unnamed: 0') weight_df.index = pd.DatetimeIndex(weight_df.index) weight_df.index = np.append(weight_df.index[:-20], pd.DatetimeIndex(['2020-07-03']*20)) harvest_df = pd.read_excel('./data/harvest_FWnDW_2020S.xlsx', index_col='date', sheet_name='A2') # fruit_df = weight_df[['Idv fruit FW', 'Fruit DW']].dropna() # fruit_df.columns = harvest_df.columns # harvest_df = pd.concat([harvest_df, fruit_df], axis=0) harvest_df = harvest_df.sort_index() # harvest_df = harvest_df.iloc[3:] roots_DW = np.array([301.25, 293.55, 315.15, 356.45, 341.55, 267.65, 303.55, 231.05, 253.05, 272.35, 334.35])/1000 roots_DW_mean = roots_DW.mean() rs_ratio_df = pd.DataFrame([0.23255813953488372, 0.14789272030651343, 0.11954022988505748, 0.13678160919540233, 0.2835814088817321], index=weight_df.index.unique(), columns=['RS_ratio']) DW_sum_df = weight_df[[col for col in weight_df.columns if col.endswith('DW')]].sum(axis=1).groupby(weight_df.index).mean() roots_df = (DW_sum_df.T * rs_ratio_df.T).T roots_df.columns = ['root DW'] roots_df['root FW'] = roots_df['root DW']/0.1325 roots_df.index = pd.DatetimeIndex(roots_df.index) weight_df = pd.concat([weight_df, roots_df], axis=1) ratio_df = [weight_df['Stem FW']/weight_df['Stem DW'].interpolate(), weight_df['Leaf FW']/weight_df['Leaf DW'].interpolate(), weight_df['petiole FW']/weight_df['petiole DW'].interpolate(), weight_df['Idv fruit FW']/weight_df['Fruit DW'].interpolate(), weight_df['root FW']/weight_df['root DW'].interpolate() ] ratio_df = pd.concat(ratio_df, axis=1) ratio_df.columns = ['stem', 'leaf', 'pet', 'fruit', 'root'] ratio_df.iloc[1, 0] = np.nan ratio_df.iloc[-17, 2] = np.nan ratio_df.iloc[-12:-10, 2] = np.nan ratio_avg_df = ratio_df.groupby(ratio_df.index).mean() ratio_er_df = ratio_df.groupby(ratio_df.index).std() ratio_df = ratio_df[ratio_df < 30] fruit_ratio = harvest_df['fw']/harvest_df['dw'] fruit_ratio_avg = fruit_ratio.groupby(fruit_ratio.index).mean() fruit_ratio_er = fruit_ratio.groupby(fruit_ratio.index).std() fig = plt.figure(figsize=((8/2.54*2.2), (6/2.54*2.2))) grid = plt.GridSpec(2, 2) ax0 = plt.subplot(grid[0]) ax1 = plt.subplot(grid[1]) ax2 = plt.subplot(grid[2]) ax3 = plt.subplot(grid[3]) ax0.spines['right'].set_visible(False) ax0.spines['left'].set_position(('outward', 10)) ax0.spines['bottom'].set_position(('outward', 5)) ax1.spines['right'].set_visible(False) ax1.spines['left'].set_position(('outward', 10)) 
ax1.spines['bottom'].set_position(('outward', 5)) ax2.spines['right'].set_visible(False) ax2.spines['left'].set_position(('outward', 10)) ax2.spines['bottom'].set_position(('outward', 5)) ax3.spines['right'].set_visible(False) ax3.spines['left'].set_position(('outward', 10)) ax3.spines['bottom'].set_position(('outward', 5)) ax0.errorbar(ratio_avg_df.index, ratio_avg_df['stem'], yerr=ratio_er_df['stem'], marker='o', capsize=5, ms=5, c=cmap[0], mec='k', mew=0.5) ax1.errorbar(ratio_avg_df.index, ratio_avg_df['leaf'], yerr=ratio_er_df['leaf'], marker='o', capsize=5, ms=5, c=cmap[0], mec='k', mew=0.5) ax2.errorbar(ratio_avg_df.index, ratio_avg_df['pet'], yerr=ratio_er_df['pet'], marker='o', capsize=5, ms=5, c=cmap[0], mec='k', mew=0.5) # ax3.errorbar(ratio_avg_df.index, ratio_avg_df['fruit'], yerr=ratio_er_df['fruit'], marker='o', capsize=5, ms=5, c=cmap[0], mec='k', mew=0.5) ax3.errorbar(fruit_ratio_avg.index, fruit_ratio_avg, yerr=fruit_ratio_er, marker='o', capsize=5, ms=5, c=cmap[3], mec='k', mew=0.5) ax0.set_xbound(ratio_df.index.min(), ratio_df.index.max()) ax0.xaxis.set_major_locator(LinearLocator(5)) ax0.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax1.set_xbound(ratio_df.index.min(), ratio_df.index.max()) ax1.xaxis.set_major_locator(LinearLocator(5)) ax1.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax2.set_xbound(ratio_df.index.min(), ratio_df.index.max()) ax2.xaxis.set_major_locator(LinearLocator(5)) ax2.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax3.set_xbound(fruit_ratio_avg.index.min(), fruit_ratio_avg.index.max()) ax3.xaxis.set_major_locator(LinearLocator(5)) ax3.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax0.yaxis.set_major_locator(LinearLocator(7)) ax0.yaxis.set_minor_locator(LinearLocator(13)) ax0.set_ybound(4, 10) ax1.yaxis.set_major_locator(LinearLocator(6)) ax1.yaxis.set_minor_locator(LinearLocator(11)) ax1.set_ybound(5, 10) ax2.yaxis.set_major_locator(LinearLocator(5)) ax2.yaxis.set_minor_locator(LinearLocator(9)) ax2.set_ybound(9, 13) ax3.yaxis.set_major_locator(LinearLocator(5)) ax3.yaxis.set_minor_locator(LinearLocator(9)) ax3.set_ybound(8, 16) ax0.set_ylabel('Stem FW/DW ratio') ax1.set_ylabel('Leaf FW/DW ratio') ax2.set_xlabel('Date') ax2.set_ylabel('Petiole FW/DW ratio') ax3.set_xlabel('Date') ax3.set_ylabel('Fruit FW/DW ratio') fig.tight_layout() fig.savefig('./figures/Fig1.svg', transparent=True, format='svg') plt.show()Fig. 
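Each errorbar panel above repeats the same pattern: group one measured column by sampling date, take the mean and standard deviation, and draw the series with error bars. A hedged sketch of that pattern as a helper (the name errorbar_by_date and its signature are assumptions, not from the notebook):

def errorbar_by_date(ax, series, color, marker='o'):
    # Replicate plants sampled on the same date share an index value,
    # so grouping on the index gives the per-date mean and spread
    grouped = series.groupby(series.index)
    mean, std = grouped.mean(), grouped.std()
    ax.errorbar(mean.index, mean, yerr=std, marker=marker,
                capsize=5, ms=5, c=color, mec='k', mew=0.5)
    return ax

For example, errorbar_by_date(ax0, ratio_df['stem'], cmap[0]) would reproduce the stem FW/DW panel while reusing the notebook's existing cmap.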
3harvest_df1 = pd.read_excel('./data/A2_fruit_20200226-20200703.xlsx', index_col='Day of harvest') harvest_df1 = harvest_df1.resample('7d').sum() temp_df1 = pd.read_excel('./data/A2A3_fruit_20200102.xlsx', sheet_name='CT_L', index_col='Day of harvest').drop(['Treatments'], axis=1) temp_df2 = pd.read_excel('./data/A2A3_fruit_20200102.xlsx', sheet_name='CT_R', index_col='Day of harvest').drop(['Treatments'], axis=1) harvest_df2 = pd.concat([temp_df1, temp_df2]) harvest_df2 = harvest_df2.resample('7d').sum() fig = plt.figure(figsize=((8/2.54*2.2), (6/2.54*1.2))) grid = plt.GridSpec(1, 2) ax0 = plt.subplot(grid[0]) ax1 = plt.subplot(grid[1]) ax0.spines['right'].set_visible(False) ax0.spines['left'].set_position(('outward', 10)) ax0.spines['bottom'].set_position(('outward', 5)) ax1.spines['right'].set_visible(False) ax1.spines['left'].set_position(('outward', 10)) ax1.spines['bottom'].set_position(('outward', 5)) ax0.plot(harvest_df1.index, harvest_df1['Fresh weight']/1000, '-o', mec='k', mew=0.5, c=cmap[0]) ax1.plot(harvest_df2.index, harvest_df2['Fresh weight']/1000, '-o', mec='k', mew=0.5, c=cmap[0]) ax0.set_xbound(harvest_df1.index.min(), harvest_df1.index.max()) ax0.xaxis.set_major_locator(LinearLocator(5)) ax0.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax0.yaxis.set_major_locator(LinearLocator(5)) ax0.yaxis.set_minor_locator(LinearLocator(9)) ax1.set_xbound(harvest_df2.index.min(), harvest_df2.index.max()) ax1.xaxis.set_major_locator(LinearLocator(5)) ax1.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax1.yaxis.set_major_locator(LinearLocator(5)) ax1.yaxis.set_minor_locator(LinearLocator(9)) ax0.set_ybound(0, 40) ax1.set_ybound(0, 40) ax1.axes.yaxis.set_ticklabels([]) ax0.set_xlabel('Date (mm-dd)') ax0.set_ylabel('Harvested fruit weight (kg)') fig.tight_layout() fig.savefig('./figures/Fig3.svg', transparent=True, format='svg') plt.show()Fig. 
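The weekly totals above come from resample('7d').sum(), which buckets the irregular harvest dates into consecutive 7-day windows. A toy example with made-up dates and weights (not the experimental data) makes the bucketing explicit:

import pandas as pd

toy = pd.Series(
    [150, 220, 180, 90, 310],
    index=pd.to_datetime(['2020-03-02', '2020-03-04', '2020-03-09',
                          '2020-03-10', '2020-03-18']),
)
weekly = toy.resample('7d').sum()
print(weekly)
# Expected bins (left-labelled, 7 days wide, starting at the first date):
# 2020-03-02    370   (03-02 + 03-04)
# 2020-03-09    270   (03-09 + 03-10)
# 2020-03-16    310   (03-18 falls in the third window)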
4label_df = pd.read_csv('./results/model_output/transformer_label.csv', index_col='Unnamed: 0').sort_index()*1000 pred_df = pd.read_csv('./results/model_output/transformer_pred.csv', index_col='Unnamed: 0').sort_index()*1000 label_df.index = pd.DatetimeIndex(label_df.index) pred_df.index = pd.DatetimeIndex(pred_df.index) label_df = label_df.groupby(label_df.index).mean() pred_df = pred_df.groupby(pred_df.index).mean() weight_er_df['tot_fw'] = weight_df.loc[:, 'Stem FW':'Idv fruit FW'].sum(axis=1).groupby(weight_df.index).std() weight_er_df['tot_dw'] = weight_df.loc[:, 'Fruit DW':'petiole DW'].sum(axis=1).groupby(weight_df.index).std() pred_df['tot_dw'] = pred_df[['stem_dw', 'leaf_dw', 'petiole_dw', 'fruit_dw']].sum(axis=1) pred_df.loc[pred_df['harvest_fw'] < 0, 'harvest_fw'] = 0 pred_df.loc[pred_df['harvest_dw'] < 0, 'harvest_dw'] = 0 weight_avg_df['tot_fw'] = weight_avg_df.loc[:, 'Stem FW':'Idv fruit FW'].sum(axis=1) weight_avg_df['tot_dw'] = weight_avg_df.loc[:, 'Fruit DW':'petiole DW'].sum(axis=1) pred_df.rolling(5).mean() fig = plt.figure(figsize=((8/2.54*2.2), (6/2.54*1.2))) grid = plt.GridSpec(1, 2) ax0 = plt.subplot(grid[0]) ax1 = plt.subplot(grid[1]) ax0.spines['right'].set_visible(False) ax0.spines['left'].set_position(('outward', 10)) ax0.spines['bottom'].set_position(('outward', 5)) ax1.spines['right'].set_visible(False) ax1.spines['left'].set_position(('outward', 10)) ax1.spines['bottom'].set_position(('outward', 5)) ax0.plot(pred_df.index, pred_df.rolling(5).mean()['tot_fw']/1000, ms=5, c=cmap[3], mec='k', mew=0.5) ax1.plot(pred_df.index, pred_df.rolling(5).mean()['tot_dw']/1000, ms=5, c=cmap[2], mec='k', mew=0.5) ax0.errorbar(weight_avg_df.index, weight_avg_df['tot_fw']/1000, yerr=weight_er_df['tot_fw']/1000, fmt='o', capsize=5, ms=5, c='k', mew=0.5) ax1.errorbar(weight_avg_df.index, weight_avg_df['tot_dw'], yerr=weight_er_df['tot_dw'], fmt='o', capsize=5, ms=5, c='k', mew=0.5) ax0.set_xbound(pred_df.index.min(), pred_df.index.max()) ax0.xaxis.set_major_locator(LinearLocator(5)) ax0.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax1.set_xbound(pred_df.index.min(), pred_df.index.max()) ax1.xaxis.set_major_locator(LinearLocator(5)) ax1.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax0.yaxis.set_major_locator(LinearLocator(6)) ax0.yaxis.set_minor_locator(LinearLocator(11)) ax0.set_ybound(0, 2.5) ax1.yaxis.set_major_locator(LinearLocator(4)) ax1.yaxis.set_minor_locator(LinearLocator(7)) ax1.set_ybound(0, 300) ax0.set_ylabel('Total fresh weight (kg)') ax1.set_ylabel('Total dry weight (g)') ax0.set_xlabel('Date') fig.tight_layout() fig.savefig('./figures/Fig4-1.svg', transparent=True, format='svg') plt.show() fig = plt.figure(figsize=((8/2.54*4), (6/2.54*2.2))) grid = plt.GridSpec(2, 4) ax0 = plt.subplot(grid[0]) ax1 = plt.subplot(grid[1]) ax2 = plt.subplot(grid[2]) ax3 = plt.subplot(grid[3]) ax4 = plt.subplot(grid[4]) ax5 = plt.subplot(grid[5]) ax6 = plt.subplot(grid[6]) ax7 = plt.subplot(grid[7]) ax0.spines['right'].set_visible(False) ax0.spines['left'].set_position(('outward', 10)) ax0.spines['bottom'].set_position(('outward', 5)) ax1.spines['right'].set_visible(False) ax1.spines['left'].set_position(('outward', 10)) ax1.spines['bottom'].set_position(('outward', 5)) ax2.spines['right'].set_visible(False) ax2.spines['left'].set_position(('outward', 10)) ax2.spines['bottom'].set_position(('outward', 5)) ax3.spines['right'].set_visible(False) ax3.spines['left'].set_position(('outward', 10)) ax3.spines['bottom'].set_position(('outward', 5)) 
ax4.spines['right'].set_visible(False) ax4.spines['left'].set_position(('outward', 10)) ax4.spines['bottom'].set_position(('outward', 5)) ax5.spines['right'].set_visible(False) ax5.spines['left'].set_position(('outward', 10)) ax5.spines['bottom'].set_position(('outward', 5)) ax6.spines['right'].set_visible(False) ax6.spines['left'].set_position(('outward', 10)) ax6.spines['bottom'].set_position(('outward', 5)) ax7.spines['right'].set_visible(False) ax7.spines['left'].set_position(('outward', 10)) ax7.spines['bottom'].set_position(('outward', 5)) ax0.plot(pred_df.index, pred_df['stem_fw'], ms=5, c=cmap[3], mec='k', mew=0.5) ax1.plot(pred_df.index, pred_df['leaf_fw'], ms=5, c=cmap[3], mec='k', mew=0.5) ax2.plot(pred_df.index, pred_df['petiole_fw'], ms=5, c=cmap[3], mec='k', mew=0.5) ax3.plot(pred_df.index, pred_df['fruit_fw'], ms=5, c=cmap[3], mec='k', mew=0.5) ax0.errorbar(weight_avg_df.index, weight_avg_df['Stem FW'], yerr=weight_er_df['Stem FW'], fmt='o', capsize=5, ms=5, c='k', mew=0.5) ax1.errorbar(weight_avg_df.index, weight_avg_df['Leaf FW'], yerr=weight_er_df['Leaf FW'], fmt='o', capsize=5, ms=5, c='k', mew=0.5) ax2.errorbar(weight_avg_df.index, weight_avg_df['petiole FW'], yerr=weight_er_df['petiole FW'], fmt='o', capsize=5, ms=5, c='k', mew=0.5) ax3.errorbar(weight_avg_df.index, weight_avg_df['Idv fruit FW'], yerr=weight_er_df['Idv fruit FW'], fmt='o', capsize=5, ms=5, c='k', mew=0.5) ax4.plot(pred_df.index, pred_df['stem_dw']/1000, ms=5, c=cmap[2], mec='k', mew=0.5) ax5.plot(pred_df.index, pred_df['leaf_dw']/1000, ms=5, c=cmap[2], mec='k', mew=0.5) ax6.plot(pred_df.index, pred_df['petiole_dw']/1000, ms=5, c=cmap[2], mec='k', mew=0.5) ax7.plot(pred_df.index, pred_df['fruit_dw']/1000, ms=5, c=cmap[2], mec='k', mew=0.5) ax4.errorbar(weight_avg_df.index, weight_avg_df['Stem DW'], yerr=weight_er_df['Stem DW'], fmt='o', capsize=5, ms=5, c='k', mew=0.5) ax5.errorbar(weight_avg_df.index, weight_avg_df['Leaf DW'], yerr=weight_er_df['Leaf DW'], fmt='o', capsize=5, ms=5, c='k', mew=0.5) ax6.errorbar(weight_avg_df.index, weight_avg_df['petiole DW'], yerr=weight_er_df['petiole DW'], fmt='o', capsize=5, ms=5, c='k', mew=0.5) ax7.errorbar(weight_avg_df.index, weight_avg_df['Fruit DW'], yerr=weight_er_df['Fruit DW'], fmt='o', capsize=5, ms=5, c='k', mew=0.5) ax0.set_xbound(pred_df.index.min(), pred_df.index.max()) ax0.xaxis.set_major_locator(LinearLocator(5)) ax0.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax1.set_xbound(pred_df.index.min(), pred_df.index.max()) ax1.xaxis.set_major_locator(LinearLocator(5)) ax1.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax2.set_xbound(pred_df.index.min(), pred_df.index.max()) ax2.xaxis.set_major_locator(LinearLocator(5)) ax2.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax3.set_xbound(pred_df.index.min(), pred_df.index.max()) ax3.xaxis.set_major_locator(LinearLocator(5)) ax3.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax4.set_xbound(pred_df.index.min(), pred_df.index.max()) ax4.xaxis.set_major_locator(LinearLocator(5)) ax4.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax5.set_xbound(pred_df.index.min(), pred_df.index.max()) ax5.xaxis.set_major_locator(LinearLocator(5)) ax5.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax6.set_xbound(pred_df.index.min(), pred_df.index.max()) ax6.xaxis.set_major_locator(LinearLocator(5)) ax6.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax7.set_xbound(pred_df.index.min(), pred_df.index.max()) ax7.xaxis.set_major_locator(LinearLocator(5)) 
ax7.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax0.yaxis.set_major_locator(LinearLocator(5)) ax0.yaxis.set_minor_locator(LinearLocator(17)) ax0.set_ybound(0, 800) ax1.yaxis.set_major_locator(LinearLocator(8)) ax1.yaxis.set_minor_locator(LinearLocator(15)) ax1.set_ybound(0, 700) ax2.yaxis.set_major_locator(LinearLocator(4)) ax2.yaxis.set_minor_locator(LinearLocator(10)) ax2.set_ybound(0, 180) ax3.yaxis.set_major_locator(LinearLocator(8)) ax3.yaxis.set_minor_locator(LinearLocator(15)) ax3.set_ybound(0, 1400) ax4.yaxis.set_major_locator(LinearLocator(6)) ax4.yaxis.set_minor_locator(LinearLocator(16)) ax4.set_ybound(0, 150) ax5.yaxis.set_major_locator(LinearLocator(6)) ax5.yaxis.set_minor_locator(LinearLocator(11)) ax5.set_ybound(0, 100) ax6.yaxis.set_major_locator(LinearLocator(6)) ax6.yaxis.set_minor_locator(LinearLocator(16)) ax6.set_ybound(0, 15) ax7.yaxis.set_major_locator(LinearLocator(4)) ax7.yaxis.set_minor_locator(LinearLocator(10)) ax7.set_ybound(0, 90) ax0.set_ylabel('Stem fresh weight (g)') ax1.set_ylabel('Leaf fresh weight (g)') ax2.set_ylabel('Petiole fresh weight (g)') ax3.set_ylabel('Fruit fresh weight (g)') ax4.set_ylabel('Stem dry weight (g)') ax5.set_ylabel('Leaf dry weight (g)') ax6.set_xlabel('Date') ax6.set_ylabel('Petiole dry weight (g)') ax7.set_ylabel('Fruit dry weight (g)') ax0.axes.xaxis.set_ticklabels([]) ax1.axes.xaxis.set_ticklabels([]) ax2.axes.xaxis.set_ticklabels([]) ax3.axes.xaxis.set_ticklabels([]) fig.tight_layout() fig.savefig('./figures/Fig4-2.svg', transparent=True, format='svg') plt.show() fig = plt.figure(figsize=((8/2.54*2.2), (6/2.54*1.2))) grid = plt.GridSpec(1, 2) ax0 = plt.subplot(grid[0]) ax1 = plt.subplot(grid[1]) ax0.spines['right'].set_visible(False) ax0.spines['left'].set_position(('outward', 10)) ax0.spines['bottom'].set_position(('outward', 5)) ax1.spines['right'].set_visible(False) ax1.spines['left'].set_position(('outward', 10)) ax1.spines['bottom'].set_position(('outward', 5)) ax0.plot(pred_df.index, pred_df['harvest_fw']/1000, '-', ms=5, c=cmap[0], mec='k', mew=0.5) ax1.plot(pred_df.index, pred_df['harvest_dw'], '-', ms=5, c=cmap_m[0], mec='k', mew=0.5) ax0.errorbar(hv_avg_df.index, hv_avg_df['fw']/1000, fmt='o', capsize=5, ms=5, c='k', mew=0.5) ax1.errorbar(hv_avg_df.index, hv_avg_df['dw'], fmt='o', capsize=5, ms=5, c='k', mew=0.5) ax0.set_xbound(pred_df.index.min(), pred_df.index.max()) ax0.xaxis.set_major_locator(LinearLocator(5)) ax0.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax1.set_xbound(pred_df.index.min(), pred_df.index.max()) ax1.xaxis.set_major_locator(LinearLocator(5)) ax1.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) ax0.yaxis.set_major_locator(LinearLocator(4)) ax0.yaxis.set_minor_locator(LinearLocator(7)) ax0.set_ybound(0, 6) ax1.yaxis.set_major_locator(LinearLocator(5)) ax1.yaxis.set_minor_locator(LinearLocator(9)) ax1.set_ybound(0, 400) ax0.set_ylabel('Harvest fresh weight (kg)') ax1.set_ylabel('Harvest dry weight (g)') ax0.set_xlabel('Date') fig.tight_layout() fig.savefig('./figures/Fig4-3.svg', transparent=True, format='svg') plt.show()premio1="Viaje todo incluído para dos personas a San Andrés" premio2="Un pasadía a los termales de San Vicente incluyendo almuerzo" premio3="Viaje todo incluido para dos personas a Santa Marta" premio4="Pasadía al desierto de Tatacoa" premio5="No hay premio" concursante = input("Ingrese el nombre del concursante: ") Colorbalota = input("Ingrese el color de la balota: ") valorVariable = int (input("Ingrese el valor de la variable: ")) if (Colorbalota 
== "Rosado"): adicional1 = (valorVariable * 15) // 100 if (adicional1 > 120000): print("La empresa VIVAFLY se complace en anunciar que el participante ",concursante," gano ",premio1, "en nuestro sorteo de viajes de AMOR y AMISTAD") print("Mas ",adicional1," en efectivo.") elif (adicional1 < 120000): print("La empresa VIVAFLY se complace en anunciar que el participante ",concursante," gano ",premio1, "en nuestro sorteo de viajes de AMOR y AMISTAD") print("Mas dos de cine 4D para la pelicula de estreno del mes incluyendo un solo combo de palomitas.") elif (Colorbalota == "Verde"): adicional2 = (valorVariable * 20) // 100 if (adicional2 > 120000): print("La empresa VIVAFLY se complace en anunciar que el participante ",concursante," gano ",premio2, "en nuestro sorteo de viajes de AMOR y AMISTAD") print("Mas ",adicional2," en efectivo.") elif (adicional2 < 120000): print("La empresa VIVAFLY se complace en anunciar que el participante ",concursante," gano ",premio2, "en nuestro sorteo de viajes de AMOR y AMISTAD") print("Mas dos de cine 4D para la pelicula de estreno del mes incluyendo un solo combo de palomitas.") elif (Colorbalota == "Azul"): adicional3 = (valorVariable * 5) // 100 if (adicional3 > 120000): print("La empresa VIVAFLY se complace en anunciar que el participante ",concursante," gano ",premio3, "en nuestro sorteo de viajes de AMOR y AMISTAD") print("Mas ",adicional3," en efectivo.") elif (adicional3 < 120000): print("La empresa VIVAFLY se complace en anunciar que el participante ",concursante," gano ",premio3, "en nuestro sorteo de viajes de AMOR y AMISTAD") print("Mas dos de cine 4D para la pelicula de estreno del mes incluyendo un solo combo de palomitas.") elif (Colorbalota == "Gris"): adicional4 = (valorVariable * 20) // 100 if (adicional4 > 120000): print("La empresa VIVAFLY se complace en anunciar que el participante ",concursante," gano ",premio4, "en nuestro sorteo de viajes de AMOR y AMISTAD") print("Mas ",adicional4," en efectivo.") elif (adicional4 < 120000): print("La empresa VIVAFLY se complace en anunciar que el participante ",concursante," gano ",premio4, "en nuestro sorteo de viajes de AMOR y AMISTAD") print("Mas dos de cine 4D para la pelicula de estreno del mes incluyendo un solo combo de palomitas.") elif (Colorbalota == "Rojo"): print("La empresa VIVAFLY lamenta anunciar que el participante ",concursante,", no gano un viaje en nuestro sorteo de viajes de AMOR y AMISTAD") print("Pero gano un premio de 120000 en efectivo.") else: print("El color ingresado no esta en la plataforma. Intente de nuevo.") - Exploratory Data Analysis The data We have a large data set with over 18MB of data. In the following exploratory data analysis we will look into this data. This will give us an understanding of what can be done and what cannot be done with the data.The goal is to make an analysis looking into who the best driver in all of formula ones history has been. However, to see if this is possible, we will have to take a look at the data.#Importing libraries import pandas as pd import matplotlib.pyplot as plt from analysis1_pipeline import pipeline import numpy as np import seaborn as sns #running my pipeline function pipeline("../data/raw/","../data/processed/Johan_Processed/") #Importing the raw data and substituing the '\N' values for NaN. 
circuits = pd.read_csv("../data/processed/Johan_Processed/circuits.csv") drivers = pd.read_csv("../data/processed/Johan_Processed/drivers.csv") results = pd.read_csv("../data/processed/Johan_Processed/results.csv") seasons = pd.read_csv("../data/processed/Johan_Processed/seasons.csv") status = pd.read_csv("../data/processed/Johan_Processed/status.csv") lap_times = pd.read_csv("../data/processed/Johan_Processed/lap_times.csv") pit_stops = pd.read_csv("../data/processed/Johan_Processed/pit_stops.csv") qualifying = pd.read_csv("../data/processed/Johan_Processed/qualifying.csv") races = pd.read_csv("../data/processed/Johan_Processed/races.csv") constructors = pd.read_csv("../data/processed/Johan_Processed/constructors.csv") constructor_results = pd.read_csv("../data/processed/Johan_Processed/constructor_results.csv") constructor_standings = pd.read_csv("../data/processed/Johan_Processed/constructor_standings.csv") driver_standings = pd.read_csv("../data/processed/Johan_Processed/driver_standings.csv") #setting the seaborn theme sns.set_theme(style="whitegrid",palette="Blues_d") #cleaning the data #Since we are going to use the results df and the races df together we are going to merge them together using their raceId. We will also remove those columns that we know we will not need. results_races = pd.merge(results,races,on="raceId",how="left") results_races = results_races.drop(["url","time_y","rank","date","name","round","Unnamed: 0_y","Unnamed: 0_x"],axis=1)Exploratory Data AnalysisTo get some insights into the data we will look at the following parts of the data: - How much of the data is missing in the main dataset - The types of data in the dataset - The average finish time for each season - When in races the fastest laps are set for each season - The number of different races in each season --- The biggest dataframe is the merged results_races dataframe. This dataframe has a lot of interesting columns for the analysis. However, we will need to look into how much of the data is actually there.#Finding out how much data is missing. x = pd.DataFrame((1-(results_races.isnull().sum())/(np.size(results_races["driverId"])))) plt.xticks(rotation=70) ax = sns.barplot(x=results_races.columns, y=x[0], data=x,palette="Blues_d") plt.ylabel("Ratio") plt.title("Ratio of missing values in the columns");Here we see that some values are missing for almost two thirds of the data in some columns. For the position results we will also have to make sure that we use the column positionText or positionOrder, since these columns have no missing values. However, other than these columns, almost all of the columns have no missing values.---Next we will look into what types of data are in the results_races dataframe:results_races.dtypesHere we have integer or float values for all the columns but one. This will make working with the data easier. ---Apart from just looking at how much data and what kind of data we have, we also want to see what general trends there are in the data. Therefore we will take a look at how the average finishing time per race has evolved over the years.#Finding the average times per season season = results_races[["year","milliseconds"]].copy() season = season.groupby("year").mean().reset_index() season["minutes"]= season["milliseconds"]/(1000*60) #plotting the average times sns.lineplot(x=season["year"],y=season["minutes"],data=season) plt.title("Average finish time per season") plt.xlabel("Season"); plt.ylabel("Average finish time per season (minutes)");Here we see a clear downward trend.
We will have to make sure that this trend does not bias an analysis of the best driver in Formula One's history towards more recent drivers. --- For the fastest lap there is only data from 2004 and onwards. We will see when in the race the fastest lap has been set through the seasons.#For the fastest lap there is only data from 2004 and onwards #We find the mean fastest lap for each season divided by the mean number of laps driven each season. #Going through each season and finding the average lap # for all the cars that finish the race season = results_races.groupby("year").mean().reset_index() season["ratioLap"] = season["fastestLap"]/season["laps"] #plotting how far into the race the fastest lap has been set throughout the seasons ax = sns.lineplot(x=season["year"],y=season["ratioLap"],data=season) ax.set(xlim=(2004,2021)) plt.title("Mean Fastest Lap number /Mean number of laps driven per race") plt.xlabel("Season") plt.ylabel("Percent of the race driven") plt.xticks(rotation=40) plt.show()From this we see that cars are generally setting the fastest lap later in the race in the more recent seasons. --- Another thing that could influence the analysis is the number of races per season. We will therefore look at how the number of races has changed through the seasons.#Number of races per season year_season = results_races.groupby("year") year_season = year_season.agg({"raceId": "nunique", "circuitId":"nunique"}) year_season = year_season.reset_index() #plotting the number of races per season ax = sns.lineplot(x=year_season["year"],y=year_season["raceId"],data=year_season) ax.set(xlim=(1950,2021)) plt.title("Number of races per season") plt.xlabel("Season") plt.ylabel("Number of races") plt.xticks(rotation=40) plt.show() print("Number of different races is:",results_races["raceId"].unique().size,"\n") print("The average number of races per season is:",results_races["raceId"].unique().size/results_races["year"].unique().size,"\n")Coefficients of variation across topicsWhich topics mostly reflect differences within books, and which ones differences between books?import sys, csv, math import numpy as np import pandas as pd from matplotlib import pyplot %matplotlib inline # A useful function def getdoc(anid): ''' Gets the docid part of a character id ''' if '|' in anid: thedoc = anid.split('|')[0] else: print('error', anid) thedoc = anid return thedoc # Initialize some variables # We're going to do this for a 200-topic model. doctopic_path = '../fic200/fic200_doctopics.txt' veclen = 200 # Two variables that will hold a list of CVs and means # for each topic. Each item in the list represents a # coefficient of variation, or a mean, for a different # document.
doc_cvs = dict() doc_means= dict() for i in range(veclen): doc_means[i] = [] doc_cvs[i] = []Now we actually loop through the doctopic matrix measuring the mean and standard deviation of each topic in each book.names = ['theindex', 'charid'] names.extend(["topic" + str(x) for x in range (200)]) dtm = pd.read_csv(doctopic_path, sep = '\t', names = names) dtm.columns dtm = dtm.assign(docid = dtm.charid.map(getdoc)) groupedbydoc = dtm.groupby('docid') singletons = 0 for doc, group in groupedbydoc: for i in range(veclen): if len(group) > 1: the_mean = np.mean(group['topic' + str(i)]) the_std = np.std(group['topic' + str(i)]) the_cv = the_std/the_mean else: the_mean = 0 the_std = 0 the_cv = 0 singletons += 1 doc_cvs[i].append(the_cv) doc_means[i].append(the_mean) print(singletons/veclen, " single-char vols.") mean_cv_withinbook = dict() cv_acrossbook = dict() mean_acrossbook = dict() for i in range(veclen): mean_acrossbook[i] = np.mean(doc_means[i]) mask = np.array(doc_means[i]) > mean_acrossbook[i] / 8 mean_cv_withinbook[i] = np.mean(np.array(doc_cvs[i])[mask]) cv_acrossbook[i] = np.std(doc_means[i]) / mean_acrossbook[i] x = [mean_cv_withinbook[x] for x in range(veclen)] y = [cv_acrossbook[x] for x in range(veclen)] sizes = [math.sqrt(mean_acrossbook[x] * 200000) for x in range(veclen)] fig, ax = pyplot.subplots(figsize = (8, 8)) pyplot.scatter(x, y, s = sizes, alpha = 0.5) # ax.set_ylim(0, 8) # ax.set_xlim(1.4, 3) pyplot.show() keypath = '../fic200/fic200_keys.txt' keys = [] boring = {'said', 'had', 'was'} with open(keypath, encoding = 'utf-8') as f: for line in f: fields = line.strip().split('\t') text = fields[2] words = text.split()[1 : ] interestingwords = [] for w in words: if w not in boring: interestingwords.append(w) if len(interestingwords) > 20: break keys.append(interestingwords) keys = [' '.join(x) for x in keys] tuples = [] for i in range(veclen): if mean_cv_withinbook[i] > 2.7: tuples.append((mean_cv_withinbook[i], cv_acrossbook[i], i, keys[i])) tuples.sort() tuples keys[1]Silhouette analysis can be used to study the separation distance between the resulting clusters. The silhouette plot displays a measure of how close each point in one cluster is to points in the neighboring clusters and thus provides a way to assess parameters like number of clusters visually. This measure has a range of [-1, 1].Silhouette coefficients (as these values are referred to as) near +1 indicate that the sample is far away from the neighboring clusters. A value of 0 indicates that the sample is on or very close to the decision boundary between two neighboring clusters and negative values indicate that those samples might have been assigned to the wrong cluster.In this example the silhouette analysis is used to choose an optimal value for n_clusters. The silhouette plot shows that the n_clusters value of 3, 5 and 6 are a bad pick for the given data due to the presence of clusters with below average silhouette scores and also due to wide fluctuations in the size of the silhouette plots. Silhouette analysis is more ambivalent in deciding between 2 and 4.Also from the thickness of the silhouette plot the cluster size can be visualized. The silhouette plot for cluster 0 when n_clusters is equal to 2, is bigger in size owing to the grouping of the 3 sub clusters into one big cluster. However when the n_clusters is equal to 4, all the plots are more or less of similar thickness and hence are of similar sizes as can be also verified from the labelled scatter plot on the right. 
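The selection logic described here can also be stated compactly before the Plotly walkthrough below: fit KMeans for each candidate number of clusters and compare the mean silhouette scores. This condensed loop is my own sketch; it reuses the same make_blobs call and the same candidate range [2, 3, 4, 5, 6] as the example that follows.

from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import silhouette_score

# Same synthetic data as generated in the example below
X, _ = make_blobs(n_samples=500, n_features=2, centers=4,
                  cluster_std=1, center_box=(-10.0, 10.0),
                  shuffle=True, random_state=1)

scores = {}
for k in [2, 3, 4, 5, 6]:
    labels = KMeans(n_clusters=k, random_state=10).fit_predict(X)
    scores[k] = silhouette_score(X, labels)  # mean silhouette over all samples

best_k = max(scores, key=scores.get)
print(scores, "-> highest average silhouette at k =", best_k)

As the narrative above cautions, the highest average score alone is not the whole story; the per-cluster silhouette widths and thicknesses matter too, which is exactly what the plots below visualize.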
New to Plotly?Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).You can set up Plotly to work in [online](https://plot.ly/python/getting-started/initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/start-plotting-online).We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! Versionimport sklearn sklearn.__version__Imports This tutorial imports [make_blobs](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.htmlsklearn.datasets.make_blobs), [KMeanssilhouette_samples](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.htmlsklearn.cluster.KMeans), [silhouette_samples](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_samples.htmlsklearn.metrics.silhouette_samples) and [silhouette_score](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_score.htmlsklearn.metrics.silhouette_score).import plotly.plotly as py import plotly.graph_objs as go from plotly import tools from __future__ import print_function from sklearn.datasets import make_blobs from sklearn.cluster import KMeans from sklearn.metrics import silhouette_samples, silhouette_score import matplotlib import matplotlib.pyplot as plt import matplotlib.cm as cm import numpy as np print(__doc__)Automatically created module for IPython interactive environmentGenerating Sample Data# This particular setting has one distinct cluster and 3 clusters placed close # together. X, y = make_blobs(n_samples=500, n_features=2, centers=4, cluster_std=1, center_box=(-10.0, 10.0), shuffle=True, random_state=1) # For reproducibility range_n_clusters = [2, 3, 4, 5, 6]Plot Resultsfigures = [] for n_clusters in range_n_clusters: # Create a subplot with 1 row and 2 columns fig = tools.make_subplots(rows=1, cols=2, print_grid=False, subplot_titles=('The silhouette plot for the various clusters.', 'The visualization of the clustered data.')) # The 1st subplot is the silhouette plot # The silhouette coefficient can range from -1, 1 but in this example all # lie within [-0.1, 1] fig['layout']['xaxis1'].update(title='The silhouette coefficient values', range=[-0.1, 1]) # The (n_clusters+1)*10 is for inserting blank space between silhouette # plots of individual clusters, to demarcate them clearly. fig['layout']['yaxis1'].update(title='Cluster label', showticklabels=False, range=[0, len(X) + (n_clusters + 1) * 10]) # Initialize the clusterer with n_clusters value and a random generator # seed of 10 for reproducibility. clusterer = KMeans(n_clusters=n_clusters, random_state=10) cluster_labels = clusterer.fit_predict(X) # The silhouette_score gives the average value for all the samples. 
# This gives a perspective into the density and separation of the formed # clusters silhouette_avg = silhouette_score(X, cluster_labels) print("For n_clusters =", n_clusters, "The average silhouette_score is :", silhouette_avg) # Compute the silhouette scores for each sample sample_silhouette_values = silhouette_samples(X, cluster_labels) y_lower = 10 for i in range(n_clusters): # Aggregate the silhouette scores for samples belonging to # cluster i, and sort them ith_cluster_silhouette_values = \ sample_silhouette_values[cluster_labels == i] ith_cluster_silhouette_values.sort() size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i colors = cm.spectral(cluster_labels.astype(float) / n_clusters) filled_area = go.Scatter(y=np.arange(y_lower, y_upper), x=ith_cluster_silhouette_values, mode='lines', showlegend=False, line=dict(width=0.5, color=colors), fill='tozerox') fig.append_trace(filled_area, 1, 1) # Compute the new y_lower for next plot y_lower = y_upper + 10 # 10 for the 0 samples # The vertical line for average silhouette score of all the values axis_line = go.Scatter(x=[silhouette_avg], y=[0, len(X) + (n_clusters + 1) * 10], showlegend=False, mode='lines', line=dict(color="red", dash='dash', width =1) ) fig.append_trace(axis_line, 1, 1) # 2nd Plot showing the actual clusters formed colors = matplotlib.colors.colorConverter.to_rgb(cm.spectral(float(i) / n_clusters)) colors = 'rgb'+str(colors) clusters = go.Scatter(x=X[:, 0], y=X[:, 1], showlegend=False, mode='markers', marker=dict(color=colors, size=4) ) fig.append_trace(clusters, 1, 2) # Labeling the clusters centers_ = clusterer.cluster_centers_ # Draw white circles at cluster centers centers = go.Scatter(x=centers_[:, 0], y=centers_[:, 1], showlegend=False, mode='markers', marker=dict(color='green', size=10, line=dict(color='black', width=1)) ) fig.append_trace(centers, 1, 2) fig['layout']['xaxis2'].update(title='Feature space for the 1st feature', zeroline=False) fig['layout']['yaxis2'].update(title='Feature space for the 2nd feature', zeroline=False) fig['layout'].update(title="Silhouette analysis for KMeans clustering on sample data " "with n_clusters = %d" % n_clusters) figures.append(fig)For n_clusters = 2 The average silhouette_score is : 0.704978749608 For n_clusters = 3 The average silhouette_score is : 0.588200401213 For n_clusters = 4 The average silhouette_score is : 0.650518663273 For n_clusters = 5 The average silhouette_score is : 0.563764690262 For n_clusters = 6 The average silhouette_score is : 0.450466629437n_clusters = 2py.iplot(figures[0])n_clusters = 3py.iplot(figures[1])n_clusters = 4py.iplot(figures[2])n_clusters = 5py.iplot(figures[3])n_clusters = 6py.iplot(figures[4]) from IPython.display import display, HTML display(HTML('')) display(HTML('')) ! 
pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'silhouette-analysis.ipynb', 'scikit-learn/plot-kmeans-silhouette-analysis/', 'Selecting the number of clusters with silhouette analysis on KMeans clustering | plotly', ' ', title = 'Selecting the number of Clusters with Silhouette Analysis on KMeans Clustering | plotly', name = 'Selecting the number of Clusters with Silhouette Analysis on KMeans Clustering ', has_thumbnail='true', thumbnail='thumbnail/silhoutte.jpg', language='scikit-learn', page_type='example_index', display_as='clustering', order=15, ipynb= '~Diksha_Gabha/2853')Import things needed for the project%%capture import pandas as pd !pip install vaderSentiment import vaderSentiment**Loading the data**cleaned = pd.read_csv("https://raw.githubusercontent.com/buildweek-saltiest-hacker/data-engineering-api/master/hacker-comments.csv") cleaned.head()**Using Vader Sentiment Analysis**from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer score = SentimentIntensityAnalyzer()Rober Notebook https://github.com/BrokenShell/SaltyHacker/blob/master/nlp.pyVader Documentationhttps://pypi.org/project/vaderSentiment/ Creating the ranking for each comment.# creating a new dataframe that just has the information needed text = cleaned['hacker_comment'] name = cleaned['hacker_name'] salty_hackers = pd.DataFrame({ 'Name':name, 'Comment': text }) salty_hackers.head() comment = salty_hackers['Comment'] ranking = [] for i in comment: scores = score.polarity_scores(i) final_score = scores['compound'] rounded_score = round(final_score*10, 2) ranking.append(rounded_score) salty_hackers['comment_ranking'] = ranking salty_hackers.head() sample_data = salty_hackers.iloc[:10] sample_data salty_hackers['comment_ranking'].describe() average = salty_hackers.groupby(by='Name').mean() average average[:10] average['comment_ranking'] average_dict = average['comment_ranking'].to_dict() all_users = average_dict.keys() user_list = list(all_users) users = salty_hackers['Name'] user_ranking = [] for user in users: user_rank = average_dict[user] round_user_rank = round(user_rank, 2) user_ranking.append(round_user_rank) user_ranking[:10] salty_hackers['user_ranking'] = user_ranking salty_hackers.head() salty_hackers['user_ranking'].describe()Exporting Final Data Setcompression_opts = dict(method='zip',archive_name='salty_hackers.csv') salty_hackers.to_csv('salty_hackers.zip', index=False, compression=compression_opts)Chapter 7: State Space Models for Time Series - Python (HMM)> Source code for the HMM part of the state space models for time series.- author: ""- toc: false- comments: false- categories: [state space model, HMM, python]- permalink: /chapter7-hmm-python/- badges: true- hide_github_badge: true%matplotlib inline width = 6 height = 3 import matplotlib matplotlib.rcParams['figure.figsize'] = [width, height] import pandas as pd import numpy as np import matplotlib.pyplot as plt import hmmlearn from hmmlearn.hmm import GaussianHMM print(pd.__version__) print(np.__version__) print(hmmlearn.__version__)0.24.1 1.14.6 0.2.2Look at the datanile = pd.read_csv("Nile.csv", index_col = 0) nile.head() plt.plot(nile.year, nile.val)Let's take a look at the hmmlearn APIvals = np.expand_dims(nile.val.values, 1) n_states = 2 model = GaussianHMM(n_components=n_states, n_iter=100).fit(vals) hidden_states = model.predict(vals) np.bincount(hidden_states) plt.plot(hidden_states)Exercise: how can we package this more conveniently?def fitHMM(vals, n_states): vals = np.reshape(vals,[len(vals),1]) # fit Gaussian HMM to Q model =
GaussianHMM(n_components=n_states, n_iter=100).fit(vals) # classify each observation as state 0 or 1 hidden_states = model.predict(vals) # fit HMM parameters mus = np.squeeze(model.means_) sigmas = np.squeeze(np.sqrt(model.covars_)) transmat = np.array(model.transmat_) print(mus) print(sigmas) # # re-order parameters in ascending order of mean of underlying distribution # idx = np.argsort(mus) # mus = mus[idx] # sigmas = sigmas[idx] # transmat = transmat[idx, :][:, idx] # state_dict = {} # states = [i for i in range(n_states)] # for i in idx: # state_dict[i] = states[idx[i]] # relabeled_states = [state_dict[h] for h in hidden_states] relabeled_states = hidden_states return (relabeled_states, mus, sigmas, transmat, model) hidden_states, mus, sigmas, transmat, model = fitHMM(nile.val.values, 2)[1097.15261711 850.75596948] [133.74749638 124.44593534]Exercise: how might we be able to plot this more sensibly?def plot_states(ts_vals, states, time_vals): fig, ax1 = plt.subplots() color = 'tab:red' ax1.set_xlabel('Year)') ax1.set_ylabel('Nile river flow', color=color) ax1.plot(time_vals, ts_vals, color=color) ax1.tick_params(axis='y', labelcolor=color) ax2 = ax1.twinx() color = 'tab:blue' ax2.set_ylabel('Hidden state', color=color) ax2.plot(time_vals,states, color=color) ax2.tick_params(axis='y', labelcolor=color) fig.tight_layout() plt.show() plot_states(nile.val, hidden_states, nile.year)Exercise: can we improve on the analysis above? Cut off the 'special' regionnp.where(hidden_states == 0) hidden_states, mus, sigmas, transmat, model = fitHMM(nile.val.values, 3) plot_states(nile.val, hidden_states, nile.year) mus np.set_printoptions(precision = 3, suppress = True) transmat musExercise: generate new synthetic data from the model and then fit it with a fresh HMM model Easy to sample from an existing HMM modelres = np.squeeze(model.sample(1000)[0]) plt.plot(res)Then refithidden_states, mus, sigmas, transmat, model = fitHMM(res, 3) def plot_states_no_time(ts_vals, states): fig, ax1 = plt.subplots() color = 'tab:red' ax1.set_xlabel('Time)') ax1.set_ylabel('Value', color=color) ax1.plot(ts_vals, color=color) ax1.tick_params(axis='y', labelcolor=color) ax2 = ax1.twinx() color = 'tab:blue' ax2.set_ylabel('Hidden state', color=color) ax2.plot(states, color=color) ax2.tick_params(axis='y', labelcolor=color) fig.tight_layout() plt.show() plot_states_no_time(res[1:100], hidden_states[1:100]) transmatMultimodal gene regulatory network Here we will use dual-omics SHARE-seq dataset, more specicially the dataset from figure4 in [SHARE-seq study](https://www.sciencedirect.com/science/article/abs/pii/S0092867420312538), "multiome_ma2020_fig4" as an example to illustrate how SIMBA performs gene regulatory network (GRN) analysisimport os import numpy as np import pandas as pd import simba as si si.__version__ workdir = 'result_multiome_shareseq' si.settings.set_workdir(workdir) si.settings.set_figure_params(dpi=80, style='white', fig_size=[5,5], rc={'image.cmap': 'viridis'}) # to make plots prettier from IPython.display import set_matplotlib_formats set_matplotlib_formats('retina')Identify master regulatorsadata_G = si.read_h5ad(os.path.join(workdir,'adata_G.h5ad')) adata_M = si.read_h5ad(os.path.join(workdir,'adata_M.h5ad')) adata_all = si.read_h5ad(os.path.join(workdir,'adata_all.h5ad')) adata_cmp_CG = si.read_h5ad(os.path.join(workdir,'adata_cmp_CG.h5ad')) adata_cmp_CM = si.read_h5ad(os.path.join(workdir,'adata_cmp_CM.h5ad')) # find paired TF motifs and TF genes motifs_genes = pd.DataFrame(columns=['motif', 
'gene']) for x in adata_M.obs_names: x_split = x.split('_') for y in adata_G.obs_names: if y in x_split: motifs_genes.loc[motifs_genes.shape[0]] = [x,y] print(motifs_genes.shape) motifs_genes.head() list_tf_motif = motifs_genes['motif'].tolist() list_tf_gene = motifs_genes['gene'].tolist() df_metrics_motif = adata_cmp_CM.var.copy() df_metrics_gene = adata_cmp_CG.var.copy() df_metrics_motif.head() df_metrics_gene.head()plot SIMBA metrics in order to help set the cutoffs latersi.pl.entity_metrics(adata_cmp_CG,x='max',y='gini', show_texts=False, show_cutoff=True, show_contour=True, c='#607e95', cutoff_x=1.5, cutoff_y=0.35) si.pl.entity_metrics(adata_cmp_CM,x='max',y='gini', show_texts=False, show_cutoff=True, show_contour=True, c='#92ba79', cutoff_x=3, cutoff_y=0.7)identify master regulatorsdf_MR = si.tl.find_master_regulators(adata_all, list_tf_motif=list_tf_motif, list_tf_gene=list_tf_gene, cutoff_gene_max=1.5, cutoff_gene_gini=0.35, cutoff_motif_max=3, cutoff_motif_gini=0.7, metrics_gene=df_metrics_gene, metrics_motif=df_metrics_motif ) df_MRIdentify target genesadata_CP = si.read_h5ad(os.path.join(workdir,'adata_CP.h5ad')) adata_PM = si.read_h5ad(os.path.join(workdir,'adata_PM.h5ad')) #make sure the indices of motifs are the same as in `motifs_genes` adata_PM.var.index = 'M_' + adata_PM.var.index master_regulators = ['Lef1', 'Hoxc13', 'Relb', 'Gata6', 'Nfatc1'] list_tf_motif = motifs_genes[np.isin(motifs_genes['gene'], master_regulators)]['motif'].tolist() list_tf_gene = motifs_genes[np.isin(motifs_genes['gene'], master_regulators)]['gene'].tolist()The following step can be ignored if `si.tl.gene_scores(adata_CP)` has been performed before# get the overlap between peaks and gene loci # the field `uns['gene_scores']['overlap']` will be added to adata_CP _ = si.tl.gene_scores(adata_CP,genome='mm10',use_gene_weigt=True,use_top_pcs=False) dict_tf_targets = si.tl.find_target_genes(adata_all, adata_PM, list_tf_motif=list_tf_motif, list_tf_gene=list_tf_gene, adata_CP=adata_CP, cutoff_gene=5000) dict_tf_targets.keys() dict_tf_targets['M_ENSMUSG00000027985_LINE1723_Lef1_D'] dict_tf_targets['M_ENSMUSG00000001655_LINE1151_Hoxc13_D']save resultsadata_CP.write(os.path.join(workdir,'adata_CP.h5ad')) import pickle with open('./result_multiome_shareseq/dict_tf_targets.pickle', 'wb') as handle: pickle.dump(dict_tf_targets, handle, protocol=pickle.HIGHEST_PROTOCOL)Sci-Fi IRL A Data Storytelling Project by ---- Datalogue 004 ---------- Resources- [PushShift API GitHub Repo](https://github.com/pushshift/api) --- Pre-Session Notes After figuring out how to use the API yesterday, I ran into a wall as far as inspiration goes. > Now what?I can do all of the things I need to do to finish this project, but I realized that a simple query such as the frequency of mentions of the terms "utopia" and "dystopia" may not show anything interesting or give me any good meat to chew on in my analysis.I came up with some ideas to get me over that wall.Looking at differences across and between subreddits seems to be where I feel like the most interesting stories can be found. However, I'm not going to overwhelm myself with trying to encompass ALL THE SUBREDDITS with this project. That would go against the whole "MVP" ethos. Here's what I'm going to do today / for this project...I'm going to:1. Choose a few subreddits from the list below that I believe will have interesting differences in certain areas2. Come up with a list of words that can be considered a measure of optimism or hope for the future3. 
Create a hypothesis regarding the words and the subreddits4. Gather data on the chosen words in the chosen subreddits using the PushShift API5. Explore the data with tables and visualizations6. Find the most compelling storytelling devices and base my writing off of those7. ???8. Profit --- 1. List of Subreddits, organized by category Entertainment- entertainment, 1.1m- scifi, 1.2m- sciencefiction, 113k- AskScienceFiction, 163k- WritingPrompts, 14.0m- writing, 888k- movies, 21.5m- gaming, 23.7m- books, 17.1m- suggestmeabook, 596k Science / Technology- Futurology, 14.2m- futureporn, 161k- space, 15.9m- technology, 8.2m- science, 22.4m- askscience, 18.1m- MachineLearning, 779k- artificial, 87.7k- TechNewsToday, 82.6k- EverythingScience, 175k Media / General- worldnews, 22.2m- news, 19m- politics, 5.4m- philosophy, 14.1m- conspiracy, 984k- skeptic, 134k- changemyview, 830k- AskReddit, 24.6m- environment, 608k- memes, 6.4m Of course nine subreddits per category might be a bit much. However, I think I'll start here and prune as needed along the way. --- 2. List of Keywords I had the thought that using "utopia" and "dystopia" alone might cause the data and results to be biased because those are not in the vernacular of many. Therefore, it would bias the data toward those who consume sci-fi or are into futurism. While gathering data on these folks is somewhat the point of the analysis, I would think there to be other words that would provide more of a fair judgement of popular sentiment. - Basic - Utopia / Dystopia ------ Imports# Three Musketeers import pandas as pd import numpy as np import matplotlib.pyplot as plt # For using the API import requests # import json # from pandas.io.json import json_normalize # More advanced vizualizations from bokeh.plotting import figure, output_file, output_notebook, show from bokeh.models import NumeralTickFormatter, DatetimeTickFormatter--- Configuration# Set pandas display option to allow for more columns and rows pd.set_option("display.max_columns", 100) pd.set_option("display.max_rows", 500)--- MVPTo start things out and get things rolling, I'm going to do a much simpler analysis of a single subreddit...> r/suggestmeabookThe reason I'm choosing to use this particular subreddit is that an effective measure of the content people *actually* enjoy is what they will suggest to friends, colleagues, or strangers on the InterWebz ---- ø ---- Notes 1. Abbreviations and variable names| Abbreviation | Meaning || ----- | ------------------ || smab | suggestmeabook | ---- ø ----# Comments that mention "utopian" in r/suggestmeabook q = "utopian" sub = "suggestmeabook" fields = [ "author", "body", "created_utc", "parent_id", "score", "subreddit", ] after = "10y" smab_utopia_url = f"https://api.pushshift.io/reddit/search/comment/?q={q}&subreddit={sub}&fields={','.join(fields)}" # smab_utopia_url = f"https://api.pushshift.io/reddit/search/comment/?q={q}&subreddit={sub}&after={after}&fields={','.join(fields)}" print(smab_utopia_url) # Send the request and save into response object resp_1 = requests.get(smab_utopia_url) # Look at the status code print(resp_1.status_code) # Use assert to stop the notebook's execution if request fails (if not 200) assert resp_1.status_code == 200 # Parse the json response into a python object json_resp_1 = resp_1.json() # Check out the json json_resp_1["data"][0:2]Although I might use other data for the visualizations, this data has some very valuable information. 
Namely, I am able to browse the data to get a feel for the overall quality of the comments. If, for example, I looked through and found that the majority of the comments that mentioned the keyword `utopia` were not saying it in the context of describing (science-fiction) stories, I would not be able to explore the hypothesis I originally set out to test / the question I originally set out to answer or simply explore. --- Aggregated smab According to the [PushShift API Documentation](https://github.com/pushshift/api):> The size parameter was set to 0 because we are only interested in getting aggregation data and not comment data. The aggregation data is returned in the response under the key aggs -> created_utc.With that in mind, I'm going to take out the `size=0` parameter in order to capture both the aggregation and the comments.# Comments that mention "utopia" in r/suggestmeabook, aggregated by month q = "utopia" sub = "suggestmeabook" # before = "27d" aggs = "created_utc" freq = "month" smab_utopia_agg_url = f"https://api.pushshift.io/reddit/search/comment/?q={q}&subreddit={sub}&aggs={aggs}&frequency={freq}" print(smab_utopia_agg_url) # Send the request and save into response object resp_2 = requests.get(smab_utopia_agg_url) # Look at the status code print(resp_2.status_code) # Use assert to stop the notebook's execution if request fails (if not 200) assert resp_2.status_code == 200 # Parse the json response into a python object json_resp_2 = resp_2.json() # Eyeball the jerry-rigged head of the aggregated json json_resp_2["aggs"]["created_utc"][0:10] # Eyeball the full comment json json_resp_2["data"][0]Looks like that worked! So I can combine the aggregate and non-aggregate queries into a single one# Convert the python object into a pandas dataframe df_smab_agg = pd.DataFrame(json_resp_2["aggs"]["created_utc"]) df_smab_agg.head() # Convert "key" into a datetime column df_smab_agg["key"] = pd.to_datetime(df_smab_agg["key"], unit="s", origin="unix") df_smab_agg.head() # Rename "key" to reflect the fact that it is the beginning of the time bucket # (in this case the month) df_smab_agg = df_smab_agg.rename(mapper={"key": "month", "doc_count": "utopia"}, axis="columns") print(df_smab_agg.shape) df_smab_agg.head() # Convert the comments into a pandas dataframe df_smab_data = pd.DataFrame(json_resp_2["data"]) print(df_smab_data.shape) df_smab_data.head()(25, 29)--- MVP Func Combine the steps above into functions to make things fasterdef subreddit_agg(query, subreddit, frequency="month", aggs="created_utc"): """ Returns the JSON response of a PushShift API aggregate comment search as a Python dictionary. Note: if you're reading this note, that means that this function is still only written with the intention of automating a specific set of actions for a specific project. ---- Arguments ---- query: (str) keyword to search. subreddit: (str) subreddit name frequency: (str) set the size of the time buckets. aggs: (str) aggregate function name. Default is "created_utc". (For more information, read the PushShift API Documentation.) 
------------------- """ # Build the query url based on endpoints and parameters url = f"https://api.pushshift.io/reddit/search/comment/?q={query}&subreddit={subreddit}&aggs={aggs}&frequency={frequency}" # Send the request and save the response into the response object response = requests.get(url) # Check the response; stop execution if failed assert response.status_code == 200 # Parse the JSON into a Python dictionary # and return it for further processing return response.json() def time_agg_df(data, keyword, frequency="month"): """ Returns cleaned Pandas DataFrame of keyword frequency over time, given correctly-formatted Python dictionary. Renames the frequency column to keyword; converts month to datetime. Note: if you're reading this note, that means that this function is still only written with the intention of automating a specific set of actions for a specific project. ---- Arguments ---- data: (dict) Python dictionary converted from JSON API response. keyword: (str) the keyword that was queried. time_bucket: (str) size of time buckets, which is also the name of the resulting DataFrame column. Defaults to "month". ------------------- """ # Convert the python object into a pandas dataframe df = pd.DataFrame(data["aggs"]["created_utc"]) # Convert "key" into a datetime column df["key"] = pd.to_datetime(df["key"], unit="s", origin="unix") # Rename "key" to reflect the fact that it is the beginning of the time bucket df = df.rename(mapper={"key": frequency, "doc_count": keyword}, axis="columns") # Return the DataFrame return df # Function to convert the comment data into pandas dataframe def data_df(data): """ Returns Reddit comments in Pandas DataFrame, given the correctly-formatted Python dictionary. Note: if you're reading this note, that means that this function is still only written with the intention of automating a specific set of actions for a specific project. ---- Arguments ---- data: (dict) Python dictionary converted from JSON API response. ------------------- """ # Convert the comments into a pandas dataframe df = pd.DataFrame(data["data"]) # Return the DataFrame return df def df_to_csv(data, filename): """ Basically just a wrapper around the Pandas `.to_csv()` method, created to standardize the inputs and outputs. ---- Arguments ---- data: (pd.DataFrame) Pandas DataFrame to be saved as a csv. filepath: (str) name or path of the file to be saved. 
------------------- """ # Saves the DataFrame to csv data.to_csv(path_or_buf=filename) # And that's it, folks!--- Gather the Data# Comments that mention "utopia" (and, I'm assuming, "utopian" as well) in r/suggestmeabook, aggregated by month q = "utopia" sub = "suggestmeabook" # Make the request and create the Python dictionary smab_utopia_dict = subreddit_agg(q, sub, "month") # Parse the time aggregation to pd.DataFrame df_smab_agg_uto = time_agg_df(smab_utopia_dict, "utopia") print(df_smab_agg_uto.shape) print() print(df_smab_agg_uto.dtypes) print() df_smab_agg_uto.head() # Comments that mention "dystopia" (and, I'm assuming, "dystopian" as well) in r/suggestmeabook, aggregated by month q = "dystopia" sub = "suggestmeabook" # Make the request and create the Python dictionary smab_dystopia_dict = subreddit_agg(q, sub, "month") # Parse the time aggregation to pd.DataFrame df_smab_agg_dys = time_agg_df(smab_dystopia_dict, "dystopia") # Take a look at the results print(df_smab_agg_dys.shape) print() print(df_smab_agg_dys.dtypes) print() df_smab_agg_dys.head()(65, 2) dystopia int64 month datetime64[ns] dtype: object--- Joining Utopia and Dystopia# Join the two dataframes together # - using "inner" because I only want rows with both df_smab_agg = pd.merge(df_smab_agg_uto, df_smab_agg_dys, how="inner", on="month") df_smab_agg.head() # Reorder columns to get "month" first cols = ["month", "utopia", "dystopia"] df_smab_agg = df_smab_agg[cols] df_smab_agg.head()--- Visualization# Basic Bokeh visualization # Output to current notebook output_notebook() # Create new plot with title and axis labels p = figure(title="Utopia vs Dystopia", x_axis_type="datetime", x_axis_label='Date', y_axis_label='Frequency') # Add a line renderer with legend and line thickness p.line(df_smab_agg["month"], df_smab_agg["utopia"], legend="Utopia", line_width=2, line_color="blue") p.line(df_smab_agg["month"], df_smab_agg["dystopia"], legend="Dystopia", line_width=2, line_color="red") # Style the legend p.legend.location = "top_left" p.legend.title = 'Comments in r/suggestmeabook that mention:' p.legend.title_text_font_style = "bold" p.legend.title_text_font_size = "8pt" # Show the results show(p)--- Saving the Data# Save the joined utopia / dystopia dataset df_to_csv(df_smab_agg, "suggestmeabook_utopia_dystopia_by_month.csv") # Create the "utopia" comments DataFrame smab_comments_uto = data_df(smab_utopia_dict) # Save the "utopia" comments dataset to csv df_to_csv(smab_comments_uto, "suggestmeabook_utopia_comments.csv") # Create and save the "dystopia" comments dataset smab_comments_dys = data_df(smab_dystopia_dict) # Save the "dystopia" comments dataset to csv df_to_csv(smab_comments_dys, "suggestmeabook_dystopia_comments.csv")--- Extras# TODO: write a function that combines all the above functions # so I can call it once on a subreddit with a certain pair of keywords # And get out the dataframes def reddit_data_setter(keywords, subreddits, frequency="month", aggs="created_utc"): """ Creates two DataFrames that holds combined data of each combination of keyword / subreddit. Note: if you're reading this note, that means that this function is still only written with the intention of automating a specific set of actions for a specific project. ---- Arguments ---- keywords: (list) keyword(s) to search. subreddits: (list) name of subreddit(s) to include. frequency: (str) set the size of the time buckets. aggs: (str) aggregate function name. Default is "created_utc". (For more information, read the PushShift API Documentation.) 
------------------- """ columns = [] # Empty list to hold column names # Use itertools to create the columns names from itertools import product for column in product(subreddits, keywords): column = list(column) col_name = "_".join(column) columns.append(col_name) # Create an empty DataFrame for the loop below using column names generated above df_all = pd.DataFrame(columns=columns) return df_all # One method of creating the initial dataframe # I ended up using the date column to initiate itTurns out I will have to use the above itertools function to create the columns...maybe. We'll see. Nope. Didn't end up using it. --- `reddit_data_setter()` v1.0def reddit_data_setter(keywords, subreddits, csv=False, frequency="month", aggs="created_utc"): """ Creates two DataFrames that holds combined data of each combination of keyword / subreddit. Note: if you're reading this note, that means that this function is still only written with the intention of automating a specific set of actions for a specific project. ---- Arguments ---- keywords: (list) keyword(s) to search. subreddits: (list) name of subreddit(s) to include. csv: (bool) if True, save the resulting dataframes as csv file. frequency: (str) set the size of the time buckets. aggs: (str) aggregate function name. Default is "created_utc". (For more information, read the PushShift API Documentation.) ------------------- """ from time import sleep from functools import reduce comment_df_list = [] # Empty list to hold comment dataframes word_df_list = [] # Empty list to hold monthly word count dataframes df_comm = pd.DataFrame() # Empty dataframe for comment data df_main = pd.DataFrame() # Empty dataframe for keyword counts # Create the "month" (datetime) column - to be used when joining df_main["month"] = pd.date_range(start="2005-01-01", end="2019-09-01", freq="MS") # Loop through keywords and subreddits to create dictionaries for each combination # subreddit is outer loop because I want to go through list of keywords # for one subreddit before moving onto the next for subreddit in subreddits: for word in keywords: # Create column name that matches above / main DataFrame col_name = f"{subreddit}_{word}" # Increase sleep time and indicate current subreddit / keyword print(f"Starting {col_name}") sleep(0.5) print("...") print() # Make request and convert response to dictionary dictionary = subreddit_agg(word, subreddit) # Append aggs word count df to word_df_list word_df_list.append(time_agg_df(dictionary, col_name)) # Append comments df to comment_df_list comment_df_list.append(data_df(dictionary)) # # Add subreddit aggregate for keyword to df dict # word_df_dict[f"{col_name}"] = time_agg_df(dictionary, col_name) # # Append comments df to comment_df_list # comment_df_dict[f"{col_name}"] = data_df(dictionary) # Sleep for 1 sec to stay within API limits print(f"Finished {col_name}") sleep(0.5) print("...") sleep(0.5) print() # Set index to month column then concat df_main = pd.concat([df.set_index("month") for df in word_df_list], axis=1, join="outer").reset_index() # Merge word_df_list dataframes into df_main # df_main = pd.concat(word_df_list, how="outer", axis=1) # df_main = reduce(lambda x, y: pd.merge(x, y, how="outer", on="month"), word_df_list) # Concatenate comment_df_dicts dataframes df_comm = pd.concat(comment_df_list, sort=False, join="outer", axis=0) if csv: df_to_csv(df_main, f"monthly-{'_'.join(subreddits)}-{'_'.join(keywords)}.csv") df_to_csv(df_comm, "df_comm.csv") # Return df_main, df_comm, respectively return df_main, 
df_commParallelizing Code with the built-in ```multiprocessing``` module. First, let's create a single-threaded function that is conducive to being multithreaded:import numpy as np import time def GenerateData_Singlethread(procnum, NumElems, return_dict): """ generates a simple function of the integers from (procnum)*NumElems to (procnum)*NumElems-1 The output is passed by reference to return_dict """ A = np.arange( (procnum)*NumElems, (procnum+1)*NumElems) return_dict[procnum] = np.cos(A/50000.0)-1.3*np.cos(A*2.0/50000.0)And here is where the magic happens:from multiprocessing import Pool, Process import multiprocessing NumThreads = 4 NumSamples = 5*(10**6) tic = time.time() # start timer - benchmark how much faster the multithreaded version over the single threaded TotalElems = 0 manager = multiprocessing.Manager() return_dict = manager.dict() #just a pointer - dict() is where we shall store #the values generated by the function in each process jobs = [] NumElemsInEachThread = [] # record the number of elements we will generate in each thread for k in range(NumThreads): p = multiprocessing.Process(target=GenerateData_Singlethread, args=(k, int(np.ceil(NumSamples/NumThreads)),return_dict, ) ) #create a process, and call the target function with each Process # the arguments of each function call are given in the form of a tuple NumElemsInEachThread.append( int(np.ceil(NumSamples/NumThreads)) ) jobs.append(p) p.start() for proc in jobs: proc.join() # wait until each process has halted before continuing print('Elapsed Time after running all threads = %2.2f sec'%(time.time()-tic))Elapsed Time after running all threads = 1.57 secNow that all the functions have been run, we will need to retrieve the data from the ```return_dict```.#now combine the data tic = time.time() TotalElems = np.sum(NumElemsInEachThread) DataAll = np.zeros((TotalElems,)) # create an empty vector (or a matrix) the size of the total data OrderOfExecution = np.flip(return_dict.keys()) # outputs of functions are stored in a stack in return_dict # .keys() gives the order in which each process was completed and pushed into the stack # when we pop each of them out, they would be in reverse order, hence the np.flip() command for k in (OrderOfExecution): DataAll[ int(np.sum(NumElemsInEachThread[0:k])):int(np.sum(NumElemsInEachThread[0:(k+1)]))] = \ return_dict.popitem()[1] #once we have popped everything from the stack, running return_dict.popitem()[1] again will result in an error: try: return_dict.popitem()[1] except: print('Could not pop item from return_dict anymore') print(DataAll) print('Parts Assignment: %2.2f sec'%(time.time()-tic)) print(DataAll.shape) import matplotlib.pyplot as plt plt.plot(DataAll) plt.title('Multithreaded output')What happens when we run the single thread instead?procnum2 = 0 NumElems2 = NumSamples tic = time.time() PassByRefArr= [np.zeros((NumElems2,1))] GenerateData_Singlethread(procnum= procnum2, NumElems = NumElems2, return_dict = PassByRefArr) print('Elapsed Time (single-threaded) = %2.2f sec'%(time.time()-tic)) print(PassByRefArr[0].shape) plt.plot(PassByRefArr[0])Elapsed Time (single-threaded) = 0.27 sec (5000000,)Welcome to ExkaldiIn this section, we will train the triphone HMM-GMM with delta feature.import exkaldi import os dataDir = "librispeech_dummy"Firstly, instantiate a triphone model object.model = exkaldi.hmm.TriphoneHMM() model___model___ is unavaliable now. We have to initialize its data. 
We will use the following files, which were generated in earlier steps:

___tree___ and ___treeStats___: generated in 6_train_decision_tree
___topo___: generated in 5_train_mono_HMM-GMM

Actually, you can initialize the TriphoneHMM object from features directly, but here we use the tree statistics file.

treeFile = os.path.join(dataDir, "exp", "train_delta", "tree") treeStatsFile = os.path.join(dataDir, "exp", "train_delta", "treeStats.acc") topoFile = os.path.join(dataDir, "exp", "topo") model.initialize(tree=treeFile, treeStatsFile=treeStatsFile, topoFile=topoFile) model.info

The training steps for the triphone HMM are almost the same as for the monophone HMM, except that we do not use equal alignment for the first iteration. We will introduce the training steps in a nutshell.

Training in detail

First, we must generate new alignment data. You can convert the latest alignment data generated by the monophone model into new alignment data corresponding to the triphone model, using the generated alignment and the final monophone model.

aliFile = os.path.join(dataDir, "exp", "train_mono", "final.ali") monoFile = os.path.join(dataDir, "exp", "train_mono", "final.mdl") ali = exkaldi.load_ali(aliFile) newAli = exkaldi.hmm.convert_alignment( ali=ali, originHmm=monoFile, targetHmm=model, tree=treeFile ) newAli

Alternatively, you can align the features again directly with the new triphone model. In the next steps, we will review how to train an HMM-GMM model and use it to align the acoustic features.

del newAli

Prepare the lexicons (generated in 3_prepare_lexicons).

lexFile = os.path.join(dataDir, "exp", "lexicons.lex") lexicons = exkaldi.load_lex(lexFile) lexicons

Prepare the int-format transcription (generated in 5_train_mono_HMM-GMM).

intTransFile = os.path.join(dataDir, "exp", "text.int") trans = exkaldi.load_transcription(intTransFile) trans.subset(nHead=1)

Prepare the L.fst file (generated in 3_prepare_lexicons).

Lfile = os.path.join(dataDir, "exp", "L.fst")

Prepare the features (generated in 2_feature_processing).

featFile = os.path.join(dataDir, "exp", "train_mfcc_cmvn.ark") feat = exkaldi.load_feat(featFile) feat = feat.add_delta(order=2) feat.dim

1. Compile the new training graph.

outDir = os.path.join(dataDir, "exp", "train_delta") exkaldi.utils.make_dependent_dirs(outDir, pathIsFile=False) trainGraphFile = os.path.join(outDir, "train_graph") model.compile_train_graph(tree=treeFile, transcription=trans, LFile=Lfile, outFile=trainGraphFile, lexicons=lexicons)

2. Align the acoustic features.

ali = model.align(feat, trainGraphFile, lexicons=lexicons) ali

3. Accumulate statistics.

statsFile = os.path.join(outDir, "stats.acc") model.accumulate_stats(feat=feat, ali=ali, outFile=statsFile)

4. Update the HMM-GMM parameters.

targetGaussians = 300 model.update(statsFile, targetGaussians) model.info

Training at a high level

In this step, we will introduce how to train the triphone model directly.

del model del ali del trans os.remove(trainGraphFile) os.remove(statsFile)

Some file paths and objects defined above will be used here. First, initialize the model, giving lexicons as an optional parameter.

model = exkaldi.hmm.TriphoneHMM(lexicons=lexicons) model.initialize(tree=treeFile, treeStatsFile=treeStatsFile, topoFile=topoFile) model.info

Then train it.

outDir = os.path.join(dataDir, "exp", "train_delta") transFile = os.path.join(dataDir, "train", "text") aliIndex = model.train(feat=feat, transcription=transFile, LFile=Lfile, tree=treeFile, tempDir=outDir, numIters=10, maxIterInc=8, totgauss=1500 )

The final model and alignment have been saved to file automatically.
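As a quick sanity check, it can help to list what the high-level training call actually wrote to disk (a minimal sketch; it relies only on the `outDir` path defined above and makes no assumption about the exact file names `model.train()` produces):

```python
# Minimal sketch: list the artifacts that the training step wrote under outDir.
import os

for name in sorted(os.listdir(outDir)):
    path = os.path.join(outDir, name)
    print(f"{name}\t{os.path.getsize(path)} bytes")
```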
Let's look at the final model information.

model.info

CDC NDI Mortality - Syft Duet - Data Owner 🎸

This worksheet is intended to illustrate functionality of a shared statistical platform, using a partially synthetic public-use dataset that mirrors the restricted-use dataset. Ultimately, these processes would apply to the restricted-use data. Sample data compiled from the public-use linked mortality files shared at https://www.cdc.gov/nchs/data-linkage/mortality.htm provided by the National Center for Health Statistics (NCHS).

PART 1: Launch a Duet Server and Connect

As a Data Owner, you want to allow someone else to perform data science on data that you own and likely want to protect. In order to do this, we must load our data into a locally running server within this notebook. We call this server a "Duet". To begin, you must launch Duet and help your Duet "partner" (a Data Scientist) connect to this server. You do this by running the code below and sending the code snippet containing your unique Server ID to your partner and following the instructions it gives!

import syft as sy duet = sy.launch_duet(loopback=True) sy.load("pandas") sy.load("statsmodels") sy.load("numpy") duet.requests.add_handler(action="accept") import zipfile import requests import io from urllib.request import urlopen csv_file = "mort_match_nhis_all_years.csv" zip_file = f"{csv_file}.zip" url = f"https://datahub.io/madhava/mort_match_nhis_all_years/r/{zip_file}" res = requests.get(url) z = zipfile.ZipFile(io.BytesIO(res.content)) with z.open(z.namelist()[0], 'r') as myfile: binaryCSV = myfile.read() import pandas as pd df = pd.read_csv(io.BytesIO(binaryCSV), encoding='utf-8') df[df.MORTSTAT == 0].head() len(df) df_ptr = df.send(duet, tags=["df"]) df

# local stats
# Select the records that died of cancer and were eligible for linkage
# 002-Malignant neoplasms (C00-C97)
cancer = df[(df.UCOD_LEADING == 2) & (df.ELIGSTAT == 1)]
# Select the records that died due to heart disease and were eligible for linkage
# 001-Diseases of heart (I00-I09, I11, I13, I20-I51)
heart = df[(df.UCOD_LEADING == 1) & (df.ELIGSTAT == 1)]
# Compute a simple mean for the cancer subgroup that had diabetes
# listed as a multiple cause of death
cancer["DIABETES"].mean()
# Compute a simple mean for the heart subgroup that had diabetes
# listed as a multiple cause of death
heart["DIABETES"].mean()
# Sample means should account for weights. Write a custom function that uses the weights.
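# The weighted mean is sum(w_i * x_i) / sum(w_i); the helper below implements
# exactly that with pandas Series arithmetic, using "WGT_NEW" as the default
# weight column.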
def weighted_mean(dx, key, weight_key="WGT_NEW"): w = dx[weight_key] v = dx[key] return (w * v).sum() / w.sum() weighted_mean(cancer, "DIABETES"), weighted_mean(heart, "DIABETES") # Example of a small subgroup (sample size = 6) # Cancer-deaths from males aged 47 who died in 2015 # We should check for small cell sizes here subgroup = cancer[(cancer.SEX == 1) & (cancer.AGE_P == 47) & (cancer.DODYEAR == 2015)] print(subgroup["DIABETES"].mean()) print(weighted_mean(subgroup, "DIABETES")) print(len(subgroup)) # These stats are problematic, as the subgroup is too small to report (n=6) subgroup import statsmodels.api as sm from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod.families import Binomial # Drop any missing values in the dataset (those under 18) df = df.dropna(subset=["MORTSTAT"]) # Keep only the eligible portion df = df[df.ELIGSTAT == 1] # Ignore people > 80 df = df[df.AGE_P <= 80] # A person is alive if MORTSTAT==0 df["is_alive"] = df.MORTSTAT == 0 # Assign a helpful column for sex (0==male, 1==female) df["sex"] = "male" df.loc[df.SEX == 2, "sex"] = "female" x = df["AGE_P"] _x = sm.add_constant(x) _y = df["is_alive"] results = GLM(_y, _x, family=Binomial()).fit() print(results.summary()) predict_x = range(x.min(), x.max() + 1, 1) preds = results.predict(sm.add_constant(predict_x)) # share predictions preds.send(duet, tags=["preds"]) try: import pylab as plt import seaborn as sns plt.figure(figsize=(12, 5)) plt.plot(predict_x, preds, "k", lw=3, label="Best Fit for all data") sns.lineplot(data=df, x="AGE_P", y="is_alive", hue="sex", err_style="bars") sns.despine() except ImportError: print("Cant import seaborn try:\n!pip install seaborn")Day 5: Binary Boarding You board your plane only to discover a new problem: you dropped your boarding pass! You aren't sure which seat is yours, and all of the flight attendants are busy with the flood of people that suddenly made it through passport control.You write a quick program to use your phone's camera to scan all of the nearby boarding passes (your puzzle input); perhaps you can find your seat through process of elimination.Instead of zones or groups, this airline uses binary space partitioning to seat people. A seat might be specified like FBFBBFFRLR, where F means "front", B means "back", L means "left", and R means "right".The first 7 characters will either be F or B; these specify exactly one of the 128 rows on the plane (numbered 0 through 127). Each letter tells you which half of a region the given seat is in. Start with the whole list of rows; the first letter indicates whether the seat is in the front (0 through 63) or the back (64 through 127). The next letter indicates which half of that region the seat is in, and so on until you're left with exactly one row.For example, consider just the first seven characters of FBFBBFFRLR:```Start by considering the whole range, rows 0 through 127.F means to take the lower half, keeping rows 0 through 63.B means to take the upper half, keeping rows 32 through 63.F means to take the lower half, keeping rows 32 through 47.B means to take the upper half, keeping rows 40 through 47.B keeps rows 44 through 47.F keeps rows 44 through 45.```The final F keeps the lower of the two, row 44.The last three characters will be either L or R; these specify exactly one of the 8 columns of seats on the plane (numbered 0 through 7). The same process as above proceeds again, this time with only three steps. 
L means to keep the lower half, while R means to keep the upper half.For example, consider just the last 3 characters of FBFBBFFRLR:```Start by considering the whole range, columns 0 through 7.R means to take the upper half, keeping columns 4 through 7.L means to take the lower half, keeping columns 4 through 5.The final R keeps the upper of the two, column 5.```So, decoding FBFBBFFRLR reveals that it is the seat at row 44, column 5.Every seat also has a unique seat ID: multiply the row by 8, then add the column. In this example, the seat has ID 44 * 8 + 5 = 357.Here are some other boarding passes:* BFFFBBFRRR: row 70, column 7, seat ID 567.* FFFBBBFRRR: row 14, column 7, seat ID 119.* BBFFBBFRLL: row 102, column 4, seat ID 820.As a sanity check, look through your list of boarding passes. What is the highest seat ID on a boarding pass?with open("input.txt","r") as f: input_data = f.read().split("\n") def string2id(string): row = string[:7] col = string[7:] row_low = 0 row_up = 127 for fb in row: if fb == "F": row_up = (row_low + row_up) //2 else: row_low = (row_low + row_up) //2 + 1 col_low = 0 col_up = 7 for rl in col: if rl == "R": col_low = (col_low + col_up) //2 + 1 else: col_up = (col_low + col_up) //2 return row_low * 8 + col_low def largest_seat(input_data): return max([string2id(x) for x in input_data]) largest_seat(input_data)Part Two Ding! The "fasten seat belt" signs have turned on. Time to find your seat.It's a completely full flight, so your seat should be the only missing boarding pass in your list. However, there's a catch: some of the seats at the very front and back of the plane don't exist on this aircraft, so they'll be missing from your list as well.Your seat wasn't at the very front or back, though; the seats with IDs +1 and -1 from yours will be in your list.def missing_id(input_data): id_list = [string2id(x) for x in input_data] return (min(id_list)+max(id_list)) * (len(id_list) + 1)//2 - sum(id_list) missing_id(input_data)Project 2 - Data Characterization About the data: The obtained data shows film permits granted for New York City. Permits are generally required when asserting the exclusive use of city property, like a sidewalk, a street, or a park. I found this data through the suggestions for project 2 ideas on Blackboard. My story:Growing up I have watched a lot of American movies and TV shows. Many of these have shown New York City. After I came to the USA I myself visited many of the places in New York City (NYC) and visualized the movies and shows I had watched as a kid. I did not get to see an actual film shoot though. So, when I saw this data, the data scientist in me thought I should figure out when do movies actually shoot in NYC. Following questions came to my mind:1. Can this data tell me popular timing of day for film shoots? * The answer to the first question is that most popular time of day for shooting is between 5 AM and mid-day. * Theater "shoots" are an outlier when events per hour of day are analyzed and we see a lot of them seem to happen in hour "zero" or mid-night. However, this is not an issue from the perspective of analysis as this could be reasonable and not an anomaly. This is because a lot of theater shows start in the evening and can run upto mid-night. 2. Can this data tell me the popular day of the week when shooting activities occur? * Weekday-wise permit counts and the normalized value of the permit count show that weekends are outliers when shoots per day are considered. 
* We were able to conclude from the number of shoots per day that weekdays are fairly well balanced in matters of shooting activities.3. Can it tell me popular months of year for film shoots? * So, the answer to our third question is TV shoots happen in phases. Mostly in Fall months but some in Spring months as well. Movie shoots happen starting around Spring, peaking around summer and again a bit in the Fall.4. Winter in New York city is very beautiful due to all the snow but are the shooting really happening in the harsh winter conditions of NYC? * The graph for normalized value of total number of permits per month answers our fourth question that winter is really a bad time to shoot in New York City as the number of events go down but there still are a non-zero number of shooting activities happening. This is especially true for TV shows.5. I know some Bollywood movies have shot in Staten Island because of a large Indian community in that area but is it a popular location in general? * The graph of normalized value of total number of permits per borough and type of activity shows that Staten Island is NOT in-fact a popular shooting location.6. I like a lot of web series and watch Youtube stars like who films in New York City. Given the popularity of Youtube in recent times are web shoots are rising in the city? * After filtering out some top "shooting" categories we were able to see a clear rising trend of WEB shoot activity in New York City!7. Which locations in New York City are popular for movie shoots? * WEST 48th STREET, New York City, New York is near Times Square. Intuitively this seems to be a reasonable location to be considered popular. Data properties and access information:* Download [link](https://data.cityofnewyork.us/api/views/tg4x-b46p/rows.csv?accessType=DOWNLOAD) for data source.* Data available through [NYC Open Data site](https://data.cityofnewyork.us/City-Government/Film-Permits/tg4x-b46p).* Downloaded file named: "Film_Permits.csv".* There is no cost to accessing this data.* Accessing this data does not require creation of an account.* Accessing this data does not violate any laws.* This data does not appear to have been previously analyzed based on a Google search.* A preliminary survey of the data indicates there are 40,682 rows, 14 columns, and the file size is 15.4 MB.!pip install geopy !pip install humanize !pip install folium import numpy as np import pandas as pd import time import datetime from datetime import datetime import calendar import chardet import missingno as msno import matplotlib import matplotlib.pyplot as plt import seaborn as sns from sklearn import preprocessing import os import random import re from geopy.geocoders import Nominatim import json import humanize import folium import warnings warnings.filterwarnings("ignore") start_time = time.time() print('Pandas',pd.__version__) print('Matplotlib',matplotlib.__version__) print('Seaborn',sns.__version__) print('File Size In MB : ',(os.path.getsize('Film_Permits.csv')/1048576),' MB') NYC = 'New York City'Collecting geopy Using cached https://files.pythonhosted.org/packages/75/3e/80bc987e1635ba9e7455b95e233b296c17f3d3bf3d4760fa67cdfc840e84/geopy-1.19.0-py2.py3-none-any.whl Collecting geographiclib<2,>=1.49 (from geopy) Installing collected packages: geographiclib, geopy Successfully installed geographiclib-1.49 geopy-1.19.0 Collecting humanize Installing collected packages: humanize Successfully installed humanize-0.5.1 Collecting folium Using cached 
https://files.pythonhosted.org/packages/43/77/0287320dc4fd86ae8847bab6c34b5ec370e836a79c7b0c16680a3d9fd770/folium-0.8.3-py2.py3-none-any.whl Requirement already satisfied: six in /opt/conda/lib/python3.6/site-packages (from folium) (1.11.0) Requirement already satisfied: requests in /opt/conda/lib/python3.6/site-packages (from folium) (2.20.1) Requirement already satisfied: numpy in /opt/conda/lib/python3.6/site-packages (from folium) (1.13.3) Requirement already satisfied: jinja2 in /opt/conda/lib/python3.6/site-packages (from folium) [...]Exploring data**Encoding check for the input CSV file to ensure data is in the right format**with open('Film_Permits.csv','rb') as fraw: file_content = fraw.read() chardet.detect(file_content)**Character encoding of the CSV file is ascii and confidence level is 1(100%).**Exploring file contents from the CSV:!head -n 3 Film_Permits.csvEventID,EventType,StartDateTime,EndDateTime,EnteredOn,EventAgency,ParkingHeld,Borough,CommunityBoard(s),PolicePrecinct(s),Category,SubCategoryName,Country,ZipCode(s) 455604,Shooting Permit,12/11/2018 08:00:00 AM,12/11/2018 11:59:00 PM,12/07/2018 11:00:12 PM,"Mayor's Office of Film, Theatre & Broadcasting","STANHOPE STREET between WILSON AVENUE and MYRTLE AVENUE, WILSON AVENUE between MELROSE STREET and GEORGE STREET, MELROSE STREET between WILSON AVENUE and KNICKERBOCKER AVENUE, WILSON AVENUE between STOCKHOLM STREET and STANHOPE STREET",Brooklyn,4,83,Film,Feature,United States of America,"11221, 11237" 455593,Shooting Permit,12/11/2018 07:00:00 AM,12/11/2018 09:00:00 PM,12/07/2018 05:57:34 PM,"Mayor's Office of Film, Theatre & Broadcasting","STARR AVENUE between BORDEN AVENUE and VAN DAM STREET, REVIEW AVENUE between BORDEN AVENUE and VAN DAM STREET",Queens,2,108,Television,Episodic series,United States of America,11101**Next, I will extract data from the CSV file and insert into a dataframe for processing**pd.options.display.max_rows = 40 start_time_before_load = time.time() film_permits_df = pd.read_csv("Film_Permits.csv") print('Time taken to load the data : ',time.time() - start_time_before_load,'seconds') film_permits_df.shapeTime taken to load the data : 0.3617970943450928 secondsThe csv/dataframe contains 40682 rows and 14 columnsLet us explore the data a bit using head(), tail(), info(), describe()film_permits_df.head() film_permits_df.tail() film_permits_df.info() film_permits_df.describe() film_permits_df.describe(include='all') film_permits_df.describe(include='object')**Next, I will explore the column metadata...*** What are the data types for the columns in our data?* How many unique entries are there in each column where type is object?* Below I will exlpore the first five rows of each column where type is object? * Why am I exploring unique entries for objects? * Because there could possibly be categorical data or datetime data in an object column. 
* After finishing the data exploration I will transform these object type columns with categorical data into 'category' type and object type columns with datetime data into 'datetime' typefirst_n_entries=5 print('Total rows in the dataframe:', film_permits_df.shape[0]) for col, col_type in film_permits_df.dtypes.iteritems(): if(col_type=='object'): print(col, 'has', film_permits_df[col].nunique(), 'unique entries') print('First', first_n_entries, 'entries are') print(film_permits_df[col][0:first_n_entries]) print('')Total rows in the dataframe: 40682 EventType has 4 unique entries First 5 entries are 0 Shooting Permit 1 Shooting Permit 2 Shooting Permit 3 Shooting Permit 4 Shooting Permit Name: EventType, dtype: object StartDateTime has 16151 unique entries First 5 entries are 0 12/11/2018 08:00:00 AM 1 12/11/2018 07:00:00 AM 2 12/11/2018 09:00:00 AM 3 12/10/2018 07:00:00 AM 4 12/11/2018 06:00:00 AM Name: StartDateTime, dtype: object EndDateTime has 19635 unique entries First 5 entries are 0 12/11/2018 11:59:00 PM 1 12/11/2018 09:00:00 PM 2 12/11/2018 11:00:00 PM 3 12/10/2018 08:00:00 PM 4 12/11/2018 11:00:00 PM Name: EndDateTime, dtype: object EnteredOn has 40470 unique entries First 5 entries are 0 12/07/2018 11:00:12 PM 1 12/07/2018 05:57:34 PM 2 12/07/2018 04:45:33 PM 3 12/07/2018 04:20:34 PM 4 12/07/2018 04:17:03 PM Name: EnteredOn, dtype: object EventAgency has 1 unique entries First 5 entries are 0 Mayor's Office of Film, Th[...]* In the data set, there are Thirteen object type columns: EventType, StartDateTime, EndDateTime, EnteredOn, EventAgency, ParkingHeld, Borough, CommunityBoard(s), PolicePrecinct(s), Category, SubCategoryName, Country and ZipCode(s) Data Type Transformation* Now, I will count the frequency of these unique values per column and print frequency of top five most frequent elements.* I will check if a column with object data type has categorical data or not?* I will check if a column with object data type has datetime data or not?* If and when necessary, I will perform some transformations on the data.for this_column in film_permits_df.columns: print('====', this_column, 'has', film_permits_df[this_column].nunique(), 'unique entries ====') print(film_permits_df[this_column].value_counts().head(5)) print('')==== EventID has 40682 unique entries ==== 66602 1 126487 1 125565 1 50657 1 179741 1 Name: EventID, dtype: int64 ==== EventType has 4 unique entries ==== Shooting Permit 35774 Theater Load in and Load Outs 3380 Rigging Permit 1028 DCAS Prep/Shoot/Wrap Permit 500 Name: EventType, dtype: int64 ==== StartDateTime has 16151 unique entries ==== 11/13/2018 06:00:00 AM 24 12/01/2014 06:00:00 AM 22 10/06/2014 06:00:00 AM 20 11/19/2018 06:00:00 AM 20 10/24/2018 06:00:00 AM 20 Name: StartDateTime, dtype: int64 ==== EndDateTime has 19635 unique entries ==== 08/04/2014 09:00:00 PM 14 09/22/2015 10:00:00 PM 14 08/31/2015 09:00:00 PM 14 11/18/2015 10:00:00 PM 14 10/05/2015 09:00:00 PM 13 Name: EndDateTime, dtype: int64 ==== EnteredOn has 40470 unique entries ==== 01/30/2018 12:43:07 PM 6 06/12/2012 06:58:12 PM 5 05/28/2018 09:52:30 AM 5 10/03/2018 01:48:16 PM 4 07/03/2018 12:45:41 PM 4 N[...]* After exploring the data I observed that EventType, EventAgency, Borough, Category, SubCategoryName and Country columns contain categorical data.* I will transform these columns into 'category' data type.* Also StartDateTime, EndDateTime, EnteredOn columns contain datetime data.* I will transform the above three columns into 'datetime' data type.""" Next, I transform the object data 
type for EventType to 'category' data type """ film_permits_df['EventType'] = film_permits_df['EventType'].astype('category') film_permits_df['EventType'].dtype """ Next, I transform the object data type for EventAgency to 'category' data type """ film_permits_df['EventAgency'] = film_permits_df['EventAgency'].astype('category') film_permits_df['EventAgency'].dtype """ Next, I transform the object data type for Borough to 'category' data type """ film_permits_df['Borough'] = film_permits_df['Borough'].astype('category') film_permits_df['Borough'].dtype """ Next, I transform the object data type for Category to 'category' data type """ film_permits_df['Category'] = film_permits_df['Category'].astype('category') film_permits_df['Category'].dtype """ Next, I transform the object data type for SubCategoryName to 'category' data type """ film_permits_df['SubCategoryName'] = film_permits_df['SubCategoryName'].astype('category') film_permits_df['SubCategoryName'].dtype """ Next, I transform the object data type for Country to 'category' data type """ film_permits_df['Country'] = film_permits_df['Country'].astype('category') film_permits_df['Country'].dtype def get_date(d1): return datetime.strptime(d1,"%m/%d/%Y %I:%M:%S %p").strftime('%m/%d/%Y %H:%M:%S') """ Next, I transform the object data type for StartDateTime to 'datetime' data type """ film_permits_df['StartDateTime']=film_permits_df['StartDateTime'].astype(str) film_permits_df['StartDateTime']=film_permits_df['StartDateTime'].apply(get_date) film_permits_df['StartDateTime']=pd.to_datetime( film_permits_df['StartDateTime'], format='%m/%d/%Y %H:%M:%S') """ Next, I transform the object data type for EndDateTime to 'datetime' data type """ film_permits_df['EndDateTime']=film_permits_df['EndDateTime'].astype(str) film_permits_df['EndDateTime']=film_permits_df['EndDateTime'].apply(get_date) film_permits_df['EndDateTime']=pd.to_datetime( film_permits_df['EndDateTime'], format='%m/%d/%Y %H:%M:%S') """ Next, I transform the object data type for EnteredOn to 'datetime' data type """ film_permits_df['EnteredOn']=film_permits_df['EnteredOn'].astype(str) film_permits_df['EnteredOn']=film_permits_df['EnteredOn'].apply(get_date) film_permits_df['EnteredOn']=pd.to_datetime( film_permits_df['EnteredOn'], format='%m/%d/%Y %H:%M:%S')Let us look at the data types of columns after transformationfilm_permits_df.dtypesNow the dataframe has...* Four object type columns: ParkingHeld, CommunityBoard(s), PolicePrecinct(s) and ZipCode(s)* Three datetime Type columns: StartDateTime, EndDateTime and EnteredOn* Six categorical columns: EventType, EventAgency, Borough, Category, SubCategoryName and Country* One numerical columns: EventID with data type int64 Data clean up, Missing data detection and Fill up Black = filled; white = empty""" Searching for missing data in sample set of 300 randomly selected data points """ _=msno.matrix(film_permits_df.sample(300)) plt.xlabel('Features in data',fontsize=16) plt.ylabel('Gaps in data',fontsize=16) plt.show() """ Searching for missing data in sample set of 3000 randomly selected data points """ _=msno.matrix(film_permits_df.sample(3000)) plt.xlabel('Features in data',fontsize=16) plt.ylabel('Gaps in data',fontsize=16) plt.show()Data Clean upThe data looks fairly clean to me from the graphs above but jsut to make sure, I will perform the following tasks:* Drop all rows and columns where entire row or column is NaN.* Drop columns with duplicate data or with 50% missing value.* Drop columns where all rows have the same value. 
* Such columns have no data variety and nothing useful to contribute to my data analysis.print('Shape of data frame before Cleanup :',film_permits_df.shape) print('Drop all rows and columns where entire row or column is NaN.') film_permits_df.dropna(how='all',axis=0,inplace=True) # rows film_permits_df.dropna(how='all',axis=1,inplace=True) # columns print('Drop columns with duplicate data or with 50% missing value.') half_count = len(film_permits_df)*.5 film_permits_df = film_permits_df.dropna(thresh=half_count, axis=1) film_permits_df = film_permits_df.drop_duplicates() print('Drop columns where all rows have the same value.') for this_column in film_permits_df.columns: if (film_permits_df[this_column].nunique()==1): unique_entry=film_permits_df.iloc[0][this_column] print('Drop column ',this_column,' where all rows have the same value : ', unique_entry) film_permits_df.drop([this_column],axis=1,inplace=True) print('Shape of data frame after cleanup :',film_permits_df.shape)Shape of data frame before Cleanup : (40682, 14) Drop all rows and columns where entire row or column is NaN. Drop columns with duplicate data or with 50% missing value. Drop columns where all rows have the same value. Drop column EventAgency where all rows have the same value : Mayor's Office of Film, Theatre & Broadcasting Shape of data frame after cleanup : (40682, 13)Through the above process I was able to conclude that in my dataset...* There are no rows and columns where entire row or column is NaN.* There are no columns with duplicate data and with 50% missing value.* There is one column, EventAgency where all rows have the same value. - Hence, I will be dropping the column EventAgency as it has no data variety and nothing useful to contribute to my data analysis.. Missing data detection and fill up using random sampling in a meaningful way **That is get data from the same borough**film_permits_df.head().T """ Counting null data per column """ film_permits_df.isnull().sum() """ Percentage of missing data per column """ (film_permits_df.isnull().sum()/len(film_permits_df)).sort_values(ascending=False)We were able to find that ZipCode(s), PolicePrecinct(s), CommunityBoard(s) columns have some missing data **Filling up missing data through sampling of data in same boroughs**print("Data index for missing ZipCode(s)",list(film_permits_df[film_permits_df['ZipCode(s)'].isnull()].index)) print("Data index for missing CommunityBoard(s)",list(film_permits_df[film_permits_df['CommunityBoard(s)'].isnull()].index)) print("Data index for missing PolicePrecinct(s)",list(film_permits_df[film_permits_df['PolicePrecinct(s)'].isnull()].index)) ''' Viewing the missing data ''' film_permits_df.iloc[[1138, 6038, 17714, 20833, 23054, 26856, 39837]] ''' Boroguh based sampling for ZipCode(s), PolicePrecinct(s), CommunityBoard(s) data ''' zipcode_smapling_dict={} communityboard_smapling_dict={} policeprecinc_smapling_dict={} null_index=list(film_permits_df[film_permits_df['ZipCode(s)'].isnull()].index) print(null_index) for indx in null_index: print('index :',indx) this_borough=film_permits_df.iloc[indx]['Borough'] print(this_borough) sample_zipcode=random.choice(list(film_permits_df[(film_permits_df['Borough']==this_borough) & (film_permits_df['ZipCode(s)'].notnull())]['ZipCode(s)'])) sample_communityboard=random.choice(list(film_permits_df[(film_permits_df['Borough']==this_borough) & (film_permits_df['CommunityBoard(s)'].notnull())]['CommunityBoard(s)'])) 
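# Note: each random.choice call above and below draws one non-null value from rows
# in the same borough, so every imputed ZipCode(s) / CommunityBoard(s) /
# PolicePrecinct(s) value stays consistent with the record's Borough.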
sample_policeprecinct=random.choice(list(film_permits_df[(film_permits_df['Borough']==this_borough) & (film_permits_df['PolicePrecinct(s)'].notnull())]['PolicePrecinct(s)'])) zipcode_smapling_dict[indx]=sample_zipcode communityboard_smapling_dict[indx]=sample_communityboard policeprecinc_smapling_dict[indx]=sample_policeprecinct print(zipcode_smapling_dict) print(communityboard_smapling_dict) print(policeprecinc_smapling_dict) ''' Filling up the missing values with sampled data ''' film_permits_df['ZipCode(s)'].fillna(zipcode_smapling_dict,inplace=True) film_permits_df['CommunityBoard(s)'].fillna(communityboard_smapling_dict,inplace=True) film_permits_df['PolicePrecinct(s)'].fillna(policeprecinc_smapling_dict,inplace=True) ''' Checking filled up data ''' film_permits_df.iloc[[1138, 6038, 17714, 20833, 23054, 26856, 39837]] film_permits_df.isnull().sum()**Missing data have been filled up successfully for ZipCode(s), PolicePrecinct(s), CommunityBoard(s) columns** Start of data analysis - Visualization and Exploratory Data Analysis***... for Film Permit data in New York City***Let's ask our data some questions about film permits in New York City.* How many types of "shooting" activities are happening in New York City? * What kind of "shooting" activities are these?print("There are",film_permits_df['Category'].nunique(), "kinds of \"shooting\" activities happening in",NYC) for shoot_category in film_permits_df['Category'].unique(): print(shoot_category)There are 9 kinds of "shooting" activities happening in New York City Film Television Commercial WEB Theater Still Photography Documentary Student Music Video* How many permits for each category of "shooting" activity have been granted in New York City?film_permits_df['Category'].value_counts() plt.figure(figsize=(15,10)) sns.countplot(x='Category',data=film_permits_df,order=film_permits_df['Category'].value_counts().index) plt.title("Number of permits granted in each category of \"shooting\" activity in New York",fontsize=20) plt.xlabel("Category",fontsize=16) plt.ylabel("Number of permits",fontsize=16) plt.show()* How many kinds of events are being granted permits in New York City? * What are these event categories?print("There are",film_permits_df['EventType'].nunique(), "kinds of events that are being granted permits in",NYC) for permit_category in film_permits_df['EventType'].unique(): print(permit_category)There are 4 kinds of events that are being granted permits in New York City Shooting Permit Rigging Permit Theater Load in and Load Outs DCAS Prep/Shoot/Wrap Permit* How many permits have been granted per category of event?film_permits_df['EventType'].value_counts() plt.figure(figsize=(15,10)) sns.countplot(x='EventType',data=film_permits_df,order=film_permits_df['EventType'].value_counts().index) plt.title("Number of permits granted per event type in New York",fontsize=20) plt.xlabel("Event type",fontsize=16) plt.ylabel("Number of permits",fontsize=16) plt.show()* Do all boroughs in New York City see some "shooting" activity? 
* Which boroughs are shoot permits being granted for?if film_permits_df['Borough'].nunique() == 5: print("Yes, shoot permits are being granted for:") else: print("No, shoot permits are being granted for:") for boroughs in film_permits_df['Borough'].unique(): print(boroughs)Yes, shoot permits are being granted for: Brooklyn Queens Manhattan Bronx Staten Island* How many "shooting" activities are happening in each borough?film_permits_df['Borough'].value_counts()I assume that a lot of foreign movies are shot in New York City. Its not just movies from Hollywood/USA. * Is that assumption true? * Which countries are shooting movies in New York?if film_permits_df['Country'].nunique() == 1 and film_permits_df['Country'].unique() == 'United States of America': print("No, it is not true. Only US based shoots are happening in",NYC) else: print("Yes, it is true. All the following countries come to shoot in",NYC) for countries in film_permits_df['Country'].unique(): print(countries)Yes, it is true. All the following countries come to shoot in New York City United States of America France Australia Canada United Kingdom Panama Netherlands Japan GermanyHow many shoots are happening per country?film_permits_df['Country'].value_counts()**Method defined to compute normalized value for a series**Formula for normalization [used](https://www.statisticshowto.datasciencecentral.com/normalized/) is as follows:$\mathbf{X_{new}} = {X - X_{min} \over X_{max} - X_{min}}$''' This method will return the value normalized between 0 and 1, for a number in a series given the number, maximum value and minimum value in the series ''' def compute_norm(number, max_val, min_val): return (number - min_val)/(max_val - min_val) ''' This method will take a series and return a df with the normalized values for that series. Created as we will reuse this a number of times. 
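For example, get_normalized_value_df(film_permits_df['Weekday'].value_counts(), 'weekday', 'permit_count') returns a DataFrame with the columns ['weekday', 'permit_count', 'norm_permit_count'].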
''' def get_normalized_value_df(series_to_process, category_col_name, count_col_name): column_list = [] column_list.append(category_col_name) column_list.append(count_col_name) series_to_df = pd.DataFrame(list(series_to_process.items()), columns=column_list) normalized_value_list = [] for num in np.array(series_to_df[count_col_name]): normalized_value_list.append(compute_norm(number=float(num), max_val=float(series_to_process.nlargest(1)), min_val=float(series_to_process.nsmallest(1)) ) ) series_to_df['norm_'+count_col_name] = normalized_value_list return series_to_dfProcessing date time to extract year, month, hour, day of event''' Computing the number of shooting permits per year ''' film_permits_df['Year'] = film_permits_df['StartDateTime'].apply(lambda time: time.year) film_permits_df['Month'] = (film_permits_df['StartDateTime'].dt.month).apply(lambda x : calendar.month_abbr[x]) film_permits_df['Hour'] = film_permits_df['StartDateTime'].apply(lambda time: time.hour) film_permits_df['Year'].value_counts() ''' Computing the number of shooting permits per month ''' months=['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'] film_permits_df['Year'] = film_permits_df['StartDateTime'].apply(lambda time: time.year) film_permits_df['Hour'] = film_permits_df['StartDateTime'].apply(lambda time: time.hour) film_permits_df['Month'] = pd.Categorical( film_permits_df['Month'], categories=months, ordered=True) film_permits_df['Month'].value_counts() ''' Computing the number of shooting permits per weekday ''' weekdays=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'] film_permits_df["Weekday"] = film_permits_df['StartDateTime'].dt.weekday_name film_permits_df['Weekday'] = pd.Categorical( film_permits_df['Weekday'], categories=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday', 'Sunday'], ordered=True) film_permits_df['Weekday'].value_counts()**Extracting the top five category of shooting a activity for processing**top_category = film_permits_df['Category'].value_counts().head(5).index.values top_category top_category_df = film_permits_df[(film_permits_df['Category']=='Television')|(film_permits_df['Category']=='Film') |(film_permits_df['Category']=='Theater')|(film_permits_df['Category']=='Commercial') |(film_permits_df['Category']=='Still Photography')] top_category_pivot_df=top_category_df.pivot_table(values='EventID', index='Month', columns='Year', aggfunc=np.size)Next, we move onto the important questions we wanted to answer:First on list, we have:* "Can this data tell me popular timing of day for film shoots?"* "Can this data tell me the popular day of the week when shooting activities occur?"* To answer the first question, let's find out the hour of events for top five category of shooting activitytop_category_hour_pivot = top_category_df.pivot_table(values='EventID', index='Category', columns=top_category_df['StartDateTime'].dt.hour, aggfunc=np.size) top_category_df.groupby([top_category_df['StartDateTime'].dt.hour, 'Category',])['EventID'].count().unstack().plot(marker='o',figsize=(15,10)) plt.title('Number of permits at hours of the day for top five category',fontsize=20) plt.ylabel('Number of permits',fontsize=16) plt.xlabel('Hours of the day',fontsize=16) plt.xticks(np.arange(24)) plt.show() ''' Computing the normalized value of total number of shooting permits per hour of day We are computing normalized values to determine the outlier hours for shooting activities. 
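With this min-max scaling, the busiest hour maps to 1.0 and the quietest hour to 0.0, so unusual hours stand out directly in the bar chart.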
''' hourly_permits_df = get_normalized_value_df( series_to_process=film_permits_df['StartDateTime'].dt.hour.value_counts(), category_col_name='hour',count_col_name='permit_count') hourly_permits_df.plot.bar(x='hour', y='norm_permit_count', figsize=(15,10)) plt.setp(plt.gca().get_xticklabels(), rotation=90, fontsize=12) plt.setp(plt.gca().get_yticklabels(), fontsize=12) plt.xlabel('Hour of day',fontsize=16) plt.ylabel('Normalized number of permits',fontsize=16) plt.title('Normalized value of total permits per hour of a day',fontsize=20) plt.show()From the above two graphs we can see that:* **The answer to the first question is that most popular time of day for shooting is between 5 AM and mid-day.*** **The outlier for hour zero is due to a lot of theater shows ending at mid-night. See purple line above.** * To answer the second question, let's find out the weekly trend for permits acquired per weekday in top five category of shooting activitiestop_category_df.groupby(['Weekday','Category',])['EventID'].count().unstack().plot(marker='o',figsize=(15,10)) plt.title('Weekly trend for permits acquired in top-five category of shooting activities',fontsize=20) plt.xticks(np.arange(7),weekdays) plt.xlabel('Week Day',fontsize=16) plt.ylabel('Number of permits',fontsize=16) plt.show() ''' Computing the normalized value of number of shooting permits per weekday We are computing normalized values to detect if weekends are outliers for number of shooting activities. ''' weekday_df = get_normalized_value_df(series_to_process=film_permits_df['Weekday'].value_counts(), category_col_name='weekday',count_col_name='permit_count') weekday_df.plot.bar(x='weekday', y='norm_permit_count', figsize=(15,10)) plt.setp(plt.gca().get_xticklabels(), rotation=90, fontsize=12) plt.setp(plt.gca().get_yticklabels(), fontsize=12) plt.xlabel('Week Day',fontsize=16) plt.ylabel('Normalized of permits',fontsize=16) plt.title('Normalized value of total number of permits per weekday',fontsize=20) plt.show()* **From the above two graphs of weekday-wise number of permits and the normalized value of number of permits, we can now answer the second question.*** **We can conclude that apart from the weekend every day is fairly well balanced in matters of shooting activities**. Next, we look at our data to find out: * "Can it tell me popular months of year for film shoots?"* "Winter in New York city is very beautiful due to all the snow but are the shoots really happening in the harsh winter conditions of NYC?"* To answer the third question, let's find out the monthly trend for permits acquired per month in top five category of shooting activitiestop_category_df.groupby(['Month','Category',])['EventID'].count().unstack().plot(marker='o',figsize=(15,10)) plt.title('Number of permits per month for top five category of shooting activity',fontsize=20) plt.xticks(np.arange(12),months) plt.xlabel('Month',fontsize=16) plt.ylabel('Number of permits',fontsize=16) plt.show() ''' Computing the normalized value of total number of shooting permits per month We are computing normalized values to detect if Winter months are outliers for number of shooting activities. 
''' month_df = get_normalized_value_df(series_to_process=film_permits_df['Month'].value_counts(), category_col_name='month',count_col_name='permit_count') month_df.plot.bar(x='month', y='norm_permit_count', figsize=(15,10)) plt.setp(plt.gca().get_xticklabels(), rotation=90, fontsize=12) plt.setp(plt.gca().get_yticklabels(), fontsize=12) plt.xlabel('Month',fontsize=16) plt.ylabel('Normalized number of permits',fontsize=16) plt.title('Normalized value of total permits per month',fontsize=20) plt.show()From the above two graphs of month-wise number of shooting permits in each category and normalized value of total shooting permits per month we can see that:* Winter is generally a bad time for shooting.* From my knowledge "of watching too many TV shows", I know that they generally follow a fall shooting schedule with a fall finale and then resume shooting in spring with a season finale. This schedule is clearly visible in this graph if you look at the red line.* New York winters are cold. Naturally it would logically and logistically be easy to film movies during summer. We can see that pattern when we look at the orange line.* Fall is still a good enough time to shoot outdoors in New York. More so because of fall colors that brings out the [beauty of nature](https://www.timeout.com/newyork/things-to-do/where-to-see-the-best-fall-foliage-in-nyc) in New York.* **So, the answer to our third question is TV shoots happen in phases. Mostly in Fall but some in Spring. Movie shoots happen starting around Spring, peaking around summer and again a bit in the Fall.*** **The graph for normalized value of total permits per month answers our fourth question that winter is really a bad time to shoot in New York City as the number of events go down but there still are a non-zero number of shooting activities happening. This is especially true for TV shows.** From the permit data I would like to next find out the answer to: "I know some Bollywood movies have shot in Staten Island because of a large Indian community in that area but is it a popular location in general?"''' Computing the normalized value of number of shooting permits per borough and event combo We are computing normalized values to detect if Staten Island is an outlier for number of shooting activities. ''' borough_df = get_normalized_value_df( series_to_process=film_permits_df.groupby(['Borough','EventType'])['EventID'].count(), category_col_name='borough_and_event', count_col_name='permit_count') borough_df.plot.bar(x='borough_and_event', y='norm_permit_count', figsize=(15,10)) plt.setp(plt.gca().get_xticklabels(), rotation=90, fontsize=12) plt.setp(plt.gca().get_yticklabels(), fontsize=12) plt.xlabel('Borough and Event combination',fontsize=16) plt.ylabel('Normalized number of permits',fontsize=16) plt.title('Normalized value of total permits per borough and event combination',fontsize=20) plt.show()* From the graph above we can clearly see that shooting permits are most common in Manhattan and Brooklyn. * Staten Island has the lowest number among the five boroughs.* **Which means that we have our answered the fifth question. Staten Island is NOT in-fact a popular shooting location.** Next, we take a look at some of the less popular events that acquire shooting permits in New York City. We would like to find out the answer to: "I like a lot of web series and watch Youtube stars like who films in New York City. 
Given the popularity of Youtube in recent times, are web shoots rising in the city?" * If we look at the year-wise number of permits for each category of shooting activity, it is difficult to spot web shooting activity, since it is an outlier compared to movie or TV shoots.film_permits_df.groupby(['Year','Category'])['EventID'].count().unstack().plot(kind='bar',figsize=(15,10)) plt.title('Year wise number of permits for each category of shooting activity',fontsize=20) plt.setp(plt.gca().get_xticklabels(), rotation=0, fontsize=12) plt.xlabel('Year',fontsize=16) plt.ylabel('Number of permits',fontsize=16) plt.show() ''' Computing the normalized value of number of shooting permits per category and year combo ''' year_permit_df = get_normalized_value_df( series_to_process=film_permits_df.groupby(['Category','Year'])['EventID'].count(), category_col_name='category_year', count_col_name='permit_count') year_permit_df.plot.bar(x='category_year', y='norm_permit_count', figsize=(15,10)) plt.setp(plt.gca().get_xticklabels(), rotation=90, fontsize=12) plt.setp(plt.gca().get_yticklabels(), fontsize=12) plt.xlabel('Category and Year',fontsize=16) plt.ylabel('Normalized number of permits',fontsize=16) plt.title('Normalized value of total permits per category over the years',fontsize=20) plt.show()* So we look at the data that is "NOT IN" the popular shooting activity categories.web_df = film_permits_df[~film_permits_df['Category'].isin(top_category)] web_df.groupby(['Year','Category'])['EventID'].count().unstack().plot(kind='bar',figsize=(15,10)) plt.title('Year wise number of permits for each low popularity shooting activity category',fontsize=20) plt.setp(plt.gca().get_xticklabels(), rotation=0, fontsize=12) plt.xlabel('Year',fontsize=16) plt.ylabel('Number of permits',fontsize=16) plt.show()* No further normalization is required in this case, as we are just looking for an up or down trend and not detecting outliers or comparing numerical values.* **From the above graph we can see a clear rising trend of WEB shoot activity in New York City!** Lastly, we seek the answer to "Which locations in New York City are popular for movie shoots?"We determine this using the areas where parking was held for a shooting. 
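(Aside: the `get_normalized_value_df` helper used repeatedly above is defined earlier in the notebook and is not shown in this excerpt. Purely as a hypothetical sketch, assuming "normalized" here means each count expressed as a share of the total so bars can be compared directly, such a helper could look like the code below; the notebook's actual implementation may differ.)

# Hypothetical sketch only -- the notebook's real get_normalized_value_df may differ.
import pandas as pd

def get_normalized_value_df_sketch(series_to_process, category_col_name, count_col_name):
    # Turn a value_counts()/groupby-count Series into a tidy two-column DataFrame
    # (for grouped series the index tuples become the category labels).
    df = pd.DataFrame(list(series_to_process.items()),
                      columns=[category_col_name, count_col_name])
    # Express each count as a share of the total so categories are comparable.
    df['norm_' + count_col_name] = df[count_col_name] / df[count_col_name].sum()
    return df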
Assumption being people don't want to walk too far to shoot their movies/shows.**Top ten parking held locations for shooting activities** Remove multiple whitespaces learned from this [SO link](https://stackoverflow.com/questions/2077897/substitute-multiple-whitespace-with-single-whitespace-in-python)Using [GeoPy](https://github.com/geopy/geopy) to extract lat long from street addressgeolocator = Nominatim() street_address_list = [] lat_long_list = [] parking_series = film_permits_df['ParkingHeld'].value_counts().head(10) parking_df = pd.DataFrame(list(parking_series.items()), columns=['ParkingHeld','permit_count']) for street_info in parking_df['ParkingHeld']: street_address = street_info.split('between')[0].strip() found_numbers = re.search(r'\d+', street_address) if found_numbers is not None: indices = list(found_numbers.span()) street_number = street_address[indices[0]:indices[1]] street_parts = street_address.split(street_number) street_address = street_parts[0] + humanize.ordinal(street_number) + street_parts[1] + ', New York City, New York' else: street_address = street_address + ', New York City, New York' location_dict = geolocator.geocode(street_address).raw latitude = float(location_dict['lat']) longitude = float(location_dict['lon']) street_address_list.append(street_address) lat_long_list.append([latitude,longitude]) new_df = pd.DataFrame({'ParkingHeld':street_address_list}) parking_df.update(new_df) parking_df['lat_long'] = lat_long_list parking_df parking_df.plot.bar(x='ParkingHeld', y='permit_count', figsize=(15,10)) plt.setp(plt.gca().get_xticklabels(), rotation=90, fontsize=12) plt.setp(plt.gca().get_yticklabels(), fontsize=12) plt.xlabel('Top ten shooting locations',fontsize=16) plt.ylabel('Number of permits',fontsize=16) plt.title('Number of permits for top ten shooting locations',fontsize=20) plt.show()Using the [Folium library](https://python-visualization.github.io/folium/quickstart.html) let's take a look at where the popular shooting locations are in New York City!folium_map = folium.Map(location=parking_df.iloc[0]['lat_long'], zoom_start=11, tiles='Stamen Terrain') for curr_loc in list(parking_df.index): folium.Marker(location=parking_df.iloc[curr_loc]['lat_long'], popup=parking_df.iloc[curr_loc]['ParkingHeld'] ).add_to(folium_map) folium_map.add_child(folium.ClickForMarker(popup='Waypoint')) folium_map* The top 10 filming locations can be seen in the graph and map above.* **WEST 48th STREET, New York City, New York** is near Times Square. Intuitively this seems to be a reasonable location to be considered popular.print('Total Time taken:',time.time() - start_time,'seconds')Total Time taken: 23.470895767211914 secondsTo file pdbfrom molsysmt.tools import openmm_Topology #openmm_Topology.to_file_pdb(item)_Lambda School Data Science_ Regression Sprint Challenge For this Sprint Challenge, you'll predict the price of used cars. The dataset is real-world. It was collected from advertisements of cars for sale in the Ukraine in 2016.The following import statements have been provided for you, and should be sufficient. But you may not need to use every import. 
And you are permitted to make additional imports.%matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeRegressor import statsmodels.api as sm from statsmodels.stats.outliers_influence import variance_inflation_factor[The dataset](https://raw.githubusercontent.com/ryanleeallred/datasets/master/car_regression.csv) contains 8,495 rows and 9 variables:- make: manufacturer brand- price: seller’s price in advertisement (in USD)- body: car body type- mileage: as mentioned in advertisement (‘000 Km)- engV: rounded engine volume (‘000 cubic cm)- engType: type of fuel- registration: whether car registered in Ukraine or not- year: year of production- drive: drive typeRun this cell to read the data:df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/car_regression.csv') print(df.shape) df.sample(10)(8495, 9)Predictive Modeling with Linear Regression 1.1 Split the data into an X matrix and y vector (`price` is the target we want to predict).target = 'price' y = df[target] X = df.drop(target, axis=1)1.2 Split the data into test and train sets, using `train_test_split`.You may use a train size of 80% and a test size of 20%.X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.80, test_size=0.20, random_state=42)1.3 Use scikit-learn to fit a multiple regression model, using your training data.Use `year` and one or more features of your choice. You will not be evaluated on which features you choose. You may choose to use all features.features = ['mileage', 'year'] lr = LinearRegression() lr.fit(X_train[features], y_train)1.4 Report the Intercept and Coefficients for the fitted model.print('Intercept: ' + str(lr.intercept_)) print('Coefficients: ' + str(lr.coef_[0]) + ', ' + str(lr.coef_[1]))Intercept: -2080152.8168321538 Coefficients: -45.95570887948974, 1047.96289234215371.5 Use the test data to make predictions.X_test['pred_price'] = lr.predict(X_test[features]) #concatenate X_test and y_test combined_test = pd.concat([X_test, y_test],axis=1)1.6 Use the test data to get both the Root Mean Square Error and $R^2$ for the model. You will not be evaluated on how high or low your scores are.#Get the mean squared error of predicted price from X_test and true price from y_test mse = mean_squared_error(combined_test['price'], combined_test['pred_price']) #Calc the root mse rmse = mse**(0.5) #Get the R2 score r2 = r2_score(combined_test['price'], combined_test['pred_price']) #print the results: print('Root mean squared error: ' + str(rmse)) print('R2 score: ' + str(r2))Root mean squared error: 23023.64138316448 R2 score: 0.180218903288735351.7 How should we interpret the coefficient corresponding to the `year` feature?One sentence can be sufficient The co-efficients for each feature are multiplied by the observation value for that feature in order to calculate the dependent/predicted variable, which in this case is price. So the year of the car is multiplied by its coefficent, and that is added to the other features multiplied by their coefficients and then added to the constant, and the result is the predicted value for the cars price. 1.8 How should we interpret the Root Mean Square Error?One sentence can be sufficient The root mean squared error is the standard deviation of the residuals, or prediction errors. 
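Concretely, $\mathrm{RMSE} = \sqrt{\frac{1}{n}\sum_{i=1}^{n}(y_i - \hat{y}_i)^2}$, so it is expressed in the same units as the target (here, price in USD).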
Basically it's a measure of how spread out the prediction errors are for this model. 1.9 How should we interpret the $R^2$?One sentence can be sufficient The $R^2$ is the percentage of the variance in y that is explained by the x variables included in the model. Here the $R^2$ score of 0.18 means only 18% of the variance in the car price can be explained by the features we have included in the model. This is pretty low; we probably need a new model in order to improve our predictive accuracy. Log-Linear and Polynomial Regression 2.1 Engineer a new variable by taking the log of the price variable.df['ln_price'] = np.log(df['price'])2.2 Visualize scatterplots of the relationship between each feature versus the log of price, to look for non-linearly distributed features.You may use any plotting tools and techniques.target = 'ln_price' for feature in df.drop(columns=[target, 'price'], axis=1).columns: sns.residplot(x= feature, y=target, data=df, lowess=True, line_kws=dict(color='r')) plt.show()2.3 Create polynomial feature(s)You will not be evaluated on which feature(s) you choose. But try to choose appropriate features.df['year_squared'] = df['year']**(2) df['mileage_squared'] = df['mileage']**2 df.head()2.4 Use the new log-transformed y variable and your x variables (including any new polynomial features) to fit a new linear regression model. Then report the: intercept, coefficients, RMSE, and $R^2$.#most of this code is repeated from above linear regression model: target = 'ln_price' y = df[target] X = df.drop(target, axis=1) X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.80, test_size=0.20, random_state=42) features = ['mileage_squared', 'year_squared'] lr = LinearRegression() lr.fit(X_train[features], y_train) print('Intercept: ' + str(lr.intercept_)) print('Coefficients: ' + str(lr.coef_[0]) + ', ' + str(lr.coef_[1])) X_test['pred_price'] = lr.predict(X_test[features]) combined_test = pd.concat([X_test, y_test],axis=1) mse = mean_squared_error(combined_test['ln_price'], combined_test['pred_price']) rmse = mse**(0.5) r2 = r2_score(combined_test['ln_price'], combined_test['pred_price']) print('Root mean squared error: ' + str(rmse)) print('R2 score: ' + str(r2))Intercept: -88.65395229965064 Coefficients: 1.6045770334444348e-08, 2.4300817178069537e-05 Root mean squared error: 0.7135786256884349 R2 score: 0.46868371287869992.5 How do we interpret coefficients in Log-Linear Regression (differently than Ordinary Least Squares Regression)?One sentence can be sufficient In log-linear regression, the coefficients are interpreted in percentage terms: a one-unit change in the x (feature) variable changes the predicted y by approximately $100\cdot\beta$ percent (exactly $100\cdot(e^{\beta}-1)$ percent). In ordinary least squares, the coefficient is the raw change in y, in its own units, from a one-unit change in the x variable. Decision Trees 3.1 Use scikit-learn to fit a decision tree regression model, using your training data.Use one or more features of your choice. You will not be evaluated on which features you choose. You may choose to use all features.You may use the log-transformed target or the original un-transformed target. You will not be evaluated on which you choose.target = 'ln_price' X = df.drop(target, axis=1) y = df[target] X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.80, test_size=0.20, random_state=42) features = ['year_squared', 'mileage_squared'] tree = DecisionTreeRegressor() # note: fitting on the full X (rather than X_train) lets the test rows leak into training tree.fit(X[features], y)3.2 Use the test data to get the $R^2$ for the model. 
You will not be evaluated on how high or low your scores are.X_test['pred_ln_price'] = tree.predict(X_test[features]) combined_test = pd.concat([X_test, y_test], axis=1) print("Root mean squared error: " + str(mean_squared_error(combined_test['ln_price'], combined_test['pred_ln_price'])**(0.5))) print('R2 score: ' + str(r2_score(combined_test['ln_price'], combined_test['pred_ln_price'])))Root mean squared error: 0.5275931899747331 R2 score: 0.7095523775750867Regression Diagnostics 4.1 Use statsmodels to run a log-linear or log-polynomial linear regression with robust standard errors.target = 'ln_price' y = df[target] X = df.drop(columns=[target, 'price']) X = sm.add_constant(X) model = sm.OLS(y, X) results = model.fit(cov_type='HC3') print(results.summary())OLS Regression Results ============================================================================== Dep. Variable: ln_price R-squared: 0.674 Model: OLS Adj. R-squared: 0.673 Method: Least Squares F-statistic: 1693. Date: Fri, 03 May 2019 Prob (F-statistic): 0.00 Time: 16:42:11 Log-Likelihood: -6963.9 No. Observations: 8495 AIC: 1.395e+04 Df Residuals: 8484 BIC: 1.403e+04 Df Model: 10 Covariance Type: HC3 =================================================================================== coef std err [...]4.2 Calculate the Variance Inflation Factor (VIF) of our X variables. Do we have multicollinearity problems?Yes, this model suffers from a multicolinearity problem. The variance inflation factors for year and year_squared are greater than 10 (much greater, in fact: 1.6*e^5), which is a good rule of thumb benchmark for high VIF. Also, mileage and mileage squared are 7.5 and 5, not quite reaching the benchmark of 10, but higher than the rest of the features in the model which are all around 1-1.6. Additionally, the statsmodels OLS summary report indicates in the warnings that the model may have strong multi-colinearity.vif = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))] pd.Series(vif, X.columns)Calorimeter Clusters CollectionA jet is built from these clusters. Lets take a dive into the raw versions.import matplotlib.pyplot as plt from config import ds_zee as dsThere are several `CaloCluster` collections. 
For this we'll go after `egammaClusters`.clusters = (ds .SelectMany(lambda e: e.CaloClusters('egammaClusters')) .Select(lambda cluster: cluster.pt() / 1000.0) .AsAwkwardArray('ClusterPt') .value()) plt.hist(clusters.ClusterPt, bins=100, range=(0, 10)) plt.xlabel('Cluster $p_T$ [GeV]') plt.ylabel('Number of clusters') _ = plt.title(r'$e\gamma$ clusters in $Z\rightarrow ee$ events')The DatamodelThe data model when this documentation was last built was:from func_adl_servicex_xaodr21.xAOD.calocluster_v1 import CaloCluster_v1 help(CaloCluster_v1)Help on class CaloCluster_v1 in module func_adl_servicex_xaodr21.xAOD.calocluster_v1: class CaloCluster_v1(builtins.object) | A class | | Methods defined here: | | badChannelList(self) -> 'func_adl_servicex_xaodr21.vector_xaod_caloclusterbadchanneldata_v1_.vector_xAOD_CaloClusterBadChannelData_v1_' | A method | | clearDecorations(self) -> 'bool' | A method | | e(self) -> 'float' | A method | | energyBE(self, layer: 'int') -> 'float' | A method | | et(self) -> 'float' | A method | | eta(self) -> 'float' | A method | | etaBE(self, layer: 'int') -> 'float' | A method | | getClusterEtaSize(self) -> 'int' | A method | | getClusterPhiSize(self) -> 'int' | A method | | getSisterCluster(self) -> 'func_adl_servicex_xaodr21.xAOD.calocluster_v1.CaloCluster_v1' | A method | | hasNonConstStore(self) -> 'bool' | A method | | hasStore(self) -> 'bool' | A method [...](tutorial-hello)= Hello world!In this tutorial, you will create a storage and use it to track calls to aPython function. You will save time when calling the function on the same inputsa second time, by having the result loaded for you instead of being computedagain. Define an operationFirst, import the necessary objects and create a storage:from mandala.all import * storage = Storage(in_memory=True)The `storage` object is used to store the inputs and outputs for calls to Pythonfunctions. To make this automatic, you must tell the storage about the functionby decorating the function with the `@op()` decorator, passing the storage asargument:@op(storage) def add(x, y) -> int: print('Hello world!') # ...some long computation... return x + yCreating operation add...This process of connecting a function to the storage turns it into an**operation**, which is the fundamental unit of tracking and computation inMandala. Call the operation in a `run` context to save resultsNext, call the operation `add` you just defined:add(23, 42)Hello world!This is just the normal behavior of the function. Importantly, **no data wassaved to storage by the above call**, and the function returns what you wouldexpect. To track the results of computations, you must instead wrap them in the`run` context manager:with run(storage): result = add(23, 42) resultHello world!The above code recorded in storage that the function `add` was called on theinputs `23` and `42`, and that the result's value was `65`. The returned object`result` is a **value reference**. Value references represent an arbitraryPython object wrapped together with metadata used for storage and computation. Call the operation again to *load* resultsImportantly, re-running this code will not recompute the function, since the resultsalready exist in storage. Instead, the storage will figure out for you that youhave already called the function on these inputs, and will retreive the result;this is called **retracing** the computation:with run(storage): result = add(23, 42) resultObserve that this time nothing was printed out, because `add` was not actuallycomputed. 
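If the retracing behaviour feels opaque, the core idea is ordinary memoization; the sketch below illustrates it with a plain dictionary cache. This is only an analogy, not mandala's actual implementation, which additionally persists results in the storage and wraps them in value references.

# Plain-Python memoization analogy (illustration only -- not mandala's implementation).
_cache = {}

def cached_add(x, y):
    key = ('add', x, y)
    if key in _cache:            # inputs already seen: reuse the stored result
        return _cache[key]
    print('Hello world!')        # the body only runs on a cache miss
    result = x + y
    _cache[key] = result
    return result

cached_add(23, 42)   # prints 'Hello world!' and computes 65
cached_add(23, 42)   # silent: the result is looked up instead of recomputed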
Exercise: extend the computationAs an exercise, run the following code next:with run(storage): for n in range(20, 25): result = add(n, 42)Hello world! Hello world! Hello world! Hello world!What happened? How many times was `"Hello, world!"` printed out? Why? Exercise: compose operationsOperations are intended to be used as building blocks of larger computations. Below, define an increment operation `inc` that on input `x` returns `x+1` and printsout a message.### your code here @op(storage) def inc(x) -> int: print('Hello from inc!') return x + 1Creating operation inc...Now run the following code that combines `add` and `inc` into a toy pipeline andsaves the results:with run(storage): for n in range(3): n_inc = inc(n) result = add(n, n_inc)Hello from inc! Hello world! Hello from inc! Hello world! Hello from inc! Hello world!Run the computation again; is anything printed out? Why?with run(storage): for n in range(3): n_inc = inc(n) result = add(n, n_inc)COVID-19: DATA SCIENCE AND MACHINE LEARNING VISUALIZATIONSI am currently using the [NYTimes](https://github.com/nytimes/covid-19-data) and [JHU CSSE](https://github.com/CSSEGISandData/COVID-19) database in this notebook. NYTimes is for USA data and JHU CSSE is for international data. Conveniently, they are all written in `.csv` files which Pandas can take great advantage of.If you are planning to run the notebook on your own computer, please make sure you have all the dependencies installed!import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt import geopandas import pycountry import plotly import plotly.express as px import plotly.figure_factory as ff import time from datetime import datetime import json import os from urllib.request import urlopen from selenium import webdriver from webdriver_manager.chrome import ChromeDriverManager from pathlib import Path %matplotlib inline with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: counties = json.load(response) plotly.io.orca.config.executable = '/path/to/orca' plotly.io.orca.config.save()Pulling the DatabasesFor cloning the repository that contain the databases, I used `.gitignore` to prevent pushing large files to this repository. I do not modify the directories that contain the databases and I leave them as is.#Please run this cell if databases directory is empty! 
!git clone https://github.com/CSSEGISandData/COVID-19 databases/jhucsse !git clone https://github.com/nytimes/covid-19-data databases/nytimesRun the cells below to get the most recent databases!!git -C databases/jhucsse pull origin master !ls databases/jhucsse !git -C databases/nytimes pull origin master !ls databases/nytimesLoad `.csv` Files into Pandas DataFramesAll the recent databases are pulled from GitHubCOVID19_confirmed = pd.read_csv("databases/jhucsse/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv") COVID19_deaths = pd.read_csv("databases/jhucsse/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv") COVID19_recovered = pd.read_csv("databases/jhucsse/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv") #Global variables databases = [COVID19_confirmed, COVID19_deaths, COVID19_recovered] dataset = [(COVID19_confirmed, "confirmed"), (COVID19_deaths, "deaths"), (COVID19_recovered, "recovered")] all_countries = list(COVID19_confirmed['Country/Region']) all_countries = list(dict.fromkeys(all_countries)) dates = list(COVID19_confirmed.columns) dates.remove('Country/Region') dates.remove('Province/State') dates.remove('Lat') dates.remove('Long') COVID19_US = pd.read_csv("databases/nytimes/us.csv") #Already provided with the JHU CSSE COVID19_US_states = pd.read_csv("databases/nytimes/us-states.csv") COVID19_US_counties = pd.read_csv("databases/nytimes/us-counties.csv", dtype={"fips": str}) COVID19_US_states = COVID19_US_states.sort_values(by=['date']) states = list(set(COVID19_US_states['state'].tolist()))Data CleaningCleaning unnecessary data.def data_clean(data): data = data.drop(['Province/State', 'Lat', 'Long'], axis=1) data = data.groupby(['Country/Region'])[dates].sum() data.columns = pd.to_datetime(data.columns) return data COVID19_confirmed = data_clean(COVID19_confirmed) COVID19_deaths = data_clean(COVID19_deaths) COVID19_recovered = data_clean(COVID19_recovered) dataset = [(COVID19_confirmed, "confirmed"), (COVID19_deaths, "deaths"), (COVID19_recovered, "recovered")]Data Charts with Worldwide DataFunctions to compile graphsdef cases_country(country_name, data, label=None): country_plot = data.loc[country_name] country_plot = country_plot.sort_index().plot(label=label) if data.equals(COVID19_confirmed): data_label = "Confirmed Cases" elif data.equals(COVID19_recovered): data_label = "Recovered" else: data_label = "Deaths" #Labelings country_plot.set_ylabel("Number of " + data_label) country_plot.set_xlabel("Day") country_plot.set_title(data_label + " in " + country_name) return country_plot def update_all_cases_country_individual(): for country in all_countries: for data in dataset: cases_country(country, data[0]) plt.savefig("cases_country_individual/" + country + "_" + data[1] + ".png") plt.tight_layout() plt.clf() def all_cases_country(country_name): for data in dataset: plt = cases_country(country_name, data[0], label=data[1]) #Labelings plt.set_ylabel("Number of Cases") plt.set_xlabel("Day") plt.set_title("COVID-19 in " + country_name) plt.legend() return plt def update_all_cases_country(): for country in all_countries: all_cases_country(country) plt.savefig("cases_country/" + country + "_all.png") plt.tight_layout() plt.clf() def worldwide_cases(): world = COVID19_confirmed.sum().plot(label="confirmed") world = COVID19_deaths.sum().plot(label="deaths") world = COVID19_recovered.sum().plot(label="recovered") #Labelings world.set_ylabel("Number of Cases") 
world.set_xlabel("Day") world.set_title("COVID-19 Worldwide") world.legend() plt.savefig("COVID19_worldwide") return world def cases_country_active(country_name): """ active = confirmed - deaths - recovered """ confirmed = COVID19_confirmed.loc[country_name] deaths = COVID19_deaths.loc[country_name] recovered = COVID19_recovered.loc[country_name] active = confirmed - deaths - recovered active = active.plot() active.set_ylabel("Number of Active Cases") active.set_xlabel("Day") active.set_title("Active COVID-19 Cases in " + country_name) def update_all_cases_country_active(): for country in all_countries: cases_country_active(country) plt.savefig("cases_country_active/" + country + "_active_cases.png") plt.tight_layout() plt.clf() def worldwide_active(): confirmed = COVID19_confirmed.sum() deaths = COVID19_deaths.sum() recovered = COVID19_recovered.sum() active = confirmed - deaths - recovered active = active.plot() #Labelings active.set_ylabel("Number of Active Cases") active.set_xlabel("Day") active.set_title("COVID-19 Active Cases Worldwide") plt.savefig("COVID19_worldwide_active.png") return active #Still testing and experimenting def compare_countries(list_countries): for country in list_countries: for data in dataset: country_data = data[0] country_data = country_data.loc[country] country_data = country_data.plot(label=data[1] + " in " + str(country)) country_data.set_ylabel("Number of Cases") country_data.set_xlabel("Day") country_data.set_title("COVID-19 in " + str(list(list_countries))) country_data.legend() #plt.savefig(str(list_countries) + ".png") compare_countries(['US']) update_all_cases_country_individual() update_all_cases_country() worldwide_cases() update_all_cases_country_active() worldwide_active()Data with US DataFunctions that chart US data, from the NYTimes database.def cases_by_state(state): df = COVID19_US_states.loc[COVID19_US_states['state'] == state].drop(['fips'], axis=1) pd.to_datetime(df['date']) df = df.set_index(df['date']).drop(['date'], axis=1) df = df.sort_values(by=['date']) #return df return df.plot() def update_states(): for state in states: us_cases = cases_by_state(state) us_cases.set_ylabel("Cases") us_cases.set_xlabel("Day") us_cases.set_title("COVID-19 Cases in " + str(state)) plt.savefig("cases_us_states/" + str(state) + ".png") plt.clf cases_by_state('Washington') update_states()Visualizations with Global Dataworld = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres')) COVID19_geo_confirmed = dataset[0][0] COVID19_geo_deaths = dataset[1][0] COVID19_geo_recovered = dataset[2][0] COVID19_geo_confirmed.reset_index(level=0, inplace=True) COVID19_geo_deaths.reset_index(level=0, inplace=True) COVID19_geo_recovered.reset_index(level=0, inplace=True) COVID19_geo_confirmed = COVID19_geo_confirmed.rename(columns={"Country/Region" : "name"}) COVID19_geo_deaths = COVID19_geo_deaths.rename(columns={"Country/Region" : "name"}) COVID19_geo_recovered = COVID19_geo_recovered.rename(columns={"Country/Region" : "name"}) COVID19_geo_confirmed = COVID19_geo_confirmed.replace({'Taiwan*': 'Taiwan', 'Korea, South': 'South Korea', 'US' : 'United States of America'}) COVID19_geo_deaths = COVID19_geo_deaths.replace({'Taiwan*': 'Taiwan', 'Korea, South': 'South Korea', 'US' : 'United States of America'}) COVID19_geo_recovered = COVID19_geo_recovered.replace({'Taiwan*': 'Taiwan', 'Korea, South': 'South Korea', 'US' : 'United States of America'}) #Delete any islands and Antartica world_confirmed = world.merge(COVID19_geo_confirmed, how = 'inner', on = 
'name') world_confirmed = world_confirmed[(world_confirmed.pop_est>0) & (world_confirmed.name!="Antarctica")] world_deaths = world.merge(COVID19_geo_deaths, how = 'inner', on = 'name') world_deaths = world_deaths[(world_deaths.pop_est>0) & (world_deaths.name!="Antarctica")] world_recovered = world.merge(COVID19_geo_recovered, how = 'inner', on = 'name') world_recovered = world_recovered[(world_recovered.pop_est>0) & (world_recovered.name!="Antarctica")] dates = list(COVID19_geo_confirmed) dates.remove('name') dates.sort() def geo_confirmed_corona(data): for i in range(len(dates)): fig, ax = plt.subplots(1, 1) world_confirmed.plot(column=dates[i], cmap='Reds', ax=ax, legend=True, legend_kwds={'label': "Population by Country", 'orientation': "horizontal"}) #Save the figure fig = matplotlib.pyplot.gcf() fig.set_size_inches(20, 10) fig.savefig('geo/geo_confirmed_normalized/' + str(i).zfill(3) + '.png', dpi=100) plt.close(fig) def geo_deaths_corona(data): for i in range(len(dates)): fig, ax = plt.subplots(1, 1) world_deaths.plot(column=dates[i], cmap='Greys', ax=ax, legend=True, legend_kwds={'label': "Population by Country",'orientation': "horizontal"}) #Save the figure fig = matplotlib.pyplot.gcf() fig.set_size_inches(20, 10) fig.savefig('geo/geo_deaths_normalized/' + str(i).zfill(3) + '.png', dpi=100) plt.close(fig) def geo_recovered_corona(data): for i in range(len(dates)): fig, ax = plt.subplots(1, 1) world_recovered.plot(column=dates[i], cmap='Greens', ax=ax, legend=True, legend_kwds={'label': "Population by Country",'orientation': "horizontal"}) #Save the figure fig = matplotlib.pyplot.gcf() fig.set_size_inches(20, 10) fig.savefig('geo/geo_recovered_normalized/' + str(i).zfill(3) + '.png', dpi=100) plt.close(fig) geo_confirmed_corona(COVID19_geo_confirmed) geo_deaths_corona(COVID19_geo_deaths) geo_recovered_corona(COVID19_geo_recovered) #New color theme to display color gradients Solarcorona = ['rgb(252, 222, 156)', 'rgb(250, 164, 118)', 'rgb(240, 116, 110)', 'rgb(227, 79, 111)', 'rgb(220, 57, 119)', 'rgb(185, 37, 122)', 'rgb(124, 29, 111)', '#1b0c41', '#000004'] #SCRAP PREVIOUS #New dataframes mapping = {country.name: country.alpha_3 for country in pycountry.countries} geo_confirmed = COVID19_confirmed.rename(index={'Korea, South': 'Korea, Republic of', 'US' :'United States', 'Taiwan*' :'Taiwan, Province of China', 'Vietnam':'Viet Nam', 'Russia': 'Russian Federation', 'Iran': 'Iran, Islamic Republic of', 'Tanzania': 'Tanzania, United Republic of', 'Laos': "Lao People's Democratic Republic", 'Syria': 'Syrian Arab Republic', 'Brunei': 'Brunei Darussalam', 'Venezuela': 'Venezuela, Bolivarian Republic of', 'Bolivia': 'Bolivia, Plurinational State of', 'Moldova': 'Moldova, Republic of'}).rename(index = lambda name: mapping.get(name)) geo_deaths = COVID19_deaths.rename(index={'Korea, South': 'Korea, Republic of', 'US' :'United States', 'Taiwan*' :'Taiwan, Province of China', 'Vietnam':'Viet Nam', 'Russia': 'Russian Federation', 'Iran': 'Iran, Islamic Republic of', 'Tanzania': 'Tanzania, United Republic of', 'Laos': "Lao People's Democratic Republic", 'Syria': 'Syrian Arab Republic', 'Brunei': 'Brunei Darussalam', 'Venezuela': 'Venezuela, Bolivarian Republic of', 'Bolivia': 'Bolivia, Plurinational State of', 'Moldova': 'Moldova, Republic of'}).rename(index = lambda name: mapping.get(name)) geo_recovered = COVID19_recovered.rename(index={'Korea, South': 'Korea, Republic of', 'US' :'United States', 'Taiwan*' :'Taiwan, Province of China', 'Vietnam':'Viet Nam', 'Russia': 'Russian Federation', 
'Iran': 'Iran, Islamic Republic of', 'Tanzania': 'Tanzania, United Republic of', 'Laos': "Lao People's Democratic Republic", 'Syria': 'Syrian Arab Republic', 'Brunei': 'Brunei Darussalam', 'Venezuela': 'Venezuela, Bolivarian Republic of', 'Bolivia': 'Bolivia, Plurinational State of', 'Moldova': 'Moldova, Republic of'}).rename(index = lambda name: mapping.get(name)) #confirmed 500000 #deaths 10000 #recovered 100000 def geo_frames(df, file, case_range): for i in range(len(geo_confirmed.columns)): fig = px.choropleth(df, locations=df.index, color=geo_confirmed.columns[i], range_color=(0, case_range), color_continuous_scale=Solarcorona, ) fig.update_layout( title_text = 'COVID-19 Cases Confirmed GitHub: briancpark', ) #fig.show() fig.write_html("geo/" + file + "_html/" + str(i).zfill(3) + ".html") #geo_frames(geo_confirmed, "geo_confirmed", 200000) """ for i in range(len(geo_confirmed.columns)): driver = webdriver.Chrome(ChromeDriverManager().install()) driver.get("file:///Users/brianpark/Desktop/PUBLISHED%20Projects/COVID19/geo/geo_confirmed_html/" + str(i).zfill(3) + ".html") time.sleep(3) driver.save_screenshot("geo/geo_confirmed/" + str(i).zfill(3) + ".png") driver.close() """ #len(geo_confirmed.columns)Visualizations with US DataFor some reason, plotly doesn't want to work when `write_image()` is called, so I used selenium instead to screenshot. Not a very convenient way, but in the end it gives high quality graphs.dates = list(set(list(COVID19_US_states['date']))) dates.sort(key = lambda date: datetime.strptime(date, '%Y-%m-%d')) states = list(set(list(COVID19_US_states['state']))) def create_html(i, data_type, data_label, color_gradient, max_cases, date): df = COVID19_US_counties#pd.read_csv("databases/nytimes/us-counties.csv", dtype={"fips": str}) df = df[df['date']==date] fig = px.choropleth(df, geojson=counties, locations='fips', color=data_type, color_continuous_scale=color_gradient, range_color=(0, max_cases), scope="usa", labels={data_type:data_type} ) fig.update_layout(title_text = 'USA COVID-19 ' + data_label + ' by County on ' + str(date) + " GitHub:briancpark") fig.write_html("geo/geo_us_" + data_type + "_html/" + str(i).zfill(3) + ".html") def update_us_cases_by_counties(max_cases, limit): i = 0 for date in dates: create_html(i, 'cases', 'Cases', "Sunsetdark", max_cases, date) i = i + 1 def update_us_deaths_by_counties(max_cases, limit): i = 0 for date in dates: create_html(i, 'deaths', 'Deaths', "Greys", max_cases, date) i = i + 1 def convert_to_png(file, limit): if file == "confirmed": directory = "geo_us_cases" else: directory = "geo_us_deaths" for i in range(len(dates) - limit, len(dates)): options = webdriver.ChromeOptions() options.add_argument('headless') options.add_argument('window-size=1280x720'); driver = webdriver.Chrome(ChromeDriverManager().install(), options=options) driver.get("file:///Users/brianpark/Projects/COVID-19-Visualizations/geo/" + directory + "_html/" + str(i).zfill(3) + ".html") time.sleep(3) driver.save_screenshot("geo/" + directory + "/" + str(i).zfill(3) + ".png") driver.close()Compile TimelapsesCreate beautiful, and yet alarming dynamic graphs that implement time with COVID-19. `ffmpeg` is used to compile the `.gif` and `.mp4` files. Please make sure you have that installed. 
Can be simply done in your terminal via `pip install ffmpeg`def compile_timelapse(): !rm timelapses/global_confirmed_timelapse.mp4 !rm timelapses/global_deaths_timelapse.mp4 !rm timelapses/global_recovered_timelapse.mp4 !rm timelapses/global_confirmed_timelapse.gif !rm timelapses/global_deaths_timelapse.gif !rm timelapses/global_recovered_timelapse.gif !ffmpeg -r 15 -i geo/geo_confirmed/%03d.png -c:v libx264 -r 30 -pix_fmt yuv420p timelapses/global_confirmed_timelapse.mp4 !ffmpeg -r 15 -i geo/geo_deaths/%03d.png -c:v libx264 -r 30 -pix_fmt yuv420p timelapses/global_deaths_timelapse.mp4 !ffmpeg -r 15 -i geo/geo_recovered/%03d.png -c:v libx26|4 -r 30 -pix_fmt yuv420p timelapses/global_recovered_timelapse.mp4 !ffmpeg -i timelapses/global_confirmed_timelapse.mp4 timelapses/global_confirmed_timelapse.gif !ffmpeg -i timelapses/global_deaths_timelapse.mp4 timelapses/global_deaths_timelapse.gif !ffmpeg -i timelapses/global_recovered_timelapse.mp4 timelapses/global_recovered_timelapse.gif !rm timelapses/global_confirmed_normalized_timelapse.mp4 !rm timelapses/global_deaths_normalized_timelapse.mp4 !rm timelapses/global_recovered_normalized_timelapse.mp4 !rm timelapses/global_confirmed_normalized_timelapse.gif !rm timelapses/global_deaths_normalized_timelapse.gif !rm timelapses/global_recovered_normalized_timelapse.gif !ffmpeg -r 15 -i geo/geo_confirmed_normalized/%03d.png -c:v libx264 -r 30 -pix_fmt yuv420p timelapses/global_confirmed_normalized_timelapse.mp4 !ffmpeg -r 15 -i geo/geo_deaths_normalized/%03d.png -c:v libx264 -r 30 -pix_fmt yuv420p timelapses/global_deaths_normalized_timelapse.mp4 !ffmpeg -r 15 -i geo/geo_recovered_normalized/%03d.png -c:v libx264 -r 30 -pix_fmt yuv420p timelapses/global_recovered_normalized_timelapse.mp4 !ffmpeg -i timelapses/global_confirmed_normalized_timelapse.mp4 timelapses/global_confirmed_normalized_timelapse.gif !ffmpeg -i timelapses/global_deaths_normalized_timelapse.mp4 timelapses/global_deaths_normalized_timelapse.gif !ffmpeg -i timelapses/global_recovered_normalized_timelapse.mp4 timelapses/global_recovered_normalized_timelapse.gif !rm timelapses/us_confirmed_timelapse.mp4 !rm timelapses/us_confirmed_timelapse.gif !rm timelapses/us_deaths_timelapse.mp4 !rm timelapses/us_deaths_timelapse.gif !ffmpeg -r 15 -i geo/geo_us_cases/%03d.png -c:v libx264 -r 30 -pix_fmt yuv420p timelapses/us_confirmed_timelapse.mp4 !ffmpeg -i timelapses/us_confirmed_timelapse.mp4 timelapses/us_confirmed_timelapse.gif !ffmpeg -r 15 -i geo/geo_us_deaths/%03d.png -c:v libx264 -r 30 -pix_fmt yuv420p timelapses/us_deaths_timelapse.mp4 !ffmpeg -i timelapses/us_deaths_timelapse.mp4 timelapses/us_deaths_timelapse.gif update_us_cases_by_counties(10000, 5)#len(dates) update_us_deaths_by_counties(1000, 5) convert_to_png("confirmed", 5) #len(dates)) #10 convert_to_png("deaths", 5) compile_timelapse() ## print(px.colors.sequential.Inferno) """#Debugging cell with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: counties = json.load(response) plotly.io.orca.config.executable = '/path/to/orca' plotly.io.orca.config.save() df = pd.read_csv("databases/nytimes/us-counties.csv", dtype={"fips": str}) df = df[df['date']=="2020-05-08"] df['cases'] = 1 / np.log10(df["cases"]) Solarcorona = ['rgb(252, 222, 156)', 'rgb(250, 164, 118)', 'rgb(240, 116, 110)', 'rgb(227, 79, 111)', 'rgb(220, 57, 119)', 'rgb(185, 37, 122)', 'rgb(124, 29, 111)', '#1b0c41', '#000004'] #Solarcorona = ['rgb(252, 222, 156)', 'rgb(250, 164, 118)', 'rgb(220, 57, 119)', 
'rgb(185, 37, 122)', 'rgb(124, 29, 111)', '#1b0c41', '#000004', '#000004', '#000004', '#000004', '#000004', '#000004', '#000004', '#000004', '#000004'] fig = px.choropleth(df, geojson=counties, locations='fips', color='cases', color_continuous_scale=Solarcorona, range_color=(0, 100000), scope="usa", labels={'cases':'cases'} ) fig.update_layout(#margin={"r":0,"t":0,"l":0,"b":0}), title_text = 'USA COVID-19 Cases by County on ' + "05-08" + " GitHub:briancpark", coloraxis_colorbar=dict( title="Population", ticktext=["1M", "10M", "100M", "1B"], )) fig.show() """ """ #Debugging cell with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: counties = json.load(response) plotly.io.orca.config.executable = '/path/to/orca' plotly.io.orca.config.save() df = pd.read_csv("databases/nytimes/us-counties.csv", dtype={"fips": str}) df = df[df['date']=="2020-05-08"] Solarcorona = ['rgb(252, 222, 156)', 'rgb(250, 164, 118)', 'rgb(240, 116, 110)', 'rgb(227, 79, 111)', 'rgb(220, 57, 119)', 'rgb(185, 37, 122)', 'rgb(124, 29, 111)', '#1b0c41', '#000004'] #Solarcorona = ['rgb(252, 222, 156)', 'rgb(250, 164, 118)', 'rgb(220, 57, 119)', 'rgb(185, 37, 122)', 'rgb(124, 29, 111)', '#1b0c41', '#000004', '#000004', '#000004', '#000004', '#000004', '#000004', '#000004', '#000004', '#000004'] fig = px.choropleth(df, geojson=counties, locations='fips', color='deaths', color_continuous_scale="Greys", range_color=(0, 100), scope="usa", labels={'deaths':'deaths'}, ) fig.update_layout(#margin={"r":0,"t":0,"l":0,"b":0}), title_text = 'USA COVID-19 Deaths by County on ' + "05-08" + " GitHub:briancpark") fig.show() """ """ #Debugging this cell df = pd.read_csv("databases/nytimes/us-counties.csv", dtype={"fips": str}) df['fips'] = pd.to_numeric(df['fips'], errors='coerce') df = df.dropna(subset=['fips']) df['fips'] = df['fips'].astype(int) df = df[df['date']=="2020-05-05"] colorscale = ["#f7fbff", "#ebf3fb", "#deebf7", "#d2e3f3", "#c6dbef", "#b3d2e9", "#9ecae1", "#85bcdb", "#6baed6", "#57a0ce", "#4292c6", "#3082be", "#2171b5", "#1361a9", "#08519c", "#0b4083", "#08306b"] endpts = list(np.linspace(1, 1000, len(colorscale) - 1)) fips = df['fips'].tolist() values = df['cases'].tolist() fig = ff.create_choropleth( fips=fips, values=values, scope=['usa'], binning_endpoints=endpts, colorscale=colorscale, show_state_data=False, show_hover=True, asp = 2.9, title_text = 'COVID-19 in USA', legend_title = 'Cases' ) fig.layout.template = None fig.show() #plotly.io.write_image(fig, "image.png", format=None, scale=None, width=None, height=None) """Machine Learning MethodsA start. Goal is to predict coronavirus cases before a lockdown has initiated, and then compare it to real data. 
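For reference, the first attempt below uses scipy's `curve_fit` to fit an exponential model of the form $y(t) = a\,e^{-b\,t} + c$ to the first 22 days of New York case counts (the pre-lockdown window), and then evaluates the fitted curve over the full date range so it can be compared against the real trajectory.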
I will learn how machine learning works somedaydf = COVID19_US_states.loc[COVID19_US_states['state'] == 'New York'].drop(['fips'], axis=1) pd.to_datetime(df['date']) df = df.set_index(df['date']).drop(['date'], axis=1) df = df.sort_values(by=['date']) #df.plot() #ates_test = nydates = df.index.to_list()[:22] lockdown_dates = df.index.to_list()[22:] df.head(22) from scipy.optimize import curve_fit x = np.array(range(len(nydates))) y = np.array(df['cases'].tolist()[:22]) def func(x, a, b, c): return a * np.exp(-b * x) + c popt, pcov = curve_fit(func, x, y, p0=(1, 1e-6, 1)) popt xx = np.array(range(len(nydates) + len(lockdown_dates))) yy = func(xx, *popt) #plt.plot(x, y, 'ko') #plt.plot(xx, yy) plt.plot(df.index.to_list(), df['cases'].to_list())Drafts and Debugging For More Visualizations!import plotly.express as px df = px.data.election() geojson = px.data.election_geojson() #print(df["district"][2]) #print(geojson["features"][0]["properties"]) df = COVID19_US_counties[COVID19_US_counties['state'] == 'Alabama'] df = df[df['date'] == '2020-04-01'] df from urllib.request import urlopen import json with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: counties = json.load(response) import pandas as pd df = df import plotly.express as px fig = px.choropleth(df, geojson=counties, locations='fips', color='cases', color_continuous_scale="Viridis", range_color=(0, 10000), scope="usa", labels={'cases':'cases'} ) #fig.show() counties def retrieve_state_json(state): custom = counties.copy() for county in custom["features"]: if county['properties']["STATE"] != "01": custom["features"].remove(county) custom = counties.copy() for county in custom["features"]: if county['properties']["STATE"] != "01": custom["features"].remove(county) custom from urllib.request import urlopen import json with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: counties = json.load(response) import pandas as pd df = df import plotly.express as px fig = px.choropleth(df, geojson=custom, locations='fips', color='cases', color_continuous_scale="Viridis", range_color=(0, 10000), scope="usa", labels={'cases':'cases'} ) fig.update_geos(fitbounds="locations") #fig.show() import plotly.express as px df = df geojson = custom fig = px.choropleth(df, geojson=custom, color="cases", locations="county", featureidkey="01001", projection="mercator" ) fig.update_geos(fitbounds="locations", visible=True) fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0}) #fig.show() custom custom['features'][0]BREAK#New dataframes mapping = {country.name: country.alpha_3 for country in pycountry.countries} geo_confirmed = COVID19_confirmed.rename(index={'Korea, South': 'Korea, Republic of', 'US' :'United States', 'Taiwan*' :'Taiwan, Province of China', 'Vietnam':'Viet Nam', 'Russia': 'Russian Federation', 'Iran': 'Iran, Islamic Republic of', 'Tanzania': 'Tanzania, United Republic of', 'Laos': "Lao People's Democratic Republic", 'Syria': 'Syrian Arab Republic', 'Brunei': 'Brunei Darussalam', 'Venezuela': 'Venezuela, Bolivarian Republic of', 'Bolivia': 'Bolivia, Plurinational State of', 'Moldova': 'Moldova, Republic of'}).rename(index = lambda name: mapping.get(name)) geo_deaths = COVID19_deaths.rename(index={'Korea, South': 'Korea, Republic of', 'US' :'United States', 'Taiwan*' :'Taiwan, Province of China', 'Vietnam':'Viet Nam', 'Russia': 'Russian Federation', 'Iran': 'Iran, Islamic Republic of', 'Tanzania': 'Tanzania, United Republic of', 
'Laos': "Lao People's Democratic Republic", 'Syria': 'Syrian Arab Republic', 'Brunei': 'Brunei Darussalam', 'Venezuela': 'Venezuela, Bolivarian Republic of', 'Bolivia': 'Bolivia, Plurinational State of', 'Moldova': 'Moldova, Republic of'}).rename(index = lambda name: mapping.get(name)) geo_recovered = COVID19_recovered.rename(index={'Korea, South': 'Korea, Republic of', 'US' :'United States', 'Taiwan*' :'Taiwan, Province of China', 'Vietnam':'Viet Nam', 'Russia': 'Russian Federation', 'Iran': 'Iran, Islamic Republic of', 'Tanzania': 'Tanzania, United Republic of', 'Laos': "Lao People's Democratic Republic", 'Syria': 'Syrian Arab Republic', 'Brunei': 'Brunei Darussalam', 'Venezuela': 'Venezuela, Bolivarian Republic of', 'Bolivia': 'Bolivia, Plurinational State of', 'Moldova': 'Moldova, Republic of'}).rename(index = lambda name: mapping.get(name)) """ df = geo_deaths fig = px.choropleth(df, locations=df.index, color="2020-02-10", # lifeExp is a column of gapminder range_color=(0, 10000), color_continuous_scale=Solarcorona, ) fig.update_layout( title_text = 'COVID-19 Cases Confirmed GitHub: briancpark', ) fig.show() """Mask UsageData was provided by the New York Times. Playing around with more data here.#Load mask usage database US_masks = pd.read_csv("databases/nytimes/mask-use/mask-use-by-county.csv") US_masks US_masks['COUNTYFP'] = US_masks['COUNTYFP'].apply(lambda x: '{0:0>5}'.format(x)) fig = px.choropleth(US_masks, geojson=counties, locations='COUNTYFP', color='ALWAYS', color_continuous_scale="reds", range_color=(0, 1), scope="usa", labels={'masks':'always'} ) fig.update_layout(title_text = 'US Citizens Who Always Wears Masks in Social Distancing GitHub:briancpark') fig.write_html("geo/us_mask_usage_html/always.html") #fig.show() fig = px.choropleth(US_masks, geojson=counties, locations='COUNTYFP', color='FREQUENTLY', color_continuous_scale="reds", range_color=(0, 1), scope="usa", labels={'masks':'always'} ) fig.update_layout(title_text = 'US Citizens Who Frequently Wears Masks in Social Distancing GitHub:briancpark') fig.write_html("geo/us_mask_usage_html/frequently.html") #fig.show() fig = px.choropleth(US_masks, geojson=counties, locations='COUNTYFP', color='SOMETIMES', color_continuous_scale="reds", range_color=(0, 1), scope="usa", labels={'masks':'always'} ) fig.update_layout(title_text = 'US Citizens Who Sometimes Wears Masks in Social Distancing GitHub:briancpark') fig.write_html("geo/us_mask_usage_html/sometimes.html") #fig.show() fig = px.choropleth(US_masks, geojson=counties, locations='COUNTYFP', color='RARELY', color_continuous_scale="reds", range_color=(0, 1), scope="usa", labels={'masks':'always'} ) fig.update_layout(title_text = 'US Citizens Who Rarely Wears Masks in Social Distancing GitHub:briancpark') fig.write_html("geo/us_mask_usage_html/rarely.html") #fig.show() fig = px.choropleth(US_masks, geojson=counties, locations='COUNTYFP', color='NEVER', color_continuous_scale="reds", range_color=(0, 1), scope="usa", labels={'masks':'always'} ) fig.update_layout(title_text = 'US Citizens Who Never Wears Masks in Social Distancing GitHub:briancpark') fig.write_html("geo/us_mask_usage_html/never.html") #fig.show() options = webdriver.ChromeOptions() options.add_argument('headless') options.add_argument('window-size=1920x1080'); driver = webdriver.Chrome(ChromeDriverManager().install(), options=options) driver.get("file:///Users/brianpark/Projects/COVID-19-Visualizations/geo/us_mask_usage_html/always.html") time.sleep(3) driver.save_screenshot("geo/us_mask_usage/always.png") 
driver.close() options = webdriver.ChromeOptions() options.add_argument('headless') options.add_argument('window-size=1920x1080'); driver = webdriver.Chrome(ChromeDriverManager().install(), options=options) driver.get("file:///Users/brianpark/Projects/COVID-19-Visualizations/geo/us_mask_usage_html/sometimes.html") time.sleep(3) driver.save_screenshot("geo/us_mask_usage/sometimes.png") driver.close() options = webdriver.ChromeOptions() options.add_argument('headless') options.add_argument('window-size=1920x1080'); driver = webdriver.Chrome(ChromeDriverManager().install(), options=options) driver.get("file:///Users/brianpark/Projects/COVID-19-Visualizations/geo/us_mask_usage_html/frequently.html") time.sleep(3) driver.save_screenshot("geo/us_mask_usage/frequently.png") driver.close() options = webdriver.ChromeOptions() options.add_argument('headless') options.add_argument('window-size=1920x1080'); driver = webdriver.Chrome(ChromeDriverManager().install(), options=options) driver.get("file:///Users/brianpark/Projects/COVID-19-Visualizations/geo/us_mask_usage_html/rarely.html") time.sleep(3) driver.save_screenshot("geo/us_mask_usage/rarely.png") driver.close() options = webdriver.ChromeOptions() options.add_argument('headless') options.add_argument('window-size=1920x1080'); driver = webdriver.Chrome(ChromeDriverManager().install(), options=options) driver.get("file:///Users/brianpark/Projects/COVID-19-Visualizations/geo/us_mask_usage_html/never.html") time.sleep(3) driver.save_screenshot("geo/us_mask_usage/never.png") driver.close() import subprocess as cmd from datetime import date cmd.run("git add .", check=True, shell=True) cmd.run('git commit -m ' + '''"''' + str(date.today()) + ''' update"''', check=True, shell=True) #os.system('git push') !git pushLipids# download subprocess.run(['executables/get_lipid_data.sh']); # process usecols = range(1,11) skiprows = [168] df = pd.read_excel('data/lipids/1-s2.0-S0092867415006418-mmc3.xlsx', header = 1, index_col = 0, usecols = usecols, skiprows = lambda x : x in skiprows) C = np.corrcoef(np.array(df)) # save data np.savetxt('data/lipids/lipid_corr.txt', C, fmt='%1.6f')Genes >>> CAUTION - Big Data!# download subprocess.run(['executables/get_gene_data.sh']); # unzip subprocess.run(['executables/unzip_gene_data.sh']); # process df = pd.read_csv('data/genes/circadiaNET_correlation_matrices/arabidopsis_thaliana_correlation_matrix.txt', header = 0, index_col = 0, delimiter = ' ') C = np.corrcoef(np.array(df)) # save np.savetxt('data/genes/gene_corr.txt', C, fmt='%1.6f')Cells# process name = 'FLS18 TNF' df = pd.read_excel('data/cells/connectivity_FLS11_data.xls', sheet_name=name) D = squareform(pdist(df[['Position X','Position Y','Position Z']])) # save np.savetxt('data/cells/cell_D.txt',D)Soil GIF# download subprocess.run(['executables/get_soil_data.sh']); # extract frames subprocess.run(['executables/extract_frames.sh']);This might take a while...     
(~4h on my computer)# process path = 'data/soil/frames' pic_list = sorted(file for file in os.listdir(path) if file.endswith('jpg')) n = len(pic_list) D_mse = np.zeros([n,n]) C_ssim = np.ones([n,n]) t1 = time.time() for i, (jpg_a, jpg_b) in enumerate(combinations(pic_list, 2), 0): node_a = int(*re.findall(r'\d+', jpg_a)) - 1 node_b = int(*re.findall(r'\d+', jpg_b)) - 1 if i%1000==0: t2 = time.time() print(f'{i/782.1:.3f}% - {t2-t1:.3f}sec') img_a = mpimg.imread(f'{path}/{jpg_a}',0) img_b = mpimg.imread(f'{path}/{jpg_b}',0) img_a = cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY) img_b = cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY) D_mse[node_a, node_b] = mse( img_a, img_b) D_mse[node_b, node_a] = D_mse[node_a, node_b] C_ssim[node_a, node_b] = ssim(img_a, img_b) C_ssim[node_b, node_a] = C_ssim[node_a, node_b] # save data np.savetxt('data/soil/soil_gray_mse.txt' , D_mse , fmt='%1.6f') np.savetxt('data/soil/soil_gray_ssim.txt', C_ssim, fmt='%1.6f')IFNrootDir = "homo_sapiens.sbml" # If SMBL files are not yet in the working directory if not rootDir in os.listdir(): # Download all human reactions from Reactome in SBML format urllib.request.urlretrieve("https://reactome.org/download/current/homo_sapiens.3.1.sbml.tgz", "/tmp/reactome_smbl.tgz") tar = tarfile.open("/tmp/reactome_smbl.tgz") tar.extractall(rootDir) tar.close() pathwayFile = 'R-HSA-913531.sbml' sbml = ET.parse(rootDir + '/' + pathwayFile) model = sbml.getroot().find("{http://www.sbml.org/sbml/level3/version1/core}model") reactions = model.find("{http://www.sbml.org/sbml/level3/version1/core}listOfReactions") # List species annotated as "simple chemical" to remove them from networks, # to avoid creating star structures with the most common small molecules smallMolec = {term.attrib['id'] for term in model.find("{http://www.sbml.org/sbml/level3/version1/core}listOfSpecies") if ('sboTerm' in term.attrib.keys()) and (term.attrib['sboTerm'] == "SBO:0000247")} pathwayName = model.attrib['name'] pathwayID = model.attrib['id'] G = nx.DiGraph() # For each reaction in the pathway for reaction in reactions: products = reaction.find("{http://www.sbml.org/sbml/level3/version1/core}listOfProducts") reagents = reaction.find("{http://www.sbml.org/sbml/level3/version1/core}listOfReactants") if not products or not reagents: # print("No products or no reagents") break products = {product.attrib['species'] for product in products if product.attrib['species'] not in smallMolec} reagents = {reagent.attrib['species'] for reagent in reagents if reagent.attrib['species'] not in smallMolec} # Add edge from reagents to products G.add_edges_from([(r,p) for r in reagents for p in products]) nx.write_edgelist(G, 'data/IFNs/IFN_edgelist.csv')Arctic GIF Temperatures Image Processing (?) 
Watts-Strogatz# generate data with time stamps N = 100 k = 6 p = 1.0 for i in range(7): W = nx.watts_strogatz_graph(N,k,p) dgm = rng.diagram(W, induce=True) name = str(int(time.time()*10**6))+'.csv' rng.save_dgm(dgm, f'data/watts_strogatz/dgms/N{N}/k{k}/p{p:.6f}/{name}') # collect diagrams to a single score sheet N = 1024 k = 8 path = f'data/watts_strogatz/dgms/N{N}/k{k}' p_list = sorted([float(file[1:]) for file in os.listdir(path) if file.startswith('p')]) df = pd.DataFrame() for p in p_list: scores_tmp = [] for i, file in enumerate(os.listdir(f'{path}/p{p:.6f}')): if not file.endswith('.csv'): continue dgm = rng.load_dgm(fname=f'{path}/p{p:.6f}/{file}') scores_tmp.append(dgm.GGS) df_tmp = pd.DataFrame({p:scores_tmp}) df = pd.concat([df,df_tmp], axis=1) df.to_csv(f'data/watts_strogatz/GGS/N{N}/k{k}/GGS.csv')Erdos-RenyiN = 2**8 path = f'data/erdos_renyi/ER_annealing_10000/dgms/N{N}' p_list = sorted([float(file[1:]) for file in os.listdir(path) if file.startswith('p')]) df = pd.DataFrame() for p in p_list: scores_tmp = [] for i, file in enumerate(os.listdir(f'{path}/p{p:.6f}')): if not file.endswith('.csv'): continue dgm = rng.load_dgm(fname=f'{path}/p{p:.6f}/{file}') scores_tmp.append(dgm.GGS) df_tmp = pd.DataFrame({p:scores_tmp}) df = pd.concat([df,df_tmp], axis=1) df.to_csv(f'data/erdos_renyi/ER_annealing_10000/GGS/{N}.csv')CountVectorizer with Mulinomial Naive Bayes (Benchmark Model)from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer from sklearn.naive_bayes import BernoulliNB, MultinomialNB countVect = CountVectorizer() X_train_countVect = countVect.fit_transform(X_train_cleaned) print("Number of features : %d \n" %len(countVect.get_feature_names())) #6378 print("Show some feature names : \n", countVect.get_feature_names()[::1000]) # Train MultinomialNB classifier mnb = MultinomialNB() mnb.fit(X_train_countVect, y_train) import pickle pickle.dump(countVect,open('countVect_imdb.pkl','wb')) from sklearn import metrics from sklearn.metrics import accuracy_score,roc_auc_score def modelEvaluation(predictions): ''' Print model evaluation to predicted result ''' print ("\nAccuracy on validation set: {:.4f}".format(accuracy_score(y_test, predictions))) print("\nAUC score : {:.4f}".format(roc_auc_score(y_test, predictions))) print("\nClassification report : \n", metrics.classification_report(y_test, predictions)) print("\nConfusion Matrix : \n", metrics.confusion_matrix(y_test, predictions)) predictions = mnb.predict(countVect.transform(X_test_cleaned)) modelEvaluation(predictions) import pickle pickle.dump(mnb,open('Naive_Bayes_model_imdb.pkl','wb'))TfidfVectorizer with Logistic Regressionfrom sklearn.linear_model import LogisticRegression tfidf = TfidfVectorizer(min_df=5) #minimum document frequency of 5 X_train_tfidf = tfidf.fit_transform(X_train) print("Number of features : %d \n" %len(tfidf.get_feature_names())) #1722 print("Show some feature names : \n", tfidf.get_feature_names()[::1000]) # Logistic Regression lr = LogisticRegression() lr.fit(X_train_tfidf, y_train) feature_names = np.array(tfidf.get_feature_names()) sorted_coef_index = lr.coef_[0].argsort() print('\nTop 10 features with smallest coefficients :\n{}\n'.format(feature_names[sorted_coef_index[:10]])) print('Top 10 features with largest coefficients : \n{}'.format(feature_names[sorted_coef_index[:-11:-1]])) predictions = lr.predict(tfidf.transform(X_test_cleaned)) modelEvaluation(predictions) from sklearn.model_selection import GridSearchCV from sklearn import metrics from sklearn.metrics import 
roc_auc_score, accuracy_score from sklearn.pipeline import Pipeline estimators = [("tfidf", TfidfVectorizer()), ("lr", LogisticRegression())] model = Pipeline(estimators) params = {"lr__C":[0.1, 1, 10], "tfidf__min_df": [1, 3], "tfidf__max_features": [1000, None], "tfidf__ngram_range": [(1,1), (1,2)], "tfidf__stop_words": [None, "english"]} grid = GridSearchCV(estimator=model, param_grid=params, scoring="accuracy", n_jobs=-1) grid.fit(X_train_cleaned, y_train) print("The best paramenter set is : \n", grid.best_params_) # Evaluate on the validaton set predictions = grid.predict(X_test_cleaned) modelEvaluation(predictions)The best paramenter set is : {'lr__C': 10, 'tfidf__max_features': None, 'tfidf__min_df': 3, 'tfidf__ngram_range': (1, 2), 'tfidf__stop_words': None} Accuracy on validation set: 0.8720 AUC score : 0.8720 Classification report : precision recall f1-score support 0 0.87 0.87 0.87 249 1 0.87 0.88 0.87 251 accuracy 0.87 500 macro avg 0.87 0.87 0.87 500 weighted avg 0.87 0.87 0.87 500 Confusion Matrix : [[216 33] [ 31 220]]Word2Vec**Step 1 : Parse review text to sentences (Word2Vec model takes a list of sentences as inputs)****Step 2 : Create volcabulary list using Word2Vec model.****Step 3 : Transform each review into numerical representation by computing average feature vectors of words therein.****Step 4 : Fit the average feature vectors to Random Forest Classifier.**tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') def parseSent(review, tokenizer, remove_stopwords=False): raw_sentences = tokenizer.tokenize(review.strip()) sentences = [] for raw_sentence in raw_sentences: if len(raw_sentence) > 0: sentences.append(cleanText(raw_sentence, remove_stopwords, split_text=True)) return sentences # Parse each review in the training set into sentences sentences = [] for review in X_train_cleaned: sentences += parseSent(review, tokenizer,remove_stopwords=False) print('%d parsed sentence in the training set\n' %len(sentences)) print('Show a parsed sentence in the training set : \n', sentences[10])4500 parsed sentence in the training set Show a parsed sentence in the training set : ['the', 'crimson', 'rivers', 'is', 'one', 'of', 'the', 'most', 'over', 'directed', 'over', 'the', 'top', 'over', 'everything', 'mess', 'i', 've', 'ever', 'seen', 'come', 'out', 'of', 'france', 'there', 's', 'nothing', 'worse', 'than', 'a', 'french', 'production', 'trying', 'to', 'out', 'do', 'films', 'made', 'in', 'hollywood', 'and', 'cr', 'is', 'a', 'perfect', 'example', 'of', 'such', 'a', 'wannabe', 'horror', 'action', 'buddy', 'flick', 'i', 'almost', 'stopped', 'it', 'halfway', 'through', 'because', 'i', 'knew', 'it', 'wouldn', 't', 'amount', 'to', 'anything', 'but', 'french', 'guys', 'trying', 'to', 'show', 'off', 'the', 'film', 'starts', 'off', 'promisingly', 'like', 'some', 'sort', 'of', 'expansive', 'horror', 'film', 'but', 'it', 'quickly', 'shifts', 'genres', 'from', 'horror', 'to', 'action', 'to', 'x', 'files', 'type', 'to', 'buddy', 'flick', 'that', 'in', 'the', 'end', 'cr', 'is', 'all', [...]Creating Volcabulary List usinhg Word2Vec Modelfrom wordcloud import WordCloud from gensim.models import word2vec from gensim.models.keyedvectors import KeyedVectors num_features = 300 #embedding dimension min_word_count = 10 num_workers = 4 context = 10 downsampling = 1e-3 print("Training Word2Vec model ...\n") w2v = Word2Vec(sentences, workers=num_workers, min_count = min_word_count,\ window = context, sample = downsampling) w2v.init_sims(replace=True) 
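# Note on the Word2Vec cell above: only the module `word2vec` (plus KeyedVectors) is
# imported, so the bare `Word2Vec(...)` call assumes `from gensim.models import Word2Vec`
# was run elsewhere. The constructor is also never told the embedding size, so the vectors
# would default to 100 dimensions rather than the intended num_features = 300.
# A hedged alternative call (gensim 3.x argument names assumed):
# from gensim.models import Word2Vec
# w2v = Word2Vec(sentences, workers=num_workers, size=num_features,
#                min_count=min_word_count, window=context, sample=downsampling)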
w2v.save("w2v_300features_10minwordcounts_10context") #save trained word2vec model print("Number of words in the vocabulary list : %d \n" %len(w2v.wv.index2word)) #4016 print("Show first 10 words in the vocalbulary list vocabulary list: \n", w2v.wv.index2word[0:10])Training Word2Vec model ...Averaging Feature Vectorsdef makeFeatureVec(review, model, num_features): ''' Transform a review to a feature vector by averaging feature vectors of words appeared in that review and in the volcabulary list created ''' featureVec = np.zeros((num_features,),dtype="float32") nwords = 0. index2word_set = set(model.wv.index2word) #index2word is the volcabulary list of the Word2Vec model isZeroVec = True for word in review: if word in index2word_set: nwords = nwords + 1. featureVec = np.add(featureVec, model[word]) isZeroVec = False if isZeroVec == False: featureVec = np.divide(featureVec, nwords) return featureVec def getAvgFeatureVecs(reviews, model, num_features): ''' Transform all reviews to feature vectors using makeFeatureVec() ''' counter = 0 reviewFeatureVecs = np.zeros((len(reviews),num_features),dtype="float32") for review in reviews: reviewFeatureVecs[counter] = makeFeatureVec(review, model,num_features) counter = counter + 1 return reviewFeatureVecs X_train_cleaned = [] for review in X_train: X_train_cleaned.append(cleanText(review, remove_stopwords=True, split_text=True)) trainVector = getAvgFeatureVecs(X_train_cleaned, w2v, num_features) print("Training set : %d feature vectors with %d dimensions" %trainVector.shape) # Get feature vectors for validation set X_test_cleaned = [] for review in X_test: X_test_cleaned.append(cleanText(review, remove_stopwords=True, split_text=True)) testVector = getAvgFeatureVecs(X_test_cleaned, w2v, num_features) print("Validation set : %d feature vectors with %d dimensions" %testVector.shape)Random Forest Classiferfrom sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=1000) rf.fit(trainVector, y_train) predictions = rf.predict(testVector) modelEvaluation(predictions)LSTM**Step 1 : Prepare X_train and X_test to 2D tensor.** **Step 2 : Train a simple LSTM (embeddign layer => LSTM layer => dense layer).** **Step 3 : Compile and fit the model using log loss function and ADAM optimizer.**from keras.preprocessing import sequence from keras.utils import np_utils from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation, Lambda from keras.layers.embeddings import Embedding from keras.layers.recurrent import LSTM, SimpleRNN, GRU from keras.preprocessing.text import Tokenizer from collections import defaultdict from keras.layers.convolutional import Convolution1D from keras import backend as K from keras.layers.embeddings import Embedding top_words = 40000 maxlen = 200 batch_size = 62 nb_classes = 4 nb_epoch = 6 # Vectorize X_train and X_test to 2D tensor tokenizer = Tokenizer(nb_words=top_words) #only consider top 20000 words in the corpse tokenizer.fit_on_texts(X_train) # tokenizer.word_index #access word-to-index dictionary of trained tokenizer sequences_train = tokenizer.texts_to_sequences(X_train) sequences_test = tokenizer.texts_to_sequences(X_test) X_train_seq = sequence.pad_sequences(sequences_train, maxlen=maxlen) X_test_seq = sequence.pad_sequences(sequences_test, maxlen=maxlen) # one-hot encoding of y_train and y_test y_train_seq = np_utils.to_categorical(y_train, nb_classes) y_test_seq = np_utils.to_categorical(y_test, nb_classes) print('X_train shape:', X_train_seq.shape) 
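# Shape check for the prints in this cell (based on the values set above): X_train_seq and
# X_test_seq should be (n_samples, maxlen=200), y_train_seq and y_test_seq
# (n_samples, nb_classes). Two apparent inconsistencies, flagged here as observations
# rather than fixes: the labels are binary (classes 0 and 1 in the earlier classification
# report), so nb_classes = 2 with a 'categorical_crossentropy' loss would match the
# softmax output more naturally than nb_classes = 4 with 'binary_crossentropy'; and the
# inline comment mentions the top 20000 words while top_words is actually set to 40000.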
print("========================================") print('X_test shape:', X_test_seq.shape) print("========================================") print('y_train shape:', y_train_seq.shape) print("========================================") print('y_test shape:', y_test_seq.shape) print("========================================") model1 = Sequential() model1.add(Embedding(top_words, 128, dropout=0.2)) model1.add(LSTM(128, dropout_W=0.2, dropout_U=0.2)) model1.add(Dense(nb_classes)) model1.add(Activation('softmax')) model1.summary() model1.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model1.fit(X_train_seq, y_train_seq, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1) # Model evluation score = model1.evaluate(X_test_seq, y_test_seq, batch_size=batch_size) print('Test loss : {:.4f}'.format(score[0])) print('Test accuracy : {:.4f}'.format(score[1])) len(X_train_seq),len(y_train_seq) print("Size of weight matrix in the embedding layer : ", \ model1.layers[0].get_weights()[0].shape) # get weight matrix of the hidden layer print("Size of weight matrix in the hidden layer : ", \ model1.layers[1].get_weights()[0].shape) # get weight matrix of the output layer print("Size of weight matrix in the output layer : ", \ model1.layers[2].get_weights()[0].shape) import pickle pickle.dump(model1,open('model1.pkl','wb'))LSTM with Word2Vec Embedding2v = Word2Vec.load("w2v_300features_10minwordcounts_10context") embedding_matrix = w2v.wv.syn0 print("Shape of embedding matrix : ", embedding_matrix.shape) top_words = embedding_matrix.shape[0] #4016 maxlen = 300 batch_size = 62 nb_classes = 4 nb_epoch = 7 # Vectorize X_train and X_test to 2D tensor tokenizer = Tokenizer(nb_words=top_words) #only consider top 20000 words in the corpse tokenizer.fit_on_texts(X_train) # tokenizer.word_index #access word-to-index dictionary of trained tokenizer sequences_train = tokenizer.texts_to_sequences(X_train) sequences_test = tokenizer.texts_to_sequences(X_test) X_train_seq1 = sequence.pad_sequences(sequences_train, maxlen=maxlen) X_test_seq1 = sequence.pad_sequences(sequences_test, maxlen=maxlen) # one-hot encoding of y_train and y_test y_train_seq1 = np_utils.to_categorical(y_train, nb_classes) y_test_seq1 = np_utils.to_categorical(y_test, nb_classes) print('X_train shape:', X_train_seq1.shape) print("========================================") print('X_test shape:', X_test_seq1.shape) print("========================================") print('y_train shape:', y_train_seq1.shape) print("========================================") print('y_test shape:', y_test_seq1.shape) print("========================================") len(X_train_seq1),len(y_train_seq1) embedding_layer = Embedding(embedding_matrix.shape[0], #4016 embedding_matrix.shape[1], #300 weights=[embedding_matrix]) model2 = Sequential() model2.add(embedding_layer) model2.add(LSTM(128, dropout_W=0.2, dropout_U=0.2)) model2.add(Dense(nb_classes)) model2.add(Activation('softmax')) model2.summary() model2.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model2.fit(X_train_seq1, y_train_seq1, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1) # Model evaluation score = model2.evaluate(X_test_seq1, y_test_seq1, batch_size=batch_size) print('Test loss : {:.4f}'.format(score[0])) print('Test accuracy : {:.4f}'.format(score[1])) print("Size of weight matrix in the embedding layer : ", \ model2.layers[0].get_weights()[0].shape) print("Size of weight matrix in the hidden layer : ", \ model2.layers[1].get_weights()[0].shape) 
print("Size of weight matrix in the output layer : ", \ model2.layers[2].get_weights()[0].shape)Lesson Twofrom tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense model = Sequential() #create model structure model.add(Dense(256, activation='relu', input_shape=(new_dimension,))) model.add(Dense(128, activation='relu')) model.add(Dense(no_labels, activation='softmax')) #compile the model model.compile(optimizer='adam', loss=tf.keras.losses.categorical_crossentropy, metrics=['accuracy']) #train the model history = model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=20, batch_size=1000) #evaluate model with test set test_loss, test_accuracy = model.evaluate(test_images, test_labels) print('Test loss: {}'.format(test_loss)) print('Test accuracy: {}'.format(test_accuracy)) plt.figure() plt.plot(history.history['loss'], 'blue') plt.plot(history.history['val_loss'], 'red') plt.legend(['Training loss', 'Validation Loss']) plt.xlabel('Epochs') plt.ylabel('Loss') plt.title('Loss Curves - before regularisation') #overfitting identified, now we import dropout for regularization from tensorflow.keras.layers import Dropout reg_model = Sequential() reg_model.add(Dense(256, activation='relu', input_shape=(new_dimension,))) reg_model.add(Dropout(0.4)) reg_model.add(Dense(128, activation='relu')) reg_model.add(Dropout(0.4)) reg_model.add(Dense(no_labels, activation='softmax')) #compile and train reg_model reg_model.compile(optimizer='adam', loss=tf.keras.losses.categorical_crossentropy, metrics=['accuracy']) reg_history = reg_model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=20, batch_size=1000) #evaluate reg_model test_loss, test_accuracy = reg_model.evaluate(test_images, test_labels) print('Test loss: {}'.format(test_loss)) print('Test accuracy: {}'.format(test_accuracy)) #check for overfitting plt.figure() plt.plot(reg_history.history['loss'], 'blue') plt.plot(reg_history.history['val_loss'], 'red') plt.legend(['Training loss', 'Validation Loss']) plt.xlabel('Epochs') plt.ylabel('Loss') plt.title('Loss Curves - after regularisation') "big" > "small" ((10 >= 5*2) and (10 <= 5*2)) 1/0Lambda School Data Science - Making Data-backed AssertionsThis is, for many, the main point of data science - to create and support reasoned arguments based on evidence. It's not a topic to master in a day, but it is worth some focused time thinking about and structuring your approach to it. Lecture - generating a confounding variableThe prewatch material told a story about a hypothetical health condition where both the drug usage and overall health outcome were related to gender - thus making gender a confounding variable, obfuscating the possible relationship between the drug and the outcome.Let's use Python to generate data that actually behaves in this fashion!import random dir(random) # Reminding ourselves what we can do here # Let's think of another scenario: # We work for a company that sells accessories for mobile phones. # They have an ecommerce site, and we are supposed to analyze logs # to determine what sort of usage is related to purchases, and thus guide # website development to encourage higher conversion. # The hypothesis - users who spend longer on the site tend # to spend more. Seems reasonable, no? # But there's a confounding variable! If they're on a phone, they: # a) Spend less time on the site, but # b) Are more likely to be interested in the actual products! 
# Let's use namedtuple to represent our data from collections import namedtuple # purchased and mobile are bools, time_on_site in seconds User = namedtuple('User', ['purchased','time_on_site', 'mobile']) example_user = User(False, 12, False) print(example_user) # And now let's generate 1000 example users # 750 mobile, 250 not (i.e. desktop) # A desktop user has a base conversion likelihood of 10% # And it goes up by 1% for each 15 seconds they spend on the site # And they spend anywhere from 10 seconds to 10 minutes on the site (uniform) # Mobile users spend on average half as much time on the site as desktop # But have twice as much base likelihood of buying something users = [] for _ in range(250): # Desktop users time_on_site = random.uniform(10, 600) purchased = random.random() < 0.1 + (time_on_site // 1500) users.append(User(purchased, time_on_site, False)) for _ in range(750): # Mobile users time_on_site = random.uniform(5, 300) purchased = random.random() < 0.2 + (time_on_site // 1500) users.append(User(purchased, time_on_site, True)) random.shuffle(users) print(users[:10]) # Let's put this in a dataframe so we can look at it more easily import pandas as pd user_data = pd.DataFrame(users) user_data.head() # Let's use crosstabulation to try to see what's going on pd.crosstab(user_data['purchased'], user_data['time_on_site']) # OK, that's not quite what we want # Time is continuous! We need to put it in discrete buckets # Pandas calls these bins, and pandas.cut helps make them time_bins = pd.cut(user_data['time_on_site'], 5) # 5 equal-sized bins pd.crosstab(user_data['purchased'], time_bins) # We can make this a bit clearer by normalizing (getting %) pd.crosstab(user_data['purchased'], time_bins, normalize='columns') # That seems counter to our hypothesis # More time on the site seems to have fewer purchases # But we know why, since we generated the data! # Let's look at mobile and purchased pd.crosstab(user_data['purchased'], user_data['mobile'], normalize='columns') # Yep, mobile users are more likely to buy things # But we're still not seeing the *whole* story until we look at all 3 at once # Live/stretch goal - how can we do that? pd.crosstab([user_data['purchased'], time_bins], user_data['mobile'], normalize='columns') data = user_data.groupby('mobile') data.head()Assignment - what's going on here?Consider the data in `persons.csv` (already prepared for you, in the repo for the week). It has four columns - a unique id, followed by age (in years), weight (in lbs), and exercise time (in minutes/week) of 1200 (hypothetical) people.Try to figure out which variables are possibly related to each other, and which may be confounding relationships.# TODO - your code here # Use what we did live in lecture as an example # HINT - you can find the raw URL on GitHub and potentially use that # to load the data with read_csv, or you can upload it yourself import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # Upgrade to the latest Seaborn to make sure that scatterplots exist. 
!pip install seaborn --upgrade pd.set_option('display.height', 1000) pd.set_option('display.max_rows', 100) pd.set_option('display.max_columns', 100) pd.set_option('display.width', 1000) data_url = 'https://raw.githubusercontent.com/LambdaSchool/DS-Sprint-01-Dealing-With-Data/master/module4-databackedassertions/persons.csv' persons = pd.read_csv(data_url) # The first column is unnamed, so I rewrite the column names to call it 'ID' persons.columns = ['ID', 'age', 'weight', 'exercise_time'] persons.head() ''' Let's see what those variables look like on their own. ''' fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(10, 10)) sns.distplot(persons['age'], bins=30, ax=ax1, axlabel=False); sns.distplot(persons['weight'], bins=30, ax=ax2, axlabel=False); sns.distplot(persons['exercise_time'], bins=30, ax=ax3, axlabel=False); ax1.set_ylabel("Age"); ax2.set_ylabel("Weight"); ax3.set_ylabel("Exercise Time");Alright, looks like Age is truncated at the ends, weight is truncated at the bottom, and Exercise time is truncated at the top. Also, Age looks uniformly distributed while low weights and exercise times predominate. My first guess is that exercise time is a function of both. Let's use scatterplots to see some pairwise relationships.fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18, 5)) sns.scatterplot(persons['age'], persons['weight'], ax=ax1); ax1.set_xlabel("Age"); ax1.set_ylabel("Weight"); sns.scatterplot(persons['age'], persons['exercise_time'], ax=ax2); ax2.set_xlabel("Age"); ax2.set_ylabel("Exercise time"); sns.scatterplot(persons['weight'], persons['exercise_time'], ax=ax3); ax3.set_xlabel("Weight"); ax3.set_ylabel("Exercise time");What the...?? Alright, there's definitely something going on here. While weight seems to be uniformly distributed among age groups, exercise time starts decreasing linearly after 60 years and after 150 lbs. Clearly, I need to look at exercise time as a function of both the other two variables at once. Time for a heatmap.''' Age and weight are binned so that we can aggregate exercise times within each combination of bins and hopefully see a pattern. ''' age_bins = pd.cut(persons.age, np.arange(15,85,5)) weight_bins = pd.cut(persons.weight, np.arange(100,260,10)) fig, ax1 = plt.subplots(1, figsize=(8, 7)) sns.heatmap(pd.crosstab(age_bins, weight_bins, values=persons.exercise_time, aggfunc='mean'), ax=ax1, square=True); ax1.invert_yaxis()Alright, we basically see the same pattern we'd expect from the earlier graphs, but it's actually LESS clear where the cutoffs are from the heatmap. The really clear relationships are visible only in the pairwise graphs earlier. Someone pointed out that weight could be the actual dependent variable here, not exercise time. So does anything clearer show up if we graph weights in terms of the other two variables instead?age_bins = pd.cut(persons.age, np.arange(15,85,5)) weight_bins = pd.cut(persons.weight, np.arange(100,260,10)) exercise_bins = pd.cut(persons.exercise_time, np.arange(0,300,20)) fig, ax1 = plt.subplots(1, figsize=(8, 7)) sns.heatmap(pd.crosstab(age_bins, exercise_bins, values=persons.weight, aggfunc='mean'), ax=ax1, square=True); ax1.invert_yaxis()Not particularly. This is even less clear than the previous case. Alright, what if I tried binning by age, and looking for relationships between weight and exercise time within each of those age bins? 
I create two bins right around 60, where age seems to start having an effect.young = persons[persons['age'] < 60] old = persons[persons['age'] >= 60] fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5)) sns.scatterplot(young['weight'], young['exercise_time'], ax=ax1); ax1.set_xlabel("Weight"); ax1.set_ylabel("Exercise time"); sns.scatterplot(old['weight'], old['exercise_time'], ax=ax2); ax2.set_xlabel("Weight"); ax2.set_ylabel("Exercise time");Demand forecasting with BigQuery and TensorFlowIn this notebook, we will develop a machine learning model to predict the demand for taxi cabs in New York.To develop the model, we will need to get historical data of taxicab usage. This data exists in BigQuery. Let's start by looking at the schema.import google.datalab.bigquery as bq import pandas as pd import numpy as np import shutil %bq tables describe --name bigquery-public-data.new_york.tlc_yellow_trips_2015Analyzing taxicab demand Let's pull the number of trips for each day in the 2015 dataset using Standard SQL.%bq query SELECT EXTRACT (DAYOFYEAR from pickup_datetime) AS daynumber FROM `bigquery-public-data.new_york.tlc_yellow_trips_2015` LIMIT 5Modular queries and Pandas dataframe Let's use the total number of trips as our proxy for taxicab demand (other reasonable alternatives are total trip_distance or total fare_amount). It is possible to predict multiple variables using Tensorflow, but for simplicity, we will stick to just predicting the number of trips.We will give our query a name 'taxiquery' and have it use an input variable '$YEAR'. We can then invoke the 'taxiquery' by giving it a YEAR. The to_dataframe() converts the BigQuery result into a Pandas dataframe.%bq query -n taxiquery WITH trips AS ( SELECT EXTRACT (DAYOFYEAR from pickup_datetime) AS daynumber FROM `bigquery-public-data.new_york.tlc_yellow_trips_*` where _TABLE_SUFFIX = @YEAR ) SELECT daynumber, COUNT(1) AS numtrips FROM trips GROUP BY daynumber ORDER BY daynumber query_parameters = [ { 'name': 'YEAR', 'parameterType': {'type': 'STRING'}, 'parameterValue': {'value': 2015} } ] trips = taxiquery.execute(query_params=query_parameters).result().to_dataframe() trips[:5]Benchmark Often, a reasonable estimate of something is its historical average. We can therefore benchmark our machine learning model against the historical average.avg = np.mean(trips['numtrips']) print('Just using average={0} has RMSE of {1}'.format(avg, np.sqrt(np.mean((trips['numtrips'] - avg)**2))))Just using average=400309.5589041096 has RMSE of 51613.65169049127The mean here is about 400,000 and the root-mean-square-error (RMSE) in this case is about 52,000. In other words, if we were to estimate that there are 400,000 taxi trips on any given day, that estimate is will be off on average by about 52,000 in either direction. Let's see if we can do better than this -- our goal is to make predictions of taxicab demand whose RMSE is lower than 52,000.What kinds of things affect people's use of taxicabs? Weather data We suspect that weather influences how often people use a taxi. 
Perhaps someone who'd normally walk to work would take a taxi if it is very cold or rainy.One of the advantages of using a global data warehouse like BigQuery is that you get to mash up unrelated datasets quite easily.%bq query SELECT * FROM `bigquery-public-data.noaa_gsod.stations` WHERE state = 'NY' AND wban != '99999' AND name LIKE '%LA GUARDIA%'Variables Let's pull out the minimum and maximum daily temperature (in Fahrenheit) as well as the amount of rain (in inches) for La Guardia airport.%bq query -n wxquery SELECT EXTRACT (DAYOFYEAR FROM CAST(CONCAT(@YEAR,'-',mo,'-',da) AS TIMESTAMP)) AS daynumber, MIN(EXTRACT (DAYOFWEEK FROM CAST(CONCAT(@YEAR,'-',mo,'-',da) AS TIMESTAMP))) dayofweek, MIN(min) mintemp, MAX(max) maxtemp, MAX(IF(prcp=99.99,0,prcp)) rain FROM `bigquery-public-data.noaa_gsod.gsod*` WHERE stn='725030' AND _TABLE_SUFFIX = @YEAR GROUP BY 1 ORDER BY daynumber DESC query_parameters = [ { 'name': 'YEAR', 'parameterType': {'type': 'STRING'}, 'parameterValue': {'value': 2015} } ] weather = wxquery.execute(query_params=query_parameters).result().to_dataframe() weather[:5]Merge datasets Let's use Pandas to merge (combine) the taxi cab and weather datasets day-by-day.data = pd.merge(weather, trips, on='daynumber') data[:5]Exploratory analysis Is there a relationship between maximum temperature and the number of trips?j = data.plot(kind='scatter', x='maxtemp', y='numtrips')/usr/local/envs/py3env/lib/python3.5/site-packages/matplotlib/font_manager.py:1320: UserWarning: findfont: Font family ['sans-serif'] not found. Falling back to DejaVu Sans (prop.get_family(), self.defaultFamily[fontext]))The scatterplot above doesn't look very promising. There appears to be a weak downward trend, but it's also quite noisy.Is there a relationship between the day of the week and the number of trips?j = data.plot(kind='scatter', x='dayofweek', y='numtrips')/usr/local/envs/py3env/lib/python3.5/site-packages/matplotlib/font_manager.py:1320: UserWarning: findfont: Font family ['sans-serif'] not found. Falling back to DejaVu Sans (prop.get_family(), self.defaultFamily[fontext]))Hurrah, we seem to have found a predictor. It appears that people use taxis more later in the week. Perhaps New Yorkers make weekly resolutions to walk more and then lose their determination later in the week, or maybe it reflects tourism dynamics in New York City.Perhaps if we took out the confounding effect of the day of the week, maximum temperature will start to have an effect. Let's see if that's the case:j = data[data['dayofweek'] == 7].plot(kind='scatter', x='maxtemp', y='numtrips')/usr/local/envs/py3env/lib/python3.5/site-packages/matplotlib/font_manager.py:1320: UserWarning: findfont: Font family ['sans-serif'] not found. Falling back to DejaVu Sans (prop.get_family(), self.defaultFamily[fontext]))Removing the confounding factor does seem to reflect an underlying trend around temperature. But ... the data are a little sparse, don't you think? This is something that you have to keep in mind -- the more predictors you start to consider (here we are using two: day of week and maximum temperature), the more rows you will need so as to avoid overfitting the model. Adding 2014 and 2016 data Let's add in 2014 and 2016 data to the Pandas dataframe. 
Note how useful it was for us to modularize our queries around the YEAR.data2 = data # 2015 data for year in [2014, 2016]: query_parameters = [ { 'name': 'YEAR', 'parameterType': {'type': 'STRING'}, 'parameterValue': {'value': year} } ] weather = wxquery.execute(query_params=query_parameters).result().to_dataframe() trips = taxiquery.execute(query_params=query_parameters).result().to_dataframe() data_for_year = pd.merge(weather, trips, on='daynumber') data2 = pd.concat([data2, data_for_year]) data2.describe() j = data2[data2['dayofweek'] == 7].plot(kind='scatter', x='maxtemp', y='numtrips')/usr/local/envs/py3env/lib/python3.5/site-packages/matplotlib/font_manager.py:1320: UserWarning: findfont: Font family ['sans-serif'] not found. Falling back to DejaVu Sans (prop.get_family(), self.defaultFamily[fontext]))The data do seem a bit more robust. If we had even more data, it would be better of course. But in this case, we only have 2014-2016 data for taxi trips, so that's what we will go with. Machine Learning with Tensorflow We'll use 80% of our dataset for training and 20% of the data for testing the model we have trained. Let's shuffle the rows of the Pandas dataframe so that this division is random. The predictor (or input) columns will be every column in the database other than the number-of-trips (which is our target, or what we want to predict).The machine learning models that we will use -- linear regression and neural networks -- both require that the input variables are numeric in nature.The day of the week, however, is a categorical variable (i.e. Tuesday is not really greater than Monday). So, we should create separate columns for whether it is a Monday (with values 0 or 1), Tuesday, etc.Against that, we do have limited data (remember: the more columns you use as input features, the more rows you need to have in your training dataset), and it appears that there is a clear linear trend by day of the week. So, we will opt for simplicity here and use the data as-is. Try uncommenting the code that creates separate columns for the days of the week and re-run the notebook if you are curious about the impact of this simplification.import tensorflow as tf shuffled = data2.sample(frac=1, random_state=13) # It would be a good idea, if we had more data, to treat the days as categorical variables # with the small amount of data, we have though, the model tends to overfit #predictors = shuffled.iloc[:,2:5] #for day in range(1,8): # matching = shuffled['dayofweek'] == day # key = 'day_' + str(day) # predictors[key] = pd.Series(matching, index=predictors.index, dtype=float) predictors = shuffled.iloc[:,1:5] predictors[:5] shuffled[:5] targets = shuffled.iloc[:,5] targets[:5]Let's update our benchmark based on the 80-20 split and the larger dataset.trainsize = int(len(shuffled['numtrips']) * 0.8) avg = np.mean(shuffled['numtrips'][:trainsize]) rmse = np.sqrt(np.mean((targets[trainsize:] - avg)**2)) print('Just using average={0} has RMSE of {1}'.format(avg, rmse))Just using average=402667.6826484018 has RMSE of 62394.11232075195Linear regression with tf.contrib.learn We scale the number of taxicab rides by 400,000 so that the model can keep its predicted values in the [0-1] range. The optimization goes a lot faster when the weights are small numbers. 
We save the weights into ./trained_model_linear and display the root mean square error on the test dataset.SCALE_NUM_TRIPS = 600000.0 trainsize = int(len(shuffled['numtrips']) * 0.8) testsize = len(shuffled['numtrips']) - trainsize npredictors = len(predictors.columns) noutputs = 1 tf.logging.set_verbosity(tf.logging.WARN) # change to INFO to get output every 100 steps ... shutil.rmtree('./trained_model_linear', ignore_errors=True) # so that we don't load weights from previous runs estimator = tf.contrib.learn.LinearRegressor(model_dir='./trained_model_linear', feature_columns=tf.contrib.learn.infer_real_valued_columns_from_input(predictors.values)) print("starting to train ... this will take a while ... use verbosity=INFO to get more verbose output") def input_fn(features, targets): return tf.constant(features.values), tf.constant(targets.values.reshape(len(targets), noutputs)/SCALE_NUM_TRIPS) estimator.fit(input_fn=lambda: input_fn(predictors[:trainsize], targets[:trainsize]), steps=10000) pred = np.multiply(list(estimator.predict(predictors[trainsize:].values)), SCALE_NUM_TRIPS ) rmse = np.sqrt(np.mean(np.power((targets[trainsize:].values - pred), 2))) print('LinearRegression has RMSE of {0}'.format(rmse))WARNING:tensorflow:From :9: infer_real_valued_columns_from_input (from tensorflow.contrib.learn.python.learn.estimators.estimator) is deprecated and will be removed in a future version. Instructions for updating: Please specify feature columns explicitly. WARNING:tensorflow:From /usr/local/envs/py3env/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py:142: setup_train_data_feeder (from tensorflow.contrib.learn.python.learn.learn_io.data_feeder) is deprecated and will be removed in a future version. Instructions for updating: Please use tensorflow/transform or tf.data. WARNING:tensorflow:From /usr/local/envs/py3env/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py:96: extract_dask_data (from tensorflow.contrib.learn.python.learn.learn_io.dask_io) is deprecated and will be removed in a future version. Instructions for updating: Please feed input to tf.data to support dask. WARNIN[...]The RMSE here (57K) is lower than the benchmark (62K) indicates that we are doing about 10% better with the machine learning model than we would be if we were to just use the historical average (our benchmark). Neural network with tf.contrib.learn Let's make a more complex model with a few hidden nodes.SCALE_NUM_TRIPS = 600000.0 trainsize = int(len(shuffled['numtrips']) * 0.8) testsize = len(shuffled['numtrips']) - trainsize npredictors = len(predictors.columns) noutputs = 1 tf.logging.set_verbosity(tf.logging.WARN) # change to INFO to get output every 100 steps ... shutil.rmtree('./trained_model', ignore_errors=True) # so that we don't load weights from previous runs estimator = tf.contrib.learn.DNNRegressor(model_dir='./trained_model', hidden_units=[5, 5], feature_columns=tf.contrib.learn.infer_real_valued_columns_from_input(predictors.values)) print("starting to train ... this will take a while ... 
use verbosity=INFO to get more verbose output") def input_fn(features, targets): return tf.constant(features.values), tf.constant(targets.values.reshape(len(targets), noutputs)/SCALE_NUM_TRIPS) estimator.fit(input_fn=lambda: input_fn(predictors[:trainsize], targets[:trainsize]), steps=10000) pred = np.multiply(list(estimator.predict(predictors[trainsize:].values)), SCALE_NUM_TRIPS ) rmse = np.sqrt(np.mean((targets[trainsize:].values - pred)**2)) print('Neural Network Regression has RMSE of {0}'.format(rmse))WARNING:tensorflow:float64 is not supported by many models, consider casting to float32. starting to train ... this will take a while ... use verbosity=INFO to get more verbose output WARNING:tensorflow:From /usr/local/envs/py3env/lib/python3.5/site-packages/tensorflow/python/util/deprecation.py:497: calling DNNRegressor.predict (from tensorflow.contrib.learn.python.learn.estimators.dnn) with outputs=None is deprecated and will be removed after 2017-03-01. Instructions for updating: Please switch to predict_scores, or set `outputs` argument. WARNING:tensorflow:float64 is not supported by many models, consider casting to float32.Using a neural network results in similar performance to the linear model when I ran it -- it might be because there isn't enough data for the NN to do much better. (NN training is a non-convex optimization, and you will get different results each time you run the above code). Running a trained model So, we have trained a model, and saved it to a file. Let's use this model to predict taxicab demand given the expected weather for three days.Here we make a Dataframe out of those inputs, load up the saved model (note that we have to know the model equation -- it's not saved in the model file) and use it to predict the taxicab demand.input = pd.DataFrame.from_dict(data = {'dayofweek' : [4, 5, 6], 'mintemp' : [60, 40, 50], 'maxtemp' : [70, 90, 60], 'rain' : [0, 0.5, 0]}) # read trained model from ./trained_model estimator = tf.contrib.learn.LinearRegressor(model_dir='./trained_model_linear', feature_columns=tf.contrib.learn.infer_real_valued_columns_from_input(input.values)) pred = np.multiply(list(estimator.predict(input.values)), SCALE_NUM_TRIPS ) print(pred)WARNING:tensorflow:float64 is not supported by many models, consider casting to float32. WARNING:tensorflow:float64 is not supported by many models, consider casting to float32. [354762.78 306764.7 387226.62]eDNA conversion of practice data**Resources:**- https://docs.gbif-uat.org/publishing-sequence-derived-data/1.0/en/## Imports import pandas as pd import numpy as np import random from datetime import datetime # for handling dates import pytz # for handling time zones ## Ensure my general functions for the MPA data integration project can be imported, and import them import sys sys.path.insert(0, "C:\\Users\\dianalg\\PycharmProjects\\PythonScripts\\eDNA") import WoRMS # functions for querying WoRMS REST APILoad data## Plate data plate = pd.read_csv('Plate_S_ASV_OBIS_data.csv') print(plate.shape) plate.head()(280440, 11)**Note** that there are 60 unique FilterIDs and 60 unique Sequence_IDs in this table. Is that usual, to have FilterID = Sequence_ID? 
**IDs are a combination of what information?** Sequence_ID appears to be just the FilterID with underscore and S added.## Sequence_ID is just FilterID + '_S' temp = plate[['FilterID', 'Sequence_ID']].copy() temp.drop_duplicates(inplace=True) print(temp.shape) temp.head()(60, 2)**FilterID seems to be composed of:**- SAMPLING_cruise from plate_meta- c + SAMPLING_station_number (or just SAMPLING_station, lower case and zero-padded?)- _ + SAMPLING_bottle- _ + edna- _ + 1, 2 or 3 (replicate?)## ASV taxa table taxa = pd.read_csv('Filtered_ASV_taxa_table_all.csv') print(taxa.shape) print(taxa.columns) taxa.head()(4711, 79) Index(['ASV', 'Kingdom', 'Phylum', 'Class', 'Order', 'Family', 'Genus', 'Species', '14213c01_12_edna_1', '14213c01_12_edna_2', '14213c01_12_edna_3', '22013c01_12_edna_1', '22013c01_12_edna_2', '22013c01_12_edna_3', 'CN13Dc01_12_edna_1', 'CN13Dc01_12_edna_2', 'CN13Dc01_12_edna_3', '05114c01_12_edna_1', '05114c01_12_edna_2', '05114c01_12_edna_3', '14714c01_12_edna_1', '14714c01_12_edna_2', '14714c01_12_edna_3', '19114c01_12_edna_1', '19114c01_12_edna_2', '19114c01_12_edna_3', '30214c01_12_edna_1', '30214c01_12_edna_2', '30214c01_12_edna_3', '32414c01_12_edna_1', '32414c01_12_edna_2', '32414c01_12_edna_3', '12015c01_12_edna_1', '12015c01_12_edna_2', '12015c01_12_edna_3', '18815c01_12_edna_1', '18815c01_12_edna_2', '18815c01_12_edna_3', 'EB_20161116', 'pcrblank_1', '28215c01_12_edna_1', '28215c01_12_edna_2', '28215c01_12_edna_3', '34915c01_12_edna_1', '34915c01_12_edna_2', '34915c01_12_edn[...]It looks like this shows the number of reads for each ASV detected for each FilterID (columns), plus the matched taxa information for each ASV, if available. Blank cells have been filled with the word 'unassigned'. There are some column names that do not correspond to FilterIDs. They are the taxonomy columns (of course), in addition to the following:- EB_20161116- pcrblank_1- pcrblank2- EB_20161121- EB_20161228- EB_20170117- pcrblank3- CB_CANON160925_1- CB_CANON160925_2- CB_CANON160925_3- ArtComm2Are some of these controls? **Yes - here are the possible control types:**- CB = collection blank which was taken on the ship. Water that should be "clean" was passed through the filter, so there should be no reads in this sample, although it tends to be the "dirtiest" control. It checks for contamination during filtration, I think.- EB = extraction blank which was taken in the lab but contains no DNA (i.e. a negative control), so there should be no reads in this sample. It checks if contamination occurred during DNA extraction (lab work pre-PCR).- pcr blank = A sample that went through PCR but contained no input DNA (i.e. a negative control), so there should be no reads in this sample. It checks if contamination occurred during PCR.- ArtComm = artificial community that went through PCR and contained DNA from species that should not be in the study system (i.e. 
a positive control), so you know what results you expect.**Note** that there is some bioinformatic post-processing you can do to adjust for things that popped up in your controls.```pythonfor col in taxa.columns: if col not in plate['FilterID'].unique(): print(col)```## Plate metadata plate_meta = pd.read_csv('Plate_S_meta_OBIS_data.csv') print(plate_meta.shape) print(plate_meta.columns) plate_meta.head()(60, 52) Index(['SequenceID', 'sample_name', 'order', 'tag_sequence', 'primer_sequence_F', 'primer_sequence_R', 'library_tag_combo', 'library', 'date_PCR', 'sample_type', 'locus', 'tag_number', 'R1', 'R2', 'SAMPLING_cruise', 'SAMPLING_station_number', 'SAMPLING_bottle', 'depth', 'SAMPLING_station', 'SAMPLING_project', 'SAMPLING_platform', 'SAMPLING_platform_type', 'SAMPLING_dec_lat', 'SAMPLING_dec_lon', 'temp', 'salinity', 'chlorophyll', 'pressure_dbar', 'nitrate', 'diss_oxygen', 'SAMPLING_real_depth', 'SAMPLING_transmiss_%', 'SAMPLING_sig_t', 'SAMPLING_fluor', 'SAMPLING_date_time', 'description', 'SAMPLING_PI', 'SAMPLING_institute', 'env_biome', 'env_feature', 'env_material', 'samp_collection_device', 'project_name', 'samp_vol_we_dna_ext', 'samp_filter_size_ext', 'samp_filter_ext_type', 'samp_store_temp', 'seq_meth', 'sequencing_facility', 'geo_loc_name', 'investigation_type', 'FilterID'], [...]This shows some metadata associated with each SequenceID and FilterID. This metadata **seems mostly redundant with that in MB_20200317_1326_18S_analysis_metadata.csv**, so for now I'll just work with it. Conversion PlanAccording to GBIF's new guide to publishing sequence-derived biodiversity data, my occurrence file should include the following:eventID - Highly recommended, **SequenceID in plate and plate_meta** eventDate - Required, **SAMPLING_date_time in plate_meta** decimalLatitude - Highly recommended, **SAMPLING_dec_lat in plate_meta** decimalLongitude - Highly recommended, **SAMPLING_dec_lon in plate_meta** env_broad_scale - Recommended, equivalent to env_biome in MIxS, the major environmental system your sample or specimen came from, use [subclasses of ENVO´s biome class](http://www.ontobee.org/ontology/ENVO?iri=http://purl.obolibrary.org/obo/ENVO_00000428), **env_biome in plate_meta** env_local_scale - Recommended, equivalent to env_feature in MIxS, the entity or entities which are in your sample or specimen´s local vicinity and which you believe have significant causal influences on your sample or specimen, use terms that are present in ENVO and which are of smaller spatial grain than your entry for env_broad_scale, **env_feature in plate_meta** env_medium - Recommended, equivalent to env_material in MIxS, environmental material that immediately surrounded your sample or specimen prior to sampling, use [subclasses of ENVO´s environmental material class](http://www.ontobee.org/ontology/ENVO?iri=http://purl.obolibrary.org/obo/ENVO_00010483), **env_material in plate_meta**sop - Recommended, standard operating procedures used in assembly and/or annotation of metagenomes or a reference to a well documented protocol (e.g. using protocols.io). **Does something like this exist? If not, what information might be important to include here?** lib_layout - Recommended, equivalent to lib_const_meth in MIxS, whether to expect single, paired, or other configuration of reads. **Is this relevant here?** target_gene - Highly recommended, targeted gene or marker name for marker-based studies (e.g. 16S rRNA), **locus? 
Does there need to be more info included here?** target_subfragment - Highly recommended, name of subfragment of a gene or marker (e.g. V6). **Is this relevant here? Should the tag_sequence fit in somewhere?** pcr_primer_name_forward - Highly recommended, name of forward primer. **Do these primers have names?** pcr_primer_forward - Highly recommended, sequence of the forward primer, **primer_sequence_F in plate_meta** pcr_primer_name_reverse - Highly recommended, name of reverse primer pcr_primer_revers - Highly recommended, sequence of the reverse primer, **primer_sequence_R in plate_meta** pcr_primer_reference - Highly recommended, reference for primers (e.g. a DOI to a paper). **Is there a reference for primers?** DNA_sequence - Highly recommended, the actual DNA sequence of the ASV. TaxonID is highly recommended if DNA_sequence is not provided, **ASV in plate** scientificName - Required, Latin name of the closest known taxon or an OTU identifier from BOLD or UNITE, **a combination of Genus and Species from plate, or the lowest available taxon. This will have to be looked into more, especially for cases where no traditional taxonomic classification is available.** kingdom - Highly recommended, **Kingdom in plate** phylum - Recommended, **Phylum in plate** class - Recommended, **Class in plate** order - Recommended, **Order in plate** family - Recommended, **Family in plate** genus - Recommended, **Genus in plate** basisOfRecord - Required, MaterialSample materialSampleID - Highly recommended, an identifier for the MaterialSample, use the biosample ID if one was obtained from a nucleotide archive otherwise construct a globally unique identifier. **Is this Sequence_ID again? FilterID? Something else?** identificationRemarks - recommended, specification of taxonomic identification process ideally including data on applied algorithm and reference database as well as on level of confidence in the resulting identification. **Is this information available somewhere?** identificationReferences - recommended, link to protocol or code. **Is this information available somewhere?** organismQuantity - Highly recommended, number of reads, **Reads in plate** organismQuantityType - Highly recommended, DNA sequence reads sampleSizeValue - Highly recommended, total number of reads in the sample for calculating the relative abundance of sequence variants. **Is it accurate to just sum all the reads by Sequence_ID?** sampleSizeUnit - Highly recommended, DNA sequence reads associatedSequences - recommended, list of identifiers linking to archived (raw) sequence reads. **Are these sequences already archived?** Conversion## eventID occ = pd.DataFrame({'eventID':plate['Sequence_ID']}) print(occ.shape) occ.head() ## Merge with plate_meta to obtain eventDate, decimal Lat and Lon occ = occ.merge(plate_meta[['SequenceID', 'SAMPLING_date_time', 'SAMPLING_dec_lat', 'SAMPLING_dec_lon', 'env_biome', 'env_feature', 'env_material', 'locus', 'primer_sequence_F', 'primer_sequence_R']], how='left', left_on='eventID', right_on='SequenceID') occ.drop(columns='SequenceID', inplace=True) occ.columns = ['eventID', 'eventDate', 'decimalLatitude', 'decimalLongitude', 'env_broad_scale', 'env_local_scale', 'env_medium', 'target_gene', 'pcr_primer_forward', 'pcr_primer_reverse'] occ.head()**Need to add sop? lib_layout? target_subfragment? primer names? 
primer references?**## Format eventDate pst = pytz.timezone('America/Los_Angeles') eventDate = [pst.localize(datetime.strptime(dt, '%Y-%m-%d %H:%M')).isoformat() for dt in occ['eventDate']] occ['eventDate'] = eventDate occ.head() ## Add DNA_sequence occ['DNA_sequence'] = plate['ASV'] occ.head()Before adding taxonomic information, I'll have to clean up the Genus and Species columns in the plate df. **Weird values include:**- 'unassigned' has been used when there's no Genus data; also when there's no Species data- 'g_', **I assume** also signifies no Genus; 's_' seems to be the equivalent in Species- 'unknown' also has been used- 'no_hit' in both Genus and Species- 'Herdmania <dinoflagellates>' - The Species entry for this seems to give both Genus and Species (Herdmania litoralis) - Side note: **How can a Phylum be unknown, but then Class, Order, Family, etc. be known?**- 'Halofilum <green algae>' - Similarly, Species is Halofilum ramosum- 'Candida <clade Candida/Lodderomyces clade>' - This has Species unassigned, Family = DebaryomycetaceaeOnly in Species:- 'uncultured marine eukaryote'- 'eukaryote clone OLI11007'- 'Dinophyceae sp. UDMS0803'- 'uncultured marine picoeukaryote'- 'Chaetoceros sp. UNC1415'- 'bacterium'**Note** there are loads of terms like those given above in the Species column. **What do they mean and where do they come from?** Maybe some of those numbers correspond to BOLD or UNITE OTU ID's. **Also note** that the Species column often contains both Genus and Species - the proper scientific name - if available. I'm not sure if this always is the case, but maybe I can just use the Species column for scientificName rather than combining Genus and Species.## scientificName, taxonomic info occ['scientificName'] = plate['Species'] occ['kingdom'] = plate['Kingdom'] occ['phylum'] = plate['Phylum'] occ['class'] = plate['Class'] occ['order'] = plate['Order'] occ['family'] = plate['Family'] occ['genus'] = plate['Genus'] occ.head()**So we don't have to do the WoRMS thing for eDNA data?****Note** that:- 63600 (out of a total 280440 records) have scientificName = 'unassigned'- 163380 have scientificName = 's_'- 0 have scientificName = 'unknown'- 12540 have scientificName = 'no_hit'It seems like for the unassigned and s_ species, other taxonomic levels of classification (e.g. Family) are known (although I haven't checked this in all cases). When the species is 'no_hit', though, it seems like it's 'no_hit' across the board.**Let me replace these values with NaNs to make things easier to work with.**## Clean scientificName and taxonomy columns cols = ['scientificName', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus'] occ[cols] = occ[cols].replace({'unassigned':np.nan, 's_':np.nan, 'g_':np.nan, 'unknown':np.nan, 'no_hit':np.nan})Ok, so now we can see that:- 9300 records have full taxonomic info (all columns are not NaN)- A total of 239520 do not have a species designation (i.e. scientificName = NaN). This is as expected based on the above numbers.- Of these, 9840 are only missing species- 120 are only missing genus- 300 are only missing family- 1020 are only missing order- 780 are only missing class- 10860 are only missing phylum- 0 are only missing kingdom- This leaves 216600 records with more than one field missing**How do I handle these? Based on the standard information, it seems like I should fill NaNs in the species column with the lowest known taxonomic rank. If all the rows are NaN, the record cannot be submitted to OBIS. 
*But*, before deleting these, I need to look into the non-Linnaean name options.****Note** that 33360 rows have ALL NaNs in ALL taxonomic columns## Fill missing values in the species column with the lowest available taxonomic rank occ['scientificName'] = occ['scientificName'].combine_first(occ['genus']) occ['scientificName'] = occ['scientificName'].combine_first(occ['family']) occ['scientificName'] = occ['scientificName'].combine_first(occ['order']) occ['scientificName'] = occ['scientificName'].combine_first(occ['class']) occ['scientificName'] = occ['scientificName'].combine_first(occ['phylum']) occ['scientificName'] = occ['scientificName'].combine_first(occ['kingdom'])Ok, so now, **what do the weird species designations mean?****[Barcode of Life Barcode Index Numbers (BINs)](http://www.boldsystems.org/index.php)** are of the format: BOLD:AAA0000 (where A can be any letter A-Z, and 0 can be any number 0-9). These are based on CO1 sequences (animals), rRNA ITS sequences (fungi), or rbcL and matK sequences (plants). **[UNITE Species Hypotheses (SHs)](https://unite.ut.ee/index.php)** are of the format: SH1566366.08FU. These are based on the rRNA ITS region.None of the species designations seem to match either of these formats.**Can we search for BINs and/or SHs using the DNA_sequence (and can I automate this process using an API)? Or do we have to throw out anything that's not a valid Linnean name? To check for validity, would I still need to run things through WoRMS?**## What does WoRMS throw back? names = occ['scientificName'].unique() names = names[~pd.isnull(names)] # remove NaN name_id_dict, name_name_dict, name_taxid_dict, name_class_dict = WoRMS.run_get_worms_from_scientific_name(names, verbose_flag=True) print(len(names)) print(len(name_name_dict)) # OK, so at least most of these are matching, even if it's only the first word (e.g. Oithonidae sp. DZMB624 matching to Oithonidae)816 730**I'm going to complete the conversion for now**, ignoring the weird species designations. I can dig into that more after talking with Katie and better understanding what they mean.## basisOfRecord occ['basisOfRecord'] = 'MaterialSample' occ.head()**materialSampleID** should be added. **Have these sequences been archived?** If so, use the biosample id from the archive. Otherwise, it's just a unique designation for the actual material sample - perhaps the same as Sequence_ID.**Also need to look into:**- identificationRemarks - recommended, specification of taxonomic identification process ideally including data on applied algorithm and reference database as well as on level of confidence in the resulting identification. Is this information available somewhere?- identificationReferences - recommended, link to protocol or code. Is this information available somewhere?## organismQuantity (number of reads) occ['organismQuantity'] = plate['Reads'] occ['organismQuantityType'] = 'DNA sequence reads' occ.head()There are 215537 rows where the number of reads is 0. **What does this mean? Is that an absence record??**## sampleSizeValue count_by_seq = plate.groupby('Sequence_ID', as_index=False)['Reads'].sum() occ = occ.merge(count_by_seq, how='left', left_on='eventID', right_on='Sequence_ID') occ.drop(columns='Sequence_ID', inplace=True) occ.rename(columns={'Reads':'sampleSizeValue'}, inplace=True) print(occ.shape) occ.head()(280440, 22)sampleSizeValue is supposed to be the total number of reads in the sample for calculating the relative abundance of sequence variants. 
**Is it accurate to just sum all the reads by Sequence_ID?**## sampleSizeUnit occ['sampleSizeUnit'] = 'DNA sequence reads' occ.head()**Are there any dois to associatedSquences for these data?** Should be a list of identifiers linking to archived (raw) sequence reads. Save## Save occ.to_csv('eDNA_practice_plate_occ_20201021.csv', index=False, na_rep='NaN')get names of each condition for laterpd.Categorical(luminescence_raw_df.condition) names = luminescence_raw_df.condition.unique() for name in names: print(name) #get list of promoters pd.Categorical(luminescence_raw_df.Promoter) prom_names = luminescence_raw_df.Promoter.unique() for name in prom_names: print(name)NIR1(1000bp):LucN/35S:LucF NOS:LucN/35S:LucF STAP4:LucN/35S:LucF 35S:LucN/35S:LucF UBQ10:LucN/35S:LucF UBQ10:LucN/UBQ10:LucFtest normality#returns test statistic, p-value for name1 in prom_names: for name in names: print('{}: {}'.format(name, stats.shapiro(luminescence_raw_df['nluc/fluc'][luminescence_raw_df.condition == name])))K3G1: (0.8049345016479492, 0.010666717775166035) W5: (0.6742411851882935, 0.003271136898547411) K3G1: (0.8049345016479492, 0.010666717775166035) W5: (0.6742411851882935, 0.003271136898547411) K3G1: (0.8049345016479492, 0.010666717775166035) W5: (0.6742411851882935, 0.003271136898547411) K3G1: (0.8049345016479492, 0.010666717775166035) W5: (0.6742411851882935, 0.003271136898547411) K3G1: (0.8049345016479492, 0.010666717775166035) W5: (0.6742411851882935, 0.003271136898547411) K3G1: (0.8049345016479492, 0.010666717775166035) W5: (0.6742411851882935, 0.003271136898547411)not normal#test variance stats.levene(luminescence_raw_df['nluc/fluc'][luminescence_raw_df.condition == names[0]], luminescence_raw_df['nluc/fluc'][luminescence_raw_df.condition == names[1]], ) test = luminescence_raw_df.groupby('Promoter') test stats.mannwhitneyu(value['nluc/fluc'][value.condition == names[0]], value['nluc/fluc'][value.condition == names[1]]) for promoter, value in test: print(stats.mannwhitneyu(value['nluc/fluc'][value.condition == names[0]], value['nluc/fluc'][value.condition == names[1]])) for promoter, value in test: print(value)well_row well_col content fluc_luminescence nluc_luminescence \ 9 D 1 Sample X10 19847.0 12610960.0 10 D 2 Sample X11 22995.0 21749086.0 11 D 3 Sample X12 2243.0 756846.0 name condition nluc/fluc date Promoter 9 70 + 34 K3G1 635.408878 31.10.19 35S:LucN/35S:LucF 10 70 + 34 K3G1 945.818047 31.10.19 35S:LucN/35S:LucF 11 70 + 34 W5 337.425769 31.10.19 35S:LucN/35S:LucF well_row well_col content fluc_luminescence nluc_luminescence \ 0 A 1 Sample X1 118850.0 12998.0 1 A 2 Sample X2 1694.0 2953.0 2 A 3 Sample X3 1331.0 334.0 name condition nluc/fluc date Promoter 0 25 + 34 K3G1 0.109365 31.10.19 NIR1(1000bp):LucN/35S:LucF 1 [...]Exploratory Data Analysis. 1. Defining the Question a) Specifying the Data Analytic Question > Financial Inclusion: Identify how we can predict which individuals are most likely to have or use a bank account across Kenya, Rwanda, Tanzania, and Uganda. b) Defining the Metric for Success the objectives for this data include:1. indentifying which individuals are more likely to use the bank2. their age groups c) Understanding the context to solve the challenge of financial inclusion, in several regions across east Africa, i have been assigned to figure out how we can predict which individuals are most likely to have or use a bank account. 
Your solution will help provide an indication of the state of financial inclusion in Kenya, Rwanda, Tanzania, and Uganda, while providing insights into some of the key demographic factors that might drive individuals’ financial outcomes. d) Recording the Experimental Design 1. reading in the datasets2. checking the data for duplicates and missinng data3. dropping of duplicates and removal filling in of missing data values with zero4. Finding and dealing with outliers, anomalies, and missing data within the dataset.5. Performing of univariate, bivariate and multivariate analysis recording of observations.6. Implementing the solution by performing the respective analysis i.e. factor analysis, principal component analysis, and discriminant analysis.7. Challenge your solution by providing insights on how you can make improvements. 2. Reading the Data# Loading the Data from the source # dataset url = http://bit.ly/FinancialDataset import pandas as pd import numpy as np import seaborn as sns import matplotlib from matplotlib import pyplot as plt %matplotlib inline ## url = 'http://bit.ly/FinancialDataset' df = pd.read_csv(url) df3. Checking the Data# Determining the no. of records in the dataset df.shape # running str() str(df) # Previewing the top of the dataset df.head() # Previewing the bottom of the dataset df.tail() # Checking whether each column has an appropriate datatype df.dtypes df.info() RangeIndex: 23524 entries, 0 to 23523 Data columns (total 13 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 country 23510 non-null object 1 year 23524 non-null int64 2 uniqueid 23524 non-null object 3 Has a Bank account 23488 non-null object 4 Type of Location 23509 non-null object 5 Cell Phone Access 23513 non-null object 6 household_size 23496 non-null float64 7 Respondent Age 23490 non-null float64 8 gender_of_respondent 23490 non-null object 9 The relathip with head 23520 non-null object 10 marital_status 23492 non-null object 11 Level of Educuation 23495 non-null object 12 Type of Job 23494 non-null object dtypes: float64(2), int64(1), object(10) memory usage: 2.3+ MB4. Tidying the Dataset# Checking for Outliers outliers=[] def detect_outlier(data_1): threshold=3 mean_1 = np.mean(data_1) std_1 =np.std(data_1) for y in data_1: z_score= (y - mean_1)/std_1 if np.abs(z_score) > threshold: outliers.append(y) return outliers detect_outlier(df['household_size']) outliers=[] def detect_outlier(data_1): threshold=3 mean_1 = np.mean(data_1) std_1 =np.std(data_1) for y in data_1: z_score= (y - mean_1)/std_1 if np.abs(z_score) > threshold: outliers.append(y) return outliers detect_outlier(df['Respondent Age']) # quantiles Q1 = df.quantile(0.25) Q3 = df.quantile(0.75) IQR = Q3 - Q1 # Removing outliers based on the IQR range and stores the result in the data frame 'df_out' # --- # df1 = df[~((df < (Q1 - 1.5 * IQR)) | (df > (Q3 + 1.5 * IQR))).any(axis=1)] # Printing the shape of our new dataset # --- # print(df1.shape) # Printing the shape of our old dataset # --- # print(df.shape) # More data cleaning procedures df1.drop_duplicates() # Identifying the Missing Data df1.isnull().values.any() # counting missing values in ech column df1.isnull().sum() # Dealing with the Missing Data df1.dropna() # drop all duplicates df1.drop_duplicates(inplace=True) # drop columns to_drop = ['uniqueid','The relathip with head','marital_status','year'] df1.drop(to_drop, axis=1, inplace=True) df1.columns5. 
**Exploratory Analysis**df1.info() Int64Index: 22903 entries, 0 to 23522 Data columns (total 2 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 household_size 22876 non-null float64 1 Respondent Age 22870 non-null float64 dtypes: float64(2) memory usage: 1.1 MB**Univariate analysis**--- ***Measures of Central tendency***#finding the mean age df1['Respondent Age'].mean() #finding the median age df1['Respondent Age'].median() # finding the modal age df1['Respondent Age'].mode() #finding the mean household size df1['household_size'].mean() # finding the median household size df1['household_size'].median() # finding the modal household size df1['household_size'].mode()***Measures of Dispersion***# standard deviation of household sizes df1['household_size'].std() # variance of household sizes df1['household_size'].var() # standard deviation of ages df1['Respondent Age'].std() # variance of ages df1['Respondent Age'].var()***Quantiles***# finding quantiles of household sizes df1['household_size'].quantile([0.25,0.5,0.75]) # finding quantiles of ages df1['Respondent Age'].quantile([0.25,0.5,0.75])***skewness***df1['household_size'].skew() df1['Respondent Age'].skew()***kurtosis***df1['Respondent Age'].kurt() df1['household_size'].kurt() ## summary startistics df1['household_size'].describe() df1['Respondent Age'].describe()***visualization techniques*** Ploting the univariate summaries and recording our observations## boxplot sns.boxplot(df1['household_size'], showmeans=True) df1['household_size'].value_counts().head().plot.bar() df1['Respondent Age'].value_counts().sort_index().plot.bar()Recommendations> While from the data there is no assurity that the household size detemined whether or not individuals had bank accounts across Kenya, Rwanda, Tanzania, and Uganda, it is clear that most of the older individuals had bank accounts.i would recommend that children and the youth be educated on the importance of bank accounts. **Bivariate analysis**# Ploting the bivariate summaries and recording our observations # scatterplot sns.pairplot(df1) plt.show() # Calculating the pearson coefficient pearson_coeff = df1["Respondent Age"].corr(df1["household_size"], method="pearson") print(pearson_coeff) # Checking whether you have to define the pearson coeff = df1["Respondent Age"].corr(df1["household_size"]) print(coeff) # heatmap sns.heatmap(df1.corr(),annot=True) plt.show() df1.info() Int64Index: 22903 entries, 0 to 23522 Data columns (total 9 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 country 22889 non-null object 1 Has a Bank account 22867 non-null object 2 Type of Location 22888 non-null object 3 Cell Phone Access 22892 non-null object 4 household_size 22876 non-null float64 5 Respondent Age 22870 non-null float64 6 gender_of_respondent 22870 non-null object 7 Level of Educuation 22874 non-null object 8 Type of Job 22873 non-null object dtypes: float64(2), object(7) memory usage: 2.4+ MB**Multivariate analysis**sns.pairplot(df1) plt.show()7. Implementing the Solution **Data Reduction Techniques** 1. 
Principal component analysisdf1.head() df1.describe() from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(df1[['household_size', 'Respondent Age']]) scaled_data = scaler.transform(df1[['household_size', 'Respondent Age']]) scaled_data = pd.DataFrame(scaled_data) df2 = scaled_data.fillna(0) df2.head() from sklearn.decomposition import PCA logmodel = PCA(n_components=2) logmodel.fit(df2) pca_x = logmodel.transform(df2) df2.shape pca_x.shape df3 = pd.DataFrame(logmodel.components_, columns = ['household_size', 'Respondent Age']) plt.figure(figsize=(10,6)) sns.heatmap(df3) plt.figure(figsize=(10,6)) ty=sns.scatterplot(pca_x[:,0], pca_x[:,1]) sns.despine(left=True) ty.set_title('PCA Results') ty.set_ylabel('Second Principle Component ') ty.set_xlabel('First Principle Component ') pca_x2. Factor analysisdf1.info() df1.head() df1.columns # drop columns to_drop = ['country','Has a Bank account','Type of Location','Cell Phone Access','gender_of_respondent','Level of Educuation','Type of Job'] df1.drop(to_drop, axis=1, inplace=True) df1.info() df1.head() import tensorflow as tf # installing factor analyser !pip install factor_analyzer==0.2.3 from factor_analyzer.factor_analyzer import calculate_bartlett_sphericity chi_square_value,p_value=calculate_bartlett_sphericity(df1) chi_square_value, p_value from sklearn.datasets import load_digits from sklearn.decomposition import FactorAnalysis df4 = df1.fillna(0) from factor_analyzer.factor_analyzer import calculate_kmo kmo_all,kmo_model=calculate_kmo(df4) kmo_model from sklearn.datasets import load_iris fa = FactorAnalysis() fa.fit(df4, 25) # Checking the Eigenvalues ev,v = fa.get_eigenvalues() ev fa = FactorAnalysis() fa.analysis(df, 6, rotation="varimax") fa.loadings3. Discriminant analysisfrom sklearn.model_selection import StratifiedKFold from sklearn.metrics import roc_auc_score from sklearn.preprocessing import StandardScaler from sklearn.feature_selection import VarianceThreshold from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis from tqdm import tqdm_notebook import warnings warnings.filterwarnings('ignore') df4.shape X = df4.iloc[:, 0:4].values y = df4.iloc[:2, ].values from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA lda = LDA(n_components=1) X_train = lda.fit_transform(X_train, y) X_test = lda.transform(X_test) from sklearn.ensemble import RandomForestClassifier classifier = RandomForestClassifier(max_depth=2, random_state=0) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score cm = confusion_matrix(y_test, y_pred) print(cm) print('Accuracy' + str(accuracy_score(y_test, y_pred)))8. Challenging the solution > The easy solution is nice because it is, well, easy, but you should never allow those results to hold the day. 
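(Parenthetical aside before the quote continues: the discriminant-analysis cell above sets `y = df4.iloc[:2, ]`, which selects two rows rather than a label vector, and then fits the LDA with the full `y` instead of `y_train`. A corrected sketch, under the assumption that an encoded bank-account label — called `target` here, purely hypothetical since that column was dropped earlier — is the variable of interest:)

```python
# Corrected sketch of the LDA + random-forest step above.
# `target` is a hypothetical 0/1 label (e.g. an encoded 'Has a Bank account').
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, accuracy_score

X = df4[['household_size', 'Respondent Age']].values
y = target.values  # hypothetical label vector, one entry per row of df4

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=0)

sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

lda = LDA(n_components=1)
X_train = lda.fit_transform(X_train, y_train)  # fit with the matching labels
X_test = lda.transform(X_test)

clf = RandomForestClassifier(max_depth=2, random_state=0).fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print('Accuracy:', accuracy_score(y_test, y_pred))
```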
You should always be thinking of ways to challenge the results, especially if those results comport with your prior expectation.df4.shape # Reviewing the Solution N = 22903 N <- nrow(df) idx <- sample(N, N, replace = TRUE) df4 <- df[idx, ]Conduct AnalysisThis notebook is intended to help analyze my research questionsfrom scripts.project_functions import * import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import numpy as np sns.set_theme(style = "darkgrid", font_scale = 1.25) df = load_and_process('../data/raw/combine_data.csv') dfAnalyze draft pick and combine performanceTo compare players, I first normalize the **Forty**, **Vertical**, **BenchReps**, **BroadJump**, **Cone**, and **Shuttle** athletic tests to better compare. For the normalization, I want scores between 0 and 1. Since a lower score is better in the Forty, Cone and Shuttle, I multiple these but -1 so I always get that higher scores are better. Next I create a new column called Score. We sum all of the normalized athelics scores in this column. The higher the number, the better the overall performance of the athlete.df['FortyNormal'] = (df['Forty'] - df['Forty'].min()) / (df['Forty'].max() - df['Forty'].min()) * -1 df['VerticalNormal'] = (df['Vertical'] - df['Vertical'].min()) / (df['Vertical'].max() - df['Vertical'].min()) * 1 df['BenchRepsNormal'] = (df['BenchReps'] - df['BenchReps'].min()) / (df['BenchReps'].max() - df['BenchReps'].min()) * 1 df['BroadJumpNormal'] = (df['BroadJump'] - df['BroadJump'].min()) / (df['BroadJump'].max() - df['BroadJump'].min()) * 1 df['ConeNormal'] = (df['Cone'] - df['Cone'].min()) / (df['Cone'].max() - df['Cone'].min()) * -1 df['ShuttleNormal'] = (df['Shuttle'] - df['Shuttle'].min()) / (df['Shuttle'].max() - df['Shuttle'].min()) * -1 df['Score'] = df['FortyNormal'] + df['VerticalNormal'] + df['BenchRepsNormal'] + df['BroadJumpNormal'] + df['ConeNormal'] + df['ShuttleNormal'] df.sort_values(by=['Score'], ascending = False) df['Score'].isnull().sum()There are so many null values! I want to work with TE or WR. As the the athletic tests highlight receiver skills. You have to jump to catch, you need to be fast to create separation. The benchpress reps is more for linemen who do a lot of pushing, but Wide Receivers block sometimes and Tight Ends block very often. I suspect Tight Ends will have a more complete dataset. Let's look at how many nulls we have.WR_data = df[df['Pos'] == 'WR'] TE_data = df[df['Pos'] == 'TE'] TE_data['Player'].count() count_nan_in_df = TE_data.isnull().sum() print (count_nan_in_df) WR_data['Player'].count() count_nan_in_df = WR_data.isnull().sum() print (count_nan_in_df)Player 0 Pos 0 Ht 0 Wt 0 Forty 8 Vertical 160 BenchReps 454 BroadJump 177 Cone 286 Shuttle 273 Year 0 AV 0 Team 322 Round 0 Pick 0 FortyNormal 8 VerticalNormal 160 BenchRepsNormal 454 BroadJumpNormal 177 ConeNormal 286 ShuttleNormal 273 Score 561 dtype: int64Wide receivers have 69% of their scores as null, whereas Tight Ends have only 43% as null. Tight Ends are the better set. It looks like many wide receivers did not partipate in the bench press as expected.p1 = sns.relplot(x = "Score", y = "Round", data = TE_data) plt.title('Score versus Draft Pick') p1.set(xlabel = 'Generated Score', ylabel = 'Draft Round')Wow. It worked! There is a very clear trend here. The higher the score the earlier the Tight Ends were picked in the draft. Who has that score of 1.8? He was picked in the first round of course!TE_data.sort_values(by=['Score'], ascending = False).iloc[0]. 
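(A quick aside on the Score construction above: the six min–max normalizations repeat the same pattern, so a small helper keeps the logic in one place — same result, less repetition. A sketch:)

```python
# Reusable version of the score construction above: min-max normalize each
# combine test, flip the sign for drills where a lower time is better
# (Forty, Cone, Shuttle), and sum everything into one Score column.
def minmax(col, lower_is_better=False):
    scaled = (col - col.min()) / (col.max() - col.min())
    return -scaled if lower_is_better else scaled

tests = {'Forty': True, 'Vertical': False, 'BenchReps': False,
         'BroadJump': False, 'Cone': True, 'Shuttle': True}
df['Score'] = sum(minmax(df[c], lower) for c, lower in tests.items())
```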
He was selected sixth overall in the 2006 draft. He played for the 49ers for 9 years from 2006 until mid 2015, when he was traded to the Denver Broncos. In 2016 we was traded to the Washington Redskins, now named the Washington Football team, and played there until he retired in 2019. From Bleacher the report:> Davis has displayed potential and he’s had flashes of brilliance—the 13-touchdown season in 2009, the back-to-back seasons of 900+ receiving yards, and the unbelievable playoff performances last year (10 receptions, 292 yards, and four touchdowns in two games). But he needs to put it together, be more consistent, and a better quarterback than would also help his case.However, football is a team game.p1 = sns.relplot(x = "Score", y = "AV", data = TE_data) plt.title('Score versus Approximate Value') p1.set(xlabel = 'Generated Score', ylabel = 'Approximate Value')AV or Approximate value is a one value score created by the founder of Pro Football Reference. The higher the AV the better. The value is based on how important the player is to the team. It is a nontrivial score to obtain. you can read about it [read about it](https://www.pro-football-reference.com/blog/index37a8.html)The scatterplot above of AV versus Score does not show a strong correlation - maybe a very weak one at best. How do physical dimensions of the players compare by position?Let's start by breaking our data into subsets positions based on role.- Linemen (both offensive and defensive)- Offense (QB, RB, FB, WR, TE)- Defense (CB and S)- Linebacker (OLB, ILB)- Special (punters and kickers)We remove the normalized and score columns.df = df.drop(['FortyNormal', 'VerticalNormal', 'BenchRepsNormal', 'BroadJumpNormal'], axis=1) df = df.drop(['ConeNormal', 'ShuttleNormal', 'Score'], axis=1) df df.Pos.unique() lineman = df[(df['Pos'] == 'OT') | (df['Pos'] == 'OG') | (df['Pos'] == 'DT') | (df['Pos'] == 'DE') |(df['Pos'] == 'C')] offense = df[(df['Pos'] == 'QB') | (df['Pos'] == 'RB') | (df['Pos'] == 'FB') | (df['Pos'] == 'WR') | (df['Pos'] == 'TE')] defense = df[(df['Pos'] == 'S') | (df['Pos'] == 'CB')] linebacker = df[(df['Pos'] == 'OLB') | (df['Pos'] == 'ILB')] special = df[(df['Pos'] == 'PK')] plt.figure(figsize = (8,7)) p1 = sns.violinplot(x = 'Pos',y = 'Ht', data = lineman) plt.title('Lineman Height by Position') p1.set(xlabel = 'Position', ylabel = 'Height') plt.figure(figsize = (8,7)) p2 = sns.violinplot(x = 'Pos',y = 'Wt', data = lineman) plt.title('Lineman Weight by Position') p2.set(xlabel = 'Position', ylabel = 'Weight') p1 = sns.relplot(x = "Year", y = "Wt", data = linemen) plt.title('Lineman Weight over Time') p1.set(xlabel = 'Year', ylabel = 'Weight') p1 = sns.relplot(x = "Year", y = "Ht", data = linemen) plt.title('Lineman Height over Time') p1.set(xlabel = 'Year', ylabel = 'Height')The offensive tackles are tallest linemen. Looks like the Center is not that tall, but weighs a lot compared to the other linemen. The weight and height over time is pretty consistent. 
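(Note on the subsetting cells above: the weight/height-over-time scatterplots reference `linemen`, while the subset was created as `lineman`, which will raise a NameError. Building the groups from a single dict with `isin` avoids that kind of mismatch. A sketch:)

```python
# Position groups built once, from one place, using isin() instead of chained
# boolean comparisons; use the same variable name everywhere afterwards.
groups = {
    'lineman':    ['OT', 'OG', 'C', 'DT', 'DE'],
    'offense':    ['QB', 'RB', 'FB', 'WR', 'TE'],
    'defense':    ['S', 'CB'],
    'linebacker': ['OLB', 'ILB'],
    'special':    ['PK'],
}
subsets = {name: df[df['Pos'].isin(pos)] for name, pos in groups.items()}
lineman = subsets['lineman']   # reuse this exact name in every plot call
```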
Linemen are typically between 72" and 80" (6' and 6'8") and between 250 and 340 pounds.plt.figure(figsize = (8,7)) p1 = sns.violinplot(x = 'Pos',y = 'Ht', data = offense) plt.title('Offense Height by Position') p1.set(xlabel = 'Position', ylabel = 'Height') plt.figure(figsize = (8,7)) p2 = sns.violinplot(x = 'Pos',y = 'Wt', data = offense) plt.title('Offense Weight by Position') p2.set(xlabel = 'Position', ylabel = 'Weight') p1 = sns.relplot(x = "Year", y = "Wt", data = offense) plt.title('Offense Weight over Time') p1.set(xlabel = 'Year', ylabel = 'Weight') p1 = sns.relplot(x = "Year", y = "Ht", data = offense) plt.title('Offense Height over Time') p1.set(xlabel = 'Year', ylabel = 'Height')Observations:- TE are the tallest- WR height varies the most- RB are the shortest- WR are the lightest (as they are most probably the fastest)- the offense height and weight has been pretty consistent over the years. Most look to be between 70 and 77 inches and between 180 and 260 pounds. Definitely lighter and short than the linemen!plt.figure(figsize = (8,7)) p1 = sns.violinplot(x = 'Pos',y = 'Ht', data = defense) plt.title('Defense Height by Position') p1.set(xlabel = 'Position', ylabel = 'Height') plt.figure(figsize = (8,7)) p2 = sns.violinplot(x = 'Pos',y = 'Wt', data = defense) plt.title('Defense Weight by Position') p2.set(xlabel = 'Position', ylabel = 'Weight') p1 = sns.relplot(x = "Year", y = "Wt", data = defense) plt.title('Defense Weight over Time') p1.set(xlabel = 'Year', ylabel = 'Weight') p1 = sns.relplot(x = "Year", y = "Ht", data = defense) plt.title('Defense Height over Time') p1.set(xlabel = 'Year', ylabel = 'Height')Obervations:- Cornerbacks are smaller than the safeties. They need to cover the wide receviers! The average size is about 185 - 215 pounds and between 69 and 74 inches.plt.figure(figsize = (8,7)) p1 = sns.violinplot(x = 'Pos',y = 'Ht', data = linebacker) plt.title('Linebacker Height by Position') p1.set(xlabel = 'Position', ylabel = 'Height') plt.figure(figsize = (8,7)) p2 = sns.violinplot(x = 'Pos',y = 'Wt', data = linebacker) plt.title('Linebacker Weight by Position') p2.set(xlabel = 'Position', ylabel = 'Weight') p1 = sns.relplot(x = "Year", y = "Wt", data = linebacker) plt.title('Linebacker Weight over Time') p1.set(xlabel = 'Year', ylabel = 'Weight') p1 = sns.relplot(x = "Year", y = "Ht", data = linebacker) plt.title('Linebacker Height over Time') p1.set(xlabel = 'Year', ylabel = 'Height')Observations:-Outside Line backers are taller-Linebacks are between 230 - 255 pounds and 72 - 76 inchesplt.figure(figsize = (8,7)) p1 = sns.violinplot(x = 'Pos',y = 'Ht', data = special) plt.title('Special Height by Position') p1.set(xlabel = 'Position', ylabel = 'Height') plt.figure(figsize = (8,7)) p2 = sns.violinplot(x = 'Pos',y = 'Wt', data = special) plt.title('Special Weight by Position') p2.set(xlabel = 'Position', ylabel = 'Weight') p1 = sns.relplot(x = "Year", y = "Wt", data = special) plt.title('Special Weight over Time') p1.set(xlabel = 'Year', ylabel = 'Weight') p1 = sns.relplot(x = "Year", y = "Ht", data = special) plt.title('Special Height over Time') p1.set(xlabel = 'Year', ylabel = 'Height')![Pytorch](../../../pytorch_logo_2018.svg) Pytorch 中级篇(5):语言模型(Language Model (RNN-LM))>参考代码>>**yunjey的 [pytorch tutorial系列](https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/02-intermediate/language_model/main.py)** 语言模型 学习资料语言模型这一块不是很想接触。就照着yunjey的代码,一带而过吧。>**博客**>>[CS224d笔记4——语言模型和循环神经网络(Recurrent Neural Network, 
RNN)](https://wugh.github.io/posts/2016/03/cs224d-notes4-recurrent-neural-networks/?utm_source=tuicool&utm_medium=referral)>>[浅谈神经网络中的梯度爆炸问题](https://www.jianshu.com/p/79574b0f2959) Pytorch 实现# 包 import torch import torch.nn as nn import numpy as np from torch.nn.utils import clip_grad_norm from data_utils import Dictionary, Corpus #data_utils代码在https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/02-intermediate/language_model/data_utils.py # 设备配置 # Device configuration torch.cuda.set_device(1) # 这句用来设置pytorch在哪块GPU上运行 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # 超参数设置 # Hyper-parameters embed_size = 128 hidden_size = 1024 num_layers = 1 num_epochs = 5 num_samples = 1000 # number of words to be sampled batch_size = 20 seq_length = 30 learning_rate = 0.002Penn Treebank 数据集corpus = Corpus() ids = corpus.get_data('data/train.txt', batch_size) vocab_size = len(corpus.dictionary) num_batches = ids.size(1) // seq_length基于RNN的语言模型class RNNLM(nn.Module): def __init__(self, vocab_size, embed_size, hidden_size, num_layers): super(RNNLM, self).__init__() self.embed = nn.Embedding(vocab_size, embed_size) self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True) self.linear = nn.Linear(hidden_size, vocab_size) def forward(self, x, h): # Embed word ids to vectors x = self.embed(x) # Forward propagate LSTM out, (h, c) = self.lstm(x, h) # Reshape output to (batch_size*sequence_length, hidden_size) out = out.reshape(out.size(0)*out.size(1), out.size(2)) # Decode hidden states of all time steps out = self.linear(out) return out, (h, c) # 实例化一个模型 model = RNNLM(vocab_size, embed_size, hidden_size, num_layers).to(device) # 定义损失函数和优化器 criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) # 定义函数:截断反向传播 def detach(states): return [state.detach() for state in states]训练模型for epoch in range(num_epochs): # 初始化隐状态和细胞状态 states = ( torch.zeros(num_layers, batch_size, hidden_size).to(device), torch.zeros(num_layers, batch_size, hidden_size).to(device) ) for i in range(0, ids.size(1) - seq_length, seq_length): # Get mini-batch inputs and targets inputs = ids[:, i:i+seq_length].to(device) targets = ids[:, (i+1):(i+1)+seq_length].to(device) # Forward pass states = detach(states) outputs, states = model(inputs, states) loss = criterion(outputs, targets.reshape(-1)) # Backward and optimize model.zero_grad() loss.backward() clip_grad_norm(model.parameters(), 0.5) optimizer.step() step = (i+1) // seq_length if step % 100 == 0: print ('Epoch [{}/{}], Step[{}/{}], Loss: {:.4f}, Perplexity: {:5.2f}' .format(epoch+1, num_epochs, step, num_batches, loss.item(), np.exp(loss.item())))/home/ubuntu/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:19: UserWarning: torch.nn.utils.clip_grad_norm is now deprecated in favor of torch.nn.utils.clip_grad_norm_.测试和保存模型with torch.no_grad(): with open('sample.txt', 'w') as f: # 初始化隐状态和细胞状态 state = (torch.zeros(num_layers, 1, hidden_size).to(device), torch.zeros(num_layers, 1, hidden_size).to(device)) # 随机选择一个单词 prob = torch.ones(vocab_size) input = torch.multinomial(prob, num_samples=1).unsqueeze(1).to(device) for i in range(num_samples): # Forward propagate RNN output, state = model(input, state) # Sample a word id prob = output.exp() word_id = torch.multinomial(prob, num_samples=1).item() # Fill input with sampled word id for the next time step input.fill_(word_id) # File write word = corpus.dictionary.idx2word[word_id] word = '\n' if word == '' else word + ' ' f.write(word) if 
(i+1) % 100 == 0: print('Sampled [{}/{}] words and save to {}'.format(i+1, num_samples, 'sample.txt')) # 保存模型 torch.save(model.state_dict(), 'model.ckpt')Kali ini kita akan re structure journal IOP SCIENCE yang membahas mengenai peng-clusteran provinsi di Indonesia. Kali ini kita akan menggunakan KMeans Clustering Method sebagai algoritmanya sesuai dengan jurnal yang ada. Import Library yang akan digunakanimport pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.cluster import KMeans from google.colab import files upload = files.upload() data = pd.read_csv('prov.csv') data data.keys() data.describe() data.head(10) data.tail() import seaborn as sns sns.pairplot(data, hue='PROVINSI', size=3) x = data.iloc[:, [1,2,3]].values #:semua baris, [kolom].nilai yang ada didalamnya x #menstandarkan ukuran variable from sklearn.preprocessing import StandardScaler scaler = StandardScaler() x_scaled = scaler.fit_transform(x) print(x_scaled) kmeans = KMeans(n_clusters=5, random_state=123) kmeans.fit(x_scaled) print(kmeans) #Menampilkan nilai centroid yang digenerate oleh classifier print(kmeans.cluster_centers_) data.info() import numpy as np print(kmeans.labels_) plt.scatter(x_scaled[:,0], x_scaled[:,1], c=kmeans.labels_, cmap='rainbow') plt.xlabel('KEPADATAN PENDUDUK') plt.ylabel('TINGKATPARTISIPASISEKOLAH ') plt.title('Grafik Cluster') plt.show() #--- Memvisualkan hasil kluster --- #--- Menambahkan Kolom "kluster" Dalam Data Frame PENDUDUK --- #import matplotlib.pyplot as plt plt.rcParams["figure.figsize"] = (5,5) #width,height data["kluster"] = kmeans.labels_ output = plt.scatter(x_scaled[:,0], x_scaled[:,1], c = data.kluster, marker = "o", alpha = 1) centers = kmeans.cluster_centers_ plt.scatter(centers[:,0], centers[:,1], c='green', alpha=1 , marker="s") plt.title("Hasil Klustering K-Means") plt.colorbar (output) plt.show() Error =[] for i in range(1, 11): kmeans_elbow = KMeans(n_clusters = i).fit(x) kmeans_elbow.fit(x) Error.append(kmeans_elbow.inertia_) import matplotlib.pyplot as plt plt.plot(range(1, 11), Error) plt.title('Elbow method') plt.xlabel('No of clusters') plt.ylabel('Error') plt.show() # ambil input KEPADATANPENDUDUK = 50000#@param {type:"number"} TINGKATPARTISIPASISEKOLAH = 13#@param {type:"number"} TINGKATPENGANGGURANTERBUKA = 20#@param {type:"number"} # buat vektor untuk uji x_coba = [[KEPADATANPENDUDUK, TINGKATPARTISIPASISEKOLAH, TINGKATPENGANGGURANTERBUKA]] x_coba = scaler.transform(x_coba) # prediksikan dengan model y_pred = kmeans.predict(x_coba) # cetak hasil print("Hasil prediksi: %s" % y_pred[0])Hasil prediksi: 1[ATM 623: Climate Modeling](../index.ipynb)[](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany Lecture 2: The zero-dimensional energy balance model About these notes:This document uses the interactive [`Jupyter notebook`](https://jupyter.org) format. 
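(One more aside on the K-Means notebook above, before the climate-modeling notes begin: the elbow-method loop fits on the unscaled `x` and calls `fit` twice per iteration, while the final model was fit on `x_scaled`. Fitting the elbow curve on the same scaled features keeps the inertia values comparable to the model actually used. A sketch:)

```python
# Elbow method on the scaled features, fitting each model only once.
inertias = []
for k in range(1, 11):
    km = KMeans(n_clusters=k, random_state=123).fit(x_scaled)
    inertias.append(km.inertia_)

plt.plot(range(1, 11), inertias, marker='o')
plt.title('Elbow method (scaled features)')
plt.xlabel('Number of clusters')
plt.ylabel('Inertia')
plt.show()
```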
The notes can be accessed in several different ways:- The interactive notebooks are hosted on `github` at https://github.com/brian-rose/ClimateModeling_courseware- The latest versions can be viewed as static web pages [rendered on nbviewer](http://nbviewer.ipython.org/github/brian-rose/ClimateModeling_courseware/blob/master/index.ipynb)- A complete snapshot of the notes as of May 2017 (end of spring semester) are [available on Brian's website](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2017/Notes/index.html).[Also here is a legacy version from 2015](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/Notes/index.html).Many of these notes make use of the `climlab` package, available at https://github.com/brian-rose/climlab Contents1. [Recap of global energy budget](section1)2. [Tuning radiative fluxes to the observations](section2)3. [Equilibrium temperature](section3)4. [A time-dependent Energy Balance Model](section4)5. [Representing time derivatives on a computer](section5)6. [Numerical solution of the Energy Balance Model](section6)7. [Analytical solution of the Energy Balance Model: e-folding time and feedback parameter](section7)8. [Summary and take-away messages](section8)# Ensure compatibility with Python 2 and 3 from __future__ import print_function, division____________ 1. Recap of the global energy budget____________ Let's look again at the observations: ![Observed global energy flows from Tren (2012)](../images/GlobalEnergyBudget.png) ____________ 2. Tuning radiative fluxes to the observations____________ Recap of our simple greenhouse modelLast class we introduced a very simple model for the **OLR** or Outgoing Longwave Radiation to space:$$ \text{OLR} = \tau \sigma T_s^4 $$where $\tau$ is the **transmissivity** of the atmosphere, a number less than 1 that represents the greenhouse effect of Earth's atmosphere. We also tuned this model to the observations by choosing $ \tau \approx 0.61$.More precisely:OLRobserved = 238.5 # in W/m2 sigma = 5.67E-8 # S-B constant Tsobserved = 288. # global average surface temperature tau = OLRobserved / sigma / Tsobserved**4 # solve for tuned value of transmissivity print(tau)Let's now deal with the shortwave (solar) side of the energy budget. Absorbed Shortwave Radiation (ASR) and Planetary AlbedoLet's define a few terms. Global mean insolationFrom the observations, the area-averaged incoming solar radiation or **insolation** is 341.3 W m$^{-2}$.Let's denote this quantity by $Q$.Q = 341.3 # the insolationPlanetary albedoSome of the incoming radiation is not absorbed at all but simply reflected back to space. Let's call this quantity $F_{reflected}$From observations we have:Freflected = 101.9 # reflected shortwave flux in W/m2The **planetary albedo** is the fraction of $Q$ that is reflected.We will denote the planetary albedo by $\alpha$.From the observations:alpha = Freflected / Q print(alpha)That is, about 30% of the incoming radiation is reflected back to space. Absorbed Shortwave RadiationThe **Absorbed Shortwave Radiation** or ASR is the part of the incoming sunlight that is *not* reflected back to space, i.e. that part that is absorbed somewhere within the Earth system.Mathematically we write$$ \text{ASR} = Q - F_{reflected} = (1-\alpha) Q $$ From the observations:ASRobserved = Q - Freflected print(ASRobserved)As we noted last time, this number is *just slightly greater* than the observed OLR of 238.5 W m$^{-2}$. ____________ 3. 
Equilibrium temperature____________ *This is one of the central concepts in climate modeling.*The Earth system is in **energy balance** when energy in = energy out, i.e. when$$ \text{ASR} = \text{OLR} $$ We want to know:- What surface temperature do we need to have this balance?- By how much would the temperature change in response to other changes in Earth system? - Changes in greenhouse gases - Changes in cloudiness - etc. With our simple greenhouse model, we can get an **exact solution** for the equilibrium temperature.First, write down our statement of energy balance:$$ (1-\alpha) Q = \tau \sigma T_s^4 $$ Rearrange to solve for $T_s$:$$ T_s^4 = \frac{(1-\alpha) Q}{\tau \sigma} $$and take the fourth root, denoting our **equilibrium temperature** as $T_{eq}$:$$ T_{eq} = \left( \frac{(1-\alpha) Q}{\tau \sigma} \right)^\frac{1}{4} $$ Plugging the observed values back in, we compute:# define a reusable function! def equilibrium_temperature(alpha,Q,tau): return ((1-alpha)*Q/(tau*sigma))**(1/4) Teq_observed = equilibrium_temperature(alpha,Q,tau) print(Teq_observed)And this equilibrium temperature is *just slightly warmer* than 288 K. Why? A climate change scenarioSuppose that, due to global warming (changes in atmospheric composition and subsequent changes in cloudiness):- The longwave transmissitivity decreases to $\tau = 0.57$ - The planetary albedo increases to $\alpha = 0.32$What is the new equilibrium temperature? For this very simple model, we can work out the answer exactly:Teq_new = equilibrium_temperature(0.32,Q,0.57) # an example of formatted print output, limiting to two or one decimal places print('The new equilibrium temperature is {:.2f} K.'.format(Teq_new)) print('The equilibrium temperature increased by about {:.1f} K.'.format(Teq_new-Teq_observed))Most climate models are more complicated mathematically, and solving directly for the equilibrium temperature will not be possible! Instead, we will be able to use the model to calculate the terms in the energy budget (ASR and OLR). Python exercise- Write Python functions to calculate ASR and OLR for *arbitrary parameter values*.- Verify the following: - With the new parameter values but the old temperature T = 288 K, is ASR greater or lesser than OLR? - Is the Earth gaining or losing energy? - How does your answer change if T = 295 K (or any other temperature greater than 291 K)? ____________ 4. A time-dependent Energy Balance Model____________ The above exercise shows us that if some properties of the climate system change in such a way that the **equilibrium temperature goes up**, then the Earth system *receives more energy from the sun than it is losing to space*. The system is **no longer in energy balance**.The temperature must then increase to get back into balance. The increase will not happen all at once! It will take time for energy to accumulate in the climate system. We want to model this **time-dependent adjustment** of the system.In fact almost all climate models are **time-dependent**, meaning the model calculates **time derivatives** (rates of change) of climate variables. An energy balance equationWe will write the **total energy budget** of the Earth system as$$ \frac{dE}{dt} = (1-\alpha) Q - OLR $$Note: **This is a generically true statement.** We have just defined some terms, and made the (very good) assumption that the only significant energy sources are radiative exchanges with space. **This equation is the starting point for EVERY CLIMATE MODEL.**But so far, we don’t actually have a MODEL. 
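(Parenthetical: one possible answer to the Python exercise above — and the two helper functions that the time-stepping cells further below assume, since they call `ASR(alpha=...)` and `OLR(T, tau=...)` without defining them here. A sketch using the tuned observational values as defaults:)

```python
sigma = 5.67E-8   # Stefan-Boltzmann constant, as above

def ASR(Q=341.3, alpha=0.299):
    """Absorbed shortwave radiation in W/m2; alpha ~ 0.3 from the observations."""
    return (1 - alpha) * Q

def OLR(T, tau=0.61):
    """Outgoing longwave radiation in W/m2 for surface temperature T in K."""
    return tau * sigma * T**4

# With the climate-change parameters but the old temperature T = 288 K,
# ASR exceeds OLR, so the Earth system is gaining energy and must warm:
print(ASR(alpha=0.32), OLR(288., tau=0.57))
```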
We just have a statement of a budget. To use this budget to make a model, we need to relate terms in the budget to state variables of the atmosphere-ocean system.For now, the state variable we are most interested in is **temperature** – because it is directly connected to the physics of each term above. If we now suppose that $$ E = C T_s $$where $T_s$ is the **global mean surface temperature**, and $C$ is a constant – the **effective heat capacity** of the atmosphere- ocean column.then our budget equation becomes: $$ C \frac{dT_s}{dt} = \text{ASR} - \text{OLR} $$ where- $C$ is the **heat capacity** of Earth system, in units of J m$^{-2}$ K$^{-1}$.- $\frac{dT}{dt}$ is the rate of change of global average surface temperature. By adopting this equation, we are assuming that the energy content of the Earth system (atmosphere, ocean, ice, etc.) is *proportional to surface temperature*.Important things to think about:- Why is this a sensible assumption?- What determines the heat capacity $C$?- What are some limitations of this assumption? For our purposes here we are going to use a value of C equivalent to heating 100 meters of water:$$C = c_w \rho_w H$$where $c_w = 4 \times 10^3$ J kg$^{-1}$ $^\circ$C$^{-1}$ is the specific heat of water,$\rho_w = 10^3$ kg m$^{-3}$ is the density of water, and$H$ is an effective depth of water that is heated or cooled.c_w = 4E3 # Specific heat of water in J/kg/K rho_w = 1E3 # Density of water in kg/m3 H = 100. # Depth of water in m C = c_w * rho_w * H # Heat capacity of the model print('The effective heat capacity is {:.1e} J/m2/K'.format(C))Solving the energy balance modelThis is a first-order Ordinary Differential Equation (ODE) for $T_s$ as a function of time. It is also our very first climate model.To solve it (i.e. see how $T_s$ evolves from some specified initial condition) we have two choices:1. Solve it analytically2. Solve it numerically Option 1 (analytical) will usually not be possible because the equations will typically be too complex and non-linear. This is why computers are our best friends in the world of climate modeling.HOWEVER it is often useful and instructive to simplify a model down to something that is analytically solvable when possible. Why? Two reasons:1. Analysis will often yield a deeper understanding of the behavior of the system2. Gives us a benchmark against which to test the results of our numerical solutions. ____________ 5. Representing time derivatives on a computer____________ Recall that the derivative is the **instantaneous rate of change**. It is defined as $$ \frac{dT}{dt} = \lim_{\Delta t\rightarrow 0}⁡ \frac{\Delta T}{\Delta t}$$- **On the computer there is no such thing as an instantaneous change.** - We are always dealing with *discrete quantities*.- So we approximate the derivative with $\Delta T/ \Delta t$. - So long as we take the time interval $\Delta t$ "small enough", the approximation is valid and useful.- (The meaning of "small enough" varies widely in practice. Let's not talk about it now) So we write our model as$$ C \frac{\Delta T}{\Delta t} \approx \text{ASR} - \text{OLR}$$where $\Delta T$ is the **change in temperature predicted by our model** over a short time interval $\Delta t$. We can now use this to **make a prediction**: Given a current temperature $T_1$ at time $t_1$, what is the temperature $T_2$ at a future time $t_2$? 
We can write$$ \Delta T = T_2-T_1 $$$$ \Delta t = t_2-t_1 $$and so our model says$$ C \frac{T_2-T_1}{\Delta t} = \text{ASR} - \text{OLR} $$Which we can rearrange to **solve for the future temperature**:$$ T_2 = T_1 + \frac{\Delta t}{C} \left( \text{ASR} - \text{OLR}(T_1) \right) $$We now have a formula with which to make our prediction!Notice that we have written the OLR as a *function of temperature*. We will use the current temperature $T_1$ to compute the OLR, and use that OLR to determine the future temperature. ____________ 6. Numerical solution of the Energy Balance Model____________ The quantity $\Delta t$ is called a **timestep**. It is the smallest time interval represented in our model.Here we're going to use a timestep of 1 year:dt = 60. * 60. * 24. * 365. # one year expressed in seconds # Try a single timestep, assuming we have working functions for ASR and OLR T1 = 288. T2 = T1 + dt / C * ( ASR(alpha=0.32) - OLR(T1, tau=0.57) ) print(T2)What happened? Why? Try another timestepT1 = T2 T2 = T1 + dt / C * ( ASR(alpha=0.32) - OLR(T1, tau=0.57) ) print(T2)Warmed up again, but by a smaller amount. But this is tedious typing. Time to **define a function** to make things easier and more reliable:def step_forward(T): return T + dt / C * ( ASR(alpha=0.32) - OLR(T, tau=0.57) )Try it out with an arbitrary temperature:step_forward(300.)Notice that our function calls other functions and variables we have already defined. Python fact 10: Functions can access variables and other functions defined outside of the function. This is both very useful and occasionally confusing. Now let's really harness the power of the computer by **making a loop** (and storing values in arrays):import numpy as np numsteps = 20 Tsteps = np.zeros(numsteps+1) Years = np.zeros(numsteps+1) Tsteps[0] = 288. for n in range(numsteps): Years[n+1] = n+1 Tsteps[n+1] = step_forward( Tsteps[n] ) print(Tsteps)What did we just do?- Created an array of zeros- set the initial temperature to 288 K- repeated our time step 20 times. - Stored the results of each time step into the array. Python fact 11: the `for` statement executes a statement (or series of statements) a specified number of times (a loop!) Python fact 12: Use square bracket [ ] to refer to elements of an array or list. Use round parentheses ( ) for function arguments. Plotting the resultNow let's draw a picture of our result!# a special instruction for the Jupyter notebook # Display all plots inline in the notebook %matplotlib inline # import the plotting package import matplotlib.pyplot as plt plt.plot( Years, Tsteps ) plt.xlabel('Years') plt.ylabel('Global mean temperature (K)');Note how the temperature *adjusts smoothly toward the equilibrium temperature*, that is, the temperature at whichASR = OLR.**If the planetary energy budget is out of balance, the temperature must change so that the OLR gets closer to the ASR!**The adjustment is actually an *exponential decay* process: The rate of adjustment slows as the temperature approaches equilibrium. The temperature gets very very close to equilibrium but never reaches it exactly. Python fact 13: We can easily make simple graphs with the function `plt.plot(x,y)`, where `x` and `y` are arrays of the same size. But we must import it first. This is actually not native Python, but uses a special graphics library called `matplotlib`. Just about all of our notebooks will start with this:```%matplotlib inlineimport numpy as npimport matplotlib.pyplot as plt``` ____________ 7. 
Analytical solution of the Energy Balance Model: e-folding time and feedback parameter____________ Equilibrium solutionsWe've already seen that the equilibrium solution of the model is$$ T_{eq} = \left( \frac{(1-\alpha) Q}{\tau \sigma} \right)^\frac{1}{4} $$and tuned the model parameter based on this relationship. We are going to **linearize the equation** for small perturbations away from this equilibrium.Let $T_s = T_{eq} + T_s^\prime$ and restrict our solution to $T_s^\prime << T_{eq}$.Note this this is not a big restriction! For example, a 10 degree warming or cooling is just $\pm$3.4% of the absolute equilibrium temperature. Linearizing the governing equationNow use a first-order Taylor series expansion to write$$ \text{OLR} = \tau \sigma T_s^4 $$$$OLR = \tau \sigma T_s^4 = \tau \sigma \left( T_{eq} + T_s^\prime \right)^4 \approx \tau \sigma \left( T_{eq}^4 + 4 T_{eq}^3 T_s^\prime \right) $$ and the budget for the perturbation temperature thus becomes$$C \frac{d T_s^\prime}{d t} = -\lambda_0 T_s^\prime$$where we define$$\lambda_0 = 4 \tau \sigma T_{eq}^3 $$ Putting in our observational values, we getlambda_0 = 4 * sigma * tau * Teq_observed**3 # This is an example of formatted text output in Python print( 'lambda_0 = {:.2f} W m-2 K-1'.format(lambda_0) )This is actually our first estimate of what is often called the **Planck feedback**. It is the tendency for a warm surface to cool by increased longwave radiation to space. It may also be refered to as the "no-feedback" climate response parameter. As we will see, $\lambda_0$ quantifies the sensitivity of the climate system in the absence of any actual feedback processes. Solve the linear ODENow define$$ t^* = \frac{C}{\lambda_0} $$This is a positive constant with dimensions of time (seconds). With these definitions the temperature evolves according to$$ \frac{d T_s^\prime}{d t} = - \frac{T_s^\prime}{t^*}$$This is one of the simplest ODEs. Hopefully it looks familiar to most of you. It is the equation for an **exponential decay** process. We can easily solve for the temperature evolution by integrating from an initial condition $T_s^\prime(0)$:$$ \int_{T_s^\prime(0)}^{T_s^\prime(t)} \frac{d T_s^\prime}{T_s^\prime} = -\int_0^t \frac{dt}{t^*}$$$$\ln \bigg( \frac{T_s^\prime(t)}{T_s^\prime(0)} \bigg) = -\frac{t}{t^*}$$$$T_s^\prime(t) = T_s^\prime(0) \exp \bigg(-\frac{t}{t^*} \bigg)$$I hope that the mathematics is straightforward for everyone in this class. If not, go through it carefully and make sure you understand each step. e-folding time for relaxation of global mean temperatureOur model says that surface temperature will relax toward its equilibrium value over a characteristic time scale $t^*$. This is an **e-folding time** – the time it takes for the perturbation to decay by a factor $1/e = 0.37$*What should this timescale be for the climate system?*To estimate $t^*$ we need a value for the effective heat capacity $C$.Our "quick and dirty" estimate above used 100 meters of water to set this heat capacity. What is the right choice for water depth $H$? That turns out to be an interesting and subtle question. It depends very much on the timescale of the problem- days?- years?- decades?- millenia? We will revisit this question later in the course. For now, let’s just continue assuming $H = 100$ m (a bit deeper than the typical depth of the surface mixed layer in the oceans).Now calculate the e-folding time for the surface temperature:tstar = C / lambda_0 # Calculated value of relaxation time constant seconds_per_year = 60.*60.*24.*365. 
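# (Added sketch) Sanity check of the analytical solution derived above:
# T'(t) = T'(0) * exp(-t / tstar), so after one e-folding time only 1/e
# (about 37%) of an initial anomaly remains. With a hypothetical 10 K perturbation:
T_prime_remaining = 10. * np.exp(-1.)   # roughly 3.7 K left after t = tstar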
print( 'The e-folding time is {:1.2e} seconds or about {:1.0f} years.'.format(tstar, tstar / seconds_per_year))This is a rather fast timescale relative to other processes that can affect the planetary energy budget. **But notice that the climate feedback parameter $\lambda$ is smaller, the timescale gets longer.** We will come back to this later. ____________ 8. Summary and take-away messages____________ - We looked at the flows of energy in and out of the Earth system. - These are determined by radiation at the top of the Earth's atmosphere.- Any imbalance between shortwave absorption (ASR) and longwave emission (OLR) drives a change in temperature- Using this idea, we built a climate model!- This **Zero-Dimensional Energy Balance Model** solves for the global, annual mean surface temperature $T_s$- Two key assumptions: - Energy content of the Earth system varies proportionally to $T_s$ - The OLR increases as $\tau \sigma T_s^4$ (our simple greenhouse model)- Earth (or any planet) has a well-defined **equilibrium temperature** at which ASR = OLR, because of the *temperature dependence of the outgoing longwave radiation*.- The system will tend to relax toward its equilibrium temperature on an $e$-folding timescale that depends on - (1) radiative feedback processes, and - (2) effective heat capacity.- In our estimate, this e-folding time is relatively short. *In the absence of other processes that can either increase the heat capacity or lower (in absolute value) the feedback parameter, the Earth would never be very far out of energy balance.*- We will quantify this statement more as the term progresses. [Back to ATM 623 notebook home](../index.ipynb) ____________ Version information____________%load_ext version_information %version_information numpy, matplotlib* numpy is the fundamental package for scientific computing with Python. * h5py is a common package to interact with a dataset that is stored on an H5 file. * matplotlib is a famous library to plot graphs in Python. * PIL and scipy are used here to test your model with your own picture at the end. 1. training set labbeled with classes2. test set labbeld with classes3. no of training examples4. no of testing examples5. height and width for each data6. size of each image or video or skeleton7. flatten the input8. Standardize/Normalize the data9. initialize model parameters10. learn parameters for the model by minimizing the cost11. use the learned parameters to make predictions (on test set)12. Analyse results and conclude Steps dor building a neural Network1. Define the model structure ( no of input features )2. Initialize the model parameters3. 
Loop: - Calculate current loss (forward propagation) - Calculate current gradient (backward propagation) - Update parameters (gradient descent) You often build 1-3 separately and integrate them into one function we call `model()`.* with some no of iterations* You get train accuracy * You get test accuracy * plot learning rate through cost values and no of iterations# Load the TensorBoard notebook extension %load_ext tensorboard import tensorflow as tf import datetime # Clear any logs from previous runs !rm -rf ./logs/ mnist = tf.keras.datasets.mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 x_train[1][1].shape class Net(tf.keras.Model): """A simple linear model.""" def __init__(self): super(Net, self).__init__() self.l1 = tf.keras.layers.Dense(5) def call(self, x): return self.l1(x) net = create_model() net.save_weights('easy_checkpoint') def train_step(net, example, optimizer): """Trains `net` on `example` using `optimizer`.""" with tf.GradientTape() as tape: output = net(example['x']) loss = tf.reduce_mean(tf.abs(output - example['y'])) variables = net.trainable_variables gradients = tape.gradient(loss, variables) optimizer.apply_gradients(zip(gradients, variables)) return loss def toy_dataset(): inputs = tf.range(10.)[:, None] labels = inputs * 5. + tf.range(5.)[None, :] return tf.data.Dataset.from_tensor_slices( dict(x=inputs, y=labels)).repeat().batch(2) from tensorflow.keras.callbacks import EarlyStopping earlystop_callback = EarlyStopping( monitor='val_accuracy', min_delta=0.0001, patience=1) opt = tf.keras.optimizers.Adam(0.1) dataset = toy_dataset() iterator = iter(dataset) ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=net, iterator=iterator) manager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3) def train_and_checkpoint(net, manager): ckpt.restore(manager.latest_checkpoint) if manager.latest_checkpoint: print("Restored from {}".format(manager.latest_checkpoint)) else: print("Initializing from scratch.") for _ in range(50): example = next(iterator) loss = train_step(net, example, opt) ckpt.step.assign_add(1) if int(ckpt.step) % 10 == 0: save_path = manager.save() print("Saved checkpoint for step {}: {}".format(int(ckpt.step), save_path)) print("loss {:1.2f}".format(loss.numpy())) train_and_checkpoint(net, manager) opt = tf.keras.optimizers.Adam(0.1) net = Net() dataset = toy_dataset() iterator = iter(dataset) ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=net, iterator=iterator) manager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3) train_and_checkpoint(net, manager) print(manager.checkpoints) # List the three remaining checkpoints ls ./tf_ckpts def create_model(): return tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(512, activation='relu'), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(10, activation='softmax') ]) model = create_model() model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1) model.fit(x=x_train, y=y_train, epochs=5, validation_data=(x_test, y_test), callbacks=[tensorboard_callback]) # launch tensorboard %tensorboard --logdir logs/fit model.summary() predictions = model(x_train[:1]).numpy() predictions tf.nn.softmax(predictions).numpy() loss_fn = 
tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_fn(y_train[:1], predictions).numpy() model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy']) model.fit(x_train, y_train, epochs=5) model.evaluate(x_test, y_test, verbose=2) probability_model = tf.keras.Sequential([ model, tf.keras.layers.Softmax() ]) probability_model(x_test[:5])MoreStructureKaggle score: References:1. https://www.kaggle.com/toregil/welcome-to-deep-learning-cnn-99 Run nameimport time project_name = 'DigitRecognizer' step_name = 'MoreStructure' date_str = time.strftime("%Y%m%d", time.localtime()) time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime()) run_name = '%s_%s_%s' % (project_name, step_name, time_str) print('run_name: %s' % run_name) t0 = time.time()run_name: DigitRecognizer_MoreStructure_20180417_212636Important Paramsbatch_size = 128 input_size = None test_size = 0.05 random_state = NoneImport PKGsimport numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.image as mpimg %matplotlib inline from IPython.display import display import os import gc import math import shutil import zipfile import pickle import h5py from PIL import Image from tqdm import tqdm from multiprocessing import cpu_count from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, accuracy_scoreBasic folderscwd = os.getcwd() input_folder = os.path.join(cwd, 'input') log_folder = os.path.join(cwd, 'log') model_folder = os.path.join(cwd, 'model') output_folder = os.path.join(cwd, 'output') print('input_folder: \t\t%s' % input_folder) print('log_folder: \t\t%s' % log_folder) print('model_folder: \t\t%s' % model_folder) print('output_folder: \t\t%s'% output_folder) train_csv_file = os.path.join(input_folder, 'train.csv') test_csv_file = os.path.join(input_folder, 'test.csv') print('\ntrain_csv_file: \t%s' % train_csv_file) print('test_csv_file: \t\t%s' % test_csv_file) processed_data_file = os.path.join(input_folder, 'DigitRecognizer_Preprocess.p') print('processed_data_file: \t%s' % processed_data_file)input_folder: D:\Kaggle\digit-recognizer\input log_folder: D:\Kaggle\digit-recognizer\log model_folder: D:\Kaggle\digit-recognizer\model output_folder: D:\Kaggle\digit-recognizer\output train_csv_file: D:\Kaggle\digit-recognizer\input\train.csv test_csv_file: D:\Kaggle\digit-recognizer\input\test.csv processed_data_file: D:\Kaggle\digit-recognizer\input\DigitRecognizer_Preprocess.pBasic functionsimport sys def describe(arr): print(arr.shape, arr.min(), arr.max(), sys.getsizeof(arr)) def show_data_images(rows, fig_column, y_data, *args): columns = len(args) figs, axes = plt.subplots(rows, columns, figsize=(rows, fig_column*columns)) print(axes.shape) for i, ax in enumerate(axes): y_data_str = '' if type(y_data) != type(None): y_data_str = '_' + str(y_data[i]) ax[0].set_title('28x28' + y_data_str) for j, arg in enumerate(args): ax[j].imshow(arg[i])Load datadef save_data(x_data, y_data, x_test, file_name): if os.path.exists(file_name): os.remove(file_name) print('File removed: \t%s' % file_name) with h5py.File(file_name) as h: h.create_dataset('x_data', data=x_data) h.create_dataset('y_data', data=y_data) h.create_dataset('x_test', data=x_test) print('File saved: \t%s' % file_name) def load_data(file_name): with h5py.File(file_name, 'r') as h: x_data = np.array(h['x_data']) y_data = np.array(h['y_data']) x_test = np.array(h['x_test']) print('File loaded: \t%s' % file_name) return x_data, y_data, x_test x_data, y_data, x_test = 
load_data(processed_data_file) print(x_data.shape) print(y_data.shape) print(x_test.shape) # Preview data # index = 0 # fig, ax = plt.subplots(2, 2, figsize=(12,6)) # ax[0, 0].plot(x_data[index].reshape(784,)) # ax[0, 0].set_title('784x1 data') # ax[0, 1].imshow(x_data[index].reshape(28,28), cmap='gray') # ax[0, 1].set_title('28x28 data => ' + str(y_data[index])) # ax[1, 0].plot(x_test[index].reshape(784,)) # ax[1, 0].set_title('784x1 data') # ax[1, 1].imshow(x_test[index].reshape(28,28), cmap='gray') # ax[1, 1].set_title('28x28 data') # plt.show() x_data = x_data[:, :, :, np.newaxis] x_test = x_test[:, :, :, np.newaxis]Resize imagesif input_size == None: input_size = x_data.shape[1] input_shape = (input_size, input_size, 1) print('input_shape: ', input_shape) # x_train, x_val, y_train, y_val = train_test_split(x_data, y_data, test_size=test_size, random_state=random_state) # describe(x_train) # describe(x_val) # describe(y_train) # describe(y_val) from keras.utils.np_utils import to_categorical print(y_data[0]) y_data_cat = to_categorical(y_data) print(y_data[0])Using TensorFlow backend.Build modelfrom keras.models import Sequential, Model from keras.layers import Dense, Dropout, Input, Flatten, Conv2D, MaxPooling2D, BatchNormalization, GlobalAveragePooling2D, GlobalMaxPooling2D, AveragePooling2D from keras.layers.merge import Concatenate from keras.optimizers import Adam from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import LearningRateScheduler, TensorBoard def build_model(input_shape): model = Sequential() # Block 1 model.add(Conv2D(filters = 32, kernel_size = (3, 3), activation='relu', padding = 'Same', input_shape = input_shape)) model.add(BatchNormalization()) model.add(Conv2D(filters = 32, kernel_size = (3, 3), activation='relu', padding = 'Same')) model.add(BatchNormalization()) model.add(MaxPooling2D(strides=(2,2))) model.add(Dropout(0.25)) # Block 2 model.add(Conv2D(filters = 64, kernel_size = (3, 3), activation='relu', padding = 'Same')) model.add(BatchNormalization()) model.add(Conv2D(filters = 64, kernel_size = (3, 3), activation='relu', padding = 'Same')) model.add(BatchNormalization()) model.add(MaxPooling2D(strides=(2,2))) model.add(Dropout(0.25)) # Block 3 model.add(Conv2D(filters = 64, kernel_size = (3, 3), activation='relu', padding = 'Same')) model.add(BatchNormalization()) model.add(Conv2D(filters = 64, kernel_size = (3, 3), activation='relu', padding = 'Same')) model.add(BatchNormalization()) model.add(MaxPooling2D(strides=(2,2))) model.add(Dropout(0.25)) # Output model.add(Flatten()) model.add(Dense(1024, activation='relu')) model.add(Dropout(0.25)) model.add(Dense(1024, activation='relu')) model.add(Dropout(0.25)) model.add(Dense(10, activation='softmax')) model.compile( optimizer=Adam(lr=1e-4), loss='categorical_crossentropy', metrics=['accuracy'] ) return model model = build_model(input_shape) model.summary() def saveModel(model, run_name): cwd = os.getcwd() modelPath = os.path.join(cwd, 'model') if not os.path.isdir(modelPath): os.mkdir(modelPath) weigthsFile = os.path.join(modelPath, run_name + '.h5') model.save(weigthsFile) # saveModel(model, 'saveModel_test') # annealer = LearningRateScheduler(lambda x: 1e-3 * 0.995 ** x) def get_lr(x): lr = round(3e-4 * 0.9 ** x, 12) if lr < 1e-12: lr = 1e-12 print('%.12f' % lr, end=' ') return lr annealer = LearningRateScheduler(get_lr) # log_dir = os.path.join(log_path, run_name) # print('log_dir:' + log_dir) # tensorBoard = TensorBoard(log_dir=log_dir) callbacks = [] # callbacks = [annealer] 
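# Note (added): `annealer` is defined above but never added to `callbacks`, and the
# ImageDataGenerator created just below is not used either, because training later
# calls model.fit() on the raw arrays, so no learning-rate schedule or augmentation
# is actually applied. To use both, something along these lines would be needed:
#   callbacks = [annealer]
#   hist = model.fit_generator(
#       train_datagen.flow(x_data, y_data_cat, batch_size=batch_size),
#       epochs=2, callbacks=callbacks, validation_data=(x_val, y_val_cat))
# (x_val / y_val_cat are hypothetical hold-out arrays from train_test_split.)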
train_datagen = ImageDataGenerator( rotation_range=20, height_shift_range=0.2, width_shift_range=0.2, shear_range=0.1, zoom_range=0.2, channel_shift_range=20, # horizontal_flip=True, # vertical_flip=True, fill_mode='wrap' ) val_datagen = ImageDataGenerator() cpu_amount = cpu_count() print(cpu_amount) hist = model.fit( x_data, y_data_cat, batch_size=batch_size, epochs=2, verbose=2, callbacks=callbacks, validation_split=test_size, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None ) # %%time # batch_size = 128 # # steps_per_epoch = x_train.shape[0] / batch_size # hist = model.fit_generator( # train_datagen.flow(x_train, y_train, batch_size=batch_size, seed=random_state), # # steps_per_epoch=steps_per_epoch, # epochs=20, #Increase this when not on Kaggle kernel # verbose=1, #1 for ETA, 0 for silentrandom_state # callbacks=callbacks, # max_queue_size=batch_size, # workers=cpu_amount, # validation_steps=1000, # validation_data=val_datagen.flow(x_val, y_val, batch_size=32, seed=random_state) # ) final_loss, final_acc = model.evaluate(x_data, y_data_cat, verbose=0) print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss, final_acc)) final_acc_str = '{0:0>4}'.format(int(final_acc*10000)) run_name_acc = project_name + '_' + step_name + '_' + time_str + '_' + final_acc_str print(run_name_acc) histories = pd.DataFrame(hist.history) histories['epoch'] = hist.epoch print(histories.columns) histories_file = os.path.join(model_folder, run_name_acc + '.csv') histories.to_csv(histories_file, index=False) plt.plot(hist.history['loss'], color='b') plt.plot(hist.history['val_loss'], color='r') plt.show() plt.plot(hist.history['acc'], color='b') plt.plot(hist.history['val_acc'], color='r') plt.show() saveModel(model, run_name_acc) # y_hat = model.predict(x_val) # y_pred = np.argmax(y_hat, axis=1) # y_true = np.argmax(y_val, axis=1) # cm = confusion_matrix(y_true, y_pred) # print(cm) from sklearn.metrics import confusion_matrix, accuracy_score def show_diffs(model, x_data, y_data, labels): y_hat = model.predict(x_data) y_pred = np.argmax(y_hat, axis=1) y_true = np.argmax(y_data, axis=1) print('accuracy_score: %s' % accuracy_score(y_true, y_pred)) cm = confusion_matrix(y_true, y_pred, labels) print(cm) indexes = [] diffs = [] for i, p in enumerate(zip(y_true, y_pred)): if p[0] != p[1]: indexes.append(i) diffs.append(p) count = len(indexes) print('count: %s' % count) # figs, axes = plt.subplots(count, 1, figsize=(count,50)) # print(axes.shape) # for i, p in enumerate(diffs): # axes[i].imshow(x_val[indexes[i]].reshape(28, 28), cmap='gray') # axes[i].set_title('%s -> %s' % (p[0], p[1])) show_diffs(model, x_data, y_data_cat, labels=list(range(0, 10)))accuracy_score: 0.973476190476 [[4099 3 2 0 0 1 18 0 3 6] [ 1 4637 17 15 1 0 2 4 2 5] [ 10 21 4010 60 7 0 3 45 18 3] [ 5 5 12 4268 0 15 0 17 9 20] [ 3 12 3 0 3876 0 22 3 1 152] [ 4 2 1 30 0 3727 15 2 3 11] [ 14 4 1 1 3 9 4099 0 6 0] [ 3 11 18 11 9 0 0 4248 2 99] [ 11 22 9 41 11 21 18 6 3849 75] [ 19 8 1 27 4 15 1 35 5 4073]] count: 1114Predictif not os.path.exists(output_folder): os.mkdir(output_folder) pred_file = os.path.join(output_folder, run_name_acc + '.csv') print(pred_file) y_data_proba = model.predict(x_data, batch_size=batch_size) print('y_data_proba.shape: ', y_data_proba.shape) y_test_proba = model.predict(x_test, batch_size=batch_size) print('y_test_proba.shape: ', y_test_proba.shape) def save_proba(y_data_proba, y_data, y_test_proba, file_name): if 
os.path.exists(file_name): os.remove(file_name) print('File removed: \t%s' % file_name) with h5py.File(file_name) as h: h.create_dataset('y_data_proba', data=y_data_proba) h.create_dataset('y_data', data=y_data) h.create_dataset('y_test_proba', data=y_test_proba) print('File saved: \t%s' % file_name) def load_proba(file_name): with h5py.File(file_name, 'r') as h: y_data_proba = np.array(h['y_data_proba']) y_data = np.array(h['y_data']) y_test_proba = np.array(h['y_test_proba']) print('File loaded: \t%s' % file_name) return y_data_proba, y_data, y_test_proba y_proba_file = os.path.join(model_folder, '%s.p' % run_name_acc) save_proba(y_data_proba, y_data, y_test_proba, y_proba_file) y_data_proba, y_data, y_test_proba = load_proba(y_proba_file) print(y_data_proba.shape) print(y_data.shape) print(y_test_proba.shape) y_test_pred = np.argmax(y_test_proba, axis=1) print('y_test_pred.shape: ', y_test_pred.shape) with open(pred_file, 'w') as f: f.write('ImageId,Label\n') for i in range(len(y_test_pred)) : f.write("".join([str(i+1),',',str(y_test_pred[i]),'\n'])) print(run_name_acc) t1 = time.time() print('time cost: %.2f s' % (t1-t0)) print('Done!')DigitRecognizer_MoreStructure_20180417_212636_9734 time cost: 279.37 s Done!Part 8: Time to Get TRIGGEREDLet's take a detour into what's inside our network. We've been ignoring this so far so we can get a feel for what the network does, how to train it, how to perturb data to make things better, and we got a glimpse of some of the numbers (*hyperparameters*) that tune the network.We discussed briefly that the network is made up of *perceptrons*. It's time to look at a single perceptron. But first let's see where perceptrons come from. Enter the NeuronImage a brain. Could be yours, or someone else's, or an animal brain, particularly if you've taken biology recently and done some dissection. If you take a strong microscope and zoom in on the contents of the brain, a lot of what you'll find are *neurons*. A neuron is a cell that listens to connections from other neurons, which pass along small amounts of electricity using chemical reactions. The neuron listens harder - gives more *weight* - to some of the neurons that are sending to it, and less to other connections, and the neuron adds up how much signal it's hearing overall. If what it's hearing is enough, the neuron will get triggered and send signals to other neurons it's connected to. The Easy-Going NeuronLet's pretend you're Bruce, and you are not easily triggered - you're a calm person and don't spread rumors or excitement very easily. Maybe you don't like noise very much, or think that most rumors are dumb... But let's say you have two classmates who are spreading rumors. Now you know that Cathy is very chatty and spreads rumors all the time. You don't usually spend much time listening to Cathy: you give her less *weight* in your decision to pass on a rumor. But Bob rarely passes rumors on, so when Bob speaks you tend to listen harder, to give him more *weight*. And let's say you might spread a rumor to your friend Yvette if you decide it's good enough to pass on.Now let's say Cathy is talking and spreading her usual rumors in her usual excitable way. She's saying a lot but you tend to tune her out. You don't get triggered - you don't pass on any of her rumors.Now let's say Bob quietly mentions something someone did earlier in class. Bob's not very excited so you decide not to get triggered here either. 
You don't think you need to tell Yvette about what Bob is saying.But now Bob and Cathy both start talking at the same time, Cathy more softly but Bob a bit more loudly than usual, and when you put the information together you decide to to tell Yvette. You just got triggered.Or let's say another day Bob is quiet but Cathy is really excited, way beyond her usual. She manages to get your attention and you decide to pass on her rumor. She overcame the small weight you gave her by being excited enough. Triggered again! The Excitable NeuronNow let's say you're Cathy the Chatty Neuron. You like spreading rumors and talking a lot, which already makes you easily triggered. You're listening to three friends John, Mary, and Joan. But your weight for each of them is high. So high that you get triggered on anything your friends say above a whisper, and tell Yvette and Bruce.You're easily triggered, it doesn't take much to get you going.Let's say John is whispering a new rumor, but Mary and Joan are quiet right now. Not quite enough to get excited about, so you stay quiet.But let's say Mary joins in. Now it's more interesting... and you decide to tell Yvette and Bruce. *Triggered.* Making Squishy Brains Into MathOK so we get the idea of a neuron just a bit. But of course, at least at this point in time, our computers are great at math and passing electrical signals around a CPU chip, but aren't so good at being squishy, neuron-filled brains. We need a computer-neuron. So a bunch of years ago was created the *perceptron*.A perceptron keeps track of which other perceptrons are connected to it, how hard it listens to each one of them (the *weights*), and a number, the *bias*, that indicates how soon it gets triggered, with a low bias meaning excitable like Cathy, and a high bias meaning laid back like Bruce. When the perceptrons it listens to give it enough signal, and the neuron cares enough to give them enough weight to get bigger than its bias, it triggers and passes a number greater than zero to the perceptrons it sends signal to. When not triggered, the perceptron sends zero to its friends. Time to Give It a TryThe code below simulates a perceptron with 2 inputs 'Bob' and 'Mary', and their 2 weights, a bias, and 1 output to Yvette. If you want to act more like Bruce, set your bias closer to 1.0 or even higher. If you want to be more like Cathy, try 0.1. And you can give your friends different weights as well, listening harder to one or the other, then try setting how loud your friends are speaking and see if you get TRIGGERED.from ipywidgets import interact prefix=" " @interact(bob=(0.0,1.0,0.1), bobWeight=(0.0,1.0,0.1), mary=(0.0,1.0,0.1), maryWeight=(0.0,1.0,0.1), bias=(0.0,2.0,0.1)) def amITriggered(bob=0.1, mary=0.0, bobWeight=0.2, maryWeight=0.3, bias=0.5): global prefix bobInput = (bob * bobWeight) maryInput = (mary * maryWeight) sumOfInputs = bobInput + maryInput if sumOfInputs >= bias: output = "TRIGGERED! ==output==> 1.0" topMarquee = "\\||||||||/" bottomMarquee = "/||||||||\\" else: output = "Not triggered ==output==> 0.0" topMarquee = "" bottomMarquee = prefix + ". . . . . ." if prefix == "": prefix = " " else: prefix = "" print("Bob = %1.1f * (w=%1.1f) = %1.2f %s" % (bob, bobWeight, bobInput, topMarquee)) print(" + --> %1.2f >= %1.2f ? %s" % (sumOfInputs, bias, output)) print("Mary = %1.1f * (w=%1.1f) = %1.2f %s" % (mary, maryWeight, maryInput, bottomMarquee))Example of RBC `external` function The `external` feature provides a way of calling external functions from UDF/UDTFs. 
As a starting point of this notebook, let's connect to the OmniSciDB server and insert some test dataimport numpy as np from rbc.omniscidb import RemoteOmnisci omnisci = RemoteOmnisci(user='admin', password='', host='127.0.0.1', port=6274, dbname='omnisci')Now, let's insert some test data... Which consists of 5 negative integersSQL=lambda query: np.array(list(omnisci.sql_execute(query)[1])) SQL('DROP TABLE IF EXISTS test_data'); SQL('CREATE TABLE IF NOT EXISTS test_data (X BIGINT)') omnisci.load_table_columnar('test_data', X=[-1, -2, -3, -4, -5]) SQL('SELECT * FROM test_data')Declare and use external functions RBC defines a helper function `external` which can be used to define and call external functions. e.g. function defined by the server, an external library or from the C standard library. It expects the function declaration in the following forms: As a C function declaration:```pythonfrom rbc.external import externalfn = external('int64 abs(int64)')``` Function name using a keyword argument```pythonfrom rbc.external import externalfn = external('int64(int64)', name='abs')``` A list of signatures```pythonfrom rbc.external import externalfn = external(['i32(i32)', 'i64(i64)'], name='abs')``` Usage One can use external function from any jitted functions. For instance, let's define and use the `abs` function from the `cmath` library:from rbc.external import external abs = external('int64 abs(int64)')Let's now declare a function that uses the `abs`@omnisci('int64(int64)') def use_abs(i): return abs(i) result = SQL('SELECT X, use_abs(X) FROM test_data') for r, v in result: print(f'abs({r}) = {v}')abs(-1) = 1 abs(-2) = 2 abs(-3) = 3 abs(-4) = 4 abs(-5) = 5Caveats Notice that `abs` is not callable from pure python# NBVAL_RAISES_EXCEPTION abs(-3)Exploratory Data Analysis and Feature Engineering, PhDTo learn more about Python, refeer to the following websites* Python : www.python.org* W3Schools : www.w3schools.com/pythonTo learn more about the Python packages we explore in this notebook, refeer to the following websites* NumPy : www.numpy.org* Matplotlib : www.matplotlib.org* Pandas : https://pandas.pydata.org* Scikit-Learn : https://scikit-learn.org/* Seaborn: https://seaborn.pydata.org/* StatsModel : https://www.statsmodels.orgimport numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as snsLoad Datafile_name = 'https://raw.githubusercontent.com/SumuduTennakoon/MLFoundations/main/Datasets/income_data.csv' # Load CSV File data = pd.read_csv(file_name) data.sample(20) data.shape data.describe(include='all').transpose() data.describe().transpose() data.columns data.dtypesDrop Unnecessary Columnsdata.drop(labels='Unnamed: 0', axis=1, inplace=True) data.drop(labels=1, axis=0) data.head()Quering Datadata.query("age < 60 and age > 30")Create Subset of DataFrame macthing a conditiondata_age_90 = data.query("age==90") data_age_90.shapeCreate a New Column exlauating Logical Expressiondata['is_age_gt_30'] = data['age']>30 data[['age','is_age_gt_30']].head() data.loc[data['age']==90].head()Drop Rows with Missing Valuesdata['class'].isna() data.loc[data['class'].isna()] # Numpy NaN a = np.NaN data.dropna(how='any', axis=0, inplace=True) data.loc[data['class'].isna()] data.shapeCreate Unique IDdata.head() data.index data['ID'] = data.index+1 data.head()Rearranging Columnsdata.columns data = data[['ID', 'age', 'workclass', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 
'class']] data.head() data.describe(include='all').transpose()Descriptive Statistics For Numerical Columns# mean # median # min/max # std data['age'].describe() f,ax0 = plt.subplots() ax1 = ax0.twinx() data['age'].plot(kind='hist', bins=10, ax=ax0) data['age'].plot(kind='kde', c='red', ax=ax1) sns.displot(kind='hist', x='age', data=data, bins=10, kde=True) data['age'].plot(kind='box') sns.boxplot(y="age", data=data)Exploring Categorical Variablesdata['education'].head() edu_vals = data['education'].unique() edu_vals len(edu_vals) data['education'].nunique() data['education'].value_counts() data['education'].value_counts().plot(kind='bar') edu_values = data['education'].value_counts() edu_values type(edu_values) edu_values.index edu_values.values data.head().values data['education'].describe() data['age'].max() data['scaled_age'] = data['age']/data['age'].max() data['scaled_age']Value Countsdata['education'].value_counts()Group Bydata.groupby(by='education')['ID'].count() data.groupby(by='education')['age'].min() data.groupby(by='education')['age'].max() data.groupby(by='education').agg({'age': ['min', 'max', 'mean', 'std', 'sum']}) data['workclass'].head() data['workclass'].unique() data.groupby(by=['education', 'workclass']).agg({'age': ['min', 'max', 'mean']})Formulating ML Problem to Solvedata.head() data['class'].unique() # <=50K -> 0 # >50K -> 1 # y = f(X)Replace Values (Method 1)data.loc[data['class']==' <=50K.', 'class'] = ' <=50K' data.loc[data['class']==' >50K.', 'class'] = ' >50K'Replace Values (Method 2/recomended)data['class'].replace(' >50K.', ' >50K') data['class'].replace(' <=50K.', ' <=50K') data['class'].unique()Transform into Binary Columndata['y_act'] = np.where(data['class']==' >50K',1,0) data[['class', 'y_act']].sample(10) data[['class', 'y_act']].value_counts() data.shape[0] # value counts as a ratio data[['class', 'y_act']].value_counts(normalize=True)Explore X Variablesdata.columns data.head() data.groupby(by=['class', 'y_act']).agg({'age': ['min', 'max', 'mean', 'std']})Select Columns based on Data Typedata.select_dtypes(include=['float64']) data.select_dtypes(exclude=['object'])Continious Variable to Categorical Variabledata['age'].hist() data['age'].describe() data['age'].value_counts(bins=10) data['age'].value_counts(bins=10, normalize=True)Use `cut()` functionlabels = ['<20', '20-30', '30-40', '40-50', '50-60', '>60'] bin_edges = [0, 20, 30, 40, 50, 60, np.inf] len(labels) len(bin_edges) data['age_group'] = pd.cut(x=data['age'], bins=bin_edges, labels=labels) data[['age', 'age_group']].sample(10)Exercise 1Convert `hours_per_week` into categorical variabledata['hours_per_week'].describe() data['hours_per_week'].value_counts(bins=10) # Type your code here data.dtypesCross Tables (two-way tables)pd.crosstab(data['education'], data['age_group'], margins=True, margins_name='Total') pd.crosstab(data['education'], data['age_group'], normalize=True).style.background_gradient(cmap='RdYlGn')#.set_precision(5)Pivot Tablepd.pivot_table(data=data, index='education', columns='age_group', values='ID', aggfunc='count', margins=True, margins_name='Total')Combine Values of Categorical Variables (Reduce Number of Categories)data.education.unique() data.education_num.unique() data['education_grp'] = data['education'].replace(' Preschool', 'school') data['education_group'] = None data.loc[data['education']==' Preschool', 'education_group'] = 'school' data.loc[data['education']==' 5th-6th', 'education_group'] = 'school' data.loc[data['education']==' 1st-4th', 'education_group'] = 
'school' data.loc[data['education']==' 7th-8th', 'education_group'] = 'school' data.loc[data['education']==' 9th', 'education_group'] = 'school' data.loc[data['education']==' Some-college', 'education_group'] = 'h. study' data.loc[data['education']==' Assoc-voc', 'education_group'] = 'h. study' data.loc[data['education']==' Assoc-acdm', 'education_group'] = 'h. study' data.loc[data['education']==' Doctorate', 'education_group'] = 'h. study' data.loc[data['education']==' Prof-school', 'education_group'] = 'h. study' data.loc[data['education']==' Bachelors', 'education_group'] = 'h. study' data.loc[data['education']==' HS-grad', 'education_group'] = 'h. study' data.loc[data['education']==' Masters', 'education_group'] = 'h. study' data.loc[data['education']==' 10th', 'education_group'] = 'h. school' data.loc[data['education']==' 11th', 'education_group'] = 'h. school' data.loc[data['education']==' 12th', 'education_group'] = 'h. school' data['education_group'].unique() data.loc[data['education_group'].isna()] pd.crosstab(data['education_group'], data['age_group'], margins=True, margins_name='Total')Apply Custom Functions over Data Framesdef merge_education_catergory(education): if education in (' Preschool', ' 1st-4th', ' 5th-6th', ' 7th-8th', ' 9th'): return 'school' elif education in (' 10th', ' 11th', ' 12th', ' HS-grad'): return 'h. school' elif education in (' Bachelors', ' Masters', ' Some-college', ' Assoc-acdm', ' Assoc-voc', ' Doctorate', ' Prof-school'): return 'h. Study' print(f"merge_education_catergory(' 12th') = '{merge_education_catergory(' 12th')}'") print(f"merge_education_catergory(' Masters') = '{merge_education_catergory(' Masters')}'") data['education_group2'] = data['education'].apply(merge_education_catergory) pd.crosstab(data['education_group2'], data['age_group'], margins=True, margins_name='Total')Use String Matching (RegEx)data.loc[data['education'].str.match('( Preschool| 1st-4th| 5th-6th| 7th-8th| 9th)'), 'education_group3'] = 'school' data.loc[data['education'].str.match('( 10th| 11th| 12th| HS-grad)'), 'education_group3'] = 'h. school' data.loc[data['education'].str.match('( Bachelors| Masters| Some-college| Assoc-acdm| Assoc-voc| Doctorate| Prof-school)'), 'education_group3'] = 'h. 
Study' pd.crosstab(data['education_group3'], data['age_group'], margins=True, margins_name='Total')Correlationcorrelation_matrix = data[['age', 'hours_per_week', 'education_num','y_act']].corr() correlation_matrix sns.heatmap(correlation_matrix)Model Building Examplefrom sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split data.columnsIdentify `X` and `y`X_variables = ['age', 'hours_per_week', 'education_num'] data[X_variables].head() y_variable = 'y_act' data[y_variable].head() # Two classes data[y_variable].unique() X = data[X_variables].values X y = data[y_variable].values yTrain Test SplitX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) print(F"Train sample size = {len(X_train)}") print(F"Test sample size = {len(X_test)}")Train sample size = 28765 Test sample size = 12329Create Logistic Regression Model* https://en.wikipedia.org/wiki/Logistic_regression* https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.htmlmodel = LogisticRegression()Fit Modelmodel.fit(X_train, y_train)Predictmodel.predict_proba(X_test)Predicted Probability for Class 1y_pred_prob = model.predict_proba(X_test)[:, 1] y_pred_prob y_pred = model.predict(X_test) y_predCreate Results Data Frame for Test Sampletest_result = pd.DataFrame(data={'y_act':y_test, 'y_pred':y_pred, 'y_pred_prob':y_pred_prob}) test_result.sample(10)Confusion Matrixcm = pd.crosstab(test_result['y_act'], test_result['y_pred'], margins=True) cm accuracy = (cm.loc[0,0] + cm.loc[1,1]) / cm.loc['All','All'] accuracy from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier(n_estimators=1000) model.fit(X_train, y_train) model.predict_proba(X_test) y_pred = model.predict(X_test) y_pred_prob = model.predict_proba(X_test)[:, 1] test_result = pd.DataFrame(data={'y_act':y_test, 'y_pred':y_pred, 'y_pred_prob':y_pred_prob}) test_result.sample(10) cm = pd.crosstab(test_result['y_act'], test_result['y_pred'], margins=True) cm accuracy = (cm.loc[0,0] + cm.loc[1,1]) / cm.loc['All','All'] accuracyLast update 2021-10-09 by Setting Hyper-parameters# hyperparameters ? H = 8 # number of hidden layer neurons learning_rate = 1e-2 gamma = 0.99 # discount factor for reward decay_rate = 0.99 # decay factor for RMSProp leaky sum of grad^2 ? resume = False # resume from previous checkpoint?
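# The two batch sizes below are what the training loop later switches between:
# batch_size is set to model_bs when episodes are generated from the learned model
# network, and to real_bs when they come from the real environment (env); each one
# controls how many episodes are collected before a parameter update.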
model_bs = 3 # Batch size when learning from model real_bs = 3 # Batch size when learning from real environment # model initialization D = 4 # input dimensionality ?Policy Networktf.reset_default_graph() # reset tensorflow graph observations = tf.placeholder(tf.float32, [None,4], name="input_x") W1 = tf.get_variable("W1", shape=[4, H], initializer=tf.contrib.layers.xavier_initializer()) layer1 = tf.nn.relu(tf.matmul(observations,W1)) W2 = tf.get_variable("W2", shape=[H, 1], initializer=tf.contrib.layers.xavier_initializer()) score = tf.matmul(layer1,W2) probability = tf.nn.sigmoid(score) tvars = tf.trainable_variables() input_y = tf.placeholder(tf.float32, [None, 1], name="input_y") advantages = tf.placeholder(tf.float32, name="reward_signal") adam = tf.train.AdamOptimizer(learning_rate=learning_rate) W1Grad = tf.placeholder(tf.float32, name="batch_grad1") W2Grad = tf.placeholder(tf.float32, name="batch_grad2") batchGrad = [W1Grad,W2Grad] loglik = tf.log(input_y*(input_y - probability) + (1 - input_y)*(input_y + probability)) loss = -tf.reduce_mean(loglik*advantages) newGrads = tf.gradients(loss, tvars) updateGrads = adam.apply_gradients(zip(batchGrad,tvars))Model NetworkImplement a multi-layer neural network that predicts the next observation, rewrad, and done state from a current state and actionmH = 256 # model layer size input_data = tf.placeholder(tf.float32, [None, 5]) with tf.variable_scope('rnnlm'): softmax_w = tf.get_variable("softmax_w", [mH, 50]) softmax_b = tf.get_variable("softmax_b", [50]) previous_state = tf.placeholder(tf.float32, [None,5], name="previous_state") W1M = tf.get_variable("W1M", shape=[5, mH], initializer=tf.contrib.layers.xavier_initializer()) B1M = tf.Variable(tf.zeros([mH]), name="B1M") layer1M = tf.nn.relu(tf.matmul(previous_state, W1M) + B1M) W2M = tf.get_variable("W2M", shape=[mH, mH], initializer=tf.contrib.layers.xavier_initializer()) B2M = tf.Variable(tf.zeros([mH]), name="B2M") layer2M = tf.nn.relu(tf.matmul(layer1M, W2M) + B2M) wO = tf.get_variable("wO", shape=[mH, 4], initializer=tf.contrib.layers.xavier_initializer()) wR = tf.get_variable("wR", shape=[mH, 1], initializer=tf.contrib.layers.xavier_initializer()) wD = tf.get_variable("wD", shape=[mH, 1], initializer=tf.contrib.layers.xavier_initializer()) bO = tf.Variable(tf.zeros([4]), name="bO") bR = tf.Variable(tf.zeros([1]), name="bR") bD = tf.Variable(tf.zeros([1]), name="bD") predicted_observation = tf.matmul(layer2M,wO,name="predicted_observation") + bO predicted_reward = tf.matmul(layer2M, wR, name="predicted_reward") + bR predicted_done = tf.sigmoid(tf.matmul(layer2M, wD, name="prediced_done") + bD) true_observation = tf.placeholder(tf.float32, [None, 4], name="true_observation") true_reward = tf.placeholder(tf.float32, [None,1], name="true_reward") true_done = tf.placeholder(tf.float32, [None,1], name="true_done") predicted_state = tf.concat([predicted_observation, predicted_reward, predicted_done], 1) observation_loss = tf.square(true_observation - predicted_observation) reward_loss = tf.square(true_reward - predicted_reward) done_loss = tf.multiply(predicted_done, true_done) + tf.multiply(1-predicted_done, 1-true_done) done_loss = -tf.log(done_loss) model_loss = tf.reduce_mean(observation_loss + done_loss + reward_loss) modelAdam = tf.train.AdamOptimizer(learning_rate=learning_rate) updateModel = modelAdam.minimize(model_loss)Helper-functions?def resetGradBuffer(gradBuffer): for ix, grad in enumerate(gradBuffer): gradBuffer[ix] = grad * 0 return gradBuffer def discount_rewards(r): """ take 
1D float array of rewards and compute discounted reward""" discounted_r = np.zeros_like(r) running_add = 0 for t in reversed(xrange(0, r.size)): running_add = running_add * gamma + r[t] discounted_r[t] = running_add return discounted_r # This function uses our model to produce a new state when given a previous state and action def stepModel(sess, xs, action): toFeed = np.reshape(np.hstack([xs[-1][0],np.array(action)]), [1,5]) myPredict = sess.run([predicted_state], feed_dict={previous_state: toFeed}) reward = myPredict[0][:,4] observation = myPredict[0][:,0:4] observation[:,0] = np.clip(observation[:,0],-2.4,2.4) observation[:,2] = np.clip(observation[:,2],-0.4,0.4) doneP = np.clip(myPredict[0][:,5],0,1) if doneP > 0.1 or len(xs) >= 300: done = True else: done = False return observation, reward, doneTraining the Policy and Modelxs, drs, ys, ds = [],[],[],[] running_reward = None reward_sum = 0 episode_number = 1 real_episodes = 1 init = tf.global_variables_initializer() batch_size = real_bs drawFromModel = False trainTheModel = True trainThePolicy = False switch_point = 1 # Launch the graph with tf.Session() as sess: rendering = False sess.run(init) observation = env.reset() x = observation gradBuffer = sess.run(tvars) gradBuffer = resetGradBuffer(gradBuffer) while episode_number <= 5000: # Start displaying environment once perfomance is acceptably high #if ((reward_sum/batch_size > 150) and drawFromModel == False) or rendering == True: if (np.all(reward_sum/batch_size > 150) and np.all(drawFromModel == False)) or rendering == True: env.render() rendering = True x = np.reshape(observation,[1,4]) tfprob = sess.run(probability, feed_dict={observations: x}) action = 1 if np.random.uniform() < tfprob else 0 # record various intermediates (needed) xs.append(x) y = 1 if action == 0 else 0 ys.append(y) # step the model or real environment and get new measurements if drawFromModel == False: observation, reward, done, info = env.step(action) else: observation, reward, done = stepModel(sess, xs, action) reward_sum += reward ds.append(done*1) drs.append(reward) # record reward ( has to be done after we call step() to get reward for previous action) if done: if drawFromModel == False: real_episodes += 1 episode_number += 1 # stack together all inputs, hidden states, action gradients, and rewards for this episode epx = np.vstack(xs) epy = np.vstack(ys) epr = np.vstack(drs) epd = np.vstack(ds) xs,drs,ys,ds = [],[],[],[] # reset array memory if trainTheModel == True: actions = np.array([np.abs(y-1) for y in epy][:-1]) state_prevs = epx[:-1,:] state_prevs = np.hstack([state_prevs,actions]) state_nexts = epx[1:,:] rewards = np.array(epr[1:,:]) dones = np.array(epd[1:,:]) state_nextsAll = np.hstack([state_nexts,rewards,dones]) feed_dict={previous_state: state_prevs, true_observation: state_nexts, true_done:dones, true_reward:rewards} loss,pState,_ = sess.run([model_loss,predicted_state, updateModel], feed_dict) if trainThePolicy == True: discounted_epr = discount_rewards(epr).astype('float32') discounted_epr -= np.mean(discounted_epr) discounted_epr /= np.std(discounted_epr) tGrad = sess.run(newGrads, feed_dict={observations: epx, input_y: epy, advantages: discounted_epr}) # if gradients become too large, end training process if np.sum(tGrad[0] == tGrad[0]) == 0: break for ix, grad in enumerate(tGrad): gradBuffer[ix] += grad if switch_point + batch_size == episode_number: switch_point = episode_number if trainThePolicy == True: sess.run(updateGrads, feed_dict={W1Grad: gradBuffer[0], W2Grad: gradBuffer[1]}) 
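# The policy gradients for this batch were accumulated into gradBuffer episode by
# episode; the updateGrads call above applies the accumulated batch in a single
# step, and resetGradBuffer below clears the buffer before the next batch of
# episodes is collected.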
gradBuffer = resetGradBuffer(gradBuffer) running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01 if drawFromModel == False: print ('World Perf: Episode %f. Reward %f. action: %f. mean reward %f.' % (real_episodes, reward_sum/real_bs, action, running_reward/real_bs)) if reward_sum/batch_size > 100: break reward_sum = 0 #once the model has been trained to 100 episodes, we start alternating between trainign the policy # from the model and training the model from the real environment. if episode_number > 100: drawFromModel = not drawFromModel trainTheModel = not trainTheModel trainThePolicy = not trainThePolicy if drawFromModel == True: observation = np.random.uniform(-.1, 0.1, [4]) # # Generate reasonable starting point batch_size = model_bs else: observation = env.reset() batch_size = real_bs print(real_episodes) plt.figure(figsize=(8, 12)) for i in range(6): plt.subplot(6, 2, 2*i + 1) plt.plot(pState[:,i]) plt.subplot(6,2,2*i+1) plt.plot(state_nextsAll[:,i]) plt.tight_layout()/home/john/anaconda2/envs/my_env35/lib/python3.5/site-packages/matplotlib/cbook/deprecation.py:106: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance. warnings.warn(message, mplDeprecation, stacklevel=1)Performance Metrics in Classification Possible outcomes of a classification task and the confusion matrixIf we are in a binary classification problem where the two classes are $1$ (call it positive) and $0$ (call it negative), our model can spit out either of them, but chances are some points will be classified wrongly. This leaves us with 4 possible situations in terms of how the points get classified based on what they actually are: * $TP$: True Positives, those points which are classified as $1$ and are actually $1$;* $TN$: True Negatives, those points which are classified as $0$ and are actually $0$;* $FP$: False Positives, those points which are classified as $1$ but are actually $0$;* $FN$: False Negatives, those points which are classified as $0$ but are actually $1$A useful graphic representation of the categories above is given in the figure here. The sum of counts in each of the categories, which is represented by the total area of the picture, equals the total of data points classified. The area of the ellipse, which is given by $TP + FP$ (abuse of notation: we're using the symbols above to mean the counts of the categories here!), will give the total of points classified as positive; the area of the green parts, equalling $TP + FN$ is the total of points which are actually positives; the area of the red parts, equalling $FP + TN$, is instead the total of points which are actually negatives. These categories and the terminology we set here will be used in the following to define the metrics we can use to assess the performance of a classifier. Note that we have specifically and for the sake of simplicity referred to the case of a binary classifier, but this framework is extensible to a generic multi-class classification with little work: you'd just have to consider that for each of the classes, you'll have points classified correctly and points classified as belonging to another class, so there will be counts of wrongly classified points in each class. We'll expand on this soon. 
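To make the four categories concrete, here is a minimal sketch that counts them directly for a toy binary problem (the `y_true_toy`/`y_pred_toy` arrays are made up for illustration, not taken from the notebook):
import numpy as np
# Toy binary labels: 1 = positive, 0 = negative (illustrative values only)
y_true_toy = np.array([1, 0, 1, 1, 0, 0, 1, 0])
y_pred_toy = np.array([1, 0, 0, 1, 1, 0, 1, 0])
TP = np.sum((y_pred_toy == 1) & (y_true_toy == 1))  # classified 1, actually 1
TN = np.sum((y_pred_toy == 0) & (y_true_toy == 0))  # classified 0, actually 0
FP = np.sum((y_pred_toy == 1) & (y_true_toy == 0))  # classified 1, actually 0
FN = np.sum((y_pred_toy == 0) & (y_true_toy == 1))  # classified 0, actually 1
print(TP, TN, FP, FN)  # 3 3 1 1; the four counts sum to the number of points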
The confusion matrixAlso known sometimes as *contingency table* or *error matrix*, it is a good tool to have a quick eye-catching assessment of the performance of a classifier. Again in the simple case of a classification problem with two classes (binary classification), the confusion matrix is, considering the categories of points described above, displayed in the figure. It is literally just a square matrix with all the counts. In the case of a multi-class classification problem with $n$ classes, the matrix will be a $n \times n$ one where the diagonal contains the true positives for each class, and out of diagonal items will report the number of wrongly classified items, class by class. In short, the categories are per class and for the wrongly categorised points you can compute how many points of actual class $a$ are classified in class $b$, etc, for each class.We'll now show what we mean using data from the [Iris dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set), an old dataset created by Fisher which is now widely used for pedagogical purposes to illustrate the use of algorithms. It contains data for three species of Iris flowers (we'll use numerical indices to indicate them here), data being their lengths and widths of petals and sepals. We'll use this data to classify the species and we'll employ a Random Forest classifier for the task. This dataset can be called directly from `scikit-learn`, so quite useful. We wrote a routine to display the confusion matrix, which we'll use here. Let's see this. The classes of Iris species we have are called $0$, $1$ and $2$. The matrix will show, from the test set, how many points get classified in each class for each class.# Load the Iris dataset from sklearn, separating the data matrix and the array of classes iris = load_iris() X = iris.data y = iris.target # Initiate the classifier (using default parameters) rf = RandomForestClassifier() # Splitting the dataset into train and test (70%/30%) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3) # Fitting model on training set and predict on test set rf.fit(X_train, y_train) y_pred = rf.predict(X_test) # Plot the confusion matrix do_plot_conf_mat(y_test, y_pred)Per-class performance metricsIn the generic case of a multi-class classification, the results in each of the classes can be assessed. For each class, we can compute the metrics described here. We will also show their values in the little Iris classification from above.The binary classification case would just be a special case of this so we thought we'd stay general. In this general case, the categories of points illustrated above refer to the specific class we're monitoring, so while $TP$ is the diagonal value for class in the confusion matrix, $FP$ will be the total of false positives for it, that is, the sum of values in the column for class in matrix excluding the diagonal value. With the same reasoning, $FN$ will be the total of false negatives for the class, that is, the sum of values in the row for the class in the confusion matrix, excluding the diagonal value. Precision and specificityThe precision, referred to a class, is defined as $$p = \frac{TP}{TP + FP} \ ,$$and is the fraction of true points in that class over the total of points classified as belonging to that class.The precision is a measure of how *useful* the classified results are as it gives the fraction of relevant items to the total classified in class. 
With reference to the ellipse figure above, precision is the fraction of the $TP$ area in the ellipse to the whole ellipse area. A precision of $1$ means that all samples in class were correctly classified.In the case of a binary classification, the equivalent of precision but for the negative class is called *specificity*, or *true negative rate*, and is defined as$$s = \frac{TN}{TN + FP} \ .$$ RecallThe *recall*, also called *sensitivity*, or *true positive rate*, is defined as$$r = \frac{TP}{TP + FN} \ ,$$and gives the fraction of true positives for the class over the total of points belonging to the class. It is a measure of how *complete* the results are, meaning how many of the real points in class are retrieved. A recall $r=1$ means that all items in the class where actually classed as in class. An example on precision vs. recallThis is taken from [[the Wikipedia page on precision and recall]](1).Let us imagine there is a surgeon who needs to remove all cancerous cells from a patient to prevent regeneration. In the process, if healthy cells are removed as well, this would leave disgraceful lesions to the organs involved. The decision to increase recall at the cost of precision is one where more cells than needed are removed and ensure that all bad ones will go. The decision to increase precision at the cost of recall, on the other hand, would see the surgeon be more conservative and ensure only bad cells are removed, at the cost of not removing them all. The $F$-scoreWhile precision and recall measure different things, often related in such a way that increasing the one will decrease the other, the *F-score* is a single metric which brings both information together.In general, a F-score is defined as $$F_\beta = (1 + \beta^2)\frac{pr}{\beta^2p + r} \ \ \beta > 0, \beta \in \mathbb{R}$$The most commonly seen metric in this class is the *F1-score*, which is just the [harmonic mean](../../maths/measures.ipynbThe-harmonic-mean) of precision and recall:$$F_1 = 2 \frac{pr}{p + r}$$The general F-score furnishes a way to decide to weigh precision and recall differently: while the F1-score weighs them equally, the F2-score gives more weight to the precision and the F0.5-score does the reverse, for instance. In the case of our classifier above ...Let's have a look at these per-class metrics for the classification problem we pursued above! We use the [`sklearn.metrics.classification_report`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.htmlsklearn.metrics.classification_report) for the job, which is a convenient routine spitting out the three metrics we just described, for all classes, in one go. Note that for each metric it also furnishes the weighted value based on the support of the class, namely for the average precision, for instance,$$p_{avg} = \sum_c^C p_c s_c \ ,$$where the sum runs over all the classes, $p_c$ and $s_c$ are, respectively, the precision and the support of the class.# sklearn furnishes a report of these metrics for all classes in one go! print(classification_report(y_test, y_pred))precision recall f1-score support 0 1.00 1.00 1.00 11 1 0.95 0.90 0.92 20 2 0.87 0.93 0.90 14 avg / total 0.94 0.93 0.93 45Global metricsThese metrics are meant to assess the performance of the classifier as a whole and not referring to each specific class. We already outlines the weighted average of precision, recall and F-score above, which serve exactly this purpose. 
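As a quick follow-up, scikit-learn can return these support-weighted averages directly; the sketch below reuses the `y_test` and `y_pred` arrays from the Iris example above, so it assumes those cells have been run.
from sklearn.metrics import precision_recall_fscore_support
# Support-weighted averages over all classes, the same weighting used in the
# report's "avg / total" row above.
p_avg, r_avg, f_avg, _ = precision_recall_fscore_support(y_test, y_pred, average='weighted')
print('weighted precision: %.3f, recall: %.3f, F1: %.3f' % (p_avg, r_avg, f_avg))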
AccuracyThe (global) *accuracy* of a classifier is defined as$$a = \frac{\sum_c^C TP_c}{N} \ ,$$and measures the total number of correct predictions over the total of data points ($N$ is the total of points and the sum runs over the classes, summing the counts of true positives in each). It would be the first thing which comes to mind (I mean, the simplest thing to do) when assessing a classifier's performance and indeed gives insight on how good it is to class points. But, its weak spot is that it gives no insight on the difference between the two types of errors: false positives and false negatives, treating them on the same ground. Plus it hides all information on which classes is doing better/worse.In reality, the accuracy can also be computed per class. See the example we give below. In the case of our classifier above ...# From sklearn accuracy_score(y_test, y_pred)Module 10: Logscaleimport matplotlib.pyplot as plt import pandas as pd import seaborn as sns import numpy as np import scipy.stats as ss import vega_datasets %matplotlib inlineRatio and logarithmIf you use linear scale to visualize ratios, it can be quite misleading.Let's first create some ratios.x = np.array([1, 1, 1, 1, 10, 100, 1000]) y = np.array([1000, 100, 10, 1, 1, 1, 1 ]) ratio = x/y print(ratio)[1.e-03 1.e-02 1.e-01 1.e+00 1.e+01 1.e+02 1.e+03]**Q: Plot on the linear scale using the [`scatter()`](http://matplotlib.org/examples/shapes_and_collections/scatter_demo.html) function. Also draw a horizontal line at ratio=1 for a reference. **# Implement**Q: Explain what's bad about this plot.** **Q: Can you fix it? **# ImplementLog-binningLet's first see what happens if we do not use the log scale for a dataset with a heavy tail. **Q: Load the movie dataset from `vega_datasets` and remove the NaN rows based on the following three columns: `IMDB_Rating`, `IMDB_Votes`, `Rotten_Tomatoes_Rating`. **# ImplementIf you simply call `hist()` method with a dataframe object, it identifies all the numeric columns and draw a histogram for each. **Q: draw all possible histograms of the movie dataframe. Adjust the size of the plots if needed. **# ImplementAs we can see, a majority of the columns are not normally distributed. In particular, if you look at the worldwide gross variable, you only see a couple of meaningful data from the histogram. Is this a problem of resolution? How about increasing the number of bins?**Q: Play with the number of bins, and then increase the number of bins to 200. **# ImplementMaybe a bit more useful, but it doesn't tell anything about the data distribution above certain point. **Q: How about changing the vertical scale to logarithmic scale?**# ImplementNow, let's try log-bin. Recall that when plotting histgrams we can specify the edges of bins through the `bins` parameter. For example, we can specify the edges of bins to [1, 2, 3, ... , 10] as follows.movies.IMDB_Rating.hist(bins=range(0,11))Here, we can specify the edges of bins in a similar way. Instead of specifying on the linear scale, we do it on the log space. 
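To make log-bins concrete before the exercise, here is a minimal, generic sketch of `numpy.logspace` (toy exponents, not the `Worldwide_Gross` values); the resources and hint that follow show how to pick the exponents for the actual column.
import numpy as np
# 10 bin edges spread evenly in exponent space between 10**0 = 1 and 10**6
edges = np.logspace(0, 6, 10)
print(edges)
# The ratio between consecutive edges is constant, i.e. equal widths on a log axis
print(edges[1:] / edges[:-1])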
Some useful resources:* [Google query: python log-bin](https://www.google.com/webhp?sourceid=chrome-instant&ion=1&espv=2&ie=UTF-8q=python+log-bin&pws=0)* [numpy.logspace](http://docs.scipy.org/doc/numpy/reference/generated/numpy.logspace.html)* [numpy.linspace vs numpy.logspace](http://stackoverflow.com/questions/31480033/difference-in-output-between-numpy-linspace-and-numpy-logspace)Hint: since $10^{\text{start}} = \text{min(Worldwide_Gross)}$, $\text{start} = \log_{10}(\text{min(Worldwide_Gross)})$min(movies.Worldwide_Gross)Because there seems to be movie(s) that made $0, and because log(0) is undefined & log(1) = 0, let's add 1 to the variable.movies.Worldwide_Gross = movies.Worldwide_Gross+1.0**Q: now create logarithmic bins. Create 20 bins from the minimum value to the maximum value.**# ImplementNow we can plot a histgram with log-bin. Set both axis to be log-scale.# ImplementWhat is going on? Is this the right plot?**Q: explain and fix** **Q: Can you explain the plot? Why are there gaps?** CCDFCCDF is a nice alternative to examine distributions with heavy tails. The idea is same as CDF, but the direction of aggregation is opposite. We have done CDF before. It's just a small change to that code. **Q: Draw a CCDF in log-log scale**# implementWe can also try in semilog scale (only one axis is in a log-scale), where the horizontal axis is linear. **Q: Draw a CCDF in semilog scale**# ImplementA straight line in semilog scale means exponential decay (cf. a straight line in log-log scale means power-law decay). So it seems like the amount of money a movie makes across the world follows *roughly* an exponential distribution, while there are some outliers that make insane amount of money. **Q: Which is the most successful movie in our dataset?**You can use the following- `idxmax()`: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.idxmax.html- `loc`: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.loc.html or `iloc`: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.iloc.html Which one should you use, `loc` or `iloc`? How are they different from each other?# ImplementBANK CUSTOMERS RETIREMENT PREDICTIONS USING SUPPORT VECTOR MACHINES STEP 1: PROBLEM STATEMENT You work as a data scientist at a major bank in NYC and you have been tasked to develop a model that can predict whether a customer is able to retire or not based on his/her features. Features are his/her age and net 401K savings (retirement savings in the U.S.). STEP 2: IMPORTING DATAimport pandas as pd import seaborn as sns import matplotlib.pyplot as plt df = pd.read_csv('Bank_Customer_retirement.csv') df.head() df.columns df.shape df.tail()STEP 3: VISUALIZING THE DATAsns.pairplot(data=df, vars=('Age', '401K Savings'),hue='Retire')Customers with higher 401K Savings and who are aged are eligible to retire. 
Whereas, younger crowd with less 401K savings are not eligible to retire.sns.countplot(df['Retire'])STEP 4: MODEL TRAININGX = df.drop(['Retire', 'Customer ID'], axis=1)Dropping customer ID and Retire columns to create the predictor dataframe.X y = df['Retire'] y from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 5) from sklearn.svm import SVC classifier = SVC() classifier.fit(X_train, y_train)/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. "avoid this warning.", FutureWarning)STEP 5: EVALUATING THE MODELy_predict = classifier.predict(X_test) from sklearn.metrics import confusion_matrix, classification_report cm = confusion_matrix(y_test, y_predict) sns.heatmap(cm, annot=True, fmt='d') print(classification_report(y_test, y_predict))precision recall f1-score support 0 0.43 1.00 0.60 43 1 0.00 0.00 0.00 57 micro avg 0.43 0.43 0.43 100 macro avg 0.21 0.50 0.30 100 weighted avg 0.18 0.43 0.26 100STEP 6: IMPROVING THE MODEL Using normalisationmin_train = X_train.min() print(min_train) range_train = (X_train-min_train).max() range_train X_train_scaled = (X_train-min_train)/range_train X_train_scaled y_train min_test = X_test.min() print(min_test) range_test = (X_test-min_test).max() range_test X_test_scaled = (X_test-min_test)/range_test X_test_scaled sns.scatterplot('Age', '401K Savings', data=X_train, hue=y_train) sns.scatterplot('Age', '401K Savings', data=X_train_scaled, hue=y_train) classifier_scaled = SVC() classifier_scaled.fit(X_train_scaled, y_train) y_predict_scaled = classifier_scaled.predict(X_test_scaled) cm = confusion_matrix(y_test, y_predict_scaled) sns.heatmap(cm, annot=True, fmt='d') print(classification_report(y_test, y_predict_scaled))precision recall f1-score support 0 0.87 0.95 0.91 43 1 0.96 0.89 0.93 57 micro avg 0.92 0.92 0.92 100 macro avg 0.92 0.92 0.92 100 weighted avg 0.92 0.92 0.92 100IMPROVING THE MODEL - PART 2 Using parameter optimizationparam_grid = {'C':[0.1, 1, 10, 100], 'gamma': [1,0.1, 0.01,0.001], 'kernel':['rbf']} from sklearn.model_selection import GridSearchCV grid = GridSearchCV(SVC(), param_grid, refit=True, verbose=4)#refit to obtain best parameter grid.fit(X_train_scaled, y_train) grid.best_params_ grid_predict = grid.predict(X_test_scaled) cm_grid = confusion_matrix(y_test, grid_predict) sns.heatmap(cm_grid, annot=True, fmt='d') print(classification_report(y_test, grid_predict))precision recall f1-score support 0 0.88 0.98 0.92 43 1 0.98 0.89 0.94 57 micro avg 0.93 0.93 0.93 100 macro avg 0.93 0.94 0.93 100 weighted avg 0.94 0.93 0.93 100IntroductionYou've built a model to identify clothing types in the **MNIST for Fashion** dataset. Now you will make your model bigger, specify larger stride lengths and apply dropout. These changes will make your model faster and more accurate.This is a last step in the **[Deep Learning Track](https://www.kaggle.com/learn/deep-learning)**. 
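A rough intuition for why a larger stride speeds training: the convolution's output grid shrinks by about the stride factor, so every later layer touches fewer positions. The sketch below is just the usual output-size arithmetic for an unpadded convolution (plain Python, illustrative only).
def conv_output_size(n, kernel=3, stride=1):
    # Width/height of the output of an unpadded ('valid') convolution
    return (n - kernel) // stride + 1

print(conv_output_size(28, 3, 1))  # 26 -> a 26x26 feature map
print(conv_output_size(28, 3, 2))  # 13 -> a 13x13 map, roughly 4x fewer positions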
Data Preparation**Run this cell of code.**import numpy as np from sklearn.model_selection import train_test_split from tensorflow import keras # Set up code checking from learntools.core import binder binder.bind(globals()) from learntools.deep_learning.exercise_8 import * print("Setup Complete") img_rows, img_cols = 28, 28 num_classes = 10 def prep_data(raw): y = raw[:, 0] out_y = keras.utils.to_categorical(y, num_classes) x = raw[:,1:] num_images = raw.shape[0] out_x = x.reshape(num_images, img_rows, img_cols, 1) out_x = out_x / 255 return out_x, out_y fashion_file = "../input/fashionmnist/fashion-mnist_train.csv" fashion_data = np.loadtxt(fashion_file, skiprows=1, delimiter=',') x, y = prep_data(fashion_data)Setup Complete1) Increasing Stride Size in A LayerBelow is a model without strides (or more accurately, with a stride length of 1)Run it. Notice it's accuracy and how long it takes per epoch. Then you will change the stride length in one of the layers.from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Flatten, Conv2D, Dropout batch_size = 16 fashion_model = Sequential() fashion_model.add(Conv2D(16, kernel_size=(3, 3), activation='relu', input_shape=(img_rows, img_cols, 1))) fashion_model.add(Conv2D(16, (3, 3), activation='relu')) fashion_model.add(Flatten()) fashion_model.add(Dense(128, activation='relu')) fashion_model.add(Dense(num_classes, activation='softmax')) fashion_model.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy']) fashion_model.fit(x, y, batch_size=batch_size, epochs=3, validation_split = 0.2) fashion_model.summary()Train on 48000 samples, validate on 12000 samples Epoch 1/3 48000/48000 [==============================] - 16s 328us/sample - loss: 0.3974 - accuracy: 0.8564 - val_loss: 0.3083 - val_accuracy: 0.8879 Epoch 2/3 48000/48000 [==============================] - 11s 236us/sample - loss: 0.2519 - accuracy: 0.9064 - val_loss: 0.2593 - val_accuracy: 0.9080 Epoch 3/3 48000/48000 [==============================] - 12s 248us/sample - loss: 0.1879 - accuracy: 0.9294 - val_loss: 0.2851 - val_accuracy: 0.9028 Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 26, 26, 16) 160 _________________________________________________________________ conv2d_1 (Conv2D) (None, 24, 24, 16) 2320 _________________________________________________________________ flatten (Flatten) [...]You have the same code in the cell below, but the model is now called `fashion_model_1`. Change the specification of `fashion_model_1` so the second convolutional layer has a stride length of 2.Run the cell after you have done that. 
How does the speed and accuracy change compared to the first model you ran above?fashion_model_1 = Sequential() fashion_model_1.add(Conv2D(16, kernel_size=(3, 3), activation='relu', input_shape=(img_rows, img_cols, 1))) fashion_model_1.add(Conv2D(16, (3, 3), activation='relu', strides=2)) fashion_model_1.add(Flatten()) fashion_model_1.add(Dense(128, activation='relu')) fashion_model_1.add(Dense(num_classes, activation='softmax')) fashion_model_1.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy']) fashion_model_1.fit(x, y, batch_size=batch_size, epochs=3, validation_split = 0.2) q_1.check()Train on 48000 samples, validate on 12000 samples Epoch 1/3 48000/48000 [==============================] - 12s 248us/sample - loss: 0.4291 - accuracy: 0.8436 - val_loss: 0.3379 - val_accuracy: 0.8770 Epoch 2/3 48000/48000 [==============================] - 11s 234us/sample - loss: 0.2956 - accuracy: 0.8910 - val_loss: 0.2850 - val_accuracy: 0.8983 Epoch 3/3 48000/48000 [==============================] - 11s 233us/sample - loss: 0.2403 - accuracy: 0.9097 - val_loss: 0.2720 - val_accuracy: 0.9048For the solution, uncomment and run the cell below:#q_1.solution()You should notice that your model training ran about twice as fast, but the accuracy change was trivial. In addition to being faster to train, this model is also faster at making predictions. This is very important in many scenarios. In practice, you'll need to decide whether that type of speed is important in the applications where you eventually apply deep learning models.You could experiment with more layers or more convolutions in each layer. With some fine-tuning, you can build a model that is both faster and more accurate than the original model.model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), strides=1, activation='relu', input_shape=(img_rows, img_cols, 1))) model.add(Dropout(0.25)) model.add(Conv2D(32, kernel_size=(3, 3), strides=1, activation='relu')) model.add(Dropout(0.25)) model.add(Conv2D(48, kernel_size=(3, 3), strides=1, activation='relu')) model.add(Dropout(0.25)) model.add(Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu')) model.add(Dropout(0.25)) model.add(Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu')) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy']) model.fit(x, y, batch_size=128, epochs=7, validation_split = 0.2) model.summary()Train on 48000 samples, validate on 12000 samples Epoch 1/7 48000/48000 [==============================] - 6s 128us/sample - loss: 0.5964 - accuracy: 0.7825 - val_loss: 0.4089 - val_accuracy: 0.8514 Epoch 2/7 48000/48000 [==============================] - 5s 105us/sample - loss: 0.3606 - accuracy: 0.8665 - val_loss: 0.3090 - val_accuracy: 0.8894 Epoch 3/7 48000/48000 [==============================] - 5s 106us/sample - loss: 0.2984 - accuracy: 0.8896 - val_loss: 0.2761 - val_accuracy: 0.9020 Epoch 4/7 48000/48000 [==============================] - 5s 108us/sample - loss: 0.2598 - accuracy: 0.9030 - val_loss: 0.2623 - val_accuracy: 0.9053 Epoch 5/7 48000/48000 [==============================] - 5s 114us/sample - loss: 0.2302 - accuracy: 0.9149 - val_loss: 0.2419 - val_accuracy: 0.9137 Epoch 6/7 48000/48000 [==============================] - 5s 108us/sample - loss: 0.2055 - accuracy: 0.9221 - val_loss: 0.2498 - val_accuracy: 0.9115 Epoch 7/7 
48000/48000 [==============================] -[...]13.1. Simulating a discrete-time Markov chainimport numpy as np import matplotlib.pyplot as plt %matplotlib inline N = 100 # maximum population size a = .5 / N # birth rate b = .5 / N # death rate nsteps = 1000 x = np.zeros(nsteps) x[0] = 25 np.random.rand() for t in range(nsteps - 1): if 0 < x[t] < N - 1: # Is there a birth? isbirth = np.random.rand() <= a * x[t] # Is there a death? isdeath = np.random.rand() <= b * x[t] # We update the population size. x[t + 1] = x[t] + 1 * isbirth - 1 * isdeath # The evolution stops if we reach $0$ or $N$. else: x[t + 1] = x[t] fig, ax = plt.subplots(1, 1, figsize=(8, 4)) ax.plot(x, lw=2) ntrials = 100 x = np.random.randint(size=ntrials, low=0, high=N) def simulate(x, nsteps): """Run the simulation.""" for _ in range(nsteps - 1): # Which trials to update? upd = (0 < x) & (x < N - 1) # In which trials do births occur? birth = 1 * (np.random.rand(ntrials) <= a * x) # In which trials do deaths occur? death = 1 * (np.random.rand(ntrials) <= b * x) # We update the population size for all trials x[upd] += birth[upd] - death[upd] bins = np.linspace(0, N, 25) nsteps_list = [10, 1000, 10000] fig, axes = plt.subplots(1, len(nsteps_list), figsize=(12, 3), sharey=True) for i, nsteps in enumerate(nsteps_list): ax = axes[i] simulate(x, nsteps) ax.hist(x, bins=bins) ax.set_xlabel("Population size") if i == 0: ax.set_ylabel("Histogram") ax.set_title(f"{nsteps} time steps")1. Import packagesimport keras from keras.datasets import mnist from keras.models import Sequential from keras.layers import Dense, Conv2D, Flatten import numpy as np import matplotlib.pyplot as plt2. Load data(x_train, y_train), (x_notrain, y_notrain) = mnist.load_data()Downloading data from https://s3.amazonaws.com/img-datasets/mnist.npz 11493376/11490434 [==============================] - 2s 0us/stepsplit the non-training data into two parts: validation set and test setx_validation = x_notrain[:5000,:] y_validation = y_notrain[:5000,] x_test = x_notrain[5000:,:] y_test = y_notrain[5000:,]3. Visualize datarandnum = np.random.randint(0, 60000, 4) for icount in np.arange(4): plt.subplot(1,4,icount+1) img_current = x_train[randnum[icount]] plt.imshow(img_current)4. Prepare data in the right formatx_train = x_train.reshape(x_train.shape[0],28,28,1).astype('float32')/255 x_validation = x_validation.reshape(x_validation.shape[0], 28,28,1).astype('float32')/255 x_test = x_test.reshape(x_test.shape[0],28,28,1).astype('float32')/255 y_train = keras.utils.to_categorical(y_train,10) y_validation = keras.utils.to_categorical(y_validation,10) y_test = keras.utils.to_categorical(y_test,10)5. Build up your convolutional neural networksmodel = Sequential() model.add(Conv2D(32, kernel_size=(3,3), strides=1, padding='same', input_shape=(28,28,1), activation="relu")) model.add(Conv2D(32,(3,3),padding='same',activation='relu')) model.add(Flatten()) model.add(Dense(10,activation='softmax')) model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])6. Train your neural networksmodel.fit(x_train, y_train, batch_size=128, epochs=10, verbose=1, validation_data=(x_validation,y_validation)) score = model.evaluate(x_test,y_test,verbose=1) score7.
Visualize your resultsrandnum = np.random.randint(0,5000,4) x = x_test[randnum] plt.figure(figsize=(10,30)) for i in np.arange(4): plt.subplot(1,4,i+1) y_predict = np.argmax(model.predict(x)[i]) y_true = np.argmax(y_test[randnum[i]]) img = np.reshape(x_test[randnum[i]], (28,28)) plt.imshow(img) plt.title('Predict: %i True %i' %(y_predict, y_true )) plt.figure(figsize=(10,2)) for i in range(4): bar = model.predict(x)[i] classes = [0,1,2,3,4,5,6,7,8,9] plt.subplot(1,4,i+1) plt.bar(classes, bar) plt.xlabel("category") plt.ylabel("probability") plt.title("prediction confidence")8. Visualize the features learned by convolutional layers (optional)import keras.backend as K inp = model.input outputs = [] for layers in model.layers: outputs.append(layers.output) functors = [] for out in outputs: functors.append(K.function([inp, K.learning_phase()], [out])) #test = np.random.random((1,28,28,1)) test = x[0].reshape(1,28,28,1) plt.imshow(test.reshape(28,28)) plt.show() layer_outs = [] for func in functors: layer_outs.append([func([test,0])]) # features detected in the second convolutional layer for each filter images = layer_outs[1][0][0][0] #change the first index to '0' if want to see the first convolutional layer plt.figure(figsize=(15,10)) for i in range(32): #change this if filter size is not 32 plt.subplot(4,8,i+1) plt.imshow(images[:,:,i], cmap="gray") plt.tight_layout()คำตอบของการสองกลางภาค รายวิชา 517432 เจ้าของนายภัคพล พงษ์ทวี (07580028) ข้อ 2.2#Exam2.2 from nltk.book import * from nltk.corpus import words english_vocab = set(words.words()) text1_vocab = set([ w for w in text1.vocab() if w.isalpha() and w.istitle() and w[0] == 'I']) result = set([w.lower() for w in text1_vocab]) - english_vocab print(len(result),"\n",result)31 {'isabella', 'indians', 'indiamen', 'instances', 'ishmael', 'ifs', 'italy', 'isolatoes', 'iroquois', 'icelandic', 'imprimis', 'isaiah', 'islanders', 'israel', 'ingin', 'islands', 'illinois', 'indiaman', 'inlanders', 'inserting', 'ireland', 'india', 'ixion', 'isles', 'israelites', 'indies', 'innocents', 'icebergs', 'irish', 'indian', 'italian'}ข้อ 3#exam 3 from nltk.corpus import brown print('Categories\t\tNumber of token\t\tNumber of vocabulary\t\tLexical Density') for category in ['adventure','fiction','news','reviews','religion','romance']: token = [w.lower() for w in brown.words(categories = category)] token_count = len(token) vocab_count = len(set(token)) print(category,'\t\t',token_count,'\t\t',vocab_count,'\t\t',token_count/vocab_count)Categories Number of token Number of vocabulary Lexical Density adventure 69342 8289 8.365544697792254 fiction 68488 8680 7.890322580645162 news 100554 13112 7.668852959121415 reviews 40704 8069 5.044491262857851 religion 39399 5931 6.642893272635306 romance 70022 7883 8.882658886210834ข้อ 4#exam 4 from nltk.corpus import words,stopwords,webtext def unknown(text): english_vocab = set([w.lower() for w in words.words()]) stop_vocab = set([w.lower() for w in stopwords.words('english')]) lower_text = set([w.lower() for w in text if w.isalpha()]) return list(lower_text - english_vocab - stop_vocab) print(len(unknown(webtext.words())))7521ข้อ 5#exam 5 from nltk import FreqDist freq = FreqDist([len(w) for w in unknown(webtext.words())]) print(freq.max()) # คำที่ความยากมากสุด print(freq.hapaxes()) #คำที่มีความยามน้อยสุด7 [27, 25, 28]ข้อ 8#Exam8 sent = 'The quick brown fox jumps over the lazy dog'.split() result = [w.lower() for w in sent if len(w) == 3] print(result)['the', 'fox', 'the', 'dog']ข้อ 9#Exam9 from nltk import FreqDist,bigrams 
from nltk.corpus import inaugural def my_bigram(word,text): word_pair = list(bigrams([w.lower() for w in text if w.isalpha()])) interest_pair = [w for w in word_pair if w[1] == word] freq = FreqDist(interest_pair) most_pair = freq.most_common(1)[0] return most_pair[0][0]+" "+word+" "+str(most_pair[1]) print(my_bigram('citizens',inaugural.words())) print(my_bigram('people',inaugural.words())) print(my_bigram('government',inaugural.words())) #Exam9 (ต่อ) ให้แสดงผล list ที่พบหน้า สัก 5 อันดับแรก from nltk import FreqDist,bigrams from nltk.corpus import inaugural def my_bigram_list(word,text,size = 5): word_pair = list(bigrams([w.lower() for w in text if w.isalpha()])) interest_pair = [w for w in word_pair if w[1] == word] freq = FreqDist(interest_pair) output = [] for pair in freq.most_common(size): output.append(pair[0][0]+" "+pair[0][1]) return output print(my_bigram_list('citizens',inaugural.words())) print(my_bigram_list('people',inaugural.words())) print(my_bigram_list('government',inaugural.words()))['fellow citizens', 'our citizens', 'its citizens', 'the citizens', 'of citizens'] ['the people', 'our people', 'american people', 'a people', 'free people'] ['the government', 'of government', 'our government', 'a government', 'federal government']ข้อ 10#Exam 10.1 from nltk.corpus import wordnet print(wordnet.synset('cetacean.n.01').definition()) #Exam 10.2 from nltk.corpus import wordnet print(wordnet.synset('cetacean.n.01').lemma_names()) #Exam 10.3 mammal = wordnet.synset('mammal.n.01') homo = wordnet.synset('homo.n.01') dolphin = wordnet.synset('dolphin.n.01') print(wordnet.path_similarity(mammal,homo)) print(wordnet.path_similarity(mammal,dolphin)) print(wordnet.path_similarity(homo,dolphin)) #Exam 10.4 cetacean = wordnet.synset('cetacean.n.01') mammal = wordnet.synset('mammal.n.01') homo = wordnet.synset('homo.n.01') dolphin = wordnet.synset('dolphin.n.01') print(cetacean.hypernym_paths(),'\n\n') print(mammal.hypernym_paths(),'\n\n') print(homo.hypernym_paths(),'\n\n') print(dolphin.hypernym_paths(),'\n\n')[[Synset('entity.n.01'), Synset('physical_entity.n.01'), Synset('object.n.01'), Synset('whole.n.02'), Synset('living_thing.n.01'), Synset('organism.n.01'), Synset('animal.n.01'), Synset('chordate.n.01'), Synset('vertebrate.n.01'), Synset('mammal.n.01'), Synset('placental.n.01'), Synset('aquatic_mammal.n.01'), Synset('cetacean.n.01')]] [[Synset('entity.n.01'), Synset('physical_entity.n.01'), Synset('object.n.01'), Synset('whole.n.02'), Synset('living_thing.n.01'), Synset('organism.n.01'), Synset('animal.n.01'), Synset('chordate.n.01'), Synset('vertebrate.n.01'), Synset('mammal.n.01')]] [[Synset('entity.n.01'), Synset('physical_entity.n.01'), Synset('causal_agent.n.01'), Synset('person.n.01'), Synset('homosexual.n.01')], [Synset('entity.n.01'), Synset('physical_entity.n.01'), Synset('object.n.01'), Synset('whole.n.02'), Synset('living_thing.n.01'), Synset('organism.n.01'), Synset('person.n.01'), Synset('homosexual.n.01')]] [[Synset('entity.n.01'), Synset('physical_entity.n.01'),[...]ข้อ 11#exam11 from nltk.corpus import cmudict def syllable(word): return len([1 for p in cmudict.dict()[word.lower()][0] if p[len(p)-1].isnumeric()]) print('Hello: ', syllable('hello'))Hello: 2ข้อ 12#exam12 from nltk import FreqDist from nltk.corpus import inaugural from nltk.corpus import cmudict from collections import Counter # ไม่ใช้ฟังก์ชันจากข้อ 11 เพราะต้อง optimize ความเร็ว wordlist = [w.lower() for w in inaugural.words() if w.isalpha()] vocab = set(wordlist) my_syllable_dict = {} search_dict = 
cmudict.dict() for word in vocab: if word in search_dict: my_syllable_dict[word] = len([1 for p in search_dict[word][0] if p[len(p)-1].isnumeric()]) count_list = [] for word in wordlist: if word in my_syllable_dict: count_list.append(my_syllable_dict[word]) #คำที่พยางยาวที่สุด print(max(count_list)) #คำที่ถี่ที่สุด count_list_counter = Counter(count_list) print(count_list_counter.most_common(1)[0]) #ค่าเฉลี่ยทั้งหมด print(sum(count_list) / float(len(count_list)))7 (1, 83834) 1.5898596477882165import torch from torch import nn from torch import optim from torch.utils.data import TensorDataset,DataLoader from torch.autograd import Variable from sklearn.preprocessing import StandardScaler,MinMaxScaler from torch.nn import functional as F import matplotlib.pyplot as plt import pandas as pd import numpy as np import random from tqdm import tqdm import warnings import joblib warnings.simplefilter('ignore')LOAD DATAdf_url = '/content/drive/MyDrive/台塑輕油案子/data/c620/明志_遷移式學習_訓練資料_寄送版/c620_data.xlsx' c_url = '/content/drive/MyDrive/台塑輕油案子/data/c620/col_names/c620_col_names.pkl' c = joblib.load(c_url) df = pd.read_excel(df_url,index_col=0) df.head() x_col = c['x41'] op_col = c['density']+c['yRefluxRate']+c['yHeatDuty']+c['yControl'] op_col = df[op_col].dropna(axis=1).columns.tolist() print(len(x_col),len(op_col)) dataset = TensorDataset(torch.FloatTensor(df[x_col+op_col].values)) data_iter = DataLoader(dataset,batch_size=64,drop_last=True) next(iter(data_iter))[0].shapemodel and optimizer wgan* 1.判别器最后一层去掉sigmoid* 2.生成器和判别器的loss不取log* 3.每次更新判别器的参数之后把它们的绝对值截断到不超过一个固定常数c* 4.不要用基于动量的优化算法(包括momentum和Adam),推荐RMSProp,SGD也行latent_dim = 128 class Generator(nn.Module): def __init__(self): super(Generator, self).__init__() self.f1 = nn.Linear(latent_dim,latent_dim) self.f2 = nn.Linear(latent_dim,latent_dim) self.wt_head = nn.Linear(latent_dim,len(x_col)) self.op_head = nn.Linear(latent_dim,len(op_col)) def forward(self, z): z = F.relu(self.f1(z)) z = F.relu(self.f2(z)) wt = F.sigmoid(self.wt_head(z)) wt = wt / wt.sum(dim=1).reshape(-1,1) wt = wt * 100 op = self.op_head(z) return torch.cat((wt,op),dim=1) generator = Generator() discriminator = nn.Sequential(nn.Linear(len(x_col)+len(op_col),1)) optimizer_G = torch.optim.RMSprop(generator.parameters(), lr=0.00005) optimizer_D = torch.optim.RMSprop(discriminator.parameters(), lr=0.00005) def train(generator,discriminator,optimizer_D,optimizer_G,data_iter,num_epoch=30000,n_critic=5,clip_value=0.01,latent_dim=128): batches_done = 0 history = {'loss_G':[],'loss_D':[]} for epoch in range(num_epoch): for i, datas in enumerate(data_iter): datas = datas[0] real_datas = Variable(datas.type(torch.FloatTensor)) # --------------------- # Train Discriminator # --------------------- optimizer_D.zero_grad() # Sample noise as generator input z = Variable(torch.FloatTensor(np.random.normal(0,1,(real_datas.shape[0],latent_dim)))) # Generate a batch of datas fake_datas = generator(z).detach() # Adversarial loss assert real_datas.shape == fake_datas.shape loss_D = -torch.mean(discriminator(real_datas)) + torch.mean(discriminator(fake_datas)) loss_D.backward() optimizer_D.step() # Clip weights of discriminator for p in discriminator.parameters(): p.data.clamp_(-clip_value,clip_value) # Train the generator every n_critic iterations if i % n_critic == 0: # ----------------- # Train Generator # ----------------- optimizer_G.zero_grad() # Generate a batch of datas gen_datas = generator(z) # Adversarial loss loss_G = -torch.mean(discriminator(gen_datas)) loss_G.backward() 
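                # This inner block follows the WGAN recipe listed before the model definition:
                # the critic (discriminator) has no final sigmoid, the losses are raw Wasserstein
                # estimates rather than log-likelihoods, the critic's weights are clamped to
                # [-clip_value, clip_value] after each critic step, the generator is updated only
                # once every n_critic critic updates, and RMSprop is used instead of Adam/momentum.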
optimizer_G.step() history['loss_G'].append(loss_G.item()) history['loss_D'].append(loss_D.item()) if epoch % 100 == 0: print("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"% (epoch,num_epoch,batches_done%len(data_iter),len(data_iter),loss_D.item(),loss_G.item())) batches_done += 1 return generator.eval(),discriminator.eval(),history generator,discriminator,history = train(generator,discriminator,optimizer_D,optimizer_G,data_iter) plt.plot(history['loss_G'],label='loss_G') plt.plot(history['loss_D'],label='loss_D') plt.legend() plt.show() z = Variable(torch.FloatTensor(np.random.normal(0,1,(9999,latent_dim)))) fake_data = generator(z) fake_data.detach().numpy()[:,:41].sum(axis=1) fake_data = pd.DataFrame(fake_data.detach().numpy(),columns=x_col+op_col) fake_data fake_data.to_csv('/content/drive/MyDrive/台塑輕油案子/data/c620/fake_data/c620_fake_data.csv')Implementation of model.earth page - Part 1Using the [write up](https://model.earth/localsite/info/data/) by Catherine from EPA as a guide.With modifications from Loren: `add display of counties for Motor Vehicle Manufacturing as both 4-digit 3361 and 6-digit 336111. Let’s look at Georgia, Alabama, California and Nevada.`# if running in colab uncomment next line #!pip install git+https://github.com/modelearth/flowsa -q import flowsa import pandas as pdgetFlowByActivityThis is the API function we'll be using. They have docstrings to look at.help(flowsa.getFlowByActivity)Help on function getFlowByActivity in module flowsa: getFlowByActivity(flowclass, years, datasource, geographic_level='all', file_location='local') Retrieves stored data in the FlowByActivity format :param flowclass: list, a list of`Class' of the flow. required. E.g. ['Water'] or ['Land', 'Other'] :param year: list, a list of years [2015], or [2010,2011,2012] :param datasource: str, the code of the datasource. :param geographic_level: 'all', 'national', 'state', 'county'. Default is 'all' :param file_location: 'local' or 'remote'. Default is 'local' :return: a pandas DataFrame in FlowByActivity formatUse API - returns dataframedf = flowsa.getFlowByActivity(flowclass=['Employment', 'Money', 'Other'], years=[2018], datasource="BLS_QCEW")2021-02-24 07:20:39 INFO BLS_QCEW_2018.parquet not found in local folder; loading from remote server...Some standard dataframe analysis. 1. Size 2. dtypes 3. isnull values 4. ... I'll go through it my way for the first df. You can apply your favorite techniques (i.e. plots, ...)df.shape #info function in colab doesn't support this parameter #df.info(show_counts=True) df.info() df.isnull().sum()FIPS codesThis df was generated with the default geographic_level (all) meaning it returns both state and county FIPS values. Let's check that out for GA (FIPS = 13000).df[df.Location.str.startswith('13')]['Location'].unique() len(_)There are 159 counties in GA according to wikipedia. 
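A quick sanity check, sketched here assuming the same `df` returned by `getFlowByActivity` above: drop the state-level code '13000' and the odd '13999' entry before counting.
```python
# Count Georgia county-level FIPS codes only, excluding the state total (13000)
# and the 13999 catch-all mentioned just below.
ga_locs = df[df.Location.str.startswith('13')]['Location'].unique()
county_fips = [loc for loc in ga_locs if loc not in ('13000', '13999')]
len(county_fips)  # expected to land on 159 if only real counties remain
```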
Since the count includes the state FIPS and there looks to be a weird one, 13999, I think it's good.
#df = flowsa.getFlowByActivity(flowclass=['Employment', 'Money', 'Other'], years=[2015, 2016], datasource="BLS_QCEW") from flowsa.common import fbaoutputpath fc=['Employment', 'Money', 'Other'] years=[2018] ds="BLS_QCEW"
County level data
Digressing a bit from the write-up on model.earth at this point.
# using the county geographic_level county_df = flowsa.getFlowByActivity(flowclass=fc, years=years, datasource=ds, geographic_level='county') county_df.shape
Build a dataframe with state-specific rows.
# Create new column with first two digits of Location representing state county_df = county_df.assign(StateFIPS=county_df['Location'].apply(lambda x: x[0:2])) # create subset with the states, AL, GA, CA, NV state_subset_df = county_df[county_df['StateFIPS'].isin(['01', '06', '13', '32'])].reset_index(drop=True) state_subset_df.shape state_subset_df['StateFIPS'].value_counts()
So this makes sense.
Further filter for NAICS of interest
Based on discussion with Loren I am simplifying to auto manufacturing type codes.
len(state_subset_df.ActivityProducedBy.unique()) state_subset_df_auto = state_subset_df[state_subset_df['ActivityProducedBy'].isin(['3361', '336111'])].reset_index(drop=True) len(state_subset_df_auto)
We now have a very manageable data set to understand how to roll things around. It contains information on two NAICS codes and 4 states.
Transform df to generate required csv
I am going to simplify and just look at GA for starters and a subset of the columns.
georgia_df = state_subset_df_auto.query(f"StateFIPS == '13'").reset_index(drop=True)[['Location', 'ActivityProducedBy', 'Class', 'FlowAmount']] georgia_df = georgia_df.rename(columns={'Location': 'fips', 'ActivityProducedBy': 'naics'})
Final prep on the df before we group and generate the df that can be used for the csv file.
new_column_names = {'Employment': 'employees', 'Money': 'wages', 'Other': 'firms'} georgia_df['Class'] = georgia_df['Class'].apply(lambda r: new_column_names[r])
Let's take a peek at the dataframe now. Finally, this code snippet groups the data, iterates, and builds the final df.
Someone can show me if there's a `simple` pandas method to do this?rows = [] cols = ['fips', 'naics', 'employees', 'firms', 'wages'] grouped_df = georgia_df.groupby(['fips', 'naics', 'Class'])['FlowAmount'].sum().unstack() for index, row in grouped_df.iterrows(): rows.append([index[0], index[1], row.employees, row.firms, row.wages]) pd.DataFrame(data=rows, columns=cols)Whole lot of 0.0??georgia_df.query(f"Class == 'wages'")Exercise 03.02 | Internet users around the worldYour company wants to invest in countries with potential internet user growth.That is we want to find countries with the lowest ratio of internet users.Your task is to find the following information on the internet:* A table showing the number of internet users within a country.* A table showing the population per country.Once you found a webpage, load the appropriate table into a DataFrame.---- DiscussionDifferent websites will contain this information, but let's go with CIAfactbook, as their tables are quite parseable (besides, the information will beavailable in other formats as well).* Internet users: [https://www.cia.gov/library/publications/the-world-factbook/fields/204rank.html](https://www.cia.gov/library/publications/the-world-factbook/fields/204rank.html)* Population: [https://www.cia.gov/library/publications/the-world-factbook/fields/335rank.html](https://www.cia.gov/library/publications/the-world-factbook/fields/335rank.html) Import Pandas and load the data info dataframe lists.import pandas as pd f204s = pd.read_html("https://www.cia.gov/library/publications/the-world-factbook/fields/204rank.html") f335s = pd.read_html("https://www.cia.gov/library/publications/the-world-factbook/fields/335rank.html")Confirm, that there is only one table on the page (if there are more, you need to find out, which one contains the relevant information); then assign new variables for the dataframes.assert len(f204s) == 1 assert len(f335s) == 1 users = f204s[0] population = f335s[0] users.head() population.head()Excellent, you found an important relevant information quickly! Optional task: Determine the countries with the lowest ratio of internet users. For that, we need to merge the three important columns: "Country", "Internet users" and "Population".mdf = population[["Country", "Population"]].merge(users[["Country", "Internet users"]]) mdf.head()Calculate the ratio and put it into a new column, the sort by the ratio.mdf["Ratio"] = mdf["Internet users"] / mdf["Population"] mdf.sort_values(by="Ratio").head(10)Saperating input and output variabledfinput = df1.loc[:,['Rooms','Type','Method','Distance','Bedroom2','Bathroom','Car','Landsize','BuildingArea']] dfinput.head() dfoutput = df1.loc[:,['Price']] dfoutput.head()Spliting data into 80:20split = int(.8*len(df1)) print(split) df1.shape 14246*.8 14246*.2 rename?? 
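A minimal sketch of how the 80/20 split index computed above could actually be applied, assuming `df1`, `dfinput`, and `dfoutput` as defined in the preceding cells:
```python
# Order-preserving 80/20 split of the feature and target frames
split = int(0.8 * len(df1))
train_x, test_x = dfinput.iloc[:split], dfinput.iloc[split:]
train_y, test_y = dfoutput.iloc[:split], dfoutput.iloc[split:]
print(train_x.shape, test_x.shape, train_y.shape, test_y.shape)
```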
%whos dfinput.isnull().sum() (dfinput['Landsize'] == 0).sum() ## there are 2026 '0' values in landsize dfinput['Landsize'].shape import statsmodels.api as sm import statsmodels.formula.api as smf # ordilary least Square ols = smf.ols('Price~Rooms+Bedroom2+Bathroom',data = df1).fit() print(ols) print(ols.summary()) ols = smf.ols('Price~Rooms+Bedroom2+Bathroom+Landsize+BuildingArea',data = df1).fit() print(ols) print(ols.summary()) df2 = df1.corr() print(df2) x=df2.loc['Price'] print(x) x = df2[df2.loc['Price']>.5] print(x) df.axes df.head() df1['Postcode'] = df['Postcode'] df1.head() df1.isnull().sum() df1.shape df1['Postcode'].plot(kind = 'density') df1['Postcode'].median() df1['Postcode'].mean() df1['Postcode'].iloc[df1['Postcode'].isnull()] = 3085.0 df1.isnull().sum() df1.shape corr_df1 = df1.corr() ols1 = smf.ols('Price~Rooms+Postcode+Bedroom2+Bathroom',data = df1).fit() print(ols1.summary()) ols2=smf.ols('Price~Rooms+Bathroom+Bedroom2+Car',data = df1).fit() print(ols2.summary()) ols3=smf.ols('Price~Rooms+Bathroom+Bedroom2+Car+Postcode',data = df1).fit() print(ols3.summary()) df1['Postcode'].cov(df1['Price']) df1.axes ols4 = smf.ols('Price~Rooms+Distance+Bathroom+Bedroom2+Car',data = df1).fit() print(ols4.summary()) ols5 = smf.ols('Price~Rooms+Distance+Bathroom+Bedroom2+Car+Postcode',data = df1).fit() print(ols5.summary()) ols6 = smf.ols('Price~Rooms+Distance+Bathroom+Bedroom2+Car+Postcode+Landsize',data = df1).fit() print(ols6.summary()) ols7 = smf.ols('Price~Rooms+Distance+Bathroom+BuildingArea+Car+Postcode+Landsize',data = df1).fit() print(ols7.summary()) df1.axes ols8 = smf.ols('Price~Rooms+Distance+Bathroom+BuildingArea+Car+Postcode+Landsize+Method',data = df1).fit() print(ols8.summary()) ols9 = smf.ols('Price~Rooms+Distance+Bathroom+BuildingArea+Car+Postcode+Landsize+Type',data = df1).fit() print(ols9.summary()) corr_df1['Price'] import statsmodels.graphics.api as smg corr_matrix = np.corrcoef(df1) smg.plot_corr(corr_matrix, xnames=df1.columns) plt.show()First method to load an image:filename = 'image_data/'Creando modelos en kerasVamos ver cómo sería un workflow de keras:* Crear el modelo* Crear y añadir las capas* Compilar el modelo* Entrenar el modelo* Usar el modelo para predecir o evaluar EL modelo secuencial:``` model = tf.keras.Sequential() model.add(tf.keras.layers.Activation('tanh'))```import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D model = tf.keras.Sequential() model.add(tf.keras.layers.Dense(10, input_shape=(256,))) model.add(tf.keras.layers.Activation('tanh')) model.add(tf.keras.layers.Dense(10)) model.add(tf.keras.layers.Activation('softmax')) model.summary()Model: "sequential_3" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense_18 (Dense) (None, 10) 2570 _________________________________________________________________ activation_6 (Activation) (None, 10) 0 _________________________________________________________________ dense_19 (Dense) (None, 10) 110 _________________________________________________________________ activation_7 (Activation) (None, 10) 0 ================================================================= Total params: 2,680 Trainable params: 2,680 Non-trainable params: 0 _________________________________________________________________Modelo funcional``` miModelo = Model(inputs=tensor1, outputs=tensor2)```import 
tensorflow as tf from tensorflow.keras.layers import Dense from tensorflow.keras.layers import Input, Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D ins = tf.keras.layers.Input(shape=(784,)) x = tf.keras.layers.Dense(64, activation='relu')(ins) x = tf.keras.layers.Dense(64, activation='relu')(x) predictions = tf.keras.layers.Dense(10, activation='softmax')(x) model = tf.keras.Model(inputs=ins, outputs=predictions) model.summary()Model: "model_4" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_5 (InputLayer) [(None, 784)] 0 _________________________________________________________________ dense_20 (Dense) (None, 64) 50240 _________________________________________________________________ dense_21 (Dense) (None, 64) 4160 _________________________________________________________________ dense_22 (Dense) (None, 10) 650 ================================================================= Total params: 55,050 Trainable params: 55,050 Non-trainable params: 0 _________________________________________________________________Εργασία 1 B. Αναγνώριση τύπων γυαλιού με βάση τη χημική τους σύσταση 1. Προετοιμασία των δεδομένων Σκοπός της εργασίας είναι η αξιολόγηση μοντέλων τα οποία κατηγοριοποιούν μετρήσεις διαφόρων χημικών στοιχείων σε 6 τύπους γυαλιού. Τα δεδομένα αυτά είχαν συγκεντρωθεί με σκοπό να μπορεί να αναγνωριστεί ο τύπος του γυαλιού όταν αυτό συλλέγεται από τόπους εγκλήματος έτσι ωστέ να μπορεί να χρησιμοποιηθεί ως αποδεικτικό στοιχείο. Παρακάτω βλέπουμε τις εκδόσεις της python και των βιβλιοθηκών που χρησιμοποιήθηκαν στην εργασία.import matplotlib import seaborn as sns import pandas as pd import numpy as np import sklearn import sys np.random.seed(0) print('Python version:', sys.version) print('scikit-learn version:', sklearn.__version__) print('pandas version:', pd.__version__) print('numpy version:', np.__version__) print('matplotlib version:', matplotlib.__version__) print('seaborn version:', sns.__version__)Python version: 3.7.9 (default, Aug 31 2020, 17:10:11) [MSC v.1916 64 bit (AMD64)] scikit-learn version: 0.23.2 pandas version: 1.1.3 numpy version: 1.19.2 matplotlib version: 3.3.2 seaborn version: 0.11.0Τα δεδομένα βρίσκονται στο αρχείο glass.data τα οποία διαβάζονται και αποθηκεύονται σε ένα pandas dataframe.import numpy as np import urllib.request import os np.random.seed(0) url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/glass/glass.data' filename = 'glass.data' if not os.path.exists(filename): print('Downloading file...') urllib.request.urlretrieve(url, filename) print('Done.') names = ("Id", "RI", "Na", "Mg", "Al", "Si", "K", "Ca", "Ba", "Fe", "Type") df = pd.read_csv(filename, names=names)Παρακάτω βλέπουμε τα 5 πρώτα παραδείγματα.df.head()Ακολουθεί περιγραφή των διαφόρων χαρακτηριστικών.%%html |Id|Attribute|Description||:-|:-|:-||1|Id| Id number: 1 to 214||2|RI|refractive index|3|Na|Sodium (unit measurement: weight percent in corresponding oxide, as are attributes 4-10)|4|Mg|Magnesium|5|Al|Aluminum|6|Si|Silicon|7|K|Potassium|8|Ca|Calcium|9|Ba|Barium|10|Fe|Iron|11|Type|Type of glass: (class attribute) Ακολουθεί περιγραφή των κλάσεων. Η 4η κλάση δεν χρησιμοποιείται. 
|Class|Description||:-|:-||1|building_windows_float_processed|2|building_windows_non_float_processed|3|vehicle_windows_float_processed|4|vehicle_windows_non_float_processed (none in this database)|5|containers|6|tableware|7|headlamps Όλα τα χαρακτηριστικά είναι πραγματικοί αριθμοί εκτός από το Id και τη μεταβλητή στόχο Type οι οποίοι είναι ακέραιοι. Παρατηρούμε ότι δεν υπάρχουν τιμές που λείπουν.df.info() RangeIndex: 214 entries, 0 to 213 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Id 214 non-null int64 1 RI 214 non-null float64 2 Na 214 non-null float64 3 Mg 214 non-null float64 4 Al 214 non-null float64 5 Si 214 non-null float64 6 K 214 non-null float64 7 Ca 214 non-null float64 8 Ba 214 non-null float64 9 Fe 214 non-null float64 10 Type 214 non-null int64 dtypes: float64(9), int64(2) memory usage: 18.5 KBΒγάζουμε εκτός του dataframe το Id γιατί είναι απλώς ένας αύξων αριθμός και δεν προσφέρει κάποια χρήσιμη πληροφορία για την κατηγοριοποίησηdf.drop(labels='Id', axis=1, inplace=True)Το παρακάτω ιστόγραμμα μας δείχνει ότι οι κλάσεις των ψηφίων δεν είναι ζυγισμένες.import matplotlib.pyplot as plt df.hist(column='Type') plt.show()To dataset χωρίζεται σε training set (60%) και test set (40%) χρησιμοποιώντας την συνάρτηση StratifiedShuffleSplit η οποία μας εξασφαλίζει οτι η κατανομή στα δυο set θα είναι ίδια ως προς τη μεταβλητή στόχο.from sklearn import model_selection split = model_selection.StratifiedShuffleSplit(n_splits=1, test_size=0.4, random_state=0) for train_index, test_index in split.split(df, df['Type']): df_train = df.loc[train_index] df_test = df.loc[test_index] df_train_x = df_train.drop('Type', axis=1) df_train_y = df_train['Type'] df_test_x = df_test.drop('Type', axis=1) df_test_y = df_test['Type'] df_train_y.hist() df_test_y.hist() plt.show()Παρακάτω βλέπουμε κάποια στατιστικά δεδομένα για τις ανέξαρτητες μεταβλητές και την εξαρτημένη μεταβλητή Type.df_train.describe()Ακολουθούν ιστογράμματα των χαρακτηριστικών και το scatter matrix.df_train_x.hist(figsize=(12, 12)) plt.plot() pd.plotting.scatter_matrix(df_train_x, figsize=(12, 12)) plt.show()Το training set και το test set αποθηκεύονται σε numpy arrays.x_train = df_train_x.to_numpy() y_train = df_train_y.to_numpy() x_test = df_test_x.to_numpy() y_test = df_test_y.to_numpy() print('x_train.shape =', x_train.shape) print('y_train.shape =', y_train.shape) print('x_test.shape =', x_test.shape) print('y_test.shape =', y_test.shape)x_train.shape = (128, 9) y_train.shape = (128,) x_test.shape = (86, 9) y_test.shape = (86,)2. Μείωση διαστάσεων Εφαρμόζεται κανονικοποίηση και μείωση των διαστάσεων με την τεχνική PCA διατηρώντας το 90% της διασποράς.from sklearn import decomposition from sklearn import preprocessing scaler = preprocessing.MinMaxScaler() x_train_scaled = scaler.fit_transform(x_train) pca = decomposition.PCA(n_components=0.9, svd_solver='full', random_state=0) x_train_reduced = pca.fit_transform(x_train_scaled) print('x_train.shape', x_train.shape) print('x_train_reduced.shape =', x_train_reduced.shape)x_train.shape (128, 9) x_train_reduced.shape = (128, 5)Η διαδικάσια αυτή γίνεται μόνο για την απεικόνιση των παραδειγμάτων στο επίπεδο των δυο πρώτων ιδιοδιανυσμάτων. Αργότερα εφαρμόζεται μέσω pipeline.plt.scatter(x_train_reduced[:, 0], x_train_reduced[:, 1], c=y_train, cmap='Set1', alpha=0.6) plt.xlabel('1st eigenvector') plt.ylabel('2nd eigenvector') plt.colorbar() plt.show()3. 
Αξιολόγηση μοντέλων 3.1 Περιγραφή της διαδικασίας Επιλέγεται να γίνει αξιολόγηση των μοντέλων για την αναζήτηση των βέλτιστων παραμέτρων με την τεχνική της διασταυρωμένης επικύρωσης επειδή τα παραδείγματα είναι λίγα. Για την αξιολόγηση επιλέγεται η μετρική F1 επειδή οι κλάσεις δεν είναι ζυγισμένες. Η συνάρτηση GridSearchCV εφαρμόζει k-fold cross-validation με k=5 και stratified shuffle split. Μετά απο κάθε split εφαρμόζεται κανονικοποίηση με τον MinMaxScaler, PCA και εκπαίδευση του μοντέλου. Αυτά τα 3 βήματα ομαδοποιούνται σε ένα pipeline. Όταν τελειώσει η διαδικασία του grid search δημιουργείται ένα διάγραμμα για το πώς αλλάζει το μέσο F1 ($\pm$std) στο training set και στο validation set καθώς αλλάζει μια παράμετρος και ένα αντίστοιχο διάγραμμα για τον χρόνο εκπαίδευσης. Στο τέλος εκπαιδεύεται το βέλτιστο μοντέλο εκ νέου σε ολόκληρο το training set και αξιολογείται στο test set.def plot_grid_search(search, baseline_score_val, param1, param2=None, xscale='linear'): param1_key = 'model__' + param1 if param2 is None: x = [x['model__' + param1] for x in search.cv_results_['params']] mean_train_score = search.cv_results_['mean_train_score'] mean_test_score = search.cv_results_['mean_test_score'] mean_fit_time = search.cv_results_['mean_fit_time'] std_train_score = search.cv_results_['std_train_score'] std_test_score = search.cv_results_['std_test_score'] std_fit_time = search.cv_results_['std_fit_time'] else: param2_key = 'model__' + param2 x = [] mean_train_score = [] mean_test_score = [] mean_fit_time = [] std_train_score = [] std_test_score = [] std_fit_time = [] val2 = search.best_params_[param2_key] all_mean_train_score = search.cv_results_['mean_train_score'] all_mean_test_score = search.cv_results_['mean_test_score'] all_mean_fit_time = search.cv_results_['mean_fit_time'] all_std_train_score = search.cv_results_['std_train_score'] all_std_test_score = search.cv_results_['std_test_score'] all_std_fit_time = search.cv_results_['std_fit_time'] for i, params in enumerate(search.cv_results_['params']): if params[param2_key] == val2: x.append(params[param1_key]) mean_train_score.append(all_mean_train_score[i]) mean_test_score.append(all_mean_test_score[i]) mean_fit_time.append(all_mean_fit_time[i]) std_train_score.append(all_std_train_score[i]) std_test_score.append(all_std_test_score[i]) std_fit_time.append(all_std_fit_time[i]) mean_train_score = np.array(mean_train_score) mean_test_score = np.array(mean_test_score) mean_fit_time = np.array(mean_fit_time) std_train_score = np.array(std_train_score) std_test_score = np.array(std_test_score) std_fit_time = np.array(std_fit_time) baseline_score = np.full(len(x), baseline_score_val) if param2 is None: plt.title('Score') else: if isinstance(val2, float): plt.title('Score for ' + param2 + ' = ' + "{:.4f}".format(val2)) else: plt.title('Score for ' + param2 + ' = ' + str(val2)) plt.plot(x, mean_train_score, label='Train') plt.fill_between(x, mean_train_score-std_train_score, mean_train_score+std_train_score, alpha=0.2) plt.plot(x, mean_test_score, label='Validation') plt.fill_between(x, mean_test_score-std_test_score, mean_test_score+std_test_score, alpha=0.2) plt.plot(x, baseline_score, label='Baseline', linestyle='dotted') plt.xscale(xscale) plt.xlabel(param1) plt.ylabel('F1') plt.legend() plt.show() plt.title('Training time') plt.plot(x, mean_fit_time) plt.fill_between(x, mean_fit_time-std_fit_time, mean_fit_time+std_fit_time, alpha=0.2) plt.xscale('log') plt.xlabel(param1) plt.ylabel('Time (sec)') plt.show() final_results = [] def 
evaluate_model(classifier_str, search): y_pred = search.predict(x_test) y_train_pred = search.predict(x_train) train_f1 = metrics.f1_score(y_train, y_train_pred, average='weighted', zero_division=0) test_f1 = metrics.f1_score(y_test, y_pred, average='weighted', zero_division=0) print('Training time = {} sec'.format(search.refit_time_)) print('Accuracy =', metrics.accuracy_score(y_test, y_pred)) print('Precision =', metrics.precision_score(y_test, y_pred, average='weighted', zero_division=0)) print('Recall =', metrics.recall_score(y_test, y_pred, average='weighted', zero_division=0)) print('F1 =', test_f1) print('Training F1 =', train_f1) confusion_matrix = metrics.confusion_matrix(y_test, y_pred, labels=range(1, 8)) labels = [str(x) for x in range(1, 8)] sns.heatmap(confusion_matrix, cmap="Oranges", annot=True, xticklabels=labels, yticklabels=labels) plt.title('Confusion matrix') plt.show() correct_indices = np.where(y_test == y_pred)[0] incorrect_indices = np.where(y_test != y_pred)[0] for i in correct_indices[:1]: print('Example of correct classification (y_pred = {}, y_test = {})'.format(y_pred[i], y_test[i])) print(df_test_x.iloc[i]) print() for i in incorrect_indices[:1]: print('Example of misclassification (y_pred = {}, y_test = {})'.format(y_pred[i], y_test[i])) print(df_test_x.iloc[i]) best_params_str = '' for param, value in search.best_params_.items(): if best_params_str != '': best_params_str += ', ' if param.startswith('model__'): best_params_str += param[len('model__'):] else: best_params_str += param best_params_str += ' = ' if isinstance(value, float): best_params_str += '{:.4f}'.format(value) else: best_params_str += str(value) res = { 'Classifier': classifier_str, 'Parameters': best_params_str, 'Training F1': "{:.4f}".format(train_f1), 'Test F1': "{:.4f}".format(test_f1), 'Training Time (sec)': "{:.5f}".format(search.refit_time_) } final_results.append(res)3.2 Dummy Classifier Ο Dummy Classifier χρησιμοποίεται ως ένα απλό baseline για τη σύγκριση με τα άλλα μοντέλα. Επιλέγει πάντα την πιο συχνή ετικέτα στο training set.from sklearn import dummy from sklearn import metrics from sklearn import pipeline from time import time model = dummy.DummyClassifier(strategy='most_frequent', random_state=0) pipe = pipeline.Pipeline([('scaler', scaler), ('pca', pca), ('model', model)]) t1 = time() pipe.fit(x_train, y_train) t2 = time() y_pred = pipe.predict(x_test) y_train_pred = pipe.predict(x_train) train_f1 = metrics.f1_score(y_train, y_train_pred, average='weighted', zero_division=0) dummy_f1 = metrics.f1_score(y_test, y_pred, average='weighted', zero_division=0) print('Training time = {} sec'.format(t2 - t1)) print('Accuracy =', metrics.accuracy_score(y_test, y_pred)) print('Precision =', metrics.precision_score(y_test, y_pred, average='weighted', zero_division=0)) print('Recall =', metrics.recall_score(y_test, y_pred, average='weighted', zero_division=0)) print('F1 =', dummy_f1) print('Training F1 =', train_f1) res = { 'Classifier': 'Dummy Classifier', 'Parameters': "strategy = 'most_frequent'", 'Training F1': "{:.4f}".format(train_f1), 'Test F1': "{:.4f}".format(dummy_f1), 'Training Time (sec)': "{:.4f}".format(t2 - t1) } final_results.append(res)Training time = 0.0009975433349609375 sec Accuracy = 0.3488372093023256 Precision = 0.12168739859383451 Recall = 0.3488372093023256 F1 = 0.18043303929430635 Training F1 = 0.190014367816091963.3 Linear SVM Η βελτιστοποίηση του svm.SVC με linear kernel πραγματοποιείται ως προς την παράμετρο C. 
Το C είναι η παράμετρος αντιστάθμισης μεταξύ της προσπάθειας για μέγιστο περιθώριο μεταξύ των κλάσεων και για ελάχιστο αριθμό λαθών. $ Loss = \mathbf{w}^T\mathbf{w} + C\sum_{k=1}^R\varepsilon_{k}\ $ Kernel: $ K(\mathbf{x}, \mathbf{x}') = \langle\mathbf{x},\mathbf{x}'\rangle $from sklearn import svm model = svm.SVC(kernel='linear') pipe = pipeline.Pipeline([ ('scaler', scaler), ('pca', pca), ('model', model) ]) param_grid = { 'model__C': np.logspace(-3, 3) } search = model_selection.GridSearchCV(pipe, param_grid, n_jobs=-1, scoring='f1_weighted', return_train_score=True) search.fit(x_train, y_train) print('Best F1 =', search.best_score_) print('Best params =', search.best_params_) plot_grid_search(search, dummy_f1, 'C', xscale='log') evaluate_model('Linear SVM', search)Training time = 0.015622138977050781 sec Accuracy = 0.5232558139534884 Precision = 0.44693446088794925 Recall = 0.5232558139534884 F1 = 0.4756266988825128 Training F1 = 0.58415972468391823.4 Polynomial SVM Η βελτιστοποίηση του svm.SVC με polynomial kernel πραγματοποιείται ως προς τις παραμέτρους C και degree. Το degree είναι ο βαθμός του πολυωνύμου. Το gamma έχει την default τιμή 'scale' και υπολογίζεται ως gamma = 1/(n_features*X.var()). Kernel: $ K(\mathbf{x}, \mathbf{x}') = (\gamma\langle\mathbf{x},\mathbf{x}'\rangle+r)^d $model = svm.SVC(kernel='poly') pipe = pipeline.Pipeline([ ('scaler', scaler), ('pca', pca), ('model', model) ]) param_grid = { 'model__C': np.logspace(-3, 3), 'model__degree': np.arange(2, 5) } search = model_selection.GridSearchCV(pipe, param_grid, n_jobs=-1, scoring='f1_weighted', return_train_score=True) search.fit(x_train, y_train) print('Best F1 =', search.best_score_) print('Best params =', search.best_params_) plot_grid_search(search, dummy_f1, 'C', param2='degree', xscale='log') evaluate_model('Polynomial SVM', search)Training time = 0.015618562698364258 sec Accuracy = 0.6162790697674418 Precision = 0.5566737079006593 Recall = 0.6162790697674418 F1 = 0.5812971499208632 Training F1 = 0.7669947240259743.5 RBF SVM Η βελτιστοποίηση του svm.SVC με rbf kernel πραγματοποιείται ως προς τις παραμέτρους C και gamma. Το gamma δείχνει πόσο μακριά φτάνει η επιρροή ενός παραδείγματος. Kernel: $ K(\mathbf{x}, \mathbf{x}') = e^{-\gamma||\mathbf{x}-\mathbf{x}'||^2} $model = svm.SVC(kernel='rbf') pipe = pipeline.Pipeline([ ('scaler', scaler), ('pca', pca), ('model', model) ]) param_grid = { 'model__C': np.logspace(-3, 3), 'model__gamma': np.logspace(-3, 3) } search = model_selection.GridSearchCV(pipe, param_grid, n_jobs=-1, scoring='f1_weighted', return_train_score=True) search.fit(x_train, y_train) print('Best F1 =', search.best_score_) print('Best params =', search.best_params_) plot_grid_search(search, dummy_f1, 'C', param2='gamma', xscale='log') evaluate_model('RBF SVM', search)Training time = 0.0 sec Accuracy = 0.6627906976744186 Precision = 0.639828411153275 Recall = 0.6627906976744186 F1 = 0.6369513565630606 Training F1 = 0.73741810923057143.6 Sigmoid SVM Η βελτιστοποίηση του svm.SVC με sigmoid kernel πραγματοποιείται ως προς τις παραμέτρους C και gamma. 
Kernel: $ K(\mathbf{x}, \mathbf{x}') = tanh(\gamma\langle\mathbf{x},\mathbf{x}'\rangle+r) $model = svm.SVC(kernel='sigmoid') pipe = pipeline.Pipeline([ ('scaler', scaler), ('pca', pca), ('model', model) ]) param_grid = { 'model__C': np.logspace(-3, 3), 'model__gamma': np.logspace(-3, 3) } search = model_selection.GridSearchCV(pipe, param_grid, n_jobs=-1, scoring='f1_weighted', return_train_score=True) search.fit(x_train, y_train) print('Best F1 =', search.best_score_) print('Best params =', search.best_params_) plot_grid_search(search, dummy_f1, 'C', param2='gamma', xscale='log') evaluate_model('Sigmoid SVM', search)Training time = 0.0019843578338623047 sec Accuracy = 0.5232558139534884 Precision = 0.4462413666544326 Recall = 0.5232558139534884 F1 = 0.47586232650059745 Training F1 = 0.58430059523809523.7 Nearest Neighbors Η βελτιστοποίηση του sklearn.neighbors.KNeighborsClassifier πραγματοποιείται ως προς τις παραμέτρους n_neighbors και p. Στον πολυδιάστατο χώρο των χαρακτηριστικών οι k πλησιέστεροι γείτονες του κάθε δείγματος ψηφίζουν για να αποφασίσουν την κλάση στην οποία ανήκει. Για να βρεθούν οι πλησιέστεροι γείτονες χρησιμοποιείται η απόσταση minkowski: $d_p(\mathbf{x}, \mathbf{y}) = \sqrt[p]{\sum_{i}|x_i-y_i|^p}$from sklearn import neighbors model = neighbors.KNeighborsClassifier() pipe = pipeline.Pipeline([ ('scaler', scaler), ('pca', pca), ('model', model) ]) param_grid = { 'model__n_neighbors': np.arange(1, 20), 'model__p': np.arange(1, 5) } search = model_selection.GridSearchCV(pipe, param_grid, n_jobs=-1, scoring='f1_weighted', return_train_score=True) search.fit(x_train, y_train) print('Best F1 =', search.best_score_) print('Best params =', search.best_params_) plot_grid_search(search, dummy_f1, 'n_neighbors', param2='p') evaluate_model('Nearest Neighbors', search)Training time = 0.0 sec Accuracy = 0.686046511627907 Precision = 0.6938243584960411 Recall = 0.686046511627907 F1 = 0.677388184262073 Training F1 = 1.03.8 Nearest Class Centroid Η βελτιστοποίηση του sklearn.neighbors.NearestCentroid πραγματοποιείται ως προς το shrink_threshold. Κάθε κλάση αντιπροσωπεύεται απο το centroid των μελών της.model = neighbors.NearestCentroid() pipe = pipeline.Pipeline([ ('scaler', scaler), ('pca', pca), ('model', model) ]) param_grid = { 'model__shrink_threshold': np.arange(0, 1.1, 0.01) } search = model_selection.GridSearchCV(pipe, param_grid, n_jobs=-1, scoring='f1_weighted', return_train_score=True) search.fit(x_train, y_train) print('Best F1 =', search.best_score_) print('Best params =', search.best_params_) plot_grid_search(search, dummy_f1, 'shrink_threshold') evaluate_model('Nearest Class Centroid', search)Training time = 0.0 sec Accuracy = 0.5465116279069767 Precision = 0.600306670074112 Recall = 0.5465116279069767 F1 = 0.477906976744186 Training F1 = 0.453337447033142664. 
Σύνοψη αποτελεσμάτωνfinal_results_df = pd.DataFrame(final_results) final_results_df = final_results_df.style.set_table_styles([dict(selector='th', props=[('text-align', 'left')])]) final_results_df.set_properties(**{'text-align': 'left'}).hide_index()print function* the syntax for **print** varies between verions of python> print("something") for version python 3* ```from __future__ import print_function``` is used to mitigate the useage differenceprint("Hello World !")Hello World !comments* single line comment* mutli line comment* shebang# this is a single line comment """ This is a multi line comment Where we can use Any spacing we want """ ''' this is also another multi line comment ''' print("Read The comments") ''' #!/usr/bin/env python3 The first line in this file is the "shebang" line. When you execute a file from the shell, the shell tries to run the file using the command specified on the shebang line. The ! is called the "bang". ... The shebang line specifies exactly how to run a script. ''' print("There are only two special comments we have to keep in mind:") var = """ #!/usr/bin/env python3 #-*- coding: utf-8 -*- """ print(var) print("comments can also be used as strings")There are only two special comments we have to keep in mind: #!/usr/bin/env python3 #-*- coding: utf-8 -*- comments can also be used as stringsvariable names Legal variable names:```myvar = "John"my_var = "John"_my_var = "John"myVar = "John"MYVAR = "John"myvar2 = "John"``` Illegal variable names:```2myvar = "John"my-var = "John"my var = "John"``` multiple variablesx,y,z1=12,13.4,2+5j print(x,y,z1)12 13.4 (2+5j)Built-in Data TypesIn programming, data type is an important concept.Variables can store data of different types, and different types can do different things.Python has the following data types built-in by default, in these categories:* Text Type: **str*** Numeric Types: **int, float, complex*** Sequence Types: **list, tuple, range*** Mapping Type: **dict*** Set Types: set, frozenset* Boolean Type: bool* Binary Types: bytes, bytearray, memoryview Getting the Data TypeYou can get the data type of any object by using the type() function:help(type)Help on class type in module builtins: class type(object) | type(object_or_name, bases, dict) | type(object) -> the object's type | type(name, bases, dict) -> a new type | | Methods defined here: | | __call__(self, /, *args, **kwargs) | Call self as a function. | | __delattr__(self, name, /) | Implement delattr(self, name). | | __dir__(...) | __dir__() -> list | specialized __dir__ implementation for types | | __getattribute__(self, name, /) | Return getattr(self, name). | | __init__(self, /, *args, **kwargs) | Initialize self. See help(type(self)) for accurate signature. | | __instancecheck__(...) | __instancecheck__() -> bool | check if an object is an instance | | __new__(*args, **kwargs) | Create and return a new object. See help(type) for accurate signature. | | __prepare__(...) 
| __prepare__() -> dict | used to create the namespace for the class statement | [...]str**str** is the default string type TASK -1Create a dictionary of three day routine from 0 pm== 12 am to 12 pm by hour along with two variables of work description where work with computers can change but work without computers can not change.* The first key should be **day*** Every day should consist of a list of works* If the work is during your office hour **it can not change later*** If the work is not in your office hour it can be changed later* If the work is comprised of two parts: say sitting down and using computer- use complex number where The format```dict[day][time]```* change monday works* access friday evening 4 pm * if it is a complex work print both the works with the help of list and tuples Note* The index of list and tuples are always int* complex_number.real or complex_number.imag always returns float* change them to intezers* use int() function to typecast **(will be discussed)*** use help() to see what int() does__workWithComouters__=['see movies','code','web search'] work_with_out_compuetrs=('sitdown','sleep','walk') my_routine= {'sat':['work1','work2',('work3',2+1j,'work4'),'work5'], 'sun':['work1','work2',('work3','work4'),'work5']} # accessing some work say -- saturday 2 pm - 3 pm print(my_routine['sat'][2]) # index can not be float print(type(my_routine['sat'][2][1].real)) # printing a complex work print(work_with_out_compuetrs[int(my_routine['sat'][2][1].real)], __workWithComouters__[int(my_routine['sat'][2][1].imag)]) help(int)('work3', (2+1j), 'work4') walk code Help on class int in module builtins: class int(object) | int(x=0) -> integer | int(x, base=10) -> integer | | Convert a number or string to an integer, or return 0 if no arguments | are given. If x is a number, return x.__int__(). For floating point | numbers, this truncates towards zero. | | If x is not a number or if base is given, then x must be a string, | bytes, or bytearray instance representing an integer literal in the | given base. The literal can be preceded by '+' or '-' and be surrounded | by whitespace. The base defaults to 10. Valid bases are 0 and 2-36. | Base 0 means to interpret the base from the string as an integer literal. | >>> int('0b100', base=0) | 4 | | Methods defined here: | | __abs__(self, /) | abs(self) | | __add__(self, value, /) | Return self+value. | | __and__(self, value, /) | Return self&value. | | __bool__(self, /) | [...]Multiple Regression in R & Statistics based on Students' performance in Portuguese & Maths **Team D** _ce514_* ***** ***** **** The dataset we use can be found in the UCI Machine Learning Repo here:https://archive.ics.uci.edu/ml/datasets/student+performance Based on various attributes of students in 2 Portuguese schools, we will predict their academic success. Data Set InformationThis data approach student achievement in secondary education of two Portuguese schools. The data attributes include student grades, demographic, social and school related features) and it was collected by using school reports and questionnaires. Two datasets are provided regarding the performance in two distinct subjects: Mathematics (mat) and Portuguese language (por). In [Cortez and Silva, 2008], the two datasets were modeled under binary/five-level classification and regression tasks. **Important note**: the target attribute G3 has a strong correlation with attributes G2 and G1. 
This occurs because G3 is the final year grade (issued at the 3rd period), while G1 and G2 correspond to the 1st and 2nd period grades. It is more difficult to predict G3 without G2 and G1, but such prediction is much more useful (see paper source for more details). Load the data# Libraries & etc set.seed(1) library(car) library(boot) # Read tables from two comma-separated files data1 = read.table("student-por.csv",sep=";",header=TRUE) dataSet1 = data.frame(data1) data2 = read.table("student-mat.csv",sep=";",header=TRUE) dataSet2 = data.frame(data2) # Our final dataset will be the merge of the 2 datasets above dataSet = rbind(dataSet1, dataSet2)Attaching package: ‘boot’ The following object is masked from ‘package:car’: logitClean the final datasetAs said in the intro information of the notebook, the grades are distributed in 3 attributes: G1, G2 & G3# Change the names of the columns-attributes names(dataSet) <- c("school","sex","age", "address","family size","parents cohab.", "mom's education", "dad's education","mom's job", "dad's job","reason", "guardian","travel", "study", "failures","education support","family support","paid","activities", "nursery","higher", "internet","romantic","family bond","free time","social","workday alch.","weekend alch.","health", "absences","Grade 1","Grade 2","Grade 3") # Merge the 3 variables into 1 variable Grade = (dataSet$`Grade 1` + dataSet$`Grade 2` + dataSet$`Grade 3`) / 3 # Make these 3 columns ZERO and substitute them of the Grade variable dataSet$`Grade 1` = NULL dataSet$`Grade 2` = NULL dataSet$`Grade 3` = NULL dataSet = cbind(dataSet, Grade) # Substitution attach(dataSet) # Attach the attributes to global envThe following object is masked _by_ .GlobalEnv: GradeSummary of the datasetsummary(dataSet) hist(Grade)Attributes of the model These can be used as predictors or 'x' in order for us to perform a multivariable regressionnames(dataSet)ScatterplotAs predictors, we will consider the first 10 attributes.This is equal to all the attributes from `school` to `dad's job`x1 <- dataSet[c(31, 1 : 10)] pairs(x1)Fit a multiple regression model based on study, failures, extra education support, extra paid classes & the desire to take higher educationEveryone can use the predictors they want.We thought that these are some nice ones to predict a grade of a student.fit = lm(Grade ~ study + failures + `education support` + paid + higher, data = dataSet) summary(fit)Explanation of statistics Linear Regression Model:Here, our model corresponds mathematically to: $$ Grade = \beta_0 + \beta_1*study + \beta_2*failures + \beta_3*education support + \beta_4*extraPaidClasses + \beta_5* willForHigherEducation + \epsilon $$ PredictorsUnder `Coefficients`, there are the terms included in the model as predictors or else-called 'x'. EstimateCoefficient estimates for each corresponding term in the model. For example, the estimate for the constant term (intercept) is 9.2697 p - ValueThe p-Values are very important because, We can consider a linear model to be statistically significant only when both these p-Values are less that the pre-determined statistical significance level, which is ideally 0.05.We can confirm this by looking the **last column** in the **coefficients** section at `Pr(>|t|)`.All of these predictors are good for our model.Also, this is visually interpreted by the significance stars at the end of the row. 
The more the starsbeside the variable’s p-Value, the more significant the variable.Last but not least, when the p-Value is less than significance level (< 0.05), we can safely reject the null hypothesis that the co-efficient β of the predictor is zero. In our case, both these p Values are well below the 0.05 threshold, so we can conclude our model is indeed statistically significant. t- StatisticIt's the column under the name of `t value` .Each attribute has each own t-Statistic value. This equals to $ t-Statistic = \frac{\beta - coefficient}{Std.Error} $For example, failures on our model have a `t-value` of `-11.566.`When you run a hypothesis test, you’ll use a distribution like a t-distribution or normal distribution. These have a known area, and enable to you to calculate a probability value (p-value) that will tell you if your results are due to chance, or if your results are die to your theory being correct. The larger the test statistic, the smaller the p-value and the more likely you are to reject the null hypothesis. $ R^2 $: Coefficient of Determination (R-Squared)The coefficient of determination of a linear regression model is the quotient of the variances of the fitted values and observed values of the dependent variable. If we denote $y_i$ as the observed values of the dependent variable, $ \bar y $ as its mean, and $\hat y_i$ as the fitted value, then the coefficient of determination is:![Mathematical formula of R-Squared](http://www.r-tutor.com/sites/default/files/images/simple-regression5x.png)In our model, **R-Squared Error* is equal to:summary(fit)$r.squaredwhich means that the proportion of variation in the dependent (response) variable is explained good in this model, since it the error is low._Quick Note:_ We don’t necessarily discard a model based on a low R-Squared value. It's a better practice to look at the AIC (by using the `glm` method in R) and prediction accuracy on validation sample when deciding on the efficacy of a model. What about *Adjusted R-Squared* ?As you add more X variables to your model, the R-Squared value of the new bigger model will always begreater than that of the smaller subset. This is because, since all the variables in theoriginal model is also present, their contribution to explain the dependent variable willbe present in the super-set as well, therefore, whatever new variable we add can onlyadd (if not significantly) to the variation that was already explained.It is here, the adjusted R-Squared value comes to help. Adj R-Squared penalizes total value for thenumber of terms (read predictors) in your model. **Therefore, when comparing nestedmodels, it is a good practice to look at adj-R-squared value over R-squared.**In our model (see `summary` of `fit`), it's `0.2125` which verificates that our regression was good. Standard Error and F-StatisticBoth standard errors and F-statistic are measures of goodness of fit.$$ Std. Error = \sqrt{MSE} = \sqrt{\frac{SSE}{n-q}} $$and$$ F-statistic = \frac{MSR}{MSE} $$where, $n$ is the number of observations, $q$ is the number of coefficients and $MSR$ is themean square regression.Standard Error is low on our model, something that confirms the goodness of fit of our regression predictors.This applies to the F-statistic value too. 
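For completeness, the adjusted R-squared referred to above is typically computed as
$$ \bar{R}^2 = 1 - (1 - R^2)\,\frac{n-1}{n-q} $$
with $n$ the number of observations and $q$ the number of coefficients, the same notation used in the standard-error formula; this is the explicit penalty for adding extra terms that the discussion describes.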
Mean Squared Error The $MSE$ is a measure of the quality of an estimator—it is always non-negative, and values closer to zero are better.The mean squared error (MSE) is the mean of the square of the residuals:MSE = mean(fit$residuals^2) MSEAs confirmed by other metrics, our regression model is good. Root Mean Squared Error(RMSE)As the title says, Root mean squared error (RMSE) is then the square root of MSE:RMSE <- sqrt(MSE) RMSEResidual Sum Of Squares (RSS)Residual sum of squares (RSS) is the sum of the squared residuals:rss = sum(residuals(fit)^2) rssResidual Standard Error (RSE)is the square root of (RSS / degrees of freedom):rse = sqrt( sum(residuals(fit)^2) / fit$df.residual ) rseBLU14 - Exercise Notebookimport os import joblib import pandas as pd import numpy as np import category_encoders import json import joblib import pickle import math import requests from copy import deepcopy import seaborn as sns from uuid import uuid4 from sklearn.model_selection import train_test_split from sklearn.pipeline import make_pipeline, Pipeline from sklearn.model_selection import cross_val_score from category_encoders import OneHotEncoder from sklearn.impute import SimpleImputer from sklearn.preprocessing import StandardScaler, OneHotEncoder from sklearn.compose import ColumnTransformer from sklearn.model_selection import cross_val_score from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, roc_auc_score import matplotlib.pyplot as plt import matplotlib.image as mpimg %matplotlib inlineAfter the police you nailed another big client. A hospital hired you to try to help predicting if a person is going to suffer from heart disease or not, so they can redirect patients from other appointments into the proper treatment, if needed, minimizing later problems with this. You're no expert in the medical field, but you decide to take on the challenge.They provide you with a dataset with several patient measures and the fact that they had heart disease or not.They also provide you with the following data description: Attribute Information 1) age 2) sex 3) cp - chest pain type (4 values) 4) trestbps - resting blood pressure 5) chol - serum cholesterol in mg/dl 6) fbs - fasting blood sugar > 120 mg/dl 7) restecg - resting electrocardiographic results (values 0,1,2) 8) thalach - maximum heart rate achieved 9) exang - exercise induced angina 10) oldpeak - ST depression induced by exercise relative to rest 11) slope - the slope of the peak exercise ST segment 12) ca - number of major vessels (0-3) colored by fluoroscopy 13) thal - thallium stress test: 0 = normal; 1 = fixed defect; 2 = reversable defect 14) target - 0= less chance of heart attack 1= more chance of heart attack**Note**: even if the dataset has values outside of the data dictionary, you should for these exercises consider the data dictionary as the source truthLoad the dataset below and check out its format:df = pd.read_csv(os.path.join("data", "heart.csv")) df.head()Let's split our data into train and test:df_train, df_test = train_test_split(df, test_size=0.3, random_state=42) df_test.target.value_counts().plot(kind="bar"); plt.xlabel('Target value'); plt.ylabel('Target value counts');Q1) Q1.a) Train a baseline modelBuild a baseline model for this problem (don't worry about performance for now) and serialize it. 
Use the following features: 1) age 2) sex 3) cp - chest pain type (4 values) 4) trestbps - resting blood pressure 6) fbs - fasting blood sugar > 120 mg/dl 7) restecg - resting electrocardiographic results (values 0,1,2) 10) oldpeak - ST depression induced by exercise relative to rest 12) ca - number of major vessels (0-3) colored by fluoroscopy 13) thal - thallium stress test: 0 = normal; 1 = fixed defect; 2 = reversable defect**Note**: As we already provided the split, use the `df_train` to train your model.# This is a temporary directory where your serialized files will be saved. Make sure you use this as # the target folder when you serialize your files TMP_DIR = '/tmp' # Write code to train and serialize a model in the block below # # Outputs expected: `columns.json`, `dtypes.pickle` and `pipeline.pickle` # # Your pipeline should be able to receive a dataframe with the columns we've requested you to use # in the form `pipeline.predict(test_df)` # # YOUR CODE HERE raise NotImplementedError()Test your procedure is correct by running the asserts below:with open(os.path.join(TMP_DIR, 'columns.json')) as fh: columns = json.load(fh) assert columns == ["age", "sex", "cp", "trestbps", "fbs", "restecg", "oldpeak", "ca", "thal"] with open(os.path.join(TMP_DIR, 'dtypes.pickle'), 'rb') as fh: dtypes = pickle.load(fh) assert dtypes.apply(lambda x: str(x)).isin(["int64", "int32", "float64", "float32"]).all() with open(os.path.join(TMP_DIR, 'pipeline.pickle'), 'rb') as fh: pipeline = joblib.load(fh) assert isinstance(pipeline, Pipeline) assert pipeline.predict(pd.DataFrame([{ "age": 23, "sex": 1, "cp": 3, "trestbps": 120, "fbs": 1, "restecg": 1, "oldpeak": 0, "ca": 0, "thal": 1} ], columns=columns).astype(dtypes)) in [0, 1]Q1.b) Client requirementsNow, the doctors asked you one more thing. They want to make sure your model is as good at retrieving male cases of heart disease as it is retrieving female cases. For example, if we have a pool of patients where 100 male patients actually have heart disesase and we retrieve 80 out of those, and where 100 female patients also have heart disesase but we only return 20 from those, then you're discrimating and that's not ok. A similar proportion, such as 75 women out of the 100 with heart disease, is expected.Build a small function to verify this. 
In particular make sure that the difference in percentage points is not higher than 5:def verify_retrieve_rates(X_test, y_true, y_test): """ Verify retrieval rates for different `sex` instances are not different by more than 5 percentage points Inputs: X_test: features for the test cases y_true: true labels for the test cases [0, 1] y_test: predictions for the test cases [0, 1] Returns: tuple of (success, rate_difference) success: True if the condition is satisfied, otherwise False rate_difference: difference between each class retrieval rates (as an absolute value) """ # YOUR CODE HERE raise NotImplementedError()Verify your function is working on a couple of modelsmodel_1 = pd.read_csv(os.path.join('data', 'data_model_1.csv')) X_test = model_1.copy().drop(columns=['target', 'prediction']) y_test = model_1.target y_pred = model_1.prediction success, rate_diff = verify_retrieve_rates(X_test, y_test, y_pred) assert success is False assert math.isclose(rate_diff, 0.20138888888888884) model_2 = pd.read_csv(os.path.join('data', 'data_model_2.csv')) X_test = model_2.copy().drop(columns=['target', 'prediction']) y_test = model_2.target y_pred = model_2.prediction success, rate_diff = verify_retrieve_rates(X_test, y_test, y_pred) assert success is True assert math.isclose(rate_diff, 0.04513888888888884)If you passed the asserts, you've defused this task. Move forward to the next one Q2) Prepare the model to be servedNow use the model that you built for Q1 and build a predict function around it that will parse the request and return the respective prediction. Split your code into initialization and prediction code as you've learned. Additionally, instead of returning 0 or 1, return True or False. Do not worry about potential bad inputs at this point, we'll get to it later on.# Initialization code # YOUR CODE HERE raise NotImplementedError() def predict(request): """ Produce prediction for request. 
Inputs: request: dictionary with format described below ``` { "observation_id": , "data": { "age": , "sex": , "cp": , "trestbps": , "fbs": , "restecg": , "oldpeak": , "ca": , "thal": } } ``` Returns: response: A dictionary echoing the request and its data with the addition of the prediction and probability ``` { "observation_id": , "age": , "sex": , "cp": , "trestbps": , "fbs": , "restecg": , "oldpeak": , "ca": , "thal": , "prediction": , "probability": } ``` """ # YOUR CODE HERE raise NotImplementedError() return responseTest your function on the code below:request = { "observation_id": "1", "data": { "age": 23, "sex": 1, "cp": 3, "trestbps": 120, "fbs": 1, "restecg": 1, "oldpeak": 0, "ca": 0, "thal": 1 } } response = predict(request) assert sorted(response.keys()) == \ sorted(["observation_id", "age", "sex", "cp", "trestbps", "fbs", "restecg", "oldpeak", "ca", "thal", "prediction", "probability"]) assert response["observation_id"] == "1" assert response["age"] == 23 assert response["restecg"] == 1 assert response["prediction"] in [True, False] probability_1 = response["probability"] request = { "observation_id": "2", "data": { "age": 44, "sex": 0, "cp": 2, "trestbps": 170, "fbs": 1, "restecg": 1, "oldpeak": 1, "ca": 0, "thal": 2 } } response = predict(request) assert sorted(response.keys()) == \ sorted(["observation_id", "age", "sex", "cp", "trestbps", "fbs", "restecg", "oldpeak", "ca", "thal", "prediction", "probability"]) assert response["observation_id"] == "2" assert response["fbs"] == 1 assert response["restecg"] == 1 assert response["prediction"] in [True, False] probability_2 = response["probability"] assert probability_1 != probability_2Hurray! It passed the tests. Q3) Make sure your input is correctNow let's be a bit more thoroughProtect your function against unexpected inputs. Create a function similar to the one before, but this time, return a different response. If everything is well with your request return an answer like this:```json{ "observation_id": "id1234", "prediction": True, "probability": 0.4}```However, if there is a problem with the initial data, whether it's fields missing or invalid values, return a different response:```json{ "observation_id": "id1234", "error": "Some error occured",}``` Hints - Hint 1: If the `observation_id` is not present, set it to None- Hint 2: Check out the tests to see what we expect from the error cases and error messages# Initialization code # YOUR CODE HERE raise NotImplementedError() def attempt_predict(request): """ Produce prediction for request. 
Inputs: request: dictionary with format described below ``` { "observation_id": , "data": { "age": , "sex": , "cp": , "trestbps": , "fbs": , "restecg": , "oldpeak": , "ca": , "thal": } } ``` Returns: A dictionary with predictions or an error, the two potential values: ``` { "observation_id": , "prediction": , "probability": } ``` or ``` { "observation_id": , "error": "some error message" } ``` if success is False, return an error string """ # YOUR CODE HERE raise NotImplementedError() return responseRun the tests below to validate your function is protected against some simple cases:################################################ # Test with good payload ################################################ base_request = { "observation_id": "1", "data": { "age": 23, "sex": 1, "cp": 3, "trestbps": 120, "fbs": 1, "restecg": 1, "oldpeak": 0.0, "ca": 0, "thal": 1 } } response = attempt_predict(base_request) assert 'prediction' in response, response assert 'probability' in response, response assert 'observation_id' in response, response assert response["observation_id"] == "1", response["observation_id"] assert response["prediction"] in [True, False], response["prediction"] assert response["probability"] <= 1.0, response["probability"] assert response["probability"] >= 0.0, response["probability"] ################################################ # Test missing `observation_id` produces an error ################################################ bad_request_1 = deepcopy(base_request) bad_request_1['random_field'] = bad_request_1.pop('observation_id') response = attempt_predict(bad_request_1) assert 'error' in response, response assert 'observation_id' in response['error'] ################################################ # Test missing `data` produces an error ################################################ bad_request_2 = deepcopy(base_request) bad_request_2['data_field_name'] = bad_request_2.pop('data') response = attempt_predict(bad_request_2) assert 'error' in response, response assert 'data' in response['error'] ################################################ # Test missing columns produce an error ################################################ bad_request_3 = deepcopy(base_request) bad_request_3['data'].pop('age') response = attempt_predict(bad_request_3) assert 'error' in response, response assert 'age' in response['error'], response['error'] ################################################ # Test extra columns produce an error ################################################ bad_request_4 = deepcopy(base_request) bad_request_4['data']['bloodpressure'] = 2 response = attempt_predict(bad_request_4) assert 'error' in response, response assert 'bloodpressure' in response['error'], response['error']Run a couple more tests to make sure your server is bulletproof:#################################################### # Test invalid values for categorical features - sex #################################################### bad_request_5 = deepcopy(base_request) bad_request_5['data']['sex'] = 3 response = attempt_predict(bad_request_5) assert 'error' in response, response assert 'sex' in response['error'], response['error'] assert '3' in response['error'], response['error'] ########################################################################### # Test invalid values for categorical features - number of vessels coloured ########################################################################### bad_request_6 = deepcopy(base_request) bad_request_6['data']['ca'] = 'Hello world' response = 
attempt_predict(bad_request_6) assert 'error' in response, response assert 'ca' in response['error'], response['error'] assert 'Hello world' in response['error'], response['error'] #################################################### # Test invalid values for numerical features - age #################################################### bad_request_7 = deepcopy(base_request) bad_request_7['data']['age'] = -12 response = attempt_predict(bad_request_7) assert 'error' in response, response assert 'age' in response['error'], response['error'] assert '-12' in response['error'], response['error'] bad_request_8 = deepcopy(base_request) bad_request_8['data']['age'] = 1200 response = attempt_predict(bad_request_8) assert 'error' in response, response assert 'age' in response['error'], response['error'] assert '1200' in response['error'], response['error'] #################################################### # Test invalid values for numerical features - trestbps #################################################### bad_request_9 = deepcopy(base_request) bad_request_9['data']['trestbps'] = 10 response = attempt_predict(bad_request_9) assert 'error' in response, response assert 'trestbps' in response['error'], response['error'] assert '10' in response['error'], response['error'] bad_request_10 = deepcopy(base_request) bad_request_10['data']['trestbps'] = 500 response = attempt_predict(bad_request_10) assert 'error' in response, response assert 'trestbps' in response['error'], response['error'] assert '500' in response['error'], response['error'] #################################################### # Test invalid values for numerical features - oldpeak #################################################### bad_request_11 = deepcopy(base_request) bad_request_11['data']['oldpeak'] = 12 response = attempt_predict(bad_request_11) assert 'error' in response, response assert 'oldpeak' in response['error'], response['error'] assert '12' in response['error'], response['error'] bad_request_12 = deepcopy(base_request) bad_request_12['data']['oldpeak'] = 40.312 response = attempt_predict(bad_request_12) assert 'error' in response, response assert 'oldpeak' in response['error'], response['error'] assert '40.312' in response['error'], response['error']Ufff. That was tough. But now your app is a bit safer to deploy! At least from all the cases we could think of. Q4) Put everything togetherFinally, build a server with your model and a predict endpoint protected from all the cases before. Deploy it and set the name of your app below:# Assign the variable APP_NAME to the name of your heroku app # APP_NAME = ... 
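# --- Added sketch (not the official solution): one possible shape for the app.py you deploy to Heroku in Q4.
# It assumes the Q1 artifacts (columns.json, dtypes.pickle, pipeline.pickle) are shipped with the app and that
# the pipeline's final estimator supports predict_proba; the Q3 validation is only hinted at here.
from flask import Flask, request, jsonify

def build_app(pipeline, columns, dtypes):
    app = Flask(__name__)

    @app.route('/predict', methods=['POST'])
    def predict_endpoint():
        payload = request.get_json()
        if 'observation_id' not in payload:
            return jsonify({'observation_id': None, 'error': 'observation_id is missing'})
        if 'data' not in payload:
            return jsonify({'observation_id': payload['observation_id'], 'error': 'data is missing'})
        # ... reuse the Q3 field/value validation here before predicting ...
        obs = pd.DataFrame([payload['data']], columns=columns).astype(dtypes)
        probability = float(pipeline.predict_proba(obs)[0, 1])
        return jsonify({'observation_id': payload['observation_id'],
                        'prediction': bool(probability >= 0.5),
                        'probability': probability})

    return app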
# YOUR CODE HERE raise NotImplementedError()Test that your server is bulletproof:# Test locally # url = f"http://localhost:5000/predict" # Testing the predict/update endpoint url = "https://{}.herokuapp.com/predict".format(APP_NAME) ################################################ # Test with good payload ################################################ payload = { "observation_id": str(uuid4()), "data": { "age": 23, "sex": 1, "cp": 3, "trestbps": 120, "fbs": 1, "restecg": 1, "oldpeak": 0.0, "ca": 0, "thal": 1 } } r = requests.post(url, json=payload) assert isinstance(r, requests.Response) assert r.ok response = r.json() assert 'prediction' in response, response assert 'probability' in response, response assert response["prediction"] in [True, False] assert isinstance(response["probability"], float) assert 0 <= response["probability"] <= 1 ################################################ # Test missing `observation_id` produces an error ################################################ bad_payload_1 = deepcopy(payload) bad_payload_1['random_field'] = bad_payload_1.pop('observation_id') r = requests.post(url, json=bad_payload_1) assert isinstance(r, requests.Response) assert r.ok response = r.json() assert 'error' in response, response assert 'observation_id' in response['error'], response['error'] ################################################ # Test missing `data` produces an error ################################################ bad_payload_2 = deepcopy(payload) bad_payload_2['observation_id'] = str(uuid4()) bad_payload_2['random_field'] = bad_payload_2.pop('data') r = requests.post(url, json=bad_payload_2) assert isinstance(r, requests.Response) assert r.ok response = r.json() assert 'error' in response, response assert 'data' in response['error'], response['error'] ################################################ # Test missing columns produce an error ################################################ bad_payload_3 = deepcopy(payload) bad_payload_3['observation_id'] = str(uuid4()) bad_payload_3['data'].pop('age') r = requests.post(url, json=bad_payload_3) assert isinstance(r, requests.Response) assert r.ok response = r.json() assert 'error' in response, response assert 'age' in response['error'], response['error'] ################################################ # Test extra columns produce an error ################################################ bad_payload_4 = deepcopy(payload) bad_payload_4['observation_id'] = str(uuid4()) bad_payload_4['data']['bloodpressure'] = 100 r = requests.post(url, json=bad_payload_4) assert isinstance(r, requests.Response) assert r.ok response = r.json() assert 'error' in response, response assert 'bloodpressure' in response['error'], response['error'] ########################################################################### # Test invalid values for categorical features - number of vessels coloured ########################################################################### bad_payload_5 = deepcopy(payload) bad_payload_5['observation_id'] = str(uuid4()) bad_payload_5['data']['ca'] = 'Hello world' r = requests.post(url, json=bad_payload_5) assert isinstance(r, requests.Response) assert r.ok response = r.json() assert 'error' in response, response assert 'ca' in response['error'], response['error'] assert 'Hello world' in response['error'], response['error'] #################################################### # Test invalid values for numerical features - age #################################################### bad_payload_6 = deepcopy(payload) 
bad_payload_6['observation_id'] = str(uuid4()) bad_payload_6['data']['age'] = -12 r = requests.post(url, json=bad_payload_6) assert isinstance(r, requests.Response) assert r.ok response = r.json() assert 'error' in response, response assert 'age' in response['error'], response['error'] assert '-12' in response['error'], response['error']Fitting difference of exponentials to voltage clamp dataimport os import sys sys.path.append('../') from matplotlib import pyplot as plt import matplotlib plt.style.use('seaborn-white') import numpy as np from Linearity import Neuron import lmfit prefix = '/home/bhalla/Documents/Codes/data' analysisFile = prefix + '/media/sahil/NCBS_Shares_BGStim/patch_data/170530/c1_EI/plots/c1_EI.pkl' plotDir = os.path.dirname(analysisFile) neuron = Neuron.load(analysisFile)$g(t) = \bar{g}\frac{( e^\frac{\delta_{onset} - t }{\tau_{decay}} - e^\frac{\delta_{onset} - t }{\tau_{rise}})}{- \left(\frac{\tau_{rise}}{\tau_{decay}}\right)^{\frac{\tau_{decay}}{\tau_{decay} - \tau_{rise}}} + \left(\frac{\tau_{rise}}{\tau_{decay}}\right)^{\frac{\tau_{rise}}{\tau_{decay} - \tau_{rise}}}}$def fitFunctionToPSP(time, vector, t_0=0, g_max=0): ''' Fits using lmfit ''' def _doubleExponentialFunction(t, t_0, tOn, tOff, g_max): ''' Returns the shape of an EPSP as a double exponential function ''' tPeak = t_0 + float(((tOff * tOn)/(tOff-tOn)) * np.log(tOff/tOn)) A = 1./(np.exp(-(tPeak-t_0)/tOff) - np.exp(-(tPeak-t_0)/tOn)) g = [ g_max * A * (np.exp(-(t_point-t_0)/tOff) - np.exp(-(t_point-t_0)/tOn)) if t_point >= t_0 else 0. for t_point in t] return np.array(g) model = lmfit.Model(_doubleExponentialFunction) # Fixing values of variables from data # Onset time if not t_0: model.set_param_hint('t_0', value =max(time)/10., min=0., max = max(time)) else: model.set_param_hint('t_0', value = t_0, vary=False) # g_max if not g_max: model.set_param_hint('g_max', value = max(vector)/10., min = 0., max = max(vector)) else: model.set_param_hint('g_max', value = g_max, vary=False) model.set_param_hint('tOn', value =max(time)/5.1 , min = 0., max = max(time)) model.set_param_hint('t_ratio', value =10., min=1.05) model.set_param_hint('tOff', min = 0., expr='tOn*t_ratio') model.set_param_hint('t_peak', expr = 't_0 + ((tOff * tOn)/(tOff-tOn)) * log(tOff/tOn)') pars = model.make_params() result = model.fit(vector, pars, t=time) # print (result.fit_report()) return result n = {key:value for key,value in neuron} for numSq in set(n[1]).intersection(set(n[2])): for i in set(n[1][numSq].trial).intersection(set(n[2][numSq].trial)): if i == 3 and numSq == 7: exc = -1e9*n[1][numSq].trial[i].interestWindow inh = 1e9*n[2][numSq].trial[i].interestWindow time = np.arange(len(n[1][numSq].trial[i].interestWindow))*n[1][numSq].trial[i].samplingTime exc_fit = fitFunctionToPSP(time, exc) inh_fit = fitFunctionToPSP(time, inh) f,ax = plt.subplots() ax.plot(time, exc, alpha=0.2, c='b') ax.set_xlabel("Time") ax.set_ylabel("Current (pA)") ax.plot(time, exc_fit.best_fit, '-', label="Excitation", c='b') ax.plot(time, -inh, alpha=0.2, c='g') ax.plot(time, -inh_fit.best_fit, '-', label="Inhibition", c='g') plt.legend() f.set_figwidth(8) f.set_figheight(8) plt.show() | April 04, 2019 (updated) This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. 
$ \newcommand{\bra}[1]{\langle #1|} $$ \newcommand{\ket}[1]{|#1\rangle} $$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $$ \newcommand{\dot}[2]{ #1 \cdot #2} $$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $$ \newcommand{\mypar}[1]{\left( #1 \right)} $$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $$ \newcommand{\onehalf}{\frac{1}{2}} $$ \newcommand{\donehalf}{\dfrac{1}{2}} $$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $$ \newcommand{\vzero}{\myvector{1\\0}} $$ \newcommand{\vone}{\myvector{0\\1}} $$ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $$ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ Solutions for Superposition Task 2 (Optional) We have a quantum system with four states: $\ket{00}$, $ \ket{01} $, $\ket{10}$, and $ \ket{11} $.We can also say that our system has two qubits.Suppose that the system is in the following state:$ \myrvector{ \dfrac{ 1 }{ \sqrt{3} - \sqrt{ 5 + 2\sqrt{6}} } \\ \\ \dfrac{1}{ \sqrt{3} - \sqrt{ 7 + 2\sqrt{12} } } \\ \\ \dfrac{ 1 }{ \sqrt{5} - \sqrt{ 13 + 2\sqrt{40} } } \\ \\ \dfrac{1}{ \sqrt{ 7 } - \sqrt{ 15 + 2 \sqrt{56} } } }. $ Find the probability of observing the system in state $\ket{00}$, $ \ket{01} $, $\ket{10}$, or $ \ket{11} $. You may write a function to calculate the denominator of each fraction automatically, where its value is determined by three values $a$, $ b$, and $ c $ by assuming the form $ \sqrt{a} - \sqrt{b + 2 \sqrt{c} } $. Verify that the total probability is 1 (or almost 1). Solutiondef square_roots(a,b,c): # we iteratively calculate the expression with many square roots # we start with c and continue with b and a result = c**0.5 # square root of c result = 2 * result # 2*sqrt(c) result = result + b # b + 2*sqrt(c) result = result**0.5 # square root result = a**0.5 - result return result quantum_state =[ square_roots(3,5,6)**(-1), square_roots(3,7,12)**(-1), square_roots(5,13,40)**(-1), square_roots(7,15,56)**(-1), ] # this is our quantum state # print the quantum state print(quantum_state) print() print("The probability of observing the states 00, 01, 10, 11:") total_probability = 0 for i in range(len(quantum_state)): current_probability = quantum_state[i]**2 # square of the amplitude print(current_probability) total_probability = total_probability + current_probability print() print("total probability is",total_probability)Task 3 Create a quantum circuit with 5 qubits.Apply h-gate (Hadamard operator) to each qubit.Apply z-gate ($Z$ operator) to randomly picked qubits.
(i.e., $ mycircuit.z(qreg[i]) $)Apply h-gate to each qubit.Measure each qubit.Execute your program 1000 times.Compare the outcomes of the qubits affected by z-gates, and the outcomes of the qubits not affected by z-gates.Does z-gate change the outcome?Why? Solution# import all necessary objects and methods for quantum circuits from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer # import randrange for random choices from random import randrange number_of_qubit = 5 # define a quantum register with 5 qubits qreg = QuantumRegister(number_of_qubit) # define a classical register with 5 bits creg = ClassicalRegister(number_of_qubit) # define our quantum circuit mycircuit = QuantumCircuit(qreg,creg) # apply h-gate to all qubits for i in range(number_of_qubit): mycircuit.h(qreg[i]) # apply z-gate to randomly picked qubits for i in range(number_of_qubit): if randrange(2) == 0: # the qubit with index i is picked to apply z-gate mycircuit.z(qreg[i]) # apply h-gate to all qubits for i in range(number_of_qubit): mycircuit.h(qreg[i]) # measure all qubits mycircuit.measure(qreg,creg) print("Everything looks fine, let's continue ...") # draw the circuit mycircuit.draw() # execute the circuit 1000 times in the local simulator job = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=1000) counts = job.result().get_counts(mycircuit) for outcome in counts: # print the reverse of the outcome reverse_outcome = '' for i in outcome: reverse_outcome = i + reverse_outcome print(reverse_outcome,"is observed",counts[outcome],"times")Call WPS with oswlibfrom os import listdir from os import path nc_dir = '/home/nils/birdhouse/var/lib/pywps/cache/malleefowl/esgf1.dkrz.de/thredds/fileServer/cordex/cordex/output/EUR-11/MPI-CSC/MPI-M-MPI-ESM-LR/historical/r1i1p1/MPI-CSC-REMO2009/v1/sem/tas/v20160419/' resources = [path.join('file://' + nc_dir, nc) for nc in listdir(nc_dir)] resources from os.path import join execute = wps.execute( identifier="segetalflora", #indices_clipping", inputs=[ ("resource",resources[0]), ("resource",resources[1]), ('culture_type', 'fallow'), ('climate_type', '3') ]) monitorExecution(execute, sleepSecs=5) print execute.getStatus() for o in execute.processOutputs: print o.referencehttp://localhost:8090/wpsoutputs/flyingpigeon/out_tasmean-d99eaef0-a807-11e6-a39a-142d277ef1f3.tarcall the module onlyfrom flyingpigeon import segetalflora as sf from os import listdir, path tasdir = '/home/nils/data/tests/segetalflora/' nc_tasmean = [path.join(tasdir, nc ) for nc in listdir(tasdir)] reload(sf) nc_sf = sf.get_segetalflora(resource=nc_tasmean) nc_sf sf.get_segetalflora?Práctica de Acceso a bases de datos Considerar la gestión de la información de una librería. 
A database with the following tables will be used for this purpose:

Compradores table:

Column | Type | Nulls | Default | Primary Key | Unique
-- | -- | -- | -- | -- | --
registro | INT(4) | No | | Yes | Yes
nombre | VARCHAR(35) | No | ' ' | | Yes
fecha_nacimiento | DATE | No | 000-00-00 | |
teléfono | VARCHAR(10) | Yes | NULL | |
domicilio | VARCHAR(35) | Yes | NULL | |
población | VARCHAR(25) | Yes | NULL | |
anotaciones | TEXT | | | |

Libros table:

Column | Type | Nulls | Default | Primary Key | Unique
-- | -- | -- | -- | -- | --
registro | INT(4) | No | | Yes | Yes
título | VARCHAR(35) | No | ' ' | | Yes
escritor | VARCHAR(35) | No | ' ' | |
editorial | VARCHAR(20) | No | ' ' | |
soporte | VARCHAR(35) | No | 'Libro ' | |
fecha_entrada | DATE | No | NULL | |
país | VARCHAR(20) | No | 0000-00-00 | | Yes
importe | DECIMAL(8,2) | No | 0.0 | |
anotaciones | BLOB | | | |

Compras table:

Column | Type | Nulls | Default | Primary Key | Unique
-- | -- | -- | -- | -- | --
registro | INT(4) | No | | Yes | Yes
id_comprador | INT(4) | No | ' ' | |
id_libro | INT(4) | No | ' ' | |

This table records the purchases made by the customers: the registro fields of the libros and compradores tables are linked to the id_libro and id_comprador columns, respectively. You are asked to:1. Write a Python program that, using SQLite, creates a database named "Libreria" and 3 tables: compradores, libros and compras [1.5 points].2. Write a Python program that fills the above tables with the following data [1 point]:

Rows of the Compradores table

registro | nombre | fecha_nacimiento | teléfono | domicilio | población | anotaciones
-- | -- | -- | -- | -- | -- | --
1 | | 1955-10-23 | 608900890 | La isla del tesoro,33 | Getafe | Buen comprador
2 | | 1961-12-13 | 607899005 | Plaza Mayor,56 | Pozuelo |
3 | | 1976-04-02 | 917895679 | Esparteros, 5 | Getafe |
4 | | 1968-11-12 | 609440567 | ,4 | Pozuelo | Le gusta la ciencia ficción
5 | | 1986-08-17 | 690890456 | Gran vía,56 | Getafe | Le gustan los ensayos
6 | | 1957-08-25 | 917890056 | Plaza de España, 34 | Pozuelo | Le gusta la historia
7 | | 1977-07-20 | 915678900 | Principal,3 | Getafe | Le gusta la novela de amor
8 | | 1996-11-09 | 634567876 | Aviación,34 | Getafe |
9 | | 1984-11-08 | 645666900 | Río Ebro,4 | Las rozas |

Rows of the Libros table

registro | título | escritor | editorial | soporte | fecha_entrada | país | importe | anotaciones
-- | -- | -- | -- | -- | -- | -- | -- | --
1 | El Quijote | | Alianza | Libro | 1988-06-11 | España | 12 | NULL
2 | Marina | | Edebé | CD | 1988-06-11 | España | 18.95 | NULL
3 | La hoguera de las vanidades | | RBA editores | DVD | 1988-06-11 | USA | 22.25 | NULL
4 | Los pilares de la Tierra | | Faber | Libro | 1988-06-11 | USA | 12.95 | NULL
5 | Otelo | | Anaya | Libro | 1988-06-11 | Inglaterra | 14.95 | NULL
6 | Rimas y Leyendas | | Roca | Libro | 1988-06-11 | España | 25.95 | NULL
7 | Poesía | | P&J | Libro | 1988-06-11 | España | 10.95 | NULL

Rows of the Compras table

registro | id_comprador | id_libro
-- | -- | --
1 | 9 | 7
2 | 9 | 3
3 | 8 | 2
4 | 7 | 1
5 | 8 | 1
6 | 1 | 1
7 | 7 | 1
8 | 6 | 2
9 | 3 | 5
10 | 3 | 1
11 | 3 | 2

3. Write a Python program that solves the following SQL queries [7.5 points]. The result of each query must be printed on screen: * Get the countries and the number of books sold, grouped by country and sorted in descending order by total sales. * Get the names of the buyers who have bought at least one book and the number of books they bought, sorted in decreasing order by the total number of books bought. * Get the average amount spent by all the buyers and the total sum of all the sales made.
* Get the name, telephone and annotations of the rows of the Compradores table where the buyer's name starts with the letter "M". * Update the Libros table, changing the soporte of records 6 and 7 to "DVD". * Delete the books that have never been sold. Submission rules* Submission deadline: 10/11/2017* The assignment is submitted by uploading a Jupyter notebook with the solution to the virtual campus. The file must be named BasesDatos_GrupoX, where X is the corresponding group number.# Write a Python program that, using SQLite, creates a database named "Libreria" and 3 tables: compradores, libros and compras [1.5 points]. # compradores, libros and compras import sqlite3 def crearTablas(): conn=sqlite3.connect("Libreria.sqlite3") cur = conn.cursor() cur.execute("DROP TABLE IF EXISTS compradores") cur.execute("DROP TABLE IF EXISTS libros") cur.execute("DROP TABLE IF EXISTS compras") cur.execute("CREATE TABLE compradores(registro INT(4) NOT NULL PRIMARY KEY, nombre VARCHAR(35) NOT NULL UNIQUE, fecha_nacimiento DATE NOT NULL default (000-00-00), telefono VARCHAR(10) default NULL,domicilio VARCHAR(35) default NULL,poblacion VARCHAR(25) default NULL,anotacion TEXT)") cur.execute("CREATE TABLE libros(registro INT(4) NOT NULL PRIMARY KEY, titulo VARCHAR(35) NOT NULL UNIQUE, escritor VARCHAR(35) NOT NULL,editorial VARCHAR(35) NOT NULL, soporte VARCHAR(35) NOT NULL default Libro, fecha_entrada DATE NOT NULL default NULL,pais VARCHAR(20) NOT NULL default(0000-00-00),importe DECIMAL(8,2) NOT NULL default 0.0,anotacion BLOB)") cur.execute("CREATE TABLE compras (registro INT(4) NOT NULL PRIMARY KEY, id_comprador INT(4) NOT NULL, id_libro INT(4) NOT NULL)") cur.close() conn.commit() # Write a Python program that fills the above tables with the following data [1 point] def rellenaTablas(): conn=sqlite3.connect("Libreria.sqlite3") cur = conn.cursor() cur.execute("INSERT INTO compradores (registro, nombre, fecha_nacimiento, telefono,domicilio, poblacion,anotacion) VALUES (?,?,?,?,?,?,?)",("1" ,"" ,"1955-10-23", "608900890", "La isla del tesoro,33" ,"Getafe", "Buen comprador ")) cur.execute("INSERT INTO compradores (registro, nombre, fecha_nacimiento, telefono,domicilio, poblacion,anotacion) VALUES (?,?,?,?,?,?,?)",("2","","1961-12-13","607899005"," Plaza Mayor,56","Pozuelo"," ")) cur.execute("INSERT INTO compradores (registro, nombre, fecha_nacimiento, telefono,domicilio, poblacion,anotacion) VALUES (?,?,?,?,?,?,?)",("3", "", "1976-04-02", "917895679", "Esparteros, 5", "Getafe"," " )) cur.execute("INSERT INTO compradores (registro, nombre, fecha_nacimiento, telefono,domicilio, poblacion,anotacion) VALUES (?,?,?,?,?,?,?)",("4", "", "1968-11-12" ,"609440567", "Juan sin miedo,4" ,"Pozuelo", "Le gusta la ciencia ficción" )) cur.execute("INSERT INTO compradores (registro, nombre, fecha_nacimiento, telefono,domicilio, poblacion,anotacion) VALUES (?,?,?,?,?,?,?)",("5" ,"" ,"1986-08-17"," 690890456" ,"Gran vía,56" ,"Getafe", "Le gustan los ensayos" )) cur.execute("INSERT INTO compradores (registro, nombre, fecha_nacimiento, telefono,domicilio, poblacion,anotacion) VALUES (?,?,?,?,?,?,?)",("6", "", "1957-08-25", "917890056" ,"Plaza de España, 34" ,"Pozuelo", "Le gusta la historia" )) cur.execute("INSERT INTO compradores (registro, nombre, fecha_nacimiento, telefono,domicilio, poblacion,anotacion) VALUES (?,?,?,?,?,?,?)",("7", "", "1977-07-20", "915678900", "Principal,3", "Getafe" ,"Le gusta la novela de amor" )) cur.execute("INSERT INTO compradores (registro, nombre,
fecha_nacimiento, telefono,domicilio, poblacion,anotacion) VALUES (?,?,?,?,?,?,?)",("8", "", "1996-11-09", "634567876", "Aviación,34", "Getafe"," " )) cur.execute("INSERT INTO compradores (registro, nombre, fecha_nacimiento, telefono,domicilio, poblacion,anotacion) VALUES (?,?,?,?,?,?,?)",("9", "", "1984-11-08", "645666900" ,"Río Ebro,4", "Las rozas"," " )) cur.execute("INSERT INTO libros (registro, titulo, escritor, editorial,soporte, fecha_entrada,pais,importe,anotacion) VALUES (?,?,?,?,?,?,?,?,?)",("1" ,"El Quijote", "Miguel de Cervantes", "Alianza","Libro" ,"1988-06-11", "España", "12","NULL" )) cur.execute("INSERT INTO libros (registro, titulo, escritor, editorial,soporte, fecha_entrada,pais,importe,anotacion) VALUES (?,?,?,?,?,?,?,?,?)",("2", "Marina", "", "Edebé" ,"CD" ,"1988-06-11", "España", "18.95", "NULL" )) cur.execute("INSERT INTO libros (registro, titulo, escritor, editorial,soporte, fecha_entrada,pais,importe,anotacion) VALUES (?,?,?,?,?,?,?,?,?)",("3", "La hoguera de las vanidades", "", "RBA editores", "DVD", "1988-06-11", "USA", "22.25", "NULL" )) cur.execute("INSERT INTO libros (registro, titulo, escritor, editorial,soporte, fecha_entrada,pais,importe,anotacion) VALUES (?,?,?,?,?,?,?,?,?)",("4", "Los pilares de la Tierra", "" ,"Faber", "Libro", "1988-06-11", "USA", "12.95", "NULL" )) cur.execute("INSERT INTO libros (registro, titulo, escritor, editorial,soporte, fecha_entrada,pais,importe,anotacion) VALUES (?,?,?,?,?,?,?,?,?)",("5" ,"Otelo", "W" ,"Anaya", "Libro" ,"1988-06-11", "Inglaterra", "14.95", "NULL" )) cur.execute("INSERT INTO libros (registro, titulo, escritor, editorial,soporte, fecha_entrada,pais,importe,anotacion) VALUES (?,?,?,?,?,?,?,?,?)",("6" ,"Rimas y Leyendas", "", "Roca", "Libro" ,"1988-06-11" ,"España", "25.95", "NULL" )) cur.execute("INSERT INTO libros (registro, titulo, escritor, editorial,soporte, fecha_entrada,pais,importe,anotacion) VALUES (?,?,?,?,?,?,?,?,?)",("7", "Poesía", "", "P&J", "Libro", "1988-06-11", "España", "10.95", "NULL")) cur.execute("INSERT INTO compras (registro, id_comprador, id_libro) VALUES (?,?,?)",("1","9","7" )) cur.execute("INSERT INTO compras (registro, id_comprador, id_libro) VALUES (?,?,?)",("2", "9" ,"3")) cur.execute("INSERT INTO compras (registro, id_comprador, id_libro) VALUES (?,?,?)",("3","8","2" )) cur.execute("INSERT INTO compras (registro, id_comprador, id_libro) VALUES (?,?,?)",("4", "7","1" )) cur.execute("INSERT INTO compras (registro, id_comprador, id_libro) VALUES (?,?,?)",("5", "8", "1" )) cur.execute("INSERT INTO compras (registro, id_comprador, id_libro) VALUES (?,?,?)",("6", "1", "1" )) cur.execute("INSERT INTO compras (registro, id_comprador, id_libro) VALUES (?,?,?)",("7", "7", "1" )) cur.execute("INSERT INTO compras (registro, id_comprador, id_libro) VALUES (?,?,?)",("8", "6","2" )) cur.execute("INSERT INTO compras (registro, id_comprador, id_libro) VALUES (?,?,?)",("9", "3", "5" )) cur.execute("INSERT INTO compras (registro, id_comprador, id_libro) VALUES (?,?,?)",("10", "3", "1" )) cur.execute("INSERT INTO compras (registro, id_comprador, id_libro) VALUES (?,?,?)",("11", "3", "2" )) cur.close() conn.commit() crearTablas() rellenaTablas() # Obtener los países y el número de libros vendidos agrupados por país y ordenados de manera descendente respecto al total de ventas. 
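# --- Added sketch, not part of the original submission: the query described in the comment above can also be
# answered directly in SQL with a JOIN + GROUP BY instead of Python loops. `paises_por_ventas_sql` is a
# hypothetical helper shown only for comparison with the original implementation below.
def paises_por_ventas_sql():
    conn = sqlite3.connect("Libreria.sqlite3")
    cur = conn.cursor()
    cur.execute(
        "SELECT l.pais, COUNT(*) AS vendidos "
        "FROM compras c JOIN libros l ON l.registro = c.id_libro "
        "GROUP BY l.pais ORDER BY vendidos DESC")
    for pais, vendidos in cur.fetchall():
        print(str(pais) + ": " + str(vendidos))
    conn.close()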
def ObtenPaisYNumLibrosVent(): conn=sqlite3.connect("Libreria.sqlite3") cur = conn.cursor() cur.execute("SELECT * FROM libros") pais = [rec[6] for rec in cur.fetchall()] cur.execute("SELECT * FROM compras") compras = [rec[2] for rec in cur.fetchall()] i = 0 ventas = [] suma = 0 #se guardan los datos en ventas(desordenados) while i < len(pais) : j = 0 suma = 0 while j < len(compras): if (compras[j]-1) == i: suma += 1 j+= 1 ventas += [[pais[i], suma]] i+=1 paises = [] mayor = 0 pos = 0 i = 0 #rellena paises while i < len(ventas): nombrePais = ventas[i][0] x = 0 encontrado = False while x < len(paises) and encontrado == False: if paises[x][0] == nombrePais : encontrado = True x+=1 if encontrado == True: i+=1 else: j=0 suma = 0 while j < len(ventas): if nombrePais == ventas[j][0]: suma += ventas[j][1] j+=1 paises += [[nombrePais,suma]] i+=1 mayor = 0 pos = 0 i = 0 #se ordena paises while i < len(paises): j=0 mayor = 0 while j< len(paises): if paises[j][1] >= mayor: pos = j mayor = paises[j][1] j+=1 print(str(paises[pos][0]) + ": " + str(paises[pos][1])) paises[pos][1] = -1 i+=1 ObtenPaisYNumLibrosVent() # Crear un programa en Python que resuelva las siguientes consultas SQL [7,5 puntos]. # Se debe mostrar por pantalla el resultado de cada consulta: # Obtener el nombre de los compradores que han comprado al menos un libro y número de libros comprados, # ordenados decrecientemente por el número total de libros comprados. def ObtenCompradoresYNumlibros(): conn=sqlite3.connect("Libreria.sqlite3") cur = conn.cursor() cur.execute("SELECT * FROM compradores") NombreComprador = [rec[1] for rec in cur.fetchall()] cur.execute("SELECT * FROM compras") idComprador = [rec[1] for rec in cur.fetchall()] i = 0 suma = 0 CompradorYCompras=[] #rellenamos el comprador con su numero de compras while i < len(NombreComprador): j = 0 suma =0 while j < len(idComprador): if (idComprador[j]-1) == i: suma +=1 j+=1 CompradorYCompras+=[[NombreComprador[i],suma]] i+=1 pos = 0 i = 0 #se odena CompradorYCompras y se quitan los que no compran while i < len(CompradorYCompras): j=0 mayor = 0 pos = 0 while j < len(CompradorYCompras): if CompradorYCompras[j][1] >= mayor: mayor = CompradorYCompras[j][1] pos = j j+=1 if CompradorYCompras[pos][1] > 0 : print(str(CompradorYCompras[pos][0]) + ": " + str(CompradorYCompras[pos][1])) CompradorYCompras[pos][1] = 0 i+=1 ObtenCompradoresYNumlibros() # Obtener la media de lo que han gastado todos los compradores y la suma total de todas las ventas realizadas. 
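# --- Added sketch, not part of the original submission: the average/total described in the comment above can be
# computed with a single aggregate query. `media_y_suma_sql` is a hypothetical helper; like the original solution
# below, it divides total sales by the number of rows in compradores.
def media_y_suma_sql():
    conn = sqlite3.connect("Libreria.sqlite3")
    cur = conn.cursor()
    cur.execute(
        "SELECT ROUND(SUM(l.importe), 2), "
        "ROUND(SUM(l.importe) * 1.0 / (SELECT COUNT(*) FROM compradores), 2) "
        "FROM compras c JOIN libros l ON l.registro = c.id_libro")
    total, media = cur.fetchone()
    print("Average spent per buyer: " + str(media) + " - total sales: " + str(total))
    conn.close()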
def obtenMediaySuma(): conn=sqlite3.connect("Libreria.sqlite3") cur = conn.cursor() cur.execute("SELECT * FROM compras") compras = [comp[2] for comp in cur.fetchall()] cur.execute("SELECT * FROM libros") libros = [lib[7] for lib in cur.fetchall()] # Contamos con una tabla de libros ordenada respecto al ID del libro suma = 0 for elem in compras: suma = suma + libros[elem-1] cur.execute("SELECT * FROM compradores") compradores = [comp[0] for comp in cur.fetchall()] media = suma / len(compradores) media = round(media, 2) suma = round(suma, 2) # Había dos formas de interpretar este apartado, a la hora de contar los compradores, ya que se podía interpretar como # que solo contásemos los compradores cuyo ID aparece en la tabla compras, o bien los compradores totales de la tabla compradores # hemos optado por la segunda opción ya que el enunciado habla de todos los compradores, por tanto contamos con todos los de # la tabla compradores print("La media de lo gastado por los compradores es " + str(media) + "€\n") print("La suma total de las ventas asciende a " + str(suma) + "€") obtenMediaySuma() # Obtener el nombre, teléfono y anotaciones de las filas de la tabla Compradores dónde el nombre del comprador empieza con la letra “M”. def muestraCompradoresM(): conn=sqlite3.connect("Libreria.sqlite3") cur = conn.cursor() cur.execute("SELECT * FROM compradores") compradores = [(comp[1], comp[3], comp[6]) for comp in cur.fetchall()] for elem in compradores: nombre = elem[0] if nombre[0] == "M": telef = elem[1] anotac = elem[2] print("Nombre: " + nombre + ", teléfono: " + telef + ", anotaciones: " + anotac) # Si no tiene anotaciones, hemos optado por que se muestre en blanco en vez de ocultar el apartado de anotaciones # para que así se vea que no las tiene realmente muestraCompradoresM() # Actualizar la tabla Libros, cambiando el soporte de los registros 6 y 7 a “DVD”. def actualizaLibros(): conn=sqlite3.connect("Libreria.sqlite3") cur = conn.cursor() cur.execute('UPDATE libros set soporte=? WHERE registro=?',["DVD",6]) cur.execute('UPDATE libros set soporte=? WHERE registro=?',["DVD",7]) cur.execute("SELECT * FROM libros") libros = [(comp[0], comp[4]) for comp in cur.fetchall()] print(libros) conn.commit() actualizaLibros() # Borrar los libros que no se han vendido nunca. def borraNoVendidos(): conn=sqlite3.connect("Libreria.sqlite3") cur = conn.cursor() cur.execute("SELECT * FROM compras") compras = [comp[2] for comp in cur.fetchall()] dicc = dict() for elem in compras: dicc[elem] = True cur.execute("SELECT * FROM libros") libros = [lib[0] for lib in cur.fetchall()] for elem in libros: if elem in dicc: continue else: cur.execute('DELETE FROM libros WHERE registro=?',[elem]) cur.execute("SELECT * FROM libros") libros = [comp[1] for comp in cur.fetchall()] # Según la tabla dada se deberían borrar el registro 4 y 6 de libros print(libros) conn.commit() borraNoVendidos()['El Quijote', 'Marina', 'La hoguera de las vanidades', 'Otelo', 'Poesía']Comparison of the molecular domain between cell lines and tumors for breast cancerThis notebook supports the second figure. 
It takes data from cell lines, PDXs and tumors, compute the domain-specific factors and compare them using the cosine similarity matrix.Finally, tumor data is projected on each of these domain-specific factors and variance explained is computed to see how tumor variance is supported.This figure also supports Fig Supp 1.# Tissue to consider tumor_type = 'Breast' cell_line_type = 'BRCA' pdx_type = 'BRCA' # Normalization parameters normalization = 'TMM' transformation = 'log' mean_center = True std_unit = False protein_coding_only = True import os, sys import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import matplotlib from sklearn.decomposition import PCA, FastICA, SparsePCA from sklearn.externals.joblib import Parallel, delayed import matplotlib.cm as cm plt.style.use('ggplot') #Import src implementations os.environ['OMP_NUM_THREADS'] = '1' os.environ['KMP_DUPLICATE_LIB_OK']='True' from data_reader.read_data import read_data from normalization_methods.feature_engineering import feature_engineeringImport data# Import tumor + cell line data (count data) x_target, x_source, g, _, _ = read_data('cell_line', 'tumor', 'count', cell_line_type, tumor_type, remove_mytochondria=False) cl_vs_t = {'source':x_source, 'target':x_target} cl_vs_t_genes = g del g, x_target, x_source print('Cell lines vs Tumors data imported') # Import tumor + pdx data (FPKM) x_target, x_source, g, _, _ = read_data('pdx', 'tumor', 'fpkm', pdx_type, tumor_type, remove_mytochondria=False) pdx_vs_t = {'source':x_source, 'target':x_target} pdx_vs_t_genes = g del g, x_target, x_source print('PDX vs tumors data imported') # Import PDX + cell-line data (FPKM) x_target, x_source, g, _, _ = read_data('cell_line', 'pdx', 'fpkm', cell_line_type, pdx_type, remove_mytochondria=False) cl_vs_pdx = {'source':x_source, 'target':x_target} cl_vs_pdx_genes = g del g, x_target, x_source print('Cell lines vs PDX data imported') # Normalization & Transformation for RNA-Seq data for e in [cl_vs_t, pdx_vs_t, cl_vs_pdx]: e['source'] = feature_engineering(e['source'], normalization, transformation, mean_center, std_unit) e['target'] = feature_engineering(e['target'], normalization, transformation, mean_center, std_unit)Cosines similarity computationComputes and plot the cosines similarity and plot it. 
Also breaks down the results per PC to show the overlap.number_components = 20 def compute_components_PCA(x): pca_instance = PCA(number_components) pca_instance.fit(x) return pca_instance.components_ def compute_components_Sparse_PCA(x): pca_instance = SparsePCA(number_components, verbose=10) pca_instance.fit(x) print('computed') return pca_instance.components_ def compute_components_ICA(x): ica_instance = Fast(number_components, n_jobs=3) ica_instance.fit(x) print('COMPUTED') return orth(ica_instance.mixing_).transpose() def compute_cosine_similarity(data, dim_red_method): source_components = dim_red_method(data['source']) target_components = dim_red_method(data['target']) components = { 'source':source_components, 'target':target_components } return source_components.dot(target_components.transpose()), components compute_components = compute_components_PCA cl_vs_t_cosine_similarity, cl_vs_t_components = compute_cosine_similarity(cl_vs_t, compute_components) pdx_vs_t_cosine_similarity, pdx_vs_t_components = compute_cosine_similarity(pdx_vs_t, compute_components) cl_vs_pdx_cosine_similarity, cl_vs_pdx_components = compute_cosine_similarity(cl_vs_pdx, compute_components) # Plot cosines similarity between cell lines and tumors sns.heatmap(np.abs(cl_vs_t_cosine_similarity), cmap='seismic_r',\ center=0, vmax=1., vmin=0) plt.ylabel('Cell lines', fontsize=25, color='black') plt.xlabel('Tumors', fontsize=25, color='black') plt.xticks(np.arange(.5,number_components,2), range(1,number_components+1,2), fontsize=15, color='black') plt.yticks(np.arange(.5,number_components,2), range(1,number_components+1,2), fontsize=15, color='black') plt.tight_layout() if tumor_type == 'Breast': plt.savefig('./figures/fig2_cosines_similarity_cell_lines_tumors_RNAseq_%s_%s.png'%(tumor_type, cell_line_type.replace('/','')),\ dpi=300) else: plt.savefig('./figures/supp_fig2_cosines_similarity_cell_lines_tumors_RNAseq_%s_%s.png'%(tumor_type, cell_line_type.replace('/','')),\ dpi=300) plt.show() # Plot cosines similarity between pdx and tumors sns.heatmap(np.abs(pdx_vs_t_cosine_similarity), cmap='seismic_r',\ center=0, vmax=1., vmin=0) plt.ylabel('PDX', fontsize=25, color='black') plt.xlabel('Tumors', fontsize=25, color='black') plt.xticks(np.arange(.5,number_components,2), range(1,number_components+1,2), fontsize=15, color='black') plt.yticks(np.arange(.5,number_components,2), range(1,number_components+1,2), fontsize=15, color='black') plt.tight_layout() if tumor_type == 'Breast': plt.savefig('./figures/fig2_cosines_similarity_pdx_tumors_RNAseq_%s_%s.png'%(tumor_type, pdx_type.replace('/','')),\ dpi=300) else: plt.savefig('./figures/supp_fig2_cosines_similarity_pdx_tumors_RNAseq_%s_%s.png'%(tumor_type, pdx_type.replace('/','')),\ dpi=300) plt.show() # Plot cosines similarity between cell lines and pdx sns.heatmap(np.abs(cl_vs_pdx_cosine_similarity), cmap='seismic_r',\ center=0, vmax=1., vmin=0) plt.ylabel('Cell lines', fontsize=25, color='black') plt.xlabel('PDX', fontsize=25, color='black') plt.xticks(np.arange(.5,number_components,2), range(1,number_components+1,2), fontsize=15, color='black') plt.yticks(np.arange(.5,number_components,2), range(1,number_components+1,2), fontsize=15, color='black') plt.tight_layout() if tumor_type == 'Breast': plt.savefig('./figures/fig2_cosines_similarity_cell_lines_pdx_RNAseq_%s_%s.png'%(tumor_type, pdx_type.replace('/','')),\ dpi=300) else: plt.savefig('./figures/supp_fig2_cosines_similarity_cell_lines_pdx_RNAseq_%s_%s.png'%(tumor_type, pdx_type.replace('/','')),\ dpi=300) 
plt.show()Variance explained# Tumor variance explained by cell lines def target_variance_projected(data, components): target_projected_variance = np.var(data['target'].dot(components['target'].transpose()),0) source_projected_variance = np.var(data['target'].dot(components['source'].transpose()),0) target_total_variance = np.sum(np.var(data['target'], 0)) return { 'source': source_projected_variance / target_total_variance, 'target': target_projected_variance / target_total_variance } # Compute target projected variance cl_vs_t_variance = target_variance_projected(cl_vs_t, cl_vs_t_components) cl_vs_pdx_variance = target_variance_projected(cl_vs_pdx, cl_vs_pdx_components) pdx_vs_t_variance = target_variance_projected(pdx_vs_t, pdx_vs_t_components) ##### # Cell lines vs Tumors ##### plt.figure(figsize=(8,5)) plt.plot(np.arange(1, number_components+1), cl_vs_t_variance['target'],\ label='Tumor Principal Component', linewidth=3) plt.plot(np.arange(1, number_components+1), cl_vs_t_variance['source'],\ label='Cell line Principal Component', linewidth=3) plt.xticks(np.arange(1, number_components+1, 2), fontsize=15, color='black') max_var = cl_vs_t_variance['target'][0] plt.ylim(0,1.1*max_var) plt.yticks(np.arange(0, 1.1*max_var,0.02), (np.arange(0, 1.1*max_var,0.02)*100).astype(int), fontsize=15, color='black') del max_var plt.xlabel('Factor number', fontsize=20, color='black') plt.ylabel('Proportion of tumor variance', fontsize=20, color='black') plt.legend(fontsize=17) plt.tight_layout() if tumor_type == 'Breast': plt.savefig('./figures/fig2_variance_explained_cl_vs_t_%s_%s.png'%(tumor_type, cell_line_type.replace('/','')),\ dpi=300) else: plt.savefig('./figures/supp_fig2_variance_explained_cl_vs_t_%s_%s.png'%(tumor_type, cell_line_type.replace('/','')),\ dpi=300) plt.show() ##### # PDX vs Tumors ##### plt.figure(figsize=(8,5)) plt.plot(np.arange(1, number_components+1), pdx_vs_t_variance['target'],\ label='Tumor Principal Component', linewidth=3) plt.plot(np.arange(1, number_components+1), pdx_vs_t_variance['source'],\ label='PDX Principal Component', linewidth=3) plt.xticks(np.arange(1, number_components+1, 2), fontsize=15, color='black') max_var = pdx_vs_t_variance['target'][0] plt.ylim(0,1.1*max_var) plt.yticks(np.arange(0, 1.1*max_var,0.02), (np.arange(0, 1.1*max_var,0.02)*100).astype(int), fontsize=15, color='black') del max_var plt.xlabel('Factor number', fontsize=20, color='black') plt.ylabel('Proportion of tumor variance', fontsize=20, color='black') plt.legend(fontsize=17) plt.tight_layout() if tumor_type == 'Breast': plt.savefig('./figures/fig2_variance_explained_pdx_vs_t_%s_%s.png'%(tumor_type, pdx_type), dpi=300) else: plt.savefig('./figures/supp_fig2_variance_explained_pdx_vs_t_%s_%s.png'%(tumor_type, pdx_type), dpi=300) plt.show() ##### # Cell lines vs PDX ##### plt.figure(figsize=(8,5)) plt.plot(np.arange(1, number_components+1), cl_vs_pdx_variance['target'],\ label='PDX Principal Component', linewidth=3) plt.plot(np.arange(1, number_components+1), cl_vs_pdx_variance['source'],\ label='Cell line Principal Component', linewidth=3) plt.xticks(np.arange(1, number_components+1, 2), fontsize=15) max_var = cl_vs_pdx_variance['target'][0] plt.ylim(0,1.1*max_var) plt.yticks(np.arange(0, 1.1*max_var,0.02), (np.arange(0, 1.1*max_var,0.02)*100).astype(int), fontsize=12) plt.xlabel('Factor number', fontsize=20) plt.ylabel('Proportion of PDX variance', fontsize=20) plt.legend(fontsize=17) plt.tight_layout() if tumor_type == 'Breast': 
plt.savefig('./figures/fig2_variance_explained_cl_vs_t_%s_%s.png'%(tumor_type, pdx_type), dpi=300) else: plt.savefig('./figures/supp_fig2_variance_explained_cl_vs_t_%s_%s.png'%(tumor_type, pdx_type), dpi=300) plt.show() ## Bootstrap analysis for variance n_jobs = 5 def bootstrap_projected_variance(data_var, components, n=1): np.random.seed() bootstrapped_variance = [] for _ in range(n): e = np.random.choice(range(data_var.shape[0]), size=data_var.shape[0], replace=True) bootstrapped_variance.append(np.var(data_var[e].dot(components.transpose()),0)) return bootstrapped_variance ##### # CL vs Tumor ##### target = cl_vs_t['target'] source = cl_vs_t['source'] # Compute components target_components = compute_components(target) source_components = compute_components(source) # Bootstrap target data and project it onto the different components. n_bootstrap = 100 size_batch = 10 bootstrapped_target_variance = Parallel(n_jobs=n_jobs, verbose=10)\ (delayed(bootstrap_projected_variance)(target, target_components, size_batch) for _ in range(int(n_bootstrap/size_batch))) bootstrapped_target_variance = np.concatenate(bootstrapped_target_variance) bootstrapped_source_variance = Parallel(n_jobs=n_jobs, verbose=10)\ (delayed(bootstrap_projected_variance)(target, source_components, size_batch) for _ in range(int(n_bootstrap/size_batch))) bootstrapped_source_variance = np.concatenate(bootstrapped_source_variance) # Compute variance projected target_proj_variance = np.var(target.dot(target_components.transpose()), 0) source_proj_variance = np.var(target.dot(source_components.transpose()), 0) target_var = np.sum(np.var(target,0)) source_proj_variance /= target_var target_proj_variance /= target_var bootstrapped_target_variance /= target_var bootstrapped_source_variance /= target_var # Plot figure plt.figure(figsize=(8,5)) plt.plot(range(1, target_proj_variance.shape[0]+1), target_proj_variance, label='Tumor Principal Component') plt.fill_between(range(1,target_proj_variance.shape[0]+1), np.percentile(bootstrapped_target_variance, 1, axis=0), np.percentile(bootstrapped_target_variance, 99, axis=0), alpha=0.3) plt.plot(range(1, source_proj_variance.shape[0]+1),source_proj_variance, label='Cell line Principal Component') plt.fill_between(range(1, source_proj_variance.shape[0]+1), np.percentile(bootstrapped_source_variance, 1, axis=0), np.percentile(bootstrapped_source_variance, 99, axis=0), alpha=0.3) plt.xticks(np.arange(1, number_components+1, 2), fontsize=15, color='black') max_var = np.percentile(bootstrapped_target_variance, 99, axis=0)[0] plt.yticks(np.arange(0, 1.1*max_var,0.02), (np.arange(0, 1.1*max_var,0.02)*100).astype(int), fontsize=15, color='black') del max_var plt.xlabel('Factor number', fontsize=20, color='black') plt.ylabel('Proportion of tumor variance', fontsize=20, color='black') plt.legend(fontsize=17) plt.tight_layout() if tumor_type == 'Breast': plt.savefig('./figures/fig2_variance_explained_bootstrapped_cl_vs_t_%s_%s_boot_%s.png'%(tumor_type, cell_line_type.replace('/',''), n_bootstrap),\ dpi=300) plt.show() ##### # PDX vs Tumors ##### target = pdx_vs_t['target'] source = pdx_vs_t['source'] target_components = compute_components(target) source_components = compute_components(source) n_bootstrap = 100 size_batch = 10 bootstrapped_target_variance = Parallel(n_jobs=n_jobs, verbose=10)\ (delayed(bootstrap_projected_variance)(target, target_components, size_batch) for _ in range(int(n_bootstrap/size_batch))) bootstrapped_target_variance = np.concatenate(bootstrapped_target_variance) 
bootstrapped_source_variance = Parallel(n_jobs=n_jobs, verbose=10)\ (delayed(bootstrap_projected_variance)(target, source_components, size_batch) for _ in range(int(n_bootstrap/size_batch))) bootstrapped_source_variance = np.concatenate(bootstrapped_source_variance) target_proj_variance = np.var(target.dot(target_components.transpose()), 0) source_proj_variance = np.var(target.dot(source_components.transpose()), 0) target_var = np.sum(np.var(target,0)) source_proj_variance /= target_var target_proj_variance /= target_var bootstrapped_target_variance /= target_var bootstrapped_source_variance /= target_var plt.figure(figsize=(8,5)) plt.plot(range(1, target_proj_variance.shape[0]+1), target_proj_variance, label='Tumor Principal Component') plt.fill_between(range(1,target_proj_variance.shape[0]+1), np.percentile(bootstrapped_target_variance, 1, axis=0), np.percentile(bootstrapped_target_variance, 99, axis=0), alpha=0.3) plt.plot(range(1, source_proj_variance.shape[0]+1),source_proj_variance, label='PDX Principal Component') plt.fill_between(range(1, source_proj_variance.shape[0]+1), np.percentile(bootstrapped_source_variance, 1, axis=0), np.percentile(bootstrapped_source_variance, 99, axis=0), alpha=0.3) plt.xticks(np.arange(1, number_components+1, 2), fontsize=15, color='black') max_var = np.percentile(bootstrapped_target_variance, 99, axis=0)[0] plt.ylim(0,max_var) plt.yticks(np.arange(0, 1.1*max_var,0.02), (np.arange(0, 1.1*max_var,0.02)*100).astype(int), fontsize=15, color='black') del max_var plt.xlabel('Factor number', fontsize=20, color='black') plt.ylabel('Proportion of tumor variance', fontsize=20, color='black') plt.legend(fontsize=17) plt.tight_layout() if tumor_type == 'Breast': plt.savefig('./figures/fig2_variance_explained_bootstrapped_pdx_vs_t_%s_%s_boot_%s.png'%(tumor_type, pdx_type, n_bootstrap),\ dpi=300) plt.show()[Parallel(n_jobs=5)]: Using backend LokyBackend with 5 concurrent workers. [Parallel(n_jobs=5)]: Done 3 out of 10 | elapsed: 9.1s remaining: 21.2s [Parallel(n_jobs=5)]: Done 5 out of 10 | elapsed: 9.1s remaining: 9.1s [Parallel(n_jobs=5)]: Done 7 out of 10 | elapsed: 14.0s remaining: 6.0s [Parallel(n_jobs=5)]: Done 10 out of 10 | elapsed: 14.1s finished [Parallel(n_jobs=5)]: Using backend LokyBackend with 5 concurrent workers. [Parallel(n_jobs=5)]: Done 3 out of 10 | elapsed: 5.4s remaining: 12.6s [Parallel(n_jobs=5)]: Done 5 out of 10 | elapsed: 5.4s remaining: 5.4s [Parallel(n_jobs=5)]: Done 7 out of 10 | elapsed: 10.5s remaining: 4.5s [Parallel(n_jobs=5)]: Done 10 out of 10 | elapsed: 10.6s finished유닛, 게이트 코드 1장에서 사용했던 코드를 이용합니다.class Unit(object): def __init__(self, value, grad): # 정방향에서 계산되는 값 self.value = value # 역방향일 때 계산되는 이 유닛에 대한 회로 출력의 변화율 self.grad = grad class MultiplyGate(object): def forward(self, u0, u1): self.u0 = u0 self.u1 = u1 self.utop = Unit(self.u0.value * self.u1.value, 0.0) return self.utop def backward(self): # 출력 유닛의 기울기를 받아 곱셉 게이트의 자체 기울기와 곱하여(체인 룰) 입력 유닛의 기울기로 저장합니다. self.u0.grad += self.u1.value * self.utop.grad self.u1.grad += self.u0.value * self.utop.grad class AddGate(object): def forward(self, u0, u1): self.u0 = u0 self.u1 = u1 self.utop = Unit(self.u0.value + self.u1.value, 0.0) return self.utop def backward(self): # 입력에 대한 덧셈 게이트의 기울기는 1 입니다 self.u0.grad += 1 * self.utop.grad self.u1.grad += 1 * self.utop.gradSVM 을 위한 회로 코드 회로: 다섯 개의 유닛 (x,y,a,b,c) 을 입력 받고 하나의 유닛을 출력합니다. 
It also computes the gradients with respect to the inputs.class Circuit(object): def __init__(self): # create the gates self.mulg0 = MultiplyGate() self.mulg1 = MultiplyGate() self.addg0 = AddGate() self.addg1 = AddGate() def forward(self, x, y, a, b, c): self.ax = self.mulg0.forward(a, x) # a*x self.by = self.mulg1.forward(b, y) # b*y self.axpby = self.addg0.forward(self.ax, self.by) # a*x + b*y self.axpbypc = self.addg1.forward(self.axpby, c) # a*x + b*y + c return self.axpbypc def backward(self, gradient_top): # receive the gradient from the gate above self.axpbypc.grad = gradient_top self.addg1.backward() # apply the gradient to axpby and c self.addg0.backward() # apply the gradient to ax and by self.mulg1.backward() # apply the gradient to b and y self.mulg0.backward() # apply the gradient to a and xSVM classclass Svm(object): def __init__(self): # initialize the parameters randomly self.a = Unit(1.0, 0.0) self.b = Unit(-2.0, 0.0) self.c = Unit(-1.0, 0.0) self.circuit = Circuit() def forward(self, x, y): # x and y are assumed to be Unit objects self.unit_out = self.circuit.forward(x, y, self.a, self.b, self.c) return self.unit_out def backward(self, label): # the label is +1 or -1 # reset the gradients of a, b, c self.a.grad = 0.0 self.b.grad = 0.0 self.c.grad = 0.0 # compute the force (gradient) to pull, depending on the circuit output pull = 0.0 if label == 1 and self.unit_out.value < 1: pull = 1 # the score is too low; it should increase if label == -1 and self.unit_out.value > -1: pull = -1 # the score is too high; it should decrease self.circuit.backward(pull) # apply the gradients to x, y, a, b, c # add a regularization force on each parameter, proportional to its value, pulling it toward 0 self.a.grad += -self.a.value self.b.grad += -self.b.value def learn_from(self, x, y, label): self.forward(x, y) # forward pass (fills the .value attribute of every unit) self.backward(label) # backward pass (fills the .grad attribute of every unit) self.parameter_update() # update the parameters def parameter_update(self): step_size = 0.03 # set to 0.03 to speed up learning self.a.value += step_size * self.a.grad self.b.value += step_size * self.b.grad self.c.value += step_size * self.c.gradApply gradient descent. Prepare the datadata = [] labels = [] data.append([1.2, 0.7]) labels.append(1) data.append([-0.3, -0.5]) labels.append(-1) data.append([3.0, 0.1]) labels.append(1) data.append([-0.1, -1.0]) labels.append(-1) data.append([-1.0, 1.1]) labels.append(-1) data.append([2.1, -3]) labels.append(1)A function to compute the classification accuracydef eval_training_accuracy(): num_correct = 0 for i in range(len(data)): x = Unit(data[i][0], 0.0) y = Unit(data[i][1], 0.0) true_label = labels[i] # check whether the prediction matches the label unit_out = svm.forward(x, y) predicted_label = 1 if unit_out.value > 0 else -1 if predicted_label == true_label: num_correct += 1 return num_correct / len(data)Training loopsvm = Svm() for iter in range(400): # pick a random data point i = int(np.floor(np.random.random() * len(data))) x = Unit(data[i][0], 0.0) y = Unit(data[i][1], 0.0) label = labels[i] svm.learn_from(x, y, label) if iter % 25 == 0: # every 25 iterations... print('training accuracy at iter %d: %f' % (iter, eval_training_accuracy()))training accuracy at iter 0: 0.666667 training accuracy at iter 25: 0.833333 training accuracy at iter 50: 0.833333 training accuracy at iter 75: 0.833333 training accuracy at iter 100: 0.833333 training accuracy at iter 125: 0.833333 training accuracy at iter 150: 0.833333 training accuracy at iter 175: 0.833333 training accuracy at iter 200: 1.000000 training accuracy at iter 225: 0.833333 training accuracy at iter 250: 1.000000 training accuracy at iter 275: 0.833333 training accuracy at iter 300: 1.000000 training accuracy at iter 325: 1.000000 training accuracy at iter 350: 1.000000 training accuracy at iter 375: 1.000000Hide-and-Seek problem- ProblemSubin is playing hide-and-seek with his younger sibling. Subin is currently at point N (0 ≤ N ≤ 100,000) and the sibling is at point K (0 ≤ K ≤ 100,000). Subin can either walk or teleport.
Hide-and-Seek problem
- Problem: Subin is playing hide and seek with her younger sibling. Subin is currently at point N (0 ≤ N ≤ 100,000) and the sibling is at point K (0 ≤ K ≤ 100,000). Subin can either walk or teleport. If Subin is at position X, walking moves her to X-1 or X+1 one second later, while teleporting moves her to 2*X one second later. Given the positions of Subin and the sibling, write a program that computes the earliest time, in seconds, at which Subin can find the sibling.
- Input: The first line contains Subin's position N and the sibling's position K. N and K are integers.
- Output: Print the earliest time at which Subin finds the sibling.
from collections import deque

N, k = map(int, input().split())
ck = [-1 for _ in range(200000)]
q = deque()
ck[N] = 0
q.append(N)
while len(q) > 0:
    front = q.popleft()
    if front == k:
        print(ck[k])
        break
    if front - 1 >= 0 and ck[front-1] == -1:
        ck[front-1] = ck[front] + 1
        q.append(front-1)
    if front + 1 < len(ck) and ck[front+1] == -1:
        ck[front+1] = ck[front] + 1
        q.append(front+1)
    if front * 2 < len(ck) and ck[front*2] == -1:
        ck[front*2] = ck[front] + 1
        q.append(front*2)
20 * 20
Fibonacci sequence
1. Using plain recursion -> very slow, and it errors out for larger n.
2. Using dynamic programming -> solve the smaller subproblems first and build up to the larger one, memoizing the results.
#2. Dynamic programming (bottom-up)
n = int(input())
dp = [0 for _ in range(50)]
dp[1] = 1
for i in range(2, n + 1):
    dp[i] = dp[i-1] + dp[i-2]
print(dp[n])

#3. Recursion combined with dynamic programming (top-down)
n = int(input())
dp = [0 for _ in range(n + 1)]
def fib(n):
    if n < 2:
        return n
    if dp[n] != 0:
        return dp[n]
    dp[n] = fib(n-1) + fib(n-2)
    return dp[n]
print(fib(n))
Sum of a 2D array
- use partial (prefix) sums
- use dynamic programming (a short prefix-sum sketch appears at the end of this section)
f-Strings (Python 3.6+)
a = 5
b = 10
f'Five plus ten is {a + b} and not {2 * (a + b)}.'
If a malicious user can supply a format string, they can potentially leak secret keys and other sensitive information!
from string import Template

# This is our super secret key:
SECRET = 'this-is-a-secret'

user_input = '{error.__init__.__globals__[SECRET]}'

class Error:
    def __init__(self):
        pass

# A malicious user can craft a format string that
# can read data from the global namespace:
# This allows them to exfiltrate sensitive information,
# like the secret key:
err = Error()
user_input.format(error=err)

# Template strings do not allow attribute or item access in substitutions,
# so the same payload fails instead of leaking the secret:
user_input = '${error.__init__.__globals__[SECRET]}'
Template(user_input).substitute(error=err)

import requests
r = requests.get('https://files.realpython.com/media/python-string-formatting-flowchart.4ecf0148fd87.png')
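The 2D-array-sum note above only names the technique, so here is a minimal prefix-sum sketch (my own illustration; the array a and the helper names are invented): precompute cumulative sums once, then any axis-aligned rectangle can be summed in O(1).

```python
# S[i][j] holds the sum of the sub-rectangle a[0..i-1][0..j-1].
def build_prefix(a):
    n, m = len(a), len(a[0])
    S = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(n):
        for j in range(m):
            S[i+1][j+1] = a[i][j] + S[i][j+1] + S[i+1][j] - S[i][j]
    return S

def rect_sum(S, r1, c1, r2, c2):
    # inclusive 0-based corners (r1, c1) .. (r2, c2)
    return S[r2+1][c2+1] - S[r1][c2+1] - S[r2+1][c1] + S[r1][c1]

a = [[1, 2, 3],
     [4, 5, 6],
     [7, 8, 9]]
S = build_prefix(a)
print(rect_sum(S, 0, 0, 1, 1))  # 1+2+4+5 = 12
```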
Advent of Code 2018 - Day 5 Input# input_file = 'input-sample.txt' input_file = 'input-full.txt' # data examples: dabAcCaCBAcCcaDA input = '' with open(input_file, 'r') as f: input = f.read().rstrip() # Python limits recursion to 1000 recursive calls by default import sys if len(input) > 1000: sys.setrecursionlimit(len(input))Part 1def reduce_polymer(prefix, polymer): new_polymer = prefix prev_char = '' for i, curr_char in enumerate(polymer): if prev_char != curr_char.swapcase(): new_polymer += prev_char prev_char = curr_char else: # merge polymer chunks without prev_char + curr_char new_polymer += polymer[i+1:] # call function recursively # split two chars back to account for new "reactions" split = len(prefix) + i - 2 if split < 0: split = 0 return
reduce_polymer(new_polymer[:split], new_polymer[split:]) new_polymer += prev_char return new_polymer reduced_polymer = reduce_polymer('', input) print 'polymer length: {}'.format(len(reduced_polymer))polymer length: 9202Part 2min_polymer_len = len(input) # get all unique chars in input uniq_chars = ''.join(set(input.lower())) for char in uniq_chars: # remove uppercase/lowercase char from polymer and reduce stripped_polymer = input.translate(None, '{}{}'.format(char, char.upper())) reduced_polymer = reduce_polymer('', stripped_polymer) if len(reduced_polymer) < min_polymer_len: min_polymer_len = len(reduced_polymer) print 'min polymer len: {}'.format(min_polymer_len)min polymer len: 6394Binary treesimport matplotlib.pyplot as plt from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from dtreeviz.trees import * from lolviz import * import numpy as np import pandas as pd %config InlineBackend.figure_format = 'retina'Setup Make sure to install stuff:```pip install -U dtreevizpip install -U lolvizbrew install graphviz``` Binary tree class definitionA binary tree has a payload (a value) and references to left and right children. One or both of the children references can be `None`. A reference to a node is the same thing as a reference to a tree as the tree is a self similar data structure. We don't distinguish between the two kinds of references. A reference to the root node is a reference to the entire tree.Here is a basic tree node class in Python. The constructor requires at least a value to store in the node.class TreeNode: def __init__(self, value, left=None, right=None): self.value = value self.left = left self.right = right def __repr__(self): return str(self.value) def __str__(self): return str(self.value)Manual tree construction Here's how to create and visualize a single node:root = TreeNode(1) treeviz(root)**Given `left` and `right` nodes, create node `root` with those nodes as children.**left = TreeNode(2) right = TreeNode(3) root = ... treeviz(root)Solutionleft = TreeNode(2)right = TreeNode(3)root = TreeNode(1,left,right)treeviz(root) **Write code to create the following tree structure**left = ... right = ... root = ... treeviz(root)Solutionleft = TreeNode(2,TreeNode(4))right = TreeNode(3,TreeNode(5),TreeNode(6))root = TreeNode(1,left,right)treeviz(root) Walking trees manuallyTo walk a tree, we simply follow the left and right children references, avoiding any `None` references. **Q.** Given tree `r` shown here, what Python expressions refer to the nodes with 443 and 17 in them?left = TreeNode(443,TreeNode(-34)) right = TreeNode(17,TreeNode(99)) r = TreeNode(10,left,right) treeviz(r)Solutionr.left, r.right **Q.** Given the same tree `r`, what Python expressions refer to the nodes with -34 and 99 in them? Solutionr.left.left, r.right.left Walking all nodesNow let's create a function to walk all nodes in a tree. Remember that our template for creating any recursive function looks like this:```def f(input): 1. check termination condition 2. process the active input region / current node, etc… 3. invoke f on subregion(s) 4. combine and return results```def walk(p:TreeNode): if p is None: return # step 1 print(p.value) # step 2 walk(p.left) # step 3 walk(p.right) # step 3 (there is no step 4 for this problem)Let's create the simple 3-level tree we had before:left = TreeNode(2,TreeNode(4)) right = TreeNode(3,TreeNode(5),TreeNode(6)) root = TreeNode(1,left,right) treeviz(root)**Q.** What is the output of running `walk(root)`? 
Solution: we walk the tree depth first, from left to right: 1 2 4 3 5 6
Searching through nodes
Here's how to search for an element as you walk, terminating as soon as the node with `x` is found:
def search(p:TreeNode, x:object):
    if p is None: return None
    print(p)
    if x==p.value: return p
    q = search(p.left, x)
    if q is not None: return q
    q = search(p.right, x)
    return q
**Q.** What is the output of running `search(root, 5)`?
Solution: we walk the tree depth first as before, but now we stop when we reach the node with 5: 1 2 4 3 5
To see the recursion entering and exiting (or discovering and finishing) nodes, here is a variation that prints out its progress through the tree:
def search(p:TreeNode, x:object):
    if p is None: return None
    print("enter ", p)
    if x==p.value:
        print("exit ", p)
        return p
    q = search(p.left, x)
    if q is not None:
        print("exit ", p)
        return q
    q = search(p.right, x)
    print("exit ", p)
    return q

search(root, 5)
enter 1
enter 2
enter 4
exit 4
exit 2
enter 3
enter 5
exit 5
exit 3
exit 1
Creating (random) decision tree "stumps"
A regression tree stump is a tree with a decision node at the root and two predictor leaves. These are used by gradient boosting machines as the "weak learners."
class TreeNode: # acts as decision node and leaf. it's a leaf if split is None
    def __init__(self, split=None, prediction=None, left=None, right=None):
        self.split = split
        self.prediction = prediction
        self.left = left
        self.right = right
    def __repr__(self):
        return str(self.value)
    def __str__(self):
        return str(self.value)

df = pd.DataFrame()
df["sqfeet"] = [750, 800, 850, 900, 950]
df["rent"] = [1160, 1200, 1280, 1450, 1300]
df
The following code shows where sklearn would do a split with a normal decision tree.
X, y = df.sqfeet.values.reshape(-1,1), df.rent.values
t = DecisionTreeRegressor(max_depth=1)
t.fit(X,y)
fig, ax = plt.subplots(1, 1, figsize=(3,1.5))
t = rtreeviz_univar(t, X, y, feature_names='sqfeet', target_name='rent',
                    fontsize=9, colors={'scatter_edge': 'black'}, ax=ax)
Instead of picking the optimal split point, we can choose a random value in between the minimum and maximum x value, like extremely random forests do:
def stumpfit(x, y):
    if len(x)==1 or len(np.unique(x))==1: # if one x value, make leaf
        return TreeNode(prediction=y[0])
    split = np.round(np.random.uniform(min(x),max(x)))
    t = TreeNode(split)
    t.left = TreeNode(prediction=np.mean(y[x<split]))
    t.right = TreeNode(prediction=np.mean(y[x>=split]))
    return t
**Run the following code multiple times to see how it creates different y lists in the nodes, according to the split value.**
root = stumpfit(X.reshape(-1), y)
treeviz(root)
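As a quick illustration of how such a stump is used (my own addition, assuming the TreeNode and stumpfit definitions above; stump_predict is a hypothetical helper), prediction simply routes an observation to the left or right leaf by comparing it with the stored split value:

```python
# Route one observation through a stump and return the leaf's prediction.
def stump_predict(stump, x_new):
    if stump.split is None:          # degenerate case: the stump is a single leaf
        return stump.prediction
    leaf = stump.left if x_new < stump.split else stump.right
    return leaf.prediction

root = stumpfit(X.reshape(-1), y)
print([stump_predict(root, v) for v in [760, 920]])
```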
Creating random decision trees (single variable)
And now to demonstrate the magic of recursion. If we replace the leaf assignments ```t.left = TreeNode(prediction=np.mean(y[x<split]))``` and ```t.right = TreeNode(prediction=np.mean(y[x>=split]))``` with recursive calls, the stump grows into a full (random) decision tree:
def treefit(x, y):
    if len(x)==1 or len(np.unique(x))==1: # if one x value, make leaf
        return TreeNode(prediction=y[0])
    split = np.round(np.random.uniform(min(x),max(x)))
    t = TreeNode(split)
    t.left = treefit(x[x<split], y[x<split])
    t.right = treefit(x[x>=split], y[x>=split])
    return t

root = treefit(X.reshape(-1), y)
treeviz(root)
Create a rendering [context](https://docs.rs/nsi/latest/nsi/struct.Context.html).
let ctx = nsi::Context::new(&[]).unwrap();
The type of the variable polyhedron was redefined, so was lost.
Create a dodecahedron.
let polyhedron = p_ops::Polyhedron::dodecahedron();
We convert the dodecahedron into an NSI node and connect it to the scene’s root.
let handle = polyhedron.to_nsi(&ctx, None, None, None, None);
ctx.connect(handle.as_str(), "", ".root", "objects", &[]);
Set up an [attribute](https://nsi.readthedocs.io/en/latest/nodes.html#node-attributes) to hook up a shader to our geometry node. We then create & connect a [shader](https://nsi.readthedocs.io/en/latest/nodes.html#the-shader-node) and instance a *principled* [OSL](https://opensource.imageworks.com/osl.html) shader.
ctx.create("dodeca_attrib", nsi::NodeType::Attributes, &[]);
ctx.connect("dodeca_attrib", "", handle.as_str(), "geometryattributes", &[]);
ctx.create("dodeca_shader", nsi::NodeType::Shader, &[]);
ctx.connect("dodeca_shader", "", "dodeca_attrib", "surfaceshader", &[]);
ctx.set_attribute(
    "dodeca_shader",
    &[
        nsi::string!("shaderfilename", "${DELIGHT}/osl/dlPrincipled"),
        nsi::color!("i_color", &[0.8, 0.5, 0.3]),
        nsi::float!("roughness", 0.1),
        nsi::float!("specular_level", 0.5),
        nsi::float!("metallic", 1.0),
    ],
);
Next we set up an [environment](https://nsi.readthedocs.io/en/latest/nodes.html#the-environment-node) node and instance an emissive *environment* [OSL](https://opensource.imageworks.com/osl.html) shader.
ctx.create("environment", nsi::NodeType::Environment, &[]);
ctx.connect("environment", "", ".root", "objects", &[]);
ctx.create("env_attrib", nsi::NodeType::Attributes, &[]);
ctx.connect("env_attrib", "", "environment", "geometryattributes", &[]);
ctx.set_attribute("env_attrib", &[nsi::integer!("visibility.camera", 0)]);
ctx.create("env_shader", nsi::NodeType::Shader, &[]);
ctx.connect("env_shader", "", "env_attrib", "surfaceshader", &[]);
ctx.set_attribute(
    "env_shader",
    &[
        nsi::string!("shaderfilename", "${DELIGHT}/osl/environmentLight"),
        nsi::float!("intensity", 1.5),
    ],
);
ctx.set_attribute(
    "env_shader",
    &[nsi::string!("image", "/Users/moritz/code/crates/nsi/assets/wooden_lounge_1k.tdl")],
);
And finally, a camera transform & a [camera](https://nsi.readthedocs.io/en/latest/nodes.html#camera-nodes).
ctx.create("camera_xform", nsi::NodeType::Transform, &[]);
ctx.connect("camera_xform", "", ".root", "objects", &[]);
ctx.set_attribute(
    "camera_xform",
    &[nsi::double_matrix!(
        "transformationmatrix",
        &[1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 5., 1.,]
    )],
);
ctx.create("camera", nsi::NodeType::PerspectiveCamera, &[]);
ctx.connect("camera", "", "camera_xform", "objects", &[]);
ctx.set_attribute("camera", &[nsi::float!("fov", 40.)]);
We hook up a [screen](https://nsi.readthedocs.io/en/latest/nodes.html#the-screen-node) to the camera and set the resolution and the antialiasing samples. We also adjust the shading quality for the whole scene on the [global](https://nsi.readthedocs.io/en/latest/nodes.html#the-global-node) node.
ctx.create("screen", nsi::NodeType::Screen, &[]);
ctx.connect("screen", "", "camera", "screens", &[]);
ctx.set_attribute(
    "screen",
    &[
        nsi::integers!("resolution", &[400,
ctx.set_attribute(
    ".global",
    &[
        nsi::integer!("quality.shadingsamples", 64),
    ],
);

And we’re ready to render the screen to our notebook.

ctx.as_jupyter("screen");

3DL INFO Loaded "/Applications/3Delight/osl/environmentLight.oso" (took 0.00s)
3DL INFO Loaded "/Applications/3Delight/osl/dlPrincipled.oso" (took 0.01s)

Here we describe three NumPy array-creation functions:
> 1) np.ones()
> 2) np.zeros()
> 3) np.empty()

import numpy as np
# Creating a two-dimensional array
arr_2d = np.array([[1,1,1],[1,1,1],[1,1,1]])
arr_2d
# Instead of filling the array manually we can use built-in functions from the np module
# one dimension
arr_1s = np.ones(5)
print(arr_1s)
arr_1s.dtype
# two dimensions
arr_ones = np.ones((3,4))  # rows first, then columns
print(arr_ones)
# two dimensions with a data type
a_ones = np.ones((3,5), dtype=int)  # 3 rows and 5 columns
print(a_ones)
# two dimensions with data type int
arr_zeros = np.zeros((4,5), dtype=int)  # 4 rows and 5 columns
print(arr_zeros)
# two dimensions with data type bool
ar_zeros = np.zeros((4,3), dtype=bool)
ar_zeros
arr_zeros = np.zeros((4,3), dtype=str)
arr_zeros  # an empty string, like 0, evaluates to False in Python

**Phase kickback**

import numpy as np
# Importing standard Qiskit libraries
from qiskit import QuantumCircuit, transpile, Aer, IBMQ, QuantumRegister, ClassicalRegister, execute, BasicAer
from qiskit.tools.jupyter import *
from qiskit.visualization import *
from ibm_quantum_widgets import *
from qiskit.providers.aer import QasmSimulator
# Loading your IBM Quantum account(s)
provider = IBMQ.load_account()
import math
%matplotlib inline

# Set up the program
reg1 = QuantumRegister(2, name='reg1')
reg2 = QuantumRegister(1, name='reg2')
qc = QuantumCircuit(reg1, reg2)
qc.h(reg1)  # put reg1 into a superposition of 0,1,2,3
qc.cu1(math.pi/4, reg1[0], reg2)
qc.cu1(math.pi/2, reg1[1], reg2)
backend = BasicAer.get_backend('statevector_simulator')
job = execute(qc, backend)
result = job.result()
outputstate = result.get_statevector(qc, decimals=3)
print(outputstate)
qc.draw()  # draw the circuit

[0.5+0.j 0.5-0.j 0.5+0.j 0.5-0.j 0. +0.j 0. +0.j 0. +0.j 0. +0.j]

Comparisons

Compare custom runs

Here we compare results for one fold.

constraint = "1h8c"
results_dir = "."
output_dir = "."
included_frameworks = []
excluded_frameworks = []
frameworks_sort_key = None
# frameworks_sort_key = lambda f: definitions[f]['key'] if 'key' in definitions[f] else f.lower()
frameworks_labels = None
# frameworks_labels = lambda l: definitions[l]['framework'].lower()
duplicates_handling = 'fail'  # accepted values: 'fail', 'keep_first', 'keep_last', 'keep_none'
imputation = None
normalization = None
# normalization = (0, 'h2o', 'mean')
row_filter = None
# row_filter = lambda r: r.fold == 0  #!
r is a pd.Series title_extra = "" # register_colormap(config.colormap, ('colorblind', [1, 0, 2, 3, 4, 5])) # this cell is an example showing how to use/customize this notebook depending on your results results_dir = "../results" output_dir = "./tmp" duplicates_handling = 'keep_last' normalization = (0, 'constantpredictor', 'mean') # row_filter = lambda r: ~r.task.isin(['kddcup09_appetency', 'colleges']) definitions = dict( constantpredictor=dict( ref = True, framework='constantpredictor_enc', results_files=glob.glob(f"{results_dir}/constantpredictor*/scores/results.csv") ), autogluon=dict( framework='AutoGluon', results_files=glob.glob(f"{results_dir}/autogluon*/scores/results.csv") ), autosklearn=dict( framework='autosklearn', results_files=glob.glob(f"{results_dir}/autosklearn*/scores/results.csv") ), h2oautoml=dict( framework='H2OAutoML', results_files=glob.glob(f"{results_dir}/h2oautoml*/scores/results.csv") ), tpot=dict( framework='TPOT', results_files=glob.glob(f"{results_dir}/tpot*/scores/results.csv") ) ) definitions runs = {k:v for k, v in definitions.items() if (k in included_frameworks if included_frameworks else True) and k not in excluded_frameworks} runs def results_as_df(results_dict, row_filter=None): def apply_filter(res, filtr): r = res.results return r.loc[filtr(r)] if row_filter is None: row_filter = lambda r: True return pd.concat([apply_filter(res, lambda r: (r.framework==name) & row_filter(r)) for name, res in results_dict.items() if res is not None]) ref_results = {name: prepare_results(run['results_files'], renamings={run['framework']: name}, exclusions=excluded_frameworks, normalization=normalization, duplicates_handling=duplicates_handling, ) for name, run in runs.items() if runs[name].get('ref', False)} all_ref_res = results_as_df(ref_results, row_filter) runs_results = {name: prepare_results(run['results_files'], renamings={run['framework']: name}, exclusions=excluded_frameworks, imputation=imputation, normalization=normalization, ref_results=all_ref_res, duplicates_handling=duplicates_handling ) for name, run in runs.items() if name not in ref_results} all_res = pd.concat([ all_ref_res, results_as_df(runs_results, row_filter) ]) from functools import reduce metadata = reduce(lambda l, r: {**r, **l}, [res.metadata for res in list(ref_results.values())+list(runs_results.values()) if res is not None], {}) # metadata = next(res for res in ref_results.values()).metadata problem_types = pd.DataFrame(m.__dict__ for m in metadata.values())['type'].unique().tolist() render_metadata(metadata, filename=create_file(output_dir, "datasets", results_group, "metadata.csv")) res_summary = render_summary('result', results=all_res) res_summary.to_csv(create_file(output_dir, "tables", "results_summary.csv")) score_summary = render_summary('score', results=all_res) score_summary.to_csv(create_file(output_dir, "tables", "score_summary.csv")) models_summary = render_summary('models_count', results=all_res) models_summary.to_csv(create_file(output_dir, "tables", "models_summary.csv")) if normalization: norm_score_summary = render_summary('norm_score', results=all_res) norm_score_summary.to_csv(create_file(output_dir, "tables", "normalized_score_summary.csv")) benchmark_leaderboard = render_leaderboard('score', results=all_res, aggregate=True) benchmark_leaderboard.to_csv(create_file(output_dir, "tables", "benchmark_leaderboard.csv")) if 'binary' in problem_types: fig = draw_score_heatmap('score', results=all_res, type_filter='binary', metadata=metadata, x_labels=frameworks_labels or 
True, x_sort_by=frameworks_sort_key, y_sort_by='nrows', title=f"Scores ({binary_score_label}) on {results_group} binary classification problems{title_extra}", center=0.5 ); savefig(fig, create_file(output_dir, "visualizations", "binary_score_heat.png")) if 'multiclass' in problem_types: fig = draw_score_heatmap('score', results=all_res, type_filter='multiclass', metadata=metadata, x_labels=frameworks_labels or True, x_sort_by=frameworks_sort_key, y_sort_by='nrows', title=f"Scores ({multiclass_score_label}) on {results_group} multi-class classification problems{title_extra}", center=0 ); savefig(fig, create_file(output_dir, "visualizations", "multiclass_score_heat.png")) if 'regression' in problem_types: fig = draw_score_heatmap('score', results=all_res, type_filter='regression', metadata=metadata, x_labels=frameworks_labels or True, x_sort_by=frameworks_sort_key, y_sort_by='nrows', title=f"Scores ({regression_score_label}) on {results_group} regression problems{title_extra}", center=0 ); savefig(fig, create_file(output_dir, "visualizations", "regression_score_heat.png")) render_colormap(config.colormap) if 'binary' in problem_types: fig = draw_score_barplot('score', results=all_res, type_filter='binary', metadata=metadata, x_sort_by=tasks_sort_by, ylabel=binary_score_label, ylim=dict(bottom=.5), hue_sort_by=frameworks_sort_key, ci=95, title=f"Scores ({binary_score_label}) on {results_group} binary classification problems{title_extra}", legend_loc='lower center', legend_labels=frameworks_labels, ); savefig(fig, create_file(output_dir, "visualizations", "binary_score_barplot.png")) if 'multiclass' in problem_types: fig = draw_score_barplot('score', results=all_res, type_filter='multiclass', metadata=metadata, x_sort_by=tasks_sort_by, ylabel=multiclass_score_label, ylim=dict(top=0.1), hue_sort_by=frameworks_sort_key, ci=95, title=f"Scores ({multiclass_score_label}) on {results_group} multiclass classification problems{title_extra}", legend_loc='lower center', legend_labels=frameworks_labels, ); savefig(fig, create_file(output_dir, "visualizations", "multiclass_score_barplot.png")) if 'regression' in problem_types: fig = draw_score_barplot('score', results=all_res, type_filter='regression', metadata=metadata, x_sort_by=tasks_sort_by, yscale='symlog', ylabel=regression_score_label, ylim=dict(top=0.1), hue_sort_by=frameworks_sort_key, ci=95, title=f"Scores ({regression_score_label}) on {results_group} regression classification problems{title_extra}", legend_loc='lower center', legend_labels=frameworks_labels, size=(8, 6), ); savefig(fig, create_file(output_dir, "visualizations", "regression_score_barplot.png")) if 'binary' in problem_types: fig = draw_score_pointplot('score', results=all_res, type_filter='binary', metadata=metadata, x_sort_by=tasks_sort_by, ylabel=binary_score_label, ylim=dict(bottom=.5), hue_sort_by=frameworks_sort_key, join='none', marker='hline_xspaced', ci=95, title=f"Scores ({binary_score_label}) on {results_group} binary classification problems{title_extra}", legend_loc='lower center', legend_labels=frameworks_labels, ); savefig(fig, create_file(output_dir, "visualizations", "binary_score_pointplot.png")) if 'multiclass' in problem_types: fig = draw_score_pointplot('score', results=all_res, type_filter='multiclass', metadata=metadata, x_sort_by=tasks_sort_by, ylabel=multiclass_score_label, hue_sort_by=frameworks_sort_key, join='none', marker='hline_xspaced', ci=95, title=f"Scores ({multiclass_score_label}) on {results_group} multiclass classification 
problems{title_extra}", legend_loc='lower center', legend_labels=frameworks_labels, ); savefig(fig, create_file(output_dir, "visualizations", "multiclass_score_pointplot.png")) if 'regression' in problem_types: fig = draw_score_pointplot('score', results=all_res, type_filter='regression', metadata=metadata, x_sort_by=tasks_sort_by, ylabel=regression_score_label, yscale='symlog', ylim=dict(top=0.1), hue_sort_by=frameworks_sort_key, join='none', marker='hline_xspaced', ci=95, title=f"Scores ({regression_score_label}) on {results_group} regression classification problems{title_extra}", legend_loc='lower center', legend_labels=frameworks_labels, size=(8, 6), ); savefig(fig, create_file(output_dir, "visualizations", "regression_score_pointplot.png")) if 'binary' in problem_types: fig = draw_score_stripplot('score', results=all_res.sort_values(by=['framework']), type_filter='binary', metadata=metadata, xlabel=binary_score_label, y_sort_by=tasks_sort_by, hue_sort_by=frameworks_sort_key, title=f"Scores ({binary_score_label}) on {results_group} binary classification problems{title_extra}", legend_labels=frameworks_labels, ); savefig(fig, create_file(output_dir, "visualizations", "binary_score_stripplot.png")) if 'multiclass' in problem_types: fig = draw_score_stripplot('score', results=all_res.sort_values(by=['framework']), type_filter='multiclass', metadata=metadata, xlabel=multiclass_score_label, xscale='symlog', y_sort_by=tasks_sort_by, hue_sort_by=frameworks_sort_key, title=f"Scores ({multiclass_score_label}) on {results_group} multi-class classification problems{title_extra}", legend_labels=frameworks_labels, ); savefig(fig, create_file(output_dir, "visualizations", "multiclass_score_stripplot.png")) if 'regression' in problem_types: fig = draw_score_stripplot('score', results=all_res, type_filter='regression', metadata=metadata, xlabel=regression_score_label, xscale='symlog', y_sort_by=tasks_sort_by, hue_sort_by=frameworks_sort_key, title=f"Scores ({regression_score_label}) on {results_group} regression problems{title_extra}", legend_labels=frameworks_labels, ); savefig(fig, create_file(output_dir, "visualizations", "regression_score_stripplot.png")) if 'binary' in problem_types and normalization: fig = draw_score_stripplot('norm_score', results=all_res, type_filter='binary', metadata=metadata, xlabel=f"rel. {binary_score_label}", y_sort_by='nrows', hue_sort_by=frameworks_sort_key, title=f"Relative scores ({binary_score_label}) on {results_group} binary classification problems{title_extra}", legend_labels=frameworks_labels, ); savefig(fig, create_file(output_dir, "visualizations", "binary_rel_score_stripplot.png")) if 'multiclass' in problem_types and normalization: fig = draw_score_stripplot('norm_score', results=all_res, type_filter='multiclass', metadata=metadata, xlabel=f"rel. {multiclass_score_label}", xscale='symlog', y_sort_by='nrows', hue_sort_by=frameworks_sort_key, title=f"Relative scores ({multiclass_score_label}) on {results_group} multi-class classification problems{title_extra}", legend_labels=frameworks_labels, ); savefig(fig, create_file(output_dir, "visualizations", "multiclass_rel_score_stripplot.png")) if 'regression' in problem_types and normalization: fig = draw_score_stripplot('norm_score', results=all_res, type_filter='regression', metadata=metadata, xlabel=f"rel. 
{regression_score_label}", y_sort_by='nrows', hue_sort_by=frameworks_sort_key, title=f"Relative scores ({regression_score_label}) on {results_group} regression problems{title_extra}", legend_labels=frameworks_labels, ); savefig(fig, create_file(output_dir, "visualizations", "regression_rel_score_stripplot.png"))The cake is not a lie!======================Commander Lambda has had an incredibly successful week: she completed the first test run of her LAMBCHOP doomsday device, she captured six key members of the Bunny Rebellion, and she beat her personal high score in Tetris. To celebrate, she's ordered cake for everyone - even the lowliest of minions! But competition among minions is fierce, and if you don't cut exactly equal slices of cake for everyone, you'll get in big trouble. The cake is round, and decorated with M&Ms in a circle around the edge. But while the rest of the cake is uniform, the M&Ms are not: there are multiple colors, and every minion must get exactly the same sequence of M&Ms. Commander Lambda hates waste and will not tolerate any leftovers, so you also want to make sure you can serve the entire cake.To help you best cut the cake, you have turned the sequence of colors of the M&Ms on the cake into a string: each possible letter (between a and z) corresponds to a unique color, and the sequence of M&Ms is given clockwise (the decorations form a circle around the outer edge of the cake).Write a function called solution(s) that, given a non-empty string less than 200 characters in length describing the sequence of M&Ms, returns the maximum number of equal parts that can be cut from the cake without leaving any leftovers.------------ Python cases --Input:solution.solution("abcabcabcabc")Output: 4Input:solution.solution("abccbaabccba")Output: 2def cutCake(l, n): """Yield n number of sequential chunks from l.""" d, r = divmod(len(l), n) for i in range(n): si = (d+1)*(i if i < r else r) + d*(0 if i < r else i - r) yield l[si:si+(d+1 if i < r else d)] def spinCake (s): """Shift""" temp = s[0] for i in range (0, len(s)-1): s[i] = s[i+1] s[-1] = temp return s def areMinionsHappy (s): """Check if all elements of a sequence are equal""" return all(x==s[0] for x in s) def solution(s): s = [char for char in s] maxPieces = 1 for n in range(2, len(s)+1): if (len(s)/n)%1!=0: continue for _ in range((int)(len(s)/n)): pieces = [] gen = cutCake(s, n) #print('.') for _ in range(n): pieces.append (next(gen)) if areMinionsHappy (pieces): maxPieces=n break else: s = spinCake (s) return maxPieces`clear_cache`用来清除中间生成的文档from utils.io_file import clear_cache用来保存中间文档,分句、分词from preprocess.sentence import save_splited_sentence from preprocess.vocabulary import save_parsed_words不同的特征提取from model.feature import extract_feature_from_text分类算法from model.subclass.SupportVM import SVM_classifier from model.subclass.GaussianNB import GNB_classifier from model.subclass.MLPerceptron import MLP_classifier from model.subclass.StochasticGD import SGD_classifier from model.subclass.LinearDA import LDA_classifier from model.subclass.DecisionTree import DTree_classifier from model.subclass.KNearest import KN_classifier分句子与分词,中间结果保存在`data/sent`与`data/jieba`save_splited_sentence() save_parsed_words('jieba')提取全书120回合每一回的特征向量,保存在矩阵`matrix120`中matrix120 = [ extract_feature_from_text('bag of words', ['jieba']), extract_feature_from_text('sentence length', []), ]准备分类器classifier_lst = [ SVM_classifier(), GNB_classifier(), MLP_classifier(['lbfgs', 1e-5, (256, 64, 64, 8, 2), 1]), SGD_classifier(["hinge", "l2"]), 
LDA_classifier(), DTree_classifier(), KN_classifier([20]) ]对每一个特征矩阵、每一个分类器进行训练、测试、汇报for m in matrix120: for c in classifier_lst: c.read(m) c.train() c.predict() c.report()Please input your directory for the top level folderfolder name : SUBMISSION MODELdir_ = 'INPUT-PROJECT-DIRECTORY/submission_model/' # input only here raw_data_dir = dir_+'2. data/' processed_data_dir = dir_+'2. data/processed/'raw_data_dir = '/home/ec2-user/SageMaker/efs/Hilaf/M5/data/'processed_data_dir = '/home/ec2-user/SageMaker/efs/Hilaf/M5/data/processed/' 1. Main setup# General imports import numpy as np import pandas as pd import os, sys, gc, time, warnings, pickle, psutil, random from math import ceil from sklearn.preprocessing import LabelEncoder from tqdm import tqdm warnings.filterwarnings('ignore') ## Simple "Memory profilers" to see memory usage def get_memory_usage(): return np.round(psutil.Process(os.getpid()).memory_info()[0]/2.**30, 2) def sizeof_fmt(num, suffix='B'): for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']: if abs(num) < 1024.0: return "%3.1f%s%s" % (num, unit, suffix) num /= 1024.0 return "%.1f%s%s" % (num, 'Yi', suffix) ## Memory Reducer # :df pandas dataframe to reduce size # type: pd.DataFrame() # :verbose # type: bool def reduce_mem_usage(df, verbose=True): numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] start_mem = df.memory_usage().sum() / 1024**2 for col in df.columns: col_type = df[col].dtypes if col_type in numerics: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) end_mem = df.memory_usage().sum() / 1024**2 if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem)) return df ## Merging by concat to not lose dtypes def merge_by_concat(df1, df2, merge_on): merged_gf = df1[merge_on] merged_gf = merged_gf.merge(df2, on=merge_on, how='left') new_columns = [col for col in list(merged_gf) if col not in merge_on] df1 = pd.concat([df1, merged_gf[new_columns]], axis=1) return df1 ########################### Vars ################################################################################# TARGET = 'sales' # Our main target END_TRAIN = 1941 # Last day in train set MAIN_INDEX = ['id','d'] # We can identify item by these columns2. 
Part 1- Melting train data => grid_part_1- creating price features => grid_part_2- creating calendar features => grid_part_3########################### Load Data ################################################################################# print('Load Main Data') # Here are reafing all our data # without any limitations and dtype modification train_df = pd.read_csv(raw_data_dir+'sales_train_evaluation.csv') prices_df = pd.read_csv(raw_data_dir+'sell_prices.csv') calendar_df = pd.read_csv(raw_data_dir+'calendar.csv') ########################### Make Grid ################################################################################# print('Create Grid') # We can tranform horizontal representation representation 바꾸기 # to vertical "view" # Our "index" will be 'id','item_id','dept_id','cat_id','store_id','state_id' # and labels are 'd_' coulmns index_columns = ['id','item_id','dept_id','cat_id','store_id','state_id'] grid_df = pd.melt(train_df, id_vars = index_columns, var_name = 'd', value_name = TARGET) # If we look on train_df we se that # we don't have a lot of traning rows # but each day can provide more train data print('Train rows:', len(train_df), len(grid_df)) # To be able to make predictions # we need to add "test set" to our grid add_grid = pd.DataFrame() for i in range(1,29): temp_df = train_df[index_columns] temp_df = temp_df.drop_duplicates() temp_df['d'] = 'd_'+ str(END_TRAIN+i) temp_df[TARGET] = np.nan add_grid = pd.concat([add_grid,temp_df]) grid_df = pd.concat([grid_df,add_grid]) grid_df = grid_df.reset_index(drop=True) # Remove some temoprary DFs del temp_df, add_grid # We will not need original train_df # anymore and can remove it del train_df # You don't have to use df = df construction # you can use inplace=True instead. # like this # grid_df.reset_index(drop=True, inplace=True) # Let's check our memory usage print("{:>20}: {:>8}".format('Original grid_df',sizeof_fmt(grid_df.memory_usage(index=True).sum()))) # We can free some memory # by converting "strings" to categorical # it will not affect merging and # we will not lose any valuable data for col in index_columns: grid_df[col] = grid_df[col].astype('category') # Let's check again memory usage print("{:>20}: {:>8}".format('Reduced grid_df',sizeof_fmt(grid_df.memory_usage(index=True).sum()))) ########################### Product Release date ################################################################################# print('Release week') # It seems that leadings zero values # in each train_df item row # are not real 0 sales but mean # absence for the item in the store # we can safe some memory by removing # such zeros # Prices are set by week # so it we will have not very accurate release week release_df = prices_df.groupby(['store_id','item_id'])['wm_yr_wk'].agg(['min']).reset_index() release_df.columns = ['store_id','item_id','release'] # Now we can merge release_df grid_df = merge_by_concat(grid_df, release_df, ['store_id','item_id']) del release_df # We want to remove some "zeros" rows # from grid_df # to do it we need wm_yr_wk column # let's merge partly calendar_df to have it grid_df = merge_by_concat(grid_df, calendar_df[['wm_yr_wk','d']], ['d']) # Now we can cutoff some rows # and safe memory grid_df = grid_df[grid_df['wm_yr_wk']>=grid_df['release']] grid_df = grid_df.reset_index(drop=True) # Let's check our memory usage print("{:>20}: {:>8}".format('Original grid_df',sizeof_fmt(grid_df.memory_usage(index=True).sum()))) # Should we keep release week # as one of the features? 
# Only good CV can give the answer. # Let's minify the release values. # Min transformation will not help here # as int16 -> Integer (-32768 to 32767) # and our grid_df['release'].max() serves for int16 # but we have have an idea how to transform # other columns in case we will need it grid_df['release'] = grid_df['release'] - grid_df['release'].min() grid_df['release'] = grid_df['release'].astype(np.int16) # Let's check again memory usage print("{:>20}: {:>8}".format('Reduced grid_df',sizeof_fmt(grid_df.memory_usage(index=True).sum()))) ########################### Save part 1 ################################################################################# print('Save Part 1') # We have our BASE grid ready # and can save it as pickle file # for future use (model training) grid_df.to_pickle(processed_data_dir+'grid_part_1.pkl') print('Size:', grid_df.shape) ########################### Prices ################################################################################# print('Prices') # We can do some basic aggregations prices_df['price_max'] = prices_df.groupby(['store_id','item_id'])['sell_price'].transform('max') prices_df['price_min'] = prices_df.groupby(['store_id','item_id'])['sell_price'].transform('min') prices_df['price_std'] = prices_df.groupby(['store_id','item_id'])['sell_price'].transform('std') prices_df['price_mean'] = prices_df.groupby(['store_id','item_id'])['sell_price'].transform('mean') # and do price normalization (min/max scaling) prices_df['price_norm'] = prices_df['sell_price']/prices_df['price_max'] # Some items are can be inflation dependent # and some items are very "stable" prices_df['price_nunique'] = prices_df.groupby(['store_id','item_id'])['sell_price'].transform('nunique') prices_df['item_nunique'] = prices_df.groupby(['store_id','sell_price'])['item_id'].transform('nunique') # I would like some "rolling" aggregations # but would like months and years as "window" calendar_prices = calendar_df[['wm_yr_wk','month','year']] calendar_prices = calendar_prices.drop_duplicates(subset=['wm_yr_wk']) # distinct(.keep_all = True) prices_df = prices_df.merge(calendar_prices[['wm_yr_wk','month','year']], on=['wm_yr_wk'], how='left') del calendar_prices # Now we can add price "momentum" (some sort of) # Shifted by week # by month mean # by year mean prices_df['price_momentum'] = prices_df['sell_price']/prices_df.groupby(['store_id','item_id'])['sell_price'].transform(lambda x: x.shift(1)) prices_df['price_momentum_m'] = prices_df['sell_price']/prices_df.groupby(['store_id','item_id','month'])['sell_price'].transform('mean') prices_df['price_momentum_y'] = prices_df['sell_price']/prices_df.groupby(['store_id','item_id','year'])['sell_price'].transform('mean') del prices_df['month'], prices_df['year'] grid_df = reduce_mem_usage(grid_df) prices_df = reduce_mem_usage(prices_df) ########################### Merge prices and save part 2 ################################################################################# print('Merge prices and save part 2') # Merge Prices original_columns = list(grid_df) grid_df = grid_df.merge(prices_df, on=['store_id','item_id','wm_yr_wk'], how='left') keep_columns = [col for col in list(grid_df) if col not in original_columns] grid_df = grid_df[MAIN_INDEX+keep_columns] grid_df = reduce_mem_usage(grid_df) # Safe part 2 grid_df.to_pickle(processed_data_dir+'grid_part_2.pkl') print('Size:', grid_df.shape) # We don't need prices_df anymore del prices_df, grid_df # We can remove new columns # or just load part_1 grid_df = 
pd.read_pickle(processed_data_dir+'grid_part_1.pkl') ########################### Merge calendar ################################################################################# grid_df = grid_df[MAIN_INDEX] # Merge calendar partly icols = ['date', 'd', 'event_name_1', 'event_type_1', 'event_name_2', 'event_type_2', 'snap_CA', 'snap_TX', 'snap_WI'] grid_df = grid_df.merge(calendar_df[icols], on=['d'], how='left') # Minify data # 'snap_' columns we can convert to bool or int8 icols = ['event_name_1', 'event_type_1', 'event_name_2', 'event_type_2', 'snap_CA', 'snap_TX', 'snap_WI'] for col in icols: grid_df[col] = grid_df[col].astype('category') # Convert to DateTime grid_df['date'] = pd.to_datetime(grid_df['date']) # Make some features from date grid_df['tm_d'] = grid_df['date'].dt.day.astype(np.int8) grid_df['tm_w'] = grid_df['date'].dt.week.astype(np.int8) grid_df['tm_m'] = grid_df['date'].dt.month.astype(np.int8) grid_df['tm_y'] = grid_df['date'].dt.year grid_df['tm_y'] = (grid_df['tm_y'] - grid_df['tm_y'].min()).astype(np.int8) grid_df['tm_wm'] = grid_df['tm_d'].apply(lambda x: ceil(x/7)).astype(np.int8) # 오늘 몇째주? grid_df['tm_dw'] = grid_df['date'].dt.dayofweek.astype(np.int8) grid_df['tm_w_end'] = (grid_df['tm_dw']>=5).astype(np.int8) # Remove date del grid_df['date'] ########################### Save part 3 (Dates) ################################################################################# print('Save part 3') # Safe part 3 grid_df.to_pickle(processed_data_dir+'grid_part_3.pkl') print('Size:', grid_df.shape) # We don't need calendar_df anymore del calendar_df del grid_df ########################### Some additional cleaning ################################################################################# ## Part 1 # Convert 'd' to int grid_df = pd.read_pickle(processed_data_dir+'grid_part_1.pkl') grid_df['d'] = grid_df['d'].apply(lambda x: x[2:]).astype(np.int16) # Remove 'wm_yr_wk' # as test values are not in train set del grid_df['wm_yr_wk'] grid_df.to_pickle(processed_data_dir+'grid_part_1.pkl') del grid_df3. 
Part2- Lag featrue- Lag rolling featuregrid_df = pd.read_pickle(processed_data_dir+'grid_part_1.pkl') # We need only 'id','d','sales' # to make lags and rollings grid_df = grid_df[['id','d','sales']] SHIFT_DAY = 28 # Lags # with 28 day shift start_time = time.time() print('Create lags') LAG_DAYS = [col for col in range(SHIFT_DAY,SHIFT_DAY+15)] grid_df = grid_df.assign(**{ '{}_lag_{}'.format(col, l): grid_df.groupby(['id'])[col].transform(lambda x: x.shift(l)) for l in LAG_DAYS for col in [TARGET] }) # Minify lag columns for col in list(grid_df): if 'lag' in col: grid_df[col] = grid_df[col].astype(np.float16) print('%0.2f min: Lags' % ((time.time() - start_time) / 60)) # Rollings # with 28 day shift start_time = time.time() print('Create rolling aggs') for i in [7,14,30,60,180]: print('Rolling period:', i) grid_df['rolling_mean_'+str(i)] = grid_df.groupby(['id'])[TARGET].transform(lambda x: x.shift(SHIFT_DAY).rolling(i).mean()).astype(np.float16) grid_df['rolling_std_'+str(i)] = grid_df.groupby(['id'])[TARGET].transform(lambda x: x.shift(SHIFT_DAY).rolling(i).std()).astype(np.float16) # Rollings # with sliding shift for d_shift in [1,7,14]: print('Shifting period:', d_shift) for d_window in [7,14,30,60]: col_name = 'rolling_mean_tmp_'+str(d_shift)+'_'+str(d_window) grid_df[col_name] = grid_df.groupby(['id'])[TARGET].transform(lambda x: x.shift(d_shift).rolling(d_window).mean()).astype(np.float16) print('%0.2f min: Lags' % ((time.time() - start_time) / 60)) ########################### Export ################################################################################# print('Save lags and rollings') grid_df.to_pickle(processed_data_dir+'lags_df_'+str(SHIFT_DAY)+'.pkl')Save lags and rollings4. Part3- Mean encoding feature########################### Apply on grid_df ################################################################################# # lets read grid from # https://www.kaggle.com/kyakovlev/m5-simple-fe # to be sure that our grids are aligned by index grid_df = pd.read_pickle(processed_data_dir+'grid_part_1.pkl') grid_df['sales'][grid_df['d']>(1941-28)] = np.nan base_cols = list(grid_df) icols = [ ['state_id'], ['store_id'], ['cat_id'], ['dept_id'], ['state_id', 'cat_id'], ['state_id', 'dept_id'], ['store_id', 'cat_id'], ['store_id', 'dept_id'], ['item_id'], ['item_id', 'state_id'], ['item_id', 'store_id'] ] for col in icols: print('Encoding', col) col_name = '_'+'_'.join(col)+'_' grid_df['enc'+col_name+'mean'] = grid_df.groupby(col)['sales'].transform('mean').astype(np.float16) grid_df['enc'+col_name+'std'] = grid_df.groupby(col)['sales'].transform('std').astype(np.float16) keep_cols = [col for col in list(grid_df) if col not in base_cols] grid_df = grid_df[['id','d']+keep_cols] ################################################################################# print('Save Mean/Std encoding') grid_df.to_pickle(processed_data_dir+'mean_encoding_df.pkl')Save Mean/Std encodingImport ScikitLearn, Pandas and Numpyimport sklearn import pandas as pd import numpy as np1. Read the Dataset using Pandastrain_data = pd.read_csv("data/image_train_data/image_train_data.csv") test_data = pd.read_csv("data/image_test_data/image_test_data.csv") train_data2. Exploratory Data Analysistrain_data.head() train_data.info() train_data.describe() import seaborn as sns fig = sns.countplot(x='label', data=train_data)3. Data PreprocessingThe "deep_features" column and "image_array" column have there lists string represented. 
So, they need to be transformed to floats and integers respectively to be useddef filtering_image_array(row): y = row.loc['deep_features'] y = y.replace('[', '').replace(']','').replace(" ", ",").split(',') y = list(map(float, y)) row.loc['deep_features'] = y x = row.loc['image_array'] x = x.replace('[', '').replace(']','').replace(" ", ",").split(',') x = list(map(int, x)) row.loc['image_array'] = x return row train_data = train_data.apply(filtering_image_array, axis=1) test_data = test_data.apply(filtering_image_array, axis=1) train_data.head() im = train_data.loc[0]['image_array'] im[0:10]Plot images by indeximport matplotlib.pyplot as plt def plot_images_index(ids, data): for idx in ids: row = data.loc[idx] im = np.asarray(row.loc['image_array']) im.resize(32,32,3) plt.figure(figsize=(1, 1)) plt.imshow(im) plt.show() def plot_images_id(ids, data): for idx in ids: row = data.loc[data['id'] == idx] im = np.asarray(row.loc[:, 'image_array'].tolist()) im = np.resize(im, (32,32,3)) plt.figure(figsize=(1, 1)) plt.imshow(im) plt.show() x = [24, 33, 36, 70, 90] plot_images_id(x, train_data) x = [0, 1, 2, 3, 4] plot_images_index(x, train_data)Apply LabelEncoder() on the label column of both training and test datasets.from sklearn.preprocessing import LabelEncoder encoder = LabelEncoder() encoder.fit(train_data['label']) def encodin_labels(row): encoded_label = encoder.transform([row['label']]) row['encoded_label'] = encoded_label[0] return row train_data = train_data.apply(encodin_labels, axis=1) train_data.head() def encodin_labels(row): encoded_label = encoder.transform([row['label']]) row['encoded_label'] = encoded_label[0] return row test_data = test_data.apply(encodin_labels, axis=1)4. Logistic Regression Pipelinefrom sklearn.pipeline import Pipeline from sklearn.compose import ColumnTransformer from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LogisticRegression pipeline_simple = Pipeline( steps=[ ('StandardScaler', StandardScaler()), ('Logistic_Regression', LogisticRegression(multi_class='multinomial', max_iter=1000)) ], verbose=True ) pipeline_complex = Pipeline( steps=[ ('StandardScaler', StandardScaler()), ('Logistic_Regression', LogisticRegression(multi_class='multinomial', max_iter=1000)) ], verbose=True ) from sklearn import set_config set_config(display='diagram')5. Pipeline Trainingfeatures_simple = 'image_array' label = 'encoded_label' X_train_simple = np.array(train_data[features_simple].tolist()) y_train = np.array(train_data[label].tolist()) pipeline_simple.fit(X_train_simple, y_train) features_simple = 'image_array' label = 'encoded_label' X_train_simple = np.array(train_data[features_simple].tolist()) y_train = np.array(train_data[label].tolist()) pipeline_simple.fit(X_train_simple, y_train) features_complex = 'deep_features' X_train_complex = np.array(train_data[features_complex].tolist()) y_train = np.array(train_data[label].tolist()) pipeline_complex.fit(X_train_complex, y_train)[Pipeline] .... (step 1 of 2) Processing StandardScaler, total= 0.2s [Pipeline] (step 2 of 2) Processing Logistic_Regression, total= 7.9sPLot Learning Curves Pipelinefrom sklearn.model_selection import learning_curve def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)): """ Generate a simple plot of the test and traning learning curve. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. 
title : string Title for the chart. X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples) or (n_samples, n_features), optional Target relative to X for classification or regression; None for unsupervised learning. ylim : tuple, shape (ymin, ymax), optional Defines minimum and maximum yvalues plotted. cv : integer, cross-validation generator, optional If an integer is passed, it is the number of folds (defaults to 3). Specific cross-validation objects can be passed, see sklearn.cross_validation module for the list of possible objects n_jobs : integer, optional Number of jobs to run in parallel (default 1). """ estimator.verbose = False plt.figure() plt.title(title) if ylim is not None: plt.ylim(*ylim) plt.xlabel("Training examples") plt.ylabel("Score") train_sizes, train_scores, test_scores = learning_curve( estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.grid() plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r") plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g") plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score") plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score") plt.legend(loc="best") estimator.verbose = True return plt6. Metricsfrom sklearn.metrics import classification_report from sklearn.metrics import accuracy_score X_test = np.array(test_data[features_simple].tolist()) y_pred_simple = pipeline_simple.predict(X_test) y_true = np.array(test_data[label].tolist()) print('Classification report:\n\n{}'.format( classification_report(test_data[label], y_pred_simple)) ) print('Accuracy Score:', accuracy_score(y_true, y_pred_simple)) X_test = np.array(test_data[features_complex].tolist()) y_pred_complex = pipeline_complex.predict(X_test) y_true = np.array(test_data[label].tolist()) print('Classification report:\n\n{}'.format( classification_report(test_data[label], y_pred_complex)) ) print('Accuracy Score:', accuracy_score(y_true, y_pred_complex)) fig = plot_learning_curve(pipeline_simple, 'Simple Pipeline Learning Curves', X_train_simple, y_train) fig = plot_learning_curve(pipeline_complex, 'Complex Pipeline Learning Curves', X_train_complex, y_train)Assignment Task 2: Create category-specific image retrieval modelsbird_data = train_data.query('label == "bird"') bird_data = bird_data.reset_index(drop=True) bird_data.head() cat_data = train_data.query('label == "cat"') cat_data = cat_data.reset_index(drop=True) cat_data.head() dog_data = train_data.query('label == "dog"') dog_data = dog_data.reset_index(drop=True) dog_data.head() automobile_data = train_data.query('label == "automobile"') automobile_data = automobile_data.reset_index(drop=True) automobile_data.head() from sklearn.neighbors import NearestNeighbors bird_model = Pipeline( steps=[ ('Nearest Neighbor', NearestNeighbors(metric='euclidean', algorithm='brute')) ], verbose=True ) cat_model = Pipeline( steps=[ ('Nearest Neighbor', NearestNeighbors(metric='euclidean', algorithm='brute')) ], verbose=True ) dog_model = Pipeline( steps=[ ('Nearest Neighbor', NearestNeighbors(metric='euclidean', 
algorithm='brute')) ], verbose=True ) automobile_model = Pipeline( steps=[ ('Nearest Neighbor', NearestNeighbors(metric='euclidean', algorithm='brute')) ], verbose=True ) X_train_bird = np.array(bird_data[features_complex].tolist()) bird_model.fit(X_train_bird) X_train_cat = np.array(cat_data[features_complex].tolist()) cat_model.fit(X_train_cat) X_train_dog = np.array(dog_data[features_complex].tolist()) dog_model.fit(X_train_dog) X_train_automobile = np.array(automobile_data[features_complex].tolist()) automobile_model.fit(X_train_automobile)[Pipeline] .. (step 1 of 1) Processing Nearest Neighbor, total= 0.0s3. A simple example of nearest-neighbors classification:def nearest_image(sample, type_data, pipeline): distances, indices = pipeline['Nearest Neighbor'].kneighbors(np.array(sample[features_complex]).reshape(1, -1)) neighbors = pd.DataFrame({'distance':distances[0].tolist(), 'index':indices[0].tolist()}) left_join = pd.merge(neighbors, type_data, how='left', left_on='index', right_index=True) plot_images_id([left_join.loc[0, 'id']], type_data) return left_join sample = test_data.loc[0] sample_list = [sample.id] plot_images_id(sample_list, test_data) table = nearest_image(sample, cat_data, cat_model) table['distance'].mean() sample = test_data.loc[0] sample_list = [sample.id] plot_images_id(sample_list, test_data) table = nearest_image(sample, dog_data, dog_model) table['distance'].mean()문제Some weights of the model checkpoint at bert-base-uncased were not used when initializing라는 에러를 계속 받고 있다. 찾아보니 fine-tuning이 제대로 되지 않았다는 뜻이라는데```pythonmodel = BertForSequenceClassification.from_pretrained( "beomi/kcbert-base", Use the 12-layer BERT model, with an uncased vocab. num_labels = 2, The number of output labels--2 for binary classification. You can increase this for multi-class tasks. output_attentions = False, Whether the model returns attentions weights. output_hidden_states = False, Whether the model returns all hidden-states.)```라는 코드로 맨 위에 리니어 클래시피케이션을 수행하는 레이어를 쌓았기에 문제가 되지 않는다 생각해서 진행했다. 하지만 관련 이슈에 대해서는 더 찾아보기로 할게요. epoch- 1epoch에서 90% accuracy를 달성했다...?# Get all of the model's parameters as a list of tuples. params = list(model.named_parameters()) print('The BERT model has {:} different named parameters.\n'.format(len(params))) print('==== Embedding Layer ====\n') for p in params[0:5]: print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size())))) print('\n==== First Transformer ====\n') for p in params[5:21]: print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size())))) print('\n==== Output Layer ====\n') for p in params[-4:]: print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size())))) # Note: AdamW is a class from the huggingface library (as opposed to pytorch) # I believe the 'W' stands for 'Weight Decay fix" optimizer = AdamW(model.parameters(), lr = 2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5 eps = 1e-8 # args.adam_epsilon - default is 1e-8. )Optimizer & Learning Rate SchedulerFor the purposes of fine-tuning, the authors recommend choosing from the following values (from Appendix A.3 of the BERT paper):Batch size: 16, 32Learning rate (Adam): 5e-5, 3e-5, 2e-5Number of epochs: 2, 3, 4파라미터 조절가능. batch size는 사이즈 때문에 작게 잡는게 좋을 것 같다. 64는 안됨.from transformers import get_linear_schedule_with_warmup # Number of training epochs. The BERT authors recommend between 2 and 4. # We chose to run for 4, but we'll see later that this may be over-fitting the # training data. epochs = 4 # Total number of training steps is [number of batches] x [number of epochs]. 
# (Note that this is not the same as the number of training samples). total_steps = len(train_dataloader) * epochs # Create the learning rate scheduler. scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps = 0, # Default value in run_glue.py num_training_steps = total_steps)Training LoopTraining:Unpack our data inputs and labelsLoad data onto the GPU for accelerationClear out the gradients calculated in the previous pass.In pytorch the gradients accumulate by default (useful for things like RNNs) unless you explicitly clear them out.Forward pass (feed input data through the network)Backward pass (backpropagation)Tell the network to update parameters with optimizer.step()Track variables for monitoring progressimport numpy as np # Function to calculate the accuracy of our predictions vs labels def flat_accuracy(preds, labels): pred_flat = np.argmax(preds, axis=1).flatten() labels_flat = labels.flatten() return np.sum(pred_flat == labels_flat) / len(labels_flat) import time import datetime def format_time(elapsed): ''' Takes a time in seconds and returns a string hh:mm:ss ''' # Round to the nearest second. elapsed_rounded = int(round((elapsed))) # Format as hh:mm:ss return str(datetime.timedelta(seconds=elapsed_rounded)) import random import numpy as np # This training code is based on the `run_glue.py` script here: # https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128 # Set the seed value all over the place to make this reproducible. seed_val = 42 random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) # We'll store a number of quantities such as training and validation loss, # validation accuracy, and timings. training_stats = [] # Measure the total training time for the whole run. total_t0 = time.time() # For each epoch... for epoch_i in range(0, epochs): # ======================================== # Training # ======================================== # Perform one full pass over the training set. print("") print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs)) print('Training...') # Measure how long the training epoch takes. t0 = time.time() # Reset the total loss for this epoch. total_train_loss = 0 # Put the model into training mode. Don't be mislead--the call to # `train` just changes the *mode*, it doesn't *perform* the training. # `dropout` and `batchnorm` layers behave differently during training # vs. test (source: https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch) model.train() # For each batch of training data... for step, batch in enumerate(train_dataloader): # Progress update every 40 batches. if step % 40 == 0 and not step == 0: # Calculate elapsed time in minutes. elapsed = format_time(time.time() - t0) # Report progress. print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed)) # Unpack this training batch from our dataloader. # # As we unpack the batch, we'll also copy each tensor to the GPU using the # `to` method. # # `batch` contains three pytorch tensors: # [0]: input ids # [1]: attention masks # [2]: labels b_input_ids = batch[0].to(device) b_input_mask = batch[1].to(device) b_labels = batch[2].to(device) # Always clear any previously calculated gradients before performing a # backward pass. PyTorch doesn't do this automatically because # accumulating the gradients is "convenient while training RNNs". 
# (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch) model.zero_grad() # Perform a forward pass (evaluate the model on this training batch). # The documentation for this `model` function is here: # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification # It returns different numbers of parameters depending on what arguments # arge given and what flags are set. For our useage here, it returns # the loss (because we provided labels) and the "logits"--the model # outputs prior to activation. loss, logits = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels) # Accumulate the training loss over all of the batches so that we can # calculate the average loss at the end. `loss` is a Tensor containing a # single value; the `.item()` function just returns the Python value # from the tensor. total_train_loss += loss.item() # Perform a backward pass to calculate the gradients. loss.backward() # Clip the norm of the gradients to 1.0. # This is to help prevent the "exploding gradients" problem. torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) # Update parameters and take a step using the computed gradient. # The optimizer dictates the "update rule"--how the parameters are # modified based on their gradients, the learning rate, etc. optimizer.step() # Update the learning rate. scheduler.step() # Calculate the average loss over all of the batches. avg_train_loss = total_train_loss / len(train_dataloader) # Measure how long this epoch took. training_time = format_time(time.time() - t0) print("") print(" Average training loss: {0:.2f}".format(avg_train_loss)) print(" Training epcoh took: {:}".format(training_time)) # ======================================== # Validation # ======================================== # After the completion of each training epoch, measure our performance on # our validation set. print("") print("Running Validation...") t0 = time.time() # Put the model in evaluation mode--the dropout layers behave differently # during evaluation. model.eval() # Tracking variables total_eval_accuracy = 0 total_eval_loss = 0 nb_eval_steps = 0 # Evaluate data for one epoch for batch in validation_dataloader: # Unpack this training batch from our dataloader. # # As we unpack the batch, we'll also copy each tensor to the GPU using # the `to` method. # # `batch` contains three pytorch tensors: # [0]: input ids # [1]: attention masks # [2]: labels b_input_ids = batch[0].to(device) b_input_mask = batch[1].to(device) b_labels = batch[2].to(device) # Tell pytorch not to bother with constructing the compute graph during # the forward pass, since this is only needed for backprop (training). with torch.no_grad(): # Forward pass, calculate logit predictions. # token_type_ids is the same as the "segment ids", which # differentiates sentence 1 and 2 in 2-sentence tasks. # The documentation for this `model` function is here: # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification # Get the "logits" output by the model. The "logits" are the output # values prior to applying an activation function like the softmax. (loss, logits) = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels) # Accumulate the validation loss. 
total_eval_loss += loss.item() # Move logits and labels to CPU logits = logits.detach().cpu().numpy() label_ids = b_labels.to('cpu').numpy() # Calculate the accuracy for this batch of test sentences, and # accumulate it over all batches. total_eval_accuracy += flat_accuracy(logits, label_ids) # Report the final accuracy for this validation run. avg_val_accuracy = total_eval_accuracy / len(validation_dataloader) print(" Accuracy: {0:.2f}".format(avg_val_accuracy)) # Calculate the average loss over all of the batches. avg_val_loss = total_eval_loss / len(validation_dataloader) # Measure how long the validation run took. validation_time = format_time(time.time() - t0) print(" Validation Loss: {0:.2f}".format(avg_val_loss)) print(" Validation took: {:}".format(validation_time)) # Record all statistics from this epoch. training_stats.append( { 'epoch': epoch_i + 1, 'Training Loss': avg_train_loss, 'Valid. Loss': avg_val_loss, 'Valid. Accur.': avg_val_accuracy, 'Training Time': training_time, 'Validation Time': validation_time } ) print("") print("Training complete!") print("Total training took {:} (h:mm:ss)".format(format_time(time.time()-total_t0))) PATH = '/home/aiffel-dj57/project' torch.save(model, PATH + 'kcbert.pt') # 전체 모델 저장 torch.save(model.state_dict(), PATH + 'model_state_dict.pt') # 모델 객체의 state_dict 저장 torch.save({ 'kcbert': model.state_dict(), 'optimizer': optimizer.state_dict() }, PATH + 'all.tar') # 여러 가지 값 저장, 학습 중 진행 상황 저장을 위해 epoch, loss 값 등 일반 scalar값 저장 가능 #이건 나중에 실행해보려고 했던거니 신경안쓰셔도 됩니다. import pandas as pd # Display floats with two decimal places. pd.set_option('precision', 2) # Create a DataFrame from our training statistics. df_stats = pd.DataFrame(data=training_stats) # Use the 'epoch' as the row index. df_stats = df_stats.set_index('epoch') # A hack to force the column headers to wrap. #df = df.style.set_table_styles([dict(selector="th",props=[('max-width', '70px')])]) # Display the table. df_stats '''''' import matplotlib.pyplot as plt % matplotlib inline import seaborn as sns # Use plot styling from seaborn. sns.set(style='darkgrid') # Increase the plot size and font size. sns.set(font_scale=1.5) plt.rcParams["figure.figsize"] = (12,6) # Plot the learning curve. plt.plot(df_stats['Training Loss'], 'b-o', label="Training") plt.plot(df_stats['Valid. Loss'], 'g-o', label="Validation") # Label the plot. plt.title("Training & Validation Loss") plt.xlabel("Epoch") plt.ylabel("Loss") plt.legend() plt.xticks([1, 2, 3, 4]) plt.show() '''''' #테스트셋에 평가해보기 import pandas as pd # Load the dataset into a pandas dataframe. test = pd.read_csv('nsmc/ratings_test.txt', sep='\t') # Report the number of sentences. print('Number of test sentences: {:,}\n'.format(df.shape[0])) # Create sentence and label lists sentences2 = test.document.values labels2 = test.label.values #왜 50,000이지 좀따 해결해볼게요.... #test sentence 수가 점점 늘어난다 뭔가 이상하다 ㅠㅠㅠㅠ(재부팅함) # Print the original sentence. print(' Original: ', sentences2[0]) # Print the sentence split into tokens. print('Tokenized: ', tokenizer.tokenize(sentences2[0])) # Print the sentence mapped to token ids. 
print('Token IDs: ', tokenizer.convert_tokens_to_ids(tokenizer.tokenize(sentences2[0]))) # BERT의 입력 형식에 맞게 토큰을 추가해줍니다 sentences = ["[CLS] " + str(sent) + " [SEP]" for sent in sentences2] sentences[:10] tokenized_texts = [tokenizer.tokenize(sentence) for sentence in sentences] print(tokenized_texts[:3]) from keras.preprocessing.sequence import pad_sequences MAX_LEN = 128 input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts] input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype='long', truncating='post', padding='post') input_ids[0] #영어는 토큰화가 엉망으로 되네 # Tokenize all of the sentences and map the tokens to thier word IDs. input_ids = [] attention_masks = [] # For every sentence... for sent in sentences: # `encode_plus` will: # (1) Tokenize the sentence. # (2) Prepend the `[CLS]` token to the start. # (3) Append the `[SEP]` token to the end. # (4) Map tokens to their IDs. # (5) Pad or truncate the sentence to `max_length` # (6) Create attention masks for [PAD] tokens. encoded_dict = tokenizer.encode_plus( sent, # Sentence to encode. add_special_tokens = True, # Add '[CLS]' and '[SEP]' max_length = 64, # Pad & truncate all sentences. pad_to_max_length = True, return_attention_mask = True, # Construct attn. masks. return_tensors = 'pt', # Return pytorch tensors. ) # Add the encoded sentence to the list. input_ids.append(encoded_dict['input_ids']) # And its attention mask (simply differentiates padding from non-padding). attention_masks.append(encoded_dict['attention_mask']) # Convert the lists into tensors. input_ids = torch.cat(input_ids, dim=0) attention_masks = torch.cat(attention_masks, dim=0) labels = torch.tensor(labels) # Set the batch size. batch_size = 32 # Create the DataLoader. prediction_data = TensorDataset(input_ids, attention_masks, labels) prediction_sampler = SequentialSampler(prediction_data) prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size) # Prediction on test set print('Predicting labels for {:,} test sentences...'.format(len(input_ids))) # Put model in evaluation mode model.eval() # Tracking variables predictions , true_labels = [], [] # Predict for batch in prediction_dataloader: # Add batch to GPU batch = tuple(t.to(device) for t in batch) # Unpack the inputs from our dataloader b_input_ids, b_input_mask, b_labels = batch # Telling the model not to compute or store gradients, saving memory and # speeding up prediction with torch.no_grad(): # Forward pass, calculate logit predictions outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask) logits = outputs[0] # Move logits and labels to CPU logits = logits.detach().cpu().numpy() label_ids = b_labels.to('cpu').numpy() # Store predictions and true labels predictions.append(logits) true_labels.append(label_ids) print(' DONE.') print('Positive samples: %d of %d (%.2f%%)' % (df.label.sum(), len(df.label), (df.label.sum() / len(df.label) * 100.0)))Positive samples: 25173 of 50000 (50.35%)This file contains some code used to generate images for my explanationsimport random from matplotlib import pyplot as plt import networkx as nx def random_unweighted(num_nodes): # Create a list of numbers up to the number of nodes, and shuffle it nodes = list(range(num_nodes)) random.shuffle(nodes) G = nx.Graph() for i in range(num_nodes-1): G.add_edge(nodes[i], nodes[i+1]) return G linear_graph = random_unweighted(5) nx.draw(linear_graph, with_labels=True, font_weight='bold', font_color = "white") plt.savefig("LinearGraph.png")Attributes and their 
meaning: - age: The person's age in years- sex: The person's sex (1 = male, 0 = female)- cp: The chest pain experienced (Value 1: typical angina, Value 2: atypical angina, Value 3: non-anginal pain, Value 4: asymptomatic)- trestbps: The person's resting blood pressure (mm Hg on admission to the hospital)- chol: The person's cholesterol measurement in mg/dl- fbs: The person's fasting blood sugar (> 120 mg/dl, 1 = true; 0 = false)- restecg: Resting electrocardiographic measurement (0 = normal, 1 = having ST-T wave abnormality, 2 = showing probable or definite left ventricular hypertrophy by Estes' criteria)- thalach: The person's maximum heart rate achieved- exang: Exercise induced angina (1 = yes; 0 = no)- oldpeak: ST depression induced by exercise relative to rest ('ST' relates to positions on the ECG plot. See more here)- slope: the slope of the peak exercise ST segment (Value 1: upsloping, Value 2: flat, Value 3: downsloping)- ca: The number of major vessels (0-3)- thal: A blood disorder called thalassemia (3 = normal; 6 = fixed defect; 7 = reversable defect)- target: Heart disease (0 = no, 1 = yes) Some of them are categorical - sex- cp- fbs- restecg- exang- slope- ca- thal- and target And the others are numerical - age- trestbps- chol- thalach- oldpeakdf = pd.read_csv("../data/raw/heart.csv") dfLet's have a closer look on datadf.info() RangeIndex: 303 entries, 0 to 302 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 age 303 non-null int64 1 sex 303 non-null int64 2 cp 303 non-null int64 3 trestbps 303 non-null int64 4 chol 303 non-null int64 5 fbs 303 non-null int64 6 restecg 303 non-null int64 7 thalach 303 non-null int64 8 exang 303 non-null int64 9 oldpeak 303 non-null float64 10 slope 303 non-null int64 11 ca 303 non-null int64 12 thal 303 non-null int64 13 target 303 non-null int64 dtypes: float64(1), int64(13) memory usage: 33.3 KBAs we can see there is no null valuesdf.describe()Let's have a closer loot at the correlation of featuresplt.figure(figsize=(20, 20)) sns.heatmap(df.corr(), annot=True, cmap='coolwarm')Positive correlation means that features are directly proportional, whereas negative correlation indicates inverse proportionality.The closer correlation to zero, the weaker is the linear relationship.So we can drop fbs feature due to its week correlation with target.df.drop('fbs', axis=1, inplace=True)Let's now consider data distribution accross different features It can be seen that most numerical features' distribution is close to Gaussian and than there are more men that women in the dataset.Age is also a very important risk factor._ = df.hist(figsize=(30, 50)) plt.figure(figsize=(10,6)) sns.boxplot(x=df['target'],y=df['age']) plt.show()Fitting models Let us try to build different models and compare them.Firstly, we need to process and prepare data.from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline from sklearn.preprocessing import OneHotEncoder from sklearn.base import BaseEstimator, TransformerMixin from sklearn.neighbors import KNeighborsClassifier class Columns(BaseEstimator, TransformerMixin): def __init__(self, names=None): self.names = names def fit(self, X, y=None, **fit_params): return self def transform(self, X): return X[self.names] X = df.drop('target', axis=1) y = df['target'] X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=0) numeric = ["age", "trestbps", "chol", "thalach", "oldpeak"] categorical = ["sex", "cp", "restecg", "exang", "slope", "ca", "thal"] pipe 
pipe = Pipeline([ ("features", FeatureUnion([ ('categorical', make_pipeline(Columns(names=categorical),OneHotEncoder(sparse=False))), ('numeric', make_pipeline(Columns(names=numeric),StandardScaler())), ])) ]) X_train = pipe.fit_transform(X_train) X_test = pipe.transform(X_test) models=[] models.append(('LR',LogisticRegression(solver='sag',max_iter=5000))) models.append(('LDA',LinearDiscriminantAnalysis())) models.append(('KNN',KNeighborsClassifier())) models.append(('DTC',DecisionTreeClassifier())) models.append(('NB',GaussianNB())) models.append(('SVM',SVC())) results=[] names=[] for name,model in models: kfold=KFold(n_splits=10) cv_results=cross_val_score(model,X_train,y_train,cv=kfold,scoring='accuracy') model.fit(X_train, y_train) results.append(cv_results) names.append(name) print("%s %f" % (name,cv_results.mean())) for name,model in models: predictions=model.predict(X_test) score = accuracy_score(y_test,predictions) print("%s %f" % (name,score))LR 0.885246 LDA 0.868852 KNN 0.868852 DTC 0.770492 NB 0.852459 SVM 0.852459Overall, the best model is Logistic Regression, while the worst is the Decision Tree Classifier.X_train.shapeSpam classification https://www.kaggle.com/uciml/sms-spam-collection-dataset Get the data First, read in the data from sklearn.feature_extraction.text import CountVectorizer from sklearn.model_selection import train_test_split import pandas as pd import numpy as np # read the data data_dir = "../input/" df = pd.read_csv(data_dir + '/spam.csv', encoding='latin-1') # split the data into a training set and a test set data_train, data_test, labels_train, labels_test = train_test_split( df.v2, df.v1, test_size=0.2, random_state=0) #print ('contents of each message after the split') #print (data_train[:10]) #print ('whether each message after the split is spam') #print (labels_train[:10])Build the vocabulary and count word occurrences under the two classes''' Store the vocabulary in a dictionary and give each word a unique id ''' def GetVocabulary(data): return Turn each document into a word vector''' Convert the text into a vector representation so it can be used in calculations ''' def Document2Vector(vocab_dict, data): return # convert every sentence in the training set into vector form Train naive Bayes to obtain the probability of each word in the training set''' Compute two kinds of probabilities on the training set: 1. the probability of a word under each class, e.g. P('email'|Spam) 2. the probability of each class, e.g. P(Spam) The implementation makes clever use of numpy arrays: 1. for each class, create a vector (a numpy array) the same length as the vocabulary, i.e. spam_word_counter and ham_word_counter 2. while iterating over each sentence, add that sentence's vector directly, accumulating the count of each word 3. after iterating over all sentences, divide by the total word count to obtain the probability of each word ''' def NaiveBayes_train(train_matrix,labels_train): return Predict on the test set''' Predict on the test set: following the formula, compute each example's probability under the two classes and choose the class with the larger probability as the prediction ''' def Predict(test_word_vector, p_spam_vector, p_spam, p_ham_vector, p_ham): return # evaluate the model from sklearn.metrics import accuracy_score,classification_report,confusion_matrix from sklearn.model_selection import cross_val_score #print (accuracy_score(labels_test, predictions)) #print (classification_report(labels_test, predictions)) #print (confusion_matrix(labels_test, predictions))> ** Import the required libraries :- **import re # regular expressions # for mathematical computation, organization and visualization of data import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline # for data preprocessing, model training and model evaluation from sklearn.feature_extraction.text import CountVectorizer from sklearn.model_selection import train_test_split from sklearn.naive_bayes import MultinomialNB from sklearn.model_selection import GridSearchCV from sklearn.metrics import confusion_matrix import nltk # for stemming and stopwords removal> ** Perform Exploratory Data Analysis :- **data = pd.read_csv('../input/amazon_alexa.tsv',sep='\t') data.shape data.head() plt.rcParams["figure.figsize"] = (20,10) data.groupby(['variation']).feedback.value_counts().plot(kind='bar') data['variation'].nunique() data.isna().sum()> ** Drop the unused Features :- **data.drop(['rating','date'], inplace=True, axis=1) data.head() import collections collections.Counter(data['feedback'])> ** Text Preprocessing :- **stop_words = set(nltk.corpus.stopwords.words('english')) sno = nltk.stem.SnowballStemmer('english') def cleanpunc(sentence): cleaned = re.sub(r"[?|!|\'|\"|#]", r"",sentence) cleaned = re.sub(r"[.|,|)|(|\|/]",r"",cleaned) return cleaned.lower() sentences = data['verified_reviews'].values reviews = [] for sent in sentences: cleaned_sent = cleanpunc(sent) sent_arr = cleaned_sent.split() output_sent = '' for word in sent_arr: if word not in stop_words: stemmed_word = sno.stem(word) output_sent = output_sent + ' ' + stemmed_word reviews.append(output_sent) reviews_text = pd.DataFrame({'reviews': reviews}) data = pd.concat([data,reviews_text], axis=1) data.head()> ** Resample unbalanced Data :- **data.drop(['verified_reviews'], axis=1,inplace=True) from sklearn.utils import resample data_majority = data[data.feedback == 1] data_minority = data[data.feedback == 0] data_minority_upsampled = resample(data_minority, replace=True,n_samples=2500,random_state=123) data_upsampled = pd.concat([data_majority, data_minority_upsampled]) final = pd.concat([data_upsampled, pd.get_dummies(data_upsampled['variation'],sparse=True)], axis=1) final.shape final.drop(['variation'], axis=1, inplace=True) final.head()> ** Apply Bag of Words on the Textual feature :- **count_vect = CountVectorizer(ngram_range=(1,2)) final_counts = count_vect.fit_transform(final['reviews'].values) print(final_counts.get_shape()) print(final.shape) final.drop(['reviews'],axis=1,inplace=True) rev_df = pd.DataFrame(final_counts.todense(),columns=count_vect.get_feature_names()) rev_df.shape final.reset_index(inplace=True, drop=True) final_df = pd.concat([final,rev_df], axis=1) final_df.shape> ** Separate dependent and independent Features :- **X = final_df.iloc[:,1:].values y = np.ravel(final_df.iloc[:,0:1].values)> ** Split data into Training and Testing parts :- **X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=123)> ** Training the model with 
GridSearchCV to find optimal Hyperparameter :- **clf = MultinomialNB() param_disb = { 'alpha': [10**-4,10**-3,10**-2,10**-1,10,1,10**2,10**3,10**4]} search = GridSearchCV(clf, param_grid=param_disb, cv=5) search.fit(X_train,y_train) print(search.best_estimator_)MultinomialNB(alpha=0.1, class_prior=None, fit_prior=True)> ** Training and Testing the model with optimal Hyperparameter :- **clf = MultinomialNB(alpha=0.1, class_prior=None, fit_prior=True) clf. fit(X_train, y_train) print(confusion_matrix(y_test, clf.predict(X_test))) print(f"Accuracy Score -> {clf.score(X_test,y_test)}")[[776 55] [ 14 935]] Accuracy Score -> 0.9612359550561798Problem definition Apply regression models to predict the house pricing Load the data#input df = pd.read_csv('data/house_pricing.csv') print(df.columns) print(df.shape) df.head()Index([u'id', u'date', u'price', u'bedrooms', u'bathrooms', u'sqft_living', u'sqft_lot', u'floors', u'waterfront', u'view', u'condition', u'grade', u'sqft_above', u'sqft_basement', u'yr_built', u'yr_renovated', u'zipcode', u'lat', u'long', u'sqft_living15', u'sqft_lot15', u'walking_score', u'transit_score', u'bike_score', u'median_income', u'mean_income', u'population'], dtype='object') (21165, 27)Feature Engineering# TODO: create 2 new features # TODO: select the features and the target X_columns = [] y_column = []Model Training# TODO: split the data: 80% for training and 20% for test # TODO: train a linear regression model # identigy the feature importance # importance = [] # for i in range(len(X_columns)): # importance.append([X_columns[i], model.feature_importances_[i]]) # pd.DataFrame(importance).sort_values(by=1, ascending=False)Model Evaluation# TODO: evaluate the model using the Mean Absolute Error # plot the results # plt.scatter(y_test, y_pred, alpha=0.3) # plt.show()Running Dask on the clusterThe dask frameworks enabling users to parallelize internal systemsNot all computations fit into a big dataframe. Dask exposes lower-level APIs letting you build custom systems for in-house applications. This helps parallelize python processes and dramatically accelerate their performanceDask Kubernetes deploys Dask workers on Kubernetes clusters using native Kubernetes APIs. It is designed to dynamically launch short-lived deployments of workers during the lifetime of a Python process.Check out this link https://kubernetes.dask.org/en/latest/ When user runs dask the frameworks start one or more pods running in parallel on the cluster. Users can define the number of nodes and the minimun and maximum number of pods that the dask framework opens upScale to zero is achieved by setting the minimum = 0. Setting it to zero delete the pods once the job is done and free up the resources!pip install dask distributed !pip install dask-kubernetes==0.10.0 from dask_kubernetes import KubeCluster cluster = KubeCluster.from_yaml('worker-spec.yml') cluster.scale_up(4) # specify number of nodes explicitly cluster.adapt(minimum=2, maximum=5) # or dynamically scale based on current workloadto view the pods that are running!kubectl -n default-tenant get pods | grep dask # Example usage import distributed import dask.array as da # Connect dask to the cluster client = distributed.Client(cluster) # Create an array and calculate the mean array = da.ones((1000, 1000, 1000), chunks=(100, 100, 10)) print(array.mean().compute()) # Should print 1.0Autoencoders What are autoencoders?Autoencoders are networks which have the same input and output. 
A set of data is fed to these networks and they are expected to recreate the input. However, what makes autoencoders interesting is that they compress the information into lower number of dimensions (a.k.a latent space) and then recreate the input using those dimensions. They can be used for dimensionality reduction similar to PCA, t-SNE, and Umap. Some of the advantages of using autoencoders compared to some of the other techniques are:- Flexibility: You can design the network based on what the problem demands.- Reversibility: Unlike methods such as t-SNE and UMAP you can convert data back to the initial space.- Non-linearity: Unlike linear methods such as PCA, it is capable of using non-linear transformation.Learning Objectives: - How AutoEncoders compress- How Variation Auto Encoder's use a latent space- What is a latent Space- using `Kullback–Leibler divergence` as a loss to measure the difference in two distributions- Applications of Auto Encoders StructureAutoencoders have two main components:1. Encoder: Converts data to latent space.2. Decoder: Converts the data back from latent space to its initial space.The architecture looks similar to the image below:We pass the input through the model and it will compress and decompress the input and returns a result. Then we compare the output of the model with the original input. To check how close the output is to the original input we use a loss function. ApplicationsAutoencoders are not only useful for dimensionality reduction. They are often used for other purposes as well, including:1. __Denoising:__ We could add noise to the input and then feed it to the model and then compare the output with the original image (without noise). This approach will create a model which is capable of removing noise from the input.2. __Anomaly Detection:__ When we train a model on specific set of data, the model learns how to recreate the dataset. As a result when there are uncommon instances in the data the model will not be able to recrate them very well. This behaviour is sometimes used as a technique to find anomalous data points. 3. __Unsupervised Clustering:__ Like clustering algorithms but more flexible, able to fit complex relationships Let's start by importing the required libraries.import torch from torch import nn, optim from torch.nn import functional as F from torch.distributions import Normal from torch.utils.data import Dataset, DataLoader from torchvision import datasets, transforms import seaborn as sns from PIL import Image from pathlib import Path import numpy as np import os from tqdm.auto import tqdm import matplotlib.pyplot as plt import pandas as pd from deep_ml_curriculum.torchsummaryX import summaryProblem DescriptionWe are going to start with a simple problem. We will use MNIST dataset which is a collection of hand-written digits as 28x28 pixel images. We are going to use autoencoder to compress each image into only two values and then reconstruct the image. When the model is trained we will have a look at the reconstructed images as well as latent space values. Dataset and dataloaderFirst we need to create a `Dataset` class. The `Dataset` class reads the data from file and returns data points when we need them. The advantage of using a `Dataset` is that we can adjust it based on what we need for each problem. If we are not dealing with large amount of data we can decide to keep everything in RAM so it is ready use. 
But if we are dealing with a few gigabytes of data we might need to open the file only when we need them.The MNIST data set is not large so we can easily fit it into memory. In the `Dataset` class we define a few methods:- `__init__`: What information is required to create the object and how this information is saved.- `__len__`: Returns the number of data points (images) when we use `len()` function.- `__getitem__`: We can define how indexing would work for this class.We are going to define a couple of custom functions for convinience:- `show`: to see the image.- `sample`: which returns a random sample of the data.Dataset? path = Path("../../data/processed/MNIST/") class DigitsDataset(Dataset): def __init__(self, path, transform=None): self.root_dir = Path(path) self.transform = transform data = pd.read_csv(path) if "label" in data.columns: self.x = data.drop(columns=["label"]).values.reshape((-1, 28, 28)) self.y = data["label"].values else: self.x = data.values.reshape((-1, 28, 28)) def __len__(self): """Python method for length""" return len(self.x) def __getitem__(self, idx): """Python method for square brackets""" output = self.x[int(idx)] / 255 if self.transform: output = self.transform(output) return output def show(self, idx): plt.imshow(self.x[idx], "gray")__Note:__ We also defined a class called `ToTensor`. This class takes an input and converts it to pytorch tensor. Now that we have a `Dataset` class, we can create a training and test dataset.ds_train = DigitsDataset(path / "train.csv", transform=transforms.ToTensor()) ds_test = DigitsDataset(path / "test.csv", transform=transforms.ToTensor()) x= ds_train[0] x.shape, x.dtype for i in range(4): for j in range(4): plt.subplot(4, 4, 1+i*4+j) ds_train.show(i*4+j) plt.xticks([]) plt.yticks([]) plt.show() # Both of these are the same ds_train.__getitem__(1).shape ds_train[1].shapeNext step is to create a data loaders. The training process takes place at multiple steps. At each step, we choose a few images and feed them to the model. Then we calculate the loss value based on the output. Using the loss value we update the values in the model. We do this over and over until when we think the model is trained. Each of these steps are called a mini-batch and the number of images passed in at each mini-batch is called batch size. Dataloader's job is to go to the dataset and grab a mini-batch of images for training. To create a Dataloader we use a pytorch dataloder object.batch_size = 64 train_loader = torch.utils.data.DataLoader( ds_train, batch_size=batch_size, shuffle=True ) test_loader = torch.utils.data.DataLoader(ds_test, batch_size=batch_size, shuffle=False) test_loader__Note:__ Shuffle tells the data loader whether the data needs to be shuffled at the end of each epoch. We do it for training to keep the input random. But we don't need to do it for testing since we only use the test dataset for evaluation. Model definitionNow we need to create the model. The architecture we are going to use here is made of two linear layers for the encoder and two linear layers for the decoder.# nn.Module?? 
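# A sketch of the tensor shapes used below (28x28 MNIST images flattened to 784 values):
#   encoder: 784 -> 400 -> 2     (the 2-dimensional latent space)
#   decoder: 2 -> 400 -> 784     (the final Sigmoid keeps reconstructed pixels in [0, 1])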
class AE(nn.Module): def __init__(self): super(AE, self).__init__() self.encoder = nn.Sequential( nn.Linear(784, 400), nn.ReLU(inplace=True), nn.Linear(400, 2) ) self.decoder = nn.Sequential( nn.Linear(2, 400), nn.ReLU(inplace=True), nn.Linear(400, 784), nn.Sigmoid() ) def encode(self, x): x = x.reshape((-1, 1, 28 * 28)) return self.encoder(x).reshape((-1, 2)) def decode(self, z): return self.decoder(z).reshape((-1, 28 * 28)) def forward(self, x): z = self.encode(x) return self.decode(z)If we have access to GPU, let's make sure we are using it.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") deviceNow we create an instance of the model.model = AE().to(device) model # Let use torchsummary X to see the size of the model x=torch.rand((1, 784)).to(device) summary(model, torch.rand((2, 784)).to(device)) 1================================================================== Kernel Shape Output Shape Params Mult-Adds Layer 0_encoder.Linear_0 [784, 400] [2, 1, 400] 314.0k 313.6k 1_encoder.ReLU_1 - [2, 1, 400] - - 2_encoder.Linear_2 [400, 2] [2, 1, 2] 802.0 800.0 3_decoder.Linear_0 [2, 400] [2, 400] 1.2k 800.0 4_decoder.ReLU_1 - [2, 400] - - 5_decoder.Linear_2 [400, 784] [2, 784] 314.384k 313.6k 6_decoder.Sigmoid_3 - [2, 784] - - ------------------------------------------------------------------ Totals Total params 630.386k Trainable params 630.386k Non-trainable params 0.0 Mult-Adds 628.8k ==================================================================We also need to choose an optimiser. The optimiser use the loss value and it's gradients with respect to model parameters and tells us how much each value must be adjusted to have a better model.optimizer = optim.Adam(model.parameters(), lr=1e-3)And the final component is the loss function. Here we are going to use Binary Cross Entropy function because each pixel can go from zero to one.def loss_bce(recon_x, x): BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction="sum") return BCELet's define two functions one for executing a single epoch of training and one for evaluating the mdel using test data.Notice the following comments in the training loopdef train(epoch, loss_function, log_interval=50): model.train() train_loss = 0 for batch_idx, data in enumerate(tqdm(train_loader, leave=False, desc='train')): # We make sure the data is in the right device (cpu or gpu) data = data.to(device).float() # We make sure that any saved gradient (derivative) is zeroed. optimizer.zero_grad() # We pass a mini-batch of data into the model and grab the predictions. recon_batch = model(data) # We use the loss function to find out how close the model's output is to the actual image. loss = loss_function(recon_batch, data) # We use loss.backward() to calculate the derivative of loss with respect to model parameters. loss.backward() # We ask the optimiser to update model's parameters. 
optimizer.step() train_loss += loss.item() if batch_idx % log_interval == 0: pct = 100.0 * batch_idx / len(train_loader) l = loss.item() / len(data) print( '#{} [{}/{} ({:.0f}%)]\t Batch Loss: {:.6f} '.format(epoch, batch_idx * len(data), len(train_loader.dataset), pct, l), end="\r", flush=True, ) print('#{} Train loss: {:.4f}\t'.format(epoch, train_loss / len(train_loader.dataset))) def test(epoch, loss_function, log_interval=50): model.eval() test_loss = 0 with torch.no_grad(): for i, data in enumerate(tqdm(test_loader, leave=False, desc='test')): data = data.to(device).float() recon_batch = model(data) test_loss += loss_function(recon_batch, data).item() test_loss /= len(test_loader.dataset) print('#{} Test loss: {:.4f}'.format(epoch, test_loss)) def cvt2image(tensor): return tensor.detach().cpu().numpy().reshape(28, 28) def show_prediction(idx, title='', ds=ds_train): """Show a predict vs actual""" model.eval() original = ds[idx].float() result = model(original.to(device)) img = cvt2image(result[0]).squeeze() plt.figure(figsize=(4, 2)) plt.subplot(1, 2, 1) plt.imshow(img, "gray") plt.title("Predicted") plt.xticks([]) plt.yticks([]) plt.subplot(1, 2, 2) plt.imshow(original.squeeze(), 'gray') plt.title("Actual") plt.xticks([]) plt.yticks([]) plt.suptitle(title) plt.show() show_prediction(10, '0')Now that all the components are ready, let's train the model for $10$ epochs.epochs = 10 for epoch in tqdm(range(1, epochs + 1)): show_prediction(3, title=f"epoch={epoch}") train(epoch, loss_bce) test(epoch, loss_bce) show_prediction(3, title=f"epoch={epoch}")ResultsNow let's check out the model.# Generate a random integer idx = np.random.randint(0, len(ds_test)) # show this row of the data show_prediction(idx)Run the cell above a few times and compare the predicted and actual images. Latent space There are certainly some similarities but the predicted (reconstructed) images are not always very clear. We will shortly discuss how we can improve the model. But before that, let's have look at the latent space. The model is converting every image which has 784 values (28x28 pixels) to only 2 values. Those 2 values are the latent space. We can plot them for a few numbers (see below).We can also traverse the latent space and see how the reconstructed image changes in meaningfull ways. 
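For instance, decoding a single hand-picked latent point looks roughly like this (a sketch reusing the model.decode and cvt2image helpers defined above; the coordinates are arbitrary):
z = torch.tensor([[1.0, -0.5]]).to(device)  # an arbitrary point in the 2-D latent space
img = cvt2image(model.decode(z))  # decode it back into a 28x28 image
plt.imshow(img, "gray")
plt.xticks([]); plt.yticks([])
plt.show()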
This is a usefull property and means the model has learnt how to vary images.# Scatter plot def traverse(ds=ds_train, model=model, y=None, xmin=None, xmax=None): # Get the first 1000 images n = min(1000, len(ds)) idxs = np.random.choice(range(len(ds)), size=n, replace=False) x = torch.stack([ds[i] for i in idxs]) ys = np.array([ds.y[i] for i in idxs]) x = x.to(device).float().reshape((-1, 1, 28,28)) res = model.encode(x) # If we output a distribution, use the mean if isinstance(res, Normal): res = res.loc # to numpy res = res.detach().cpu().numpy() classes = pd.Series(ds.y).unique() for i, cls in enumerate(classes): idx = ys == cls plt.scatter(res[idx, 0], res[idx, 1], label=cls, alpha=0.5) plt.title('the latent space') plt.xlabel('latent variable 1') plt.ylabel('latent variable 2') if xmin is None: xmin, xmax = plt.xlim() xrange = xmax-xmin xmin -= xrange/2 xmax += xrange/2 if y is None: ymin, ymax = plt.ylim() y = (ymin+ymax)/2 plt.hlines(y, xmin, xmax, color='r', lw=2, label='traversal') plt.legend() plt.show() # Do out traversal plt.figure(figsize=(12, 12)) n_steps = 10 xs = np.linspace(xmin, xmax, n_steps) for xi, x in enumerate(xs): # Decode image at x,y z = torch.zeros((1, res.shape[1])) z[:, 0] = x z[:, 1] = y z = z.float().to(device) img = model.decode(z).cpu().detach().numpy() img = (img.reshape((28, 28)) * 255).astype(np.uint8) # plot an image at x, y plt.subplot(1, n_steps, xi+1) plt.imshow(img, cmap='gray') plt.title(f'{x:2.1f}, {y:2.1f}') plt.xticks([]) plt.yticks([]) traverse(model=model, ds=ds_train)Each color represents a number. Despite most numbers overlapping, we can still see some distictions, for instance between $1$ and other numbers. Improving the modelObviously the model that we trained needs improvement as it is not recreating the images well enough. There are a few ways we can improve the model. One way is to create a deeper encoder and decoder. In the example above we used only two layers for encoder and layers for decoder. This doesn't allow the model to comprehend complex relationships, especially in this scenario since we are working with images. By adding more layers we can give the model the opportunity to better differentiate between digits.Another way of making the model is using more dimensions in latent space. For instance, if instead of compressing each image into two values we could use ten values. This will allow the model to extract more features from the input which will make reconstructing the image easier. However, it must be noted that whole point of using autoencoder is to force the model to compress the information into as few dimensions as possible. Variational AutoencodersVariational Autoencoders (VAE) are one of the variations of autoencoders. Unlike normal autoencoders which compress the data into a few values, VAEs tries to find the distribution of the data in latent space. As a result, the final model not only has the ability to recreate the input, but can also generate new outputs by sampling from the latent space distribution. Since VAE is a variation of autoencoder, it has a similar architecture. The main difference between the two is an additional layer between encoder and decoder which samples from latent space distribution.In a VAE, the encoder generates two values for each parameter in latent space. One represent the mean and one represents the standard deviation of the parameter. Then sampling layer uses these two numbers and generates random values from the same distribution. 
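As a rough sketch of that sampling step (the reparameterisation trick, written with the same torch.distributions.Normal object this notebook uses below; the numbers are only illustrative):
mean = torch.tensor([0.0, 0.0])   # produced by the encoder
std = torch.tensor([1.0, 1.0])    # produced by the encoder (as the exp of a predicted log-std)
z = Normal(mean, std).rsample()   # differentiable sample, equivalent to mean + std * eps with eps ~ N(0, 1)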
These values then are fed to decoder which will create an output similar to the input. Model definition: VAE Let's create a VAE model. We will use layers with the same size as the previous model. Notice for the second layer we have two linear layers, one to generate the mean and one to generate the log of variance which will be converted into standard deviation.class VAE(nn.Module): """Variational Autoencoder""" def __init__(self): super(VAE, self).__init__() self.encoder = nn.Sequential( nn.Linear(784, 400), nn.ReLU(), nn.Linear(400, 4) # 2 for mean, 2 for std ) self.decoder = nn.Sequential( nn.Linear(2, 400), nn.ReLU(), nn.Linear(400, 784), nn.Sigmoid() ) def encode(self, x): """Takes in image, output distribution""" x = x.reshape((-1, 28*28)) h = self.encoder(x) # first few features are mean mean = h[:, :2] # second two are the log std log_std = h[:, 2:] std = torch.exp(log_std) # return a normal distribution with 2 parameters return Normal(mean, std) def decode(self, z): """Takes in latent vector and produces image.""" return self.decoder(z).reshape((-1, 28 * 28)) def forward(self, x): """Combine the above methods""" dist = self.encode(x.view(-1, 784)) z = dist.rsample() # sample, with gradient return self.decode(z), dist # Normal's d = Normal(torch.tensor([0, 1]), torch.Tensor([2, 3])) # d.rsample() model = VAE().to(device) optimizer = optim.Adam(model.parameters(), lr=1e-3) model # We can view the shape of our model and number of params x = torch.rand((1, 784)).to(device) summary(model, x) 1================================================================== Kernel Shape Output Shape Params Mult-Adds Layer 0_encoder.Linear_0 [784, 400] [1, 400] 314.0k 313.6k 1_encoder.ReLU_1 - [1, 400] - - 2_encoder.Linear_2 [400, 4] [1, 4] 1.604k 1.6k 3_decoder.Linear_0 [2, 400] [1, 400] 1.2k 800.0 4_decoder.ReLU_1 - [1, 400] - - 5_decoder.Linear_2 [400, 784] [1, 784] 314.384k 313.6k 6_decoder.Sigmoid_3 - [1, 784] - - ------------------------------------------------------------------ Totals Total params 631.188k Trainable params 631.188k Non-trainable params 0.0 Mult-Adds 629.6k ==================================================================Concept: KLD The loss function is similar to what we used before, except we have an extra part. the extra equation is [Kullback–Leibler divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence) which measures difference between probability distributions.However we are using the KLD_loss, which is always positiveImage source: wikipediadef loss_bce_kld(recon_x, x, dist): BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction="sum") # KL-divergence between a diagonal multivariate normal, # and a standard normal distribution (with zero mean and unit variance) # In other words, we are punishing it if it's distribution moves away from a standard normal dist KLD = -0.5 * torch.sum(1 + dist.scale.log() - dist.loc.pow(2) - dist.scale) return BCE + KLD # You can try the KLD here with differen't distribution p = Normal(loc=1, scale=2) q = Normal(loc=0, scale=1) kld = torch.distributions.kl.kl_divergence(p, q) # plot the distributions ps=p.sample_n(10000).numpy() qs=q.sample_n(10000).numpy() sns.kdeplot(ps, label='p') sns.kdeplot(qs, label='q') plt.title(f"KLD(p|q) = {kld:2.2f}\nKLD({p}|{q})") plt.legend() plt.show()/home/wassname/anaconda/envs/deep_ml_curriculum/lib/python3.7/site-packages/torch/distributions/distribution.py:134: UserWarning: sample_n will be deprecated. Use .sample((n,)) instead warnings.warn('sample_n will be deprecated. 
Use .sample((n,)) instead', UserWarning)Exercise 1: KLDRun the above cell with while changing Q.- Use the code above and test if the KLD is higher for distributions that overlap more- (advanced) Write new code that plots a line of kld vs q.loc, using the function below```pythondef kld_vs_qloc(loc): kld = torch.distributions.kl.kl_divergence(p, Normal(loc=loc, scale=1)) return kld klds = []locs = range(-10, 10)for loc in locs: YOUR CODE HERE: run kld_vs_qloc, for a loc klds.append(kld) YOUR code here, plot locs vs klds```# Part 1 p = Normal(0, 1) kld_close = torch.distributions.kl.kl_divergence(p, Normal(0, 1)) kld_far = torch.distributions.kl.kl_divergence(p, Normal(10, 1)) print(kld_close, kld_far) print('close is lower?', kld_closetensor(0.) tensor(50.) close is lower? tensor(True)Train We also need to slightly adjust the training loop since the loss function now takes four inputs.def train_vae(epoch, loss_function, model, train_loader, log_interval=50): model.train() train_loss = 0 for batch_idx, data in enumerate(tqdm(train_loader, leave=False)): data = data.to(device).float() optimizer.zero_grad() recon_batch, dist = model(data) loss = loss_function(recon_batch, data, dist) loss.backward() train_loss += loss.item() optimizer.step() if batch_idx % log_interval == 0: pct = 100.0 * batch_idx / len(train_loader) l = loss.item() / len(data) print( '#{} [{}/{} ({:.0f}%)]\tLoss: {:.6f} '.format(epoch, batch_idx * len(data), len(train_loader.dataset), pct, l), end="\r", flush=True, ) print('#{} Train loss: {:.4f}'.format(epoch, train_loss / len(train_loader.dataset))) def test_vae(epoch, loss_function, model, test_loader, log_interval=50): model.eval() test_loss = 0 with torch.no_grad(): for i, data in enumerate(test_loader): data = data.to(device).float() recon_batch, dist = model(data) test_loss += loss_function(recon_batch, data, dist).item() test_loss /= len(test_loader.dataset) print('#{} Test loss: {:.4f}'.format(epoch, test_loss)) # We can view the shape of our model and number of params summary(model, torch.rand((1, 784)).to(device)) 1 epochs = 10 show_prediction(3, title=f"epoch={0}") for epoch in tqdm(range(1, epochs + 1)): train_vae(epoch, loss_bce_kld, model, train_loader) test_vae(epoch, loss_bce_kld, model, test_loader) show_prediction(3, title=f"epoch={epoch}")Saving and Loading ModelYou can save and load as a pickle, but it's better to use torch.save (which uses pickle)import pickle with open("VAE.pk", "wb") as fp: torch.save(model.state_dict(), fp) model = VAE().to(device) with open("VAE.pk", "rb") as fp: model.load_state_dict(torch.load(fp))Resultsidx = np.random.randint(0, len(ds_test)) show_prediction(idx)One property of a latent space is that you can travese it, and get meaningful varations of outputs.traverse(model=model, ds=ds_train)If we compare this plot with the similar plot for normal autoencoder, we can see that VAE did a better job at creating clusters. The points for each digits are closer together compared to previous model. However, there is still room for improvement. We can also see that using KLD helped make sure the latent space was centered around 0. Exercise 2: DeeperCreate a new VAE but this time use a deeper network. Note, everything else (loss function, dataloaders, training loops, etc.) will stay the same only the model will change. 
The example above was using these sizes: 784 --> 400 --> 2 --> 400 --> 784Try a new model which uses these size: 784 --> 400 --> 80 --> 2 --> 80 --> 400 --> 784# Create the model definition # YOUR CODE HERE # # Training logic # epochs = 10 # show_prediction(10, title=f"epoch={0}") # for epoch in tqdm(range(1, epochs + 1)): # train(epoch, loss_bce_kld) # test(epoch, loss_bce_kld) # show_prediction(10, title=f"epoch={epoch}") # # Visualise the results # idx = np.random.randint(0, len(ds_test)) # show_prediction(idx) # plt.show() # traverse(model=model, y=3, xmin=-5, xmax=5)Exercise 3: WiderCreate a new VAE but this time use a more than two parameters for the latent space. This will reduce the loss Application: Anomaly DetectionThe model will reconstruct normal data well, and fail to reconstruct anomolies. This means we can use it for anomoly detectionimg = ds_train[11].to(device).float() # First try to reconstruct a real image img_recon, _ = model(img) loss_img = loss_bce(img_recon , img) # then a fake image, a vector of random noise rand = torch.rand((28, 28)).to(device) rand[:, 15] = 1 rand[15, :] = 1 rand = rand.reshape((-1, )) rand_recon, _ = model(rand) loss_rand = loss_bce(rand_recon , rand) print(f'img_loss={loss_img:2.2f}, random_loss={loss_rand:2.2f}\nanomoly detected={loss_imgimg_loss=109.65, random_loss=2056.27 anomoly detected=TrueApplication: DenoisingSince the model only keep the important information, noise ends up being discarded. This can not only let us compress data, but denoise it.In the example below we add some artifacts, and the autoencoder discards them during reconstruction.img = ds_train[11].to(device).float() # First try to reconstruct a real image img_recon, _ = model(img) loss_img = loss_bce(img_recon , img) # Add noise to an image rand = (img * 1.0).reshape((28, 28)) rand[:, 15] = 0.5 # vertical bar rand[15, :] = 0.9 # horizontal bar rand[5, 5] = 0.9 # spot rand = rand.flatten() # Reconstruct the noisy image rand_recon, _ = model(rand) loss_rand = loss_bce(rand_recon , rand) plt.subplot(1, 2, 1) plt.suptitle(f'real image loss={loss_img:2.2f}') plt.imshow(cvt2image(img), cmap="gray") plt.subplot(1, 2, 2) plt.imshow(cvt2image(img_recon), cmap="gray") plt.show() plt.subplot(1, 2, 1) plt.suptitle(f'noisy image loss={loss_rand:2.2f}') plt.imshow(cvt2image(rand), cmap="gray") plt.subplot(1, 2, 2) plt.imshow(cvt2image(rand_recon), cmap="gray") # You can see it's removed the noise that we added, but retained the digitSolution to Exercises Exercise 1Solution```Python Part 1p = Normal(0, 1)kld_close = torch.distributions.kl.kl_divergence(p, Normal(0, 1))kld_far = torch.distributions.kl.kl_divergence(p, Normal(10, 1))print(kld_close, kld_far)print('close is lower?', kld_close, - A guide to convolution arithmetic for deep learning"](https://github.com/vdumoulin/conv_arithmetic).A convolution layer, with the input below, and the output above:A transpose convolution layer:# Let's create our model. Same as before the model has three main sections: class CVAE(nn.Module): """Convolutional VAE""" def __init__(self, n_latent=2): super(CVAE, self).__init__() # After each layer in the encoder we decrease the size of output and increase the number of channels. self.encoder = nn.Sequential( nn.Conv2d(1, 32, kernel_size=4, stride=2), nn.BatchNorm2d(32), nn.ReLU(), nn.Conv2d(32, 64, kernel_size=4, stride=2), nn.BatchNorm2d(64), nn.ReLU(), nn.Conv2d(64, 128, kernel_size=5, stride=2), nn.ReLU(), nn.Flatten(), ) # It acts as opposite of encoder. 
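# (A sketch of the spatial sizes, assuming 28x28 inputs and the kernel/stride choices below:
#  the latent vector is reshaped to 128x1x1 and then upsampled 1x1 -> 3x3 -> 7x7 -> 14x14 -> 28x28.)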
At each layer we increase the size of output and decrease the number of channels. self.decoder = nn.Sequential( nn.ConvTranspose2d(128, 128, kernel_size=3, stride=2), nn.BatchNorm2d(128), nn.ReLU(), nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2), nn.BatchNorm2d(64), nn.ReLU(), nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2), nn.BatchNorm2d(32), nn.ReLU(), nn.ConvTranspose2d(32, 1, kernel_size=2, stride=2), nn.Sigmoid(), ) self.fc11 = nn.Linear(128, n_latent) self.fc12 = nn.Linear(128, n_latent) self.fc2 = nn.Linear(n_latent, 128) def encode(self, x): x = x.reshape((-1, 1, 28, 28)) h = self.encoder(x) mu, logvar = self.fc11(h), self.fc12(h) return Normal(mu, torch.exp(logvar)) def decode(self, z): z = self.fc2(z) z = z.view(z.size(0), 128, 1, 1) z = self.decoder(z) z = z.reshape((-1, 28*28)) return z def forward(self, x): dist = self.encode(x) z = dist.rsample() z = self.decode(z) return z, dist model = CVAE().to(device) from deep_ml_curriculum.torchsummaryX import summary x = torch.randn(1, 1, 28, 28).to(device) summary(model, x) optimizer = optim.Adam(model.parameters(), lr=1e-3) # training loop epochs = 10 show_prediction(3, title=f"epoch={0}") for epoch in tqdm(range(1, epochs + 1)): train_vae(epoch, loss_bce_kld, model, train_loader) test_vae(epoch, loss_bce_kld, model, test_loader) show_prediction(3, title=f"epoch={epoch}") traverse(model=model, ds=ds_train, y=1) traverse(model=model, ds=ds_train)CS7641 Machine Learning Application of Machine Learning in Pairs Trading Cointegration Test and Pair Selectionimport pandas as pd import numpy as np import matplotlib.pyplot as plt import statsmodels.api as sm import statsmodels.tsa.stattools as ts import datetime import heapq import operator df_price = pd.read_csv("./stock_price_table.csv", index_col = 0) df_price.columns = df_price.columns.str.lstrip('price_') df_price.head() df_price.loc[14593] clusters = pd.read_csv("./10_clusters_KMeans.csv") clusters.head() lookup_table = pd.read_csv("./permno_ticker_lookup_table.csv", index_col = 0) lookup_table.head() ''' Function for make clustered result into a dict object with key as cluster number and values as PERMNO ''' def df_to_dict(df, idx_col, value_col): cluster_dict = df.groupby(idx_col)[value_col].apply(list).to_dict() return cluster_dict cluster_dict = df_to_dict(clusters, 'cluster', 'Unnamed: 0') keys = cluster_dict.keys() len(cluster_dict) def cointegration(cluster, significance, start_day, end_day, df_price): """ Args: cluster: a list object that contains all the PERMNO for a specific cluster significance: a float object, the probability of rejecting the null hypothesis when it is true start_day: a string object with format '%Y%m%d'8. like '20151231', start of training period end_day:a string object with format '%Y%m%d'8. 
like '20151231', end of training period df_price: a pandas DataFrame of daily stock price with PERMNO as index and date as columns Returns: pair_coin: a list of lists which contains the PERMNO for all the tested pairs p_value: a list which contains the p value of the cointegration test for all tested pairs """ pair_coin = [] p_value = [] n = len(cluster) keys = cluster.copy() for i in range(n-1): for j in range(i+1, n): asset_1 = df_price.loc[start_day:end_day, keys[i]] asset_2 = df_price.loc[start_day:end_day, keys[j]] # fit two stock price series into OLS linear regression results = sm.OLS(asset_1, asset_2).fit() predict = results.predict(asset_2) # find the residual and test if the residuals are stationary (ADF test) error = asset_1 - predict ADFtest = ts.adfuller(error) # if the residual is stationary (reject null hypothesis with sig level we choose) # we consider it as a valid cointegrated pair if ADFtest[1] < significance: pair_coin.append([keys[i], keys[j]]) p_value.append(ADFtest[1]) return pair_coin, p_value def pair_selection(clusters, significance, start_day, end_day, df_price, num_pairs = 1): """ Args: clusters: a pandas DataFrame with the cluster assignment for each PERMNO significance: a float object, the probability of rejecting the null hypothesis when it is true start_day: a string object with format '%Y%m%d'8. like '20151231', start of training period end_day: a string object with format '%Y%m%d'8. like '20151231', end of training period df_price: a pandas DataFrame of daily stock price with PERMNO as index and date as columns num_pairs: an int object, how many pairs we want to select from each cluster. Default is 1 Returns: selected_pairs: a list of selected pairs based on cointegration test """ cluster_dict = df_to_dict(clusters, 'cluster', 'Unnamed: 0') k = len(cluster_dict) selected_pair = [] if num_pairs == 1: for i in range(k): cluster = cluster_dict[i] pair_coin, p_value = cointegration(cluster, significance, start_day, end_day, df_price) if len(p_value) > 0: if np.min(p_value) < significance: index = np.where(p_value == np.min(p_value))[0][0] selected_pair.append([pair_coin[index][0], pair_coin[index][1]]) else: p_value_contval = [] pairs_contval = [] for i in range(k): cluster = cluster_dict[i] pair_coin, p_value = cointegration(cluster, significance, start_day, end_day, df_price) if len(p_value) > 0: p_value_contval += p_value pairs_contval += pair_coin selected_pair_index = heapq.nsmallest(num_pairs, range(len(p_value_contval)), key = p_value_contval.__getitem__) selected_pair = operator.itemgetter(*selected_pair_index)(pairs_contval) return selected_pair #preview of cointegrated pairs def plot_pairs(asset1, asset2, start_date, end_date, df_price, lookup_table): price1 = df_price.loc[asset1, start_date:end_date] print(price1) price2 = df_price.loc[asset2, start_date:end_date] tic1 = lookup_table.loc[lookup_table['PERMNO'] == asset1, 'TICKER'].values[0] tic2 = lookup_table.loc[lookup_table['PERMNO'] == asset2, 'TICKER'].values[0] fig = plt.figure(figsize = (10,7)) ax = fig.add_subplot(111) ax.set_xlabel(tic1) ax.set_ylabel(tic2) ax.scatter(price1[0:252], price2[0:252], color = 'b', label = 'year 2010') ax.scatter(price1[252:504], price2[252:504], color = 'g', label = 'year 2011') ax.scatter(price1[504:756], price2[504:756], color = 'r', label = 'year 2012') ax.scatter(price1[756:1008], price2[756:1008], color = 'c', label = 'year 2013') ax.scatter(price1[1008:1260], price2[1008:1260], color = 'm', label = 'year 2014') ax.scatter(price1[1260:], price2[1260:], color = 'k', label = 'year 2015') ax.legend() 
plt.title('Stock Price Comparison of ' + tic1 + ' and ' + tic2) plt.show() plt.savefig('./stock_price_relation/comp_'+tic1+'_'+tic2+'.png') start_date = '20100101' end_date = '20151231' plot_pairs(81294, 82581, start_date, end_date, df_price, lookup_table) asset1 = 81294 asset2 = 82581 price1 = df_price.loc[asset1, start_date:end_date] price2 = df_price.loc[asset2, start_date:end_date] results = sm.OLS(price1, price2).fit() predict = results.predict(price2) error = price1 - predict ADFtest = ts.adfuller(error) plt.figure(figsize = (10,7)) plt.title('Spread of WDFC and HSIC') plt.plot(error) plt.savefig('./spread_wdfc_hsic.png') print(ADFtest[1])2.8702051939237176e-05Objective: To document basic and advanced methods for univariate and multivariate visualization techniques.# Load the required libraries import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') %matplotlib inline # Load the dataset df = pd.read_csv('../data/FIFA 2018 Statistics.csv') # check the data type of variables # In pandas info() is a rough equivalent to str() of R print(df.info()) RangeIndex: 128 entries, 0 to 127 Data columns (total 27 columns): Date 128 non-null object Team 128 non-null object Opponent 128 non-null object Goal Scored 128 non-null int64 Ball Possession % 128 non-null int64 Attempts 128 non-null int64 On-Target 128 non-null int64 Off-Target 128 non-null int64 Blocked 128 non-null int64 Corners 128 non-null int64 Offsides 128 non-null int64 Free Kicks 128 non-null int64 Saves 128 non-null int64 Pass Accuracy % 128 non-null int64 Passes 128 non-null int64 Distance Covered (Kms) 128 non-null int64 Fouls Committed 128 non-null int64 Yellow Card 128 non-null int64 Yellow & Red 128 non-null int64 Red 128 non-nul[...]Univariate data visualization# Basic visualizations # Histogram - will work only for continuous variables sns.distplot(df['Passes']) # determining relationship between continuous variables # scatter plot between Passes and Goal Scored var = 'Goal Scored' data = pd.concat([dt['Passes'], dt[var]], axis=1) data.plot.scatter(x=var, y='Passes')1.0 Import Function!pip install yfinanceRequirement already satisfied: yfinance in c:\programdata\anaconda3\lib\site-packages (0.1.63) Requirement already satisfied: numpy>=1.15 in c:\programdata\anaconda3\lib\site-packages (from yfinance) (1.18.5) Requirement already satisfied: multitasking>=0.0.7 in c:\programdata\anaconda3\lib\site-packages (from yfinance) (0.0.9) Requirement already satisfied: requests>=2.20 in c:\programdata\anaconda3\lib\site-packages (from yfinance) (2.24.0) Requirement already satisfied: pandas>=0.24 in c:\programdata\anaconda3\lib\site-packages (from yfinance) (1.0.5) Requirement already satisfied: lxml>=4.5.1 in c:\programdata\anaconda3\lib\site-packages (from yfinance) (4.5.2) Requirement already satisfied: idna<3,>=2.5 in c:\programdata\anaconda3\lib\site-packages (from requests>=2.20->yfinance) (2.10) Requirement already satisfied: certifi>=2017.4.17 in c:\programdata\anaconda3\lib\site-packages (from requests>=2.20->yfinance) (2020.6.20) Requirement already satisfied: chardet<4,>=3.0.2 in c:\pr[...]2.0 Setupfrom META_TOOLBOX import * import BOLSA_NEW as BOVESPA ACOES = ['ITSA4.SA', 'VALE3.SA', 'GGBR4.SA', 'LREN3.SA', 'JBSS3.SA'] DATA_INICIAL = '2019-02-01' DATA_FINAL = '2020-02-01' DADOS_BOLSA ={'ID ACAO':ACOES, 'DATA INICIAL':DATA_INICIAL, 'DATA FINAL':DATA_FINAL} DADOS_GERAIS, RETORNOS, COVARIANCIA = BOVESPA.DADOS_BOLSA_PRECO_AJUSTADO(DADOS_BOLSA) 
OPCOES_GRAF={'ANNOT':True, 'LINEWIDTHS':20, 'FMT':'.4'} BOVESPA.BOLSA_PLOT_001(COVARIANCIA,OPCOES_GRAF) N_REP = 20 N_ITER = 50 N_POP = 20 D = 5 X_L = [0.00] * D X_U = [1.00] * D M = 2 GAMMA = GAMMA_ASSEMBLY(X_L, X_U, D, M) NULL_DIC = {'COVARIANCIA': COVARIANCIA, 'RETORNOS DIÁRIOS': RETORNOS} SETUP_FA = { 'N_REP': N_REP, 'N_ITER': N_ITER, 'N_POP': N_POP, 'D': D, 'X_L': X_L, 'X_U': X_U, 'BETA_0': 0.98, 'ALPHA_MIN': 0.25, 'ALPHA_MAX': 1.00, 'THETA': 0.95, 'GAMMA': GAMMA, 'NULL_DIC': NULL_DIC } # OBJ. Function def OF_FUNCTION(X, NULL_DIC): DADOS_COV = NULL_DIC['COVARIANCIA'] DADOS_RETORNO = NULL_DIC['RETORNOS DIÁRIOS'] LAMBDA = 0.50 OF = BOVESPA.FO_MARKOWITZ(X, DADOS_COV, DADOS_RETORNO, LAMBDA) H = np.abs(sum(X)) - 1 for I_CONT in range(len(X)): OF += (H ** 2) * 1E6 return OF4.0 Example[RESULTS_REP, BEST_REP, AVERAGE_REP, WORST_REP, STATUS] = FA_ALGORITHM_0001(OF_FUNCTION, SETUP_FA) BEST_REP_ID = STATUS[0] BEST_REP_ID BEST = BEST_REP[BEST_REP_ID] AVERAGE = AVERAGE_REP[BEST_REP_ID] WORST = WORST_REP[BEST_REP_ID] PLOT_SETUP = { 'NAME': 'WANDER-OF', 'WIDTH': 0.40, 'HEIGHT': 0.20, 'DPI': 600, 'EXTENSION': '.svg', 'COLOR OF': '#000000', 'MARKER OF': 's', 'COLOR FIT': '#000000', 'MARKER FIT': 's', 'MARKER SIZE': 6, 'LINE WIDTH': 4, 'LINE STYLE': '--', 'OF AXIS LABEL': '$W (kN) $', 'X AXIS LABEL': 'Number of objective function evaluations', 'LABELS SIZE': 14, 'LABELS COLOR': '#000000', 'X AXIS SIZE': 14, 'Y AXIS SIZE': 14, 'AXISES COLOR': '#000000', 'ON GRID?': True, 'Y LOG': True, 'X LOG': True, } DATASET = {'X': BEST['NEOF'], 'OF': BEST['OF'], 'FIT': BEST['FIT']} META_PLOT_001(DATASET, PLOT_SETUP) PLOT_SETUP = { 'NAME': 'WANDER-OF', 'WIDTH': 0.40, 'HEIGHT': 0.20, 'DPI': 600, 'EXTENSION': '.svg', 'COLOR': '#00BFFF', 'MARKER': 's', 'MARKER SIZE': 6, 'LINE WIDTH': 4, 'LINE STYLE': '--', 'Y AXIS LABEL': '$Euller$', 'X AXIS LABEL': 'Number of objective function evaluations', 'LABELS SIZE': 14, 'LABELS COLOR': '#000000', 'X AXIS SIZE': 14, 'Y AXIS SIZE': 14, 'AXISES COLOR': '#000000', 'ON GRID?': True, 'Y LOG': True, 'X LOG': True, } DATASET = {'X': BEST['NEOF'], 'Y': BEST['OF']} META_PLOT_002(DATASET, PLOT_SETUP) PLOT_SETUP = { 'NAME': 'WANDER-OF', 'WIDTH': 0.40, 'HEIGHT': 0.20, 'DPI': 600, 'EXTENSION': '.svg', 'COLOR BEST': '#00008B', 'COLOR WORST': '#000000', 'COLOR AVERAGE': '#ffcbdb', 'MARKER': 's', 'MARKER SIZE': 6, 'LINE WIDTH': 4, 'LINE STYLE': '--', 'Y AXIS LABEL': '$W (kN) $', 'X AXIS LABEL': 'Number of objective function evaluations', 'LABELS SIZE': 14, 'LABELS COLOR': '#000000', 'X AXIS SIZE': 14, 'Y AXIS SIZE': 14, 'AXISES COLOR': '#000000', 'ON GRID?': True, 'LOC LEGEND': 'upper right', 'SIZE LEGEND': 12, 'Y LOG': True, 'X LOG': True } DATASET = {'X': BEST['NEOF'], 'BEST': BEST['OF'], 'AVERAGE': AVERAGE['OF'], 'WORST': WORST['OF']} META_PLOT_003(DATASET, PLOT_SETUP) PLOT_SETUP = { 'NAME': 'WANDER-OF', 'WIDTH': 0.40, 'HEIGHT': 0.20, 'DPI': 600, 'EXTENSION': '.svg', 'MARKER': 's', 'X AXIS LABEL': 'OF values', 'X AXIS SIZE': 14, 'Y AXIS SIZE': 14, 'LABELS SIZE': 14, 'LABELS COLOR': '#000000', 'COLOR': '#000000', 'AXISES COLOR': '#000000', 'BINS': 20, 'KDE': False, } DATASET = {'NUMBER OF REPETITIONS': N_REP, 'NUMBER OF ITERATIONS': N_ITER, 'OF OR FIT': 'OF', 'BEST': BEST_REP} META_PLOT_004(DATASET, PLOT_SETUP)C:\ProgramData\Anaconda3\lib\site-packages\seaborn\_decorators.py:36: FutureWarning: Pass the following variable as a keyword arg: x. 
From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation. warnings.warn(#instalar libreria !pip install yfinance #API Datos import yfinance as yfin #Manejo de tablas de datos import pandas as pd #operaciones matriciales import numpy as np #Manejo de archivos from google.colab import files #Gráficos import matplotlib.pyplot as plt ticker = '^GSPC' sp = yfin.download(ticker, start='2015-01-01', end='2021-01-01') sp sp["logret"]=np.log(sp["Adj Close"]/sp["Adj Close"].shift(1)) sp sp["retar"]=sp["Adj Close"]/sp["Adj Close"].shift(1)-1 sp np.mean(sp.logret)*252 np.mean(sp.retar)*252 np.std(sp.logret)*252 np.std(sp.retar)*np.sqrt(252) np.std(sp.logret)*np.sqrt(252) plt.plot(sp["Adj Close"]) plt.plot((sp["logret"]-sp["logret"].shift(1))**2)Rank FeaturesThe creator of Shapley Additive Explanations, , has written an efficient implementation that we can install and use. We’ll be able to use this to determine both local feature importance (for a single observation) and global feature importance (for all training samples as a whole). To aggregate local feature importance into global feature importance, we take the absolute values of the local feature importances, and then average them.We can calculate the feature importance using sklearn and using the Shap library.Based on the feature importances, we can think about modifying features to improve them. Then we can re-train the model on the modified features.Finally, we can prune the feature set to just use the most relevant features.# Note, this will install zipline and alphalens, which will take some time import sys !{sys.executable} -m pip install --quiet -r requirements.txt import numpy as np import pandas as pd import time import matplotlib.pyplot as plt %matplotlib inline plt.style.use('ggplot') plt.rcParams['figure.figsize'] = (14, 8) import os import project_helper from zipline.data import bundles os.environ['ZIPLINE_ROOT'] = os.path.join(os.getcwd(), '..', '..', 'data', 'module_4_quizzes_eod') ingest_func = bundles.csvdir.csvdir_equities(['daily'], project_helper.EOD_BUNDLE_NAME) bundles.register(project_helper.EOD_BUNDLE_NAME, ingest_func) print('Data Registered') from zipline.pipeline import Pipeline from zipline.pipeline.factors import AverageDollarVolume from zipline.utils.calendars import get_calendar universe = AverageDollarVolume(window_length=120).top(500) trading_calendar = get_calendar('NYSE') bundle_data = bundles.load(project_helper.EOD_BUNDLE_NAME) engine = project_helper.build_pipeline_engine(bundle_data, trading_calendar) # Test universe_end_date = pd.Timestamp('2016-01-05', tz='UTC') universe_tickers = engine\ .run_pipeline( Pipeline(screen=universe), universe_end_date, universe_end_date)\ .index.get_level_values(1)\ .values.tolist() from zipline.data.data_portal import DataPortal data_portal = DataPortal( bundle_data.asset_finder, trading_calendar=trading_calendar, first_trading_day=bundle_data.equity_daily_bar_reader.first_trading_day, equity_minute_reader=None, equity_daily_reader=bundle_data.equity_daily_bar_reader, adjustment_reader=bundle_data.adjustment_reader) def get_pricing(data_portal, trading_calendar, assets, start_date, end_date, field='close'): end_dt = pd.Timestamp(end_date.strftime('%Y-%m-%d'), tz='UTC', offset='C') start_dt = pd.Timestamp(start_date.strftime('%Y-%m-%d'), tz='UTC', offset='C') end_loc = trading_calendar.closes.index.get_loc(end_dt) start_loc = 
trading_calendar.closes.index.get_loc(start_dt) return data_portal.get_history_window( assets=assets, end_dt=end_dt, bar_count=end_loc - start_loc, frequency='1d', field=field, data_frequency='daily')Make Factors- Take the same factors we have been using:from zipline.pipeline.factors import CustomFactor, DailyReturns, Returns, SimpleMovingAverage from zipline.pipeline.data import USEquityPricing factor_start_date = universe_end_date - pd.DateOffset(years=3, days=2) sector = project_helper.Sector() def momentum_1yr(window_length, universe, sector): return Returns(window_length=window_length, mask=universe) \ .demean(groupby=sector) \ .rank() \ .zscore() def mean_reversion_5day_sector_neutral(window_length, universe, sector): return -Returns(window_length=window_length, mask=universe) \ .demean(groupby=sector) \ .rank() \ .zscore() def mean_reversion_5day_sector_neutral_smoothed(window_length, universe, sector): unsmoothed_factor = mean_reversion_5day_sector_neutral(window_length, universe, sector) return SimpleMovingAverage(inputs=[unsmoothed_factor], window_length=window_length) \ .rank() \ .zscore() class CTO(Returns): """ Computes the overnight return, per hypothesis from https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2554010 """ inputs = [USEquityPricing.open, USEquityPricing.close] def compute(self, today, assets, out, opens, closes): """ The opens and closes matrix is 2 rows x N assets, with the most recent at the bottom. As such, opens[-1] is the most recent open, and closes[0] is the earlier close """ out[:] = (opens[-1] - closes[0]) / closes[0] class TrailingOvernightReturns(Returns): """ Sum of trailing 1m O/N returns """ window_safe = True def compute(self, today, asset_ids, out, cto): out[:] = np.nansum(cto, axis=0) def overnight_sentiment(cto_window_length, trail_overnight_returns_window_length, universe): cto_out = CTO(mask=universe, window_length=cto_window_length) return TrailingOvernightReturns(inputs=[cto_out], window_length=trail_overnight_returns_window_length) \ .rank() \ .zscore() def overnight_sentiment_smoothed(cto_window_length, trail_overnight_returns_window_length, universe): unsmoothed_factor = overnight_sentiment(cto_window_length, trail_overnight_returns_window_length, universe) return SimpleMovingAverage(inputs=[unsmoothed_factor], window_length=trail_overnight_returns_window_length) \ .rank() \ .zscore() universe = AverageDollarVolume(window_length=120).top(500) sector = project_helper.Sector() pipeline = Pipeline(screen=universe) pipeline.add( momentum_1yr(252, universe, sector), 'Momentum_1YR') pipeline.add( mean_reversion_5day_sector_neutral_smoothed(20, universe, sector), 'Mean_Reversion_Sector_Neutral_Smoothed') pipeline.add( overnight_sentiment_smoothed(2, 10, universe), 'Overnight_Sentiment_Smoothed') all_factors = engine.run_pipeline(pipeline, factor_start_date, universe_end_date) all_factors.head()Add sector codepipeline.add(sector, 'sector_code')Universal Quant Features* stock volatility: zipline has a custom factor called AnnualizedVolatility. The [source code is here](https://github.com/quantopian/zipline/blob/master/zipline/pipeline/factors/basic.py) and also pasted below: Annualized volatility.Create `AnnualizedVolatility` objects for 20 day and 120 day (one month and six-month) time windows. Remember to set the `mask` parameter to the `universe` object created earlier (this filters the stocks to match the list in the `universe`). 
Convert these to ranks, and then convert the ranks to zscores.from zipline.pipeline.factors import AnnualizedVolatility volatility_20d = AnnualizedVolatility(window_length=20, mask=universe).rank().zscore() volatility_120d = AnnualizedVolatility(window_length=120, mask=universe).rank().zscore() pipeline.add(volatility_20d, 'volatility_20d') pipeline.add(volatility_120d, 'volatility_120d')Average Dollar Volume feature[AverageDollarVolume](http://www.zipline.io/appendix.htmlzipline.pipeline.factors.AverageDollarVolume):Use 20 day and 120 day `window_length`, rank and then zscore#from zipline.pipeline.factors import AverageDollarVolume # already imported earlier, but shown here for reference adv_20d = AverageDollarVolume(window_length=20, mask=universe).rank().zscore() adv_120d = AverageDollarVolume(window_length=120, mask=universe).rank().zscore() pipeline.add(adv_20d, 'adv_20d') pipeline.add(adv_120d, 'adv_120d')Regime Features market dispersion featureCalculate the mean returns$\mu = \sum_{t=0}^{T}\sum_{i=1}^{N}r_{i,t}$$\sqrt{\frac{1}{T} \sum_{t=0}^{T} \frac{1}{N}\sum_{i=1}^{N}(r_{i,t} - \mu)^2}$class MarketDispersion(CustomFactor): inputs = [DailyReturns()] window_length = 1 window_safe = True def compute(self, today, assets, out, returns): # returns are days in rows, assets across columns mean_returns = np.nanmean(returns) out[:] = np.sqrt(np.nanmean((returns - mean_returns)**2)) pipeline.add(SimpleMovingAverage(inputs=[MarketDispersion(mask=universe)], window_length=20), 'dispersion_20d') pipeline.add(SimpleMovingAverage(inputs=[MarketDispersion(mask=universe)], window_length=120), 'dispersion_120d')Market volatility feature* High and low volatility We'll also build a class for market volatility, which inherits from [CustomFactor](http://www.zipline.io/appendix.html?highlight=customfactorzipline.pipeline.CustomFactor). Market return$r_{m,t} = \sum_{i=1}^{N}r_{i,t}$ for each day $t$ in `window_length`. Average market returnAlso calculate the average market return over the `window_length` $W$ of days: $\mu_{m} = \frac{1}{N}\sum_{t=1}^{T} r_{m,t}$ Standard deviation of market returnThen calculate the standard deviation of the market return $\sigma_{m,t} = \sqrt{252 \times \frac{1}{N} \sum_{t=1}^{T}(r_{m,t} - \mu_{m})^2 } $class MarketVolatility(CustomFactor): inputs = [DailyReturns()] window_length = 1 # We'll want to set this in the constructor when creating the object. window_safe = True def compute(self, today, assets, out, returns): DAILY_TO_ANNUAL_SCALAR = 252. # 252 trading days in a year """ For each row (each row represents one day of returns), calculate the average of the cross-section of stock returns So that market_returns has one value for each day in the window_length So choose the appropriate axis (please see hints above) """ mkt_returns = np.nanmean(returns, axis=1) """ Calculate the mean of market returns """ mkt_returns_mu = np.nanmean(mkt_returns) """ Calculate the standard deviation of the market returns, then annualize them. 
""" out[:] = np.sqrt(DAILY_TO_ANNUAL_SCALAR * np.nanmean((mkt_returns-mkt_returns_mu)**2)) # create market volatility features using one month and six-month windows market_vol_20d = MarketVolatility(window_length=20) market_vol_120d = MarketVolatility(window_length=120) # add market volatility features to pipeline pipeline.add(market_vol_20d, 'market_vol_20d') pipeline.add(market_vol_120d, 'market_vol_120d')Run pipeline to calculate featuresall_factors = engine.run_pipeline(pipeline, factor_start_date, universe_end_date) all_factors.head(2)Make Date Parts* we make colums to for the trees to split on that might capture trader/investor behavior due to calendar anomalies.* We can get the dates from the index of the dataframe that is returned from running the pipeline January, December* Create a numpy array that has 1 when the month is January, and 0 otherwise. Store it as a column in the all_factors dataframe.* Add another similar column to indicate when the month is Decemberall_factors['is_January'] = (all_factors.index.get_level_values(0).month == 1).astype(int) all_factors['is_December'] = (all_factors.index.get_level_values(0).month == 12).astype(int)Weekday, quarter* add columns to the all_factors dataframe that specify the weekday, quarter and yearall_factors['weekday'] = all_factors.index.get_level_values(0).weekday all_factors['quarter'] = all_factors.index.get_level_values(0).quarter all_factors['year'] = all_factors.index.get_level_values(0).yearStart and end-of features* The start and end of the week, month, and quarter may have structural differences in trading activity.* [Pandas.date_range](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.date_range.html) takes the start_date, end_date, and frequency.* The [frequency](http://pandas.pydata.org/pandas-docs/stable/timeseries.htmloffset-aliases) for end of month is `BM`.all_factors['month_end'] = all_factors.index.get_level_values(0).isin(pd.date_range(start=factor_start_date, end=universe_end_date, freq='BM')).astype(int) all_factors['month_start'] = all_factors.index.get_level_values(0).isin(pd.date_range(start=factor_start_date, end=universe_end_date, freq='BMS')).astype(int) all_factors['qtr_end'] = all_factors.index.get_level_values(0).isin(pd.date_range(start=factor_start_date, end=universe_end_date, freq='BQ')).astype(int) all_factors['qtr_start'] = all_factors.index.get_level_values(0).isin(pd.date_range(start=factor_start_date, end=universe_end_date, freq='BQS')).astype(int) all_factors.columns features = list(all_factors.columns) featuresMake Targetpipeline_target = Pipeline(screen=universe)ExampleWe'll convert returns into 5-quantiles.return_5d_5q = Returns(window_length=5, mask=universe).quantiles(5) return_5d_5q pipeline_target.add(return_5d_5q, 'return_5d_5q') targets_df = engine.run_pipeline(pipeline_target, factor_start_date, universe_end_date) targets_df.head() targets_df.columns target_label = 'return_5d_5q' all_factors.index.get_level_values(1) targets_df.index.get_level_values(1)Split into training, validation and testdef split_into_sets(data, set_sizes): assert np.sum(set_sizes) == 1 last_i = 0 sets = [] for set_size in set_sizes: set_n = int(len(data) * set_size) sets.append(data[last_i:last_i + set_n]) last_i = last_i + set_n return sets def split_by_index(df, index_level, sets): set_indicies = split_into_sets(df.index.levels[index_level], sets) return [df.loc[indicies[0]:indicies[-1]] for indicies in set_indicies] # put the features and target into one dataframe before # running dropna, so that 
the rows match. tmp = all_factors.copy() tmp [target_label] = targets_df[target_label] tmp = tmp.dropna() X = tmp[features] y = tmp[target_label] X_train, X_valid, X_test = split_by_index(X, 0, [0.6, 0.2, 0.2]) y_train, y_valid, y_test = split_by_index(y, 0, [0.6, 0.2, 0.2]) X_train.shape y_train.shapeFit a random forestfrom sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier, ExtraTreesRegressor, RandomForestRegressor from sklearn.metrics import log_loss clf = RandomForestClassifier( n_estimators=10, max_features='sqrt', min_samples_split=5000, bootstrap=True, oob_score=True, n_jobs=-1, criterion='entropy', verbose=0, random_state=0 ) clf.fit(X_train, y_train)Rank features by Feature importance (sklearn)We'll define a function that uses the built in sklearn feature importances, and sorts the features by their feature importance.Note that [numpy.argsort](https://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.html) returns a list of the original index locations of a list, in the order that would make them sorted in ascending order.np.argsort([10,30,20])One way to reverse the order of a list or array is to use the notation `[::-1]`tmp = [3,2,1] tmp[::-1] def model_importances(m, features): # TODO: get the feature importances from the model importances = # TODO: sort the importances in descending order, and store the indices of that sort indices = """ Iterate through the features, starting with the ones with the highest feature importances """ features_ranked = [] for f in range(X_train.shape[1]): print("%d. %s (%d) (%f)" % (f+1,features[indices[f]], indices[f], importances[indices[f]])) features_ranked.append(features[indices[f]]) return features_rankedSee ranking of features according to sklearn's feature_importancesfeatures_skl = model_importances(clf, features)Using Shap libraryWe'll also use the Shap library to determine feature importance.import shap shap.initjs() #initialize javascript to enable visualizationsShap outputs https://shap.readthedocs.io/en/latest/```shap_values(X, y=None, tree_limit=-1, approximate=False)X:A matrix of samples ( samples x features) on which to explain the model’s output.tree_limit:Limit the number of trees used by the model.approximate:Run fast, but only roughly approximate the Tree SHAP values```>**For models with a single output this returns a matrix of SHAP values ( samples x features).** >Each row sums to the difference between the model output for that sample and the expected value of the model output (which is stored in the expected_value attribute of the explainer when it is constant). >**For models with vector outputs this returns a list of such matrices, one for each output.**# this will take a few seconds to run explainer = shap.TreeExplainer(clf) shap_values = explainer.shap_values(X_train, tree_limit=5)The classifier has 5 distinct outputsSo the shap_values is a list of 5 numpy arrays.set(y_train) len(shap_values)Each element of shap_values has one row for each training data point, and one column for each feature.# features, data points shap_values[0].shapelocal to global feature importanceSHAP calculates local feature importance for every training observation (every row). To calculate global feature importance, take the absolute values of the local feature importances and then take the average across all samples.$GlobalImportance_{i} = \frac{1}{N}\sum_{j=1}^{N} |LocalImportance_{i,j}|$ Where there are N samples, and $i$ denotes a particular feature. 
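The aggregation above can be written compactly as a helper. This is a reference sketch (the notebook walks through the same steps cell by cell below); `global_shap_importance` is a made-up name, and `shap_values` is assumed to be the list returned by `explainer.shap_values(...)`:

```python
import numpy as np

def global_shap_importance(shap_values):
    """Aggregate local SHAP values (a list of samples x features arrays,
    one per class) into one global importance value per feature."""
    # stack the per-class matrices into a single (classes*samples) x features matrix
    all_local = np.concatenate(shap_values)
    # mean of absolute local importances, taken down each feature column
    return np.nanmean(np.abs(all_local), axis=0)

# e.g. rank features from most to least important:
# global_importances = global_shap_importance(shap_values)
# indices = np.argsort(global_importances)[::-1]
```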
We can use the built in function to plot the features sorted by global feature importance. This is taking the average of the absolute values of the shapley values for each feature, to get the global feature importance.# mean of absolute values for each feature to get global feature importance shap.summary_plot(shap_values, X_train, plot_type="bar")Note that the plot shows the first 20 features. We can write our own function to calculate global feature importance, so that we can see the global feature importance of all features. Rank features using SHAPThere are a couple classes (one for each quantile). So the list returned by `shap.shap.TreeExplainer.shap_values()` has one element for each of those classes. We'll explore how to get the absolute values and then average of those absolute values, for each of the features. Then we can put this into a function.Remember, here's the formula to aggregate local feature importances into global feature importance:$GlobalImportance_{i} = \frac{1}{N}\sum_{j=1}^{N} |LocalImportance_{i,j}|$ We can concatenate the 2D arrays in the list `shap_values`.tmp1 = np.concatenate(shap_values) tmp1.shapeTake the absolute valuestmp2 = np.abs(tmp1) tmp2Take the average for each columntmp3 = np.nanmean(tmp2,axis=0) tmp3QuizImplement the function that calculates global feature importance using shapley values, and sorts the features by importance."""Challenge: try implementing the function yourself! """ def model_shap_importances(model, features,X): passYou can also use the starter code below, if you prefer:def model_shap_importances(model, features,X): """ Note that the observations should be numeric (integer or float). So booleans should be converted to 1 (True) and 0 (False) """ # TODO: calculate shap values shap_values = # TODO: concatenate the shap values into one matrix shap_values_matrix = # TODO: take the absolute values shap_abs = # TODO: Take the average for each feature (each column) global_importances = # TODO: get the indices sorted in descending order of global feature importance indices = features_ranked = [] for f in range(X.shape[1]): print("%d. %s (%d) (%f)" % (f+1,features[indices[f]], indices[f], global_importances[indices[f]])) features_ranked.append(features[indices[f]]) return features_ranked # this will take a few seconds to run features_ranked = model_shap_importances(clf,features,X_train) features_rankedDiscussion on sector- Random forests can still work with categorical features that are numbers. For instance, to filter features by sector '5', it's possible for a tree to split on sector 4. One of the reasons tree-based models are great is because they can still try to interpret data that hasn't been fully cleaned or processed. However it's still a best practice to one-hot encode categorical features, as this will help to reduce noise, and hopefully help the model's performance. Sector category namesYou'll one-hot encode the sector with sector labels in the project. 
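As a minimal sketch of what that one-hot encoding could look like (assuming the `sector_code` column added to the pipeline earlier is present in `all_factors`; the project's own encoding may differ):

```python
import pandas as pd

# one indicator column per sector id (the column names here are illustrative)
sector_dummies = pd.get_dummies(all_factors['sector_code'], prefix='sector')
all_factors = pd.concat([all_factors, sector_dummies], axis=1)
```

Tree-based models can split on the raw integer codes, but the indicator columns remove the artificial ordering between sector ids.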
Please see some code that can be useful when you assign category labels to each sector.## Sector Labels sector_names = pd.read_csv('sector_names.csv') sector_names = sector_names[['Sector','sector_id']] sector_names = sector_names.drop_duplicates() sector_names = sector_names.append(pd.DataFrame([['no sector assigned',-1]], columns = sector_names.columns)) sector_names # use this dataframe to get the sector name by sector id # here's an example tmp = sector_names.loc[sector_names['sector_id'] == 9]['Sector'].values[0] tmpOne-hot encode other features?Are there other features here that you could also one-hot encode? You can one-hot encode these other features in the project! date featuresThe low frequency date parts (end of month, end of quarter etc.) have low importance. This should not be surprising since we are looking at only a couple years of training history and there are only, say, 4 quarters in a year. End of month trading activity may occur some days before the last business day of the month. To better capture what we think of as end-of the month trading, we can try including the last 5 business days of the month. Similarly, we can try including the last two weeks of each quarter. Date features helper codeYou may find some of these functions useful in the project!* We can use [BDay](https://pandas.pydata.org/pandas-docs/stable/timeseries.html) to offset our date_range by a specified number of business days* Also, check out a list of [frequencies](http://pandas.pydata.org/pandas-docs/stable/timeseries.htmloffset-aliases) to choose from, such as `M`, `Q`. Examplefrom pandas.tseries.offsets import BDay tmp = all_factors.index.get_level_values(0) tmp tmp_1 = tmp + BDay(-1) tmp_1Notice how adding `Bday(-1)` to the DateTimeIndex `tmp` made another DateTimeIndex with the second to last business day of each month. UnionDatetimeIndex has a [union](https://pandas.pydata.org/pandas-docs/version/0.21/generated/pandas.DatetimeIndex.union.html) function that merges one DatetimeIndex with anothertmp_1.union(tmp)part 1puzzlelines = pathlib.Path('day24.txt').read_text().splitlines() inplines = [] for i,line in enumerate(puzzlelines): if 'inp' in line: inplines.append(i) inplines stanzas = {} stanza_length = 18 for i, inpline in enumerate(inplines): stanzas[i] = puzzlelines[inpline:inpline+stanza_length] ref_stanza = stanzas[0] ref_stanza not_the_same = {} for i, line in enumerate(ref_stanza): cmplines = [stanzas[j][i] for j in range(len(stanzas))] if all(cmpline == line for cmpline in cmplines): continue not_the_same[i] = cmplines not_the_same divzs = [int(line.split()[2]) for line in not_the_same[4]] addxs = [int(line.split()[2]) for line in not_the_same[5]] addys = [int(line.split()[2]) for line in not_the_same[15]] divzs addxs addys$$\require{cases}$$Each digit has the same processing, except for lines 4, 5, and 15 (indexing from 0): inp w mul x 0 add x z mod x 26 div z [1, 1, 1, 26, 1, 26, 1, 1, 26, 1, 26, 26, 26, 26] add x [12, 15, 11, -14, 12, -10, 11, 13, -7, 10, -2, -1, -4, -12] eql x w eql x 0 mul y 0 add y 25 mul y x add y 1 mul z y mul y 0 add y w add y [4, 11, 7, 2, 11, 13, 9, 12, 6, 2, 11, 12, 3, 13] mul y x add z yw, x, y, and z are set to zero for the first run digit input. After the first digit, the non-wregisters have whatever value is left over from the previous digit. In reality, though, x and y arecleared to 0 before being used, so the only register that persists is z. 
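Before annotating the stanza line by line, note that any conclusion drawn from this analysis can be sanity-checked by interpreting the raw instructions directly. A small ALU interpreter along these lines (a sketch, independent of the distilled `run_stanza` defined later) would do:

```python
def run_alu(program, digits):
    """Execute raw MONAD instructions (inp/add/mul/div/mod/eql) on a list of
    input digits and return the final register state (z is the one that matters)."""
    regs = {'w': 0, 'x': 0, 'y': 0, 'z': 0}
    inputs = iter(digits)
    for line in program:
        op, a, *rest = line.split()
        if op == 'inp':
            regs[a] = next(inputs)
            continue
        b = rest[0]
        b = regs[b] if b in regs else int(b)
        if op == 'add':
            regs[a] += b
        elif op == 'mul':
            regs[a] *= b
        elif op == 'div':
            regs[a] = int(regs[a] / b)   # truncate toward zero
        elif op == 'mod':
            regs[a] %= b
        elif op == 'eql':
            regs[a] = int(regs[a] == b)
    return regs

# e.g. run_alu(puzzlelines, [int(c) for c in '13579246899999'])['z']
```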
inp w <-- set digit w from input mul x 0 <-- x is cleared to 0, so doesn't depend on previous stanza add x z mod x 26 <-- set x to z_prev%26 div z z* <-- set z to z_prev//z*, where z* depends on which stanza is being runSo, at this point: $z_j = z_{j-1}//z^*_j$ and $x_j = z_{j-1}%26$. add x x* eql x w eql x 0 <-- 1 if w == z_prev%26 + x*, and 0 if w != z_prev%26 + x* $x = \begin{cases} 0, & z_{j-1}\% 26 = w - x^*_j \\ 1, & z_{j-1}\% 26 \neq w - x^*_j \end{cases} $ mul y 0 <-- y is cleared, so doesn't depend on previous stanza add y 25 mul y x add y 1 <-- y = 25*x + 1, so y = 1 if w == z_prev%26 + x*, and y = 26 if w != z_prev%26 + x*$y = \begin{cases} 1, & z_{j-1}\% 26 = w - x^*_j \\ 26, & z_{j-1}\% 26 \neq w - x^*_j \end{cases} $ mul z y <-- z is unchanged if w == z_prev%26 + x*, and z is multiplied by 26 if w != z_prev%26 + x*$z_j = \begin{cases} z_{j-1}//z^*_j, & z_{j-1}\% 26 = w - x^*_j \\ 26\cdot(z_{j-1}//z^*_j), & z_{j-1}\% 26 \neq w - x^*_j \end{cases} $ mul y 0 <-- clear y to 0 add y w add y y* mul y x add z y <-- z is unchanged if w == z%26 + x*, z += w + y* if w != z%26 + x*$z_j = \begin{cases} z_{j-1}//z^*_j, & z_{j-1}\% 26 = w - x^*_j \\ 26\cdot(z_{j-1}//z^*_j) + w + y^*_j, & z_{j-1}\% 26 \neq w - x^*_j \end{cases} $def run_stanza(w, zprev, j): z = zprev//divzs[j] if w == zprev%26 + addxs[j]: return z z *= 26 z += w + addys[j] return z def full_run(digits): z = 0 for j, w in enumerate(digits): z = run_stanza(w, z, j) return z fig,ax = plt.subplots() for w in range(1,10): zprevs = range(0, 131) ax.plot(zprevs, [run_stanza(w, zprev, 13) for zprev in zprevs], '.', label=w) plt.legend() ax.set_xlabel(r'$z_{j-1}$') ax.set_ylabel('$z_j$') ax.set_title('Last digit')Thinking is hard. How about we just brute-force it? Starting w/ z=0, what values of z are possible for each digit? That's likely to be far less than $9^{14}$.zvals = collections.defaultdict(set) zvals[0] = set([run_stanza(w, 0, 0) for w in range(1,10)]) for digit in range(1, 14): zvals[digit] = set([run_stanza(w, zprev, digit) for w in range(1, 10) for zprev in zvals[digit-1]]) print(digit, len(zvals[digit]))1 81 2 729 3 810 4 7290 5 8100 6 72900 7 656100 8 703728 9 6333552 10 6733044 11 6394302 12 6626691 13 6401629Now work backwards. 
What previous values of z for the last digit will give me 0?for w in range(1, 10): for zprev in zvals[12]: if run_stanza(w, zprev, 13) == 0: print(w, zprev)1 13Similarly for the penultimate digit, only now the z I want is the one that will give me 0 for the next digit.for w in range(1, 10): for zprev in zvals[11]: if run_stanza(w, zprev, 12) == 13: print(w, zprev)8 350 9 351Here there are two possible values of w that have an associated value of z that would work.We want the biggest number, so pick the larger value of w.for w in range(1, 10): for zprev in zvals[10]: if run_stanza(w, zprev, 11) == 351: print(w, zprev)9 9136Okay, it's slow, but doable, to do this for all of the digits.maxvals = [] next_z = 0 for digit in reversed(range(1, 14)): valid = collections.defaultdict(int) for w in range(1, 10): for zprev in zvals[digit-1]: if run_stanza(w, zprev, digit) == next_z: valid[w] = zprev print(digit, valid) maxdig = max(valid.keys()) maxvals.append(maxdig) next_z = valid[maxdig] for w in range(1, 10): if run_stanza(w, 0, 0) == next_z: w0 = w break maxvals.append(w0) full_run(reversed(maxvals)) ''.join(str(i) for i in reversed(maxvals + [w0]))part 2minvals = [] next_z = 0 for digit in reversed(range(1, 14)): valid = collections.defaultdict(int) for w in range(1, 10): for zprev in zvals[digit-1]: if run_stanza(w, zprev, digit) == next_z: valid[w] = zprev print(digit, valid) mindig = min(valid.keys()) minvals.append(mindig) next_z = valid[mindig] for w in range(1, 10): if run_stanza(w, 0, 0) == next_z: w0 = w break minvals.append(w0) print(reversed(minvals)) ''.join(str(c) for c in reversed(minvals)) full_run(reversed(minvals))Amazon SageMaker Processing jobsWith Amazon SageMaker Processing jobs, you can leverage a simplified, managed experience to run data pre- or post-processing and model evaluation workloads on the Amazon SageMaker platform.A processing job downloads input from Amazon Simple Storage Service (Amazon S3), then uploads outputs to Amazon S3 during or after the processing job.This notebook shows how you can:1. Run a processing job to run a scikit-learn script that cleans, pre-processes, performs feature engineering, and splits the input data into train and test sets.2. Run a training job on the pre-processed training data to train a model3. Run a processing job on the pre-processed test data to evaluate the trained model's performance4. Use your own custom container to run processing jobs with your own Python libraries and dependencies. 
Data pre-processing and feature engineering To run the scikit-learn preprocessing script as a processing job, create a `SKLearnProcessor`, which lets you run scripts inside of processing jobs using the scikit-learn image provided.import boto3 import sagemaker import pandas as pd from sagemaker.sklearn.processing import SKLearnProcessor from time import gmtime, strftime sess = sagemaker.Session() bucket = sess.default_bucket() role = sagemaker.get_execution_role() region = boto3.Session().region_name #sm = boto3.Session().client(service_name='sagemaker', region_name=region) timestamp_prefix = strftime("%Y-%m-%d-%H-%M-%S", gmtime()) output_prefix = 'amazon-reviews-scikit-processor-{}'.format(timestamp_prefix) processing_job_name = 'amazon-reviews-scikit-processor-{}'.format(timestamp_prefix) print('Processing job name: {}'.format(processing_job_name))Processing job name: amazon-reviews-scikit-processor-2020-03-02-04-59-28Before introducing the script you use for data cleaning, pre-processing, and feature engineering, inspect the first 20 rows of the dataset. The target is predicting the `income` category. The features from the dataset you select are `age`, `education`, `major industry code`, `class of worker`, `num persons worked for employer`, `capital gains`, `capital losses`, and `dividends from stocks`.import pandas as pd input_data = 's3://sagemaker-sample-data-{}/processing/census/census-income.csv'.format(region) df = pd.read_csv(input_data, nrows=10) df.head(n=10) sklearn_processor = SKLearnProcessor(framework_version='0.20.0', role=role, instance_type='ml.m5.xlarge', instance_count=3)TODO: Fix thisThis notebook cell writes a file `preprocessing.py`, which contains the pre-processing script. You can update the script, and rerun this cell to overwrite `preprocessing.py`. You run this as a processing job in the next cell. In this script, you* Remove duplicates and rows with conflicting data* transform the target `income` column into a column containing two labels.* transform the `age` and `num persons worked for employer` numerical columns into categorical features by binning them* scale the continuous `capital gains`, `capital losses`, and `dividends from stocks` so they're suitable for training* encode the `education`, `major industry code`, `class of worker` so they're suitable for training* split the data into training and test datasets, and saves the training features and labels and test features and labels.Our training script will use the pre-processed training features and labels to train a model, and our model evaluation script will use the trained model and pre-processed test features and labels to evaluate the model. Run this script as a processing job. Use the `SKLearnProcessor.run()` method. You give the `run()` method one `ProcessingInput` where the `source` is the census dataset in Amazon S3, and the `destination` is where the script reads this data from, in this case `/opt/ml/processing/input`. These local paths inside the processing container must begin with `/opt/ml/processing/`.Also give the `run()` method a `ProcessingOutput`, where the `source` is the path the script writes output data to. 
For outputs, the `destination` defaults to an S3 bucket that the Amazon SageMaker Python SDK creates for you, following the format `s3://sagemaker--//output/from sagemaker.processing import ProcessingInput, ProcessingOutput sklearn_processor.run(code='preprocessing.py', inputs=[ProcessingInput( source=input_data, destination='/opt/ml/processing/input')], outputs=[ProcessingOutput(output_name='train_data', source='/opt/ml/processing/train'), ProcessingOutput(output_name='validation_data', source='/opt/ml/processing/validation'), ProcessingOutput(output_name='test_data', source='/opt/ml/processing/test')], arguments=['--train-test-split-ratio', '0.2'] ) preprocessing_job_description = sklearn_processor.jobs[-1].describe() output_config = preprocessing_job_description['ProcessingOutputConfig'] for output in output_config['Outputs']: if output['OutputName'] == 'train_data': preprocessed_train_data = output['S3Output']['S3Uri'] if output['OutputName'] == 'validation_data': preprocessed_validation_data = output['S3Output']['S3Uri'] if output['OutputName'] == 'test_data': preprocessed_test_data = output['S3Output']['S3Uri'] from IPython.core.display import display, HTML display(HTML('Review
CloudWatch Logs After About 5 Minutes'.format(region, processing_job_name)))Now inspect the output of the pre-processing job, which consists of the processed features.train_features = pd.read_csv(preprocessed_train_data + '/train_features.csv', nrows=10) print('Training features shape: {}'.format(train_features.shape)) train_features.head(n=10)Training features shape: (10, 73)Training using the pre-processed dataWe create a `SKLearn` instance, which we will use to run a training job using the training script `train.py`.from sagemaker.sklearn.estimator import SKLearn sklearn = SKLearn( entry_point='train.py', train_instance_type="ml.m5.xlarge", role=role)The training script `train.py` trains a logistic regression model on the training data, and saves the model to the `/opt/ml/model` directory, which Amazon SageMaker tars and uploads into a `model.tar.gz` file into S3 at the end of the training job. Run the training job using `train.py` on the preprocessed training data.sklearn.fit({'train': preprocessed_train_data}) train_job_description = sklearn.jobs[-1].describe() model_data_s3_uri = '{}{}/{}'.format( train_job_description['OutputDataConfig']['S3OutputPath'], train_job_description['TrainingJobName'], 'output/model.tar.gz')Model Evaluation`evaluation.py` is the model evaluation script. Since the script also runs using scikit-learn as a dependency, run this using the `SKLearnProcessor` you created previously. This script takes the trained model and the test dataset as input, and produces a JSON file containing classification evaluation metrics, including precision, recall, and F1 score for each label, and accuracy and ROC AUC for the model.import json from sagemaker.s3 import S3Downloader sklearn_processor.run(code='evaluation.py', inputs=[ProcessingInput( source=model_data_s3_uri, destination='/opt/ml/processing/model'), ProcessingInput( source=preprocessed_test_data, destination='/opt/ml/processing/test')], outputs=[ProcessingOutput(output_name='evaluation', source='/opt/ml/processing/evaluation')] ) evaluation_job_description = sklearn_processor.jobs[-1].describe()Now retrieve the file `evaluation.json` from Amazon S3, which contains the evaluation report.evaluation_output_config = evaluation_job_description['ProcessingOutputConfig'] for output in evaluation_output_config['Outputs']: if output['OutputName'] == 'evaluation': evaluation_s3_uri = output['S3Output']['S3Uri'] + '/evaluation.json' break evaluation_output = S3Downloader.read_file(evaluation_s3_uri) evaluation_output_dict = json.loads(evaluation_output) print(json.dumps(evaluation_output_dict, sort_keys=True, indent=4))Running processing jobs with your own dependenciesAbove, you used a processing container that has scikit-learn installed, but you can run your own processing container in your processing job as well, and still provide a script to run within your processing container.Below, you walk through how to create a processing container, and how to use a `ScriptProcessor` to run your own code within a container. Create a scikit-learn container and run a processing job using the same `preprocessing.py` script you used above. You can provide your own dependencies inside this container to run your processing script with. 
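The Dockerfile that the build step below expects under the local `docker/` directory is not reproduced in this notebook. A minimal sketch of what it might contain (the base image and library choices are assumptions, and the `docker/` directory is assumed to exist):

```python
%%writefile docker/Dockerfile
FROM python:3.7-slim-buster

# install the libraries the processing script needs
RUN pip3 install pandas scikit-learn

ENV PYTHONUNBUFFERED=TRUE

ENTRYPOINT ["python3"]
```

Anything else `preprocessing.py` imports would be added to the `pip3 install` line.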
This block of code builds the container using the `docker` command, creates an Amazon Elastic Container Registry (Amazon ECR) repository, and pushes the image to Amazon ECR.import boto3 account_id = boto3.client('sts').get_caller_identity().get('Account') ecr_repository = 'sagemaker-processing-container' tag = ':latest' processing_repository_uri = '{}.dkr.ecr.{}.amazonaws.com/{}'.format(account_id, region, ecr_repository + tag) # Create ECR repository and push docker image !docker build -t $ecr_repository docker !$(aws ecr get-login --region $region --registry-ids $account_id --no-include-email) !aws ecr create-repository --repository-name $ecr_repository !docker tag {ecr_repository + tag} $processing_repository_uri !docker push $processing_repository_uriThe `ScriptProcessor` class lets you run a command inside this container, which you can use to run your own script.from sagemaker.processing import ScriptProcessor script_processor = ScriptProcessor(command=['python3'], image_uri=processing_repository_uri, role=role, instance_count=1, instance_type='ml.m5.xlarge')Run the same `preprocessing.py` script you ran above, but now, this code is running inside of the Docker container you built in this notebook, not the scikit-learn image maintained by Amazon SageMaker. You can add the dependencies to the Docker image, and run your own pre-processing, feature-engineering, and model evaluation scripts inside of this container.script_processor.run(code='preprocessing.py', inputs=[ProcessingInput( source=input_data, destination='/opt/ml/processing/input')], outputs=[ProcessingOutput(output_name='train_data', source='/opt/ml/processing/train'), ProcessingOutput(output_name='test_data', source='/opt/ml/processing/test')], arguments=['--train-test-split-ratio', '0.2'] ) script_processor_job_description = script_processor.jobs[-1].describe() print(script_processor_job_description)データのチェック___import numpy as np import matplotlib import matplotlib.pyplot as plt import tensorflow as tf from tensorflow import keras %matplotlib inline %config InlineBackend.figure_formats = {'png', 'retina'}データのロードfashion_mnist = keras.datasets.fashion_mnist (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data() class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] print(train_images.shape, test_images.shape) for i, name in enumerate(class_names): train_num = np.sum(train_labels == i) test_num = np.sum(test_labels == i) print(f"{name}: train: {train_num}, test: {test_num}") l = 10 h = 3 plt.figure(figsize=(15, 6)) for i, (img, label) in enumerate(zip(train_images[:l*h], train_labels[:l*h]), start=1): plt.subplot(h, l, i) plt.imshow(img, cmap="gray") plt.title(class_names[label]) plt.show()Basic analyzing and plottingThis tutorial will go over the basics of analyzing `eggs`, the primary data structure used in `quail`. To learn about how an egg is set up, see the egg tutorial.An egg is made up of (at minimum) the words presented to a subject and the words recalled by the subject. With these, two components we can perform a number of analyses:1. **Recall Accuracy** - the proportion of words presented that were later recalled2. **Serial Position Curve** - recall accuracy as a function of the encoding position of the word3. **Probability of First Recall** - the probability that a word will be recalled first as a function of its encoding position4. 
**Lag-CRP** - given the recall of word n, the probability of recalling words at neighboring positions (n+/-1, 2, 3 etc).5. **Temporal Clustering** - a measure of recall clustering by temporal proximity during encodingIf we have a set of features for the stimuli, we can also compute a **Memory Fingerprint**, which is an estimate of how a subject clusters their recall responses with respect to features of a stimulus (see the fingerprint tutorial for more on this).Let's get to analyzing some `eggs`. First, we'll load in some example data:import quail egg = quail.load_example_data() # -*- coding: utf-8 -*- import numpy as np import quail from quail import Fingerprint, OptimalPresenter # generate some fake data next_presented = ['CAT', 'DOG', 'SHOE', 'HORSE', 'SNAIL', 'FOOT', 'CAR', 'ARM', 'UTAH', 'NEW YORK', 'TRUCK', 'EAR', 'ARIZONA', 'BIKE', 'STROLLER', 'TOE'] next_recalled = ['HORSE', 'DOG', 'CAT'] next_features = [{ 'category' : 'animal', 'size' : 'bigger', 'starting letter' : 'C', 'length' : 3 }, { 'category' : 'animal', 'size' : 'bigger', 'starting letter' : 'D', 'length' : 3 }, { 'category' : 'object', 'size' : 'smaller', 'starting letter' : 'S', 'length' : 4 }, { 'category' : 'animal', 'size' : 'bigger', 'starting letter' : 'H', 'length' : 5 }, { 'category' : 'animal', 'size' : 'bigger', 'starting letter' : 'S', 'length' : 5 }, { 'category' : 'body part', 'size' : 'smaller', 'starting letter' : 'F', 'length' : 4 }, { 'category' : 'transportation', 'size' : 'bigger', 'starting letter' : 'C', 'length' : 3 }, { 'category' : 'body part', 'size' : 'bigger', 'starting letter' : 'A', 'length' : 3 } ] dist_funcs = { 'category' : lambda a, b: int(a!=b), 'size' : lambda a, b: int(a!=b), 'starting letter' : lambda a, b: int(a!=b), 'length' : lambda a, b: np.linalg.norm(np.subtract(a,b)) } egg = quail.Egg(pres=[next_presented], rec=[next_recalled], features=[next_features]) # initialize fingerprint fingerprint = Fingerprint(init=egg) # initialize presenter params = { 'fingerprint' : fingerprint } presenter = OptimalPresenter(params=params) # update the fingerprint fingerprint.update(egg) %%timeit resorted_egg = presenter.order_perm(egg, nperms=10000)1 loop, best of 3: 4.31 s per loopThis dataset is comprised of 30 subjects, who each performed 8 study/test blocks of 16 words each. Here are some of the presented words:egg.pres.head()and some of the recalled words:egg.rec.head()We can start with the simplest analysis - recall accuracy - which is just the proportion of words recalled that were in the encoding lists. To compute accuracy, simply call the `analyze` function, with the `analysis` key word argument set to `accuracy`: Recall Accuracyaccuracy = quail.analyze(egg, analysis='accuracy') accuracy.head()The result is a multi-index Pandas DataFrame where the first-level index is the subject identifier and the second level index is the list number. By default, note that each list is analyzed separately. However, you can easily return the average over lists using the `listgroup` kew word argument:accuracy_avg = quail.analyze(egg, analysis='accuracy', listgroup=['average']*8) accuracy_avg.head()Now, the result is a single value for each subject representing the average accuracy across the 16 lists. 
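Independent of quail's internals, the accuracy metric itself is easy to state; a toy illustration with plain Python lists (deliberately not using the quail API) makes the definition concrete:

```python
presented = ['CAT', 'DOG', 'SHOE', 'HORSE']
recalled = ['HORSE', 'DOG', 'CAT']

# proportion of presented words that were later recalled
accuracy = len(set(recalled) & set(presented)) / len(presented)
print(accuracy)  # 0.75
```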
The `listgroup` kwarg can also be used to do some fancier groupings, like splitting the data into the first and second half of the experiment:accuracy_split = quail.analyze(egg, analysis='accuracy', listgroup=['First Half']*4+['Second Half']*4) accuracy_split.head()These analysis results can be passed directly into the plot function like so:ax = quail.plot(accuracy_split)For more details on plotting, see the plot tutorial. Next, lets take a look at the serial position curve analysis. As stated above the serial position curve (or spc) computes recall accuracy as a function of the encoding position of the word. To use it, use the same `analyze` function illustrated above, but set the `analysis` kwarg to `spc`. Let's also average across lists within subject: Serial Position Curvespc = quail.analyze(egg, analysis='spc', listgroup=['average']*8) spc.head()The result is a df where each row is a subject and each column is the encoding position of the word. To plot, simply pass the result of the analysis function to the plot function:ax = quail.plot(spc)Probability of First RecallThe next analysis we'll take a look at is the probability of first recall, which is the probability that a word will be recalled first as a function of its encoding position. To compute this, call the `analyze` function with the `analysis` kwarg set to `pfr`. Again, we'll average over lists:pfr = quail.analyze(egg, analysis='pfr', listgroup=['average']*8) pfr.head()This df is set up just like the serial position curve. To plot:ax = quail.plot(pfr)Lag-CRPThe next analysis to consider is the lag-CRP, which again is a function that given the recall of word n, returns the probability of recalling words at neighboring positions (n+/-1, 2, 3 etc). To use it? You guessed it: call the `analyze` function with the `analysis` kwarg set to `lagcrp`:lagcrp = quail.analyze(egg, analysis='lagcrp', listgroup=['average']*8) lagcrp.head()Unlike the previous two analyses, the result of this analysis returns a df where the number of columns are double the length of the lists. To view the results:ax=quail.plot(lagcrp)Temporal clusteringAnother way to evaluate temporal clustering is to measure the temporal distance of each transition made with respect to where on a list the subject could have transitioned. This 'temporal clustering score' is a good summary of how strongly participants are clustering their responses according to temporal proximity during encoding.temporal = quail.analyze(egg, analysis='temporal', listgroup=['average']*8) ax = quail.plot(temporal, plot_style='violin', ylim=[0,1])Memory FingerprintLast but not least is the memory fingerprint analysis. For a detailed treatment of this analysis, see the fingerprint tutorial.As described in the fingerprint tutorial, the `features` data structure is used to estimate how subjects cluster their recall responses with respect to the features of the encoded stimuli. Briefly, these estimates are derived by computing the similarity of neighboring recall words along each feature dimension. For example, if you recall "dog", and then the next word you recall is "cat", your clustering by category score would increase because the two recalled words are in the same category. Similarly, if after you recall "cat" you recall the word "can", your clustering by starting letter score would increase, since both words share the first letter "c". 
This logic can be extended to any number of feature dimensions.To use this analysis function, you'll need to include a `features` field when you create your `egg`. Our example data has this field included. For more info on how to create this field, see the egg and fingerprint tutorials.Here is a glimpse of the features df:egg.features.head()Like the other analyses, computing the memory fingerprint can be done using the `analyze` function with the `analysis` kwarg set to `fingerprint`:fingerprint = quail.analyze(egg, analysis='fingerprint', listgroup=['average']*8) fingerprint.head()The result of this analysis is a df, where each row is a subject's fingerprint and each column is a feature dimensions. The values represent a subjects tendency to cluster their recall responses along a particular feature dimensions. They are probability values, and thus, greater values indicate more clustering along that feature dimension. To plot, simply pass the result to the plot function:ax = quail.plot(fingerprint, ylim=[0,1.2])This result suggests that subjects in this example dataset tended to cluster their recall responses by category as well as the size (bigger or smaller than a shoebox) of the word. List length and other properties of your experiment can bias these clustering scores. To help with this, we implemented a permutation clustering procedure which shuffles the order of each recall list and recomputes the clustering score with respect to that distribution. Note: this also works with the temporal clustering analysis.# warning: this can take a little while. Setting parallel=True will help speed up the permutation computation # fingerprint = quail.analyze(egg, analysis='fingerprint', listgroup=['average']*8, permute=True, n_perms=100) # ax = quail.plot(fingerprint, ylim=[0,1.2])Finally, the fingerprint can be plotted along side of the temporal clustering score:fingerprint_temporal = quail.analyze(egg, analysis='fingerprint_temporal', listgroup=['average']*8) ax = quail.plot(fingerprint_temporal)Import investigator package of the PyOTIC software# Import os to easily join names of filepaths import os # Add the path of the PyOTIC Software to the system path # Adjust this path to where the PyOTIC Software package is located import sys sys.path.append('../../') #Load investigator package import pyoti pyoti.info() #Create an experiment experiment = pyoti.create_experiment()Create experiment file (or open previously saved one)# Choose the path, were the experiment should be created (or opened from) # # datadir: The path to where the experiment (and the data) are located # datafile: The name of the file that contains the data. Here it is only used to generate dbfile. # The data is loaded further down upon creation of a Record. # dbfile: The name of the database file the experiment is saved to (or loaded from). 
datadir = '../data/' datafile = 'B01.bin' # For the name of the experiment, exchange the extension '.bin' with '.fs' dbfile = os.path.join(datadir, datafile.replace('.bin', '.fs')) # Create/open the experiment dbfile experiment.open(dbfile) #datadir = '/srv/files/common/Practicals/SingleMoleculeBiophysics SS2015/ASWAD 2015-09-24/' #datadir = 'Z:\\Practicals\\SingleMoleculeBiophysics SS2015\\ASWAD 2015-09-24\\' # show status of Records, Views, MultiRegions, and Modifications in experiment experiment.print_status() # cleanup/pack database file experiment.cleanup() # save the state of the experiment in the database file experiment.save(pack=True) # revert changes since last commit of experiment experiment.abort() # close database file experiment.close()Create a calibration# Choose the calibration type that should be created. # See 'pyoti/etc/calibration.cfg' for known types. # If you choose an unknown type, a generic calibration is created. calibration_type='pyoticf' # You can provide a calibration file, where previously stored calibration values are loaded from. # Make sure to set a proper corresponding calibration_type, which will load the files provided. calibdir = os.path.join("..", "calibration", "converted_data") #calibfile = 'B01__hc_results.txt' calibfile = datafile.replace('.bin', '__hc_results.txt') # Create a calibration and assign it to the variable 'calibration' calibration = pyoti.create_calibration(calibration_type=calibration_type, filename=calibfile, directory=calibdir) #calibdir = os.path.join(datadir, 'analysis') #calibdir = os.path.join('/home/tobiasj/experiments/ASWAD/2013-12-18/flow_cell_c', 'hdcalibration/analysis') #calibdir = '/media/tobiasj/cbd_drive/data/ASWAD/2015-10-28 - unzipping/analysis/'Create record(s) and add to experiment Either: Define a generic function to read in the data and create a record:# Define a name for the record (defaults to 'default') name='alpha' # Define a function that is used to read in the data you want to analyze. # The function needs to receive at least the positional parameter 'filename'. # The return value of the function needs to be the data as a numpy array. # You can (beside other options) use functions that the package numpy offers to read in data: # http://docs.scipy.org/doc/numpy/reference/routines.io.html # # One example, to read in data from a text file with 5 header lines followed by the data, # could look like this: import numpy as np import pyoti.data.labview as lv import os def load_data(filename): #data = np.loadtxt(filename, skiprows=5) data = lv.read_labview_bin_data(filename)[:,0:3] return data # Define the samplingrate (either provide a function or simply a variable). # The function gets executed once, upon initialisation of the record. The # return value of the function (or the value of the variable) gets stored in # the record object: def samplingrate(): samplingrate = 40000.0 return samplingrate #samplingrate = 40000.0 # Name the traces here, the load_data() function returns. Make sure the # traces are properly describing the data returned by load_data function. # This definition takes precedence over the traces defined in the # configfile (see below) traces = [ 'psdX', 'psdY', 'psdZ' ] # You can provide a configfile, which, for instance, defines the traces returned by load_data(). # If not provided, configfile defaults to '../pyoti/etc/GenericDataFile.cfg'. # You could also create your own setup specific configfile and use GenericDataFile as a template. 
# Make sure to also add the parameter cfgfile to the function call below, if you define a cfgfile, # like: experiment.create_record(cfgfile=cfgfile, ...) #cfgfile = '../pyoti/etc/record/GenericDataFile.cfg' record = experiment.create_record(name=name, calibration=calibration, traces=traces, load_data=load_data, filename=datafile, directory=datadir, samplingrate=samplingrate)Or: Read in a record for a predefined setup:# Define a name for the record (defaults to 'default') name='alpha' # Choose the file, where standard values for the Record are defined cfgfile = '../pyoti/etc/record/ASWAD.cfg' experiment.create_record(name=name, calibration=calibration, cfgfile=cfgfile, filename=datafile, directory=datadir) # Create/load additional records (e.g. extra_unzipping or beadscan) name = 'beta' extradatadir = datadir extradatafile = 'B01b.bin' experiment.create_record(name=name, calibration=calibration, cfgfile=cfgfile, filename=extradatafile, directory=extradatadir) #experiment.records.beta.calibration = experiment.records.alpha.calibration name = 'generic' group = 'modification' parent = 'used' traces_apply=['psdX', 'psdYZ'] extra_mod_params='factor' import numpy as np def modify(self, data, samples, data_traces, data_index, mod_index): # data: Contains the data, indexed by samples and data_traces # samples: Is the index of the samples contained in data, which was # given/asked by the user/process who called _get_data(). # data_traces: Contains a list of traces (str) existent in data, which # was given/asked by the user/process who called _get_data(). # data_index: data[:,data_index] gives the data, which is modified by # this modification (defined by traces_apply) # mod_index: numpy.array(self.traces_apply)[mod_index] gives the traces, # which are existent in data and also modified by this modfication # self.mod_params[mod_index] gives the mod_params of the traces # self.mod_params gives a list of all available mod_parameters # self.get_mod_params(names=...) returns a list with the # mod_parameters with names=... # self.name gives the mod_parameter with name name # # Modify and return the data ... 
print('Modifying the data ...') # # # A simple example of a modification (subtraction of the mod_param multiplied with # the extra_mod_param factor from traces): #data[:, data_index] -= self.mod_params[np.newaxis, mod_index] * self.factor # return data experiment.add_group(name, parent, group_type=group, adjust=True, modify=modify, traces_apply=traces_apply, mod_params=extra_mod_params)Analyse and modify dataname = 'used' group = 'selection' parent = 'alpha' experiment.add_group(name, parent, group_type=group) name = 'used_beta' group = 'selection' parent = 'beta' experiment.add_group(name, parent, group_type=group) name = 'offset' group = 'offset' parent = 'used' experiment.add_group(name, parent, group_type=group) name = 'offset_beta' group = 'offset' parent = 'used_beta' experiment.add_group(name, parent, group_type=group) experiment.concatenate('offset_concatenated', 'offset', 'offset_beta') name = 'touchdown' group = 'touchdown' parent = 'offset' experiment.add_group(name, parent, group_type=group) experiment.replace_in('touchdown', 'offset', 'offset_concatenated') experiment.replace_in('touchdown_mod', 'offset', 'offset_concatenated') name = 'beadscan' group = 'beadscan' parent = 'touchdown' experiment.add_group(name, parent, group_type=group) experiment.remove('beadscan') experiment.remove('beadscan_mod') name = 'attachment' group = 'attachment' parent = 'beadscan' experiment.add_group(name, parent, group_type=group) name = 'attachment_2nd' group = 'attachment' parent = 'attachment' experiment.add_group(name, parent, group_type=group) name = 'baseline' group = 'baseline' parent = 'attachment_2nd' experiment.add_group(name, parent, group_type=group) name = 'rotation' group = 'rotation' parent = 'baseline' experiment.add_group(name, parent, group_type=group) name = 'rotation_2nd' group = 'rotation' parent = 'rotation' experiment.add_group(name, parent, group_type=group)Select data to generate the resultsname = 'results' group = 'selection' parent = 'rotation_2nd' # traces used to select data from traces = ['psdXYZ', 'positionXYZ'] results_region = experiment.add_group(name, parent, group_type=group, traces=traces) # Enable caching for results region, for faster data return experiment.set_cached_region(name) # Choose resolution for presentation of data (extension, force) resolution = 1000 # points/s resolution # Create Result objects to obtain force and extension tether = pyoti.create_tether(region=results_region, resolution=resolution) # Show the autodetected minima, maxima and sections #tether._sf.highest_frequency=32 #tether._sf.reduce_false_positives = True #tether._sf.compare_time = 0.005 tether.update() tether.init_rfig(legend=True) # Create force extension curves prefix = ''.join((os.path.splitext(os.path.basename(experiment.filename))[0], "_")) resultsdir = os.path.join("..", "results") # Save force extension stress/release pair plots tether.save_force_extension_plots(directory=resultsdir, file_prefix=prefix, bps=9018) # Display saved force extension stress/release pair plots # pyoti.gui.browse_images(directory=resultsdir, prefix=prefix) # Display force extension stress/release pair plots tether.init_fe_fig() tether.show_force_extension_plots(bps=1399, autolimit=False) # plot Timecourse of Extension plt.close('all') plt.figure() plt.grid(True) # Timevector, extension and stress/release pairs t = tether.timevector e = tether.extension pl, ps = tether.stress_release_pairs() # Plot all stress/release extension/timevector sections for pl, ps in zip(pl, ps): plt.plot(t[pl], e[pl] * 
1000, 'g.', ms=1.0) plt.plot(t[ps], e[ps] * 1000, 'r.', ms=1.0) plt.title('Timecourse of extension') plt.ylabel("Extension (nm)") plt.xlabel("Time (s)") plt.show(plt.gcf()) plt.close() plt.close('all') plt.figure() plt.grid(True) fXYZ = tether.forceXYZ rpl_ = tether.sections(direction='right', cycle='stress') lpl_ = tether.sections(direction='left', cycle='stress') for rpl in rpl_: plt.plot(fXYZ[rpl, 1] * 1000, fXYZ[rpl, 2] * 1000, 'r') for lpl in lpl_: plt.plot(fXYZ[lpl, 1] * 1000, fXYZ[lpl ,2] * 1000, 'g') excited_axis = results_region.excited_axis plt.xlabel(''.join(("Force (", excited_axis,")"))) plt.ylabel("Force (Z)") plt.title("Y vs. Z") plt.show(plt.gcf()) plt.close()Convolutional Layers in PyTorchTo create a convolutional layer in PyTorch, you must first import the necessary module:import torch.nn as nnThen, there is a two part process to defining a convolutional layer and defining the feedforward behavior of a model (how an input moves through the layers of a network). First, you must define a Model class and fill in two functions.initYou can define a convolutional layer in the __init__ function of by using the following format:self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0)forwardThen, you refer to that layer in the forward function! Here, I am passing in an input image x and applying a ReLU function to the output of this layer.x = F.relu(self.conv1(x))ArgumentsYou must pass the following arguments:in_channels - The number of inputs (in depth), 3 for an RGB image, for example.out_channels - The number of output channels, i.e. the number of filtered "images" a convolutional layer is made of or the number of unique, convolutional kernels that will be applied to an input.kernel_size - Number specifying both the height and width of the (square) convolutional kernel.There are some additional, optional arguments that you might like to tune:stride - The stride of the convolution. If you don't specify anything, stride is set to 1.padding - The border of 0's around an input array. If you don't specify anything, padding is set to 0.NOTE: It is possible to represent both kernel_size and stride as either a number or a tuple.There are many other tunable arguments that you can set to change the behavior of your convolutional layers. To read more about these, we recommend perusing the official documentation.Pooling LayersPooling layers take in a kernel_size and a stride. Typically the same value as is the down-sampling factor. For example, the following code will down-sample an input's x-y dimensions, by a factor of 2:self.pool = nn.MaxPool2d(2,2)forwardHere, we see that poling layer being applied in the forward function.x = F.relu(self.conv1(x))x = self.pool(x)Convolutional Example 1Say I'm constructing a CNN, and my input layer accepts grayscale images that are 200 by 200 pixels (corresponding to a 3D array with height 200, width 200, and depth 1). Then, say I'd like the next layer to be a convolutional layer with 16 filters, each filter having a width and height of 2. When performing the convolution, I'd like the filter to jump two pixels at a time. I also don't want the filter to extend outside of the image boundaries; in other words, I don't want to pad the image with zeros. Then, to construct this convolutional layer, I would use the following line of code:self.conv1 = nn.Conv2d(1, 16, 2, stride=2)Convolutional Example 2Say I'd like the next layer in my CNN to be a convolutional layer that takes the layer constructed in Example 1 as input. 
Say I'd like my new layer to have 32 filters, each with a height and width of 3. When performing the convolution, I'd like the filter to jump 1 pixel at a time. I want this layer to have the same width and height as the input layer, and so I will pad accordingly. Then, to construct this convolutional layer, I would use the following line of code:self.conv2 = nn.Conv2d(16, 32, 3, padding=1)Convolution with 3x3 window and stride 1Image source: http://iamaaditya.github.io/2016/03/one-by-one-convolution/Sequential ModelsWe can also create a CNN in PyTorch by using a Sequential wrapper in the __init__ function. Sequential allows us to stack different types of layers, specifying activation functions in between!def __init__(self): super(ModelName, self).__init__() self.features = nn.Sequential( nn.Conv2d(1, 16, 2, stride=2), nn.MaxPool2d(2, 2), nn.ReLU(True), nn.Conv2d(16, 32, 3, padding=1), nn.MaxPool2d(2, 2), nn.ReLU(True) )Formula: Number of Parameters in a Convolutional LayerThe number of parameters in a convolutional layer depends on the supplied values of filters/out_channels, kernel_size, and input_shape. Let's define a few variables:K - the number of filters in the convolutional layerF - the height and width of the convolutional filtersD_in - the depth of the previous layerNotice that K = out_channels, and F = kernel_size. Likewise, D_in is the last value in the input_shape tuple, typically 1 or 3 (RGB and grayscale, respectively).Since there are F*F*D_in weights per filter, and the convolutional layer is composed of K filters, the total number of weights in the convolutional layer is K*F*F*D_in. Since there is one bias term per filter, the convolutional layer has K biases. Thus, the number of parameters in the convolutional layer is given by K*F*F*D_in + K.Formula: Shape of a Convolutional LayerThe shape of a convolutional layer depends on the supplied values of kernel_size, input_shape, padding, and stride. Let's define a few variables:K - the number of filters in the convolutional layerF - the height and width of the convolutional filtersS - the stride of the convolutionP - the paddingW_in - the width/height (square) of the previous layerNotice that K = out_channels, F = kernel_size, and S = stride. Likewise, W_in is the first and second value of the input_shape tuple.The depth of the convolutional layer will always equal the number of filters K.The spatial dimensions of a convolutional layer can be calculated as: (W_in−F+2P)/S+1FlatteningPart of completing a CNN architecture, is to flatten the eventual output of a series of convolutional and pooling layers, so that all parameters can be seen (as a vector) by a linear classification layer. At this step, it is imperative that you know exactly how many parameters are output by a layer.For the following quiz questions, consider an input image that is 130x130 (x, y) and 3 in depth (RGB). 
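Before stepping through the quiz layers, the two formulas above can be turned into small helpers for checking the arithmetic. This is a sketch — `conv_output_size` and `conv_num_params` are made-up names, not part of PyTorch:

```python
def conv_output_size(w_in, kernel_size, stride=1, padding=0):
    """Spatial size of a square conv/pool output: (W_in - F + 2P) / S + 1."""
    return (w_in - kernel_size + 2 * padding) // stride + 1

def conv_num_params(k, f, d_in):
    """Parameters in a conv layer: K*F*F*D_in weights plus K biases."""
    return k * f * f * d_in + k

# e.g. nn.Conv2d(3, 10, 3) applied to a 130x130x3 input:
print(conv_output_size(130, kernel_size=3))  # 128
print(conv_num_params(10, 3, 3))             # 280
```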
Say, this image goes through the following layers in order:nn.Conv2d(3, 10, 3)nn.MaxPool2d(4, 4)nn.Conv2d(10, 20, 5, padding=2)nn.MaxPool2d(2, 2)n = 9 n%2 while(True): n = int(input()) if (n % 2 )== 1: print("Weird") if (n % 2) == 0 and (n >= 2) and (n <= 5): print("Not Weird") if (n % 2 == 0) and (n >= 6) and (n <= 20): print("Weird") if (n % 2 == 0) and (n > 20): print("Not Weird")5 Weird 6导入所需的包from autox import AutoX from autox.file_io import read_data_from_path from autox.process_data import Feature_type_recognition from autox.util import log, reduce_mem_usage from autox.feature_engineer import FeatureStat from autox.feature_engineer import FeatureCount from autox.models.regressor import CrossLgbRegression from autox.ensemble.stacking import StackingRegressor from autox.models.classifier import CrossLgbBiClassifier, CrossXgbBiClassifier, CrossTabnetBiClassifier import json from sklearn.linear_model import LinearRegression from sklearn.linear_model import Ridge from lightgbm import LGBMRegressor import pandas as pd import numpy as np import os from tqdm import tqdm初始化AutoX类relations = [ { "related_to_main_table": "true", # 是否为和主表的关系 "left_entity": "train_transaction.csv", # 左表名字 "left_on": ["TransactionID"], # 左表拼表键 "right_entity": "train_identity.csv", # 右表名字 "right_on": ["TransactionID"], # 右表拼表键 "type": "1-1" # 左表与右表的连接关系 }, { "related_to_main_table": "true", # 是否为和主表的关系 "left_entity": "test_transaction.csv", # 左表名字 "left_on": ["TransactionID"], # 左表拼表键 "right_entity": "test_identity.csv", # 右表名字 "right_on": ["TransactionID"], # 右表拼表键 "type": "1-1" # 左表与右表的连接关系 } ] # 选择数据集 data_name = 'kaggle_ieee' path = f'./data/{data_name}' autox = AutoX(target = 'isFraud', train_name = 'train_transaction.csv', test_name = 'test_transaction.csv', id = ['TransactionID'], path = path, relations = relations)INFO -> [+] read test_identity.csv INFO -> Memory usage of dataframe is 44.39 MB INFO -> Memory usage after optimization is: 9.84 MB INFO -> Decreased by 77.8% INFO -> table = test_identity.csv, shape = (141907, 41) INFO -> [+] read sample_submission.csv INFO -> Memory usage of dataframe is 7.73 MB INFO -> Memory usage after optimization is: 2.90 MB INFO -> Decreased by 62.5% INFO -> table = sample_submission.csv, shape = (506691, 2) INFO -> [+] read train_identity.csv INFO -> Memory usage of dataframe is 45.12 MB INFO -> Memory usage after optimization is: 10.00 MB INFO -> Decreased by 77.8% INFO -> table = train_identity.csv, shape = (144233, 41) INFO -> [+] read test_transaction.csv INFO -> Memory usage of dataframe is 1519.24 MB INFO -> Memory usage after optimization is: 425.24 MB INFO -> Decreased by 72.0% INFO -> table = test_transaction.csv, shape = (506691, 393) INFO -> [+] read train_transacti[...]特征工程df = autox.dfs_['train_test'] feature_type = autox.info_['feature_type']['train_test'] id_ = autox.info_['id'] target = autox.info_['target']target_encoding# from autox.feature_engineer import FeatureTargetEncoding # featureTE = FeatureTargetEncoding() # featureTE.fit(df, 'orders_3h_15h', df_feature_type = feature_type, silence_cols = id_, select_all=False) # log(featureTE.get_ops()) # 手动修改配置 # featureCount.set_keys([[],[]]) # FE_te = featureTE.transform(df) # FE_terank特征from autox.feature_engineer import FeatureRank featureRank = FeatureRank() featureRank.fit(df, df_feature_type = feature_type, select_all = False) cnt = 0 for key_ in featureRank.get_ops().keys(): cnt += len(featureRank.get_ops()[key_]) cnt # FE_rank = featureRank.transform(df) # FE_rank.head()统计特征featureStat = 
FeatureStat() featureStat.fit(df, df_feature_type = feature_type, silence_group_cols = id_ + [target], silence_agg_cols = id_ + [target], select_all = False) featureStat.get_ops().keys() cnt = 0 for key_ in featureStat.get_ops().keys(): cnt += len(featureStat.get_ops()[key_]) cnt # manually modify the configuration # featureGroupby.set_keys() # FE_stat = featureStat.transform(df) # FE_stat.head()

Count features
featureCount = FeatureCount() featureCount.fit(df, degree=2, df_feature_type = feature_type, silence_cols = id_ + [target], select_all=False) log(featureCount.get_ops()) len(featureCount.get_ops()) # manually modify the configuration # featureCount.set_keys([[],[]]) # FE_count = featureCount.transform(df) # FE_count.head()

Feature combination
from autox.process_data import feature_combination df_list = [df] # df_list = [df, FE_count, FE_stat, FE_rank] FE_all = feature_combination(df_list) FE_all.shape

Split into train and test data
from autox.process_data import train_test_divide train_length = autox.info_['shape_of_train'] train, test = train_test_divide(FE_all, train_length) train.shape, test.shape

Feature filtering
from autox.process_data import feature_filter used_features = feature_filter(train, test, id_, target)
100%|██████████| 426/426 [06:26<00:00, 1.10it/s] INFO -> filtered features: ['TransactionID', 'isFraud', 'TransactionDT', 'id_01', 'id_02', 'id_03', 'id_04', 'id_05', 'id_06', 'id_07', 'id_08', 'id_09', 'id_10', 'id_11', 'id_13', 'id_14', 'id_17', 'id_18', 'id_19', 'id_20', 'id_21', 'id_22', 'id_24', 'id_25', 'id_26', 'id_32']

View the final set of features used
len(used_features) used_features

Model training
model_lgb = CrossLgbBiClassifier() model_lgb.fit(train[used_features], train[target], tuning=False)
INFO -> (590540, 400)

View the model's feature importances
fimp = model_lgb.feature_importances_ fimp

Model prediction
predict_lgb = model_lgb.predict(test[used_features]) predict = predict_lgb

Post-process the predictions
from autox.process_data import clip_label min_ = autox.info_['min_target'] max_ = autox.info_['max_target'] predict = clip_label(predict, min_, max_) min_, max_

Get the submission (sub)
sub = test[id_] sub[target] = predict sub.index = range(len(sub)) sub.head() sub.shape sub[target].max(), sub[target].min()

Write out the results
tag = "ieee_lgb" path = f'./sub/sub_{tag}.csv' sub.to_csv(path, index = False)

Train Neural Networks
import matplotlib.pyplot as plt import numpy as np import pandas as pd import os import PIL import pathlib from sklearn.utils import class_weight import pickle import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.applications.inception_v3 import InceptionV3 from tensorflow.keras.applications.xception import Xception from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2 from tensorflow.keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201 from tensorflow.keras.optimizers import SGD, Adam, RMSprop from tensorflow.keras.layers import Flatten, Dense, Dropout, GlobalAveragePooling2D, GlobalMaxPooling2D print(tf.__version__) print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU'))) # %load_ext tensorboard
2.3.1 Num GPUs Available: 0

Data
Load data
**data is structured as:** ../data/ dataset/ train/ Aloe_Vera/ Aloe_Vera_1.jpeg Aloe_Vera_2.jpeg ... ... Umbrella_Tree/ Umbrella_Tree_1.jpeg Umbrella_Tree_2.jpeg ... test/ Aloe_Vera/ Aloe_Vera_1.jpeg Aloe_Vera_2.jpeg ... ...
Umbrella_Tree/ Umbrella_Tree_1.jpeg Umbrella_Tree_2.jpeg ... val/ Aloe_Vera/ Aloe_Vera_1.jpeg Aloe_Vera_2.jpeg ... ... Umbrella_Tree/ Umbrella_Tree_1.jpeg Umbrella_Tree_2.jpeg ... House_Plants.csv **Define dataset and parameters**data_path = '../data/gsp15_ttv/' class_names = ['Aloe_Vera', 'Asparagus_Fern', 'Baby_Rubber_Plant', 'Boston_Fern', 'Easter_Lily', 'Fiddle_Leaf_Fig', 'Jade_Plant', 'Monstera','Parlor_Palm', 'Peace_Lily', 'Pothos', 'Rubber_Plant', 'Snake_Plant', 'Spider_Plant', 'Umbrella_Tree'] img_width, img_height = 224, 224 batch_size = 256Load datatrain_data_dir = f'{data_path}/train' validation_data_dir = f'{data_path}/test' no_classes = len(class_names) # import training with augmentation at each epoch print('Training:') train_datagen = ImageDataGenerator( rescale=1. / 255, shear_range=0.1, zoom_range=0.2, rotation_range=30, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True) train_generator = train_datagen.flow_from_directory( train_data_dir, target_size=(img_width, img_height), batch_size=batch_size, classes=class_names, class_mode='categorical', seed = 2020, shuffle = True) # import validation print('\nValidation:') val_datagen = ImageDataGenerator(rescale=1. / 255) validation_generator = val_datagen.flow_from_directory( validation_data_dir, target_size=(img_width, img_height), batch_size=batch_size, classes=class_names, class_mode='categorical', seed = 2020, shuffle = True)Training: Found 10453 images belonging to 15 classes. Validation: Found 1321 images belonging to 15 classes.Plot dataplt.figure(figsize=(10, 10)) images, labels = next(train_generator) for i in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(images[i]) plt.title(class_names[np.argmax(labels[i])]) plt.axis("off")Define class weights for imbalanced data ![Data Imbalance](../data/figures/number_imgs_per_class.png) Using class_weights changes the range of the loss. This may affect the stability of the training depending on the optimizer. Optimizers whose step size is dependent on the magnitude of the gradient, like optimizers. SGD, may fail. The optimizer used here, optimizers. Adam, is unaffected by the scaling change. Also note that because of the weighting, the total losses are not comparable between the two models.def create_weights_dict(train_generator): '''Calculates the number of samples per class and returns a dictionary for passing to .fit()''' n_train = len(train_generator.filenames) number_of_generator_calls = np.ceil(n_train / (1.0 * batch_size)) # 1.0 above is to skip integer division label_list = [] for i in range(0,int(number_of_generator_calls)): label_list.extend(np.array(train_generator[i][1])) label_list = list(map(lambda x: np.argmax(x), label_list)) class_weight_arr = class_weight.compute_class_weight(class_weight='balanced', classes = np.unique(label_list), y = label_list) class_weight_dict = dict(zip(np.arange(len(label_list)), class_weight_arr)) return class_weight_dict class_weight_dict = create_weights_dict(train_generator) class_weight_dict/opt/miniconda3/envs/tf23/lib/python3.8/site-packages/PIL/TiffImagePlugin.py:785: UserWarning: Corrupt EXIF data. Expecting to read 4 bytes but only got 0. 
warnings.warn(str(msg))Build modeldef get_prelim(): model = Sequential([ layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical", input_shape=(img_height, img_width, 3)), layers.experimental.preprocessing.RandomRotation(0.2), layers.experimental.preprocessing.RandomZoom(0.3), layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)), layers.Conv2D(16, 3, padding='same', activation='relu'), layers.MaxPooling2D(), layers.Conv2D(32, 3, padding='same', activation='relu'), layers.MaxPooling2D(), layers.Conv2D(64, 3, padding='same', activation='relu'), layers.MaxPooling2D(), layers.Flatten(), layers.Dense(128, activation='relu'), layers.Dense(no_classes), layers.Activation('softmax') ]) return model def get_double_conv(): model = keras.models.Sequential([ layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical", input_shape=(img_height, img_width, 3)), layers.experimental.preprocessing.RandomRotation(0.2), layers.experimental.preprocessing.RandomZoom(0.3), layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)), layers.Conv2D(64, 7, activation='relu', padding='same'), layers.MaxPooling2D(2), layers.Conv2D(128, 3, activation='relu', padding='same'), layers.Conv2D(128, 3, activation='relu', padding='same'), layers.MaxPooling2D(2), layers.Conv2D(256, 3, activation='relu', padding='same'), layers.Conv2D(256, 3, activation='relu', padding='same'), layers.MaxPooling2D(2), layers.Flatten(), layers.Dense(128, activation='relu'), layers.Dense(64, activation='relu'), layers.Dense(no_classes, activation='softmax') ]) return model def get_VGG16tl(): resize_layer = layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)) # create the base model from the pre-trained model pretrained_model = VGG16(input_shape=(img_height, img_width, 3), include_top=False, weights='imagenet') # freeze the convolutional base pretrained_model.trainable = False model = tf.keras.Sequential([resize_layer, pretrained_model, Flatten(), Dense(256, activation='relu'), Dropout(0.5), # try 0.5 Dense(256, activation='relu'), Dense(no_classes, activation='softmax')]) return model def get_InceptionV3tl(): resize_layer = layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)) # create the base model from the pre-trained model pretrained_model = InceptionV3(input_shape=(img_height, img_width, 3), include_top=False, weights='imagenet') # freeze the convolutional base pretrained_model.trainable = False model = tf.keras.Sequential([resize_layer, pretrained_model, Flatten(), Dense(256, activation='relu'), Dropout(0.5), Dense(256, activation='relu'), Dense(no_classes, activation='softmax')]) return model def get_InceptionV3tl_1024(): resize_layer = layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)) # create the base model from the pre-trained model pretrained_model = InceptionV3(input_shape=(img_height, img_width, 3), include_top=False, weights='imagenet') # freeze the convolutional base pretrained_model.trainable = False model = tf.keras.Sequential([resize_layer, pretrained_model, Flatten(), Dense(1024, activation='relu'), Dropout(0.2), Dense(no_classes, activation='softmax')]) return model def get_ResNet50tl(): resize_layer = layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)) # create the base model from the pre-trained model pretrained_model = ResNet50(input_shape=(img_height, img_width, 3), 
include_top=False, weights='imagenet') # freeze the convolutional base pretrained_model.trainable = False model = tf.keras.Sequential([resize_layer, pretrained_model, Flatten(), Dense(256, activation='relu'), Dropout(0.5), Dense(256, activation='relu'), Dense(no_classes, activation='softmax')]) return model def get_InceptionResNetV2tl(): resize_layer = layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)) # create the base model from the pre-trained model VGG16 pretrained_model = InceptionResNetV2(input_shape=(img_height, img_width, 3), include_top=False, weights='imagenet') # freeze the convolutional base pretrained_model.trainable = False model = tf.keras.Sequential([resize_layer, pretrained_model, Flatten(), Dense(256, activation='relu'), Dropout(0.5), Dense(256, activation='relu'), Dense(no_classes, activation='softmax')]) return model def generate_model(model_name): if model_name == 'prelim': model = get_prelim() data_augmentation elif model_name == 'double_conv': model = get_double_conv() elif model_name == 'VGG16': model = get_VGG16tl() elif model_name == 'ResNet50': model = get_ResNet50tl() elif model_name == 'InceptionV3': model = get_InceptionV3tl() elif model_name == 'InceptionV3_1024': model = get_InceptionV3tl_1024() elif model_name == 'InceptionResNetV2': model = get_InceptionResNetV2tl() else: print('please select a valid model') return model model = generate_model('InceptionV3_1024') model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy', 'top_k_categorical_accuracy']) model.summary()Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= rescaling_1 (Rescaling) (None, 224, 224, 3) 0 _________________________________________________________________ inception_v3 (Functional) (None, 5, 5, 2048) 21802784 _________________________________________________________________ flatten_1 (Flatten) (None, 51200) 0 _________________________________________________________________ dense_2 (Dense) (None, 1024) 52429824 _________________________________________________________________ dropout_1 (Dropout) (None, 1024) 0 _________________________________________________________________ dense_3 (Dense) (None, 15) 15375 ======================================================[...]Train modelinitial_epochs=20 history = model.fit(train_generator, steps_per_epoch=len(train_generator.filenames)//batch_size, epochs=initial_epochs, validation_data=validation_generator, class_weight=class_weight_dict) # unfreeze the layers model.trainable = True model.compile(loss = 'categorical_crossentropy', optimizer = keras.optimizers.Adam(1e-5), metrics = ['accuracy', 'top_k_categorical_accuracy']) model.summary() fine_tune_epochs = 100 total_epochs = initial_epochs + fine_tune_epochs history_fine = model.fit(train_generator, steps_per_epoch=len(train_generator.filenames)//batch_size, epochs=total_epochs, validation_data=validation_generator, class_weight=class_weight_dict)Save model/metrics and plotmodel_name = 'InceptionV3_40_200e_GSP1.0' model.save_weights(f'../models/{model_name}_weights.h5') model.save(f'../models/{model_name}_model.h5') acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] t5acc = history.history['top_k_categorical_accuracy'] t5val_acc = history.history['val_top_k_categorical_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] acc += 
history_fine.history['accuracy'] val_acc += history_fine.history['val_accuracy'] t5acc += history_fine.history['top_k_categorical_accuracy'] t5val_acc += history_fine.history['val_top_k_categorical_accuracy'] loss += history_fine.history['loss'] val_loss += history_fine.history['val_loss'] fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(10,8), sharex=True) x_plot = np.arange(1, total_epochs+1) ax[0].plot(x_plot, acc[:total_epochs], '+-', label='training') ax[0].plot(x_plot, val_acc[:total_epochs], '+-', label='validation') ax[0].plot(x_plot, t5acc[:total_epochs], '+-', label='top 5 training') ax[0].plot(x_plot, t5val_acc[:total_epochs], '+-', label='top 5 validation') ax[0].legend() ax[0].set_ylabel('accuracy') # ax[0].set_ylim(0.5, 1) ax[0].grid(ls='--', c='C7') ax[0].set_title('accuracy') ax[0].axvline(initial_epochs, c='C7', ls='--') ax[1].plot(x_plot, loss[:total_epochs], '+-', label='training') ax[1].plot(x_plot, val_loss[:total_epochs], '+-', label='validation') ax[1].legend() ax[1].set_ylabel('cross entropy') # ax[1].set_ylim(0, 1) ax[1].grid(ls='--', c='C7') ax[1].set_title('loss') ax[1].set_xlabel('epoch') ax[1].axvline(initial_epochs, c='C7', ls='--') plt.show() plt.savefig(f'../models/{model_name}_graph.svg') plt.savefig(f'../models/{model_name}_graph.png', dpi=400) graph_vals = pd.DataFrame({'acc':acc[:total_epochs], 'val_acc':val_acc[:total_epochs], 'loss':loss[:total_epochs], 'val_loss':val_loss[:total_epochs], 't5':t5acc[:total_epochs], 'val_t5':t5val_acc[:total_epochs]}) graph_vals.to_csv(f'../models/{model_name}_metrics.csv', index=False) val_predictions = model.predict(val_ds, batch_size=BATCH_SIZE) def plot_cm(labels, predictions, p=0.5): cm = confusion_matrix(labels, predictions > p) plt.figure(figsize=(5,5)) sns.heatmap(cm, annot=True, fmt="d")r4t plt.title('Confusion matrix @{:.2f}'.format(p)) plt.ylabel('Actual label') plt.xlabel('Predicted label') plt.savefig('../models/VGG16_70e_1.0.svg') model.save_weights('../models/VGG16_20_100e_1.0.h5') model.save('../models/VGG16_20_100e_1.0.h5') def plot_confusion_matrix(cm, class_names): """ Returns a matplotlib figure containing the plotted confusion matrix. Args: cm (array, shape = [n, n]): a confusion matrix of integer classes class_names (array, shape = [n]): String names of the integer classes """ figure = plt.figure(figsize=(8, 8)) plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues) plt.title("Confusion matrix") plt.colorbar() tick_marks = np.arange(len(class_names)) plt.xticks(tick_marks, class_names, rotation=45) plt.yticks(tick_marks, class_names) # Normalize the confusion matrix. cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2) # Use white text if squares are dark; otherwise black. threshold = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): color = "white" if cm[i, j] > threshold else "black" plt.text(j, i, cm[i, j], horizontalalignment="center", color=color) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') return figure # Use the model to predict the values from the validation dataset. test_pred_raw = model.predict(val_ds) test_pred = np.argmax(test_pred_raw, axis=1) # Calculate the confusion matrix. cm = sklearn.metrics.confusion_matrix(test_labels, test_pred) # Log the confusion matrix as an image summary. 
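# The comment above says the confusion matrix should be logged as an image summary, but the
# notebook never shows that step. The helper below is a hedged sketch (not from the original
# notebook) of one way to do it with tf.summary.image; the log directory name is a placeholder.
import io

def plot_to_image(figure):
    # Render the matplotlib figure to PNG in memory and decode it into a uint8 tensor
    buf = io.BytesIO()
    figure.savefig(buf, format='png')
    plt.close(figure)
    buf.seek(0)
    image = tf.image.decode_png(buf.getvalue(), channels=4)
    return tf.expand_dims(image, 0)   # add a batch dimension for tf.summary.image

# Example usage once `figure` has been created below:
# file_writer = tf.summary.create_file_writer('../models/logs/cm')
# with file_writer.as_default():
#     tf.summary.image("Confusion matrix", plot_to_image(figure), step=0)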
figure = plot_confusion_matrix(cm, class_names=class_names) acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(10,8), sharex=True) x_vals = np.arange(1, epochs+1) ax[0].plot(x_vals, acc, '+-', label='training') ax[0].plot(x_vals, val_acc, '+-', label='validation') ax[0].legend() ax[0].set_ylabel('accuracy') ax[0].set_ylim(0, 1) ax[0].grid(ls='--', c='C7') ax[0].set_title('accuracy') ax[1].plot(x_vals, loss, '+-', label='training') ax[1].plot(x_vals, val_loss, '+-', label='validation') ax[1].legend() ax[1].set_ylabel('cross entropy') ax[1].set_ylim(0, 3) ax[1].grid(ls='--', c='C7') ax[1].set_title('loss') ax[1].set_xlabel('epoch') plt.show() model.save_weights('../models/.h5') model.save('../models/.h5')Evaluationimport glob pred_path = '../data/pred_16c_only1/' pred_ds = tf.keras.preprocessing.image_dataset_from_directory( pred_path, # labels = [0]*len(glob.glob(f'{pred_path}*')), image_size=(img_height, img_width), batch_size=batch_size ) normalization_layer = layers.experimental.preprocessing.Rescaling(1./255) normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y)) predictions = model.predict(pred_ds) print(predictions) # Generate arg maxes for predictions classes = np.argmax(predictions, axis = 1) print(classes[0]) print(class_names[classes[0]]) temp = tf.keras.models.load_model('../models/convmod_1.0.h5') temp.summary() dot_img_file = '../models/convmod_1.0.png' tf.keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True)List of My Movies## Basic stuff %load_ext autoreload %autoreload from IPython.core.display import display, HTML display(HTML("")) display(HTML("""""")) ## Python Version import sys print("Python: {0}".format(sys.version)) from combine import combine from fsUtils import moveFile from timeUtils import clock, elapsed import datetime as dt start = dt.datetime.now() print("Notebook Last Run Initiated: "+str(start))Combine Movies%load_ext autoreload %autoreload from combine import combine from wikipedia import wikipedia from wikifilm import wikifilm from oscar import oscars from razzies import razzies from rollingstone import rollingstone from BAFTA import BAFTA from SAG import SAG from filmsite import filmsite from films101 import films101 from AACTA import AACTA from flops import flops from canada import Canada from boxofficemojo import boxofficemojo from rottentomatoes import rottentomatoes from ultimatemovierankings import ultimatemovierankings comb = combine() comb.setOscarData(oscars(wikipedia()), 10) comb.setWikiFilmData(wikifilm(), 1) comb.setRazziesData(razzies(), 1) comb.setBAFTAData(BAFTA(), 1) comb.setSAGData(SAG(), 1) comb.setAACTAData(AACTA(), 1) comb.setFilmsiteData(filmsite(), 1) comb.setFilms101Data(films101(), 1) comb.setFlopsData(flops(), 1) comb.setCanadaData(Canada(), 1) comb.setRollingStoneData(rollingstone(), 1) comb.setRottenTomatoesData(rottentomatoes(), 90) comb.setBoxOfficeMojoData(boxofficemojo(), 5e6) comb.setUltimateMovieRankingsData(ultimatemovierankings(), 90) comb.getData() comb.saveCorrections(debug=True) comb.mergeMovies(debug=True)The autoreload extension is already loaded. 
To reload it, use: %reload_ext autoreload Current Time is Sun Oct 27, 2019 17:07:23 for Last Run Found 23 Years of Rolling Stone Movies Found 98 Years of Wiki Film Movies Found 119 Years of Films101 Movies Found 50 Years of AACTA Movies Found 105 Years of Filmsite Movies Found 67 Years of Ultimate Movie Rankings Movies Found 94 Years of Rotten Tomatoes Movies Found 47 Years of BAFTA Movies Found 32 Years of Flops Movies Found 61 Years of Canada Movies Found 25 Years of SAG Movies Found 38 Years of Razzies Movies Found 84 Years of Oscar Movies Found 39 Years of Box Office Mojo Movies Found Data Between 1900 and 2018Merge My Movies With Combined Moviesfrom mymovies import mymovies mine = mymovies() mine.setCombinedMovies(combine()) _ = clock("Last Run") mine.suggestRenames() mine.findMyMovies() _ = clock("Last Run") ### Stopped after 1985. Do 1986 onward mine.mergeMovies() show = False if show: renames = mine.getMovieRenames() for old,new in renames.items(): print("moveFile(src=\"{0}\", dst=\"{1}\", debug=True)".format(old,new)) mine.searchMyMovies("", 2019, cutoff=0.6) #mine.showCombinedMovieStatus(1987) mine.printMissingMovies(2014) mine.missingMovies printMissingMovies(1995) moveFile(src="/Volumes/Save/Movies/ The Demon Barber Of Fleet Street [2007].mp4", dst="/Volumes/Save/Movies/S: The Demon Barber Of Fleet Street [2007].mp4", debug=True) moveFile(src="/Volumes/Download/Movies/The King Of Kong A Fistful Of Quarters [2007].mp4", dst="/Volumes/Download/Movies/The King of Kong: A Fistful of Quarters [2007].mp4", debug=True) moveFile(src="/Volumes/Seagate/Movies/X The Man With The X Ray Eyes [1963].mp4", dst="/Volumes/Seagate/Movies/X: The Man With The X-Ray Eyes [1963].mp4", debug=True) mine.searchMyMovies("Goldeneye", 1995, cutoff=0.6, num=10) tmp="""AACTA.py filmsite.py razzies.py BAFTA.py flops.py rollingstone.py SAG.py goldenglobes.py rottentomatoes.py amc.py movieDB.py setup.py boxofficemojo.py movieRenames.py ultimatemovierankings.py canada.py movies.py wikifilm.py combine.py mymovies.py wikipedia.py films101.py oscar.py""" z = [y.split() for y in [x for x in tmp.split("\n")]] [x.replace(".py", "") for x in [item for sublist in z for item in sublist]] flat_listDo not forget to click Runtime -> Change Runtime type -> Hardware accelerator ->GPU As before - mount drive, download dataDATA_PREFIX = "/content/gdrive/My Drive/kaggle_2/" import os os.makedirs(DATA_PREFIX) from google.colab import drive drive.mount('/content/gdrive') !mkdir /root/.kaggle !cp /content/gdrive/My\ Drive/kaggle.json /root/.kaggle/ !chmod 600 /root/.kaggle/kaggle.json !ls -l /root/.kaggle !kaggle competitions download --force -c mlimperial2020-2 -p '{DATA_PREFIX}' !unzip '{DATA_PREFIX}/kaggle_train.h5.zip' -d '{DATA_PREFIX}' !chmod +rw '{DATA_PREFIX}/kaggle_train.h5' !rm '{DATA_PREFIX}/kaggle_train.h5.zip' !unzip '{DATA_PREFIX}/kaggle_test.h5.zip' -d '{DATA_PREFIX}' !chmod +rw '{DATA_PREFIX}/kaggle_test.h5' !rm '{DATA_PREFIX}/kaggle_test.h5.zip' !ls '{DATA_PREFIX}' # Create folder to save NN snapshots during training os.makedirs(os.path.join("/content/gdrive/My Drive/kaggle_2", "nn_snapshots"))!Link to challenge! https://www.kaggle.com/t/3814f61079e947cda51a13cb9a7b582a Metric - ROC AUC Your task is to try as many techniques that you have learned this week as possible.The outcome of your work should be a small table with results, i.e Method - parameters tuned with CV - score + features created on top of exiting ones. The table should be accompanied by a small report of your workflow and reasoning. 
Also, you need to send the code.The archive with the files should be sent to with the topic: **Surname_name_kaggle_2**The total amount of points is 10. You will get additional points based on your final ranking.** 1 Point **Try different layers, such as Dropout, BN, different poolings, convs. Do all of them work? Why?** 1 Point **Try different activations, i.e. relu, sigmoid, tanh etc. Do all of them work? Why?** 1 Point **Try different optimisers, i.e. SGD, Adam, Adamax, rmsprop ... Which you find the best? Which converges faster?** 1 Point **Try different depth of network, different order of layers. What do you observe?** 3 Points **Augment(rotation, jitter, reflection ...) you data using symmetries in the data.** 1 Points **Change optimisation loop/optimiser to set up decay of learning rate by the scheme of your choice, abort trainin by stopping criteria of your choice.** 2 Points **Use pretrained nets / part of nest. Does this technique improve the score?** Bonus **Beat medium baseline - + 2 bonus points. Util functions to preprocess and work with datadef read_data(data, is_train=True, start_ind=0, end_ind=0): layer_hcal = np.expand_dims(data['all_events']['histHCAL'][start_ind : end_ind], -1).astype(np.float32) layer_em = np.expand_dims(data['all_events']['histEM'][start_ind : end_ind], -1).astype(np.float32) layer_track = np.expand_dims(data['all_events']['histtrack'][start_ind : end_ind], -1).astype(np.float32) hit_map = np.concatenate((layer_hcal, layer_em, layer_track), axis=-1).astype(np.float32) hit_map = np.rollaxis(hit_map, 3, 1) hit_map = (hit_map - hit_map.mean(axis=0, keepdims=True)) / hit_map.std(axis=0, keepdims=True) answers = None if is_train: answers = np.expand_dims(data['all_events']['y'][start_ind : end_ind], -1) return hit_map, answers def save_data_in_chunks(data, chunk_size): for index, step in enumerate(range(0, len(data['all_events']['histHCAL']), chunk_size)): X, y = read_data(train, True, step, step + chunk_size) X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.9, random_state=42) np.save("X_train_{}".format(index) , X_train) np.save("y_train_{}".format(index) , y_train) np.save("X_val_{}".format(index) , X_val) np.save("y_val_{}".format(index) , y_val) del X, y, X_train, X_val, y_train, y_val gc.collect() print("Done:{}".format(index))import standard packagesimport gc import numpy as np import pickle import os import matplotlib.pyplot as plt %matplotlib inline import scipy from sklearn.metrics import accuracy_score from sklearn.metrics import roc_auc_score from sklearn.model_selection import train_test_splitimport torch and h5 data format parserimport h5py import torchCreate small batches of train data locally(will be deleted after restrat of the session)train = h5py.File(os.path.join(DATA_PREFIX, "kaggle_train.h5"), 'r') save_data_in_chunks(train, 50000)Event exampleX_example = np.load("X_train_0.npy") f, ax = plt.subplots(1,3,figsize=(14,6)) for i in range(3): ax[i].imshow(X_example[100,i,:,:], cmap="Reds") del X_example gc.collect()Load validation data to memory# This variable tells, how many file pieces of validation data we want to consider N_DATA_SPLITS = 1 X_val = np.concatenate([np.load("X_val_{}.npy".format(i)) for i in range(N_DATA_SPLITS)]) y_val = np.concatenate([np.load("y_val_{}.npy".format(i)) for i in range(N_DATA_SPLITS)])The next steps are exactly the same as on Mondayfrom torch import nn import torch.nn.functional as F class Flatten(nn.Module): def forward(self, input): return input.view(input.size(0), -1)This is dummy 
example of CNN, look what you can change heredevice = torch.device("cuda", 0) model = torch.nn.Sequential() model.add_module("maxpool_1", torch.nn.MaxPool2d(kernel_size=2)) model.add_module('conv_1', nn.Conv2d(3, 32, kernel_size=(5,5), stride=1, padding=0)) model.add_module("maxpool_2", torch.nn.MaxPool2d(kernel_size=2)) model.add_module("relu_1", torch.nn.ReLU()) model.add_module("flat", Flatten()) model.add_module("fc1", torch.nn.Linear(6272, 128)) model.add_module("relu_2", torch.nn.ReLU()) model.add_module("fc2", torch.nn.Linear(128, 1)) model.add_module("sigmoid", torch.nn.Sigmoid()) model.to(device)Training on minibatchesJust like before, we train our model on small random minibatches of data with adaptive optimization method of your choice.# An auxilary function that returns mini-batches for neural network training from tqdm import trange def iterate_minibatches(X, y, batchsize, shuffle=False): indices = np.arange(len(X)) if shuffle: indices = np.random.permutation(indices) for start in trange(0, len(indices), batchsize): ix = indices[start: start + batchsize] yield X[ix], y[ix]Choose you optimiseropt = torch.optim.SGD(model.parameters(), lr=0.01)And set up batch_size and number of epochsimport time #from pandas import ewma from IPython import display num_epochs = 2 #amount of passes through the data batch_size = 1024 #number of samples processed at each function call auc_history = [] number_of_chunks = 1 #number of initial data splits to process best_score = 0 best_epoch = 0 for epoch in range(num_epochs): # In each epoch, we do a full pass over the training data: train_err = train_acc = 0 train_batches = 0 start_time = time.time() for step in range(number_of_chunks): X_train, y_train = np.load("X_train_{}.npy".format(step)), np.load("y_train_{}.npy".format(step)) train_batches += np.ceil(len(X_train) / batch_size).astype(int) # This is you have see already - traning loop model.train(True) # enable dropout / batch_norm training behavior for X_batch, y_batch in iterate_minibatches(X_train, y_train, batch_size, shuffle=True): X_batch = torch.FloatTensor(X_batch).to(device) y_batch = torch.FloatTensor(y_batch).to(device) y_predicted = model(X_batch) loss = torch.nn.functional.binary_cross_entropy(y_predicted, y_batch).mean() loss.backward() opt.step() opt.zero_grad() train_err += loss.data.cpu().numpy() train_acc += torch.eq(torch.round(y_predicted), y_batch).data.cpu().numpy().mean() # And a full pass over the validation data: y_pred = [] model.train(False) for X_batch, y_batch in iterate_minibatches(X_val, y_val, batch_size, shuffle=False): X_batch = torch.FloatTensor(X_batch).to(device) y_pred.extend(model(X_batch).data.cpu().numpy()) y_pred = np.asarray(y_pred) # Save the metrics values val_acc = accuracy_score(y_val, y_pred > 0.5) val_roc_auc = roc_auc_score(y_val, y_pred) auc_history.append(val_roc_auc) # Visualize display.clear_output(wait=True) plt.figure(figsize=(8, 6)) plt.title("Validation AUC") plt.xlabel("#iteration") plt.ylabel("AUC") plt.plot(auc_history, 'b',label='val auc') plt.legend(loc='best') plt.grid() plt.show() # Then we print the results for this epoch: print("Epoch {} of {} took {:.3f}s".format( epoch + 1, num_epochs, time.time() - start_time)) print(" training loss (in-iteration):\t\t{:.6f}".format(train_err / train_batches)) print(" train accuracy:\t\t{:.2f} %".format(train_acc / train_batches * 100)) print(" validation accuracy:\t\t{:.2f} %".format(val_acc * 100)) print(" validation roc_auc:\t\t{:.2f} %".format(val_roc_auc * 100)) if auc_history[-1] > 
best_score: best_score = auc_history[-1] best_epoch = epoch with open(os.path.join("/content/gdrive/My Drive/kaggle_2", "nn_snapshots", "best.pt"), 'wb') as f: torch.save(model, f)Read test data, feed it to neural network, and save the output in kaggle fromat.chunk_size = 10000 test = h5py.File(os.path.join(DATA_PREFIX, "kaggle_test.h5"), 'r') y_ans = [] for index, step in enumerate(range(0, len(test['all_events']['histHCAL']), chunk_size)): X, _ = read_data(test, False, step, step + chunk_size) y_ans.extend(model(torch.FloatTensor(X).to(device)).detach().cpu().numpy()) del X gc.collect() print("Done:{}".format(index)) y_ans = np.array(y_ans)Saving you results to file.import pandas as pd from IPython.display import FileLink def save_results(filename, y_ans): answer_dataframe = pd.DataFrame(columns=["ID", "ans"]) answer_dataframe['ID'] = range(0,len(y_ans)) answer_dataframe['ans'] = y_ans answer_dataframe.to_csv(os.path.join(DATA_PREFIX, '{}'.format(filename)), index=False) return FileLink(os.path.join(DATA_PREFIX, '{}'.format(filename))) save_results("baseline.csv", y_ans) !kaggle competitions submit -c mlimperial2020-2 -f "{DATA_PREFIX}/baseline.csv" -m "Message"Exploratory Data Analysis (EDA): understand the input dataimport boto3 import configparser config = configparser.ConfigParser() config.read('dwh.cfg') # AWS KEY = config.get('AWS','KEY') SECRET = config.get('AWS','SECRET') s3c = boto3.client('s3', region_name='eu-west-2', aws_access_key_id=KEY, aws_secret_access_key=SECRET ) s3r = boto3.resource('s3', region_name='eu-west-2', aws_access_key_id=KEY, aws_secret_access_key=SECRET ) udacity_bucket = s3r.Bucket("udacity-dend")Look into input files for the `song_data`$\rightarrow$ Contains JSON files with song metadata (artist_id, artist_latitude, ..., year)for i, obj in enumerate(udacity_bucket.objects.filter(Prefix='song_data')): print(obj) if i > 3: break # print the content of the first file # boto3 s3 resource has no get_object print(s3c.get_object(Bucket='udacity-dend', Key='song_data/A/')\ ['Body'].read().decode('utf-8')[:500]){"artist_id":"ARJNIUY12298900C91","artist_latitude":null,"artist_location":"","artist_longitude":null,"artist_name":"","duration":213.9424,"num_songs":1,"song_id":"SOBLFFE12AF72AA5BA","title":"Scream","year":2009}Look into input files for the `log_data`$\rightarrow$ Contain JSON files with user activity, ie songs played (with artist, authorisation status, ...., user_id)# list Log Data # boto3 s3 client has no attribute 'Bucket' song_data_bucket = s3r.Bucket("udacity-dend") for i, obj in enumerate(udacity_bucket.objects.filter(Prefix='log_data')): print(obj) if i > 5: break # print the content of the first file print(s3c.get_object(Bucket='udacity-dend', Key='log_data/2018/11/2018-11-01-events.json')\ ['Body'].read().decode('utf-8')[:500]){"artist":null,"auth":"Logged In","firstName":"Walter","gender":"M","itemInSession":0,"lastName":"Frye","length":null,"level":"free","location":"San Francisco-Oakland-Hayward, CA","method":"GET","page":"Home","registration":1540919166796.0,"sessionId":38,"song":null,"status":200,"ts":1541105830796,"userAgent":"\"Mozilla\/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit\/537.36 (KHTML, like Gecko) Chrome\/36.0.1985.143 Safari\/537.36\"","userId":"39"} {"artist":null,"auth":"Logged In","firstNam$\rightarrow$ error in load of `staging_events`:1. check in AWS Redshift Query editor `select * from stl_load_errors`2. error "Invalid timestamp format or value [YYYY-MM-DD HH24:MI:SS]"3. 
From sample data above: "ts":1541105830796 is not a timestamp -> might use BIGINT -> or adapt LOAD SQL -> chose this, compare sql_queries.py Look into input file `log_json_path.json`$\rightarrow$ Contains column headers# log_json_path.json # inspired by https://www.slsmk.com/use-boto3-to-open-an-aws-s3-file-directly/ print(s3c.get_object(Bucket='udacity-dend', Key='log_json_path.json')['Body'].read().decode('utf-8')) s3cGet an impression of the song_data size#inspired by https://newbedev.com/how-to-find-size-of-a-folder-inside-an-s3-bucket: top_level_folders = dict() num_files = 0 for key in s3c.list_objects(Bucket='udacity-dend')['Contents']: folder = key['Key'].split('/')[0] if folder == 'song-data': if num_files < 5: print("Key %s in folder %s. %d bytes" % (key['Key'], folder, key['Size'])) num_files += 1 print(num_files)Key song-data/ in folder song-data. 0 bytes Key song-data/A/A/A/TRAAAAK128F9318786.json in folder song-data. 244 bytes Key song-data/A/A/A/TRAAAAV128F421A322.json in folder song-data. 303 bytes Key song-data/A/A/A/TRAAABD128F429CF47.json in folder song-data. 268 bytes Key song-data/A/A/A/TRAAACN128F9355673.json in folder song-data. 262 bytes 905Develop the print statements in etl.py# Develop print statement in etl.py from sql_queries import copy_table_queries, insert_table_queries for query in insert_table_queries: print(query.strip().split(' ')[2])songplays users songs artists time1. Statiscal learning: The setting and the estimator object 1.1 Datasets2D arrays, .shape(n_samplpes, n_features) 1.1.1 load_datasetsfrom sklearn import datasets iris = datasets.load_iris() data = iris.data data.shape, iris.feature_names, iris.target_names digits = datasets.load_digits() digits.images.shape %matplotlib inline import matplotlib.pyplot as plt plt.imshow(digits.images[-1], cmap=plt.cm.gray_r) # to use this dataset with the scikit, we transform each 8*8 image into a feature vector of length 64 data = digits.images.reshape((digits.images.shape[0], -1)) data.shape, digits.images1.1.2 dataset split: train dataset & test datasetfrom sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2, random_state=20 )1.1.3 Preprocessing* **Normalizing data** For many estimators, including the SVMs, having datasets with unit standard deviation for each feature is important to get good prediction. 
* based on the mean and std * Scaling features to a rangeimport sklearn.preprocessing as sk_preprocessing # based on the mean and std scaler = sk_preprocessing.StandardScaler().fit(iris.data) new_x = scaler.transform(iris.data) new_x[:5] # normalization to a range scaler = sk_preprocessing.MinMaxScaler(feature_range=(0, 1)).fit(iris.data) new_x = scaler.transform(iris.data) new_x[:4]* **regularization** Regulazation is set by the C parameter in models: a small value for C means more regularization; a large value for C means less regularization.new_x = sk_preprocessing.normalize(iris.data, norm='l2') new_x[:4]1.2 Estimator's objects**Fitting data**: An estimator is any object that learns from data; it may be a classification, regression or clustering algorithm or a transformer that extracts/filters useful features from raw data.```>>>estimator.fit(data)```**Estimator parameters**: All the paramaters of an estimator can be set when it instantiated or by modifying the corresponding attribute:```>>>estimator = Estimator(param1=1, param2=2)>>>eatimator.param1```**Estimated parameters**: All the estimated parameters are attributes of the estimator object ending an underscore```>>>estimator.estimated_param_``` 2. Supervised learning: Predicting an output variable from high-dimensional observations* **the problem solved in supervised learning** : Supervised learning consists in learning the link between two datasets: the observed data x and an external variable y that we are trying to predict, usually called "target" or "labels". Most often, y is a 1D array of length n_samples. All supervised estimators in scikit-learning implements a fit(x,y) method to fit the model and a predict(x) method that, given unlabeled observations x, returns the predict labels y. * classification & regression 2.1 Nearest neighbor and the curse of dimensionality* **Classifying irises**import numpy as np from sklearn import datasets iris = datasets.load_iris() iris_x = iris.data iris_y = iris.target np.unique(iris_y)2.1.1 K-Nearest neighbors classifier KNN* **Training set and testing set**While experimenting with any learning algotithm, it is important not to test the prediction of an estimator on the data used to fit the estimator as this would not be evaluating the performance of the estimator on new data. This is why datasets are often split into *train* and *test* data.```sklearn.model_test.train_test_split()```## Split iris data in train and test data # A random permutation, to split the data randomly np.random.seed(0) indices = np.random.permutation(len(iris_x)) iris_x_train = iris_x[indices[:-10]] iris_y_train = iris_y[indices[:-10]] iris_x_test = iris_x[indices[-10:]] iris_y_test = iris_y[indices[-10:]] # Create and fit a nearest-neighbor classifier from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier() knn.fit(iris_x_train, iris_y_train) knn.predict(iris_x_test), iris_y_test knn.score(iris_x_test, iris_y_test)2.1.2 The curse of dimensionalityIf the number of features is $p$, you now require $n\sim1/d^p$. As $p$ becomes large, the number of training points required for a good estimator grows exponentially. This is called the curse of dimensionality and is a core problem that machine learning addresses. 
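As a rough numerical illustration of this scaling (a minimal sketch, with d = 0.1 chosen arbitrarily): if the training points need to be spaced roughly d apart along every axis for a nearest-neighbour estimate to be reliable, covering the unit cube takes on the order of (1/d)^p points.

```
d = 0.1                      # desired spacing between neighbouring training points
for p in (1, 2, 5, 10):      # number of features
    print(p, int((1 / d) ** p))
# 1 10
# 2 100
# 5 100000
# 10 10000000000
```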
2.2 Linear model: from regression to sparsity* **Diabetes dataset** 2.2.1 Linear regressionLinearRegression fits a linear model to the data set by adjusting a set of parameters in order to make the sum of the squared residuals of the model as small as possible.Linear model: $ y = X \beta + \varepsilon$* X: data* $y$: target variable* $\beta$: Coeffcients* $\varepsilon$: Observation noisefrom sklearn import linear_model diabetes = datasets.load_diabetes() diabetes_x_train = diabetes.data[:-20] diabetes_y_train = diabetes.target[:-20] diabetes_x_test = diabetes.data[-20:] diabetes_y_test = diabetes.target[-20:] regr = linear_model.LinearRegression() regr.fit(diabetes_x_train, diabetes_y_train) regr.coef_ # The mean square error np.mean((regr.predict(diabetes_x_test)-diabetes_y_test)**2) # Explained variance score: 1 is perfect prediction # and 0 means that there is no linear relationship between x and y. R^2 regr.score(diabetes_x_test, diabetes_y_test)2.2.2 ShrinkageIf there are few data points per dimension, noise in the observations induces high variance.x = np.c_[.5, 1].T y = [5, 1] test = np.c_[0, 2].T regr = linear_model.LinearRegression() import matplotlib.pyplot as plt plt.figure() np.random.seed(0) for _ in range(6): this_x = .1*np.random.normal(size=(2, 1)) + x regr.fit(this_x, y) plt.plot(test, regr.predict(test)) plt.scatter(this_x, y, s=3)A solution in high-dimensional statistical learning is to *shrink* the regression coefficients to zero: any two randomly chosen set of observations are likely to be uncorrelated. This is called **Ridge regression**regr = linear_model.Ridge(alpha=.1) plt.figure() np.random.seed(0) for _ in range(6): this_x = 0.1*np.random.normal(size=(2,1)) + x regr.fit(this_x, y) plt.plot(test, regr.predict(test)) plt.scatter(this_x, y, s=3)**bias/variance tradeoff**: the larger the ridge alpha parameter, the higher the bias and the lower the variance# choose alphs to minimize left out error alphas = np.logspace(-4, -1, 6) alphas from __future__ import print_function print([regr.set_params(alpha=alpha).fit(diabetes_x_train, diabetes_y_train).score(diabetes_x_test, diabetes_y_test) for alpha in alphas])[0.5851110683883531, 0.5852073015444677, 0.5854677540698492, 0.5855512036503916, 0.5830717085554161, 0.570589994372801]**Note**: Capturing in the fitted parameters noise that prevents the model to generalize to new data is called overfitting. The bias introduced by the ridge regression is called a regulation. 2.2.3 SparsityTo improve the conditioning of the problem(i.e. mitigating the The curse of dimensionality), it would be interesting to select only the informative features and set non-informative ones. Ridge regression will decrease their contribution, but not set them to zero. Another penalization approach, called Lasso(least absolute shrinkage and selection operator), can set some coefficients to zero. Such methods are called **sparse method** and sparsity can be seen as an application of **Occam's razor**: prefer simpler models.regr = linear_model.Lasso() scores = [regr.set_params(alpha=alpha).fit(diabetes_x_train, diabetes_y_train).score(diabetes_x_test, diabetes_y_test) for alpha in alphas] best_alpha = alphas[scores.index(max(scores))] regr.alpha = best_alpha regr.fit(diabetes_x_train, diabetes_y_train) regr.coef_* Different algorithm for the same problemDifferent algorithm can be used to solve the same problem. 
For instance, the Lasso object in scikit-learn solves the lasso regression problem using a *coordinate descent* method, this is efficient on large dataset. However, the LassoLars object using the LARS algorithm is very efficient for problems in which the weight vector estimator is very sparse(i.e. problems with very few observations). 2.2.4 ClassificationFor classification, as in the labeling iris task, linear regression is not the right approach as it will give too much weight to data far from the dicision frontier. A linear approach is to fit a *sigmoid* function or *Logistic* function:$$ y = sigmoid(X\beta - offset) + \epsilon=\frac{1}{1 + exp(-X\beta + offset)} + \epsilon$$logistic = linear_model.LogisticRegression(C=1e5) logistic.fit(iris_x_train, iris_y_train)* **Multiclass classification**If you have several classes to predict, an option often used is to fit one-versus-all classifiers and them use a voting heuristic for the final desicion.* **Shrinkage and sparsity with logistic regression**The C parameter controls the amount of regularization in the LogisticRegression object: a large value for C results in less regularization. Penalty="l2" gives Shrinkage(i.e. non-sparse coefficients), while penalty="l1" gives Sparsity.from sklearn import neighbors, linear_model from sklearn.model_selection import train_test_split digits_x = digits.data digits_y = digits.target digits_x_train, digits_x_test, digits_y_train, digits_y_test = train_test_split(digits_x, digits_y, test_size=0.1, random_state=20) knn1 =neighbors.KNeighborsClassifier() knn1.fit(digits_x_train, digits_y_train) logstic1 = linear_model.LogisticRegression(C=1e5) logstic1.fit(digits_x_train, digits_y_train) knn1.score(digits_x_test, digits_y_test), logstic1.score(digits_x_test, digits_y_test)2.3 Support vector machines (SVMs) 2.3.1 Linear SVMsSupport Vector Machine belong to the discriminant model family: they try to find a combination of samples to build a plane maximizing the margin between the two classes. Regularization is set by the C parameter: a small value for C means the margin is calculated using many or all of the observations around the separating line(more regularization); a large value for C means the margin is calculated on observations close to the separating line(less regularization).|Unregularized SVM |Regularized SVM (default) ||:------|:------||| |SVM can be used in regression-SVR(Support Vector Regression), or in classification-SVC(Support Vector Classification).def make_meshgrid(x, y, h=0.02): """Create a mesh of points to plot in Parameters ---------- x: data to base x-axis meshgrid on y: data to base y-axis meshgrid on h: stepsize for meshgrid, optional Returns: ---------- xx, yy: ndarray """ x_min, x_max = x.min() - 1, x.max() + 1 y_min, y_max = y.min() - 1, y.max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) return xx, yy def plot_contours(ax, clf, xx, yy, **params): """Plot the decision boundaries for a classifier. 
Parameters: ----------- ax: matplotlib axes object clf: a classifier xx: meshgrid ndarray yy: meshgrid ndarray params: dictionary of params to pass to contourf, optional """ Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) out = ax.contourf(xx, yy, Z, **params) return out from sklearn import svm X = iris.data[:, :2] y = iris.target C = 1.0 # We do not scale our data since we want to plot the support vectors models = (svm.SVC(kernel='linear', C=C), svm.LinearSVC(C=C), svm.SVC(kernel='rbf', gamma=0.7, C=C), svm.SVC(kernel='poly', degree=3, C=C)) models = (clf.fit(X, y) for clf in models) # title for the plots titles = ('SVM with linar kernel', 'LinearSVC (linear kernel)', 'SVC with RBF kernel', 'SVC with polynomial(degree 3) kernel') # Set-up 2*2 grid for plotting fig, sub = plt.subplots(2,2) plt.subplots_adjust(wspace=0.4, hspace=0.4) X0, X1 = X[:, 0], X[:,1] xx, yy = make_meshgrid(X0, X1) for clf, title, ax in zip(models, titles, sub.flatten()): plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8) ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k') ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xlabel('Sepal length') ax.set_ylabel('Sepal length') ax.set_xticks(()) ax.set_yticks(()) ax.set_title(title) plt.show()2.3.2 Using kernels Classes are not always linearly separable in feature space. The solution is to build a decision function that is not linear but may be polynomial instead. | Linear kernel | Polynomial kernel | RBF kernel(Radial Basis Function) |-|:-:|:-:|| | | ||>>>svc = svm.SVC(kernel='linear')|>>>svc=svm.SVC(kernel='poly',degree=3)>>>degree:polynomial degree|>>>svc=svm.SVC(kernel='rbf')>>>gamma:inverse of size of radial kernel| 3. Model selection: choosing estimators and their parameters 3.1 Score, and cross-validated scoresScore--Bigger is better.from sklearn import datasets, svm import numpy as np digits = datasets.load_digits() X_digits = digits.data y_digits = digits.target svc = svm.SVC(C=1, kernel='linear') svc.fit(X_digits[:-100],y_digits[:-100]).score(X_digits[-100:], y_digits[-100:]) x_folds = np.array_split(X_digits, 3) y_folds = np.array_split(y_digits, 3) x_folds[0].shape, X_digits.shape scores = [] for k in range(3): X_train = list(x_folds) x_test = X_train.pop(k) x_train = np.concatenate(X_train) y_train = list(y_folds) y_test = y_train.pop(k) y_train = np.concatenate(y_train) scores.append(svc.fit(x_train, y_train).score(x_test, y_test)) scores3.2 Cross-validation generatorsA collection of classes can be used to generate lists of train/test indices for popular cross-validation strategies. 
*KFold().split* method & *cross_val_score*from sklearn.model_selection import KFold, cross_val_score X = ['a', 'a', 'b', 'c', 'c', 'c'] k_fold = KFold(n_splits=3) for train_indices, test_indices in k_fold.split(X): print("Train: %s | test: %s" %(train_indices, test_indices)) [ svc.fit(digits_x[train], digits_y[train]).score(digits_x[test], digits_y[test]) for train,test in k_fold.split(digits_x)] cross_val_score(svc, digits_x, digits_y, cv=k_fold, n_jobs=1) # specify an alternative scoring method cross_val_score(svc, digits_x, digits_y, cv=k_fold, scoring='precision_macro')Cross-validation generators* **KFold(n_splits, shuffle, random_state)**: splits it into K folds, trains on K-1 and then tests on the left-out.* **StratifiedKFold(n_splits, shuffle, random_state)**: Same as K-Fold but preserves the class distribution within each fold.* **GroupKFold(n_splits)**: Ensure that the same group is not in both testing and training sets.* **ShuffleSplit(n_splits, test_size, train_size, random_state)**: Generates train/test indices based on random permutaion.* **StratifiedShuffleSplit**: Same as shuffle split but preserves the class distribution within each iteration.* **GroupShuffleSplit**: Ensures that the same group is not in both testing and training sets.* **LeaveOneGroupOut()**: Takes a group array to group observations.* **LeavePGroupOut(n_groups)**: Leave P groups out.* **LeaveOneOut()**: Leave one observation out.* **LeavePOut(P)**: Leave P observations out.* **PredefinedSplit**: Generates train/test indices based on predefined splits.# generates train/test indices based on random permutation from sklearn.model_selection import ShuffleSplit ss = ShuffleSplit(n_splits=3, test_size=0.5) for train,test in ss.split(X): print(train,test) ## exercise svc = svm.SVC(kernel='linear') C_s = np.logspace(-10, 0, 10) scores = [] scores_std = [] for c in C_s: svc.C = c score = cross_val_score(svc, digits_x, digits_y, cv=ss, n_jobs=1) scores.append(np.mean(score)) scores_std.append(np.std(score)) scores plt.figure(1, figsize=(4, 3)) plt.clf() plt.semilogx(C_s, scores) plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--') plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--') locs, labels = plt.yticks() plt.yticks(locs, list(map(lambda x: "%g" %x, locs))) plt.ylabel("CV scores") plt.xlabel("Parmeter C") plt.ylim(0, 1.1) plt.show() locs3.3 Grid-search and cross-validation estimators 3.3.1 Grid-searchscikit-learn provides an object that, given data, computes the score during the fit of an estimator on a parameter grid and chooses the parameters to maximize the cross-validation score. This object takes an estimator during hte construction and exposes an estimator API:from sklearn.model_selection import GridSearchCV, cross_val_score Cs = np.logspace(-6, -1, 10) clf = GridSearchCV(estimator=svc, param_grid=dict(C=Cs), n_jobs=-1) clf.fit(digits_x[:1000], digits_y[:1000]) clf.best_score_ clf.best_estimator_.C clf.score(digits_x[1000:], digits_y[1000:]) cross_val_score(clf, digits_x, digits_y)By default, the GridSearchCV uses a 3-fold cross-validation. However, if it detects that a classifier is passed, rather than a regressor, it uses a stratified 3-fold.Two cross-validation loops are performed in parallel: one by the **GridSearchCV** estimator to set *gamma* and the other one by **cross_val_score** to measure the prediction performance of the estimator. The resulting scores are unbiased estimates of the prediction score on new data. 
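The two nested loops can also be written out explicitly. The following is a minimal sketch (the rbf kernel, the gamma/C grid, the 500-sample subset, and the fold counts are illustrative choices, not taken from the text); it reuses the digits_x and digits_y arrays defined earlier:

```
from sklearn import svm
from sklearn.model_selection import GridSearchCV, cross_val_score
import numpy as np

param_grid = {'gamma': np.logspace(-4, -1, 4), 'C': np.logspace(-2, 2, 5)}

# inner loop: GridSearchCV picks gamma and C on each training split
inner_cv = GridSearchCV(svm.SVC(kernel='rbf'), param_grid, cv=3)

# outer loop: cross_val_score measures how well the whole tuned procedure generalises
outer_scores = cross_val_score(inner_cv, digits_x[:500], digits_y[:500], cv=5)
print(outer_scores.mean())
```

Because the parameters are re-tuned inside every outer split, the outer scores never use the held-out folds for tuning, which is exactly why they can be treated as unbiased estimates of performance on new data.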
3.3.2 Cross-validated estimatorsCross-validation to set a parameter can be done more efficiently on an algorithm-by-algorithm basis. This is why, for certain estimators, scikit-learn exposes **Cross-validation: evaluating estimator performance** estimators that set their parameter automatically by cross-validation:from sklearn import linear_model, datasets lasso = linear_model.LassoCV() diabetes = datasets.load_diabetes() X_diabetes = diabetes.data y_diabetes = diabetes.target lasso.fit(X_diabetes, y_diabetes) lasso.alpha_ from sklearn.linear_model import LassoCV, Lasso from sklearn.model_selection import KFold, GridSearchCV # 3-fold kfold split the dataset k_fold = KFold(n_splits=3) clf1 = LassoCV(cv=k_fold) clf1.fit(X_diabetes, y_diabetes) clf1.score(X_diabetes[200:], y_diabetes[200:]) cross_val_score(clf1, X_diabetes, y_diabetes) clf1.cv lasso = Lasso() alphas = np.logspace(-6,-1,10) clf2 = GridSearchCV(estimator=lasso, param_grid=dict(alpha=alphas),n_jobs=1) clf2.fit(digits_x[:1000], digits_y[:1000]) clf2.cv_results_ clf2.best_score_ alphas = np.logspace(-4,-0.5,30) lasso = Lasso(random_state=0) n_folds = 3 clf1 = GridSearchCV(estimator=lasso, param_grid=dict(alpha=alphas), cv=n_folds, refit=False) clf1.fit(X_diabetes[:150], y_diabetes[:150]) scores = clf1.cv_results_['mean_test_score'] scores_std = clf1.cv_results_['std_test_score'] plt.figure().set_size_inches(8,6) plt.semilogx(alphas, scores) std_error = scores_std/ np.sqrt(n_folds) plt.semilogx(alphas, scores+std_error, 'b--') plt.semilogx(alphas, scores-std_error, 'r--') plt.fill_between(alphas, scores+std_error, scores-std_error, alpha=0.3) plt.xlabel('Alpha') plt.ylabel('Score +/- std_error') plt.axhline(np.max(scores), linestyle='--', color='.5') #plt.axvline(x=0.01, linestyle='--', color='0.5') plt.xlim([alphas[0], alphas[-1]]) ## LassoCV lasso_cv = LassoCV(alphas=alphas, random_state=0) k_fold = KFold(3) for k,(train, test) in enumerate(k_fold.split(digits_x, digits_y)): lasso_cv.fit(digits_x[train], digits_y[train]) print('Fold {0}, cv-score: {1:0.5f}, cv-alpha: {2:0.5f}'.format(k,lasso_cv.alpha_, lasso_cv.score(digits_x[test], digits_y[test])))Fold 0, cv-score: 0.07880, cv-alpha: 0.51473 Fold 1, cv-score: 0.13738, cv-alpha: 0.56136 Fold 2, cv-score: 0.18139, cv-alpha: 0.449104. Unsupervised learning: seeking representations of the data 4.1 Clustering: grouping observations together* ! **The problem solved in clustering**Given the iris dataset, if we knew that there were 3 types of iris, but did not have access to a taxonomist to label them: we could try a **clustering task**: split the observations into well-separated group called *clusters*. 
4.1.1 K-means clusteringthe simplest clustering algorithmfrom sklearn import cluster, datasets from mpl_toolkits.mplot3d import Axes3D iris = datasets.load_iris() X_iris = iris.data y_iris = iris.target k_means = cluster.KMeans(n_clusters=3) k_means.fit(X_iris) print(k_means.labels_[::10]) labels = k_means.labels_ fig = plt.figure(figsize=(4,3)) ax = Axes3D(fig, rect=[0,0,0.95,1], elev=48, azim=134) ax.scatter(X_iris[:,3], X_iris[:,0], X_iris[:,2], c=labels.astype(np.float), edgecolor='k') ax.w_xaxis.set_ticklabels([]) ax.w_yaxis.set_ticklabels([]) ax.w_zaxis.set_ticklabels([]) ax.set_xlabel('Petal width') ax.set_ylabel('Sepal length') ax.set_zlabel('Petal length') ax.set_title("k_means_iris_3") ax.dist = 12 import scipy as sp try: face = sp.face(gray=True) except AttributeError: from scipy import misc face = misc.face(gray=True) X = face.reshape((-1,1)) k_means = cluster.KMeans(n_clusters=5, n_init=1) k_means.fit(X) values = k_means.cluster_centers_.squeeze() labels = k_means.labels_ face_compressed = np.choose(labels, values) face_compressed.shape = face.shape vmin = face.min() vmax = face.max() plt.figure(1, figsize=(3, 2.2)) plt.imshow(face, cmap=plt.cm.gray, vmin=vmin, vmax=256)4.1.2 Hierarchical agglomerative clustering: WardA **Hierarchical clustering** method is a type of cluster analysis that aims to build a hierarchy of clusters. In general, the various approaches of this technique are either:* **Agglomerative**: --bottom-up approaches: each observation starts in its own cluster, and clusters are iteratively merged in such a way to minimize a linkage criterion. This approach is particularly interesting when the clusters of interest are mode of only a few observations. When the number of clusters is large, it is much more computationally efficient than k-means.* **Divisive**: --top-down approaches: all observations start in one cluster, which is iteratively split as one moves down the hierarchy. For estimating large numbers of clusters, this approach is both slow (due to all observations starting as one cluster, which it splits recursively) and statistically ill-posed. Connectivity-constrained clusteringWith agglomerative clustering, it is possible to specify which samples can be clustered together by giving a connectivity graph. Graphs in the scikit are represented by their adjacency matrix. Often, a sparse matrix is used. This can be useful, for instrance, to retrieve connected regions (sometimes also referred to as connected components) when clustering an image:import matplotlib.pyplot as plt from sklearn.feature_extraction.image import grid_to_graph from sklearn.cluster import AgglomerativeClustering # Generate data try: # Scipy >= 0.16 have face in misc from scipy.misc import face face = face(gray=True) except ImportError: face = sp.face(gray=True) # Resize it to 10% of the original size to speed up the processing face = sp.misc.imresize(face, 0.10) / 255 X = np.reshape(face, (-1, 1)) # Define the structure A of the data. Pixels connected to their neighbors connectivity = grid_to_graph(*face.shape)/usr/local/lib/python2.7/dist-packages/ipykernel_launcher.py:9: DeprecationWarning: `imresize` is deprecated! `imresize` is deprecated in SciPy 1.0.0, and will be removed in 1.2.0. Use ``skimage.transform.resize`` instead. if __name__ == '__main__':Feature agglomerationWe have seem that sparsity could be used to mitigate the curse of dimensionality, i.e. an insufficient amount of observations compared to the number of features. 
Another approach is to merge together similar features: **feature agglomeration**. This approach can be implemented by clustering in the feature direction, in other words clustering the transposed data.images = digits.images X = np.reshape(images, (len(images), -1)) connectivity = grid_to_graph(*images[0].shape) agglo = cluster.FeatureAgglomeration(connectivity=connectivity, n_clusters=32) agglo.fit(X) X_reduced = agglo.transform(X) X_approx = agglo.inverse_transform(X_reduced) images_approx = np.reshape(X_approx, images.shape) #images_approx X_approx = agglo.inverse_transform(X_reduced) images_approx = np.reshape(X_approx, images.shape)transform and inverse_transform methodsSome estimators expose a *transform* method, for instance to reduce the dimensionality of the dataset. 4.2 Decompositions: from a signal to components and loadings* **Components and loadings** If X is our multivariate data, then the problem that we are trying to solve is to rewrite it on a different observational basis: we want to learn loadings L and a set of components C such that X = L C. Different criteria exist to choose the components. 4.2.1 Principal component analysis: PCAPrincipal component analysis (PCA) selects the successive components that explain the maximum variance in the signal. The point cloud spanned by the observations above is very flat in one direction: one of the three univariate features can almost be exactly computed using the other two. PCA finds the directions in which the data is not *flat*.When used to *transform* data, PCA can reduce the dimensionality of the data by projecting on a principal subspace.# Create a signal with only 2 useful dimensions x1 = np.random.normal(size=100) x2 = np.random.normal(size=100) x3 = x1 + x2 X = np.c_[x1, x2, x3] from sklearn import decomposition pca = decomposition.PCA() pca.fit(X) pca.explained_variance_ # As we can see, only the first 2 components are useful pca.n_components = 2 X_reduced = pca.fit_transform(X) X_reduced.shape4.2.2 Independent Component Analysis: ICAIndependent component analysis (ICA) selects components so that the distribution of their loadings carries a maximum amount of independent information. 
It is able to recover **non-Gaussian** independent signals:# Generate sample data from scipy import signal time = np.linspace(0, 10, 2000) s1 = np.sin(2 * time) np.random.seed(0) n_samples = 2000 time = np.linspace(0, 8, n_samples) s1 = np.sin(2*time) s2 = np.sign(np.sin(3*time)) s3 = signal.sawtooth(2*np.pi*time) # Signal 3: saw tooth signal S = np.c_[s1, s2, s3] S += 0.2 * np.random.normal(size=S.shape) # Add noise S /= S.std(axis=0) # Standardize data # Mix data A = np.array([[1, 1, 1], [0.5, 2, 1],[1.5, 1, 2]]) # Mixing matrix X = np.dot(S, A.T) # Generate observations # Compute ICA ica = decomposition.FastICA(n_components=3) S_ = ica.fit_transform(X) # Reconstruct signals A_ = ica.mixing_ # get estimated mixing matrix # We can prove that the ICA model applies by reverting the unmixing np.allclose(X, np.dot(S_, A_.T) + ica.mean_) # For comparison, compute PCA pca = decomposition.PCA(n_components=3) H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components # plot results plt.figure() models = [X, S, S_, H] names = ['Observations(mixed signal)', 'True Sources', 'ICA recovered signals', 'PCA recovered signals'] colors = ['red', 'steelblue', 'orange'] for ii, (model, name) in enumerate(zip(models, names), 1): plt.subplot(4,1,ii) plt.title(name) for sig, color in zip(model.T, colors): plt.plot(sig, color=color) plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46) plt.show()5 Putting it all together 5.1 PipeliningWe have seen that some estimators can transform data and that some estimators can predict variables. We can also create combined estimators:import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model, decomposition, datasets from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV logistic = linear_model.LogisticRegression() pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)]) X_digits = digits.data Y_digits = digits.target # plot the PCA spectrum pca.fit(X_digits) plt.figure(1, figsize=(4, 3)) plt.clf() plt.axes([0.2, 0.2, 0.7, 0.7]) plt.plot(pca.explained_variance_, linewidth=2) plt.axis('tight') plt.xlabel('n_componects') plt.ylabel('explained_variance_') # Prediction n_components = [20, 40, 64] Cs = np.logspace(-4, 4, 3) # parameters of pipelines can be set using '__' separated parameter names: estimator = GridSearchCV(pipe, dict(pca__n_components=n_components, logistic__C=Cs)) estimator.fit(X_digits, y_digits) plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components, linestyle=':', label='n_components chosen') plt.legend(prop=dict(size=12)) plt.show()5.2 Face recognition with eigenfacesThe dataset used in this example is a preprocessed except of the 'Labeled Faces in the Wild', also known as LFW:""" ======================================= Faces recognition example using eigenfaces and SVMs ======================================= """ from __future__ import print_function from time import time import logging import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.datasets import fetch_lfw_people from sklearn.metrics import classification_report, confusion_matrix from sklearn.decomposition import PCA from sklearn.svm import SVC print(__doc__) # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') ## Download the data, if not already on disk and load it as numpy arrays lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4) # introspect the images arrays to find the 
shapes (for plotting) n_samples, h, w = lfw_people.images.shape # for machinee learning we use the 2 data directly(as relative pixel positions info is ignored by this model) X = lfw_people.data n_features = X.shape[1] # the label to predict is the id of the person y = lfw_people.target target_names = lfw_people.target_names n_classes = target_names.shape[0] print('Total dataset size:') print('n_samples: %d' % n_samples) print('n_features: %d' % n_features) print('n_classes: %d' % n_classes) # Split into a training set and a test set using a stratified k fold # split into a training and testing set X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) # Compute a PCA(eigenfaces) on the face dataset(treated as unlabeled dataset): # unsupervised feature extraction / dimensionality reduction n_components = 150 print('Extracing the top %d eigenfaces from %d faces' %(n_components, X_train.shape[0])) t0 = time() pca = PCA(n_components=n_components, svd_solver='randomized', whiten=True).fit(X_train) print('Done in %0.3fs' %(time()-t0)) eigenfaces = pca.components_.reshape((n_components, h, w)) print('Projecting the input data on the eigenfaces orthonormal basis') t0 = time() X_train_pca = pca.transform(X_train) X_test_pca = pca.transform(X_test) print('Done in %0.3f' %(time()-t0)) ## Train a SVM classification model print('Fitting the classifier to the training set') t0 = time() param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5], 'gamma':[0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]} clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid) clf = clf.fit(X_train_pca, y_train) print('Done in %0.3fs' %(time()-t0)) print('Best estimator found by grid search:') print(clf.best_estimator_) # Quantitative evaluation of the model quality on the test set print("Predicting people's names on the test set") t0 = time() y_pred = clf.predict(X_test_pca) print('Done in %0.3fs' %(time()-t0)) print(classification_report(y_test, y_pred, target_names=target_names)) print(confusion_matrix(y_test, y_pred, labels=range(n_classes))) # Qualitative evaluation of the prediction using matplotlib def plot_gallery(images, titles, h, w, n_row=3, n_col=4): """Helper function to plot a gallery of portraits""" plt.figure(figsize=(1.8 * n_col, 2.4 * n_row)) plt.subplots_adjust(bottom=0, left=0.01, right=0.99, top=0.99, hspace=0.35) for i in range(n_row * n_col): plt.subplot(n_row, n_col, i+1) plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray) plt.title(titles[i], size=12) plt.xticks(()) plt.yticks(()) # plot the result of the prediction on a portion of the best set def title(y_pred, y_test, target_names, i): pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1] true_name = target_names[y_test[i]].rsplit(' ', 1)[-1] return 'Predicted: %s \nTrue: %s' %(pred_name, true_name) prediction_titles = [title(y_pred, y_test, target_names, i) for i in range(y_pred.shape[0]) ] plot_gallery(X_test, prediction_titles, h, w) # plot the gallery of the most significative eigenfaces eigenface_titles = ['eigenface %d' % i for i in range(eigenfaces.shape[0])] plot_gallery(eigenfaces, eigenface_titles, h, w) plt.show()DataJoint Workflow for Neuropixels Analysis with Kilosort+ This notebook will describe the steps for interacting with the data processed with the workflow.+ This workflow is assembled from 4 DataJoint elements: + [element-lab](https://github.com/datajoint/element-lab) + [element-animal](https://github.com/datajoint/element-animal) + 
[element-session](https://github.com/datajoint/element-session) + [element-array-ephys](https://github.com/datajoint/element-array-ephys)+ DataJoint provides abundant functions to query and fetch data. For a detailed tutorials, visit our [general tutorial site](https://playground.datajoint.io/) Load modulesimport datajoint as dj import matplotlib.pyplot as plt import numpy as np from workflow.pipeline import subject, session, probe, ephysWorkflow architecturedj.Diagram(subject.Subject) + dj.Diagram(session.Session) + dj.Diagram(probe) + dj.Diagram(ephys)Experimental metadatasubject.Subject() session_key = (session.Session & 'subject="subject5"' & 'session_datetime = "2021-10-07 12:00:00"').fetch1('KEY') session_keyRecordings for a particular session from one or more probes.ephys.EphysRecording & session_keySpike-sorting resultsephys.CuratedClustering.Unit & session_keyGenerate a raster plot for one probe insertion, one parameter set, and the 'good' unitsclustering_query = (ephys.CuratedClustering.Unit & \ session_key & \ 'insertion_number = 2' & \ 'paramset_idx=0' & \ 'cluster_quality_label = "good"') units, unit_spiketimes = clustering_query.fetch('unit', 'spike_times') x = np.hstack(unit_spiketimes) y = np.hstack([np.full_like(s, u) for u, s in zip(units, unit_spiketimes)]) fig, ax = plt.subplots(1, 1, figsize=(32, 16)) ax.plot(x, y, '|') ax.set_xlabel('Time (s)'); ax.set_ylabel('Unit');Plot the waveform of a unitunit_key = (clustering_query & 'unit = 1').fetch1('KEY') unit_data = (ephys.CuratedClustering.Unit * ephys.WaveformSet.PeakWaveform & unit_key).fetch1() sampling_rate = (ephys.EphysRecording & session_key & 'insertion_number = 2').fetch1('sampling_rate')/1000 # in kHz plt.plot(np.r_[:unit_data['peak_electrode_waveform'].size] * 1/sampling_rate, unit_data['peak_electrode_waveform']) plt.xlabel('Time (ms)'); plt.ylabel(r'Voltage ($\mu$V)');Data Representation> This module is dedicated to represent software artifacts into proper abstract structure such as vectors or graphs. Possible subfolders:>> - Vectorization: --Word2vec --BERT> - Graph# export # Imports import numpy as np from abc import ABC, abstractmethod from pathlib import Path from transformers import pipeline #hide from nbdev.showdoc import * # export class Vectorizor(ABC): def __init__(self, vectorizor): self.vectorizor = vectorizor super().__init__() @abstractmethod def vectorize(self, inpt): pass # export class BertVectorizor(Vectorizor): """ Vectorization subclass that handles vectorizing using BERT """ def vectorize(self, inpt): return np.array(self.vectorizor("public static void main")) path = Path('/tf/data/models/JavaBert-v1') vectorizor = BertVectorizor(pipeline( "feature-extraction", model= str(path), tokenizer= str(path) )) vectorizor.vectorize("public static void main") ! 
nbdev_build_docs import ds4se.mgmnt.prep.conv as prep params = { 'system':'wikipedia/20190301.en', 'saving_path': 'test_data/', 'language': 'english' } #[step1] Create Preprocesser <---------- preprocess_pipeline = prep.ConventionalPreprocessing(params= params)Data loaderdevice = torch.device("cuda" if torch.cuda.is_available() else "cpu") # desired size of the output image imsize = (256, 256) # scale imported image + transform it into a torch tensor loader = transforms.Compose([transforms.Resize(imsize), transforms.ToTensor()]) def image_loader(image_name): image = Image.open(image_name).convert('RGB') # fake batch dimension required to fit network's input dimensions image = loader(image).unsqueeze(0) return image.to(device, torch.float) style_img = image_loader("Style/chuanghua.png") content_img = image_loader("content/thor.jpeg") assert style_img.size() == content_img.size(), \ "we need to import style and content images of the same size" unloader = transforms.ToPILImage() # reconvert into PIL image plt.ion() def imshow(tensor, title=None): image = tensor.cpu().clone() # we clone the tensor to not do changes on it image = image.squeeze(0) # remove the fake batch dimension image = unloader(image) plt.imshow(image) if title is not None: plt.title(title) plt.pause(0.001) # pause a bit so that plots are updated plt.figure() imshow(style_img, title='Style Image') plt.figure() imshow(content_img, title='Content Image')Loss Function# content loss class ContentLoss(nn.Module): def __init__(self, target,): super(ContentLoss, self).__init__() self.target = target.detach() def forward(self, input): self.loss = F.mse_loss(input, self.target) return input # style loss def gram_matrix(input): a, b, c, d = input.size() features = input.view(a * b, c * d) G = torch.mm(features, features.t()) gram = G.div(a * b * c * d) return gram class StyleLoss(nn.Module): def __init__(self, target): super(StyleLoss, self).__init__() self.target = target.detach() def forward(self, input): ## -- ! 
code required G = gram_matrix(input) A = gram_matrix(self.target) self.loss = F.mse_loss(G, A) return inputPre-trained modelcnn = models.vgg19(pretrained=True).features.to(device).eval() # normalize image before send it to network cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device) cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device) # create a module to normalize input image so we can easily put it in a nn.Sequential class Normalization(nn.Module): def __init__(self, mean, std): super(Normalization, self).__init__() self.mean = mean.clone().detach() self.std = std.clone().detach() def forward(self, img): # normalize img self.mean = torch.tensor(self.mean).view(-1, 1, 1) self.std = torch.tensor(self.std).view(-1, 1, 1) normalized_img = (img - self.mean) / self.std return normalized_imgGet content/style representationsdef get_style_model_and_losses(cnn, normalization_mean, normalization_std, style_img, content_img, content_layers, style_layers): cnn = copy.deepcopy(cnn) # normalization module normalization = Normalization(normalization_mean, normalization_std).to(device) # just in order to have an iterable access to or list of content/syle losses content_losses = [] style_losses = [] # assuming that cnn is a nn.Sequential, so we make a new nn.Sequential # to put in modules that are supposed to be activated sequentially model = nn.Sequential(normalization) i = 0 # increment every time we see a conv for layer in cnn.children(): if isinstance(layer, nn.Conv2d): i += 1 name = 'conv_{}'.format(i) elif isinstance(layer, nn.ReLU): name = 'relu_{}'.format(i) layer = nn.ReLU(inplace=False) elif isinstance(layer, nn.MaxPool2d): name = 'pool_{}'.format(i) elif isinstance(layer, nn.BatchNorm2d): name = 'bn_{}'.format(i) else: raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__)) model.add_module(name, layer) if name in content_layers: # add content loss: target = model(content_img).detach() content_loss = ContentLoss(target) model.add_module("content_loss_{}".format(i), content_loss) content_losses.append(content_loss) if name in style_layers: # add style loss: ## -- ! 
code required target_feature = model(style_img).detach() style_loss = StyleLoss(target_feature) model.add_module("style_loss_{}".format(i), style_loss) style_losses.append(style_loss) # now we trim off the layers after the last content and style losses for i in range(len(model) - 1, -1, -1): if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss): break model = model[:(i + 1)] return model, style_losses, content_lossesBuild modelcontent_layers_selected = ['conv_4'] style_layers_selected = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5'] def run_style_transfer(cnn, normalization_mean, normalization_std, content_img, style_img, input_img, num_steps=300, style_weight=1000000, content_weight=1, content_layers=content_layers_selected, style_layers=style_layers_selected): """Run the style transfer.""" print('Building the style transfer model..') model, style_losses, content_losses = get_style_model_and_losses(cnn, normalization_mean, normalization_std, style_img, content_img, content_layers, style_layers) optimizer = optim.Adam([input_img.requires_grad_()], lr=0.1, eps=1e-1) print('Optimizing..') step_i = 0 while step_i <= num_steps: input_img.data.clamp_(0, 1) optimizer.zero_grad() model(input_img) style_score = 0 content_score = 0 for sl in style_losses: style_score += sl.loss for cl in content_losses: content_score += cl.loss style_score *= style_weight content_score *= content_weight loss = style_score + content_score loss.backward() optimizer.step() step_i += 1 if step_i % 50 == 0: print("run {}:".format(step_i)) print('Style Loss : {:4f} Content Loss: {:4f}'.format( style_score.item(), content_score.item())) print() # a last correction... input_img.data.clamp_(0, 1) return input_imgTestcontent_layers_selected = ['conv_4'] style_layers_selected = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5'] style_weight=1e20 input_img = content_img.clone().detach().requires_grad_(True) output = run_style_transfer(cnn, cnn_normalization_mean, cnn_normalization_std, content_img, style_img, input_img, num_steps=300,style_weight=style_weight, content_layers=content_layers_selected, style_layers=style_layers_selected) plt.figure() imshow(output, title='Output Image') plt.ioff() plt.show()Building the style transfer model..data prepimport pandas train=pandas.read_csv("train.csv") train.shape train.columns dv='Survived' train.dtypes train.isnull().any() nrow=train.PassengerId.count() #nulls=train.isnull().apply(lambda x: sum(x)/nrow) #nulls=nulls.index[nulls>0.5].tolist() #train2=train.drop(nulls,1) #from sklearn.preprocessing import Imputer #imp=Imputer(strategy='most_frequent',axis=1,copy=True) #imp.transform(train2)variables creationtrain=train.assign(cabinflag=train.Cabin.notnull().astype('int64')) train=train.assign(deck=train.Cabin.astype('str').apply(lambda x: x[0])) train=train.assign(orticket=train.Ticket.apply(str.isnumeric).astype('int64')) train=train.assign(lastname=train.Name.apply(lambda x: x.split(",",1)[0])) train=train.assign(title=train.Name.apply(lambda x: x.split(",",1)[1].split(".",1)[0].strip())) train=train.assign(family=train.SibSp+train.Parch) train=train.assign(fareperperson=train.Fare/(train.family+1)) train.loc[train.Embarked.isnull(),'Embarked']=pandas.Series(['U'],index=train[train.Embarked.isnull()].index) #fare as % of median fare for class-deck # travelling with spouse, parents, children or alone #types of ticketsexplorationtrain2.title.unique() train.groupby(by='title').apply(lambda x: x.shape[0]) train.groupby('Ticket').apply(lambda x: 
x.shape[0]).order(ascending=False).head() train.groupby('orticket').apply(lambda x: x.shape[0]).order(ascending=False).head() train.groupby(['orticket','Pclass']).apply(lambda x: x.shape[0]).order(ascending=False).head() train.groupby(by='lastname').apply(lambda x: x.shape[0]).order(ascending=False).head() train.loc[train.family.order(ascending=False).index,'lastname'].unique()[0:5] train.groupby('Pclass').Cabin.count() train.groupby('Pclass').Cabin.unique() train.groupby('title').Cabin.count()Imputation impute age with title wise median ageAgeimp=train.groupby('title').Age.median() train.loc[train.Age.isnull(),'Age']=train.title.apply(lambda x: Ageimp[x])[train[train.Age.isnull()].index] train.isnull().apply(lambda x: sum(x)/nrow) train=train.assign(Sex=train.Sex.astype('category').cat.codes) train=train.assign(Embarked=train.Embarked.astype('category').cat.codes) train=train.assign(deck=train.deck.astype('category').cat.codes) train=train.assign(title=train.title.astype('category').cat.codes)Modelingdev=train.loc[:,['Pclass','Sex','Age','SibSp','Parch','Fare','Embarked','cabinflag','deck','orticket','title','family','fareperperson']] dev.shape from sklearn.ensemble import RandomForestClassifier modobj=RandomForestClassifier(random_state=111,verbose=0) from sklearn.cross_validation import StratifiedKFold, StratifiedShuffleSplit sss=StratifiedShuffleSplit(train.Survived.values, 5, test_size=0.2, random_state=121) from sklearn.grid_search import GridSearchCV, ParameterGrid GSobj=GridSearchCV(modobj,{'min_samples_leaf':[25,50],'max_depth':[3,5,7,9],'max_features':['sqrt'],'criterion':["gini"],'n_estimators':[100,200,400]}, scoring='accuracy',cv=sss,verbose=10000,n_jobs=-1) GSobj.fit(dev,train.Survived.values) GSobj.best_score_Creating your own dataset from Images Pulled From eBay*by: and . Inspired by [](https://www.pyimagesearch.com/2017/12/04/how-to-create-a-deep-learning-dataset-using-google-images/)* I followed along in [lesson2-download.ipynb](lesson2-download.ipynb) and last week I did some work trying to duplicate it building a classifier from Google images to try to distinguish coins from Canada, the US and and UK. I captured the effort here [lesson2-download-DanDouglas-CoinsFromGoogle.ipynb](lesson2-download-DanDouglas-CoinsFromGoogle.ipynb). However, I didn't have much success doing it that way. I believe that that was due to the fact that the images were just too noisy. After relection, I decided to use a different source for the data. I went to [eBay](https://www.ebay.com) browsed categories of listings of the coins for sale. The marketplace already had the coins classified, and the listing thumbnail pictures were good closeups of coins that should work better. I adapted the JavaScript console method of URL capture to work with those pages (details below) and went at it.from fastai.vision import *Get a list of URLs Search and scroll eBay's marketplace has a rich category tree and sellers categorize their listings when they are created to make them easier to find. 
* US Coins - [https://www.ebay.com/sch/253/i.html?_from=R40&_nkw=coins](https://www.ebay.com/sch/253/i.html?_from=R40&_nkw=coins)* UK Coins - [https://www.ebay.com/sch/3394/i.html?_from=R40&_nkw=coins](https://www.ebay.com/sch/3394/i.html?_from=R40&_nkw=coins)* Canada Coins - [https://www.ebay.com/sch/3377/i.html?_from=R40&_nkw=coins](https://www.ebay.com/sch/3377/i.html?_from=R40&_nkw=coins)* Austrialia Coins - [https://www.ebay.com/sch/45142/i.html?_from=R40&_nkw=coins](https://www.ebay.com/sch/45142/i.html?_from=R40&_nkw=coins)You can set the page to show a galary view and set the most images per page that you can, for me that was 192. Download into file Now you must run some Javascript code in your browser which will save the URLs of all the images you want for you dataset.In Google Chrome press CtrlShiftj on Windows/Linux and CmdOptj on macOS, and a small window the javascript 'Console' will appear. In Firefox press CtrlShiftk on Windows/Linux or CmdOptk on macOS. That is where you will paste the JavaScript commands.You will need to get the urls of each of the images. Before running the following commands, you may want to disable ad blocking extensions (uBlock, AdBlockPlus etc.) in Chrome. Otherwise the window.open() command doesn't work. Then you can run the following commands.After some trial and error changing up the original and experimenting with different selectors, I hit up this set of commands to get URLs from the pages downloaded in to a file ```javascripturls = Array.from(document.querySelectorAll('.s-item__image-img')).map(el=>el.src)window.open('data:text/csv;charset=utf-8,' + escape(urls.join('\n')));```This finds all the elements in the document with CSS class `s-item__image-img` and extracts the `src=` attribute, one pre line in a downloaded `.csv` file.Once the files were downloaded, I used a full featured text editor ([TextMate](https://macromates.com/)) to remove duplicates and any munged lines. I also changed the format of the returned impages from `.webp` to `.jpeg`.When I was done, I had 4 files with the following number of lines* australia-coins.txt 1063* united_kingdom-coins.txt 859* united_states-coins.txt 961* canada-coins.txt 1089 Create directory and upload urls file into your server Choose an appropriate name for your labeled images. You can run these steps multiple times to create different labels.folder = 'australia' file = 'australia-coins.txt' folder = 'united_kingdom' file = 'united_kingdom-coins.txt' folder = 'united_states' file = 'united_states-coins.txt' folder = 'canada' file = 'canada-coins.txt'You will need to run this cell once per each category.path = Path('data/coins-ebay') dest = path/folder dest.mkdir(parents=True, exist_ok=True) path.ls()Download images Now you will need to download your images from their respective urls.fast.ai has a function that allows you to do just that. You just have to specify the urls filename as well as the destination folder and this function will download and save all images that can be opened. If they have some problem in being opened, they will not be saved.Let's download our images! Notice you can choose a maximum number of images to be downloaded. 
In this case we will not download all the urls.You will need to run this line once for every category.classes = ['australia','united_kingdom','united_states', 'canada'] download_images(path/file, dest, max_pics=859) # If you have problems download, try with `max_workers=0` to see exceptions: download_images(path/file, dest, max_pics=20, max_workers=0)Then we can remove any images that can't be opened:for c in classes: print(c) verify_images(path/c, delete=True, max_size=1000)australiaView data#np.random.seed(42) #data = ImageDataBunch.from_folder(path, train=".", valid_pct=0.2, # ds_tfms=get_transforms(), size=224, num_workers=4).normalize(imagenet_stats) # If you already cleaned your data, run this cell instead of the one before np.random.seed(42) data = ImageDataBunch.from_csv(path, folder=".", valid_pct=0.2, csv_labels='cleaned.csv', ds_tfms=get_transforms(), size=224, num_workers=4).normalize(imagenet_stats)Good! Let's take a look at some of our pictures then.data.classes data.show_batch(rows=3, figsize=(7,8)) data.classes, data.c, len(data.train_ds), len(data.valid_ds)Train modellearn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(4) learn.save('stage-1') learn.unfreeze() learn.lr_find() # If the plot is not showing try to give a start and end learning rate # learn.lr_find(start_lr=1e-5, end_lr=1e-1) learn.recorder.plot() learn.fit_one_cycle(6, max_lr=slice(3e-5,1e-5)) learn.save('stage-2')Interpretationlearn.load('stage-2'); interp = ClassificationInterpretation.from_learner(learn) interp.plot_confusion_matrix()Cleaning UpSome of our top losses aren't due to bad performance by our model. There are images in our data set that shouldn't be.Using the `ImageCleaner` widget from `fastai.widgets` we can prune our top losses, removing photos that don't belong.from fastai.widgets import *First we need to get the file paths from our top_losses. We can do this with `.from_toplosses`. We then feed the top losses indexes and corresponding dataset to `ImageCleaner`.Notice that the widget will not delete images directly from disk but it will create a new csv file `cleaned.csv` from where you can create a new ImageDataBunch with the corrected labels to continue training your model. In order to clean the entire set of images, we need to create a new dataset without the split. The video lecture demostrated the use of the `ds_type` param which no longer has any effect. See [the thread](https://forums.fast.ai/t/duplicate-widget/30975/10) for more details.#db = (ImageList.from_folder(path) # .split_none() # .label_from_folder() # .transform(get_transforms(), size=224) # .databunch() # ) # If you already cleaned your data using indexes from `from_toplosses`, # run this cell instead of the one before to proceed with removing duplicates. # Otherwise all the results of the previous step would be overwritten by # the new run of `ImageCleaner`. db = (ImageList.from_csv(path, 'cleaned.csv', folder='.') .split_none() .label_from_df() .transform(get_transforms(), size=224) .databunch() )Then we create a new learner to use our new databunch with all the images.learn_cln = cnn_learner(db, models.resnet34, metrics=error_rate) learn_cln.load('stage-2'); ds, idxs = DatasetFormatter().from_toplosses(learn_cln)Make sure you're running this notebook in Jupyter Notebook, not Jupyter Lab. That is accessible via [/tree](/tree), not [/lab](/lab). 
Running the `ImageCleaner` widget in Jupyter Lab is [not currently supported](https://github.com/fastai/fastai/issues/1539).# Don't run this in google colab or any other instances running jupyter lab. # If you do run this on Jupyter Lab, you need to restart your runtime and # runtime state including all local variables will be lost. ImageCleaner(ds, idxs, path)If the code above does not show any GUI(contains images and buttons) rendered by widgets but only text output, that may caused by the configuration problem of ipywidgets. Try the solution in this [link](https://github.com/fastai/fastai/issues/1539issuecomment-505999861) to solve it. Flag photos for deletion by clicking 'Delete'. Then click 'Next Batch' to delete flagged photos and keep the rest in that row. `ImageCleaner` will show you a new row of images until there are no more to show. In this case, the widget will show you images until there are none left from `top_losses.ImageCleaner(ds, idxs)` You can also find duplicates in your dataset and delete them! To do this, you need to run `.from_similars` to get the potential duplicates' ids and then run `ImageCleaner` with `duplicates=True`. The API works in a similar way as with misclassified images: just choose the ones you want to delete and click 'Next Batch' until there are no more images left. Make sure to recreate the databunch and `learn_cln` from the `cleaned.csv` file. Otherwise the file would be overwritten from scratch, losing all the results from cleaning the data from toplosses.ds, idxs = DatasetFormatter().from_similars(learn_cln) ImageCleaner(ds, idxs, path, duplicates=True)Remember to recreate your ImageDataBunch from your `cleaned.csv` to include the changes you made in your data! Putting your model in production First thing first, let's export the content of our `Learner` object for production:learn.export()This will create a file named 'export.pkl' in the directory where we were working that contains everything we need to deploy our model (the model, the weights but also some metadata like the classes or the transforms/normalization used). You probably want to use CPU for inference, except at massive scale (and you almost certainly don't need to train in real-time). If you don't have a GPU that happens automatically. You can test your model on CPU like so:defaults.device = torch.device('cpu') img = open_image(path/'united_states'/'00000014.jpeg') imgWe create our `Learner` in production enviromnent like this, just make sure that `path` contains the file 'export.pkl' from before.learn = load_learner(path) pred_class,pred_idx,outputs = learn.predict(img) pred_classNotebook for elaboration of grid frames and comparison of Java and C results. First we may want to import the libraries for data manipulation.import pandas as pd import mathAnd define the Java and C output.txt paths.JAVA_OUT_PATH = '../ns_java/output.txt' C_OUT_PATH = '../src/output.txt'Then we're going to define the amount of points per frame, previously defined in the C program, and read with Pandas the csv output.SQUARE_BOUNDS = 102 FRAME_POINTS = 10404 raw_C = [] C_data = [] raw_C = pd.read_csv(C_OUT_PATH, names=['x', 'y', 'd'])Size of raw output.txt:print(len(raw_C))530604For consistency, we're also picking up the Java application output and checking whether it corresponds to the C one. 
This has to be done only if parameters were equal between the simulations.EQUAL_PARAMETERS = True raw_Java = [] if EQUAL_PARAMETERS: raw_Java = pd.read_csv(JAVA_OUT_PATH, names=['x', 'y', 'd']) # Now let's do the comparison start = 0 end = FRAME_POINTS i = 0 tmp = pd.DataFrame(columns=['x', 'y', 'd']) # Let's sort the C output file and save it in tmp while (start != len(raw_C)): # Take a copy of the first frame chunk = raw_C.loc[start:(end-1)].copy() # Sort it first along the y column and then along the x one chunk = chunk.sort_values(['y', 'x'], ascending=[True, True]) # Append it to the temporary dataframe tmp = tmp.append(chunk, ignore_index = True) start = end end += FRAME_POINTS tmp = tmp.reset_index(drop=True) start = 0 end = FRAME_POINTS # Control done for the first frame to ensure correctness between Java and C application - SLOW!! while (i != FRAME_POINTS): java_x = tmp.x[i] java_y = tmp.y[i] # This call will take a while #java_idx = raw_Java.loc[(raw_Java['x'] == java_x) & (raw_Java['y'] == java_y) & (start <= raw_Java.index) & (raw_Java.index < end)].index[0] java_idx = i if not (raw_Java.d[java_idx] == tmp.d[i]): print("ERRORE") print("C index: ", i) print("Java index: ", java_idx) print(tmp.loc[i]) print(raw_Java.loc[java_idx]) print('\n') i += 1 # There should be a print only in case of errors. # This shows the differencies(if any) between the two dataframes print(tmp.compare(raw_Java)) # True if they're equal tmp.equals(raw_Java)MC integrationWe will use Monte Carlo (MC) to calculate $\pi$ using the Python Standard Libraryimport random import math import time import numpy as np n_samples = 10000 num_inside = 0 for i in range(n_samples): # Generate a random point between 0 and 1 for x x = random.random() # Generate a random point between 0 and 1 for y y = random.random() # Print the point # print(F'The generated point is ({x},{y})') distance_from_origin = math.sqrt(x ** 2 + y ** 2) if distance_from_origin < 1: num_inside += 1 # calculate pi my_pi = 4 * num_inside / n_samples print(my_pi) import matplotlib.pyplot as plt %matplotlib notebook # make plotting interactive # Create empty figure fig = plt.figure() ax = fig.add_subplot(111) fig.show() for i in range(n_samples): # Generate a random point between 0 and 1 for x x = random.random() # Generate a random point between 0 and 1 for y y = random.random() # Print the point # print(F'The generated point is ({x},{y})') distance_from_origin = math.sqrt(x ** 2 + y ** 2) if distance_from_origin < 1: num_inside += 1 ax.plot(x,y,'ob') # o - round; b - blue else: ax.plot(x,y,'r*') # r - red; * - asterisk my_pi = 4 * num_inside / n_samples my_pi n_samples = 1000 num_inside = 0 # Empty lists for appending calculate_pi = [] n_values = [] for i in range(1,n_samples+1): # Generate a random point between 0 and 1 for x x = random.random() # Generate a random point between 0 and 1 for y y = random.random() # Print the point # print(F'The generated point is ({x},{y})') distance_from_origin = math.sqrt(x ** 2 + y ** 2) if distance_from_origin < 1: num_inside += 1 ax.plot(x,y,'ob') # o - round; b - blue else: ax.plot(x,y,'r*') # r - red; * - asterisk log10 = math.log(i,10) # Calculate log10(i) if log10 % 1 == 0: # log10 as an integer my_pi = 4*num_inside/i calculate_pi.append(my_pi) n_values.append(i) print(F'{i}\t{my_pi}') start = time.time() n_samples = 1000000 num_inside = 0 for i in range(1,n_samples+1): # Generate a random point between 0 and 1 for x x = random.random() # Generate a random point between 0 and 1 for y y = random.random() # 
Print the point # print(F'The generated point is ({x},{y})') distance_from_origin = math.sqrt(x ** 2 + y ** 2) if distance_from_origin < 1: num_inside += 1 pi = 4 * num_inside / n_samples end = time.time() elapsed_time = end - start print(F"{pi} calculated with {n_samples} samples using Python Standard Library : {elapsed_time}")3.141656 calculated with 1000000 samples using Python Standard Library : 0.7251179218292236NumPy Calculation of $\pi$# Start by using 100 samples. start = time.time() n_samples = 1000000 random_numbers = np.random.random(size=(n_samples,2)) vals = np.sum(random_numbers**2, axis=1) num_inside = np.sum(vals < 1) pi = 4 * num_inside / n_samples end = time.time() elapsed_time = end - start print(F"{pi} calculated with {n_samples} samples using NumPy Library : {elapsed_time}")3.142208 calculated with 1000000 samples using NumPy Library : 0.06436467170715332Concise Chit ChatGitHub Repository: Code TODO:1. create a DataLoader class for dataset preprocess. (Use tf.data.Dataset inside?)1. Create a PyPI package for easy load cornell movie curpos dataset(?)1. Use PyPI module `embeddings` to load `GLOVES`, or use tfhub to load `GLOVES`?1. How to do a `clip_norm`(or set `clip_value`) in Keras with Eager mode but without `tf.contrib`?1. Better name for variables & functions1. Code clean1. Encapsulate all layers to Model Class: 1. ChitChatEncoder 1. ChitChatDecoder 1. ChitChatModel1. Re-style to follow the book1. ...? Book Todo1. Outlines1. What's seq2seq1. What's word embedding1. 1. Split code into snips1. Write for snips1. Content cleaning and optimizing1. ...? Other1. `keras.callbacks.TensorBoard` instead of `tf.contrib.summary`? - `model.fit(callbacks=[TensorBoard(...)])`1. download url? - http://old.pep.com.cn/gzsx/jszx_1/czsxtbjxzy/qrzptgjzxjc/dzkb/dscl/ config.py'''doc''' # GO for start of the sentence # DONE for end of the sentence GO = '\b' DONE = '\a' # max words per sentence MAX_LEN = 20data_loader.py''' data loader ''' import gzip import re from typing import ( # Any, List, Tuple, ) import tensorflow as tf import numpy as np # from .config import ( # GO, # DONE, # MAX_LEN, # ) DATASET_URL = 'https://github.com/huan/concise-chit-chat/releases/download/v0.0.1/dataset.txt.gz' DATASET_FILE_NAME = 'concise-chit-chat-dataset.txt.gz' class DataLoader(): '''data loader''' def __init__(self) -> None: print('DataLoader', 'downloading dataset from:', DATASET_URL) dataset_file = tf.keras.utils.get_file( DATASET_FILE_NAME, origin=DATASET_URL, ) print('DataLoader', 'loading dataset from:', dataset_file) # dataset_file = './data/dataset.txt.gz' # with open(path, encoding='iso-8859-1') as f: with gzip.open(dataset_file, 'rt') as f: self.raw_text = f.read().lower() self.queries, self.responses \ = self.__parse_raw_text(self.raw_text) self.size = len(self.queries) def get_batch( self, batch_size=32, ) -> Tuple[List[List[str]], List[List[str]]]: '''get batch''' # print('corpus_list', self.corpus) batch_indices = np.random.choice( len(self.queries), size=batch_size, ) batch_queries = self.queries[batch_indices] batch_responses = self.responses[batch_indices] return batch_queries, batch_responses def __parse_raw_text( self, raw_text: str ) -> Tuple[List[List[str]], List[List[str]]]: '''doc''' query_list = [] response_list = [] for line in raw_text.strip('\n').split('\n'): query, response = line.split('\t') query, response = self.preprocess(query), self.preprocess(response) query_list.append('{} {} {}'.format(GO, query, DONE)) response_list.append('{} {} {}'.format(GO, response, DONE)) 
return np.array(query_list), np.array(response_list) def preprocess(self, text: str) -> str: '''doc''' new_text = text new_text = re.sub('[^a-zA-Z0-9 .,?!]', ' ', new_text) new_text = re.sub(' +', ' ', new_text) new_text = re.sub( '([\w]+)([,;.?!#&-\'\"-]+)([\w]+)?', r'\1 \2 \3', new_text, ) if len(new_text.split()) > MAX_LEN: new_text = (' ').join(new_text.split()[:MAX_LEN]) match = re.search('[.?!]', new_text) if match is not None: idx = match.start() new_text = new_text[:idx+1] new_text = new_text.strip().lower() return new_textvocabulary.py'''doc''' import re from typing import ( List, ) import tensorflow as tf # from .config import ( # DONE, # GO, # MAX_LEN, # ) class Vocabulary: '''voc''' def __init__(self, text: str) -> None: self.tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='') self.tokenizer.fit_on_texts( [GO, DONE] + re.split( r'[\s\t\n]', text, ) ) # additional 1 for the index 0 self.size = 1 + len(self.tokenizer.word_index.keys()) def texts_to_padded_sequences( self, text_list: List[List[str]] ) -> tf.Tensor: '''doc''' sequence_list = self.tokenizer.texts_to_sequences(text_list) padded_sequences = tf.keras.preprocessing.sequence.pad_sequences( sequence_list, maxlen=MAX_LEN, padding='post', truncating='post', ) return padded_sequences def padded_sequences_to_texts(self, sequence: List[int]) -> str: return 'tbw'model.py'''doc''' import tensorflow as tf import numpy as np from typing import ( List, ) # from .vocabulary import Vocabulary # from .config import ( # DONE, # GO, # MAX_LENGTH, # ) EMBEDDING_DIM = 300 LATENT_UNIT_NUM = 500 class ChitEncoder(tf.keras.Model): '''encoder''' def __init__( self, ) -> None: super().__init__() self.lstm_encoder = tf.keras.layers.CuDNNLSTM( units=LATENT_UNIT_NUM, return_state=True, ) def call( self, inputs: tf.Tensor, # shape: [batch_size, max_len, embedding_dim] training=None, mask=None, ) -> tf.Tensor: _, *state = self.lstm_encoder(inputs) return state # shape: ([latent_unit_num], [latent_unit_num]) class ChatDecoder(tf.keras.Model): '''decoder''' def __init__( self, voc_size: int, ) -> None: super().__init__() self.lstm_decoder = tf.keras.layers.CuDNNLSTM( units=LATENT_UNIT_NUM, return_sequences=True, return_state=True, ) self.dense = tf.keras.layers.Dense( units=voc_size, ) self.time_distributed_dense = tf.keras.layers.TimeDistributed( self.dense ) self.initial_state = None def set_state(self, state=None): '''doc''' # import pdb; pdb.set_trace() self.initial_state = state def call( self, inputs: tf.Tensor, # shape: [batch_size, None, embedding_dim] training=False, mask=None, ) -> tf.Tensor: '''chat decoder call''' # batch_size = tf.shape(inputs)[0] # max_len = tf.shape(inputs)[0] # outputs = tf.zeros(shape=( # batch_size, # batch_size # max_len, # max time step # LATENT_UNIT_NUM, # dimention of hidden state # )) # import pdb; pdb.set_trace() outputs, *states = self.lstm_decoder(inputs, initial_state=self.initial_state) self.initial_state = states outputs = self.time_distributed_dense(outputs) return outputs class ChitChat(tf.keras.Model): '''doc''' def __init__( self, vocabulary: Vocabulary, ) -> None: super().__init__() self.word_index = vocabulary.tokenizer.word_index self.index_word = vocabulary.tokenizer.index_word self.voc_size = vocabulary.size # [batch_size, max_len] -> [batch_size, max_len, voc_size] self.embedding = tf.keras.layers.Embedding( input_dim=self.voc_size, output_dim=EMBEDDING_DIM, mask_zero=True, ) self.encoder = ChitEncoder() # shape: [batch_size, state] self.decoder = ChatDecoder(self.voc_size) # shape: 
[batch_size, max_len, voc_size] def call( self, inputs: List[List[int]], # shape: [batch_size, max_len] teacher_forcing_targets: List[List[int]]=None, # shape: [batch_size, max_len] training=None, mask=None, ) -> tf.Tensor: # shape: [batch_size, max_len, embedding_dim] '''call''' batch_size = tf.shape(inputs)[0] inputs_embedding = self.embedding(tf.convert_to_tensor(inputs)) state = self.encoder(inputs_embedding) self.decoder.set_state(state) if training: teacher_forcing_targets = tf.convert_to_tensor(teacher_forcing_targets) teacher_forcing_embeddings = self.embedding(teacher_forcing_targets) # outputs[:, 0, :].assign([self.__go_embedding()] * batch_size) batch_go_embedding = tf.ones([batch_size, 1, 1]) * [self.__go_embedding()] batch_go_one_hot = tf.ones([batch_size, 1, 1]) * [tf.one_hot(self.word_index[GO], self.voc_size)] outputs = batch_go_one_hot output = self.decoder(batch_go_embedding) for t in range(1, MAX_LEN): outputs = tf.concat([outputs, output], 1) if training: target = teacher_forcing_embeddings[:, t, :] decoder_input = tf.expand_dims(target, axis=1) else: decoder_input = self.__indice_to_embedding(tf.argmax(output)) output = self.decoder(decoder_input) return outputs def predict(self, inputs: List[int], temperature=1.) -> List[int]: '''doc''' outputs = self([inputs]) outputs = tf.squeeze(outputs) word_list = [] for t in range(1, MAX_LEN): output = outputs[t] indice = self.__logit_to_indice(output, temperature=temperature) word = self.index_word[indice] if indice == self.word_index[DONE]: break word_list.append(word) return ' '.join(word_list) def __go_embedding(self) -> tf.Tensor: return self.embedding( tf.convert_to_tensor(self.word_index[GO])) def __logit_to_indice( self, inputs, temperature=1., ) -> int: ''' [vocabulary_size] convert one hot encoding to indice with temperature ''' inputs = tf.squeeze(inputs) prob = tf.nn.softmax(inputs / temperature).numpy() indice = np.random.choice(self.voc_size, p=prob) return indice def __indice_to_embedding(self, indice: int) -> tf.Tensor: tensor = tf.convert_to_tensor([[indice]]) return self.embedding(tensor)Train Tensor Board[Quick guide to run TensorBoard in Google Colab](https://www.dlology.com/blog/quick-guide-to-run-tensorboard-in-google-colab/)`tensorboard` vs `tensorboard/` ?LOG_DIR = '/content/data/tensorboard/' get_ipython().system_raw( 'tensorboard --logdir {} --host 0.0.0.0 --port 6006 &' .format(LOG_DIR) ) # Install ! npm install -g localtunnel # Tunnel port 6006 (TensorBoard assumed running) get_ipython().system_raw('lt --port 6006 >> url.txt 2>&1 &') # Get url ! cat url.txt '''train''' import tensorflow as tf # from chit_chat import ( # ChitChat, # DataLoader, # Vocabulary, # ) tf.enable_eager_execution() data_loader = DataLoader() vocabulary = Vocabulary(data_loader.raw_text) chitchat = ChitChat(vocabulary=vocabulary) def loss(model, x, y) -> tf.Tensor: '''doc''' weights = tf.cast( tf.not_equal(y, 0), tf.float32, ) prediction = model( inputs=x, teacher_forcing_targets=y, training=True, ) # implment the following contrib function in a loop ? 
# https://stackoverflow.com/a/41135778/1123955 # https://stackoverflow.com/q/48025004/1123955 return tf.contrib.seq2seq.sequence_loss( prediction, tf.convert_to_tensor(y), weights, ) def grad(model, inputs, targets): '''doc''' with tf.GradientTape() as tape: loss_value = loss(model, inputs, targets) return tape.gradient(loss_value, model.variables) def train() -> int: '''doc''' learning_rate = 1e-3 num_batches = 8000 batch_size = 128 print('Dataset size: {}, Vocabulary size: {}'.format( data_loader.size, vocabulary.size, )) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) root = tf.train.Checkpoint( optimizer=optimizer, model=chitchat, optimizer_step=tf.train.get_or_create_global_step(), ) root.restore(tf.train.latest_checkpoint('./data/save')) print('checkpoint restored.') writer = tf.contrib.summary.create_file_writer('./data/tensorboard') writer.set_as_default() global_step = tf.train.get_or_create_global_step() for batch_index in range(num_batches): global_step.assign_add(1) queries, responses = data_loader.get_batch(batch_size) encoder_inputs = vocabulary.texts_to_padded_sequences(queries) decoder_outputs = vocabulary.texts_to_padded_sequences(responses) grads = grad(chitchat, encoder_inputs, decoder_outputs) optimizer.apply_gradients( grads_and_vars=zip(grads, chitchat.variables) ) if batch_index % 10 == 0: print("batch %d: loss %f" % (batch_index, loss( chitchat, encoder_inputs, decoder_outputs).numpy())) root.save('./data/save/model.ckpt') print('checkpoint saved.') with tf.contrib.summary.record_summaries_every_n_global_steps(1): # your model code goes here tf.contrib.summary.scalar('loss', loss( chitchat, encoder_inputs, decoder_outputs).numpy()) # print('summary had been written.') return 0 def main() -> int: '''doc''' return train() main() #! rm -fvr data/tensorboard # ! pwd # ! rm -frv data/save # ! rm -fr /content/data/tensorboard # ! kill 2823 # ! kill -9 2823 # ! ps axf | grep lt ! cat url.txtyour url is: https://bright-fox-51.localtunnel.mechat.py'''train''' # import tensorflow as tf # from chit_chat import ( # ChitChat, # DataLoader, # Vocabulary, # DONE, # GO, # ) # tf.enable_eager_execution() def main() -> int: '''chat main''' data_loader = DataLoader() vocabulary = Vocabulary(data_loader.raw_text) print('Dataset size: {}, Vocabulary size: {}'.format( data_loader.size, vocabulary.size, )) chitchat = ChitChat(vocabulary) checkpoint = tf.train.Checkpoint(model=chitchat) checkpoint.restore(tf.train.latest_checkpoint('./data/save')) print('checkpoint restored.') return cli(chitchat, vocabulary=vocabulary, data_loader=data_loader) def cli(chitchat: ChitChat, data_loader: DataLoader, vocabulary: Vocabulary): '''command line interface''' index_word = vocabulary.tokenizer.index_word word_index = vocabulary.tokenizer.word_index query = '' while True: try: # Get input sentence query = input('> ').lower() # Check if it is quit case if query == 'q' or query == 'quit': break # Normalize sentence query = data_loader.preprocess(query) query = '{} {} {}'.format(GO, query, DONE) # Evaluate sentence query_sequence = vocabulary.texts_to_padded_sequences([query])[0] response_sequence = chitchat.predict(query_sequence, 1) # Format and print response sentence response_word_list = [ index_word[indice] for indice in response_sequence if indice != 0 and indice != word_index[DONE] ] print('Bot:', ' '.join(response_word_list)) except KeyError: print("Error: Encountered unknown word.") main() ! 
cat /proc/cpuinfoprocessor : 0 vendor_id : GenuineIntel cpu family : 6 model : 63 model name : Intel(R) Xeon(R) CPU @ 2.30GHz stepping : 0 microcode : 0x1 cpu MHz : 2299.998 cache size : 46080 KB physical id : 0 siblings : 2 core id : 0 cpu cores : 1 apicid : 0 initial apicid : 0 fpu : yes fpu_exception : yes cpuid level : 13 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm pti fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms xsaveopt arch_capabilities bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf bogomips : 4599.99 clflush size : 64 cache_alignment : 64 address sizes : 46 bits physical, 48 bits virtual power management: processor : 1 vendor_id : GenuineIntel cpu family : 6 model : 63 model name : Intel(R) Xeon(R) [...]Warning: Before running the cells below, please make sure you have an API key. See README.md for more info on the API key.import os os.environ["LS_API_KEY"] = "MY-API-KEY" # replace your API key here. from here_map_widget import Map, SplitMapControl, GeoJSON import os m = Map(api_key=os.environ["LS_API_KEY"]) left_geojson = GeoJSON( url="https://gist.githubusercontent.com/peaksnail/5d4f07ca00ed7c653663d7874e0ab8e7/raw/64c2a975482efd9c42e54f6f6869f091055053cd/countries.geo.json", disable_legacy_mode=True, style={"fillColor": "#ff0000", "color": "black", "opacity": 0.1}, ) right_geojson = GeoJSON( url="https://gist.githubusercontent.com/peaksnail/5d4f07ca00ed7c653663d7874e0ab8e7/raw/64c2a975482efd9c42e54f6f6869f091055053cd/countries.geo.json", disable_legacy_mode=True, style={"fillColor": "#00ff00", "color": "black", "opacity": 0.1}, ) sp = SplitMapControl(left_layer=left_geojson, right_layer=right_geojson) m.add_control(sp) m m.remove_control(sp)Dealing with missing dataIt is not rare in practice for our training data to be missing one or more values for various reasons. There may have been an error in the data collection process, certain measurements may not have been possible, or particular fields may simply have been left blank, for example in a survey. We typically see missing values as blank spaces or as placeholder strings such as NaN (Not a Number) or NULL (the marker for an unknown value in relational databases). Unfortunately, most computational tools cannot handle such missing values, or will produce unpredictable results if we simply ignore them. Therefore, it is essential to take care of these missing values before we proceed with further analyses. In this section, we will work through several techniques for dealing with missing data, either by removing entries from the dataset or by imputing values from other training examples and features. 
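To see why this matters in practice, here is a minimal sketch (our own, not from the original text) showing that a typical scikit-learn estimator refuses to fit an array that still contains NaN values:

```python
# Minimal sketch (our own): most scikit-learn estimators raise an error on NaN input.
import numpy as np
from sklearn.linear_model import LinearRegression

X = np.array([[1.0, 2.0], [np.nan, 3.0], [7.0, 6.0]])
y = np.array([1.0, 2.0, 3.0])

try:
    LinearRegression().fit(X, y)
except ValueError as err:
    print('Fitting failed:', err)
```

This is exactly the situation that the techniques below are meant to resolve.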
Identifying missing values in tabular dataBefore we discuss the various techniques for dealing with missing data, let's create a simple data frame from a **comma-separated values (CSV)** file to get a better grasp of the problem:import pandas as pd from io import StringIO csv_data = \ '''A,B,C,D 1.0,2.0,3.0,4.0 5.0,6.0,,8.0 10.0,11.0,12.0,''' # If you are using Python 2.7, you need to convert the string to unicode: # csv_data = unicode(csv_data) df = pd.read_csv(StringIO(csv_data)) print(df)A B C D 0 1.0 2.0 3.0 4.0 1 5.0 6.0 NaN 8.0 2 10.0 11.0 12.0 NaNWe used the read_csv method to read the CSV data into a pandas DataFrame, and the empty cells were automatically filled with NaN. The StringIO function lets us read the csv_data string as if it were a CSV file. For a larger DataFrame, we can count the number of missing values per column: the isnull method returns a DataFrame of Boolean values that are True where a cell is missing (NaN) and False where it holds a valid value, and calling sum() then counts the missing entries in each column.df.isnull().sum()Convenient data handling with pandas data frame It is sometimes more convenient to preprocess data using a DataFrame. Although scikit-learn now also accepts DataFrames as input, NumPy arrays are still what scikit-learn handles best, so the general advice is to use NumPy arrays with scikit-learn. Note that we can always access the underlying NumPy array of a DataFrame via its values attribute:df.valuesEliminating training examples or features with missing valuesOne of the simplest ways to deal with missing data is to remove the corresponding features (columns) or training examples (rows) from the dataset via the DataFrame's dropna method. We can drop rows by setting axis=0:df.dropna(axis=0) # We can also drop columns by setting axis = 1 df.dropna(axis=1) # Several other parameters control how rows are dropped # Drop rows where all columns are NaN df.dropna(how='all') # Drop rows that have fewer than 4 real values df.dropna(thresh=4) # Drop rows where NaN appears in a specific column df.dropna(subset=['C'])Although removing missing data is convenient, it has disadvantages; for example, we may end up removing too many samples, which can make a reliable analysis impossible. Or, if we remove too many feature columns, we run the risk of losing valuable information that our classifier needs to discriminate between classes. In the next section, we will look at another approach for dealing with missing values, so-called **interpolation techniques**. Imputing missing valuesOften, removing training examples or entire feature columns is simply not feasible, because we might lose too much valuable data. In this case, we can use different interpolation techniques to estimate the missing values from the other training examples in our dataset. One of the most common interpolation techniques is **mean imputation**, where we simply replace a missing value with the mean of the entire feature column. A convenient way to do this is to import the SimpleImputer class from scikit-learn, as follows:from sklearn.impute import SimpleImputer import numpy as np imr = SimpleImputer(missing_values=np.nan, strategy='mean') imr = imr.fit(df.values) imputed_data = imr.transform(df.values) imputed_dataHere, we replaced each NaN value with the corresponding column mean. 
Other options for the strategy parameter are median and most_frequent, where the missing value is replaced by the median or the mode of the feature column, respectively. The most_frequent strategy is particularly useful for imputing **categorical** values, for example a feature column named **color** containing values such as red, green, blue, and so on. We will encounter this kind of data later in this section. Alternatively, we can fill in missing values with pandas' **fillna** method, passing an imputation value as an argument. For example:df.fillna(df.mean())Understanding the sklearn estimator API In the previous section, we used the SimpleImputer class from scikit-learn to impute missing values in our dataset. The SimpleImputer class belongs to the so-called **transformer** classes in scikit-learn, which are used for data transformation. The two essential methods of these estimators are fit and transform. The fit method is used to learn the parameters from the training data, and the transform method uses those parameters to transform the data. Any data array that is to be transformed needs to have the same number of features as the data array that was used to fit the model. The figure below illustrates how a transformer fitted on the training data is used to transform the training dataset as well as a new test dataset:![](pic1.png) The classifiers that we used in Chapter 3 are called **estimators** in scikit-learn, with an API that is conceptually very similar to that of the **transformer** class. Estimators have a predict method and can also have a transform method, as we will see later in this chapter. We also use the fit method to learn the parameters of a model when we train those estimators for classification. However, in **supervised learning** we additionally provide the class labels for fitting the model, which can then be used to make predictions about new, unlabeled data via the predict method, as illustrated in the figure below: ![](pic2.png) Handling categorical dataSo far, we have only been working with numerical values. However, real-world datasets commonly contain one or more feature columns with categorical data. In this section, we will work through a simple example to see how to deal with this type of data in numerical computing libraries. When talking about categorical data, we have to distinguish between **ordinal** and **nominal** features. **Ordinal** features can be understood as categorical values that can be sorted or ordered. For example, T-shirt size is an ordinal feature, because we can define an order: XL > L > M. In contrast, **nominal** features do not imply any order; T-shirt color is a nominal feature, since it does not make sense to say that one color is larger than another. Categorical data encoding with pandasBefore we explore different techniques for handling categorical data, let's create a new DataFrame to illustrate the problem:import pandas as pd df = pd.DataFrame([ ['green', 'M', 10.1, 'class2'], ['red', 'L', 13.5, 'class1'], ['blue', 'XL', 15.3, 'class2']]) df.columns = ['colors', 'size', 'price', 'classlabel'] dfAs we can see in the output, the newly created DataFrame contains a **nominal feature** (colors), an **ordinal feature** (size), and a **numerical feature** (price). The class labels (assuming we created the dataset for a supervised learning task) are stored in the last column. The classification algorithms discussed in this book do not use ordering information in class labels. 
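As a side note (a minimal sketch of our own, not part of the original example), pandas can also record the ordinal nature of such a feature explicitly through an ordered Categorical dtype, which makes the XL > L > M relation available to comparisons and sorting:

```python
# Minimal sketch (our own): an ordered categorical dtype encodes M < L < XL explicitly.
import pandas as pd

size_dtype = pd.CategoricalDtype(categories=['M', 'L', 'XL'], ordered=True)
sizes = pd.Series(['M', 'L', 'XL'], dtype=size_dtype)

print(sizes > 'M')      # element-wise comparison respects the declared order
print(sizes.cat.codes)  # integer codes 0, 1, 2 follow the same order
```

In the rest of this section, though, we follow the original approach and build the integer mapping by hand.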
Handling categorical data
So far we have only been working with numerical values. However, it is not uncommon for real-world datasets to contain one or more categorical feature columns. In this section we will use simple examples to see how to deal with this type of data in numerical computing libraries.
When we talk about categorical data, we have to distinguish between **ordinal** and **nominal** features. **Ordinal** features can be understood as categorical values that can be sorted or ordered. For example, T-shirt size is an **ordinal** feature, because we can define an order: XL > L > M. In contrast, **nominal** features do not imply any order; T-shirt color, for example, is a **nominal** feature, because colors cannot be compared with each other.
Categorical data encoding with pandas
Before we explore different techniques for handling categorical data, let us create a new DataFrame to illustrate the problem:
import pandas as pd
df = pd.DataFrame([
    ['green', 'M', 10.1, 'class2'],
    ['red', 'L', 13.5, 'class1'],
    ['blue', 'XL', 15.3, 'class2']])
df.columns = ['colors', 'size', 'price', 'classlabel']
df
As we can see in the output, the newly created DataFrame contains a **nominal feature** (color), an **ordinal feature** (size), and a **numerical feature** (price). The class labels (assuming that we created this dataset for a supervised learning task) are stored in the last column. The classification algorithms discussed in this book do not use ordinal information in class labels.
Mapping ordinal features
To make sure that the learning algorithm interprets the **ordinal features** correctly, we need to convert the categorical string values into integers. Unfortunately, there is no convenient function that can automatically derive the correct order of the **labels** of our size feature, so we have to define the mapping manually. In the simple example below, let's assume that we know the numerical difference between the feature values, for example XL = L + 1 = M + 2:
size_mapping = {'XL': 3, 'L': 2, 'M': 1}
df['size'] = df['size'].map(size_mapping)
df
If we want to transform the integer values back to the original string representation, we can simply define a reverse-mapping dictionary, inv_size_mapping = {v: k for k, v in size_mapping.items()}, and use it with the pandas map method in the same way as above to convert the integers back to the **ordinal feature** values:
inv_size_mapping = {v: k for k, v in size_mapping.items()}
print(inv_size_mapping)
print(df['size'].map(inv_size_mapping))
{3: 'XL', 2: 'L', 1: 'M'} 0 M 1 L 2 XL Name: size, dtype: object
Encoding class labels
Many machine learning libraries require that **class labels** are encoded as integer values. Although most estimators in scikit-learn convert class labels to integers internally, it is considered good practice to provide class labels as integer arrays to avoid technical glitches. To encode the class labels, we can use an approach similar to the **mapping of ordinal features** discussed above. We just need to remember that class labels are not **ordinal**, so it does not matter which integer we assign to which label. We can therefore simply enumerate the class labels, starting at 0:
import numpy as np
class_mapping = {label: idx for idx, label in enumerate(np.unique(df['classlabel']))}
print(class_mapping)
{'class1': 0, 'class2': 1}
Next, we can use this mapping dictionary to transform the class labels into integers, just as we did with the ordinal size feature above:
df['classlabel'] = df['classlabel'].map(class_mapping)
print(df)
colors size price classlabel 0 green 1 10.1 1 1 red 2 13.5 0 2 blue 3 15.3 1
Similarly, we can reverse the key-value pairs in the mapping dictionary to map the converted class labels back to the original string representation:
inv_class_mapping = {v: k for k, v in class_mapping.items()}
print(inv_class_mapping)
df['classlabel'] = df['classlabel'].map(inv_class_mapping)
df
{0: 'class1', 1: 'class2'}
Alternatively, we can use the LabelEncoder class implemented in scikit-learn to achieve the same thing:
from sklearn.preprocessing import LabelEncoder
class_le = LabelEncoder()
y = class_le.fit_transform(df['classlabel'].values)
print(y)
[1 0 1]
**The question that comes up here is how to put this encoded result back into the DataFrame (I will look into this and demonstrate it in my own code later.)**
Note that the **fit_transform** method is just a shortcut for calling **fit** and **transform** separately; we will see the same pattern again in the StandardScaler class used for z-score standardization. We can also use the inverse_transform method to convert the integer class labels back to their original string representation:
z = class_le.inverse_transform(y)
print(z)
['class2' 'class1' 'class2']
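Regarding the open question raised above about writing the encoded labels back into the DataFrame: one straightforward possibility (a sketch, not necessarily the only way) is to assign the encoder's output directly to the column, since the array returned by fit_transform is aligned with the rows of the DataFrame:

```python
from sklearn.preprocessing import LabelEncoder

class_le = LabelEncoder()
# Overwrite the column with the integer-encoded labels.
df['classlabel'] = class_le.fit_transform(df['classlabel'].values)
print(df)

# The original string labels can be restored with inverse_transform.
df['classlabel'] = class_le.inverse_transform(df['classlabel'])
```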
Performing one-hot encoding on nominal features
In the previous section, Mapping ordinal features, we used a dictionary-mapping approach to convert the ordinal size feature into integers. Because scikit-learn's estimators for classification treat class labels as categorical data that does not imply any order (**nominal**), we also used the convenient LabelEncoder to encode the string labels into integers. It may appear that we could use a similar approach to transform the **nominal** features of our data, for example the color feature:
X = df[['colors', 'size', 'price']].values
color_le = LabelEncoder()
X[:, 0] = color_le.fit_transform(X[:, 0])
print(X)
[[1 1 10.1] [2 2 13.5] [0 3 15.3]]
After executing the code above, the first column of the NumPy array X now holds the new color values, which are encoded as follows: * blue = 0 * green = 1 * red = 2
If we stop at this point and feed the array to our classifier, we will make one of the most common mistakes in dealing with categorical data. The problem is that, although the color values do not come in any particular order, the learning algorithm will now assume that green is larger than blue, and red is larger than green. Although this assumption is incorrect, the algorithm could still produce useful results; however, those results would not be optimal.
A common workaround for this problem is a technique called **one-hot encoding**. The idea behind this approach is to create a new dummy feature for each unique value in the **nominal** feature column. Here, we would convert the color feature into three new features: blue, green, and red. Binary values are then used to indicate the particular color of an example; for instance, a blue example can be encoded as blue=1, green=0, red=0. To perform this transformation, we can use the OneHotEncoder implemented in scikit-learn's preprocessing module:
from sklearn.preprocessing import OneHotEncoder
X = df[['colors', 'size', 'price']].values
color_ohe = OneHotEncoder()
print(color_ohe.fit_transform(X[:, 0].reshape(-1, 1)).toarray())
[[0. 1. 0.] [0. 0. 1.] [1. 0. 0.]]
Note that we applied the OneHotEncoder to only a single column, (X[:, 0].reshape(-1, 1)), to avoid modifying the other two columns in the array as well. If we want to selectively transform one column out of several in the array, we can use the ColumnTransformer, which accepts a list of (name, transformer, column(s)) tuples:
from sklearn.compose import ColumnTransformer
X = df[['colors', 'size', 'price']].values
c_transf = ColumnTransformer([
    ('onehot', OneHotEncoder(), [0]),
    ('nothing', 'passthrough', [1, 2])
])
print(c_transf.fit_transform(X).astype(float))
[[ 0. 1. 0. 1. 10.1] [ 0. 0. 1. 2. 13.5] [ 1. 0. 0. 3. 15.3]]
In the code above we specified that we only want to modify the first column and leave the other two columns untouched via the 'passthrough' argument.
**An even more convenient way** to create those dummy features via one-hot encoding is to use the get_dummies method implemented in pandas.
Applied to a DataFrame, the get_dummies method will only convert the string columns and leave all other columns unchanged:
pd.get_dummies(df[['colors', 'price', 'size']])
**HANDWRITTEN DIGIT CLASSIFICATION WITH CNN**
**IMPORTING THE LIBRARIES**
import tensorflow as tf
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train.shape
import matplotlib.pyplot as plt
%matplotlib inline
fig, axs = plt.subplots(4, 4, figsize=(20, 20))
plt.gray()
for i, ax in enumerate(axs.flat):
    ax.matshow(x_train[i])
    ax.axis('off')
    ax.set_title('Number {}'.format(y_train[i]))
fig.show()
**TRAINING AND TESTING DATASET**
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
input_shape = (28, 28, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print('Number of images in x_train', x_train.shape[0])
print('Number of images in x_test', x_test.shape[0])
x_train shape: (60000, 28, 28, 1) Number of images in x_train 60000 Number of images in x_test 10000
**DESIGNING THE MODEL**
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
model = Sequential()
model.add(Conv2D(28, kernel_size=(3, 3), input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation=tf.nn.relu))
model.add(Dropout(0.2))
model.add(Dense(10, activation=tf.nn.softmax))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(x=x_train, y=y_train, epochs=1)
1875/1875 [==============================] - 35s 18ms/step - loss: 0.2076 - accuracy: 0.9381
**EVALUATING THE MODEL**
model.evaluate(x_test, y_test)
313/313 [==============================] - 2s 6ms/step - loss: 0.0743 - accuracy: 0.9766
**PREDICTION**
p = model.predict(x_test)
print(p[2])
import numpy as np
print(np.argmax(p[2]))
1
The curious case of `__mro__`
Method resolution order in multiple inheritance using C3 linearization
Inheritance in Python
* Python supports **multiple inheritance**, i.e. a class can be derived from more than one base class
* In multiple inheritance, the features of all the base classes are inherited into the derived class
* The syntax for multiple inheritance is similar to single inheritance
class A: pass
class B: pass
class C(A,B): pass
print(C.__bases__)
(<class '__main__.A'>, <class '__main__.B'>)
What is MRO?
* MRO stands for Method Resolution Order
* MRO is the order in which base classes in the inheritance hierarchy are searched when looking for a method
* All Python versions after 2.3 use the *C3 linearization* algorithm to determine this order
* Not all classes admit a linearization. There are cases, in complicated hierarchies, where it is not possible to derive a class such that its linearization respects all the desired properties.
Check [this article](https://www.python.org/download/releases/2.3/mro/) on python.org for more info. Part of this presentation is also based on [this tutorial](https://www.youtube.com/watch?v=YLgf8wGCX2w).
Why is this useful?
* Given a class `C`, in the case of a single inheritance hierarchy, if `C` is a subclass of `C1`, and `C1` is a subclass of `C2`, then the linearization of `C` is simply the list `[C,C1,C2]`.
* However, in a complicated multiple inheritance hierarchy, it is a non-trivial task to specify the order in which methods are overridden, i.e.
to specify the order of the ancestors of `C`.
* We need to be able to discover **deterministically** the order of classes for method calls in the inheritance chain.
* The list of the ancestors of a class `C`, including the class itself, ordered from the nearest ancestor to the furthest, is called the *class precedence list* or the *linearization* of `C`.
* The Method Resolution Order (MRO) is the set of rules that construct the linearization. In the Python literature, the idiom the *MRO* of the class `C` is also used as a synonym for the *linearization* of `C`.
The diamond problem
* In the diamond example, where `Button` derives from both `Rectangle` and `Clickable`, the `Button` class inherits two different implementations of `equals()`
* It has no implementation of the operation of its own
* When `button.equals()` is called, it is unknown which implementation will be used: the one from `Rectangle`, from `Clickable`, or from `object`
C3 Linearization
* First introduced in the Dylan language
* The algorithm is based on 3 important properties (this is how the name *C3* is derived)
 1. Consistent extended precedence graph (the MRO is determined based on the structure of the inheritance graph)
 2. Preserving local precedence ordering (no class will appear before any of its subclasses)
 3. Monotonicity
Monotonicity
* An MRO is monotonic when the following is true:
 - If `C1` precedes `C2` in the linearization of `C`, then `C1` precedes `C2` in the linearization of any subclass of `C`.
* Consider: `class X(O), class Y(O), class A(X,Y), class B(Y,X), class C(B,A)`
 - Based on monotonicity it is **not** possible to derive a new class `C` from `A` and `B`, since `X` precedes `Y` in `A`, but `Y` precedes `X` in `B`; therefore the method resolution order would be ambiguous in `C` (`XY` breaks monotonicity with `B`, `YX` breaks monotonicity with `A`).
Definition and notation
* Notation
 - `C1 C2 ... CN` indicates the list of classes `[C1,C2,...,CN]`
 - The *head* of the list is its first element: head = `C1`
 - The *tail* is the rest of the list: tail = `C2 ... CN`
 - The sum of the lists `[C] + [C1,C2,...,CN] = C + (C1 C2 ... CN) = C C1 C2 ... CN`
* Consider a class `C` in a multiple inheritance hierarchy, with `C` inheriting from the base classes `B1, B2, ..., BN`:
 - The linearization of `C` is the sum of `C` plus the merge of the linearizations of the parents and the list of the parents
 - `L[C(B1 ... BN)] = C + merge(L[B1],...,L[BN], B1 ... BN)`
* Example: `L[Y(X1 X2 X3)] = Y + merge(L[X1],L[X2],L[X3], X1 X2 X3)`
Computing merge
Consider a simple merge example: `merge(DO,EO,DE) = DEO`
1. Select the first head of the lists which does not appear in the tail of any of the other lists. - A good head may appear as the first element in multiple lists at the same time, but it is forbidden to appear anywhere else.
2. Remove the selected element from all the lists where it appears as a head and append it to the output list.
3. Repeat the operation of selecting and removing a good head to extend the output list until all remaining lists are exhausted.
4. If at some point no good head can be selected, because the heads of all remaining lists appear in any one tail of the lists, then the merge is impossible to compute due to cyclic dependencies in the inheritance hierarchy and no linearization of the original class exists.
Properties of merge
* Three important considerations when computing merge:
 1. The merge of several sequences is a sequence that contains **each** of the elements of the input sequences - All elements within the input lists *DO*, *EO* and *DE* are present in the merged result *DEO*.
 2.
An element that appears in more than once of the input sequences appears **only once** in the output sequence - *D*, *E* and *O* appear in more than on input sequence, but the result has only one instance of each. 3. If two elements appear in the same input sequence, their order in the output sequence is the same as their order in the input sequence. - In the input sequence, D precedes both O and E; E precedes O. The same ordering is maintained in the merged output. Compute the linearization:`class A(B,C), class B(D,E) class C(D,F), class D(O), class E(O), class F(O), class O` C3 computing example**`L[C(B1 ... BN)] = C + merge(L[B1],...,L[BN], B1 ... BN)`** ```L[O] = OL[D] = D + merge(L[O],O) = D + merge(O,O) = DOL[E] = EO, L[F] = FO L[B] = B + merge(L[D],L[E],DE) = B + merge(DO,EO,DE) = B + D + merge(O,EO,E) = B + D + E + merge(O,O) = BDEOL[C] = C + merge(L[D],L[F],DF) = C + merge(DO,FO,DF) = CDFOL[A] = A + merge(L[B],L[C],BC) = A + merge(BDEO,CDFO,BC) = A + B + merge(DEO,CDFO,C) = A + B + C + merge(DEO,DFO) = A + B + C + D + merge(EO,FO) = A + B + C + D + E + merge(O,FO) = A + B + C + D + E + F + merge(O,O) = ABCDEFO```class F: pass class E: pass class D: pass class C(D,F): pass class B(D,E): pass class A(B,C): pass from inspect import getmro print(getmro(A)) print(A.__mro__) class A: pass class B: pass class C(A,B): pass class D(B,A): pass class E(C,D): pass print(E.__mro__)Tools Table of Contents:* [What programming language do you use at work?](What-programming-language-do-you-use-at-work?)* [Which version control tools do you use for software development?](Which-version-control-tools-do-you-use-for-software-development?) Setting up# Import notebook containing sampled dataset %run "./00_data-cleaning.ipynb" # Filtering the df df = df[(df['Do you write code as part of your job?'] =='Yes')]What programming language do you use at work?# programming languages languages = ['Java', 'C', 'Python', 'Plusplus', 'Visual Basic .NET', 'Zehscharf', 'PHP', 'JavaScript', 'SQL', 'Delphi/Object Pascal', 'MATLAB', 'Assembly language', 'Go', 'Perl', 'R', 'Visual Basic', 'SAS', 'F#', 'Fortran', 'Julia', 'Eisenoxid', 'Lisp', 'Prolog', 'Haskell', 'IDL', 'Other'] df['What programming language do you use at work? Please select all that apply.'] = df['What programming language do you use at work? Please select all that apply.'].str.replace('C#', 'Zehscharf') df['What programming language do you use at work? Please select all that apply.'] = df['What programming language do you use at work? Please select all that apply.'].str.replace(re.escape('C++'), 'Plusplus') df['What programming language do you use at work? Please select all that apply.'] = df['What programming language do you use at work? Please select all that apply.'].str.replace('Rust', 'Eisenoxid') def find(df, column, list): number = {} for key in list: counts = df[column].str.contains(key).value_counts() if counts.index.contains(True): number.update({key: counts[True]}) else: number.update({key: 0}) return number number = find(df, 'What programming language do you use at work? 
Please select all that apply.', languages) plot_data = pd.DataFrame(number, index=[0]) plot_data.rename(columns={'Plusplus': 'C++', 'Zehscharf': 'C#', 'Eisenoxid': 'Rust'}, inplace=True) plot_data = plot_data.sort_values(by=[0], axis=1, ascending=False) plot_data.drop([col for col, val in plot_data.sum().iteritems() if val<1], axis=1, inplace=True) plt.figure(figsize=(15,10)) fig = sns.barplot(data=plot_data, orient='v') fig.yaxis.set_major_locator(MaxNLocator(integer=True)) plt.xticks(rotation= 90) plt.title('What programming language do you use at work?', bbox={'facecolor':'0.8', 'pad':12}) plt.ylabel('Number of participants', fontsize=15) plt.xlabel('Programming languages', fontsize=15) ax = plt.gca() totals = [] for i in ax.patches: totals.append(i.get_height()) total = sum(totals) for i in ax.patches: ax.text(i.get_x()+ 0.15, i.get_height()+25, \ str(round((i.get_height()/total)*100, 2))+'%', fontsize=13, color='dimgrey', rotation=45) plt.show() # Top 3 programming languages per institute institutes = df['In which institute do you work?'].drop_duplicates().values.tolist() languages = df['What programming language do you use at work? Please select all that apply.'].drop_duplicates().values.tolist() def find_combination_number(df, keyword_list): df_add = pd.DataFrame(columns=keyword_list, index=[0]) df = pd.concat([df, df_add], axis=1) df[keyword_list] = False for index, row in df.iterrows(): raw_string = row['What programming language do you use at work? Please select all that apply.'] if raw_string is not nm.NaN: lang_str = raw_string.split(';#') lang_str.sort() for lang in lang_str: df.at[index, lang] = True return df def get_users_per_institute(df_sorted, lang_list, institute_list): lang_count = pd.DataFrame(data=0, index=lang_list, columns=institute_list) for inst in institute_list: df_selection = df_sorted[df_sorted['In which institute do you work?'] == inst] for index, row in df_selection.iterrows(): for lang in lang_list: if row[lang] == True: lang_count.at[lang, inst] += 1 return lang_count df_new = find_combination_number(df, languages) lang_count = get_users_per_institute(df_new, languages, institutes) for inst in institutes: print(lang_count[inst].nlargest(3)) lang_count = lang_count[(lang_count.T != 0).any()] sns.set() fig = lang_count.set_index(lang_count.index.values).T.plot(kind='bar', stacked=True, rot=0, figsize=(35,30)) plt.xticks(rotation= 90) plt.title('Top 3 programming languages per institute', bbox={'facecolor':'0.8', 'pad':12}) plt.ylabel('', fontsize=20) plt.xlabel('Institute', fontsize=20) plt.show()Which version control tools do you use for software development?# version controll system - einzeln languages = ['Subversion', 'Git', 'Bazaar', 'CVS', 'Other', 'None', 'Scharp'] df['Which version control tools do you use for software development? Please select all that apply.'] = df['Which version control tools do you use for software development? Please select all that apply.'].str.replace(re.escape('Other DVCS (e.g. Bazaar, BitKeeper, GNU arch, Mercurial, Monotone)'), 'Bazaar') df['Which version control tools do you use for software development? Please select all that apply.'] = df['Which version control tools do you use for software development? Please select all that apply.'].str.replace(re.escape('Other central VCS (e.g. 
DARCS, SourceSafe)'), 'Scharp') def find(df, column, list): number = {} for key in list: counts = df[column].str.contains(key).value_counts() if counts.index.contains(True): number.update({key: counts[True]}) else: number.update({key: 0}) return number number = find(df, 'Which version control tools do you use for software development? Please select all that apply.', languages) plot_data = pd.DataFrame(number, index=[0]) plot_data.rename(columns={'Bazaar': 'Other DVCS (e.g. Bazaar, BitKeeper, GNU arch, Mercurial, Monotone)', 'Scharp': 'Other central VCS (e.g. DARCS, SourceSafe)'}, inplace=True) plot_data = plot_data.sort_values(by=[0], axis=1, ascending=False) plot_data.drop([col for col, val in plot_data.sum().iteritems() if val<1], axis=1, inplace=True) plt.figure(figsize=(15,10)) fig = sns.barplot(data=plot_data, orient='v') fig.yaxis.set_major_locator(MaxNLocator(integer=True)) plt.xticks(rotation= 90) plt.title('Which version control tools do you use for software development?', bbox={'facecolor':'0.8', 'pad':12}) plt.ylabel('Number of participants', fontsize=15) plt.xlabel('Version Control Tool', fontsize=15) ax = plt.gca() totals = [] for i in ax.patches: totals.append(i.get_height()) total = sum(totals) for i in ax.patches: ax.text(i.get_x()+ 0.15, i.get_height()+.9, \ str(round((i.get_height()/total)*100, 2))+'%', fontsize=17, color='dimgrey') plt.show() #Which version control tools do you use for software development? count = df['Which version control tools do you use for software development? Please select all that apply.'].value_counts() results = pd.DataFrame(count.values, count.index) display(results) # Version Control Tool - kombiniert plt.figure(figsize=(15,10)) count = df['Which version control tools do you use for software development? Please select all that apply.'].value_counts() sns.set(style="darkgrid") sns.barplot(count.index, count.values) plt.xticks(rotation= 90) plt.title('Which version control tools do you use for software development?', bbox={'facecolor':'0.8', 'pad':12}) plt.ylabel('Number of participants', fontsize=15) plt.xlabel('Version Control Tool', fontsize=15) ax = plt.gca() totals = [] for i in ax.patches: totals.append(i.get_height()) total = sum(totals) for i in ax.patches: ax.text(i.get_x()+ 0.15, i.get_height()+12, \ str(round((i.get_height()/total)*100, 2))+'%', fontsize=13, color='dimgrey', rotation=45) plt.show()Oil prices Follow up: we want to see if inflation adjusted prices will have any significantly different result from our "Oil Price" notebook. 
Conclusion: no significantly change in terms of quarterly change or seasonalitydf = pd.read_csv(r'.\data\wti-daily.csv') df.head(2) df['Date'] = pd.to_datetime(df['Date']) df.index=df.Date df=df.drop('Date', axis=1) plt.rcParams["figure.figsize"] = (20,4) df.plot() plt.title("WTI oil price", fontdict={'fontsize': 20, 'fontweight': 'bold'}) plt.ylabel("price $") price_max= pd.to_datetime("2008-07-03") ax = df.plot() plt.title("WTI oil price", fontdict={'fontsize': 20, 'fontweight': 'bold'}) ax.xaxis.set_major_formatter(mdates.DateFormatter('%y')) ax.xaxis.set_major_locator(mdates.YearLocator()) ax.axvline(price_max, color ='red', alpha=0.5, dashes=(5, 2, 1, 2), linewidth=3.0) plt.ylabel("price $") ax.tick_params(direction='in', length=3, width=1, colors='grey')Monthly meanmonthly_mean=df.resample('MS').mean() monthly_mean.columns=['price'] ax = monthly_mean.plot() plt.title("average monthly price", fontdict={'fontsize': 20, 'fontweight': 'bold'}) plt.ylabel("price $") ax.tick_params(direction='in', length=3, width=1, colors='grey') #import cpi data downloaded from Department of Labor cpi = pd.read_excel('./data/CPI_all_urban_SeriesReport-20190426102108_bcf8a6.xlsx', index_col=0, skiprows=11) cpi = cpi.iloc[:,0:11] cpi.reset_index(drop=False, inplace=True) cpi =pd.melt(cpi,'Year',var_name="Month", value_name="cpi") cpi['Day']=1 cpi['Month']=pd.to_datetime(cpi.Month, format='%b').dt.month cpi.index=pd.to_datetime(cpi.loc[:,['Year','Month','Day']]) cpi.sort_index(inplace=True) cpi.drop(['Year','Month','Day'], axis=1, inplace=True) ax = cpi.cpi.plot() plt.title("hertorical cpi", fontdict={'fontsize': 20, 'fontweight': 'bold'}) plt.ylabel("index") ax.tick_params(direction='in', length=3, width=1, colors='grey') cpi = cpi.loc['1983-07-01':'2019-03-01'] cpi['adjust']= cpi.loc['2019-03-01'].cpi/cpi.cpi cpi.head() wti_cpi = pd.merge(left= monthly_mean, right= cpi, how='inner', left_index=True, right_index=True) wti_cpi['price_adj'] = wti_cpi.price*wti_cpi.adjust wti_cpi = wti_cpi.loc[:,['price','price_adj']] wti_cpi.plot() ax = plt.title("monthly average oil price inflation adjusted and unadjusted", fontdict={'fontsize': 20, 'fontweight': 'bold'}) plt.ylabel("$ price") #quarterly wti_cpi = wti_cpi.resample('q').mean() ax = wti_cpi.plot() plt.title("monthly average oil price inflation adjusted and unadjusted", fontdict={'fontsize': 20, 'fontweight': 'bold'}) plt.ylabel("index") ax.tick_params(direction='in', length=3, width=1, colors='grey') ax = wti_cpi.loc['2004':,:].pct_change().plot() plt.title("Quarterly change of inflation adjusted oil price and unadjusted price", fontdict={'fontsize': 20, 'fontweight': 'bold'}) plt.ylabel("index") ax.tick_params(direction='in', length=3, width=1, colors='grey') ax = wti_cpi.loc['2004':,:].pct_change().rolling(4, center = True).mean().plot() plt.title("Moving average of quarterly change of inflation adjusted oil price and unadjusted price", fontdict={'fontsize': 20, 'fontweight': 'bold'}) plt.ylabel("index") ax.tick_params(direction='in', length=3, width=1, colors='grey') wti_cpi['qtr'] =wti_cpi.index.quarter wti_cpi['year'] = wti_cpi.index.year wti_cpi['QoQ'] =wti_cpi.price_adj.pct_change() wti_cpi= wti_cpi.loc['2004':,['price_adj','QoQ' ,'year','qtr']] palette = sns.cubehelix_palette(18, start=2, rot=0, dark=0, light = 0.95, reverse = False) sns.pairplot(wti_cpi,palette=palette, hue='year' )Seasonalityplt.rcParams["figure.figsize"] = (10,5) sns.boxplot( x='price_adj', y='qtr', data=wti_cpi, showmeans=True,saturation=1.00, orient='h',boxprops=dict(alpha=.5), 
palette ='Blues') plt.title("boxplot of inflation adjusted prices by quarter", fontdict={'fontsize': 20, 'fontweight': 'bold'})We'll use the same dataset of beer reviews.df = pd.read_csv('data/beer_subset.csv.gz', parse_dates=['time'], compression='gzip') review_cols = ['review_appearance', 'review_aroma', 'review_overall', 'review_palate', 'review_taste'] df.head()A small teaser of the kind of stuff you can dofig, ax = plt.subplots(figsize=(5, 10)) sns.countplot(hue='kind', y='stars', data=(df[review_cols] .stack() .rename_axis(['record', 'kind']) .rename('stars') .reset_index()), ax=ax, order=np.arange(0, 5.5, .5)) sns.despine()Groupby Groupby is a fundamental operation to pandas and data analysis.The components of a groupby operation are to1. Split a table into groups2. Apply a function to each group3. Combine the resultsIn pandas the first step looks like```pythondf.groupby( grouper )````grouper` can be many things- Series (or string indicating a column in `df`)- function (to be applied on the index)- dict : groups by *values*- `levels=[ names of levels in a MultiIndex ]`gr = df.groupby('beer_style') grHaven't really done anything yet. Just some book-keeping to figure out which **keys** go with which **rows**. Keys are the things we've grouped by (each `beer_style` in this case).The last two steps, apply and combine, are just:gr.agg('mean')This says apply the `mean` function to each column. Non-numeric columns (nusiance columns) are excluded. We can also select a subset of columns to perform the aggregation on.gr[review_cols].agg('mean')`.` attribute lookup works as well.gr.abv.agg('mean')Certain operations are attached directly to the `GroupBy` object, letting you bypass the `.agg` partgr.abv.mean()Exercise Find the `beer_style`s with the greatest variance in `abv`.- hint: `.std` calculates the standard deviation, and is available on `GroupBy` objects like `gr.abv`.- hint: use `.sort_values` to sort a Series by the values (it took us a while to come up with that name)# your code goes here %load -r 15:17 solutions/solutions_groupby.pyNow we'll run the gamut on a bunch of grouper / apply combinations.Keep sight of the target though: split, apply, combine.- Grouper: Controls the output index * single grouper -> `Index` * array-like grouper -> `MultiIndex`- Subject (Groupee): Controls the output data values * single column -> `Series` (or DataFrame if multiple aggregations) * multiple columns -> `DataFrame`- Aggregation: Controls the output columns * single aggfunc -> `Index` in the colums * multiple aggfuncs -> `MultiIndex` in the columns (Or 1-D Index groupee is 1-d) Multiple Aggregations on one columngr['review_aroma'].agg(['mean', np.std, 'count']).head()Single Aggregation on multiple columnsgr[review_cols].mean()Multiple aggregations on multiple columnsgr[review_cols].agg(['mean', 'count', 'std'])Hierarchical Indexes in the columns can be awkward to work with, so I'll usuallymove a level to the Index with `.stack`.multi = gr[review_cols].agg(['mean', 'count', 'std']).stack(level=0) multi.head(10)You can group by **levels** of a MultiIndex.multi.groupby(level='beer_style')['mean'].agg(['min', 'max'])Group by **multiple** columnsdf.groupby(['brewer_id', 'beer_style']).review_overall.mean() df.groupby(['brewer_id', 'beer_style'])[review_cols].mean()Exercise: Plot the relationship between review length (the `text` column) and average `review_overall`.Hint: Break the problem into pieces:- Find the **len**gth of each reivew (remember the `df.text.str` namespace?)- Group by that Series of 
review lengths
- Find the `mean` `review_overall` by review length
- I used `style='k.'` in the plot
# Your solution
%load -r 1:5 solutions/solutions_groupby.py
Bonus exercise
- Try grouping by the number of words.
- Try grouping by the number of sentences.
Remember that `str.count` accepts a regular expression.
Don't worry too much about these, especially if you don't remember the syntax for regular expressions (I never can). Just jump to the next exercise.
# Your code goes here
%load -r 18:20 solutions/solutions_groupby.py
Exercise: Which **brewer** (`brewer_id`) has the largest gap between the min and max `review_overall` for two of their beers?
Hint: You'll need to do this in two steps.
1. Find the average `review_overall` by `brewer_id` and `beer_name`.
2. Find the difference between the max and min by brewer (remember `.groupby(level=)`)
# Your code goes here. You've got this!
%load -r 6:13 solutions/solutions_groupby.py
Create our own "kind" of beer, which aggregates `style`.
style = df.beer_style.str.lower()
style.head()
kinds = ['ipa', 'apa', 'amber ale', 'rye', 'scotch', 'stout', 'barleywine', 'porter', 'brown ale', 'lager', 'pilsner', 'tripel', 'biter', 'farmhouse', 'malt liquour', 'rice']
expr = '|'.join(['(?P<{name}>{pat})'.format(pat=kind, name=kind.replace(' ', '_')) for kind in kinds])
expr
beer_kind = (style.replace({'india pale ale': 'ipa', 'american pale ale': 'apa'})
                  .str.extract(expr, expand=False).fillna('').sum(1)
                  .str.lower().replace('', 'other'))
beer_kind.head()
df.groupby(['brewer_id', beer_kind]).review_overall.mean()
df.groupby(['brewer_id', beer_kind]).beer_id.nunique().unstack(1).fillna(0)
We've seen a lot of permutations among the number of groupers, the number of columns to aggregate, and the number of aggregators.
In fact, `.agg`, which returns one row per group, is just one way to combine the results. The three ways are
- `agg`: one row per group
- `transform`: identically shaped output as input
- `apply`: anything goes
Transform
The combined Series / DataFrame is the same shape as the input. For example, say you want to standardize the reviews by subtracting the mean.
def de_mean(reviews):
    s = reviews - reviews.mean()
    return s
de_mean(df.review_overall)
We can do this at the *person* level with `groupby` and `transform`.
df['review_overall_demeaned'] = df.groupby('profile_name').review_overall.transform(de_mean)
df[['review_overall', 'review_overall_demeaned']].head()
Apply
So there's `gr.agg`, and `gr.transform`, and finally `gr.apply`. We're going to skip apply for now (a rough sketch follows below); I have an example in a later notebook.
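As a rough illustration of what `apply` allows (this is only a sketch, not the example from the later notebook): each group is handed to the function as a DataFrame, and the function may return a result of any shape, for instance the top-rated reviews per beer style.

```python
# Return the two highest-rated reviews per beer style; the per-group results
# are concatenated back into a single DataFrame.
def top_reviews(group, n=2):
    return group.nlargest(n, 'review_overall')

top2 = df.groupby('beer_style', group_keys=False).apply(top_reviews)
top2[['beer_style', 'beer_name', 'review_overall']].head()
```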
ResampleResample is a special kind of groupby operation for when you have a `DatetimeIndex`.%matplotlib inline flights = pd.read_csv("data/flights.csv.gz", parse_dates=['fl_date', 'crs_dep_time', 'dep_time', 'crs_arr_time', 'arr_time']) flights.head() dep = flights.crs_dep_time.value_counts().sort_index() dep.head() dep.plot(); resampler = dep.resample('H') resampler.mean().plot() dep.resample('3H').mean().plot() dep.resample("D").sum().plot() daily = dep.resample("D").sum() daily daily.rolling(7).mean().plot() flights.head() flights['dep_delay'] = (flights['dep_time'] - flights['crs_dep_time']).dt.seconds / 60 delay = flights.set_index('crs_dep_time').dep_delay.sort_index() delay.resample('H').agg(['mean', 'sum', 'count']).rolling(8)['mean'].mean().plot()Minio create bucket- open-source object storage server that stores unstructured data- alternative to AWS S3 buckets- AWS-S3 compatable- python API ref: https://docs.minio.io/docs/python-client-api-reference.html- to quickly set-up a server see: https://docs.minio.io/docs/minio-docker-quickstart-guide - easy to set-up on linux or windows- in another notebook, we will try to use minio to store model artifacts for MLflow Import dependenciesfrom minio import Minio from minio.error import ResponseError import json import osInstantiate a client object- read envirnoment varaibles (should set them before open notebook)minioClient = Minio(os.environ['MLFLOW_S3_ENDPOINT_URL'].split('//')[1], access_key=os.environ['AWS_ACCESS_KEY_ID'], secret_key=os.environ['AWS_SECRET_ACCESS_KEY'], secure=False)List the buckets in the server- none were foundminioClient.list_buckets()Let's create an s3 compatible bucket- we'll name the bucket 'mlflow'try: minioClient.make_bucket('mlflow') except ResponseError as err: print(err) buckets = minioClient.list_buckets() for bucket in buckets: print(bucket.name, bucket.creation_date)Now we need to set the bucket policy- see: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.htmlpolicy = {"Version":"2012-10-17", "Statement":[ { "Sid":"", "Effect":"Allow", "Principal":{"AWS":"*"}, "Action":"s3:GetBucketLocation", "Resource":"arn:aws:s3:::mlflow" }, { "Sid":"", "Effect":"Allow", "Principal":{"AWS":"*"}, "Action":"s3:ListBucket", "Resource":"arn:aws:s3:::mlflow" }, { "Sid":"", "Effect":"Allow", "Principal":{"AWS":"*"}, "Action":"s3:GetObject", "Resource":"arn:aws:s3:::mlflow/*" }, { "Sid":"", "Effect":"Allow", "Principal":{"AWS":"*"}, "Action":"s3:PutObject", "Resource":"arn:aws:s3:::mlflow/*" } ]} minioClient.set_bucket_policy('mlflow', json.dumps(policy))List all the objects in a bucket# List all object paths in bucket that begin with my-prefixname. objects = minioClient.list_objects('mlflow', prefix='my', recursive=True) for obj in objects: print(obj.bucket_name, obj.object_name.encode('utf-8'), obj.last_modified, obj.etag, obj.size, obj.content_type)ImplementationThe convolutional autoencoder is implemented in Python3.8 using the TensorFlow 2.2 library. 
First we are going to import all the library and functions that is required in building convolutional autoencoder.import os os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.layers import Input, Dense, Conv2D, Activation, MaxPool2D from tensorflow.keras.layers import BatchNormalization, Flatten, Reshape, Conv2DTranspose, LeakyReLU from tensorflow.keras.models import Model from tensorflow.keras.optimizers import Adam, SGDLet us seed the environment and load the Fashion MNIST dataset.## Seeding np.random.seed(42) tf.random.set_seed(42) ## Loading the dataset and then normalizing the images. dataset = tf.keras.datasets.fashion_mnist (x_train, y_train),(x_test, y_test) = dataset.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz 32768/29515 [=================================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz 26427392/26421880 [==============================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz 8192/5148 [===============================================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz 4423680/4422102 [==============================] - 0s 0us/stepNow we specify the hyperparameters.## Hyperparameters H = 28 W = 28 C = 1 ## Latent space latent_dim = 128Now we will build the model for the convolutional autoencoder. the inputs variable defined the input for the model which takes the input image while training.inputs = Input(shape=(H, W, C), name="inputs") x = inputsThe layers specified below forms the encoder for the convolutional autoencoder. The Conv2D layer learn the required features from the incoming image or feature maps.x = Conv2D(32, (3, 3), padding="same")(x) x = BatchNormalization()(x) x = LeakyReLU(alpha=0.2)(x) x = MaxPool2D((2, 2))(x) x = Conv2D(64, (3, 3), padding="same")(x) x = BatchNormalization()(x) x = LeakyReLU(alpha=0.2)(x) x = MaxPool2D((2, 2))(x) x = Flatten()(x) units = x.shape[1] x = Dense(latent_dim, name="latent")(x) x = Dense(units)(x) x = LeakyReLU(alpha=0.2)(x) x = Reshape((7, 7, 64))(x)The layer specified below forms the decoder for the convolutional autoencder. The decoder is a mirror image of the encoder, except instead of using Conv2D layer, the Conv2DTranspose (Transpose Convolution) is used. 
The transpose convolution is used learns to increase the dimensions of the incoming feature maps.At the last a sigmoid activation is used because we want the output value between 0 and 1.x = Conv2DTranspose(64, (3, 3), strides=2, padding="same")(x) x = BatchNormalization()(x) x = LeakyReLU(alpha=0.2)(x) x = Conv2DTranspose(1, (3, 3), strides=2, padding="same")(x) x = BatchNormalization()(x) x = Activation("sigmoid", name="outputs")(x) outputs = xThe convolutional autoencoder is now complete and we are ready to build the model using all the layers specified above.autoencoder = Model(inputs, outputs) autoencoder.compile(optimizer=Adam(1e-3), loss='binary_crossentropy') autoencoder.summary()Model: "model" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= inputs (InputLayer) [(None, 28, 28, 1)] 0 _________________________________________________________________ conv2d (Conv2D) (None, 28, 28, 32) 320 _________________________________________________________________ batch_normalization (BatchNo (None, 28, 28, 32) 128 _________________________________________________________________ leaky_re_lu (LeakyReLU) (None, 28, 28, 32) 0 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 14, 14, 32) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 14, 14, 64) 18496 _____________________________________________________________[...]Now we start training the convolutional autoencoder using the Fashion MNIST dataset.autoencoder.fit( x_train, x_train, epochs=20, batch_size=256, shuffle=False, validation_data=(x_test, x_test) )Epoch 1/20 235/235 [==============================] - 124s 528ms/step - loss: 0.4641 - val_loss: 0.5113 Epoch 2/20 235/235 [==============================] - 124s 526ms/step - loss: 0.3890 - val_loss: 0.3935 Epoch 3/20 235/235 [==============================] - 123s 525ms/step - loss: 0.3511 - val_loss: 0.3519 Epoch 4/20 235/235 [==============================] - 123s 524ms/step - loss: 0.3266 - val_loss: 0.3251 Epoch 5/20 235/235 [==============================] - 123s 524ms/step - loss: 0.3098 - val_loss: 0.3085 Epoch 6/20 235/235 [==============================] - 123s 524ms/step - loss: 0.2978 - val_loss: 0.2996 Epoch 7/20 235/235 [==============================] - 123s 523ms/step - loss: 0.2889 - val_loss: 0.2884 Epoch 8/20 235/235 [==============================] - 123s 523ms/step - loss: 0.2821 - val_loss: 0.2828 Epoch 9/20 235/235 [==============================] - 123s 524ms/step - loss: 0.2768 - val_loss: 0.2796 Epoch 10/20 235/235 [==============================] - 124s 528m[...]After the training is complete, we can make predictions on the test dataset.test_pred_y = autoencoder.predict(x_test)After the output is generated on the test images, we will save some of the test images and its output for the comparison.n = 10 ## how many digits we will display plt.figure(figsize=(20, 4)) for i in range(n): ## display original ax = plt.subplot(2, n, i + 1) ax.set_title("Original Image") plt.imshow(x_test[i].reshape(H, W)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ## display reconstruction ax = plt.subplot(2, n, i + 1 + n) ax.set_title("Predicted Image") plt.imshow(test_pred_y[i].reshape(H, W)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) #plt.savefig("results/convolutonal_autoencoder.png")Learning Curves for 
modelsimport os import joblib import numpy as np import pandas as pd from sklearn.pipeline import Pipeline from sklearn.ensemble import GradientBoostingRegressor from sklearn.model_selection import learning_curve, GroupKFold from optimalcodon.projects.rnastability.dataprocessing import get_data, general_preprocesing_pipeline # 1 DATA PRE-PROCESSING (train_x, train_y), (test_x, test_y) = get_data( '../data/191004-TrainAndTestSets/') print("{} points for training and {} for testing with {} features".format( train_x.shape[0], test_x.shape[0], test_x.shape[1])) groups = train_x.index.values67775 points for training and 7576 for testing with 6 featuresGeneral cross-validation strategy# function def MYlearning_curve(mdl_id_name, train_x, train_y, estimator): """ Args: mdl_id_name (str): id to identify model train_x: training predictors, should be pre-processed for the particular model to be evaluated train_y: training labels estimator: model cv: grouped k-fold """ cv = GroupKFold(n_splits=5).split(train_x, train_y, groups=groups) print('generating learning curve for ' + mdl_id_name) train_sizes, train_scores, test_scores = learning_curve( estimator=estimator, cv=cv, X=train_x, y=train_y, train_sizes=np.linspace(0.1, 1, 5), scoring='r2', n_jobs=25, verbose=10 ) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) learning_curve_results = pd.DataFrame({ 'train_sizes': train_sizes, 'train_scores_mean': train_scores_mean, 'train_scores_std': train_scores_std, 'test_scores_mean': test_scores_mean, 'test_scores_std': test_scores_std }) learning_curve_results['model'] = mdl_id_name return learning_curve_results*** Linear Modelsfrom sklearn.preprocessing import PolynomialFeatures, StandardScaler from sklearn.linear_model import LinearRegression, Lasso, ElasticNet from sklearn.feature_selection import VarianceThreshold from sklearn.cross_decomposition import PLSRegression linear_models = dict( lasso="../191004-TrainPredictiveModelsMrnaStability/results_data/trained_models/lasso.joblib", enet="../191004-TrainPredictiveModelsMrnaStability/results_data/trained_models/enet.joblib", linear="../191004-TrainPredictiveModelsMrnaStability/results_data/trained_models/linear_reg.joblib", pls ="../191004-TrainPredictiveModelsMrnaStability/results_data/trained_models/PLS.joblib" ) # load the models linear_models = {x:joblib.load(linear_models[x]) for x in linear_models}For this linear model I use a specific preprocessing pipeline to add 2nd degree polynomial. 
Next, I define this preprocessing pipeline.# pre-processing preprocessing = Pipeline([ ('general', general_preprocesing_pipeline(train_x)), # see the code for general_preprocesing_pipeline ('polyfeaturs', PolynomialFeatures(degree=2)), ('zerovar', VarianceThreshold(threshold=0.0)), ('scaling', StandardScaler()) # I scale again not all polynomial features may be with scaled ]) preprocessing.fit(train_x) train_x_transformed_for_linear = preprocessing.transform(train_x) res_linear = [] for mdl_id, estimator in linear_models.items(): tmp_res = MYlearning_curve(mdl_id, train_x_transformed_for_linear, train_y, estimator) res_linear.append(tmp_res) res_linear = pd.concat(res_linear) res_linear['type'] = "linear" res_linear.to_csv('results-data/lc-linear.csv', index=False)*** Non-linear and Tree modelsnon_linear_models = dict( knn="../191004-TrainPredictiveModelsMrnaStability/results_data/trained_models/knn.joblib", adaBoost="../191004-TrainPredictiveModelsMrnaStability/results_data/trained_models/AdaBoost.joblib", decisionTree="../191004-TrainPredictiveModelsMrnaStability/results_data/trained_models/decision tree.joblib", gbm="../191004-TrainPredictiveModelsMrnaStability/results_data/trained_models/gbm.joblib", randomforest="../191004-TrainPredictiveModelsMrnaStability/results_data/trained_models/random forest.joblib" ) # load the models non_linear_models = {x:joblib.load(non_linear_models[x]) for x in non_linear_models} # reset the params for random forest non_linear_models['randomforest'] = non_linear_models['randomforest'].set_params(n_jobs=1)For the next model we use the general pre-processing pipeline:general_pipeline = general_preprocesing_pipeline(train_x) train_x_transformed = general_pipeline.transform(train_x) res_nonlinear = [] for mdl_id, estimator in non_linear_models.items(): tmp_res = MYlearning_curve(mdl_id, train_x_transformed, train_y, estimator) res_nonlinear.append(tmp_res) res_nonlinear = pd.concat(res_nonlinear) res_nonlinear['type'] = "non-linear" res_nonlinear.to_csv('results-data/lc-nonlinear.csv', index=False)Create a general MODFLOW model from the NHDPlus dataset__author__ = '' %matplotlib inline import os import sys import numpy as np import matplotlib.pyplot as plt import scipy.ndimage as nd import pandas as pd import random import gdal from model_specs import * from gen_mod_dict import * from ipywidgets import interact, Dropdown from IPython.display import display for key, value in model_dict.items(): md = key ms = model_dict[md] print('trying {}'.format(md)) try: pass except: passtrying AssabetProject specific variables are imported in the model_spec.py and gen_mod_dict.py files that must be included in the notebook directory. The first first includes pathnames to data sources that will be different for each user. The second file includes a dictionary of model-specific information such as cell size, default hydraulic parameter values, and scenario defintion (e.g. include bedrock, number of layers, etc.). There are examples in the repository. Run the following cell to get a pull-down menu of models in the model_dict. Then, without re-running that cell, run all the remaining cells. Re-running the following cell would re-set the model to the first one in the list, which you probably don't want. 
If you use the notebook option to run all cells below, it runs the cell you're in, so if you use that option, move to the next cell (below the pull-down menu of models) first.models = list(model_dict.keys()) models.sort() model_area = Dropdown( options=models, description='Model:', background_color='cyan', border_color='black', border_width=2) display(model_area) md = model_area.value ms = model_dict[md] print('The model being processed is {}'.format(md))The model being processed is AssabetRead model_grid.csv file that was created using first general model notebookmodel_ws = os.path.join(proj_dir, ms['ws']) model_file = os.path.join(model_ws, 'model_grid.csv') model_grid = pd.read_csv(model_file, na_values=[hnoflo]) if 'obs_grp' in model_grid.columns: model_grid.drop('obs_grp', axis=1, inplace=True)Get NROW, NCOL from model_grid.csvNROW = model_grid.row.max() + 1 NCOL = model_grid.col.max() + 1 num_cells = NROW * NCOLThis cell makes a new column that contains the percent coarse material (which comes from 'is_coarse' in model_grid.csv') in the local neighborhood of each cell. The user can change the size of the neighborhood, which is a square blcok of cells centered on each cell as it moves, by changing the variable hood_size.is_coarse = np.zeros(( NROW, NCOL ), dtype=np.float32) gess = model_grid.gess_poly.values.reshape( NROW, NCOL ) is_coarse[gess == 0] = 0 is_coarse[gess == 1] = 1 # use this number to get broader dist of pct_coarse # this might allow quantiles where otherwise none are possible # this variable is not stored for the next step--only used here for quantiles hood_size = 5 footprint = np.ones((hood_size, hood_size)) / hood_size**2 temp = nd.correlate(is_coarse, footprint,) model_grid['pct_coarse'] = temp.ravel() model_grid.pct_coarse.hist()* Select 'hydro' obs from model_grid* Put the integer that represents unique reaches into the index* Groupby the reach integer so that all the cells that belong to a reach are grouped together* Add labels to identify the quantiles of the median elevation of all the cells for each reach* Groupby by those quantiles so that all the cells that belong to each quantile are grouped together* Loop through the rows from the original dataframe and select the rows that belong to the elevation quantile group* Label each group as they're being looped through and append them for each observation* The commented-out statement could be used to randomly sample from each group# make additional obs using drain observation in MODFLOW (should be > 0) # pull out drain flows from budget package for first order # also summarize flow at gages sel = pd.DataFrame(model_grid[model_grid.obs_type == 'hydro']) sel.set_index(sel.reach_int, drop=False, inplace=True) num_of_samples = 10 num_of_obs = 5 o1 = sel.groupby(['reach_int']).median() o1['top_quant'], rbins = pd.qcut(o1.top, num_of_obs, retbins=True, labels=False) temp = o1.groupby(['top_quant']) stream_obs = pd.DataFrame() for grp, item in temp: obs = pd.DataFrame(sel.loc[item.index]) obs['obs_grp'] = 'strm_el{}'.format(grp) obs['obs_grp_int'] = grp + 1 stream_obs = pd.concat([stream_obs, obs]) # note: possible enhancement is to add within-cell percent coarse num_of_obs = 3 is_topo = model_grid.obs_type == 'topo' try: model_grid.loc[is_topo, 'top_quant'] = pd.qcut(model_grid.top, num_of_obs, labels=[1, 2, 3]) except: pass try: model_grid.loc[is_topo, 'coarse_grp'] = pd.cut(model_grid.pct_coarse, [0.0, 0.1, 0.9, 1.0], include_lowest=True, labels=[1, 2, 3]) except: pass try: mini_mohp = model_grid.dist2str / 
model_grid.dist2str.max() model_grid.loc[is_topo, 'hypo_quant'] = pd.cut(mini_mohp, [0.0, 0.3333, 0.6666, 1.0], include_lowest=True, labels=[1, 2, 3]) except: passEach cell saves one of the individual quantiles or quantile-based observation groups as tiff filesdata = model_grid.pct_coarse.values.reshape(NROW,NCOL) src_pth = os.path.join(model_ws, 'ibound.tif') src = gdal.Open(src_pth) dst_pth = os.path.join(model_ws, 'pct_coarse.tif') driver = gdal.GetDriverByName('GTiff') dst = driver.CreateCopy(dst_pth, src, 0) band = dst.GetRasterBand(1) band.WriteArray(data) band.SetNoDataValue(-9999) dst = None src = None data = model_grid.coarse_grp.values.reshape(NROW,NCOL) src_pth = os.path.join(model_ws, 'ibound.tif') src = gdal.Open(src_pth) dst_pth = os.path.join(model_ws, 'coarse_grp.tif') driver = gdal.GetDriverByName('GTiff') dst = driver.CreateCopy(dst_pth, src, 0) band = dst.GetRasterBand(1) band.WriteArray(data) band.SetNoDataValue(255) dst = None src = None data = model_grid.hypo_quant.values.reshape(NROW,NCOL) src_pth = os.path.join(model_ws, 'ibound.tif') src = gdal.Open(src_pth) dst_pth = os.path.join(model_ws, 'hypo_quant.tif') driver = gdal.GetDriverByName('GTiff') dst = driver.CreateCopy(dst_pth, src, 0) band = dst.GetRasterBand(1) band.WriteArray(data) band.SetNoDataValue(255) dst = None src = None data = model_grid.top_quant.values.reshape(NROW,NCOL) src_pth = os.path.join(model_ws, 'ibound.tif') src = gdal.Open(src_pth) dst_pth = os.path.join(model_ws, 'top_quant.tif') driver = gdal.GetDriverByName('GTiff') dst = driver.CreateCopy(dst_pth, src, 0) band = dst.GetRasterBand(1) band.WriteArray(data) band.SetNoDataValue(255) dst = None src = None blank = np.zeros((num_cells)) blank[stream_obs.node_num.values] = stream_obs.obs_grp_int data = blank.reshape((NROW,NCOL)) src_pth = os.path.join(model_ws, 'ibound.tif') src = gdal.Open(src_pth) dst_pth = os.path.join(model_ws, 'stream_obs.tif') driver = gdal.GetDriverByName('GTiff') dst = driver.CreateCopy(dst_pth, src, 0) band = dst.GetRasterBand(1) band.WriteArray(data) band.SetNoDataValue(0) dst = None src = None# Here we assume CUDA 10.0 is installed. You should change the number # according to your own CUDA version (e.g. mxnet-cu101 for CUDA 10.1). ! pip install mxnet-cu101 ! pip install autogluon import autogluon as ag from autogluon import TextClassification as task/usr/local/lib/python3.6/dist-packages/mxnet/optimizer/optimizer.py:167: UserWarning: WARNING: New optimizer gluonnlp.optimizer.lamb.LAMB is overriding existing optimizer mxnet.optimizer.optimizer.LAMB Optimizer.opt_registry[name].__name__))We are using a subset of the Stanford Sentiment Treebank (SST). The original dataset consists of sentences from movie reviews and human annotations of their sentiment. The task is to classify whether a given sentence has positive or negative sentiment (binary classification)dataset = task.Dataset(name='ToySST')AutoGluon fine-tunes neural networks that have already been pretrained on large scale text dataset such as Wikicorpus. Although the dataset involves entirely different text, lower-level features captured in the representations of the pretrained network (such as edge/texture detectors) are likely to remain useful for our own text dataset.predictor = task.fit(dataset, epochs=1) print('Top-1 val acc: %.3f' % predictor.results['best_reward']) test_acc = predictor.evaluate(dataset) print('Top-1 test acc: %.3f' % test_acc) sentence = 'I feel this is awesome!' 
ind = predictor.predict(sentence)
print('The input sentence sentiment is classified as [%d].' % ind.asscalar())
The input sentence sentiment is classified as [1].
Welcome to GraphScope Playground
Try GraphScope directly in your browser! Let's get started with printing "Hello World" in Python: just hover the mouse over [ ] and press the play button to the upper left, or press Shift-Enter to execute.
print("hello world")
hello world
Next, just import *GraphScope* and have fun! In addition, you can also create new python files. Please note, **ONLY** the files in the `Workspace` folder will be preserved after the session ends.
import graphscope
graphscope.__version__
Training
# Setup
encoder = Encoder(LATENT_DIM)
decoder = Decoder(LATENT_DIM)
model = VAE(encoder, decoder, N_SAMPLES, LATENT_DIM)
model.to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-4)
kl_divergence_loss = KLDivergenceLoss()
recon_loss = ReconstructionLoss()

for epoch in range(1, EPOCHS + 1):
    # TRAIN
    model.train()
    train_loss = 0
    for batch_idx, (x, _) in enumerate(train_loader):
        optimizer.zero_grad()
        x = Variable(x)
        x = x.to(device)
        xhat, mu, logvar = model.forward(x)
        rc_loss = recon_loss(xhat, x)
        kl_loss = kl_divergence_loss(mu, logvar)
        loss = rc_loss + kl_loss
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        if batch_idx % LOG_INTERVAL == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tRCLoss: {:.6f}, KLLoss: {:.6f}'.format(
                epoch, batch_idx * len(x), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                rc_loss.item(), kl_loss.item()
            ))
    print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss / len(train_loader.dataset)))

    # TEST
    model.eval()
    test_loss = 0
    for i, (data, _) in enumerate(test_loader):
        data = data.to(device)
        # we're only going to infer, so no autograd at all required: volatile=True
        data = Variable(data, volatile=True)
        xhat, mu, logvar = model.forward(data)
        rc_loss = recon_loss(xhat, data)
        kl_loss = kl_divergence_loss(mu, logvar)
        loss = rc_loss + kl_loss
        test_loss += loss.item()
        if i == 0:
            n = min(data.size(0), 8)
            # for the first batch of the epoch, show the first 8 input digits
            # with right below them the reconstructed output digits
            comparison = torch.cat([data[:n], xhat.view(BATCH_SIZE, 1, 28, 28)[:n]])
            save_image(comparison.data.cpu(), './mnist/reconstruction_' + str(epoch) + '.png', nrow=n)
    test_loss /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss))

xgen = model.sample()
xgen = xgen.detach().numpy()[0, 0, ...]
plt.imshow(xgen)
xgen.max(), xgen.min(), xgen.mean()
Tests
x, y = next(iter(train_loader))
print(x.shape)
print(x.max())
plt.imshow(x[0][0])
mu, logvar = encoder.forward(x[0, ...])
mu, logvar.exp()
xhat = decoder.forward(mu)
plt.imshow(xhat.detach().numpy()[0][0])
xhat, mu, logvar = model.forward(x)
plt.imshow(xhat[0].detach().numpy()[0][0])
Linear Regression
This first part will cover:
* A simple implementation of a neural network
* Concepts such as the target function and the cost function
* Optimization by gradient descent
All of this will be illustrated with the help of the simplest possible neural network: a linear regression model with 1 input and 1 output, whose goal is to predict the target value $t$ from the input value $x$. The network is defined as having an input $\mathbf{x}$ that is transformed by the weight $w$ to generate the output $\mathbf{y}$ via the equation $\mathbf{y}=\mathbf{x}*w$, where $\mathbf{y}$ needs to approximate the targets $\mathbf{t}$ as well as possible, as defined by a cost function.
This network can be represented graphically as:![Image of the simple neural network](https://peterroelants.github.io/images/neural_net_implementation/SimpleANN01.png)In regular neural networks we typically have several layers, non-linear activation functions, and a bias for each node. In this first example we have only one layer with a single weight parameter $w$, a linear activation function $z=\phi(y)$ (i.e. $z=y$) at the output, and no bias. In [simple linear regression](http://en.wikipedia.org/wiki/Simple_linear_regression) the parameter $w$ and the bias are typically combined into the parameter vector, where the bias defines the y-intercept and $w$ is the slope of the regression line. In typical linear regression these parameters are fitted via the [least squares method](http://s-mat-pcs.oulu.fi/~mpa/matreng/ematr5_5.htm).In this example we will approximate the targets $\mathbf{t}$ with the model outputs $y$ by minimizing the squared error cost function (= squared Euclidean distance). The squared error cost function is defined as $\frac{1}{2}\Vert \mathbf{y} - \mathbf{t} \Vert ^2$ (here the $\frac{1}{2}$ term is added for our convenience to simplify the derivation later on). The minimization of the cost will be done with the [gradient descent](http://en.wikipedia.org/wiki/Gradient_descent) optimization method, which is typically used when training neural networks.We start by importing the libraries we need and setting a seed for the random number generator so that the example is reproducible:# Python imports import numpy as np # Matrix and vector computation package import matplotlib.pyplot as plt # Plotting library # Allow matplotlib to plot inside this notebook %matplotlib inline # Set the seed of the numpy random number generator so that the tutorial is reproducible np.random.seed(seed=1)Defining the target functionIn this example the targets $\mathbf{t}$ will be generated from a function $\mathit{f}$ plus additive [gaussian noise](http://en.wikipedia.org/wiki/Normal_distribution) sampled from $\mathcal{N}(0, 0.2)$, where $\mathcal{N}$ is the normal distribution with mean $0$ and variance $0.2$. $f$ is defined as $f(x) = x * 2$, with $\mathbf{x}$ the input samples, slope $2$ and intercept $0$. Hence $\mathbf{t}$ is defined as $f(\mathbf{x}) + \mathcal{N}(0,0.2)$.We will sample 20 inputs $\mathbf{x}$ from a uniform distribution between 0 and 1, and then generate the target output values $\mathbf{t}$ by the process described above. The resulting inputs $\mathbf{x}$ and targets $\mathbf{t}$ are plotted against each other in the figure below, together with the original line $f(x)$ without the gaussian noise. Note that $\mathbf{x}$ is a vector of individual input samples $x_i$, and that $\mathbf{t}$ is a corresponding vector of target values $t_i$.# Define the vector of input samples as x, with 20 values sampled from a uniform distribution # between 0 and 1 x = np.random.uniform(0, 1, 20) # Generate the target values t from x with small gaussian noise so the estimation won't # be perfect.
# Define a function f that represents the line that generates t without noise def f(x): return x * 2 # Create the targets t with some gaussian noise noise_variance = 0.2 # Variance of the gaussian noise # Gaussian noise error for each sample in x noise = np.random.randn(x.shape[0]) * noise_variance # Create targets t t = f(x) + noise # Plot the target t versus the input x plt.plot(x, t, 'o', label='t') # Plot the initial line plt.plot([0, 1], [f(0), f(1)], 'b-', label='f(x)') plt.xlabel('$x$', fontsize=15) plt.ylabel('$t$', fontsize=15) plt.ylim([0,2]) plt.title('inputs (x) vs targets (t)') plt.grid() plt.legend(loc=2) plt.show()Defining the cost functionWe will optimize the model $\mathbf{y} = \mathbf{x} * w$ by tuning the parameter $w$ so that the [squared error cost](http://en.wikipedia.org/wiki/Euclidean_distance#Squared_Euclidean_distance) over all samples is minimized. The squared error cost is defined as:$$\xi = \frac{1}{2}\sum_{i=1}^N \Vert y_i - t_i \Vert ^ 2$$with $N$ the number of samples in the training set. The optimization goal is: $\underset{w}{\text{argmin}} \frac{1}{2}\sum_{i = 1}^{N} \Vert y_i - t_i \Vert ^ 2$.Note that we take the sum of the errors over all samples, which is known as batch training. We could also update the parameters based on one sample at a time, which is known as online training.This cost function as a function of the variable $w$ is plotted in the figure below. The value $w=2$ is at the minimum of the cost function (the bottom of the parabola); this is the same value as the slope we chose for $f(x)$. Note that this function is [convex](http://en.wikipedia.org/wiki/Convex_function) and has only one minimum: the global minimum. While every squared error cost function for linear regression is convex, this is not the case for other models and other cost functions.The neural network model is implemented in the function `nn(x, w)`, and the cost function is implemented in the function `cost(y, t)`.# Define the neural network function y = x * w def nn(x, w): y = x * w return y def activation(y): z = y return z # Define the cost function def cost(y, t): xi = (0.5*(y - t)**2).sum() return xi # Plot the cost vs the given weight w # Define a vector of weights for which we want to plot the cost ws = np.linspace(0, 4, num=100) # weight values cost_ws = np.vectorize(lambda w: cost(activation(nn(x, w)) , t))(ws) # cost for each weight in ws # Plot plt.plot(ws, cost_ws, 'r-') plt.xlabel('$w$', fontsize=15) plt.ylabel('$J(\mathbf{w})$', fontsize=15) plt.title('cost vs. weight') plt.grid() plt.show()Optimizing the cost functionFor a simple cost function like in this example you can easily see what the optimal weight should be. But the error surface can be quite [complex](https://en.wikipedia.org/wiki/Rastrigin_function) or have a high dimensionality (each additional parameter $w_i$ adds a new dimension). This is why we use [optimization techniques](https://en.wikipedia.org/wiki/Mathematical_optimization) to find the minimum of the error function. Gradient descentA commonly used optimization algorithm for training neural networks is the [gradient descent](http://en.wikipedia.org/wiki/Gradient_descent) algorithm. Gradient descent works by taking the [derivative](http://en.wikipedia.org/wiki/Derivative) of the cost function $\xi$ with respect to the parameters at a specific position on this cost function and updating the parameters in the direction of the negative gradient.
The parameter $w$ is iteratively updated by taking steps proportional to the negative of the gradient:$$w^{k+1} = w^k + \Delta w^k$$With $w^k$ the value of $w$ at iteration $k$ of the gradient descent. $\Delta w$ is defined as:$$\Delta w = -\eta \frac{\partial \xi}{\partial w}$$With $\eta$ the learning rate, which is how large a step is taken, and $\partial \xi / \partial w$ the gradient of the cost function $\xi$ with respect to the weight $w$. For each sample $i$ this gradient can be split according to the [chain rule](http://en.wikipedia.org/wiki/Chain_rule) into:$$\frac{\partial \xi_i}{\partial w} = \frac{\partial y_i}{\partial w} \frac{\partial z_i}{\partial y_i} \frac{\partial \xi_i}{\partial z_i}$$Where $\xi_i$ is the squared error, so the term ${\partial \xi_i} / {\partial z_i}$ can be written as:$$\frac{\partial \xi_i}{\partial z_i} = \frac{\partial \frac{1}{2}(z_i - t_i)^2}{\partial z_i} = 2\frac{1}{2} (z_i - t_i) = (z_i - t_i)$$The second term involves the activation function; since in this case it is linear, $z_i = y_i$, the term ${\partial z_i} / {\partial y_i}$ can be written as:$$\frac{\partial z_i}{\partial y_i} = 1$$And since $y_i = x_i * w$ we can write ${\partial y_i} / {\partial w}$ as:$$\frac{\partial y_i}{\partial w} = \frac{\partial (x_i * w)}{\partial w} = x_i$$Thus the full update $\Delta w$ for sample $i$ becomes:$$\Delta w = -\eta \frac{\partial \xi_i}{\partial w} = -\eta (z_i - t_i) x_i = -\eta (y_i - t_i) x_i$$In batch processing we simply add up the gradients of all samples:$$\Delta w = -\eta \sum_{i=1}^{N}(y_i - t_i)x_i$$To start the gradient descent algorithm, you typically choose the initial parameters at random and keep updating them with $\Delta w$ until convergence. The learning rate needs to be tuned separately as a hyperparameter for each neural network.The gradient ${\partial \xi} / {\partial w}$ is implemented by the function `gradient(w, x, t)`. $\Delta w$ is computed by `delta_w(w_k, x, t, learning_rate)`. The loop below runs 4 iterations of gradient descent while printing the parameter value and the current cost.# define the gradient function. Remember that y = nn(x, w) = x * w def gradient(w, x, t): return x * (activation(nn(x, w)) - t) # define the update function delta w. Batch formula def delta_w(w_k, x, t, learning_rate): return learning_rate * gradient(w_k, x, t).sum() # Set the initial weight parameter w = 0.1 # Set the learning rate learning_rate = 0.1 # Start performing the gradient descent updates, and print the weights and cost: nb_of_iterations = 4 # number of gradient descent updates w_cost = [(w, cost(nn(x, w), t))] # List to store the weight,costs values for i in range(nb_of_iterations): dw = delta_w(w, x, t, learning_rate) # Get the delta w update w = w - dw # Update the current weight parameter w_cost.append((w, cost(activation(nn(x, w)), t))) # Add weight,cost to list # Print the final w, and cost for i in range(0, len(w_cost)): print('w({}): {:.4f} \t cost: {:.4f}'.format(i, w_cost[i][0], w_cost[i][1]))w(0): 0.1000 cost: 6.8099 w(1): 0.8139 cost: 2.6999 w(2): 1.2515 cost: 1.1554 w(3): 1.5197 cost: 0.5749 w(4): 1.6842 cost: 0.3568Note from the result above that the gradient descent algorithm converges quickly toward the target value of around 2.0.
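As an extra sanity check (a minimal sketch, not part of the original notebook), the least-squares slope for a model without a bias term, $y = x * w$, has the closed form $w^{*} = \frac{\sum_i x_i t_i}{\sum_i x_i^2}$, and the gradient descent iterations above should approach it:
# Closed-form least-squares slope for y = x * w (no bias term)
w_closed_form = np.sum(x * t) / np.sum(x * x)
print('closed-form w: {:.4f}'.format(w_closed_form))
print('cost at closed-form w: {:.4f}'.format(cost(activation(nn(x, w_closed_form)), t)))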
Let's try to plot these iterations of the gradient descent algorithm to visualize it better.# Plot the first 2 gradient descent updates plt.plot(ws, cost_ws, 'r-') # Plot the error curve # Plot the updates for i in range(0, len(w_cost)-1): w1, c1 = w_cost[i] w2, c2 = w_cost[i+1] plt.plot(w1, c1, 'bo') plt.plot([w1, w2],[c1, c2], 'b-') plt.text(w1, c1+0.5, '$w^{}$'.format(i)) plt.plot(w2, c2, 'bo') # Plot the final point plt.text(w2, c2+0.5, '$w^{}$'.format(i+1)) # Text the final point # Show figure plt.xlabel('$w$', fontsize=15) plt.ylabel('$\\xi$', fontsize=15) plt.title('Gradient descent updates plotted on cost function') plt.grid() plt.show()Iterative gradient descentThe last figure shows the gradient descent updates of the weight parameter for 2 iterations. The blue dots represent the weight parameter values $w^k$ at iteration $k$. Notice how the update depends on the position of the weight and the gradient at that point. The first update takes a much bigger step than the second update because the gradient at $w^0$ is much larger than the gradient at $w^1$.The regression line fitted by gradient descent with 10 iterations is shown in the figure below. The fitted line (red) lies close to the original line (blue), which is what we tried to approximate from the noisy samples. Note that both lines go through the point $(0,0)$; this is because we don't have a bias term, which represents the intercept, so the intercept at $x = 0$ is $t = 0$.w = 0 # Start performing the gradient descent updates nb_of_iterations = 10 # number of gradient descent updates for i in range(nb_of_iterations): dw = delta_w(w, x, t, learning_rate) # get the delta w update w = w - dw # update the current weight parameter # Plot the fitted line against the target line # Plot the target t versus the input x plt.plot(x, t, 'o', label='t') # Plot the initial line plt.plot([0, 1], [f(0), f(1)], 'b-', label='f(x)') # plot the fitted line plt.plot([0, 1], [0*w, 1*w], 'r-', label='fitted line') plt.xlabel('input x') plt.ylabel('target t') plt.ylim([0,2]) plt.title('input vs.
target') plt.grid() plt.legend(loc=2) plt.show()Learning Objectives- we learn what are Convolutional Neural Network (CNN)- Will talk about CNN components such as stride, max or average pooling- Discuss how we can obtain the parameters for CNN Activity: Obtain the number of parameters for the following CNN--- - By default, the strides = (1, 1)from __future__ import print_function import keras from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1))) # it inherited from the previous step so we dont have to define '32' model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) # model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) # model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax')) model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) print(model.summary())_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_1 (Conv2D) (None, 26, 26, 32) 320 _________________________________________________________________ conv2d_2 (Conv2D) (None, 24, 24, 64) 18496 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 12, 12, 64) 0 _________________________________________________________________ flatten_1 (Flatten) (None, 9216) 0 _________________________________________________________________ dense_1 (Dense) (None, 128) 1179776 _________________________________________________________________ dense_2 (Dense) (None, 10) 1290 ================================================================= Total para[...]Deploy Watson ML Deploys model to IBM Watson Machine LearningCurrently only supports TensorFlow V2.4 SavedModel format. More to come...!pip3 install ibm-watson-machine-learning from ibm_watson_machine_learning import APIClient import json import logging import sys import time import re import shutil import os # IBM Cloud api_key https://cloud.ibm.com/iam/apikeys api_key = os.environ.get('api_key') # IBM Cloud deployment space ID https://dataplatform.cloud.ibm.com/ml-runtime/spaces space = os.environ.get('space') # IBM Cloud location (default: us-south) location = os.environ.get('location', 'us-south') # Model zip file name model_zip = os.environ.get('model_zip', 'model.zip') # IBM Cloud WML Deployment Name deployment_name = os.environ.get('deployment_name', 'Deployment Name') # IBM Cloud WML Model Name model_name = os.environ.get('model_name', 'Model Name') # temporal data storage for local execution data_dir = os.environ.get('data_dir', '../../data/') parameters = list( map(lambda s: re.sub('$', '"', s), map( lambda s: s.replace('=', '="'), filter( lambda s: s.find('=') > -1 and bool(re.match(r'[A-Za-z0-9_]*=[.\/A-Za-z0-9]*', s)), sys.argv ) ))) for parameter in parameters: logging.warning('Parameter: ' + parameter) exec(parameter) wml_credentials = { "apikey": api_key, "url": 'https://' + location + '.ml.cloud.ibm.com' } client = APIClient(wml_credentials) model_folder = str(time.time()) shutil.unpack_archive(data_dir + model_zip, extract_dir=data_dir + model_folder) data_dir_model_folder = data_dir + model_folder %%bash -s "$data_dir_model_folder" cd $1 tar -czvf ../model.tar.gz * cd .. 
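The cells above read their configuration from environment variables (api_key, space, location, model_zip, deployment_name, model_name). As a minimal usage sketch, not part of the original notebook and with placeholder values only, a local run could set them before the configuration cells above are executed:
# Placeholder values for a local run; replace with your own IBM Cloud credentials and names
os.environ['api_key'] = '<IBM Cloud API key>'
os.environ['space'] = '<deployment space ID>'
os.environ['location'] = 'us-south'
os.environ['model_zip'] = 'model.zip'
os.environ['deployment_name'] = 'my-tf24-deployment'
os.environ['model_name'] = 'my-tf24-model'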
o = client.software_specifications.get_uid_by_name('tensorflow_2.4-py3.7') software_spec_uid = o client.set.default_space(space) model_meta_props = { client.repository.ModelMetaNames.NAME: deployment_name, client.repository.ModelMetaNames.TYPE: "tensorflow_2.4", client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: software_spec_uid } published_model = client.repository.store_model( model='model.tar.gz', meta_props=model_meta_props ) model_uid = client.repository.get_model_uid(published_model) model_details = client.repository.get_details(model_uid) print(json.dumps(model_details, indent=2)) client.repository.list_models(limit=15) deployment = client.deployments.create( model_uid, meta_props={ client.deployments.ConfigurationMetaNames.NAME: model_name, client.deployments.ConfigurationMetaNames.ONLINE: {} } ) deployment_uid = client.deployments.get_uid(deployment) deployment_uidColumns for batting- Rk -- Rank. This is a count of the rows from top to bottom. It is recalculated following the sorting of a column.- Pos -- Position- Name -- Player Name. Bold can mean player is active for this team or player has appeared in MLB * means LHP or LHB, means switch hitter, + can mean HOFer.- Age -- Player’s age at midnight of June 30th of that year- G -- Games Played This includes all times that the player appeared on the lineup card. Pitchers in non-DH games that appeared on the lineup card but didn't bat will still have a game in this column.- PA -- Plate Appearances When available, we use actual plate appearances from play-by-play game accounts. Otherwise estimated using AB + BB + HBP + SF + SH, which excludes catcher interferences. When this color click for a summary of each PA.- AB -- At Bats- R -- Runs Scored/Allowed- H -- Hits/Hits Allowed- 2B -- Doubles Hit/Allowed- 3B -- Triples Hit/Allowed- HR -- Home Runs Hit/Allowed- RBI -- Runs Batted In- SB -- Stolen Bases- CS -- Caught Stealing- BB -- Bases on Balls/Walks- SO -- Strikeouts- BA -- Hits/At Bats For recent years, leaders need 3.1 PA per team game played. Bold indicates highest BA using current stats. Gold means awarded title at end of year.- OBP -- (H + BB + HBP)/(At Bats + BB + HBP + SF) For recent years, leaders need 3.1 PA per team game played- SLG -- Total Bases/At Bats or (1B + 2*2B + 3*3B + 4*HR)/AB For recent years, leaders need 3.1 PA per team game played- OPS -- On-Base + Slugging Percentages For recent years, leaders need 3.1 PA per team game played- OPS+ -- OPS+ 100*[OBP/lg OBP + SLG/lg SLG - 1] Adjusted to the player’s ballpark(s)- TB -- Total Bases Singles + 2 x Doubles + 3 x Triples + 4 x Home Runs.- GDP -- Double Plays Grounded Into Only includes standard 6-4-3, 4-3, etc. double plays. First tracked in 1933. For gamelogs only in seasons we have play-by-play, we include triple plays as well. 
All official seasonal totals do not include GITP's.- HBP -- Times Hit by a Pitch.- SH -- Sacrifice Hits (Sacrifice Bunts)- SF -- Sacrifice Flies First tracked in 1954.- IBB -- Intentional Bases on Balls First tracked in 1955.#Selecting main statistics columns and Renaming them for easier interpretation: batting = batting[['Pos', 'Name', 'G', 'PA', 'AB', 'R', 'H', '2B', '3B', 'HR', 'SB', 'CS', 'BB', 'SO']] col_ren = ['position', 'name', 'games_played', 'plate_appearances', 'at bats', 'runs_made', 'hits_made', 'doubles-made','triples-made','home_runs-made','stolen_bases','caught_stealing','base_on_balls','strikes_out'] batting.columns = col_ren batting.head() #For pitching pitching.head()Pitching Columns- Rk -- Rank This is a count of the rows from top to bottom. It is recalculated following the sorting of a column.- Pos -- Position- Name -- Player Name Bold can mean player is active for this team or player has appeared in MLB * means LHP or LHB, means switch hitter, + can mean HOFer.- Age -- Player’s age at midnight of June 30th of that year- W -- Wins- L -- Losses- W-L% -- Win-Loss Percentage- W / (W + L) For players, leaders need one decision for every ten team games. For managers, minimum to qualify for leading is 320 games.- ERA -- 9 * ER / IP For recent years, leaders need 1 IP per team game played. Bold indicates lowest ERA using current stats Gold means awarded ERA title at end of year.- G -- Games Played or Pitched- GS -- Games Started- GF -- Games Finished- CG -- Complete Game- SHO -- Shutouts No runs allowed and a complete game.- SV -- Saves- IP -- Innings Pitched- H -- Hits/Hits Allowed- R -- Runs Scored/Allowed- ER -- Earned Runs Allowed- HR -- Home Runs Hit/Allowed- BB -- Bases on Balls/Walks- IBB -- Intentional Bases on Balls First tracked in 1955.- SO -- Strikeouts- HBP -- Times Hit by a Pitch.- BK -- Balks- WP -- Wild Pitches- BF -- Batters Faced- ERA+ -- ERA+100*[lgERA/ERA] Adjusted to the player’s ballpark(s).- FIP -- Fielding Independent Pitching this stat measures a pitcher's effectiveness at preventing HR, BB, HBP and causing SO(13*HR + 3*(BB+HBP) - 2*SO)/IP + Constantlg The constant is set so that each season MLB average FIP is the same as the MLB avg ERA- WHIP -- (BB + H)/IP For recent years, leaders need 1 IP per team game played- H9 -- 9 x H / IP For recent years, leaders need 1 IP per team game played- HR9 -- 9 x HR / IP For recent years, leaders need 1 IP per team game played- BB9 -- 9 x BB / IP For recent years, leaders need 1 IP per team game played- SO9 -- 9 x SO / IP For recent years, leaders need 1 IP per team game played- SO/W -- SO/W or SO/BB For recent years, pitching leaders need 1 IP per team game played.No batting leaders computed.#Getting the main statistics and renaming the columns pitching = pitching[['Name', 'W', 'L', 'G', 'GS', 'GF', 'CG', 'SHO', 'SV', 'IP', 'H', 'R', 'ER', 'HR', 'BB', 'SO', 'HBP','BK', 'WP', 'BF']] col_ren = ['name', 'wins', 'losses','games_pitched', 'games_started','games_finished', 'complete_game','shutouts','saves','innings_pitched','hits-allowed', 'runs-allowed','earned_runs-allowed', 'home_runs-allowed','base_on_balls-allowed','strikeouts','times_hit_by_pitch','balks','wild_pitches','batters_faced'] pitching.columns = col_ren pitching.head() #Creating the main data set using player name as primary key df = pd.merge(pd.merge(man40, batting, left_on='name', right_on='name', how='left'), pitching, left_on='name', right_on='name', how='left') df.shape df.head() df.columns #Checking of missing values plt.figure(figsize=(20,10)) 
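# (Added sketch, not in the original notebook.) The figure created above is only useful if
# something is drawn on it; one option, assuming seaborn is already imported as sns in this
# notebook (it is used further below), is a missing-value heatmap to complement the counts:
# sns.heatmap(df.isna(), cbar=False)
# plt.show()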
df.isna().sum() #Missing values processing #Replace all missing values with zero df.fillna(0,axis=0,inplace=True) #Convert numerical values into integers def change_num_type(dtf): for c in list(dtf.columns): try: dtf[c] = dtf[c].astype(int) except: pass change_num_type(df) # Checking for correlations # Cheacking for correlation: # Numeric variables correlation cmap = sns.diverging_palette(13, 13, s=90, n=5, as_cmap=True) fig, ax = plt.subplots(figsize=(30,10)) style.use('ggplot') corr = df.corr() mask = np.triu(np.ones_like(corr, dtype=np.bool)) ax = sns.heatmap(corr,mask=mask, annot=True, cmap=cmap, vmin = -1, vmax = 1) plt.title("Numerical Features Correlation Matrix", loc="left", fontsize=14) plt.show() df.head() df['age'].hist(edgecolor='k') df['country'].value_counts().plot.bar() df['batting_side'].value_counts().plot.bar() df['batters_faced'] df['runs-allowed'] sns.jointplot(x='height', y='weight', data =df) df.head()![Ethereum Wallet loading screen](https://i.imgur.com/BDs8lGK.png) ETHPrize dev interviews analysis ContextVarious community members and developers have been interviewing the best developerswe know currently working on Ethereum over the last few months.We have interviewed nearly 100 developers and plan on doing many more.The aim is to use all this data being gathered to identify the most important developer toolsand infrastructure needs our open source community has.We can then fund bounties for teams to actually go out and focus entirely on building the toolsand infrastructure the network requires to scale and attract the best talent.We already have a bounty out to do the analysis and have someone working on the problem,using some awesome tools that are also open sourceand will hopefully contribute back to the data scientist community in general:https://github.com/status-im/ETHPrize/issues/14However, if you'd like to dig in for yourself and show us what you got,please go ahead! As this is ongoing there will be many more interviews coming through. ContentNearly 100 interviews divided into 14 questions(1 question is quite general and contains several sub questions dependingon the domain specific knowledge of the person being interviewed).Not all interviews get to all questions based on time and connectivity constraints,but most cover a fair number of them. AcknowledgementsWe wouldn't be here without the help of others. <-- That pretty much says it all.This is a completely open source, community-led initiativeApart from a few Status tokens, we're all doing this our of the love of our hearts ;) InspirationI would like to see interactive insights for each question. However, I am most interested in 1. What is causing the biggest frustrations? 2. What tools don't exist right now that can and need to be built? 3. Where are the best educational resources and how did these developers learn what they know? 4. What are the best tools (especially those not often mentioned, and will require some context to identify, people working on it) 5. What are the other great bounties we can create to help the whole community? ChallengesThis is a pure open-ended text dataset: - no numerical data - no time data - no categorical data - only conversationsIt's also very niche, with lots of Ethereum, dev and crypto jargon, meaning our generic tool will probably struggle.Oh well! What you will findHow to use state of the art Natural Language Processing (Gensim and spaCy) to extract topics from the dataset.Note that we use a pretrained model based on general web pages. 
I'm not even sure crypto and Ethereum was a thing at the time.A dedicated Ethereum-focused model would probably have much better results. Table of Contents1  ETHPrize dev interviews analysis1.1  Context1.2  Content1.3  Acknowledgements1.4  Inspiration2  Challenges3  What you will find4  Exploratory Data Analysis4.1  Basic imports4.2  Loading data4.3  Setting up for EDA4.4  Q1 - What are the tools/libraries/frameworks you use?4.4.1  Cleaning the data4.4.1.1  Raw data4.4.1.2  Cleaning needs4.4.1.3  Clean text4.4.1.4  Simple word cloud4.5  Q2 - What are your biggest frustrations?4.5.1  Cleaning the data4.5.1.1  Raw data4.5.2  Preprocessing4.5.3  Extracting the topics4.5.4  Topic visualization4.5.4.1  As text4.5.4.2  Interactive visualization4.5.5  Cleanup5  Topic modelling at scale5.1  Introducing the production functions5.2  Sanity checks on developers frustrations5.2.1  Modelization5.2.2  Visualization (text and interactive)5.3  Let's try it on tooling as well5.3.1  Modelization5.3.2  Visualization (text and interactive)6  Analyzing easier non-niche fields6.1  Who are you and what are you working on?6.2  What are you excited about? Exploratory Data Analysis Basic importsimport pandas as pd # Visualization from wordcloud import WordCloud import matplotlib.pyplot as plt from IPython.display import Image import pprint # pretty printing # Natural language processing import spacy from gensim.utils import simple_preprocess from gensim.models import Phrases from gensim.models.phrases import Phraser import gensim.corpora as corpora from gensim.models.ldamodel import LdaModel from gensim.models import CoherenceModel import pyLDAvis import pyLDAvis.gensim pyLDAvis.enable_notebook() import warnings # remove all the deprecation and future warnings warnings.filterwarnings("ignore", category=DeprecationWarning) warnings.filterwarnings("ignore", category=FutureWarning) # Download the model with 'python -m spacy download en_core_web_lg --user' # Note: this is a 800MB model # 'en_core_web_sm', 29MB can be used as alternative # see https://spacy.io/models/en#section-en_core_web_lg nlp = spacy.load('en_core_web_lg')Loading datadf = pd.read_csv('./ETHPrize Developer Interviews.csv') # Anonymize answers (interviewees are OK to have their names public) # df.drop(columns=['Name'], inplace = True) # View a snippet of the data df.head(3) # View the counts of answers and non-empty answers df.describe() # What does a full answer look like: df['What are the tools/libraries/frameworks you use?'][0]Setting up for EDALet's start by giving each columns shortnames to ease working on them.We will also replace the NaN by blank answers. NaN will trip spaCy otherwise.df.columns = [ 'Name', # Name 'tooling', # What are the tools/libraries/frameworks you use? 'frustrations', # What are your biggest frustrations? 'testing', # How do you handle testing? 'smart_contract', # How do you handle smart contract verif & security? 'bounties', # Other bounties 'who_what', # Who are you and what are you working on? 'domain_questions', # Other domain specific questions? 'missing_tools', # What tools don’t exist at the moment? 'easier_expected', # Was anything easier than expected? 'excited_about', # What are you most excited about in the short term? 'hardest_part', # What was the hardest part to develop with Ethereum? 'best_resources', # What are the best educational resources? 'questions_to_ask', # Are there any other questions we should be asking? 'people_talk_to' # Who do you think we should talk to? 
] df.fillna('', inplace = True) df.head(3)Q1 - What are the tools/libraries/frameworks you use?We will use Named Entity Recognition (NER) techniques to extract the tools/libraries/framework names used by devs.Note that this is quite approximative as the pretrained model used, 'en_core_web_lg', is trained on general english (extracted from Common Crawl) not on devs or crypto focused publications. Still a non-negligible part of the web focus on dev tools/tutorials. See details: https://spacy.io/models/enen_core_web_lg Cleaning the data Raw dataFirst, let's check on the first 10 answers:- what does the raw answers look like?- can we run naive Named Entity Recognition on it?for i in range(10): print(f""" #################################### doc {i}, number of characters: {len(df['tooling'][i])} """) if df['tooling'][i] != '': doc = nlp(df['tooling'][i]) # nlp = spacy.load('en_core_web_lg') print(doc) del doc#################################### doc 0, number of characters: 930 EthereumJS / truffle hardwallet provider / Typedoc (to generate documentation) / We don’t use Truffle anymore Not reliable – can have race conditions; Artifacts don’t allow you to have versions of the contract on different networks You don’t run into this stuff if you’re just building a “hello world” Overwriting parts of the contracts; We rolled our own. It’s open-source but not documented. We have other things that are keeping us busy. The problem is that Truffle has too much functionality –– we would like to see more of a Unix philosophy. Cannot support different versions of Solidity. Version management doesn’t exist. EthPM –– doesn’t have versioning either? It’s going through a big refactor, so we held off. We need the NPM package manager. We have built a lot of developer tools to interface with the smart contracts ourselves. We have also built own deployer and migration tool, as truffle’s did not suit our needs[...]Wow that is intimidating. We have completely empty answers and answers with 4430 characters. And that's just within the first 10 interviews.Let's run NER on doc 0, 5 and 8, as they have lots of contents and a mix of various potentially problematiccharacters like / - –– -- !! and !!!for i in [0, 5, 8]: print(f""" #################################### doc {i}, number of characters: {len(df['tooling'][i])} document | {"Label".rjust(20)} | {"Entity type".rjust(15)}""") doc = nlp(df['tooling'][i]) for entity in doc.ents: print(f'{i:>8} | {entity.text.rjust(20)} | {entity.label_.rjust(15)}')#################################### doc 0, number of characters: 930 document | Label | Entity type 0 | Typedoc | PERSON 0 | Truffle | ORG 0 | – | DATE 0 | Truffle | LOC 0 | Solidity | ORG 0 | | ORG 0 | NPM | PERSON #################################### doc 5, number of characters: 1398 document | Label | Entity type 5 | Integration | ORG 5 | Geth/Parity | ORG 5 | Custom | PRODUCT 5 | RPC | ORG 5 | ETH.js | ORG 5 | Travis | PERSON 5 | Rinkeby Custom | PERSON 5 | Truffle | ORG [...]Cleaning needs- '/' should be replaced by ', ' to avoid Geth/Parity- Carriage returns '\n' should be replaced by '. 
'- We should keep ORG, PERSON, LOC, PRODUCTNot catched:- doc0: EthereumJS, Unix, EthPM- doc5: Visual Studio Code, Solidity, Solium, web3.js, testrpc, react, redux, augur, ethereumjs-blockstream, keythereum- doc8: WASM, ethers.js, solidityConclusions:- Lots of tools that doesn't start with an uppercase are missed.- Tools that starts with uppercase but begin the sentence are sometimes not recognized- The model is not good enough to extract dev tools. (If only we could have a ReCaptcha for Ethereum so that people tag dev tools =) )- One way to improve the model would be to: - extract ethereum tagged repos from Github and add them to keywords - idem from Ethereum-related wikis - have proper casing for common stuff like solidity -> Solidity - Some tagger like Prodi.gy https://prodi.gy/ or Aylien https://aylien.com/ Clean textLet's do some basic cleaning to see how it improve things stilldef clean_punct(txt): x = txt.replace("\n", ". ") x = x.replace(" -- ", ": ") x = x.replace(" - ", ": ") return x def reCase(txt): ## recasing common words so that spaCy picks them up as entities ## an ethereum specific NLP model shouldn't need that ## Also this is inefficient as we could replace everything in one pass ## but we'll worry about that for 10k+ interviews. x = txt.replace("solidity", "Solidity") x = x.replace("truffle", "Truffle") x = x.replace(" eth", " Eth") # space to avoid recasing geth into gEth x = x.replace(" geth", " Geth") # avoid together -> toGether ¯_(ツ)_/¯ x = x.replace("jQuery", "JQuery") x = x.replace(" react", " React") x = x.replace(" redux", " Redux") x = x.replace("testRPC", "TestRPC") x = x.replace("keythereum", "Keythereum") # ... return x for i in [0, 5, 8]: print(f""" #################################### doc {i}, number of characters: {len(df['tooling'][i])} document | {"Label".rjust(20)} | {"Entity type".rjust(15)}""") doc = nlp(reCase(clean_punct(df['tooling'][i]))) for entity in doc.ents: print(f'{i:>8} | {entity.text.rjust(20)} | {entity.label_.rjust(15)}') del doc#################################### doc 0, number of characters: 930 document | Label | Entity type 0 | Typedoc | PERSON 0 | Truffle | ORG 0 | – | DATE 0 | contracts;. | PERSON 0 | Truffle | LOC 0 | Solidity | ORG 0 | NPM | ORG 0 | Truffle | LOC #################################### doc 5, number of characters: 1398 document | Label | Entity type 5 | everyday | DATE 5 | Geth/Parity | ORG 5 | RPC | ORG 5 | ETH.js | ORG 5 | Travis | PERSON 5 | Rinkeby | ORG 5 | Truffle | LOC [...]Ugh, this will be better to analyze manually at the moment,automatic analysis is too unreliable astooling is too niche and the model does not have data on that.Let's do a word cloud for at the very least on PERSON, ORG and PRODUCT. Simple word cloud# Remove noise ignore_words = { 'Ethereum', 'UI', 'ETH', 'Eth', 'IDE', 'ABI' } def tooling_extraction(txt): if txt == '': return '' doc = nlp(reCase(clean_punct(txt))) tools = [] for named_entity in doc.ents: if named_entity.label_ in {'PERSON', 'ORG', 'PRODUCT'} and named_entity.text not in ignore_words: txt = named_entity.text.replace(' ', '_') tools.append(txt) return ', '.join(tools) df['tooling_extracted'] = df['tooling'].apply(tooling_extraction) # Reminder - a lot is missed due to the very niche domain # while the NLP model was trained on general web publications. 
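# (Added sketch, not part of the original notebook.) Before building the word cloud, a plain
# frequency count of the extracted entities gives a quick, sortable view of the most-mentioned
# tools; collections.Counter is assumed to be an acceptable extra import here.
from collections import Counter
tool_counts = Counter(
    tool
    for row in df['tooling_extracted']
    if row != ''
    for tool in row.split(', ')
)
print(tool_counts.most_common(20))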
df['tooling_extracted'] # Concatenating all tools tools = '' for idx, row in df['tooling_extracted'].iteritems(): tools += ', ' + row # Setting up the figure # Don't ask me what title and suptitle (super-title) are supposed to do ... plt.figure(figsize=(12,6)) wordcloud = WordCloud(background_color='white', width=500, height=300, max_font_size=50, max_words=80).generate(tools) plt.imshow(wordcloud) plt.title(""" ETHPrize devs - tools, library frameworks """, fontsize=20) plt.suptitle("""This is a basic approach, manual processing is recommended. At scale tagging 10~20% of the data manually before automated extraction on the rest would probably be much better """, fontsize=14) plt.axis("off") plt.show() # Cleanup del toolsVisualization can still be seriously improved, more cleaning can be doneas we have \_Parity/Parity, some person names (because products are recognized as person).I'm not even talking about the colors ¯\\_(ツ)_/¯.An example of what can be done can be seen on [avsa twitter on Ethereum shared value survey](https://twitter.com/avsa/status/1003655572590342145):![Ethereum shared values](https://pbs.twimg.com/media/De2tXpUXcAA1s-J.jpg)I'm open to jupyter-compatible wordcloud packages.In any case Irecommend to process this field manually for now.It is too niche for a general purpose NLP model. Q2 - What are your biggest frustrations?This field is probably much easier to process as frustrations are probably general English.We will extract coherent "topics" from the answer usinga LDA model (Latent Dirichlet Allocation) which is a probabilistic method.Alternatively a NMF model (Non-Negative Matrix Factorization) can be usedwhich is a linear algebraic method.One might work better than the other but only doing both will tell. Cleaning the data Raw dataLike before, let's first start by checking the raw data.- what does the raw answers look like?- can we run naive topic modelling on it?for i in range(10): print(f""" #################################### doc {i}, number of characters: {len(df['frustrations'][i])} """) if df['frustrations'][i] != '': doc = nlp(df['frustrations'][i]) # nlp = spacy.load('en_core_web_lg') print(doc) del doc#################################### doc 0, number of characters: 1932 Getting a simple experimental environment up is hard Remix is there; but it’s not enough. Tracing and profiling is not existent. Remix does it, but can’t do it locally. “Code coverage” tool exists but inserts console.logs in Adding up gas costs per line of code. Need to do profiling, because gas costs depend on inputs. Solidity language itself: Crashed the Solidity compiler twice today. E.g. AbiEncoderV2 is pretty new and hard to understand/use. The code that it produces is inefficient. There is a stark stack limit in solidity All the variables are locally scoped. Un-intuitive as it is a curly braced language. The lifecycle hook value_of is within a function. Can only access top 16 slack slots. Including input and output. We need to implement a graph coloring register allocator – to find out what the lifetime of the variables is. 
“This stack slot was used in the first half –– the second half you can use” Or implem[...]Seems like we have proper English words here (gas costs, scalability, documentation, errors, ...)so we will build a simple topic model.Let's pick doc 0 and see if tokenization works fine, we will pick the last part as it has lots of special characters.doc = nlp(df['frustrations'][0][-299:]) for token in doc: print(token.text) del docan interface – public or external for abstract functions – they collide ERCs – there is not a formal way to describe the interface for contracts . Have a standard repo –– this will lead to a canonical ABI definition –– can have semantic rules around this . Could run some specific unit tests againstTokenization seems to be good (further checks shows that stuff like "e.g." are properly recognized as well) PreprocessingFor preprocessing we will need to: - Remove the stopwords: "a, to, or, ..." - domain specific stopwords: "Ethereum" at least - question specific stopwords: "frustrations" - detect and create bigrams and trigrams, i.e. words often associated together like smart_contract - Lemmatize: i.e. specific, specificiy, specifically should count the same. - keep only what carry topics/sense (nouns, adjectives, verbs, adverbs) and discard the fluff# Lets have a look at the included stopwords print(nlp.Defaults.stop_words) def topic_modelling_preprocess(txt): # Remove stopwords stop_words = nlp.Defaults.stop_words stop_words.add('ethereum') stop_words.add('frustrations') cleaned = [word for word in simple_preprocess(txt, deacc = True) if word not in stop_words] if cleaned == []: return [] # Lemmatization - TODO improve doc = nlp(" ".join(cleaned)) result = [token.lemma_ for token in doc if token.pos_ in {'NOUN', 'ADJ', 'VERB', 'ADV'}] return result # Let's try it on doc 5 which is short (394 characters) print('#### Before\n') print(df['frustrations'][5]) print('#### After\n') topic_modelling_preprocess(df['frustrations'][5])#### Before Lack of a debugger - by far the biggest issue. Need to print things from failing transactions and can’t do this Right now, we just attempt the transaction until it doesn’t fail. We are often unsure why things fail, just keep going until it doesn’t. What’s missing is break points and executing code from that. That will shorten feedback cycles. Development environment is really bad right now… #### AfterThoughts: - We will miss the intensity in individual reply "really bad" - The time information "right now" - Still common themes and pain point will emerge after more than a hundred interviews - In the future, a model can be trained on each frustration to characterize the intensity and objectivity and the words associated to it.# Let's start with a quick and dirty model frustrations = df['frustrations'].apply(topic_modelling_preprocess) frustrationsI don't know if we should be happy that lemmatization doesn't: - Transform solidity into solid - Transform customize into custom, monitoring into monitor, regularly into regular, ... Note that we don't have n-grams yet. Extracting the topics# Create Dictionary id2word = corpora.Dictionary(frustrations) # Term Document Frequency corpus = [id2word.doc2bow(text) for text in frustrations] # Modelization, nuber of topics need to be tuned lda_model = LdaModel(corpus=corpus, id2word=id2word, num_topics=10, # <--- tune this random_state=1337, update_every=1, chunksize=100, passes=10, alpha='auto', per_word_topics=True)Topic visualizationLet's see the main keywords for each topics. 
Note that topics are created from probabilities of words being associated together.They are unbiased (no human preconception about an error topic, documentation topic, ...) As textpprint.pprint(lda_model.print_topics())[(0, '0.021*"transaction" + 0.017*"block" + 0.009*"receipt" + 0.008*"bloom" + ' '0.008*"need" + 0.008*"level" + 0.008*"metamask" + 0.008*"blockchain" + ' '0.007*"write" + 0.007*"account"'), (1, '0.021*"contract" + 0.014*"solidity" + 0.014*"code" + 0.012*"gas" + ' '0.011*"use" + 0.011*"hard" + 0.011*"need" + 0.010*"function" + ' '0.008*"truffle" + 0.008*"run"'), (2, '0.012*"people" + 0.012*"state" + 0.010*"doesn" + 0.009*"work" + ' '0.009*"contract" + 0.008*"chain" + 0.007*"fail" + 0.007*"run" + ' '0.007*"know" + 0.007*"don"'), (3, '0.015*"thing" + 0.013*"chain" + 0.012*"need" + 0.010*"gas" + 0.009*"limit" ' '+ 0.009*"problem" + 0.008*"stuff" + 0.008*"use" + 0.008*"good" + ' '0.008*"people"'), (4, '0.012*"need" + 0.012*"node" + 0.012*"error" + 0.012*"contract" + ' '0.011*"lot" + 0.010*"web" + 0.009*"run" + 0.008*"thing" + 0.008*"problem" + ' '0.007*"ne"'), (5, '0.014*"test" + 0.013*"need" + 0.012*"solidity" + 0.011*"run" + 0.011*"good" ' '+ 0.010*"[...]Interactive visualizationNote: topic IDs in pyLDAvis use a different index from the previous pretty-printed topics.Also, the interactive visualization doesn't work on Github.vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word) visWith this preprocessing, 10 topics are a bit too much, as we have at least 2 topics overlapping over the PC1/PC2 axis (the 2 principal components, the 2 axes that explain most of the variance in the dataset)Outcomes: - Test, people, solidity, [smart?] contract, gas are recurring themes - Hopefully using n-grams will draw a better picture. Cleanupdel id2word del corpus del frustrations del visTopic modelling at scaleWe put everything we did manually into a function so we can do topic modelling in a one-liner for the answers of the other questions.The model also measures its own performance via perplexity and coherence.Note: we do lemmatization after n-grams so that combinations like "non-technical" keep a canonical form.
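As a side note, the coherence score reported by that function could also drive the choice of num_topics instead of fixing it by hand. Here is a minimal sketch (not part of the original notebook; pick_num_topics and its argument names are illustrative):
# Sweep num_topics and keep the most coherent model (illustrative helper)
def pick_num_topics(corpus, id2word, texts, candidates=(4, 6, 8, 10, 12)):
    best_k, best_coherence = None, -1.0
    for k in candidates:
        lda = LdaModel(corpus=corpus, id2word=id2word, num_topics=k,
                       random_state=1337, passes=10, alpha='auto')
        score = CoherenceModel(model=lda, texts=texts, dictionary=id2word,
                               coherence='c_v').get_coherence()
        print(f'num_topics={k}: coherence={score:.4f}')
        if score > best_coherence:
            best_k, best_coherence = k, score
    return best_k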
Introducing the production functionsdef gen_stop_words(extra_stop_words): ## Expect a list of stop words in lowercase result = nlp.Defaults.stop_words for word in extra_stop_words: result.add(word) return result def model_topics(txts, num_topics, stop_words, use_ngrams = False, n_grams_min_count = 5, n_grams_score_threshold = 1): # Note: here we process the whole dataframe series # Transform the serie into a list of list of words, # Remove stopwords at the same time cleaned = [] for idx, txt in txts.iteritems(): # Remove stopwords cleaned += [[word for word in simple_preprocess(txt, deacc = True) if word not in stop_words]] if use_ngrams: # Build bigrams and trigrams bigrams = Phraser(Phrases(cleaned, min_count=n_grams_min_count, threshold=n_grams_score_threshold)) trigrams = Phraser(Phrases(bigrams[cleaned], threshold=n_grams_score_threshold)) # Now create the bag of words with the new trigrams cleaned = [trigrams[bigrams[txt]] for txt in cleaned] # Lemmatization - TODO improve lemmatized = [] for txt in cleaned: if txt == []: lemmatized += [] else: doc = nlp(" ".join(txt)) lemmatized += [[token.lemma_ for token in doc if token.pos_ in {'NOUN', 'ADJ', 'VERB', 'ADV'}]] print("Snippet of keywords for topic modelling for the first 3 answers") print(lemmatized[0:3]) # Create Dictionary id2word = corpora.Dictionary(lemmatized) # Term Document Frequency corpus = [id2word.doc2bow(text) for text in lemmatized] # Modelling lda_model = LdaModel(corpus=corpus, id2word=id2word, num_topics=num_topics, random_state=1337, update_every=1, chunksize=100, passes=10, alpha='auto', per_word_topics=True) ## Model performance print("\nModel performance\n") ## Perplexity print(f"""Perplexity: {lda_model.log_perplexity(corpus)}. Lower is better. See https://en.wikipedia.org/wiki/Perplexity. The best number of topics minimize perplexity. """) ## Coherence coherence = CoherenceModel( model=lda_model, texts=lemmatized, dictionary=id2word, coherence='c_v' ) ## Corpus coherence print(f'Whole model coherence: {coherence.get_coherence()}.') ## By topic coherence topic_coherences = coherence.get_coherence_per_topic() print(f""" By topic coherence. Higher is better. Measure how "well related" are the top words within the same topic. 
""") print(f'topic_id | {"top 3 keywords".rjust(45)} | topic coherence') for topic_id in range(num_topics): words_proba = lda_model.show_topic(topic_id, topn=3) words = [words for words,proba in words_proba] print(f'{topic_id:>8} | {str(words).rjust(45)} | {topic_coherences[topic_id]:>8.4f}') return lda_model, corpus, id2wordSanity checks on developers frustrations Modelizationstop_words = gen_stop_words(['ethereum', 'frustrations']) lda_model, corpus, id2word = model_topics( df['frustrations'], 10, stop_words, use_ngrams = True, n_grams_min_count = 1, n_grams_score_threshold = 1 # Need more data to have less permissive thresholds )Snippet of keywords for topic modelling for the first 3 answers [['get', 'simple', 'experimental', 'environment', 'hard', 'remix', 'trace', 'profile', 'existent', 'remix', 'locally', 'code_coverage', 'tool', 'exist', 'insert', 'console', 'log', 'add', 'gas_cost', 'line', 'code_ne', 'profiling', 'gas_cost', 'depend', 'input', 'solidity', 'language', 'crash', 'solidity_compiler', 'twice', 'today', 'abiencoderv', 'pretty', 'new', 'hard', 'understand', 'use', 'code', 'produce', 'inefficient', 'stark', 'stack', 'limit', 'solidity', 'variable', 'locally', 'scop', 'un', 'intuitive', 'curly', 'brace', 'language', 'lifecycle', 'hook', 'value_of', 'function', 'access', 'slack', 'slot', 'include', 'input', 'output', 'ne', 'implement', 'graph', 'coloring', 'register', 'allocator', 'find', 'lifetime', 'variable', 'stack', 'slot', 'half', 'second', 'half', 'use', 'implement', 'register', 'splitting', 'function', 'inlin', 'frustrating', 'safemath', 'turn', 'operator', 'function', 'huge', 'gas', 'sink[...]Wow this is much better with bigrams like gas_limit. My raw thoughts:```topic_id | top 3 keywords | topic coherence 0 | ['contract', 'thing', 'hard'] | 0.4731 -> documentation on smart contract 1 | ['solidity', 'need', 'build'] | 0.4084 -> improve solidity build system 2 | ['test', 'run', 'need'] | 0.3365 -> we need more tests 3 | ['gas_limit', 'state_channel', 'possible'] | 0.2845 -> unsure 4 | ['use', 'contract', 'tool'] | 0.2732 -> we need usage demos 5 | ['block', 'address', 'transaction'] | 0.3087 -> ... 
6 | ['user', 'need', 'block'] | 0.3906 7 | ['contract', 'need', 'build'] | 0.4742 8 | ['thing', 'error', 'need'] | 0.4019 9 | ['chain', 'write', 'solidity'] | 0.6289``` More info can be extracted with the interactive visualization Visualization (text and interactive)Note that interactive visualization does not work on Githubpprint.pprint(lda_model.print_topics()) vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word) vis # Interactive vizualization, doesn't work on Github[(0, '0.016*"contract" + 0.013*"thing" + 0.010*"hard" + 0.009*"test" + ' '0.009*"need" + 0.009*"work" + 0.007*"error" + 0.007*"library" + ' '0.006*"debug" + 0.006*"difficult"'), (1, '0.011*"solidity" + 0.009*"need" + 0.009*"build" + 0.009*"people" + ' '0.009*"work" + 0.008*"time" + 0.007*"good" + 0.007*"way" + 0.006*"new" + ' '0.006*"problem"'), (2, '0.014*"test" + 0.011*"run" + 0.009*"need" + 0.008*"write" + 0.007*"problem" ' '+ 0.007*"use" + 0.007*"lot" + 0.006*"solidity" + 0.006*"fail" + ' '0.006*"want"'), (3, '0.018*"gas_limit" + 0.012*"state_channel" + 0.009*"possible" + ' '0.009*"contract" + 0.009*"big" + 0.009*"datum" + 0.006*"hard" + 0.006*"get" ' '+ 0.006*"blockchain" + 0.006*"ecosystem"'), (4, '0.014*"use" + 0.014*"contract" + 0.014*"tool" + 0.011*"good" + 0.011*"want" ' '+ 0.008*"truffle" + 0.006*"need" + 0.006*"call" + 0.006*"code" + ' '0.006*"solution"'), (5, '0.010*"block" + 0.009*"address" + 0.009*"transaction" + 0.009*"metamask" + ' [...]Let's try it on tooling as wellSince Named Entity Extraction didn't work that well Modelizationstop_words = gen_stop_words(['ethereum', 'tool', 'tooling', 'tools']) lda_model, corpus, id2word = model_topics( df['tooling'], 7, stop_words, use_ngrams = True, n_grams_min_count = 1, n_grams_score_threshold = 1 # Need more data to have less permissive thresholds )Snippet of keywords for topic modelling for the first 3 answers [['ethereumj', 'truffle', 'hardwallet', 'provider', 'typedoc', 'generate', 'documentation', 'don_use', 'truffle', 'anymore', 'reliable', 'race', 'condition', 'artifact', 'don', 'allow', 'version', 'contract', 'different_network', 'don_run', 'stuff', 'building', 'world', 'overwrite', 'part', 'contract', 'roll', 'open_source', 'document', 'thing', 'keep', 'busy', 'problem', 'truffle', 'functionality', 'unix', 'philosophy', 'support', 'different', 'version', 'solidity', 'version', 'management', 'doesn', 'exist', 'ethpm', 'doesn', 'versioning', 'go', 'big', 'refactor', 'hold', 'need', 'package', 'manager', 'build', 'lot', 'developer', 'interface', 'smart_contract', 'build', 'deployer', 'migration', 'truffle', 'suit', 'need'], ['python', 'raiden', 'client'], ['end', 'visual_studio', 'code', 'docker', 'image', 'run', 'pythereum', 'unit', 'test', 'solium_linter', 'everyday', 'integration_test', 'run_geth', 'parity_node', 'custom'[...]Visualization (text and interactive)Note that interactive visualization does not work on Githubpprint.pprint(lda_model.print_topics()) vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word) vis # Interactive vizualization, doesn't work on GithubMmmh, that doesn't seem to work as well, many tools appears less frequently than generic stuff like "**web" or "standard". Analyzing easier non-niche fieldsThere are other fields related to tools and very specific Ethereum development jargon.We first start with fields that can be processed with a "normal English". 
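Before looking at the individual questions, a small helper (an illustrative sketch, not from the original notebook) can map each answer back to its dominant topic, which makes it easier to pick representative quotes per topic, a cheap approximation of the "pick top sentences related to each topic" idea mentioned below:
# Tag each answer with its dominant topic id, given the lda_model and corpus
# returned by model_topics (empty answers get -1)
def dominant_topics(lda_model, corpus):
    dominant = []
    for bow in corpus:
        topics = lda_model.get_document_topics(bow)
        dominant.append(max(topics, key=lambda t: t[1])[0] if topics else -1)
    return dominant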
Who are you and what are you working on?stop_words = gen_stop_words(['ethereum']) lda_model, corpus, id2word = model_topics( df['who_what'], 7, stop_words, use_ngrams = True, n_grams_min_count = 1, n_grams_score_threshold = 1 # Need more data to have less permissive thresholds ) vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word) vis # Interactive vizualization, doesn't work on GithubNot too sure what to make of this, apart from smart_contract being a recurrent theme.Some topics are related to: - app - client - hiring /recruiting - wasm - tokens / auction - bounties Ideally a future direction would be "extractive text summarization" Or simpler --> pick top sentences related to each topic. Some ideas here: https://github.com/mathsyouth/awesome-text-summarization What are you excited about?stop_words = gen_stop_words(['ethereum', 'exciting', 'excited']) lda_model, corpus, id2word = model_topics( df['excited_about'], 7, stop_words, use_ngrams = True, n_grams_min_count = 1, n_grams_score_threshold = 1 # Need more data to have less permissive thresholds ) vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word) vis # Interactive vizualization, doesn't work on GithubDirect Optimization + A* sampling for TSPimport os import numpy as np import torch import matplotlib import matplotlib.pyplot as plt import string %matplotlib inline from utils import load_model import dirpg from a_star_sampling import Node, Trajectory class opts: def __init__(self, max_interactions=200, alpha=1.0, epsilon=2.0, heuristic='mst', independent_gumbel=False, first_improvement=False, dynamic_weighting = False, dfs_like=False, not_prune=False): self.max_interactions = max_interactions self.first_improvement = first_improvement self.dynamic_weighting = dynamic_weighting self.independent_gumbel = independent_gumbel self.heuristic = heuristic self.dfs_like = dfs_like self.not_prune = not_prune self.alpha=alpha self.epsilon = epsilon dirpg_opts = opts() num_cities = 8 model, _ = load_model('outputs/tsp_{}/jupyter_example/DirPG_20200506T134440/'.format(num_cities), epoch = 0) # 'pretrained/tsp_100/') #model, _ = load_model('outputs/tsp_{}/visuals/DirPG_20200421T162602/'.format(num_cities), epoch = 1) #model, _ = load_model('outputs/tsp_{}/2epochs_ours/DirPG_20200506T004445/'.format(num_cities), epoch = 0) model.eval() # Put in evaluation mode to not track gradients dirpg = dirpg.DirPG(model, dirpg_opts) import heapq from utils import utils_gumbel import networkx as nx import time class PriorityQueue: def __init__(self, init_state, distance_mat, epsilon, search_params, inference=False ): self.queue = [] self.G = nx.Graph() Node.epsilon = epsilon init_state = init_state._replace(first_a=init_state.first_a.squeeze(0), prev_a=init_state.prev_a.squeeze(0), visited_=init_state.visited_.squeeze(0), lengths=init_state.lengths.squeeze(0), cur_coord=init_state.cur_coord.squeeze(0), ids=init_state.ids.squeeze(0), i=init_state.i.squeeze(0)) special_action = init_state.prev_a.item() not_visited = [i for i in range(init_state.loc.size(1)) if i != special_action] self.first_coord = init_state.loc[init_state.ids, special_action] self.graph_size = distance_mat.shape[1] # global nodes parameters # Node.alpha = search_params['alpha'] Node.epsilon = epsilon Node.dynamic_weighting = search_params['dynamic_weighting'] Node.heuristic = search_params['heuristic'] Node.graph_size = self.graph_size Node.dist = distance_mat self.root_node = Node(id=init_state.ids, first_a=init_state.first_a.item(), next_actions=not_visited, # 
torch.tensor(not_visited), # number of cities not_visited=not_visited, prefix=[special_action], lengths=0.0, cur_coord=self.first_coord, max_gumbel=utils_gumbel.sample_gumbel(0), t_opt=True) self.G.add_node(self.root_node) heapq.heappush(self.queue, self.root_node) if search_params['independent_gumbel']: direct_node = copy.copy(self.root_node) direct_node.t_opt = False heapq.heappush(self.queue, direct_node) self.current_node = self.root_node self.id = init_state.ids.item() self.trajectories_list = [] self.t_opt = None self.t_direct = None self.prune_count = 0 self.orig_dist = distance_mat self.start_search_direct = False self.start_time = float('Inf') # self.max_search_time = max_search_time self.num_interactions = 0 self.first_improvement = search_params['first_improvement'] self.max_interactions = search_params['max_interactions'] self.dfs_like = search_params['dfs_like'] self.p = search_params['prune'] self.dynamic_weighting = search_params['dynamic_weighting'] self.inference = inference self.prune = False self.lower_bound = -float('Inf') ####### plotting ####### #priority-queue: self.labels = {self.root_node : 'root'} self.nodes_opt = [] self.other_nodes = [] self.ids = 1 self.direct_node = None #prefix: def pop(self): if not self.queue: print('the queue is empty') return 'break' parent = heapq.heappop(self.queue) self.current_node = parent if self.num_interactions >= self.max_interactions: print('interactions budget is over') return 'break' if self.prune and self.lower_bound > parent.upper_bound: self.prune_count += 1 return self.pop() # Start the search time count if not parent.t_opt and not self.start_search_direct: self.start_time = time.time() self.start_search_direct = True if parent.done: return self.set_trajectory(parent) return parent def set_trajectory(self, node): t = Trajectory(actions=node.prefix, gumbel=node.max_gumbel, length=node.lengths - (self.first_coord - node.cur_coord).norm(p=2, dim=-1), objective=node.objective) self.trajectories_list.append(t) if node.t_opt: self.t_opt = t self.t_direct = t self.direct_node = node self.lower_bound = t.objective if self.inference: return 'break' else: if t.objective > self.t_direct.objective: # if len(self.trajectories_list) > 2: # print('here: ', len(self.trajectories_list)) self.t_direct = t self.lower_bound = t.objective self.direct_node = node if self.first_improvement: #print('***** priority(direct) > priority(opt) *****') print('first improvement') return 'break' if self.queue: return self.pop() else: # print('break') print('5') return 'break' def expand(self, state, logprobs): self.num_interactions += 1 special_action = state.prev_a.item() s = time.time() not_visited = [i for i in self.current_node.not_visited if i != special_action] cur_coord = state.loc[self.current_node.id, special_action] length = -(cur_coord - self.current_node.cur_coord).norm(p=2, dim=-1) #updated_prefix = self.current_node.prefix + [special_action] #dist = np.delete(np.delete(self.orig_dist, self.current_node.prefix[1:], 0), self.current_node.prefix[1:], 1) #print('******** orig ******') #print(self.orig_dist) #print('******** mod ******') #print(dist) special_child = Node( id=self.current_node.id, first_a=self.current_node.first_a, not_visited=not_visited, prefix=self.current_node.prefix + [special_action], lengths=self.current_node.lengths + length, cur_coord=cur_coord, done=len(not_visited) == 0, logprob_so_far=self.current_node.logprob_so_far + logprobs[special_action], max_gumbel=self.current_node.max_gumbel, next_actions=not_visited, 
depth=self.current_node.depth + 1, t_opt=self.current_node.t_opt, dfs_like=self.dfs_like) if special_child.t_opt: self.nodes_opt.append(special_child) else: self.other_nodes.append(special_child) self.G.add_edge(self.current_node, special_child) self.labels[special_child] = str(self.ids) self.ids+=1 if self.prune and special_child.upper_bound < self.lower_bound: self.prune_count += 1 else: heapq.heappush(self.queue, special_child) # Sample the max gumbel for the non-chosen actions and create an "other # children" node if there are any alternatives left. m = time.time() other_actions = [i for i in self.current_node.next_actions if i != special_action] assert len(other_actions) == len(self.current_node.next_actions) - 1 other_children = None if other_actions and not self.inference: other_max_location = utils_gumbel.logsumexp(logprobs[other_actions]) other_max_gumbel = utils_gumbel.sample_truncated_gumbel(self.current_node.logprob_so_far + other_max_location, self.current_node.max_gumbel).item() other_children = Node( id=self.current_node.id, first_a=self.current_node.first_a, not_visited=self.current_node.not_visited, prefix=self.current_node.prefix, lengths=self.current_node.lengths, cur_coord=self.current_node.cur_coord, done=self.current_node.done, logprob_so_far=self.current_node.logprob_so_far, max_gumbel=other_max_gumbel, next_actions=other_actions, depth=self.current_node.depth + 1, upper_bound=self.current_node.upper_bound, t_opt=False, dfs_like=False) self.other_nodes.append(other_children) self.G.add_edge(self.current_node, other_children) self.labels[other_children] = str(self.ids) self.ids+=1 if self.prune and other_children.upper_bound < self.lower_bound: self.prune_count += 1 else: heapq.heappush(self.queue, other_children) f = time.time() sp = m - s oth = f - m return special_child, other_children def encode(x, dirpg): embeddings = dirpg.encoder(x, only_encoder=True) state = dirpg.encoder.problem.make_state(x) fixed = dirpg.encoder.precompute(embeddings) return state, fixed x = torch.rand(1, 20, 2) def init_queue(x, dirpg, epsilon=1.0, alpha=1.0, start_from = 0): dirpg.search_params['alpha'] = alpha state, fixed = encode(x, dirpg) _, state = dirpg.forward_and_update(state, fixed, first_action=start_from) return PriorityQueue(init_state=state[torch.tensor(0)], distance_mat=state.dist[0], epsilon = epsilon, inference=False, search_params=dirpg.search_params), state, fixed def sample(queue, fixed, state): while queue: parent = queue.pop() if parent == 'break': return queue batch_state = state.stack_state([parent]) log_p, state = dirpg.forward_and_update(batch_state, fixed) queue.expand(state[torch.tensor(0)], log_p[0]) queue,state, fixed = init_queue(x, dirpg) queue = sample(queue, fixed, state) print(queue.num_interactions)Interactive tree plotplt.rcParams['figure.figsize'] = [16, 6] np.random.seed(3) torch.manual_seed(3) x = torch.rand(1, num_cities, 2) #x = torch.load('good_example_8graph') abc = string.ascii_lowercase[:x.size(1)] direct, first_direct = None, True queue,state, fixed = init_queue(x, dirpg, epsilon=2.0, alpha=1.0)Press Ctrl+Entr to expand the queue Left: priority queue, Right: prefix of the current node (yellow node in the left fig)cities = nx.DiGraph() cities.add_nodes_from(range(x.size(1))) parent = queue.pop() if parent == 'break': print('END') else: batch_state = state.stack_state([parent]) log_p, state = dirpg.forward_and_update(batch_state, fixed) sp, oth = queue.expand(state[torch.tensor(0)], log_p[0]) if queue.t_opt is not None: print('t_opt: ') 
print([abc[i] for i in queue.t_opt.actions]) if queue.t_direct is not None: print('t_direct: ') print([abc[i] for i in queue.t_direct.actions]) print('special child prefix: ') print([abc[i] for i in sp.prefix]) print('depth: ', sp.depth) plt.subplot(121) pos = nx.kamada_kawai_layout(queue.G) # nx.draw_networkx(queue.G,pos=pos, with_labels=False, font_weight='bold') colors = [] nx.draw_networkx_nodes(queue.G, pos, nodelist=[queue.root_node], node_size = 1000, node_color='g', alpha=0.8) opt_nodes = [i for i in queue.nodes_opt if i!=sp] nx.draw_networkx_nodes(queue.G, pos, nodelist=opt_nodes, node_size = 500, node_color='r', alpha=0.5) in_queue = [i for i in queue.other_nodes if i in queue.queue] nx.draw_networkx_nodes(queue.G, pos, nodelist=in_queue, node_size = 500, node_color='y', alpha=0.8) out_of_queue = [i for i in queue.other_nodes if i not in queue.queue] nx.draw_networkx_nodes(queue.G, pos, nodelist=out_of_queue, node_size = 500, node_color=[(0.2,0.2,0.2) for _ in range(len(out_of_queue))], alpha=0.6) nx.draw_networkx_nodes(queue.G, pos, nodelist=[sp], node_size = 500, node_color=[(0.0,1.0,0.0)], alpha=0.8) """ if first_direct and queue.t_direct != queue.t_opt: first_direct = False direct = queue.t_direct """ if queue.direct_node is not None: """ if direct != queue.t_direct: direct = queue.t_direct """ nx.draw_networkx_nodes(queue.G, pos, nodelist=[queue.direct_node], node_shape='^', node_size = 800, node_color=[(0.0,1.0,0.0)], alpha=0.8) nx.draw_networkx_edges(queue.G, pos, width=1.0, alpha=0.5) nx.draw_networkx_edges(queue.G, pos, edgelist=[(parent, sp)], width=8, alpha=0.5, edge_color='r') nx.draw_networkx_labels(queue.G, pos, labels= queue.labels, font_size=16) ##################### plt.subplot(122) pos2 = {i:loc.numpy() for i,loc in enumerate(x[0])} edgelist = [(sp.prefix[i],sp.prefix[i+1]) for i in range(len(sp.prefix)) if ispecial child prefix: ['a', 'd'] depth: 1Node size: max Gumbel Node color: epsilon(length + 2MST)def make_circule(n): G = nx.Graph() G.add_nodes_from(range(n)) pos = nx.circular_layout(G, scale=0.5, center=(0.5,0.5)) return torch.tensor([np.stack(list(pos.values()))], dtype = torch.float32) def min_max_norm(x, a, b): min_x = np.min(x) max_x = np.max(x) return a + (((x - min_x)*(b-a))/(max_x - min_x)) def norm(x): return (x-np.mean(x))/np.std(x) np.random.seed(4) torch.manual_seed(4) x = torch.rand(1, num_cities, 2) # x = make_circule(num_cities) #x = torch.load('good_example_8graph') queue,state, fixed = init_queue(x, dirpg, epsilon=10.0, alpha=2.0) update = False direct, first_direct = None, True parent = queue.pop() Node.budget = dirpg_opts.max_interactions update = not update cities = nx.DiGraph() cities.add_nodes_from(range(x.size(1))) if parent == 'break': print('END') else: if update: batch_state = state.stack_state([parent]) log_p, state = dirpg.forward_and_update(batch_state, fixed) sp, oth = queue.expand(state[torch.tensor(0)], log_p[0]) if queue.t_opt is not None: print('t_opt: ') print([abc[i] for i in queue.t_opt.actions]) if queue.t_direct is not None: print('t_direct: ') print([abc[i] for i in queue.t_direct.actions]) print('special child prefix: ') print([abc[i] for i in sp.prefix]) print('prune count: ', queue.prune_count) print('lower bound: ',queue.lower_bound ) print('scecial child: ') sp.print() print('other children: ') oth.print() if oth is not None else None #print('scecial upper bound: ',sp.upper_bound) #print('others upper bound: ',oth.upper_bound) if oth is not None else None ax = plt.subplot(121) pos = 
nx.kamada_kawai_layout(queue.G) # nx.draw_networkx(queue.G,pos=pos, with_labels=False, font_weight='bold') nx.draw_networkx_nodes(queue.G, pos, nodelist=[queue.root_node], node_size = 1000, node_color='g', alpha=0.8) nx.draw_networkx_edges(queue.G, pos, width=1.0, alpha=0.5) nx.draw_networkx_edges(queue.G, pos, edgelist=[(parent, sp)], width=8, alpha=0.5, edge_color=(0.0,1.0,0.0)) """ print('max_gumbel + eps*(- length - 2MST) = ') for i,j in zip([n.max_gumbel for n in queue.queue], [Node.epsilon*n.get_upper_bound(2.0).item() for n in queue.queue]): print(i, ' + ',j, ' = ', i+j ) """ org_s = [n.max_gumbel for n in queue.queue] s2 = [300+4000.0*np.exp(n.max_gumbel) for n in queue.queue] s_mm = min_max_norm(org_s, a=np.min(org_s) ,b=np.max([5000,np.max(org_s)])) s_n = 300+100*norm(org_s) colors = [n.eps_reward.item() for n in queue.queue] nx.draw(queue.G, pos, nodelist=queue.queue, node_size=s2, node_color=colors, cmap=plt.cm.YlOrRd, alpha=0.8) out_of_queue = [i for i in queue.G if i not in queue.queue if i != queue.root_node] nx.draw_networkx_nodes(queue.G, pos, nodelist=out_of_queue, node_size = 500, node_color=[(0.2,0.2,0.2) for _ in range(len(out_of_queue))], alpha=0.6) if not update: parent = queue.pop() ax.set_facecolor(color='none') nx.draw_networkx_nodes(queue.G, pos,ax=ax, nodelist=[parent], node_size = 4000, node_color='none', linewidths=3.0, node_shape = matplotlib.markers.MarkerStyle(marker='h', fillstyle='none'), edgecolors='b') if queue.direct_node is not None: nx.draw_networkx_nodes(queue.G, pos, nodelist=[queue.direct_node], node_shape='*', node_size = 1500, node_color=[(0.0,1.0,0.0)], alpha=0.8) nx.draw_networkx_labels(queue.G, pos, labels= queue.labels, font_size=16) ##################### plt.subplot(122) pos2 = {i:loc.numpy() for i,loc in enumerate(x[0])} edgelist = [(sp.prefix[i],sp.prefix[i+1]) for i in range(len(sp.prefix)) if ispecial child prefix: ['a', 'e'] prune count: 0 lower bound: -inf scecial child: ----------- Node ----------- id: tensor(0) first_a: 0 prefix: [0, 4] not visited: [1, 2, 3, 5, 6, 7] next_actions: [1, 2, 3, 5, 6, 7] t: 2 distance matrix: tensor([[0.0000, 0.5840, 0.7588, 0.5595, 0.4195, 0.4201, 0.3765, 0.2391], [0.5840, 0.0000, 0.1907, 1.1432, 0.9285, 0.2275, 0.8754, 0.7723], [0.7588, 0.1907, 0.0000, 1.3175, 1.1175, 0.3579, 1.0166, 0.9581], [0.5595, 1.1432, 1.3175, 0.0000, 0.3793, 0.9705, 0.4446, 0.4149], [0.4195, 0.9285, 1.1175, 0.3793, 0.0000, 0.8165, 0.5878, 0.1804], [0.4201, 0.2275, 0.3579, 0.9705, 0.8165, 0.0000, 0.6601, 0.6430], [0.3765, 0.8754, 1.0166, 0.4446, 0.5878, 0.6601, 0.0000, 0.4621], [0.2391, 0.7723, 0.9581, 0.4149, 0.1804, 0.6430, 0.4621, 0.0000]]) alpha*MST: tensor(-4.0270) priority: -37.30284118652344 objective: tensor(-5.4219) upper bound: tensor(-41.4974) lengths: tensor(-0.4195) bo[...]DFS vs BFS DFS: Nodes that extends a prefix are colored orange BFS: Nodes that search for actions other than the last node are colored blue Nodes that explore different prefix are colored yellowplt.rcParams['figure.figsize'] = [16, 6] np.random.seed(4) torch.manual_seed(4) x = torch.rand(1, num_cities, 2) # x = make_circule(num_cities) #x = torch.load('good_example_8graph') queue,state, fixed = init_queue(x, dirpg, epsilon=10.0, alpha=5.0) update = False direct, first_direct = None, True dfs, bfs, others = [],[],[] last_parent = None cities = nx.DiGraph() cities.add_nodes_from(range(x.size(1))) parent = queue.pop() if parent == 'break': print('END') else: batch_state = state.stack_state([parent]) log_p, state = 
dirpg.forward_and_update(batch_state, fixed) sp, oth = queue.expand(state[torch.tensor(0)], log_p[0]) if last_parent is not None and parent not in queue.nodes_opt: if parent.prefix == last_parent.prefix: bfs.append(parent) elif parent.prefix[:-1] == last_parent.prefix: dfs.append(parent) else: others.append(parent) if queue.t_opt is not None: print('t_opt: ') print([abc[i] for i in queue.t_opt.actions]) if queue.t_direct is not None: print('t_direct: ') print([abc[i] for i in queue.t_direct.actions]) print('special child prefix: ') print([abc[i] for i in sp.prefix]) print('depth: ', sp.depth) print('alpha: ', sp.alpha) plt.subplot(121) pos = nx.kamada_kawai_layout(queue.G) # nx.draw_networkx(queue.G,pos=pos, with_labels=False, font_weight='bold') nx.draw_networkx_nodes(queue.G, pos, nodelist=[queue.root_node], node_size = 1000, node_color='g', alpha=0.8) opt_nodes = [i for i in queue.nodes_opt if i!=sp] nx.draw_networkx_nodes(queue.G, pos, nodelist=queue.nodes_opt, node_size = 500, node_color='r', alpha=0.5) others_nodes = [i for i in queue.other_nodes if i not in dfs+bfs+others] nx.draw_networkx_nodes(queue.G, pos, nodelist=others_nodes, node_size = 500, node_color=[(0.2,0.2,0.2) for _ in range(len(others_nodes))], alpha=0.6) if dfs: nx.draw_networkx_nodes(queue.G, pos, nodelist=dfs, node_size = 500, node_color='orange', alpha=0.8) if bfs: nx.draw_networkx_nodes(queue.G, pos, nodelist=bfs, node_size = 500, node_color='blue', alpha=0.6) if others: nx.draw_networkx_nodes(queue.G, pos, nodelist=others, node_size = 500, node_color='magenta', alpha=0.6) nx.draw_networkx_nodes(queue.G, pos, nodelist=[sp], node_size = 500, node_color=[(0.0,1.0,0.0)], alpha=0.8) """ if first_direct and queue.t_direct != queue.t_opt: first_direct = False direct = queue.t_direct """ if queue.direct_node is not None: """ if direct != queue.t_direct: direct = queue.t_direct """ nx.draw_networkx_nodes(queue.G, pos, nodelist=[queue.direct_node], node_shape='^', node_size = 800, node_color=[(0.0,1.0,0.0)], alpha=0.8) nx.draw_networkx_edges(queue.G, pos, width=1.0, alpha=0.5) nx.draw_networkx_edges(queue.G, pos, edgelist=[(parent, sp)], width=8, alpha=0.5, edge_color='r') nx.draw_networkx_labels(queue.G, pos, labels= queue.labels, font_size=16) ##################### plt.subplot(122) pos2 = {i:loc.numpy() for i,loc in enumerate(x[0])} edgelist = [(sp.prefix[i],sp.prefix[i+1]) for i in range(len(sp.prefix)) if i 0 else distance_matrix """ dm = distance_matrix.clone() n_vertices = torch.tensor(dm.shape[0]) visited_vertices = torch.tensor([0]) # Add the first point num_visited = torch.tensor(1) # exclude self connections by assigning inf to the diagonal dm.fill_diagonal_(np.inf) mst_edges = torch.zeros(n_vertices, n_vertices, dtype=torch.bool, device=dm.device) while num_visited != n_vertices: new_edge = torch.argmin(dm[visited_vertices]) print(new_edge, n_vertices) new_edge = torch_divmod(new_edge, n_vertices) print(new_edge) new_edge = [visited_vertices[new_edge[0]], new_edge[1]] mst_edges[new_edge[0], new_edge[1]] = True print(visited_vertices,new_edge[1]) visited_vertices = torch.cat([visited_vertices,new_edge[1].unsqueeze(0)], dim=0) dm[visited_vertices, new_edge[1]] = np.inf dm[new_edge[1], visited_vertices] = np.inf num_visited += 1 return (mst_edges*distance_matrix) np.sum([[1,2,3],[1,2,3]]) a = generate_random_distance_matrix(5) print(a) a = prim_pytorch(a).tolist() print(a[2]) np.sum(a[2]) #mst(a.numpy()) def reduce_distance_matrix(x,nodes, prefix=True): """args: x: distance metrix NxN nodes: list of nodes to 
remove or to keep depending on prefix arg prefix: if true""" if prefix: return np.delete(np.delete(x, nodes[1:], 0), nodes[1:], 1) else: # not_visited + ind = torch.tensor(nodes, dtype=torch.long) return torch.index_select(torch.index_select(x,0,ind),1,ind) def reduced_mst(prefix, dm, mst_val): chosen = prefix[-1] not_visited = [j for j in range(len(dm)) if j not in prefix] reduce_distance_matrix(generate_random_distance_matrix(5),[2,3], False)Compare MST to Reduced-MSTThe root node of the priority queue computes the MST of the complete graph (n cities).When the priority queue is expanded, the next node computes the MST of n-1 cities.Here we compare two alternatives for estimating the MST starting from n to 1:1. exact MST computation 2. removing the chosen node and edges from the last MSTn = 5 mst_vals = [0 for _ in range(n)] rmst_vals = [0 for _ in range(n)] for trial in range(1): x = generate_random_distance_matrix(n) mst = _mst(x) prefix = [] for chosen in range(n,1,-1): exact_mst = _mst(x) rmst = reduced_mst(chosen, rmst) prefix.append(chosen) not_visited = [j for j in range(n) if j not in prefix] x = reduce_distance_matrix(x,not_visited,False) print(x) def convert_distance_matrix_to_batched_edges(distance_matrix): """distance_matrix: batch of distance matrices. size: [batch, n, n] returns weights_and_edges: in shape (batch_size, n * (n - 1) / 2, 3), where weights_and_edges[.][i] = [weight_i, node1_i, node2_i] for edge i.""" weights_and_edges = torch. kruskals_cpp_pytorch() import random np_time = 0 torch_time = 0 for i in range(5,20): prefix = random.sample(range(i), random.sample(range(1,i), 1)[0]) not_visited = [j for j in range(i) if j not in prefix] a = generate_random_distance_matrix(i) s = time.time() reduce_distance_matrix(a,prefix, True) d = time.time() reduce_distance_matrix(a,not_visited, False) torch_time += (time.time() - d) np_time += (d-s) print(torch_time) print(np_time)0.0010297298431396484 0.0021178722381591797--no_progress_bar --graph_size 20 --not_prune --annealing 0.005 --epsilon 25 --alpha 1.5 --dynamic_weighting --exp_name bs200eps25alpha15ann005 --epoch_size 128000 --n_epochs 100 --batch_size 200prefix = random.sample(range(10), random.sample(range(1,10), 1)[0]) print(prefix) not_visited = [j for j in range(10) if j not in prefix] not_visited import random l = [] for i in range(100000): l.append(torch.tensor(i)) s = time.time() a = torch.tensor(l) print(time.time()-s) s = time.time() b = torch.stack(l) print(time.time()-s) import torch import kruskals_cpp n = 4 weights = np.array([0.7601073, -0.20460297, -0.4689217, -0.5127163, -1.9022679, 1.1506207]) vertices = np.triu_indices(n=n-1, m=n, k=1) weights_and_edges = np.array( [list(e) for e in zip(weights, vertices[0], vertices[1])]) class A: one = 1 two = 2 def __init__(self, x): self.x = x class B: def __init__(self, param): self.Geometric fucntion$\theta = \dfrac{180}{\pi} \tan^{-1}\left(\dfrac{x + a}{s}\right) + \beta$$x$ and $a$ in the units of channels. 
$s$ is also in the units of channels and it rescales the detector range.$\beta[^\circ]$ is an angle.$\dfrac{\partial\theta}{\partial s} = -\dfrac{180}{\pi} \dfrac{a+x}{a^2+2ax+s^2+x^2}= -\dfrac{180}{\pi} \dfrac{a+x}{(a+x)^2+s^2}$$\dfrac{\partial\theta}{\partial a} = \dfrac{180}{\pi} \dfrac{s}{a^2+2ax+s^2+x^2} = \dfrac{180}{\pi} \dfrac{s}{(a+x)^2+s^2}$$\dfrac{\partial\theta}{\partial \beta} = 1$$\theta = \dfrac{180}{\pi}\left[ \tan^{-1}\left(\dfrac{x + a}{z\sin\left(\beta\right)}\right) + \beta\right]$here $\beta$ is in radians$\dfrac{\partial\theta}{\partial z} = -\dfrac{180}{\pi}\dfrac{(a + x) \csc(\beta)}{z^2 + (a + x)^2 \csc^2(\beta)}$$\dfrac{\partial\theta}{\partial a} = \dfrac{180}{\pi}\dfrac{z \csc(\beta)}{z^2 + (a + x)^2 \csc^2(\beta))}$$\dfrac{\partial\theta}{\partial b} = \dfrac{180}{\pi} \left[ 1 - \dfrac{z (a + x) \cot(\beta) \csc(\beta)}{z^2 + (a + x)^2 \csc^2(\beta)} \right]$def fce_trig(x,a,b,s): return (arctan((x+a)/s)) * 180 / pi + b def fce_trigz(x,a,b,z): t = z * sin(b) return (arctan((x+a)/t) + b) * 180 / pi def theta0(a,s): return arctan(a/s)*180/pi def thetam(a,s): return arctan((a+1279)/s)*180/pi def alpha(a,s,b): return theta0(a,s)+b,thetam(a,s)+b def dthetads(x,a,s): return -180 / pi * (a+x)/((a+x)**2+s**2) def dthetada(x,a,s): return 180 / pi * (s)/((a+x)**2+s**2) z = loadtxt('calibration.ini',unpack=True) x,y = z opt_lin,var=curve_fit(fce_linear,x,y) opt_quad,var=curve_fit(fce_quad,x,y) opt_trip,var=curve_fit(fce_trip,x,y) opt_trig,var=curve_fit(fce_trig,x,y) opt_trigz,var=curve_fit(fce_trigz,x,y,p0=opt_trig) a,b,s = opt_trig _a,_b,z = opt_trigz print(a,_a) opt_trig,theta0(a,s),thetam(a,s),alpha(a,s,b) opt_trigz _x = arange(0,1280) figure(figsize=(12,7)) plot(_x,fce_trip(_x,*opt_trip),':',label='trip') plot(_x,fce_trig(_x,*opt_trig),'-',label='trig') plot(_x,fce_trigz(_x,*opt_trigz),'-',label='trigz') plot(x,y,'+',ms=32,label='calibration points') xlim(0,1280) legend(frameon=False) x = linspace(-10000,10000,1000) y = fce_trig(x,*opt_trig) plot(x,y) x = linspace(_x[0],_x[-1],1000) y = fce_trig(x,*opt_trig) plot(x,y,'k-',lw=2) a,b,s = opt_trig ylim(-90+b,90+b) xlim(-10000,10000) xlabel('channel') ylabel(r'angle $\theta$') a,b,s = opt_trig for d in [-300,0,300]: x = linspace(-10000,10000,1000) y = fce_trig(x,a,b,s+d) plot(x,y) x = linspace(_x[0],_x[-1],1000) y = fce_trig(x,a,b,s+d) plot(x,y,'k-',lw=1) a,b,s = opt_trig ylim(10,70) xlim(-200,1500) xlabel('channel') ylabel(r'angle $\theta$') print(y[0],y[-1]) a,b,z = opt_trigz for d in [-30,0,30]: x = linspace(-10000,10000,1000) y = fce_trigz(x,a,b,z+d) plot(x,y) x = linspace(_x[0],_x[-1],1000) y = fce_trigz(x,a,b,z+d) plot(x,y,'k-',lw=1) ylim(14,54) xlim(-200,1500) xlabel('channel') ylabel(r'angle $\theta$') print(y[0],y[-1]) x = linspace(-10000,10000,20000) y = dthetads(x,*opt_trig[:2]) plot(x,y) x = linspace(_x[0],_x[-1],20000) y = dthetads(x,*opt_trig[:2]) plot(x,y,'k-',lw=2) xlim(-1000,2000) a,s = opt_trig[:2] for d in [0,100,200]: x = linspace(-10000,10000,20000) y = dthetada(x,a,s+d) plot(x,y) x = linspace(_x[0],_x[-1],20000) y = dthetada(x,a,s+d) plot(x,y,'k-',lw=2) xlim(-1000,2000)Copyright 2018 Google LLC.# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.Cat vs. Dog Image Classification with TensorFlow Exercise 2: Reducing Overfitting**_Estimated completion time: 30 minutes_**In this notebook we will build on the model we created in Exercise 1 to classify cats vs. dogs, and improve accuracy by employing a couple strategies to reduce overfitting: **data augmentation** and **dropout**. We will follow these steps:1. Explore how data augmentation works by making random transformations to training images.2. Add data augmentation to our data preprocessing.3. Add dropout to the convnet.4. Retrain the model and evaluate loss and accuracy. Let's get started! Exploring Data AugmentationLet's get familiar with the concept of **data augmentation**, an essential way to fight overfitting for computer vision models.In order to make the most of our few training examples, we will "augment" them via a number of random transformations, so that at training time, **our model will never see the exact same picture twice**. This helps prevent overfitting and helps the model generalize better.This can be done by configuring a number of random transformations to be performed on the images read by our `ImageDataGenerator` instance. Let's get started with an example:from tensorflow import keras from keras.preprocessing.image import ImageDataGenerator datagen = ImageDataGenerator( rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')These are just a few of the options available (for more, see the [Keras documentation](https://keras.io/preprocessing/image/). Let's quickly go over what we just wrote:- `rotation_range` is a value in degrees (0–180), a range within which to randomly rotate pictures.- `width_shift` and `height_shift` are ranges (as a fraction of total width or height) within which to randomly translate pictures vertically or horizontally.- `shear_range` is for randomly applying shearing transformations.- `zoom_range` is for randomly zooming inside pictures.- `horizontal_flip` is for randomly flipping half of the images horizontally. This is relevant when there are no assumptions of horizontal assymmetry (e.g. real-world pictures).- `fill_mode` is the strategy used for filling in newly created pixels, which can appear after a rotation or a width/height shift.Let's take a look at our augmented images. First let's set up our example files, as in Exercise 1. **NOTE:** The 2,000 images used in this exercise are excerpted from the ["Dogs vs. Cats" dataset](https://www.kaggle.com/c/dogs-vs-cats/data) available on Kaggle, which contains 25,000 images. 
Here, we use a subset of the full dataset to decrease training time for educational purposes.!wget --no-check-certificate \ https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip -O \ /tmp/cats_and_dogs_filtered.zip import os import zipfile local_zip = '/tmp/cats_and_dogs_filtered.zip' zip_ref = zipfile.ZipFile(local_zip, 'r') zip_ref.extractall('/tmp') zip_ref.close() base_dir = '/tmp/cats_and_dogs_filtered' train_dir = os.path.join(base_dir, 'train') validation_dir = os.path.join(base_dir, 'validation') # Directory with our training cat pictures train_cats_dir = os.path.join(train_dir, 'cats') # Directory with our training dog pictures train_dogs_dir = os.path.join(train_dir, 'dogs') # Directory with our validation cat pictures validation_cats_dir = os.path.join(validation_dir, 'cats') # Directory with our validation dog pictures validation_dogs_dir = os.path.join(validation_dir, 'dogs') train_cat_fnames = os.listdir(train_cats_dir) train_dog_fnames = os.listdir(train_dogs_dir)Next, let's apply the `datagen` transformations to a cat image from the training set to produce five random variants. Rerun the cell a few times to see fresh batches of random variants.%matplotlib inline import matplotlib.pyplot as plt import matplotlib.image as mpimg from keras.preprocessing.image import array_to_img, img_to_array, load_img img_path = os.path.join(train_cats_dir, train_cat_fnames[2]) img = load_img(img_path, target_size=(150, 150)) # this is a PIL image x = img_to_array(img) # Numpy array with shape (150, 150, 3) x = x.reshape((1,) + x.shape) # Numpy array with shape (1, 150, 150, 3) # The .flow() command below generates batches of randomly transformed images # It will loop indefinitely, so we need to `break` the loop at some point! i = 0 for batch in datagen.flow(x, batch_size=1): plt.figure(i) imgplot = plt.imshow(array_to_img(batch[0])) i += 1 if i % 5 == 0: breakAdd Data Augmentation to the Preprocessing StepNow let's add our data-augmentation transformations from [**Exploring Data Augmentation**](scrollTo=E3sSwzshfSpE) to our data preprocessing configuration:# Adding rescale, rotation_range, width_shift_range, height_shift_range, # shear_range, zoom_range, and horizontal flip to our ImageDataGenerator train_datagen = ImageDataGenerator( rescale=1./255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True,) # Note that the validation data should not be augmented! test_datagen = ImageDataGenerator(rescale=1./255) # Flow training images in batches of 32 using train_datagen generator train_generator = train_datagen.flow_from_directory( train_dir, # This is the source directory for training images target_size=(150, 150), # All images will be resized to 150x150 batch_size=20, # Since we use binary_crossentropy loss, we need binary labels class_mode='binary') # Flow validation images in batches of 32 using test_datagen generator validation_generator = test_datagen.flow_from_directory( validation_dir, target_size=(150, 150), batch_size=20, class_mode='binary')If we train a new network using this data augmentation configuration, our network will never see the same input twice. However the inputs that it sees are still heavily intercorrelated, so this might not be quite enough to completely get rid of overfitting. Adding DropoutAnother popular strategy for fighting overfitting is to use **dropout**. 
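Before wiring dropout into the convnet below, here is a minimal standalone sketch of what a dropout rate of 0.5 does to a layer's activations during training. It is purely illustrative (plain NumPy, made-up values and seed, not part of the exercise code):
import numpy as np
rng = np.random.default_rng(0)        # fixed seed, illustration only
rate = 0.5                            # same rate as the Dropout(0.5) layer added below
x = np.ones(8)                        # pretend these are 8 activations from a Dense layer
mask = rng.random(x.shape) >= rate    # each unit is kept with probability 1 - rate
print(x * mask / (1 - rate))          # "inverted" dropout: survivors are rescaled by 1/(1-rate)
                                      # so the expected activation is unchanged; at inference
                                      # time the dropout layer is simply a pass-through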
**TIP:** To learn more about dropout, see [Training Neural Networks](https://developers.google.com/machine-learning/crash-course/training-neural-networks/video-lecture) in [Machine Learning Crash Course](https://developers.google.com/machine-learning/crash-course/). Let's reconfigure our convnet architecture from Exercise 1 to add some dropout, right before the final classification layer:from keras.models import Model from keras import layers from keras.optimizers import RMSprop from keras import backend as K import tensorflow as tf # Configure the TF backend session tf_config = tf.ConfigProto( gpu_options=tf.GPUOptions(allow_growth=True)) K.set_session(tf.Session(config=tf_config)) # Our input feature map is 150x150x3: 150x150 for the image pixels, and 3 for # the three color channels: R, G, and B img_input = layers.Input(shape=(150, 150, 3)) # First convolution extracts 16 filters that are 3x3 # Convolution is followed by max-pooling layer with a 2x2 window x = layers.Conv2D(16, 3, activation='relu')(img_input) x = layers.MaxPooling2D(2)(x) # Second convolution extracts 32 filters that are 3x3 # Convolution is followed by max-pooling layer with a 2x2 window x = layers.Conv2D(32, 3, activation='relu')(x) x = layers.MaxPooling2D(2)(x) # Third convolution extracts 64 filters that are 3x3 # Convolution is followed by max-pooling layer with a 2x2 window x = layers.Convolution2D(64, 3, activation='relu')(x) x = layers.MaxPooling2D(2)(x) # Flatten feature map to a 1-dim tensor x = layers.Flatten()(x) # Create a fully connected layer with ReLU activation and 512 hidden units x = layers.Dense(512, activation='relu')(x) # Add a dropout rate of 0.5 x = layers.Dropout(0.5)(x) # Create output layer with a single node and sigmoid activation output = layers.Dense(1, activation='sigmoid')(x) # Configure and compile the model model = Model(img_input, output) model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.001), metrics=['acc'])Retrain the ModelWith data augmentation and dropout in place, let's retrain our convnet model. This time, let's train on all 2,000 images available, for 30 epochs, and validate on all 1,000 test images. (This may take a few minutes to run.) See if you can write the code yourself:# WRITE CODE TO TRAIN THE MODEL ON ALL 2000 IMAGES FOR 30 EPOCHS, AND VALIDATE # ON ALL 1,000 TEST IMAGESSolutionClick below for the solution.history = model.fit_generator( train_generator, steps_per_epoch=100, epochs=30, validation_data=validation_generator, validation_steps=50, verbose=2)Note that with data augmentation in place, the 2,000 training images are randomly transformed each time a new training epoch runs, which means that the model will never see the same image twice during training. Evaluate the ResultsLet's evaluate the results of model training with data augmentation and dropout:# Retrieve a list of accuracy results on training and test data # sets for each training epoch acc = history.history['acc'] val_acc = history.history['val_acc'] # Retrieve a list of list results on training and test data # sets for each training epoch loss = history.history['loss'] val_loss = history.history['val_loss'] # Get number of epochs epochs = range(len(acc)) # Plot training and validation accuracy per epoch plt.plot(epochs, acc) plt.plot(epochs, val_acc) plt.title('Training and validation accuracy') plt.figure() # Plot training and validation loss per epoch plt.plot(epochs, loss) plt.plot(epochs, val_loss) plt.title('Training and validation loss')Much better! 
We are no longer overfitting, and we have gained ~3 validation accuracy percentage points (see the green line in the top chart). In fact, judging by our training profile, we could keep fitting our model for 30+ more epochs and we could probably get to ~80%! Clean UpBefore running the next exercise, run the following cell to terminate the kernel and free memory resources:import os, signal os.kill(os.getpid(), signal.SIGKILL)_Lambda School Data Science — Tree Ensembles_ Decision Trees Assignment Part 1: House Price RegressionApply decision trees to the Ames housing dataset you've worked with this week!- Try multiple features- Try features you've engineered- Try different `max_depth` paramaters- What's the best Test Root Mean Squared Error you can get? *Share with your cohort on Slack!*- What's a cool visualization you can make? *Share with your cohort on Slack!*import graphviz import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.linear_model import LinearRegression, LogisticRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score from sklearn.model_selection import train_test_split import statsmodels.api as sm from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor, export_graphviz from sklearn.impute import SimpleImputer pd.set_option('display.max_columns', 500) pd.set_option('display.max_rows', 500) df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv') df['ExterQual'] = df['ExterQual'].replace({'Ex': 4, 'Gd': 3, 'TA':2, 'Fa':1, 'Po':0}) df['ExterCond'] = df['ExterCond'].replace({'Ex': 4, 'Gd': 3, 'TA':2, 'Fa':1, 'Po':0}) df['BsmtQual'] = df['BsmtQual'].replace({'Ex': 5, 'Gd': 4, 'TA':3, 'Fa':2, 'Po':1, np.nan:0}) df['BsmtCond'] = df['BsmtCond'].replace({'Ex': 5, 'Gd': 4, 'TA':3, 'Fa':2, 'Po':1, np.nan:0}) df['BsmtFinType1'] = df['BsmtFinType1'].replace({'GLQ': 6, 'ALQ': 5, 'BLQ': 4, 'Rec':3, 'LwQ':2, 'Unf':1, np.nan:0}) df['BsmtFinType2'] = df['BsmtFinType2'].replace({'GLQ': 6, 'ALQ': 5, 'BLQ': 4, 'Rec':3, 'LwQ':2, 'Unf':1, np.nan:0}) df['HeatingQC'] = df['HeatingQC'].replace({'Ex': 4, 'Gd': 3, 'TA':2, 'Fa':1, 'Po':0}) df['CentralAir'] = df['ExterQual'].replace({'No':0, 'Yes':1}) df['KitchenQual'] = df['KitchenQual'].replace({'Ex': 4, 'Gd': 3, 'TA':2, 'Fa':1, 'Po':0}) df['FireplaceQu'] = df['FireplaceQu'].replace({'Ex': 5, 'Gd': 4, 'TA':3, 'Fa':2, 'Po':1, np.nan:0}) df['GarageQual'] = df['GarageQual'].replace({'Ex': 5, 'Gd': 4, 'TA':3, 'Fa':2, 'Po':1, np.nan:0}) df['GarageCond'] = df['GarageCond'].replace({'Ex': 5, 'Gd': 4, 'TA':3, 'Fa':2, 'Po':1, np.nan:0}) df['PoolQC'] = df['PoolQC'].replace({'Ex': 5, 'Gd': 4, 'TA':3, 'Fa':2, np.nan:0}) # get rid of nulls in Garage year built feature df['GarageYrBlt'] = df['GarageYrBlt'].replace(np.nan,df['YearBuilt']) ## log linear of price df['ln_price'] = np.log(df['SalePrice']) df_test = df ## features engineered df_test['GarageCars_Squared'] = df_test['GarageCars']**2 df_test['BsmtFinSF1_Squared'] = df_test['BsmtFinSF1']**2 df_test['BsmtQual_Squared'] = df_test['BsmtQual']**2 df_test['GarageYrBlt_Squared'] = df_test['GarageYrBlt']**2 df_test['age_at_time_sold'] = df_test['YrSold'] - df_test['YearBuilt'] df_test['total_area'] = ( df_test['GarageArea'] + df_test['GrLivArea'] +df_test['TotalBsmtSF'] ) df_test['size_yard'] = df_test['LotArea'] - df_test['GrLivArea'] df_test['yard_int_liv'] = df_test['size_yard'] * df_test['GrLivArea'] df_test['qual_garage_int_bsmt'] = df_test['GarageQual'] * 
df_test['BsmtQual'] df_test['qual_exter_int_kitchen'] = df_test['ExterQual'] * df_test['KitchenQual'] df_test['total_rooms_bathrooms'] = df_test['TotRmsAbvGrd'] + df_test['FullBath'] df_test['Remodeled'] = (df_test['YearRemodAdd'] != df_test['YearBuilt']) * 1 df_test['RecentRemodel'] = (df_test['YearRemodAdd'] == df_test['YrSold']) * 1 ## function to visualize decision tree def viztree(decision_tree, feature_names): """Visualize a decision tree""" dot_data = export_graphviz(decision_tree, out_file=None, feature_names=feature_names, filled=True, rounded=True) return graphviz.Source(dot_data) def run_decision_tree_model(X, y, features, max_depth=1): X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.80, test_size=.20, random_state=42) tree = DecisionTreeRegressor(max_depth=max_depth) tree.fit(X_train, y_train) print('Train R^2 Score: ', tree.score(X_train, y_train)) print('Test R^2 Score: ', tree.score(X_test, y_test)) display(viztree(tree, feature_names=features)) target = 'ln_price' features = ['total_area', 'OverallQual'] y = df_test[target] X = df_test[features] run_decision_tree_model(X, y, features, 4)Train R^2 Score: 0.8044993479007602 Test R^2 Score: 0.782080670300658Part 2 / Stretch: "Play Tennis" Classification We'll reproduce the "Play Tennis" example from 's 1986 paper, [Induction of Decison Trees](https://link.springer.com/content/pdf/10.1007%2FBF00116251.pdf).[According to Wikipedia](https://en.wikipedia.org/wiki/Ross_Quinlan), " is a computer science researcher in data mining and decision theory. He has contributed extensively to the development of decision tree algorithms, including inventing the canonical C4.5 and ID3 algorithms." "Table 1 shows a small training set"import pandas as pd columns = 'No. Outlook Temperature Humidity Windy PlayTennis'.split() raw = """1 sunny hot high false N 2 sunny hot high true N 3 overcast hot high false P 4 rain mild high false P 5 rain cool normal false P 6 rain cool normal true N 7 overcast cool normal true P 8 sunny mild high false N 9 sunny cool normal false P 10 rain mild normal false P 11 sunny mild normal true P 12 overcast mild high true P 13 overcast hot normal false P 14 rain mild high true N""" data = [row.split() for row in raw.split('\n')] tennis = pd.DataFrame(data=data, columns=columns).set_index('No.') tennis['PlayTennis'] = (tennis['PlayTennis'] == 'P').astype(int) tennis"A decision tree that correctly classifies each object in the training set is given in Figure 2." In this dataset, the tennis player decided to play on 64% of the days, and decided not to on 36% of the days.tennis['PlayTennis'].value_counts(normalize=True) * 100The tennis player played on 100% of the overcast days, 40% of the sunny days, and 60% of the rainy daystennis.groupby('Outlook')['PlayTennis'].mean() * 100On sunny days, the tennis player's decision depends on the humidity. (The Outlook and Humidity features interact.)sunny = tennis[tennis['Outlook']=='sunny'] sunny.groupby('Humidity')['PlayTennis'].mean() * 100On rainy days, the tennis player's decision depends on the wind. 
(The Outlook and Windy features interact.)rainy = tennis[tennis['Outlook']=='rain'] rainy.groupby('Windy')['PlayTennis'].mean() * 100Before modeling, we will ["encode" categorical variables, using pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/reshaping.html?highlight=get_dummiescomputing-indicator-dummy-variables)y = tennis['PlayTennis'] X = pd.get_dummies(tennis.drop(columns='PlayTennis')) XTrain a Decision Tree ClassifierGet a score of 100% (accuracy)https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.htmldef classifier_decision_tree_model(X, y, features, max_depth=1): X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.80, test_size=.20, random_state=42) tree = DecisionTreeClassifier(max_depth=max_depth) tree.fit(X_train, y_train) print('Train Accuracy Score: ', tree.score(X_train, y_train)) print('Test Accuracy Score: ', tree.score(X_test, y_test)) display(viztree(tree, feature_names=features)) features = X.columns classifier_decision_tree_model(X, y, features, 4)Train Accuracy Score: 1.0 Test Accuracy Score: 1.0Compare to Logistic Regressionhttps://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.htmldef logistic_regression(X, y): X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.80, test_size=.20, random_state=42) model = LogisticRegression() model.fit(X_train, y_train) print('Train Accuracy Score: ', model.score(X_train, y_train)) print('Test Accuracy Score: ', model.score(X_test, y_test)) logistic_regression(X,y)Train Accuracy Score: 0.8181818181818182 Test Accuracy Score: 0.3333333333333333Visualize the treehttps://scikit-learn.org/stable/modules/generated/sklearn.tree.export_graphviz.html## ^^norm_fb_dfprint(norm_fb_df.head(10)) fb_df.head(10) import matplotlib.pyplot as plt fb_df.Date ax = fb_df.plot(title="Stock price") ax.set_xlabel("Date") ax.set_ylabel("Adj Close") plt.show() ax = fb_df.plot(title="Stock price") bx = fb_df.plot() bx.set_ylabel("High") plt.show() bx = norm_fb_df["Daily Returns"].plot() bx.set_ylabel("Daily Returns") plt.show() cx = norm_fb_df["Adj Close"].plot() cx.set_ylabel("Adj Close") plt.show() fb_df['2014-03-06':'2014-03-13'] adj_close = norm_fb_df["Adj Close"] adj_close_ax = adj_close.plot() daily_returns = norm_df_fb plt.show() norm_fb_df.head(10) norm_fb_df.at['2017-01-19',["Daily Returns"]] = 0 df = norm_fb_df.copy() df["Label"] = df["Adj Close"] < df["Adj Close"].shift(-1) df["Label"] = df["Label"].astype(int) df.head(5) df = create_training_set("data/fb.csv") from data import * df = pd.read_csv("data/fb.csv", index_col="Date", parse_dates=True, na_values=['nan']) df["Daily_Returns"] = (df["Adj Close"] / df["Adj Close"].shift(1)) - 1 df.at[df.index.values[0], ["Daily_Returns"]] = 0 df.head(5) df.at[df.index.values[0],["Daily_Returns"]]=0 df["Daily_Returns"].at[df.index.values[0], ["Daily_Returns"]] = 0 filename = "data/fb.csv" df = pd.read_csv(filename, index_col="Date", parse_dates=True, na_values=['nan']) df = df/df.iloc[0,:] df["Daily_Returns"] = (df["Adj Close"] / df["Adj Close"].shift(1)) - 1 df.at[df.index.values[0], ["Daily_Returns"]] = 0 df["Label"] = (df["Adj Close"] < df["Adj Close"].shift(-1)).astype(int) df.head(10) from data import * plotModel = PlotModel(title="Log Reg", xlabel="Adj Close", ylabel="Daily Returns") plot_logistic_regression_data(df,xcol="Adj Close", ycol="Daily_Returns", feature="Label", plotModel=plotModel) plotModel = PlotModel(title="Log Reg", xlabel="Adj Close", ylabel="Daily Returns") 
plot_logistic_regression_data(df,xcol="High", ycol="Low", feature="Label", plotModel=plotModel) plotModel = PlotModel(title="Log Reg", xlabel="Adj Close", ylabel="Daily Returns") plot_logistic_regression_data(df,xcol="Low", ycol="Daily_Returns", feature="Label", plotModel=plotModel) plotModel = PlotModel(title="Log Reg", xlabel="Volume", ylabel="Daily Returns") plot_logistic_regression_data(df,xcol="Volume", ycol="Daily_Returns", feature="Label", plotModel=plotModel) df.columns for c in df.columns: print(c) for c in df.columns[:-1]: print(c) for i in range(len(df.columns)-1): for j in range(i+1,len(df.columns)-1): x = df.columns[i] y = df.columns[j] plotModel = PlotModel(title="Log Reg", xlabel=x, ylabel=y) plot_logistic_regression_data(df, xcol=x, ycol=y, feature="Label", plotModel=plotModel) df.mean() avg = df[["Adj Close","Daily_Returns"]].rolling(center=False,window=5).mean() avg.columns = ["A","B"] avg.head(10) tdf = pd.DataFrame(index=["Date"]) tdf.index = pd.to_datetime(tdf.index) tdf pd.concat([tdf,avg],axis=1,join='outer').head(10) tdf.index avg.index pd.DatetimeIndex([0]) tdf tdf.index = pd.DatetimeIndex([]) tdf tdf.index tdf.clear() tdf = tdf[1:] tdf tdf.index fb_df.head(10) dict(fb_df.shift(-1).iloc[-1]) int(1 > 0) len(fb_df) fb_df["Label5"] = [0 for i in range(len(fb_df))] fb_df["Label5"].head(5) fb_Week 7 & 8TitleTitle -->H1 -->H2 -->H3 -->H4 -->[Green]: (bce35b)[Purple]: (ae8bd5)[Coral]: (9c4957)[Grey]: (8c8c8c) ****** Importing librariesfrom datetime import date, datetime as dttm, timedelta from pprint import pprint from contextlib import closing from dask import dataframe as dd import datetime as dt import sqlite3 from fuzzywuzzy import fuzz, process import lxml from bs4 import BeautifulSoup import sys import scipy import io import math import re from itertools import permutations, dropwhile, zip_longest from collections import deque as deq import requests import matplotlib.pyplot as plt import matplotlib as mpl import pandas as pd import numpy as np import os import keyring os.environ['KAGGLE_USERNAME'] = 'hakunapatata' os.environ['KAGGLE_KEY'] = keyring.get_password( service_name='kaggle_api', username=os.environ['KAGGLE_USERNAME']) %matplotlib inline # matplotlib default settings plt.style.use('dark_background') mpl.rcParams.update({'lines.linewidth': 3}) mpl.rcParams.update({'axes.labelsize': 14}) mpl.rcParams.update({'axes.titlesize': 16}) mpl.rcParams.update({'axes.titleweight': 'bold'}) mpl.rcParams.update({'figure.autolayout': True}) mpl.rcParams.update( {'axes.grid': True, 'grid.color': '#424242', 'grid.linestyle': '--'}) # creation random number generator object rng_seed = 777 rng = np.random.default_rng(rng_seed) # pandas dataframe options pd.set_option('display.max_columns', None) # see list of current settings # plt.rcParams.keys()*** Week ActivitiesTitleTitle -->H1 -->H2 -->H3 -->H4 --> -->[Green]: (bce35b)[Purple]: (ae8bd5)[Coral]: (9c4957)[Grey]: (8c8c8c) From the datasets provided, manipulate/clean the dataset using 2 concepts from each of the chapters below.Chapter 7- Filter out missing data- Fill in missing data- Remove duplicates- Transform data using either mapping or a function- Replace values- Discretization and Binning- Manipulate StringsChapter 8- Create hierarchical index- Combine and Merge Datasets- Reshape- Pivot the dataChapter 10- Grouping with Dicts/Series- Grouping with Functions- Grouping with Index Levels- Split/Apply/Combine- Cross TabsChapter 11- Convert between string and date time- Generate date range- Frequencies and date offsets- 
Convert timestamps to periods and back- Period Frequency conversions Import Metropolitan Museum of Art .csv file into a dask dataframe. Went with dask dataframe over pandas because csv was very large and dask incorporates lazy execution along with multi-core parallel processing.csv_url = "https://media.githubusercontent.com/media/metmuseum/openaccess/master/MetObjects.csv" bad_line_list = [] def bad_line_func(line): bad_line_list.append(line) df = dd.read_csv(csv_url, dtype="string", on_bad_lines=bad_line_func, engine="python")Import dask dataframe data into SQLite database/table. Went with the approace of importing into SQL database/table so that I could efficiently explore, extract, and manipulate the data.db_path = 'HakunaPatata.db' table_name = 'MET_MUSEUM' df.to_sql(table_name, f"sqlite:///{db_path}", index=False, if_exists='replace', parallel=False, chunksize=5000) # sqlite lacks parallel writing. would need to use a different database if parallel processing requiredCreate function for select statement into pandas DF.def sql_df(db_path, sql_txt): with sqlite3.connect(db_path) as conn: df = pd.read_sql_query(sql_txt, conn) return df conn.close() sql_txt = f""" SELECT * FROM MET_MUSEUM AS MM LIMIT 5 """ sql_df('HakunaPatata.db', sql_txt)Now lets find some columns with NULL values and filter them out.null_col_dict = {} for col in df.columns: sql_txt = f""" WITH NULL_COUNTS AS ( SELECT SUM(CASE WHEN MM.\"{col}\" IS NULL OR MM.\"{col}\"='None' OR REPLACE(MM.\"{col}\", ' ', '')='' THEN 1 ELSE 0 END) AS NULL_COUNT , SUM(CASE WHEN MM.\"{col}\" IS NOT NULL AND MM.\"{col}\"<>'None' AND REPLACE(MM.\"{col}\", ' ', '')<>'' THEN 1 ELSE 0 END) AS NOT_NULL_COUNT FROM MET_MUSEUM AS MM ) SELECT NC.NULL_COUNT , NC.NOT_NULL_COUNT , ROUND(100 * NC.NULL_COUNT / (NC.NOT_NULL_COUNT + NC.NULL_COUNT), 1) AS NULL_PERCENT FROM NULL_COUNTS AS NC """ null_df = sql_df('HakunaPatata.db', sql_txt=sql_txt) null_col_dict[col] = (null_df['NULL_COUNT'][0], null_df['NOT_NULL_COUNT'][0], null_df['NULL_PERCENT'][0]) print(f'Column\tNullCount\tNotNullCount\tNull%') pprint(null_col_dict)Column NullCount NotNullCount Null% {'AccessionYear': (3557, 474247, 0.0), 'Artist Alpha Sort': (202441, 275363, 42.0), 'Artist Begin Date': (233688, 244116, 48.0), 'Artist Display Bio': (238475, 239329, 49.0), 'Artist Display Name': (202269, 275535, 42.0), 'Artist End Date': (233718, 244086, 48.0), 'Artist Gender': (374744, 103060, 78.0), 'Artist Nationality': (270059, 207745, 56.0), 'Artist Prefix': (345839, 131965, 72.0), 'Artist Role': (204369, 273435, 42.0), 'Artist Suffix': (379148, 98656, 79.0), 'Artist ULAN URL': (255784, 222020, 53.0), 'Artist Wikidata URL': (260073, 217731, 54.0), 'City': (445397, 32407, 93.0), 'Classification': (78207, 399597, 16.0), 'Constituent ID': (202269, 275535, 42.0), 'Country': (402053, 75751, 84.0), 'County': (469354, 8450, 98.0), 'Credit Line': (452, 477352, 0.0), 'Culture': (270425, 207379, 56.0), 'Department': (1, 477803, 0.0), 'Dimensions': (75295, 402509, 15.0), 'Dynasty': (454571, 23233, 95.0), 'Excavation': (461246, 165[...]Excluding Null values for ACCESSION_YEAR and manipulating strings so that if it is a blank string or equals 'None', then will output a Null value.db_path = 'HakunaPatata.db' sql_txt = r''' SELECT MM."Object Number" AS OBJECT_NUMBER , MM."AccessionYear" AS ACCESSION_YEAR , MM."Object Name" AS OBJECT_NAME , MM."Title" AS TITLE , MM."Object Date" AS OBJECT_DATE , MM."Object Begin Date" AS OBJECT_BEGIN_DATE , MM."Object End Date" AS OBJECT_END_DATE , MM."Artist Alpha Sort" AS 
ARTIST_NAME , MM."Artist Gender" AS ARTIST_GENDER , MM."Artist Nationality" AS ARTIST_NATIONALITY , MM."Credit Line" AS CREDIT_LINE , MM."Dimensions" AS DIMENSIONS , MM."Medium" AS MEDIUM , MM."Classification" AS CLASSIFICATION , MM."Department" AS DEPARTMENT , MM."Culture" AS CULTURE FROM MET_MUSEUM AS MM WHERE MM."AccessionYear" <> 'None' AND REPLACE(MM."AccessionYear", ' ', '')<>'' -- should exclude Null values too ''' met_df = sql_df(db_path, sql_txt) # if 'None' string, None, or blank string then nan met_df.replace({'None': np.nan, '': np.nan, None: np.nan}, inplace=True) # exclude strings that are only white-space with nan met_df.replace(r'^\s*$', np.nan, regex=True, inplace=True) met_dfNow let's pivot some data and join it onto the DataFrame.Will group objects by begin date and end date and get totals for each. Will then join the grouped data onto the main DataFrame.begin_date_totals_df = pd.DataFrame(met_df.groupby( ['OBJECT_BEGIN_DATE']).size(), columns=['OBJ_BEGIN_DATE_TOTAL']) met_df = met_df.merge(begin_date_totals_df, how='left', left_on='OBJECT_BEGIN_DATE', right_index=True) met_dfDoing split/apply/combine to get the total per object end datedef group_count_df(df, col): return df[col].count() end_date_totals_df = pd.DataFrame(met_df.groupby(['OBJECT_END_DATE']).apply( group_count_df, col='OBJECT_END_DATE'), columns=['OBJ_END_DATE_TOTAL']) met_df = met_df.merge(end_date_totals_df, how='left', left_on='OBJECT_END_DATE', right_index=True) met_dfCreate a date column from the OBJECT_BEGIN_DATE column.- Will need to handle B.C. dates. Will convert negative (B.C.) years to positive and will create a column to categorize if the year is B.C. vs. A.D.- Also, the datetime module does not go past range 9999 (aka year 9999). Even when handling via converting to positive year, an out-of-range exception will be thrown. I started going down the path of looking into the package astropy.time which is used for astrophysics but after about 2 hrs of persistent desk head banging I decided that I'm probably overthinking the assignment I or may be trying to find a solution to something that does not exist. So, if the absolute value of the year is > 9999, then NaT will be output.# function to handle B.C. dates. Will convert negative years to postive and then convert to date. def year_str_to_dttm(string): try: year_num = int(string) if year_num < 0: year_str = str(int(-1 * year_num)) date_year = dttm.strptime(year_str, '%Y') return date_year else: return dttm.strptime(string, '%Y') except Exception: return pd.NaT # function to create column that specifies if the year is B.C. or A.D. def year_bc_ad(string): try: year_num = int(string) if year_num < 0: return 'B.C.' else: return 'A.D.' except Exception: return np.nan # using apply, use formula created above to create a date column based on the object begin year met_df['OBJ_BEGIN_YEAR_TRUNC'] = met_df['OBJECT_BEGIN_DATE'].apply( year_str_to_dttm) # using apply, use formula created above to create a date column based on the object begin year met_df['OBJ_BEGIN_YEAR_BC_AD'] = met_df['OBJECT_BEGIN_DATE'].apply(year_bc_ad) met_df[met_df['OBJ_BEGIN_YEAR_BC_AD'] == 'B.C.']Create date range from min/max date values from OBJ_BEGIN_YEAR_TRUNC. As mentioned above, the datetime module has a range of [1, 9999] so can only make a date range from year 1 A.D. to 9999 A.D. 
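To make that range limitation concrete, here is a small self-contained check of the same parsing approach. The two helpers are re-declared so the snippet runs on its own, and the test strings are invented for illustration (they are not taken from the Met data):
from datetime import datetime as dttm
import numpy as np
import pandas as pd

def year_str_to_dttm(string):
    # mirror of the helper defined above: flip the sign of B.C. years, fall back to NaT on failure
    try:
        year_num = int(string)
        if year_num < 0:
            return dttm.strptime(str(-1 * year_num), '%Y')
        return dttm.strptime(string, '%Y')
    except Exception:
        return pd.NaT

def year_bc_ad(string):
    # mirror of the helper defined above: label the era, or nan if the string is not a year
    try:
        return 'B.C.' if int(string) < 0 else 'A.D.'
    except Exception:
        return np.nan

for raw in ['-2500', '1850', '20000', 'ca. 1700']:
    print(raw, '->', year_str_to_dttm(raw), year_bc_ad(raw))
# '-2500' parses as year 2500 and is flagged B.C.; '20000' has five digits, which
# strptime's %Y (exactly four digits) rejects, so it falls back to NaT; the
# non-numeric string maps to NaT / nan.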
Now I'm starting to see why there was the Y2K problem in year 2000 lol.begin_date = dttm.strptime('0001', '%Y') end_date = max(met_df[met_df['OBJ_BEGIN_YEAR_BC_AD'] == 'A.D.']['OBJ_BEGIN_YEAR_TRUNC']) date_list = [] for i in range(begin_date.year, end_date.year+1): year_str = f"{i:04d}" date_time = dttm.strptime(year_str, '%Y') date_list.append(date_time) pd.DataFrame(date_list, columns=['OBJ_BEGIN_YEAR'], index=[pd.Index(date_list, name='OBJ_BEGIN_YEAR')])import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt original_data = pd.read_csv('/content/StockX-Data-Contest-2019-3.csv', header = 2) df = original_data.copy() df.head() # Change 'order date' dtype df['Order Date'] = pd.to_datetime(df['Order Date'], format='%m/%d/%Y') df.head() # Change 'release date' dtype df['Release Date'] = pd.to_datetime(df['Release Date'], format='%m/%d/%Y') df.head() # Remove - from sneaker name df['Sneaker Name'] = df['Sneaker Name'].apply(lambda x: x.replace('-', ' ')) df.head() # Remove $ and comma from sale price df['Sale Price'] = df['Sale Price'].apply(lambda x: x.replace('$', '')) df['Sale Price'] = df['Sale Price'].apply(lambda x: x.replace(',', '')) df.head() # Remove $ from retail price df['Retail Price'] = df['Retail Price'].apply(lambda x: x.replace('$', '')) df.head() df.to_csv('Clean_Shoe_Data.csv', index = False)Bank Marketing Dataset CGAN Training for synthesize datasetsCGAN: A conditional generative adversarial network (CGAN) is a type of GAN that also takes advantage of labels during the training process. Generator — Given a label and random array as input, this network generates data with the same structure as the training data observations corresponding to the same label.Then, we save the CGAN models for the data generation pipeline.# TODO # try also https://github.com/sdv-dev/CTGANImportsimport os import pandas as pd import numpy as np from ydata_synthetic.synthesizers.regular import CGAN from ydata_synthetic.synthesizers import ModelParameters, TrainParameters import matplotlib.pyplot as plt from sklearn.preprocessing import PowerTransformer from sklearn.pipeline import Pipeline from sklearn.compose import ColumnTransformer from src.pipeline.data_generation.data_generator import GANDataGenerator from src.pipeline.datasets.training_datasets import BankMarketingDataset, BankMarketingProcessedDataset from src.pipeline.model.paths import BANK_MARKETING_GEN_CGAN_MODEL_PATH from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all"Reading the Datasets Load Preprocessed Data EDA and Preprocessing# init GANDataGenerator print('Bank Marketing dataset\n') origin_dataset = BankMarketingProcessedDataset() origin_dataset = BankMarketingDataset() df = origin_dataset.raw_df label_col = origin_dataset.label_column_name df.head() df.shape print(f'Label columns name is: {label_col}. With {df[label_col].nunique()} unique values.' 
f'({df[label_col].unique()})') train_sample = df.sample(4521) cat_cols = [col for col in df.columns if any(cat_col for cat_col in origin_dataset.categorical_feature_names if cat_col + '_' in col)] numeric_cols = [col for col in df.columns if any(numeric_col for numeric_col in origin_dataset.numeric_feature_names if numeric_col in col)] cat_cols = origin_dataset.categorical_feature_names numeric_cols = origin_dataset.numeric_feature_names # numeric_cols.remove('job_management') # assert len(cat_cols)+len(numeric_cols) == len(df.columns)Init the GANto_save = False #Define the Conditional GAN and training parameters noise_dim = 32 dim = 128 batch_size = 128 beta_1 = 0.5 beta_2 = 0.9 log_step = 100 epochs = 500 + 1 learning_rate = 5e-4 gan_args = ModelParameters(batch_size=batch_size, lr=learning_rate, betas=(beta_1, beta_2), noise_dim=noise_dim, n_cols=train_sample.shape[1] - 1, # Don't count the label columns here layers_dim=dim) train_args = TrainParameters(epochs=epochs, cache_prefix='', sample_interval=log_step, label_dim=-1, labels=[0,1]) num_classes = df[label_col].nunique() #Init the Conditional GAN providing the index of the label column as one of the arguments synthesizer = CGAN(model_parameters=gan_args, num_classes=num_classes)2021-12-25 13:37:57.148014: I tensorflow/compiler/jit/xla_cpu_device.cc:41] Not creating XLA devices, tf_xla_enable_xla_devices not set 2021-12-25 13:37:57.148934: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2021-12-25 13:37:57.177079: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_UNKNOWN: unknown error 2021-12-25 13:37:57.177111: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: data-science-l-danielle 2021-12-25 13:37:57.177119: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: data-science-l-danielle 2021-12-25 13:37:57.177216: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 470.86.0 2021-12-25 13:37:57.177240: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 470.86.0 2021-12-25 13:37:57.177246: I tensorflow/stream_execut[...]Trainingfrom collections import defaultdict from sklearn.preprocessing import LabelEncoder class LabelProcessor: def __init__(self, dataset): self._df = dataset._raw_df self._label_col = dataset.label_column_name self._cat_cols = dataset.categorical_feature_names self._numeric_cols = dataset.numeric_feature_names self._processed_df = None self._encoder = None def preprocessed_data(self): df = self._df label_col = self._label_col cat_cols = self._cat_cols + [label_col] numeric_cols = self._numeric_cols columns = df.columns df_cat_cols = df[cat_cols] df_numeric_cols = df[numeric_cols] encoder_dict = defaultdict(LabelEncoder) df_cat_cols_processed = df_cat_cols.apply(lambda x: encoder_dict[x.name].fit_transform(x)) df_processed = pd.concat([df_numeric_cols, df_cat_cols_processed], axis=1) df_processed = df_processed[columns] self._encoder = encoder_dict self._processed_df = df_processed def postprocess_data(self): encoder_dict = self._encoder cat_cols = self._cat_cols + [self._label_col] inverse_transform_lambda = lambda x: encoder_dict[x.name].inverse_transform(x) if x.name in cat_cols else x return self._processed_df.apply(inverse_transform_lambda) def encoder_dict(self): np.save('label_encoder_dict.npy', self._encoder) def load_encoder_dict(self): 
np.load('label_encoder_dict.npy') processor = LabelProcessor(origin_dataset) processor.preprocessed_data() processor.postprocess_data() train_sample = processor._processed_df train_sample #---------------------------- # GAN Training #---------------------------- #Training the Conditional GAN synthesizer.train(data=train_sample, label_col=label_col, train_arguments=train_args, num_cols=numeric_cols, cat_cols=cat_cols ) #Saving the synthesizer if to_save: synthesizer.save(BANK_MARKETING_GEN_CGAN_MODEL_PATH)WARNING:tensorflow:AutoGraph could not transform > and will run it as-is. Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: module 'gast' has no attribute 'Index' To silence this warning, decorate the function with @tf.autograph.experimental.do_not_convert WARNING: AutoGraph could not transform > and will run it as-is. Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: module 'gast' has no attribute 'Index' To silence this warning, decorate the function with @tf.autograph.experimental.do_not_convertSave the modelsynthesizer.save(BANK_MARKETING_GEN_CGAN_MODEL_PATH) synthesizer = CGAN.load(BANK_MARKETING_GEN_CGAN_MODEL_PATH) synthesizer.generator.summary() synthesizer.discriminator.summary()Model: "model_19" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input_40 (InputLayer) [(128, 1)] 0 __________________________________________________________________________________________________ input_39 (InputLayer) [(128, 95)] 0 __________________________________________________________________________________________________ embedding_13 (Embedding) (128, 1, 1) 2 input_40[0][0] __________________________________________________________________________________________________ flatten_20 (Flatten) (128, 95) 0 input_39[0][0] [...]Synthesize samples based on the trained CGAN:generated_df_class_true = synthesizer.sample(condition=np.array([1]), n_samples=1) generated_df_class_true generated_df_class_false = synthesizer.sample(condition=np.array([0]), n_samples=2) generated_df_class_falseSynthetic data generation: 100%|██████████| 1/1 [00:00<00:00, 18.93it/s]Comparing the synthesized datanumeric_cols real_df_class_true = df[df[label_col]==1].sample(128) real_df_class_true.describe() generated_df_class_true.describe() generated_df_class_true generated_df_class_true.duration.plot(kind='hist') real_df_class_true.duration.plot(kind='hist') # cat_cols = df.select_dtypes(include=['category']).columns # numeric_cols = df.select_dtypes(exclude=['category']).columns # # print('categorical columns are:', cat_cols) # print('numeric columns are:', numeric_cols) # num_classes = df[label_col].nunique() # # df[label_col] = df[label_col].astype('category') # # # There's only 1 class, so essentially rename the 'Class' to 'Class_1', # # which tells weather a sample data is of class 1 or not. 
# df = pd.get_dummies(df, columns=[label_col], prefix='class', drop_first=True) # # train_sample = df.copy().reset_index(drop=True) # print(f"df columns: {train_sample.columns}") # # # 'Class_1' label # label_cols = [ i for i in train_sample.columns if 'class_' in i ] # # # All columns except 'Class_1' # data_cols = [ i for i in train_sample.columns if i not in label_cols ] # # # Scale down the data, and rename it to 'train_no_label' # train_sample[data_cols] = train_sample[data_cols].astype(int) / 10 # scale to random noise size, one less thing to learn # # # TODO To remove: # train_no_label = train_sample[data_cols] models = {'GAN': ['GAN', False, synthesizer.generator]} # Setup parameters visualization parameters seed = 17 test_size = 492 # number of fraud cases noise_dim = 32 np.random.seed(seed) z = np.random.normal(size=(test_size, noise_dim)) real = synthesizer.get_data_batch(train=train_sample, batch_size=test_size, seed=seed) real_samples = pd.DataFrame(real, columns=data_cols+label_cols) labels = fraud_w_classes['Class'] model_names = ['GAN'] colors = ['deepskyblue','blue'] markers = ['o','^'] class_labels = ['Class 1','Class 2'] col1, col2 = 'V17', 'V10' base_dir = 'cache/' # Actual fraud data visualization model_steps = [ 0, 100, 200] rows = len(model_steps) columns = 1 + len(models) axarr = [[]]*len(model_steps) fig = plt.figure(figsize=(14,rows*3)) # Go through each of the 3 model_step values -> 0, 100, 200 for model_step_ix, model_step in enumerate(model_steps): axarr[model_step_ix] = plt.subplot(rows, columns, model_step_ix*columns + 1) # Plot 'Class 1' and 'Class 2' samples taken from the original data, in a random shuffled fashion for group, color, marker, label in zip(real_samples.groupby('Class_1'), colors, markers, class_labels ): plt.scatter( group[1][[col1]], group[1][[col2]], label=label, marker=marker, edgecolors=color, facecolors='none' ) plt.title('Actual Fraud Data') plt.ylabel(col2) # Only add y label to left plot plt.xlabel(col1) xlims, ylims = axarr[model_step_ix].get_xlim(), axarr[model_step_ix].get_ylim() if model_step_ix == 0: legend = plt.legend() legend.get_frame().set_facecolor('white') # Go through all the GAN models listed in 'model_names' and defined in 'models' for i, model_name in enumerate( model_names[:] ): [model_name, with_class, generator_model] = models[model_name] generator_model.load_weights( base_dir + '_generator_model_weights_step_'+str(model_step)+'.h5') ax = plt.subplot(rows, columns, model_step_ix*columns + 1 + (i+1) ) if with_class: g_z = generator_model.predict([z, labels]) gen_samples = pd.DataFrame(g_z, columns=data_cols+label_cols) for group, color, marker, label in zip( gen_samples.groupby('Class_1'), colors, markers, class_labels ): plt.scatter( group[1][[col1]], group[1][[col2]], label=label, marker=marker, edgecolors=color, facecolors='none' ) else: g_z = generator_model.predict(z) gen_samples = pd.DataFrame(g_z, columns=data_cols+['label']) gen_samples.to_csv('../../data/Generated_sample.csv') plt.scatter( gen_samples[[col1]], gen_samples[[col2]], label=class_labels[0], marker=markers[0], edgecolors=colors[0], facecolors='none' ) plt.title(model_name) plt.xlabel(col1) ax.set_xlim(xlims), ax.set_ylim(ylims) plt.suptitle('Comparison of GAN outputs', size=16, fontweight='bold') plt.tight_layout(rect=[0.075,0,1,0.95]) # Adding text labels for training steps vpositions = np.array([ i._position.bounds[1] for i in axarr ]) vpositions += ((vpositions[0] - vpositions[1]) * 0.35 ) for model_step_ix, model_step in enumerate( 
model_steps ): fig.text( 0.05, vpositions[model_step_ix], 'training\nstep\n'+str(model_step), ha='center', va='center', size=12) # if not os.path.exists("./img"): # os.makedirs("./img") # plt.savefig('img/Comparison_of_GAN_outputs.png', dpi=100)Load Trained Modelgan_generator = GANDataGenerator(dataset=origin_dataset, model_class=CGAN, trained_model_path=)Instructions
1. Load the close_prices.csv data. This file contains the closing prices of the stocks of 30 companies for every day of the period.
2. Fit a PCA transformation with 10 components on the loaded data. How many components are enough to explain 90% of the variance?
3. Apply the fitted transformation to the original data and take the values of the first component.
4. Load the Dow Jones index data from the file djia_index.csv. What is the Pearson correlation between the first component and the Dow Jones index?
5. Which company has the largest weight in the first component? Give its name starting with a capital letter.
If the answer is not an integer, separate the integer and fractional parts with a dot, for example 0.42. Round the fractional part to two decimal places if necessary. The answer to each task is a text file containing the answer on its first line. Note that the submitted files must not contain a trailing newline; this is a limitation of the Coursera platform, and we are working on removing it.
import pandas as pd from pathlib import Path path = Path.cwd() path = path.joinpath('../data/raw/HSE_ML_week4') data = pd.read_csv(path.joinpath('close_prices.csv')) data.head(10) import numpy as np from sklearn.decomposition import PCA X = data.copy() del X['date'] pca = PCA(n_components=10) pca.fit(X) np.add.accumulate(pca.explained_variance_ratio_) file = open('/home/topcoder2k/HSE_ML/HSE_ML_week4_answers/pca_90.txt', 'w') file.write('4') file.close() X_reduced = pca.transform(X) component1 = X_reduced[:, 0] djia_index = pd.read_csv(path.joinpath('djia_index.csv')) np.corrcoef(component1, djia_index['^DJI']) file = open('/home/topcoder2k/HSE_ML/HSE_ML_week4_answers/corrcoef.txt', 'w') file.write('0.91') file.close() max_var = abs(pca.components_[0]).max() for i in range(pca.components_[0].size): if abs(pca.components_[0][i] - max_var) < 0.001: print(i) file = open('/home/topcoder2k/HSE_ML/HSE_ML_week4_answers/important_company.txt', 'w') file.write('V') file.close()SLU12 - Validation metrics for regression: Exercise NotebookIn this notebook, you will implement: - Mean Absolute Error (MAE) - Mean Squared Error (MSE) - Root Mean Squared Error (RMSE) - Coefficient of Determination (R²) - Adjusted R² - Regularized Linear Regression loss - (BONUS) Partial derivatives for the Regularized Linear Regression loss 1 Metrics# This cell creates the data and parameters that # you can use to test your implementations.
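# Note: sklearn.datasets.load_boston was deprecated in scikit-learn 1.0 and removed in 1.2,
# so the setup cell below assumes an older scikit-learn release is installed.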
import numpy as np import pandas as pd from sklearn.datasets import load_boston from sklearn.linear_model import LinearRegression np.random.seed(60) data = load_boston() x = pd.DataFrame(data['data'], columns=data['feature_names']) y = pd.Series(data['target']) lr = LinearRegression() lr.fit(x, y) y_hat = lr.predict(x) betas = pd.Series([lr.intercept_] + list(lr.coef_))1.1 Mean Absolute Error$$MAE = \frac{1}{N} \sum_{n=1}^N \left| y_n - \hat{y}_n \right|$$def mean_absolute_error(y, y_hat): """ Args: y : pandas.Series with shape (num_observations,) The targets y_hat : pandas.Series with shape (num_observations,) The predictions Returns: mae : pandas.Series with shape (num_observations,) """ # 1) Compute the difference. # e = ... # YOUR CODE HERE raise NotImplementedError() # 2) Compute the absolute value of the difference. # a = ... # YOUR CODE HERE raise NotImplementedError() # 3) Compute the mean of the absolute value of the difference. # mae = ... # YOUR CODE HERE raise NotImplementedError() return mae print("MAE: {}".format(mean_absolute_error(y, y_hat)))Expected output:```MAE: 3.2729446379969387```import math assert math.isclose(0.33316349496726444, mean_absolute_error(pd.Series(np.random.RandomState(10).rand(10)), pd.Series(np.random.RandomState(20).rand(10)))) assert math.isclose(0.39070816989559587, mean_absolute_error(pd.Series(np.random.RandomState(30).rand(10)), pd.Series(np.random.RandomState(20).rand(10)))) assert math.isclose(0.2567117528634928, mean_absolute_error(pd.Series(np.random.RandomState(30).rand(10)), pd.Series(np.random.RandomState(500).rand(10))))1.2 Mean Squared Error$$MSE = \frac{1}{N} \sum_{n=1}^N (y_n - \hat{y}_n)^2$$def mean_squared_error(y, y_hat): """ Args: y : pandas.Series with shape (num_observations,) The targets y_hat : pandas.Series with shape (num_observations,) The predictions Returns: mse : pandas.Series with shape (num_observations,) """ # 1) Compute the difference. # e = ... # YOUR CODE HERE raise NotImplementedError() # 2) Compute the squares of the difference # s = ... # YOUR CODE HERE raise NotImplementedError() # 3) Compute the mean of the squares of the difference. # mse = ... # YOUR CODE HERE raise NotImplementedError() return mse print("MSE: {}".format(mean_squared_error(y, y_hat)))Expected output: ```MSE: 21.8977792176875```import math assert math.isclose(0.16469788257519086, mean_squared_error(pd.Series(np.random.RandomState(10).rand(10)), pd.Series(np.random.RandomState(20).rand(10)))) assert math.isclose(0.22325626250313846, mean_squared_error(pd.Series(np.random.RandomState(30).rand(10)), pd.Series(np.random.RandomState(20).rand(10)))) assert math.isclose(0.13478449093337383, mean_squared_error(pd.Series(np.random.RandomState(30).rand(10)), pd.Series(np.random.RandomState(500).rand(10))))1.3 Root Mean Squared Error$$RMSE = \sqrt{MSE}$$def root_mean_squared_error(y, y_hat): """ Args: y : pandas.Series with shape (num_observations,) The targets y_hat : pandas.Series with shape (num_observations,) The predictions Returns: rmse : pandas.Series with shape (num_observations,) """ # Compute the mean squared error. # mse = ... # YOUR CODE HERE raise NotImplementedError() # Compute the root square. # rmse = ... 
# YOUR CODE HERE raise NotImplementedError() return rmse print("RMSE: {}".format(root_mean_squared_error(y, y_hat)))Expected output:```RMSE: 4.679506300635516```import math assert math.isclose(0.4058298690032448, root_mean_squared_error(pd.Series(np.random.RandomState(10).rand(10)), pd.Series(np.random.RandomState(20).rand(10)))) assert math.isclose(0.4725000132308342, root_mean_squared_error(pd.Series(np.random.RandomState(30).rand(10)), pd.Series(np.random.RandomState(20).rand(10)))) assert math.isclose(0.36713007358887645, root_mean_squared_error(pd.Series(np.random.RandomState(30).rand(10)), pd.Series(np.random.RandomState(500).rand(10))))1.4 R² score$$\bar{y} = \frac{1}{N} \sum_{n=1}^N y_n$$$$R² = 1 - \frac{MSE(y, \hat{y})}{MSE(y, \bar{y})} = 1 - \frac{\frac{1}{N} \sum_{n=1}^N (y_n - \hat{y}_n)^2}{\frac{1}{N} \sum_{n=1}^N (y_n - \bar{y})^2}= 1 - \frac{\sum_{n=1}^N (y_n - \hat{y}_n)^2}{\sum_{n=1}^N (y_n - \bar{y})^2}$$def r_squared(y, y_hat): # Compute the mean squared error between # the target and the predictions. # mse_top = ... # YOUR CODE HERE raise NotImplementedError() # Compute the mean squared error between # the target and the target mean. # mse_bottom = ... # YOUR CODE HERE raise NotImplementedError() # Now, take both mean square errors # and finish the computation of R². # r2 = ... # YOUR CODE HERE raise NotImplementedError() return r2 print("R²: {}".format(r_squared(y, y_hat)))Expected output: ```R²: 0.7406077428649427```import math assert math.isclose(-1.012757734643532, r_squared(pd.Series(np.random.RandomState(10).rand(10)), pd.Series(np.random.RandomState(20).rand(10)))) assert math.isclose(-2.075782802360925, r_squared(pd.Series(np.random.RandomState(30).rand(10)), pd.Series(np.random.RandomState(20).rand(10)))) assert math.isclose(-0.8569146262224803, r_squared(pd.Series(np.random.RandomState(30).rand(10)), pd.Series(np.random.RandomState(500).rand(10))))1.5 Adjusted R² score$$\bar{R}^2 = 1 - \frac{N - 1}{N - K - 1} (1 - R^2)$$where $N$ is the number of observations in the dataset used for training the model (i.e. number of rows of the pandas dataframe) and $K$ is the number of features used by your model (i.e. number of columns of the pandas dataframe)def adjusted_r_squared(y, y_hat, K): """ Args: y : pandas.Series with shape (num_observations,) y_hat : pandas.Series with shape (num_observations,) K : integer Number of features used in the model that computed y_hat. Returns: adj_r2 : float The adjusted value of R². """ # Compute R². # r2 = ... # YOUR CODE HERE raise NotImplementedError() # Adjust R² # adj_r2 = ... # YOUR CODE HERE raise NotImplementedError() return adj_r2 print("Adj. R²: {}".format(adjusted_r_squared(y, y_hat, x.shape[1])))Expected output: ```Adj. 
R²: 0.7337538824121872```import math assert math.isclose(-1.891075622505615, adjusted_r_squared(pd.Series(np.random.RandomState(65).rand(10)), pd.Series(np.random.RandomState(10).rand(10)), 2)) assert math.isclose(-3.0475058715078607, adjusted_r_squared(pd.Series(np.random.RandomState(65).rand(10)), pd.Series(np.random.RandomState(10).rand(10)), 4)) assert math.isclose(-5.745843119179767, adjusted_r_squared(pd.Series(np.random.RandomState(65).rand(10)), pd.Series(np.random.RandomState(10).rand(10)), 6)) assert math.isclose(-2.138572002282621, adjusted_r_squared(pd.Series(np.random.RandomState(1).rand(10)), pd.Series(np.random.RandomState(42).rand(10)), 2)) assert math.isclose(-3.394000803195669, adjusted_r_squared(pd.Series(np.random.RandomState(1).rand(10)), pd.Series(np.random.RandomState(42).rand(10)), 4)) assert math.isclose(-6.323334671992782, adjusted_r_squared(pd.Series(np.random.RandomState(1).rand(10)), pd.Series(np.random.RandomState(42).rand(10)), 6)) assert math.isclose(-3.6986284467219077, adjusted_r_squared(pd.Series(np.random.RandomState(23).rand(10)), pd.Series(np.random.RandomState(13).rand(10)), 2)) assert math.isclose(-5.57807982541067, adjusted_r_squared(pd.Series(np.random.RandomState(23).rand(10)), pd.Series(np.random.RandomState(13).rand(10)), 4)) assert math.isclose(-9.96346637568445, adjusted_r_squared(pd.Series(np.random.RandomState(23).rand(10)), pd.Series(np.random.RandomState(13).rand(10)), 6))2 Regularization 2.1 Compute Regularized Linear Regression loss$$L_{L_1} = \lambda_1 \|\beta\|_1^1 = \lambda_1 \sum_{k=1}^K \left|\beta_k\right|$$$$L_{L_2} = \lambda_2 \|\beta\|_2^2 = \lambda_2 \sum_{k=1}^K \beta_k^2$$$$L = \frac{1}{N} \sum_{n=1}^N (y_n - \hat{y}_n)^2 + L_{L_1} + L_{L_2}$$def regularized_linear_regression_loss(y, y_hat, betas, lamb1, lamb2): """ Args: y : pandas.Series with shape (num_observations,) The targets. y_hat : pandas.Series with shape (num_observations,) THe predictions. betas : pandas.Series with shape (num_features+1,) The parameters of your regression model. The first value is the intercept and the remaining ones are the feature coefficients. lamb1 : float The strength of the L1 regularizer. lamb2 : float The strength of the L2 regularizer. Returns: loss : float """ # Compute the L1 part of # the general loss function. # l1_loss = ... # YOUR CODE HERE raise NotImplementedError() # Compute the L2 part of # the general loss function. # l2_loss = ... # YOUR CODE HERE raise NotImplementedError() # Compute the mean square loss part # of the general loss function. # mse = ... # YOUR CODE HERE raise NotImplementedError() # Compute the total loss by # combining all 3 parts. # L = ... # YOUR CODE HERE raise NotImplementedError() return L lamb1 = 0.5 lamb2 = 0.1 L = regularized_linear_regression_loss(y, y_hat, betas, lamb1, lamb2) print("Regularized loss for lin. reg.: {}".format(L))Expected output```Regularized loss for lin. 
reg.: 69.95737104987452```import math lamb1 = 1.5 lamb2 = 0.1 L = regularized_linear_regression_loss(y, y_hat, betas, lamb1, lamb2) assert math.isclose(L, 97.70368652856253) lamb1 = 0.5 lamb2 = 1.1 L = regularized_linear_regression_loss(y, y_hat, betas, lamb1, lamb2) assert math.isclose(L, 411.8217119783049) lamb1 = 0.5 lamb2 = 1.2 L = regularized_linear_regression_loss(y, y_hat, betas, lamb1, lamb2) assert math.isclose(L, 446.0081460711479)(BONUS) 2.3 Compute Regularized Linear Regression partial derivatives$$\frac{\partial L}{\partial \beta_0} = - \sum_{n=1}^{N} 2 (y_n - \hat{y}_n)$$$$\frac{\partial L}{\partial \beta_k}= - \sum_{n=1}^{N} 2 (y_n - \hat{y}_n) x_{k_n} + \lambda_1 \frac{\beta_k}{\left|\beta_k\right|} + 2 \lambda_2 \beta_k = - \sum_{n=1}^{N} 2 (y_n - \hat{y}_n) x_{k_n} + \lambda_1 sign(\beta_k) + 2 \lambda_2 \beta_k $$$$sign(\beta_k) = \begin{cases} +1,& \text{if } \beta_k > 0\\ -1,& \text{if } \beta_k < 0\\ 0,& \text{if } \beta_k = 0\end{cases}$$def regularized_linear_regression_partial_derivatives(x, y, betas, lamb1, lamb2): """ Args: x : pandas.DataFrame with shape (num_observations, num_features) The input features. y : pandas.Series with shape (num_observations,) The targets. betas : pandas.Series with shape (num_features+1,) The intercept at index 0. The coefficients in the remaining indexes. lamb1 : float The strength of the L1 regularizer. lamb2 : float The strength of the L2 regularizer. Returns: dL_dbetas : pandas.Series with shape (num_features+1,) """ # To make your life easier, extract # the numpy array from x. # YOUR CODE HERE raise NotImplementedError() # Make predictions y_hat by using # the dot product between x and betas. # Don't forget to separate betas[0] # from the remaining betas! # YOUR CODE HERE raise NotImplementedError() # Compute the difference between the # target and the predictions. # YOUR CODE HERE raise NotImplementedError() # Initialize (with zeros) the pandas # Series that will store the partial # derivatives for the betas. K = x.shape[1] dL_dbetas = pd.Series(np.zeros(K)) # Calculate the partial derivative # for beta_0. Don't # YOUR CODE HERE raise NotImplementedError() # Calculate the partial derivatives # for each beta_k, for k > 0. for k in range(1, K): # Perform 3 sums in order to make # things clear. # 1) First, add the part correspoding to the # MSE derivative in order to beta_k. # Don't forget to put the minus sign at # the beginning! # TIP: to get the k column of x, use x[:, k]. # YOUR CODE HERE raise NotImplementedError() # 2) Add the part corresponding to the L1 # regularization. To make it simpler, # use numpy.sign function. # TIP: if you want to use the sign # operation that you saw in the formula, # take a look at np.sign. # YOUR CODE HERE raise NotImplementedError() # 3) Finally, add the part corresponding # to the L2 regularization. 
# YOUR CODE HERE raise NotImplementedError() return dL_dbetas lamb1 = 1 lamb2 = 1 print(regularized_linear_regression_partial_derivatives(x, y, betas, lamb1, lamb2))Expected output: ```0 -5.684342e-131 -1.214341e+002 1.092790e+003 1.041720e+004 6.377123e+005 -3.659152e+016 8.609505e+007 1.001502e+008 -3.951518e+009 1.611310e+0010 -1.024659e+0011 -2.906927e+0012 1.018785e+00dtype: float64```import math lamb1 = 2 lamb2 = 2 np.testing.assert_array_almost_equal( regularized_linear_regression_partial_derivatives(x, y, betas, lamb1, lamb2).values, np.array([-5.68434189e-13, -2.42868223e+00, 2.18558088e+00, 2.08344096e+00, 1.27542456e+01, -7.31830346e+01, 1.72190098e+01, 2.00300425e+00, -7.90303519e+00, 3.22262015e+00, -2.04931739e+00, -5.81385422e+00, 2.03757005e+00])) lamb1 = 0 lamb2 = 0 np.testing.assert_array_almost_equal( regularized_linear_regression_partial_derivatives(x, y, betas, lamb1, lamb2).values, np.array([-5.68434189e-13, -1.81353244e-09, 1.40516931e-10, 1.13686838e-13, 3.49587026e-12, -3.02406988e-11, 1.00408215e-09, -9.45306056e-11, -6.50288712e-11, -1.46974344e-09, 2.45563569e-11, -2.52475729e-09, 1.82808435e-10])) lamb1 = 0 lamb2 = 3 np.testing.assert_array_almost_equal( regularized_linear_regression_partial_derivatives(x, y, betas, lamb1, lamb2).values, np.array([-5.68434189e-13, -6.43023341e-01, 2.78371317e-01, 1.25161437e-01, 1.61313684e+01, -1.06774552e+02, 2.28285148e+01, 4.50637013e-03, -8.85455278e+00, 1.83393023e+00, -7.39760778e-02, -5.72078133e+00, 5.63550765e-02]))Supplementary notebook for Post-embryonic development and aging of the appendicular skeleton in Ambystoma mexicanum Camilo Riquelme-Guzmán1, , , 1, 1, 2, 2,3,4, 5,6, 1,6 1 Technische Universität Dresden, CRTD/ Center for Regenerative Therapies TU Dresden, Dresden, Germany. 2 System Biology Group (SysBio), Institute of Physics of Liquids and Biological Systems (IFLySiB), National Scientific and Technical Research Council (CONICET) and University of La Plata, La Plata, Argentina. 3 Institute of Technology, Argentinian University of Business (UADE), Buenos Aires, Argentina. 4 Center for Information Services and High Performance Computing (ZIH), Technische Universität Dresden, Dresden, Germany. 5 Department of Medicine III, Universitätsklinikum Dresden, Dresden, Germany. 6 Center for Healthy Aging, Universitätsklinikum Dresden, Dresden, Germany. This work is available at: https://www.biorxiv.org/content/10.1101/2021.03.05.434057v1 This notebook was developed by and . (ASC) and (OC) The next cell plots fig 1A from the paper. Run the following cell to plot fig 1A from the paper and to save it as a png image.# Import libraries. import numpy as np from scipy.optimize import curve_fit from scipy.integrate import odeint import pandas as pd import matplotlib.pyplot as plt import random import copy import scipy.stats # Read the data. dataset = "SV" df = pd.read_excel(io="AxolotlGrowth.xlsx", sheet_name=str(dataset)) Ca_data_sv = df["ST in cm"] tspan = df["Age in months"] dataset = "ST" df2 = pd.read_excel(io="AxolotlGrowth.xlsx", sheet_name=str(dataset)) Ca_data_tl = df2["ST in cm"] # Define the function to fit. def fitfunc(x, a, b): return a+b*x # Fit the selected function to data. k_fit, kcov = curve_fit(fitfunc, Ca_data_tl, Ca_data_sv,absolute_sigma=True) # Calculate the standard deviation from the fit. std = np.sqrt(np.diag(kcov)) # Calculate the predicted line from the results of the fit and its deviation. 
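# fit is the best-fit straight line a + b*x evaluated at the measured ST lengths;
# fit2 and fit3 shift both parameters by +/- 2 standard deviations (taken from the
# diagonal of the covariance matrix above) to draw an approximate confidence band.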
tfit = Ca_data_tl fit = fitfunc(tfit, k_fit[0], k_fit[1]) fit2 = fitfunc(tfit, k_fit[0]+ 2 * std[0], k_fit[1]+ 2 * std[1]) fit3 = fitfunc(tfit, k_fit[0]- 2 * std[0], k_fit[1]- 2 * std[1]) # Plot the curves and experimental data. plt.plot(tfit, fit, color="black") plt.plot(tfit, fit2, color="black",linestyle="dashed", dashes=(5, 3)) plt.plot(tfit, fit3, color="black",linestyle="dashed", dashes=(5, 3)) plt.scatter(Ca_data_tl,Ca_data_sv, c=tspan, cmap="viridis") plt.colorbar(label='Age (month)') plt.gca().spines['right'].set_color('none') plt.gca().spines['top'].set_color('none') plt.xlabel("ST length (cm)") plt.ylabel("SV length (cm)") plt.savefig("Fig1A.png",dpi=300) plt.show() plt.close()The next cell plots fig 1B and fig 1C from the paper. Run the following cell to plot fig 1B and fig 1C from the paper and to save them as a png image.# Import libraries. import numpy as np from scipy.optimize import curve_fit from scipy.integrate import odeint import pandas as pd import matplotlib.pyplot as plt import random import copy # Choose data set. Options are "SV" or "TL". Remember to put the excel file on the same folder as this jupyter notebook. sheet = ["SV", "ST"] # Select the number of samples for the bootstrap method. n_bootstrap=1 # Sweeps over all posible combinations of datasets. for dataset in sheet: # Read the data. df = pd.read_excel(io="AxolotlGrowth.xlsx", sheet_name=str(dataset)) tspan = df["Age in months"] Ca_data = df["ST in cm"] # Define target function: def fitfunc(x, xc, y0, m1, m2): return (y0 + m1 * x)*(np.sign(xc-x) + 1)*0.5+(y0 + m1 * xc + m2 * (x - xc))*(np.sign(x-xc) + 1)*0.5 # Fit the selected function to data. # Set the initial guess. po_guess = [15,0,2.6,0.01] # Set the parameters constrains. boundaries = ([10, 0, 0, 0], [50, 50, 100, 100]) # Fit the function k_fit, kcov = curve_fit(fitfunc, tspan, Ca_data, p0=po_guess, bounds=boundaries) # R squared calcularion residuals = Ca_data - fitfunc(tspan, *k_fit) ss_res = np.sum(residuals**2) ss_tot = np.sum((Ca_data-np.mean(Ca_data))**2) r_squared = 1 - (ss_res / ss_tot) # Akaike information criterion n = len(Ca_data) k = len(k_fit) + 1 AIC= 2*k + n * np.log(ss_res/n) + (2 * k**2 + 2 * k)/(n-k-1) # Bootstrap method # Different arrays xc_array = [] y0_array = [] m1_array = [] m2_array = [] # run n samples. for i in range(n_bootstrap): tspan_bootstrap = copy.deepcopy(tspan) Ca_data_bootstrap = copy.deepcopy(Ca_data) b , c = random.sample(range(len(Ca_data_bootstrap)), 2) tspan_bootstrap[b] = tspan_bootstrap[c] Ca_data_bootstrap [b] = Ca_data_bootstrap [c] k_fit2, kcov2 = curve_fit(fitfunc, tspan_bootstrap, Ca_data_bootstrap, p0=po_guess, bounds=boundaries) xc_array.append(k_fit2[0]) y0_array.append(k_fit2[1]) m1_array.append(k_fit2[2]) m2_array.append(k_fit2[3]) # Calculate mean and standard deviation xc_mean = np.mean(xc_array) xc_std = np.std(xc_array) y0_mean = np.mean(y0_array) y0_std = np.std(y0_array) m1_mean = np.mean(m1_array) m1_std = np.std(m1_array) m2_mean = np.mean(m2_array) m2_std = np.std(m2_array) # Print on screan the previous information. Uncomment to excute (commented by default). # Add another print for other variables if needed. # print("Mean of transition age:",xc_mean,"and standard deviation of transition age:",xc_std) # print("Mean of y0:",y0_mean,"and standard deviation of y0:",y0_std) # print("Mean of m1:",m1_mean,"and standard deviation of m1:",m1_std) # print("Mean of m2:",m2_mean,"and standard deviation of m2:",m2_std) # Generation of data to plot the result of the fit. 
tfit = np.arange(0,max(tspan),0.01); fit = fitfunc(tfit, k_fit[0],k_fit[1],k_fit[2],k_fit[3]) # Plot. if dataset=="ST": plt.scatter(tspan, Ca_data, label='ST Exp. Data',color="black", s=20) elif dataset=="SV": plt.scatter(tspan, Ca_data, label='SV Exp. Data',color="black", s=20) plt.plot(tfit, fit, label='Two-line model',color="g") plt.legend(loc=7) if dataset=="ST": plt.title("Snout to tail (ST) length in Axolotl") elif dataset=="SV": plt.title("Snout to vent length (SV) in Axolotl") plt.gca().spines['right'].set_color('none') plt.gca().spines['top'].set_color('none') plt.xlabel("Age (month)") if dataset=="ST": plt.ylabel("ST length (cm)") elif dataset=="SV": plt.ylabel("SV length (cm)") if dataset=="ST": plt.savefig("Fig1B.png",dpi=300) elif dataset=="SV": plt.savefig("Fig1C.png",dpi=300) plt.show() plt.close()The next cell plots fig 1D, fig 1E, fig 1F and fig 1G from the paper. Run the following cell to plot fig 1D, fig 1E, fig 1F and fig 1G from the paper and to save them as a png image.# Import libraries. import numpy as np from scipy.optimize import curve_fit from scipy.integrate import odeint import pandas as pd import matplotlib.pyplot as plt import random import copy # Choose data set. Options are "SV" or "TL". Remember to put the excel file on the same folder as this jupyter notebook. sheet = ["SV", "ST"] # Sweeps over all posible combinations of datasets. for dat in sheet: dataset = dat # number of samples for the bootstrap method. n_bootstrap=1 # Set the initial guess. po_guess = [15,0] # Set the parameters constrains. boundaries = ([0, 0], [50, 50]) # Read the data. df = pd.read_excel(io="AxolotlGrowth.xlsx", sheet_name=str(dataset)) tspan = df["Age in months"] Ca_data = df["ST in cm"] # Translate the time to start on t0=0 because it helps the ode integrator. t2=[] for i in range(len(tspan)): t2.append(tspan[i]-tspan[0]) # Define the growth rate as a hill function. def hill(x, y0, a,c,n): return c+y0/(1+(a*(x+tspan[0]))**n) # Define the target function to fit. def fitfunc(x, y0, a,c,n): 'Function that returns Ca computed from an ODE for a k' def myode(v,x): return (c+y0/(1+(a*(x+tspan[0]))**n))*v v0 = min(Ca_data) Casol = odeint(myode, v0, x) return Casol[:,0] # Comment: In the paper some of these variables are defined differently. # To do the fit it is better to define them this way. # The variables from the paper can be obtained from these variables as: # R_inf = c # R0 = c + y0 # The transition age is equal to 1/a. # Fit the selected function to data. k_fit, kcov = curve_fit(fitfunc, t2, Ca_data, p0=[1,0.20477636,0.00112588,3],bounds=([0,0,0,0], [100,1,10,100]), maxfev=100000) # R squared calcularion residuals = Ca_data - fitfunc(t2, *k_fit) ss_res = np.sum(residuals**2) ss_tot = np.sum((Ca_data-np.mean(Ca_data))**2) r_squared = 1 - (ss_res / ss_tot) # Akaike information criterion n = len(Ca_data) k = len(k_fit) + 1 AIC= 2*k + n * np.log(ss_res/n) + (2 * k**2 + 2 * k)/(n-k-1) # Bootstrap method # Different arrays xc_array = [] y0_array = [] m1_array = [] m2_array = [] # run n samples. 
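# Each bootstrap sample copies the data, overwrites one randomly chosen point with
# another randomly chosen point, re-fits the growth model, and stores the fitted
# parameters; the spread of those parameters estimates their uncertainty.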
for i in range(n_bootstrap): tspan_bootstrap = copy.deepcopy(t2) Ca_data_bootstrap = copy.deepcopy(Ca_data) b , c = random.sample(range(len(Ca_data_bootstrap)), 2) tspan_bootstrap[b] = tspan_bootstrap[c] Ca_data_bootstrap [b] = Ca_data_bootstrap [c] tspan_bootstrap, Ca_data_bootstrap = zip(*sorted(zip(tspan_bootstrap, Ca_data_bootstrap))) k_fit2, kcov2 = curve_fit(fitfunc, tspan_bootstrap, Ca_data_bootstrap, p0=[1,0.20477636,0.00112588,3],bounds=([0,0,0,0], [100,1,10,100]), maxfev=100000) xc_array.append(k_fit2[0]) y0_array.append(k_fit2[1]) m1_array.append(k_fit2[2]) m2_array.append(k_fit2[3]) # Calculate mean and standar deviation xc_mean = np.mean(xc_array) xc_std = np.std(xc_array) y0_mean = np.mean(y0_array) y0_std = np.std(y0_array) m1_mean = np.mean(m1_array) m1_std = np.std(m1_array) m2_mean = np.mean(m2_array) m2_std = np.std(m2_array) # Print on screan the previous information. Uncomment to excute (commented by default). # Add another print for other variables if needed. # print("Mean of y0:",xc_mean,"and standard deviation of y0:",xc_std) # print("Mean of y0a:",y0_mean,"and standard deviation of a:",y0_std) # print("Mean of c:",m1_mean,"and standard deviation of c:",m1_std) # print("Mean of n:",m2_mean,"and standard deviation of n:",m2_std) # Generation of data to plot the result of the fit. tfit = np.arange(0,max(t2),0.0001); fit = fitfunc(tfit, k_fit[0], k_fit[1], k_fit[2], k_fit[3]) fit2 = hill(tfit, k_fit[0], k_fit[1], k_fit[2], k_fit[3]) for i in range(len(tfit)): tfit[i]+=tspan[0] # Plot fig 1D and fig 1E. if dataset=="ST": plt.scatter(tspan, Ca_data, label='ST Exp. Data',color="black", s=20) elif dataset=="SV": plt.scatter(tspan, Ca_data, label='SV Exp. Data',color="black", s=20) plt.plot(tfit, fit, label='Growth model',color="g") plt.gca().spines['right'].set_color('none') plt.gca().spines['top'].set_color('none') if dataset=="ST": plt.title("Snout to tail length (ST) in Axolotl") elif dataset=="SV": plt.title("Snout to vent length (SV) in Axolotl") plt.legend(loc=7) plt.xlabel("Age (month)") if dataset=="ST": plt.ylabel("ST length (cm)") elif dataset=="SV": plt.ylabel("SV length (cm)") if dataset=="ST": plt.savefig("Fig1D.png",dpi=500) elif dataset=="SV": plt.savefig("Fig1E.png",dpi=500) plt.show() plt.close() # Plot fig 1F and fig 1G. plt.plot(tfit, fit2, label='Growth rate',color="blue") plt.vlines(1/k_fit[1], 0, max(fit2), linestyle="dashed", label='Transition age',color="purple") plt.gca().spines['right'].set_color('none') plt.gca().spines['top'].set_color('none') plt.legend(loc=(0.6,0.15)) plt.xlabel("Age (month)") plt.ylabel("Growth rate (1/month)") # Create a zoom of the plot inside the plot. sub_axes = plt.axes([.47, .47, .4, .4]) sub_axes.plot(tfit, fit2, label='Growth rate',color="blue") sub_axes.vlines(1/k_fit[1], 0, max(fit2), linestyle="dashed", label='Transition age',color="purple") sub_axes.spines['right'].set_color('none') sub_axes.spines['top'].set_color('none') sub_axes.set_xlabel("Age (month)") sub_axes.set_ylabel("Growth rate (1/month)") sub_axes.set_xlim(0,20) if dataset=="ST": plt.savefig("Fig1F.png",dpi=500) elif dataset=="SV": plt.savefig("Fig1G.png",dpi=500) plt.show() plt.close()The next cell plots fig 1H from the paper. Run the following cell to plot fig 1H from the paper and to save it as a png image.# Import libraries. import numpy as np from scipy.optimize import curve_fit from scipy.integrate import odeint import pandas as pd import matplotlib.pyplot as plt import random import copy import scipy.stats # Read the data. 
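# Fig 1H below plots the individual SV and ST measurements per sex, with horizontal
# lines at each group's mean and mean +/- one standard deviation, and a bracket with
# asterisks marking the ST male vs. female comparison.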
df = pd.read_excel(io="SV_ST_1yearold-formated-for-jupyter.xlsx", sheet_name="Sheet2") SVMale = df["SV Male"] SVFemale = df["SV Female"] STMale = df["ST Male"] STFemale = df["ST Female"] # Define the positions where the data is going to be drawn. pos1=np.arange(1,2,1/len(SVMale)) pos2=np.arange(3,4,1/len(SVFemale)) pos3=np.arange(5,6,1/len(STMale)) pos4=np.arange(7,8,1/len(STFemale)) # Plot for Male SV. plt.scatter(pos1,SVMale,color="black",label="Male") plt.hlines(np.mean(SVMale),1,2) plt.hlines(np.mean(SVMale)+np.std(SVMale),1,2,linestyles="dashed") plt.hlines(np.mean(SVMale)-np.std(SVMale),1,2,linestyles="dashed") # Plot for Female SV. plt.scatter(pos2,SVFemale,facecolors='none', edgecolors='black',label="Female") plt.hlines(np.mean(SVFemale),3,4) plt.hlines(np.mean(SVFemale)+np.std(SVFemale),3,4,linestyles="dashed") plt.hlines(np.mean(SVFemale)-np.std(SVFemale),3,4,linestyles="dashed") # Plot for Male ST. plt.scatter(pos3,STMale,color="black") plt.hlines(np.mean(STMale),5,6) plt.hlines(np.mean(STMale)+np.std(STMale),5,6,linestyles="dashed") plt.hlines(np.mean(STMale)-np.std(STMale),5,6,linestyles="dashed") # Plot for Female ST. plt.scatter(pos4,STFemale,facecolors='none', edgecolors='black') plt.hlines(np.mean(STFemale),7,8) plt.hlines(np.mean(STFemale)+np.std(STFemale),7,8,linestyles="dashed") plt.hlines(np.mean(STFemale)-np.std(STFemale),7,8,linestyles="dashed") # Draw the information related to the probability. plt.text(0.725, 0.93, "***", transform=plt.gca().transAxes, size='xx-large') plt.hlines(23, 5.5, 7.5) plt.vlines(5.5, 22.5, 23.4) plt.vlines(7.5, 22.5, 23.4) # Format. plt.legend(loc=(0.7,0.2)) plt.ylim(0,25) my_xticks = ['SV','ST'] x=[2.5,6.5] plt.ylabel("Length (cm)") plt.title("Sex and size correlation") plt.gca().spines['right'].set_color('none') plt.gca().spines['top'].set_color('none') plt.xticks(x, my_xticks) plt.savefig("Fig1H.png",dpi=300) plt.show() plt.close()Covid Severity Classifier- A Machine Learning system to predict the severity of a patient's Covid-19 based on their symptoms- Created with the [Kaggle Covid-19 Patient Symptoms](https://www.kaggle.com/bitsofishan/covid19-patient-symptoms) dataset- It preprocesses the data- Defines a Deep Neural Network model in Tensorflow- Trains the Machine Learning model- And outputs the model and some required data to be used in a Django web applicationimport numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) import matplotlib.pyplot as plt import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) !pip install openpyxl !pip install missingno import openpyxl import missingno as msno import tensorflow as tf import tensorflow.keras.layers as layers from sklearn.model_selection import train_test_split %matplotlib inlineLoad the Dataraw_df = pd.read_excel('../input/covid19-patient-symptoms/COVID-19.xlsx') raw_df.head()Exploratory Data Analysis- Analyze the data to figure out what needs to be done to it before it can be used for machine learningmsno.matrix(raw_df) for i in raw_df.columns: print(f"column name: {i}, unique values: {len(raw_df[i].unique())}")column name: Sno, unique values: 127 column name: age, unique values: 48 column name: gender, unique values: 2 column name: body temperature, unique values: 29 column name: Dry Cough, unique values: 2 column name: sour throat, unique values: 2 column name: weakness, unique values: 2 column name: breathing problem, unique values: 2 column name: drowsiness, unique values: 2 column name: pain in chest, unique values: 2 column name: travel history to infected countries, unique values: 2 column name: diabetes, unique values: 2 column name: heart disease, unique values: 2 column name: lung disease, unique values: 2 column name: stroke or reduced immunity, unique values: 2 column name: symptoms progressed, unique values: 2 column name: high blood pressue, unique values: 2 column name: kidney disease, unique values: 2 column name: change in appetide, unique values: 2 column name: Loss of sense of smell, unique values: 2 column name: Corona result, unique values: 3 column name: Unnamed: 21, uni[...]Data Preprocessing Missing Values- all columns are good, with no missing values- Except for `Unnamed: 21`, `Unnamed: 22` and `Unnamed: 23`- We'll remove them because they won't help the final model Relevant Columns- `Corona result` is the severity of the patients covid symptoms- `Sno, gender, and body temperature` are not relevant for our model or difficult to ask Scaling- All columns we will use are categorical except for age- So leave all columns as they are except for age- Normalize age with min max scaling- Store minimum and maximum so web app can scale age appropriately as well Conclusion- So we will drop the columns `Unnamed: 21, Unnamed: 22, Unnamed: 23, Sno, Gender and body temperature`less_data = raw_df.drop(['Sno','Unnamed: 21', 'Unnamed: 22', 'Unnamed: 23', 'gender', 'body temperature'], axis=1) less_data.head() # Min-Max Normalization age_min = min(raw_df["age"]) age_max = max(raw_df["age"]) target_min = min(raw_df["Corona result"]) target_max = max(raw_df["Corona result"]) print(age_min, age_max) print(target_min, target_max) def min_max_scaling(min_val, max_val, df, col_name): def min_max_scale_val(val): return (val - min_val) / (max_val - min_val) return df.apply(lambda x: min_max_scale_val(x) if x.name == col_name else x) scaled_data = min_max_scaling(age_min, age_max, less_data, "age") scaled_data = min_max_scaling(target_min, target_max, scaled_data, "Corona result") scaled_data.head() print(scaled_data.columns) scores = { "age": 7, "Dry Cough": 2, "sour throat": 1, "weakness": 4, "breathing problem": 7, "drowsiness": 3, "pain in chest" : 8, "travel history to infected countries": 5, "diabetes": 6, "heart disease": 8, "lung disease": 10, "stroke or reduced immunity": 10, "symptoms progressed": 7, "high blood pressue": 6, "kidney disease": 7, "change in 
appetide": 2, "Loss of sense of smell": 1, } print(len(scores) == len(scaled_data.columns)) for key in scores: scores[key] /= 10 scores def heurestic_score(data): score = 0 for i in range(len(list(scores))): this_symptom = list(scores.keys())[i] score += data[i] * scores[this_symptom] return score heurestic_scores = [] for i in range(len(scaled_data)): this_data = list(scaled_data.iloc[i]) this_score = heurestic_score(this_data) heurestic_scores.append(this_score) heurestic_scores[:10] max_heurestic_score = max(heurestic_scores) min_heurestic_score = min(heurestic_scores) scaled_heurestic_scores = [] for i in range(len(heurestic_scores)): def scale_heurestic_score(score): return score / max_heurestic_score scaled_heurestic_scores.append(scale_heurestic_score(heurestic_scores[i])) print(scaled_heurestic_scores[:20]) target = scaled_data.pop('Corona result', in_place) target.head() scaled_data.insert(0, "heurestic score", scaled_heurestic_scores) shuffled_data = scaled_data.sample(frac = 1) shuffled_data.head()Tensorflow Data Pipeline- Create a data pipeline to transform the data so it can be used to train the Machine Learning modeldataset = tf.data.Dataset.from_tensor_slices((shuffled_data.values, target.values)) train_dataset = dataset.shuffle(len(shuffled_data)).batch(1)Define Model- Define the architecture and training strategy of the deep learning model- We are using a straight-forward deep learning model with 21 layers- Training will use accuracy as the main metricdef get_compiled_model(): model = tf.keras.Sequential([ tf.keras.layers.Dense(10, activation='relu'), tf.keras.layers.Dense(10, activation='relu'), tf.keras.layers.Dense(1) ]) model.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), metrics=['accuracy']) return modelTrain Modelmodel = get_compiled_model() history = model.fit(train_dataset, epochs=15)Epoch 1/15 127/127 [==============================] - 1s 1ms/step - loss: 0.7215 - accuracy: 0.2579 Epoch 2/15 127/127 [==============================] - 0s 1ms/step - loss: 0.6954 - accuracy: 0.2639 Epoch 3/15 127/127 [==============================] - 0s 1ms/step - loss: 0.6814 - accuracy: 0.2500 Epoch 4/15 127/127 [==============================] - 0s 1ms/step - loss: 0.6879 - accuracy: 0.2385 Epoch 5/15 127/127 [==============================] - 0s 1ms/step - loss: 0.6796 - accuracy: 0.2139 Epoch 6/15 127/127 [==============================] - 0s 1ms/step - loss: 0.6727 - accuracy: 0.3041 Epoch 7/15 127/127 [==============================] - 0s 1ms/step - loss: 0.6648 - accuracy: 0.3296 Epoch 8/15 127/127 [==============================] - 0s 2ms/step - loss: 0.6725 - accuracy: 0.2933 Epoch 9/15 127/127 [==============================] - 0s 1ms/step - loss: 0.6689 - accuracy: 0.3273 Epoch 10/15 127/127 [==============================] - 0s 1ms/step - loss: 0.6660 - accuracy: 0.2974[...]Analyze Modeltest_dataset = model.evaluate(dataset.batch(1)) plt.plot([i + 1 for i in range(15)], history.history['loss']) min_pred = float("inf") max_pred = -float("inf") for i in scaled_data.values: this_pred = model.predict(tf.reshape(list(i), (1, 17))) if this_pred < min_pred: min_pred = this_pred elif this_pred > max_pred: max_pred = this_pred print(min_pred, max_pred) scaled_data.values pred_range = (max_pred - min_pred)[0][0] category_size = (pred_range / 3) print(pred_range, category_size) def predict_severity(inp): ''' - given a list containing a Patient's symptoms - predict the severity of their disease - from 0 as least severe, to 2 as most severe ''' 
this_pred = model.predict(tf.reshape(list(inp), (1, 17)))[0][0] category = -1 if this_pred < min_pred + category_size: category = 0 elif this_pred >= min_pred + category_size and this_pred <= max_pred - category_size: category = 1 else: category = 2 return category for i in scaled_data.values: print( predict_severity(i), end=" ,") model.save('/kaggle/working/model') import shutil shutil.make_archive('/kaggle/working/model', 'zip', '/kaggle/working/model')A\* Route Planner Using Advanced Data Structures.We will implement A\* search to implement a "Google-maps" style route planning algorithm. PathPlanner class`__init__` - We initialize our path planner with a map, M, and typically a start and goal node. If either of these are `None`, the rest of the variables here are also set to none. - `closedSet` includes any explored/visited nodes. - `openSet` are any nodes on our frontier for potential future exploration. - `cameFrom` will hold the previous node that best reaches a given node- `gScore` is the `g` in our `f = g + h` equation, or the actual cost to reach our current node- `fScore` is the combination of `g` and `h`, i.e. the `gScore` plus a heuristic; total cost to reach the goal- `path` comes from the `run_search` function..`reconstruct_path` - This function just rebuilds the path after search is run, going from the goal node backwards using each node's `cameFrom` information.`_reset` - Resets *most* of our initialized variables for PathPlanner. This *does not* reset the map, start or goal variables.`run_search` - The method checks whether the map, goal and start have been added to the class. Then, it will also check if the other variables, other than `path` are initialized (note that these are only needed to be re-run if the goal or start were not originally given when initializing the class.`is_open_empty`, is used to check whether there are still nodes to explore. If we're at our goal, we reconstruct the path. If not, we move our current node from the frontier (`openSet`) and into explored (`closedSet`). Then, we check out the neighbors of the current node, check out their costs, and plan our next move. The Mapfrom helpers import Map, load_map_10, load_map_40, show_map import math %load_ext autoreload %autoreload 2Map Basicsmap_10 = load_map_10() show_map(map_10)The map above shows a disconnected network of 10 intersections. The two intersections on the left are connected to each other but they are not connected to the rest of the road network. This map is quite literal in its expression of distance and connectivity. On the graph above, the edge between 2 nodes(intersections) represents a literal straight road not just an abstract connection of 2 cities.These `Map` objects have two properties we will use to implement A\* search: `intersections` and `roads`**Intersections**The `intersections` are represented as a dictionary. In this example, there are 10 intersections, each identified by an x,y coordinate. The coordinates are listed below. 
You can hover over each dot in the map above to see the intersection number.map_10.intersections**Roads**The `roads` property is a list where `roads[i]` contains a list of the intersections that intersection `i` connects to.# this shows that intersection 0 connects to intersections 7, 6, and 5 map_10.roads[0] start=5 coords=map_10.intersections[start] x=coords[0] y=coords[1] print(x) print(y) # This shows the full connectivity of the map map_10.roads len(map_10.intersections) # map_40 is a bigger map than map_10 map_40 = load_map_40() show_map(map_40)Advanced VisualizationsThe map above shows a network of roads which spans 40 different intersections (labeled 0 through 39). The `show_map` function which generated this map also takes a few optional parameters which might be useful for visualizing the output of the search algorithm you will write.* `start` - The "start" node for the search algorithm.* `goal` - The "goal" node.* `path` - An array of integers which corresponds to a valid sequence of intersection visits on the map.# run this code, note the effect of including the optional # parameters in the function call. show_map(map_40, start=5, goal=34, path=[5,16,37,12,34])Pathplanner Classimport math import heapq class PathPlanner(): """Construct a PathPlanner Object""" def __init__(self, M, start=None, goal=None): """ """ self.map = M self.start= start self.goal = goal self.closedSet = self.create_closedSet() if goal != None and start != None else None self.openSet = self.create_openSet() if goal != None and start != None else None self.cameFrom = self.create_cameFrom() if goal != None and start != None else None self.gScore = self.create_gScore() if goal != None and start != None else None self.fScore = self.create_fScore() if goal != None and start != None else None self.path = self.run_search() if self.map and self.start != None and self.goal != None else None def reconstruct_path(self, current): """ Reconstructs path after search """ total_path = [current] while current in self.cameFrom.keys(): current = self.cameFrom[current] total_path.append(current) return total_path def _reset(self): """Private method used to reset the closedSet, openSet, cameFrom, gScore, fScore, and path attributes""" self.closedSet = None self.openSet = None self.cameFrom = None self.gScore = None self.fScore = None self.path = self.run_search() if self.map and self.start and self.goal else None def run_search(self): """ """ if self.map == None: raise(ValueError, "Must create map before running search. Try running PathPlanner.set_map(start_node)") if self.goal == None: raise(ValueError, "Must create goal node before running search. Try running PathPlanner.set_goal(start_node)") if self.start == None: raise(ValueError, "Must create start node before running search. 
Try running PathPlanner.set_start(start_node)") self.closedSet = self.closedSet if self.closedSet != None else self.create_closedSet() self.openSet = self.openSet if self.openSet != None else self.create_openSet() self.cameFrom = self.cameFrom if self.cameFrom != None else self.create_cameFrom() self.gScore = self.gScore if self.gScore != None else self.create_gScore() self.fScore = self.fScore if self.fScore != None else self.create_fScore() while not self.is_open_empty(): current = self.get_current_node() if current == self.goal: self.path = [x for x in reversed(self.reconstruct_path(current))] return self.path else: self.openSet.remove(current) self.closedSet.add(current) for neighbor in self.get_neighbors(current): if neighbor in self.closedSet: continue # Ignore the neighbor which is already evaluated. if not neighbor in self.openSet: # Discover a new node self.openSet.add(neighbor) heapq.heappush(self.openHeap, (self.get_tentative_gScore(current, neighbor) ,neighbor)) # The distance from start to a neighbor #the "dist_between" function may vary as per the solution requirements. if self.get_tentative_gScore(current, neighbor) >= self.get_gScore(neighbor): continue # This is not a better path. # This path is the best until now. Record it! self.record_best_path_to(current, neighbor) print("No Path Found") self.path = None return False def create_closedSet(self): """ Creates and returns a data structure suitable to hold the set of nodes already evaluated""" return set() def create_openSet(self): """ Creates and returns a data structure suitable to hold the set of currently discovered nodes that are not evaluated yet. Initially, only the start node is known.""" if self.start != None: self.openHeap=[] heapq.heappush(self.openHeap, (0, self.start)) return set([self.start]) raise(ValueError, "Must create start node before creating an open set. Try running PathPlanner.set_start(start_node)") def create_cameFrom(self): """Creates and returns a data structure that shows which node can most efficiently be reached from another, for each node.""" cameFrom = {} return cameFrom def create_gScore(self): """Creates and returns a data structure that holds the cost of getting from the start node to that node, for each node. The cost of going from start to start is zero.""" g_scores=[ float("infinity") for _ in range(len(self.map.intersections))] g_scores[self.start]=0.0 return g_scores def create_fScore(self): """Creates and returns a data structure that holds the total cost of getting from the start node to the goal by passing by that node, for each node. That value is partly known, partly heuristic. For the first node, that value is completely heuristic.""" f_scores=[ float("infinity") for _ in range(len(self.map.intersections))] f_scores[self.start]= 1000 return f_scores def set_map(self, M): """Method used to set map attribute """ self._reset(self) self.start = None self.goal = None self.map=M def set_start(self, start): """Method used to set start attribute """ self._reset(self) self.start=start def set_goal(self, goal): """Method used to set goal attribute """ self._reset(self) self.goal=goal # TODO: Set goal value. def is_open_empty(self): """returns True if the open set is empty. False otherwise. """ # TODO: Return True if the open set is empty. False otherwise. 
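# openSet (a plain set) tracks which nodes are currently on the frontier, while
# openHeap is a parallel priority queue used by get_current_node() to pop the entry
# with the lowest recorded score; heap entries keep the score they were pushed with,
# so they are not re-prioritised if a cheaper path to that node is found later.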
if len(self.openSet): return False else: return True def get_current_node(self): """ Returns the node in the open set with the lowest value of f(node).""" #cost, node = heapq.heappop(self.openHeap) return heapq.heappop(self.openHeap)[1] # inefficient: # node_scores=[] # openList = list(self.openSet) #to enable indexing # for node in openList: # node_scores.append(self.calculate_fscore(node)) # min_index=node_scores.index(min(node_scores)) # print("self.openHeap",self.openHeap) # print(node_scores) # return openList[min_index] def get_neighbors(self, node): """Returns the neighbors of a node""" return self.map.roads[node] def get_gScore(self, node): """Returns the g Score of a node""" return self.gScore[node] def distance(self, node_1, node_2): """ Computes the Euclidean L2 Distance""" node1_coords=self.map.intersections[node_1] node2_coords=self.map.intersections[node_2] return math.sqrt((node1_coords[0]-node2_coords[0])**2+ (node1_coords[1]-node2_coords[1])**2) def get_tentative_gScore(self, current, neighbor): """Returns the tentative g Score of a node + distance from the current node to it's neighbors""" return self.gScore[current]+self.distance(current, neighbor) def heuristic_cost_estimate(self, node): """ Returns the heuristic cost estimate of a node """ if self.goal != None: return self.distance(node, self.goal) raise(ValueError, "Must create goal node before.") def calculate_fscore(self, node): """Calculate the f score of a node.F = G + H """ return self.get_gScore(node)+self.heuristic_cost_estimate(node) def record_best_path_to(self, current, neighbor): """Record the best path to a node by updating cameFrom, gScore, and fScore """ self.cameFrom[neighbor]=current self.gScore[neighbor]=self.get_tentative_gScore(current, neighbor) self.fScore[neighbor]=self.get_gScore(neighbor)+ self.heuristic_cost_estimate(neighbor) #Reference:https://en.wikipedia.org/wiki/A*_search_algorithmVisualizeLet's visualize the results of the algorithm!# Visualize your the result of the above test! You can also change start and goal here to check other paths start = 5 goal = 34 show_map(map_40, start=start, goal=goal, path=PathPlanner(map_40, start, goal).path) from test import test test(PathPlanner) # Visualize your the result of the above test! 
You can also change start and goal here to check other paths start = 5 goal = 35 show_map(map_40, start=start, goal=goal, path=PathPlanner(map_40, start, goal).path)**TESS TICA light curve v2**plt.plot(x,y) tess_texp = np.median(np.diff(x)) #plt.plot(temp1[:,0],temp1[:,1]) tab = Table.read('/Users/arcticfox/Documents/v1298tau/tess/ttvs.csv',format='csv') #Stellar parameters M_star = 1.10, 0.05 R_star = 1.305, 0.07 include_planet_e = True if include_planet_e == True: #Livingston ephemeris t0s = np.array([2231.281202 - 0.75*4.66/24, 2239.400529 + 0.5*5.59/24, 2234.046461 - 0.5*6.42/24, 2263.6229, 4644.08]) tess_t0s = np.array([4689.399860318306, 4682.6055129254755, 4648.09023, 4648.79668]) periods = np.array([8.249147, 12.401369, 24.141445, 36.695032307689445]) rors = np.array([0.0381, 0.0436, 0.0636, 0.0664]) depths = np.array(1e3*(rors**2)) t14s = np.array([4.66, 5.59, 6.42, 7.45])/24.0 elif include_planet_e == False: t0s = np.array([2231.281202, 2239.400529, 2234.046461])# - x_ref periods = np.array([8.249147, 12.401369, 24.141445]) rors = np.array([0.0381, 0.0436, 0.0700]) depths = np.array(1e3*(rors**2)) # Number of planets to be included in fit n_pl = len(t0s) # Compute the expected transit times for a linear ephemeris expected_transit_times = xo.orbits.ttv.compute_expected_transit_times( x.min(), x.max()+100, periods, tess_t0s, )Before we start fitting the light curve, let's see if we can identify the transits by eye. x.min(),x.max() Time(expected_transit_times[2],format='bkjd').datetime#.to('datetime') nrows = 18 xmin = int(x.min()) + 3*np.arange(nrows) fig,axes = plt.subplots(nrows=nrows, ncols=1, figsize=(10,4*nrows)) for n in range(nrows): ax = axes[n] for i,let in enumerate("cdbe"): ttimes = expected_transit_times[i] for j,_tt in enumerate(ttimes): if (_tt>x.min()) & (_tt<x.max()): ax.axvline(_tt)
It looks like:
1. Planet d arrives late at BKJD = 4645.4
2. Planet b arrives early at BKJD = 4648.1
3. Planet c arrives late at BKJD = 4648.5? There is a dip right before the ingress of planet e, with seemingly the right duration. It's hard to tell for sure because the noise changes significantly around the transit.
4. Planet e transits around BKJD = 4648.8
The data beyond BKJD = 4651.5 are corrupted, so let's remove it.
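For reference, a linear ephemeris simply predicts transits at t_n = t0 + n*P, which is essentially what the expected-transit-time calculation above returns for each planet. A minimal numpy sketch of that idea (the helper name is illustrative; x, periods and tess_t0s are the quantities defined above):
```python
import numpy as np

def linear_ephemeris_transits(t_min, t_max, period, t0):
    """All transit times t0 + n*period that fall inside [t_min, t_max]."""
    n_min = int(np.ceil((t_min - t0) / period))
    n_max = int(np.floor((t_max - t0) / period))
    return t0 + period * np.arange(n_min, n_max + 1)

# e.g. one array of expected times per planet over the observing window:
# expected = [linear_ephemeris_transits(x.min(), x.max() + 100, P, t0)
#             for P, t0 in zip(periods, tess_t0s)]
```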
**The SimpleTransitOrbit model** (fitting in terms of duration)ett = np.array([expected_transit_times[0][0], expected_transit_times[1][0], expected_transit_times[2][0], expected_transit_times[3][0], ]) ett periods = np.array([8.249147, 12.401369, 24.141445, 48.0]) t0s = np.round(ett,2)+0.0# #np.array([4648.14, 4645.4, 4648.1, 4648.8]) rors = np.array([0.0381, 0.0436, 0.0700, 0.0611]) depths = np.array(1e3*(rors**2)) durations = np.array([4.66, 5.59, 6.42, 7.45])/24.0 n_pl = len(periods) R_star = 1.305, 0.07 #x = tica2_bkjd #y = tica2_f1 m = (np.isfinite(x)) & (np.isfinite(y))# & (x<4651.5) x = np.ascontiguousarray(x[m], dtype=np.float64) y = np.ascontiguousarray(y[m], dtype=np.float64) yerr = np.ascontiguousarray(yerr[m], dtype=np.float64) # These arrays are used as the times/phases where the models are # evaluated at higher resolution for plotting purposes phase_lc = np.linspace(-0.3, 0.3, 100) plt.errorbar(x,y,yerr=yerr, marker='.', linestyle='') # These arrays are used as the times/phases where the models are # evaluated at higher resolution for plotting purposes phase_lc = np.linspace(-0.3, 0.3, 100) # Required changes: # We can have different depths for K2 and TESS def build_model(mask=None, start=None, ttvs=False, eccentric=False): if mask is None: mask = np.ones(len(x), dtype=bool) with pm.Model() as model: # Parameters for the stellar properties BoundedNormal = pm.Bound(pm.Normal, lower=0, upper=3) m_star = BoundedNormal("m_star", mu=M_star[0], sd=M_star[1]) r_star = BoundedNormal("r_star", mu=R_star[0], sd=R_star[1]) u_star = xo.QuadLimbDark("u_star") star = xo.LimbDarkLightCurve(u_star) # Fit in terms of transit depth (assuming b<1) b = pm.Uniform("b", lower=0, upper=1, shape=n_pl) #log_depth_tess = pm.Normal("log_depth_tess", mu=np.log(depths), sigma=2.0, shape=n_pl) log_depth_tess = pm.Normal("log_depth_tess", mu=np.log(depths), sigma=0.1, shape=n_pl) ror_tess = pm.Deterministic("ror_tess", star.get_ror_from_approx_transit_depth( 1e-3 * tt.exp(log_depth_tess), b ), ) r_pl_tess = pm.Deterministic("r_pl_tess", ror_tess * r_star) r_pl_rade = pm.Deterministic("r_pl_rade", ror_tess * r_star * c.R_sun/c.R_earth) ecc = np.zeros(n_pl) omega = np.pi/2*np.ones(n_pl) # Orbital parameters for the planets t0 = pm.Normal("t0", mu=np.array(t0s), sd=1, shape=n_pl) log_period = pm.Normal("log_period", mu=np.log(periods), sd=1, shape=n_pl) period = pm.Deterministic("period", tt.exp(log_period)) # Orbit models orbit = xo.orbits.KeplerianOrbit( r_star=r_star, m_star=m_star, period=period, t0=t0, b=b, ecc=ecc, omega=omega, ) ######################################################################################## ######################################################################################## # Compute the model light curve mean_tess = pm.Normal("mean_tess", mu=0.0, sd=10.0) # Quadratic trend for varying background flux trend = pm.Normal( "trend", mu=0, sd=10.0 ** -np.arange(3)[::-1], shape=3 ) # Define the background model A = np.vander(x, 3) bkg = pm.Deterministic("bkg", tt.dot(A, trend)) light_curves_tess = ( star.get_light_curve( orbit=orbit, r=r_pl_tess, t=x[mask], texp=tess_texp) * 1e3 ) light_curve_tess = pm.math.sum(light_curves_tess, axis=-1) + mean_tess resid_tess = y[mask] - light_curve_tess - bkg[mask] # Transit jitter & GP parameters log_sigma_lc_tess = pm.Normal("log_sigma_lc_tess", mu=np.log(0.01*np.std(yerr[mask])), sd=10) log_sigma_jit_tess = pm.Normal("log_sigma_jit_tess", mu=np.log(0.02*np.std(yerr[mask])), sd=10) yerr_tess = pm.Deterministic("yerr_tess", 
tt.exp(log_sigma_lc_tess) + tt.exp(2*log_sigma_jit_tess)*(light_curve_tess**2)) #yerr_tess = pm.Deterministic("yerr_tess", tt.exp(log_sigma_lc_tess)) #The parameters of the RotationTerm kernel sigma_rot_tess = pm.InverseGamma( "sigma_rot_tess", **pmx.estimate_inverse_gamma_parameters(1.0, 5.0) ) log_period_rot_tess = pm.Normal("log_period_rot_tess", mu=np.log(2.87), sigma=2.0) period_rot_tess = pm.Deterministic("period_rot_tess", tt.exp(log_period_rot_tess)) log_Q0_rot_tess = pm.HalfNormal("log_Q0_rot_tess", sigma=2.0) log_dQ_rot_tess = pm.Normal("log_dQ_rot_tess", mu=0.0, sigma=2.0) f_rot_tess = pm.Uniform("f_rot_tess", lower=0.1, upper=1.0) kernel_tess = terms.RotationTerm( sigma=sigma_rot_tess, period=period_rot_tess, Q0=tt.exp(log_Q0_rot_tess), dQ=tt.exp(log_dQ_rot_tess), f=f_rot_tess, ) gp_tess = GaussianProcess(kernel_tess, t=x[mask], yerr=yerr_tess) gp_tess.marginal("transit_obs_tess", observed=resid_tess) #Compute and save the phased light curve models pm.Deterministic( "lc_pred", 1e3 * tt.stack( [ star.get_light_curve( orbit=orbit, r=r_pl_tess, t=t0[n] + phase_lc, texp=tess_texp )[..., n] for n in range(n_pl) ], axis=-1, ), ) # Fit for the maximum a posteriori parameters, I've found that I can get # a better solution by trying different combinations of parameters in turn if start is None: start = model.test_point map_soln = pmx.optimize(start=start, vars=trend) map_soln = pmx.optimize(start=map_soln, vars=[log_period, t0]) map_soln = pmx.optimize(start=map_soln, vars=[b, log_depth_tess]) map_soln = pmx.optimize(start=map_soln, vars=[sigma_rot_tess, log_period_rot_tess, log_Q0_rot_tess, log_dQ_rot_tess, f_rot_tess, mean_tess, ] ) map_soln = pmx.optimize(start=map_soln) extras = dict( zip( ["light_curves_tess", "gp_pred_tess"], pmx.eval_in_model([light_curves_tess, gp_tess.predict(resid_tess)], map_soln), ) ) return model, map_soln, extras, orbit model0, map_soln0, extras0, orbit0 = build_model(ttvs=True) map_soln0['yerr_tess'] np.nanstd(x) yerr_tess = np.ascontiguousarray(map_soln0['yerr_tess'] + 0.0, dtype=np.float64) np.random.seed(123) yerr_tess = np.ascontiguousarray(np.random.normal(np.nanmedian(lk_43.flux_err.value), np.nanstd(lk_43.flux_err.value), len(x)), dtype=np.float64) plt.errorbar(x,y,yerr=yerr_tess) def depth_duration_model(ttvs=False): with pm.Model() as model: # Physical parameters that will be sampled BoundedNormal = pm.Bound(pm.Normal, lower=0, upper=3) r_star = BoundedNormal("r_star", mu=R_star[0], sd=R_star[1]) m_star = BoundedNormal("m_star", mu=M_star[0], sd=M_star[1]) u_star = xo.QuadLimbDark("u_star") star = xo.LimbDarkLightCurve(u_star) b = pm.Uniform("b", lower=0, upper=1, shape=n_pl) #t0 = pm.Normal("t0", mu=t0s, sigma=0.1, shape=n_pl) #log_period = pm.Normal("log_period", mu=np.log(periods), # sigma=0.1, shape=n_pl) log_depth = pm.Normal("log_depth", mu=np.log(depths), sigma=0.1, shape=n_pl) log_duration = pm.Normal("log_duration", mu=np.log(durations), sigma=0.1, shape=n_pl) # Track parameters of interest as deterministics duration = pm.Deterministic("duration", tt.exp(log_duration)) ror = pm.Deterministic("ror", star.get_ror_from_approx_transit_depth( 1e-3 * tt.exp(log_depth), b ), ) r_pl_tess = pm.Deterministic("r_pl_tess", ror * r_star) r_pl_rade = pm.Deterministic("r_pl_rade", ror * r_star * c.R_sun/c.R_earth) ecc = np.zeros(n_pl) omega = np.pi/2*np.ones(n_pl) if ttvs==True: # Now we have a parameter for each transit time of each planet: transit_times = [] for i in range(n_pl): transit_times.append( pm.Normal( "tts_{0}".format(i), 
mu=expected_transit_times[i], sd=0.1, #Change this back to 0.1 to work shape=len(expected_transit_times[i]), ) ) # Set up an orbit for the planets orbit = xo.orbits.TTVOrbit( r_star=r_star, m_star=m_star, b=b, ecc=ecc, omega=omega, transit_times=transit_times) # It will be useful later to track some parameters of the orbit t0 = pm.Deterministic("t0", orbit.t0) period = pm.Deterministic("period", orbit.period) log_period = pm.Normal("log_period", mu=np.log(periods), sigma=0.1, shape=n_pl) for i in range(n_pl): pm.Deterministic("ttvs_{0}".format(i), orbit.ttvs[i]) #period = pm.Deterministic("period", tt.exp(log_period)) elif ttvs==False: # Orbital parameters for the planets t0 = pm.Normal("t0", mu=np.array(t0s), sd=1, shape=n_pl) log_period = pm.Normal("log_period", mu=np.log(periods), sd=1, shape=n_pl) period = pm.Deterministic("period", tt.exp(log_period)) # Orbit models orbit = xo.orbits.KeplerianOrbit( r_star=r_star, m_star=m_star, period=period, t0=t0, b=b, ecc=ecc, omega=omega, ) # Quadratic trend for varying background flux trend = pm.Normal( "trend", mu=0, sd=10.0 ** -np.arange(3)[::-1], shape=3 ) # Define the background model A = np.vander(x, 3) bkg = pm.Deterministic("bkg", tt.dot(A, trend)) #Compute the light curve model mean_tess = pm.Normal("mean_tess", mu=0.0, sd=10.0) light_curves_tess = ( star.get_light_curve( orbit=orbit, r=r_pl_tess, t=x, texp=tess_texp) * 1e3 ) light_curve_tess = pm.math.sum(light_curves_tess, axis=-1) + mean_tess resid_tess = y - light_curve_tess - bkg # Transit jitter & GP parameters log_sigma_lc_tess = pm.Normal("log_sigma_lc_tess", mu=np.log(0.01*np.std(y)), sd=5) log_sigma_jit_tess = pm.Normal("log_sigma_jit_tess", mu=np.log(0.02*np.std(y)), sd=5) #yerr_tess = pm.Deterministic("yerr_tess", tt.exp(log_sigma_lc_tess) + tt.exp(2*log_sigma_jit_tess)*(light_curve_tess**2)) #yerr_tess = pm.Deterministic("yerr_tess", tt.exp(log_sigma_lc_tess)) #The parameters of the RotationTerm kernel sigma_rot_tess = pm.InverseGamma( "sigma_rot_tess", **pmx.estimate_inverse_gamma_parameters(1.0, 5.0) ) log_period_rot_tess = pm.Normal("log_period_rot_tess", mu=np.log(2.87), sigma=2.0) period_rot_tess = pm.Deterministic("period_rot_tess", tt.exp(log_period_rot_tess)) log_Q0_rot_tess = pm.HalfNormal("log_Q0_rot_tess", sigma=2.0) log_dQ_rot_tess = pm.Normal("log_dQ_rot_tess", mu=0.0, sigma=2.0) f_rot_tess = pm.Uniform("f_rot_tess", lower=0.1, upper=1.0) kernel_tess = terms.RotationTerm( sigma=sigma_rot_tess, period=period_rot_tess, Q0=tt.exp(log_Q0_rot_tess), dQ=tt.exp(log_dQ_rot_tess), f=f_rot_tess, ) gp = GaussianProcess(kernel_tess, t=x, yerr=yerr_tess) gp.marginal("transit_obs", observed=resid_tess) # Compute and save the phased light curve models if ttvs == False: pm.Deterministic( "lc_pred_tess", 1e3 * tt.stack( [ star.get_light_curve( orbit=orbit, r=r_pl_tess, t=t0[n] + phase_lc, texp=tess_texp )[..., n] for n in range(n_pl) ], axis=-1, ), ) # Perform optimization start = model.test_point map_soln = pmx.optimize(start=start, vars=trend) if ttvs==True: map_soln = pmx.optimize(start=map_soln, vars=transit_times) elif ttvs==False: map_soln = pmx.optimize(start=map_soln, vars=[log_period, t0]) map_soln = pmx.optimize(start=map_soln, vars=[b, log_depth]) if ttvs == True: map_soln = pmx.optimize(start=map_soln, vars=transit_times) map_soln = pmx.optimize(start=map_soln, vars=[sigma_rot_tess, log_period_rot_tess, log_Q0_rot_tess, log_dQ_rot_tess, f_rot_tess, mean_tess, ] ) map_soln = pmx.optimize(start=map_soln) # Package the MAP light curve and GP prediction extras = dict( 
zip( ["light_curves_tess", "gp_pred_tess"], pmx.eval_in_model([light_curves_tess, gp.predict(resid_tess)], map_soln), ) ) return model, map_soln, extras, orbit model1, map_soln1, extras1, orbit1 = depth_duration_model(ttvs=False) def plot_light_curve(soln, extras, xrange=[4641,4690], mask=None): if mask is None: mask = np.ones(len(x), dtype=bool) fig, axes = plt.subplots(nrows=3, ncols=1, figsize=(10, 10)) ax = axes[0] ax.errorbar(x[mask], y[mask], yerr=yerr_tess)#soln["yerr_tess"]) gp_mod = extras["gp_pred_tess"] + soln["mean_tess"] + soln["bkg"] ax.plot(x[mask], gp_mod, color="k", label="GP + background model", zorder=4) ax.legend(fontsize=10, ncol=2) ax.set_ylabel("Relative flux [ppt]") ax.set_title('TESS') ax = axes[1] ax.errorbar(x[mask], y[mask] - gp_mod, yerr=yerr_tess,color='k')#soln["yerr_tess"]) mod_sum = np.sum(extras["light_curves_tess"], axis=-1) ax.plot(x[mask], mod_sum, label="sum", color="w") for i, l in enumerate("cdbe"): mod = extras["light_curves_tess"][:, i] ax.plot(x[mask], mod, label="planet {0}".format(l), zorder=3, color=tangerine[i]) ax.legend(fontsize=10, loc=3, ncol=3) ax.set_ylabel("De-trended flux [ppt]") ax = axes[2] mod = gp_mod + np.sum(extras["light_curves_tess"], axis=-1) ax.errorbar(x[mask], y[mask] - mod, yerr=yerr_tess)#soln["yerr_tess"]) ax.axhline(0, color="#aaaaaa", lw=1) ax.set_ylabel("Residuals [ppt]") ax.set_xlabel("BKJD [days]") for i in range(3): axes[i].set_xlim(xrange[0],xrange[1]) return fig, gp_mod _ = plot_light_curve(map_soln1, extras1) with model1: trace_ex = pmx.sample(tune=500, draws=5000, start=map_soln1, chains=3, return_inferencedata=True, random_seed=[39248934, 48374109, 84738013]) trace_ex.to_dataframe().to_csv('summary_2min.csv') flat_samps = trace_ex.posterior key = 't0' rnd = 5 for j in range(0,4): med = np.nanmedian(flat_samps[key][:,:,j].data) upp = np.nanpercentile(flat_samps[key][:,:,j].data,84) low = np.nanpercentile(flat_samps[key][:,:,j].data, 16) u = np.round(upp-med,rnd) l = np.round(med-low,rnd) m = np.round(med, rnd) print('$' + str(m)+'_{-'+str(l)+'}^{+'+str(u)+'}$') k2_t0=[ 2231.2797, 2239.3913, 2234.0488] k2_per=[ 8.24958, 12.4032, 24.1396] for i in range(len(k2_t0)): predicted = k2_t0[i]+k2_per[i]*np.arange(2,350) new = np.nanmedian(flat_samps[key][:,:,i].data) diff = np.abs(predicted - new) print(predicted[np.argmin(diff)], new) print(((predicted[np.argmin(diff)] - new) * units.day).to(units.hour)) from astropy import units key = 'r_pl_rade' rnd = 2 for j in range(0,4): med = np.nanmedian(flat_samps[key][:,:,j].data)*units.Rearth upp = np.nanpercentile(flat_samps[key][:,:,j].data,84)*units.Rearth low = np.nanpercentile(flat_samps[key][:,:,j].data, 16)*units.Rearth u = np.round(upp.to(units.Rjup).value-med.to(units.Rjup).value,rnd) l = np.round(med.to(units.Rjup).value-low.to(units.Rjup).value,rnd) m = np.round(med.to(units.Rjup).value, rnd) print('$' + str(m)+'_{-'+str(l)+'}^{+'+str(u)+'}$') gpdict = {} gpdict['time'] = x gpdict['flux'] = y gpdict['flux_err'] = yerr_tess gpdict['gp_mod'] = extras1["gp_pred_tess"] + map_soln1["mean_tess"] + map_soln1["bkg"] letter =['c','d','b','e'] for i in range(4): print(len(extras0['light_curves_tess'][i])) gpdict['planet_{}'.format(letter[i])] = extras1['light_curves_tess'][i] np.save('/Users/arcticfox/Documents/v1298tau/tess/model_2min.npy', model1) np.save('/Users/arcticfox/Documents/v1298tau/tess/map_soln_2min.npy', map_soln1) np.save('/Users/arcticfox/Documents/v1298tau/tess/extras_2min.npy', extras1) np.save('/Users/arcticfox/Documents/v1298tau/tess/gp_2min.npy', gpdict) 
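Since np.save stores these Python dictionaries as 0-d object arrays, reading them back later needs allow_pickle=True and .item(). A small sketch, assuming the same file paths:

import numpy as np

gp_loaded = np.load('/Users/arcticfox/Documents/v1298tau/tess/gp_2min.npy',
                    allow_pickle=True).item()
map_soln_loaded = np.load('/Users/arcticfox/Documents/v1298tau/tess/map_soln_2min.npy',
                          allow_pickle=True).item()
print(gp_loaded.keys())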
((1.16*units.Mjup) / (4.0/3.0 * np.pi * (0.89*units.Rjup)**3)).to(units.g/units.cm**3) ((0.64*units.Mjup) / (4.0/3.0 * np.pi * (0.85*units.Rjup)**3)).to(units.g/units.cm**3) map_soln1['ror']Accounting for TTVsmodel2, map_soln2, extras2, orbit2 = depth_duration_model(ttvs=True) with model2: trace_ex_ttvs = pmx.sample(tune=500, draws=5000, start=map_soln2, chains=3, return_inferencedata=True, random_seed=[39248934, 48374109, 84738013]) trace_ex_ttvs.to_dataframe().to_csv('summary_2min_ttvs.csv') flat_samps_ttvs = trace_ex_ttvs.posterior from astropy.table import Table, Column plt.rcParams['font.size']=18 key = 'ttvs_0' rnd = 6 tab = Table(names=['planet','expected_transit_time', 'tts', 'ttvs_med', 'ttvs_l16', 'ttvs_u84'], dtype=[str,float,float,float,float,float]) for j in range(flat_samps_ttvs[key].shape[-1]): med = np.nanmedian(flat_samps_ttvs[key][:,:,j].data)*units.day upp = np.nanpercentile(flat_samps_ttvs[key][:,:,j].data,84)*units.day low = np.nanpercentile(flat_samps_ttvs[key][:,:,j].data, 16)*units.day u = np.round((upp-med).to(units.min).value,rnd) l = np.round((med-low).to(units.min).value,rnd) m = np.round(med.to(units.min).value, rnd) print('$' + str(m)+'_{-'+str(l)+'}^{+'+str(u)+'}$') tab.add_row(['c', map_soln2['t0'][0]+map_soln2['period'][0]*j, map_soln2['tts_0'][j], m, l, u]) plt.errorbar(map_soln2['t0'][0]+map_soln2['period'][0]*j, m, yerr=np.nanmedian([u,l]), marker='o', color='k',ms=8) key = 'ttvs_1' rnd = 6 for j in range(flat_samps_ttvs[key].shape[-1]): med = np.nanmedian(flat_samps_ttvs[key][:,:,j].data)*units.day upp = np.nanpercentile(flat_samps_ttvs[key][:,:,j].data,84)*units.day low = np.nanpercentile(flat_samps_ttvs[key][:,:,j].data, 16)*units.day u = np.round((upp-med).to(units.min).value,rnd) l = np.round((med-low).to(units.min).value,rnd) m = np.round(med.to(units.min).value, rnd) print('$' + str(m)+'_{-'+str(l)+'}^{+'+str(u)+'}$') tab.add_row(['d', map_soln2['t0'][1]+map_soln2['period'][1]*j, map_soln2['tts_1'][j], m, l, u]) plt.errorbar(map_soln2['t0'][1]+map_soln2['period'][1]*j, m, yerr=np.nanmedian([u,l]), marker='o', color='darkorange',ms=8) key = 'ttvs_2' rnd = 6 for j in range(flat_samps_ttvs[key].shape[-1]): med = np.nanmedian(flat_samps_ttvs[key][:,:,j].data)*units.day upp = np.nanpercentile(flat_samps_ttvs[key][:,:,j].data,84)*units.day low = np.nanpercentile(flat_samps_ttvs[key][:,:,j].data, 16)*units.day u = np.round((upp-med).to(units.min).value,rnd) l = np.round((med-low).to(units.min).value,rnd) m = np.round(med.to(units.min).value, rnd) print('$' + str(m)+'_{-'+str(l)+'}^{+'+str(u)+'}$') tab.add_row(['b', map_soln2['t0'][1]+map_soln2['period'][1]*j, map_soln2['tts_1'][j], m, l, u]) plt.errorbar(map_soln2['t0'][1]+map_soln2['period'][1]*j, m, yerr=np.nanmedian([u,l]), marker='o', color='green',ms=8) plt.xlabel('Time [BKJD - 2454833]') plt.plot(100,100,'ko',label='V1298 Tau c') plt.plot(100,100,'o',color='darkorange',label='V1298 Tau d') plt.legend(fontsize=12) plt.ylabel('TTVs [minutes]') plt.xlim(4643,4693) #plt.ylim(-20,20) #plt.savefig('ttvs.png',dpi=250,rasterize=True,bbox_inches='tight') np.nanmedian(tab[tab['planet']=='d']['ttvs_med']), np.nanstd(tab[tab['planet']=='d']['ttvs_med']) tab tab.write('ttvs.csv',format='csv') _, gp_mod = plot_light_curve(map_soln2, extras2) lc_tab = Table() lc_tab.add_column(Column(x,'time')) lc_tab.add_column(Column(y,'flux')) lc_tab.add_column(Column(yerr_tess,'flux_err')) lc_tab.add_column(Column(gp_mod, 'gp_pred_tess')) lc_tab.write('lc.csv',format='csv') plt.figure(figsize=(14,4)) plt.plot(lc_tab['time'], 
lc_tab['flux']) plt.plot(lc_tab['time'], lc_tab['gp_pred_tess'])*** 常見的寫法藉由當前迴圈數作為水果編號for i in range(len(fruits)): print('編號 {idx} 的水果是 {fruit}'.format(idx=i, fruit=fruits[i]))編號 0 的水果是 apple 編號 1 的水果是 banana 編號 2 的水果是 cherry 編號 3 的水果是 dragonfruit 編號 4 的水果是 emblica推薦寫法跟上者沒什麼區別,但可讀性大幅提升for idx, fruit in enumerate(fruits): print('編號 {idx} 的水果是 {fruit}'.format(idx=idx, fruit=fruit))編號 0 的水果是 apple 編號 1 的水果是 banana 編號 2 的水果是 cherry 編號 3 的水果是 dragonfruit 編號 4 的水果是 emblicaTopic Modelling overviewIn this notebook:- Description of the data- Looking at the data- Text pre-processing- Topic Modelling with Gensim- Visualisation of Topic Models with pyLDAvis- Topic coherence Some configuration firstThe following cell will download some components of the NLTK libraryimport nltk nltk.download('punkt') nltk.download('stopwords') nltk.download('averaged_perceptron_tagger')Also, watch out for deprecation warnings (show them once)import warnings warnings.filterwarnings(action='once')Description of the data- We're going to use a sub-set of the popular `20newsgroups` dataset- Each document is a newsgroup message- Each document is labelled with the related newsgroup (one newsgroup per document)- There are (surprise!) 20 newsgroups- The newsgroup name tells us about the overall topic Looking at the datafrom sklearn.datasets import fetch_20newsgroups categories = ['comp.sys.mac.hardware', 'rec.autos', 'sci.space', 'misc.forsale', 'talk.politics.guns', 'talk.religion.misc'] newsgroups = fetch_20newsgroups(remove=('headers', 'footers', 'quotes'), categories=categories) len(newsgroups.data) newsgroups.target_namesLet's look into the content of a documentdoc = newsgroups.data[2] doc doc_class = newsgroups.target[2] doc_class newsgroups.target_names[doc_class]Text pre-processingGensim expects the input corpus to be a sequence of tokenised documentse.g. a list of lists (documents) of strings (words/tokens)In our first iteration, we're simply tokenising the input data (from doc to words)from nltk.tokenize import word_tokenize def preprocess(text): return word_tokenize(text) corpus = [preprocess(doc) for doc in newsgroups.data]Building the term-document matrixfrom gensim.corpora import Dictionary id2word = Dictionary(corpus) # Term Document Frequency term_document_matrix = [id2word.doc2bow(text) for text in corpus] # View one document in the term-document matrix print(term_document_matrix[0]) # Number of documents len(term_document_matrix) # Number of unique words (vocabulary size) len(id2word) # View one word id2word[0] # View word frequency distribution in one document doc = term_document_matrix[0] [(id2word[word_id], freq) for word_id, freq in doc]Train topic model with LDA%%time from gensim.models.ldamodel import LdaModel model = LdaModel(corpus=term_document_matrix, id2word=id2word, num_topics=10, passes=10) model.print_topics()Can we do better?Have a look at the topics extracted in the example above:- does the output make sense?- is the output useful at all? Better pre-processingSome options to improve pre-processing:- normalisation (e.g. 
lowercasing)- stop-word removal- punctuation removalData cleaning is not glamorous, but it can have a big impact on our models.from nltk.corpus import stopwords from string import punctuation STOP_LIST = set(stopwords.words('english') + list(punctuation)) STOP_LIST.update(["'m", "n't", '``', "'s", "'ll", "'re", '--', "''", '""', '...']) STOP_LIST.update(['go', 'get', 'like', 'gon', 'na', 'oh', 'yeah']) def preprocess(text): return [word.lower() for word in word_tokenize(text) if word.lower() not in STOP_LIST] corpus = [preprocess(doc) for doc in newsgroups.data] id2word = Dictionary(corpus) term_document_matrix = [id2word.doc2bow(text) for text in corpus] len(id2word) %%time model = LdaModel(corpus=term_document_matrix, id2word=id2word, num_topics=10, passes=10) model.print_topics()Removing the extremes of the distributionZipf's Law - https://en.wikipedia.org/wiki/Zipf%27s_lawid2word = Dictionary(corpus) id2word.filter_extremes(no_below=10, no_above=0.5) term_document_matrix = [id2word.doc2bow(text) for text in corpus] len(id2word) %%time model = LdaModel(corpus=term_document_matrix, id2word=id2word, num_topics=10, passes=10) model.print_topics()Visualisation with pyLDAvisimport pyLDAvis.gensim pyLDAvis.enable_notebook() %%time pyLDAvis.gensim.prepare(model, term_document_matrix, id2word, mds='mmds')Topic coherence Show the effect of the number of passes:%%time from gensim.models.coherencemodel import CoherenceModel good_model = LdaModel(corpus=term_document_matrix, id2word=id2word, passes=50, num_topics=10) bad_model = LdaModel(corpus=term_document_matrix, id2word=id2word, passes=1, num_topics=10) good_score = CoherenceModel(model=good_model, texts=corpus, dictionary=id2word) bad_score = CoherenceModel(model=bad_model, texts=corpus, dictionary=id2word) good_score.get_coherence(), bad_score.get_coherence()What's the best number of topics?models = [] scores = [] for n in range(5, 10): print("Training model with n={} topics".format(n)) model = LdaModel(corpus=term_document_matrix, id2word=id2word, passes=10, num_topics=n) score = CoherenceModel(model=model, texts=corpus, dictionary=id2word) models.append(model) scores.append(score) %matplotlib inline import matplotlib.pyplot as plt n_topics = range(5, 10) coherence_scores = [s.get_coherence() for s in scores] plt.plot(n_topics, coherence_scores) plt.xlabel("Num Topics") plt.ylabel("Coherence score") plt.legend(("coherence_scores"), loc='best') plt.show()Exercise - Train model with only nouns and adjectives Part-of-speech (PoS) tagging is the process of assigning words to their grammatical categories.We can achieve this using `nltk.pos_tag()`, for example:from nltk import pos_tag sentence = "The quick brown fox jumped over the lazy dog".split() pos_tag(sentence)Note: the function `nltk.pos_tag()` uses the set of tags from the [Penn Treebank project](https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html). How to change the pre-processing steps to include only nouns and adjectives? Does this produce better topic models?# Write your solution hereThe old locker subgraph does not account for relocks and will not let you query the balance of a user at a certain block or timestamp (an improvement left for the subgraph that will track the new locker contract). However it can still be used to retrieve a list of addresses who have interacted with the contract. 
That address can then be used to query the locker contract at a certain block with multicall to get their balances.The present notebook does that.import requests import pandas as pd import json import brownie import requests from brownie import Contract GRT_QUERY_ENDPOINT = "https://api.thegraph.com/subgraphs/name/convex-community/locker" def grt_query(query): r = requests.post(GRT_QUERY_ENDPOINT, json={'query': query}) return r.json() users = [] count = 0 last_users = [] last_amount = 1867562036033395438700658 # max_value (can't use 0 and asc because too many people have migrated from the new contract at this stage) while True: # we have over 6000 address so we can't use the usual first / skip params to paginate and instead need to filter a bit further # as per : https://thegraph.com/docs/en/developer/graphql-api/#example-4 query = f"{{ users(first: 1000 where: {{totalLocked_lte: \"{last_amount}\"}} orderBy: totalLocked orderDirection: desc) {{ id totalLocked }} }}" res = grt_query(query) count += 1 page_users = [entry['id'] for entry in res['data']['users']] if page_users == last_users: break last_users = page_users last_amount = [entry['totalLocked'] for entry in res['data']['users']][-1] print(f"{count}: {last_amount}") users += page_users unique_users = list(set(users)) #brownie.network.disconnect() brownie.network.connect("mainnet-fork") locker_contract = Contract.from_explorer("0xD18140b4B819b895A3dba5442F959fA44994AF50") multi = brownie.multicall(address="0x5BA1e12693Dc8F9c48aAD8770482f4739bEeD696", block_identifier=14318930) multi with multi: print(brownie.multicall.address) balances = [locker_contract.lockedBalanceOf(user) for user in users] # update block here to snapshot block final_list = {user: balances[i] for i, user in enumerate(users)} final_list---from kats.tsfeatures.tsfeatures import TsFeatures from kats.consts import TimeSeriesData, TimeSeriesIterator from utils import get_data df_emg = get_data() import pandas as pd import numpy as nptesting time typets_data = TimeSeriesData(value=df_emg, time=pd.Series(np.arange(0, len(df_emg)))) ts_data.timedefault examplets_data_list = [TimeSeriesData(value=df_emg[c], time=df_emg.index) for c in df_emg.columns] fs = 1000 fe = TsFeatures( window_size=fs * 30, window=30 * fs, selected_features=['mean', 'var', 'length'] ) out = [fe.transform(ts) for ts in ts_data_list] ts_data_list[0] out pd.DataFrame(out).info() # fs = 1000 TsFeatures( # window_size=fs * 30, # window=30 * fs, selected_features=['mean', 'var', 'length'] ).transform(ts_data) fe. import sys sys.path.append('../../') from tsflex.features import FeatureCollection, NumpyFuncWrapper from tsflex.features import FeatureDescriptor, MultipleFeatureDescriptors #---------------------------------------------------------------------------- quantiles = [0.25, 0.5, 0.75] def type_wrapper(x: np.ndarray, type_wrapped_func, **kwargs): return type_wrapped_func(x, **kwargs).astype(x.dtype) # -- 2. in-line functions # You can define your functions locally; these will serialize flawleslly def slope(x): return np.polyfit(np.arange(0, len(x)), x, 1)[0] f_slope = NumpyFuncWrapper(type_wrapper, output_names="slope", type_wrapped_func=slope) # -- 3. 
Lambda's # Or even use lambda's and other modules' functions def rms(x): return np.sqrt(np.mean(x**2)) f_rms = NumpyFuncWrapper(rms, output_names="rms") # f_rms = NumpyFuncWrapper(lambda x: np.sqrt(np.mean(x ** 2)), output_names="rms") f_area = NumpyFuncWrapper(np.sum, output_names="area") # (For convenience) we store the constructed `NumpyFuncWrappers` in a list segment_funcs = [ np.min, np.max, np.mean, np.std, np.var, ss.skew, ss.kurtosis, f_slope, f_rms, f_area, ] from utils import get_data df_emg = get_data() from joblib import Parallel, delayed from joblib import wrap_non_picklable_objects # fc = FeatureCollection( # feature_descriptors=[ # MultipleFeatureDescriptors( # functions=segment_funcs, # series_names=["emg", "eog", "lso", "rio", "m1-a1"], # windows=["30s"], # strides=["10s"], # ) # ] # ) # out = fc.calculate(data=df_emg, n_jobs=None, return_df=True, show_progress=True) fc = FeatureCollection( feature_descriptors=[ MultipleFeatureDescriptors( functions=segment_funcs, series_names=["emg", "eog", "lso", "rio", "m1-a1"], windows=["30s"], strides=["10s"], ) ] ) %mprun -f fc.calculate fc.calculate(data=df_emg, n_jobs=10, return_df=True, show_progress=True)**Excel - Create a Simple Worksheet***using openpyxl* --- Mount Google Drive.Workbooks and related files will be stored in the **excel** folder of **My Drive**.from google.colab import drive drive.mount('/content/gdrive')Mounted at /content/gdriveNot necessary, but check the default directory on Colab and list the files in content/gdrive/MyDrive/excel.print("The current working directory is: ") ! pwd print("\nThe contents of /content/gdrive/excel/ -") ! ls /content/gdrive/MyDrive/excel/ -aThe current working directory is: /content The contents of /content/gdrive/excel/ - CUNY_campuses2.xlsx CUNY_campuses.xlsx CUNY_campuses_BACKUP.xlsx 'New Microsoft Excel Worksheet.xlsx'Install **openpyxl** module for Python, if it is needed.Try the cell with the import of **openpyxl** before pip installing the module.# You only need to try running this cell if the next cell throws an Exception ! pip install openpyxlImport **openpyxl** module.try: import openpyxl except: raise Exception("Unable to import pyxl module. Check pip import openpyxl.")At this point you should have access to the **excel** directory in the **content** folder of your **Colab** environment and you should have the **openpyxl** module imported.You are ready to work on **Excel** sheets and workbooks in **Python**! --- Create an **openpyxl** Workbook object named **wb**Create an **openpyxl** worksheet object named **ws** as the active worksheet in the workbook **wb**.wb = openpyxl.Workbook() ws = wb.active print("The type of wb is: ", type(wb)) print("The type of ws is: ", type(ws)) print("The active worksheet is: ws in the workbook: wb.")The type of wb is: The type of ws is: The active worksheet is: ws in the workbook: wb.We have an active worksheet in a workbook, in memory now -- it has not been saved to a filename on disk yet! 
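Before appending a whole table of rows in the next step, it can help to see how individual cells are written. This is a small illustrative sketch on a throwaway workbook, so it does not interfere with the wb and ws objects used below:

from openpyxl import Workbook

scratch_wb = Workbook()
scratch_ws = scratch_wb.active
scratch_ws["A1"] = "Hello"                        # write by cell coordinate
scratch_ws.cell(row=1, column=2, value="World")   # write by 1-based row/column index
print(scratch_ws["A1"].value, scratch_ws.cell(row=1, column=2).value)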
Create a matrix as a list of lists with some data which we will add to the ws worksheet.For this example, use the names of CUNY campuses and their street adddress, and borough in New York City.Store that data as a matrix of lists (rows) within a list (range).Each row list will have three elements, representing three columns in a sheet.The matrix_range list will begin with a list of the three column headings.matrix_range = [ ["Campus","Address","Borough"], ["Baruch College","55 Lexington Avenue","Manhattan"], ["Borough of Mahattan Community College","199 Chambers Street","Manhattan"], ["Bronx Community College","2155 University Avenue","Bronx"], ["Brooklyn College","2900 Bedford Avenue","Brooklyn"], ["College of Staten Island","2800 Victory Boulevard","Staten Island"], [" Graduate School of Journalism","219 W 40th Street","Manhattan"], ["CUNY Graduate Center","365 Fifth Avenue","Manhattan"], ["CUNY Graduate School of Public Health and Health Policy","55 W 125th Street","Manhattan"], ["CUNY School of Labor and Urban Studies","25 W 23rd Street","Manhattan"], ["CUNY School of Law","2 Court Square","Queens"], ["CUNY School of Professional Studies","119 W 31st Street","Manhattan"], ["Guttman Community College","50 W 40th Street","Manhattan"], ["Hostos Community College","500 Grand Concourse","Bronx"], ["Hunter College","695 Park Avenue","Manhattan"], ["John Jay College of Criminal Justice","524 W 59th Street","Manhattan"], ["Kingsborough Community College","2001 Oriental Boulevard","Brooklyn"], ["LaGuardia Community College","31-10 Thomson Avenue","Queens"], ["Lehman College","250 Bedford Park Blvd West","Bronx"], ["Macaulay Honors College","35 W 67th Street","Manhattan"], ["Medgar Evars College","1650 Bedford Avenue","Brooklyn"], ["New York City College of Technology","300 Jay Street","Brooklyn"], ["Queens College","65-30 Kissena Boulevard","Queens"], ["Queensborough Community College","220-05 56th Avenue","Queens"], ["The City College of New York","160 Convent Avenue","Manhattan"], ["York College","94-20 Gu Boulevard","Queens"] ]Check the matrix.print("The length of matrix_range is: ", len(matrix_range)) print("\nThe rows of matrix_range are: ") i = 0 for row in matrix_range: print("Row #", i, "\t",row) i +=1The length of matrix_range is: 26 The rows of matrix_range are: Row # 0 ['Campus', 'Address', 'Borough'] Row # 1 ['Baruch College', '55 Lexington Avenue', 'Manhattan'] Row # 2 ['Borough of Mahattan Community College', '199 Chambers Street', 'Manhattan'] Row # 3 ['Bronx Community College', '2155 University Avenue', 'Bronx'] Row # 4 ['Brooklyn College', '2900 Bedford Avenue', 'Brooklyn'] Row # 5 ['College of Staten Island', '2800 Victory Boulevard', 'Staten Island'] Row # 6 ['Craig Newmark Graduate School of Journalism', '219 W 40th Street', 'Manhattan'] Row # 7 ['CUNY Graduate Center', '365 Fifth Avenue', 'Manhattan'] Row # 8 ['CUNY Graduate School of Public Health and Health Policy', '55 W 125th Street', 'Manhattan'] Row # 9 ['CUNY School of Labor and Urban Studies', '25 W 23rd Street', 'Manhattan'] Row # 10 ['CUNY School of Law', '2 Court Square', 'Queens'] Row # 11 ['CUNY School of Professional Studies', '119 W 31st Street', 'Manhattan'] Row # 12 ['Guttma[...]At this point we have an empty worksheet **ws** as the active worksheet in a workbook **wb** and a **matrix** of data in memory. 
Use the **openpyxl** **append** method on the object **ws** to append the rows of the object **matrix**.for row in matrix_range: ws.append(row)Give the active worksheet **ws** a title (the name on the worksheet tab) of "CampusLocations".*Note: this step could have been done before appending the data from **matrix**. The worksheet name could be assigned at anytime when the worksheet is open.*ws.title = "CampusLocations"Now save the workbook **wb** to the **excel** folder in **/content/gdrive/MyDrive/** with the filename "CUNY_campuses.xlsx".*Note: the workbook **wb** contains only one worksheet (**ws**).*wb.save("/content/gdrive/MyDrive/excel/CUNY_campuses.xlsx")Go to **Google Drive** and the **excel** folder to open and inspect the new workbook in **Google Sheets** or download it to view it in **Excel**. --- **Formatting Excel Sheets** We can adjust column widths and format our column headings.We can also create a named range for our table of data. --- **Creating Excel Tables** We can add a sheet with a dynamic table based on our worksheet with data. Let's start by importing the objects we need from **openpyxl**.* **Table** - for Excel tables, with filtering, formatting, etc.* **TableStyleInfo** - to created a named style to be applied to a table* **Image** -from openpyxl.worksheet.table import Table, TableStyleInfoIf we want to access an Excel workbook which already exists, stored on an accessible drive, we should import the **load_workbook** method from **openpyxl**.from openpyxl import load_workbookOpen an existing Excel workbook and create an active worksheet **ws**.wb = load_workbook("/content/gdrive/MyDrive/excel/CUNY_campuses.xlsx") ws = wb.activeCreate a table object named **tabl_1** with the display name '**Table1**'.* Table names must be unique within a workbook. * By default tables are created with a header from the first row. * Table filters for all the columns must always contain strings. * Table headers and table column headings must always contain strings.tabl_1 = Table(displayName="Table1", ref="A1:C26")Create a style in an object **style_basic** and give the style the name **TableStyleBasic** then apply that style the table **tabl_1**.Styles are managed using the the **TableStyleInfo object**. This allows you to stripe rows or columns and apply the different color schemes.style_clean = TableStyleInfo(name="TableStyleMedium9", showFirstColumn=False, showLastColumn= False, showRowStripes=True, showColumnStripes=False) tabl_1.tableStyleInfo = style_cleanNow the table can be added to the worksheet using **ws.add_table()**.* Table must be added using ws.add_table() method to avoid duplicate names.* Using this method ensures table name is unque through out defined names and all other table name.ws.add_table(tabl_1)Then the workbook can be saved as an **Excel** document.wb.save("/content/gdrive/MyDrive/excel/CUNY_campuses_withTable.xlsx") # to see the table tabl_1 created in the worksheet ws print(ws.tabl_1)--- **Functions and Attributes of Tables**# CHECK THIS IN THE CURRENT VERSION OF OPENPYXL # from openpyxl import tables # to query the contents of the ws object dir(ws)**ws.tables** ???was??? is a dictionary-like object of all the tables in a particular worksheet.**ws._tables** is a list object of tables in a worksheet. You can query all of the tables in a worksheet.# you can query all of the tables in a worksheet with the list ._tables print(ws._tables)??? 
You can get a table by its name or range# CHECK THIS IN THE CURRENT VERSION OF OPENPYXL # references a table by its name or a range ws.tables["Table1"] # or ws.tables["A1:C26"]You can get a count of the number of tables in a worksheet.# returns integer count of the number of tables in the worksheet print(len(ws._tables))You can get table names and their ranges of all tables in a worksheet.# returns a list of table names and their ranges. # print(ws.tables.items()) print(ws._tables)You can delete a table from a worksheet.# CHECK THIS IN THE CURRENT VERSION OF OPENPYXL # del ws.tables["Table1"]**Web scraping 1**import pandas as pd from bs4 import BeautifulSoup import numpy as np import requests from time import sleep from tqdm import tqdm import numpy as np from multiprocessing.dummy import Pool**Using API's** API´s (application programming interface) allow you to remotely excecute a given function. In this class we are going to see how to use API's to get real life data with Python, using the [Requests library](https://docs.python-requests.org/en/master/) - [Grate place for finding usefull API'S](https://rapidapi.com/hub)import datetime as dt- Lest´s see an example with [Bitcon](https://binance-docs.github.io/apidocs/spot/en/change-log) API to get data from ETHUSDT prices for every hour in in 2017. In this case we are don't have a suscription to the Bitcoin API. For this reason we have a limit on our requests responses. I strongly recomend getting a free subscrition if you are developing a project with Bitcoin dataurl = "https://api.binance.com/api/v3/klines" startTime = str(int(dt.datetime(2017, 5, 1).timestamp() * 1000)) endTime = str(int(dt.datetime(2018, 5, 1).timestamp() * 1000)) limit = '1000' req_params = {"symbol" : 'ETHUSDT', 'interval' : '1h', 'startTime' : startTime, 'endTime' : endTime, 'limit' : limit} response=(requests.get(url, params = req_params))- The response of the request we made comes is delivered in form of a json. 
Let´s make a DataFrame out of this data.response.text import json df = pd.DataFrame(json.loads(response.text)) df df = df.iloc[:, 0:6] df.columns = ['datetime', 'open', 'high', 'low', 'close', 'volume'] df=df.set_index('datetime') df.index=[dt.datetime.fromtimestamp(x / 1000.0) for x in df.index]- Can we make a function that requests data of any symbol for every specified time period?def extraer_data_Symbol(symbol, interval,start, end): req_params = {"symbol" : symbol, 'interval' : interval, 'startTime' : start, 'endTime' : end, 'limit' : limit} limit = '10000' response=(requests.get(url, params = req_params)) df = pd.DataFrame(json.loads(response.text)) df = df.iloc[:, 0:6] df.columns = ['datetime', 'open', 'high', 'low', 'close', 'volume'] df.index = [dt.datetime.fromtimestamp(x / 1000.0) for x in df.datetime] return df Sym='https://api.binance.com/api/v3/exchangeInfo' Symbols=json.loads(requests.get(Sym).text) activos=[] for i in Symbols['symbols']: activos.append(i['symbol'])**Web scraping Beautifulsoup** [Documentación BeautifulSoup](https://beautiful-soup-4.readthedocs.io/en/latest/) - Generlay the first thing we want to do is tourl='https://www.imdb.com/chart/moviemeter/?sort=ir,desc&mode=simple&page=1' page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') datas=[] for i in tqdm(range(0,len(soup.find_all('td',{'class':'titleColumn'})))): data={} try: data['titulo']=soup.find_all('td',{'class':'titleColumn'})[i].find('a').text except:pass try: data['link']='https://www.imdb.com/'+soup.find_all('td',{'class':'titleColumn'})[i].find('a').get('href') except:pass try: data['puntaje']=soup.find_all('td',{'class':'ratingColumn imdbRating'})[i].find('strong').text except:pass datas.append(data) len(datas) datas[0] data=pd.DataFrame(datas) links=data['link'].to_list() infos=[] for i in tqdm(links): soup=0 page = requests.get(i) soup = BeautifulSoup(page.content, 'html.parser') info={} try: info['año_lanzamiento']=soup.find('ul',{'class':'ipc-inline-list ipc-inline-list--show-dividers TitleBlockMetaData__MetaDataList-sc-12ein40-0 dxizHm baseAlt'}).findAll('li')[0].text[:4] except:pass try: info['duracion_peli']=soup.find('ul',{'class':'ipc-inline-list ipc-inline-list--show-dividers TitleBlockMetaData__MetaDataList-sc-12ein40-0 dxizHm baseAlt'}).findAll('li')[2].text except:pass infos.append(info)3%|▎ | 3/100 [00:08<04:39, 2.88s/it]**Paralelizar scraping**a=int(len(links)/10) infos=[] def scrap(x): global infos for i in tqdm(links[x:x+10]): soup=0 page = requests.get(i) soup = BeautifulSoup(page.content, 'html.parser') info={} try: info['año_lanzamiento']=soup.find('ul',{'class':'ipc-inline-list ipc-inline-list--show-dividers TitleBlockMetaData__MetaDataList-sc-12ein40-0 dxizHm baseAlt'}).findAll('li')[0].text[:4] except:pass try: info['duracion_peli']=soup.find('ul',{'class':'ipc-inline-list ipc-inline-list--show-dividers TitleBlockMetaData__MetaDataList-sc-12ein40-0 dxizHm baseAlt'}).findAll('li')[2].text except:pass infos.append(info) j=0 k=[] for i in range(0,10): k.append(j) j+=10 pool=Pool(10) re=pool.map(scrap, k) pd.DataFrame(infos)- **Using headers**: Sometimes pages won´t respond to your request if your request is not done as a human would do it. 
One way you can try to make your requet more "human like" is by adding headers.headers={ 'authority': 'www.imdb.com', 'method': 'GET', 'path': '/chart/moviemeter/?sort=ir,desc&mode=simple&page=1', 'scheme': 'https', 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'accept-encoding': 'gzip, deflate, br', 'accept-language':'es-ES,es;q=0.9', 'cache-control': 'max-age=0', 'cookie': '; session-id=136-8467877-9681758; adblk=adblk_no; ubid-main=133-3750919-3113945; session-id-time=2082787201l; session-token=SBpG+fidsVTBP0wmmQqV; csm-hit=tb:s-PGX2A0861DEA1XENQNVS|1629483407253&t:1629483408943&adb:adblk_no', 'sec-ch-ua': '"Chromium";v="92", " Not A;Brand";v="99", "Google Chrome";v="92"', 'sec-ch-ua-mobile': '?0', 'sec-fetch-dest': 'document', 'sec-fetch-mode': 'navigate', 'sec-fetch-site':'none', 'sec-fetch-user': '?1', 'upgrade-insecure-requests': '1', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36' } url='https://www.imdb.com/chart/moviemeter/?sort=ir,desc&mode=simple&page=1' page = requests.get(url,headers=headers) soup = BeautifulSoup(page.content, 'html.parser')- [Grate place for finding usefull API'S](https://rapidapi.com/hub) - Scraping an image###nordvpn ###headers2020 Spring CS_349 Machine Learning Final Project Covid-19 Cases Data Analysis with K-Means and RNN Author: Table of Contents: 1. Load Data 2. K-Means Modeling for States' Cases Growth Pattern Classification 3. RNN Modeling for States' Cases Growth Forecast in the Future 30 Days 4. Result Evaluationimport sys sys.path.insert(0, '..') from utils import data import os os.path.abspath('') import sklearn import numpy as np import json from shapely.geometry import Point import matplotlib.pyplot as plt import geopandas as gpd import geoplot as gplt import mapclassify import matplotlib.dates as mdates import matplotlib.colors as mcolors import pandas as pd from pandas import read_csv import math from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error from datetime import datetime, date import geoplot as gplot import mapclassify import matplotlib.pyplot as plt import matplotlib.colors as mcolors import geopandas as gpd1. Load DataThis final project mainly uses three datasets. Two datasets, "time_series_covid19_confirmed_global.csv" and "time_series_covid19_confirmed_us.csv" are contained in the provided final project folder. The "us-states.csv" dataset is a dataset contributed by Alberstun in his public Github repository, which contains the up-to-date number of tested, confirmed, and dead cases of COVID-19. Here's a link to this dataset https://github.com/nytimes/covid-19-data/blob/master/us-states.csv. You can also find it in the repository of this project. The two methods as below are used to import these datasets from the project's working directory and group them into two array-like structures which are later used in the training and testing. 
Both of them return a feature array which contains pure data and an array of corresponding states.def import_US_confirmed(): BASE_PATH = r'C:/Users/Lucas/COVID/test1/coronavirus-2020/COVID-19/csse_covid_19_data/' MIN_CASES = 1000 # ------------------------------------------ confirmed = os.path.join( BASE_PATH, 'csse_covid_19_time_series', 'time_series_covid19_confirmed_US.csv') confirmed = data.load_csv_data(confirmed) state_feature = {} states = list(set(confirmed["Province_State"])) # print (confirmed.iloc[232,11:confirmed.shape[0]-11]) for idx in range(confirmed.shape[0]): if confirmed["Province_State"][idx] in list(state_feature.keys()): state_feature[confirmed["Province_State"][idx]] += confirmed.iloc[idx,11:confirmed.shape[0]-11] else: state_feature[confirmed["Province_State"][idx]] = confirmed.iloc[idx,11:confirmed.shape[0]-11] features = np.asarray(list(state_feature.values())) targets = np.asarray(list(state_feature.keys())) return features, targets def import_US_confirmed_update(): BASE_PATH = r'C:/Users/Lucas/COVID/test1/coronavirus-2020/COVID-19/csse_covid_19_data/' MIN_CASES = 1000 confirmed = os.path.join( BASE_PATH, 'csse_covid_19_time_series', 'us-states.csv') confirmed = data.load_csv_data(confirmed) dates = sorted(list(set(confirmed["date"]))) _confirmed = os.path.join( BASE_PATH, 'csse_covid_19_time_series', 'time_series_covid19_confirmed_US.csv') _confirmed = data.load_csv_data(_confirmed) state_feature = {} states = list(set(_confirmed["Province_State"])) state_feature = dict((state,np.zeros(len(dates))) for state in states) for idx in range(confirmed.shape[0]): # print ((datetime.strptime(confirmed["date"][idx], '%Y-%m-%d').date() - date(2020,1,21)).days) state_feature[confirmed["state"][idx]][(datetime.strptime(confirmed["date"][idx], '%Y-%m-%d').date() - date(2020,1,21)).days] =\ confirmed["cases"][idx] features = np.asarray(list(state_feature.values())) targets = np.asarray(list(state_feature.keys())) return features, targets2. K-Means Modeling for States' Cases Growth Pattern ClassificationIn this sector, we build a K-Means model to classify the COVID-19 cases growth pattern of each state. K-Means is superior to KNN in that the former could easily give a sense of how each state's pattern is close or different from any other's. Below is a function we use to build such a K-Means model in which "4" is chosen for the number of clusters to give the best visual effect after our repeated trials. 
This function also contains the used for visualization, which involves using "geoplot" and "mapclassify" packages.def kmeans_usa_states(): from sklearn.cluster import KMeans # ------------ HYPERPARAMETERS ------------- BASE_PATH = r'C:/Users/Lucas/COVID/test1/coronavirus-2020/COVID-19/csse_covid_19_data/' MIN_CASES = 1000 # ------------------------------------------ confirmed = os.path.join( BASE_PATH, 'csse_covid_19_time_series', 'time_series_covid19_confirmed_US.csv') confirmed = data.load_csv_data(confirmed) state_feature = {} states = list(set(confirmed["Province_State"])) # print (confirmed.iloc[232,11:confirmed.shape[0]-11]) for idx in range(confirmed.shape[0]): if confirmed["Province_State"][idx] in list(state_feature.keys()): state_feature[confirmed["Province_State"][idx]] += confirmed.iloc[idx,11:confirmed.shape[0]-11] else: state_feature[confirmed["Province_State"][idx]] = confirmed.iloc[idx,11:confirmed.shape[0]-11] features = np.asarray(list(state_feature.values())) targets = np.asarray(list(state_feature.keys())) contiguous_usa = gpd.read_file(gplt.datasets.get_path('contiguous_usa')) contiguous_usa["class"] = np.full(contiguous_usa.shape[0], -1) kmeans = KMeans(n_clusters=4, max_iter=1000) kmeans.fit(features, targets) state_classes = kmeans.predict(features) state_classes = dict((targets[idx], state_classes[idx]) for idx in range(targets.shape[0])) for idx in range(contiguous_usa.shape[0]): if contiguous_usa["state"][idx] in targets: contiguous_usa["class"][idx] = state_classes[contiguous_usa["state"][idx]] df_means = contiguous_usa[["state","class"]] c = mcolors.ColorConverter().to_rgb ''' rvb = make_colormap( [ c('orange'), c('violet'), 0.33, c('violet'), 0.50, c('blue'), 0.66, c('red'),c('green')]) ''' scheme = mapclassify.NaturalBreaks(list(df_means["class"])) gplt.choropleth( contiguous_usa, hue=list(df_means["class"]), scheme=scheme, figsize=(8, 4),legend=False,cmap = 'binary' )3. RNN Modeling for States' Cases Growth Forecast in the Future 30 DaysIn this sector, two Recurrent Nerual Networks (RNNs) with Long Short Term Memory (LSTM) are built to predict the growth of COVID-19 cases within a given state in the short or a longer future. We also use "Keras" to facilitate the modeling. The first RNN is designed to forecast the number of cases in the next day by simply looking at today's cases. The first of the two functions below is used to reshape the dataset to work with "Keras". The value of argument "look_back" determines how many days the RNN looks into to make a prediction for the following day. 
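To make the reshaping concrete before the function definition, here is a toy illustration of that sliding window with made-up numbers (not the COVID data); it mirrors the indexing used in create_dataset below.

import numpy as np

toy = np.array([10, 12, 15, 19, 25])
look_back = 1
X = np.array([toy[i:i + look_back] for i in range(len(toy) - look_back - 1)])
y = np.array([toy[i + look_back] for i in range(len(toy) - look_back - 1)])
print(X)   # [[10] [12] [15]] -- each day's total is the input ...
print(y)   # [12 15 19]       -- ... and the next day's total is the target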
The number of LSTM units is chosen to be 16 for the best practice.def create_dataset(dataset, look_back=1): dataX, dataY = [], [] for i in range(len(dataset)-look_back-1): a = dataset[i:(i+look_back)] dataX.append(a) dataY.append(dataset[i + look_back]) return np.array(dataX), np.array(dataY) def LSTM_forecast(state): features, states = import_US_confirmed() features = features[np.where(states == state)[0][0]] trainX, trainY = create_dataset(features) testX, testY = create_dataset(features) trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1])) testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1])) # create and fit the LSTM network model = Sequential() model.add(LSTM(16, input_shape=(1, 1), activation="relu")) model.add(Dense(1)) model.compile(loss='mean_squared_error', optimizer='adam') model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2) # make predictions trainPredict = model.predict(trainX) testPredict = model.predict(testX) timespan = pd.date_range("2020-01-22", periods=117).to_list() timespan = [str(date.date()) for date in timespan] # from matplotlib.dates import date2num # timespan = date2num(timespan) fig, ax = plt.subplots() ax.set_xticks(np.arange(len(timespan))) ax.set_xticklabels(timespan) # ax.xaxis.set_major_locator(mdates.YearLocator()) # ax.xaxis.set_minor_locator(mdates.MonthLocator()) # ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y')) ax.plot(timespan, testY, label="True Cases") ax.plot(timespan, testPredict, label="Predicted Cases by LSTM") # ax.xaxis.set_major_locator(plt.MaxNLocator(5)) plt.legend() plt.gcf().autofmt_xdate() for n, label in enumerate(ax.xaxis.get_ticklabels()): if n % 30 == 0: label.set_visible(True) else: label.set_visible(False) plt.show()The script below shows our methods to build the second RNN, which is designed to predict the case growth in a longer future. It predicts a sequence of number of cases by looking into a series of past cases. The first function below is used to reshape the dataset to work with "Keras" like that function in the first RNN does. It takes two arguments besides the dataset. "num_x" is the number of days the RNN looks into to make a prediction, and "num_y" controls how many days of cases the RNN needs to predict. Basically, they have the same meaning with the "x_days" and "y_days" in the following function. 
For example, if these two arguments are set to be 75 and 30 respectively, then the RNN being built will forecast the cases growth of the next 30 days based on the past 75 days' observations.def create_dataset_long_term(dataset, num_x, num_y): dataX, dataY, testX = [], [], [] for i in range(len(dataset)-num_x-num_y+1): a = dataset[i:(i+num_x)] testX.append(dataset[i+num_y:i+num_x+num_y]) dataX.append(a) dataY.append(dataset[i + num_x: i+num_x+num_y]) return np.array(dataX), np.array(dataY), np.array(testX) def LSTM_forecast_long_term(state,x_days,y_days): features, states = import_US_confirmed() features = features[np.where(states == state)[0][0]] features_update, states_update = import_US_confirmed_update() features_update = features_update[np.where(states_update == state)[0][0]] trainX, trainY, testX = create_dataset_long_term(features,x_days,y_days) trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1])) testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1])) # create and fit the LSTM network model = Sequential() model.add(LSTM(1024, input_shape=(1, x_days), activation="relu")) model.add(Dense(y_days)) model.compile(loss='mean_squared_error', optimizer='adam') model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2) # make predictions trainPredict = model.predict(trainX) testPredict = model.predict(testX) timespan = pd.date_range("2020-01-22", "2020-05-19").to_list() timespan = [str(date.date()) for date in timespan] timespan_update = pd.date_range("2020-01-21", "2020-06-08").to_list() timespan_update = [str(date.date()) for date in timespan_update] # print (testPredict) forecast_timespan = pd.date_range("2020-05-19", periods = y_days + 1).to_list() forecast_timespan = [str(date.date()) for date in forecast_timespan] fig, ax = plt.subplots() ax.set_xticks(np.arange(len(timespan_update))) ax.set_xticklabels(timespan_update) ax.plot(timespan_update,features_update, label="True Cases 2020-05-20 ~ 2020-06-08") # plt.plot(range(0,x_days), testX[-1][0]) ax.plot(forecast_timespan, np.insert(testPredict[-1], 0, features[-1]), label="Predicted Cases by LSTM 2020-05-20 ~ 2020-06-19") ax.plot(timespan, features, label = "Trained Cases 2020-01-21 ~ 2020-05-19") ax.legend() for n, label in enumerate(ax.xaxis.get_ticklabels()): if n % 30 == 0: label.set_visible(True) else: label.set_visible(False) plt.show() def smooth(x,window_len=11,window='hanning'): s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]] #print(len(s)) if window == 'flat': #moving average w=np.ones(window_len,'d') else: w=eval('np.'+window+'(window_len)') y=np.convolve(w/w.sum(),s,mode='valid') return y4. Result EvaluationThis sector provides a showcase of how these methods work with the our actual COVID-19 datasets. Use the K-Means to classify cases growth pattern of each state:kmeans_usa_states()C:\Anaconda3\envs\coronavirus\lib\site-packages\ipykernel_launcher.py:35: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy C:\Anaconda3\envs\coronavirus\lib\site-packages\mapclassify\classifiers.py:1718: UserWarning: Warning: Not enough unique values in array to form k classes Warn(ms, UserWarning) C:\Anaconda3\envs\coronavirus\lib\site-packages\mapclassify\classifiers.py:1719: UserWarning: Warning: setting k to 4 Warn("Warning: setting k to %d" % uvk, UserWarning)There are totally 4 depths of shades in the figure above. 
States with a similar pattern of cases growth are with the same shade level. The figure highlights 11 states including, California, Illinois, Michigan, Texas, Massachusetts, Florida, and New Jersy, etc, follow similar patterns of COVID-19 case growth. On the other hand, the New York state is alone in one class, meaning that it may follow a very unique pattern which might be hard to compare with other states' situations. Use RNN to forecast cases growth of California in short future:LSTM_forecast("California")Epoch 1/100 - 0s - loss: 783728958.6646 Epoch 2/100 - 0s - loss: 361501947.0039 Epoch 3/100 - 0s - loss: 61886303.5324 Epoch 4/100 - 0s - loss: 437875.9530 Epoch 5/100 - 0s - loss: 311285.4174 Epoch 6/100 - 0s - loss: 326585.1677 Epoch 7/100 - 0s - loss: 320738.6657 Epoch 8/100 - 0s - loss: 321022.4243 Epoch 9/100 - 0s - loss: 300775.2389 Epoch 10/100 - 0s - loss: 332661.8525 Epoch 11/100 - 0s - loss: 316264.8925 Epoch 12/100 - 0s - loss: 308390.0110 Epoch 13/100 - 0s - loss: 324086.4940 Epoch 14/100 - 0s - loss: 320148.6190 Epoch 15/100 - 0s - loss: 328833.8617 Epoch 16/100 - 0s - loss: 360619.4943 Epoch 17/100 - 0s - loss: 318449.2082 Epoch 18/100 - 0s - loss: 301613.8392 Epoch 19/100 - 0s - loss: 315734.5778 Epoch 20/100 - 0s - loss: 321058.3638 Epoch 21/100 - 0s - loss: 304376.0010 Epoch 22/100 - 0s - loss: 301066.3910 Epoch 23/100 - 0s - loss: 323187.4836 Epoch 24/100 - 0s - loss: 341272.0294 Epoch 25/100 - 0s - loss: 288540.4851 Epoch 26/100 - 0s - loss:[...]The orange curve in the figure above is the trend predicted by the RNN using only one day's number. It matches the curve of true cases generally well, though in fact the mean squared error is around 300 thousand. Use RNN to forecast cases growth of California in short future:LSTM_forecast_long_term("California",75,30)Epoch 1/100 - 2s - loss: 2158462523.7333 Epoch 2/100 - 1s - loss: 1214758638.9333 Epoch 3/100 - 1s - loss: 325732305.8667 Epoch 4/100 - 1s - loss: 131300163.4667 Epoch 5/100 - 1s - loss: 104209433.5833 Epoch 6/100 - 1s - loss: 92331020.0667 Epoch 7/100 - 1s - loss: 75280654.6833 Epoch 8/100 - 1s - loss: 66388829.7792 Epoch 9/100 - 1s - loss: 54955021.9333 Epoch 10/100 - 1s - loss: 40354986.3333 Epoch 11/100 - 1s - loss: 35725995.0250 Epoch 12/100 - 1s - loss: 28488542.6333 Epoch 13/100 - 1s - loss: 25390757.3417 Epoch 14/100 - 1s - loss: 22070295.1333 Epoch 15/100 - 1s - loss: 18454095.3333 Epoch 16/100 - 1s - loss: 16909671.6750 Epoch 17/100 - 1s - loss: 15307987.0333 Epoch 18/100 - 1s - loss: 13761540.1083 Epoch 19/100 - 1s - loss: 12831123.0208 Epoch 20/100 - 1s - loss: 12146528.3833 Epoch 21/100 - 1s - loss: 11023533.6917 Epoch 22/100 - 1s - loss: 10550332.6479 Epoch 23/100 - 1s - loss: 10339787.0000 Epoch 24/100 - 1s - loss: 9553627.5500 Epoch 25/100 - 1s [...]data = [38, 27, 49, 3, 9, 82, 10] def quick_sort(array): # 리스트가 하나 이하의 원소를 가지면 종료 if len(array) <= 1: return array pivot, tail = array[0], array[1:] leftSide = [x for x in tail if x <= pivot] rightSide = [x for x in tail if x > pivot] return quick_sort(leftSide) + [pivot] + quick_sort(rightSide) sorted_data = quick_sort(data) sorted_data**Best Model Selection¶**select the one we'll use for the application#Read the csv file from drive !pip install -U -q PyDrive from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from google.colab import auth from oauth2client.client import GoogleCredentials # Authenticate and create the PyDrive client. 
auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) import pickle import pandas as pd #Load the data list_pickles = { "df_models_lrc":"12JhjPl9XJyB2Ef7Mibk6_Bq7RjuVjw83", "df_models_mnbc":"1l09InBunP7WvYXcoEfm4ZnKcsrO5iUXs", "df_models_rfc":"1YsaQlxYdkNodD7p_IDtaA8ztTfMJ6jre", } df_summary = pd.DataFrame() for pickle_ ,pickle_id in list_pickles.items() : #Import the data frame from the Pickle files downloaded = drive.CreateFile({'id':pickle_id}) downloaded.GetContentFile(pickle_+'.pickle') with open(pickle_+'.pickle', 'rb') as data: df = pickle.load(data) df_summary = df_summary.append(df) df_summary = df_summary.reset_index().drop('index', axis=1) df_summary.sort_values('Test Set Accuracy', ascending=False)方差分析 Analusis of VariancexyfJASON 1 单因素方差分析考虑一个因素 $A$ 对某关心指标 $X$ 的影响,$A$ 取若干水平,每个水平上作若干试验,且每次试验控制除 $A$ 以外的其他因素不变,因而结果只受 $A$ 和随机因素的影响。我们想根据结果推断因素 $A$ 是否对指标 $X$ 有影响。> 注:$A$ 的不同水平也可以对应成若干待检测的分布,这样能检验这些分布的**均值**是否相同。 1.1 理论取 $A$ 的若干个水平 $A_1,A_2,\ldots,A_s$,并在水平 $A_i$ 下做 $n_i$ 次独立的试验(设一共做了 $n$ 次试验),观察指标 $X$,设第 $i$ 个水平下的第 $j$ 次试验结果为 $x_{ij}$。令 $\bar x_i$ 表示第 $i$ 个水平下试验结果的平均值,$\bar x$ 表示所有试验结果的平均值,$T_i$ 表示第 $i$ 个水平下所有试验结果之和,$T$ 表示所有试验结果之和,即:$$\begin{align}&\bar x_i=\frac{1}{n_i}\sum_{j=1}^{n_i}x_{ij}&&\bar x=\frac{1}{n}\sum_{i=1}^s\sum_{j=1}^{n_i}x_{ij}\end{align}$$由于随机因素的影响,我们可以把第 $i$ 个水平的试验结果视为服从正态分布 $N(\mu_i,\sigma^2)$,注意这里假设不同水平试验结果的方差是相同的。我们想看 $A$ 的变化对 $X$ 是否有影响,也即提出原假设:$$H_0:\mu_1=\mu_2=\cdots=\mu_s$$备择假设 $H_1$ 自然就是 $\mu_1,\mu_2,\ldots,\mu_s$ 不完全相等。为了检验该假设,我们考虑统计量**总体偏差平方和**:$$\begin{align}S_T&=\sum_{i=1}^s\sum_{j=1}^{n_i}(x_{ij}-\bar x)^2\\&=\sum_{i=1}^sn_i(\bar x_i-\bar x)^2+\sum_{i=1}^{s}\sum_{j=1}^{n_i}(x_{ij}-\bar x_i)^2\\&=S_A+S_E\end{align}$$其中 $S_A$ 体现组间偏差平方和,$S_E$ 体现组内偏差平方和。根据正态分布及其导出分布的知识,可以知道,若原假设 $H_0$ 成立,则统计量$$F=\frac{(n-s)S_A}{(s-1)S_E}\sim F(s-1, n-s)$$于是,给定显著性水平 $\alpha$,我们可以查表找到 $F(s-1, n-s)$ 分布的 $1-\alpha$ 分位数,若 $F$ 大于该分位数,则拒绝原假设 $H_0$。(或等价地,如果 $\text{p-value}$ 小于 $\alpha$ 就拒绝原假设 $H_0$)一般而言,取 $\alpha=0.01$ 时拒绝称影响非常显著;取 $\alpha=0.05$ 时拒绝称影响显著;否则称无显著影响。 1.2 代码`scipy.stats.f_oneway` 提供了单因素方差分析的接口,输入若干次试验的数据,返回 $F$ 的值及其 $\text{p-value}$。Documentation: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.f_oneway.html 2 例题 2.1 例一为考察 5 名工人的劳动生产率是否相同,记录了每人 4 天的产量,并算出其平均值,如下表。推断他们的生产率有无显著差别。| | $A_1$ | $A_2$ | $A_3$ | $A_4$ | $A_5$ || :--: | :---: | :---: | :---: | :---: | :---: || 1 | 256 | 254 | 250 | 248 | 236 || 2 | 242 | 330 | 277 | 280 | 252 || 3 | 280 | 290 | 230 | 305 | 220 || 4 | 298 | 295 | 302 | 289 | 252 |编写代码如下(注意每一**行**一次试验):from scipy.stats import f_oneway x = [[256, 242, 280, 298], [254, 330, 290, 295], [250, 277, 230, 302], [248, 280, 305, 289], [236, 252, 220, 252]] res = f_oneway(*x) res可见 $\text{p-value}=0.1109$,高于 $0.05$,故无显著差别。 2.2 例二用4种工艺生产灯泡,从各种工艺制成的灯泡中各抽出了若干个测量其寿命, 结果如下表,试推断这几种工艺制成的灯泡寿命是否有显著差异。| | $A_1$ | $A_2$ | $A_3$ | $A_4$ || :--: | :---: | :---: | :---: | :---: || 1 | 1620 | 1580 | 1460 | 1500 || 2 | 1670 | 1600 | 1540 | 1550 || 3 | 1700 | 1640 | 1620 | 1610 || 4 | 1750 | 1720 | | 1680 || 5 | 1800 | | | |编写代码如下:x = [[1620, 1670, 1700, 1750, 1800], [1580, 1600, 1640, 1720], [1460, 1540, 1620], [1500, 1550, 1610, 1680]] res = f_oneway(*x) resGiven a 1D array, negate all elements which are between 3 and 8#GQ a = np.arange(10) print(a) a[np.where((a>=3) & (a<=8))]*=(-1) print(a) a = np.random.randint(1,20,size=20).reshape([4,5]) a a>10 a[a>10] #1D Array a = np.random.randint(1,20,size=20).reshape([4,5]) a[2][3] = 13 aSubsituting All 13 in fourth Column by 
-1#GQ print(a) bool_arr = (a[:,3]==13) print(bool_arr) a[bool_arr, 3] = -1 print(a)[[ 5 6 2 12 19] [ 8 19 8 17 2] [ 9 12 6 13 1] [18 16 14 9 12]] [False False True False] [[ 5 6 2 12 19] [ 8 19 8 17 2] [ 9 12 6 -1 1] [18 16 14 9 12]]Calcuate the Circumference of Circule Please Define the radius of Circuleimport math rd = float(input('Please Enter the radius of Circule :')) Circumference = 2*math.pi*rd print ('Circumference of Circule of Redius {} is {}'.format(rd,Circumference))Circumference of Circule of Redius 60.0 is 376.99111843077515Preprocessing of parsed data Part 1. Air traffic and airports dataimport pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from tqdm import tqdm_notebook import warnings warnings.filterwarnings("ignore") airport_df = pd.read_csv("../data/airport_df.csv", index_col=0) airport_df.head() airport_df.drop( ['ICAO', 'Alt', 'Timezone', 'DST', 'Tz database time zone', 'type', 'source'], axis=1, inplace=True ) airport_df = airport_df[~airport_df.IATA.isin([r"\N"])] airport_df.shape connections_df = pd.read_csv("../data/connections_df.csv", index_col=0) print(connections_df.shape) connections_df.head() routes = pd.read_csv("../data/routes.csv", index_col=0) print(routes.shape) routes.head() plt.figure(figsize=(17, 8)) plt.title("World airports") plt.scatter(airport_df.Long, airport_df.Lat, s=0.5, c='darkblue') plt.ylabel("Latitude") plt.xlabel("Longitude") sns.despine() plt.show()Merging the dataframesconnections = pd.merge( connections_df, routes, how='left', left_on=['Source Airport', 'Dest Airport'], right_on=['course_code', 'destination_code'] ) connections.drop( ['course_code', 'destination_code', 'Codeshare', 'Stops', 'equipment'], axis=1, inplace=True ) connections.columns = connections.columns.str.lower().str.split().str.join("_") connections.head()Problem - half of connections don't have the estimated number of flightsconnections.isnull().sum()/connections.shape[0]Idea - fill missing number of flights with minimal number of flights from that sourceMINIMAL_FLIGHTS_ESTIMATE = 0.1 minimal_flights = connections[ (connections.destination_flights!=0)&(~connections.destination_flights.isnull()) ].groupby(['source_airport']).destination_flights.min() minimal_flights = np.ceil(minimal_flights*MINIMAL_FLIGHTS_ESTIMATE) minimal_flights.apply(np.log).hist(bins=30) sns.despine() plt.title("Distribution of the estimated number of flights") plt.show() # filling the missing values with the estimates connections.destination_flights[ (connections.destination_flights.isnull())|(connections.destination_flights==0) ] = connections[ (connections.destination_flights.isnull())|(connections.destination_flights==0) ]['source_airport'].map(minimal_flights)Sanity checkThe distributions of the number of flights before and after filling the missing data should be approximately the same (properties preservation)(routes.destination_flights[routes.destination_flights!=0]+1).apply(np.log).hist(bins=30) plt.title("Number of flights distribution BEFORE") sns.despine() plt.show() (connections.destination_flights).apply(np.log).hist(bins=50) plt.title("Number of flights distribution After") sns.despine() plt.show()--- Adding airports locations to the dataframeconnections = pd.merge( connections, airport_df[['IATA', 'Lat', 'Long']], left_on='source_airport', right_on='IATA') connections = pd.merge( connections, airport_df[['IATA', 'Lat', 'Long']], left_on='dest_airport', right_on='IATA') connections.drop(['IATA_x', 'IATA_y'], axis=1, inplace=True) 
connections.rename(columns={ 'Lat_x':'lat_source', 'Long_x':'long_source', 'Lat_y':'lat_dest', 'Long_y':'long_dest' }, inplace=True) # A few flights estimations were still missing - filling with median connections.fillna(connections.destination_flights.median(), inplace=True) connections.head()Part 2. Population datapopulation_country = pd.read_csv("../data/country_population.csv", index_col=0) population_country.pop2019 = population_country.pop2019 * 1000 print(population_country.shape) population_country.head() population_city = pd.read_csv("../data/city_population.csv", index_col=0) print(population_city.shape) population_city.head()(1723, 5)Adding city and country population to airports dataframeairport_df = pd.merge( airport_df, population_city.rename( columns={"Name":"City", "2020 Population":"city_population"} )[['City', 'city_population']], how='left', left_on='City', right_on='City' ) airport_df = pd.merge( airport_df, population_country.rename( columns={"name":"Country", "pop2019":"country_population", 'Density':'country_density'} )[['Country', 'country_population', 'country_density']], how='left', left_on='Country', right_on='Country' ) airport_df.head() airport_df.isnull().sum()/airport_df.shape[0]Problem - 81% of city population is missing!One solution could be parsing wikipedia but we can also try and estimate the population size based on the flights info! Hypothesis - the number of flights from the city is proportional to the city population# Removing 1.8% of observations with missing country info airport_df = airport_df[~airport_df.country_population.isnull()] flights_number = connections.groupby( ['source_airport'], as_index=False )[['destination_flights']].sum() flights_number.head() # adding source airport number of flights to the airport dataframe airport_df = pd.merge( airport_df, flights_number, how='left', left_on='IATA', right_on='source_airport' ) airport_df = airport_df[~airport_df.source_airport.isnull()] airport_df = airport_df[~airport_df.destination_flights.isnull()] # calculating the number of flights on a country level to fill missing flight values country_level_flights = np.ceil( airport_df[ airport_df.destination_flights!=0 ].groupby("Country")['destination_flights'].min() * MINIMAL_FLIGHTS_ESTIMATE ) airport_df.destination_flights[ airport_df.destination_flights==0 ] = airport_df[ airport_df.destination_flights==0 ]['Country'].map(country_level_flights)Let's see if our hypothesis holdsairport_df[['destination_flights', 'city_population', 'country_population', 'country_density']].corr() plt.scatter( np.log(airport_df.destination_flights), np.log(airport_df.city_population) ) plt.ylabel("City population (log-scale)") plt.xlabel("Number of flights (log-scale)") sns.despine() plt.show()Now we can calculate the "average population per flight"...flights_coefs = airport_df[ ~airport_df.city_population.isnull() ].groupby("Country")[['city_population', 'destination_flights']].sum() flights_coefs['coef'] = flights_coefs['city_population']/flights_coefs['destination_flights'] flights_coefs airport_df['flights_coefficient'] = airport_df.Country.map(flights_coefs.coef) airport_df['flights_coefficient'].fillna(airport_df['flights_coefficient'].median(), inplace=True)... 
and use this coefficient to fill the missing population datacity_estimates = np.ceil(airport_df.flights_coefficient[airport_df.city_population.isnull()] *\ airport_df.destination_flights[airport_df.city_population.isnull()]) airport_df.city_population[airport_df.city_population.isnull()] = city_estimatesFinally - clean connections data and get daily flights estimates from monthly dataconnections = connections[connections.source_airport.isin(airport_df.IATA)] connections = connections[connections.dest_airport.isin(airport_df.IATA)] connections.destination_flights = connections.destination_flights/30Saving all the dataairport_df.to_csv("../data/airport_df_preprocessed.csv") connections.to_csv("../data/connections_preprocessed.csv")ETLExtract – Transform – LoadExtract - get the data fromt eh sourceLoad - insert data into the required destinationWe would like to extract data from rpg_data sqlite 3 and load it into PostgreSQLIf we change so that it fits or change format (calculate/summarize) – that would be the transform stepWe are making our first cloud "ETL"# Getting the data !wget https://github.com/LambdaSchool/DS-Unit-3-Sprint-2-SQL-and-Databases/blob/master/module1-introduction-to-sql/rpg_db.sqlite3?raw=true # Renaming the database !mv 'rpg_db.sqlite3?raw=true' rpg_db.sqlite3 !ls # Step 1 - Extract data from sqlite3 import os import sqlite3 # construction of a path to wherever the database exists # DB_FILEPATH = "rpg_db.sqlite3.db" # DB_FILEPATH = os.path.join(os.path.dirname(__file__), "rpg_db.sqlite3") s1_connection = sqlite3.connect("rpg_db.sqlite3") # print("CONNECTION:", connection) s1_cursor = s1_connection.cursor() # Our goal is to copy the charactercreator_character table get_characters = "SELECT * FROM Charactercreator_character;" characters = s1_cursor.execute(get_characters).fetchall() len(characters) characters[:5] # Step 1 is complete , we have a list of tuples with all our character data # Note that this is not a pandas dataframe # We do not know types so far, so we need to figure out that in the transform # step # Step 2 - Transform # Our goal is to make a schema to define a table that fits this data in PostgreSQL # Can we recheck old schema? 
s1_cursor.execute("PRAGMA table_info(Charactercreator_character);").fetchall() # Need to make a create statement in PostgreSQL that captures the above types create_character_table = """ CREATE TABLE Charactercreator_character( character_id SERIAL PRIMARY KEY, name VARCHAR(30), level INTEGER, exp INTEGER, hp INTEGER, strength INTEGER, intelligence INTEGER, dexterity INTEGER, wisdom INTEGER ); """ # Defining a function to refresh connection and cursor def refresh_connection_and_cursor(connect, cursor): cursor.close() connect.close() pg_connect = psycopg2.connect(dbname=dbname, user=user, password=password, host=host) pg_cursor = pg_connect.cursor() return pg_connect, pg_cursor pg_connect, pg_cursor = refresh_connection_and_cursor(pg_connect, pg_cursor) # Execute the create table pg_cursor.execute(create_character_table) pg_connect.commit() # We can query postgre database to see what data it has # This is a clever optional step which shows postgre internals show_tables = """ SELECT * FROM pg_catalog.pg_tables WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema'; """ pg_cursor.execute(show_tables) pg_cursor.fetchall() # We now have a place to insert our characters without the need to transform as much # Step 3 - LOAD characters[0] characters[0][1:] # If we ran that, we'd insert the first character # But we want them all - loops! for character in characters: insert_character = """ INSERT INTO charactercreator_character (name, level, exp, hp, strength, intelligence, dexterity, wisdom) VALUES """ + str(character[1:]) + ";" pg_cursor.execute(insert_character) # Note - we're executing each character one at a time # That works, and is simple, but inefficient (lots of roundtrips to database) # Stretch/afternoon goal - see if you can combine into a single # insert that does them all at once pg_connect.commit() # Postgre cursor needs to fetch in a separate step unlike sqlite pg_cursor.execute("SELECT * FROM charactercreator_character LIMIT 5;") pg_cursor.fetchall() pg_cursor # To make sure other connections and cursors know about our insertion # we need to commit pg_connect.commit() # We have done a basic ETL # How can we verify that? len(characters) # Ids are different (on first run, now fixed)! # That's because we had an aborted run # Let's fix this by deleting the data and DROPping the table # Other tables are fine, but we'll dump the data *and* schema to rerun # pg_curs.execute('DROP TABLE charactercreator_character;') # pg_conn.commit() # Now we need to rerun the above... scrolling up and down, because notebooks # Specifically rerunning character table create statement and data inserts # Now the data looks the same! But let's check it systematically pg_cursor.execute('SELECT * FROM charactercreator_character;') pg_characters = pg_cursor.fetchall() # We could do more spot checks, but let's loop and check them all # TODO/afternoon task - consider making this a more formal test for character, pg_character in zip(characters, pg_characters): assert character == pg_character # No complaints - which means they're all the same! 
# Closing out cursor/connection to wrap up pg_cursor.close() pg_connect.close() s1_cursor.close() s1_connect.close()2 Scalable Learning in Scikit-learn Datasets for trying yourself the real thingimport urllib2 # import urllib.request as urllib2 in Python3 import requests, io, os, StringIO import numpy as np import tarfile, zipfile, gzip def unzip_from_UCI(UCI_url, dest=''): """ Downloads and unpacks datasets from UCI in zip format """ response = requests.get(UCI_url) compressed_file = io.BytesIO(response.content) z = zipfile.ZipFile(compressed_file) print ('Extracting in %s' % os.getcwd()+'\\'+dest) for name in z.namelist(): if '.csv' in name: print ('\tunzipping %s' %name) z.extract(name, path=os.getcwd()+'\\'+dest) def gzip_from_UCI(UCI_url, dest=''): """ Downloads and unpacks datasets from UCI in gzip format """ response = urllib2.urlopen(UCI_url) compressed_file = io.BytesIO(response.read()) decompressed_file = gzip.GzipFile(fileobj=compressed_file) filename = UCI_url.split('/')[-1][:-3] with open(os.getcwd()+'\\'+filename, 'wb') as outfile: outfile.write(decompressed_file.read()) print ('File %s decompressed' % filename) def targzip_from_UCI(UCI_url, dest='.'): """ Downloads and unpacks datasets from UCI in tar.gz format """ response = urllib2.urlopen(UCI_url) compressed_file = StringIO.StringIO(response.read()) tar = tarfile.open(mode="r:gz", fileobj = compressed_file) tar.extractall(path=dest) datasets = tar.getnames() for dataset in datasets: size = os.path.getsize(dest+'\\'+dataset) print ('File %s is %i bytes' % (dataset,size)) tar.close() def load_matrix(UCI_url): """ Downloads datasets from UCI in matrix form """ return np.loadtxt(urllib2.urlopen(UCI_url))Bike Sharing Dataset Data SetUCI_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00275/Bike-Sharing-Dataset.zip' unzip_from_UCI(UCI_url, dest='bikesharing') import os, csv local_path = os.getcwd() source = 'bikesharing\\hour.csv' SEP = ',' # We define this for being able to easily change it as required by the file with open(local_path+'\\'+source, 'rb') as R: iterator = csv.reader(R, delimiter=SEP) for n, row in enumerate(iterator): if n==0: header = row else: # DATA PROCESSING placeholder # MACHINE LEARNING placeholder pass print ('Total rows: %i' % (n+1)) print ('Header: %s' % ', '.join(header)) print ('Sample values: %s' % ', '.join(row)) with open(local_path+'\\'+source, 'rb') as R: iterator = csv.DictReader(R, delimiter=SEP) for n, row in enumerate(iterator): # DATA PROCESSING placeholder # MACHINE LEARNING placeholder pass print ('Total rows: %i' % (n+1)) print ('Sample values: %s' % str(row))Total rows: 17379 Sample values: {'mnth': '12', 'cnt': '49', 'holiday': '0', 'instant': '17379', 'temp': '0.26', 'dteday': '2012-12-31', 'hr': '23', 'season': '1', 'registered': '37', 'windspeed': '0.1343', 'atemp': '0.2727', 'workingday': '1', 'weathersit': '1', 'weekday': '1', 'hum': '0.65', 'yr': '1', 'casual': '12'}Using pandas I/O toolsimport pandas as pd CHUNK_SIZE = 1000 with open(local_path+'\\'+source, 'rb') as R: iterator = pd.read_csv(R, chunksize=CHUNK_SIZE) for n, data_chunk in enumerate(iterator): print ('Size of uploaded chunk: %i instances, %i features' % (data_chunk.shape)) # DATA PROCESSING placeholder # MACHINE LEARNING placeholder pass print ('Sample values: \n%s' % str(data_chunk.iloc[0]))Size of uploaded chunk: 1000 instances, 17 features Size of uploaded chunk: 1000 instances, 17 features Size of uploaded chunk: 1000 instances, 17 features Size of uploaded chunk: 1000 instances, 17 features 
Size of uploaded chunk: 1000 instances, 17 features Size of uploaded chunk: 1000 instances, 17 features Size of uploaded chunk: 1000 instances, 17 features Size of uploaded chunk: 1000 instances, 17 features Size of uploaded chunk: 1000 instances, 17 features Size of uploaded chunk: 1000 instances, 17 features Size of uploaded chunk: 1000 instances, 17 features Size of uploaded chunk: 1000 instances, 17 features Size of uploaded chunk: 1000 instances, 17 features Size of uploaded chunk: 1000 instances, 17 features Size of uploaded chunk: 1000 instances, 17 features Size of uploaded chunk: 1000 instances, 17 features Size of uploaded chunk: 1000 instances, 17 features Size of uploaded chunk: 379 instances, 17 features Sample values: instant 17001 dteday 2012-12-16[...]Working with databasesimport os, sys import sqlite3, csv,glob SEP = ',' def define_field(s): try: int(s) return 'integer' except ValueError: try: float(s) return 'real' except: return 'text' def create_sqlite_db(db='database.sqlite', file_pattern=''): conn = sqlite3.connect(db) conn.text_factory = str # allows utf-8 data to be stored c = conn.cursor() # traverse the directory and process each .csv file useful for building the db target_files = glob.glob(file_pattern) print ('Creating %i table(s) into %s from file(s): %s' % (len(target_files), db, ', '.join(target_files))) for k,csvfile in enumerate(target_files): # remove the path and extension and use what's left as a table name tablename = os.path.splitext(os.path.basename(csvfile))[0] with open(csvfile, "rb") as f: reader = csv.reader(f, delimiter=SEP) f.seek(0) for n,row in enumerate(reader): if n==11: types = map(define_field,row) else: if n>11: break f.seek(0) for n,row in enumerate(reader): if n==0: sql = "DROP TABLE IF EXISTS %s" % tablename c.execute(sql) sql = "CREATE TABLE %s (%s)" % (tablename, ", ".join([ "%s %s" % (col, ct) for col, ct in zip(row, types)])) print ('%i) %s' % (k+1,sql)) c.execute(sql) # Creating indexes for faster joins on long strings for column in row: if column.endswith("_ID_hash"): index = "%s__%s" % ( tablename, column ) sql = "CREATE INDEX %s on %s (%s)" % ( index, tablename, column ) c.execute(sql) insertsql = "INSERT INTO %s VALUES (%s)" % (tablename, ", ".join([ "?" 
for column in row ])) rowlen = len(row) else: # raise an error if there are rows that don't have the right number of fields if len(row) == rowlen: c.execute(insertsql, row) else: print ('Error at line %i in file %s') % (n,csvfile) raise ValueError('Houston, we\'ve had a problem at row %i' % n) conn.commit() print ('* Inserted %i rows' % n) c.close() conn.close() create_sqlite_db(db='bikesharing.sqlite', file_pattern='bikesharing\\*.csv') import os, sqlite3 import pandas as pd DB_NAME = 'bikesharing.sqlite' DIR_PATH = os.getcwd() CHUNK_SIZE = 2500 conn = sqlite3.connect(DIR_PATH+'\\'+DB_NAME) conn.text_factory = str # allows utf-8 data to be stored sql = "SELECT H.*, D.cnt AS day_cnt FROM hour AS H INNER JOIN day as D ON (H.dteday = D.dteday)" DB_stream = pd.io.sql.read_sql(sql, conn, chunksize=CHUNK_SIZE) for j,data_chunk in enumerate(DB_stream): print ('Chunk %i -' % (j+1)), print ('Size of uploaded chunk: %i istances, %i features' % (data_chunk.shape)) # DATA PROCESSING placeholder # MACHINE LEARNING placeholderChunk 1 - Size of uploaded chunk: 2500 istances, 18 features Chunk 2 - Size of uploaded chunk: 2500 istances, 18 features Chunk 3 - Size of uploaded chunk: 2500 istances, 18 features Chunk 4 - Size of uploaded chunk: 2500 istances, 18 features Chunk 5 - Size of uploaded chunk: 2500 istances, 18 features Chunk 6 - Size of uploaded chunk: 2500 istances, 18 features Chunk 7 - Size of uploaded chunk: 2379 istances, 18 featuresPaying caution to the ordering of instancesimport zlib from random import shuffle def ram_shuffle(filename_in, filename_out, header=True): with open(filename_in, 'rb') as f: zlines = [zlib.compress(line, 9) for line in f] if header: first_row = zlines.pop(0) shuffle(zlines) with open(filename_out, 'wb') as f: if header: f.write(zlib.decompress(first_row)) for zline in zlines: f.write(zlib.decompress(zline)) import os local_path = os.getcwd() source = 'bikesharing\\hour.csv' ram_shuffle(filename_in=local_path+'\\'+source, \ filename_out=local_path+'\\bikesharing\\shuffled_hour.csv', header=True) from random import shuffle import pandas as pd import numpy as np import os def disk_shuffle(filename_in, filename_out, header=True, iterations = 3, CHUNK_SIZE = 2500, SEP=','): for i in range(iterations): with open(filename_in, 'rb') as R: iterator = pd.read_csv(R, chunksize=CHUNK_SIZE) for n, df in enumerate(iterator): if n==0 and header: header_cols =SEP.join(df.columns)+'\n' df.iloc[np.random.permutation(len(df))].to_csv(str(n)+'_chunk.csv', index=False, header=False, sep=SEP) ordering = list(range(0,n+1)) shuffle(ordering) with open(filename_out, 'wb') as W: if header: W.write(header_cols) for f in ordering: with open(str(f)+'_chunk.csv', 'r') as R: for line in R: W.write(line) os.remove(str(f)+'_chunk.csv') filename_in = filename_out CHUNK_SIZE = int(CHUNK_SIZE / 2) import os local_path = os.getcwd() source = 'bikesharing\\hour.csv' disk_shuffle(filename_in=local_path+'\\'+source, \ filename_out=local_path+'\\bikesharing\\shuffled_hour.csv', header=True)Feature management with data streamsimport os, csv local_path = os.getcwd() source = 'bikesharing\\hour.csv' SEP=',' running_mean = list() running_std = list() with open(local_path+'\\'+source, 'rb') as R: iterator = csv.DictReader(R, delimiter=SEP) x = 0.0 x_squared = 0.0 for n, row in enumerate(iterator): temp = float(row['temp']) if n == 0: max_x, min_x = temp, temp else: max_x, min_x = max(temp, max_x),min(temp, min_x) x += temp x_squared += temp**2 running_mean.append(x / (n+1)) running_std.append(((x_squared - 
(x**2)/(n+1))/(n+1))**0.5) # DATA PROCESSING placeholder # MACHINE LEARNING placeholder pass print ('Total rows: %i' % (n+1)) print ('Feature \'temp\': mean=%0.3f, max=%0.3f, min=%0.3f,sd=%0.3f' \ % (running_mean[-1], max_x, min_x, running_std[-1])) import matplotlib.pyplot as plt %matplotlib inline plt.plot(running_mean,'r-', label='mean') plt.plot(running_std,'b-', label='standard deviation') plt.ylim(0.0,0.6) plt.xlabel('Number of training examples') plt.ylabel('Value') plt.legend(loc='lower right', numpoints= 1) plt.show()Describing the Covtype datasetUCI_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz' gzip_from_UCI(UCI_url) import os, csv local_path = os.getcwd() source = 'covtype.data' SEP=',' forest_type = {1:"Spruce/Fir", 2:"Lodgepole Pine", \ 3:"Ponderosa Pine", 4:"Cottonwood/Willow", \ 5:"Aspen", 6:"Douglas-fir", 7:"Krummholz"} forest_type_count = {value:0 for value in forest_type.values()} forest_type_count['Other'] = 0 lodgepole_pine = 0 spruce = 0 proportions = list() with open(local_path+'\\'+source, 'rb') as R: iterator = csv.reader(R, delimiter=SEP) for n, row in enumerate(iterator): response = int(row[-1]) # The response is the last value try: forest_type_count[forest_type[response]] +=1 if response == 1: spruce += 1 elif response == 2: lodgepole_pine +=1 if n % 10000 == 0: proportions.append([spruce/float(n+1),\ lodgepole_pine/float(n+1)]) except: forest_type_count['Other'] += 1 print ('Total rows: %i' % (n+1)) print ('Frequency of classes:') for ftype, freq in sorted([(t,v) for t,v \ in forest_type_count.iteritems()], key = \ lambda x: x[1], reverse=True): print ("%-18s: %6i %04.1f%%" % \ (ftype, freq, freq*100/float(n+1))) import matplotlib.pyplot as plt import numpy as np %matplotlib inline proportions = np.array(proportions) plt.plot(proportions[:,0],'r-', label='Spruce/Fir') plt.plot(proportions[:,1],'b-', label='Lodgepole Pine') plt.ylim(0.0,0.8) plt.xlabel('Training examples (unit=10000)') plt.ylabel('%') plt.legend(loc='lower right', numpoints= 1) plt.show()The hashing trickfrom sklearn.feature_extraction.text import HashingVectorizer h = HashingVectorizer(n_features=1000, binary=True, norm=None) sparse_vector = h.transform(['A simple toy example will make clear how it works.']) print(sparse_vector) from sklearn.feature_extraction import FeatureHasher h = FeatureHasher(n_features=1000, non_negative=True) example_row = {'numeric feature':3, 'another numeric feature':2, 'Categorical feature = 3':1, 'f1*f2*f3':1*2*3} print (example_row) print (h.transform([example_row]))(0, 16) 2.0 (0, 373) 1.0 (0, 884) 6.0 (0, 945) 3.0Other basis transformationsimport numpy as np v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) vv = np.hstack((v, [v[i]*v[j] for i in range(len(v)) for j in range(i+1, len(v))])) print vv[ 1 2 3 4 5 6 7 8 9 10 2 3 4 5 6 7 8 9 10 6 8 10 12 14 16 18 20 12 15 18 21 24 27 30 20 24 28 32 36 40 30 35 40 45 50 42 48 54 60 56 63 70 72 80 90]Trying SGD in actionimport os local_path = os.getcwd() source = 'covtype.data' ram_shuffle(filename_in=local_path+'\\'+source, \ filename_out=local_path+'\\shuffled_covtype.data', \ header=False) import csv, time import numpy as np from sklearn.linear_model import SGDClassifier source = 'shuffled_covtype.data' SEP=',' forest_type = [t+1 for t in range(7)] SGD = SGDClassifier(loss='log', penalty=None, random_state=1, average=True) accuracy = 0 holdout_count = 0 prog_accuracy = 0 prog_count = 0 cold_start = 200000 k_holdout = 10 with open(local_path+'\\'+source, 'rb') as R: iterator = 
csv.reader(R, delimiter=SEP) for n, row in enumerate(iterator): if n > 250000: # Reducing the running time of the experiment break # DATA PROCESSING response = np.array([int(row[-1])]) # The response is the last value features = np.array(map(float,row[:-1])).reshape(1,-1) # MACHINE LEARNIN if (n+1) >= cold_start and (n+1-cold_start) % k_holdout==0: if int(SGD.predict(features))==response[0]: accuracy += 1 holdout_count += 1 if (n+1-cold_start) % 25000 == 0 and (n+1) > cold_start: print '%s holdout accuracy: %0.3f' % (time.strftime('%X'), accuracy / float(holdout_count)) else: # PROGRESSIVE VALIDATION if (n+1) >= cold_start: if int(SGD.predict(features))==response[0]: prog_accuracy += 1 prog_count += 1 if n % 25000 == 0 and n > cold_start: print '%s progressive accuracy: %0.3f' % (time.strftime('%X'), prog_accuracy / float(prog_count)) # LEARNING PHASE SGD.partial_fit(features, response, classes=forest_type) print '%s FINAL holdout accuracy: %0.3f' % (time.strftime('%X'), accuracy / ((n+1-cold_start) / float(k_holdout))) print '%s FINAL progressive accuracy: %0.3f' % (time.strftime('%X'), prog_accuracy / float(prog_count)) import csv, time, os import numpy as np from sklearn.linear_model import SGDRegressor from sklearn.feature_extraction import FeatureHasher source = '\\bikesharing\\hour.csv' local_path = os.getcwd() SEP=',' def apply_log(x): return np.log(float(x)+1) def apply_exp(x): return np.exp(float(x))-1 SGD = SGDRegressor(loss='squared_loss', penalty=None, random_state=1, average=True) h = FeatureHasher(non_negative=True) val_rmse = 0 val_rmsle = 0 predictions_start = 16000 with open(local_path+'\\'+source, 'rb') as R: iterator = csv.DictReader(R, delimiter=SEP) for n, row in enumerate(iterator): # DATA PROCESSING target = np.array([apply_log(row['cnt'])]) features = {k+'_'+v:1 for k,v in row.iteritems() \ if k in ['holiday','hr','mnth','season', \ 'weathersit','weekday','workingday','yr']} numeric_features = {k:float(v) for k,v in \ row.iteritems() if k in ['hum', 'temp', '\ atemp', 'windspeed']} features.update(numeric_features) hashed_features = h.transform([features]) # MACHINE LEARNING if (n+1) >= predictions_start: # HOLDOUT AFTER N PHASE predicted = SGD.predict(hashed_features) val_rmse += (apply_exp(predicted) \ - apply_exp(target))**2 val_rmsle += (predicted - target)**2 if (n-predictions_start+1) % 250 == 0 \ and (n+1) > predictions_start: print '%s holdout RMSE: %0.3f' \ % (time.strftime('%X'), (val_rmse \ / float(n-predictions_start+1))**0.5), print 'holdout RMSLE: %0.3f' % \ ((val_rmsle / float(n-predictions_start+1))**0.5) else: # LEARNING PHASE SGD.partial_fit(hashed_features, target) print '%s FINAL holdout RMSE: %0.3f' % \ (time.strftime('%X'), (val_rmse \ / float(n-predictions_start+1))**0.5) print '%s FINAL holdout RMSLE: %0.3f' % \ (time.strftime('%X'), (val_rmsle \ / float(n-predictions_start+1))**0.5)09:31:23 holdout RMSE: 281.214 holdout RMSLE: 1.901 09:31:23 holdout RMSE: 255.092 holdout RMSLE: 1.803 09:31:23 holdout RMSE: 255.578 holdout RMSLE: 1.799 09:31:23 holdout RMSE: 254.598 holdout RMSLE: 1.816 09:31:23 holdout RMSE: 239.728 holdout RMSLE: 1.738 09:31:23 FINAL holdout RMSE: 229.154 09:31:23 FINAL holdout RMSLE: 1.679Wikipedia Functions## Basic stuff %load_ext autoreload %autoreload from IPython.core.display import display, HTML display(HTML("")) display(HTML("""""")) ## Python Version import sys print("Python: {0}".format(sys.version)) from wikifilm import wikifilm from wikipedia import wikipedia from timeUtils import clock, elapsed import datetime 
as dt start = dt.datetime.now() print("Notebook Last Run Initiated: "+str(start)) film = wikifilm() #film.getWikiFilmYearlyData() #film.processWikiFilmYearlyData() import re from time import sleep from timeUtils import clock, elapsed from ioUtils import saveFile, getFile from fsUtils import setDir, isDir, mkDir, setFile, isFile, setSubFile from fileUtils import getBaseFilename from searchUtils import findSubPatternExt, findPatternExt, findExt from strUtils import convertCurrency from webUtils import getWebData, getHTML from movieDB import movieDB from os import getcwd import operator ############################################################################################################################## # Box Office ############################################################################################################################## class GoldenGlobes(movieDB): def __init__(self, basedir=None): self.name = "GoldenGlobes" movieDB.__init__(self, dbdir=self.name) ########################################################################################################################### # Get GoldenGlobes Files ########################################################################################################################### def downloadGoldenGlobesCategoryData(self, category, outdir, debug=False): url = "https://en.wikipedia.org/wiki/Golden_Globe_Award_for_{0}".format(category) savename = setFile(outdir, category+".p") if isFile(savename): return if debug: print("Downloading {0}".format(url)) getWebData(base=url, savename=savename, useSafari=False) sleep(1) def getGoldenGlobesCategoryData(self, debug=False): outdir = self.getDataDir() if debug: print("Data Directory: {0}".format(outdir)) if not isDir(outdir): mkDir(outdir) categories = ["Best_Motion_Picture_-_Drama", "Best_Motion_Picture_-_Musical_or_Comedy", "Best_Animated_Feature_Film", "Best_Foreign_Language_Film"] for category in categories: self.downloadGoldenGlobesCategoryData(category, outdir, debug) ########################################################################################################################### # Parse Box Office Weekend Files ########################################################################################################################### def parseGoldenGlobesFilmData(self, table, category, debug=False): filmdata = {} ths = table.findAll("th") ths = [x.text for x in ths if x is not None] ths = [x.replace("\n", "") for x in ths] print(ths) trs = table.findAll("tr") year = None pbs = None for i,tr in enumerate(trs[1:]): tds = tr.findAll("td") if len(tds) == 1: continue bs = len(tr.findAll("b")) ## Check for new year if bs > 1 and pbs == 0: try: year = tds[0].text year = int(year) tds = tds[1:] except: raise ValueError("Could not find year in {0}".format(tds[0])) pbs = bs tds = [x.text for x in tds] tds = [x.replace("\n", "") for x in tds] tds = [x.strip() for x in tds] tds.insert(0, year) if tds[0] is None: tds = tds[1:] try: year = int(tds[0]) except: raise ValueError("Could not find year in {0}".format(tds[0])) if len(tds) + 1 == len(ths): tds.insert(2, tds[1]) #print(i,year,'\t',len(tds),'\t',len(ths),'\t',tds[0],'\t',tds[1],'\t',tds[2]) #continue #continue try: row = dict(zip(ths, tds)) except: raise ValueError("Could not zip: [{0}], [{1}]".format(ths, tds)) if row.get("Film") is None: try: row["Film"] = "{0} ({1})".format(row["English title"], row["Original title"]) except: raise ValueError("Could not create film name: {0}".format(row)) if filmdata.get(year) is None: 
filmdata[year] = {} if filmdata[year].get(category) is None: filmdata[year][category] = [] try: movie = row["Film"] except: raise ValueError("Cannot find movie in {0}".format(row)) filmdata[year][category].append(movie) if debug: print("{0: <10}{1: <20}{2}".format(year,category,movie)) return filmdata def parseGoldenGlobesCategoryData(self, ifile, category, debug = False): htmldata = getFile(ifile) bsdata = getHTML(htmldata) data = {} done = False tables = bsdata.findAll("table", {"class": "wikitable"}) if debug: print(" Found {0} tables".format(len(tables))) for table in tables: yeardata = self.parseGoldenGlobesFilmData(table, category, debug=False) data = {**data, **yeardata} for year,yearData in data.items(): for category in yearData.keys(): data[year][category] = list(set(data[year][category])) return data def processGoldenGlobesCategoryData(self, debug=False): outdir = self.getDataDir() files = findExt(outdir, ext="*.p") from collections import OrderedDict movies = OrderedDict() for ifile in files: if debug: print("Processing {0}".format(ifile)) category = getBaseFilename(ifile) results = self.parseGoldenGlobesCategoryData(ifile, category, debug=debug) if len(results) == 0: raise ValueError("No results for {0}".format(ifile)) for year,yearData in results.items(): for category,categoryData in yearData.items(): if movies.get(year) is None: movies[year] = [] for movie in categoryData: movies[year].append(movie) for year in movies.keys(): movies[year] = list(set(movies[year])) yearlyMovies = movies[year] movies[year] = [] for movie in yearlyMovies: movies[year].append([movie,10]) print(movies[year]) savename = setFile(self.getResultsDir(), "{0}.json".format(self.name)) print("Saving {0} Years of GoldenGlobes Data to {1}".format(len(movies), savename)) saveFile(savename, movies) #yamldata.saveYaml(savename, movies)Get/Parse/Merge/Process Wikipedia Datagg = GoldenGlobes() _, _ = clock("Last Run") gg.getGoldenGlobesCategoryData(debug=True) gg.processGoldenGlobesCategoryData(debug=True)Processing /Users/tgadfort/Documents/code/movies/GoldenGlobes/data/Best_Foreign_Language_Film.p Found 6 tables ['Year', 'English title', 'Original title', 'Director', 'Country'] ['Year', 'English title', 'Original title', 'Director', 'Country'] ['Year', 'English title', 'Original title', 'Director', 'Country'] ['Year', 'English title', 'Original title', 'Director', 'Country'] ['Year', 'English title', 'Original title', 'Director', 'Country'] ['Year', 'English title', 'Original title', 'Director', 'Country'] Processing /Users/tgadfort/Documents/code/movies/GoldenGlobes/data/Best_Motion_Picture_-_Drama.p Found 8 tables ['Year', 'Film', 'Director[4]', 'Producer/s[5]'] ['Year', 'Film', 'Director[4]', 'Producer/s[5]'] ['Year', 'Film', 'Director[4]', 'Producer/s[5]'] ['Year', 'Film', 'Director[4]', 'Producer/s[5]'] ['Year', 'Film', 'Director[4]', 'Producer/s[5]'] ['Year', 'Film', 'Director[4]', 'Producer/s[5]'] ['Year', 'Film', 'Director[4]', 'Producer/s[5]']1.1 선형모형- 종속변수 rainfall을 다른 변수로 예측하는 선형 모형을 만들고 결정계수로 성능을 구하여라%matplotlib inline from matplotlib import rc plt.style.use('seaborn') rc('font', family='NanumGothic') plt.rcParams['axes.unicode_minus'] = False formula_simple = "rainfall ~ " + "seeding + scale(time) + scale(sne) + scale(cloudcover)\ + scale(prewetness) + echomotion" model1 = sm.OLS.from_formula(formula_simple, data=df) result1 = model1.fit() print(result1.summary())OLS Regression Results ============================================================================== Dep. 
Variable: rainfall R-squared: 0.385 Model: OLS Adj. R-squared: 0.168 Method: Least Squares F-statistic: 1.773 Date: Sat, 23 May 2020 Prob (F-statistic): 0.165 Time: 09:19:50 Log-Likelihood: -54.936 No. Observations: 24 AIC: 123.9 Df Residuals: 17 BIC: 132.1 Df Model: 6 Covariance Type: nonrobust ============================================================================================ coef [...]1.3 seeding이 영향을 끼치는지?- ANOVA분석으로 검정, 유의수준 10%sns.boxplot(x = "seeding", y = "rainfall", data=df) plt.show() model = sm.OLS.from_formula("rainfall ~ seeding", data=df) result = model.fit() sm.stats.anova_lm(result) # R-squared 값이 0인가 아닌가를 테스트 하는게 핵심 # F-test # 답 : ANOVA 분석의 유의확률은 72%이므로 seeding은 영향을 미치지 않는다.1.4 seeding과 다른 변수의 상호작용(interrection) 확인- 위 단순 모형에 seeding과 다른 변수의 상호작용을 추가하여 변수 중 seeding과의 상호작용이 유의한 변수를 찾아라# seeding - time 상호작용 # 유의함 model = sm.OLS.from_formula("rainfall ~ seeding : scale(time)", data=df) result = model.fit() sm.stats.anova_lm(result) # seeding - sne 상호작용 model = sm.OLS.from_formula("rainfall ~ seeding : scale(sne)", data=df) result = model.fit() sm.stats.anova_lm(result) # seeding - cloudcover 상호작용 # 유의함 model = sm.OLS.from_formula("rainfall ~ seeding : scale(cloudcover)", data=df) result = model.fit() sm.stats.anova_lm(result) # seeding - echomotion 상호작용 model = sm.OLS.from_formula("rainfall ~ seeding : echomotion", data=df) result = model.fit() sm.stats.anova_lm(result) # seeding - prewetness 상호작용 model = sm.OLS.from_formula("rainfall ~ seeding : scale(prewetness)", data=df) result = model.fit() sm.stats.anova_lm(result) # 이렇게 찾아주는게 좋다 formula_interact = "rainfall ~ " + "seeding*(scale(time) + scale(sne) + scale(cloudcover)\ + scale(prewetness) + echomotion)" model_interact = sm.OLS.from_formula(formula_interact, data = df) result_interact = model_interact.fit() print(result_interact.summary()) # 범수형 자료형과 실수형 자료형의 상호작용 # -> 기울기가 달라짐 # 최종적으로 유의한 것들만 모으기 formula_last = "rainfall ~ scale(time) + seeding:scale(sne)" model_last = sm.OLS.from_formula(formula_last, data = df) result_last = model_last.fit() print(result_last.summary()) # seeding을 안했을 경우 p-value : 0.786 # coef값이 0.2222는 믿을수 없는 값임 -> 0이라고 생각OLS Regression Results ============================================================================== Dep. Variable: rainfall R-squared: 0.380 Model: OLS Adj. R-squared: 0.287 Method: Least Squares F-statistic: 4.090 Date: Sat, 23 May 2020 Prob (F-statistic): 0.0204 Time: 09:43:15 Log-Likelihood: -55.028 No. Observations: 24 AIC: 118.1 Df Residuals: 20 BIC: 122.8 Df Model: 3 Covariance Type: nonrobust =========================================================================================== coef s[...]2. 
다항회귀dataset = sm.datasets.get_rdataset("Salaries", package = "carData") df = dataset.data[["yrs.since.phd", "salary"]] df = df.rename(columns = {"yrs.since.phd": "experience"}) df.tail() sns.scatterplot(x = "experience", y = "salary", data = df) plt.show() model = sm.OLS.from_formula("salary ~ experience", data=df) result = model.fit() print(result.summary()) sns.regplot(x = "experience", y = "salary", data = df) plt.show() model2 = sm.OLS.from_formula("salary ~ experience + I(experience**2)", data=df) result2 = model2.fit() print(result2.summary()) # y값에 log를 취해줌 model2 = sm.OLS.from_formula("np.log(salary) ~ experience + I(experience**2)", data=df) result2 = model2.fit() print(result2.summary()) # cross validation을 해서 train data에서 퍼포먼스 늘리는것 x -> test 데이터에서 퍼포먼스를 늘려야함 # 근본적으로는 test 퍼포먼스만 좋아지면 일단은 된거임 # 상대방에게 설득력을 높이기 위해서는 현실에서 왜 이런 현상이 일어났는가를 설명할수 있으면 더 좋다 mtcars = sm.datasets.get_rdataset("mtcars") df = mtcars.data df.tail() print(mtcars.__doc__) formula0 = "mpg ~ cyl + disp + hp + drat + wt + qsec + vs + am + gear + carb" model0 = sm.OLS.from_formula(formula0, data = df) result0 = model0.fit() print(result0.summary()) # 스캐일링 formula1 = "mpg ~ scale(cyl) + scale(disp) + scale(hp) + scale(drat) + scale(wt) + scale(qsec)\ + C(vs) + C(am) + C(gear) + C(carb)" model1 = sm.OLS.from_formula(formula1, data = df) result1 = model1.fit() print(result1.summary()) formula2 = "mpg ~ scale(cyl) + scale(disp) + scale(hp) + scale(drat) + scale(wt) + scale(qsec)\ + C(vs) + C(am) + scale(gear) + scale(carb)" model2 = sm.OLS.from_formula(formula2, data = df) result2 = model2.fit() print(result2.summary()) # wt만 유의함 formula3 = "mpg ~ scale(wt) + C(am):(scale(wt) + scale(qsec))" model3 = sm.OLS.from_formula(formula3, data = df) result3 = model3.fit() print(result3.summary())OLS Regression Results ============================================================================== Dep. Variable: mpg R-squared: 0.895 Model: OLS Adj. R-squared: 0.879 Method: Least Squares F-statistic: 57.37 Date: Sat, 23 May 2020 Prob (F-statistic): 8.28e-13 Time: 11:23:05 Log-Likelihood: -66.359 No. Observations: 32 AIC: 142.7 Df Residuals: 27 BIC: 150.0 Df Model: 4 Covariance Type: nonrobust ======================================================================================== coef std err[...]텍스트 분석# !wget -nc https://raw.githubusercontent.com/e9t/nsmc/master/ratings_train.txt import codecs with codecs.open("ratings_train.txt", encoding = 'utf-8') as f: data = [line.split('\t') for line in f.read().splitlines()] data = data[ 1:] docs = list(zip(*data))[1] docs````doc : list of str list of list of str[["아버지", "가", "방","에"], ....]````from konlpy.tag import Okt tagger = Okt() def tokenize(doc): tokens = [t for t in tagger.nouns(doc)] return tokens tokenize(docs[0]) %%time sentences = [tokenize(d) for d in docs[:10000]] words = [word for sentence in sentences for word in sentence] from nltk import FreqDist fd = FreqDist(words) fd.most_common(10) from wordcloud import WordCloud font_path = '/usr/share/fonts/truetype/nanum/NanumGothic.ttf' wc = WordCloud(width=1000, height=600, background_color="white", random_state=0, font_path=font_path) plt.imshow(wc.generate_from_frequencies(fd)) plt.axis("off") plt.show()1. What is BioPython??The BioPython Project is an international association of developers of freely available Python tools for computational molevular biology. 
2.What can I find in the Biopython Package The ability to parse bioinformatics les into Python utilizable data structures, including support forthe following formats: Blast output { both from standalone and WWW Blast. Clustalw . FASTA . GenBank . PubMed and Medline . ExPASy les, like Enzyme and Prosite . SCOP, including dom and **lin** les . UniGene . SwissProt  Files in the supported formats can be iterated over record by record or indexed and accessed via aDictionary interface.# !pip install biopython # if something error try uninstall and reboot and again install # !pip uninstall biopythonWorking with Sequencesfrom Bio.Seq import Seq my_seq = Seq("AGTACACTGGT") my_seqWhat we have here is a sequence object with a generic alphabet - reecting the fact we have not speci ed if this is a DNA or protein sequence ```(okay, a protein with a lot of Alanines, Glycines, Cysteines andThreonines!)```# Reverse the Sequence print(my_seq.complement()) #complemetn A-T , G-C , T-A , C-G print(my_seq.reverse_complement()) #IN REVERSE ORDERTCATGTGACCA ACCAGTGTACTPARSING SEQUENCE FILE FORMATS https://www.ncbi.nlm.nih.gov:80/entrez/query.fcgi?db=Nucleotide Simple FASTA parsing examplefrom Bio import SeqIO for seq_record in SeqIO.parse("datas/ls_orchid.fasta","fasta"): #file location data/ print(seq_record.id) print(repr(seq_record)) #The repr() method returns a printable representational string of the given object. print(len(seq_record)) # from Bio import SeqIO record = SeqIO.read("datas/chr3_184033625.fa","fasta") record.seq len(record.seq) record.seq[200]Simple GenBank parsing examplefrom Bio import SeqIO for seq_record in SeqIO.parse("datas/ls_orchid.gbk","genbank"): print(seq_record.id) print(seq_record.seq) #repr(seq_record.seq) #check the differences print(len(seq_record))Z78533.1 CGTAACAAGGTTTCCGTAGGTGAACCTGCGGAAGGATCATTGATGAGACCGTGGAATAAACGATCGAGTGAATCCGGAGGACCGGTGTACTCAGCTCACCGGGGGCATTGCTCCCGTGGTGACCCTGATTTGTTGTTGGGCCGCCTCGGGAGCGTCCATGGCGGGTTTGAACCTCTAGCCCGGCGCAGTTTGGGCGCCAAGCCATATGAAAGCATCACCGGCGAATGGCATTGTCTTCCCCAAAACCCGGAGCGGCGGCGTGCTGTCGCGTGCCCAATGAATTTTGATGACTCTCGCAAACGGGAATCTTGGCTCTTTGCATCGGATGGAAGGACGCAGCGAAATGCGATAAGTGGTGTGAATTGCAAGATCCCGTGAACCATCGAGTCTTTTGAACGCAAGTTGCGCCCGAGGCCATCAGGCTAAGGGCACGCCTGCTTGGGCGTCGCGCTTCGTCTCTCTCCTGCCAATGCTTGCCCGGCATACAGCCAGGCCGGCGTGGTGCGGATGTGAAAGATTGGCCCCTTGTGCCTAGGTGCGGCGGGTCCAAGAGCTGGTGTTTTGATGGCCCGGAACCCGGCAAGAGGTGGACGGATGCTGGCAGCAGCTGCCGTGCGAATCCCCCATGTTGTCGTGCTTGTCGGACAGGCAGGAGAACCCTTCCGAACCCCAATGGAGGGCGGTTGACCGCCATTCGGATGTGACCCCAGGTCAGGCGGGGGCACCCGCTGAGTTTACGC 740 Z78532.1 CGTAACAAGGTTTCCGTAGGTGAACCTGCGGAAGGATCATTGTTGAGACAACAGAATATATGATCGAGTGAATCTGGAGGACCTGTGGTAACTCAGCTCGTCGTGGCACTGCTTTTGTCGTGACCCTGCTTTGTTGTTGGGCCTCCTCAAGAGCTTTCATGGCAGGTTTGAACTTTAGTACGGTGCAGTTTGCGCCAAGTCATATAAAGCATCACTGATGAATGACATTATTGTCAG[...]Chapter 3 Sequence objects We'll use the IUPAC alphabets here to deal with some of our favorite objects: DNA, RNA and Proteins. Bio.Alphabet.IUPAC provides basic de nitions for proteins, DNA and RNA, but additionally providesthe ability to extend and customize the basic de nitions. For instance, for proteins, there is a basic IUPACProteinclass, but there is an additional ExtendedIUPACProtein class providing for the additionalelements \U" (or \Sec" for selenocysteine) and \O" (or \Pyl" for pyrrolysine), plus the ambiguous symbols\B" (or \Asx" for asparagine or aspartic acid), \Z" (or \Glx" for glutamine or glutamic acid), \J" (or \Xle"for leucine isoleucine) and \X" (or \Xxx" for an unknown amino acid). 
For DNA you've got choices of IUPACUnambiguousDNA, which provides for just the basic letters, IUPACAmbiguousDNA (which provides for ambiguity letters for every possible situation) and ExtendedIUPACDNA, which allows letters for modified bases. Similarly, RNA can be represented by IUPACAmbiguousRNA or IUPACUnambiguousRNA. The advantages of having an alphabet class are twofold. First, this gives an idea of the type of information the Seq object contains. Secondly, this provides a means of constraining the information, as a means of type checking. Now that we know what we are dealing with, let's look at how to utilize this class to do interesting work. You can create an ambiguous sequence with the default generic alphabet like this:
from Bio.Seq import Seq my_seq = Seq("AGTACACTGGT") my_seq # Alphabet my_seq.alphabet
However, where possible you should specify the alphabet explicitly when creating your sequence objects - in this case an unambiguous DNA alphabet object:
from Bio.Seq import Seq from Bio.Alphabet import IUPAC my_seq = Seq("AGTACACTGGT", IUPAC.unambiguous_dna) my_seq my_seq.alphabet # Unless of course, this really is an amino acid sequence: from Bio.Seq import Seq from Bio.Alphabet import IUPAC my_prot = Seq("AGTACACTGGT", IUPAC.protein) print(my_prot) print(my_prot.alphabet) my_prot.alphabet
Sequences act like strings
from Bio.Seq import Seq from Bio.Alphabet import IUPAC my_seq = Seq("GATCG", IUPAC.unambiguous_dna) for index, letter in enumerate(my_seq): print("%i %s"%(index,letter)) print(len(my_seq)) my_seq print(my_seq[0]) # First letter print(my_seq[1]) # Second letter print(my_seq[-1]) # Last letter, same as with a plain string
G A G
The Seq object has a .count() method, just like a string:
from Bio.Seq import Seq print("AAAA".count("AA")) print(Seq("AAAA").count("AA"))
2 2
For some biological uses, you may actually want an overlapping count (i.e. 3 in this trivial example). When searching for single letters, this makes no difference:
from Bio.Seq import Seq from Bio.Alphabet import IUPAC my_seq = Seq("GATCGATGGGCCTATATAGGATCGAAAATCGC", IUPAC.unambiguous_dna) len(my_seq) my_seq.count("AT") # try different terms 100 * float(my_seq.count("G") + my_seq.count("C")) / len(my_seq) # the GC percentage, computed by hand
While you could use the above snippet of code to calculate a GC%, note that the Bio.SeqUtils module has several GC functions already built. For example:
from Bio.Seq import Seq from Bio.Alphabet import IUPAC from Bio.SeqUtils import GC my_seq = Seq("GATCGATGGGCCTATATAGGATCGAAAATCGC", IUPAC.unambiguous_dna) GC(my_seq)
Note that using the Bio.SeqUtils.GC() function should automatically cope with mixed case sequences and the ambiguous nucleotide S which means G or C. Also note that just like a normal Python string, the Seq object is in some ways "read-only". If you need to edit your sequence, for example simulating a point mutation, look at the MutableSeq object section below.
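As a quick illustration of that last point (this snippet is an addition, not part of the tutorial text), GC() also accepts a plain string and counts the ambiguous base S/s toward the GC total regardless of case:
from Bio.SeqUtils import GC
# mixed case plus two ambiguous S bases (G or C): 4 of the 8 letters count, giving 50.0
print(GC("actgSStt"))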
Slicing a sequence
# A more complicated example, let's get a slice of the sequence from Bio.Seq import Seq from Bio.Alphabet import IUPAC my_seq = Seq("GATCGATGGGCCTATATAGGATCGAAAATCGC", IUPAC.unambiguous_dna) my_seq[4:12] my_seq[0::3]
Turning Seq objects into strings
If you really do just need a plain string, for example to write to a file or insert into a database, then this is very easy to get:
my_seq str(my_seq) # convert the Seq object to a plain str print(my_seq) # you can also use a Seq object directly with a %s placeholder when using the Python string formatting # or interpolation operator (%) fasta_format_string = ">Name\n%s\n"%my_seq print(fasta_format_string)
>Name GATCGATGGGCCTATATAGGATCGAAAATCGC
Concatenating or adding sequences
Naturally you can in principle add any two Seq objects together just like you can with Python strings to concatenate them. However, you can't add sequences with incompatible alphabets, such as a protein sequence and a DNA sequence.
from Bio.Alphabet import IUPAC from Bio.Seq import Seq protein_seq = Seq("EVERNAK", IUPAC.protein) dna_seq = Seq("ACGT", IUPAC.unambiguous_dna) protein_seq + dna_seq
The above code raises an error. If you really wanted to do this, you'd have to first give both sequences generic alphabets:
from Bio.Alphabet import generic_alphabet protein_seq.alphabet = generic_alphabet dna_seq.alphabet = generic_alphabet protein_seq + dna_seq
Here is an example of adding a generic nucleotide sequence to an unambiguous IUPAC DNA sequence, resulting in an ambiguous nucleotide sequence:
from Bio.Seq import Seq from Bio.Alphabet import generic_nucleotide from Bio.Alphabet import IUPAC nuc_seq = Seq("GATCGATGC", generic_nucleotide) dna_seq = Seq("ACGT", IUPAC.unambiguous_dna) nuc_seq dna_seq nuc_seq + dna_seq
# You may often have many sequences to add together, which can be done with a for loop like this: from Bio.Seq import Seq from Bio.Alphabet import generic_dna list_of_seqs = [Seq("ACGT", generic_dna), Seq("AACC", generic_dna), Seq("GGTT", generic_dna)] concatenated = Seq("", generic_dna) for s in list_of_seqs: concatenated += s concatenated
Or, a more elegant approach is to use the built-in sum function with its optional start value argument (which otherwise defaults to zero):
from Bio.Seq import Seq from Bio.Alphabet import generic_dna list_of_seqs = [Seq("ACGT", generic_dna), Seq("AACC", generic_dna), Seq("GGTT", generic_dna)] sum(list_of_seqs, Seq("", generic_dna))
3.6 Changing Case
Biopython Seq objects have the same case methods as strings:
from Bio.Seq import Seq from Bio.Alphabet import generic_dna dna_seq = Seq("acgtACGT", generic_dna) print(dna_seq) print(dna_seq.upper()) print(dna_seq.lower()) print("GTAC" in dna_seq) print("GTAC" in dna_seq.upper()) print("gtac" in dna_seq.lower())
acgtACGT ACGTACGT acgtacgt False True True
3.7 Nucleotide Sequences and (reverse) complements
For nucleotide sequences you can easily obtain the complement or reverse complement of a Seq object using its built-in methods:
from Bio.Seq import Seq from Bio.Alphabet import IUPAC my_seq = Seq("GATCGATGGGCCTATATAGGATCGAAAATCGC", IUPAC.unambiguous_dna) my_seq my_seq.complement() # complement my_seq.reverse_complement() # reverse complement
An easy way to just reverse a Seq object (or a Python string) is to slice it with a -1 step:
name = "krishna" name[::-2] my_seq[::-1] from Bio.Seq import Seq from Bio.Alphabet import IUPAC protein_seq = Seq("EVERNAK", IUPAC.protein) protein_seq.complement() # ValueError: Proteins do not have complements!
Transcription DNA coding strand (aka Crick strand, strand +1) 5'
ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG 3'|||||||||||||||||||||||||||||||||||||||3' TACCGGTAACATTACCCGGCGACTTTCCCACGGGCTATC 5'DNA template strand (aka Watson strand, strand 􀀀1)jTranscription5' AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG 3'Single stranded messenger RNA to understand clear biopython tutorial page 25 The actual biological transcription process works from the template strand, doing a reverse complement (TCAG ! CUGA) to give the mRNA. However, in Biopython and bioinformatics in general, we typicallywork directly with the coding strand because this means we can get the mRNA sequence just by switchingT ! U.Now let's actually get down to doing a transcription in Biopython. First, let's create Seq objects for thecoding and template DNA strands:from Bio.Seq import Seq from Bio.Alphabet import IUPAC coding_dna = Seq("ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG", IUPAC.unambiguous_dna) print(coding_dna) template_dna = coding_dna.reverse_complement() print(template_dna) coding_dna messanger_rna = coding_dna.transcribe() #Change T to U messanger_rnaAs you can see, all this does is switch **T ! U** , and adjust the alphabet.If you do want to do a true biological transcription starting with the template strand, then this becomesa two-step process:template_dna.reverse_complement().transcribe() from Bio.Seq import Seq from Bio.Alphabet import IUPAC messenger_rna = Seq("AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG", IUPAC.unambiguous_rna) messanger_rna messenger_rna.back_transcribe()Translation Sticking with the same example discussed in the transcription section above, now let's translate this mRNA into the corresponding protein sequence again taking advantage of one of the Seq object's biological methods:from Bio.Seq import Seq from Bio.Alphabet import IUPAC messenger_rna = Seq("AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG", IUPAC.unambiguous_rna) messenger_rna messenger_rna.translate() #You can also translate directly from the coding strand DNA sequence: from Bio.Seq import Seq from Bio.Alphabet import IUPAC coding_dna = Seq("ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG", IUPAC.unambiguous_dna) coding_dna coding_dna.translate()The translation tables available in Biopython are based on those from the NCBI (see the next section of this tutorial). By default, translation will use the standard genetic code (NCBI table id 1). Suppose we are dealing with a mitochondrial sequence. We need to tell the translation function to use the relevant genetic code instead:coding_dna.translate(table= "Vertebrate Mitochondrial")You can also specify the table using the NCBI table number which is shorter, and often included in thefeature annotation of GenBank les:coding_dna.translate(table=2)Now, You may want to translate the nucleotides up to the first in frame stop codon, and then stop( as happens in nature):coding_dna.translate() coding_dna.translate(to_stop = True) # You can even specify the stop symbol if you don't like the default asterisk: coding_dna.translate(table=2, stop_symbol="@")given a complete CDS, the default translate method will do what you want (perhaps with theto_stop option). However, what if your sequence uses a non-standard start codon? This happens a lot in bacteria for example the **gene yaaX in E. 
Given a complete CDS, the default translate method will do what you want (perhaps with the to_stop option). However, what if your sequence uses a non-standard start codon? This happens a lot in bacteria, for example the **gene yaaX in E. coli K12**:
from Bio.Seq import Seq from Bio.Alphabet import generic_dna gene = Seq("GTGAAAAAGATGCAATCTATCDTACTATCGTACTAATCTATCGTACTCGCACTTTCCCTGGTTCTGGTCGCTCCCATGGCA" +\ "GCACAGGCTGCGGAAATTACGTTAGTCCCGTCAGTAAAATTACAGATAGGCGATCGTGAT" + \ "AATCGTGGCTATTACTGGGATGGAGGTCACTGGCGCGACCACGGCTGGTGGAAACAACAT" + \ "TATGAATGGCGAGGCAATCGCTGGCACCTACACGGACCGCCGCCACCGCCGCGCCACCAT" + \ "AAGAAAGCTCCTCATGATCATCACGGCGGTCATGGTCCAGGCAAACATCACCGCTAA",generic_dna) gene gene.translate(table="Bacterial") gene.translate(table = "Bacterial", to_stop=True) # In the bacterial genetic code GTG is a valid start codon, and while it does normally encode Valine, if used as # a start codon it should be translated as methionine. This happens if you tell Biopython your sequence is a # complete CDS: gene.translate(table="Bacterial", cds = True)

3.10 Translation Tables
from Bio.Data import CodonTable standard_table = CodonTable.unambiguous_dna_by_name["Standard"] mito_table = CodonTable.unambiguous_dna_by_id[2] # Alternatively these tables are labeled with ID numbers 1 and 2, respectively from Bio.Data import CodonTable standard_table = CodonTable.unambiguous_dna_by_id[1] mito_table = CodonTable.unambiguous_dna_by_id[2] # You can compare the actual tables visually by printing them: print(standard_table) # Mito table print(mito_table)
Table 2 Vertebrate Mitochondrial, SGC1
  | T | C | A | G |
--+---------+---------+---------+---------+--
T | TTT F | TCT S | TAT Y | TGT C | T
T | TTC F | TCC S | TAC Y | TGC C | C
T | TTA L | TCA S | TAA Stop| TGA W | A
T | TTG L | TCG S | TAG Stop| TGG W | G
--+---------+---------+---------+---------+--
C | CTT L | CCT P | CAT H | CGT R | T
C | CTC L | CCC P | CAC H | CGC R | C
C | CTA L | CCA P | CAA Q | CGA R | A
C | CTG L | CCG P | CAG Q | CGG R | G
--+---------+---------+---------+---------+--
A | ATT I(s)| ACT T | AAT N | AGT S | T
A | ATC I(s)| ACC T | AAC N | AGC S | C
A | ATA M(s)| ACA T | AAA K | AGA Stop| A
A | ATG M(s)| ACG T | AAG K | AGG Stop| G
--+---------+---------+---------+---------+--
G | GTT V | GCT A | GAT D | GGT G | T
G | GTC V | GCC A | GAC D | GGC G | C
G | GTA V | GCA A | GAA E | GGA G | A
G | GTG V(s)| GCG A | GAG E | GGG G [...]
You may find the following properties useful, for example if you are trying to do your own gene finding:
mito_table.stop_codons mito_table.start_codons mito_table.forward_table["ACG"]
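As a small illustration of how these CodonTable properties can feed a home-grown gene finder, here is a hedged sketch that scans a sequence in frame 0 and reports the table-2 stop codons it hits; the helper name find_stops is ours, not part of Biopython.

from Bio.Data import CodonTable

mito_table = CodonTable.unambiguous_dna_by_id[2]  # Vertebrate Mitochondrial

def find_stops(seq, table):
    # walk the sequence codon by codon in frame 0 and collect (position, codon) for stop codons
    stops = []
    for i in range(0, len(seq) - 2, 3):
        codon = seq[i:i + 3]
        if codon in table.stop_codons:
            stops.append((i, codon))
    return stops

print(find_stops("ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG", mito_table))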
Comparing Seq objects
Sequence comparison is actually a very complicated topic, and there is no easy way to decide if two sequences are equal: the letter "A" could be part of a DNA, RNA or protein sequence. Biopython uses alphabet objects as part of each Seq object to try to capture this information, so comparing two Seq objects could mean considering both the sequence strings and the alphabets.
from Bio.Seq import Seq from Bio.Alphabet import IUPAC seq1 = Seq("ACGT", IUPAC.unambiguous_dna) seq2 = Seq("ACGT", IUPAC.ambiguous_dna) print(str(seq1) == str(seq2)) seq1 == seq2, seq1 == "ACGT" from Bio.Seq import Seq from Bio.Alphabet import generic_dna, generic_protein dna_seq = Seq("ACGT", generic_dna) prot_seq = Seq("ACGT", generic_protein) dna_seq == prot_seq
C:\ProgramData\Anaconda3\lib\site-packages\Bio\Seq.py:224: BiopythonWarning: Incompatible alphabets DNAAlphabet() and ProteinAlphabet() BiopythonWarning)

Mutable Seq objects
from Bio.Seq import Seq from Bio.Alphabet import IUPAC my_seq = Seq("GCCATTGTAATGGGCCGCTGAAAGGGTGCCCGA", IUPAC.unambiguous_dna) # Observe what happens if you try to edit the sequence: my_seq[5] = "G" # raises a TypeError: Seq objects do not support item assignment # However, you can convert it into a mutable sequence (a MutableSeq object) and do pretty much anything mutable_seq = my_seq.tomutable() mutable_seq
Alternatively, you can create a MutableSeq object directly from a string:
from Bio.Seq import MutableSeq from Bio.Alphabet import IUPAC mutable_seq = MutableSeq("GCCATTGTAATGGGCCGCTGAAAGGGTGCCCGA", IUPAC.unambiguous_dna) mutable_seq # Either way will give you a sequence object which can be changed: mutable_seq[3] = "C" mutable_seq # position 3 has been replaced with C mutable_seq.remove("T") # the first T has been removed mutable_seq mutable_seq.reverse() mutable_seq # Once you have finished editing your MutableSeq object, it's easy to get back to a read-only Seq object # should you need to: new_seq = mutable_seq.toseq() new_seq

UnknownSeq objects
# The UnknownSeq object is a subclass of the basic Seq object and its purpose is to represent a sequence # where we know the length but not the actual letters making it up.
from Bio.Seq import UnknownSeq unk = UnknownSeq(20) print(unk) unk
# You can of course specify an alphabet, meaning for nucleotide sequences the letter # defaults to "N" and for proteins "X" rather than just "?"
from Bio.Seq import UnknownSeq from Bio.Alphabet import IUPAC unk_dna = UnknownSeq(20, alphabet = IUPAC.ambiguous_dna) print(unk_dna) unk_dna unk_dna.complement() unk_dna.reverse_complement() unk_dna.transcribe() unk_protein = unk_dna.translate() unk_protein print(unk_protein) len(unk_protein)

Working with strings directly
from Bio.Seq import reverse_complement, transcribe, back_transcribe, translate my_string = "GCTGTTATGGGTCGTTGGAAGGGTGGTCGTGCTGCTGGTTAG" reverse_complement(my_string) transcribe(my_string) back_transcribe(my_string) translate(my_string)
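To show how these string-level helpers compose, here is a hedged round-trip sketch built only on the functions imported above; the asserts encode properties that should hold for any recent Biopython version.

from Bio.Seq import reverse_complement, transcribe, back_transcribe, translate

my_string = "GCTGTTATGGGTCGTTGGAAGGGTGGTCGTGCTGCTGGTTAG"

rna = transcribe(my_string)
assert back_transcribe(rna) == my_string  # transcribe / back_transcribe round-trip to the coding strand
assert translate(my_string) == translate(rna)  # the DNA and its transcript translate to the same protein
assert reverse_complement(reverse_complement(my_string)) == my_string  # double reverse complement is the identity
print("string-level round trips hold")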
The Travelling Salesperson Problem
This notebook has been adapted from [a Pyevolve example](http://pyevolve.sourceforge.net/0_6rc1/examples.html#example-12-the-travelling-salesman-problem-tsp). The [travelling salesperson problem (TSP)](http://en.wikipedia.org/wiki/Travelling_salesman_problem) is an NP-hard problem in combinatorial optimization studied in operations research and theoretical computer science. Given a list of cities and their pairwise distances, the task is to find the shortest possible route that visits each city exactly once and returns to the origin city. It is a special case of the travelling purchaser problem.
The code below shows the use of Pyevolve to solve the TSP. Images of the intermediate and final solutions are stored in the 'tspimg' folder. Your tasks are:
1. Add the necessary statements for storing the results in a database named 'tsp.db' with identifier 'ex1'.
2. For the maximum grade: modify the code to solve the problem with the [ATT 48 dataset](att48.tsp), a set of 48 cities (US state capitals) from [TSPLIB](http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsplib.html). Store the results in a database named 'tsp_att48.db' with identifier 'ex1'. For your information, [the optimal cost is 10648](https://www.iwr.uni-heidelberg.de/groups/comopt/software/TSPLIB95/STSP.html).
from pyevolve import G1DList from pyevolve import GSimpleGA from pyevolve import Crossovers from pyevolve import Consts import random from math import sqrt from PIL import Image, ImageDraw, ImageFont cm = [] coords = [] CITIES = 30 WIDTH = 600 HEIGHT = 400 LAST_SCORE = -1 def cartesian_matrix(coords): """ A distance matrix """ matrix={} for i,(x1,y1) in enumerate(coords): for j,(x2,y2) in enumerate(coords): dx, dy = x1-x2, y1-y2 dist=sqrt(dx*dx + dy*dy) matrix[i,j] = dist return matrix def tour_length(matrix, tour): """ Returns the total length of the tour """ total = 0 t = tour.getInternalList() for i in range(CITIES): j = (i+1)%CITIES total += matrix[t[i], t[j]] return total def write_tour_to_img(coords, tour, img_file): """ The function to plot the graph """ padding=20 coords=[(x+padding,y+padding) for (x,y) in coords] maxx,maxy=0,0 for x,y in coords: maxx, maxy = max(x,maxx), max(y,maxy) maxx+=padding maxy+=padding img=Image.new("RGB",(int(maxx),int(maxy)),color=(255,255,255)) font=ImageFont.load_default() d=ImageDraw.Draw(img); num_cities=len(tour) for i in range(num_cities): j=(i+1)%num_cities city_i=tour[i] city_j=tour[j] x1,y1=coords[city_i] x2,y2=coords[city_j] d.line((int(x1),int(y1),int(x2),int(y2)),fill=(0,0,0)) d.text((int(x1)+7,int(y1)-5),str(i),font=font,fill=(32,32,32)) for x,y in coords: x,y=int(x),int(y) d.ellipse((x-5,y-5,x+5,y+5),outline=(0,0,0),fill=(196,196,196)) del d img.save(img_file, "PNG") print "The plot was saved into the %s file." 
% (img_file,) def G1DListTSPInitializator(genome, **args): """ The initializator for the TSP """ lst = [i for i in xrange(genome.getListSize())] random.shuffle(lst) genome.setInternalList(lst) def evolve_callback(ga_engine): global LAST_SCORE if ga_engine.getCurrentGeneration() % 100 == 0: best = ga_engine.bestIndividual() if LAST_SCORE != best.getRawScore(): write_tour_to_img( coords, best, "tspimg/tsp_result_%05d.png" % ga_engine.getCurrentGeneration()) LAST_SCORE = best.getRawScore() return False coords = [(random.randint(0, WIDTH), random.randint(0, HEIGHT)) for i in xrange(CITIES)] cm = cartesian_matrix(coords) genome = G1DList.G1DList(len(coords)) genome.evaluator.set(lambda chromosome: tour_length(cm, chromosome)) genome.crossover.set(Crossovers.G1DListCrossoverEdge) genome.initializator.set(G1DListTSPInitializator) ga = GSimpleGA.GSimpleGA(genome) ga.setGenerations(2000) ga.setMinimax(Consts.minimaxType["minimize"]) ga.setCrossoverRate(1.0) ga.setMutationRate(0.02) ga.setPopulationSize(80) ga.stepCallback.set(evolve_callback) ga.evolve(freq_stats=200) best = ga.bestIndividual() write_tour_to_img(coords, best, "tspimg/tsp_result.png")Main# set device device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # make an environment # env = gym.make('CartPole-v0') env = gym.make('CartPole-v1') # env = gym.make('MountainCar-v0') # env = gym.make('LunarLander-v2') SEED = 0 env.seed(SEED) obs_space = env.observation_space.shape[0] action_space = env.action_space.n OBS_NORM = True n_episodes = 10000 n_eval = env.spec.trials # global values total_steps = 0 obses = [] rewards = [] reward_eval = deque(maxlen=n_eval) # load a model old_net = ActorCriticNet(obs_space, action_space).to(device) old_net.load_state_dict(torch.load( './saved_models/CartPole-v1_up36_clear_model_ppo_st.pt')) with open('./saved_models/CartPole-v1_up36_clear_norm_obs.pkl', 'rb') as f: norm_obs = pickle.load(f) env.spec.max_episode_steps env.spec.trials env.spec.reward_threshold # play # frames = [] for i in range(1, n_episodes + 1): obs = env.reset() done = False ep_reward = 0 while not done: # frames.append(env.render(mode = 'rgb_array')) env.render() if OBS_NORM: obs_norm = np.clip( (obs - norm_obs.mean) / np.sqrt(norm_obs.var+1e-8), -10, 10) action, _ = get_action_and_value(obs_norm, old_net) else: action, _ = get_action_and_value(obs, old_net) _obs, reward, done, _ = env.step(action) obs = _obs total_steps += 1 ep_reward += reward if done: env.render() norm_obs.update(_obs) rewards.append(ep_reward) reward_eval.append(ep_reward) print('{:3} Episode in {:5} steps, reward {:.2f}'.format( i, total_steps, ep_reward)) # frames.append(env.render(mode = 'rgb_array')) # imageio.mimsave(f'{env.spec.id}.gif', frames,) if len(reward_eval) >= n_eval: if np.mean(reward_eval) >= env.spec.reward_threshold: print('\n{} is sloved! 
{:3} Episode in {:3} steps'.format( env.spec.id, i, total_steps)) print(np.mean(reward_eval)) break env.close() plt.figure(figsize=(15, 5)) plt.title('reward') plt.plot(rewards) plt.show() [ ('CartPole-v0', 412, 1), ('CartPole-v1', 452, 0.05), ('MountainCar-v0', 193, 0.1), ('LunarLander-v2', 260, 0.1) ]

Hypothesis
* H0: the retirement values for men and women are equal
* HA: the retirement values for men and women are different
men = dataset.loc[dataset.gender == "M", "value"].to_numpy() mn = men.shape[0] print("Nº men:", mn) women = dataset.loc[dataset.gender == "F", "value"].to_numpy() wn = women.shape[0] print("Nº women:", wn) dataset.loc[dataset.gender == "M", "value"].describe() dataset.loc[dataset.gender == "F", "value"].describe() t, p = stats.ttest_ind(men, women, equal_var=False) print("t = " + str(t)) print("p = " + str(p))
t = 62.142643046731465 p = 0.0

Professional level
max(dataset["level"].unique()) men = dataset.loc[(dataset.gender == "M") & (dataset.level == 801), "value"].to_numpy() mn = men.shape[0] women = dataset.loc[(dataset.gender == "F") & (dataset.level == 801), "value"].to_numpy() wn = women.shape[0] t, p = stats.ttest_ind(men, women, equal_var=False) print("t = " + str(t)) print("p = " + str(p)) men.mean() women.mean()

Region
aposentados = pd.read_csv("data/processed/aposentados.csv") abono = pd.read_csv("data/processed/abono.csv") aposentados = aposentados.drop_duplicates(subset=['name', 'cpf'], keep='first') abono = abono.drop_duplicates(subset=['name', 'cpf'], keep='first') aposentados["retirement_year"] = pd.to_datetime(aposentados.retirement_date).dt.year use_cols = [ 'registration_n', 'agency_acronym', 'superior_agency_cod', 'classes', 'pattern', 'ref', 'level', 'retirement_type', 'legal_substantiation', 'doc_title', 'retirement_date', 'admission_type', 'admission_date', 'value' ] data = abono.merge(aposentados[['name', 'cpf'] + use_cols], left_on=['name', 'cpf'], right_on=['name', 'cpf']) data.shape dataset = data dataset.loc[dataset.upag_state == "pi", "region"] = "nordeste" dataset.loc[dataset.upag_state == "ma", "region"] = "nordeste" dataset.loc[dataset.upag_state == "ce", "region"] = "nordeste" dataset.loc[dataset.upag_state == "rn", "region"] = "nordeste" dataset.loc[dataset.upag_state == "pb", "region"] = "nordeste" dataset.loc[dataset.upag_state == "pe", "region"] = "nordeste" dataset.loc[dataset.upag_state == "al", "region"] = "nordeste" dataset.loc[dataset.upag_state == "se", "region"] = "nordeste" dataset.loc[dataset.upag_state == "ba", "region"] = "nordeste" dataset.loc[dataset.upag_state == "es", "region"] = "sudeste" dataset.loc[dataset.upag_state == "rj", "region"] = "sudeste" dataset.loc[dataset.upag_state == "sp", "region"] = "sudeste" dataset.loc[dataset.upag_state == "mg", "region"] = "sudeste" dataset.loc[dataset.upag_state == "ac", "region"] = "norte" dataset.loc[dataset.upag_state == "am", "region"] = "norte" dataset.loc[dataset.upag_state == "ro", "region"] = "norte" dataset.loc[dataset.upag_state == "rr", "region"] = "norte" dataset.loc[dataset.upag_state == "pa", "region"] = "norte" dataset.loc[dataset.upag_state == "to", "region"] = "norte" dataset.loc[dataset.upag_state == "df", "region"] = "centro-oeste" dataset.loc[dataset.upag_state == "go", "region"] = "centro-oeste" dataset.loc[dataset.upag_state == "mt", "region"] = "centro-oeste" dataset.loc[dataset.upag_state == "ms", "region"] = "centro-oeste" dataset.loc[dataset.upag_state == "pr", "region"] = "sul" dataset.loc[dataset.upag_state == "rs", "region"] = "sul" 
dataset.loc[dataset.upag_state == "sc", "region"] = "sul" for region in ["nordeste", "sudeste", "norte", "centro-oeste", "sul"]: men = dataset.loc[(dataset.gender == "M") & (dataset.region == region), "value_y"].to_numpy() mn = men.shape[0] women = dataset.loc[(dataset.gender == "F") & (dataset.region == region), "value_y"].to_numpy() wn = women.shape[0] t, p = stats.ttest_ind(men, women, equal_var=False) print(region) print("t = " + str(t)) print("p = " + str(p)) dataset.loc[(dataset.gender == "M") & (dataset.region == "nordeste"), "value_y"].mean() dataset.loc[(dataset.gender == "F") & (dataset.region == "nordeste"), "value_y"].mean()Educational levellevel = [ 'doutorado ', 'mestrado ' ] men = abono.loc[(abono.gender == "M") & (abono.educational_level.isin(level)), "value"].to_numpy() mn = men.shape[0] women = abono.loc[(abono.gender == "F") & (abono.educational_level.isin(level)), "value"].to_numpy() wn = women.shape[0] t, p = stats.ttest_ind(men, women, equal_var=False) print("t = " + str(t)) print("p = " + str(p)) dataset.educational_level.unique() women.mean() abono.loc[abono.educational_level == 'doutorado '].shape1.10.1 迭代器data = [1, 2, 3, 4, 5, 6, 7, 8] # for 循环中使用迭代器 it = iter(data) for num in it: print(num, end=' ') data = [1, 2, 3, 4, 5, 6, 7, 8] # while 循环中使用迭代器 it = iter(data) while True: try: print(next(it), end=' ') except StopIteration: break # 定义累加器 class Accumulator: def __init__(self, zero): self.zero = zero # 返回一个特殊的迭代器对象 def __iter__(self): self.it = self.zero return self # 返回下一个迭代器对象 def __next__(self): if self.it < 30: val, self.it = self.it, self.it + 1 return val accumulator = Accumulator(zero=0) it = iter(accumulator) for i in range(20): print(next(it), end=' ') # 定义累加器 class Accumulator: def __init__(self, zero): self.zero = zero # 返回一个特殊的迭代器对象 def __iter__(self): self.it = self.zero return self # 返回下一个迭代器对象 def __next__(self): if self.it < 30: val, self.it = self.it, self.it + 1 return val else: # 标识迭代的完成 raise StopIteration accumulator = Accumulator(zero=0) it = iter(accumulator) for num in it: print(num, end=' ') # 斐波拉契数列迭代器 class Fibonacci: def __init__(self): self.prev = self.current = None def __iter__(self): self.prev, self.current = 0, 1 return self def __next__(self): self.prev, self.current = self.current, self.prev + self.current return self.prev it = iter(Fibonacci()) for i in range(10): print(next(it), end=' ')1 1 2 3 5 8 13 21 34 551.10.2 生成器# 斐波拉契数列生成器 def fibonacci(n): prev, current = 0, 1 for i in range(n): prev, current = current, prev + current yield prev # else: # raise StopIteration it = iter(fibonacci(15)) for ele in it: print(ele, end=' ') # 列表生成式 out1 = [x ** 2 for x in range(10)] print(out1) # 生成器 out2 = (x ** 2 for x in range(10)) print(out2) print(next(out2), next(out2)) # 提到了列表生成式,顺带提一提 python 的 map 函数 # map() 会根据提供的函数对指定序列做映射 # 一般情况下,与列表生成式的使用场合类似 data = map(lambda x: x * x, range(10)) print(data) print(list(data)) # 生成器的 send def accumulator(zero, one): val = zero while True: _ = yield val val += one # 使用 next it = accumulator(0, 1) for i in range(10): print(next(it), end=' ') print() # 使用 send 来代替 next(不规范的做法) it = accumulator(0, 1) for i in range(10): print(it.send(None), end=' ') print() def accumulator(zero, one): val = zero while True: val = yield val val += one # 使用 send 来进行交互 it = accumulator(0, 1) prev = next(it) for i in range(10): print(prev, end=' ') prev = it.send(prev + 1) # 错误的写法 # TypeError: can't send non-None value to a just-started generator it = accumulator(0, 1) for i in range(10): print(it.send(1), end=' ') # 
生成器的 throw def accumulator(zero, one): val = zero while True: try: yield val val += one except RuntimeError: val = zero # 使用 throw 来进行交互 it = accumulator(0, 1) for i in range(10): print(next(it), end=' ') print() print(it.throw(RuntimeError), end=' ') for i in range(9): print(next(it), end=' ') # 生成器的 close def accumulator(zero, one): val = zero while True: try: yield val val += one except RuntimeError: val = zero # 使用 close 来进行交互 it = accumulator(0, 1) for i in range(10): print(next(it), end=' ') it.close() # close 之后再调用生成器会报错 print(next(it))0 1 2 3 4 5 6 7 8 9What am I doing here?- GBT model- Setting up doing three runs, can change to three different models for a voting classifier- Every time I checkpoint a step to a file, it's in an 'if False' block. If you need tocreate a file, change that to True to make the file. Then change it back to False to get the faster way through the notebook.import pyspark import pyspark.sql.functions as F import pyspark.sql.types as T from pyspark.ml.feature import VectorAssembler from pyspark.ml.tuning import TrainValidationSplit, ParamGridBuilder from pyspark.ml.classification import GBTClassifier from pyspark.ml.evaluation import BinaryClassificationEvaluator # This is optional stuff - either pip install watermark # or just comment it out (it just keeps track of what library # versions I have) %load_ext watermark %watermark -iv # Comment these out to run on a cluster. Also, adjust memory to size of your laptop pyspark.sql.SparkSession.builder.config('spark.driver.memory', '8g') pyspark.sql.SparkSession.builder.config('spark.sql.shuffle.paritions', 5) spark = pyspark.sql.SparkSession.builder.getOrCreate()Global Variablesunigrams = [ 'os', 'channel', 'app' ] bigrams = [[ 'device', 'os'], ['device', 'channel'], ['device', 'app'], ['channel', 'app']]Checkpoint 1 Read the csv file, drop the attributed_time (because I didn't use it in the MVP),and downsample the 0 class to 25% because I'm still on my laptopif False: df = spark.read.csv('../data/train.csv', header=True, inferSchema=True) df = df.drop('attributed_time') df = df.sampleBy('is_attributed', fractions={0:.25,1:1.}) test = spark.read.csv('../data/test.csv', header= True, inferSchema=True) df.write.parquet('../data/checkpoint1.parquet', mode='overwrite') test.write.parquet('../data/test_checkpoint1.parquet', mode='overwrite') else: df = spark.read.parquet('../data/checkpoint1.parquet') test = spark.read.parquet('../data/test_checkpoint1.parquet') df.dtypes test.dtypes df.count() test.count()Daily IP prevalence Because IP addresses get reassigned, need to do these as feature engineering on train and testsets separately.(See the link Elyse posted on the slack.)df = df.withColumn('doy', F.dayofyear('click_time')) test = test.withColumn('doy', F.dayofyear('click_time')) df_ip_counts = df[['doy', 'ip']].groupby(['doy','ip']).count() test_ip_counts = test[['doy', 'ip']].groupby(['doy', 'ip']).count() df_day_max = df_ip_counts[['doy','count']]\ .groupby(['doy'])\ .max()\ .withColumnRenamed('max(count)', 'day_max')\ .drop('max(doy)') test_day_max = test_ip_counts[['doy','count']]\ .groupby(['doy'])\ .max()\ .withColumnRenamed('max(count)', 'day_max')\ .drop('max(doy)') df_ip_counts = df_ip_counts.join(df_day_max, ['doy'], how='left') test_ip_counts = test_ip_counts.join(test_day_max, ['doy'], how='left') df_ip_counts.dtypes df_ip_counts = df_ip_counts.withColumn('ip_pct', F.col('count').astype(T.FloatType())/ F.col('day_max').astype(T.FloatType())) test_ip_counts = test_ip_counts.withColumn('ip_pct', 
F.col('count').astype(T.FloatType())/ F.col('day_max').astype(T.FloatType())) df = df.join( df_ip_counts[['doy','ip','ip_pct']], on=['doy','ip'], how='left' ) test = test.join( test_ip_counts[['doy','ip','ip_pct']], on=['doy','ip'], how='left' )Same class balancing as MVPStill hacky - but I reordered it so that the join happens on asmaller table.And, now there are three versions to stack.class1_a = df.filter(df.is_attributed == 1).sample( withReplacement=True, fraction=4.0, seed=111) class1_b = df.filter(df.is_attributed == 1).sample( withReplacement=True, fraction=4.0, seed=222) class1_c = df.filter(df.is_attributed == 1).sample( withReplacement=True, fraction=4.0, seed=333) df_a = df.sampleBy('is_attributed', {0:.11}, seed=111).unionAll(class1_a) df_b = df.sampleBy('is_attributed', {0:.11}, seed=222).unionAll(class1_b) df_c = df.sampleBy('is_attributed', {0:.11}, seed=333).unionAll(class1_c)Counting Built count tables except for IP with the full training set rather than the subset. Results here.def get_count_table( group ): if type(group) == str: column_name = group + '_pct' # for example: ip_pct else: column_name = "_".join(group) # for example: device_os table_name = 'table_' + column_name counts_sdf = spark.read.parquet(f'../data/{table_name}.parquet') return counts_sdf def join_table( sdf, count_table, group ): sdf = sdf.join(count_table, group, how='left') return sdf # create the count columns with the training data # write everything out to disk so we don't have to redo # feature engineering when all I want to do is tune hyperparameters if False: for c in unigrams: ct = get_count_table( c ) df_a = join_table(df_a, ct, [c]) df_b = join_table(df_b, ct, [c]) df_c = join_table(df_c, ct, [c]) test = join_table(test, ct, [c]) for bigram in bigrams: ct = get_count_table( bigram ) df_a = join_table(df_a, ct, bigram) df_b = join_table(df_b, ct, bigram) df_c = join_table(df_c, ct, bigram) test = join_table(test, ct, bigram) df_a.write.parquet('../data/dfa.parquet', mode='overwrite') df_b.write.parquet('../data/dfb.parquet', mode='overwrite') df_c.write.parquet('../data/dfc.parquet', mode='overwrite') test.write.parquet('../data/test_stack.parquet', mode='overwrite') else: df_a = spark.read.parquet('../data/dfa.parquet') df_b = spark.read.parquet('../data/dfb.parquet') df_c = spark.read.parquet('../data/dfc.parquet') test = spark.read.parquet('../data/test_stack.parquet') df_a = df_a.fillna(0) df_b = df_b.fillna(0) df_c = df_c.fillna(0) test = test.fillna(0) for sdf in [ df_a, df_b, df_c ]: sdf.groupby('is_attributed').count().show() test.count()Last minute model tweak - add hour columndef add_hour(sdf): return sdf.withColumn('hour', (F.hour('click_time').astype(T.FloatType()) + (F.minute('click_time').astype(T.FloatType()) / 60.)) / 24. 
) test = add_hour(test) df_a = add_hour(df_a) df_b = add_hour(df_b) df_c = add_hour(df_c)Create model data in format expected by Sparkinput_cols = [ c + '_pct' for c in unigrams ] input_cols += [ '_'.join(b) for b in bigrams ] input_cols += ['ip_pct', 'hour'] input_cols vec_assembler = VectorAssembler(inputCols=input_cols, outputCol = 'features') evaluator = BinaryClassificationEvaluator(labelCol = 'is_attributed') model_a = vec_assembler.transform(df_a).select('is_attributed', 'features') model_b = vec_assembler.transform(df_b).select('is_attributed', 'features') model_c = vec_assembler.transform(df_c).select('is_attributed', 'features')GBT Classifiergbtc = GBTClassifier( labelCol = 'is_attributed', ) # Preparting for future hyperparameter tuning pg = ParamGridBuilder( ).addGrid( gbtc.maxDepth, [ 10 ] ).addGrid( gbtc.subsamplingRate, [ .8 ] ).addGrid( gbtc.featureSubsetStrategy, [ '6' ] ).addGrid( gbtc.maxBins, [ 64 ] ).addGrid( gbtc.stepSize, [ .2 ] ).addGrid( gbtc.maxIter, [ 30 ] ).build( ) tvs = TrainValidationSplit( estimator = gbtc, estimatorParamMaps = pg, evaluator = evaluator, trainRatio = .8 ) tvs_a = tvs.fit(model_a) results_a = tvs_a.transform(model_a) evaluator.evaluate(results_a) tvs_a.bestModel.extractParamMap() tvs_b = tvs.fit(model_b) results_b = tvs_b.transform(model_b) evaluator.evaluate(results_b) tvs_c = tvs.fit(model_c) results_c = tvs_c.transform(model_c) evaluator.evaluate(results_c)Let's bring the test set in heretest_model = vec_assembler.transform(test) results_a = tvs_a.transform(test_model) results_b = tvs_b.transform(test_model) results_c = tvs_c.transform(test_model) def get_prediction(sdf): sdf = sdf.select('click_id', F.col('prediction').astype(T.ShortType()), 'probability') sdf.groupby('prediction').count().show() return sdf results_a = get_prediction(results_a) results_b = get_prediction(results_b) results_c = get_prediction(results_c)+----------+--------+ |prediction| count| +----------+--------+ | 1| 561267| | 0|18229202| +----------+--------+ +----------+--------+ |prediction| count| +----------+--------+ | 1| 555951| | 0|18234518| +----------+--------+ +----------+--------+ |prediction| count| +----------+--------+ | 1| 657822| | 0|18132647| +----------+--------+Extract probabilitiesmySchema = T.StructType([ T.StructField('click_id', T.IntegerType()), T.StructField('prediction', T.ShortType()), T.StructField('pclass1', T.FloatType()) ]) def save_stuff(x): return T.Row(click_id=x.click_id, prediction=x.prediction, pclass1=float(x.probability[1])) vec_a = results_a.rdd.map(lambda x: save_stuff(x)).toDF(schema=mySchema) vec_b = results_b.rdd.map(lambda x: save_stuff(x)).toDF(schema=mySchema) vec_c = results_c.rdd.map(lambda x: save_stuff(x)).toDF(schema=mySchema)Take the median of the three models as my final answervec_a = vec_a.select('click_id', F.col('pclass1').alias('vec_a') ) vec_b = vec_b.select('click_id', F.col('pclass1').alias('vec_b') ) vec_c = vec_c.select('click_id', F.col('pclass1').alias('vec_c') ) joined = vec_a.join(vec_b, ['click_id']).join(vec_c, ['click_id']) mySchema = T.StructType([ T.StructField('click_id', T.IntegerType()), T.StructField('is_attributed', T.FloatType()) ]) from statistics import median def get_predict(x): return T.Row(click_id=x.click_id, is_attributed=median([x.vec_a, x.vec_b, x.vec_c])) joined = joined.rdd.map(lambda x: get_predict(x)).toDF(schema=mySchema) joined.write.csv('../data/vote_results.csv', mode='overwrite') spark.stop()Starbucks Capstone Challenge IntroductionThis data set contains simulated data that 
mimics customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the mobile app. An offer can be merely an advertisement for a drink or an actual offer such as a discount or BOGO (buy one get one free). Some users might not receive any offer during certain weeks. Not all users receive the same offer, and that is the challenge to solve with this data set.Your task is to combine transaction, demographic and offer data to determine which demographic groups respond best to which offer type. This data set is a simplified version of the real Starbucks app because the underlying simulator only has one product whereas Starbucks actually sells dozens of products.Every offer has a validity period before the offer expires. As an example, a BOGO offer might be valid for only 5 days. You'll see in the data set that informational offers have a validity period even though these ads are merely providing information about a product; for example, if an informational offer has 7 days of validity, you can assume the customer is feeling the influence of the offer for 7 days after receiving the advertisement.You'll be given transactional data showing user purchases made on the app including the timestamp of purchase and the amount of money spent on a purchase. This transactional data also has a record for each offer that a user receives as well as a record for when a user actually views the offer. There are also records for when a user completes an offer. Keep in mind as well that someone using the app might make a purchase through the app without having received an offer or seen an offer. ExampleTo give an example, a user could receive a discount offer buy 10 dollars get 2 off on Monday. The offer is valid for 10 days from receipt. If the customer accumulates at least 10 dollars in purchases during the validity period, the customer completes the offer.However, there are a few things to watch out for in this data set. Customers do not opt into the offers that they receive; in other words, a user can receive an offer, never actually view the offer, and still complete the offer. For example, a user might receive the "buy 10 dollars get 2 dollars off offer", but the user never opens the offer during the 10 day validity period. The customer spends 15 dollars during those ten days. There will be an offer completion record in the data set; however, the customer was not influenced by the offer because the customer never viewed the offer. CleaningThis makes data cleaning especially important and tricky.You'll also want to take into account that some demographic groups will make purchases even if they don't receive an offer. From a business perspective, if a customer is going to make a 10 dollar purchase without an offer anyway, you wouldn't want to send a buy 10 dollars get 2 dollars off offer. You'll want to try to assess what a certain demographic group will buy when not receiving any offers. Final AdviceBecause this is a capstone project, you are free to analyze the data any way you see fit. For example, you could build a machine learning model that predicts how much someone will spend based on demographics and offer type. Or you could build a model that predicts whether or not someone will respond to an offer. Or, you don't need to build a machine learning model at all. 
You could develop a set of heuristics that determine what offer you should send to each customer (i.e., 75 percent of women customers who were 35 years old responded to offer A vs 40 percent from the same demographic to offer B, so send offer A). Data SetsThe data is contained in three files:* portfolio.json - containing offer ids and meta data about each offer (duration, type, etc.)* profile.json - demographic data for each customer* transcript.json - records for transactions, offers received, offers viewed, and offers completedHere is the schema and explanation of each variable in the files:**portfolio.json*** id (string) - offer id* offer_type (string) - type of offer ie BOGO, discount, informational* difficulty (int) - minimum required spend to complete an offer* reward (int) - reward given for completing an offer* duration (int) - time for offer to be open, in days* channels (list of strings)**profile.json*** age (int) - age of the customer * became_member_on (int) - date when customer created an app account* gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F)* id (str) - customer id* income (float) - customer's income**transcript.json*** event (str) - record description (ie transaction, offer received, offer viewed, etc.)* person (str) - customer id* time (int) - time in hours since start of test. The data begins at time t=0* value - (dict of strings) - either an offer id or transaction amount depending on the record**Note:** If you are using the workspace, you will need to go to the terminal and run the command `conda update pandas` before reading in the files. This is because the version of pandas in the workspace cannot read in the transcript.json file correctly, but the newest version of pandas can. You can access the termnal from the orange icon in the top left of this notebook. You can see how to access the terminal and how the install works using the two images below. First you need to access the terminal:Then you will want to run the above command:Finally, when you enter back into the notebook (use the jupyter icon again), you should be able to run the below cell without any errors.import pandas as pd import numpy as np import math import json import datetime import matplotlib.pyplot as plt from sklearn import preprocessing from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score % matplotlib inline # read in the json files portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True) profile = pd.read_json('data/profile.json', orient='records', lines=True) transcript = pd.read_json('data/transcript.json', orient='records', lines=True)1. Gather Data In this chapter the required data is gathered and briefly analyzed.portfolio.head(10) profile.head(5) transcript.head(5) # 2,175 NULL values present in columns 'gender' and 'income' profile.info() # max age of 118 years might be an outlier profile.describe() # distribution of the 'age' column including the mean of 'age' profile['age'].hist() plt.axvline(profile['age'].mean(), color='k', linestyle='dashed', linewidth=2); # distribution of 'age' greater than or equal to 100 profile[profile['age'] >= 100]['age'].hist(); # count of 'user_ids' where 'age' is greate than or equal to 110. 
# 2,175 users have an age of equal or mare than 110 which doesn't see valid profile[profile['age'] >= 110]['age'].count() # the first sign up was on July 29th 2013 and the most recent sign up was on July 26th 2018 print(profile['became_member_on'].min()), print(profile['became_member_on'].max()) # distribution of the 'became_member_on' column profile['became_member_on'].hist(); # distribution of the 'income' column profile['income'].hist() plt.axvline(profile['income'].mean(), color='k', linestyle='dashed', linewidth=2); # count by 'gender' profile['gender'].value_counts() transcript.info() # counts by 'event' transcript['event'].value_counts() transcript['time'].hist();2. Clean Data In this chapter the required data is cleaned to provide meaningful insights in the Exploratory Analysis later. 2.1 Clean 'portfolio' DataFrame# create a copy of the original DataFrame portfolio_clean = portfolio.copy() # create dummy variables for 'offer_type' offer_type_dummy = pd.get_dummies(portfolio_clean['offer_type']) # concatenate DataFrame 'portfolio' and offer_type_dummy portfolio_clean = pd.concat([portfolio_clean, offer_type_dummy], axis = 1) # create dummy variables for 'channels' channel_dummy = portfolio_clean['channels'].map(lambda x: ','.join(map(str, x))).str.get_dummies(sep=',') # concatenate DataFrame 'portfolio' and channel_dummy portfolio_clean = pd.concat([portfolio_clean, channel_dummy], axis = 1) # convert column 'duration' from days to hours to match with 'transcript' DataFrame portfolio_clean['duration'] = portfolio_clean['duration'] * 24 # rename column 'id' to 'offer_id' portfolio_clean.rename(columns = {'id':'offer_id'}, inplace = True) # create offer_name based on offer_type, 'difficulty', and 'duration' portfolio_clean['offer_name'] = portfolio_clean['offer_type'].astype(str) + '_' + portfolio_clean['difficulty'].astype(str) + '_' + portfolio_clean['duration'].astype(str)2.2 Clean 'profile' DataFrame# create a copy of the original DataFrame profile_clean = profile.copy() # store ids of user with an age of 110 and higher in variable age_110 to be able to delete these rows from other DataFrames later age_110 = profile_clean[profile_clean['age'] >= 110]['id'] # drop rows where age is higher than 110 years profile_clean.drop(profile_clean[profile_clean['age'] >= 110].index, inplace = True) # convert from integer to datetime64 profile_clean['became_member_on'] = profile_clean['became_member_on'].apply(lambda x: pd.to_datetime(str(x), format='%Y%m%d')) # year of 'became_member_on' date profile_clean['became_member_year'] = profile_clean['became_member_on'].dt.year # days between today and 'became_member_on' date profile_clean['member_days'] = datetime.datetime.now() - profile_clean['became_member_on'] profile_clean['member_days'] = profile_clean['member_days'].dt.days # create variables for 'gender' gender_dummy = pd.get_dummies(profile_clean['gender']) # concatenate DataFrame 'profile' and gender_dummy profile_clean = pd.concat([profile_clean, gender_dummy], axis = 1) # create dummy variables that represent the century of the customer's age def add_year_decade(val): val = str(val) if val[0] == yr: return 1 else: return 0 for yr in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']: profile_clean[str(yr) + "0s"] = profile_clean['age'].apply(add_year_decade) # rename 'id' to 'user_id' profile_clean.rename(columns = {'id':'user_id'}, inplace = True) # create income groups profile_clean.loc[profile_clean['income'] < 40000, 'income_group'] = 'low' 
profile_clean.loc[(profile_clean['income'] >= 40000) & (profile_clean['income'] <= 90000), ['income_group']] = 'medium' profile_clean.loc[profile_clean['income'] > 90000, 'income_group'] = 'high' # create variables for 'income_group_dummy' income_group_dummy = pd.get_dummies(profile_clean['income_group']) # concatenate DataFrame 'profile' and income_group_dummy profile_clean = pd.concat([profile_clean, income_group_dummy], axis = 1) profile_clean.head()2.3 Clean 'transcript' DataFrame and create 'transactions' DataFrame# create a copy of the original DataFrame transcript_clean = transcript.copy() transcript_clean.head() # create Pandas Series 'offer_id' based on Pandas Series 'value' transcript_clean['offer_id'] = transcript_clean['value'].apply(lambda x: x['offer id'] if 'offer id' in x else (x['offer_id'] if 'offer_id' in x else np.nan)) # create Pandas Series 'offer_id' based on Pandas Series 'value' transcript_clean['amount'] = transcript_clean['value'].apply(lambda x: x['amount'] if ('amount' in x) else np.nan) # rename 'person' to 'user_id' transcript_clean.rename(columns = {'person':'user_id'}, inplace = True) # create DataFrame 'transactions' from 'transcript' DataFrame by using the 'user_id' and 'amount' transactions = transcript_clean.loc[transcript_clean['event'] == 'transaction', ['user_id', 'time' ,'amount']] transactions.reset_index(drop = True, inplace = True) # drop rows where 'event' equals 'transcation' transcript_clean.drop(transcript_clean[transcript_clean['event'] == 'transaction'].index, inplace = True) # drop column 'value' transcript_clean.drop(columns = ['value', 'amount'], axis = 1, inplace = True) # create dummy variables for 'event' event_dummy = pd.get_dummies(transcript_clean['event']) # concatenate DataFrame 'transcript' and event_dummy transcript_clean = pd.concat([transcript_clean, event_dummy], axis = 1) # rename multiple columns in the transcript DataFrame transcript_clean.rename(columns = {'offer completed':'offer_completed', 'offer received':'offer_received', 'offer viewed':'offer_viewed'}, inplace = True) # drop column 'event' transcript_clean.drop('event', axis = 1, inplace = True) # remove rows where user_id matches user_id in age_110 list transcript_clean = transcript_clean[~transcript_clean['user_id'].isin(age_110)] # remove rows where user_id matches user_id in age_110 list transcript_clean = transcript_clean[~transcript_clean['user_id'].isin(age_110)] # inner join between 'profile_clean' and 'transcript_clean' on 'user_id' master_df = profile_clean.merge(transcript_clean, how = 'inner', on = 'user_id') # inner join between 'master_df' and 'portfolio_clean' on 'offer_id' master_df = master_df.merge(portfolio_clean, how = 'inner', on = 'offer_id') master_df.head() # no NULL values master_df.info() Int64Index: 148805 entries, 0 to 148804 Data columns (total 42 columns): age 148805 non-null int64 became_member_on 148805 non-null datetime64[ns] gender 148805 non-null object user_id 148805 non-null object income 148805 non-null float64 became_member_year 148805 non-null int64 member_days 148805 non-null int64 F 148805 non-null uint8 M 148805 non-null uint8 O 148805 non-null uint8 10s 148805 non-null int64 20s 148805 non-null int64 30s 148805 non-null int64 40s 148805 non-null int64 50s 148805 non-null int64 60s 148805 non-null int64 70s 148805 non-null int64 80s 148805 non-null int64 90s 148805 non-null int64 100s 148805 non-null in[...]3. 
Exploratory Data Analysis
Questions:
- How many new members does Starbucks get each year and what's the projection for 2018?
- What's the age distribution?
- How is the income distributed across genders?
- What's the viewing rate of each offer by gender?
- What's the viewing rate of each offer and income group?
- What's the completion rate of each offer?
- What's the completion rate of each offer by gender?
- How long does it take on average to complete an offer?
- How long does it take on average to complete an offer relative to the maximum duration?
- How much of the money that was spent is associated with a completed offer?
# group by count of new memberships by month to depict seasonal trend avg_new_memb_month = profile_clean['became_member_on'].groupby([profile_clean.became_member_on.dt.month]).count()/profile_clean['became_member_on'].count() # the last 6 months account for roughly 53.35% of new memberships in 2018 perc_last6month = avg_new_memb_month.iloc[6:].sum() # projected count of new memberships in 2018 proj_new_memb_2018 = profile_clean[(profile_clean.became_member_on.dt.month <= 6) & (profile_clean.became_member_on.dt.year == 2018)]['became_member_year'].count()/(1 - perc_last6month) # projected count of memberships for 2018 - actual count of membership up to June 30th 2018 proj_2018 = proj_new_memb_2018 - profile_clean.groupby('became_member_year')['became_member_year'].count().values[-1] N = len(profile_clean.groupby('became_member_year')['became_member_year'].count().index.values) ind = np.arange(N) # set bar width barWidth = 0.35 # set figure size plt.figure(figsize=(20,10)) # set height of bar bars1 = profile_clean.groupby('became_member_year')['became_member_year'].count().values bars2 = np.array([0, 0, 0, 0, 0, proj_2018]) # Add xticks on the middle of the group bars plt.title('Count of new Starbucks memberships by year', fontsize = 15) plt.ylabel('Count of memberships', fontweight ="bold") plt.xlabel('Calendar year', fontweight ="bold") plt.bar(profile_clean.groupby('became_member_year')['became_member_year'].count().index.values, bars1, label = 'Actual new memberships') plt.bar(profile_clean.groupby('became_member_year')['became_member_year'].count().index.values, bars2, bottom = bars1, label = 'Projected new memberships') plt.legend() # Create legend & Show graphic plt.show()
There has been a constant increase in new memberships each year since 2013. Since membership data is only included up to July 26th 2018, the count of new memberships in 2018 is misleading, as it covers only the first 206 days, or 56.5%, of the year. Based on the calculation above there will be roughly 6,900 new members in 2018, which is an increase of 23.24% compared to 5,599 new memberships in 2017.
column_name = [] column_val = [] for age_column in profile_clean[["10s", "20s", "30s", "40s", "50s", "60s", "70s", "80s", "90s"]]: column_name.append(profile_clean[age_column].name) column_val.append(profile_clean[age_column].sum()) plt.figure(figsize=(20,10)) plt.bar(column_name, column_val) plt.title('Age distribution of Starbucks members', fontsize = 15) plt.xlabel('Age groups', fontweight ="bold") plt.ylabel('Count of memberships', fontweight ="bold") plt.show();
The age of Starbucks members looks normally distributed, which is in contrast to the age distribution of the US population: https://www.census.gov/prod/cen2010/briefs/c2010br-03.pdf. 
Most of the Starbucks members are in their 50's and 60'sn_men = profile_clean[profile_clean['gender']== 'M']['gender'].count() n_women = profile_clean[profile_clean['gender']== 'F']['gender'].count() n_na = profile_clean[profile_clean['gender']== 'O']['gender'].count() string = " n: \n Male: {:,} \n Female: {:,} \n N/A: {:,}".format(n_men, n_women, n_na) # convert 'income_group' to a categorical level_order = ['low', 'medium', 'high'] ordered_cat = pd.api.types.CategoricalDtype(ordered = True, categories = level_order) profile_clean['income_group']= profile_clean['income_group'].astype(ordered_cat) # set bar width barWidth = 0.25 # set figure size plt.figure(figsize=(20,10)) r1 = np.arange(len(profile_clean.groupby('income_group')['income_group'].count().index.values)) r2 = [x + barWidth for x in r1] r3 = [x + barWidth for x in r2] # set height of bar plt.bar(r1, profile_clean[profile_clean['gender']== 'M'].groupby('income_group')['gender'].count().values, width=barWidth, edgecolor='white', label='Male') plt.bar(r2, profile_clean[profile_clean['gender']== 'F'].groupby('income_group')['gender'].count().values, width=barWidth, edgecolor='white', label='Femal') plt.bar(r3, profile_clean[profile_clean['gender']== 'O'].groupby('income_group')['gender'].count().values, width=barWidth, edgecolor='white', label='N/A') # Add xticks on the middle of the group bars plt.title('Distribution of income by gender', fontsize = 15) plt.ylabel('Count of memberships', fontweight ="bold") plt.xlabel('Income group in US Dollar', fontweight ="bold") plt.xticks([r + barWidth for r in range(len(profile_clean.groupby('income_group')['income_group'].count().index.values))], ['Low (Less than 40,000 USD)', 'Medium (Between 40,000 USD and 90,000 USD)', 'High (more than 90,000 USD)']) plt.annotate(string, xy=(0.02, 0.90), xycoords='axes fraction') plt.legend() # Create legend & Show graphic plt.show()There are roughly 38% more male than female members. 
There are more female members in the high income group even though the total number of female members is lower.
m_viewed_received = (master_df[(master_df['offer_viewed'] == 1) & (master_df['gender'] == 'M')].groupby('offer_type')['offer_type'].count().values)/(master_df[(master_df['offer_received'] == 1) & (master_df['gender'] == 'M')].groupby('offer_type')['offer_type'].count().values) f_viewed_received = (master_df[(master_df['offer_viewed'] == 1) & (master_df['gender'] == 'F')].groupby('offer_type')['offer_type'].count().values)/(master_df[(master_df['offer_received'] == 1) & (master_df['gender'] == 'F')].groupby('offer_type')['offer_type'].count().values) o_viewed_received = (master_df[(master_df['offer_viewed'] == 1) & (master_df['gender'] == 'O')].groupby('offer_type')['offer_type'].count().values)/(master_df[(master_df['offer_received'] == 1) & (master_df['gender'] == 'O')].groupby('offer_type')['offer_type'].count().values) # set bar width barWidth = 0.25 # set figure size plt.figure(figsize=(20,10)) r1 = np.arange(len(master_df.groupby('offer_type')['offer_type'].count().index.values)) r2 = [x + barWidth for x in r1] r3 = [x + barWidth for x in r2] # set height of bar plt.bar(r1, m_viewed_received, width=barWidth, edgecolor='white', label='Male') plt.bar(r2, f_viewed_received, width=barWidth, edgecolor='white', label='Female') plt.bar(r3, o_viewed_received, width=barWidth, edgecolor='white', label='N/A') # Add xticks on the middle of the group bars plt.title('Viewing rate by offer type and gender', fontsize = 15) plt.ylabel('Viewing rate', fontweight ="bold") plt.xlabel('Offer type', fontweight ="bold") plt.gca().set_yticklabels(['{:.0f}%'.format(x*100) for x in plt.gca().get_yticks()]) plt.xticks([r + barWidth for r in range(len(master_df.groupby('offer_type')['offer_type'].count().index.values))], master_df[master_df['offer_received'] == 1].groupby('offer_type')['user_id'].count().index) plt.legend() # Create legend & Show graphic plt.show()
The buy one get one (BOGO) offer type has the highest viewing rate of the three offer types. Members with no gender available have the highest viewing rate, although the sample size of this subset is significantly smaller than the male and female subsets. 
Female members have a slightly higher viewing rate than male members.l_viewed_received = (master_df[(master_df['offer_viewed'] == 1) & (master_df['income_group'] == 'low')].groupby('offer_type')['offer_type'].count().values)/(master_df[(master_df['offer_received'] == 1) & (master_df['income_group'] == 'low')].groupby('offer_type')['offer_type'].count().values) m_viewed_received = (master_df[(master_df['offer_viewed'] == 1) & (master_df['income_group'] == 'medium')].groupby('offer_type')['offer_type'].count().values)/(master_df[(master_df['offer_received'] == 1) & (master_df['income_group'] == 'medium')].groupby('offer_type')['offer_type'].count().values) h_viewed_received = (master_df[(master_df['offer_viewed'] == 1) & (master_df['income_group'] == 'high')].groupby('offer_type')['offer_type'].count().values)/(master_df[(master_df['offer_received'] == 1) & (master_df['income_group'] == 'high')].groupby('offer_type')['offer_type'].count().values) # set bar width barWidth = 0.25 # set figure size plt.figure(figsize=(20,10)) r1 = np.arange(len(master_df.groupby('offer_type')['offer_type'].count().index.values)) r2 = [x + barWidth for x in r1] r3 = [x + barWidth for x in r2] # set height of bar plt.bar(r1, l_viewed_received, width=barWidth, edgecolor='white', label='Low') plt.bar(r2, m_viewed_received, width=barWidth, edgecolor='white', label='Medium') plt.bar(r3, h_viewed_received, width=barWidth, edgecolor='white', label='High') # Add xticks on the middle of the group bars plt.title('Viewing rate by offer type and income group', fontsize = 15) plt.ylabel('Viewing rate', fontweight ="bold") plt.xlabel('Offer type', fontweight ="bold") plt.gca().set_yticklabels(['{:.0f}%'.format(x*100) for x in plt.gca().get_yticks()]) plt.xticks([r + barWidth for r in range(len(master_df.groupby('offer_type')['offer_type'].count().index.values))], master_df[master_df['offer_received'] == 1].groupby('offer_type')['user_id'].count().index) plt.legend() # Create legend & Show graphic plt.show()Bogo offers have to highest viewing rate across all income groups. Discout and informational offers have a pretty similar viewing rate. 
Nembers in the low income group have the lowest viewing rate.# create DataFrame where offer_completed is true and sort by time ascending df_offer_completed = master_df[master_df['offer_completed'] == 1][['user_id','offer_id','time']].sort_values('time') # create DataFrame where offer_received is true and sort by time ascending df_offer_viewed = master_df[master_df['offer_viewed'] == 1][['user_id','offer_id','time']].sort_values('time') # merge 'df_offer_completed' and 'df_offer_received' df_offer_merged = pd.merge(df_offer_viewed, df_offer_completed, how='left', on=['user_id','offer_id']) # rename columns df_offer_merged.rename(columns = {'time_x':'time_viewed', 'time_y':'time_completed'}, inplace = True) # get the difference between the time the offer was received and completed df_offer_merged['time_diff'] = df_offer_merged['time_completed'] - df_offer_merged['time_viewed'] # get rows where time difference is negative to drop unrelated rows later invalid_offers_df = df_offer_merged[df_offer_merged['time_diff'] < 0].index # drop rows where time difference is negative df_offer_merged.drop(invalid_offers_df, inplace = True) # drop duplicates based on 'user_id', 'offer_id', 'time_received' and keep first record df_offer_merged.drop_duplicates(subset=['user_id', 'offer_id', 'time_viewed'], keep = 'first', inplace = True) # If 'time_completed' is na then success is false, else true df_offer_merged['success'] = np.where(df_offer_merged['time_completed'].isna(), 0, 1) # merge DataFrame 'portfolio_clean' to get additional information in regards to offers df_offer_merged = pd.merge(df_offer_merged, portfolio_clean, how = 'inner', on = 'offer_id') # drop unnecessary columns df_offer_merged.drop(['offer_id', 'channels'], axis = 1, inplace = True) # merge DataFrame 'profile_clean' to get additional information in regards to members df_offer_merged = pd.merge(df_offer_merged, profile_clean, how = 'left', on = 'user_id') # get amount of offers viewed offer_viewed = df_offer_merged.groupby('offer_name')['user_id'].count() # get amount of offers completed offer_success = df_offer_merged[df_offer_merged['success'] == 1].groupby('offer_name')['user_id'].count() # get ration of offer viewed and offer completed offer_success_rate = (offer_success / offer_viewed) * 100 # drop offer type information offer_success_rate = offer_success_rate.sort_values(ascending = False).dropna() offer_viewed plt.figure(figsize=(20,10)) plt.bar(offer_success_rate.index.values, offer_success_rate.values) plt.title('Completion rate by offer', fontsize = 15) plt.xlabel('Offer', fontweight ="bold") plt.ylabel('Completion rate', fontweight ="bold") plt.gca().set_yticklabels(['{:.0f}%'.format(x) for x in plt.gca().get_yticks()]) plt.show();The offer type discount with a difficulty of 0.5 and a duration of 240 hours has with 77.63% the highest completion rate, followed by another discount offer type with a difficulty of 0.35 and a duration of 168 hours with a 76.75% completion rate. In general it appears to be that the easier the offer, the higher the completion rate. 
Also the offer_type discount performs better then the bogo.m_completed = (df_offer_merged[(df_offer_merged['gender'] == 'M') & (df_offer_merged['success'] == 1)].groupby('offer_name')['user_id'].count()/df_offer_merged[df_offer_merged['gender'] == 'M'].groupby('offer_name')['user_id'].count()).dropna() * 100 f_completed = (df_offer_merged[(df_offer_merged['gender'] == 'F') & (df_offer_merged['success'] == 1)].groupby('offer_name')['user_id'].count()/df_offer_merged[df_offer_merged['gender'] == 'F'].groupby('offer_name')['user_id'].count()).dropna() * 100 o_completed = (df_offer_merged[(df_offer_merged['gender'] == 'O') & (df_offer_merged['success'] == 1)].groupby('offer_name')['user_id'].count()/df_offer_merged[df_offer_merged['gender'] == 'O'].groupby('offer_name')['user_id'].count()).dropna() * 100 # set bar width barWidth = 0.25 # set figure size plt.figure(figsize=(20,10)) r1 = np.arange(len(m_completed.index.values)) r2 = [x + barWidth for x in r1] r3 = [x + barWidth for x in r2] # set height of bar plt.bar(r1, m_completed, width=barWidth, edgecolor='white', label='Male') plt.bar(r2, f_completed, width=barWidth, edgecolor='white', label='Femal') plt.bar(r3, o_completed, width=barWidth, edgecolor='white', label='N/A') # Add xticks on the middle of the group bars plt.title('Completion rate by offer type and gender', fontsize = 15) plt.ylabel('Completion rate', fontweight ="bold") plt.xlabel('Offer Type', fontweight ="bold") plt.gca().set_yticklabels(['{:.0f}%'.format(x) for x in plt.gca().get_yticks()]) plt.xticks([r + barWidth for r in range(len(m_completed.index.values))], m_completed.index.values) plt.legend() # Create legend & Show graphic plt.show()Female members have a 10% to 25% higher completion rate then male members consistently across all offer types.# get the mean of duration by 'offer_name' time_diff = df_offer_merged.groupby('offer_name')['time_diff'].mean().dropna() # get the maximum amount of time a customer has to complete the offer by 'offer_name' offer_duration = df_offer_merged.groupby('offer_name')['duration'].min() # how fast was to offer completed in percent time_diff_perc = (time_diff / offer_duration) * 100 # drop offer_name 'information' time_diff_perc = time_diff_perc.dropna() plt.figure(figsize=(20,10)) plt.bar(time_diff.index.values, time_diff.values) plt.title('Average duration in hours to complete offer', fontsize = 15) plt.xlabel('Offer', fontweight ="bold") plt.ylabel('Duration in hours', fontweight ="bold") plt.show(); plt.figure(figsize=(20,10)) plt.bar(time_diff_perc.index.values, time_diff_perc.values) plt.title('Percentage of duration to complete offer', fontsize = 15) plt.xlabel('Offer', fontweight ="bold") plt.ylabel('Duration in hours', fontweight ="bold") plt.gca().set_yticklabels(['{:.0f}%'.format(x) for x in plt.gca().get_yticks()]) plt.show();The average time it takes members to complete their respective offer is under 50% of the maximum offer duration.# only get successful offers df_success = df_offer_merged[df_offer_merged['success'] == 1] # join 'df_success' with 'transactions' to analyze how much money was spent to complete an offer df_success = df_success.merge(transactions, how = 'inner', on = 'user_id') # only keep rows where transaction time is between 'time_received' and 'time_completed' df_success = df_success[(df_success['time'] >= df_success['time_viewed']) & (df_success['time'] <= df_success['time_completed'])] # group by 'offer_name' to get money spent on each offer df_success = df_success.groupby('offer_name')['amount'].sum() # 
total amount of money spent total_spending = transactions['amount'].sum() # percentage of money spent to complete offer vs. total money spent not_successful_spending = total_spending - df_success.sum() string = " Total Spending: ${:,.2f} \n Successful Spending: ${:,.2f} \n Unsuccessful Spending: ${:,.2f}".format(total_spending, df_success.sum(), not_successful_spending) plt.figure(figsize=(20,10)) plt.bar(df_success.index.values, df_success.values) plt.title('Spending related to offer', fontsize = 15) plt.xlabel('Offer', fontweight ="bold") plt.ylabel('Spending in US Dollar', fontweight ="bold") plt.gca().set_yticklabels(['${:,.2f}'.format(x) for x in plt.gca().get_yticks()]) plt.annotate(string, xy=(0.02, 0.90), xycoords='axes fraction') plt.show();Roughly 44.5% of the spending can be tied to a completed offer. 4. ML Modelingdf_offer_merged.info() # display all columns pd.set_option('display.max_columns', None) df_offer_merged.drop(['user_id', 'time_viewed', 'time_completed', 'time_diff', 'offer_type', 'offer_name', 'age', 'became_member_on', 'gender', 'income', 'became_member_year', 'income_group'], axis = 1, inplace = True) df_offer_merged.info() # normalize columns 'duration' and 'member_days' # https://www.geeksforgeeks.org/normalize-a-column-in-pandas/ df_offer_merged['duration'] = df_offer_merged['duration'] / df_offer_merged['duration'].abs().max() df_offer_merged['member_days'] = df_offer_merged['member_days'] / df_offer_merged['member_days'].abs().max() df_offer_merged['difficulty'] = df_offer_merged['difficulty'] / df_offer_merged['difficulty'].abs().max() df_offer_merged['reward'] = df_offer_merged['reward'] / df_offer_merged['reward'].abs().max() # split Dataframe df into X and y X = df_offer_merged.iloc[:, 1:] y = df_offer_merged['success'] # split data into training and test data X_train, X_test, y_train, y_test = train_test_split(X, y) # use RandomForestClassifier and GradientBoostingClassifier as a classifier rfc = RandomForestClassifier(random_state = 42) gbc = GradientBoostingClassifier(random_state = 42) # train baisc models rfc.fit(X_train, y_train) gbc.fit(X_train, y_train) # predict responses for basic models rfc_y_pred = rfc.predict(X_test) gbc_y_pred = gbc.predict(X_test) print('Accuracy for RandomForestClassifier:', accuracy_score(y_test, rfc_y_pred)) print('Accuracy for GradientBoostingClassifier:', accuracy_score(y_test, gbc_y_pred))Accuracy for RandomForestClassifier: 0.775368679775 Accuracy for GradientBoostingClassifier: 0.794768258427The GradientBoostingClassifier outperforms the RandomForestClassifier. Hence, I'll go ahead and improve the better performing GradientBoostingClassifier classifier by using hyperparameter tuning. 
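Before tuning, it is worth checking that the ranking between the two classifiers is not an artifact of a single train/test split. A minimal sketch (not part of the original notebook, assuming the X_train/y_train arrays and the rfc/gbc estimators defined above):

from sklearn.model_selection import cross_val_score
# Compare both baseline models with 5-fold cross-validation on the training data
print('RFC 5-fold CV accuracy:', cross_val_score(rfc, X_train, y_train, cv=5).mean())
print('GBC 5-fold CV accuracy:', cross_val_score(gbc, X_train, y_train, cv=5).mean())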
Cross validation is used to avoid overfitting.rfc.get_params() gbc.get_params() # hyperparameter tuning and cross validation parameters = { 'n_estimators': [50, 100, 150, 200], 'min_samples_split': [2, 5, 7], 'max_depth' : [5, 6, 7, 8] } cv_gbc = GridSearchCV(gbc, param_grid = parameters, cv = 3, verbose = 2, n_jobs = 4) # train improved model cv_gbc.fit(X_train, y_train) # display the best performing parameters cv_gbc.best_params_ # predict responses for improved model y_pred = cv_gbc.predict(X_test) print('Accuracy:', accuracy_score(y_test, y_pred))Accuracy: 0.812324438202def solution(arr): res=count = 0 flag = False for i in arr: if i == 'R': flag = True res += count count = 0 else: if flag: count += 1 return res print(solution('WRRWWR')) from typing import List nums = [1,3,-1,-3,5,3,6,7] k = 3 def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]: queue = collections.deque() res = [] for i in range(len(nums)): if i >= k and i - k == queue[0]: queue.popleft() while queue and nums[i] > nums[queue[-1]]: queue.pop() queue.append(i) if i >= k - 1: res.append(nums[queue[0]]) return resAnalyzing time for each step in tracking.process_im function.Conclusion: Most time-intensive step was filtering blobs, which varied significantly depending on how many blobs the segmentation function returned.import time import numpy as np import pandas as pd import os import skimage.io import skimage.morphology import scipy.spatial.distance import tracking import bootcamp_utils from behavioral_analysis import segmentation as seg import skimage.morphology _kw_dict = {'c': 'centroid', 'e': 'eccentricity', 'j': 'major_axis_length', 'n': 'minor_axis_length', 'o': 'orientation', 'p': 'perimeter', 's': 'area'} fdir = '//scepto.caltech.edu/Parker_lab/Christina_Julian/8_10_2018/' allfiles = os.listdir(fdir) im_names = [f for f in allfiles if '.tiff' in f] im_names = im_names[::600] ims = [skimage.io.imread(fdir + f) for f in im_names] im_bg = skimage.io.imread('C:/Users/1004p/Desktop/Caltech Amgen 2018/Behavioral Analysis/sceptovideo/im_bg.tiff', as_gray = True) im = ims[1] s1 = time.perf_counter() s2 = time.process_time() im = seg.bg_subtract(im, im_bg) f1 = time.perf_counter() f2 = time.process_time() print(f1-s1) print(f2-s2) s1 = time.perf_counter() s2 = time.process_time() im_bw, im_labeled, n_labels = seg.segment(im) print(n_labels) im = skimage.morphology.remove_small_objects(im_labeled, min_size = 200, connectivity = 2) + 0 im_labeled, n_labels = skimage.measure.label(im, return_num=True) print(n_labels) f1 = time.perf_counter() f2 = time.process_time() print(f1-s1) print(f2-s2) s1 = time.perf_counter() s2 = time.process_time() #specs is a list of all specimen [label, size, coordinates] specs = [] properties = skimage.measure.regionprops(im_labeled) f1 = time.perf_counter() f2 = time.process_time() print(f1-s1) print(f2-s2) include_frame = True frame_id = 0 props = 'scjn' s1 = time.perf_counter() s2 = time.process_time() for i in range(n_labels): if True: blob = (im_labeled == (i+1)) + 0 if tracking.thresholding(blob, thresh_list=[(200, 9000), (0.88, 1)]): spec_data = [properties[i][_kw_dict[x]] for x in props] if include_frame: spec_data.insert(0, frame_id) else: continue else: spec_data = [properties[i][_kw_dict[x]] for x in props] if include_frame: spec_data.insert(0, frame_id) specs.append(spec_data) f1 = time.perf_counter() f2 = time.process_time() print(f1-s1) print(f2-s2) print(spec_data) df = pd.DataFrame(data=specs, columns=col_names) df1 = df.reindex(columns = col_names + ['x', 'y']) 
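# Unpack each two-element centroid tuple into the new 'x' and 'y' columns added by the reindex above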
df1[['x', 'y']] = df['centroid'].apply(pd.Series) print(df1.head()) s1 = time.perf_counter() s2 = time.process_time() col_names = [_kw_dict[x] for x in props] if include_frame: col_names.insert(0, 'frame') df = pd.DataFrame(data=specs, columns=col_names) df[['x', 'y']] = df['centroid'].apply(pd.Series) df.drop('centroid', axis = 1, inplace = True) f1 = time.perf_counter() f2 = time.process_time() print(f1-s1) print(f2-s2) sep_centroid = True s1 = time.perf_counter() s2 = time.process_time() if type(im) != np.ndarray: raise Exception('im must be a 2d numpy ndarray.') if len(props) != len(set(props)): print('WARNING: Some property keywords are repeated. Redundancies were removed.') props = list(sorted(set(props), key=props.index)) if type(include_frame) != bool: raise Exception('include_frame must be a boolean.') if type(im_bg) != np.ndarray: if im_bg == None: pass else: raise Exception('im_bg must be a 2d numpy ndarray.') if sep_centroid and 'c' not in props: raise Exception('There is no centroid data to separate.') f1 = time.perf_counter() f2 = time.process_time() print(f1-s1) print(f2-s2) dfImport preprocessed soil datasoil = pd.read_csv('../../data/soil/processed/soil_116.csv', sep=',', index_col = 0).T soil.head() ph = pd.read_csv('../../data/soil/processed/ph.csv', sep=',', index_col = 0) ph = ph.reindex(soil.columns) ph.head() depth = soil.sum(axis=0) metadata = pd.read_table('../../data/soil/original/88soils_modified_metadata.txt', index_col=0) temperature = metadata["annual_season_temp"].reindex(ph.index) #check if any ids are missing assert not ph.isnull().values.any() # assert that samples of ph and soil are identical assert set(soil.columns) == set(ph.index)CLR-transformation of X Dataframe `soil` need to be of shape (p,N) for normalizing to simplex + clr transformsoil.shape X = normalize(soil) X.sum(axis=0) X = log_transform(X) (p,N) = X.shape (p,N)Calculate covariance and scale to correlationsS0 = np.cov(X.values, bias = True) S = scale_array_by_diagonal(S0) np.diag(S)GGLasso problem and model selectionP = glasso_problem(S, N, latent = True, do_scaling = False) print(P) #lambda1_range = [0.14447343] #mu1_range = [2.36] lambda1_range = np.logspace(0.5,-1.5,8) mu1_range = np.logspace(1.5,-0.2,6) modelselect_params = {'lambda1_range': lambda1_range, 'mu1_range': mu1_range} P.model_selection(modelselect_params = modelselect_params, method = 'eBIC', gamma = 0.25) # regularization parameters are set to the best ones found during model selection print(P.reg_params)Plot results from model selectionfig, axs = plt.subplots(1,2,figsize=(18,12)) sns.heatmap(P.modelselect_stats["RANK"], annot = True, square = True, cbar = False, \ yticklabels = np.round(lambda1_range,2), xticklabels = np.round(mu1_range,2), ax = axs[0]) axs[0].set_title("Rank of L") sns.heatmap(np.round(P.modelselect_stats["SP"],2), annot = True, square = True, cbar = False, \ yticklabels = np.round(lambda1_range,2), xticklabels = np.round(mu1_range,2), ax = axs[1]) axs[1].set_title("Sparsity of Theta") from mpl_toolkits.mplot3d import Axes3D def single_surface_plot(L1, MU1, C, ax, name = 'eBIC'): X = np.log10(L1) Y = np.log10(MU1) Z = np.log(C) ax.plot_surface(X, Y, Z , cmap = plt.cm.ocean, linewidth=0, antialiased=True) ax.set_xlabel(r'$\lambda_1$', fontsize = 14) ax.set_ylabel(r'$\mu1$', fontsize = 14) ax.set_zlabel(name, fontsize = 14) ax.view_init(elev = 18, azim = 51) plt.xticks(fontsize = 8) plt.yticks(fontsize = 8) ax.zaxis.set_tick_params(labelsize=8) ax.tick_params(axis='both', which='major', pad=.5) for label in 
ax.xaxis.get_ticklabels()[::2]: label.set_visible(False) for label in ax.yaxis.get_ticklabels()[::2]: label.set_visible(False) for label in ax.zaxis.get_ticklabels()[::2]: label.set_visible(False) return fig = plt.figure(figsize = (20,10)) fig.suptitle("eBIC surface for different gamma value") C = P.modelselect_stats["BIC"] gammas = np.sort(list(C.keys())) for j in np.arange(len(gammas)): ax = fig.add_subplot(2, 3, j+1, projection='3d') single_surface_plot(P.modelselect_stats["LAMBDA"], P.modelselect_stats["MU"], C[gammas[j]], ax) if gammas is not None: ax.set_title(rf"$\gamma = $ {gammas[j]}")The solution of Graphical Lasso with latent variables has the form $\Theta-L$ where $\Theta$ is sparse and $L$ has low rank.L = P.solution.lowrank_ r = np.linalg.matrix_rank(L) print('Rank of low-rank component: {0}'.format(r)) fig, axs = plt.subplots(1,2, figsize = (20,8)) sns.heatmap(P.solution.precision_, ax = axs[0], cmap = "coolwarm", vmin = -0.5, vmax = 0.5, cbar = False, square = True) axs[0].set_title("Heatmap of Theta") sns.heatmap(L, ax = axs[1], cmap = "coolwarm", vmin = -0.05, vmax = 0.05, cbar = False, square = True) axs[1].set_title("Heatmap of L")Robust PCA in GGLasso We use the low rank component of the Graphical Lasso solution in order to do a robust PCA. For this, we use the eigendecomposition$$L = V \Sigma V^T$$where the columns of $V$ are the orthonormal eigenvecors and $\Sigma$ is diagonal containing the eigenvalues.Denote the columns of $V$ corresponding only to positive eigenvalues with $\tilde{V} \in \mathbb{R}^{p\times r}$ and $\tilde{\Sigma} \in \mathbb{R}^{r\times r}$ accordingly, where $r=\mathrm{rank}(L)$. Then we have $$L = \tilde{V} \tilde{\Sigma} \tilde{V}^T.$$Now we project the data matrix $X\in \mathbb{R}^{p\times N}$ onto the eigenspaces of $L^{-1}$ - which are the same as of $L$ - by computing$$U := X^T \tilde{V}\tilde{\Sigma}$$We plot the columns of $U$ vs. 
the vector of pH values.def robust_PCA(X, L, inverse=True): sig, V = np.linalg.eigh(L) # sort eigenvalues in descending order sig = sig[::-1] V = V[:,::-1] ind = np.argwhere(sig > 1e-9) if inverse: loadings = V[:,ind] @ np.diag(np.sqrt(1/sig[ind])) else: loadings = V[:,ind] @ np.diag(np.sqrt(sig[ind])) # compute the projection zu = X.values.T @ loadings return zu, loadings, np.round(sig[ind].squeeze(),3) L = P.solution.lowrank_ proj, loadings, eigv = robust_PCA(X, L, inverse=True) r = np.linalg.matrix_rank(L)Plot GGLasso/pH correlationfor i in range(r): fig, ax = plt.subplots(1,1) im = ax.scatter(proj[:,i], ph, c = depth, cmap = plt.cm.Blues, vmin = 0) cbar = fig.colorbar(im) cbar.set_label("Sampling depth") ax.set_xlabel(f"PCA component {i+1} with eigenvalue {eigv[i]}") ax.set_ylabel("pH") for i in range(r): print("Spearman correlation between pH and {0}th component: {1}, p-value: {2}".format(i+1, stats.spearmanr(ph, proj[:,i])[0], stats.spearmanr(ph, proj[:,i])[1]))SpiecEasi results with lambda = 0.14447343, rank=6SE_lowrank = pyreadr.read_r('../../data/soil/SE_lowrank.rds') SE_lowrank = np.array(SE_lowrank[None]) SE_lowrank.shape se_rank = np.linalg.matrix_rank(SE_lowrank) print('Rank of low-rank component: {0}'.format(se_rank))Compare low rank SpiecEasi vs GGLassonp.allclose(SE_lowrank, L, atol=1e-1) fig, axs = plt.subplots(1,2, figsize = (20,8)) sns.heatmap(SE_lowrank, ax = axs[0], cmap = "coolwarm", vmin = -0.1, vmax = 0.1, cbar = False, square = True) sns.heatmap(L, ax = axs[1], cmap = "coolwarm", vmin = -0.1, vmax = 0.1, cbar = False, square = True)Robust PCA in [SpiecEasi](https://github.com/zdk123/SpiecEasi/blob/ff528b23fafbd455efcca9dd356bef28951edf82/R/SparseLowRankICov.R) Plot SE/pH correlationzu_SE, se_loadings, se_eigv = robust_PCA(X, SE_lowrank, inverse=True) zu_SE.shape for i in range(se_rank): plt.scatter(zu_SE[:,i], ph, c = depth, cmap = plt.cm.Blues, vmin = 0) cbar = plt.colorbar() cbar.set_label("Sampling depth") plt.xlabel(f"PCA component {i+1} with eigenvalue {se_eigv[i]}") plt.ylabel("pH") plt.show() for i in range(se_rank): print("Spearman correlation between pH and {0}th component: {1}, p-value: {2}".format(i+1, stats.spearmanr(ph, zu_SE[:,i])[0], stats.spearmanr(ph, zu_SE[:,i])[1]))Correlation of zero-entries vs. 
low-ranksoil_0 = soil.replace(1, 0) soil_0_norm = soil_0.astype(bool).sum(axis=0) for i in range(r): fig, ax = plt.subplots(1,1) im = ax.scatter(proj[:,i], temperature, c = depth, cmap = plt.cm.Blues, vmin = 0) cbar = fig.colorbar(im) cbar.set_label("Sampling depth") ax.set_xlabel(f"PCA component {i+1} with eigenvalue {eigv[i]}") ax.set_ylabel("Temperature") for i in range(r): print("Spearman correlation between pH and {0}th component: {1}, p-value: {2}".format(i+1, stats.spearmanr(temperature, proj[:,i])[0], stats.spearmanr(temperature, proj[:,i])[1]))Notebook for viewing the training losses of the two GAN modelsimport matplotlib.pyplot as plt import seaborn as sns import numpy as np import matplotlib %matplotlib inlineAD subjectsRead the files containing the losses for the GAN trained on the AD subset.def get_losses(file): loss_list = [] for x in file: loss_list.append(float(x)) return loss_list d_fake_path = 'ad/disc_fake_average.txt' d_fake_file = open(d_fake_path, 'r') d_fake_loss = get_losses(d_fake_file) d_real_path = 'ad/disc_real_average.txt' d_real_file = open(d_real_path, 'r') d_real_loss = get_losses(d_real_file) g_path ='ad/gen.txt' g_file = open(g_path, 'r') g_loss = get_losses(g_file)We have 3 files in total, the first two are the discriminator losses, while the second is the generator's loss.plt.figure(figsize=(15,4)) ax = plt.subplot(131) ax.plot(d_fake_loss) ax = plt.subplot(132) ax.plot(d_real_loss) ax = plt.subplot(133) ax.plot(g_loss)Calculate the discriminator's loss by subtracting the loss for the *fake* images from the loss for the *real* images.d_loss = [] for real, fake in zip(d_real_loss, d_fake_loss): d_loss.append(real - fake) plt.plot(d_loss)Calculate the average loss for each batch for both the discriminator and the generator.epochs = 400 batches = 178 # 179 + 1 for 'NC' subjects, 177 + 1 for 'AD' subjects g_avg = [] for i in range(epochs): g_avg.append(np.mean(g_loss[i * batches:(i + 1) * batches])) print(len(g_avg)) d_avg = [] for i in range(epochs): d_avg.append(np.mean(d_loss[i * batches:(i + 1) * batches])) print(len(d_avg)) plt.figure(figsize=(15,4)) ax = plt.subplot(121) ax.plot(g_avg) ax = plt.subplot(122) ax.plot(d_avg) sns.set_context('paper') cp = sns.color_palette() c1 = cp[0] c2 = cp[1] ax = plt.subplot(111) ax.plot(d_avg, c=c1, lw=2, label='discriminator') ax.plot(g_avg, c=c2, lw=2, label='generator') ax.plot([0, len(d_avg)], [0, 0], c='0.5', lw=2, ls='--') #ax.plot([332, 332], [-1, 1], c='0.5', ls='--') #ax.plot([357, 357], [-1, 1], c='0.5', ls='--') r = matplotlib.patches.Rectangle([332, -0.5], 357-332, 1, color='0.5', alpha=0.3) ax.add_artist(r) ax.annotate('discriminator loss\nnear zero', horizontalalignment='center', xy=((357-332)/2 + 332, 0.5), xycoords='data', xytext=(250, +5), arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-.2")) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.set_xlim([-5, len(d_avg)]) ax.set_ylim([-5, 10]) ax.set_title('GAN loss on AD subset') ax.set_xlabel('epochs') ax.set_ylabel('loss') ax.legend(loc='upper right') plt.savefig('figures/ad_loss.png', bbox_inches='tight')NC subjectsSame thing for the NC subset.d_fake_path = 'nc/disc_fake_average.txt' d_fake_file = open(d_fake_path, 'r') d_fake_loss = get_losses(d_fake_file) d_real_path = 'nc/disc_real_average.txt' d_real_file = open(d_real_path, 'r') d_real_loss = get_losses(d_real_file) g_path ='nc/gen.txt' g_file = open(g_path, 'r') g_loss = get_losses(g_file) d_loss = [] for real, fake in zip(d_real_loss, d_fake_loss): 
d_loss.append(real - fake) epochs = 600 batches = 180 # 179 + 1 gia normal, 177 + 1 gia ad g_avg = [] for i in range(epochs): g_avg.append(np.mean(g_loss[i*batches:(i + 1)*batches])) print(len(g_avg)) d_avg = [] for i in range(epochs): d_avg.append(np.mean(d_loss[i*batches:(i + 1)*batches])) print(len(d_avg)) sns.set_context('paper') ax = plt.subplot(111) d_avg = d_avg[:400] g_avg = g_avg[:400] ax.plot(d_avg, c=c1, lw=2, label='discriminator') ax.plot(g_avg, c=c2, lw=2, label='generator') ax.plot([0, len(d_avg)], [0, 0], c='0.5', lw=2, ls='--') #ax.plot([332, 332], [-1, 1], c='0.5', ls='--') #ax.plot([357, 357], [-1, 1], c='0.5', ls='--') r = matplotlib.patches.Rectangle([339, -0.5], 359-339, 1, color='0.5', alpha=0.3) ax.add_artist(r) ax.annotate('discriminator loss\nnear zero', horizontalalignment='center', xy=((359-339)/2 + 339, 0.5), xycoords='data', xytext=(250, +5), arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-.2")) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.set_xlim([-5, len(d_avg)]) ax.set_ylim([-5, 10]) ax.set_title('GAN loss on NC subset') ax.set_xlabel('epochs') ax.set_ylabel('loss') ax.legend(loc='upper right') plt.savefig('figures/nc_loss.png', bbox_inches='tight')Error in the computation Demoninator = 0. Troubleshoot. Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved BSD-3 license. (c) , 2017. Thanks to NSF for support via CAREER award 1149784.@LorenaABarba 12 steps to Navier–Stokes The final two steps in this interactive module teaching beginning CFD with Python will both solve the Navier–Stokes equations in two dimensions, but with different boundary conditions.The momentum equation in vector form for a velocity field$$\frac{\partial \vec{\textbf{v}}}{\partial t}+(\vec{\textbf{v}}\cdot\nabla)\vec{\textbf{v}}=-\frac{1}{\rho}\nabla p + \nu \nabla^2\vec{\textbf{v}}$$This vector equation represents three scalar equations, one for each velocity component $(u,v,w)$. But we will solve it in two dimensions $(u,v)$, so there will be two scalar equations.Remember the continuity equation, in incompressable flow? This is where the Poisson equation for pressure comes in!$$\nabla \cdot\vec{\textbf{v}} = 0 $$ Step 12: Channel flow with Navier-StokesThe only difference between this final step and Step 11 is that we are going to add a source term to the u-momentum equation to mimic the effect of pressure-driven channel flow. Here are our modified Navier-Stokes equations. $$\frac{\partial u}{\partial t}+u\frac{\partial u}{\partial x}+v\frac{\partial u}{\partial y} = -\frac{1}{\rho}\frac{\partial p}{\partial x}+\nu \left(\frac{\partial^2 u}{\partial x^2}+\frac{\partial^2 u}{\partial y^2} \right) + F_{i,j}$$$$\frac{\partial v}{\partial t}+u\frac{\partial v}{\partial x}+v\frac{\partial v}{\partial y} = -\frac{1}{\rho}\frac{\partial p}{\partial y}+\nu\left(\frac{\partial^2 v}{\partial x^2}+\frac{\partial^2 v}{\partial y^2}\right) $$$$\frac{\partial^2 p}{\partial x^2}+\frac{\partial^2 p}{\partial y^2} = -\rho\left(\frac{\partial u}{\partial x}\frac{\partial u}{\partial x}+2\frac{\partial u}{\partial y}\frac{\partial v}{\partial x}+\frac{\partial v}{\partial y}\frac{\partial v}{\partial y} \right)$$From our previous steps, we already know how to discretize all of these terms. Discretized equationsWith patience and care, we write the discretized form of the equations. 
It is highly recommended that you write these in your own hand, mentally following each term as you write it.The $\textbf{u}$-momentum equation (copied from step 11, with the $+ F_{i,j}$ term added):\begin{split}\frac{u_{i,j}^{n+1}-u_{i,j}^{n}}{\Delta t}+u_{i,j}^{n}\frac{u_{i,j}^{n}-u_{i-1,j}^{n}}{\Delta x}+v_{i,j}^{n}\frac{u_{i,j}^{n}-u_{i,j-1}^{n}}{\Delta y} = \\ \qquad -\frac{1}{\rho}\frac{p_{i+1,j}^{n}-p_{i-1,j}^{n}}{2\Delta x}+\nu\left(\frac{u_{i+1,j}^{n}-2u_{i,j}^{n}+u_{i-1,j}^{n}}{\Delta x^2}+\frac{u_{i,j+1}^{n}-2u_{i,j}^{n}+u_{i,j-1}^{n}}{\Delta y^2} \right)+F_{i,j}\end{split}Similarly for the $v$-momentum equation (copied from step 11):\begin{split}\frac{v_{i,j}^{n+1}-v_{i,j}^{n}}{\Delta t}+u_{i,j}^{n}\frac{v_{i,j}^{n}-v_{i-1,j}^{n}}{\Delta x}+v_{i,j}^{n}\frac{v_{i,j}^{n}-v_{i,j-1}^{n}}{\Delta y} = \\\qquad -\frac{1}{\rho}\frac{p_{i,j+1}^{n}-p_{i,j-1}^{n}}{2\Delta y}+\nu\left(\frac{v_{i+1,j}^{n}-2v_{i,j}^{n}+v_{i-1,j}^{n}}{\Delta x^2}+\frac{v_{i,j+1}^{n}-2v_{i,j}^{n}+v_{i,j-1}^{n}}{\Delta y^2}\right)\end{split}and our pressure equation (copied from step 11):Consolidating the elements of the equation within the [] that are squared terms we can write:\begin{split}\frac{p_{i+1,j}^{n}-2p_{i,j}^{n}+p_{i-1,j}^{n}}{\Delta x^2}+\frac{p_{i,j+1}^{n}-2p_{i,j}^{n}+p_{i,j-1}^{n}}{\Delta y^2} = \\- \frac{\rho\Delta x^2 \Delta y^2}{2(\Delta x^2 + \Delta y^2} \\\qquad \times \left[ \frac{1}{\Delta t}\left( \frac{u_{i+1,j}-u_{i-1,j}}{2\Delta x}+\frac{v_{i,j+1}-v_{i,j-1}}{2\Delta y}\right) -\left(\frac{u_{i+1,j}-u_{i-1,j}}{2\Delta x}\right)^2 - 2\frac{u_{i,j+1}-u_{i,j-1}}{2\Delta y}\frac{v_{i+1,j}-v_{i-1,j}}{2\Delta x} - \left(\frac{v_{i,j+1}-v_{i,j-1}}{2\Delta y} \right)^2 \right]\end{split} The initial condition is u, v, p = 0 everywhere and the boundary conditions are:u,v,p are periodic on x = 0,2u,v = 0 at y = 0,2$\frac{\partial{p}}{\partial{y}} = 0$ at y = 0,2$F = 1$ everywhere. Let's begin by importing our usual libraries.import numpy from matplotlib import pyplot, cm from mpl_toolkits.mplot3d import Axes3D %matplotlib inlineIn step 11, we isolated a portion of our transposed equation to make it easier to parse and we're going to do the same again here. One thing to note that is new from Step 11 is the periodic boundary conditions throughout the grid. Because of this, we need to explicitly calculate the values at the leading and trailing edge of our u vector.Python Note: If you put """ text""", as below, directly after a function definition in Python, Python's help(function_name) will return that information about the function. Replace the function_name within parenthesis with the name of the actual function you want help on!# I had a hard time keeping all the terms in [] straight, so I cheated # and copied the first part of the code from Github. def build_up_b(rho, dt, dx, dy, u, v): """ In ordeer to make it easier to parse our transposed equation we are creating this function to calculate the portion of the pressure equation shown above in square brackets [] when the boundary conditions of u,v,p are periodic on x. The variables are: b = The array to hold the value computed from the above pressuer equation for the part inside the square brackets []. rho = The fluid density is given by the Greek symbol "rho". dt = the time derivative(increment) of the equation u = The u-momentum array v = The v-momentum array dx = the derivitive(increment) of x in the equaation dy = the derivitive(increment) of x in the equaation """ # create an array b whose shape is similar to u. 
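# b caches the bracketed right-hand side of the pressure Poisson equation, so it is
# evaluated once per time step rather than inside every Poisson sub-iteration.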
b = numpy.zeros_like(u) b[1:-1, 1:-1] = (rho * (1 / dt * ((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2 * dx) + (v[2:, 1:-1] - v[0:-2, 1:-1]) / (2 * dy))) - ((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2 * dx))**2 - 2 * ((u[2:, 1:-1] - u[0:-2, 1:-1]) / (2 * dy) * (v[1:-1, 2:] - v[1:-1, 0:-2]) / (2 * dx))- ((v[2:, 1:-1] - v[0:-2, 1:-1]) / (2 * dy))**2) # Now add in the boundary conditions # Periodic Boundary Condition Pressure @ x=2 b[1:-1, -1] = (rho * (1 / dt * ((u[1:-1, 0] - u[1:-1, -2]) / (2 * dx) + (v[2:, -1] - v[0:-2, -1]) / (2 * dy))) - ((u[1:-1, 0] - u[1:-1, -2]) / (2 * dx))**2 - 2 * ((u[2:, -1] - u[0:-2, -1]) / (2 * dy) * (v[1:-1, 0] - v[1:-1, -2]) / (2 * dx))- ((v[2:, -1] - v[0:-2, -1]) / (2 * dy))**2) # Periodic Boundary Condition Pressure @ x = 0 b[1:-1, 0] = (rho * (1 / dt * ((u[1:-1, 1] - u[1:-1, -1]) / (2 * dx) + (v[2:, 0] - v[0:-2, 0]) / (2 * dy))) - ((u[1:-1, 1] - u[1:-1, -1]) / (2 * dx))**2 - 2 * ((u[2:, 0] - u[0:-2, 0]) / (2 * dy) * (v[1:-1, 1] - v[1:-1, -1]) / (2 * dx))- ((v[2:, 0] - v[0:-2, 0]) / (2 * dy))**2) return bWe also define a Pressure Poisson iterative function, again like we did in Step 11. Once again, Note that we have included the periodic boundary condition in the leading and trailing edges. We also have to specify the boundary conditions at the top and bottom of our grid.def pressure_poisson_periodic(p, dx, dy): """ We also define a Pressure Poisson iterative function to caluclate the pressure array in the above pressure equation when the boundary conditions are periodic in the leading and trailing edges such that u,v,p are periodic on x. p = pressure array at every point in the x,y matrix. dx = the derivitive(increment) of x in the equaation dy = the derivitive(increment) of x in the equaation """ pn = numpy.empty_like(p) # Again, we make an empty array like p. for q in range(nit): pn = p.copy() p[1:-1, 1:-1] = (((pn[1:-1, 2:] + pn[1:-1, 0:-2]) * dy**2 + (pn[2:, 1:-1] + pn[0:-2, 1:-1]) * dx**2) / (2 * (dx**2 + dy**2)) - (dx**2 * dy**2 / (2 * (dx**2 + dy**2))) * b[1:-1, 1:-1]) # Periodic Boundary Condition @ x = 2 p[1:-1, -1] = (((pn[1:-1, 0] + pn[1:-1, -2]) * dy**2 + (pn[2:, -1] + pn[0:-2, -1]) * dx**2) / (2 * (dx**2 + dy**2)) - (dx**2 * dy**2 / (2 * (dx**2 + dy**2))) * b[1:-1, -1]) # Periodic Boundary Condition @ x = 0 p[1:-1, 0] = (((pn[1:-1, 1] + pn[1:-1, -1]) * dy**2 + (pn[2:, 0] + pn[0:-2, 0]) * dx**2) / (2 * (dx**2 + dy**2)) - (dx**2 * dy**2 / (2 * (dx**2 + dy**2))) * b[1:-1, 0]) # Now our wall boundary condition pressure p[-1, :] = p[-2, :] # dp/dx = 0 at y = 2 p[0, :] = p[1, :] # dp/dy = 0 at y = 0 return pNow we have our familiar list of variabless and initial conditions to declare before we start.# Parameters nx = 41 ny = 41 nt = 10 nit = 50 c=1 # Size of the dx and dy and dt increments dx = 2 / (nx-1) dy = 2 / (ny-1) dt = 0.01 # Build our base. x = numpy.linspace(0,2,nx) y = numpy.linspace(0,2,ny) X, Y = numpy.meshgrid(x, y) # Physical variables rho = 1 # The density of fluid or fluid density, denoted ρ (Greek: rho) nu = 0.1 # Kinematic Viscosity of the fluid, denoted v (Greek nu) F = 1 # A source term for the u-momentum equation # to mimic the effect of pressure-driven channel flow. # The initial condition is b,𝑢,𝑣,𝑝=0 everywhere u = numpy.zeros((ny, nx)) un = numpy.zeros((ny, nx)) v = numpy.zeros((ny, nx)) vn = numpy.zeros((ny, nx)) p = numpy.zeros((ny, nx)) pn = numpy.zeros((ny, nx)) b = numpy.zeros((ny, nx))For the meat of our computation, we are going to reach back to a trick we used in Step 9 for Laplace Equations with periodic boundary conditions. 
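As a reminder, that trick amounts to treating the first and last columns of the grid as neighbors of each other. A minimal illustration of a periodic second difference in the x-direction (not from the original notebook; the code below instead writes out the leading and trailing edges explicitly):

# numpy.roll wraps the array around, so column 0 and the last column act as neighbors
u_xx_periodic = (numpy.roll(u, -1, axis=1) - 2 * u + numpy.roll(u, 1, axis=1)) / dx**2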
We are interested in what our grid looks like once we've reached a near-steady state. We can either specify a number of time steps nt and increment until we are satisfied with the results, or we can tell our code to run until the difference between two consecutive iterations is very small.We also have to manage 8 separate boundary conditions for each iteration. The code below writes each of them out explicitly. If you're interested in tackling that you should should probably read up on Python dictionaries.# Loop around until we reach a steady state where the variation is < var = 0.001 in our example. udiff = 1 stepcount = 0 while udiff> 0.001: un = u.copy() vn = v.copy() b = build_up_b(rho, dt, dx, dy, u, v) p = pressure_poisson_periodic(p, dx, dy) # New term F *dt added from step 11. u[1:-1, 1:-1] = (un[1:-1, 1:-1]- un[1:-1, 1:-1] * dt / dx * (un[1:-1, 1:-1] - un[1:-1, 0:-2]) - vn[1:-1, 1:-1] * dt / dy * (un[1:-1, 1:-1] - un[0:-2, 1:-1]) - dt / (2 * rho * dx) * (p[1:-1, 2:] - p[1:-1, 0:-2]) + nu * (dt / dx**2 * (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) + dt / dy**2 * (un[2:, 1:-1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1]))+ F * dt ) v[1:-1,1:-1] = (vn[1:-1, 1:-1] - un[1:-1, 1:-1] * dt / dx * (vn[1:-1, 1:-1] - vn[1:-1, 0:-2]) - vn[1:-1, 1:-1] * dt / dy * (vn[1:-1, 1:-1] - vn[0:-2, 1:-1]) - dt / (2 * rho * dy) * (p[2:, 1:-1] - p[0:-2, 1:-1]) + nu * (dt / dx**2 * (vn[1:-1, 2:] - 2 * vn[1:-1, 1:-1] + vn[1:-1, 0:-2]) + dt / dy**2 * (vn[2:, 1:-1] - 2 * vn[1:-1, 1:-1] + vn[0:-2, 1:-1]))) # Periodic BC u @ x = 2 u[1:-1, -1] = (un[1:-1, -1] - un[1:-1, -1] * dt / dx * (un[1:-1, -1] - un[1:-1, -2]) - vn[1:-1, -1] * dt / dy * (un[1:-1, -1] - un[0:-2, -1]) - dt / (2 * rho * dx) * (p[1:-1, 0] - p[1:-1, -2]) + nu * (dt / dx**2 * (un[1:-1, 0] - 2 * un[1:-1, -1] + un[1:-1, -2]) + dt / dy**2 * (un[2:, -1] - 2 * un[1:-1, -1] + un[0:-2, -1])) + F * dt ) # Periodic BC u @ x = 0 u[1:-1, 0] = (un[1:-1, 0] - un[1:-1, 0] * dt / dx * (un[1:-1, 0] - un[1:-1, -1]) - vn[1:-1, 0] * dt / dy * (un[1:-1, 0] - un[0:-2, 0]) - dt / (2 * rho * dx) * (p[1:-1, 1] - p[1:-1, -1]) + nu * (dt / dx**2 * (un[1:-1, 1] - 2 * un[1:-1, 0] + un[1:-1, -1]) + dt / dy**2 * (un[2:, 0] - 2 * un[1:-1, 0] + un[0:-2, 0])) + F * dt ) # Periodic BC v @ x = 2 v[1:-1,-1] = (vn[1:-1, -1] - un[1:-1, -1] * dt / dx * (vn[1:-1, -1] - vn[1:-1, -2]) - vn[1:-1, -1] * dt / dy * (vn[1:-1, -1] - vn[0:-2, -1]) - dt / (2 * rho * dy) * (p[2:, -1] - p[0:-2, -1]) + nu * (dt / dx**2 * (vn[1:-1, 0] - 2 * vn[1:-1, -1] + vn[1:-1, -2]) + dt / dy**2 * (vn[2:, -1] - 2 * vn[1:-1, -1] + vn[0:-2, -1]))) # Periodic BC v @ x = 0 v[1:-1,0] = (vn[1:-1, 0] - un[1:-1, 0] * dt / dx * (vn[1:-1, 0] - vn[1:-1, -1]) - vn[1:-1, 0] * dt / dy * (vn[1:-1, 0] - vn[0:-2, 0]) - dt / (2 * rho * dy) * (p[2:, 0] - p[0:-2, 0]) + nu * (dt / dx**2 * (vn[1:-1, 1] - 2 * vn[1:-1, 0] + vn[1:-1, -1]) + dt / dy**2 * (vn[2:, 0] - 2 * vn[1:-1, 0] + vn[0:-2, 0]))) # Wall boundary condition u, v = 0 @ y = 0, 2 u[0, :] = 0 u[-1, :] = 0 # set velocity on cavity lid equal to 1 v[0, :] = 0 v[-1, :] = 0 udiff = (numpy.sum(u) - numpy.sum(un)) / numpy.sum(u) stepcount += 1We've also included a variable 'stepcount' to see how many iterations our loop went through before our stop condition was met.print(stepcount) print("b = \n", b) print("p = \n",p) print("u = \n", u) print("v = \n",v) fig = pyplot.figure(figsize = (11,7), dpi = 100) pyplot.quiver(X[::3,::3], Y[::3,::3], u[::3,::3], v[::3,::3] ); fig = pyplot.figure(figsize = (11,7), dpi = 100) pyplot.quiver(X,Y,u,v);Exploratory Data Analysis (EDA) Breast 
Cancer Dataset Haberman's Survival Data Set: survival of patients who had undergone surgery for breast cancer. The dataset contains cases from a study that was conducted between 1958 and 1970 at the University of Chicago's Billings Hospital on the survival of patients who had undergone surgery for breast cancer. Haberman Dataset (haberman.csv): [https://www.kaggle.com/gilsousa/habermans-survival-data-set] About this file 1. Title: Haberman's Survival Data
 2. Sources: (a) Donor: () (b) Date: March 4, 1999
 3. Past Usage: a. . (1976). Generalized Residuals for Log-Linear Models, Proceedings of the 9th International Biometrics Conference, Boston, pp. 104-122. b. ., ., and . (1984), Graphical Models for Assessing Logistic Regression Models (with discussion), Journal of the American Statistical Association 79: 61-83. c. . (1993). Logistic Regression Trees, PhD thesis, Department of Statistics, University of Wisconsin, Madison, WI. 4. Relevant Information: The dataset contains cases from a study that was conducted between 1958 and 1970 at the University of Chicago's Billings Hospital on the survival of patients who had undergone surgery for breast cancer.
 5. Number of Instances: 306
 6. Number of Attributes: 4 (including the class attribute)
 7. Attribute Information: a. Age of patient at time of operation (numerical) b. Patient's year of operation (year - 1900, numerical) c. Number of positive axillary nodes detected (numerical) d. Survival status (class attribute) 1 = the patient survived 5 years or longer 2 = the patient died within 5 year 8. Missing Attribute Values: None#importing required libraries and loading the Haberman data set import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import numpy as np '''downlaod haberman.csv from https://www.kaggle.com/gilsousa/habermans-survival-data-set''' #Load haberman.csv into a pandas dataFrame. haberman = pd.read_csv("./Datasets/haberman.csv", names=['Age', 'Op_Year', 'Axil_nodes', 'Survival_Status']) # checking the number of data-points and features print (haberman.shape) # Features of the dataset print (haberman.columns) print (haberman.head()) haberman.tail() # Unique class labels print(list(haberman['Survival_Status'].unique())) # Data points for each class haberman["Survival_Status"].value_counts()The dataset is imbalanced where 26% could not survived after Surgeryhaberman.describe() # Mapping labels 1 to 'survived' and 2 to 'not_Survived' haberman["Survival_Status"]=haberman["Survival_Status"].map({1:'survived',2:'not_Survived'}) haberman["Survival_Status"].value_counts(normalize= True) haberman.info() RangeIndex: 306 entries, 0 to 305 Data columns (total 4 columns): Age 306 non-null int64 Op_Year 306 non-null int64 Axil_nodes 306 non-null int64 Survival_Status 306 non-null object dtypes: int64(3), object(1) memory usage: 9.6+ KBThere are no Null values in this datasethaberman.describe()The maximum Axillary nodes found were 52 where average Axil_nodes were 4.02, that means there is an outlier in Axil_nodes which has to be dealt during Modeling for classification 2-D Scatter Plot#2-D scatter plot: haberman.plot(kind='scatter', x='Age', y='Op_Year') ; plt.show() # 2-D Scatter plot with color-coding for each type/class. # Using seaborn(sns) sns.set_style("whitegrid"); sns.FacetGrid(haberman, hue="Survival_Status", size=4) \ .map(plt.scatter, "Age", "Op_Year") \ .add_legend(); plt.show(); sns.set_style("whitegrid"); sns.FacetGrid(haberman, hue="Survival_Status", size=4) \ .map(plt.scatter, "Age", "Axil_nodes") \ .add_legend(); plt.show();**Observation(s):** Seperating survived from non-survived is much harder as they have considerable overlap. Pair-plot# pairwise scatter plot: Pair-Plot plt.close(); sns.set_style("whitegrid"); sns.pairplot(haberman, hue="Survival_Status", size=4); plt.show() sns.set_style('whitegrid') sns.pairplot(haberman, hue='Survival_Status', vars=['Age', 'Op_Year', 'Axil_nodes'], size=3) plt.show()**Observations** Almost not possible to linearly seperable as both class labels are overlapped Plotting 1D - scatter plot# 1-D scatter plot using just one feature(Axil_nodes) import numpy as np survived = haberman.loc[haberman["Survival_Status"] == 'survived']; not_survived = haberman.loc[haberman["Survival_Status"] == 'not_Survived']; plt.plot(survived["Axil_nodes"], np.zeros_like(survived["Axil_nodes"]), 'o') plt.plot(not_survived["Axil_nodes"], np.zeros_like(not_survived["Axil_nodes"]), 'o') plt.title("1-D Scatter Plot of Axil Nodes") plt.xlabel('Axil_nodes') plt.show()Highly overlapping points observed from 1D scatter plot, cannot infer things PDF Probability Density Function is a function of a continuous random variable, whose integral across an interval gives the probability that the value of the variable lies within the same interval. 
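A quick numerical way to see this (a minimal sketch using the survived subset defined above; not part of the original analysis): with density=True, np.histogram returns bar heights whose total area is 1, i.e. a discrete approximation of the PDF.

counts, bin_edges = np.histogram(survived['Age'], bins=10, density=True)
# The bar heights times the bin widths integrate to 1
print(np.sum(counts * np.diff(bin_edges)))  # ~1.0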
Probality Density Function (PDF) is the probabilty that the variable takes a value x Kernel Density Estimate (KDE) is the way to estimate the PDF. The area under the KDE curve is 1 Here the height of the bar denotes the percentage of data points under the corresponding groupsns.FacetGrid(haberman, hue="Survival_Status", size=5) \ .map(sns.distplot, "Age",hist=True,kde=True) \ .add_legend(); plt.show();1. As per the above pdf, both the survived and non-survived people probabilities are overlapped so its difficult to seperate both with if conditions2. Both type of people are having high density at age around 50-55 yearssns.FacetGrid(haberman, hue="Survival_Status", size=5) \ .map(sns.distplot, "Axil_nodes") \ .add_legend(); plt.show();***observations*** probability of survival is more for Axil_nodes<10sns.FacetGrid(haberman, hue="Survival_Status", size=5) \ .map(sns.distplot, "Op_Year",hist=True,kde=True) \ .add_legend(); plt.show();1. Both the graphs are highly overlapped2. Number of deaths were more during 63-65 yearshaberman.head() # Cumulative Distribution Function (CDF) # The cumulative distribution function (cdf) is the probability that the variable takes a value # less than or equal to x. #Plotting CDF of Axil_nodes counts, bin_edges = np.histogram(survived['Axil_nodes'], bins=10, density = True) pdf = counts/(sum(counts)) print(pdf); print(bin_edges); cdf = np.cumsum(pdf) plt.plot(bin_edges[1:],pdf,label='PDF'); plt.plot(bin_edges[1:], cdf, label='CDF') plt.legend(loc='right') plt.show();[0.83555556 0.08 0.02222222 0.02666667 0.01777778 0.00444444 0.00888889 0. 0. 0.00444444] [ 0. 4.6 9.2 13.8 18.4 23. 27.6 32.2 36.8 41.4 46. ]Observations: People with Axial_nodes less than 20 have 98% chance of surviving.#Plotting CDF of Age counts, bin_edges = np.histogram(survived['Age'], bins=10, density = True) pdf = counts/(sum(counts)) print(pdf); print(bin_edges) #compute CDF cdf = np.cumsum(pdf) plt.plot(bin_edges[1:],pdf,label='PDF') plt.plot(bin_edges[1:], cdf, label='CDF') plt.legend(loc='right') plt.show();[0.05333333 0.10666667 0.12444444 0.09333333 0.16444444 0.16444444 0.09333333 0.11111111 0.06222222 0.02666667] [30. 34.7 39.4 44.1 48.8 53.5 58.2 62.9 67.6 72.3 77. ]Observations: People who Aged less than 65 have around 90% of survival chances# Survived counts, bin_edges = np.histogram(survived['Axil_nodes'], bins=10, density = True) pdf = counts/(sum(counts)) print(pdf); print(bin_edges) cdf = np.cumsum(pdf) plt.plot(bin_edges[1:],pdf) plt.plot(bin_edges[1:], cdf, label='Survived') # not Survived counts, bin_edges = np.histogram(not_survived['Axil_nodes'], bins=10, density = True) pdf = counts/(sum(counts)) print(pdf); print(bin_edges) cdf = np.cumsum(pdf) plt.plot(bin_edges[1:],pdf) plt.plot(bin_edges[1:], cdf, label='non-Survived') plt.legend() plt.show();[0.83555556 0.08 0.02222222 0.02666667 0.01777778 0.00444444 0.00888889 0. 0. 0.00444444] [ 0. 4.6 9.2 13.8 18.4 23. 27.6 32.2 36.8 41.4 46. ] [0.56790123 0.14814815 0.13580247 0.04938272 0.07407407 0. 0.01234568 0. 0. 0.01234568] [ 0. 5.2 10.4 15.6 20.8 26. 31.2 36.4 41.6 46.8 52. ]People with Axial nodes greater than 35 has almost no chance of surviving#Mean, Variance, Std-deviation, print("Means:") print(np.mean(survived["Age"])) #Mean with an outlier. print(np.mean(np.append(survived["Age"],50))); print(np.mean(not_survived["Age"])) print("\nStd-dev:"); print(np.std(survived["Age"])) print(np.std(not_survived["Age"])) #Median, Quantiles, Percentiles, IQR and MAD. 
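# np.percentile with np.arange(0, 100, 25) returns the 0th, 25th, 50th and 75th percentiles;
# robust.mad gives the median absolute deviation, scaled to be comparable with the
# standard deviation for normally distributed data.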
print("\nMedians:") print(np.median(survived["Axil_nodes"])) #Median with an outlier print(np.median(np.append(survived["Axil_nodes"],50))); print(np.median(not_survived["Axil_nodes"])) print("\nQuantiles:") print(np.percentile(survived["Axil_nodes"],np.arange(0, 100, 25))) print(np.percentile(not_survived["Axil_nodes"],np.arange(0, 100, 25))) print("\n90th Percentiles:") print(np.percentile(survived["Axil_nodes"],90)) print(np.percentile(not_survived["Axil_nodes"],90)) from statsmodels import robust print ("\nMedian Absolute Deviation") print(robust.mad(survived["Axil_nodes"])) print(robust.mad(not_survived["Axil_nodes"])) survived.shape survived.head() not_survived.head() survived.iloc[112] print("\nMedians:") print(np.median(survived["Age"])) #Median with an outlier print(np.median(np.append(survived["Age"],50))); print(np.median(not_survived["Age"])) print("\nQuantiles:") print(np.percentile(survived["Age"],np.arange(0, 100, 25))) print(np.percentile(not_survived["Age"],np.arange(0, 100, 25))) print("\n90th Percentiles:") print(np.percentile(survived["Age"],90)) print(np.percentile(not_survived["Age"],90)) from statsmodels import robust print ("\nMedian Absolute Deviation") print(robust.mad(survived["Age"])) print(robust.mad(not_survived["Age"])) print("\nMedians:") print(np.median(survived["Op_Year"])) #Median with an outlier print(np.median(np.append(survived["Op_Year"],50))); print(np.median(not_survived["Op_Year"])) print("\nQuantiles:") print(np.percentile(survived["Op_Year"],np.arange(0, 100, 25))) print(np.percentile(not_survived["Op_Year"],np.arange(0, 100, 25))) print("\n90th Percentiles:") print(np.percentile(survived["Op_Year"],90)) print(np.percentile(not_survived["Op_Year"],90)) from statsmodels import robust print ("\nMedian Absolute Deviation") print(robust.mad(survived["Op_Year"])) print(robust.mad(not_survived["Op_Year"]))Medians: 63.0 63.0 63.0 Quantiles: [58. 60. 63. 66.] [58. 59. 63. 65.] 90th Percentiles: 67.0 67.0 Median Absolute Deviation 4.447806655516806 4.447806655516806Box plot and Whiskers A Box Plot is the visual representation of the statistical five number summary of a given data set. Box-plot with whiskers: another method of visualizing the 1-D scatter plot more intuitivey. The Concept of median, percentile, quantile. How to draw whiskers: [no standard way] Could use min and max or use other complex statistical techniques. IQR like idea.NOTE: IN the plot below, a technique call inter-quartile range is used in plotting the whiskers. Whiskers in the plot below donot correposnd to the min and max values. Above image taken from chartio.com#Box-plot can be visualized as a PDF on the side-ways. 
sns.boxplot(x='Survival_Status',y='Axil_nodes', data=haberman) plt.show()Boxplot of 'Survived' had 75th percentile value at Axil_nodes is 2 and the 25th and 50th percentiles are overlappedBoxplot of 'not Survived' had 25th percentile value at Axil_nodes is 1, 50th percentile value at axil_nodes is 3 and 75th percentile value at axil_nodes is 11sns.boxplot(x='Survival_Status',y='Age', data=haberman) plt.show() sns.boxplot(x='Survival_Status',y='Op_Year', data=haberman) plt.show()Visualising through Violin plots# A violin plot combines the benefits of the previous two plots #and simplifies them # Denser regions of the data are fatter, and sparser ones thinner #in a violin plot sns.violinplot(x="Survival_Status", y="Axil_nodes", data=haberman, size=8) plt.show()Violin plot of survivors at 50th percentile have 0 positive nodes, at 75th percentie survivors have less than 3 positive axilary nodesViolin plot of non-survivors at 25th percentile have 1 positive axilary node, at 50th percentile have positive axilary nodes below 4, and at 75th percentile have positive nodes below 11sns.violinplot(x="Survival_Status", y="Age", data=haberman, size=8) plt.show() sns.violinplot(x="Survival_Status", y="Op_Year", data=haberman, size=8) plt.show()In the above plot, as per 75th percentiles of both the plots we can conclude that the percentage of survivals are greater for the operations done after 1965# mapping back 'survived' to 1 and 'non_survived' to 2 haberman["Survival_Status"]=haberman["Survival_Status"].map({'survived':1,'not_Survived':2})Contour PlotsA contour plot is a graphical technique for representing a 3-dimensional surface by plotting constant z slices, called contours, on a 2-dimensional format. That is, given a value for z, lines are drawn for connecting the (x,y) coordinates where that z value occurs.To draw the contour line for a certain z value, we connect all the (x, y) pairs, which produce the value z#2D Density plot, contors-plot sns.jointplot(x="Age", y="Axil_nodes", data=survived, kind="kde"); plt.show();The above Contour density plot tells that people with Axial_nodes<5 and of age<60 has more chances of Survival#2D Density plot, contors-plot sns.jointplot(x="Op_Year", y="Axil_nodes", data=survived, kind="kde"); plt.show();1. The above Contour density plot tells that people who had with Axial_nodes<5 has more chances of Survival and that most of the surgeries of survivours were noted ro be done between years 1960-19662. 
Plot is more dense for Years 60-66 and for axial_nodes<3#2D Density plot, contors-plot sns.jointplot(x="Age", y="Op_Year", data=survived, kind="kde"); plt.show();Most of the survivors are of age 48-55 yrs and most of the successful operations were done during 1960-63 yrs#2D Density plot, contors-plot sns.jointplot(x="Age", y="Axil_nodes", data=not_survived, kind="kde"); plt.show();Most of the non survivors are of age 45-55 yrs with axil_nodes atleast 6#2D Density plot, contors-plot sns.jointplot(x="Op_Year", y="Axil_nodes", data=not_survived, kind="kde"); plt.show();Two denser parts are observed for non survivors between operation years 1958-60 and between 1964-66 with axial_nodes 3-7.#2D Density plot, contors-plot sns.jointplot(x="Age", y="Op_Year", data=not_survived, kind="kde"); plt.show();The first step in any data analysis is acquiring and munging the dataAn example data set can be found HEREDownload the file output.txt and transform it into a format like below where the event column should be 0 if there's only one entry for an id, and 1 if there are two entries:id,time_to_convert,age,male,event#Solution to part one: def convert_to_minutes(dt): day_diff = dt / np.timedelta64(1, 'D') if day_diff == 0: return 23.0 else: return day_diff base_df = pd.read_csv("E:/output.txt") base_df["time_to_convert"] = pd.to_datetime(base_df['datetime']) base_df = base_df.drop('datetime', 1) time_deltas = base_df.groupby(by = "id").max() - base_df.groupby(by = "id").min() df = time_deltas["time_to_convert"].apply(convert_to_minutes).to_frame() grouped_base = base_df.groupby(by = "id").max() df["age"] = grouped_base["age"] df["male"] = grouped_base["male"] df["search"] = grouped_base["search"] df["brand"] = grouped_base["brand"] df["event"] = df["time_to_convert"] == 23.0 df["event"] = df["event"].apply(lambda x: 0 if x else 1) df["time_to_convert"].median() ###Parametric Bayes #Shout out to ## Example fully worked model using toy data ## Adapted from http://blog.yhat.com/posts/estimating-user-lifetimes-with-pymc.html alpha = pm.Uniform("alpha", 0,20) beta = pm.Uniform("beta", 0,20) obs = pm.Weibull('obs', alpha, beta, value = df["time_to_convert"], observed = True ) obs.random @pm.potential def censorfactor(obs=obs): if np.any(obs>23 ): return -100000 else: return 0 mcmc = pm.MCMC([alpha, beta, obs, censorfactor ] ) mcmc.sample(5000, burn = 0, thin = 1) pm.Matplot.plot(mcmc)Plotting beta Plotting alphaProblems: 1 - Work out the mean observed time to convert 2 - Try to fit your data from section 1 3 - Use the results to plot the distribution of the median 4 - Try adjusting the number of samples, the burn parameter and the amount of thinning to correct get good answers 5 - Try adjusting the prior and see how it affects the estimate 6 - Try to fit a different distribution to the data 7 - Compare answers Bonus - test the hypothesis that the true median is greater than a certain amount For question 2, note that the median of a Weibull is:$$β(log 2)^{1/α}$$#Solution to question 4: def weibull_median(alpha, beta): return beta * ((log(2)) ** ( 1 / alpha)) plt.hist([weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))]) #Solution to question 4: ### Increasing the burn parameter allows us to discard results before convergence ### Thinning the results removes autocorrelation mcmc = pm.MCMC([alpha, beta, obs, censorfactor ] ) mcmc.sample(50000, burn = 30000, thin = 20) pm.Matplot.plot(mcmc) #Solution to Q5 ## Adjusting the priors impacts the overall result ## If we give a looser, less 
informative prior then we end up with a broader, shorter distribution ## If we give much more informative priors, then we get a tighter, taller distribution ## Note the narrowing of the prior #alpha = pm.Uniform("alpha", 2.5, 4.5) #beta = pm.Uniform("beta", 14, 15) ####Uncomment this to see the result of looser priors alpha = pm.Uniform("alpha", 0, 30) beta = pm.Uniform("beta", 0, 30) obs = pm.Weibull( 'obs', alpha, beta, value = df["time_to_convert"], observed = True ) mcmc = pm.MCMC([alpha, beta, obs, censorfactor ] ) mcmc.sample(10000, burn = 100, thin = 20) pm.Matplot.plot(mcmc) #plt.hist([weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))]) #Solution to Q6 ## To fit a new distribution to the data, we need to change the obs variable to a new distribution. ## This requires creating new hyper parameters, ## The normal distribution has two parameters, the mean and the stdev (we use 1/stdev in pymc) import pymc as mc import numpy.ma as ma #this begins the model alpha = pm.Uniform("mean", 0,15) tau = pm.Uniform("tau", 0, 3) obs = pm.Normal( 'obs', alpha, tau, value = df["time_to_convert"], observed = True ) mcmc = pm.MCMC([alpha, tau, obs, censorfactor ] ) mcmc.sample(50000, burn = 30000, thin = 20) #pm.Matplot.plot(mcmc) plt.hist(mcmc.trace("mean")[:]) ## Solution to bonus ## Super easy to do in the Bayesian framework, all we need to do is look at what % of samples ## meet our criteria testing_value = 10 number_of_greater_samples = sum([x >= testing_value for x in mcmc.trace("mean")[:]]) 100 * (number_of_greater_samples / len(mcmc.trace("mean")[:])) #Cox modelIf we want to look at covariates, we need a new approach. We'll use Cox proprtional hazards. More information here.#Fitting solution cf = lifelines.CoxPHFitter() cf.fit(df, 'time_to_convert', event_col = 'event') cf.summaryC:\Users\j.coltman\AppData\Local\Continuum\Anaconda3\lib\site-packages\lifelines\fitters\coxph_fitter.py:285: FutureWarning: sort(columns=....) is deprecated, use sort_values(by=.....) df.sort(duration_col, inplace=True)Once we've fit the data, we need to do something useful with it. 
Try to do the following things: 1 - Plot the baseline survival function 2 - Predict the functions for a particular set of regressors 3 - Plot the survival function for two different set of regressors 4 - For your results in part 3 caculate how much more likely a death event is for one than the other for a given period of time#Solution to 1 fig, axis = plt.subplots(nrows=1, ncols=1, sharex=True, sharey = True) cf.baseline_survival_.plot(ax = axis, title = "Baseline Survival") # Solution to prediction regressors = np.array([[45,0,0,0]]) survival = cf.predict_survival_function(regressors) survival #Solution to plotting multiple regressors fig, axis = plt.subplots(nrows=1, ncols=1, sharex=True) regressor1 = np.array([[18,0,0,1]]) regressor2 = np.array([[56,0,0,1]]) survival_1 = cf.predict_survival_function(regressor1) survival_2 = cf.predict_survival_function(regressor2) plt.plot(survival_1,label = "32 year old male") plt.plot(survival_2,label = "46 year old female") plt.legend(loc = "lower left") #Difference in survival odds = survival_2 / survival_1 plt.plot(odds, c = "red")Model selectionDifficult to do with classic tools (here)Problem: 1 - Calculate the BMA coefficient values 2 - Compare these results to past the lifelines results 3 - Try running with different priors##Solution to 1 bmaCox = CoxPHFitter() bmaCox.fit(df, "time_to_convert", event_col= "event", priors= [0.5]*7) print(coefficient) difference = coefficient - cf.summary["coef"] print(difference) print("Most very close, some significantly out (because no leaps and jumps)")Load learned modelWe deploy the learned model which have about 5,000 sample image's knowldegeimport cv2 import matplotlib.pyplot as plt import _pickle as cPickle import numpy as np f = open("/home/jtlee/workspace/cancer_segmentation/ckpt/ckpt2_0.pkl", "rb") rf = cPickle.load(f)Show example that doesn't contains in Training setFirst example is one of CT image, which can be input of modelAnother one is label for the first. the white part means cancer# Read image target_name = "000083.dcm.png" target_file = "/home/jtlee/workspace/cancer_segmentation/data/CT_PNG100/NSCLC-Radiomics/LUNG1-268/" + target_name image = cv2.imread(target_file, cv2.IMREAD_GRAYSCALE) image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) gt_name = "000083.dcm.gt.png" gt_file = "/home/jtlee/workspace/cancer_segmentation/data/CT_PNG100/NSCLC-Radiomics/LUNG1-268/" + gt_name gt_image = cv2.imread(gt_file, cv2.IMREAD_GRAYSCALE) gt_image = cv2.cvtColor(gt_image, cv2.COLOR_GRAY2RGB) plt.figure() plt.axis("off") plt.imshow(image) plt.show() plt.figure() plt.axis("off") plt.imshow(gt_image) plt.show()Show result of above imageHow looks like compare to above label? Is it reasonable?def context_feature(image, size = 3): if size % 2 == 0: raise NotImplementedError(" [!] 
Currently even size of context feature is not supporting!") padding = int(size / 2) shape = image.shape pad_image = np.zeros((shape[0] + padding*2, shape[1] + padding*2)) pad_image[padding:-padding, padding:-padding] = image features = [] for i in range(padding, shape[0]+padding): for j in range(padding, shape[1]+padding): feature = pad_image[i-padding:i+padding+1, j-padding:j+padding+1].reshape((-1)) np.append(feature, [i, j]) # add location info features.append(feature) features = np.concatenate(features, axis = 0) features = features.reshape((-1, size**2)) return features # cancer detection image = cv2.imread(target_file, cv2.IMREAD_GRAYSCALE) img = cv2.resize(image, (100, 100), interpolation=cv2.INTER_CUBIC) flat = context_feature(img, size = 3) pred = rf.predict(flat) pred = pred.reshape((100, 100)) plt.figure() plt.axis('off') plt.imshow(pred) plt.show()Exploration of higher order gamma model* Additional Taylor coefficents can better match observed shape of gamma model for MD data* But the coefficients are not well constrained and yield unphysical behavior at very high pressures beyond the data range * gamma tends toward negative infinity at high compressionimport matplotlib.pyplot as plt %matplotlib notebook import numpy as np import pandas as pd from scipy import interpolate import pickle from scipy import optimize import xmeos from xmeos import models from xmeos import datamod analysis_file = 'data/analysis.pkl' with open(analysis_file, 'rb') as f: analysis = pickle.load(f) gamma_avg_md = analysis['gamma_avg_md'] V0 = 12.97 a1=6*.134 a2=-12*.1+36*.1**2-18*(-2.113) a3=0 a4=0 a5=0 # params0 = [a1,a2,a3] #params0 = [a1,a2] def calc_gamma(V, params, V0=V0): a = np.zeros(4) a[0:len(params)] = params fstr = .5*((V0/V)**(2/3)-1) gamma = 1/6*(2*fstr+1)*( a[0]+a[1]*fstr+1/2*a[2]*fstr**2+1/6*a[3]*fstr**3)/( 1+a[0]*fstr+.5*a[1]*fstr**2+1/6*a[2]*fstr**3+1/24*a[3]*fstr**4) return gamma def calc_adiabatic_temp(V, params, V0=V0, T0=1): a = np.zeros(4) a[0:len(params)] = params fstr = .5*((V0/V)**(2/3)-1) Tad = T0*np.sqrt( 1+a[0]*fstr+.5*a[1]*fstr**2+1/6*a[2]*fstr**3+1/24*a[3]*fstr**4) return Tad def resid(params, gamma_dat=gamma_avg_md['gamma'], V=gamma_avg_md['V'], V0=V0): gamma_mod = calc_gamma(V, params, V0=V0) resid = gamma_mod-gamma_dat return resid params0_simp = [a1,a2] params0 = [a1,a2,a3,a4] # params0 = [a1,a2,a3] fit_tup = optimize.leastsq(resid, params0) paramsf = fit_tup[0] # paramsf[2]=0 fit_tup_simp = optimize.leastsq(resid, params0_simp) paramsf_simp = fit_tup_simp[0] Vmod = V0*np.linspace(.32,1.2,1001) gamma_f = calc_gamma(Vmod, paramsf) gamma_f_simp = calc_gamma(Vmod, paramsf_simp) gamma_init = calc_gamma(Vmod,params0) print(paramsf) # logTad = np.log(calc_adiabatic_temp(Vmod, paramsf)) # gamma_grad = -np.diff(logTad)/np.diff(np.log(Vmod)) plt.figure() plt.plot(Vmod,gamma_f_simp,'r-', gamma_avg_md['V'], gamma_avg_md['gamma'],'ko', Vmod, gamma_f, 'k--')[ 1.66856894e+00 2.64790806e+01 2.58061081e+02 -2.05182754e+03]Copyright 2018 The TensorFlow Authors.#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. #@title MIT License # # Copyright (c) 2017 # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE.Modelleri kaydedelim ve tekrar yükleyelim Run in Google Colab View source on GitHub Note: Bu dökümanlar TensorFlow gönüllü kullanıcıları tarafından çevirilmiştir.Topluluk tarafından sağlananan çeviriler gönüllülerin ellerinden geldiğincegüncellendiği için [Resmi İngilizce dökümanlar](https://www.tensorflow.org/?hl=en)ile bire bir aynı olmasını garantileyemeyiz. Eğer bu tercümeleri iyileştirmekiçin önerileriniz var ise lütfen [tensorflow/docs](https://github.com/tensorflow/docs)havuzuna pull request gönderin. Gönüllü olarak çevirilere katkıda bulunmak için[](https://groups.google.com/a/tensorflow.org/forum/!forum/docs-tr)listesi ile iletişime geçebilirsiniz. Eğitim sırasında veya sonrasında modelin ilerleyişi kaydedilebilir. Bunun anlamı, modelin kaldığı yerden ilerlemeye devam edebilir olması, ayrıca uzun eğitim sürelerinin önüne geçilebilir olmasıdır. Modellerin kaydedilmesi, aynı zamanda modellerin paylaşılabilmesi ve diğerlerinin yaptığımız çalışmaları tekrardan oluşturabilmeleri anlamına gelmektedir. Çoğu uygulayıcı araştırma modellerini ve tekniklerini yayınladıklarında, aşağıdaki bilgileri paylaşırlar: * modeli oluşturan kodu* modele ait eğitilmiş ağırlık değerlerini veya parametreleriniBu verilerin paylaşılması, diğerlerinin modelimizin nasıl çalıştığını anlamasına ve yeni veriler ile modeli denemelerine yardımcı olur. Dikkat: Güvenilmeyen kodlar ile ilgili dikkatli olunuz-Tensorflow modelleri kodlardan oluşmaktadır. Detaylar için [TensorFlow'un Güvenli Kullanımı](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md) linkine göz atınız. SeçeneklerKullandığınız API'ye bağlı olarak, Tensorflow modellerini kaydetmenin farklı yolları vardır. Bu eğitim dökümanı, Tensorflowda yapay zeka modellerinin oluşturulması ve eğitilmesinde kullanılan [tf.keras](https://www.tensorflow.org/r1/guide/keras) 'ı kullanmaktadır. Farklı yöntemler için TensorFlow [Kaydet ve Geri Yükle](https://www.tensorflow.org/r1/guide/saved_model) eğitim dökümanına veya [eager'da kaydedelim](https://www.tensorflow.org/r1/guide/eagerobject-based_saving) linkine göz atailirsiniz. 
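As a quick preview of the tf.keras saving entry points exercised in the rest of this guide (a minimal sketch only; `model` and the file names here are placeholders, not part of the original notebook):
# Weights only; restoring requires rebuilding the same architecture first
model.save_weights('training/my_checkpoint')
model.load_weights('training/my_checkpoint')
# Whole model (architecture + weights + training configuration) as a single HDF5 file
model.save('my_model.h5')
new_model = tf.keras.models.load_model('my_model.h5')
# Automatic checkpointing while training
cp_callback = tf.keras.callbacks.ModelCheckpoint('training/cp.ckpt', save_weights_only=True, verbose=1)
model.fit(train_images, train_labels, callbacks=[cp_callback])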
Kurulum Kuralım ve İçeri Alalım Tensorflow ve bağlı kütüphanelerini kuralım ve içeri alalım:!pip install h5py pyyamlÖrnek veri setini alalımAğırlıkların nasıl kaydedildiğini gösterebilmek için, modelimizi [MNIST dataset](http://yann.lecun.com/exdb/mnist/) verisi ile eğiteceğiz. Süreci hızlandırmak için, sadece ilk 1000 örneği kullanacağız:from __future__ import absolute_import, division, print_function, unicode_literals import os try: # %tensorflow_version only exists in Colab. %tensorflow_version 2.x except Exception: pass import tensorflow.compat.v1 as tf from tensorflow import keras tf.__version__ (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data() train_labels = train_labels[:1000] test_labels = test_labels[:1000] train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0 test_images = test_images[:1000].reshape(-1, 28 * 28) / 255.0Modeli tanımlayalım Ağırlıkların nasıl kaydedileceğini ve yükleneceğini gösterebilmek için basit bir model oluşturalım.# Returns a short sequential model def create_model(): model = tf.keras.models.Sequential([ keras.layers.Dense(512, activation=tf.keras.activations.relu, input_shape=(784,)), keras.layers.Dropout(0.2), keras.layers.Dense(10, activation=tf.keras.activations.softmax) ]) model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy']) return model # Create a basic model instance model = create_model() model.summary()Eğitim sırasında kontrol noktalarını (checkpoints) kaydedelim Ana kullanım şekli, eğitim sırasında ve sonunda kontrol noktalarının otomatik olarak kaydedilmesidir. Bu şekilde eğitilmiş modeli tekrar eğitmeye gerek kalmadan kullanabiliriz veya eğitim süreci yarıda kalmışsa kaldığı yerden eğitime devam edebiliriz. `tf.keras.callbacks.ModelCheckpoint` bu işlemi yapan callback fonksiyonudur. Bu fonksiyon, kontrol noktalarını yapılandırmak için birkaç parametre değeri alır. Kontrol noktası callback fonksiyonu kullanımıModeli eğitelim ve `ModelCheckpoint` callback fonksiyonunu modele aktaralım:checkpoint_path = "training_1/cp.ckpt" checkpoint_dir = os.path.dirname(checkpoint_path) # Create checkpoint callback cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1) model = create_model() model.fit(train_images, train_labels, epochs = 10, validation_data = (test_images,test_labels), callbacks = [cp_callback]) # pass callback to training # This may generate warnings related to saving the state of the optimizer. # These warnings (and similar warnings throughout this notebook) # are in place to discourage outdated usage, and can be ignored.Bu kod, her bir ephoc sonunda güncellenen, bir grup TensorFlow kontrol noktası dosyası oluşturur:!ls {checkpoint_dir}Eğitilmemiş yeni bir model oluşturalım. Sadece ağırlık değerleri ile bir modeli geri yüklemek istediğimizde, elimizde orjinal model ile aynı yapıyı sahip bir modelimiz olmalıdır. Aynı model yapısına sahip olduğumuz için, farklı zamanlarda oluşmuş model örnekleri arasında ağırlık değerlerini paylaşabiliriz. Şimdi yeni bir eğitilmemiş model oluşturalım ve bu modeli test veri seti ile değerlendirelim. 
Eğitilmemiş bir model, şans yüzdesi kadar (~10% doğruluk) doğruluğa sahip olacaktır:model = create_model() loss, acc = model.evaluate(test_images, test_labels, verbose=2) print("Untrained model, accuracy: {:5.2f}%".format(100*acc))Sonrasında ağırlık değerlerini kaydettiğimiz kontrol noktasından model geri yükleyelim ve modeli tekrardan değerlendirelim:model.load_weights(checkpoint_path) loss,acc = model.evaluate(test_images, test_labels, verbose=2) print("Restored model, accuracy: {:5.2f}%".format(100*acc))Kontrol noktası callback seçenekleri:Callback fonksiyonu, kontrol noktalarının isimlendirilmesi ve frekanslarının ayarlanması için çeşitli seçenekler sunar. Yeni bir modeli eğitelim ve her 5 epoch'ta bir farklı isimler ile kontrol noktalarını isimlendirelim:# include the epoch in the file name. (uses `str.format`) checkpoint_path = "training_2/cp-{epoch:04d}.ckpt" checkpoint_dir = os.path.dirname(checkpoint_path) cp_callback = tf.keras.callbacks.ModelCheckpoint( checkpoint_path, verbose=1, save_weights_only=True, # Save weights, every 5-epochs. period=5) model = create_model() model.save_weights(checkpoint_path.format(epoch=0)) model.fit(train_images, train_labels, epochs = 50, callbacks = [cp_callback], validation_data = (test_images,test_labels), verbose=0)Şimdi, oluşan kontrol noktalarına bakalım ve en güncel olanını seçelim:! ls {checkpoint_dir} latest = tf.train.latest_checkpoint(checkpoint_dir) latestNot: Tensorflow varsayılan formatı, sadece en güncel 5 kontrol noktasını kaydeder.Test için, modeli resetleyelim ve en güncel kontrol noktasını yükleyelim:model = create_model() model.load_weights(latest) loss, acc = model.evaluate(test_images, test_labels, verbose=2) print("Restored model, accuracy: {:5.2f}%".format(100*acc))Bu dosyalar nedir? Yukardaki kod, ağırlık değerlerini bir grup [checkpoint](https://www.tensorflow.org/r1/guide/saved_modelsave_and_restore_variables)- dosyaya binary formatta kaydeder. Kontrol noktası aşağıdakileri kapsar: * Modele ait ağırlık değerlerini içeren, bir veya daha fazla dosya parçası (shards). * Hangi ağırlık değerinin hangi dosya parçasında olduğunu gösteren bir index dosyası. Eğer modelinizi tek bir bilgisayarda eğitiyorsanız, son takısı `.data-00000-of-00001` olan tek bir dosya parçası oluşacaktır. Ağırlıkların manuel kaydedilmesiYukarıda, ağırlıkların modele nasıl yüklendiğini gördünüz.`Model.save_weights` methodunu kullanarak, ağırlıkların manuel olarak kaydedilmeside aynı şeklide kolaydır.# Save the weights model.save_weights('./checkpoints/my_checkpoint') # Restore the weights model = create_model() model.load_weights('./checkpoints/my_checkpoint') loss,acc = model.evaluate(test_images, test_labels, verbose=2) print("Restored model, accuracy: {:5.2f}%".format(100*acc))Tüm modelin kaydedilmesiAğırlık değerleri, model yapısı ve hatta optimize edici parametrelerini (yapılandırmaya bağlı olarak) kapsayan tek bir dosya ile tüm model kaydedilebilir. Bu, orjinal koda ulaşmaya gerek kalmadan, modele ait kontrol noktasını kaydetmeyi ve sonrasında eğitime kalındığı yerden tekrardan başlanmasını sağlar. Tam-fonksiyonel modelin kaydedilmesi çok faydalıdır, bu modeli TensorFlow.js ye ([HDF5](https://js.tensorflow.org/r1/tutorials/import-keras.html), [Saved Model](https://js.tensorflow.org/r1/tutorials/import-saved-model.html)) yükleyip sonrasında eğitebilir ve web browserda çalıştırabiliriz. 
Veya modeli TensorFlow Lite formatına dönüştürerek ([HDF5](https://www.tensorflow.org/lite/convert/python_apiexporting_a_tfkeras_file_), [Saved Model](https://www.tensorflow.org/lite/convert/python_apiexporting_a_savedmodel_)) mobil cihazlarda çalıştırabiliriz. Modeli HDF5 dosyası olarak kaydedelimKeras, [HDF5](https://en.wikipedia.org/wiki/Hierarchical_Data_Format) standartını kullanarak, temel bir kayıt formatı sunar. Bizim kullanım amacımıza göre, kayıtlı model tek bir binary blob olarak değerlendirilebilir.model = create_model() model.fit(train_images, train_labels, epochs=5) # Save entire model to a HDF5 file model.save('my_model.h5')Şimdi, bu dosya ile modelimizi tekrardan oluşturalım:# Recreate the exact same model, including weights and optimizer. new_model = keras.models.load_model('my_model.h5') new_model.summary()Doğruluğunu kontrol edelim:loss, acc = new_model.evaluate(test_images, test_labels, verbose=2) print("Restored model, accuracy: {:5.2f}%".format(100*acc))Bu yöntem modelle ilgili herşeyi kaydeder:* Ağırlık değerlerini* Model yapısını* Optimizer parametreleriniKeras, modelleri kaydederken model yapılarını inceler ve TensorFlow optimizer'ları (`tf.train`'dan) kaydetmesi halihazırda mümkün değildir. Bunu kullanırken, modeli yükledikten sonra tekrar derlememiz gerekecektir ve optimizer'ın son durum bilgisini kaybetmiş oluruz. Modelin `saved_model` olarak kaydedilmesi Dikkat: `tf.keras` modellerinin bu yöntemle kaydedilmesi deneysel olup, gelecek versiyonlarda değişiklik gösterebilir. Yeni ir model oluşturalım:model = create_model() model.fit(train_images, train_labels, epochs=5)`saved_model` oluşturalım:saved_model_path = tf.contrib.saved_model.save_keras_model(model, "./saved_models")Kaydedilmiş modeller zaman bilgisini içeren klasörler içerisine oluşturulur:!ls saved_models/Kaydedilmiş modeli kullanarak yeni bir keras modelini yükleyelim.new_model = tf.contrib.saved_model.load_keras_model(saved_model_path) new_model.summary()Yeniden yüklenmiş modeli çalıştıralım.# The model has to be compiled before evaluating. # This step is not required if the saved model is only being deployed. new_model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy']) # Evaluate the restored model. 
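# Note: evaluate() needs the loss and metrics defined by compile(); only the
# architecture and weights come back from the saved model here. The
# tf.contrib.saved_model API used above is TF1-only (tf.contrib was removed in
# TF2, where model.save / tf.keras.models.load_model cover this use case).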
loss, acc = new_model.evaluate(test_images, test_labels, verbose=2) print("Restored model, accuracy: {:5.2f}%".format(100*acc))cbow模型(Continuous Bag-Of-Words Model)预测https://github.com/oreilly-japan/deep-learning-from-scratch-2/blob/master/ch03/cbow_predict.py# coding: utf-8 import sys sys.path.append('..') import numpy as np from common.layers import MatMul # 示例上下文数据 c0 = np.array([[1, 0, 0, 0, 0, 0, 0]]) c1 = np.array([[0, 0, 1, 0, 0, 0, 0]]) # 权重初始化 W_in = np.random.randn(7, 3) W_out = np.random.randn(3, 7) # 生成层 in_layer0 = MatMul(W_in) in_layer1 = MatMul(W_in) out_layer = MatMul(W_out) # 前向传播 h0 = in_layer0.forward(c0) h1 = in_layer1.forward(c1) h = 0.5 * (h0 + h1) s = out_layer.forward(h) print(s)[[ 0.28830492 0.58316349 -0.29270063 -0.05973828 0.6979968 -0.3119117 0.39784004]]simple cbow模型实现https://github.com/oreilly-japan/deep-learning-from-scratch-2/blob/master/ch03/simple_cbow.py# coding: utf-8 import sys sys.path.append('..') import numpy as np from common.layers import MatMul, SoftmaxWithLoss class SimpleCBOW: def __init__(self, vocab_size, hidden_size): V, H = vocab_size, hidden_size # 权重初始化 W_in = 0.01 * np.random.randn(V, H).astype('f') W_out = 0.01 * np.random.randn(H, V).astype('f') # 生成层 self.in_layer0 = MatMul(W_in) self.in_layer1 = MatMul(W_in) self.out_layer = MatMul(W_out) self.loss_layer = SoftmaxWithLoss() # 将所有的权重和梯度汇总到列表 layers = [self.in_layer0, self.in_layer1, self.out_layer] self.params, self.grads = [], [] for layer in layers: self.params += layer.params self.grads += layer.grads # 设置单词的离散表示成员变量 self.word_vecs = W_in def forward(self, contexts, target): h0 = self.in_layer0.forward(contexts[:, 0]) h1 = self.in_layer1.forward(contexts[:, 1]) h = (h0 + h1) * 0.5 score = self.out_layer.forward(h) loss = self.loss_layer.forward(score, target) return loss def backward(self, dout=1): ds = self.loss_layer.backward(dout) da = self.out_layer.backward(ds) da *= 0.5 self.in_layer1.backward(da) self.in_layer0.backward(da) return None训练上述模型https://github.com/oreilly-japan/deep-learning-from-scratch-2/blob/master/ch03/train.py# coding: utf-8 import sys sys.path.append('..') # 为导入父目录中的文件而进行的设置 from common.trainer import Trainer from common.optimizer import Adam #from simple_cbow import SimpleCBOW from common.util import preprocess, create_contexts_target, convert_one_hot window_size = 1 hidden_size = 5 batch_size = 3 max_epoch = 1000 text = 'You say goodbye and I say hello.' 
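# The helpers below come from the book's common/util module: preprocess() turns
# the sentence into an array of word IDs plus the two vocabulary dictionaries,
# and create_contexts_target() pairs every interior word (the target) with its
# window_size neighbours on each side. With a 7-word vocabulary, a randomly
# initialised model should start near the uniform softmax loss ln(7) ≈ 1.95,
# which matches the first epochs of the training log below.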
corpus, word_to_id, id_to_word = preprocess(text) vocab_size = len(word_to_id) contexts, target = create_contexts_target(corpus, window_size) target = convert_one_hot(target, vocab_size) contexts = convert_one_hot(contexts, vocab_size) model = SimpleCBOW(vocab_size, hidden_size) optimizer = Adam() trainer = Trainer(model, optimizer) trainer.fit(contexts, target, max_epoch, batch_size) trainer.plot() word_vecs = model.word_vecs for word_id, word in id_to_word.items(): print(word, word_vecs[word_id])| epoch 1 | iter 1 / 2 | time 0[s] | loss 1.95 | epoch 2 | iter 1 / 2 | time 0[s] | loss 1.95 | epoch 3 | iter 1 / 2 | time 0[s] | loss 1.95 | epoch 4 | iter 1 / 2 | time 0[s] | loss 1.95 | epoch 5 | iter 1 / 2 | time 0[s] | loss 1.95 | epoch 6 | iter 1 / 2 | time 0[s] | loss 1.95 | epoch 7 | iter 1 / 2 | time 0[s] | loss 1.95 | epoch 8 | iter 1 / 2 | time 0[s] | loss 1.95 | epoch 9 | iter 1 / 2 | time 0[s] | loss 1.94 | epoch 10 | iter 1 / 2 | time 0[s] | loss 1.94 | epoch 11 | iter 1 / 2 | time 0[s] | loss 1.94 | epoch 12 | iter 1 / 2 | time 0[s] | loss 1.94 | epoch 13 | iter 1 / 2 | time 0[s] | loss 1.94 | epoch 14 | iter 1 / 2 | time 0[s] | loss 1.94 | epoch 15 | iter 1 / 2 | time 0[s] | loss 1.94 | epoch 16 | iter 1 / 2 | time 0[s] | loss 1.94 | epoch 17 | iter 1 / 2 | time 0[s] | loss 1.94 | epoch 18 | iter 1 / 2 | time 0[s] | loss 1.94 | epoch 19 | iter 1 / 2 | time 0[s] | loss 1.94 | epoch 20 | iter 1 / 2 | time 0[s] | loss 1.94 | epoch 21 | iter 1 / 2 | ti[...]simple skip gram实现https://github.com/oreilly-japan/deep-learning-from-scratch-2/blob/master/ch03/simple_skip_gram.py# coding: utf-8 import sys sys.path.append('..') import numpy as np from common.layers import MatMul, SoftmaxWithLoss class SimpleSkipGram: def __init__(self, vocab_size, hidden_size): V, H = vocab_size, hidden_size # 权重初始化 W_in = 0.01 * np.random.randn(V, H).astype('f') W_out = 0.01 * np.random.randn(H, V).astype('f') # 生成层 self.in_layer = MatMul(W_in) self.out_layer = MatMul(W_out) self.loss_layer1 = SoftmaxWithLoss() self.loss_layer2 = SoftmaxWithLoss() # 将所有的权重和梯度汇总到列表 layers = [self.in_layer, self.out_layer] self.params, self.grads = [], [] for layer in layers: self.params += layer.params self.grads += layer.grads # 设置单词的离散表示成员变量 self.word_vecs = W_in def forward(self, contexts, target): h = self.in_layer.forward(target) s = self.out_layer.forward(h) l1 = self.loss_layer1.forward(s, contexts[:, 0]) l2 = self.loss_layer2.forward(s, contexts[:, 1]) loss = l1 + l2 return loss def backward(self, dout=1): dl1 = self.loss_layer1.backward(dout) dl2 = self.loss_layer2.backward(dout) ds = dl1 + dl2 dh = self.out_layer.backward(ds) self.in_layer.backward(dh) return None训练上述模型原作者未提供SimpleSkipGram的训练代码,这里参考前面的训练代码,编写了训练代码# coding: utf-8 import sys sys.path.append('..') # 为导入父目录中的文件而进行的设置 from common.trainer import Trainer from common.optimizer import Adam from common.util import preprocess, create_contexts_target, convert_one_hot window_size = 1 hidden_size = 5 batch_size = 3 max_epoch = 1000 text = 'You say goodbye and I say hello.' 
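# Data preparation is identical to the CBOW run above; only the model differs.
# Because SimpleSkipGram sums two SoftmaxWithLoss terms (one per context word),
# its starting loss is roughly 2 * ln(7) ≈ 3.89, twice the CBOW value of ≈ 1.95,
# which is exactly what the training log below shows.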
corpus, word_to_id, id_to_word = preprocess(text) vocab_size = len(word_to_id) contexts, target = create_contexts_target(corpus, window_size) target = convert_one_hot(target, vocab_size) contexts = convert_one_hot(contexts, vocab_size) model = SimpleSkipGram(vocab_size, hidden_size) optimizer = Adam() trainer = Trainer(model, optimizer) trainer.fit(contexts, target, max_epoch, batch_size) trainer.plot() word_vecs = model.word_vecs for word_id, word in id_to_word.items(): print(word, word_vecs[word_id])| epoch 1 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 2 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 3 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 4 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 5 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 6 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 7 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 8 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 9 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 10 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 11 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 12 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 13 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 14 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 15 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 16 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 17 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 18 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 19 | iter 1 / 2 | time 0[s] | loss 3.89 | epoch 20 | iter 1 / 2 | time 0[s] | loss 3.88 | epoch 21 | iter 1 / 2 | ti[...]barrier_reach_avoidSynthesize a probabilistic barrier certificate to get a lower bound on the probability that a target set is reached. MathematicsConsider the polynomial dynamics$$x(t+1) = f(x(t), t, w), \quad w \sim \mathcal N(0, \Sigma)$$ and the sets\begin{align} X = \{x : g(x) \geq 0 \} \\X_0 = \{x : g_0(x) \geq 0\} \\X_T = \{x : g_1(x) \geq 0\} \\X_U = \{x : g_U(x) \geq 0\} \end{align}**Remark**: for numeric reasons it is preferable that all these sets are compact so that restrictions are only imposed on bounded sets.We want to find a lower bound on the property that we avoid $X_U$ and reach $X_T$ given that we start in $X_0$ and remain in $X$ during the whole transition.$$p_{01} = \mathbb{P} \left[ x(T) \in X_1 \; \land \; x(t) \not \in X_U, t=1, \ldots, T-1 \; \mid \; x(0) \in X_0, x(t) \in X \right]$$Consider a certificate $B(x, t)$ satisfying\begin{align} B(x, t) \geq 0 \quad & x \in X \\ B(x, 0) \leq \gamma \quad & x \in X_0 \\ B(x, t) \geq 1 \quad & x \in X_U, \; t = 1, \ldots, T-1 \\ B(x, T) \geq 1 \quad & x \not \in X_T, x \in X\\ B(x(t), t) \leq \mathbb{E} \left[ B(x(t+1), t+1)) \; \mid \; x(t) \right] \leq B(x(t), t) + c \quad & x \in X\end{align}It obviously follows that with $B(t) = B(x(t), t)$ the last constraint implies $\mathbb{E} \left[ B(T) \right] < B(0) + cT$. Since $B$ is a positive submartingale we can use Doob's inequality$$\mathbb{P} \left[ \sup_{0 \leq t \leq T} B(t) \leq 1 \right] = 1 - \mathbb{P} \left[ \sup_{0 \leq t \leq T} B(t) \geq 1 \right] \geq 1 - \mathbb{E} [B(T)] \geq 1 - B(0).$$If $A \impliedby B$ and $C \implies D$, then $\mathbb{P}(A \mid C) \geq \mathbb{P}(B \mid D)$. 
From the constraints we have that\begin{align} x(T) \in X_T \; \land \; x(t) \not \in X_U \impliedby sup_{0 \leq t \leq T} B(t) \leq 1, \\ x(0) \in X_0 \implies B(0) \leq \gamma.\end{align}Therefore,$$p_{01} = \mathbb{P} \left[ x(T) \in X_T \; \land \; x(t) \not \in X_U, t=1, \ldots, T-1 \; \mid \; x(0) \in X_0, x(t) \in X \right] \geq \mathbb{P}\left[ B(T) \leq 1 \; \mid \; B(0) \leq \gamma \right] \geq 1 - (cT + \gamma).$$Thus we search for a barrier certificate that satisfies the conditions above while minimizing $cT + \gamma$.from sympy.abc import x, y, w import posipoly as pp import matplotlib.pyplot as plt import numpy as np # parameters n = 2 # dimension T = 4 # time horizon sigma = 0.1 # noise standard deviation tot_deg = 8 # overall degree of ppp # polynomials defining sets g = pp.Polynomial.from_sympy(1, [x,y]) g0 = pp.Polynomial.from_sympy(0.25**2 - x**2 - y**2, [x,y]) gT = pp.Polynomial.from_sympy(0.5**2 - (x-1)**2 - y**2, [x,y]) gu = pp.Polynomial.from_sympy(0.2**2 - (x-0.4)**2 - (y-0.5)**2, [x,y]) # dynamics: # x(t+1) = x(t) + 0.25 + w # y(t+1) = y(t) ft_x = pp.Polynomial.from_sympy(x + 0.25 + w, [x,y,w]) ft_y = pp.Polynomial.from_sympy(y + w, [x,y,w]) # Plot the sets xx = np.linspace(-0.5, 1.5, 50) yy = np.linspace(-1, 1, 50) XX, YY = np.meshgrid(xx, yy) g0_val = np.vectorize(g0)(XX, YY) gu_val = np.vectorize(gu)(XX, YY) gT_val = np.vectorize(gT)(XX, YY) plt.figure() plt.contour(XX, YY, g0_val, levels=[0], colors='black', linestyles='dotted') plt.contour(XX, YY, gu_val, levels=[0], colors='red', linestyles='dotted') plt.contour(XX, YY, gT_val, levels=[0], colors='black', linestyles='dotted') plt.show()We synhesize a separate barrier for each discrete time step, i.e. $B(x,t) = B_t(x)$. Using the S procedure we can rewrite the problem above as\begin{align} \mathbb{E} [B_{t+1}(f(x, t, w)) ] - B_t(x) - \sigma_{1}^t(x) g(x) \geq 0, & \qquad t=0, \ldots, T-1 \\ c + B_t(x) - \mathbb{E} [B_{t+1}(f(x, t, w)) ] - \sigma_{2}^t(x) g(x) \geq 0, & \qquad t=0, \ldots, T-1 \\ \gamma - B_0(x) - \sigma_{3}(x) g_0(x) \geq 0, \\ B_t(x) - 1 - \sigma_{4}^t(x) g_U(x) \geq 0, & \qquad t=1, \ldots, T-1 \\ B_T(x) - 1 + \sigma_{5a}(x) g_T(x) - \sigma_{5b}(x)g(x) \geq 0, & \\ B_t(x) - \sigma_6(x) g(x) \geq 0, & \qquad t=0, \ldots, T \\ \sigma_1^t(x), \sigma_2^t(x) \geq 0, & \qquad t=0, \ldots, T-1 \\ \sigma_4^t(x) \geq 0, & \qquad t = 1, \ldots, T-1 \\ \sigma_6^t(x) \geq 0, & \qquad t=0, \ldots, T \\ \sigma_3(x), \sigma_{5a}(x), \sigma_{5b}(x) \geq 0, & \qquad t=0, ..., T\end{align}deg_B = tot_deg # need to subtract if deg f larger than 1 deg_g0 = tot_deg-g0.d deg_gT = tot_deg-gT.d deg_gu = tot_deg-gu.d deg_g = tot_deg-g.d prog = pp.PPP() prog.add_var('c', n, 0, 'coef') # scalar variable = polynomial of degree 0 prog.add_var('gamma', n, 0, 'coef') # scalar variable for t in range(T+1): prog.add_var('B{}'.format(t), n, deg_B, 'coef') for t in range(T): prog.add_var('s1{}'.format(t), n, deg_g, 'pp') prog.add_var('s2{}'.format(t), n, deg_g, 'pp') prog.add_var('s3', n, deg_g0, 'pp') for t in range(1, T): prog.add_var('s4{}'.format(t), n, deg_gu, 'pp') prog.add_var('s5a', n, deg_gT, 'pp') prog.add_var('s5b', n, deg_g, 'pp') for t in range(T+1): prog.add_var('s6{}'.format(t), n, deg_g, 'pp')Prepare the transformations# Identity for scalars to tot_deg T1 = pp.PTrans.eye(n0=n, d0=0, n1=n, d1=tot_deg) # Identity for scalars to scalar T10 = pp.PTrans.eye(n0=n, d0=0, n1=n, d1=0) # B(x,y) -> E_w[ B(fx(x,y,w), fy(x,y,w)) ] TBp = pp.PTrans.gaussian_expectation(n0=3, d0=deg_B, i=2, sigma=sigma) \ * 
pp.PTrans.composition(n0=n, d0=deg_B, g_list=[ft_x, ft_y]) # Identity for B TB = pp.PTrans.eye(n, deg_B) # Multiplication with g, g0, g1, gu Tg = pp.PTrans.mul_pol(n, deg_g, g) Tg0 = pp.PTrans.mul_pol(n, deg_g0, g0) TgT = pp.PTrans.mul_pol(n, deg_gT, gT) Tgu = pp.PTrans.mul_pol(n, deg_gu, gu)These are the constraints in algebra form Tbp.B(t+1) - TB.Bt - Tg.s1t pp t=0, ..., T-1 (1) T1.c + TB.Bt - Tbp.B(t+1) - Tg.s2t pp t=0, ..., T-1 (2) T1.gamma - TB.B0 - Tg0.s3 pp (3) TB.Bt - Tgu.s4t - 1 pp t=1, ..., T-1 (4) TB.BT + TgT.s5a - Tg.s5b - 1 pp (5) TB.Bt - Tg.s6 pp t=0, ..., T (6)# add (1) for t in range(T): Aop = {'B{}'.format(t+1): TBp, 'B{}'.format(t): -TB, 's1{}'.format(t): -Tg} prog.add_constraint(Aop, pp.Polynomial.zero(n), 'pp') # add (2) for t in range(T): Aop = {'c': T1, 'B{}'.format(t): TB, 'B{}'.format(t+1): -TBp, 's2{}'.format(t): -Tg} prog.add_constraint(Aop, pp.Polynomial.zero(n), 'pp') # add (3) prog.add_constraint({'gamma': T1, 'B0': -TB, 's3': -Tg0}, pp.Polynomial.zero(n), 'pp') # add (4) for t in range(1, T): prog.add_constraint({'B{}'.format(t): TB, 's4{}'.format(t): -Tgu,}, pp.Polynomial.one(n), 'pp') # add (5) prog.add_constraint({'B{}'.format(T): TB, 's5a': TgT, 's5b': -Tg}, pp.Polynomial.one(n), 'pp') # add (6) for t in range(T+1): Aop = {'B{}'.format(t): TB, 's6{}'.format(t): -Tg} prog.add_constraint(Aop, pp.Polynomial.zero(n), 'pp') # add inequality: gamma <= 1 (to exclude large solutions) prog.add_constraint({'gamma': T10}, pp.Polynomial.one(n), 'iq') # set c=0 # prog.add_row({'c': T10}, pp.Polynomial.zero(n), 'eq') # add objective prog.set_objective({'c': pp.PTrans.eval0(n, 0)* T, 'gamma': pp.PTrans.eval0(n, 0)})Time to solve it!sol, status = prog.solve('sdd') c = prog.get_poly('c')(0,0) gamma = prog.get_poly('gamma')(0,0) lb = 1-(gamma+c*T) print('got c={:.2f} and gamma={:.2f}'.format(c, gamma)) print('lower bound is {:.2f}'.format(lb))optimizing... solved in 2.19s solsta.optimal got c=0.00 and gamma=0.80 lower bound is 0.20From the certificates we can now plot a lower bound on the probability that the target set is reached. 
From the proof above it follows that$$\mathbb{P}(x(T) \in X_1 \mid x(t) = x) \geq 1 - (c(T-t) + B_t(x)).$$%matplotlib notebook from matplotlib.pyplot import cm xx = np.linspace(-0.5, 1.5, 50) yy = np.linspace(-0.8, 0.8, 50) XX, YY = np.meshgrid(xx, yy) g0_val = np.vectorize(g0)(XX, YY) gu_val = np.vectorize(gu)(XX, YY) gT_val = np.vectorize(gT)(XX, YY) levels = sorted([0, 0.4, 0.8, 0.95, 0.99, lb]) for t in [0, 1, 2, 3, 4]: Bt = prog.get_poly('B{}'.format(t)) proba_min = 1 - (np.vectorize(Bt)(XX, YY) + c * (T-t)) plt.figure() CS = plt.contour(XX, YY, proba_min, levels=levels, cmap=cm.get_cmap('plasma'), vmin=0, vmax=1) plt.clabel(CS, inline=1, fontsize=10) plt.contour(XX, YY, g0_val, levels=[0], colors='black', linestyles='dotted') plt.contour(XX, YY, gu_val, levels=[0], colors='red', linestyles='dotted') plt.contour(XX, YY, gT_val, levels=[0], colors='black', linestyles='dotted') plt.title('t={}'.format(t)) plt.show()Time Series Analysisimport warnings import itertools import numpy as np import matplotlib.pyplot as plt warnings.filterwarnings("ignore") plt.style.use('fivethirtyeight') import pandas as pd import statsmodels.api as sm import matplotlib matplotlib.rcParams['axes.labelsize']=14 matplotlib.rcParams['xtick.labelsize']=12 matplotlib.rcParams['ytick.labelsize']=12 matplotlib.rcParams['text.color']='k' df=pd.read_excel('/home/anilla/DataScience/TimeSeries/Sample - Superstore.xls',parse_dates=True) df.head() df.columns.tolist()Time series analysis and forecasting of different categories i)Forecasting Furniture salesfurniture=df.loc[df['Category']== 'Furniture'] furniture.head() ## checking time range of the sale of the furniture start=furniture['Order Date'].min() end=furniture["Order Date"].max() start endData Prepocessingfurniture.columns.isna() cols=furniture.columns.tolist() cols furniture=furniture.set_index('Order Date') furniture.index y=furniture['Sales'].resample('MS').mean() y['2017':]Visualization of Furniture Sales Time Series Datay.plot(figsize=(15,8)) plt.show()one can also visualize data using time-series dedcomposition where the time series is decomposed to trend,seasonality and noisefrom pylab import rcParams rcParams['figure.figsize'] =18,8 decomposition=sm.tsa.seasonal_decompose(y,model='additive') fig=decomposition.plot() plt.show()Time series forecasting with ARIMA(Autoregressive Intergrated Moving Average)p=d=q=range(0,2) #p=number of lag observation #d=number of time raw observations are differencing #q=order of moving average pdq=list(itertools.product(p,d,q)) seasonal_pdq=[(x[0],x[1],12) for x in list(itertools.product(p,d,q))] w=list(itertools.product(p,q)) w list(p),list(q),list(d) pdq print('Eaxamples of parameter combinations for saesona ARIMA...') print('SARIMAX:{} x {}'.format(pdq[1],seasonal_pdq[1])) print('SARIMAX:{} x {}'.format(pdq[1],seasonal_pdq[2])) print('SARIMAX:{} x {}'.format(pdq[2],seasonal_pdq[3])) print('SARIMAX:{} x {}'.format(pdq[2],seasonal_pdq[4]))Eaxamples of parameter combinations for saesona ARIMA... SARIMAX:(0, 0, 1) x (0, 0, 12) SARIMAX:(0, 0, 1) x (0, 1, 12) SARIMAX:(0, 1, 0) x (0, 1, 12) SARIMAX:(0, 1, 0) x (1, 0, 12)Supplement Figure 25Figure S25. Heat map of the number of shared UMR regions across all pairwise comparisons of NAM lines. 
Boxed areas represent group by group comparisons Import required packagesfrom string import ascii_letters import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt plt.rcParams['figure.dpi'] = 150Set themesns.set_theme(style="white")Load the file and se order for NAM namesfile = "matrix-b73-ref.csv" b73Ref = pd.read_csv(file, index_col=0).reindex(["B97", "Ky21", "M162W", "Ms71", "Oh43", "Oh7B", "M37W", "Mo18W", "Tx303", "HP301", "P39", "Il14H", "CML52", "CML69", "CML103", "CML228", "CML247", "CML277", "CML322", "CML333", "Ki3", "Ki11", "NC350", "NC358", "Tzi8"]) b73Ref = b73Ref[["B97", "Ky21", "M162W", "Ms71", "Oh43", "Oh7B", "M37W", "Mo18W", "Tx303", "HP301", "P39", "Il14H", "CML52", "CML69", "CML103", "CML228", "CML247", "CML277", "CML322", "CML333", "Ki3", "Ki11", "NC350", "NC358", "Tzi8"]]Check to see if everyting is okayb73Ref.head(5)Plotting the heatmap for the UMR data: this data was using the B73 as reference (individual NAMs were mapped to B73 and then UMRs were determined)mask = np.triu(np.ones_like(b73Ref, dtype=bool)) f, ax = plt.subplots(figsize=(14, 14)) cmap = sns.diverging_palette(370, 120, n=80, as_cmap=True) sns.heatmap(b73Ref, mask=mask, cmap=cmap, robust=True, square=True, linewidths=.5, cbar_kws={"shrink": .5}) plt.ylabel('') ax.axvline(x=6, color ='blue', lw = 1.5, alpha = 0.75, ymax = 0.76) ax.axvline(x=9, color ='blue', lw = 1.5, alpha = 0.75, ymax = 0.64) ax.axvline(x=10, color ='blue', lw = 1.5, alpha = 0.75, ymax = 0.6) ax.axvline(x=12, color ='blue', lw = 1.5, alpha = 0.75, ymax = 0.52) ax.axhline(y=6, color ='black', lw = 1.5, alpha = 0.75, xmax = 0.24) ax.axhline(y=9, color ='black', lw = 1.5, alpha = 0.75, xmax = 0.36) ax.axhline(y=10, color ='black', lw = 1.5, alpha = 0.75, xmax = 0.4) ax.axhline(y=12, color ='black', lw = 1.5, alpha = 0.75, xmax = 0.48) mycol = ["#4169E1", "#4169E1", "#4169E1", "#4169E1", "#4169E1", "#4169E1", "#787878", "#787878", "#787878", "#DA70D6", "#FF4500", "#FF4500", "#32CD32", "#32CD32", "#32CD32", "#32CD32", "#32CD32", "#32CD32", "#32CD32", "#32CD32", "#32CD32", "#32CD32", "#32CD32", "#32CD32", "#32CD32"] for tick, color in zip(ax.get_xticklabels(), mycol): tick.set_color(color) for tick, color in zip(ax.get_yticklabels(), mycol): tick.set_color(color)Experiments on toy ring data%env CUDA_VISIBLE_DEVICES=5 %load_ext autoreload %autoreload 2 import numpy as np import umap import matplotlib.pyplot as plt from scipy.sparse import coo_matrix from umap.my_utils import compute_low_dim_psims, get_ring import os import pickle fig_path = "../figures" data_path = "../data/toy_ring" data_seed = 3 umap_seed = 0 radius = 4 sig = 0.5 sig_str = "_".join(str(sig).split(".")) n = 1000 try: data = np.load(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_original_seed_{data_seed}.npy")) except FileNotFoundError: np.random.seed(data_seed) data = get_ring(n, radius, sig, noise="uniform") np.save(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_original_seed_{data_seed}.npy"), data) fig_data = plt.figure() plt.scatter(*data.T, s=1, alpha = 0.5) plt.gca().set_aspect("equal") plt.axis('off') fig_data.savefig(os.path.join(fig_path, f"toy_ring_{n}_{radius}_{sig_str}_original_seed_{data_seed}.png"), bbox_inches = 'tight', pad_inches = 0,dpi=300) # Initialize at the input data try: with open(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_t_seed_{data_seed}_{umap_seed}.pkl"), "rb") as file: umapperns_init = pickle.load(file) embd_init = umapperns_init.embedding_ except FileNotFoundError: 
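# No cached result on disk, so fit from scratch. Passing init=data (an (n, 2)
# array) makes UMAP start its layout optimisation from the original ring
# coordinates instead of the default spectral initialisation; the fitted object
# is then pickled below so reruns can skip the computation.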
umapperns_init = umap.UMAP(init=data, random_state=umap_seed) embd_init = umapperns_init.fit_transform(data) with open(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_umapperns_init_seed_{data_seed}_{umap_seed}.pkl"), "wb") as file: pickle.dump(umapperns_init, file, pickle.HIGHEST_PROTOCOL) plt.scatter(*embd_init.T, s=1, alpha = 0.5) plt.gca().set_aspect("equal") plt.axis('off') plt.savefig(os.path.join(fig_path, f"toy_ring_{n}_{radius}_{sig_str}_init_seed_{data_seed}_{umap_seed}.png"), bbox_inches = 'tight', pad_inches = 0,dpi=300) # Default UMAP hyperparameters try: with open(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_umapperns_default_seed_{data_seed}_{umap_seed}.pkl"), "rb") as file: umapperns_default = pickle.load(file) embd_default = umapperns_default.embedding_ except FileNotFoundError: umapperns_default = umap.UMAP( random_state=0) embd_default = umapperns_default.fit_transform(data) with open(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_umapperns_default_seed_{data_seed}_{umap_seed}.pkl"), "wb") as file: pickle.dump(umapperns_default, file, pickle.HIGHEST_PROTOCOL) plt.scatter(*embd_default.T, s=1, alpha=0.5) plt.gca().set_aspect("equal") plt.axis('off') plt.savefig(os.path.join(fig_path, f"toy_ring_{n}_{radius}_{sig_str}_default_seed_{data_seed}_{umap_seed}.png"), bbox_inches = 'tight', pad_inches = 0,dpi=300) # Optimize for 10000 epochs try: with open(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_umapperns_10000_seed_{data_seed}_{umap_seed}.pkl"), "rb") as file: umapperns_10000 = pickle.load(file) embd_10000= umapperns_10000.embedding_ except FileNotFoundError: umapperns_10000 = umap.UMAP(random_state=umap_seed, n_epochs=10000, verbose=True) embd_10000 = umapperns_10000.fit_transform(data) with open(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_umapperns_10000_seed_{data_seed}_{umap_seed}.pkl"), "wb") as file: pickle.dump(umapperns_10000, file, pickle.HIGHEST_PROTOCOL) plt.scatter(*embd_10000.T, s=1, alpha=0.5) plt.gca().set_aspect("equal") plt.axis('off') plt.savefig(os.path.join(fig_path, f"toy_ring_{n}_{radius}_{sig_str}_10000_seed_{data_seed}_{umap_seed}.png"), bbox_inches = 'tight', pad_inches = 0,dpi=300) # Initialize at input data, optimize for 10000 epochs try: with open(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_umapperns_init_10000_seed_{data_seed}_{umap_seed}.pkl"), "rb") as file: umapperns_init_10000 = pickle.load(file) embd_init_10000= umapperns_init_10000.embedding_ except FileNotFoundError: umapperns_init_10000 = umap.UMAP(random_state=umap_seed, init=data, n_epochs=10000, verbose=True) embd_init_10000 = umapperns_init_10000.fit_transform(data) with open(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_umapperns_init_10000_seed_{data_seed}_{umap_seed}.pkl"), "wb") as file: pickle.dump(umapperns_init_10000, file, pickle.HIGHEST_PROTOCOL) plt.scatter(*embd_init_10000.T, s=1,alpha=0.5) plt.gca().set_aspect("equal") plt.axis('off') plt.savefig(os.path.join(fig_path, f"toy_ring_{n}_{radius}_{sig_str}_init_10000_seed_{data_seed}_{umap_seed}.png"), bbox_inches = 'tight', pad_inches = 0,dpi=300) # get dense input similarities min_dist = 0.1 spread = 1.0 a, b= umap.umap_.find_ab_params(spread=spread, min_dist=min_dist) low_sim = compute_low_dim_psims(data, a, b) low_sim_sparse = coo_matrix(low_sim) # Initialize at input data, use dense input similarities try: with open(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_umapperns_init_graph_seed_{data_seed}_{umap_seed}.pkl"), "rb") 
as file: umapperns_init_graph = pickle.load(file) embd_init_graph = umapperns_init_graph.embedding_ except FileNotFoundError: umapperns_init_graph = umap.UMAP(init=data, graph=low_sim_sparse, min_dist=min_dist, spread=spread, random_state=umap_seed, verbose=True) embd_init_graph = umapperns_init_graph.fit_transform(data) with open(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_umapperns_init_graph_seed_{data_seed}_{umap_seed}.pkl"), "wb") as file: pickle.dump(umapperns_init_graph, file, pickle.HIGHEST_PROTOCOL) plt.scatter(*embd_init_graph.T, s=1, alpha=0.5) plt.gca().set_aspect("equal") plt.axis('off') plt.savefig(os.path.join(fig_path, f"toy_ring_{n}_{radius}_{sig_str}_init_graph_seed_{data_seed}_{umap_seed}.png"), bbox_inches = 'tight', pad_inches = 0,dpi=300) # Initialize at input data, optimize for 10000 epochs use dense input similarities try: with open(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_umapperns_init_graph_10000_seed_{data_seed}_{umap_seed}.pkl"), "rb") as file: umapperns_init_graph_10000 = pickle.load(file) embd_init_graph_10000 = umapperns_init_graph_10000.embedding_ except FileNotFoundError: umapperns_init_graph_10000 = umap.UMAP(init=data, graph=low_sim_sparse, min_dist=min_dist, spread=spread, n_epochs=10000, random_state=umap_seed, verbose=True) embd_init_graph_10000 = umapperns_init_graph_10000.fit_transform(data) with open(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_umapperns_init_graph_10000_seed_{data_seed}_{umap_seed}.pkl"), "wb") as file: pickle.dump(umapperns_init_graph_10000, file, pickle.HIGHEST_PROTOCOL) plt.scatter(*embd_init_graph_10000.T, s=1, alpha=0.5) plt.gca().set_aspect("equal") plt.axis('off') plt.savefig(os.path.join(fig_path, f"toy_ring_{n}_{radius}_{sig_str}_init_graph_10000_seed_{data_seed}_{umap_seed}.png"), bbox_inches = 'tight', pad_inches = 0,dpi=300)Push tail of negative samples# Initialize at input data, optimize for 10000 epochs and push tail of negative sample try: with open(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_umapperns_init_10000_losses_push_tail_seed_{data_seed}_{umap_seed}.pkl"), "rb") as file: umapperns_init_10000_losses_push_tail = pickle.load(file) embd_init_10000_losses_push_tail = umapperns_init_10000_losses_push_tail.embedding_ except FileNotFoundError: umapperns_init_10000_losses_push_tail = umap.UMAP(init=data, random_state=umap_seed, n_epochs=10000, verbose=True, push_tail=True) embd_init_10000_losses_push_tail = umapperns_init_10000_losses_push_tail.fit_transform(data) with open(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_umapperns_init_10000_losses_push_tail_seed_{data_seed}_{umap_seed}.pkl"), "wb") as file: pickle.dump(umapperns_init_10000_losses_push_tail, file, pickle.HIGHEST_PROTOCOL) plt.figure() plt.scatter(*embd_init_10000_losses_push_tail.T, s=1, alpha = 0.5) plt.gca().set_aspect("equal") plt.axis('off') plt.savefig(os.path.join(fig_path, f"toy_ring_{n}_{radius}_{sig_str}_init_10000_push_tail_seed_{data_seed}_{umap_seed}.png"), bbox_inches = 'tight', pad_inches = 0,dpi=300) # Initialize at input data, optimize for 10000 iterations, use dense input similarities and push tail of negative sample try: with open(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_umapperns_init_graph_10000_losses_push_tail_seed_{data_seed}_{umap_seed}.pkl"), "rb") as file: umapperns_init_graph_10000_losses_push_tail = pickle.load(file) embd_init_graph_10000_losses_push_tail = umapperns_init_graph_10000_losses_push_tail.embedding_ except FileNotFoundError: 
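# Again no cached result, so refit. The graph= and push_tail= keyword arguments
# are not part of stock umap-learn and presumably come from the modified UMAP
# build this repo uses (see the umap.my_utils import above): graph= supplies the
# precomputed dense similarity matrix (low_sim_sparse) as the input graph, and
# push_tail=True appears to apply the repulsive negative-sampling update to the
# sampled tail point as well as the head, as the section title suggests.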
umapperns_init_graph_10000_losses_push_tail = umap.UMAP(init=data, graph=low_sim_sparse, random_state=umap_seed, n_epochs=10000, verbose=True, push_tail=True) embd_init_graph_10000_losses_push_tail = umapperns_init_graph_10000_losses_push_tail.fit_transform(data) with open(os.path.join(data_path, f"toy_ring_{n}_{radius}_{sig_str}_umapperns_init_graph_10000_losses_push_tail_seed_{data_seed}_{umap_seed}.pkl"), "wb") as file: pickle.dump(umapperns_init_graph_10000_losses_push_tail, file, pickle.HIGHEST_PROTOCOL) plt.figure() plt.scatter(*embd_init_graph_10000_losses_push_tail.T, s=1, alpha = 0.5) plt.gca().set_aspect("equal") plt.axis('off') plt.savefig(os.path.join(fig_path, f"toy_ring_{n}_{radius}_{sig_str}_init_graph_10000_push_tail_seed_{data_seed}_{umap_seed}.png"), bbox_inches = 'tight', pad_inches = 0,dpi=300)Learning from Voxelsimport numpy as np from PIL import Image import open3d as o3d import plotly.graph_objects as go import plotly.io as pio from plotly.subplots import make_subplots import plotly.figure_factory as ffThe point cloud# Load point cloud data pcd = o3d.io.read_point_cloud("../data/bun_zipper_res2.ply") points = np.asarray(pcd.points) x = points[:, 0] y = points[:, 1] z = points[:, 2] point_cloud = go.Scatter3d(x=x, y=y, z=z, mode='markers', hovertemplate="Point
x: %{x}
y: %{y}
z: %{z}", hoverlabel=dict(bgcolor='white'), marker=dict(size=5, color=z, colorscale='magma'))Point clouds as voxel grids# Helper functions def make_voxel(origin, index=0, voxel_size=1): ox, oy, oz = origin vs = voxel_size x = [ox, ox, ox+vs, ox+vs, ox, ox, ox+vs, ox+vs] y = [oy, oy+vs, oy+vs, oy, oy, oy+vs, oy+vs, oy] z = [oz, oz, oz, oz, oz+vs, oz+vs, oz+vs, oz+vs] i = (np.array([7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2]) + index * 8).tolist() j = (np.array([3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3]) + index * 8).tolist() k = (np.array([0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6]) + index * 8).tolist() return x, y, z, i, j, k def get_voxels(voxel_grid): x, y, z, i, j, k = [], [], [], [], [], [] for index, v in enumerate(voxel_grid.get_voxels()): voxel = make_voxel(origin=v.grid_index, index=index) x.extend(voxel[0]) y.extend(voxel[1]) z.extend(voxel[2]) i.extend(voxel[3]) j.extend(voxel[4]) k.extend(voxel[5]) return x, y, z, i, j, k # Make voxel grid from point cloud and transform to mesh for visualization voxel_grid = o3d.geometry.VoxelGrid.create_from_point_cloud(pcd, voxel_size=0.005) x, y, z, i, j, k = get_voxels(voxel_grid) # Use Open3D to remove duplicate and faulty vertices and triangles vertices = np.vstack([x, y, z]).T triangles = np.vstack([i, j, k]).T mesh = o3d.geometry.TriangleMesh() mesh.vertices = o3d.utility.Vector3dVector(vertices) mesh.triangles = o3d.utility.Vector3iVector(triangles) mesh.remove_duplicated_vertices() mesh.remove_unreferenced_vertices() mesh.remove_duplicated_triangles() mesh.remove_degenerate_triangles() # Convert to Numpy for visualization vertices = np.asarray(mesh.vertices) triangles = np.asarray(mesh.triangles) x = vertices[:, 0] y = vertices[:, 1] z = vertices[:, 2] i = triangles[:, 0] j = triangles[:, 1] k = triangles[:, 2] voxel_grid = go.Mesh3d(x=x, y=y, z=z, i=i, j=j, k=k, hovertemplate="Voxel
x: %{x}
y: %{y}
z: %{z}", hoverlabel=dict(bgcolor='white'), colorscale='magma', intensity=z, flatshading=True, showscale=False) # Make figure fig = make_subplots(rows=1, cols=2, column_widths=[0.5, 0.5], horizontal_spacing=0, vertical_spacing=0, specs=[[dict(type='Mesh3d'), dict(type='Mesh3d')]]) fig.add_trace(point_cloud, row=1, col=1) fig.add_trace(voxel_grid, row=1, col=2) # Viewpoint camera1 = dict(eye=dict(x=-1.8, y=1.3, z=1.9), up=dict(x=0, y=1, z=0), center=dict(x=0, y=0, z=0)) camera2 = dict(eye=dict(x=-1.8, y=1.3, z=1.9), up=dict(x=0, y=1, z=0), center=dict(x=0, y=0, z=0)) fig.update_layout(scene1=dict( xaxis=dict(visible=False), yaxis=dict(visible=False), zaxis=dict(visible=False), aspectmode='data', camera=camera1), scene2=dict( xaxis=dict(visible=False), yaxis=dict(visible=False), zaxis=dict(visible=False), aspectmode='data', camera=camera2), height=500, margin=dict(r=0, l=0, b=0, t=0, pad=0), scene_dragmode='orbit', showlegend=False) # Save figure pio.write_html(fig, file="../_includes/figures/pcd_vs_voxel.html", full_html=False, include_plotlyjs='cdn')The griddef make_grid(res, pos=[0, 0, 0], color='black', line_width=1): lines = [] for i in range(res + 1): for j in range(res + 1): lines.append(go.Scatter3d(x=np.array([0, res]) + pos[0], y=np.array([i, i]) + pos[1], z=np.array([j, j]) + pos[2], mode='lines', marker=dict(color=color), line=dict(width=line_width), showlegend=False)) lines.append(go.Scatter3d(x=np.array([i, i]) + pos[0], y=np.array([0, res]) + pos[1], z=np.array([j, j]) + pos[2], mode='lines', marker=dict(color=color), line=dict(width=line_width), showlegend=False)) lines.append(go.Scatter3d(x=np.array([i, i]) + pos[0], y=np.array([j, j]) + pos[1], z=np.array([0, res]) + pos[2], mode='lines', marker=dict(color=color), line=dict(width=line_width), showlegend=False)) return lines fig = go.Figure(make_grid(res=18)) # Make voxel grid from point cloud and transform to mesh for visualization voxel_grid = o3d.geometry.VoxelGrid.create_from_point_cloud(pcd, voxel_size=0.01) x, y, z, i, j, k = get_voxels(voxel_grid) # Use Open3D to remove duplicate and faulty vertices and triangles vertices = np.vstack([x, y, z]).T triangles = np.vstack([i, j, k]).T mesh = o3d.geometry.TriangleMesh() mesh.vertices = o3d.utility.Vector3dVector(vertices) mesh.triangles = o3d.utility.Vector3iVector(triangles) mesh.remove_duplicated_vertices() mesh.remove_unreferenced_vertices() mesh.remove_duplicated_triangles() mesh.remove_degenerate_triangles() # Convert to Numpy for visualization vertices = np.asarray(mesh.vertices) triangles = np.asarray(mesh.triangles) x = vertices[:, 0] + 1 y = vertices[:, 1] + 1 z = vertices[:, 2] + 1 i = triangles[:, 0] j = triangles[:, 1] k = triangles[:, 2] fig.add_trace(go.Mesh3d(x=x, y=y, z=z, i=i, j=j, k=k, hovertemplate="Voxel
x: %{x}
y: %{y}
z: %{z}", hoverlabel=dict(bgcolor='white'), colorscale='magma', intensity=z, flatshading=True, showscale=False)) # Viewpoint camera = dict(eye=dict(x=0, y=0, z=2), up=dict(x=0, y=1, z=0), center=dict(x=0, y=0, z=0), projection=dict(type="orthographic")) fig.update_layout(scene=dict( xaxis=dict(visible=False), yaxis=dict(visible=False), zaxis=dict(visible=False), aspectmode='data'), height=700, margin=dict(r=0, l=0, b=0, t=0, pad=0), scene_camera=camera, scene_dragmode="orbit") # Save figure pio.write_html(fig, file="../_includes/figures/voxel_grid.html", full_html=False, include_plotlyjs='cdn')3D Convolutionsdef conv_3d(index, res, colors, weights): # This is not a correct convolution because edge cases are ignored! mask = [] indices = np.arange(-13, 14) for i in indices: # zero padding if i >= 0 and i < res**3: mask.append(colors[index + i]) else: mask.append(0) return sum(np.array(mask) * np.array(weights)) data = make_grid(res=7, line_width=2) + make_grid(res=3, pos=[4, 4, 4], color='red', line_width=3) # Input (voxel grid) grid = np.array(np.meshgrid(np.arange(5), np.arange(5), np.arange(5))).T.reshape(-1, 3) colors = [] for index, v in enumerate(grid): voxel = make_voxel(origin=[v[0], v[1], v[2]], index=0) c = np.random.randint(255) colors.append(c) data.append(go.Mesh3d(x=np.array(voxel[0]) + 1, y=np.array(voxel[1]) + 1, z=np.array(voxel[2]) + 1, i=voxel[3], j=voxel[4], k=voxel[5], hovertemplate="Voxel
x: %{x}
y: %{y}
z: %{z}", hoverlabel=dict(bgcolor='white'), color=f"rgb{c, 204, 150}", flatshading=True, showscale=False)) # Kernel (weights) grid = np.array(np.meshgrid(np.arange(3), np.arange(3), np.arange(3))).T.reshape(-1, 3) weights = [] for index, v in enumerate(grid): voxel = make_voxel(origin=[v[0], v[1], v[2]], index=0) w = np.random.randint(255) weights.append(w * 0.001) data.append(go.Mesh3d(x=np.array(voxel[0]) + 4, y=np.array(voxel[1]) + 4, z=np.array(voxel[2]) + 4, i=voxel[3], j=voxel[4], k=voxel[5], hovertemplate="Voxel
x: %{x}
y: %{y}
z: %{z}", hoverlabel=dict(bgcolor='white'), color=f"rgba{w, w, w, 0.5}", flatshading=True, showscale=False)) # Output (feature map) grid = np.array(np.meshgrid(np.arange(4), np.arange(4), np.arange(4))).T.reshape(-1, 3) for index, v in enumerate(grid): voxel = make_voxel(origin=[v[0], v[1], v[2]], index=0) o = conv_3d(index, 5, colors, weights) data.append(go.Mesh3d(x=np.array(voxel[0]) + 10, y=np.array(voxel[1]) + 1.5, z=np.array(voxel[2]) + 1.5, i=voxel[3], j=voxel[4], k=voxel[5], hovertemplate="Voxel
x: %{x}
y: %{y}
z: %{z}", hoverlabel=dict(bgcolor='white'), color=f"rgb{255, o, 146}", flatshading=True, showscale=False)) fig = go.Figure(data) # Viewpoint camera = dict(eye=dict(x=1.25, y=1.25, z=1.25), up=dict(x=0, y=1, z=0), center=dict(x=0, y=0, z=0)) fig.update_layout(scene=dict( xaxis=dict(visible=False), yaxis=dict(visible=False), zaxis=dict(visible=False), aspectmode='data'), height=700, margin=dict(r=0, l=0, b=0, t=0, pad=0), scene_camera=camera, scene_dragmode="orbit") # Save figure pio.write_html(fig, file="../_includes/figures/3d_conv.html", full_html=False, include_plotlyjs='cdn')Analysis# Imports import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns fraud = pd.read_csv('../data/data.csv')Inspect Datatypes = fraud.dtypes rsample = fraud.sample(frac=1) #print(rsample) #print(type(rsample)) sample = rsample.head() n = len(fraud) print(sample, "\n") print("Length: ", n, "\n") # 6,362,620 rows print("Types: ", types) # Description stats = fraud.describe().apply(lambda s: s.apply('{0:.2f}'.format)) print(stats)step amount oldbalanceOrg newbalanceOrig oldbalanceDest \ count 6362620.00 6362620.00 6362620.00 6362620.00 6362620.00 mean 243.40 179861.90 833883.10 855113.67 1100701.67 std 142.33 603858.23 2888242.67 2924048.50 3399180.11 min 1.00 0.00 0.00 0.00 0.00 25% 156.00 13389.57 0.00 0.00 0.00 50% 239.00 74871.94 14208.00 0.00 132705.66 75% 335.00 208721.48 107315.18 144258.41 943036.71 max 743.00 92445516.64 59585040.37 49585040.37 356015889.35 newbalanceDest isFraud isFlaggedFraud count 6362620.00 6362620.00 6362620.00 mean 1224996.40 0.00 0.00 std 3674128.94 0.04 0.00 min 0.00 0.00 0.00 25% 0.00 0.00 0.00[...]Plots* https://pandas.pydata.org/pandas-docs/version/0.23.4/generated/pandas.DataFrame.plot.html* https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.html# Filtered Data flagged_rows = fraud[fraud.isFlaggedFraud == 1] unflagged_rows=fraud[fraud.isFlaggedFraud == 0] fraud_rows = fraud[fraud.isFraud == 1] notfraud_rows = fraud[fraud.isFraud == 0] flagged_rows.head() unflagged_rows.head()Amount vs Steps* Seems to be a sharp cutoff in amount among all steps* huge spike of amount around ~300 steps# Amount vs Steps fig1, ax1 = plt.subplots(figsize=(10, 10)) # width, height ax1.plot(fraud.amount, fraud.step); ax1.set(xlabel='amount', ylabel='steps', title='Amount vs Steps') fig1.savefig('AmountStep.png') # Range #bin_range = range(min(unflagged_rows), max(unflagged_rows) + 50, 50) # Amout hist for nonflagged fig2, ax2 = plt.subplots(figsize=(10,10)) ax2.hist(unflagged_rows.amount, bins=50, range=[0, 15000000], density=True, color="Purple") ax2.set(xlabel='unflagged amount', ylabel='Freq', title='Amount Dist of Unflagged') fig2.savefig('AmountDistUnflagged.png') # Amount hist for Flagged # rsample.plot(x='amount', y='isFlaggedFraud', kind="hist") fig3, ax3 = plt.subplots(figsize=(10, 10)) # width, height ax3.hist(flagged_rows.amount, bins=10, range=[0, 15000000], density=True, cumulative=False, color="Magenta") ax3.set(xlabel='amount', ylabel='Freq', title='Amount vs Freq') #ax.set_xlim([0, 20000000]) fig3.savefig('AmountDistFlagged.png') # Amount hist for Fraudulent transactions fig4, ax4 = plt.subplots(figsize=(10,10)) ax4.hist(fraud_rows.amount, bins=10, range=[0, 15000000], density=True, color="Red") ax4.set(xlabel='amount', ylabel='Freq', title='Amount dist for Fraudulent transactions') fig4.savefig('AmountDistFraud.png') # Amount hist for Non-Fraudulent transactions fig5, ax5 = plt.subplots(figsize=(10,10)) ax5.hist(notfraud_rows.amount, 
bins=10, range=[0, 15000000], density=True, color="Green") ax5.set(xlabel='amount', ylabel='Freq', title='Amount dist for Non-Fraudulent transactions') fig5.savefig('AmountDistNotFraud.png')From these plots it can be seen that, for both potentially and actually fraudulent transactions, if the amount is higher than a certain threshold the transaction is most likely fraudulent. That threshold seems to be around .175*10^7 (about 1.75 million)# Range range6 =[0, 800] # Step distribution for non and fraudulent transactions fig6, ((ax61, ax62)) = plt.subplots(nrows=1, ncols=2, figsize=(10, 10)) ax61.hist(fraud_rows.step, bins=20, range=range6, density=True, color="yellow") ax62.hist(notfraud_rows.step, bins=20, range=range6, density=True, color="blue") ax61.set(title="Step dist for Fraud") ax62.set(title="Step dist for NotFraud") fig6.savefig('StepDists.png')We see that fraud also has a higher chance of occurring at higher step counts. High steps + high amount == higher chance of fraud Possible points of analysis* newbalanceOrig going to 0* previously fraudulent merchants/customers* oldbalanceDest rising from 0* which types fraud is most likely to be# Types fig7, ((ax71, ax72)) = plt.subplots(nrows=1, ncols=2, figsize=(10,10)) ax71.hist(fraud_rows.type) ax72.hist(notfraud_rows.type) ax71.set(title="Type hist for Fraud") ax72.set(title="Type hist for NotFraud") fig7.savefig('TypeHist.png')Most if not all fraudulent transactions are either TRANSFER or CASH_OUT# newBalanceOrig, newbalanceDest fig8, ((ax81, ax82), (ax83, ax84)) = plt.subplots(nrows=2, ncols=2, figsize=(10,10)) ax81.hist(fraud_rows.newbalanceOrig) ax82.hist(notfraud_rows.newbalanceOrig) ax81.set(title="Fraud") ax82.set(title="NotFraud") ax83.hist(fraud_rows.newbalanceDest) ax84.hist(notfraud_rows.newbalanceDest) ax83.set(title="Fraud") ax84.set(title="NotFraud") #fig8.savefig('TypeHist.png')Dataset Description* 6.3 million rows* 11 features Columns* Numerical: **step**: maps a unit of time in the real world (steps to hours)* Categorical: **type**: CASH-IN, CASH-OUT, DEBIT, PAYMENT, TRANSFER (discrete)* Numerical: **amount**: amount of the transaction in local currency* ID: **nameOrig**: customer who started the transaction (Customer ID, Merchant ID)* Numerical: oldbalanceOrg: initial balance before the transaction* Numerical: **newbalanceOrig**: new balance after the transaction* ID: nameDest: customer who is the recipient of the transaction* Numerical: oldbalanceDest: initial balance of the recipient before the transaction (no info for merchants)* Numerical: newbalanceDest: new balance of the recipient after the transaction (no info for merchants) Last two* Bool (Target): isFraud: is a fraudulent transaction (fraud if the agent aims to empty funds into another account and cash out)* Bool: isFlaggedFraud: any massive transfers or illegal attempts Data preparation and training* ~~Prep NA~~ (no null values in this dataset)* Get feature matrix and labels* Train* Test* Evaluateimport pandas as pd import numpy as np from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn.preprocessing import OneHotEncoder from sklearn.model_selection import train_test_split data = pd.read_csv('../data/data.csv')* Choose the right model and hyperparameters using the sklearn flowchart* Categorizing: with labeled data: 6M samples -> SGD Classifier and kernel approximationfrom sklearn.linear_model import SGDClassifier # Create Pipeline # Define Features and Transformer categorical_features =["type"] categorical_transformer = Pipeline(steps=[ ("imputer", SimpleImputer(strategy="constant", fill_value="missing")), 
("onehot", OneHotEncoder(handle_unknown="ignore")) ]) numerical_features = ['step', 'amount', 'oldbalanceOrg', 'newbalanceOrig', 'oldbalanceDest', 'newbalanceDest'] numerical_transformer = Pipeline(steps=[ ("imputer", SimpleImputer(strategy="mean", fill_value="missing")) ]) # id_features = ['nameOrig', 'nameDest'] # id_transformer = Pipeline(step=[ # ("imputer", SimpleImputer(strategy="constant", fill_value="missing")), # ("onehot") # ]) #flag_feature = ["isFlaggedFraud"] #flag_transformer = Pipeline preprocessor = ColumnTransformer( transformers=[ ("cat", categorical_transformer, categorical_features), ("num", numerical_transformer, numerical_features) ] ) clf = Pipeline(steps=[ ("preprocessor", preprocessor), ("model", SGDClassifier()) ]) # Feature Matrix X = fraud.drop("isFraud", axis=1).drop("nameDest", axis=1).drop("nameOrig", axis=1) # remove IDs for now # Labels y = fraud["isFraud"] print("X: ", X.head()) print("y: ", y.head()) #Split into sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2) clf.fit(X_train, y_train) clf.score(X_test, y_test)* Results without the ID columns returns 99.7% accurate* Optimize hyperparameters with searchCV''' from sklearn.model_selection import RandomizedSearchCV grid = { "loss": ['hinge', 'modified_huber', 'log'], "penalty": ['l2', 'l1', 'elasticnet'], "max_iter": [1,5,10,15] } rs_clf = RandomizedSearchCV( estimator=clf, param_distributions=grid, n_iter=10 ) # Run RS CLF rs_clf.fit(X_train, y_train) print(rs_clf.best_params) best = rs_clf.best_params_ '''Try other model:from sklearn.svm import LinearSVC clf2 = Pipeline(steps=[ ("preprocessor", preprocessor), ("model", LinearSVC()) ]) clf2.fit(X_train, y_train) clf2.score(X_test, y_test)* LinearSVC took too long because >100k samples* 98.8% accuracy Evaluate both modelsy_preds = clf.predict(X_test) y_preds2 = clf2.predict(X_test) from sklearn.metrics import classification_report, confusion_matrix, accuracy_score print("\nSGDClassifier: \n") print(classification_report(y_test, y_preds)) confusion_matrix(y_test, y_preds) print("Accuracy: ", accuracy_score(y_test, y_preds)) print("\nLinearSVC: \n") print(classification_report(y_test, y_preds2)) confusion_matrix(y_test, y_preds2) print("Accuracy: ", accuracy_score(y_test, y_preds2))SGDClassifier: precision recall f1-score support 0 1.00 1.00 1.00 1270888 1 0.28 0.74 0.41 1636 accuracy 1.00 1272524 macro avg 0.64 0.87 0.70 1272524 weighted avg 1.00 1.00 1.00 1272524 Accuracy: 0.9972644916716699 LinearSVC: precision recall f1-score support 0 1.00 0.99 0.99 1270888 1 0.09 0.93 0.17 1636 accuracy 0.99 1272524 macro avg 0.55 0.96 0.58 1272524 weighted avg 1.00 0.99 0.99 1272524 Accuracy: 0.9883153480798791Overview `clean_us_data.ipynb`: Fix data inconsistencies in the raw time series data from [`etl_us_data.ipynb`](./etl_us_data.ipynb).Inputs:* `outputs/us_counties.csv`: Raw county-level time series data for the United States, produced by running [etl_us_data.ipynb](./etl_us_data.ipynb)* `outputs/us_counties_meta.json`: Column type metadata for reading `data/us_counties.csv` with `pd.read_csv()`* `inputs/co-est2019-alldata.csv`: County-level population data from the U.S. 
Census Bureau, converted to CSV format with Excel (Original public domain data file available in many proprietary formats [here](https://www.census.gov/data/tables/time-series/demo/popest/2010s-state-total.html)).* [`https://github.com/thecityny/covid-19-nyc-data`](https://github.com/thecityny/covid-19-nyc-data): New York newspaper [THE CITY](https://thecity.nyc/)'s digest of the daily reports that the New York City health department posts at [`https://github.com/nychealth/coronavirus-data`](https://github.com/nychealth/coronavirus-data).Outputs:* `outputs/us_counties_clean.csv`: The contents of `outputs/us_counties.csv` after data cleaning* `outputs/us_counties_clean_meta.json`: Column type metadata for reading `data/us_counties_clean.csv` with `pd.read_csv()`**Note:** You can redirect these input and output files by setting the environment variables `COVID_INPUTS_DIR` and `COVID_OUTPUTS_DIR` to replacement values for the prefixes `inputs` and `outputs`, respectively, in the above paths. Read in the CSV file and apply the saved type information# Initialization boilerplate import os import json import pandas as pd import numpy as np import scipy.optimize import sklearn.metrics import matplotlib.pyplot as plt from typing import * import text_extensions_for_pandas as tp # Local file of utility functions import util # Allow environment variables to override data file locations. _OUTPUTS_DIR = os.getenv("COVID_OUTPUTS_DIR", "outputs") _INPUTS_DIR = os.getenv("COVID_INPUTS_DIR", "inputs") csv_file = os.path.join(_OUTPUTS_DIR, "us_counties.csv") meta_file = os.path.join(_OUTPUTS_DIR, "us_counties_meta.json") # Read column type metadata with open(meta_file) as f: cases_meta = json.load(f) # Pandas does not currently support parsing datetime64 from CSV files. # As a workaround, read the "Date" column as objects and manually # convert after. cases_meta["Date"] = "object" cases_raw = pd.read_csv(csv_file, dtype=cases_meta, parse_dates=["Date"]) # Restore the Pandas index cases_vertical = cases_raw.set_index(["FIPS", "Date"], verify_integrity=True) cases_vertical # Replace the missing values in the secondary dataset with 0's. cases_vertical["Confirmed_NYT"].fillna(0, inplace=True) cases_vertical["Deaths_NYT"].fillna(0, inplace=True) cases_vertical["Confirmed_NYT"] = cases_vertical["Confirmed_NYT"].astype("int64") cases_vertical["Deaths_NYT"] = cases_vertical["Deaths_NYT"].astype("int64") cases_vertical # Collapse each time series down to a single cell cases, dates = util.collapse_time_series(cases_vertical, ["Confirmed", "Deaths", "Recovered", "Confirmed_NYT", "Deaths_NYT"]) cases # Note that the previous cell also saved the values from the "Date" # column of `cases_vertical` into the Python variable `dates`: dates[:10], dates.shapeClean up the New York City dataThose silly folks at Johns Hopkins code all of New York city as beingin New York County. Each borough is actually in a different countywith a different FIPS code.Our secondary data set also merges together all of New York City, so wecan't use its data as a substitute.So we use generate numbers from THE CITY's digest of the New York City health department's fine-grained numbers.For dates before and after the availability of official numbers by borough,we extrapolate from the Johns Hopkins data for all of New York City.# First, double-check that the error is still there. max_bronx_confirmed = cases.loc[36005]["Confirmed"].max() if max_bronx_confirmed > 0: raise ValueError("Someone has fixed the problem with the New York data. 
" "Please disable the fix in this cell.") print(f"Time series for the Bronx before correction:\n{cases.loc[36005]['Confirmed']}")Time series for the Bronx before correction: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]Correct population numbers for New York CityThe raw data assigns the entire population of New York City toNew York County. Load the actual population from some Census Bureau data.# Population by county, from U.S. Census Bureau, converted to CSV with Excel; # hence the cp1255 encoding. # Original public domain data file available in many proprietary formats # at https://www.census.gov/data/tables/time-series/demo/popest/2010s-state-total.html population_data_file = os.path.join(_INPUTS_DIR, "co-est2019-alldata.csv") population_raw = pd.read_csv(population_data_file, encoding="cp1255") # Filter out state-level totals county_population = population_raw[population_raw["COUNTY"] != 0] # Convert FIPS code to the same format as the other tables county_population.insert(0, "FIPS", (county_population["STATE"] * 1000) + county_population["COUNTY"]) # Filter down to just the columns we need county_population = county_population[county_population["FIPS"] >= 1000][["FIPS", "POPESTIMATE2019"]] # Pull the populations for the New York city counties out of the above # dataframe. new_york_county_fips = 36061 nyc_fips = [ 36005, # Bronx County 36047, # Kings County new_york_county_fips, # New York County 36081, # Queens County 36085, # Richmond County ] nyc_populations = [ county_population[county_population["FIPS"] == f]["POPESTIMATE2019"].iloc[0] for f in nyc_fips ] total_population = sum(nyc_populations) # Before: cases.loc[nyc_fips][["State", "County", "Population"]] ny_county_population = county_population[county_population["FIPS"] == new_york_county_fips]["POPESTIMATE2019"].iloc[0] # Need to copy the series and modify the copy to avoid Pandas' "A value # is trying to be set on a copy of a slice from a DataFrame" warning. new_population = cases["Population"].copy(deep=True) new_population.loc[new_york_county_fips] = ny_county_population cases["Population"] = new_population # After: cases.loc[nyc_fips][["State", "County", "Population"]]Obtain New York City numbers by boroughThe New York City Department of Health and Mental Hygiene (DOHMH) publishes a breakdownof the latest case numbers by borough, updated about once a day.[THE CITY](https://thecity.nyc/), an independent newpaper, maintains anarchive of the DOHMH numbers over time.# Download THE CITY's digest of New York numbers by borough of residence. _THE_CITY_CSV_URL = ("https://github.com/thecityny/covid-19-nyc-data/" + "raw/master/borough.csv") nyc_raw = pd.read_csv(_THE_CITY_CSV_URL, parse_dates=["timestamp"]) nyc_raw nyc_cases = nyc_raw[nyc_raw["type"] == "cases"] nyc_deaths = nyc_raw[nyc_raw["type"] == "deaths"] nyc_probable_deaths = nyc_raw[nyc_raw["type"] == "deaths-probable"] nyc_cases nyc_deaths nyc_probable_deathsRegenerate the New York City time seriesUse the NYC health department numbers when possible. Extrapolate fro the JHU data for dates where borough-level numbers are unavailable or unreliable.# Pick points from the THE CITY data that align with timestamps # in the JHU data. # Earliest dates we try to align with. 
nyc_cases_start = np.datetime64("2020-03-17") nyc_deaths_start = np.datetime64("2020-04-15") nyc_cases_dates_to_align = pd.DatetimeIndex(dates[dates >= nyc_cases_start]).tz_localize("UTC") nyc_deaths_dates_to_align = pd.DatetimeIndex(dates[dates >= nyc_deaths_start]).tz_localize("UTC") borough_name_to_fips = { "bronx": 36005, # Bronx County "brooklyn": 36047, # Kings County "manhattan": new_york_county_fips, # New York County "queens": 36081, # Queens County "staten_island": 36085, # Richmond County } nyc_confirmed_jhu = cases["Confirmed"].loc[new_york_county_fips].copy() nyc_deaths_jhu = cases["Deaths"].loc[new_york_county_fips].copy() fips_to_confirmed_tail = {} for borough_name, fips in borough_name_to_fips.items(): cases_tail = np.zeros(shape=len(nyc_cases_dates_to_align), dtype=np.int64) for _, row in nyc_cases.iterrows(): ts = row["timestamp"] value = row[borough_name] day_plus_one = np.argmax(nyc_cases_dates_to_align >= ts) if day_plus_one > 0: cases_tail[day_plus_one - 1] = value # The detailed NYC data is often a day or two behind JHU. Use the # proportional increase from the corresponding JHU data points to # extrapolate missing values at the tail end of the new tail. tail_offset = len(nyc_confirmed_jhu) - len(cases_tail) for i in range(1, len(cases_tail)): if 0 == cases_tail[i]: ratio = (nyc_confirmed_jhu[tail_offset + i] / nyc_confirmed_jhu[tail_offset + i - 1]) cases_tail[i] = cases_tail[i-1] * ratio fips_to_confirmed_tail[fips] = cases_tail fips_to_confirmed = {} first_day_total_confirmed = sum([a[0] for a in fips_to_confirmed_tail.values()]) for borough_name, fips in borough_name_to_fips.items(): tail = fips_to_confirmed_tail[fips] first_day_ratio = tail[0] / first_day_total_confirmed before = (nyc_confirmed_jhu * first_day_ratio).astype(np.int64) after = before.copy() after[-len(tail):] = tail fips_to_confirmed[fips] = after fips_to_deaths_tail = {} for borough_name, fips in borough_name_to_fips.items(): deaths_tail = np.zeros(shape=len(nyc_deaths_dates_to_align), dtype=np.int64) for _, row in nyc_deaths.iterrows(): ts = row["timestamp"] value = row[borough_name] day_plus_one = np.argmax(nyc_deaths_dates_to_align >= ts) if day_plus_one > 0: deaths_tail[day_plus_one - 1] = value for _, row in nyc_probable_deaths.iterrows(): ts = row["timestamp"] value = row[borough_name] day_plus_one = np.argmax(nyc_deaths_dates_to_align >= ts) if day_plus_one > 0: deaths_tail[day_plus_one - 1] += value tail_offset = len(nyc_deaths_jhu) - len(deaths_tail) for i in range(1, len(deaths_tail)): if 0 == deaths_tail[i]: ratio = (nyc_deaths_jhu[tail_offset + i] / nyc_deaths_jhu[tail_offset + i - 1]) deaths_tail[i] = deaths_tail[i-1] * ratio fips_to_deaths_tail[fips] = deaths_tail print(fips_to_deaths_tail[36005]) fips_to_deaths = {} first_day_total_deaths = sum([a[0] for a in fips_to_deaths_tail.values()]) for borough_name, fips in borough_name_to_fips.items(): tail = fips_to_deaths_tail[fips] # The JHU data counts considerably more deaths than the NYC health # department's. Assume that the city's own data is more accurate and # scale the early JHU numbers to match. 
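# Concretely: first_day_ratio is this borough's share of the DOHMH citywide total on the
# first overlapping day, and jhu_nyc_ratio is how much larger the citywide JHU count is
# than that DOHMH total, so (nyc_deaths_jhu * first_day_ratio / jhu_nyc_ratio) rescales
# the early citywide JHU series down to a borough-level series consistent with DOHMH.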
first_day_jhu_deaths = nyc_deaths_jhu[-len(tail)] first_day_ratio = tail[0] / first_day_total_deaths jhu_nyc_ratio = first_day_jhu_deaths / first_day_total_deaths before = (nyc_deaths_jhu * first_day_ratio / jhu_nyc_ratio).astype(np.int64) after = before.copy() after[-len(tail):] = tail fips_to_deaths[fips] = after # print(borough_name) # print(f"{tail[0]} of {first_day_total_deaths} (vs {first_day_jhu_deaths})") # print(f"All of NYC:\n{nyc_deaths_jhu}") # print(f"Before:\n{before}") # print(f"After:\n{after}") #cases_vertical.loc[new_york_county_fips].iloc[50:100] # Copy to avoid the chained indexing warning. # (https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy) new_confirmed = cases["Confirmed"].copy(deep=True) for fips, series in fips_to_confirmed.items(): new_confirmed.loc[fips] = series cases["Confirmed"] = new_confirmed new_deaths = cases["Deaths"].copy(deep=True) for fips, series in fips_to_deaths.items(): new_deaths.loc[fips] = series cases["Deaths"] = new_deaths # NYC before plt.title("NYC Confirmed Cases (JHU Data)") plt.plot(nyc_confirmed_jhu, "o", markersize=2) plt.show() plt.title("NYC Deaths (JHU Data)") plt.plot(nyc_deaths_jhu, "o", markersize=2) plt.show() # NYC confirmed cases after cleaning: util.graph_examples(cases.loc[nyc_fips], "Confirmed", {}, num_to_pick=5) # NYC deaths by county after cleaning: util.graph_examples(cases.loc[nyc_fips], "Deaths", {}, num_to_pick=5)Clean up the Rhode Island dataThe Johns Hopkins data reports zero deaths in most of Rhode Island. Use the secondary data set from the New York Times for Rhode Island.print("Before:") util.graph_examples(cases, "Deaths", {}, num_to_pick=8, mask=(cases["State"] == "Rhode Island")) # Use our secondary data set for all Rhode Island data. ri_fips = cases[cases["State"] == "Rhode Island"].index.values.tolist() for colname in ["Confirmed", "Deaths"]: new_series = cases[colname].copy(deep=True) for fips in ri_fips: new_series.loc[fips] = cases[colname + "_NYT"].loc[fips] cases[colname] = new_series # Note that the secondary data set has not "Recovered" time series, so # we leave those numbers alone for now. print("After:") util.graph_examples(cases, "Deaths", {}, num_to_pick=8, mask=(cases["State"] == "Rhode Island"))After:Clean up the Utah dataThe Johns Hopkins data for Utah is missing quite a few data points.Use the New York Times data for Utah.print("Before:") util.graph_examples(cases, "Confirmed", {}, num_to_pick=8, mask=(cases["State"] == "Utah")) # The Utah time series from the New York Times' data set are more # complete, so we use those numbers. ut_fips = cases[cases["State"] == "Utah"].index.values for colname in ["Confirmed", "Deaths"]: new_series = cases[colname].copy(deep=True) for fips in ut_fips: new_series.loc[fips] = cases[colname + "_NYT"].loc[fips] cases[colname] = new_series # Note that the secondary data set has not "Recovered" time series, so # we leave those numbers alone for now. print("After:") util.graph_examples(cases, "Confirmed", {}, num_to_pick=8, mask=(cases["State"] == "Utah"))After:Flag additional problematic and missing data pointsUse heuristics to identify and flag problematic data points across all the time series. Generate Boolean masks that show the locations of theseoutliers.# Now we're done with the secondary data set, so drop its columns. cases = cases.drop(columns=["Confirmed_NYT", "Deaths_NYT"]) cases # Now we need to find and flag obvious data-entry errors. # We'll start by creating columns of "is outlier" masks. 
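# Each outlier mask column holds one 0/1 flag per (county, day) cell; np.zeros_like()
# gives it the same shape as the collapsed "Confirmed"/"Deaths"/"Recovered" tensor columns.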
# We use integers instead of Boolean values as a workaround for # https://github.com/pandas-dev/pandas/issues/33770 # Start out with everything initialized to "not an outlier" cases["Confirmed_Outlier"] = tp.TensorArray(np.zeros_like(cases["Confirmed"].values)) cases["Deaths_Outlier"] = tp.TensorArray(np.zeros_like(cases["Deaths"].values)) cases["Recovered_Outlier"] = tp.TensorArray(np.zeros_like(cases["Recovered"].values)) casesFlag time series that go from zero to nonzero and back againOne type of anomaly that occurs fairly often involves a time seriesjumping from zero to a nonzero value, then back to zero again.This pattern most likely indicates that a patient was erroneouslyclassified as COVID-19-positive, but that the county health departmentdidn't correct its historical data after they found out.Locate all instances of that pattern and mark the nonzero valuesas outliers.def nonzero_then_zero(series: np.array): empty_mask = np.zeros_like(series, dtype=np.int8) if series[0] > 0: # Special case: first value is nonzero return empty_mask first_nonzero_offset = 0 while first_nonzero_offset < len(series): if series[first_nonzero_offset] > 0: # Found the first nonzero. # Find the distance to the next zero value. next_zero_offset = first_nonzero_offset + 1 while (next_zero_offset < len(series) and series[next_zero_offset] > 0): next_zero_offset += 1 # Check the length of the run of zeros after # dropping back to zero. second_nonzero_offset = next_zero_offset + 1 while (second_nonzero_offset < len(series) and series[second_nonzero_offset] == 0): second_nonzero_offset += 1 nonzero_run_len = next_zero_offset - first_nonzero_offset second_zero_run_len = second_nonzero_offset - next_zero_offset # print(f"{first_nonzero_offset} -> {next_zero_offset} -> {second_nonzero_offset}; series len {len(series)}") if next_zero_offset >= len(series): # Everything after the first nonzero was a nonzero return empty_mask elif second_zero_run_len <= nonzero_run_len: # Series dropped back to zero, but the second zero # part was shorter than the nonzero section. # In this case, it's more likely that the second run # of zero values are actually missing values. return empty_mask else: # Series went zero -> nonzero -> zero -> nonzero # or zero -> nonzero -> zero -> [end] nonzero_run_mask = empty_mask.copy() nonzero_run_mask[first_nonzero_offset:next_zero_offset] = 1 return nonzero_run_mask first_nonzero_offset += 1 # If we get here, the series was all zeros return empty_mask for colname in ["Confirmed", "Deaths", "Recovered"]: addl_outliers = np.stack([nonzero_then_zero(s) for s in cases[colname]]) outliers_colname = colname + "_Outlier" new_outliers = cases[outliers_colname].values.astype(np.bool) | addl_outliers cases[outliers_colname] = tp.TensorArray(new_outliers.astype(np.int8)) # fips = 13297 # print(cases.loc[fips]["Confirmed"]) # print(nonzero_then_zero(cases.loc[fips]["Confirmed"])) # Let's have a look at which time series acquired the most outliers as # a result of the code in the previous cell. df = cases[["State", "County"]].copy() df["Confirmed_Num_Outliers"] = np.count_nonzero(cases["Confirmed_Outlier"], axis=1) counties_with_outliers = df.sort_values("Confirmed_Num_Outliers", ascending=False).head(10) counties_with_outliers # Plot the couties in the table above, with outliers highlighted. # The graph_examples() function is defined in util.py. 
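# For reference, a small worked example of the heuristic defined above:
# nonzero_then_zero(np.array([0, 3, 3, 0, 0, 0, 0, 5])) returns
# array([0, 1, 1, 0, 0, 0, 0, 0], dtype=int8) -- the short nonzero run is flagged because
# the run of zeros that follows it is longer than the nonzero run itself.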
util.graph_examples(cases, "Confirmed", {}, num_to_pick=10, mask=(cases.index.isin(counties_with_outliers.index)))Flag time series that drop to zero, then go back upAnother type of anomaly involves the time series dropping down to zero, then going up again. Since all three time series are supposedto be cumulative counts, this pattern most likely indicates missingdata.To correct for this problem, we mark any zero values after thefirst nonzero, non-outlier values as outliers, across all time series.def zeros_after_first_nonzero(series: np.array, outliers: np.array): nonzero_mask = (series != 0) nonzero_and_not_outlier = nonzero_mask & (~outliers) first_nonzero = np.argmax(nonzero_and_not_outlier) if 0 == first_nonzero and series[0] == 0: # np.argmax(nonzero_mask) will return 0 if there are no nonzeros return np.zeros_like(series) after_nonzero_mask = np.zeros_like(series) after_nonzero_mask[first_nonzero:] = True return (~nonzero_mask) & after_nonzero_mask for colname in ["Confirmed", "Deaths", "Recovered"]: outliers_colname = colname + "_Outlier" addl_outliers = np.stack([zeros_after_first_nonzero(s, o) for s, o in zip(cases[colname], cases[outliers_colname])]) new_outliers = cases[outliers_colname].values.astype(np.bool) | addl_outliers cases[outliers_colname] = tp.TensorArray(new_outliers.astype(np.int8)) # fips = 47039 # print(cases.loc[fips]["Confirmed"]) # print(cases.loc[fips]["Confirmed_Outlier"]) # print(zeros_after_first_nonzero(cases.loc[fips]["Confirmed"], cases.loc[fips]["Confirmed_Outlier"])) # Redo our "top 10 by number of outliers" analysis with the additional outliers df = cases[["State", "County"]].copy() df["Confirmed_Num_Outliers"] = np.count_nonzero(cases["Confirmed_Outlier"], axis=1) counties_with_outliers = df.sort_values("Confirmed_Num_Outliers", ascending=False).head(10) counties_with_outliers util.graph_examples(cases, "Confirmed", {}, num_to_pick=10, mask=(cases.index.isin(counties_with_outliers.index))) # The steps we've just done have removed quite a few questionable # data points, but you will definitely want to flag additional # outliers by hand before trusting descriptive statistics about # any county. # TODO: Incorporate manual whitelists and blacklists of outliers # into this notebook.Write out cleaned time series dataBy default, output files go to the `outputs` directory. You can use the `COVID_OUTPUTS_DIR` environment variable to override that location.# Break out our time series into multiple rows again for writing to disk. cleaned_cases_vertical = util.explode_time_series(cases, dates) cleaned_cases_vertical # The outlier masks are stored as integers as a workaround for a Pandas # bug. Convert them to Boolean values for writing to disk. cleaned_cases_vertical["Confirmed_Outlier"] = cleaned_cases_vertical["Confirmed_Outlier"].astype(np.bool) cleaned_cases_vertical["Deaths_Outlier"] = cleaned_cases_vertical["Deaths_Outlier"].astype(np.bool) cleaned_cases_vertical["Recovered_Outlier"] = cleaned_cases_vertical["Recovered_Outlier"].astype(np.bool) cleaned_cases_vertical # Write out the results to a CSV file plus a JSON file of type metadata. 
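# The JSON file written below maps each column name to the string form of its dtype so a
# downstream notebook can pass it to pd.read_csv(dtype=...), mirroring the way
# us_counties_meta.json was used at the top of this notebook.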
cleaned_cases_vertical_csv_data_file = os.path.join(_OUTPUTS_DIR,"us_counties_clean.csv") print(f"Writing cleaned data to {cleaned_cases_vertical_csv_data_file}") cleaned_cases_vertical.to_csv(cleaned_cases_vertical_csv_data_file, index=True) col_type_mapping = { key: str(value) for key, value in cleaned_cases_vertical.dtypes.iteritems() } cleaned_cases_vertical_json_data_file = os.path.join(_OUTPUTS_DIR,"us_counties_clean_meta.json") print(f"Writing metadata to {cleaned_cases_vertical_json_data_file}") with open(cleaned_cases_vertical_json_data_file, "w") as f: json.dump(col_type_mapping, f)Writing cleaned data to outputs/us_counties_clean.csv Writing metadata to outputs/us_counties_clean_meta.jsonAnalysis of Sales data DatasetThe given dataset contains monthly total sales of a company for the period 2013-2016. Objectives1. To analyse the sales data and understand the performance of the company.2. Find patterns and construct a model to forecast future sales. Load sales data and create visualizationfrom time_series import TimeSeries # Imports for data visualization import matplotlib.pyplot as plt from pandas.plotting import register_matplotlib_converters from matplotlib.dates import DateFormatter from matplotlib import dates as mpld register_matplotlib_converters() ts = TimeSeries('dataset/monthly_sales.csv', train_size=0.8) print("Sales Data") print(ts.data.describe()) print("Head and Tail of the time series") print(ts.data.head(5).iloc[:,1]) print(ts.data.tail(5).iloc[:,1]) # Plot of raw time series data plt.plot(ts.data.index,ts.data.sales) plt.gcf().autofmt_xdate() date_format = mpld.DateFormatter('%Y-%m') plt.gca().xaxis.set_major_formatter(date_format) plt.title("Sales Data Analysis (2013-2016)") plt.xlabel("Time") plt.ylabel("Sales") plt.show()Sales Data sales count 48.000000 mean 47858.351667 std 25221.124187 min 4519.890000 25% 29790.100000 50% 39339.515000 75% 65833.345000 max 118447.830000 Head and Tail of the time series date 2013-01-01 14236.90 2013-02-01 4519.89 2013-03-01 55691.01 2013-04-01 28295.35 2013-05-01 23648.29 Name: sales, dtype: float64 date 2016-08-01 63120.89 2016-09-01 87866.65 2016-10-01 77776.92 2016-11-01 118447.83 2016-12-01 83829.32 Name: sales, dtype: float64Seasonal Decompose of the time seriesSeasonal decompose is a method used to decompose the components of a time series into the following:- Level - average value in the series.- Trend - increasing or decreasing value in the series.- Seasonality - repeating short-term cycle in the series.- Noise - random variation in the series.The analysis of the components individually provide better insights for model selection.from statsmodels.tsa.seasonal import seasonal_decompose result_add = seasonal_decompose(ts.data.iloc[:,1],period=12,model='additive') result_add.plot() plt.gcf().autofmt_xdate() date_format = mpld.DateFormatter('%y-%m') plt.gca().xaxis.set_major_formatter(date_format) result_mul = seasonal_decompose(ts.data.iloc[:,1],period=12,model='multiplicative') result_mul.plot() plt.gcf().autofmt_xdate() date_format = mpld.DateFormatter('%y-%m') plt.gca().xaxis.set_major_formatter(date_format) plt.show()Observations from Seasonal Decompose1. The time series seems to roughly have a constant seasonality but has an overall **increasing trend**.2. A slightly decreasing trend is observed till 2014-07 after that an increasing trend is observed. 
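To back these observations up numerically, a minimal sketch along the following lines can be run after the cell above (it assumes the `result_add` object from that cell is still in scope):
# Inspect the additive decomposition components directly
trend = result_add.trend.dropna()       # centered moving-average trend (NaN-padded at the edges)
seasonal = result_add.seasonal          # repeating 12-month pattern
print("Trend at start:", trend.iloc[0])
print("Trend at end:  ", trend.iloc[-1])
print("Trend minimum around:", trend.idxmin())
print("Seasonal swing (max - min):", seasonal.max() - seasonal.min())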
Model SelectionFrom the above observations we can evidently conclude that **Holt-Winter additive model** would be an appropriate choice as there is a constant seasonality component along with an increasing trend.from statsmodels.tsa.holtwinters import ExponentialSmoothing # Scaling down the data by a factor of 1000 ts.set_scale(1000) # Training the model model = ExponentialSmoothing(ts.train,trend='additive',seasonal='additive',seasonal_periods=12).fit(damping_slope=1) plt.plot(ts.train.index,ts.train,label="Train") plt.plot(ts.test.index,ts.test,label="Actual") # Create a 5 year forecast plt.plot(model.forecast(60),label="Forecast") plt.legend(['Train','Actual','Forecast']) plt.gcf().autofmt_xdate() date_format = mpld.DateFormatter('%Y-%m') plt.gca().xaxis.set_major_formatter(date_format) plt.title("Sales Data Analysis (2013-2016)") plt.xlabel("Time") plt.ylabel("Sales (x1000)") plt.show()Validation of the modelLet's do a brief comparison between the additive and the multiplicative models.from statsmodels.tsa.holtwinters import ExponentialSmoothing ts = TimeSeries('dataset/monthly_sales.csv', train_size=0.8) # Additive model model_add = ExponentialSmoothing(ts.data.iloc[:,1],trend='additive',seasonal='additive',seasonal_periods=12,damped=True).fit(damping_slope=0.98) prediction = model_add.predict(start=ts.data.iloc[:,1].index[0],end=ts.data.iloc[:,1].index[-1]) plt.plot(ts.data.iloc[:,1].index,ts.data.iloc[:,1],label="Train") plt.plot(ts.data.iloc[:,1].index,prediction,label="Model") plt.plot(model_add.forecast(60)) plt.legend(['Actual','Model','Forecast']) plt.gcf().autofmt_xdate() date_format = mpld.DateFormatter('%Y-%m') plt.gca().xaxis.set_major_formatter(date_format) plt.title("Sales Data Analysis (2013-2016)") plt.xlabel("Time") plt.ylabel("Sales") plt.show() # Multiplicative model model_mul = ExponentialSmoothing(ts.data.iloc[:,1],trend='additive',seasonal='multiplicative',seasonal_periods=12,damped=True).fit() prediction = model_mul.predict(start=ts.data.iloc[:,1].index[0],end=ts.data.iloc[:,1].index[-1]) plt.plot(ts.data.iloc[:,1].index,ts.data.iloc[:,1],label="Train") plt.plot(ts.data.iloc[:,1].index,prediction,label="Model") plt.plot(model_mul.forecast(60)) plt.legend(['Actual','Model','Forecast']) plt.gcf().autofmt_xdate() date_format = mpld.DateFormatter('%Y-%m') plt.gca().xaxis.set_major_formatter(date_format) plt.title("Sales Data Analysis (2013-2016)") plt.xlabel("Time") plt.ylabel("Sales") plt.show() print(model_add.summary()) print(model_mul.summary())ExponentialSmoothing Model Results ================================================================================ Dep. Variable: endog No. Observations: 48 Model: ExponentialSmoothing SSE 5088109579.122 Optimized: True AIC 920.991 Trend: Additive BIC 952.801 Seasonal: Additive AICC 948.133 Seasonal Periods: 12 Date: Fri, 27 Mar 2020 Box-Cox: False Time: 16:57:56 Box-Cox Coeff.: None ================================================================================= coeff code optimized -------------------------------------------------[...]Lab 11: ODEs, Failure Rates, and Evolutionary Games%matplotlib inline import matplotlib.pyplot as plt import matplotlib.image as img import numpy as np import scipy as sp import scipy.stats as st from scipy.integrate import odeint print ('Modules Imported!')In this lab we're to going analyze evolutionary games using drift analysis. In order to do this, we first need to determine how to integrate ordinary differential equations. 
So, we begin with a brief overview of how to achieve this in Python. While we're at it, we'll do a short exercise about failure rate functions. Then we examine some evolutionary games, using both a stochastic model and a deterministic model, which involves the differential equations. Integration of ODEs This section gives an introduction to integrating ordinary differential equations (ODEs), with an application to the calculation of a CDF from a failure rate function. If you recall from your math classes, we can describe an ODE as the following: $\dot{y}=f(y),$ where both $y$ and the function $f$ are vector valued, and $\dot{y}$ is shorthand for $\frac{dy}{dt}.$ The variables being vector valued simply means that we could have a system of dependent one-dimensional (1D) ODEs as opposed to just a single one-dimensional variable. Let's look at the following system:\begin{eqnarray*}\binom{\dot{y_0}}{\dot{y_1}} = \binom{2y_1-0.1y_0}{-y_0}\end{eqnarray*}with the initial condition $y_{init}=\binom{y_0(0)}{y_1(0)}=\binom{1}{1}.$Our goal in using Python is not to find a closed-form solution to the ODE as you would have probably done in your math class, but to numerically integrate it and produce a graphical solution. In order to do this we use the odeint method from scipy.integrate. It takes a function with an initial value and specified times at which the solution is to be output. Internally, the numerical method will often use a much finer grid of time points to solve the ODE -- but that is none of your concern. The times you put in are the times at which the method needs to provide the values of the solution. Look how it is used below:# Simple example integrating a 2d ordinary differential equation (ode) dy/dt=f(y,t) # Note that the odeint package is imported in the code at the beginning of this file. # Function to integrate over def f(y,t): return np.array([2.*y[1]-.1*y[0],-y[0] ]) time=np.linspace(0.0,20.0,10000) # The times at which the solution is output. yinit=np.array([1.0,1.0]) # The initial condition y=odeint(f,yinit,time) plt.plot(y[:,0],y[:,1]) plt.title('trajectory of y up to time 20') plt.ylabel('y_1') plt.xlabel('y_0')Typically in a plot like this you would add an arrow to indicate the direction of time in the graph. However, we know the initial state is $y_{init}=\binom{1}{1}.$ So as time increases, the trajectory moves concentrically inwards in a clockwise fashion. Depending on how much you remember about differential equations, this solution should make sense. The equation happens to have a linear form and can be written as $\dot y = Ay$ where $A=\left( \begin{array}{cc} -0.1 & 2 \\ -1 & 0 \end{array} \right).$ The eigenvalues of $A$ are complex with negative real parts, so we would expect a decaying oscillatory behavior. (If you didn't follow that, don't worry about it.)As a simple application of integrating a 1D (one-dimensional) ODE, the following code calculates a CDF $F$ from its failure rate function $h.$ But first, let's look at some theory to remember what a failure rate function is. If we let $T$ be a positive random variable with a pdf $f_T$, then the failure rate function is defined by the following:$$h(t)=\lim_{\epsilon \to 0} \frac{P(t < T \leq t+\epsilon \mid T > t)}{\epsilon}$$Equivalently, an object having failure rate function $h$ means that for any $t > 0,$ given that the object is still working at time $t$, the probability it will fail in the next $\epsilon$ time units is $h(t)\epsilon + o(\epsilon)$. 
(As usual, $o(\epsilon)$ represents an error term such that $o(\epsilon)/\epsilon \to 0$ as $\epsilon\to 0.$) With a little bit more derivation (shown in Section 3.9 of your ECE 313 text), we can see that:$$h(t) = \frac{f_T(t)}{1-F_T(t)}$$Of course, the pdf $f_T$ is the derivative of the CDF $F_T$. Here's where your 1D ODE comes into play. Given a failure rate function, we want to be able to calculate the CDF. We can easily turn this into an ODE. $f_T(t) = (1-F_T(t))h(t)$Let $F_T(t) = y$ and $f_T(t) = \frac{dy}{dt} = \dot{y}$, then:$\dot{y} = (1-y)h(t)$Now we have our differential equation set up and we can solve it by the same method used in the previous example. For the code below, we define a particular bathtub shaped failure rate function to model the lifetime of a component such as a hard disk drive. There is a high, but decreasing initial rate of failure, followed be a period with a very low rate failure, followed by an increasing rate of failure. You should be able to think of reasons why an object might exhibit this type of failure rate function. Notice that the CDF behaves as we would expect: always between 0 and 1, and reactive to $h(t)$.If you examine the ECE 313 notes you'll see another way to determine $F_T,$ namely, using$F_T(t) = 1- \exp \left( \int_o^t -h(s) ds \right)$. The point here, though, is to get some practice usingodeint, instead of using this formula.# Calculation of CDF from its failure rate function, using dF/dt=(1-F(t))h(t) from scipy.integrate import odeint def h(t): # h is the failure rate function with a bathtub shape if t<1: return 1-t elif t<3: return 0.0 else: return np.sqrt(t-3) def Fdot(y,t): return (1-y)*h(t) t_max=8.0 # Select so that F(t_max) is very close to one time=np.linspace(0.0,t_max,1000) Finit=0.0 F=odeint(Fdot,Finit,time) plt.plot(time,list(map(h,time))) plt.text(4,h(4)+0.5, r'$h(t)$',fontsize=24) #Plots labels in the graph plt.text(4,0.5, r'$F(t)$',fontsize=24) plt.plot(time,F) plt.title('failure rate function $h$ and associated CDF, $F$') plt.axis(ymin=0,ymax=2.0) plt.ylabel('$h(t), F(t)$',fontsize=20) plt.xlabel('$t$',fontsize=20)**Problem 1:** Using the information on failure rate functions and the example above, do the following: Write code that, given the failure rate function of a random variable $T$, computes the mean lifetime, $E[T].$ Use this code to determine the mean lifetime for the example given above. (Hint: An easy way to do this problem is to use the area rule for expectations. Since $T$ is nonnegative, the area rule for expectations gives $E[T]=\int_0^\infty(1-F(t)) dt.$The integral $\int_0^\infty (1-F(t)) dt$ is approximately equal to$\int_0^{t_{max}} (1-F(t)) dt,$ which, by the definition of Riemann integration, is approximately equal to$h \sum_{i=1}^{1000} (1-F(h_i)) $ where $h=t_{max}/1000.$ Basically the code above returns the samples $F(h_i)$ so all you need to do is subtract these values from 1, sum them up and multiply by $h$.) What happens to $E[T]$ if the failure rate function is doubled for all time? Print out the new value of $E[T]$.# Your code here__Answer:__ (Your answer here) **End of Problem 1** Evolutionary games (replicator dynamics): Consider a population of individuals, where each individual is of some type. Suppose individuals have occasional pairwise encounters. During an encounter the two individuals involved play a two player symmetric game in which the strategies are the types of the individuals. 
As a result of the encounter, each of the two individuals produces a number of offspring of its same type, with the number being determined by a fitness table or, equivalently, a fitness matrix. For example, consider a population of crickets such that each cricket is either small or large. If two small crickets meet each other then they each spawn five more small crickets. If a small cricket encounters a large cricket then the small cricket spawns one more small cricket and the large cricket spawns eight new large crickets. If two large crickets meet then each of them spawns three new large crickets. We can summarize these outcomes using the following table:$$\begin{array}{c|cc} & small & large \\ \hline small & 5 & 1 \\ large & 8 & 3 \end{array}$$or by a fitness matrix$$F=\left( \begin{array}{cc} 5 & 1 \\ 8 & 3 \end{array}\right).$$ Associating 0 with "small" and 1 with "large," we see that if a type i individual encounters a type j individual, then the type i individual spawns F[i,j] new individuals of type i, and the type j individual spawns F[j,i] new individuals of type j. Below is a stochastic simulation for this game where we start with a population of four small crickets and four large crickets. It may be a useful exercise for you to think of how the values for $a$ and $b$ were created.# Stochastic simulation of replicator dynamics for cricket game F=np.array([[5,1],[8,3]]) # fitness matrix n=np.array([4,4]) #[number of small crickets, number of large crickets], initialized print (" State Fractions of each type") print ("Initially ", n, " ", n/1./np.sum(n)) T = 100 #total number of encounters for k in range(T): # k tracks number of encounters a=n[0]*(n[0]-1.0)/((n[0]+n[1])*(n[0]+n[1]-1)) # probability both players are type 0 b=2*n[0]*n[1]/((n[0]+n[1])*(n[0]+n[1]-1.0)) # probability one player of each type U=np.random.rand() if U<=a: n+=[2*F[0,0],0] print ("Round",k+1,"two small crickets meet ", n, n/1./np.sum(n)) elif a<U<=a+b: n+=[F[0,1],F[1,0]] print ("Round",k+1,"a small and a large cricket meet ", n, n/1./np.sum(n)) else: n+=[0,2*F[1,1]] print ("Round",k+1,"two large crickets meet ", n, n/1./np.sum(n))The above simulation displays, after each encounter, the number of individuals of each type and the fraction of individuals of each type. Run the simulation multiple times for different initial conditions. **Problem 2:** Answer the following questions. You do not need to provide code for this problem, but explain your answers. How quickly does the total population grow if initially all the crickets are small? How quickly does the total population grow if initially all the crickets are large? Do the fractions converge to a stable equilibrium in which the fractions of both small and large crickets are nonzero? (To explore convergence you may need to increase the number of encounters simulated.) __Answer:__ (Your answer here) **End of Problem 2** The code given next is essentially the same as above. One difference is that a time variable is added such that each individual cricket has encounters at an average rate of one per unit time, so the overall encounter rate is proportional to the total number of crickets. This results in exponential growth of the population with time. 
Another difference is that population sizes are saved and plotted.# Stochastic simulation of replicator dynamics for cricket game # Time is scaled so contact rate of each individual is one per unit time # The evolution trajectory is stored an plotted F=np.array([[5,1],[8,3]]) # fitness matrix n_max=100 # maximum number of interactions n=np.zeros((n_max,2),dtype=float) time=np.zeros(n_max,dtype=float) n[0,0], n[0,1] = 4,4 # initial [number of small crickets, number of large crickets] for k in range(n_max-1): time[k+1]=time[k]+2.0/sum(n[k,:]) # The 2 is here because two individuals make contact at a time a=n[k,0]*(n[k,0]-1.0)/((n[k,0]+n[k,1])*(n[k,0]+n[k,1]-1)) # probability both players are type 0 b=2*n[k,0]*n[k,1]/((n[k,0]+n[k,1])*(n[k,0]+n[k,1]-1.0)) # probability one player of each type U=np.random.rand() if U<=a: n[k+1,:]= n[k,:] + [2*F[0,0],0] elif aThe following code computes the deterministic analog of the cricket evolution. The basic idea is that if the current population is [n[0], n[1]] then the drift (i.e. the expected rate of increase) of n[0] is n[0] multiplied by the expected number of small crickets generated by a small cricket if an encounter happens. That is true because each of the existing small crikets is having encounters at expected rate one. To calculate the mean number of new small crickets generated by the encounter of a small cricket with a randomly selected other cricket, we use the law of total probability. The encountered cricket is type zero with probabiity n[0]/(n[0]+n[1]), which results in F[0,0] new crickets of type zero, and the encountered cricket is type one with probability n[1]/(n[0]+n[1]), which results in F[0,1] new crickets of type zero. Combining we get$$\dot{n}[0] = n[0]*(F[0,0]*n[0]+F[0,1]*n[1])/(n[0]+n[1])$$The equation just derived is based on the law of large numbers (LLN). If the population is fairly large then no one individual plays a major role in the evolution. Over a small period of time the ratios of small and large crickets is nearly constant, so the number of new crickets of each type in each time slot are nearly independent and identically distributed. The LLN suggests approximating the sum of number of individuals of each type by the expected numbers of individuals of each type. That's how we arrive at a deterministic differential equation.A similar expression holds for the derivative of n[1], and the two equations can be writen in matrix form as$$\dot{n} = diag(n)*F*n/sum(n)$$where diag(n) is the diagonal matrix with n[0] and n[1] on the diagonal, and "$*$" represents usual matrix multiplication. (In the python code below, "$*$" denotes element by element multiplication, which is used instead of creating a diagonal matrix, and np.dot is used for ordinary matrix multiplication.)# Deterministic (ODE) replicator dynamics for cricket game from scipy.integrate import odeint F=np.array([[5,1],[8,3]]) # fitness matrix def f(n,t): return (n*np.dot(F,n)/sum(n)/1.) ninit=[4,4] # initial [number of small crickets, number of large crickets] time=np.linspace(0.0,1.5, 1000) n=odeint(f,ninit,time) plt.plot(time,n[:,0],'orange') plt.plot(time,n[:,1],'blue') plt.xlabel('time') plt.text(1.1,n[800,0], r'small',fontsize=14) plt.text(1.1,n[800,1], r'large',fontsize=14) plt.ylabel('population size') plt.title("Numbers of small and large crickets vs. time (ode prediction)")**Problem 3:** Run the code above and compare the output to the output of the previous code for stochastic evolutionary dynamics. 
Try different initial conditions such as [0,4], [4,0], and [10,1]. Comment on how the result of this compares to our previous simulation. Is the behavior similar? __Answer__: (Your answer here) **End of Problem 3** If we let $\theta[0]=\frac{n[0]}{n[0]+n[1]}$ and $\theta[1]=\frac{n[1]}{n[0]+n[1]}$ then the vector $\theta=\binom{\theta[0]}{\theta[1]}$ describes the fractions of the population of each type. Its entries sum to one. Using the chain rule of calculus we can derive the following differential equation for $\theta$ from the above ODE for $n$:$$\dot{\theta} = diag(\theta) \left( F\theta - \mathbf{1} \theta^T F \theta \right)$$where $\mathbf{1}$ is the column vector of ones (the derivation is a bit involved; try deriving it by yourself if interested, but it is not required). The two components of the vector $F \theta$ give the average fitness level of small crickets and large crickets, respectively, in an encounter with a cricket selected randomly from the population. And $\theta^T F \theta$ is the weighted average of that vector, giving the average fitness over all crickets in the population. Thus, $\theta[0]$ is increasing exactly when the fitness of small crickets is greater than the average fitness.**Problem 4:**1. Modify the above code to integrate the equation for the $\theta$ vector to obtain $\theta$ vs. time. To check your answer, compare it to what you get by first computing the trajectory of $n$ and then calculating $\theta$ from $n.$ (The code given above for deterministic evolution of n works for any number of types, not just two types. A problem below involves three types of individuals, so it'd be nice if you could implement your code for computing $\theta$ to work for any number of types.) __Be careful when you write your program, since matrix multiplication in Python can be tricky. Understand "$*$" multiplication and np.dot multiplication first. After each small step, debug, confirm, then proceed.__ 2. Using the fact that $\theta[1]=1-\theta[0],$ a differential equation can be derived for the variable $\theta[0]$ alone. Plot the function $h$ on the interval $[0,1]$ such that $\dot{\theta}[0]=h(\theta[0]).$ Note that the horizontal axis of the graph is $\theta$, and the vertical axis is $\dot{\theta}$. (You can just do this numerically. It can also be done algebraically; in fact $h$ is a cubic polynomial, but it is a bit complicated to find the polynomial.)3. What are the zeroes of $h$ (those are the equilibrium points of $\theta[0]$)? Which of those are stable (in the sense that if $\theta[0]$ is slightly moved away from the equilibrium point, it returns to the equilibrium point)?# Your code here__Answer:__ (Your answer here) **End of Problem 4** The overall behavior in evolutionary games depends heavily on the fitness matrix $F.$ Another choice of $F$ corresponds to the so-called doves and hawks evolution game, for which the fitness table is$$\begin{array}{c|cc} & dove & hawk \\ \hline dove & 3 & 1 \\ hawk & 5 & 0 \end{array}$$or, equivalently, the fitness matrix$$F=\left( \begin{array}{cc} 3 & 1 \\ 5 & 0 \end{array}\right).$$ Think of the doves and hawks as two types of birds that need to share resources, such as food. (The hawks are so aggressive that they attack the doves.) A dove has higher fitness, 3, against another dove than against a hawk, 1. A hawk has a high fitness against a dove (5) but zero fitness against another hawk, because the hawks fight over their resources.**Problem 5:** 1. Repeat the deterministic (ode) approach above, but now for the doves and hawks evolution game. 
You can use either the random or deterministic evolution equations for the numerical analysis. 2. Comment and verify your assertions about equilibrium behavior by finding a differential equation for $\theta[0]$ and exploring its equilbrium points and their stability.# Your code here__Answer__: (Your answer here) **End of Problem 5** **Problem 6:** Find a 3x3 fitness matrix $F$ and an initial condition for an evolutionary game with three types of individuals such that the solution $\theta$ has a limit cycle, not just converging to a limit point. Does the limit cycle depend on the intial state for your fitness matrix? Hint 1: Adapt the code you wrote for Problem 4.Hint 2: Solve this problem by trial and error, i.e. trying different values of the entries in the fitness matrix. Think of the three types of individuals as three kinds of animals. What kind of relationship among these three animals would likely to cause an oscillating population for each animal?# Your code hereCell below downloads dataset from GDriveimport requests import tarfile def download_file_from_google_drive(id, destination): URL = "https://docs.google.com/uc?export=download" session = requests.Session() response = session.get(URL, params = { 'id' : id }, stream = True) token = get_confirm_token(response) if token: params = { 'id' : id, 'confirm' : token } response = session.get(URL, params = params, stream = True) save_response_content(response, destination) def get_confirm_token(response): for key, value in response.cookies.items(): if key.startswith('download_warning'): return value return None def save_response_content(response, destination): CHUNK_SIZE = 32768 with open(destination, "wb") as f: for chunk in response.iter_content(CHUNK_SIZE): if chunk: # filter out keep-alive new chunks f.write(chunk) file_id = '0BxYys69jI14kYVM3aVhKS1VhRUk' destination = 'UTKFace.tar.gz' download_file_from_google_drive(file_id, destination) tar = tarfile.open('UTKFace.tar.gz', "r:gz") tar.extractall() tar.close() %reload_ext autoreload %autoreload 2 %matplotlib inline !ls from IPython.core.debugger import set_trace from glob import glob from sklearn.cluster import KMeans from tqdm import tqdm_notebook as tqdm from mtcnn.mtcnn import MTCNN import pandas as pd import numpy as np import cv2 import matplotlib.pyplot as plt # Create DataFrame from files df = pd.DataFrame() for i, path in enumerate(glob('UTKFace/*jpg')): df.loc[i, 'path'] = path for i in tqdm(df.index): path = df.loc[i,'path'] gender = path.split('/')[1].split('_')[1] age = path.split('/')[1].split('_')[0] etnicity = path.split('/')[1].split('_')[2] df.loc[i, 'gender'] = gender df.loc[i, 'age'] = age df.loc[i, 'etnicity'] = etnicity df = df[df.etnicity.apply(lambda x: x.isnumeric())] df['age'] = df['age'].values.astype('int8') df['gender'] = df['gender'].values.astype('int8') df['etnicity'] = df['etnicity'].values.astype('int') print(df.shape) print(df.dtypes) df.describe() df.to_pickle('UTKFace_2.0.pkl') def resize_img(img, desired_height): ''' Resize image with desired height ''' height, width = img.shape[:2] resize_multiple = desired_height/height img_height = int(height / resize_multiple) img_width = int(height / resize_multiple) img = cv2.resize(img, None, fx=resize_multiple, fy=resize_multiple, interpolation=cv2.INTER_AREA) return img def centroid_histogram(clt): ''' Return histogram based on the number of pixels assigned to each cluster. 
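The histogram is normalized so that its entries sum to one, i.e. each entry is the fraction of image pixels assigned to the corresponding k-means color cluster.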
''' numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1) (hist, _) = np.histogram(clt.labels_, bins = numLabels) # Normalize the histogram, such that it sums to one. hist = hist.astype("float") hist /= hist.sum() return hist def get_top_colors(hist, centroids): ''' Return the colors with maximum percentage of area covered in descending order. ''' top1 = np.argsort(hist)[-1] top2 = np.argsort(hist)[-2] top3 = np.argsort(hist)[-3] top4 = np.argsort(hist)[-4] col1 = np.uint8(centroids[top1]) col2 = np.uint8(centroids[top2]) col3 = np.uint8(centroids[top3]) col4 = np.uint8(centroids[top4]) return col1, col2, col3, col4 def get_face_coords_MTCNN(img): ''' Returns coordinates for top left and right bottom corners of the face ''' results = detector_MTCNN.detect_faces(img) if results != []: b = results[0]['box'] x1 = int(b[0]) x1 = (x1 if x1>0 else 0) y1 = int(b[1]) y1 = (y1 if y1>0 else 0) x2 = int(b[0]) + int(b[2]) x2 = (x2 if x2 0: x1 = faceRects[0].left() x1 = (x1 if x1>0 else 0) y1 = faceRects[0].top() y1 = (y1 if y1>0 else 0) x2 = faceRects[0].right() x2 = (x2 if x2Medical Examiner Case ArchivesCook County (Chicago) medical examiner records, taken from [here](https://datacatalog.cookcountyil.gov/Public-Safety/Medical-Examiner-Case-Archive/cjeq-bs86) after discovery via [Data is Plural](https://tinyletter.com/data-is-plural). Do your importing/setupimport pandas as pd import numpy as np %matplotlib inlineRead in the data, check its row count and column typesdf = pd.read_csv("case-archive-encoded.csv") df.head(5) df.shape df.dtypesCleaning up your dataFirst you'll want to convert the `Race` and `Gender` columns from codes into actual text to make analysis easier. Gender codes* `-1` - `Data missing`* `0` - `Female`* `1` - `Male`* `2` - `Unknown` Race codes* `-1` - `Data missing`* `0` - `American Indian`* `1` - `Asian`* `2` - `Black`* `3` - `Other`* `4` - `Unknown`* `5` - `White`df.Gender.replace({ -1: "Data missing", 0: "Female", 1: "Male", 2: "Unknown" }, inplace=True) df.Gender.value_counts() df.Race.value_counts()What percent of the dataset is men, and what percent is women?It should display as **Male** and **Female**, not as numbers.df.Gender.value_counts(normalize=True) * 100Getting rid of "Data missing"`Unknown` means that officially the gender or race is unknown, while `Data missing` means the record is incomplete. That means "Data missing" should have been `NaN`!Go back to your `read_csv` many cells before and make it so that "Data missing" is automatically set as `NaN`.- *Tip: Do not use `.replace` for this one!*- *Tip: Look at the options for `read_csv`, there's something that lets you specify missing values*- *Tip: It isn't `"Data missing"` - think about how you already replaced*- *Tip: Be sure you're using an array when you tell it what the 'missing' options are* After you've done this, re-run all of the the previous cells and confirm that `"Data missing"` does not exist any more What is the most common race in the dataset? We want percentages.We'll come back to this later, I'm just having you check the column for now.df.Race.value_counts(normalize=True) * 100Do a `.value_counts()` on the `Opioid Related` columndf.columns = [c.replace(' ', '_') for c in df.columns] df.Opioid_Related.value_counts()That's weird. Did everyone die from opioids? Try again, but including missing data.df.Opioid_Related.isnull().value_counts()Cleaning up True/False columnsFor some reason in this dataset, the True/False columns are either `True` or `NaN`. 
`NaN` causes a lot of problems, I'd rather have it be false.You can use [`fillna`](http://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.fillna.html) to fill in empty data - it's like `.replace` but for `NaN`. Replace all `NaN` values with `False` for the "Gun Related" and "Opioid Related" columns.df.Gun_Related.fillna(False, inplace=True) df.Opioid_Related.fillna(False, inplace=True)Do another value counts on Opioid Related to make sure it has both True and False valuesdf.Opioid_Related.value_counts()Back to analysis! What's the average age people were when they died?df.Age.mean()Let's look at how the oldest people diedWe're just going to browse. Read through how the **oldest 30 people died.**df.sort_values(by='Age', ascending=False).head(30).Primary_Cause.value_counts()Seems like a lot of problems with fractures What's the median age of someone dying from a cause that involves a fracture?Are fractures especially dangerous for the elderly?- *Tip: Filter for a cause that involves a fracture, then take the median age*- *Tip: If you get a "cannot index NA values" error, the problem is it's trying to search `NaN` values and doesn't know what to do with them. You need to tell pandas to count `NaN` as false by setting another option - it isn't `NaN=False`, but it's close!*fracture_deaths = df[df.Primary_Cause.str.contains("FRACTURE", na=False)] fracture_deaths.Age.median()To get a "compared to what?", what's the median age of _anyone_ dying an accidental death?df.Manner_of_Death accidental_deaths = df[df.Manner_of_Death.str.contains("ACCIDENT", na=False)] accidental_deaths.Age.median()What's the median age of each manner of death?It looks like different kinds of death might happen to different ages of people. Let's investigate that further.df.Manner_of_Death.value_counts() natural_deaths = df[df.Manner_of_Death.str.contains("NATURAL", na=False)] natural_deaths.Age.median() homicide_deaths = df[df.Manner_of_Death.str.contains("HOMICIDE", na=False)] homicide_deaths.Age.median() suicide_deaths = df[df.Manner_of_Death.str.contains("SUICIDE", na=False)] suicide_deaths.Age.median()Who is the oldest homicide victim?It looks like homicide is for young people, so maybe we'll find an interesting outlier?suicide_deaths.sort_values(by='Age', ascending=False).head(1)Investigating toxicity-related homicidesShe was old, and was purposefully overdosed on morphine and hydrocodone. Might have been euthenasia? Let's find similar cases. Find every homicide where the primary cause of death is some sort of toxicityToxicity can just overdose. You should have **ten rows**.- *Tip: If you're doing this as one statement, make sure you use your parentheses correctly. If you leave them out, you'll have zero rows*- *Tip: You could make a homicides-only dataframe if you wanted to*toxicity_deaths = homicide_deaths[homicide_deaths.Primary_Cause.str.contains("TOXICITY", na=False)] toxicity_deathsOkay, nope, we were wrong.Those were almost **all from fires**. Apparently homicide is not the best place to go looking for toxicity. What's the most popular manner of death for primary causes involving toxicity?- *Tip: Remember that `['colname']` is the same as `.colname`. 
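One way to follow that tip (a quick sketch, assuming the `accidental_toxicity` dataframe built above) is to loop over the four substances and count matches in one pass; the cells below arrive at the same counts substance by substance.
for drug in ["HEROIN", "FENTANYL", "COCAINE", "ETHANOL"]:
    count = accidental_toxicity.Primary_Cause.str.contains(drug).sum()
    print(drug, count)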
You can't do `.col with spaces` so you'll need to do `['col with spaces']` a lot in this dataset- *Tip: Or I guess if you really wanted to, you could rename your columns to have spaces in them (IF YOU DO THIS DON'T DO IT IN EXCEL BECAUSE IT WILL PROBABLY BREAK YOUR CSV.)*df[df.Primary_Cause.str.contains("TOXICITY", na=False)].Manner_of_Death.value_counts()Okay, toxicity deaths (overdoses) are mostly accidents. Let's look at the first 30 accidental deaths involving toxicity.- *Tip: Remember your parentheses!*accidental_toxicity = accidental_deaths[accidental_deaths.Primary_Cause.str.contains("TOXICITY", na=False)].head(30) accidental_toxicityWow, that's a lot of drug overdoses. What's more popular for overdosing: heroin, fentanyl, cocaine, or ethanol?You can count something like "COMBINED ETHANOL, NORDIAZEPAM, AND FENTANYL TOXICITY" under both ethanol and fentanyl.- *Tip: Search for them individually*accidental_toxicity[accidental_toxicity.Primary_Cause.str.contains("HEROIN", na=False)].Primary_Cause.value_counts().sum() accidental_toxicity[accidental_toxicity.Primary_Cause.str.contains("FENTANYL", na=False)].Primary_Cause.value_counts().sum() accidental_toxicity[accidental_toxicity.Primary_Cause.str.contains("COCAINE", na=False)].Primary_Cause.value_counts().sum() accidental_toxicity[accidental_toxicity.Primary_Cause.str.contains("ETHANOL", na=False)].Primary_Cause.value_counts().sum()Cleaning up Primary CauseLet's stop investigating for a second and maybe clean up this "Primary Cause" column. What are the most common Primary Cause of death? Include `NaN` values- *Tip: There is an option that keeps `NaN` values when counting things in a column.*df.Primary_Cause.value_counts()That was horrible looking. I don't want to read through that - how many `NaN` causes of death are there?- *Tip: You can use `isnull()` to see if it's missing data, but how do you count the results?*df.Primary_Cause.isnull().value_counts()Remove all rows where the primary cause of death has not been filled out.- *Tip: confirm that you have 22510 rows when you're done*df.dropna(subset=['Primary_Cause'], inplace=True) df.Primary_Cause.notnull().value_counts()Cardiovascular diseaseCardiovascular disease (heart disease) is the number one or number two killer in America. Filter for only rows where cardiovascular disease was a primary cause- *Tip: I hope you know how to deal with the `NaN` error message by now!*df[df.Primary_Cause.str.contains("CARDIOVASCULAR DISEASE", na=False)]What are the different types?df[df.Primary_Cause.str.contains("CARDIOVASCULAR DISEASE", na=False)].Primary_Cause.value_counts()Replace all of those with a nice simple 'CARDIOVASCULAR DISEASE'- *Tip: you can use `.replace` or `.str.replace`, but they each involve different things! I suggest `.replace`, it looks a little cleaner in this situation*- *Tip: for `.replace`, you need to give it more options than usual*- *Tip: for `.str.replace`, it won't automatically save back into the column, you need to do that yourself*df.Primary_Cause.replace(".*CARDIOVASCULAR DISEASE","CARDIOVASCULAR DISEASE", regex=True, inplace=True) df.Primary_Cause.replace("CARDIOVASCULAR DISEASE.*","CARDIOVASCULAR DISEASE", regex=True, inplace=True) df[df.Primary_Cause.str.contains("CARDIOVASCULAR DISEASE", na=False)].Primary_Cause.value_counts()Check the top 5 primary causes. Cardiovascular disease should be first with about 28.4%df.sort_values('Primary_Cause', ascending=False).Primary_Cause.value_counts(normalize = True).head(5) *100We could also clean up gunshots, but... 
let's just move on. The Opioid EpidemicAmerica has a [big problem with fentanyl](https://www.theatlantic.com/health/archive/2018/05/americas-opioid-crisis-is-now-a-fentanyl-crisis/559445/) and other opioids. Find all of the rows where fentanyl was part of the primary cause of deathWe don't need `na=False` any more because we *dropped the rows without primary causes*.df[df.Primary_Cause.str.contains("FENTANYL")]Fentanyl and raceIn the late 80's and 90's, the [crack cocaine epidemic](https://en.wikipedia.org/wiki/Crack_epidemic) swept through inner cities in the US. It was treated primarily as a crime problem, while many people say fentanyl and heroin overdoses are being treated as a medical problem due to the racial differences - the crack epidemic mainly affected Black communities, while fentanyl seems to be a problem for everyone. How does the racial breakdown of fentanyl deaths compare to the racial breakdown of other causes of death? How about compared to causes of accidental death?fentanyl_deaths = df[df.Primary_Cause.str.contains("FENTANYL")].Race.value_counts() fentanyl_deaths other_causes = df[df.Primary_Cause.str.contains("FENTANYL") == False].Race.value_counts() other_causes accidents = df[df.Manner_of_Death.str.contains("ACCIDENT", na=False)].Race.value_counts() accidentsNow compare it to homicideshomicides = df[df.Manner_of_Death.str.contains("HOMICIDE", na=False)].Race.value_counts() homicidesNow compare it to suicidesuicides = df[df.Manner_of_Death.str.contains("SUICIDE", na=False)].Race.value_counts() suicidesThese differences seem kind of crazyLet's look at all of these at once: I want a breakdown of the most common manners of death for **men**, based on race.Percentages, please, not raw numbers.You can look at women, too, although I think the numbers are more surprising for men.df[df.Gender == 'Male'].groupby('Race').Manner_of_Death.value_counts(normalize = True) * 100 df[df.Gender == 'Female'].groupby('Race').Manner_of_Death.value_counts(normalize = True) * 100Back to drugs: what is the most popular opioid-related primary cause of death that does NOT involve fentanyl?- *Tip: Pay attention to your column names! There's one that might tell you if something is opioid-related...*- *Tip: Usually you can use `not` or `!` to mean "not", but for pandas and `.isin` or `.str.contains` you need to use `~`*- *Tip: For "and" in pandas you'll need to use `&`, and make sure all of your clauses have parens around them, e.g. `df[(df.col1 == 'A') & (df.col2 == 'B')]`.*opioid_no_fentanyl = df[~df['Primary_Cause'].str.contains('.*FENTANYL', na=False) & (df['Opioid_Related']==True)] opioid_no_fentanyl.Primary_Cause.value_counts().head(1)How do heroin and fentanyl deaths compare? 
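Before counting, here is the boolean-mask pattern the tips above describe - `~` for "not", `&` for "and", and parentheses around every clause - as a minimal sketch against the same `df`:

```python
# Each condition is a boolean Series; combine them with & / | and negate with ~.
has_heroin = df['Primary_Cause'].str.contains('HEROIN', na=False)
has_fentanyl = df['Primary_Cause'].str.contains('FENTANYL', na=False)
is_opioid = df['Opioid_Related'] == True

# Parentheses matter when the clauses are written inline, e.g.
# df[(df['Opioid_Related'] == True) & ~df['Primary_Cause'].str.contains('FENTANYL', na=False)]
heroin_only = df[is_opioid & has_heroin & ~has_fentanyl]
both = df[has_heroin & has_fentanyl]
len(heroin_only), len(both)
```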
Count the number of deaths involving heroin, the number of deaths involving fentanyl, and the number of deaths involving both.- *Tip: This will take 3 different statements*- *Tip: You should get `813` that include both*opioid_deaths = df[df['Opioid_Related']==True] heroin_NOT_fentanyl = opioid_deaths[opioid_deaths['Primary_Cause'].str.contains('.*HEROIN' , na=False) & (~opioid_deaths['Primary_Cause'].str.contains('.*FENTANYL' , na=False))] len(heroin_NOT_fentanyl) fentanyl_NOT_heroin = opioid_deaths[~opioid_deaths['Primary_Cause'].str.contains(".*HEROIN", na=False) & (opioid_deaths['Primary_Cause'].str.contains(".*FENTANYL", na=False))] len(fentanyl_NOT_heroin) fentanyl_and_heroin = opioid_deaths[opioid_deaths['Primary_Cause'].str.contains('.*HEROIN', na=False) & (opioid_deaths['Primary_Cause'].str.contains('.*FENTANYL', na=False))] len(fentanyl_and_heroin)That's weird. I heard fentanyl really surpassed heroin in the past few years. Let's see how this looks over time. Pull the year out and store it in a new column called `year`If you run `df['Date of Incident'].str.extract("(\d\d\d\d)", expand=False)`, it will pull out the year of each incident. **Store this in a new column called `year`.**(It's regular expression stuff. `\d\d\d\d` means "four numbers in a row", and `()` + `.str.extract` means "pull it out".)df['Year'] = df['Date_of_Incident'].str.extract("(\d\d\d\d)", expand=False) dfWhat is the datatype of the new `year` column?df.dtypesConvert this new column to an integer and save it back on top of itself- *Tip: This uses your friend `.astype`*- *Tip: Make sure to save it back on top of itself!*df['Year'] = df.Year.astype(int)Confirm the column is a numberdf.dtypesPlot the number of opioid deaths by yearIf you'd like to make it look nicer, do some sorting and get rid of 2018.- *Tip: Think of it in a few steps. First, filter for opioid deaths. Then get the number of deaths for each year. Then plot it.*- *Tip: What's up with 2018? Why's it look so weird? Can you get rid of it? Remember to use lots of parens!*- *Tip: Make sure the earliest year is on the left. You might need to sort by something other than values.*opioid_deaths = df[df['Opioid_Related'] == True] opioid_deaths = opioid_deaths[opioid_deaths.Year != 2018] opioid_deaths.Year.value_counts().plot(kind ='barh')Plot the number of fentanyl deaths by year, and the number of heroin deaths by year- *Tip: You'll want to look up how to use `ylim` - it will let you set each graphic to use the same scale. This should be separate graphics.*- *Tip: Pay attention to the numbers on your axes. 
`sort_index()` will be your friend.*- *Tip: You should probably get rid of 2018*heroin_NOT_fentanyl = df[df['Primary_Cause'].str.contains('.*HEROIN' , na=False) & (~df['Primary_Cause'].str.contains('.*FENTANYL' , na=False))] heroin_NOT_fentanyl = heroin_NOT_fentanyl[heroin_NOT_fentanyl.Year != 2018] heroin_NOT_fentanyl.Year.value_counts().sort_index(axis=0).plot(kind = 'bar', ylim=5) fentanyl_NOT_heroin = df[~df['Primary_Cause'].str.contains('.*HEROIN' , na=False) & (df['Primary_Cause'].str.contains('.*FENTANYL' , na=False))] fentanyl_NOT_heroin = fentanyl_NOT_heroin[fentanyl_NOT_heroin.Year != 2018] fentanyl_NOT_heroin.Year.value_counts().sort_index(axis=0).plot(kind = 'bar', ylim=5)How does this compare to gun deaths?gun_deaths = df[df['Primary_Cause'].str.contains('.*GUN.*', na=False) | (df['Primary_Cause'].str.contains('.*BULLET.*' , na=False)) | (df['Primary_Cause'].str.contains('.*SHOT.*' , na=False))] gun_deaths = gun_deaths[gun_deaths.Year != 2018] gun_deaths.Year.value_counts().sort_index(axis=0).plot(kind = 'bar', ylim=5)But hey: numbers can lie pretty easily!The numbers are just so low in 2014 and much higher in 2017. What's going on there?Well, maybe **there just isn't as much data from the earlier years**. Plot how many entries there are for each year.gun_deaths.Year.value_counts() gun_deaths.Year.value_counts().sort_index(axis=0).plot(kind='bar', figsize=(10,8))And we don't know the best way to fix that up yet, so instead I'm going to give you a present. Is the true lesson here, don't move to Cook County, Illinois?Cook County is basically Chicago. It's probably just certain areas that are trouble, right? Let's investigate that without even having a clue how mapping works. Fun bonus: Making cheating maps Make a new dataframe of every death in the actual city of Chicagochicago = df[df.Incident_City == 'CHICAGO']Confirm this new dataframe has 13,627 rowschicago.shape chicago.describe()Use lat and long in the worst way possible to make a mapUse `longitude` and `latitude` and `plot` to make a rough map of the city. Chicago [looks like this](https://en.wikipedia.org/wiki/File:DuPage_County_Illinois_Incorporated_and_Unincorporated_areas_Chicago_Highlighted.svg)- *Tip: Use the `latitude` and `longitude` columns*- *Tip: You don't want a line graph, of course. Or a bar. What kind is the kind with dots on it?*- *Tip: Use something like like `figsize=(10,5)` to specify the height and width of the map (but, you know, with better numbers that make it look like chicago)*chicago.plot.scatter(x='latitude', y='longitude', figsize=(11,9)) ##Pretty sure it should be inverted (??)Now let's find out where to liveMake a map of every non-homicide death in Chicago, then plot the homicides on top of it.Use the `ax=df.plot` trick from the beer cans assignment to plot all of the rows representing homicides vs non-homicides. You can use `color='red'` to make one of them red, and `alpha=0.05` to make each mark very transparent to allow them to layer on top of each other.ax = chicago[chicago.Primary_Cause.str.contains("HOMICIDE") == False].plot() #Couldn't figure that out, quite sure it's not supposed to look like thisNever tell anyone I let you do that.But you want to see something actually completely legitimately insane?**Chicago is one of the most segregated cities in America.** If you'd like to see this for yourself, make a map of `Race`. 
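For reference, here is one way the `ax=` layering described above could look - a minimal sketch, assuming the `chicago` dataframe built earlier and using `Manner_of_Death` (which is where homicides are recorded in this dataset). Plotting longitude on the x-axis and latitude on the y-axis also gives the conventional map orientation, and the same pattern carries over to the race map:

```python
# Base layer: every non-homicide death, drawn faintly.
ax = chicago[~chicago.Manner_of_Death.str.contains("HOMICIDE", na=False)].plot.scatter(
    x='longitude', y='latitude', alpha=0.05, figsize=(8, 10))

# Overlay: homicides in red on the same axes, passed in via ax=.
chicago[chicago.Manner_of_Death.str.contains("HOMICIDE", na=False)].plot.scatter(
    x='longitude', y='latitude', alpha=0.05, color='red', ax=ax)
```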
Plot black vs white in a way similar to what we did above.Pandas PipesCleaner Data Analysis with Pandas Using Pipes https://towardsdatascience.com/cleaner-data-analysis-with-pandas-using-pipes-4d73770fbf3c Great idea for a standard set of utility cleanup functions in a libraryData: https://www.kaggle.com/yoghurtpatil/direct-marketing?select=DirectMarketing.csvimport numpy as np import pandas as pd marketing = pd.read_csv("./data/DirectMarketing.csv") marketing.head()Functions used by PIPEdef drop_missing(df): thresh = len(df) * 0.6 df.dropna(axis=1, thresh=thresh, inplace=True) return df def remove_outliers(df, column_name): low = np.quantile(df[column_name], 0.05) high = np.quantile(df[column_name], 0.95) return df[df[column_name].between(low, high, inclusive=True)] def to_category(df): cols = df.select_dtypes(include='object').columns for col in cols: ratio = len(df[col].value_counts()) / len(df) if ratio < 0.05: df[col] = df[col].astype('category') return df def copy_df(df): return df.copy()Pipe to clean a copy of the datamarketing_cleaned = (marketing. pipe(copy_df). pipe(drop_missing). pipe(remove_outliers, 'Salary'). pipe(to_category)) print(marketing.shape) marketing.dtypes print(marketing_cleaned.shape) marketing_cleaned.dtypes(900, 10)In this tutorial, we will introduce readers to how to write new ```pcells```.Each layout class in ```pirel``` is derived by ```pt.LayoutPart```, which defines common methods such as ```view()```, ```get_params()``` , ```set_params()``` etc. Define A ```pirel``` pcellimport pirel.tools as pt import pirel.pcells as pc class NewLayoutClass(pt.LayoutPart): pass o=NewLayoutClass(name='New Born Class') oThe previous code fails because, as a minimum, each class derived from ```LayoutPart``` needs to define an implementation of the abstract method ```draw()```:class NewerLayoutClass(pt.LayoutPart): def draw(self): from phidl.geometry import ring return ring() pass o=NewerLayoutClass(name='New Born Class') o o.view()Add layout parameters to a ```pirel``` pcellClasses in ```pirel``` can have layout parameters.Let's imagine we want to define a class with an ```pc.IDT``` component and a ring on a layer ```ring_layer``` with inner diameter ```inner_diameter``` and outer diameter ```outer_diameter```.To instantiate parameter, we use the ```pt.LayoutParamInterface()``` descriptor.from pirel.tools import LayoutParamInterface class PirelRing(pc.LayoutPart): ring_layer=LayoutParamInterface() inner_diameter=LayoutParamInterface() outer_diameter=LayoutParamInterface() def __init__(self,*a,**k): super().__init__(*a,**k) self.ring_layer=3 self.inner_diameter=20.0 self.outer_diameter=30.0 def draw(self): from phidl.geometry import ring return ring(radius=self.inner_diameter/2,width=(self.outer_diameter-self.inner_diameter)/2,layer=self.ring_layer) o=PirelRing('newring') o.view()Note that as long as instance parameters are initialized in ```__init__``` they are automatically included in the list of instance parameters (gettable through ```get_params()```, settable via ```set_params()```).The set value of each ```LayoutParamInterface()``` is actually used to validate assignment:o.ring_layer='a' #throws errorSince an _int_ was assigned to _ring_layer_ in __init__, this parameter cannot be set to something that is not an _int_o.ring_layer=4 #no problem! 
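For intuition, the kind of type check described above can be illustrated with a plain Python descriptor. This is only an illustrative sketch of the pattern, not pirel's actual `LayoutParamInterface` implementation:

```python
class TypedParam:
    """Descriptor that locks an attribute to the type of its first assigned value."""

    def __set_name__(self, owner, name):
        self.name = "_" + name

    def __set__(self, obj, value):
        current = getattr(obj, self.name, None)
        # After the first assignment, reject values of a different type.
        if current is not None and not isinstance(value, type(current)):
            raise ValueError(
                f"{self.name[1:]} expects {type(current).__name__}, "
                f"got {type(value).__name__}")
        setattr(obj, self.name, value)

    def __get__(self, obj, objtype=None):
        return getattr(obj, self.name)

class Demo:
    layer = TypedParam()

d = Demo()
d.layer = 3       # first assignment fixes the type to int
d.layer = 4       # fine
# d.layer = 'a'   # would raise ValueError, mirroring o.ring_layer = 'a' above
```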
o.view()Also note that instance attributes can be accessed in _snake_case_style_, while when exported they are accessed in _UpperCaseStyle_ Define components as parts of a ```pcell``` ```components``` parameter are passed to the instance as well, by overriding the static ```get_components()``` method.```get_components()``` returns a dict : each ```key``` specifies the component tag to prepend to the instance, each ```value``` specifies the class of the component to be included in the class.class IDTRing(PirelRing): @staticmethod def get_components(): return {'IDT':pc.IDT} pass n=IDTRing() nNote that ```IDTRing``` has all the parameters of ```PirelRing``` but also the parameters of ```pc.IDT```, appended to the identifier ```IDT``` which was the ```pc.IDT``` key in ```get_components()```.```IDTRing``` has also an attribute that contains an instance of ```pc.IDT```, and can be accessed by lowercasing the key ```IDT``` to ```idt```:n.idtIn this way, ```IDTRing``` can access all methods of ```IDT```.For example, ```IDTRing``` can redefine the ```draw()``` method so to reuse the ```draw()``` of its ```components``` and its parents.class IDTRing(PirelRing): def draw(self): from phidl.device_layout import Group from phidl.device_layout import Device ring_cell=PirelRing.draw(self) idt_cell=self.idt.draw() out_cell=Device(self.name) ring_ref=out_cell<**Load the Data**fashion_mnist = tf.keras.datasets.fashion_mnist (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz 32768/29515 [=================================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz 26427392/26421880 [==============================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz 8192/5148 [===============================================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz 4423680/4422102 [==============================] - 0s 0us/step**Prepare Data**num_classes=10 image_height=28 image_width=28 image_channels=1 class_names= ['T-shirt/top','Trouser','Pullover','Dress','Coat/jacket','Sandal','Shirt','Sneaker','Bag','Ankel boot'] # Shape the data using channels_last ordering # num_samples*num_rows*num_columns*num_channels x_train=x_train.reshape((x_train.shape[0],image_height,image_width,image_channels)) x_test=x_test.reshape((x_test.shape[0],image_height,image_width,image_channels)) x_test.shape # scaling data in the range of [0.0,1.0] x_train, x_test = x_train.astype("float64") / 255.0, x_test.astype("float64")/ 255.0 # Pad the data to 32x32 to use in Lenet network x_train=np.pad(x_train,((0,0),(2,2),(2,2),(0,0)),mode='constant') x_test=np.pad(x_test,((0,0),(2,2),(2,2),(0,0)),mode='constant') # Display data_dimensions print("x_train shape :",x_train.shape) print("x_test shape :",x_test.shape) print("y_train shape :",y_train.shape) print("y_test shape :",y_test.shape) # parameters for training data set num_classes=10 image_height=32 image_width=32 image_channels=1 # import necessary packages from tensorflow.keras import backend from tensorflow.keras import layers from tensorflow.keras import models # define model as a class class LeNet: # INPUT => CONV => TANH => AVG-POOL => CONV => TANH => AVG-POOL => FC => TANH => FC => TANH => FC => SOFTMAX @staticmethod def 
init(num_classes,image_height,image_width,image_channels,weightsPath=None): # if we are using channel first then update the shape if backend.image_data_format()=='channels_first': inputShape=(image_channels,image_height,image_width) else: inputShape=(image_height,image_width,image_channels) # initalize the model model=models.Sequential() # Define first set of CONV => ACTIVATION => POOL layers model.add(layers.Conv2D(filters=6,kernel_size=(5,5),strides=(1,1),padding='valid',activation=tf.nn.tanh,input_shape=inputShape)) model.add(layers.AveragePooling2D(pool_size=(2,2),strides=(2,2))) # Define second set of CONV => ACTIVATION => POOL layers model.add(layers.Conv2D(filters=6,kernel_size=(5,5),strides=(1,1),padding='valid',activation=tf.nn.tanh)) model.add(layers.AveragePooling2D(pool_size=(2,2),strides=(2,2))) # Flatten the convolution volume to fully connected layers model.add(layers.Flatten()) # Define the first FC => ACTIVATION layers model.add(layers.Dense(units=120,activation=tf.nn.tanh)) # Define the second FC => ACTIVATION layers model.add(layers.Dense(units=84,activation=tf.nn.tanh)) # lastly define the softmax classifier model.add(layers.Dense(units=num_classes,activation=tf.nn.softmax)) # if weights path is supplied (indicating that model was pre-trained), then load the weights if weightsPath is not None : model.load_weights(weightsPath) # return the constructed network architecture return model**Compile Model**# initialize the model model=LeNet.init(num_classes=num_classes,image_height=image_height,image_width=image_width,image_channels=image_channels,weightsPath=None) # compile the model model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=.01), #Stochiostic Gradient Descent loss='sparse_categorical_crossentropy', metrics=['accuracy']) # Model summary model.summary() # Define callback function for training termination criteria # accuracy_cutoff=.99 class myCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs=None): if(logs.get('accuracy')>0.99): print("\nReached 99% accuracy so cancelling training!") self.model.stop_training = True # initialize training configuration batch_size=128 epochs=100 # run training history = model.fit(x=x_train, y=y_train, validation_data=(x_test,y_test), epochs=epochs, batch_size=batch_size, callbacks=[myCallback()], verbose=1)Epoch 1/100 469/469 [==============================] - 3s 6ms/step - loss: 1.3074 - accuracy: 0.6046 - val_loss: 0.9111 - val_accuracy: 0.6925 Epoch 2/100 469/469 [==============================] - 3s 6ms/step - loss: 0.8024 - accuracy: 0.7197 - val_loss: 0.7496 - val_accuracy: 0.7265 Epoch 3/100 469/469 [==============================] - 3s 6ms/step - loss: 0.6909 - accuracy: 0.7494 - val_loss: 0.6778 - val_accuracy: 0.7493 Epoch 4/100 469/469 [==============================] - 3s 6ms/step - loss: 0.6324 - accuracy: 0.7680 - val_loss: 0.6319 - val_accuracy: 0.7657 Epoch 5/100 469/469 [==============================] - 3s 6ms/step - loss: 0.5926 - accuracy: 0.7846 - val_loss: 0.5979 - val_accuracy: 0.7811 Epoch 6/100 469/469 [==============================] - 3s 6ms/step - loss: 0.5622 - accuracy: 0.7963 - val_loss: 0.5730 - val_accuracy: 0.7925 Epoch 7/100 469/469 [==============================] - 3s 6ms/step - loss: 0.5383 - accuracy: 0.8058 - val_loss: 0.5530 - val_accuracy: 0.7985[...]**Evaluating Training Performance**%matplotlib inline import matplotlib.pyplot as plt acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss=history.history['loss'] 
val_loss=history.history['val_loss'] epochs_range = range(len(acc)) plt.figure(figsize=(10, 10)) plt.subplot(2, 1, 1) plt.plot(epochs_range, acc, label='Training Accuracy') plt.plot(epochs_range, val_acc, label='Validation Accuracy') plt.legend(loc='lower right') plt.title('Training and Validation Accuracy') plt.subplot(2, 1, 2) plt.plot(epochs_range, loss, label='Training Loss') plt.plot(epochs_range, val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.title('Training and Validation Loss') plt.show() # show accuracy on testing set test_loss, test_acc = model.evaluate(x_test, y_test, verbose=1, batch_size=batch_size) print("test accuracy : {:.2f}%".format(test_acc*100)) model.save_weights("/usr/LeNet",overwrite=True)**Evaluate Pre-trained Model**# init model and load the model weights # initialize the model model=LeNet.init(num_classes=num_classes,image_height=image_height,image_width=image_width,image_channels=image_channels,weightsPath="/usr/LeNet") # compile the model model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=.01), #Stochiostic Gradient Descent loss='sparse_categorical_crossentropy', metrics=['accuracy']) # Model summary model.summary() # show accuracy on testing set batch_size=128 test_loss, test_acc = model.evaluate(x_test, y_test, verbose=1, batch_size=batch_size) print("test accuracy : {:.2f}%".format(test_acc*100))79/79 [==============================] - 0s 3ms/step - loss: 0.3203 - accuracy: 0.8864 test accuracy : 88.64%**Model Predictions**%matplotlib inline import numpy as np import cv2 import matplotlib.pyplot as plt # set up matplotlib figure and size it to fit 3*4 pics nrows= 3 ncolumns= 4 fig = plt.gcf() fig.set_size_inches(ncolumns*4,nrows*4) # Randomly select few testing digits num_predictions=12 test_indices=np.random.choice(np.arange(0,len(y_test)),size=(num_predictions,)) test_images=np.stack([x_test[i] for i in test_indices]) #not understood how it is printed based on shape test_labels=np.stack([y_test[i] for i in test_indices]) #and not understood how it is stacked # # compute predictions predictions=model.predict(test_images) for i in range(num_predictions): # select the most probable class prediction=np.argmax(predictions[i]) # rescale the test image image=(test_images[i]*255).astype("uint8") # resize the image from 28x28 to 96x96 so that we can see it better image=cv2.resize(image,(96,96),interpolation=cv2.INTER_CUBIC) # INTER_CUBIC – a bicubic interpolation over 4×4 pixel neighborhood #how 32*32*1 is converted to 96*96 # convert gray scale image to RGB color image=cv2.merge([image]*3) # select prediction text color if prediction == test_labels[i]: rgb_color = (0, 255, 0) #green for correct predictions else: rgb_color = (255, 0, 0) #red for wrong predictions # show the image and prediction cv2.putText(image, str(class_names[prediction]), (0,10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, rgb_color, 1) # set up subplot, subplot indices start at 1 sp = plt.subplot(nrows, ncolumns, i+1, title="label: %s" %class_names[test_labels[i]]) sp.axis("Off") #don't show axes(or gridlines) plt.imshow(image) #not understood difference between plt.imshow() and plt.show() # show figure matrix plt.show()To fill in, follow DETR colabfound here https://colab.research.google.com/github/facebookresearch/detr/blob/colab/notebooks/DETR_panoptic.ipynbscrollTo=QD4mQxHIqGCrimport torch from torchvision import transforms as T from PIL import Image path = 'sample_pics/idris.jpg' pic = Image.open(path) model = torch.hub.load('facebookresearch/detr', 'detr_resnet50', 
pretrained=True) model.eval() transform = T.Compose([ T.Resize(800), T.ToTensor(), T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) tensored = transforms.ToTensor()(pic) tensored = tensored.unsqueeze(0) a = model(tensored) a['pred_logits'].shape a['pred_boxes'].shapeCO2 data*** Historical CO2 Record from the Vostok Ice Core ****** ****** Source: ****** ****** ****** Laboratoire de Glaciologie et de Geophysique de l'Environnement ****** 38402 Saint Martin d'Heres Cedex, France ****** ****** ****** Arctic and Antarctic Research Institute ****** Beringa Street 38 ****** St. Petersburg 199226, Russia ****** ****** January 2003 ***from astropy.io import ascii from PIL import Image import numpy as np import matplotlib.pyplot as plt %matplotlib inline coreDat = open('vostok.dat') year=[] co2=[] for line in coreDat: elements = line.split() if (len(elements)==4 and elements[0].replace('.','',1).isdigit()): year.append(float(elements[2])) co2.append(float(elements[3])) coreDat.close() print(min(year),max(year)) print(min(co2),max(co2)) yearGrid=np.arange(2500,402500,500) co2Grid=np.interp(yearGrid,year,co2) len(co2Grid) min(co2Grid), max(co2Grid) co2Texture = Image.new("RGB",(800,1)) r = np.array([]) g = np.array([]) for i,co2Val in enumerate(co2Grid): valR=max(0,int(co2Val)-100) valG=int(256.0*(float(co2Val)-np.floor(float(co2Val)))) r = np.append(r,valR) g = np.append(g,valG) co2Texture.putpixel((i,0),(valR,valG,0)) co2Texture co2Texture.save('../data/vostok.png') f = plt.figure() plt.plot(r + 100 + g/256.) f = plt.figure() plt.plot(np.array(year), co2)For the EarthFuture.dds data setfrom scipy.optimize import curve_fit # years_AD CO2_ppm # 2000 380 # 2050 510 # 2100 765 # 2150 650 # 2200 620 # 2500 578 # 3000 550 # 10000 500 yr = np.array([2000, 2050, 2100, 2150, 2200, 2500, 3000, 10000]) co2 = np.array([380, 510, 765, 650, 620, 578, 550, 500]) plt.semilogx(yr, co2, 'o') #fit1 xx = np.where(yr <= 2100) f = np.polyfit(yr[xx], co2[xx],1) f1 = np.poly1d(f) print(f1) y1 = np.linspace(2000, 2100,100) plt.plot(y1, f1(y1)) print("range",f1(2000), f1(2100)) #fit2 def f2(x, a, b, c): return a * np.exp(-b * (x - 2100.)) + c xx = np.where(yr > 2100) sig = np.sqrt(co2[xx]) popt, pcov = curve_fit(f2, yr[xx], co2[xx], p0=[300, 0.003, 500], sigma = 1./sig ) print(popt) y2 = np.linspace(2100, 10000,100) plt.plot(y2, f2(y2, *popt)) plt.plot(y2, 240*np.exp(-0.002*(y2 - 2100.)) + 500)3.85 x - 7341 range 359.16666666666697 744.1666666666679 [1.47668981e+02 1.46399384e-03 5.02386771e+02]import codecs import io import requests url = 'https://developers.google.com/public-data/docs/canonical/countries_csv' s = requests.get(url).content s.decode('utf-8') s import pandas as pd d = pd.read_html(url) d df = d[0] # o primeiro elemento da lista é um dataframe df latitude = df.loc[:,'latitude'] longitude = df.loc[:,'longitude'] print(latitude,longitude)0 42.546245 1 23.424076 2 33.939110 3 17.060816 4 18.220554 ... 240 15.552727 241 -12.827500 242 -30.559482 243 -13.133897 244 -19.015438 Name: latitude, Length: 245, dtype: float64 0 1.601554 1 53.847818 2 67.709953 3 -61.796428 4 -63.068615 ... 
240 48.516388 241 45.166244 242 22.937506 243 27.849332 244 29.154857 Name: longitude, Length: 245, dtype: float64Soft Actor-Critic 1L ss10000import numpy as np import os CPU_NUM = 2 JOB_TIME = '0-06:00' JOB_MEMORY = '8000M' job_sub_dir = './job_scripts' job_out_dir = './job_scripts_output' os.makedirs(job_sub_dir) os.makedirs(job_out_dir) tasks = ['RoboschoolAnt-v1', 'RoboschoolHalfCheetah-v1', 'RoboschoolWalker2d-v1', 'RoboschoolHopper-v1', 'RoboschoolInvertedPendulum-v1', 'RoboschoolInvertedPendulumSwingup-v1', 'RoboschoolInvertedDoublePendulum-v1', 'RoboschoolReacher-v1', 'RoboschoolPong-v1'] n_steps = ['1', '2', '3', '4', '5'] replay_size = ['1000000', '500000', '100000', '50000', '10000'] seeds = ['0', '1', '2', '3'] for s in seeds: for task in tasks: for n_s in n_steps: for b_s in replay_size: job_filename = 'job_{0}_{1}_{2}_{3}.sh'.format(task, s, n_s, b_s) print(job_filename) with open(os.path.join(job_sub_dir, job_filename), 'w') as job_file: job_file.write('#!/bin/bash\n') job_file.write('#SBATCH --account=def-dkulic\n') job_file.write('#SBATCH --cpus-per-task={} #Maximum of CPU cores per GPU request: 6 on Cedar, 16 on Graham.\n'.format(CPU_NUM)) job_file.write('#SBATCH --mem={} # memory per node\n'.format(JOB_MEMORY)) job_file.write('#SBATCH --time={} # time (DD-HH:MM)\n'.format(JOB_TIME)) job_file.write('#SBATCH --output=./job_scripts_output/sac_n_step_2L_NoDelayTrain_{0}_{1}_{2}_{3}_%N-%j.out # %N for node name, %j for jobID\n'.format(task, s, n_s, b_s)) job_file.write('## Main processing command\n') job_file.write('module load cuda cudnn \n') job_file.write('source ~/tf_gpu/bin/activate\n') job_file.write('python ./sac_n_step.py --env {0} --seed {1} --l 1 --n_step {2} --replay_size {3} --without_delay_train --start_steps 10000 --exp_name sac_n_step_1L_NoDelayTrain_ss10000_{0}_{1}_{2}_{3}'.format(task, s, n_s, b_s))1L ss100import numpy as np import os CPU_NUM = 2 JOB_TIME = '0-06:00' JOB_MEMORY = '8000M' job_sub_dir = './job_scripts' job_out_dir = './job_scripts_output' tasks = ['RoboschoolAnt-v1', 'RoboschoolHalfCheetah-v1', 'RoboschoolWalker2d-v1', 'RoboschoolHopper-v1', 'RoboschoolInvertedPendulum-v1', 'RoboschoolInvertedPendulumSwingup-v1', 'RoboschoolInvertedDoublePendulum-v1', 'RoboschoolReacher-v1', 'RoboschoolPong-v1'] n_steps = ['1', '2', '3', '4', '5'] replay_size = ['1000000', '500000', '100000', '50000', '10000'] seeds = ['0', '1', '2', '3'] for s in seeds: for task in tasks: for n_s in n_steps: for b_s in replay_size: job_filename = 'job_{0}_{1}_{2}_{3}.sh'.format(task, s, n_s, b_s) print(job_filename) with open(os.path.join(job_sub_dir, job_filename), 'w') as job_file: job_file.write('#!/bin/bash\n') job_file.write('#SBATCH --account=def-dkulic\n') job_file.write('#SBATCH --cpus-per-task={} #Maximum of CPU cores per GPU request: 6 on Cedar, 16 on Graham.\n'.format(CPU_NUM)) job_file.write('#SBATCH --mem={} # memory per node\n'.format(JOB_MEMORY)) job_file.write('#SBATCH --time={} # time (DD-HH:MM)\n'.format(JOB_TIME)) job_file.write('#SBATCH --output=./job_scripts_output/sac_n_step_2L_NoDelayTrain_{0}_{1}_{2}_{3}_%N-%j.out # %N for node name, %j for jobID\n'.format(task, s, n_s, b_s)) job_file.write('## Main processing command\n') job_file.write('module load cuda cudnn \n') job_file.write('source ~/tf_gpu/bin/activate\n') job_file.write('python ./sac_n_step.py --env {0} --seed {1} --l 1 --n_step {2} --replay_size {3} --without_delay_train --start_steps 100 --data_dir spinup_data_sac_1L_ss100 --exp_name 
sac_n_step_1L_NoDelayTrain_ss100_{0}_{1}_{2}_{3}'.format(task, s, n_s, b_s)) import os job_sub_dir = './job_scripts' jobs = os.listdir('./job_scripts') jobs.sort() i=1 for job in jobs: code = os.system('sbatch {}'.format(os.path.join(job_sub_dir, job))) print('{} ---- {}: {}'.format(i, job, code)) i += 12L ss10000import numpy as np import os CPU_NUM = 2 JOB_TIME = '0-06:00' JOB_MEMORY = '8000M' job_sub_dir = './job_scripts' job_out_dir = './job_scripts_output' os.makedirs(job_sub_dir) os.makedirs(job_out_dir) tasks = ['RoboschoolAnt-v1', 'RoboschoolHalfCheetah-v1', 'RoboschoolWalker2d-v1', 'RoboschoolHopper-v1'] n_steps = ['1', '2', '3', '4', '5'] replay_size = ['1000000', '500000', '100000', '50000', '10000'] seeds = ['0', '1', '2', '3'] for s in seeds: for task in tasks: for n_s in n_steps: for b_s in replay_size: job_filename = 'job_{0}_{1}_{2}_{3}.sh'.format(task, s, n_s, b_s) print(job_filename) with open(os.path.join(job_sub_dir, job_filename), 'w') as job_file: job_file.write('#!/bin/bash\n') job_file.write('#SBATCH --account=def-dkulic\n') job_file.write('#SBATCH --cpus-per-task={} #Maximum of CPU cores per GPU request: 6 on Cedar, 16 on Graham.\n'.format(CPU_NUM)) job_file.write('#SBATCH --mem={} # memory per node\n'.format(JOB_MEMORY)) job_file.write('#SBATCH --time={} # time (DD-HH:MM)\n'.format(JOB_TIME)) job_file.write('#SBATCH --output=./job_scripts_output/sac_n_step_2L_NoDelayTrain_{0}_{1}_{2}_{3}_%N-%j.out # %N for node name, %j for jobID\n'.format(task, s, n_s, b_s)) job_file.write('## Main processing command\n') job_file.write('module load cuda cudnn \n') job_file.write('source ~/tf_gpu/bin/activate\n') job_file.write('python ./sac_n_step.py --env {0} --seed {1} --l 2 --n_step {2} --replay_size {3} --without_delay_train --start_steps 10000 --data_dir spinup_data_sac_2L --exp_name sac_n_step_2L_NoDelayTrain_ss10000_{0}_{1}_{2}_{3}'.format(task, s, n_s, b_s))2L ss100import numpy as np import os CPU_NUM = 2 JOB_TIME = '0-06:00' JOB_MEMORY = '8000M' job_sub_dir = './job_scripts' job_out_dir = './job_scripts_output' tasks = ['RoboschoolAnt-v1', 'RoboschoolHalfCheetah-v1', 'RoboschoolWalker2d-v1', 'RoboschoolHopper-v1', 'RoboschoolInvertedPendulum-v1', 'RoboschoolInvertedPendulumSwingup-v1', 'RoboschoolInvertedDoublePendulum-v1', 'RoboschoolReacher-v1', 'RoboschoolPong-v1'] n_steps = ['1', '2', '3', '4', '5'] replay_size = ['1000000', '500000', '100000', '50000', '10000'] seeds = ['0', '1', '2', '3'] for s in seeds: for task in tasks: for n_s in n_steps: for b_s in replay_size: job_filename = 'job_{0}_{1}_{2}_{3}.sh'.format(task, s, n_s, b_s) print(job_filename) with open(os.path.join(job_sub_dir, job_filename), 'w') as job_file: job_file.write('#!/bin/bash\n') job_file.write('#SBATCH --account=def-dkulic\n') job_file.write('#SBATCH --cpus-per-task={} #Maximum of CPU cores per GPU request: 6 on Cedar, 16 on Graham.\n'.format(CPU_NUM)) job_file.write('#SBATCH --mem={} # memory per node\n'.format(JOB_MEMORY)) job_file.write('#SBATCH --time={} # time (DD-HH:MM)\n'.format(JOB_TIME)) job_file.write('#SBATCH --output=./job_scripts_output/sac_n_step_2L_ss100_NoDelayTrain_{0}_{1}_{2}_{3}_%N-%j.out # %N for node name, %j for jobID\n'.format(task, s, n_s, b_s)) job_file.write('## Main processing command\n') job_file.write('module load cuda cudnn \n') job_file.write('source ~/tf_gpu/bin/activate\n') job_file.write('python ./sac_n_step.py --env {0} --seed {1} --l 2 --n_step {2} --replay_size {3} --without_delay_train --start_steps 100 --data_dir spinup_data_sac_2L_ss100 --exp_name 
sac_n_step_2L_NoDelayTrain_ss100_{0}_{1}_{2}_{3}'.format(task, s, n_s, b_s)) import os job_sub_dir = './job_scripts' jobs = os.listdir('./job_scripts') jobs.sort() i=1 for job in jobs: code = os.system('sbatch {}'.format(os.path.join(job_sub_dir, job))) print('{} ---- {}: {}'.format(i, job, code)) i += 1PyBulletGym 2L ss100import numpy as np import os CPU_NUM = 2 JOB_TIME = '0-06:00' JOB_MEMORY = '8000M' job_sub_dir = './job_scripts' job_out_dir = './job_scripts_output' tasks = ['AntPyBulletEnv-v0', 'HalfCheetahPyBulletEnv-v0', 'Walker2DPyBulletEnv-v0', 'HopperPyBulletEnv-v0', 'ReacherPyBulletEnv-v0', 'PusherPyBulletEnv-v0', 'ThrowerPyBulletEnv-v0', 'StrikerPyBulletEnv-v0'] n_steps = ['1', '2', '3', '4', '5'] replay_size = ['1000000', '500000', '100000', '50000', '10000'] seeds = ['0', '1', '2', '3'] for s in seeds: for task in tasks: for n_s in n_steps: for b_s in replay_size: job_filename = 'job_{0}_{1}_{2}_{3}.sh'.format(task, s, n_s, b_s) print(job_filename) with open(os.path.join(job_sub_dir, job_filename), 'w') as job_file: job_file.write('#!/bin/bash\n') job_file.write('#SBATCH --account=def-dkulic\n') job_file.write('#SBATCH --cpus-per-task={} #Maximum of CPU cores per GPU request: 6 on Cedar, 16 on Graham.\n'.format(CPU_NUM)) job_file.write('#SBATCH --mem={} # memory per node\n'.format(JOB_MEMORY)) job_file.write('#SBATCH --time={} # time (DD-HH:MM)\n'.format(JOB_TIME)) job_file.write('#SBATCH --output=./job_scripts_output/sac_n_step_2L_ss100_NoDelayTrain_{0}_{1}_{2}_{3}_%N-%j.out # %N for node name, %j for jobID\n'.format(task, s, n_s, b_s)) job_file.write('## Main processing command\n') job_file.write('module load cuda cudnn \n') job_file.write('source ~/tf_gpu/bin/activate\n') job_file.write('python ./sac_n_step.py --env {0} --seed {1} --l 2 --n_step {2} --replay_size {3} --without_delay_train --start_steps 100 --data_dir spinup_data_sac_PyBullet_2L_ss100 --exp_name sac_n_step_2L_NoDelayTrain_ss100_{0}_{1}_{2}_{3}'.format(task, s, n_s, b_s))2L ss10000tasks = ['AntPyBulletEnv-v0', 'HalfCheetahPyBulletEnv-v0', 'Walker2DPyBulletEnv-v0', 'HopperPyBulletEnv-v0', 'ReacherPyBulletEnv-v0', 'PusherPyBulletEnv-v0', 'ThrowerPyBulletEnv-v0', 'StrikerPyBulletEnv-v0'] n_steps = ['1', '2', '3', '4', '5'] replay_size = ['1000000', '500000', '100000', '50000', '10000'] seeds = ['0', '1', '2', '3'] for s in seeds: for task in tasks: for n_s in n_steps: for b_s in replay_size: job_filename = 'job_{0}_{1}_{2}_{3}.sh'.format(task, s, n_s, b_s) print(job_filename) with open(os.path.join(job_sub_dir, job_filename), 'w') as job_file: job_file.write('#!/bin/bash\n') job_file.write('#SBATCH --account=def-dkulic\n') job_file.write('#SBATCH --cpus-per-task={} #Maximum of CPU cores per GPU request: 6 on Cedar, 16 on Graham.\n'.format(CPU_NUM)) job_file.write('#SBATCH --mem={} # memory per node\n'.format(JOB_MEMORY)) job_file.write('#SBATCH --time={} # time (DD-HH:MM)\n'.format(JOB_TIME)) job_file.write('#SBATCH --output=./job_scripts_output/sac_n_step_2L_ss100_NoDelayTrain_{0}_{1}_{2}_{3}_%N-%j.out # %N for node name, %j for jobID\n'.format(task, s, n_s, b_s)) job_file.write('## Main processing command\n') job_file.write('module load cuda cudnn \n') job_file.write('source ~/tf_gpu/bin/activate\n') job_file.write('python ./sac_n_step.py --env {0} --seed {1} --l 2 --n_step {2} --replay_size {3} --without_delay_train --start_steps 10000 --data_dir spinup_data_sac_PyBullet_2L_ss10000 --exp_name sac_n_step_2L_NoDelayTrain_ss10000_{0}_{1}_{2}_{3}'.format(task, s, n_s, b_s))Building a dataset, training a Seq2Seq 
model, and testing itThe Virginia Tech Natural Motion Dataset contains .h5 files with unscripted human motion data collected in real-world environments as participants went about their day-to-day lives. This is a brief tutorial in using the dataset and then training and testing a neural network.This tutorial illustrates how to use the shell (.sh) scripts to train a seq2seq model (particularly **train_seq2seq.sh** and **test_seq2seq.sh**). Similar shell scripts are also available for the Transformers (see **train_transformer.sh** and **test_transformer.sh**) Building a datasetWe will first cover how to build a dataset with data from a few participants using the build-dataset.py file.We are running the script from a Jupyter Notebook, but this can just as easily be run as a shell script (see build_dataset.sh).In this case, we are drawing data from the h5-dataset folder located in the cloud. We are going to output the training.h5, validation.h5, and testing.h5 files to the folder data/set-2.We will be using participants 1, 5, and 10 (P1, P5, P10, respectively) and extracting normOrientation and normAcceleration data on a few segments (norm* means data normalized relative to the pelvis). As output data we will be extracting normOrientation data for every segment.In other words, our task is as follows: use orientation and acceleration from a set of sparse segments and try to train a model mapping that input data to orientations for every segment on the human body.!mkdir -p /home/jackg7/VT-Natural-Motion-Processing/data/set-2 !python build-dataset.py --data-path "/groups/MotionPred/h5-dataset" \ --output-path "/home/jackg7/VT-Natural-Motion-Processing/data/set-2" \ --training "P1" \ --validation "P5" \ --testing "P10" \ --task-input "normOrientation normAcceleration" \ --input-label-request "T8 RightForeArm RightLowerLeg LeftForeArm LeftLowerLeg" \ --task-output "normOrientation" \ --output-label-request "all"2020-08-10 13:09:31 INFO Writing X to the training file group... 2020-08-10 13:09:37 INFO Writing X to the validation file group... 2020-08-10 13:09:40 INFO Writing X to the testing file group... 2020-08-10 13:09:50 INFO Writing Y to the training file group... 2020-08-10 13:09:57 INFO Writing Y to the validation file group... 2020-08-10 13:09:58 INFO Writing Y to the testing file group...Training a seq2seq modelWe can now train a seq2seq model to map the normOrientation and normAcceleration data from the sparse segments to the full-body normOrientation data.We will be using a seq-length of 30 (at 240 Hz) downsample it by a factor of 6 (to 40 Hz). The resulting sequences will be of length 5 for the input and output. The in-out-ratio will then be used to reduce the output sequence length to 1.The input sequence will be of shape (B, 5, 35) and output shape will be of shape (B, 1, 92). Orientations are stored as quaternions, so orientation value will be 4 in length. The number 35 comes from our use of 5 segment orientations and accelerations or $5*4 + 5*3 = 35$. The full-body has 23 segments and we're predicting orientation values for each one or $23*4 = 92$We're training a seq2seq model with a hidden size of 512, a bidirectional encoder and dot product attention. 
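As a quick sanity check on the sizes quoted above, here is a small sketch using only the numbers stated in this tutorial:

```python
# Input: 5 sparse segments, each contributing a quaternion (4) and an acceleration (3).
n_input_segments, quat, accel = 5, 4, 3
input_features = n_input_segments * (quat + accel)   # 5*4 + 5*3 = 35

# Output: 23 body segments, orientation (quaternion) only.
n_output_segments = 23
output_features = n_output_segments * quat           # 23*4 = 92

# 30 frames at 240 Hz, downsampled by 6 -> sequences of length 5 at 40 Hz.
seq_length, downsample = 30, 6
frames = seq_length // downsample                    # 5

print(input_features, output_features, frames)       # 35 92 5
```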
The model will be trained for a single epoch.Our loss function for training will be the L1Loss and our validation losses will be the L1Loss and the QuatDistance (cosine similarity) loss.!mkdir -p /home/jackg7/VT-Natural-Motion-Processing/models/set-2 !python train-seq2seq.py --task conversion \ --data-path "/home/jackg7/VT-Natural-Motion-Processing/data/set-2" \ --model-file-path "/home/jackg7/VT-Natural-Motion-Processing/models/set-2/model.pt" \ --representation quaternions \ --batch-size=32 \ --seq-length=30 \ --downsample=6 \ --in-out-ratio=5 \ --stride=30 \ --learning-rate=0.001 \ --num-epochs=1 \ --hidden-size=512 \ --attention=dot \ --bidirectional2020-08-10 13:10:06 INFO task - conversion 2020-08-10 13:10:06 INFO data_path - /home/jackg7/VT-Natural-Motion-Processing/data/set-2 2020-08-10 13:10:06 INFO model_file_path - /home/jackg7/VT-Natural-Motion-Processing/models/set-2/model.pt 2020-08-10 13:10:06 INFO representation - quaternions 2020-08-10 13:10:06 INFO auxiliary_acc - False 2020-08-10 13:10:06 INFO batch_size - 32 2020-08-10 13:10:06 INFO learning_rate - 0.001 2020-08-10 13:10:06 INFO seq_length - 30 2020-08-10 13:10:06 INFO downsample - 6 2020-08-10 13:10:06 INFO in_out_ratio - 5 2020-08-10 13:10:06 INFO stride - 30 2020-08-10 13:10:06 INFO num_epochs - 1 2020-08-10 13:10:06 INFO hidden_size - 512 2020-08-10 13:10:06 INFO dropout - 0.0 2020-08-10 13:10:06 INFO bidirectional - True 2020-08-10 13:10:06 INFO attention - dot 2020-08-10 13:10:06 INFO Starting seq2seq model training... 2020-08-10 13:10:06 INFO Retrieving training data for sequences 125 ms[...]Testing our modelWe can now test our model and output a histogram of performance over the testing data. The model parameters must be the same to properly read in the model.!mkdir -p /home/jackg7/VT-Natural-Motion-Processing/images !python test-seq2seq.py --task conversion \ --data-path-parent /home/jackg7/VT-Natural-Motion-Processing/data \ --figure-file-path /home/jackg7/VT-Natural-Motion-Processing/images/seq2seq-test.pdf \ --figure-title "Seq2Seq" \ --model-dir /home/jackg7/VT-Natural-Motion-Processing/models/set-2 \ --representation quaternions \ --batch-size=512 \ --seq-length=30 \ --downsample=6 \ --in-out-ratio=5 \ --stride=30 \ --hidden-size=512 \ --attention=dot \ --bidirectional2020-08-10 13:14:06 INFO task - conversion 2020-08-10 13:14:06 INFO data_path_parent - /home/jackg7/VT-Natural-Motion-Processing/data 2020-08-10 13:14:06 INFO figure_file_path - /home/jackg7/VT-Natural-Motion-Processing/images/seq2seq-test.pdf 2020-08-10 13:14:06 INFO figure_title - Seq2Seq 2020-08-10 13:14:06 INFO include_legend - False 2020-08-10 13:14:06 INFO model_dir - /home/jackg7/VT-Natural-Motion-Processing/models/set-2 2020-08-10 13:14:06 INFO representation - quaternions 2020-08-10 13:14:06 INFO batch_size - 512 2020-08-10 13:14:06 INFO seq_length - 30 2020-08-10 13:14:06 INFO downsample - 6 2020-08-10 13:14:06 INFO in_out_ratio - 5 2020-08-10 13:14:06 INFO stride - 30 2020-08-10 13:14:06 INFO hidden_size - 512 2020-08-10 13:14:06 INFO dropout - 0.0 2020-08-10 13:14:06 INFO bidirectional - True 2020-08-10 13:14:06 INFO attention - dot 2020-08-10 13:14:06 INFO Starting seq2seq model testing... 
2020-08-10 13:14[...]We can now visualize the performance of the seq2seq model on the test data.from IPython.display import IFrame IFrame("../images/seq2seq-test.pdf", width=600, height=300)Syngas prediction with consideration of timeCopyright 2021-2022 at ISU and at WUSTLIn this approach, we consider building a function $f(N_2, CO, H_2, CO_2, \text{flowrate}, \text{time}) = (\text{biomass, acetate, butanol, butyrate, ethanol})$ where the outputs are concentrations. The dataset has multiple trials (done by different labs under different conditions), some of which are used for training while the rest are used for testing. Because the raw data is very sparse, we also tried data augmentation (curving) using polynomial fitting, resulting in multiple versions of the models (a model always predicts y given X, i.e., y=f(X)):

| X (input) | y (output) | X source | y source | note |
| ----------- | ---------- | -------- | --------- | ---- |
| concentration | concentration | raw | raw | |
| concentration | concentration | curved | curved | |
| concentration | concentration | curved | raw | |
| concentration | rate | raw | raw | impossible |
| concentration | rate | curved | curved | |
| concentration | rate | curved | raw | impossible |

In the concentration-to-rate cases, the function $f$ becomes: $f(CO, H_2, CO_2, \text{flowrate}, \text{time}) = (\Delta\text{biomass}, \Delta\text{acetate}, \Delta\text{butanol}, \Delta\text{butyrate}, \Delta\text{ethanol})$ where $\Delta = {\partial \text{ concentration} \over \partial \text{ time}}$. Organization of code (key functions and variables):
* `complete_loop()`: the main function; the training and test trial split is hard-coded (`training_test_composition_splits`) in it.
* `prepare_data_one_y()`: Return the X and y as in common ML tasks from one Pandas dataframe, given the training and test trial IDs (integers). The trial ID is in the dataframe.
* `cv_fixed_trial_split()`: Run cross-validation to find the best hyperparameters for several types of ML regressors (NNs, SVMs, DTs, RFs, etc.).
* `gen_NN_uni()`: Enumerate fully-connected layers/MLPs given a maximal number of layers and a maximal number of neurons at each layer.
* `gen_NN_fixed_n_layers()`: Given the number of layers and a maximal number of neurons per layer, produce various MLPs.
* `beautiful_plot()`: Visualize the results.
* `full_cfgs` and `test_cfgs`: two dicts storing hyperparameter grids for various types of ML regressors.

The call of `complete_loop` is at the end of this notebook where the three versions are presented with different input CSV files and arguments. 0. Load modulesimport pandas as pd import numpy as np import os import sys import sklearn.model_selection import matplotlib.pyplot as plt import warnings import sklearn, sklearn.preprocessing, sklearn.pipeline, sklearn.model_selection import sklearn.svm, sklearn.tree, sklearn.ensemble, sklearn.neural_network import sklearn.linear_model, sklearn.gaussian_process, sklearn.neighbors import sklearn.multioutput import numpy import functools, operator, itertools import time import json import scipy.stats, scipy # import scikitlearn_plus.neural_network warnings.filterwarnings('ignore') import typing import pickle 1. 
Defining functions needed for experiments 1.1 The function to extraction training and test data for one output/regressor from the datadef prepare_data_one_y(train_df, test_df, training_trial_indexes:typing.List[int], test_trial_indexes:typing.List[int], X_columns:typing.List[str], y_column:str, scaler_type:str, columns_to_scale:typing.List[str]): """Prepare the X and y for one yield prediction, given the yield name, and training and test trial indexes train_df, test_df: DataFrames storing X and y. We allow training data and test data to be of different sources, e.g., one curved and the other raw. """ # scale for X and y if scaler_type == "minmax": Scaler = sklearn.preprocessing.MinMaxScaler() elif scaler_type == "standard": Scaler = sklearn.preprocessing.StandardScaler() else: print ("wrong scaler type ") exit(0) Scaler.fit(train_df[columns_to_scale]) train_df[columns_to_scale] = Scaler.transform(train_df[columns_to_scale]) test_df[columns_to_scale] = Scaler.transform(test_df[columns_to_scale]) # Separate training and testing training_data = train_df[train_df['composition'].isin(training_trial_indexes)] test_data = test_df[test_df['composition'].isin(test_trial_indexes)] training_X, training_y = training_data[X_columns], training_data[y_column] test_X, test_y = test_data[X_columns], test_data[y_column] return training_X, training_y, test_X, test_y # Test train_df = pd.read_csv(f'../data/rates_data.csv') print(f'Shape of the training data: {train_df.shape[0]} rows by {train_df.shape[1]} columns') test_df = pd.read_csv(f'../data/experimental_data.csv') print(f'Shape of the test data: {test_df.shape[0]} rows by {test_df.shape[1]} columns') training_trial_indexes = [1,2,3,4,5,6,7] test_trial_indexes = [8,9,10] y_column = 'biomass (g/L)' # 'biomass', 'ethanol', 'acetate', 'butanol', or 'butyrate' X_columns = ['flow rate (mL/min)', 'H2', 'CO', 'CO2'] columns_to_scale = ['biomass (g/L)','ethanol (mM)','acetate (mM)','butanol (mM)','butyrate (mM)', 'flow rate (mL/min)', 'H2', 'CO', 'CO2'] training_X, training_y, test_X, test_y = prepare_data_one_y(train_df, test_df, training_trial_indexes, test_trial_indexes, X_columns, y_column, "minmax", columns_to_scale)Shape of the training data: 836 rows by 18 columns Shape of the test data: 176 rows by 13 columns1.2 Two functions to enumerate network structuresdef gen_NN_fixed_n_layers(n_layers, n_neurons, neuron_step): """Generate NN hidden_layer_sizes of n_layers and up to n_neurons per layer """ # print (n_layers) if n_layers == 1: return [[i] for i in range(neuron_step, n_neurons+1, neuron_step)] else: pairs = [ (i, tail) for tail in gen_NN_fixed_n_layers(n_layers-1, n_neurons+1, neuron_step) for i in range(neuron_step, n_neurons+1, neuron_step) ] return [[i]+ t for (i, t) in pairs] # print (gen_NN_fixed_n_layers(4, 10, 5)) def gen_NN_uni(n_layers, n_neurons, layer_step, neuron_step): """Generate hidden layers of various number of layers and number of neurons """ various_NNs = [ gen_NN_fixed_n_layers(i , n_neurons, neuron_step) for i in range(2, n_layers+1, layer_step)] return functools.reduce(operator.add, various_NNs)1.3 The function and hyperparameter grid for grid-searching hyperparametersdef cv_fixed_trial_split(training_X, training_y, test_X, test_y, model_cfgs, refit:str): """Given training and test sets fixed on trials, do Grid search CV to tune hyperparameter and then test on test sets. 
""" estimator, hyperparam_grid = model_cfgs['estimator'], model_cfgs['param_grid'] search = sklearn.model_selection.GridSearchCV( estimator = estimator, param_grid = hyperparam_grid, # scoring = ['neg_root_mean_squared_error'], # refit = 'neg_root_mean_squared_error', scoring = ['r2', 'neg_mean_absolute_percentage_error'], refit = refit, cv = sklearn.model_selection.ShuffleSplit(n_splits=10, test_size=0.2, random_state=0), n_jobs=-1, verbose=0 ) # run grid search search.fit(training_X, training_y) # test on test set test_score = search.score(test_X, test_y) # print ("test score %0.3f:" % test.score) test_y_hat = search.predict(test_X) cc = scipy.stats.pearsonr(test_y_hat, test_y)[0] return test_score, search.best_score_, search.best_params_, cc, search.cv_results_ test_cfgs = { # "nn":{ # 'estimator': sklearn.neural_network.MLPRegressor(shuffle=True), # # Test grid # 'param_grid': { # # hidden_layer_sizes made the search space many order of magnitudes larger # 'activation': ['tanh', 'logistic', 'relu'], # 'max_iter': [400*i for i in range(1, 2)], # 'learning_rate': ['adaptive'] # } # }, # "svm_rbf":{ # 'estimator': sklearn.svm.SVR(kernel='rbf'), # # Test grid # 'param_grid': { # 'C': [10**i for i in range(-1, 1)], # 'epsilon': [10**i for i in range(-1, 1)], # } # }, # "rf":{ # 'estimator': sklearn.ensemble.RandomForestRegressor(), # # Test grid # 'param_grid': { # 'n_estimators': [10*i for i in range(1, 2)], # 'max_depth': [2*i for i in range(1, 1+1)], # } # }, # "gauss":{ # 'estimator': sklearn.gaussian_process.GaussianProcessRegressor(), # # Full grid # 'param_grid': { # 'alpha': [10**i for i in range(0, 5)], # 'normalize_y': [True, False], # # 'n_restarts_optimizer': [2**i for i in range(0,5)] # } # }, "knn":{ 'estimator': sklearn.neighbors.KNeighborsRegressor(), # Full grid 'param_grid': { 'n_neighbors': [i for i in range(1, 30+1)], 'weights': ['distance'], 'algorithm': ['ball_tree', 'kd_tree'], 'leaf_size': [5*i for i in range(1, 10+1)] } }, } full_cfgs = { "nn":{ 'estimator': sklearn.neural_network.MLPRegressor(shuffle=True), # Full grid 'param_grid': { 'hidden_layer_sizes': gen_NN_uni(4, 100, 1, 20), 'activation': ['tanh', 'relu'], 'max_iter': [5000], 'learning_rate': ['adaptive'] } }, "svm_rbf":{ 'estimator': sklearn.svm.SVR(kernel='rbf'), # Full grid 'param_grid': { 'C': [10**i for i in range(-5, 5)], 'epsilon': [10**i for i in range(-5, 5)], 'gamma': [10**i for i in range(-5, 5)] # gamma gave me an error } }, "rf":{ 'estimator': sklearn.ensemble.RandomForestRegressor(), # Full grid 'param_grid': { 'n_estimators': [10*i for i in range(1, 20)], 'max_depth': [2*i for i in range(20)], 'max_samples': [0.05*i for i in range(1, 10+1)] # max samples gave me an error } }, "en":{ 'estimator': sklearn.linear_model.ElasticNet(), # Full grid 'param_grid': { 'alpha': [10**i for i in range(-10, 10)], 'l1_ratio': [0.1*i for i in range(1, 10+1)] } }, "lasso":{ 'estimator': sklearn.linear_model.Lasso(), # Full grid 'param_grid': { 'alpha': [10**i for i in range(-10, 10)] } }, "gauss":{ 'estimator': sklearn.gaussian_process.GaussianProcessRegressor(), # Full grid 'param_grid': { 'alpha': [10**i for i in range(-10, 10)], 'normalize_y': [True, False], } }, "knn":{ 'estimator': sklearn.neighbors.KNeighborsRegressor(), # Full grid 'param_grid': { 'n_neighbors': [i for i in range(1, 30+1)], 'weights': ['distance'], 'algorithm': ['ball_tree', 'kd_tree'], 'leaf_size': [5*i for i in range(1, 10+1)] } }, } # Test # for name, cfg in test_cfgs.items(): # print (name) # _ = \ # ml_fixed_trial_split(training_X, 
training_y, test_X, test_y, cfg, 'neg_mean_absolute_percentage_error')1.4 The exhaustive CV loop# def process_cv_results(): # best for each metric # for key, value in search.cv_results_.items(): # if "_score" in key: def complete_loop(train_df, test_df, model_cfgs, X_columns, y_columns, pickle_file, refit:str): """Complete multi-layer loops for entire ML task train_df, test_df: pandas dataframes loaded from CSV files model_cfgs: dict, keys as str (model type) and values as sklearn grid CV parameter dict y_columns: list of str, To predict absolute value, use: ['biomass', 'ethanol', 'acetate', 'butanol', 'butyrate'] To predict rate, use: ['biomass_Δ', 'ethanol_Δ', 'acetate_Δ', 'butanol_Δ', 'butyrate_Δ'] X_columns: list of str, e.g., ['flow rate', 'H2', 'CO', 'CO2'] pickle_file: str where to save the results using various regressors refit: str the sklearn.metrics string """ loo = sklearn.model_selection.LeaveOneOut() compositions = np.array(range(1, 10+1)) training_test_composition_splits = [ ( (1,2,3,4,5,6,7), (8,9,10) ), # training in compositions 1 to 7 and test in compositions 8 to 10 ] \ # + \ # [(compositions[train_indexes], compositions[test_indexes]) # for train_indexes, test_indexes in # loo.split(range(7)) # ] # leave one composition as test # print (training_test_composition_splits) columns_to_scale = X_columns + y_columns full_results = {} for y_column in y_columns: # print (y_column) for (training_compositions, test_compositions) in training_test_composition_splits: # for scaler_type in ['minmax', 'standard']: for scaler_type in ['minmax']: training_X, training_y, test_X, test_y = \ prepare_data_one_y(train_df, test_df, training_compositions, test_compositions, X_columns, y_column, scaler_type, columns_to_scale) for estimator_type, cfg in model_cfgs.items(): # print (estimator_type) test_score, train_score, best_params, cc, cv_results = \ cv_fixed_trial_split(training_X, training_y, test_X, test_y, cfg, refit) # save result if type(test_compositions) == numpy.ndarray: test_compositions = tuple(test_compositions.tolist()) r2_per_sk = np.mean(cv_results['mean_test_r2']) cv_results.update({ "my_cc":cc, "r2_per_sk":r2_per_sk, "my_test_score": test_score, "my_train_score": train_score}) full_results[ (scaler_type, y_column, test_compositions, estimator_type) ] = cv_results # now print the result line = [scaler_type, y_column, test_compositions, estimator_type, "{0:.3f}".format(cc), "{0:.3f}".format(r2_per_sk), best_params] line = map(str, line) print ("\t".join(line)) # json.dump(full_results, open("results.json", "w"), indent=2, ) pickle.dump(full_results, open(pickle_file, "bw")) return full_results1.5 The plot function to visualize resultsdef beautiful_plot(full_results, Y_names, metric, main_title): """ metric: str, "my_cc" or "r2_per_sk' """ scaler = 'minmax' test_trial = (8,9,10) bar_width = 0.3 CC = {condition: CV_numbers[metric] for condition, CV_numbers in full_results.items() } # from CC to R2 if metric == "my_cc": CC = {key:value**2 for key,value in CC.items()} model_names = [condition[3] for condition in full_results.keys()] model_names = list(set(model_names)) model_names.sort() table = [ [ CC[(scaler, y, test_trial, model_name,)] for model_name in model_names ] for y in Y_names ] Y_max = np.nanmax(np.nanmax(table)) figure = plt.figure(figsize=(25,4)) index = np.arange(0, len(model_names))*bar_width + bar_width/2 for row_number , row in enumerate(table): # print (100 + len(Y_names)*10 + row_number + 1, index, row) plt.subplot(100 + len(Y_names)*10 + row_number + 1) 
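# The integer passed to plt.subplot above is matplotlib's 3-digit shorthand:
# 1 row, len(Y_names) columns, panel row_number + 1 (valid only while len(Y_names) <= 9).
# The explicit equivalent would be plt.subplot(1, len(Y_names), row_number + 1).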
plt.ylim(0, Y_max + 0.1) plt.bar(index, row, width = bar_width, tick_label=model_names, color = plt.cm.gist_ncar(np.linspace(0, 0.5, len(model_names)))) plt.title(Y_names[row_number]) # break figure.suptitle(main_title ) # comparison = {} # for y in Y_names: # for each output or each regressor # comparison[y] = {model_name: cc for # ((scaler, y, _, model_name), cc) in CC.items()} # for # bar_width = 0.5 # tick_label = list(comparison[y].keys()) # cc_value = list(comparison['butyrate'].values() ) # index = np.arange(0, len(tick_label))*bar_width + bar_width/2 # print (index, tick_label, cc_value) # plt.bar(index, cc_value , # width = bar_width, tick_label=tick_label, # color = plt.cm.gist_ncar(np.linspace(0, 0.5, len(comparison[y])))) # Test # full_results = pickle.load(open("test.pickle", "br")) # beautiful_plot(full_results, # ['biomass', 'ethanol', 'acetate', 'butanol', 'butyrate'], # "my_cc", # "1. Concentration to concentration")2. Experiments 2.1 When Refit goal is MAPE 2.1.1. Running the experiments**1. raw concentration => raw concentration**train_df = pd.read_csv(f'../data/experimental_data.csv') test_df = train_df results_raw_concentration = complete_loop( train_df, test_df, full_cfgs, ['flow rate (mL/min)', 'N2', 'H2', 'CO', 'CO2'] , ['biomass (g/L)','ethanol (mM)','acetate (mM)','butanol (mM)','butyrate (mM)'], "raw_concentration_to_raw_concentration.pickle", 'neg_mean_absolute_percentage_error')minmax biomass (g/L) (8, 9, 10) nn -0.192 -0.134 {'activation': 'tanh', 'hidden_layer_sizes': [80, 60, 100, 60], 'learning_rate': 'adaptive', 'max_iter': 5000} minmax biomass (g/L) (8, 9, 10) svm_rbf nan -0.053 {'C': 1e-05, 'epsilon': 10000, 'gamma': 1e-05} minmax biomass (g/L) (8, 9, 10) rf 0.079 nan {'max_depth': 26, 'max_samples': 0.05, 'n_estimators': 10} minmax biomass (g/L) (8, 9, 10) en nan -0.057 {'alpha': 0.001, 'l1_ratio': 1.0} minmax biomass (g/L) (8, 9, 10) lasso nan -0.057 {'alpha': 0.001} minmax biomass (g/L) (8, 9, 10) gauss -0.010 -1.033 {'alpha': 10000, 'normalize_y': False} minmax biomass (g/L) (8, 9, 10) knn 0.211 -0.175 {'algorithm': 'ball_tree', 'leaf_size': 10, 'n_neighbors': 1, 'weights': 'distance'} minmax ethanol (mM) (8, 9, 10) nn 0.155 0.114 {'activation': 'tanh', 'hidden_layer_sizes': [60, 80, 80, 20], 'learning_rate': 'adaptive', 'max_iter': 5000} minmax ethanol (mM) (8, 9, 10) svm_rbf 0.253 -0.529 {'C': 10000, 'epsilon': 1, 'gamma': 1e-05} minmax ethanol ([...]**2. curved concentration => curved concentration**# Why Jupyter is bad here. # i want variables train_df and test_df to be different for each experiment # rather than being global variables overridden on experiment after the other. 
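One way to address the comment above is to wrap each experiment in a small helper so the dataframes stay local to the call - a sketch reusing the `complete_loop` signature defined earlier (the helper name `run_experiment` is made up):

```python
def run_experiment(train_csv, test_csv, y_columns, pickle_file,
                   refit='neg_mean_absolute_percentage_error'):
    # Fresh, local dataframes on every call: nothing global gets overwritten
    # between experiments.
    train_df = pd.read_csv(train_csv)
    test_df = pd.read_csv(test_csv)
    return complete_loop(train_df, test_df, full_cfgs,
                         ['flow rate (mL/min)', 'N2', 'H2', 'CO', 'CO2'],
                         y_columns, pickle_file, refit)

# e.g. experiment 2 (curved concentration => curved concentration) would become:
# results_curve_concentration = run_experiment(
#     '../data/rates_data.csv', '../data/rates_data.csv',
#     ['biomass (g/L)', 'ethanol (mM)', 'acetate (mM)', 'butanol (mM)', 'butyrate (mM)'],
#     'curved_concentration_to_curved_concentration.pickle')
```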
train_df = pd.read_csv(f'../data/rates_data.csv') test_df = train_df results_curve_concentration = complete_loop( train_df, test_df, full_cfgs, ['flow rate (mL/min)', 'N2', 'H2', 'CO', 'CO2'] , ['biomass (g/L)','ethanol (mM)','acetate (mM)','butanol (mM)','butyrate (mM)'], "curved_concentration_to_curved_concentration.pickle", 'neg_mean_absolute_percentage_error')minmax biomass (g/L) (8, 9, 10) nn 0.478 0.001 {'activation': 'relu', 'hidden_layer_sizes': [100, 40, 20, 20], 'learning_rate': 'adaptive', 'max_iter': 5000} minmax biomass (g/L) (8, 9, 10) svm_rbf 0.525 0.046 {'C': 100, 'epsilon': 0.001, 'gamma': 1000} minmax biomass (g/L) (8, 9, 10) rf 0.625 nan {'max_depth': 24, 'max_samples': 0.05, 'n_estimators': 20} minmax biomass (g/L) (8, 9, 10) en 0.665 0.036 {'alpha': 1e-08, 'l1_ratio': 0.1} minmax biomass (g/L) (8, 9, 10) lasso 0.665 0.037 {'alpha': 1e-10} minmax biomass (g/L) (8, 9, 10) gauss 0.216 -0.027 {'alpha': 1000000000, 'normalize_y': False} minmax biomass (g/L) (8, 9, 10) knn 0.530 0.463 {'algorithm': 'ball_tree', 'leaf_size': 20, 'n_neighbors': 3, 'weights': 'distance'} minmax ethanol (mM) (8, 9, 10) nn 0.505 0.408 {'activation': 'relu', 'hidden_layer_sizes': [80, 80, 60, 80], 'learning_rate': 'adaptive', 'max_iter': 5000} minmax ethanol (mM) (8, 9, 10) svm_rbf 0.473 -0.303 {'C': 1, 'epsilon': 1e-05, 'gamma': 0.01} minmax ethanol ([...]**3. curved concentration => raw concentration**train_df = pd.read_csv(f'../data/rates_data.csv') test_df = pd.read_csv(f'../data/experimental_data.csv') results_curve_rate = complete_loop( train_df, test_df, full_cfgs, ['flow rate (mL/min)', 'N2', 'H2', 'CO', 'CO2'] , ['biomass (g/L)','ethanol (mM)','acetate (mM)','butanol (mM)','butyrate (mM)'], "curved_concentration_to_raw_concentration.pickle", 'neg_mean_absolute_percentage_error')minmax biomass (g/L) (8, 9, 10) nn 0.217 0.444 {'activation': 'tanh', 'hidden_layer_sizes': [20, 100, 100, 100], 'learning_rate': 'adaptive', 'max_iter': 5000} minmax biomass (g/L) (8, 9, 10) svm_rbf nan 0.097 {'C': 10000, 'epsilon': 0.1, 'gamma': 1000} minmax biomass (g/L) (8, 9, 10) rf 0.216 nan {'max_depth': 4, 'max_samples': 0.05, 'n_estimators': 10} minmax biomass (g/L) (8, 9, 10) en 0.211 0.046 {'alpha': 1e-10, 'l1_ratio': 0.1} minmax biomass (g/L) (8, 9, 10) lasso 0.211 0.044 {'alpha': 1e-10} minmax biomass (g/L) (8, 9, 10) gauss 0.003 -0.521 {'alpha': 1000000000, 'normalize_y': False} minmax biomass (g/L) (8, 9, 10) knn 0.217 0.458 {'algorithm': 'ball_tree', 'leaf_size': 35, 'n_neighbors': 1, 'weights': 'distance'} minmax ethanol (mM) (8, 9, 10) nn 0.240 0.393 {'activation': 'tanh', 'hidden_layer_sizes': [20, 40, 20, 80], 'learning_rate': 'adaptive', 'max_iter': 5000} minmax ethanol (mM) (8, 9, 10) svm_rbf 0.250 -0.549 {'C': 10000, 'epsilon': 0.1, 'gamma': 0.01} minmax ethanol [...]**5. 
curved concentration => curved rate**train_df = pd.read_csv(f'../data/rates_data.csv') test_df = train_df results_curve_rate = complete_loop( train_df, test_df, full_cfgs, ['flow rate (mL/min)', 'N2', 'H2', 'CO', 'CO2'] , ['biomass rate','ethanol rate','acetate rate','butanol rate','butyrate rate'], "curved_concentration_to_curved_rate.pickle", 'neg_mean_absolute_percentage_error')minmax biomass rate (8, 9, 10) nn -0.111 0.004 {'activation': 'relu', 'hidden_layer_sizes': [40, 80, 20, 40], 'learning_rate': 'adaptive', 'max_iter': 5000} minmax biomass rate (8, 9, 10) svm_rbf 0.066 -0.174 {'C': 0.1, 'epsilon': 0.0001, 'gamma': 1000} minmax biomass rate (8, 9, 10) rf 0.109 nan {'max_depth': 8, 'max_samples': 0.1, 'n_estimators': 20} minmax biomass rate (8, 9, 10) en 0.111 0.034 {'alpha': 1e-08, 'l1_ratio': 0.30000000000000004} minmax biomass rate (8, 9, 10) lasso 0.111 0.035 {'alpha': 1e-10} minmax biomass rate (8, 9, 10) gauss 0.111 -4.891 {'alpha': 1e-05, 'normalize_y': False} minmax biomass rate (8, 9, 10) knn 0.110 0.062 {'algorithm': 'kd_tree', 'leaf_size': 20, 'n_neighbors': 20, 'weights': 'distance'} minmax ethanol rate (8, 9, 10) nn 0.066 0.325 {'activation': 'relu', 'hidden_layer_sizes': [60, 40, 60, 20], 'learning_rate': 'adaptive', 'max_iter': 5000} minmax ethanol rate (8, 9, 10) svm_rbf 0.028 0.095 {'C': 1e-05, 'epsilon': 1e-05, 'gamma': 1e-05} minmax et[...]2.1.2 Visualize MAPE-based resultsfull_results = pickle.load(open("raw_concentration_to_raw_concentration.pickle", "br")) beautiful_plot(full_results, ['biomass (g/L)','ethanol (mM)','acetate (mM)','butanol (mM)', 'butyrate (mM)'], "my_cc", "1. Raw concentration to raw concentration") full_results = pickle.load(open("curved_concentration_to_curved_concentration.pickle", "br")) beautiful_plot(full_results, ['biomass (g/L)','ethanol (mM)','acetate (mM)','butanol (mM)','butyrate (mM)'], "my_cc", "2. Curved concentration to curved concentration") full_results = pickle.load(open("curved_concentration_to_raw_concentration.pickle", "br")) beautiful_plot(full_results, ['biomass (g/L)','ethanol (mM)','acetate (mM)','butanol (mM)','butyrate (mM)'], "my_cc", "3. Curved concentration to raw concentration") full_results = pickle.load(open("curved_concentration_to_curved_rate.pickle", "br")) beautiful_plot(full_results, ['biomass rate', 'ethanol rate', 'acetate rate', 'butanol rate', 'butyrate rate'], "my_cc", "5. Curved concentration to curved rate")2.2 When Refit goal is R2 2.2.1 Running the experiments**1. 
raw concentration => raw concentration**train_df = pd.read_csv(f'../data/experimental_data.csv') test_df = train_df results_raw_concentration = complete_loop( train_df, test_df, full_cfgs, ['flow rate (mL/min)', 'N2', 'H2', 'CO', 'CO2'] , ['biomass (g/L)','ethanol (mM)','acetate (mM)','butanol (mM)','butyrate (mM)'], "raw_concentration_to_raw_concentration_r2.pickle", 'r2')minmax biomass (g/L) (8, 9, 10) nn -0.115 -0.132 {'activation': 'tanh', 'hidden_layer_sizes': [40, 100, 40, 100], 'learning_rate': 'adaptive', 'max_iter': 5000} minmax biomass (g/L) (8, 9, 10) svm_rbf 0.132 -0.053 {'C': 0.01, 'epsilon': 1e-05, 'gamma': 1000} minmax biomass (g/L) (8, 9, 10) rf 0.212 nan {'max_depth': 16, 'max_samples': 0.05, 'n_estimators': 100} minmax biomass (g/L) (8, 9, 10) en nan -0.057 {'alpha': 0.001, 'l1_ratio': 1.0} minmax biomass (g/L) (8, 9, 10) lasso nan -0.057 {'alpha': 0.001} minmax biomass (g/L) (8, 9, 10) gauss 0.131 -1.033 {'alpha': 1000000000, 'normalize_y': True} minmax biomass (g/L) (8, 9, 10) knn 0.212 -0.175 {'algorithm': 'ball_tree', 'leaf_size': 5, 'n_neighbors': 19, 'weights': 'distance'} minmax ethanol (mM) (8, 9, 10) nn 0.249 0.114 {'activation': 'tanh', 'hidden_layer_sizes': [20, 100, 60, 60], 'learning_rate': 'adaptive', 'max_iter': 5000} minmax ethanol (mM) (8, 9, 10) svm_rbf 0.262 -0.529 {'C': 1000, 'epsilon': 10, 'gamma': 1e-05} minmax eth[...]**2. curved concentration => curved concentration**train_df = pd.read_csv(f'../data/rates_data.csv') test_df = train_df results_curve_concentration = complete_loop( train_df, test_df, full_cfgs, ['flow rate (mL/min)', 'N2', 'H2', 'CO', 'CO2'] , ['biomass (g/L)','ethanol (mM)','acetate (mM)','butanol (mM)','butyrate (mM)'], "curved_concentration_to_curved_concentration_r2.pickle", 'r2')minmax biomass (g/L) (8, 9, 10) nn -0.370 0.001 {'activation': 'relu', 'hidden_layer_sizes': [100, 100, 100], 'learning_rate': 'adaptive', 'max_iter': 5000} minmax biomass (g/L) (8, 9, 10) svm_rbf 0.622 0.046 {'C': 1000, 'epsilon': 0.1, 'gamma': 10} minmax biomass (g/L) (8, 9, 10) rf 0.564 nan {'max_depth': 34, 'max_samples': 0.30000000000000004, 'n_estimators': 10} minmax biomass (g/L) (8, 9, 10) en 0.665 0.036 {'alpha': 1e-10, 'l1_ratio': 1.0} minmax biomass (g/L) (8, 9, 10) lasso 0.665 0.037 {'alpha': 1e-10} minmax biomass (g/L) (8, 9, 10) gauss 0.644 -0.027 {'alpha': 1e-06, 'normalize_y': True} minmax biomass (g/L) (8, 9, 10) knn 0.621 0.463 {'algorithm': 'ball_tree', 'leaf_size': 5, 'n_neighbors': 30, 'weights': 'distance'} minmax ethanol (mM) (8, 9, 10) nn 0.553 0.408 {'activation': 'relu', 'hidden_layer_sizes': [80, 40, 100, 100], 'learning_rate': 'adaptive', 'max_iter': 5000} minmax ethanol (mM) (8, 9, 10) svm_rbf 0.552 -0.303 {'C': 10000, 'epsilon': 10, 'gamma': 0.0001} minmax[...]**3. 
curved concentration => raw concentration**train_df = pd.read_csv(f'../data/rates_data.csv') test_df = pd.read_csv(f'../data/experimental_data.csv') results_curve_rate = complete_loop( train_df, test_df, full_cfgs, ['flow rate (mL/min)', 'N2', 'H2', 'CO', 'CO2'] , ['biomass (g/L)','ethanol (mM)','acetate (mM)','butanol (mM)','butyrate (mM)'], "curved_concentration_to_raw_concentration_r2.pickle", 'r2')minmax biomass (g/L) (8, 9, 10) nn 0.218 0.445 {'activation': 'tanh', 'hidden_layer_sizes': [100, 80, 20, 100], 'learning_rate': 'adaptive', 'max_iter': 5000} minmax biomass (g/L) (8, 9, 10) svm_rbf 0.215 0.097 {'C': 10000, 'epsilon': 0.01, 'gamma': 0.001} minmax biomass (g/L) (8, 9, 10) rf 0.218 nan {'max_depth': 38, 'max_samples': 0.1, 'n_estimators': 60} minmax biomass (g/L) (8, 9, 10) en 0.211 0.046 {'alpha': 1e-10, 'l1_ratio': 0.1} minmax biomass (g/L) (8, 9, 10) lasso 0.211 0.044 {'alpha': 1e-10} minmax biomass (g/L) (8, 9, 10) gauss 0.216 -0.521 {'alpha': 0.1, 'normalize_y': True} minmax biomass (g/L) (8, 9, 10) knn 0.213 0.458 {'algorithm': 'kd_tree', 'leaf_size': 5, 'n_neighbors': 30, 'weights': 'distance'} minmax ethanol (mM) (8, 9, 10) nn 0.263 0.393 {'activation': 'relu', 'hidden_layer_sizes': [60, 80, 100, 100], 'learning_rate': 'adaptive', 'max_iter': 5000} minmax ethanol (mM) (8, 9, 10) svm_rbf 0.256 -0.549 {'C': 1000, 'epsilon': 0.1, 'gamma': 0.1} minmax ethanol (mM) (8[...]**5. curved concentration => curved rate**train_df = pd.read_csv(f'../data/rates_data.csv') test_df = train_df results_curve_rate = complete_loop( train_df, test_df, full_cfgs, ['flow rate (mL/min)', 'N2', 'H2', 'CO', 'CO2'] , ['biomass rate','ethanol rate','acetate rate','butanol rate','butyrate rate'], "curved_concentration_to_curved_rate_r2.pickle", 'r2')minmax biomass rate (8, 9, 10) nn -0.041 0.004 {'activation': 'tanh', 'hidden_layer_sizes': [100, 100, 100, 100], 'learning_rate': 'adaptive', 'max_iter': 5000} minmax biomass rate (8, 9, 10) svm_rbf 0.093 -0.174 {'C': 0.1, 'epsilon': 0.1, 'gamma': 1000} minmax biomass rate (8, 9, 10) rf 0.107 nan {'max_depth': 30, 'max_samples': 0.1, 'n_estimators': 20} minmax biomass rate (8, 9, 10) en 0.111 0.034 {'alpha': 1e-06, 'l1_ratio': 0.1} minmax biomass rate (8, 9, 10) lasso 0.111 0.035 {'alpha': 1e-05} minmax biomass rate (8, 9, 10) gauss 0.111 -4.891 {'alpha': 1e-09, 'normalize_y': False} minmax biomass rate (8, 9, 10) knn 0.111 0.062 {'algorithm': 'ball_tree', 'leaf_size': 10, 'n_neighbors': 28, 'weights': 'distance'} minmax ethanol rate (8, 9, 10) nn -0.009 0.325 {'activation': 'tanh', 'hidden_layer_sizes': [60, 100, 80, 20], 'learning_rate': 'adaptive', 'max_iter': 5000} minmax ethanol rate (8, 9, 10) svm_rbf 0.106 0.095 {'C': 1000, 'epsilon': 10, 'gamma': 0.001} minmax ethanol rate (8,[...]2.2.2 Visualize R2-based resultsfull_results = pickle.load(open("raw_concentration_to_raw_concentration_r2.pickle", "br")) beautiful_plot(full_results, ['biomass (g/L)','ethanol (mM)','acetate (mM)','butanol (mM)','butyrate (mM)'], "my_cc", "1. Raw concentration to raw concentration") full_results = pickle.load(open("curved_concentration_to_curved_concentration_r2.pickle", "br")) beautiful_plot(full_results, ['biomass (g/L)','ethanol (mM)','acetate (mM)','butanol (mM)','butyrate (mM)'], "my_cc", "2. Curved concentration to curved concentration") full_results = pickle.load(open("curved_concentration_to_raw_concentration_r2.pickle", "br")) beautiful_plot(full_results, ['biomass (g/L)','ethanol (mM)','acetate (mM)','butanol (mM)','butyrate (mM)'], "my_cc", "3. 
Curved concentration to raw concentration") full_results = pickle.load(open("curved_concentration_to_curved_rate_r2.pickle", "br")) beautiful_plot(full_results, ['biomass rate', 'ethanol rate', 'acetate rate', 'butanol rate', 'butyrate rate'], "my_cc", "5. Curved concentration to curved rate")Part III: What makes a property attractive?Can you suggest to your friends what other people are looking for when using the services of Airbnb in Boston? It might help them make a better choice… We use feature `availability_30` as a proxy for the attractiveness of a property. This feature represents the number of days a property is available in next 30 days. In this analysis, we consider less available properties to be more attractive.# Read necessary libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline # Read data df = pd.read_csv('data/listings.csv') df.head() # Drop the rows with missing response values df = df.dropna(subset = ['price'], axis = 0) # Drop columns with all NaN values df = df.dropna(how = 'all', axis = 1)In the figures below, you can see that the analysis with respect to neighbourhood suggests the most attractive properties to be located in Financial District.Villas are the most attractive type of property, while private room dominates with respect to the type of room. If the host of a property is a super-host and/or has a profile picture, the attractiveness of the property raises.# How does the neighbourhood affect the attractiveness? values = df.groupby('neighbourhood').availability_30.mean().sort_values() plt.figure(figsize=(10,10)) (values/df.shape[0]).plot(kind="bar"); plt.title("Attractiveness"); plt.ylabel('availability_30 (average)'); plt.xlabel('Neighbourhood'); # How does the property_type affect the attractiveness? values = df.groupby('property_type').availability_30.mean().sort_values() plt.figure(figsize=(10,10)) (values/df.shape[0]).plot(kind="bar"); plt.title("Attractiveness"); plt.ylabel('availability_30 (average)'); plt.xlabel('Property type'); # How does the room_type affect the attractiveness? values = df.groupby('room_type').availability_30.mean().sort_values() plt.figure(figsize=(10,10)) (values/df.shape[0]).plot(kind="bar"); plt.title("Attractiveness"); plt.ylabel('availability_30 (average)'); plt.xlabel('Room type'); # How does the bed_type affect the attractiveness? values = df.groupby('bed_type').availability_30.mean().sort_values() plt.figure(figsize=(10,10)) (values/df.shape[0]).plot(kind="bar"); plt.title("Attractiveness"); plt.ylabel('availability_30 (average)'); plt.xlabel('Bed type'); df.groupby(['neighbourhood','property_type', 'room_type']).availability_30.mean().sort_values().head() # How does the review_scores_value affect the attractiveness? values = df.groupby('review_scores_value').availability_30.mean().sort_values() plt.figure(figsize=(10,10)) (values/df.shape[0]).plot(kind="bar"); plt.title("Attractiveness"); plt.ylabel('availability_30 (average)'); plt.xlabel('review_scores_value'); # How does the host_has_profile_pic affect the attractiveness? values = df.groupby('host_has_profile_pic').availability_30.mean().sort_values() plt.figure(figsize=(10,10)) (values/df.shape[0]).plot(kind="bar"); plt.title("Attractiveness"); plt.ylabel('availability_30 (average)'); plt.xlabel('host_has_profile_pic'); # How does the host_is_superhost affect the attractiveness? 
values = df.groupby('host_is_superhost').availability_30.mean().sort_values() plt.figure(figsize=(10,10)) (values/df.shape[0]).plot(kind="bar"); plt.title("Attractiveness"); plt.ylabel('availability_30 (average)'); plt.xlabel('host_is_superhost');
Create dataset Dataset preparation 1. Directory structure for the dataset (suitable for local development with PyCharm and for training in the FloydHub cloud) 2. Create training images with a camera or download them from Google etc. 3. Annotate the images with LabelImg (https://github.com/tzutalin/labelImg) => annotations in Pascal VOC format 1. Class names ![LabelImg](notebook\images\labelimg.png) 4. Correct the annotation paths from absolute paths to relative paths - Create the FloydHub dataset... all file paths with "/" instead of "\", file paths and names all lower case => compatibility between Windows and Linux (FloydHub)!python correct_path_in_voc_annotations.py -d floyd/input/fabian
Neural Machine Translation by Jointly Learning to Align and Translate, Bahdanau, Cho and Bengio, *ICLR 2015*https://arxiv.org/pdf/1409.0473.pdfimport os import re from typing import List from IPython.display import Image import tensorflow as tf from tensorflow.keras.preprocessing.text import Tokenizer
0 Setup Sample ExamplesThe following examples are English-to-French translation pairs sampled from [wmt14_translate](https://www.tensorflow.org/datasets/catalog/wmt14_translate#wmt14_translatefr-en). Our task is to train a model to translate English sentences to French.sample_en = [ "Crop insurance payments include only government crop insurance programs; private hail insurance payments are excluded.", "Activities of the second type will be a major determinant of the successful implementation of a language policy.", "There are no known natural sources of acrylonitrile.", "James recounts how the community was developed." ] sample_fr = [ "Les indemnités d’assurance-récolte comprennent uniquement celles des programmes publics; les indemnités de l’assurance-grêle privée sont exclues.", "Pour la mise en oeuvre, la deuxième catégorie d'activités constitue un déterminant essentiel du succès de la politique linguistique.", "On ne connaît aucune source naturelle d'acrylonitrile.", "Mme James raconte comment la collectivité a été créée." ]
Preprocessingdef PreprocessSentence(sentence: str) -> str: """Preprocess an input sentence, make it consumable by the model.""" sentence = sentence.lower().strip() # Add whitespace after certain special characters. sentence = re.sub(r"([,.?!$%'])", r" \1 ", sentence) # Add <start> and <end> tokens to the sentence. sentence = " <start> " + sentence + " <end> " # Remove the redundant whitespaces. sentence = re.sub(r"[' ']+", " ", sentence) return sentence sample_en = [PreprocessSentence(x) for x in sample_en] sample_fr = [PreprocessSentence(x) for x in sample_fr] print("Sample english sentences:") for en in sample_en: print(en) print("\nSample french sentences:") for fr in sample_fr: print(fr)
Sample english sentences: crop insurance payments include only government crop insurance programs; private hail insurance payments are excluded . activities of the second type will be a major determinant of the successful implementation of a language policy . there are no known natural sources of acrylonitrile . james recounts how the community was developed . Sample french sentences: les indemnités d’assurance-récolte comprennent uniquement celles des programmes publics; les indemnités de l’assurance-grêle privée sont exclues .
pour la mise en oeuvre , la deuxième catégorie d activités constitue un déterminant essentiel du succès de la politique linguistique . on ne connaã®t aucune source naturelle d acrylonitrile . mme james raconte comment la collectivité a été créée . Tokenizationdef GetTokenizer(sentences: List[str]) -> Tokenizer: """Create tokenizer.""" tokenizer = Tokenizer(filters="", oov_token="") tokenizer.fit_on_texts(sentences) return tokenizer en_tokenizer = GetTokenizer(sample_en) en_vocab_size = len(en_tokenizer.word_index) en_sequences = en_tokenizer.texts_to_sequences(sample_en) print(f"English vocab size: {en_vocab_size}\n") print(en_tokenizer.word_index) print(f"\nInput sequences:") for seq in en_sequences: print(seq) print("\n") fr_tokenizer = GetTokenizer(sample_fr) fr_vocab_size = len(fr_tokenizer.word_index) fr_sequences = fr_tokenizer.texts_to_sequences(sample_fr) print(f"French vocab size: {fr_vocab_size}\n") print(fr_tokenizer.word_index) print(f"\nTarget sequences:") for seq in fr_sequences: print(seq)English vocab size: 41 {'': 1, '': 2, '.': 3, '': 4, 'of': 5, 'insurance': 6, 'the': 7, 'crop': 8, 'payments': 9, 'are': 10, 'a': 11, 'include': 12, 'only': 13, 'government': 14, 'programs;': 15, 'private': 16, 'hail': 17, 'excluded': 18, 'activities': 19, 'second': 20, 'type': 21, 'will': 22, 'be': 23, 'major': 24, 'determinant': 25, 'successful': 26, 'implementation': 27, 'language': 28, 'policy': 29, 'there': 30, 'no': 31, 'known': 32, 'natural': 33, 'sources': 34, 'acrylonitrile': 35, 'james': 36, 'recounts': 37, 'how': 38, 'community': 39, 'was': 40, 'developed': 41} Input sequences: [2, 8, 6, 9, 12, 13, 14, 8, 6, 15, 16, 17, 6, 9, 10, 18, 3, 4] [2, 19, 5, 7, 20, 21, 22, 23, 11, 24, 25, 5, 7, 26, 27, 5, 11, 28, 29, 3, 4] [2, 30, 10, 31, 32, 33, 34, 5, 35, 3, 4] [2, 36, 37, 38, 7, 39, 40, 41, 3, 4] French vocab size: 51 {'': 1, '': 2, '.': 3, '': 4, 'la': 5, 'les': 6, 'indemnités': 7, 'de': 8, 'd': 9, 'd’assurance-récolte': 10, 'comprennent': [...]PaddingPad the sequences by 0 to get same sequence_length.def Padding(sequences: List[List[int]]) -> List[List[int]]: """Pad sequences.""" padded = tf.keras.preprocessing.sequence.pad_sequences( sequences, padding="post") return padded en_sequences = Padding(en_sequences) max_input_len = len(en_sequences[0]) print(f"Padded input sequences:") for seq in en_sequences: print(seq) print(f"max_input_len: {max_input_len}") fr_sequences = Padding(fr_sequences) max_target_len = len(fr_sequences[0]) print(f"\nPadded target sequences:") for seq in fr_sequences: print(seq) print(f"max_target_len: {max_target_len}")Padded input sequences: [ 2 8 6 9 12 13 14 8 6 15 16 17 6 9 10 18 3 4 0 0 0] [ 2 19 5 7 20 21 22 23 11 24 25 5 7 26 27 5 11 28 29 3 4] [ 2 30 10 31 32 33 34 5 35 3 4 0 0 0 0 0 0 0 0 0 0] [ 2 36 37 38 7 39 40 41 3 4 0 0 0 0 0 0 0 0 0 0 0] max_input_len: 21 Padded target sequences: [ 2 6 7 10 11 12 13 14 15 16 6 7 8 17 18 19 20 3 4 0 0 0 0 0] [ 2 21 5 22 23 24 25 5 26 27 9 28 29 30 31 32 33 34 8 5 35 36 3 4] [ 2 37 38 39 40 41 42 9 43 3 4 0 0 0 0 0 0 0 0 0 0 0 0 0] [ 2 44 45 46 47 5 48 49 50 51 3 4 0 0 0 0 0 0 0 0 0 0 0 0] max_target_len: 24AbstractCurrent models usually choose a encoder-decoder architecture. The encoder encodes a source sequence into a fixed-length hidden state from which a decoder generates a target sequence. The paper argues that the use of a fixed-length hidden state is a bottleneck in improving the performance. 
The paper, instead, proposes a (soft-)search for parts of a source sequence that are relevant to predicting a target word. (i.e. the target word should "pay attention" to certain input words.) 1 Introduction & Background***Neural machine translation*** attempts to build and train **a single, large neural network** that reads a sentence and outputs a correct translation. (While tradation phtrase-based systems are usually consist of many sub-components that are tuned separately.) Most of the proposed models belong to a family of ***encoder-decoders*** - [Sequence to sequence learning with neural networks](https://arxiv.org/pdf/1409.3215.pdf) , , and *NIPS 2014* **(Seq2Seq)** - [ Learning phrase representations using rnn encoder-decoder for statistical machine translation](https://arxiv.org/pdf/1406.1078.pdf) , *CoRR 2014* **(Enc-Dec)** A potential issue with this approach lies on the fact that all necessary information of a source sequence need to be compressed into a fixed-length vector (hidden state). This makes it hard for the model to cope with long sentences. Following paper showed that the performance of a basic encoder-decoder deteriorates rapidly as the length of an input sentence increases. - [On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](https://arxiv.org/pdf/1409.1259.pdf) , , , *SSST-8 2014* To address the issue, the paper introduces attention mechanism. The model encodes the input sequence into context vectors Each time step the decoder predicts based on the context vectors and previous decoder state. 2 Model ArchitectureImage(filename='../pics/bahdanau_attention.png')2.1 EncoderPaper chooses a bidirectional-GRU structure to take care of both preceding and following words.The forward GRU encodes input sequence into *forward hidden states* $(\overrightarrow{h}_1, ..., \overrightarrow{h}_{T_x})$.The backward GRU encodes input sequence into *backward hidden states* $(\overleftarrow{h}_1, ..., \overleftarrow{h}_{T_x})$.The hidden vector for each token $x_j$ is then the concatenation of forward and backward state.$$h_j=[\overrightarrow{h}_j^T,\overleftarrow{h}_j^T]^T$$In the paper `emb_size=600`, `hidden_size=1000`def Encoder(input_vocab_size: int, emb_size: int, hidden_size: int, name: str="encoder"): """Bi-directional GRU encoder. Inputs: sequences: Indices of input sequence tokens, of shape (batch_size, input_seq_len) Args: input_vocab_size: Size of input vocab. emb_size: Dimensionality of the embeddings. hidden_size: Dimensionality of the layers. name: Name of the Encoder. Returns: Encoder output, of shape (batch_size, input_seq_len, hidden_size) Last encoder state, of shape (batch_size, hidden_size) """ sequences = tf.keras.Input(shape=(None, ), name="input_sequences") embedding_layer = tf.keras.layers.Embedding(input_dim=input_vocab_size, output_dim=emb_size) bi_gru_layer = tf.keras.layers.Bidirectional( tf.keras.layers.GRU(units=hidden_size, return_sequences=True, return_state=True)) reduce_states = tf.keras.layers.Dense(hidden_size) # Embedding layer. (batch_size, input_seq_len, emb_size) embeddings = embedding_layer(sequences) # Bi-GRU layer. # - encoder_outputs: (batch_size, input_seq_len, hidden_size*2) # - forward_state: (batch_size, hidden_size) # - backward_state: (batch_size, hidden_size) encoder_output, forward_state, backward_state = bi_gru_layer(embeddings) # Reduce the forward and backward state into a single initial state for the # decoder since decoder is not Bi-directional. 
(batch_size, hidden_size) state = reduce_states(tf.concat([forward_state, backward_state], axis=1)) return tf.keras.Model( inputs=[sequences], outputs=[encoder_output, state], name=name) # Example EMB_SIZE = 600 HIDDEN_SIZE = 1000 encoder = Encoder(input_vocab_size=en_vocab_size+1, # +1 for padding emb_size=EMB_SIZE, hidden_size=HIDDEN_SIZE) encoder_output, encoder_state = encoder(en_sequences) tf.keras.utils.plot_model(encoder, show_shapes=True)2.2 Masking Padding maskMask all the pad tokens in the batch of sequences, to make sure the model doesn't treat paddings as inputs.def GetPaddingMask(sequences: tf.Tensor) -> tf.Tensor: """ Create padding mask. Args: sequences: input sequences, of shape (batch_size, seq_len) Returns: mask: mask tensor of shape (batch_size, seq_len) """ mask = tf.cast(tf.not_equal(sequences, tf.constant(0)), tf.float32) return mask # Example input_padding_mask = GetPaddingMask(en_sequences) print("Input padding mask:") print(input_padding_mask)Input padding mask: tf.Tensor( [[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 0.] [1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.] [1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]], shape=(4, 21), dtype=float32)2.3 Bahdanau attentionFor timestep $i$, given $s_{i-1}$ is the hidden state from previous decoding step and $h=\{h_j\}$ are encoder outputsThe attention is calculated in three steps:- **Score** Q (hidden state in previous decoding steps) and K (encoder outputs)$$e_{ij}=W_a\tanh(W_ss_{t-1}+W_hh))$$- **Aignment** with softmax to get attention weights$$\alpha_{ij}=\frac{exp(e_{ij})}{\sum_{k=1}^{T_X}exp(e_{ik})}$$- **Calculate Attention** as weighted sum of V (encoder outputs)$$c_i=\sum_{j=1}^{T_x}\alpha_{ij}h_j$$class BahdanauAttention(tf.keras.layers.Layer): """ Bahdanau attention layer. Args: hidden_size: Dimensionality of the layers. name: Name of the layer. """ def __init__(self, hidden_size: int, name: str="attention"): super(BahdanauAttention, self).__init__(name=name) self.query_linear = tf.keras.layers.Dense(units=hidden_size) self.value_linear = tf.keras.layers.Dense(units=hidden_size) self.score_linear = tf.keras.layers.Dense(units=1) def call(self, query: tf.Tensor, values: tf.Tensor, input_padding_mask: tf.Tensor): """ Args: query: The query tensor of shape (batch_size, hidden_size) values: The query tensor of shape (batch_size, input_seq_len, hidden_size*2) input_padding_mask: The mask tensor of shape (batch_size, input_seq_len) """ q = self.query_linear(tf.expand_dims(query, 1)) v = self.value_linear(values) # Attention - Score (Additive attention), of shape (batch_size, input_seq_len, 1) score = self.score_linear(tf.nn.tanh(q+v)) # Attention - Alignment # - Softmax on the second axis (input_seq_len) so that the scores add up to 1. attention_weights = tf.nn.softmax(score, axis=1) # - Mask the paddings in encoder sequence so their are not included in the attentions. 
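        # Note (added comment): in this implementation the padding mask is applied to
        # attention_weights *after* the softmax, so the weights over the real (non-pad)
        # tokens no longer sum exactly to 1. A common alternative (not what this notebook
        # does) is to push masked positions toward -infinity before the softmax, e.g.:
        #   score += (1.0 - tf.expand_dims(tf.cast(input_padding_mask, tf.float32), -1)) * -1e9
        #   attention_weights = tf.nn.softmax(score, axis=1)
        # which keeps the remaining weights normalized over the unmasked tokens.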
input_padding_mask = tf.expand_dims(input_padding_mask, axis=-1) attention_weights *= tf.cast(input_padding_mask, tf.float32) # Attention - Calculate context vector, of shape (batch_size, 1, hidden_dim) context_vector = attention_weights * v context_vector = tf.reduce_sum(context_vector, axis=1) return context_vector, tf.squeeze(attention_weights, axis=-1) # Example attention = BahdanauAttention(hidden_size=HIDDEN_SIZE) context_vector, attention_weights = attention( query=encoder_state, values=encoder_output, input_padding_mask=input_padding_mask) print("Context vector:") print(context_vector) print("\nattention weights:") print(attention_weights)Context vector: tf.Tensor( [[ 0.00132546 -0.00354546 0.00955405 ... 0.00307471 -0.00306587 0.00538846] [-0.00297993 -0.00270179 -0.00343082 ... -0.00329211 0.00572293 0.0045021 ] [-0.00225976 -0.000354 0.00119226 ... 0.0037788 -0.00380389 0.00520081] [ 0.00453089 0.00362038 0.00112795 ... 0.00231981 0.00416471 0.00056484]], shape=(4, 1000), dtype=float32) attention weights: tf.Tensor( [[0.04922134 0.04799827 0.04823243 0.04718865 0.04742919 0.04774665 0.04849301 0.04749199 0.04790553 0.04853286 0.04846075 0.04758029 0.04810355 0.04644806 0.0469764 0.04682632 0.04730159 0.04709158 0. 0. 0. ] [0.04926461 0.04859963 0.04755028 0.04779278 0.04890285 0.04811487 0.04632172 0.04605797 0.04722239 0.04735815 0.04695009 0.0469963 0.04723995 0.04751648 0.04756604 0.0467879 0.04765843 0.04819013 0.04799059 0.04801839 0.04790051] [0.0492771 0.04750408 0.04858499 0.04822636 0.04836567 0.04749927 0.04673134 0.04683707 0.04763693 0.[...]2.4 DecoderThe decoder defines a probability over the translation $y$ by decomposing the joint probability$$p(y)=\sum_{i=1}^{T_y}p(y_i|\{y_1, ..., y_{t-1}\}, c)$$With an RNN, each conditional probability is modeled as$$p(y_i|\{y_1, ..., y_{t-1}\}, c)=RNN(y_{t-1}, s_{t-1}, c)$$The decoder first get context_vector with ($s_{t-1}$, $c$), then concatenate it with the embeddings of target token and feed into the RNNdef Decoder(target_vocab_size: int, emb_size: int, hidden_size: int, name: str="decoder"): """GRU decoder. Inputs: sequences: Indices of target sequence tokens, of shape (batch_size, 1) decoder_hidden: hidden state from previous decode step. of shape (batch_size, hidden_size) encoder_output: of shape (batch_size, input_seq_len, hidden_size) input_padding_mask: The mask tensor of shape (batch_size, input_seq_len) Args: target_vocab_size: Size of target vocab. emb_size: Dimensionality of the embeddings. hidden_size: Dimensionality of the layers. name: Name of the Decoder. Returns: Decoder output, of shape (batch_size, target_vocab_size) Last decoder state, of shape (batch_size, hidden_size) attention weights of shape (batch_size, input_seq_len) """ sequences = tf.keras.Input(shape=(1, ), name="target_sequences") decoder_hidden = tf.keras.Input(shape=(hidden_size, ), name="decoder_hidden") encoder_output = tf.keras.Input(shape=(None, hidden_size*2), name="encoder_output") input_padding_mask = tf.keras.Input(shape=(None, ), name="mask") embedding_layer = tf.keras.layers.Embedding(input_dim=target_vocab_size, output_dim=emb_size) attention_layer = BahdanauAttention(hidden_size=hidden_size) gru_layer = tf.keras.layers.GRU(units=hidden_size, return_sequences=True, return_state=True) output_linear = tf.keras.layers.Dense(target_vocab_size) # Embedding layer. 
(batch_size, 1, emb_size) embeddings = embedding_layer(sequences) # Attention of shape (batch_size, hidden_dim) context_vector, attention_weights = attention_layer(query=decoder_hidden, values=encoder_output, input_padding_mask=input_padding_mask) # Concat embeddings and context vector, of shape (batch_size, 1, emb_size + hidden_size) decoder_input = tf.concat([tf.expand_dims(context_vector, 1), embeddings], axis=-1) # GRU layer. # - gru_outputs: (batch_size, 1, hidden_size) # - state: (batch_size, hidden_size) gru_output, state = gru_layer(decoder_input) # Get decoder output gru_output = tf.reshape(gru_output, (-1, hidden_size)) decoder_output = output_linear(gru_output) return tf.keras.Model( inputs=[sequences, decoder_hidden, encoder_output, input_padding_mask], outputs=[decoder_output, state, attention_weights], name=name) # Example decoder = Decoder(target_vocab_size=fr_vocab_size, emb_size=EMB_SIZE, hidden_size=HIDDEN_SIZE) decoder_output, state, attention_weights = decoder( [fr_sequences[:, :1], encoder_state, encoder_output, input_padding_mask]) tf.keras.utils.plot_model(decoder, show_shapes=True)Predicción de casos de Dengue preservando la privacidad de los datos> Demo en la construcción de un modelo para la predicción de casos de dengue con el objetivo de preservar la privacidad de los datos utilizados. Esta implementación trata de reproducir el paper [Prediction of Dengue Cases in Paraguay Using Artificial NeuralNetworks](https://csce.ucmss.com/books/LFS/CSREA2017/HIM3277.pdf) simulando una situación donde el ```dataset``` está distribuido en diferentes entidades y por motivos de privacidad, los mismos no pueden ser compartidos entre entidades. MotivaciónLas tecnicas de machine learning pueden ayudar a mejorar el diagnostico de enfermedades, como detección de tumores en imagenes de MRI, detectar con tiempo retinopatía diabética en imagenes de retina, detección de cancer en imagenes de melanoma, hasta **detectar el brotes de enfermedades** entre varias otras aplicaciónes más. Pero este tipo de datos son bastante sensibles ya que son datos de los pacientes, una filtración de este tipo de información sería muy grave. Pero no solo filtraciones, por culpa de varios escandalos respecto al uso de los datos sensibles de usuarios de parte de grandes empresas como Equifax, Facebook y Google generaron gran desconfianza en los mismos sobre como estos manipulan datos de sus usuarios. 
Un caso reciente es el uso de datos de pacientes para el ```Proyecto Nightingale``` de Google, el cual se encuentra bajo examinación por parte del gobierno estadounidense:- [**Google's "Project Nightingale" faces government inquiry over patient privacy**](https://www.cbsnews.com/news/googles-project-nightingale-faces-government-inquiry-over-patient-privacy/)Mediante técnicas de preservación de privacidad como [*Federated Learning*](https://ai.googleblog.com/2017/04/federated-learning-collaborative.html), [*Differential Privacy*](https://privacytools.seas.harvard.edu/files/privacytools/files/pedagogical-document-dp_new.pdf), [*Homomorphic Encryption*](https://www.wikiwand.com/en/Homomorphic_encryption) entre otros es posible crear modelos útiles preservando la privacidad de los datos de los usuarios> Obs.: En el notebook de [federated learning](13-federated-learning.ipynb) se dan más detalle sobre federated learning, homomorphic encryption y sus posibles aplicaciónesimport os import sys module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path) sys.setrecursionlimit(15000)Los modelos de este notebook se desarrollaran utilizando el framework desarrollado en los ```notebooks``` anteriores.Se importan las clases necesarias para definir una red neuronal del framework ```lightdlf``` además de definirse un método de evaluación del modeloimport numpy as np import pandas as pd import copy import phe from lightdlf_old.cpu.core import Tensor from lightdlf_old.cpu.layers import Linear, Relu, Sigmoid, Tanh, MSELoss, Sequential from lightdlf_old.cpu.optimizers import SGD np.random.seed(123) def rmse (pred, y): se_sum = 0 for i in range(len(pred)): se = (pred[i] - y[i]) * (pred[i] - y[i]) se_sum += se mse = se_sum/len(pred) rmse = np.sqrt(mse) return rmseSe carga el dataset con datos epidemiológicos y climatológicosdf = pd.read_csv('datasets/dengue/asu_dengue_dataset.csv') df.head() # for column in df.columns: # print(column)Se toman las columnas mencionadas en el ```paper``` para la creación del modelodf_reduced = df[['cantidad', 'cantidad(-1)', 'temperatura_max_media(-1)', 'temperatura_max_media(-2)', 'temperatura_max_media(-3)', 'temperatura_max_media(-4)', 'temperatura_max_media(-5)', 'temperatura_max_media(-6)', 'temperatura_max_media(-7)', 'temperatura_max_media(-8)', 'temperatura_max_media(-9)', 'temperatura_max_media(-10)', 'temperatura_max_media(-11)', 'lluvia_mm(-1)', 'lluvia_mm(-2)', 'humedad_min_media_porc(-1)', 'humedad_min_media_porc(-2)', 'humedad_min_media_porc(-3)', 'humedad_min_media_porc(-4)', 'humedad_min_media_porc(-5)', 'humedad_min_media_porc(-6)', 'humedad_min_media_porc(-7)', 'humedad_min_media_porc(-8)', 'humedad_min_media_porc(-9)', 'humedad_min_media_porc(-10)', 'humedad_min_media_porc(-11)']] df_reduced.dtypes df_reduced.head() df_reduced.describe() max_values = df_reduced.max() min_values = df_reduced.min() # Normalización del dataset df_normalizado = (df_reduced - df_reduced.min())/(df_reduced.max() - df_reduced.min()) df_normalizado.head()Definición del conjunto de entrenamiento y de pruebaY = df_normalizado[['cantidad']].to_numpy() X = df_normalizado.drop(['cantidad'], axis=1).to_numpy() Y[0], X[0] len(X[0]) bunch_size = int(len(Y)/4) bunch_size x_train = X[0:len(Y)-bunch_size] x_test = X[-bunch_size:] y_train = Y[0:len(Y)-bunch_size] y_test = Y[-bunch_size:] len(y_train), len(y_test)Definicion y Entrenamiento del ModeloA modo de prueba se entrena y evalua un modelo centralizadonp.random.seed(0) data = Tensor(x_train, 
autograd=True) target = Tensor(y_train, autograd=True) # model = Sequential([Linear(25,4), Relu(), Linear(4,3), Relu(), Linear(3,1), Sigmoid()]) # model = Sequential([Linear(25,4), Sigmoid(), Linear(4,5), Sigmoid(), Linear(5,1), Sigmoid()]) # model = Sequential([Linear(25,4), Relu(), Linear(4,6), Relu(), Linear(6,1), Sigmoid()]) # model = Sequential([Linear(25,4), Sigmoid(), Linear(4,6), Sigmoid(), Linear(6,1), Sigmoid()]) model = Sequential([Linear(25,4), Tanh(), Linear(4,6), Tanh(), Linear(6,1), Sigmoid()]) criterion = MSELoss() # optim = SGD(parameters=model.get_parameters(), alpha=0.01) optim = SGD(parameters=model.get_parameters(), alpha=0.01) # 500 for i in range(500): # Predecir pred = model.forward(data) # Comparar loss = criterion.forward(pred, target) # Aprender loss.backward(Tensor(np.ones_like(loss.data))) optim.step() if (i%100 == 0): print(loss) test_data = Tensor(x_test) test_target = Tensor(y_test) pred = model.forward(test_data) pred_list = [x[0] for x in pred.data] test_target_list = [x[0] for x in test_target.data] comparison = pd.DataFrame({'actual':test_target_list, 'predicted':pred_list}) comparison.head() denormalized_pred_list = [(x[0] * (max_values['cantidad'] - min_values['cantidad'])) + min_values['cantidad'] for x in pred.data] denormalized_test_target_list = [(x[0] * (max_values['cantidad'] - min_values['cantidad'])) + min_values['cantidad'] for x in test_target.data] denormalized_comparison = pd.DataFrame({'actual':denormalized_test_target_list, 'predicted':denormalized_pred_list}) denormalized_comparison.head() print('RMSE:',rmse(pred_list, test_target_list))RMSE: 0.022350545248195304Modelo de Aprendizaje Federado con Cifrado HomomorficoComo se dijo al inicio, vamos hacer las siguientes suposiciones sobre nuestro ```dataset```: - Se encuentra distribuido entre 3 instituciones- Contienen datos confidenciales que no pueden ser compartidos entre sí ni a un terceroCon estas condiciónes, el siguiente codigo pretende mostrar como podemos obtener un modelo capaz de predecir casos de dengue sin la necesidad de que el dueño del modelo o (```model owner```) tenga que acceder directamente a los datos de las instituciones (```data owners```) Definición de la arquitectura y del metodo de entrenamiento del modelonp.random.seed(0) data = Tensor(x_train, autograd=True) target = Tensor(y_train, autograd=True) layers = [Linear(25,4), Relu(), Linear(4,3), Relu(), Linear(3,1), Sigmoid()] model = Sequential(layers) def train(model, data, target, iterations=5, alpha=0.01, print_loss=True): criterion = MSELoss() optim = SGD(parameters=model.get_parameters(), alpha=alpha) for i in range(iterations): # Predecir pred = model.forward(data) # Comparar loss = criterion.forward(pred, target) # Aprender loss.backward(Tensor(np.ones_like(loss.data))) optim.step() if (i%100 == 0 and print_loss): sys.stdout.write("\r\tLoss:" + str(loss)) return modelDefinicion de funciones auxiliares para manipular modelos encryptadosdef encrypt_tensor(matrix, pubkey): encrypt_weights = list() for vector in matrix: # print(vector) for val in vector: # print(val) encrypt_weights.append(pubkey.encrypt(val)) restore = np.array(encrypt_weights).reshape(matrix.shape) # print(restore) return restore def decrypt_tensor(matrix, privkey): decrypted_weights = list() for vector in matrix: # print(vector) for val in vector.flatten(): # print(val) decrypted_weights.append(privkey.decrypt(val)) restore = np.array(decrypted_weights).reshape(matrix.shape) # print(restore) return restore def encrypt_sequential_model(model, 
pubkey): for layer in model.layers: if type(layer) == Linear: layer.weight.data = encrypt_tensor(layer.weight.data, pubkey) return model def decrypt_sequential_model(model, n_models, privkey): for layer in model.layers: if type(layer) == Linear: layer.weight.data = decrypt_tensor(layer.weight.data, privkey)/n_models return model def zero_sequential_model(model): for layer in model.layers: if type(layer) == Linear: layer.weight.data = np.zeros_like(layer.weight.data) return model def aggregate_models(list_of_models): aggregated_model = zero_sequential_model(copy.deepcopy(list_of_models[0])) # print(list_of_models) for model in list_of_models: # print(model) for i in range(len(model.layers)): if type(model.layers[i]) == Linear: aggregated_model.layers[i].weight.data += model.layers[i].weight.data return aggregated_model def train_and_encrypt(model, data, target, pubkey, iterations=50, alpha=0.01, print_loss=True): new_model = train(copy.deepcopy(model), data, target, iterations, print_loss=print_loss) encrypted_model = encrypt_sequential_model(new_model, pubkey) return encrypted_modelPrueba de la funcion de entrenamientonew = train(model, data, target, iterations=500)Loss:[0.8324166]Pruebas de creación de un modelo encriptadopublic_key, private_key = phe.generate_paillier_keypair(n_length=128) np.random.seed(0) data = Tensor(x_train, autograd=True) target = Tensor(y_train, autograd=True) layers = [Linear(25,4), Relu(), Linear(4,3), Relu(), Linear(3,1), Sigmoid()] model = Sequential(layers) for i in range(9): model = train_and_encrypt(model, data, target, public_key) model = aggregate_models([model]) model = decrypt_sequential_model(model, 1, private_key) pred = model.forward(test_data) pred_list = [x[0] for x in pred.data] test_target_list = [x[0] for x in test_target.data] print('RMSE:',rmse(pred_list, test_target_list))RMSE: 0.023673897541489297Distrubución del dataset en las diferentes Instituciones```Inicializamos``` las entidades distribuyendo el dataset entre las tres y definimos un perceptron multicapa para realizar una regresión (predecir el numero de casos futuros)np.random.seed(0) rangos = list() for i in range(4): rangos.append(int((len(x_train)/3)*i)) # print(rangos) data_entidad_01 = Tensor(x_train[rangos[0]:rangos[1]], autograd=True) target_entidad_01 = Tensor(y_train[rangos[0]:rangos[1]], autograd=True) data_entidad_02 = Tensor(x_train[rangos[1]:rangos[2]], autograd=True) target_entidad_02 = Tensor(y_train[rangos[1]:rangos[2]], autograd=True) data_entidad_03 = Tensor(x_train[rangos[2]:rangos[3]], autograd=True) target_entidad_03 = Tensor(y_train[rangos[2]:rangos[3]], autograd=True) layers = [Linear(25,4), Relu(), Linear(4,3), Relu(), Linear(3,1), Sigmoid()] model = Sequential(layers) # print(len(data_entidad_01.data)) # print(len(data_entidad_02.data)) # print(len(data_entidad_03.data))Simulacion de aprendizaje federadoUna vez tenemos el dataset distribuido entre las tres instituciones se procede a entrenar el ```modelo federado```.Para el entrenamiento del modelo se tienen los siguientes componentes:- **```Data Owner```**: Son los dueños de los datos, que por motivos de privacidad (tienen datos sensibles como datos privados de personas) no pueden compartir sus datos con otras entidades. 
Estos se encargan de entrenar el modelo enviado por el ```Model Owner``` localmente y luego lo encriptan utilizando la ```clave pública``` del ```Model Owner``` para poder compartir su modelo con el ```Model Aggregator``` y evitar que este pueda determinar de alguna manera los datos sobre los que se entrenó el modelo.- **```Model Aggregator```**: Se encarga de recibir todos los modelos encriptados de los ```Data Owners``` y los agrega usando ```encriptacion homomorfica```, y ya que el model agregator no posee la ```clave privada``` privada para desencriptar cada modelo, este no es capaz de interpretar los pesos de los modelos de los ```Data Owners``` ni el modelo agregado, manteniendo así seguro los datos de los ```Data Owners```. Un ```Model Aggregator``` puede ser cualquiera que no tenga la ```clave privada``` consigo, por lo que un ```Data Owner``` puede cumplir también el rol de ```Model Aggregator```- **```Model Owner```** Es el dueño del modelo, en este caso el que define la arquitectura y el modo de entrenamiento del mismo. El model owner tiene la ```clave privada``` para poder desencriptar el modelo creado.En el diagrama de arriba se pueden ver los pasos para realizar el entrenamiento:1. El ```Model Owner``` envia su modelo a los ```Data Owners```.2. Los ```Data Owners``` entrenan localmente el modelo con sus datos y luego lo encriptan.3. Los ```Data Owners``` envían sus modelos al ```Model Aggregator``` el cual agrega todos los modelos en un solo modelo encriptado.4. El ```Model Aggregator``` envía el modelo agregado al ```Model Owner```.5. El ```Model Owner``` desencripta el modelo agregado.Este proceso se repite hasta que el modelo tenga la rendimiento deseado por el ```Model Owner```np.random.seed(0) layers = [Linear(25,4), Relu(), Linear(4,3), Relu(), Linear(3,1), Sigmoid()] # layers = [Linear(25,4), Relu(), Linear(4,4), Relu(), Linear(4,1), Sigmoid()] # layers = [Linear(25,4), Relu(), Linear(4,5), Relu(), Linear(5,1), Sigmoid()] # layers = [Linear(25,4), Tanh(), Linear(4,3), Tanh(), Linear(3,1), Sigmoid()] # layers = [Linear(25,4), Relu(), Linear(4,1), Sigmoid()] # model = Sequential([Linear(25,4), Tanh(), Linear(4,6), Tanh(), Linear(6,1), Sigmoid()]) model = Sequential(layers) for i in range(9): print('\nIniciando la ronda de entrenamiento Nro:', i+1) print('\tPaso 1: enviamos el modelo a Institucion 01') entidad_01_encrypted_model = train_and_encrypt(model, data_entidad_01, target_entidad_01, public_key, iterations=50, alpha=0.007) print('\n\tPaso 2: enviamos el modelo a Institucion 02') entidad_02_encrypted_model = train_and_encrypt(model, data_entidad_02, target_entidad_02, public_key, iterations=50, alpha=0.007) print('\n\tPaso 3: enviamos el modelo a Institucion 03') entidad_03_encrypted_model = train_and_encrypt(model, data_entidad_03, target_entidad_03, public_key, iterations=50, alpha=0.007) print('\n\tPaso 4: Institucion 01, Institucion 02 y Institucion 03 envian') print('\ty agregan sus modelos encriptados ente sí') models_list = [entidad_01_encrypted_model, entidad_02_encrypted_model, entidad_03_encrypted_model] encrypted_model = aggregate_models(models_list) print('\n\tPaso 5: Solo el modelo agregado') print('\tse envia devuelta al dueño del modelo') print('\tque puede desencriptarlo') model = decrypt_sequential_model(encrypted_model, len(models_list), private_key) pred = model.forward(test_data) pred_list = [x[0] for x in pred.data] test_target_list = [x[0] for x in test_target.data] print('RMSE:',rmse(pred_list, test_target_list)) comparison = 
pd.DataFrame({'actual':test_target_list, 'predicted':pred_list}) comparison.head()Como se puede ver en los resultados, el modelo obtenido es casi tan bueno que el modelo entrenado de forma centralizada. Notas FinalesSi bien los resultados no logran ser tan buenos como los resultados obtenidos en el paper, se logra demostrar que es posible entrenar un modelo con un ```performance``` relativamente bueno sin necesidad de acceder directamente a los datos. Con una mejora en la busqueda de los ```hiperparametros``` se podría mejorar aún más el performance del modelo. Grid SearchPrueba para encontrar un mejor modelo usando Grid Searchnp.random.seed(0) alphas = [0.001, 0.003, 0.005, 0.007, 0.01, 0.01, 0.03, 0.05] architectures = [[Linear(25,1), Sigmoid()], [Linear(25,4), Sigmoid(), Linear(4,3), Sigmoid(), Linear(3,1), Sigmoid()], [Linear(25,4), Tanh(), Linear(4,3), Tanh(), Linear(3,1), Sigmoid()], [Linear(25,4), Relu(), Linear(4,3), Relu(), Linear(3,1), Sigmoid()], [Linear(25,4), Sigmoid(), Linear(4,5), Sigmoid(), Linear(5,1), Sigmoid()], [Linear(25,4), Tanh(), Linear(4,5), Tanh(), Linear(5,1), Sigmoid()], [Linear(25,4), Relu(), Linear(4,5), Relu(), Linear(5,1), Sigmoid()], [Linear(25,5), Sigmoid(), Linear(5,6), Sigmoid(), Linear(6,1), Sigmoid()], [Linear(25,5), Tanh(), Linear(5,6), Tanh(), Linear(6,1), Sigmoid()], [Linear(25,5), Relu(), Linear(5,6), Relu(), Linear(6,1), Sigmoid()]] best_model = {} actual_rmse = 100.0 for architecture in architectures: for alpha in alphas: model = Sequential(copy.deepcopy(architecture)) for i in range(10): entidad_01_encrypted_model = train_and_encrypt(model, data_entidad_01, target_entidad_01, public_key, iterations=50, alpha=alpha) # print('\n\tPaso 2: enviamos el modelo a Institucion 02') entidad_02_encrypted_model = train_and_encrypt(model, data_entidad_02, target_entidad_02, public_key, iterations=25, alpha=alpha) # print('\n\tPaso 3: enviamos el modelo a Institucion 03') entidad_03_encrypted_model = train_and_encrypt(model, data_entidad_03, target_entidad_03, public_key, iterations=25, alpha=alpha) # print('\n\tPaso 4: Institucion 01, Institucion 02 y Institucion 03 envian') # print('\ty agregan sus modelos encriptados ente sí') models_list = [entidad_01_encrypted_model, entidad_02_encrypted_model, entidad_03_encrypted_model] encrypted_model = aggregate_models(models_list) # print('\n\tPaso 5: Solo el modelo agregado') # print('\tse envia devuelta al dueño del modelo') # print('\tque puede desencriptarlo') model = decrypt_sequential_model(encrypted_model, len(models_list), private_key) pred = model.forward(test_data) pred_list = [x[0] for x in pred.data] # test_target_list = [x[0] for x in test_target.data] new_rmse = rmse(pred_list, test_target_list) if (new_rmse < actual_rmse): print('\tNuevo mejor RMSE:',new_rmse) actual_rmse = new_rmse best_model['model'] = model best_model['architecture'] = architecture best_model['alpha'] = alpha best_model['rmse'] = actual_rmse print(best_model['rmse']) print(best_model['alpha']) print(best_model['architecture'])0.027510551234677393 0.001 [, , , , , ]Apartado de pruebas de las funciones de Cifrado Homomorfico (Homomorphic Encryption)Pruebas relizadas para las metodos de encriptado, agregación y desencriptado de un modeloaux = Sequential([Linear(3,2)]) aux.layers[0].weight.data encripted_tensor = encrypt_tensor(aux.layers[0].weight.data, pubkey=public_key) decrypt_tensor(encripted_tensor, privkey=private_key) seq_aux = Sequential([Linear(2,3), Linear(3,2)]) print(seq_aux.layers[0].weight.data) print() encrypted_model = 
encrypt_sequential_model(seq_aux, pubkey=public_key) print(encrypted_model.layers[0].weight.data) print() decrypted_model = decrypt_sequential_model(encrypted_model, n_models=1, privkey=private_key) print(decrypted_model.layers[0].weight.data) zero_seq = zero_sequential_model(seq_aux) print(zero_seq.layers[1].weight) new_model = aggregate_models([aux, aux]) print(aux.layers[0].weight.data) print(new_model.layers[0].weight.data)[[-0.14882243 0.12089122] [-0.66242815 -0.76576489] [ 0.51999856 0.51318502]] [[-0.29764486 0.24178245] [-1.3248563 -1.53152978] [ 1.03999713 1.02637003]]The purpose of this notebook is to complete a data cleaning workflow from start to finish in order to validate the core functionality our package TO DO:- Organize code modules & tests- Clean up/finish writing tests- Write main script wrapper function# imports #from core import * #from cpd_info import * #from mol_sim_copy import * #from pubchem_client import * import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import sklearn from sklearn import linear_model from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.metrics import roc_auc_score from sklearn.metrics import roc_curve import matplotlib.pyplot as plt import numpy as np import pandas as pd import pubchempy as pc import requests import re from time import sleep from bs4 import BeautifulSoup #rdkit imports import rdkit from rdkit import Chem from rdkit.Chem import Draw from rdkit.Chem.EState import Fingerprinter from rdkit.Chem import Descriptors from rdkit.Chem import rdFMCS from rdkit.Chem.rdmolops import RDKFingerprint from rdkit.Chem.Fingerprints import FingerprintMols from rdkit import DataStructs from rdkit.Avalon.pyAvalonTools import GetAvalonFP #housekeeping imports import pandas as pd import matplotlib import numpy as np import scipy as spStep 1 Read in master dataframe# read in the master_df master_df = pd.read_csv('../../../big-datasets/master_dataframe_metacyc.csv.gz', compression='gzip') print(master_df.shape) master_df = master_df.drop(columns='Fingerprint') master_df.head()(11467, 24)Step 2 Get query SMILES string & pair query compound with each unique enzyme in the master DataFrameExample: PubChem SID 3480sid_to_smiles(3480) cid_to_smiles(243) def sid_to_smiles(sid): """Takes a PubChem SID. Returns the associated isomeric SMILES string and PubChem CID. Args: sid : The PubChem SID number. Returns: str: isomeric smiles. int: Pubchem CID number. 
""" substance = pc.Substance.from_sid(sid) cid = substance.standardized_cid compound = pc.get_compounds(cid)[0] return compound.isomeric_smiles, cid def cid_to_smiles(cid): try: compound = pc.get_compounds(cid)[0] smiles = compound.canonical_smiles except BaseException: pass return smiles, cid # function to query the SMILES string and append new pairs to the master dataframe def pair_query_compound(master_df, enzyme_col, pubchem_col, smiles_col, pubchem_cid): """ pair_query_compound_with_enzymes() queries pubchem to get a SMILES string from an input pubchem_sid, then pairs that query compound with each unique enzyme id in the master dataframe Args: master_df (pandas.DataFrame): master dataframe containing enzyme ids enzyme_col (str): column containing enzyme id pubchem_col (str): column containing pubchem cid smiles_col (str): column containing SMILES string pubchem_cid (str): query PubChem cid Returns: pandas.DataFrame: with rows added to include query compound """ master_df = master_df[[enzyme_col, pubchem_col, smiles_col]] new_pairs = [] smiles, _ = cid_to_smiles(pubchem_cid) if len(smiles) == 0: raise 'query compound SMILES string could not be retrieved' else: pass unique_enzymes = master_df[enzyme_col].unique().tolist() for enzyme in unique_enzymes: pair = {enzyme_col:enzyme, pubchem_col:pubchem_cid, smiles_col:smiles} new_pairs.append(pair) new_paris_df = pd.DataFrame(new_pairs) output_df = pd.concat((master_df, new_paris_df), axis=0, sort=False) return output_df # pair_query_compound_with_enzymes() updated_df = pair_query_compound(master_df, 'Enzyme', 'PubChemID', 'SMILES', '243') print(updated_df.shape) updated_df.head() #updated_df.drop(columns=['Mol', 'Fingerprint'], inplace=True) small = updated_df.iloc[:100,:] updated_df.head(20)Step 3 Calculate molecular distances between products of the same enzymeThe model is based on the fact that we expect these distances to be closer for reactive enzyme-product pairs than for non-reactive enzyme-product pairsdef calculate_dist(input_df): '''Main method, takes an input dataframe and builds and returns a master dataframe which is the original dataframe, with three additional columns, an rdkit Mol column, an rdkit Fingerprint column, and a column which describes the average distance of a product row to all the products of the associated enzyme entry. 
Requires the KEGG enzyme entry column to be named 'entry' and the SMILES string column to be named 'SMILES' ''' master_df = fingerprint_products(input_df) #expand input df: generate mols from SMILES then generate #fingerprints from mols, adding columns for each # enzyme_df_list = split_by_enzyme(input_df) #split expanded df by rows, grouped by enzyme entry (1.1.1.110 etc), #into a list of dataframes unique_enzymes = set(master_df['Enzyme'].unique()) # create set of unique enzymes dist_lookup = {} # initialize master dist list for enzyme in unique_enzymes: #loop through list of enzyme dataframes # enzyme_df['Dist'] = '' #initialize distance column enzyme_df = master_df[master_df['Enzyme'] == enzyme] metric = sim_metric(enzyme_df) #get similarity matrix dataframe vals = metric.values #use np array of similarity matrix start_at = 1 #skip autocorrelation dist_list =[] #initialize list if len(vals) == 1: dist_list.append(vals) #add distance value to list elif len(vals) > 1: for i in range(len(vals)-1): #row of matrix except for last row for j in range(start_at, len(vals)): #col of matrix skipping first column dist_list.append(vals[i][j]) #add distance value to list start_at += 1 #start at higher index to skip redundancy avg_dist = sum(dist_list)/len(dist_list) #compute average distance dist_lookup[enzyme] = avg_dist # for _, row in enzyme_df.iterrows(): #loop through enzyme dataframe # # enzyme_df['Dist'].loc[index] = avg_dist #add averaged distance to each product row of enzyme dataframe master_df['dist'] = [dist_lookup[row['Enzyme']] for _, row in master_df.iterrows()] return master_df def sim_i_all(input_df, index_i, row_i, metric): """From the input dataframe, check the passed indexes against the DataFrame, and construct a new dataframe which is the similarity matrix of all of the products contained in the dataframe.""" for index_j, row_j in input_df.iterrows(): if index_j < index_i: #skip redundant rows continue elif index_i == index_j: #autocorrelate rows metric.loc[index_i, index_j] = 1 else: metric.loc[index_i, index_j] = sim_i_j(row_i, row_j) #fill matrix with calculated similarity at two position #s at once metric.loc[index_j, index_i] = metric.loc[index_i, index_j] return def sim_metric(input_df): """From an input_df, use sim_i_j and sim_i_all to build and return a similarity matrix dataframe.""" metric = pd.DataFrame() for index_i, row_i in input_df.iterrows(): sim_i_all(input_df, index_i, row_i, metric) return metric def sim_i_j(row_i, row_j): """For two given rows of a dataframe, use the rdkit fingerprints to compute TanimotoSimilarity and return the resulting float""" return DataStructs.FingerprintSimilarity(row_i['Fingerprint'], row_j['Fingerprint'], metric=DataStructs.TanimotoSimilarity) def fingerprint_products(input_df): #fingerprints all products in a given df '''DocString''' mol_list = [] fp_list = [] for index, row in input_df.iterrows(): mol_list.append(Chem.rdmolfiles.MolFromSmiles(row['SMILES'])) #get mols from SMILES and add mols to list fp_list.append(FingerprintMols.FingerprintMol(Chem.rdmolfiles.MolFromSmiles(row['SMILES']))) #get fingerprints from mols and and fingerprints to list input_df.insert(0, column='Mol', value=mol_list) input_df.insert(1, column='Fingerprint', value= fp_list) return input_df # calculate_dist() with appended dataset # this took 1.5 min to run on 14807 lines distance_df = calculate_dist(updated_df) print(distance_df.shape) distance_df.head() distance_df.head(30)Step 4 Get dummy variables to represent enzyme classWe expect that many enzyme 
properties could be predictive features for this model. Enzyme class should encapsulate many of these features at a high level.# remove any rows that are not the query compound reduced_df = distance_df[distance_df['PubChem'] == '3480'] # binarize_enzyme_class() query_df = binarize_enzyme_class(reduced_df, 'entry') query_df = query_df.reset_index(drop=True) print(query_df.shape) query_df.head()(516, 13)Step 5 Add in compound features with RDKitThis step uses the RDKit packages to generate descriptive features of the reaction product compoundsquery_df = create_cpd_info(query_df) print(query_df.shape) query_df.head()(516, 22)Step 6 Re-Instantiate modelfeature_df = master_df[['dist', 'enzyme_class_1', 'enzyme_class_2', 'enzyme_class_3', 'enzyme_class_4', 'enzyme_class_5', 'enzyme_class_6', 'enzyme_class_7', 'n_O', 'n_N', 'n_S', 'n_X', 'DoU']] features = np.array(feature_df) #shape balance array for regression reactions = list(master_df['reacts']) feature_train, feature_test, reaction_train, reaction_test = train_test_split(features, reactions, test_size=0.20, random_state=42) model_1 = linear_model.LogisticRegression(solver='liblinear', penalty='l1', random_state=1, class_weight='balanced') model_1.fit(feature_train, np.ravel(reaction_train)) # test the model logit_roc_auc = roc_auc_score(reaction_test, model_1.predict(feature_test)) fpr, tpr, thresholds = roc_curve(reaction_test, model_1.predict_proba(feature_test)[:,1]) plt.figure() plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc) plt.plot([0, 1], [0, 1],'r--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic') plt.legend(loc="lower right") plt.show() score = model_1.score(feature_test, reaction_test) print('The model score is ' + str(round(score*100, 2)) + '%.')Step 7 Use model to predict reactivity of pairs# select query features query_feat_df = query_df[['dist', 'enzyme_class_1', 'enzyme_class_2', 'enzyme_class_3', 'enzyme_class_4', 'enzyme_class_5', 'enzyme_class_6', 'enzyme_class_7', 'n_O', 'n_N', 'n_S', 'n_X', 'DoU']] predictions = model_1.predict(query_feat_df) # change me to the data you want to predict based on pred = model_1.predict_proba(query_feat_df) prediction_values = pd.DataFrame(pred) model_descriptive_df = pd.DataFrame() model_descriptive_df['0']=prediction_values[0] model_descriptive_df['1']=prediction_values[1] prediction_df = pd.merge(model_descriptive_df, query_df, left_index=True, right_index=True) print(prediction_df.shape) prediction_df.head() prediction_df = prediction_df.sort_values(by=['1'], ascending=False) prediction_df.head() #%%writefile query_model.py # long query function that does all of the above, so that we can query it many times over with our validation data def query_model(master_df, query_sid): """ NOTE: Fields containing enzyme, compound PubChem sid, and SMILES string must be named ['entry', 'PubChem', 'SMILES'] respectively """ # get query SMILES string & pair query compound with each unique enzyme in the master DataFrame updated_df = pair_query_compound(master_df, 'entry', 'PubChem', 'SMILES', query_sid) # calculate molecular distances between products of the same enzyme distance_df = calculate_dist(updated_df) # remove any rows that are not the query compound reduced_df = distance_df[distance_df['PubChem'] == query_sid] # get dummy variables to represent enzyme class query_df = binarize_enzyme_class(reduced_df, 'entry') query_df = query_df.reset_index(drop=True) # 
add in compound features with RDKit cpd_query_df = create_cpd_info(query_df) # re-instantiate log reg model ###### feature_df = master_df[['dist', 'enzyme_class_1', 'enzyme_class_2', 'enzyme_class_3', 'enzyme_class_4', 'enzyme_class_5', 'enzyme_class_6', 'enzyme_class_7', 'n_O', 'n_N', 'n_S', 'n_X', 'DoU']] features = np.array(feature_df) #shape balance array for regression reactions = list(master_df['reacts']) feature_train, feature_test, reaction_train, reaction_test = train_test_split(features, reactions, test_size=0.20, random_state=42) model_1 = linear_model.LogisticRegression(solver='liblinear', penalty='l1', random_state=1, class_weight='balanced') model_1.fit(feature_train, np.ravel(reaction_train)) ###### # select query features query_feat_df = query_df[['dist', 'enzyme_class_1', 'enzyme_class_2', 'enzyme_class_3', 'enzyme_class_4', 'enzyme_class_5', 'enzyme_class_6', 'enzyme_class_7', 'n_O', 'n_N', 'n_S', 'n_X', 'DoU']] # query reactive enzymes predictions = model_1.predict(query_feat_df) pred = model_1.predict_proba(query_feat_df) # write results to a DataFrame prediction_values = pd.DataFrame(pred) model_descriptive_df = pd.DataFrame() # model_descriptive_df['0']=prediction_values[0] model_descriptive_df['p_reacts']=prediction_values[1] prediction_df = pd.merge(model_descriptive_df, query_df, left_index=True, right_index=True) # sort DataFrame prediction_df = prediction_df.sort_values(by=['p_reacts'], ascending=False) # reset index in output dataframe prediction_df = prediction_df.reset_index(drop=True) # add rank to dataframe prediction_df['rank'] = prediction_df.index + 1 # return DataFrame return prediction_df # read in validation data validation_df = pd.read_csv('../datasets/validation_data.csv') # read in smiles string data smiles_df = pd.read_csv('../datasets/df_cleaned_kegg_with_smiles.csv', dtype='str') # join on validation_df to get sid numbers valid_df = pd.merge(validation_df, smiles_df, left_on='product', right_on='KEGG', how='left') valid_df = valid_df.drop(columns=['Unnamed: 0_x', 'product', 'Unnamed: 0_y', 'entry_y', 'CID', 'SMILES']) valid_df = valid_df.rename(columns={'entry_x': 'true_enzyme'}) valid_df = valid_df.drop_duplicates() valid_df = valid_df.dropna(subset=['PubChem']) valid_df = valid_df.reset_index(drop=True) print(valid_df.shape) valid_df.head() # run each of these validation compounds through the query_model() function headers = ['true_enzyme', 'KEGG', 'PubChem', 'prediction_rank', 'prediction_prob'] valid_pred_df = valid_df.reindex(columns=headers) cpd_validation_list = valid_pred_df['PubChem'].unique().tolist() # cpd_validation_list = ['135626334', '254741367'] for cpd in cpd_validation_list: try: prediction_df = query_model(master_df, cpd) except: pass handle = '../datasets/validation_data/{}.csv'.format(cpd) prediction_df.to_csv(handle, index=False) for index, row in valid_pred_df.loc[valid_pred_df.loc[:, 'PubChem'] == cpd].iterrows(): enzyme = row['true_enzyme'] for _, pred_row in prediction_df.loc[prediction_df.loc[:,'entry'] == enzyme].iterrows(): valid_pred_df.loc[index, 'prediction_rank'] = pred_row['rank'] valid_pred_df.loc[index, 'prediction_prob'] = pred_row['p_reacts'] valid_pred_df.to_csv('validation_summary.csv', index=False) valid_pred_df.head() valid_pred_df.head()Out of curiosity: Examine average molecular distance distributions for negative and positive dataOn first glance, it appears that our hypothesis is correct in that the distributions of average molecular distances are qualitatively different between the positive and 
negative datasets
# look at distributions of distances for positive and negative data (note: both panels below currently plot the same 'dist' column)
fig, axes = plt.subplots(1, 2, figsize=(15, 5)) pos = sns.distplot(distance_df['dist'], bins=50, kde=False, ax=axes[0]) axes[0].set_title('positive data avg. mol. dist.') neg = sns.distplot(distance_df['dist'], bins=50, kde=False, ax=axes[1]) axes[1].set_title('negative data avg. mol. dist.') for axis in axes: axis.set_xlim([0.0, 1.0])
ORFEUS Workshop - Lisbon 2017 ORFEUS EIDA Webservices Seismo-Live: http://seismo-live.org Authors:* ([@jollyfant](https://github.com/jollyfant))---
1 Basic Webservice Usage 1.1 Introduction
EIDA webservices are designed to provide programmatic access to waveform data and instrument metadata from EIDA. FDSN standardised webservices have been running since 2015 and are scheduled to replace Arclink and other deprecated protocols in the near future. Because webservice requests are URLs, it is possible to communicate directly with the webservice APIs in a browser, with command-line tools (e.g. curl; wget) or through abstracted clients (e.g. [ObsPy](http://obspy.org), [fdsnws-fetch](https://github.com/andres-h/fdsnws_scripts/blob/master/fdsnws_fetch.py)). Webservices are identified by the service domain (URL), which is data-center specific, a label that identifies the service (e.g. dataselect; station) and a list of request options (e.g. stream identifiers or time window) included in its query string. In this initial exercise we will introduce five webservices:
* 1.2 FDSNWS-Dataselect - Raw waveform service
* 1.3 FDSNWS-Station - Station metadata and instrument specifics
* 1.4 EIDAWS-Routing - Service routing within EIDA
* 1.5 EIDAWS-WFCatalog - Waveform metadata
* 1.6 EIDA Mediator - Automatically federated requests across EIDA
In this notebook we will practise direct communication with the webservice APIs in addition to recommended and more convenient workflows using ObsPy.
1.2 FDSNWS-Dataselect 1.2.1 Interacting with the API
The following example makes a request to the FDSNWS-Dataselect API hosted at ORFEUS Data Center (http://orfeus-eu.org). We will request a 10-minute window of miniSEED data from a single station. The data will be read and plotted using ObsPy. Alternatively, we could save the data to disk. The service label for FDSNWS-Dataselect is:> fdsnws/dataselect/1/query
%matplotlib inline # Import the read module from ObsPy from obspy import read # The URL that points to the dataselect service # The label that identifies the service SERVICE_DOMAIN = "http://www.orfeus-eu.org" LABEL = "fdsnws/dataselect/1/query" # The 10-minute time window tuple starttime, endtime = ("2016-01-01T00:00:00", "2016-01-01T00:10:00") # Get the SEED codes, we will use wildcards for location, channel network, station, location, channel = "NL", "HGN", "*", "*" # Create a query string queryString = "&".join([ "network=%s" % network, "station=%s" % station, "location=%s" % location, "channel=%s" % channel, "starttime=%s" % starttime, "endtime=%s" % endtime ]) # The URL that we are requesting data from # Try visiting this URL in your browser: # http://www.orfeus-eu.org/fdsnws/dataselect/1/query?network=NL&station=HGN&location=*&channel=*&starttime=2016-01-01T00:00:00&endtime=2016-01-01T00:10:00 st = read("%s/%s?%s" % (SERVICE_DOMAIN, LABEL, queryString)) # Plot the data returned by the webservice st.plot();
1.2.2 Waveforms through ObsPy (recommended usage)
Alternatively we can use the ObsPy library to communicate with the API through an abstracted client.
All we need to do is call an ObsPy function with our time window constraint and SEED identifiers. This function will do all the work of the previous exercise for us internally and make the result available for use within ObsPy.**Note:** Instead of building the URL yourself in the previous exercise, when working with ObsPy it is recommended that the client class is used.# Include the Client class from ObsPy from obspy.clients.fdsn import Client # Create an ObsPy Client that points to ODC (http://www.orfeus-eu.org) client = Client("ODC") # Get the waveforms for the same trace identifiers and time window st = client.get_waveforms(network, station, location, channel, starttime, endtime) # Plot identical result st.plot();1.3 FDSNWS-Station 1.3.1 Interacting with the APIThe fdsnws-station service works similar to the fdsnws-dataselect but has a service different label (*station* instead of *dataselect*). The response of this webservice is StationXML by default. In the following example we will however request the output formatted as text for clarity. The label for this webservice is:> fdsnws/station/1/query# Import a library to make a HTTP request to the webservice import requests # The URL that points to the station service SERVICE_DOMAIN = "http://www.orfeus-eu.org" LABEL = "fdsnws/station/1/query" # Get the SEED codes for the entire NL network network, station, location, channel = "NL", "*", "*", "*" # The query string includes our seed identifiers # and we request output format text queryString = "&".join([ "network=%s" % network, "station=%s" % station, "location=%s" % location, "channel=%s" % channel, "format=text", "level=station" ]) # The URL that we are requesing # Try this in your browser: # http://www.orfeus-eu.org/fdsnws/station/1/query?network=NL&station=*&location=*&channel=*&format=text r = requests.get("%s/%s?%s" % (SERVICE_DOMAIN, LABEL, queryString)) # This will print station information for all stations in network NL print(r.text)#Network|Station|Latitude|Longitude|Elevation|SiteName|StartTime|EndTime NL|0171|51.03544|5.86908|35.0|Nieuwstadt|2006-01-27T00:00:00|2011-11-25T00:00:00 NL|1979|50.9708|5.9268|80.0|Bingelrade|2009-03-10T00:00:00|2014-11-28T00:00:00 NL|1980|50.8|5.9727|154.0|Mamelis|2009-03-10T00:00:00|2015-04-17T00:00:00 NL|1982|50.86939|6.08469|145.0|Rolduc|2007-12-10T00:00:00|2015-04-17T00:00:00 NL|ALK2|52.65104|4.723407|-1.0|Alkmaar - Bergerweg|2019-07-24T00:00:00| NL|ARCN|51.5013|6.1942|0.0|Arcen|2018-05-23T00:00:00| NL|BAPP|53.31482|6.83539|0.0|Appingedam|2013-10-07T00:00:00| NL|BBER|52.661427|4.7045|2.0|Bergen|2017-12-04T00:00:00|2019-05-21T00:00:00 NL|BER2|52.6478|4.714988|-1.0|Bergen - Groeneweg|2019-07-25T00:00:00| NL|BFB2|53.18751|6.76549|1.0|Froombosch2|2014-09-17T00:00:00| NL|BGAR|53.36786|6.71359|1.0|Garsthuizen|2014-09-17T00:00:00| NL|BHAR|53.22916|6.70898|1.0|Harkstede|2014-09-17T00:00:00| NL|BHKS|53.29195|6.78502|1.0|Hoeksmeer|2014-09-17T00:00:00| NL|BING|50.9708|5.9268|80.0|Bingelrade[...]Practically, the data would be requested in StatonXML format and saved to file, to be further used during data processing. In the following exercise we will read the data directly into ObsPy. Note again that when working with ObsPy, using the client class is the best solution. 1.3.2 Station Metadata through ObsPy (recommended usage)Alternatively, we use an ObsPy client to be able to directly manipulate the data in ObsPy. In the following example we request the instrument response for a single channel and print the response information. 
In combination with the raw waveform data returned from dataselect service we can deconvolve the frequency response for this sensor.# We will request instrument metadata for a single trace network, station, location, channel = "NL", "HGN", "02", "BH*" # We pass level=response to request instrument response metadata inv = client.get_stations( network=network, station=station, location=location, channel=channel, level="response" ) # This object now has response information for the selected trace (NL.HGN.02.BHZ) for network in inv: for station in network: for channel in station: print(channel.response) # Deconvolve instrument response st.remove_response(inventory=inv) # Plot the data (output units = velocity) st.plot();Channel Response From M/S () to COUNTS () Overall Sensitivity: 3.84869e+09 defined at 1.000 Hz 3 stages: Stage 1: PolesZerosResponseStage from M/S to V, gain: 2294 Stage 2: CoefficientsTypeResponseStage from V to COUNTS, gain: 1.67772e+06 Stage 3: FIRResponseStage from COUNTS to COUNTS, gain: 1 Channel Response From M/S () to COUNTS () Overall Sensitivity: 3.88225e+09 defined at 1.000 Hz 3 stages: Stage 1: PolesZerosResponseStage from M/S to V, gain: 2314 Stage 2: CoefficientsTypeResponseStage from V to COUNTS, gain: 1.67772e+06 Stage 3: FIRResponseStage from COUNTS to COUNTS, gain: 1 Channel Response From M/S () to COUNTS () Overall Sensitivity: 3.91245e+09 defined at 1.000 Hz 3 stages: Stage 1: PolesZerosResponseStage from M/S to V, gain: 2332 Stage 2: CoefficientsTypeResponseStage from V to COUNTS, gain: 1.67772e+06 Stage 3: FIRResponseStage from COUNTS to COUNTS, gain: 11.4 EIDAWS-RoutingThe seismic archive of EIDA is distributed across 11 different data centers, called EIDA Nodes. EIDAWS-routing helps you to find data within this federated data archive. If you don't know which EIDA node holds your data of interest the routing service will provide you with the appropriate EIDA node and corresponding webservice URL to be queried.In this example we will request the "get" format (i.e. URLs that hold the data) for four networks. We are asking for all routes to the station webservice. The label for this service is:> eidaws/routing/1/query**Note:** routing and communication with all EIDA nodes individually can be omitted by using the EIDA Mediator in federated mode (see section 1.6).# The URL that points to the routing service (notice the different eidaws label) SERVICE_DOMAIN = "http://www.orfeus-eu.org" LABEL = "eidaws/routing/1/query" # Network codes must be comma delimited network = ",".join(["HL", "GE", "NL", "KO"]) # The query string includes our network codes and our output format must is set as URLs (get) # We specify the service as fdsnws-station (change this to dataselect) queryString = "&".join([ "network=%s" % network, "format=get", "service=station" ]) # The URL that we are requesing # Try this in your browser: # http://www.orfeus-eu.org/eidaws/routing/1/query?network=HL,GE,NL,KO&format=get r = requests.get("%s/%s?%s" % (SERVICE_DOMAIN, LABEL, queryString)) # Should print four routes to different data centers # Here we can find station metadata for these four networks respectively # We make a request to all returned routes (status 200 indicates success!) 
for line in r.text.split("\n"): r = requests.get(line) print("[%i] %s" % (r.status_code, line))[200] http://eida.gein.noa.gr/fdsnws/station/1/query?net=HL&start=1997-01-01T00:00:00 [200] http://geofon.gfz-potsdam.de/fdsnws/station/1/query?net=GE&start=1993-01-01T00:00:00 [200] http://www.orfeus-eu.org/fdsnws/station/1/query?net=NL&start=1980-01-01T00:00:00 [200] http://eida-service.koeri.boun.edu.tr/fdsnws/station/1/query?net=KO&start=1980-01-01T00:00:001.5 EIDAWS-WFCatalogThe WFCatalog is a catalogue of seismic waveform metadata. This is not to be confused with station metadata but contains purely metadata describing the waveforms. These metadata include availability information (e.g. gaps), sample metrics (e.g. mean, standard deviations, median values) and miniSEED header flags.The EIDAWS-WFCatalog webservice returns quality metrics from raw waveform data. The WFCatalog can serve as a powerful waveform index for data discovery by appending filters (e.g. lt, ge) to the query string. This can help identify waveforms with metric values below or above a certain threshold. The label for this service is:> eidaws/wfcatalog/1/query# The URL that points to the routing service (notice the different eidaws label) SERVICE_DOMAIN = "http://www.orfeus-eu.org" LABEL = "eidaws/wfcatalog/1/query" # The start and end date for the metrics # Feel free to change the window starttime, endtime = ("2010-11-01", "2010-11-07") # Network codes must be comma delimited network, station, location, channel = "NL.HGN.02.BHZ".split(".") # The query string includes our seed identifiers, temporal constraints, we ask for sample metrics to be included # include can be either (default, sample, header, all) # We request metrics for daily waveforms with an availability over 50% # Try changing the percent_availability to 100 - less documents will be returned queryString = "&".join([ "network=%s" % network, "station=%s" % station, "location=%s" % location, "channel=%s" % channel, "starttime=%s" % starttime, "endtime=%s" % endtime, "include=sample", "percent_availability_ge=50" ]) # Try this in your browser: # http://www.orfeus-eu.org/eidaws/wfcatalog/1/query?network=NL&station=HGN&location=02&channel=BHZ&start=2010-11-01&end=2010-11-07&include=sample r = requests.get("%s/%s?%s" % (SERVICE_DOMAIN, LABEL, queryString)) # Should print JSON response of quality metrics for three days. r.json()1.6 EIDA MediatorThe EIDA mediator (beta) can automatically route and retrieve requests federated between EIDA nodes. This prevents using from having to query the routing service before making data requests. There is a single entry poiny to the entire archive available within EIDA as demonstrated below. Currently there is supported for federated mode between **station** and **dataselect**. Federation of **WFCatalog** requests will be supported in the future.# The URL that points to the routing service (the EIDA mediator is hosted by ETHZ) SERVICE_DOMAIN = "http://mediator-devel.ethz.ch" LABEL = "fdsnws/station/1/query" # Network codes must be comma delimited # Networks are federated across 4 different EIDA nodes network = ",".join(["HL", "GE", "NL", "KO"]) # Creathe queyr string and append all networks # We ask for level=network to limit the amount of data returned for clarity queryString = "&".join([ "network=%s" % network, "level=network" ]) # Try this in your browser: # http://mediator-devel.ethz.ch/fdsnws/station/1/query?network=HL,GE,NL&level=network ##### This currently does not seem to work. 
# r = requests.get("%s/%s?%s" %(SERVICE_DOMAIN, LABEL, queryString)) # StationXML for all four networks # print(r.text)Graphical user interfacesThe following tools are available on orfeus-eu.org and are built on top of the discussed webservices. Please note that these interfaces currently only work for data archived at ORFEUS Data Center.> http://www.orfeus-eu.org/data/odc/quality 2 Advanced Example - Webservices pipeline 2.1 IntroductionThis example demonstrates the use of FDSN webservices in a processing pipeline. The goal of this exercise is to download raw waveform data from stations surrounding an earthquake. This pipeline is based on functionality provided with ObsPy.# Define the module imports import requests import math from obspy.taup import TauPyModel from obspy.geodetics import locations2degrees from obspy import read, UTCDateTime import datetime import dateutil.parser2.2 FDSNWS-EventWe define a function that collects event information from fdsnws-event. We pass an event identifier to the webservice, parse the response and return an Event class that has **location**, **origin time**, and **depth** attributes. The event data is requested from the seismicportal webservice provided by the EMSC.def getEvent(identifier): # Try in your browser: # http://www.seismicportal.eu/fdsnws/event/1/query?eventid=20170720_0000091&format=text # Service address FDSN_EVENT = "http://www.seismicportal.eu/fdsnws/event/1/query" # Define class for Events class Event(): def __init__(self, line): self.id, self.time, self.latitude, self.longitude, self.depth = line.split("|")[:5] self.latitude = float(self.latitude) self.longitude = float(self.longitude) self.depth = float(self.depth) # We query for a single event identifier and request a text format return queryString = "&".join([ "eventid=%s" % identifier, "format=text" ]) # Create the query for an event identifier r = requests.get("%s?%s" % (FDSN_EVENT, queryString)) # Split by lines and remove head & tail lines = r.text.split("\n")[1:-1] # Return Event classes for each entry return list(map(Event, lines))[0] # Should print a single Event instance print(getEvent("20170720_0000091"))<__main__.getEvent..Event object at 0x10d817cf8>2.3 FDSNWS-StationDefine a function that can find the stations around an event. We pass the Event instance to the function and call the station webservice to return stations within 20 degrees arc-distance of this event location. 
We parse the response and return a map of station instances with attributes network, station, and location.
def getStations(event): # Try it in your browser: # http://orfeus-eu.org/fdsnws/station/1/query?latitude=30&longitude=30&maxradius=20&format=text # Service address FDSN_STATION = "http://orfeus-eu.org/fdsnws/station/1/query" MAX_RADIUS = 20 # Define a Station class class Station(): def __init__(self, line): self.network, self.station, self.latitude, self.longitude = line.split("|")[:4] self.latitude = float(self.latitude) self.longitude = float(self.longitude) # We query with the event location and a maximum radius around the event queryString = "&".join([ "latitude=%s" % event.latitude, "longitude=%s" % event.longitude, "maxradius=%s" % MAX_RADIUS, "format=text" ]) # Request from webservice r = requests.get("%s?%s" % (FDSN_STATION, queryString)) # Split by lines and remove head & tail lines = r.text.split("\n")[1:-1] # Return Station instances for each entry return map(Station, lines) # Should print a map (array) of Station instances print(getStations(getEvent("20170720_0000091")))
2.4 Theoretical Arrival Times
Define a function that calculates the theoretical P arrival time at a station location using the TauP module in ObsPy. The function takes an Event and Station instance. The arc-distance in degrees between the source and receiver is a great-circle (haversine-style) distance, computed here with ObsPy's locations2degrees helper.
# We use the iasp91 reference model TAUP_MODEL = TauPyModel(model="iasp91") def getPArrival(event, station): # Determine the great-circle arc distance; locations2degrees expects (lat1, long1, lat2, long2) arcDistanceDegrees = locations2degrees( event.latitude, event.longitude, station.latitude, station.longitude ) # Calculate the theoretical P-arrival time arrivals = TAUP_MODEL.get_travel_times( source_depth_in_km=1E-3 * event.depth, distance_in_degree=arcDistanceDegrees, phase_list=["P"] ) # Add the theoretical P-arrival delta to the event time return UTCDateTime(event.time) + arrivals[0].time
The arc-distance calculation takes two latitude/longitude pairs and returns their separation in degrees; it is supplied by ObsPy's locations2degrees, so no separate haversine helper needs to be defined here.
2.5 FDSNWS-Dataselect
The main body of the script collects the event with identifier 20170720_0000091. We loop over all the stations returned by the getStations function within 20 degrees arc-distance of the event. In each iteration, we make a call to fdsnws-dataselect to collect the waveform data for all stations between 300 seconds before and 1200 seconds after the theoretical P-arrival time. This data (channel BH?) is loaded into ObsPy using the read function, filtered and plotted. After the first iteration the loop is broken. Alternatively, all data can be saved to disk.
FDSN_DATASELECT = "http://orfeus-eu.org/fdsnws/dataselect/1/query" EVENT_IDENTIFIER = "20170720_0000091" # Get the event event = getEvent(EVENT_IDENTIFIER) # Go over all stations returned in the radius for station in getStations(event): # Get the theoretical (TauP) P-arrival time from event to station stationArrivalTime = getPArrival(event, station) # Create the query for fdsn-dataselect # between 300 seconds before & 1200 seconds after the theoretical P-arrival queryString = "&".join([ "network=%s" % station.network, "station=%s" % station.station, "starttime=%s" % (stationArrivalTime - 300).isoformat(), "endtime=%s" % (stationArrivalTime + 1200).isoformat(), "channel=BH?"
]) # Get the waveform data and read to ObsPy Stream # Empty responses are skipped try: st = read("%s?%s" % (FDSN_DATASELECT, queryString)) except Exception: continue # Use with ObsPy and apply a filter, then plot # Alternatively, we would save the data to a file st.filter("lowpass", freq=0.5) st.plot() # Break after the first result break/Users/lion/miniconda3/envs/seismo_live/lib/python3.7/site-packages/obspy/taup/tau_branch.py:496: UserWarning: Resizing a TauP array inplace failed due to the existence of other references to the array, creating a new array. See Obspy #2280. warnings.warn(msg)Project 3 An analysis of the Twitter use of and - Part 2%matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import re import string import shelve from collections import Counter import nltk from nltk.stem import SnowballStemmer from nltk.corpus import stopwords from nltk.tokenize import sent_tokenize, word_tokenize import itertools from nltk import bigrams plt.style.use('seaborn-dark') plt.rcParams['figure.figsize'] = (10, 6) with shelve.open('result/vars1') as db: obama = db['obama'] trump = db['trump']First, we want to clean up the words they tweetedemoticons_str = r""" (?: [:=;] # Eyes [oO\-]? # Nose (optional) [D\)\]\(\]/\\OpP] # Mouth )""" regex_str = [ emoticons_str, r'<[^>]+>', # HTML tags r'(?:@[\w_]+.)', # @-mentions r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers r"(?:[a-z][a-z'\-_]+[a-z].')", # words with - and ' r'(?:[\w_]+.)', # other words r'(?:\S)' # anything else ] tokens_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE) emoticon_re = re.compile(r'^'+emoticons_str+'$', re.VERBOSE | re.IGNORECASE) def preprocess(s, lowercase=False): tokens = tokens_re.findall(s) if lowercase: tokens = [token if emoticon_re.search(token) else token.lower() for token in tokens] return tokensNext, we count all the number of words, unique words, and number of characters they used and restore them in the datafram.obama_cleaned_words = [preprocess(i) for i in obama.text] obama['n_words'] = pd.Series([len(obama_cleaned_words[i]) for i in range(len(obama_cleaned_words))]) obama_uwords = [len(np.unique((np.array(preprocess(x))))) for x in obama.text] obama['n_uwords'] = pd.Series(obama_uwords) obama_chars = [len(x) for x in obama.text] obama['n_chars'] = pd.Series(obama_chars) obama.head() trump_cleaned_words = [preprocess(i) for i in trump.text] trump['n_words'] = pd.Series([len(trump_cleaned_words[i]) for i in range(len(trump_cleaned_words))]) trump_uwords = [len(np.unique((np.array(preprocess(x))))) for x in trump.text] trump['n_uwords'] = pd.Series(trump_uwords) trump_chars = [len(x) for x in trump.text] trump['n_chars'] = pd.Series(trump_chars) trump.head()Next, we want to compare them by looking at statisticspd.options.display.precision = 2 obama.describe() trump.describe()Visualization: Explore some of the relationships between the twitters, their authors, and time. Pandas has its own object for time series. Since we have a whole vector with creation dates, we can construct time series respect tweets lengths, likes and retweets. And if we want to plot the time series, pandas already has its own method in the object. 
We can plot a time series as follows:fig,axes = plt.subplots(1, 2, figsize = (16,4), sharey= True) axes[0].plot_date(x=obama.created_at, y = obama.n_words,linestyle = '-',marker='None') axes[1].plot_date(x=trump.created_at, y = trump.n_words,linestyle='solid',marker='None') plt.savefig("fig/n_word_comparison.png") fig,axes = plt.subplots(1, 2, figsize = (16,4), sharey= True) axes[0].plot_date(x=obama.created_at, y = obama.n_uwords,linestyle = '-',marker='None') axes[1].plot_date(x=trump.created_at, y = trump.n_uwords,linestyle='solid',marker='None') plt.savefig("fig/n_uword_comparison.png") fig,axes = plt.subplots(1, 2, figsize = (16,4), sharey= True) axes[0].plot_date(x=obama.created_at, y = obama.n_chars,linestyle = '-',marker='None') axes[1].plot_date(x=trump.created_at, y = trump.n_chars,linestyle='solid',marker='None') plt.savefig("fig/n_char_comparison.png")In order to keep track of the frequencies while we are processing the tweets, we can use collections.Counter() which internally is a dictionary (term: count) with some useful methods like most_common():count_all = Counter() terms_all = [term for term in list(itertools.chain.from_iterable(obama_cleaned_words))] count_all.update(terms_all) # Print the first 5 most frequent words print(count_all.most_common(5))[('the ', 2018), ('to ', 1670), ('"', 1388), ('a ', 882), ('Obama ', 848)]The above code will produce some unimpressive results:[('the ', 2018), ('to ', 1670), ('"', 1388), ('a ', 882), ('Obama ', 848)]As you can see, the most frequent words (or should I say, tokens), are not exactly meaningful. In every language, some words are particularly common. While their use in the language is crucial, they don’t usually convey a particular meaning, especially if taken out of context. This is the case of articles, conjunctions, some adverbs, etc. which are commonly called stop-words. Thus, we try to do the stop-word removal. And it's one important step.punctuation = list(string.punctuation) stop = stopwords.words('english') + punctuation + ['rt', 'via'] terms_stop = [term for term in list(itertools.chain.from_iterable(obama_cleaned_words)) if term not in stop] count_stop = Counter() count_stop.update(terms_stop) print(count_stop.most_common(5))[('the ', 2018), ('to ', 1670), ('a ', 882), ('Obama ', 848), ('s ', 820)]After counting, sorting the terms and printing the top 5, this is the result:[('the ', 2018), ('to ', 1670), ('a ', 882), ('Obama ', 848), ('s ', 820)]It's very similar with the last output. But we remove the '"'. Besides stop-word removal, we can further customise the list of terms/tokens we are interested in. 
There are many term filters.# Count terms only once, equivalent to Document Frequency terms_single = set(terms_all) count_single = Counter() count_single.update(terms_single) print(count_single.most_common(5)) # Count hashtags only terms_hash = [term for term in list(itertools.chain.from_iterable(obama_cleaned_words)) if term.startswith('#')] count_hash = Counter() count_hash.update(terms_hash) print(count_hash.most_common(5)) # Count terms only (no hashtags, no mentions) terms_only = [term for term in list(itertools.chain.from_iterable(obama_cleaned_words)) if term not in stop and not term.startswith(('#', '@'))] # mind the ((double brackets)) # startswith() takes a tuple (not a list) if # we pass a list of inputs count_only = Counter() count_only.update(terms_only) print(count_only.most_common(5)) terms_bigram = bigrams(terms_stop) count_bigram = Counter() count_bigram.update(terms_bigram) print(count_bigram.most_common(5))[(('äóîPresident ', 'Obama '), 452), (('President ', 'Obama '), 380), (('RT ', '@WhiteHouse:'), 194), (('äóîPresident ', 'Obama'), 169), (('of ', 'the '), 154)]The bigrams() function from NLTK will take a list of tokens and produce a list of tuples using adjacent tokens. Notice that we could use terms_all to compute the bigrams, but we would probably end up with a lot of garbage. In case we decide to analyse longer n-grams (sequences of n tokens), it could make sense to keep the stop-words, just in case we want to capture phrases like “to be or not to be”.So after counting and sorting the bigrams, this is the result:[(('äóîPresident ', 'Obama '), 452), (('President ', 'Obama '), 380), (('RT ', '@WhiteHouse:'), 194), (('äóîPresident ', 'Obama'), 169), (('of ', 'the '), 154)] Intermediate resultsLet's save some intermediate results for further analysis. This lets us work on subsequent notebooks without rerunning this one. We use python builtin shelve module for simple Python variables. We'll create for each notebook variable stores with the notebook number for python variables (vars1.db, vars2.db, etc.). These will all go into the `results/` subdirectory.obama.to_hdf('result/n2.h5','obama',table=True,mode='a') trump.to_hdf('result/n2.h5','trump',table=True,mode='a') with shelve.open('result/vars2') as db: db['obama'] = obama[['created_at', 'text']] db['trump'] = trump[['created_at', 'text']]Draw a ShapesAttempt at programmatically drawing shapes.All units in mm. 
```1``` = ```1 mm```.# Draw line from (0, 0) to (10, 0) lines = list() line_points = np.array([[0, 0], [10, 0]]) lines.append(GCode.Line(points=line_points)) # Draw line from (0, 0) to (0, 10) line_points = np.array([[0, 0], [0, 10]]) lines.append(GCode.Line(points=line_points)) lines # Draw line from (0, 0) to (-10, 0) line_points = np.array([[0, 0], [-10, 0]]) lines.append(GCode.Line(points=line_points)) lines # Draw line from (0, 0) to (0, -10) line_points = np.array([[0, 0], [0, -10]]) lines.append(GCode.Line(points=line_points)) lines prog = GCode.Program(lines=lines) prog.lines prog.laserin_dist prog.laserin_time 300 / 60 prog.lines prog.jog_dist prog.laserin_dist prog.dist prog.time class Square(GCode.Line): def __init__( self, len_side=10, origin=np.array([0, 0]), rotation=0, *args, **kwargs ): self.len_side = len_side self.origin = origin kwargs["points"] = self._points super().__init__(*args, **kwargs) @property def _points(self): return np.array( [ [self.origin[0], self.origin[1]], [self.origin[0] + self.len_side, self.origin[1]], [ self.origin[0] + self.len_side, self.origin[1] + self.len_side, ], [self.origin[0], self.origin[1] + self.len_side], [self.origin[0], self.origin[1]], ] ) def generate_gcode(self): self.points = self._points super().generate_gcode() @property def _cls(self): return self.__class__.__name__ def __repr__(self): return "{}".format(self._cls, self.origin, self.len_side) s = Square() s.__repr__() s s.len_side = 5 s s.points s.generate_gcode() s.pointsQ1 Company: Description: Open Source Intelligence Analyst[job website](https://www.google.com/search?source=hp&ei=k5uKXJ6GCses5wL2uIb4CQ&q=intelligence+anlayst+job&oq=intelligence+anlayst+job&gs_l=psy-ab.3..0i13l10.1526.5701..5975...1.0..0.168.1378.24j1......0....1..gws-wiz.....0..0i131j0j0i10j0i22i10i30j0i22i30j0i13i30.BvbVxKbdTzg&ibp=htl;jobs&sa=X&ved=2ahUKEwjfu4Tmn4LhAhUSy1kKHQ77BmEQiYsCKAF6BAgBECgfpstate=tldetail&htidocid=8wDGwcsYOEtSMdmtAAAAAA%3D%3D&htivrt=jobs)job = open('job.txt','r') print(job.read()) job.close()Job Description Job Number: R0046441 Open Source Intelligence Analyst Key Role: Support an intelligence community (IC) client as an open source intelligence (OSINT) analyst. Support the analysis of open source intelligence under limited supervision. Consult on how to leverage OSINT data and information, use specialized OSINT tools and services, and consult on the development of OSINT products. Support the development of new OSINT targeting and exploitation techniques and apply expertise to analyzing the development of OSINT policy and tradecraft. This position is located in Charlottesville, VA. 
Basic Qualifications: -Experience as an OSINT analyst supporting IC or DoD missions -Knowledge of IC OSINT tools and techniques -Ability to discover documents and synthesize and report information from publicly available sources and private or government open source materials within established security protocols quickly and efficiently -Ability to perform specific search projects with li[...]Q2from collections import Counter with open ('job.txt','r') as job: job_list=job.read().split() count_result= Counter(job_list) for word, count in count_result.most_common(20): print(word,count)and 18 to 9 OSINT 8 of 7 an 5 or 5 social 5 media 5 the 4 development 4 Support 3 intelligence 3 open 3 source 3 tools 3 -Experience 3 IC 3 -Ability 3 with 3 Job 2Q3import xlwt from collections import Counter book=xlwt.Workbook() sheet=book.add_sheet('word_count') i=0 sheet.write(i,0,'word') sheet.write(i,1,'count') with open ('job.txt','r') as job: job_list=job.read().split() count_result=Counter(job_list) for word, count in count_result.most_common(20): if i>0: sheet.write(i,0,word) sheet.write(i,1,count) i=i+1 book.save('job_word_count.xls')Q4import xlrd book=xlrd.open_workbook('job_word_count') sheet=book.sheet_by_name('word_count') for i in rangeBlazingText를 활용한 네이버 영화 리뷰 감성(Sentiment) 이진 분류*본 노트북 예제는 [DBPedia Ontology Dataset의 텍스트 분류](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/blazingtext_text_classification_dbpedia/blazingtext_text_classification_dbpedia.ipynb) 문서에 기반하여네이버 영화 리뷰의 텍스트 분류를 수행하는 예제입니다.*개발자 가이드: https://github.com/awsdocs/amazon-sagemaker-developer-guide/blob/master/doc_source/blazingtext.md 개요텍스트 분류(Text cassification)는 감성 분석(sentiment analysis), 스팸 탐지(spam detection), 해시 태그 예측(hashtag prediction) 등과 같은 다양한 사례들을 해결하는 데 사용될 수 있습니다. BlazingText는 최신 딥러닝 텍스트 분류 알고리즘과 동등한 성능을 달성하면서 멀티 코어 CPU 또는 GPU를 사용하여 몇 분 안에 10억 단어 이상의 모델을 훈련시킬 수 있습니다. BlazingText는 사용자 정의 CUDA 커널을 사용하여 GPU 가속을 활용하도록 FastText 텍스트 분류기를 확장합니다.본 노트북에서는 네이버 영화 리뷰 데이터의 감성 이진 분류를 BlzaingText로 수행해 보겠습니다. 이 데이터는 총 20만개 리뷰로 구성된 데이터로 영화 리뷰에 대한 텍스트와 레이블(0: 부정, 1: 긍정)으로 구성되어 있습니다. Setup학습 데이터 및 모델 아티팩트(Model Artifact) 저장에 사용될 S3 버킷(bucket) 및 접두사(prefix)는 노트북 인스턴스, 학습 및 호스팅과 같은 리전 내에 있어야 합니다. 버킷을 지정하지 않으면 SageMaker SDK는 동일 리전에서 사전에 정의된 명명 규칙에 따라 기본 버킷을 생성합니다.데이터에 대한 엑세스 권한을 부여하는 데 사용된 IAM(Identity and Access Management) role ARN(Amazon Resource Name)은 SageMaker Python SDK의 `get_execution_role` 메소드를 사용하여 가져올 수 있습니다.import sagemaker from sagemaker import get_execution_role import boto3 import json sess = sagemaker.Session() # This is the role that SageMaker would use to leverage AWS resources (S3, CloudWatch) on your behalf role = get_execution_role() #print(role) #bucket = sess.default_bucket() bucket = '[YOUR-BUCKET]' # Replace with your own bucket name if needed prefix = 'sagemaker/DEMO-blazingtext-sentiment-analysis' #Replace with the prefix under which you want to store the data if needed데이터 준비https://github.com/e9t/nsmc/ 에 공개된 네이버 영화 리뷰 학습/검증 데이터를 다운로드합니다.학습 데이터는 총 15만건이며, 검증 데이터는 총 5만건입니다.!wget -nc https://raw.githubusercontent.com/e9t/nsmc/master/ratings_train.txt -P ./data/ !wget -nc https://raw.githubusercontent.com/e9t/nsmc/master/ratings_test.txt -P ./data/--2019-11-06 06:55:17-- https://raw.githubusercontent.com/e9t/nsmc/master/ratings_train.txt Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 172.16.17.32 Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|172.16.17.32|:443... connected. HTTP request sent, awaiting response... 
200 OK Length: 14628807 (14M) [text/plain] Saving to: ‘./data/ratings_train.txt’ ratings_train.txt 100%[===================>] 13.95M --.-KB/s in 0.1s 2019-11-06 06:55:17 (100 MB/s) - ‘./data/ratings_train.txt’ saved [14628807/14628807] --2019-11-06 06:55:17-- https://raw.githubusercontent.com/e9t/nsmc/master/ratings_test.txt Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 172.16.17.32 Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|172.16.17.32|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 4893335 (4.7M) [text/plain] Saving to: ‘./data/ratings_test.txt’ ratings_test.txt 100%[=======[...]EDA (탐색적 데이터 분석; Exploratory Data Analysis)간단하게 EDA를 수행해 봅니다. 네이버 영화 리뷰 데이터는 정제가 잘 되어 있는 편이지만, 실제 데이터들은 클래스 불균형(class imbalance)한 데이터도 많고 데이터 정제가 필요한 경우가 많기에 EDA를 통해 데이터의 분포, 통계량 등을 확인하는 것이 좋습니다.먼저 판다스(pandas)로 학습/검증 데이터를 로드해서 데이터를 확인해 보겠습니다. `id`는 고유 id 이며, `document`는 영화 리뷰 문장, `label`은 긍정/부정 여부입니다. (긍정: 1, 부정: 0)import pandas as pd import numpy as np from wordcloud import WordCloud train_df = pd.read_csv('./data/ratings_train.txt', header=0, delimiter='\t') test_df = pd.read_csv('./data/ratings_test.txt', header=0, delimiter='\t') train_df.head()EDA를 위해 문자 개수 및 단어 개수를 계산합니다.# character count 계산 train_df['char_cnt'] = train_df['document'].astype(str).apply(len) test_df['char_cnt'] = test_df['document'].astype(str).apply(len) # word count 계산 train_df['word_cnt'] = train_df['document'].astype(str).apply(lambda x: len(x.split(' '))) test_df['word_cnt'] = train_df['document'].astype(str).apply(lambda x: len(x.split(' '))) train_df.head()한글 출력을 위한 설정입니다.%matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.font_manager as fm font_files = fm.findSystemFonts(fontpaths='/usr/share/fonts/nanum', fontext='ttf') font_list = fm.createFontList(font_files) fm.fontManager.ttflist.extend(font_list) mpl.rcParams['font.family'] = 'NanumGothic' mpl.rc('axes', unicode_minus=False) mpl.rcParams['font.size'] = 14문자 개수와 단어 개수를 확인해 봅니다.plt.figure(figsize=(12,5)) plt.hist(train_df['char_cnt'], bins=250, alpha=0.5, color='b', label='word') plt.title('Histogram of Character Count of Naver Movie Review') plt.xlabel('문자 개수') plt.figure(figsize=(12,5)) plt.hist(train_df['word_cnt'], bins=75, alpha=0.5, color='b', label='train') plt.yscale('log', nonposy='clip') plt.title('Log Histogram of Word Count of Naver Movie Review') plt.xlabel('단어 개수')워드 클라우드로 자주 등장하는 단어들을 확인합니다. 
`영화, 진짜, 너무, 정말` 등의 단어들이 많이 사용된 것을 확인할 수 있습니다.%%time train_review = [row for row in train_df['document'] if type(row) is str] wordcloud = WordCloud(font_path='/usr/share/fonts/nanum/NanumGothic.ttf').generate(' '.join(train_review)) plt.figure(figsize=(10,6)) plt.imshow(wordcloud, interpolation='bilinear') plt.axis('off') plt.show()기본 통계를 확인해 봅니다.def print_basic_stats(df, col_name): print('===== {} ====='.format(col_name)) print('Maximum: {}'.format(np.max(df[col_name]))) print('Minimum: {}'.format(np.min(df[col_name]))) print('Mean: {:.3f}'.format(np.mean(df[col_name]))) print('Stddev: {:.3f}'.format(np.std(df[col_name]))) print('1st quartile: {}'.format(np.percentile(df[col_name], 25))) print('Median: {}'.format(np.median(df[col_name]))) print('3rd quartile: {}'.format(np.percentile(df[col_name], 75))) print_basic_stats(train_df, 'char_cnt') print_basic_stats(train_df, 'word_cnt')===== char_cnt ===== Maximum: 146 Minimum: 1 Mean: 35.203 Stddev: 29.532 1st quartile: 16.0 Median: 27.0 3rd quartile: 42.0 ===== word_cnt ===== Maximum: 41 Minimum: 1 Mean: 7.585 Stddev: 6.514 1st quartile: 3.0 Median: 6.0 3rd quartile: 9.0클래스 균형을 확인합니다. 본 데이터는 거의 1:1 비율을 보여주고 있지만, 실제 데이터는 95:5 같은 불균형 데이터들도 많다는 점을 숙지해 주세요.import seaborn as sns sns.countplot(train_df['label']) train_df['label'].value_counts()Data PreprocessingBlazingText 알고리즘으로 분류 문제를 학습하기 위해서는 말뭉치 데이터의 각 문장의 클래스 레이블 앞에 `__label__`을 접두사로 붙여야 합니다. 변환 예시는 아래를 참조해 주세요.```__label__0 아 더빙.. 진짜 짜증나네요 목소리__label__1 흠...포스터보고 초딩영화줄....오버연기조차 가볍지 않구나```또한, 본 노트북에서는 정규식을 활용하여 탭(tab) 문자, 구두점(punctuation) 문자, 한글 문자가 아닌 문자를 제거하는 간단한 전처리를 수행해 보겠습니다.def preprocess_text(corpus_path, output_path): import re with open(corpus_path, 'r', encoding='utf-8') as f, \ open(output_path, 'w', encoding='utf-8') as fw: next(f) for line in f: # Remove tab _, sentence, label = line.strip().split('\t') # Remove punctuations sentence = re.sub('[\.\,\(\)\{\}\[\]\`\'\!\?\:\;\-\=]', ' ', sentence) # Remove non-Korean characters sentence = re.sub('[^가-힣ㄱ-하-ㅣ\\s]', '', sentence) if not sentence: continue fw.writelines('__label__' + label + ' '+ sentence + '\n')학습 데이터 전처리 수행corpus_path = 'data/ratings_train.txt' output_path = 'data/ratings_train_preprocessd' preprocess_text(corpus_path, output_path) !head data/ratings_train_preprocessd -n 5__label__0 아 더빙 진짜 짜증나네요 목소리 __label__1 흠 포스터보고 초딩영화줄 오버연기조차 가볍지 않구나 __label__0 너무재밓었다그래서보는것을추천한다 __label__0 교도소 이야기구먼 솔직히 재미는 없다 평점 조정 __label__1 사이몬페그의 익살스런 연기가 돋보였던 영화 스파이더맨에서 늙어보이기만 했던 커스틴 던스트가 너무나도 이뻐보였다검증 데이터 전처리 수행corpus_path = 'data/ratings_test.txt' output_path = 'data/ratings_test_preprocessd' preprocess_text(corpus_path, output_path) !head data/ratings_test_preprocessd -n 5__label__1 굳 ㅋ __label__0 뭐야 이 평점들은 나쁘진 않지만 점 짜리는 더더욱 아니잖아 __label__0 지루하지는 않은데 완전 막장임 돈주고 보기에는 __label__0 만 아니었어도 별 다섯 개 줬을텐데 왜 로 나와서 제 심기를 불편하게 하죠 __label__1 음악이 주가 된 최고의 음악영화S3 경로 설정train_channel = prefix + '/train' validation_channel = prefix + '/validation' sess.upload_data(path='data/ratings_train_preprocessd', bucket=bucket, key_prefix=train_channel) sess.upload_data(path='data/ratings_test_preprocessd', bucket=bucket, key_prefix=validation_channel) s3_train_data = 's3://{}/{}'.format(bucket, train_channel) s3_validation_data = 's3://{}/{}'.format(bucket, validation_channel)모델 아티팩트(Model Artifact)가 저장될 S3의 경로를 설정합니다.s3_output_location = 's3://{}/{}/output'.format(bucket, prefix) print(s3_train_data, s3_validation_data, s3_output_location)s3://blazingtext-hol-daekeun/sagemaker/DEMO-blazingtext-sentiment-analysis/train 
s3://blazingtext-hol-daekeun/sagemaker/DEMO-blazingtext-sentiment-analysis/validation s3://blazingtext-hol-daekeun/sagemaker/DEMO-blazingtext-sentiment-analysis/outputTraining이제 학습에 필요한 데이터가 준비되었으므로 `sageMaker.estimator.Estimator` 객체를 생성하여 학습을 수행해 봅니다.region_name = boto3.Session().region_name container = sagemaker.amazon.amazon_estimator.get_image_uri(region_name, "blazingtext", "latest") print('SageMaker BlazingText 컨테이너 위치: {} ({})'.format(container, region_name))SageMaker BlazingText 컨테이너 위치: 811284229777.dkr.ecr.us-east-1.amazonaws.com/blazingtext:latest (us-east-1)SageMaker BlazingText는 Word2Vec의 원래 구현과 유사하게 네거티브 샘플링(Negative Sampling)을 사용하여 CPU 및 GPU(들)에서 CBOW(Continuous Bag-of-Words) 및 스킵 그램(Skip-gram) 아키텍처를 효율적으로 구현합니다. GPU 구현은 고도로 최적화된 CUDA 커널을 사용합니다. 자세한 내용은 [*BlazingText: Scaling and Accelerating Word2Vec using Multiple GPUs*](https://dl.acm.org/citation.cfm?doid=3146347.3146354)를 참조하세요. 또한, BlazingText는 CBOW 및 스킵 그램 모드로 서브 워드(subwords) 임베딩 학습을 지원합니다. 이를 통해 BlazingText는 out-of-vocabulary(OOV)를 생성할 수 있습니다.서브 워드 임베딩 학습은 [notebook (text8 데이터셋 서브 워드 임베딩)](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/blazingtext_word2vec_subwords_text8/blazingtext_word2vec_subwords_text8.ipynb)을 참조하세요.스킵 그램 및 CBOW 외에도 SageMaker BlazingText는 효율적인 미니 배치 및 행렬 연산을 수행하는 "배치 스킵 그램(Batch Skipgram)" 모드도 지원합니다. ([BLAS Level 3 routines](https://software.intel.com/en-us/mkl-developer-reference-fortran-blas-level-3-routines)) 이 모드는 여러 CPU 노드에 걸쳐 분산된 Word2Vec의 학습을 가능하게 하여 보다 빠른 학습이 가능합니다. 자세한 내용은 [*Parallelizing Word2Vec in Shared and Distributed Memory*](https://arxiv.org/pdf/1604.04661.pdf)를 참조하세요.BlazingText는 텍스트 분류를 위한 교사 학습(supervised learning)도 지원하며, 사용자 지정 CUDA 커널을 사용하여 GPU 가속을 활용하도록 FastText 텍스트 분류기를 확장합니다. 이 모델은 최신 딥러닝 텍스트 분류 알고리즘과 동등한 성능을 달성하면서 멀티 코어 CPU 또는 GPU를 사용하여 몇 분 안에 10억 단어 이상을 학습할 수 있습니다. 자세한 내용은 [알고리즘 설명서](https://docs.aws.amazon.com/sagemaker/latest/dg/blazingtext.html)를 참조하세요.아래 표는 BlazingText에서 지원하는 모드입니다.| Modes | cbow (서브워드 학습 지원) | skipgram (서브워드 학습 지원) | batch_skipgram | supervised ||:----------------------: |:----: |:--------: |:--------------:| :--------------:|| 단일 CPU 인스턴스 | ✔ | ✔ | ✔ | ✔ || 단일 GPU 인스턴스 | ✔ | ✔ | X | ✔ (1 GPU 인스턴스만 지원) || 다중 CPU 인스턴스 | X | X | ✔ | X |bt_model = sagemaker.estimator.Estimator(container, role, train_instance_count=1, train_instance_type='ml.c4.2xlarge', train_volume_size=30, train_max_run=360000, input_mode='File', output_path=s3_output_location, sagemaker_session=sess)BlazingText 하이퍼파라메터의 자세한 설정 방법은 [이 문서](https://docs.aws.amazon.com/sagemaker/latest/dg/blazingtext_hyperparameters.html)를 참조해 주세요.bt_model.set_hyperparameters(mode="supervised", epochs=30, min_count=2, learning_rate=0.005, vector_dim=100, early_stopping=True, patience=4, # Number of epochs to wait before early stopping if no progress on the validation set is observed buckets=2000000, # Number of hash buckets to use for word n-grams min_epochs=5, word_ngrams=2)학습을 위한 `sagemaker.session.s3_input` 객체를 생성하여 데이터 채널을 알고리즘과 연결합니다.train_data = sagemaker.session.s3_input(s3_train_data, distribution='FullyReplicated', content_type='text/plain', s3_data_type='S3Prefix') validation_data = sagemaker.session.s3_input(s3_validation_data, distribution='FullyReplicated', content_type='text/plain', s3_data_type='S3Prefix') data_channels = {'train': train_data, 'validation': validation_data}지금까지 `Estimator` 객체에 대한 하이퍼파라미터를 설정했으며 데이터 채널을 알고리즘과 연결했습니다. 남은 것은 `fit` 메소드로 학습하는 것뿐입니다.학습에는 몇 가지 단계가 포함됩니다. 
먼저 `Estimator` 클래스를 작성하는 동안 요청한 인스턴스가 프로비저닝되고 적절한 라이브러리로 설정됩니다. 그 다음 채널의 데이터가 학습 인스턴스로 다로드되며 이후 학습 작업이 시작됩니다. 데이터 크기에 따라 프로비저닝 및 데이터 다운로드에 시간이 다소 걸리며, 이에 따라 학습 작업에 따른 로그를 확인하는 데 몇 분이 걸립니다.로그는 `min_epochs`(이 파라메터는 학습에 최소로 필요한 epoch 횟수입니다) 이후 모든 epoch에 대한 검증 데이터의 정확도(accuracy)를 출력합니다.학습이 완료되면 "작업 완료(Job compelete)" 메시지가 출력됩니다. 학습된 모델은 `Estimator`에서 `output_path`로 설정된 S3 버킷에서 찾을 수 있습니다.bt_model.fit(inputs=data_channels, logs=True)2019-11-05 22:52:16 Starting - Starting the training job... 2019-11-05 22:52:34 Starting - Launching requested ML instances......... 2019-11-05 22:54:06 Starting - Preparing the instances for training... 2019-11-05 22:54:40 Downloading - Downloading input data... 2019-11-05 22:55:14 Training - Training image download completed. Training in progress..Arguments: train [11/05/2019 22:55:15 WARNING 140218781001536] Loggers have already been setup. [11/05/2019 22:55:15 WARNING 140218781001536] Loggers have already been setup. [11/05/2019 22:55:15 INFO 140218781001536] nvidia-smi took: 0.025171995163 secs to identify 0 gpus [11/05/2019 22:55:15 INFO 140218781001536] Running single machine CPU BlazingText training using supervised mode. [11/05/2019 22:55:15 INFO 140218781001536] Processing /opt/ml/input/data/train/ratings_train_preprocessd . File size: 13 MB [11/05/2019 22:55:15 INFO 140218781001536] Processing /opt/ml/input/data/vali[...]Tip데이터셋이 잘 정제되어 있어 전처리를 거의 수행하지 않았음에도 검증셋에서 비교적 높은 정확도(accuracy)를 보입니다.이를 baseline으로 잡고 불용어(stopword) 제거, 형태소 분석 등의 전처리와 하이퍼파라메터 튜닝을 통해 좀 더 높은 정확도를 달성할 수 있습니다. 특히, 한국어 데이터의 다운스트림 작업들은 하이퍼파라메터 튜닝보다는 전처리가 훨씬 중요하니 이 점을 유의해 주세요. Hosting / Inference학습을 완료하면 모델을 Amazon SageMaker 실시간 호스팅 엔드포인트(real-time hosted endpoint)로 배포할 수 있고, 이를 통해 모델로부터 추론(inference)을 수행합니다. (추론은 예측; prediction 이라고도 합니다.) 실시간 추론 수행 시 엔드포인트는 계속 가동되어야 하므로, 추론을 위해 저렴한 인스턴스를 선택하시는 것을 권장합니다.이 과정은 약 10분 정도 소요됩니다.text_classifier = bt_model.deploy(initial_instance_count = 1,instance_type = 'ml.m4.xlarge')----------------------------------------------------------------------------------------------------------!Use JSON format for inferenceBlazingText는 추론을 위한 컨텐츠 유형(content-type)으로 `application/json` 을 지원합니다. 엔드포인트로 전달할 입력 문장은 "**instances**" 키가 반드시 포함되어야 합니다.import nltk nltk.download('punkt') sentences = ["재미있게 봤습니다. 제 점수는요 100점 만점에 80점 드리겠습니다.", "스토리가 너무 단방향이라 재미가 없고 성우 더빙도 그닥이네요..."] tokenized_sentences = [' '.join(nltk.word_tokenize(sent)) for sent in sentences] payload = {"instances" : tokenized_sentences} response = text_classifier.predict(json.dumps(payload)) predictions = json.loads(response) print(json.dumps(predictions, indent=2))[nltk_data] Downloading package punkt to /home/ec2-user/nltk_data... [nltk_data] Unzipping tokenizers/punkt.zip. [ { "prob": [ 0.9286458492279053 ], "label": [ "__label__1" ] }, { "prob": [ 0.9930349588394165 ], "label": [ "__label__0" ] } ]기본적으로는 확률이 가장 높은 예측 결과(top-1 prediction)만 반환합니다. 상위 k개의 예측(top-k prediction)을 얻으려면 `configuration` key에서 `k`를 지정하면 됩니다. 아래 code snippet을 참조해 주세요.```pythonpayload = {"instances" : tokenized_sentences, "configuration": {"k": 2}}response = text_classifier.predict(json.dumps(payload))predictions = json.loads(response)print(json.dumps(predictions, indent=2))``` Stop / Close the Endpoint (Optional)실시간 예측을 제공하기 위해 엔드포인트를 계속 실행할 필요가 없는 경우, 과금을 막기 위해 엔드포인트를 삭제합니다.sess.delete_endpoint(text_classifier.endpoint)Detalhes do Regex Link para o site de exemplificação de Regex: https://regex101.com Explicação dos Metacaracteries: - " \ " : serve para fazer o caracterie seguinte ser reconhecido como ele mesmo > Exemplo: \ . 
reconhece o ponto como um ponto mesmo, como o ultimo caractere da regex - " () " : serve para criar grupos > Exemplo: (gmail|hotmail), seriam os provedores reconhecidos pela regex - " [] " : serve para criar partes em que algumas opções de letras podem aparecer > Exemplo: a[lt]ura, tanto alura como atura seriam reconhecidos pela regex - " - " : serve para definir um intervalo de caracteries que podem aparecer > Exemplo: [a-z], reconheceria todos os caracteries do alfabeto - " * " : serve para definir que vai haver uma repetição de caracteries, mas tem o problema de que pode ser vazio > Exemplo: [a-z]*, reconheceria todas as palavras - " + " : serve para definir que vai haver uma repetição de caracteries, indicando que deve haver pelo menos 1 letra > Exemplo: [a-z]+, reconheceria todos os caracteries do alfabeto - " {} " : serve para definir o número máximo de repetições que podem ocorrer > Exemplo: [a-z]{1,5}, reconheceria todas as palavras com no mínimo 1 letra e no máximo 5 - " ^d.* " : serve para indicar com qual letra a palavra da regex deve inicia > Exemplo: ^d iria reconhecer palavras iniciadas com d - " .*br$ " : serve para indicar com qual letra a palavra da regex deve terminar > Exemplo: .*br\$ iria reconhecer palavras terminadas com br Import das Libsimport re import nltk import pandas as pd from nltk.util import bigrams from nltk.lm import MLE, NgramCounter, Laplace from nltk.lm.preprocessing import pad_both_ends, padded_everygram_pipeline from nltk.tokenize import WhitespaceTokenizer from timeit import timeit from sklearn.model_selection import train_test_splitLeitura e análise dos dados Dados em portuguêsdados_portugues = pd.read_csv("stackoverflow_portugues.csv") dados_portugues.head()Exemplo de questão em portuguêsprint(dados_portugues["Questão"][5])

Desenvolvi uma página em PHP para uso interno da empresa que trabalho e apenas pouquíssimas pessoas a utilizam. Através dessa página é possível fazer algumas consultas, inserções, alterações e remoções de dados de uma tabela em um banco de dados MySQL, porém eu acredito que meu código em PHP não está protegido contra injeção de código SQL, por exemplo:

//----CONSULTA SQL----//
$busca = mysql_query ('insert into Produtos (coluna) values(' . $valor . ')');

Logo, digamos que o usuário usar a sentença: 1); DROP TABLE Produtos; para ao campo valor o comando ficaria:

insert into Produtos (coluna) values(1); DROP TABLE Produtos;

Ele vai inserir um novo registro cujo o campo coluna será 1 e logo em seguida ele vai deletar a tabela Produtos.

Como posso melhorar meu código para prevenir essa situação?

English datadados_ingles = pd.read_csv("stackoverflow_ingles.csv") dados_ingles.head()Example question in Englishprint(dados_ingles["Questão"][5])

What is the use of the yield keyword in Python? What does it do?

For example, I'm trying to understand this code1:

def _get_child_candidates(self, distance, min_dist, max_dist):
    if self._leftchild and distance - max_dist < self._median:
        yield self._leftchild
    if self._rightchild and distance + max_dist >= self._median:
        yield self._rightchild  

And this is the caller:

result, candidates = [], [self]
while candidates:
    node = candidates.pop()
    distance = node._get_dist(obj)
    if distance <= max_dist and distance >= min_dist:
        result.extend(node._values)
    candidates.extend(node._get_child_candidates(distance, min_dist, max_dist))
return result

What happens when the method _get_child_candidates is called? Is a list returned? A single element? Is it called again? When will subsequent calls stop?


<[...]Spanish datadados_espanhol = pd.read_csv("stackoverflow_espanhol.csv") dados_espanhol.head()Example question in Spanishprint(dados_espanhol["Questão"][5])

Siempre he visto que en JavaScript hay:

  • asignaciones =
  • comparaciones == y ===

Creo entender que == hace algo parecido a comparar el valor de la variable y el === también compara el tipo (como un equals de java).

¿Alguien podría confirmarme este punto y extenderlo?. Soy javero y el no tipado de javascript a veces me encanta y otras lo odio.


¿Cuál es la manera correcta en javascript de comparar undefined, null y otros valores por defecto?

variable == null
variable === null

¿undefined se usa como cadena de texto o como palabra clave? ¿Cual de las siguientes comparaciones es la correcta para un elemento html sin value? (por ejemplo un label sin contenido)

variable == "undefined"
variable == "undefined" variable === "undefined"[...]Illustrating regex usagequestao_portugues = dados_portugues.Questão[5]
print(questao_portugues)

# Find every match of that pattern inside the text
lista = re.findall(r"<.*?>", questao_portugues)
lista
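The `?` in `<.*?>` makes the `*` quantifier non-greedy (lazy), so each match stops at the first `>` instead of running to the last one, which is what lets the pattern capture individual HTML tags. A minimal sketch of the difference, on an illustrative string rather than the dataset:

```python
import re

amostra = "<p>texto</p> <code>x = 1</code>"   # illustrative string

# Greedy: .* expands as far as possible, so a single match spans from the
# first '<' to the last '>'
print(re.findall(r"<.*>", amostra))    # ['<p>texto</p> <code>x = 1</code>']

# Lazy: .*? stops at the first '>' it can reach, giving one match per tag
print(re.findall(r"<.*?>", amostra))   # ['<p>', '</p>', '<code>', '</code>']
```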
# Replace every match of the regex with the second argument
# In this case, removing the HTML tags
texto_teste = re.sub(r"<.*?>", "", questao_portugues)
print(texto_teste)Desenvolvi uma página em PHP para uso interno da empresa que trabalho e apenas pouquíssimas pessoas a utilizam. Através dessa página é possível fazer algumas consultas, inserções, alterações e remoções de dados de uma tabela em um banco de dados MySQL, porém eu acredito que meu código em PHP não está protegido contra injeção de código SQL, por exemplo:

//----CONSULTA SQL----//
$busca = mysql_query ('insert into Produtos (coluna) values(' . $valor . ')');


Logo, digamos que o usuário usar a sentença: 1); DROP TABLE Produtos; para ao campo valor o comando ficaria: 

insert into Produtos (coluna) values(1); DROP TABLE Produtos;


Ele vai inserir um novo registro cujo o campo coluna será 1 e logo em seguida ele vai deletar a tabela Produtos.

Como posso melhorar meu código para prevenir essa situação?Optimizing regex usage# Direct approach
re.search(r"70", "234826357236234263578263457263470")
# Approach that compiles the regex first
regex = re.compile(r"70")
regex
regex.search("234826357236234263578263457263470")Timing each of the two approaches above over 1 million executions# Direct approach
setup = """import re"""

timeit("""re.search(r"70", "234826357236234263578263457263470")""", setup)
# Compiled approach
setup = """import re
regex = re.compile(r"70")"""

timeit("""regex.search("234826357236234263578263457263470")""", setup)Cleaning the junk out of the texts  A function to perform the removal both on a single text and on a list of texts# Removes a specified regex pattern from a text
def remover(textos, regex):
    if type(textos) == str:
        return regex.sub("", textos)
    else:
        return [regex.sub("", texto) for texto in textos]
def substituir_codigo(textos, regex):
    if type(textos) == str:
        return regex.sub("CODE", textos)
    else:
        return [regex.sub("CODE", texto) for texto in textos]Removing the code blocks from the questionregex_codigo = re.compile(r"<code>(.|\n)*?</code>")

questao_sem_codigo = substituir_codigo(dados_ingles.Questão[0], regex_codigo)

print(questao_sem_codigo)

Here is a piece of C++ code that seems very peculiar. For some strange reason, sorting the data miraculously makes the code almost six times faster.

CODE
  • Without CODE, the code runs in 11.54 seconds.
  • With the sorted data, the code runs in 1.93 seconds.

Initially, I thought this might be just a language or compiler anomaly. So I tried it in Java.

CODE

With a somewhat similar but less extreme result.


My first thought was that sorting brings the data into the cache, but then I thought how silly that is because the array was just generated.

  • What is going on?
  • Why is it faster to process a sorted array than an unsorted array?
  • The code is summing up some independent terms, and the order should not matter.
Remoção das TAGs HTMLregex_html = re.compile(r"<.*?>") questao_sem_tag = remover(questao_sem_codigo, regex_html) print(questao_sem_tag)Here is a piece of C++ code that seems very peculiar. For some strange reason, sorting the data miraculously makes the code almost six times faster. CODE Without CODE, the code runs in 11.54 seconds. With the sorted data, the code runs in 1.93 seconds. Initially, I thought this might be just a language or compiler anomaly. So I tried it in Java. CODE With a somewhat similar but less extreme result. My first thought was that sorting brings the data into the cache, but then I thought how silly that is because the array was just generated. What is going on? Why is it faster to process a sorted array than an unsorted array? The code is summing up some independent terms, and the order should not matter.Tratamento do banco de dados# Tratamento questões em português questoes_port_sem_code = substituir_codigo(dados_portugues.Questão, regex_codigo) questoes_port_sem_hmtl = remover(questoes_port_sem_code, regex_html) dados_portugues["sem_code_tag"] = questoes_port_sem_hmtl # Tratamento questões em inglês questoes_ing_sem_code = substituir_codigo(dados_ingles.Questão, regex_codigo) questoes_ing_sem_hmtl = remover(questoes_ing_sem_code, regex_html) dados_ingles["sem_code_tag"] = questoes_ing_sem_hmtl # Tratamento das questões em espanhol questoes_esp_sem_code = substituir_codigo(dados_espanhol.Questão, regex_codigo) questoes_esp_sem_hmtl = remover(questoes_esp_sem_code, regex_html) dados_espanhol["sem_code_tag"] = questoes_esp_sem_hmtl display(dados_portugues.head(2)) display(dados_ingles.head(2)) display(dados_espanhol.head(2))Refinando o tratamento Criando Regex para remover a pontuação# A estrutura => [^ ] combina com o complementar da regex que está sendo passada # regex_pontuacao = re.compile(r"[^ \w]"), também poderia ser uma opção regex_pontuacao = re.compile(r"[^\w\s]") questao_sem_pontuacao = remover(dados_espanhol.sem_code_tag[0], regex_pontuacao) print(questao_sem_pontuacao)Las sentencias dinámicas son sentencias SQL que se crean como cadenas de texto strings y en las que se insertanconcatenan valores obtenidos de alguna fuente normalmente proveniente del usuario lo que puede hacer que sean vulnerables a inyección SQL si no se sanean las entradas como por ejemplo CODE Eso es un ejemplo de una vulnerabilidad grave en la seguridad de una aplicación web o no porque si el usuario introdujese un valor como CODE nos encontraríamos con que la sentencia ejecutada sería CODE Y se eliminaría la tabla Usuarios con todos los datos contenidos en ella Cómo puedo evitar que la inyección SQL ocurra en PHPFunção para transformar os textos para minusculodef minusculo(textos): if type(textos) == str: return textos.lower() else: return [texto.lower() for texto in textos] questao_minusculo = minusculo(questao_sem_pontuacao) print(questao_minusculo)las sentencias dinámicas son sentencias sql que se crean como cadenas de texto strings y en las que se insertanconcatenan valores obtenidos de alguna fuente normalmente proveniente del usuario lo que puede hacer que sean vulnerables a inyección sql si no se sanean las entradas como por ejemplo code eso es un ejemplo de una vulnerabilidad grave en la seguridad de una aplicación web o no porque si el usuario introdujese un valor como code nos encontraríamos con que la sentencia ejecutada sería code y se eliminaría la tabla usuarios con todos los datos contenidos en ella cómo puedo evitar que la inyección sql ocurra en phpRegex para 
remover os dígitos dos textosregex_digito = re.compile(r"\d+") questao_sem_digito = regex_digito.sub("", "O valor do meu calculo foi de 40 e34 56t \n \n 45 foi") print(questao_sem_digito)O valor do meu calculo foi de e t foiRegex para remover espaços duplicados que restaram do passos anterioresregex_espaco = re.compile(r" +") regex_quebra_linha = re.compile(r"(\n|\r)") def substituir_por_espaco(textos, regex): if type(textos) == str: return regex.sub(" ", textos) else: return [regex.sub(" ", texto) for texto in textos] questao_sem_quebra_linha = substituir_por_espaco(questao_sem_digito, regex_quebra_linha) questao_sem_espaco_duplo = substituir_por_espaco(questao_sem_quebra_linha, regex_espaco) print(questao_sem_espaco_duplo)O valor do meu calculo foi de e t foiAplicando os novos tratamentos no Dataset - Regex pontuação - Texto minúsculo - Regex dígito - Regex quebra de linha - Regex espaço duplo Aplicando tratamentos para o Dataset em portuguêsquestoes_port_sem_pont = remover(dados_portugues.sem_code_tag, regex_pontuacao) questoes_port_minusculo = minusculo(questoes_port_sem_pont) questoes_port_sem_digito = remover(questoes_port_minusculo, regex_digito) questoes_port_sem_quebra_linha = substituir_por_espaco(questoes_port_sem_digito, regex_quebra_linha) questoes_port_sem_espaco_duplo = substituir_por_espaco(questoes_port_sem_quebra_linha, regex_espaco) print(questoes_port_sem_espaco_duplo[0]) dados_portugues["questoes_tratadas"] = questoes_port_sem_espaco_duplo dados_portugues.head(2)se eu fizer o hash de senhas antes de armazenálas em meu banco de dados é suficiente para evitar que elas sejam recuperadas por alguém estou falando apenas da recuperação diretamente do banco de dados e não qualquer outro tipo de ataque como força bruta na página de login da aplicação keylogger no cliente e criptoanálise rubberhose qualquer forma de hash não vai impedir esses ataques tenho preocupação em dificultar ou até impossibilitar a obtenção das senhas originais caso o banco de dados seja comprometido como dar maior garantia de segurança neste aspecto quais preocupações adicionais evitariam o acesso às senhas existem formas melhores de fazer esse hashAplicando tratamentos para o Dataset em inglêsquestoes_ing_sem_pont = remover(dados_ingles.sem_code_tag, regex_pontuacao) questoes_ing_minusculo = minusculo(questoes_ing_sem_pont) questoes_ing_sem_digito = remover(questoes_ing_minusculo, regex_digito) questoes_ing_sem_quebra_linha = substituir_por_espaco(questoes_ing_sem_digito, regex_quebra_linha) questoes_ing_sem_espaco_duplo = substituir_por_espaco(questoes_ing_sem_quebra_linha, regex_espaco) print(questoes_ing_sem_espaco_duplo[0]) dados_ingles["questoes_tratadas"] = questoes_ing_sem_espaco_duplo dados_ingles.head(2)here is a piece of c code that seems very peculiar for some strange reason sorting the data miraculously makes the code almost six times faster code without code the code runs in seconds with the sorted data the code runs in seconds initially i thought this might be just a language or compiler anomaly so i tried it in java code with a somewhat similar but less extreme result my first thought was that sorting brings the data into the cache but then i thought how silly that is because the array was just generated what is going on why is it faster to process a sorted array than an unsorted array the code is summing up some independent terms and the order should not matterAplicando tratamentos para o Dataset em espanholquestoes_esp_sem_pont = remover(dados_espanhol.sem_code_tag, regex_pontuacao) 
questoes_esp_minusculo = minusculo(questoes_esp_sem_pont) questoes_esp_sem_digito = remover(questoes_esp_minusculo, regex_digito) questoes_esp_sem_quebra_linha = substituir_por_espaco(questoes_esp_sem_digito, regex_quebra_linha) questoes_esp_sem_espaco_duplo = substituir_por_espaco(questoes_esp_sem_quebra_linha, regex_espaco) print(questoes_esp_sem_espaco_duplo[0]) dados_espanhol["questoes_tratadas"] = questoes_esp_sem_espaco_duplo dados_espanhol.head(2)las sentencias dinámicas son sentencias sql que se crean como cadenas de texto strings y en las que se insertanconcatenan valores obtenidos de alguna fuente normalmente proveniente del usuario lo que puede hacer que sean vulnerables a inyección sql si no se sanean las entradas como por ejemplo code eso es un ejemplo de una vulnerabilidad grave en la seguridad de una aplicación web o no porque si el usuario introdujese un valor como code nos encontraríamos con que la sentencia ejecutada sería code y se eliminaría la tabla usuarios con todos los datos contenidos en ella cómo puedo evitar que la inyección sql ocurra en phpNLTK e Bigramstexto_teste = "alura" list(bigrams(texto_teste))Caracteres Falsoslist(bigrams(pad_both_ends(texto_teste, n=2)))Criando o primero modelo de linguagem Criação de uma coluna de classificação para cada Datasetdados_portugues["idioma"] = "port" dados_espanhol["idioma"] = "esp" dados_ingles["idioma"] = "ing" display(dados_portugues.head(1)) display(dados_ingles.head(1)) display(dados_espanhol.head(1))Separando cada Dataset em treino(80%) e teste(20%)# Separação português port_treino, port_teste = train_test_split(dados_portugues.questoes_tratadas, test_size=0.2, random_state=123) # Separação inglês ing_treino, ing_teste = train_test_split(dados_ingles.questoes_tratadas, test_size=0.2, random_state=123) # Separação espanhol esp_treino, esp_teste = train_test_split(dados_espanhol.questoes_tratadas, test_size=0.2, random_state=123)Tokenização das questões# Junta todas as questões dentro de uma variavel todas_questoes_port = " ".join(port_treino) todas_questoes_port # Separa todas as palavras por espaços em branco todas_palavras_port = WhitespaceTokenizer().tokenize(todas_questoes_port) print(len(todas_palavras_port)) print(todas_palavras_port)36716 ['sou', 'iniciante', 'em', 'php', 'e', 'gostaria', 'de', 'saber', 'se', 'pdophp', 'data', 'objects', 'é', 'a', 'maneira', 'mais', 'segura', 'de', 'se', 'conectar', 'a', 'um', 'banco', 'de', 'dados', 'preciso', 'também', 'de', 'um', 'exemplo', 'de', 'como', 'fazer', 'esta', 'conexão', 'e', 'inserirselecionar', 'dados', 'por', 'exemplo', 'code', 'estou', 'fazendo', 'um', 'efeito', 'aqui', 'mas', 'não', 'estou', 'entendendo', 'esse', 'sinal', 'de', 'code', 'que', 'tem', 'no', 'framework', 'resultado', 'final', 'code', 'code', 'code', 'qual', 'diferença', 'de', 'linguagem', 'compilada', 'para', 'linguagem', 'interpretada', 'e', 'quais', 'as', 'vantagens', 'de', 'uma', 'para', 'outra', 'qual', 'a', 'diferença', 'entre', 'code', 'e', 'code', 'ambas', 'são', 'muito', 'parecidas', 'mas', 'em', 'qual', 'caso', 'é', 'melhor', 'usar', 'uma', 'ou', 'a', 'outra', 'gostaria', 'de', 'código', 'de', 'exemplos', 'um', 'compilador', 'é', 'programado', 'em', 'qual', 'linguagem', 'todos', 'os', 'com[...]Criação dos bigrams e do vocabulárioport_treino_bigram, vocab_port = padded_everygram_pipeline(2, todas_palavras_port)Criação do modelo MLEmodelo_port = MLE(2) modelo_port.fit(port_treino_bigram, vocab_port) modelo_port.generate(num_words=4) # Ocorrências de caracteres depois do m 
modelo_port.counts[['m']].items()Testando a perplexidade das frases no modelotexto = "bom dia" palavras = WhitespaceTokenizer().tokenize(texto) palavras_fakechar = [list(pad_both_ends(palavra, n=2)) for palavra in palavras] palavras_bigrams = [list(bigrams(palavras_fake)) for palavras_fake in palavras_fakechar] palavras_bigrams # Quanto mais próximo de 0 zero melhor for i in range(len(palavras_bigrams)): print(modelo_port.perplexity(palavras_bigrams[i]))15.299125474590992 8.118290440853844Função para treinar os modelosdef treinar_modelo_mle(lista_textos): # Tratamento das de junção e tokenização das questões todas_questoes_ling = " ".join(lista_textos) todas_palavras_ling = WhitespaceTokenizer().tokenize(todas_questoes_ling) # Criação dos bigrams e do vocabulário da linguagem ling_treino_bigram, vocab_ling = padded_everygram_pipeline(2, todas_palavras_ling) # Criação do modelo modelo_ling = MLE(2) modelo_ling.fit(ling_treino_bigram, vocab_ling) return modelo_ling # Modelo do inglês modelo_ing = treinar_modelo_mle(ing_treino) # Modelo do espanhol modelo_esp = treinar_modelo_mle(esp_treino)Analisando o calculo da perplexidade de uma frase para todos os modelostexto = "good morning" palavras = WhitespaceTokenizer().tokenize(texto) palavras_fakechar = [list(pad_both_ends(palavra, n=2)) for palavra in palavras] palavras_bigrams = [list(bigrams(palavras_fake)) for palavras_fake in palavras_fakechar] per_port = 0 per_ing = 0 per_esp = 0 print("Perplexidade Português:") for i in range(len(palavras_bigrams)): per_port += modelo_port.perplexity(palavras_bigrams[i]) print(modelo_port.perplexity(palavras_bigrams[i])) print(f"Perplexidade Total: {per_port}") print("\nPerplexidade Inglês:") for i in range(len(palavras_bigrams)): per_ing += modelo_ing.perplexity(palavras_bigrams[i]) print(modelo_ing.perplexity(palavras_bigrams[i])) print(f"Perplexidade Total: {per_ing}") print("\nPerplexidade Espanhol:") for i in range(len(palavras_bigrams)): per_esp += modelo_esp.perplexity(palavras_bigrams[i]) print(modelo_esp.perplexity(palavras_bigrams[i])) print(f"Perplexidade Total: {per_esp}")Perplexidade Português: 47.67178908687439 21.535806001201696 Perplexidade Total: 69.20759508807609 Perplexidade Inglês: 17.26133308212415 12.900755877319751 Perplexidade Total: 30.1620889594439 Perplexidade Espanhol: 32.15357126058061 22.401851392617296 Perplexidade Total: 54.55542265319791Função para calcular a perplexidade de um texto com base no modelo passadodef calcular_perplexidade(modelo, texto): # Variavel que vai armazenar a perplexidade perplexidade = 0 # Tratamento do texto que foi passado palavras = WhitespaceTokenizer().tokenize(texto) palavras_fakechar = [list(pad_both_ends(palavra, n=2)) for palavra in palavras] palavras_bigrams = [list(bigrams(palavras_fake)) for palavras_fake in palavras_fakechar] # Loop para o calculo da perplexidade for i in range(len(palavras_bigrams)): perplexidade += modelo.perplexity(palavras_bigrams[i]) return perplexidade calcular_perplexidade(modelo_ing, "good morning")Testando o calculo da perplexidadeprint(calcular_perplexidade(modelo_port, port_teste.iloc[0])) print(port_teste.iloc[0])2006.9786364086417 até a época em que os computadores eram puramente mecânicos e eram programados por cartões perfurados eu entendo como funciona depois quando surgiram os primeiros computadores digitais operados com válvulas e relés fica mais complicado de compreender como eram feitas as programações imagino que deveria haver algo semelhante a um compartimento com uma matriz de relés semelhante aos 
cartões perfurados pré programada que continha um programa fixo e único a ser executado pela máquina quando fosse preciso trocar o programa os programadores apenas rearranjavam as válvulas e a máquina continuava a trabalhar como surgiu essa noção que temos de programação hoje que o programador digita códigos em um editor de texto e compila eu não consigo imaginar como chegou a esse ponto por exemplo o assembly ele é apenas traduzido de textos para binário pelo assembler mas poxa seria preciso ter outra linguagem de programação para se criar o assembler suponha que só existam os computadores[...]Problema da perplexidade infinita - Isso ocorre devido a como é feito o calculo da perplexidade: > $Perplexidade(X) = 1/P(X)$ - Logo quando a probabilidade $P(X)=0$, nossa divisão vai dar infinito. - Então como no nosso caso os dados da frase em português não foram encontrados no modelo em inglês, acabou dando infinito.print(calcular_perplexidade(modelo_ing, port_teste.iloc[0]))infUsando modelo de Laplace para contornar esse problemadef treinar_modelo_Laplace(lista_textos): # Tratamento das de junção e tokenização das questões todas_questoes_ling = " ".join(lista_textos) todas_palavras_ling = WhitespaceTokenizer().tokenize(todas_questoes_ling) # Criação dos bigrams e do vocabulário da linguagem ling_treino_bigram, vocab_ling = padded_everygram_pipeline(2, todas_palavras_ling) # Criação do modelo modelo_ling = Laplace(2) modelo_ling.fit(ling_treino_bigram, vocab_ling) return modelo_lingTreinamento dos novos modelos usando Laplacemodelo_port_laplace = treinar_modelo_Laplace(port_treino) modelo_ing_laplace = treinar_modelo_Laplace(ing_treino) modelo_esp_laplace = treinar_modelo_Laplace(esp_treino)Testando a perplexidade usando Laplaceprint(f"Perplexidade Port: {calcular_perplexidade(modelo_port_laplace, port_teste.iloc[0])}") print(f"Perplexidade Ing: {calcular_perplexidade(modelo_ing_laplace, port_teste.iloc[0])}") print(f"Perplexidade Esp: {calcular_perplexidade(modelo_esp_laplace, port_teste.iloc[0])}")Perplexidade Port: 2009.1937946178912 Perplexidade Ing: 5876.837588345698 Perplexidade Esp: 3488.5698949157722Criando uma função para atribuir o idioma ao textodef atribuir_idioma(lista_textos): if type(lista_textos) == str: portugues = calcular_perplexidade(modelo_port_laplace, lista_textos) ingles = calcular_perplexidade(modelo_ing_laplace, lista_textos) espanhol = calcular_perplexidade(modelo_esp_laplace, lista_textos) if ingles >= portugues <= espanhol: idioma = "port" elif portugues > ingles < espanhol: idioma = "ing" else: idioma = "esp" return idioma else: idioma = [] for texto in lista_textos: portugues = calcular_perplexidade(modelo_port_laplace, texto) ingles = calcular_perplexidade(modelo_ing_laplace, texto) espanhol = calcular_perplexidade(modelo_esp_laplace, texto) if ingles >= portugues <= espanhol: idioma.append("port") elif portugues > ingles < espanhol: idioma.append("ing") else: idioma.append("esp") return idiomaAvaliação da precisão dos modelos para os dados de teste# Atribuindo os idiomas aos dados de teste de português resultados_portugues = atribuir_idioma(port_teste) # Atribuindo os idiomas aos dados de teste de inglês resultados_ingles = atribuir_idioma(ing_teste) # Atribuindo os idiomas aos dados de teste de espanhol resultados_espanhol = atribuir_idioma(esp_teste)Precisão de cada modelo para os dados apresentadosvalores_port = resultados_portugues.count("port") print(f"Valores que foram indicados como Port: {valores_port}") print(f"Valores que realmente eram Port: 
{len(port_teste)}") taxa_acerto_port = valores_port/len(port_teste) * 100 print(f"Taxa de acerto: {taxa_acerto_port:.2f}%") valores_ing = resultados_ingles.count("ing") print(f"Valores que foram indicados como Ing: {valores_ing}") print(f"Valores que realmente eram Ing: {len(ing_teste)}") taxa_acerto_ing = valores_ing/len(ing_teste) * 100 print(f"Taxa de acerto: {taxa_acerto_ing:.2f}%") valores_esp = resultados_espanhol.count("esp") print(f"Valores que foram indicados como Esp: {valores_esp}") print(f"Valores que realmente eram Esp: {len(esp_teste)}") taxa_acerto_esp = valores_esp/len(esp_teste) * 100 print(f"Taxa de acerto: {taxa_acerto_esp:.2f}%")Valores que foram indicados como Esp: 97 Valores que realmente eram Esp: 100 Taxa de acerto: 97.00%Caso do Espanhol que foi classificado erradoresultados_espanhol.index("ing") esp_teste.iloc[23]Resultado final do detector de idiomasfrase = "The bruno is very boring" atribuir_idioma(frase) frase = "Ai amiga eu achei aquela over muito extended" display(atribuir_idioma(frase))Greedy model selectiondef objective(trial): weights = [trial.suggest_float(f"weight_{name}", 0, 1) for name in selected_names] return get_weighted_mean(selected_models, weights) oofs_by_score = [ (61, oof_61), # 0.715 <-- start (62, oof_62), # 0.714 (64, oof_64), # 0.714 (30, oof_30), # 0.714 (63, oof_63), # 0.714 (52, oof_52), # 0.714 (28, oof_28), # 0.713 (58, oof_58), # 0.713 (29, oof_29), # 0.712 (60, oof_60), # 0.711 (43, oof_43), # 0.711 (57, oof_57), # 0.708 ] selected_models = [] selected_names = [] name, data = oofs_by_score.pop(0) selected_names.append(name) selected_models.append(data) best_oof = 0 best_params = None best_models = [] while len(oofs_by_score) > 0: round_best_oof = best_oof round_best_params = None round_best_name = None round_best_model = None print(f"Round start with {selected_names}") for name, data in oofs_by_score: selected_names.append(name) selected_models.append(data) study = optuna.create_study(direction='maximize') study.optimize(objective, n_trials=1000) if study.best_trial.value > round_best_oof: round_best_oof = study.best_trial.value round_best_params = study.best_trial.params round_best_model = data round_best_name = name print(f"trial add {name}, score improved to {round_best_oof}") else: print(f"trial add {name}, no improvement") selected_names = selected_names[:-1] selected_models = selected_models[:-1] if round_best_oof > best_oof: print(f"Successfully improved score with {round_best_name} to get score {round_best_oof}") selected_names.append(round_best_name) selected_models.append(round_best_model) best_names = [m for m in selected_names] best_oof = round_best_oof best_params = round_best_params oofs_by_score.remove((round_best_name, round_best_model)) else: print("Round did not improve overall score. 
Stopping") break print(f"Selected: {selected_names}") print(f"OOF: {best_oof}") print(f"Best hyperparameters: {best_params}")Image detailsheight = 32 width = 32 channel = 3 def readImage(imageName): global ImageDir return mpimg.imread(os.path.join(ImageDir, imageName.strip())) def getInputValues(image): global height, width, channel images = np.empty(shape =[1, height,width,channel], dtype=np.float32) _image= imageResize(image) images[0] = (_image/127.5) -1 return images def imageResize(image): global height, width return cv2.resize(image, (width, height), cv2.INTER_AREA) sess = tf.Session() saver = tf.train.import_meta_graph("model/Model_15425395460637_____9.meta") saver.restore(sess, tf.train.latest_checkpoint('model')) graph = tf.get_default_graph() Xplace = graph.get_tensor_by_name("X_Placeholder:0") output = graph.get_tensor_by_name("dense_2/BiasAdd:0") def predictValue(image): x = getInputValues(image) _yPred = sess.run(output,feed_dict = {Xplace:x}) return _yPredServersio = socketio.Server() app = Flask(__name__) @sio.on('telemetry') def telemetry(sid, data): image = Image.open(BytesIO(base64.b64decode(data["image"]))) try: image = np.asarray(image) temp = predictValue(image) steering_angle =float(temp[0]) print(steering_angle) send_control(steering_angle, 1) except Exception as e: print(e) @sio.on('connect') def connect(sid, environ): send_control(0, 0) print("connected") def send_control(steering_angle, accelerator): sio.emit( "steer", data={ 'steering_angle': steering_angle.__str__(), 'accelerator': accelerator.__str__() }, skip_sid=True) app = socketio.Middleware(sio, app) eventlet.wsgi.server(eventlet.listen(('', 9876)), app) sess.close()[Requests: HTTP for Humans™](https://2.python-requests.org//en/latest/index.html)import requestsquickstarthttps://2.python-requests.org//en/latest/user/quickstart/ headr = requests.head('http://httpbin.org/get') r.headersputr = requests.put('http://httpbin.org/put', data = {'key':'value'}) r.json()deleter = requests.delete('http://httpbin.org/delete') r.json()optionsr = requests.options('http://httpbin.org/get') r.headers r.headers['Content-Type'] r.headers.get('content-type') r.textPassing Parameters In URLspayload = {'key1': 'value1', 'key2': 'value2'} r = requests.get('http://httpbin.org/get', params=payload) r.url payload = {'key1': 'value1', 'key2': ['value2', 'value3']} r = requests.get('http://httpbin.org/get', params=payload) r.urlResponse Contentr = requests.get('https://api.github.com/events') r.text r.encoding r.encoding = 'ISO-8859-1' r.status_codeBinary Response Contentr.contentJSON Response Contentr.json()Raw Response Content在极少数情况下,您希望从服务器获得原始套接字响应,您可以访问r.raw。如果希望这样做,请确保在初始请求中设置stream=True。r = requests.get('https://api.github.com/events', stream=True) r.raw r.raw.read(10)POST a Multipart-Encoded File>>> url = 'https://httpbin.org/post' >>> files = {'file': open('report.xls', 'rb')} >>> r = requests.post(url, files=files) >>> r.text { ... "files": { "file": "" }, ... }Redirection and History By default Requests will perform location redirection for all verbs except HEAD.We can use the history property of the Response object to track redirection.The Response.history list contains the Response objects that were created in order to complete the request. 
The list is sorted from the oldest to the most recent response.r = requests.get('http://github.com/') r.url r.history r = requests.get('http://github.com/', allow_redirects=False) r.status_codeTimeouts You can tell Requests to stop waiting for a response after a given number of seconds with the timeout parameter. Nearly all production code should use this parameter in nearly all requests. Failure to do so can cause your program to hang indefinitely:requests.get('https://github.com/', timeout=0.001)post Custom Headersheaders = { 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36 Maxthon/5.2.1.6000', 'Content-Type': 'application/json' } data = { "createdAtMs": 1279957897000 }和Swagger界面进行调试时的post参数传递方式相同!!!r = requests.post('http://rc-dev.leapstack.cn:10318/model/feedback/claim', headers=headers, json=data) r.text type(r.json()) r.json()爬取天天基金指定基金净值import requests Y='000968 001052 110027 000478 100032 001180 519977 000614 162411 003376 001064'.split() L='000478 100038 100032 100033 340001 004752 001064 502010 001469 161017 001180 000614 270048 001051 162411 002903 003765 110027 050027'.split() Y import json # 制作dict ll = [] headers = { 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36 Maxthon/5.2.1.6000', } for i in set(Y+L): try: # print(i) if i == '100033': continue r = requests.get(f'http://fundgz.1234567.com.cn/js/{i}.js', headers=headers) s = r.text # print(s) ll.append(json.loads(s[s.find('{'):(s.find('}')+1)])) except Exception as e: print(str(e)) print(i, s) d = dict() for i in ll: dd = dict() d[i['name']] = i['fundcode'] dd['trade_date'] = i['jzrq'] dd['close'] = i['dwjz'] dd from datetime import datetime datetime.strptime(dd['trade_date'], '%Y-%m-%d') df = pd.DataFrame(columns=['trade_date', 'close']) df.append(dd, ignore_index=True) df import pandas as pd d['华安德国30(DAX)联接'] = '000614' d pd.Series(d).to_pickle('fund_otc_series.pkl') d.keys() d r = requests.get('http://fundgz.1234567.com.cn/js/000614.js') r.text r = requests.get('http://fundgz.1234567.com.cn/js/100032.js') r.text r = requests.get('http://fundgz.1234567.com.cn/js/004752.js') r.text s = r.text s[s.find('{'):(s.find('}')+1)] s[(s.find('}')+1):] json.loads(s[s.find('{'):(s.find('}')+1)]) from time import time import time import datetime r = requests.get(url) r.text直接从网页解析r = requests.get('http://fund.eastmoney.com/162411.html') r = requests.get('http://fund.eastmoney.com/000478.html') r = requests.get('http://fund.eastmoney.com/000614.html') from pyquery import PyQuery as pq r.encoding='utf8' root = pq(r.text)body > div:nth-child(12) > div > div > div.fundDetail-main > div.fundInfoItem > div.dataOfFund > dl.dataItem02 > dd.dataNums > span.ui-font-middle.ui-color-red.ui-num body > div:nth-child(12) > div > div > div.fundDetail-main > div.fundInfoItem > div.dataOfFund > dl.dataItem02 > dd.dataNums > span.ui-font-middle.ui-color-green.ui-numd = root('#body > div:nth-child(12) > div > div > div.fundDetail-main > div.fundInfoItem > div.dataOfFund > dl.dataItem02 > dd.dataNums > span.ui-font-middle.ui-num') d.text() d = root('#body > div:nth-child(12) > div > div > div.fundDetail-main > div.fundInfoItem > div.dataOfFund > dl.dataItem02 > dd.dataNums > span.ui-font-large.ui-num') d.text() d = root('#body > div:nth-child(12) > div > div > div.fundDetail-main > div.fundInfoItem > div.dataOfFund > dl.dataItem02 > dt > p' ) d.text()[-11:-1] 
float(d.text()[:-1])爬取同花顺板块概念成分股http://q.10jqka.com.cn/gn/import time def t(): from pyquery import PyQuery as pq la = [] headers = { 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36 Maxthon/5.2.1.6000', 'Cookie': 'v=AqsTXYKWco_gqq7bwt5h9vnBPMSWwLsgOdGD9h0ohdBDw8WypZBPkkmkE2cu' } for i in range(1, 27): print(i) url=f'http://q.10jqka.com.cn/gn/index/field/addtime/order/desc/page/{i}/ajax/1/' r = requests.get(url, headers = headers) r.encoding='gbk' # print(r.text) root = pq(r.text) d1 = root('table > tbody > tr') for i in range(len(d1)): date = d1.eq(i)('td:nth-child(1)').text() name = d1.eq(i)('td:nth-child(2) > a').text() link = d1.eq(i)('td:nth-child(2) > a').attr.href la.append([date, name, link]) time.sleep(0.5) return la t() d.text() d.attr.href headers = { 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36 Maxthon/5.2.1.6000', 'Cookie': '' } r1 = requests.get(d.attr.href, headers = headers) r1.encoding='gbk' root1 = pq(r1.text) d1 = root1('#maincont > table > tbody > tr:nth-child(1) > td:nth-child(2)') d1.text() len(root1('#maincont > table > tbody > tr')) for i in range(len(root1('#maincont > table > tbody > tr'))): dtcode = root1('#maincont > table > tbody > tr').eq(i)('td:nth-child(2)').text() dtname = root1('#maincont > table > tbody > tr').eq(i)('td:nth-child(3)').text() print(dtname) headers = { 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36 Maxthon/5.2.1.6000', 'Cookie': '' } r2 = requests.get('http://q.10jqka.com.cn/gn/detail/field/264648/order/desc/page/2/ajax/1/code/302436', headers = headers) from pyquery import PyQuery as pq r2.encoding='gbk' r2.text from pyquery import PyQuery as pq r2.encoding='gbk' root = pq(r2.text)数据描述 1. variance of Wavelet Transformed image (continuous) 小波变换图像的偏度2. skewness of Wavelet Transformed image (continuous) 图像的方差3. curtosis of Wavelet Transformed image (continuous) 图像的熵4. entropy of image (continuous) 图像的曲率5. class (integer) 导入数据bankdata = pd.read_csv('../dataset/bill_authentication.csv')探索性数据分析bankdata.shape bankdata.head()数据预处理X = bankdata.drop('Class', axis=1) y = bankdata['Class'] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)算法训练from sklearn.svm import SVC svclassifier = SVC(kernel='linear') svclassifier.fit(X_train, y_train)做预测y_pred = svclassifier.predict(X_test)算法评价from sklearn.metrics import classification_report, confusion_matrix print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred))[[142 2] [ 3 128]] precision recall f1-score support 0 0.98 0.99 0.98 144 1 0.98 0.98 0.98 131 avg / total 0.98 0.98 0.98 275Tarea 3: Librería Numpy Cada clase que veamos tendrá una tarea asignada, la cual contendrá problemas varios que se pueden resolver con lo visto en clase, de manera que puedas practicar lo que acabas de aprender.En esta ocasión, la tarea tendrá ejercicios relativos a la clases 4 y 5, de la librería NumPy y la librería Matplotlib.Para resolver la tarea, por favor cambiar el nombre del archivo a "Tarea3_ApellidoNombre.ipynb", sin acentos ni letras ñ (ejemplo: en mi caso, el archivo se llamaría "Tarea3_JimenezEsteban.ipynb"). 
Luego de haber cambiado el nombre, resolver cada uno de los puntos en los espacios provistos.Referencias:- http://www.math.pitt.edu/~sussmanm/3040Summer14/exercisesII.pdf- https://scipy-lectures.org/intro/numpy/exercises.html**Todos los ejercicios se pueden realizar sin usar ciclos `for` ni `while`**___ 1. Cuadrado mágicoUn cuadrado mágico es una matriz cuadrada tal que las sumas de los elementos de cada una de sus filas, las sumas de los elementos de cada una de sus columnas y las sumas de los elementos de cada una de sus diagonales son iguales (hay dos diagonales: una desde el elemento superior izquierdo hasta el elemento inferior derecho, y otra desde el elemento superior derecho hasta el elemento inferior izquierdo).Muestre que la matriz A dada por:import numpy as np A = np.array([[17, 24, 1, 8, 15], [23, 5, 7, 14, 16], [ 4, 6, 13, 20, 22], [10, 12, 19, 21, 3], [11, 18, 25, 2, 9]])constituye un cuadrado mágico.Ayuda: las funciones `np.sum()`, `np.diag()` y `np.fliplr()` pueden ser de mucha utilidad 2. ¿Qué más podemos hacer con NumPy?Este ejercicio es más que nada informativo, para ver qué más pueden hacer con la librería NumPy.Considere el siguiente vector:x = np.array([-1., 4., -9.])1. La función coseno (`np.cos()`) se aplica sobre cada uno de los elementos del vector. Calcular el vector `y = np.cos(np.pi/4*x)` 2. Puedes sumar vectores y multiplicarlos por escalares. Calcular el vector `z = x + 2*y` 3. También puedes calcular la norma de un vector. Investiga como y calcular la norma del vector xAyuda: buscar en las funciones del paquete de algebra lineal de NumPy 4. Utilizando la función `np.vstack()` formar una matriz `M` tal que la primera fila corresponda al vector `x`, la segunda al vector `y` y la tercera al vector `z`. 5. Calcule la transpuesta de la matriz `M`, el determinante de la matriz `M`, y la multiplicación matricial de la matriz `M` por el vector `x`. 3. Graficando funcionesGenerar un gráfico de las funciones $f(x)=e^{-x/10}\sin(\pi x)$ y $g(x)=x e^{-x/3}$ sobre el intervalo $[0, 10]$. Incluya las etiquetas de los ejes y una leyenda con las etiquetas de cada función. 4. Analizando datosLos datos en el archivo `populations.txt` describen las poblaciones de liebres, linces (y zanahorias) en el norte de Canadá durante 20 años.Para poder analizar estos datos con NumPy es necesario importarlos. La siguiente celda importa los datos del archivo `populations.txt`, siempre y cuando el archivo y el notebook de jupyter estén en la misma ubicación:data = np.loadtxt('populations.txt')Subsample neurons and plot performancescan = data.MovieScan & dict(animal_id = 16314) rel = dj.U('brain_area', 'layer', 'sample_size','seed').aggr((CurvatureResponseSample & scan), average_curvature='avg(avg_curvature)') df = pd.DataFrame(rel.fetch()) order_areas = ['V1', 'LM', 'LI', 'AL', 'LLA','P', 'POR','RL'] sns.set_context('paper', font_scale=1.1) sns.set_palette(sns.xkcd_palette(['grey', 'golden yellow'])) with sns.axes_style("ticks"): g = sns.catplot('brain_area', 'average_curvature', data=df, col='sample_size', col_wrap=3, order=order_areas, \ kind='strip', s=6, linewidth=1, edgecolor="gray", jitter=0.3) g.set_xlabels('Brain area') g.set_ylabels('Average Curvature (°)') g.set_titles('Num. 
neurons: {col_name}') sns.despine(trim=True) g.fig.set_size_inches(8,5.5) #g.fig.savefig('temp_vis/subsampled_neurons_brain_areas.png', bbox_inches='tight', dpi=200)Chapter: Best Practices Topic: Regularization to prevent overfitting# read data import numpy as np data = np.loadtxt('quadratic_raw_data.csv', delimiter=',') x = data[:,0,None]; y = data[:,1,None] # separate training data from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1) ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ## Ordinary Least Squares Regression ## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # create pipeline for quadratic fit via OLS from sklearn.pipeline import Pipeline from sklearn.preprocessing import PolynomialFeatures from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LinearRegression pipe_OLS = Pipeline([('poly', PolynomialFeatures(degree=10, include_bias=False)), ('scaler', StandardScaler()), ('model', LinearRegression())]) # fit pipeline and predict pipe_OLS.fit(x_train, y_train) y_predicted_train_OLS = pipe_OLS.predict(x_train) y_predicted_test_OLS = pipe_OLS.predict(x_test) # performance metrics from sklearn.metrics import mean_squared_error as mse print('OLS Training metric (mse) = ', mse(y_train, y_predicted_train_OLS)) print('OLS Test metric (mse) = ', mse(y_test, y_predicted_test_OLS)) # plot predictions y_predicted_OLS = pipe_OLS.predict(x) from matplotlib import pyplot as plt plt.figure() plt.plot(x_train,y_train, 'bo', label='raw training data') plt.plot(x_test,y_test, 'ro', label='raw test data') plt.plot(x,y_predicted_OLS, color='orange', label='OLS fit') plt.legend() plt.xlabel('x'), plt.ylabel('y') plt.show() # print coefficients print(pipe_OLS['model'].coef_) ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ## Ridge Regression ## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # create pipeline for quadratic fit via ridge model from sklearn.pipeline import Pipeline from sklearn.preprocessing import PolynomialFeatures from sklearn.preprocessing import StandardScaler from sklearn.linear_model import Ridge pipe_L2 = Pipeline([('poly', PolynomialFeatures(degree=10,include_bias=False)), ('scaler', StandardScaler()), ('model', Ridge(alpha=0.1))]) # fit pipeline and predict pipe_L2.fit(x_train, y_train) y_predicted_train_L2 = pipe_L2.predict(x_train) y_predicted_test_L2 = pipe_L2.predict(x_test) # performance metrics from sklearn.metrics import mean_squared_error as mse print('L2 Training metric (mse) = ', mse(y_train, y_predicted_train_L2)) print('L2 Test metric (mse) = ', mse(y_test, y_predicted_test_L2)) # plot predictions y_predicted_L2 = pipe_L2.predict(x) plt.figure() plt.plot(x_train,y_train, 'bo', label='raw training data') plt.plot(x_test,y_test, 'ro', label='raw test data') plt.plot(x,y_predicted_L2, color='orange', label='Ridge fit') plt.legend() plt.xlabel('x'), plt.ylabel('y') plt.show() # print coefficients print(pipe_L2['model'].coef_) ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ## Lasso Regression ## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # create pipeline for quadratic fit via ridge model from sklearn.pipeline import Pipeline from sklearn.preprocessing import PolynomialFeatures from sklearn.preprocessing import StandardScaler from sklearn.linear_model import Lasso 
pipe_L1 = Pipeline([('poly', PolynomialFeatures(degree=10,include_bias=False)), ('scaler', StandardScaler()), ('model', Lasso(alpha=0.1))]) # fit pipeline and predict pipe_L1.fit(x_train, y_train) y_predicted_train_L1 = pipe_L1.predict(x_train) y_predicted_test_L1 = pipe_L1.predict(x_test) # performance metrics from sklearn.metrics import mean_squared_error as mse print('L1 Training metric (mse) = ', mse(y_train, y_predicted_train_L1)) print('L1 Test metric (mse) = ', mse(y_test, y_predicted_test_L1)) # plot predictions y_predicted_L1 = pipe_L1.predict(x) plt.figure() plt.plot(x_train,y_train, 'bo', label='raw training data') plt.plot(x_test,y_test, 'ro', label='raw test data') plt.plot(x,y_predicted_L1, color='orange', label='Lasso fit') plt.legend() plt.xlabel('x'), plt.ylabel('y') plt.show() # print coefficients print(pipe_L1['model'].coef_)[-0. 4.38109181 -0.09273215 1.679048 -0. 0. -0. 0. 0. -0. ]Reading data and 1-D plotting with DISPATCH **NB**: Remember to set the `PYTHONPATH` environment variable to `$DISPATCH_DIR/utilities/python`, where `$DISPATCH_DIR` is the location of your DISPATCH repository. This notebook assumes you have already compiled DISPATCH for the 1-D MHD shock experiment (`make`) and run the code (`./dispatch.x`) successfully.The data can be found in the `data` directory.import numpy as np import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import dispatch import dispatch.select import itertoolsFirst, read a snapshot.datadir = '../../../experiments/mhd_shock/data' snap = dispatch.snapshot(iout=9,verbose=1,data=datadir) for p in snap.patches: print(p.id, p.position, p.size, p.time, p.gn, p.ds, p.level)1 [0.16666667 0.005 0.005 ] [0.33333333 0.01 0.01 ] 0.09 [66 1 1] [0.00564972 0.01 0.01 ] 7 2 [0.5 0.005 0.005] [0.33333333 0.01 0.01 ] 0.09 [66 1 1] [0.00564972 0.01 0.01 ] 7 3 [0.83333333 0.005 0.005 ] [0.33333333 0.01 0.01 ] 0.09 [66 1 1] [0.00564972 0.01 0.01 ] 7Printed from left to right are the patch ID, the centre of the patch in Cartesian coordinates, the time of the patch in the current snapshot, and the dimensions of the density/patch.In this case, although we are dealing with a 1-D MHD shock tube, the solver employed does not permit true 1-D or 2-D calculations and thus there are a few dummy zones in the y- and z-directions.indices = snap.patches[0].idx.vars.copy() print(indices) print("Patch kind:",snap.patches[0].kind){0: 'd', 4: 'e', 1: 'px', 2: 'py', 3: 'pz', 5: 'bx', 6: 'by', 7: 'bz'} Patch kind: stagger2_mhd_patNote: From here on, we assume that the shock tube runs along the *x-direction*.fig = plt.figure(figsize=(14.0,6.5)) fig.clf() ncols, nrows = 4, 2 gs = fig.add_gridspec(ncols=ncols,nrows=nrows) axes = [] for j,i in itertools.product(range(ncols),range(nrows)): cax = fig.add_subplot(gs[i,j]) axes.append(cax) if 'et' in indices.values(): indices.pop(snap.patches[0].idx.et) # If stored, don't plot total energy. for cax, v in zip(axes, indices): colrs = itertools.cycle(['r','g','b','c','m','y','k']) for p in snap.patches: jslice, kslice = 0, 0 if p.kind == 'ramses_mhd_patch': jslice, kslice = 4,4 cax.plot(p.x[p.li[0]:p.ui[0]],p.var(v)[p.li[0]:p.ui[0],jslice,kslice],marker='o',color=next(colrs),zorder=0.1*p.level) cax.set_xlabel(r'$x$') cax.set_ylabel(p.idx.vars[v]) axes[0].set_title(r't = {0:.03g}'.format(snap.patches[0].time)) fig.tight_layout() plt.savefig(snap.patches[0].kind.strip()) plt.show()Here are the MHD variables stored in this patch and their associated index in the data. 
`px` is the x-component of momentum, etc.You don't have to remember these indices because you can always retrieve them using aliases, e.g., `patch.idx.d`. Here's the density at t = 0.09. Different colours have been used for each patch. Now what if you want to see all of the data as one single array (which can be useful for analysis)?x, rho = dispatch.select.values_along(snap.patches,[0.0,0.0,0.0],dir=0,iv=snap.patches[0].idx.d) fig2 = plt.figure() fig2.clf() plt.plot(x,rho,'co') for p in snap.patches: edge = p.position[0] - 0.5*p.size[0] plt.plot([edge,edge],[-10,10],'k--') plt.axis(ymin=0.15,ymax=1.05,xmin=0.0,xmax=1.0) plt.xlabel(r'$x$') plt.ylabel(r'$\rho$')Note that I've manually added vertical lines to denote patch boundaries.print(rho.shape)(180,)The Stanford Sentiment Treebank The Stanford Sentiment Treebank consists of sentences from movie reviews and human annotations of their sentiment. The task is to predict the sentiment of a given sentence. We use the two-way (positive/negative) class split, and use only sentence-level labels.from IPython.display import display, Markdown with open('../../doc/env_variables_setup.md', 'r') as fh: content = fh.read() display(Markdown(content))Import Packagesimport tensorflow as tf from transformers import ( BertConfig, BertTokenizer, XLMRobertaTokenizer, TFBertModel, TFXLMRobertaModel, ) import os from datetime import datetime import tensorflow_datasets from tensorboard import notebook import math #from google.cloud import storage from googleapiclient import discovery from googleapiclient import errors import logging import jsonCheck configurationprint(tf.version.GIT_VERSION, tf.version.VERSION) print(tf.keras.__version__) gpus = tf.config.list_physical_devices('GPU') if len(gpus)>0: for gpu in gpus: print('Name:', gpu.name, ' Type:', gpu.device_type) else: print('No GPU available !!!!')No GPU available !!!!Define Pathstry: data_dir=os.environ['PATH_DATASETS'] except KeyError: print('missing PATH_DATASETS') try: tensorboard_dir=os.environ['PATH_TENSORBOARD'] except KeyError: print('missing PATH_TENSORBOARD') try: savemodel_dir=os.environ['PATH_SAVE_MODEL'] except KeyError: print('missing PATH_SAVE_MODEL')Import local packagesimport utils.model_utils as mu import importlib importlib.reload(mu);Train the model on AI Platform Training (for production)project_name = os.environ['PROJECT_ID'] project_id = 'projects/{}'.format(project_name) ai_platform_training = discovery.build('ml', 'v1', cache_discovery=False) # choose the model model_name = 'tf_bert_classification' #model_name = 'test_log_bert' # variable used to build some variable's name type_production = 'test' #'test', 'production' hardware = 'cpu' #'cpu', 'gpu', 'tpu' owner = os.environ['OWNER'] tier = 'basic' #'basic', 'custom' python_version = '3.7' runtime_version = '2.2' hp_tuning= False verbosity = 'INFO' profiling = False # use custom container use_custom_container = False tag='/test:v0.0.0' # overwrite parameter for testing logging test_logging = False print(' modifying Tensorflow env variable') # 0 = all messages are logged (default behavior) # 1 = INFO messages are not printed # 2 = INFO and WARNING messages are not printed # 3 = INFO, WARNING, and ERROR messages are not printed with open(os.environ['DIR_PROJ']+'/utils/env_variables.json', 'r') as outfile: env_var = json.load(outfile) if verbosity == 'DEBUG' or verbosity == 'VERBOSE' or verbosity == 'INFO': env_var['TF_CPP_MIN_LOG_LEVEL'] = 0 env_var['TF_CPP_MIN_VLOG_LEVEL'] = 0 elif verbosity == 'WARNING': 
env_var['TF_CPP_MIN_LOG_LEVEL'] = 1 env_var['TF_CPP_MIN_VLOG_LEVEL'] = 1 elif verbosity == 'ERROR': env_var['TF_CPP_MIN_LOG_LEVEL'] = 2 env_var['TF_CPP_MIN_VLOG_LEVEL'] = 2 else: env_var['TF_CPP_MIN_LOG_LEVEL'] = 3 env_var['TF_CPP_MIN_VLOG_LEVEL'] = 3 print("env_var['TF_CPP_MIN_LOG_LEVEL']=", env_var['TF_CPP_MIN_LOG_LEVEL']) print("env_var['TF_CPP_MIN_VLOG_LEVEL']=", env_var['TF_CPP_MIN_VLOG_LEVEL']) data={} data['TF_CPP_MIN_LOG_LEVEL'] = env_var['TF_CPP_MIN_LOG_LEVEL'] data['TF_CPP_MIN_VLOG_LEVEL'] = env_var['TF_CPP_MIN_VLOG_LEVEL'] with open(os.environ['DIR_PROJ']+'/utils/env_variables.json', 'w') as outfile: json.dump(data, outfile) # define parameters for ai platform training if not use_custom_container: # delete old package version for root, dirs, files in os.walk(os.environ['DIR_PROJ'] + '/dist/'): for filename in files: package_dist=os.environ['DIR_PROJ'] + '/dist/'+filename if package_dist[-7:]=='.tar.gz': print('removing package"', package_dist) os.remove(package_dist) package_gcs = mu.create_module_tar_archive(model_name) else: package_gcs = None timestamp = datetime.now().strftime("%Y_%m_%d_%H%M%S") if hp_tuning: job_name = model_name+'_hp_tuning_'+hardware+'_'+timestamp else: job_name = model_name+'_'+hardware+'_'+timestamp module_name = 'model.'+model_name+'.task' if tier=='basic' and hardware=='cpu': # CPU region = 'europe-west1' elif tier=='basic' and hardware=='gpu': # GPU region = 'europe-west1' elif tier=='custom' and hardware=='gpu': # Custom GPU region = 'europe-west4' elif tier=='basic' and hardware=='tpu': # TPU #region = 'us-central1' region = 'europe-west4' # No zone in region europe-west4 has accelerators of all requested types #region = 'europe-west6' # The request for 8 TPU_V2 accelerators exceeds the allowed maximum of 0 K80, 0 P100, 0 P4, 0 T4, 0 TPU_V2, 0 TPU_V2_POD, 0 TPU_V3, 0 TPU_V3_POD, 0 V100 #region = 'europe-west2' # No zone in region europe-west2 has accelerators of all requested types elif tier=='custom' and hardware=='tpu': # TPU #region = 'us-central1' region = 'europe-west4' #region = 'europe-west6' #region = 'europe-west2' else: # Default region = 'europe-west1' # define parameters for training of the model if type_production=='production': # reading metadata _, info = tensorflow_datasets.load(name='glue/sst2', data_dir=data_dir, with_info=True) # define parameters epochs = 2 batch_size_train = 32 #batch_size_test = 32 batch_size_eval = 64 # Maxium length, becarefull BERT max length is 512! 
max_length = 128 # extract parameters size_train_dataset=info.splits['train'].num_examples #size_test_dataset=info.splits['test'].num_examples size_valid_dataset=info.splits['validation'].num_examples # computer parameter steps_per_epoch_train = math.ceil(size_train_dataset/batch_size_train) #steps_per_epoch_test = math.ceil(size_test_dataset/batch_size_test) steps_per_epoch_eval = math.ceil(size_valid_dataset/batch_size_eval) #print('Dataset size: {:6}/{:6}/{:6}'.format(size_train_dataset, size_test_dataset, size_valid_dataset)) #print('Batch size: {:6}/{:6}/{:6}'.format(batch_size_train, batch_size_test, batch_size_eval)) #print('Step per epoch: {:6}/{:6}/{:6}'.format(steps_per_epoch_train, steps_per_epoch_test, steps_per_epoch_eval)) #print('Total number of batch: {:6}/{:6}/{:6}'.format(steps_per_epoch_train*(epochs+1), steps_per_epoch_test*(epochs+1), steps_per_epoch_eval*1)) print('Number of epoch: {:6}'.format(epochs)) print('Batch size: {:6}/{:6}'.format(batch_size_train, batch_size_eval)) print('Step per epoch: {:6}/{:6}'.format(steps_per_epoch_train, steps_per_epoch_eval)) else: if hardware=='tpu': epochs = 1 steps_per_epoch_train = 6 #5 batch_size_train = 32 steps_per_epoch_eval = 1 batch_size_eval = 64 else: epochs = 1 steps_per_epoch_train = 6 #5 batch_size_train = 32 steps_per_epoch_eval = 1 batch_size_eval = 64 steps=epochs*steps_per_epoch_train if steps<=5: n_steps_history=4 elif steps>=5 and steps<1000: n_steps_history=10 print('be carefull with profiling between step: 10-20') else: n_steps_history=int(steps/100) print('be carefull with profiling between step: 10-20') print('will compute accuracy on the test set every {} step so {} time'.format(n_steps_history, int(steps/n_steps_history))) if profiling: print(' profiling ...') steps_per_epoch_train = 100 n_steps_history=25 input_eval_tfrecords = 'gs://'+os.environ['BUCKET_NAME']+'/tfrecord/sst2/bert-base-multilingual-uncased/valid' #'gs://public-test-data-gs/valid' input_train_tfrecords = 'gs://'+os.environ['BUCKET_NAME']+'/tfrecord/sst2/bert-base-multilingual-uncased/train' #'gs://public-test-data-gs/train' if hp_tuning: output_dir = 'gs://'+os.environ['BUCKET_NAME']+'/training_model_gcp/'+model_name+'_hp_tuning_'+hardware+'_'+timestamp else: output_dir = 'gs://'+os.environ['BUCKET_NAME']+'/training_model_gcp/'+model_name+'_'+hardware+'_'+timestamp pretrained_model_dir = 'gs://'+os.environ['BUCKET_NAME']+'/pretrained_model/bert-base-multilingual-uncased' #epsilon = 1.7788921050163616e-06 #learning_rate= 0.0007763625134788308 epsilon = 1e-8 learning_rate= 5e-5 # bulding training_inputs parameters = ['--epochs', str(epochs), '--steps_per_epoch_train', str(steps_per_epoch_train), '--batch_size_train', str(batch_size_train), '--steps_per_epoch_eval', str(steps_per_epoch_eval), '--n_steps_history', str(n_steps_history), '--batch_size_eval', str(batch_size_eval), '--input_eval_tfrecords', input_eval_tfrecords , '--input_train_tfrecords', input_train_tfrecords, '--output_dir', output_dir, '--pretrained_model_dir', pretrained_model_dir, '--verbosity_level', verbosity, '--epsilon', str(epsilon), '--learning_rate', str(learning_rate)] if hardware=='tpu': parameters.append('--use_tpu') parameters.append('True') training_inputs = { 'args': parameters, 'region': region, } if not use_custom_container: training_inputs['packageUris'] = [package_gcs] training_inputs['pythonModule'] = module_name training_inputs['runtimeVersion'] = runtime_version training_inputs['pythonVersion'] = python_version else: accelerator_master = {'imageUri': 
image_uri} training_inputs['masterConfig'] = accelerator_master if tier=='basic' and hardware=='cpu': # CPU training_inputs['scaleTier'] = 'BASIC' #training_inputs['scaleTier'] = 'STANDARD_1' elif tier=='custom' and hardware=='cpu': # CPU training_inputs['scaleTier'] = 'CUSTOM' training_inputs['masterType'] = 'n1-standard-16' elif tier=='basic' and hardware=='gpu': # GPU training_inputs['scaleTier'] = 'BASIC_GPU' elif tier=='custom' and hardware=='gpu': # Custom GPU training_inputs['scaleTier'] = 'CUSTOM' training_inputs['masterType'] = 'n1-standard-8' accelerator_master = {'acceleratorConfig': { 'count': '1', 'type': 'NVIDIA_TESLA_V100'} } training_inputs['masterConfig'] = accelerator_master elif tier=='basic' and hardware=='tpu': # TPU training_inputs['scaleTier'] = 'BASIC_TPU' elif tier=='custom' and hardware=='tpu': # Custom TPU training_inputs['scaleTier'] = 'CUSTOM' training_inputs['masterType'] = 'n1-highcpu-16' training_inputs['workerType'] = 'cloud_tpu' training_inputs['workerCount'] = '1' accelerator_master = {'acceleratorConfig': { 'count': '8', 'type': 'TPU_V3'} } training_inputs['workerConfig'] = accelerator_master else: # Default training_inputs['scaleTier'] = 'BASIC' print('======') # add hyperparameter tuning to the job config. if hp_tuning: hyperparams = { 'algorithm': 'ALGORITHM_UNSPECIFIED', 'goal': 'MAXIMIZE', 'maxTrials': 3, 'maxParallelTrials': 2, 'maxFailedTrials': 1, 'enableTrialEarlyStopping': True, 'hyperparameterMetricTag': 'metric_accuracy_train_epoch', 'params': []} hyperparams['params'].append({ 'parameterName':'learning_rate', 'type':'DOUBLE', 'minValue': 1.0e-8, 'maxValue': 1.0, 'scaleType': 'UNIT_LOG_SCALE'}) hyperparams['params'].append({ 'parameterName':'epsilon', 'type':'DOUBLE', 'minValue': 1.0e-9, 'maxValue': 1.0, 'scaleType': 'UNIT_LOG_SCALE'}) # Add hyperparameter specification to the training inputs dictionary. 
training_inputs['hyperparameters'] = hyperparams # building job_spec labels = {'accelerator': hardware, 'prod_type': type_production, 'owner': owner} if use_custom_container: labels['type'] = 'custom_container' else: labels['type'] = 'gcp_runtime' job_spec = {'jobId': job_name, 'labels': labels, 'trainingInput': training_inputs} if test_logging: # test # variable used to build some variable's name owner = os.environ['OWNER'] tier = 'basic' verbosity = 'INFO' # define parameters for ai platform training if not use_custom_container: package_gcs = package_gcs else: image_uri='gcr.io/'+os.environ['PROJECT_ID']+tag job_name = 'debug_test_'+datetime.now().strftime("%Y_%m_%d_%H%M%S") module_name = 'model.test-log.task' #module_name = 'model.test.task' region = 'europe-west1' # building training_inputs parameters = ['--verbosity_level', verbosity] training_inputs = { 'args': parameters, 'region': region, } if not use_custom_container: training_inputs['packageUris'] = [package_gcs] training_inputs['pythonModule'] = module_name training_inputs['runtimeVersion'] = runtime_version training_inputs['pythonVersion'] = python_version else: accelerator_master = {'imageUri': image_uri} #training_inputs['pythonModule'] = module_name # not working to overwrite the entrypoint training_inputs['masterConfig'] = accelerator_master training_inputs['scaleTier'] = 'BASIC' # building job_spec labels = {'accelerator': 'cpu', 'prod_type': 'debug', 'owner': owner} if use_custom_container: labels['type'] = 'custom_container' else: labels['type'] = 'gcp_runtime' job_spec = {'jobId': job_name, 'labels': labels, 'trainingInput': training_inputs} training_inputs, job_name # submit the training job request = ai_platform_training.projects().jobs().create(body=job_spec, parent=project_id) try: response = request.execute() print('Job status for {}:'.format(response['jobId'])) print(' state : {}'.format(response['state'])) print(' createTime: {}'.format(response['createTime'])) except errors.HttpError as err: # For this example, just send some text to the logs. # You need to import logging for this to work. logging.error('There was an error creating the training job.' ' Check the details:') logging.error(err._get_reason()) # if you want to specify a specific job ID #job_name = 'tf_bert_classification_2020_05_16_193551' jobId = 'projects/{}/jobs/{}'.format(project_name, job_name) request = ai_platform_training.projects().jobs().get(name=jobId) response = None try: response = request.execute() print('Job status for {}:'.format(response['jobId'])) print(' state : {}'.format(response['state'])) if 'trainingOutput' in response.keys(): if 'trials' in response['trainingOutput'].keys(): for sub_job in response['trainingOutput']['trials']: print(' trials : {}'.format(sub_job)) # consumedMLUnits is reported under trainingOutput if 'consumedMLUnits' in response.get('trainingOutput', {}): print(' consumedMLUnits : {}'.format(response['trainingOutput']['consumedMLUnits'])) if 'errorMessage' in response.keys(): print(' errorMessage : {}'.format(response['errorMessage'])) except errors.HttpError as err: logging.error('There was an error getting the logs.'
' Check the details:') logging.error(err._get_reason()) # how to stream logs # --stream-logsTensorBoard for job running on GCP# View open TensorBoard instance #notebook.list() # View pid #!ps -ef|grep tensorboard # Killed Tensorboard process by using pid #!kill -9 pid %load_ext tensorboard #%reload_ext tensorboard %tensorboard --logdir {output_dir+'/tensorboard'} \ #--host 0.0.0.0 \ #--port 6006 \ #--debugger_port 6006 %load_ext tensorboard #%reload_ext tensorboard %tensorboard --logdir {output_dir+'/hparams_tuning'} \ #--host 0.0.0.0 \ #--port 6006 \ #--debugger_port 6006 !tensorboard dev upload --logdir \ 'gs://multilingual_text_classification/training_model_gcp/tf_bert_classification_cpu_2020_08_20_093837/tensorboard' --one_shot --yesusage: tensorboard [-h] [--helpfull] {serve,dev} ... tensorboard: error: unrecognized arguments: --yesVertical bars at certain times, and highlight time intervals This notebook shows how to create vertical bars and highlight time intervals in tplot figures. Originally created at the 2022 PyHC Spring Meeting Hackathon!pip install https://github.com/MAVENSDC/PyTplot/archive/matplotlib-backend.zip from pytplot import store_data, highlight, tplotCreate a simple tplot variable named `data` with all values set to 1store_data('data', data={'x': [1, 2, 3, 4, 5, 6], 'y': [1, 1, 1, 1, 1, 1]}) tplot('data') from pytplot import timebarCheck the documentation for avaiable options. Note: the `matplotlib` version of pytplot doesn't support the `dash` keywordhelp(timebar)Help on function timebar in module pytplot.timebar: timebar(t, varname=None, databar=False, delete=False, color='black', thick=1, dash=False) This function will add a vertical bar to all time series plots. This is useful if you want to bring attention to a specific time. Parameters: t : flt/list The time in seconds since Jan 01 1970 to place the vertical bar. If a list of numbers are supplied, multiple bars will be created. If "databar" is set, then "t" becomes the point on the y axis to place a horizontal bar. varname : str/list, optional The variable(s) to add the vertical bar to. If not set, the default is to add it to all current plots. databar : bool, optional This will turn the timebar into a horizontal data bar. If this is set True, then variable "t" becomes the point on the y axis to place a horizontal bar. delete : bool, optional [...]Call `timebar` to create a timebar at the unix time of 2.0 (2 seconds after 1 Jan 1970). 
If no `varname` keyword is specified, all panels on the figure will have the vertical bartimebar(2) tplot(['data', 'data'])Specify the `varname` keyword to limit the vertical bar to one specific paneltimebar(3, varname='data') tplot('data')Set the color to blue using the `color` keywordtimebar(4, color='blue') tplot('data')Highlight time intervals with the `highlight` function in `pytplot`help(highlight)Help on function highlight in module pytplot.tplot_utilities: highlight(variables=None, range=None, color='gray', alpha=0.2, fill=True, edgecolor=None, facecolor=None, hatch=None, delete=False) This function highlights a time interval on tplot variablesHighlight a time interval between 4.5 and 5 secondshighlight(['data'], [4.5, 5]) tplot('data')Highlight another time interval between 5.1 and 6 seconds, with a blue 'o' hatch patternhighlight(['data'], [5.1, 6], color='blue', hatch='O') tplot('data')Delete all of the highlighted time intervals using the `delete` keywordhighlight('data', delete=True) tplot('data')Delete vertical bars using the `delete` keywordtimebar(2, delete=True) tplot('data')`timebar` also accepts keywords for controlling the aesthetics of the vertical barstimebar(2, thick=5, color='pink') tplot('data')CodePlayFieldCodePlayField provides a way to learn audiovisual editing and programming using Python 3using many simple demonstration programs.To run the demo programs below, click on the code box labeled "In [ ]:", and click on the Run button above. ProgrammingSupportToolsStepsStorageValuesTypesFlowLoopsFunctionsEditingProblemsImagesSoundEditing ImagesEditing Images (Shapes)Editing SoundEdit a VideoGrab a Screenshot (Image)Grab a Screencast (Video) GraphicsGraphic images can be created from shapes such as circles and lines.Graphics can also be things like pictures. Graphics - Display ImageDisplay an image.View [Graphics Display Image](./jupyter/GraphicsDisplayImage.ipynb)Learn About [Steps](./jupyter/Steps.ipynb)Learn About [Images](./jupyter/Images.ipynb)The following code can run by clicking on the next rows and using the Run/Run Selected Cells menu. If this doesn't work, do Kernel/Restart Kernel... first.# Move to the graphics folder %cd fields/graphics # Run the code %run 1-drawImage.py/dave/software/codeplayfield/fields/graphicsGraphics - Display ComicDisplay an image, wait for a keypress, and display another image.View [Graphics Display Comic](./jupyter/GraphicsDisplayComic.ipynb)The following code can run by clicking on the next rows and using the Run/Run Selected Cells menu. If this doesn't work, do Kernel/Restart Kernel... first.# Move to the graphics folder %cd fields/graphics # Run the code %run 2-drawImageComic.pyGraphics - Draw Simple ShapesDisplay some shapes.View [Graphics Draw Shapes](./jupyter/GraphicsDrawSimpleShapes.ipynb)Learn About [Steps](./jupyter/Steps.ipynb)The following code can run by clicking on the next rows and using the Run/Run Selected Cells menu. If this doesn't work, do Kernel/Restart Kernel... first.# Move to the graphics folder %cd fields/graphics # Run the code %run 3-drawSimpleShapes.py/dave/software/codeplayfield/fields/graphicsGraphics - Draw TriangleGet some mouse clicks, then draw a triangle.View [Graphics Draw Triangle](./jupyter/GraphicsDrawTriangle.ipynb)Learn About [Storage](./jupyter/Storage.ipynb)Learn About [Values](./jupyter/Values.ipynb)The following code can run by clicking on the next rows and using the Run/Run Selected Cells menu. If this doesn't work, do Kernel/Restart Kernel... 
first.# Move to the graphics folder %cd fields/graphics # Run the code %run 4-drawTriangle.pyGraphics - Draw LineView [Graphics Draw Line](./jupyter/GraphicsDrawLine.ipynb)Learn About [Flow](./jupyter/Flow.ipynb)Learn About [Loops](./jupyter/Loops.ipynb)The following code can run by clicking on the next rows and using the Run/Run Selected Cells menu. If this doesn't work, do Kernel/Restart Kernel... first.# Move to the graphics folder %cd fields/graphics # Run the code %run 5-drawLine.pyGraphics - Draw RectView [Graphics Draw Rect](./jupyter/GraphicsDrawRect.ipynb)Learn About [Functions](./jupyter/Functions.ipynb)The following code can run by clicking on the next rows and using the Run/Run Selected Cells menu. If this doesn't work, do Kernel/Restart Kernel... first.# Move to the graphics folder %cd fields/graphics # Run the code %run 6-drawRect.pyGame Game - AirportDraw a bird flying near an airport.Run the following lines to see the demo video.%%HTML View [Game Airport](./jupyter/GameAirport.ipynb)Learn About [Steps](./jupyter/Steps.ipynb)Learn About [Images](./jupyter/Images.ipynb)More Advanced:Learn About [Storage](./jupyter/Storage.ipynb)Learn About [Values](./jupyter/Values.ipynb)Learn About [Flow](./jupyter/Flow.ipynb)Learn About [Loops](./jupyter/Loops.ipynb)The following code can run by clicking on the next rows and using the Run/Run Selected Cells menu. If this doesn't work, do Kernel/Restart Kernel... first.# Install pygame using pip import sys !{sys.executable} -m pip install pygame # Move to the game folder %cd fields/game # Run the code %run 1-airport.pyRequirement already satisfied: pygame in /dave/.local/lib/python3.8/site-packages (2.0.0) /dave/software/codeplayfield/fields/game pygame 2.0.0 (SDL 2.0.12, python 3.8.5) Hello from the pygame community. https://www.pygame.org/contribute.htmlGame - DollsUse the keyboard to move two people on the screen.Run the following lines to see the demo video.%%HTML View [Game Dolls](./jupyter/GameDolls.ipynb)The following code can run by clicking on the next rows and using the Run/Run Selected Cells menu. If this doesn't work, do Kernel/Restart Kernel... first.# Install pygame using pip import sys !{sys.executable} -m pip install pygame # Move to the game folder %cd fields/game # Run the code %run 2-dolls.py The following game allows going inside the house and going upstairs. # Install pygame using pip import sys !{sys.executable} -m pip install pygame # Move to the game folder %cd fields/game # Run the code %run 5-dolls-scenes.py The following game has object oriented scenes. # Install pygame using pip import sys !{sys.executable} -m pip install pygame # Move to the game folder %cd fields/game # Run the code %run 6-dolls-scenesObj.pyGame - BugsUse the keyboard to move a bug on the screen.Run the following lines to see the demo video.%%HTML View [Game Bugs](./jupyter/GameBugs.ipynb)The following code can run by clicking on the next rows and using the Run/Run Selected Cells menu. If this doesn't work, do Kernel/Restart Kernel... first.# Install pygame using pip import sys !{sys.executable} -m pip install pygame # Move to the game folder %cd fields/game # Run the code %run 3-bugs.pyGame - TickleUse the mouse to tickle the bear.Learn About [Sound](./jupyter/Sound.ipynb)The following code can run by clicking on the next rows and using the Run/Run Selected Cells menu. If this doesn't work, do Kernel/Restart Kernel... 
first.# Install pygame using pip import sys !{sys.executable} -m pip install pygame # Move to the game folder %cd fields/game # Run the code %run 4-tickle.pyBy listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13. What is the 10 001st prime number?import math def find_nth_prime(n): num = 3 prime_count = 1 primes = [2] while prime_count < n: # only prime factors up to sqrt(num) need to be tested upper_bound = math.sqrt(num) for prime in primes: if prime > upper_bound: # no divisor found below sqrt(num), so num is prime primes.append(num) prime_count += 1 break if num % prime == 0: break else: primes.append(num) prime_count += 1 num += 2 return primes[-1] find_nth_prime(10001)Intro to some key functions in ifcopenshell and IFC documentationIfcopenshell is a library that you can use to parse and handle IFC model data. It works in both C++ and Python, and for both Ifc2x3 and IFC4. For a collection of information on IFC please refer to the [buildingSMART IFC pages](http://www.buildingsmart-tech.org/ifc/). It might also be useful to read up on the [Ifc2x3 Implementation Guide here](http://www.buildingsmart-tech.org/downloads/accompanying-documents/guidelines/IFC2x%20Model%20Implementation%20Guide%20V2-0b.pdf). The [ifcopenshell academy](http://academy.ifcopenshell.org/) and [pythonocc tutorials](http://www.pythonocc.org/) are good resources as well. One particularly supportive tutorial for this notebook is the [Using The Parsing Functionality of Ifcopenshell Interactively Tutorial](http://academy.ifcopenshell.org/using-the-parsing-functionality-of-ifcopenshell-interactively/).The topics for this notebook are: 1. [Opening an ifc file and create a file object](Opening-an-ifc-file-and-create-a-file-object)2. [The file.by_type() function in ifcopenshell](The-file.by_type-function-in-ifcopenshell)3. [The is_a() function in ifcopenshell](The-is_a-function-in-ifcopenshell)4. [The "." operator function in python and ifcopenshell](The-"."-operator-in-Python-with-ifcopenshell)5. [Combining by_type, is_a and the "." operator to extract property set information](Combining-by_type,-is_a-and-the-"."-operator-to-extract-property-set-information)After this you could try walking through the 02_analyze and 01_visualize notebooks.import ifcopenshellOpening an ifc file and create a file objectWith ifcopenshell you can start interacting with a file through the "open()" function. The only argument to this function is a path to the ifc file, as shown below. If you want to interact with your own file, you can upload that file to e.g. the models folder and change the input to this function to "models/name_of_your_file.ifc".f = ifcopenshell.open("models/Grethes_hus_bok_2.ifc")The file.by_type function in ifcopenshellIn ifcopenshell the function by_type("") can be called on the file object. The argument to this function is an IFC type, e.g. [IfcWall](http://www.buildingsmart-tech.org/ifc/IFC2x3/TC1/html/ifcsharedbldgelements/lexical/ifcwall.htm) or any other ifc type in the schema. If the entity is in the file, the function will return a tuple listing all the entities of that type in the file. If it is not in the file, the function returns an empty tuple (the short sketch below illustrates the empty-tuple case and what happens with a name that is not in the schema at all).
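To make the two behaviours concrete, here is a minimal, hedged sketch (assuming the same `f` file object opened above; the misspelled type name is purely illustrative and the exact exception type may vary between ifcopenshell versions):

```python
# A type that exists in the schema but not in this file: empty result.
print(len(f.by_type("IfcFlowSegment")))    # 0 for this architectural model

# A name that is not defined in the IFC schema at all: ifcopenshell raises
# an error instead of returning an empty tuple.
try:
    f.by_type("IfcNotARealType")           # hypothetical, misspelled type name
except Exception as exc:
    print("schema lookup failed:", exc)
```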
Since we have loaded an architectural model, entities such as [IfcWall](http://www.buildingsmart-tech.org/ifc/IFC2x3/TC1/html/ifcsharedbldgelements/lexical/ifcwall.htm), [IfcWindow](http://www.buildingsmart-tech.org/ifc/IFC2x3/TC1/html/ifcsharedbldgelements/lexical/ifcwindow.htm), or [IfcDoor](http://www.buildingsmart-tech.org/ifc/IFC2x3/TC1/html/ifcsharedbldgelements/lexical/ifcdoor.htm) are likely to be in the model, whereas [IfcFlowSegment](http://www.buildingsmart-tech.org/ifc/IFC2x3/TC1/html/ifcsharedbldgserviceelements/lexical/ifcflowsegment.htm) is probably not.## Here we use the by_type() function to test on the entities above no_walls = len(f.by_type("IfcWall")) no_windows = len(f.by_type("IfcWindow")) no_doors = len(f.by_type("IfcDoor")) no_flo_segments = len(f.by_type("IfcFlowSegment")) print("There are {} walls, {} windows, {} doors and {} flow segments in the file".format(no_walls,no_windows,no_doors,no_flo_segments))There are 24 walls, 26 windows, 6 doors and 0 flow segments in the fileThe is_a function in ifcopenshellAnother useful function in ifcopenshell is ifc_object.is_a(), which returns the IFC type of the ifc_object. It can also be used as ifc_object.is_a("particular ifc type") to check whether the ifc_object you have a reference to is of a particular ifc type: it returns True if it is and False if it is not.# get the list of all objects in file "f" of type IfcWall ifc_wall_objects = f.by_type("IfcWall") # get the first wall object in the list. a_wall = ifc_wall_objects[0] # print the ifc type of a_wall print("a_wall is of type %s"%(a_wall.is_a())) # Check if a_wall is of type IfcWall print("\nIt is %s that a_wall is of type IfcWall"%(a_wall.is_a("IfcWall")))a_wall is of type IfcWallStandardCase It is True that a_wall is of type IfcWallThe "." operator in Python with ifcopenshellWhen you have a reference to an ifc object, you can use the "." operator to access its attributes like so: ```ifc_object.Attribute```. Name is a common Attribute of objects, as is Description.# print a_wall's Name attribute print("The Name of a_wall is %s"%(a_wall.Name)) # print a_wall's Description attribute print("The Description of a_wall is %s"%(a_wall.Description))The Name of a_wall is Basic Wall:Generic - 200mm:345653 The Description of a_wall is NoneIfcopenshell and buildingSMARTs IFC documentation The by_type function and buildingSMARTs IFC documentationYou might wonder what happens if you pass an entity to by_type() that is not defined in the IFC schema. In that case the **function raises an error.** This is because ifcopenshell knows the IFC schema. Hence, when working with ifcopenshell, the documentation on the IFC schema defining the model you are working with is very useful. The is_a() function and buildingSMARTs IFC documentationIt is quite obvious that ```wall_in_list_of_walls.is_a("IfcWall")``` would return ```True``` and ```wall_in_list_of_walls.is_a("IfcWindow")``` would return ```False```. However, did you notice that the type was IfcWallStandardCase, yet a_wall is also of type IfcWall? What if you try ```wall_in_list_of_walls.is_a("IfcBuildingElement")```? Check the documentation of [IfcWall here](http://www.buildingsmart-tech.org/ifc/IFC2x3/TC1/html/ifcsharedbldgelements/lexical/ifcwall.htm) and evaluate its inheritance graph before you try (a small loop over these supertypes is sketched after the next cell).# Try with "IfcBuildingElement", "IfcElement", "IfcProduct" and "IfcRoot": a_wall.is_a("IfcBuildingElement")
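Here is that small sketch (assuming the same `a_wall` object as above): it simply walks the supertypes suggested in the comment and prints what is_a() reports for each.

```python
# Check a_wall against each supertype named in the comment above.
for ifc_type in ["IfcWall", "IfcBuildingElement", "IfcElement", "IfcProduct", "IfcRoot"]:
    print(ifc_type, a_wall.is_a(ifc_type))
```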
The "." operator using ifcopenshell and buildingSMARTs IFC documentationWhen you have a reference to an ifc object that is in your file, obtained with the f.by_type() function, you can refer to the documentation again to see what you can do with it. ifcopenshell supports the schema and enables you to access the object's attributes through the "." operator. So, looking into the documentation of [IfcWall here](http://www.buildingsmart-tech.org/ifc/IFC2x3/TC1/html/ifcsharedbldgelements/lexical/ifcwall.htm) again, we see that IfcWall inherits from IfcRoot, which gives the following attributes: * GlobalId * OwnerHistory * Name * Description# If our wall is inheriting from IfcRoot, it should have the following attributes: guid = a_wall.GlobalId owner_history = a_wall.OwnerHistory name = a_wall.Name description = a_wall.Description print("GlobalId: {},\nOwnerHistory: {},\nName: {},\nDescription: {}".format(guid,owner_history,name,description))GlobalId: 1xzRHg5wPCVvg4uLjqox1I, OwnerHistory: #48=IfcOwnerHistory(#45,#5,$,.NOCHANGE.,$,$,$,1556267234), Name: Basic Wall:Generic - 200mm:345653, Description: NoneAs shown above, the "." operator works directly on the schema attribute names. It can also be used on objects that are returned, such as [IfcOwnerHistory](http://www.buildingsmart-tech.org/ifc/IFC2x3/TC1/html/ifcutilityresource/lexical/ifcownerhistory.htm). As described by the inheritance graph of IfcOwnerHistory, OwningUser and OwningApplication are attributes of this object. These in turn refer to [IfcPersonAndOrganization](http://www.buildingsmart-tech.org/ifc/IFC2x3/TC1/html/ifcactorresource/lexical/ifcpersonandorganization.htm) and [IfcApplication](http://www.buildingsmart-tech.org/ifc/IFC2x3/TC1/html/ifcutilityresource/lexical/ifcapplication.htm) respectively.## OwningUser: owning_user = owner_history.OwningUser # attributes ref docs the_person = owning_user.ThePerson the_org = owning_user.TheOrganization roles = owning_user.Roles print("Person: {}, \nOrganization: {}, \nRoles: {}\n".format(the_person,the_org,roles)) ## OwningApplication: owning_app = owner_history.OwningApplication # attributes ref docs app_dev = owning_app.ApplicationDeveloper version = owning_app.Version app_f_name = owning_app.ApplicationFullName app_id = owning_app.ApplicationIdentifier print("Developer: {},\nVersion: {},\nApp Name: {}, \nApp id: {}".format(app_dev,version,app_f_name,app_id))Person: #39=IfcPerson($,'Eikerol','Hans',('Martin'),$,$,$,(#35)), Organization: #44=IfcOrganization($,'','',$,$), Roles: None Developer: #1=IfcOrganization($,'Autodesk Revit 2019 (ENU)',$,$,$), Version: 2019, App Name: Autodesk Revit 2019 (ENU), App id: Revit**Cleaning up by accessing objects.**## The person object only contains the third attribute, the GivenName p_given_name = the_person.GivenName ## The IfcOrganization object only returns empty strings for parameters 2 and 3, so it is not needed. ## Roles also returns None, so it is not needed. ## The organization that developed this was Autodesk, as shown by attribute 2 of the IfcOrganization obj. org_name = app_dev.Name ## Version and app id return valid string values. # adding some line breaks (\n) and tabs (\t) print("Persons Given Name:\t {},\nApp developer:\t\t {},\nApp:\t\t\t {},\nVersion:\t\t {}".format(p_given_name,org_name,app_id,version))Persons Given Name: Hans, App developer: Autodesk Revit 2019 (ENU), App: Revit, Version: 2019Combining by_type, is_a and the "." 
operator to extract property set information So, with these ifcopenshell capabilities and some Python and IFC knowledge, we can do interesting things. For example, we could create a function to get all quantity set information of the IfcElements we pass in. In IFC, property sets are related to elements through the IsDefinedBy attribute, which returns an [IfcRelDefinesByProperties](http://www.buildingsmart-tech.org/ifc/IFC2x3/TC1/html/ifckernel/lexical/ifcreldefinesbyproperties.htm), an objectified relationship that defines the relationships between property set definitions and objects.a_wall.IsDefinedByThe most important attributes of this objectified relationship entity are RelatedObjects and RelatingPropertyDefinition, which are, respectively, the list of all objects that share this property set definition and the property set definition itself. One use case of this would be to see all objects that have the same property set definition.# Which objects have the property set definition with the 2mdDGS1KjAKguN3yt8a52r guid? # Which objects have the property set definition with the 0CpuJJ7wTFA8vmntAxzlqi guid? # alternative: note that you can also use ifc_file.by_guid(guid) or even ifc_file[guid]. guid = "24Qrd55HL9WfKkEtlVL8LF" pSet_w_guid = [pset for pset in a_wall.IsDefinedBy if pset.GlobalId ==guid][0] objects_w_pset = pSet_w_guid.RelatedObjects objects_w_pset # As seen in the previous output, the list is shown in STEP formatting. # for 2mdDGS1KjAKguN3yt8a52r (#91) # and for 0CpuJJ7wTFA8vmntAxzlqi (#216,#135,#134,#131,#220,#130,#92,#155,#144,#132,#91,#160,#143,#154,#157,#158,#257,#261)Another use case is, of course, to access the properties that are defined in these sets. In order to do this we need to access the [IfcPropertySet](http://www.buildingsmart-tech.org/ifc/IFC2x3/TC1/html/ifckernel/lexical/ifcpropertyset.htm) through its RelatingPropertyDefinition attribute, for example for the pset above:pSet=pSet_w_guid.RelatingPropertyDefinition pSet
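As a small, hedged aside (assuming the same `a_wall` as above), the same navigation can be done for every relationship at once; the is_a() check guards against relationship subtypes that do not carry a RelatingPropertyDefinition:

```python
# List every property set definition attached to a_wall, together with the
# GUID of the relationship that attaches it and the definition's type and name.
for rel in a_wall.IsDefinedBy:
    # only IfcRelDefinesByProperties carries a RelatingPropertyDefinition
    if rel.is_a("IfcRelDefinesByProperties"):
        definition = rel.RelatingPropertyDefinition
        print(rel.GlobalId, definition.is_a(), definition.Name)
```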
This could also return an [IfcElementQuantity](http://www.buildingsmart-tech.org/ifc/IFC2x3/TC1/html/ifcproductextension/lexical/ifcelementquantity.htm). pSets have HasProperties and qSets have Quantities, so you'd want to check that it is of type IfcPropertySet in order to safely access its HasProperties attribute. Remember a nice ifcopenshell function that does that?pSet=pSet_w_guid.RelatingPropertyDefinition if pSet.is_a("IfcPropertySet"): print(pSet.HasProperties)(#593=IfcPropertySingleValue('LoadBearing',$,IfcBoolean(.T.),$), #656=IfcPropertySingleValue('Reference',$,IfcIdentifier('Generic - 200mm'),$), #657=IfcPropertySingleValue('ExtendToStructure',$,IfcBoolean(.F.),$), #658=IfcPropertySingleValue('IsExternal',$,IfcBoolean(.T.),$))Pulling it all togetherAs this outputs a rather unformatted list of different properties, where we need to "dot" a lot, Thomas has provided some Python magic below using the [map](http://book.pythontips.com/en/latest/map_filter.html) function.# Takes a pset of type IfcPropertySet or IfcElementQuantity and returns a tuple of its key-value tuples of, respectively, # properties (name, value) or quantities (name, value) def get_key_values(pset): def to_tuple(prop): if prop.is_a("IfcPropertySingleValue"): return prop.Name, prop.NominalValue.wrappedValue elif prop.is_a("IfcPhysicalQuantity"): if prop.is_a("IfcQuantityArea"): return prop.Name, prop.AreaValue if pset.is_a("IfcPropertySet"): return tuple(map(to_tuple, pset.HasProperties)) elif pset.is_a("IfcElementQuantity"): return tuple(map(to_tuple, pset.Quantities)) else: return () # testing it on our pSet from above: get_key_values(pSet_w_guid.RelatingPropertyDefinition) # test with IfcElementQuantity test_el_Quantity = f.by_type("IfcElementQuantity") if test_el_Quantity: get_key_values(test_el_Quantity[0]) else: print("No IfcElementQuantity in model")No IfcElementQuantity in modelCORDIS FP7import json import re import urllib from titlecase import titlecase import pandas as pd pd.set_option('display.max_columns', 50)Read in Dataall_projects = pd.read_excel('input/fp7/cordis-fp7projects.xlsx') all_projects.shape all_organizations = pd.read_excel('input/fp7/cordis-fp7organizations.xlsx') all_organizations.shape all_briefs = pd.read_excel('input/fp7/cordis-fp7briefs.xlsx') all_briefs.shapeCount Organisations and CountriesIt is useful to know the total number of organisations and the number of countries involved, to deal with cases where the contribution of each organisation is unknown.all_organizations[['projectRcn', 'id', 'country']].count() [ all_organizations.country.isna().sum(), (all_organizations.country[~all_organizations.country.isna()] != all_organizations.country[~all_organizations.country.isna()].str.strip()).sum(), (all_organizations.country[~all_organizations.country.isna()] != all_organizations.country[~all_organizations.country.isna()].str.upper()).sum(), ] project_num_organizations = all_organizations.groupby('projectRcn').\ id.nunique().reset_index().rename(columns={'id': 'num_organizations'}) project_num_organizations.shape project_num_countries = all_organizations.groupby('projectRcn').\ country.nunique().reset_index().rename(columns={'country': 'num_countries'}) project_num_countries.shape project_num_organizations_and_countries = pd.merge( project_num_countries, project_num_organizations, on='projectRcn', validate='1:1' ) project_num_organizations_and_countries.shape project_num_organizations_and_countries.head()Restrict to UKWe are only interested in projects and organizations where the coordinator or at least one participant institution is in the UK.uk_organizations = all_organizations[all_organizations.country == 'UK'] uk_organizations.shape uk_organizations.head() uk_projects = all_projects[all_projects.id.isin(uk_organizations.projectID)] uk_projects.shape uk_projects.head() uk_briefs = 
all_briefs[all_briefs.projectRcn.isin(uk_projects.rcn)] uk_briefs.shape uk_briefs.head()Examples Coordinator outside UKThe UK has two participant institutions. It appears that `projects.ecMaxContribution` is the sum of all `organizations.ecContribution`s for all coordinator and participant institutions.uk_projects[uk_projects.rcn == 101244] uk_organizations[uk_organizations.projectRcn == 101244] all_organizations[all_organizations.projectRcn == 101244].ecContribution.max() all_organizations[all_organizations.projectRcn == 101244].ecContribution.sum() all_briefs[all_briefs.projectRcn == 101244]Coordinator in UKThis one is also interesting in that it seems to have a lot of duplicate records that don't have titles, for some reason. We will need to filter those out.uk_projects[uk_projects.rcn == 99464] uk_organizations[uk_organizations.projectRcn == 99464] uk_organizations[uk_organizations.projectRcn == 99464].ecContribution.unique().sum() all_briefs[all_briefs.projectRcn == 99464]Duplicate ProjectsIt looks like it's safe to just drop projects without titles; those seem to be the only duplicates.[uk_projects.rcn.nunique(), uk_projects.id.nunique(), uk_projects.shape] uk_projects[uk_projects.duplicated('rcn', keep=False)] uk_projects[pd.isnull(uk_projects.title)] clean_projects = uk_projects[~pd.isnull(uk_projects.title)].copy() # Could include coordinator and participants... would need some extra cleaning. clean_projects.drop([ 'id', 'programme', 'topics', 'frameworkProgramme', 'call', 'fundingScheme', 'coordinator', 'participants', 'subjects' ], axis=1, inplace=True) clean_projects.rename(columns={ 'startDate': 'start_date', 'endDate': 'end_date', 'projectUrl': 'project_url', 'totalCost': 'total_cost_eur', 'ecMaxContribution': 'max_contribution_eur', 'coordinatorCountry': 'coordinator_country', 'participantCountries': 'participant_countries' }, inplace=True) clean_projects.shape clean_projects.describe() clean_projects.head()Check Project Columnsclean_projects.count()AcronymJust missing one.clean_projects[clean_projects.acronym.isna()]StatusSome projects are listed as cancelled. It's not clear what this means exactly. Spot checks reveal that some of them apparently received at least partial funding and delivered some results, so it does not seem appropriate to remove them altogether.- https://cordis.europa.eu/result/rcn/237795_en.html (TORTELLEX)- https://cordis.europa.eu/result/rcn/196663_en.html (YSCHILLER)- https://cordis.europa.eu/project/rcn/188111_en.html (MICARTREGEN) - no resultsclean_projects.status.value_counts() clean_projects[clean_projects.status == 'CAN'].head()Title(clean_projects.title.str.strip() != clean_projects.title).sum()Start and End DatesSome are missing. Discard for now. 
There is some overlap with the cancelled projects, but it is not exact.(clean_projects.start_date.isna() | clean_projects.end_date.isna()).sum() ((clean_projects.status == 'CAN') & (clean_projects.start_date.isna() | clean_projects.end_date.isna())).sum() ((clean_projects.status != 'CAN') & (clean_projects.start_date.isna() | clean_projects.end_date.isna())).sum() clean_projects = clean_projects[ ~clean_projects.start_date.isna() | ~clean_projects.end_date.isna() ] clean_projects.shape (clean_projects.start_date > clean_projects.end_date).sum()Project URLLooks pretty clean.(~clean_projects.project_url.isna()).sum() def is_valid_url(url): result = urllib.parse.urlparse(str(url)) return bool((result.scheme == 'http' or result.scheme == 'https') and result.netloc) project_url_bad = ~clean_projects.project_url.isna() & ~clean_projects.project_url.apply(is_valid_url) project_url_bad.sum() clean_projects[project_url_bad] clean_projects.loc[project_url_bad, 'project_url'] = 'http://' + clean_projects.loc[project_url_bad, 'project_url'] (~clean_projects.project_url.isna() & ~clean_projects.project_url.apply(is_valid_url)).sum()Objective(clean_projects.objective.str.strip() != clean_projects.objective).sum() clean_projects.objective = clean_projects.objective.str.strip()Total Cost and EC Max Contributionclean_projects.total_cost_eur.describe() clean_projects.max_contribution_eur.describe() (clean_projects.max_contribution_eur > clean_projects.total_cost_eur).sum()Clean Up OrganizationsI notice several issues:- Some are missing IDs (but do have postcodes)- Some are missing postcodes- Some postcodes are clearly typo'd (digit substitutions, etc);- Some postcodes have been terminated (searched for them with google)There are only 2993 unique organization IDs, so this is probably the result of a join.For now, drop all organizations that don't have both an ID and a valid postcode. (It does look possible to match names to find IDs, and many without postcodes still have addresses, which we could geocode.)Would be interesting to try this: https://codereview.stackexchange.com/questions/117801/uk-postcode-validation-and-format-correction-tool[ uk_organizations.shape, uk_organizations.id.notna().sum(), uk_organizations.id.isna().sum(), uk_organizations.id[uk_organizations.id.notna()].nunique(), uk_organizations.postCode.isna().sum(), uk_organizations.postCode[uk_organizations.postCode.notna()].nunique() ] organizations = uk_organizations[uk_organizations.id.notna() & uk_organizations.postCode.notna()].copy() organizations.id = organizations.id.astype('int64') organizations.postCode = organizations.postCode.astype('str') [ organizations.shape, organizations.id.nunique(), organizations.postCode.nunique() ] ukpostcodes = pd.read_csv('../postcodes/input/ukpostcodes.csv.gz') ukpostcodes.shape organizations.postCode.isin(ukpostcodes.postcode).sum() organizations['cleanPostcode'] = organizations.postCode.\ str.upper().\ str.strip().\ str.replace(r'[^A-Z0-9]', '').\ str.replace(r'^(\S+)([0-9][A-Z]{2})$', r'\1 \2') organizations.cleanPostcode.isin(ukpostcodes.postcode).sum() organizations.cleanPostcode[~organizations.cleanPostcode.isin(ukpostcodes.postcode)].unique() organizations = organizations[organizations.cleanPostcode.isin(ukpostcodes.postcode)] organizations.shape clean_projects = clean_projects[clean_projects.rcn.isin(organizations.projectRcn)] clean_projects.shapeClean Up Duplicate OrganizationsI think there is also a join on the contacts, because we get multiple rows for some project-organization pairs. 
The main thing is that we want the `ecContribution` to be consistent. Otherwise, any row will do.organizations.sort_values(['projectRcn', 'id']).\ groupby(['projectRcn', 'id']).\ filter(lambda x: x.shape[0] > 1) organizations.groupby(['projectRcn', 'id']).\ filter(lambda x: x.ecContribution.nunique() > 1).shape clean_organizations = organizations.groupby(['projectRcn', 'id']).first() clean_organizations.reset_index(inplace=True) clean_organizations.drop([ 'projectID', 'projectAcronym', 'shortName', 'activityType', 'endOfParticipation', 'country', 'street', 'city', 'postCode', 'contactType', 'contactTitle', 'contactFirstNames', 'contactLastNames', 'contactFunction', 'contactTelephoneNumber', 'contactFaxNumber', 'contactEmail' ], axis=1, inplace=True) clean_organizations.rename({ 'projectRcn': 'project_rcn', 'id': 'organization_id', 'ecContribution': 'contribution_eur', 'organizationUrl': 'organization_url', 'cleanPostcode': 'postcode' }, axis=1, inplace=True) clean_organizations.name = clean_organizations.name.apply(titlecase) clean_organizations.shape clean_organizations.head()Check Organisationsclean_organizations.count()Roleclean_organizations.role.value_counts()Name(clean_organizations.name.str.strip() != clean_organizations.name).sum()Contribution EURMissing for some organisations.clean_organizations.contribution_eur.describe() clean_organizations.contribution_eur.isna().sum()Organisation URLMostly clean. Found a couple with a `;` delimiting two URLs, neither of which resolved, so we can get rid of those.(~clean_organizations.organization_url.isna()).sum() organization_url_bad = ~clean_organizations.organization_url.isna() & \ ~clean_organizations.organization_url.apply(is_valid_url) organization_url_bad.sum() clean_organizations.loc[organization_url_bad, 'organization_url'] = \ 'http://' + clean_organizations.loc[organization_url_bad, 'organization_url'] organization_url_bad = ~clean_organizations.organization_url.isna() & \ ~clean_organizations.organization_url.apply(is_valid_url) organization_url_bad.sum() clean_organizations[ ~clean_organizations.organization_url.isna() & \ clean_organizations.organization_url.str.match('http.*http')].organization_url.unique() clean_organizations.loc[ ~clean_organizations.organization_url.isna() & \ clean_organizations.organization_url.str.match('http.*http'), 'organization_url'] = float('nan')BriefsMight as well merge these into the projects where we have them. 
We have a few duplicates to take care of.clean_briefs = uk_briefs[ uk_briefs.projectRcn.isin(clean_projects.rcn) &\ (uk_briefs.title.notna() | uk_briefs.teaser.notna() | uk_briefs.article.notna()) ].copy() clean_briefs.shape clean_briefs[clean_briefs.projectRcn.duplicated(keep=False)] clean_briefs = clean_briefs.sort_values('lastUpdateDate') clean_briefs = clean_briefs[~clean_briefs.projectRcn.duplicated(keep='last')] clean_briefs.shape clean_briefs.drop([ 'rcn', 'language', 'lastUpdateDate', 'country', 'projectAcronym', 'programme', 'topics', 'relatedReportRcn' ], axis=1, inplace=True) clean_briefs.rename({ 'projectRcn': 'rcn', 'title': 'brief_title', 'relatedReportTitle': 'related_report_title', 'imageUri': 'image_path' }, axis=1, inplace=True) clean_briefs.head() clean_projects_with_briefs = pd.merge( clean_projects, clean_briefs, on='rcn', how='left', validate='1:1' ) clean_projects_with_briefs.head()Checksclean_organizations[clean_organizations.project_rcn == 101244] clean_projects_with_briefs[clean_projects_with_briefs.rcn == 101244] clean_organizations[clean_organizations.project_rcn == 99464] clean_projects_with_briefs[clean_projects_with_briefs.rcn == 99464] project_organizations = pd.merge( clean_projects_with_briefs, clean_organizations, left_on='rcn', right_on='project_rcn', validate='1:m') project_organizations.drop(['project_rcn'], axis=1, inplace=True) project_organizations.shape project_organizations.head() uk_contributions = project_organizations.groupby('rcn').aggregate({'contribution_eur': sum}) uk_contributions.reset_index(inplace=True) uk_contributions.head() project_uk_contributions = pd.merge( clean_projects_with_briefs, uk_contributions, on='rcn', validate='1:1') project_uk_contributions.head() project_uk_contributions[project_uk_contributions.contribution_eur > project_uk_contributions.max_contribution_eur + 0.1].shape project_organization_uk_contributions = pd.merge( project_uk_contributions, clean_organizations, left_on='rcn', right_on='project_rcn', validate='1:m' ) project_organization_uk_contributions = pd.merge( project_organization_uk_contributions, ukpostcodes, on='postcode', validate='m:1' ) project_organization_uk_contributions.shape project_organization_uk_contributions.head() (project_uk_contributions.contribution_eur < 1000).value_counts()Add Numbers of Organisations and CountriesAdd these back on and do a sanity check against the `participant_countries` field. 
They mostly match up, except for a few relatively small discrepancies.clean_projects_with_briefs.shape clean_projects_with_briefs = pd.merge( clean_projects_with_briefs, project_num_organizations_and_countries, left_on='rcn', right_on='projectRcn', validate='1:1') clean_projects_with_briefs.drop('projectRcn', axis=1, inplace=True) clean_projects_with_briefs.shape clean_projects_with_briefs.head() [ clean_projects_with_briefs.num_countries.isna().sum(), clean_projects_with_briefs.coordinator_country.isna().sum(), clean_projects_with_briefs.participant_countries.isna().sum() ] def check_num_countries(): ccs = clean_projects_with_briefs.coordinator_country pcs = clean_projects_with_briefs.participant_countries ncs = clean_projects_with_briefs.num_countries pcs_isna = pcs.isna() coordinator_mismatch = clean_projects_with_briefs[pcs_isna][ncs[pcs_isna] != 1].copy() coordinator_mismatch['check'] = 1 cs = ccs[~pcs_isna] + ';' + pcs[~pcs_isna] check_ncs = cs.apply(lambda x: len(set(x.split(';')))) participant_mismatch = clean_projects_with_briefs[~pcs_isna][ncs[~pcs_isna] != check_ncs].copy() participant_mismatch['check'] = check_ncs return pd.concat([coordinator_mismatch, participant_mismatch])\ [['rcn', 'coordinator_country', 'participant_countries', 'num_countries', 'check', 'num_organizations']] check_num_countries() all_organizations.country[all_organizations.projectRcn == 100467].unique() all_organizations.country[all_organizations.projectRcn == 203681].unique() all_organizations.country[all_organizations.projectRcn == 90982].unique()I suspect a problem with handling of `NA`; that is a valid code (Namibia), but maybe in some cases it is being used for Not Available. Convert to GBPeur_gbp = pd.read_pickle('../exchange_rates/output/exchange_rates.pkl.gz') eur_gbp.tail() def find_average_eur_gbp_rate(row): # create timeseries from start to end days = pd.date_range(row.start_date, row.end_date, closed='left') daily = pd.DataFrame({ 'month_start': days, 'weight': 1.0 / days.shape[0] }) monthly = daily.resample('MS', on='month_start').sum() monthly = pd.merge(monthly, eur_gbp, on='month_start', validate='1:1') return (monthly.weight * monthly.rate).sum() clean_projects_with_briefs['eur_gbp'] = \ clean_projects_with_briefs.apply( find_average_eur_gbp_rate, axis=1, result_type='reduce') clean_projects_with_briefs.head()Save Dataclean_projects_with_briefs.to_pickle('output/fp7_projects.pkl.gz') clean_organizations.to_pickle('output/fp7_organizations.pkl.gz')**File name**: main.ipynb**Authors**: <[](mailto:)>, <[](mailto:)>This file is part of REDE project (https://github.com/akarazeev/REDE)import torch import torchvision import torchvision.transforms as transforms %load_ext autoreload %autoreload 2 from utils.dataset import REDE train_dataset = REDE('data/rede', train=True, transform=torchvision.transforms.ToTensor(), download=True, test_size=0.2) train_dataset test_dataset = REDE('data/rede', train=False, transform=torchvision.transforms.ToTensor(), download=True, test_size=0.2, test_indices=train_dataset.test_indices) test_dataset BATCH = 10 train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=BATCH) import numpy as np import matplotlib.pyplot as plt %matplotlib inline # Function to show an image. def imshow(img): npimg = img.numpy() plt.imshow(np.transpose(npimg, (1, 2, 0))) dataiter = iter(train_loader) images, parameters = dataiter.next() # Show images. 
imshow(torchvision.utils.make_grid(images)) # Corresponding parameters of simulated models - # (gap, width1, height, radius1, width2). parameters from torch.autograd import Variable import torch.optim as optim import torch.nn as nn class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.encoder = nn.Sequential( nn.Conv2d(1, 3, 5, stride=2), nn.MaxPool2d(5, 1), nn.Conv2d(3, 5, 3, stride=1), nn.MaxPool2d(3, 1), nn.ReLU(True), nn.Conv2d(5, 2, 3, stride=1), nn.MaxPool2d(2, 2) ) self.head = nn.Linear(396, 5) def forward(self, x): x = self.encoder(x) return self.head(x.view(x.size(0), -1)) criterion = nn.MSELoss()Traindef test_net(net): """Function helps to calculate loss on test dataset.""" test_loss = 0 for data in test_loader: inputs, labels = data labels = labels * 1e7 inputs, labels = Variable(inputs), Variable(labels) outputs = net(inputs) loss = criterion(outputs, labels) test_loss += loss.data[0] return test_lossTraining loop%%time EPOCH = 10 # Total number of epochs to train. LR = 0.0001 # Initial learning rate. net = Net() optimizer = optim.Adam(net.parameters(), LR) print_each = 30 # Print training loss every `print_each` iteration. test_each = 2 # Print testing loss every `test_each` epoch. reduce_lr = 4 # Reduce `LR` by 2 every `reduce_lr` epoch. curtimestep = 0 test_steps = [] test_losses = [] train_steps = [] train_losses = [] for epoch in range(EPOCH): # Loop over the dataset multiple times. running_loss = 0.0 for i, data in enumerate(train_loader, 0): for param_group in optimizer.param_groups: param_group['lr'] = LR # Get the inputs. inputs, parameters = data parameters = parameters * 1e7 # Wrap them in Variable. inputs, parameters = Variable(inputs), Variable(parameters) # Zero the parameter gradients. optimizer.zero_grad() # Forward -> backward -> optimize. outputs = net(inputs) loss = criterion(outputs, parameters) loss.backward() optimizer.step() # Add loss per element. running_loss += loss.data[0] / len(parameters) curtimestep += 1 # Print statistics. if i % print_each == print_each - 1: loss_per_element = running_loss / print_each print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, loss_per_element)) train_losses.append(loss_per_element) train_steps.append(curtimestep) running_loss = 0.0 # Calculate loss on test dataset. if epoch % test_each == test_each - 1: test_loss = test_net(net) test_loss /= len(test_dataset) print('[%d] Test loss: %.3f' % (epoch + 1, test_loss)) print('LR: {}'.format(LR)) test_losses.append(test_loss) test_steps.append(curtimestep) # Reduce LR by 2 every `reduce_lr` epoch. if epoch % reduce_lr == reduce_lr - 1: LR /= 2. print('Finished Training') plt.plot(train_steps, train_losses, label='Train Loss') plt.plot(test_steps, test_losses, label='Test Loss') plt.xlabel('Timesteps') plt.ylabel('Loss per element') plt.legend() plt.show()Test# Get some random testing images. dataiter = iter(test_loader) images, parameters = dataiter.next() # Print images. 
imshow(torchvision.utils.make_grid(images)) parameters * 1e7 inputs = Variable(images) outputs = net(inputs) outputs*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by ; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).**The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).* | [Contents](Index.ipynb) | [How to Run Python Code](01-How-to-Run-Python-Code.ipynb) > 1. Introduction Conceived in the late 1980s as a teaching and scripting language, Python has since become an essential tool for many programmers, engineers, researchers, and data scientists across academia and industry.As an astronomer focused on building and promoting the free open tools for data-intensive science, I've found Python to be a near-perfect fit for the types of problems I face day to day, whether it's extracting meaning from large astronomical datasets, scraping and munging data sources from the Web, or automating day-to-day research tasks.The appeal of Python is in its simplicity and beauty, as well as the convenience of the large ecosystem of domain-specific tools that have been built on top of it.For example, most of the Python code in scientific computing and data science is built around a group of mature and useful packages:- [NumPy](http://numpy.org) provides efficient storage and computation for multi-dimensional data arrays.- [SciPy](http://scipy.org) contains a wide array of numerical tools such as numerical integration and interpolation.- [Pandas](http://pandas.pydata.org) provides a DataFrame object along with a powerful set of methods to manipulate, filter, group, and transform data.- [Matplotlib](http://matplotlib.org) provides a useful interface for creation of publication-quality plots and figures.- [Scikit-Learn](http://scikit-learn.org) provides a uniform toolkit for applying common machine learning algorithms to data.- [IPython/Jupyter](http://jupyter.org) provides an enhanced terminal and an interactive notebook environment that is useful for exploratory analysis, as well as creation of interactive, executable documents. For example, the manuscript for this report was composed entirely in Jupyter notebooks.No less important are the numerous other tools and packages which accompany these: if there is a scientific or data analysis task you want to perform, chances are someone has written a package that will do it for you.To tap into the power of this data science ecosystem, however, first requires familiarity with the Python language itself.I often encounter students and colleagues who have (sometimes extensive) backgrounds in computing in some language – MATLAB, IDL, R, Java, C++, etc. 
– and are looking for a brief but comprehensive tour of the Python language that respects their level of knowledge rather than starting from ground zero.This report seeks to fill that niche.As such, this report in no way aims to be a comprehensive introduction to programming, or a full introduction to the Python language itself; if that is what you are looking for, you might check out one of the recommended references listed in [Resources for Learning](16-Further-Resources.ipynb).Instead, this will provide a whirlwind tour of some of Python's essential syntax and semantics, built-in data types and structures, function definitions, control flow statements, and other aspects of the language.My aim is that readers will walk away with a solid foundation from which to explore the data science stack just outlined. Using Code ExamplesSupplemental material (code examples, exercises, etc.) is available for download at https://github.com/jakevdp/WhirlwindTourOfPython/.This book is here to help you get your job done.In general, if example code is offered with this book, you may use it in your programs and documentation.You do not need to contact us for permission unless you’re reproducing a significant portion of the code.For example, writing a program that uses several chunks of code from this book does not require permission.Selling or distributing a CD-ROM of examples from O’Reilly books does require permission.Answering a question by citing this book and quoting example code does not require permission.Incorporating a significant amount of example code from this book into your product’s documentation does require permission.We appreciate, but do not require, attribution.An attribution usually includes the title, author, publisher, and ISBN.For example: "A Whirlwind Tour of Python by (O’Reilly). Copyright 2016 O’Reilly Media, Inc., 978-1-491-96465-1."If you feel your use of code examples falls outside fair use or the per‐ mission given above, feel free to contact us at . Installation and Practical ConsiderationsInstalling Python and the suite of libraries that enable scientific computing is straightforward whether you use Windows, Linux, or Mac OS X. This section will outline some of the considerations when setting up your computer. Python 2 vs Python 3This report uses the syntax of Python 3, which contains language enhancements that are not compatible with the *2.x* series of Python.Though Python 3.0 was first released in 2008, adoption has been relatively slow, particularly in the scientific and web development communities.This is primarily because it took some time for many of the essential packages and toolkits to be made compatible with the new language internals.Since early 2014, however, stable releases of the most important tools in the data science ecosystem have been fully-compatible with both Python 2 and 3, and so this book will use the newer Python 3 syntax.Even though that is the case, the vast majority of code snippets in this book will also work without modification in Python 2: in cases where a Py2-incompatible syntax is used, I will make every effort to note it explicitly. 
Installation with condaThough there are various ways to install Python, the one I would suggest – particularly if you wish to eventually use the data science tools mentioned above – is via the cross-platform Anaconda distribution.There are two flavors of the Anaconda distribution:- [Miniconda](http://conda.pydata.org/miniconda.html) gives you Python interpreter itself, along with a command-line tool called ``conda`` which operates as a cross-platform package manager geared toward Python packages, similar in spirit to the ``apt`` or ``yum`` tools that Linux users might be familiar with.- [Anaconda](https://www.continuum.io/downloads) includes both Python and ``conda``, and additionally bundles a suite of other pre-installed packages geared toward scientific computing.Any of the packages included with Anaconda can also be installed manually on top of Miniconda; for this reason I suggest starting with Miniconda.To get started, download and install the Miniconda package – make sure to choose a version with Python 3 – and then install the IPython notebook package:```[~]$ conda install ipython-notebook```For more information on ``conda``, including information about creating and using conda environments, refer to the Miniconda package documentation linked at the above page. The Zen of PythonPython aficionados are often quick to point out how "intuitive", "beautiful", or "fun" Python is.While I tend to agree, I also recognize that beauty, intuition, and fun often go hand in hand with familiarity, and so for those familiar with other languages such florid sentiments can come across as a bit smug.Nevertheless, I hope that if you give Python a chance, you'll see where such impressions might come from.And if you *really* want to dig into the programming philosophy that drives much of the coding practice of Python power-users, a nice little Easter egg exists in the Python interpreter: simply close your eyes, meditate for a few minutes, and ``import this``:import thisThe Zen of Python, by Beautiful is better than ugly. Explicit is better than implicit. Simple is better than complex. Complex is better than complicated. Flat is better than nested. Sparse is better than dense. Readability counts. Special cases aren't special enough to break the rules. Although practicality beats purity. Errors should never pass silently. Unless explicitly silenced. In the face of ambiguity, refuse the temptation to guess. There should be one-- and preferably only one --obvious way to do it. Although that way may not be obvious at first unless you're Dutch. Now is better than never. Although never is often better than *right* now. If the implementation is hard to explain, it's a bad idea. If the implementation is easy to explain, it may be a good idea. 
Namespaces are one honking great idea -- let's do more of those!dimensionality vs avg mapping penalty (source vs target) ~~cache maps~~import map_tools import importlib importlib.reload(IJB) importlib.reload(map_tools)eval mapsdataframes = {} fit_flips = False decay_coef = 0.001 pre_template_map= True is_rotation_map = True class Args: def __init__(self, subset='IJBC', is_bunch=False, restore_embs_left=None, restore_embs_right=None, fit_mapping=False, fit_flips=False, decay_coef=0.0, pre_template_map=False, is_rotation_map=True, is_procrustes=False, is_wahba=False, use_face_scores=False, explained_variance_proportion=1.0, save_result="IJB_result/{model_name}_{subset}.npz"): self.subset = subset self.is_bunch=is_bunch self.restore_embs_left = restore_embs_left self.restore_embs_right = restore_embs_right self.fit_mapping = fit_mapping self.fit_flips = fit_flips self.decay_coef = decay_coef self.pre_template_map = pre_template_map self.is_rotation_map = is_rotation_map self.is_procrustes = is_procrustes self.is_wahba = is_wahba self.explained_variance_proportion = explained_variance_proportion self.save_result = save_result self.save_embeddings = False self.model_file = None self.data_path = './' self.batch_size=64 self.save_label=False self.force_reload=False self.is_one_2_N=False self.use_face_scores=use_face_scores self.plot_only=None self.n_individuals=-1 self.explained_variance_proportion=1.0 self.print_log=False def __str__(self): return str(self.__class__) + ": " + str(self.__dict__) for left_embs, left_dataset, left_architecture, left_head in embs_list: for right_embs, right_dataset, right_architecture, right_head in embs_list: if left_embs == right_embs: fit_mapping=False else: fit_mapping=True try: #for pre_template_map in pre_template_map: save_result_name = '{}_TO_{}_procrustes'.format(left_embs.split('/')[-1].split('.')[0], right_embs.split('/')[-1].split('.')[0]) save_result = '../../../../results/{}.npz'.format(save_result_name) args = Args(subset='IJBC', is_bunch=False, restore_embs_left=left_embs, restore_embs_right=right_embs, fit_mapping=fit_mapping, fit_flips=fit_flips, decay_coef=decay_coef, pre_template_map=pre_template_map, is_rotation_map=is_rotation_map, is_procrustes=True, save_result=save_result) df, fig = IJB.main(args) df['L_DATASET'] = left_dataset df['L_ARCH'] = left_architecture df['L_HEAD'] = left_head df['R_DATASET'] = right_dataset df['R_ARCH'] = right_architecture df['R_HEAD'] = right_head display(df) dataframes[save_result_name] = df print('saving to', args.save_result + '.csv') df.to_csv(args.save_result + '.csv') except Exception: traceback.print_exc()Data Analysis Project with Pandas Titanic data analysed with Python 3! 1. Introduction In this project we will investigate a dataset containing information about the Titanic passengers, indicating whether each individual died or survived. We will relate each passenger's class to their survival, to make explicit whether there was a preference for saving passengers from the higher classes. We will also analyse whether factors such as age and the fare paid (linked to the class on board) influenced whether an individual survived or not. Importing the necessary libraries!%matplotlib inline from datetime import datetime # importing modules for data analysis import numpy as np import pandas as pd # for making plots import matplotlib.pyplot as plt # and for making prettier plots!
import seaborn as snsVamos começar a análise importando os dados do Titanic!file_ti = 'titanic-data-6.csv' titanics = pd.read_csv(file_ti)Mostrando as primeiras linhas do arquivo para termos uma ideia dos dados!titanics.head(5)Analisando as dimensões dos dados!DimX = titanics.shape[0] DimY = titanics.shape[1] print("Temos "+ str(DimX) + " linhas no DataFrame do Titanic.") print("Temos "+ str(DimY) + " colunas no DataFrame do Titanic.")Temos 891 linhas no DataFrame do Titanic. Temos 12 colunas no DataFrame do Titanic.2. Limpeza de Dados Vamos ver se há alguma duplicata no dataframe!titanics.duplicated().all()logo não há duplicatas no arquivo! vamos ter uma ideia dos atributos e valores distintos de cada coluna do dataframe!titanics.nunique()3. Análise Exploratória de Dados PERGUNTA 1 Aqui serão utilizadas funcionalidades do Python para analisar qual o sexo predominante, masculino ou feminino. Igualmente pode-se investigar qual a idade média das pessoas e como ela está distribuída.titanics["Sex"].describe()Da descrição acima podemos concluir que:Temos 891 entradas, sendo que haviam diversas duplicatas e valores que não eram números só temos dois valores, portanto, "masculino" ou "feminino"o sexo que mais aparece é o masculino, com frequencia 577, logo temos 314 (891-577) do sexo femininotitanics["Age"].describe()Analisando a idade média vemos que ela é de aproximadamente 30 anos. O desvio padrão da idade media é relativamente grande, correspondendo à aproximadamente metade do valor médio da idade, ou seja, há um perfil bem variado de pessoas. O valor mínimo que corresponde a 0.42 deve ser um recém nascido, e o valor máximo, que corresponde a 80 anos, é uma pessoa bem idosa para a época. PERGUNTA 2 Com respeito à tarifa paga, como ela está distribuída?all_fare = titanics['Fare'] all_fare.hist() plt.xticks(rotation=30,size=16) plt.yticks(size=16) plt.grid(linestyle="--",color='r') plt.title('Histograma de tarifa paga',size=28) plt.xlabel("Tarifa",size=24) plt.ylabel("Pessoas",size=24) plt.show()Do gráfico acima vemos que um enorme número de pessoas foram de classe "econômica".Pode-se inferir claramente nesses dados uma distribuição centradas em três valores,mostrando a existencia de três classes.Diminui muito o número de pessoas viajando em primeira e segunda classe! PERGUNTA 3 Vamos analisar abaixo o caso de sobrevivência, ou não, em função de quanto foi pago o que deve ser indicador se a classe social mais elevada foi previlegiada.### são tres classes de passageiros, vamos agora filtrar agora por classe def class_titanics(titanics,Class): """ Input: titanics-> o dataframe a ser analisado Class -> a classe que vai ser analisada Output: retorna o dataframe dado como parametro filtrando os dados onde a coluna 'PClass' é igual a Class. """ return titanics[titanics['Pclass']==Class]['Pclass'].count() p1= class_titanics(titanics,1) p2= class_titanics(titanics,2) p3= class_titanics(titanics,3) print(p1,p2,p3) ### verificando se o total de pessoas nas três classes é igual ao total! 
print(p1+p2+p3 == titanics.shape[0])216 184 491 TrueVamos agora fazer um histograma de quantas morreram por classeclass_died = titanics[titanics['Survived']==0]['Pclass'] #class_surv= class_surv/class_surv.sum() class_died.hist( weights =100* np.ones_like(class_died.index)/len(class_died.index),figsize=(10,7)) #locations=bins #labels = ['1','2','3',] plt.xticks(rotation=30,size=16) plt.yticks(size=16) plt.grid(linestyle="--",color='r') plt.title('Percentagem de pessoas mortas por Classe',size=28) plt.xlabel("Classe",size=36) plt.ylabel("Percentagem (%)",size=28) plt.show()A partir do histograma acima podemos inferir que muito mais pessoas da terceira classemorreram. Será que as primeiras classes foram previlegiadas em detrimento da terceira classe? Agora fazemos um histograma dos sobreviventes por classe!class_surv = titanics[titanics['Survived']==1]['Pclass'] class_surv.hist( weights =100* np.ones_like(class_surv.index)/len(class_surv.index),figsize=(10,7)) plt.xticks(rotation=30,size=16) plt.yticks(size=16) plt.grid(linestyle="--",color='r') plt.title('Percentagem de pessoas sobreviventes por Classe',size=28) plt.xlabel("Classe",size=36) plt.ylabel("Percentagem (%)",size=28) plt.show()Podemos notar que a terceira classe teve um número semelhante de sobreviventes do que as classes mais altas.No entanto, há muito mais pessoas da terceira classe, o que vem a mostrar que relativamente ela foi prejudicada. Vamos analisar agora os sobreviventes com relação à população inicial de cada classe!Assim as coisas ficarão mais clarasages_group = titanics[titanics['Survived']==1] agesurv_1=ages_group.query('Pclass==1')['Pclass'].count()/p1 agesurv_2=ages_group.query('Pclass==2')['Pclass'].count()/p2 agesurv_3=ages_group.query('Pclass==3')['Pclass'].count()/p3 list_ages = [agesurv_1,agesurv_2,agesurv_3] ages_0_to_80 = pd.Series(list_ages, index = ['1a Classe','2a Classe','3a Classe']) labels=['1st Class','2nd Class','3rd Class'] plt.figure(figsize=(9,7),dpi=100) ages_0_to_80.plot(kind='pie',subplots=True, autopct='%1.1f%%', startangle=90, shadow=False, fontsize=18,labels=labels,legend=True,x='V') plt.title('Titanic Accident Survivors by Class',size=20,color='b',weight='bold') plt.axes().set_ylabel(' ') plt.legend(loc='upper left',fontsize=12) plt.show()/home/vagner/anaconda3/lib/python3.6/site-packages/matplotlib/cbook/deprecation.py:106: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance. warnings.warn(message, mplDeprecation, stacklevel=1)Do gráfico acima podemos concluir que a taxa relativa de sobrevivência da primeira classeé quase três vezes maior que a da terceira classe!!!Ainda, a segunda classe tem uma taxa de sobrevivência aproximadamente duas vezes maiorque a terceira classe SIM! As primeiras classes foram previlegiadas!!!! 
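As a quick cross-check of the class-wise survival rates derived above, the same numbers can be obtained in one line with pandas' `groupby`; this is only a sketch reusing the `titanics` DataFrame and its `Pclass`/`Survived` columns already loaded in this notebook.

```
# Sketch: per-class survival rate via groupby. Because `Survived` is 0/1,
# the mean of the column within each class equals the survival rate, which
# should match the agesurv_1 / agesurv_2 / agesurv_3 ratios computed above.
survival_rate_by_class = titanics.groupby('Pclass')['Survived'].mean()
print(survival_rate_by_class)
```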
Agora analisando com respeito aos que não sobreviveramages_group = titanics[titanics['Survived']==0] agenotsurv_1=ages_group.query('Pclass==1')['Pclass'].count()/p1 agenotsurv_2=ages_group.query('Pclass==2')['Pclass'].count()/p2 agenotsurv_3=ages_group.query('Pclass==3')['Pclass'].count()/p3 list_ages = [agenotsurv_1,agenotsurv_2,agenotsurv_3] ages_0_to_80 = pd.Series(list_ages, index = ['1a Classe','2a Classe','3a Classe']) # plot chart labels=['1st Class','2nd Class','3rd Class'] #explode=(0,0,.00,.00,.0,.0) #plt.ylabel='' plt.figure(figsize=(9,7),dpi=100) ages_0_to_80.plot(kind='pie',subplots=True, autopct='%1.1f%%', startangle=135, shadow=False, fontsize=18,labels=labels,legend=True,x='V') #plt.ylabel='' plt.title('Titanic Accident Victims by Class',size=20,color='b',weight='bold') plt.axes().set_ylabel(' ') # plot table plt.legend(loc='upper left',fontsize=12) plt.show()/home/vagner/anaconda3/lib/python3.6/site-packages/matplotlib/cbook/deprecation.py:106: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance. warnings.warn(message, mplDeprecation, stacklevel=1)Do gráfico em forma de torta acima é explicitado que as primeiras classes tem taxa relativade mortalidade muito menor que a terceira classe! PERGUNTA 4 Com relação aos sobreviventes, como a idade está distribuída?ages_group = titanics[titanics['Survived']==1] ages_10=ages_group.query('Age > 0 & Age <10')['Parch'].count() ages_20=ages_group.query('Age > 10 & Age <20')['Parch'].count() ages_30=ages_group.query('Age > 20 & Age <30')['Parch'].count() ages_40=ages_group.query('Age > 30 & Age <40')['Parch'].count() ages_50=ages_group.query('Age > 40 & Age <50')['Parch'].count() ages_60=ages_group.query('Age > 50 ')['Parch'].count() list_ages = [ages_10,ages_20,ages_30,ages_40,ages_50,ages_60] ages_0_to_80 = pd.Series(list_ages, index = ['0-10','10-20','20-30','30-40', '40-50','50-80']) labels=['0-10','10-20','20-30','30-40', '40-50','50-80'] plt.figure(figsize=(9,7),dpi=100) ages_0_to_80.plot(kind='pie',subplots=True, autopct='%1.2f%%', startangle=90, shadow=False, fontsize=18,labels=labels,legend=True,x='V') plt.title('Titanic Accident Survivors by Age',size=20,color='b',weight='bold') plt.axes().set_ylabel(' ') plt.legend(loc='upper left',fontsize=11) plt.show()/home/vagner/anaconda3/lib/python3.6/site-packages/matplotlib/cbook/deprecation.py:106: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance. warnings.warn(message, mplDeprecation, stacklevel=1)Lab 5: House Candidate Positioning Graph Recreations In this lab, we'll be recreating Figure 1 from this paper titled [Candidate Positioning in U.S. Elections](https://www-jstor-org.libproxy.berkeley.edu/stable/2669364?seq=1metadata_info_tab_contents). The table we will be recreating shows the estimated issue positions of all Democrats and Republicans running for House positions in 2000 plotted against the conservatism of their district. 
We'll see that candidates tend to take positions according to the convervatism of their district with little deviation across party lines. Run the next cell to import the libraries we'll be using to do our analysisimport pandas as pd import json import matplotlib.pyplot as plt import seaborn as sns from scipy.stats import linregressBelow, you'll find a chart containing labels of the columns in the dataset we'll be working with for this lab: | Variable | Meaning ||--------|------------------------|| Gore | % of candidate's district that voted for Gore in '00 || Bush '00 | % of candidate's district that voted for Bush in '00 || location | candidate's state and district number || first_name | candidate's first name || middle_name | candidate's middle name | | last_name | candidate's last name | | votes_with_party_pct | % of times the candidate voted with their party in the previous Congressional session | | votes_against_party_pct | % of times the candidate voted against their party in the previous Congressional session | | party | candidate's party | | Member Position | 0-1 scale for how conservative a candidate's stances are (0=lowest conservative, 1=highest conservative) | Load Data For our analysis, we'll be using district-level data on House members in the 106th Congress and their district behavior from the 2000 presidential election. We'll begin by loading our file housedata.csv into a pandas dataframe named df.filename = "housedata.csv" df = pd.read_csv(filename) dfCleaning Data Before we can begin manipulating our data to recreate our table, we must first clean the data. The following cells will walk you through dropping unnecessary columns and removing null values that could disrupt our analysis. Drop Columns Since we are mainly interested in the voting patterns of the members and their districts, there are a few columns currently included in df that we can get rid of. First, we'll start with an example. Then, you'll get to write your own code to drop certain columns. Run the following cell to drop the "geoid" column:#Example df = df.drop(['State'], axis=1) dfNow it's your turn! In the following cell, write some code that drops the following columns: suffix, gender, geoid, district#Use this cell to drop the specified columns #... df = df.drop(['suffix', 'gender', 'geoid', 'district'], axis=1) dfGreat job! You have successfully dropped all unneeded columns. Removing Null Values Taking a look at the dataset, we'll see that some rows contain "NaN" in the last_name column. For the purpose of our analysis, we want to exclude these rows because they can disrupt what we are able to do with the data. The following cell provides an example for how you can drop rows containing "NaN" in the first_name column.#Example df.dropna(subset=['first_name'])Now it's your turn! Write some code that will drop rows containing "NaN" in the last_name column.#Use this cell to drop rows in the last_name column containing "NaN" #df = ... 
#df df = df.dropna(subset=['last_name']) dfGraphing the Data This section will walk you through how to create a scatterplot and fit linear regressions to our data.#Graphing the scatterplot sns.lmplot(x="Bush '00", y='Member Position', hue="party", data=df,markers=["o", "x"], palette="Set1") #Adjusting scatterplot labels sns.set(style='ticks') plt.xlabel("District Conservatism") plt.ylabel("Member's Position") plt.title("Member's Position in 2000 by District Conservatism") #Adding regression line analysis democrats = df[df.party == 'D'] republicans = df[df.party == 'R'] d = linregress(democrats["Bush '00"], democrats["Member Position"]) r = linregress(republicans["Bush '00"], republicans["Member Position"]) print("Democratic slope: " + str(d.slope)) print("Republican slope: " + str(r.slope))Democratic slope: 0.15687461066120845 Republican slope: 0.13827041986315086Example of recipe computation, model fit, predict* and conversion to raster using [distributed](http://distributed.dask.org/en/latest/), API of antares3 and [kale](https://github.com/kubeflow-kale/kale) functionality*Prediction is pixel wise. **Will use an already ingested and processed Landsat8 data via antares3** Some importsimport sys import os import json from datetime import datetime import matplotlib from matplotlib.patches import Patch from matplotlib import pyplot as plt import numpy as np import xarray as xr from shapely.geometry import Point import rasterio import dill import geopandas as gpd import fiona from affine import Affine from dask.distributed import Client from rasterio.features import rasterize import datacube from datacube.api import GridWorkflow from datacube.storage import masking from datacube.drivers.netcdf import write_dataset_to_netcdf from madmex.util.db import get_cmap_from_scheme from madmex.models import Tag from madmex.overlay.extractions import zonal_stats_xarray from madmex.io.vector_db import VectorDb from madmex.wrappers import gwf_query from madmex.modeling.supervised.xgb import Model from madmex.models import Tag from madmex.overlay.extractions import zonal_stats_xarray from madmex.util import randomword, mid_date, join_dicts from madmex.util.xarray import to_float, to_int from django.contrib.gis.geos.geometry import GEOSGeometry from madmex.models import PredictObjectRecipe computationdef recipe_computation(tile): crs = tile[1][0].geobox.crs ds = xr.combine_by_coords([GridWorkflow.load(x, dask_chunks={'x': 1200, 'y': 1200}) for x in tile[1]], data_vars='minimal', coords='minimal') ds.attrs['geobox'] = tile[1][0].geobox # Mask clouds, shadow, water, ice,... 
and drop qa layer clear = masking.make_mask(ds.pixel_qa, cloud=False, cloud_shadow=False, snow=False) ds_1 = ds.where(clear) ds_1 = ds_1.drop('pixel_qa') ds_1 = ds_1.apply(func=to_float, keep_attrs=True) # Compute vegetation indices ds_1['ndvi'] = ((ds_1.nir - ds_1.red) / (ds_1.nir + ds_1.red)) * 10000 ds_1['ndvi'].attrs['nodata'] = -9999 ds_1['ndmi'] = ((ds_1.nir - ds_1.swir1) / (ds_1.nir + ds_1.swir1)) * 10000 ds_1['ndmi'].attrs['nodata'] = -9999 # Run temporal reductions and rename DataArrays ds_mean = ds_1.mean('time', keep_attrs=True, skipna=True) ds_mean = ds_mean.rename({'blue': 'blue_mean', 'green': 'green_mean', 'red': 'red_mean', 'nir': 'nir_mean', 'swir1': 'swir1_mean', 'swir2': 'swir2_mean', 'ndmi': 'ndmi_mean', 'ndvi': 'ndvi_mean'}) # Compute min/max/std only for vegetation indices ndvi_max = ds_1.ndvi.max('time', keep_attrs=True, skipna=True) ndvi_max = ndvi_max.rename('ndvi_max') ndvi_max.attrs['nodata'] = -9999 ndvi_min = ds_1.ndvi.min('time', keep_attrs=True, skipna=True) ndvi_min = ndvi_min.rename('ndvi_min') ndvi_min.attrs['nodata'] = -9999 # ndmi ndmi_max = ds_1.ndmi.max('time', keep_attrs=True, skipna=True) ndmi_max = ndmi_max.rename('ndmi_max') ndmi_max.attrs['nodata'] = -9999 ndmi_min = ds_1.ndmi.min('time', keep_attrs=True, skipna=True) ndmi_min = ndmi_min.rename('ndmi_min') ndmi_min.attrs['nodata'] = -9999 # Load terrain metrics using same spatial parameters than sr dc = datacube.Datacube(app = 'landsat_madmex_003_%s' % randomword(5)) terrain = dc.load(product='srtm_cgiar_mexico', like=ds, time=(datetime(1970, 1, 1), datetime(2018, 1, 1)), dask_chunks={'x': 1200, 'y': 1200}) dc.close() # Merge dataarrays combined = xr.merge([ds_mean.apply(to_int), to_int(ndvi_max), to_int(ndvi_min), to_int(ndmi_max), to_int(ndmi_min), terrain]) combined.attrs['crs'] = crs combined.attrs['affine'] = Affine(*list(ds.affine)[0:6]) #write_dataset_to_netcdf(combined.compute(scheduler='threads'), nc_filename) return (tile[0], combined)Following [landsat_madmex_003.py](https://github.com/CONABIO/antares3/blob/develop/madmex/recipes/landsat_madmex_003.py)Also could be helpful:[1c_clusterization_for_agriculture_inecol](https://github.com/CONABIO/antares3-sandbox/blob/master/notebooks/agriculture_madmex_app/1c_clusterization_for_agriculture_inecol.ipynb)[1d_clusterization_for_agriculture_inecol](https://github.com/CONABIO/antares3-sandbox/blob/master/notebooks/agriculture_madmex_app/1d_clusterization_for_agriculture_inecol.ipynb)[2_clusterization_for_agriculture_inecol_intersect_with_area_of_interest.](https://github.com/CONABIO/antares3-sandbox/blob/master/notebooks/agriculture_madmex_app/2_clusterization_for_agriculture_inecol_intersect_with_area_of_interest.ipynb)os.environ.setdefault("DJANGO_ALLOW_ASYNC_UNSAFE", "true") region = 'Chiapas' products = ['ls8_espa_mexico'] begin = '2017-01-01' end = '2017-12-31' gwf_kwargs = {'region': region, 'begin': begin, 'end':end} #query dict_list = [] for prod in products: gwf_kwargs.update(product = prod) try: dict_list.append(gwf_query(**gwf_kwargs, view=False)) # Exception is in case one of the product hasn't been registered in the datacube except Exception as e: pass iterable = join_dicts(*dict_list, join='full').items() list_iter = list(iterable) list_iter_sorted = sorted(list_iter, key = lambda x: (x[0][0], x[0][1])) list_iter_sorted = list_iter_sorted[0:3] print(list_iter_sorted) os.environ.setdefault("DJANGO_ALLOW_ASYNC_UNSAFE", "true") futures_recipe = Client(n_workers=2,memory_limit='15GB', threads_per_worker=1).map(recipe_computation, 
list_iter_sorted, pure=False) results_recipe = [future.result() for future in futures_recipe] def model_fit(tup, training_data): tile, combined = tup loader = VectorDb() fc_train_0 = loader.load_training_from_dataset(dataset=combined, training_set=training_data, sample=1) fc_train_0 = list(fc_train_0) #Assign code level to this training data according to next scheme... scheme = "madmex" qs = Tag.objects.filter(scheme=scheme) tag_mapping = {x.id:x.numeric_code for x in qs} tag_id_list = [x['properties']['class'] for x in fc_train_0] fc_train = [{'geometry': x[0]['geometry'], 'properties': {'code': tag_mapping[x[1]]}, 'type': 'feature'} for x in zip(fc_train_0, tag_id_list)] X_train, y_train = zonal_stats_xarray(combined, fc_train, 'code') xgb_model = Model() xgb_model.fit(X_train, y_train) #filename_model = 'model_landsat8_chiapas_2017_madmex_31_clases_via_kale' + '_%d_%d' %(tile[0],tile[1]) + '.pkl' #filepath_model = os.path.join(path_result, filename_model) #with open(filepath_model, 'wb') as dst: # dill.dump(xgb_model, dst) return (tile, xgb_model, combined)Model Fitos.environ.setdefault("DJANGO_ALLOW_ASYNC_UNSAFE", "true") training_data = "train_chiapas_dummy" #path_result = "/shared_volume/land_cover_results_parallel" #if not os.path.exists(path_result): # os.makedirs(path_result) futures_model_fit = Client(n_workers=2,memory_limit='15GB', threads_per_worker=1).map(model_fit, results_recipe, **{'training_data': training_data}) results_model_fit = [future.result() for future in futures_model_fit]Predict and write raster to FSdef predict_and_write_raster(tup, path_result): tile, xgb_model, combined = tup arr_3d = combined.to_array().squeeze().values #squeeze to remove time dimension #because has length 1 arr_3d = np.moveaxis(arr_3d, 0, 2) shape_2d = (arr_3d.shape[0] * arr_3d.shape[1], arr_3d.shape[2]) arr_2d = arr_3d.reshape(shape_2d) predicted_array = xgb_model.predict(arr_2d) #write to FS predicted_array = predicted_array.reshape((arr_3d.shape[0], arr_3d.shape[1])) predicted_array = predicted_array.astype('uint8') rasterio_meta = {'width': predicted_array.shape[1], 'height': predicted_array.shape[0], 'transform': combined.affine, 'crs': combined.crs.crs_str, 'count': 1, 'dtype': 'uint8', 'compress': 'lzw', 'driver': 'GTiff', 'nodata': 0} filename_raster = 'raster_landsat8_chiapas_madmex_31_clases_pixel_wise_via_kale' + '_%d_%d' %(tile[0],tile[1]) + '.tif' filename_raster = os.path.join(path_result, filename_raster) with rasterio.open(filename_raster, 'w', **rasterio_meta) as dst: dst.write(predicted_array, indexes = 1) return filename_raster os.environ.setdefault("DJANGO_ALLOW_ASYNC_UNSAFE", "true") path_result = "/shared_volume/land_cover_results_parallel" if not os.path.exists(path_result): os.makedirs(path_result) futures_predict_and_write_raster = Client(n_workers=2,memory_limit='15GB', threads_per_worker=1).map(predict_and_write_raster, results_model_fit, **{'path_result': path_result}) results_predict_and_write_raster = [future.result() for future in futures_predict_and_write_raster] print(results_predict_and_write_raster)循环- 循环是一种控制语句块重复执行的结构- while 适用于广度遍历- for 开发中经常使用import os while 1: os.system(" say 你真是个小天才!!!")while 循环- 当一个条件保持真的时候while循环重复执行语句- while 循环一定要有结束条件,否则很容易进入死循环- while 循环的语法是: while loop-contunuation-conndition: Statement 示例:sum = 0i = 1while i <10: sum = sum + i i = i + 1sum_ = 0 i = 1 while i<10: sum_ = sum_ + i i = i +1 print(sum_)1 3 6 10 15 21 28 36 45错误示例:sum = 0i = 1while i <10: sum = sum + ii = i + 1- 一旦进入死循环可按 Ctrl + c 停止sum_ = 0 i = 1 while i <10: sum_ 
= sum_ + i i = i + 1EP:![](../Photo/143.png)![](../Photo/144.png) 验证码- 随机产生四个字母的验证码,如果正确,输出验证码正确。如果错误,产生新的验证码,用户重新输入。- 验证码只能输入三次,如果三次都错,返回“别爬了,我们小网站没什么好爬的”- 密码登录,如果三次错误,账号被锁定import random a = random.randint(0,9) b = random.randint(0,9) c = random.randint(0,9) d = random.randint(0,9) m = str(a)+str(b)+str(c)+str(d) print(m) mun = (input("请输入验证码:")) if mun==m: print("欢迎您!") elif mun!=m: print("输入错误,请重新输入!") mun = (input("请输入验证码:")) if mun==m: print("欢迎您!") elif mun!=m: print("输入错误,请重新输入!") if mun==m: print("欢迎您!") else: print("别爬了!") import random i = 0 while i <3 : yanzhengma = str(random.randint(1000,9999)) print('验证码是:',yanzhengma) input_ = input('>>') if yanzhengma == input_: print('验证码正确') break else: print('请再输入') i +=1 # if i ==3: # print('验证码次数超限') else: print('验证码次数超限') import random i = 0 while i <3 : yanzhengma1 = chr(random.randint(97,122)) yanzhengma2 = chr(random.randint(97,122)) yanzhengma3 = chr(random.randint(97,122)) yanzhengma4 = chr(random.randint(97,122)) yanzhengma = yanzhengma1 + yanzhengma2+ yanzhengma3 + yanzhengma4 print('验证码是:',yanzhengma) input_ = input('>>') if yanzhengma == input_: print('验证码正确') break else: print('请再输入') i +=1 # if i ==3: # print('验证码次数超限') else: print('验证码次数超限') import random i = 0 while i <3 : list_ = ['1','2','3','4','5','6','7','8','9','0','q','w','e','r','t','y','u','i','o','p','a','s','d','f','g','h','j','k','l','z','x','c','v','n','m'] random.shuffle(list_) yanzhengma = ''.join(list_[0:4]) print('验证码是:',yanzhengma) input_ = input('>>') if yanzhengma == input_: print('验证码正确') break else: print('请再输入') i +=1 # if i ==3: # print('验证码次数超限') else: print('验证码次数超限')验证码是: y0cp >>y0cp 验证码正确尝试死循环 实例研究:猜数字- 你将要编写一个能够随机生成一个0到10之间的且包括两者的数字程序,这个程序- 提示用户连续地输入数字直到正确,且提示用户输入的数字是过高还是过低 使用哨兵值来控制循环- 哨兵值来表明输入的结束- ![](../Photo/54.png) 警告![](../Photo/55.png) for 循环- Python的for 循环通过一个序列中的每个值来进行迭代- range(a,b,k), a,b,k 必须为整数- a: start- b: end- k: step- 注意for 是循环一切可迭代对象,而不是只能使用range 在Python里面一切皆对象 EP:- ![](../Photo/145.png)i = 1 sum_ = 0 while sum_< 10000: sum_ +=i i+=1 print(sum_) mun =0 sm = 0 i=0 while i <5: mun = eval(input("Enter :")) sm += mun i+=1 print("mun is"+str(sm)) print("i is "+str(i)) sum_ =0 for i in range(1001): sum_ +=i print(sum_) sum_ =0 i=0 while i<1001: sum_+=i i+=1 print(sum_)500500嵌套循环- 一个循环可以嵌套另一个循环- 每次循环外层时,内层循环都会被刷新重新完成循环- 也就是说,大循环执行一次,小循环会全部执行一次- 注意:> - 多层循环非常耗时 - 最多使用3层循环i= 1 j = 1 for i in range(10): for j in range(10): print(i,j)0 0 0 1 0 2 0 3 0 4 0 5 0 6 0 7 0 8 0 9 1 0 1 1 1 2 1 3 1 4 1 5 1 6 1 7 1 8 1 9 2 0 2 1 2 2 2 3 2 4 2 5 2 6 2 7 2 8 2 9 3 0 3 1 3 2 3 3 3 4 3 5 3 6 3 7 3 8 3 9 4 0 4 1 4 2 4 3 4 4 4 5 4 6 4 7 4 8 4 9 5 0 5 1 5 2 5 3 5 4 5 5 5 6 5 7 5 8 5 9 6 0 6 1 6 2 6 3 6 4 6 5 6 6 6 7 6 8 6 9 7 0 7 1 7 2 7 3 7 4 7 5 7 6 7 7 7 8 7 9 8 0 8 1 8 2 8 3 8 4 8 5 8 6 8 7 8 8 8 9 9 0 9 1 9 2 9 3 9 4 9 5 9 6 9 7 9 8 9 9EP:- 使用多层循环完成9X9乘法表- 显示50以内所有的素数for i in range(1,10): for j in range(1,i+1): print('{}x{}={}\t'.format(i,j,i*j),end="") print() for i in range(2,51): for j in range(2,i): if i%j == 0: break else: print(i)2 3 5 7 11 13 17 19 23 29 31 37 41 43 47关键字 break 和 continue- break 跳出循环,终止循环- continue 跳出此次循环,继续执行 注意![](../Photo/56.png)![](../Photo/57.png) Homework- 1 ![](../Photo/58.png)import math mun =int (input("Enter an integer ,the input ends if it is 0:")) if mun==0: i=0 #判断输入的第一个数是0、负数、还是正数 j=0 sum_1 =sum_2=0 elif mun>0: i=1 #如果输入的第一数是正数,则i=1、j=0 j=0 sum_1=mun sum_2=0 else: j=1 # 如果输入的第一个数是负数,则i=0、j=1 i=0 sum_1=0 sum_2=mun while mun!=0: mun = int(input("Enter an integer ,the input ends if it is 0:")) if mun>0: i+=1 
sum_1+=mun elif mun<0: j+=1 sum_2+=mun else: m=(sum_1+sum_2)/(i+j) print("它们的和:"+str(sum_1+sum_2) ) print("正数"+str(i)+"个") print("负数"+str(j)+"个") print("平均值:"+str(m))Enter an integer ,the input ends if it is 0:1 Enter an integer ,the input ends if it is 0:2 Enter an integer ,the input ends if it is 0:3 Enter an integer ,the input ends if it is 0:-2 Enter an integer ,the input ends if it is 0:0 它们的和:4 正数3个 负数1个 平均值:1.0- 2![](../Photo/59.png)sum_=10000 j=0 i=0 sum_2=0 sum_1=0 while i<10: sum_=(sum_*0.05)+sum_ i+=1 while j<4: sum_1=sum_ sum_1=(sum_1*0.05)+sum_1 sum_2 +=sum_1 j+=1 print("十年以后的学费:",sum_) print("十年后大学四年的学费:",sum_2)十年以后的学费: 16288.946267774414 十年后大学四年的学费: 68413.57432465254- 3![](../Photo/58.png)import math mun =int (input("Enter an integer ,the input ends if it is 0:")) if mun==0: i=0 #判断输入的第一个数是0、负数、还是正数 j=0 sum_1 =sum_2=0 elif mun>0: i=1 #如果输入的第一数是正数,则i=1、j=0 j=0 sum_1=mun sum_2=0 else: j=1 # 如果输入的第一个数是负数,则i=0、j=1 i=0 sum_1=0 sum_2=mun while mun!=0: mun = int(input("Enter an integer ,the input ends if it is 0:")) if mun>0: i+=1 sum_1+=mun elif mun<0: j+=1 sum_2+=mun else: m=(sum_1+sum_2)/(i+j) print("它们的和:"+str(sum_1+sum_2) ) print("正数"+str(i)+"个") print("负数"+str(j)+"个") print("平均值:"+str(m))Enter an integer ,the input ends if it is 0:1 Enter an integer ,the input ends if it is 0:2 Enter an integer ,the input ends if it is 0:3 Enter an integer ,the input ends if it is 0:-2 Enter an integer ,the input ends if it is 0:-3 Enter an integer ,the input ends if it is 0:0 它们的和:1 正数3个 负数2个 平均值:0.2- 4![](../Photo/60.png)m=0 for i in range(100,1000): if i%5==0 and i%6==0: m+=1 print(i,end=" ",) if m%10==0: print("\t") else: i+=1120 150 180 210 240 270 300 330 360 390 420 450 480 510 540 570 600 630 660 690 720 750 780 810 840 870 900 930 960 990- 5![](../Photo/61.png)n=0 while n**2<12000: n+=1 print(n) y =0 while y**3<12000: y+=1 print(y-1)110 22- 6![](../Photo/62.png) - 7![](../Photo/63.png)i=0 sum_=0 while i<50001: sum_+=1/(i+1) i+=1 print(sum_) n=50000 sum_=0 while n>0: sum_+=1/n n -=1 print(sum_)11.397003949278519- 8![](../Photo/64.png)a=1 b=3 sum_=0 while a<98: sum_+=(a/b) a+=2 b+=2 print(sum_)45.124450303050196- 9![](../Photo/65.png) - 10 ![](../Photo/66.png) - 11![](../Photo/67.png)for a in range(1,7): for b in range(1,7): print(a,b)1 1 1 2 1 3 1 4 1 5 1 6 2 1 2 2 2 3 2 4 2 5 2 6 3 1 3 2 3 3 3 4 3 5 3 6 4 1 4 2 4 3 4 4 4 5 4 6 5 1 5 2 5 3 5 4 5 5 5 6 6 1 6 2 6 3 6 4 6 5 6 6Link Prediction with NetworKit Link prediction is concerned with estimating the probability of the existence of edges between nodes in a graph. The `linkprediction` module in NetworKit provides sampling algorithms as well link prediction algorithms.This notebook introduces a several link prediction algorithms available in NetworKit. It shows how to calculate link prediction measures, and how to use the sampling algorithms in combination with link prediction algorithms.import networkit as nkLink prediction algorithms Adamic/Adar Index The Adamic/Adar Index predicts links in a graph according to the amount of shared links between two nodes. The index sums up the reciprocals of the logarithm of the degree of all common neighbors between two nodes `u` and `v`.The constructor, [AdamicAdarIndex(G)](https://networkit.github.io/dev-docs/python_api/linkprediction.html?highlight=adamicnetworkit.linkprediction.AdamicAdarIndex) expects a graph as input. 
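Written out as a formula (this is the standard Adamic/Adar definition matching the verbal description above; $N(x)$ denotes the set of neighbors of node $x$):

$$
\mathrm{AA}(u, v) = \sum_{w \in N(u) \cap N(v)} \frac{1}{\log |N(w)|}
$$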
The `run(u, v)` method takes a pair of nodes `(u, v)` as input and returns the Adamic/Adar Index of the given pair of nodes.# Read graph G = nk.graphio.readGraph("../input/karate.graph", nk.Format.METIS) # Initialize algorithm aai = nk.linkprediction.AdamicAdarIndex(G) # Get Adamic/Adar Index of two nodes aai.run(14, 32)Algebraic Distance Index The Algebraic Distance Index assigns a distance value to pairs of nodes according to their structural closeness in the graph.The constructor [AlgebraicDistanceIndex(G, numberSystems, numberIterations, omega=0.5, norm= 2)](https://networkit.github.io/dev-docs/python_api/linkprediction.html?highlight=algebnetworkit.linkprediction.AlgebraicDistanceIndex) expects as inputs a graph, the number of systems to use for algebraic iteration, and the number of iterations in each system. `omega` is the over-relaxation parameter and `norm` is the norm factor of the extended algebraic distance. Maximum norm is realized by setting `norm` to 0.After initialization, call the `preprocess()` method. Afterwards, call the `run` method: it takes a pair of nodes `(u, v)` as input and returns the Algebraic Distance Index of the given pair of nodes.# Initialize the algorithm adi = nk.linkprediction.AlgebraicDistanceIndex(G, 30, 200) adi.preprocess() # Get the algebraic distance index of two nodes adi.run(1, 32)Common Neighbors Index The Common Neighbors Index calculates the number of common neighbors of a pair of nodes in a graph. The constructor [CommonNeighborsIndex(G)](https://networkit.github.io/dev-docs/python_api/linkprediction.html?highlight=commonnetworkit.linkprediction.CommonNeighborsIndex), expects a graph as input. The `run(u, v)` method takes as input a pair of nodes `(u, v)` and returns the number of common neighbors between `u` and `v`.# Initialize the algorithm cni = nk.linkprediction.CommonNeighborsIndex(G) # Calculate common neighbors between two nodes cni.run(14, 15)Neighbors Measure Index The Neighbors Measure Index returns the number of connections between neighbors of the given nodes `u` and `v`.The constructor [NeighborsMeasureIndex(G)](https://networkit.github.io/dev-docs/python_api/linkprediction.html?highlight=neighborsmenetworkit.linkprediction.NeighborsMeasureIndex) expects a graph as input. The `run(u, v)` takes a pair of nodes `(u, v)` as input and returns the neighbors measure index between `u` and `v`.# Initialize the algorithm nmi = nk.linkprediction.NeighborsMeasureIndex(G) # Calculate the neighbors measure index between two nodes nmi.run(14, 32)Preferential Attachment Index The Preferential Attachment Index suggests that the more connected a node is, the more likely it is to receive new links.The constructor [PreferentialAttachmentIndex(G)](https://networkit.github.io/dev-docs/python_api/linkprediction.html?highlight=preferentialnetworkit.linkprediction.PreferentialAttachmentIndex) expects a graph as input. The `run(u, v)` method takes a pair of nodes `(u, v)` as input and returns the product of the cardinalities of the neighborhoods of nodes `u` and `v`.# Initialize the algorithm pai = nk.linkprediction.PreferentialAttachmentIndex(G) # Calculate the preferential attachment index between two nodes pai.run(14, 32)Resource Allocation Index The constructor [ResourceAllocationIndex(G)](https://networkit.github.io/dev-docs/python_api/linkprediction.html?highlight=resourcenetworkit.linkprediction.ResourceAllocationIndex) expects a graph as input. 
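For reference, the Resource Allocation Index is conventionally defined as the sum, over the common neighbors $w$ of $u$ and $v$, of the reciprocal of the degree of $w$; this standard definition is added here as background, since it is not spelled out in the text above:

$$
\mathrm{RA}(u, v) = \sum_{w \in N(u) \cap N(v)} \frac{1}{|N(w)|}
$$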
The `run(u, v)` method takes a pair of nodes `(u, v)` as input and returns the Resource Allocation Index between `u` and `v`.# Initialize the algorithm rai = nk.linkprediction.ResourceAllocationIndex(G) # Calculate the resource allocation index between two nodes rai.run(14, 32)Same Community Index The Same Community Index determines whether two nodes `u` and `v` are in the same community.The constructor [SameCommunityIndex(G)](https://networkit.github.io/dev-docs/python_api/linkprediction.html?highlight=samecommunitynetworkit.linkprediction.SameCommunityIndex) expects a graph as input. The `run(u, v)` method takes a pair of nodes `(u, v)` as input and returns `1` if `u` and `v` are in the same community, `0` otherwise.# Initialize the algorithm sni = nk.linkprediction.SameCommunityIndex(G) # Compute the Same Community Index between two pairs of nodes print(sni.run(14, 32)) print(sni.run(0, 32))Total Neighbors Index The Total Neighbors Index returns the number of nodes in the neighborhood-union of nodes `u` and `v`.The constructor [TotalNeighborsIndex(G)](https://networkit.github.io/dev-docs/python_api/linkprediction.html?highlight=totalneighbnetworkit.linkprediction.TotalNeighborsIndex) expects a graph as input. The `run(u, v)` method takes a pair of nodes `(u, v)` as input and returns the Total Neighbors Index between `u` and `v`.# Initialize the algorithm tni = nk.linkprediction.TotalNeighborsIndex(G) # Calculate the Total Neighbors Index between two nodes tni.run(14, 32)Link sampling and link prediction This section shows how to use the training algorithms in combination with link prediction algorithms. In this example, we use the Random Link Sampler, the Missing Links Finder and the Katz Index algorithms. The Katz index assigns a similarity score to a pair of nodes. This score is based on the weighted sum of the number of paths with length $l$, where $l$ is smaller than a given limit.[KatzIndex(G=None, maxPathLength=5, dampingValue=0.005)](https://networkit.github.io/dev-docs/python_api/linkprediction.html?highlight=katzindexnetworkit.linkprediction.KatzIndex) takes as inputs a graph (optional), the maximum length of paths to consider, and the damping value. [RandomLinkSampler(G, numLinks)](https://networkit.github.io/dev-docs/python_api/linkprediction.html?highlight=randomlinksamplernetworkit.linkprediction.RandomLinkSampler) provides methods to randomly sample a number of edges from a given graph. `numLinks` is the number of edges the returned graph should have.[MissingLinksFinder(G)](https://networkit.github.io/dev-docs/python_api/linkprediction.html?highlight=missingnetworkit.linkprediction.MissingLinksFinder) finds the missing edges in the given graph. The `findAtDistance(k)` function returns all missing links in the graph. The absent links to find are narrowed down by providing a distance that the nodes of the missing links should have. 
For example in case of distance 2 only node-pairs that would close a triangle in the given graph get returned.# Read graph G = nk.graphio.readGraph("../input/jazz.graph", nk.Format.METIS) # Sample graph trainingGraph = nk.linkprediction.RandomLinkSampler.byPercentage(G, 0.7) # Find missing links missingLinks = nk.linkprediction.MissingLinksFinder(trainingGraph).findAtDistance(5) # Run link prediticion predictions = nk.linkprediction.KatzIndex(G).runOn(missingLinks) # Print the first 5 predictions for p in predictions[:5]: print(p)Fig 1df = pd.read_excel("raw/Book2.xlsx").dropna(axis=1, how="all") dfs = [] for c in df.columns: dx = df[[c]] dx["x"] = dx[c].str.split(",").str[0] dx["y"] = dx[c].str.split(",").str[1] dx["year"] = c dx = dx.drop(c, axis=1) dx["x"] = dx["x"].astype(float).round(0) dfs.append(dx) df = pd.concat(dfs).dropna() df = ( df.set_index(["x", "year"]).unstack()["y"].reset_index().astype(float).interpolate() ) df["bin"] = 3.5 + df["bin"] df["x"] = ( ["< 6"] + [str(i * 3) + "-" + str((i + 1) * 3) for i in range(2, 14)] + ["42 <"] ) df['z']=0 f = "fig1_productivity" f1 = eco_git_path + f + ".csv" df.to_csv("data/" + f + ".csv") f += local_suffix open("visualisation/" + f + ".html", "w").write( vega_embed.replace( "JSON_PATH", f1.replace("/data/", "/visualisation/").replace(".csv", ".json") ) ) if LOCAL: f1 = df readme = "### " + f + '\n!["' + f + '"](visualisation/' + f + '.png "' + f + '")\n\n' df.head() base = alt.Chart(f1).encode( x=alt.X( "x:N", sort=[], axis=alt.Axis( grid=False, titleAlign="right", titleAnchor="end", titleY=-15, title="Temperature bin (°C)", labelColor=colors["eco-gray"], titleColor=colors["eco-gray"], tickColor=colors["eco-gray"], domainColor=colors["eco-gray"], tickCount=7, orient="bottom", labelAngle=0, ), # scale=alt.Scale(domain=[1, 14], nice=False), ) ) line = base.mark_line(color=colors["eco-turquiose"]).encode( y=alt.Y( "a:Q", sort=[], axis=alt.Axis( grid=True, title="", titleAnchor="start", labelColor=colors["eco-gray"], titleColor=colors["eco-gray"], tickColor=colors["eco-gray"], domainColor=colors["eco-gray"], gridColor=colors["eco-gray"], gridOpacity=0.1, titleFontSize=10, titleFontWeight="normal", ticks=False, labelAlign="left", labelBaseline="middle", labelPadding=-5, labelOffset=-10, titleX=22, titleY=7, titleBaseline="bottom", titleAngle=0, titleAlign="left", tickCount=7, format=".1f", ), scale=alt.Scale(domain=[-3.0, 1]), ) ) zeroLine=base.mark_line(color=colors['eco-gray'],opacity=0.5).encode(y='z:Q') area = line.mark_area(opacity=0.2, color=colors["eco-turquiose"]).encode( y="bottom:Q", y2="top:Q" ) bars = base.mark_bar(width=20, opacity=0.5,color=colors["eco-light-blue"]).encode( y=alt.Y("bin:Q", axis=None), x=alt.X( "x:N", sort=[], axis=alt.Axis( grid=False, titleAlign="right", titleAnchor="end", titleY=-15, title="", labelColor=colors["eco-gray"], titleColor=colors["eco-gray"], tickColor=colors["eco-gray"], domainColor=colors["eco-gray"], tickCount=7, orient="bottom", labelAngle=0, ), # scale=alt.Scale(domain=[1, 14], nice=False), ), ) title = alt.TitleParams( "Change in productivity at different temperatures", subtitle=["Change in output per ha."], anchor="start", align="left", dx=5, dy=-5, fontSize=12, subtitleFontSize=11, subtitleFontStyle="italic", ) layer1 = ( ( (zeroLine+area + line).properties(height=300, width=400) & bars.properties(height=50, width=400) ) .configure_view(stroke=None) .properties(title=title) ) layer1.save("visualisation/" + f + ".json") layer1.save("visualisation/" + f + ".png",scale_factor=2.0) 
layer1.save("visualisation/" + f + ".svg") open("README.md", "w").write(readme) layer1 theme = "_dark" layer1 = ( ( (zeroLine+area + line).properties(height=300, width=400) & bars.properties(height=50, width=400) ) .configure_view(stroke=None) .properties(title=title) ) layer1 = layer1.configure_axisYQuantitative(labelFontSize=12) layer1 = layer1.configure_axisXQuantitative(labelFontSize=12) layer1.config.font="Georgia" layer1.config.background=colors["eco-background"] layer1.config.view.stroke=None layer1.title.fontSize = 14 layer1.title.subtitleFontSize = 12 layer1.title.dy -= 2 layer1.title.color = colors["eco-dot"] layer1.title.subtitleColor = colors["eco-dot"] layer1.save("visualisation/" + f + theme + ".json") layer1.save("visualisation/" + f + theme + ".png", scale_factor=2.0) layer1.save("visualisation/" + f + theme + ".svg") readme = re.sub(f, f + theme, readme) open("README.md", "a").write(readme) layer1Fig 2# df = pd.read_excel("raw/Book1.xlsx").dropna(axis=1, how="all") # dfs = [] # for c in df.columns: # dx = df[[c]] # dx["x"] = dx[c].str.split(",").str[0] # dx["y"] = dx[c].str.split(",").str[1] # dx["year"] = c # dx = dx.drop(c, axis=1) # dx["x"] = dx["x"].astype(float).round(0) # dfs.append(dx) # df = pd.concat(dfs,axis=1).dropna() # df.to_excel('raw/fig2.xlsx') df=pd.read_excel('raw/fig2.xlsx') df['z']=0 df=df[::2] f = "fig2_yield" f2 = eco_git_path + f + ".csv" df.to_csv("data/" + f + ".csv") f += local_suffix open("visualisation/" + f + ".html", "w").write( vega_embed.replace( "JSON_PATH", f2.replace("/data/", "/visualisation/").replace(".csv", ".json") ) ) if LOCAL: f2 = df readme = "### " + f + '\n!["' + f + '"](visualisation/' + f + '.png "' + f + '")\n\n' df.head() base = alt.Chart(f2).encode( x=alt.X( "x:N", sort=[], axis=alt.Axis( grid=False, titleAlign="right", titleAnchor="end", titleY=-15, title="Temperature (°C)", labelColor=colors["eco-gray"], titleColor=colors["eco-gray"], tickColor=colors["eco-gray"], domainColor=colors["eco-gray"], tickCount=7, orient="bottom", labelAngle=0, ), # scale=alt.Scale(domain=[1, 14], nice=False), ) ) line = base.mark_line(color=colors["eco-turquiose"]).encode( y=alt.Y( "a:Q", sort=[], axis=alt.Axis( grid=True, title="log-yield (bu per hectare)", titleAnchor="start", labelColor=colors["eco-gray"], titleColor=colors["eco-gray"], tickColor=colors["eco-gray"], domainColor=colors["eco-gray"], gridColor=colors["eco-gray"], gridOpacity=0.1, titleFontSize=10, titleFontWeight="normal", ticks=False, labelAlign="left", labelBaseline="middle", labelPadding=-5, labelOffset=-10, titleX=30, titleY=35, titleBaseline="bottom", titleAngle=0, titleAlign="left", tickCount=4, format=".2f", ), # scale=alt.Scale(domain=[-3.0, 1]), ) ) zeroLine=base.mark_line(color=colors['eco-gray'],opacity=0.5).encode(y='z:Q') area = line.mark_area(opacity=0.2, color=colors["eco-turquiose"]).encode( y="bottom:Q", y2="top:Q" ) title = alt.TitleParams( "Effects of temperature on wheat yields", subtitle=["Data from South Africa. Source: Shew et al. 
(2020)"], anchor="start", align="left", dx=5, dy=-5, fontSize=12, subtitleFontSize=11, subtitleFontStyle="italic", ) layer1 = ( ( (zeroLine+area + line).properties(height=300, width=400) ) .configure_view(stroke=None) .properties(title=title) ) layer1.save("visualisation/" + f + ".json") layer1.save("visualisation/" + f + ".png", scale_factor=2.0) layer1.save("visualisation/" + f + ".svg") open("README.md", "a").write(readme) layer1 line.encoding.y.axis.titleFontSize = 12 line.encoding.y.axis.titleY += 5 line.encoding.y.axis.titleX += 3 theme = "_dark" layer1 = ( ( (zeroLine+area + line).properties(height=300, width=400) ) .configure_view(stroke=None) .properties(title=title) ) layer1 = layer1.configure_axisYQuantitative(labelFontSize=12) layer1 = layer1.configure_axisXQuantitative(labelFontSize=12) layer1.config.font="Georgia" layer1.config.background=colors["eco-background"] layer1.config.view.stroke=None layer1.title.fontSize = 14 layer1.title.subtitleFontSize = 12 layer1.title.dy -= 2 layer1.title.color = colors["eco-dot"] layer1.title.subtitleColor = colors["eco-dot"] layer1.save("visualisation/" + f + theme + ".json") layer1.save("visualisation/" + f + theme + ".png", scale_factor=2.0) layer1.save("visualisation/" + f + theme + ".svg") readme = re.sub(f, f + theme, readme) open("README.md", "a").write(readme) layer1Fig 3df = pd.read_csv("raw/data_noaa.csv", skiprows=4) f = "fig3_africa" f3 = eco_git_path + f + ".csv" df.to_csv("data/" + f + ".csv") f += local_suffix open("visualisation/" + f + ".html", "w").write( vega_embed.replace( "JSON_PATH", f3.replace("/data/", "/visualisation/").replace(".csv", ".json") ) ) if LOCAL: f3 = df readme = "### " + f + '\n!["' + f + '"](visualisation/' + f + '.png "' + f + '")\n\n' df.head() base = alt.Chart(f3).encode( x=alt.X( "Year:Q", sort=[], axis=alt.Axis( grid=False, titleAlign="center", titleAnchor="middle", title="", labelColor=colors["eco-gray"], titleColor=colors["eco-gray"], tickColor=colors["eco-gray"], domainColor=colors["eco-gray"], tickCount=7, orient="bottom", labelAngle=0, format=".0f", ), scale=alt.Scale(domain=[1896, 2025], nice=False), ) ) bars1 = ( base.mark_bar(color=colors["eco-dot"]) .encode( y=alt.Y( "Value:Q", sort=[], axis=alt.Axis( grid=True, title="°C", titleAnchor="start", labelColor=colors["eco-gray"], titleColor=colors["eco-gray"], tickColor=colors["eco-gray"], domainColor=colors["eco-gray"], gridColor=colors["eco-gray"], gridOpacity=0.1, titleFontSize=10, titleFontWeight="normal", ticks=False, labelAlign="left", labelBaseline="middle", labelPadding=-5, labelOffset=-10, titleX=22, titleY=7, titleBaseline="bottom", titleAngle=0, titleAlign="left", tickCount=7, format=".1f", ), ) ) .transform_filter("datum.Value>0") ) bars2 = ( base.mark_bar(color=colors["eco-light-blue"]) .encode(y=alt.Y("Value:Q", sort=[])) .transform_filter("datum.Value<0") ) axis1 = ( alt.Chart(pd.DataFrame([{"x": 1895, "y": 0}, {"x": 2025, "y": 0}])) .mark_line(color=colors["eco-gray"]) .encode(x=alt.X("x:Q", sort=[]), y="y:Q") ) label = ( alt.Chart(pd.DataFrame([{"x": 2025, "y": 0, "t": "20th century average"}])) .mark_text(dy=5, align="right", baseline="top", color=colors["eco-gray"]) .encode(x="x:Q", y="y:Q", text="t:N") ) title = alt.TitleParams( "Temperature anomaly in Africa", subtitle=[ "January-December temperatures compared to the 20th century average. 
Source: NOAA" ], anchor="start", align="left", dx=5, dy=-5, fontSize=12, subtitleFontSize=11, subtitleFontStyle="italic", ) layer1 = ( ((bars1 + bars2 + axis1 + label).properties(height=300, width=400)) .configure_view(stroke=None) .properties(title=title) ) layer1.save("visualisation/" + f + ".json") layer1.save("visualisation/" + f + ".png", scale_factor=2.0) layer1.save("visualisation/" + f + ".svg") open("README.md", "a").write(readme) layer1 theme = "_dark" bars1.encoding.y.axis.titleFontSize = 12 bars1.encoding.y.axis.titleY += 5 bars1.encoding.y.axis.titleX += 3 label.mark.fontSize = 12 layer1 = ( ((bars1 + bars2 + axis1 + label).properties(height=300, width=400)) .configure(font="Georgia", background=colors["eco-background"]) .configure_view(stroke=None) .properties(title=title) ) layer1 = layer1.configure_axisYQuantitative(labelFontSize=12) layer1 = layer1.configure_axisXQuantitative(labelFontSize=12) layer1.title.fontSize = 14 layer1.title.subtitleFontSize = 12 layer1.title.dy -= 2 layer1.title.color = colors["eco-dot"] layer1.title.subtitleColor = colors["eco-dot"] layer1.save("visualisation/" + f + theme + ".json") layer1.save("visualisation/" + f + theme + ".png", scale_factor=2.0) layer1.save("visualisation/" + f + theme + ".svg") readme = re.sub(f, f + theme, readme) open("README.md", "a").write(readme) layer1Fig 4 a# https://impactlab.org/map/#usmeas=absolute&usyear=1981-2010&gmeas=absolute&gyear=1986-2005&tab=globalb# https://climateknowledgeportal.worldbank.org/Minimal examples of how to use MSA Background MSA stands for **"Multiperturbation Shapley value Analysis"** and as the name suggests, it's comprised of "multiple perturbations" and "Shapley value". Fundamentally, it uses a dataset of multi-element perturbation to estimate Shapley values of each element with respect to a global function. I'll refer you to these papers for technical and conceptual details:- , , , and . 2004. “Causal Localization of Neural Function: The Shapley Value Method.” Neurocomputing 58-60 (June): 215–22.- , , , , and . 2004. “Fair Attribution of Functional Contribution in Artificial and Biological Networks.” Neural Computation 16 (9): 1887–1915.- . 2007. “Shapley Ratings in Brain Networks.” Frontiers in Neuroinformatics 1 (NOV): 1–9.- Toba, ., , , , , , , , , and . 2020. “Game Theoretical Mapping of White Matter Contributions to Visuospatial Attention in Stroke Patients with Hemineglect.” Human Brain Mapping, no. February: 1–25.- Zavaglia, Melissa, and . 2016. “Causal Functional Contributions and Interactions in the Attention Network of the Brain: An Objective Multi-Perturbation Analysis.” Brain Structure & Function 221 (5): 2553–68.The toolbox is designed to handle a large variety of systems. All it needs is a list of elements, let's say node labels, node indices, tuples (connections between nodes for examples), but I didn't really test things that are not networks! For example, if you're interested in explainable machine learning there's already a very versatile toolbox called SHAP that calculates the Shapley values of model's input features. MSA is traditionally used as a brain-mapping tool (I mean, just look at those papers) and I'm also a neuroscientist so there's a bias towards networks and lesion-mapping conceptualization. These said, let's see how MSA works using some small networks. Defining the network and the game# Uncomment the line below if you don't have them. 
# !pip install networkx matplotlib seaborn # Imports n stuff import matplotlib as mpl import matplotlib.pyplot as plt import networkx as nx import numpy as np import seaborn as sns #--------- from msapy import msa, utils as ut, plottings as pl #--------- mpl.rcParams['font.family'] = 'sans-serif' mpl.rcParams['font.sans-serif'] = 'GothamSSm' # Will skip if you don't have Gotham typeface family mpl.rcParams['pdf.fonttype'] = 42 mpl.rcParams['font.size'] = 10 CM = 1 / 2.54 SEED = 111 FIGPATH = "figures/minimal/"As mentioned, all MSA needs is a list of elements, that's not quite true tho it needs a game. Players (elements) should play the game and well, the game can be anything and in a way, that's the beauty of it. Here, I will define a game called "ge" that stands for "average global efficiency" (yeah it should be "age" but that would be weird!), which is a graph metric. See **[this page](https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.efficiency_measures.global_efficiency.htmlnetworkx.algorithms.efficiency_measures.global_efficiency)** for more detail.So you need to define a game, a function that encapsulates the procedure you care about while the system is being lesioned. For example, if you're using MSA in the classic sense of brain mapping, the game function is where you perform the "cognitive experiment". Keep in mind a few things:1. Take care of the edge-cases. MSA goes through all the possible ways you can lesion your system so if you're interested in the contribution (shapley value) of each node to information flow in a network, then in some of these combinations of lesions the network might end up with just a bunch of isolated nodes. Like define conditions, if len(x) < y return 0. 2. Copy your stuff before feeding it to the toolbox. Otherwise (depending on how you're lesioning the system) it might leak to the next step and messes with everything.3. The result should be "one value", a global measure that describes the system with one value.def ge(complements, graph): if len(complements) < 0: # grand coalition is in, nothing to lesion so work with the intact network. return nx.global_efficiency(graph) elif len(complements) == len(graph): # everything is gone, why calling nx.global_efficiency? return 0.0 else: # lesion the system, calculate global efficiency lesioned = graph.copy() lesioned.remove_nodes_from(complements) return nx.global_efficiency(lesioned)Next, I'll make a graph with an intuitive topology, i.e., a balanced tree. Intuitively, the farther we go from the root, the smaller shapley values we will have. 
It's possible that the outer branch ends with negative shapley values since removing them shortenes the overal paths and improves global efficiency.G = nx.balanced_tree(3,3) fig,ax = plt.subplots() fig.set_dpi(150) fig.set_size_inches((12*CM,12*CM)) ax = nx.draw(G,with_labels=True,node_color='#FFE48D',font_size=8) plt.savefig(f"{FIGPATH}balanced_tree.pdf",dpi=300,bbox_inches='tight')MSA in actionBriefly, we'll estimate shapley values by first permuting the elements N times, then producing an instruction for which combinations to lesion.node_perms = msa.make_permutation_space(elements=list(G.nodes), n_permutations=1_000,random_seed=SEED) print(np.shape(node_perms)) print(f'Nodes: {list(G.nodes)}\n') print(f'Permuted nodes: {node_perms[0]}') node_combs_template = msa.make_combination_space(permutation_space=node_perms) node_compl_template = msa.make_complement_space(combination_space=node_combs_template, elements=list(G.nodes)) print(f'Number of lesion combinations: {len(node_combs_template)}')Number of lesion combinations: 36080Here we use the parallelized_take_contributions to actually play games and fill the values. We then use this multi-site perturbation dataset to calculate shapley values and sort them.global_eff,_ = ut.parallelized_take_contributions(complement_space=node_compl_template, combination_space=node_combs_template, objective_function=ge, objective_function_params={'graph': G}) global_eff_shapley = msa.make_shapley_values(contributions=global_eff, permutation_space=node_perms) global_eff_shapley = ut.sorter(global_eff_shapley) global_eff_shapley.head() intact_global_eff = nx.global_efficiency(G) d_global_eff = ut.distribution_of_processing(shapley_vector=global_eff_shapley.mean()) par = {"ci": 95, "orient": "v", "errcolor": "k"} colors = pl.color_code(shapley_table=global_eff_shapley) fig,ax = plt.subplots() ax = pl.plot_shapley_ranks(shapley_table=global_eff_shapley,colors=colors,ax=ax,barplot_params=par) fig.set_dpi(150) fig.set_size_inches((21*CM,5*CM)) plt.text(0.5, 0.04,f'Intact global efficiency: {intact_global_eff:.2f}') plt.text(0.5, 0.03,f'Distribution of process: {d_global_eff:.4f}') plt.xticks(fontsize=8) plt.title("Contribution of each node to the Global efficiency") plt.savefig(f"{FIGPATH}global_efficiency.pdf",dpi=300,bbox_inches='tight')Voila! Minimal example.p.s: Sum of all the shapley values will add up to the value you get if you run the analysis on the intact network (grand coalition). It's a nice sanity check. Here:print(intact_global_eff-global_eff_shapley.mean().sum())0.0Of course this might not be the case if there are stochasticity in the results. But in that case too, it should not be very far. 
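When the permutation sampling is stochastic, the exact-difference check above can be replaced by a tolerance-based comparison; a minimal sketch reusing `intact_global_eff` and `global_eff_shapley` from the cells above (the tolerance value here is an arbitrary choice).

```
import numpy as np

# Tolerance-based sanity check: the Shapley values should sum (approximately)
# to the global efficiency of the intact network.
shapley_total = global_eff_shapley.mean().sum()
print(np.isclose(intact_global_eff, shapley_total, atol=1e-3))
```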
Anyways, here's the same graph but each node is now colored by its contribution to the global efficiency.fig,ax = plt.subplots() fig.set_dpi(150) fig.set_size_inches((12*CM,12*CM)) colors.reverse() ax = nx.draw(G,with_labels=True,node_color=colors,font_size=8) plt.savefig(f"{FIGPATH}balanced_tree_colorcoded.pdf",dpi=300,bbox_inches='tight')ChordNetBenchmark for musical chord recognition# import module from music_code import chordnet # initialize c = chordnet.ChordNet() # connect to MySQL database, verify credentials c.connect() # run Tkinter app c.main() # human level performance on chord quality recognition c.KPI() # visualize KPI moving average c.display()Test Scikit-image skelton functionimport matplotlib.pyplot as plt from skimage.morphology import skeletonize from skimage import data import numpy as np blobs = data.binary_blobs(200, blob_size_fraction=.2, volume_fraction=.35, seed=1) skeleton = skeletonize(blobs) skeleton_lee = skeletonize(blobs, method='lee') fig, axes = plt.subplots(1, 3, figsize=(8, 4), sharex=True, sharey=True) ax = axes.ravel() ax[0].imshow(blobs, cmap=plt.cm.gray) ax[0].set_title('original') ax[0].axis('off') ax[1].imshow(skeleton, cmap=plt.cm.gray) ax[1].set_title('skeletonize') ax[1].axis('off') ax[2].imshow(skeleton_lee, cmap=plt.cm.gray) ax[2].set_title('skeletonize (Lee 94)') ax[2].axis('off') fig.tight_layout() plt.show() data = np.array([[0,1,1,1,0], [0,1,1,1,0], [0,1,1,1,0], [0,1,1,1,0]]) data = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\ 0, 1, 1, 1, 1, 1, 1, 1, 1, 0,\ 0, 1, 1, 1, 1, 1, 1, 1, 1, 0,\ 0, 0, 0, 0, 1, 1, 0, 0, 0, 0,\ 0, 0, 0, 0, 1, 1, 0, 0, 0, 0,\ 0, 0, 0, 0, 1, 1, 0, 0, 0, 0,\ 0, 0, 0, 0, 1, 1, 0, 0, 0, 0,\ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).reshape(8, 10) plt.imshow(data) skeleton = skeletonize(data) skeleton_lee = skeletonize(data, method='lee') plt.imshow(skeleton) plt.imshow([[0, 1, 1, 1, 0],[0, 0, 1, 0, 0],[0, 0, 1, 0, 0],[0, 1, 1, 1, 0]]) import pygraspi import numpy as np from pygraspi.combined_descriptors import getpygraspi_descriptors import glob import os import pandas l = [] morph1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0], [1, 0, 0], [1, 0, 1]]) morph2 = np.array([[0, 1, 1], [1, 1, 1], [0, 0, 0], [1, 0, 0], [1, 0, 1]]) l.append(morph1) l.append(morph2) getpygraspi_descriptors(l) os.chdir(r'/users/dgj1/Documents/pygraspi/notebooks/data') myFiles = glob.glob('*.txt') myFiles.sort() l = [] for i, file in enumerate(myFiles[0:100]): morph = np.array(pandas.read_csv(file, delimiter=' ', header=None)).swapaxes(0, 1) l.append(morph) getpygraspi_descriptors(l)Data Preparationhst = img_as_float(ut.read_hyperstack('../data/wt_gbe_20180110.h5')) center_x,center_y = 500,530 radius_x,radius_y = 550,250 ellipse = ut.calc_ellipse(center_x,center_y,radius_x,radius_y) # Loop through each timepoint in hyperstack to create mask for t in range(hst.shape[0]): hst[t] = ut.contour_embryo(hst[t],ellipse) fig,ax = plt.subplots(1,2,figsize=(10,8)) ax[0].imshow(hst[0]) ax[1].imshow(hst[-1]) import h5py # Open new h5py file to add data to f = h5py.File('../data/wt_gbe_20180110_mask.h5','w') # Save each timepoint to a group/dataset in h5 file for t in range(hst.shape[0]): f.create_dataset('t'+str(t)+'/channel1', data=hst[t]) # close file f.close() mhst = hst.copy()Use contour to create bins?# single time point for testing img = hst[0]Calculate contour based on unmasked imagesnake = active_contour(gaussian(img, 3), ellipse, alpha=0.015, beta=10, gamma=0.001) snake.shapeDivide contour into 20pt segments. 
Could these segments be used to assign bins to the embryo?fig,ax = plt.subplots(figsize=(10,8)) ax.imshow(img,cmap='Greys') ax.plot(ellipse[:,0],ellipse[:,1]) for i in range(20,401,20): ax.plot(snake[i-20:i,0],snake[i-20:i,1],lw=5) ax.plot([100,900],[550,500])Reassign `img` as masked image for calculates and assign background points (=0) to be `np.nan`.img = mhst[0] img[img==0] = np.nan # Crop image to reduce the number of points for processing imgc = img[int(snake[:,1].min()*0.9) : int(snake[:,1].max()*1.1), int(snake[:,0].min()*0.9) : int(snake[:,0].max()*1.1)] plt.imshow(imgc)Divide x axis into 20 segmentsxgrid = np.linspace(0,1024,20).astype(int) xgrid ygrid = np.array([0,180,500]) ygridCreate an roi mask that assigns each chunk of the grid a unqiue integer# Split image into a grid rois = np.zeros_like(imgc) n=1 for j in range(len(ygrid)-1): y = ygrid[j] ydt = ygrid[j+1] for i in range(len(xgrid)-1): x = xgrid[i] xdt = xgrid[i+1] if j > 0: xdt = xgrid[-i-1] x = xgrid[-i-2] print(n,x,xdt,y,ydt) # print(n) rois[y:ydt,x:xdt] = n n = n+1 ut.imshow(rois) nroi = np.unique(rois).shape nroiCalculate the average intensity value in each roi binavgs = [] for n in range(1,nroi[0]+1): avgs.append(np.nanmean(imgc[rois==n])) avgs plt.plot(avgs)Feature Extraction Using openCV#!pip install opencv-python==4.5.1.48 import cv2 import json import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import random import re import seaborn as sns import skimage from scipy.interpolate import splprep, splev from skimage.exposure import rescale_intensity from skimage.io import imread, imsave from pathlib import Path # helper methods for feature extraction from feature_extract import * #Constants IMG_SIZE = 128 IMAGES_DIR = '../images/final_pigmentation_catalogue_2016' TARGET_IMAGES_DIR = '../images/fin_features_'+str(IMG_SIZE) MAX_FILES = 5000Build up meta data of the source imagesimage_dirs = Path(IMAGES_DIR) images = pd.DataFrame(columns=['label', 'path', 'name', 'img'], dtype=object) for image_dir in image_dirs.glob('*'): label = image_dir.stem for file in image_dir.glob('*'): basename = os.path.basename(file) f_name, f_ext = os.path.splitext(basename) if f_ext.lower() != ".png" or f_name[0:3] !="HG_": continue images.loc[len(images)] = [label, file, f_name, ""] if len(images) >= MAX_FILES: break images['date'] = pd.to_datetime(images.name.str.slice(3, 9), format='%y%m%d') # images[~images['name'].str.slice(3, 4).isin(['1','0'])] # images['name'].str.slice(3, 4) len(images) img = [] for i, file in enumerate(images.itertuples()): image = cv2.imread(str(file.path), cv2.IMREAD_UNCHANGED) # read alpha channel # resized, feature enhanced, mask, contour img_rsz, image, img_mask, img_cntr, img_fd, fourier_desc, status = feature_extract(image,IMG_SIZE) if status == 1: print(file.label+ "_" + file.name + ": no alpha channel") continue if status == 2: print(file.label+ "_" + file.name + ": other error") continue if status == 3: print(file.label+ "_" + file.name + ": resize/pad error") continue try: os.mkdir(TARGET_IMAGES_DIR + "/rsz/"+file.label) os.mkdir(TARGET_IMAGES_DIR + "/ftr/"+file.label) os.mkdir(TARGET_IMAGES_DIR + "/mask/"+file.label) os.mkdir(TARGET_IMAGES_DIR + "/cntr/"+file.label) os.mkdir(TARGET_IMAGES_DIR + "/fdsc/"+file.label) os.mkdir(TARGET_IMAGES_DIR + "/fdsk/"+file.label) except: pass cv2.imwrite(TARGET_IMAGES_DIR + "/rsz/"+file.label+"/pre1_" + file.name + "_rsz_"+file.label+".png", img_rsz) cv2.imwrite(TARGET_IMAGES_DIR + "/ftr/"+file.label+"/pre1_" + file.name + 
"_ftr_"+file.label+".png", image) cv2.imwrite(TARGET_IMAGES_DIR + "/mask/"+file.label+"/pre1_" + file.name + "_mask_"+file.label+".png", img_mask) cv2.imwrite(TARGET_IMAGES_DIR + "/cntr/"+file.label+"/pre1_" + file.name + "_cntr_"+file.label+".png", img_cntr) cv2.imwrite(TARGET_IMAGES_DIR + "/fdsc/"+file.label+"/pre1_" + file.name + "_fdsc_"+file.label+".png", img_fd) #with open(TARGET_IMAGES_DIR + "/pre1_" + file.name + "_fdsk_"+file.label+".dat", 'w') as f: # data=fourier_desc.tobytes() np.save(TARGET_IMAGES_DIR + "/fdsk/"+file.label+"/pre1_" + file.name + "_fdsk_"+file.label+".npy", fourier_desc) #print(file.label + ": " +str(round(fourier_desc[0].real))+":"+str(round(fourier_desc[1].real))+":"+str(round(fourier_desc[2].real))+":"+str(round(fourier_desc[3].real)) + " - " + file.name) IMG_SIZE = 96 IMAGES_DIR = '../images/final_pigmentation_catalogue_2016' TARGET_IMAGES_DIR = '../images/fin_features_'+str(IMG_SIZE) img = [] for i, file in enumerate(images.itertuples()): image = cv2.imread(str(file.path), cv2.IMREAD_UNCHANGED) # read alpha channel # resized, feature enhanced, mask, contour img_rsz, image, img_mask, img_cntr, img_fd, fourier_desc, status = feature_extract(image,IMG_SIZE) if status == 1: print(file.label+ "_" + file.name + ": no alpha channel") continue if status == 2: print(file.label+ "_" + file.name + ": other error") continue if status == 3: print(file.label+ "_" + file.name + ": resize/pad error") continue try: os.mkdir(TARGET_IMAGES_DIR + file.label) os.mkdir(TARGET_IMAGES_DIR + file.label) os.mkdir(TARGET_IMAGES_DIR + file.label) os.mkdir(TARGET_IMAGES_DIR + file.label) os.mkdir(TARGET_IMAGES_DIR + file.label) os.mkdir(TARGET_IMAGES_DIR + file.label) except: pass cv2.imwrite(TARGET_IMAGES_DIR + file.label+"/pre1_" + file.name + "_rsz_"+file.label+".png", img_rsz) cv2.imwrite(TARGET_IMAGES_DIR + file.label+"/pre1_" + file.name + "_ftr_"+file.label+".png", image) cv2.imwrite(TARGET_IMAGES_DIR + file.label+"/pre1_" + file.name + "_mask_"+file.label+".png", img_mask) cv2.imwrite(TARGET_IMAGES_DIR + file.label+"/pre1_" + file.name + "_cntr_"+file.label+".png", img_cntr) cv2.imwrite(TARGET_IMAGES_DIR + file.label+"/pre1_" + file.name + "_fdsc_"+file.label+".png", img_fd) #with open(TARGET_IMAGES_DIR + "/pre1_" + file.name + "_fdsk_"+file.label+".dat", 'w') as f: # data=fourier_desc.tobytes() np.save(TARGET_IMAGES_DIR + "/fdsk/"+file.label+"/pre1_" + file.name + "_fdsk_"+file.label+".npy", fourier_desc) #print(file.label + ": " +str(round(fourier_desc[0].real))+":"+str(round(fourier_desc[1].real))+":"+str(round(fourier_desc[2].real))+":"+str(round(fourier_desc[3].real)) + " - " + file.name) #training set prep image_dirs = Path(TARGET_IMAGES_DIR) images_trn = pd.DataFrame(columns=['label', 'path', 'name', 'img'], dtype=object) MAX_FILES = 500 #rsz = resized image # for file in image_dirs.glob('*rsz*.png'): # basename = os.path.basename(file) # f_name, f_ext = os.path.splitext(basename) # f_name_part = f_name.split('_') # label = f_name_part[len(f_name_part)-1] # images_trn.loc[len(images_trn)] = [label, file, f_name, ""] # if len(images) >= MAX_FILES: break trn_img="rsz" labels =[] for image_dir in image_dirs.glob('*'): label = image_dir.stem labels.append(label) for file in image_dir.glob('*'+trn_img+'*'): basename = os.path.basename(file) f_name, f_ext = os.path.splitext(basename) if f_ext.lower() != ".png" or f_name[0:4] !="pre1": continue images_trn.loc[len(images_trn)] = [label, file, f_name, ""] if len(images_trn) >= MAX_FILES: break paths={} faces=[] for i, label in 
enumerate(labels): paths[label] = TARGET_IMAGES_DIR.replace("\\", "/")+"/"+label faces.append(label) for key in paths.keys(): li = [] for i, file in enumerate(images_trn[images_trn.label==key].itertuples()): img1 = cv2.imread(str(file.path), cv2.IMREAD_UNCHANGED) img2 = img1[...,::-1] li.append(np.around(np.transpose(img2, (2,0,1))/255.0, decimals=12)) li # def listcalc(l,thr): # for j in range(len(l)): # l[j]=l[j]/255*thr IMG_SIZE = 256 img = [] for i, file in enumerate(images.itertuples()): image = cv2.imread(str(file.path), cv2.IMREAD_UNCHANGED) # read alpha channel # make fixed size img_rsz = resizeAndPad(image, (IMG_SIZE,IMG_SIZE), 255) # add border # img_rsz = add_border(img_rsz,5,0) # split and extract alpha b, g, r, a = cv2.split(img_rsz) img_a = (255-a) # make a white mask _,a2 = cv2.threshold(img_a, 220, 255, cv2.THRESH_BINARY) # merge channels adding the white mask to get rid of backgounds hidden behind alpha mask image = cv2.merge([cv2.add(b,a2), cv2.add(g,a2), cv2.add(r,a2)],a) # create binary mask and clean away some spots _,a3 = cv2.threshold(img_a, 20, 255, cv2.THRESH_BINARY_INV) # kernel = np.ones((2, 2), np.uint8) a3 = cv2.erode(a3, None, iterations=2) a3 = cv2.dilate(a3, None, iterations=2) #image = claheHSV(image) # make gray image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #contrast stretching xp = [40, 80, 200, 200, 200] fp = [0, 80, 200, 200, 200] x = np.arange(256) table = np.interp(x, xp, fp).astype('uint8') image = cv2.LUT(image, table) # cv2.bilateralFilter(image, 3, 50, 50) #blur to reduce noise cv2.blur(image, (18, 18)) #cv2.GaussianBlur(image, (11, 11), 0) # apply CLAHE image = claheGray(image, 1.5,8) # contrast image = contrast_yt(image,30,0,255) # contour - get the contour from the mask image, cntr, contours, edges = drawContour(image,a3,False,100,1,1,60,240,30,7) #contours = sorted(contours, key=cv2.contourArea, reverse=True)[:2] # print(len(contours[0])) # contours[0] = contours[0][:(len(contours[0])//2)] fourier_desc = findDescriptor(contours) img_fd, fourier_desc = reconstruct(fourier_desc, 80, IMG_SIZE) # fourier_desc = truncate_descriptor(fourier_desc,10) # print(file.name) print(file.label + ": " +str(round(fourier_desc[0].real))+":"+str(round(fourier_desc[1].real))+":"+str(round(fourier_desc[2].real))+":"+str(round(fourier_desc[3].real)) + " - " + file.name) # print(str(type(fourier_desc[0]))) # find and enpasise edges kernel = np.array([[0.5, 1.0, 0.5], [1.0, -6.0, 1.0], [0.5, 1.0, 0.5]]) kernel = kernel/(np.sum(kernel) if np.sum(kernel)!=0 else 1) #filter the source image #image = cv2.filter2D(image,-1,kernel) image = (255-image) # inverse cv2.imwrite(TARGET_IMAGES_DIR + "/pre1_" + file.name + "_rsz_"+file.label+".png", img_rsz) cv2.imwrite(TARGET_IMAGES_DIR + "/pre1_" + file.name + "_ftr_"+file.label+".png", image) cv2.imwrite(TARGET_IMAGES_DIR + "/pre1_" + file.name + "_mask_"+file.label+".png", a3) cv2.imwrite(TARGET_IMAGES_DIR + "/pre1_" + file.name + "_cntr_"+file.label+".png", cntr) cv2.imwrite(TARGET_IMAGES_DIR + "/pre1_" + file.name + "_fdsc_"+file.label+".png", img_fd) # img.append(image/256) # progress = i/MAX_FILES * 100 # if progress % 5 == 0: print(f'{progress}% done') #images.img = img print("Complete!") fourier_descTraining TL networkMODEL_DIR_TL = '../models/finnet' data = pd.read_csv(MODEL_DIR_TL +"/train_rsz_0002_50_100.csv", skiprows=0, header=0,index_col=[0]) fig, axes = plt.subplots(nrows=2, ncols=2) fig.tight_layout() p = data.plot(x_compat=True, linestyle='-', marker='o', label='Loss',title="Fig5: Training Alpha 0.002 / 
50Epochs / 100 Steps", ax=axes[0,0],figsize=(16,9)) plt.show() data !jupyter nbconvert --to html 3_opencv_feature_extraction[NbConvertApp] WARNING | Config option `kernel_spec_manager_class` not recognized by `NbConvertApp`. [NbConvertApp] Converting notebook 3_opencv_feature_extraction.ipynb to html [NbConvertApp] Writing 664179 bytes to 3_opencv_feature_extraction.html- [PyTorch for Deep Learning - Full Course (FreeCodeCamp Video)](https://www.youtube.com/watch?v=GIsg-ZUy0MY&t=205s)%config Completer.use_jedi = False import torch import numpy as np data = [[1,2,3], [5,43,2]] x_data = torch.tensor(data) x_data np_array = np.array(data) x_np = torch.from_numpy(np_array) np_array, x_nptensorx = torch.tensor(3.) w = torch.tensor(4., requires_grad=True) b = torch.tensor(5., requires_grad=True) y = x*w +b y y.backward() print(x.grad, y.grad, w.grad, b.grad)None None tensor(3.) tensor(1.)numpyimport numpy as np x = np.array() np.ndarray, np.arrayapples and orangesimport torch #X = (temp, rain, humidity) inputs = np.array([[73,67,43], [91,88,64], [102,43,37], [87,134,58], [69,96,70]], dtype='float32') inputs = torch.from_numpy(inputs) #Y = (apples, oranges) outputs = np.array([ [56,70], [81,101], [119,133], [22,37], [103,119] ], dtype='float32') outputs = torch.from_numpy(outputs) w = torch.randn(2,3,requires_grad=True) # 2 output vars from 3 input vars.. after we transpose it b = torch.randn(2, requires_grad=True) # initialized with normal distribution with mean 0 and standard deviation 1. print(w,b) #define mathetmatical model def model(x, w, b): return x @ w.t() + b # see what it predicts preds = model(inputs) print(preds) # see what it should predict print(outputs) # see how different it is def mse(t1, t2): diff = t1-t2 return torch.sum(diff*diff)/diff.numel() loss = mse(preds, outputs) print(loss) print(f"w.grad before backpropagation: {w.grad}") loss.backward() print(w) print(f"w.grad after backpropagation: {w.grad}") # df/dw print(f"b.grad before backpropagation: {b.grad}") loss.backward() print(b) print(f"b.grad after backpropagation: {b.grad}") # df/db # RuntimeError: Trying to backward through the graph a second time, # but the saved intermediate results have already been freed. # Specify retain_graph=True when calling .backward() or autograd.grad() the first time. w.grad.zero_() b.grad.zero_() print(w.grad, b.grad) w = torch.randn(2,3,requires_grad=True) # 2 output vars from 3 input vars.. after we transpose it b = torch.randn(2, requires_grad=True) # initialized with normal distribution with mean 0 and standard deviation 1. preds = model(inputs, w, b) loss = mse(preds, outputs) loss.backward() # We use torch.no_grad to indicate to PyTorch # that we shouldn't track, calculate or modify gradients # while updating the weights and biases. with torch.no_grad(): learning_rate = 1e-5 w -= w.grad * learning_rate b -= b.grad * learning_rate w.grad.zero_() b.grad.zero_() w, w.grad for i in range(1000): preds = model(inputs, w, b) loss = mse(preds, outputs) if (i % 20 == 0): print(loss) loss.backward() # We use torch.no_grad to indicate to PyTorch # that we shouldn't track, calculate or modify gradients # while updating the weights and biases. 
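# Inside torch.no_grad() the in-place updates below (w -= ..., b -= ...) are not tracked
# by autograd; doing them on leaf tensors that require grad outside such a block raises a
# RuntimeError. Zeroing .grad afterwards keeps gradients from accumulating across steps.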
with torch.no_grad(): learning_rate = 1e-5 w -= w.grad * learning_rate b -= b.grad * learning_rate w.grad.zero_() b.grad.zero_() print(w, b) print(preds) print(outputs)tensor([[ 56., 70.], [ 81., 101.], [119., 133.], [ 22., 37.], [103., 119.]])Linear Regression using torch.nnimport torch.nn as nn # allows us to access a small section of the training data using the array indexing notation ([0:3] in the above code). It returns a tuple (or pair), in which the first element contains the input variables for the selected rows, and the second contains the targets. from torch.utils.data import TensorDataset # split the data into batches of a predefined size while training from torch.utils.data import DataLoader # Import nn.functional - contains many useful loss functions and several other utilities. import torch.nn.functional as F # Input (temp, rainfall, humidity) inputs = np.array([[73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70], [73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70], [73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70]], dtype='float32') # Targets (apples, oranges) targets = np.array([[56, 70], [81, 101], [119, 133], [22, 37], [103, 119], [56, 70], [81, 101], [119, 133], [22, 37], [103, 119], [56, 70], [81, 101], [119, 133], [22, 37], [103, 119]], dtype='float32') inputs = torch.from_numpy(inputs) targets = torch.from_numpy(targets) train_ds = TensorDataset(inputs, targets) train_ds[0:3, 1] inputs.size(), len(inputs) batch_size = 5 # shuffle set to True to have the data reshuffled at every epoch train_dl = DataLoader(train_ds, batch_size, shuffle=True) # shuffled on every iteration for xbatch, ybatch in train_dl: print(xbatch) print(ybatch) model = nn.Linear(3,2) # 3 input vars to 2 output vars print(model.weight, model.bias) # y = xW^T + b # shape of W is 2x3 preds = model(inputs) preds list(filter(lambda x: 'loss' in x, dir(F))) loss = F.mse_loss(model(inputs), targets) print(loss) optim = torch.optim.SGD(model.parameters(), lr=1e-5) # Utility function to train the model def fit(num_epochs, model, loss_fn, opt, train_dl): # Repeat for given number of epochs for epoch in range(num_epochs): # Train with batches of data for xb,yb in train_dl: # 1. Generate predictions pred = model(xb) # 2. Calculate loss loss = loss_fn(pred, yb) # 3. Compute gradients loss.backward() # 4. Update parameters using gradients opt.step() # 5. Reset the gradients to zero opt.zero_grad() # Print the progress if (epoch+1) % 10 == 0: print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item())) return model = nn.Linear(3,2) optimizer = torch.optim.SGD(model.parameters(), lr=1e-4) fit(100, model, F.mse_loss, optimizer, train_dl) model(inputs) targets params = model.parameters() print(model) print(model.parameters()) print(model.weight) print(model.bias)Linear(in_features=3, out_features=2, bias=True) Parameter containing: tensor([[-0.3934, 0.8671, 0.6582], [-0.2928, 0.8117, 0.8684]], requires_grad=True) Parameter containing: tensor([0.4906, 0.2416], requires_grad=True)1. sortingnames = ["王飞", "刘洋","李丽"] scores = ["89,92,95,88,91", "92,96,81,90,92", "89,91,91,78,97"] class Solution(object): def scoresort(self, names, scores): dict_with_strs = dict(zip(names, scores)) dict_with_ints = {k:[int(i) for i in v.split(",")] for k, v in dict_with_strs.items()} return([key for key,value in sorted(dict_with_ints.items(),key=lambda i:sum(i[1]),reverse=True)]) x = Solution() x.scoresort(names,scores)2. 
Max Subarray# https://en.wikipedia.org/wiki/Maximum_subarray_problem class Solution: def maxSubArray(self, arr: List[int]) -> int: if len(arr)==0: return 0 current_sum=arr[0] best_sum=arr[0] for i in range(1,len(arr)): if arr[i]>(current_sum+arr[i]): current_sum=arr[i] else: current_sum+=arr[i] best_sum=max(current_sum,best_sum) #print(current_sum) return best_sum Solution().maxSubArray(nums = [-2,1,-3,4,-1,2,1,-5,4])1 r value is: 1 2 r value is: -2 3 r value is: 4 4 r value is: 3 5 r value is: 5 6 r value is: 6 7 r value is: 1 8 r value is: 53. Length of Last WordGiven a string s consists of upper/lower-case alphabets and empty space characters ' ', return the length of last word (last word means the last appearing word if we loop from left to right) in the string.If the last word does not exist, return 0.Note: A word is defined as a maximal substring consisting of non-space characters only.Example:Input: ""Output: 5class Solution: def lengthOfLastWord(self, s: str) -> int: return 0 if len(s.strip()) == 0 else len(s.strip().split(" ")[-1]) Solution().lengthOfLastWord("s ") def lengthOfLastWord(s): if len(s.split()) == 0: return 0 else: return len(s.split().pop())4. Implement the unique_names methodWhen passed two lists of names, it will return a list containing the names that appear in either or both lists. The returned list should have no duplicates.For example, calling unique_names(['Ava', 'Emma', 'Olivia'], ['Olivia', 'Sophia', 'Emma']) should return a list containing Ava, Emma, Olivia, and Sophia in any order.def unique_names(names1, names2): names2.extend([i for i in names1 if i not in names2]) return (list(set(names2))) if __name__ == "__main__": names1 = ["Ava", "Emma", "Olivia"] names2 = ["Olivia", "Sophia", "Emma"] print(unique_names(names1, names2)) # should print Ava, Emma, Olivia, Sophia['Olivia', 'Sophia', 'Emma', 'Ava']5. Two SumWrite a function that, when passed a list and a target sum, returns, efficiently with respect to time used, two distinct zero-based indices of any two of the numbers, whose sum is equal to the target sum. If there are no two numbers, the function should return None.For example, find_two_sum([3, 1, 5, 7, 5, 9], 10) should return a single tuple containing any of the following pairs of indices:0 and 3 (or 3 and 0) as 3 + 7 = 101 and 5 (or 5 and 1) as 1 + 9 = 102 and 4 (or 4 and 2) as 5 + 5 = 10def find_two_sum(numbers, target_sum): temp = {} for i in range(len(numbers)): if (target_sum - numbers[i]) in temp: return([temp[target_sum - numbers[i]], i]) else: temp[numbers[i]] = i # keep putting the element into the temp list, untill the first appearance of the value return None if __name__ == "__main__": print(find_two_sum([3, 1, 5, 7, 5, 9], 10))[0, 3]6. Palindrome Number Determine whether an integer is a palindrome. An integer is a palindrome when it reads the same backward as forward.Example 1:Input: 121Output: trueExample 2:Input: -121Output: falseExplanation: From left to right, it reads -121. From right to left, it becomes 121-. Therefore it is not a palindrome.class Solution: def isPalindrome(self, x: int): if x < 0: return False elif str(x)[::-1] == str(x): return True else: return False Solution().isPalindrome(123)6. Reverse A integer# % modulous, remainder. 
Special case, when int%10, the result is the "decimal value" def reverse_integer(x): sign = -1 if x < 0 else 1 x *= sign # Remove leading zero in the reversed integer while x: if x % 10 == 0: x /= 10 else: break # string manipulation x = str(x) lst = list(x) # list('234') returns ['2', '3', '4'] lst.reverse() x = "".join(lst) x = int(x) return sign*x print(reverse_integer(234)) print(reverse_integer(-234)) # static and class method: # https://www.geeksforgeeks.org/class-method-vs-static-method-python/7. Check odd or even# Bitwise def odd(number): if number&1 == 1: print("Odd number") else: print("Even number") odd(9) # By modulous def odd(number): if number//2 !=0: print("Odd number") else: print("Even number") odd(9)Odd numberSwap Nodes in Pairs Given a linked list, swap every two adjacent nodes and return its head.You may not modify the values in the list's nodes. Only nodes itself may be changed.class ListNode: def __init__(self, val=0, next=None): self.val = val self.next = next print("val is:", self.val) print("next is:", self.next) class Solution: def swapPairs(self, head: ListNode): if head is None: return None if head.next is None: return head class Solution(): def swapPairs(self, head: ListNode): if not head: return if head and not head.next: return head if head.next: node1 = head node2 = head.next remaining_list = node2.next node2.next = node1 node1.next = self.swapPairs(remaining_list) return node2 Solution().swapPairs([1,2,3,4]) #https://www.geeksforgeeks.org/pairwise-swap-elements-of-a-given-linked-list/Running Sum of 1d Array Given an array nums. We define a running sum of an array as runningSum[i] = sum(nums[0]…nums[i]).Return the running sum of numsExample 1:Input: nums = [1,2,3,4]Output: [1,3,6,10]Explanation: Running sum is obtained as follows: [1, 1+2, 1+2+3, 1+2+3+4].Example 2:Input: nums = [1,1,1,1,1]Output: [1,2,3,4,5]Explanation: Running sum is obtained as follows: [1, 1+1, 1+1+1, 1+1+1+1, 1+1+1+1+1].Example 3:Input: nums = [3,1,2,10,1]Output: [3,4,6,16,17] Constraints:1 <= nums.length <= 1000-10^6 <= nums[i] <= 10^6class Solution: def runningSum(self, nums): out = 0 lst = [] i = 0 while i < len(nums): out += nums[i] i += 1 lst.append(out) return lst nums = [3, 1, 2, 10, 1] Ans = Solution() Ans.runningSum(nums)Richest Customer Wealth You are given an m x n integer grid accounts where accounts[i][j] is the amount of money the i​​​​​​​​​​​th​​​​ customer has in the j​​​​​​​​​​​th​​​​ bank. Return the wealth that the richest customer has.A customer's wealth is the amount of money they have in all their bank accounts. The richest customer is the customer that has the maximum wealth. 
Example 1:Input: accounts = [[1,2,3],[3,2,1]] Output: 6 Explanation:1st customer has wealth = 1 + 2 + 3 = 62nd customer has wealth = 3 + 2 + 1 = 6Both customers are considered the richest with a wealth of 6 each, so return 6.Example 2:Input: accounts = [[1,5],[7,3],[3,5]] Output: 10 Explanation: 1st customer has wealth = 62nd customer has wealth = 10 3rd customer has wealth = 8The 2nd customer is the richest with a wealth of 10.Example 3:Input: accounts = [[2,8,7],[7,1,3],[1,9,5]] Output: 17 Constraints:m == accounts.lengthn == accounts[i].length1 <= m, n <= 501 <= accounts[i][j] <= 100accounts = [[2,8,7],[7,1,3],[1,9,5]] type(accounts) lst = [] for i in range(len(accounts)): lst.append(sum(accounts[i])) return max(lst) class Solution: def maximumWealth(self, accounts): lst = [] for i in range(len(accounts)): lst.append(sum(accounts[i])) return max(lst) Ans = Solution() Ans.maximumWealth(accounts)Kids With the Greatest Number of Candies Given the array candies and the integer extraCandies, where candies[i] represents the number of candies that the ith kid has.For each kid check if there is a way to distribute extraCandies among the kids such that he or she can have the greatest number of candies among them. Notice that multiple kids can have the greatest number of candies. Example 1:Input: candies = [2,3,5,1,3], extraCandies = 3Output: [true,true,true,false,true] Explanation: Kid 1 has 2 candies and if he or she receives all extra candies (3) will have 5 candies --- the greatest number of candies among the kids. Kid 2 has 3 candies and if he or she receives at least 2 extra candies will have the greatest number of candies among the kids. Kid 3 has 5 candies and this is already the greatest number of candies among the kids. Kid 4 has 1 candy and even if he or she receives all extra candies will only have 4 candies. Kid 5 has 3 candies and if he or she receives at least 2 extra candies will have the greatest number of candies among the kids. Example 2: Input: candies = [4,2,1,1,2], extraCandies = 1Output: [true,false,false,false,false] Explanation: There is only 1 extra candy, therefore only kid 1 will have the greatest number of candies among the kids regardless of who takes the extra candy.Example 3:Input: candies = [12,1,12], extraCandies = 10Output: [true,false,true] Constraints:2 <= candies.length <= 1001 <= candies[i] <= 1001 <= extraCandies <= 50class Solution: def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]: lst_True = [i for i in candies if (i + extraCandies) >= max(candies)] return [True if i in lst_True else False for i in candies] candies = [12,1,12] extraCandies = 10 max(candies) lst_True = [i for i in candies if (i + extraCandies) >= max(candies)] [True if i in lst_True else False for i in candies]Number Of Rectangles That Can Form The Largest Square You are given an array rectangles where rectangles[i] = [li, wi] represents the ith rectangle of length li and width wi.You can cut the ith rectangle to form a square with a side length of k if both k <= li and k <= wi. For example, if you have a rectangle [4,6], you can cut it to get a square with a side length of at most 4.Let maxLen be the side length of the largest square you can obtain from any of the given rectangles.Return the number of rectangles that can make a square with a side length of maxLen. 
Example 1:Input: rectangles = [[5,8],[3,9],[5,12],[16,5]]Output: 3Explanation: The largest squares you can get from each rectangle are of lengths [5,3,5,5].The largest possible square is of length 5, and you can get it out of 3 rectangles.Example 2:Input: rectangles = [[2,3],[3,7],[4,3],[3,7]]Output: 3 Constraints:1 <= rectangles.length <= 1000rectangles[i].length == 21 <= li, wi <= 109li != wiclass Solution: def countGoodRectangles(self, rectangles: List[List[int]]) -> int: return sum([True if min(i) >= max([min(i) for i in rectangles]) else False for i in rectangles]) rectangles = [[5,8],[3,9],[5,12],[16,5]] sum([True if min(i) >= max([min(i) for i in rectangles]) else False for i in rectangles])Maximum 69 Number Given a positive integer num consisting only of digits 6 and 9.Return the maximum number you can get by changing at most one digit (6 becomes 9, and 9 becomes 6).Example 1:Input: num = 9669Output: 9969Explanation: Changing the first digit results in 6669.Changing the second digit results in 9969.Changing the third digit results in 9699.Changing the fourth digit results in 9666. The maximum number is 9969. Example 2:Input: num = 9996Output: 9999Explanation: Changing the last digit 6 to 9 results in the maximum number. Example 3:Input: num = 9999Output: 9999Explanation: It is better not to apply any change.class Solution: def maximum69Number (self, num: int) -> int: input = [i for i in str(num)] for i in range(len(input)): if input[i] == "6": input[i] = "9" break input = int("".join(input)) return input num = 9669 input = [i for i in str(num)] for i in range(len(input)): if input[i] == "6": input[i] = "9" break input = int("".join(input)) inputShuffle the Array Given the array nums consisting of 2n elements in the form [x1,x2,...,xn,y1,y2,...,yn].Return the array in the form [x1,y1,x2,y2,...,xn,yn].Example 1:Input: nums = [2,5,1,3,4,7], n = 3Output: [2,3,5,4,1,7] Explanation: Since x1=2, x2=5, x3=1, y1=3, y2=4, y3=7 then the answer is [2,3,5,4,1,7]. Example 2:Input: nums = [1,2,3,4,4,3,2,1], n = 4Output: [1,4,2,3,3,2,4,1] Example 3:Input: nums = [1,1,2,2], n = 2Output: [1,2,1,2]class Solution: def shuffle(self, nums: List[int], n: int) -> List[int]: new_list = [] for i in range(n): new_list.append(nums[i]) new_list.append(nums[i+n]) return new_list nums = [2,5,1,3,4,7] n = 3 new_list = [] for i in range(n): new_list.append(nums[i]) new_list.append(nums[i+n]) print(new_list)[2, 3, 5, 4, 1, 7]204. Count PrimesCount the number of prime numbers less than a non-negative number, n.Example 1:Input: n = 10Output: 4Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7. Example 2:Input: n = 0Output: 0Example 3:Input: n = 1Output: 0class Solution: def countPrimes(self, n): if n == 0 or n == 1: return 0 return len([i for i in range(1, n) if i*self.checkprime(i) !=0]) def checkprime(self, n): if n > 1: for i in range(2, n): if n%i == 0: return 0 return 1 return 0 Ans = Solution() Ans.countPrimes(6) [i for i in range(1, 10)]Number of Good PairsGiven an array of integers nums.A pair (i,j) is called good if nums[i] == nums[j] and i < j.Return the number of good pairs. Example 1:Input: nums = [1,2,3,1,1,3] Output: 4 Explanation: There are 4 good pairs (0,3), (0,4), (3,4), (2,5) 0-indexed. Example 2: Input: nums = [1,1,1,1] Output: 6 Explanation: Each pair in the array are good. 
Example 3: Input: nums = [1,2,3] Output: 0class Solution(): def numIdenticalPairs(self, nums): if (len(nums) == 1): return 0 count = 0 for i in range(len(nums)-1): for j in range(i+1, len(nums)): if (nums[i] == nums[j]): count += 1 print(i, j) return count Ans = Solution() Ans.numIdenticalPairs(nums = [1,2,3,1,1,3]) class Solution: def countMatches(self, items, ruleKey, ruleValue): counter = 0 if ruleKey == "type": for i in items: if (i[0] == ruleValue): counter += 1 return counter elif ruleKey == "color": for i in items: if (i[1] == ruleValue): counter += 1 return counter else: for i in items: if (i[2] == ruleValue): counter += 1 return counter Ans = Solution() Ans.countMatches(items = [["phone","blue","pixel"],["computer","silver","phone"],["phone","gold","iphone"]], ruleKey = "type", ruleValue = "phone") items = [["phone","blue","pixel"],["computer","silver","phone"],["phone","gold","iphone"]] ruleKey = "type" ruleValue = "phone" [0]*2Subtract the Product and Sum of Digits of an Integer Given an integer number n, return the difference between the product of its digits and the sum of its digits. Example 1:Input: n = 234Output: 15 Explanation: Product of digits = 2 * 3 * 4 = 24 Sum of digits = 2 + 3 + 4 = 9 Result = 24 - 9 = 15Example 2:Input: n = 4421Output: 21Explanation: Product of digits = 4 * 4 * 2 * 1 = 32 Sum of digits = 4 + 4 + 2 + 1 = 11 Result = 32 - 11 = 21class Solution: def subtractProductAndSum(self, n: int) -> int: return [for a, b in zip()] n = [2,3,4,4] [x for a, b in zip(n[::2], n[1::2]) for x in a*[b]] ###################### input = "PINEAPPLE" input = "ALVINLEEJUNHUI" print(input, end = "\n") print([i for i in input][0]+[i for i in input][1]+(len(input)-2*2)*" "+[i for i in input][-2]+[i for i in input][-1]) i = 1 while i < (len(input)-3)/2: print([i for i in input][0]+i*" "+[i for i in input][i+1]+(len(input)-4-i*2)*" "+[i for i in input][-1-i-1]+i*" "+[i for i in input][-1]) i+=1 if (len(input))%2 == 0: pass print([i for i in input][0]+((len(input)-3)//2)*" "+[i for i in input][(len(input))//2]+((len(input)-3)//2)*" "+[i for i in input][-1]) # bottom half j = (len(input)-4-1)//2 while j >= 0: print([i for i in input][0]+j*" "+[i for i in input][j+1]+(len(input)-2*j-4)*" "+[i for i in input][2+j+(len(input)-2*j-4)]+j*" "+[i for i in input][-1]) j-=1 print(input, end = "\n") [2+j+] (len(input)-2*j-4) j = (len(input)-4-1)//2 while j > 1: print([i for i in input][0]+j*" "+[i for i in input][j+1]+(len(input)-2*j-4)*" "+[i for i in input][(len(input)-2*j-4)]+j*" "+[i for i in input][-1]) j-=1 (len(input))//2 # recursion def factorial(n): if n==1: return 1 else: return n*factorial(n-1) factorial(5) a = "abcdedfa" set(a) "".join(["ab", "c"]) from collections import defaultdict def numEquivDominoPairs(dominoes): c = defaultdict(lambda: 0) for d in dominoes: print(f"d:{d}") d.sort() print(f"d sorted:{d}") c[tuple(d)] += 1 print(c) print(c) return sum([(i*i-1)//2 for i in c.values() if i >= 2]) dominoes = [[1,2],[1,2],[1,1],[1,2],[2,2]] numEquivDominoPairs(dominoes) set([1, 2, 3]) i = [1, 2] i.sort() dic = {1:1, 2:3} for i in dic.values(): print (i) type((([1, 2]))) ([1, 2]) == [1, 2] type((1, 2)) tuple([1, 2]) # check prime number import timeit from math import sqrt def isPrimes1(n): if n <= 1: return False for i in range(2, int(sqrt(n) + 1)): print(i) if n % i == 0: return False return True isPrimes1(2) int(sqrt(2) + 1) [i for i in range(2, 2)] bin(10)[2:] def strStr(haystack, needle): dic = {i:haystack[i] for i in range(len(haystack))}# 
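The subtractProductAndSum exercise above was left with a non-runnable stub (return [for a, b in zip()] is not valid Python). A minimal working version of the digit product-minus-sum described in the problem statement, sketched here without guessing at the zip-based approach the author had in mind, is:

class Solution:
    def subtractProductAndSum(self, n: int) -> int:
        digits = [int(d) for d in str(n)]
        product = 1
        for d in digits:
            product *= d          # product of all digits
        return product - sum(digits)

print(Solution().subtractProductAndSum(234))   # 24 - 9 = 15
print(Solution().subtractProductAndSum(4421))  # 32 - 11 = 21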
https://github.com/mk-takizawa/elmo_for_learn # http://tadaoyamaoka.hatenablog.com/entry/2017/05/23/214339 # http://tadaoyamaoka.hatenablog.com/entry/2017/08/06/182350 # Mount Google Drive # [Go to this URL in a browser: URL] -> Authorization Code # [Enter your authorization code:] from google.colab import drive drive.mount('/content/drive') %cd ~/ !git clone https://github.com/mk-takizawa/elmo_for_learn.git %cd elmo_for_learn/src # // 教師局面を作成する。100万局面で34MB。 # void make_teacher(std::istringstream& ssCmd) { # : # go(pos, static_cast(6)); # : # const int ScoreThresh = 3000; // 自己対局を決着がついたとして止める閾値 # Depth と 閾値を変更する場合 !perl -pi.bak -e 's|static_cast\(6\)|static_cast\(10\)|g;s|ScoreThresh = 3000|ScoreThresh = 30000|g' usi.cpp !diff usi.cpp.bak usi.cpp !make -j 8 sse !mv elmo ../bin/ %cd ~/ %%bash curl -sc /tmp/cookie "https://drive.google.com/uc?id=0B0XpI3oPiCmFalVGclpIZjBmdGs&export=download" > /dev/null export CODE="$(awk '/_warning_/ {print $NF}' /tmp/cookie)" curl -Lb /tmp/cookie "https://drive.google.com/uc?export=download&confirm=${CODE}&id=0B0XpI3oPiCmFalVGclpIZjBmdGs" -o elmo.shogi.zip !ls -l elmo.shogi.zip !unzip elmo.shogi.zip !mkdir -p ~/elmo_for_learn/bin/20161007 !cp -p elmo.shogi/eval/KK_synthesized.bin ~/elmo_for_learn/bin/20161007/kks.kk.bin !cp -p elmo.shogi/eval/KKP_synthesized.bin ~/elmo_for_learn/bin/20161007/kkps.kkp.bin !cp -p elmo.shogi/eval/KPP_synthesized.bin ~/elmo_for_learn/bin/20161007/kpps.kpp.bin !wget https://github.com/HiraokaTakuya/aperygenerateteacher/releases/download/v1.15.0/aperygenerateteacher_v1.15.0.zip !unzip aperygenerateteacher_v1.15.0.zip !cp -p aperygenerateteacher_v1.15.0/bin/roots.hcp ~/elmo_for_learn/bin %cd ~/elmo_for_learn/bin # ./elmo make_teacher roots.hcp <出力ファイル名> <実行スレッド数> <生成する局面数> #!./elmo make_teacher roots.hcp elmo_teacher-001 2 1000000 %%bash save_dir="/content/drive/My Drive/elmo_for_learn/data" mkdir -p "${save_dir}" # 初回のみ #echo "0" > "${save_dir}/number.txt" num=$(cat "${save_dir}/number.txt") max=$((num+4)) export threadNum=2 export teacherNodes=1000000 export testNodes=$((teacherNodes/10)) export depth=10 # for i in $(seq 0 4); do for ((i=${num}; i < ${max}; i++)); do dst=`printf "%03d" $((i+1))` echo "[${dst}]" ./elmo make_teacher roots.hcp elmo_teacher-depth${depth}-${dst} ${threadNum} ${teacherNodes} >/dev/null 2>&1 ./elmo make_teacher roots.hcp elmo_teacher-depth${depth}-${dst}-test ${threadNum} ${testNodes} >/dev/null 2>&1 cp -p elmo_teacher-depth${depth}-${dst}* "${save_dir}" printf "%d" $((i+1)) > "${save_dir}/number.txt" donePost Analysis 1 - 2022 TOP 20 zip codes in Housing Price IncreaseIn this notebook, I analyze the forecasted data. I find the TOP 20 ZIP codes in terms of the housing price increase and plot them on a map using Geo Pandas.import geopandas as gpd import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns import warnings warnings.filterwarnings('ignore') import contextily as cxFirst, I load my housing price forecast data.df = pd.read_csv('Data/dcfull.csv') df.head()Next, I load US zip codes shape file for Geo Panda.usa = gpd.read_file("Shape/tl_2019_us_zcta510/tl_2019_us_zcta510.shp") usa['zipcode'] = usa.ZCTA5CE10.astype('int') usa.head()Next, I use my GeoIDs data (df_geo) to filter zip codes in Washington DC area. 
Read my GeoIDs data in below.# Read Geo Data df_g = pd.read_csv('Data/df_geo.csv') # Include only Washington DC metro area cz = df_g[['countyname', 'countyfips', 'ZIP', 'stateabbrev']][df_g.czname=='Washington DC'] # Adjust columns names and data type cz.rename(columns={'ZIP':'zipcode', 'countyfips':'CTFIPS'}, inplace=True) cz.CTFIPS = cz.CTFIPS.astype('int')I merge us zip code shape files to GeoIDs data and drop outside of Washington DC metro area.map_cz = usa.merge(cz, on='zipcode', how='inner') map_cz.boundary.plot(figsize=(8, 8));Now I have a zip code shape file which includes only Washington DC metro area. The above figure maps the zip codes included in my study. Currently, housing prices and my forecast data are on a monthly basis. To see the changes across years, I create a new data frame that averaged price data across years.In the cell below, I created a year column from date data, and created the new data frame by year.df['year']=df.date.astype(str).str[:4] df_year= df.groupby(['zipcode', 'year']).mean() df_year.reset_index(inplace=True)I merge the year data and the county shape file.# Merge with county map shape file df_map_year = map_cz.merge(df_year, on='zipcode', how ='right') df_map_year.head()I create a new data frame for each year, 2019 to 2022.df19 = df_map_year[df_map_year.year=='2019'] df20 = df_map_year[df_map_year.year=='2020'] df21 = df_map_year[df_map_year.year=='2021'] df22 = df_map_year[df_map_year.year=='2022']Next, I plot housing price change by year on a map. Housing price changes in 2019.# 2019 data # To keep uniform color scale across figrues vmin=df_map_year.change_price.min() vmax=df_map_year.change_price.max() # Plot a figure fig, ax = plt.subplots(1, figsize=(8, 8)) plt.xticks(rotation=90) map_cz.to_crs(epsg=4326).plot(ax=ax, color='lightgray') df19.plot(column="change_price", cmap="coolwarm", linewidth=0.4, ax=ax, edgecolor=".4", missing_kwds={'color': 'lightgrey'}) bar_info = plt.cm.ScalarMappable(cmap="coolwarm", norm=plt.Normalize(vmin=vmin, vmax=vmax)) bar_info._A = [] cbar = fig.colorbar(bar_info) ax.axis("off");In 2019, the housing price increased only in a center of the city.Next, I plot housing price changes in 2020.# Year 2020 fig, ax = plt.subplots(1, figsize=(8, 8)) plt.xticks(rotation=90) map_cz.to_crs(epsg=4326).plot(ax=ax, color='lightgray') df20.plot(column="change_price", cmap="coolwarm", linewidth=0.4, ax=ax, edgecolor=".4", missing_kwds={'color': 'lightgrey'}) bar_info = plt.cm.ScalarMappable(cmap="coolwarm", norm=plt.Normalize(vmin=vmin, vmax=vmax)) bar_info._A = [] cbar = fig.colorbar(bar_info) ax.axis("off");In 2020, the housing prices outside of the city started rising. 
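The yearly maps in this notebook repeat nearly identical plotting code. A small helper, sketched from that code (it assumes the map_cz base map, the change_price column, and the shared vmin/vmax defined above), keeps the common pieces in one place:

import matplotlib.pyplot as plt

def plot_price_change(df_year_map, base_map, vmin, vmax, title=None):
    # Choropleth of the yearly price change over a grey base map,
    # with a color scale shared across all years.
    fig, ax = plt.subplots(1, figsize=(8, 8))
    base_map.to_crs(epsg=4326).plot(ax=ax, color='lightgray')
    df_year_map.plot(column="change_price", cmap="coolwarm", linewidth=0.4,
                     ax=ax, edgecolor=".4", missing_kwds={'color': 'lightgrey'})
    bar_info = plt.cm.ScalarMappable(cmap="coolwarm",
                                     norm=plt.Normalize(vmin=vmin, vmax=vmax))
    bar_info._A = []
    fig.colorbar(bar_info)
    if title:
        ax.set_title(title)
    ax.axis("off")
    return fig, ax

With it, each year's figure becomes a one-liner such as plot_price_change(df21, map_cz, vmin, vmax, '2021').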
Next, I plot housing price changes in 2021.# year 2021 fig, ax = plt.subplots(1, figsize=(8, 8)) plt.xticks(rotation=90) map_cz.to_crs(epsg=4326).plot(ax=ax, color='lightgray') df21.plot(column="change_price", cmap="coolwarm", linewidth=0.4, ax=ax, edgecolor=".4", missing_kwds={'color': 'lightgrey'}) bar_info = plt.cm.ScalarMappable(cmap="coolwarm", norm=plt.Normalize(vmin=vmin, vmax=vmax)) bar_info._A = [] cbar = fig.colorbar(bar_info) ax.axis("off");In 2021, the housing price outside of the city kept rising rapidly.Next, I plot my forecast for 2022.# 2022 forecast fig, ax = plt.subplots(1, figsize=(8, 8)) plt.xticks(rotation=90) map_cz.to_crs(epsg=4326).plot(ax=ax, color='lightgray') df22.plot(column="change_price", cmap="coolwarm", linewidth=0.4, ax=ax, edgecolor=".4") bar_info = plt.cm.ScalarMappable(cmap="coolwarm", norm=plt.Normalize(vmin=vmin, vmax=vmax)) bar_info._A = [] cbar = fig.colorbar(bar_info) ax.axis("off");My forecast shows that the trend of rising housing prices will slow down in 2022. But I still see some strong growth in some zip codes. Next, I plot the TOP 10 zip codes in terms of price changes in 2022. First, I create a new data frame for the TOP10 zip codes.top10 = df22.sort_values('change_price', ascending=False).head(20) save = top10[['zipcode', 'CTFIPS', 'stateabbrev', 'change_price', 'median_listing_price', 'active_listing_count', 'netinflow']] save.to_csv('Data/top10_22.csv')I plot the top10 zip codes on top of the base map.fig, ax = plt.subplots(figsize = (8,8)) map_cz.to_crs(epsg=4326).plot(ax=ax, color='lightgray') top10.plot(ax=ax, color='red') ax.set_title('Top 20 zip codes for the largest housing price increases in 2022') ax.axis("off");Next, I plot the TOP 10 zip codes in 2019 for comparison.top10_19 = df19.sort_values('change_price', ascending=False).head(20) fig, ax = plt.subplots(figsize = (8,8)) map_cz.to_crs(epsg=4326).plot(ax=ax, color='lightgray') top10_19.plot(ax=ax, color='red') ax.set_title('Top 20 zip codes for the largest housing price increases in 2019') ax.axis("off");1차 퀴즈 주의 사항- 자신이 추가로 생성한 셀 이외의 모든 셀 삭제 금지- 과제에 사용된 변수, 함수 이름 변경 금지- 코드 셀 추가 가능 - 새로운 변수, 함수 이름 사용할 것 - 기존의 변수, 함수 이름 사용 금지 과제 1 (10점)`sorted()` 함수는 리스트를 크기 기준 오름차순으로 정렬한다.a_list = [3, 2, 1, 4, 5] sorted_list = sorted(a_list) sorted_list아래 코드에서 `rev_sorted_list` 변수가 `a_list`를 크기 기준 오름차순으로 정렬한 리스트를 참조하도록 `None` 값을 적절한 표현식으로 대체하라.즉, 아래가 성립해야 한다.```pythonrev_sorted_list = [5, 4, 3, 2, 1]```힌트: `sorted()` 함수의 키워드 인자 활용# None을 sorted(a_list, ...) 형식의 표현식으로 대체해야 함 # sorted() 함수의 키워드 인자를 반드시 사용해야 함. # 단순히 리스트를 입력하면 0점 처리됨. rev_sorted_list = None print(rev_sorted_list)과제 2 (20점)`datetime` 모듈의 `datetime` 객체가 아래와 같이 주어졌다.```pythondt = datetime(2021, 3, 14, 15, 5, 20)```변수 `current_time`이 아래 문자열을 가리키도록 아래 코드를 완성하라.current_time_string = "오늘은 2021년 3월 14일이고, 현재 시각은 오후 3시 5분 20초입니다."힌트: datetime 객체의 다양한 메서드 활용 및 적절한 f-문자열 지정- 참고: [datetime 클래스 문서](https://docs.python.org/ko/3/library/datetime.html)- 참고: [f-문자열 포매팅](https://wikidocs.net/13f)from datetime import datetime, date, time dt = datetime(2021, 3, 14, 15, 5, 20) # 추가 코드가 필요하면 여기에 작성 # None을 적절한 f-문자열로 대체하라. # datetime 객체의 다양한 메서드를 사용할 것. # current_time_string에 사용된 문자열을 그대로 작성할 경우 0점 처리됨. current_time = None print(current_time)과제 3 (10점)0부터 10까지의 수 중에서 3의 배수를 제외한 항목으로 이루어진 리스트를 생성하도록 아래 코드를 완성하라.즉, `list10` 변수는 최종적으로 아래 값을 가리켜야 한다.```python[1, 2, 4, 5, 7, 8, 10]```힌트: `for` 반복문과 `continue` 예약어 활용# pass 예약어를 적절한 코드로 대체하세요. 
# for 반복문을 range() 함수와 함께 활용 list10 = [] pass print(list10)과제 4 (10점)리스트를 입력 받아 사전으로 변환하는 `list_enum()` 함수를 정의하라.단, 키는 항목의 인덱스를 사용한다.예제:```pythonlist_enum(['a', 'b', 'c']) = {0: 'a', 1: 'b', 2: 'c'}list_enum([4, 6, 9, 2]) = {0: 4, 1: 6, 2: 9, 3: 2}```힌트: `enumerate()` 함수 활용# 추가 코드가 필요하면 여기에 작성 # pass 부분을 적절한 코드로 대체할 것 def list_enum(xs): idx_dict = {} pass return idx_dict print(list_enum(['a', 'b', 'c'])) print(list_enum([4, 6, 9, 2]))과제 5 (20점)문자열로 이루어진 리스트가 입력되었을 때 아래 항목들을 갖는 사전 객체를 반환하는 함수 `list2dic()` 함수를 구현하라.반한된 사전의 항목은 아래 형식을 갖는다.- 키(key): 리스트에 사용된 항목- 값(value): 키(key)가 리스트에서 사용된 횟수힌트: `list2dic()` 함수는 아래와 같이 작동해야 한다.```list2dic([2, 5, 2, 3, 3, 2]) = {2: [0, 2, 5], 3: [3, 4], 5: [1]}list2dic([15, 3, 15, 1, 3, 8]) = {1: [3], 3: [1, 4], 8: [5], 15: [0, 2]}```힌트: `enumerate()` 함수, 리스트의 `count()` 메서드,`collections` 모듈의 `defaultdict` 클래스를 이용한다.참고: [사전 기본값 처리](https://www.daleseo.com/python-collections-defaultdict/)from collections import defaultdict # 추가 코드가 필요하면 여기에 작성 # pass 부분을 적절한 코드로 대체할 것 # None 값을 적절한 표현식으로 대체할 것 def list2dic(xs): pass return None print(list2dic([2, 5, 2, 3, 3, 2])) print(list2dic([15, 3, 15, 1, 3, 8]))과제 6 (10점)리스트를 인자로 받아서 사용된 항목의 개수를 반환하는 함수 `count_elem()`를 구현하라.단, 중복 항목은 하나로 간주한다.`count_elem()` 함수는 아래와 같이 작동해야 한다.```count_elem([2, 5, 2, 3, 3, 8, 2, 7]) = 5count_elem([15, 3, 15, 1, 3]) = 3```힌트: `set()` 함수 활용# 추가 코드가 필요하면 여기에 작성 # pass 부분을 적절한 코드로 대체할 것 # None 값을 적절한 표현식으로 대체할 것 def count_elem(xs): pass return None print(count_elem([2, 5, 2, 3, 3, 8, 2, 7])) print(count_elem([15, 3, 15, 1, 3]))과제 7 (20점)0부터 10까지의 자연수 중에서 3으로 나눈 나머지가 2인 수의 제곱으로 이루어진 리스트를 조건제시법으로 정의하라.즉, 아래 리스트를 조건제시법으로 생성해야 한다.```python[4, 25, 64]```힌트: 리스트 조건제시법(comprehension) 활용# 필요한 코드가 있다면 아래에 추가 # None 값을 적절한 리스트 조건제시법으로 대체할 것 list_modulo3 = None print(list_modulo3)수고했습니다. 퀴즈는 여기까지입니다.!!! 
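As a neutral illustration of the enumerate/defaultdict grouping pattern that the hints for tasks 4 and 5 point to (a generic example on unrelated data, not the graded answer):

from collections import defaultdict

letters = ['a', 'b', 'a', 'c']
positions = defaultdict(list)
for idx, value in enumerate(letters):
    positions[value].append(idx)   # collect every index under its value
print(dict(positions))             # {'a': [0, 2], 'b': [1], 'c': [3]}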
=== 주의: 여기서부터는 수정 절대 금지===# 과제 1 q01 = (rev_sorted_list == sorted(a_list, reverse=True)) # 과제 2 q02 = (current_time == current_time_string) # 과제 3 q03 = (list10 == [1, 2, 4, 5, 7, 8, 10]) # 과제 4 q04_answer1 = (list_enum(['a', 'b', 'c']) == {0: 'a', 1: 'b', 2: 'c'}) q04_answer2 = (list_enum([4, 6, 9, 2]) == {0: 4, 1: 6, 2: 9, 3: 2}) q04 = q04_answer1 and q04_answer2 # 과제 5 q05_answer1 = (list2dic([2, 5, 2, 3, 3, 2]) == {2: [0, 2, 5], 3: [3, 4], 5: [1]}) q05_answer2 = (list2dic([15, 3, 15, 1, 3, 8]) == {1: [3], 3: [1, 4], 8: [5], 15: [0, 2]}) q05 = q05_answer1 and q05_answer2 # 과제 6 q06_answer1 = (count_elem([2, 5, 2, 3, 3, 8, 2, 7]) == 5) q06_answer2 = (count_elem([15, 3, 15, 1, 3]) == 3) q06 = q06_answer1 and q06_answer2 # 과제 7 q07 = (list_modulo3 == [4, 25, 64])최종 점수answers_10 = [q01, q03, q04, q06] answers_20 = [q02, q05, q07] total_score = 0 for q in answers_10: total_score += q * 10 for q in answers_20: total_score += q * 20 total_scoreRemote Access To Climate Model Output- Climate model archive ([CMIP6](https://esgf-node.llnl.gov/search/cmip6/)) is accessed using [AWS.jl](https://github.com/JuliaCloud/AWS.jl) and [Zarr.jl](https://github.com/meggart/Zarr.jl) via [ClimateModels.jl](https://github.com/gaelforget/ClimateModels.jl)- Choose `institution_id`, `source_id`, `variable_id` (inside `CMIP6.jl`)- Compute and plot (1) time mean global map and (2) time evolving global mean (inside `CMIP6.jl`)using ClimateModels p=dirname(pathof(ClimateModels)) include(joinpath(p,"../examples/CMIP6.jl"))1980×2 DataFrame  Row │ time  tas   │ DateTime…  Float64  ──────┼──────────────────── 1 │ 1850-01-… 283.762 2 │ 1850-02-… 284.126 3 │ 1850-03-… 284.995 4 │ 1850-04-… 285.989 5 │ 1850-05-… 286.785 6 │ 1850-06-… 287.528 7 │ 1850-07-… 288.115 8 │ 1850-08-… 288.155 9 │ 1850-09-… 287.43 10 │ 1850-10-… 286.225 11 │ 1850-11-… 285.062 ⋮ │ ⋮ ⋮ 1971 │ 2014-03-… 286.239 1972 │ 2014-04-… 287.248 1973 │ 2014-05-… 287.901 1974 │ 2014-06-… 288.709 1975 │ 2014-07-… 289.282 1976 │ 2014-08-… 289.349 1977 │ 2014-09-… 288.688 1978 │ 2014-10-… 287.646 1979 │ 2014-11-… 286.56 1980 │ 2014-12-… 285.529  1959 rows omittedPandasimport pandas as pd df = pd.read_csv('melb_data_local.csv') df.head() df.tail() type(df) dir(df) #All of the functions available in df len(df) df.shape df.loc[5] df_short=df[0:10] df_short.shape df_thin=df[['Price','Method','Rooms','Car']] df_thin.shape df_cheap = df_thin[df_thin['Price']<=1000000] df_cheap.shape df_thin['Price'].describe() df_thin.describe() df_thin[df_thin['Rooms']==4]['Price'].mean() df_thin[df_thin['Rooms']==3]['Price'].mean() df_thin[df_thin['Rooms']==2]['Price'].mean() df_thin[df_thin['Rooms']==1]['Price'].mean() df_thin[df_thin['Rooms']==5]['Price'].mean() #df_thin['Rooms'].values.unique() g=df.groupby(['Rooms']) for key,df_key in g: print(key) print(df_key) g.mean() import matplotlib.pyplot as plt import seaborn as sns sns.set() ax=sns.pairplot(df_thin,diag_kind='hist') ax=sns.pairplot(df_thin,diag_kind='hist',hue='Rooms')Debuggingimport random def fact(x): if x == 0: return 1 return x*fact(x-1) fact(4) def code_to_debug(): #import pdb; pdb.set_trace() for i in range(10): x=random.random() fact(x) %xmode Verbose code_to_debug() def fact_debugged(x): if not isinstance(x,int): print("x is not an integer") return -1 if x == 0: return 1 return x*fact(x-1) def code_to_debug(): import pdb; pdb.set_trace() #for i in range(10): x=random.random() fact_debugged(x)Le plus grand commun diviseur (PGCD)Le PGCD de nombres entiers naturels différents de zéro est mis en œuvre dans de nombreuses 
applications comme :- la résolution d’équations diophantiennes ;- la recherche de triplets pythagoriciens ;- la simplification de fractions.Pour cet exercice, on va se concentrer sur la dernière. Exemple de simplification de fractionsPartons d’un exemple simple où l’on souhaite simplifier la fraction $\frac{6}{3}$. Aucun calcul complexe, on observe directement que $\frac{6}{3} = \frac{2}{1} = 2$.En utilisant cette fois-ci la méthode de recherche du PGCD, plus rigoureuse que l’intuition, on liste tout d’abord les diviseurs du numérateur, puis ceux du dénominateur :- 6 : 1, 2, 3, 6- 3 : 1, 3La deuxième étape consiste à comparer les deux listes, puis à ne retenir que les diviseurs en commun, à savoir 1 et 3. Enfin, on sélectionne le plus grand nombre de cet ensemble. Ici, le PGCD vaut 3.Pour simplifier la fraction, il reste à diviser chaque terme par le PGCD :$\frac{6}{3} = 2$ ; $\frac{3}{3} = 1$La fraction réduite vaut donc $\frac{2}{1}$ Méthode de détermination du PGCD par l’algorithme d’EuclideL’exemple précédent était simpliste. Et pourtant… Pourtant, nous avons eu besoin de huit opérations pour parvenir à déterminer le PGCD de 6 et 3 :- six divisions euclidiennes pour obtenir la liste des diviseurs de chaque terme ;- une comparaison ;- une extraction.Si nous voulions chercher le PGCD de 3 045 623 76 et 3 480 avec cette méthode, ça nous prendrait un temps infiniment long. Et cette estimation n’est pas loin de la vérité. Il existe en effet des nombres qui résistent très bien à la détermination de leurs diviseurs. C’est le cas des nombres RSA, utilisés dans le chiffrement RSA, qui font intervenir des nombres semi-premiers (nombres qui ont exactement deux facteurs premiers, de telle manière que $n = pq$).Pour revenir à notre problème de réduire la fraction $\frac{3 045 623 76}{3 480}$, nous allons solliciter une méthode plus rapide de détermination du PGCD que l’on doit à Euclide (300 av. J.-C.). Il en existe d’autres, mais celle-ci a notre préférence. Lorsque l’on divise 6 par 3, on va se demander par quel entier naturel il faut multiplier 3 pour arriver le plus près de 6. C’est une division euclidienne :$a = qb + r$Où $a$ est le dividende, $b$ le diviseur, $q$ le quotient et $r$ le reste. Une condition importante à conserver en mémoire : $r < b$. Si ce n’est pas le cas, cela signifie que l’on peut augmenter $q$ de au moins 1. Dans l’équation $6 = 1 \times 3 + 3$, $r$ ne satisfait pas la condition, donc on peut ajouter 1 à $q$ : $6 = 2 \times 3 + 0$Un cas légèrement plus complexe : trouver le PGCD de 10 et 4. Le calcul nécessite cette fois-ci deux opérations :1. $10 = 2 \times 4 + 2$2. $4 = 2 \times 2 + 0$Le PGCD de 10 et 4 est en fait le dernier reste non nul. Comme dans la deuxième opération $r = 0$, on remonte à la première où $r = 2$.Prenons un autre exemple afin de mettre en lumière la mécanique à l’œuvre. Le PGCD de 80 et 12 vaut 4 :1. $80 = 6 \times 12 + 8$2. $12 = 1 \times 8 + 4$3. $8 = 2 \times 4 + 0$Ce qui est important de remarquer c’est qu’à chaque étape, $r$ devient $b$ et $b$ devient $a$ à l’étape d’après, de telle manière que le dernier reste non nul est également $b$ à la dernière opération, lorsque $r$ est nul. On peut en déduire que :$F(a, b) = \left \{ \begin{array}{l l} b & \text{si } a \bmod b = 0 \\ F(b, a \bmod b) & \text{sinon} \end{array} \right.$À vous d’écrire la version pythonique de l’algorithme récursif !def gcd(a, b): """Returns the greatest common divisor with the Euclidean algorithm. 
a -- int: numerator b -- int: denominator """ # Your code hereSimplification de fractionsUne fois le PGCD connu, rien de bien compliqué pour simplifier une fraction. Si l’on calcule le PGCD de 3 045 623 76 et 3 480, on obtient 24. Il suffit désormais de diviser chaque nombre par le PGCD pour obtenir la fraction simplifiée :1. $F(304562376, 3480) = 24$2. $304562376 \div 24 = 12 690 099$3. $3480 \div 24 = 145$On obtient donc l’égalité suivante :4. $\frac{304562376}{3480} = \frac{12690099}{145}$À vous d’écrire la fonction qui effectue cette simplification !def reduce(a, b): """Returns a tuple with numerator and denominator of a fraction after reduction, using GCD. a -- int: numerator b -- int: denominator """ # Your code hereReflect Tables into SQLAlchemy ORM# Python SQL toolkit and Object Relational Mapper import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func, and_ from sqlalchemy import desc engine = create_engine("sqlite:///Resources/hawaii.sqlite") # reflect an existing database into a new model Base = automap_base() # reflect the tables Base.prepare(engine, reflect=True) # We can view all of the classes that automap found Base.classes.keys() # Save references to each table Measurement = Base.classes.measurement Station = Base.classes.station # Create our session (link) from Python to the DB session = Session(engine)Exploratory Climate Analysis# Design a query to retrieve the last 12 months of precipitation data and plot the results date = [] for _ in session.query(Measurement.date).order_by(Measurement.date.desc()): date.append(_.date) # Calculate the date 1 year ago from the last data point in the database last_date = date[1] one_year_ago = datetime.fromisoformat(last_date) - relativedelta(years=1) one_year_ago # Perform a query to retrieve the data and precipitation scores data = session.query(Measurement).\ filter(Measurement.date >= one_year_ago) data_dict = dict() station = [] date = [] prcp = [] tobs = [] for _ in data: station.append(_.station) date.append(_.date) prcp.append(_.prcp) tobs.append(_.tobs) data_dict["station"] = station data_dict["date"] = date data_dict["prcp"] = prcp data_dict["tobs"] = tobs # Save the query results as a Pandas DataFrame and set the index to the date column data_df = pd.DataFrame(data_dict).set_index("date") # Sort the dataframe by date data_df = data_df.sort_index() data_df # Use Pandas Plotting with Matplotlib to plot the data plt.figure(figsize=(18,18)) plt.bar(data_df.index.values,data_df["prcp"], color='b', align="center", width=1) plt.xlabel("Date") plt.ylabel("Precipitation (2016-08-23 to 2017-08-23)") plt.show()![precipitation](Images/precipitation.png)# Use Pandas to calcualte the summary statistics for the precipitation data data_df["prcp"].describe()![describe](Images/describe.png)# Design a query to show how many stations are available in this dataset? session.query(Measurement).group_by(Measurement.station).count() # What are the most active stations? (i.e. what stations have the most rows)? # List the stations and the counts in descending order. active_stations = session.query(Measurement.station, \ func.count(Measurement.station)).\ group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).\ all() # Using the station id from the previous query, calculate the lowest temperature recorded, # highest temperature recorded, and average temperature of the most active station? 
max_temp = session.query(Measurement.station, \ func.max(Measurement.tobs)).\ filter(Measurement.station == active_stations[0][0]).\ all() min_temp = max_temp = session.query(Measurement.station, \ func.min(Measurement.tobs)).\ filter(Measurement.station == active_stations[0][0]).\ all() avg_temp = session.query(Measurement.station, \ func.avg(Measurement.tobs)).\ filter(Measurement.station == active_stations[0][0]).\ all() calculations = [max_temp[0][1], min_temp[0][1], avg_temp[0][1]] calculations # Choose the station with the highest number of temperature observations. # Query the last 12 months of temperature observation data for this station and plot the results as a histogram station_temp = session.query(Measurement.station, \ func.count(Measurement.tobs)).\ group_by(Measurement.station).order_by(func.count(Measurement.tobs).desc()).\ all() temp_df = data_df.loc[data_df["station"]==station_temp[0][0]] temp_df n_bins = 12 plt.hist(temp_df["tobs"], color='b', alpha= 0.5, bins=n_bins) plt.ylabel("Frequency") plt.title(f"tobs for station {station_temp[0][0]} (2016-08-23 to 2017-08-23)") plt.show()![precipitation](Images/station-histogram.png)# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d' # and return the minimum, average, and maximum temperatures for that range of dates def calc_temps(start_date, end_date): """TMIN, TAVG, and TMAX for a list of dates. Args: start_date (string): A date string in the format %Y-%m-%d end_date (string): A date string in the format %Y-%m-%d Returns: TMIN, TAVE, and TMAX """ return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\ filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all() # function usage example print(calc_temps('2012-02-28', '2012-03-05')) # Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax # for your trip using the previous year's data for those same dates. print(calc_temps('2017-03-25', '2017-03-30')) # Plot the results from your previous query as a bar chart. # Use "Trip Avg Temp" as your Title # Use the average temperature for the y value # Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr) calc = calc_temps('2017-03-25', '2017-03-30') yerr = calc[0][2]- calc[0][0] x_axis= [""] plt.bar(x_axis, calc[0][1], yerr=calc[0][2]- calc[0][0], color="r",alpha =0.5) plt.title("Trip Avg Temp") # Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates. # Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation session.query(Measurement.station, Station.name, \ Station.latitude, Station.longitude, \ Station.elevation,\ func.sum(Measurement.prcp)).\ group_by(Measurement.station).\ order_by(func.sum(Measurement.prcp).desc()).\ join(Station, Measurement.station == Station.station).\ filter(Measurement.date <= '2017-03-30').filter(Measurement.date >= '2011-03-25').\ all()Optional Challenge Assignment# Create a query that will calculate the daily normals # (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day) def daily_normals(date): """Daily Normals. 
Optional Challenge Assignment

# Create a query that will calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
    """Daily Normals.

    Args:
        date (str): A date string in the format '%m-%d'

    Returns:
        A list of tuples containing the daily normals, tmin, tavg, and tmax
    """
    sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
    return session.query(*sel).filter(func.strftime("%m-%d", Measurement.date) == date).all()

daily_normals("01-01")

# calculate the daily normals for your trip
# push each tuple of calculations into a list called `normals`
normals = []

# Set the start and end date of the trip
start_date = datetime(year=2018, month=3, day=25)
end_date = datetime(year=2018, month=3, day=30)

# Use the start and end date to create a range of dates
delta = end_date - start_date

# Strip off the year and save a list of %m-%d strings
travel_dates = []
for i in range(delta.days + 1):
    day = start_date + timedelta(days=i)
    day = str(day)[5:-9]
    travel_dates.append(day)

# Loop through the list of %m-%d strings and calculate the normals for each date
for date in travel_dates:
    normals.append(daily_normals(date))
normals

# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index
trip_dict = dict()
dates = []
tmin = []
tmax = []
tavg = []
for i in range(len(travel_dates)):
    dates.append("2018-" + travel_dates[i])
    tmin.append(normals[i][0][0])
    tmax.append(normals[i][0][2])
    tavg.append(normals[i][0][1])
trip_dict["date"] = dates
trip_dict["tmin"] = tmin
trip_dict["tavg"] = tavg
trip_dict["tmax"] = tmax
trip = pd.DataFrame(trip_dict)
trip

# Plot the daily normals as an area plot with `stacked=False`
x = trip["date"]
fig, ax = plt.subplots()
ax.plot(x, trip["tmin"], color="b", lw=3, alpha=.5)
ax.fill_between(x, 0, trip["tmin"], alpha=.3)
ax.plot(x, trip["tavg"], color="r", lw=3, alpha=.5)
ax.fill_between(x, trip["tmin"], trip["tavg"], alpha=.3)
ax.plot(x, trip["tmax"], color="g", lw=3, alpha=.5)
ax.fill_between(x, trip["tavg"], trip["tmax"], alpha=.3)
plt.legend(["tmin", "tavg", "tmax"], loc='best')

**Trigonometric/Hyperbolic Functions** (trigonometric and hyperbolic functions)

Constants

import numpy as np

PI = np.pi
E = np.e  # natural constant
print(PI, E)

3.141592653589793 2.718281828459045

deg2rad and rad2deg

import numpy as np

degree = np.array([30, 45, 60, 90, 180, 360])
rad = np.deg2rad(degree)
degree = np.rad2deg(rad)
print("radian: ", rad.round(3))
print("degree again: ", degree)

radian:  [0.524 0.785 1.047 1.571 3.142 6.283]
degree again:  [ 30. 45. 60. 90. 180. 360.]
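For reference, np.deg2rad amounts to multiplying by $\pi / 180$ and np.rad2deg to the inverse. A small self-contained check of that (the degree array is repeated here so the cell stands alone):

import numpy as np

degree = np.array([30, 45, 60, 90, 180, 360])
manual_rad = degree * np.pi / 180                   # rad = deg * pi / 180
print(np.allclose(manual_rad, np.deg2rad(degree)))  # True
print(np.allclose(degree, np.rad2deg(manual_rad)))  # True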
Trigonometric Functions

import numpy as np

x = np.deg2rad(np.linspace(0, 360, 11))
sin, cos = np.sin(x), np.cos(x)
tan = np.tan(x)
print(f"np.tan: \n{tan.round(2)}")
print(f"np.sin / np.cos: \n{(sin/cos).round(2)}")

import numpy as np
import matplotlib.pyplot as plt

PI = np.pi
x = np.linspace(0, 4*PI, 100)
sin, cos, tan = np.sin(x), np.cos(x), np.tan(x)

fig, ax = plt.subplots(figsize=(10,5))
ax.plot(x, sin, label=r'$y = sin(x)$')
ax.plot(x, cos, label=r'$y = cos(x)$')
ax.plot(x, tan, label=r'$y = tan(x)$')

xticks = np.arange(0, 4*PI+0.1, 0.5*PI)
xticklabels = [str(xtick)+r'$\frac{\pi}{2}$' for xtick in range(9)]
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
ax.tick_params(axis='x', labelsize=20)
ax.set_ylim([-2, 2])
ax.legend()

Exponential Functions

import numpy as np

E = np.e
x = np.arange(1, 7)
print(f"E**x: \n{(E**x).round(2)}")
print(f"np.exp(x): \n{np.exp(x).round(2)}")

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(-5, 5, 100)
sigmoid = 1/(1 + np.exp(-x))

fig, ax = plt.subplots(figsize=(10,5))
ax.plot(x, sigmoid)
ax.tick_params(labelsize=20)

Hyperbolic Functions

import numpy as np

x = np.linspace(0, 1, 5)
sinh, cosh = np.sinh(x), np.cosh(x)
tanh = np.tanh(x)
print(f"np.tanh: \n{tanh.round(2)}")
print(f"np.sinh / np.cosh: \n{(sinh/cosh).round(2)}")

import numpy as np

x = np.linspace(0, 1, 5)
sinh = np.sinh(x)
sinh_exp = (np.exp(x) - np.exp(-x)) / 2
cosh = np.cosh(x)
cosh_exp = (np.exp(x) + np.exp(-x)) / 2
print("sinh_exp: ", sinh_exp)
print("cosh_exp: ", cosh_exp)

sinh_exp:  [0. 0.25261232 0.52109531 0.82231673 1.17520119]
cosh_exp:  [1. 1.0314131 1.12762597 1.29468328 1.54308063]

![image.png](data:image/png;base64,…)
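Written out, the standard definitions that the hyperbolic cells above verify numerically:

$\sinh x = \dfrac{e^{x} - e^{-x}}{2}, \quad \cosh x = \dfrac{e^{x} + e^{-x}}{2}, \quad \tanh x = \dfrac{\sinh x}{\cosh x} = \dfrac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$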
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(-3, 3, 100)
sigmoid = 1 / (1 + np.exp(-x))
tanh = np.tanh(x)
relu = np.maximum(x, 0)

fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(x, sigmoid, label=r'$y = \sigma(x)$', alpha=0.7)
ax.plot(x, tanh, label=r'$y = tanh(x)$', alpha=0.7)
ax.plot(x, relu, label=r'$y = ReLU(x)$', alpha=0.7)
ax.tick_params(labelsize=10)
ax.legend(fontsize=10)

**Quadratic, Irrational, Rational Functions**

Quadratic Functions

import numpy as np

a = np.random.randint(0, 10, (10, ))
square1 = a*a
square2 = a**2
square3 = np.square(a)
print("a: \n", a, '\n')
print(f"a*a: \n{square1}")
print(f"a**2: \n{square2}")
print(f"np.square(a): \n{square3}")

a: 
 [8 0 1 5 1 5 8 5 9 0] 

a*a: 
[64 0 1 25 1 25 64 25 81 0]
a**2: 
[64 0 1 25 1 25 64 25 81 0]
np.square(a): 
[64 0 1 25 1 25 64 25 81 0]
1 25 1 25 64 25 81 0] a**2: [64 0 1 25 1 25 64 25 81 0] np.square(a): [64 0 1 25 1 25 64 25 81 0]Irrational Functionsimport numpy as np a = np.random.randint(0, 10, (4, )) sqrt1 = a**(1/2) print(sqrt1) sqrt2 = np.sqrt(a) print(sqrt2) cbrt1 = a**(1/3) print(cbrt1) cbrt2 = np.cbrt(a) print(cbrt2)[1.41421356 2. 1.41421356 1.41421356] [1.41421356 2. 1.41421356 1.41421356] [1.25992105 1.58740105 1.25992105 1.25992105] [1.25992105 1.58740105 1.25992105 1.25992105]Rational Functionsimport numpy as np a = np.random.uniform(0, 10, (4, )) recip1 = 1/a recip2 = a**(-1) recip3 = np.reciprocal(a) print(recip1) print(recip2) print(recip3)[0.57171156 0.6680051 0.30409902 0.12295034] [0.57171156 0.6680051 0.30409902 0.12295034] [0.57171156 0.6680051 0.30409902 0.12295034]![image.png](attachment:image.png)import numpy as np a = np.random.uniform(0, 10, (4, )) y1 = a**(-2) y2 = np.reciprocal(np.square(a)) print(y1,'\n',y2) z1 = a**(-1/2) z2 = np.reciprocal(np.sqrt(a)) print(z1,'\n',z2)[0.27393882 0.01094338 0.0264708 0.0946275 ] [0.27393882 0.01094338 0.0264708 0.0946275 ] [0.72345812 0.32343566 0.40335902 0.55463126] [0.72345812 0.32343566 0.40335902 0.55463126]Power Functionsimport numpy as np a = np.random.uniform(0, 5, (4, )) s1 = np.square(a).round(2) s2 = (a**2).round(2) s3 = np.power(a, 2).round(2) print(s1,'\n',s2,'\n',s3) re1 = np.reciprocal(a).round(2) re2 = (a**(-1)).round(2) re3 = np.power(a, -1).round(2) print(re1,'\n',re2,'\n',re3) import numpy as np x = np.random.uniform(0, 5, (4, )) y1 = 3*x**3 - 2*x**2 + x - 2 y2 = 3*np.power(x, 3) - 2*np.power(x, 2) + x - 2 import numpy as np a = np.random.uniform(0, 5, (4, )) exp1 = np.exp(a).round(2) exp2 = np.power(np.e, a).round(2) print(exp1) print(exp2) import numpy as np a = np.random.uniform(0, 5, (4, )) b = np.random.uniform(0, 5, (4, )) power1 = (a**b).round(2) power2 = np.power(a,b).round(2) print(power1) print(power2)[ 2.7 12.79 61.38 158.94] [ 2.7 12.79 61.38 158.94]**Logarithmic Functions** Log Functionsimport numpy as np import matplotlib.pyplot as plt x = np.linspace(0.001, 10, 300) log = np.log(x) fig, ax = plt.subplots(figsize=(20,10)) ax.plot(x, log) ax.tick_params(labelsize=20) import numpy as np a = np.random.uniform(1, 5, (4, )) log = np.log(a) exp = np.exp(log) # inverse operation: exp undoes log print(a) print(log) print(exp)[3.08637145 3.28626759 4.6096729 2.99925095] [1.12699611 1.18975245 1.5281569 1.09836257] [3.08637145 3.28626759 4.6096729 2.99925095]Properties of Log Taking the log of data that is linear with respect to multiplication turns it into data that is linear with respect to addition, so the linearity property is satisfied.
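The standard log rules at work in this section: $$\log(ab) = \log a + \log b, \qquad \log\left(\frac{a}{b}\right) = \log a - \log b, \qquad \log(a^{n}) = n\log a$$ The snippet below checks the product rule numerically by comparing np.log(a) + np.log(b) with np.log(a*b).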
![image.png](attachment:image.png)import numpy as np a = np.random.uniform(1, 5, (4, )) b = np.random.uniform(1, 5, (4, )) print((np.log(a) + np.log(b)).round(3)) print(np.log(a*b).round(3))[1.378 2.86 1.848 1.79 ] [1.378 2.86 1.848 1.79
]![image.png](attachment:image.png)import numpy as np a = np.random.uniform(1, 5, (4, )) log2 = np.log(a) / np.log(2) #log2(a) log3 = np.log(a) / np.log(3) print(log2) print(log3)[2.03781948 1.5172318 1.83579586 1.82535321] [1.28572094 0.95726668 1.15825823 1.15166965]Binary Entropy ![image.png](attachment:image.png)import numpy as np p = np.random.uniform(0, 1, (4, )) be_e = -(p*np.log(p) + (1-p)*np.log(1-p)) be_2 = -(p*(np.log(p)/np.log(2)) + (1-p)*(np.log(1-p)/np.log(2))) print("be_e: \n", be_e.round(2)) print("be_2: \n", be_2.round(2))be_e: [0.63 0.68 0.6 0.2 ] be_2: [0.91 0.98 0.86 0.29]Load data# Data path data_path = '/data3/martin/tms_gene_data' output_folder = data_path + '/DE_result' # Load the data adata_combine = util.load_normalized_data(data_path) temp_facs = adata_combine[adata_combine.obs['b_method']=='facs',] temp_droplet = adata_combine[adata_combine.obs['b_method']=='droplet',]Generate a list of tissue-cell types for DE testingcell_type_list = list(set(temp_facs.obs['cell_ontology_class'])) tissue_list = list(set(temp_facs.obs['tissue'])) min_cell_number = 25 analysis_list = [] analysis_info = {} # for cell_type in cell_type_list: for tissue,cell_type in product(tissue_list, cell_type_list): analyte = '%s.%s'%(tissue,cell_type) ind_select = (temp_facs.obs['cell_ontology_class'] == cell_type) & \ (temp_facs.obs['tissue'] == tissue) n_young = (temp_facs.obs['age'][ind_select].isin(['1m', '3m'])).sum() n_old = (temp_facs.obs['age'][ind_select].isin(['18m', '21m', '24m', '30m'])).sum() analysis_info[analyte] = {} analysis_info[analyte]['n_young'] = n_young analysis_info[analyte]['n_old'] = n_old if (n_young>min_cell_number) & (n_old>min_cell_number) & (cell_type!='nan'): print('%s, n_young=%d, n_old=%d'%(analyte, n_young, n_old)) analysis_list.append(analyte)Pancreas.pancreatic ductal cell, n_young=163, n_old=150 Pancreas.pancreatic D cell, n_young=123, n_old=68 Pancreas.pancreatic B cell, n_young=522, n_old=820 Pancreas.leukocyte, n_young=54, n_old=66 Pancreas.pancreatic A cell, n_young=364, n_old=157 Pancreas.endothelial cell, n_young=86, n_old=116 Pancreas.pancreatic acinar cell, n_young=191, n_old=385 Brain_Myeloid.macrophage, n_young=44, n_old=243 Brain_Myeloid.microglial cell, n_young=4488, n_old=8642 Skin.basal cell of epidermis, n_young=678, n_old=994 Skin.epidermal cell, n_young=260, n_old=232 Skin.bulge keratinocyte, n_young=1370, n_old=1234 Marrow.precursor B cell, n_young=489, n_old=367 Marrow.late pro-B cell, n_young=276, n_old=135 Marrow.CD4-positive, alpha-beta T cell, n_young=29, n_old=251
Marrow.macrophage, n_young=182, n_old=255 Marrow.basophil, n_young=29, n_old=42 Marrow.mature alpha-beta T cell, n_young=99, n_old=231 Marrow.early pro-B cell, n_young=29, n_old=43 Marrow.granulocyte, n_young=742, n_old=2150 Marrow.NK cel[...]DE using R package MAST## DE testing gene_name_list = np.array(temp_facs.var_names) DE_result_MAST = {} for i_analyte,analyte in enumerate(analysis_list): print(analyte, '%d/%d'%(i_analyte, len(analysis_list))) tissue,cell_type = analyte.split('.') ind_select = (temp_facs.obs['cell_ontology_class'] == cell_type) & \ (temp_facs.obs['tissue'] == tissue) adata_temp = temp_facs[ind_select,] # reformatting adata_temp.X = np.array(adata_temp.X.todense()) adata_temp.obs['condition'] = [int(x[:-1]) for x in adata_temp.obs['age']] adata_temp.obs = adata_temp.obs[['condition', 'sex']] if len(set(adata_temp.obs['sex'])) <2: covariate = '' else: covariate = '+sex' # # toy example # covariate = '' # np.random.seed(0) # ind_select = np.random.permutation(adata_temp.shape[0])[0:100] # ind_select = np.sort(ind_select) # adata_temp = adata_temp[ind_select, 0:3] # adata_temp.X[:,0] = (adata_temp.obs['sex'] == 'male')*3 # adata_temp.X[:,1] = (adata_temp.obs['condition'])*3 # DE using MAST R_cmd = util.call_MAST_age() get_ipython().run_cell_magic(u'R', u'-i adata_temp -i covariate -o de_res', R_cmd) de_res.columns = ['gene', 'raw-p', 'coef', 'bh-p'] de_res.index = de_res['gene'] DE_result_MAST[analyte] = pd.DataFrame(index = gene_name_list) DE_result_MAST[analyte] = DE_result_MAST[analyte].join(de_res) # fc between yound and old X = adata_temp.X y = (adata_temp.obs['condition']>10) DE_result_MAST[analyte]['fc'] = X[y,:].mean(axis=0) - X[~y,:].mean(axis=0) # breakPancreas.pancreatic ductal cell 0/131 Pancreas.pancreatic D cell 1/131 Pancreas.pancreatic B cell 2/131 Pancreas.leukocyte 3/131 Pancreas.pancreatic A cell 4/131 Pancreas.endothelial cell 5/131 Pancreas.pancreatic acinar cell 6/131 Brain_Myeloid.macrophage 7/131 Brain_Myeloid.microglial cell 8/131 Skin.basal cell of epidermis 9/131 Skin.epidermal cell 10/131 Skin.bulge keratinocyte 11/131 Marrow.precursor B cell 12/131 Marrow.late pro-B cell 13/131 Marrow.CD4-positive, alpha-beta T cell 14/131 Marrow.macrophage 15/131 Marrow.basophil 16/131 Marrow.mature alpha-beta T cell 17/131 Marrow.early pro-B cell 18/131 Marrow.granulocyte 19/131 Marrow.NK cell 20/131 Marrow.naive B cell 21/131 Marrow.promonocyte 22/131 Marrow.granulocyte monocyte progenitor cell 23/131 Marrow.immature B cell 24/131 Marrow.hematopoietic stem cell 25/131 Marrow.granulocytopoietic cell 26/131 Brain_Non-Myeloid.oligodendrocyte precursor cell 27/131 Brain_Non-Myeloid.oligodendrocyte 28/131 Brain_Non-Myeloid.brain peri[...]Save DE resultswith open(output_folder+'/DE_tissue_cell_FACS.pickle', 'wb') as handle: pickle.dump(DE_result_MAST, handle) pickle.dump(analysis_list, handle) pickle.dump(analysis_info, handle)Validation# Load DE result with open(output_folder+'_old/DE.pickle', 'rb') as handle: DE_result_MAST_temp = pickle.load(handle) analysis_list_temp = pickle.load(handle) for analyte in analysis_list: if analyte in analysis_list_temp: bh_p = DE_result_MAST[analyte]['bh-p'] bh_p_temp = DE_result_MAST_temp[analyte]['bh-p'] print('%s, New:%d, Old:%d, Overlap:%d'%(analyte, np.sum(bh_p<0.01), np.sum(bh_p_temp<0.01), np.sum((bh_p<0.01) & (bh_p_temp<0.01))))Trachea.endothelial cell, New:2437, Old:2312, Overlap:2177 Trachea.fibroblast, New:9847, Old:8395, Overlap:8242 Trachea.basal epithelial cell of tracheobronchial tree, New:1539, Old:1895, 
Overlap:1169 Trachea.macrophage, New:2225, Old:1221, Overlap:1159 Lung.non-classical monocyte, New:83, Old:86, Overlap:65 Lung.lymphatic endothelial cell, New:54, Old:75, Overlap:54 Lung.classical monocyte, New:121, Old:107, Overlap:72 Lung.fibroblast of lung, New:717, Old:478, Overlap:384 Lung.dendritic cell, New:1710, Old:1900, Overlap:1188 Lung.mature natural killer T cell, New:1513, Old:3384, Overlap:1385 Lung.capillary endothelial cell, New:5690, Old:6824, Overlap:5079 Pancreas.pancreatic acinar cell, New:1396, Old:244, Overlap:223 Pancreas.endothelial cell, New:120, Old:97, Overlap:83 Pancreas.pancreatic D cell, New:1332, Old:715, Overlap:650 Pancreas.pancreatic A cell, New:5651, Old:2568, Overlap:2482 Pancreas.leukocyte, New:482, Old:48, Overlap:48 Pancreas.pancreatic ductal cell, New:3781, Old[...]Dynamic Programming Fibonacci Idealdef fibo(n): t1, t2 = 0, 1 for _ in range(n): t1, t2 = t2, t1 + t2 return t1 fibo(10) %timeit fibo(15)1000000 loops, best of 5: 1.07 µs per loopNaive Recursivedef naiveFibo(n): if n < 2: return n else: return naiveFibo(n-1) + naiveFibo(n-2) naiveFibo(10) %timeit naiveFibo(15)1000 loops, best of 5: 241 µs per loopDP Solutiondef dpFibo(n, T): if n < 2: T[0] = 0 T[1] = 1 return T[n] else: T[n - 1] = dpFibo(n - 1, T) return T[n - 1] + T[n - 2] dpFibo(10, [0]*10) %timeit dpFibo(15, [0]*15)100000 loops, best of 5: 4.22 µs per loopCoins Naiveimport math def naiveCoins(D, p): if p == 0: return 0, [] else: minc = math.inf mins = None for di in D: if di <= p: c, s = naiveCoins(D, p - di) if c < minc: minc = c mins = s + [di] return 1 + minc, mins naiveCoins([1, 5, 10, 20, 50], 40) naiveCoins([1, 5, 10, 20, 25, 50], 40) %timeit naiveCoins([1, 5, 10, 20, 50], 40)10 loops, best of 5: 140 ms per loopDP Coinsdef dpCoins(D, p): C = [0]*(p + 1) S = [-1]*(p + 1) for pi in range(1, p + 1): minc = math.inf mins = -1 for di in D: if di <= pi and C[pi - di] < minc: minc = C[pi - di] mins = di C[pi] = 1 + minc S[pi] = mins return C, S def showMeTheMoney(C, S): print(C[-1], end=": ") p = len(C) - 1 while p > 0: print(S[p], end = " ") p -= S[p] print() showMeTheMoney(*dpCoins([1, 5, 10, 20, 50], 40)) showMeTheMoney(*dpCoins([1, 5, 10, 20, 25, 50], 40)) showMeTheMoney(*dpCoins([1, 5, 10, 20, 50], 85)) showMeTheMoney(*dpCoins([1, 5, 10, 20, 25, 50], 85)) %timeit dpCoins([1, 5, 10, 20, 50], 40)Rolling Regression* [Pairs trading](https://www.quantopian.com/posts/pairs-trading-algorithm-1) is a famous technique in algorithmic trading that plays two stocks against each other.* For this to work, stocks must be correlated (cointegrated).* One common example is the price of gold (GLD) and the price of gold mining operations (GFI).%matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd import pymc3 as pmLets load the prices of GFI and GLD.# from pandas_datareader import data # prices = data.GoogleDailyReader(symbols=['GLD', 'GFI'], end='2014-8-1').read().loc['Open', :, :] prices = pd.read_csv(pm.get_data('stock_prices.csv')).dropna() prices['Date'] = pd.DatetimeIndex(prices['Date']) prices = prices.set_index('Date') prices_zscored = (prices - prices.mean()) / prices.std() prices.head()Plotting the prices over time suggests a strong correlation. 
However, the correlation seems to change over time.fig = plt.figure(figsize=(9, 6)) ax = fig.add_subplot(111, xlabel='Price GFI in \$', ylabel='Price GLD in \$') colors = np.linspace(0.1, 1, len(prices)) mymap = plt.get_cmap("winter") sc = ax.scatter(prices.GFI, prices.GLD, c=colors, cmap=mymap, lw=0) cb = plt.colorbar(sc) cb.ax.set_yticklabels([str(p.date()) for p in prices[::len(prices)//10].index]);A naive approach would be to estimate a linear model and ignore the time domain.with pm.Model() as model_reg: pm.glm.GLM.from_formula('GLD ~ GFI', prices) trace_reg = pm.sample(2000, tune=1000)Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (4 chains in 4 jobs) NUTS: [sd, GFI, Intercept]The posterior predictive plot shows how bad the fit is.fig = plt.figure(figsize=(9, 6)) ax = fig.add_subplot(111, xlabel='Price GFI in \$', ylabel='Price GLD in \$', title='Posterior predictive regression lines') sc = ax.scatter(prices.GFI, prices.GLD, c=colors, cmap=mymap, lw=0) pm.plot_posterior_predictive_glm(trace_reg[100:], samples=100, label='posterior predictive regression lines', lm=lambda x, sample: sample['Intercept'] + sample['GFI'] * x, eval=np.linspace(prices.GFI.min(), prices.GFI.max(), 100)) cb = plt.colorbar(sc) cb.ax.set_yticklabels([str(p.date()) for p in prices[::len(prices)//10].index]); ax.legend(loc=0);Rolling regressionNext, we will build an improved model that will allow for changes in the regression coefficients over time. Specifically, we will assume that intercept and slope follow a random-walk through time. That idea is similar to the [stochastic volatility model](stochastic_volatility.ipynb).$$ \alpha_t \sim \mathcal{N}(\alpha_{t-1}, \sigma_\alpha^2) $$$$ \beta_t \sim \mathcal{N}(\beta_{t-1}, \sigma_\beta^2) $$ First, let's define the hyper-priors for $\sigma_\alpha^2$ and $\sigma_\beta^2$. These parameters can be interpreted as the volatility in the regression coefficients.model_randomwalk = pm.Model() with model_randomwalk: # std of random walk sigma_alpha = pm.Exponential('sigma_alpha', 50.) sigma_beta = pm.Exponential('sigma_beta', 50.) alpha = pm.GaussianRandomWalk('alpha', sigma=sigma_alpha, shape=len(prices)) beta = pm.GaussianRandomWalk('beta', sigma=sigma_beta, shape=len(prices))Perform the regression given coefficients and data and link to the data via the likelihood.with model_randomwalk: # Define regression regression = alpha + beta * prices_zscored.GFI # Assume prices are Normally distributed, the mean comes from the regression. sd = pm.HalfNormal('sd', sigma=.1) likelihood = pm.Normal('y', mu=regression, sigma=sd, observed=prices_zscored.GLD)Inference. Despite this being quite a complex model, NUTS handles it well.with model_randomwalk: trace_rw = pm.sample(tune=2000, cores=4, target_accept=0.9)Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (4 chains in 4 jobs) NUTS: [sd, beta, alpha, sigma_beta, sigma_alpha]Increasing the tree-depth does indeed help but it makes sampling very slow. The results look identical with this run, however.
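On the tree-depth note above, here is a minimal sketch of how the maximum tree depth could be raised, assuming `pm.sample` forwards `max_treedepth` to NUTS the same way it forwards `target_accept` in the call above (NUTS defaults to a maximum tree depth of 10); this sketch is not run in this notebook.
# Sketch: same sampler call with a larger maximum tree depth.
# Longer trajectories can avoid hitting the depth limit, at the cost of slower sampling.
with model_randomwalk:
    trace_rw_deep = pm.sample(tune=2000, cores=4, target_accept=0.9, max_treedepth=15)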
Analysis of results As can be seen below, $\alpha$, the intercept, changes over time.fig = plt.figure(figsize=(8, 6)) ax = plt.subplot(111, xlabel='time', ylabel='alpha', title='Change of alpha over time.') ax.plot(trace_rw['alpha'].T, 'r', alpha=.05); ax.set_xticklabels([str(p.date()) for p in prices[::len(prices)//5].index]);As does the slope.fig = plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111, xlabel='time', ylabel='beta', title='Change of beta over time') ax.plot(trace_rw['beta'].T, 'b', alpha=.05); ax.set_xticklabels([str(p.date()) for p in prices[::len(prices)//5].index]);The posterior predictive plot shows that we capture the change in regression over time much better. Note that we should have used returns instead of prices. The model would still work the same, but the visualisations would not be quite as clear.fig = plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111, xlabel='Price GFI in \$', ylabel='Price GLD in \$', title='Posterior predictive regression lines') colors = np.linspace(0.1, 1, len(prices)) colors_sc = np.linspace(0.1, 1, len(trace_rw[::10]['alpha'].T)) mymap = plt.get_cmap('winter') mymap_sc = plt.get_cmap('winter') xi = np.linspace(prices_zscored.GFI.min(), prices_zscored.GFI.max(), 50) for i, (alpha, beta) in enumerate(zip(trace_rw[::15]['alpha'].T, trace_rw[::15]['beta'].T)): for a, b in zip(alpha[::30], beta[::30]): ax.plot(xi, a + b*xi, alpha=.01, lw=1, c=mymap_sc(colors_sc[i])) sc = ax.scatter(prices_zscored.GFI, prices_zscored.GLD, label='data', cmap=mymap, c=colors) cb = plt.colorbar(sc) cb.ax.set_yticklabels([str(p.date()) for p in prices_zscored[::len(prices)//10].index]); #ax.set(ylim=(100, 190));Author: %load_ext watermark %watermark -n -u -v -iv -wnumpy 1.18.5 pymc3 3.9.0 pandas 1.0.4 last updated: Mon Jun 15 2020 CPython 3.7.7 IPython 7.15.0 watermark 2.0.2Hyperparameter settingsparser = argparse.ArgumentParser(description='PyTorch Convolutional Image Captioning Model') parser.add_argument('model_dir', help='output directory to save models & results') parser.add_argument('-g', '--gpu', type=int, default=0,\ help='gpu device id') parser.add_argument('--coco_root', type=str, default= './data/coco/',\ help='directory containing coco dataset train2014, val2014, & annotations') parser.add_argument('-t', '--is_train', type=int, default=1,\ help='use 1 to train model') parser.add_argument('-e', '--epochs', type=int, default=30,\ help='number of training epochs') parser.add_argument('-b', '--batchsize', type=int, default=32,\ help='number of images per training batch') parser.add_argument('-c', '--ncap_per_img', type=int, default=5,\ help='ground-truth captions per image in training batch') parser.add_argument('-n', '--num_layers', type=int, default=3,\ help='depth of convcap network') parser.add_argument('-m', '--nthreads', type=int, default=4,\ help='pytorch data loader threads') # parser.add_argument('-ft', '--finetune_after', type=int, default=8,\ # help='epochs after which vgg16 is fine-tuned') parser.add_argument('-lr', '--learning_rate', type=float, default=5e-5,\ help='learning rate for convcap') parser.add_argument('-st', '--lr_step_size', type=int, default=15,\ help='epochs to decay learning rate after') parser.add_argument('-sc', '--score_select', type=str, default='CIDEr',\ help='metric to pick best model') parser.add_argument('--beam_size', type=int, default=1, \ help='beam size to use for test') parser.add_argument('--attention', dest='attention', action='store_true', \ help='Use this for convcap with attention (by default set)')
parser.add_argument('--no-attention', dest='attention', action='store_false', \ help='Use this for convcap without attention') parser.set_defaults(attention=True) args, _ = parser.parse_known_args() args.finetune_after = 8 args.model_dir = 'output' import os import os.path as osp import argparse import numpy as np import json import time import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.optim import lr_scheduler from torch.autograd import Variable from torch.utils.data import DataLoader import torchvision.datasets as datasets import torchvision.transforms as transforms from torchvision import models from coco_loader import coco_loader from convcap import convcap from vggfeats import Vgg16Feats from tqdm import tqdm from test import test os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu) if (args.is_train == 1): print('train') t_start = time.time() train_data = coco_loader(args.coco_root, split='train', ncap_per_img=args.ncap_per_img) print('[DEBUG] Loading train data ... %f secs' % (time.time() - t_start)) train_data_loader = DataLoader(dataset=train_data, num_workers=0, batch_size=args.batchsize, \ shuffle=True, drop_last=True) model_imgcnn = Vgg16Feats() model_imgcnn.cuda() model_imgcnn.train(True) #Convcap model model_convcap = convcap(train_data.numwords, args.num_layers, is_attention=args.attention) model_convcap.cuda() model_convcap.train(True) optimizer = optim.RMSprop(model_convcap.parameters(), lr=args.learning_rate) scheduler = lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=.1) img_optimizer = None batchsize = args.batchsize ncap_per_img = args.ncap_per_img batchsize_cap = batchsize*ncap_per_img max_tokens = train_data.max_tokens nbatches = np.int_(np.floor((len(train_data.ids)*1.)/batchsize)) bestscore = .0 def repeat_img_per_cap(imgsfeats, imgsfc7, ncap_per_img): batchsize, featdim, feat_h, feat_w = imgsfeats.size() batchsize_cap = batchsize*ncap_per_img imgsfeats = imgsfeats.unsqueeze(1).expand(batchsize, ncap_per_img, featdim, feat_h, feat_w) imgsfeats = imgsfeats.contiguous().view(batchsize_cap, featdim, feat_h, feat_w) batchsize, featdim = imgsfc7.size() batchsize_cap = batchsize*ncap_per_img imgsfc7 = imgsfc7.unsqueeze(1).expand(batchsize, ncap_per_img, featdim) imgsfc7 = imgsfc7.contiguous().view(batchsize_cap, featdim) return imgsfeats, imgsfc7 args.epochs # for epoch in range(args.epochs): # Only looping twice here to check that the code runs; the full training used 30 (args.epochs) epochs. for epoch in range(2): loss_train = 0.
if(epoch == args.finetune_after): img_optimizer = optim.RMSprop(model_imgcnn.parameters(), lr=1e-5) img_scheduler = lr_scheduler.StepLR(img_optimizer, step_size=args.lr_step_size, gamma=.1) scheduler.step() if(img_optimizer): img_scheduler.step() #One epoch of train for batch_idx, (imgs, captions, wordclass, mask, _) in tqdm(enumerate(train_data_loader), total=nbatches): imgs = imgs.view(batchsize, 3, 224, 224) wordclass = wordclass.view(batchsize_cap, max_tokens) mask = mask.view(batchsize_cap, max_tokens) imgs_v = Variable(imgs).cuda() wordclass_v = Variable(wordclass).cuda() optimizer.zero_grad() if(img_optimizer): img_optimizer.zero_grad() imgsfeats, imgsfc7 = model_imgcnn(imgs_v) imgsfeats, imgsfc7 = repeat_img_per_cap(imgsfeats, imgsfc7, ncap_per_img) _, _, feat_h, feat_w = imgsfeats.size() if(args.attention == True): wordact, attn = model_convcap(imgsfeats, imgsfc7, wordclass_v) attn = attn.view(batchsize_cap, max_tokens, feat_h, feat_w) else: wordact, _ = model_convcap(imgsfeats, imgsfc7, wordclass_v) wordact = wordact[:,:,:-1] wordclass_v = wordclass_v[:,1:] mask = mask[:,1:].contiguous() wordact_t = wordact.permute(0, 2, 1).contiguous().view(batchsize_cap*(max_tokens-1), -1) wordclass_t = wordclass_v.contiguous().view(batchsize_cap*(max_tokens-1), 1) maskids = torch.nonzero(mask.view(-1)).numpy().reshape(-1) if(args.attention == True): #Cross-entropy loss and attention loss of Show, Attend and Tell loss = F.cross_entropy(wordact_t[maskids, ...], \ wordclass_t[maskids, ...].contiguous().view(maskids.shape[0])) \ + (torch.sum(torch.pow(1. - torch.sum(attn, 1), 2)))\ /(batchsize_cap*feat_h*feat_w) else: loss = F.cross_entropy(wordact_t[maskids, ...], \ wordclass_t[maskids, ...].contiguous().view(maskids.shape[0])) loss_train = loss_train + loss.data loss.backward() optimizer.step() if(img_optimizer): img_optimizer.step() loss_train = (loss_train*1.)/(batch_idx) print('[DEBUG] Training epoch %d has loss %f' % (epoch, loss_train)) modelfn = osp.join(args.model_dir, 'model.pth') if(img_optimizer): img_optimizer_dict = img_optimizer.state_dict() else: img_optimizer_dict = None torch.save({ 'epoch': epoch, 'state_dict': model_convcap.state_dict(), 'img_state_dict': model_imgcnn.state_dict(), 'optimizer' : optimizer.state_dict(), 'img_optimizer' : img_optimizer_dict, }, modelfn) #Run on validation and obtain score scores = test(args, 'val', model_convcap=model_convcap, model_imgcnn=model_imgcnn) score = scores[0][args.score_select] if(score > bestscore): bestscore = score print('[DEBUG] Saving model at epoch %d with %s score of %f'\ % (epoch, args.score_select, score)) bestmodelfn = osp.join(args.model_dir, 'bestmodel.pth') os.system('cp %s %s' % (modelfn, bestmodelfn)) bestmodelfn = osp.join(args.model_dir, 'bestmodel.pth') if (osp.exists(bestmodelfn)): print('if (osp.exists(bestmodelfn)):') if (args.beam_size == 1): print('if (args.beam_size == 1):') scores = test(args, 'test', modelfn=bestmodelfn) else: print('else:') scores = test_beam(args, 'test', modelfn=bestmodelfn) print('TEST set scores') for k, v in scores[0].items(): print('%s: %f' % (k, v)) else: print('2 else') raise Exception('No checkpoint found %s' % bestmodelfn) scores[0].items()Working with dask data frames. Reading Fiscal Data from a sqlite db to a dask dataframe. Computing, visualizing and groupby with dask dataframes. Using dask.distributed locally. 
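As a minimal sketch before the walkthrough (assuming the `fiscal_data` table and the `sqlite:///fiscal_data.db` connection string used below, and that the `year` column is suitable as a partitioning index), the same table can also be read straight into a dask dataframe without going through pandas first:
import dask.dataframe as dd
# Sketch: read the sqlite table directly into a partitioned dask dataframe.
# read_sql_table needs an index column it can partition on; 'year' is assumed here.
ddf_direct = dd.read_sql_table('fiscal_data', 'sqlite:///fiscal_data.db', index_col='year', npartitions=5)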
This post includes code from [Scalable-Data-Analysis-in-Python-with-Dask](https://github.com/PacktPublishing/-Scalable-Data-Analysis-in-Python-with-Dask/tree/master/Section%202) and [coiled-examples](https://github.com/coiled/coiled-examples).import numpy as np import dask.array as da import pandas as pd import sqlalchemy as db from sqlalchemy import create_engine import sqlite3 import pandas as pd engine = db.create_engine('sqlite:///fiscal_data.db') connection = engine.connect() metadata = db.MetaData() engine.execute("SELECT * FROM fiscal_data LIMIT 1").fetchall() sql = """ SELECT year , region , province , gdp , fdi , it , specific FROM fiscal_data """ cnxn = connection df = pd.read_sql(sql, cnxn) df df.columns from dask import dataframe as dd ddf = dd.from_pandas(df, npartitions=5) print(ddf) ddf.npartitions ddf.npartitions len(ddf) from dask.distributed import Client client = Client(processes=False, threads_per_worker=2, n_workers=3, memory_limit='4GB') client ddf.describe().compute() ddf.head() groupby_yr = ddf.groupby('year').count() groupby_yr.compute() group_region = ddf.groupby('region')['gdp'].sum() group_region.compute() ddf.nlargest(5, 'fdi').compute() ddf.sum().visualize() ddf.sum().visualize(rankdir="LR") (ddf).visualize(rankdir="LR") ddf.visualize(rankdir="LR") client.close()Fetching the Data This is a bit annoying. But to download from kaggle we need to upload the kaggle API key here. Then we need to move the file to the correct folder after which we need to change the permissions. The error messages will not provide super helpful information so I've added the correct code here. You can also upload the dataset from kaggle manually or you can download all of this locally. The kaggle dataset can be found [here](https://www.kaggle.com/therohk/million-headlines).Then again, this code works;# from google.colab import files # files.upload() # ! cp kaggle.json ~/.kaggle/} # ! chmod 600 ~/.kaggle/kaggle.json # ! 
kaggle datasets download -d therohk/million-headlines import pandas as pd headlines = pd.read_csv('million-headlines.zip')['headline_text']Sequence of Letters Let's now take these headlines and grab sequences of letters out of them.headlines[0] import itertools as it def sliding_window(txt): for i in range(len(txt) - 1): yield txt[i], txt[i + 1] window = list(it.chain(*[sliding_window(_) for _ in headlines[:10000]])) mapping = {c: i for i, c in enumerate(pd.DataFrame(window)[0].unique())} integers_in = np.array([mapping[w[0]] for w in window]) integers_out = np.array([mapping[w[1]] for w in window]).reshape(-1, 1) integers_in.shape from tensorflow.keras.layers import Embedding, Dense, Flatten from tensorflow.keras.models import Sequential num_letters = len(mapping) # typically 36 -> 26 letters + 10 numbers # this one is so we might grab the embeddings model_emb = Sequential() embedding = Embedding(num_letters, 2, input_length=1) model_emb.add(embedding) output_array = model_emb.predict(integers_in) output_array.shape import matplotlib.pylab as plt idx_to_calc = list(mapping.values()) idx_to_calc = np.array([idx_to_calc]).T translator = {v:k for k,v in mapping.items()} preds = model_emb.predict(idx_to_calc) plt.scatter(preds[:, 0, 0], preds[:, 0, 1], alpha=0) for i, idx in enumerate(idx_to_calc): plt.text(preds[i, 0, 0], preds[i, 0, 1], translator[idx[0]]) from tensorflow.keras.optimizers import Adam # this one is so we might learn the mapping model_pred = Sequential() model_pred.add(embedding) model_pred.add(Flatten()) model_pred.add(Dense(num_letters, activation="softmax")) adam = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False) model_pred.compile(adam, 'categorical_crossentropy', metrics=['accuracy']) output_array = model_pred.predict(integers_in) output_array.shape from sklearn.preprocessing import OneHotEncoder to_predict = OneHotEncoder(sparse=False).fit_transform(integers_out) model_pred.fit(integers_in, to_predict, epochs=30, verbose=1) preds = model_emb.predict(idx_to_calc) plt.scatter(preds[:, 0, 0], preds[:, 0, 1], alpha=0) for i, idx in enumerate(idx_to_calc): plt.text(preds[i, 0, 0], preds[i, 0, 1], translator[idx[0]])Getting started with DaNLPThis tutorial provides you with code for getting started with the DaNLP package for the different tasks we cover. More information can be found in the docs folder for each model/dataset. This tutorial reuses the code snippets from the documentation as minimal examples.Overview: 1. Models2. Datasets 1. Models 1.1. Word embeddings 1.2. Part-of-speech tagging 1.3. Named entity recognition 1.4. Dependency Parsing & Noun Phrase Chunking 1.5. Sentiment Analysis 1.1 Word embeddingsYou can choose between using static or dynamic word embeddings. 
Below is an example of how to download and load pretrained static word embeddings with gensim or spaCy.from danlp.models.embeddings import load_wv_with_gensim # Load with gensim word_embeddings = load_wv_with_gensim('conll17.da.wv') # test word_embeddings.most_similar(positive=['københavn', 'england'], negative=['danmark'], topn=1) word_embeddings.doesnt_match("vand sodavand brød vin juice".split()) word_embeddings.similarity('københavn', 'århus') word_embeddings.similarity('københavn', 'esbjerg') from danlp.models.embeddings import load_wv_with_spacy # Load with spacy word_embeddings = load_wv_with_spacy('conll17.da.wv')Here is an example of how to load the pretrained dynamic flair embeddings.from danlp.models.embeddings import load_context_embeddings_with_flair # Use the wrapper from DaNLP to download and load embeddings with Flair # You can combine it with on of the static emebdings stacked_embeddings = load_context_embeddings_with_flair(word_embeddings='wiki.da.wv') from flair.data import Sentence # test # Embedd two different sentences sentence1 = Sentence('Han fik bank') sentence2 = Sentence('Han fik en ny bank') stacked_embeddings.embed(sentence1) stacked_embeddings.embed(sentence2) # Show that it is contextual in the sense that 'bank' has different embedding after context print('{} dimensions out of {} is equal'.format(int(sum(sentence2[4].embedding==sentence1[2].embedding)), len(sentence1[2].embedding)))Here is an example of how to use BERT for embedding tokens and sentences.from danlp.models import load_bert_base_model model = load_bert_base_model() vecs_embedding, sentence_embedding, tokenized_text = model.embed_text('Han sælger frugt')1.2 Part-of-speech taggingWe provide two models for Part-of-speech tagging. Depending on your needs, you might want to use the flair model (better accuracy) or the spaCy model (higher speed). The following snippet shows how to load and use the flair model.from danlp.models import load_flair_pos_model # Load the POS tagger using the DaNLP wrapper tagger = load_flair_pos_model() from flair.data import Sentence # Using the flair POS tagger sentence = Sentence('Jeg hopper på en bil , som er rød sammen med Niels .') tagger.predict(sentence) print(sentence.to_tagged_string())The following snippet shows how to load and use the spaCy model.from danlp.models import load_spacy_model #Load the POS tagger using the DaNLP wrapper nlp = load_spacy_model() # Using the spaCy POS tagger doc = nlp('Jeg hopper på en bil, som er rød sammen med Niels.') pred='' for token in doc: pred += '{} <{}> '.format(token.text, token.pos_) print(pred)1.3 Named entity recognitionWe provide 3 models for Named Entity Recognition (NER). 
Here is an example of how to use the BERT NER model.# load BERT NER from danlp.models import load_bert_ner_model bert = load_bert_ner_model() # Get lists of tokens and labels in BIO format tokens, labels = bert.predict(" kommer fra Danmark") print(" ".join(["{}/{}".format(tok,lbl) for tok,lbl in zip(tokens,labels)])) # To get a correct tokenization, you have to provide it yourself to BERT by providing a list of tokens # (for example SpaCy can be used for tokenization) # With this option, output can also be choosen to be a dict with tags and position instead of BIO format tekst_tokenized = ['Han', 'hedder', 'Anders', 'And', 'Andersen', 'og', 'bor', 'i', 'Århus', 'C'] bert.predict(tekst_tokenized, IOBformat=False)Below is an example for using the flair NER tagger.from danlp.models import load_flair_ner_model # Load the NER tagger using the DaNLP wrapper flair_model = load_flair_ner_model() from flair.data import Sentence # Using the flair NER tagger sentence = Sentence(' kommer fra Danmark') flair_model.predict(sentence) print(sentence.to_tagged_string())Here is an example for NER with spaCy.# load the model from danlp.models import load_spacy_model nlp = load_spacy_model() # use spaCy for NER doc = nlp(' kommer fra Danmark') for tok in doc: print("{} {}".format(tok,tok.ent_type_))1.4. Dependency Parsing & Noun Phrase ChunkingWe provide Dependency parsing with our spaCy model, as well as a wrapper for deducing NP-chunks from dependencies.# load the model from danlp.models import load_spacy_model nlp = load_spacy_model() # use the spaCy model for dependency parsing only text = 'Et syntagme er en gruppe af ord, der hænger sammen' doc = nlp(text) # and/or use our wrapper for deducing NP-chunks from danlp.models import load_spacy_chunking_model # Load the chunker using the DaNLP wrapper chunker = load_spacy_chunking_model(nlp) # Using the chunker to predict BIO tags np_chunks = chunker.predict(text) # print dependency and chunks features for each token syntactic_features=['Id', 'Text', 'Head', 'Dep', 'NP-chunk'] head_format ="\033[1m{!s:>11}\033[0m" * (len(syntactic_features) ) row_format ="{!s:>11}" * (len(syntactic_features) ) print(head_format.format(*syntactic_features)) # Printing dependency and chunking features for each token for token, nc in zip(doc, np_chunks): print(row_format.format(token.i, token.text, token.head.i, token.dep_, nc))1.5. Sentiment AnalysisWith the DaNLP package, we provide 2 BERT models for detecting emotions and tone in texts and a spaCy model for predicting the polarity of a sentence. 
Below is some code for using BERT for detecting emotions.# load the model from danlp.models import load_bert_emotion_model classifier = load_bert_emotion_model() # using the classifier print(classifier.predict('bilen er flot')) print(classifier.predict('jeg ejer en rød bil og det er en god bil')) print(classifier.predict('jeg ejer en rød bil men den er gået i stykker')) # get probabilities and matching classes names proba = classifier.predict_proba('jeg ejer en rød bil men den er gået i stykker', no_emotion=False)[0] classes = classifier._classes()[0] for cl, pb in zip(classes, proba): print(cl,'\t', pb)Here is an example for using BERT for tone detection.# load the model from danlp.models import load_bert_tone_model classifier = load_bert_tone_model() # using the classifier print(classifier.predict('Analysen viser, at økonomien bliver forfærdelig dårlig')) print(classifier.predict('Jeg tror alligvel, det bliver godt')) # get probabilities and matching classes names proba = classifier.predict_proba('Analysen viser, at økonomien bliver forfærdelig dårlig')[0] classes = classifier._classes()[0] for cl, pb in zip(classes, proba): print(cl,'\t', pb)Here is how to use spaCy for sentiment analysis.# load the model from danlp.models import load_spacy_model nlp = load_spacy_model(textcat='sentiment', vectorError=True) # if you got an error saying da.vectors not found, try setting vectorError=True as follow: #nlp = load_spacy_model(textcat='sentiment', vectorError=True) import operator # use the model for predicting the polarity of a sentence doc = nlp("Vi er glade for spacy!") max(doc.cats.items(), key=operator.itemgetter(1))[0]2. Datasets 2.1. Danish Dependency Treebank (DaNE) 2.2. Dacoref 2.3. WikiANN 2.4. Sentiment datasets 2.5. Word similarity datasets 2.6. DanNet 2.1. Danish Dependency Treebank (DaNE)The DaNE dataset contains annotations for PoS-tagging, Named Entity Recognition and Dependency Parsing.from danlp.datasets import DDT ddt = DDT() spacy_corpus = ddt.load_with_spacy() flair_corpus = ddt.load_with_flair() conllu_format = ddt.load_as_conllu()2.2. DacorefDacoref can be used for training and testing models for coreference resolution.from danlp.datasets import Dacoref dacoref = Dacoref() # The corpus can be loaded with or without splitting into train, dev and test in a list in that order corpus = dacoref.load_as_conllu(predefined_splits=True)2.3. WikiANNWikiANN is annotated with named entity tags.from danlp.datasets import WikiAnn wikiann = WikiAnn() spacy_corpus = wikiann.load_with_spacy() flair_corpus = wikiann.load_with_flair()2.4. Sentiment datasetsEuroparl Sentiment 1 is annotated with polarity scores (from -5 to 5), while Europarl Sentiment 2 is annotated with polarity tags (‘positive’, ‘neutral’, ‘negative’) and analytics (‘subjective’ , ‘objective’).from danlp.datasets import EuroparlSentiment1 eurosent = EuroparlSentiment1() df = eurosent.load_with_pandas() from danlp.datasets import EuroparlSentiment2 eurosent = EuroparlSentiment2() df = eurosent.load_with_pandas()As well as Europarl Sentiment 1, LCC Sentiment is annotated with polarity scores (from -5 to 5).from danlp.datasets import LccSentiment lccsent = LccSentiment() df = lccsent.load_with_pandas()2.5 Word similarity datasetsThe word similarity datasets contain lists of words annotated with similarity scores (from 1 to 10). 
They can be used for evaluating word embeddings.from danlp.datasets import DSD dsd = DSD() dsd.load_with_pandas() from danlp.datasets import WordSim353Da ws353 = WordSim353Da() ws353.load_with_pandas()2.6 DanNetDanNet is a lexical database similar to WordNet. You can download the database or use our wrapper for finding synonyms and other types of relations between words in Danish.from danlp.datasets import DanNet dannet = DanNet() # you can load the databases if you want to look into the databases by yourself words, wordsenses, relations, synsets = dannet.load_with_pandas() # or use our functions to search for synonyms, hypernyms, hyponyms and domains word = "myre" print(word) print("synonyms : ", dannet.synonyms(word)) print("hypernyms : ", dannet.hypernyms(word)) print("hyponyms : ", dannet.hyponyms(word)) print("domains : ", dannet.domains(word)) print("meanings : ", dannet.meanings(word)) # to help you dive into the databases # we also provide the following functions: print("part-of-speech : ", dannet.pos(word)) print("wordnet relations : ", dannet.wordnet_relations(word, eurowordnet=True)) print("word ids : ", dannet._word_ids(word)) print("synset ids : ", dannet._synset_ids(word)) i = 11034863 print("word from id =",i, ":", dannet._word_from_id(i)) i = 3514 print("synset from id =", i, ":", dannet._synset_from_id(i))Data FramesHere is a built-in example data frame with 54 rows (we preview the first few):head(warpbreaks)Ignoring the "breaks" column, let's make a contingency table of counts. We're using the R formula syntax (familiar from GLMs) to describe which columns we want: nothing on the left-hand side of the "~", explained by the wool and tension variables.ct = xtabs(~wool+tension, data=warpbreaks) ct is.table(ct)We can also use the table function, which converts a data frame into a table, and the margin.table function, which marginalizes a table, to accomplish this.table(warpbreaks) margin.table(table(warpbreaks),c(2,3))First Introduction to JupyterRaphael, Judit, and I are exploring Python. 
The first thing we're going to do is try to make a function for the "bulge" for rotation curves.import numpy as np #r0 = 3.07e3 #in pc r0 = 1533 #Judit found this online Sigb = 5.12e6 #solar masses per pc^2 n = 4 print(r0) print('hello world, r0 is: {} pc {}'.format(r0,3)) import matplotlib as mpl #let's try to create the funcntion for the "bulge" this is called a "lambda" function in python sig_b = lambda x: Sigb*np.exp(-np.power(x/r0,(1/4))) #now try the hump: Sigg01 = 11.0e6 rg1 = 5.0e3 Wg1 = 4.0e3 sig_g = lambda x: Sigg01*np.exp(-((x-rg1)/Wg1)**2) #vectorize this so it can hold a bunch of values vec_sig_b = np.vectorize(sig_b) vec_sig_g = np.vectorize(sig_g) #set up a plot import matplotlib as mpl from matplotlib import pyplot as plt #set up a 1d plot fig,axes = plt.subplots(1,1,figsize=(9.0,8.0),sharex=True) ax1 = axes X = np.arange(0.0,16000,0.1) ax1.plot(X,vec_sig_b(X), color='k', linestyle='-', \ label='bulge function', linewidth=2) ax1.plot(X,vec_sig_g(X), color='r', linestyle='-', \ label='gas function', linewidth=2) ax1.set_yscale('log') ax1.set_xlim(0, 16000) #in pairs ax1.set_ylim(1e5,1e7) ax1.set_xlabel('disc coordinate [pc]') ax1.set_ylabel('\Sigma') ax1.grid(True) ax1.yaxis.grid(True,which='minor',linestyle='--') ax1.legend(loc=1,prop={'size':22}) for axis in ['top','bottom','left','right']: ax1.spines[axis].set_linewidth(2) plt.tight_layout() plt.savefig('first_plot.png') plt.show()ndarray和base64互转import base64 import numpy as np n = np.ndarray(shape=(10), dtype=float) print(n) base64_str = base64.b64encode(n).decode() print(base64_str) nn = np.frombuffer(base64.b64decode(base64_str)) print(nn) base64_byte = base64.b64encode(n) print(base64_byte) base64_str = base64.b64encode(n).decode() print(base64_str) bs = base64.b64decode(base64_byte) print(bs) bs = base64.b64decode(base64_str) print(bs) nn = np.frombuffer(bs) print(nn)b'\x00\x00\x00\x00\x00\x00\x00\x00\xba\xaaI\xc2\xfe\x07\x00\xe0\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\xba\xaaI\xc2\xfe\x07\x00\xe0\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00' [ 0.00000000e+000 -2.68679584e+154 2.47032823e-323 0.00000000e+000 0.00000000e+000 0.00000000e+000 0.00000000e+000 0.00000000e+000 0.00000000e+000 6.95335581e-309]Data-Driven documents with `d3`[`d3`](http://d3js.org) is a powerful visualization framework, and powers the [`vega`](https://vega.github.io/vega/) extension for JupyterLab, which provides any kernel with rich display to create.display.display({ "application/vnd.vega.v3+json": { "$schema": "https://vega.github.io/schema/vega/v3.json", "width": 400, "height": 200, "padding": 5, "data": [ { "name": "table", "values": [ {"category": "A", "amount": 28}, {"category": "B", "amount": 55}, {"category": "C", "amount": 43}, {"category": "D", "amount": 91}, {"category": "E", "amount": 81}, {"category": "F", "amount": 53}, {"category": "G", "amount": 19}, {"category": "H", "amount": 87} ] } ], "signals": [ { "name": "tooltip", "value": {}, "on": [ {"events": "rect:mouseover", "update": "datum"}, {"events": "rect:mouseout", "update": "{}"} ] } ], "scales": [ { "name": 
"xscale", "type": "band", "domain": {"data": "table", "field": "category"}, "range": "width", "padding": 0.05, "round": true }, { "name": "yscale", "domain": {"data": "table", "field": "amount"}, "nice": true, "range": "height" } ], "axes": [ { "orient": "bottom", "scale": "xscale" }, { "orient": "left", "scale": "yscale" } ], "marks": [ { "type": "rect", "from": {"data":"table"}, "encode": { "enter": { "x": {"scale": "xscale", "field": "category"}, "width": {"scale": "xscale", "band": 1}, "y": {"scale": "yscale", "field": "amount"}, "y2": {"scale": "yscale", "value": 0} }, "update": { "fill": {"value": "steelblue"} }, "hover": { "fill": {"value": "red"} } } }, { "type": "text", "encode": { "enter": { "align": {"value": "center"}, "baseline": {"value": "bottom"}, "fill": {"value": "#333"} }, "update": { "x": {"scale": "xscale", "signal": "tooltip.category", "band": 0.5}, "y": {"scale": "yscale", "signal": "tooltip.amount", "offset": -2}, "text": {"signal": "tooltip.amount"}, "fillOpacity": [ {"test": "datum === tooltip", "value": 0}, {"value": 1} ] } } } ] } })Anyhow, because it's there, and **this is Jyve**, you can have it. A Hack: `d3` is dead, long live `d3`.In transitioning to a many-repo development process between `v3` and `v4`, some libraries decided to stay on to the last release of the `v3` line, including the [`vega2-extension`](https://github.com/jupyterlab/jupyterlab/tree/master/packages/vega2-extension). So you get `d3=^3.5.17`, and that's that... for now.> _🤔 How might we make multiple versions of a library available, if present?_now = new Date() scales = { x: d3.time.scale() .domain([now, new Date(+now + 60*1000)]) .range([0, 100]), y: d3.scale.linear() .domain([0, 255]) .range([200, 0]) } color = d3.scale.category10() d3.select(document.body) .selectAll("style").remove() d3.select(document.body) .append("style") .html(` .axis text { font: 10px sans-serif; } .axis path, .axis line { fill: none; stroke: #000; shape-rendering: crispEdges; } `) d3.select(document.body) .style({display: "flex", "flex-direction": "column"}) .selectAll("svg") .remove() svg = d3.select(document.body) .append("svg") .style({ flex: 1, }) g = svg.append("g") .attr({"transform": "translate(0 20)"}) svg.append("text") .text("Files Saved by time and path length") .attr({y: 20, x: 200}) .style({"font-family": "sans-serif", "text-anchor": "middle"}) data = [] axes = { x: d3.svg.axis().scale(scales.x).orient("bottom"), y: d3.svg.axis().scale(scales.y).orient("right"), } gax = { x: svg.append("g").attr({"class": "x axis", transform: "translate(0 220)"}), y: svg.append("g").attr({"class": "y axis", transform: "translate(400 20)"}) }The update function. 
This could be improved!if(this.update) { JupyterLab.serviceManager.contents.fileChanged.disconnect(this.update); } update = function(g) { scales.x = scales.x.domain([ now, new Date(data[data.length - 1].newValue.last_modified) ]) .range([0, 400]) gax.x.call(axes.x.scale(scales.x)); gax.y.call(axes.y.scale(scales.y)); g.selectAll("circle") .data(data) .enter() .append("circle") .attr({r: 0}) g.selectAll("circle") .attr({ cx: (d) => scales.x(new Date(d.newValue.last_modified)), cy: (d) => scales.y(d.newValue.path.length), r: 5 }) .style({ fill: (d) => color(d.newValue.path), stroke: 'transparent' }) }Install the listener!console.log(JupyterLab.serviceManager.contents.fileChanged.connect(function(mgr, msg){ console.log(msg) data.push(msg) g.call(window.update) }))The Inspection Paradox is Everywhere 2019[MIT License](https://en.wikipedia.org/wiki/MIT_License)import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from empiricaldist import Pmf from utils import decorate # set the random seed so we get the same results every time np.random.seed(17) # make the directory for the figures import os if not os.path.exists('inspection'): !mkdir inspectionClass sizeHere's the data summarizing the distribution of undergraduate class sizes at Purdue University in 2013-14.# Class size data originally from # https://www.purdue.edu/datadigest/2013-14/InstrStuLIfe/DistUGClasses.html # now available from # https://web.archive.org/web/20160415011613/https://www.purdue.edu/datadigest/2013-14/InstrStuLIfe/DistUGClasses.html sizes = [(1, 1), (2, 9), (10, 19), (20, 29), (30, 39), (40, 49), (50, 99), (100, 300)] counts = [138, 635, 1788, 1979, 796, 354, 487, 333]I generate a sample from this distribution, assuming a uniform distribution in each range and an upper bound of 300.def generate_sample(sizes, counts): """Generate a sample from a distribution. sizes: sequence of (low, high) pairs counts: sequence of integers returns: NumPy array """ t = [] for (low, high), count in zip(sizes, counts): print(count, low, high) sample = np.random.randint(low, high+1, count) t.extend(sample) return np.array(t)The "unbiased" sample is as seen by the college, with each class equally likely to be in the sample.unbiased = generate_sample(sizes, counts)138 1 1 635 2 9 1788 10 19 1979 20 29 796 30 39 354 40 49 487 50 99 333 100 300To generate a biased sample, we use the values themselves as weights and resample with replacement.def resample_weighted(sample, weights): """Resample values from `sample` with the given weights. sample: NumPy array weights: NumPy array returns: NumPy array """ n = len(sample) p = weights / np.sum(weights) return np.random.choice(sample, n, p=p) biased = resample_weighted(unbiased, unbiased)To plot the distribution, I use KDE to estimate the density function, then evaluate it over the given sequence of `xs`.from scipy.stats import gaussian_kde def kdeplot(sample, xs, label=None, **options): """Use KDE to plot the density function. 
sample: NumPy array xs: NumPy array label: string """ density = gaussian_kde(sample, **options).evaluate(xs) plt.plot(xs, density, label=label) decorate(ylabel='Relative likelihood')The following plot shows the distribution of class size as seen by the Dean, and as seen by a sample of students.xs = np.arange(1, 300) kdeplot(unbiased, xs, 'Reported by the Dean') kdeplot(biased, xs, 'Reported by students') decorate(xlabel='Class size', title='Distribution of class sizes') plt.savefig('inspection/class_size.png', dpi=150)Here are the means of the unbiased and biased distributions.np.mean(unbiased) np.mean(biased) from empiricaldist import Cdf def cdfplot(sample, xs, label=None, **options): """Plot the CDF of the sample. sample: NumPy array xs: NumPy array (ignored) label: string """ cdf = Cdf.from_seq(sample, **options) cdf.plot(label=label) decorate(ylabel='CDF') xs = np.arange(1, 300) cdfplot(unbiased, xs, 'Reported by the Dean') cdfplot(biased, xs, 'Reported by students') decorate(xlabel='Class size', title='Distribution of class sizes') plt.savefig('inspection/class_size.png', dpi=150)Red LineHere are times between trains in seconds.unbiased = [ 428.0, 705.0, 407.0, 465.0, 433.0, 425.0, 204.0, 506.0, 143.0, 351.0, 450.0, 598.0, 464.0, 749.0, 341.0, 586.0, 754.0, 256.0, 378.0, 435.0, 176.0, 405.0, 360.0, 519.0, 648.0, 374.0, 483.0, 537.0, 578.0, 534.0, 577.0, 619.0, 538.0, 331.0, 186.0, 629.0, 193.0, 360.0, 660.0, 484.0, 512.0, 315.0, 457.0, 404.0, 740.0, 388.0, 357.0, 485.0, 567.0, 160.0, 428.0, 387.0, 901.0, 187.0, 622.0, 616.0, 585.0, 474.0, 442.0, 499.0, 437.0, 620.0, 351.0, 286.0, 373.0, 232.0, 393.0, 745.0, 636.0, 758.0, ]Here's the same data in minutes.unbiased = np.array(unbiased) / 60We can use the same function to generate a biased sample.biased = resample_weighted(unbiased, unbiased)And plot the results.xs = np.linspace(1, 16.5, 101) kdeplot(unbiased, xs, 'Seen by MBTA') kdeplot(biased, xs, 'Seen by passengers') decorate(xlabel='Time between trains (min)', title='Distribution of time between trains') plt.savefig('inspection/red_line.png', dpi=150) xs = np.linspace(1, 16.5, 101) cdfplot(unbiased, xs, 'Seen by MBTA') cdfplot(biased, xs, 'Seen by passengers') decorate(xlabel='Time between trains (min)', title='Distribution of time between trains') plt.savefig('inspection/red_line.png', dpi=150)Here are the means of the distributions and the percentage difference.np.mean(biased), np.mean(unbiased) (np.mean(biased) - np.mean(unbiased)) / np.mean(unbiased) * 100Social networkThe following function reads the Facebook data.import networkx as nx def read_graph(filename): """Read a graph from a file. 
filename: string return: nx.Graph """ G = nx.Graph() array = np.loadtxt(filename, dtype=int) G.add_edges_from(array) return G # https://snap.stanford.edu/data/facebook_combined.txt.gz fb = read_graph('facebook_combined.txt.gz') n = len(fb) m = len(fb.edges()) n, mThe unbiased sample is the number of friends for each user.unbiased = [fb.degree(node) for node in fb] len(unbiased) np.max(unbiased)We can use the same function to generate a biased sample.biased = resample_weighted(unbiased, unbiased)And generate the plot.xs = np.linspace(0, 300, 101) kdeplot(unbiased, xs, 'Random sample of people') kdeplot(biased, xs, 'Random sample of friends') decorate(xlabel='Number of friends in social network', title='Distribution of social network size') plt.savefig('inspection/social.png', dpi=150) xs = np.linspace(0, 300, 101) cdfplot(unbiased, xs, 'Random sample of people') cdfplot(biased, xs, 'Random sample of friends') decorate(xlabel='Number of friends in social network', title='Distribution of social network size', xlim=[-10, 310]) plt.savefig('inspection/social.png', dpi=150)Here are the means of the distributions.np.mean(biased), np.mean(unbiased)And the probability that the friend of a user has more friends than the user.np.mean(biased > unbiased)Relay raceThe following function reads the data from the 2010 10K, where I ran my personal record time.import relay results = relay.ReadResults() unbiased = relay.GetSpeeds(results)In this case, the weights are related to the difference between each element of the sample and the hypothetical speed of the observer.weights = np.abs(np.array(unbiased) - 7) biased = resample_weighted(unbiased, weights)And here's the plot.xs = np.linspace(3, 11, 101) kdeplot(unbiased, xs, 'Seen by spectator') kdeplot(biased, xs, 'Seen by runner at 7 mph', bw_method=0.2) decorate(xlabel='Running speed (mph)', title='Distribution of running speed') plt.savefig('inspection/relay.png', dpi=150) xs = np.linspace(3, 11, 101) cdfplot(unbiased, xs, 'Seen by spectator') cdfplot(biased, xs, 'Seen by runner at 7 mph') decorate(xlabel='Running speed (mph)', title='Distribution of running speed') plt.savefig('inspection/relay.png', dpi=150)Prison sentencesFirst we read the [data from the Bureau of Prisons web page](https://www.bop.gov/about/statistics/statistics_inmate_sentences.jsp).tables = pd.read_html('BOP Statistics_ Sentences Imposed.html') df = tables[0] dfHere are the low and high sentences for each range. I assume that the minimum sentence is about a week, that sentences "less than life" are 40 years, and that a life sentence is between 40 and 60 years.sentences = [(0.02, 1), (1, 3), (3, 5), (5, 10), (10, 15), (15, 20), (20, 40), (40, 60)]We can get the counts from the table.counts = df['# of Inmates']Here's a different version of `generate_sample` for a continuous quantity.def generate_sample(sizes, counts): """Generate a sample from a distribution. sizes: sequence of (low, high) pairs counts: sequence of integers returns: NumPy array """ t = [] for (low, high), count in zip(sizes, counts): print(count, low, high) sample = np.random.uniform(low, high, count) t.extend(sample) return np.array(t)In this case, the data are biased.biased = generate_sample(sentences, counts)5155 0.02 1 18619 1 3 17897 3 5 41887 5 10 34995 10 15 18674 15 20 22738 20 40 4600 40 60So we have to unbias them with weights inversely proportional to the values.Prisoners in federal prison typically serve 85% of their nominal sentence. 
We can take that into account in the weights.weights = 1 / (0.85 * np.array(biased))Here's the unbiased sample.unbiased = resample_weighted(biased, weights)And the plotted distributions.xs = np.linspace(0, 60, 101) kdeplot(unbiased, xs, 'Seen by judge', bw_method=0.5) kdeplot(biased, xs, 'Seen by prison visitor', bw_method=0.5) decorate(xlabel='Prison sentence (years)', title='Distribution of federal prison sentences') plt.savefig('inspection/orange.png', dpi=150) xs = np.linspace(0, 60, 101) cdfplot(unbiased, xs, 'Seen by judge') cdfplot(biased, xs, 'Seen by prison visitor') decorate(xlabel='Prison sentence (years)', title='Distribution of federal prison sentences') plt.savefig('inspection/orange.png', dpi=150)We can also compute the distribution of sentences as seen by someone at the prison for 13 months.x = 0.85 * unbiased y = 13 / 12 weights = x + yHere's the sample.kerman = resample_weighted(unbiased, weights)And here's what it looks like.xs = np.linspace(0, 60, 101) kdeplot(unbiased, xs, 'Seen by judge', bw_method=0.5) kdeplot(kerman, xs, 'Seen by Kerman', bw_method=0.5) kdeplot(biased, xs, 'Seen by visitor', bw_method=0.5) decorate(xlabel='Prison sentence (years)', title='Distribution of federal prison sentences') plt.savefig('inspection/orange.png', dpi=150) xs = np.linspace(0, 60, 101) cdfplot(unbiased, xs, 'Seen by judge') cdfplot(kerman, xs, 'Seen by Kerman') cdfplot(biased, xs, 'Seen by visitor') decorate(xlabel='Prison sentence (years)', title='Distribution of federal prison sentences') plt.savefig('inspection/orange.png', dpi=150)In the unbiased distribution, almost half of prisoners serve less than one year.np.mean(unbiased<1)But if we sample the prison population, barely 3% are short timers.np.mean(biased<1)Here are the means of the distributions.np.mean(unbiased) np.mean(biased) np.mean(kerman)The dartboard problemfrom matplotlib.patches import Circle def draw_dartboard(): ax = plt.gca() c1 = Circle((0, 0), 170, color='C3', alpha=0.3) c2 = Circle((0, 0), 160, color='white') c3 = Circle((0, 0), 107, color='C3', alpha=0.3) c4 = Circle((0, 0), 97, color='white') c5 = Circle((0, 0), 16, color='C3', alpha=0.3) c6 = Circle((0, 0), 6, color='white') for circle in [c1, c2, c3, c4, c5, c6]: ax.add_patch(circle) plt.axis('equal') draw_dartboard() plt.text(0, 10, '25 ring') plt.text(0, 110, 'triple ring') plt.text(0, 170, 'double ring') plt.savefig('inspection/darts0.png', dpi=150) sigma = 50 n = 100 error_x = np.random.normal(0, sigma, size=(n)) error_y = np.random.normal(0, sigma, size=(n)) draw_dartboard() plt.plot(error_x, error_y, '.') plt.savefig('inspection/darts1.png', dpi=150) sigma = 50 n = 10000 error_x = np.random.normal(0, sigma, size=(n)) error_y = np.random.normal(0, sigma, size=(n)) import numpy as np import seaborn as sns import matplotlib.pyplot as pl ax = sns.kdeplot(error_x, error_y, shade=True, cmap="PuBu") ax.collections[0].set_alpha(0) plt.axis([-240, 240, -175, 175]) decorate(xlabel='x distance from center (mm)', ylabel='y distance from center (mm)', title='Estimated density') plt.savefig('inspection/darts2.png', dpi=150) rs = np.hypot(error_x, error_y) np.random.seed(18) sigma = 50 n = 10000 error_x = np.random.normal(0, sigma, size=(n)) error_y = np.random.normal(0, sigma, size=(n)) xs = np.linspace(-200, 200, 101) #ys = np.exp(-(xs/sigma)**2/2) #pmf = Pmf(ys, index=xs) #pmf.normalize() #pmf.plot(color='gray') unbiased = error_x biased = resample_weighted(unbiased, np.abs(unbiased)) kdeplot(unbiased, xs, 'Density at a point') kdeplot(biased, xs, 'Total 
density in a ring') #kdeplot(rs, xs, 'Total density in a ring') decorate(xlabel='Distance from center (mm)', ylabel='Density', xlim=[0, 210]) plt.savefig('inspection/darts3.png', dpi=150) xs = np.linspace(0, 200, 101) unbiased = np.abs(error_x) biased = resample_weighted(unbiased, unbiased) cdfplot(unbiased, xs, 'Density at a point') cdfplot(biased, xs, 'Total density in a ring') decorate(xlabel='Distance from center (mm)', ylabel='Density') plt.savefig('inspection/darts4.png', dpi=150) triple = (biased > 97) & (biased < 107) triple.mean() * 100 ring50 = (biased > 6) & (biased < 16) ring50.mean() * 100 double = (biased > 160) & (biased < 170) double.mean() * 100 bull = (biased < 6) bull.mean() * 100Using and updating GIS contentThe GIS is a warehouse of geographic content and services. Arcgis includes several classes to make use of these content, publish new items and update the them when needed. This sample on updating the content of web maps and web scenes will demonstrate the following * **Replace web layers** of a web map. For instance, you can use this to update a web map when the services it points to were deleted. During the process, the sample shows how to read a web feature layer as a **FeatureService** object and inspect its properties. * **Drive the map widget by code**. In addition to displaying the interactive map widget, you can also set it to load at a particular extent. This is great for presentation purposes. During this process, the sample shows how to create and use a **MapView** object and a **Geocoder** object. * Make a **copy of a public web scene** item into your contents and then update it. * Edit the list of layers to remove unnecessary ones. * **Replace the basemap** of the web scene. In this step the sample shows how to search for **groups** and query the member items. * Change visibility of layers.from arcgis.gis import GIS from IPython.display import display gis = GIS("https://python.playground.esri.com/portal", "arcgis_python", "amazing_arcgis_123")Data preparationTo run through the sample, we require some published web layers, web maps and web scene items. This section contains cells that show how to publish them. If you would like to work with your own datasets or have these items already published, you can skip this section. To understand the publishing process in detail, check out other samples within this samples directory.sd_file = "data/updating_gis_content/Ebola_Treatment_Units.sd" # add the sd file as an item and publish it as a web layer item = gis.content.add({},sd_file) new_item = item.publish()Now that the web layers are published, run through this section to publish a web map. To understand how this part of the sample works, refer to the sample titled **Publishing web maps and web scenes**import json web_map_json = str() # read web map json from text file with open("data/updating_gis_content/web_map_Ebola.json","r") as file_handle: web_map_json = json.load(file_handle) # publish a web map web_map_item_properties = {'title':'Ebola treatment locations', 'type':'Web Map', 'snippet':'This map shows locations of Ebola treatment centers in Africa', 'tags':'ArcGIS Python API', 'text':json.dumps(web_map_json)} web_map_item = gis.content.add(web_map_item_properties)Using and updating a web mapIn the data preparation section above, we published a web map to play with. 
We will search for that web map, draw it and update it if necessary.search_result = gis.content.search("title:Ebola treatment locations", item_type = "Web Map") display(search_result) # display the first search result to confirm the item web_map_item = search_result[0] display(web_map_item) import arcgis # create a web map object out of the item web_map_obj = arcgis.mapping.WebMap(web_map_item) # display the web map obj in an interactive widget web_map_obj![web_map_obj](http://esri.github.io/arcgis-python-api/notebooks/nbimages/05_Using_updating_GIS_01.PNG) Fix errors in web mapThe widget loads an empty web map with just a basemap. Let us investigate the contents of the web map to determine the issue. Let us start with the `operationalLayers` dictionary, which contains the list of layers, and inspect each layer.layer_list = web_map_obj['operationalLayers'] display(layer_list)The web map has only 1 layer and that points to a feature service titled **Ebola_Facilities**. Let us verify if a feature service of that name exists on the server. If not, let us try to find the closest match.search_result = gis.content.search('title:Ebola_Facilities', item_type = 'Feature Service') display(search_result)Let us change the search query, leaving just the word `Ebola` in the title.search_result = gis.content.search('title:Ebola', item_type='Feature Layer') search_result[0]It is likely the old service was deleted and a new one with a different name was published. Let us update the web map dictionary with the correct url. But before that, we need to investigate whether the new service also has a layer with id `1` like the previous service.ebola = search_result[0] ebola.layersThe new feature service does have a layer with id `1`. Hence we can use the same layer id while switching the url. While updating the web map, it is important to update not only the **url** but also the **itemId** of the feature service item.# set the url to feature service item's url layer_list[0]['url'] = ebola.layers[1].url layer_list[0]['itemId'] = search_result[0].id # update the web map object's operationalLayers dictionary web_map_obj['operationalLayers'] = layer_listUpdate the web mapNow the web map should be fixed as it points to a live service. To update the web map, we call the `update()` method.# Let us print the operationalLayers dictionary before calling update() web_map_obj['operationalLayers'] web_map_obj.update()Let us create a new web map object and try to draw it.search_result = gis.content.search('title: Ebola_treatment_locations', item_type = "Web Map") display(search_result) web_map_item = search_result[0] web_map_obj = arcgis.mapping.WebMap(web_map_item) web_map_obj![web_map_obj](http://esri.github.io/arcgis-python-api/notebooks/nbimages/05_Using_updating_GIS_02.PNG) The web map was successfully overwritten with the correct operational layers. You can interact with the widget and zoom into the African coast to observe the locations of Ebola treatment centers. Using and updating a web sceneIn the sample above we observed how to update a web map. Updating a web scene is similar: we use the `update()` method. 
Let us look at the example of a web scene that displays tropical cyclones over the Pacific ocean.search_result = gis.content.search('title:Western Pacific Typhoons (2005)', item_type = 'Web Scene', outside_org = True) search_result[0]Lets display the web scene in the notebook.web_scene_item = search_result[0] web_scene_obj = arcgis.mapping.WebScene(web_scene_item) # display web scene in the notebook web_scene_obj![web_scene_obj](http://esri.github.io/arcgis-python-api/notebooks/nbimages/05_Using_updating_GIS_04.PNG) This is a great web scene and it displays a lot of hurricane tracks. However, we want to create a new one with only a particular subset of data and customize the basemaps. To modify this web scene, let us first make a copy of it and publish it into your portal. Make a copy of the public web scene itemTo make a copy, we essentially download the content of the web scene JSON, remove the parts we don't want, add the layers that we want and publish a new item using that information. The publishing steps is similar to what is described earlier in the **data preparation** section and in detail in the sample titled **Publishing web maps and web scenes**.Let's say, we are only interested in the storms that occur in summer. Summer in tropical Asia is around April-June and that matches with a layer in the existing web scene. Let us query the `operationalLayers` section of the web scene to understand how the layers look like. Update operational layers of new web scenedisplay(web_scene_obj['operationalLayers'])There is a lot of information displayed above. Let us drill into this and display only layer names and their urls. If you notice, some of the layers above are group layers, meaning, they contain sub layers. So let us write a loop like below and print some details.for layer in web_scene_obj['operationalLayers']: print(layer['title'] + " :: " + layer['layerType']) if layer['layerType'] == 'GroupLayer': for sub_layer in layer['layers']: print("\t" + sub_layer['title'] + " :: "+ sub_layer['url'])Typhoon Paths :: ArcGISFeatureLayer October - December :: GroupLayer Labels Q4 :: http://services.arcgis.com/P3ePLMYs2RVChkJx/arcgis/rest/services/PacificTyphoons2005_WFL/FeatureServer/17 Typhoons Q4 :: http://services.arcgis.com/P3ePLMYs2RVChkJx/arcgis/rest/services/PacificTyphoons2005_WFL/FeatureServer/16 September :: GroupLayer Labels Q3_3 :: http://services.arcgis.com/P3ePLMYs2RVChkJx/arcgis/rest/services/PacificTyphoons2005_WFL/FeatureServer/14 Typhoons Q3_3 :: http://services.arcgis.com/P3ePLMYs2RVChkJx/arcgis/rest/services/PacificTyphoons2005_WFL/FeatureServer/13 August :: GroupLayer Labels Q3_2 :: http://services.arcgis.com/P3ePLMYs2RVChkJx/arcgis/rest/services/PacificTyphoons2005_WFL/FeatureServer/11 Typhoons Q3_2 :: http://services.arcgis.com/P3ePLMYs2RVChkJx/arcgis/rest/services/PacificTyphoons2005_WFL/FeatureServer/10 July :: GroupLayer Labels Q3_1 :: http://services.arcgis.com/P3ePLMYs2RVChkJx/arcgis/rest/services/PacificTyphoons2005_WFL/FeatureServer/8 Typhoons Q3[...]We are only interested in the layers that correspond to cyclones in summer. From the above report, we understand that information is in a group layer with two sub layers. Let us extract just that dictionary and compose a new web scene data.# Let us construct a list comprehension and mine out that group layer. subset_op_layers = [subset for subset in web_scene_obj['operationalLayers'] if subset['title'] == 'April - June'] display(subset_op_layers) # Let us apply the changes to a new web scene object. 
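# Note (added, not part of the original sample): the plain assignment on the next line binds a
# second name to the same WebScene object rather than copying it, so any edits made through
# new_web_scene_obj are also visible through web_scene_obj. If you need to keep the original
# definition untouched, one hedged option is to deep-copy it first before editing, e.g. (sketch,
# untested against the arcgis API): import copy; new_web_scene_obj = copy.deepcopy(web_scene_obj)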
new_web_scene_obj = web_scene_obj new_web_scene_obj['operationalLayers'] = subset_op_layersUpdate basemap of new web sceneWe now have the necessary `operationalLayers` information. Let us also try to change the basemap to a darker shade. First let us search the basemaps available in the current portal. If no suitable one is found, we can widen the search outside the organization and use a basemap published by Esri.**Basemaps** are web maps that are stored in a **group** usually called **Basemaps**. Thus to get the list of basemaps available on a portal, we can find the basemaps group and list all web maps that are a part of it.To get the list of groups on the portal, we use `groups` property of the `GIS` class.basemap_search = gis.content.search('title:dark', outside_org=True, item_type='web map') for item in basemap_search: display(item) print(item.tags)We have found the basemap of our choice. Let us read it as a **`WebMap`** object and query the `baseMap` dictionary.dark_basemap_item = basemap_search[1] dark_basemap_obj = arcgis.mapping.WebMap(dark_basemap_item) dark_basemap_obj['baseMap']Now let us explore how the `baseMap` dictionary of the **web scene** looks like.new_web_scene_obj['baseMap']To get the desired basemap, we need to update the only `url` key-value pair of the web scene's `baseMap` dictionary. Here we will only pick the first layer of the dark basemap web map.new_web_scene_obj['baseMap']['baseMapLayers'][0]['url'] = \ dark_basemap_obj['baseMap']['baseMapLayers'][0]['url']Now that we have performed the necessary updates, we can go ahead and publish this as a new web scene item on our portal.new_web_scene_properties= {'title':'Toprical Cyclones - Summer', 'type' : 'Web Scene', 'tags' : 'ArcGIS Python API', 'snippet' : str.format('Subset of {0} published by {1}', web_scene_item.title, web_scene_item.owner, "https://www.arcgis.com/home/item.html?id=" + web_scene_item.id), 'text' : json.dumps(new_web_scene_obj)} new_item = gis.content.add(new_web_scene_properties) new_itemWe have successfully published the new web scene. Now let us display in an interactive widget and observe if it has the necessary updates.new_item.share(True) new_web_scene_obj = arcgis.mapping.WebScene(new_item) new_web_scene_obj![new_web_scene_obj](http://esri.github.io/arcgis-python-api/notebooks/nbimages/05_Using_updating_GIS_05.PNG) Our required updates have been applied to the new web scene. However notice the **April - June** layer is **turned off** by default. Let us fix that and update the web scene.Let us query the `operationalLayer` dictionary of the new web scene and look for a key called `visibility`.for layer in new_web_scene_obj['operationalLayers']: print(layer['visibility'])FalseAs we know, there is just 1 group layer and it is turned off. 
Let us change that and update the web scene.for layer in new_web_scene_obj['operationalLayers']: layer['visibility'] = TrueTo update the web scene call the `update()` method on the web scene object.new_web_scene_obj.update() new_web_scene_objtext> API details.#hide from nbdev.showdoc import * # export from nltk.stem import PorterStemmer from nltk.tokenize import word_tokenize from rank_bm25 import BM25Okapi stemmer = PorterStemmer() def get_language(language: str): if language: language = language.lower() if language == 'french' or language == 'fr' or language.startswith('fran'): return 'French' def stem(string: str): return stemmer.stem(string) def preprocess(string: str): string = string.lower() return [stem(word) for word in word_tokenize(string)] # export class Bm25Index: def __init__(self, column): column = column.fillna('').astype(str).apply(preprocess) self.bm25 = BM25Okapi(column.tolist()) def get_scores(self, sentence, n=10): tokenized_query = preprocess(sentence) doc_scores = self.bm25.get_scores(tokenized_query) top_indices = (-doc_scores).argsort()[:n] top_scores = doc_scores[top_indices] return top_indices, top_scores from nbdev.export import * notebook2script()Converted 00_config.ipynb. Converted 01_core.ipynb. Converted 02_text.ipynb. Converted 03_inventory.ipynb. Converted 04_repo.ipynb. Converted 05_datatools.ipynb. Converted 06_datasets.ipynb. Converted 07_statscan.ipynb. Converted index.ipynb.Demo: analysing soil moisture data from the Climate Data Store in xcube First, we import some necessary libraries and configure some matplotlib display preferences.# xcube_cds imports from xcube_cds.store import CDSDataStore # xcube imports import xcube from xcube.core.maskset import MaskSet from xcube.core.geom import mask_dataset_by_geometry from xcube.core.geom import clip_dataset_by_geometry from xcube.core.dsio import open_cube # Various utilities import json import xarray as xr import shapely.geometry import IPython.display import time import zarr import matplotlib as mpl import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable import pathlib # Plot settings %matplotlib inline plt.rcParams["figure.figsize"] = 16,12Create a CDS data store object. 
The optional `normalize_names` parameter specifies that variable names in returned data cubes will be converted to legal Python identifiers, if required.cds_store = CDSDataStore(normalize_names=True)From the data store we request monthly averages of global volumetric soil moisture data from January 2015 to January 2016.generated_cube = cds_store.open_data( 'satellite-soil-moisture:volumetric:monthly', variable_names=['volumetric_surface_soil_moisture'], time_range=['2015-01-01', '2016-01-31'] ) cube = generated_cube2021-02-18 16:43:52,854 INFO Welcome to the CDS 2021-02-18 16:43:52,857 INFO Sending request to https://cds.climate.copernicus.eu/api/v2/resources/satellite-soil-moisture 2021-02-18 16:43:53,210 INFO Request is queued 2021-02-18 16:43:54,352 INFO Request is running 2021-02-18 16:44:43,517 INFO Request is completed 2021-02-18 16:44:43,518 INFO Downloading https://download-0009.copernicus-climate.eu/cache-compute-0009/cache/data8/dataset-satellite-soil-moisture-c5957ef6-94e6-4357-8383-acf4010dd43f.tar.gz to /tmp/tmpge7fvswt/tmpeow89jkn/data (18.4M) 2021-02-18 16:44:51,241 INFO Download rate 2.4M/sPlot the bounding box on a map to check that the cube covers the expected area.bbox = [min(cube.lon), min(cube.lat), max(cube.lon), max(cube.lat)] IPython.display.GeoJSON(shapely.geometry.box(*bbox).__geo_interface__)Plot the differences between successive time points in the dataset. We can see that the times are monotonically increasing (all the difference values are positive), but not equally spaced, since months are not all of the same length. The lowest values correspond to February; the four-year leap year cycle can also be discerned.cube.time.diff(dim='time').plot.line(figsize=(20, 4))As a quick initial test, we plot a simple plate carrée projection (with longitude and latitude mapped directly to the x and y co-ordinates respectively) for the first time-point available.sm_2015_jan = cube.sm.sel(time='2015-01-01 00:00:00', method='nearest') sm_2015_jan.plot.imshow()Now we define a plotting function which restricts the plotted area to Europe.def plot_map(data_array, colour_scale=(None, None), cmap=None): data_array[100:220, 670:800].plot.imshow(vmin=colour_scale[0], vmax=colour_scale[1], figsize=(14,12), cmap=cmap) common_scale = (260, 300)First we plot the soil moisture for January 2015. We use an inverted version of the matplotlib ‘viridis’ colour map ranging from yellow (least moisture) to dark blue (most moisture).plot_map(sm_2015_jan, cmap='viridis_r')For comparison, we plot the data for July 2015. Note the overall lower moisture levels (particularly in southern regions) and the improved coverage at higher altitudes and latitudes (probably due to less snow and ice cover).sm_2015_jul = cube.sm.sel(time='2015-07-01 00:00:00', method='nearest') plot_map(sm_2015_jul, cmap='viridis_r')By subtracting the January data from the July data, we can see more clearly where the soil moisture decreases the most in the summer.plot_map(sm_2015_jan - sm_2015_jul)Desafio 3Neste desafio, iremos praticar nossos conhecimentos sobre distribuições de probabilidade. Para isso,dividiremos este desafio em duas partes: 1. A primeira parte contará com 3 questões sobre um *data set* artificial com dados de uma amostra normal e uma binomial.2. A segunda parte será sobre a análise da distribuição de uma variável do _data set_ [Pulsar Star](https://archive.ics.uci.edu/ml/datasets/HTRU2), contendo 2 questões.> Obs.: Por favor, não modifique o nome das funções de resposta. 
_Setup_ geralimport pandas as pd import matplotlib.pyplot as plt import numpy as np import scipy.stats as sct import seaborn as sns from statsmodels.distributions.empirical_distribution import ECDF #%matplotlib inline from IPython.core.pylabtools import figsize figsize(12, 8) sns.set()Parte 1 _Setup_ da parte 1np.random.seed(42) dataframe = pd.DataFrame({"normal": sct.norm.rvs(20, 4, size=10000), "binomial": sct.binom.rvs(100, 0.2, size=10000)})Inicie sua análise a partir da parte 1 a partir daqui# Sua análise da parte 1 começa aqui. dataframe.head() sns.distplot(dataframe['normal']) sns.distplot(dataframe['binomial'], bins=range(0, 35)) dataframe.describe() dataframe.quantile(0.25)['normal']Questão 1Qual a diferença entre os quartis (Q1, Q2 e Q3) das variáveis `normal` e `binomial` de `dataframe`? Responda como uma tupla de três elementos arredondados para três casas decimais.Em outra palavras, sejam `q1_norm`, `q2_norm` e `q3_norm` os quantis da variável `normal` e `q1_binom`, `q2_binom` e `q3_binom` os quantis da variável `binom`, qual a diferença `(q1_norm - q1 binom, q2_norm - q2_binom, q3_norm - q3_binom)`?def q1(): q_norm = dataframe['normal'].quantile(q=[0.25, 0.5, 0.75]) q_binom = dataframe['binomial'].quantile(q=[0.25, 0.5, 0.75]) return tuple(round(qi_norm - qi_binom, 3) for qi_norm, qi_binom in zip(q_norm, q_binom))Para refletir:* Você esperava valores dessa magnitude?* Você é capaz de explicar como distribuições aparentemente tão diferentes (discreta e contínua, por exemplo) conseguem dar esses valores? Minhas resposta:* Sim, a soma de diversas operações com p% de ocorrencia tende a uma normal, pelo teorema do limite central Questão 2Considere o intervalo $[\bar{x} - s, \bar{x} + s]$, onde $\bar{x}$ é a média amostral e $s$ é o desvio padrão. Qual a probabilidade nesse intervalo, calculada pela função de distribuição acumulada empírica (CDF empírica) da variável `normal`? Responda como uma único escalar arredondado para três casas decimais.def q2(): # Creating ECDF Object (It will return the CDF from emprical data) ecdf = ECDF(dataframe['normal']) lower_bound = dataframe['normal'].mean() - dataframe['normal'].std() upper_bound = dataframe['normal'].mean() + dataframe['normal'].std() # Probability for the lower and upper_bound p1 = ecdf(lower_bound) p2 = ecdf(upper_bound) return float(round(p2 - p1, 3))Para refletir:* Esse valor se aproxima do esperado teórico?* Experimente também para os intervalos $[\bar{x} - 2s, \bar{x} + 2s]$ e $[\bar{x} - 3s, \bar{x} + 3s]$. Minha resposta* Tanto para 1s, 2s e 3s há grande concordância com o valor esperado, demostrando a robustez do ECDF implementado pela biblioteca statsmodel Questão 3Qual é a diferença entre as médias e as variâncias das variáveis `binomial` e `normal`? Responda como uma tupla de dois elementos arredondados para três casas decimais.Em outras palavras, sejam `m_binom` e `v_binom` a média e a variância da variável `binomial`, e `m_norm` e `v_norm` a média e a variância da variável `normal`. Quais as diferenças `(m_binom - m_norm, v_binom - v_norm)`?def q3(): # Retorne aqui o resultado da questão 3. m_binom = dataframe['binomial'].mean() m_norm = dataframe['normal'].mean() v_binom = dataframe['binomial'].var() v_norm = dataframe['normal'].var() return round(m_binom - m_norm, 3), round(v_binom - v_norm, 3)Para refletir:* Você esperava valore dessa magnitude?* Qual o efeito de aumentar ou diminuir $n$ (atualmente 100) na distribuição da variável `binomial`? 
Minha resposta:* Valores de pequena magnitude eram de fato esperados* Ao alterar **n**, estamos alterando os valores máximos da distribuição, quando aumentamos **n** aumentamos a média e o desvio padrão da distribuição binomial, e quando reduzimos **n**, reduzimos a média e desvio padrão* Portanto, alterar **n** se não de uma maneira muito sensível, tende a aumentar a diferença entre os valores da distribuição normal ($\mu=20,\sigma=4$) e da binomal Parte 2 _Setup_ da parte 2stars = pd.read_csv("pulsar_stars.csv") stars.rename({old_name: new_name for (old_name, new_name) in zip(stars.columns, ["mean_profile", "sd_profile", "kurt_profile", "skew_profile", "mean_curve", "sd_curve", "kurt_curve", "skew_curve", "target"]) }, axis=1, inplace=True) stars.loc[:, "target"] = stars.target.astype(bool)Inicie sua análise da parte 2 a partir daqui# Sua análise da parte 2 começa aqui. stars.head() stars.describe() false_pulsar = stars.query('target == 0')['mean_profile'] false_pulsar_mean_profile_standardized = (false_pulsar - false_pulsar.mean()) / false_pulsar.std() sns.distplot(false_pulsar_mean_profile_standardized)Questão 4Considerando a variável `mean_profile` de `stars`:1. Filtre apenas os valores de `mean_profile` onde `target == 0` (ou seja, onde a estrela não é um pulsar).2. Padronize a variável `mean_profile` filtrada anteriormente para ter média 0 e variância 1.Chamaremos a variável resultante de `false_pulsar_mean_profile_standardized`.Encontre os quantis teóricos para uma distribuição normal de média 0 e variância 1 para 0.80, 0.90 e 0.95 através da função `norm.ppf()` disponível em `scipy.stats`.Quais as probabilidade associadas a esses quantis utilizando a CDF empírica da variável `false_pulsar_mean_profile_standardized`? Responda como uma tupla de três elementos arredondados para três casas decimais.def q4(): false_pulsar = stars.query('target == 0')['mean_profile'] false_pulsar_mean_profile_standardized = sct.zscore(false_pulsar) theorical_quartiles = sct.norm.ppf([0.8, 0.9, 0.95]) ecdf = ECDF(false_pulsar_mean_profile_standardized) experimental_quartiles = tuple(float(round(exp_quartil, 3)) for exp_quartil in ecdf(theorical_quartiles)) return experimental_quartiles q4()Para refletir:* Os valores encontrados fazem sentido?* O que isso pode dizer sobre a distribuição da variável `false_pulsar_mean_profile_standardized`? Minhas respostas:* Os valores encontrados fazem sentido, e aproximam-se de uma normal, pelo fato de que aparentemente a distribuição de estralas não pulsantes ser normal para variável de mean_profile Questão 5Qual a diferença entre os quantis Q1, Q2 e Q3 de `false_pulsar_mean_profile_standardized` e os mesmos quantis teóricos de uma distribuição normal de média 0 e variância 1? 
Responda como uma tupla de três elementos arredondados para três casas decimais.def q5(): false_pulsar = stars.query('target == 0')['mean_profile'] false_pulsar_mean_profile_standardized = (false_pulsar - false_pulsar.mean()) / false_pulsar.std() quartiles = [0.25, 0.50, 0.75] q_stars = [false_pulsar_mean_profile_standardized.quantile(i) for i in quartiles] q_norm = [sct.norm.ppf(i) for i in quartiles] return tuple(float(round(q_stars[i] - q_norm[i], 3)) for i in range(len(quartiles)))Visualizing Precession in Schwarzschild Spacetime Importing required modulesimport astropy.units as u import numpy as np from plotly.offline import init_notebook_mode # from einsteinpy.coordinates.utils import four_position, stacked_vec from einsteinpy.coordinates import SphericalDifferential from einsteinpy.geodesic import Timelike from einsteinpy.metric import Schwarzschild from einsteinpy.plotting import GeodesicPlotter # Essential when using Jupyter Notebook (May skip in Jupyter Lab) init_notebook_mode(connected=True)Defining Schwarzschild Metric and Initial Conditions# Mass of the black hole in SI M = 6e24 * u.kg # Defining the initial coordinates of the test particle # in SI sph = SphericalDifferential( t=10000.0 * u.s, r=130.0 * u.m, theta=np.pi / 2 * u.rad, phi=-np.pi / 8 * u.rad, v_r=0.0 * u.m / u.s, v_th=0.0 * u.rad / u.s, v_p=1900.0 * u.rad / u.s, ) # Schwarzschild Metric Object ms = Schwarzschild(coords=sph, M=M)Calculating Geodesic# Calculating Geodesic geod = Timelike(metric=ms, coords=sph, end_lambda=0.002, step_size=5e-8) geodPlotting the geodesicobj = GeodesicPlotter() obj.plot(geod) obj.show()ETL Pipeline PreparationFollow the instructions below to help you create your ETL pipeline. 1. Import libraries and load datasets.- Import Python libraries- Load `messages.csv` into a dataframe and inspect the first few lines.- Load `categories.csv` into a dataframe and inspect the first few lines.# import libraries import pandas as pd from sqlalchemy import create_engine # load messages dataset messages = pd.read_csv('../data/disaster_messages.csv') messages.head() messages.info() messages['id'].value_counts() # load categories dataset categories = pd.read_csv('../data/disaster_categories.csv') categories.head() categories.info() RangeIndex: 26248 entries, 0 to 26247 Data columns (total 2 columns): id 26248 non-null int64 categories 26248 non-null object dtypes: int64(1), object(1) memory usage: 410.2+ KBDoes every `id` value in `messages` have a corresponding `id` value in `categories`?categories['id'].value_counts() # check that IDs match and we don't have any missing categories['id'].isin(messages['id'].unique()).sum() / len(categories)As we saw earlier that each DataFrame is identical in length, this indicates that every `id` contained within `messages` also exists within `categories`. Great! 2. Merge datasets.- Merge the messages and categories datasets using the common id- Assign this combined dataset to `df`, which will be cleaned in the following steps# merge datasets df = messages.merge(categories, on='id', indicator=False) df.head() df.info() df['id'].value_counts()It looks like we're seeing duplicates in the merged dataset due to duplicates in the `messages` and `categories` datasets. We'll worry about that later on in the cleaning. 3. Split `categories` into separate category columns.- Split the values in the `categories` column on the `;` character so that each value becomes a separate column. 
You'll find [this method](https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.Series.str.split.html) very helpful! Make sure to set `expand=True`.- Use the first row of categories dataframe to create column names for the categories data.- Rename columns of `categories` with new column names.df['categories'].head() # create a dataframe of the 36 individual category columns category_values = df['categories'].str.split(';', expand=True) category_values.head() # select the first row of the categories dataframe row = category_values.loc[0] # use this row to extract a list of new column names for categories. # one way is to apply a lambda function that takes everything # up to the second to last character of each string with slicing category_colnames = row.str.slice(stop=-2) print(category_colnames) # rename the columns of `categories` category_values.columns = category_colnames category_values.head()4. Convert category values to just numbers 0 or 1.- Iterate through the category columns in df to keep only the last character of each string (the 1 or 0). For example, `related-0` becomes `0`, `related-1` becomes `1`. Convert the string to a numeric value.- You can perform [normal string actions on Pandas Series](https://pandas.pydata.org/pandas-docs/stable/text.htmlindexing-with-str), like indexing, by including `.str` after the Series. You may need to first convert the Series to be of type string, which you can do with `astype(str)`.for column in category_values: # set each value to be the last character of the string category_values[column] = category_values[column].str[-1] # convert column from string to numeric category_values[column] = category_values[column].astype(int) category_values.head() for column in category_values.columns: print(f"{column}\n") print(category_values[column].value_counts()) print("\n\n")related 1 20042 0 6140 2 204 Name: related, dtype: int64 request 0 21873 1 4513 Name: request, dtype: int64 offer 0 26265 1 121 Name: offer, dtype: int64 aid_related 0 15432 1 10954 Name: aid_related, dtype: int64 medical_help 0 24287 1 2099 Name: medical_help, dtype: int64 medical_products 0 25067 1 1319 Name: medical_products, dtype: int64 search_and_rescue 0 25661 1 725 Name: search_and_rescue, dtype: int64 security 0 25915 1 471 Name: security, dtype: int64 military 0 25523 1 863 Name: military, dtype: int64 child_alone 0 26386 Name: child_alone, dtype: int64 water 0 24702 1 1684 Name: water, dtype: int64 food 0 23430 1 2956 Name: food, dtype: int64 shelter 0 24044 1 2342 Name: shelter, dtype: int64 clothing 0 25976 1 410 Name: clothing, dtype: int64 money 0 25780 1 606 Name: money, dtype: int64 missing_people 0 [...]**Wait a minute!** The first category `related` has values of 2 in there?! That's definitely not right. 
Let's replace those with the most common value for this category, 1.for column in category_values: pass # Replace values of 2 with 1 in category 'related' category_values['related'].replace({2:1}) category_values['related'].replace({2:1}, inplace=True) category_values['related'].value_counts() category_values.info() Int64Index: 26386 entries, 0 to 26385 Data columns (total 36 columns): related 26386 non-null int64 request 26386 non-null int64 offer 26386 non-null int64 aid_related 26386 non-null int64 medical_help 26386 non-null int64 medical_products 26386 non-null int64 search_and_rescue 26386 non-null int64 security 26386 non-null int64 military 26386 non-null int64 child_alone 26386 non-null int64 water 26386 non-null int64 food 26386 non-null int64 shelter 26386 non-null int64 clothing 26386 non-null int64 money 26386 non-null int64 missing_people 26386 non-null int64 refugees 26386 non-null int64 death 26386 non-null int64 other_aid 26386 non-null int6[...]5. Replace `categories` column in `df` with new category columns.- Drop the categories column from the df dataframe since it is no longer needed.- Concatenate df and categories data frames.# drop the original categories column from `df` df.drop(columns=['categories'], inplace=True) df.head() # concatenate the original dataframe with the new `categories` dataframe df = pd.concat([df, category_values], axis=1) df.head() df.info() Int64Index: 26386 entries, 0 to 26385 Data columns (total 40 columns): id 26386 non-null int64 message 26386 non-null object original 10246 non-null object genre 26386 non-null object related 26386 non-null int64 request 26386 non-null int64 offer 26386 non-null int64 aid_related 26386 non-null int64 medical_help 26386 non-null int64 medical_products 26386 non-null int64 search_and_rescue 26386 non-null int64 security 26386 non-null int64 military 26386 non-null int64 child_alone 26386 non-null int64 water 26386 non-null int64 food 26386 non-null int64 shelter 26386 non-null int64 clothing 26386 non-null int64 money 26386 non-null i[...]6. Remove duplicates.- Check how many duplicates are in this dataset.- Drop the duplicates.- Confirm duplicates were removed.# check number of duplicates df.duplicated().sum() # drop duplicates df.drop_duplicates(inplace=True) # check number of duplicates df.duplicated().sum()How many messages are translations?len(df[df['message'] == df['original']]) / len(df)Whoa...0.1% have the original message the same as the final message text? I hope they used a good translator...df.info() Int64Index: 26216 entries, 0 to 26385 Data columns (total 40 columns): id 26216 non-null int64 message 26216 non-null object original 10170 non-null object genre 26216 non-null object related 26216 non-null int64 request 26216 non-null int64 offer 26216 non-null int64 aid_related 26216 non-null int64 medical_help 26216 non-null int64 medical_products 26216 non-null int64 search_and_rescue 26216 non-null int64 security 26216 non-null int64 military 26216 non-null int64 child_alone 26216 non-null int64 water 26216 non-null int64 food 26216 non-null int64 shelter 26216 non-null int64 clothing 26216 non-null int64 money 26216 non-null i[...]7. Save the clean dataset into an sqlite database.You can do this with pandas [`to_sql` method](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_sql.html) combined with the SQLAlchemy library. 
Remember to import SQLAlchemy's `create_engine` in the first cell of this notebook to use it below.engine = create_engine('sqlite:///../data/DisasterTweets.db') df.to_sql('categorized_messages', engine, index=False, if_exists='replace')8. Use this notebook to complete `etl_pipeline.py`Use the template file attached in the Resources folder to write a script that runs the steps above to create a database based on new datasets specified by the user. Alternatively, you can complete `etl_pipeline.py` in the classroom on the `Project Workspace IDE` coming later.import spacy import en_core_web_sm def create_named_entities_feature(df): ''' Creates new columns to correspond to the counts of types of named entities in the text that we care about for the purposes of disaster message classification. Parameters ---------- df: pandas DataFrame of the format returned by clean_data(). Returns ------- df with 12 new columns containing only float values corresponding to the 12 named entity types we're interested in including as features to the classifier. ''' # Have to have "entity_" in front so these don't end up duplicating names of other features entities_of_interest = [ 'entity_PERSON', 'entity_NORP', 'entity_FAC', 'entity_ORG', 'entity_GPE', 'entity_LOC', 'entity_PRODUCT', 'entity_EVENT', 'entity_LANGUAGE', 'entity_DATE', 'entity_TIME', 'entity_MONEY' ] # Each column will be a count of how many of these entities there are in a message # We start with all zeros entities_zero_df = pd.DataFrame(data = np.zeros((len(df), len(entities_of_interest))), columns=entities_of_interest, index = df.index) # Make sure it's a DataFrame already. If just a Series, make it a DataFrame if type(df) == pd.core.frame.DataFrame: pass elif type(df) == pd.core.series.Series: df = pd.DataFrame(df) else: raise ValueError('Input df is not a pandas Series or DataFrame') df = pd.concat([df, entities_zero_df], axis=1) def count_entities(row, allowed_entities): ''' Analyzes the different named entity types present in a given message and adds the count of each to the row. Meant to be used via df.apply(count_entities, axis=1) Parameters ---------- row: pandas Series representing a row of a DataFrame. Must contain a 'message' column containing the text to be analyzed and already have columns reflecting the desired entity types to be tracked (any others identified in analysis will be dropped) allowed_entities: list of str indicating entity type labels that we want to count. Any missing from this list are dropped in the output Returns ------- row updated with counts of each unique entity type extracted from its message ''' nlp = en_core_web_sm.load() doc = nlp(row['message']) label_counts = pd.Series(["entity_" + ent.label_ for ent in doc.ents \ if "entity_" + ent.label_ in allowed_entities]).value_counts() return label_counts entity_counts = df.apply(count_entities, args=(entities_of_interest,), axis=1).fillna(0) # Update features with the counts df.loc[:, entity_counts.columns] = entity_counts return df # Test to see the simplest way to generate features from a single starting string # This is needed for modifying the web app in run.py text = 'Massive earthquake, 7.9 on the scale, seen in Ohio, reports Dave.' 
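# numpy is needed for np.zeros in create_named_entities_feature above, but it does not
# appear to be imported in the cells shown here; importing it now (assumption), before the
# function is called, avoids a NameError.
import numpy as np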
test = pd.DataFrame({'message': text}, index = [0]) test = create_named_entities_feature(test) test['translated'] = 0 testTraining Deep Neural Networks on a GPU with PyTorch Part 4 of "Deep Learning with Pytorch: Zero to GANs"This tutorial series is a hands-on beginner-friendly introduction to deep learning using [PyTorch](https://pytorch.org), an open-source neural networks library. These tutorials take a practical and coding-focused approach. The best way to learn the material is to execute the code and experiment with it yourself. Check out the full series here:1. [PyTorch Basics: Tensors & Gradients](https://jovian.ai/aakashns/01-pytorch-basics)2. [Gradient Descent & Linear Regression](https://jovian.ai/aakashns/02-linear-regression)3. [Working with Images & Logistic Regression](https://jovian.ai/aakashns/03-logistic-regression) 4. [Training Deep Neural Networks on a GPU](https://jovian.ai/aakashns/04-feedforward-nn)5. [Image Classification using Convolutional Neural Networks](https://jovian.ai/aakashns/05-cifar10-cnn)6. [Data Augmentation, Regularization and ResNets](https://jovian.ai/aakashns/05b-cifar10-resnet)7. [Generating Images using Generative Adversarial Networks](https://jovian.ai/aakashns/06b-anime-dcgan/) This tutorial covers the following topics: * Creating a deep neural network with hidden layers * Using a non-linear activation function * Using a GPU (when available) to speed up training * Experimenting with hyperparameters to improve the model How to run the codeThis tutorial is an executable [Jupyter notebook](https://jupyter.org) hosted on [Jovian](https://www.jovian.ai). You can _run_ this tutorial and experiment with the code examples in a couple of ways: *using free online resources* (recommended) or *on your computer*. Option 1: Running using free online resources (1-click, recommended)The easiest way to start executing the code is to click the **Run** button at the top of this page and select **Run on Colab**. [Google Colab](https://colab.research.google.com) is a free online platform for running Jupyter notebooks using Google's cloud infrastructure. You can also select "Run on Binder" or "Run on Kaggle" if you face issues running the notebook on Google Colab. Option 2: Running on your computer locallyTo run the code on your computer locally, you'll need to set up [Python](https://www.python.org), download the notebook and install the required libraries. We recommend using the [Conda](https://docs.conda.io/projects/conda/en/latest/user-guide/install/) distribution of Python. Click the **Run** button at the top of this page, select the **Run Locally** option, and follow the instructions.> **Jupyter Notebooks**: This tutorial is a [Jupyter notebook](https://jupyter.org) - a document made of _cells_. Each cell can contain code written in Python or explanations in plain English. You can execute code cells and view the results, e.g., numbers, messages, graphs, tables, files, etc., instantly within the notebook. Jupyter is a powerful platform for experimentation and analysis. Don't be afraid to mess around with the code & break things - you'll learn a lot by encountering and fixing errors. You can use the "Kernel > Restart & Clear Output" or "Edit > Clear Outputs" menu option to clear all outputs and start again from the top. Using a GPU for faster trainingYou can use a [Graphics Processing Unit](https://en.wikipedia.org/wiki/Graphics_processing_unit) (GPU) to train your models faster if your execution platform is connected to a GPU manufactured by NVIDIA. 
Follow these instructions to use a GPU on the platform of your choice:* _Google Colab_: Use the menu option "Runtime > Change Runtime Type" and select "GPU" from the "Hardware Accelerator" dropdown.* _Kaggle_: In the "Settings" section of the sidebar, select "GPU" from the "Accelerator" dropdown. Use the button on the top-right to open the sidebar.* _Binder_: Notebooks running on Binder cannot use a GPU, as the machines powering Binder aren't connected to any GPUs.* _Linux_: If your laptop/desktop has an NVIDIA GPU (graphics card), make sure you have installed the [NVIDIA CUDA drivers](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html).* _Windows_: If your laptop/desktop has an NVIDIA GPU (graphics card), make sure you have installed the [NVIDIA CUDA drivers](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html).* _macOS_: macOS is not compatible with NVIDIA GPUsIf you do not have access to a GPU or aren't sure what it is, don't worry, you can execute all the code in this tutorial just fine without a GPU. Preparing the DataIn [the previous tutorial](https://jovian.ai/aakashns/03-logistic-regression), we trained a logistic regression model to identify handwritten digits from the MNIST dataset with an accuracy of around 86%. The dataset consists of 28px by 28px grayscale images of handwritten digits (0 to 9) and labels for each image indicating which digit it represents. Here are some sample images from the dataset:![mnist-sample](https://i.imgur.com/CAYnuo1.jpg)We noticed that it's quite challenging to improve the accuracy of a logistic regression model beyond 87%, since the model assumes a linear relationship between pixel intensities and image labels. In this post, we'll try to improve upon it using a *feed-forward neural network* which can capture non-linear relationships between inputs and targets.Let's begin by installing and importing the required modules and classes from `torch`, `torchvision`, `numpy`, and `matplotlib`.# Uncomment and run the appropriate command for your operating system, if required # Linux / Binder # !pip install numpy matplotlib torch==1.7.0+cpu torchvision==0.8.1+cpu torchaudio==0.7.0 -f https://download.pytorch.org/whl/torch_stable.html # Windows # !pip install numpy matplotlib torch==1.7.0+cpu torchvision==0.8.1+cpu torchaudio==0.7.0 -f https://download.pytorch.org/whl/torch_stable.html # MacOS # !pip install numpy matplotlib torch torchvision torchaudio import torch import torchvision import numpy as np import matplotlib import matplotlib.pyplot as plt import torch.nn as nn import torch.nn.functional as F from torchvision.datasets import MNIST from torchvision.transforms import ToTensor from torchvision.utils import make_grid from torch.utils.data.dataloader import DataLoader from torch.utils.data import random_split %matplotlib inline # Use a white background for matplotlib figures matplotlib.rcParams['figure.facecolor'] = '#ffffff'We can download the data and create a PyTorch dataset using the `MNIST` class from `torchvision.datasets`.dataset = MNIST(root='data/', download=True, transform=ToTensor())Let's look at a couple of images from the dataset. The images are converted to PyTorch tensors with the shape `1x28x28` (the dimensions represent color channels, width and height). We can use `plt.imshow` to display the images. 
However, `plt.imshow` expects channels to be last dimension in an image tensor, so we use the `permute` method to reorder the dimensions of the image.image, label = dataset[0] print('image.shape:', image.shape) plt.imshow(image.permute(1, 2, 0), cmap='gray') print('Label:', label) image, label = dataset[0] print('image.shape:', image.shape) plt.imshow(image.permute(1, 2, 0), cmap='gray') print('Label:', label)image.shape: torch.Size([1, 28, 28]) Label: 5Next, let's use the `random_split` helper function to set aside 10000 images for our validation set.val_size = 10000 train_size = len(dataset) - val_size train_ds, val_ds = random_split(dataset, [train_size, val_size]) len(train_ds), len(val_ds)We can now create PyTorch data loaders for training and validation.batch_size=128 train_loader = DataLoader(train_ds, batch_size, shuffle=True, num_workers=4, pin_memory=True) val_loader = DataLoader(val_ds, batch_size*2, num_workers=4, pin_memory=True)Can you figure out the purpose of the arguments `num_workers` and `pin_memory`? Try looking into the documentation: https://pytorch.org/docs/stable/data.html .Let's visualize a batch of data in a grid using the `make_grid` function from `torchvision`. We'll also use the `.permute` method on the tensor to move the channels to the last dimension, as expected by `matplotlib`.for images, _ in train_loader: print('images.shape:', images.shape) plt.figure(figsize=(16,8)) plt.axis('off') plt.imshow(make_grid(images, nrow=16).permute((1, 2, 0))) breakimages.shape: torch.Size([128, 1, 28, 28])Hidden Layers, Activation Functions and Non-LinearityWe'll create a neural network with two layers: a _hidden layer_ and an _output layer_. Additionally, we'll use an _activation function_ between the two layers. Let's look at a step-by-step example to learn how hidden layers and activation functions can help capture non-linear relationships between inputs and outputs.First, let's create a batch of inputs tensors. We'll flatten the `1x28x28` images into vectors of size `784`, so they can be passed into an `nn.Linear` object.for images, labels in train_loader: print('images.shape:', images.shape) inputs = images.reshape(-1, 784) print('inputs.shape:', inputs.shape) breakimages.shape: torch.Size([128, 1, 28, 28]) inputs.shape: torch.Size([128, 784])Next, let's create a `nn.Linear` object, which will serve as our _hidden_ layer. We'll set the size of the output from the hidden layer to 32. This number can be increased or decreased to change the _learning capacity_ of the model.input_size = inputs.shape[-1] hidden_size = 32 layer1 = nn.Linear(input_size, hidden_size)We can now compute intermediate outputs for the batch of images by passing `inputs` through `layer1`.inputs.shape layer1_outputs = layer1(inputs) print('layer1_outputs.shape:', layer1_outputs.shape)layer1_outputs.shape: torch.Size([128, 32])The image vectors of size `784` are transformed into intermediate output vectors of length `32` by performing a matrix multiplication of `inputs` matrix with the transposed weights matrix of `layer1` and adding the bias. We can verify this using `torch.allclose`. 
For a more detailed explanation, review the tutorial on [linear regression](https://jovian.ai/aakashns/02-linear-regression).layer1_outputs_direct = inputs @ layer1.weight.t() + layer1.bias layer1_outputs_direct.shape # torch.allclose is use to check the similarities of two variables torch.allclose(layer1_outputs, layer1_outputs_direct, 1e-3)Thus, `layer1_outputs` and `inputs` have a linear relationship, i.e., each element of `layer_outputs` is a weighted sum of elements from `inputs`. Thus, even as we train the model and modify the weights, `layer1` can only capture linear relationships between `inputs` and `outputs`. Next, we'll use the Rectified Linear Unit (ReLU) function as the activation function for the outputs. It has the formula `relu(x) = max(0,x)` i.e. it simply replaces negative values in a given tensor with the value 0. ReLU is a non-linear function, as seen here visually:We can use the `F.relu` method to apply ReLU to the elements of a tensor.F.relu(torch.tensor([[1, -1, 0], [-0.1, .2, 3]])) layer1_outputs.shapeLet's apply the activation function to `layer1_outputs` and verify that negative values were replaced with 0.relu_outputs = F.relu(layer1_outputs) print('min(layer1_outputs):', torch.min(layer1_outputs).item()) print('min(relu_outputs):', torch.min(relu_outputs).item())min(layer1_outputs): -0.8755336999893188 min(relu_outputs): 0.0Now that we've applied a non-linear activation function, `relu_outputs` and `inputs` do not have a linear relationship. We refer to `ReLU` as the _activation function_, because for each input certain outputs are activated (those with non-zero values) while others turned off (those with zero values)Next, let's create an output layer to convert vectors of length `hidden_size` in `relu_outputs` into vectors of length 10, which is the desired output of our model (since there are 10 target labels).output_size = 10 layer2 = nn.Linear(hidden_size, output_size) relu_outputs.shape layer2_outputs = layer2(relu_outputs) print(layer2_outputs.shape) inputs.shapeAs expected, `layer2_outputs` contains a batch of vectors of size 10. We can now use this output to compute the loss using `F.cross_entropy` and adjust the weights of `layer1` and `layer2` using gradient descent.F.cross_entropy(layer2_outputs, labels)Thus, our model transforms `inputs` into `layer2_outputs` by applying a linear transformation (using `layer1`), followed by a non-linear activation (using `F.relu`), followed by another linear transformation (using `layer2`). Let's verify this by re-computing the output using basic matrix operations.# Expanded version of layer2(F.relu(layer1(inputs))) outputs = (F.relu(inputs @ layer1.weight.t() + layer1.bias)) @ layer2.weight.t() + layer2.bias torch.allclose(outputs, layer2_outputs, 1e-3)Note that `outputs` and `inputs` do not have a linear relationship due to the non-linear activation function `F.relu`. As we train the model and adjust the weights of `layer1` and `layer2`, we can now capture non-linear relationships between the images and their labels. In other words, introducing non-linearity makes the model more powerful and versatile. Also, since `hidden_size` does not depend on the dimensions of the inputs or outputs, we vary it to increase the number of parameters within the model. We can also introduce new hidden layers and apply the same non-linear activation after each hidden layer.The model we just created is called a neural network. A _deep neural network_ is simply a neural network with one or more hidden layers. 
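As an aside (not part of the original tutorial), the same layer1 -> ReLU -> layer2 pipeline can be written more compactly with `nn.Sequential`; a minimal sketch, assuming the sizes used above:

```python
import torch.nn as nn

# Hidden layer -> ReLU -> output layer, equivalent in structure to the
# layer1 / F.relu / layer2 steps above (784 inputs, 32 hidden units, 10 classes).
model_sketch = nn.Sequential(
    nn.Linear(784, 32),
    nn.ReLU(),
    nn.Linear(32, 10),
)
```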
In fact, the [Universal Approximation Theorem](http://neuralnetworksanddeeplearning.com/chap4.html) states that a sufficiently large & deep neural network can compute any arbitrary function i.e. it can _learn_ rich and complex non-linear relationships between inputs and targets. Here are some examples:* Identifying if an image contains a cat or a dog (or [something else](https://machinelearningmastery.com/introduction-to-the-imagenet-large-scale-visual-recognition-challenge-ilsvrc/))* Identifying the genre of a song using a 10-second sample* Classifying movie reviews as positive or negative based on their content* Navigating self-driving cars using a video feed of the road* Translating sentences from English to French (and hundreds of other languages)* Converting a speech recording to text and vice versa* And many more...It's hard to imagine how the simple process of multiplying inputs with randomly initialized matrices, applying non-linear activations, and adjusting weights repeatedly using gradient descent can yield such astounding results. Deep learning models often contain millions of parameters, which can together capture far more complex relationships than the human brain can comprehend.If we hadn't included a non-linear activation between the two linear layers, the final relationship between inputs and outputs would still be linear. A simple refactoring of the computations illustrates this.# Same as layer2(layer1(inputs)) outputs2 = (inputs @ layer1.weight.t() + layer1.bias) @ layer2.weight.t() + layer2.bias # Create a single layer to replace the two linear layers combined_layer = nn.Linear(input_size, output_size) combined_layer.weight.data = layer2.weight @ layer1.weight combined_layer.bias.data = layer1.bias @ layer2.weight.t() + layer2.bias # Same as combined_layer(inputs) outputs3 = inputs @ combined_layer.weight.t() + combined_layer.bias torch.allclose(outputs2, outputs3, 1e-3)Save and upload your notebookWhether you're running this Jupyter notebook online or on your computer, it's essential to save your work from time to time. You can continue working on a saved notebook later or share it with friends and colleagues to let them execute your code. [Jovian](https://jovian.ai/platform-features) offers an easy way of saving and sharing your Jupyter notebooks online.# Install the library !pip install jovian --upgrade --quiet import jovian jovian.commit(project='04-feedforward-nn')`jovian.commit` uploads the notebook to your Jovian account, captures the Python environment, and creates a shareable link for your notebook, as shown above. You can use this link to share your work and let anyone (including you) run your notebooks and reproduce your work. ModelWe are now ready to define our model. As discussed above, we'll create a neural network with one hidden layer. Here's what that means:* Instead of using a single `nn.Linear` object to transform a batch of inputs (pixel intensities) into outputs (class probabilities), we'll use two `nn.Linear` objects. Each of these is called a _layer_ in the network. * The first layer (also known as the hidden layer) will transform the input matrix of shape `batch_size x 784` into an intermediate output matrix of shape `batch_size x hidden_size`. The parameter `hidden_size` can be configured manually (e.g., 32 or 64).* We'll then apply a non-linear *activation function* to the intermediate outputs. 
The activation function transforms individual elements of the matrix.* The result of the activation function, which is also of size `batch_size x hidden_size`, is passed into the second layer (also known as the output layer). The second layer transforms it into a matrix of size `batch_size x 10`. We can use this output to compute the loss and adjust weights using gradient descent.As discussed above, our model will contain one hidden layer. Here's what it looks like visually:Let's define the model by extending the `nn.Module` class from PyTorch.class MnistModel(nn.Module): """Feedforward neural network with 1 hidden layer""" def __init__(self, in_size, hidden_size, out_size): super().__init__() # hidden layer self.linear1 = nn.Linear(in_size, hidden_size) # output layer self.linear2 = nn.Linear(hidden_size, out_size) def forward(self, xb): # Flatten the image tensors xb = xb.view(xb.size(0), -1) # Get intermediate outputs using hidden layer out = self.linear1(xb) # Apply activation function out = F.relu(out) # Get predictions using output layer out = self.linear2(out) return out def training_step(self, batch): images, labels = batch out = self(images) # Generate predictions loss = F.cross_entropy(out, labels) # Calculate loss return loss def validation_step(self, batch): images, labels = batch out = self(images) # Generate predictions loss = F.cross_entropy(out, labels) # Calculate loss acc = accuracy(out, labels) # Calculate accuracy return {'val_loss': loss, 'val_acc': acc} def validation_epoch_end(self, outputs): batch_losses = [x['val_loss'] for x in outputs] epoch_loss = torch.stack(batch_losses).mean() # Combine losses batch_accs = [x['val_acc'] for x in outputs] epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()} def epoch_end(self, epoch, result): print("Epoch [{}], val_loss: {:.4f}, val_acc: {:.4f}".format(epoch, result['val_loss'], result['val_acc']))We also need to define an `accuracy` function which calculates the accuracy of the model's predictions on a batch of inputs. It's used in `validation_step` above.def accuracy(outputs, labels): _, preds = torch.max(outputs, dim=1) return torch.tensor(torch.sum(preds == labels).item() / len(preds))We'll create a model that contains a hidden layer with 32 activations.input_size = 784 hidden_size = 32 # you can change this num_classes = 10 model = MnistModel(input_size, hidden_size=32, out_size=num_classes)Let's take a look at the model's parameters. We expect to see one weight and bias matrix for each of the layers.for t in model.parameters(): print(t.shape)torch.Size([32, 784]) torch.Size([32]) torch.Size([10, 32]) torch.Size([10])Let's try and generate some outputs using our model. We'll take the first batch of 128 images from our dataset and pass them into our model.for images, labels in train_loader: outputs = model(images) loss = F.cross_entropy(outputs, labels) print('Loss:', loss.item()) break print('outputs.shape : ', outputs.shape) print('Sample outputs :\n', outputs[:2].data)Loss: 2.3073794841766357 outputs.shape : torch.Size([128, 10]) Sample outputs : tensor([[-0.0109, -0.1785, 0.2660, 0.0964, -0.0586, -0.0312, -0.0868, -0.0336, 0.0989, -0.1498], [-0.0314, -0.0417, 0.2988, 0.1098, 0.0329, 0.0457, -0.1036, 0.2054, -0.0655, -0.1870]])Using a GPUAs the sizes of our models and datasets increase, we need to use GPUs to train our models within a reasonable amount of time.
GPUs contain hundreds of cores optimized for performing expensive matrix operations on floating-point numbers quickly, making them ideal for training deep neural networks. You can use GPUs for free on [Google Colab](https://colab.research.google.com/) and [Kaggle](https://www.kaggle.com/kernels) or rent GPU-powered machines on services like [Google Cloud Platform](https://cloud.google.com/gpu/), [Amazon Web Services](https://docs.aws.amazon.com/dlami/latest/devguide/gpu.html), and [Paperspace](https://www.paperspace.com/).We can check if a GPU is available and the required NVIDIA CUDA drivers are installed using `torch.cuda.is_available`.torch.cuda.is_available()Let's define a helper function to ensure that our code uses the GPU if available and defaults to using the CPU if it isn't.def get_default_device(): """Pick GPU if available, else CPU""" if torch.cuda.is_available(): return torch.device('cuda') else: return torch.device('cpu') device = get_default_device() deviceNext, let's define a function that can move data and model to a chosen device.def to_device(data, device): """Move tensor(s) to chosen device""" if isinstance(data, (list,tuple)): return [to_device(x, device) for x in data] return data.to(device, non_blocking=True) for images, labels in train_loader: print(images.shape) images = to_device(images, device) print(images.device) breaktorch.Size([128, 1, 28, 28]) cpuFinally, we define a `DeviceDataLoader` class to wrap our existing data loaders and move batches of data to the selected device. Interestingly, we don't need to extend an existing class to create a PyTorch data loader. All we need is an `__iter__` method to retrieve batches of data and an `__len__` method to get the number of batches.class DeviceDataLoader(): """Wrap a dataloader to move data to a device""" def __init__(self, dl, device): self.dl = dl self.device = device def __iter__(self): """Yield a batch of data after moving it to device""" for b in self.dl: yield to_device(b, self.device) def __len__(self): """Number of batches""" return len(self.dl)The `yield` keyword in Python is used to create a generator function that can be used within a `for` loop, as illustrated below.def some_numbers(): yield 10 yield 20 yield 30 for value in some_numbers(): print(value)10 20 30We can now wrap our data loaders using `DeviceDataLoader`.train_loader = DeviceDataLoader(train_loader, device) val_loader = DeviceDataLoader(val_loader, device)Tensors moved to the GPU have a `device` property which contains the word `cuda`.
Let's verify this by looking at a batch of data from `valid_dl`.for xb, yb in val_loader: print('xb.device:', xb.device) print('yb:', yb) breakxb.device: cpu yb: tensor([7, 6, 5, 3, 3, 5, 1, 1, 7, 3, 2, 1, 2, 9, 0, 1, 3, 6, 8, 7, 0, 2, 6, 1, 9, 5, 1, 4, 0, 4, 9, 6, 7, 7, 1, 8, 8, 6, 1, 7, 0, 2, 1, 2, 5, 0, 0, 7, 2, 4, 3, 7, 8, 3, 9, 9, 5, 3, 9, 3, 7, 7, 2, 4, 8, 4, 8, 0, 5, 3, 1, 9, 1, 8, 8, 8, 4, 2, 1, 3, 5, 2, 4, 9, 2, 8, 1, 2, 7, 9, 3, 8, 2, 2, 7, 1, 7, 8, 2, 5, 7, 0, 9, 4, 1, 7, 0, 0, 6, 4, 0, 7, 4, 7, 3, 4, 4, 4, 5, 2, 6, 0, 2, 7, 6, 7, 0, 2, 3, 7, 7, 6, 6, 7, 7, 3, 0, 4, 3, 4, 9, 7, 6, 9, 0, 1, 3, 5, 0, 5, 5, 1, 0, 4, 7, 9, 0, 1, 3, 2, 1, 2, 2, 2, 6, 9, 6, 7, 6, 2, 4, 3, 1, 3, 9, 9, 4, 0, 4, 5, 1, 2, 5, 5, 8, 1, 7, 2, 2, 6, 8, 2, 2, 6, 6, 1, 4, 5, 8, 4, 9, 6, 7, 2, 5, 4, 0, 9, 8, 7, 2, 6, 7, 4, 1, 8, 0, 6, 1, 1, 4, 7, 1, 8, 2, 0, 1, 9, 5, 7, 9, 4, 8, 0, 0, 8, 5, 4, 4, 0, 0, 5, 1, 4, 7, 0, 8, 2, 6, 6, 2, 1, 6, 3, 4, 5])Training the ModelWe'll define two functions: `fit` and `evaluate` to train the model using gradient descent and evaluate its performance on the validation set. For a detailed walkthrough of these functions, check out the [previous tutorial](https://jovian.ai/aakashns/03-logistic-regression).def evaluate(model, val_loader): """Evaluate the model's performance on the validation set""" outputs = [model.validation_step(batch) for batch in val_loader] return model.validation_epoch_end(outputs) def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD): """Train the model using gradient descent""" history = [] optimizer = opt_func(model.parameters(), lr) for epoch in range(epochs): # Training Phase for batch in train_loader: loss = model.training_step(batch) loss.backward() optimizer.step() optimizer.zero_grad() # Validation phase result = evaluate(model, val_loader) model.epoch_end(epoch, result) history.append(result) return historyBefore we train the model, we need to ensure that the data and the model's parameters (weights and biases) are on the same device (CPU or GPU). We can reuse the `to_device` function to move the model's parameters to the right device.# Model (on GPU) model = MnistModel(input_size, hidden_size=hidden_size, out_size=num_classes) to_device(model, device)Let's see how the model performs on the validation set with the initial set of weights and biases.history = [evaluate(model, val_loader)] historyThe initial accuracy is around 10%, as one might expect from a randomly initialized model (since it has a 1 in 10 chance of getting a label right by guessing randomly).Let's train the model for five epochs and look at the results. We can use a relatively high learning rate of 0.5.history += fit(5, 0.5, model, train_loader, val_loader)Epoch [0], val_loss: 0.2245, val_acc: 0.9315 Epoch [1], val_loss: 0.1599, val_acc: 0.9543 Epoch [2], val_loss: 0.1628, val_acc: 0.9509 Epoch [3], val_loss: 0.1478, val_acc: 0.9566 Epoch [4], val_loss: 0.1469, val_acc: 0.956596% is pretty good! 
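One optional refactor, not from the original notebook: the high-learning-rate stage above and the lower-rate stage that follows can be driven from a simple schedule; a sketch assuming the `fit` and `evaluate` functions and the data loaders defined earlier:

```python
# Run several (epochs, learning_rate) stages in sequence and collect the history.
schedule = [(5, 0.5), (5, 0.1)]
history = [evaluate(model, val_loader)]
for epochs, lr in schedule:
    history += fit(epochs, lr, model, train_loader, val_loader)
```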
Let's train the model for five more epochs at a lower learning rate of 0.1 to improve the accuracy further.history += fit(5, 0.1, model, train_loader, val_loader)Epoch [0], val_loss: 0.1166, val_acc: 0.9672 Epoch [1], val_loss: 0.1147, val_acc: 0.9659 Epoch [2], val_loss: 0.1118, val_acc: 0.9666 Epoch [3], val_loss: 0.1094, val_acc: 0.9686 Epoch [4], val_loss: 0.1101, val_acc: 0.9685We can now plot the losses & accuracies to study how the model improves over time.losses = [x['val_loss'] for x in history] plt.plot(losses, '-x') plt.xlabel('epoch') plt.ylabel('loss') plt.title('Loss vs. No. of epochs'); accuracies = [x['val_acc'] for x in history] plt.plot(accuracies, '-x') plt.xlabel('epoch') plt.ylabel('accuracy') plt.title('Accuracy vs. No. of epochs');Our current model outperforms the logistic regression model (which could only achieve around 86% accuracy) by a considerable margin! It quickly reaches an accuracy of 97% but doesn't improve much beyond this. To improve accuracy further, we need to make the model more powerful by increasing the hidden layer's size or adding more hidden layers with activations. I encourage you to try out both these approaches and see which one works better. As a final step, we can save and commit our work using the `jovian` library.!pip install jovian --upgrade -q import jovian jovian.commit(project='04-feedforward-nn', environment=None)Testing with individual imagesWhile we have been tracking the overall accuracy of a model so far, it's also a good idea to look at the model's results on some sample images. Let's test out our model with some images from the predefined test dataset of 10000 images. We begin by recreating the test dataset with the `ToTensor` transform.# Define test dataset test_dataset = MNIST(root='data/', train=False, transform=ToTensor())Let's define a helper function `predict_image`, which returns the predicted label for a single image tensor.def predict_image(img, model): xb = to_device(img.unsqueeze(0), device) yb = model(xb) _, preds = torch.max(yb, dim=1) return preds[0].item()Let's try it out with a few images.img, label = test_dataset[0] plt.imshow(img[0], cmap='gray') print('Label:', label, ', Predicted:', predict_image(img, model)) img, label = test_dataset[1839] plt.imshow(img[0], cmap='gray') print('Label:', label, ', Predicted:', predict_image(img, model)) img, label = test_dataset[193] plt.imshow(img[0], cmap='gray') print('Label:', label, ', Predicted:', predict_image(img, model))Label: 9 , Predicted: 9Identifying where our model performs poorly can help us improve the model, by collecting more training data, increasing/decreasing the complexity of the model, and changing the hyperparameters.As a final step, let's also look at the overall loss and accuracy of the model on the test set.test_loader = DeviceDataLoader(DataLoader(test_dataset, batch_size=256), device) result = evaluate(model, test_loader) resultWe expect this to be similar to the accuracy/loss on the validation set. If not, we might need a better validation set that has similar data and distribution as the test set (which often comes from real world data). Let's save the model's weights and attach it to the notebook using `jovian.commit`.
We will also record the model's performance on the test dataset using `jovian.log_metrics`.jovian.log_metrics(test_loss=result['val_loss'], test_acc=result['val_loss']) torch.save(model.state_dict(), 'mnist-feedforward.pth') jovian.commit(project='04-feedforward-nn', environment=None, outputs=['mnist-feedforward.pth'])Data preprocessimport numpy as np import matplotlib.pyplot as plt from numpy.fft import fft2, ifft2, fftshift, ifftshift from fSIM_func import * from IPython import display import pickle import glob from PIL import Image %matplotlib inline %load_ext autoreload %autoreload 2 plt.style.use(['dark_background']) # Experimental parameters lambda_f = 0.605 # fluorescence wavelength mag = 5 # system magnification pscrop = 5.5/mag # effective pixel size on camera plane NA_obj = 0.1 # Objective NA Ns = 100 # data cropping y size Ms = 100 # data cropping x size bg = 300 # Background noise upsamp_factor = 4 # upsampling factor (should upsample to Nyquist of expected resolution) # cropping starting point of fluorescent/coherent images nstart = [108 + 550, 60 + 610] # [cam_pos + crop_pos] nstart_c = [34 + 550, 16 + 610] # [cam_pos + crop_pos] # File sorting helper function import re numbers = re.compile(r'(\d+)') def numericalSort(value): parts = numbers.split(value) parts[1::2] = map(int, parts[1::2]) return parts # Load raw fluorescent images from the folder filedir_fl = '/media/hugespace/Li-Hao/cSIM/data/data_035/scmos_image2/*tif' files_fl = sorted(glob.glob(filedir_fl), key=numericalSort) filedir_c = '/media/hugespace/Li-Hao/cSIM/data/data_035/data_image2/ptgrey1_acq*.tif' files_c = sorted(glob.glob(filedir_c), key=numericalSort) Nimg = len(files_fl) I_image = np.zeros((Nimg, Ns, Ms)) Ic_image = np.zeros((Nimg, Ns, Ms)) for i in range(0, Nimg): # Load fluorescent data I = plt.imread(files_fl[i]).astype('float64') I = np.maximum(0,np.array(Image.fromarray(I).resize((1536,1296),Image.BICUBIC))) I_image[i] = I[nstart[0]:nstart[0]+Ns,nstart[1]:nstart[1]+Ms] # Load coherent data Ic = plt.imread(files_c[i]).astype('float64') Ic_image[i] = Ic[nstart_c[0]:nstart_c[0]+Ns,nstart_c[1]:nstart_c[1]+Ms] if np.mod(i+1,100) == 0 or i+1 == Nimg: print('Data loading process (%d / %d)'%(i+1,Nimg)) display_image_movie(Ic_image, frame_num=40, size=(7,7), pause_time=0.0001) fig,ax = plt.subplots(1,2,figsize=(10,6)) ax0 = ax[0].imshow(np.mean(I_image,axis=0)) ax1 = ax[1].imshow(np.mean(Ic_image,axis=0)) fig.colorbar(ax0,ax=ax[0]) fig.colorbar(ax1,ax=ax[1]) # Save data f = open('fluorescent_data.pckl', 'wb') pickle.dump((pscrop,lambda_f,NA_obj,I_image,Ic_image, upsamp_factor,bg), f) f.close()from google.colab import drive drive.mount('/content/gdrive') !pwd %cd /content/gdrive/My\ Drive/projects/auto-image-tagging # !git clone https://github.com/tuanchris/auto-image-tagging import os os.chdir('/content/gdrive/My Drive/projects/auto-image-tagging') !python3 ./src/keras_json_parser.py --json_file ./src/labels.json --dataset_path ./data/ --train_percentage 80 --validation_percentage 20 from keras.applications.inception_v3 import InceptionV3 from keras.preprocessing import image from keras.models import Model from keras.layers import Dense, GlobalAveragePooling2D from keras import backend as K from keras.preprocessing.image import ImageDataGenerator from keras.optimizers import SGD from keras.callbacks import ModelCheckpoint train_datagen = ImageDataGenerator( rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True) test_datagen = ImageDataGenerator(rescale=1./255) train_generator = 
train_datagen.flow_from_directory( './data/train', target_size=(150, 150), batch_size=32, class_mode='categorical') validation_generator = test_datagen.flow_from_directory( './data/validation', target_size=(150, 150), batch_size=32, class_mode='categorical') # create the base pre-trained model base_model = InceptionV3(weights='imagenet', include_top=False) # add a global spatial average pooling layer x = base_model.output x = GlobalAveragePooling2D()(x) # let's add a fully-connected layer x = Dense(1024, activation='relu')(x) # and a logistic layer for our 16 classes predictions = Dense(16, activation='sigmoid')(x) # this is the model we will train model = Model(inputs=base_model.input, outputs=predictions) # first: train only the top layers (which were randomly initialized) # i.e. freeze all convolutional InceptionV3 layers for layer in base_model.layers: layer.trainable = False # compile the model (should be done *after* setting layers to non-trainable) # model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) model.compile(loss='binary_crossentropy', optimizer=sgd, metrics = ['accuracy']) filepath="./models/weights-improvement-{epoch:02d}.hdf5" checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max') callbacks_list = [checkpoint] model.summary() # train the model on the new data for a few epochs # model.fit_generator( # train_generator, # steps_per_epoch=2000, # epochs=50, # validation_data=validation_generator, # validation_steps=800) H = model.fit_generator( train_generator, validation_data=validation_generator, steps_per_epoch=2000, epochs=50, verbose=1, callbacks=callbacks_list)Lesson 1 Applied Algebra and Numerical Methods Matrices and systems of linear algebraic equations (SLAE)numpy:https://numpy.org/doc/stable/reference/routines.linalg.htmlscipy:https://docs.scipy.org/doc/scipy/reference/linalg.htmlimport numpy as npMatrices and operations on matricesIn numpy, Python lists can be used in the role of matrices; for example, matrices can be multiplied like this using dot:a = [[1, 2], [3, 4]] b = [[5, 6], [7, 8]] display(a, b, np.dot(a, b))Note that dot returns an array, not a list.Lists are not suitable for matrix addition: adding two lists simply appends the second one to the "tail" of the first:a + bFor matrix addition and subtraction we can use array:a_np = np.array(a) b_np = np.array(b) display(a_np + b_np, a_np - b_np)A matrix object can also be created from a list, for example with mat:np.mat(a)In recent versions of numpy the matrix class is not recommended, since it is planned to be removed in the future; array is recommended instead. TranspositionTo transpose a matrix, use np.transpose(x):display(np.transpose(a), np.transpose(a_np))np.transpose returns an array both when the argument is an array and when it is a list. Functions in PythonTo make a solution clearer and more convenient, it is often useful to split the problem into subproblems and implement each subproblem as a function. There are built-in functions, such as $\sin$ and $\log$, which can be used after importing the corresponding module. You can also write your own functions as follows:def function_name(arg1,\ ...,\ arg2=Value): .....
return somethingThe keyword return can be omitted, in which case the function returns None.A function may have only required arguments, but it may also have arguments with default values (optional arguments).First let's define a function $f$ with required arguments $x$ and $a$.When the function is called, the arguments are passed in order: in our case first the value of $x$, then $a$. Example 1Let's define the function $func\_power(x, a) = x^a$:def func_power(x, a): return x**aWhen calling the function we first pass the value of $x$, then $a$.func_power(2, 3)Optional argumentsOptional arguments, i.e. arguments with default values, are always passed AFTER the required arguments!!! Example 2Let's define the function $g(A, n) = A^n$ with the parameter $n$ equal to 1 by default:def g(A, n=1): return A**nWhen calling the function we pass only the required argument if the default value suits us:A = np.mat([[1, 2], [3, 4]]) g(A)Let's raise the matrix to the power 3:g(A, 3)If we use an array instead of a matrix, the result is different:B = np.array([[1, 2], [3, 4]]) g(B, 3)To multiply two arrays as matrices, we need to use the matmul function:np.matmul(A, B)matmul has an optional argument, a matrix (array) into which the result is written:C = np.ones((2, 2)) display(C) np.matmul(B, B, C) display(C)A matrix can be used instead of an array, and for now this still works:display(A) np.matmul(B, B, A) display(A)Example 3Let's rewrite the function $g(A, n) = A^n$ from Example 2 so that arrays are also multiplied as matrices:def g3(A, n=1): if n < 1: return 'error' if n == 1: return A res = A.copy() for k in range(n - 1): np.matmul(res, A, res) return res display(g3(B), g3(B, 3)) np.matmul(B, np.matmul(B, B))Function arguments whose number is not known in advance Example 4Let's rewrite the function $g3$ from Example 3 so that it can compute the product of an arbitrary number of matrices:def g4(*args): n = len(args) if n == 0: return 'error' if n == 1: return args[0] res = args[0].copy() for k in range(1, n): np.matmul(res, args[k], res) return res g4(A) g4(A, B, A) g4(B, B, A)Note that the type of the result is the same as that of the first argument. Example 5.Let's solve the SLAE$$\left\{\begin{matrix}2x + 3y - z = 5\\3x - 2y + z = 2\\x + y - z = 0\end{matrix}\right.$$To solve the SLAE we use linalg.solve; its arguments are the matrix of the left-hand side and the right-hand-side column.To check that the solution is correct we use allclose:a = np.array([[2, 3, -1], [3, -2, 1], [1, 1, -1]]) b = np.array([5, 2, 0]) x = np.linalg.solve(a, b) display(x) np.allclose(np.dot(a, x), b)Basic hypothesis testing: $t$ and $F$ tests IntroductionAims of this chapter:* Using $t$ tests to look at differences between means* Using $F$ tests to compare the variance of two samples* Using non-parametric tests for differences $t$ testsThe $t$ test is used to compare the mean of a sample to another value, which can be: * Some reference point (Is the mean different from 5?) * Another mean (Does the mean of A differ from the mean of B?). If you have a factor with two levels then a $t$ test is a good way to compare the means of those samples. If you have more than two levels, then you have a problem: as the number of levels ($n$) increases, the number of possible comparisons between pairs of levels ($p$) increases very rapidly.
The number of possible pairs is the binomial coefficient $C(n,2)$ — inevitably, R has a function for this (try it in R):choose(2:6,2)Making this many comparisons using something like a t-test is a problem because we neglect the covariance among measures and inflate the chance of falsely rejecting at least one null hypothesis (false positive, or [Type I error](ExpDesign:Some-statistical-parlance)). ---:::{figure-md} multiple-comparisons**Your Type I error rate increases with the number of pairwise comparisons ($p$) you make using a single dataset.** Say, you make multiple comparisons using an $\alpha = 0.05$ significance level. Then you have that much chance of making a Type I error on each comparison. Thus, assuming each pairwise comparison is made independently, the Type I error rate accumulates with every new comparison you make. The chance of committing at least one Type I error for a set of $n$ comparisons is therefore up to $n \times 0.05$; so for 6 pairwise comparisons, your chance of making at least one Type I (false positive) error would be as high as $6 \times 0.05 = 0.3$, or $30\%$ (the exact probability for truly independent comparisons is $1 - 0.95^6 \approx 0.26$, which is still alarmingly high)!:::--- A more-than-two-groups example would be if you were, say, comparing the means of more than two types of insect groups – below, we compare the means of two groups (dragonflies and damselflies); if we added stoneflies to this comparison, the t-test would not be the way to go as we would be making three pairwise comparisons (n = 3, p = 3).*OK, assuming you are interested in making just one pairwise comparison, let's learn about $t$ tests.*The basic idea behind the $t$ test is to divide the difference between two values by a measure of how much uncertainty there is about the size of that difference. The measure of uncertainty used is the *standard error*.$$t=\frac{\textrm{difference between values}}{\textrm{standard error}}$$When there is no difference between the values $t$ will be zero, but with big differences and/or small errors, it will be larger. The $t$ distribution below shows how commonly different values of $t$ are found under the null hypothesis:---:::{figure-md} t-distribution**The $t$ distribution.**:::---Some points about the $t$-distribution illustrated in {numref}`t-distribution`:* The null hypothesis is that there is no difference between the values but because we only estimate the values from samples, differences will creep in by chance.* Mostly these differences will be small — hence the peak in the middle — but sometimes the differences will be large and the errors will be small.* 95% of the area under the curves lies between these two sets of vertical lines. Values of $t$ more extreme than this will only occur 1 in 20 times or with a probability ($p$) of 0.05.* The means of small samples are more easily influenced by extreme values and so produce extreme $t$ values more frequently. This is why the red curve above for smaller samples is more flattened out and why the 95% limits are more spread out. One-sample $t$ testsIn the simplest example, a $t$ test can be used to test whether the mean of a sample is different from a specific value. For example:* Is the ozone in a set of air samples above the legal limit?* Is the change in a set of patients' weights different from zero?* Is the mean genome size for Odonata smaller than 1.25 pg, which is the average for insects [see here](http://www.genomesize.com/statistics.php?stats=insects)?Oh look!
We can test that last one...[Previously](12-ExpDesign.ipynb#Data-exploration), we looked at the genome size and morphology of species of dragonflies and damselflies (Odonates: Anisoptera and Zygoptera). Box and whisker plots and density plots both show that the two groups have rather different genome size and morphology. We can use $t$ tests to test whether the means of the variables of the two groups are different and we can use the $F$ test to check whether they have the same variance.In this chapter, we will continue to practise building scripts and creating your own R code, so we will start from an empty script file (you can refer to `t_F_tests.R` from TheMulQuaBio). Use this script to store the code you run in this practical session and add notes to keep a record of what each bit is doing:$\star$ Open R and change (`setwd`) to the `code` directory. If you have misplaced the data, download it again from TheMulQuaBio.$\star$ Create a new blank script called `ttests.R` and save it to the working directory.$\star$ Put a comment at the top (using `#`) to describe the script.For the rest of this session, type your code into this script, adding comments, and then run them in R using Ctrl+R. If you make mistakes, correct the script and run the code again. This way you will end up with a complete neat version of the commands for your analysis.Add code to your `ttests.R` script to load the genome size data into R, assigning the object name `genome` and use `str(genome)` to check it has loaded correctly:genome <- read.csv('../data/GenomeSize.csv')To calculate a $t$ value in a one-sample scenario, we need that observed difference and then the standard error of the difference between the mean of our sample and the known value. This is calculated using the *variance* and the *sample size* ($n$) of the sample ($s$).$$se_s = \sqrt{\frac{\textrm{var}(s)}{n}}$$This simple equation trades off variance — high variance in the data gives higher uncertainty about the location of the mean — and sample size – more data gives more certainty. So, *low variance* and *large datasets* have *small* standard errors; *high variance* and *small datasets* have *large* standard errors. Variance is calculated using sums of squares and so the square root is needed to give a standard error in the same units as the mean.So, all we need are three values calculated from the data: mean, variance and the number of data points, and then we can calculate $t$. R can do this for us. First calculate the three values from the data:mean.gs <- mean(genome$GenomeSize) print(mean.gs) var.gs <- var(genome$GenomeSize) print(var.gs) n.gs <- length(genome$GenomeSize) print(n.gs)[1] 100Now get the difference in means:diff <- mean.gs - 1.25 print(diff)[1] -0.2357Get the standard error:se.gs <- sqrt(var.gs/n.gs) print(se.gs)[1] 0.03737613Finally, get the t valuet.gs <- diff/se.gs print(t.gs)[1] -6.306164This is a big $t$ value — values this extreme don't even appear on the graph above — so we would conclude that the mean genome size for Odonata is different from the average for insects.$\star$ Copy and paste the code above into your script in R and run it. Read through the code and make sure you understand the steps.We can do the above more easily and get some more information using R's `t.test` function.
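(For readers following along in Python rather than R, the same one-sample test can be reproduced with scipy; a sketch under the assumption that the same CSV is available at the path used above:)

```python
# One-sample t-test of Odonata genome size against the insect average of 1.25 pg.
import pandas as pd
from scipy import stats

genome = pd.read_csv('../data/GenomeSize.csv')
print(stats.ttest_1samp(genome['GenomeSize'].dropna(), popmean=1.25))
```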
The null hypothesis can be set using the option (sometimes called a function *argument*) `mu` — the Greek letter $\mu$ is typically used to refer to a (true, not sample) mean:t.test(genome$GenomeSize, mu = 1.25)This confirms the values we calculated by hand and adds a $p$ value. The output also gives the degrees of freedom. This is something we will come back to later, but the degrees of freedom are basically the number of data points minus the number of estimated parameters, which in this case is one mean.(13-t_F_tests:CI)= Confidence IntervalsThe output also gives a confidence interval for the observed mean. The mean is the best estimate of the population mean given our sample of species of Odonata, but the actual mean for the order could be bigger or smaller. The confidence interval tells us the region in which we are 95% confident that this actual mean lies.It is calculated using the $t$ distribution. Remember that $t$ is a difference divided by a standard error; if we multiply $t$ by a standard error, we get back to a difference. If we pick a pair of $t$ values that contain the middle 95% of the $t$ distribution, as in the plot on page 2, then we can multiply that by the standard error from the data to get a range above and below the mean. If we sampled lots of sets of 100 species of Odonata, we expect 95% of the observed means to lie inside this range. The code below shows the calculation of the confidence interval for the test above.First, find the edges of the middle 95% of a $t$ distribution with 99 df:tlim <- qt(c(0.025,0.975), df = 99) print(tlim)[1] -1.984217 1.984217(quantiles of the t distribution, so `qt`)Now use the mean and standard error from above to get a confidence interval:mean.gs + tlim * se.gsExerciseUsing the `t.test` code above as a template, test whether the body weight (in grams) of Odonata is different from the average for arthropods of 0.045 grams. Note that this slightly dodgy estimate comes from an estimated average volume for arthropods of 45.21 mm$^3$, and assuming a density of 1 g per cm$^3$; see: Orme, C. D. L., Quicke, D. L. J., Cook, J. M. and Purvis, A. (2002), Body size does not predict species richness among the metazoan phyla. Journal of Evolutionary Biology, 15: 235–247. Two-sample $t$ testsIt is more common to use a $t$ test to compare the means of two samples. This includes questions like:* Do two rivers have the same concentration of a pollutant?* Do chemical A and chemical B cause different rates of mutation?* Do damselflies and dragonflies have different genome sizes?The main difference here is that with a one sample $t$ test, we assume that one of the means is known exactly: the only error is in the single sample. With a two sample test, we are comparing two means estimated from samples and both contain error. The graph below illustrates this:---:::{figure-md} 2-sample-t-test**Illustration of one-sample (left) vs. two-sample (right) $t$ tests**. The vertical lines show the mean (solid lines) and one standard error to each side (dashed lines).
The red mean is the same in both cases, but the second graph shows that this is also estimated from a sample with error: the difference in the means looks less convincing and we'd expect a smaller $t$ value.:::---A few points to note about {numref}`2-sample-t-test`: * The $t$ tests for these two graphs confirm this:* The mean for blue is significantly different from 16.74 (mean=14.98, se=0.38, df=59, $t$=-4.65, $p$=0.00002).* The means of blue and red are significantly different (blue: mean=14.98, se=0.38; red: mean=16.74, se=0.42; df=118, $t$=-3.13, $p$=0.002)$\star$ Have a close look at the previous two statements. This shows the kind of detail needed when reporting the results of $t$ tests. The following is *not* acceptable: The means of blue and red are significantly different ($p=0.002$).So, with two samples, we shouldn't be so confident about the difference between the values — it should have a higher standard error. We can do this simply by combining the variance and sample size for the two samples ($a$ and $b$) into the calculation:$$se_{a-b}= \sqrt{\frac{\textrm{var}(a)}{n_a} + \frac{\textrm{var}(b)}{n_b}}$$Note that the two-sample t-test makes two assumptions; that the data are: (i) *normally distributed*, so that the two means are a good measure of the central tendencies of the two samples and can be estimated sensibly; and (ii) have *similar variances*. The first assumption also applies to the one-sample t-test.Let's use a $t$ test to address the question of whether the genome sizes of Anisoptera and Zygoptera are different. First, we'll do this by hand. We'll use a really handy function `tapply(X, INDEX, FUN)` to quickly find the values for the two groups: it takes some values (X), splits those values into groups based on a factor (INDEX) and runs each group through another function (FUN).So let's first calculate the three values from the data:mean.gs <- tapply(X = genome$GenomeSize, INDEX = genome$Suborder, FUN = mean) print(mean.gs) var.gs <- tapply(X = genome$GenomeSize, INDEX = genome$Suborder, FUN = var) print(var.gs) n.gs <- tapply(X = genome$GenomeSize, INDEX = genome$Suborder, FUN = length) print(n.gs)Anisoptera Zygoptera 62 38Now get the difference in means:diff <- mean.gs[1] - mean.gs[2] print(diff)Anisoptera -0.006646859Now get the standard error of the difference:se.gs <- sqrt((var.gs[1]/n.gs[1]) + (var.gs[2]/n.gs[2])) print(se.gs)Anisoptera 0.06931693And finally, the t-value.t.gs <- diff/se.gs print(t.gs)Anisoptera -0.09589084$\star$ Type the above code into your growing R script and run it in R. Again, read through the code and make sure you understand the steps.And as before, the `t.test` function can be used to automate this all for us. We can use a formula as we have seen [before](12-ExpDesign.ipynb#Take-a-quick-look-at-effects-of-certain-factors) to get a test between the two suborders:t.test(GenomeSize ~ Suborder, data = genome)The output looks very similar to the one-sample t-test, except that the output now gives two estimated means, rather than one, and it reports the $p$ value for the calculated $t$ value.(13-t_F_tests:CI2)= Confidence Intervals againAlso, in the output above, unlike the 95% interval of a 1-sample t-test, which provides the confidence intervals around the estimated mean of the single population, the output of the 2-sample test provides the confidence intervals around the observed difference in the means of the two samples.
That is, the 95% confidence interval (-0.1442041, 0.1309104) above is around the observed difference in the sample means: $1.011774 - 1.018421 = -0.006647$. This is essentially indistinguishable from zero, as the interval includes the value 0 (and, correspondingly, the $p$-value is non-significant).

$\star$ Add this last step to your script and run it.

Exercise

$\star$ Expand your t-test code to test whether the body weights of the two suborders are different, and add it to the script file.

The $F$ test

The $F$ test is used to compare the variances of two samples or populations. You will also see it featured in analysis of variance (ANOVA), where an $F$ ratio is used to test the hypothesis that the means of a given set of normally distributed populations are all the same.

The test statistic $F$ is simply the ratio of the variances for samples $a$ and $b$, $\frac{\textrm{var}(a)}{\textrm{var}(b)}$, and its sampling distribution is the $F$ distribution:

* If the two variances are the same then $F=1$;
* if $\textrm{var}(a) > \textrm{var}(b)$ then $F > 1$;
* and if $\textrm{var}(a) < \textrm{var}(b)$ then $F < 1$.

---
:::{figure-md} f-distribution

**The F distribution.** The two vertical blue lines show the edges of the central 95% of the area of the curve.
:::
---

In the above figure, note that:

* If the two samples are drawn at random from a population with the same variance, then values of $F < 0.482$ or $F > 2.074$ are observed fewer than 1 time in 20 ($p \le 0.05$). And because $1/0.482 \approx 2.074$ and $1/2.074 \approx 0.482$, in this case it doesn't matter which way round you compare the two variances. This is not always true, and the order in which you compare two variances will very often matter (see the quantile identity written out just after this list).
* The shape of the $F$ distribution changes depending on the amount of data in each of the two samples, but it will always be centred near 1 and have a tail to the right (right-skewed).
* The $F$ distribution arises as the ratio of two appropriately scaled *chi-square distributed variates* because, as we saw above, sample variances are chi-square distributed.
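The point about swapping the two variances can be made precise with a standard property of $F$ quantiles. Writing $F_{\alpha;\,d_1,d_2}$ for the $\alpha$ quantile of an $F$ distribution with numerator and denominator degrees of freedom $d_1$ and $d_2$:

$$F_{\alpha;\,d_1,d_2} = \frac{1}{F_{1-\alpha;\,d_2,d_1}}$$

So the lower and upper 2.5% cut-offs of a single $F$ distribution are exact reciprocals of each other only when $d_1 = d_2$ (which appears to be the case for the curve plotted above); with unequal sample sizes the cut-offs are not reciprocals, which is why the order of comparison usually matters.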
Let's use our same genome size dataset to learn how to do F-tests as well. We will use the F test to judge whether a key assumption of the two-sample $t$ tests we did above holds: that the two samples have *equal variances*.

First, let's visualize the data. As I hope you've already noticed, this session has been neglecting one very important part of analysis — *plotting the data*. We are going to compare two plots, so it helps to have them side by side in the same window. We can use the function `par` to change a set of options called graphics parameters to get R to do this. The option to change is `mfrow`. This sets a graphics window to include *m*ultiple *f*igures and we need to tell R the number of rows and columns to divide the window into: `par(mfrow=c(1,2))`.

$\star$ Type `par(mfrow=c(1,2))` into your script, add a comment, and run it.

Using your (rapidly improving!) R skills, create a boxplot comparing the genome sizes of the two suborders. Add another boxplot beside it comparing the body weight of the two suborders. It should look like {numref}`odonata-wt-boxplots`.

---
:::{figure-md} odonata-wt-boxplots

**Reproduce this figure.**
:::
---

Now we can use R to calculate $F$ for the variance in genome size in each of the two suborders. We calculated the variances for the $t$ test above, so we can just do this:

var.gs[1]/var.gs[2]

That's quite a big $F$ value, and we can use the function `var.test` to do all the calculations for us and give us the actual $p$ value:

var.test(GenomeSize ~ Suborder, data = genome)

It produces the same value that we calculated by hand and shows that, if the two samples are drawn from populations with the same variance, an $F$ value this extreme will only be observed roughly 1 time in 500 ($1/0.00195 \approx 500$).

$\star$ Open a new empty script called `FTests.R`. In it, write a script to test whether the variances in the body weight of the two suborders from the `GenomeSize` dataset are different.

There are clearly problems with the variance in both examples. The next two sections present ways to address these kinds of problems.

$t$ tests revisited

The first thing to say is that R is aware of the problem with the variance. If you look back at the output from the previous $t$ tests, you will see that the degrees of freedom vary a lot. We have 100 observations and — after subtracting one for each mean we calculate — our degrees of freedom should be either 99 (one-sample test) or 98 (two-sample test). What we actually see are smaller numbers, with the smallest being `df = 60.503` for the two-sample test of body weight.

The explanation is that R is applying a *penalty* to the degrees of freedom to account for differences in variance: by default, `t.test` runs Welch's version of the two-sample test, which estimates an effective degrees of freedom from the two sample variances rather than assuming they are equal. With fewer degrees of freedom, more extreme $t$ values are more likely under the null hypothesis and so it is harder to find significant results. This doesn't mean we can forget about checking the variances or plotting the data!

In this case, we can also apply a transformation to the data in order to make the variances more equal. Forgetting the wings and assuming Odonata are shaped like a box, the model in the graph below shows how volume changes with length: equal changes in length do not lead to equal changes in volume, and longer species will have a disproportionately large volume. This is a classic feature of morphological data known as allometric scaling, and we'll look at it again when we learn about [ANOVA](15-anova.ipynb).

In the meantime, a log transformation will turn body weight from a skewed distribution into a more normal distribution, as shown in {numref}`log-transformation`.

---
:::{figure-md} log-transformation

**Log transformation of body weights.**
:::
---

We can do a $\log_e$ (natural log) transform of our body weight data as follows:

genome$logBodyWeight <- log(genome$BodyWeight)

That is, we take the natural log of the whole body weight column and save it as a new column called `logBodyWeight`.

$\star$ Copy the line into your `FTests.R` script and run it.

Exercise

Now write three lines of code to get a boxplot of $\log_e$ body weight and then run a variance test and a $t$ test on the differences in $\log_e$ body weight between suborders. This gives a much clearer result — the variances are almost identical and the differences between the suborders are much more cleanly tested.

Non-parametric tests

What happens if there isn't a convenient transformation for the variable that gives roughly constant variation and equal variance? In a parametric test, like the $t$ and $F$ tests above, we use parameters (mean and variance) to describe the data, assume these describe the data well, and then just use these parameters to run the test. If those assumptions don't seem very sound, non-parametric tests provide a way of using the ranks of the data to test for differences.
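To make "using the ranks" concrete, here is a toy sketch (the five values are invented purely for illustration): pool the two samples, rank every value from smallest to largest, and then work with the sum of the ranks in one group instead of the raw values.

$$a = \{1.2,\ 3.1,\ 9.4\}, \quad b = \{2.6,\ 4.0\} \;\Rightarrow\; \text{ranks of } a = \{1, 3, 5\},\ \text{ranks of } b = \{2, 4\}, \quad W_a = 1 + 3 + 5 = 9$$

Under the null hypothesis every way of allocating the ranks to the two groups is equally likely, so an unusually large or small rank sum is evidence of a difference; no assumption that the data are normally distributed is required.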
They aren't as powerful — they are less likely to reveal significant differences — but they are more robust. The most commonly used alternative is the Wilcoxon test, which uses the function `wilcox.test` in R.$\star$ Using `wilcox.test` as a replacement for `t.test`, repeat the one and two sample $t$ test for genome size and body weight.For example to carry out a wilcox test equivalent to the one-sample t test you performed above:wilcox.test(genome$GenomeSize, mu=1.25)Klasse D Versterkers ()d = schem.Drawing() T1 = d.add(e.BJT_NPN) d.add(e.LINE, xy=T1.base, d='down', l=1.5) indot=d.add(e.DOT) d.add(e.LINE, d='down', l=1.4) T2 = d.add(e.BJT_PNP, d='right') d.add(e.VDD, xy=T1.collector, l=0.75) d.add(e.LINE, xy=indot.start, d='left', l=0.5) d.add(e.DOT_OPEN) d.add(e.LINE, d='down',xy=T1.emitter, l=0.8) d.add(e.DOT) L1 = d.add(e.INDUCTOR2, d='right', label='$L_1$') d.add(e.LINE, d='down',xy=L1.start, l=0.8) C1 = d.add(e.CAP, d='right',xy=L1.end, label='$C_1$') R = d.add(e.RES, d='down', label='$R_L$') d.add(e.LINE, xy=R.start, d='right', l=0.5) dot=d.add(e.DOT_OPEN) d.add(e.LINE,xy=R.end, d='right',l=0.5) d.add(e.DOT_OPEN) d.add(e.LINE,xy=R.end, d='left', tox=L1.start) d.add(e.DOT) d.add(e.GND) d.add(e.LINE,d='up',toy=T2.collector) d.add(e.GAP_LABEL, d='down', xy= dot.start, toy= R.end, label=['-','$v_{out}$','$+$'],lblofst=0.4) d.draw()Basisschema van de klasse D versterkerd = schem.Drawing() T1 = d.add(e.BJT_NPN) d.add(e.LINE, xy=T1.base, d='down', l=1.5) indot=d.add(e.DOT,color='red') indot.add_label('1', loc='center', ofst=[-0.2,.3], align=('right','bottom')) d.add(e.LINE, d='down', l=1.4) T2 = d.add(e.BJT_PNP, d='right') d.add(e.VDD, xy=T1.collector, l=0.75) vdd=d.add(e.DOT,color='red') vdd.add_label('3', loc='center', ofst=[0.6,-0.3], align=('right','bottom')) d.add(e.LINE, xy=indot.start, d='left', l=0.5) d.add(e.DOT_OPEN) d.add(e.LINE, d='down',xy=T1.emitter, l=0.8) pwm=d.add(e.DOT,color='red') pwm.add_label('2', loc='center', ofst=[0.4,.3], align=('right','bottom')) L1 = d.add(e.INDUCTOR2, d='right', label='$L_1$') mid=d.add(e.DOT,color='red') mid.add_label('5', loc='center', ofst=[0.4,.3], align=('right','bottom')) d.add(e.LINE, d='down',xy=L1.start, l=0.8) C1 = d.add(e.CAP, d='right',xy=L1.end, label='$C_1$') out=d.add(e.DOT,color='red') out.add_label('6', loc='center', ofst=[0.4,.3], align=('right','bottom')) R = d.add(e.RES, d='down', label='$R_L$') d.add(e.LINE, xy=R.start, d='right', l=0.5) dot=d.add(e.DOT_OPEN) d.add(e.LINE,xy=R.end, d='right',l=0.5) d.add(e.DOT_OPEN) d.add(e.LINE,xy=R.end, d='left', tox=L1.start) gg=d.add(e.DOT,color='red') gg.add_label('0', loc='center', ofst=[0.4,.3], align=('right','bottom')) d.add(e.GND) d.add(e.LINE,d='up',toy=T2.collector) d.add(e.GAP_LABEL, d='down', xy= dot.start, toy= R.end, label=['-','$v_{out}$','$+$'],lblofst=0.4) d.draw()Basisschema van de klasse D versterker, met nummering van de knopen overeenkomende met de onderstaande SPICE file De overeenkomende SPICE code voor dit circuit is:spicelisting('simul\classD.sp')* Class D amp 1 Q_Q1 3 1 2 Q2 Q_Qp 0 1 2 Qp L_L1 5 2 1uH C_C1 5 6 10n R_RL 6 0 6 V_Vdd 3 0 20V R_Rin 4 1 1 V_Vin 4 0 PULSE(0 20 0 20ns 20ns 294ns 628ns) DC=0 .model Q2 NPN(Is=14.34f BF=200) .model Qp PNP(Is=5.34f BF=100)basiscircuit van de Klasse D versterker# .print TRAN IC(Q_Q1) IC(Q_Qp) I(R_RL) circuit=SpiceParser(path='simul\classD.sp').build_circuit() end_time=1e-5 simulator = circuit.simulator(temperature=25, nominal_temperature=25) #simulator.save('2 1 5 6') # dit mag niet langer zijn dan 15 tekens "v(1) v(2) v(5) 
v(6)" kan niet maar wel de 3 eerste analysis = simulator.transient(step_time=2e-10, end_time=end_time) figure, ax = plt.subplots(figsize=(20, 10)) ax.plot(analysis.time*1e6,analysis.nodes['2'],label='V(2) [V]') ax.plot(analysis.time*1e6,analysis.nodes['5'],label='V(5) [V]' ) ax.plot(analysis.time*1e6,analysis.nodes['6'],label='V(6) [V] ($V_{out}$)' ) ax.plot(analysis.time*1e6,analysis.nodes['1'],label='V(1) [V] ($V_{in}$)') ax.grid() ax.legend( loc=(0.31,.78)) ax.set_xlabel('tijd [us]') ax.set_xlim(0,end_time*1e6) ax.set_ylabel('Voltage [V]') plt.show()Bekomen spanningen op de verschillende knopen in het hierboven aangegeven schema van de Klasse D versterker. We merken op dat de spanning aan knoop 5 een veel grotere swing heeft dan de uitgangspanning over $R_L$ (i.e. knoop 6). De spanning op knoop 2 is typisch de verzadigingspanning van Q1 lager dan de voedingsspanning (hier 20V) of de verzadigingspanning van Qp hoger dan de grond.circuit=SpiceParser(path='simul\classD.sp').build_circuit() end_time=1e-5 simulator = circuit.simulator(temperature=25, nominal_temperature=25) #simulator.save("@Q_Q1[ie]") analysis = simulator.transient(step_time=2e-10, end_time=end_time) figure, ax = plt.subplots(figsize=(20, 10)) #ax.plot(analysis.time*1e6,-analysis.branches['Q_Q1[ie]']*201/200,label='ic(Q_Q1)' ) ax.plot(analysis.time*1e6,-analysis.branches['v_vdd']*201/200,label= 'ic(Q_Q1)') ax.plot(analysis.time*1e6,analysis.branches['v_vdd']*201/200-analysis.branches['l_l1'],label= 'ic(Q_Qp)') ax.grid() ax.legend( loc=(0.31,.78)) ax.set_xlabel('tijd [us]') ax.set_xlim(0,end_time*1e6) ax.set_ylabel('stroom [A]') plt.show()Stromen in de NPN en de PNP transistor van het hierboven aangegeven schema van de Klasse D versterker.# simulatie met aansturing off frequency #Alleen tijdens het opstarten is kort deze spanning 0.7 V hoger dan voedingsspanning of 0.7V lager dan de grond. Merk op in de onderstaande figuur dat de stromen op deze momenten ook het ander teken hebben.LemmatizationAs we saw in the previous chapter, we can explain to the machine which words are similar but also how different there are. But sometimes you don't want to catch the difference between those words, let's take an example.You can building a model to classify books, for that you want to take a list of the most recurrent words in each category. You have books about cooking and books about cars.You don't really want to make a distinction between `wheel` and `wheels` or between `foot` and `feet` for example. To fix that, we will apply **lemmatization** which will put each word in its simplest variation. Still confused?Let's see how it works in a practical case.First, read [this article](https://www.machinelearningplus.com/nlp/lemmatization-examples-python/).Then, try to apply what you have learned.**Pro tips:** Most lemmatizers only work with a single word and not on sentences. Think about tokenizing your sentence first.**Pro tips:** If you experience SSL issues during `nltk` import [check this](https://stackoverflow.com/questions/38916452/nltk-download-ssl-certificate-verify-failed).# Can you lemmatize this sentence with nltk? my_sentence = "Those childs are playing. 
this game, those games, I play he plays"************ World data************import pandas as pdArea de los paisesA = pd.read_csv('../dat/table-1.csv') # source: https://en.wikipedia.org/wiki/List_of_countries_and_dependencies_by_area # preprocessed with https://wikitable2csv.ggor.de/ # eliminar los ultimos paises porque tienen areas muy chicas A2 = A[:-20] A2[-10:] clist = A2['Sovereign state/dependency'].values for c in clist: if 'pa' in c: print(c) d = A2['Total in km2 (mi2)'].values d[-20:] area = [] for a in d: s = a.split('(')[0].replace(',','').strip() s = float(s) area.append(s) area[:10] print(A2.shape) print(len(area)) country = A2['Sovereign state/dependency'].values d = {'country': country, 'area': area} df = pd.DataFrame(data=d) df.to_csv('../dat/world_area.csv', index=None)Poblacion de los paisesP = pd.read_csv('../dat/table-2.csv') # source: https://en.wikipedia.org/wiki/List_of_countries_by_population_(United_Nations) # preprocessed with https://wikitable2csv.ggor.de/ population = [] for pop in P['Population(1 July 2019)']: pp = pop.replace(',','') pp = float(pp) population.append(pp) population[:6] P[:5] P.keys() country = P['Country or area'].values d = {'country': country, 'population': population} df = pd.DataFrame(data=d) df df.to_csv('../dat/world_population.csv', index=None)Combine area and population dataimport pandas as pd P = pd.read_csv('../dat/world_population.csv') P A = pd.read_csv('../dat/world_area.csv') A df = pd.merge(P, A, on='country') dfstandarize country namesdf['country'].replace('United States','US', inplace=True) pais = 'US' any(pais in s for s in df['country']) df['country'].replace('South Korea','Korea, South', inplace=True) df.to_csv('../dat/pop_area.csv') df['country'] for c in df['country']: if 'pa' in c: print(c)Japan NepalPython Basic Assignment: Solution_10 1. How do you distinguish between shutil.copy() and shutil.copytree()? Answer: Shutil.copy(): Copy data and mode bits. Return the file's destination. The destination may be a directory.Shutil.copytree(): Recursively copy a directory tree and return the destination directory. 2. What function is used to rename files? Answer: The os.rename() method allows us to rename files. 3. What is the difference between the delete functions in the send2trash and shutil modules? Answer: - The send2trash functions will move a file or folder to the recycle bin. - Shutil functions will permanently delete files and folders. 4.ZipFile objects have a close() method just like File objects’ close() method. What ZipFile method is equivalent to File objects’ open() method? Answer: To create your own compressed ZIP files, we must open the ZipFile object in write mode by passing 'w' as the second argument. This is similar to opening a text file in write mode by passing 'w' to the open() function. 5. Create a programme that searches a folder tree for files with a certain file extension (such as .pdf or .jpg). Copy these files from whatever location they are in to a new folder.#Answer: def pdfjpgcopy(src,dst): ''' This pdfjpgcopy function will search for all pdfs & jpgs file present in src directory and copy those files in dst directory. Note: Only workable when you have your files in one direcory not files in dir under dir. Parametrs: src --> path of directory from where you want to search for pdfs and jpgs. dst --> path of directory where you want to create copy of those files. 
''' import os # IMPORTING REQUIRED LIBRARIES import shutil main_list = [] # INTIALIZING A EMPTY LIST for root, dirs, files in os.walk(src): main_list.append(files) #main_list WILL HAVE ALL FILES IN LIST OF LIST FORM pdf_files = [] #INTIALIZING EMPTY LIST TO STORE ALL PDF FILES IN IT jpg_files = [] #INTIALIZING EMPTY LIST TO STORE ALL JPG FILES IN IT for item in main_list: #ITERATING THROUGH main_list AND SEPERATING pdf AND jpg FILES. for i in item: if i.split(".")[-1] =="pdf": pdf_files.append(i) if i.split(".")[-1] =="jpg": jpg_files.append(i) for i in pdf_files: #DOING COPY OF pdf AND JPG FILES FROM SRC TO DESTINATION FOLDER shutil.copy("{a}".format(a = src)+"\{b}".format(b = i), f"{dst}", follow_symlinks=True) for i in jpg_files: shutil.copy("{a}".format(a = src)+"\{b}".format(b = i), f"{dst}", follow_symlinks=True) print("Files successfully copied") return "Copied files are: {x}".format(x = os.listdir(dst)) pdfjpgcopy("G:\Testing","G:\Checking")Files successfully copiedNLPM750 fiber model with loss.. codeauthor:: import numpy as np def define_beta_fun_NLPM750(): r"""Propagation constant for NLPM750 PCF. Enclosing function returning a closure implementing a rational Pade-approximant of order [4/4] for the refractive index of a NL-PM-750 nonlinear photonic crystal fiber (PCF), see [NLPM750]_. Returns: :obj:`callable`: Propagation constant for NL-PM-750 PCF. .. [NLPM750] NL-PM-750 Nonlinear Photonic Crystal Fiber, www.nktphotonics.com. """ p = np.poly1d((1.49902, -2.48088, 2.41969, 0.530198, -0.0346925)[::-1]) q = np.poly1d((1.00000, -1.56995, 1.59604, 0.381012, -0.0270357)[::-1]) n_idx = lambda w: p(w) / q(w) # (-) c0 = 0.29979 # (micron/fs) return lambda w: n_idx(w) * w / c0 # (1/micron) def define_alpha_fun_NLPM750(): _dat = [ (1.190, 1867.8), (1.191, 1866.4), (1.192, 1854.7), (1.193, 1860.9), (1.194, 1862.1), (1.195, 1725.1), (1.196, 1656.2), (1.197, 1658.1), (1.198, 1646.3), (1.199, 1574.4), (1.200, 1432.4), (1.201, 1295.9), (1.202, 1223.7), (1.203, 1155.4), (1.204, 1081.2), (1.205, 937.1), (1.206, 862.4), (1.207, 870.6), (1.208, 865.9), (1.209, 722.0), (1.210, 644.1), (1.211, 567.5), (1.212, 555.9), (1.213, 543.7), (1.214, 531.9), (1.215, 519.3), (1.216, 505.8), (1.217, 490.9), (1.218, 473.7), (1.219, 449.9), (1.220, 433.0), (1.221, 418.0), (1.222, 404.0), (1.223, 390.6), (1.224, 377.9), (1.225, 365.6), (1.226, 354.1), (1.227, 343.1), (1.229, 332.2), (1.229, 321.4), (1.231, 311.1), (1.232, 301.2), (1.233, 291.7), (1.233, 282.5), (1.235, 273.4), (1.236, 264.4), (1.237, 255.7), (1.238, 247.2), (1.239, 239.2), (1.240, 231.4), (1.241, 223.7), (1.242, 216.3), (1.243, 209.0), (1.245, 202.0), (1.245, 195.2), (1.247, 188.5), (1.248, 182.1), (1.249, 175.9), (1.250, 169.9), (1.251, 164.1), (1.252, 158.6), (1.253, 153.3), (1.254, 148.1), (1.255, 143.2), (1.256, 138.5), (1.257, 133.9), (1.258, 129.6), (1.260, 125.4), (1.260, 121.4), (1.262, 117.5), (1.263, 113.9), (1.264, 110.3), (1.265, 107.0), (1.266, 103.8), (1.267, 100.8), (1.268, 98.0), (1.269, 95.1), (1.270, 92.4), (1.272, 89.8), (1.273, 87.4), (1.274, 85.2), (1.276, 83.1), (1.277, 81.1), (1.279, 79.2), (1.280, 77.4), (1.281, 75.8), (1.282, 74.2), (1.283, 72.7), (1.284, 71.5), (1.285, 70.4), (1.286, 69.3), (1.288, 68.2), (1.289, 67.2), (1.290, 66.3), (1.291, 65.5), (1.293, 64.7), (1.294, 64.0), (1.296, 63.3), (1.297, 62.7), (1.298, 62.2), (1.299, 61.8), (1.300, 61.5), (1.302, 61.3), (1.303, 61.1), (1.308, 61.1), (1.308, 61.2), (1.310, 61.3), (1.311, 61.6), (1.312, 62.0), (1.313, 62.5), (1.315, 63.1), (1.315, 63.8), (1.318, 64.5), (1.319, 
65.4), (1.320, 66.5), (1.322, 67.7), (1.323, 68.9), (1.324, 70.3), (1.325, 71.9), (1.326, 73.8), (1.328, 75.9), (1.329, 78.3), (1.330, 80.8), (1.332, 83.6), (1.334, 86.6), (1.335, 89.8), (1.336, 93.3), (1.338, 97.0), (1.339, 101.0), (1.340, 105.3), (1.341, 109.9), (1.343, 114.6), (1.344, 119.5), (1.345, 124.6), (1.346, 129.8), (1.348, 135.3), (1.348, 140.9), (1.350, 146.4), (1.351, 151.9), (1.353, 157.1), (1.354, 161.9), (1.355, 166.0), (1.356, 169.2), (1.358, 171.1), (1.359, 171.8), (1.360, 171.0), (1.362, 168.8), (1.363, 165.3), (1.364, 160.4), (1.365, 154.6), (1.367, 147.9), (1.368, 141.0), (1.369, 133.8), (1.371, 126.4), (1.372, 118.9), (1.373, 111.7), (1.375, 104.9), (1.376, 98.5), (1.377, 92.5), (1.378, 87.0), (1.380, 82.0), (1.381, 77.4), (1.382, 73.3), (1.384, 69.2), (1.385, 65.3), (1.386, 61.8), (1.388, 58.7), (1.389, 56.0), (1.390, 53.6), (1.392, 51.8), (1.393, 50.4), (1.394, 49.3), (1.396, 48.5), (1.397, 47.8), (1.399, 47.1), (1.400, 46.5), (1.401, 45.9), (1.403, 45.2), (1.404, 44.6), (1.406, 44.0), (1.406, 43.3), (1.408, 42.6), (1.409, 41.9), (1.411, 41.2), (1.412, 40.6), (1.418, 40.0), (1.419, 39.4), (1.420, 38.9), (1.422, 38.4), (1.423, 38.0), (1.425, 37.6), (1.426, 37.2), (1.429, 37.0), (1.430, 36.8), (1.432, 36.6), (1.433, 36.4), (1.435, 36.3), (1.436, 36.2), (1.438, 36.1), (1.439, 36.0), (1.442, 35.9), (1.443, 35.9), (1.449, 35.8), (1.450, 35.8), (1.456, 35.9), (1.458, 35.9), (1.459, 35.9), (1.460, 35.9), (1.462, 35.9), (1.464, 36.0), (1.465, 36.1), (1.467, 36.2), (1.469, 36.3), (1.469, 36.3), (1.471, 36.4), (1.474, 36.5), (1.475, 36.6), (1.477, 36.7), (1.478, 36.9), (1.480, 37.0), (1.481, 37.2), (1.483, 37.4), (1.484, 37.6), (1.486, 37.8), (1.488, 38.1), (1.492, 38.3), (1.494, 38.6), (1.496, 38.9), (1.497, 39.3), (1.498, 39.6), (1.500, 39.9), (1.502, 40.3), (1.504, 40.6), (1.505, 40.8), (1.506, 41.0), (1.508, 41.1), (1.510, 41.3), (1.511, 41.3), (1.513, 41.3), (1.514, 41.2), (1.516, 41.1), (1.517, 40.8), (1.519, 40.6), (1.520, 40.2), (1.522, 39.9), (1.524, 39.5), (1.526, 39.1), (1.527, 38.6), (1.529, 38.2), (1.530, 37.8), (1.532, 37.4), (1.536, 37.0), (1.538, 36.7), (1.540, 36.4), (1.542, 36.2), (1.544, 36.0), (1.547, 35.8), (1.550, 35.7), (1.551, 35.6), (1.553, 35.5), (1.555, 35.5), (1.557, 35.4), (1.558, 35.4), (1.560, 35.3), (1.561, 35.3), (1.564, 35.3), (1.565, 35.3), (1.567, 35.2), (1.570, 35.2), (1.572, 35.3), (1.574, 35.3), (1.577, 35.3), (1.578, 35.3), (1.581, 35.3), (1.584, 35.3), (1.590, 35.3), (1.591, 35.3), (1.593, 35.3), (1.594, 35.3), (1.596, 35.3), (1.598, 35.3), (1.600, 35.3), (1.601, 35.3), (1.603, 35.3), (1.605, 35.3), (1.607, 35.3), (1.608, 35.4), (1.610, 35.4), (1.614, 35.5), (1.616, 35.5), (1.619, 35.5), (1.621, 35.6), (1.623, 35.7), (1.626, 35.7), (1.628, 35.8), (1.630, 35.8), (1.632, 35.9), (1.635, 35.9), (1.636, 35.9), (1.638, 36.0), (1.640, 36.0), (1.642, 36.0), (1.643, 36.1), (1.645, 36.1), (1.647, 36.0), (1.649, 36.0), (1.651, 36.0), (1.653, 36.0), (1.654, 36.0), (1.657, 36.0), (1.662, 35.9), (1.664, 35.9), (1.666, 35.8), (1.668, 35.8), (1.669, 35.7), (1.672, 35.7), (1.673, 35.7), (1.676, 35.8), (1.678, 35.7), (1.680, 35.7), (1.683, 35.8), (1.686, 35.8), (1.688, 35.8), (1.690, 35.8), (1.691, 35.8), (1.706, 35.8), (1.707, 35.9), (1.709, 36.0), (1.711, 36.0), (1.714, 36.0), (1.715, 36.0), (1.718, 36.0), (1.719, 36.0), (1.722, 36.0), (1.723, 36.1), (1.727, 36.1), (1.730, 36.2), (1.732, 36.2), (1.734, 36.2), (1.737, 36.2), (1.738, 36.2), (1.740, 36.2), (1.742, 36.2), (1.745, 36.2), (1.747, 36.2), (1.749, 36.3), (1.751, 36.2), (1.753, 36.2), (1.755, 
36.1), (1.758, 36.1), (1.762, 36.1), (1.764, 36.1), (1.766, 36.1), (1.767, 36.1), (1.770, 36.1), (1.775, 36.1), (1.777, 36.1), (1.779, 36.0), (1.781, 36.1), (1.783, 36.1), (1.786, 36.1), (1.788, 36.1), (1.791, 36.1), (1.792, 36.0), (1.795, 36.0), (1.797, 35.9), (1.799, 35.9), (1.801, 35.9), (1.803, 35.9), (1.805, 35.9), (1.808, 35.9), (1.812, 35.9), (1.814, 35.9), (1.817, 35.9), (1.819, 35.9), (1.822, 35.9), (1.826, 36.0), (1.828, 36.0), (1.831, 36.0), (1.832, 35.9), (1.835, 35.9), (1.837, 35.9), (1.840, 35.9), (1.843, 35.9), (1.844, 35.9), (1.847, 35.9), (1.849, 35.9), (1.852, 35.9), (1.859, 35.9), (1.862, 35.8), (1.864, 35.8), (1.867, 35.9), (1.868, 35.9), (1.871, 35.9), (1.873, 35.9), (1.876, 35.8), (1.878, 35.8), (1.883, 35.9), (1.886, 35.9), (1.888, 35.9), (1.891, 35.9), (1.893, 36.0), (1.896, 36.0), (1.898, 36.0), (1.905, 36.0), (1.908, 36.1), (1.911, 36.1), (1.914, 36.1), (1.915, 36.2), (1.918, 36.2), (1.921, 36.2), (1.924, 36.3), (1.926, 36.3), (1.928, 36.3), (1.934, 36.4), (1.936, 36.5), (1.939, 36.6), (1.941, 36.6), (1.944, 36.7), (1.946, 36.8), (1.950, 36.9), (1.951, 37.0), (1.954, 37.1), (1.957, 37.2), (1.960, 37.4), (1.962, 37.5), (1.968, 37.6), (1.971, 37.7), (1.974, 37.9), (1.976, 38.0), (1.979, 38.2), (1.981, 38.3), (1.984, 38.4), (1.990, 38.5), (1.992, 38.5), (1.995, 38.5), (1.998, 38.5), (2.003, 38.4), (2.006, 38.3), (2.009, 38.2), (2.012, 38.0), (2.015, 37.9), (2.018, 37.7), (2.020, 37.5), (2.023, 37.3), (2.026, 37.2), (2.029, 37.1), (2.031, 36.9), (2.034, 36.8), (2.038, 36.8), (2.040, 36.7), (2.044, 36.7), (2.046, 36.7), (2.049, 36.7), (2.052, 36.7), (2.055, 36.7), (2.058, 36.7), (2.063, 36.7), (2.067, 36.8), (2.069, 36.8), (2.073, 36.8), (2.075, 36.8), (2.078, 36.9), (2.081, 36.9), (2.085, 36.9), (2.087, 37.0), (2.090, 37.0), (2.093, 37.1), (2.097, 37.2), (2.102, 37.2), (2.105, 37.3), (2.109, 37.3), (2.112, 37.4), (2.115, 37.4), (2.119, 37.4), (2.121, 37.5), (2.125, 37.5), (2.127, 37.5), (2.131, 37.5), (2.133, 37.5), (2.137, 37.6), (2.140, 37.6), (2.144, 37.7), (2.145, 37.7), (2.152, 37.8), (2.156, 37.8), (2.159, 37.8), (2.163, 37.9), (2.172, 37.9), (2.176, 38.0), (2.178, 38.0), (2.182, 38.1), (2.185, 38.1), (2.189, 38.1), (2.191, 38.2), (2.195, 38.2), (2.202, 38.3), (2.206, 38.4), (2.212, 38.5), (2.215, 38.5), (2.219, 38.6), (2.222, 38.7), (2.226, 38.7), (2.228, 38.7), (2.233, 38.8), (2.236, 38.9), (2.240, 39.0), (2.242, 39.1), (2.249, 39.2), (2.256, 39.2), (2.260, 39.4), (2.263, 39.4), (2.268, 39.5), (2.271, 39.6), (2.277, 39.7), (2.282, 39.8), (2.286, 39.9), (2.289, 40.0), (2.304, 40.2), (2.308, 40.3), (2.310, 40.5), (2.315, 40.6), (2.330, 40.7), (2.333, 40.8), (2.337, 40.9), (2.341, 41.1), (2.345, 41.2), (2.348, 41.2), (2.352, 41.3), (2.356, 41.5), (2.360, 41.6), (2.363, 41.6), (2.367, 41.7), (2.371, 41.7), (2.376, 41.8), (2.380, 41.9), (2.383, 42.1), (2.387, 42.1), (2.391, 42.2), (2.396, 42.3), (2.399, 42.4), (2.404, 42.4), (2.407, 42.4), (2.412, 42.5), (2.415, 42.7), (2.420, 42.8), (2.422, 42.9), (2.427, 42.9), (2.431, 43.0), (2.436, 43.2), (2.439, 43.3), (2.444, 43.4), (2.456, 43.6), (2.464, 43.7), (2.469, 43.9), (2.473, 44.1), (2.483, 44.2), (2.485, 44.4), (2.491, 44.6), (2.494, 44.7), (2.500, 44.9), (2.502, 45.1), (2.508, 45.2), (2.512, 45.3), (2.517, 45.4), (2.521, 45.6), (2.525, 45.8), (2.529, 45.9), (2.534, 46.0), (2.538, 46.1), (2.544, 46.3), (2.552, 46.5), (2.556, 46.7), (2.561, 46.9), (2.564, 47.1), (2.570, 47.3), (2.574, 47.5), (2.579, 47.7), (2.588, 47.9), (2.593, 48.1), (2.597, 48.4), (2.603, 48.6), (2.607, 48.8), (2.613, 49.0), (2.622, 49.1), (2.626, 
49.3), (2.632, 49.5), (2.635, 49.7), (2.640, 49.9), (2.645, 50.1), (2.651, 50.2), (2.654, 50.3), (2.659, 50.5), (2.664, 50.6), (2.670, 50.7), (2.674, 50.9), (2.680, 51.1), (2.683, 51.3), (2.689, 51.5), (2.694, 51.8), (2.700, 52.0), (2.703, 52.3), (2.715, 52.6), (2.720, 53.0), (2.726, 53.3), (2.736, 53.6), (2.740, 53.9), (2.747, 54.2), (2.751, 54.4), (2.756, 54.6), (2.761, 54.8), (2.767, 55.0), (2.772, 55.1), (2.778, 55.3), (2.782, 55.5), (2.788, 55.7), (2.793, 55.8), (2.800, 56.0), (2.803, 56.1), (2.809, 56.3), (2.814, 56.5), (2.821, 56.7), (2.826, 57.0), (2.831, 57.2), (2.843, 57.4), (2.850, 57.7), (2.855, 57.9), (2.862, 58.2), (2.865, 58.6), (2.872, 58.8), (2.877, 59.1), (2.884, 59.3), (2.887, 59.6), (2.894, 59.8), (2.900, 60.0), (2.907, 60.3), (2.910, 60.6), (2.917, 60.8), (2.923, 61.0), (2.930, 61.3), (2.935, 61.5), (2.942, 61.8), (2.946, 62.1), (2.953, 62.4), (2.959, 62.8), (2.966, 63.1), (2.977, 63.4), (2.983, 63.6), (2.998, 64.0), (3.002, 64.4), (3.009, 64.7), (3.022, 65.0), (3.047, 65.3), (3.061, 65.6), (3.065, 65.9), (3.073, 66.1), (3.079, 66.4), (3.087, 66.8), (3.091, 67.1), (3.099, 67.5), (3.105, 67.7), (3.113, 68.1), (3.125, 68.5), (3.131, 68.7), (3.139, 68.9), (3.146, 69.1), (3.154, 69.4), (3.162, 69.6), (3.166, 69.9), (3.175, 70.1), (3.181, 70.2), (3.190, 70.4), (3.194, 70.5), (3.202, 70.6), (3.209, 70.8), (3.218, 70.9), (3.222, 71.1), (3.231, 71.4), (3.237, 71.5), (3.246, 71.6), (3.253, 71.7), (3.261, 71.8), (3.266, 72.0), (3.275, 72.2), (3.282, 72.4), (3.291, 72.6), (3.295, 72.9), (3.304, 73.4), (3.311, 73.8), (3.320, 74.1), (3.325, 74.6), (3.343, 75.3), (3.350, 76.0), (3.360, 76.6), (3.367, 77.2), (3.374, 77.7), (3.381, 78.3), (3.391, 78.8), (3.398, 79.2), (3.408, 79.6), (3.413, 79.8), (3.422, 80.1), (3.430, 80.5), (3.440, 80.8), (3.445, 81.0), (3.454, 81.4), (3.462, 81.9), (3.472, 82.3), (3.480, 82.6), (3.487, 83.0), (3.495, 83.4), (3.505, 83.8), (3.513, 84.2), (3.523, 84.6), (3.528, 85.0), (3.539, 85.3), (3.547, 85.7), (3.557, 86.1), (3.568, 86.5), (3.573, 87.0), (3.584, 87.5), (3.592, 87.9), (3.603, 88.3), (3.608, 88.8), (3.619, 89.5), (3.627, 90.1), (3.638, 90.7), (3.647, 91.3), (3.658, 91.8), (3.663, 92.2), (3.675, 92.5), (3.683, 92.9), (3.695, 93.4), (3.700, 94.1), (3.712, 94.9), (3.720, 95.4), (3.732, 95.7), (3.738, 96.1), (3.750, 96.5), (3.758, 97.1), (3.770, 97.9), (3.779, 98.7), (3.791, 99.5), (3.800, 100.2), (3.809, 100.9), (3.821, 101.6), (3.831, 102.2), (3.843, 102.9), (3.849, 103.7), (3.862, 104.2), (3.871, 104.5), (3.884, 104.8), (3.890, 105.1), (3.902, 105.3), (3.912, 105.3), (3.925, 105.4), (3.935, 105.4), (3.944, 105.5), (3.954, 105.6), (3.967, 106.1), (3.977, 106.9), (3.990, 107.6), (3.997, 108.1), (4.010, 108.5), (4.021, 109.0), (4.034, 109.6), (4.041, 110.3), (4.055, 111.2), (4.068, 112.2), (4.079, 113.1), (4.093, 113.9), (4.100, 114.5), (4.114, 114.8), (4.125, 115.4), (4.139, 116.2), (4.150, 116.8), (4.164, 117.3), (4.171, 117.8), (4.186, 118.3), (4.197, 118.9), (4.212, 119.5), (4.219, 120.1), (4.234, 120.6), (4.245, 121.3), (4.260, 122.1), (4.268, 122.9), (4.283, 123.7), (4.295, 124.9), (4.310, 126.5), (4.322, 127.5), (4.338, 127.9), (4.346, 128.3), (4.361, 128.6), (4.377, 129.7), (4.389, 130.2), (4.406, 130.0), (4.414, 130.3), (4.430, 131.6), (4.459, 132.2), (4.467, 132.8), (4.484, 133.3), (4.497, 134.6), (4.514, 136.5), (4.539, 138.3), (4.552, 140.1), (4.570, 141.0), (4.583, 142.2), (4.600, 143.3), (4.609, 143.2), (4.627, 142.3), ] X, Y = zip(*_dat) a_func_ = lambda x: np.interp(x, np.asarray(X), np.asarray(Y) * 1e-9 / (20*0.43)) return a_func_ if 
__name__ == "__main__": w = np.linspace(0.6, 5.0, 1000) a_ = define_alpha_fun_NLPM750() b_ = define_beta_fun_NLPM750() for wi in w: print(wi, a_(wi), b_(wi))Save GSD trajectory Dumping dataHOOMD can write system configuration data to files as the simulation progresses. You can then used the saved data to visualize the system's evolution, perform off-line analysis, or as input to a follow-up simulation. The GSD file format[GSD](https://github.com/glotzerlab/gsd) is a binary file format designed specifically for HOOMD. It supports all of HOOMD's particle properties, is easy to configure, and fast to read and write. It is trivial to use in python analysis tools, and easy to integrate with any language that can link C/C++ (only 1k lines of vanilla C code). Write GSD files from HOOMDStart with the Lennard-Jones tutorial script.hoomd.context.initialize('--mode=cpu'); system = hoomd.init.create_lattice(unitcell=hoomd.lattice.sc(a=2.0), n=5); nl = hoomd.md.nlist.cell(); lj = hoomd.md.pair.lj(r_cut=3.0, nlist=nl); lj.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0); all = hoomd.group.all(); hoomd.md.integrate.mode_standard(dt=0.001); hoomd.md.integrate.langevin(group=all, kT=1.0, seed=987);HOOMD-blue v2.1.5 CUDA (7.5) DOUBLE HPMC_MIXED MPI SSE SSE2 SSE3 SSE4_1 SSE4_2 AVX Compiled: 03/12/2017 Copyright 2009-2016 The Regents of the University of Michigan. ----- You are using HOOMD-blue. Please cite the following: * , , and . "General purpose molecular dynamics simulations fully implemented on graphics processing units", Journal of Computational Physics 227 (2008) 5342--5359 * , , , , , , , and . "Strong scaling of general-purpose molecular dynamics simulations on GPUs", Computer Physics Communications 192 (2015) 97--107 ----- HOOMD-blue is running on the CPU notice(2): Group "all" created containing 125 particles notice(2): integrate.langevin/bd is using specified gamma valuesOne additional command activates GSD file writes every period steps.d = hoomd.dump.gsd("trajectory.gsd", period=2e3, group=all, overwrite=True);Like all HOOMD commands, they take effect in subsequent runs:hoomd.run(10000, quiet=True);notice(2): -- Neighborlist exclusion statistics -- : notice(2): Particles with 0 exclusions : 125 notice(2): Neighbors included by diameter : no notice(2): Neighbors excluded when in the same body: noThis run produced the file `trajectory.gsd`:!ls -l trajectory.gsd-rw-rw---- 1 joaander glotzer 20227 Apr 3 20:08 trajectory.gsdRead a GSD file[Full GSD tutorials are are available in the gsd docs](http://gsd.readthedocs.io/en/latest/). Here, let's just open the gsd file and verify that it has the expected number of frames and particles. We will do this with the `gsd` python module.import gsd import gsd.hoomd t = gsd.hoomd.open('trajectory.gsd', 'rb'); len(t) t[4].particles.N t[4].configuration.step t[4].configuration.box t[4].particles.position[0:10,:]And here is the velocity of particle 0 at every frame in the file:for f in t: print(f.particles.velocity[0,:])[ 0. 0. 0.] [ 0. 0. 0.] [ 0. 0. 0.] [ 0. 0. 0.] [ 0. 0. 0.]Static and dynamic quantitiesTo save on file size, GSD does not store all particle quantities on every time step by default. By default, only particle position and orientation are written every step - and orientation is only written if it changes. 
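Before changing any settings, a quick way to see which quantities are actually dynamic in the default output is to loop over the frames and compare a particle's position (saved every frame) with its velocity (not saved every frame). This is only a small sketch reusing the `gsd.hoomd` calls already shown above, and it assumes the `trajectory.gsd` file written earlier in this section:

```python
import gsd.hoomd

# Open the trajectory written with the default dump.gsd settings.
t = gsd.hoomd.open('trajectory.gsd', 'rb')

for f in t:
    # The position of particle 0 changes from frame to frame because
    # positions are stored dynamically; the velocity reads back as zeros
    # because it was not stored per frame (see the explanation below).
    print(f.configuration.step,
          f.particles.position[0, :],
          f.particles.velocity[0, :])
```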
This is why the previous example recorded a velocity of 0 in every frame.Take `momentum` out of the static properties list in `dump.gsd` to store velocity in every frame output.d = hoomd.dump.gsd("trajectory2.gsd", static=['attribute', 'topology'], period=2e3, group=all, overwrite=True); hoomd.run(10000, quiet=True);Now the velocities are saved in the file.t = gsd.hoomd.open('trajectory2.gsd', 'rb'); for f in t: print(f.particles.velocity[0,:])[-1.67411315 -0.57448936 -0.79786533] [ 0.13245943 0.33422855 0.79139143] [-0.3298862 0.22901018 -1.15823472] [-0.00869042 -0.97660118 -2.23960042] [-0.79439515 1.21248317 0.30553663]For a full list of all the property groups you can specify in `static`, see the dump.gsd documentation - even the particle types, masses, charges, bond connectivity, and the number of particles/bonds/etc... can change from frame to frame.The more properties you save on every frame, the larger the file size will be. Write a subset of the system`dump.gsd` allows you to write a file that only contains a subset of the entire system.Here is a group that contains the first 10 particles in the simulation passed to the `group` argument of `dump.gsd` file as an example.first10 = hoomd.group.tag_list(name='first10', tags=[0,1,2,3,4,5,6,7,8,9]); d = hoomd.dump.gsd("trajectory3.gsd", static=['attribute', 'topology'], period=2e3, group=first10, overwrite=True); hoomd.run(10000, quiet=True); t = gsd.hoomd.open('trajectory3.gsd', 'rb');As requested, this GSD file contains only 10 particles.t[4].particles.NWrite a single frame GSD fileYou can write a single frame GSD file with a single line. For example, use this to write the final system state at the end of a run.hoomd.dump.gsd("final-frame.gsd", group=hoomd.group.all(), overwrite=True, period=None);Escalar las variablesfrom sklearn.preprocessing import StandardScaler scaler = StandardScaler() df_nutrientes = scaler.fit_transform(numericas)Funciones del examen anterior que calculan factorización QRimport numpy as np import copy import codecs import sys from scipy.linalg import solve_triangular def busca_ayuda(cadena): """ Función que devuelve el texto de ayuda correspondiente a la función que se pase como parámetro, obtenida de un unico archivo .txt donde están documentadas las "ayudas" de todas las funciones asociadas a la factorización QR (contenidas en este .py) params: cadena nombre de la función de la que se desea obtener ayuda return: help ayuda de la función buscada en forma de texto """ l=len(cadena) help=codecs.open('Help_funciones_factorizacion_QR.txt', "r", "utf-8").read() p_inicio=help.find("****i****" + cadena) p_final=help.find("****f****" + cadena) help=help[(p_inicio+9+l):p_final] return help def crear_matriz_aleatoria(renglones,columnas,maximo_valor,minimo_valor,entero=False): """ Función de apoyo para genear matrices aleatorias params: renglones no. de renglones de la matriz columnas no. 
de columnas de la matriz maximo_valor valor máximo de las entradas de la matriz minimo_valor valor mínimo de las entradas de la matriz entero Indica si las entradas serán enteras (True) o no return: M Matriz con numeros al azar """ #Se checa que los parámetros sean congruentes con la funcionalidad if isinstance(renglones, int)==False or isinstance(columnas, int)==False: sys.exit('Los parámetros de renglones y columnas deben ser enteros positivos') elif renglones<=0 or columnas<=0: sys.exit('Los parámetros de renglones y columnas deben ser enteros positivos') if isinstance(maximo_valor, (int, float))==False or isinstance(minimo_valor, (int, float))==False: sys.exit('Los parámetros maximo_valor y minimo_valor deben ser numericos') #Se inicializa una matriz llena de ceros con las dimensiones deseadas (mxn) M=np.zeros((renglones, columnas)) for i in range(renglones): for j in range(columnas): #Si entero es verdadero se obtiene el maximo entero menor o igual a 1 (//1) if entero: M[i][j]=(np.random.rand(1)*(maximo_valor+1-minimo_valor)+minimo_valor)//1 else: M[i][j]=np.random.rand(1)*(maximo_valor-minimo_valor)+minimo_valor return M #crear_matriz_aleatoria.__doc__ =busca_ayuda("crear_matriz_aleatoria") def house(x): """ Función que calcula vector de householder params: x vector al que se le hará la reflexión householder return: Beta factor para obtener matriz de reflexión householder Rf v vector de householder """ #Se checa que los parámetros sean congruentes con la funcionalidad if type(x) is not np.ndarray: sys.exit('x debe ser de tipo numpy.ndarray') #longitud del vector x=(x_0,x_1,x_2,...,x_(m-1)) m=len(x) norm_2_m=x[1:m].dot(np.transpose(x[1:m])) #Se hace v=x=(1,x_1,x_2,...,x_(m-1)) v=np.concatenate((1,x[1:m]), axis=None) Beta=0 #con las siguientes condiciones se checa si x es múltiplo del vector canónico e_1 #y el signo de x[0] if (norm_2_m==0 and x[0]>=0): Beta=0 elif (norm_2_m==0 and x[0]<0): Beta=2 else: norm_x=np.sqrt(pow(x[0],2)+norm_2_m) if (x[0]<=0): v[0]=x[0]-norm_x else: v[0]=-norm_2_m/(x[0]+norm_x) Beta=2*pow(v[0],2)/(norm_2_m+pow(v[0],2)) v=v/v[0] return Beta, v #house.__doc__ =busca_ayuda("house") def matriz_auxiliar_Arv(A,ind_singular=False): """ Función que genera una matriz que contiene los elementos r distintos de cero de la matriz R y las entradas de los vectores householder v (excepto la primera), con los cuales se puede calcular la matriz Q. Ambas matrices componentes de la factorización QR params: A Matriz (mxn) de la que se desea obtner factorización QR return: Arv Matriz (mxn) que incluye las componentes distintas de cero de la matriz R y los vectores householder con los que se puede obtener la matriz Q, y con ello la factorización QR """ #Se checa que los parámetros sean congruentes con la funcionalidad if type(A) is not np.ndarray: sys.exit('A debe ser de tipo numpy.ndarray') m,n=A.shape if m-10**(-14): return None Arv[(j+1):m,j]=v[1:(m-j)] return Arv #matriz_auxiliar_Arv.__doc__ =busca_ayuda("matriz_auxiliar_Arv") def matriz_Q_R(A,ind_singular=False): """ Función que devuelve la matriz R y Q de la factorización QR de una matriz A params: A Matriz (mxn) return: Q Matriz Q (mxm) de la factorización A=QR R Matriz Q (mxm) de la factorización A=QR """ #Se checa que los parámetros sean congruentes con la funcionalidad if type(A) is not np.ndarray: sys.exit('A debe ser de tipo numpy.ndarray') elif A.shape[0]j or j>Arv.shape[1]: sys.exit('El parámetro j debe estar en el rango [1,no. 
columnas de Arv]') m,n=Arv.shape Qj=np.eye(m) #Para construir Q_j requerimos usar el vector v contenido en Arv contenido #en la j-1 columna (considerando que en Python la primer columna es la cero) v=np.concatenate((1,Arv[j:m,(j-1)]), axis=None) beta=2/(1+Arv[j:m,(j-1)].dot(Arv[j:m,(j-1)])) Qj[(j-1):m,(j-1):m]=np.eye(m-(j-1))-beta*np.outer(v,v) return Qj #Q_j.__doc__ =busca_ayuda("Q_j") def Solucion_SEL_QR_nxn(A,b): """ Función que obtiene la solución de un sistema de ecuaciones lineales (SEL) con n ecuaciones y n incognitas params: A Matriz (nxn) que representa los coeficientas de las ecuaciones b vector (nx1) constantes del sistema return: x vector que satisface (Ax=b) """ #Se checa que los parámetros sean congruentes con la funcionalidad if type(A) is not np.ndarray or type(b) is not np.ndarray: #esto implica que A y b tienen más de 1 elemento sys.exit('A y b deben ser de tipo numpy.ndarray') m,n = A.shape if mFuncion creada para este proyecto final Función que ocupa algoritm QR para calcular los eigenvectores de una matrizdef eigenvectores_eigenvalores_QR_vf(data,niter,tolerancia = 10**-8): """ Función para obtener los eigenvectores y eigenvalores de una matriz cualquiera params: data matriz de datos niter: número de iteraciones máximas return: eigenvalores eigenvalores Depende de las funciones ue calculan la factorización QR """ # convertir a array A = np.array(data) # La matriz de covarianza ya es simetrica, no necesitamos rellenar columnas de ceros # Completamos la matriz A con columnas de ceros para que sea cuadrada n, p = A.shape columnas = n - p ceros = np.zeros((n,columnas)) # Matriz inicial A0 = np.append(A, ceros, axis = 1) # Factorización inicial Qk,Rk =matriz_Q_R(A0) # Hacer una copia de la matriz Q inicial Q = Qk.copy() valor = 10 #iniciamos ciclo for i in range(niter): #Definimos la nueva Ak+1 Ak = Rk@Qk #calculamos la factorización QR sobre la nueva Ak Qk,Rk =matriz_Q_R(Ak) # Actualizar la matriz Q Q = Q@Qk # Se usan los valores distintos de la diagonal para corroborar la tolerancia deseada valor = np.sum(np.abs(Qk)) - np.trace(np.abs(Qk)) # Detener si se logra la tolerancia deseada # Notar que en esta primera prueba no se está actualizando el valor que checa la tolerancia if (valorProbar la función Crear una matriz de covarianzasmean_vec = np.mean(df_nutrientes, axis=0) # Matriz de covarianzas C = (df_nutrientes - mean_vec).T.dot((df_nutrientes - mean_vec)) / (df_nutrientes.shape[0]-1) C e, q = eigenvectores_eigenvalores_QR_vf(C,10) print(e) print('--------------------------------------------------------------------------') print(q)[5.44991441e+00 2.61797014e+00 2.02972424e+00 1.65005670e+00 1.86641120e+00 1.13445534e+00 1.06408339e+00 8.68331083e-01 8.60156528e-01 8.86362763e-01 7.33685165e-01 5.67458181e-01 5.00363037e-01 5.02198917e-01 3.33008339e-01 4.08115249e-01 3.32553367e-01 2.66385369e-01 3.18388529e-01 2.38233674e-01 2.11195400e-01 1.59822432e-01 3.79568767e-03] -------------------------------------------------------------------------- [[ 1.57990336e-01 -2.84447153e-01 4.73895575e-01 -2.42831806e-01 1.25131302e-01 1.01450256e-01 -8.80435214e-02 -6.60609299e-02 -9.11277861e-02 1.30668059e-01 -1.43251953e-01 -1.13459083e-01 -6.27872311e-02 3.79243323e-02 1.08875745e-01 3.05480334e-02 7.12927356e-02 -6.96291630e-02 -5.29083030e-03 8.49527843e-02 -1.30373514e-01 -4.13090282e-02 6.78011231e-01] [ 1.40443482e-01 3.43892880e-01 2.24706618e-01 -5.79863640e-02 -2.96084603e-01 1.42178628e-01 -5.81675610e-02 6.63767504e-02 -2.01389902e-01 4.66520097e-02 -2.77276177e-01 
-2.9117104[...]Usar numpy para cacular los valores y eigen vectores de una matriz de covarianzaseigenvalores, eigenvectotes= np.linalg.eig(C) print(eigenvalores) print("----------------------------------------------") print(eigenvectotes) def PCA_from_QR_vf(data,niter): """ Función para PCA a partir de los eigenvectores params: data: matriz de datos niter: número de iteraciones máximas return: componentes Los coeficientes para calcular los componentes principales (eigenvectores de la matriz de covarianzas) Z Los datos transformados (componentes principales) varianza_explicada La varianza explicada por cada componente principal Depende de la función: eigenvectores_QR """ # convertir a array A = np.array(data) # Centrar los datos mean_vec = np.mean(A, axis=0) datos_centrados = (A - mean_vec) # Matriz de Covarianzas #C = (datos_centrados.T@datos_centrados)/(datos_centrados.shape[0]-1) C = (A - mean_vec).T.dot((A - mean_vec)) / (A.shape[0]-1) # Calcular algoritmo QR E, Q = eigenvectores_eigenvalores_QR_vf(C,niter) # Los componentes (coeficientes) componentes = Q.T # Los datos transformados (componentes principales) # Aquí marcaba error al filtrar porque no se reconocia a Z como numpy array Z = datos_centrados@Q # La varianza explicada varianza_explicada = E/np.sum(E) # Calcula número de componentes de manera automatica de acuerdo a la variana explicada # Threshold de 60% n = data.shape[1] #numero de columnas varianza_acumulada = varianza_explicada.cumsum() conteo = (varianza_acumulada) < 0.6 num_componentes = conteo.sum() + 1 # regresar 4 objetos return E[:num_componentes], componentes[:num_componentes].T, Z[:,:num_componentes], varianza_explicada[:num_componentes] #, varianza_acumulada, num_componentes #return E, componentes, Z, varianza_explicada, varianza_acumulada, num_componentesProbar funcióneigenvalores, coeficientes, Z, varianza_explicada = PCA_from_QR_vf(df_nutrientes,380) eigenvalores coeficientes Z varianza_explicadaPCA de scikit-learnfrom sklearn.decomposition import PCA pca = PCA(n_components=6,svd_solver='full') pca.fit(df_nutrientes) print(pca.components_) print(pca.explained_variance_ratio_) pca.singular_values_ z = pca.transform(df_nutrientes) print(z)[[-1.12177585 -1.18225141 -3.66193973 1.08091972 -3.08416096 0.83802181] [-1.11468691 -1.18417302 -3.66232928 1.09073824 -3.07743037 0.83709805] [-0.99491941 -1.57357953 -4.69772411 1.31731134 -3.912684 0.94020808] ... [-0.7676707 -3.26765632 0.98520556 1.93561005 -0.17248306 -2.30534729] [ 0.35589709 0.67843536 -1.00293556 -0.66588834 1.81479136 1.04864227] [-0.8668898 1.19845904 0.19348689 -0.47633782 0.60088129 0.03119761]]Unified Dataset Class for ARCH Datasetarch_dataset_raw_train = ArchCaptionsDatasetRaw(data_root='../datasets/ARCH', source="both", split="train") len(arch_dataset_raw_train.instances)Unified Dataset Class + augmentations and collate functionimport random from typing import Callable, Dict, List import albumentations as alb import numpy as np import torch from torch.utils.data import Dataset from virtex.data.tokenizers import SentencePieceBPETokenizer from virtex.data import transforms as T from virtex.data.datasets.arch_captions import ArchCaptionsDatasetRaw class ArchCaptioningDatasetExtended(Dataset): r""" A dataset which provides image-caption (forward and backward) pairs from a ARCH Captions annotation file. This is used for pretraining tasks which use captions - bicaptioning, forward captioning and token classification. Args: data_root: Path to dataset directory containing images and annotations. 
source: Name of ARCH source to read. One of ``{"pubmed", "books", "both"}``. "both" option results in a concatenation of the datasets from "pubmed" and "books" split: Name of ARCH split to read. One of ``{"train", "val", "all"}``. tokenizer: Tokenizer which maps word tokens to their integer IDs. image_transform: List of image transformations, from either `albumentations `_ or :mod:`virtex.data.transforms`. max_caption_length: Maximum number of tokens to keep in caption tokens. Extra tokens will be trimmed from the right end of the token list. """ def __init__( self, data_root: str, split: str, tokenizer: SentencePieceBPETokenizer, source: str = "both", image_transform: Callable = T.ARCH_DEFAULT_IMAGE_TRANSFORM, tensor_flip_transform: Callable = None, max_caption_length: int = 30, ): self._dset = ArchCaptionsDatasetRaw(data_root=data_root, source=source, split=split) self.image_transform = image_transform self.tensor_flip_transform = tensor_flip_transform self.caption_transform = alb.Compose( [ T.NormalizeCaption(), T.TokenizeCaption(tokenizer), T.TruncateCaptionTokens(max_caption_length), ] ) self.padding_idx = tokenizer.token_to_id("") def __len__(self): return len(self._dset) def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]: # keys: {"image_ids", "images", "caption"} instance = self._dset[idx] image_ids, images, caption = ( instance["image_ids"], instance["images"], instance["caption"], ) # # debugging # print("Checkpoint 1") # print("Shapes before applying self.image_transform", [image.shape for image in images]) # List[int] -> np.array of shape (len(image_ids), ) image_ids = np.array(image_ids) # (len(image_ids), ) -> (len(image_ids), 1) image_ids = image_ids.reshape((image_ids.shape[0], 1)) # # debugging # print("Checkpoint 2") # Transform images, no flips at this stage not to create multiple versions of the caption! # Before flipping all images need to be resized to the same size to put them into a tensor. # Caption won't be tokenized/processed here. # Albumentations transforms require named arguments - can't avoid it. images = [self.image_transform(image=image)["image"] for image in images] # print("Shapes after applying self.image_transform", [image.shape for image in images]) # # # debugging # print("Checkpoint 3") # Convert each image from HWC to CHW format and convert to tensors: # PyTorch Transforms expect to receive tensors in (B, C, H, W) shape # [(Channel, Height, Width), ..., ] Bag Size times images = [np.transpose(image, (2, 0, 1)) for image in images] images = [torch.tensor(image, dtype=torch.float) for image in images] # # # debugging # print("Checkpoint 4") # stack all the images into a tensor: (bag_size=batch_size, Channel, Height, Width) images = torch.stack(images, dim=0) if self.tensor_flip_transform is not None: # perform tensor transforms on images in the tensor and the # corresponding caption, e.g. 
random horizontal flips # Reason: single version of the caption should appear => random flip # should be performed on all images in a bag images_caption = self.tensor_flip_transform(image=images, caption=caption) images, caption = images_caption["image"], images_caption["caption"] # print(images) # print(caption) # # # debugging # print("Checkpoint 5") # caption tokens caption_tokens = self.caption_transform(caption=caption)["caption"] # # # debugging # print("Checkpoint 6") return { "image_ids": torch.tensor(image_ids, dtype=torch.long), #(bag_size,1) "images": images, "caption_tokens": torch.tensor(caption_tokens, dtype=torch.long), "noitpac_tokens": torch.tensor(caption_tokens, dtype=torch.long).flip(0), "caption_lengths": torch.tensor(len(caption_tokens), dtype=torch.long), } def collate_fn( self, data: List[Dict[str, torch.Tensor]] ) -> Dict[str, torch.Tensor]: # Pad `caption_tokens` and `masked_labels` up to this length. caption_tokens = torch.nn.utils.rnn.pad_sequence( [d["caption_tokens"] for d in data], batch_first=True, padding_value=self.padding_idx, ) noitpac_tokens = torch.nn.utils.rnn.pad_sequence( [d["noitpac_tokens"] for d in data], batch_first=True, padding_value=self.padding_idx, ) return { "image_id": torch.stack([d["image_ids"] for d in data], dim=0), "image": torch.stack([d["images"] for d in data], dim=0), "caption_tokens": caption_tokens, "noitpac_tokens": noitpac_tokens, "caption_lengths": torch.stack( [d["caption_lengths"] for d in data]), } # check the default transform T.DEFAULT_IMAGE_TRANSFORM T.ARCH_DEFAULT_IMAGE_TRANSFORM help(T.DEFAULT_FLIP_TRANSFORM) #help(SentencePieceBPETokenizer) arch_tokenizer = SentencePieceBPETokenizer("../datasets/vocab/arch_10k.model") arch_train_dataset_extended = ArchCaptioningDatasetExtended(data_root='../datasets/ARCH', split="train", tokenizer=arch_tokenizer, tensor_flip_transform=None) len(arch_train_dataset_extended) sample_item = arch_train_dataset_extended.__getitem__(0) sample_item sample_item['images'].size() sample_tensor_image = sample_item['images'][0] sample_tensor_image plt.imshow(sample_tensor_image.to(int).view(sample_tensor_image.shape[1], sample_tensor_image.shape[2], sample_tensor_image.shape[0]))Test how the dataloader worksThis will help to debug the `collate_fn`from torch.utils.data import DataLoader train_dataloader = DataLoader( dataset=arch_train_dataset_extended, batch_size=2, shuffle=True, drop_last=True, collate_fn=arch_train_dataset_extended.collate_fn, ) len(train_dataloader) for batch in train_dataloader: print("image_id", batch["image_id"]) print("image shape", batch["image"].shape) print("caption_tokens:", batch["caption_tokens"]) print("noitpac_tokens:", batch["noitpac_tokens"]) print("caption_lengths:", batch["caption_lengths"]) break batch = next(iter(train_dataloader)) batch batch['image'].shapeExamples of TensorHorizontalFlipflip = T.TensorHorizontalFlip(p=0.5) img_tensor = torch.Tensor([[[1, 1, 0, 0] for _ in range(4)], [[0, 0, 1, 1] for _ in range(4)]]) print(img_tensor.shape) img_tensor image_caption = flip(image=img_tensor, caption="1-s are to the left on the " "first image and to the right " "on the second") print(image_caption['image']) print(image_caption['caption'])Checking if the horizontal flip can be applied to different images separately in torchvisionimport torchvision from PIL import Image test_transform = torchvision.transforms.Compose( [torchvision.transforms.ToPILImage(), torchvision.transforms.RandomHorizontalFlip(p=0.5), torchvision.transforms.ToTensor()] ) 
fixed_test_transform = test_transform test_list = [np.array([1, 1, 0, 0], dtype=np.float32).reshape((1, 4)) for _ in range(20)] test_list [fixed_test_transform(l) for l in test_list]**Task discription:**---Set-up a rough framework which simulates the reflectionsof 2 (imaginative) forest types at 4 points within the vegetation period(winter, spring, summer, autumn) and for 2 conditions: undisturbed anddisturbed.--- **Task preparation:**---Prepared input (refer from): - The PROSAIL model (Python-implementations): [link](https://github.com/robintw/ProSAIL) - Forest types: Temperate broad-leaved rainforest & Temperate dry conifer forest. - Leaf chemistry & structure properties (N, Cab, Car, Cbrown, Cw, Cm): [link](https://core.ac.uk/download/pdf/77234212.pdf) - (LIDF) Leaf Inclination Distribution Function (TypeLidf): [link](https://www.sciencedirect.com/science/article/abs/pii/S0168192312003036?via%3Dihub) - 4SAIL canopy structure parameters (LAI, hspot, tts, tto, psi). - LAI (Leaf Area Index) at 4 points within the vegetation period (winter, spring, summer, autumn): [link](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0032155) - During all PROSAIL simulations the soil brightness parameter (psoil), which determines the moisture content of the soil, was kept constant at 0.5. The sun angle (tts) was set to 35° , he relative azimuth angle between sun and satellite sensor (psi) was set to 90°, and the observer angle (tto) was set to nadir (0°), resulting in a negligible effect of the hotspot size parameter, which was therefore kept constant at 0.01. **Task implimentation:**---# Install the requirement package !pip3 install gitpython import os import sys import logging from collections import OrderedDict from git.repo.base import Repo import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable from IPython.display import display from ipywidgets import interact, interactive, fixed, interact_manual, widgets, Layout plt.style.use('seaborn') %matplotlib inline logger = logging.getLogger('') logger.setLevel(logging.INFO) original_dir = os.getcwd() # Setup working directory where python scripts/packages, data and output results be stored. folder_name = "work" prosail_git_package_url = "https://github.com/robintw/ProSAIL.git" # Check if the working directory is available, if NOT --> the folder will be created! try: os.makedirs(folder_name) logging.info('{} folder has been successfully created!'.format(folder_name)) except FileExistsError: logging.info('{} folder does exist!'.format(folder_name)) pass # Clone ProSAIL python package from the repository. try: logging.info('ProSAIL package has successfully cloned from {} repository'.\ format(prosail_git_package_url)) Repo.clone_from(prosail_git_package_url, folder_name) except: logging.warning('The repository {} is no longer available'\ ' Please check from other sources!'. format(prosail_git_package_url)) # Check if the required python function is availbe: prosail.py & dataSpec_P5.csv try: # Change the working directory os.chdir(os.path.join(original_dir, '{}'.format(folder_name))) import prosail logging.info('ProSAIL method (prosail) has successfully imported!') except: logging.warning('prosail.py does not exists in {}'\ ' Please check from other sources!'. 
format(os.getcwd())) sys.exit() # Check if the required dataset is availbe: prosail.py & dataSpec_P5.csv try: os.path.exists('dataSpec_P5.csv') except: logging.warning('dataSpec_P5.csv does not exists in {}'.format(os.getcwd())) sys.exit() logging.info("Initilization success. READY TO GO!") def plotting_function(delta_LAI, N, Cab, Car, Cbrown, Cw, Cm, psoil, hspot, tts, tto, psi): """ Plot the reflectance charts in different conditions, senarios. :param N, Cab, Car, Cbrown, Cw, Cm (LEAF CHEM & STR PROPERTIES) :param psoil (Soil Reflectance Properties) :param LAI, hspot, tts, tto, psi (4SAIL canopy structure parameters) :return: charts plot """ fig, axs = plt.subplots(2, 2, figsize=(21,10)) # Create a json dictionary object where store initinal forest type and LAI value simulation_init_info =OrderedDict({ "Conifer_forest": { "TypeLidf": 2, "LAI": {"Spring": 3.1, "Summer": 3.5, "Autumn": 3.7, "Winter": 3.1 } }, "Broadleaf_forest": { "TypeLidf": 2, "LAI": {"Spring": 0.5, "Summer": 2.5, "Autumn": 2.0, "Winter": 0.0 } } }) # Some addition parameters which support the plots colors = ["green", "orange"] events = ["undisturbed", "disturbed (LAI)", "disturbed (Others)"] # Loop over different senarios, compute reflectance (uses PROSAIL), and plot charts for i, forest_type in enumerate(list(simulation_init_info.keys())): LIDF = simulation_init_info[forest_type]['TypeLidf'] LAI_dict = simulation_init_info[forest_type]['LAI'] for j, _lai in enumerate(LAI_dict.keys()): LAI = LAI_dict[_lai] params = [N, Cab, Car, Cbrown, Cw, Cm, psoil, hspot, tts, tto, psi] [N_orig, Cab_orig, Car_orig, Cbrown_orig, Cw_orig, Cm_orig, psoil_orig, \ hspot_orig, tts_orig, tto_orig, psi_orig] = params_orig # reflectance computation for undisturbed senarios results = p.run(N_orig, Cab_orig, Car_orig, Cbrown_orig, Cw_orig, Cm_orig, \ psoil_orig, LAI, hspot_orig, tts_orig, tto_orig, psi_orig, LIDF) if j < 2: j, i0 = j, 0 else: j, i0 = j-2, 1 # Start plotting for undisturbed senarios axs[i0, j].plot(results[0], results[1], lw=2, color=colors[i], \ label = '{}[{}]'.format(forest_type, events[0])) tkw = dict(size=4, width=1.5) axs[i0, j].set_title('{} [N_ud = {}, Cab_ud = {}, psoil_ud = {}]'.\ format(_lai, N_orig, Cab_orig, psoil_orig ), fontsize=16) axs[i0, j].set_xlabel("Wavelength [nm]", fontsize=14) axs[i0, 0].set_ylabel("Reflectance [-]", fontsize=14) axs[i0, j].tick_params(axis='y', labelsize=16, **tkw) axs[i0, j].tick_params(axis='x', labelsize=16, **tkw) # axs[i0, j].legend(fontsize=16, loc='lower center', \ # fancybox=True, framealpha=0.9, ncol=2) axs[i0, j].legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0., fontsize=13) if delta_LAI !=0 and params == params_orig: # Reflectance computation for disturbed senarios: normal & ± uncertainty of LAI LAI_p = LAI + 0.01*delta_LAI*LAI if LAI - 0.01*delta_LAI > 0: LAI_n = LAI - 0.01*delta_LAI*LAI else: LAI_n = 0 results_p = p.run(N, Cab, Car, Cbrown, Cw, Cm, \ psoil, LAI_p, hspot, tts, tto, psi, LIDF) results_n = p.run(N, Cab, Car, Cbrown, Cw, Cm, \ psoil, LAI_n, hspot, tts, tto, psi, LIDF) # Start plotting for disturbed senarios: normal & ±10% uncertainty of LAI axs[i0, j].plot(results_p[0], results_p[1], '-', lw=0.75, \ color=colors[i], label = '{}[{}]'.format(forest_type, events[1])) axs[i0, j].plot(results_n[0], results_n[1], '-', lw=0.75, color=colors[i]) tkw = dict(size=4, width=1.5) axs[i0, j].set_title('{} [LAI_ud/d={}/{:.1f},{:.1f}, N_ud/d = {}/{}, Cab_ud/d = {}/{}, psoil_ud/d = {}/{}]'. 
format(_lai, LAI, LAI_p, LAI_n, N_orig, N, Cab_orig, Car, psoil_orig, psoil), fontsize=16) axs[i0, j].legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0., fontsize=13) if params != params_orig: # Reflectance computation for disturbed senarios: normal & ±10% uncertainty of LAI results = p.run(N, Cab, Car, Cbrown, Cw, Cm, \ psoil, LAI, hspot, tts, tto, psi, LIDF) # Start plotting for disturbed senarios: normal & ±10% uncertainty of LAI axs[i0, j].plot(results[0], results[1], '--', lw=1, \ color=colors[i], label = '{}[{}]'.format(forest_type, events[2])) tkw = dict(size=4, width=1.5) axs[i0, j].set_title('{} [LAI_ud/d={}/{}, N_ud/d = {}/{}, Cab_ud/d = {}/{}, psoil_ud/d = {}/{}]'. format(_lai, LAI, LAI, N_orig, N, Cab_orig, Car, psoil_orig, psoil), fontsize=16) axs[i0, j].legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0., fontsize=13) fig.tight_layout() # fig.subplots_adjust(right=0.75) fig.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=1.05, hspace=0.5, wspace=0.5) # Setup stype and layout for the chart style = {'description_width': 'initial'} layout = {'width': '600px'} # Change/Re-assign the working directory os.chdir(os.path.join(original_dir, '{}'.format(folder_name))) # Call the model class p = prosail.Prosail() # Initinal value for input parameters undisturbed senarios. params_orig = [] # Initinal LEAF CHEM & STR PROPERTIES parameters (values and ranges) ################################################################################ ## LEAF CHEM & STR PROPERTIES : N, Cab, Car, Cbrown, Cw, Cm # ################################################################################ N = 1.5 # structure coefficient Cab = 40 # chlorophyll content (µg.cm-2) Car = 8 # carotenoid content (µg.cm-2) Cbrown = 0.0 # brown pigment content (arbitrary units) Cw = 0.01 # EWT (cm) Cm = 0.009 # LMA (g.cm-2) params_orig = params_orig + [N, Cab, Car, Cbrown, Cw, Cm] N = widgets.FloatSlider(value=N, min=1, max=3.5, step=0.1, \ description='N[leaf structure parameter]', \ style=style, layout = layout) Cab = widgets.FloatSlider(value=Cab, min=0, max=100, step=10, \ description='Cab[chlorophyll a+b content (in µg/cm²)]', \ style=style, layout = layout) Car = widgets.FloatSlider(value=Car, min=0, max=30, step=1, \ description='Car[carotenoids (carotenes + xanthophylls) content (in µg/cm²)]', \ style=style, layout = layout) Cbrown = widgets.FloatSlider(value=Cbrown, min=0, max=1, step=0.1, \ description='Cbrown[brown pigments content (in arbitrary units)]', \ style=style, layout = layout) Cw = widgets.FloatSlider(value=Cw, min=0.00005, max=0.05, step=0.001, \ description='Cw[equivalent water thickness (in g/cm² or cm)]', \ style=style, layout = layout) Cm = widgets.FloatSlider(value=Cm, min=0.002, max=0.020, step=0.001, \ description='Cm[dry matter content (in g/cm²)]', \ style=style, layout = layout) # Initinal Soil Reflectance Properties parameter (value and range) ################################################################################ ## Soil Reflectance Properties: psoil # ################################################################################ psoil = 0.5 # soil factor (psoil=0: wet soil / psoil=1: dry soil) params_orig = params_orig + [psoil] psoil = widgets.FloatSlider(value=psoil, min=0, max=1, step=0.1, \ description='psoil[soil factor]', \ style=style, layout = layout) # Initinal 4SAIL canopy structure parameters (values and ranges) ################################################################################ ## 4SAIL canopy structure parameters: LAI, hspot, tts, tto, 
psi # ################################################################################ # LAI = 5. # leaf area index (m^2/m^2) delta_LAI = 10 # Uncertanty level (in %) of leaf area index (m^2/m^2) hspot = 0.01 # hot spot tts = 35. # solar zenith angle (°) tto = 0. # observer zenith angle (°) psi = 90. # azimuth (°) params_orig = params_orig + [hspot, tts, tto, psi] delta_LAI = widgets.FloatSlider(value=delta_LAI, min=0, max=100, step=10, \ description='delta_LAI[uncertanty level of leaf area index (%)]', \ style=style, layout = layout) hspot = widgets.FloatSlider(value=hspot, min=0.005, max=0.2, step=0.01, \ description='hspot[hot spot]', \ style=style, layout = layout) tts = widgets.FloatSlider(value=tts, min=0, max=90, step=10, \ description='tts[solar zenith angle (°)]', \ style=style, layout = layout) tto = widgets.FloatSlider(value=tto, min=0, max=90, step=10, \ description='tto[observer zenith angle (°)]', \ style=style, layout = layout) psi = widgets.FloatSlider(value=psi, min=0, max=180, step=10, \ description='psi[azimuth (°)]', \ style=style, layout = layout) # Call plotting_function function and present the result ouput = widgets.interactive(plotting_function, \ delta_LAI=delta_LAI, N=N, Cab=Cab, Car=Car, Cbrown=Cbrown, Cw=Cw, Cm=Cm, \ psoil=psoil, hspot=hspot, tts=tts, tto=tto, psi=psi) display(ouput)Until Python 3.6, dictionaries were unordered.Python 3.1 saw the addition of the `OrderedDict` class to the standard library. This was first proposed in PEP-372: https://www.python.org/dev/peps/pep-0372/`collections.OrderedDict` was created to give dictionaries the ability to remember the order of insertion. It is a subclass of the `dict` class. `OrderedDict` iterates over keys and values in the **same order as they were inserted**.If a new entry overwrites an existing entry, then the order is maintained.Python 3.6 revamped the `dict` class to allow it to maintain ordering, among other efficiency augmentations.So why use `OrderedDict`?from collections import OrderedDict ages = OrderedDict() ages['John'] = 30 ages['James'] = 23 ages['Mark'] = 41 ages sum(ages.values()) max(ages.keys()) bands = OrderedDict([ ('John', 'Sonic Youth'), ('James', 'The Beatles'), ('Mark', 'Boards of Canada') ]) bands`OrderedDict` adds a few useful features to the `dict` object.1. `popitem()` - augments the `dict.popitem()` method by allowing items to be popped from either the start or the end of the dictionary.2. `move_to_end()` - enables an item in the dictionary to be moved to either the start or the end.# move_to_end() bands = OrderedDict([ ('John', 'Sonic Youth'), ('James', 'The Beatles'), ('Mark', 'Boards of Canada') ]) bands bands.move_to_end('James', last=False) bands for key,val in sorted(bands.items(), key=lambda x: x[1]): bands.move_to_end(key) bands # popitem() bands = OrderedDict([ ('John', 'Sonic Youth'), ('James', 'The Beatles'), ('Mark', 'Boards of Canada') ]) bands.popitem(last=False) bandsTesting for equality is also treated differently with `OrderedDict`.In a regular dictionary, two dicts are considered equal if they contain the same elements. 
With `OrderedDict`, the order MUST be the same for equality to prevail.odict1 = OrderedDict(one=1, two=2) odict2 = OrderedDict(two=2, one=1) odict1 == odict2 d1 = dict(one=1, two=2) d2 = dict(two=2, one=1) d1 == d2DNA Set-up and initializationimport os import random import urllib import numpy as np if ((not os.path.isfile(os.path.join('temp', 'hg38.fa.align'))) and (not os.path.isfile(os.path.join('temp', 'hg38.align.lines')))): url = 'http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.align.gz' urllib.request.urlretrieve(url, os.path.join('temp', url.split('/')[-1])) import gzip import shutil with gzip.open(os.path.join('temp', url.split('/')[-1]), 'rb') as f_in: with open(os.path.join('temp', 'hg38.fa.align'), 'wb') as f_out: shutil.copyfileobj(f_in, f_out) def is_number(s): try: float(s) return True except ValueError: return False count = 0 samp1 = '' samp2 = '' first = True if not os.path.isfile(os.path.join('temp', 'hg38.align.lines')): with open(os.path.join('temp', 'hg38.align.lines'), 'w') as f_out: with open(os.path.join('temp', 'hg38.fa.align'), 'r') as f_in: for line in f_in: line_s = line[:-1].split() if line_s and line_s[0] in {'Matrix', 'Transitions', 'Gap_init'}: continue if len(line_s) > 10 and (line_s[0].isdigit() and is_number(line_s[1]) and is_number(line_s[2]) and is_number(line_s[3]) and line_s[-1].isdigit() and line_s[5].isdigit() and line_s[6].isdigit()): if count: f_out.write(f'{samp1},{samp2}\n') count += 1 samp1 = '' samp2 = '' first = True elif line: line_s = line[16:].split() if line_s and line_s[0].isdigit() and line_s[2].isdigit(): if first: samp1 += line_s[1].strip() else: samp2 += line_s[1].strip() first = not first f_out.write(f'{samp1},{samp2}\n') else: with open(os.path.join('temp', 'hg38.align.lines')) as f_count: for line in f_count: count += 1 countSampling and outputnp.random.seed(44674) sel_ct = 2400 selections_ordered = list(np.random.choice(count, sel_ct+400, replace=False)) selections = set(selections_ordered) seqs = {} line_no = 0 with open(os.path.join('temp', 'hg38.align.lines'), 'r') as f_in: for line in f_in: if line_no in selections: seqs[line_no] = line.strip() line_no += 1 out_ct = 0 sequences = set() with open('../hg38.csv', 'w') as f_out: f_out.write('seq1,seq2\n') i = 0 while out_ct < sel_ct: proposed = seqs[selections_ordered[i]].replace('-', '') if proposed not in sequences: sequences.add(proposed) f_out.write(f'{proposed}\n') out_ct += 1 i += 1 random.seed(23115) out_ct = 0 sequences = [] for i in range(len(selections_ordered)): ss1,ss2 = seqs[selections_ordered[i]].split(',') start = 0 if len(ss1)>12: start = random.randint(0, len(ss1)-12) ss1 = ss1[start:start+12].replace('-', '') ss2 = ss2[start:start+12].replace('-', '') if len(ss1) < 7 or len(ss2) < 7 or out_ct >= sel_ct or (ss1,ss2) in sequences: continue else: sequences.append((ss1,ss2)) out_ct += 1 random.shuffle(sequences) with open('../hg38_maxlen12.csv', 'w') as f_out: f_out.write('seq1,seq2\n') for ss1,ss2 in sequences: f_out.write(f'{ss1},{ss2}\n')Classification with Tensorflow Davis SML: Lecture 9 Part 2 Prof. 
Importing and installing tensorflow- install tensorflow 2.0 with conda (you do not need to install tensorflow-gpu for the course)- tensorflow, build and execute computational graphs- tensorflow 1.0 and 2.0 differ mainly by making eager execution default, removing sessionsimport os import matplotlib.pyplot as plt import tensorflow as tf import pandas as pd print("TensorFlow version: {}".format(tf.__version__)) print("Eager execution: {}".format(tf.executing_eagerly()))TensorFlow version: 2.0.0 Eager execution: TrueLoading data- tensorflow has many built in utilities for getting data- you could just as easily use requests/pandastrain_dataset_url = "https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv" train_dataset_fp = tf.keras.utils.get_file(fname=os.path.basename(train_dataset_url), origin=train_dataset_url) print("Local copy of the dataset file: {}".format(train_dataset_fp)) train_df = pd.read_csv(train_dataset_fp) train_dataset = tf.data.Dataset.from_tensor_slices((train_df.values[:,:-1],train_df.values[:,-1]))Tensorflow datasets API- Datasets API loads and readies data for use in stochastic gradient descent type iteration- the batch size tells it how many samples for the mini-batch- Dataset has methods to shuffle the data and apply transformationsbatch_size = 32 train_dataset = train_dataset.shuffle(1000) train_dataset = train_dataset.batch(batch_size) ## sets batchsize and shuffles X,y = next(iter(train_dataset)) XKeras Model API- a model is a predictor which builds a computational graph, maintains losses, prepares for optimization- built from layers which are operations in computational graph- layers have trainable variables, input and output tensor shapestrain_dataset.element_spec lin_layers = tf.keras.layers.Dense(3) lin_layers(X) ## Builds and calls the layer lin_layers.trainable_variables ## create a keras model model = tf.keras.Sequential([ tf.keras.layers.Dense(3) ]) ## model is callable outputs decision function logits = model(X) logits[:5] ## Apply softmax to logits to get predicted probabilities tf.nn.softmax(logits[:5]) y_pred = tf.argmax(logits, axis=1) ## Create the losses logistic_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) logistic_loss(y,logits) def loss(model, x, y, training): # training=training is needed only if there are layers with different # behavior during training versus inference (e.g. Dropout). 
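# Note that the loss object above was created with from_logits=True, so the model's
# raw (unnormalized) outputs are passed straight through here and the softmax is
# applied inside SparseCategoricalCrossentropy; tf.nn.softmax is only needed when we
# want human-readable probabilities, as shown earlier.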
logits = model(x, training=training) return logistic_loss(y,logits) l = loss(model, X, y, training=False) print("Loss test: {}".format(l))Loss test: 4.7047529220581055Automatic differentiation- Tensorflow knows how to differentiate built in expressions (exp, matmul, log, etc.)- you can also use chain rule to "backpropagate" derivatives (more on this later)- as a simple case, consider linear classifier with loss $\ell(y x^\top \beta)$ (eg logistic) If tensorflow has $\ell$ and $\ell'$ saved then it can automatically perform the following op$$\frac{\partial}{\partial \beta} \ell(y x^\top \beta) = \ell'(y x^\top \beta) \cdot y x$$this is the simplest example of automatic differentiation.## Gradient tape lets TF know with respect to what to take gradients def grad(model, inputs, targets): with tf.GradientTape() as tape: loss_value = loss(model, inputs, targets, training=True) return loss_value, tape.gradient(loss_value, model.trainable_variables) ## Create optimizer (chooses learning schedule etc) optimizer = tf.keras.optimizers.SGD(learning_rate=0.01) loss_value, grads = grad(model, X, y) print("Step: {}, Initial Loss: {}".format(optimizer.iterations.numpy(), loss_value.numpy())) ## Optimizer has apply_gradients step which will modify all training variables appropriately optimizer.apply_gradients(zip(grads, model.trainable_variables)) print("Step: {}, Loss: {}".format(optimizer.iterations.numpy(), loss(model, X, y, training=True).numpy())) ## Note: Rerunning this cell uses the same model variables # Keep results for plotting train_loss_results = [] train_accuracy_results = [] num_epochs = 201 for epoch in range(num_epochs): epoch_loss_avg = tf.keras.metrics.Mean() epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy() # Training loop - using batches of 32 for x, y in train_dataset: # Optimize the model loss_value, grads = grad(model, x, y) optimizer.apply_gradients(zip(grads, model.trainable_variables)) # Track progress epoch_loss_avg.update_state(loss_value) # Add current batch loss epoch_accuracy.update_state(y, model(x, training=True)) # End epoch train_loss_results.append(epoch_loss_avg.result()) train_accuracy_results.append(epoch_accuracy.result()) if epoch % 50 == 0: print("Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}".format(epoch, epoch_loss_avg.result(), epoch_accuracy.result())) fig, axes = plt.subplots(2, sharex=True, figsize=(12, 8)) fig.suptitle('Training Metrics') axes[0].set_ylabel("Loss", fontsize=14) axes[0].plot(train_loss_results) axes[1].set_ylabel("Accuracy", fontsize=14) axes[1].set_xlabel("Epoch", fontsize=14) axes[1].plot(train_accuracy_results) plt.show() ## Evaluate on test set test_url = "https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv" test_fp = tf.keras.utils.get_file(fname=os.path.basename(test_url), origin=test_url) test_df = pd.read_csv(test_fp) test_dataset = tf.data.Dataset.from_tensor_slices((test_df.values[:,:-1],test_df.values[:,-1])) test_dataset = test_dataset.batch(batch_size) ## Compute test accuracy test_accuracy = tf.keras.metrics.Accuracy() for (x, y) in test_dataset: # training=False is needed only if there are layers with different # behavior during training versus inference (e.g. Dropout). logits = model(x, training=False) prediction = tf.argmax(logits, axis=1, output_type=tf.int32) test_accuracy(prediction, y) print("Test set accuracy: {:.3%}".format(test_accuracy.result())) ## Last batch actual labels and predicted print("\n".join(f"actual: {a} =? pred: {b}" for a,b in zip(y,prediction)))actual: 1.0 =? 
pred: 1 actual: 2.0 =? pred: 2 actual: 0.0 =? pred: 0 actual: 1.0 =? pred: 1 actual: 1.0 =? pred: 1 actual: 1.0 =? pred: 1 actual: 0.0 =? pred: 0 actual: 2.0 =? pred: 2 actual: 1.0 =? pred: 1 actual: 2.0 =? pred: 2 actual: 2.0 =? pred: 2 actual: 0.0 =? pred: 0 actual: 2.0 =? pred: 2 actual: 1.0 =? pred: 1 actual: 1.0 =? pred: 1 actual: 0.0 =? pred: 0 actual: 1.0 =? pred: 1 actual: 0.0 =? pred: 0 actual: 0.0 =? pred: 0 actual: 2.0 =? pred: 2 actual: 0.0 =? pred: 0 actual: 1.0 =? pred: 2 actual: 2.0 =? pred: 2 actual: 1.0 =? pred: 2 actual: 1.0 =? pred: 1 actual: 1.0 =? pred: 1 actual: 0.0 =? pred: 0 actual: 1.0 =? pred: 1 actual: 2.0 =? pred: 2 actual: 1.0 =? pred: 1Multi-model metadata generation> experiment in combining text and tabular models to generate web archive metadata- toc: true - badges: false- comments: true- categories: [metadata, multi-model]- search_exclude: false Learning from multiple input types Deep learning models usually take one type of input (image, text etc.) to predict output labels (category, entities etc). This usually makes sense if the data you are using to make predictions contains a lot of information. i.e. a chunk of text from a movie review or an image. Recently I have been playing around with a Website Classification Dataset from the UK web archive. The dataset is derived from a manually curated web archive which contains a primary and secondary category for each web page. The UK web archive has made a [dataset](https://data.webarchive.org.uk/opendata/ukwa.ds.1/classification/) available based on this archive which contains the manually classified subject categories alongside the page URL and the page title. As part of playing around with this dataset I was keen to see if a multi-input model would work well. In this case exploring a model that takes both text and tabular data as input. A preview of the data:#hide_input import pandas as pd tsv ='https://gist.githubusercontent.com/davanstrien/5e22b725046eddc2f1ee06b108f27e48/raw/71426e6b92c7fa98140a95728a5ea55171b948cd/classification.tsv' df = pd.read_csv(tsv, error_bad_lines=False, index_col=0) df.head()Based on this data the UK web archive are interested: >"in understanding whether high-level metadata like this can be used to train an appropriate automatic classification system so that we might use this manually generated dataset to partially automate the categorisation of our larger archives."This is going to be fairly tricky but offers a nice excuse to try to use models with multiple inputs to predict our categories. Looking at the dataTaking a closer look at the data:#hide_input tsv = 'https://gist.githubusercontent.com/davanstrien/5e22b725046eddc2f1ee06b108f27e48/raw/71426e6b92c7fa98140a95728a5ea55171b948cd/classification.tsv' df = pd.read_csv(tsv, error_bad_lines=False,)Unique primary categorieslen(df['Primary Category'].unique())Unique secondary categorieslen(df['Secondary Category'].unique())Predicting a 104 different labels is going to be pretty difficult so I've only used 'Primary Category' as the the ```y``` target. What is the distribution of these categories like?#hide_input df['Primary Category'].value_counts()😬 We also have a fairly skewed datasets. I could drop some of rows which don't occur often but since the main objective here is to see if we can use a multi-input model we'll leave the data as it is for now. Multi-input model The rest of the notebook will describe some experiments with using [fastai](https://docs.fast.ai/) to create a model which takes tabular and text data as an input. 
The aim here wasn't for me to create the best model but get my head around how to combine models. I heavily relied on some existing [notebooks](https://nbviewer.jupyter.org/gist/joshfp/b62b76eae95e6863cb511997b5a63118/5.full-deep-learning.ipynb), kaggle [writeup](https://www.kaggle.com/c/petfinder-adoption-prediction/discussion/89491) and forum posts on the [fastai forums](forums.fast.ai/). Tabular model In the dataset above we start of with two columns of data which can be used as inputs for the model. The title is fairly obviously something which we can treat like other text inputs. The URL is a little less obvious. It could be treated as a text input but an alternative is to treat a URL as parts which each contain some information which could be useful for our model.#hide_input print(df.URL.sample(10).to_list()[3]) print(df.URL.sample(10).to_list()[4]) print(df.URL.sample(10).to_list()[3])http://www.specialschool.org/ http://www.bbc.co.uk/news/health-12668398 http://www.monarchit.co.uk/Each part of the URL could be split into smaller parts#hide_input print(df.URL.sample(10).to_list()[3].split('.'))['http://www', 'darwincountry', 'org/']Whether a url has '.org' or '.uk' or '.com' could be meaningful for predicting our categories (it might also not be meaningful). It also offers us a way of taking the URLs and composing it into a format which looks more tabular.#hide_input csv ='https://gist.githubusercontent.com/davanstrien/5e22b725046eddc2f1ee06b108f27e48/raw/4c2a27772bf4d959bf3e58cfa8de9e0b9be69ca7/03_classification_valid_train.csv' df = pd.read_csv(csv, index_col=0) df[['scheme','url1','url3','url4','url5']].sample(5)So far I've only done this very crudely. I suspect tidying up this part of the data will help improve things. At this point though we have something which is a little more tabular looking we can pass to ```fastai.tabular``` learner. Now we have some 'categories' rather than unique urls.print(len(df.url3.unique())) print(len(df.url4.unique()))279 56How does this tabular model do? Once some preprocessing of the url has been done we train a model using the tabular learner. I didn't do much to try to optimize this model. Tracking best ```f2``` score we end up with:```Better model found at epoch 36 with f_beta value: 0.17531482875347137``` and an accuracy of ```0.334121``` How well does a text model do?Next I tried training using the title field in a NLP model. I tried a few things here. SentencePiece tokenizationBy default fastai uses SpaCy to do tokenization with a few additional special tokens added by fastai. I wanted to see if using [sentencePiece](https://github.com/google/sentencepiece) would work better for processing title fields. SentencePiece allows for various sub-word tokeinzation. This can be useful for agglutinative languages but could also be useful when you have a lot of out of vocabulary words in your corpus. I wanted to see if this also was useful for processing titles since these may contain domain specific terms. I only tried using SentencePiece with 'unigram' tokenization. The best score I got for this was:```Better model found at epoch 1 with f_beta value: 0.21195338666439056.``` Default SpaCy tokenizationI compared the above to using the default fastai tokenizer which uses SpaCy. In this case the default approach worked better. This is probably because we didn't have a large pre-trained model using the SentencePiece tokenization to use as a starting point. 
The best score I got for this model was:```Better model found at epoch 27 with f_beta value: 0.33327043056488037.``` Using the URL as text input I wanted to do a quick comparison to the tabular model and use the URL as a text input instead. In this case I used SentencePiece with byte-pair-encoding (BPE). The best score in this case was:```Better model found at epoch 3 with f_beta value: 0.2568161189556122.```This might end up being a better approach compared to the tabular approach described above. Combining inputs Neither of these models is doing super well but my main question was whether combining the two would improve things at all. There are different approaches to combining these models. I followed existing examples and removed some layers from the text and tabular models which are then combined in a concat model. I won't cover all the steps here but all the notebooks can be found in this [GitHub repo](https://github.com/davanstrien/Website-Classification).#hide from fastai.tabular import * from pathlib import Path import pandas as pd from fastai import * from fastai.tabular import * from fastai.callbacks import * from fastai.text import * from fastai.metrics import accuracy, MultiLabelFbetaOne of the things we need to do to create a model with multiple input is create a new Pytorch dataset which combines our text and tabular ```x``` inputs with our target. This is pretty straightforward:#collapse_show class ConcatDataset(Dataset): def __init__(self, x1, x2, y): self.x1,self.x2,self.y = x1,x2,y def __len__(self): return len(self.y) def __getitem__(self, i): return (self.x1[i], self.x2[i]), self.y[i]One of the other pieces was creating a ```ConcatModel```#collapse_show class ConcatModel(nn.Module): def __init__(self, model_tab, model_nlp, layers, drops): super().__init__() self.model_tab = model_tab self.model_nlp = model_nlp lst_layers = [] activs = [nn.ReLU(inplace=True),] * (len(layers)-2) + [None] for n_in,n_out,p,actn in zip(layers[:-1], layers[1:], drops, activs): lst_layers += bn_drop_lin(n_in, n_out, p=p, actn=actn) # https://docs.fast.ai/layers.html#bn_drop_lin self.layers = nn.Sequential(*lst_layers) def forward(self, *x): x_tab = self.model_tab(*x[0]) x_nlp = self.model_nlp(x[1])[0] x = torch.cat([x_tab, x_nlp], dim=1) return self.layers(x)Understanding Descriptive StatisticsImport the necessary libraries here:# LibrariesChallenge 1 1.- Define a function that simulates rolling a dice 10 times. Save the information in a dataframe.**Hint**: you can use the *choices* function from module *random* to help you with the simulation.# your code here2.- Plot the results sorted by value.# your code here3.- Calculate the frequency distribution and plot it. What is the relation between this plot and the plot above? Describe it with words.# your code here """ your comments here """Challenge 2Now, using the dice results obtained in *challenge 1*, your are going to define some functions that will help you calculate the mean of your data in two different ways, the median and the four quartiles. 1.- Define a function that computes the mean by summing all the observations and dividing by the total number of observations. You are not allowed to use any methods or functions that directly calculate the mean value.# your code here2.- First, calculate the frequency distribution. Then, calculate the mean using the values of the frequency distribution you've just computed. 
You are not allowed to use any methods or functions that directly calculate the mean value.# your code here3.- Define a function to calculate the median. You are not allowed to use any methods or functions that directly calculate the median value. **Hint**: you might need to define two computation cases depending on the number of observations used to calculate the median.# your code here4.- Define a function to calculate the four quartiles. You can use the function you defined above to compute the median but you are not allowed to use any methods or functions that directly calculate the quartiles.# your code hereChallenge 3Read the csv `roll_the_dice_hundred.csv` from the `data` folder. 1.- Sort the values and plot them. What do you see?# your code here """ your comments here """2.- Using the functions you defined in *challenge 2*, calculate the mean value of the hundred dice rolls.# your code here3.- Now, calculate the frequency distribution.# your code here4.- Plot the histogram. What do you see (shape, values...) ? How can you connect the mean value to the histogram?# your code here """ your comments here """5.- Read the `roll_the_dice_thousand.csv` from the `data` folder. Plot the frequency distribution as you did before. Has anything changed? Why do you think it changed?# your code here """ your comments here """Challenge 4In the `data` folder of this repository you will find three different files with the prefix `ages_population`. These files contain information about a poll answered by a thousand people regarding their age. Each file corresponds to the poll answers in different neighbourhoods of Barcelona. 1.- Read the file `ages_population.csv`. Calculate the frequency distribution and plot it as we did during the lesson. Try to guess the range in which the mean and the standard deviation will be by looking at the plot.# your code here2.- Calculate the exact mean and standard deviation and compare them with your guesses. Do they fall inside the ranges you guessed?# your code here """ your comments here """3.- Now read the file `ages_population2.csv` . Calculate the frequency distribution and plot it.# your code here4.- What do you see? Is there any difference with the frequency distribution in step 1?""" your comments here """5.- Calculate the mean and standard deviation. Compare the results with the mean and standard deviation in step 2. What do you think?# your code here """ your comments here """Challenge 5Now is the turn of `ages_population3.csv`. 1.- Read the file `ages_population3.csv`. Calculate the frequency distribution and plot it.# your code here2.- Calculate the mean and standard deviation. Compare the results with the plot in step 1. What is happening?# your code here """ your comments here """3.- Calculate the four quartiles. Use the results to explain your reasoning for question in step 2. How much of a difference is there between the median and the mean?# your code here """ your comments here """4.- Calculate other percentiles that might be useful to give more arguments to your reasoning.# your code here """ your comments here """Bonus challengeCompare the information about the three neighbourhoods. Prepare a report about the three of them. 
Remember to find out which are their similarities and their differences backing your arguments in basic statistics.# your code here """ your comments here """Preparacion de los datosdataDir='data' resultsDir='results' if not os.path.exists(resultsDir): os.mkdir(resultsDir)Descripcion de las columnas- Review: Comentarios escritos por usuarios de IMDB acerca de peliculas. (https://www.imdb.com/)- Sentiment: Valoracion de la pelicula. El valor 1 significa positiva, mientras que 0 es negativadataFile='Reviews peliculas IMDB small.txt' dataFile=os.path.join(dataDir, dataFile) colNames = ['Review', 'Sentiment'] dataDF=pd.read_csv(dataFile, sep = '\t', header = None) dataDF.columns = colNames dataDF.head() len(dataDF) dataDF['Sentiment'].value_counts() dataDF.isnull().sum()Procesamiento de lenguaje naturalimport string import spacy from spacy import displacy from spacy.lang.en.stop_words import STOP_WORDS nlp = spacy.load('en_core_web_sm') sent = nlp.create_pipe('sentencizer') stopwordsNLP = list(STOP_WORDS) punctuationNLP = string.punctuation text = dataDF.sample(3)['Review'].values[0] doc = nlp(text) print(text, '\n') for token in doc: print(token.text, token.pos_) displacy.render(doc, style = 'dep') def text_data_cleaning(sentence): doc = nlp(sentence) tokens = [] for token in doc: if token.lemma_ != "-PRON-": temp = token.lemma_.lower().strip() else: temp = token.lower_ tokens.append(temp) cleaned_tokens = [] for token in tokens: if token not in stopwordsNLP and token not in punctuationNLP: cleaned_tokens.append(token) return cleaned_tokens text_data_cleaning(text)Vectorization Feature Engineering (TF-IDF)from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.pipeline import Pipeline from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, classification_report, confusion_matrix from sklearn.svm import LinearSVC X = dataDF['Review'] y = dataDF['Sentiment'] XTrain, XTest, yTrain, yTest = train_test_split(X, y, test_size = 0.2, random_state = 42, stratify=y) XTrain.shape, XTest.shape yTrain.value_counts() tfidf = TfidfVectorizer(tokenizer = text_data_cleaning) classifier = LinearSVC() clf = Pipeline([('tfidf', tfidf), ('clf', classifier)], verbose=True)We train the modelclf.fit(XTrain, yTrain) yPred = clf.predict(XTest) print(yPred[0:10]) print(yTest[0:10].values) def plot_confusion_matrix(y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues, figFile='model_confussion_matrix.jpg'): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if not title: if normalize: title = 'Normalized confusion matrix' else: title = 'Confusion matrix, without normalization' # Compute confusion matrix cm = confusion_matrix(y_true, y_pred, labels=np.arange(len(classes))) if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') # print(cm) fig, ax = plt.subplots(figsize=(10,10)) im = ax.imshow(cm, interpolation='nearest', cmap=cmap) ax.figure.colorbar(im, ax=ax) # We want to show all ticks... ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]), # ... and label them with the respective list entries xticklabels=classes, yticklabels=classes, title=title, ylabel='True label', xlabel='Predicted label') ax.tick_params(labelbottom=False, labeltop=True, labelright=False, ) # Rotate the tick labels and set their alignment. 
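# Because labeltop=True was set above, the x tick labels sit along the top of the axes;
# rotation_mode="anchor" rotates each label about its alignment point so the rotated
# text stays attached to its tick.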
plt.setp(ax.get_xticklabels(), rotation=60, ha="left", rotation_mode="anchor") ''' # Loop over data dimensions and create text annotations. fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i in range(cm.shape[0]): for j in range(cm.shape[1]): ax.text(j, i, format(cm[i, j], fmt), ha="center", va="center", color="white" if cm[i, j] > thresh else "black") ''' fig.tight_layout() figFile = os.path.join(resultsDir, figFile) fig.savefig(figFile, dpi=300) labels = ['Negative', 'Positive'] cm = confusion_matrix(yTest, yPred, labels=[0, 1]) plot_confusion_matrix(yTest, yPred, labels) print(classification_report(yTest, yPred))precision recall f1-score support 0 0.76 0.77 0.76 73 1 0.78 0.77 0.77 77 accuracy 0.77 150 macro avg 0.77 0.77 0.77 150 weighted avg 0.77 0.77 0.77 150Is file grofrom molsysmt.tools import file_gro file_gro.is_file_gro('test.gro')Define the Modelclass MNistModel(nn.Module): def __init__(self, input_size=784, output_size=10, layers=[120,80]): super().__init__() self.fc1 = nn.Linear(input_size, layers[0]) self.fc2 = nn.Linear(layers[0], layers[1]) self.fc3 = nn.Linear(layers[1], output_size) def forward(self,x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = F.log_softmax(self.fc3(x), dim=1) return x model = MNistModel() model model_params = [params.numel() for params in model.parameters() if params.requires_grad] for items in model_params: print(items) print(f'______') print(f'{sum(model_params)}') ### define the loss function and optimizer criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=0.001)Training the modelimport time start_time = time.time() epochs = 10 train_losses = [] test_losses = [] train_correct = [] test_correct = [] for i in range(epochs): trn_corr = 0 tst_corr = 0 # train for b,(X_train, y_train) in enumerate(train_loader): b+=1 y_pred = model(X_train.view(100,-1)) loss = criterion(y_pred, y_train) predicted = torch.max(y_pred.data, 1)[1] batch_corr = (predicted == y_train).sum() trn_corr += batch_corr optimizer.zero_grad() loss.backward() optimizer.step() if b%200 == 0: print(f'epoch: {i:2} batch: {b:4} [{100*b:6}/60000] loss: {loss.item():10.8f} \ accuracy: {trn_corr.item()*100/(100*b):7.3f}%') train_losses.append(loss) train_correct.append(trn_corr) #test with torch.no_grad(): for b, (X_test, y_test) in enumerate(test_loader): # Apply the model y_val = model(X_test.view(500, -1)) # Here we flatten X_test # Tally the number of correct predictions predicted = torch.max(y_val.data, 1)[1] tst_corr += (predicted == y_test).sum() # Update test loss & accuracy for the epoch loss = criterion(y_val, y_test) test_losses.append(loss) test_correct.append(tst_corr) print(f'\nDuration: {time.time() - start_time:.0f} seconds') # print the time elapsed plt.plot(train_losses, label='training loss') plt.plot(test_losses, label='validation loss') plt.title('Loss at the end of each epoch') plt.legend(); plt.plot([t//600 for t in train_correct], label='training accuracy') plt.plot([t//100 for t in test_correct], label='validation accuracy') plt.title('Accuracy at the end of each epoch') plt.legend(); print(test_correct) # contains the results of all 10 epochs print() print(f'Test accuracy: {test_correct[-1].item()*100/10000:.3f}%') # Extract the data all at once, not in batches test_load_all = DataLoader(test_data, batch_size=10000, shuffle=False) with torch.no_grad(): correct = 0 for X_test, y_test in test_load_all: y_val = model(X_test.view(len(X_test), -1)) # pass in a flattened view of X_test predicted = 
torch.max(y_val,1)[1] correct += (predicted == y_test).sum() print(f'Test accuracy: {correct.item()}/{len(test_data)} = {correct.item()*100/(len(test_data)):7.3f}%')Test accuracy: 9771/10000 = 97.710%Tutorial on keras https://gist.github.com/NiharG15/cd8272c9639941cf8f481a7c4478d525import numpy as np from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder from keras.models import Sequential from keras.layers import Dense from keras.optimizers import Adam iris_data = load_iris() # load the iris dataset print('Example data: ') print(iris_data.data[:5]) print('Example labels: ') print(iris_data.target[:5]) x = iris_data.data y_ = iris_data.target.reshape(-1, 1) # Convert data to a single column # One Hot encode the class labels encoder = OneHotEncoder(sparse=False) y = encoder.fit_transform(y_) #print(y) # Split the data for training and testing train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.20) # Build the model model = Sequential() model.add(Dense(10, input_shape=(4,), activation='relu', name='fc1')) model.add(Dense(10, activation='relu', name='fc2')) model.add(Dense(3, activation='softmax', name='output')) # Adam optimizer with learning rate of 0.001 optimizer = Adam(lr=0.001) model.compile(optimizer, loss='categorical_crossentropy', metrics=['accuracy']) print('Neural Network Model Summary: ') print(model.summary()) # Train the model model.fit(train_x, train_y, verbose=2, batch_size=5, epochs=200) # Test on unseen data results = model.evaluate(test_x, test_y) print('Final test set loss: {:4f}'.format(results[0])) print('Final test set accuracy: {:4f}'.format(results[1]))Final test set loss: 0.151927 Final test set accuracy: 0.9666671. Collect course link texts for driver to click onpage_soup = soup(driver.page_source, 'lxml') containers = page_soup.find("table", {"class": "sc_courselist"}).find("tbody").findAll("a") len(containers) link_texts = [container.text for container in containers] link_texts2. Test run - try scraping the first coursedriver.find_element_by_link_text(link_texts[0]).click() page_soup = soup(driver.page_source, 'lxml') course_block = page_soup.find("div", {"class": "lfjsbubblemain"}).find("div", {"class": "courseblock"}) course_code = course_block.find("span", {"class": "text detail-code margin--small text--semibold text--big"}).text course_code course_name = course_block.find("span", {"class": "text detail-title margin--small text--semibold text--big"}).text course_name course_desc = course_block.find("p", {"class": "courseblockextra noindent"}).text course_desc from selenium.webdriver.common.keys import Keys driver.find_element_by_link_text(link_texts[0]).send_keys(Keys.ESCAPE) driver.find_element_by_link_text(link_texts[1]).click() driver.find_element_by_link_text(link_texts[1]).send_keys(Keys.ESCAPE)3. Test run successful! 
Implement automation script to scrape all coursescounter = 0 course_names = [] course_codes = [] course_descs = [] for link_text in link_texts: link = driver.find_element_by_link_text(link_text) time.sleep(2) link.click() time.sleep(3) page_soup = soup(driver.page_source, 'lxml') course_block = page_soup.find("div", {"class": "lfjsbubblemain"}).find("div", {"class": "courseblock"}) course_codes.append(course_block.find("span", {"class": "text detail-code margin--small text--semibold text--big"}).text) course_names.append(course_block.find("span", {"class": "text detail-title margin--small text--semibold text--big"}).text) course_descs.append(course_block.find("p", {"class": "courseblockextra noindent"}).text) print("Scraped ", course_codes[-1]) counter += 1 link.send_keys(Keys.ESCAPE) time.sleep(2) print("Successfully scraped {} courses".format(counter))Scraped CHEM*1040 Scraped CIS*1500 Scraped ENGG*1100 Scraped MATH*1200 Scraped PHYS*1130 Scraped ENGG*1210 Scraped ENGG*1500 Scraped MATH*1210 Scraped PHYS*1010 Scraped ENGG*1070 Scraped ENGG*2100 Scraped ENGG*2120 Scraped ENGG*2160 Scraped ENGG*2400 Scraped MATH*2270 Scraped ENGG*2180 Scraped ENGG*2230 Scraped ENGG*2340 Scraped ENGG*2450 Scraped MATH*2130 Scraped STAT*2120 Scraped ENGG*3240 Scraped ENGG*3260 Scraped ENGG*3280 Scraped ENGG*3510 Scraped HIST*1250 Scraped ENGG*3100 Scraped ENGG*3370 Scraped ENGG*3410 Scraped ENGG*3430 Scraped ENGG*3140 Scraped ENGG*4000 Scraped ENGG*4160 Successfully scraped 33 courses4. Inspect scraped data and write to CSVcourse_names course_codes course_descs import pandas as pd df = pd.DataFrame({ "Course Number": course_codes, "Course Name": course_names, "Course Description": course_descs }) df df.to_csv('UGuelph_MechEng_Core_(AllYears)_Courses.csv', index = False) driver.quit()TenantStorage was created automatically when the tenant was created.Use the `hadoop fs` cli to upload a file.! hadoop fs -put -f wine-quality.csv dtap://TenantStorage/Now check the file has been uploaded.# NBVAL_IGNORE_OUTPUT ! hadoop fs -tail dtap://TenantStorage/wine-quality.csvBy default, only number as primary key is used for renaming the files, but you can add information into the new name by using doc.metadata. 
By default, files are copied and renamed, you can opt to cut and rename.oldfolder = "PDFS" current_dir = os.getcwd() oldfolder_dir = os.path.join(current_dir, oldfolder) newfolder_dir = os.path.join(current_dir, "Rename_output") if os.path.exists(newfolder_dir): print("oldfolder_dir: ", newfolder_dir) print("foler already existed, please create a new folder!") raise SystemExit else: os.makedirs(newfolder_dir) old_pattern = '*.pdf' rename(oldfolder_dir, old_pattern, newfolder_dir) # [os.rename(f, f.replace('_', '-')) for f in os.listdir('.') if not f.startswith('.')]import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns cd '/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/matrix_two_cars' !pip install --upgrade tables ls df = pd.read_hdf('data/car.h5') df.shape df.columns.values df['price_value'].hist(bins=100) df['price_value'].describe() df['param_marka-pojazdu'].unique() ( df .groupby('param_marka-pojazdu')['price_value'] .agg([np.mean, np.median, np.size]) .sort_values(by='mean', ascending = False) .head(50) .plot(kind='bar', figsize=(15,5), subplots=True) ) ( df .groupby('param_marka-pojazdu')['price_value'] .agg([np.mean, np.median, np.size]) .sort_values(by='size', ascending = False) .head(50) .plot(kind='bar', figsize=(15,5), subplots=True) ) def group_and_barplot(feat_groupby, feat_agg='price_value', feat_funcs=[np.mean, np.median, np.size], feat_sort='mean', top=50, subplots=True): return ( df .groupby(feat_groupby)[feat_agg] .agg(feat_funcs) .sort_values(by=feat_sort, ascending = False) .head(top) ).plot(kind='bar', figsize=(15,5), subplots=subplots) group_and_barplot('param_marka-pojazdu'); group_and_barplot('param_kraj-pochodzenia'); group_and_barplot('param_rok-produkcji',feat_sort='size'); group_and_barplot('param_kolor',feat_sort='mean'); group_and_barplot('param_przebieg',feat_sort='mean');**Template Matching**Use Python libraries (skimage , opencv , ...) to find matches with two template above.show the found matches on coins image.Your Implementation (Extra points)Self Balance Tree> Info bout AVL Tree,Red-Black tree, 2-3 tree, B-tree which can maintain its own balance when data is added or remove from the tree This file will become your README and also the index of your documentation. Install `pip install your_project_name` How to use Fill me in please! 
Don't forget code examples:1+1Carregando os Dadosocorrencia=pd.read_csv('Dados/ocorrencia_2010_2020.csv', sep = ';', low_memory=False) ocorrenciaChecando se todos os códigos de ocorrência são iguaisfor i in range(1,5): print(ocorrencia['codigo_ocorrencia'].equals(ocorrencia['codigo_ocorrencia' + str(i)])) print(ocorrencia.columns.values)['codigo_ocorrencia' 'codigo_ocorrencia1' 'codigo_ocorrencia2' 'codigo_ocorrencia3' 'codigo_ocorrencia4' 'ocorrencia_classificacao' 'ocorrencia_latitude' 'ocorrencia_longitude' 'ocorrencia_cidade' 'ocorrencia_uf' 'ocorrencia_pais' 'ocorrencia_aerodromo' 'ocorrencia_dia' 'ocorrencia_hora' 'investigacao_aeronave_liberada' 'investigacao_status' 'divulgacao_relatorio_numero' 'divulgacao_relatorio_publicado' 'divulgacao_dia_publicacao' 'total_recomendacoes' 'total_aeronaves_envolvidas' 'ocorrencia_saida_pista']Checando os tipo de cada coluna e se tem valores faltantesocorrencia.info() RangeIndex: 5752 entries, 0 to 5751 Data columns (total 22 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 codigo_ocorrencia 5752 non-null int64 1 codigo_ocorrencia1 5752 non-null int64 2 codigo_ocorrencia2 5752 non-null int64 3 codigo_ocorrencia3 5752 non-null int64 4 codigo_ocorrencia4 5752 non-null int64 5 ocorrencia_classificacao 5752 non-null object 6 ocorrencia_latitude 4187 non-null object 7 ocorrencia_longitude 4187 non-null object 8 ocorrencia_cidade 5752 non-null object 9 ocorrencia_uf 5752 non-null object 10 ocorrencia_pais 5752 non-null object 11 ocorrencia_aerodromo 5752 non-null object 12 ocorrencia_dia 5752 non-null [...]Checando quantos valores faltantes tem em cada colunaprint("Missing values: ", ocorrencia.isnull().sum())Missing values: codigo_ocorrencia 0 codigo_ocorrencia1 0 codigo_ocorrencia2 0 codigo_ocorrencia3 0 codigo_ocorrencia4 0 ocorrencia_classificacao 0 ocorrencia_latitude 1565 ocorrencia_longitude 1565 ocorrencia_cidade 0 ocorrencia_uf 0 ocorrencia_pais 0 ocorrencia_aerodromo 0 ocorrencia_dia 0 ocorrencia_hora 1 investigacao_aeronave_liberada 341 investigacao_status 340 divulgacao_relatorio_numero 865 divulgacao_relatorio_publicado 0 divulgacao_dia_publicacao 4258 total_recomendacoes 0 total_aeronaves_envolvidas 0 ocorrencia_saida_pista 0 dtype: int64Removendo as colunas com dados faltante, visto que não são colunas essenciais para as análises que serão realizadasocorrencia_filtrada = ocorrencia.drop(['ocorrencia_latitude', 'ocorrencia_longitude', 'investigacao_aeronave_liberada', 'investigacao_status', 'divulgacao_relatorio_numero', 'divulgacao_dia_publicacao'], axis = 1) ocorrencia_filtrada print("Missing values: ", ocorrencia_filtrada.isnull().sum())Missing values: codigo_ocorrencia 0 codigo_ocorrencia1 0 codigo_ocorrencia2 0 codigo_ocorrencia3 0 codigo_ocorrencia4 0 ocorrencia_classificacao 0 ocorrencia_cidade 0 ocorrencia_uf 0 ocorrencia_pais 0 ocorrencia_aerodromo 0 ocorrencia_dia 0 ocorrencia_hora 1 divulgacao_relatorio_publicado 0 total_recomendacoes 0 total_aeronaves_envolvidas 0 ocorrencia_saida_pista 0 dtype: int64Checando se existem dados duplicadosprint("Duplicated values: ", ocorrencia_filtrada.duplicated().sum()) for column in ocorrencia_filtrada.columns: print("\n" + column) print(ocorrencia_filtrada[column].value_counts())codigo_ocorrencia 49152 1 79512 1 44399 1 50542 1 77250 1 .. 43684 1 45731 1 43680 1 45727 1 43007 1 Name: codigo_ocorrencia, Length: 5752, dtype: int64 codigo_ocorrencia1 49152 1 79512 1 44399 1 50542 1 77250 1 .. 
43684 1 45731 1 43680 1 45727 1 43007 1 Name: codigo_ocorrencia1, Length: 5752, dtype: int64 codigo_ocorrencia2 49152 1 79512 1 44399 1 50542 1 77250 1 .. 43684 1 45731 1 43680 1 45727 1 43007 1 Name: codigo_ocorrencia2, Length: 5752, dtype: int64 codigo_ocorrencia3 49152 1 79512 1 44399 1 50542 1 77250 1 .. 43684 1 45731 1 43680 1 45727 1 43007 1 Name: codigo_ocorrencia3, Length: 5752, dtype: int64 codigo_ocorrencia4 49152 1 79512 1 44399 1 50542 1 77250 1 .. 43684 1 45731 1 43680 1 45727 1 43007 1 Name: codigo_ocorrencia4, Length: 5752, dtype: int64 ocorrencia_classificacao INCIDE[...]Selecionando as colunas que serão utilizadas nas análisesdf = ocorrencia_filtrada.loc[:,['codigo_ocorrencia1', 'ocorrencia_classificacao', 'ocorrencia_cidade', 'ocorrencia_uf', 'ocorrencia_aerodromo', 'ocorrencia_dia']] dfSplitando a coluna da data (ocorrencia_dia) em dia, mês e anodf[['Dia','Mes', 'Ano']] = df.ocorrencia_dia.str.split("/",expand=True,) df.to_csv('Dados/Ocorrencias_Filtradas.csv', index=False) dfSeparando as classificações de incidentesincidentes = df.loc[df['ocorrencia_classificacao'] == 'INCIDENTE'] incidentes_graves = df.loc[df['ocorrencia_classificacao'] == 'INCIDENTE GRAVE'] acidentes = df.loc[df['ocorrencia_classificacao'] == 'ACIDENTE']Panorama dos acidentes e incidentes nos últimos 10 anosfig = plt.figure(figsize=(27,12)) fig.subplots_adjust(hspace=0.2, wspace=0.2) rc={'font.size': 12, 'axes.labelsize': 24, 'legend.fontsize': 40, 'axes.titlesize': 24, 'xtick.labelsize': 12, 'ytick.labelsize': 12} sns.set(rc=rc) plot1 = plt.subplot(2, 3, 1) ax = sns.countplot(x=acidentes.Ano ,data=acidentes) plt.xlabel("Ano") plt.ylabel("Contagem acidentes") plt.title("Ocorrências de Acidentes por ano") for p in ax.patches: ax.annotate(p.get_height(), (p.get_x(), p.get_height()+2)) plot2 = plt.subplot(2, 3, 2) ax = sns.countplot(x=incidentes.Ano ,data=incidentes) plt.xlabel("Ano") plt.ylabel("Contagem incidentes") plt.title("Ocorrências de Incidentes por ano") for p in ax.patches: ax.annotate(p.get_height(), (p.get_x(), p.get_height()+3)) plot3 = plt.subplot(2, 3, 3) ax = sns.countplot(x=incidentes_graves.Ano ,data=incidentes_graves) plt.xlabel("Ano") plt.ylabel("Contagem incidentes graves") plt.title("Ocorrências de Incidentes Graves por ano") for p in ax.patches: ax.annotate(p.get_height(), (p.get_x()+0.2, p.get_height()+1)) plt.figure(figsize = (15,8)) rc={'font.size': 12, 'axes.labelsize': 24, 'legend.fontsize': 40, 'axes.titlesize': 24, 'xtick.labelsize': 12, 'ytick.labelsize': 12} sns.set(rc=rc) ax = sns.countplot(x=df.Ano ,data=df, hue=df.ocorrencia_classificacao) plt.legend(fontsize='x-large') plt.xlabel("Ano") plt.ylabel("Contagem Classificação") plt.title("Classificação das ocorrências por ano") for p in ax.patches: ax.annotate(p.get_height(), (p.get_x()+0.01, p.get_height()+5))Panorama dos acidentes e incidentes de acordo com os mesesplt.figure(figsize = (15,8)) rc={'font.size': 12, 'axes.labelsize': 24, 'legend.fontsize': 40, 'axes.titlesize': 24, 'xtick.labelsize': 12, 'ytick.labelsize': 12} sns.set(rc=rc) ax = sns.countplot(x=df.Mes ,data=df) plt.xlabel("Mês") plt.ylabel("Contagem acidentes/incidentes") plt.title("Ocorrências por mês") for p in ax.patches: ax.annotate(p.get_height(), (p.get_x()+0.2, p.get_height()+10)) dfPanorama de acidentes e incidentes por estadosplt.figure(figsize = (15,8)) rc={'font.size': 12, 'axes.labelsize': 24, 'legend.fontsize': 40, 'axes.titlesize': 24, 'xtick.labelsize': 12, 'ytick.labelsize': 12} sns.set(rc=rc) ax = sns.countplot(x=df.ocorrencia_uf 
,data=df) plt.xlabel("Estado") plt.ylabel("Contagem acidentes/incidentes") plt.title("Ocorrências por Estado") for p in ax.patches: ax.annotate(p.get_height(), (p.get_x(), p.get_height()+10)) df.ocorrencia_uf.value_counts()Selecionando os 10 estados com maiores quantidades de acidentes e incidentestop10_ufs = ['SP', 'MG', 'RJ' , 'PR', 'RS', 'GO', 'MT', 'PA', 'AM', 'BA'] df_filter_ufs = df.query('ocorrencia_uf in @top10_ufs')Panorama de classificação dos acidentes e incidentes de acordo com cada um dos 10 estadosplt.figure(figsize = (15,8)) rc={'font.size': 12, 'axes.labelsize': 24, 'legend.fontsize': 40, 'axes.titlesize': 24, 'xtick.labelsize': 12, 'ytick.labelsize': 12} sns.set(rc=rc) ax = sns.countplot(x=df_filter_ufs.ocorrencia_uf ,data=df_filter_ufs, hue=df_filter_ufs.ocorrencia_classificacao) plt.legend(fontsize='x-large') plt.xlabel("Estado") plt.ylabel("Contagem Classificação") plt.title("Classificação das ocorrências por estado") for p in ax.patches: ax.annotate(p.get_height(), (p.get_x()+0.01, p.get_height()+5))Escolhendo o estado com mais ocorrências para ser analisadodf_SP = df_filter_ufs[df_filter_ufs['ocorrencia_uf'].str.contains("SP")]Selecionando as 10 cidades do estado de SP que têm mais ocorrências de acidentes e incidentesdf_SP.ocorrencia_cidade.value_counts().head(10) top10_SP_cidades = ['SÃO PAULO', 'GUARULHOS', 'CAMPINAS' , 'JUNDIAÍ', 'RIBEIRÃO PRETO', 'BRAGANÇA PAULIST', 'SÃO JOSÉ DOS CAMPOS', 'SOROCABA', 'SÃO JOSÉ DO RIO PRETO', 'BAURU'] df_filter_SP_cidades = df_SP.query('ocorrencia_cidade in @top10_SP_cidades')Panorama de classificação de acidentes e incidentes no estado de SPplt.figure(figsize = (15,8)) rc={'font.size': 12, 'axes.labelsize': 24, 'legend.fontsize': 40, 'axes.titlesize': 24, 'xtick.labelsize': 12, 'ytick.labelsize': 12} sns.set(rc=rc) ax = sns.countplot(x=df_filter_SP_cidades.ocorrencia_cidade ,data=df_filter_SP_cidades, hue=df_filter_SP_cidades.ocorrencia_classificacao) plt.legend(fontsize='x-large') plt.xlabel("Cidades de SP") plt.ylabel("Contagem Classificação") plt.title("Classificação das ocorrências por cidades de SP") plt.xticks(rotation=90) for p in ax.patches: ax.annotate(p.get_height(), (p.get_x()+0.01, p.get_height()+5))Selecionando os aerodromo com mais ocorrências# Top 12, porque serão retirados os 2 autódromos identificados por * aerodromo = df['ocorrencia_aerodromo'].value_counts().head(12) top_12_aerodromos = df.query('ocorrencia_aerodromo in @aerodromo.index.tolist()') df_plot = top_12_aerodromos.groupby(['ocorrencia_aerodromo', 'ocorrencia_classificacao']).size().reset_index().pivot(columns='ocorrencia_classificacao', index='ocorrencia_aerodromo', values=0) df_plotPanorama das classificações de acidentes e incidentes por aerodromoplt.figure(figsize = (15,8)) rc={'font.size': 12, 'axes.labelsize': 24, 'legend.fontsize': 40, 'axes.titlesize': 24, 'xtick.labelsize': 12, 'ytick.labelsize': 12} sns.set(rc=rc) ax = df_plot.plot(kind='bar', stacked=True, figsize=(15, 8)) plt.legend(fontsize='x-large') plt.xlabel("Top 12 Aerodromo") plt.ylabel("Contagem Classificação") plt.title("Classificação das ocorrências por Aerodromo") plt.xticks(rotation=45) plt.show()Removendo os autódromos identificados por *, ficando apenas com os top 10 aerodromotop_10_aerodromos = aerodromo.index.tolist()[2:] top_10_aerodromos query_10_aerodromos = df.query('ocorrencia_aerodromo in @top_10_aerodromos') top_10_aerodromos_plot = query_10_aerodromos.groupby(['ocorrencia_aerodromo', 
'ocorrencia_classificacao']).size().reset_index().pivot(columns='ocorrencia_classificacao', index='ocorrencia_aerodromo', values=0) top_10_aerodromos_plotPanorama dos acidentes e incidentes dos top 10 aerodromoplt.figure(figsize = (15,8)) rc={'font.size': 12, 'axes.labelsize': 24, 'legend.fontsize': 40, 'axes.titlesize': 24, 'xtick.labelsize': 12, 'ytick.labelsize': 12} sns.set(rc=rc) ax = top_10_aerodromos_plot.plot(kind='bar', stacked=True, figsize=(15, 8)) plt.legend(fontsize='x-large') plt.xlabel("Top 10 Aerodromos") plt.ylabel("Contagem Classificação") plt.title("Classificação das ocorrências por Aerodromo") plt.xticks(rotation=45) bottom = np.zeros(len(top_10_aerodromos_plot)) for i, col in enumerate(top_10_aerodromos_plot.columns): ax.bar(top_10_aerodromos_plot.index, top_10_aerodromos_plot[col], bottom=bottom, label=col) bottom += np.array(top_10_aerodromos_plot[col]) # Somando as linhas dos dados para obter o valor total de cada barra. totals = top_10_aerodromos_plot.sum(axis=1) # Add labels to each bar. for i, total in enumerate(totals): ax.text(totals.index[i], total + 1, round(total), ha='center', weight='bold') plt.show() df[df['ocorrencia_aerodromo'] == 'SBGR']Figure 1 Pull in data from differential expression screencd .. import NotebookImport from DX_screen import */cellar/users/agross/anaconda2/lib/python2.7/site-packages/IPython/nbformat.py:13: ShimWarning: The `IPython.nbformat` package has been deprecated. You should import from nbformat instead. "You should import from nbformat instead.", ShimWarning)Read in microarray validation data.microarray = pd.read_hdf('/data_ssd/GEO_microarray_dx.h5', 'data') tissue = pd.read_hdf('/data_ssd/GEO_microarray_dx.h5', 'tissue') dx = microarray.xs('01',1,1) - microarray.xs('11',1,1) tt = tissue[:,'01'].replace('COAD','COADREAD') pos = (dx>0).groupby(tt, axis=1).sum() count = dx.groupby(tt, axis=1).count().replace(0, np.nan) count = count[count.sum(1) > 500] frac_df = 1.*pos / count frac_microarray = frac_df.mean(1) pts = ti(microarray.groupby(level=0, axis=1).size() > 1) cc = microarray.columns pts = list({p[0] for p in cc if (p[0], '01') in cc and (p[0], '11') in cc}) microarray.shape frac_microarray.name = 'GEO mean' df = frac_df df = df.join(frac_microarray).ix[dx_rna.frac.index].dropna(0, how='all') df.to_csv('/cellar/users/agross/Desktop/Figures/geo.csv') tt = tissue.groupby(lambda s: s[0].split('_')[0]).first() cc = pd.DataFrame([(c.split('_')[0], c) for c in pts])[0].value_counts() pd.concat([tt, cc], 1) match_series(frac_microarray.dropna(), dx_rna.frac.dropna())[0].shape fig, ax = subplots(figsize=(4,4)) s1, s2 = match_series(dx_rna.frac, frac_microarray) plot_regression(s1, s2, density=True, rad=.02, ax=ax, rasterized=True, line_args={'lw':0}) ax.set_ylabel("GEO microarray") ax.set_xlabel("TCGA mRNASeq") ann = ax.get_children()[4] ann.set_text(ann.get_text().split()[0]) ax.set_xticks([0, .5, 1]) ax.set_yticks([0, .5, 1]) fig.tight_layout() fig.savefig('/cellar/users/agross/figures/geo_fup.png', dpi=300) fig, axs = subplots(2,2, figsize=(8,6)) fig_1e(axs[0][0]) ax=axs[0][1] s1, s2 = match_series(dx_rna.frac, frac_microarray) plot_regression(s1, s2, density=True, rad=.02, ax=ax, rasterized=True, line_args={'lw':0}) ax.set_ylabel("GEO microarray") ax.set_xlabel("TCGA mRNASeq") ann = ax.get_children()[4] ann.set_text(ann.get_text().split()[0]) ax.set_xticks([0, .5, 1]) ax.set_yticks([0, .5, 1]) fig_1f(axs[1][0]) fig_1g(axs[1][1]) fig.tight_layout() fig = plt.figure(figsize=(7, 3)) ax1 = plt.subplot2grid((1, 7), (0, 0), 
colspan=4) ax2 = plt.subplot2grid((1, 7), (0, 4), colspan=3) fig_1e(ax1) ax=ax2 s1, s2 = match_series(dx_rna.frac, frac_microarray) plot_regression(s1, s2, density=True, rad=.02, ax=ax, rasterized=True, line_args={'lw':0}) ax.set_ylabel("GEO microarray") ax.set_xlabel("TCGA mRNASeq") ann = ax.get_children()[4] ann.set_text(ann.get_text().split()[0]) ax.set_xticks([0, .5, 1]) ax.set_yticks([0, .5, 1]) fig.tight_layout() fig.savefig('/cellar/users/agross/Desktop/Figures/dx_fig1.pdf', transparent=True) fig, axs = subplots(1,2, figsize=(15,3)) fig_1e(axs[0]) ax=axs[1] s1, s2 = match_series(dx_rna.frac, frac_microarray) plot_regression(s1, s2, density=True, rad=.02, ax=ax, rasterized=True, line_args={'lw':0}) ax.set_ylabel("GEO microarray") ax.set_xlabel("TCGA mRNASeq") ann = ax.get_children()[4] ann.set_text(ann.get_text().split()[0]) ax.set_xticks([0, .5, 1]) ax.set_yticks([0, .5, 1]) fig.tight_layout() colors def fig_1f(ax): v = pd.concat([dx_rna.frac, dx_rna.frac.ix[ti(gs2['REACTOME_CELL_CYCLE']>0)], dx_rna.frac.ix[ti(gs2['KEGG_FATTY_ACID_METABOLISM']>0)]]) v1 = pd.concat([pd.Series('', dx_rna.frac.index), pd.Series('Cell Cycle', ti(gs2['REACTOME_CELL_CYCLE']>0)), pd.Series('Fatty Acid\nMetabolism', ti(gs2['KEGG_FATTY_ACID_METABOLISM']>0))]) v1.name = '' v.name = 'Fraction Overexpressed' draw_dist(v, v1, bins=100, ax=ax, colors={'': 'grey', 'Cell Cycle': colors[0], 'Fatty Acid\nMetabolism': colors[1]}) ax.legend(loc='upper right') ax.set_yticks([]) ax.set_ylabel('Density') ax.set_xlabel('Fraction Overexpressed') ax.set_xticks([0,.5,1.]) prettify_ax(ax) return ax from DX_screen import fig_1f fig, axs = subplots(4,1, figsize=(5,12)) fig_1e(axs[0]) ax=axs[1] s1, s2 = match_series(dx_rna.frac, frac_microarray) plot_regression(s1, s2, density=True, rad=.02, ax=ax, rasterized=True, line_args={'lw':0}) ax.set_ylabel("GEO microarray") ax.set_xlabel("TCGA mRNASeq") ann = ax.get_children()[4] ann.set_text(ann.get_text().split()[0]) ax.set_xticks([0, .5, 1]) ax.set_yticks([0, .5, 1]) fig_1f(axs[2]) fig_1g(axs[3]) letters = list(map(chr, range(97, 123)))[:6] for i,ax in enumerate(axs): ax.text(-0.15, 1.15, letters[i], transform=ax.transAxes, fontsize=20, fontweight='bold', va='top', ha='right') prettify_ax(ax) fig.tight_layout() fig.savefig('/cellar/users/agross/Desktop/Figures/dx_fig1.png', dpi=300, tranparent=True)Target EncodingMost machine learning algorithms require the input data to be a numeric matrix, where each row is a sample and each column is a feature. This makes sense for continuous features, where a larger number obviously corresponds to a larger value (features such as voltage, purchase amount, or number of clicks). How to represent categorical features is less obvious. Categorical features (such as state, merchant ID, domain name, or phone number) don't have an intrinsic ordering, and so most of the time we can't just represent them with random numbers. Who's to say that Colorado is "greater than" Minnesota? Or DHL "less than" FedEx? To represent categorical data, we need to find a way to encode the categories numerically.There are quite a few ways to encode categorical data. We can simply assign each category an integer randomly (called label encoding). Alternatively, we can create a new feature for each possible category, and set the feature to be 1 for each sample having that category, and otherwise set it to be 0 (called one-hot encoding). 
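As a quick illustration of those first two options, here's a minimal pandas sketch (the toy 'carrier' column is hypothetical and only for illustration):

import pandas as pd

# A toy categorical column (hypothetical values, for illustration only)
carriers = pd.Series(['DHL', 'FedEx', 'DHL', 'UPS'], name='carrier')

# Label encoding: each category gets an arbitrary integer code
codes, uniques = pd.factorize(carriers)
print(codes)                     # e.g. [0 1 0 2]

# One-hot encoding: one binary column per category
print(pd.get_dummies(carriers))  # columns DHL, FedEx, UPS filled with 0s and 1s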
If we're using neural networks, we could let our network learn the embeddings of categories in a high-dimensional space (called entity embedding, or in neural NLP models often just "embedding").However, these methods all have drawbacks. Label encoding doesn't work well at all with non-ordinal categorical features. One-hot encoding leads to a humongous number of added features when your data contains a large number of categories. Entity embedding can only be used with neural network models (or at least with models which are trained using stochastic gradient descent).A different encoding method which we'll try in this post is called target encoding (also known as "mean encoding", and really should probably be called "mean target encoding"). With target encoding, each category is replaced with the mean target value for samples having that category. The "target value" is the y-variable, or the value our model is trying to predict. This allows us to encode an arbitrary number of categories without increasing the dimensionality of our data! Of course, there are drawbacks to target encoding as well. Target encoding introduces noise into the encoding of the categorical variables (noise which comes from the noise in the target variable itself). Also, naively applying target encoding can allow data leakage, leading to overfitting and poor predictive performance. To fix that problem, we'll have to construct target encoders which prevent data leakage. And even with those leak-proof target encoders, there are situations where one would be better off using one-hot or other encoding methods. One-hot can be better in situations with few categories, or with data where there are strong interaction effects.In this post we'll evaluate different encoding schemes, build a cross-fold target encoder to mitigate the drawbacks of the naive target encoder, and determine how the performance of predictive models change based on the type of category encoding used, the number of categories in the dataset, and the presence of interaction effects. TODO: outlineFirst let's import the packages we'll be using.import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics import mean_absolute_error, make_scorer from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.impute import SimpleImputer from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score from sklearn.base import BaseEstimator, TransformerMixin from sklearn.linear_model import BayesianRidge from xgboost import XGBRegressor np.random.seed(12345) # TODO: remove this import warnings from sklearn.exceptions import DataConversionWarning warnings.filterwarnings(action='ignore', category=DataConversionWarning)DataTo evaluate the effectiveness of different encoding algorithms, we'll want to be able to generate data with different numbers of samples, features, and categories. Let's make a function to generate categorical datasets, which allows us to set these different aspects of the data. The categories have a direct effect on the target variable which we'll try to predict.def make_categorical_regression(n_samples=100, n_features=10, n_informative=10, n_categories=10, imbalance=0.0, noise=1.0, n_cont_features=0, cont_weight=0.1, interactions=0.0): """Generate a regression problem with categorical features. 
Parameters ---------- n_samples : int > 0 Number of samples to generate Default = 100 n_features : int > 0 Number of categorical features to generate Default = 10 n_informative : int >= 0 Number of features which carry information about the target. Default = 10 n_categories : int > 0 Number of categories per feature. Default = 10 imbalance : float > 0 How much imbalance there is in the number of occurrences of each category. Larger values yield a higher concentration of samples in only a few categories. An imbalance of 0 yields the same number of samples in each category. Default = 0.0 noise : float > 0 Noise to add to target. Default = 1.0 n_cont_features : int >= 0 Number of continuous (non-categorical) features. Default = 0 cont_weight : float > 0 Weight of the continuous variables' effect. Default = 0.1 interactions : float >= 0 and <= 1 Proportion of the variance due to interaction effects. Note that this only adds interaction effects between the categorical features, not the continuous features. Default = 0.0 Returns ------- X : pandas DataFrame Features. Of shape (n_samples, n_features+n_cont_features) y : pandas Series of shape (n_samples,) Target variable. """ def beta_binomial(n, a, b): """Beta-binomial probability mass function. Parameters ---------- n : int Number of trials a : float > 0 Alpha parameter b : float > 0 Beta parameter Returns ------- ndarray of size (n,) Probability mass function. """ from scipy.special import beta from scipy.misc import comb k = np.arange(n+1) return comb(n, k)*beta(k+a, n-k+b)/beta(a, b) # Check inputs if not isinstance(n_samples, int): raise TypeError('n_samples must be an int') if n_samples < 1: raise ValueError('n_samples must be one or greater') if not isinstance(n_features, int): raise TypeError('n_features must be an int') if n_features < 1: raise ValueError('n_features must be one or greater') if not isinstance(n_informative, int): raise TypeError('n_informative must be an int') if n_informative < 0: raise ValueError('n_informative must be non-negative') if not isinstance(n_categories, int): raise TypeError('n_categories must be an int') if n_categories < 1: raise ValueError('n_categories must be one or greater') if not isinstance(imbalance, float): raise TypeError('imbalance must be a float') if imbalance < 0: raise ValueError('imbalance must be non-negative') if not isinstance(noise, float): raise TypeError('noise must be a float') if noise < 0: raise ValueError('noise must be positive') if not isinstance(n_cont_features, int): raise TypeError('n_cont_features must be an int') if n_cont_features < 0: raise ValueError('n_cont_features must be non-negative') if not isinstance(cont_weight, float): raise TypeError('cont_weight must be a float') if cont_weight < 0: raise ValueError('cont_weight must be non-negative') if not isinstance(interactions, float): raise TypeError('interactions must be a float') if interactions < 0: raise ValueError('interactions must be non-negative') # Generate random categorical data (using category probabilities # drawn from a beta-binomial dist w/ alpha=1, beta=imbalance+1) cat_probs = beta_binomial(n_categories-1, 1.0, imbalance+1) categories = np.empty((n_samples, n_features), dtype='uint64') for iC in range(n_features): categories[:,iC] = np.random.choice(np.arange(n_categories), size=n_samples, p=cat_probs) # Generate random values for each category cat_vals = np.random.randn(n_categories, n_features) # Set non-informative columns' effect to 0 cat_vals[:,:(n_features-n_informative)] = 0 # Compute target 
variable from those categories and their values y = np.zeros(n_samples) for iC in range(n_features): y += (1.0-interactions) * cat_vals[categories[:,iC], iC] # Add interaction effects if interactions > 0: for iC1 in range(n_informative): for iC2 in range(iC1+1, n_informative): int_vals = np.random.randn(n_categories, #interaction n_categories) #effects y += interactions * int_vals[categories[:,iC1], categories[:,iC2]] # Add noise y += noise*np.random.randn(n_samples) # Generate dataframe from categories cat_strs = [''.join([chr(ord(c)+49) for c in str(n)]) for n in range(n_categories)] X = pd.DataFrame() for iC in range(n_features): col_str = 'categorical_'+str(iC) X[col_str] = [cat_strs[i] for i in categories[:,iC]] # Add continuous features for iC in range(n_cont_features): col_str = 'continuous_'+str(iC) X[col_str] = cont_weight*np.random.randn(n_samples) y += np.random.randn()*X[col_str] # Generate series from target y = pd.Series(data=y, index=X.index) # Return features and target return X, yNow, we can easily generate data to test our encoders on:# Generate categorical data and target X, y = make_categorical_regression(n_samples=2000, n_features=10, n_categories=100, n_informative=1, imbalance=2.0) # Split into test and training data X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.5)/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:74: DeprecationWarning: `comb` is deprecated! Importing `comb` from scipy.misc is deprecated in scipy 1.0.0. Use `scipy.special.comb` instead.The ten features in the dataset we generated are all categorical:X_train.sample(10)Using the pandas package, these are stored as the "object" datatype:X_train.info() Int64Index: 1000 entries, 523 to 583 Data columns (total 10 columns): categorical_0 1000 non-null object categorical_1 1000 non-null object categorical_2 1000 non-null object categorical_3 1000 non-null object categorical_4 1000 non-null object categorical_5 1000 non-null object categorical_6 1000 non-null object categorical_7 1000 non-null object categorical_8 1000 non-null object categorical_9 1000 non-null object dtypes: object(10) memory usage: 85.9+ KBWhile all the features are categorical, the target variable is continuous:y_train.hist(bins=20)Now the question is: which encoding scheme best allows us to glean the most information from the categorical features, leading to the best predictions of the target variable? BaselineFor comparison, how well would we do if we just predicted the mean target value for all samples? We'll use the mean absolute error (MAE) as our performance metric.mean_absolute_error(y_train, np.full(y_train.shape[0], y_train.mean()))So our predictive models should definitely be shooting for a mean absolute error of less than that! But, we added random noise with a standard deviation of 1, so even if our model is *perfect*, the best MAE we can expect is:mean_absolute_error(np.random.randn(10000), np.zeros(10000))Label EncodingThe simplest categorical encoding method is label encoding, where each category is simply replaced with a unique integer. However, there is no intrinsic relationship between the categories and the numbers being used to replace them. In the diagram below, category A is replaced with 0, and B with 1 - but there is no reason to think that category A is somehow greater than category B.TODO: diagramWe'll create a [scikit-learn](https://scikit-learn.org/stable/index.html)-compatible transformer class with which to label encode our data. 
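Before building the transformer, here's a minimal sketch of what label encoding does to a single column (assuming the X_train generated above); the LabelEncoder class below builds its mapping from these same pandas category codes:

# Label-encode one column using pandas category codes
# (assumes X_train from above)
codes = X_train['categorical_0'].astype('category').cat.codes
print(codes.head())
print(codes.nunique(), 'unique integer labels')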
Note that we could instead just use [scikit-learn's LabelEncoder](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) - although their version is a little wasteful in that it doesn't choose a data type efficiently.class LabelEncoder(BaseEstimator, TransformerMixin): """Label encoder. Replaces categorical column(s) with integer labels for each unique category in original column. """ def __init__(self, cols=None): """Label encoder. Parameters ---------- cols : list of str Columns to label encode. Default is to label encode all categorical columns in the DataFrame. """ if isinstance(cols, str): self.cols = [cols] else: self.cols = cols def fit(self, X, y): """Fit label encoder to X and y Parameters ---------- X : pandas DataFrame, shape [n_samples, n_columns] DataFrame containing columns to label encode y : pandas Series, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # Encode all categorical cols by default if self.cols is None: self.cols = [c for c in X if str(X[c].dtype)=='object'] # Check columns are in X for col in self.cols: if col not in X: raise ValueError('Column \''+col+'\' not in X') # Create the map from objects to integers for each column self.maps = dict() #dict to store map for each column for col in self.cols: self.maps[col] = dict(zip( X[col].values, X[col].astype('category').cat.codes.values )) # Return fit object return self def transform(self, X, y=None): """Perform the label encoding transformation. Parameters ---------- X : pandas DataFrame, shape [n_samples, n_columns] DataFrame containing columns to label encode Returns ------- pandas DataFrame Input DataFrame with transformed columns """ Xo = X.copy() for col, tmap in self.maps.items(): # Map the column Xo[col] = Xo[col].map(tmap) # Convert to appropriate datatype max_val = max(tmap.values()) if Xo[col].isnull().any(): #nulls, so use float! if max_val < 8388608: dtype = 'float32' else: dtype = 'float64' else: if max_val < 256: dtype = 'uint8' elif max_val < 65536: dtype = 'uint16' elif max_val < 4294967296: dtype = 'uint32' else: dtype = 'uint64' Xo[col] = Xo[col].astype(dtype) # Return encoded dataframe return Xo def fit_transform(self, X, y=None): """Fit and transform the data via label encoding. 
Parameters ---------- X : pandas DataFrame, shape [n_samples, n_columns] DataFrame containing columns to label encode y : pandas Series, shape = [n_samples] Target values Returns ------- pandas DataFrame Input DataFrame with transformed columns """ return self.fit(X, y).transform(X, y)Now we can convert the categories to integers:# Label encode the categorical data le = LabelEncoder() X_label_encoded = le.fit_transform(X_train, y_train) X_label_encoded.sample(10)But again, these integers aren't related to the categories in any meaningful way - aside from the fact that each unique integer corresponds to a unique category.We can create a processing pipeline that label-encodes the data, and then uses a Bayesian ridge regression to predict the target variable, and compute the cross-validated mean absolute error of that model.# Regression model model_le = Pipeline([ ('label-encoder', LabelEncoder()), ('scaler', StandardScaler()), ('imputer', SimpleImputer(strategy='mean')), ('regressor', BayesianRidge()) ]) # Cross-validated MAE mae_scorer = make_scorer(mean_absolute_error) scores = cross_val_score(model_le, X_train, y_train, cv=3, scoring=mae_scorer) print('Cross-validated MAE: %0.3f +/- %0.3f' % (scores.mean(), scores.std()))Cross-validated MAE: 1.132 +/- 0.022That's not much better than just predicting the mean!The error is similarly poor on validation data.# MAE on test data model_le.fit(X_train, y_train) y_pred = model_le.predict(X_test) test_mae = mean_absolute_error(y_test, y_pred) print('Validation MAE: %0.3f' % test_mae)Validation MAE: 1.176One-hot EncodingOne-hot encoding, sometimes called "dummy coding", encodes the categorical information a little more intelligently. Instead of assigning random integers to categories, a new feature is created for each category. For each sample, the new feature is 1 if the sample's category matches the new feature, otherwise the value is 0. This allows us to encode the categorical information numerically, without loss of information, but ends up adding a lot of columns when the original categorical feature has many unique categories.TODO: diagramLike before, we'll create an sklearn transformer class to perform one-hot encoding. And again we could have used sklearn's built-in [OneHotEncoder class](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html).class OneHotEncoder(BaseEstimator, TransformerMixin): """One-hot encoder. Replaces categorical column(s) with binary columns for each unique value in original column. """ def __init__(self, cols=None, reduce_df=False): """One-hot encoder. Parameters ---------- cols : list of str Columns to one-hot encode. Default is to one-hot encode all categorical columns in the DataFrame. reduce_df : bool Whether to use reduced degrees of freedom for the encoding (that is, add N-1 one-hot columns for a column with N categories). E.g. for a column with categories A, B, and C: When reduce_df is True, A=[1, 0], B=[0, 1], and C=[0, 0]. When reduce_df is False, A=[1, 0, 0], B=[0, 1, 0], and C=[0, 0, 1] Default = False """ if isinstance(cols, str): self.cols = [cols] else: self.cols = cols self.reduce_df = reduce_df def fit(self, X, y): """Fit one-hot encoder to X and y Parameters ---------- X : pandas DataFrame, shape [n_samples, n_columns] DataFrame containing columns to encode y : pandas Series, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. 
""" # Encode all categorical cols by default if self.cols is None: self.cols = [c for c in X if str(X[c].dtype)=='object'] # Check columns are in X for col in self.cols: if col not in X: raise ValueError('Column \''+col+'\' not in X') # Store each unique value self.maps = dict() #dict to store map for each column for col in self.cols: self.maps[col] = [] uniques = X[col].unique() for unique in uniques: self.maps[col].append(unique) if self.reduce_df: del self.maps[col][-1] # Return fit object return self def transform(self, X, y=None): """Perform the one-hot encoding transformation. Parameters ---------- X : pandas DataFrame, shape [n_samples, n_columns] DataFrame containing columns to one-hot encode Returns ------- pandas DataFrame Input DataFrame with transformed columns """ Xo = X.copy() for col, vals in self.maps.items(): for val in vals: new_col = col+'_'+str(val) Xo[new_col] = (Xo[col]==val).astype('uint8') del Xo[col] return Xo def fit_transform(self, X, y=None): """Fit and transform the data via one-hot encoding. Parameters ---------- X : pandas DataFrame, shape [n_samples, n_columns] DataFrame containing columns to one-hot encode y : pandas Series, shape = [n_samples] Target values Returns ------- pandas DataFrame Input DataFrame with transformed columns """ return self.fit(X, y).transform(X, y)Now, instead of replacing categories with integer labels, we've create a new column for each category in each original column. The value in a given column is 1 when the original category matches, otherwise the value is 0. The values in the dataframe below are mostly 0s because the data we generated has so many categories.# One-hot-encode the categorical data ohe = OneHotEncoder() X_one_hot = ohe.fit_transform(X_train, y_train) X_one_hot.sample(10)Note that although we've now encoded the categorical data in a meaningful way, our data matrix is huge!# Compare sizes print('Original size:', X_train.shape) print('One-hot encoded size:', X_one_hot.shape)Original size: (1000, 10) One-hot encoded size: (1000, 853)We can fit the same model with the one-hot encoded data as we fit to the label-encoded data, and compute the cross-validated error.# Regression model model_oh = Pipeline([ ('encoder', OneHotEncoder()), ('scaler', StandardScaler()), ('imputer', SimpleImputer(strategy='mean')), ('regressor', BayesianRidge()) ]) # Cross-validated MAE scores = cross_val_score(model_oh, X_train, y_train, cv=3, scoring=mae_scorer) print('Cross-validated MAE:', scores.mean(), '+/-', scores.std())Cross-validated MAE: 1.0386222429286203 +/- 0.028114652268138147Unlike with label encoding, when using one-hot encoding our predictions are definitely better than just guessing the mean - but not by a whole lot! Performance on the validation dataset is about the same:# MAE on test data model_oh.fit(X_train, y_train) y_pred = model_oh.predict(X_test) test_mae = mean_absolute_error(y_test, y_pred) print('Validation MAE:', test_mae)Validation MAE: 1.028751865940599Target EncodingThe problem with one-hot encoding is that it greatly increases the dimensionality of the training data (by adding a new feature for each unique category in the original dataset). 
This often leads to poorer model performance due to the curse of dimensionality - i.e., all else being equal, it is harder for machine learning algorithms to learn from data which has more dimensions.Target encoding allows us to retain actual useful information about the categories (like one-hot encoding, but unlike label encoding), while keeping the dimensionality of our data the same as the unencoded data (like label encoding, but unlike one-hot encoding). To target encode data, for each feature, we simply replace each category with the mean target value for samples which have that category.TODO: diagramLet's create a transformer class which performs this target encoding.class TargetEncoder(BaseEstimator, TransformerMixin): """Target encoder. Replaces categorical column(s) with the mean target value for each category. """ def __init__(self, cols=None): """Target encoder Parameters ---------- cols : list of str Columns to target encode. Default is to target encode all categorical columns in the DataFrame. """ if isinstance(cols, str): self.cols = [cols] else: self.cols = cols def fit(self, X, y): """Fit target encoder to X and y Parameters ---------- X : pandas DataFrame, shape [n_samples, n_columns] DataFrame containing columns to encode y : pandas Series, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # Encode all categorical cols by default if self.cols is None: self.cols = [col for col in X if str(X[col].dtype)=='object'] # Check columns are in X for col in self.cols: if col not in X: raise ValueError('Column \''+col+'\' not in X') # Encode each element of each column self.maps = dict() #dict to store map for each column for col in self.cols: tmap = dict() uniques = X[col].unique() for unique in uniques: tmap[unique] = y[X[col]==unique].mean() self.maps[col] = tmap return self def transform(self, X, y=None): """Perform the target encoding transformation. Parameters ---------- X : pandas DataFrame, shape [n_samples, n_columns] DataFrame containing columns to encode Returns ------- pandas DataFrame Input DataFrame with transformed columns """ Xo = X.copy() for col, tmap in self.maps.items(): vals = np.full(X.shape[0], np.nan) for val, mean_target in tmap.items(): vals[X[col]==val] = mean_target Xo[col] = vals return Xo def fit_transform(self, X, y=None): """Fit and transform the data via target encoding. Parameters ---------- X : pandas DataFrame, shape [n_samples, n_columns] DataFrame containing columns to encode y : pandas Series, shape = [n_samples] Target values (required!). Returns ------- pandas DataFrame Input DataFrame with transformed columns """ return self.fit(X, y).transform(X, y)Now, instead of creating a bazillion columns (like with one-hot encoding), we can simply replace each category with the mean target value for that category. This allows us to represent the categorical information in the same dimensionality, while retaining some information about the categories. 
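At its core, the fit step above is just a per-category mean of the target. Here's a minimal sketch for a single column (assuming the X_train and y_train from above):

# The essence of target encoding for one column: replace each category with
# the mean target value of the samples having that category
# (assumes X_train / y_train from above; their indexes are aligned by train_test_split)
means = y_train.groupby(X_train['categorical_0']).mean()
encoded = X_train['categorical_0'].map(means)
print(encoded.head())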
By target-encoding the features matrix, we get a matrix of the same size, but filled with continuous values instead of categories:

# Target encode the categorical data
te = TargetEncoder()
X_target_encoded = te.fit_transform(X_train, y_train)
X_target_encoded.sample(10)

Note that our target-encoded matrix is the same size as the original (unlike the huge one-hot transformed matrix):

# Compare sizes
print('Original size:', X_train.shape)
print('Target encoded size:', X_target_encoded.shape)

Original size: (1000, 10) Target encoded size: (1000, 10)

Also, each column has exactly as many unique continuous values as it did categories. This is because we've simply replaced each category with the mean target value for that category.

# Compare category counts
print('Original:')
print(X_train.nunique())
print('\nTarget encoded:')
print(X_target_encoded.nunique())

Original: categorical_0 84 categorical_1 81 categorical_2 85 categorical_3 88 categorical_4 84 categorical_5 86 categorical_6 88 categorical_7 88 categorical_8 90 categorical_9 79 dtype: int64 Target encoded: categorical_0 84 categorical_1 81 categorical_2 85 categorical_3 88 categorical_4 84 categorical_5 86 categorical_6 88 categorical_7 88 categorical_8 90 categorical_9 79 dtype: int64

If we fit the same model as before, but now after target-encoding the categories, the error of our model is far lower!

# Regression model
model_te = Pipeline([
    ('encoder', TargetEncoder()),
    ('scaler', StandardScaler()),
    ('imputer', SimpleImputer(strategy='mean')),
    ('regressor', BayesianRidge())
])

# Cross-validated MAE
scores = cross_val_score(model_te, X_train, y_train, cv=3, scoring=mae_scorer)
print('Cross-validated MAE:', scores.mean(), '+/-', scores.std())

Cross-validated MAE: 0.9402165006761526 +/- 0.029713106940791913

The performance on the test data is about the same, but slightly better, because the final model is trained on the full training set rather than on cross-validation folds.

# MAE on test data
model_te.fit(X_train, y_train)
y_pred = model_te.predict(X_test)
test_mae = mean_absolute_error(y_test, y_pred)
print('Validation MAE:', test_mae)

Validation MAE: 0.9325682408250325

While the error is lower using target encoding than with one-hot encoding, in naively target-encoding our categories, we've introduced a data leak from the target variable for one sample into the features for that same sample! In the diagram above, notice how the i-th sample's target value is used in the computation of the mean target value for the i-th sample's category, and then the i-th sample's category is replaced with that mean. Leaking the target variable into our predictors like that causes our learning algorithm to over-depend on the target-encoded features, which results in the algorithm overfitting on the data. Although we gain predictive power by keeping the dimensionality of our training data reasonable, we lose a lot of that gain by allowing our model to overfit to the target-encoded columns!

Cross-Fold Target Encoding

To clamp down on the data leakage, we need to ensure that we're not using the target value from a given sample to compute its own target-encoded values. However, we can still use *other* samples in the training data to compute the mean target values for *this* sample's category. There are a few different ways we can do this.
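To make the leak concrete before we fix it: for any category that occurs exactly once in the training data, the naive encoding is exactly that sample's own target value. A quick check (assuming the X_train, y_train, and X_target_encoded objects from above):

# Leak check: a singleton category's naive encoding equals its own target
# (assumes X_train, y_train, and X_target_encoded from above)
counts = X_train['categorical_0'].value_counts()
singletons = counts[counts == 1].index
if len(singletons) > 0:
    ix = X_train['categorical_0'] == singletons[0]
    print(X_target_encoded.loc[ix, 'categorical_0'].iloc[0], y_train[ix].iloc[0])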
We could compute the per-category target means in a cross-fold fashion, or by leaving the current sample out (leave-one-out).First we'll try cross-fold target encoding, where we'll split the data up into $N$ folds, and compute the means for each category in the $i$-th fold using data in all the other folds. The diagram below illustrates an example using 2 folds.TODO: diagramLet's create a transformer class to perform the cross-fold target encoding. There are a few things we need to watch out for now which we didn't have to worry about with the naive target encoder. First, we may end up with NaNs (empty values) even when there were categories in the original dataframe. This will happen for a category that appears in one fold, but when there are no examples of that category in the other folds. Also, we can't perform cross-fold encoding on our test data, because we don't have any target values for which to compute the category means! So, we have to use the category means from the training data in that case.class TargetEncoderCV(TargetEncoder): """Cross-fold target encoder. """ def __init__(self, n_splits=3, shuffle=True, cols=None): """Cross-fold target encoding for categorical features. Parameters ---------- n_splits : int Number of cross-fold splits. Default = 3. shuffle : bool Whether to shuffle the data when splitting into folds. cols : list of str Columns to target encode. """ self.n_splits = n_splits self.shuffle = shuffle self.cols = cols def fit(self, X, y): """Fit cross-fold target encoder to X and y Parameters ---------- X : pandas DataFrame, shape [n_samples, n_columns] DataFrame containing columns to encode y : pandas Series, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ self._target_encoder = TargetEncoder(cols=self.cols) self._target_encoder.fit(X, y) return self def transform(self, X, y=None): """Perform the target encoding transformation. Uses cross-fold target encoding for the training fold, and uses normal target encoding for the test fold. Parameters ---------- X : pandas DataFrame, shape [n_samples, n_columns] DataFrame containing columns to encode Returns ------- pandas DataFrame Input DataFrame with transformed columns """ # Use target encoding from fit() if this is test data if y is None: return self._target_encoder.transform(X) # Compute means for each fold self._train_ix = [] self._test_ix = [] self._fit_tes = [] kf = KFold(n_splits=self.n_splits, shuffle=self.shuffle) for train_ix, test_ix in kf.split(X): self._train_ix.append(train_ix) self._test_ix.append(test_ix) te = TargetEncoder(cols=self.cols) if isinstance(X, pd.DataFrame): self._fit_tes.append(te.fit(X.iloc[train_ix,:], y.iloc[train_ix])) elif isinstance(X, np.ndarray): self._fit_tes.append(te.fit(X[train_ix,:], y[train_ix])) else: raise TypeError('X must be DataFrame or ndarray') # Apply means across folds Xo = X.copy() for ix in range(len(self._test_ix)): test_ix = self._test_ix[ix] if isinstance(X, pd.DataFrame): Xo.iloc[test_ix,:] = self._fit_tes[ix].transform(X.iloc[test_ix,:]) elif isinstance(X, np.ndarray): Xo[test_ix,:] = self._fit_tes[ix].transform(X[test_ix,:]) else: raise TypeError('X must be DataFrame or ndarray') return Xo def fit_transform(self, X, y=None): """Fit and transform the data via target encoding. Parameters ---------- X : pandas DataFrame, shape [n_samples, n_columns] DataFrame containing columns to encode y : pandas Series, shape = [n_samples] Target values (required!). 
Returns ------- pandas DataFrame Input DataFrame with transformed columns """ return self.fit(X, y).transform(X, y)With this encoder, we can convert the categories into continuous values, just like we did with the naive target encoding.# Cross-fold Target encode the categorical data te = TargetEncoderCV() X_target_encoded_cv = te.fit_transform(X_train, y_train) X_target_encoded_cv.sample(10)Like with normal target encoding, our transformed matrix is the same shape as the original:# Compare sizes print('Original size:', X_train.shape) print('Target encoded size:', X_target_encoded_cv.shape)Original size: (1000, 10) Target encoded size: (1000, 10)However, now we have more unique continuous values in each column than we did categories, because we've target-encoded the categories separately for each fold (since we used 3 folds, there are about 3 times as many unique values).# Compare category counts print('Original:') print(X_train.nunique()) print('\nTarget encoded:') print(X_target_encoded_cv.nunique())Original: categorical_0 84 categorical_1 81 categorical_2 85 categorical_3 88 categorical_4 84 categorical_5 86 categorical_6 88 categorical_7 88 categorical_8 90 categorical_9 79 dtype: int64 Target encoded: categorical_0 214 categorical_1 203 categorical_2 201 categorical_3 203 categorical_4 208 categorical_5 207 categorical_6 207 categorical_7 205 categorical_8 213 categorical_9 200 dtype: int64We can fit the same model as before, but now using cross-fold target encoding.# Regression model model_te_cv = Pipeline([ ('encoder', TargetEncoderCV()), ('scaler', StandardScaler()), ('imputer', SimpleImputer(strategy='mean')), ('regressor', BayesianRidge()) ]) # Cross-validated MAE scores = cross_val_score(model_te_cv, X_train, y_train, cv=3, scoring=mae_scorer) print('Cross-validated MAE:', scores.mean(), '+/-', scores.std())Cross-validated MAE: 0.8351049783190474 +/- 0.04365424683197591Now our model's error is very low - pretty close to the lower bound of ~0.8! And the cross-validated performance matches the performance on the validation data.# MAE on test data model_te_cv.fit(X_train, y_train) y_pred = model_te_cv.predict(X_test) test_mae = mean_absolute_error(y_test, y_pred) print('Validation MAE:', test_mae)Validation MAE: 0.8389909316238072Leave-one-out Target EncodingWe could also prevent the target data leakage by using a leave-one-out scheme. With this method, we compute the per-category means as with the naive target encoder, but we don't include the current sample in that computation.TODO: diagramThis may seem like it will take much longer than the cross-fold method, but it actually ends up being faster, because we can compute the mean without the effect of each sample in an efficient way. Normally the mean is computed with:$$v = \frac{1}{N_C} \sum_{j \in C} y_j$$where $v$ is the target-encoded value for all samples having category $C$, $N_C$ is the number of samples having category $C$, and $j \in C$ indicates all the samples which have category $C$.With leave-one-out target encoding, we can first compute the count of samples having category $C$ ($N_C$), and then separately compute the sum of the target values of those categories:$$S_C = \sum_{j \in C} y_j$$Then, the mean target value for samples having category $C$, excluding the effect of sample $i$, can be computed with$$v_i = \frac{S_C - y_i}{N_C-1}$$Let's build a transformer class which performs the leave-one-out target encoding using that trick.class TargetEncoderLOO(TargetEncoder): """Leave-one-out target encoder. 
""" def __init__(self, n_splits=3, shuffle=True, cols=None): """Leave-one-out target encoding for categorical features. Parameters ---------- cols : list of str Columns to target encode. """ self.cols = cols def fit(self, X, y): """Fit leave-one-out target encoder to X and y Parameters ---------- X : pandas DataFrame, shape [n_samples, n_columns] DataFrame containing columns to target encode y : pandas Series, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # Encode all categorical cols by default if self.cols is None: self.cols = [col for col in X if str(X[col].dtype)=='object'] # Check columns are in X for col in self.cols: if col not in X: raise ValueError('Column \''+col+'\' not in X') # Encode each element of each column self.sum_count = dict() #dict for sum + counts for each column for col in self.cols: self.sum_count[col] = dict() uniques = X[col].unique() for unique in uniques: ix = X[col]==unique self.sum_count[col][unique] = (y[ix].sum(), ix.sum()) # Return the fit object return self def transform(self, X, y=None): """Perform the target encoding transformation. Uses leave-one-out target encoding for the training fold, and uses normal target encoding for the test fold. Parameters ---------- X : pandas DataFrame, shape [n_samples, n_columns] DataFrame containing columns to encode Returns ------- pandas DataFrame Input DataFrame with transformed columns """ # Create output dataframe Xo = X.copy() # Use normal target encoding if this is test data if y is None: for col in self.sum_count: vals = np.full(X.shape[0], np.nan) for cat, sum_count in self.sum_count[col].items(): vals[X[col]==cat] = sum_count[0]/sum_count[1] Xo[col] = vals # LOO target encode each column else: for col in self.sum_count: vals = np.full(X.shape[0], np.nan) for cat, sum_count in self.sum_count[col].items(): ix = X[col]==cat vals[ix] = (sum_count[0]-y[ix])/(sum_count[1]-1) Xo[col] = vals # Return encoded DataFrame return Xo def fit_transform(self, X, y=None): """Fit and transform the data via target encoding. Parameters ---------- X : pandas DataFrame, shape [n_samples, n_columns] DataFrame containing columns to encode y : pandas Series, shape = [n_samples] Target values (required!). 
Returns ------- pandas DataFrame Input DataFrame with transformed columns """ return self.fit(X, y).transform(X, y)Using the leave-one-out target encoder, we can target-encode the data like before:# Cross-fold Target encode the categorical data te = TargetEncoderLOO() X_target_encoded_loo = te.fit_transform(X_train, y_train) X_target_encoded_loo.sample(10)The transformed matrix is stil the same size as the original:# Compare sizes print('Original size:', X_train.shape) print('Target encoded size:', X_target_encoded_loo.shape)Original size: (1000, 10) Target encoded size: (1000, 10)But now there are nearly as many unique values in each column as there are samples:# Compare category counts print('Original:') print(X_train.nunique()) print('\nLeave-one-out target encoded:') print(X_target_encoded_loo.nunique())Original: categorical_0 84 categorical_1 81 categorical_2 85 categorical_3 88 categorical_4 84 categorical_5 86 categorical_6 88 categorical_7 88 categorical_8 90 categorical_9 79 dtype: int64 Leave-one-out target encoded: categorical_0 993 categorical_1 994 categorical_2 992 categorical_3 987 categorical_4 990 categorical_5 990 categorical_6 990 categorical_7 991 categorical_8 992 categorical_9 996 dtype: int64Also, there are less empty values in the leave-one-out target encoded dataframe than there were in the cross-fold target encoded dataframe. This is because with leave-one-out target encoding, a value will only be null if it is the only category of that type (or if the original feature value was null).# Compare null counts print('Original null count:') print(X_train.isnull().sum()) print('\nCross-fold target encoded null count:') print(X_target_encoded_cv.isnull().sum()) print('\nLeave-one-out target encoded null count:') print(X_target_encoded_loo.isnull().sum())Original null count: categorical_0 0 categorical_1 0 categorical_2 0 categorical_3 0 categorical_4 0 categorical_5 0 categorical_6 0 categorical_7 0 categorical_8 0 categorical_9 0 dtype: int64 Cross-fold target encoded null count: categorical_0 9 categorical_1 12 categorical_2 22 categorical_3 21 categorical_4 12 categorical_5 15 categorical_6 19 categorical_7 20 categorical_8 23 categorical_9 15 dtype: int64 Leave-one-out target encoded null count: categorical_0 7 categorical_1 6 categorical_2 8 categorical_3 13 categorical_4 10 categorical_5 10 categorical_6 10 categorical_7 9 categorical_8 8 categorical_9 4 dtype: int64But more importantly, how well can our model predict the target variable when trained on the leave-one-out target encoded data?# Regression model model_te_loo = Pipeline([ ('encoder', TargetEncoderLOO()), ('scaler', StandardScaler()), ('imputer', SimpleImputer(strategy='mean')), ('regressor', BayesianRidge()) ]) # Cross-validated MAE scores = cross_val_score(model_te_loo, X_train, y_train, cv=3, scoring=mae_scorer) print('Cross-validated MAE:', scores.mean(), '+/-', scores.std()) # MAE on test data model_te_loo.fit(X_train, y_train) y_pred = model_te_loo.predict(X_test) test_mae = mean_absolute_error(y_test, y_pred) print('Validation MAE:', test_mae)Validation MAE: 0.837977841124484The leave-one-out target encoder performs *slightly* better than the cross-fold target encoder, because we've given it more samples with which to compute the per-category means ($N-1$, instead of $N-N/K$, where K is the number of folds). 
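As a quick numeric check of the closed-form leave-one-out mean used above (a sketch assuming the X_train and y_train from above, and picking a category with more than one sample):

# Verify the identity v_i = (S_C - y_i) / (N_C - 1) for one category
# (assumes X_train / y_train from above)
col = 'categorical_0'
cat = X_train[col].value_counts().index[0]   # most common category, so N_C > 1
mask = X_train[col] == cat
S_C, N_C = y_train[mask].sum(), mask.sum()
i = y_train[mask].index[0]                   # one sample with this category
loo_fast = (S_C - y_train.loc[i]) / (N_C - 1)    # closed-form leave-one-out mean
loo_slow = y_train[mask].drop(i).mean()          # mean recomputed without sample i
print(np.isclose(loo_fast, loo_slow))            # True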
While the increase in performance was very small, the leave-one-out target encoder is faster, due to the efficient way we computed the leave-one-out means (instead of having to compute means for each fold).

%%time
Xo = TargetEncoderCV().fit_transform(X_train, y_train)

%%time
Xo = TargetEncoderLOO().fit_transform(X_train, y_train)

CPU times: user 4.07 s, sys: 18 ms, total: 4.09 s Wall time: 4.1 s

Effect of the Learning Algorithm

The increase in predictive performance one gets from target encoding depends on the machine learning algorithm which is using it. As we've seen, target encoding is great for linear models (throughout this post we were using a Bayesian ridge regression, a variant of linear regression which optimizes the regularization parameter). However, target encoding doesn't help as much for tree-based boosting algorithms like XGBoost, CatBoost, or LightGBM, which tend to handle categorical data pretty well as-is.

Fitting the Bayesian ridge regression to the data, we see a huge increase in performance after target encoding (relative to one-hot encoding).

# Bayesian ridge w/ one-hot encoding
model_brr = Pipeline([
    ('encoder', OneHotEncoder()),
    ('scaler', StandardScaler()),
    ('imputer', SimpleImputer(strategy='mean')),
    ('regressor', BayesianRidge())
])

# Cross-validated MAE
scores = cross_val_score(model_brr, X_train, y_train, cv=3, scoring=mae_scorer)
print('MAE w/ Bayesian Ridge + one-hot encoding: %0.3f +/- %0.3f' % (scores.mean(), scores.std()))

# Bayesian ridge w/ target-encoding
model_brr = Pipeline([
    ('encoder', TargetEncoderLOO()),
    ('scaler', StandardScaler()),
    ('imputer', SimpleImputer(strategy='mean')),
    ('regressor', BayesianRidge())
])

# Cross-validated MAE
scores = cross_val_score(model_brr, X_train, y_train, cv=3, scoring=mae_scorer)
print('MAE w/ Bayesian Ridge + target encoding: %0.3f +/- %0.3f' % (scores.mean(), scores.std()))

MAE w/ Bayesian Ridge + target encoding: 0.833 +/- 0.038

However, using XGBoost, there is only a modest performance increase (if any at all).

# Regression model
model_xgb = Pipeline([
    ('encoder', OneHotEncoder()),
    ('scaler', StandardScaler()),
    ('imputer', SimpleImputer(strategy='mean')),
    ('regressor', XGBRegressor())
])

# Cross-validated MAE
scores = cross_val_score(model_xgb, X_train, y_train, cv=3, scoring=mae_scorer)
print('MAE w/ XGBoost + one-hot encoding: %0.3f +/- %0.3f' % (scores.mean(), scores.std()))

# Regression model
model_xgb = Pipeline([
    ('encoder', TargetEncoderLOO()),
    ('scaler', StandardScaler()),
    ('imputer', SimpleImputer(strategy='mean')),
    ('regressor', XGBRegressor())
])

# Cross-validated MAE
scores = cross_val_score(model_xgb, X_train, y_train, cv=3, scoring=mae_scorer)
print('MAE w/ XGBoost + target encoding: %0.3f +/- %0.3f' % (scores.mean(), scores.std()))

MAE w/ XGBoost + target encoding: 0.864 +/- 0.052

Dependence on the Number of Categories

There is also an effect of the number of categories on the performance of a model trained on target-encoded data. Target encoding works well with categorical data that contains a large number of categories.
However, if you have data with only a few categories, you're probably better off using one-hot encoding.

For example, let's generate two datasets: one which has a large number of categories in each column, and another which has only a few categories in each column.

# Categorical data w/ many categories
X_many, y_many = make_categorical_regression(
    n_samples=1000, n_features=10, n_categories=100, n_informative=1, imbalance=2.0)

# Categorical data w/ few categories
X_few, y_few = make_categorical_regression(
    n_samples=1000, n_features=10, n_categories=5, n_informative=1, imbalance=2.0)

/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:74: DeprecationWarning: `comb` is deprecated! Importing `comb` from scipy.misc is deprecated in scipy 1.0.0. Use `scipy.special.comb` instead.

Then we'll construct two separate models: one which uses target encoding, and another which uses one-hot encoding.

# Regression model w/ target encoding
model_te = Pipeline([
    ('encoder', TargetEncoderLOO()),
    ('scaler', StandardScaler()),
    ('imputer', SimpleImputer(strategy='mean')),
    ('regressor', BayesianRidge())
])

# Regression model w/ one-hot encoding
model_oh = Pipeline([
    ('encoder', OneHotEncoder()),
    ('scaler', StandardScaler()),
    ('imputer', SimpleImputer(strategy='mean')),
    ('regressor', BayesianRidge())
])

On the dataset with many categories per column, target encoding outperforms one-hot encoding by a good margin.

print('Many categories:')
# Target encoding w/ many categories
scores = cross_val_score(model_te, X_many, y_many, cv=3, scoring=mae_scorer)
print('MAE w/ target encoding: %0.3f +/- %0.3f' % (scores.mean(), scores.std()))
# One-hot encoding w/ many categories
scores = cross_val_score(model_oh, X_many, y_many, cv=3, scoring=mae_scorer)
print('MAE w/ one-hot encoding: %0.3f +/- %0.3f' % (scores.mean(), scores.std()))

Many categories: MAE w/ target encoding: 0.820 +/- 0.029 MAE w/ one-hot encoding: 1.049 +/- 0.045

On the other hand, with the dataset containing only a few categories per column, the performance of the one-hot encoded model is nearly indistinguishable from the performance of the model which uses target encoding.

print('Few categories:')
# Target encoding w/ few categories
scores = cross_val_score(model_te, X_few, y_few, cv=3, scoring=mae_scorer)
print('MAE w/ target encoding: %0.3f +/- %0.3f' % (scores.mean(), scores.std()))
# One-hot encoding w/ few categories
scores = cross_val_score(model_oh, X_few, y_few, cv=3, scoring=mae_scorer)
print('MAE w/ one-hot encoding: %0.3f +/- %0.3f' % (scores.mean(), scores.std()))

Few categories: MAE w/ target encoding: 0.815 +/- 0.030 MAE w/ one-hot encoding: 0.830 +/- 0.025

Effect of Category Imbalance

I would have expected target encoding to perform better than one-hot encoding when the categories were extremely unbalanced (most samples have one of only a few categories), and one-hot encoding to outperform target encoding in the case of balanced categories (categories appear about the same number of times throughout the dataset). However, it appears that category imbalance affects both one-hot and target encoding similarly.
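For intuition about what the imbalance argument does, here's a small sketch of the beta-binomial category probabilities that make_categorical_regression samples from (using scipy.special.comb, as the deprecation warning above suggests):

# Category probabilities in make_categorical_regression come from a
# beta-binomial PMF with alpha=1 and beta=imbalance+1
from scipy.special import beta, comb

def beta_binomial_pmf(n, a, b):
    k = np.arange(n + 1)
    return comb(n, k) * beta(k + a, n - k + b) / beta(a, b)

print(beta_binomial_pmf(9, 1.0, 0.0 + 1))  # imbalance=0: uniform over 10 categories
print(beta_binomial_pmf(9, 1.0, 2.0 + 1))  # imbalance=2: a few categories dominate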
Let's generate two datasets, one of which has balanced categories, and another which has highly imbalanced categories in each column.# Categorical data w/ many categories X_bal, y_bal = make_categorical_regression( n_samples=1000, n_features=10, n_categories=100, n_informative=1, imbalance=0.0) # Categorical data w/ few categories X_imbal, y_imbal = make_categorical_regression( n_samples=1000, n_features=10, n_categories=100, n_informative=1, imbalance=2.0)/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:74: DeprecationWarning: `comb` is deprecated! Importing `comb` from scipy.misc is deprecated in scipy 1.0.0. Use `scipy.special.comb` instead.Fitting the models from the previous section (one of which uses target encoding and the other uses one-hot encoding), we see that how imbalanced the data is doesn't have a huge effect on the perfomance of the model which uses target encoding.print('Target encoding:') # Target encoding w/ imbalanced categories scores = cross_val_score(model_te, X_imbal, y_imbal, cv=5, scoring=mae_scorer) print('MAE w/ imbalanced categories: %0.3f +/- %0.3f' % (scores.mean(), scores.std())) # Target encoding w/ balanced categories scores = cross_val_score(model_te, X_bal, y_bal, cv=5, scoring=mae_scorer) print('MAE w/ balanced categories: %0.3f +/- %0.3f' % (scores.mean(), scores.std()))Target encoding: MAE w/ imbalanced categories: 0.873 +/- 0.054 MAE w/ balanced categories: 0.845 +/- 0.041Nor does it appear to have a big effect on the performance of the model which uses one-hot encoding.print('One-hot encoding:') # One-hot encoding w/ imbalanced categories scores = cross_val_score(model_oh, X_imbal, y_imbal, cv=5, scoring=mae_scorer) print('MAE w/ imbalanced categories: %0.3f +/- %0.3f' % (scores.mean(), scores.std())) # One-hot encoding w/ balanced categories scores = cross_val_score(model_oh, X_bal, y_bal, cv=5, scoring=mae_scorer) print('MAE w/ balanced categories: %0.3f +/- %0.3f' % (scores.mean(), scores.std()))One-hot encoding: MAE w/ imbalanced categories: 1.030 +/- 0.024 MAE w/ balanced categories: 0.993 +/- 0.029I've tried various combinations of predictive models, levels of imbalance, and numbers of categories, and the level of imbalance doesn't seem to have a very systematic effect. I suspect this is because for both target encoding and one-hot encoding, with balanced categories we have more information about all categories on average (because examples with each category are more evenly distributed). On the other hand, we have *less* information about the most common categories - because those categories are no more "common" than any other in a balanced dataset. Therefore, the level of uncertainty for those categories ends up actually being higher for balanced datasets. Those two effects appear to cancel out, and the predictive performance of our models don't change. Effect of InteractionsSo far, target encoding has performed as well or better than other types of encoding. However, there's one situation where target encoding doesn't do so well: in the face of strong interaction effects.An interaction effect is when the effect of one feature on the target variable depends on the value of a second feature. For example, suppose we have one categorical feature with categories A and B, and a second categorical feature with categories C and D. With no interaction effect, the effect of the first and second feature would be additive, and the effect of A and B on the target variable is independent of C and D. 
An example of this is the money spent as a function of items purchased. If a customer purchases both items 1 and 2, they will be charged the same as if they had purchased either item independently:plt.bar(np.arange(4), [0, 2, 3, 5]) plt.ylabel('Cost') plt.xticks(np.arange(4), ['No purchases', 'Purchased only item 1', 'Purchased only item 2', 'Purchased both 1 + 2'])On the other hand, if there is an interaction effect, the effect on the target variable will not be simply the sum of the two features' effects. For example, just adding sugar *or* stirring coffee may not have a huge effect on the sweetness of the coffee. But if one adds sugar *and* stirs, there is a large effect on the sweetness of the coffee.plt.bar(np.arange(4), [1, 1, 3, 10]) plt.ylabel('Coffee sweetness') plt.xticks(np.arange(4), ['Nothing', 'Stir', 'Sugar', 'Sugar + stir'])Target encoding simply fills in each category with the mean target value for samples having that category. Because target encoding does this for each column individually, it's fundamentally unable to handle interactions between columns! That said, one-hot encoding doesn't intrinsically handle interaction effects either - it depends on the learning algorithm being used. Linear models (like the Bayesian ridge regression we've been using) can't pull out interaction effects unless we explicitly encode them (by adding a column for each possible interaction). Nonlinear learning algorithms, such as decision tree-based models, SVMs, and neural networks, are able to detect interaction effects in the data as-is.To see how well interaction effects are captured by models trained on target-encoded or one-hot-encoded data, we'll create two categorical datasets: one which has no interaction effects, and one whose variance is completely explained by interaction effects (and noise).# Categorical data w/ no interaction effects X_no_int, y_no_int = make_categorical_regression( n_samples=1000, n_features=10, n_categories=100, n_informative=2, interactions=0.0) # Categorical data w/ interaction effects X_inter, y_inter = make_categorical_regression( n_samples=1000, n_features=10, n_categories=100, n_informative=2, interactions=1.0)/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:74: DeprecationWarning: `comb` is deprecated! Importing `comb` from scipy.misc is deprecated in scipy 1.0.0. 
Use `scipy.special.comb` instead.To capture interaction effects, we'll have to use a model which can handle interactions, such as a tree-based method like XGBoost (a linear regression can't capture interactions unless they are explicitly encoded).# Regression model w/ target encoding model_te = Pipeline([ ('encoder', TargetEncoderLOO()), ('scaler', StandardScaler()), ('imputer', SimpleImputer(strategy='mean')), ('regressor', XGBRegressor()) ]) # Regression model w/ one-hot encoding model_oh = Pipeline([ ('encoder', OneHotEncoder()), ('scaler', StandardScaler()), ('imputer', SimpleImputer(strategy='mean')), ('regressor', XGBRegressor()) ])As we've seen before, without interaction effects the target encoder performs better than the one-hot encoder.print('No interaction effects:') # Target encoding w/ no interaction effects scores = cross_val_score(model_te, X_no_int, y_no_int, cv=5, scoring=mae_scorer) print('MAE w/ target encoding: %0.3f +/- %0.3f' % (scores.mean(), scores.std())) # One-hot encoding w/ no interaction effects scores = cross_val_score(model_oh, X_no_int, y_no_int, cv=5, scoring=mae_scorer) print('MAE w/ one-hot encoding: %0.3f +/- %0.3f' % (scores.mean(), scores.std()))No interaction effects: MAE w/ target encoding: 1.013 +/- 0.033 MAE w/ one-hot encoding: 1.155 +/- 0.029However, when most of the variance can be explained by interaction effects, the model trained on one-hot encoded data performs better (or at least it's unlikely that the target-encoded model has better performance).print('With interaction effects:') # Target encoding w/ interaction effects scores = cross_val_score(model_te, X_inter, y_inter, cv=5, scoring=mae_scorer) print('MAE w/ target encoding: %0.3f +/- %0.3f' % (scores.mean(), scores.std())) # One-hot encoding w/ interaction effects scores = cross_val_score(model_oh, X_inter, y_inter, cv=5, scoring=mae_scorer) print('MAE w/ one-hot encoding: %0.3f +/- %0.3f' % (scores.mean(), scores.std()))With interaction effects: MAE w/ target encoding: 1.222 +/- 0.035 MAE w/ one-hot encoding: 1.189 +/- 0.009Circuito RLC paralelo sem fonteJupyter Notebook desenvolvido por [.](https://github.com/GSimas)Circuitos RLC em paralelo têm diversas aplicações, como em projetos de filtrose redes de comunicação. Suponha que a corrente inicial I0 no indutor e a tensão inicial V0 no capacitor sejam:\begin{align}{\Large i(0) = I_0 = \frac{1}{L} \int_{-\infty}^{0} v(t) dt}\end{align}\begin{align}{\Large v(0) = V_0}\end{align}![](https://i.imgur.com/6gt8E4q.png)Portanto, aplicando a LKC ao nó superior fornece:\begin{align}{\Large \frac{v}{R} + \frac{1}{L} \int_{-\infty}^{t} v(\tau) d\tau + C \frac{dv}{dt} = 0}\end{align}Extraindo a derivada em relação a t e dividindo por C resulta em:\begin{align}{\Large \frac{d^2v}{dt^2} + \frac{1}{RC} \frac{dv}{dt} + \frac{1}{LC} v = 0}\end{align}Obtemos a equação característica substituindo a primeira derivada por s e a segundapor s^2:\begin{align}{\Large s^2 + \frac{1}{RC} s + \frac{1}{LC} = 0}\end{align}Assim, as raízes da equação característica são:\begin{align}{\Large s_{1,2} = -\alpha \pm \sqrt{\alpha^2 - \omega_0^2}}\end{align}onde:\begin{align}{\Large \alpha = \frac{1}{2RC}, \space \space \space \omega_0 = \frac{1}{\sqrt{LC}} }\end{align} Amortecimento Supercrítico / Superamortecimento (α > ω0)Quando α > ω0, as raízes da equação característica são reais e negativas. 
A resposta é:\begin{align}{\Large v(t) = A_1 e^{s_1 t} + A_2 e^{s_2 t} }\end{align} Amortecimento Crítico (α = ω0)Quando α = ω0 as raízes da equação característica são reais e iguais de modo que a resposta seja:\begin{align}{\Large v(t) = (A_1 + A_2t)e^{-\alpha t}}\end{align} Subamortecimento (α < ω0)Quando α < ω0, nesse caso, as raízes são complexas e podem ser expressas como segue:\begin{align}{\Large s_{1,2} = -\alpha \pm j\omega_d}\\\\{\Large \omega_d = \sqrt{\omega_0^2 - \alpha^2}}\end{align}\begin{align}{\Large v(t) = e^{-\alpha t}(A_1 cos(\omega_d t) + A_2 sen(\omega_d t))}\end{align}![](https://i.imgur.com/3s4tDKQ.png)As constantes A1 e A2 em cada caso podem ser determinadas a partir dascondições iniciais. Precisamos de v(0) e dv(0)/dt. **Exemplo 8.5**No circuito paralelo da Figura 8.13, determine v(t) para t > 0, supondo que v(0) = 5 V,i(0) = 0, L = 1 H e C = 10 mF. Considere os seguintes casos: R = 1,923 Ω, R = 5 Ω eR = 6,25 Ω .![](https://i.imgur.com/6gt8E4q.png)print("Exemplo 8.5") from sympy import * m = 10**(-3) #definicao de mili L = 1 C = 10*m v0 = 5 i0 = 0 A1 = symbols('A1') A2 = symbols('A2') t = symbols('t') def sqrt(x, root = 2): #definir funcao para raiz y = x**(1/root) return y print("\n--------------\n") ## PARA R = 1.923 R = 1.923 print("Para R = ", R) def resolve_rlc(R,L,C): alpha = 1/(2*R*C) omega = 1/sqrt(L*C) print("Alpha:",alpha) print("Omega:",omega) s1 = -alpha + sqrt(alpha**2 - omega**2) s2 = -alpha - sqrt(alpha**2 - omega**2) def rlc(alpha,omega): #funcao para verificar tipo de amortecimento resposta = "" if alpha > omega: resposta = "superamortecimento" v = A1*exp(s1*t) + A2*exp(s2*t) elif alpha == omega: resposta = "amortecimento critico" v = (A1 + A2*t)*exp(-alpha*t) else: resposta = "subamortecimento" v = exp(-alpha*t)*(A1*cos(omega_d*t) + A2*sin(omega_d*t)) return resposta,v resposta,v = rlc(alpha,omega) print("Tipo de resposta:",resposta) print("Resposta v(t):",v) print("v(0):",v.subs(t,0)) print("dv(0)/dt:",v.diff(t).subs(t,0)) return alpha,omega,s1,s2,resposta,v alpha,omega,s1,s2,resposta,v = resolve_rlc(R,L,C) #v(0) = 5 = A1 + A2 -> A2 = 5 - A1 #dv(0)/dt = -2A1 - 50A2 #C*dv(0)/dt + i(0) + v(0)/R = 0 #0.01*(-2A1 - 50A2) + 0 + 5/1.923 = 0 #(-2A1 -50(5 - A1)) = -5/(1.923*0.01) #48A1 = 250 - 5/(1.923*0.01) A1 = (250 - 5/(1.923*0.01))/48 print("Constante A1:",A1) A2 = 5 - A1 print("Constante A2:",A2) v = A1*exp(s1*t) + A2*exp(s2*t) print("Resposta v(t):",v,"V") print("\n--------------\n") ## PARA R = 5 R = 5 A1 = symbols('A1') A2 = symbols('A2') print("Para R = ", R) alpha,omega,s1,s2,resposta,v = resolve_rlc(R,L,C) #v(t) = (A1 + A2t)e^(-alpha*t) #v(0) = A1 = 5 A1 = 5 #C*dv(0)/dt + i(0) + v(0)/R = 0 #0.01(-10A1 + A2) + 0 + 5/5 = 0 #0.01A2 = -1 + 0.5 A2 = (-1 + 0.5)/0.01 print("Constante A1:",A1) print("Constante A2:",A2) v = (A1 + A2*t)*exp(-alpha*t) print("Resposta v(t):",v,"V") print("\n--------------\n") ## PARA R = 6.25 R = 6.25 A1 = symbols('A1') A2 = symbols('A2') print("Para R = ", R) omega_d = sqrt(omega**2 - alpha**2) alpha,omega,s1,s2,resposta,v = resolve_rlc(R,L,C) #v(t) = e^-(alpha*t)*(A1cos(wd*t) + A2sen(wd*t)) #v(0) = A1 = 5 A1 = 5 #C*dv(0)/dt + i(0) + v(0)/R = 0 #0.01*(-8A1 + 6A2) + 0 + 5/6.25 = 0 #-0.4 + 0.06A2 = -5/6.25 A2 = (-5/6.25 + 0.4)/0.06 print("Constante A1:",A1) print("Constante A2:",A2) v = exp(-alpha*t)*(A1*cos(omega_d*t) + A2*sin(omega_d*t)) print("Resposta v(t):",v,"V")Exemplo 8.5 -------------- Para R = 1.923 Alpha: 26.001040041601662 Omega: 10.0 Tipo de resposta: superamortecimento Resposta v(t): 
A1*exp(-1.9999133337787*t) + A2*exp(-50.0021667494246*t) v(0): A1 + A2 dv(0)/dt: -1.9999133337787*A1 - 50.0021667494246*A2 Constante A1: -0.20855000866701326 Constante A2: 5.208550008667014 Resposta v(t): 5.20855000866701*exp(-50.0021667494246*t) - 0.208550008667013*exp(-1.9999133337787*t) V -------------- Para R = 5 Alpha: 10.0 Omega: 10.0 Tipo de resposta: amortecimento critico Resposta v(t): (A1 + A2*t)*exp(-10.0*t) v(0): A1 dv(0)/dt: -10.0*A1 + A2 Constante A1: 5 Constante A2: -50.0 Resposta v(t): (-50.0*t + 5)*exp(-10.0*t) V -------------- Para R = 6.25 Alpha: 8.0 Omega: 10.0 Tipo de resposta: subamortecimento Resposta v(t): A1*exp(-8.0*t) v(0): A1 dv(0)/dt: -8.0*A1 Constante A1: 5 Constante A2: -6.666666666666667 Resposta v(t): 5*exp(-8.0*t) V**Problema Prático 8.5**Na Figura 8.13, seja R = 2 Ω, L = 0,4 H, C = 25 mF, v(0) = 0, e i(0) = 50 mA. Determine v(t) para t > 0.![](https://i.imgur.com/6gt8E4q.png)print("Problema Prático 8.5") R = 2 L = 0.4 C = 25*m v0 = 0 i0 = 50*m A1 = symbols('A1') A2 = symbols('A2') alpha,omega,s1,s2,resposta,v = resolve_rlc(R,L,C) #C*dv(0)/dt + i(0) + v(0)/R = 0 #C*(-10A1 + A2) + i0 + v(0)/2 = 0 #v(0) = 0 = A1 #C*A2 = -i0 A2 = -i0/C A1 = 0 print("Constante A1:",A1) print("Constante A2:",A2) v = (A1 + A2*t)*exp(-10.0*t) print("Resposta v(t):",v,"V")Problema Prático 8.5 Alpha: 10.0 Omega: 10.0 Tipo de resposta: amortecimento critico Resposta v(t): (A1 + A2*t)*exp(-10.0*t) v(0): A1 dv(0)/dt: -10.0*A1 + A2 Constante A1: 0 Constante A2: -2.0 Resposta v(t): -2.0*t*exp(-10.0*t) V**Exemplo 8.6**Determine v(t) para t > 0 no circuito RLC da Figura 8.15.![](https://i.imgur.com/07Ueco1.png)print("Exemplo 8.6") u = 10**(-6) #definicao de micro Vs = 40 L = 0.4 C = 20*u A1 = symbols('A1') A2 = symbols('A2') #Para t < 0 v0 = Vs*50/(50 + 30) i0 = -Vs/(50 + 30) print("V0:",v0,"V") print("i0:",i0,"A") #Para t > 0 #C*dv(0)/dt + i(0) + v(0)/50 = 0 #20u*dv(0)/dt - 0.5 + 0.5 = 0 #dv(0)/dt = 0 R = 50 alpha,omega,s1,s2,resposta,v = resolve_rlc(R,L,C) #v(0) = 25 = A1 + A2 #A1 = 25 - A2 #dv(0)/dt = -146A1 - 854A2 = 0 #-146(25 - A2) - 854A2 = 0 #146A2 - 854A2 = 3650 #-708A2 = 3650 A2 = -3650/708 A1 = 25 - A2 print("Constante A1:",A1) print("Constante A2:",A2) v = A1*exp(s1*t) + A2*exp(s2*t) print("Resposta v(t):",v,"V")Exemplo 8.6 V0: 25.0 V i0: -0.5 A Alpha: 500.0 Omega: 353.5533905932738 Tipo de resposta: superamortecimento Resposta v(t): A1*exp(-146.446609406726*t) + A2*exp(-853.553390593274*t) v(0): A1 + A2 dv(0)/dt: -146.446609406726*A1 - 853.553390593274*A2 Constante A1: 30.15536723163842 Constante A2: -5.155367231638418 Resposta v(t): -5.15536723163842*exp(-853.553390593274*t) + 30.1553672316384*exp(-146.446609406726*t) VTumor Response to Treatment# Store the Mean Tumor Volume Data Grouped by Drug and Timepoint mean_tumor_grouped = combined_data.groupby(["Drug", "Timepoint"]).mean()["Tumor Volume (mm3)"] # Convert to DataFrame tumor_mean_df = pd.DataFrame(mean_tumor_grouped) # Preview DataFrame reindexed_tumor_mean_df = tumor_mean_df.reset_index() reindexed_tumor_mean_df.head(11) # Store the Standard Error of Tumor Volumes Grouped by Drug and Timepoint sem_tumor_grouped = combined_data.groupby(["Drug", "Timepoint"]).sem()["Tumor Volume (mm3)"] # Convert to DataFrame tumor_sem_df = pd.DataFrame(sem_tumor_grouped) # Preview DataFrame reindexed_tumor_sem_df = tumor_sem_df.reset_index() reindexed_tumor_sem_df.head(5) # Minor Data Munging to Re-Format the Data Frames reformat_tumor_response = reindexed_tumor_mean_df.pivot(index='Timepoint', columns = 'Drug', values = 'Tumor Volume 
(mm3)') reformat_tumor_sem = reindexed_tumor_sem_df.pivot(index='Timepoint', columns = 'Drug', values = 'Tumor Volume (mm3)') # Preview that Reformatting worked reformat_tumor_response.head(5) # Generate the Plot (with Error Bars) x_axis = reformat_tumor_response.index plt.errorbar(x_axis, reformat_tumor_response['Capomulin'], yerr=reformat_tumor_sem['Capomulin'], marker= 'o', color = 'red', linestyle=':') plt.errorbar(x_axis, reformat_tumor_response['Infubinol'], yerr=reformat_tumor_sem['Infubinol'], marker= '^', color = 'blue', linestyle=':') plt.errorbar(x_axis, reformat_tumor_response['Ketapril'], yerr=reformat_tumor_sem['Ketapril'], marker= 's', color = 'green', linestyle=':') plt.errorbar(x_axis, reformat_tumor_response['Placebo'], yerr=reformat_tumor_sem['Placebo'], marker= 'D', color = 'black', linestyle=':') plt.title("Tumor Response to Treatment") plt.xlabel("Time (Days)") plt.ylabel("Tumor Volume (mm3)") plt.legend(loc="upper left") plt.ylim(30,75) plt.xlim(-5,50) plt.grid() # Save the Figure plt.tight_layout() plt.savefig("data/tumor_response.png") # Show the Figure plt.show()Metastatic Response to Treatment# Store the Mean Met. Site Data Grouped by Drug and Timepoint mean_metastatic_grouped = combined_data.groupby(["Drug", "Timepoint"]).mean()["Metastatic Sites"] # Convert to DataFrame metastatic_mean_df = pd.DataFrame(mean_metastatic_grouped) # Preview DataFrame reindexed_metastatic_mean_df = metastatic_mean_df.reset_index() reindexed_metastatic_mean_df.head(5) # Store the Standard Error associated with Met. Sites Grouped by Drug and Timepoint sem_metastatic_grouped = combined_data.groupby(["Drug", "Timepoint"]).sem()["Metastatic Sites"] # Convert to DataFrame metastatic_sem_df = pd.DataFrame(sem_metastatic_grouped) # Preview DataFrame reindexed_metastatic_sem_df = metastatic_sem_df.reset_index() metastatic_sem_df.head(5) # Minor Data Munging to Re-Format the Data Frames reformat_metastatic_response = reindexed_metastatic_mean_df.pivot(index = 'Timepoint', columns = 'Drug', values = 'Metastatic Sites') reformat_metastatic_sem = reindexed_metastatic_sem_df.pivot(index='Timepoint', columns = 'Drug', values = 'Metastatic Sites') # Preview that Reformatting worked reformat_metastatic_response.head(5) # Generate the Plot (with Error Bars) x_axis1 = reformat_metastatic_response.index plt.errorbar(x_axis1, reformat_metastatic_response['Capomulin'], yerr=reformat_metastatic_sem['Capomulin'], marker= 'o', color = 'red', linestyle=':') plt.errorbar(x_axis1, reformat_metastatic_response['Infubinol'], yerr=reformat_metastatic_sem['Infubinol'], marker= '^', color = 'blue', linestyle=':') plt.errorbar(x_axis1, reformat_metastatic_response['Ketapril'], yerr=reformat_metastatic_sem['Ketapril'], marker= 's', color = 'green', linestyle=':') plt.errorbar(x_axis1, reformat_metastatic_response['Placebo'], yerr=reformat_metastatic_sem['Placebo'], marker= 'D', color = 'black', linestyle=':') plt.title("Metastatic Spread During Treatment") plt.xlabel("Treatment Duration (Days)") plt.ylabel("Met. 
Sites") plt.legend(loc="upper left") plt.ylim(-0.5,4.0) plt.xlim(-5,50) plt.grid() # Save the Figure plt.tight_layout() plt.savefig("data/metastatic_response.png") # Show the Figure plt.show()Survival Rates# Store the Count of Mice Grouped by Drug and Timepoint count_mice_grouped = combined_data.groupby(["Drug", "Timepoint"]).count()["Mouse ID"] # Convert to DataFrame count_mice_df = pd.DataFrame(count_mice_grouped) count_mice_df = count_mice_df.rename(columns={"Mouse ID": "Mouse Count"}) # Preview DataFrame reindexed_count_mice_df = count_mice_df.reset_index() reindexed_count_mice_df.head(5) # Minor Data Munging to Re-Format the Data Frames reformat_survival_rate = reindexed_count_mice_df.pivot(index='Timepoint', columns = 'Drug', values = 'Mouse Count') # Preview the Data Frame reformat_survival_rate.head(5) # Generate the Plot (Accounting for percentages) x_axis2 = reformat_survival_rate.index percent_capomulin = (reformat_survival_rate/reformat_survival_rate.iloc[0])*100 plt.errorbar(x_axis2, percent_capomulin['Capomulin'], marker= 'o', color = 'red', linestyle=':') plt.errorbar(x_axis2, percent_capomulin['Infubinol'], marker= '^', color = 'blue', linestyle=':') plt.errorbar(x_axis2, percent_capomulin['Ketapril'], marker= 's', color = 'green', linestyle=':') plt.errorbar(x_axis2, percent_capomulin['Placebo'], marker= 'D', color = 'black', linestyle=':') plt.title("Survival During Treatment") plt.xlabel("Time (Days)") plt.ylabel("Survival Rate %") plt.legend(loc="bottom left") plt.ylim(30,105) plt.xlim(-5,50) plt.grid() # Save the Figure plt.tight_layout() plt.savefig("data/survival_rate.png") # Show the Figure plt.show()Summary Bar Graph# Calculate the percent changes for each drug percentage_change = ((reformat_tumor_response.iloc[-1] - reformat_tumor_response.iloc[0])/reformat_tumor_response.iloc[0])*100 # Display the data to confirm print(percentage_change) # Store all Relevant Percent Changes into a Tuple relevant_percentage_change = (percentage_change['Capomulin'], percentage_change['Infubinol'], percentage_change['Ketapril'], percentage_change['Placebo']) # Splice the data between passing and failing drugs failing_drugs = relevant_percentage_change[0] passing_drugs = relevant_percentage_change[1:] # Orient widths. Add labels, tick marks, etc. 
x_axis = np.arange(len(relevant_percentage_change)) tick_location=[values for values in x_axis] plt.xticks(tick_location, ["Capomulin", "Infubinol", "Ketapril", "Placebo"]) rect=plt.bar(x_axis, relevant_percentage_change, color=['g', 'r', 'r', 'r'], alpha=1, align="edge", width=-0.99) plt.xlim(-1.25, (max(x_axis)+0.25)) plt.ylim(-30, 70) plt.title("Tumor Change Over 45 day Treatment") plt.xlabel("Drugs") plt.ylabel("% Tumor Volume Change") plt.grid() # Use functions to label the percentages of changes def autolabel(rects): for rect in rects: height = rect.get_height() # Fraction of axis height taken up by this rectangle label_position = (abs(height)/height)*5 plt.text(rect.get_x() + rect.get_width()/2., label_position, '%1.1f%%' % int(height), ha='center', va='bottom') # Call functions to implement the function calls autolabel(rect) # Save the Figure plt.tight_layout() plt.savefig("data/summary_bar_graph.png") # Show the Figure plt.show()Workspace \9: ``model.py``# getting future division from __future__ import division # autoreloading for imported modules %load_ext autoreload %autoreload 2 # numpy import numpy as np from numpy import pi, sqrt, log, log10, power, exp #scipy from scipy.interpolate import interp1d # matplotlib import matplotlib.pyplot as plt from matplotlib import rcParams from matplotlib import rc from matplotlib.lines import Line2D # other import os from tqdm import tqdm # matplotlib style %matplotlib inline %config InlineBackend.figure_format = "retina" rcParams['figure.figsize'] = (13, 8) rcParams['font.family'] = 'serif' rcParams['font.serif'] = ['New Times Roman'] rc('text', usetex=True) # current directory current_dir = os.getcwd() from random import random import sys sys.path.insert(0, '../') import tools as tl import constants as ct import particle as pt import ska as sk import astro as ap import echo as ec import routines as rt import data as dt import model as md # The contents of model.py dir(md) dt.snrs_cut['G39.7-2.0'].__dict__ # defining a test SNR test_snr = dt.SuperNovaRemnant() # some shorthand names for SNRs from GC W28 = 'G6.4-0.1' W50 = 'G39.7-2.0' # Making our test SNR be like one of the above: test_snr.__dict__ = {key:val for key, val in dt.snrs_dct[W50].__dict__.items()} # W28 does not have a spectral index in the GC; others say it's 0.7 alpha = test_snr.get_spectral_index() if alpha == None: alpha = 0.7 test_snr.__dict__.update({'alpha':alpha}) test_snr.__dict__.update({'gamma': ap.gamma_from_alpha(alpha)})Getting reach by sing the SNR age:ma_arr = np.logspace(np.log10(1e-7), np.log10(2e-4), 200) sig_noi_thres = 1. ga_ref = 1.e-10 # doesn't matter, will be rescaled counter = 0 plt.subplots() snr = test_snr for use_free in [True, False]: try: distance = snr.get_distance() except: continue t_age = snr.get_age() snu_echo_kwargs = {'tmin_default':None, 'Nt':100000, # for a fine enough array 'xmin':ct._au_over_kpc_, 'xmax_default':100., 'use_quad':False, 'lin_space':False, 'Nint':100000, # for a fine enough array 't_extra_old':0. } # data: data = {'deltaE_over_E':ct._deltaE_over_E_, 'f_Delta':ct._f_Delta_, 'exp':'SKA', 'total_observing_time':100., 'verbose':0, 'correlation_mode':'interferometry', 'average':True } t_trans = 300. 
t_peak = 3000#10.**(ct._mu_log10_tpk_) lightcurve_params = {'t_peak':t_peak, 't_age':t_age, 't_trans':t_trans, 'L_today':snr.get_luminosity(), 'use_free_expansion':use_free } sig_noi = md.snr_routine(ma_arr, ga_ref, snr, lightcurve_params=lightcurve_params, snu_echo_kwargs=snu_echo_kwargs, data=data, output_all=False) ga_arr = ec.ga_reach(sig_noi_thres, sig_noi, ga_ref) _, out = ap.L_source(t_age, model='eff', output_pars=True, gamma=snr.get_gamma(), t_peak=t_peak, t_trans=t_trans, L_today=snr.get_luminosity(), t_age=t_age) Lpk = out['L_peak'] plt.plot(ma_arr, ga_arr, lw='2.', alpha=1., label="Lpk=%.1e\ntpk=%.1f\nttr=%d\ntage=%d" % (Lpk, t_peak, int(t_trans), int(t_age))) counter += 1 ga_cast = 6.5e-11 plt.fill_between([1e-7, 1e-2], ga_cast, 1, color='grey', alpha=0.2) plt.xscale('log') plt.yscale('log') plt.ylim(1e-11, 1e-9) plt.xlim(3e-7, 1e-4) plt.xlabel('$m_a$ [eV]') plt.ylabel('$g_{a\gamma}$ [GeV]$^{-1}$') plt.legend(loc='best') LpkGetting reach by fixing age to a value of our choice:ma_arr = np.logspace(np.log10(1e-7), np.log10(2e-4), 200) sig_noi_thres = 1. ga_ref = 1.e-10 # doesn't matter, will be rescaled counter = 0 plt.subplots() snr = test_snr for use_free in [True, False]: try: distance = snr.get_distance() except: continue # t_age = snr.get_age() t_age = 10000 # much reduced "effective" age, assuming the rest is in snow-plow (unwarranted, since snow plow typically starts at around 5.e4 years; see Draine's "Physics of the ISM and IGM" book) snu_echo_kwargs = {'tmin_default':None, 'Nt':100000, # for a fine enough array 'xmin':ct._au_over_kpc_, 'xmax_default':100., 'use_quad':False, 'lin_space':False, 'Nint':100000, # for a fine enough array 't_extra_old':0. } # data: data = {'deltaE_over_E':ct._deltaE_over_E_, 'f_Delta':ct._f_Delta_, 'exp':'SKA', 'total_observing_time':100., 'verbose':0, 'correlation_mode':'interferometry', 'average':True } t_trans = 300. t_peak = 10.**(ct._mu_log10_tpk_) lightcurve_params = {'t_peak':t_peak, 't_age':t_age, 't_trans':t_trans, 'L_today':snr.get_luminosity(), 'use_free_expansion':use_free } sig_noi = md.snr_routine(ma_arr, ga_ref, snr, lightcurve_params=lightcurve_params, snu_echo_kwargs=snu_echo_kwargs, data=data, output_all=False) ga_arr = ec.ga_reach(sig_noi_thres, sig_noi, ga_ref) _, out = ap.L_source(t_age, model='eff', output_pars=True, gamma=snr.get_gamma(), t_peak=t_peak, t_trans=t_trans, L_today=snr.get_luminosity(), t_age=t_age) Lpk = out['L_peak'] plt.plot(ma_arr, ga_arr, lw='2.', alpha=1., label="Lpk=%.1e\ntpk=%.1f\nttr=%d\ntage=%d" % (Lpk, t_peak, int(t_trans), int(t_age))) counter += 1 ga_cast = 6.5e-11 plt.fill_between([1e-7, 1e-2], ga_cast, 1, color='grey', alpha=0.2) plt.xscale('log') plt.yscale('log') plt.ylim(1e-11, 1e-9) plt.xlim(3e-7, 1e-4) plt.xlabel('$m_a$ [eV]') plt.ylabel('$g_{a\gamma}$ [GeV]$^{-1}$') plt.legend(loc='best') Lpk _, out = ap.L_source(1, model='eff', output_pars=True, gamma=snr.get_gamma(), t_peak=3000, t_trans=t_trans, L_today=snr.get_luminosity(), t_age=10000) out['L_peak']More about the Green Catalogueverbose = 2 snr_results = {} ga_ref = 1.e-10 tpk = 10.**(1.7) Lpk = 10.**(28.) t_trans = 100. # t_trans_over_t_peak = 30. # t_trans = (tpk/365.)*t_trans_over_t_peak for name, snr in dt.snrs_age.items(): t_age = snr.get_age() lightcurve_params = {'t_peak':tpk, 't_trans':t_trans, 't_age':t_age} snu_echo_kwargs = {'tmin_default':None, 'Nt':int(30001), 'xmin':ct._au_over_kpc_, 'xmax_default':100., 'use_quad':False, 'lin_space':False, 'Nint':int(30001), 't_extra_old':0. 
} # data: data = {'deltaE_over_E':ct._deltaE_over_E_, 'f_Delta':ct._f_Delta_, 'exper':'SKA', 'total_observing_time':100., 'verbose':0, 'correlation_mode':'interferometry', 'average':True } z, new_output = md.snr_routine(pt.ma_from_nu(1.), ga_ref, snr, lightcurve_params=lightcurve_params, snu_echo_kwargs=snu_echo_kwargs, data=data, output_all=True, verbose=verbose) snr_results[name] = new_output # obtaining L_today L0 = snr.get_luminosity() # [cgs]SNR size=6.0e-07. Value will be respected. signal power:5.806265394835402e-20 noise power:9.92029949697011e-18 s/n: 0.005852913409125168 SNR size=1.1e-06. Value will be respected. signal power:4.259507574255806e-19 noise power:9.728898045368845e-18 s/n: 0.043782014719369156 SNR size=1.4e-05. Value will be respected. signal power:1.0578776314873312e-18 noise power:2.141734037572371e-17 s/n: 0.04939351072210733 SNR size=4.3e-06. Value will be respected. signal power:7.587198986181078e-19 noise power:1.3728442490976773e-17 s/n: 0.055266276499813284 SNR size=1.2e-05. Value will be respected. signal power:4.6858032710203817e-17 noise power:1.9867418404677013e-17 s/n: 2.358536562514479 SNR size=7.2e-05. Value will be respected. signal power:1.5583355395665702e-18 noise power:4.541640694190163e-17 s/n: 0.03431217140448938 SNR size=2.7e-05. Value will be respected. signal power:2.4538180241432213e-17 noise power:2.77358966111339e-17 s/n: 0.8847083829834423 SNR size=4.8e-05. Value will b[...]Summarizing data The **5-number summary** provides descriptive statistics that summarize data by five most important sample percentiles| | Quartile | Definition | Statistic | Percentile || --- | --- | --- | --- | --- ||1.|$Q_0$||minimum|$0^{th}$||2.|$Q_1$|splits off the lowest 25% of data from the highest 75%|N/A|$25^{th}$||3.|$Q_2$|cuts data set in half|median|$50^{th}$||4.|$Q_3$|splits off the highest 25% of data from the lowest 75%|N/A|$75^{th}$||5.|$Q_4$||maximum|$100^{th}$| Medián (Me or ${\tilde x}$) je hodnota, jež dělí řadu vzestupně seřazených výsledků na dvě stejně početné poloviny. Ve statistice patří mezi míry centrální tendence. Platí, že nejméně 50 % hodnot je menších nebo rovných a nejméně 50 % hodnot je větších nebo rovných mediánu. * není ovlivněn extrémními hodnotami. Proto se často používá v případě šikmých rozdělení, u kterých aritmetický průměr dává obvykle nevhodné výsledky. Kvantily (z lat. quantilis, jak malý/velký?) jsou ve statistice čísla (hodnoty), která dělí soubor seřazených (například naměřených) hodnot na několik zhruba stejně velkých částí. Kvantil je tedy míra polohy rozdělení pravděpodobnosti náhodné veličiny. Popisují body, ve kterých distribuční funkce náhodné proměnné prochází danou hodnotou. KvartilTři kvartily rozdělují statistický soubor na čtvrtiny. 25 % prvků má hodnoty menší než dolní kvartil $Q_{0,25}$ a 75 % prvků hodnoty menší než horní kvartil $Q_{0,75}$; někdy se označují $Q_{1}$ a $Q_{3}$. Box Plot (also called box-and-whisker plot). * The box has an upper bound of $Q_3$ and a lower bound of $Q_1$. * The median will be a line somewhere in this box. * The whiskers extend from the box towards the minimum/maximum. Whiskers* extends from $Q_3 + 1.5 \times IQR$ (interquartile range) to $Q_1 - 1.5 \times IQR$ * anything beyond will be represented as individual points for outliers Outliers* are extreme values that might be errors in measurement and recording, * accurate reports of rare events.import numpy as np import pandas as pd import datetime as dt def example_boxplot(): """ Generate an example box plot. 
taken from [Hands-On Data Analysis with Pandas](https://www.packtpub.com/big-data-and-business-intelligence/hands-data-analysis-pandas) """ non_symmetric = pd.Series(np.random.gamma(7, 5, size=1000) * np.random.choice([-2.2, -1.85, 0, -0.4, 1.33], size=1000), name='x') # find the quartiles and iqr q1_y, median_y, q3_y = non_symmetric.quantile([0.25, 0.5, 0.75]) iqr = q3_y - q1_y # make the boxplot ax = non_symmetric.plot(kind='box', figsize=(6, 6), title='Box plot') # label the box ax.annotate('median', xy=(0.945, median_y + 2)) ax.annotate(r'$Q_3$ ($75^{th}$)', xy=(1, q3_y), xytext=(1.08, q3_y - 5)) ax.annotate(r'$Q_1$ ($25^{th}$)', xy=(1, q1_y), xytext=(1.08, q1_y)) ax.annotate( 'IQR', xy=(0.9, (q3_y + q1_y)/2), xytext=(0.8, (q3_y + q1_y)/2 - 2.85), arrowprops=dict(arrowstyle='-[, widthB=3.3, lengthB=0.5') ) # label the whiskers ax.annotate(r'$Q_3 + 1.5 * IQR$', xy=(1.05, q3_y + 1.5 * iqr - 7)) ax.annotate(r'$Q_1 - 1.5 * IQR$', xy=(1.05, q1_y - 1.5 * iqr - 2)) # label the outliers ax.annotate( 'outlier', xy=(0.99, non_symmetric.min()), xytext=(0.8, non_symmetric.min() - 2.1), arrowprops=dict(arrowstyle='->') ) for i, val in enumerate(non_symmetric[non_symmetric > q3_y + 1.5*iqr]): if not i: text = 'outliers' x, y = 0.75, 102 else: text = '' x, y = 0.87, 103 ax.annotate( text, xy=(0.99, val), xytext=(x, y), arrowprops=dict(facecolor='black', arrowstyle='-|>') ) return ax example_boxplot();MLP:The first half of this notebook may be used to train an MLP. Training for RNN models can be found in the second half of this notebookNote that this notebook requires the use of train_df.pkl and test_df.pkl files. These are generated in the preprocess.ipynb notebook. If you have not run this notebook, you will not have the necessary data to proceed with this notebook!# Load Dependencies import pandas as pd import numpy as np import matplotlib.pyplot as plt from src.utils import get_batches, shuffle, train_val_split, preds_to_scores,scores_to_preds, plot_train_loss from src.mlp import MLP from src.rnn import RNN import seaborn as sns import plotly.plotly as py import plotly.graph_objs as go %load_ext autoreload %autoreload 2 # Define the path to the data. This is the training dataframe saved from the preprocessing notebook. # If you have not run the preprocessing notebook, go back and do so now. data_path = './data/train_df.pkl' train_df = pd.read_pickle(data_path) # To further isolate our data, we will only examine essays from a single set # Feel free to experiment with different essay sets by choosing a different value # for the set variable. Sets 1, 3, 4, 5, and 6 are supported! set = 1 df = train_df.loc[train_df['essay_set'] == set] df.head() # In order to avoid bias toward more common scores, we will limit the number # of essays from each scoring bucket to a set value score_df = None min_score = int(df['min_score'].min()) max_score = int(df['max_score'].max()) n_max = 100 for i in range(min_score,max_score+1): if score_df is None: score_df = df.loc[df['domain1_score'] == i][:n_max] else: temp_df = df.loc[df['domain1_score'] == i][:n_max] score_df = pd.concat([score_df, temp_df]) df = score_df # Extract essay vectors and corresponding scores X = np.array(df['essays_embed']) y = np.array(df['domain1_score']) X = np.stack(X, axis=0) print('There are {} training essays, each of shape {} x {}'.format(X.shape[0], X.shape[1], X.shape[2]))There are 566 training essays, each of shape 426 x 200These essays are the wrong shape to feed directly into the MLP. 
Therefore, each essay matrix needs to be flattened into a 1-D vector.X_flatten = np.reshape(X, [X.shape[0], -1]) print('There are {} training essays, each a vector of length {}'.format(X_flatten.shape[0], X_flatten.shape[1]))There are 566 training essays, each a vector of length 85200The next step is to shuffle the data and separate it into training and validation sets.X, y = shuffle(X_flatten, y) X_train, y_train, X_val, y_val = train_val_split(X, y, train_prop=0.85)Here we need to transform the labels to the form that the network will predict. For example, in set 1, the essays are graded on a scale from 2-12, therefore there are 11 classes into which the network will try to classify each essay. However, the network will classify essays into the scale 0-10. Therefore, this step will perform this shift on the labels. If the scoring range already starts at 0, no shift is performed.if min_score != 0: y_train_adj = scores_to_preds(y_train, min_score) print('Training labels shifted from a scale of ({},{}) to ({},{})'\ .format(min(y_train),max(y_train), min(y_train_adj), max(y_train_adj))) y_val_adj = scores_to_preds(y_val, min_score) print('Validation labels shifted from a scale of ({},{}) to ({},{})'\ .format(min(y_val),max(y_val), min(y_val_adj), max(y_val_adj))) else: print('No score adjustment necessary') y_train_adj = y_train y_val_adj = y_valTraining labels shifted from a scale of (2,12) to (0,10) Validation labels shifted from a scale of (2,12) to (0,10)Initial MLPHere we define an MLP model to train. The parameters below were the initial parameters tested on the dataset. This model learns the training set well, but is unable to generalize to the validation set. You may skip training this model to save time.# User Defined Parameters model_name = 'mlp_set1_bad' hidden_dims = [128,64] weight_scale = 1e-2 batch_size = 16 n_epochs = 20 l2_reg = 1e-4 keep_prob = 1 reg = False lr = 1e-3 # Derived Parameters input_dim = X_train.shape[1] num_classes = max_score-min_score + 1 n_batches = round(X_train.shape[0]/batch_size) batch_gen = get_batches(X_train, y_train_adj, batch_size, net_type='mlp') mlp_net = MLP(input_dim=input_dim, hidden_dims=hidden_dims, num_classes=num_classes, weight_scale=weight_scale,\ l2_reg=l2_reg, keep_prob=keep_prob, regression=reg) print('Training Network...') train_loss_hist, val_loss_hist = mlp_net.train(gen=batch_gen, X_val=X_val, y_val=y_val_adj, n_epochs=n_epochs, n_batches=n_batches, lr=lr,\ save_every_n=5, model_name=model_name) fig = plot_train_loss(train_loss_hist, val_loss_hist, n_batches, model_name) py.iplot(fig, filename='basic-area')High five! You successfully sent some data to your account on plotly. View your plot in your browser at https://plot.ly/~pmt210/0 or inside your plot.ly account where it is named 'basic-area'Train your own MLPThe MLP above is able to learn the training set, but is unable to generalize for the validation set. Below is another MLP model definition. The user may change the model name and parameters, or leave the model definition as is. The model will be saved to the 'model/' directory of this project. Parameters such as the following may be defined by the user: learning rate, number of training epochs, l2 regularization, dropout probability, and regression vs classification.After many iterations, we found the following mlp parameters yielded the best results on both the training and validation sets. 
Note that this model is much larger and requires a GPU to train in a reasonable amount of time.# User Defined Parameters model_name = 'mlp_set'+'{}'.format(set) hidden_dims = [1024,256] weight_scale = 1e-2 batch_size = 16 n_epochs = 20 l2_reg = 1e-4 keep_prob = 0.6 reg = False lr = 1e-4 # Derived Parameters input_dim = X_train.shape[1] num_classes = max_score-min_score + 1 n_batches = round(X_train.shape[0]/batch_size) batch_gen = get_batches(X_train, y_train_adj, batch_size, net_type='mlp') mlp_net = MLP(input_dim=input_dim, hidden_dims=hidden_dims, num_classes=num_classes, weight_scale=weight_scale,\ l2_reg=l2_reg, keep_prob=keep_prob, regression=reg) print('Training Network...') train_loss_hist, val_loss_hist = mlp_net.train(gen=batch_gen, X_val=X_val, y_val=y_val_adj, n_epochs=n_epochs, n_batches=n_batches, lr=lr,\ save_every_n=5, model_name=model_name) fig = plot_train_loss(train_loss_hist, val_loss_hist, n_batches, model_name) py.iplot(fig, filename='basic-area')High five! You successfully sent some data to your account on plotly. View your plot in your browser at https://plot.ly/~pmt210/0 or inside your plot.ly account where it is named 'basic-area'Test the QWK of the trained modelNow we can use essays from the test dataset to obtain a quadratic weightedkappa (QWK) score for the model. This metric is used to quantify how wellthe model predicted the essay scores relative to random guessing. A valueof 0 indicates that the predictions were no better than random guessing,while a value of 1 indicates perfect matching between predictions and labels.data_path = './data/test_df.pkl' test_df = pd.read_pickle(data_path) df = test_df.loc[test_df['essay_set'] == set] X_test = np.array(df['essays_embed']) y_test = np.array(df['domain1_score']) X_test = np.stack(X_test, axis=0) X_test = np.reshape(X_test, [X_test.shape[0], -1]) print('There are {} testing essays'.format(X_test.shape[0])) if min_score != 0: y_test_adj = scores_to_preds(y_test, min_score) print('Testing labels shifted from a scale of ({},{}) to ({},{})'\ .format(min(y_test),max(y_test), min(y_test_adj), max(y_test_adj))) else: print('No score adjustment necessary') y_test_adj = y_test preds = mlp_net.predict('./model/'+model_name, X_test) # We need to map predictions from classes in the model to actual scores #preds = preds_to_scores(preds, min_score=min_score) from src.utils import quadratic_weighted_kappa y_test_adj = scores_to_preds(y_test, min_score) k = quadratic_weighted_kappa(y_test_adj, preds, num_classes) print('The quadratic weighted kappa score for set {} using {} is : {}'\ .format(set, model_name, k))The quadratic weighted kappa score for set 1 using mlp_set1 is : 0.7032761357170665RNN:The second half of this notebook may be used for training an RNN - specifically an LSTM or GRU# Define the path to the data data_path = './data/train_df.pkl' train_df = pd.read_pickle(data_path) # To further isolate our data, we will only examine essays from a single set # Feel free to experiment with different essay sets! 
set = 1 df = train_df.loc[train_df['essay_set'] == set] df.head() # In order to avoid bias toward more common scores, we will limit the number # of essays from each scoring bucket to a set value score_df = None min_score = int(df['min_score'].min()) max_score = int(df['max_score'].max()) n_max = 100 for i in range(min_score,max_score+1): if score_df is None: score_df = df.loc[df['domain1_score'] == i][:n_max] else: temp_df = df.loc[df['domain1_score'] == i][:n_max] score_df = pd.concat([score_df, temp_df]) df = score_df # Extract essay vectors and corresponding scores X = np.array(df['essays_embed']) y = np.array(df['domain1_score']) X = np.stack(X, axis=0) print('There are {} training essays, each of shape {} x {}'.format(X.shape[0], X.shape[1], X.shape[2]))There are 566 training essays, each of shape 426 x 200The next step is to shuffle the data and separate it into training and validation sets.X, y = shuffle(X, y) X_train, y_train, X_val, y_val = train_val_split(X, y, train_prop=0.85)Here we need to transform the labels to the form that the network will predict. For example, in set 1, the essays are graded on a scale from 2-12, therefore there are 11 classes into which the network will try to classify each essay. However, the network will classify essays into the scale 0-10. Therefore, this step will perform this shift on the labels.if min_score != 0: y_train_adj = scores_to_preds(y_train, min_score) print('Training labels shifted from a scale of ({},{}) to ({},{})'\ .format(min(y_train),max(y_train), min(y_train_adj), max(y_train_adj))) y_val_adj = scores_to_preds(y_val, min_score) print('Validation labels shifted from a scale of ({},{}) to ({},{})'\ .format(min(y_val),max(y_val), min(y_val_adj), max(y_val_adj))) else: print('No score adjustment necessary') y_train_adj = y_train y_val_adj = y_valTraining labels shifted from a scale of (2,12) to (0,10) Validation labels shifted from a scale of (2,12) to (0,10)Initial RNNHere we define an RNN model to train. The parameters below were the initial parameters tested on the dataset. model learns the training and validation set well. It serves as a good baseline from which you can design your own RNN. If you'd like, you may skip training this model to save time and move directly to training your own model with tunable parameters.# User Defined Parameters batch_size = 32 cell_type = 'lstm' rnn_size = 128 lr = 1e-3 n_epochs = 20 keep_prob = 1 # Derived Parameters model_name = cell_type+'_set'+'{}'.format(set) num_classes = max_score-min_score + 1 n_batches = round(X_train.shape[0]/batch_size) seq_length = X_train.shape[1] embed_size = X_train.shape[2] X_val_t = X_val[:batch_size] y_val_t = y_val_adj[:batch_size] batch_gen = get_batches(X_train, y_train_adj, batch_size, net_type=cell_type) rnn_net = RNN(num_classes, batch_size, seq_length, embed_size, cell_type=cell_type, rnn_size=rnn_size, learning_rate=lr, train_keep_prob=1) print('Training Network...') train_loss_hist, val_loss_hist = rnn_net.train(batch_gen, X_val_t, y_val_t,\ n_epochs, n_batches, save_every_n=5,\ model_name=model_name) fig = plot_train_loss(train_loss_hist, val_loss_hist, n_batches, model_name) py.iplot(fig, filename='basic-area')High five! You successfully sent some data to your account on plotly. View your plot in your browser at https://plot.ly/~pmt210/0 or inside your plot.ly account where it is named 'basic-area'Train your own RNNThe LSTM above is able to learn the training set and performance on the validation set is comparable. 
These preliminary results are promising, but changing hyperparameters can yield even better results. Below is another RNN model definition. Again, many parameters can be modified by the user or left alone with the parameters that yielded our best results.The model will be saved to the 'model/' directory of this project. After many iterations, we found the following mlp parameters yielded the best results on both the training and validation sets:# User Defined Parameters batch_size = 32 cell_type = 'gru' rnn_size = 256 lr = 1e-3 n_epochs = 20 keep_prob = 1 # Derived Parameters model_name = cell_type+'_set'+'{}'.format(set) num_classes = max_score-min_score + 1 n_batches = round(X_train.shape[0]/batch_size) seq_length = X_train.shape[1] embed_size = X_train.shape[2] X_val_t = X_val[:batch_size] y_val_t = y_val_adj[:batch_size] batch_gen = get_batches(X_train, y_train_adj, batch_size, net_type=cell_type) rnn_net = RNN(num_classes, batch_size, seq_length, embed_size, cell_type=cell_type, rnn_size=rnn_size, learning_rate=lr, train_keep_prob=1) print('Training Network...') train_loss_hist, val_loss_hist = rnn_net.train(batch_gen, X_val_t, y_val_t,\ n_epochs, n_batches, save_every_n=2,\ model_name=model_name) fig = plot_train_loss(train_loss_hist, val_loss_hist, n_batches, model_name) py.iplot(fig, filename='basic-area')High five! You successfully sent some data to your account on plotly. View your plot in your browser at https://plot.ly/~pmt210/0 or inside your plot.ly account where it is named 'basic-area'Test the QWK of the trained modelNow we can use essays from the test dataset to obtain a quadratic weightedkappa (QWK) score for the model. This metric is used to quantify how wellthe model predicted the essay scores relative to random guessing. A valueof 0 indicates that the predictions were no better than random guessing,while a value of 1 indicates perfect matching between predictions and labels.data_path = './data/test_df.pkl' test_df = pd.read_pickle(data_path) df = test_df.loc[test_df['essay_set'] == set] X_test = np.array(df['essays_embed']) y_test = np.array(df['domain1_score']) X_test = np.stack(X_test, axis=0) print('There are {} testing essays'.format(X_test.shape[0])) if min_score != 0: y_test_adj = scores_to_preds(y_test, min_score) print('Testing labels shifted from a scale of ({},{}) to ({},{})'\ .format(min(y_test),max(y_test), min(y_test_adj), max(y_test_adj))) else: print('No score adjustment necessary') y_test_adj = y_test batch_size = X_test.shape[0] seq_length = X_test.shape[1] embed_size = X_test.shape[2] pred_net = RNN(num_classes, batch_size, seq_length, embed_size, cell_type=cell_type, rnn_size=rnn_size, learning_rate=lr, train_keep_prob=1) preds = pred_net.predict('./model/'+model_name, X_test) k = quadratic_weighted_kappa(preds[0], y_test_adj, num_classes) print('The quadratic weighted kappa score for set {} using {} is : {}'\ .format(set, model_name, k))The quadratic weighted kappa score for set 1 using gru_set1 is : 0.6883838173986511Results Visualizationsets=['set1','set3','set4','set5','set6'] #First here is the training time for each set for each model MLP_training_time = [170.3, 25.5, 11.7, 66.4, 33.1] LSTM_training_time = [157.0, 35.0, 30.0, 39.1, 52.3] GRU_training_time = [177.3, 31.4, 33.1, 42.1, 55.4] trace1 = go.Bar(x=sets,y=MLP_training_time,name='MLP') trace2 = go.Bar(x=sets,y=LSTM_training_time,name='LSTM') trace3 = go.Bar(x=sets,y=GRU_training_time,name='GRU') data = [trace1, trace2, trace3] layout = go.Layout(barmode='group', title='Training Times for 
each Network and Essay Set', xaxis=dict( title='Essay Set'), yaxis=dict( title='Training Time (s)'), showlegend=True) fig = go.Figure(data=data, layout=layout) py.iplot(fig, filename='grouped-bar') #Then, here is the kappa value for each set for each model MLP_kappa = [0.725, 0.546, 0.600, 0.626, 0.512] LSTM_kappa = [0.69, 0.579, 0.551, 0.658, 0.688] GRU_kappa = [0.69, 0.506, 0.689, 0.664, 0.736] trace1 = go.Bar(x=sets,y=MLP_kappa,name='MLP') trace2 = go.Bar(x=sets,y=LSTM_kappa,name='LSTM') trace3 = go.Bar(x=sets,y=GRU_kappa,name='GRU') data = [trace1, trace2, trace3] layout = go.Layout(barmode='group') fig = go.Figure(data=data, layout=layout) py.iplot(fig, filename='grouped-bar')Домашнее задание1. Сделать класс нейронки, вписать необходимые операции, архитектура ниже1. Написать обучалку (обобщить то, что было выше)1. Добавить логирование 1. Сохранять лосс на каждой итерции обучения __0.25 балла__ ✓ 1. Каждую эпоху сохранять лосс трейна и тест __0.25 балла__ ✓ 1. Каждую эпоху рассчитывать метрики __0.25 балла__ ✓ 1. Добавить прогресс бар, в котором показывается усредненный лосс последних 500-та итераций __0.25 балла__ ✓1. Добавить early stopping __0.5 балла__1. Нарисовать графики лосса, метрик, конфьюжин матрицу __0.5 балла__ ✓ Архитектура (что можно попробовать)1. Предобученные эмбеддинги. Почитайте [здесь](https://pytorch.org/docs/stable/nn.htmlembedding) (from_pretrained) как вставить свои эмбеддинги, выше мы читали матрицу эмбеддингов. __0 баллов__ ✓1. Дообучить эмбеддинги отдельно от сети. __2 балла__1. Дообучить эмбеддинги вместе с сетью и с другим learning rate (указывается в оптимизаторе). __2 балла__ ✓1. Bidirectional LSTM. __1 балл__ ✓1. Несколько параллельных CNN с разными размерами окна и mean/max over time пулингами к ним и дальнейшей конкатенацией. __2 балла__ ✓1. Несколько последовательных CNN. __1 балла__ ✓1. Разные окна и residual к предыдущему пункту. __2 балла__ ✓1. Предыдущий пункт сделан без ошибок (замаскированы свертки паддингов). __2 балла__1. Написать правильный mean/max пулинг, который не учитывает паддинги, точнее их маскирует. __2 балла__1. Добавить [torch.nn.utils.rnn.pack_padded_sequence()](https://pytorch.org/docs/stable/nn.htmltorch.nn.utils.rnn.pack_padded_sequence) и [torch.nn.utils.rnn.pack_sequence()](https://pytorch.org/docs/stable/nn.htmltorch.nn.utils.rnn.pack_sequence) для LSTM. Инфа [здесь](Еще-важный-момент-про-LSTM) __2 балла__ ✓1. Добавить spatial дропаут для входа LSTM (не просто стандартный пункт при инициализации LSTM) __1 балл__1. Добавить BatchNorm/LayerNorm/Dropout/Residual/etc __1 балл__ ✓1. Добавить шедуллер __1 балл__ ✓1. Обучать на GPU __2 балла__1. Сделать transfer learning с собственно обученной языковой модели, обученной на любых данных, например, unlabeled. __7 баллов__1. 
your madness 10 баллов максимумimport math import numpy as np import pandas as pd from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from tqdm import tqdm import torch from torch.utils.data import DataLoader from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence from torch import nn import torch.nn.functional as F import zipfile import seaborn as sns import matplotlib.pyplot as plt # вынес в отдельные файлы, чтобы не забивать тетрадку from src.data import Parser from src.utils import load_embeddings, TextClassificationDataset from src.train import train_model from typing import Iterable, List, Tuple from nltk.tokenize import wordpunct_tokenizeЧитаем и обрабатываем данныеdata_path = '/mnt/f/data/dl' parser = Parser(data_path=data_path) unlabeled, train, valid = parser.run() unique_categories = set(train.category.unique().tolist() + valid.category.unique().tolist()) category2index = {category: index for index, category in enumerate(unique_categories)} train['target'] = train.category.map(category2index) valid['target'] = valid.category.map(category2index) vocab, embeddings = load_embeddings('/mnt/f/data/models/wiki-news-300d-1M.vec.zip', 'wiki-news-300d-1M.vec', max_words=100_000) train_x, train_y = train.question.tolist(), train.target.tolist() valid_x, valid_y = valid.question.tolist(), valid.target.tolist() train_ds = TextClassificationDataset(texts=train_x, targets=train_y, vocab=vocab) valid_ds = TextClassificationDataset(texts=valid_x, targets=valid_y, vocab=vocab) train_loader = DataLoader(train_ds, batch_size=512) valid_loader = DataLoader(valid_ds, batch_size=512)Архитектура сетиclass MyNet(nn.Module): def __init__(self, embeddings: np.ndarray, n_filters: int, kernel_sizes: List[int], n_classes: int, dropout: float, lstm_hidden_size: int): super().__init__() self.lstm_hidden_size = lstm_hidden_size self.embedding_layer = nn.Embedding.from_pretrained(torch.tensor(embeddings).float(), padding_idx=0, freeze=False) self.embedding_dim = embeddings.shape[-1] self.convs = nn.ModuleList([nn.Conv1d(in_channels=self.lstm_hidden_size * 2, out_channels=n_filters, kernel_size=ks) for ks in kernel_sizes]) self.linear_final = nn.Linear(len(kernel_sizes) * n_filters, n_classes) self.dropout = nn.Dropout(dropout) self.batch_norm = nn.BatchNorm1d(num_features=len(kernel_sizes) * n_filters) self.residual_conv = nn.Conv1d(in_channels=self.lstm_hidden_size * 2, out_channels=len(kernel_sizes) * n_filters, kernel_size=1) self.conv_2 = nn.Conv1d(in_channels=len(kernel_sizes) * n_filters, out_channels=len(kernel_sizes) * n_filters, kernel_size=2) self.conv_3 = nn.Conv1d(in_channels=len(kernel_sizes) * n_filters, out_channels=len(kernel_sizes) * n_filters, kernel_size=3) self.avg_pool = nn.AvgPool1d(kernel_size=3, stride=1, padding=1, count_include_pad=False, ceil_mode=False) self.lstm = torch.nn.LSTM(self.embedding_dim, lstm_hidden_size, batch_first=True, bidirectional=True) @staticmethod def pad_convolution(x, kernel_size): x = F.pad(x.transpose(1, 2), (kernel_size - 1, 0)) return x.transpose(1, 2) @staticmethod def count_pads(x, axis=1): return torch.Tensor(np.count_nonzero(x, axis=axis)) # торчовая функция почему-то не находится def forward(self, x): lengths = self.count_pads(x) x = self.embedding_layer(x) x = pack_padded_sequence(x, lengths, batch_first=True, enforce_sorted=False) x, memory = self.lstm(x) x = pad_packed_sequence(x, batch_first=True)[0] residual = self.residual_conv(x.transpose(1, 
2)).transpose(1, 2) convs = [F.relu(conv(self.pad_convolution(x, conv.kernel_size[0]).transpose(1, 2)).transpose(1, 2)) for conv in self.convs] convs = [self.avg_pool(conv.transpose(1, 2)).transpose(1, 2) # FIX for conv in convs] x = torch.cat(convs, 2) x = x + residual x = self.dropout(x) residual = x x = F.relu(self.conv_2(self.pad_convolution(x, self.conv_2.kernel_size[0]).transpose(1, 2)).transpose(1, 2)) x = self.avg_pool(x.transpose(1, 2)).transpose(1, 2) x = x + residual residual = x x = F.relu(self.conv_3(self.pad_convolution(x, self.conv_3.kernel_size[0]).transpose(1, 2)).transpose(1, 2)) x = x + residual x = x.mean(dim=1) # FIX x = self.batch_norm(x) x = self.dropout(x) return self.linear_final(x) model = MyNet(embeddings=embeddings, n_filters=128, kernel_sizes=[2, 3, 4], n_classes=len(category2index), dropout=0.15, lstm_hidden_size=128) layer_list = ['embedding_layer.weight'] params = list(map(lambda x: x[1], list(filter(lambda kv: kv[0] in layer_list, model.named_parameters())))) base_params = list(map(lambda x: x[1], list(filter(lambda kv: kv[0] not in layer_list, model.named_parameters())))) criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam([{'params': base_params}, {'params': params, 'lr': 1e-5}], lr=1e-2) scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)Trainingmodel, losses, metrics, valid_preds, valid_targets = train_model(model, train_loader, valid_loader, optimizer, criterion, scheduler, epochs=6)Epoch 1 of 6: 100%|██████████| 250000/250000 [17:02<00:00, 244.50it/s, train_loss=1.02] Epoch 2 of 6: 0%| | 0/250000 [00:00Resultsprint(classification_report(valid_targets, valid_preds))precision recall f1-score support 0 0.77 0.66 0.71 2131 1 0.59 0.62 0.61 2978 2 0.84 0.70 0.76 10187 3 0.61 0.82 0.70 12699 4 0.78 0.62 0.69 4998 5 0.84 0.83 0.84 8833 6 0.74 0.65 0.69 4117 7 0.72 0.57 0.64 4057 accuracy 0.72 50000 macro avg 0.74 0.68 0.71 50000 weighted avg 0.74 0.72 0.73 50000Overall, the results are quite decent. Some categories are identified better than others.(train.target.value_counts() / train.shape[0]).sort_index() category2index plt.figure(figsize=(20, 15)) cf = confusion_matrix(valid_targets, valid_preds, normalize='pred') g = sns.heatmap(cf, annot=True)We can see that the model predicts all categories reasonably well, except for `baby` and `sports and outdoors`, which make up 5 and 25 percent of the dataset respectively. The `sports and outdoors` category is most often mispredicted as `baby`, and `baby` as `sports and outdoors`, i.e. the model confuses these two categories with each other. The `cell phones and accessories` category is identified better than all the others. I suspect this is because the descriptions in that category overlap the least with those of all the others.train.loc[train.target == 3].tail(3).values train.loc[train.target == 1].tail(3).valuesPresumably this is because both categories contain specifications for clothing and all sorts of accessories, so it may be hard for the model to tell one from the other.metrics_df = pd.DataFrame.from_dict(metrics) g = metrics_df.plot(figsize=(14, 12))We can see that the model starts to overfit almost immediately.
Perhaps this is because, for such a complex architecture, we simply do not have enough data.plt.figure(figsize=(14, 12)) plt.plot(losses) plt.grid() plt.title('Training process') plt.xlabel('Iterations') plt.ylabel('Loss function');bitcoin addresshash_no_3_2.hexdigest()[0:8] 'a7dad52e' + '0095b238b30d7b4b0ccabf67470299bfb681119b91'base 64 bitcoin addbase64.b64encode(b'a7dad52e0095b238b30d7b4b0ccabf67470299bfb681119b91') print('YTdkYWQ1MmUwMDk1YjIzOGIzMGQ3YjRiMGNjYWJmNjc0NzAyOTliZmI2ODExMTliOTE=')YTdkYWQ1MmUwMDk1YjIzOGIzMGQ3YjRiMGNjYWJmNjc0NzAyOTliZmI2ODExMTliOTE=Encryptionfrom Crypto.Cipher import PKCS1_OAEP from Crypto.Signature.pkcs1_15 import PKCS115_SigScheme encrypted_message = PKCS1_OAEP.new(pubKey).encrypt(b'Huda Muhammad Ilyas') encrypted_message.hex() PKCS1_OAEP.new(privKey).decrypt(encrypted_message)Digital Signaturehash_name = SHA256.new(b'Huda Muhammad Ilyas')Forecast Scheduling - Period ClassAs described before in the Introduction, the `Period` class is the one that contains all the parameters needed to define the forecast models for the rest of the wrappers (`Scenario`, `Well`, `WellsGroup`).from dcapy import dca from dcapy.schedule import Period import numpy as np import pandas as pd from datetime import date import matplotlib.pyplot as plt from scipy import stats import seaborn as snsCreate a dca Modeldec_model = dca.Arps( ti = date(2021,1,1), di = 0.3, freq_di = 'A', qi = [80,100], b = 0, fluid_rate = 250 ) #Create forecast print(dec_model.forecast(start = date(2021,1,1), end=date(2021,6,1), freq_output='M'))oil_rate oil_cum iteration oil_volume fluid_rate \ date 2021-01 80.000000 0.000000 0 2448.672116 250.0 2021-02 77.987393 2448.672116 0 2303.691934 250.0 2021-03 76.213109 4607.383867 0 2245.736596 250.0 2021-04 74.295771 6940.145308 0 2267.189892 250.0 2021-05 72.486222 9141.763651 0 2210.152858 250.0 2021-06 70.662643 11360.451023 0 2218.687372 250.0 2021-01 100.000000 0.000000 1 3060.840145 250.0 2021-02 97.484241 3060.840145 1 2879.614917 250.0 2021-03 95.266386 5759.229834 1 2807.170745 250.0 2021-04 92.869714 8675.181635 1 2833.987365 250.0 2021-05 90.607777 11427.204563 1 2762.691072 250.0 2021-06 88.328304 14200.563778 1 2773.[...]Create a `Period` InstanceTo create a `Period` instance you have to provide a `dca` model (either `Arps` or `Wor`), a range of dates, and an output frequency. These parameters are the same ones you would provide to generate a forecast using the dca model alone. However, later in the notebook and on the next pages it is shown which additional parameters can be defined when creating a `Period` instance.The first way to create an instance is by providing the right keyword arguments. `Pydantic` is used to validate the user input. Example.
Create instancep1 = Period( name = 'Period-1', dca = dec_model, start = date(2021,1,1), end = date(2021,6,1), freq_output='M' ) print(type(p1)) print(p1.json(exclude_unset=True, indent=2)){ "name": "Period-1", "dca": { "qi": [ 80.0, 100.0 ], "di": 0.3, "b": 0.0, "ti": "2021-01-01", "freq_di": "A", "fluid_rate": 250.0 }, "start": "2021-01-01", "end": "2021-06-01", "freq_output": "M" }Wrong input passedtry: p1 = Period( name = 'Period-1', dca = 'string', start = date(2021,1,1), end = date(2021,6,1), freq_output='BM' ) except Exception as e: print(e)3 validation errors for Period dca value is not a valid dict (type=type_error.dict) dca value is not a valid dict (type=type_error.dict) freq_output value is not a valid enumeration member; permitted: 'A', 'M', 'D' (type=type_error.enum; enum_values=[, , ])The wrong user input trigger the Pydantic validation error indicating the `dna` is not valid neither does `freq output`Pydantic allows to create instances by passing a dictionary and it will validate even the deeper instances, for example the dca model Example create Period by passing dictionaryp1_dict = { 'name':'Period-1', 'dca': { 'ti':'2021-01-01', 'di':0.3, 'freq_di':'A', 'qi':[80,100], 'b':0, 'fluid_rate':250 }, 'start':'2021-01-01', 'end':'2022-01-01', 'freq_output':'M' } p1 = Period(**p1_dict) print(p1)name='Period-1' cashflow_params=None cashflow=None forecast=None seed=None iter=1 ppf=None description=None id=None dca=Declination Ti: 2021-01-01 Qi: [80.0, 100.0] bbl/d Di: 0.3 A b: 0.0 start=datetime.date(2021, 1, 1) end=datetime.date(2022, 1, 1) time_list=None freq_input='D' freq_output= rate_limit=None cum_limit=None depends=None type=It automatically validates dates even they are strings, floats and deeper instances like dca.ArpsIf an input error is made on dca model the validator will also detect where is the mistake. 
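For instance, a minimal sketch (with a deliberately invalid, hypothetical `di` value; the exact wording of the error depends on the pydantic version) of how a mistake nested inside the `dca` dictionary is reported:
bad_dict = {
    'name':'Period-bad',
    'dca': {
        'ti':'2021-01-01',
        'di':'not-a-number',  # hypothetical invalid declination rate
        'freq_di':'A',
        'qi':80,
        'b':0,
        'fluid_rate':250
    },
    'start':'2021-01-01',
    'end':'2022-01-01',
    'freq_output':'M'
}

try:
    Period(**bad_dict)
except Exception as e:
    # pydantic points at the offending nested field (dca -> di)
    print(e)
This mirrors the earlier wrong-input example, but with the error buried one level deeper in the nested model.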
To generate the forecast of the period just call the method `generate_forecast`print(p1.generate_forecast())oil_rate oil_cum iteration oil_volume fluid_rate \ date 2021-01 80.000000 0.000000 0 2448.672116 250.0 2021-02 77.987393 2448.672116 0 2303.691934 250.0 2021-03 76.213109 4607.383867 0 2245.736596 250.0 2021-04 74.295771 6940.145308 0 2267.189892 250.0 2021-05 72.486222 9141.763651 0 2210.152858 250.0 2021-06 70.662643 11360.451023 0 2156.322329 250.0 2021-07 68.941582 13454.408309 0 2102.074456 250.0 2021-08 67.207178 15564.599934 0 2083.647957 250.0 2021-09 65.516407 17621.704223 0 1999.281182 250.0 2021-10 63.920689 19563.162298 0 1948.984085 250.0 2021-11 62.312598 21519.672393 0 1901.514589 250.0 2021-12 60.794911 23366.191475 0 1853.[...]Add Rate limitp1_dict = { 'name':'Period-1', 'dca': { 'ti':'2021-01-01', 'di':0.3, 'freq_di':'A', 'qi':[80,100], 'b':0, 'fluid_rate':250 }, 'start':'2021-01-01', 'end':'2022-01-01', 'freq_output':'M', 'rate_limit': 70 } p1 = Period(**p1_dict) print(p1.generate_forecast()) ### Probabilistic Variables p1_dict = { 'name':'Period-1', 'dca': { 'ti':'2021-01-01', 'di':0.3, 'freq_di':'A', 'qi':{'dist':'norm','kw':{'loc':90, 'scale':10}}, 'b':0, 'fluid_rate':250 }, 'start':'2021-01-01', 'end':'2022-01-01', 'freq_output':'M', 'rate_limit': 70, 'iter':20 } p1 = Period(**p1_dict) prob_forecast = p1.generate_forecast() fig, ax = plt.subplots(2,1, figsize=(7,10)) sns.lineplot(data=prob_forecast, x = prob_forecast.index.to_timestamp(), y='oil_rate', ax=ax[0]) sns.lineplot(data=prob_forecast, x = prob_forecast.index.to_timestamp(), y='oil_rate',hue='iteration', ax=ax[1])/home/scuervo/Documents/dev/apps/dcapy/dcapy/dca/arps.py:245: RuntimeWarning: invalid value encountered in true_divide (np.power(qi / rate, b) - 1)/(b * di) /home/scuervo/Documents/dev/apps/dcapy/dcapy/dca/arps.py:68: RuntimeWarning: divide by zero encountered in true_divide return qi/np.power(1+b*di*time_array,1/b) /home/scuervo/Documents/dev/apps/dcapy/dcapy/dca/arps.py:85: RuntimeWarning: divide by zero encountered in true_divide g = np.power(b*di*time_array+1,(b-1)/b) /home/scuervo/Documents/dev/apps/dcapy/dcapy/dca/arps.py:86: RuntimeWarning: divide by zero encountered in true_divide h = np.power(b*di*ti+1,(b-1)/b)Add Cashflow ParametersAdding Cashflow parameters is allowed with the purpose of creating a cashflow model for the period. The `Period` instance receive a list of `CashFlowParams` instances. That means you can add as many parameters as you want. To define a basic cashflow parameter you have to provide the next key-arguments:1. Name for the cashflow2. Value (single value, list of values, date-value pair, probabilistic variable or a Wiener Proccess)3. Target (It defines if the resulting cashflow is income, capex or opex)3. Multiply (It defines if the value must be multiplied by a given column of the forecast)Let's define some cashflow parameters when creating a period:p1cash_dict = { 'name':'Period-1', 'dca': { 'ti':'2021-01-01', 'di':0.3, 'freq_di':'A', 'qi':800, 'b':0, 'fluid_rate':250 }, 'start':'2021-01-01', 'end':'2022-01-01', 'freq_output':'M', 'rate_limit': 70, #Cashflow params keyword. 
It accept a list 'cashflow_params':[ { 'name':'fix_opex', 'value':-5000, #Fix opex of U$ 5000 monthly 'target':'opex', #The cashflow generated is going to be an Opex in the cashflow model 'freq_value':'M' #The frequency of the value is in Months }, { 'name':'var_opex', 'value':-12, #Variable Opex 12 USD/bbl of oil 'target':'opex', #The cashflow generated is going to be an Opex in the cashflow model 'multiply':'oil_volume' #Multiply the 12 USD/bbl by the oil_volume Column which is the monthly cumulative oil }, { 'name':'income', 'value':60, #Oil price 60 usd/bbl 'target':'income', #The cashflow generated is going to be an Income in the cashflow model 'multiply':'oil_volume', # Multiply the 60 USD/bbl by the oil_volume column 'wi':0.9, #working Interest. In this case represent 10% royalties }, { 'name':'capex_drill', 'value':-3000000, # 3 Million dollar of capex 'target':'capex', #The cashflow generated is going to be aCapex in the cashflow model 'periods':1, # repeat the value only one period } ] } p1_cash = Period(**p1cash_dict)Generate forecast??? note Default working interest for a `CashFlowParameters` is 1forecast = p1_cash.generate_forecast() print(forecast)oil_rate oil_cum iteration oil_volume fluid_rate \ date 2021-01 800.000000 0.000000 0 24486.721159 250.0 2021-02 779.873928 24486.721159 0 23036.919336 250.0 2021-03 762.131092 46073.838672 0 22457.365959 250.0 2021-04 742.957710 69401.453077 0 22671.898918 250.0 2021-05 724.862217 91417.636508 0 22101.528575 250.0 2021-06 706.626430 113604.510228 0 21563.223292 250.0 2021-07 689.415822 134544.083091 0 21020.744556 250.0 2021-08 672.071781 155645.999340 0 20836.479567 250.0 2021-09 655.164075 176217.042226 0 19992.811818 250.0 2021-10 639.206885 195631.622975 0 19489.840850 250.0 2021-11 623.125980 215196.723927 0 19015.145887 250.0 2021-12 607.949111 2336[...]Generate a cashflowWhen calling the `generate_cashflow` method it return a list of `CashFlowModel`cf_model = p1_cash.generate_cashflow() for i in cf_model: print(type(i)) print(cf_model[0].fcf())income_Period-1 total_income fix_opex_Period-1 var_opex_Period-1 \ 2021-01 1.322283e+06 1.322283e+06 -5000.0 -293840.653906 2021-02 1.243994e+06 1.243994e+06 -5000.0 -276443.032030 2021-03 1.212698e+06 1.212698e+06 -5000.0 -269488.391508 2021-04 1.224283e+06 1.224283e+06 -5000.0 -272062.787018 2021-05 1.193483e+06 1.193483e+06 -5000.0 -265218.342905 2021-06 1.164414e+06 1.164414e+06 -5000.0 -258758.679501 2021-07 1.135120e+06 1.135120e+06 -5000.0 -252248.934673 2021-08 1.125170e+06 1.125170e+06 -5000.0 -250037.754807 2021-09 1.079612e+06 1.079612e+06 -5000.0 -239913.741813 2021-10 1.052451e+06 1.052451e+06 -5000.0 -233878.090205 2021-11 1.026818e+06 1.026818e+06 -5000.0 -228181.750639 2021-12 1.000986e+06 1.000986e+06 [...]Make a plot of the Cashflow Modelfig, ax= plt.subplots(figsize=(15,7)) cf_model[0].plot(cum=True, ax=ax) ## Estimate the NPV and IRR p1_cash.npv([0.1,0.15], freq_rate='A', freq_cashflow='M') p1_cash.irr()Cashflow with Multiple IterationsWhether you define multiple or Probabilistic variables when cashflow parameters are defined for the `Period` the same amount of cash flow models are generated as forecast iterations are.p2cash_dict = { 'name':'Period-1', 'dca': { 'ti':'2021-01-01', 'di':0.3, 'freq_di':'A', 'qi':[800,700,500], 'b':[0,0.5,1], 'fluid_rate':250 }, 'start':'2021-01-01', 'end':'2022-01-01', 'freq_output':'M', 'rate_limit': 70, #Cashflow params keyword. 
It accept a list 'cashflow_params':[ { 'name':'fix_opex', 'value':-5000, #Fix opex of U$ 5000 monthly 'target':'opex', #The cashflow generated is going to be an Opex in the cashflow model 'freq_value':'M' #The frequency of the value is in Months }, { 'name':'var_opex', 'value':-12, #Variable Opex 12 USD/bbl of oil 'target':'opex', #The cashflow generated is going to be an Opex in the cashflow model 'multiply':'oil_volume' #Multiply the 12 USD/bbl by the oil_volume Column which is the monthly cumulative oil }, { 'name':'income', 'value':60, #Oil price 60 usd/bbl 'target':'income', #The cashflow generated is going to be an Income in the cashflow model 'multiply':'oil_volume', # Multiply the 60 USD/bbl by the oil_volume column 'wi':0.9, #working Interest. In this case represent 10% royalties }, { 'name':'capex_drill', 'value':-3000000, # 3 Million dollar of capex 'target':'capex', #The cashflow generated is going to be aCapex in the cashflow model 'periods':1, # repeat the value only one period } ] } p2_cash = Period(**p2cash_dict) p2_forecast = p2_cash.generate_forecast() p2_cashflow = p2_cash.generate_cashflow() n_cashflows = len(p2_cashflow) fig, ax= plt.subplots(n_cashflows,2,figsize=(15,7)) for i in range(n_cashflows): forecast_iteration = p2_forecast[p2_forecast['iteration']==i] sns.lineplot(data =forecast_iteration, x=forecast_iteration.index.to_timestamp(), y='oil_rate', ax=ax[i,0]) p2_cashflow[i].plot(cum=True, ax=ax[i,1]) p2_cash.npv([0.1,0.17], freq_rate='A', freq_cashflow='M')Multiple Cashflow ParamsCashflow parameters values can also be evaluated with multiple iterations. ??? note When creating multiple iterations either on dca or cashflow parameters, the number of iterations must be the same in other to create element-wise models.p3cash_dict = { 'name':'Period-1', 'dca': { 'ti':'2021-01-01', 'di':0.3, 'freq_di':'A', 'qi':700, 'b':0, 'fluid_rate':250 }, 'start':'2021-01-01', 'end':'2022-01-01', 'freq_output':'M', 'rate_limit': 70, #Cashflow params keyword. It accept a list 'cashflow_params':[ { 'name':'fix_opex', 'value':-5000, #Fix opex of U$ 5000 monthly 'target':'opex', #The cashflow generated is going to be an Opex in the cashflow model 'freq_value':'M' #The frequency of the value is in Months }, { 'name':'var_opex', 'value':-12, #Variable Opex 12 USD/bbl of oil 'target':'opex', #The cashflow generated is going to be an Opex in the cashflow model 'multiply':'oil_volume' #Multiply the 12 USD/bbl by the oil_volume Column which is the monthly cumulative oil }, { 'name':'income', 'value':[20,30,40,60,80], #Oil price 60 usd/bbl 'target':'income', #The cashflow generated is going to be an Income in the cashflow model 'multiply':'oil_volume', # Multiply the 60 USD/bbl by the oil_volume column 'wi':0.9, #working Interest. 
In this case represent 10% royalties }, { 'name':'capex_drill', 'value':-3000000, # 3 Million dollar of capex 'target':'capex', #The cashflow generated is going to be aCapex in the cashflow model 'periods':1, # repeat the value only one period } ] } p3_cash = Period(**p3cash_dict) p3_forecast = p3_cash.generate_forecast() p3_cashflow = p3_cash.generate_cashflow() n_cashflows = len(p3_cashflow) fig, ax= plt.subplots(n_cashflows,1,figsize=(15,15), gridspec_kw={'hspace':0.4}) for i in range(n_cashflows): p3_cashflow[i].plot(cum=True, ax=ax[i])/home/scuervo/Documents/dev/apps/dcapy/dcapy/cashflow/cashflow.py:351: UserWarning: FixedFormatter should only be used together with FixedLocator grax.set_yticklabels([fmt.format(i/format_dict[format]['factor']) for i in ticks]) /home/scuervo/Documents/dev/apps/dcapy/dcapy/cashflow/cashflow.py:359: UserWarning: FixedFormatter should only be used together with FixedLocator spax.set_yticklabels([fmt.format(i/format_dict[format]['factor']) for i in ticks_cum]) /home/scuervo/Documents/dev/apps/dcapy/dcapy/cashflow/cashflow.py:351: UserWarning: FixedFormatter should only be used together with FixedLocator grax.set_yticklabels([fmt.format(i/format_dict[format]['factor']) for i in ticks]) /home/scuervo/Documents/dev/apps/dcapy/dcapy/cashflow/cashflow.py:359: UserWarning: FixedFormatter should only be used together with FixedLocator spax.set_yticklabels([fmt.format(i/format_dict[format]['factor']) for i in ticks_cum]) /home/scuervo/Documents/dev/apps/dcapy/dcapy/cashflow/cashflow.py:35[...]Here, the same forecast was used to create five different cashflow models according with the iterations defined on the Oil pricep3_cash.npv([0.1], freq_rate='A', freq_cashflow='M')Export the modelAll classes in `dcapy` are based on Pydantic, hence they can be directly exported to a dictionary, json and further to yml Export to Dictionaryprint(p3_cash.dict(exclude={'forecast','cashflow'}, exclude_unset=True)){'name': 'Period-1', 'cashflow_params': [{'name': 'fix_opex', 'value': -5000.0, 'target': , 'freq_value': }, {'name': 'var_opex', 'value': -12.0, 'target': , 'multiply': 'oil_volume'}, {'name': 'income', 'wi': 0.9, 'value': [20.0, 30.0, 40.0, 60.0, 80.0], 'target': , 'multiply': 'oil_volume'}, {'name': 'capex_drill', 'periods': 1, 'value': -3000000.0, 'target': }], 'dca': {'qi': 700.0, 'di': 0.3, 'b': 0.0, 'ti': datetime.date(2021, 1, 1), 'freq_di': , 'fluid_rate': 250.0}, 'start': datetime.date(2021, 1, 1), 'end': datetime.date(2022, 1, 1), 'freq_output': , 'rate_limit': 70.0}Export to jsonprint(p3_cash.json(exclude={'forecast','cashflow'}, exclude_unset=True)) p3_cash.tree()Export to cloudDcapy has integrated connection with an API hosted on [Heroku](https://www.heroku.com) that allows to save your models on the cloud. 
This allows you to create, update and delete your models on a remote database whicgh is accesible throught a single account (with Oauth2 Authentication).from dcapy.auth import Credential cred = Credential(token='') p3_cash.insert_db(cred, 'Period-Cash tutorial1')Analysisdata['Overall'].value_counts().hist().plot() data.corr() plt.figure(figsize=(20,15)) sns.heatmap(data.corr()) data.plot.scatter('Age','Overall') np.log(data['Overall']).plot.hist() data['Overall'].value_counts().plot.bar() sns.distplot(data["Overall"]) job=pd.crosstab(data['Overall'],data['Age']) job.div(job.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True, figsize=(8,8)) plt.ylabel('Age') corr = data.corr() mask = np.array(corr) mask[np.tril_indices_from(mask)] = False fig,ax= plt.subplots() fig.set_size_inches(20,10) sns.heatmap(corr, mask=mask,vmax=.9, square=True,annot=True, cmap="YlGnBu") X = data[['Age','Potential','GKHandling','Penalties','Composure','Marking','StandingTackle','SlidingTackle','GKPositioning','GKReflexes']] y = data['Overall'] y.shape X.shape X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0) X_trainSimple Linear RegressionMultiple Linear Regressionfrom sklearn.linear_model import LinearRegression lin_regressor=LinearRegression() lin_regressor.fit(X_train,y_train) prediction_linear=lin_regressor.predict(X_test) prediction_linear acc_LR=lin_regressor.score(X_test,y_test) print(acc_LR) lin_regressor.score(X_train,y_train) print("Linear") df = pd.DataFrame({'Actual': [y_test], 'Predicted':[prediction_linear]}) dfLinearLasso Regressionfrom sklearn.linear_model import Lasso lasso_regressor=Lasso() lasso_regressor.fit(X_train,y_train) prediction_lasso=lasso_regressor.predict(X_test) prediction_lasso lasso_regressor.score(X_test,y_test) print("Lasso") df = pd.DataFrame({'Actual': [y_test], 'Predicted':[prediction_lasso]}) dfLassoRidge Regressionfrom sklearn.linear_model import Ridge ridge_regressor=Ridge() ridge_regressor.fit(X_train,y_train) prediction_ridge=ridge_regressor.predict(X_test) prediction_ridge print("Rigde") df = pd.DataFrame({'Actual': [y_test], 'Predicted':[prediction_ridge]}) df ridge_regressor.score(X_test,y_test)Steps to install conda This article has been referred : https://towardsdatascience.com/conda-google-colab-75f7c867a522 ♒!which python # should return /usr/local/bin/python !python --version !echo $PYTHONPATH %env PYTHONPATH= %%bash MINICONDA_INSTALLER_SCRIPT=Miniconda3-4.5.4-Linux-x86_64.sh MINICONDA_PREFIX=/usr/local wget https://repo.continuum.io/miniconda/$MINICONDA_INSTALLER_SCRIPT chmod +x $MINICONDA_INSTALLER_SCRIPT ./$MINICONDA_INSTALLER_SCRIPT -b -f -p $MINICONDA_PREFIX !which conda # should return /usr/local/bin/conda !conda --version # should return 4.5.4 !python --version !pip install tqdm %%bash conda install --channel defaults conda python=3.6 --yes conda update --channel defaults --all --yes !conda --version # now returns 4.8.3 import sys _ = (sys.path .append("/usr/local/lib/python3.6/site-packages")) !conda install -y pytorch=1.5.0 torchvision=0.6.0 cudatoolkit=10.2 -c pytorch --yes !conda install opencv --yes !pip install \ open3d>=0.10.0.0 \ trimesh>=3.7.6 \ pyquaternion>=0.9.5 \ pytorch-lightning>=0.8.5 \ pyrender>=0.1.43 !pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.5/index.html import os from google.colab import files %cd # Clone the repo !git clone https://github.com/magicleap/Atlas.git %cd Atlas # Download pretrained weights if not os.path.exists('results'): !wget 
--load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=12P29x6revvNWREdZ01ufJwMFPl-FEI_V' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=12P29x6revvNWREdZ01ufJwMFPl-FEI_V" -O results.zip && rm -rf /tmp/cookies.txt !unzip results.zip # Download sample data if not os.path.exists('data'): !wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=13-D7QNVZjj864E768zJ7IWDDqY6agUES' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=13-D7QNVZjj864E768zJ7IWDDqY6agUES" -O data.zip && rm -rf /tmp/cookies.txt !unzip data.zip # verify everything was downloaded !ls !ls results/release/semseg !ls data/sample/sample1 !rm -rf data/sample/sample1/color !rm -rf data/sample/sample1/pose !unzip data/sample/sample1/color.zip -d data/sample/sample1 !pip install scikit-image # prepare data !python prepare_data.py --path data --path_meta data --dataset sample def standardize_name(filename): f, s = filename.split(".") f = f.zfill(8) return f+"."+s import os path = "data/sample/sample1/pose/" for filename in os.listdir("data/sample/sample1/pose"): os.rename(path+filename, path+standardize_name(filename)) import os path = "data/sample/sample1/color/" for filename in os.listdir("data/sample/sample1/color"): os.rename(path+filename, path+standardize_name(filename)) !pip uninstall torchvision -- yes !pip install torchvision==0.6.0 !pip install torch==1.5.0 pytorch-lightning>=0.8.5Found existing installation: torchvision 0.6.0a0+82fd1c8 Uninstalling torchvision-0.6.0a0+82fd1c8: Would remove: /usr/local/lib/python3.6/site-packages/torchvision /usr/local/lib/python3.6/site-packages/torchvision-0.6.0a0+82fd1c8-py3.6.egg-info Proceed (Y/n)? y Successfully uninstalled torchvision-0.6.0a0+82fd1c8 WARNING: Skipping yes as it is not installed. WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv Collecting torchvision==0.6.0 Downloading torchvision-0.6.0-cp36-cp36m-manylinux1_x86_64.whl (6.6 MB)  |████████████████████████████████| 6.6 MB 3.7 MB/s [?25hCollecting torch==1.5.0 Downloading torch-1.5.0-cp36-cp36m-manylinux1_x86_64.whl (752.0 MB)  |████████████████████████████████| 752.0 MB 3.4 kB/s [?25hRequirement already satisfied: numpy in /usr/local/lib[...]Important Change to makeBefore running inference, navigate to the file /root/Atlas/atlas/model.py**On line 92**, replace the line ```self.hparams = hparams```with ``` for key in hparams.keys(): self.hparams[key]=hparams[key]``` On line **107** in **inference.py** replace it with the line ```mesh_pred.export(os.path.join(save_path, '%s.obj'%scene))```!pip install protobuf==3.5.1 #Date 04-12-2021 #TypeError: __new__() got an unexpected keyword argument 'serialized_options' #Try updating protobuf to 3.6 !pip install protobuf==3.6 # run inference !python inference.py --model results/release/semseg/final.ckpt --scenes data/sample/sample1/info.json --voxel_dim 208 208 80 !conda install x264=='1!152.20180717' ffmpeg=4.0.2 -c conda-forge --yes # save input images as video #!ffmpeg -r 10 -f image2 -s 1920x1080 -i data/sample/sample1/color/%08d.jpg -vcodec libx264 -crf 25 -pix_fmt yuv420p results/release/semseg/test_final/sample1.mp4 !ffmpeg -r 10 -f image2 -s 640x480 -i data/sample/sample1/color/%08d.jpg -vcodec libx264 -crf 25 -pix_fmt yuv420p results/release/semseg/test_final/sample1.mp4ffmpeg version 4.0.2 Copyright (c) 2000-2018 the FFmpeg developers built with gcc 4.8.2 (GCC) 20140120 (Red Hat 4.8.2-15) configuration: --prefix=/usr/local --disable-doc --disable-openssl --enable-shared --enable-static --extra-cflags='-Wall -g -m64 -pipe -O3 -march=x86-64 -fPIC' --extra-cxxflags='-Wall -g -m64 -pipe -O3 -march=x86-64 -fPIC' --extra-libs='-lpthread -lm -lz' --enable-zlib --enable-pic --enable-pthreads --enable-gpl --enable-version3 --enable-hardcoded-tables --enable-avresample --enable-libfreetype --enable-gnutls --enable-libx264 --enable-libopenh264 libavutil 56. 14.100 / 56. 14.100 libavcodec 58. 18.100 / 58. 18.100 libavformat 58. 12.100 / 58. 12.100 libavdevice 58. 3.100 / 58. 3.100 libavfilter 7. 16.100 / 7. 16.100 libavresample 4. 0. 0 / 4. 0. 0 libswscale 5. 1.100 / 5. 1.100 libswresample 3. 1.100 / 3. 1.100 libpostproc 55. 1.100 / 55. 
1.100 Input #0, image2, from 'data/sample/sample1/color/%0[...]**For obj mesh refer this link** : https://github.com/magicleap/Atlas/issues/17files.download('results/release/semseg/test_final/sample1.obj') files.download('results/release/semseg/test_final/sample1.mp4')Initial Setup Please follow this documentation for finding out your api-key for your search engine.https://docs.microsoft.com/en-us/azure/search/search-security-api-keys Import all the required librariesimport requests import jsonDefine all the parameters# Define all the parameters api_key = '#SEARCH_KEY#' service_name = "#SEARCH_NAME#" indexer_name = "osha-final" index_name = "osha-final-index" api_version="2020-06-30"Making an API Call for running the indexer on demandThe status code should be 202.delete_index = f'https://{service_name}.search.windows.net/indexes/{index_name}?api-version={api_version}' create_index = f'https://{service_name}.search.windows.net/indexes?api-version={api_version}' reset_indexer = f'https://{service_name}.search.windows.net/indexers/{indexer_name}/reset?api-version={api_version}' run_indexer = f'https://{service_name}.search.windows.net/indexers/{indexer_name}/run?api-version={api_version}' headers = { 'api-key': api_key, 'Content-Type': 'application/json', } # Index creation JSON index_definition = { "name": "osha-final-index", "fields": [ { "name": "final_narrative", "type": "Edm.String", "facetable": False, "filterable": False, "key": False, "retrievable": True, "searchable": True, "sortable": False, "analyzer": "standard.lucene", "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "event", "type": "Edm.String", "facetable": True, "filterable": True, "key": False, "retrievable": True, "searchable": True, "sortable": False, "analyzer": "standard.lucene", "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "amputation", "type": "Edm.String", "facetable": False, "filterable": False, "key": False, "retrievable": True, "searchable": False, "sortable": False, "analyzer": None, "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "location", "type": "Edm.String", "facetable": True, "filterable": True, "key": False, "retrievable": True, "searchable": True, "sortable": False, "analyzer": "standard.lucene", "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "source", "type": "Edm.String", "facetable": True, "filterable": True, "key": False, "retrievable": True, "searchable": True, "sortable": False, "analyzer": "standard.lucene", "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "hospitalized", "type": "Edm.String", "facetable": False, "filterable": False, "key": False, "retrievable": True, "searchable": False, "sortable": False, "analyzer": None, "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "nature", "type": "Edm.String", "facetable": True, "filterable": True, "key": False, "retrievable": True, "searchable": True, "sortable": False, "analyzer": "standard.lucene", "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "part_of_body", "type": "Edm.String", "facetable": True, "filterable": True, "key": False, "retrievable": True, "searchable": True, "sortable": False, "analyzer": "standard.lucene", "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "caseid", "type": "Edm.String", 
"facetable": False, "filterable": False, "key": False, "retrievable": True, "searchable": False, "sortable": False, "analyzer": None, "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "event_date", "type": "Edm.DateTimeOffset", "facetable": True, "filterable": True, "retrievable": True, "sortable": True, "analyzer": None, "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "employer", "type": "Edm.String", "facetable": True, "filterable": True, "key": False, "retrievable": True, "searchable": True, "sortable": False, "analyzer": "standard.lucene", "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "form_url", "type": "Edm.String", "facetable": False, "filterable": False, "key": False, "retrievable": True, "searchable": False, "sortable": False, "analyzer": None, "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "metadata_storage_content_type", "type": "Edm.String", "facetable": False, "filterable": False, "key": False, "retrievable": False, "searchable": False, "sortable": False, "analyzer": None, "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "metadata_storage_size", "type": "Edm.Int64", "facetable": False, "filterable": False, "retrievable": False, "sortable": False, "analyzer": None, "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "metadata_storage_last_modified", "type": "Edm.DateTimeOffset", "facetable": False, "filterable": False, "retrievable": False, "sortable": False, "analyzer": None, "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "metadata_storage_content_md5", "type": "Edm.String", "facetable": False, "filterable": False, "key": False, "retrievable": False, "searchable": False, "sortable": False, "analyzer": None, "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "metadata_storage_name", "type": "Edm.String", "facetable": False, "filterable": False, "key": False, "retrievable": False, "searchable": False, "sortable": False, "analyzer": None, "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "metadata_storage_path", "type": "Edm.String", "facetable": False, "filterable": False, "key": True, "retrievable": True, "searchable": False, "sortable": False, "analyzer": None, "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "metadata_storage_file_extension", "type": "Edm.String", "facetable": False, "filterable": False, "key": False, "retrievable": False, "searchable": False, "sortable": False, "analyzer": None, "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "people", "type": "Collection(Edm.String)", "facetable": False, "filterable": False, "retrievable": True, "searchable": True, "analyzer": "standard.lucene", "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "organizations", "type": "Collection(Edm.String)", "facetable": False, "filterable": False, "retrievable": True, "searchable": True, "analyzer": "standard.lucene", "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "locations", "type": "Collection(Edm.String)", "facetable": False, "filterable": False, "retrievable": True, "searchable": True, "analyzer": "standard.lucene", 
"indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "keyphrases", "type": "Collection(Edm.String)", "facetable": False, "filterable": True, "retrievable": True, "searchable": True, "analyzer": "standard.lucene", "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "language", "type": "Edm.String", "facetable": False, "filterable": False, "key": False, "retrievable": True, "searchable": True, "sortable": False, "analyzer": "standard.lucene", "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "translated_text", "type": "Collection(Edm.String)", "facetable": False, "filterable": False, "retrievable": True, "searchable": True, "analyzer": "en.lucene", "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "pii_entities", "type": "Collection(Edm.ComplexType)", "analyzer": None, "synonymMaps": [], "fields": [ { "name": "text", "type": "Edm.String", "facetable": False, "filterable": False, "key": False, "retrievable": True, "searchable": True, "sortable": False, "analyzer": "standard.lucene", "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "type", "type": "Edm.String", "facetable": False, "filterable": False, "key": False, "retrievable": True, "searchable": True, "sortable": False, "analyzer": "standard.lucene", "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "subtype", "type": "Edm.String", "facetable": False, "filterable": False, "key": False, "retrievable": True, "searchable": True, "sortable": False, "analyzer": "standard.lucene", "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "offset", "type": "Edm.Int32", "facetable": False, "filterable": False, "retrievable": True, "sortable": False, "analyzer": None, "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "length", "type": "Edm.Int32", "facetable": False, "filterable": False, "retrievable": True, "sortable": False, "analyzer": None, "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] }, { "name": "score", "type": "Edm.Double", "facetable": False, "filterable": False, "retrievable": True, "sortable": False, "analyzer": None, "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] } ] }, { "name": "masked_text", "type": "Edm.String", "facetable": False, "filterable": False, "key": False, "retrievable": True, "searchable": True, "sortable": False, "analyzer": "standard.lucene", "indexAnalyzer": None, "searchAnalyzer": None, "synonymMaps": [], "fields": [] } ], "suggesters": [ { "name": "sg", "searchMode": "analyzingInfixMatching", "sourceFields": [ "final_narrative", "event", "location", "source", "nature", "part_of_body", "employer" ] } ], "scoringProfiles": [], "defaultScoringProfile": "", "corsOptions": { "allowedOrigins": ["*"] }, "analyzers": [], "charFilters": [], "tokenFilters": [], "tokenizers": [], "@odata.etag": "\"0x8D8435F69CD9B0A\"" } # delete index del_resp = requests.delete(delete_index, headers=headers) print('Index deletion status code: ',del_resp.status_code) # Create new index create_resp = requests.post(create_index,data = json.dumps(index_definition), headers=headers) print('Index creation status code: ',create_resp.status_code) # Reset indexer reset_resp = requests.post(reset_indexer, headers=headers) print('Indexer reset status code: 
',reset_resp.status_code) # Run indexer run_resp = requests.post(run_indexer, headers=headers) print('Indexer run status code: ',del_resp.status_code)Federated Learning: Download Trained ModelIn the "[Part 01 - Create Plan](Part%2001%20-%20Create%20Plan.ipynb)" notebook we created the model, training plan, and averaging plan, and then hosted all of them in PyGrid.Imagine, such hosted FL model was trained using client libraries, SwiftSyft, KotlinSyft, syft.js, or FL client from the "[Part 02 - Execute Plan](Part%2002%20-%20Execute%20Plan.ipynb)" notebook.In this notebook, we'll download model checkpoints and test them against MNIST dataset._NOTE_: Technically such evaluation is not correct since we don't have train/test split - clients train on randomly chosen samples from the MNIST dataset. However, since clients train only on a very small portion of samples, we can still get a sense of how well the model generalises to the rest of the dataset.%load_ext autoreload %autoreload 2 import torch as th from torch import nn from torchvision import datasets, transforms import numpy as np import matplotlib.pyplot as plt import syft as sy from syft.grid.clients.model_centric_fl_client import ModelCentricFLClient from syft.grid.exceptions import GridError sy.make_hook(globals())Setting up Sandbox... Done!Utility function that sets tensors as model weights (copied from Part 01 notebook):def set_model_params(module, params_list, start_param_idx=0): """ Set params list into model recursively """ param_idx = start_param_idx for name, param in module._parameters.items(): module._parameters[name] = params_list[param_idx] param_idx += 1 for name, child in module._modules.items(): if child is not None: param_idx = set_model_params(child, params_list, param_idx) return param_idxThe model as in Part 01 notebook:class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.fc1 = nn.Linear(784, 392) self.fc2 = nn.Linear(392, 10) def forward(self, x): x = self.fc1(x) x = nn.functional.relu(x) x = self.fc2(x) return xLoad MNIST dataset.batch_size = 64 mnist_dataset = th.utils.data.DataLoader( datasets.MNIST('data', train=True, download=True, transform=transforms.ToTensor()), batch_size=batch_size, drop_last=True, )Create client and model.# PyGrid Node where the FL model was hosted and trained gridAddress = "127.0.0.1:5000" # Create FL client client = ModelCentricFLClient(id="test", address=gridAddress) # Create model model = Net() model.eval()Define evaluation helper function that will check model accuracy against whole MNIST dataset.def evaluate_model(name, version, checkpoint): """Test specified model against MNIST dataset""" model_params_state = client.get_model(name, version, checkpoint) model_params = model_params_state.tensors() # Load model params into the model set_model_params(model, model_params) # Test accuracies = [] for batch_idx, (X, y) in enumerate(mnist_dataset): X = X.view(batch_size, -1) with th.no_grad(): logits = model(X) preds = th.argmax(logits, dim=1) acc = preds.eq(y).float().mean() accuracies.append(acc.item()) return np.mean(accuracies)Let's get all model checkpoints and see how they were becoming better.name = "mnist" version = "1.0.0" checkpoint = 1 checkpoints = [] accuracies = [] while True: try: print(f"Testing checkpoint {checkpoint}...", end="") accuracy = evaluate_model(name, version, checkpoint) print(f"Done ({accuracy})") checkpoints.append(f"cp #{checkpoint}") accuracies.append(accuracy) checkpoint += 1 except GridError as err: # Model not found print("No more 
checkpoints to try") break plt.bar(checkpoints, accuracies)Testing checkpoint 1...Done (0.10757403948772679) Testing checkpoint 2...Done (0.5063367129135539) Testing checkpoint 3...Done (0.6688400480256137) Testing checkpoint 4...Done (0.7043256403415155) Testing checkpoint 5...Done (0.705909818569904) Testing checkpoint 6...Done (0.7358090981856991) Testing checkpoint 7...No more checkpoints to tryData validationIn this notebook we investigate data completeness and correctness. Inspecting the dataBelow we import the training data and produce some preliminary summaries.import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns features = pd.read_csv('../data/training_features.csv', index_col='id') targets = pd.read_csv('../data/training_labels.csv', index_col='id') df = features.join(targets, how='left') df.head() display(df.info()) display(df.isnull().sum()) display(df.describe())Data classificationIn this section we us the provided [Data Summary](https://www.drivendata.org/competitions/7/pump-it-up-data-mining-the-water-table/page/25/) classify variables. Geographical dataThere are several variables that describe the geography of waterpoints. These variables fall into two subclasses. QuantitativeThe variables below provide geo-spatial coordinates for the waterpoint.* `longitude` - GPS coordinate. Valid values fall in the interval [29.6, 40.4].* `latitude` - GPS coordinate. Valid values fall in the interval [-11.7, -0.8].* `gps_height` - Altitude of the well. Valid values fall in the interval [0, 5895] meters. QualitativeThe variables below provide a categorical description of waterpont location.* `region` (`region_code`) - Names (codes) for top-level administrative regions. There are 31 total [Link](https://en.wikipedia.org/wiki/Regions_of_Tanzania)* `lga` (`district_code`) - Names (codes) for districts, which divide regions.* `ward` - Names for wards, which divide districts.* `subvillage` - Names for sub-villages, presumably these subdivide wards. 
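Before moving on to the remaining variable groups, the stated valid ranges for the quantitative coordinates can be turned into a quick sanity check. A minimal sketch, assuming the `df` assembled above (column names as listed; the thresholds are simply the intervals quoted above):

```python
# Count waterpoints whose coordinates fall outside the quoted valid ranges:
# longitude [29.6, 40.4], latitude [-11.7, -0.8], gps_height [0, 5895] m.
valid_lon = df['longitude'].between(29.6, 40.4)
valid_lat = df['latitude'].between(-11.7, -0.8)
valid_alt = df['gps_height'].between(0, 5895)

print('invalid longitude :', (~valid_lon).sum())
print('invalid latitude  :', (~valid_lat).sum())
print('invalid gps_height:', (~valid_alt).sum())
```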
Water Table* `basin` - Geographic water basin* `water_quality` - The quality of the water* `quality_group` - The quality of the water* `quantity` - The quantity of water* `quantity_group` - The quantity of water* `source` - The source of the water* `source_type` - The source of the water* `source_class` - The source of the water Waterpoint* `wpt_name` - Name of the waterpoint if there is one* `amount_tsh` - Total static head (amount water available to waterpoint)* `extraction_type` - The kind of extraction the waterpoint uses* `extraction_type_group` - The kind of extraction the waterpoint uses* `extraction_type_class` - The kind of extraction the waterpoint uses* `waterpoint_type` - The kind of waterpoint* `waterpoint_type_group` - The kind of waterpoint* `population` - Population around the well Waterpoint Management* `scheme_management` Who operates the waterpoint* `scheme_name` - Who operates the waterpoint* `permit` - If the waterpoint is permitted* `management` - How the waterpoint is managed* `management_group` - How the waterpoint is managed* `payment` - What the water costs* `payment_type` - What the water costs Waterpoint Installation* `construction_year` - Year the waterpoint was constructed* `funder` - Who funded the well* `installer` - Organization that installed the well Data collection* `date_recorded` - The date the row was entered* `recorded_by` - Group entering this row of data Unknown* `num_private` -* `public_meeting` - True/False Inspecting Geographical Data QuantitativeThe variables below provide geo-spatial coordinates for the waterpoint.* `longitude` - GPS coordinate. Valid values fall in the interval [29.6, 40.4].* `latitude` - GPS coordinate. Valid values fall in the interval [-11.7, -0.8].* `gps_height` - Altitude of the well. Valid values fall in the interval [0, 5895] meters.Below we map a random sample of 500 waterpoints.quantGeo = df[['longitude', 'latitude', 'gps_height', 'status_group']] index_sample = list(np.random.choice(quantGeo.index, 500, replace=False)) sample = quantGeo.loc[index_sample] import folium lat = -6.3728 long = 34.8925 #Create a map of the area base_map = folium.Map([lat, long], zoom_start=6) color_dict = { 'functional' : 'green', 'non functional': 'red', 'functional needs repair': 'orange' } for index in sample.index: lat = sample['latitude'][index] long = sample['longitude'][index] status = str(sample['status_group'][index]) color = color_dict[status] marker = folium.Circle(location=[lat, long], radius=1, popup=[lat, long], color=color) marker.add_to(base_map) base_map plt.figure(figsize=(10,10)) plt.scatter(x='latitude', y='longitude', c='gps_height', data=quantGeo) plt.colorbar()ConclusionsThere are a handful of waterpoints with bad `latitude` and `longitude`, there are also some waterpoints with negative `gps_height`. Qualitative The variables below provide a categorical description of waterpont location.* `region` (`region_code`) - Names (codes) for top-level administrative regions. 
There are 31 total [Link](https://en.wikipedia.org/wiki/Regions_of_Tanzania)* `lga` (`district_code`) - Names (codes) for districts, which divide regions.* `ward` - Names for wards, which divide districts.* `subvillage` - Names for sub-villages, presumably these subdivide wards.qualGeo = df[['region', 'region_code', 'lga', 'district_code', 'ward', 'subvillage', 'status_group', 'wpt_name']] qualGeo.groupby(by=['region', 'region_code', 'lga', 'district_code', 'ward', 'subvillage', 'status_group']).count() qualGeo = df[['region', 'lga', 'ward', 'subvillage', 'wpt_name']] qualGeo.groupby(by=['region', 'lga', 'ward', 'subvillage']).count()Conclusions* Both `region_code` and `district_code` seem to be non-standard encodings and should probably be dropped.* `lga` provides the names of districts.* `ward` and `subvillage` have a huge number of values. Inspecting Water Table Data* `basin` - Geographic water basin* `water_quality` - The quality of the water* `quality_group` - The quality of the water* `quantity` - The quantity of water* `quantity_group` - The quantity of water* `source` - The source of the water* `source_type` - The source of the water* `source_class` - The source of the waterwaterQuality = df[['quality_group', 'water_quality', 'status_group', 'wpt_name']] waterQuality.groupby(by=['quality_group', 'water_quality', 'status_group']).count() waterQuantity = df[['quantity_group', 'quantity', 'status_group', 'wpt_name']] waterQuantity.groupby(by=['quantity_group', 'quantity', 'status_group']).count() df['basin'].value_counts() waterSource = df[['source_class', 'source_type', 'source', 'status_group', 'wpt_name']] waterSource.groupby(by=['source_class', 'source_type', 'source', 'status_group']).count()Conclusions* `water_quality` provides information about whether a waterpoint is abandoned.* `quantity` and `quantity_group` are duplicate columns.* `source_class`, `source_type`, and `source` all provide essentially the same data. `source_class` seems like the best variable for initial investigation. Inspecting Waterpoint Data* `wpt_name` - Name of the waterpoint if there is one* `amount_tsh` - Total static head (amount water available to waterpoint)* `extraction_type` - The kind of extraction the waterpoint uses* `extraction_type_group` - The kind of extraction the waterpoint uses* `extraction_type_class` - The kind of extraction the waterpoint uses* `waterpoint_type` - The kind of waterpoint* `waterpoint_type_group` - The kind of waterpoint* `population` - Population around the welldf['amount_tsh'].value_counts() df[['extraction_type_group', 'extraction_type_class', 'extraction_type']].value_counts() df[['waterpoint_type_group', 'waterpoint_type']].value_counts() df['population'].value_counts() df[df['population']>0]['population'].apply(lambda x: np.log(x)).hist()Conclusions* `amount_tsh` has a large number of zero values. It is hard to say if these represent missing values. Non-zero values would most likely benefit from log transform. * `extraction_type_class` seems to provide a good classification of pumps. Both `extraction_type_group` and `extraction_type` seem to be a mixture of models and types that is unlikely to provide much insight.* `waterpoint_type_group` and `waterpoint_type` are essentially duplicate columns. I prefer the terminology used in `waterpoint_type_group`.* `population` seems to use zero as a placeholder for missing values. Non-zero values might benefit from a log transformation. 
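To make the last two conclusions concrete, here is a minimal sketch (assuming the `df` loaded earlier, and assuming, as argued above, that zeros stand in for missing values) of the suggested log transformation:

```python
import numpy as np

# Treat zeros in `population` and `amount_tsh` as missing, then
# log-transform the remaining positive values as suggested above.
for col in ['population', 'amount_tsh']:
    positive = df[col].where(df[col] > 0)   # zeros become NaN
    df[col + '_log'] = np.log(positive)

df[['population_log', 'amount_tsh_log']].hist(bins=30);
```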
Inspecting Waterpoint Management Data* `scheme_management` Who operates the waterpoint* `scheme_name` - Who operates the waterpoint* `permit` - If the waterpoint is permitted* `management` - How the waterpoint is managed* `management_group` - How the waterpoint is managed* `payment` - What the water costs* `payment_type` - What the water costsdf[['scheme_management', 'scheme_name', 'status_group', 'wpt_name']].groupby(by=['scheme_management', 'scheme_name', 'status_group']).count() df['scheme_name'].value_counts() df[['management_group', 'management', 'status_group', 'wpt_name']].groupby(by=['management_group', 'management', 'status_group']).count() df[['payment_type', 'payment']].value_counts()Conclusions* `scheme_name` seems to be describing the particular entity in charge of managing the waterpoint. This data may be overly granular.* `scheme_management` describes the type of entity that manages the waterpoint.* `management_group` and `management` seem to also describe management of the waterpoint. This representation seems to me better designed.* `payment_type` and `payment` are duplicate columns with `payment` having the more descriptive labels. Inspecting Waterpoint Installation Data* `construction_year` - Year the waterpoint was constructed* `funder` - Who funded the well* `installer` - Organization that installed the welldf[['construction_year']].value_counts() df[['funder']].value_counts() df[['installer']].value_counts()Conclusions * zero seems to be a placeholder for missing values in the `construction_year` column.* Both `installer` and `funder` have a huge number of labels. Target class imbalancedf['status_group'].value_counts(normalize=True)Pathsmeeting_id = 160320 #meeting_id = 220120 #meeting_id = 170127 #meeting_id = 83512718053 masked = True video_path = glob.glob(f'zoom_data/{meeting_id}/*.mp4')[0] print(video_path) if masked: diff_path = f'diff_data/diffs_{meeting_id}_masked_cossim.csv' else: diff_path = f'diff_data/diffs_{meeting_id}_cossim.csv' sc_labels = f'slide_change_labels/{meeting_id}.csv' interval_path = 'interval_data/intervals.csv'zoom_data/160320/GMT20210614-160320_Recording_2020x1380.mp4Load FPS, Diffs, and Slide Change Labelsvidcap = VideoCapture(video_path) fps = vidcap.get(CAP_PROP_FPS) ddiffs = load_diffs(diff_path, fps) sldf = load_slide_changes(sc_labels) idf = load_intervals(interval_path, meeting_id=meeting_id)Visualizeq = None ddiffs = filter_video(ddiffs, sldf, idf) signals, threshold_q = get_signals(ddiffs, threshold_q=q) print(f"Threshold is set at {threshold_q} percentile") plot_slide_diffs(ddiffs, sldf=sldf, signals=signals)Debugddiffs signals results = ddiffs[['elapsed_dt', 'cos_sim_diff']] \ .merge(sldf[['change_time_dt']], how='left', left_on='elapsed_dt', right_on='change_time_dt') \ .merge(signals, how='left', on='elapsed_dt') results['plus'] = results.change_time_dt.shift() results['minus'] = results.change_time_dt.shift(-1) results['signal_to_change_time_dt'] = np.where(results.change_time_dt.notna(), results.change_time_dt, results[['plus', 'minus']].max(axis=1)) results maxes = results.groupby(['signal', 'signal_to_change_time_dt'], dropna=True, as_index=False) \ .cos_sim_diff \ .max() \ .rename(columns={'cos_sim_diff': 'max_diff'}) results = results.merge(maxes, how='left', on=['signal','signal_to_change_time_dt']) results.max_diff = np.where(results.max_diff.isna(), results.cos_sim_diff, results.max_diff) results['keep_signal'] = results.max_diff == results.cos_sim_diff results = results.query('keep_signal').reset_index(drop=True) 
results results.iloc[1937:1945, :].reset_index(drop=True) results = results.iloc[1937:1945, :].reset_index(drop=True) results results[['elapsed_dt', 'change_time_dt']] \ .merge(results[['elapsed_dt', 'signal_to_change_time_dt', 'signal']], how='left', left_on=['elapsed_dt', 'change_time_dt'], right_on=['elapsed_dt', 'signal_to_change_time_dt']) labels = results[['elapsed_dt', 'change_time_dt']] \ .merge(results.loc[results.signal, ['signal_to_change_time_dt', 'signal']], how='left', left_on='elapsed_dt', right_on='signal_to_change_time_dt') labels labels['y'] = labels.change_time_dt.notna().astype(int) labels['yhat'] = (labels.signal_to_change_time_dt.notna() & labels.signal).astype(int) scores = {} scores['accuracy'] = sklearn.metrics.accuracy_score(labels.y, labels.yhat) scores['precision'] = sklearn.metrics.precision_score(labels.y, labels.yhat) scores['recall'] = sklearn.metrics.recall_score(labels.y, labels.yhat) scores['f1'] = sklearn.metrics.f1_score(labels.y, labels.yhat) scores labels.yhat.sum() signals.query('signal') signals.shape signals.signal.sum() (~signals.signal).sum() sanitized_signals = sanitize_signals(ddiffs, sldf, signals) sanitized_signals.shape sanitized_signals.signal.sum() (~sanitized_signals.signal).sum() sanitized_signals sanitized_signals.query('signal_to_change_time_dt.notna()') sanitized_signals['true_pos'] = sanitized_signals.signal & sanitized_signals[['change_time_dt', 'plus', 'minus']].notna().sum(axis=1) > 0 sanitized_signals # TP + FN num_slide_changes = sanitized_signals.change_time_dt.notna().sum() num_slide_changes # TN + FP num_non_slide_changes = sanitized_signals.shape[0] - num_slide_changes num_non_slide_changes # TP + FP num_signals = sanitized_signals.signal.sum() num_signals # TP tp = sanitized_signals.true_pos.sum() tp # FP fp = num_signals - tp fp # FN fn = num_slide_changes - tp fn # TN tn = num_non_slide_changes - fp tn accuracy = (tp + tn) / sanitized_signals.shape[0] accuracy precision = tp / num_signals precision recall = tp / num_slide_changes recall f1 = 2 * (precision * recall) / (precision + recall) f1 mask = sanitized_signals.signal sanitized_signals.loc[mask, ['signal_to_change_time_dt']] sanitized_signals[['elapsed_dt', 'change_time_dt']] \ .merge(sanitized_signals.loc[mask, ['signal_to_change_time_dt']], how='left', left_on='elapsed_dt', right_on='signal_to_change_time_dt') mask = sanitized_signals.signal labels = sanitized_signals[['elapsed_dt', 'signal', 'change_time_dt']] \ .merge(sanitized_signals.loc[mask, ['signal_to_change_time_dt']], how='left', left_on='elapsed_dt', right_on='signal_to_change_time_dt') labels num_slide_changes = labels.change_time_dt.notna().sum() num_slide_changes num_signals = labels.signal.sum() num_signals true_pos = labels.signal_to_change_time_dt.notna().sum() true_pos sanitized_signals.query('signal')ASSIGNMENT 9.4#9.4 Write a program to read through the mbox-short.txt and #figure out who has sent the greatest number of mail messages. #The program looks for 'From ' lines and takes the second word #of those lines as the person who sent the mail. #The program creates a Python dictionary that maps #the sender's mail address to a count of the number of times #they appear in the file. After the dictionary is produced, #the program reads through the dictionary using a maximum loop to #find the most prolific committer. 
name = input("Enter file:") if len(name) < 1 : name = "mbox-short.txt" handle = open(name) emcount = dict() for line in handle: if not line.startswith("From "): continue line = line.split() line = line[1] emcount[line] = emcount.get(line, 0) +1 bigcount = None bigword = None for word,count in emcount.items(): if bigcount == None or count > bigcount: bigcount = count bigword = word print(bigword, bigcount)Noisy dataSuppose that we have a dataset in which we have some measured attributes. Now, these attributes might carry some random error or variance. Such errors in attribute values are called as noise in the data.If such errors persist in our data, it will return inaccurate results. Data cleaningReal-world data tend to be noisy. Noisy data is data with a large amount of additional meaningless information in it called noise. Data cleaning (or data cleansing) routines attempt to smooth out noise while identifying outliers in the data. There are three data smoothing techniques as follows:1. Binning : Binning methods smooth a sorted data value by consulting its “neighborhood”, that is, the values around it.2. Regression : It conforms data values to a function. Linear regression involves finding the “best” line to fit two attributes (or variables) so that one attribute can be used to predict the other.3. Outlier analysis : Outliers may be detected by clustering, for example, where similar values are organized into groups, or “clusters”. Intuitively, values that fall outside of the set of clusters may be considered as outliers. 1. Binning Types of smoothing using binning:Smoothing by bin means : In smoothing by bin means, each value in a bin is replaced by the mean value of the bin.Smoothing by bin median : In this method each bin value is replaced by its bin median value.Smoothing by bin boundary : In smoothing by bin boundaries, the minimum and maximum values in a given bin are identified as the bin boundaries. Each bin value is then replaced by the closest boundary value. 
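The three smoothing rules are easy to express with NumPy; a minimal sketch follows, and the hand-worked example with concrete numbers continues right after it (the sketch uses the same nine values and an equal-frequency partition into three bins, purely for illustration):

```python
import numpy as np

data = np.sort(np.array([2, 6, 7, 9, 13, 20, 21, 25, 30]))
bins = data.reshape(3, 3)                          # equal-frequency partition: 3 values per bin

by_mean   = np.repeat(bins.mean(axis=1), 3)        # smoothing by bin means
by_median = np.repeat(np.median(bins, axis=1), 3)  # smoothing by bin medians

# Smoothing by bin boundaries: replace each value by whichever of the
# bin's min or max it is closer to.
lo = bins.min(axis=1, keepdims=True)
hi = bins.max(axis=1, keepdims=True)
by_boundary = np.where(bins - lo <= hi - bins, lo, hi).ravel()

print(by_mean)
print(by_median)
print(by_boundary)
```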
Bin = [ 2, 6, 7, 9, 13, 20, 21, 25, 30 ]Partition using equal frequency approach:Bin 1 : 2, 6, 7Bin 2 : 9, 13, 20Bin 3 : 21, 24, 30Smoothing by bin mean :Bin 1 : 5, 5, 5Bin 2 : 14, 14, 14Bin 3 : 25, 25, 25Smoothing by bin median :Bin 1 : 6,6,6Bin 2 : 13,13,13Bin 3 : 24,24,24Smoothing by bin boundaries :Boundary_bins = [0,7,14,21,30]Bin = [ 2, 6, 7, 9, 13, 20, 21, 25, 30 ]New_bin = [ (0,7] , (0,7] , (0,7] , (7,14], (7,14], (14,21], (14,21], (25,30] , (25,30] ]import pandas as pd import matplotlib.pyplot as plt ages = [20, 22, 25, 27, 21, 23, 37, 31, 61, 45, 41, 32] bound_bins = [18, 25, 35, 60, 100] categories = ['18_25','25_35' , '35_60' , '60_100' ] cats = pd.cut(ages, bound_bins) cats cats[6] y = pd.value_counts(cats) y plt.bar(categories,y) plt.show() cut_labels = ['young', 'adult', 'old', 'very_old'] cut_bins = [18, 25, 35, 60, 100] cats = pd.cut(ages, bins=cut_bins, labels=cut_labels) cats y = pd.value_counts(cats) y plt.bar(cut_labels,y,width= 0.7) plt.show() cats2 = pd.qcut(ages, q = 4) cats2 y = pd.value_counts(cats2) yThis notebook tests the implementation of Gaussian Process with and without Derivative ObservationsFirst we test Gaussian Processes in **2D**import numpy as np from utils.gaussian_process import GaussianProcess as GP from utils.kernel import RBFKernel import matplotlib.pyplot as plt import random random.seed(2020) %matplotlib inline %matplotlib notebook kernel_test = RBFKernel(alpha=1, gamma=2) def y(x): return np.sin(3*x) def dy(x): return np.cos(x) X = np.linspace(-2, 2, 5) X_x = np.linspace(-2.5,2.5,100).reshape(-1, 1) y_t = y(X) dy_t = np.array([-0, 4, 3, 0, 2]) Y = y_t Y_do = np.hstack((y_t, dy_t)) Y = Y.reshape(-1, 1) X = X.reshape(-1, 1) gp = GP(kernel=kernel_test, derivative_observations=False, alpha=0.0, restarts=0) gp_do = GP(kernel=kernel_test, derivative_observations=True, alpha=0.0, restarts=0) gp.fit(X, Y, sample_ratio=1.0) gp_do.fit(X, Y_do, sample_ratio=1.0) %matplotlib inline # samples = gp.sample(X_x, n_samples=5) plt.figure(figsize=(7,5)) deltax = 0.1 plt.plot(X, Y[:5], 'ro') plt.plot(X_x, samples[0], color=[0.6, 0.6, 0.6, 0.6]) plt.plot(X_x, gp.predict(X_x), 'k--', linewidth=2) # plt.title("Gaussian Process without Derivative Observations noise-free setting") %matplotlib inline samples = gp_do.sample(X_x, n_samples=5) print(type(samples)) plt.figure(figsize=(10,5)) deltax = 0.1 for i in range(5): g = Y_do[5+i] f = Y_do[i] x1 = X[i]-deltax x2 = X[i]+deltax y1 = f - g*deltax y2 = f + g*deltax plt.plot([x1,x2],[y1,y2],'b',linewidth=4) plt.plot(X, Y_do[:5], 'ro') plt.plot(X_x, samples[0], color=[0.6, 0.6, 0.6, 0.6]) plt.plot(X_x, gp_do.predict(X_x), 'k--', linewidth=2) plt.title("Gaussian Process with Derivative Observations noise-free setting")Now testing for points in 3Dkernel_test = RBFKernel() Xx, Yy = np.mgrid[-2.5:2.5:0.25, -2.5:2.5:0.25] positions = np.vstack([Xx.ravel(), Yy.ravel()]).T def plane_equation(y, x, i, positions): return y[i] + y[i+5]*(positions[:,0] - x[i][0]) + y[i+10]*(positions[:,1] - x[i][1]) X = positions[[78, 127, 247, 323, 378]] K = kernel_test(X) X_x = np.linspace(-2.5,2.5,900).reshape(-1, 2) y_t = np.random.multivariate_normal(np.zeros((5)),K) dy_t1 = np.zeros_like(X).reshape(-1) Y = np.hstack((y_t, dy_t1)) Y[-9] = 2 Y[-3] = 5 Y[-1] = -1 Y[-6] = 1 Y[-5] = -3 Y[-8] = 2 Y = Y.reshape(-1, 1) X = X.reshape(-1, 2) gp = GP(kernel=kernel_test, derivative_observations=True) gp.fit(X, Y, sample_ratio=1) %matplotlib notebook import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D fig = plt.figure(figsize=(10, 7)) ax 
= fig.add_subplot(111, projection='3d') ax.scatter(X[:, 0], X[:, 1], Y[:5], c='r') Z = gp.predict(positions) for i in range(5): Xxx, Yyy = np.mgrid[X[i][0]-0.25:X[i][0]+0.25:0.05, X[i][1]-0.25:X[i][1]+0.25:0.05] ps = np.vstack([Xxx.ravel(), Yyy.ravel()]).T p = plane_equation(Y, X, i, ps) ax.plot_trisurf(ps[:, 0], ps[:, 1], p, alpha=0.7) ax.plot_trisurf(positions[:, 0], positions[:, 1], Z.reshape(-1), cmap='RdYlGn', alpha=0.4) ax.view_init(0, 20) ax.set_xlabel('$x$')Constructing plugins from TimeSeriesMany times we encounter event lists or sets of spectral histograms from which we would like to derive a single or set of plugins. For this purpose, we provide the **TimeSeriesBuilder** which provides a unified interface to time series data. Here we will demonstrate how to construct plugins from different data types. Constructing time series objects from different data typesThe **TimeSeriesBuilder** currently supports reading of the following data type:* A generic PHAII data file* GBM TTE/CSPEC/CTIME files* LAT LLE filesIf you would like to build a time series from your own custom data, consider creating a TimeSeriesBuilder.from_your_data() class method. GBM Data Building plugins from GBM is achieved in the following fashioncspec_file = get_path_of_data_file('datasets/glg_cspec_n3_bn080916009_v01.pha') tte_file = get_path_of_data_file('datasets/glg_tte_n3_bn080916009_v01.fit.gz') gbm_rsp = get_path_of_data_file('datasets/glg_cspec_n3_bn080916009_v00.rsp2') gbm_cspec = TimeSeriesBuilder.from_gbm_cspec_or_ctime('nai3_cspec', cspec_or_ctime_file=cspec_file, rsp_file=gbm_rsp) gbm_tte = TimeSeriesBuilder.from_gbm_tte('nai3_tte', tte_file=tte_file, rsp_file=gbm_rsp)LAT LLE dataLAT LLE data is constructed in a similar fashionlle_file = get_path_of_data_file('datasets/gll_lle_bn080916009_v10.fit') ft2_file = get_path_of_data_file('datasets/gll_pt_bn080916009_v10.fit') lle_rsp = get_path_of_data_file('datasets/gll_cspec_bn080916009_v10.rsp') lat_lle = TimeSeriesBuilder.from_lat_lle('lat_lle', lle_file=lle_file, ft2_file=ft2_file, rsp_file=lle_rsp)Viewing Lightcurves and selecting source intervalsAll time series objects share the same commands to get you to a plugin. Let's have a look at the GBM TTE lightcurve.threeML_config['lightcurve']['lightcurve color'] = '#07AE44' fig = gbm_tte.view_lightcurve(start=-20,stop=200)Perhaps we want to fit the time interval from 0-10 seconds. We make a selection like this:threeML_config['lightcurve']['selection color'] = '#4C3CB7' gbm_tte.set_active_time_interval('0-10') fig = gbm_tte.view_lightcurve(start=-20,stop=200);For event list style data like time tagged events, the selection is *exact*. However, pre-binned data in the form of e.g. PHAII files will have the selection automatically adjusted to the underlying temporal bins.Several discontinuous time selections can be made. Fitting a polynomial backgroundIn order to get to a plugin, we need to model and create an estimated background in each channel ($B_i$) for our interval of interest. The process that we have implemented is to fit temporal off-source regions to polynomials ($P(t;\vec{\theta})$) in time. First, a polynomial is fit to the total count rate. From this fit we determine the best polynomial order via a likelihood ratio test, unless the user supplies a polynomial order in the constructor or directly via the polynomial_order attribute. 
Then, this order of polynomial is fit to every channel in the data.From the polynomial fit, the polynomial is integrated in time over the active source interval to estimate the count rate in each channel. The estimated background and background errors then stored for each channel.$$ B_i = \int_{T_1}^{T_2}P(t;\vec{\theta}) {\rm d}t $$threeML_config['lightcurve']['background color'] = '#FC2530' gbm_tte.set_background_interval('-24--5','100-200') gbm_tte.view_lightcurve(start=-20,stop=200);Auto-determined polynomial order: 0For event list data, binned or unbinned background fits are possible. For pre-binned data, only a binned fit is possible.gbm_tte.set_background_interval('-24--5','100-200',unbinned=False)Auto-determined polynomial order: 4Saving the background fitThe background polynomial coefficients can be saved to disk for faster manipulation of time series data.gbm_tte.save_background('background_store',overwrite=True) gbm_tte_reloaded = TimeSeriesBuilder.from_gbm_tte('nai3_tte', tte_file=tte_file, rsp_file=gbm_rsp, restore_background='background_store.h5') fig = gbm_tte_reloaded.view_lightcurve(-10,200)Creating a pluginWith our background selections made, we can now create a plugin instance. In the case of GBM data, this results in a **DispersionSpectrumLike**plugin. Please refer to the Plugins documentation for more details.gbm_plugin = gbm_tte.to_spectrumlike() gbm_plugin.display()Time-resolved binning and plugin creationIt is possible to temporally bin time series. There are up to four methods provided depending on the type of time series being used:* Constant cadence (all time series)* Custom (all time series)* Significance (all time series)* Bayesian Blocks (event lists) Constant CadenceConstant cadence bins are defined by a start and a stop time along with a time delta.gbm_tte.create_time_bins(start=0, stop=10, method='constant', dt=2.) gbm_tte.bins.display()CustomCustom time bins can be created by providing a contiguous list of start and stop times.time_edges = np.array([.5,.63,20.,21.]) starts = time_edges[:-1] stops = time_edges[1:] gbm_tte.create_time_bins(start=starts, stop=stops, method='custom') gbm_tte.bins.display()SignificanceTime bins can be created by specifying a significance of signal to background if a background fit has been performed.gbm_tte.create_time_bins(start=0., stop=50., method='significance', sigma=25) gbm_tte.bins.display()Bayesian BlocksThe Bayesian Blocks algorithm (Scargle et al. 2013) can be used to bin event list by looking for significant changes in the rate.gbm_tte.create_time_bins(start=0., stop=50., method='bayesblocks', p0=.01, use_background=True) gbm_tte.bins.display()Working with binsThe light curve can be displayed by supplying the use_binner option to display the time binningfig = gbm_tte.view_lightcurve(use_binner=True)The bins can all be writted to a PHAII file for analysis via OGIPLike.gbm_tte.write_pha_from_binner(file_name='out', overwrite=True, force_rsp_write = False) # if you need to write the RSP to a file. 
We try to choose the best option for you.Similarly, we can create a list of plugins directly from the time series.my_plugins = gbm_tte.to_spectrumlike(from_bins=True)Deep Learning for Sentiment Analysis[![ForTheBadge built-with-love](http://ForTheBadge.com/images/badges/built-with-love.svg)](https://github.com/NiklasHoltmeyer/sentiment-analysis) [![Made withJupyter](https://img.shields.io/badge/Made%20with-Jupyter-orange?style=for-the-badge&logo=Jupyter)](https://jupyter.org/try) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Github](https://img.shields.io/badge/Git-Hub-green.svg)](https://github.com/NiklasHoltmeyer/sentiment-analysis)! cd /content/ ! rm -rf /content/scripts /content/training ! git clone https://github.com/NiklasHoltmeyer/sentiment-analysis ! mv sentiment-analysis/* $PWD ! rm -r sentiment-analysis/ %mkdir -p /content/training/ %cd /content/training/InstallationThe following script contains the (Python) requirements and training data.```bashbash /content/scripts/install_prerequisites.sh```!sh /content/scripts/install_small.sh #install_prerequisites.sh Containts Dependencies for Everything, Small Excludes GloVe %cd /content/DeepSentiment/ %cat requirements.txt | xargs -n 1 pip install #pip install -r file.txt might fail %pip install --force-reinstall contractions emoji %pip install --force -e . %cd /content/DeepSentiment/ import DeepSentiment %cd /content/training/ %pip install --force-reinstall contractions emojiMount Gdrive in case you want to later export the results.#from google.colab import drive #drive.mount('/gdrive/')Showing possible models to train for Sentiment Analysis#!python /content/training/train.py --helpTrain Examplefrom DeepSentiment.Networks.Tensorflow.Model import Model as TFModel import logging logging.basicConfig( level=logging.DEBUG, format= '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s', #%(asctime)s - %(levelname)s: %(message)s ) logger = logging.getLogger("sentiment") logging.getLogger("matplotlib").setLevel(logging.WARNING) logging.getLogger("nltk_data").setLevel(logging.WARNING) model, history = TFModel().trainModel(CNN_LAYER = True, #self-trained word2vec embedding layer POOLING_LAYER = True, BiLSTM_Layer = True, logger = logger) #model, history = TFModel().trainModel(GLOVE = True, # CNN_LAYER = True # POOLING_LAYER = True # GRU_LAYER = True # BiLSTM_Layer = True # LSTM_Layer = True # DENSE_LAYER = True, # logger = logger)Predictionfrom DeepSentiment.Preprocessing.CleanText import CleanText sample_text = ('The movie was not good. The animation and the graphics ' 'were terrible. 
I would not recommend this movie.') sample_text_cleaned = CleanText().cleanText(sample_text) model.predict([sample_text])from tensorflow.python.keras.datasets import imdb ((Xtrain,Ytrain),(Xtest,Ytest)) = imdb.load_data(num_words=10000) print('Dataset Loaded!') Xtrain.shape len(Xtrain) len(Xtest) Xtrain[0] len(Xtrain[0]) sh word_idx = imdb.get_word_index() idx_word = dict([value,key] for (key,value) in word_idx.items()) actual_review = ' '.join([idx_word.get(idx-3,'?') for idx in Xtrain[0]]) print(actual_review) print(len(actual_review.split())) word_idx idx_word from tensorflow.python.keras.preprocessing import sequence X_train = sequence.pad_sequences(Xtrain,maxlen=500) X_test = sequence.pad_sequences(Xtest,maxlen=500) print("Padding Completed!") print(X_train.shape) print(X_test.shape) print(X_train[0]) from tensorflow.python.keras.layers import Embedding,SimpleRNN,Dense from tensorflow.python.keras.models import Sequential model = Sequential() model.add(Embedding(10000,64)) model.add(SimpleRNN(32)) model.add(Dense(1,activation='sigmoid')) print(model.summary()) model.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['acc']) print("Model Compiled Successfully!") from tensorflow.python.keras.callbacks import ModelCheckpoint from tensorflow.python.keras.callbacks import EarlyStopping checkpoint = ModelCheckpoint("best_model.h5", monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False) earlystop = EarlyStopping(monitor='val_acc',patience=1) print("Callbacks Created Successfully!") hist = model.fit(X_train,Ytrain,validation_split=0.2,epochs=10,batch_size=128,callbacks=[checkpoint,earlystop]) import matplotlib.pyplot as plt acc = hist.history['acc'] val_acc = hist.history['val_acc'] epochs = range(1,len(loss)+1) plt.title("Accuracy vs Epochs") plt.plot(epochs,acc,label="Training Acc") plt.plot(epochs,val_acc,label="Val Acc") plt.legend() plt.show() import matplotlib.pyplot as plt loss = hist.history['loss'] val_loss = hist.history['val_loss'] epochs = range(1,len(loss)+1) plt.title("Loss vs Epochs") plt.plot(epochs,loss,label="Training Loss") plt.plot(epochs,val_loss,label="Val Loss") plt.legend() plt.show() model.evaluate(X_test,Ytest)782/782 [==============================] - 24s 31ms/step - loss: 0.4088 - acc: 0.8339Quiz-3There are three tasks.You'll see the tasks enclosed as follows.---> YOUR TASK n <---...task n ... ---> YOUR TASK n ENDS HERE<---Goals:* Design a DFA that we shall specify in Section 2 (begin XOR end with 01)* Design a DFA we shall specify in Section 3 (for numbers, MSB first, equal to 0 mod 5, similar to Sec 5.2.3 from book)* Practice some Pumping Lemma problems in Section 4. Answer the questions there YOUR TASKSYour tasks will be denoted by "---> YOUR TASK n <---" belowimport sys sys.path[0:0] = ['../..','../../3rdparty'] # Put these at the head of the search path from jove.DotBashers import * from jove.Def_md2mc import * from jove.Def_DFA import * from jove.LangDef import * from jove.Def_RE2NFA import * from jove.Def_NFA import *Design a DFA for strings over 0,1 that begin XOR end with 01* If it begins with 01, it can't end with 01* If it does not begin with 01, it must end with 01 This is the main design the students will work on!! ---> YOUR TASK 1 is below <---Db01XORe01 = md2mc(''' !! !!- The overall algorithm is to case-analyze on whether we began with a 01 or not. !!- Please see the state names assigned. Once you understand how the state names were designed, !!- the transitions should make sense. !! DFA !! 
This DFA chooses meaningful state names and records the last bit seen !!--- The DFA has to be designed by you !!--- I'll just tell you the state names I ended up inventing, and my scheme for naming the states !!--- without giving such state names, I could not have solved this problem! !!--- In other words, the ENTIRE solution depended on my keeping a clear sense of state names !!--- and also remembering one bit seen last. !!--- The state names I chose --- S_0 !! No acceptance upon seeing a 0; record in state name S0 MNE_1 !! MNE means "must not end in 01." The _1 remembers the last bit seen NB_1 !! NB means "not beginning with 01." The _1 remembers the last bit seen NB_0 !! Not beginning with 01. Also 0 is the last bit seen FNB_1 !! FNB means a final state for the case not beginning with 01. Also '1' seen last FMNE_0 !! FMNE means a final state and "must not end in 01". Also 0 bit seen last FMNE_0 !! Since we are seeing a 00, we are not ending in 01, so the F status is kept ''') dotObj_dfa(Db01XORe01, FuseEdges = True) Sigma={'0','1'} for i in range(1,120): w = nthnumeric(i, Sigma) if accepts_dfa(Db01XORe01, w): print("DFA Db01XORe01 accepts ", w) print("DFA Db01XORe01 rejects all other w in the test set")---> YOUR TASK 1 ENDS HERE <--- The part below will be retained as such. The TAs will check Presto-1 and Presto-2They expect empty DFA. Then the student design is correct! Else there is a mistake somewhere. Testing out the above machine is not easy; we use REs for thatWe will show the power of regular expressions to test out the above machine. You will simply be doing the tests below and ending up with empty DFAs at "Presto-1" and "Presto-2". The TAs will grade wrt those Prestos.There is no other way to exhaustively test out the DFA! We first complement the above machine and make senseThe complement of the above machine must be a DFA that begins with a 01 exactly when it ends with a 01. See if so.# Its complement must be a machine that begins with 01 exactly when it ends with 01 :-) # This can be read out and confirmed! Db01XNORe01 = comp_dfa(Db01XORe01) dotObj_dfa(Db01XNORe01, FuseEdges = True)Check the complementIf the complement looks like it is doing its job, you can let out a mini Presto. But we will do more tests! Obtain an RE for begins with 01 AND ends with 01# This RE "01(''+(0+1)*01)" captures begin with 01 AND ends with 01 Db01ANDe01 = min_dfa(nfa2dfa(re2nfa("01(''+(0+1)*01)"))) dotObj_dfa(Db01ANDe01)Obtain DB01XNORe01 minus Db01ANDe01 Now the DFA must neither begin with 01 nor end with 01. Check.We can let out a mini Presto if so. It indeed is so!# We now need to perform DbXNORe01 - DB01ANDE01 to get a DFA which neither begins nor ends with 01 Dnb01ANDne01 = intersect_dfa(Db01XNORe01, comp_dfa(Db01ANDe01)) dotObj_dfa(Dnb01ANDne01)This is the RE for "begins with 01". Again fool-proof to obtain.# Now Dnb01ANDne01 must neither begin with 01 nor end with 01 # We can intersect with DFAs that begin with 01 and then DFA that ends with 01 and prove they are empty Db01 = min_dfa(nfa2dfa(re2nfa("01(0+1)*"))) dotObj_dfa(Db01)This is the RE for "ends with 01". Again fool-proof to obtain.De01 = min_dfa(nfa2dfa(re2nfa("(0+1)*01"))) dotObj_dfa(De01)Presto-1 : If the following DFA is empty, it DOES NOT begin with 01The student is likely right! 
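Besides reading the drawing, you can also sanity-check the intersection with strings (a sketch that reuses only the helpers already imported in this quiz; it samples test strings rather than proving emptiness):

```python
# The intersection below should accept no test string if the design is right
D_presto1 = min_dfa(intersect_dfa(Db01, Dnb01ANDne01))
counterexamples = [nthnumeric(i, Sigma)
                   for i in range(1, 120)
                   if accepts_dfa(D_presto1, nthnumeric(i, Sigma))]
print("Strings wrongly accepted:", counterexamples)  # expect an empty list
```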
Check Presto-2 also.dotObj_dfa(min_dfa(intersect_dfa(Db01, Dnb01ANDne01)), FuseEdges=True)Presto-2: If the following DFA is empty, it DOES NOT end with 01If this check also passes, the student is right !!dotObj_dfa(min_dfa(intersect_dfa(De01, Dnb01ANDne01)), FuseEdges=True)Since Presto-1 and Presto-2 worked out, we are done !! Design a DFA for Numbers arriving MSB-first, equal to 0 modulo 5Similar to the machine in Section 5.2.3 but with "5" not "3" ---> YOUR TASK 2 <---DmsbMod5 = md2mc(''' DFA ''') dotObj_dfa(DmsbMod5, FuseEdges=True) Sigma={'0','1'} for i in range(1,120): w = nthnumeric(i, Sigma) if accepts_dfa(DmsbMod5, w): print("DFA DmsbMod5 accepts ", w, " having value ", int(w, 2)) # Printout below must be for numbers modulo 5 = 0MNIST digits classification with TensorFlowimport numpy as np from sklearn.metrics import accuracy_score from matplotlib import pyplot as plt %matplotlib inline import tensorflow as tf print("We're using TF", tf.__version__) import sys sys.path.append("../..") from collections import defaultdict import numpy as np from keras.models import save_model import tensorflow as tf import keras from keras import backend as K from matplotlib import pyplot as plt from IPython.display import clear_output, display_html, HTML import contextlib import time import io import urllib import base64 # import grading # import matplotlib_utils # from importlib import reload # reload(matplotlib_utils) # # import grading_utils # # reload(grading_utils) # import keras_utils # from keras_utils import reset_tf_sessionWe're using TF 1.15.2Fill in your Coursera token and emailTo successfully submit your answers to our grader, please fill in your Coursera submission token and emailgrader = grading.Grader(assignment_key="", all_parts=["9XaAS", "vmogZ", "RMv95", "i8bgs", "rE763"]) # token expires every 30 min COURSERA_TOKEN = "### YOUR TOKEN HERE ###" COURSERA_EMAIL = "### YOUR EMAIL HERE ###"Look at the dataIn this task we have 50000 28x28 images of digits from 0 to 9.We will train a classifier on this data.import keras def load_dataset(flatten=False): (X_train, y_train), (X_test, y_test) = keras.datasets.mnist.load_data() # normalize x X_train = X_train.astype(float) / 255. X_test = X_test.astype(float) / 255. # we reserve the last 10000 training examples for validation X_train, X_val = X_train[:-10000], X_train[-10000:] y_train, y_val = y_train[:-10000], y_train[-10000:] if flatten: X_train = X_train.reshape([X_train.shape[0], -1]) X_val = X_val.reshape([X_val.shape[0], -1]) X_test = X_test.reshape([X_test.shape[0], -1]) return X_train, y_train, X_val, y_val, X_test, y_test # import preprocessed_mnist X_train, y_train, X_val, y_val, X_test, y_test = load_dataset() # X contains rgb values divided by 255 print("X_train [shape %s] sample patch:\n" % (str(X_train.shape)), X_train[1, 15:20, 5:10]) print("A closeup of a sample patch:") plt.imshow(X_train[1, 15:20, 5:10], cmap="Greys") plt.show() print("And the whole sample:") plt.imshow(X_train[1], cmap="Greys") plt.show() print("y_train [shape %s] 10 samples:\n" % (str(y_train.shape)), y_train[:10])X_train [shape (50000, 28, 28)] sample patch: [[0. 0.29803922 0.96470588 0.98823529 0.43921569] [0. 0.33333333 0.98823529 0.90196078 0.09803922] [0. 0.33333333 0.98823529 0.8745098 0. ] [0. 0.33333333 0.98823529 0.56862745 0. ] [0. 0.3372549 0.99215686 0.88235294 0. 
]] A closeup of a sample patch:Linear modelYour task is to train a linear classifier $\vec{x} \rightarrow y$ with SGD using TensorFlow.You will need to calculate a logit (a linear transformation) $z_k$ for each class: $$z_k = \vec{x} \cdot \vec{w_k} + b_k \quad k = 0..9$$And transform logits $z_k$ to valid probabilities $p_k$ with softmax: $$p_k = \frac{e^{z_k}}{\sum_{i=0}^{9}{e^{z_i}}} \quad k = 0..9$$We will use a cross-entropy loss to train our multi-class classifier:$$\text{cross-entropy}(y, p) = -\sum_{k=0}^{9}{\log(p_k)[y = k]}$$ where $$[x]=\begin{cases} 1, \quad \text{if $x$ is true} \\ 0, \quad \text{otherwise} \end{cases}$$Cross-entropy minimization pushes $p_k$ close to 1 when $y = k$, which is what we want.Here's the plan:* Flatten the images (28x28 -> 784) with `X_train.reshape((X_train.shape[0], -1))` to simplify our linear model implementation* Use a matrix placeholder for flattened `X_train`* Convert `y_train` to one-hot encoded vectors that are needed for cross-entropy* Use a shared variable `W` for all weights (a column $\vec{w_k}$ per class) and `b` for all biases.* Aim for ~0.93 validation accuracyX_train_flat = X_train.reshape((X_train.shape[0], -1)) print(X_train_flat.shape) X_val_flat = X_val.reshape((X_val.shape[0], -1)) print(X_val_flat.shape) import keras y_train_oh = keras.utils.to_categorical(y_train, 10) y_val_oh = keras.utils.to_categorical(y_val, 10) print(y_train_oh.shape) print(y_train_oh[:3], y_train[:3]) def reset_tf_session(): curr_session = tf.get_default_session() # close current session if curr_session is not None: curr_session.close() # reset graph K.clear_session() # create new session config = tf.ConfigProto() config.gpu_options.allow_growth = True s = tf.InteractiveSession(config=config) K.set_session(s) return s # run this again if you remake your graph s = reset_tf_session() # Model parameters: W and b W = tf.get_variable("W",shape=(784,10)) ### tf.get_variable(...) with shape[0] = 784 b = tf.get_variable("b",shape=(10,)) ### tf.get_variable(...) # Placeholders for the input data input_X = tf.placeholder(tf.float32,shape=(None,784)) ### tf.placeholder(...) for flat X with shape[0] = None for any batch size input_y = tf.placeholder(tf.int32,shape=(None,10)) ### tf.placeholder(...) for one-hot encoded true labels # Compute predictions logits = input_X@W ### logits for input_X, resulting shape should be [input_X.shape[0], 10] probas =tf.nn.softmax(logits) ### YOUR CODE HERE ### apply tf.nn.softmax to logits classes = tf.argmax(probas,axis=1) ### YOUR CODE HERE ### apply tf.argmax to find a class index with highest probability # Loss should be a scalar number: average loss over all the objects with tf.reduce_mean(). # Use tf.nn.softmax_cross_entropy_with_logits on top of one-hot encoded input_y and logits. # It is identical to calculating cross-entropy on top of probas, but is more numerically friendly (read the docs). 
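# For reference only (a sketch, not part of the graded code): the mathematically equivalent
# but numerically fragile version computed from `probas` directly would be
#   naive_loss = tf.reduce_mean(
#       -tf.reduce_sum(tf.cast(input_y, tf.float32) * tf.math.log(probas + 1e-12), axis=1))
# The fused op used below avoids the log(0)/overflow issues of this naive form.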
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=input_y,logits=logits))### YOUR CODE HERE ### cross-entropy loss # Use a default tf.train.AdamOptimizer to get an SGD step step = tf.train.AdamOptimizer().minimize(loss) ### optimizer step that minimizes the loss #!/usr/bin/env python # -*- coding: utf-8 -*- def clear_and_display_figure(fig, sleep=0.01): img_data = io.BytesIO() fig.savefig(img_data, format='jpeg') img_data.seek(0) uri = 'data:image/jpeg;base64,' + urllib.request.quote(base64.b64encode(img_data.getbuffer())) img_data.close() clear_output(wait=True) display_html(HTML('')) time.sleep(sleep) class SimpleMovieWriter(object): """ Usage example: anim = animation.FuncAnimation(...) anim.save(None, writer=SimpleMovieWriter(sleep=0.01)) """ def __init__(self, sleep=0.1): self.sleep = sleep def setup(self, fig): self.fig = fig def grab_frame(self, **kwargs): clear_and_display_figure(self.fig, self.sleep) @contextlib.contextmanager def saving(self, fig, *args, **kwargs): self.setup(fig) try: yield self finally: pass class SimpleTrainingCurves(object): def __init__(self, loss_name, metric_name): self.fig, (self.ax1, self.ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12, 4)) self.ax1.set_title(loss_name) self.ax2.set_title(metric_name) self.train_loss_curve, = self.ax1.plot([], [], 'r', label='train', lw=2) self.valid_loss_curve, = self.ax1.plot([], [], 'g', label='valid', lw=2) self.train_metric_curve, = self.ax2.plot([], [], 'r', label='train', lw=2) self.valid_metric_curve, = self.ax2.plot([], [], 'g', label='valid', lw=2) self.iter = 0 self.y_limits_1 = [None, None] self.y_limits_2 = [None, None] plt.close(self.fig) def _update_y_limits(self, limits, *values): limits[0] = min(list(values) + ([limits[0]] if limits[0] else [])) limits[1] = max(list(values) + ([limits[1]] if limits[1] else [])) def _update_curve(self, curve, value, label): x, y = curve.get_data() curve.set_data(list(x) + [self.iter], list(y) + [value]) curve.set_label("{}: {}".format(label, value)) def _set_y_limits(self, ax, limits): spread = limits[1] - limits[0] ax.set_ylim(limits[0] - 0.05*spread, limits[1] + 0.05*spread) def add(self, train_loss, valid_loss, train_metric, valid_metric): self._update_curve(self.train_loss_curve, train_loss, "train") self._update_curve(self.valid_loss_curve, valid_loss, "valid") self._update_curve(self.train_metric_curve, train_metric, "train") self._update_curve(self.valid_metric_curve, valid_metric, "valid") self.ax1.set_xlim(0, self.iter) self.ax2.set_xlim(0, self.iter) self._update_y_limits(self.y_limits_1, train_loss, valid_loss) self._update_y_limits(self.y_limits_2, train_metric, valid_metric) self._set_y_limits(self.ax1, self.y_limits_1) self._set_y_limits(self.ax2, self.y_limits_2) clear_and_display_figure(self.fig) self.ax1.legend() self.ax2.legend() self.iter += 1 s.run(tf.global_variables_initializer()) BATCH_SIZE = 512 EPOCHS = 40 # for logging the progress right here in Jupyter (for those who don't have TensorBoard) simpleTrainingCurves =SimpleTrainingCurves("cross-entropy", "accuracy") for epoch in range(EPOCHS): # we finish an epoch when we've looked at all training samples batch_losses = [] for batch_start in range(0, X_train_flat.shape[0], BATCH_SIZE): # data is already shuffled _, batch_loss = s.run([step, loss], {input_X: X_train_flat[batch_start:batch_start+BATCH_SIZE], input_y: y_train_oh[batch_start:batch_start+BATCH_SIZE]}) # collect batch losses, this is almost free as we need a forward pass for backprop anyway 
batch_losses.append(batch_loss) train_loss = np.mean(batch_losses) val_loss = s.run(loss, {input_X: X_val_flat, input_y: y_val_oh}) # this part is usually small train_accuracy = accuracy_score(y_train, s.run(classes, {input_X: X_train_flat})) # this is slow and usually skipped valid_accuracy = accuracy_score(y_val, s.run(classes, {input_X: X_val_flat})) simpleTrainingCurves.add(train_loss, val_loss, train_accuracy, valid_accuracy)Submit a linear model## GRADED PART, DO NOT CHANGE! # Testing shapes grader.set_answer("9XaAS", grading_utils.get_tensors_shapes_string([W, b, input_X, input_y, logits, probas, classes])) # Validation loss grader.set_answer("vmogZ", s.run(loss, {input_X: X_val_flat, input_y: y_val_oh})) # Validation accuracy grader.set_answer("RMv95", accuracy_score(y_val, s.run(classes, {input_X: X_val_flat}))) # you can make submission with answers so far to check yourself at this stage grader.submit(COURSERA_EMAIL, COURSERA_TOKEN)MLP with hidden layers Previously we've coded a dense layer with matrix multiplication by hand. But this is not convenient, you have to create a lot of variables and your code becomes a mess. In TensorFlow there's an easier way to make a dense layer:```pythonhidden1 = tf.layers.dense(inputs, 256, activation=tf.nn.sigmoid)```That will create all the necessary variables automatically.Here you can also choose an activation function (remember that we need it for a hidden layer!).Now define the MLP with 2 hidden layers and restart training with the cell above.You're aiming for ~0.97 validation accuracy here.# write the code here to get a new `step` operation and then run the cell with training loop above. # name your variables in the same way (e.g. logits, probas, classes, etc) for safety. ### YOUR CODE HERE ### hidden1=tf.layers.dense(input_X,1024,activation=tf.nn.sigmoid) logits=tf.layers.dense(hidden1,10) probas =tf.nn.softmax(logits) ### YOUR CODE HERE ### apply tf.nn.softmax to logits classes = tf.argmax(probas,axis=1) ### YOUR CODE HERE ### apply tf.argmax to find a class index with highest probability # Loss should be a scalar number: average loss over all the objects with tf.reduce_mean(). # Use tf.nn.softmax_cross_entropy_with_logits on top of one-hot encoded input_y and logits. # It is identical to calculating cross-entropy on top of probas, but is more numerically friendly (read the docs). loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=input_y,logits=logits))### YOUR CODE HERE ### cross-entropy loss # Use a default tf.train.AdamOptimizer to get an SGD step step = tf.train.AdamOptimizer().minimize(loss) ### optimizer step that minimizes the lossSubmit the MLP with 2 hidden layersRun these cells after training the MLP with 2 hidden layers## GRADED PART, DO NOT CHANGE! 
# Validation loss for MLP grader.set_answer("i8bgs", s.run(loss, {input_X: X_val_flat, input_y: y_val_oh})) # Validation accuracy for MLP grader.set_answer("rE763", accuracy_score(y_val, s.run(classes, {input_X: X_val_flat}))) # you can make submission with answers so far to check yourself at this stage grader.submit(COURSERA_EMAIL, COURSERA_TOKEN)BEM method#Q = 2000/3 #strength of the source-sheet,stb/d h=25.26 #thickness of local gridblock,ft phi=0.2 #porosity kx=200 #pemerability in x direction,md ky=200 #pemerability in y direction,md kr=kx/ky #pemerability ratio miu=1 #viscosity,cp Nw=1 #Number of well Qwell_1=2000 #Flow rate of well 1 Boundary_V=-400 #boundary velocity ft/dayBoundary Discretizationwe will create a discretization of the body geometry into panels (line segments in 2D). A panel's attributes are: its starting point, end point and mid-point, its length and its orientation. See the following figure for the nomenclature used in the code and equations below.Figure 1. Nomenclature of the boundary element in the local coordinates Create panel and well classclass Panel: """Contains information related to a panel.""" def __init__(self, xa, ya, xb, yb): """Creates a panel. Arguments --------- xa, ya -- Cartesian coordinates of the first end-point. xb, yb -- Cartesian coordinates of the second end-point. """ self.xa, self.ya = xa, ya self.xb, self.yb = xb, yb self.xc, self.yc = (xa+xb)/2, (ya+yb)/2 # control-point (center-point) self.length = math.sqrt((xb-xa)**2+(yb-ya)**2) # length of the panel # orientation of the panel (angle between x-axis and panel) self.sinalpha=(yb-ya)/self.length self.cosalpha=(xb-xa)/self.length self.Q = 0. # source strength self.U = 0. # velocity component self.V = 0. # velocity component self.P = 0. # pressure coefficient class Well: """Contains information related to a panel.""" def __init__(self, xw, yw,rw,Q): """Creates a panel. Arguments --------- xw, yw -- Cartesian coordinates of well source. Q -- Flow rate of well source. rw -- radius of well source. """ self.xw, self.yw = xw, yw self.Q = Q # source strength self.rw = rw # velocity componentWe create a node distribution on the boundary that is refined near the corner with cosspace functiondef cosspace(st,ed,N): N=N+1 AngleInc=numpy.pi/(N-1) CurAngle = AngleInc space=numpy.linspace(0,1,N) space[0]=st for i in range(N-1): space[i+1] = 0.5*numpy.abs(ed-st)*(1 - math.cos(CurAngle)); CurAngle += AngleInc if edDiscretize boundary element along the boundary Here we implement BEM in a squre gridN=80 #Number of boundary element Nbd=20 #Number of boundary element in each boundary Dx=1. #Grid block length in X direction Dy=1. 
#Gird block lenght in Y direction #Create the array x_ends = numpy.linspace(0, Dx, N) # computes a 1D-array for x y_ends = numpy.linspace(0, Dy, N) # computes a 1D-array for y interval=cosspace(0,Dx,Nbd) rinterval=cosspace(Dx,0,Nbd) #interval=numpy.linspace(0,1,Nbd+1) #rinterval=numpy.linspace(1,0,Nbd+1) #Define the rectangle boundary for i in range(Nbd): x_ends[i]=0 y_ends[i]=interval[i] for i in range(Nbd): x_ends[i+Nbd]=interval[i] y_ends[i+Nbd]=Dy for i in range(Nbd): x_ends[i+Nbd*2]=Dx y_ends[i+Nbd*2]=rinterval[i] for i in range(Nbd): x_ends[i+Nbd*3]=rinterval[i] y_ends[i+Nbd*3]=0 x_ends,y_ends=numpy.append(x_ends, x_ends[0]), numpy.append(y_ends, y_ends[0]) #Define the panel panels = numpy.empty(N, dtype=object) for i in range(N): panels[i] = Panel(x_ends[i], y_ends[i], x_ends[i+1], y_ends[i+1]) #Define the well wells = numpy.empty(Nw, dtype=object) wells[0]=Well(Dx/2,Dy/2,0.025,Qwell_1) #for i in range(N): # print("Panel Coordinate (%s,%s) sina,cosa (%s,%s) " % (panels[i].xc,panels[i].yc,panels[i].sinalpha,panels[i].cosalpha)) #print("Well Location (%s,%s) radius: %s Flow rate:%s " % (wells[0].xw,wells[0].yw,wells[0].rw,wells[0].Q))Plot boundary elements and wells#Plot the panel %matplotlib inline val_x, val_y = 0.3, 0.3 x_min, x_max = min(panel.xa for panel in panels), max(panel.xa for panel in panels) y_min, y_max = min(panel.ya for panel in panels), max(panel.ya for panel in panels) x_start, x_end = x_min-val_x*(x_max-x_min), x_max+val_x*(x_max-x_min) y_start, y_end = y_min-val_y*(y_max-y_min), y_max+val_y*(y_max-y_min) size = 5 pyplot.figure(figsize=(size, (y_end-y_start)/(x_end-x_start)*size)) pyplot.grid(True) pyplot.xlabel('x', fontsize=16) pyplot.ylabel('y', fontsize=16) pyplot.xlim(x_start, x_end) pyplot.ylim(y_start, y_end) pyplot.plot(numpy.append([panel.xa for panel in panels], panels[0].xa), numpy.append([panel.ya for panel in panels], panels[0].ya), linestyle='-', linewidth=1, marker='o', markersize=6, color='#CD2305'); pyplot.scatter(wells[0].xw,wells[0].yw,s=100,alpha=0.5) pyplot.legend(['panels', 'Wells'], loc=1, prop={'size':12})Boundary element implementationFigure 2. Representation of a local gridblock with boundary elementsGenerally, the influence of all the j panels on the i BE node can be expressed as follows:\begin{matrix}{{c}_{ij}}{{p}_{i}}+{{p}_{i}}\int_{{{s}_{j}}}{{{H}_{ij}}d{{s}_{j}}}=({{v}_{i}}\cdot \mathbf{n})\int_{{{s}_{j}}}{{{G}_{ij}}}d{{s}_{j}}\end{matrix}Where,${{c}_{ij}}$ is the free term, cased by source position.${{c}_{ij}}=\left\{ \begin{matrix} \begin{matrix} 1 & \text{source j on the internal domain} \\\end{matrix} \\ \begin{matrix} 0.5 & \text{source j on the boundary} \\\end{matrix} \\ \begin{matrix} 0 & \text{source j on the external domain} \\\end{matrix} \\\end{matrix} \right.$$\int_{{{s}_{j}}}{{{H}_{ij}}d{{s}_{j}}\text{ }}$ is the integrated effect of the boundary element source i on the resulting normal flux at BE node j. 
$\int_{{{s}_{j}}}{{{G}_{ij}}}d{{s}_{j}}$ is the is the integrated effect of the boundary element source i on the resulting pressure at BE node j Line segment source solution for pressure and velocity (Derived recently)The integrated effect can be formulated using line segment source solution, which givs:\begin{equation}\int_{{{s}_{j}}}{{{G}_{ij}}}d{{s}_{j}}=B{{Q}_{w}}=P({{{x}'}_{i}},{{{y}'}_{i}})=-\frac{70.60\mu }{h\sqrt{{{k}_{x}}{{k}_{y}}}}\int_{t=0}^{t={{l}_{j}}}{\ln \left\{ {{({x}'-t\cos {{\alpha }_{j}})}^{2}}+\frac{{{k}_{x}}}{{{k}_{y}}}{{({y}'-t\sin {{\alpha }_{j}})}^{2}} \right\}dt}\cdot {{Q}_{w}}\end{equation}\begin{equation}\int_{{{s}_{j}}}{{{H}_{ij}}d{{s}_{j}}\text{ }}={{v}_{i}}(s)\cdot {{\mathbf{n}}_{i}}=-{{u}_{i}}\sin {{\alpha }_{i}}+{{v}_{i}}\cos {{\alpha }_{i}}\end{equation}Where,\begin{equation}u\left( {{{{x}'}}_{i}},{{{{y}'}}_{i}} \right)={{A}_{u}}{{Q}_{j}}=\frac{0.8936}{h\phi }\sqrt{\frac{{{k}_{x}}}{{{k}_{y}}}}\int_{t=0}^{t={{l}_{j}}}{\frac{{{{{x}'}}_{i}}-t\cos {{\alpha }_{j}}}{{{\left( {{{{x}'}}_{i}}-t\cos {{\alpha }_{j}} \right)}^{2}}+\frac{{{k}_{x}}}{{{k}_{y}}}{{({{{{y}'}}_{i}}-t\sin {{\alpha }_{j}})}^{2}}}dt}\cdot {{Q}_{j}}\end{equation}\begin{equation}v\left( {{{{x}'}}_{i}},{{{{y}'}}_{i}} \right)={{A}_{v}}{{Q}_{j}}=\frac{0.8936}{h\phi }\sqrt{\frac{{{k}_{x}}}{{{k}_{y}}}}\int_{t=0}^{t={{l}_{j}}}{\frac{{{{{y}'}}_{i}}-t\sin {{\alpha }_{j}}}{{{\left( {{{{x}'}}_{i}}-t\cos {{\alpha }_{j}} \right)}^{2}}+\frac{{{k}_{x}}}{{{k}_{y}}}{{({{{{y}'}}_{i}}-t\sin {{\alpha }_{j}})}^{2}}}dt}\cdot {{Q}_{j}}\end{equation} Line segment source Integration function (Bij and Aij)#Panel infuence factor Bij def InflueceP(x, y, panel): """Evaluates the contribution of a panel at one point. Arguments --------- x, y -- Cartesian coordinates of the point. panel -- panel which contribution is evaluated. Returns ------- Integral over the panel of the influence at one point. """ #Transfer global coordinate point(x,y) to local coordinate x=x-panel.xa y=y-panel.ya L1=panel.length #Calculate the pressure and velocity influence factor a=panel.cosalpha**2+kr*panel.sinalpha**2 b=x*panel.cosalpha+kr*panel.sinalpha*y c=y*panel.cosalpha-x*panel.sinalpha dp=70.6*miu/h/math.sqrt(kx*ky) Cp = dp/a*( ( b*math.log(x**2-2*b*L1+a*L1**2+kr*y**2) -L1*a*math.log((x-L1*panel.cosalpha)**2+kr*(y-L1*panel.sinalpha)**2) +2*math.sqrt(kr)*c*math.atan((b-a*L1)/math.sqrt(kr)/c) ) - ( b*math.log(x**2+kr*y**2) +2*math.sqrt(kr)*c*math.atan((b)/math.sqrt(kr)/c) ) ) #debug #print("a: %s b:%s c:%s " % (a,b,c)) #angle=math.atan((b-a*L1)/math.sqrt(kr)/c)*180/numpy.pi #print("Magic angle:%s"% angle) return Cp def InflueceU(x, y, panel): """Evaluates the contribution of a panel at one point. Arguments --------- x, y -- Cartesian coordinates of the point. panel -- panel which contribution is evaluated. Returns ------- Integral over the panel of the influence at one point. 
""" #Transfer global coordinate point(x,y) to local coordinate x=x-panel.xa y=y-panel.ya L1=panel.length #Calculate the pressure and velocity influence factor a=panel.cosalpha**2+kr*panel.sinalpha**2 b=x*panel.cosalpha+kr*panel.sinalpha*y c=y*panel.cosalpha-x*panel.sinalpha dv=-0.4468/h/phi*math.sqrt(kx/ky) Cu = dv/a*( ( panel.cosalpha*math.log(x**2-2*b*L1+a*L1**2+kr*y**2)+ 2*math.sqrt(kr)*panel.sinalpha*math.atan((a*L1-b)/math.sqrt(kr)/c) ) - ( panel.cosalpha*math.log(x**2+kr*y**2)+2*math.sqrt(kr)*panel.sinalpha*math.atan((-b)/math.sqrt(kr)/c) ) ) #print("a: %s b:%s c:%s " % (a,b,c)) #angle=math.atan((b-a*L1)/math.sqrt(kr)/c)*180/numpy.pi #print("Magic angle:%s"% angle) return Cu def InflueceV(x, y, panel): """Evaluates the contribution of a panel at one point. Arguments --------- x, y -- Cartesian coordinates of the point. panel -- panel which contribution is evaluated. Returns ------- Integral over the panel of the influence at one point. """ #Transfer global coordinate point(x,y) to local coordinate x=x-panel.xa y=y-panel.ya L1=panel.length #Calculate the pressure and velocity influence factor a=panel.cosalpha**2+kr*panel.sinalpha**2 b=x*panel.cosalpha+kr*panel.sinalpha*y c=y*panel.cosalpha-x*panel.sinalpha dv=-0.4468/h/phi*math.sqrt(kx/ky) Cv = dv/a*( ( panel.sinalpha*math.log(x**2-2*b*L1+a*L1**2+kr*y**2)+ 2*math.sqrt(1/kr)*panel.cosalpha*math.atan((b-a*L1)/math.sqrt(kr)/c) ) - ( panel.sinalpha*math.log(x**2+kr*y**2)+2*math.sqrt(1/kr)*panel.cosalpha*math.atan((b)/math.sqrt(kr)/c) ) ) #print("a: %s b:%s c:%s " % (a,b,c)) #angle=math.atan((b-a*L1)/math.sqrt(kr)/c)*180/numpy.pi #print("Magic angle:%s"% angle) return CvWell source function Line source solution for pressure and velocity (Datta-Gupta, 2007)\begin{equation}P(x,y)=B{{Q}_{w}}=-\frac{70.60\mu }{h\sqrt{{{k}_{x}}{{k}_{y}}}}\ln \left\{ {{(x-{{x}_{w}})}^{2}}+\frac{{{k}_{x}}}{{{k}_{y}}}{{(y-{{y}_{w}})}^{2}} \right\}{{Q}_{w}}+{{P}_{avg}}\end{equation}\begin{equation}\frac{\partial P}{\partial x}=u=\frac{0.8936}{h\phi }\sqrt{\frac{{{k}_{x}}}{{{k}_{y}}}}\sum\limits_{k=1}^{{{N}_{w}}}{{{Q}_{k}}}\frac{x-{{x}_{k}}}{{{\left( x-{{x}_{k}} \right)}^{2}}+\frac{{{k}_{x}}}{{{k}_{y}}}{{(y-{{y}_{k}})}^{2}}}\end{equation}\begin{equation}\frac{\partial P}{\partial y}=v=\frac{0.8936}{h\phi }\sqrt{\frac{{{k}_{x}}}{{{k}_{y}}}}\sum\limits_{k=1}^{{{N}_{w}}}{{{Q}_{k}}}\frac{y-{{y}_{k}}}{{{\left( x-{{x}_{k}} \right)}^{2}}+\frac{{{k}_{x}}}{{{k}_{y}}}{{(y-{{y}_{k}})}^{2}}}\end{equation}#Well influence factor def InflueceP_W(x, y, well): """Evaluates the contribution of a panel at one point. Arguments --------- x, y -- Cartesian coordinates of the point. panel -- panel which contribution is evaluated. Returns ------- Integral over the panel of the influence at one point. """ dp=-70.6*miu/h/math.sqrt(kx*ky) Cp=dp*math.log((x-well.xw)**2+kr*(y-well.yw)**2) return Cp def InflueceU_W(x, y, well): """Evaluates the contribution of a panel at one point. Arguments --------- x, y -- Cartesian coordinates of the point. panel -- panel which contribution is evaluated. Returns ------- Integral over the panel of the influence at one point. """ dv=0.8936/h/phi*math.sqrt(kx/ky) Cu=dv*(x-well.xw)/((x-well.xw)**2+kr*(y-well.yw)**2) return Cu def InflueceV_W(x, y, well): """Evaluates the contribution of a panel at one point. Arguments --------- x, y -- Cartesian coordinates of the point. panel -- panel which contribution is evaluated. Returns ------- Integral over the panel of the influence at one point. 
""" dv=0.8936/h/phi*math.sqrt(kx/ky) Cv=dv*(y-well.yw)/((x-well.xw)**2+kr*(y-well.yw)**2) return Cv #InflueceV(0.5,1,panels[3]) #InflueceP(0,0.5,panels[0]) #InflueceU(0,0.5,panels[0])BEM function solutionGenerally, the influence of all the j panels on the i BE node can be expressed as follows:\begin{matrix}{{c}_{ij}}{{p}_{i}}+{{p}_{i}}\int_{{{s}_{j}}}{{{H}_{ij}}d{{s}_{j}}}=({{v}_{i}}\cdot \mathbf{n})\int_{{{s}_{j}}}{{{G}_{ij}}}d{{s}_{j}}\end{matrix}Applying boundary condition along the boundary on above equation, a linear systsem can be constructed as follows:\begin{matrix}\left[ {{{{H}'}}_{ij}} \right]\left[ {{P}_{i}} \right]=\left[ {{G}_{ij}} \right]\left[ {{v}_{i}}\cdot \mathbf{n} \right]\end{matrix}!!!!!MY IMPLEMENTATION MAY HAS SOME PROBLEM HERE!!!!!!All the integration solution can be evaluated except on itself. Where,$\left[ {{{{H}'}}_{ij}} \right]=\left\{ \begin{matrix} \begin{matrix} {{H}_{ij}} & i\ne j \\\end{matrix} \\ \begin{matrix} {{H}_{ij}}+\frac{1}{2} & i=j \\\end{matrix} \\\end{matrix} \right.$Figure 3. Representation of coordinate systems and the principle of superstition with well source and boundary element source As shown in Fig.3, the pressure and velocity at any point i in the local gridblock can be determined using Eqs. below. Applying principle of superposition for each BE node along the boundary (Fig. 3), boundary condition can be written as follows:\begin{matrix} {{P}_{i}}(s)=\sum\limits_{j=1}^{M}{{{B}_{ij}}{{Q}_{j}}} & \text{constant pressure boundary} \\\end{matrix}\begin{matrix} {{v}_{i}}(s)\cdot {{\mathbf{n}}_{i}}=\sum\limits_{j=1}^{M}{{{A}_{ij}}{{Q}_{j}}} & \text{constant flux boundary} \\\end{matrix}The Pi and v ·n are the konwn boundary codition. The flow rate(strength) of boundary elements in Hij and Gij are the only unknown terms. So we could rearrange the matrix above as linear system:${{\left[ \begin{matrix} {{A}_{ij}} \\ {{B}_{ij}} \\\end{matrix} \right]}_{N\times N}}{{\left[ \begin{matrix} {{Q}_{j}} \\ {{Q}_{j}} \\\end{matrix} \right]}_{N\times 1}}={{\left[ \begin{matrix} -{{u}_{i}}\sin {{\alpha }_{i}}+{{v}_{i}}\cos {{\alpha }_{i}} \\ {{P}_{i}} \\\end{matrix} \right]}_{N\times 1}}$def build_matrix(panels): """Builds the source matrix. Arguments --------- panels -- array of panels. Returns ------- A -- NxN matrix (N is the number of panels). """ N = len(panels) A = numpy.empty((N, N), dtype=float) #numpy.fill_diagonal(A, 0.5) for i, p_i in enumerate(panels): #target nodes for j, p_j in enumerate(panels): #BE source #if i != j: ###Matrix construction if i>=0 and i=3*Nbd and i<4*Nbd: A[i,j] = -p_j.sinalpha*InflueceU(p_i.xc, p_i.yc, p_j)+p_j.cosalpha*InflueceV(p_i.xc, p_i.yc, p_j) #A[i,j] = InflueceP(p_i.xc, p_i.yc, p_j) if i>=Nbd and i<2*Nbd or i>=2*Nbd and i<3*Nbd: A[i,j] = -p_j.sinalpha*InflueceU(p_i.xc, p_i.yc, p_j)+p_j.cosalpha*InflueceV(p_i.xc, p_i.yc, p_j) #A[i,j] = InflueceP(p_i.xc, p_i.yc, p_j) return A def build_rhs(panels): """Builds the RHS of the linear system. Arguments --------- panels -- array of panels. Returns ------- b -- 1D array ((N+1)x1, N is the number of panels). 
""" b = numpy.empty(len(panels), dtype=float) for i, panel in enumerate(panels): V_well=( -panel.sinalpha*Qwell_1*InflueceU_W(panel.xc, panel.yc, wells[0])+panel.cosalpha*Qwell_1*InflueceV_W(panel.xc, panel.yc, wells[0]) ) if i>=0 and i=Nbd and i<2*Nbd: b[i]=-V_well #b[i]=-42 if i>=2*Nbd and i<3*Nbd: b[i]=-V_well #b[i]=-42 if i>=3*Nbd and i<4*Nbd: b[i]=0+V_well #b[i]=84 return b #Qwell_1=300 #Flow rate of well 1 #Boundary_V=-227 #boundary velocity ft/day A = build_matrix(panels) # computes the singularity matrix b = build_rhs(panels) # computes the freestream RHS # solves the linear system Q = numpy.linalg.solve(A, b) for i, panel in enumerate(panels): panel.Q = Q[i]Plot results#Visulize the pressure and velocity field #Define meshgrid Nx, Ny = 50, 50 # number of points in the x and y directions x_start, x_end = -0.01, 1.01 # x-direction boundaries y_start, y_end = -0.01, 1.01 # y-direction boundaries x = numpy.linspace(x_start, x_end, Nx) # computes a 1D-array for x y = numpy.linspace(y_start, y_end, Ny) # computes a 1D-array for y X, Y = numpy.meshgrid(x, y) # generates a mesh grid #Calculate the velocity and pressure field p = numpy.empty((Nx, Ny), dtype=float) u = numpy.empty((Nx, Ny), dtype=float) v = numpy.empty((Nx, Ny), dtype=float) #for i, panel in enumerate(panels): #panel.Q = 0. #panels[0].Q=100 #panels[5].Q=100 #Qwell_1=400 for i in range(Nx): for j in range(Ny): p[i,j] =sum([p.Q*InflueceP(X[i,j], Y[i,j], p) for p in panels])+Qwell_1*InflueceP_W(X[i,j], Y[i,j], wells[0]) u[i,j] =sum([p.Q*InflueceU(X[i,j], Y[i,j], p) for p in panels])+Qwell_1*InflueceU_W(X[i,j], Y[i,j], wells[0]) v[i,j] =sum([p.Q*InflueceV(X[i,j], Y[i,j], p) for p in panels])+Qwell_1*InflueceV_W(X[i,j], Y[i,j], wells[0]) #p[i,j] =sum([p.Q*InflueceP(X[i,j], Y[i,j], p) for p in panels]) #u[i,j] =sum([p.Q*InflueceU(X[i,j], Y[i,j], p) for p in panels]) #v[i,j] =sum([p.Q*InflueceV(X[i,j], Y[i,j], p) for p in panels]) #p[i,j] =Qwell_1*InflueceP_W(X[i,j], Y[i,j], wells[0]) #u[i,j] =Qwell_1*InflueceU_W(X[i,j], Y[i,j], wells[0]) #v[i,j] =Qwell_1*InflueceV_W(X[i,j], Y[i,j], wells[0]) # plots the streamlines %matplotlib inline size = 6 pyplot.figure(figsize=(size, size)) pyplot.grid(True) pyplot.title('Streamline field') pyplot.xlabel('x', fontsize=16) pyplot.ylabel('y', fontsize=16) pyplot.xlim(-0.2, 1.2) pyplot.ylim(-0.2, 1.2) pyplot.plot(numpy.append([panel.xa for panel in panels], panels[0].xa), numpy.append([panel.ya for panel in panels], panels[0].ya), linestyle='-', linewidth=1, marker='o', markersize=6, color='#CD2305'); stream =pyplot.streamplot(X, Y, u, v,density=2, linewidth=1, arrowsize=1, arrowstyle='->') #streamline #cbar=pyplot.colorbar(orientation='vertical') #equipotential=pyplot.contourf(X, Y, p1, extend='both') size = 7 pyplot.figure(figsize=(size, size-1)) pyplot.title('Pressure field') pyplot.xlabel('x', fontsize=16) pyplot.ylabel('y', fontsize=16) pyplot.xlim(0, 1) pyplot.ylim(0, 1) pyplot.contour(X, Y, p, 15, linewidths=0.5, colors='k') pyplot.contourf(X, Y, p, 15, cmap='rainbow', vmax=abs(p).max(), vmin=-abs(p).max()) pyplot.colorbar() # draw colorbar size = 7 pyplot.figure(figsize=(size, size-1)) pyplot.title('Total Velocity field') pyplot.xlabel('x', fontsize=16) pyplot.ylabel('y', fontsize=16) pyplot.xlim(0, 1) pyplot.ylim(0, 1) Vtotal= numpy.sqrt(u**2+v**2) #Vtotal= numpy.abs(v) pyplot.contour(X, Y, Vtotal, 15, linewidths=0.5, colors='k') pyplot.contourf(X, Y, Vtotal, 15, cmap='rainbow') #vmax=50, vmin=0) pyplot.colorbar() # draw colorbar pyplot.title('Darcy velocity on the outflow boundary, x 
component (ft/day)') pyplot.xlabel('x', fontsize=16) pyplot.ylabel('y', fontsize=16) pyplot.plot(y, u[49,:], '--', linewidth=2) pyplot.plot(9.8425+y, u[:,49], '--', linewidth=2) u[:,49] pyplot.title('Darcy velocity on the outflow boundary, y component (ft/day)') pyplot.plot(y, v[:,49], '--', linewidth=2) pyplot.plot(9.8425+y, v[49,:], '--', linewidth=2) v[49,:]Box 3 is very quietBox 10, 14 are very loudBox 15 doesn't workfor ii in box.inputs: print ii.read() for oo in box.outputs: print oo.write(False) for ii in box_1.inputs: print ii.read() iface = box.interfaces['comedi'] box = PANELS['Zog6']() box.reset() box.test() box.reward()Overlay Histogram Example source: modified from https://stackoverflow.com/questions/6871201/plot-two-histograms-on-single-chart-with-matplotlib Import modulesimport random import numpy from matplotlib.pyplot import hist, legend, show import numpy as np import osGenerate test datax = [random.gauss(3,1) for _ in range(400)] y = [random.gauss(4,2) for _ in range(400)]Histogram overlayhist(x, bins="auto", alpha=0.5, label='x') hist(y, bins="auto", alpha=0.5, label='y') legend(loc='upper right') show()Mean and Standard Deviationprint(np.mean(x), np.std(x)) print(np.mean(y), np.std(y))2.8880676273225014 0.9726233031755469 4.140335712151743 1.8909099366710311Plot separately Note that overlaying the two histograms makes it a lot easier to see the differences between the two distributions as opposed to plotting them separately.hist(x, bins="auto", alpha=0.5, label='x') show() hist(y, bins="auto", alpha=0.5, label='y') show()When plotted separately, you need to manually look at the min and max of the bins, try to figure out where the mean is, how the spread of the distribution compares, etc. In other words, it's a lot easier to just look at an overlay. 
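One refinement worth a note (a sketch added here, not part of the original example): since `bins="auto"` picks edges per call, the two histograms above can end up with different bins; computing shared bin edges over the combined data first keeps the overlay comparison bin-for-bin consistent.

```python
import numpy as np
from matplotlib.pyplot import hist, legend, show

edges = np.histogram_bin_edges(x + y, bins="auto")  # one set of edges over the combined data
hist(x, bins=edges, alpha=0.5, label='x')
hist(y, bins=edges, alpha=0.5, label='y')
legend(loc='upper right')
show()
```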
Print this notebook to PDFos.system("jupyter nbconvert --to pdf overlay-histogram-example.ipynb")Training a model with a curated image dataset in fastai with dataset augmentationWalkthrough of how to train a deep learning model in fastai with a curated image dataset incorporating an augmented dataset# imports for notebook boilerplate !pip install -Uqq fastbook import fastbook from fastbook import * from fastai.vision.all import * modifier = 'jun16_2021' # define timestamp string for saving models modifier = datetime.now().strftime("%Y%m%d-%H%M%S") # define path for saving models - update this path for your Gradient or Colab instance model_path = '/notebooks/temp' # set up the notebook for fast.ai fastbook.setup_book()Ingest the dataset- create a path object- defined an ImageDataLoaders object# ingest the curated image dataset CIFAR path = untar_data(URLs.CIFAR) path # examine the directory structure of the dataset path.ls() # define a DataBlock object db = DataBlock(blocks = (ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(seed=42), get_y=parent_label) dls = db.dataloaders(path/'train',bs=32) # summary must be run on DataBlock object, not dataloaders object db.summary(path/"train") # define a DataBlock object for the test set db_test = DataBlock(blocks = (ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(valid_pct=0.99,seed=42), get_y=parent_label) # summary must be run on DataBlock object, not dataloaders object db_test.summary(path/"train")Setting-up type transforms pipelines Collecting items from /storage/data/cifar10/train Found 50000 items 2 datasets of sizes 500,49500 Setting up Pipeline: PILBase.create Setting up Pipeline: parent_label -> Categorize -- {'vocab': None, 'sort': True, 'add_na': False} Building one sample Pipeline: PILBase.create starting from /storage/data/cifar10/train/bird/44595_bird.png applying PILBase.create gives PILImage mode=RGB size=32x32 Pipeline: parent_label -> Categorize -- {'vocab': None, 'sort': True, 'add_na': False} starting from /storage/data/cifar10/train/bird/44595_bird.png applying parent_label gives bird applying Categorize -- {'vocab': None, 'sort': True, 'add_na': False} gives TensorCategory(2) Final sample: (PILImage mode=RGB size=32x32, TensorCategory(2)) Setting up after_item: Pipeline: ToTensor Setting up before_batch: Pipeline: Setting up after_batch: Pipeline: IntToFloatTensor -- {'div': 255.0, 'div_mask': 1} Buil[...]Examine the dataset# show a batch of training data dls.train.show_batch(max_n=4, nrows=1) # examine the train subdirectory (path/'train').ls() (path/'train/dog').ls() (path/'train/cat').ls() # take a look at one of the images img_files = get_image_files(path) img = PILImage.create(img_files[100]) img # display a thumbnail of the image img.to_thumb(180) img = PILImage.create(img_files[3000]) imgDefine and train the model# define the model learn = cnn_learner(dls, resnet18, loss_func=LabelSmoothingCrossEntropy(), metrics=accuracy) %%time learn.fine_tune(2,cbs=ShowGraphCallback()) learn.summary() # save the model trained with non-augmented data using the trick to specify a writeable directory save_path = learn.path learn.path = Path(model_path) learn.save('cifar_save_'+modifier) learn.path = save_pathTry augmenting the training set# create a new DataBlock object incorporating augmentation transformations db2 = db.new(batch_tfms=aug_transforms()) # create a new dataloaders object based on the new DataBlock object dls2 = db2.dataloaders(path/'train',bs=32) 
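# (Optional, illustrative only) aug_transforms() takes keyword arguments that control the
# augmentation strength (for example max_rotate, max_zoom and max_lighting in fastai v2),
# so a stronger policy could be sketched as:
#   db2 = db.new(batch_tfms=aug_transforms(max_rotate=15.0, max_zoom=1.2, max_lighting=0.4))
#   dls2 = db2.dataloaders(path/'train', bs=32)
# The values above are arbitrary examples, not tuned settings.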
db2.summary(path/"train") # examine a batch of the augmented training data dls2.train.show_batch(unique=True,max_n=8, nrows=2) # define a distinct model based on the augmented dataloaders object learn2 = cnn_learner(dls2, resnet18, loss_func=LabelSmoothingCrossEntropy(), metrics=accuracy) %%time learn2.fine_tune(2) learn2.summary() # save the model trained with the augmented dataset using the trick to specify a writeable directory save_path = learn2.path learn2.path = Path(model_path) learn2.save('cifar_augmented_save_'+modifier) learn2.path = save_pathExamine the performance of the model trained with non-augmented data on the test set# define a dataloader object on the test dataset dls_test = db_test.dataloaders(path/'test',bs=32) # define a model for the test dataset learn_test = cnn_learner(dls_test, resnet18, loss_func=LabelSmoothingCrossEntropy(), metrics=accuracy) # point the model's path to where the weights were saved and load the weights for the model trained with non-augmented data learn_test.path = Path(model_path) learn_test.load('cifar_save_'+modifier) learn_test.validate() interp_test = ClassificationInterpretation.from_learner(learn_test) # examine the images from the training set with the biggest loss interp_test.plot_top_losses(9, figsize=(15,11)) # plot the confusion matrix interp_test.plot_confusion_matrix()Examine the performance of the model trained on the augmented dataset on the test set# define a dataloader object on the test dataset dls_test = db_test.dataloaders(path/'test',bs=32) # define a model for the augmented dataset learn_augment_test = cnn_learner(dls_test, resnet18, loss_func=LabelSmoothingCrossEntropy(), metrics=accuracy) # point the model's path to where the weights were saved and load the weights for the model trained with augmented data learn_augment_test.path = Path(model_path) learn_augment_test.load('cifar_augmented_save_'+modifier) learn_augment_test.validate() # show confusion matrix for the test dataset interp_augment_test = ClassificationInterpretation.from_learner(learn_augment_test) # examine the images from the training set with the biggest loss interp_augment_test.plot_top_losses(9, figsize=(15,11)) interp_augment_test.plot_confusion_matrix()Chassis.ml demo Easily build MLflow models into {KFServing, Modzy} Docker images This demo will show you how we can train a model, define custom pre- and post-processing steps, save it in MLflow format and then build it into a container image and push it to docker hub with a single command.By easily connecting MLflow models to Docker images with a simple Python SDK for data scientists & ML engineers, Chassis is the missing link between MLflow and DevOps. 
Prerequisites* [Docker Hub](https://hub.docker.com/) account (free one is fine)* The browser you're reading this in :-)import chassisml import sklearn import mlflow.pyfunc from joblib import dump, loadTrain the modelThis will train a sklearn model and it will be saved as a joblib file inside the `model` directory.The goal for Chassis service is to create an image that exposes this model.from sklearn import datasets, svm from sklearn.model_selection import train_test_split digits = datasets.load_digits() data = digits.images.reshape((len(digits.images), -1)) # Create a classifier: a support vector classifier clf = svm.SVC(gamma=0.001) # Split data into 50% train and 50% test subsets X_train, X_test, y_train, y_test = train_test_split( data, digits.target, test_size=0.5, shuffle=False) # Learn the digits on the train subset clf.fit(X_train, y_train) dump(clf, './model.joblib') # Wrap your model in a pyfunc and provide auxiliary functionality through extension of the # mlflow PythonModel class with methods pre_process, post_process, and explain class CustomModel(mlflow.pyfunc.PythonModel): _model = load('./model.joblib') def load_context(self, context): self.model = self._model def predict(self, context, inputs): processed_inputs = self.pre_process(inputs) inference_results = self.model.predict(processed_inputs) return self.post_process(inference_results) def pre_process(self, inputs): return inputs / 2 def post_process(self, inference_results): structured_results = [] for inference_result in inference_results: inference_result = { "classPredictions": [ {"class": str(inference_result), "score": str(1)} ] } structured_output = { "data": { "result": inference_result, "explanation": None, "drift": None, } } structured_results.append(structured_output) return structured_results def explain(self, images): pass # Define conda environment with all required dependencies for your model conda_env = { "channels": ["defaults", "conda-forge", "pytorch"], "dependencies": [ "python=3.8.5", "pytorch", "torchvision", "pip", { "pip": [ "mlflow", "lime", "sklearn" ], }, ], "name": "linear_env" }Save the modelTransform the model into MLFlow format.!rm -rf mlflow_custom_pyfunc_svm model_save_path = "mlflow_custom_pyfunc_svm" mlflow.pyfunc.save_model(path=model_save_path, python_model=CustomModel(), conda_env=conda_env)Load the MLFlow model and test it.import json classifier = mlflow.pyfunc.load_model(model_save_path) predictions = classifier.predict(X_test) print(json.dumps(predictions[0], indent=4))We check that the model has been correctly saved inside the `model` directory.!ls ./mlflow_custom_pyfunc_svmGet Docker Hub credentials securelyNow we prompt the user (you!) 
for your docker hub username and password in such a way that the value itself doesn't get written into the notebook, which is sensible security best-practice.import getpass import base64 username = getpass.getpass("docker hub username") password = getpass.getpass("docker hub password")Now we can construct the metadata that the chassis service needs to build and publish the container to docker hub:image_data = { 'name': f'{username}/chassisml-sklearn-demo:latest', 'version': '0.0.1', 'model_name': 'digits', 'model_path': './mlflow_custom_pyfunc_svm', 'registry_auth': base64.b64encode(f"{username}:{password}".encode("utf-8")).decode("utf-8") }Launch the jobImportant fields that we should fill in here are:* `image_data`: the values defined above* `base_url`: the name of the service that runs Chassis (that is running in the same k8s cluster that this notebook is running in)* `deploy`: whether to publish this image to Docker Hubres = chassisml.publish( image_data=image_data, deploy=True, base_url='http://chassis:5000' ) error = res.get('error') job_id = res.get('job_id') if error: print('Error:', error) else: print('Job ID:', job_id)After the request is made, Chassis launches a job that runs Kaniko and builds the docker image based on the values provided.You can get the id of the job created from the result of the request. This id can be used to ask for the status of the job.chassisml.get_job_status(job_id)**Poll the job a few times until it's finished.** You can also use `kubectl get pods -A` and `kubectl logs` to watch the build in progress in the testfaster SSH tab.Now, we should be able to see the created image listed in the registry. This means that the service has correctly created the image and uploaded it. Pull the docker imageNow that the job has finished, we can check that the image has been pushed to [Docker Hub](https://hub.docker.com). Log into Docker Hub and check that the tag has been pushed. 
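If you would rather check from the notebook instead of the web UI, one option is to query Docker Hub's public tag listing with `requests` (a sketch; the v2 endpoint and response fields below are assumptions about Docker Hub's API, not part of Chassis):

```python
import requests

repo = f"{username}/chassisml-sklearn-demo"
resp = requests.get(f"https://hub.docker.com/v2/repositories/{repo}/tags")  # assumed public endpoint
tags = [t["name"] for t in resp.json().get("results", [])] if resp.ok else []
print(repo, "tags on Docker Hub:", tags)
```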
(optional) Download the tar fileWe can also download the docker image that has been generated in the form of a tar file.dst = './downloaded_image.tar' chassisml.download_tar(job_id, dst) !ls -ltrh ./ from IPython.display import display, FileLink local_file = FileLink(dst, result_html_prefix="Click here to download: ") display(local_file)Testing covid and dataimport requests import pandas as pd from datetime import datetime # entity_name = 'INSTITUTO NACIONAL DE VÍAS (INVIAS)' # url_secop_i = 'https://www.datos.gov.co/resource/c82b-7jfi.json' # p_entity = {'nombre_de_la_entidad': entity_name, # '$limit': '10000', # 'causal_de_otras_formas_de': 'Contratos Interadministrativos (Literal C)'} # r_entity = requests.get(url_secop, params=p_entity) # d_entity = r_entity.json() # To .json # df_entity = pd.DataFrame(d_entity) # To df # df_entityExtractionSECOP I# SECOP I url_secop_i = 'https://www.datos.gov.co/resource/c82b-7jfi.json' p_secop_i = {'$limit': '1000000', 'anno_firma_del_contrato': '2020'} r_secop_i = requests.get(url_secop_i, params=p_secop_i) d_secop_i = r_secop_i.json() # To .json secop_1 = pd.DataFrame(d_secop_i) # To df print(len(secop_1)) secop_1.head(2) secop_1.columns covid_pattern = 'covid|coronavirus|pandemia|emergencia' # secop_1['detalle_lower'] = secop_1['detalle_del_objeto_a_contratar'].str.lower() #secop_1.loc[lambda x: x.detalle_lower.str.contains(covid_pattern)].detalle_lower.to_csv('../data/secop_1_descriptions.csv', index=False, header=False)SECOP II# SECOP II url_secop_ii = "https://www.datos.gov.co/resource/jbjy-vk9h.json?$where=fecha_de_firma>='2020-01-01'" p_secop_ii = {'$limit': '1000000'} r_secop_ii = requests.get(url_secop_ii, params=p_secop_ii) d_secop_ii = r_secop_ii.json() # To .json secop_2 = pd.DataFrame(d_secop_ii) # To df print(len(secop_2)) secop_2.head(2) secop_2 = (secop_2 .assign(descripcion_del_proceso = lambda x: x.descripcion_del_proceso.str.lower(), fecha_de_firma = lambda x: pd.to_datetime(x.fecha_de_firma)) ) secop_2.loc[lambda x: x.descripcion_del_proceso.str.lower().str.contains(covid_pattern)].shape[0] secop_2.shapeUnificationcolumn_map = { 'nombre_de_la_entidad': 'nombre_entidad', 'nit_de_la_entidad': 'nit_entidad', 'departamento_entidad': 'departamento', 'municipio_entidad': 'ciudad', 'nivel_entidad': 'orden', 'numero_de_proceso': 'proceso_de_compra', 'numero_del_contrato': 'id_contrato', 'estado_del_proceso': 'estado_contrato', 'detalle_del_objeto_a_contratar': 'descripcion_del_proceso', 'tipo_de_contrato': 'tipo_de_contrato', 'tipo_de_proceso': 'modalidad_de_contratacion', 'fecha_de_firma_del_contrato': 'fecha_de_firma', 'fecha_ini_ejec_contrato': 'fecha_de_inicio_de_ejecucion', 'fecha_fin_ejec_contrato': 'fecha_de_fin_de_ejecucion', 'tipo_identifi_del_contratista': 'tipodocproveedor', 'identificacion_del_contratista': 'documento_proveedor', 'nom_raz_social_contratista': 'proveedor_adjudicado', 'cuantia_contrato': 'valor_del_contrato', 'espostconflicto': 'espostconflicto', 'ruta_proceso_en_secop_i': 'urlproceso', 'origen_de_los_recursos': 'origen_de_los_recursos', } #rename columns from covid 1 to match covid 2 secop_1 = secop_1.rename(columns=column_map) # source secop_1['source'] = 'secop_1' secop_2['source'] = 'secop_2' # unification union_cols = list(column_map.values())+['source'] secop_union = pd.concat([secop_2, secop_1]) secop_union = secop_union[union_cols]preprocessingsecop_union.dtypes secop_union['descripcion_del_proceso'] = secop_union.descripcion_del_proceso.str.lower() secop_union['fecha_de_firma'] = 
pd.to_datetime(secop_union.fecha_de_firma) secop_union['fecha_de_inicio_de_ejecucion'] = pd.to_datetime(secop_union.fecha_de_inicio_de_ejecucion) secop_union['fecha_de_fin_de_ejecucion'] = pd.to_datetime(secop_union.fecha_de_fin_de_ejecucion) secop_union['url'] = secop_union.urlproceso.apply(lambda x: x['url']) secop_union['is_covid'] = secop_union.descripcion_del_proceso.str.contains(covid_pattern) # saving data secop_union.to_pickle('../data/secop_union_all.pickle') secop_union.to_csv('../data/secop_union_all.csv') secop_1.to_pickle('../data/secop_1.pickle') secop_1.to_csv('../data/secop_1.csv') secop_2.to_pickle('../data/secop_2.pickle') secop_2.to_csv('../data/secop_2.csv') secop_union.dtypes # covid_separation secop_1_covid = secop_1.loc[lambda x: x.detalle_del_objeto_a_contratar.str.contains(covid_pattern)] # covid flag secop_1.assign(is_covid = lambda x: x.detalle_del_objeto_a_contratar.str.) secop_1_covid.shape[0] secop_1_covid.reset_index(drop=True).to_feather('../data/secop_1_covid.feather') secop_1_covid.reset_index(drop=True).to_pickle('../data/secop_1_covid.pkl') secop_2_covid = secop_2.loc[lambda x: x.descripcion_del_proceso.str.contains('covid|coronavirus|pandemia|emergencia')] secop_2_covid.shape[0] secop_2_covid.reset_index(drop=True).to_feather('../data/secop_2_covid.feather') secop_2_covid.reset_index(drop=True).to_pickle('../data/secop_2_covid.pkl') secop_unAdmissionsadmissions_df.head() admissions_df.shapeTypes of admissionsadmissions_df['ADMISSION_TYPE'].unique()Check for missing values on the admission timessum(admissions_df['ADMITTIME'].isnull())Conversion of times to datetime typeadmissions_df['ADMITTIME'] = admissions_df['ADMITTIME'].astype('datetime64[ns]') admissions_df['DISCHTIME'] = admissions_df['DISCHTIME'].astype('datetime64[ns]') admissions_df['DEATHTIME'] = admissions_df['DEATHTIME'].astype('datetime64[ns]')Sort by subject and admission type and reset the data frame index.admissions_df = admissions_df.sort_values(['SUBJECT_ID', 'ADMITTIME']) admissions_df = admissions_df.reset_index(drop = True) admissions_df['NEXT_ADMITTIME'] = admissions_df.groupby('SUBJECT_ID').ADMITTIME.shift(-1) admissions_df['NEXT_ADMISSION_TYPE'] = admissions_df.groupby('SUBJECT_ID').ADMISSION_TYPE.shift(-1) admissions_df[admissions_df['NEXT_ADMISSION_TYPE'] == 'ELECTIVE']['NEXT_ADMITTIME'] = pd.NaT admissions_df[admissions_df['NEXT_ADMISSION_TYPE'] == 'ELECTIVE']['NEXT_ADMISSION_TYPE'] = np.NaNFill NA's with the next valid value. 
Previously sorted.admissions_df[['NEXT_ADMITTIME','NEXT_ADMISSION_TYPE']] = admissions_df.groupby(['SUBJECT_ID'])[['NEXT_ADMITTIME','NEXT_ADMISSION_TYPE']].fillna(method = 'bfill')Obtain days to readmission: from discharge to next readmissionadmissions_df['DAYS_TO_READMISSION'] = (admissions_df['NEXT_ADMITTIME'] - admissions_df['DISCHTIME']).dt.daysNumber of readmissionssum(admissions_df['DAYS_TO_READMISSION'].notnull())Distribution of days to readmissionsns.set(rc={'figure.figsize':(7,3.5), 'axes.grid':True}) sns.set_style("whitegrid", {'axes.grid' : False}) fig = sns.distplot(admissions_df['DAYS_TO_READMISSION'], kde=False, bins=15) fig = fig.get_figure() fig.savefig('days_dist.pgf') print(admissions_df['DAYS_TO_READMISSION'].quantile(0.5)) print(admissions_df['DAYS_TO_READMISSION'].quantile(0.75)) print(admissions_df['DAYS_TO_READMISSION'].quantile(0.25)) print(admissions_df['DAYS_TO_READMISSION'].mean()) admissions_df['DAYS_TO_READMISSION'].max()122.0 500.0 25.0 404.0378933847142Notesnotes_df.head() notes_df.shape notes_df['CATEGORY'].unique() discharge_notes = notes_df[notes_df['CATEGORY'] == "Discharge summary"] discharge_notes.shapeThere are 6926 admissions with more than one discharge note (HADM_ID - ID of Admissions).discharge_notes.duplicated(['HADM_ID']).sum()Take the last row per admissiondischarge_notes_ordered = discharge_notes.groupby(['SUBJECT_ID', 'HADM_ID']).nth(-1).reset_index() discharge_notes_ordered.duplicated(['HADM_ID']).sum() discharge_notes_ordered.shapeMerge Notes and Admissionsadmissions_notes = pd.merge( admissions_df[['SUBJECT_ID','HADM_ID','ADMITTIME','DISCHTIME','DAYS_TO_READMISSION','NEXT_ADMITTIME','ADMISSION_TYPE','DEATHTIME']], discharge_notes_ordered[['SUBJECT_ID', 'HADM_ID', 'TEXT']], on = ['SUBJECT_ID', 'HADM_ID'], how='left' ) admissions_notes10% of admissions without discharge notes.sum(admissions_notes['TEXT'].isnull()) / len(admissions_notes)53% of NEWBORN are missingadmissions_notes.groupby('ADMISSION_TYPE').apply(lambda g: g.TEXT.isnull().sum())/admissions_notes.groupby('ADMISSION_TYPE').size()Remove NEWBORN admissions and create the target variableadm_notes = admissions_notes[admissions_notes['ADMISSION_TYPE'] != 'NEWBORN'] adm_notes['READM_WITHIN_30'] = (adm_notes['DAYS_TO_READMISSION'] < 30).astype('int') sum(adm_notes['READM_WITHIN_30']) adm_notes.shapeExlude patients that died during the admissionrows_not_death = adm_notes['DEATHTIME'].isnull() df_adm_notes_not_death = adm_notes.loc[rows_not_death].copy() df_adm_notes_not_death = df_adm_notes_not_death.sample(n = len(df_adm_notes_not_death)) df_adm_notes_not_death = df_adm_notes_not_death.reset_index(drop = True) sum(df_adm_notes_not_death['READM_WITHIN_30']) len(df_adm_notes_not_death['READM_WITHIN_30'])" Module 5: Pipeline and Grid Search Predicting Grant Applications: Building a Pipeline Lesson Objectives* After completing this lesson, you should be able to: - Understand the role of pipelines in spark.ml - Use a pipeline to fit a model and make predictions - Evaluate the results Key Concepts* Transformer - an algorithm which transforms one DataFrame into another* Estimator - an algorithm which can be fit on a DataFrame to produce a Transformer* Parameter - there is a common API shared by Transformers and Estimators* Pipeline - chains multiple Transformers together to specify a machine learning workflow* Evaluator - measures the performance of an estimator or pipeline against some metric(s) Pipelines in spark.ml* Inspired by the scikit-learn project* Components: - Transformers - 
Estimators* Properties of components: - Transformer.transform() and Estimator.fit() are stateless - Each instance of Transformer/Estimator has a unique ID* A sequence of PipelineStages to be run in a specific order - input DataFrame is transformed as it passes through each stage - Transformer stages: `transform()` method is called on the DF - Estimator stages: `fit()` method is called to produce a Transformer - this Transformer becomes part of the Pipeline Model - `transform()` method is called on the DF* Runtime checking is done using the DF's schema before actually running the Pipeline Create the Pipeline load grant dataimport org.apache.spark.sql.SparkSession val spark = SparkSession.builder().getOrCreate() import spark.implicits._ import org.apache.spark.sql.functions._ val data = spark.read. format("com.databricks.spark.csv"). option("delimiter", "\t"). option("header", "true"). option("inferSchema", "true"). load("/resources/data/grantsPeople.csv") data.show()------------+------------+--------------------+-------------------+ |Grant_Application_ID| RFCD_Code|RFCD_Percentage| SEO_Code|SEO_Percentage|Person_ID| Role|Year_of_Birth|Country_of_Birth|Home_Language| Dept_No|Faculty_No|With_PHD|No_of_Years_in_Uni_at_Time_of_Grant|Number_of_Successful_Grant|Number_of_Unsuccessful_Grant| A2| A| B| C|Grant_Status|Sponsor_Code| Contract_Value_Band|Grant_Category_Code| +--------------------+----------+---------------+---------+--------------+---------+--------------------+-------------+----------------+-------------+--------+----------+--------+-----------------------------------+--------------------------+----------------------------+----+----+----+----+------------+------------+--------------------+-------------------+ | 1|RFCD280199| 100.0|SEO700299| 100.0| 40572| CHIEF_INVESTIGATOR| 1965| AsiaPacific| OtherLang|Dept3073| Faculty31| null| DurationLT0[...]create featuresval researchers = data. withColumn ("phd", data("With_PHD").equalTo("Yes").cast("Int")). withColumn ("CI", data("Role").equalTo("CHIEF_INVESTIGATOR").cast("Int")). withColumn("paperscore", data("A2") * 4 + data("A") * 3) val grants = researchers.groupBy("Grant_Application_ID").agg( max("Grant_Status").as("Grant_Status"), max("Grant_Category_Code").as("Category_Code"), max("Contract_Value_Band").as("Value_Band"), sum("phd").as("PHDs"), when(max(expr("paperscore * CI")).isNull, 0). otherwise(max(expr("paperscore * CI"))).as("paperscore"), count("*").as("teamsize"), when(sum("Number_of_Successful_Grant").isNull, 0). otherwise(sum("Number_of_Successful_Grant")).as("successes"), when(sum("Number_of_Unsuccessful_Grant").isNull, 0). otherwise(sum("Number_of_Unsuccessful_Grant")).as("failures") ) grants.show()+--------------------+------------+-------------+--------------------+----+----------+--------+---------+--------+ |Grant_Application_ID|Grant_Status|Category_Code| Value_Band|PHDs|paperscore|teamsize|successes|failures| +--------------------+------------+-------------+--------------------+----+----------+--------+---------+--------+ | 148| 0| GrantCat30B|ContractValueBandUnk|null| 6| 1| 0| 1| | 463| 1| GrantCat30C|ContractValueBandUnk|null| 0| 1| 1| 0| | 471| 0| GrantCat30B| ContractValueBandA| 1| 127| 2| 1| 5| | 496| 0| GrantCat30B| ContractValueBandA|null| 0| 1| 1| 3| | 833| 1| GrantCat10A| ContractValueBandF|null| 0| 1| 0| 0| | 1088| 1| GrantCat50A| ContractValueBandA| 1| [...]String Indexerimport org.apache.spark.ml.feature.StringIndexer val value_band_indexer = new StringIndexer(). setInputCol("Value_Band"). 
setOutputCol("Value_index"). fit(grants) val category_indexer = new StringIndexer(). setInputCol("Category_Code"). setOutputCol("Category_index"). fit(grants) val label_indexer = new StringIndexer(). setInputCol("Grant_Status"). setOutputCol("status"). fit(grants) import org.apache.spark.ml.feature.VectorAssembler val assembler = new VectorAssembler(). setInputCols(Array( "Value_index" ,"Category_index" ,"PHDs" ,"paperscore" ,"teamsize" ,"successes" ,"failures" )).setOutputCol("assembled")Random Forest Classifier and Pipelineimport org.apache.spark.ml.classification.RandomForestClassifier import org.apache.spark.ml.classification.RandomForestClassificationModel val rf = new RandomForestClassifier(). setFeaturesCol("assembled"). setLabelCol("status"). setSeed(42) import org.apache.spark.ml.Pipeline val pipeline = new Pipeline().setStages(Array( value_band_indexer, category_indexer, label_indexer, assembler, rf) )Create an Evaluatorimport org.apache.spark.ml.evaluation.BinaryClassificationEvaluator val auc_eval = new BinaryClassificationEvaluator(). setLabelCol("status"). setRawPredictionCol("rawPrediction") auc_eval.getMetricNameSplit into Training and Testval tr = grants.filter("Grant_Application_ID < 6635") val te = grants.filter("Grant_Application_ID >= 6635") val training = tr.na.fill(0, Seq("PHDs")) val test = te.na.fill(0, Seq("PHDs"))Run and Evaluate the Pipelineval model = pipeline.fit(training) val pipeline_results = model.transform(test) auc_eval.evaluate(pipeline_results)Predicting the Outcome of Cricket Matches Prediction Model * Logistic Regression using sklearn * K-Nearest Neighbors using sklearn%matplotlib inline import numpy as np # imports a fast numerical programming library import matplotlib.pyplot as plt #sets up plotting under plt import pandas as pd #lets us handle data as dataframes #sets up pandas table display pd.set_option('display.width', 500) pd.set_option('display.max_columns', 100) pd.set_option('display.notebook_repr_html', True) import seaborn as sns sns.set(style="whitegrid", color_codes=True) from __future__ import division from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn import svm from sklearn.ensemble import RandomForestClassifier from sklearn.naive_bayes import GaussianNB from sklearn.cross_validation import train_test_split from sklearn import metrics from patsy import dmatrices matches = pd.read_csv("../data/matcheswithfeatures.csv", index_col = 0) y, X = dmatrices('team1Winning ~ 0 + Avg_SR_Difference + Avg_WPR_Difference + Total_MVP_Difference + Prev_Enc_Team1_WinPerc + \ Total_RF_Difference', matches, return_type="dataframe") y_arr = np.ravel(y)Training and Testing on Entire Data# instantiate a logistic regression model, and fit with X and y model = LogisticRegression() model = model.fit(X, y_arr) # check the accuracy on the training set print "Accuracy is", model.score(X, y_arr)*100, "%"Accuracy is 57.4923547401 %Splitting train and test using train_test_split# evaluate the model by splitting into train and test sets X_train, X_test, y_train, y_test = train_test_split(X, y_arr, random_state = 0) # Logistic Regression on train_test_split model2 = LogisticRegression() model2.fit(X_train, y_train) # predict class labels for the test set predicted = model2.predict(X_test) # generate evaluation metrics print "Accuracy is ", metrics.accuracy_score(y_test, predicted)*100, "%" # KNN Classification on train_test_split k_range = list(range(1, 61)) k_score = [] for k in k_range: knn = 
KNeighborsClassifier(n_neighbors = k) knn.fit(X_train, y_train) y_pred = knn.predict(X_test) k_score.append(metrics.accuracy_score(y_test, y_pred)) plt.plot(k_range, k_score) # Best values of k in train_test_split knn = KNeighborsClassifier(n_neighbors = 50) knn.fit(X_train, y_train) y_pred = knn.predict(X_test) print "Accuracy is ", metrics.accuracy_score(y_test, y_pred)*100, "%"Accuracy is 64.6341463415 %Splitting Training Set (2008-2013) and Test Set (2013-2015) based on SeasonsX_timetrain = X.loc[X.index < 398] Y_timetrain = y.loc[y.index < 398] Y_timetrain_arr = np.ravel(Y_timetrain) X_timetest = X.loc[X.index >= 398] Y_timetest = y.loc[y.index >= 398] Y_timetest_arr = np.ravel(Y_timetest) # Logistic Regression on time-based split sets model3 = LogisticRegression() model3.fit(X_timetrain, Y_timetrain_arr) timepredicted = model3.predict(X_timetest) print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, timepredicted)*100, "%" # KNN Classification on time-based split sets k_range = list(range(1, 61)) k_score = [] for k in k_range: knn = KNeighborsClassifier(n_neighbors = k) knn.fit(X_timetrain, Y_timetrain_arr) y_pred = knn.predict(X_timetest) k_score.append(metrics.accuracy_score(Y_timetest_arr, y_pred)) plt.plot(k_range, k_score) # Best values of k in time-based split data knn1 = KNeighborsClassifier(n_neighbors = 31) knn1.fit(X_timetrain, Y_timetrain_arr) y_pred = knn1.predict(X_timetest) print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, y_pred)*100, "%"Accuracy is 64.367816092 %Support Vector Machinesclf = svm.SVC(gamma=0.001, C=10) clf.fit(X_timetrain, Y_timetrain_arr) clf_pred = clf.predict(X_timetest) print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, clf_pred)*100, "%"Accuracy is 45.9770114943 %Random Forestsrfc = RandomForestClassifier(n_jobs = -1, random_state = 1) rfc.fit(X_timetrain, Y_timetrain_arr) rfc_pred = rfc.predict(X_timetest) print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, rfc_pred)*100, "%" fi = zip(X.columns, rfc.feature_importances_) print "Feature Importance according to Random Forests Model\n" for i in fi: print i[0], ":", i[1]Feature Importance according to Random Forests Model Avg_SR_Difference : 0.330684992918 Avg_WPR_Difference : 0.21317276792 Total_MVP_Difference : 0.191778034092 Prev_Enc_Team1_WinPerc : 0.141146504197 Total_RF_Difference : 0.123217700874Naive Bayes Classifiergclf = GaussianNB() gclf.fit(X_timetrain, Y_timetrain_arr) gclf_pred = gclf.predict(X_timetest) print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, gclf_pred) *100, "%"Accuracy is 55.1724137931 %Cross Validationfrom sklearn.cross_validation import cross_val_score rfc = LogisticRegression() scores = cross_val_score(rfc, X, y_arr, cv=10, scoring='accuracy') scores k_range = list(range(1, 61)) k_scores = [] for k in k_range: knn = KNeighborsClassifier(n_neighbors=k) scores = cross_val_score(knn, X, y_arr, cv=10, scoring='accuracy') k_scores.append(scores.mean()) plt.plot(k_range, k_scores) def getPrediction(match_id): '''Returns the prediction for the given match Args: match_id (int): Match ID for the required game Returns: String: Predicted winner of the game and probability of victory ''' results = {} match_row = matches.loc[matches['id'] == match_id] team1name = match_row.team1.unique()[0] team2name = match_row.team2.unique()[0] toPredict = X_timetest.loc[X_timetest.index == match_id-1].values prediction_prob = knn1.predict_proba(toPredict) prediction = knn1.predict(toPredict) if prediction[0] > 0: results['name'] = str(team1name) 
results['prob'] = float(prediction_prob[0][1])*100 else: results['name'] = str(team2name) results['prob'] = float(prediction_prob[0][0])*100 return results getPrediction(517)Execute Python Syntaxprint("Hello, World!")Hello, World!Python IndentationIndentation refers to the spaces at the beginning of a code line.Where in other programming languages the indentation in code is for readability only, the indentation in Python is very important.Python uses indentation to indicate a block of code.if 5 > 2: print("Five is greater than two!") if 5 > 2: print("Five is greater than two!") if 5 > 2: print("Five is greater than two!") if 5 > 2: print("Five is greater than two!")Five is greater than two! Five is greater than two!Python VariablesIn Python, variables are created when you assign a value to it:x = 5 y = "Hello, World!"CommentsPython has commenting capability for the purpose of in-code documentation.Comments start with a , and Python will render the rest of the line as a comment:#This is a comment. print("Hello, World!")Hello, World!Multi Line CommentsSince Python will ignore string literals that are not assigned to a variable, you can add a multiline string (triple quotes) in your code, and place your comment inside it:""" This is a comment written in more than just one line """ print("Hello, World!")Hello, World!CastingIf you want to specify the data type of a variable, this can be done with casting.# Example x = str(3) # x will be '3' y = int(3) # y will be 3 z = float(3) # z will be 3.0 print(x,y,z)3 3 3.0Get the TypeYou can get the data type of a variable with the type() function.x = 5 y = "John" print(type(x)) print(type(y)) Many Values to Multiple VariablesPython allows you to assign values to multiple variables in one line:x, y, z = "Orange", "Banana", "Cherry" print(x) print(y) print(z)Orange Banana CherryOne Value to Multiple VariablesAnd you can assign the same value to multiple variables in one line:x = y = z = "Orange" print(x) print(y) print(z)Orange Orange OrangeGlobal VariablesVariables that are created outside of a function (as in all of the examples above) are known as global variables.Global variables can be used by everyone, both inside of functions and outside.x = "awesome" def myfunc(): print("Python is " + x) myfunc() x = "awesome" def myfunc(): x = "fantastic" print("Python is " + x) myfunc() print("Python is " + x) global x x = "fantastic" myfunc() print("Python is " + x)Python is fantastic Python is fantasticBuilt-in Data TypesIn programming, data type is an important concept.Variables can store data of different types, and different types can do different things.Python has the following data types built-in by default, in these categories:Text Type: strNumeric Types: int, float, complexSequence Types: list, tuple, rangeMapping Type: dictSet Types: set, frozensetBoolean Type: boolBinary Types: bytes, bytearray, memoryview Python NumbersThere are three numeric types in Python:- int - float - complex \Variables of numeric types are created when you assign a value to themx = 1 # int y = 2.8 # float z = 1j # complex print(type(x)) print(type(y)) print(type(z)) Type ConversionYou can convert from one type to another with the int(), float(), and complex() methods:#convert from int to float: a = float(x) #convert from float to int: b = int(y) #convert from int to complex: c = complex(x) print(a) print(b) print(c) print(type(a)) print(type(b)) print(type(c))1.0 2 (1+0j) SlicingYou can return a range of characters by using the slice syntax.Specify the start index and the end 
index, separated by a colon, to return a part of the string.b = "Hello, World!" print(b[2:5]) # Slice From the Start b = "Hello, World!" print(b[:5]) # Slice To the End b = "Hello, World!" print(b[2:])llo, World!Python - Modify Strings# Upper Case a = "Hello, World!" print(a.upper()) # Lower Case a = "Hello, World!" print(a.lower()) # Remove Whitespace # Whitespace is the space before and/or after the actual #text, and very often you want to remove this space. a = " Hello, World! " print(a.strip()) # Replace String a = "Hello, World!" print(a.replace("H", "J")) # Split String a = "Hello, World!" print(a.split(","))['Hello', ' World!']String ConcatenationTo concatenate, or combine, two strings you can use the + operator.a = "Hello" b = "World" c = a + b print(c) c = a + " " + b print(c)HelloWorld Hello WorldString FormatAs we learned in the Python Variables chapter, we cannot combine strings and numbers like this:age = 36 txt = "My name is John, I am " + age print(txt) txt = f"My name is John, I am {age}" print(txt) txt = "My name is John, I am {}".format(age) print(txt) txt = "My name is John, I am {ages_values}".format(ages_values=age) print(txt) txt = "We are the so-called \"Vikings\" from the north." txtPython BooleansBooleans represent one of two values: True or False.You can evaluate any expression in Python, and get one of two answers, True or False.When you compare two values, the expression is evaluated and Python returns the Boolean answer:print(10 > 9) print(10 == 9) print(10 < 9) a = 200 b = 33 if b > a: print("b is greater than a") else: print("b is not greater than a") print(bool("Hello")) print(bool(15))True TrueHealth Insurance Price Forecast ![](img/poster.jpg) Today, as prices rise for basic necessities, we need to have a way to check beforehand what we will spend our money on. For medical insurance we can take a look at various features to arrive at a price for customers. This is what we'll do in this project, from prediction to deployment.Our data was obtained from this [Kaggle problem on Medical Cost - Insurance Forecast](https://www.kaggle.com/mirichoi0218/insurance), in which we have the question of **"Can you accurately predict insurance costs?"**> For ease of access, the data was upload to GitHub [here](https://github.com/diascarolina/project-insurance-forecast/blob/main/insurance.csv). 
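Because the data is mirrored on GitHub, it can also be read straight from the raw file URL instead of a local download. The raw.githubusercontent.com path below is an assumption derived from the repository link above, so adjust it if the file has moved.
import pandas as pd
# hypothetical raw URL for the mirrored CSV, derived from the GitHub link above
url = 'https://raw.githubusercontent.com/diascarolina/project-insurance-forecast/main/insurance.csv'
df = pd.read_csv(url)
df.head()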
1 Libraries and Settingsimport pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression, Ridge, Lasso, BayesianRidge from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR from sklearn.dummy import DummyRegressor from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import GridSearchCV import pickle # source: https://github.com/empathy87/storytelling-with-data # colors GRAY1, GRAY2, GRAY3 = '#231F20', '#414040', '#555655' GRAY4, GRAY5, GRAY6 = '#646369', '#76787B', '#828282' GRAY7, GRAY8, GRAY9 = '#929497', '#A6A6A5', '#BFBEBE' BLUE1, BLUE2, BLUE3, BLUE4 = '#174A7E', '#4A81BF', '#94B2D7', '#94AFC5' RED1, RED2 = '#C3514E', '#E6BAB7' GREEN1, GREEN2 = '#0C8040', '#9ABB59' ORANGE1 = '#F79747' # fonts plt.rcParams['font.family'] = 'Arial' plt.rcParams['mathtext.fontset'] = 'custom' plt.rcParams['mathtext.bf'] = 'Arial:bold' plt.rcParams['mathtext.it'] = 'Arial:italic' # setting random state rs = 428472 Functionsdef run_model(model, print_values = 1, return_predictions = 0): """ Function that receives a machine learning model and returns its metrics. print_values = 1: print the results of the calculated metrics return_predictions = 1: returns the y predictions together with the metrics """ model.fit(X_train, y_train) y_predictions = model.predict(X_test) mae = mean_absolute_error(y_test, y_predictions) mse = mean_squared_error(y_test, y_predictions) rmse = np.sqrt(mse) if print_values: print(f"MAE: {round(mae, 3)}") print(f"MSE: {round(mse, 3)}") print(f"RMSE: {round(rmse, 3)}") if return_predictions: return y_predictions, mae, mse, rmse return mae, mse, rmse3 Data Checking and Cleaning Let's first load our data. It was downloaded from [the Kaggle problem page](https://www.kaggle.com/mirichoi0218/insurance) and uploaded to GitHub for easy access.df = pd.read_csv('insurance.csv')Checking the first few line of the dataset:df.head()From this we see that we have the following information (adapted from the Kaggle problem description):- **age**: age of primary beneficiary;- **sex**: insurance contractor gender, female, male;- **bmi**: Body mass index, providing an understanding of body, weights that are relatively high or low relative to height, objective index of body weight $(kg/m^2)$ using the ratio of height to weight, ideally 18.5 to 24.9;- **children**: Number of children covered by health insurance / Number of dependents;- **smoker**: if that person smokes or not;- **region**: the beneficiary's residential area in the US, northeast, southeast, southwest, northwest;- **charges**: Individual medical costs billed by health insurance.print(f'Numer of rows: {df.shape[0]}') print(f'Numer of columns: {df.shape[1]}')Numer of rows: 1338 Numer of columns: 7Let's check missing values:df.isnull().sum()Since this is a Kaggle dataset, it is already pretty clean. 
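Even so, a quick sanity check for exact duplicate rows costs nothing; a minimal sketch:
print(df.duplicated().sum())  # number of fully identical rows
# df = df.drop_duplicates()  # optional, only if duplicates are not legitimate records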
Information about the columns:df.info() RangeIndex: 1338 entries, 0 to 1337 Data columns (total 7 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 age 1338 non-null int64 1 sex 1338 non-null object 2 bmi 1338 non-null float64 3 children 1338 non-null int64 4 smoker 1338 non-null object 5 region 1338 non-null object 6 charges 1338 non-null float64 dtypes: float64(2), int64(2), object(3) memory usage: 73.3+ KBStatistics about the values:df.describe()From this we see that the youngest person in the dataset is 18 years old and the oldest is 64 years old.The average BMI is around 30.66, which is pretty high compared to the ideal BMI of 18.5 to 24.9.The person with the most children in this dataset has 5 children.The lowest insurance charge is 1121.87 USD and the highest is 63770.42 USD. 4 Exploratory Data Analysis In this step, let's first check the distribution of the variables, to see if everything's in order.df.age.value_counts().sort_index(ascending = True).plot(kind = 'bar', figsize = (18, 6), color = BLUE2) plt.title('How is the age distribution in the dataset?', fontdict = {'fontsize': 28}, loc = 'left') plt.xticks(rotation = 0, fontsize = 12) plt.yticks(fontsize = 12) plt.xlabel('Age', fontsize = 15, loc = 'left') plt.ylabel('Count', fontsize = 15, loc = 'top') sns.despine() plt.show()It seems that there are approximately 30 people of each age, except for 18 and 19 year olds, which have almost 70 people each. Now for the gender distribution between men and women.df.sex.value_counts().plot(kind = 'bar', figsize = (10, 5), color = [BLUE2, ORANGE1]) plt.title('Gender Distribution', fontdict = {'fontsize': 25}, loc = 'left') plt.xticks(rotation = 0, fontsize = 20) plt.yticks(fontsize = 12) plt.xlabel('Gender', fontsize = 15, loc = 'left') plt.ylabel('Count', fontsize = 15, loc = 'top') sns.despine() plt.show()The distribution is almost 50-50. Let's check:percentage_female = (df['sex'] == 'female').sum() / len(df) * 100 percentage_male = (df['sex'] == 'male').sum() / len(df) * 100 print(f'Percentage of female: {round(percentage_female, 2)}%') print(f'Percentage of male: {round(percentage_male, 2)}%')Percentage of female: 49.48% Percentage of male: 50.52%What about the distribution of the BMI?plt.figure(figsize = (12, 5)) plt.title("Distribution of the BMI", fontdict = {'fontsize': 24}, loc = 'left') sns.set_style("white") ax = sns.histplot(df.bmi, kde = True, bins = 50, color = BLUE1) plt.xticks(fontsize = 14) plt.yticks(fontsize = 14) plt.xlabel('BMI', fontsize = 15, loc = 'left') plt.ylabel('Count', fontsize = 15, loc = 'top') sns.despine() plt.show()We have a pretty even distribution (to the naked eye), centered at around 30.
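To put a number behind that visual impression, the summary statistics and sample skewness of the BMI can be checked directly (a small sketch on the same dataframe):
print(df.bmi.describe())  # mean and median should both sit near 30
print(f'BMI skewness: {round(df.bmi.skew(), 3)}')  # close to 0 for a roughly symmetric distribution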
Is the number of children a person has well distributed?df.children.value_counts().sort_index().plot(kind = 'bar', figsize = (10, 6), color = BLUE2) plt.title('Number of Children', fontdict = {'fontsize': 23}, loc = 'left') plt.xticks(rotation = 0, fontsize = 14) plt.yticks(fontsize = 14) plt.xlabel('# Children', fontsize = 15, loc = 'left') plt.ylabel('Count', fontsize = 15, loc = 'top') sns.despine() plt.show()Let's check some values:percentage_with_children = (df['children'] != 0).sum() / len(df) * 100 percentage_no_children = (df['children'] == 0).sum() / len(df) * 100 print(f'Percentage of people with no children: {round(percentage_no_children, 2)}%') print(f'Percentage of people with one or more children: {round(percentage_with_children, 2)}%')Percentage of people with no children: 42.9% Percentage of people with one or more children: 57.1%Do we have more smokers or non-smokers?df.smoker.value_counts().plot(kind = 'bar', figsize = (10, 5), color = [BLUE2, ORANGE1]) plt.title('Smoker: yes or no?', fontdict = {'fontsize': 25, 'fontweight': 'medium'}, loc = 'left') plt.xticks(rotation = 0, fontsize = 20) plt.yticks(fontsize = 13) plt.ylabel('Count', fontsize = 15, loc = 'top') sns.despine() plt.show()We have significantly fewer smokers in our dataset than non-smokers. Are the regions well distributed?df.region.value_counts().plot(kind = 'bar', figsize = (10, 6), color = BLUE2) plt.title('Regions in the USA', fontdict = {'fontsize': 22, 'fontweight': 'medium'}, loc = 'left') plt.xticks(rotation = 0, fontsize = 15) plt.yticks(fontsize = 15) plt.xlabel('Region', fontsize = 20, loc = 'left') plt.ylabel('Count', fontsize = 18, loc = 'top') sns.despine() plt.show()The regions are pretty well distributed. Finally, let's see how the variable that we want to predict is distributed.plt.figure(figsize = (12, 5)) plt.title("Distribution of the Charges", fontdict = {'fontsize': 24}, loc = 'left') sns.set_style("white") ax = sns.histplot(df.charges, kde = True, bins = 100, color = BLUE1) plt.xticks(fontsize = 13) plt.yticks(fontsize = 13) plt.xlabel('Charges', fontsize = 15, loc = 'left') plt.ylabel('Count', fontsize = 15, loc = 'top') sns.despine() plt.show()We have a distribution skewed to the right here in the charges, which means that the majority of people pay less than 15000 USD in insurance.
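That skew can be quantified rather than eyeballed; a short sketch that also checks the "majority below 15000 USD" claim:
share_below_15k = (df.charges < 15000).mean() * 100
print(f'Percentage of charges below 15000 USD: {round(share_below_15k, 2)}%')
print(f'Charges skewness: {round(df.charges.skew(), 3)}')  # clearly positive, i.e. skewed to the right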
Now let's check each variable against the charges to see how they relate.fig = plt.figure(figsize = (15, 6)) ax = fig.add_subplot(121) sns.histplot(df[(df.smoker == 'yes')]['charges'], color = BLUE2, ax = ax, kde = True, bins = 20) ax.set_title('Distribution of Charges for Smokers', fontdict = {'fontsize': 18}, loc = 'left') ax = fig.add_subplot(122) sns.histplot(df[(df.smoker == 'no')]['charges'], color = ORANGE1, ax = ax, kde = True, bins = 20) ax.set_title('Distribution of Charges for Non-smokers', fontdict = {'fontsize': 18}, loc = 'left') sns.despine() plt.show()We see that the charges are significantly higher for smokers.fig = plt.figure(figsize = (15, 6)) ax = fig.add_subplot(121) sns.histplot(df[(df.sex == 'female')]['charges'], color = BLUE2, ax = ax, kde = True, bins = 20) ax.set_title('Distribution of Charges for Women', fontdict = {'fontsize': 18}, loc = 'left') ax = fig.add_subplot(122) sns.histplot(df[(df.sex == 'male')]['charges'], color = ORANGE1, ax = ax, kde = True, bins = 20) ax.set_title('Distribution of Charges for Men', fontdict = {'fontsize': 18}, loc = 'left') sns.despine() plt.show()Visually, there's not much of a difference in charges for men and women.fig = plt.figure(figsize = (15, 6)) ax = fig.add_subplot(121) sns.histplot(df[(df.bmi >= 40)]['charges'], color = BLUE2, ax = ax, kde = True, bins = 20) ax.set_title('Distribution of Charges for BMI >= 40', fontdict = {'fontsize': 18}, loc = 'left') ax = fig.add_subplot(122) sns.histplot(df[(df.bmi < 40)]['charges'], color = ORANGE1, ax = ax, kde = True, bins = 20) ax.set_title('Distribution of Charges for BMI < 40', fontdict = {'fontsize': 18}, loc = 'left') sns.despine() plt.show()A BMI of 40 or more falls under the category of severe obesity, so it makes sense for people in this range to have higher prices in insurance.fig = plt.figure(figsize = (15, 6)) ax = fig.add_subplot(121) sns.histplot(df[(df.age >= 40)]['charges'], color = BLUE2, ax = ax, kde = True, bins = 20) ax.set_title('Distribution of Charges for Age >= 40', fontdict = {'fontsize': 18}, loc = 'left') ax = fig.add_subplot(122) sns.histplot(df[(df.age < 40)]['charges'], color = ORANGE1, ax = ax, kde = True, bins = 20) ax.set_title('Distribution of Charges for Age < 40', fontdict = {'fontsize': 18}, loc = 'left') sns.despine() plt.show()The charges distribution by age is about the same, but just a bit higher in 40 years old or more.fig = plt.figure(figsize = (15, 6)) ax = fig.add_subplot(121) sns.histplot(df[(df.children != 0)]['charges'], color = BLUE2, ax = ax, kde = True, bins = 20) ax.set_title('Distribution of Charges for people with children', fontdict = {'fontsize': 18}, loc = 'left') ax = fig.add_subplot(122) sns.histplot(df[(df.children == 0)]['charges'], color = ORANGE1, ax = ax, kde = True, bins = 20) ax.set_title('Distribution of Charges for people with no children', fontdict = {'fontsize': 18}, loc = 'left') sns.despine() plt.show()People with children pay more in insurance prices. 4.1 Feature Importance Here we'll check how the data correlates with each other.plt.figure(figsize = (12, 8)) sns.heatmap(df.corr(), cmap = "BuPu", annot = True) plt.xticks(fontsize = 14) plt.yticks(fontsize = 14) plt.title('How is the data correlated to each other?', fontdict = {'fontsize': 25, 'fontweight': 'bold'}, loc = 'left') sns.despine() plt.show()There's not really a significant correlation between the numerical variables. 
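The side-by-side histograms above can also be summarised numerically with group medians (medians rather than means, since the charges are heavily skewed); a minimal sketch:
print(df.groupby('smoker').charges.median())          # smokers vs non-smokers
print(df.groupby('sex').charges.median())             # women vs men
print(df.groupby(df.bmi >= 40).charges.median())      # severe obesity vs the rest
print(df.groupby(df.age >= 40).charges.median())      # 40+ vs younger
print(df.groupby(df.children != 0).charges.median())  # with vs without children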
Let's check how the variables influence the charges.df_corr = df.copy() age_corr = df_corr['age'].corr(df_corr['charges']) print(f'The correlation between age and charges is {round(age_corr, 3)}.') df_corr.sex = df_corr.sex.map({"female": 0, "male": 1}) gender_corr = df_corr['sex'].corr(df_corr['charges']) print(f'The correlation between gender and charges is {round(gender_corr, 3)}.') bmi_corr = df_corr['bmi'].corr(df_corr['charges']) print(f'The correlation between BMI and charges is {round(bmi_corr, 3)}.') children_corr = df_corr['children'].corr(df_corr['charges']) print(f'The correlation between the number of children and the charges is {round(children_corr, 3)}.') df_corr.smoker = df_corr.smoker.map({"yes": 1, "no": 0}) smoker_corr = df_corr['smoker'].corr(df_corr['charges']) print(f'The correlation between smoking and charges is {round(smoker_corr, 3)}.')The correlation between smoking and charges is 0.787.The "region" column is trickier to check the correlation against the charges because we don't have a binary variable. We can use some statistical tests to check the correlation, but checking manually (changing the values for the mapping) gives us somewhat similar results: very low correlation between the region and the charges.df_corr.region = df_corr.region.map({'southwest': 4, 'southeast': 3, 'northwest': 1, 'northeast': 2}) region_corr = df_corr['region'].corr(df_corr['charges']) print(f'The correlation between the regions and the charges is {round(region_corr, 3)}.')The correlation between the regions and the charges is 0.012.From this we see that the variable that most affects the charges is whether someone smokes or not, followed by a person's age and BMI. All of this makes sense in the "real world". 5 Model Prediction 5.1 Data Preparation Here we will change the categorical variables to numerical (but still categorical) variables. We'll do this by mapping each word to a value. It's preferable to encode the variables using One Hot Encoding, but for the purpose of this project we'll do a simple mapping.df.sex = df.sex.map({"female": 0, "male": 1}) df.region = df.region.map({'southwest': 0, 'southeast': 1, 'northwest': 2, 'northeast': 3}) df.smoker = df.smoker.map({"yes": 1, "no": 0})Getting our X and y...X = df.drop('charges', axis = 1) y = df.chargesSplitting the data...X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = rs) print(f'X train size: {len(X_train)} || y train size: {len(y_train)}') print(f'X test size: {len(X_test)} || y test size: {len(y_test)}')X train size: 1003 || y train size: 1003 X test size: 335 || y test size: 335As seen above, we split our data only in two parts, train and test, without the validation part. We'll still validate our results, but we'll do it in the test set, seeing as our dataset has a small number of data points at our disposal. 5.2 Machine Learning: Modelling and Tuning In this part we'll use a variety of regression models in our data to predict the charges. Our chosen metric is the Root Mean Squared Error (RMSE), because it gives a good insight about how our prediction is doing, but we'll also take a look at the Mean Absolute Error (MAE) and the Mean Squared Error (MSE). Model 0: Dummy Model It's always good to have a baseline to check our future models.
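Predicting the average of the training charges for every test row is exactly what a mean-strategy dummy model amounts to; here is a minimal hand-rolled sketch of that baseline, using the split created above, just to make the RMSE it produces less mysterious:
import numpy as np
baseline_pred = np.full(len(y_test), y_train.mean())  # one constant prediction: the mean training charge
baseline_rmse = np.sqrt(np.mean((y_test - baseline_pred) ** 2))  # RMSE is the square root of the MSE
print(f'Baseline RMSE: {round(baseline_rmse, 3)}')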
Here we'll predict, for new values, the average of the charges.dummy = DummyRegressor(strategy = 'mean') mae_dummy, mse_dummy, rmse_dummy = run_model(dummy)MAE: 9668.122 MSE: 174387341.422 RMSE: 13205.58Model 1: Linear Regressionlinreg = LinearRegression() mae_linreg, mse_linreg, rmse_linreg = run_model(linreg)MAE: 4532.344 MSE: 43075124.028 RMSE: 6563.164There's not much tuning we can do in a simple linear regression, so we'll go ahead to the next models. Model 2: RandomForestRegressorrandomreg = RandomForestRegressor(random_state = rs) mae_rfr, mse_rfr, rmse_rfr = run_model(randomreg)MAE: 2773.741 MSE: 27811593.691 RMSE: 5273.67RandomForestRegressor Tuning Here we'll use `GredSearchCV` to find possible best candidate for hyperparameters in our model. It tests a combination of values and returns the best one.randomreg = RandomForestRegressor(random_state = rs) params = { 'n_estimators': [20, 50, 100], 'max_features': ['auto', 'sqrt', 'log2'], 'min_samples_split': [10, 50, 100], 'bootstrap': [True, False] } rfr_search = GridSearchCV(randomreg, params, scoring = 'neg_root_mean_squared_error', error_score = 'raise', n_jobs = -1, verbose = 0, cv = 5) rfr_search.fit(X_train, y_train) rfr_search.best_params_ rfr_model = rfr_search.best_estimator_ mae_rfr, mse_rfr, rmse_rfr = run_model(rfr_model)MAE: 2860.878 MSE: 26120557.91 RMSE: 5110.828Model 3: Ridge Regressionridge = Ridge(random_state = rs) mae_ridge, mse_ridge, rmse_ridge = run_model(ridge)MAE: 4542.96 MSE: 43100886.662 RMSE: 6565.127Ridge Regression Tuningfor i in [0.001, 0.01, 0.1, 0.5, 1.0, 10, 100]: ridge = Ridge(random_state = rs, alpha = i) print(f'alpha = {i}') run_model(ridge) print("---")alpha = 0.001 MAE: 4532.355 MSE: 43075145.574 RMSE: 6563.166 --- alpha = 0.01 MAE: 4532.451 MSE: 43075339.877 RMSE: 6563.181 --- alpha = 0.1 MAE: 4533.41 MSE: 43077320.96 RMSE: 6563.332 --- alpha = 0.5 MAE: 4537.659 MSE: 43086957.264 RMSE: 6564.066 --- alpha = 1.0 MAE: 4542.96 MSE: 43100886.662 RMSE: 6565.127 --- alpha = 10 MAE: 4640.135 MSE: 43666376.453 RMSE: 6608.054 --- alpha = 100 MAE: 5481.88 MSE: 60500857.665 RMSE: 7778.23 ---Best alpha from above (that gives us the lowest RMSE):ridge = Ridge(random_state = rs, alpha = 0.001) mae_ridge, mse_ridge, rmse_ridge = run_model(ridge)MAE: 4532.355 MSE: 43075145.574 RMSE: 6563.166Model 4: Lasso Regressionlasso = Lasso(random_state = rs) mae_lasso, mse_lasso, rmse_lasso = run_model(lasso)MAE: 4532.579 MSE: 43074041.263 RMSE: 6563.082Lasso Regression Tuningfor i in [0.001, 0.01, 0.1, 0.5, 1.0, 10, 100]: lasso = Lasso(random_state = rs, alpha = i) print(f'alpha = {i}') run_model(lasso) print("---")alpha = 0.001 MAE: 4532.344 MSE: 43075122.934 RMSE: 6563.164 --- alpha = 0.01 MAE: 4532.346 MSE: 43075113.094 RMSE: 6563.163 --- alpha = 0.1 MAE: 4532.368 MSE: 43075014.734 RMSE: 6563.156 --- alpha = 0.5 MAE: 4532.461 MSE: 43074579.948 RMSE: 6563.123 --- alpha = 1.0 MAE: 4532.579 MSE: 43074041.263 RMSE: 6563.082 --- alpha = 10 MAE: 4535.494 MSE: 43072274.388 RMSE: 6562.947 --- alpha = 100 MAE: 4569.692 MSE: 43154246.476 RMSE: 6569.189 ---Best alpha from above (that gives us the lowest RMSE):lasso = Lasso(random_state = rs, alpha = 10) mae_lasso, mse_lasso, rmse_lasso = run_model(lasso)MAE: 4535.494 MSE: 43072274.388 RMSE: 6562.947Model 5: Bayesian Ridge Regressionbayrid = BayesianRidge() mae_br, mse_br, rmse_br = run_model(bayrid)MAE: 4536.282 MSE: 43083681.535 RMSE: 6563.816Bayesian Ridge Regression Tuningbayrid = BayesianRidge() params = { 'n_iter': [5, 10, 100], 'alpha_init':[0.1, 0.5, 1], 'lambda_init': 
[1, 1e-1, 1e-2, 1e-3] } bayrid_search = GridSearchCV(bayrid, params, scoring = 'neg_root_mean_squared_error', error_score = 'raise', n_jobs = -1, verbose = 0, cv = 5) bayrid_search.fit(X_train, y_train) print(bayrid_search.best_params_) bayrid_search = bayrid_search.best_estimator_ mae_br, mse_br, rmse_br = run_model(bayrid_search){'alpha_init': 0.5, 'lambda_init': 0.1, 'n_iter': 5} MAE: 4536.282 MSE: 43083681.535 RMSE: 6563.816Model 6: Support Vector Regressionsvr = SVR() mse_svr, mse_svr, rmse_svr = run_model(svr)MAE: 9470.772 MSE: 200933661.591 RMSE: 14175.107Support Vector Regression Tuningsvr = SVR() params = { 'kernel': ['linear', 'poly', 'rbf'], 'degree': [2, 3, 4], 'coef0': [100, 500], 'C': [10, 20], 'epsilon': [0.1, 0.01] } svr_search = GridSearchCV(svr, params, scoring = 'neg_root_mean_squared_error', error_score = 'raise', n_jobs = -1, verbose = 0, cv = 5) svr_search.fit(X_train, y_train) print(svr_search.best_params_) svr_search = svr_search.best_estimator_ mse_svr, mse_svr, rmse_svr = run_model(svr_search){'C': 20, 'coef0': 100, 'degree': 4, 'epsilon': 0.01, 'kernel': 'poly'} MAE: 3143.664 MSE: 34501865.925 RMSE: 5873.8296 Models Results Let's finally compare all our models.metrics = { 'DummyModel': rmse_dummy, 'LinearRegression': rmse_linreg, 'RandomForestRegressor': rmse_rfr, 'RidgeRegression': rmse_ridge, 'LassoRegression': rmse_lasso, 'BayesianRegression': rmse_br, 'SupportVectorRegression': rmse_svr } df_metrics = pd.DataFrame(metrics.items(), columns = ['Model', 'RMSE']) df_metrics.sort_values('RMSE')From the above table, we see that the model that gives us the least _Root Mean Squared Error_ is the RandomForestRegressor. So let's go ahead and save it. Last but not least, let's check which features where the most determinant for our model.feat_import_rfr = pd.Series(rfr_model.feature_importances_, index = X_train.columns) ax = feat_import_rfr.sort_values(ascending = True).plot(kind = 'barh', figsize = (10, 7)) plt.title('Feature Importance - RandomForestRegressor', fontsize = 22, color = GRAY1) sns.despine() plt.show()As we've seen before, the variables "smoker", "bmi" and "age" all play an important part in the prediction. 7 Saving the Model Our best model is the **"rfr_model"**, the RandomForestRegressor model obtained from the GridSearchCV.model = rfr_model output_file = 'model_randomforestregressor.bin' with open(output_file, 'wb') as f_out: pickle.dump(model, f_out)Question 1 - How many Bitcoin transactions were made each day in 2017?You can use the "timestamp" column from the "transactions" table to answer this question.import pandas as pd import bq_helper btc = bq_helper.BigQueryHelper(active_project="bigquery-public-data", dataset_name="bitcoin_blockchain") btc.list_tables()only two tables in the one, blocks and transactionstrans_dat = btc.head('transactions') trans_dat trans_dat.columns #How many Bitcoin transactions were made each day in 2017? 
q1 = """ WITH time AS( SELECT TIMESTAMP_MILLIS(timestamp) AS trans_time, transaction_id FROM `bigquery-public-data.bitcoin_blockchain.transactions` ) SELECT COUNT(transaction_id) AS transactions, EXTRACT(DAY FROM trans_time) AS day, EXTRACT(MONTH FROM trans_time) AS month FROM time WHERE EXTRACT(YEAR FROM trans_time) = 2017 GROUP BY month, day ORDER BY month, day """ btc.estimate_query_size(q1) q1_ans = btc.query_to_pandas_safe(q1 ,max_gb_scanned=21) q1_ansQuestion 2 - How many transactions are associated with each merkle root?You can use the "merkle_root" and "transaction_id" columns in the "transactions" table to answer this question.trans_dat q2 = """ SELECT merkle_root AS merkle, COUNT(transaction_id) AS transactions FROM `bigquery-public-data.bitcoin_blockchain.transactions` GROUP BY merkle ORDER BY transactions """ btc.estimate_query_size(q2) q2_ans = btc.query_to_pandas_safe(q2 ,max_gb_scanned=37) q2_ansThe reverse primer can anneal in two locations creating two distinct PCR products.pUCMini1 is probably the expected one, the pUCMini2 has a very short annealing footprint for the reverse primer.pUCMini = pUCMini1.looped() rxn3 = pcr( pUCmu_F, pUCmu_R, pUCMini).looped() rxn3 print(rxn3.seq) rxn4 = pcr( pUcReFix_F, pUcReFix_R, rxn3) print(rxn4.seq)ctaatctcgaggatatccgaattcgagctcggtacccGGGATCCTCTAGAGTCGACCTGCAGGCATGCAAGCTTGGCcatgggttaacggcccatatggcctcgcgacgcgtTATGTATCCGCTCATGAGACAATAACCCTGATAAATGCTTCAATAATATTGAAAAAGGAAGAGTATGAGTATTCAACATTTCCGTGTCGCCCTTATTCCCTTTTTTGCGGCATTTTGCCTTCCTGTTTTTGCTCACCCAGAAACGCTGGTGAAAGTAAAAGATGCTGAAGATCAGTTGGGTGCACGAGTGGGTTACATCGAACTGGATCTCAACAGCGGTAAGATCCTTGAGAGTTTTCGCCCCGAAGAACGTTTTCCAATGATGAGCACTTTTAAAGTTCTGCTATGTGGCGCGGTATTATCCCGTATTGACGCCGGGCAAGAGCAACTCGGTCGCCGCATACACTATTCTCAGAATGACTTGGTTGAGTACTCACCAGTCACAGAAAAGCATCTTACGGATGGCATGACAGTAAGAGAATTATGCAGTGCTGCCATAACCATGAGTGATAACACTGCGGCCAACTTACTTCTGACAACGATCGGAGGACCGAAGGAGCTAACCGCTTTTTTGCACAACATGGGGGATCATGTAACTCGCCTTGATCGTTGGGAACCGGAGCTGAATGAAGCCATACCAAACGACGAGCGTGACACCACGATGCCTGTAGCAATGGCAACAACGTTGCGCAAACTATTAACTGGCGAACTACTTACTCTAGCTTCCCGGCAACAATTAATAGACTGGATGGAGGCGGATAAAGTTGCAGGACCACTTCTGCGCTCGGCCCTTCCGGCTGGCTGGTTTATTGCTGATAAATCTGGAGCCGGTGAGCGTGGGTCTCGCGGTATCATTGCAGCACTGGGGCCAGATGGTAAGCCCTCCCGTATCGTAGTTATCTACACGACGGGGAGTCAGGCAACTATGGATGAACGAAATAGACAGAT[...]I guessed that the restriction enzyme used is XhoI? I do not think it is mentioned in the paper.from Bio.Restriction import XhoI stuffer1, backbone, stuffer2 = rxn4.cut(XhoI) stuffer1, stuffer2 pUCMu = backbone.looped().synced("acgcgtcgcgaggccatatgggccgtt") pUCMu pUCMu.locus = "pUCmu_asm" pUCMu.write("pUCmu_assembled.gb")Grundlagen Neuronale Netze - einfache Operationenhttps://bootcamp.codecentric.aiIn diesem Notebook wollen wir das einfache Beispiel aus dem Video nachvollziehen. Wir:- definieren einen Input Tensor (aus einem 28x28 Pixel Bild von MNIST)- wir normalisieren die Werte des Bildes- wir definieren Matrizen mit Gewichten, die wir lernen wollen- wir kombinieren Matrix Multiplikationen und Aktivierungsfunktionen, um aus einem Input mit 784 Pixeln einen Output mit 10 Werten zu erhalten- wir definieren ein Label- (wir optimieren die Gewichte, damit sie zum Label passen - kleiner Vorausblick auf kommende Videos)Das folgende neuronale Netz ist keine besonders sinnvolle Architektur. Auch das Training mit nur einem Bild macht natürlich wenig Sinn. Es geht darum zu verstehen, welche Rechenoperationen "unter der Haube" eines neuronalen Netzes stattfinden. 
**Daher ist das ganze (hier) noch stark vereinfacht.**Hier noch einmal das Bild, was wir versuchen in Code nachzuvollziehen: ![simple nn](simple_nn.png) Zu diesem Notebook gibt es noch ein **begleitendes Video**, das die einzelnen Abschnitte erklärt.Du findest das Video hier: https://youtu.be/26fGbPYjSNY Beispiel mit PyTorchZunächst ein paar benötigte Imports:%matplotlib inline import torch import torchvision import math from torchvision import transforms from matplotlib import pyplotFolgender Tensor ist die interne Darstellung eines Bildes.Es ist ein 28x28 Pixel Matrix mit Zahlenwerten von 0-255. (0 = schwarz, 255 = weiss, dazwischen Graustufen)img = torch.tensor( [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 18, 18, 18, 126, 136, 175, 26, 166, 255, 247, 127, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 30, 36, 94, 154, 170, 253, 253, 253, 253, 253, 225, 172, 253, 242, 195, 64, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 49, 238, 253, 253, 253, 253, 253, 253, 253, 253, 251, 93, 82, 82, 56, 39, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 18, 219, 253, 253, 253, 253, 253, 198, 182, 247, 241, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 80, 156, 107, 253, 253, 205, 11, 0, 43, 154, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 1, 154, 253, 90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 139, 253, 190, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 190, 253, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 241, 225, 160, 108, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 81, 240, 253, 253, 119, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45, 186, 253, 253, 150, 27, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 93, 252, 253, 187, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 249, 253, 249, 64, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 46, 130, 183, 253, 253, 207, 2, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 39, 148, 229, 253, 253, 253, 250, 182, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24, 114, 221, 253, 253, 253, 253, 201, 78, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 23, 66, 213, 253, 253, 253, 253, 198, 81, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 18, 171, 219, 253, 253, 253, 253, 195, 80, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 55, 172, 226, 253, 253, 253, 253, 244, 133, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 136, 253, 253, 253, 212, 135, 132, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=torch.float)So kann man sich die Dimensionen des Tensors anschauen:img.size()Und so sieht es aus, wenn man die Zahlen als Bild interpretiert (eine Zahl 5 aus 
dem MNIST Datensatz):pyplot.imshow(img, cmap="gray")Jetzt klopfen wir den Tensor flach. Aus einer 28 x 28 Matrix wird ein Vektor mit 784 "Input Pixeln". Das sind die gleichen Zahlenwerte - nur nicht mehr in 28 Reihen sondern alle in einer Reihe aneinander gehängt.input_tensor = img.flatten() input_tensor.size()Jetzt schauen wir uns mal den Wert an Stelle 180 an:input_tensor[180]Der Wert beträgt 170 ...... nun schauen wir an was der größte Wert in dem Vektor ist:max(input_tensor)Wie zu erwarten war, ist es 255 (weiß - sicher sind einige Pixel in dem Bild weiß - größere Zahlen kann es bei einem solchen Bild nicht geben).Jetzt machen wir eine einfache "Normalisierung" und teilen alle Werte des Vektors durch 255.Damit ändern wir den Zahlenbereich im Vektor von 0-255 auf 0-1. Mit diesem Schritt kann man Probleme beim Training verringern - vor allem bei tieferen neuronalen Netzen wird das sehr wichtig.normalized_input_tensor = input_tensor / 255Der Wert an der Stelle 180 (den wir vorher schon angesehen haben) ist jetzt 0.6667Die Zahlen stehen aber noch im gleichen Verhältnis 0,66 ist 2/3 von 1 sowie 170 2/3 von 255 ist.normalized_input_tensor[180] max(normalized_input_tensor)Wie zu erwarten ist die größte Zahl im Vektor jetzt 1Jetzt initialisieren wir unsere erste Weight Matrix mit Parametern, die gelernt werden können. Anders als im Video wählen wir nicht 784x3 sondern 784x20 - im Video wurde nur eine kleinere Zahl gewählt, damit es auf eine Folie passt und übersichtlicher aussieht. Die Zahlen sind zunächst (kleine) Zufallszahlen. (was requires_grad bedeutet überspringen wir an dieser Stelle - dazu kommen wir später)Wir teilen die Zufallszahlen durch die Wurzel aus der Größe der Input Schicht - wir tun dies, um die Weights auf einen "sinnvollen" Werte-Bereich zu initialisieren. Wenn wir das nicht tun, kann es sein, dass die Weights zu groß sind und unser neuronales Netz nicht lernt. Wenn man später als "Practitioner" libraries wie fast.ai verwendet muss man sich i.d.R. um solche Details nicht mehr sorgen - man sollte jedoch mal gesehen haben welche kleinen Änderungen wichtig sind und welche Auswirkungen sie haben.weights_tensor = torch.randn((784, 20)) / math.sqrt(784) weights_tensor.requires_grad_() weights_tensor.size()Jetzt berechnen wir wie im Video zuvor einige "Activations". Dazu machen wir eine Matrix-Multiplikation mit dem Input @ weightsfirst_activation = normalized_input_tensor @ weights_tensor first_activationDas ist das Ergebnis unserer ersten Matrix-Multiplikation.Anders als im Video Beispiel hat diese jetzt auch wieder eine Size von 20, da wir ja eine größere Weight Matrix gewählt haben.first_activation.size()Jetzt kommt die Aktivierungs-Funktion, um auch nichtlineare Zusammenhänge lernen zu können. Im Prinzip setzt diese alle negativen Activations aus dem vorigen Schritt auf 0.second_activation = first_activation.relu() second_activationJetzt initialisieren wir die zweite Weight Matrix (wie im Video Beispiel). Hier müssen wir jetzt auch wieder die Size von 3 auf 20 anpassen, damit die Matrix-Multiplikationen zusammen passen. Nach wie vor wollen wir aber eine Output Größe von 10 haben (in unserem Beispiel wollen wir ja Zahlen von 0-9) vorhersagen.more_weights_tensor = torch.randn((20, 10)) / math.sqrt(20) more_weights_tensor.requires_grad_()... eine weitere Matrix-Multiplikation ...output = second_activation @ more_weights_tensor output output.size()Und das ist jetzt erstmal unser Output. Der Vektor hat die richtige Dimension. 
Die Zahlen darin sind bis hierhin erstmal **völlig bedeutungslos**.Wir haben einmal das Modell mit Zufallszahlen durchgerechnet und geschaut, was am Ende raus kommt -> Zufallszahlen. Label definierenAn dieser Stelle definieren wir jetzt ein Label. Das Label ist das was wir vom neuronalen Netz erwarten. Wenn ich vorne ein Bild einer 5 rein gebe, dann soll folgendes herauskommen:label = torch.tensor([0, 0, 0, 0, 0, 1, 0, 0, 0, 0], dtype=torch.float)Diese verbreitete Form eines Labels nennt man auch "one-hot encoded vector". Es ist Vektor der verschiedene Klassen abbilden kann - der Eintrag an der Stelle 0 ist die Wahrscheinlichkeit für eine 0. Der Wert an der Stelle 5 ist die Wahrscheinlichkeit für eine 5 etc. (Es könnte aber auch etwas völlig anderes bedeuten - z.B. Stelle 0 = Katze, Stelle 3 = Hund etc.)Da wir ja ein Bild einer 5 betrachten, soll also das label[5] = 1 und alles andere 0 sein. Jetzt fügen wir dem Output noch eine weitere Aktivierungs-Funktion hinzu und haben unsere "prediction" - also unsere Vorhersage. Diese bringt die Zahlen Werte in einen Bereich von 0-1 (warum ist an dieser Stelle nicht relevant - wir tun es einfach :) )prediction = output.sigmoid()Hier jetzt die aktuelle Vorhersage des Modells (gerundet, dass man es besser lesen kann):prediction.round()Loss FunktionUnsere Loss Funktion soll uns den Fehler zwischen unserer prediction und dem label berechnen. Wir verwenden eine bestehende pytorch Funktion. (Warum genau diese, ist an dieser Stelle auch noch nicht relevant. Diese Loss Funktion eignet sich eben besonders gut für Klassifikations-Probleme.)loss_func = torch.nn.functional.binary_cross_entropyNun berechnen wir einmal beispielhaft den aktuellen Loss, also den Fehler oder den "Abstand" zwischen unserer prediction und dem label.loss_func(prediction, label)Das ist unser loss - was sagt uns das? Erstmal noch gar nichts (auch hier sind wir immer bei völlig aussagslosen Zufallszahlen.) Jetzt machen wir eine "manuelle Vorhersage". Wir definieren einfach eine prediction wie sie uns gefällt. Sind bei dieser prediction mehr Einsen und Nullen an der richtigen Stelle sollte der folgende Loss kleiner weren - ansonsten größer:manual_prediction = torch.tensor([0, 0, 0, 0, 0, 1, 0, 1, 1, 0], dtype=torch.float) loss_func(manual_prediction, label)Was passiert wenn alle Zahlen richtig vorhergesagt werden? Wenn unsere prediction gleich dem label ist?loss_func(label, label)... je ähnlicher Prediction und Label werden, desto mehr geht der Loss gegen 0. Wenn Prediction = Label ist, ist der Loss 0. Modell optimieren(Kleiner Vorausblick)Jetzt optimieren wir in ein paar Schritten unsere weight so, dass die prediction möglichst nah an das label heran kommt. Das ist das (vereinfachte) Prinzip, wie neuronale Netze lernen. 
We will look at this optimization in more detail in a following video, so here I simply walk through the steps without explaining them in detail.

First we compute the loss as a tensor.

loss = loss_func(prediction, label) loss

We ask pytorch to perform a "backpropagation" and determine the gradients for the weight matrices.

loss.backward() more_weights_tensor.grad.data

Define the model

Here we simply define the same computations as before again - only as a function, so that we can call it over and over in a loop.

def model(x): return ( ((x @ weights_tensor).relu()) @ more_weights_tensor).sigmoid() model(normalized_input_tensor)

We make a prediction with the current model for our input tensor (the image of the 5):

new_pred = model(normalized_input_tensor) new_pred

... determine the loss (the error between prediction and label) and perform a backpropagation

loss = loss_func(new_pred, label) loss.backward()

Now we use the gradients to nudge the weights a tiny bit in the right direction:

lr = 0.1 with torch.no_grad(): weights_tensor -= weights_tensor.grad * lr more_weights_tensor -= more_weights_tensor.grad * lr weights_tensor.grad.zero_() more_weights_tensor.grad.zero_() new_pred = model(normalized_input_tensor) loss_func(new_pred, label)

... and we see that our error has indeed become a little smaller.

Iterative optimization

If we now run these simple optimization steps many times, the weights are adjusted more and more so that the prediction approaches the label (the loss gets smaller):

lr = 0.1 for i in range(1000): new_pred = model(normalized_input_tensor) loss = loss_func(new_pred, label) loss.backward() with torch.no_grad(): weights_tensor -= weights_tensor.grad * lr more_weights_tensor -= more_weights_tensor.grad * lr weights_tensor.grad.zero_() more_weights_tensor.grad.zero_() if (i % 100 == 0): print("Loss: ", loss_func(new_pred, label).item())

Loss: 0.521828830242157 Loss: 0.0037695704959332943 Loss: 0.0015075983246788383 Loss: 0.0009094176930375397 Loss: 0.0006413256051018834 Loss: 0.0004911682335659862 Loss: 0.00039589041261933744 Loss: 0.0003304002166260034 Loss: 0.000282724155113101 Loss: 0.00024658729671500623

After the optimization, our prediction now looks like this:

model(normalized_input_tensor)

Written like this, it is hard to make much of it at first glance. Let's look at which number is the largest:

model(normalized_input_tensor).argmax()

The number at position 5 is the largest - so it is indeed the 5 that we want to predict.
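For completeness, here is a sketch - my addition, not part of the original notebook - of the same optimization loop written with torch.optim.SGD, which performs exactly the manual `weight -= grad * lr` update and the gradient zeroing for us (it reuses model, loss_func, label and the two weight tensors defined above):

import torch

optimizer = torch.optim.SGD([weights_tensor, more_weights_tensor], lr=0.1)

for i in range(1000):
    new_pred = model(normalized_input_tensor)
    loss = loss_func(new_pred, label)

    optimizer.zero_grad()   # reset the gradients from the previous step
    loss.backward()         # backpropagation: compute new gradients
    optimizer.step()        # plain SGD update: weight -= grad * lr

    if i % 100 == 0:
        print("Loss:", loss.item())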
Rounding the numbers up and down:

model(normalized_input_tensor).round()

Sublinear Search Implementation

IMPORTS:

import numpy as np from random import choice

Sublinear Search Implementation

def sub_linear_searcher(inp_list, x):
    # NOTE: the comparison branches and the tail of this function were garbled in the
    # original (the '<' operators and the following lines were stripped); the body below
    # is a plausible reconstruction of the intended ~O(sqrt(n)) randomized membership test.
    rand_list = np.array(inp_list)
    np.random.shuffle(rand_list)
    s = rand_list[:int(len(inp_list)**0.5)]   # random sample of ~sqrt(n) elements
    presence = False
    p = s[0]                                  # closest sampled value <= x
    q = s[0]                                  # closest sampled value > x
    min_p = float('inf')
    max_q = float('inf')
    for i in range(len(s)):
        if s[i] <= x and x - s[i] < min_p:
            min_p = x - s[i]
            p = s[i]
        elif s[i] > x and s[i] - x < max_q:
            max_q = s[i] - x
            q = s[i]
    if min_p == 0:                            # x itself was sampled
        presence = True
    else:                                     # short linear scan between the two bounds
        lo, hi = sorted((inp_list.index(p), inp_list.index(q)))
        presence = x in inp_list[lo:hi + 1]
    return presence

Creating A List

arr = [i for i in range(0,10000,3)]

Results:

iterations = 10000#int(input("Iterations\t")) list_outputs = [] for i in range(iterations): n = choice(arr) pre = sub_linear_searcher(arr, n) list_outputs.append(pre) print('sub_linear_searcher\t')#, list_outputs) print('Number of Trials:\t', len(list_outputs)) print('Number of Trues\t',list_outputs.count(True)) print('Number of False\t',list_outputs.count(False))

**This removes any previously downloaded copies of the PostgreSQL JDBC jar file (left over from earlier wget runs)**

!rm "/content/postgresql-42.2.9.jar" !rm "/content/postgresql-42.2.9.jar.1" !rm "/content/postgresql-42.2.9.jar.2"

rm: cannot remove '/content/postgresql-42.2.9.jar': No such file or directory rm: cannot remove '/content/postgresql-42.2.9.jar.1': No such file or directory rm: cannot remove '/content/postgresql-42.2.9.jar.2': No such file or directory

**wget downloads the PostgreSQL JDBC jar file into the /content directory**

!wget https://jdbc.postgresql.org/download/postgresql-42.2.9.jar

--2021-02-15 18:47:09-- https://jdbc.postgresql.org/download/postgresql-42.2.9.jar Resolving jdbc.postgresql.org (jdbc.postgresql.org)... 172.16.31.10, fc00:e968:6179::de52:7100 Connecting to jdbc.postgresql.org (jdbc.postgresql.org)|172.16.31.10|:443... connected. HTTP request sent, awaiting response...
200 OK Length: 914037 (893K) [application/java-archive] Saving to: ‘postgresql-42.2.9.jar’ postgresql-42.2.9.j 100%[===================>] 892.61K 4.12MB/s in 0.2s 2021-02-15 18:47:09 (4.12 MB/s) - ‘postgresql-42.2.9.jar’ saved [914037/914037]**Py Spark initializing and configuring the postgres jar file for startup**# Start Spark session #from pyspark.sql import SparkSession #spark = SparkSession.builder.appName("sparkDates").getOrCreate() from pyspark.sql import SparkSession spark = SparkSession.builder.appName("CloudETL").config("spark.driver.extraClassPath","/content/postgresql-42.2.9.jar").getOrCreate()**Loading the file from Amazon website as GZ format and uncompresses it for processing**# Load in data from pyspark import SparkFiles url ="https://s3.amazonaws.com/amazon-reviews-pds/tsv/amazon_reviews_us_Wireless_v1_00.tsv.gz" spark.sparkContext.addFile(url) df = spark.read.csv(SparkFiles.get("amazon_reviews_us_Wireless_v1_00.tsv.gz"), sep="\t", header=True, inferSchema=True, timestampFormat="yyyy/MM/dd HH:mm:ss") df.show() df = df.dropna() df.show()+-----------+-----------+--------------+----------+--------------+--------------------+----------------+-----------+-------------+-----------+----+-----------------+--------------------+--------------------+-----------+ |marketplace|customer_id| review_id|product_id|product_parent| product_title|product_category|star_rating|helpful_votes|total_votes|vine|verified_purchase| review_headline| review_body|review_date| +-----------+-----------+--------------+----------+--------------+--------------------+----------------+-----------+-------------+-----------+----+-----------------+--------------------+--------------------+-----------+ | US| 16414143|R3W4P9UBGNGH1U|B00YL0EKWE| 852431543|LG G4 Case Hard T...| Wireless| 2| 1| 3| N| Y|Looks good, funct...|2 issues - Once...| 2015-08-31| | US| 50800750|R15V54KBMTQWAY|B00XK95RPQ| 516894650|Selfie Stick Fibl...| Wireless| 4| [...]**The Review table updated, data extracted from main data frame for review id, customer id and will be used to update the DB table**reviews_info = df.select(["review_id", "customer_id", "product_id", "product_parent", "review_date"]).limit(100) reviews_info.show(5)+--------------+-----------+----------+--------------+-----------+ | review_id|customer_id|product_id|product_parent|review_date| +--------------+-----------+----------+--------------+-----------+ |R3W4P9UBGNGH1U| 16414143|B00YL0EKWE| 852431543| 2015-08-31| |R15V54KBMTQWAY| 50800750|B00XK95RPQ| 516894650| 2015-08-31| | RY8I449HNXSVF| 15184378|B00SXRXUKO| 984297154| 2015-08-31| |R18TLJYCKJFLSR| 10203548|B009V5X1CE| 279912704| 2015-08-31| |R1NK26SWS53B8Q| 488280|B00D93OVF0| 662791300| 2015-08-31| +--------------+-----------+----------+--------------+-----------+ only showing top 5 rows**The DB initialization, this is Heroku Postgres DB instance, DB created for BigData, there are limitations 10000 records per table**mode="append" #rds_connection_string = "postgres:admin@localhost:5432/final_project_stocks" #jdbc_url = "jdbc:postgresql://:5432/my_data_class_db" jdbc_url = "jdbc:postgresql://ec2-52-3-4-232.compute-1.amazonaws.com:5432/ddv6vu8jpdbjns" config = {"user":"slcslzlanikhqj", "password": "", "driver":"org.postgresql.Driver"}**The Review data updated on the main review_id_table in Heroku DB instance**reviews_info.write.jdbc(url=jdbc_url, table='review_id_table', mode=mode, properties=config)T**he Product data from the main dataframe is extracted and limited to 1000 to udpate the product table**product_info = 
df.select(["product_id", "product_title"]).limit(1000).distinct() product_info.show(5)+----------+--------------------+ |product_id| product_title| +----------+--------------------+ |B00YL0EKWE|LG G4 Case Hard T...| |B00XK95RPQ|Selfie Stick Fibl...| |B00SXRXUKO|Tribe AB40 Water ...| |B009V5X1CE|RAVPower® Element...| |B00D93OVF0|Fosmon Micro USB ...| +----------+--------------------+ only showing top 5 rows**The product data updated in products table in Heroku**product_info.write.jdbc(url=jdbc_url, table='products', mode=mode, properties=config)**The Customer data and its count based on product they chosen and grouped by to get the number of products customer reviewed**#date_df = df.groupBy('date').agg({"date": "count"}) customer_info = df.groupBy(["customer_id"]).agg({"customer_id": "count"}).limit(1000) customer_info.show(5) customer_info.write.jdbc(url=jdbc_url, table='customers', mode=mode, properties=config)**Hedetniemi Matrix Sum**This code is used to implement the [Hedetniemi Matrix Sum](https://deepblue.lib.umich.edu/handle/2027.42/59763). (lin.3326 at osu.edu) Created: 5/6/2020from google.colab import drive drive.mount('/content/gdrive')Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly Enter your authorization code: ·········· Mounted at /content/gdrive**Install packages**!pip install timeout-decoratorCollecting timeout-decorator Downloading https://files.pythonhosted.org/packages/07/1c/0d9adcb848f1690f3253dcb1c1557b6cf229a93e724977cb83f266cbd0ae/timeout-decorator-0.4.1.tar.gz Building wheels for collected packages: timeout-decorator Building wheel for timeout-decorator (setup.py) ... 
[?25l[?25hdone Created wheel for timeout-decorator: filename=timeout_decorator-0.4.1-cp36-none-any.whl size=5021 sha256=3b5557809410ee8e435697ae7b9c36878cae1b187e936e900a3cf23ca7ffe621 Stored in directory: /root/.cache/pip/wheels/f1/e6/ea/7387e3629cb46ba65140141f972745b823f4486c6fe884ccb8 Successfully built timeout-decorator Installing collected packages: timeout-decorator Successfully installed timeout-decorator-0.4.1**Generate graph data** Data from the original article## [node i, node j, distance between node i and j] ## using data from example 1: San Francisco Bay Area Graph of Time-Distances (in minutes) data = [[1, 2, 30], [1, 4, 30], [1, 9, 40], [2, 3, 25], [2, 4, 40], [3, 4, 50], [4, 5, 30], [4, 6, 20], [5, 7, 25], [6, 7, 20], [6, 9, 20], [7, 8, 25], [8, 9, 20]] nodes = 9Create random graph%cd '/content/gdrive/My Drive/Colab Notebooks/hedetniemi_matrix_sum' import networkx as nx import random ## Number of nodes (100/1,000/10,000/100,000/1,000,000) nodes = 1000 print('Nodes: ', nodes) ## Total degree degree = 3 print('Degree: ', degree) G = nx.random_regular_graph(degree,nodes) for (u, v) in G.edges(): G.edges[u,v]['weight'] = random.uniform(1,100) nx.draw(G) nx.write_weighted_edgelist(G, 'graph_n' + str(nodes) + '_d' + str(degree) + '.txt')/content/gdrive/My Drive/Colab Notebooks/hedetniemi_matrix_sum Nodes: 1000 Degree: 3Read random graph%cd '/content/gdrive/My Drive/Colab Notebooks/hedetniemi_matrix_sum' ## Number of nodes (100/1,000/10,000/100,000/1,000,000) nodes = 100 print('Nodes: ', nodes) ## Total degree degree = 3 print('Degree: ', degree) data = [] with open('graph_n' + str(nodes) + '_d' + str(degree) + '.txt', 'r') as f: lines = f.read().splitlines() for line in lines: l = line.split() item = [int(l[0]), int(l[1]), float(l[2])] data.append(item) print(data[0])/content/gdrive/My Drive/Colab Notebooks/hedetniemi_matrix_sum Nodes: 100 Degree: 3 [77, 86, 89.39726376738572]**Implementation 1: list** Construct distance matrixfrom timeit import default_timer import timeout_decorator @timeout_decorator.timeout(10) def distance_matrix(graph, n): ## calculate distance matrix INF = float('inf') dist_mtx = [[INF] * n for i in range(n)] for g in graph: i = g[0] - 1 j = g[1] - 1 d = g[2] dist_mtx[i][j] = d dist_mtx[j][i] = d ## set diagonal to 0 for i in range(n): dist_mtx[i][i] = 0.0 return dist_mtx ## print time costs try: start = default_timer() dist_mtx = distance_matrix(data, nodes) stop = default_timer() print('Time: ', stop - start) except: print('Time: inf') raiseTime: 0.028098457999988113Calculate Hedetniemi Matrix Sumfrom timeit import default_timer import timeout_decorator import copy @timeout_decorator.timeout(1000) def hede_distance(matrix, n): INF = float('inf') mtx_a_t = [[INF] * n for i in range(n)] mtx_a_t_1 = copy.deepcopy(matrix) for p in range(n): for i in range(n): a = mtx_a_t_1[i] for j in range(n): b = [row[j] for row in matrix] mtx_a_t[i][j] = min([a[k] + b[k] for k in range(n)]) if mtx_a_t == mtx_a_t_1: break else: mtx_a_t_1 = copy.deepcopy(mtx_a_t) return mtx_a_t ## print time costs try: start = default_timer() mtx_a_t = hede_distance(dist_mtx, nodes) stop = default_timer() print('Time: ', stop - start) except: print('Time: inf') raise ## print shortest path matrix with open('hedet_mtx_list.txt', 'w') as fw: fw.write('\n'.join(['\t'.join([str(round(cell,2)) for cell in row]) for row in mtx_a_t]))Time: 1.758620876000009**Implementation 2: numpy** Construct distance matrixfrom timeit import default_timer import numpy as np import timeout_decorator 
@timeout_decorator.timeout(10) def distance_matrix(graph, n): ## calculate distance matrix dist_mtx = np.full((n,n), np.inf) for g in graph: i = int(g[0]) - 1 j = int(g[1]) - 1 d = g[2] dist_mtx[i,j] = d dist_mtx[j,i] = d ## set diagonal to 0 np.fill_diagonal(dist_mtx, 0) return dist_mtx ## print time costs try: start = default_timer() dist_mtx = distance_matrix(np.array(data), nodes) stop = default_timer() print('Time: ', stop - start) except: print('Time: inf') ## print distance matrix # print("Distance matrix: ") # for line in dist_mtx: # print(line)Time: 0.0005016160000081982Calculate Hedetniemi Matrix Sumfrom timeit import default_timer import numpy as np import timeout_decorator @timeout_decorator.timeout(100) def hede_distance(matrix, n): mtx_a_t = np.full((n,n), np.inf) mtx_a_t_1 = matrix.copy() for p in range(n): for i in range(n): a = mtx_a_t_1[i] for j in range(n): b = matrix[:,j] mtx_a_t[i,j] = np.amin([a[k] + b[k] for k in range(n)]) if np.array_equal(mtx_a_t, mtx_a_t_1): break else: mtx_a_t_1 = mtx_a_t.copy() return mtx_a_t ## print time costs try: start = default_timer() mtx_a_t = hede_distance(dist_mtx, nodes) stop = default_timer() print('Time: ', stop - start) except: print('Time: inf') ## print shortest path matrix with open('hedet_mtx_np.txt', 'w') as fw: fw.write('\n'.join(['\t'.join([str(round(cell,2)) for cell in row]) for row in mtx_a_t.tolist()]))Time: 7.878349632000038**Implementation 3: numba (njit)** Construct distance matrix!pip show numba from timeit import default_timer import numpy as np import numba import timeout_decorator @timeout_decorator.timeout(10) @numba.njit def distance_matrix(graph, n): ## calculate distance matrix dist_mtx = np.full((n,n), np.inf) for g in numba.prange(graph.shape[0]): i = int(graph[g,0]) - 1 j = int(graph[g,1]) - 1 d = graph[g,2] dist_mtx[i,j] = d dist_mtx[j,i] = d ## set diagonal to 0 np.fill_diagonal(dist_mtx, 0) return dist_mtx ## print time costs try: start = default_timer() dist_mtx = distance_matrix(np.array(data), nodes) stop = default_timer() print('Time: ', stop - start) except: print('Time: inf') ## print distance matrix # print("Distance matrix: ") # for line in dist_mtx: # print(line)Time: 1.4319228440000131Calculate Hedetniemi Matrix Sumfrom timeit import default_timer import numpy as np import numba import timeout_decorator @timeout_decorator.timeout(100) @numba.njit def hede_distance(matrix, n): mtx_a_t = np.full((n,n), np.inf) mtx_a_t_1 = matrix.copy() for p in range(n): for i in numba.prange(n): a = mtx_a_t_1[i] for j in numba.prange(n): b = matrix[:,j] mtx_a_t[i,j] = np.amin(np.array([a[k] + b[k] for k in range(n)])) if np.array_equal(mtx_a_t, mtx_a_t_1): break else: mtx_a_t_1 = mtx_a_t.copy() return mtx_a_t ## print time costs try: start = default_timer() mtx_a_t = hede_distance(dist_mtx, nodes) stop = default_timer() print('Time: ', stop - start) except: print('Time: inf') ## print shortest path matrix with open('hedet_mtx_nb.txt', 'w') as fw: fw.write('\n'.join(['\t'.join([str(round(cell,2)) for cell in row]) for row in mtx_a_t.tolist()]))Time: 0.44318997999999965**Implementation 4: tensorflow**!pip install tensorflow import tensorflow as tf print(tf.version.VERSION) print(tf.config.list_physical_devices('GPU')) print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))2.2.0 [PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')] Num GPUs Available: 1Construct distance matrix (numpy)from timeit import default_timer import numpy as np def distance_matrix(graph): ## 
calculate number of nodes n = int(np.amax(graph[:,1])) ## calculate distance matrix dist_mtx = np.full((n,n), np.inf) for g in graph: i = int(g[0]) - 1 j = int(g[1]) - 1 d = g[2] dist_mtx[i,j] = d dist_mtx[j,i] = d ## set diagonal to 0 np.fill_diagonal(dist_mtx, 0) dist_mtx = tf.convert_to_tensor(dist_mtx, dtype=tf.float32) return dist_mtx, n ## print time costs start = default_timer() dist_mtx, n = distance_matrix(np.array(data)) stop = default_timer() print('Time: ', stop - start) ## print distance matrix # print("Distance matrix: ") # for line in dist_mtx: # print(line)Time: 0.011678949000042849Calculate Hedetniemi Matrix Sumfrom timeit import default_timer import tensorflow as tf import numpy as np def hede_distance(matrix, n): mtx_a_t_1 = matrix p = True while p: for i in tf.range(n): a = mtx_a_t_1[i] for j in tf.range(n): b = matrix[:,j] c = tf.math.reduce_min(tf.math.add(a, b)) c = tf.fill([1], value=c) if tf.math.equal(j, 0): r = c else: r = tf.concat([r, c], 0) r = tf.expand_dims(r, 0) if tf.math.equal(i, 0): mtx_a_t = r else: mtx_a_t = tf.concat([mtx_a_t, r], 0) if tf.reduce_all(tf.math.equal(mtx_a_t_1, mtx_a_t)): p = False else: mtx_a_t_1 = mtx_a_t return mtx_a_t ## print time costs (using gpu) start = default_timer() with tf.device('/device:GPU:0'): mtx_a_t = hede_distance(dist_mtx, n) stop = default_timer() print('Time using GPU: ', stop - start) ## print time costs (using cpu) start = default_timer() with tf.device('/cpu:0'): mtx_a_t = hede_distance(dist_mtx, n) stop = default_timer() print('Time using CPU: ', stop - start) ## print shortest path matrix # print("Shortest path matrix: ") # print(mtx_a_t)**Compare results**!diff 'hedet_mtx_list.txt' 'hedet_mtx_nb.txt' !diff 'hedet_mtx_nb.txt' 'hedet_mtx_np.txt' !diff 'hedet_mtx_list.txt' 'hedet_mtx_np.txt' !diff 'hedet_mtx_list.txt' 'floyd_mtx_list.txt'Summary Resultsfrom utils.all_imports import *; %matplotlib inline # Set seed for notebook repeatability np.random.seed(0) # READ INPUT DATASET # =========================================================================== # dataset_path, dataset_name, column_names, TARGET_COL = get_dataset_location() estimators_list, estimators_names = get_estimators() dataset, feature_vs_values = load_brdiges_dataset(dataset_path, dataset_name) columns_2_avoid = ['ERECTED', 'LENGTH', 'LOCATION'] # Make distinction between Target Variable and Predictors # --------------------------------------------------------------------------- # rescaledX, y, columns = prepare_data_for_train(dataset, target_col=TARGET_COL) # Parameters to be tested for Cross-Validation Approach # ----------------------------------------------------- param_grids = [] parmas_logreg = { 'penalty': ('l1', 'l2', 'elastic', None), 'solver': ('newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'), 'fit_intercept': (True, False), 'tol': (1e-4, 1e-3, 1e-2), 'class_weight': (None, 'balanced'), 'C': (10.0, 1.0, .1, .01, .001, .0001), # 'random_state': (0,), }; param_grids.append(parmas_logreg) parmas_knn_clf = { 'n_neighbors': (2,3,4,5,6,7,8,9,10), 'weights': ('uniform', 'distance'), 'metric': ('euclidean', 'minkowski', 'manhattan'), 'leaf_size': (5, 10, 15, 30), 'algorithm': ('ball_tree', 'kd_tree', 'brute'), }; param_grids.append(parmas_knn_clf) params_sgd_clf = { 'loss': ('log', 'modified_huber'), # ('hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron') 'penalty': ('l2', 'l1', 'elasticnet'), 'alpha': (1e-1, 1e-2, 1e-3, 1e-4), 'max_iter': (50, 100, 150, 200, 500, 1000, 1500, 2000, 2500), 'class_weight': (None, 'balanced'), 
'learning_rate': ('optimal',), 'tol': (None, 1e-2, 1e-4, 1e-5, 1e-6), # 'random_state': (0,), }; param_grids.append(params_sgd_clf) kernel_type = 'svm-rbf-kernel' params_svm_clf = { # 'gamma': (1e-7, 1e-4, 1e-3, 1e-2, 0.1, 1.0, 10, 1e+2, 1e+3, 1e+5, 1e+7), 'gamma': (1e-5, 1e-3, 1e-2, 0.1, 1.0, 10, 1e+2, 1e+3, 1e+5), 'max_iter':(1e+2, 1e+3, 2 * 1e+3, 5 * 1e+3, 1e+4, 1.5 * 1e+3), 'degree': (1,2,4,8), 'coef0': (.001, .01, .1, 0.0, 1.0, 10.0), 'shrinking': (True, False), 'kernel': ['linear', 'poly', 'rbf', 'sigmoid',], 'class_weight': (None, 'balanced'), 'C': (1e-4, 1e-3, 1e-2, 0.1, 1.0, 10, 1e+2, 1e+3), 'probability': (True,), }; param_grids.append(params_svm_clf) parmas_tree = { 'splitter': ('random', 'best'), 'criterion':('gini', 'entropy'), 'max_features': (None, 'sqrt', 'log2'), 'max_depth': (None, 3, 5, 7, 10,), 'splitter': ('best', 'random',), 'class_weight': (None, 'balanced'), }; param_grids.append(parmas_tree) parmas_random_forest = { 'n_estimators': (3, 5, 7, 10, 30, 50, 70, 100, 150, 200), 'criterion':('gini', 'entropy'), 'bootstrap': (True, False), 'min_samples_leaf': (1,2,3,4,5), 'max_features': (None, 'sqrt', 'log2'), 'max_depth': (None, 3, 5, 7, 10,), 'class_weight': (None, 'balanced', 'balanced_subsample'), }; param_grids.append(parmas_random_forest) # Some variables to perform different tasks # ----------------------------------------------------- N_CV, N_KERNEL, N_GS = 9, 5, 6; nrows = N_KERNEL // 2 if N_KERNEL % 2 == 0 else N_KERNEL // 2 + 1; ncols = 2; grid_size = [nrows, ncols] %%javascript IPython.OutputArea.prototype._should_scroll = function(lines) { return false; }Summary Tables about Analyses done by means of different number of included Pricipal Components# df_9_, df_12_ = reshape_dfs_acc([df_9, df_12], num_col=N_KERNEL, n_cp_list=[9, 11]) # res = create_widget_list_df_vertical([df_9_, df_9_auc]); display.display(res) # res = create_widget_list_df_vertical([df_12_, df_12_auc]); display.display(res)Summary TestHere, in the following section I'm going to emulate a test in which I will test the different possible kinds of kernel trick, in other sense techniques, available for a Principal Component Analysis, shortly PCA, unsupervised statistical learning technique in order to remap the original features into a new N-dimensional reference system by means of the kernel approach adopted during the computation.Once the new N-dimensional feature space is available and ready, I will experiment a bounch of selected machine learning methods and procedures applied directly on the first two most informative principal components, that is, also referred to as PCA1 and PCA2, respectively, in order to display a sequence of decision boundaries and contours retrieved after having runned each method on the selected dataset, which has been divided into halves, ofd the same size, and with the same proportion of the two classes of the target variable.What follows is the related code, to the desciption given just above, and the results are also available through several rows of images that represent the contour and decision boundaries obtained thank to the several combinations of PCA's kernel trick and machine learning method for fitting a classifier:kernel_pca = ['linear', 'poly', 'rbf', 'cosine', 'sigmoid'] # linear, poly, rbf, sigmoid, cosine, precomputed scaler_techniques = ['StandardScaler', 'Normalize', 'MinMaxScaler'] X = rescaledX # Trying only StandardScaler approach err_list = classifier_comparison_by_pca_kernels(X, y, start_clf=0, stop_clf=10, 
scaler_technique=scaler_techniques[0], straitified_flag=True, kernels_pca_list=kernel_pca[:], figsize=(27, 9), by_pairs=False, singles=False, verbose=0, record_errors=True, avoid_func=False,)Wigner functionWigner distribution function gives the phase space distribution of a function. The definition is as follows, as per Advances in Optics and Photonics 3, 272–365 (2011) : $W_{f}(p,q) = \left(\dfrac{|K|}{2\pi}\right)^{N}\int f^{*}\left(q-\dfrac{q^{'}}{2}\right) f\left(q+\dfrac{q^{'}}{2}\right)exp(-iKq^{'}.p)d^{N}q^{'}$Direct integration is too slow as shown below.import numpy as np import matplotlib.pyplot as plt from scipy.signal import gaussian import quadpy from tqdm import tqdm from numba import jit,prange N = 500 f = np.zeros(N) f[int(N/2)-int(N/5):int(N/2)+int(N/5)]=1 #f = gaussian(N,10) x = np.linspace(-1,1,N) plt.plot(x,f) plt.ylabel('f') plt.xlabel('x') plt.show() scale_factor = 3 #Scale domain by this much domain_real = np.linspace(-scale_factor,scale_factor,scale_factor*N) domain_freq = np.fft.fftshift(np.fft.fftfreq(scale_factor*N,domain_real[1]-domain_real[0])) @jit def f1(f,f_,y,domain): i = int((y-domain[0])/(domain[1]-domain[0])) f_[:] = 0 N = len(f) f_[i-int(N/2):i+int(N/2)] = f return f_ z1 = np.zeros(scale_factor*N) z2 = np.zeros(scale_factor*N) q1 = -2 z1 = f1(f,z1,q1/2,domain_real) z2 = f1(f,z2,-q1/2,domain_real) fig,ax1 = plt.subplots(1,1) ax1.plot(domain_real,z1,'b') ax1.tick_params('y', colors='b') ax1.set_ylabel('z1') ax2 = ax1.twinx() ax2.plot(domain_real,z2,'g') ax2.tick_params('y', colors='g') ax2.set_ylabel('z2') ax1.set_xlabel('domain') plt.show() W = np.zeros((len(domain_real),len(domain_freq)),dtype=np.complex128) @jit def fill_W(domain_real,domain_freq,f,W,N,scale_factor): for q1 in np.linspace(-2,2,250): z1 = np.zeros(scale_factor*N) z2 = np.zeros(scale_factor*N) z1 = f1(f,z1,q1/2,domain_real) z2 = f1(f,z2,-q1/2,domain_real) for j in prange(scale_factor*N): for i in prange(scale_factor*N): p = domain_real[i] q = domain_freq[j] phase_factor = np.exp(-1j*q1*q) W[i][j] += z1[i]*z2[i]*phase_factor fill_W(domain_real,domain_freq,f,W,N,scale_factor) plt.rcParams["figure.figsize"] = (8,8) plt.imshow(np.abs(W)) plt.colorbar() plt.show() F = np.abs(np.fft.fftshift(np.fft.fft(f))) F = F/(np.max(np.abs(F))) scale_factor = 3 #Scale domain by this much domain_real = np.linspace(domain_freq[0],domain_freq[-1],scale_factor*N) domain_freq = np.fft.fftshift(np.fft.fftfreq(scale_factor*N,domain_real[1]-domain_real[0])) W = np.zeros((len(domain_freq),len(domain_real)),dtype=np.complex128) fill_W(domain_real,domain_freq,F,W,N,scale_factor) plt.rcParams["figure.figsize"] = (8,8) plt.imshow(np.abs(W)) plt.colorbar() plt.show()Tutorial for using the package `fast-ml` This package is as good as having a junior Data Scientist working for you. Most of the commonly used EDA steps, Missing Data Imputation techniques, Feature Engineering steps are covered in a ready to use format Part 5. Feature Engineering for Categorical Variables / Categorical Encodings 1. Import eda module from the package `from fast_ml.feature_engineering import FeatureEngineering_Categorical` 2. Define the imputer object. * For Categorical variables use `FeatureEngineering_Categorical`* For Numerical variables use `FeatureEngineering_Numerical``cat_imputer = FeatureEngineering_Categorical(method = 'label')` 3. Fit the object on your dataframe and provide a list of variables`cat_imputer.fit(train, variables = ['BsmtQual'])` 4. 
Apply the transform method on train / test dataset`train = cat_imputer.transform(train)`&`test = cat_imputer.transform(test)` 5. parameter dictionary gets created which store the values used for encoding. It can be viewed as`cat_imputer.param_dict_` Available Methods for Categorical Encoding1. One-hot Encoding1. Label Encoding / Integer Encoding1. Count Encoding1. Frequeny Encoding1. Ordered Label EncodingTarget Based Encoding6. Target Ordered Encoding7. Target Mean Value Encoding8. Target Probability Ratio Encoding (only Classification model)9. Weight of Evidence (WOE) Encoding (only Classification model) Start Feature Engineering for Categorical Variables# Import Libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from fast_ml.feature_engineering import FeatureEngineering_Categorical %matplotlib inline import warnings warnings.filterwarnings('ignore') df = pd.read_csv('../data/house_prices.csv') df.shape df.head(5) numeric_type = ['float64', 'int64'] category_type = ['object']Categorical Variables 1. BsmtQual#Before Imputation df['BsmtQual'].value_counts() cat_imputer1 = FeatureEngineering_Categorical(method = 'label') cat_imputer1.fit(df, variables = ['BsmtQual']) cat_imputer1.param_dict_ df = cat_imputer1.transform(df) #After Imputation df['BsmtQual'].value_counts()2. FireplaceQu#Before Imputation df['FireplaceQu'].value_counts() cat_imputer2 = FeatureEngineering_Categorical(method = 'freq') cat_imputer2.fit(df, variables = ['FireplaceQu']) print (cat_imputer2.param_dict_) df = cat_imputer2.transform(df) #After Imputation df['FireplaceQu'].value_counts()Flowing youtube chanel : https://www.youtube.com/watch?v=CoYIwoeQxMY&list=PLot-YkcC7wZ_2sxmRTZr2c121rjcaleqv# INSTALLING SPARK !apt-get install openjdk-8-jdk-headless -qq > /dev/null !wget -q https://downloads.apache.org/spark/spark-2.4.5/spark-2.4.5-bin-hadoop2.7.tgz !tar xf spark-2.4.5-bin-hadoop2.7.tgz !pip install -q findspark #DEFINING SYSTEM VARIABLES import os os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["SPARK_HOME"] = "/content/spark-2.4.5-bin-hadoop2.7" import findspark findspark.init() from pyspark.sql import SparkSession spark = SparkSession.builder.master("local[*]").getOrCreate()**EXAMPLE 1: WORD COUNT**rdd = spark.sparkContext.textFile("/content/drive/My Drive/BIG DATA/pyspark/Tutorial-2/datasets/word_count.txt") # NOT COMPLETE YET words = rdd.map(lambda x: x.split(" ")) words = words.count()**EXAMPLE 2: Airplans USA Problems**rdd = spark.sparkContext.textFile("/content/drive/My Drive/BIG DATA/pyspark/Tutorial-2/datasets/airports.text") result = rdd.map(lambda line: line.split(",")) \ .filter(lambda line: line[3] == "\"United States\"") \ .map(lambda line: "{},{}".format(line[1], line[2])) \ .collect() result**EXAMPLE 3 : : Aiplans , LATITUTE > 40**import re class Utils(): COMMA_DELIMITER = re.compile(''',(?=(?:[^"]*"[^"]*")*[^"]*$)''') result = rdd.map(lambda line: Utils.COMMA_DELIMITER.split(line)) \ .filter(lambda line : float(line[6]) > 40) \ .map(lambda line : "{},{}".format(line[1], line[6])) \ .collect() result**EXAMPLE 4 : (1) Union Operation **firstDataset = '/content/drive/My Drive/BIG DATA/pyspark/Tutorial-2/datasets/union/nasa_19950701.tsv.txt' secondDataSet = '/content/drive/My Drive/BIG DATA/pyspark/Tutorial-2/datasets/union/nasa_19950801.tsv.txt' firstRdd = spark.sparkContext.textFile(firstDataset) secondRdd = spark.sparkContext.textFile(secondDataSet) fullRdd = firstRdd.union(secondRdd) filtredRdd = fullRdd.filter(lambda line : 
not (line.startsWith("host") and "bytes")) sampleRdd = filtredRdd.sample(withReplacement = True, fraction = 0.1) sampleRdd**EXAMPLE 4 : (2) Intersection Operation **#Not working fullRdd = firstRdd.intersection(secondRdd) filtredRdd = fullRdd.filter(lambda line : not (line.startsWith("host") and "bytes")) filtredRdd**EXAMPLE 5 : Reduce **primeDataset = '/content/drive/My Drive/BIG DATA/pyspark/Tutorial-2/datasets/prime_nums.text' primeRdd = spark.sparkContext.textFile(primeDataset) prime_sum = primeRdd.flatMap(lambda line : line.split("\t")) \ .filter(lambda number: number) \ .map(lambda number: int(number)) \ .reduce(lambda x, y: x + y) prime_sumDefine class and matching indexlabel_list = ['3m', 'andes', 'cocacola', 'crayola', 'folgers','heineken','hunts','kellogg','kleenex',\ 'kotex','libava','macadamia','milo','mm','pocky','raisins','stax','swissmiss','vanish','viva'] LABEL = dict(zip(label_list, range(len(label_list)))) DATA_ROOT = "/media/arg_ws3/5E703E3A703E18EB/data/mm_FCN/real/" IMG_ROOT = osp.join(DATA_ROOT, "image") MASK_ROOT = osp.join(DATA_ROOT, "mask") img_list = os.listdir(IMG_ROOT) mask_list = os.listdir(MASK_ROOT) GT_ROOT = osp.join(DATA_ROOT, "groundtruths_mmfcn") DT_ROOT = osp.join(DATA_ROOT, "detections_real") if not osp.exists(GT_ROOT): os.makedirs(GT_ROOT) if not osp.exists(DT_ROOT): os.makedirs(DT_ROOT)Create ground truth file for mmfcn real datafor data in mask_list: mask = cv2.imread(osp.join(MASK_ROOT, data), cv2.IMREAD_GRAYSCALE) name = data.split('.')[0] x, y, w, h = cv2.boundingRect(mask) if (x, y, w, h) != (0, 0, 0, 0): label = int(mask[mask!=0][0])-1 cls = list(LABEL.keys())[list(LABEL.values()).index(label)] # f = open(osp.join(GT_ROOT, name + ".txt"), "w+") ss = cls + " " + str(x) + " " + str(y) + " " + str(w) + " " + str(h) f.write(ss) f.close() else: # if there is no any object inside f = open(osp.join(GT_ROOT, name + ".txt"), "w+") f.close()SSD detectionnet = build_ssd('test', 300, 21) # initialize SSD, +1 for background net.load_weights('/media/arg_ws3/5E703E3A703E18EB/research/mm_fcn/ssd/real/real_21.pth')Loading weights into state dict... 
Finished!Build SSD300 Networkfor img in img_list: image = cv2.imread(osp.join(IMG_ROOT, img)) x = cv2.resize(image, (300, 300)).astype(np.float32) x -= (104.0, 117.0, 123.0) x = x.astype(np.float32) x = x[:, :, ::-1].copy() x = torch.from_numpy(x).permute(2, 0, 1) xx = Variable(x.unsqueeze(0)) # wrap tensor in Variable if torch.cuda.is_available(): xx = xx.cuda() y = net(xx) scale = torch.Tensor(image.shape[1::-1]).repeat(2) detections = y.data # torch.Size([1, 4, 200, 5]) --> [batch?, class, object, coordinates] objs = [] for i in range(detections.size(1)): # detections.size(1) --> class size for j in range(5): # each class choose top 5 predictions if detections[0, i, j, 0].cpu().numpy() > 0.1: score = detections[0, i, j, 0] pt = (detections[0, i, j,1:]*scale).cpu().numpy() objs.append([int(pt[0]), int(pt[1]), int(pt[2]-pt[0]+1), int(pt[3]-pt[1]+1), \ list(LABEL.keys())[list(LABEL.values()).index(i-1)], score.item()]) #print(objs) f = open(osp.join(DT_ROOT, img.split('.')[0] + ".txt"), "w+") for obj in objs: ss = obj[4] + " " + str(obj[5])[1:] + " " + str(obj[0]) + " " + str(obj[1]) +\ " " + str(obj[2]) + " " + str(obj[3]) + "\n" f.write(ss) f.close()Exercise 8 - 201924178 - 201527819- 202020148 - 201113765 - 202022939 Car Price PredictionPredict if the price of a car is low or high%matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt data = pd.read_csv('../datasets/dataTrain_carListings.zip') data = data.loc[data['Model'].str.contains('Camry')].drop(['Make', 'State'], axis=1) data = data.join(pd.get_dummies(data['Model'], prefix='M')) data['HighPrice'] = (data['Price'] > data['Price'].mean()).astype(int) data = data.drop(['Model', 'Price'], axis=1) data.head() data.shape y = data['HighPrice'] X = data.drop(['HighPrice'], axis=1) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)Exercise 8.1Estimate a Decision Tree Classifier Manually using the code created in the decision trees notebook.Evaluate the accuracy on the testing setdef gini(y): if y.shape[0] == 0: return 0 else: return 1 - (y.mean()**2 + (1 - y.mean())**2) def gini_impurity(X_col, y, split): "Calculate the gain of an split k on feature j" filter_l = X_col < split y_l = y.loc[filter_l] y_r = y.loc[~filter_l] n_l = y_l.shape[0] n_r = y_r.shape[0] gini_y = gini(y) gini_l = gini(y_l) gini_r = gini(y_r) gini_impurity_ = gini_y - (n_l / (n_l + n_r) * gini_l + n_r / (n_l + n_r) * gini_r) return gini_impurity_ def best_split(X, y, num_pct=10): features = range(X.shape[1]) best_split = [0, 0, 0] # j, split, gain # For all features for j in features: splits = np.percentile(X.iloc[:, j], np.arange(0, 100, 100.0 / (num_pct+1)).tolist()) splits = np.unique(splits)[1:] # For all splits for split in splits: gain = gini_impurity(X.iloc[:, j], y, split) if gain > best_split[2]: best_split = [j, split, gain] return best_split def tree_grow(X, y, level=0, min_gain=0.001, max_depth=None, num_pct=10): # If only one observation if X.shape[0] == 1: tree = dict(y_pred=y.iloc[:1].values[0], y_prob=0.5, level=level, split=-1, n_samples=1, gain=0) return tree # Calculate the best split j, split, gain = best_split(X, y, num_pct) # save tree and estimate prediction y_pred = int(y.mean() >= 0.5) y_prob = (y.sum() + 1.0) / (y.shape[0] + 2.0) # Laplace correction tree = dict(y_pred=y_pred, y_prob=y_prob, level=level, split=-1, n_samples=X.shape[0], gain=gain) # Check stooping criteria if gain < min_gain: return tree if max_depth is 
not None: if level >= max_depth: return tree # No stooping criteria was meet, then continue to create the partition filter_l = X.iloc[:, j] < split X_l, y_l = X.loc[filter_l], y.loc[filter_l] X_r, y_r = X.loc[~filter_l], y.loc[~filter_l] tree['split'] = [j, split] # Next iteration to each split tree['sl'] = tree_grow(X_l, y_l, level + 1, min_gain=min_gain, max_depth=max_depth, num_pct=num_pct) tree['sr'] = tree_grow(X_r, y_r, level + 1, min_gain=min_gain, max_depth=max_depth, num_pct=num_pct) return tree def tree_predict(X, tree, proba=False): predicted = np.ones(X.shape[0]) # Check if final node if tree['split'] == -1: if not proba: predicted = predicted * tree['y_pred'] else: predicted = predicted * tree['y_prob'] else: j, split = tree['split'] filter_l = (X.iloc[:, j] < split) X_l = X.loc[filter_l] X_r = X.loc[~filter_l] if X_l.shape[0] == 0: # If left node is empty only continue with right predicted[~filter_l] = tree_predict(X_r, tree['sr'], proba) elif X_r.shape[0] == 0: # If right node is empty only continue with left predicted[filter_l] = tree_predict(X_l, tree['sl'], proba) else: predicted[filter_l] = tree_predict(X_l, tree['sl'], proba) predicted[~filter_l] = tree_predict(X_r, tree['sr'], proba) return predicted from sklearn.metrics import accuracy_score tree = tree_grow(X_train, y_train, level=0, min_gain=0.001, max_depth=3, num_pct=10) y_predicted = tree_predict(X_test, tree) accuracy_score(y_test, y_predicted)Exercise 8.2Estimate a Bagging of 10 Decision Tree Classifiers Manually using the code created in bagging notebook.Evaluate the accuracy on the testing setnp.random.seed(42) n_samples = train.shape[0] n_trees = 10 samples = [np.random.choice(a=n_samples, size=n_samples, replace=True) for _ in range(1, n_trees +1 )] from sklearn.tree import DecisionTreeClassifier accuracy_scores = [] tree = DecisionTreeClassifier(max_depth=None) for i, sample in enumerate(samples): subsample = X_train.iloc[sample, :] label = y_train.iloc[sample] tree.fit(subsample,label) y_pred = tree.predict(X_test) accuracy_scores.append(accuracy_score(y_test, y_pred)) print(i, accuracy_score(y_test, y_pred)) ensemble_accuracy = sum(accuracy_scores) / float(len(accuracy_scores)) print("Bagged Trees Accuracy:" + str(ensemble_accuracy))0 0.8281105990783411 1 0.8361751152073733 2 0.8352534562211982 3 0.8366359447004609 4 0.8299539170506912 5 0.8391705069124424 6 0.8274193548387097 7 0.8380184331797235 8 0.8288018433179724 9 0.8352534562211982 Bagged Trees Accuracy:0.8334792626728109Exercise 8.3Compare the impact in the results by varing the parameter max_featuresEvaluate the accuracy on the testing setfrom sklearn.ensemble import BaggingClassifier accuracy_scores = [] max_features = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1] for i in max_features: bagged_trees = BaggingClassifier(base_estimator = DecisionTreeClassifier(), n_estimators = 10, random_state = 42, max_features = i ) bagged_trees.fit(X_train, y_train) y_pred = bagged_trees.predict(X_test) accuracy_score(y_test, y_pred) accuracy_scores.append(accuracy_score(y_test, y_pred)) plt.plot( max_features, accuracy_scores )- Al variar el parámetro max_features entre cero y el 100% de los features se observa que el accuracy se maximiza con el 70% de features Exercise 8.4Estimate a Bagging of 10 Decision Tree Classifiers with `max_features = log(n_features)`Evaluate the accuracy on the testing setbagged_trees_log_features = BaggingClassifier(base_estimator = DecisionTreeClassifier(max_features='log2'), n_estimators = 10, random_state = 42 ) 
bagged_trees_log_features.fit(X_train, y_train) y_pred = bagged_trees_log_features.predict(X_test) accuracy_score(y_test, y_pred)Exercise 8.5Using sklearn, train a RandomForestClassifierEvaluate the accuracy on the testing setfrom sklearn.ensemble import RandomForestClassifier from sklearn import metrics from sklearn.model_selection import cross_val_score rf = RandomForestClassifier() rf.fit(X_train, y_train) y_pred=rf.predict(X_test) print("Accuracy:",metrics.accuracy_score(y_test, y_pred))Accuracy: 0.8382488479262673Exercise 8.6Find the best parameters of the RandomForestClassifier (max_depth, max_features, n_estimators)Evaluate the accuracy on the testing set# list of values to try for n_estimators estimator_range = range(10, 500, 20) # list to store the average Accuracy for each value of n_estimators accuracy_scores = [] # use 5-fold cross-validation with each value of n_estimators (WARNING: SLOW!) for estimator in estimator_range: clf = RandomForestClassifier(n_estimators=estimator, random_state=1, n_jobs=-1) accuracy_scores.append(cross_val_score(rf, X_train, y_train, cv=2, scoring='accuracy').mean()) plt.plot(estimator_range, accuracy_scores) plt.xlabel('n_estimators') plt.ylabel('Accuracy') # define features: exclude career statistics (which start with "C") and the response (Salary) feature_cols = data.columns.drop('HighPrice') feature_cols # list of values to try for max_features feature_range = range(1, len(feature_cols)+1) # list to store the average Accuracy for each value of max_features accuracy_scores = [] # use 10-fold cross-validation with each value of max_features (WARNING: SLOW!) for feature in feature_range: clf = RandomForestClassifier(n_estimators=300, max_features=feature, random_state=1, n_jobs=-1) accuracy_scores.append(cross_val_score(clf, X_train, y_train, cv=2, scoring='accuracy').mean()) plt.plot(feature_range, accuracy_scores) plt.xlabel('max_features') plt.ylabel('Accuracy') # list of values to try for max_depth depth_range = range(1,20,1) # list to store the average Accuracy for each value of max_features accuracy_scores = [] # use 10-fold cross-validation with each value of max_features (WARNING: SLOW!) for depth in depth_range: clf = RandomForestClassifier(n_estimators=300, max_features= 8, max_depth= depth, random_state=1, n_jobs=-1) accuracy_scores.append(cross_val_score(clf, X_train, y_train, cv=2, scoring='accuracy').mean()) plt.plot(depth_range, accuracy_scores) plt.xlabel('max_depth') plt.ylabel('Accuracy') rf = RandomForestClassifier(max_depth = 6, n_estimators = 300, max_features = 8) rf.fit(X_train, y_train) y_pred=rf.predict(X_test) print("Accuracy:",metrics.accuracy_score(y_test, y_pred))Accuracy: 0.8811059907834101Exercise 8.7 Using xgboost train a XGBClassifier Evaluate the accuracy on the testing setfrom xgboost import XGBClassifier xgb = XGBClassifier() xgb.fit(X_train, y_train) y_pred=xgb.predict(X_test) print("Accuracy:",metrics.accuracy_score(y_test, y_pred))/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:888: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1]. warnings.warn(label_encoder_deprecation_msg, UserWarning)Exercise 8.8Using xgboost train a XGBClassifier Modify the parameters learning rate, gamma, colsample_bytree. 
Explain what each parameter means. Evaluate the accuracy on the testing set.

Learning rate

In machine learning, the learning rate is a tuning parameter, used across many types of models, that determines the step size at each iteration while moving toward a minimum of a loss function; in the literature it is also known as the gain.

# Change learning rate xgb = XGBClassifier(learning_rate = 0.1) xgb.fit(X_train, y_train) y_pred=xgb.predict(X_test) print("Accuracy:",metrics.accuracy_score(y_test, y_pred))

[23:43:54] WARNING: /opt/concourse/worker/volumes/live/7a2b9f41-3287-451b-6691-43e9a6c0910f/volume/xgboost-split_1619728204606/work/src/learner.cc:1061: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'binary:logistic' was changed from 'error' to 'logloss'. Explicitly set eval_metric if you'd like to restore the old behavior. Accuracy: 0.8794930875576037

Colsample by tree

This parameter denotes the fraction of columns that are randomly sampled for each tree; its values lie between 0.5 and 1, and the default is 1. (Note: the keyword argument is actually spelled `colsample_bytree`; with `colsample_by_tree` XGBoost warns below that the parameter might not be used, so this run effectively keeps the default.)

# Change colsample by tree xgb = XGBClassifier(colsample_by_tree = 0.8) xgb.fit(X_train, y_train) y_pred=xgb.predict(X_test) print("Accuracy:",metrics.accuracy_score(y_test, y_pred))

[23:44:23] WARNING: /opt/concourse/worker/volumes/live/7a2b9f41-3287-451b-6691-43e9a6c0910f/volume/xgboost-split_1619728204606/work/src/learner.cc:541: Parameters: { colsample_by_tree } might not be used. This may not be accurate due to some parameters are only used in language bindings but passed down to XGBoost core. Or some parameters are not used but slip through this verification. Please open an issue if you find above cases. [23:44:23] WARNING: /opt/concourse/worker/volumes/live/7a2b9f41-3287-451b-6691-43e9a6c0910f/volume/xgboost-split_1619728204606/work/src/learner.cc:1061: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'binary:logistic' was changed from 'error' to 'logloss'. Explicitly set eval_metric if you'd like to restore the old behavior. Accuracy: 0.8790322580645161

Gamma

Gamma is the minimum loss reduction the algorithm requires to make a further partition on a leaf node of the tree. The larger gamma is, the more conservative the algorithm will be.

# Change gamma xgb = XGBClassifier(gamma = 2) xgb.fit(X_train, y_train) y_pred=xgb.predict(X_test) print("Accuracy:",metrics.accuracy_score(y_test, y_pred))

[23:45:09] WARNING: /opt/concourse/worker/volumes/live/7a2b9f41-3287-451b-6691-43e9a6c0910f/volume/xgboost-split_1619728204606/work/src/learner.cc:1061: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'binary:logistic' was changed from 'error' to 'logloss'. Explicitly set eval_metric if you'd like to restore the old behavior.
Accuracy: 0.880184331797235load dataDATASET_ID = 'castellucci_mouse_usv_segmented' df_loc = DATA_DIR / 'syllable_dfs' / DATASET_ID / 'mouse.pickle' syllable_df = pd.read_pickle(df_loc) del syllable_df['audio'] syllable_df[:3] np.shape(syllable_df.spectrogram.values[0])projectensure_dir(DATA_DIR / 'embeddings' / DATASET_ID / 'full') for indv in tqdm(syllable_df.indv.unique()): subset_df = syllable_df[syllable_df.indv == indv] specs = list(subset_df.spectrogram.values) specs = [i/np.max(i) for i in tqdm(specs)] specs_flattened = flatten_spectrograms(specs) print(np.shape(specs_flattened)) min_dist = 0.5 cuml_umap = cumlUMAP(min_dist = min_dist) embedding = cuml_umap.fit_transform(specs_flattened) subset_df['umap'] = list(embedding) fig, ax = plt.subplots() ax.scatter(embedding[:,0], embedding[:,1], s=1, color='k', alpha = 1) ax.set_xlim([-8,8]) ax.set_ylim([-8,8]) plt.show() subset_df.to_pickle(DATA_DIR / 'embeddings' / DATASET_ID / (str(min_dist) + '_' + indv + '.pickle'))Исследование графиков Вариант задания: 5![image-2.png](attachment:image-2.png) Заданная формула функции:y = 2*log((x - 1) / x) + 1 show("y = ", y)Область определения:D1 = (-infinity, 0) D2 = (1, +infinity) show("D: ", D1, "⋃", D2) show(plot(y, (x, -22, 22)), xmin= -10, xmax= 10, ymin= -4, ymax= 7, aspect_ratio=2, figsize=[4,3])Четность/нечетность:if (y(x)-y(-x)).expand().simplify_full() != 0: print("Функция нечетная.") else: print("Функция четная.")Функция нечетная.Периодичностьvar("T") eq = y(x) - y(x + T) solve(eq, T)вывод: не является периодической Точки пересечения функции с осями координат:show(plot(y, (x, -22, 22)), xmin= -10, xmax= 10, ymin= -4, ymax= 7, aspect_ratio=2, figsize=[4,3])График никогда не будет пересекать ось Oy, что следует из области определения функцииsolve(y == 0, x) var("x0") x0 = solve(y == 0, x) show(x0) show(plot(y, (x, -22, 22)), xmin= 0, xmax= 4, ymin= -1, ymax= 1, aspect_ratio=1, figsize=[3,2])мы имеем 1 точку пересечения с осью Ox Промежутки знакопостоянства: у нас одна точка пересечения Oxshow(x0) print(n(e^(1/2)/(e^(1/2) - 1)))определяем знаки справа и слеваprint(n(y(2))) print(n(y(3))) intervals_of_constancy = plot(0, xmin=-2, xmax=5, ymin=-0.5, ymax=1) intervals_of_constancy += circle((0, 0), radius=0.05414) intervals_of_constancy += circle((1, 0), radius=0.05414) intervals_of_constancy += disk((2.54, 0), 0.0541, (0, 2*pi), color='black') # здесь добавляем подписи точек (значения по оси X) intervals_of_constancy += text("0", (0, 0.21), color="black", fontsize=14) intervals_of_constancy += text("1", (1, 0.21), color="black", fontsize=14) intervals_of_constancy += text("e^(1/2)/(e^(1/2) - 1)", (2.54, 0.21), color="black", fontsize=12) # Здесь добавляем подписи знаков промежутков знакопостоянства intervals_of_constancy += text("_", (-1, -0.06), color="black", fontsize=25) intervals_of_constancy += text("_", (1.8, -0.06), color="black", fontsize=25) intervals_of_constancy += text("+", (4 , -0.2), color="black", fontsize=25) # оси не нужны - скрываем intervals_of_constancy.show(axes=False)Промежутки возрастания и убывания:var('dy') dy = y.derivative() show("dy= ",dy)y(x) возрастает на D, то есть при:x_0 = solve(dy > 0, x) show(x_0[0],",", x_0[1])Точки экстремума и значения в этих точках: Экстремумов нет Исследовать поведение функции в окрестности «особых» точек и при больших по модулю x: Особые точки: 0; 1print("y(10000)= ",float(y(10000)), "; y(-10000)= ", float(y(-10000)))y(10000)= 0.9997999899993333 ; y(-10000)= 1.0001999900006666При больших положительных и отрицательных значениях x 
функция стремится к 0 справа и к 0 слева соответственно Непрерывность. Наличие точек разрыва и их классификация:k1 = limit(y(x), 'right', x=0) show("Предел справа для точки x1: не существует на D") k2 = limit(y(x), 'left', x=0) show("Предел слева для точки x1: ",k2) k3 = limit(y(x), 'right', x=1) show("Предел справа для точки x2: ", k3) k4 = limit(y(x), 'left', x=1) show("Предел слева для точки x2: не существует на D") if (abs(k1) == infinity or abs(k2) == infinity): show("x1 = 0 - точка разрыва 2 рода") else: show("x1 = 0 - точка разрыва 1 рода") if (abs(k3) == infinity or abs(k4) == infinity): show("x2 = 1 - точка разрыва 2 рода") else: show("x2 = 1 - точка разрыва 1 рода")Асимптоты:show("Уравнение асимптот: y = kx + b") k = limit((y(x)/x), x=infinity) show("k =", k) b = limit((y(x) - k*x), x = infinity) show("b = ", b) show("Горизонтальная асимптота: y = ", k*x + b) show("Вертикальные асимптоты: х1 = 0, x2 = 1") xmin = -3 xmax = 3 ymin = -3 ymax = 3 x1 = 0 x2 = 1 y1 = 1 h_asymptote1 = line([(xmin, x1), (xmax, x1)], thickness=2.0) h_asymptote2 = line([(xmin, x2), (xmax, x2)], thickness=2.0) v_asymptote = line([(y1, ymin), (y1, ymax)], thickness=2.0) show(h_asymptote1+h_asymptote2+v_asymptote, xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, aspect_ratio=1, figsize=[4, 3])Deviceqasm_sim = Aer.get_backend("qasm_simulator") statevector_sim = Aer.get_backend("statevector_simulator") unit_sim = Aer.get_backend("unitary_simulator") qasm_sim.provider() #IBMQ.load_account() # Logging to IBMQ provider = IBMQ.get_provider('ibm-q') backends = least_busy(provider.backends(filters = lambda x:x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational==True)) backends quantum_computer = provider.get_backend('ibmq_belem') plot_gate_map(quantum_computer) plot_error_map(quantum_computer)Circuitqr = QuantumRegister(3, "q") cr = ClassicalRegister(3, "c") qc = QuantumCircuit(qr, cr) qc.initialize([1/np.sqrt(2), 1/np.sqrt(2)], 0) qc.cx([qr[0], qr[1]], [qr[1], qr[2]]) qc.barrier() qc.draw() qreg = QuantumRegister(1) qgate = QuantumCircuit(qreg) qgate.h(qreg) #qgate.cp(np.pi, 0, 1) gate = qgate.to_gate() gate.name = 'my_gate_ϕ' qc.append(gate, [2]) qc.draw(output='mpl') qc_decomp = qc.decompose() qc_decomp.draw() qc.barrier() qc.measure(qr, cr) circuit_drawer(qc, output='mpl') qgate.measure_all() qasm_str = qgate.qasm() qasm_str qc_qasm = QuantumCircuit.from_qasm_str(qasm_str) qc_qasm.draw(output="mpl")Executeqc_job = execute(qc, qasm_sim, shorts=1024) qc_result = qc_job.result() qc_count = qc_result.get_counts() qgate_job = qasm_sim.run(qgate, shots=1024) qgate_result = qgate_job.result() qgate_count = qgate_result.get_counts() print("QC : ", qc_count) print("QGATE : ", qgate_count)QC : {'000': 256, '100': 279, '111': 268, '011': 221} QGATE : {'0': 524, '1': 500}Plotplot_histogram([qc_count, qgate_count], color=["green", "blue"], sort="asc", legend=["qc", "qgate"], title="My super result") qc_sv = QuantumCircuit(3) #qc_sv.rx(np.pi/3, 0) qc_sv.h(0) qc_sv.rx(np.pi/3, 1) qc_sv.draw() q_operator = QuantumCircuit(2) q_operator.cx(0, 1) operator = qi.Operator(q_operator) operator qc_sv.append(q_operator, [0, 2]) qc_sv.decompose().draw() qc_job_sv = execute(qc_sv, statevector_sim).result() qc_psi = qc_job_sv.get_statevector() qc_psi plot_bloch_multivector(qc_psi, reverse_bits=True) plot_bloch_vector([np.pi/3, 0, 1/np.sqrt(2)], title="Mon super vector") plot_state_city(qc_psi, title="My city!", color=["green", "yellow"]) plot_state_hinton(qc_psi) plot_state_qsphere(qc_psi) 
plot_state_paulivec(qc_psi) transpile_qc = transpile(qc_sv, backend=quantum_computer, optimization_level=3, coupling_map=[[0, 1], [1, 2]]) plot_circuit_layout(transpile_qc, quantum_computer) qiskit.__version__ %qiskit_version_tableNode Representation Learning with attri2vec on Citeseer This is the python implementation of the attri2vec algorithm outlined in paper ***[Attributed Network Embedding Via Subspace Discovery](https://arxiv.org/abs/1901.04095)*** , , and , arXiv:1901.04095, [cs.SI], 2019. The implementation uses the stellargraph libraries. attri2vecattri2vec learns node representations by performing a linear/non-linear mapping on node content attributes. To make the learned node representations respect structural similarity, [`DeepWalk`](https://dl.acm.org/citation.cfm?id=2623732)/[`node2vec`](https://snap.stanford.edu/node2vec) learning mechanism is used to make nodes sharing similar random walk context nodes represented closely in the subspace, which is achieved by maximizing the occurrence probability of context nodes conditioned on the representation of the target nodes. The probability is modelled by Softmax and negative sampling is used to speed up its calculation. This makes attri2vec equivalent to predict whether a node occurs in the given target node's context in random walks with the representation of the target node, by minimizing the cross-entropy loss. In implementation, node embeddings are learnt by solving a simple classification task: given a large set of "positive" `(target, context)` node pairs generated from random walks performed on the graph (i.e., node pairs that co-occur within a certain context window in random walks), and an equally large set of "negative" node pairs that are randomly selected from the graph according to a certain distribution, learn a binary classifier that predicts whether arbitrary node pairs are likely to co-occur in a random walk performed on the graph. Through learning this simple binary node-pair-classification task, the model automatically learns an inductive mapping from attributes of nodes to node embeddings in a low-dimensional vector space, which preserves structural and feature similarities of the nodes. To train the attri2vec model, we first construct a training set of nodes, which is composed of an equal number of positive and negative `(target, context)` pairs from the graph. The positive `(target, context)` pairs are the node pairs co-occurring on random walks over the graph whereas the negative node pairs are the sampled randomly from the global node degree distribution of the graph. In attri2vec, each node is attached with two kinds of embeddings: 1) the inductive 'input embedding', i.e, the objective embedding, obtained by perform a linear/non-linear transformation on node content features, and 2) 'output embedding', i.e., the parameter vector used to predict its occurrence as a context node, obtained by looking up a parameter table. Given a `(target, context)` pair, attri2vec outputs a predictive value to indicate whether it is positive or negative, which is obtained by performing the dot product of the 'input embedding' of the target node and the 'output embedding' of the context node, followed by a sigmoid activation. 
The entire model is trained end-to-end by minimizing the binary cross-entropy loss function with regards to predicted node pair labels and true node pair labels, using stochastic gradient descent (SGD) updates of the model parameters, with minibatches of 'training' node pairs generated on demand and fed into the model.import networkx as nx import pandas as pd import numpy as np import os import random import stellargraph as sg from stellargraph.data import UnsupervisedSampler from stellargraph.mapper import Attri2VecLinkGenerator, Attri2VecNodeGenerator from stellargraph.layer import Attri2Vec, link_classification from tensorflow import keras from pandas.core.indexes.base import Index import matplotlib.pyplot as plt from sklearn.manifold import TSNE from sklearn.decomposition import PCA from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegressionCV from sklearn.metrics import accuracy_scoreDataset The dataset is the citation network Citeseer.It can be downloaded by clicking [here](https://linqs-data.soe.ucsc.edu/public/lbc/citeseer.tgz)The following is the description of the dataset from the publisher,> The CiteSeer dataset consists of 3312 scientific publications classified into one of six classes. The citation network consists of 4732 links. Each publication in the dataset is described by a 0/1-valued word vector indicating the absence/presence of the corresponding word from the dictionary. The dictionary consists of 3703 unique words. The README file in the dataset provides more details.Download and unzip the citeseer.tgz file to a location on your computer. We assume that the dataset is stored in the directory`~/data/citeseer/`where the files `citeseer.cites` and `citeseer.content` can be located.We are going to load the data into a networkx object.data_dir = "~/data/citeseer"Load edges in order 'cited-paper' <- 'citing-paper'.citeseer_location = os.path.expanduser(os.path.join(data_dir, "citeseer.cites")) g_nx = nx.read_edgelist(path=citeseer_location, create_using=nx.DiGraph()).reverse()Convert the graph to undirected graph.g_nx = g_nx.to_undirected()Load the node attribute data.citeseer_data_location = os.path.expanduser(os.path.join(data_dir, "citeseer.content")) attr_names = ["w_{}".format(ii) for ii in range(3703)] node_column_names = attr_names + ["subject"] node_attr = pd.read_csv(citeseer_data_location, sep='\t', header=None, names=node_column_names)Change the type of the indexes of node_attr to str.node_attr.index = Index(list(map(str, list(node_attr.index))))The original graph contains some nodes with no attributes. We remove them here.g_nx = g_nx.subgraph(list(node_attr.index))Select the largest connected component. 
For clarity we ignore isolated nodes and subgraphs.g_nx_ccs = (g_nx.subgraph(c).copy() for c in nx.connected_components(g_nx)) g_nx = max(g_nx_ccs, key=len) print("Largest subgraph statistics: {} nodes, {} edges".format( g_nx.number_of_nodes(), g_nx.number_of_edges()))Largest subgraph statistics: 2110 nodes, 3720 edgesSpecify node and edge types.nx.set_node_attributes(g_nx, "paper", "label") nx.set_edge_attributes(g_nx, "cites", "label")Get the ids of the nodes in the selected largest connected component.node_ids = sorted(list(g_nx.nodes))Get node features.node_features = node_attr[attr_names].reindex(node_ids)Create the Stellargraph with node features.G = sg.StellarGraph(g_nx, node_features=node_features) print(G.info())NetworkXStellarGraph: Undirected multigraph Nodes: 2110, Edges: 3720 Node types: paper: [2110] Edge types: paper-cites->paper Edge types: paper-cites->paper: [3720]Train attri2vec on Citeseer Specify the other optional parameter values: root nodes, the number of walks to take per node, the length of each walk.nodes = list(G.nodes()) number_of_walks = 4 length = 5Create the UnsupervisedSampler instance with the relevant parameters passed to it.unsupervised_samples = UnsupervisedSampler(G, nodes=nodes, length=length, number_of_walks=number_of_walks)Set the batch size and the number of epochs.batch_size = 50 epochs = 4Define an attri2vec training generator, which generates a batch of (feature of target node, index of context node, label of node pair) pairs per iteration.generator = Attri2VecLinkGenerator(G, batch_size) train_gen = generator.flow(unsupervised_samples)Building the model: a 1-hidden-layer node representation ('input embedding') of the `target` node and the parameter vector ('output embedding') for predicting the existence of `context node` for each `(target context)` pair, with a link classification layer performed on the dot product of the 'input embedding' of the `target` node and the 'output embedding' of the `context` node.Attri2Vec part of the model, with a 128-dimenssion hidden layer, no bias term and no normalization. 
(Normalization can be set to 'l2').layer_sizes = [128] attri2vec = Attri2Vec( layer_sizes=layer_sizes, generator=generator, bias=False, normalize=None ) # Build the model and expose input and output sockets of attri2vec, for node pair inputs: x_inp, x_out = attri2vec.build()Use the link_classification function to generate the prediction, with the 'ip' edge embedding generation method and the 'sigmoid' activation, which actually performs the dot product of the 'input embedding' of the target node and the 'output embedding' of the context node followed by a sigmoid activation.prediction = link_classification( output_dim=1, output_act="sigmoid", edge_embedding_method='ip' )(x_out)link_classification: using 'ip' method to combine node embeddings into edge embeddingsStack the Attri2Vec encoder and prediction layer into a Keras model, and specify the loss.model = keras.Model(inputs=x_inp, outputs=prediction) model.compile( optimizer=keras.optimizers.Adam(lr=1e-3), loss=keras.losses.binary_crossentropy, metrics=[keras.metrics.binary_accuracy], )Train the model.history = model.fit_generator( train_gen, epochs=epochs, verbose=2, use_multiprocessing=False, workers=1, shuffle=True, )Epoch 1/4 1351/1351 - 33s - loss: 0.6828 - binary_accuracy: 0.5571 Epoch 2/4 1351/1351 - 41s - loss: 0.5447 - binary_accuracy: 0.7269 Epoch 3/4 1351/1351 - 38s - loss: 0.3652 - binary_accuracy: 0.8618 Epoch 4/4 1351/1351 - 37s - loss: 0.2667 - binary_accuracy: 0.9101Visualise Node Embeddings Build the node based model for predicting node representations from node content attributes with the learned parameters. Below a Keras model is constructed, with x_inp[0] as input and x_out[0] as output. Note that this model's weights are the same as those of the corresponding node encoder in the previously trained node pair classifier.x_inp_src = x_inp[0] x_out_src = x_out[0] embedding_model = keras.Model(inputs=x_inp_src, outputs=x_out_src)Get the node embeddings by applying the learned mapping function to node content features.node_gen = Attri2VecNodeGenerator(G, batch_size).flow(node_ids) node_embeddings = embedding_model.predict_generator(node_gen, workers=1, verbose=1)43/43 [==============================] - 0s 3ms/stepGet node subjects.node_targets = [ node_attr["subject"][node_id] for node_id in node_ids ]Transform the embeddings to 2d space for visualisation.transform = TSNE # PCA trans = transform(n_components=2) node_embeddings_2d = trans.fit_transform(node_embeddings) # draw the embedding points, coloring them by the target label (paper subject) alpha = 0.7 label_map = { l: i for i, l in enumerate(np.unique(node_targets)) } node_colours = [ label_map[target] for target in node_targets ] plt.figure(figsize=(7,7)) plt.axes().set(aspect="equal") plt.scatter(node_embeddings_2d[:,0], node_embeddings_2d[:,1], c=node_colours, cmap="jet", alpha=alpha) plt.title('{} visualization of node embeddings'.format(transform.__name__)) plt.show()Ejercicio 01Convertir el siguiente algoritmo de O(n^2)a un algoritmo secuencial O(n)# 1. 
Convert from O(n^2) to O(n) # O(n^2) algorithm def greatestNumber(array): # Given a set of values, the function # returns the largest number for i in array: isIValTheGreatest = True for j in array: if j > i: isIValTheGreatest = False if isIValTheGreatest: return i A=[1,5,7,8,2,15,6] print(greatestNumber(A))15**Computational complexity**![](images/E1_cod1_1.png)![](https://drive.google.com/uc?export=view&id=1G7IHSogKuBc051kniSNAyLrwEQBKw85S)So the computational complexity is $O(n^2)$, i.e. quadratic. Converting the algorithm# Algorithm converted to O(n) def mayorNumber(arreglo): # Given a set of values, the function # returns the largest number mayor = arreglo[0] # Iterate and keep the running maximum for elemento in arreglo: if elemento > mayor: mayor = elemento return mayor A=[1,5,7,8,2,15,6] print(mayorNumber(A))15Checking the Dataset py = 0, ey = 1with h5py.File("/content/drive/My Drive/Projects/GSoC 2021/electron-photon-dataset/photon.hdf5", "r") as f: px = np.asarray(f['X'][()], dtype=np.float32) py = np.asarray(f['y'][()], dtype=np.float32) print("Loaded photon dataset!") with h5py.File("/content/drive/My Drive/Projects/GSoC 2021/electron-photon-dataset/electron.hdf5", "r") as f: ex = np.asarray(f['X'][()], dtype=np.float32) ey = np.asarray(f['y'][()], dtype=np.float32) print("Loaded electron dataset!") print("Photon dataset shape:", px.shape, py.shape) print("Electron dataset shape:", ex.shape, ey.shape) max_photon, min_photon = np.max(px[:, :, :, 0]), np.min(px[:, :, :, 0]) max_electron, min_electron = np.max(ex[:, :, :, 0]), np.min(ex[:, :, :, 0]) print(max_photon, min_photon) print(max_electron, min_electron) max(max_photon, abs(min_photon), max_electron, abs(min_electron)) #sample_size = 10000 X = np.concatenate((px[:, :, :, 0], ex[:, :, :, 0]), axis=0) y = np.concatenate((py[:], ey[:]), axis=0) #_, X, _, y = train_test_split(X, y, test_size=sample_size, random_state=88) print(X.shape, y.shape) print("Photon:", np.argmax(np.mean(px[:, :, :, 0], axis=0))) print("Electron:", np.argmax(np.mean(ex[:, :, :, 0], axis=0))) center = [int(528/32), 528%32] img_size = 8 X = X[:, (center[0]-int(img_size/2)):(center[0]+int(img_size/2)), (center[1]-int(img_size/2)):(center[1]+int(img_size/2))] X.shape x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=2021) scaler = StandardScaler() scaler.fit(x_train.reshape(-1, img_size*img_size)) x_train = scaler.transform(x_train.reshape(-1, img_size*img_size)) x_train = x_train.reshape(-1, img_size, img_size) y_train = tf.keras.utils.to_categorical(y_train) x_test = scaler.transform(x_test.reshape(-1, img_size*img_size)) x_test = x_test.reshape(-1, img_size, img_size) y_test = tf.keras.utils.to_categorical(y_test) print("Train set shape:", x_train.shape, y_train.shape) print("Test set shape:", x_test.shape, y_test.shape) plt.plot(scaler.scale_) # final reshape to flatten the image x_train = x_train.reshape([-1,64]) x_test = x_test.reshape([-1,64]) print("Train set shape:", x_train.shape, y_train.shape) print("Test set shape:", x_test.shape, y_test.shape)Train set shape: (423300, 64) (423300, 2) Test set shape: (74700, 64) (74700, 2)Model Trainingdef lr_schedule(epoch): """Learning Rate Schedule Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs. Called automatically every epoch as part of callbacks during training. 
# Arguments epoch (int): The number of epochs # Returns lr (float32): learning rate """ lr = 1e-3 if epoch > 180: lr *= 0.5e-3 elif epoch > 160: lr *= 1e-3 elif epoch > 120: lr *= 1e-2 elif epoch > 80: lr *= 1e-1 print('Learning rate: ', lr) return lr # Training parameters BATCH_SIZE = 128 # orig paper trained all networks with batch_size=128 EPOCHS = 200 # 200 # Print parameters for sanity check print("Batch size, epochs:", BATCH_SIZE, EPOCHS)Batch size, epochs: 128 200Classical Fully-connected NNmodel = tf.keras.models.Sequential() model.add(tf.keras.layers.Dense(3, activation='relu', input_shape=(64,))) model.add(tf.keras.layers.Dense(2, activation='softmax')) model.summary() opt_adam = tf.keras.optimizers.Adam(learning_rate=lr_schedule(0)) model.compile(loss='categorical_crossentropy', optimizer=opt_adam, metrics=['accuracy', tf.keras.metrics.AUC()]) import time #start_time = time.time() last_epoch = 0 H = model.fit(x_train, y_train, initial_epoch=last_epoch, batch_size=BATCH_SIZE, epochs=10, validation_data=(x_test, y_test), shuffle=True, #callbacks=callbacks ) print(H.history) max(H.history['val_auc'])Get 5 min volume for Feb 2018 from Unprocessed Speed Created by: Date: April 24, 2019 Import required modulesimport getpass import boto3 import pandas as pd from impala.dbapi import connect from impala.util import as_pandas from io import BytesIO as StringIOConnect to the databaseusername = "" password1 = () conn = connect(host="172.18.1.20",auth_mechanism='PLAIN',port=10000,user=username, password=)Get device ids from "wydot_speed_sensors_index"cursor = conn.cursor() cursor.execute('select * from wydot_speed_sensors_index') # Store the index data tanle SSindex = as_pandas(cursor) # Remove "wydot_speed_sensors_index." from the col names Rename1 ={x:x.split('wydot_speed_sensors_index.')[1] for x in SSindex} SSindex=SSindex.rename(index=str,columns=Rename1) print(SSindex['deviceid'].values,SSindex.columns) SSindexSubset data for Lamarie & CheyenneSS2index=SSindex.loc[(SSindex['milepost']>=314)&(SSindex['milepost']<=360)] SS2index = SS2index[["deviceid","milepost","direction","2015_adt"]].copy() print(SS2index['deviceid'].values) start_date='2018-02-01' end_date='2018-03-01' create_query='''SELECT t1.controller, t1.lanedir, t1.vehclass, t1.Time5M, t1.speedmph_avg, t1.NRec, t2.direction, t2.milepost, t2.2015_adt FROM (SELECT sensor AS controller, FROM_UNIXTIME(CEILING(unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss")/300)*300,"yyyy-MM-dd hh:mm:ss") AS Time5M, class AS vehclass, AVG(speed) AS speedmph_avg, count(*) AS NRec, lanedir FROM wydot_speed_processed WHERE from_unixtime(unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss"),"yyyy-MM-dd hh:mm:ss") between '{}' AND '{}' GROUP BY sensor, lanedir, class, FROM_UNIXTIME(CEILING(unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss")/300)*300,"yyyy-MM-dd hh:mm:ss")) t1 JOIN (SELECT * FROM wydot_speed_sensors_index WHERE milepost BETWEEN 314 AND 360) t2 ON (t1.controller = t2.deviceid) ORDER BY t1.controller, t1.Time5M, t1.lanedir, t1.vehclass'''.format(start_date,end_date) cursor.execute(create_query) Vol_dat3=as_pandas(cursor) Vol_dat3.head() start_date='2018-02-01' end_date='2018-03-01' Q1='''SELECT sensor AS controller, FROM_UNIXTIME(CEILING(unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss")/300)*300,"yyyy-MM-dd hh:mm:ss") AS Time5M, class AS vehclass, AVG(speed) AS speedmph_avg, count(*) AS NRec, lanedir FROM wydot_speed_processed WHERE (from_unixtime(unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss"),"yyyy-MM-dd hh:mm:ss")) between '{}' AND '{}' GROUP BY 
sensor, lanedir, class, FROM_UNIXTIME(CEILING(unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss")/300)*300,"yyyy-MM-dd hh:mm:ss")'''.format(start_date,end_date) cursor.execute(Q1) TempDat=as_pandas(cursor) TempDat.head() start_date='2018-02-01' end_date='2018-03-01' Q1='''SELECT sensor, FROM_UNIXTIME(CEILING(unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss")/300)*300,"yyyy-MM-dd hh:mm:ss") AS Time5M , lanedir FROM wydot_speed_processed WHERE unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss") between unix_timestamp('{}',"yyyy-MM-dd") AND unix_timestamp('{}',"yyyy-MM-dd") GROUP BY sensor, lanedir, FROM_UNIXTIME(CEILING(unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss")/300)*300,"yyyy-MM-dd hh:mm:ss")'''.format(start_date,end_date) cursor.execute(Q1) TempDat=as_pandas(cursor) TempDat.head() Q1='''SELECT lanedir, class, FROM_UNIXTIME(CEILING(unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss")/300)*300,"yyyy-MM-dd hh:mm:ss") AS Min5Int, from_unixtime(unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss"),"yyyy-MM-dd hh:mm:ss") AS Tstamp FROM wydot_speed_processed LIMIT 10''' cursor.execute(Q1) TempDat1=as_pandas(cursor) TempDat1.head() start_date='2018-01-01' end_date='2018-12-31' Q1='''SELECT max(FROM_UNIXTIME(CEILING(unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss")/300)*300,"yyyy-MM-dd hh:mm:ss")) AS MaxDate2018 FROM wydot_speed_processed WHERE unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss") between unix_timestamp('{}',"yyyy-MM-dd") AND unix_timestamp('{}',"yyyy-MM-dd")'''.format(start_date,end_date) cursor.execute(Q1) TempDat1=as_pandas(cursor) TempDat1.head() start_date='2018-01-01' end_date='2018-12-31' Q1='''SELECT min(FROM_UNIXTIME(CEILING(unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss")/300)*300,"yyyy-MM-dd hh:mm:ss")) AS MinDate2018 FROM wydot_speed_processed WHERE unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss") between unix_timestamp('{}',"yyyy-MM-dd") AND unix_timestamp('{}',"yyyy-MM-dd")'''.format(start_date,end_date) cursor.execute(Q1) TempDat1=as_pandas(cursor) TempDat1.head() start_date='2018-01-01' end_date='2018-12-31' Q1='''SELECT date_time, FROM_UNIXTIME(CEILING(unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss")/300)*300,"yyyy-MM-dd hh:mm:ss") AS DateTime2018 FROM wydot_speed_processed WHERE unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss") between unix_timestamp('{}',"yyyy-MM-dd") AND unix_timestamp('{}',"yyyy-MM-dd") ORDER BY FROM_UNIXTIME(CEILING(unix_timestamp(date_time,"MM/dd/yyyy hh:mm:ss")/300)*300,"yyyy-MM-dd hh:mm:ss") '''.format(start_date,end_date) cursor.execute(Q1) TempDat1=as_pandas(cursor) TempDat1.head()A Tutorial on Model Ensembles: Maximum Margin Output Coding (MMOC)Author: ** ([LinkedIn](https://www.linkedin.com/in/emilioespositousa) | [Portfolio](https://eesposito.com/) | [GitHub](https://github.com/EmilioEsposito))** Table of Contents* [Introduction](introduction)* [Data Prep](Data Prep)* [Binarize the target](Binarize the target)* [Create N Binary Classifiers](Create N Binary Classifiers)* [Plotting Individual ROC curves](Plotting Individual ROC curves)* [Picking the winning class: Applying MMOC](Picking the winning class: Applying MMOC)* [Measuring Multiclassification Error: Confusion Matrix](Measuring Multiclassification Error: Confusion Matrix)* [Measuring Multiclassification Error: Accuracy, TPR, and FPR](Measuring Multiclassification Error: Accuracy, TPR, and FPR)* [Conclusion](Conclusion) IntroductionThe objective of this tutorial is to introduce the concept of model ensembles, and show one specific type of model ensemble in detail. 
The type of model ensemble we will cover is called Maximum Margin Output Coding (MMOC). So what exactly is a model ensemble? **A model ensemble is the process of using more than one model instead of a single model for prediction; the outputs of several models are combined together using a specified voting scheme to achieve a single prediction for each record.** MMOC is an approach that is used for multi-classification (i.e. binary classification is just between two classes A and B, but multi-classification can handle N number of classes A,B,C,D, etc). It does this by using N number of binary classifiers, and then combining their results into a single prediction for each record.MMOC is similar to Error Correcting Output Coding (ECOC), but is simpler because it takes advantage of the probabilistic real-valued output values from margin-based classifiers (in other words, it uses the probability output number between 0.0 and 1.0 ). ECOC requires individual cutoff values to be chosen before the outputs can be combined, but MMOC simply uses the probabilistic output as a proxy for "confidence". The simplest version of MMOC simply chooses the final class by selecting the model with the highest confidence score (highest probability of being in the class). An extremely simple example is shown below, but we'll also implement this in practice later: For a detailed research paper on MMOC, refer to this publication: * [ & . Carnegie Mellon University. Maximum Margin Output Coding, ICML 2012.](http://www.cs.cmu.edu/~yizhang1/docs/MaxMarginCoding.pdf)And for a easier to understand slide-deck on margin based encoding:* [, , & . Princeton University. Reducing Multiclass to Binary: A Unifying Approach for Margin Classifiers](http://www.cs.princeton.edu/~schapire/talks/ecoc-icml10.pdf)There are other equally valid approaches to tackle multi-classification besides MMOC that do not even use an ensemble (such as a Bayes Net or Softmax Regression), but there are some scenarios where it makes more sense to use an ensemble of several models rather than a single model. For example, if the different classes are driven by different and muttually exclusive features, then an ensemble would be more appropriate. Here is classic example where MMOC would be preferred over a single multinomial classfier:* class A is best predicted by features x1 and x2, but is completely independent of other features* class B is best predicted by features x3 and x4, but is completely independent of other features* class C is best predicted by features x5 and x6, but is completely independent of other featuresAlso, MMOC ensembling allows the individual N binary classifers to use different features and even different models. Class "A" might use logistic regression, but class "B" might use Support Vector Machines (just to illustrate, this scenario would actually be unusual).Now let's get to our example! 
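Before building the real classifiers, here is a minimal sketch of that voting rule, using made-up probabilities and the same `probclass_` column convention adopted later in this notebook:

import pandas as pd

# hypothetical outputs of three binary classifiers for two records
probs = pd.DataFrame({"probclass_setosa":     [0.91, 0.10],
                      "probclass_versicolor": [0.40, 0.35],
                      "probclass_virginica":  [0.05, 0.80]})

# MMOC (simplest form): the winning class is the classifier with the highest confidence
winners = probs.idxmax(axis=1).str.replace("probclass_", "")
print(winners.tolist())   # ['setosa', 'virginica']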
Data Prep# import necessary libaries from sklearn import datasets from sklearn import metrics import sklearn as sk import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.cross_validation import KFold from sklearn import svm from matplotlib import colors %matplotlib inlineFor this exercise, we are going to use the standard Iris dataset found in the scikit-learn library.iris_dict = datasets.load_iris() print iris_dict.keys()['target_names', 'data', 'target', 'DESCR', 'feature_names']The Iris data has has 3 possible target classes:target_names = list(iris_dict['target_names']) feature_names = iris_dict['feature_names'] print target_names print feature_names # put the X variables into a dataframe irisX = pd.DataFrame(iris_dict['data'], columns=iris_dict['feature_names']) # let's make a copy so we can add the target variable iris = irisX.copy() # add the numeric target iris['target'] = iris_dict['target'] # let's shuffle the rows so we get a more representative sample of targets when we preview the data using head() np.random.seed(seed=4) iris = iris.iloc[np.random.choice(len(iris), len(iris), replace=False, ),:] # create mapping of numeric target to target actual name tn_dict = {i:target_names[i] for i in range(len(target_names))} # add target name as well iris['target_name'] = iris['target'].map(tn_dict) # preview final data iris.head()Now let's show some descriptive statistics for each column. Note the uniform distribution of the target. This uniform distribution will allow us to simplify our approach to MMOC later on:# Prints descriptive stats on the data. # For numerics, it prints mean, min, max, and standard deviation # For categorical, it prints the count distribution def desc_statistics(df): for col in df: x = df[col] # print name & datatype print col, "[datatype="+str(x.dtype)+"]" # handle categorical if x.dtype=='object': print "\tCounts:" print "\t",str(x.value_counts()).replace("\n","\n\t") # handle numeric else: print "\tMean:",str(np.mean(x)) print "\tMin:",str(np.min(x)) print "\tMax:",str(np.max(x)) print "\tStd Dev:",str(np.std(x)) desc_statistics(iris)sepal length (cm) [datatype=float64] Mean: 5.84333333333 Min: 4.3 Max: 7.9 Std Dev: 0.825301291785 sepal width (cm) [datatype=float64] Mean: 3.054 Min: 2.0 Max: 4.4 Std Dev: 0.432146580071 petal length (cm) [datatype=float64] Mean: 3.75866666667 Min: 1.0 Max: 6.9 Std Dev: 1.75852918341 petal width (cm) [datatype=float64] Mean: 1.19866666667 Min: 0.1 Max: 2.5 Std Dev: 0.760612618588 target [datatype=int32] Mean: 1.0 Min: 0 Max: 2 Std Dev: 0.816496610641 target_name [datatype=object] Counts: virginica 50 setosa 50 versicolor 50 Name: target_name, dtype: int64Binarize the targetYou may have thought we were ready to begin modeling, but remember since we have multiple classes (3 in this case), we need to create 3 binary variables to represent our targets.# binarize the target dummies = pd.get_dummies(iris['target_name']) # add/update binarized cols to df for predcol in dummies.columns: iris[predcol] = dummies[predcol] iris.head()Create N Binary ClassifiersNow it's time to build our binary models. Each model should be trained and cross-validated separately. We could use different sets of features for each class and even use different types of models. However, the purpose of this tutorial is not to teach feature selection, model selection, and cross-validation, so I won't explain the details here. I just want to show how to combine output from several models. 
For more background information on classification and cross-validation, please refer to [these lecture notes](http://www.datasciencecourse.org/nonlinear_modeling.pdf) from Carnegie Mellon University's [Practical Data Science](http://www.datasciencecourse.org/) course.Now let's create our 3 binary classifiers using SVM models:# loop through each target class for target_name in target_names: # split data into X and y X = iris[feature_names] y = iris[target_name] k = len(X) kf = KFold(len(X), n_folds=k, shuffle=True, random_state=5) #loop through each fold for train_index, hold_index in kf: # split into train and hold trainx = X.iloc[train_index,:] trainy = y[train_index] holdx = X.iloc[hold_index,:] # build SVM model = svm.SVC(probability=True, C=1,gamma=.05,random_state=5) # fit the model using train and return probabilistic predictions of holdout fit = model.fit(trainx, trainy) prob = fit.predict_proba(holdx) # add the probabilist output to the original dataframe iris.loc[hold_index,'probclass_'+target_name] = prob[:,0]As you see below, we added a probabilistic output for each classifer (the columns with the "probclass_" prefixiris.head()Plotting Individual ROC curvesEven though this is a multinomial classification problem, we still want to check the validity of the individual binary classifiers before we combine results. However, even if all of our binary classifiers have good AUCs, this tells us NOTHING about the error of our final output. In the next section we'll combine the binary classifiers and then measure the overall error using a confusion matrix. But for now, let's take a look at how our binary classifiers did:# take a dataframe and plot all of the binary classifier curves on a single figure def plotROC(df,pred_col_prefix): df = df.copy() # get a list of the prediction columns predcols = [col for col in df.columns if col[:10]==pred_col_prefix] fig, ax = plt.subplots() mycolors = ['blue','red','green'] +list(colors.cnames) for i, (target_name, predcol) in enumerate(zip(target_names,predcols)): truth = df[target_name] pred = df[predcol] # calclulate fpr/tpr metrics fpr, tpr, _ = metrics.roc_curve(truth, pred) # calc the AUC auc = metrics.roc_auc_score(truth, pred) # set labels plt.ylabel("TPR") plt.xlabel("FPR") plt.title("ROC Curve (using k-fold Cross Validation)") # plot the default model line plt.plot([0,1]) # plot tpr/fpr on ROC leg_label = target_name + ", AUC=" + str(round(auc, 2)) ax.plot(fpr, tpr, color=mycolors[i], label=leg_label) # Begin citation for legend: http://matplotlib.org/1.3.0/examples/pylab_examples/legend_demo.html # Now add the legend with some customizations. legend = ax.legend(loc='lower right') plt.show() plotROC(iris,"probclass_")We'll discuss detailed interpretation of TPR and FPR later. For now, just recognize that a random choice model would produce the diagonal line. A perfect model (0 error) would be a line that goes straight up from (0,0) to (0,1), then takes a 90 degree turn right to (1,1). Any curve above the diagonal is doing better than random chance, and the closer it bulges towards (0,1) the better. AUC measures the area under the curve. AUC=1 indicates a perfect model, and AUC=0.50 indicates a random choice model (the diagonal line). Picking the winning class: Applying MMOCNow that we validated our individual binary classifiers, it's time to combine their results into a single final prediction for each record. 
As we eluded to earlier, we will do this by using the simplest approach of MMOC: pick the classifier that has the highest confidence (in this case, we are simply using the real-valued probability as the confidence). This approach is only valid if the distribution of our classes is relatively uniform. If the distribution was not uniform, we would either need another measure of confidence taking the distribution into account, or we would need to apply cutoff values and employ ECOC instead.# this function takes a dataframe, looks for columns that have probabilistic output (denoted by prefix probclass_) # it then picks the winning classifier based on which one had the highest confidence # it records the winner in both a pred_target_name column, and also in binarized predclass_ columns def applyMMOC(df): df = df.copy() # get a list of the prediction columns predcols = [col for col in df.columns if col[:10]=='probclass_'] # choose the winning classifier by picking the one with the highest probability/confidence df['pred_target_name'] = df[predcols].idxmax(axis=1) # trim the predicted target name df['pred_target_name'] = df['pred_target_name'].apply(lambda x: x.replace('probclass_',"")) # get dummies of pred as well, we can use them later to easily calc error metrics pred_dummies = pd.get_dummies(df['pred_target_name']) pred_dummies.columns = ["predclass_"+str(col) for col in pred_dummies.columns] # add the dummies to the df for predcol in pred_dummies.columns: df[predcol]=pred_dummies[predcol] return df # preview final data with predictions iris = applyMMOC(iris) iris.head()Measuring Multiclassification Error: Confusion MatrixNow that we have combined the output of our 3 binary models, we can now measure our error using a confusion matrix.# This funtion returns a nice df confusion matrix. Sklearn.metrics has it's own function but this is nicer. def getConfusionMatrix(df, y_truth, y_pred): df = df.copy() labels = set(df[y_truth]).union(set(df[y_pred])) # create column of ones df['ones'] = np.ones(len(df)) # create pivoted df that shows truth vs predicted value counts confmat = pd.pivot_table(df, values = 'ones', index=[y_truth], columns=[y_pred], aggfunc=np.sum, margins=True) return confmat confmat = getConfusionMatrix(iris,'target_name','pred_target_name') confmatThe matrix rows contain the true label counts, and the columns show the predicted label counts. A perfect model would result in a diagonal matrix (non-zero counts along the diagonal, with zero everywhere else). Measuring Multiclassification Error: Accuracy, TPR, and FPR Overall Accuracy You can think of overall accuracy as the sum of the values along the diagonal in the confusion matrix, divided by the total record count:$$ Accuracy = \frac{\\ correct\ predictions}{N} $$accuracy = sk.metrics.accuracy_score(iris['target_name'],iris['pred_target_name']) accuracyTrue Positive Rate & False Positive RateNow we will calculate the True Positive Rate (TPR) and the False Postive Rate (FPR) for each class. If you look at the ROC curves earlier, you'll notice these are the 2 metrics on each axis. A TPR measures how many times a model correctly classifies a record as being part of a given class. A FPR measures how many times a model classifies a record as being a member of the given class, when in reality it is NOT a member of that class. A perfect model would have a TPR of 1.00 (100%) and a FPR of 0.00 (0%). In practice, there is usually a tradeoff between these 2 metrics. 
They can also be expressed as the equations below:$$ TPR_{ClassA} = \frac{\\ Records\ Model\ Classified\ Correctly\ as\ Class\ A}{\\ of\ Class\ A\ Records} = \frac{\\ True\ Positives}{\\ Positives} $$$$ FPR_{ClassA} = \frac{\\ Records\ Model\ Classified\ INCORRECTLY\ as\ Class\ A}{\\ of\ NOT\ Class\ A\ Records} = \frac{\\ False\ Positives}{\\ Negatives} $$# given a confusion matrix df, return a df of the TPR and FPR of each class def getTPRFPR(confusion_matrix_df): df = confusion_matrix_df.copy() # get target_names and remove "All" target_names = list(df.index) target_names.remove("All") # take a subset of the confusion matrix, removing "All" rows/cols df = df.loc[target_names,target_names] # create a dict to fill with other dictionaries of metrics for each label label_metrics = {} for label in target_names: # add sub dictionary for metric label_metrics[label] = {} label_metrics[label]["FPR"] = sum(df.loc[((df.index!=label) ),label])/(sum(sum(df.values)) - sum(df.loc[label,target_names])) label_metrics[label]["TPR"] = df.loc[label,label]/sum(df.loc[label,target_names]) # make it into a nice dataframe for display tprfpr_df = pd.DataFrame(label_metrics).transpose() return tprfpr_df getTPRFPR(confmat)1. IntroductionBioCRNpyler is a software tool designed to rapidly compile large biological chemical reaction networks (CRNs) from simple user specifications (written in python). It has built in support of a number of models for transcription, translation, and gene expression regulation using components common in _E. coli_ synthetic biology. This tutorial explains the inner workings of BioCRNpyler and shows how to create custom mixtures, components, and mechanisms. Specifically, we will go through making a custom gene expression model:>$G \to G + P \rightleftharpoons G:P \to G + P + X$here $G$ is a gene and $P$ is a polymerase and $X$ is the protein expressed by $G$. No translational machinery is included in this model, making it one of the simplest possible for expression. Note that we are ignoring translation for simplicitiy, not becuase it isn't important.On the top level, BioCRNpyler uses three kinds of objects:* __Mechanisms__: are the details of how a physics process is implemented as a CRN. These take the form of black box reaction schemas which compile into a CRN containing all the intermediate steps required to get from a specified input to an output.* __Components__: are the ingredients one might imagine adding to a test tube, say from a pipette. They do not include all chemical species involved in a reaction, but just the key ones we might experimentally modulate. Components may contain their own mechanisms or default to those used by a mixture. An example of a component is a piece of DNA encoding a gene. A DNA-Transcription factor complex, on the other hand, would not normally be a component.* __Mixtures__: can be thought of as the "reaction soup" we are working in. Mixtures contain default components and mechanisms. Components are added to mixtures to create different reaction conditions. Internally, BioCRNpyler tells the Mixture to compile all its Components. Each Component contains its own Mechanisms (or defaults to Mechanisms defined in the Mixture) and calls each Mechanism (read: reaction schema) to generate a set of chemical species and reactions which are combined into a complete CRN. BioCRNpyler also has its own internal CRN representation, which we will discuss next. 
Chemical Reaction Network (CRN) modelA CRN is a set of species $S$ and a set of reactions $R$ where each reaction is expressed $I \rightarrow O$ where $I$ are the inputs species, $O$ are the output species. Each reaction occurs with a rate function (propensity) $\rho(x)$ which takes the state of the CRN (the values of all the species) as an input. By default, reactions use massaction rates: $\rho(x) = k \Pi_{s \in I} x_s$ here $k$ is some constant and $x_s$ is the value of the species $s$. A number of built in propensities exist and are described in the documentation, including a general propensity allowing for an arbitrary function. Internally, BioCRNpyler represents species as strings involving a type identifier and a name: type_name. This is to allow for species to be identified as "gene_X", "mrna_X", etc. Complexes between species can be created automatically using the ComplexSpecies constructor or given custom defined names. By default, a complex of gene_X and protein_Y would be called complex_gene_X_protein_Y. This would be considered different from complex_protein_Y_gene_X in Bioscrape's CRN semantics because species here are effectively strings.Reactions are stored as lists of species (for the inputs and outputs) and a rate constant k. Non massaction reactions also require a parameter dictionary of their relevant parameter values. Massaction reactions are allowed to be reversible, in which case they are thought of as two irreversible reactions. Reaction rates default to 1.0.Now, we will create the CRN described above directly and approximate it with a non-massaction propensity.from biocrnpyler.chemical_reaction_network import ChemicalReactionNetwork from biocrnpyler.species import Species, Complex from biocrnpyler.reaction import Reaction from biocrnpyler.propensities import HillPositive #create the three species in the CRN G = Species(name = "G", material_type = "dna") P = Species(name = "P", material_type = "protein") X = Species(name = "X", material_type = "protein") PG = Complex([P, G]) #complex takes a list of species and returns a complex species = [P, G, X, PG] #a list of species #Create the reversible reaction: + P <--> G:P kf = 100 #Forward reaction rate kr = .01 inputs1 = [G, P] outputs1 = [PG] rxn1 = Reaction.from_massaction(inputs1, outputs1, k_forward = kf, k_reverse = kr) #type defaults to massaction #Create the irreversible reaction G:P --> G + P + X inputs2 = [PG] outputs2 = [G, P, X] kexpress = 1. 
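# Under mass-action kinetics, rxn1 above fires forward at rate kf*[G]*[P] and in reverse at kr*[G:P];
# the irreversible expression step created next fires at kexpress*[G:P].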
rxn2 = Reaction.from_massaction(inputs2, outputs2, k_forward = kexpress) rxns = [rxn1, rxn2] #a list of reactions CRN = ChemicalReactionNetwork(species, rxns) #Species, reactions, and CRNs can all be directly printed print("species representation:\n", [a.material_type for a in species]) print("\nrxns representation:\n", rxns) print("\nCRN Representation:\n", CRN) #We will now create a third reaction which models the production of X as a positive hill function of P inputs3 = [G, P] outputs3 = [] khill = 10 #parmeters can be numbers or strings pos_hill = HillPositive(k=khill, K=0.3, n=2, s1=P) rxn3 = Reaction(inputs3, outputs3,propensity_type=pos_hill) CRN2 = ChemicalReactionNetwork(species, [rxn3]) print("\nCRN2:\n",CRN2)species representation: ['protein', 'dna', 'protein', 'complex'] rxns representation: [dna[G]+protein[P] <--> complex[dna[G]:protein[P]], complex[dna[G]:protein[P]] --> dna[G]+protein[P]+protein[X]] CRN Representation: Species = protein_P, dna_G, protein_X, complex_dna_G_protein_P Reactions = [ dna[G]+protein[P] <--> complex[dna[G]:protein[P]] complex[dna[G]:protein[P]] --> dna[G]+protein[P]+protein[X] ] CRN2: Species = protein_P, dna_G, protein_X, complex_dna_G_protein_P Reactions = [ dna[G]+protein[P] --> ]2. Creating a Custom Mechanism: GeneExpressionTo create custom Mechanism objects, subclass the Mechanism class and rewrite the object constructor, the update_species function, and the update_reactions function. Briefly:* In the constructor we will set the name of the mechanism and the name of the polymerase species, rnap. * In update_species, we will create a list of all the species used in the reaction schema: the gene, gene-rnap complex, and the product species.* In update_reactions we create a list of all the reactions required for our reaction schema: the polymerase binding and unbinding reactions as well as the reaction producing the gene product X. Note that this code could be generated much faster using the built in MichaelisMentenRXN Mechanism, but we will do it by hand here for educational purposes.from biocrnpyler.mechanism import Mechanism class GeneExpression(Mechanism): #Overwrite the constructor. # Name: the name of the Mechanism (set when it is instantiated). # rnap: the polymerase, which we will allow to be multiple types of object for user convenience # type: this is the "kind" of mechanism - used as a key in mechanism dictionaries def __init__(self, name, rnap, type = "gene_expression", **keywords): #Check if the rnap type species (see chemical reaction network details below) if isinstance(rnap, Species): self.rnap = rnap else: raise ValueError("'rnap' must be a Species!") #The superclass constructor will take care of the name Mechanism.__init__(self = self, name = name, mechanism_type = type, **keywords) #MUST CALL THE SUPER CONSTRUCTOR! #Overwrite update_species: # dna: the name of the gene to be expressed # product: the name of the gene product #update_species returns a list of all species used by the reaction schema def update_species(self, dna, product): #We do not need to do a check on the DNA or product types because that will be performed at the Component level. 
#Create the list of species to return species = [dna, self.rnap, product] #The Complex returns a ComplexSpecies made up a list of species species += [Complex([dna, self.rnap])] #Return a list of species return species #Overwrite update_species: # dna: the name of the gene to be expressed # product: the name of the gene product # component and part_id are used for the mechanism to find parameters approrpiately #update_species returns a list of all species used by the reaction schema #update_reactions will require rates as well as the relevant species. Returns a list of chemical_reaction_network.reaction def update_reactions(self, dna, product, component, part_id = None): #Component.get_parameter will automatically search the ParameterDatabases for the best parameter to use. #The string names here, 'kexpress', 'kb', 'ku', must be defined by you to match the parameter data file. #see parameter jupyter notebook for more information. kexpress = component.get_parameter("kexpress", part_id = part_id, mechanism = self) kb = component.get_parameter("kb", part_id = part_id, mechanism = self) ku = component.get_parameter("ku", part_id = part_id, mechanism = self) #complex specie comp = Complex([dna, self.rnap]) #Binding Reaction: dna + rnap <--> dna:rnap binding_rxn = Reaction.from_massaction(inputs=[dna, self.rnap], outputs=[comp], k_forward=kb, k_reverse=ku) #Catalytic Reaction: dna:rnap --> dna + rnap + product cat_rxn = Reaction.from_massaction(inputs=[comp], outputs=[dna, product, self.rnap], k_forward=kexpress) #Return a list of reactions return [binding_rxn, cat_rxn]3. Creating a Custom Component: GeneTo create custom Component objects, subclass the Component class and rewrite constructor, update_species, and update_reactions functions.* The Constructor: will set the name of the DNA specie and the name of the protein product* update_species: will call each mechanism (in this case just GeneExpression) to get their species* update_reactions: will call each mechanism (in this case just GeneExpression) to get their reactionsIn general, each component's functions update_species and update_reactions need to know (via you, the programmer) what the names of the mechanisms they are expected to use are. These mechanisms will be automatically inherited from the Mixture object the Component is added to (by default) but can also be overwritten with the mechanisms keyword in the Component constructor.from biocrnpyler.component import Component class Gene(Component): #OVERWRITE CONSTRUCTOR def __init__(self, dna, product = None, **keywords): #check types for name and product and set internal variables #self.internal_species = Component.set_species(species, material_type = None, attributes = None) #is a helper function that allows for species to be strings, Species, or Components. self.dna = self.set_species(dna, material_type = "dna") if product is None: #provide default name for the product self.product = self.set_species(self.dna.name, material_type = "protein") else: self.product = self.set_species(product) Component.__init__(self = self, name = dna, **keywords) #MUST CALL THE SUPERCLASS CONSTRUCTOR! 
#OVERWRITE update_species def update_species(self): #The Component will automatically search for a mechanism called "gene_expression", which it can find in 2 ways # 1: it can inherit this from its Mixture (which requires the Mixture has an appropriate "gene_expression" mechanism) # 2: this can be passed into the Gene constructor in a dictionary as a keyword arg mechanisms= {'gene_expression':Mechanism [Object Instance]} mech_express = self.get_mechanism("gene_expression") #argument is the mechanism type #Return the species from the mechanisms in your mixture. In this case, just one. return mech_express.update_species(self.dna, self.product) #OVERWRITE update_reactions def update_reactions(self): #argument is the mechanism type mech_express = self.get_mechanism("gene_expression") #Return the reactions from each mechanism in your mixture. In this case, just this one. return mech_express.update_reactions(self.dna, self.product, component = self, part_id = self.name)4. Creating a Custom Mixture: ExpressionMixtureTo create custom Mixture objects, subclass the Mixture class and rewrite the object constructor function to contain the appropriate default mechanisms and components. All other functionalities will be inherited from the Mixture super class.#ExpressionMixture from biocrnpyler import Mixture, Protein class ExpressionMixture(Mixture): #OVERWRITE THIS METHOD def __init__(self, name="", rnap = "RNAP", **keywords): #MUST CALL THE SUPERCLASS CONSTRUCTOR! Mixture.__init__(self, name = name, **keywords) #RNAP is a component which will be added to the Mixture self.rnap = Protein(rnap) #add the components to the Mixture self.add_components([self.rnap]) #Create an instance of the GeneExpression mechanism mech_express = GeneExpression("gene_expression", self.rnap.get_species()) #notice the Species inside the Component is passed in with Component.get_species() #Create default mechanism dict default_mechanisms = { mech_express.mechanism_type:mech_express } #add the mechanisms to the Mixture self.add_mechanisms(default_mechanisms)5. Combine everything and compile a CRN and print it.#Create a fake parameter dictionary for the example parameters = {("gene_expression","Reporter", "kexpress"):1.0, ("gene_expression","Reporter", "ku"):.01, ("gene_expression","Reporter", "kb"):100.0 } #Instantiate a gene G1 = Gene("Reporter", "GFP", parameters = parameters) myMixture = ExpressionMixture(components = [G1]) print(myMixture) CRN = myMixture.compile_crn() #Print the CRN print("Internal String Representation of the CRN:\n", CRN) print("\nFancier Pretty Print Representation Can also be used for Species, Reactions, and CRNS:\n", CRN.pretty_print(show_rates = True, show_material = True, show_attributes = True))ExpressionMixture: Internal String Representation of the CRN: Species = dna_Reporter, protein_RNAP, GFP, complex_dna_Reporter_protein_RNAP Reactions = [ dna[Reporter]+protein[RNAP] <--> complex[dna[Reporter]:protein[RNAP]] complex[dna[Reporter]:protein[RNAP]] --> dna[Reporter]+GFP+protein[RNAP] ] Fancier Pretty Print Representation Can also be used for Species, Reactions, and CRNS: Species (4) = {0. dna[Reporter], 1. protein[RNAP], 2. GFP, 3. complex[dna[Reporter]:protein[RNAP]]} Reactions (2) = [ 0. dna[Reporter]+protein[RNAP] <--> complex[dna[Reporter]:protein[RNAP]] Kf=k_forward * dna_Reporter * protein_RNAP Kr=k_reverse * complex_dna_Reporter_protein_RNAP k_forward=100.0 found_key=(mech=gene_expression, partid=Reporter, name=kb). search_key=(mech=gene_expression, partid=Reporter, name=kb). 
k_reverse=0.01 found_key=(mech=gene_expression, partid=Reporter, name=ku). search_key=(mech=gene_expression, partid=Reporter, name=ku). 1. complex[dna[Reporter]:protein[RNAP[...]MuZeroThis notebook is just an example to show how to run [MuZero (https://github.com/werner-duvaud/muzero-general)](https://github.com/werner-duvaud/muzero-general) in Google Colab or Jupyter Notebook. You can also launch MuZero directly by cloning the github repository and running the command `python muzero.py`. See [readme](https://github.com/werner-duvaud/muzero-general) for detailed instructions.import math from abc import ABC, abstractmethod import torch import tensorflow as tf logits_torch = torch.tensor([[-0.0078, 0.4007, -0.1592, 0.3446, 0.2232, -0.0814, -0.1400, 0.6435, 0.5683, -0.4653, -0.3925, 0.0400, 0.1729, -0.0152, 0.0551, -0.1042, 0.5908, 0.0915, 0.2475, 0.2067, -0.5062]]) support_size_torch = 10 x_torch = torch.tensor([[-0.4167]]) def support_to_scalar_torch(logits, support_size): """ Transform a categorical representation to a scalar See paper appendix Network Architecture """ # Decode to a scalar probabilities = torch.softmax(logits, dim=1) print("stonks:", torch.tensor([x for x in range(-support_size, support_size + 1)]) .expand(probabilities.shape) ) support = ( torch.tensor([x for x in range(-support_size, support_size + 1)]) .expand(probabilities.shape) .float() .to(device=probabilities.device) ) x = torch.sum(support * probabilities, dim=1, keepdim=True) print("mul stonks:", support * probabilities) print("big stonks:", x) # Invert the scaling (defined in https://arxiv.org/abs/1805.11593) x = torch.sign(x) * ( ((torch.sqrt(1 + 4 * 0.001 * (torch.abs(x) + 1 + 0.001)) - 1) / (2 * 0.001)) ** 2 - 1 ) return x support_to_scalar_torch(logits_torch, support_size_torch) logits_tf = tf.constant([[-0.0078, 0.4007, -0.1592, 0.3446, 0.2232, -0.0814, -0.1400, 0.6435, 0.5683, -0.4653, -0.3925, 0.0400, 0.1729, -0.0152, 0.0551, -0.1042, 0.5908, 0.0915, 0.2475, 0.2067, -0.5062]]) support_size_tf = 10 x_tf = tf.constant([[-0.4167]]) def support_to_scalar_tf(logits, support_size): """ Transform a categorical representation to a scalar See paper appendix Network Architecture """ # Decode to a scalar probabilities = tf.nn.softmax(logits, axis=1) support = tf.constant([x for x in range(-support_size, support_size + 1)], dtype=tf.float32) support = tf.broadcast_to(support, probabilities.shape) x = tf.math.reduce_sum(support * probabilities, axis=1, keepdims=True) # Invert the scaling (defined in https://arxiv.org/abs/1805.11593) x = tf.math.sign(x) * ( ((tf.math.sqrt(1 + 4 * 0.001 * (tf.math.abs(x) + 1 + 0.001)) - 1) / (2 * 0.001)) ** 2 - 1 ) return x support_to_scalar_tf(logits_tf, support_size_tf)scalar to supportdef scalar_to_support_torch(x, support_size): """ Transform a scalar to a categorical representation with (2 * support_size + 1) categories See paper appendix Network Architecture """ # Reduce the scale (defined in https://arxiv.org/abs/1805.11593) x = torch.sign(x) * (torch.sqrt(torch.abs(x) + 1) - 1) + 0.001 * x # Encode on a vector x = torch.clamp(x, -support_size, support_size) floor = x.floor() prob = x - floor # print("prob:", prob.shape) logits = torch.zeros(x.shape[0], x.shape[1], 2 * support_size + 1).to(x.device) logits.scatter_( 2, (floor + support_size).long().unsqueeze(-1), (1 - prob).unsqueeze(-1) ) print("stonks:", logits) print("stonks:", logits.shape) indexes = floor + support_size + 1 prob = prob.masked_fill_(2 * support_size < indexes, 0.0) indexes = indexes.masked_fill_(2 * 
support_size < indexes, 0.0) logits.scatter_(2, indexes.long().unsqueeze(-1), prob.unsqueeze(-1)) # return logits scalar_to_support_torch( torch.tensor([[2.1025, 2.0129, 2.1019, 2.0780, 2.0724, 2.0395], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [2.1156, 1.9219, 1.9878, 2.0014, 2.0230, 1.9698], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [1.9868, 2.0178, 2.1383, 1.9936, 2.0089, 1.9848], [1.9503, 1.9601, 2.0509, 1.9869, 1.9181, 1.9885], [2.0072, 1.9588, 2.0606, 2.0278, 2.0264, 2.0652]]), support_size_torch) def scalar_to_support_tf(x, support_size): """ Transform a scalar to a categorical representation with (2 * support_size + 1) categories See paper appendix Network Architecture """ # Reduce the scale (defined in https://arxiv.org/abs/1805.11593) x = tf.math.sign(x) * (tf.math.sqrt(tf.math.abs(x) + 1) - 1) + 0.001 * x # Encode on a vector x = tf.clip_by_value(x, -support_size, support_size) # print("x:", x) floor = tf.math.floor(x) prob = x - floor # print("prob:", prob.shape) logits = tf.zeros([x.shape[0], x.shape[1], 2 * support_size + 1]) logits = tf.scatter_nd( tf.expand_dims(floor + support_size, -1), tf.expand_dims(1 - prob, -1) # shape required, account for dims=2 ) print("stonks:", logits) indexes = floor + support_size + 1 prob = tf.where(2 * support_size < indexes, prob, 0.0) indexes = tf.where(2 * support_size < indexes, indexes, 0.0) logits.tf.scatter_nd( tf.expand_dims(indexes.long(), -1), tf.expand_dims(prob, -1) # shape required, account for dims=2 ) # return logits scalar_to_support_tf( tf.constant([[2.1025, 2.0129, 2.1019, 2.0780, 2.0724, 2.0395], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [2.1156, 1.9219, 1.9878, 2.0014, 2.0230, 1.9698], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [1.9868, 2.0178, 2.1383, 1.9936, 2.0089, 1.9848], [1.9503, 1.9601, 2.0509, 1.9869, 1.9181, 1.9885], [2.0072, 1.9588, 2.0606, 2.0278, 2.0264, 2.0652]]), support_size_tf)stonks: tf.Tensor( [[[0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.]] [[0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.]] [[0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.]] ... [[0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.]] [[0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] 
[0. 0. 0. ... 0. 0. 0.]] [[0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.]]], shape=(16, 6, 21), dtype=f[...]EXPORTING A MODEL FROM PYTORCH TO ONNX AND RUNNING IT USING ONNX RUNTIME# Some standard imports import io import time import numpy as np from torch import nn import torch.utils.model_zoo as model_zoo import torch.onnxdefine model# Super Resolution model definition in PyTorch import torch.nn as nn import torch.nn.init as init class SuperResolutionNet(nn.Module): def __init__(self, upscale_factor, inplace=False): super(SuperResolutionNet, self).__init__() self.relu = nn.ReLU(inplace=inplace) self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2)) self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)) self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1)) self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1)) self.pixel_shuffle = nn.PixelShuffle(upscale_factor) self._initialize_weights() def forward(self, x): x = self.relu(self.conv1(x)) x = self.relu(self.conv2(x)) x = self.relu(self.conv3(x)) x = self.pixel_shuffle(self.conv4(x)) return x def _initialize_weights(self): init.orthogonal_(self.conv1.weight, init.calculate_gain('relu')) init.orthogonal_(self.conv2.weight, init.calculate_gain('relu')) init.orthogonal_(self.conv3.weight, init.calculate_gain('relu')) init.orthogonal_(self.conv4.weight) # Create the super-resolution model by using the above model definition. torch_model = SuperResolutionNet(upscale_factor=3) # Load pretrained model weights model_url = 'https://s3.amazonaws.com/pytorch/test_data/export/superres_epoch100-44c6958e.pth' # Initialize model with the pretrained weights map_location = lambda storage, loc: storage if torch.cuda.is_available(): map_location = None torch_model.load_state_dict(model_zoo.load_url(model_url, map_location=map_location)) # set the model to inference mode torch_model.eval()convert to onnxExporting a model in PyTorch works via tracing or scripting.# Input to the model batch_size = 1 # just a random number x = torch.randn(batch_size, 1, 224, 224, requires_grad=True) torch_out = torch_model(x) # Export the model torch.onnx.export(torch_model, # model being run x, # model input (or a tuple for multiple inputs) "super_resolution.onnx", # where to save the model (can be a file or file-like object) export_params=True, # store the trained parameter weights inside the model file opset_version=10, # the ONNX version to export the model to do_constant_folding=True, # whether to execute constant folding for optimization input_names = ['input'], # the model's input names output_names = ['output'], # the model's output names dynamic_axes={'input' : {0 : 'batch_size'}, # variable lenght axes 'output' : {0 : 'batch_size'}}, verbose = True) ! ls -a. demo Planet_EDA.ipynb .. f_score.py pr_curve.py confusion_matrix.py graph roc_curve.py custom_bce_ce_loss.ipynb .ipynb_checkpoints super_resolution.onnx custom_bce_ce_loss.py onnx_demo.ipynbcheck modelimport onnx # load onnx model onnx_model = onnx.load("super_resolution.onnx") # verify the model’s structure and confirm that the model has a valid schema. 
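# check_model raises an exception if the exported graph is structurally invalid; no output means the check passed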
onnx.checker.check_model(onnx_model)ompute the output using ONNX Runtimeimport onnxruntime ort_session = onnxruntime.InferenceSession("super_resolution.onnx") def to_numpy(tensor): return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy() # compute ONNX Runtime output prediction ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)} ort_outs = ort_session.run(None, ort_inputs) # compare ONNX Runtime and PyTorch results np.testing.assert_allclose(to_numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)Running the model on an image using ONNX Runtimefrom PIL import Image import torchvision.transforms as transforms import matplotlib.pyplot as plt img = Image.open("./demo/cat.jpg") plt.imshow(img) plt.show() resize = transforms.Resize([224, 224]) img = resize(img) img_ycbcr = img.convert('YCbCr') img_y, img_cb, img_cr = img_ycbcr.split() to_tensor = transforms.ToTensor() img_y = to_tensor(img_y) img_y.unsqueeze_(0) start_time = time.perf_counter() torch_out = torch_model(img_y) torch_cost_time = time.perf_counter() - start_time start_time = time.perf_counter() ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(img_y)} ort_outs = ort_session.run(None, ort_inputs) onnx_cost_time = time.perf_counter() - start_time img_out_y = ort_outs[0] print('pytorch cost time {}'.format(torch_cost_time)) print('onnx_cost_time {}'.format(onnx_cost_time)) img_out_y = Image.fromarray(np.uint8((img_out_y[0] * 255.0).clip(0, 255)[0]), mode='L') # get the output image follow post-processing step from PyTorch implementation final_img = Image.merge( "YCbCr", [ img_out_y, img_cb.resize(img_out_y.size, Image.BICUBIC), img_cr.resize(img_out_y.size, Image.BICUBIC), ]).convert("RGB") # Save the image, we will compare this with the output image from mobile device final_img.save("./demo/cat_superres_with_ort.jpg") plt.imshow(final_img) plt.show()EXTRACTION (Finding Data)# Store New York CSV into a dataframe csv_file = "./Resources/newyork_housing.csv" ny_area_df = pd.read_csv(csv_file) ny_area_df # Store Austin CSV into a dataframe csv_file = "./Resources/austin_housing.csv" austin_df = pd.read_csv(csv_file) austin_dfTRANSFORMATIONS (Data Cleanup & Analysis) 1. 
New York Housing Datany_area_df.columns # Create new dataframe with select columns ny_clean_df = ny_area_df[["bedrooms", "price", "address/streetAddress", "address/city"]].copy() ny_clean_df = ny_clean_df.rename(columns={'bedrooms': 'num_bedrooms', 'address/streetAddress': 'street_address', 'address/city': 'city'}) ny_clean_df # Drop NaN values new_york_df = ny_clean_df.dropna() new_york_dfStatistical Analysis# Find average price grouped by number of bedrooms avg_ny_price = new_york_df.groupby('num_bedrooms').mean() avg_ny_price # Convert 'price' column from string to float type avg_ny = avg_ny_price["price"].astype(float) avg_ny # Find median price grouped by number of bedrooms median_ny_price = ny_clean_df.groupby('num_bedrooms').median() median_ny_price # Convert 'price' column from string to float type median_ny = median_ny_price["price"].astype(float) median_ny # Merge the New York area's average and median price dataframes ny_stats = pd.merge(avg_ny_price, median_ny_price, how="left", on="num_bedrooms") ny_stats = ny_stats.rename(columns={'price_x': 'NY Average Price', 'price_y': 'NY Median Price'}) ny_stats.head() # Format 'NY Average Price' column ny_stats["NY Average Price"] = ny_stats["NY Average Price"].map("${:,.2f}".format) ny_stats.head() # Format 'NY Median Price' column ny_stats["NY Median Price"] = ny_stats["NY Median Price"].map("${:,.2f}".format) ny_stats.head()2. Austin Housing Dataaustin_df.columns # Create new data with select columns for Austin dataframe austin_clean_df = austin_df[["numOfBedrooms", "latestPrice", "streetAddress", "city"]].copy() austin_clean_df = austin_clean_df.rename(columns={'numOfBedrooms': 'num_bedrooms', 'latestPrice': 'price', "streetAddress": "street_address"}) austin_clean_df # Drop NaN values austin_df = austin_clean_df.dropna() austin_df # Find Austin average price grouped by number of bedrooms avg_austin_price = austin_df.groupby('num_bedrooms').mean() avg_austin_price # Convert 'price' column from string to float type austin_avg = avg_austin_price["price"].astype(float) austin_avg # Find median price grouped by number of bedrooms median_austin_price = austin_clean_df.groupby('num_bedrooms').median() median_austin_price # Convert 'price' column from string to float type austin_median = median_austin_price["price"].astype(float) austin_median # Merge the Austin average and median price dataframes austin_stats = pd.merge(avg_austin_price, median_austin_price, how="left", on="num_bedrooms") austin_stats = austin_stats.rename(columns={'price_x': 'Austin Average Price', 'price_y': 'Austin Median Price'}) austin_stats.head() # Format 'Austin Average Price' column austin_stats["Austin Average Price"] = austin_stats["Austin Average Price"].map("${:,.2f}".format) austin_stats.head() # Format 'Austin Median Price' column austin_stats["Austin Median Price"] = austin_stats["Austin Median Price"].map("${:,.2f}".format) austin_stats.head()Analysis: NY vs. Austin Housing Prices# Analyzing New York vs. 
Austin housing prices # Merge NY and Austin dataframes ny_austin_df = pd.merge(ny_stats, austin_stats, how='left', on='num_bedrooms') ny_austin_df = ny_austin_df.rename(columns={'Average Price_x': 'New York Average Price', 'Median Price_x': 'New York Median Price', 'Average Price_y': 'Austin Average Price', 'Median Price_y': 'Austin Median Price'}) ny_austin_df.head()Create a schema for where data will be loaded```sqlCREATE TABLE newyork_housing ( num_bedrooms INT PRIMARY KEY, price NUMERIC, street_address TEXT, city TEXT);CREATE TABLE austin_housing ( num_bedrooms INT PRIMARY KEY, price NUMERIC, street_address TEXT, city TEXT);SELECT * FROM newyork_housingSELECT * FROM austin_housing```# Create engine and connection to ETL_city_housing db. engine = create_engine(f'postgresql://postgres:{password}@localhost:5432/ETL_city_housing') connection = engine.connect() # Check the tables engine.table_names()LOADING# Use pandas to load csv converted to DF into database new_york_df.to_sql(name="newyork_housing", con=engine, if_exists='append', index=False) # # Use pandas to load csv converted to DF into database austin_clean_df.to_sql(name="austin_housing", con=engine, if_exists='append', index=False) # Confirm data is in the newyork_housing table pd.read_sql_query('SELECT * FROM newyork_housing', con=engine) # Confirm data is in the newyork_housing table pd.read_sql_query('SELECT * FROM austin_housing', con=engine) # Calculate average New York housing price grouped by number of bedrooms avg_ny_price = pd.read_sql('SELECT "num_bedrooms", AVG("price") AS "Average Housing Price" FROM newyork_housing GROUP BY "num_bedrooms" ORDER BY "num_bedrooms"', con=engine) avg_ny_price # Calculate average Austin housing price grouped by number of bedrooms avg_austin_price = pd.read_sql('SELECT "num_bedrooms", AVG("price") AS "Austin Average Price" FROM austin_housing GROUP BY "num_bedrooms" ORDER BY "num_bedrooms"', con=engine) avg_austin_price.Project: Internet temperature. 2015-12-31 1.5.3IPython research for internet temperature. We use now only fontanka.ru website, later other sites and methods will be added.Version with database recording. Now full archive of headers since 2000.Here we count good and bad words in the database. 
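The counting itself is a simple word-list lookup. Purely as an illustration (the word lists, the `score_headline` helper, and the `mark = wpos - wneg` definition below are hypothetical; the notebook itself reads precomputed wpos/wneg/mark values from the netdata table), the idea is:

```python
# Hypothetical positive/negative word lists; the real lists are not shown in this notebook.
GOOD_WORDS = {"success", "victory", "growth"}
BAD_WORDS = {"accident", "fire", "crisis"}

def score_headline(headline):
    """Return (wpos, wneg, mark) for one headline by counting word-list hits."""
    words = headline.lower().split()
    wpos = sum(w in GOOD_WORDS for w in words)
    wneg = sum(w in BAD_WORDS for w in words)
    return wpos, wneg, wpos - wneg  # mark assumed to be the positive/negative balance

print(score_headline("growth and success after the crisis"))  # -> (2, 1, 1)
```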
No more downloading info from websites.import datetime now = datetime.datetime.now() import sqlite3 #%pylab inline import numpy as np import matplotlib.pyplot as plt import matplotlib.dates as mdates from datetime import datetime, date, time, timedelta from time import gmtime, strftime # main db #db = "mp-nettemp3-fru-2015.db" #dbcf = "mp-nettemp3-fru-2015-stat.db" db = "mp-nettemp3-fru-2000-2015.db" dbcf = "mp-nettemp3-fru-2000-2015-stat.db" conn = sqlite3.connect(db) cur = conn.cursor() # temp db, later - also to main #dbc = sqlite3.connect(":memory:") dbc = sqlite3.connect(dbcf) curc = dbc.cursor() # calc data per days #dbc.execute("drop table if exists daydata") dbc.execute("create table if not exists daydata (day text, wpos int, wneg int, mark number)") dbc.commit() cur.execute("select dtyear, sum(wpos), sum(wneg), sum(mark) from netdata group by dtyear") vals = [] for row in cur: # print (row) vals += [row] #print (vals) vdates = [] vpos = [] vneg = [] vmark = [] for val in vals: vdates += [val[0]] vpos += [val[1]] vneg += [(-val[2])] vmark += [val[3]] #print (vdates, vpos, vneg, vmark) from matplotlib.finance import quotes_historical_yahoo_ochl from matplotlib.dates import YearLocator, MonthLocator, DateFormatter from matplotlib.finance import quotes_historical_yahoo_ochl years = YearLocator() # every year months = MonthLocator() # every month yearsFmt = DateFormatter('%Y') x = mdates.drange(datetime.strptime(vdates[0], "%Y-%m-%d"), datetime.strptime(vdates[-1], "%Y-%m-%d"), timedelta(days=1)) y = np.array(vmark) #x = [v[0] for v in vdates] #y = [v[0] for v in vdates] x = np.array(range(len(y))) fig = plt.figure() sp = fig.add_subplot(111) sp.plot_date(x, y, '-') #x #y sp.plot(x, y, '-') #sp.title('title') #sp.ylabel('marks') sp.grid(True) #sp.plot_date(vdates, vmark, '-') plt.show() conn.close() dbc.close()Detect similar images using clusteringThis notebooks demonstrates the idea behind the `ImageClusteringSkill` using a small dataset. Basically, the algorithm consists of the following two steps:1. Extract VGG16 embeddings2. 
Cluster embeddings using DBSCANimport sys sys.path.append("../") from sklearn.utils.estimator_checks import check_estimator import logging import matplotlib.pyplot as plt import ipyplot from ml.extractors.vgg16_extractor import VGG16Extractor from ml.models.DBSCAN import DBSCANv2 from ml.similarity.detector import ImageSimilarityDetector from ml.utils.image import read_images_from_folder logging.getLogger().setLevel(logging.INFO)Download sample dataset and display imagesimages_train, _ = read_images_from_folder('../data/train') ipyplot.plot_images(images_train, img_width=150, max_images=10)Now we detect the similarity of the images# Create VGG16 extractor extractor = VGG16Extractor(weights="../models/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5") # Create DBSCANv2 instance and verify whether the custom class is correct model = DBSCANv2(eps=0.64, min_samples=3, metric="cosine") check_estimator(model) # Create similarity detector detector = ImageSimilarityDetector(extractor, model) # Train detector labels_train = detector.train(images_train)Let's display the images# Display images for cluster #0 cluster_id = 0 clust = images_train[labels_train == cluster_id] ipyplot.plot_images(clust, img_width=150, max_images=len(clust), labels=labels_train[labels_train == cluster_id]) # Download test data images_test, _ = read_images_from_folder('../data/test') # Scoring labels_test = detector.assign_group(images_test) ipyplot.plot_images(images_test, img_width=150, max_images=20, labels=labels_test)Dual Momentum Sector Rotation (DMSR)'Relative momentum looks at price strength with respect to other assets.Absolute momentum uses an asset’s own past performance to infer futureperformance. Absolute momentum can reduce downside exposure as wellenhance returns. The best approach is to use both types of momentumtogether. That is what dual momentum is all about.' https://www.optimalmomentum.com/momentum/**Buy Signal**: When the S&P 500 is above its 10-month simple moving average, buy the sectors with the biggest gains over a three-month timeframe and (optionally) has positive absolute momentum.**Sell Signal**: (Optionally) Exit all positions when the S&P 500 moves below its 10-month simple moving average on a monthly closing basis, or (optionaly) exit a single position if it has negative absolute momentum.**Rebalance**: Once per month, sell sectors that fall out of the top tier (three) and buy the sectors that move into the top tier (two or three).https://school.stockcharts.com/doku.php?id=trading_strategies:sector_rotation_roc https://robotwealth.com/dual-momentum-review/ You can reproduce the results on robowealth by setting the 'end' date to (2017, 1, 1). You can also note that these methods have NOT done so well since 2018, and especially didn't handle the COVID downturn very well.import datetime import matplotlib.pyplot as plt import pandas as pd from talib.abstract import * import pinkfish as pf import strategy # Format price data. pd.options.display.float_format = '{:0.2f}'.format %matplotlib inline # Set size of inline plots. 
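# The buy/sell/rebalance rules described above live inside the local `strategy`
# module. As a rough, hypothetical sketch only (not the pinkfish/strategy API),
# the monthly selection logic could look like this, given a DataFrame of
# month-end adjusted closes with one column per symbol (including 'SPY'):
def select_sectors(monthly_closes, top_tier=2, lookback=3, sma_months=10):
    spy = monthly_closes['SPY']
    # Regime filter: only hold sectors while SPY closes above its 10-month SMA.
    if spy.iloc[-1] <= spy.rolling(sma_months).mean().iloc[-1]:
        return []  # go to cash
    # Relative momentum: rank the remaining symbols by trailing `lookback`-month return.
    momentum = monthly_closes.pct_change(lookback).iloc[-1].drop('SPY')
    return list(momentum.sort_values(ascending=False).head(top_tier).index)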
'''note: rcParams can't be in same cell as import matplotlib or %matplotlib inline %matplotlib notebook: will lead to interactive plots embedded within the notebook, you can zoom and resize the figure %matplotlib inline: only draw static images in the notebook ''' plt.rcParams["figure.figsize"] = (10, 7)Some global dataSP500_Sectors = ['SPY', 'XLB', 'XLE', 'XLF', 'XLI', 'XLK', 'XLP', 'XLU', 'XLV', 'XLY'] Other_Sectors = ['RSP', 'DIA', 'IWM', 'QQQ', 'DAX', 'EEM', 'TLT', 'GLD', 'XHB'] Diversified_Assets = ['SPY', 'TLT', 'NLY', 'GLD'] Diversified_Assets_Reddit = ['IWB', 'IEV', 'EWJ', 'EPP', 'IEF', 'SHY', 'GLD'] Robot_Dual_Momentum_Equities = ['SPY', 'CWI'] Robot_Dual_Momentum_Bonds = ['CSJ', 'HYG'] Robot_Dual_Momentum_Equities_Bonds = ['SPY', 'AGG'] Robot_Wealth = ['IWM', 'SPY', 'VGK', 'IEV', 'EWJ', 'EPP', 'IEF', 'SHY', 'GLD'] # Pick one of the above symbols = SP500_Sectors capital = 10000 start = datetime.datetime(2007, 1, 1) #start = datetime.datetime(*pf.SP500_BEGIN) end = datetime.datetime.now() #end = datetime.datetime(2019, 12, 1) options = { 'use_adj' : True, 'use_cache' : True, 'lookback': 6, 'margin': 1, 'use_absolute_mom': False, 'use_regime_filter': False, 'top_tier': 2 #'top_tier': int(len(symbols)/2) } optionsRun Strategys = strategy.Strategy(symbols, capital, start, end, options) s.run()View logss.rlog.head() s.tlog.tail() s.dbal.tail()Generate strategy stats - display all available statspf.print_full(s.stats)start 2007-06-11 end 2021-04-09 beginning_balance 10000 ending_balance 24442.80 total_net_profit 14442.80 gross_profit 27019.96 gross_loss -12577.16 profit_factor 2.15 return_on_initial_capital 144.43 annual_return_rate 6.68 trading_period 13 years 9 months 29 days pct_time_in_market 98.31 margin 1 avg_leverage 1.00 max_leverage [...]Run Benchmark, Retrieve benchmark logs, and Generate benchmark statsbenchmark = pf.Benchmark('SPY', s.capital, s.start, s.end, use_adj=True) benchmark.run()Plot Equity Curves: Strategy vs Benchmarkpf.plot_equity_curve(s.dbal, benchmark=benchmark.dbal)Bar Graph: Strategy vs Benchmarkdf = pf.plot_bar_graph(s.stats, benchmark.stats) dfEstimate piWrite a program that estimates the value of pi by the 'throw darts at a wall' method. That is, generate random `(x,y)` points inside a square and see how many of these fall in the circle that has for diameter the length of the side of the square in which coordinates were generated. The ratio of the number of points in the circle to the number of points in the square should allow you to estimate pi. For this exercise, make a module called `pi_estimate.py` within a `day2/exercises/yourname` folder. Besides these instructions, I will not give any more requirements to how you structure this exercise. You may want to start with just writing functions and then at some point reorganize into an object-oriented design.* Do this estimate using 100, 1000, 10,000, and 1e6 points.* How long does the calculation take for different numbers of points? Make a figure that illustrates how the calculation time depends on the number of points. * Make a figure that displays the "darts."* Run this calculation many times for a single `N` (number of darts), and plot a histogram of the results. What is the mean and standard deviation of these estimates?* Make a plot that illustrates the precision of the pi estimate as a function of number of random points used.Make a notebook that demonstrates how your `pi_estimate` module works, as well as presenting the results of all your calculations. 
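For orientation only, a minimal sketch of the dart-throwing estimator (the `estimate_pi` helper below is hypothetical and is not the required `pi_estimate` module structure):

```python
import random

def estimate_pi(n_darts):
    """Estimate pi by sampling points uniformly in the unit square.

    A point falls inside the inscribed circle of radius 0.5 centred at (0.5, 0.5)
    when dx**2 + dy**2 <= 0.25; the hit ratio approximates pi/4.
    """
    hits = sum(
        (random.random() - 0.5) ** 2 + (random.random() - 0.5) ** 2 <= 0.25
        for _ in range(n_darts)
    )
    return 4 * hits / n_darts

for n in (100, 1_000, 10_000, 1_000_000):
    print(n, estimate_pi(n))
```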
The notebook should have minimal complicated code in it; rather, it should initialize objects and call functions defined within the `pi_estimate` module.If you want to see and see again the explanation for the throwing dart technique, here is a nice explanatory video https://youtu.be/M34TO71SKGk (all is in the first minute of the video)#Entirely your move!Train Telecom Customer Churn Prediction with XGBoost This tutorial is based on [this](https://www.kaggle.com/pavanraj159/telecom-customer-churn-prediction/comments6.-Model-Performances) Kaggle notebook and [this](https://github.com/gojek/feast/tree/master/examples/feast-xgboost-churn-prediction-tutorial) Feast notebookimport numpy as np import pandas as pd from hops import featurestore, hdfs from hops import numpy_helper as numpy from hops import pandas_helper as pandas import os import itertools import warnings warnings.filterwarnings("ignore") import io import statsmodels, yellowbrick import sklearn # Tested with 0.22.1 import imblearn from slugify import slugifyStarting Spark application1.1 Datatelecom_df = featurestore.get_featuregroup("telcom_featuregroup", dataframe_type="pandas") telecom_df.head()Running sql: use telecom_featurestore against offline feature store SQL string for the query created successfully Running sql: SELECT * FROM telcom_featuregroup_1 against offline feature store churn ... tenure_group_tenure_gt_60 0 0 ... 0 1 1 ... 0 2 1 ... 0 3 0 ... 0 4 1 ... 0 [5 rows x 47 columns]1.6 Data Preparation for Trainingfrom sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix,accuracy_score,classification_report from sklearn.metrics import roc_auc_score,roc_curve,scorer from sklearn.metrics import f1_score import statsmodels.api as sm from sklearn.metrics import precision_score,recall_score from yellowbrick.classifier import DiscriminationThreshold Id_col = ['customer_id'] target_col = ["churn"] # Split into a train and test set train, test = train_test_split(telecom_df,test_size = .25 ,random_state = 111) # Seperating dependent and independent variables cols = [i for i in telecom_df.columns if i not in Id_col + target_col] training_x = train[cols] training_y = train[target_col] testing_x = test[cols] testing_y = test[target_col]1.7 Trainingfrom xgboost import XGBClassifier xgb_model = XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1, colsample_bytree=1, gamma=0, learning_rate=0.9, max_delta_step=0, max_depth=7, min_child_weight=1, missing=None, n_estimators=100, n_jobs=1, nthread=None, objective='binary:logistic', random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None, silent=True, subsample=1) # Train model xgb_model.fit(training_x, training_y) predictions = xgb_model.predict(testing_x) probabilities = xgb_model.predict_proba(testing_x)1.8 Analysiscoefficients = pd.DataFrame(xgb_model.feature_importances_) column_df = pd.DataFrame(cols) coef_sumry = (pd.merge(coefficients, column_df, left_index=True, right_index=True, how="left")) coef_sumry.columns = ["coefficients", "features"] coef_sumry = coef_sumry.sort_values(by="coefficients", ascending=False) acc = accuracy_score(testing_y, predictions) print(xgb_model) print("\n Classification report : \n", classification_report(testing_y, predictions)) print("Accuracy Score : ", acc) from hops import model import pickle MODEL_NAME = "XGBoost_Churn_Classifier" file_name = "xgb_reg.pkl" hdfs_path = "Resources/xgboost_model" pickle.dump(xgb_model, 
open(file_name, "wb")) hdfs.mkdir(hdfs_path) hdfs.copy_to_hdfs(file_name, hdfs_path, overwrite=True) # test that we can load and use the model xgb_model_loaded = pickle.load(open(file_name, "rb")) xgb_model_loaded.predict(testing_x)[0] == xgb_model.predict(testing_x)[0] # save to the model registry model.export(hdfs_path, MODEL_NAME, metrics={'accuracy': acc})Started copying local path xgb_reg.pkl to hdfs path hdfs://rpc.namenode.service.consul:8020/Projects/telecom/Resources/xgboost_model/xgb_reg.pkl Finished copying Exported model XGBoost_Churn_Classifier as version 1 successfully. Polling XGBoost_Churn_Classifier version 1 for model availability. Model now available.Linear model overfitting* This simple notebook demonstrates how to overfit a linear regression* It really isn't that hard* Also, it shows basic problem for linear regresssion in the "big data" world* It then tries to fix this using Ridge and Lasso# Load helpers # Will try to just load what I need on this %matplotlib inline import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.linear_model import Ridge from sklearn.linear_model import Lasso # Function to generate linear data experiments def genLinData(N,M,noise): # y = x_1 + x_2 .. x_M + eps # X's scaled so the variance of explained part is same order as noise variance (if std(eps) = 1) sigNoise = np.sqrt(1./M) X = np.random.normal(size=(N,M),loc=0,scale=sigNoise) eps = np.random.normal(size=N,loc=0,scale=noise) y = np.sum(X,axis=1)+eps return X,y* Model equation:* $y = \sum_{i=1}^M x_i + \epsilon_i$* $E(y)=E(x_i) = E(\epsilon)=0$* $\hat y = \sum_{i=1}^M x_i$* $\text{Var}({x_i}) = (1/M)$* $\text{Var}({\sum x_i}) = 1 = \text{Var} (\hat y)$* Equals $\epsilon$ or noise variance, if noise = 1* Variance of y = $\text{Var}(\hat y) + \text{Var}(\epsilon)$* Variance of y = 1 + 1 = 2* R-squared = $1-\frac{(N-1)*\text{Var}(\epsilon)}{(N-1)\text{Var}(y)}=( 1 - \frac{1}{2}) = 0.5$X, y = genLinData(50,1,1.0) lr = LinearRegression() lr.fit(X, y) xtarget = np.arange(start=-3.0,stop=3.0,step=0.05) xt = xtarget.reshape(len(xtarget),1) yhat = lr.predict(xt) plt.plot(xtarget,yhat) plt.plot(X,y,"*") plt.plot() plt.grid() # Set up a system that you might have estimated once in your econometrics class X, y = genLinData(200,2,1.0) X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.5,random_state=0) # Now run regression # print score, which is R-squared (fit) lr = LinearRegression() lr.fit(X_train, y_train) print(lr.score(X_train,y_train)) print(lr.score(X_test,y_test)) print("lr.coef_: {}".format(lr.coef_)) print("lr.intercept_: {}".format(lr.intercept_))lr.coef_: [0.65885973 1.04095232] lr.intercept_: 0.20399119466991084Now increase right hand side forecast variables (a lot)# Set up a system you would be told never to try in your econometrics class X, y = genLinData(200,50,1.0) X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.5,random_state=0) # Now run regression # print score, which is R-squared (fit) lr = LinearRegression() lr.fit(X_train, y_train) print(lr.score(X_train,y_train)) print(lr.score(X_test,y_test))0.6808236088877091 -0.3817118156632482Overfitting* This regression is clearly overfitting* Try many different runs of this* This is a form of overfitting* **Note:** The model is technically the correct model A quick monte-carlo example* Note: you can do this many times* This is known as a monte-carlo* See code below* 
There is a big for loop * Statistics for each run are stored in numpy vector (scoreVec)nmc = 500 scoreVec = np.zeros(nmc) for i in range(nmc): X, y = genLinData(200,50,1.0) X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.5) # Now run regression # print score, which is R-squared (fit) lr = LinearRegression() lr.fit(X_train, y_train) scoreVec[i] = lr.score(X_test,y_test) print(np.mean(scoreVec)) print(np.std(scoreVec)) print(np.mean(scoreVec<0)) # A function to automate MC experiments def MCtraintest(nmc,X,y,modelObj,testFrac): trainScore = np.zeros(nmc) testScore = np.zeros(nmc) for i in range(nmc): X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=testFrac) modelObj.fit(X_train,y_train) trainScore[i] = modelObj.score(X_train,y_train) testScore[i] = modelObj.score(X_test,y_test) return trainScore,testScore X, y = genLinData(200,50,1.0) lr = LinearRegression() trainS, testS = MCtraintest(500,X,y,lr,0.5) print(np.mean(trainS)) print(np.std(trainS)) print(np.mean(testS)) print(np.std(testS))0.7202938723384411 0.045496948831382736 -0.20024775954112023 0.23152654625274632Increase sample size* The ultimate solution for overfitting# Set up a system you would be told never to try in your econometrics class X, y = genLinData(20000,50,1.0) X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.5,random_state=0) # Now run regression # print score, which is R-squared (fit) lr = LinearRegression() lr.fit(X_train, y_train) print(lr.score(X_train,y_train)) print(lr.score(X_test,y_test))0.49162580006682516 0.5034100487612816Complex functions* Add polynomial features where there are none* Scikit Learn polynomial features function* $y=b_0 + b_1 x + b_2 x^2 + b_3 x^3 + \ldots $from sklearn.preprocessing import PolynomialFeatures X, y = genLinData(25,1,1.0) poly = PolynomialFeatures(degree=7) Xpoly = poly.fit_transform(X) lr.fit(Xpoly,y) xt = np.arange(start=-1.5,stop=1.5,step=0.05) xtarget = xt.reshape(len(xt),1) xtargetPoly = poly.fit_transform(xtarget) yhat = lr.predict(xtargetPoly) plt.plot(xtarget,yhat) plt.plot(X,y,'o') plt.grid() # print(Xpoly) X, y = genLinData(50,1,1.0) X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.5,random_state=0) poly = PolynomialFeatures(degree=7) Xpoly_train = poly.fit_transform(X_train) Xpoly_test = poly.fit_transform(X_test) lr.fit(Xpoly_train,y_train) print(lr.score(Xpoly_train,y_train)) print(lr.score(Xpoly_test,y_test))0.7805925109719885 -0.1673685914808758Ridge and Lasso ---- Try Ridge and Lasso* Control overfitting* Parameter alpha now added* Note: In this experiment no coefficients are zeroX, y = genLinData(200,50,1.0) X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.5,random_state=0) # Now run regression # print score, which is R-squared (fit) ridge = Ridge(alpha=1.0) ridge.fit(X_train, y_train) print(ridge.score(X_train,y_train)) print(ridge.score(X_test,y_test)) X, y = genLinData(200,50,1.0) X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.5,random_state=0) # Now run regression # print score, which is R-squared (fit) lasso = Lasso(alpha=0.005) lasso.fit(X_train, y_train) print(lasso.score(X_train,y_train)) print(lasso.score(X_test,y_test))0.6123416473659897 0.14803452726945876A more complicated model* Set some of the parameters to zero* This may give even more overfitting* Can also see how well Lasso gets zeros* Finding irrelevant information# Function to generate linear data experiments # Now drop some coefficients to zero (sparse) def genLinData(N,M,noise): # 
y = x_1 + x_2 .. x_M + eps # X's scaled so the variance of explained part is same order as noise variance (if eps = 1) sigNoise = np.sqrt(1./M) # set up random beta for regression beta = np.random.normal(size=(M,1),loc=0.,scale=1.) # force smaller beta to zero beta[abs(beta)<1.0] = 0. betaUsed= np.sum( beta != 0.) X = np.random.normal(size=(N,M),loc=0,scale=sigNoise) eps = np.random.normal(size=(N,1),loc=0,scale=noise) # Modern Python with matrix multiplication y = X @ beta + eps # Find theoretical best R-squared sse = np.sum(eps**2) meany = np.mean(y) sse2 = np.sum( (y-meany)**2) trsquared = 1. - sse/sse2 # Old style Python # X = np.random.normal(size=(N,M),loc=0,scale=sigNoise) # eps = np.random.normal(size=N,loc=0,scale=noise) # y = np.sum(X,axis=1)+eps return X,y,betaUsed,trsquaredOrdinary least squaresX, y, nvar, trs = genLinData(200,50,1.0) X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.5,random_state=0) # Now run regression # print score, which is R-squared (fit) lr = LinearRegression() lr.fit(X_train, y_train) print(trs) print(lr.score(X_train,y_train)) print(lr.score(X_test,y_test)) print(nvar) print(lr.coef_) print(np.mean(np.abs(lr.coef_))) print(np.sum(lr.coef_!=0))0.5117550042422836 0.7826269595268323 -0.16165976103625934 14 [[-3.01349293e-01 -2.24407278e+00 1.08698110e+00 1.46428484e-03 2.87625839e+00 1.01710345e+00 2.20324905e+00 1.93426693e-02 -2.47174845e+00 1.80599035e+00 -1.01607937e+00 2.62370519e-01 -1.46628553e-01 -5.50893014e-01 6.47500993e-01 5.35537043e-01 -1.05539383e+00 -2.21919140e+00 -1.19570779e+00 2.63931634e-01 -1.39609171e+00 -7.87601320e-01 1.73235330e+00 -3.05716209e-01 2.57899634e+00 -1.75295654e+00 -1.25673208e+00 4.78563030e+00 2.72394387e-01 3.38025119e+00 -3.05864709e-01 5.73019941e-01 2.69869766e+00 -5.62412983e-02 4.08253268e-01 7.56803664e-01 -4.29597307e-01 -1.41353778e-01 -9.75920418e-02 4.42433066e-01 2.84937940e+00 -1.18479336e+00 1.16220071e+00 -2.51570364e+00 1.33525786e+00 4.99660819e-02 5.24928225e-01 -1.10198121e+00 -6.94365790e-01 -6.93403782e-01]] 1.1638270827612027 50Now quick monte-carlonmc = 1000 scoreVec = np.zeros(nmc) scoreVecInSamp = np.zeros(nmc) trsVec = np.zeros(nmc) for i in range(nmc): X, y, nvar, trs = genLinData(250,50,1.0) trsVec[i] = trs X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.5) # Now run regression # print score, which is R-squared (fit) lr = LinearRegression() lr.fit(X_train, y_train) scoreVecInSamp[i] = lr.score(X_train,y_train) scoreVec[i] = lr.score(X_test,y_test) print(np.mean(trsVec)) print(np.mean(scoreVecInSamp)) print(np.mean(scoreVec)) print(np.mean(scoreVec<0))0.4340192181881749 0.6636461706272556 0.018934703600057015 0.418Ridge regressionX, y, nvar, trs = genLinData(200,50,1.0) X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.5,random_state=0) # Now run regression # print score, which is R-squared (fit) ridge = Ridge(alpha=1.0) ridge.fit(X_train, y_train) print(trs) print(ridge.score(X_train,y_train)) print(ridge.score(X_test,y_test)) print(nvar) print(ridge.coef_) print(np.mean(np.abs(ridge.coef_))) print(np.sum(ridge.coef_!=0))0.45511969847796774 0.6520702419421494 0.23350977505183235 17 [[ 0.17015394 -0.82761649 -0.43109037 -1.27384248 0.18373044 -0.35431999 0.53168231 -0.62791659 -0.24759256 0.06836831 -0.3603374 0.53737374 -1.00913689 -1.09974103 -0.77369359 -0.10776131 0.34284253 0.60080409 -0.1925334 1.05161455 -0.20636497 -0.27880304 0.67068373 -1.12649931 0.34244029 -0.04538607 -0.17787471 0.76285855 -0.92361021 -0.43184387 
0.91894013 -0.4637596 -0.21972635 -0.09797793 -1.89686149 -0.77245623 0.63906827 -0.1830806 -0.08281724 -0.6997603 -0.04730691 -0.76279963 -0.32514477 -0.01941144 0.21394583 -0.39007657 -0.22534093 0.30567489 -1.02888189 0.57455554]] 0.5125220664265492 50Monte-carlo for Ridgenmc = 1000 scoreVec = np.zeros(nmc) scoreVecInSamp = np.zeros(nmc) trsVec = np.zeros(nmc) for i in range(nmc): X, y, nvar, trs = genLinData(250,50,1.0) trsVec[i] = trs X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.5) # Now run regression # print score, which is R-squared (fit) ridge = Ridge(alpha=1.0) ridge.fit(X_train, y_train) scoreVecInSamp[i] = ridge.score(X_train,y_train) scoreVec[i] = ridge.score(X_test,y_test) print(np.mean(trsVec)) print(np.mean(scoreVecInSamp)) print(np.mean(scoreVec)) print(np.mean(scoreVec<0))0.434418527980755 0.5928849706368247 0.24313523265525117 0.016Lasso regressionX, y, nvar, trs = genLinData(200,50,1.0) X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.5,random_state=0) # Now run regression # print score, which is R-squared (fit) lasso = Lasso(alpha=0.005) lasso.fit(X_train, y_train) print(trs) print(lasso.score(X_train,y_train)) print(lasso.score(X_test,y_test)) print(nvar) print(lasso.coef_) print(np.mean(np.abs(lasso.coef_))) print(np.sum(lasso.coef_!=0))0.4766985589834687 0.6484865026499906 0.18354432888358663 13 [-0. -0. 0.05123075 -0. -0. -1.46181678 -2.57005686 -0.54462588 -0. 2.76106956 2.77417913 0.46841493 -0. 0.01899875 -1.02461637 0. 0. -0. 0. 0. -0.73777972 0.38628045 0. 0.82074723 1.82314029 -0. 0.38131716 0.6135143 -1.47525448 1.62806354 0. 0.12463412 0.47836877 1.12144804 0.88043718 -0.12937665 0. 0.83523062 -0. 0.58991758 -0. 0. -0. 0.8581768 1.11052159 -0. -0.63284531 0. -0. 0. ] 0.5260412564939729 27Monte-carlo for Lassonmc = 1000 scoreVec = np.zeros(nmc) scoreVecInSamp = np.zeros(nmc) trsVec = np.zeros(nmc) for i in range(nmc): X, y, nvar, trs = genLinData(250,50,1.0) trsVec[i] = trs X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.5) # Now run regression # print score, which is R-squared (fit) lasso = Lasso(alpha=0.005) lasso.fit(X_train, y_train) scoreVecInSamp[i] = lasso.score(X_train,y_train) scoreVec[i] = lasso.score(X_test,y_test) print(np.mean(trsVec)) print(np.mean(scoreVecInSamp)) print(np.mean(scoreVec)) print(np.mean(scoreVec<0))0.43425022630415866 0.6176815929346151 0.2292835237575616 0.045Mini-Project 2: Network Intrusion Detector CSC 180 Intelligent Systems (Fall 2019) , , 10-11-2019%matplotlib inline import matplotlib.pyplot as plt from matplotlib.pyplot import figure, show import collections import io import requests import shutil import os, json import csv from sklearn import metrics from sklearn import preprocessing from sklearn.model_selection import train_test_split import sklearn.feature_extraction.text as sk_text from sklearn.metrics import confusion_matrix, classification_report, roc_curve, auc import numpy as np import pandas as pd import tensorflow as tf import tensorflow.keras from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint from tensorflow.keras import optimizers, regularizers # Encode text values to dummy variables(i.e. 
[1,0,0],[0,1,0],[0,0,1] for red,green,blue) def encode_text_dummy(df, name): dummies = pd.get_dummies(df[name]) for x in dummies.columns: dummy_name = "{}-{}".format(name, x) df[dummy_name] = dummies[x] df.drop(name, axis=1, inplace=True) # Encode text values to indexes(i.e. [1],[2],[3] for red,green,blue). def encode_text_index(df, name): le = preprocessing.LabelEncoder() df[name] = le.fit_transform(df[name]) return le.classes_ # Encode a numeric column as zscores def encode_numeric_zscore(df, name, mean=None, sd=None): if mean is None: mean = df[name].mean() if sd is None: sd = df[name].std() df[name] = (df[name] - mean) / sd # Convert all missing values in the specified column to the median def missing_median(df, name): med = df[name].median() df[name] = df[name].fillna(med) # Convert all missing values in the specified column to the default def missing_default(df, name, default_value): df[name] = df[name].fillna(default_value) # Convert a Pandas dataframe to the x,y inputs that TensorFlow needs def to_xy(df, target): result = [] for x in df.columns: if x != target: result.append(x) # find out the type of the target column. target_type = df[target].dtypes target_type = target_type[0] if isinstance(target_type, collections.Sequence) else target_type # Encode to int for classification, float otherwise. TensorFlow likes 32 bits. if target_type in (np.int64, np.int32): # Classification dummies = pd.get_dummies(df[target]) return df[result].values.astype(np.float32), dummies.values.astype(np.float32) else: # Regression return df[result].values.astype(np.float32), df[target].values.astype(np.float32) # Nicely formatted time string def hms_string(sec_elapsed): h = int(sec_elapsed / (60 * 60)) m = int((sec_elapsed % (60 * 60)) / 60) s = sec_elapsed % 60 return "{}:{:>02}:{:>05.2f}".format(h, m, s) # Remove all rows where the specified column is +/- sd standard deviations def remove_outliers(df, name, sd): drop_rows = df.index[(np.abs(df[name] - df[name].mean()) >= (sd * df[name].std()))] df.drop(drop_rows, axis=0, inplace=True) # Encode a column to a range between normalized_low and normalized_high. def encode_numeric_range(df, name, normalized_low=-1, normalized_high=1, data_low=None, data_high=None): if data_low is None: data_low = min(df[name]) data_high = max(df[name]) df[name] = ((df[name] - data_low) / (data_high - data_low)) \ * (normalized_high - normalized_low) + normalized_low # Regression chart. def chart_regression(pred,y,sort=True): t = pd.DataFrame({'pred' : pred, 'y' : y.flatten()}) if sort: t.sort_values(by=['y'],inplace=True) a = plt.plot(t['y'].tolist(),label='expected') b = plt.plot(t['pred'].tolist(),label='prediction') plt.ylabel('output') plt.legend() plt.show() # Plot a confusion matrix. # cm is the confusion matrix, names are the names of the classes. def plot_confusion_matrix(cm, names, title='Confusion matrix', cmap=plt.cm.Blues): plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(names)) plt.xticks(tick_marks, names, rotation=45) plt.yticks(tick_marks, names) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') # Plot an ROC. pred - the predictions, y - the expected output. 
def plot_roc(pred,y): fpr, tpr, thresholds = roc_curve(y, pred) roc_auc = auc(fpr, tpr) plt.figure() plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver Operating Characteristic (ROC)') plt.legend(loc="lower right") plt.show()Data Preparation#encode_csv = X_test_prep.to_csv(r'data/nid_test_prep.csv', header=True, index=False) X_test_prep = pd.read_csv(r'data/nid_test_prep.csv') y=X_test_prep['outcome_normal.'] X_test_prep=X_test_prep.drop('outcome_normal.', axis=1) X_numpy = X_test_prep.to_numpy() x = X_numpy.reshape((len(X_test_prep), 1, len(X_test_prep.columns), 1)) # Split into train/test x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=42) x_train = x_train.astype('float32') x_test = x_test.astype('float32') y_train = tf.keras.utils.to_categorical(y_train, 2) y_test = tf.keras.utils.to_categorical(y_test, 2) x_train.shape y_train.shape x_test.shape y_test.shapeConvolutional Model# define a CNN cnn = Sequential() cnn.add(Conv2D(64, kernel_size=(1, y_train.shape[1]), strides=(1, 1), activation='tanh', input_shape=(1, x_train.shape[2], 1))) cnn.add(MaxPooling2D(pool_size=(1,2))) cnn.add(Conv2D(x_train.shape[2], kernel_size=(1, y_train.shape[1]), strides=(1, 1), activation='tanh')) cnn.add(MaxPooling2D(pool_size=(1,2))) cnn.add(Flatten()) cnn.add(Dense(32, activation="relu")) cnn.add(Dropout(0.5)) cnn.add(Dense(y_train.shape[1], activation="softmax")) cnn.compile(loss="categorical_crossentropy", optimizer="adam", metrics=['accuracy']) cnn.summary() import time start_time = time.time() # 1% of dataset # Define batch_size and # of epochs batch_size = 128 monitor = EarlyStopping(monitor='val_loss', min_delta=1e-6, patience=3, verbose=1, mode='auto') checkpointer = ModelCheckpoint(filepath="data/best_weights.hdf5", verbose=0, save_best_only=True) # save best model cnn.fit(x_train, y_train, batch_size=batch_size, callbacks=[monitor, checkpointer], epochs=100, verbose=2, validation_data=(x_test, y_test)) cnn.load_weights('data/best_weights.hdf5') elapsed_time = time.time() - start_time print("Model: CNN, Activation: tanh, tanh, relu, softmax, Optimizer: adam, Kernel number/size: 118/64, 1x1") print("Elapsed time: {}".format(hms_string(elapsed_time))) # evaluate() computes the loss and accuracy score = cnn.evaluate(x_test[0:100], y_test[0:100], verbose=2) print('Test loss: {}'.format(score[0])) print('Test accuracy: {}'.format(score[1])) y_true = np.argmax(y_test[0:100],axis=1) pred = cnn.predict(x_test[0:100]) pred = np.argmax(pred,axis=1) score = metrics.accuracy_score(y_true, pred) print('Accuracy: {}'.format(score)) f1 = metrics.f1_score(y_true, pred, average='weighted') print('Averaged F1: {}'.format(f1)) print(metrics.classification_report(y_true, pred)) pred = cnn.predict(x_test[0:100]) pred = pred[:,1] # Only positive class (M) plot_roc(pred,y_true) cnn.load_weights('data/best_weights.hdf5') # Plot non-normalized confusion matrix y_true = np.argmax(y_test[0:100],axis=1) pred = cnn.predict(x_test[0:100]) pred = np.argmax(pred,axis=1) cm = confusion_matrix(y_true, pred) print(cm) plt.figure() plot_confusion_matrix(cm, ['normal','intruder']) plt.show() print(classification_report(y_true, pred)) reg_score = np.sqrt(metrics.mean_squared_error(pred,y_true)) print("Score (RMSE): {}".format(reg_score)) # Plot the chart chart_regression(pred, y_true, sort=True)Score (RMSE): 0.1Dense 
ModelX_test_prep = pd.read_csv(r'data/nid_test_prep.csv') y=X_test_prep['outcome_normal.'] X_test_prep=X_test_prep.drop('outcome_normal.', axis=1) X_numpy = X_test_prep.to_numpy() # Split into train/test x_train, x_test, y_train, y_test = train_test_split(X_numpy, y, test_size=0.25, random_state=42) x_train = x_train.astype('float32') x_test = x_test.astype('float32') y_train = tf.keras.utils.to_categorical(y_train, 2) y_test = tf.keras.utils.to_categorical(y_test, 2) # Defining a dense model model = Sequential() model.add(Dense(32, input_dim=X_numpy.shape[1], activation='tanh')) model.add(Dense(6)) model.add(Dense(2,activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) monitor = EarlyStopping(monitor='val_loss', min_delta=1e-6, patience=3, verbose=1, mode='auto') checkpointer = ModelCheckpoint(filepath="data/best_fullyconnected_weights.hdf5", verbose=0, save_best_only=True) # save best model model.fit(x_train, y_train, validation_data=(x_test,y_test), callbacks=[monitor,checkpointer], verbose=2,epochs=100) model.load_weights('data/best_fullyconnected_weights.hdf5') # load weights from best model elapsed_time = time.time() - start_time print("Model: Sequential, Activation: tanh, Layers, count: 32, 6, 2 Optimizer: adam ") print("Elapsed time: {}".format(hms_string(elapsed_time))) # evaluate() computes the loss and accuracy score = model.evaluate(x_test[0:100], y_test[0:100], verbose=2) model.summary() print('Test loss: {}'.format(score[0])) print('Test accuracy: {}'.format(score[1])) y_true = np.argmax(y_test[0:100],axis=1) pred = model.predict(x_test[0:100]) pred = np.argmax(pred,axis=1) score = metrics.accuracy_score(y_true, pred) print('Accuracy: {}'.format(score)) f1 = metrics.f1_score(y_true, pred, average='weighted') print('Averaged F1: {}'.format(f1)) print(metrics.classification_report(y_true, pred)) pred = model.predict(x_test[0:100]) pred = pred[:,1] # Only positive class (M) plot_roc(pred,y_true) y_true = np.argmax(y_test[0:100],axis=1) pred = model.predict(x_test[0:100]) pred = np.argmax(pred,axis=1) cm = confusion_matrix(y_true, pred) print(cm) plt.figure() plot_confusion_matrix(cm, ['normal','intruder']) plt.show() print(classification_report(y_true, pred)) reg_score2 = np.sqrt(metrics.mean_squared_error(pred,y_true)) print("Score (RMSE): {}".format(reg_score2)) # Plot the chart chart_regression(pred, y_true, sort=True)Score (RMSE): 0.1Function Definintions# 1. Add the clean movie function that takes in the argument, "movie". def clean_movie(movie): """ Takes a single wikipedia record, extracts all known values for alternate titles, and moves them to a list. Additionally maps redundant/duplicative column names. 
""" movie = dict(movie) # creates a non-destructive copy # Clean alternate titles alt_titles = dict() languages = ['Arabic', 'Cantonese', 'Chinese', 'French', 'Hangul', 'Hebrew', 'Hepburn', 'Japanese', 'Literally', 'Mandarin', 'McCune–Reischauer', 'Polish', 'Revised Romanization', 'Romanized', 'Russian', 'Simplified', 'Traditional', 'Yiddish'] for language in languages: if language in movie: alt_titles[language] = movie[language] movie.pop(language) if len(alt_titles) > 0: movie['alt_titles'] = alt_titles def change_column_name(old_name, new_name): if old_name in movie: movie[new_name] = movie.pop(old_name) change_column_name('Country of origin', 'Country') change_column_name('Directed by', 'Director(s)') change_column_name('Director', 'Director(s)') change_column_name('Distributed by', 'Distributor') change_column_name('Edited by', 'Editor(s)') change_column_name('Length', 'Running time') change_column_name('Produced by', 'Producer(s)') change_column_name('Producer', 'Producer(s)') change_column_name('Written by', 'Writer(s)') change_column_name('Original release', 'Release date') change_column_name('Productioncompany ', 'Production company(s)') change_column_name('Productioncompanies ', 'Production company(s)') change_column_name('Theme music composer', 'Composer(s)') change_column_name('Music by', 'Composer(s)') return movie def parse_dollars(s): """ Given string s, parse currency strings to float. """ if type(s) != str: return np.nan # form one: r"\$\s*\d{1,3}\.?\d*\s*[mb]illi?on" # form two: r"\$\s*\d+[,\.]\d{3}" # form: "$###.# billion: # remove dollar signs, whitespace, and text. # Multiply by 1billion if re.match(r"\$\s*\d{1,3}\.?\d*\s*billi?on", s, flags=re.IGNORECASE): s = re.sub('\$|\s|[a-zA-Z]', '', s) value = float(s) * 10**9 return value # form: "$###.# million: # remove dollar signs, whitespace, and text. # Multiply by 1million if re.match(r"\$\s*\d{1,3}\.?\d*\s*milli?on", s, flags=re.IGNORECASE): s = re.sub('\$|\s|[a-zA-Z]', '', s) value = float(s) * 10**6 return value # form: $###,###,### # strip dollar signs and thousands separators if re.match(r"\$\s*\d+[,\.]\d{3}", s, flags=re.IGNORECASE): s = re.sub('\$|,|\.','',s) value = float(s) return value else: return np.nan # 2 Add the function that takes in three arguments; # Wikipedia data, Kaggle metadata, and MovieLens rating data (from Kaggle) def import_source_files(wiki_file: str, kaggle_file: str, ratings_file: str): """ Function takes three arguments, each corresponding to the name of a specific source csv or json file for the three types of data objects we are importing. Returns all three objects as unique pandas DataFrames. """ kaggle_metadata = pd.read_csv(kaggle_file, low_memory=False) ratings = pd.read_csv(ratings_file) with open(wiki_file, mode='r') as file: wiki_movies_json = json.load(file) # Remove TV shows wiki_movies_json = [wiki_movies_json[i]\ for i in range(len(wiki_movies_json))\ if 'No. of episodes' not in wiki_movies_json[i]] # Iterate through clean movie function to tidy columns wiki_movies_json = [clean_movie(wiki_movies_json[i]) for i in range(len(wiki_movies_json))] # Create dataframe wiki_movies_df = pd.DataFrame(wiki_movies_json) # Extract all IMDB IDs from valid URls and remove records that do not contain them try: wiki_movies_df.dropna(subset=['imdb_link'], inplace=True) wiki_movies_df['imdb_id'] = wiki_movies_df['imdb_link'].str.extract(r'(tt\d{7})') wiki_movies_df.drop_duplicates(subset='imdb_id', inplace=True) except Exception as e: print(f'IMDB extraction failed. 
{e}') #Consolidate writer columns without overwriting for col in ["Writer(s)", "Screenplay by", "Story by"]: wiki_movies_df[col] = wiki_movies_df[col].apply(lambda x: ', '.join(x) if type(x) == list else x) wiki_movies_df["Writer(s)"] = wiki_movies_df.apply(lambda row: row["Screenplay by"] if pd.isna(row["Writer(s)"]) else row["Writer(s)"], axis=1) wiki_movies_df["Writer(s)"] = wiki_movies_df.apply(lambda row: row["Story by"] if pd.isna(row["Writer(s)"]) else row["Writer(s)"], axis=1) wiki_movies_df.drop(columns=["Screenplay by", "Story by"], inplace=True) columns_to_drop = [column\ for column in wiki_movies_df.columns\ if wiki_movies_df[column].count()/len(wiki_movies_df) <= 0.1] wiki_movies_df.drop(columns=columns_to_drop, inplace=True) #Convert year to int wiki_movies_df["year"] = wiki_movies_df["year"].apply(lambda x: int(x)) #Regex strings for currency patterns form_one = r"\$\s*\d{1,3}\.?\d*\s*[mb]illi?on" form_two = r"\$\s*\d+[,\.]\d{3}" # CLEAN BOX OFFICE DATA box_office = wiki_movies_df['Box office'].dropna() box_office = box_office.apply(lambda x: ' '.join(x) if type(x) == list else x) wiki_movies_df['box_office'] = box_office.str.\ extract(f"({form_one}|{form_two})",\ flags=re.IGNORECASE)[0].apply(parse_dollars) wiki_movies_df.drop('Box office', axis=1, inplace=True) # CLEAN BUDGET data budget = wiki_movies_df['Budget'].dropna() budget = budget.map(lambda x: ' '.join(x) if type(x) == list else x) # Omit wikipedia citation markers using square brackets budget = budget.str.replace(r'\[\d+\]\s*', '') # Remove any hyphens and defer to smaller end of range budget = budget.str.replace(r'\$.*[-—–](?![a-z])' , '$', regex=True) contains_form_one = budget.str.contains(pat=form_one, flags=re.IGNORECASE, na=False) contains_form_two = budget.str.contains(pat=form_two, flags=re.IGNORECASE, na=False) wiki_movies_df['budget'] = budget.str.extract(f'({form_one}|{form_two})', flags=re.IGNORECASE)[0].apply(parse_dollars) wiki_movies_df.drop('Budget', axis=1, inplace=True) # CLEAN RELEASE DATE DATA release_date = wiki_movies_df['Release date'].dropna().apply(lambda x: ' '.join(x) if type(x) == list else x) # pattern 1: Month Name, 1-2 digits, 4 digit year date_pat_1 = r"\w*\s\d{1,2},\s\d{4}" matches_pat_1 = release_date.str.contains(date_pat_1, flags=re.IGNORECASE, na=False) # pattern 2: yyyy-dd-mm date_pat_2 = r"\d{4}[-—–]\d{2}[-—–]\d{2}" matches_pat_2 = release_date.str.contains(date_pat_2, flags=re.IGNORECASE, na=False) # pattern 3: (optional day), month name, year date_pat_3 = r"\d{0,2}\s*\w{3,10}\s\d{4}" matches_pat_3 = release_date.str.contains(date_pat_3, flags=re.IGNORECASE, na=False) # pattern 4: four digit year only date_pat_4 = r"\d{4}" matches_pat_4 = release_date.str.contains(date_pat_4, flags=re.IGNORECASE, na=False) wiki_movies_df['release_date'] = pd.to_datetime( release_date.str.extract(f'({date_pat_1}|{date_pat_2}|{date_pat_3}|{date_pat_4})')[0], infer_datetime_format=True, errors='coerce') wiki_movies_df.drop('Release date', axis=1, inplace=True) # RUNTIME DATA # two string forms transformed: "# h(ours) ## m(inutes)", and '### minutes" running_time = wiki_movies_df['Running time'].dropna().apply(lambda x: ' '.join(x) if type(x) == list else x) running_time_extract = running_time.str.extract(r"(\d+)\s*ho?u?r?s?\s*(\d*)|(\d{1,3})\s*m") running_time_extract = running_time_extract.apply(lambda col: pd.to_numeric(col, errors='coerce')).fillna(0) wiki_movies_df['running_time'] = running_time_extract.apply(lambda row: row[0]*60 + row[1] if row[2] == 0 else row[2], axis=1) 
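# Note on the extraction above: capture columns 0 and 1 hold the hour and minute
# parts of strings like '1 h 30' / '2 hours', while column 2 holds plain '### minutes'
# strings; unmatched groups become 0 after to_numeric/fillna, so the lambda returns
# hours*60 + minutes unless the minutes-only form (column 2) matched.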
wiki_movies_df.drop('Running time', axis=1, inplace=True) return wiki_movies_df, kaggle_metadata, ratingsMap to dataframesfile_dir = './data' wiki_file = f'{file_dir}/wikipedia-movies.json' kaggle_file = f'{file_dir}/movies_metadata.csv' ratings_file = f'{file_dir}/ratings.csv' wiki_file, kaggle_file, ratings_file = import_source_files(wiki_file=wiki_file, kaggle_file=kaggle_file, ratings_file=ratings_file) wiki_movies_df = wiki_file # 20. Check that the wiki_movies_df DataFrame looks like this. wiki_movies_df # 21. Check that wiki_movies_df DataFrame columns are correct. wiki_movies_df.columns.to_list()BERTopicBERTopic is a topic modeling technique that leverages transformers and a custom class-based TF-IDF to create dense clusters allowing for easily interpretable topics whilst keeping important words in the topic descriptions. Enabling the GPUFirst, you'll need to enable GPUs for the notebook:- Navigate to Edit→Notebook Settings- select GPU from the Hardware Accelerator drop-down[Reference](https://colab.research.google.com/notebooks/gpu.ipynb) **Installing BERTopic**We start by installing BERTopic from PyPi:!pip install bertopic !pip install nltkRequirement already satisfied: bertopic in /usr/local/lib/python3.7/dist-packages (0.8.1) Requirement already satisfied: numpy>=1.20.0 in /usr/local/lib/python3.7/dist-packages (from bertopic) (1.21.1) Requirement already satisfied: plotly<4.14.3,>=4.7.0 in /usr/local/lib/python3.7/dist-packages (from bertopic) (4.14.2) Requirement already satisfied: hdbscan>=0.8.27 in /usr/local/lib/python3.7/dist-packages (from bertopic) (0.8.27) Requirement already satisfied: tqdm>=4.41.1 in /usr/local/lib/python3.7/dist-packages (from bertopic) (4.41.1) Requirement already satisfied: pandas>=1.1.5 in /usr/local/lib/python3.7/dist-packages (from bertopic) (1.1.5) Requirement already satisfied: umap-learn>=0.5.0 in /usr/local/lib/python3.7/dist-packages (from bertopic) (0.5.1) Requirement already satisfied: sentence-transformers>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from bertopic) (2.0.0) Requirement already satisfied: scikit-learn>=0.22.2.post1 in /usr/local/lib/python3.7/dist-packages ([...]Restart the NotebookAfter installing BERTopic, some packages that were already loaded were updated and in order to correctly use them, we should now restart the notebook.From the Menu:Runtime → Restart Runtime DataI will use the Kaggle Glassdoor Employee Review Dataset which contains roughly 33000 positive and 30000 negative reviewsfrom google.colab import drive drive.mount("/content/gdrive") import pandas as pd train_df = pd.read_csv('/content/gdrive/My Drive/BERTopic_glassdoor/train.csv') pos_docs = [review for review in train_df['positives']] pos_docs[:5] len(pos_docs) import nltk nltk.download('stopwords') nltk.download('punkt') from nltk.corpus import stopwords from nltk.tokenize import word_tokenize print(stopwords.words('english')) stop_words = set(stopwords.words('english')) def clean_stopwords(sentence): word_tokens = word_tokenize(sentence) filtered_sentence = [w for w in word_tokens if not w.lower() in stop_words] filtered_sentence =' '.join(filtered_sentence) return filtered_sentence pos_docs_clean = [clean_stopwords(sentence) for sentence in pos_docs] pos_docs_clean[:5] len(pos_docs_clean)**Topic Modeling**We will go through the main components of BERTopic and the steps necessary to create a strong topic model. TrainingWe start by instantiating BERTopic. We set language to `english` since our documents are in the English language. 
If you would like to use a multi-lingual model, please use `language="multilingual"` instead. We will also calculate the topic probabilities. However, this can slow down BERTopic significantly at large amounts of data (>100_000 documents). It is advised to turn this off if you want to speed up the model.from bertopic import BERTopic topic_model = BERTopic(language="english", top_n_words=15, n_gram_range=(1, 2), min_topic_size=30, nr_topics='auto', calculate_probabilities=True, verbose=True) topics, probs = topic_model.fit_transform(pos_docs_clean)/usr/local/lib/python3.7/dist-packages/distributed/config.py:20: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details. defaults = yaml.load(f)Extracting TopicsAfter fitting our model, we can start by looking at the results. Typically, we look at the most frequent topics first as they best represent the collection of documents.freq = topic_model.get_topic_info() freq-1 refers to all outliers and should typically be ignored. Next, let's take a look at a frequent topic that were generated:topic_model.get_topic(0) # Select the most frequent topic topic_model.get_topic(1) topic_model.get_topic(2)**NOTE**: BERTopic is stocastich which means that the topics might differ across runs. This is mostly due to the stocastisch nature of UMAP. **Visualization**There are several visualization options available in BERTopic, namely the visualization of topics, probabilities and topics over time. Topic modeling is, to a certain extent, quite subjective. Visualizations help understand the topics that were created. Visualize TopicsAfter having trained our `BERTopic` model, we can iteratively go through perhaps a hundred topic to get a good understanding of the topics that were extract. However, that takes quite some time and lacks a global representation. Instead, we can visualize the topics that were generated in a way very similar to [LDAvis](https://github.com/cpsievert/LDAvis):topic_model.visualize_topics()Visualize Topic ProbabilitiesThe variable `probabilities` that is returned from `transform()` or `fit_transform()` can be used to understand how confident BERTopic is that certain topics can be found in a document. To visualize the distributions, we simply call:topic_model.visualize_distribution(probs[0], min_probability=0.02)Visualize Topic HierarchyThe topics that were created can be hierarchically reduced. In order to understand the potential hierarchical structure of the topics, we can use scipy.cluster.hierarchy to create clusters and visualize how they relate to one another. This might help selecting an appropriate nr_topics when reducing the number of topics that you have created.topic_model.visualize_hierarchy(top_n_topics=69) topic_model.visualize_hierarchy(top_n_topics=30)Visualize TermsWe can visualize the selected terms for a few topics by creating bar charts out of the c-TF-IDF scores for each topic representation. Insights can be gained from the relative c-TF-IDF scores between and within topics. Moreover, you can easily compare topic representations to each other.topic_model.visualize_barchart(top_n_topics=15)Visualize Topic SimilarityHaving generated topic embeddings, through both c-TF-IDF and embeddings, we can create a similarity matrix by simply applying cosine similarities through those topic embeddings. 
**Visualize Topic Similarity**

Having generated topic embeddings, through both c-TF-IDF and the document embeddings, we can create a similarity matrix by applying cosine similarity to those topic embeddings. The result is a matrix indicating how similar topics are to each other.

```python
topic_model.visualize_heatmap(n_clusters=20, width=1000, height=1000)
```

**Visualize Term Score Decline**

Topics are represented by a number of words, starting with the most representative word. Each word carries a c-TF-IDF score; the higher the score, the more representative the word is of the topic. Since the topic words are sorted by their c-TF-IDF score, the scores slowly decline with each word that is added. At some point, adding words to the topic representation only marginally increases the total c-TF-IDF score and no longer benefits the representation.

To visualize this effect, we can plot the c-TF-IDF scores for each topic against the term rank of each word. The term rank (where the word with the highest c-TF-IDF score has rank 1) goes on the x-axis, and the c-TF-IDF scores go on the y-axis. The result is a visualization of how the c-TF-IDF score declines as words are added to the topic representation. It allows you, using the elbow method, to select the best number of words per topic.

```python
topic_model.visualize_term_rank()
```

**Topic Representation**

After having created the topic model, you might not be satisfied with some of the parameters you have chosen. Fortunately, BERTopic allows you to update the topics after they have been created, so you can fine-tune the model to your specifications.

**Update Topics**

When you have trained a model and viewed the topics and the words that represent them, you might not be satisfied with the representation. Perhaps you forgot to remove stopwords, or you want to try out a different `n_gram_range`. We can use the `update_topics` function to update the topic representation with new parameters for c-TF-IDF:

```python
topic_model.update_topics(pos_docs_clean, topics, n_gram_range=(1, 3))
topic_model.get_topic(0)  # The most frequent topic is about Amazon
topic_model.get_topic(1)  # The second most frequent topic seems to be about teams and teamwork
```

**Topic Reduction**

We can also reduce the number of topics after having trained a BERTopic model. The advantage of doing so is that you can decide on the number of topics after knowing how many are actually created. It is difficult to predict, before training your model, how many topics are in your documents and how many will be extracted. Instead, we can decide afterwards how many topics seem realistic:

```python
new_topics, new_probs = topic_model.reduce_topics(pos_docs_clean, topics, probs, nr_topics=30)

topic_model.visualize_topics()
topic_model.visualize_distribution(probs[1], min_probability=0.005)
topic_model.visualize_hierarchy(top_n_topics=31)
topic_model.visualize_barchart(top_n_topics=10)
topic_model.visualize_heatmap(n_clusters=20, width=1000, height=1000)
topic_model.visualize_term_rank()
```

**Search Topics**

After having trained our model, we can use `find_topics` to search for topics that are similar to an input search term. Here, we search for topics that closely relate to the term "management", then extract the most similar topics and check the results:

```python
similar_topics, similarity = topic_model.find_topics("management", top_n=5)
similar_topics
topic_model.get_topic(10)
topic_model.get_topic(3)
topic_model.get_topic(26)
```
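To see the search results side by side rather than inspecting topics one at a time, a short sketch using only the `similar_topics` and `similarity` lists returned above could print each matched topic with its similarity score and top words:

```python
# Sketch: summarize the topics returned by find_topics.
# get_topic returns (word, c-TF-IDF score) pairs; we keep the first five words per topic.
for topic_id, score in zip(similar_topics, similarity):
    top_words = [word for word, _ in topic_model.get_topic(topic_id)[:5]]
    print(f"Topic {topic_id} (similarity {score:.2f}): {', '.join(top_words)}")
```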
**Model serialization**

The model and its internal settings can easily be saved. Note that the documents and embeddings will not be saved; however, the UMAP and HDBSCAN models will be.

```python
# Save model
# topic_model.save("my_model")

# Load model
# my_model = BERTopic.load("my_model")
```

**Embedding Models**

The `embedding_model` parameter takes a string pointing to a sentence-transformers model, a SentenceTransformer object, or a Flair DocumentEmbedding model.

**Sentence-Transformers**

You can select any model from sentence-transformers and pass it to BERTopic through `embedding_model`:

```python
topic_model = BERTopic(embedding_model="xlm-r-bert-base-nli-stsb-mean-tokens")
```

Or select a SentenceTransformer model with your own parameters:

```python
from sentence_transformers import SentenceTransformer

sentence_model = SentenceTransformer("distilbert-base-nli-mean-tokens", device="cpu")
topic_model = BERTopic(embedding_model=sentence_model, verbose=True)
```

**Preliminaries**

Initial setup:

```python
import requests                 # best library to manage HTTP transactions
import csv                      # library to read/write/parse CSV files
from bs4 import BeautifulSoup   # web-scraping library

acceptMime = 'text/html'
cikList = []
cikPath = 'cik.txt'
```

Open the file containing the list of CIK codes, read them in, and turn them into a list with whitespace stripped:

```python
cikFileObject = open(cikPath, newline='')
cikRows = cikFileObject.readlines()
for cik in cikRows:
    cikList.append(cik.strip())
print(cikList)
```

Output:

```
['0001085917', '0000105598', '0000034088']
```

**Searching for 10-K forms**

Create a list of dictionaries to hold the relevant results:

```python
resultsList = []
```

Create the search URL, using one hacked together from playing around online:

```python
cik = cikList[2]  # in the final script, this will loop through all of the CIK codes
                  # (elements 0 and 1 don't produce any results)
# this query string selects for 10-K forms, but also retrieves forms whose code starts with 10-K
baseUri = 'https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK=' + cik + '&type=10-K&dateb=&owner=exclude&start=0&count=40&output=atom'
print(baseUri)
```

Output:

```
https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK=0000034088&type=10-K&dateb=&owner=exclude&start=0&count=40&output=atom
```

Retrieve the XML document and turn it into a Beautiful Soup object (well-structured, with magical properties):

```python
r = requests.get(baseUri, headers={'Accept': 'application/xml'})
soup = BeautifulSoup(r.text, features="html5lib")
print(soup)
```

The printed Atom feed includes company details such as the webmaster contact, the mailing and business addresses (5959 Las Colinas Blvd, Irving, TX 75039-2298), a phone number, and SIC code 2911 (Petroleum Refining), followed by the filing entries.
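The comment above notes that the final script will loop through all of the CIK codes rather than picking one. A hypothetical sketch of that loop, reusing the same query string and request pattern:

```python
# Hypothetical final-script loop: fetch one EDGAR Atom feed per CIK code.
for cik in cikList:
    baseUri = ('https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK=' + cik +
               '&type=10-K&dateb=&owner=exclude&start=0&count=40&output=atom')
    r = requests.get(baseUri, headers={'Accept': 'application/xml'})
    soup = BeautifulSoup(r.text, features="html5lib")
    # ...then search each soup for 10-K category elements, as shown below...
```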
The search string (`category[term="10-K"]`) limits the results to category elements whose term attribute is exactly "10-K". The `select` function returns a list of soup objects that can each be searched.

```python
for cat in soup.select('category[term="10-K"]'):
    # can't use cat.filing-href because the hyphen in the tag is interpreted by Python as a minus
    # also, couldn't get .strings to work, so use the first child element (the string content of the tag)
    date = cat.find('filing-date').contents[0]
    year = date[:4]  # the year is the first four characters of the date string
    print(year)
    # create a dictionary of an individual result
    searchResults = {'cik': cik, 'year': year, 'uri': cat.find('filing-href').contents[0]}
    if year == "2016" or year == "2014":
        # append the dictionary to the list of results
        resultsList.append(searchResults)
```

The printed years run from 2018 back through 1994 (2018, 2017, 2016, 2015, 2014, 2013, 2012, 2011, 2010, 2009, 2008, 2007, 2006, 2005, 2004, 2003, 2002, 2001, 2000, 1999, 1996, 1995, 1994).

The loop is done; now show the results:

```python
print(resultsList)
```

Output:

```
[{'cik': '0000034088', 'year': '2016', 'uri': 'http://www.sec.gov/Archives/edgar/data/34088/000003408816000065/0000034088-16-000065-index.htm'},
 {'cik': '0000034088', 'year': '2014', 'uri': 'http://www.sec.gov/Archives/edgar/data/34088/000003408814000012/0000034088-14-000012-index.htm'}]
```

**Searching for the components of an individual 10-K filing**

Start by showing the URL to be retrieved:

```python
form10kList = []  # create an empty list to put the results in
hitNumber = 0
# in the final script, loop through the resultsList; here, just do the first result
# for hitNumber in range(0, len(resultsList)):
print(resultsList[hitNumber]['uri'])
```

Output:

```
http://www.sec.gov/Archives/edgar/data/34088/000003408816000065/0000034088-16-000065-index.htm
```

Retrieve the HTML and turn it into a cleaned-up soup object:

```python
r = requests.get(resultsList[hitNumber]['uri'], headers={'Accept': 'text/html'})
soup = BeautifulSoup(r.text, features="html5lib")
print(soup)
```

The printed page is the EDGAR filing index, titled "EDGAR Filing Documents for 0000034088-16-000065".
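As the comment notes, the final script would loop over `resultsList` rather than handling only the first hit. A hypothetical sketch of that loop, reusing the same request and soup pattern:

```python
# Hypothetical final-script loop: fetch the filing-index page for every saved result.
for hitNumber in range(0, len(resultsList)):
    r = requests.get(resultsList[hitNumber]['uri'], headers={'Accept': 'text/html'})
    soup = BeautifulSoup(r.text, features="html5lib")
    # ...then extract the individual document links for this 10-K filing from the index page soup...
```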